public inbox for gentoo-commits@lists.gentoo.org
[gentoo-commits] proj/linux-patches:4.1 commit in: /
From: Mike Pagano @ 2016-04-20 11:23 UTC (permalink / raw)
  To: gentoo-commits

commit:     c93f1513f37f80b6e6b333f00fae5bc62bbd4743
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr 20 11:23:36 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr 20 11:23:36 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c93f1513

Linux patch 4.1.22

 0000_README             |    4 +
 1021_linux-4.1.22.patch | 9212 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 9216 insertions(+)

diff --git a/0000_README b/0000_README
index cb424ff..3075177 100644
--- a/0000_README
+++ b/0000_README
@@ -127,6 +127,10 @@ Patch:  1020_linux-4.1.21.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.21
 
+Patch:  1021_linux-4.1.22.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.22
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1021_linux-4.1.22.patch b/1021_linux-4.1.22.patch
new file mode 100644
index 0000000..8c18119
--- /dev/null
+++ b/1021_linux-4.1.22.patch
@@ -0,0 +1,9212 @@
+diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
+index c477af086e65..686a64bba775 100644
+--- a/Documentation/filesystems/efivarfs.txt
++++ b/Documentation/filesystems/efivarfs.txt
+@@ -14,3 +14,10 @@ filesystem.
+ efivarfs is typically mounted like this,
+ 
+ 	mount -t efivarfs none /sys/firmware/efi/efivars
++
++Due to the presence of numerous firmware bugs where removing non-standard
++UEFI variables causes the system firmware to fail to POST, efivarfs
++files that are not well-known standardized variables are created
++as immutable files.  This doesn't prevent removal - "chattr -i" will work -
++but it does prevent this kind of failure from being accomplished
++accidentally.
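
The efivarfs note above mentions that "chattr -i" lifts the immutable flag
before a variable can be deleted. chattr does this through the
FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls, clearing FS_IMMUTABLE_FL. A minimal
userspace sketch of the same operation follows; the variable name and GUID in
the path are hypothetical placeholders, and error handling is trimmed:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>	/* FS_IOC_GETFLAGS, FS_IOC_SETFLAGS, FS_IMMUTABLE_FL */

	int main(void)
	{
		/* hypothetical variable; substitute a real efivarfs entry */
		const char *path =
			"/sys/firmware/efi/efivars/Example-0abba7dc-e516-4167-bbf5-4d9d1c739416";
		int fd = open(path, O_RDONLY);
		int flags;

		if (fd < 0)
			return 1;
		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
			flags &= ~FS_IMMUTABLE_FL;	/* what "chattr -i" does */
			ioctl(fd, FS_IOC_SETFLAGS, &flags);
		}
		close(fd);
		return 0;	/* unlink(path) will now succeed */
	}
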
+diff --git a/MAINTAINERS b/MAINTAINERS
+index d8afd2953678..0b51c8a3c627 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -9533,9 +9533,11 @@ S:	Maintained
+ F:	drivers/net/ethernet/dlink/sundance.c
+ 
+ SUPERH
++M:	Yoshinori Sato <ysato@users.sourceforge.jp>
++M:	Rich Felker <dalias@libc.org>
+ L:	linux-sh@vger.kernel.org
+ Q:	http://patchwork.kernel.org/project/linux-sh/list/
+-S:	Orphan
++S:	Maintained
+ F:	Documentation/sh/
+ F:	arch/sh/
+ F:	drivers/sh/
+diff --git a/Makefile b/Makefile
+index 79fab0d55218..7f4a4039fdd9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 21
++SUBLEVEL = 22
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
+index f076ff856d8b..07f61bb1697a 100644
+--- a/arch/arm/boot/dts/armada-375.dtsi
++++ b/arch/arm/boot/dts/armada-375.dtsi
+@@ -508,7 +508,7 @@
+ 			};
+ 
+ 			sata@a0000 {
+-				compatible = "marvell,orion-sata";
++				compatible = "marvell,armada-370-sata";
+ 				reg = <0xa0000 0x5000>;
+ 				interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ 				clocks = <&gateclk 14>, <&gateclk 20>;
+diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
+index c25ef3ec6d1f..e3789fb02c9c 100644
+--- a/arch/arm/include/asm/psci.h
++++ b/arch/arm/include/asm/psci.h
+@@ -37,7 +37,7 @@ struct psci_operations {
+ extern struct psci_operations psci_ops;
+ extern struct smp_operations psci_smp_ops;
+ 
+-#ifdef CONFIG_ARM_PSCI
++#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
+ int psci_init(void);
+ bool psci_smp_available(void);
+ #else
+diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
+index 8e2a7acb823b..1b9f0520dea9 100644
+--- a/arch/arm/mach-omap2/sleep34xx.S
++++ b/arch/arm/mach-omap2/sleep34xx.S
+@@ -436,12 +436,14 @@ skipl2dis:
+ 	and	r1, #0x700
+ 	cmp	r1, #0x300
+ 	beq	l2_inv_gp
++	adr	r0, l2_inv_api_params_offset
++	ldr	r3, [r0]
++	add	r3, r3, r0		@ r3 points to dummy parameters
+ 	mov	r0, #40			@ set service ID for PPA
+ 	mov	r12, r0			@ copy secure Service ID in r12
+ 	mov	r1, #0			@ set task id for ROM code in r1
+ 	mov	r2, #4			@ set some flags in r2, r6
+ 	mov	r6, #0xff
+-	adr	r3, l2_inv_api_params	@ r3 points to dummy parameters
+ 	dsb				@ data write barrier
+ 	dmb				@ data memory barrier
+ 	smc	#1			@ call SMI monitor (smi #1)
+@@ -475,8 +477,8 @@ skipl2dis:
+ 	b	logic_l1_restore
+ 
+ 	.align
+-l2_inv_api_params:
+-	.word	0x1, 0x00
++l2_inv_api_params_offset:
++	.long	l2_inv_api_params - .
+ l2_inv_gp:
+ 	/* Execute smi to invalidate L2 cache */
+ 	mov r12, #0x1			@ set up to invalidate L2
+@@ -531,6 +533,10 @@ l2dis_3630_offset:
+ l2dis_3630:
+ 	.word	0
+ 
++	.data
++l2_inv_api_params:
++	.word	0x1, 0x00
++
+ /*
+  * Internal functions
+  */
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 3258174e6152..f462e6e4ce07 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -18,6 +18,8 @@ GZFLAGS		:=-9
+ KBUILD_DEFCONFIG := defconfig
+ 
+ KBUILD_CFLAGS	+= -mgeneral-regs-only
++KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
++
+ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+ KBUILD_CPPFLAGS	+= -mbig-endian
+ AS		+= -EB
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index cf7319422768..526a9cb218d3 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -33,7 +33,7 @@
+ /*
+  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+  *
+- * VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array
++ * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
+  *	(rounded up to PUD_SIZE).
+  * VMALLOC_START: beginning of the kernel VA space
+  * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
+@@ -43,7 +43,9 @@
+ #define VMALLOC_START		(UL(0xffffffffffffffff) << VA_BITS)
+ #define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
+ 
+-#define vmemmap			((struct page *)(VMALLOC_END + SZ_64K))
++#define VMEMMAP_START		(VMALLOC_END + SZ_64K)
++#define vmemmap			((struct page *)VMEMMAP_START - \
++				 SECTION_ALIGN_DOWN(memstart_addr >> PAGE_SHIFT))
+ 
+ #define FIRST_USER_ADDRESS	0UL
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index ad87ce826cce..ae8f940152aa 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -312,8 +312,8 @@ void __init mem_init(void)
+ 		  "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+ 		  MLG(VMALLOC_START, VMALLOC_END),
+ #ifdef CONFIG_SPARSEMEM_VMEMMAP
+-		  MLG((unsigned long)vmemmap,
+-		      (unsigned long)vmemmap + VMEMMAP_SIZE),
++		  MLG(VMEMMAP_START,
++		      VMEMMAP_START + VMEMMAP_SIZE),
+ 		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
+ 		      (unsigned long)virt_to_page(high_memory)),
+ #endif
+diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
+index 1d8b147282cf..bbf777c04cbf 100644
+--- a/arch/avr32/mach-at32ap/at32ap700x.c
++++ b/arch/avr32/mach-at32ap/at32ap700x.c
+@@ -1328,6 +1328,21 @@ static struct clk atmel_mci0_pclk = {
+ 	.index		= 9,
+ };
+ 
++static bool at32_mci_dma_filter(struct dma_chan *chan, void *pdata)
++{
++	struct mci_dma_data *sl = pdata;
++
++	if (!sl)
++		return false;
++
++	if (find_slave_dev(sl) == chan->device->dev) {
++		chan->private = slave_data_ptr(sl);
++		return true;
++	}
++
++	return false;
++}
++
+ struct platform_device *__init
+ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
+ {
+@@ -1362,6 +1377,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
+ 	slave->sdata.dst_master = 0;
+ 
+ 	data->dma_slave = slave;
++	data->dma_filter = at32_mci_dma_filter;
+ 
+ 	if (platform_device_add_data(pdev, data,
+ 				sizeof(struct mci_platform_data)))
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index d0744cc77ea7..3cef551908f4 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -120,6 +120,7 @@ static inline void calculate_cpu_foreign_map(void)
+ 	cpumask_t temp_foreign_map;
+ 
+ 	/* Re-calculate the mask */
++	cpumask_clear(&temp_foreign_map);
+ 	for_each_online_cpu(i) {
+ 		core_present = 0;
+ 		for_each_cpu(k, &temp_foreign_map)
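
The one-line MIPS fix above zeroes temp_foreign_map, an on-stack cpumask_t,
before bits are accumulated into it; without cpumask_clear() the stack garbage
already in the mask made the result unpredictable. A condensed sketch of the
idiom, independent of the MIPS code:

	#include <linux/cpumask.h>

	static void build_online_mask(struct cpumask *out)
	{
		cpumask_t tmp;		/* on-stack: contents are indeterminate */
		int cpu;

		cpumask_clear(&tmp);	/* zero it before setting any bits */
		for_each_online_cpu(cpu)
			cpumask_set_cpu(cpu, &tmp);
		cpumask_copy(out, &tmp);
	}
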
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 59663af9315f..e4f7d4eed20c 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -335,7 +335,7 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+ 		if (syms[i].st_shndx == SHN_UNDEF) {
+ 			char *name = strtab + syms[i].st_name;
+ 			if (name[0] == '.')
+-				memmove(name, name+1, strlen(name));
++				syms[i].st_name++;
+ 		}
+ 	}
+ }
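
dedotify() used to strip the leading dot from an undefined symbol by
memmove()ing the name in place, which rewrites an ELF string table that other
symbols may share. The replacement simply advances st_name, the symbol's
offset into that table, past the dot and leaves the bytes intact. A
freestanding illustration (the helper name is hypothetical):

	#include <elf.h>

	/* strtab holds "\0.foo\0"; st_name == 1 makes the name ".foo" */
	static void skip_leading_dot(Elf64_Sym *sym, const char *strtab)
	{
		const char *name = strtab + sym->st_name;

		if (name[0] == '.')
			sym->st_name++;	/* name is now "foo"; strtab untouched */
	}
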
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index a648338c434a..4e33fe339b3d 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,7 @@ struct zpci_fmb {
+ 	u64 rpcit_ops;
+ 	u64 dma_rbytes;
+ 	u64 dma_wbytes;
+-} __packed __aligned(16);
++} __packed __aligned(64);
+ 
+ enum zpci_state {
+ 	ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 598f023cf8a6..50a79a5fc116 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -871,8 +871,11 @@ static inline int barsize(u8 size)
+ 
+ static int zpci_mem_init(void)
+ {
++	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
++		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
++
+ 	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+-				16, 0, NULL);
++					   __alignof__(struct zpci_fmb), 0, NULL);
+ 	if (!zdev_fmb_cache)
+ 		goto error_zdev;
+ 
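
The two s390 hunks work together: the measurement block becomes 64-byte
aligned, and the slab cache takes its alignment from the type instead of a
hard-coded 16, with BUILD_BUG_ON() turning the power-of-two and size
assumptions into compile-time failures. A condensed kernel-context sketch of
that pattern, using a hypothetical stand-in struct:

	#include <linux/bug.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/log2.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct fmb_like {
		u64 counters[8];	/* 64 bytes of counters */
	} __packed __aligned(64);

	static struct kmem_cache *fmb_cache;

	static int __init fmb_cache_init(void)
	{
		/* alignment must be a power of two and cover the whole struct */
		BUILD_BUG_ON(!is_power_of_2(__alignof__(struct fmb_like)) ||
			     __alignof__(struct fmb_like) < sizeof(struct fmb_like));

		fmb_cache = kmem_cache_create("fmb_like_cache",
					      sizeof(struct fmb_like),
					      __alignof__(struct fmb_like), 0, NULL);
		return fmb_cache ? 0 : -ENOMEM;
	}
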
+diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
+index 29880c9b324e..e22e57298522 100644
+--- a/arch/um/drivers/mconsole_kern.c
++++ b/arch/um/drivers/mconsole_kern.c
+@@ -133,7 +133,7 @@ void mconsole_proc(struct mc_request *req)
+ 	ptr += strlen("proc");
+ 	ptr = skip_spaces(ptr);
+ 
+-	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY);
++	file = file_open_root(mnt->mnt_root, mnt, ptr, O_RDONLY, 0);
+ 	if (IS_ERR(file)) {
+ 		mconsole_reply(req, "Failed to open file", 1, 0);
+ 		printk(KERN_ERR "open /proc/%s: %ld\n", ptr, PTR_ERR(file));
+diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
+index 72bf2680f819..27e54946ef35 100644
+--- a/arch/x86/ia32/ia32entry.S
++++ b/arch/x86/ia32/ia32entry.S
+@@ -511,6 +511,7 @@ ENTRY(ia32_syscall)
+ 	 * it is too small to ever cause noticeable irq latency.
+ 	 */
+ 	PARAVIRT_ADJUST_EXCEPTION_FRAME
++	ASM_CLAC			/* Do this early to minimize exposure */
+ 	SWAPGS
+ 	ENABLE_INTERRUPTS(CLBR_NONE)
+ 
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 976b86a325e5..a197e15a0e49 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -640,8 +640,8 @@ static inline void entering_irq(void)
+ 
+ static inline void entering_ack_irq(void)
+ {
+-	ack_APIC_irq();
+ 	entering_irq();
++	ack_APIC_irq();
+ }
+ 
+ static inline void exiting_irq(void)
+diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
+index dc0f6ed35b08..5a2ed3ed2f26 100644
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -159,6 +159,14 @@ struct x86_pmu_capability {
+  */
+ #define INTEL_PMC_IDX_FIXED_BTS				(INTEL_PMC_IDX_FIXED + 16)
+ 
++#define GLOBAL_STATUS_COND_CHG				BIT_ULL(63)
++#define GLOBAL_STATUS_BUFFER_OVF			BIT_ULL(62)
++#define GLOBAL_STATUS_UNC_OVF				BIT_ULL(61)
++#define GLOBAL_STATUS_ASIF				BIT_ULL(60)
++#define GLOBAL_STATUS_COUNTERS_FROZEN			BIT_ULL(59)
++#define GLOBAL_STATUS_LBRS_FROZEN			BIT_ULL(58)
++#define GLOBAL_STATUS_TRACE_TOPAPMI			BIT_ULL(55)
++
+ /*
+  * IBS cpuid feature detection
+  */
+diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
+index d866959e5685..d2ad00a42234 100644
+--- a/arch/x86/include/asm/xen/hypervisor.h
++++ b/arch/x86/include/asm/xen/hypervisor.h
+@@ -57,4 +57,6 @@ static inline bool xen_x2apic_para_available(void)
+ }
+ #endif
+ 
++extern void xen_set_iopl_mask(unsigned mask);
++
+ #endif /* _ASM_X86_XEN_HYPERVISOR_H */
+diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
+index 3c6bb342a48f..06b407f79b24 100644
+--- a/arch/x86/include/uapi/asm/msr-index.h
++++ b/arch/x86/include/uapi/asm/msr-index.h
+@@ -72,6 +72,12 @@
+ #define MSR_LBR_CORE_FROM		0x00000040
+ #define MSR_LBR_CORE_TO			0x00000060
+ 
++#define MSR_LBR_INFO_0			0x00000dc0 /* ... 0xddf for _31 */
++#define LBR_INFO_MISPRED		BIT_ULL(63)
++#define LBR_INFO_IN_TX			BIT_ULL(62)
++#define LBR_INFO_ABORT			BIT_ULL(61)
++#define LBR_INFO_CYCLES			0xffff
++
+ #define MSR_IA32_PEBS_ENABLE		0x000003f1
+ #define MSR_IA32_DS_AREA		0x00000600
+ #define MSR_IA32_PERF_CAPABILITIES	0x00000345
+diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
+index 37dae792dbbe..589b3193f102 100644
+--- a/arch/x86/kernel/ioport.c
++++ b/arch/x86/kernel/ioport.c
+@@ -96,9 +96,14 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ {
+ 	struct pt_regs *regs = current_pt_regs();
+-	unsigned int old = (regs->flags >> 12) & 3;
+ 	struct thread_struct *t = &current->thread;
+ 
++	/*
++	 * Careful: the IOPL bits in regs->flags are undefined under Xen PV
++	 * and changing them has no effect.
++	 */
++	unsigned int old = t->iopl >> X86_EFLAGS_IOPL_BIT;
++
+ 	if (level > 3)
+ 		return -EINVAL;
+ 	/* Trying to gain more privileges? */
+@@ -106,8 +111,9 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
+ 		if (!capable(CAP_SYS_RAWIO))
+ 			return -EPERM;
+ 	}
+-	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
+-	t->iopl = level << 12;
++	regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) |
++		(level << X86_EFLAGS_IOPL_BIT);
++	t->iopl = level << X86_EFLAGS_IOPL_BIT;
+ 	set_iopl_mask(t->iopl);
+ 
+ 	return 0;
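
The iopl() rewrite above makes two changes: the magic shift count 12 becomes
the named X86_EFLAGS_IOPL_BIT, and the previous level is read from t->iopl
rather than regs->flags, since (as the new comment notes) the EFLAGS IOPL
field, bits 12-13, is undefined under Xen PV. A standalone sketch of just the
bit manipulation; the two constants are redefined locally so the example
compiles outside the kernel tree:

	#define X86_EFLAGS_IOPL_BIT	12
	#define X86_EFLAGS_IOPL		(3UL << X86_EFLAGS_IOPL_BIT)

	/* extract the 2-bit I/O privilege level (0..3) from an EFLAGS image */
	static inline unsigned int eflags_get_iopl(unsigned long flags)
	{
		return (flags & X86_EFLAGS_IOPL) >> X86_EFLAGS_IOPL_BIT;
	}

	/* return an EFLAGS image with IOPL replaced by level (0..3) */
	static inline unsigned long eflags_set_iopl(unsigned long flags,
						    unsigned int level)
	{
		return (flags & ~X86_EFLAGS_IOPL) |
		       ((unsigned long)level << X86_EFLAGS_IOPL_BIT);
	}
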
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 58e02d938218..f7724a1d7de1 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -49,6 +49,7 @@
+ #include <asm/syscalls.h>
+ #include <asm/debugreg.h>
+ #include <asm/switch_to.h>
++#include <asm/xen/hypervisor.h>
+ 
+ asmlinkage extern void ret_from_fork(void);
+ 
+@@ -419,6 +420,17 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
+ 		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
+ 		__switch_to_xtra(prev_p, next_p, tss);
+ 
++#ifdef CONFIG_XEN
++	/*
++	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
++	 * current_pt_regs()->flags may not match the current task's
++	 * intended IOPL.  We need to switch it manually.
++	 */
++	if (unlikely(xen_pv_domain() &&
++		     prev->iopl != next->iopl))
++		xen_set_iopl_mask(next->iopl);
++#endif
++
+ 	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
+ 		/*
+ 		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index f90952f64e79..e6a4c57100ea 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -244,7 +244,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
+ 		 * PIC is being reset.  Handle it gracefully here
+ 		 */
+ 		atomic_inc(&ps->pending);
+-	else if (value > 0)
++	else if (value > 0 && ps->reinject)
+ 		/* in this case, we had multiple outstanding pit interrupts
+ 		 * that we needed to inject.  Reinject
+ 		 */
+@@ -287,7 +287,9 @@ static void pit_do_work(struct kthread_work *work)
+ 	 * last one has been acked.
+ 	 */
+ 	spin_lock(&ps->inject_lock);
+-	if (ps->irq_ack) {
++	if (!ps->reinject)
++		inject = 1;
++	else if (ps->irq_ack) {
+ 		ps->irq_ack = 0;
+ 		inject = 1;
+ 	}
+@@ -316,10 +318,10 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+ 	struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
+ 	struct kvm_pit *pt = ps->kvm->arch.vpit;
+ 
+-	if (ps->reinject || !atomic_read(&ps->pending)) {
++	if (ps->reinject)
+ 		atomic_inc(&ps->pending);
+-		queue_kthread_work(&pt->worker, &pt->expired);
+-	}
++
++	queue_kthread_work(&pt->worker, &pt->expired);
+ 
+ 	if (ps->is_periodic) {
+ 		hrtimer_add_expires_ns(&ps->timer, ps->period);
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 917148620f49..1274fac7c28f 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7210,6 +7210,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
+ 	if (!(types & (1UL << type))) {
+ 		nested_vmx_failValid(vcpu,
+ 				VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
++		skip_emulated_instruction(vcpu);
+ 		return 1;
+ 	}
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 41a3fb4ed346..c228d8da1f8c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3732,13 +3732,13 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+ 
+ static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+ {
+-	int r = 0;
+-
++	int i;
+ 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+-	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
++	for (i = 0; i < 3; i++)
++		kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
+ 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+-	return r;
++	return 0;
+ }
+ 
+ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+@@ -3757,6 +3757,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+ {
+ 	int r = 0, start = 0;
++	int i;
+ 	u32 prev_legacy, cur_legacy;
+ 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
+ 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
+@@ -3766,7 +3767,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
+ 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
+ 	       sizeof(kvm->arch.vpit->pit_state.channels));
+ 	kvm->arch.vpit->pit_state.flags = ps->flags;
+-	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
++	for (i = 0; i < 3; i++)
++		kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
+ 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ 	return r;
+ }
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 9a2b7101ae8a..f16af96c60a2 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -553,3 +553,10 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
+         }
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
++
++static void pci_bdwep_bar(struct pci_dev *dev)
++{
++	dev->non_compliant_bars = 1;
++}
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a10ed8915bf4..1ecae556d4ed 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -959,7 +959,7 @@ static void xen_load_sp0(struct tss_struct *tss,
+ 	tss->x86_tss.sp0 = thread->sp0;
+ }
+ 
+-static void xen_set_iopl_mask(unsigned mask)
++void xen_set_iopl_mask(unsigned mask)
+ {
+ 	struct physdev_set_iopl set_iopl;
+ 
+diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
+index 15a461e2a0ed..80eb62f853a7 100644
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -128,7 +128,7 @@ ENTRY(_startup)
+ 	wsr	a0, icountlevel
+ 
+ 	.set	_index, 0
+-	.rept	XCHAL_NUM_DBREAK - 1
++	.rept	XCHAL_NUM_DBREAK
+ 	wsr	a0, SREG_DBREAKC + _index
+ 	.set	_index, _index + 1
+ 	.endr
+diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
+index d75aa1476da7..1a804a2f9a5b 100644
+--- a/arch/xtensa/mm/cache.c
++++ b/arch/xtensa/mm/cache.c
+@@ -97,11 +97,11 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
+ 	unsigned long paddr;
+ 	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(page, vaddr);
+ 	set_bit(PG_arch_1, &page->flags);
+ 	clear_page_alias(kvaddr, paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ void copy_user_highpage(struct page *dst, struct page *src,
+@@ -113,11 +113,11 @@ void copy_user_highpage(struct page *dst, struct page *src,
+ 	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
+ 					  &src_paddr);
+ 
+-	pagefault_disable();
++	preempt_disable();
+ 	kmap_invalidate_coherent(dst, vaddr);
+ 	set_bit(PG_arch_1, &dst->flags);
+ 	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
+-	pagefault_enable();
++	preempt_enable();
+ }
+ 
+ #endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
+diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
+index 70cb408bc20d..92d785fefb6d 100644
+--- a/arch/xtensa/platforms/iss/console.c
++++ b/arch/xtensa/platforms/iss/console.c
+@@ -100,21 +100,23 @@ static void rs_poll(unsigned long priv)
+ {
+ 	struct tty_port *port = (struct tty_port *)priv;
+ 	int i = 0;
++	int rd = 1;
+ 	unsigned char c;
+ 
+ 	spin_lock(&timer_lock);
+ 
+ 	while (simc_poll(0)) {
+-		simc_read(0, &c, 1);
++		rd = simc_read(0, &c, 1);
++		if (rd <= 0)
++			break;
+ 		tty_insert_flip_char(port, c, TTY_NORMAL);
+ 		i++;
+ 	}
+ 
+ 	if (i)
+ 		tty_flip_buffer_push(port);
+-
+-
+-	mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
++	if (rd)
++		mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
+ 	spin_unlock(&timer_lock);
+ }
+ 
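
rs_poll() previously ignored the return value of simc_read(); the fix above
breaks out of the drain loop on a non-positive return and re-arms the poll
timer only when the last read was not 0, so end-of-file from the simulator
stops the polling permanently while data (or a transient error) keeps it
running. The shape of the loop, with hypothetical callbacks standing in for
the simcall interface:

	#include <stdbool.h>

	/* drain: returns chars delivered; *rearm says whether to poll again */
	static int drain_once(int (*poll)(void), int (*read1)(char *),
			      void (*deliver)(char), bool *rearm)
	{
		int rd = 1, n = 0;
		char c;

		while (poll()) {
			rd = read1(&c);
			if (rd <= 0)		/* EOF or error: stop draining */
				break;
			deliver(c);
			n++;
		}
		*rearm = (rd != 0);	/* only EOF (0) cancels future polls */
		return n;
	}
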
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 03b5f8d77f37..7f29dc0237d1 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2067,7 +2067,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+ 	if (q->mq_ops) {
+ 		if (blk_queue_io_stat(q))
+ 			blk_account_io_start(rq, true);
+-		blk_mq_insert_request(rq, false, true, true);
++		blk_mq_insert_request(rq, false, true, false);
+ 		return 0;
+ 	}
+ 
+diff --git a/crypto/asymmetric_keys/Makefile b/crypto/asymmetric_keys/Makefile
+index e47fcd9ac5e8..cd1406f9b14a 100644
+--- a/crypto/asymmetric_keys/Makefile
++++ b/crypto/asymmetric_keys/Makefile
+@@ -15,15 +15,21 @@ obj-$(CONFIG_PUBLIC_KEY_ALGO_RSA) += rsa.o
+ obj-$(CONFIG_X509_CERTIFICATE_PARSER) += x509_key_parser.o
+ x509_key_parser-y := \
+ 	x509-asn1.o \
++	x509_akid-asn1.o \
+ 	x509_rsakey-asn1.o \
+ 	x509_cert_parser.o \
+ 	x509_public_key.o
+ 
+-$(obj)/x509_cert_parser.o: $(obj)/x509-asn1.h $(obj)/x509_rsakey-asn1.h
++$(obj)/x509_cert_parser.o: \
++	$(obj)/x509-asn1.h \
++	$(obj)/x509_akid-asn1.h \
++	$(obj)/x509_rsakey-asn1.h
+ $(obj)/x509-asn1.o: $(obj)/x509-asn1.c $(obj)/x509-asn1.h
++$(obj)/x509_akid-asn1.o: $(obj)/x509_akid-asn1.c $(obj)/x509_akid-asn1.h
+ $(obj)/x509_rsakey-asn1.o: $(obj)/x509_rsakey-asn1.c $(obj)/x509_rsakey-asn1.h
+ 
+ clean-files	+= x509-asn1.c x509-asn1.h
++clean-files	+= x509_akid-asn1.c x509_akid-asn1.h
+ clean-files	+= x509_rsakey-asn1.c x509_rsakey-asn1.h
+ 
+ #
+diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
+index 1d29376072da..0f6463b6692b 100644
+--- a/crypto/asymmetric_keys/pkcs7_trust.c
++++ b/crypto/asymmetric_keys/pkcs7_trust.c
+@@ -85,8 +85,8 @@ static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
+ 	/* No match - see if the root certificate has a signer amongst the
+ 	 * trusted keys.
+ 	 */
+-	if (last && last->authority) {
+-		key = x509_request_asymmetric_key(trust_keyring, last->authority,
++	if (last && last->akid_skid) {
++		key = x509_request_asymmetric_key(trust_keyring, last->akid_skid,
+ 						  false);
+ 		if (!IS_ERR(key)) {
+ 			x509 = last;
+diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
+index cd455450b069..a4d083f7e9e1 100644
+--- a/crypto/asymmetric_keys/pkcs7_verify.c
++++ b/crypto/asymmetric_keys/pkcs7_verify.c
+@@ -187,11 +187,11 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
+ 			goto maybe_missing_crypto_in_x509;
+ 
+ 		pr_debug("- issuer %s\n", x509->issuer);
+-		if (x509->authority)
++		if (x509->akid_skid)
+ 			pr_debug("- authkeyid %*phN\n",
+-				 x509->authority->len, x509->authority->data);
++				 x509->akid_skid->len, x509->akid_skid->data);
+ 
+-		if (!x509->authority ||
++		if (!x509->akid_skid ||
+ 		    strcmp(x509->subject, x509->issuer) == 0) {
+ 			/* If there's no authority certificate specified, then
+ 			 * the certificate must be self-signed and is the root
+@@ -216,13 +216,13 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
+ 		 * list to see if the next one is there.
+ 		 */
+ 		pr_debug("- want %*phN\n",
+-			 x509->authority->len, x509->authority->data);
++			 x509->akid_skid->len, x509->akid_skid->data);
+ 		for (p = pkcs7->certs; p; p = p->next) {
+ 			if (!p->skid)
+ 				continue;
+ 			pr_debug("- cmp [%u] %*phN\n",
+ 				 p->index, p->skid->len, p->skid->data);
+-			if (asymmetric_key_id_same(p->skid, x509->authority))
++			if (asymmetric_key_id_same(p->skid, x509->akid_skid))
+ 				goto found_issuer;
+ 		}
+ 
+@@ -338,8 +338,6 @@ int pkcs7_verify(struct pkcs7_message *pkcs7)
+ 		ret = x509_get_sig_params(x509);
+ 		if (ret < 0)
+ 			return ret;
+-		pr_debug("X.509[%u] %*phN\n",
+-			 n, x509->authority->len, x509->authority->data);
+ 	}
+ 
+ 	for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) {
+diff --git a/crypto/asymmetric_keys/x509_akid.asn1 b/crypto/asymmetric_keys/x509_akid.asn1
+new file mode 100644
+index 000000000000..1a33231a75a8
+--- /dev/null
++++ b/crypto/asymmetric_keys/x509_akid.asn1
+@@ -0,0 +1,35 @@
++-- X.509 AuthorityKeyIdentifier
++-- rfc5280 section 4.2.1.1
++
++AuthorityKeyIdentifier ::= SEQUENCE {
++	keyIdentifier			[0] IMPLICIT KeyIdentifier		OPTIONAL,
++	authorityCertIssuer		[1] IMPLICIT GeneralNames		OPTIONAL,
++	authorityCertSerialNumber	[2] IMPLICIT CertificateSerialNumber	OPTIONAL
++	}
++
++KeyIdentifier ::= OCTET STRING ({ x509_akid_note_kid })
++
++CertificateSerialNumber ::= INTEGER ({ x509_akid_note_serial })
++
++GeneralNames ::= SEQUENCE OF GeneralName
++
++GeneralName ::= CHOICE {
++	otherName			[0] ANY,
++	rfc822Name			[1] IA5String,
++	dNSName				[2] IA5String,
++	x400Address			[3] ANY,
++	directoryName			[4] Name ({ x509_akid_note_name }),
++	ediPartyName			[5] ANY,
++	uniformResourceIdentifier	[6] IA5String,
++	iPAddress			[7] OCTET STRING,
++	registeredID			[8] OBJECT IDENTIFIER
++	}
++
++Name ::= SEQUENCE OF RelativeDistinguishedName
++
++RelativeDistinguishedName ::= SET OF AttributeValueAssertion
++
++AttributeValueAssertion ::= SEQUENCE {
++	attributeType		OBJECT IDENTIFIER ({ x509_note_OID }),
++	attributeValue		ANY ({ x509_extract_name_segment })
++	}
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index a668d90302d3..1995d6d962f5 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -18,6 +18,7 @@
+ #include "public_key.h"
+ #include "x509_parser.h"
+ #include "x509-asn1.h"
++#include "x509_akid-asn1.h"
+ #include "x509_rsakey-asn1.h"
+ 
+ struct x509_parse_context {
+@@ -35,6 +36,10 @@ struct x509_parse_context {
+ 	u16		o_offset;		/* Offset of organizationName (O) */
+ 	u16		cn_offset;		/* Offset of commonName (CN) */
+ 	u16		email_offset;		/* Offset of emailAddress */
++	unsigned	raw_akid_size;
++	const void	*raw_akid;		/* Raw authorityKeyId in ASN.1 */
++	const void	*akid_raw_issuer;	/* Raw directoryName in authorityKeyId */
++	unsigned	akid_raw_issuer_size;
+ };
+ 
+ /*
+@@ -48,7 +53,8 @@ void x509_free_certificate(struct x509_certificate *cert)
+ 		kfree(cert->subject);
+ 		kfree(cert->id);
+ 		kfree(cert->skid);
+-		kfree(cert->authority);
++		kfree(cert->akid_id);
++		kfree(cert->akid_skid);
+ 		kfree(cert->sig.digest);
+ 		mpi_free(cert->sig.rsa.s);
+ 		kfree(cert);
+@@ -85,6 +91,18 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen)
+ 	if (ret < 0)
+ 		goto error_decode;
+ 
++	/* Decode the AuthorityKeyIdentifier */
++	if (ctx->raw_akid) {
++		pr_devel("AKID: %u %*phN\n",
++			 ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid);
++		ret = asn1_ber_decoder(&x509_akid_decoder, ctx,
++				       ctx->raw_akid, ctx->raw_akid_size);
++		if (ret < 0) {
++			pr_warn("Couldn't decode AuthKeyIdentifier\n");
++			goto error_decode;
++		}
++	}
++
+ 	/* Decode the public key */
+ 	ret = asn1_ber_decoder(&x509_rsakey_decoder, ctx,
+ 			       ctx->key, ctx->key_size);
+@@ -422,7 +440,6 @@ int x509_process_extension(void *context, size_t hdrlen,
+ 	struct x509_parse_context *ctx = context;
+ 	struct asymmetric_key_id *kid;
+ 	const unsigned char *v = value;
+-	int i;
+ 
+ 	pr_debug("Extension: %u\n", ctx->last_oid);
+ 
+@@ -449,117 +466,113 @@ int x509_process_extension(void *context, size_t hdrlen,
+ 
+ 	if (ctx->last_oid == OID_authorityKeyIdentifier) {
+ 		/* Get hold of the CA key fingerprint */
+-		if (ctx->cert->authority || vlen < 5)
+-			return -EBADMSG;
+-
+-		/* Authority Key Identifier must be a Constructed SEQUENCE */
+-		if (v[0] != (ASN1_SEQ | (ASN1_CONS << 5)))
+-			return -EBADMSG;
+-
+-		/* Authority Key Identifier is not indefinite length */
+-		if (unlikely(vlen == ASN1_INDEFINITE_LENGTH))
+-			return -EBADMSG;
+-
+-		if (vlen < ASN1_INDEFINITE_LENGTH) {
+-			/* Short Form length */
+-			if (v[1] != vlen - 2 ||
+-			    v[2] != SEQ_TAG_KEYID ||
+-			    v[3] > vlen - 4)
+-				return -EBADMSG;
+-
+-			vlen = v[3];
+-			v += 4;
+-		} else {
+-			/* Long Form length */
+-			size_t seq_len = 0;
+-			size_t sub = v[1] - ASN1_INDEFINITE_LENGTH;
+-
+-			if (sub > 2)
+-				return -EBADMSG;
+-
+-			/* calculate the length from subsequent octets */
+-			v += 2;
+-			for (i = 0; i < sub; i++) {
+-				seq_len <<= 8;
+-				seq_len |= v[i];
+-			}
+-
+-			if (seq_len != vlen - 2 - sub ||
+-			    v[sub] != SEQ_TAG_KEYID ||
+-			    v[sub + 1] > vlen - 4 - sub)
+-				return -EBADMSG;
+-
+-			vlen = v[sub + 1];
+-			v += (sub + 2);
+-		}
+-
+-		kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
+-						 ctx->cert->raw_issuer_size,
+-						 v, vlen);
+-		if (IS_ERR(kid))
+-			return PTR_ERR(kid);
+-		pr_debug("authkeyid %*phN\n", kid->len, kid->data);
+-		ctx->cert->authority = kid;
++		ctx->raw_akid = v;
++		ctx->raw_akid_size = vlen;
+ 		return 0;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-/*
+- * Record a certificate time.
++/**
++ * x509_decode_time - Decode an X.509 time ASN.1 object
++ * @_t: The time to fill in
++ * @hdrlen: The length of the object header
++ * @tag: The object tag
++ * @value: The object value
++ * @vlen: The size of the object value
++ *
++ * Decode an ASN.1 universal time or generalised time field into a struct the
++ * kernel can handle and check it for validity.  The time is decoded thus:
++ *
++ *	[RFC5280 §4.1.2.5]
++ *	CAs conforming to this profile MUST always encode certificate validity
++ *	dates through the year 2049 as UTCTime; certificate validity dates in
++ *	2050 or later MUST be encoded as GeneralizedTime.  Conforming
++ *	applications MUST be able to process validity dates that are encoded in
++ *	either UTCTime or GeneralizedTime.
+  */
+-static int x509_note_time(struct tm *tm,  size_t hdrlen,
+-			  unsigned char tag,
+-			  const unsigned char *value, size_t vlen)
++int x509_decode_time(time64_t *_t,  size_t hdrlen,
++		     unsigned char tag,
++		     const unsigned char *value, size_t vlen)
+ {
++	static const unsigned char month_lengths[] = { 31, 28, 31, 30, 31, 30,
++						       31, 31, 30, 31, 30, 31 };
+ 	const unsigned char *p = value;
++	unsigned year, mon, day, hour, min, sec, mon_len;
+ 
+-#define dec2bin(X) ((X) - '0')
++#define dec2bin(X) ({ unsigned char x = (X) - '0'; if (x > 9) goto invalid_time; x; })
+ #define DD2bin(P) ({ unsigned x = dec2bin(P[0]) * 10 + dec2bin(P[1]); P += 2; x; })
+ 
+ 	if (tag == ASN1_UNITIM) {
+ 		/* UTCTime: YYMMDDHHMMSSZ */
+ 		if (vlen != 13)
+ 			goto unsupported_time;
+-		tm->tm_year = DD2bin(p);
+-		if (tm->tm_year >= 50)
+-			tm->tm_year += 1900;
++		year = DD2bin(p);
++		if (year >= 50)
++			year += 1900;
+ 		else
+-			tm->tm_year += 2000;
++			year += 2000;
+ 	} else if (tag == ASN1_GENTIM) {
+ 		/* GenTime: YYYYMMDDHHMMSSZ */
+ 		if (vlen != 15)
+ 			goto unsupported_time;
+-		tm->tm_year = DD2bin(p) * 100 + DD2bin(p);
++		year = DD2bin(p) * 100 + DD2bin(p);
++		if (year >= 1950 && year <= 2049)
++			goto invalid_time;
+ 	} else {
+ 		goto unsupported_time;
+ 	}
+ 
+-	tm->tm_year -= 1900;
+-	tm->tm_mon  = DD2bin(p) - 1;
+-	tm->tm_mday = DD2bin(p);
+-	tm->tm_hour = DD2bin(p);
+-	tm->tm_min  = DD2bin(p);
+-	tm->tm_sec  = DD2bin(p);
++	mon  = DD2bin(p);
++	day = DD2bin(p);
++	hour = DD2bin(p);
++	min  = DD2bin(p);
++	sec  = DD2bin(p);
+ 
+ 	if (*p != 'Z')
+ 		goto unsupported_time;
+ 
++	mon_len = month_lengths[mon];
++	if (mon == 2) {
++		if (year % 4 == 0) {
++			mon_len = 29;
++			if (year % 100 == 0) {
++				mon_len = 28;
++				if (year % 400 == 0)
++					mon_len = 29;
++			}
++		}
++	}
++
++	if (year < 1970 ||
++	    mon < 1 || mon > 12 ||
++	    day < 1 || day > mon_len ||
++	    hour < 0 || hour > 23 ||
++	    min < 0 || min > 59 ||
++	    sec < 0 || sec > 59)
++		goto invalid_time;
++	
++	*_t = mktime64(year, mon, day, hour, min, sec);
+ 	return 0;
+ 
+ unsupported_time:
+-	pr_debug("Got unsupported time [tag %02x]: '%*.*s'\n",
+-		 tag, (int)vlen, (int)vlen, value);
++	pr_debug("Got unsupported time [tag %02x]: '%*phN'\n",
++		 tag, (int)vlen, value);
++	return -EBADMSG;
++invalid_time:
++	pr_debug("Got invalid time [tag %02x]: '%*phN'\n",
++		 tag, (int)vlen, value);
+ 	return -EBADMSG;
+ }
++EXPORT_SYMBOL_GPL(x509_decode_time);
+ 
+ int x509_note_not_before(void *context, size_t hdrlen,
+ 			 unsigned char tag,
+ 			 const void *value, size_t vlen)
+ {
+ 	struct x509_parse_context *ctx = context;
+-	return x509_note_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
++	return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen);
+ }
+ 
+ int x509_note_not_after(void *context, size_t hdrlen,
+@@ -567,5 +580,73 @@ int x509_note_not_after(void *context, size_t hdrlen,
+ 			const void *value, size_t vlen)
+ {
+ 	struct x509_parse_context *ctx = context;
+-	return x509_note_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
++	return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen);
++}
++
++/*
++ * Note a key identifier-based AuthorityKeyIdentifier
++ */
++int x509_akid_note_kid(void *context, size_t hdrlen,
++		       unsigned char tag,
++		       const void *value, size_t vlen)
++{
++	struct x509_parse_context *ctx = context;
++	struct asymmetric_key_id *kid;
++
++	pr_debug("AKID: keyid: %*phN\n", (int)vlen, value);
++
++	if (ctx->cert->akid_skid)
++		return 0;
++
++	kid = asymmetric_key_generate_id(ctx->cert->raw_issuer,
++					 ctx->cert->raw_issuer_size,
++					 value, vlen);
++	if (IS_ERR(kid))
++		return PTR_ERR(kid);
++	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
++	ctx->cert->akid_skid = kid;
++	return 0;
++}
++
++/*
++ * Note a directoryName in an AuthorityKeyIdentifier
++ */
++int x509_akid_note_name(void *context, size_t hdrlen,
++			unsigned char tag,
++			const void *value, size_t vlen)
++{
++	struct x509_parse_context *ctx = context;
++
++	pr_debug("AKID: name: %*phN\n", (int)vlen, value);
++
++	ctx->akid_raw_issuer = value;
++	ctx->akid_raw_issuer_size = vlen;
++	return 0;
++}
++
++/*
++ * Note a serial number in an AuthorityKeyIdentifier
++ */
++int x509_akid_note_serial(void *context, size_t hdrlen,
++			  unsigned char tag,
++			  const void *value, size_t vlen)
++{
++	struct x509_parse_context *ctx = context;
++	struct asymmetric_key_id *kid;
++
++	pr_debug("AKID: serial: %*phN\n", (int)vlen, value);
++
++	if (!ctx->akid_raw_issuer || ctx->cert->akid_id)
++		return 0;
++
++	kid = asymmetric_key_generate_id(value,
++					 vlen,
++					 ctx->akid_raw_issuer,
++					 ctx->akid_raw_issuer_size);
++	if (IS_ERR(kid))
++		return PTR_ERR(kid);
++
++	pr_debug("authkeyid %*phN\n", kid->len, kid->data);
++	ctx->cert->akid_id = kid;
++	return 0;
+ }
+diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h
+index 3dfe6b5d6f0b..1de01eaec884 100644
+--- a/crypto/asymmetric_keys/x509_parser.h
++++ b/crypto/asymmetric_keys/x509_parser.h
+@@ -19,11 +19,12 @@ struct x509_certificate {
+ 	struct public_key_signature sig;	/* Signature parameters */
+ 	char		*issuer;		/* Name of certificate issuer */
+ 	char		*subject;		/* Name of certificate subject */
+-	struct asymmetric_key_id *id;		/* Serial number + issuer */
++	struct asymmetric_key_id *id;		/* Issuer + Serial number */
+ 	struct asymmetric_key_id *skid;		/* Subject + subjectKeyId (optional) */
+-	struct asymmetric_key_id *authority;	/* Authority key identifier (optional) */
+-	struct tm	valid_from;
+-	struct tm	valid_to;
++	struct asymmetric_key_id *akid_id;	/* CA AuthKeyId matching ->id (optional) */
++	struct asymmetric_key_id *akid_skid;	/* CA AuthKeyId matching ->skid (optional) */
++	time64_t	valid_from;
++	time64_t	valid_to;
+ 	const void	*tbs;			/* Signed data */
+ 	unsigned	tbs_size;		/* Size of signed data */
+ 	unsigned	raw_sig_size;		/* Size of sigature */
+@@ -48,6 +49,9 @@ struct x509_certificate {
+  */
+ extern void x509_free_certificate(struct x509_certificate *cert);
+ extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen);
++extern int x509_decode_time(time64_t *_t,  size_t hdrlen,
++			    unsigned char tag,
++			    const unsigned char *value, size_t vlen);
+ 
+ /*
+  * x509_public_key.c
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 4c850ac474e2..727752851dcf 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -227,10 +227,10 @@ static int x509_validate_trust(struct x509_certificate *cert,
+ 	if (!trust_keyring)
+ 		return -EOPNOTSUPP;
+ 
+-	if (ca_keyid && !asymmetric_key_id_partial(cert->authority, ca_keyid))
++	if (ca_keyid && !asymmetric_key_id_partial(cert->akid_skid, ca_keyid))
+ 		return -EPERM;
+ 
+-	key = x509_request_asymmetric_key(trust_keyring, cert->authority,
++	key = x509_request_asymmetric_key(trust_keyring, cert->akid_skid,
+ 					  false);
+ 	if (!IS_ERR(key))  {
+ 		if (!use_builtin_keys
+@@ -271,14 +271,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ 	}
+ 
+ 	pr_devel("Cert Key Algo: %s\n", pkey_algo_name[cert->pub->pkey_algo]);
+-	pr_devel("Cert Valid From: %04ld-%02d-%02d %02d:%02d:%02d\n",
+-		 cert->valid_from.tm_year + 1900, cert->valid_from.tm_mon + 1,
+-		 cert->valid_from.tm_mday, cert->valid_from.tm_hour,
+-		 cert->valid_from.tm_min,  cert->valid_from.tm_sec);
+-	pr_devel("Cert Valid To: %04ld-%02d-%02d %02d:%02d:%02d\n",
+-		 cert->valid_to.tm_year + 1900, cert->valid_to.tm_mon + 1,
+-		 cert->valid_to.tm_mday, cert->valid_to.tm_hour,
+-		 cert->valid_to.tm_min,  cert->valid_to.tm_sec);
++	pr_devel("Cert Valid period: %lld-%lld\n", cert->valid_from, cert->valid_to);
+ 	pr_devel("Cert Signature: %s + %s\n",
+ 		 pkey_algo_name[cert->sig.pkey_algo],
+ 		 hash_algo_name[cert->sig.pkey_hash_algo]);
+@@ -287,8 +280,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ 	cert->pub->id_type = PKEY_ID_X509;
+ 
+ 	/* Check the signature on the key if it appears to be self-signed */
+-	if (!cert->authority ||
+-	    asymmetric_key_id_same(cert->skid, cert->authority)) {
++	if (!cert->akid_skid ||
++	    asymmetric_key_id_same(cert->skid, cert->akid_skid)) {
+ 		ret = x509_check_signature(cert->pub, cert); /* self-signed */
+ 		if (ret < 0)
+ 			goto error_free_cert;
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index c42feb2bacd0..33e3db548a29 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -166,14 +166,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
+ 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+ 		},
+ 	},
+-	{
+-	.callback = video_detect_force_vendor,
+-	.ident = "Dell Inspiron 5737",
+-	.matches = {
+-		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+-		DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+-		},
+-	},
+ 	{ },
+ };
+ 
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 3bd7ca9853a8..2af8b29656af 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -705,7 +705,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
+ 			fail_reason = "thermal shutdown";
+ 		}
+ 		if (buf[288] == 0xBF) {
+-			set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag);
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed. Secure erase required.\n");
+ 			fail_all_ncq_cmds = 1;
+@@ -896,6 +896,10 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
+ 
+ 		/* Acknowledge the interrupt status on the port.*/
+ 		port_stat = readl(port->mmio + PORT_IRQ_STAT);
++		if (unlikely(port_stat == 0xFFFFFFFF)) {
++			mtip_check_surprise_removal(dd->pdev);
++			return IRQ_HANDLED;
++		}
+ 		writel(port_stat, port->mmio + PORT_IRQ_STAT);
+ 
+ 		/* Demux port status */
+@@ -991,15 +995,11 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+ 	reply = port->rxfis + RX_FIS_D2H_REG;
+ 	task_file_data = readl(port->mmio+PORT_TFDATA);
+ 
+-	if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+-		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+-
+ 	if ((task_file_data & 1))
+ 		return false;
+ 
+ 	if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
+ 		set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+-		set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+ 		port->ic_pause_timer = jiffies;
+ 		return true;
+ 	} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
+@@ -1011,6 +1011,8 @@ static bool mtip_pause_ncq(struct mtip_port *port,
+ 		((fis->command == 0xFC) &&
+ 			(fis->features == 0x27 || fis->features == 0x72 ||
+ 			 fis->features == 0x62 || fis->features == 0x26))) {
++		clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
++		clear_bit(MTIP_DDF_REBUILD_FAILED_BIT, &port->dd->dd_flag);
+ 		/* Com reset after secure erase or lowlevel format */
+ 		mtip_restart_port(port);
+ 		return false;
+@@ -1102,6 +1104,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	struct mtip_cmd *int_cmd;
+ 	struct driver_data *dd = port->dd;
+ 	int rv = 0;
++	unsigned long start;
+ 
+ 	/* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ 	if (buffer & 0x00000007) {
+@@ -1164,6 +1167,8 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 	/* Populate the command header */
+ 	int_cmd->command_header->byte_count = 0;
+ 
++	start = jiffies;
++
+ 	/* Issue the command to the hardware */
+ 	mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ 
+@@ -1172,10 +1177,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
+ 		if ((rv = wait_for_completion_interruptible_timeout(
+ 				&wait,
+ 				msecs_to_jiffies(timeout))) <= 0) {
++
+ 			if (rv == -ERESTARTSYS) { /* interrupted */
+ 				dev_err(&dd->pdev->dev,
+-					"Internal command [%02X] was interrupted after %lu ms\n",
+-					fis->command, timeout);
++					"Internal command [%02X] was interrupted after %u ms\n",
++					fis->command,
++					jiffies_to_msecs(jiffies - start));
+ 				rv = -EINTR;
+ 				goto exec_ic_exit;
+ 			} else if (rv == 0) /* timeout */
+@@ -2780,48 +2787,6 @@ static void mtip_hw_debugfs_exit(struct driver_data *dd)
+ 		debugfs_remove_recursive(dd->dfs_node);
+ }
+ 
+-static int mtip_free_orphan(struct driver_data *dd)
+-{
+-	struct kobject *kobj;
+-
+-	if (dd->bdev) {
+-		if (dd->bdev->bd_holders >= 1)
+-			return -2;
+-
+-		bdput(dd->bdev);
+-		dd->bdev = NULL;
+-	}
+-
+-	mtip_hw_debugfs_exit(dd);
+-
+-	spin_lock(&rssd_index_lock);
+-	ida_remove(&rssd_index_ida, dd->index);
+-	spin_unlock(&rssd_index_lock);
+-
+-	if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag) &&
+-			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) {
+-		put_disk(dd->disk);
+-	} else {
+-		if (dd->disk) {
+-			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+-			if (kobj) {
+-				mtip_hw_sysfs_exit(dd, kobj);
+-				kobject_put(kobj);
+-			}
+-			del_gendisk(dd->disk);
+-			dd->disk = NULL;
+-		}
+-		if (dd->queue) {
+-			dd->queue->queuedata = NULL;
+-			blk_cleanup_queue(dd->queue);
+-			blk_mq_free_tag_set(&dd->tags);
+-			dd->queue = NULL;
+-		}
+-	}
+-	kfree(dd);
+-	return 0;
+-}
+-
+ /*
+  * Perform any init/resume time hardware setup
+  *
+@@ -2969,7 +2934,6 @@ static int mtip_service_thread(void *data)
+ 	unsigned long slot, slot_start, slot_wrap;
+ 	unsigned int num_cmd_slots = dd->slot_groups * 32;
+ 	struct mtip_port *port = dd->port;
+-	int ret;
+ 
+ 	while (1) {
+ 		if (kthread_should_stop() ||
+@@ -3055,18 +3019,6 @@ restart_eh:
+ 		if (kthread_should_stop())
+ 			goto st_out;
+ 	}
+-
+-	while (1) {
+-		ret = mtip_free_orphan(dd);
+-		if (!ret) {
+-			/* NOTE: All data structures are invalid, do not
+-			 * access any here */
+-			return 0;
+-		}
+-		msleep_interruptible(1000);
+-		if (kthread_should_stop())
+-			goto st_out;
+-	}
+ st_out:
+ 	return 0;
+ }
+@@ -3178,7 +3130,7 @@ static int mtip_hw_get_identify(struct driver_data *dd)
+ 		if (buf[288] == 0xBF) {
+ 			dev_info(&dd->pdev->dev,
+ 				"Drive indicates rebuild has failed.\n");
+-			/* TODO */
++			set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag);
+ 		}
+ 	}
+ 
+@@ -3352,20 +3304,25 @@ out1:
+ 	return rv;
+ }
+ 
+-static void mtip_standby_drive(struct driver_data *dd)
++static int mtip_standby_drive(struct driver_data *dd)
+ {
+-	if (dd->sr)
+-		return;
++	int rv = 0;
+ 
++	if (dd->sr || !dd->port)
++		return -ENODEV;
+ 	/*
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+ 	if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) &&
+-	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))
+-		if (mtip_standby_immediate(dd->port))
++	    !test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag) &&
++	    !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) {
++		rv = mtip_standby_immediate(dd->port);
++		if (rv)
+ 			dev_warn(&dd->pdev->dev,
+ 				"STANDBY IMMEDIATE failed\n");
++	}
++	return rv;
+ }
+ 
+ /*
+@@ -3394,6 +3351,7 @@ static int mtip_hw_exit(struct driver_data *dd)
+ 	/* Release the IRQ. */
+ 	irq_set_affinity_hint(dd->pdev->irq, NULL);
+ 	devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
++	msleep(1000);
+ 
+ 	/* Free dma regions */
+ 	mtip_dma_free(dd);
+@@ -3422,8 +3380,7 @@ static int mtip_hw_shutdown(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive so that it
+ 	 * saves its state.
+ 	 */
+-	if (!dd->sr && dd->port)
+-		mtip_standby_immediate(dd->port);
++	mtip_standby_drive(dd);
+ 
+ 	return 0;
+ }
+@@ -3446,7 +3403,7 @@ static int mtip_hw_suspend(struct driver_data *dd)
+ 	 * Send standby immediate (E0h) to the drive
+ 	 * so that it saves its state.
+ 	 */
+-	if (mtip_standby_immediate(dd->port) != 0) {
++	if (mtip_standby_drive(dd) != 0) {
+ 		dev_err(&dd->pdev->dev,
+ 			"Failed standby-immediate command\n");
+ 		return -EFAULT;
+@@ -3684,6 +3641,28 @@ static int mtip_block_getgeo(struct block_device *dev,
+ 	return 0;
+ }
+ 
++static int mtip_block_open(struct block_device *dev, fmode_t mode)
++{
++	struct driver_data *dd;
++
++	if (dev && dev->bd_disk) {
++		dd = (struct driver_data *) dev->bd_disk->private_data;
++
++		if (dd) {
++			if (test_bit(MTIP_DDF_REMOVAL_BIT,
++							&dd->dd_flag)) {
++				return -ENODEV;
++			}
++			return 0;
++		}
++	}
++	return -ENODEV;
++}
++
++void mtip_block_release(struct gendisk *disk, fmode_t mode)
++{
++}
++
+ /*
+  * Block device operation function.
+  *
+@@ -3691,6 +3670,8 @@ static int mtip_block_getgeo(struct block_device *dev,
+  * layer.
+  */
+ static const struct block_device_operations mtip_block_ops = {
++	.open		= mtip_block_open,
++	.release	= mtip_block_release,
+ 	.ioctl		= mtip_block_ioctl,
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl	= mtip_block_compat_ioctl,
+@@ -3729,10 +3710,9 @@ static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
+ 				rq_data_dir(rq))) {
+ 			return -ENODATA;
+ 		}
+-		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)))
++		if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag) ||
++			test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)))
+ 			return -ENODATA;
+-		if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag))
+-			return -ENXIO;
+ 	}
+ 
+ 	if (rq->cmd_flags & REQ_DISCARD) {
+@@ -4066,52 +4046,51 @@ static int mtip_block_remove(struct driver_data *dd)
+ {
+ 	struct kobject *kobj;
+ 
+-	if (!dd->sr) {
+-		mtip_hw_debugfs_exit(dd);
++	mtip_hw_debugfs_exit(dd);
+ 
+-		if (dd->mtip_svc_handler) {
+-			set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
+-			wake_up_interruptible(&dd->port->svc_wait);
+-			kthread_stop(dd->mtip_svc_handler);
+-		}
++	if (dd->mtip_svc_handler) {
++		set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
++		wake_up_interruptible(&dd->port->svc_wait);
++		kthread_stop(dd->mtip_svc_handler);
++	}
+ 
+-		/* Clean up the sysfs attributes, if created */
+-		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+-			kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+-			if (kobj) {
+-				mtip_hw_sysfs_exit(dd, kobj);
+-				kobject_put(kobj);
+-			}
++	/* Clean up the sysfs attributes, if created */
++	if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
++		kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
++		if (kobj) {
++			mtip_hw_sysfs_exit(dd, kobj);
++			kobject_put(kobj);
+ 		}
++	}
+ 
++	if (!dd->sr)
+ 		mtip_standby_drive(dd);
+-
+-		/*
+-		 * Delete our gendisk structure. This also removes the device
+-		 * from /dev
+-		 */
+-		if (dd->bdev) {
+-			bdput(dd->bdev);
+-			dd->bdev = NULL;
+-		}
+-		if (dd->disk) {
+-			if (dd->disk->queue) {
+-				del_gendisk(dd->disk);
+-				blk_cleanup_queue(dd->queue);
+-				blk_mq_free_tag_set(&dd->tags);
+-				dd->queue = NULL;
+-			} else
+-				put_disk(dd->disk);
+-		}
+-		dd->disk  = NULL;
+-
+-		spin_lock(&rssd_index_lock);
+-		ida_remove(&rssd_index_ida, dd->index);
+-		spin_unlock(&rssd_index_lock);
+-	} else {
++	else
+ 		dev_info(&dd->pdev->dev, "device %s surprise removal\n",
+ 						dd->disk->disk_name);
++
++	/*
++	 * Delete our gendisk structure. This also removes the device
++	 * from /dev
++	 */
++	if (dd->bdev) {
++		bdput(dd->bdev);
++		dd->bdev = NULL;
+ 	}
++	if (dd->disk) {
++		del_gendisk(dd->disk);
++		if (dd->disk->queue) {
++			blk_cleanup_queue(dd->queue);
++			blk_mq_free_tag_set(&dd->tags);
++			dd->queue = NULL;
++		}
++		put_disk(dd->disk);
++	}
++	dd->disk  = NULL;
++
++	spin_lock(&rssd_index_lock);
++	ida_remove(&rssd_index_ida, dd->index);
++	spin_unlock(&rssd_index_lock);
+ 
+ 	/* De-initialize the protocol layer. */
+ 	mtip_hw_exit(dd);
+@@ -4140,12 +4119,12 @@ static int mtip_block_shutdown(struct driver_data *dd)
+ 		dev_info(&dd->pdev->dev,
+ 			"Shutting down %s ...\n", dd->disk->disk_name);
+ 
++		del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+-			del_gendisk(dd->disk);
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+-		} else
+-			put_disk(dd->disk);
++		}
++		put_disk(dd->disk);
+ 		dd->disk  = NULL;
+ 		dd->queue = NULL;
+ 	}
+@@ -4485,7 +4464,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	struct driver_data *dd = pci_get_drvdata(pdev);
+ 	unsigned long flags, to;
+ 
+-	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++	set_bit(MTIP_DDF_REMOVAL_BIT, &dd->dd_flag);
+ 
+ 	spin_lock_irqsave(&dev_lock, flags);
+ 	list_del_init(&dd->online_list);
+@@ -4502,11 +4481,18 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	} while (atomic_read(&dd->irq_workers_active) != 0 &&
+ 		time_before(jiffies, to));
+ 
++	fsync_bdev(dd->bdev);
++
+ 	if (atomic_read(&dd->irq_workers_active) != 0) {
+ 		dev_warn(&dd->pdev->dev,
+ 			"Completion workers still active!\n");
+ 	}
+ 
++	if (dd->sr)
++		blk_mq_stop_hw_queues(dd->queue);
++
++	set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
++
+ 	/* Clean up the block layer. */
+ 	mtip_block_remove(dd);
+ 
+@@ -4524,10 +4510,8 @@ static void mtip_pci_remove(struct pci_dev *pdev)
+ 	list_del_init(&dd->remove_list);
+ 	spin_unlock_irqrestore(&dev_lock, flags);
+ 
+-	if (!dd->sr)
+-		kfree(dd);
+-	else
+-		set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
++	kfree(dd);
++	set_bit(MTIP_DDF_REMOVE_DONE_BIT, &dd->dd_flag);
+ 
+ 	pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+ 	pci_set_drvdata(pdev, NULL);
+diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
+index ba1b31ee22ec..76695265dffb 100644
+--- a/drivers/block/mtip32xx/mtip32xx.h
++++ b/drivers/block/mtip32xx/mtip32xx.h
+@@ -155,6 +155,7 @@ enum {
+ 	MTIP_DDF_RESUME_BIT         = 6,
+ 	MTIP_DDF_INIT_DONE_BIT      = 7,
+ 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
++	MTIP_DDF_REMOVAL_BIT	    = 9,
+ 
+ 	MTIP_DDF_STOP_IO      = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
+ 				(1 << MTIP_DDF_SEC_LOCK_BIT) |
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index fa893c3ec408..0beaa52df66b 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
+ 	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
++	{ USB_DEVICE(0x0489, 0xe095) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+ 	{ USB_DEVICE(0x04CA, 0x3005) },
+@@ -92,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
++	{ USB_DEVICE(0x04CA, 0x3014) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+ 	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+@@ -113,10 +115,12 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362) },
+ 	{ USB_DEVICE(0x13d3, 0x3375) },
+ 	{ USB_DEVICE(0x13d3, 0x3393) },
++	{ USB_DEVICE(0x13d3, 0x3395) },
+ 	{ USB_DEVICE(0x13d3, 0x3402) },
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+@@ -144,6 +148,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -154,6 +159,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -175,10 +181,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index fdba79c3877c..ac553f997a1c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -184,6 +184,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
+@@ -194,6 +195,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+@@ -215,10 +217,12 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
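
For context on the four Bluetooth hunks above: the same VID/PID pairs must be added to the ath3k firmware-loader table and to both AR3012 lists (ath3k_blist_tbl and btusb's blacklist_table), which is what routes a new AR3012-based dongle to the patched-firmware path instead of the generic btusb flow. A minimal sketch with a hypothetical 0x1234:0xabcd device (not a real ID):

	{ USB_DEVICE(0x1234, 0xabcd) },			/* ath3k_table       */
	{ USB_DEVICE(0x1234, 0xabcd),
	  .driver_info = BTUSB_ATH3012 },		/* both blist tables */
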
+diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
+index e98d15eaa799..1827fc4d15c1 100644
+--- a/drivers/bus/imx-weim.c
++++ b/drivers/bus/imx-weim.c
+@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
+ 			return ret;
+ 	}
+ 
+-	for_each_child_of_node(pdev->dev.of_node, child) {
++	for_each_available_child_of_node(pdev->dev.of_node, child) {
+ 		if (!child->name)
+ 			continue;
+ 
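
The only change in this hunk is the iterator: for_each_available_child_of_node() additionally checks of_device_is_available(), so WEIM timing setup now skips children whose device-tree status is "disabled". Roughly (a simplified sketch of the helper, not the exact <linux/of.h> definition):

	for (child = of_get_next_available_child(np, NULL); child;
	     child = of_get_next_available_child(np, child)) {
		/* only nodes with status = "okay" (or no status) get here */
	}
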
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 2b971b3e5c1c..b02d4b160403 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -309,11 +309,11 @@ static int crb_acpi_remove(struct acpi_device *device)
+ 	struct device *dev = &device->dev;
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
+-	tpm_chip_unregister(chip);
+-
+ 	if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ 		tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ 
++	tpm_chip_unregister(chip);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
+index 556ce041d371..404a9665879e 100644
+--- a/drivers/clk/rockchip/clk-rk3188.c
++++ b/drivers/clk/rockchip/clk-rk3188.c
+@@ -708,6 +708,9 @@ static const char *const rk3188_critical_clocks[] __initconst = {
+ 	"aclk_cpu",
+ 	"aclk_peri",
+ 	"hclk_peri",
++	"pclk_cpu",
++	"pclk_peri",
++	"hclk_cpubus"
+ };
+ 
+ static void __init rk3188_common_clk_init(struct device_node *np)
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index a71c97c03c39..3178f84d2757 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -1492,13 +1492,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
+ 
+ 	clk_unprepare(sha_dd->iclk);
+ 
+-	iounmap(sha_dd->io_base);
+-
+-	clk_put(sha_dd->iclk);
+-
+-	if (sha_dd->irq >= 0)
+-		free_irq(sha_dd->irq, sha_dd);
+-
+ 	return 0;
+ }
+ 
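
Dropping iounmap(), clk_put() and free_irq() from atmel_sha_remove() is only safe if the probe path obtains those resources through device-managed helpers, which the driver core releases automatically after remove() returns; freeing them by hand then becomes a double release. A sketch of that assumed devm pattern on the probe side (handler and sha_res stand in for the driver's actual names):

	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
	if (IS_ERR(sha_dd->io_base))
		return PTR_ERR(sha_dd->io_base);
	err = devm_request_irq(&pdev->dev, sha_dd->irq, handler, IRQF_SHARED,
			       "atmel-sha", sha_dd);
	/* no iounmap()/free_irq() in remove(): devres undoes both */
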
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index ea7e8446956a..0a4973b47c99 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -202,6 +202,39 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
+ 	return ccp_aes_cmac_finup(req);
+ }
+ 
++static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	state.null_msg = rctx->null_msg;
++	memcpy(state.iv, rctx->iv, sizeof(state.iv));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_aes_cmac_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->null_msg = state.null_msg;
++	memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			       unsigned int key_len)
+ {
+@@ -334,10 +367,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
+ 	alg->final = ccp_aes_cmac_final;
+ 	alg->finup = ccp_aes_cmac_finup;
+ 	alg->digest = ccp_aes_cmac_digest;
++	alg->export = ccp_aes_cmac_export;
++	alg->import = ccp_aes_cmac_import;
+ 	alg->setkey = ccp_aes_cmac_setkey;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = AES_BLOCK_SIZE;
++	halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 507b34e0cc19..9711b6d29162 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -194,6 +194,43 @@ static int ccp_sha_digest(struct ahash_request *req)
+ 	return ccp_sha_finup(req);
+ }
+ 
++static int ccp_sha_export(struct ahash_request *req, void *out)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	state.type = rctx->type;
++	state.msg_bits = rctx->msg_bits;
++	state.first = rctx->first;
++	memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
++	state.buf_count = rctx->buf_count;
++	memcpy(state.buf, rctx->buf, sizeof(state.buf));
++
++	/* 'out' may not be aligned so memcpy from local variable */
++	memcpy(out, &state, sizeof(state));
++
++	return 0;
++}
++
++static int ccp_sha_import(struct ahash_request *req, const void *in)
++{
++	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
++	struct ccp_sha_exp_ctx state;
++
++	/* 'in' may not be aligned so memcpy to local variable */
++	memcpy(&state, in, sizeof(state));
++
++	memset(rctx, 0, sizeof(*rctx));
++	rctx->type = state.type;
++	rctx->msg_bits = state.msg_bits;
++	rctx->first = state.first;
++	memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
++	rctx->buf_count = state.buf_count;
++	memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
++
++	return 0;
++}
++
+ static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			  unsigned int key_len)
+ {
+@@ -390,9 +427,12 @@ static int ccp_register_sha_alg(struct list_head *head,
+ 	alg->final = ccp_sha_final;
+ 	alg->finup = ccp_sha_finup;
+ 	alg->digest = ccp_sha_digest;
++	alg->export = ccp_sha_export;
++	alg->import = ccp_sha_import;
+ 
+ 	halg = &alg->halg;
+ 	halg->digestsize = def->digest_size;
++	halg->statesize = sizeof(struct ccp_sha_exp_ctx);
+ 
+ 	base = &halg->base;
+ 	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
+index 76a96f0f44c6..a326ec20bfa8 100644
+--- a/drivers/crypto/ccp/ccp-crypto.h
++++ b/drivers/crypto/ccp/ccp-crypto.h
+@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_aes_cmac_exp_ctx {
++	unsigned int null_msg;
++
++	u8 iv[AES_BLOCK_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[AES_BLOCK_SIZE];
++};
++
+ /***** SHA related defines *****/
+ #define MAX_SHA_CONTEXT_SIZE	SHA256_DIGEST_SIZE
+ #define MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
+ 	struct ccp_cmd cmd;
+ };
+ 
++struct ccp_sha_exp_ctx {
++	enum ccp_sha_type type;
++
++	u64 msg_bits;
++
++	unsigned int first;
++
++	u8 ctx[MAX_SHA_CONTEXT_SIZE];
++
++	unsigned int buf_count;
++	u8 buf[MAX_SHA_BLOCK_SIZE];
++};
++
+ /***** Common Context Structure *****/
+ struct ccp_ctx {
+ 	int (*complete)(struct crypto_async_request *req, int ret);
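
The new export/import callbacks, together with the halg->statesize assignments, let the crypto API snapshot a partial hash state and resume it later (algif_hash relies on this when a hash socket is cloned with accept()). A hedged sketch of how a kernel caller exercises the pair; allocation error handling and ahash_request_set_crypt() setup are omitted:

	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);

	crypto_ahash_init(req);
	crypto_ahash_update(req);		/* hash part of the data  */
	crypto_ahash_export(req, state);	/* snapshot partial state */
	/* ...later, possibly on another request: */
	crypto_ahash_import(req, state);
	crypto_ahash_final(req);
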
+diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
+index 92772fffc52f..45f734eec954 100644
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -1437,7 +1437,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
+ 	u64 chan_off;
+ 	u64 dram_base		= get_dram_base(pvt, range);
+ 	u64 hole_off		= f10_dhar_offset(pvt);
+-	u64 dct_sel_base_off	= (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
++	u64 dct_sel_base_off	= (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+ 
+ 	if (hi_rng) {
+ 		/*
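
The cast above is the whole fix: pvt->dct_sel_hi is 32 bits wide, so without it the left shift happens in 32-bit arithmetic and the upper bits are lost before the result is widened to u64. A worked example of the difference:

	u32 hi = 0xFFFFFC00;
	u64 bad  = hi << 16;		/* 32-bit shift first: 0x00000000FC000000 */
	u64 good = (u64)hi << 16;	/* widen first:        0x0000FFFFFC000000 */
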
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index cd6b9c72c8ac..adcc628b1f93 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1043,8 +1043,8 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
+ 			 n_tads, gb, (mb*1000)/1024,
+ 			 ((u64)tmp_mb) << 20L,
+-			 (u32)TAD_SOCK(reg),
+-			 (u32)TAD_CH(reg),
++			 (u32)(1 << TAD_SOCK(reg)),
++			 (u32)TAD_CH(reg) + 1,
+ 			 (u32)TAD_TGT0(reg),
+ 			 (u32)TAD_TGT1(reg),
+ 			 (u32)TAD_TGT2(reg),
+@@ -1316,7 +1316,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	ch_way = TAD_CH(reg) + 1;
+-	sck_way = TAD_SOCK(reg) + 1;
++	sck_way = 1 << TAD_SOCK(reg);
+ 
+ 	if (ch_way == 3)
+ 		idx = addr >> 6;
+@@ -1373,7 +1373,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		 n_tads,
+ 		 addr,
+ 		 limit,
+-		 (u32)TAD_SOCK(reg),
++		 sck_way,
+ 		 ch_way,
+ 		 offset,
+ 		 idx,
+@@ -1388,18 +1388,12 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 			offset, addr);
+ 		return -EINVAL;
+ 	}
+-	addr -= offset;
+-	/* Store the low bits [0:6] of the addr */
+-	ch_addr = addr & 0x7f;
+-	/* Remove socket wayness and remove 6 bits */
+-	addr >>= 6;
+-	addr = div_u64(addr, sck_xch);
+-#if 0
+-	/* Divide by channel way */
+-	addr = addr / ch_way;
+-#endif
+-	/* Recover the last 6 bits */
+-	ch_addr |= addr << 6;
++
++	ch_addr = addr - offset;
++	ch_addr >>= (6 + shiftup);
++	ch_addr /= ch_way * sck_way;
++	ch_addr <<= (6 + shiftup);
++	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+ 
+ 	/*
+ 	 * Step 3) Decode rank
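
Both sb_edac hunks correct the same misreading of the TAD registers: TAD_SOCK() holds log2 of the socket interleave, not "ways minus one", and the channel-address reconstruction must divide the interleave out of the bits above the (6 + shiftup) low bits while leaving those low bits untouched. For example, for a TAD_SOCK() value of 2:

	sck_way = 2 + 1;	/* old decode: 3 -- not even a power of two */
	sck_way = 1 << 2;	/* new decode: 4-way socket interleave      */
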
+diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
+index 7b2e0496e0c0..10e6774ab2a2 100644
+--- a/drivers/firmware/efi/efivars.c
++++ b/drivers/firmware/efi/efivars.c
+@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
+ 	}
+ 
+ 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(name, data, size) == false) {
++	    efivar_validate(vendor, name, data, size) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
+ 	}
+ 
+ 	if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
+-	    efivar_validate(name, data, size) == false) {
++	    efivar_validate(new_var->VendorGuid, name, data,
++			    size) == false) {
+ 		printk(KERN_ERR "efivars: Malformed variable content\n");
+ 		return -EINVAL;
+ 	}
+@@ -535,50 +536,43 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
+  * efivar_create_sysfs_entry - create a new entry in sysfs
+  * @new_var: efivar entry to create
+  *
+- * Returns 1 on failure, 0 on success
++ * Returns 0 on success, negative error code on failure
+  */
+ static int
+ efivar_create_sysfs_entry(struct efivar_entry *new_var)
+ {
+-	int i, short_name_size;
++	int short_name_size;
+ 	char *short_name;
+-	unsigned long variable_name_size;
+-	efi_char16_t *variable_name;
+-
+-	variable_name = new_var->var.VariableName;
+-	variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
++	unsigned long utf8_name_size;
++	efi_char16_t *variable_name = new_var->var.VariableName;
++	int ret;
+ 
+ 	/*
+-	 * Length of the variable bytes in ASCII, plus the '-' separator,
++	 * Length of the variable bytes in UTF8, plus the '-' separator,
+ 	 * plus the GUID, plus trailing NUL
+ 	 */
+-	short_name_size = variable_name_size / sizeof(efi_char16_t)
+-				+ 1 + EFI_VARIABLE_GUID_LEN + 1;
+-
+-	short_name = kzalloc(short_name_size, GFP_KERNEL);
++	utf8_name_size = ucs2_utf8size(variable_name);
++	short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
+ 
++	short_name = kmalloc(short_name_size, GFP_KERNEL);
+ 	if (!short_name)
+-		return 1;
++		return -ENOMEM;
++
++	ucs2_as_utf8(short_name, variable_name, short_name_size);
+ 
+-	/* Convert Unicode to normal chars (assume top bits are 0),
+-	   ala UTF-8 */
+-	for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
+-		short_name[i] = variable_name[i] & 0xFF;
+-	}
+ 	/* This is ugly, but necessary to separate one vendor's
+ 	   private variables from another's.         */
+-
+-	*(short_name + strlen(short_name)) = '-';
++	short_name[utf8_name_size] = '-';
+ 	efi_guid_to_str(&new_var->var.VendorGuid,
+-			 short_name + strlen(short_name));
++			 short_name + utf8_name_size + 1);
+ 
+ 	new_var->kobj.kset = efivars_kset;
+ 
+-	i = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
++	ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+ 				   NULL, "%s", short_name);
+ 	kfree(short_name);
+-	if (i)
+-		return 1;
++	if (ret)
++		return ret;
+ 
+ 	kobject_uevent(&new_var->kobj, KOBJ_ADD);
+ 	efivar_entry_add(new_var, &efivar_sysfs_list);
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 70a0fb10517f..7f2ea21c730d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
+ }
+ 
+ struct variable_validate {
++	efi_guid_t vendor;
+ 	char *name;
+ 	bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
+ 			 unsigned long len);
+ };
+ 
++/*
++ * This is the list of variables we need to validate, as well as the
++ * whitelist for what we think is safe not to default to immutable.
++ *
++ * If it has a validate() method that's not NULL, it'll go into the
++ * validation routine.  If not, it is assumed valid, but still used for
++ * whitelisting.
++ *
++ * Note that it's sorted by {vendor,name}, but globbed names must come after
++ * any other name with the same prefix.
++ */
+ static const struct variable_validate variable_validate[] = {
+-	{ "BootNext", validate_uint16 },
+-	{ "BootOrder", validate_boot_order },
+-	{ "DriverOrder", validate_boot_order },
+-	{ "Boot*", validate_load_option },
+-	{ "Driver*", validate_load_option },
+-	{ "ConIn", validate_device_path },
+-	{ "ConInDev", validate_device_path },
+-	{ "ConOut", validate_device_path },
+-	{ "ConOutDev", validate_device_path },
+-	{ "ErrOut", validate_device_path },
+-	{ "ErrOutDev", validate_device_path },
+-	{ "Timeout", validate_uint16 },
+-	{ "Lang", validate_ascii_string },
+-	{ "PlatformLang", validate_ascii_string },
+-	{ "", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
++	{ EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
++	{ EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
++	{ EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
++	{ LINUX_EFI_CRASH_GUID, "*", NULL },
++	{ NULL_GUID, "", NULL },
+ };
+ 
++static bool
++variable_matches(const char *var_name, size_t len, const char *match_name,
++		 int *match)
++{
++	for (*match = 0; ; (*match)++) {
++		char c = match_name[*match];
++		char u = var_name[*match];
++
++		/* Wildcard in the matching name means we've matched */
++		if (c == '*')
++			return true;
++
++		/* Case sensitive match */
++		if (!c && *match == len)
++			return true;
++
++		if (c != u)
++			return false;
++
++		if (!c)
++			return true;
++	}
++	return true;
++}
++
+ bool
+-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
++efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		unsigned long data_size)
+ {
+ 	int i;
+-	u16 *unicode_name = var_name;
++	unsigned long utf8_size;
++	u8 *utf8_name;
+ 
+-	for (i = 0; variable_validate[i].validate != NULL; i++) {
+-		const char *name = variable_validate[i].name;
+-		int match;
++	utf8_size = ucs2_utf8size(var_name);
++	utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
++	if (!utf8_name)
++		return false;
+ 
+-		for (match = 0; ; match++) {
+-			char c = name[match];
+-			u16 u = unicode_name[match];
++	ucs2_as_utf8(utf8_name, var_name, utf8_size);
++	utf8_name[utf8_size] = '\0';
+ 
+-			/* All special variables are plain ascii */
+-			if (u > 127)
+-				return true;
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		const char *name = variable_validate[i].name;
++		int match = 0;
+ 
+-			/* Wildcard in the matching name means we've matched */
+-			if (c == '*')
+-				return variable_validate[i].validate(var_name,
+-							     match, data, len);
++		if (efi_guidcmp(vendor, variable_validate[i].vendor))
++			continue;
+ 
+-			/* Case sensitive match */
+-			if (c != u)
++		if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
++			if (variable_validate[i].validate == NULL)
+ 				break;
+-
+-			/* Reached the end of the string while matching */
+-			if (!c)
+-				return variable_validate[i].validate(var_name,
+-							     match, data, len);
++			kfree(utf8_name);
++			return variable_validate[i].validate(var_name, match,
++							     data, data_size);
+ 		}
+ 	}
+-
++	kfree(utf8_name);
+ 	return true;
+ }
+ EXPORT_SYMBOL_GPL(efivar_validate);
+ 
++bool
++efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
++			     size_t len)
++{
++	int i;
++	bool found = false;
++	int match = 0;
++
++	/*
++	 * Check if our variable is in the validated variables list
++	 */
++	for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
++		if (efi_guidcmp(variable_validate[i].vendor, vendor))
++			continue;
++
++		if (variable_matches(var_name, len,
++				     variable_validate[i].name, &match)) {
++			found = true;
++			break;
++		}
++	}
++
++	/*
++	 * If it's in our list, it is removable.
++	 */
++	return found;
++}
++EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
++
+ static efi_status_t
+ check_var_size(u32 attributes, unsigned long size)
+ {
+@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
+ 
+ 	*set = false;
+ 
+-	if (efivar_validate(name, data, *size) == false)
++	if (efivar_validate(*vendor, name, data, *size) == false)
+ 		return -EINVAL;
+ 
+ 	/*
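
variable_matches() is shared by efivar_validate() and the new removability whitelist: a table name ending in '*' matches any variable with that prefix, anything else must match exactly and case-sensitively, and the vendor GUID is compared before the name is looked at. Expected behaviour as an illustration (len includes the trailing NUL, as at the call sites):

	int m;

	variable_matches("Boot0001",   9, "Boot*",     &m); /* true, m == 4 */
	variable_matches("BootOrder", 10, "Boot*",     &m); /* true         */
	variable_matches("BootOrder", 10, "BootOrder", &m); /* true (exact) */
	variable_matches("bootorder", 10, "BootOrder", &m); /* false (case) */
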
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 0ec9ad50ba7c..9e33705d4d0e 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
+ 	return mstb;
+ }
+ 
++static void drm_dp_free_mst_port(struct kref *kref);
++
++static void drm_dp_free_mst_branch_device(struct kref *kref)
++{
++	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
++	if (mstb->port_parent) {
++		if (list_empty(&mstb->port_parent->next))
++			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
++	}
++	kfree(mstb);
++}
++
+ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ {
+ 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+@@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ 	bool wake_tx = false;
+ 
+ 	/*
++	 * init kref again so that ports can use it to drop the mst branch
++	 * once it is no longer needed
++	 */
++	kref_init(kref);
++
++	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
++		kref_get(&mstb->port_parent->kref);
++
++	/*
+ 	 * destroy all ports - don't need lock
+ 	 * as there are no more references to the mst branch
+ 	 * device at this point.
+@@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ 
+ 	if (wake_tx)
+ 		wake_up(&mstb->mgr->tx_waitq);
+-	kfree(mstb);
++
++	kref_put(kref, drm_dp_free_mst_branch_device);
+ }
+ 
+ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+@@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
+ 			 * from an EDID retrieval */
+ 
+ 			mutex_lock(&mgr->destroy_connector_lock);
++			kref_get(&port->parent->kref);
+ 			list_add(&port->next, &mgr->destroy_connector_list);
+ 			mutex_unlock(&mgr->destroy_connector_lock);
+ 			schedule_work(&mgr->destroy_connector_work);
+@@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
+ 	return send_link;
+ }
+ 
+-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
+-				   struct drm_dp_mst_port *port)
++static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+ {
+ 	int ret;
+-	if (port->dpcd_rev >= 0x12) {
+-		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
+-		if (!port->guid_valid) {
+-			ret = drm_dp_send_dpcd_write(mstb->mgr,
+-						     port,
+-						     DP_GUID,
+-						     16, port->guid);
+-			port->guid_valid = true;
++
++	memcpy(mstb->guid, guid, 16);
++
++	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
++		if (mstb->port_parent) {
++			ret = drm_dp_send_dpcd_write(
++					mstb->mgr,
++					mstb->port_parent,
++					DP_GUID,
++					16,
++					mstb->guid);
++		} else {
++
++			ret = drm_dp_dpcd_write(
++					mstb->mgr->aux,
++					DP_GUID,
++					mstb->guid,
++					16);
+ 		}
+ 	}
+ }
+@@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 	port->dpcd_rev = port_msg->dpcd_revision;
+ 	port->num_sdp_streams = port_msg->num_sdp_streams;
+ 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
+-	memcpy(port->guid, port_msg->peer_guid, 16);
+ 
+ 	/* manage mstb port lists with mgr lock - take a reference
+ 	   for this list */
+@@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 
+ 	if (old_ddps != port->ddps) {
+ 		if (port->ddps) {
+-			drm_dp_check_port_guid(mstb, port);
+ 			if (!port->input)
+ 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ 		} else {
+-			port->guid_valid = false;
+ 			port->available_pbn = 0;
+ 			}
+ 	}
+@@ -1156,10 +1185,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
+ 
+ 	if (old_ddps != port->ddps) {
+ 		if (port->ddps) {
+-			drm_dp_check_port_guid(mstb, port);
+ 			dowork = true;
+ 		} else {
+-			port->guid_valid = false;
+ 			port->available_pbn = 0;
+ 		}
+ 	}
+@@ -1216,13 +1243,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
+ 	struct drm_dp_mst_branch *found_mstb;
+ 	struct drm_dp_mst_port *port;
+ 
++	if (memcmp(mstb->guid, guid, 16) == 0)
++		return mstb;
++
++
+ 	list_for_each_entry(port, &mstb->ports, next) {
+ 		if (!port->mstb)
+ 			continue;
+ 
+-		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
+-			return port->mstb;
+-
+ 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
+ 
+ 		if (found_mstb)
+@@ -1241,10 +1269,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
+ 	/* find the port by iterating down */
+ 	mutex_lock(&mgr->lock);
+ 
+-	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
+-		mstb = mgr->mst_primary;
+-	else
+-		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
++	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+ 
+ 	if (mstb)
+ 		kref_get(&mstb->kref);
+@@ -1549,6 +1574,9 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
+ 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
+ 			}
++
++			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
++
+ 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
+ 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
+ 			}
+@@ -1595,6 +1623,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
+ 	return 0;
+ }
+ 
++static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
++{
++	if (!mstb->port_parent)
++		return NULL;
++
++	if (mstb->port_parent->mstb != mstb)
++		return mstb->port_parent;
++
++	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
++									 struct drm_dp_mst_branch *mstb,
++									 int *port_num)
++{
++	struct drm_dp_mst_branch *rmstb = NULL;
++	struct drm_dp_mst_port *found_port;
++	mutex_lock(&mgr->lock);
++	if (mgr->mst_primary) {
++		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
++
++		if (found_port) {
++			rmstb = found_port->parent;
++			kref_get(&rmstb->kref);
++			*port_num = found_port->port_num;
++		}
++	}
++	mutex_unlock(&mgr->lock);
++	return rmstb;
++}
++
+ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 				   struct drm_dp_mst_port *port,
+ 				   int id,
+@@ -1602,11 +1661,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ {
+ 	struct drm_dp_sideband_msg_tx *txmsg;
+ 	struct drm_dp_mst_branch *mstb;
+-	int len, ret;
++	int len, ret, port_num;
+ 
++	port_num = port->port_num;
+ 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+-	if (!mstb)
+-		return -EINVAL;
++	if (!mstb) {
++		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
++
++		if (!mstb)
++			return -EINVAL;
++	}
+ 
+ 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ 	if (!txmsg) {
+@@ -1615,7 +1679,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	}
+ 
+ 	txmsg->dst = mstb;
+-	len = build_allocate_payload(txmsg, port->port_num,
++	len = build_allocate_payload(txmsg, port_num,
+ 				     id,
+ 				     pbn);
+ 
+@@ -1969,31 +2033,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
+ 		mgr->mst_primary = mstb;
+ 		kref_get(&mgr->mst_primary->kref);
+ 
+-		{
+-			struct drm_dp_payload reset_pay;
+-			reset_pay.start_slot = 0;
+-			reset_pay.num_slots = 0x3f;
+-			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+-		}
+-
+ 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+-					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
++							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ 		if (ret < 0) {
+ 			goto out_unlock;
+ 		}
+ 
+-
+-		/* sort out guid */
+-		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
+-		if (ret != 16) {
+-			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
+-			goto out_unlock;
+-		}
+-
+-		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
+-		if (!mgr->guid_valid) {
+-			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
+-			mgr->guid_valid = true;
++		{
++			struct drm_dp_payload reset_pay;
++			reset_pay.start_slot = 0;
++			reset_pay.num_slots = 0x3f;
++			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
+ 		}
+ 
+ 		queue_work(system_long_wq, &mgr->work);
+@@ -2217,6 +2267,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 			}
+ 
+ 			drm_dp_update_port(mstb, &msg.u.conn_stat);
++
+ 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ 			(*mgr->cbs->hotplug)(mgr);
+ 
+@@ -2749,6 +2800,13 @@ static void drm_dp_tx_work(struct work_struct *work)
+ 	mutex_unlock(&mgr->qlock);
+ }
+ 
++static void drm_dp_free_mst_port(struct kref *kref)
++{
++	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
++	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
++	kfree(port);
++}
++
+ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ {
+ 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+@@ -2769,13 +2827,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 		list_del(&port->next);
+ 		mutex_unlock(&mgr->destroy_connector_lock);
+ 
++		kref_init(&port->kref);
++		INIT_LIST_HEAD(&port->next);
++
+ 		mgr->cbs->destroy_connector(mgr, port->connector);
+ 
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+ 
+-		if (!port->input && port->vcpi.vcpi > 0)
+-			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+-		kfree(port);
++		if (!port->input && port->vcpi.vcpi > 0) {
++			if (mgr->mst_state) {
++				drm_dp_mst_reset_vcpi_slots(mgr, port);
++				drm_dp_update_payload_part1(mgr);
++				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++			}
++		}
++
++		kref_put(&port->kref, drm_dp_free_mst_port);
+ 		send_hotplug = true;
+ 	}
+ 	if (send_hotplug)
+diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
+index c707fa6fca85..e3bdc8b1c32c 100644
+--- a/drivers/gpu/drm/gma500/gem.c
++++ b/drivers/gpu/drm/gma500/gem.c
+@@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
+ 		return ret;
+ 	}
+ 	/* We have the initial and handle reference but need only one now */
+-	drm_gem_object_unreference(&r->gem);
++	drm_gem_object_unreference_unlocked(&r->gem);
+ 	*handlep = handle;
+ 	return 0;
+ }
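
The _unlocked variant matters here because drm_gem_object_unreference() is documented to require dev->struct_mutex, while psb_gem_create() runs without it; dropping what may be the final reference outside the lock can trip the assertions in the GEM free path. The contract, as assumed by this fix:

	/* drm_gem_object_unreference()          -- caller holds struct_mutex
	 * drm_gem_object_unreference_unlocked() -- takes the mutex itself
	 *                                          if the last ref drops
	 */
	drm_gem_object_unreference_unlocked(&r->gem);
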
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index bb292143997e..adf74f4366bb 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -892,8 +892,6 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 			else
+ 				args.v1.ucLaneNum = 4;
+ 
+-			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ 			switch (radeon_encoder->encoder_id) {
+ 			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ 				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+@@ -910,6 +908,10 @@ atombios_dig_encoder_setup2(struct drm_encoder *encoder, int action, int panel_m
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ 			else
+ 				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
++
++			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
++				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
++
+ 			break;
+ 		case 2:
+ 		case 3:
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 8bc7d0bbd3c8..1523cf94bcdc 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -62,6 +62,10 @@ bool radeon_has_atpx(void) {
+ 	return radeon_atpx_priv.atpx_detected;
+ }
+ 
++bool radeon_has_atpx_dgpu_power_cntl(void) {
++	return radeon_atpx_priv.atpx.functions.power_cntl;
++}
++
+ /**
+  * radeon_atpx_call - call an ATPX method
+  *
+@@ -141,10 +145,6 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
+  */
+ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ {
+-	/* make sure required functions are enabled */
+-	/* dGPU power control is required */
+-	atpx->functions.power_cntl = true;
+-
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index ccab94ed9d94..9cbdd8aac28f 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -103,6 +103,12 @@ static const char radeon_family_name[][16] = {
+ 	"LAST",
+ };
+ 
++#if defined(CONFIG_VGA_SWITCHEROO)
++bool radeon_has_atpx_dgpu_power_cntl(void);
++#else
++static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
++#endif
++
+ #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+ 
+@@ -1427,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	 * ignore it */
+ 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ 
+-	if (rdev->flags & RADEON_IS_PX)
++	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+ 		runtime = true;
+ 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ 	if (runtime)
+@@ -1734,7 +1740,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
+-	drm_helper_hpd_irq_event(dev);
+ 
+ 	/* set the power state here in case we are a PX system or headless */
+ 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
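
Taken together, the two radeon hunks stop pretending every PX platform can power the dGPU off: radeon_atpx_validate() no longer forces functions.power_cntl on, and runtime PM is registered only when the BIOS really advertises the ATPX power-control method. The resulting gate, restated as a sketch:

	bool runtime = (rdev->flags & RADEON_IS_PX) &&
		       radeon_has_atpx_dgpu_power_cntl();
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops,
				       runtime);
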
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index 634793ea8418..3fa9f67ba8df 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -333,7 +333,8 @@ out_unref:
+ 
+ void radeon_fb_output_poll_changed(struct radeon_device *rdev)
+ {
+-	drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ }
+ 
+ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
+@@ -373,6 +374,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
+ 	int bpp_sel = 32;
+ 	int ret;
+ 
++	/* don't enable fbdev if no connectors */
++	if (list_empty(&rdev->ddev->mode_config.connector_list))
++		return 0;
++
+ 	/* select 8 bpp console on RN50 or 16MB cards */
+ 	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+ 		bpp_sel = 8;
+@@ -425,11 +430,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
+ 
+ void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
+ {
+-	fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
++	if (rdev->mode_info.rfbdev)
++		fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ }
+ 
+ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ {
++	if (!rdev->mode_info.rfbdev)
++		return false;
++
+ 	if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
+ 		return true;
+ 	return false;
+@@ -437,10 +446,12 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
+ 
+ void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+ 
+ void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
+ {
+-	drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
++	if (rdev->mode_info.rfbdev)
++		drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 9ce9dfeb1258..bc23db196930 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2584,9 +2584,10 @@ int hid_add_device(struct hid_device *hdev)
+ 	/*
+ 	 * Scan generic devices for group information
+ 	 */
+-	if (hid_ignore_special_drivers ||
+-	    (!hdev->group &&
+-	     !hid_match_id(hdev, hid_have_special_driver))) {
++	if (hid_ignore_special_drivers) {
++		hdev->group = HID_GROUP_GENERIC;
++	} else if (!hdev->group &&
++		   !hid_match_id(hdev, hid_have_special_driver)) {
+ 		ret = hid_scan_report(hdev);
+ 		if (ret)
+ 			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 6a9b05b328a9..1180664d1206 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -322,8 +322,19 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ 			break;
+ 		}
+ 
+-		td->inputmode = field->report->id;
+-		td->inputmode_index = usage->usage_index;
++		if (td->inputmode < 0) {
++			td->inputmode = field->report->id;
++			td->inputmode_index = usage->usage_index;
++		} else {
++			/*
++			 * Some elan panels wrongly declare 2 input mode
++			 * features, and silently ignore the value we set
++			 * in the second field. Skip the second feature
++			 * and hope for the best.
++			 */
++			dev_info(&hdev->dev,
++				 "Ignoring the extra HID_DG_INPUTMODE\n");
++		}
+ 
+ 		break;
+ 	case HID_DG_CONTACTMAX:
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 92d6cdf02460..c4c9d9523694 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -280,17 +280,21 @@ static int i2c_hid_set_or_send_report(struct i2c_client *client, u8 reportType,
+ 	u16 dataRegister = le16_to_cpu(ihid->hdesc.wDataRegister);
+ 	u16 outputRegister = le16_to_cpu(ihid->hdesc.wOutputRegister);
+ 	u16 maxOutputLength = le16_to_cpu(ihid->hdesc.wMaxOutputLength);
++	u16 size;
++	int args_len;
++	int index = 0;
++
++	i2c_hid_dbg(ihid, "%s\n", __func__);
++
++	if (data_len > ihid->bufsize)
++		return -EINVAL;
+ 
+-	/* hid_hw_* already checked that data_len < HID_MAX_BUFFER_SIZE */
+-	u16 size =	2			/* size */ +
++	size =		2			/* size */ +
+ 			(reportID ? 1 : 0)	/* reportID */ +
+ 			data_len		/* buf */;
+-	int args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
++	args_len =	(reportID >= 0x0F ? 1 : 0) /* optional third byte */ +
+ 			2			/* dataRegister */ +
+ 			size			/* args */;
+-	int index = 0;
+-
+-	i2c_hid_dbg(ihid, "%s\n", __func__);
+ 
+ 	if (!use_data && maxOutputLength == 0)
+ 		return -ENOSYS;
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index eab5bd6a2442..1764a168888c 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
+ 	struct usbhid_device *usbhid = hid->driver_data;
+ 	int unplug = 0, status = urb->status;
+ 
+-	spin_lock(&usbhid->lock);
+-
+ 	switch (status) {
+ 	case 0:			/* success */
+ 		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
+@@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
+ 		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
+ 	}
+ 
++	spin_lock(&usbhid->lock);
++
+ 	if (unplug) {
+ 		usbhid->ctrltail = usbhid->ctrlhead;
+ 	} else {
+diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
+index 43d14588448d..b4dde8315210 100644
+--- a/drivers/iio/dac/mcp4725.c
++++ b/drivers/iio/dac/mcp4725.c
+@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
+ 	data->client = client;
+ 
+ 	indio_dev->dev.parent = &client->dev;
++	indio_dev->name = id->name;
+ 	indio_dev->info = &mcp4725_info;
+ 	indio_dev->channels = &mcp4725_channel;
+ 	indio_dev->num_channels = 1;
+diff --git a/drivers/iio/imu/adis_buffer.c b/drivers/iio/imu/adis_buffer.c
+index cb32b593f1c5..36607d52fee0 100644
+--- a/drivers/iio/imu/adis_buffer.c
++++ b/drivers/iio/imu/adis_buffer.c
+@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
+ 		return -ENOMEM;
+ 
+ 	rx = adis->buffer;
+-	tx = rx + indio_dev->scan_bytes;
++	tx = rx + scan_count;
+ 
+ 	spi_message_init(&adis->msg);
+ 
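
The adis fix is pure pointer arithmetic: rx and tx are __be16 pointers, so rx + n advances n 16-bit elements rather than n bytes. The buffer holds scan_count receive words followed by the transmit words, so (for 16-bit scan elements, where scan_bytes == 2 * scan_count):

	__be16 *rx = adis->buffer;
	__be16 *tx = rx + scan_count;	/* == (u8 *)rx + scan_bytes */
	/* the old rx + indio_dev->scan_bytes advanced twice as far,
	 * past the end of the allocation */
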
+diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
+index f5ecd6e19f5d..a0d7deeac62f 100644
+--- a/drivers/iio/pressure/mpl115.c
++++ b/drivers/iio/pressure/mpl115.c
+@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
+ 		*val = ret >> 6;
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_OFFSET:
+-		*val = 605;
++		*val = -605;
+ 		*val2 = 750000;
+ 		return IIO_VAL_INT_PLUS_MICRO;
+ 	case IIO_CHAN_INFO_SCALE:
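
The sign flip follows from the IIO convention that userspace computes the processed value as (raw + offset) * scale. The MPL115's temperature ADC counts fall as temperature rises, so the scale is negative and the centring offset must be negative as well; with +605 the reported temperature came out with the wrong sign. Schematically:

	/* value = (raw + offset) * scale
	 * offset = -605.750000 (this fix); a positive offset made
	 * (raw + offset) positive and flipped the result's sign
	 */
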
+diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+index cb78b1e9bcd9..f504ba73e5dc 100644
+--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
+@@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
+ 	error = l2t_send(tdev, skb, l2e);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+@@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
+ 	error = cxgb3_ofld_send(tdev, skb);
+ 	if (error < 0)
+ 		kfree_skb(skb);
+-	return error;
++	return error < 0 ? error : 0;
+ }
+ 
+ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 353e2ab090ee..b52a704c3449 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -865,7 +865,7 @@ isert_put_conn(struct isert_conn *isert_conn)
+  * @isert_conn: isert connection struct
+  *
+  * Notes:
+- * In case the connection state is FULL_FEATURE, move state
++ * In case the connection state is BOUND, move state
+ * to TERMINATING and start teardown sequence (rdma_disconnect).
+  * In case the connection state is UP, complete flush as well.
+  *
+@@ -881,6 +881,7 @@ isert_conn_terminate(struct isert_conn *isert_conn)
+ 	case ISER_CONN_TERMINATING:
+ 		break;
+ 	case ISER_CONN_UP:
++	case ISER_CONN_BOUND:
+ 	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+ 		isert_info("Terminating conn %p state %d\n",
+ 			   isert_conn, isert_conn->state);
+@@ -927,14 +928,9 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
+ 			   enum rdma_cm_event_type event)
+ {
+ 	struct isert_np *isert_np = cma_id->context;
+-	struct isert_conn *isert_conn;
++	struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ 	bool terminating = false;
+ 
+-	if (isert_np->np_cm_id == cma_id)
+-		return isert_np_cma_handler(cma_id->context, event);
+-
+-	isert_conn = cma_id->qp->qp_context;
+-
+ 	mutex_lock(&isert_conn->mutex);
+ 	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
+ 	isert_conn_terminate(isert_conn);
+@@ -972,11 +968,15 @@ isert_connect_error(struct rdma_cm_id *cma_id)
+ static int
+ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+ {
++	struct isert_np *isert_np = cma_id->context;
+ 	int ret = 0;
+ 
+ 	isert_info("event %d status %d id %p np %p\n", event->event,
+ 		   event->status, cma_id, cma_id->context);
+ 
++	if (isert_np->np_cm_id == cma_id)
++		return isert_np_cma_handler(cma_id->context, event->event);
++
+ 	switch (event->event) {
+ 	case RDMA_CM_EVENT_CONNECT_REQUEST:
+ 		ret = isert_connect_request(cma_id, event);
+@@ -2059,7 +2059,8 @@ is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
+ 	void *start = isert_conn->rx_descs;
+ 	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);
+ 
+-	if (wr_id >= start && wr_id < start + len)
++	if ((wr_id >= start && wr_id < start + len) ||
++	    (wr_id == isert_conn->login_req_buf))
+ 		return false;
+ 
+ 	return true;
+@@ -2085,7 +2086,8 @@ isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
+ 			isert_completion_put(desc, isert_cmd, ib_dev, true);
+ 	} else {
+ 		isert_conn->post_recv_buf_count--;
+-		if (!isert_conn->post_recv_buf_count)
++		if (!isert_conn->post_recv_buf_count &&
++		    isert_conn->state >= ISER_CONN_BOUND)
+ 			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+ 	}
+ }
+@@ -3268,6 +3270,7 @@ accept_wait:
+ 
+ 	conn->context = isert_conn;
+ 	isert_conn->conn = conn;
++	isert_conn->state = ISER_CONN_BOUND;
+ 
+ 	isert_set_conn_info(np, conn, isert_conn);
+ 
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
+index 9ec23a786c02..621c222f3235 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.h
++++ b/drivers/infiniband/ulp/isert/ib_isert.h
+@@ -50,6 +50,7 @@ enum iser_ib_op_code {
+ enum iser_conn_state {
+ 	ISER_CONN_INIT,
+ 	ISER_CONN_UP,
++	ISER_CONN_BOUND,
+ 	ISER_CONN_FULL_FEATURE,
+ 	ISER_CONN_TERMINATING,
+ 	ISER_CONN_DOWN,
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 6fbc7bc824d2..27e5b0090e40 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1742,47 +1742,6 @@ send_sense:
+ 	return -1;
+ }
+ 
+-/**
+- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+- * @ch: RDMA channel of the task management request.
+- * @fn: Task management function to perform.
+- * @req_tag: Tag of the SRP task management request.
+- * @mgmt_ioctx: I/O context of the task management request.
+- *
+- * Returns zero if the target core will process the task management
+- * request asynchronously.
+- *
+- * Note: It is assumed that the initiator serializes tag-based task management
+- * requests.
+- */
+-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+-{
+-	struct srpt_device *sdev;
+-	struct srpt_rdma_ch *ch;
+-	struct srpt_send_ioctx *target;
+-	int ret, i;
+-
+-	ret = -EINVAL;
+-	ch = ioctx->ch;
+-	BUG_ON(!ch);
+-	BUG_ON(!ch->sport);
+-	sdev = ch->sport->sdev;
+-	BUG_ON(!sdev);
+-	spin_lock_irq(&sdev->spinlock);
+-	for (i = 0; i < ch->rq_size; ++i) {
+-		target = ch->ioctx_ring[i];
+-		if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+-		    target->tag == tag &&
+-		    srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+-			ret = 0;
+-			/* now let the target core abort &target->cmd; */
+-			break;
+-		}
+-	}
+-	spin_unlock_irq(&sdev->spinlock);
+-	return ret;
+-}
+-
+ static int srp_tmr_to_tcm(int fn)
+ {
+ 	switch (fn) {
+@@ -1817,7 +1776,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	struct se_cmd *cmd;
+ 	struct se_session *sess = ch->sess;
+ 	uint64_t unpacked_lun;
+-	uint32_t tag = 0;
+ 	int tcm_tmr;
+ 	int rc;
+ 
+@@ -1833,25 +1791,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ 	srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ 	send_ioctx->tag = srp_tsk->tag;
+ 	tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+-	if (tcm_tmr < 0) {
+-		send_ioctx->cmd.se_tmr_req->response =
+-			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+-		goto fail;
+-	}
+ 	unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ 				       sizeof(srp_tsk->lun));
+-
+-	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
+-		rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+-		if (rc < 0) {
+-			send_ioctx->cmd.se_tmr_req->response =
+-					TMR_TASK_DOES_NOT_EXIST;
+-			goto fail;
+-		}
+-		tag = srp_tsk->task_tag;
+-	}
+ 	rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
+-				srp_tsk, tcm_tmr, GFP_KERNEL, tag,
++				srp_tsk, tcm_tmr, GFP_KERNEL, srp_tsk->task_tag,
+ 				TARGET_SCF_ACK_KREF);
+ 	if (rc != 0) {
+ 		send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
+index f63341f20b91..e8c6a4842e91 100644
+--- a/drivers/input/misc/ati_remote2.c
++++ b/drivers/input/misc/ati_remote2.c
+@@ -817,26 +817,49 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	ar2->udev = udev;
+ 
++	/* Sanity check, first interface must have an endpoint */
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 0 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail1;
++	}
+ 	ar2->intf[0] = interface;
+ 	ar2->ep[0] = &alt->endpoint[0].desc;
+ 
++	/* Sanity check, the device must have two interfaces */
+ 	ar2->intf[1] = usb_ifnum_to_if(udev, 1);
++	if ((udev->actconfig->desc.bNumInterfaces < 2) || !ar2->intf[1]) {
++		dev_err(&interface->dev, "%s(): need 2 interfaces, found %d\n",
++			__func__, udev->actconfig->desc.bNumInterfaces);
++		r = -ENODEV;
++		goto fail1;
++	}
++
+ 	r = usb_driver_claim_interface(&ati_remote2_driver, ar2->intf[1], ar2);
+ 	if (r)
+ 		goto fail1;
++
++	/* Sanity check, second interface must have an endpoint */
+ 	alt = ar2->intf[1]->cur_altsetting;
++	if (alt->desc.bNumEndpoints < 1 || !alt->endpoint) {
++		dev_err(&interface->dev,
++			"%s(): interface 1 must have an endpoint\n", __func__);
++		r = -ENODEV;
++		goto fail2;
++	}
+ 	ar2->ep[1] = &alt->endpoint[0].desc;
+ 
+ 	r = ati_remote2_urb_init(ar2);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	ar2->channel_mask = channel_mask;
+ 	ar2->mode_mask = mode_mask;
+ 
+ 	r = ati_remote2_setup(ar2, ar2->channel_mask);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	usb_make_path(udev, ar2->phys, sizeof(ar2->phys));
+ 	strlcat(ar2->phys, "/input0", sizeof(ar2->phys));
+@@ -845,11 +868,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group);
+ 	if (r)
+-		goto fail2;
++		goto fail3;
+ 
+ 	r = ati_remote2_input_init(ar2);
+ 	if (r)
+-		goto fail3;
++		goto fail4;
+ 
+ 	usb_set_intfdata(interface, ar2);
+ 
+@@ -857,10 +880,11 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d
+ 
+ 	return 0;
+ 
+- fail3:
++ fail4:
+ 	sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group);
+- fail2:
++ fail3:
+ 	ati_remote2_urb_cleanup(ar2);
++ fail2:
+ 	usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]);
+  fail1:
+ 	kfree(ar2);
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index ac1fa5f44580..9c0ea36913b4 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1663,6 +1663,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->ctrl_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bMasterInterface0);
++	if (!pcu->ctrl_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->ctrl_intf->cur_altsetting;
+ 	pcu->ep_ctrl = &alt->endpoint[0].desc;
+@@ -1670,6 +1672,8 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc
+ 
+ 	pcu->data_intf = usb_ifnum_to_if(pcu->udev,
+ 					 union_desc->bSlaveInterface0);
++	if (!pcu->data_intf)
++		return -EINVAL;
+ 
+ 	alt = pcu->data_intf->cur_altsetting;
+ 	if (alt->desc.bNumEndpoints != 2) {
+diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
+index 63b539d3daba..84909a12ff36 100644
+--- a/drivers/input/misc/powermate.c
++++ b/drivers/input/misc/powermate.c
+@@ -307,6 +307,9 @@ static int powermate_probe(struct usb_interface *intf, const struct usb_device_i
+ 	int error = -ENOMEM;
+ 
+ 	interface = intf->cur_altsetting;
++	if (interface->desc.bNumEndpoints < 1)
++		return -EINVAL;
++
+ 	endpoint = &interface->endpoint[0].desc;
+ 	if (!usb_endpoint_is_int_in(endpoint))
+ 		return -EIO;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 3a32caf06bf1..8cfeec06d8d9 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -862,8 +862,9 @@ static void synaptics_report_ext_buttons(struct psmouse *psmouse,
+ 	if (!SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap))
+ 		return;
+ 
+-	/* Bug in FW 8.1, buttons are reported only when ExtBit is 1 */
+-	if (SYN_ID_FULL(priv->identity) == 0x801 &&
++	/* Bug in FW 8.1 & 8.2, buttons are reported only when ExtBit is 1 */
++	if ((SYN_ID_FULL(priv->identity) == 0x801 ||
++	     SYN_ID_FULL(priv->identity) == 0x802) &&
+ 	    !((psmouse->packet[0] ^ psmouse->packet[3]) & 0x02))
+ 		return;
+ 
+diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
+index a569c6dbd1d1..5ba876e9b805 100644
+--- a/drivers/irqchip/irq-omap-intc.c
++++ b/drivers/irqchip/irq-omap-intc.c
+@@ -48,6 +48,7 @@
+ #define INTC_ILR0		0x0100
+ 
+ #define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
++#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
+ #define INTCPS_NR_ILR_REGS	128
+ #define INTCPS_NR_MIR_REGS	4
+ 
+@@ -331,37 +332,36 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
+ static asmlinkage void __exception_irq_entry
+ omap_intc_handle_irq(struct pt_regs *regs)
+ {
+-	u32 irqnr = 0;
+-	int handled_irq = 0;
+-	int i;
+-
+-	do {
+-		for (i = 0; i < omap_nr_pending; i++) {
+-			irqnr = intc_readl(INTC_PENDING_IRQ0 + (0x20 * i));
+-			if (irqnr)
+-				goto out;
+-		}
+-
+-out:
+-		if (!irqnr)
+-			break;
++	extern unsigned long irq_err_count;
++	u32 irqnr;
+ 
+-		irqnr = intc_readl(INTC_SIR);
+-		irqnr &= ACTIVEIRQ_MASK;
+-
+-		if (irqnr) {
+-			handle_domain_irq(domain, irqnr, regs);
+-			handled_irq = 1;
+-		}
+-	} while (irqnr);
++	irqnr = intc_readl(INTC_SIR);
+ 
+ 	/*
+-	 * If an irq is masked or deasserted while active, we will
+-	 * keep ending up here with no irq handled. So remove it from
+-	 * the INTC with an ack.
++	 * A spurious IRQ can result if the interrupt that triggered the
++	 * sorting is no longer active during the sorting (10 INTC
++	 * functional clock cycles after interrupt assertion), or if a
++	 * change in the interrupt mask affected the result during
++	 * sorting. No special handling is required except ignoring the
++	 * SIR register value just read and retrying.
++	 * See section 6.2.5 of the AM335x TRM, Literature Number SPRUH73K.
++	 *
++	 * Often, a spurious interrupt situation has been fixed by adding
++	 * a flush for the posted write acking the IRQ in the device
++	 * driver. Typically, this is going to be the device driver whose
++	 * interrupt was handled just before the spurious IRQ occurred.
++	 * Pay attention to those device drivers if you run into the
++	 * spurious IRQ condition below.
+ 	 */
+-	if (!handled_irq)
++	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
++		pr_err_once("%s: spurious irq!\n", __func__);
++		irq_err_count++;
+ 		omap_ack_irq(NULL);
++		return;
++	}
++
++	irqnr &= ACTIVEIRQ_MASK;
++	handle_domain_irq(domain, irqnr, regs);
+ }
+ 
+ void __init omap3_init_irq(void)
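
The rewritten handler reads INTC_SIR exactly once and leans on its layout: bits [6:0] carry the number of the highest-priority active interrupt, and a failed priority sort reads back with bits [31:7] all set. That is what SPURIOUSIRQ_MASK encodes:

	/* SPURIOUSIRQ_MASK = 0x1ffffff << 7 = 0xffffff80 */
	if ((irqnr & 0xffffff80) == 0xffffff80) {
		/* spurious: ack the INTC and return */
	} else {
		irqnr &= 0x7f;	/* ACTIVEIRQ_MASK: a valid IRQ number */
	}
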
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 42522c8f13c6..2a102834c2ee 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1046,8 +1046,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	 */
+ 	atomic_set(&dc->count, 1);
+ 
+-	if (bch_cached_dev_writeback_start(dc))
++	/* Block writeback thread, but spawn it */
++	down_write(&dc->writeback_lock);
++	if (bch_cached_dev_writeback_start(dc)) {
++		up_write(&dc->writeback_lock);
+ 		return -ENOMEM;
++	}
+ 
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+ 		bch_sectors_dirty_init(dc);
+@@ -1059,6 +1063,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	bch_cached_dev_run(dc);
+ 	bcache_device_link(&dc->disk, c, "bdev");
+ 
++	/* Allow the writeback thread to proceed */
++	up_write(&dc->writeback_lock);
++
+ 	pr_info("Caching %s as %s on set %pU",
+ 		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+ 		dc->disk.c->sb.set_uuid);
+@@ -1397,6 +1404,9 @@ static void cache_set_flush(struct closure *cl)
+ 	struct btree *b;
+ 	unsigned i;
+ 
++	if (!c)
++		closure_return(cl);
++
+ 	bch_cache_accounting_destroy(&c->accounting);
+ 
+ 	kobject_put(&c->internal);
+@@ -1862,11 +1872,12 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+ 	return 0;
+ }
+ 
+-static void register_cache(struct cache_sb *sb, struct page *sb_page,
++static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ 				struct block_device *bdev, struct cache *ca)
+ {
+ 	char name[BDEVNAME_SIZE];
+-	const char *err = "cannot allocate memory";
++	const char *err = NULL;
++	int ret = 0;
+ 
+ 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+ 	ca->bdev = bdev;
+@@ -1881,27 +1892,35 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
+ 	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
+ 		ca->discard = CACHE_DISCARD(&ca->sb);
+ 
+-	if (cache_alloc(sb, ca) != 0)
++	ret = cache_alloc(sb, ca);
++	if (ret != 0)
+ 		goto err;
+ 
+-	err = "error creating kobject";
+-	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache"))
+-		goto err;
++	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
++		err = "error calling kobject_add";
++		ret = -ENOMEM;
++		goto out;
++	}
+ 
+ 	mutex_lock(&bch_register_lock);
+ 	err = register_cache_set(ca);
+ 	mutex_unlock(&bch_register_lock);
+ 
+-	if (err)
+-		goto err;
++	if (err) {
++		ret = -ENODEV;
++		goto out;
++	}
+ 
+ 	pr_info("registered cache device %s", bdevname(bdev, name));
++
+ out:
+ 	kobject_put(&ca->kobj);
+-	return;
++
+ err:
+-	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
+-	goto out;
++	if (err)
++		pr_notice("error opening %s: %s", bdevname(bdev, name), err);
++
++	return ret;
+ }
+ 
+ /* Global interfaces/init */
+@@ -1999,7 +2018,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 		if (!ca)
+ 			goto err_close;
+ 
+-		register_cache(sb, sb_page, bdev, ca);
++		if (register_cache(sb, sb_page, bdev, ca) != 0)
++			goto err_close;
+ 	}
+ out:
+ 	if (sb_page)
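
For reference, the locking shape used in the bcache change above — take the writer side before spawning the thread so its first reader-side acquisition parks it until setup finishes — can be sketched in userspace with pthreads; every name below is hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t writeback_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *writeback_thread(void *arg)
{
	pthread_rwlock_rdlock(&writeback_lock);	/* parks until setup is done */
	puts("writeback running");
	pthread_rwlock_unlock(&writeback_lock);
	return NULL;
}

static int attach(pthread_t *t)
{
	pthread_rwlock_wrlock(&writeback_lock);
	if (pthread_create(t, NULL, writeback_thread, NULL)) {
		pthread_rwlock_unlock(&writeback_lock);	/* mirrors the error path above */
		return -1;
	}
	/* ... setup the thread depends on happens here ... */
	pthread_rwlock_unlock(&writeback_lock);		/* let the thread proceed */
	return 0;
}

int main(void)
{
	pthread_t t;

	if (attach(&t) == 0)
		pthread_join(t, NULL);
	return 0;
}
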
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index cde1d6749017..9b4f73e55d7c 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1803,5 +1803,8 @@ bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+ 
+ void dm_pool_issue_prefetches(struct dm_pool_metadata *pmd)
+ {
+-	dm_tm_issue_prefetches(pmd->tm);
++	down_read(&pmd->root_lock);
++	if (!pmd->fail_io)
++		dm_tm_issue_prefetches(pmd->tm);
++	up_read(&pmd->root_lock);
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 62610aafaac7..1f37781f7765 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1065,12 +1065,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+ 	 * back into ->request_fn() could deadlock attempting to grab the
+ 	 * queue lock again.
+ 	 */
+-	if (run_queue) {
+-		if (md->queue->mq_ops)
+-			blk_mq_run_hw_queues(md->queue, true);
+-		else
+-			blk_run_queue_async(md->queue);
+-	}
++	if (!md->queue->mq_ops && run_queue)
++		blk_run_queue_async(md->queue);
+ 
+ 	/*
+ 	 * dm_put() must be at the end of this function. See the comment above
+@@ -1296,7 +1292,10 @@ static void dm_complete_request(struct request *rq, int error)
+ 	struct dm_rq_target_io *tio = tio_from_request(rq);
+ 
+ 	tio->error = error;
+-	blk_complete_request(rq);
++	if (!rq->q->mq_ops)
++		blk_complete_request(rq);
++	else
++		blk_mq_complete_request(rq);
+ }
+ 
+ /*
+diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
+index ac3ede2bd00e..ac37ef18201c 100644
+--- a/drivers/md/multipath.c
++++ b/drivers/md/multipath.c
+@@ -129,7 +129,9 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
+ 	}
+ 	multipath = conf->multipaths + mp_bh->path;
+ 
+-	mp_bh->bio = *bio;
++	bio_init(&mp_bh->bio);
++	__bio_clone_fast(&mp_bh->bio, bio);
++
+ 	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
+ 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
+ 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 0d767e31f455..ef0a99a3a779 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2080,6 +2080,14 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	unsigned long cpu;
+ 	int err = 0;
+ 
++	/*
++	 * Never shrink. And mddev_suspend() could deadlock if this is called
++	 * from raid5d. In that case, scribble_disks and scribble_sectors
++	 * should equal new_disks and new_sectors
++	 */
++	if (conf->scribble_disks >= new_disks &&
++	    conf->scribble_sectors >= new_sectors)
++		return 0;
+ 	mddev_suspend(conf->mddev);
+ 	get_online_cpus();
+ 	for_each_present_cpu(cpu) {
+@@ -2101,6 +2109,10 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+ 	}
+ 	put_online_cpus();
+ 	mddev_resume(conf->mddev);
++	if (!err) {
++		conf->scribble_disks = new_disks;
++		conf->scribble_sectors = new_sectors;
++	}
+ 	return err;
+ }
+ 
+@@ -4220,7 +4232,6 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 		WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+ 					  (1 << STRIPE_SYNCING) |
+ 					  (1 << STRIPE_REPLACED) |
+-					  (1 << STRIPE_PREREAD_ACTIVE) |
+ 					  (1 << STRIPE_DELAYED) |
+ 					  (1 << STRIPE_BIT_DELAY) |
+ 					  (1 << STRIPE_FULL_WRITE) |
+@@ -4235,6 +4246,7 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 					      (1 << STRIPE_REPLACED)));
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
++					    (1 << STRIPE_PREREAD_ACTIVE) |
+ 					    (1 << STRIPE_DEGRADED)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+@@ -6366,6 +6378,12 @@ static int raid5_alloc_percpu(struct r5conf *conf)
+ 	}
+ 	put_online_cpus();
+ 
++	if (!err) {
++		conf->scribble_disks = max(conf->raid_disks,
++			conf->previous_raid_disks);
++		conf->scribble_sectors = max(conf->chunk_sectors,
++			conf->prev_chunk_sectors);
++	}
+ 	return err;
+ }
+ 
+@@ -6942,8 +6960,8 @@ static int run(struct mddev *mddev)
+ 		}
+ 
+ 		if (discard_supported &&
+-		   mddev->queue->limits.max_discard_sectors >= stripe &&
+-		   mddev->queue->limits.discard_granularity >= stripe)
++		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
++		    mddev->queue->limits.discard_granularity >= stripe)
+ 			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ 						mddev->queue);
+ 		else
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 03472fbbd882..d31ed93bb8a9 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -501,6 +501,8 @@ struct r5conf {
+ 					      * conversions
+ 					      */
+ 	} __percpu *percpu;
++	int scribble_disks;
++	int scribble_sectors;
+ #ifdef CONFIG_HOTPLUG_CPU
+ 	struct notifier_block	cpu_notify;
+ #endif
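
The guard added to resize_chunks() above follows a common grow-only cache pattern: record the geometry the buffers were last sized for, and return early when the request already fits, so the heavyweight (and here potentially deadlocking) suspend/resume path is skipped. A standalone sketch with made-up names:

#include <stdio.h>

struct scribble { int disks; int sectors; };

static int resize_scribble(struct scribble *s, int new_disks, int new_sectors)
{
	if (s->disks >= new_disks && s->sectors >= new_sectors)
		return 0;			/* never shrink */
	/* ... suspend, reallocate per-CPU buffers, resume ... */
	s->disks = new_disks;			/* record only on success */
	s->sectors = new_sectors;
	return 0;
}

int main(void)
{
	struct scribble s = { .disks = 8, .sectors = 16 };

	resize_scribble(&s, 6, 16);		/* no-op: both already fit */
	resize_scribble(&s, 10, 16);		/* grows disks to 10 */
	printf("%d %d\n", s.disks, s.sectors);	/* 10 16 */
	return 0;
}
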
+diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
+index 12d93203d405..57bb74299643 100644
+--- a/drivers/media/i2c/adv7511.c
++++ b/drivers/media/i2c/adv7511.c
+@@ -1048,12 +1048,23 @@ static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, in
+ 	}
+ }
+ 
++static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
++{
++	struct adv7511_state *state = get_adv7511_state(sd);
++	struct adv7511_edid_detect ed;
++
++	/* We failed to read the EDID, so send an event for this. */
++	ed.present = false;
++	ed.segment = adv7511_rd(sd, 0xc4);
++	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
++}
++
+ static void adv7511_edid_handler(struct work_struct *work)
+ {
+ 	struct delayed_work *dwork = to_delayed_work(work);
+ 	struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ 	struct v4l2_subdev *sd = &state->sd;
+-	struct adv7511_edid_detect ed;
+ 
+ 	v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+ 
+@@ -1078,9 +1089,7 @@ static void adv7511_edid_handler(struct work_struct *work)
+ 	}
+ 
+ 	/* We failed to read the EDID, so send an event for this. */
+-	ed.present = false;
+-	ed.segment = adv7511_rd(sd, 0xc4);
+-	v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
++	adv7511_notify_no_edid(sd);
+ 	v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+ }
+ 
+@@ -1151,7 +1160,6 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 	/* update read only ctrls */
+ 	v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ 	v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+-	v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 
+ 	if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ 		v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+@@ -1181,6 +1189,7 @@ static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+ 		}
+ 		adv7511_s_power(sd, false);
+ 		memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
++		adv7511_notify_no_edid(sd);
+ 	}
+ }
+ 
+@@ -1257,6 +1266,7 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		}
+ 		/* one more segment read ok */
+ 		state->edid.segments = segment + 1;
++		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
+ 		if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ 			/* Request next EDID segment */
+ 			v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+@@ -1276,7 +1286,6 @@ static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+ 		ed.present = true;
+ 		ed.segment = 0;
+ 		state->edid_detect_counter++;
+-		v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ 		v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ 		return ed.present;
+ 	}
+diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
+index bc12060e0882..88a9d3a10131 100644
+--- a/drivers/media/pci/bt8xx/bttv-driver.c
++++ b/drivers/media/pci/bt8xx/bttv-driver.c
+@@ -2334,6 +2334,19 @@ static int bttv_g_fmt_vid_overlay(struct file *file, void *priv,
+ 	return 0;
+ }
+ 
++static void bttv_get_width_mask_vid_cap(const struct bttv_format *fmt,
++					unsigned int *width_mask,
++					unsigned int *width_bias)
++{
++	if (fmt->flags & FORMAT_FLAGS_PLANAR) {
++		*width_mask = ~15; /* width must be a multiple of 16 pixels */
++		*width_bias = 8;   /* nearest */
++	} else {
++		*width_mask = ~3; /* width must be a multiple of 4 pixels */
++		*width_bias = 2;  /* nearest */
++	}
++}
++
+ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 						struct v4l2_format *f)
+ {
+@@ -2343,6 +2356,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	enum v4l2_field field;
+ 	__s32 width, height;
+ 	__s32 height2;
++	unsigned int width_mask, width_bias;
+ 	int rc;
+ 
+ 	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+@@ -2375,9 +2389,9 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv,
+ 	width = f->fmt.pix.width;
+ 	height = f->fmt.pix.height;
+ 
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	rc = limit_scaled_size_lock(fh, &width, &height, field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 0);
+ 	if (0 != rc)
+@@ -2410,6 +2424,7 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct bttv_fh *fh = priv;
+ 	struct bttv *btv = fh->btv;
+ 	__s32 width, height;
++	unsigned int width_mask, width_bias;
+ 	enum v4l2_field field;
+ 
+ 	retval = bttv_switch_type(fh, f->type);
+@@ -2424,9 +2439,10 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 	height = f->fmt.pix.height;
+ 	field = f->fmt.pix.field;
+ 
++	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
++	bttv_get_width_mask_vid_cap(fmt, &width_mask, &width_bias);
+ 	retval = limit_scaled_size_lock(fh, &width, &height, f->fmt.pix.field,
+-			       /* width_mask: 4 pixels */ ~3,
+-			       /* width_bias: nearest */ 2,
++			       width_mask, width_bias,
+ 			       /* adjust_size */ 1,
+ 			       /* adjust_crop */ 1);
+ 	if (0 != retval)
+@@ -2434,8 +2450,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv,
+ 
+ 	f->fmt.pix.field = field;
+ 
+-	fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+-
+ 	/* update our state information */
+ 	fh->fmt              = fmt;
+ 	fh->cap.field        = f->fmt.pix.field;
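
The mask/bias pairs chosen by the new bttv helper implement round-to-nearest alignment, assuming limit_scaled_size_lock() applies them as (width + bias) & mask: the bias is half the alignment and the mask clears the low bits. A quick standalone check (not driver code):

#include <stdio.h>

static unsigned int round_width(unsigned int w, unsigned int mask,
				unsigned int bias)
{
	return (w + bias) & mask;
}

int main(void)
{
	/* packed: multiples of 4 */
	printf("%u\n", round_width(645, ~3u, 2));	/* 644 */
	printf("%u\n", round_width(646, ~3u, 2));	/* 648 */
	/* planar: multiples of 16 */
	printf("%u\n", round_width(645, ~15u, 8));	/* 640 */
	printf("%u\n", round_width(648, ~15u, 8));	/* 656 */
	return 0;
}
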
+diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
+index 99d09a7566d3..175d3c1f1e6d 100644
+--- a/drivers/media/pci/saa7134/saa7134-video.c
++++ b/drivers/media/pci/saa7134/saa7134-video.c
+@@ -1211,10 +1211,13 @@ static int saa7134_g_fmt_vid_cap(struct file *file, void *priv,
+ 	f->fmt.pix.height       = dev->height;
+ 	f->fmt.pix.field        = dev->field;
+ 	f->fmt.pix.pixelformat  = dev->fmt->fourcc;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * dev->fmt->depth) >> 3;
++	if (dev->fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * dev->fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 	return 0;
+ }
+@@ -1290,10 +1293,13 @@ static int saa7134_try_fmt_vid_cap(struct file *file, void *priv,
+ 	if (f->fmt.pix.height > maxh)
+ 		f->fmt.pix.height = maxh;
+ 	f->fmt.pix.width &= ~0x03;
+-	f->fmt.pix.bytesperline =
+-		(f->fmt.pix.width * fmt->depth) >> 3;
++	if (fmt->planar)
++		f->fmt.pix.bytesperline = f->fmt.pix.width;
++	else
++		f->fmt.pix.bytesperline =
++			(f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.sizeimage =
+-		f->fmt.pix.height * f->fmt.pix.bytesperline;
++		(f->fmt.pix.height * f->fmt.pix.width * fmt->depth) / 8;
+ 	f->fmt.pix.colorspace   = V4L2_COLORSPACE_SMPTE170M;
+ 
+ 	return 0;
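
A worked example of the planar/packed distinction fixed in the saa7134 change above: for planar formats, bytesperline describes only the first (luma) plane and so equals the width, while sizeimage still covers all planes. Illustrative numbers only:

#include <stdio.h>

int main(void)
{
	unsigned w = 720, h = 576;
	unsigned depth_planar = 12;	/* e.g. YUV 4:2:0 */
	unsigned depth_packed = 16;	/* e.g. YUYV */

	printf("planar: bytesperline=%u sizeimage=%u\n",
	       w, h * w * depth_planar / 8);		/* 720, 622080 */
	printf("packed: bytesperline=%u sizeimage=%u\n",
	       w * depth_packed / 8,
	       h * w * depth_packed / 8);		/* 1440, 829440 */
	return 0;
}
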
+diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
+index c5dbba5b5bc9..6684e2ee3db1 100644
+--- a/drivers/media/tuners/si2157.c
++++ b/drivers/media/tuners/si2157.c
+@@ -167,6 +167,7 @@ static int si2157_init(struct dvb_frontend *fe)
+ 		len = fw->data[fw->size - remaining];
+ 		if (len > SI2157_ARGLEN) {
+ 			dev_err(&client->dev, "Bad firmware length\n");
++			ret = -EINVAL;
+ 			goto err_release_firmware;
+ 		}
+ 		memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index 702267e208ba..a7e1f6f37790 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
+ 	{ USB_DEVICE(0x0471, 0x0312) },
+ 	{ USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
+ 	{ USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
++	{ USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
+ 	{ USB_DEVICE(0x069A, 0x0001) }, /* Askey */
+ 	{ USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
+ 	{ USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
+@@ -802,6 +803,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
+ 			name = "Philips SPC 900NC webcam";
+ 			type_id = 740;
+ 			break;
++		case 0x032C:
++			PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
++			name = "Philips SPC 880NC webcam";
++			type_id = 740;
++			break;
+ 		default:
+ 			return -ENODEV;
+ 			break;
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index 12b403e78d52..ef5815de2785 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1524,9 +1524,23 @@ static int usbvision_probe(struct usb_interface *intf,
+ 
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+-	else
++	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
+ 		interface = &dev->actconfig->interface[ifnum]->altsetting[0];
++	else {
++		dev_err(&intf->dev, "interface %d is invalid, max is %d\n",
++		    ifnum, dev->actconfig->desc.bNumInterfaces - 1);
++		ret = -ENODEV;
++		goto err_usb;
++	}
++
++	if (interface->desc.bNumEndpoints < 2) {
++		dev_err(&intf->dev,
++		    "interface %d has %d endpoints, but must have at least 2\n",
++		    ifnum, interface->desc.bNumEndpoints);
++		ret = -ENODEV;
++		goto err_usb;
++	}
+ 	endpoint = &interface->endpoint[1].desc;
++
+ 	if (!usb_endpoint_xfer_isoc(endpoint)) {
+ 		dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n",
+ 		    __func__, ifnum);
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index af635430524e..8085059ce925 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -394,7 +394,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		get_user(kp->index, &up->index) ||
+ 		get_user(kp->type, &up->type) ||
+ 		get_user(kp->flags, &up->flags) ||
+-		get_user(kp->memory, &up->memory))
++		get_user(kp->memory, &up->memory) ||
++		get_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_OUTPUT(kp->type))
+@@ -406,9 +407,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		if (get_user(kp->length, &up->length))
+-			return -EFAULT;
+-
+ 		num_planes = kp->length;
+ 		if (num_planes == 0) {
+ 			kp->m.planes = NULL;
+@@ -441,16 +439,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (get_user(kp->length, &up->length) ||
+-				get_user(kp->m.offset, &up->m.offset))
++			if (get_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+ 			{
+ 			compat_long_t tmp;
+ 
+-			if (get_user(kp->length, &up->length) ||
+-			    get_user(tmp, &up->m.userptr))
++			if (get_user(tmp, &up->m.userptr))
+ 				return -EFAULT;
+ 
+ 			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+@@ -492,7 +488,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+ 		put_user(kp->reserved2, &up->reserved2) ||
+-		put_user(kp->reserved, &up->reserved))
++		put_user(kp->reserved, &up->reserved) ||
++		put_user(kp->length, &up->length))
+ 			return -EFAULT;
+ 
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+@@ -515,13 +512,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	} else {
+ 		switch (kp->memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.offset, &up->m.offset))
++			if (put_user(kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+-			if (put_user(kp->length, &up->length) ||
+-				put_user(kp->m.userptr, &up->m.userptr))
++			if (put_user(kp->m.userptr, &up->m.userptr))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_OVERLAY:
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 4cf38c39878a..883ba74fbc1e 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -238,6 +238,11 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ 	dev = cl->dev;
+ 
+ 	mutex_lock(&dev->device_lock);
++	if (dev->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
++
+ 	if (!mei_cl_is_connected(cl)) {
+ 		rets = -ENODEV;
+ 		goto out;
+@@ -287,6 +292,10 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ 	dev = cl->dev;
+ 
+ 	mutex_lock(&dev->device_lock);
++	if (dev->dev_state != MEI_DEV_ENABLED) {
++		rets = -ENODEV;
++		goto out;
++	}
+ 
+ 	cb = mei_cl_read_cb(cl, NULL);
+ 	if (cb)
+diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
+index 9a39e0b7e583..0c864eb21f58 100644
+--- a/drivers/mmc/host/atmel-mci.c
++++ b/drivers/mmc/host/atmel-mci.c
+@@ -2281,6 +2281,23 @@ static int atmci_configure_dma(struct atmel_mci *host)
+ {
+ 	host->dma.chan = dma_request_slave_channel_reason(&host->pdev->dev,
+ 							"rxtx");
++
++	if (PTR_ERR(host->dma.chan) == -ENODEV) {
++		struct mci_platform_data *pdata = host->pdev->dev.platform_data;
++		dma_cap_mask_t mask;
++
++		if (!pdata || !pdata->dma_filter)
++			return -ENODEV;
++
++		dma_cap_zero(mask);
++		dma_cap_set(DMA_SLAVE, mask);
++
++		host->dma.chan = dma_request_channel(mask, pdata->dma_filter,
++						     pdata->dma_slave);
++		if (!host->dma.chan)
++			host->dma.chan = ERR_PTR(-ENODEV);
++	}
++
+ 	if (IS_ERR(host->dma.chan))
+ 		return PTR_ERR(host->dma.chan);
+ 
+diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
+index ae19d83bb9de..055cad1e94d5 100644
+--- a/drivers/mmc/host/mmc_spi.c
++++ b/drivers/mmc/host/mmc_spi.c
+@@ -1436,6 +1436,12 @@ static int mmc_spi_probe(struct spi_device *spi)
+ 					     host->pdata->cd_debounce);
+ 		if (status != 0)
+ 			goto fail_add_host;
++
++		/* The platform has a CD GPIO signal that may support
++		 * interrupts, so let mmc_gpiod_request_cd_irq() decide
++		 * if polling is needed or not.
++		 */
++		mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ 		mmc_gpiod_request_cd_irq(mmc);
+ 	}
+ 
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index 065dc70caa1d..404a33b3c33e 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -136,6 +136,10 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 	struct resource *res;
+ 
+ 	host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
++
++	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
++	host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
++
+ 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ 					   "conf-sdio3");
+ 	if (res) {
+@@ -149,7 +153,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 		 * Configuration register, if the adjustment is not done,
+ 		 * remove them from the capabilities.
+ 		 */
+-		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ 		host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50);
+ 
+ 		dev_warn(&pdev->dev, "conf-sdio3 register not found: disabling SDR50 and DDR50 modes.\nConsider updating your dtb\n");
+@@ -160,7 +163,6 @@ static int armada_38x_quirks(struct platform_device *pdev,
+ 	 * controller has different capabilities than the ones shown
+ 	 * in its registers
+ 	 */
+-	host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+ 	if (of_property_read_bool(np, "no-1-8-v")) {
+ 		host->caps &= ~SDHCI_CAN_VDD_180;
+ 		host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index f47c4a8370be..c60dde917e49 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -681,9 +681,20 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ 	if (!data)
+ 		target_timeout = cmd->busy_timeout * 1000;
+ 	else {
+-		target_timeout = data->timeout_ns / 1000;
+-		if (host->clock)
+-			target_timeout += data->timeout_clks / host->clock;
++		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
++		if (host->clock && data->timeout_clks) {
++			unsigned long long val;
++
++			/*
++			 * data->timeout_clks is in units of clock cycles.
++			 * host->clock is in Hz.  target_timeout is in us.
++			 * Hence, us = 1000000 * cycles / Hz.  Round up.
++			 */
++			val = 1000000 * data->timeout_clks;
++			if (do_div(val, host->clock))
++				target_timeout++;
++			target_timeout += val;
++		}
+ 	}
+ 
+ 	/*
+@@ -3126,14 +3137,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)
+ 			host->timeout_clk *= 1000;
+ 
++		if (override_timeout_clk)
++			host->timeout_clk = override_timeout_clk;
++
+ 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
+ 			host->ops->get_max_timeout_count(host) : 1 << 27;
+ 		mmc->max_busy_timeout /= host->timeout_clk;
+ 	}
+ 
+-	if (override_timeout_clk)
+-		host->timeout_clk = override_timeout_clk;
+-
+ 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ 
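
The new sdhci timeout arithmetic above converts clock cycles to microseconds with round-up, us = 1000000 * cycles / Hz plus one when there is a remainder (do_div() leaves the quotient in val and returns the remainder). A standalone restatement:

#include <stdio.h>

static unsigned long long cycles_to_us(unsigned long long clks,
				       unsigned int hz)
{
	unsigned long long val = 1000000ULL * clks;
	unsigned long long us = val / hz;

	if (val % hz)	/* the remainder do_div() reports */
		us++;
	return us;
}

int main(void)
{
	/* 100000 cycles at 52 MHz -> 1923.07... us, rounded up to 1924 */
	printf("%llu\n", cycles_to_us(100000, 52000000));
	return 0;
}
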
+diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
+index 43b3392ffee7..652d01832873 100644
+--- a/drivers/mtd/onenand/onenand_base.c
++++ b/drivers/mtd/onenand/onenand_base.c
+@@ -2599,6 +2599,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+  */
+ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ {
++	struct onenand_chip *this = mtd->priv;
+ 	int ret;
+ 
+ 	ret = onenand_block_isbad(mtd, ofs);
+@@ -2610,7 +2611,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+ 	}
+ 
+ 	onenand_get_device(mtd, FL_WRITING);
+-	ret = mtd_block_markbad(mtd, ofs);
++	ret = this->block_markbad(mtd, ofs);
+ 	onenand_release_device(mtd);
+ 	return ret;
+ }
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index e07afc673d7a..e4c079612100 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3157,7 +3157,7 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ 	dev->hw_features |= dev->features;
+ 	dev->vlan_features |= dev->features;
+-	dev->priv_flags |= IFF_UNICAST_FLT;
++	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
+ 	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
+ 
+ 	err = register_netdev(dev);
+diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
+index 696852eb23c3..7a3f990c1935 100644
+--- a/drivers/net/irda/irtty-sir.c
++++ b/drivers/net/irda/irtty-sir.c
+@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
+ 
+ 	/* Module stuff handled via irda_ldisc.owner - Jean II */
+ 
+-	/* First make sure we're not already connected. */
+-	if (tty->disc_data != NULL) {
+-		priv = tty->disc_data;
+-		if (priv && priv->magic == IRTTY_MAGIC) {
+-			ret = -EEXIST;
+-			goto out;
+-		}
+-		tty->disc_data = NULL;		/* ### */
+-	}
+-
+ 	/* stop the underlying  driver */
+ 	irtty_stop_receiver(tty, TRUE);
+ 	if (tty->ops->stop)
+diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
+index dac7a0d9bb46..18cc2c8d5447 100644
+--- a/drivers/net/rionet.c
++++ b/drivers/net/rionet.c
+@@ -280,7 +280,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	struct net_device *ndev = dev_id;
+ 	struct rionet_private *rnet = netdev_priv(ndev);
+ 
+-	spin_lock(&rnet->lock);
++	spin_lock(&rnet->tx_lock);
+ 
+ 	if (netif_msg_intr(rnet))
+ 		printk(KERN_INFO
+@@ -299,7 +299,7 @@ static void rionet_outb_msg_event(struct rio_mport *mport, void *dev_id, int mbo
+ 	if (rnet->tx_cnt < RIONET_TX_RING_SIZE)
+ 		netif_wake_queue(ndev);
+ 
+-	spin_unlock(&rnet->lock);
++	spin_unlock(&rnet->tx_lock);
+ }
+ 
+ static int rionet_open(struct net_device *ndev)
+diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
+index 726ebe792813..26fb7b0b856c 100644
+--- a/drivers/of/of_reserved_mem.c
++++ b/drivers/of/of_reserved_mem.c
+@@ -31,11 +31,13 @@ int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+ 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
+ 	phys_addr_t *res_base)
+ {
++	phys_addr_t base;
+ 	/*
+ 	 * We use __memblock_alloc_base() because memblock_alloc_base()
+ 	 * panic()s on allocation failure.
+ 	 */
+-	phys_addr_t base = __memblock_alloc_base(size, align, end);
++	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
++	base = __memblock_alloc_base(size, align, end);
+ 	if (!base)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index c91185721345..25ad1b27ffae 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -176,6 +176,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	u16 orig_cmd;
+ 	struct pci_bus_region region, inverted_region;
+ 
++	if (dev->non_compliant_bars)
++		return 0;
++
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -973,6 +976,8 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ {
+ 	int pos;
+ 	u16 reg16;
++	int type;
++	struct pci_dev *parent;
+ 
+ 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ 	if (!pos)
+@@ -982,6 +987,22 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pdev->pcie_flags_reg = reg16;
+ 	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
+ 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
++
++	/*
++	 * A Root Port is always the upstream end of a Link.  No PCIe
++	 * component has two Links.  Two Links are connected by a Switch
++	 * that has a Port on each Link and internal logic to connect the
++	 * two Ports.
++	 */
++	type = pci_pcie_type(pdev);
++	if (type == PCI_EXP_TYPE_ROOT_PORT)
++		pdev->has_secondary_link = 1;
++	else if (type == PCI_EXP_TYPE_UPSTREAM ||
++		 type == PCI_EXP_TYPE_DOWNSTREAM) {
++		parent = pci_upstream_bridge(pdev);
++		if (!parent->has_secondary_link)
++			pdev->has_secondary_link = 1;
++	}
+ }
+ 
+ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
+@@ -1098,6 +1119,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
+ int pci_setup_device(struct pci_dev *dev)
+ {
+ 	u32 class;
++	u16 cmd;
+ 	u8 hdr_type;
+ 	struct pci_slot *slot;
+ 	int pos = 0;
+@@ -1145,6 +1167,16 @@ int pci_setup_device(struct pci_dev *dev)
+ 	/* device class may be changed after fixup */
+ 	class = dev->class >> 8;
+ 
++	if (dev->non_compliant_bars) {
++		pci_read_config_word(dev, PCI_COMMAND, &cmd);
++		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
++			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
++			cmd &= ~PCI_COMMAND_IO;
++			cmd &= ~PCI_COMMAND_MEMORY;
++			pci_write_config_word(dev, PCI_COMMAND, cmd);
++		}
++	}
++
+ 	switch (dev->hdr_type) {		    /* header type */
+ 	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
+ 		if (class == PCI_CLASS_BRIDGE_PCI)
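
The rule stated in the set_pcie_port_type() comment above reduces to a few lines: a Root Port always drives its Link, so it has the secondary side; a Switch Port has it only when its upstream bridge does not. A sketch with stand-in types (not the PCI core's):

#include <stdbool.h>
#include <stdio.h>

enum port_type { ROOT_PORT, UPSTREAM_PORT, DOWNSTREAM_PORT, ENDPOINT };

struct port {
	enum port_type type;
	struct port *parent;	/* upstream bridge, NULL for a Root Port */
	bool has_secondary_link;
};

static void set_link_side(struct port *p)
{
	if (p->type == ROOT_PORT)
		p->has_secondary_link = true;
	else if ((p->type == UPSTREAM_PORT || p->type == DOWNSTREAM_PORT) &&
		 p->parent && !p->parent->has_secondary_link)
		p->has_secondary_link = true;
}

int main(void)
{
	struct port root = { ROOT_PORT, NULL, false };
	struct port up   = { UPSTREAM_PORT, &root, false };
	struct port down = { DOWNSTREAM_PORT, &up, false };

	set_link_side(&root);	/* true: drives its Link */
	set_link_side(&up);	/* false: that Link's secondary side is the root's */
	set_link_side(&down);	/* true: heads a new Link below the Switch */
	printf("%d %d %d\n", root.has_secondary_link,
	       up.has_secondary_link, down.has_secondary_link);	/* 1 0 1 */
	return 0;
}
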
+diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+index 8d908e3f42c3..03ad08ca7e3b 100644
+--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
++++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+@@ -786,7 +786,7 @@ static int bcm2835_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
+ 		}
+ 		if (num_pulls) {
+ 			err = of_property_read_u32_index(np, "brcm,pull",
+-					(num_funcs > 1) ? i : 0, &pull);
++					(num_pulls > 1) ? i : 0, &pull);
+ 			if (err)
+ 				goto out;
+ 			err = bcm2835_pctl_dt_node_to_map_pull(pc, np, pin,
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 9a92d13e3917..8cad6c165680 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -845,6 +845,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo ideapad Y700-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-15ISK"),
++		},
++	},
++	{
++		.ident = "Lenovo ideapad Y700 Touch-15ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700 Touch-15ISK"),
++		},
++	},
++	{
+ 		.ident = "Lenovo ideapad Y700-17ISK",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 40fe65c91b41..18c9c0648bd0 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -939,6 +939,7 @@ struct fib {
+ 	 */
+ 	struct list_head	fiblink;
+ 	void			*data;
++	u32			vector_no;
+ 	struct hw_fib		*hw_fib_va;		/* Actual shared object */
+ 	dma_addr_t		hw_fib_pa;		/* physical address of hw_fib*/
+ };
+@@ -2098,6 +2099,7 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+ #define AAC_OWNER_FIRMWARE	0x106
+ 
+ const char *aac_driverinfo(struct Scsi_Host *);
++void aac_fib_vector_assign(struct aac_dev *dev);
+ struct fib *aac_fib_alloc(struct aac_dev *dev);
+ int aac_fib_setup(struct aac_dev *dev);
+ void aac_fib_map_free(struct aac_dev *dev);
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 4da574925284..4b79d9511778 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -83,13 +83,38 @@ static int fib_map_alloc(struct aac_dev *dev)
+ 
+ void aac_fib_map_free(struct aac_dev *dev)
+ {
+-	pci_free_consistent(dev->pdev,
+-	  dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
+-	  dev->hw_fib_va, dev->hw_fib_pa);
++	if (dev->hw_fib_va && dev->max_fib_size) {
++		pci_free_consistent(dev->pdev,
++		(dev->max_fib_size *
++		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)),
++		dev->hw_fib_va, dev->hw_fib_pa);
++	}
+ 	dev->hw_fib_va = NULL;
+ 	dev->hw_fib_pa = 0;
+ }
+ 
++void aac_fib_vector_assign(struct aac_dev *dev)
++{
++	u32 i = 0;
++	u32 vector = 1;
++	struct fib *fibptr = NULL;
++
++	for (i = 0, fibptr = &dev->fibs[i];
++		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
++		i++, fibptr++) {
++		if ((dev->max_msix == 1) ||
++		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
++			- dev->vector_cap))) {
++			fibptr->vector_no = 0;
++		} else {
++			fibptr->vector_no = vector;
++			vector++;
++			if (vector == dev->max_msix)
++				vector = 1;
++		}
++	}
++}
++
+ /**
+  *	aac_fib_setup	-	setup the fibs
+  *	@dev: Adapter to set up
+@@ -151,6 +176,12 @@ int aac_fib_setup(struct aac_dev * dev)
+ 		hw_fib_pa = hw_fib_pa +
+ 			dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
+ 	}
++
++	/*
++	 * Assign vector numbers to fibs
++	 */
++	aac_fib_vector_assign(dev);
++
+ 	/*
+ 	 *	Add the fib chain to the free list
+ 	 */
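
The static assignment introduced in commsup.c above pins the tail of the FIB array (one vector's worth of capacity) to vector 0 and round-robins everything else over vectors 1..max_msix-1. A standalone restatement with small, made-up sizes:

#include <stdio.h>

int main(void)
{
	int total = 16, max_msix = 4, vector_cap = 4;	/* made-up sizes */
	int vector = 1;

	for (int i = 0; i < total; i++) {
		int v;

		/* last vector_cap entries are pinned to vector 0 */
		if (max_msix == 1 || i > (total - 1) - vector_cap) {
			v = 0;
		} else {
			v = vector++;
			if (vector == max_msix)
				vector = 1;	/* wrap over 1..max_msix-1 */
		}
		printf("fib %2d -> vector %d\n", i, v);
	}
	return 0;
}
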
+diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
+index 4596e9dd757c..81315a14ef39 100644
+--- a/drivers/scsi/aacraid/src.c
++++ b/drivers/scsi/aacraid/src.c
+@@ -156,8 +156,8 @@ irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+ 				break;
+ 			if (dev->msi_enabled && dev->max_msix > 1)
+ 				atomic_dec(&dev->rrq_outstanding[vector_no]);
+-			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			dev->host_rrq[index++] = 0;
++			aac_intr_normal(dev, handle-1, 0, isFastResponse, NULL);
+ 			if (index == (vector_no + 1) * dev->vector_cap)
+ 				index = vector_no * dev->vector_cap;
+ 			dev->host_rrq_idx[vector_no] = index;
+@@ -448,36 +448,20 @@ static int aac_src_deliver_message(struct fib *fib)
+ 	dma_addr_t address;
+ 	struct aac_fib_xporthdr *pFibX;
+ 	u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
++	u16 vector_no;
+ 
+ 	atomic_inc(&q->numpending);
+ 
+ 	if (dev->msi_enabled && fib->hw_fib_va->header.Command != AifRequest &&
+ 	    dev->max_msix > 1) {
+-		u_int16_t vector_no, first_choice = 0xffff;
+-
+-		vector_no = dev->fibs_pushed_no % dev->max_msix;
+-		do {
+-			vector_no += 1;
+-			if (vector_no == dev->max_msix)
+-				vector_no = 1;
+-			if (atomic_read(&dev->rrq_outstanding[vector_no]) <
+-			    dev->vector_cap)
+-				break;
+-			if (0xffff == first_choice)
+-				first_choice = vector_no;
+-			else if (vector_no == first_choice)
+-				break;
+-		} while (1);
+-		if (vector_no == first_choice)
+-			vector_no = 0;
+-		atomic_inc(&dev->rrq_outstanding[vector_no]);
+-		if (dev->fibs_pushed_no == 0xffffffff)
+-			dev->fibs_pushed_no = 0;
+-		else
+-			dev->fibs_pushed_no++;
++		vector_no = fib->vector_no;
+ 		fib->hw_fib_va->header.Handle += (vector_no << 16);
++	} else {
++		vector_no = 0;
+ 	}
+ 
++	atomic_inc(&dev->rrq_outstanding[vector_no]);
++
+ 	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ 		/* Calculate the amount to the fibsize bits */
+ 		fibsize = (hdr_size + 127) / 128 - 1;
+diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+index a2f2c774cd6b..82529f9830f3 100644
+--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
++++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
+@@ -1337,6 +1337,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
+ 	case AHC_DEV_Q_TAGGED:
+ 		scsi_change_queue_depth(sdev,
+ 				dev->openings + dev->active);
++		break;
+ 	default:
+ 		/*
+ 		 * We allow the OS to queue 2 untagged transactions to
+diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
+index 1f74760ce86c..77a1598bc78d 100644
+--- a/drivers/scsi/be2iscsi/be_main.c
++++ b/drivers/scsi/be2iscsi/be_main.c
+@@ -4434,6 +4434,7 @@ put_shost:
+ 	scsi_host_put(phba->shost);
+ free_kset:
+ 	iscsi_boot_destroy_kset(phba->boot_kset);
++	phba->boot_kset = NULL;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 3bbf4853733c..ec192939750e 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -652,7 +652,8 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	else
+ 		hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ 	hp->dxfer_len = mxsize;
+-	if (hp->dxfer_direction == SG_DXFER_TO_DEV)
++	if ((hp->dxfer_direction == SG_DXFER_TO_DEV) ||
++	    (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV))
+ 		hp->dxferp = (char __user *)buf + cmd_size;
+ 	else
+ 		hp->dxferp = NULL;
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index c66affd993aa..34b3a522668f 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -247,24 +247,24 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writel(data, dev->mmio + reg);
+-
+-	outl(data, dev->iobase + reg);
++	else
++		outl(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writew(data, dev->mmio + reg);
+-
+-	outw(data, dev->iobase + reg);
++	else
++		outw(data, dev->iobase + reg);
+ }
+ 
+ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+ {
+ 	if (dev->mmio)
+ 		writeb(data, dev->mmio + reg);
+-
+-	outb(data, dev->iobase + reg);
++	else
++		outb(data, dev->iobase + reg);
+ }
+ 
+ static uint32_t ni_readl(struct comedi_device *dev, int reg)
+diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
+index 9b124b09e914..bfd4c3b12cd0 100644
+--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
++++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
+@@ -92,7 +92,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+-	if (trig_num != cmd->start_src)
++	if (trig_num != cmd->start_arg)
+ 		return -EINVAL;
+ 
+ 	spin_lock_irqsave(&counter->lock, flags);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index be12b9d84052..e8848e7fe5d4 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2617,8 +2617,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del_init(&se_cmd->se_cmd_list);
+-
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index a3282bfb343d..09f1e5f2f013 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -391,6 +391,10 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+ {
+ 	enum thermal_trip_type type;
+ 
++	/* Ignore disabled trip points */
++	if (test_bit(trip, &tz->trips_disabled))
++		return;
++
+ 	tz->ops->get_trip_type(tz, trip, &type);
+ 
+ 	if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
+@@ -1487,6 +1491,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ {
+ 	struct thermal_zone_device *tz;
+ 	enum thermal_trip_type trip_type;
++	int trip_temp;
+ 	int result;
+ 	int count;
+ 	int passive = 0;
+@@ -1557,9 +1562,15 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 		goto unregister;
+ 
+ 	for (count = 0; count < trips; count++) {
+-		tz->ops->get_trip_type(tz, count, &trip_type);
++		if (tz->ops->get_trip_type(tz, count, &trip_type))
++			set_bit(count, &tz->trips_disabled);
+ 		if (trip_type == THERMAL_TRIP_PASSIVE)
+ 			passive = 1;
++		if (tz->ops->get_trip_temp(tz, count, &trip_temp))
++			set_bit(count, &tz->trips_disabled);
++		/* Check for bogus trip points */
++		if (trip_temp == 0)
++			set_bit(count, &tz->trips_disabled);
+ 	}
+ 
+ 	if (!passive) {
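
The trips_disabled field used above is a simple bitmap (the matching struct field is not shown in this hunk): registration marks trips whose callbacks fail or whose temperature reads back as zero, and the event path skips anything marked. The pattern in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long trips_disabled = 0;
	int temps[] = { 90000, 0, 105000 };	/* millidegrees; 0 marks a bogus trip */
	int trips = 3;

	/* registration: mark bogus trip points once */
	for (int i = 0; i < trips; i++)
		if (temps[i] == 0)
			trips_disabled |= 1UL << i;

	/* event handling: skip anything marked disabled */
	for (int i = 0; i < trips; i++) {
		if (trips_disabled & (1UL << i))
			continue;
		printf("handling trip %d at %d\n", i, temps[i]);
	}
	return 0;
}
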
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index df3deb000a80..09084c9da8b7 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1118,6 +1118,9 @@ static int acm_probe(struct usb_interface *intf,
+ 	if (quirks == NO_UNION_NORMAL) {
+ 		data_interface = usb_ifnum_to_if(usb_dev, 1);
+ 		control_interface = usb_ifnum_to_if(usb_dev, 0);
++		/* we would crash */
++		if (!data_interface || !control_interface)
++			return -ENODEV;
+ 		goto skip_normal_probe;
+ 	}
+ 
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 818369afff63..7792c0e2d3b6 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -499,11 +499,15 @@ static int usb_unbind_interface(struct device *dev)
+ int usb_driver_claim_interface(struct usb_driver *driver,
+ 				struct usb_interface *iface, void *priv)
+ {
+-	struct device *dev = &iface->dev;
++	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+ 	int lpm_disable_error;
+ 
++	if (!iface)
++		return -ENODEV;
++
++	dev = &iface->dev;
+ 	if (dev->driver)
+ 		return -EBUSY;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e56ad83b35a4..ae9eb716c02f 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4237,7 +4237,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ {
+ 	struct usb_device	*hdev = hub->hdev;
+ 	struct usb_hcd		*hcd = bus_to_hcd(hdev->bus);
+-	int			i, j, retval;
++	int			retries, operations, retval, i;
+ 	unsigned		delay = HUB_SHORT_RESET_TIME;
+ 	enum usb_device_speed	oldspeed = udev->speed;
+ 	const char		*speed;
+@@ -4339,7 +4339,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	 * first 8 bytes of the device descriptor to get the ep0 maxpacket
+ 	 * value.
+ 	 */
+-	for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) {
++	for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) {
+ 		bool did_new_scheme = false;
+ 
+ 		if (use_new_scheme(udev, retry_counter)) {
+@@ -4366,7 +4366,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 			 * 255 is for WUSB devices, we actually need to use
+ 			 * 512 (WUSB1.0[4.8.1]).
+ 			 */
+-			for (j = 0; j < 3; ++j) {
++			for (operations = 0; operations < 3; ++operations) {
+ 				buf->bMaxPacketSize0 = 0;
+ 				r = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ 					USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+@@ -4386,7 +4386,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 						r = -EPROTO;
+ 					break;
+ 				}
+-				if (r == 0)
++				/*
++				 * Some devices time out if they are powered on
++				 * when already connected. They need a second
++				 * reset. But only on the first attempt,
++				 * lest we get into a time out/reset loop
++				 * lest we get into a timeout/reset loop
++				if (r == 0  || (r == -ETIMEDOUT && retries == 0))
+ 					break;
+ 			}
+ 			udev->descriptor.bMaxPacketSize0 =
+@@ -4418,7 +4424,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		 * authorization will assign the final address.
+ 		 */
+ 		if (udev->wusb == 0) {
+-			for (j = 0; j < SET_ADDRESS_TRIES; ++j) {
++			for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) {
+ 				retval = hub_set_address(udev, devnum);
+ 				if (retval >= 0)
+ 					break;
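
The shape of the retry policy described in the hub comment above, as a runnable toy: a timeout on the very first attempt breaks out so the device gets a fresh reset, while later timeouts keep retrying the transfer. The transfer itself is stubbed:

#include <errno.h>
#include <stdio.h>

#define GET_DESCRIPTOR_TRIES 2

/* stub for the control transfer: times out once, then succeeds */
static int read_descriptor(void)
{
	static int calls;

	return calls++ ? 0 : -ETIMEDOUT;
}

int main(void)
{
	int r = -1;

	for (int retries = 0; retries < GET_DESCRIPTOR_TRIES; retries++) {
		for (int operations = 0; operations < 3; operations++) {
			r = read_descriptor();
			/* only the first attempt treats a timeout as "stop
			 * and reset", so we never loop timeout -> reset */
			if (r == 0 || (r == -ETIMEDOUT && retries == 0))
				break;
		}
		if (r == 0)
			break;
		/* ... a port reset would happen here between attempts ... */
	}
	printf("r=%d\n", r);	/* 0: succeeded on the second attempt */
	return 0;
}
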
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index c6bfd13f6c92..1950e87b4219 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -787,6 +787,12 @@ static int iowarrior_probe(struct usb_interface *interface,
+ 	iface_desc = interface->cur_altsetting;
+ 	dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
+ 
++	if (iface_desc->desc.bNumEndpoints < 1) {
++		dev_err(&interface->dev, "Invalid number of endpoints\n");
++		retval = -EINVAL;
++		goto error;
++	}
++
+ 	/* set up the endpoint information */
+ 	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ 		endpoint = &iface_desc->endpoint[i].desc;
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 75e4979e6c15..637ee7754ad5 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -812,7 +812,7 @@ static struct scsi_host_template uas_host_template = {
+ 	.slave_configure = uas_slave_configure,
+ 	.eh_abort_handler = uas_eh_abort_handler,
+ 	.eh_bus_reset_handler = uas_eh_bus_reset_handler,
+-	.can_queue = 65536,	/* Is there a limit on the _host_ ? */
++	.can_queue = MAX_CMNDS,
+ 	.this_id = -1,
+ 	.sg_tablesize = SG_NONE,
+ 	.cmd_per_lun = 1,	/* until we override it */
+diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
+index 71e78ef4b736..3a75f3b53452 100644
+--- a/drivers/watchdog/rc32434_wdt.c
++++ b/drivers/watchdog/rc32434_wdt.c
+@@ -237,7 +237,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd,
+ 			return -EINVAL;
+ 		/* Fall through */
+ 	case WDIOC_GETTIMEOUT:
+-		return copy_to_user(argp, &timeout, sizeof(int));
++		return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0;
+ 	default:
+ 		return -ENOTTY;
+ 	}
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index df9932b00d08..1848705506ff 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -316,8 +316,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
+ 		list_add_tail(&work->ordered_list, &wq->ordered_list);
+ 		spin_unlock_irqrestore(&wq->list_lock, flags);
+ 	}
+-	queue_work(wq->normal_wq, &work->normal_work);
+ 	trace_btrfs_work_queued(work);
++	queue_work(wq->normal_wq, &work->normal_work);
+ }
+ 
+ void btrfs_queue_work(struct btrfs_workqueue *wq,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 99e8f60c7962..e4ca0f5746ae 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1574,8 +1574,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
+ 	ret = get_anon_bdev(&root->anon_dev);
+ 	if (ret)
+ 		goto free_writers;
++
++	mutex_lock(&root->objectid_mutex);
++	ret = btrfs_find_highest_objectid(root,
++					&root->highest_objectid);
++	if (ret) {
++		mutex_unlock(&root->objectid_mutex);
++		goto free_root_dev;
++	}
++
++	ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++	mutex_unlock(&root->objectid_mutex);
++
+ 	return 0;
+ 
++free_root_dev:
++	free_anon_bdev(root->anon_dev);
+ free_writers:
+ 	btrfs_free_subvolume_writers(root->subv_writers);
+ fail:
+@@ -2622,6 +2637,7 @@ int open_ctree(struct super_block *sb,
+ 	if (btrfs_check_super_csum(bh->b_data)) {
+ 		printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
+ 		err = -EINVAL;
++		brelse(bh);
+ 		goto fail_alloc;
+ 	}
+ 
+@@ -2849,6 +2865,18 @@ retry_root_backup:
+ 	tree_root->commit_root = btrfs_root_node(tree_root);
+ 	btrfs_set_root_refs(&tree_root->root_item, 1);
+ 
++	mutex_lock(&tree_root->objectid_mutex);
++	ret = btrfs_find_highest_objectid(tree_root,
++					&tree_root->highest_objectid);
++	if (ret) {
++		mutex_unlock(&tree_root->objectid_mutex);
++		goto recovery_tree_root;
++	}
++
++	ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
++
++	mutex_unlock(&tree_root->objectid_mutex);
++
+ 	ret = btrfs_read_roots(fs_info, tree_root);
+ 	if (ret)
+ 		goto recovery_tree_root;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 3c1938000a5d..d1ae1322648a 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3975,6 +3975,11 @@ commit_trans:
+ 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
+ 			need_commit--;
+ 
++			if (need_commit > 0) {
++				btrfs_start_delalloc_roots(fs_info, 0, -1);
++				btrfs_wait_ordered_roots(fs_info, -1);
++			}
++
+ 			trans = btrfs_join_transaction(root);
+ 			if (IS_ERR(trans))
+ 				return PTR_ERR(trans);
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index d4a582ac3f73..9f06e8b4add1 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -515,7 +515,7 @@ out:
+ 	return ret;
+ }
+ 
+-static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
+ {
+ 	struct btrfs_path *path;
+ 	int ret;
+@@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
+ 	int ret;
+ 	mutex_lock(&root->objectid_mutex);
+ 
+-	if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
+-		ret = btrfs_find_highest_objectid(root,
+-						  &root->highest_objectid);
+-		if (ret)
+-			goto out;
+-	}
+-
+ 	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
+ 		ret = -ENOSPC;
+ 		goto out;
+diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
+index ddb347bfee23..c8e864b2d530 100644
+--- a/fs/btrfs/inode-map.h
++++ b/fs/btrfs/inode-map.h
+@@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
+ 			 struct btrfs_trans_handle *trans);
+ 
+ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
++int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
+ 
+ #endif
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index b114a0539d3d..f751ab47e9a5 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6421,7 +6421,7 @@ out_unlock_inode:
+ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 		      struct dentry *dentry)
+ {
+-	struct btrfs_trans_handle *trans;
++	struct btrfs_trans_handle *trans = NULL;
+ 	struct btrfs_root *root = BTRFS_I(dir)->root;
+ 	struct inode *inode = d_inode(old_dentry);
+ 	u64 index;
+@@ -6447,6 +6447,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 	trans = btrfs_start_transaction(root, 5);
+ 	if (IS_ERR(trans)) {
+ 		err = PTR_ERR(trans);
++		trans = NULL;
+ 		goto fail;
+ 	}
+ 
+@@ -6480,9 +6481,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
+ 		btrfs_log_new_name(trans, inode, NULL, parent);
+ 	}
+ 
+-	btrfs_end_transaction(trans, root);
+ 	btrfs_balance_delayed_items(root);
+ fail:
++	if (trans)
++		btrfs_end_transaction(trans, root);
+ 	if (drop_inode) {
+ 		inode_dec_link_count(inode);
+ 		iput(inode);
+@@ -8416,15 +8418,28 @@ int btrfs_readpage(struct file *file, struct page *page)
+ static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
+ {
+ 	struct extent_io_tree *tree;
+-
++	struct inode *inode = page->mapping->host;
++	int ret;
+ 
+ 	if (current->flags & PF_MEMALLOC) {
+ 		redirty_page_for_writepage(wbc, page);
+ 		unlock_page(page);
+ 		return 0;
+ 	}
++
++	/*
++	 * If we are under memory pressure we will call this directly from the
++	 * VM, we need to make sure we have the inode referenced for the ordered
++	 * extent.  If not, just return as if we didn't do anything.
++	 */
++	if (!igrab(inode)) {
++		redirty_page_for_writepage(wbc, page);
++		return AOP_WRITEPAGE_ACTIVATE;
++	}
+ 	tree = &BTRFS_I(page->mapping->host)->io_tree;
+-	return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
++	btrfs_add_delayed_iput(inode);
++	return ret;
+ }
+ 
+ static int btrfs_writepages(struct address_space *mapping,
+@@ -9505,9 +9520,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
+ 	/*
+ 	 * 2 items for inode item and ref
+ 	 * 2 items for dir items
++	 * 1 item for updating parent inode item
++	 * 1 item for the inline extent item
+ 	 * 1 item for xattr if selinux is on
+ 	 */
+-	trans = btrfs_start_transaction(root, 5);
++	trans = btrfs_start_transaction(root, 7);
+ 	if (IS_ERR(trans))
+ 		return PTR_ERR(trans);
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 87c720865ebf..5189d54417ab 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -567,6 +567,10 @@ static noinline int create_subvol(struct inode *dir,
+ 		goto fail;
+ 	}
+ 
++	mutex_lock(&new_root->objectid_mutex);
++	new_root->highest_objectid = new_dirid;
++	mutex_unlock(&new_root->objectid_mutex);
++
+ 	/*
+ 	 * insert the directory item
+ 	 */
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index a1216f9b4917..b2c1ab7cae78 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1462,7 +1462,21 @@ static int read_symlink(struct btrfs_root *root,
+ 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ 	if (ret < 0)
+ 		goto out;
+-	BUG_ON(ret);
++	if (ret) {
++		/*
++		 * An empty symlink inode. Can happen in rare error paths when
++		 * creating a symlink (transaction committed before the inode
++		 * eviction handler removed the symlink inode items and a crash
++		 * happened in between, or the subvol was snapshotted in between).
++		 * Print an informative message to dmesg/syslog so that the user
++		 * can delete the symlink.
++		 */
++		btrfs_err(root->fs_info,
++			  "Found empty symlink inode %llu at root %llu",
++			  ino, root->root_key.objectid);
++		ret = -EIO;
++		goto out;
++	}
+ 
+ 	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ 			struct btrfs_file_extent_item);
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 9e66f5e724db..70734d89193a 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1822,6 +1822,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
+  * there are other factors that may change the result (like a new metadata
+  * chunk).
+  *
++ * If metadata is exhausted, f_bavail will be 0.
++ *
+  * FIXME: not accurate for mixed block groups, total and free/used are ok,
+  * available appears slightly larger.
+  */
+@@ -1833,11 +1835,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	struct btrfs_space_info *found;
+ 	u64 total_used = 0;
+ 	u64 total_free_data = 0;
++	u64 total_free_meta = 0;
+ 	int bits = dentry->d_sb->s_blocksize_bits;
+ 	__be32 *fsid = (__be32 *)fs_info->fsid;
+ 	unsigned factor = 1;
+ 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
+ 	int ret;
++	u64 thresh = 0;
+ 
+ 	/*
+ 	 * holding chunk_muext to avoid allocating new chunks, holding
+@@ -1863,6 +1867,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 				}
+ 			}
+ 		}
++		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
++			total_free_meta += found->disk_total - found->disk_used;
+ 
+ 		total_used += found->disk_used;
+ 	}
+@@ -1885,6 +1891,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ 	buf->f_bavail += div_u64(total_free_data, factor);
+ 	buf->f_bavail = buf->f_bavail >> bits;
+ 
++	/*
++	 * We calculate the remaining metadata space minus the global reserve.
++	 * If this is (supposedly) smaller than zero, there's no space. But
++	 * this does not hold in practice; the exhausted state happens while
++	 * there's still some positive delta. So we apply some guesswork and
++	 * compare the delta to a 4M threshold.  (Practically observed delta
++	 * was ~2M.)
++	 *
++	 * We probably cannot calculate the exact threshold value because this
++	 * depends on the internal reservations requested by various
++	 * operations, so some operations that consume a small amount of
++	 * metadata will succeed even if the Avail is zero. But this is better
++	 * than the other way around.
++	 */
++	thresh = 4 * 1024 * 1024;
++
++	if (total_free_meta - thresh < block_rsv->size)
++		buf->f_bavail = 0;
++
+ 	buf->f_type = BTRFS_SUPER_MAGIC;
+ 	buf->f_bsize = dentry->d_sb->s_blocksize;
+ 	buf->f_namelen = BTRFS_NAME_LEN;
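
The heuristic above in isolation: report zero available space once free metadata minus a 4 MiB slack drops below the global reserve. Numbers below are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long long total_free_meta = 34ULL << 20;	/* 34 MiB free metadata */
	unsigned long long global_rsv      = 32ULL << 20;	/* global block reserve */
	unsigned long long thresh          =  4ULL << 20;	/* slack for the ~2M delta */
	unsigned long long f_bavail        = 123456;		/* whatever the data math gave */

	if (total_free_meta - thresh < global_rsv)
		f_bavail = 0;	/* metadata effectively exhausted */

	printf("f_bavail = %llu\n", f_bavail);	/* prints 0 with these numbers */
	return 0;
}
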
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 5113b7257b45..18a3573e1444 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -152,6 +152,7 @@ static struct btrfs_device *__alloc_device(void)
+ 	spin_lock_init(&dev->reada_lock);
+ 	atomic_set(&dev->reada_in_flight, 0);
+ 	atomic_set(&dev->dev_stats_ccnt, 0);
++	btrfs_device_data_ordered_init(dev);
+ 	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_WAIT);
+ 	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_WAIT);
+ 
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 8dd099dc5f9b..26d05e3bc6db 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -32,6 +32,10 @@
+ #include <linux/pipe_fs_i.h>
+ #include <linux/oom.h>
+ #include <linux/compat.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/path.h>
++#include <linux/timekeeping.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+@@ -225,9 +229,10 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+ 				break;
+ 			/* UNIX time of coredump */
+ 			case 't': {
+-				struct timeval tv;
+-				do_gettimeofday(&tv);
+-				err = cn_printf(cn, "%lu", tv.tv_sec);
++				time64_t time;
++
++				time = ktime_get_real_seconds();
++				err = cn_printf(cn, "%lld", time);
+ 				break;
+ 			}
+ 			/* hostname */
+@@ -621,6 +626,8 @@ void do_coredump(const siginfo_t *siginfo)
+ 		}
+ 	} else {
+ 		struct inode *inode;
++		int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL;
+ 
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+@@ -659,10 +666,27 @@ void do_coredump(const siginfo_t *siginfo)
+ 		 * what matters is that at least one of the two processes
+ 		 * writes its coredump successfully, not which one.
+ 		 */
+-		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW |
+-				 O_LARGEFILE | O_EXCL,
+-				 0600);
++		if (need_suid_safe) {
++			/*
++			 * Using user namespaces, normal user tasks can change
++			 * their current->fs->root to point to arbitrary
++			 * directories. Since the intention of the "only dump
++			 * with a fully qualified path" rule is to control where
++			 * coredumps may be placed using root privileges,
++			 * current->fs->root must not be used. Instead, use the
++			 * root directory of init_task.
++			 */
++			struct path root;
++
++			task_lock(&init_task);
++			get_fs_root(init_task.fs, &root);
++			task_unlock(&init_task);
++			cprm.file = file_open_root(root.dentry, root.mnt,
++				cn.corename, open_flags, 0600);
++			path_put(&root);
++		} else {
++			cprm.file = filp_open(cn.corename, open_flags, 0600);
++		}
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+ 
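The old call site passed the bare literal 2, which is O_RDWR on Linux; the hunk above names the full flag combination once so the suid-safe and normal branches share it. A small userspace sketch of the same flag set (the filename is hypothetical):

#define _GNU_SOURCE	/* exposes O_LARGEFILE in <fcntl.h> */
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int flags = O_CREAT | O_RDWR | O_NOFOLLOW | O_LARGEFILE | O_EXCL;
	int fd = open("core.example", flags, 0600);

	if (fd < 0)
		fprintf(stderr, "open: %s\n", strerror(errno));
	else
		printf("created core.example exclusively, fd %d\n", fd);
	return 0;
}

Run a second time, the open fails with EEXIST: O_EXCL guarantees the dump never reuses an existing file, and O_NOFOLLOW refuses to follow a symlink outright.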
+diff --git a/fs/dax.c b/fs/dax.c
+index 6f65f00e58ec..4bb5b7cd5dfd 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -309,14 +309,11 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
+  out:
+ 	i_mmap_unlock_read(mapping);
+ 
+-	if (bh->b_end_io)
+-		bh->b_end_io(bh, 1);
+-
+ 	return error;
+ }
+ 
+ static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+-			get_block_t get_block)
++			get_block_t get_block, dax_iodone_t complete_unwritten)
+ {
+ 	struct file *file = vma->vm_file;
+ 	struct address_space *mapping = file->f_mapping;
+@@ -417,7 +414,19 @@ static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ 		page_cache_release(page);
+ 	}
+ 
++	/*
++	 * If we successfully insert the new mapping over an unwritten extent,
++	 * we need to ensure we convert the unwritten extent. If there is an
++	 * error inserting the mapping, the filesystem needs to leave it as
++	 * unwritten to prevent exposure of the stale underlying data to
++	 * userspace, but we still need to call the completion function so
++	 * the private resources on the mapping buffer can be released. We
++	 * indicate what the callback should do via the uptodate variable, same
++	 * as for normal BH based IO completions.
++	 */
+ 	error = dax_insert_mapping(inode, &bh, vma, vmf);
++	if (buffer_unwritten(&bh))
++		complete_unwritten(&bh, !error);
+ 
+  out:
+ 	if (error == -ENOMEM)
+@@ -445,7 +454,7 @@ static int do_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+  * fault handler for DAX files.
+  */
+ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+-			get_block_t get_block)
++	      get_block_t get_block, dax_iodone_t complete_unwritten)
+ {
+ 	int result;
+ 	struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+@@ -454,7 +463,7 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+ 		sb_start_pagefault(sb);
+ 		file_update_time(vma->vm_file);
+ 	}
+-	result = do_dax_fault(vma, vmf, get_block);
++	result = do_dax_fault(vma, vmf, get_block, complete_unwritten);
+ 	if (vmf->flags & FAULT_FLAG_WRITE)
+ 		sb_end_pagefault(sb);
+ 
+diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
+index 90001da9abfd..66842e55c48c 100644
+--- a/fs/efivarfs/file.c
++++ b/fs/efivarfs/file.c
+@@ -10,6 +10,7 @@
+ #include <linux/efi.h>
+ #include <linux/fs.h>
+ #include <linux/slab.h>
++#include <linux/mount.h>
+ 
+ #include "internal.h"
+ 
+@@ -103,9 +104,78 @@ out_free:
+ 	return size;
+ }
+ 
++static int
++efivarfs_ioc_getxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int i_flags;
++	unsigned int flags = 0;
++
++	i_flags = inode->i_flags;
++	if (i_flags & S_IMMUTABLE)
++		flags |= FS_IMMUTABLE_FL;
++
++	if (copy_to_user(arg, &flags, sizeof(flags)))
++		return -EFAULT;
++	return 0;
++}
++
++static int
++efivarfs_ioc_setxflags(struct file *file, void __user *arg)
++{
++	struct inode *inode = file->f_mapping->host;
++	unsigned int flags;
++	unsigned int i_flags = 0;
++	int error;
++
++	if (!inode_owner_or_capable(inode))
++		return -EACCES;
++
++	if (copy_from_user(&flags, arg, sizeof(flags)))
++		return -EFAULT;
++
++	if (flags & ~FS_IMMUTABLE_FL)
++		return -EOPNOTSUPP;
++
++	if (!capable(CAP_LINUX_IMMUTABLE))
++		return -EPERM;
++
++	if (flags & FS_IMMUTABLE_FL)
++		i_flags |= S_IMMUTABLE;
++
++
++	error = mnt_want_write_file(file);
++	if (error)
++		return error;
++
++	mutex_lock(&inode->i_mutex);
++	inode_set_flags(inode, i_flags, S_IMMUTABLE);
++	mutex_unlock(&inode->i_mutex);
++
++	mnt_drop_write_file(file);
++
++	return 0;
++}
++
++long
++efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
++{
++	void __user *arg = (void __user *)p;
++
++	switch (cmd) {
++	case FS_IOC_GETFLAGS:
++		return efivarfs_ioc_getxflags(file, arg);
++	case FS_IOC_SETFLAGS:
++		return efivarfs_ioc_setxflags(file, arg);
++	}
++
++	return -ENOTTY;
++}
++
+ const struct file_operations efivarfs_file_operations = {
+ 	.open	= simple_open,
+ 	.read	= efivarfs_file_read,
+ 	.write	= efivarfs_file_write,
+ 	.llseek	= no_llseek,
++	.unlocked_ioctl = efivarfs_file_ioctl,
+ };
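With this hunk, efivarfs files answer the same FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls that chattr(1) uses under the hood. A hedged userspace sketch of clearing the immutable bit on a variable (the variable path below is hypothetical):

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/firmware/efi/efivars/Example-"
			   "12345678-1234-1234-1234-123456789abc";
	unsigned int flags;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0 &&
	    (flags & FS_IMMUTABLE_FL)) {
		flags &= ~FS_IMMUTABLE_FL;
		if (ioctl(fd, FS_IOC_SETFLAGS, &flags) == 0)
			printf("immutable flag cleared\n");
	}
	close(fd);
	return 0;
}

The handler above accepts only the FS_IMMUTABLE_FL bit and requires CAP_LINUX_IMMUTABLE to change it.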
+diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
+index 3381b9da9ee6..e2ab6d0497f2 100644
+--- a/fs/efivarfs/inode.c
++++ b/fs/efivarfs/inode.c
+@@ -15,7 +15,8 @@
+ #include "internal.h"
+ 
+ struct inode *efivarfs_get_inode(struct super_block *sb,
+-				const struct inode *dir, int mode, dev_t dev)
++				const struct inode *dir, int mode,
++				dev_t dev, bool is_removable)
+ {
+ 	struct inode *inode = new_inode(sb);
+ 
+@@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
+ 		inode->i_ino = get_next_ino();
+ 		inode->i_mode = mode;
+ 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
++		inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
+ 		switch (mode & S_IFMT) {
+ 		case S_IFREG:
+ 			inode->i_fop = &efivarfs_file_operations;
+@@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
+ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 			  umode_t mode, bool excl)
+ {
+-	struct inode *inode;
++	struct inode *inode = NULL;
+ 	struct efivar_entry *var;
+ 	int namelen, i = 0, err = 0;
++	bool is_removable = false;
+ 
+ 	if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
+ 		return -EINVAL;
+ 
+-	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
+-	if (!inode)
+-		return -ENOMEM;
+-
+ 	var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
+-	if (!var) {
+-		err = -ENOMEM;
+-		goto out;
+-	}
++	if (!var)
++		return -ENOMEM;
+ 
+ 	/* length of the variable name itself: remove GUID and separator */
+ 	namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
+@@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ 	efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
+ 			&var->var.VendorGuid);
+ 
++	if (efivar_variable_is_removable(var->var.VendorGuid,
++					 dentry->d_name.name, namelen))
++		is_removable = true;
++
++	inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
++	if (!inode) {
++		err = -ENOMEM;
++		goto out;
++	}
++
+ 	for (i = 0; i < namelen; i++)
+ 		var->var.VariableName[i] = dentry->d_name.name[i];
+ 
+@@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
+ out:
+ 	if (err) {
+ 		kfree(var);
+-		iput(inode);
++		if (inode)
++			iput(inode);
+ 	}
+ 	return err;
+ }
+diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
+index b5ff16addb7c..b4505188e799 100644
+--- a/fs/efivarfs/internal.h
++++ b/fs/efivarfs/internal.h
+@@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
+ extern const struct inode_operations efivarfs_dir_inode_operations;
+ extern bool efivarfs_valid_name(const char *str, int len);
+ extern struct inode *efivarfs_get_inode(struct super_block *sb,
+-			const struct inode *dir, int mode, dev_t dev);
++			const struct inode *dir, int mode, dev_t dev,
++			bool is_removable);
+ 
+ extern struct list_head efivarfs_list;
+ 
+diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
+index 86a2121828c3..abb244b06024 100644
+--- a/fs/efivarfs/super.c
++++ b/fs/efivarfs/super.c
+@@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	struct dentry *dentry, *root = sb->s_root;
+ 	unsigned long size = 0;
+ 	char *name;
+-	int len, i;
++	int len;
+ 	int err = -ENOMEM;
++	bool is_removable = false;
+ 
+ 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ 	if (!entry)
+@@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 	memcpy(entry->var.VariableName, name16, name_size);
+ 	memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
+ 
+-	len = ucs2_strlen(entry->var.VariableName);
++	len = ucs2_utf8size(entry->var.VariableName);
+ 
+ 	/* name, plus '-', plus GUID, plus NUL*/
+ 	name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
+ 	if (!name)
+ 		goto fail;
+ 
+-	for (i = 0; i < len; i++)
+-		name[i] = entry->var.VariableName[i] & 0xFF;
++	ucs2_as_utf8(name, entry->var.VariableName, len);
++
++	if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
++		is_removable = true;
+ 
+ 	name[len] = '-';
+ 
+@@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
+ 
+ 	name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
+ 
+-	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
++	inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
++				   is_removable);
+ 	if (!inode)
+ 		goto fail_name;
+ 
+@@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
+ 	sb->s_d_op		= &efivarfs_d_ops;
+ 	sb->s_time_gran         = 1;
+ 
+-	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
++	inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
+ 	if (!inode)
+ 		return -ENOMEM;
+ 	inode->i_op = &efivarfs_dir_inode_operations;
+diff --git a/fs/ext2/file.c b/fs/ext2/file.c
+index 3a0a6c6406d0..3b57c9f83c9b 100644
+--- a/fs/ext2/file.c
++++ b/fs/ext2/file.c
+@@ -28,12 +28,12 @@
+ #ifdef CONFIG_FS_DAX
+ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+-	return dax_fault(vma, vmf, ext2_get_block);
++	return dax_fault(vma, vmf, ext2_get_block, NULL);
+ }
+ 
+ static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+-	return dax_mkwrite(vma, vmf, ext2_get_block);
++	return dax_mkwrite(vma, vmf, ext2_get_block, NULL);
+ }
+ 
+ static const struct vm_operations_struct ext2_dax_vm_ops = {
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index dd65fac5ff2f..0d062ffacb24 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -192,15 +192,27 @@ out:
+ }
+ 
+ #ifdef CONFIG_FS_DAX
++static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
++{
++	struct inode *inode = bh->b_assoc_map->host;
++	/* XXX: breaks on 32-bit > 16GB. Is that even supported? */
++	loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
++	int err;
++	if (!uptodate)
++		return;
++	WARN_ON(!buffer_unwritten(bh));
++	err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
++}
++
+ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+-	return dax_fault(vma, vmf, ext4_get_block);
++	return dax_fault(vma, vmf, ext4_get_block, ext4_end_io_unwritten);
+ 					/* Is this the right get_block? */
+ }
+ 
+ static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ {
+-	return dax_mkwrite(vma, vmf, ext4_get_block);
++	return dax_mkwrite(vma, vmf, ext4_get_block, ext4_end_io_unwritten);
+ }
+ 
+ static const struct vm_operations_struct ext4_dax_vm_ops = {
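The helper moved in this hunk recovers the byte offset by shifting the block number stashed in b_private by the inode's block-size bits. A tiny standalone illustration with made-up values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t iblock = 100;		/* logical block from b_private */
	unsigned int blkbits = 12;	/* 4096-byte blocks: 1 << 12 */
	uint64_t offset = iblock << blkbits;

	printf("block %llu -> byte offset %llu\n",
	       (unsigned long long)iblock,
	       (unsigned long long)offset);	/* 409600 */
	return 0;
}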
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 3291e1af0e24..f43996884242 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -656,16 +656,32 @@ has_zeroout:
+ 	return retval;
+ }
+ 
+-static void ext4_end_io_unwritten(struct buffer_head *bh, int uptodate)
++/*
++ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
++ * we have to be careful as someone else may be manipulating b_state as well.
++ */
++static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+ {
+-	struct inode *inode = bh->b_assoc_map->host;
+-	/* XXX: breaks on 32-bit > 16GB. Is that even supported? */
+-	loff_t offset = (loff_t)(uintptr_t)bh->b_private << inode->i_blkbits;
+-	int err;
+-	if (!uptodate)
++	unsigned long old_state;
++	unsigned long new_state;
++
++	flags &= EXT4_MAP_FLAGS;
++
++	/* Dummy buffer_head? Set non-atomically. */
++	if (!bh->b_page) {
++		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+ 		return;
+-	WARN_ON(!buffer_unwritten(bh));
+-	err = ext4_convert_unwritten_extents(NULL, inode, offset, bh->b_size);
++	}
++	/*
++	 * Someone else may be modifying b_state. Be careful! This is ugly but
++	 * once we get rid of using bh as a container for mapping information
++	 * to pass to / from get_block functions, this can go away.
++	 */
++	do {
++		old_state = READ_ONCE(bh->b_state);
++		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
++	} while (unlikely(
++		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+ }
+ 
+ /* Maximum number of blocks we map for direct IO at once. */
+@@ -704,11 +720,16 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
+ 		ext4_io_end_t *io_end = ext4_inode_aio(inode);
+ 
+ 		map_bh(bh, inode->i_sb, map.m_pblk);
+-		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+-		if (IS_DAX(inode) && buffer_unwritten(bh) && !io_end) {
++		ext4_update_bh_state(bh, map.m_flags);
++		if (IS_DAX(inode) && buffer_unwritten(bh)) {
++			/*
++			 * dgc: I suspect unwritten conversion on ext4+DAX is
++			 * fundamentally broken here when there are concurrent
++			 * read/write in progress on this inode.
++			 */
++			WARN_ON_ONCE(io_end);
+ 			bh->b_assoc_map = inode->i_mapping;
+ 			bh->b_private = (void *)(unsigned long)iblock;
+-			bh->b_end_io = ext4_end_io_unwritten;
+ 		}
+ 		if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
+ 			set_buffer_defer_completion(bh);
+@@ -1655,7 +1676,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
+ 		return ret;
+ 
+ 	map_bh(bh, inode->i_sb, map.m_pblk);
+-	bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++	ext4_update_bh_state(bh, map.m_flags);
+ 
+ 	if (buffer_unwritten(bh)) {
+ 		/* A delayed write to unwritten bh should be marked
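The new ext4_update_bh_state() loop is a classic compare-and-swap retry: recompute the desired word from the latest observed value until the cmpxchg lands. A userspace analogue using C11 atomics, with an illustrative mask standing in for EXT4_MAP_FLAGS:

#include <stdatomic.h>
#include <stdio.h>

#define MAP_FLAGS_MASK 0xFFUL	/* stand-in for EXT4_MAP_FLAGS */

static _Atomic unsigned long state;

static void update_flags(unsigned long flags)
{
	unsigned long old_state, new_state;

	flags &= MAP_FLAGS_MASK;
	old_state = atomic_load_explicit(&state, memory_order_relaxed);
	do {
		new_state = (old_state & ~MAP_FLAGS_MASK) | flags;
		/* on failure, old_state is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(&state, &old_state, new_state));
}

int main(void)
{
	atomic_store(&state, 0xABCD00UL);
	update_flags(0x42);
	printf("state = %#lx\n", atomic_load(&state));	/* 0xabcd42 */
	return 0;
}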
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 7da8ac1047f8..3fb92abe5707 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -393,6 +393,7 @@ data_copy:
+ 		*err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
+ 		if (*err < 0)
+ 			break;
++		bh = bh->b_this_page;
+ 	}
+ 	if (!*err)
+ 		*err = block_commit_write(pagep[0], from, from + replaced_size);
+diff --git a/fs/fhandle.c b/fs/fhandle.c
+index d59712dfa3e7..ca3c3dd01789 100644
+--- a/fs/fhandle.c
++++ b/fs/fhandle.c
+@@ -228,7 +228,7 @@ long do_handle_open(int mountdirfd,
+ 		path_put(&path);
+ 		return fd;
+ 	}
+-	file = file_open_root(path.dentry, path.mnt, "", open_flag);
++	file = file_open_root(path.dentry, path.mnt, "", open_flag, 0);
+ 	if (IS_ERR(file)) {
+ 		put_unused_fd(fd);
+ 		retval =  PTR_ERR(file);
+diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
+index e5bbf748b698..709a0d917b44 100644
+--- a/fs/fuse/cuse.c
++++ b/fs/fuse/cuse.c
+@@ -90,7 +90,7 @@ static struct list_head *cuse_conntbl_head(dev_t devt)
+ 
+ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 
+ 	return fuse_direct_io(&io, to, &pos, FUSE_DIO_CUSE);
+@@ -98,7 +98,7 @@ static ssize_t cuse_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ 
+ static ssize_t cuse_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = kiocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(kiocb->ki_filp);
+ 	loff_t pos = 0;
+ 	/*
+ 	 * No locking or generic_write_checks(), the server is
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 5ef05b5c4cff..3227091c2a64 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -528,6 +528,11 @@ static void fuse_release_user_pages(struct fuse_req *req, int write)
+ 	}
+ }
+ 
++static void fuse_io_release(struct kref *kref)
++{
++	kfree(container_of(kref, struct fuse_io_priv, refcnt));
++}
++
+ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
+ {
+ 	if (io->err)
+@@ -585,8 +590,9 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
+ 		}
+ 
+ 		io->iocb->ki_complete(io->iocb, res, 0);
+-		kfree(io);
+ 	}
++
++	kref_put(&io->refcnt, fuse_io_release);
+ }
+ 
+ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
+@@ -613,6 +619,7 @@ static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
+ 		size_t num_bytes, struct fuse_io_priv *io)
+ {
+ 	spin_lock(&io->lock);
++	kref_get(&io->refcnt);
+ 	io->size += num_bytes;
+ 	io->reqs++;
+ 	spin_unlock(&io->lock);
+@@ -691,7 +698,7 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode,
+ 
+ static int fuse_do_readpage(struct file *file, struct page *page)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	struct inode *inode = page->mapping->host;
+ 	struct fuse_conn *fc = get_fuse_conn(inode);
+ 	struct fuse_req *req;
+@@ -984,7 +991,7 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+ 	size_t res;
+ 	unsigned offset;
+ 	unsigned i;
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 
+ 	for (i = 0; i < req->num_pages; i++)
+ 		fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+@@ -1049,6 +1056,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+ 		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+ 		flush_dcache_page(page);
+ 
++		iov_iter_advance(ii, tmp);
+ 		if (!tmp) {
+ 			unlock_page(page);
+ 			page_cache_release(page);
+@@ -1061,7 +1069,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+ 		req->page_descs[req->num_pages].length = tmp;
+ 		req->num_pages++;
+ 
+-		iov_iter_advance(ii, tmp);
+ 		count += tmp;
+ 		pos += tmp;
+ 		offset += tmp;
+@@ -1398,7 +1405,7 @@ static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
+ 
+ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
+ {
+-	struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb->ki_filp);
+ 	return __fuse_direct_read(&io, to, &iocb->ki_pos);
+ }
+ 
+@@ -1406,7 +1413,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
+ {
+ 	struct file *file = iocb->ki_filp;
+ 	struct inode *inode = file_inode(file);
+-	struct fuse_io_priv io = { .async = 0, .file = file };
++	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(file);
+ 	ssize_t res;
+ 
+ 	if (is_bad_inode(inode))
+@@ -2786,6 +2793,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	loff_t i_size;
+ 	size_t count = iov_iter_count(iter);
+ 	struct fuse_io_priv *io;
++	bool is_sync = is_sync_kiocb(iocb);
+ 
+ 	pos = offset;
+ 	inode = file->f_mapping->host;
+@@ -2806,6 +2814,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	if (!io)
+ 		return -ENOMEM;
+ 	spin_lock_init(&io->lock);
++	kref_init(&io->refcnt);
+ 	io->reqs = 1;
+ 	io->bytes = -1;
+ 	io->size = 0;
+@@ -2825,12 +2834,18 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 	 * to wait on real async I/O requests, so we must submit this request
+ 	 * synchronously.
+ 	 */
+-	if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
++	if (!is_sync && (offset + count > i_size) &&
+ 	    iov_iter_rw(iter) == WRITE)
+ 		io->async = false;
+ 
+-	if (io->async && is_sync_kiocb(iocb))
++	if (io->async && is_sync) {
++		/*
++		 * Additional reference to keep io around after
++		 * calling fuse_aio_complete()
++		 */
++		kref_get(&io->refcnt);
+ 		io->done = &wait;
++	}
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
+@@ -2843,14 +2858,14 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
+ 		fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
+ 
+ 		/* we have a non-extending, async request, so return */
+-		if (!is_sync_kiocb(iocb))
++		if (!is_sync)
+ 			return -EIOCBQUEUED;
+ 
+ 		wait_for_completion(&wait);
+ 		ret = fuse_get_res_by_io(io);
+ 	}
+ 
+-	kfree(io);
++	kref_put(&io->refcnt, fuse_io_release);
+ 
+ 	if (iov_iter_rw(iter) == WRITE) {
+ 		if (ret > 0)
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 7354dc142a50..85f9d8273455 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -22,6 +22,7 @@
+ #include <linux/rbtree.h>
+ #include <linux/poll.h>
+ #include <linux/workqueue.h>
++#include <linux/kref.h>
+ 
+ /** Max number of pages that can be used in a single read request */
+ #define FUSE_MAX_PAGES_PER_REQ 32
+@@ -253,6 +254,7 @@ enum fuse_req_state {
+ 
+ /** The request IO state (for asynchronous processing) */
+ struct fuse_io_priv {
++	struct kref refcnt;
+ 	int async;
+ 	spinlock_t lock;
+ 	unsigned reqs;
+@@ -266,6 +268,13 @@ struct fuse_io_priv {
+ 	struct completion *done;
+ };
+ 
++#define FUSE_IO_PRIV_SYNC(f) \
++{					\
++	.refcnt = { ATOMIC_INIT(1) },	\
++	.async = 0,			\
++	.file = f,			\
++}
++
+ /**
+  * A request to the client
+  */
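Taken together, the fuse hunks above replace an unconditional kfree() with reference counting: the io starts with one reference (FUSE_IO_PRIV_SYNC, kref_init), each in-flight request takes another, and the last kref_put() frees it regardless of whether the submitter or a completion path drops last. A hedged userspace sketch of the same lifetime rule:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct io_priv {
	atomic_int refcnt;
	/* ... request bookkeeping ... */
};

static struct io_priv *io_alloc(void)
{
	struct io_priv *io = malloc(sizeof(*io));
	if (io)
		atomic_init(&io->refcnt, 1);	/* like kref_init() */
	return io;
}

static void io_get(struct io_priv *io)
{
	atomic_fetch_add(&io->refcnt, 1);	/* like kref_get() */
}

static void io_put(struct io_priv *io)
{
	/* like kref_put(): the release callback runs on the final drop */
	if (atomic_fetch_sub(&io->refcnt, 1) == 1) {
		printf("last reference dropped, freeing io\n");
		free(io);
	}
}

int main(void)
{
	struct io_priv *io = io_alloc();
	io_get(io);	/* a request goes in flight */
	io_put(io);	/* the request completes */
	io_put(io);	/* the submitter's reference */
	return 0;
}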
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 0469f32918a5..e4d224315a1f 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1423,11 +1423,12 @@ out:
+ /**
+  * jbd2_mark_journal_empty() - Mark on disk journal as empty.
+  * @journal: The journal to update.
++ * @write_op: block layer operation with which to write the journal sb
+  *
+  * Update a journal's dynamic superblock fields to show that journal is empty.
+  * Write updated superblock to disk waiting for IO to complete.
+  */
+-static void jbd2_mark_journal_empty(journal_t *journal)
++static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 
+@@ -1445,7 +1446,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
+ 	sb->s_start    = cpu_to_be32(0);
+ 	read_unlock(&journal->j_state_lock);
+ 
+-	jbd2_write_superblock(journal, WRITE_FUA);
++	jbd2_write_superblock(journal, write_op);
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+@@ -1730,7 +1731,13 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	if (journal->j_sb_buffer) {
+ 		if (!is_journal_aborted(journal)) {
+ 			mutex_lock(&journal->j_checkpoint_mutex);
+-			jbd2_mark_journal_empty(journal);
++
++			write_lock(&journal->j_state_lock);
++			journal->j_tail_sequence =
++				++journal->j_transaction_sequence;
++			write_unlock(&journal->j_state_lock);
++
++			jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+ 			mutex_unlock(&journal->j_checkpoint_mutex);
+ 		} else
+ 			err = -EIO;
+@@ -1989,7 +1996,7 @@ int jbd2_journal_flush(journal_t *journal)
+ 	 * the magic code for a fully-recovered superblock.  Any future
+ 	 * commits of data to the journal will restore the current
+ 	 * s_start value. */
+-	jbd2_mark_journal_empty(journal);
++	jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 	mutex_unlock(&journal->j_checkpoint_mutex);
+ 	write_lock(&journal->j_state_lock);
+ 	J_ASSERT(!journal->j_running_transaction);
+@@ -2035,7 +2042,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
+ 	if (write) {
+ 		/* Lock to make assertions happy... */
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_mark_journal_empty(journal);
++		jbd2_mark_journal_empty(journal, WRITE_FUA);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
+ 	}
+ 
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 864e2003e8de..2b50bc0c545e 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -881,6 +881,7 @@ nfsd4_secinfo(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 				    &exp, &dentry);
+ 	if (err)
+ 		return err;
++	fh_unlock(&cstate->current_fh);
+ 	if (d_really_is_negative(dentry)) {
+ 		exp_put(exp);
+ 		err = nfserr_noent;
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 3dd1b616b92b..d8297542f8b3 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1071,8 +1071,9 @@ nfsd4_decode_rename(struct nfsd4_compoundargs *argp, struct nfsd4_rename *rename
+ 
+ 	READ_BUF(4);
+ 	rename->rn_snamelen = be32_to_cpup(p++);
+-	READ_BUF(rename->rn_snamelen + 4);
++	READ_BUF(rename->rn_snamelen);
+ 	SAVEMEM(rename->rn_sname, rename->rn_snamelen);
++	READ_BUF(4);
+ 	rename->rn_tnamelen = be32_to_cpup(p++);
+ 	READ_BUF(rename->rn_tnamelen);
+ 	SAVEMEM(rename->rn_tname, rename->rn_tnamelen);
+@@ -1154,13 +1155,14 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
+ 	READ_BUF(8);
+ 	setclientid->se_callback_prog = be32_to_cpup(p++);
+ 	setclientid->se_callback_netid_len = be32_to_cpup(p++);
+-
+-	READ_BUF(setclientid->se_callback_netid_len + 4);
++	READ_BUF(setclientid->se_callback_netid_len);
+ 	SAVEMEM(setclientid->se_callback_netid_val, setclientid->se_callback_netid_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_addr_len = be32_to_cpup(p++);
+ 
+-	READ_BUF(setclientid->se_callback_addr_len + 4);
++	READ_BUF(setclientid->se_callback_addr_len);
+ 	SAVEMEM(setclientid->se_callback_addr_val, setclientid->se_callback_addr_len);
++	READ_BUF(4);
+ 	setclientid->se_callback_ident = be32_to_cpup(p++);
+ 
+ 	DECODE_TAIL;
+@@ -1814,8 +1816,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
+ 
+ 	READ_BUF(4);
+ 	argp->taglen = be32_to_cpup(p++);
+-	READ_BUF(argp->taglen + 8);
++	READ_BUF(argp->taglen);
+ 	SAVEMEM(argp->tag, argp->taglen);
++	READ_BUF(8);
+ 	argp->minorversion = be32_to_cpup(p++);
+ 	argp->opcnt = be32_to_cpup(p++);
+ 	max_reply += 4 + (XDR_QUADLEN(argp->taglen) << 2);
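The reason for splitting each READ_BUF(len + 4) into READ_BUF(len) plus READ_BUF(4) is that len is client-controlled and the addition can wrap, letting a huge length pass a small bounds check. A two-line demonstration of the wrap:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t len = 0xFFFFFFFDu;	/* attacker-chosen string length */
	uint32_t combined = len + 4;	/* wraps around to 1 */

	printf("len + 4 = %u, so a combined check passes\n",
	       (unsigned)combined);
	printf("checked separately: %u bytes, then 4 more\n", (unsigned)len);
	return 0;
}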
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index e36d63ff1783..f90931335c6b 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,6 +262,7 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
++	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -287,6 +288,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		status = DLM_DENIED;
+ 		goto bail;
+ 	}
++
++	if (lock->ml.type == type && lock->ml.convert_type == LKM_IVMODE) {
++		mlog(0, "last convert request returned DLM_RECOVERING, but "
++		     "owner has already queued and sent ast to me. res %.*s, "
++		     "(cookie=%u:%llu, type=%d, conv=%d)\n",
++		     res->lockname.len, res->lockname.name,
++		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
++		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
++		     lock->ml.type, lock->ml.convert_type);
++		status = DLM_NORMAL;
++		goto bail;
++	}
++
+ 	res->state |= DLM_LOCK_RES_IN_PROGRESS;
+ 	/* move lock to local convert queue */
+ 	/* do not alter lock refcount.  switching lists. */
+@@ -316,11 +330,19 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+ 	lock->convert_pending = 0;
+-	/* if it failed, move it back to granted queue */
++	/* if it failed, move it back to granted queue.
++	 * if the master returns DLM_NORMAL and then goes down before sending
++	 * the ast, the lock may already have been moved to the granted queue;
++	 * reset to DLM_RECOVERING and retry the convert */
+ 	if (status != DLM_NORMAL) {
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
++	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
++			(old_owner != res->owner)) {
++		mlog(0, "res %.*s is in recovering or has been recovered.\n",
++				res->lockname.len, res->lockname.name);
++		status = DLM_RECOVERING;
+ 	}
+ bail:
+ 	spin_unlock(&res->spinlock);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index f25ff5d3a2f9..dad6d841f0da 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2064,7 +2064,6 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
+ 			dlm_lock_get(lock);
+ 			if (lock->convert_pending) {
+ 				/* move converting lock back to granted */
+-				BUG_ON(i != DLM_CONVERTING_LIST);
+ 				mlog(0, "node died with convert pending "
+ 				     "on %.*s. move back to granted list.\n",
+ 				     res->lockname.len, res->lockname.name);
+diff --git a/fs/open.c b/fs/open.c
+index f9d2bf935099..ff80b2542989 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -987,14 +987,12 @@ struct file *filp_open(const char *filename, int flags, umode_t mode)
+ EXPORT_SYMBOL(filp_open);
+ 
+ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
+-			    const char *filename, int flags)
++			    const char *filename, int flags, umode_t mode)
+ {
+ 	struct open_flags op;
+-	int err = build_open_flags(flags, 0, &op);
++	int err = build_open_flags(flags, mode, &op);
+ 	if (err)
+ 		return ERR_PTR(err);
+-	if (flags & O_CREAT)
+-		return ERR_PTR(-EINVAL);
+ 	return do_file_open_root(dentry, mnt, filename, &op);
+ }
+ EXPORT_SYMBOL(file_open_root);
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index e505b44a9184..edd2a4a5fd3c 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -62,15 +62,15 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (err)
+ 		goto out;
+ 
+-	upperdentry = ovl_dentry_upper(dentry);
+-	if (upperdentry) {
++	err = ovl_copy_up(dentry);
++	if (!err) {
++		upperdentry = ovl_dentry_upper(dentry);
++
+ 		mutex_lock(&upperdentry->d_inode->i_mutex);
+ 		err = notify_change(upperdentry, attr, NULL);
+ 		if (!err)
+ 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+ 		mutex_unlock(&upperdentry->d_inode->i_mutex);
+-	} else {
+-		err = ovl_copy_up_last(dentry, attr, false);
+ 	}
+ 	ovl_drop_write(dentry);
+ out:
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 70f9c4cba31f..dca04edb6b90 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -384,7 +384,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	state = *get_task_state(task);
+ 	vsize = eip = esp = 0;
+-	permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
++	permitted = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS | PTRACE_MODE_NOAUDIT);
+ 	mm = get_task_mm(task);
+ 	if (mm) {
+ 		vsize = task_vsize(mm);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index fcdeb1eb3921..68d51ed1666f 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -211,7 +211,7 @@ static int proc_pid_cmdline(struct seq_file *m, struct pid_namespace *ns,
+ static int proc_pid_auxv(struct seq_file *m, struct pid_namespace *ns,
+ 			 struct pid *pid, struct task_struct *task)
+ {
+-	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
++	struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	if (mm && !IS_ERR(mm)) {
+ 		unsigned int nwords = 0;
+ 		do {
+@@ -238,7 +238,8 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	wchan = get_wchan(task);
+ 
+-	if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
++	if (wchan && ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)
++			&& !lookup_symbol_name(wchan, symname))
+ 		seq_printf(m, "%s", symname);
+ 	else
+ 		seq_putc(m, '0');
+@@ -252,7 +253,7 @@ static int lock_trace(struct task_struct *task)
+ 	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+ 	if (err)
+ 		return err;
+-	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
++	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH_FSCREDS)) {
+ 		mutex_unlock(&task->signal->cred_guard_mutex);
+ 		return -EPERM;
+ 	}
+@@ -502,7 +503,7 @@ static int proc_fd_access_allowed(struct inode *inode)
+ 	 */
+ 	task = get_proc_task(inode);
+ 	if (task) {
+-		allowed = ptrace_may_access(task, PTRACE_MODE_READ);
++		allowed = ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+ 		put_task_struct(task);
+ 	}
+ 	return allowed;
+@@ -537,7 +538,7 @@ static bool has_pid_permissions(struct pid_namespace *pid,
+ 		return true;
+ 	if (in_group_p(pid->pid_gid))
+ 		return true;
+-	return ptrace_may_access(task, PTRACE_MODE_READ);
++	return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+ }
+ 
+ 
+@@ -614,7 +615,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
+ 	struct mm_struct *mm = ERR_PTR(-ESRCH);
+ 
+ 	if (task) {
+-		mm = mm_access(task, mode);
++		mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
+ 		put_task_struct(task);
+ 
+ 		if (!IS_ERR_OR_NULL(mm)) {
+@@ -1676,7 +1677,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
+ 	if (!task)
+ 		goto out_notask;
+ 
+-	mm = mm_access(task, PTRACE_MODE_READ);
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	if (IS_ERR_OR_NULL(mm))
+ 		goto out;
+ 
+@@ -1808,7 +1809,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
+ 		goto out;
+ 
+ 	result = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	result = -ENOENT;
+@@ -1865,7 +1866,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
+ 		goto out;
+ 
+ 	ret = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
+ 		goto out_put_task;
+ 
+ 	ret = 0;
+@@ -2345,7 +2346,7 @@ static int do_io_accounting(struct task_struct *task, struct seq_file *m, int wh
+ 	if (result)
+ 		return result;
+ 
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
+ 		result = -EACCES;
+ 		goto out_unlock;
+ 	}
+diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
+index e512642dbbdc..1c70dec8809d 100644
+--- a/fs/proc/namespaces.c
++++ b/fs/proc/namespaces.c
+@@ -42,7 +42,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
+ 	if (!task)
+ 		return error;
+ 
+-	if (ptrace_may_access(task, PTRACE_MODE_READ)) {
++	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
+ 		error = ns_get_path(&ns_path, task, ns_ops);
+ 		if (!error)
+ 			nd_jump_link(nd, &ns_path);
+@@ -63,7 +63,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
+ 	if (!task)
+ 		return res;
+ 
+-	if (ptrace_may_access(task, PTRACE_MODE_READ)) {
++	if (ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS)) {
+ 		res = ns_get_name(name, sizeof(name), task, ns_ops);
+ 		if (res >= 0)
+ 			res = readlink_copy(buffer, buflen, name);
+diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
+index 8db932da4009..a5644c41a182 100644
+--- a/fs/proc_namespace.c
++++ b/fs/proc_namespace.c
+@@ -196,6 +196,8 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
+ 	if (sb->s_op->show_devname) {
+ 		seq_puts(m, "device ");
+ 		err = sb->s_op->show_devname(m, mnt_path.dentry);
++		if (err)
++			goto out;
+ 	} else {
+ 		if (r->mnt_devname) {
+ 			seq_puts(m, "device ");
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 20d1f74561cf..19c777ad0084 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -1393,7 +1393,7 @@ static int dquot_active(const struct inode *inode)
+ static void __dquot_initialize(struct inode *inode, int type)
+ {
+ 	int cnt, init_needed = 0;
+-	struct dquot **dquots, *got[MAXQUOTAS];
++	struct dquot **dquots, *got[MAXQUOTAS] = {};
+ 	struct super_block *sb = inode->i_sb;
+ 	qsize_t rsv;
+ 
+@@ -1408,7 +1408,6 @@ static void __dquot_initialize(struct inode *inode, int type)
+ 		kprojid_t projid;
+ 		int rc;
+ 
+-		got[cnt] = NULL;
+ 		if (type != -1 && cnt != type)
+ 			continue;
+ 		/*
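The "= {}" initializer in this hunk zeroes every slot of got[] up front, so entries skipped by the loop's continue stay NULL instead of holding stack garbage on the error paths. A tiny demo of the idiom ({} is the kernel's GNU C spelling; strict ISO C before C23 would use {0}):

#include <stdio.h>

#define MAXQUOTAS 3

int main(void)
{
	void *got[MAXQUOTAS] = {};	/* every slot NULL from the start */
	int cnt;

	for (cnt = 0; cnt < MAXQUOTAS; cnt++)
		printf("got[%d] = %p\n", cnt, got[cnt]);
	return 0;
}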
+diff --git a/fs/splice.c b/fs/splice.c
+index bfe62ae40f40..e7522c486068 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -185,6 +185,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 	unsigned int spd_pages = spd->nr_pages;
+ 	int ret, do_wakeup, page_nr;
+ 
++	if (!spd_pages)
++		return 0;
++
+ 	ret = 0;
+ 	do_wakeup = 0;
+ 	page_nr = 0;
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index 65fb37a18e92..8f7e09d0d0f0 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -202,8 +202,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+ 					sbp->namelen,
+ 					sbp->valuelen,
+ 					&sbp->name[sbp->namelen]);
+-		if (error)
++		if (error) {
++			kmem_free(sbuf);
+ 			return error;
++		}
+ 		if (context->seen_enough)
+ 			break;
+ 		cursor->offset++;
+@@ -454,14 +456,13 @@ xfs_attr3_leaf_list_int(
+ 				args.rmtblkcnt = xfs_attr3_rmt_blocks(
+ 							args.dp->i_mount, valuelen);
+ 				retval = xfs_attr_rmtval_get(&args);
+-				if (retval)
+-					return retval;
+-				retval = context->put_listent(context,
+-						entry->flags,
+-						name_rmt->name,
+-						(int)name_rmt->namelen,
+-						valuelen,
+-						args.value);
++				if (!retval)
++					retval = context->put_listent(context,
++							entry->flags,
++							name_rmt->name,
++							(int)name_rmt->namelen,
++							valuelen,
++							args.value);
+ 				kmem_free(args.value);
+ 			} else {
+ 				retval = context->put_listent(context,
+diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
+index c30266e94806..8ef0ccbf8167 100644
+--- a/include/asm-generic/bitops/lock.h
++++ b/include/asm-generic/bitops/lock.h
+@@ -29,16 +29,16 @@ do {					\
+  * @nr: the bit to set
+  * @addr: the address to start counting from
+  *
+- * This operation is like clear_bit_unlock, however it is not atomic.
+- * It does provide release barrier semantics so it can be used to unlock
+- * a bit lock, however it would only be used if no other CPU can modify
+- * any bits in the memory until the lock is released (a good example is
+- * if the bit lock itself protects access to the other bits in the word).
++ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
++ * the bits in the word are protected by this lock, some archs can use weaker
++ * ops to safely unlock.
++ *
++ * See for example x86's implementation.
+  */
+ #define __clear_bit_unlock(nr, addr)	\
+ do {					\
+-	smp_mb();			\
+-	__clear_bit(nr, addr);		\
++	smp_mb__before_atomic();	\
++	clear_bit(nr, addr);		\
+ } while (0)
+ 
+ #endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
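The rewritten macro pairs smp_mb__before_atomic() with an atomic clear_bit(), giving release-ordered unlocking: all stores made while holding the bit lock become visible before the bit reads as clear. A userspace analogue with C11 atomics (the bit position is illustrative):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long word;

static void bit_unlock(int nr)
{
	/* atomic AND-NOT with release ordering: the moral equivalent of
	 * smp_mb__before_atomic() + clear_bit() in the patched macro */
	atomic_fetch_and_explicit(&word, ~(1UL << nr), memory_order_release);
}

int main(void)
{
	atomic_store(&word, 1UL);	/* bit 0 held as a lock */
	bit_unlock(0);
	printf("word = %lu\n", atomic_load(&word));	/* 0 */
	return 0;
}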
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index c7f01d1aa562..653acf4cc44a 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
+ /**
+  * struct drm_dp_mst_port - MST port
+  * @kref: reference count for this port.
+- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
+- * @guid: guid for DP 1.2 device on this port.
+  * @port_num: port number
+  * @input: if this port is an input port.
+  * @mcs: message capability status - DP 1.2 spec.
+@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
+ struct drm_dp_mst_port {
+ 	struct kref kref;
+ 
+-	/* if dpcd 1.2 device is on this port - its GUID info */
+-	bool guid_valid;
+-	u8 guid[16];
+-
+ 	u8 port_num;
+ 	bool input;
+ 	bool mcs;
+@@ -109,10 +103,12 @@ struct drm_dp_mst_port {
+  * @tx_slots: transmission slots for this device.
+  * @last_seqno: last sequence number used to talk to this.
+  * @link_address_sent: if a link address message has been sent to this device yet.
++ * @guid: guid for DP 1.2 branch device. A port under this branch can be
++ * identified by its port number.
+  *
+  * This structure represents an MST branch device, there is one
+- * primary branch device at the root, along with any others connected
+- * to downstream ports
++ * primary branch device at the root, along with any other branches connected
++ * to downstream ports of parent branches.
+  */
+ struct drm_dp_mst_branch {
+ 	struct kref kref;
+@@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
+ 	struct drm_dp_sideband_msg_tx *tx_slots[2];
+ 	int last_seqno;
+ 	bool link_address_sent;
++
++	/* global unique identifier to identify branch devices */
++	u8 guid[16];
+ };
+ 
+ 
+@@ -404,11 +403,9 @@ struct drm_dp_payload {
+  * @conn_base_id: DRM connector ID this mgr is connected to.
+  * @down_rep_recv: msg receiver state for down replies.
+  * @up_req_recv: msg receiver state for up requests.
+- * @lock: protects mst state, primary, guid, dpcd.
++ * @lock: protects mst state, primary, dpcd.
+  * @mst_state: if this manager is enabled for an MST capable port.
+  * @mst_primary: pointer to the primary branch device.
+- * @guid_valid: GUID valid for the primary branch device.
+- * @guid: GUID for primary port.
+  * @dpcd: cache of DPCD for primary port.
+  * @pbn_div: PBN to slots divisor.
+  *
+@@ -430,13 +427,11 @@ struct drm_dp_mst_topology_mgr {
+ 	struct drm_dp_sideband_msg_rx up_req_recv;
+ 
+ 	/* pointer to info about the initial MST device */
+-	struct mutex lock; /* protects mst_state + primary + guid + dpcd */
++	struct mutex lock; /* protects mst_state + primary + dpcd */
+ 
+ 	bool mst_state;
+ 	struct drm_dp_mst_branch *mst_primary;
+-	/* primary MST device GUID */
+-	bool guid_valid;
+-	u8 guid[16];
++
+ 	u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ 	u8 sink_count;
+ 	int pbn_div;
+diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
+index 9177947bf032..e753062b9355 100644
+--- a/include/linux/atmel-mci.h
++++ b/include/linux/atmel-mci.h
+@@ -2,6 +2,7 @@
+ #define __LINUX_ATMEL_MCI_H
+ 
+ #include <linux/types.h>
++#include <linux/dmaengine.h>
+ 
+ #define ATMCI_MAX_NR_SLOTS	2
+ 
+@@ -37,6 +38,7 @@ struct mci_slot_pdata {
+  */
+ struct mci_platform_data {
+ 	struct mci_dma_data	*dma_slave;
++	dma_filter_fn		dma_filter;
+ 	struct mci_slot_pdata	slot[ATMCI_MAX_NR_SLOTS];
+ };
+ 
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index af5be0368dec..7f764000dab9 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1162,7 +1162,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
+ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
+ 				       struct list_head *head, bool remove);
+ 
+-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
++bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
++		     unsigned long data_size);
++bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
++				  size_t len);
+ 
+ extern struct work_struct efivar_work;
+ void efivar_run_worker(void);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index fdc369fa69e8..ae327f6a53f6 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -70,6 +70,7 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
+ 			struct buffer_head *bh_result, int create);
+ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+ 			ssize_t bytes, void *private);
++typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
+ 
+ #define MAY_EXEC		0x00000001
+ #define MAY_WRITE		0x00000002
+@@ -2201,7 +2202,7 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ extern struct file *file_open_name(struct filename *, int, umode_t);
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+-				   const char *, int);
++				   const char *, int, umode_t);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+@@ -2635,9 +2636,10 @@ ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
+ int dax_clear_blocks(struct inode *, sector_t block, long size);
+ int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
+ int dax_truncate_page(struct inode *, loff_t from, get_block_t);
+-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
++int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
++		dax_iodone_t);
+ int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
+-#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)
++#define dax_mkwrite(vma, vmf, gb, iod)	dax_fault(vma, vmf, gb, iod)
+ 
+ #ifdef CONFIG_BLOCK
+ typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode,
+diff --git a/include/linux/kernel.h b/include/linux/kernel.h
+index 3a5b48e52a9e..d837f2a41665 100644
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -606,7 +606,7 @@ do {							\
+ 
+ #define do_trace_printk(fmt, args...)					\
+ do {									\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(fmt) ? fmt : NULL;			\
+ 									\
+@@ -650,7 +650,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
+  */
+ 
+ #define trace_puts(str) ({						\
+-	static const char *trace_printk_fmt				\
++	static const char *trace_printk_fmt __used			\
+ 		__attribute__((section("__trace_printk_fmt"))) =	\
+ 		__builtin_constant_p(str) ? str : NULL;			\
+ 									\
+@@ -672,7 +672,7 @@ extern void trace_dump_stack(int skip);
+ #define ftrace_vprintk(fmt, vargs)					\
+ do {									\
+ 	if (__builtin_constant_p(fmt)) {				\
+-		static const char *trace_printk_fmt			\
++		static const char *trace_printk_fmt __used		\
+ 		  __attribute__((section("__trace_printk_fmt"))) =	\
+ 			__builtin_constant_p(fmt) ? fmt : NULL;		\
+ 									\
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index b2085582d44e..6b85ec64d302 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -588,7 +588,7 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+ 	return page[1].compound_dtor;
+ }
+ 
+-static inline int compound_order(struct page *page)
++static inline unsigned int compound_order(struct page *page)
+ {
+ 	if (!PageHead(page))
+ 		return 0;
+@@ -1771,7 +1771,8 @@ extern void si_meminfo(struct sysinfo * val);
+ extern void si_meminfo_node(struct sysinfo *val, int nid);
+ 
+ extern __printf(3, 4)
+-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
++void warn_alloc_failed(gfp_t gfp_mask, unsigned int order,
++		const char *fmt, ...);
+ 
+ extern void setup_per_cpu_pageset(void);
+ 
+diff --git a/include/linux/module.h b/include/linux/module.h
+index c883b86ea964..b2da02e1591d 100644
+--- a/include/linux/module.h
++++ b/include/linux/module.h
+@@ -210,6 +210,12 @@ enum module_state {
+ 	MODULE_STATE_UNFORMED,	/* Still setting it up. */
+ };
+ 
++struct mod_kallsyms {
++	Elf_Sym *symtab;
++	unsigned int num_symtab;
++	char *strtab;
++};
++
+ struct module {
+ 	enum module_state state;
+ 
+@@ -297,14 +303,9 @@ struct module {
+ #endif
+ 
+ #ifdef CONFIG_KALLSYMS
+-	/*
+-	 * We keep the symbol and string tables for kallsyms.
+-	 * The core_* fields below are temporary, loader-only (they
+-	 * could really be discarded after module init).
+-	 */
+-	Elf_Sym *symtab, *core_symtab;
+-	unsigned int num_symtab, core_num_syms;
+-	char *strtab, *core_strtab;
++	/* Protected by RCU and/or module_mutex: use rcu_dereference() */
++	struct mod_kallsyms *kallsyms;
++	struct mod_kallsyms core_kallsyms;
+ 
+ 	/* Section attributes */
+ 	struct module_sect_attrs *sect_attrs;
+diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
+index 2baeee12f48e..e942558b3585 100644
+--- a/include/linux/pageblock-flags.h
++++ b/include/linux/pageblock-flags.h
+@@ -44,7 +44,7 @@ enum pageblock_bits {
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+ 
+ /* Huge page sizes are variable */
+-extern int pageblock_order;
++extern unsigned int pageblock_order;
+ 
+ #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
+ 
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 6e935e5eab56..109ccee9e3e6 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -357,6 +357,8 @@ struct pci_dev {
+ 	unsigned int	broken_intx_masking:1;
+ 	unsigned int	io_window_1k:1;	/* Intel P2P bridge 1K I/O windows */
+ 	unsigned int	irq_managed:1;
++	unsigned int	has_secondary_link:1;
++	unsigned int	non_compliant_bars:1;	/* broken BARs; ignore them */
+ 	pci_dev_flags_t dev_flags;
+ 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
+ 
+diff --git a/include/linux/poison.h b/include/linux/poison.h
+index 2110a81c5e2a..253c9b4198ef 100644
+--- a/include/linux/poison.h
++++ b/include/linux/poison.h
+@@ -19,8 +19,8 @@
+  * under normal circumstances, used to verify that nobody uses
+  * non-initialized list entries.
+  */
+-#define LIST_POISON1  ((void *) 0x00100100 + POISON_POINTER_DELTA)
+-#define LIST_POISON2  ((void *) 0x00200200 + POISON_POINTER_DELTA)
++#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
++#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
+ 
+ /********** include/linux/timer.h **********/
+ /*
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 987a73a40ef8..998c098dd172 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -56,7 +56,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
+ #define PTRACE_MODE_READ	0x01
+ #define PTRACE_MODE_ATTACH	0x02
+ #define PTRACE_MODE_NOAUDIT	0x04
+-/* Returns true on success, false on denial. */
++#define PTRACE_MODE_FSCREDS 0x08
++#define PTRACE_MODE_REALCREDS 0x10
++
++/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
++#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
++#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
++#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
++#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
++
++/**
++ * ptrace_may_access - check whether the caller is permitted to access
++ * a target task.
++ * @task: target task
++ * @mode: selects type of access and caller credentials
++ *
++ * Returns true on success, false on denial.
++ *
++ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
++ * be set in @mode to specify whether the access was requested through
++ * a filesystem syscall (should use effective capabilities and fsuid
++ * of the caller) or through an explicit syscall such as
++ * process_vm_writev or ptrace (and should use the real credentials).
++ */
+ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
+ 
+ static inline int ptrace_reparented(struct task_struct *child)
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 2e7d0f7a0ecc..63830c4a49e6 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -146,6 +146,7 @@ struct thermal_attr {
+  * @trip_hyst_attrs:	attributes for trip points for sysfs: trip hysteresis
+  * @devdata:	private pointer for device private data
+  * @trips:	number of trip points the thermal zone supports
+  * @trips_disabled:	bitmap for disabled trips
+  * @passive_delay:	number of milliseconds to wait between polls when
+  *			performing passive cooling.  Currently only used by the
+  *			step-wise governor
+@@ -182,6 +183,7 @@ struct thermal_zone_device {
+ 	struct thermal_attr *trip_hyst_attrs;
+ 	void *devdata;
+ 	int trips;
++	unsigned long trips_disabled;	/* bitmap for disabled trips */
+ 	int passive_delay;
+ 	int polling_delay;
+ 	int temperature;
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 9580c09afdbe..5e704e26f9a2 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -592,7 +592,7 @@ static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ 		count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ 	else {
+ 		count = min_t(int, count, ld->tty->receive_room);
+-		if (count)
++		if (count && ld->ops->receive_buf)
+ 			ld->ops->receive_buf(ld->tty, p, f, count);
+ 	}
+ 	return count;
+diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
+index cbb20afdbc01..bb679b48f408 100644
+--- a/include/linux/ucs2_string.h
++++ b/include/linux/ucs2_string.h
+@@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
+ unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+ 
++unsigned long ucs2_utf8size(const ucs2_char_t *src);
++unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
++			   unsigned long maxlength);
++
+ #endif /* _LINUX_UCS2_STRING_H_ */
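A hedged sketch of the calling convention for the two new helpers, mirroring their use in the efivarfs hunk above; this is kernel-context pseudocode rather than a standalone program, and var_name stands for any NUL-terminated UCS-2 string:

/* size the UTF-8 form first, then allocate and convert */
unsigned long len = ucs2_utf8size(var_name);	/* bytes needed, without NUL */
char *name = kmalloc(len + 1, GFP_KERNEL);

if (name) {
	ucs2_as_utf8(name, var_name, len);	/* writes at most len bytes */
	name[len] = '\0';
}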
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index 2a8aa9dfb83d..3b78437d0c4c 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -137,15 +137,15 @@ static inline int snd_hdac_read_parm(struct hdac_device *codec, hda_nid_t nid,
+ }
+ 
+ #ifdef CONFIG_PM
+-void snd_hdac_power_up(struct hdac_device *codec);
+-void snd_hdac_power_down(struct hdac_device *codec);
+-void snd_hdac_power_up_pm(struct hdac_device *codec);
+-void snd_hdac_power_down_pm(struct hdac_device *codec);
++int snd_hdac_power_up(struct hdac_device *codec);
++int snd_hdac_power_down(struct hdac_device *codec);
++int snd_hdac_power_up_pm(struct hdac_device *codec);
++int snd_hdac_power_down_pm(struct hdac_device *codec);
+ #else
+-static inline void snd_hdac_power_up(struct hdac_device *codec) {}
+-static inline void snd_hdac_power_down(struct hdac_device *codec) {}
+-static inline void snd_hdac_power_up_pm(struct hdac_device *codec) {}
+-static inline void snd_hdac_power_down_pm(struct hdac_device *codec) {}
++static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
++static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
++static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
++static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
+ #endif
+ 
+ /*
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 66e6568a4736..6da64f0d0630 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3314,7 +3314,7 @@ find_lively_task_by_vpid(pid_t vpid)
+ 
+ 	/* Reuse ptrace permission checks for now. */
+ 	err = -EACCES;
+-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
++	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ 		goto errout;
+ 
+ 	return task;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index f3043db6d36f..b75fbddacf0e 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2763,7 +2763,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
+ 	}
+ 
+ 	ret = -EPERM;
+-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
++	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+ 		goto err_unlock;
+ 
+ 	head = p->robust_list;
+diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
+index 55c8c9349cfe..4ae3232e7a28 100644
+--- a/kernel/futex_compat.c
++++ b/kernel/futex_compat.c
+@@ -155,7 +155,7 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
+ 	}
+ 
+ 	ret = -EPERM;
+-	if (!ptrace_may_access(p, PTRACE_MODE_READ))
++	if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS))
+ 		goto err_unlock;
+ 
+ 	head = p->compat_robust_list;
+diff --git a/kernel/kcmp.c b/kernel/kcmp.c
+index 0aa69ea1d8fd..3a47fa998fe0 100644
+--- a/kernel/kcmp.c
++++ b/kernel/kcmp.c
+@@ -122,8 +122,8 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
+ 			&task2->signal->cred_guard_mutex);
+ 	if (ret)
+ 		goto err;
+-	if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
+-	    !ptrace_may_access(task2, PTRACE_MODE_READ)) {
++	if (!ptrace_may_access(task1, PTRACE_MODE_READ_REALCREDS) ||
++	    !ptrace_may_access(task2, PTRACE_MODE_READ_REALCREDS)) {
+ 		ret = -EPERM;
+ 		goto err_unlock;
+ 	}
+diff --git a/kernel/module.c b/kernel/module.c
+index 3b9ff966edb9..be8971d817ed 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -178,6 +178,9 @@ struct load_info {
+ 	struct _ddebug *debug;
+ 	unsigned int num_debug;
+ 	bool sig_ok;
++#ifdef CONFIG_KALLSYMS
++	unsigned long mod_kallsyms_init_off;
++#endif
+ 	struct {
+ 		unsigned int sym, str, mod, vers, info, pcpu;
+ 	} index;
+@@ -2321,10 +2324,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+ 	strsect->sh_flags |= SHF_ALLOC;
+ 	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
+ 					 info->index.str) | INIT_OFFSET_MASK;
+-	mod->init_size = debug_align(mod->init_size);
+ 	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
++
++	/* We'll tack temporary mod_kallsyms on the end. */
++	mod->init_size = ALIGN(mod->init_size,
++			       __alignof__(struct mod_kallsyms));
++	info->mod_kallsyms_init_off = mod->init_size;
++	mod->init_size += sizeof(struct mod_kallsyms);
++	mod->init_size = debug_align(mod->init_size);
+ }
+ 
++/*
++ * We use the full symtab and strtab which layout_symtab arranged to
++ * be appended to the init section.  Later we switch to the cut-down
++ * core-only ones.
++ */
+ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ {
+ 	unsigned int i, ndst;
+@@ -2333,28 +2347,33 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ 	char *s;
+ 	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
+ 
+-	mod->symtab = (void *)symsec->sh_addr;
+-	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
++	/* Set up to point into init section. */
++	mod->kallsyms = mod->module_init + info->mod_kallsyms_init_off;
++
++	mod->kallsyms->symtab = (void *)symsec->sh_addr;
++	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+ 	/* Make sure we get permanent strtab: don't use info->strtab. */
+-	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
++	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+ 
+ 	/* Set types up while we still have access to sections. */
+-	for (i = 0; i < mod->num_symtab; i++)
+-		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+-
+-	mod->core_symtab = dst = mod->module_core + info->symoffs;
+-	mod->core_strtab = s = mod->module_core + info->stroffs;
+-	src = mod->symtab;
+-	for (ndst = i = 0; i < mod->num_symtab; i++) {
++	for (i = 0; i < mod->kallsyms->num_symtab; i++)
++		mod->kallsyms->symtab[i].st_info
++			= elf_type(&mod->kallsyms->symtab[i], info);
++
++	/* Now populate the cut down core kallsyms for after init. */
++	mod->core_kallsyms.symtab = dst = mod->module_core + info->symoffs;
++	mod->core_kallsyms.strtab = s = mod->module_core + info->stroffs;
++	src = mod->kallsyms->symtab;
++	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
+ 		if (i == 0 ||
+ 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+ 			dst[ndst] = src[i];
+-			dst[ndst++].st_name = s - mod->core_strtab;
+-			s += strlcpy(s, &mod->strtab[src[i].st_name],
++			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
++			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
+ 				     KSYM_NAME_LEN) + 1;
+ 		}
+ 	}
+-	mod->core_num_syms = ndst;
++	mod->core_kallsyms.num_symtab = ndst;
+ }
+ #else
+ static inline void layout_symtab(struct module *mod, struct load_info *info)
+@@ -3119,9 +3138,8 @@ static noinline int do_init_module(struct module *mod)
+ 	module_put(mod);
+ 	trim_init_extable(mod);
+ #ifdef CONFIG_KALLSYMS
+-	mod->num_symtab = mod->core_num_syms;
+-	mod->symtab = mod->core_symtab;
+-	mod->strtab = mod->core_strtab;
++	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
++	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
+ #endif
+ 	unset_module_init_ro_nx(mod);
+ 	module_arch_freeing_init(mod);
+@@ -3469,6 +3487,11 @@ static inline int is_arm_mapping_symbol(const char *str)
+ 	       && (str[2] == '\0' || str[2] == '.');
+ }
+ 
++static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
++{
++	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
++}
++
+ static const char *get_ksymbol(struct module *mod,
+ 			       unsigned long addr,
+ 			       unsigned long *size,
+@@ -3476,6 +3499,7 @@ static const char *get_ksymbol(struct module *mod,
+ {
+ 	unsigned int i, best = 0;
+ 	unsigned long nextval;
++	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ 
+ 	/* At worse, next value is at end of module */
+ 	if (within_module_init(addr, mod))
+@@ -3485,32 +3509,32 @@ static const char *get_ksymbol(struct module *mod,
+ 
+ 	/* Scan for closest preceding symbol, and next symbol. (ELF
+ 	   starts real symbols at 1). */
+-	for (i = 1; i < mod->num_symtab; i++) {
+-		if (mod->symtab[i].st_shndx == SHN_UNDEF)
++	for (i = 1; i < kallsyms->num_symtab; i++) {
++		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ 			continue;
+ 
+ 		/* We ignore unnamed symbols: they're uninformative
+ 		 * and inserted at a whim. */
+-		if (mod->symtab[i].st_value <= addr
+-		    && mod->symtab[i].st_value > mod->symtab[best].st_value
+-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
+-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
++		if (*symname(kallsyms, i) == '\0'
++		    || is_arm_mapping_symbol(symname(kallsyms, i)))
++			continue;
++
++		if (kallsyms->symtab[i].st_value <= addr
++		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
+ 			best = i;
+-		if (mod->symtab[i].st_value > addr
+-		    && mod->symtab[i].st_value < nextval
+-		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
+-		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+-			nextval = mod->symtab[i].st_value;
++		if (kallsyms->symtab[i].st_value > addr
++		    && kallsyms->symtab[i].st_value < nextval)
++			nextval = kallsyms->symtab[i].st_value;
+ 	}
+ 
+ 	if (!best)
+ 		return NULL;
+ 
+ 	if (size)
+-		*size = nextval - mod->symtab[best].st_value;
++		*size = nextval - kallsyms->symtab[best].st_value;
+ 	if (offset)
+-		*offset = addr - mod->symtab[best].st_value;
+-	return mod->strtab + mod->symtab[best].st_name;
++		*offset = addr - kallsyms->symtab[best].st_value;
++	return symname(kallsyms, best);
+ }
+ 
+ /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
+@@ -3603,19 +3627,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ 
+ 	preempt_disable();
+ 	list_for_each_entry_rcu(mod, &modules, list) {
++		struct mod_kallsyms *kallsyms;
++
+ 		if (mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+-		if (symnum < mod->num_symtab) {
+-			*value = mod->symtab[symnum].st_value;
+-			*type = mod->symtab[symnum].st_info;
+-			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
+-				KSYM_NAME_LEN);
++		kallsyms = rcu_dereference_sched(mod->kallsyms);
++		if (symnum < kallsyms->num_symtab) {
++			*value = kallsyms->symtab[symnum].st_value;
++			*type = kallsyms->symtab[symnum].st_info;
++			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
+ 			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
+ 			*exported = is_exported(name, *value, mod);
+ 			preempt_enable();
+ 			return 0;
+ 		}
+-		symnum -= mod->num_symtab;
++		symnum -= kallsyms->num_symtab;
+ 	}
+ 	preempt_enable();
+ 	return -ERANGE;
+@@ -3624,11 +3650,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
+ static unsigned long mod_find_symname(struct module *mod, const char *name)
+ {
+ 	unsigned int i;
++	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
+ 
+-	for (i = 0; i < mod->num_symtab; i++)
+-		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
+-		    mod->symtab[i].st_info != 'U')
+-			return mod->symtab[i].st_value;
++	for (i = 0; i < kallsyms->num_symtab; i++)
++		if (strcmp(name, symname(kallsyms, i)) == 0 &&
++		    kallsyms->symtab[i].st_info != 'U')
++			return kallsyms->symtab[i].st_value;
+ 	return 0;
+ }
+ 
+@@ -3665,11 +3692,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
+ 	int ret;
+ 
+ 	list_for_each_entry(mod, &modules, list) {
++		/* We hold module_mutex: no need for rcu_dereference_sched */
++		struct mod_kallsyms *kallsyms = mod->kallsyms;
++
+ 		if (mod->state == MODULE_STATE_UNFORMED)
+ 			continue;
+-		for (i = 0; i < mod->num_symtab; i++) {
+-			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
+-				 mod, mod->symtab[i].st_value);
++		for (i = 0; i < kallsyms->num_symtab; i++) {
++			ret = fn(data, symname(kallsyms, i),
++				 mod, kallsyms->symtab[i].st_value);
+ 			if (ret != 0)
+ 				return ret;
+ 		}
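
A note on the scheme above: the per-module symtab/strtab/num_symtab fields are consolidated into a single mod_kallsyms struct so that do_init_module() can switch from the temporary init-section copy to the cut-down core copy with one rcu_assign_pointer(). A minimal reader sketch under that scheme (not part of the patch; locking context assumed):

	/*
	 * Sketch: RCU-safe reader for the swapped kallsyms pointer.
	 * Disabling preemption is what rcu_dereference_sched() pairs
	 * with here, matching module_get_kallsym() above.
	 */
	static unsigned long first_symbol_value(struct module *mod)
	{
		struct mod_kallsyms *kallsyms;
		unsigned long value = 0;

		preempt_disable();
		/* may observe the init-section copy or core_kallsyms */
		kallsyms = rcu_dereference_sched(mod->kallsyms);
		if (kallsyms->num_symtab > 1)	/* ELF symbol 0 is reserved */
			value = kallsyms->symtab[1].st_value;
		preempt_enable();
		return value;
	}
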
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index c8e0e050a36a..261ee21e62db 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -219,6 +219,14 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ {
+ 	const struct cred *cred = current_cred(), *tcred;
++	int dumpable = 0;
++	kuid_t caller_uid;
++	kgid_t caller_gid;
++
++	if (!(mode & PTRACE_MODE_FSCREDS) == !(mode & PTRACE_MODE_REALCREDS)) {
++		WARN(1, "denying ptrace access check without PTRACE_MODE_*CREDS\n");
++		return -EPERM;
++	}
+ 
+ 	/* May we inspect the given task?
+ 	 * This check is used both for attaching with ptrace
+@@ -228,18 +236,33 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ 	 * because setting up the necessary parent/child relationship
+ 	 * or halting the specified task is impossible.
+ 	 */
+-	int dumpable = 0;
++
+ 	/* Don't let security modules deny introspection */
+ 	if (same_thread_group(task, current))
+ 		return 0;
+ 	rcu_read_lock();
++	if (mode & PTRACE_MODE_FSCREDS) {
++		caller_uid = cred->fsuid;
++		caller_gid = cred->fsgid;
++	} else {
++		/*
++		 * Using the euid would make more sense here, but something
++		 * in userland might rely on the old behavior, and this
++		 * shouldn't be a security problem since
++		 * PTRACE_MODE_REALCREDS implies that the caller explicitly
++		 * used a syscall that requests access to another process
++		 * (and not a filesystem syscall to procfs).
++		 */
++		caller_uid = cred->uid;
++		caller_gid = cred->gid;
++	}
+ 	tcred = __task_cred(task);
+-	if (uid_eq(cred->uid, tcred->euid) &&
+-	    uid_eq(cred->uid, tcred->suid) &&
+-	    uid_eq(cred->uid, tcred->uid)  &&
+-	    gid_eq(cred->gid, tcred->egid) &&
+-	    gid_eq(cred->gid, tcred->sgid) &&
+-	    gid_eq(cred->gid, tcred->gid))
++	if (uid_eq(caller_uid, tcred->euid) &&
++	    uid_eq(caller_uid, tcred->suid) &&
++	    uid_eq(caller_uid, tcred->uid)  &&
++	    gid_eq(caller_gid, tcred->egid) &&
++	    gid_eq(caller_gid, tcred->sgid) &&
++	    gid_eq(caller_gid, tcred->gid))
+ 		goto ok;
+ 	if (ptrace_has_cap(tcred->user_ns, mode))
+ 		goto ok;
+@@ -306,7 +329,7 @@ static int ptrace_attach(struct task_struct *task, long request,
+ 		goto out;
+ 
+ 	task_lock(task);
+-	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
++	retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+ 	task_unlock(task);
+ 	if (retval)
+ 		goto unlock_creds;
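
The ptrace hunks change the calling convention: a bare PTRACE_MODE_READ or PTRACE_MODE_ATTACH now trips the WARN in __ptrace_may_access(), and every caller must state which credentials apply. A minimal sketch of a converted caller (function name hypothetical):

	/*
	 * Sketch: syscall-path caller after this patch.  *_REALCREDS
	 * selects the real uid/gid (syscalls naming another process);
	 * *_FSCREDS selects fsuid/fsgid (procfs-style file access).
	 */
	static int example_may_inspect(struct task_struct *task)
	{
		if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
			return -EPERM;
		return 0;
	}
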
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 90552aab5f2d..a7c27cb71fc5 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -1072,9 +1072,10 @@ struct resource * __request_region(struct resource *parent,
+ 		if (!conflict)
+ 			break;
+ 		if (conflict != parent) {
+-			parent = conflict;
+-			if (!(conflict->flags & IORESOURCE_BUSY))
++			if (!(conflict->flags & IORESOURCE_BUSY)) {
++				parent = conflict;
+ 				continue;
++			}
+ 		}
+ 		if (conflict->flags & flags & IORESOURCE_MUXED) {
+ 			add_wait_queue(&muxed_resource_wait, &wait);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 4d870eb6086b..3b0f4c09ab92 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6480,7 +6480,7 @@ static void sched_init_numa(void)
+ 
+ 			sched_domains_numa_masks[i][j] = mask;
+ 
+-			for (k = 0; k < nr_node_ids; k++) {
++			for_each_node(k) {
+ 				if (node_distance(j, k) > sched_domains_numa_distance[i])
+ 					continue;
+ 
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+index 8394b1ee600c..87b8576cbd50 100644
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -259,21 +259,21 @@ static __always_inline bool steal_account_process_tick(void)
+ #ifdef CONFIG_PARAVIRT
+ 	if (static_key_false(&paravirt_steal_enabled)) {
+ 		u64 steal;
+-		cputime_t steal_ct;
++		unsigned long steal_jiffies;
+ 
+ 		steal = paravirt_steal_clock(smp_processor_id());
+ 		steal -= this_rq()->prev_steal_time;
+ 
+ 		/*
+-		 * cputime_t may be less precise than nsecs (eg: if it's
+-		 * based on jiffies). Lets cast the result to cputime
++		 * steal is in nsecs but our caller is expecting steal
++		 * time in jiffies. Let's cast the result to jiffies
+ 		 * granularity and account the rest on the next rounds.
+ 		 */
+-		steal_ct = nsecs_to_cputime(steal);
+-		this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct);
++		steal_jiffies = nsecs_to_jiffies(steal);
++		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+ 
+-		account_steal_time(steal_ct);
+-		return steal_ct;
++		account_steal_time(jiffies_to_cputime(steal_jiffies));
++		return steal_jiffies;
+ 	}
+ #endif
+ 	return false;
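
The steal-time hunk rounds the accumulated steal down to whole jiffies and leaves the sub-jiffy remainder in prev_steal_time for the next tick. A worked sketch, assuming HZ == 250 (4 ms per jiffy):

	/* Sketch: remainder carried between ticks (HZ == 250 assumed). */
	u64 steal = 9 * NSEC_PER_MSEC;	/* 9 ms stolen since last tick */
	unsigned long steal_jiffies = nsecs_to_jiffies(steal);	/* == 2 */
	/*
	 * jiffies_to_nsecs(2) == 8 ms advances prev_steal_time, so the
	 * remaining 1 ms shows up in the next tick's delta,
	 * paravirt_steal_clock() - prev_steal_time.
	 */
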
+diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
+index 7e7746a42a62..10a1d7dc9313 100644
+--- a/kernel/sysctl_binary.c
++++ b/kernel/sysctl_binary.c
+@@ -1321,7 +1321,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
+ 	}
+ 
+ 	mnt = task_active_pid_ns(current)->proc_mnt;
+-	file = file_open_root(mnt->mnt_root, mnt, pathname, flags);
++	file = file_open_root(mnt->mnt_root, mnt, pathname, flags, 0);
+ 	result = PTR_ERR(file);
+ 	if (IS_ERR(file))
+ 		goto out_putname;
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 05330494a0df..de6ea94c41bb 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4916,7 +4916,10 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
+ 
+ 	spd.nr_pages = i;
+ 
+-	ret = splice_to_pipe(pipe, &spd);
++	if (i)
++		ret = splice_to_pipe(pipe, &spd);
++	else
++		ret = 0;
+ out:
+ 	splice_shrink_spd(&spd);
+ 	return ret;
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 36c1455b7567..6d6c0411cbe8 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -289,6 +289,9 @@ static int t_show(struct seq_file *m, void *v)
+ 	const char *str = *fmt;
+ 	int i;
+ 
++	if (!*fmt)
++		return 0;
++
+ 	seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
+ 
+ 	/*
+diff --git a/kernel/watchdog.c b/kernel/watchdog.c
+index 581a68a04c64..f89ea713213f 100644
+--- a/kernel/watchdog.c
++++ b/kernel/watchdog.c
+@@ -812,6 +812,9 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
+ 		 * Update the run state of the lockup detectors.
+ 		 * Restore 'watchdog_enabled' on failure.
+ 		 */
++		if (old == new)
++			goto out;
++
+ 		err = proc_watchdog_update();
+ 		if (err)
+ 			watchdog_enabled = old;
+@@ -857,7 +860,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
+ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 			 void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+-	int err, old;
++	int err, old, new;
+ 
+ 	mutex_lock(&watchdog_proc_mutex);
+ 
+@@ -871,6 +874,10 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
+ 	 * Update the sample period.
+ 	 * Restore 'watchdog_thresh' on failure.
+ 	 */
++	new = ACCESS_ONCE(watchdog_thresh);
++	if (old == new)
++		goto out;
++
+ 	set_sample_period();
+ 	err = proc_watchdog_update();
+ 	if (err)
+diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
+index 6f500ef2301d..f0b323abb4c6 100644
+--- a/lib/ucs2_string.c
++++ b/lib/ucs2_string.c
+@@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
+         }
+ }
+ EXPORT_SYMBOL(ucs2_strncmp);
++
++unsigned long
++ucs2_utf8size(const ucs2_char_t *src)
++{
++	unsigned long i;
++	unsigned long j = 0;
++
++	for (i = 0; i < ucs2_strlen(src); i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800)
++			j += 3;
++		else if (c >= 0x80)
++			j += 2;
++		else
++			j += 1;
++	}
++
++	return j;
++}
++EXPORT_SYMBOL(ucs2_utf8size);
++
++/*
++ * Copy at most maxlength bytes of whole UTF-8 characters to dest from the
++ * UCS-2 string src.
++ *
++ * The return value is the number of bytes written to dest, not including
++ * the final NUL character.
++ */
++unsigned long
++ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
++{
++	unsigned int i;
++	unsigned long j = 0;
++	unsigned long limit = ucs2_strnlen(src, maxlength);
++
++	for (i = 0; maxlength && i < limit; i++) {
++		u16 c = src[i];
++
++		if (c >= 0x800) {
++			if (maxlength < 3)
++				break;
++			maxlength -= 3;
++			dest[j++] = 0xe0 | (c & 0xf000) >> 12;
++			dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
++			dest[j++] = 0x80 | (c & 0x003f);
++		} else if (c >= 0x80) {
++			if (maxlength < 2)
++				break;
++			maxlength -= 2;
++			dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
++			dest[j++] = 0x80 | (c & 0x03f);
++		} else {
++			maxlength -= 1;
++			dest[j++] = c & 0x7f;
++		}
++	}
++	if (maxlength)
++		dest[j] = '\0';
++	return j;
++}
++EXPORT_SYMBOL(ucs2_as_utf8);
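
The two new helpers are meant to be used together: ucs2_utf8size() reports how many bytes the UTF-8 form needs (excluding the NUL), and ucs2_as_utf8() copies only whole UTF-8 characters, NUL-terminating only when room is left over. A minimal usage sketch (allocation style assumed, not from the patch):

	/* Sketch: convert a UCS-2 firmware string to UTF-8. */
	static u8 *example_to_utf8(const ucs2_char_t *name)
	{
		unsigned long len = ucs2_utf8size(name);	/* bytes, no NUL */
		u8 *buf = kmalloc(len + 1, GFP_KERNEL);

		if (!buf)
			return NULL;
		ucs2_as_utf8(buf, name, len);	/* fills exactly len bytes */
		buf[len] = '\0';	/* maxlength exhausted, so terminate here */
		return buf;
	}
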
+diff --git a/mm/bootmem.c b/mm/bootmem.c
+index 477be696511d..a23dd1934654 100644
+--- a/mm/bootmem.c
++++ b/mm/bootmem.c
+@@ -164,7 +164,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
+ 	end = PFN_DOWN(physaddr + size);
+ 
+ 	for (; cursor < end; cursor++) {
+-		__free_pages_bootmem(pfn_to_page(cursor), 0);
++		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
+ 		totalram_pages++;
+ 	}
+ }
+@@ -172,7 +172,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
+ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+ {
+ 	struct page *page;
+-	unsigned long *map, start, end, pages, count = 0;
++	unsigned long *map, start, end, pages, cur, count = 0;
+ 
+ 	if (!bdata->node_bootmem_map)
+ 		return 0;
+@@ -210,17 +210,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+ 		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
+ 			int order = ilog2(BITS_PER_LONG);
+ 
+-			__free_pages_bootmem(pfn_to_page(start), order);
++			__free_pages_bootmem(pfn_to_page(start), start, order);
+ 			count += BITS_PER_LONG;
+ 			start += BITS_PER_LONG;
+ 		} else {
+-			unsigned long cur = start;
++			cur = start;
+ 
+ 			start = ALIGN(start + 1, BITS_PER_LONG);
+ 			while (vec && cur != start) {
+ 				if (vec & 1) {
+ 					page = pfn_to_page(cur);
+-					__free_pages_bootmem(page, 0);
++					__free_pages_bootmem(page, cur, 0);
+ 					count++;
+ 				}
+ 				vec >>= 1;
+@@ -229,12 +229,13 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
+ 		}
+ 	}
+ 
++	cur = bdata->node_min_pfn;
+ 	page = virt_to_page(bdata->node_bootmem_map);
+ 	pages = bdata->node_low_pfn - bdata->node_min_pfn;
+ 	pages = bootmem_bootmap_pages(pages);
+ 	count += pages;
+ 	while (pages--)
+-		__free_pages_bootmem(page++, 0);
++		__free_pages_bootmem(page++, cur++, 0);
+ 
+ 	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);
+ 
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index a6ff935476e3..9d724c0383d2 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -755,7 +755,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
+ 
+ #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+ static void destroy_compound_gigantic_page(struct page *page,
+-					unsigned long order)
++					unsigned int order)
+ {
+ 	int i;
+ 	int nr_pages = 1 << order;
+@@ -771,7 +771,7 @@ static void destroy_compound_gigantic_page(struct page *page,
+ 	__ClearPageHead(page);
+ }
+ 
+-static void free_gigantic_page(struct page *page, unsigned order)
++static void free_gigantic_page(struct page *page, unsigned int order)
+ {
+ 	free_contig_range(page_to_pfn(page), 1 << order);
+ }
+@@ -815,7 +815,7 @@ static bool zone_spans_last_pfn(const struct zone *zone,
+ 	return zone_spans_pfn(zone, last_pfn);
+ }
+ 
+-static struct page *alloc_gigantic_page(int nid, unsigned order)
++static struct page *alloc_gigantic_page(int nid, unsigned int order)
+ {
+ 	unsigned long nr_pages = 1 << order;
+ 	unsigned long ret, pfn, flags;
+@@ -851,7 +851,7 @@ static struct page *alloc_gigantic_page(int nid, unsigned order)
+ }
+ 
+ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
+-static void prep_compound_gigantic_page(struct page *page, unsigned long order);
++static void prep_compound_gigantic_page(struct page *page, unsigned int order);
+ 
+ static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
+ {
+@@ -884,9 +884,9 @@ static int alloc_fresh_gigantic_page(struct hstate *h,
+ static inline bool gigantic_page_supported(void) { return true; }
+ #else
+ static inline bool gigantic_page_supported(void) { return false; }
+-static inline void free_gigantic_page(struct page *page, unsigned order) { }
++static inline void free_gigantic_page(struct page *page, unsigned int order) { }
+ static inline void destroy_compound_gigantic_page(struct page *page,
+-						unsigned long order) { }
++						unsigned int order) { }
+ static inline int alloc_fresh_gigantic_page(struct hstate *h,
+ 					nodemask_t *nodes_allowed) { return 0; }
+ #endif
+@@ -1013,7 +1013,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
+ 	put_page(page); /* free it into the hugepage allocator */
+ }
+ 
+-static void prep_compound_gigantic_page(struct page *page, unsigned long order)
++static void prep_compound_gigantic_page(struct page *page, unsigned int order)
+ {
+ 	int i;
+ 	int nr_pages = 1 << order;
+@@ -1567,7 +1567,8 @@ found:
+ 	return 1;
+ }
+ 
+-static void __init prep_compound_huge_page(struct page *page, int order)
++static void __init prep_compound_huge_page(struct page *page,
++		unsigned int order)
+ {
+ 	if (unlikely(order > (MAX_ORDER - 1)))
+ 		prep_compound_gigantic_page(page, order);
+@@ -2278,7 +2279,7 @@ static int __init hugetlb_init(void)
+ module_init(hugetlb_init);
+ 
+ /* Should be called on processing a hugepagesz=... option */
+-void __init hugetlb_add_hstate(unsigned order)
++void __init hugetlb_add_hstate(unsigned int order)
+ {
+ 	struct hstate *h;
+ 	unsigned long i;
+diff --git a/mm/internal.h b/mm/internal.h
+index a25e359a4039..a415872aab06 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -155,8 +155,9 @@ __find_buddy_index(unsigned long page_idx, unsigned int order)
+ }
+ 
+ extern int __isolate_free_page(struct page *page, unsigned int order);
+-extern void __free_pages_bootmem(struct page *page, unsigned int order);
+-extern void prep_compound_page(struct page *page, unsigned long order);
++extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
++					unsigned int order);
++extern void prep_compound_page(struct page *page, unsigned int order);
+ #ifdef CONFIG_MEMORY_FAILURE
+ extern bool is_free_buddy_page(struct page *page);
+ #endif
+@@ -213,7 +214,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
+  * page cannot be allocated or merged in parallel. Alternatively, it must
+  * handle invalid values gracefully, and use page_order_unsafe() below.
+  */
+-static inline unsigned long page_order(struct page *page)
++static inline unsigned int page_order(struct page *page)
+ {
+ 	/* PageBuddy() must be checked by the caller */
+ 	return page_private(page);
+diff --git a/mm/memblock.c b/mm/memblock.c
+index 9318b567ed79..9742d1ac10a5 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -1316,7 +1316,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
+ 	end = PFN_DOWN(base + size);
+ 
+ 	for (; cursor < end; cursor++) {
+-		__free_pages_bootmem(pfn_to_page(cursor), 0);
++		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
+ 		totalram_pages++;
+ 	}
+ }
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index aac1c98a9bc7..221762e24a68 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5289,6 +5289,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 				 char *buf, size_t nbytes, loff_t off)
+ {
+ 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
++	unsigned long nr_pages;
+ 	unsigned long high;
+ 	int err;
+ 
+@@ -5299,6 +5300,11 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
+ 
+ 	memcg->high = high;
+ 
++	nr_pages = page_counter_read(&memcg->memory);
++	if (nr_pages > high)
++		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
++					     GFP_KERNEL, true);
++
+ 	return nbytes;
+ }
+ 
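
With this hunk, lowering memory.high takes effect immediately: the excess over the new limit is reclaimed at write time instead of waiting for the next charge. A worked sketch with assumed values (4 KiB pages):

	/* Sketch: usage 1 GiB (262144 pages), new high 512 MiB (131072). */
	nr_pages = page_counter_read(&memcg->memory);
	if (nr_pages > high)	/* 262144 > 131072 */
		try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
					     GFP_KERNEL, true);	/* may swap */
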
+diff --git a/mm/nobootmem.c b/mm/nobootmem.c
+index 90b50468333e..4bea539921df 100644
+--- a/mm/nobootmem.c
++++ b/mm/nobootmem.c
+@@ -77,7 +77,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size)
+ 	end = PFN_DOWN(addr + size);
+ 
+ 	for (; cursor < end; cursor++) {
+-		__free_pages_bootmem(pfn_to_page(cursor), 0);
++		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
+ 		totalram_pages++;
+ 	}
+ }
+@@ -92,7 +92,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
+ 		while (start + (1UL << order) > end)
+ 			order--;
+ 
+-		__free_pages_bootmem(pfn_to_page(start), order);
++		__free_pages_bootmem(pfn_to_page(start), start, order);
+ 
+ 		start += (1UL << order);
+ 	}
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 18490f3bd7f1..872b2ac95dec 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -162,7 +162,7 @@ bool pm_suspended_storage(void)
+ #endif /* CONFIG_PM_SLEEP */
+ 
+ #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
+-int pageblock_order __read_mostly;
++unsigned int pageblock_order __read_mostly;
+ #endif
+ 
+ static void __free_pages_ok(struct page *page, unsigned int order);
+@@ -362,7 +362,7 @@ static void free_compound_page(struct page *page)
+ 	__free_pages_ok(page, compound_order(page));
+ }
+ 
+-void prep_compound_page(struct page *page, unsigned long order)
++void prep_compound_page(struct page *page, unsigned int order)
+ {
+ 	int i;
+ 	int nr_pages = 1 << order;
+@@ -579,34 +579,28 @@ static inline void __free_one_page(struct page *page,
+ 	unsigned long combined_idx;
+ 	unsigned long uninitialized_var(buddy_idx);
+ 	struct page *buddy;
+-	int max_order = MAX_ORDER;
++	unsigned int max_order;
++
++	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+ 
+ 	VM_BUG_ON(!zone_is_initialized(zone));
+ 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
+ 
+ 	VM_BUG_ON(migratetype == -1);
+-	if (is_migrate_isolate(migratetype)) {
+-		/*
+-		 * We restrict max order of merging to prevent merge
+-		 * between freepages on isolate pageblock and normal
+-		 * pageblock. Without this, pageblock isolation
+-		 * could cause incorrect freepage accounting.
+-		 */
+-		max_order = min(MAX_ORDER, pageblock_order + 1);
+-	} else {
++	if (likely(!is_migrate_isolate(migratetype)))
+ 		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+-	}
+ 
+-	page_idx = pfn & ((1 << max_order) - 1);
++	page_idx = pfn & ((1 << MAX_ORDER) - 1);
+ 
+ 	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+ 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ 
++continue_merging:
+ 	while (order < max_order - 1) {
+ 		buddy_idx = __find_buddy_index(page_idx, order);
+ 		buddy = page + (buddy_idx - page_idx);
+ 		if (!page_is_buddy(page, buddy, order))
+-			break;
++			goto done_merging;
+ 		/*
+ 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
+ 		 * merge with it and move up one order.
+@@ -623,6 +617,32 @@ static inline void __free_one_page(struct page *page,
+ 		page_idx = combined_idx;
+ 		order++;
+ 	}
++	if (max_order < MAX_ORDER) {
++		/* If we are here, it means order is >= pageblock_order.
++		 * We want to prevent merge between freepages on isolate
++		 * pageblock and normal pageblock. Without this, pageblock
++		 * isolation could cause incorrect freepage or CMA accounting.
++		 *
++		 * We don't want to hit this code for the more frequent
++		 * low-order merging.
++		 */
++		if (unlikely(has_isolate_pageblock(zone))) {
++			int buddy_mt;
++
++			buddy_idx = __find_buddy_index(page_idx, order);
++			buddy = page + (buddy_idx - page_idx);
++			buddy_mt = get_pageblock_migratetype(buddy);
++
++			if (migratetype != buddy_mt
++					&& (is_migrate_isolate(migratetype) ||
++						is_migrate_isolate(buddy_mt)))
++				goto done_merging;
++		}
++		max_order++;
++		goto continue_merging;
++	}
++
++done_merging:
+ 	set_page_order(page, order);
+ 
+ 	/*
+@@ -832,7 +852,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
+ 	local_irq_restore(flags);
+ }
+ 
+-void __init __free_pages_bootmem(struct page *page, unsigned int order)
++void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
++							unsigned int order)
+ {
+ 	unsigned int nr_pages = 1 << order;
+ 	struct page *p = page;
+@@ -1066,7 +1087,7 @@ int move_freepages(struct zone *zone,
+ 			  int migratetype)
+ {
+ 	struct page *page;
+-	unsigned long order;
++	unsigned int order;
+ 	int pages_moved = 0;
+ 
+ #ifndef CONFIG_HOLES_IN_ZONE
+@@ -1180,7 +1201,7 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
+ static void steal_suitable_fallback(struct zone *zone, struct page *page,
+ 							  int start_type)
+ {
+-	int current_order = page_order(page);
++	unsigned int current_order = page_order(page);
+ 	int pages;
+ 
+ 	/* Take ownership for orders >= pageblock_order */
+@@ -2283,7 +2304,7 @@ static DEFINE_RATELIMIT_STATE(nopage_rs,
+ 		DEFAULT_RATELIMIT_INTERVAL,
+ 		DEFAULT_RATELIMIT_BURST);
+ 
+-void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
++void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
+ {
+ 	unsigned int filter = SHOW_MEM_FILTER_NODES;
+ 
+@@ -2317,7 +2338,7 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
+ 		va_end(args);
+ 	}
+ 
+-	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
++	pr_warn("%s: page allocation failure: order:%u, mode:0x%x\n",
+ 		current->comm, order, gfp_mask);
+ 
+ 	dump_stack();
+@@ -3018,7 +3039,8 @@ void free_kmem_pages(unsigned long addr, unsigned int order)
+ 	}
+ }
+ 
+-static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
++static void *make_alloc_exact(unsigned long addr, unsigned int order,
++		size_t size)
+ {
+ 	if (addr) {
+ 		unsigned long alloc_end = addr + (PAGE_SIZE << order);
+@@ -3070,7 +3092,7 @@ EXPORT_SYMBOL(alloc_pages_exact);
+  */
+ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+ {
+-	unsigned order = get_order(size);
++	unsigned int order = get_order(size);
+ 	struct page *p = alloc_pages_node(nid, gfp_mask, order);
+ 	if (!p)
+ 		return NULL;
+@@ -3372,7 +3394,8 @@ void show_free_areas(unsigned int filter)
+ 	}
+ 
+ 	for_each_populated_zone(zone) {
+-		unsigned long nr[MAX_ORDER], flags, order, total = 0;
++		unsigned int order;
++		unsigned long nr[MAX_ORDER], flags, total = 0;
+ 		unsigned char types[MAX_ORDER];
+ 
+ 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
+@@ -3721,7 +3744,7 @@ static void build_zonelists(pg_data_t *pgdat)
+ 	nodemask_t used_mask;
+ 	int local_node, prev_node;
+ 	struct zonelist *zonelist;
+-	int order = current_zonelist_order;
++	unsigned int order = current_zonelist_order;
+ 
+ 	/* initialize zonelists */
+ 	for (i = 0; i < MAX_ZONELISTS; i++) {
+@@ -6408,7 +6431,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
+ 		       unsigned migratetype)
+ {
+ 	unsigned long outer_start, outer_end;
+-	int ret = 0, order;
++	unsigned int order;
++	int ret = 0;
+ 
+ 	struct compact_control cc = {
+ 		.nr_migratepages = 0,
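
The __free_one_page() rework above merges unconditionally up to pageblock_order and only then, under continue_merging, pays for the has_isolate_pageblock() check, so the common low-order free path stays cheap. The buddy relation driving the loop is plain index arithmetic; a sketch mirroring __find_buddy_index() from mm/internal.h:

	/*
	 * Sketch: the buddy of a 1 << order block is the neighbour
	 * obtained by flipping bit 'order' of the page index.
	 */
	static unsigned long buddy_index(unsigned long page_idx,
					 unsigned int order)
	{
		return page_idx ^ (1UL << order);
	}
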
+diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
+index e88d071648c2..5d453e58ddbf 100644
+--- a/mm/process_vm_access.c
++++ b/mm/process_vm_access.c
+@@ -194,7 +194,7 @@ static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
+ 		goto free_proc_pages;
+ 	}
+ 
+-	mm = mm_access(task, PTRACE_MODE_ATTACH);
++	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
+ 	if (!mm || IS_ERR(mm)) {
+ 		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+ 		/*
+diff --git a/scripts/coccinelle/iterators/use_after_iter.cocci b/scripts/coccinelle/iterators/use_after_iter.cocci
+index f085f5968c52..ce8cc9c006e5 100644
+--- a/scripts/coccinelle/iterators/use_after_iter.cocci
++++ b/scripts/coccinelle/iterators/use_after_iter.cocci
+@@ -123,7 +123,7 @@ list_remove_head(x,c,...)
+ |
+ sizeof(<+...c...+>)
+ |
+-&c->member
++ &c->member
+ |
+ c = E
+ |
+diff --git a/security/commoncap.c b/security/commoncap.c
+index f2875cd9f677..4cdc8eb8d5d2 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -142,12 +142,17 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
+ {
+ 	int ret = 0;
+ 	const struct cred *cred, *child_cred;
++	const kernel_cap_t *caller_caps;
+ 
+ 	rcu_read_lock();
+ 	cred = current_cred();
+ 	child_cred = __task_cred(child);
++	if (mode & PTRACE_MODE_FSCREDS)
++		caller_caps = &cred->cap_effective;
++	else
++		caller_caps = &cred->cap_permitted;
+ 	if (cred->user_ns == child_cred->user_ns &&
+-	    cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
++	    cap_issubset(child_cred->cap_permitted, *caller_caps))
+ 		goto out;
+ 	if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
+ 		goto out;
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 7bed4ad7cd76..0a374a2ce030 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -845,6 +845,8 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+ 	size_t datalen = prep->datalen;
+ 	int ret = 0;
+ 
++	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++		return -ENOKEY;
+ 	if (datalen <= 0 || datalen > 32767 || !prep->data)
+ 		return -EINVAL;
+ 
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index c0594cb07ada..aeb38f1a12e7 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -984,13 +984,16 @@ static void trusted_rcu_free(struct rcu_head *rcu)
+  */
+ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+ {
+-	struct trusted_key_payload *p = key->payload.data;
++	struct trusted_key_payload *p;
+ 	struct trusted_key_payload *new_p;
+ 	struct trusted_key_options *new_o;
+ 	size_t datalen = prep->datalen;
+ 	char *datablob;
+ 	int ret = 0;
+ 
++	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++		return -ENOKEY;
++	p = key->payload.data;
+ 	if (!p->migratable)
+ 		return -EPERM;
+ 	if (datalen <= 0 || datalen > 32767 || !prep->data)
+diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
+index 36b47bbd3d8c..7cf22260bdff 100644
+--- a/security/keys/user_defined.c
++++ b/security/keys/user_defined.c
+@@ -120,7 +120,10 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
+ 
+ 	if (ret == 0) {
+ 		/* attach the new data, displacing the old */
+-		zap = key->payload.data;
++		if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
++			zap = key->payload.data;
++		else
++			zap = NULL;
+ 		rcu_assign_keypointer(key, upayload);
+ 		key->expiry = 0;
+ 	}
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index b644757886bc..e45f0a3df127 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -354,12 +354,10 @@ static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
+  */
+ static inline unsigned int smk_ptrace_mode(unsigned int mode)
+ {
+-	switch (mode) {
+-	case PTRACE_MODE_READ:
+-		return MAY_READ;
+-	case PTRACE_MODE_ATTACH:
++	if (mode & PTRACE_MODE_ATTACH)
+ 		return MAY_READWRITE;
+-	}
++	if (mode & PTRACE_MODE_READ)
++		return MAY_READ;
+ 
+ 	return 0;
+ }
+diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
+index 24aae2ae2b30..d3f7fb55b835 100644
+--- a/security/yama/yama_lsm.c
++++ b/security/yama/yama_lsm.c
+@@ -292,7 +292,7 @@ int yama_ptrace_access_check(struct task_struct *child,
+ 		return rc;
+ 
+ 	/* require ptrace target be a child of ptracer on attach */
+-	if (mode == PTRACE_MODE_ATTACH) {
++	if (mode & PTRACE_MODE_ATTACH) {
+ 		switch (ptrace_scope) {
+ 		case YAMA_SCOPE_DISABLED:
+ 			/* No additional restrictions. */
+@@ -318,7 +318,7 @@ int yama_ptrace_access_check(struct task_struct *child,
+ 		}
+ 	}
+ 
+-	if (rc) {
++	if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
+ 		printk_ratelimited(KERN_NOTICE
+ 			"ptrace of pid %d was attempted by: %s (pid %d)\n",
+ 			child->pid, current->comm, current->pid);
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index f75bf5622687..961ca32ee989 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -500,23 +500,27 @@ EXPORT_SYMBOL_GPL(snd_hdac_get_connections);
+  * This function calls the runtime PM helper to power up the given codec.
+  * Unlike snd_hdac_power_up_pm(), you should call this only for the code
+  * path that isn't included in PM path.  Otherwise it gets stuck.
++ *
++ * Returns zero if successful, or a negative error code.
+  */
+-void snd_hdac_power_up(struct hdac_device *codec)
++int snd_hdac_power_up(struct hdac_device *codec)
+ {
+-	pm_runtime_get_sync(&codec->dev);
++	return pm_runtime_get_sync(&codec->dev);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_up);
+ 
+ /**
+  * snd_hdac_power_down - power down the codec
+  * @codec: the codec object
++ *
++ * Returns zero if successful, or a negative error code.
+  */
+-void snd_hdac_power_down(struct hdac_device *codec)
++int snd_hdac_power_down(struct hdac_device *codec)
+ {
+ 	struct device *dev = &codec->dev;
+ 
+ 	pm_runtime_mark_last_busy(dev);
+-	pm_runtime_put_autosuspend(dev);
++	return pm_runtime_put_autosuspend(dev);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_down);
+ 
+@@ -528,11 +532,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_down);
+  * which may be called by PM suspend/resume again.  OTOH, if a power-up
+  * call must wake up the sleeper (e.g. in a kctl callback), use
+  * snd_hdac_power_up() instead.
++ *
++ * Returns zero if successful, or a negative error code.
+  */
+-void snd_hdac_power_up_pm(struct hdac_device *codec)
++int snd_hdac_power_up_pm(struct hdac_device *codec)
+ {
+ 	if (!atomic_inc_not_zero(&codec->in_pm))
+-		snd_hdac_power_up(codec);
++		return snd_hdac_power_up(codec);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ 
+@@ -542,11 +549,14 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
+ 	/* At worst, next value is at end of module */
+  * Like snd_hdac_power_up_pm(), this function is used in a recursive
+  * code path like init code which may be called by PM suspend/resume again.
++ *
++ * Returns zero if successful, or a negative error code.
+  */
+-void snd_hdac_power_down_pm(struct hdac_device *codec)
++int snd_hdac_power_down_pm(struct hdac_device *codec)
+ {
+ 	if (atomic_dec_if_positive(&codec->in_pm) < 0)
+-		snd_hdac_power_down(codec);
++		return snd_hdac_power_down(codec);
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_power_down_pm);
+ #endif
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index 1eabcdf69457..b0ed870ffb88 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -410,8 +410,9 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ 
+ 	err = reg_raw_write(codec, reg, val);
+ 	if (err == -EAGAIN) {
+-		snd_hdac_power_up_pm(codec);
+-		err = reg_raw_write(codec, reg, val);
++		err = snd_hdac_power_up_pm(codec);
++		if (!err)
++			err = reg_raw_write(codec, reg, val);
+ 		snd_hdac_power_down_pm(codec);
+ 	}
+ 	return err;
+@@ -442,8 +443,9 @@ int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ 
+ 	err = reg_raw_read(codec, reg, val);
+ 	if (err == -EAGAIN) {
+-		snd_hdac_power_up_pm(codec);
+-		err = reg_raw_read(codec, reg, val);
++		err = snd_hdac_power_up_pm(codec);
++		if (!err)
++			err = reg_raw_read(codec, reg, val);
+ 		snd_hdac_power_down_pm(codec);
+ 	}
+ 	return err;
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 194627c6c42b..16e0ebacbdb0 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -771,9 +771,6 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
+ 	unsigned int caps;
+ 	unsigned int mask, val;
+ 
+-	if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
+-		return;
+-
+ 	caps = query_amp_caps(codec, nid, dir);
+ 	val = get_amp_val_to_activate(codec, nid, dir, caps, enable);
+ 	mask = get_amp_mask_to_modify(codec, nid, dir, idx_to_check, caps);
+@@ -784,12 +781,22 @@ static void activate_amp(struct hda_codec *codec, hda_nid_t nid, int dir,
+ 	update_amp(codec, nid, dir, idx, mask, val);
+ }
+ 
++static void check_and_activate_amp(struct hda_codec *codec, hda_nid_t nid,
++				   int dir, int idx, int idx_to_check,
++				   bool enable)
++{
++	/* check whether the given amp is still used by others */
++	if (!enable && is_active_nid(codec, nid, dir, idx_to_check))
++		return;
++	activate_amp(codec, nid, dir, idx, idx_to_check, enable);
++}
++
+ static void activate_amp_out(struct hda_codec *codec, struct nid_path *path,
+ 			     int i, bool enable)
+ {
+ 	hda_nid_t nid = path->path[i];
+ 	init_amp(codec, nid, HDA_OUTPUT, 0);
+-	activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
++	check_and_activate_amp(codec, nid, HDA_OUTPUT, 0, 0, enable);
+ }
+ 
+ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
+@@ -817,9 +824,16 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
+ 	 * when aa-mixer is available, we need to enable the path as well
+ 	 */
+ 	for (n = 0; n < nums; n++) {
+-		if (n != idx && (!add_aamix || conn[n] != spec->mixer_merge_nid))
+-			continue;
+-		activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
++		if (n != idx) {
++			if (conn[n] != spec->mixer_merge_nid)
++				continue;
++			/* when aamix is disabled, force to off */
++			if (!add_aamix) {
++				activate_amp(codec, nid, HDA_INPUT, n, n, false);
++				continue;
++			}
++		}
++		check_and_activate_amp(codec, nid, HDA_INPUT, n, idx, enable);
+ 	}
+ }
+ 
+@@ -1580,6 +1594,12 @@ static bool map_singles(struct hda_codec *codec, int outs,
+ 	return found;
+ }
+ 
++static inline bool has_aamix_out_paths(struct hda_gen_spec *spec)
++{
++	return spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
++		spec->aamix_out_paths[2];
++}
++
+ /* create a new path including aamix if available, and return its index */
+ static int check_aamix_out_path(struct hda_codec *codec, int path_idx)
+ {
+@@ -2422,25 +2442,51 @@ static void update_aamix_paths(struct hda_codec *codec, bool do_mix,
+ 	}
+ }
+ 
++/* re-initialize the output paths; only called from loopback_mixing_put() */
++static void update_output_paths(struct hda_codec *codec, int num_outs,
++				const int *paths)
++{
++	struct hda_gen_spec *spec = codec->spec;
++	struct nid_path *path;
++	int i;
++
++	for (i = 0; i < num_outs; i++) {
++		path = snd_hda_get_path_from_idx(codec, paths[i]);
++		if (path)
++			snd_hda_activate_path(codec, path, path->active,
++					      spec->aamix_mode);
++	}
++}
++
+ static int loopback_mixing_put(struct snd_kcontrol *kcontrol,
+ 			       struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ 	struct hda_gen_spec *spec = codec->spec;
++	const struct auto_pin_cfg *cfg = &spec->autocfg;
+ 	unsigned int val = ucontrol->value.enumerated.item[0];
+ 
+ 	if (val == spec->aamix_mode)
+ 		return 0;
+ 	spec->aamix_mode = val;
+-	update_aamix_paths(codec, val, spec->out_paths[0],
+-			   spec->aamix_out_paths[0],
+-			   spec->autocfg.line_out_type);
+-	update_aamix_paths(codec, val, spec->hp_paths[0],
+-			   spec->aamix_out_paths[1],
+-			   AUTO_PIN_HP_OUT);
+-	update_aamix_paths(codec, val, spec->speaker_paths[0],
+-			   spec->aamix_out_paths[2],
+-			   AUTO_PIN_SPEAKER_OUT);
++	if (has_aamix_out_paths(spec)) {
++		update_aamix_paths(codec, val, spec->out_paths[0],
++				   spec->aamix_out_paths[0],
++				   cfg->line_out_type);
++		update_aamix_paths(codec, val, spec->hp_paths[0],
++				   spec->aamix_out_paths[1],
++				   AUTO_PIN_HP_OUT);
++		update_aamix_paths(codec, val, spec->speaker_paths[0],
++				   spec->aamix_out_paths[2],
++				   AUTO_PIN_SPEAKER_OUT);
++	} else {
++		update_output_paths(codec, cfg->line_outs, spec->out_paths);
++		if (cfg->line_out_type != AUTO_PIN_HP_OUT)
++			update_output_paths(codec, cfg->hp_outs, spec->hp_paths);
++		if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
++			update_output_paths(codec, cfg->speaker_outs,
++					    spec->speaker_paths);
++	}
+ 	return 1;
+ }
+ 
+@@ -2458,12 +2504,13 @@ static int create_loopback_mixing_ctl(struct hda_codec *codec)
+ 
+ 	if (!spec->mixer_nid)
+ 		return 0;
+-	if (!(spec->aamix_out_paths[0] || spec->aamix_out_paths[1] ||
+-	      spec->aamix_out_paths[2]))
+-		return 0;
+ 	if (!snd_hda_gen_add_kctl(spec, NULL, &loopback_mixing_enum))
+ 		return -ENOMEM;
+ 	spec->have_aamix_ctl = 1;
++	/* if no explicit aamix path is present (e.g. for Realtek codecs),
++	 * enable aamix as default -- just for compatibility
++	 */
++	spec->aamix_mode = !has_aamix_out_paths(spec);
+ 	return 0;
+ }
+ 
+@@ -5664,6 +5711,8 @@ static void init_aamix_paths(struct hda_codec *codec)
+ 
+ 	if (!spec->have_aamix_ctl)
+ 		return;
++	if (!has_aamix_out_paths(spec))
++		return;
+ 	update_aamix_paths(codec, spec->aamix_mode, spec->out_paths[0],
+ 			   spec->aamix_out_paths[0],
+ 			   spec->autocfg.line_out_type);
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 8f50a257a80d..aeb054ca9ed9 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -174,8 +174,12 @@ static void cs_automute(struct hda_codec *codec)
+ 	snd_hda_gen_update_outputs(codec);
+ 
+ 	if (spec->gpio_eapd_hp || spec->gpio_eapd_speaker) {
+-		spec->gpio_data = spec->gen.hp_jack_present ?
+-			spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		if (spec->gen.automute_speaker)
++			spec->gpio_data = spec->gen.hp_jack_present ?
++				spec->gpio_eapd_hp : spec->gpio_eapd_speaker;
++		else
++			spec->gpio_data =
++				spec->gpio_eapd_hp | spec->gpio_eapd_speaker;
+ 		snd_hda_codec_write(codec, 0x01, 0,
+ 				    AC_VERB_SET_GPIO_DATA, spec->gpio_data);
+ 	}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 488f4c7be33e..91b77bad03ea 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -204,8 +204,13 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
+ {
+ 	struct conexant_spec *spec = codec->spec;
+ 
+-	if (codec->core.vendor_id != 0x14f150f2)
++	switch (codec->core.vendor_id) {
++	case 0x14f150f2: /* CX20722 */
++	case 0x14f150f4: /* CX20724 */
++		break;
++	default:
+ 		return;
++	}
+ 
+ 	/* Turn the CX20722 codec into D3 to avoid spurious noises
+ 	   from the internal speaker during (and after) reboot */
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 51d519554744..193426e223c9 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -1526,6 +1526,57 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
+ 	return 0;
+ }
+ 
++/* update per_pin ELD from the given new ELD;
++ * setup info frame and notification accordingly
++ */
++static void update_eld(struct hda_codec *codec,
++		       struct hdmi_spec_per_pin *per_pin,
++		       struct hdmi_eld *eld)
++{
++	struct hdmi_eld *pin_eld = &per_pin->sink_eld;
++	bool old_eld_valid = pin_eld->eld_valid;
++	bool eld_changed;
++
++	if (eld->eld_valid)
++		snd_hdmi_show_eld(codec, &eld->info);
++
++	eld_changed = (pin_eld->eld_valid != eld->eld_valid);
++	if (eld->eld_valid && pin_eld->eld_valid)
++		if (pin_eld->eld_size != eld->eld_size ||
++		    memcmp(pin_eld->eld_buffer, eld->eld_buffer,
++			   eld->eld_size) != 0)
++			eld_changed = true;
++
++	pin_eld->monitor_present = eld->monitor_present;
++	pin_eld->eld_valid = eld->eld_valid;
++	pin_eld->eld_size = eld->eld_size;
++	if (eld->eld_valid)
++		memcpy(pin_eld->eld_buffer, eld->eld_buffer, eld->eld_size);
++	pin_eld->info = eld->info;
++
++	/*
++	 * Re-setup pin and infoframe. This is needed e.g. when
++	 * - sink is first plugged-in
++	 * - transcoder can change during stream playback on Haswell
++	 *   and this can make HW reset converter selection on a pin.
++	 */
++	if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
++		if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
++			intel_verify_pin_cvt_connect(codec, per_pin);
++			intel_not_share_assigned_cvt(codec, per_pin->pin_nid,
++						     per_pin->mux_idx);
++		}
++
++		hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
++	}
++
++	if (eld_changed)
++		snd_ctl_notify(codec->card,
++			       SNDRV_CTL_EVENT_MASK_VALUE |
++			       SNDRV_CTL_EVENT_MASK_INFO,
++			       &per_pin->eld_ctl->id);
++}
++
+ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ {
+ 	struct hda_jack_tbl *jack;
+@@ -1543,8 +1594,6 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 	 * the unsolicited response to avoid custom WARs.
+ 	 */
+ 	int present;
+-	bool update_eld = false;
+-	bool eld_changed = false;
+ 	bool ret;
+ 
+ 	snd_hda_power_up_pm(codec);
+@@ -1552,6 +1601,8 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 
+ 	mutex_lock(&per_pin->lock);
+ 	pin_eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE);
++	eld->monitor_present = pin_eld->monitor_present;
++
+ 	if (pin_eld->monitor_present)
+ 		eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
+ 	else
+@@ -1571,61 +1622,13 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
+ 						    eld->eld_size) < 0)
+ 				eld->eld_valid = false;
+ 		}
+-
+-		if (eld->eld_valid) {
+-			snd_hdmi_show_eld(codec, &eld->info);
+-			update_eld = true;
+-		}
+-		else if (repoll) {
+-			schedule_delayed_work(&per_pin->work,
+-					      msecs_to_jiffies(300));
+-			goto unlock;
+-		}
+ 	}
+ 
+-	if (pin_eld->eld_valid != eld->eld_valid)
+-		eld_changed = true;
+-
+-	if (pin_eld->eld_valid && !eld->eld_valid)
+-		update_eld = true;
+-
+-	if (update_eld) {
+-		bool old_eld_valid = pin_eld->eld_valid;
+-		pin_eld->eld_valid = eld->eld_valid;
+-		if (pin_eld->eld_size != eld->eld_size ||
+-			      memcmp(pin_eld->eld_buffer, eld->eld_buffer,
+-				     eld->eld_size) != 0) {
+-			memcpy(pin_eld->eld_buffer, eld->eld_buffer,
+-			       eld->eld_size);
+-			eld_changed = true;
+-		}
+-		pin_eld->eld_size = eld->eld_size;
+-		pin_eld->info = eld->info;
+-
+-		/*
+-		 * Re-setup pin and infoframe. This is needed e.g. when
+-		 * - sink is first plugged-in (infoframe is not set up if !monitor_present)
+-		 * - transcoder can change during stream playback on Haswell
+-		 *   and this can make HW reset converter selection on a pin.
+-		 */
+-		if (eld->eld_valid && !old_eld_valid && per_pin->setup) {
+-			if (is_haswell_plus(codec) ||
+-				is_valleyview_plus(codec)) {
+-				intel_verify_pin_cvt_connect(codec, per_pin);
+-				intel_not_share_assigned_cvt(codec, pin_nid,
+-							per_pin->mux_idx);
+-			}
+-
+-			hdmi_setup_audio_infoframe(codec, per_pin,
+-						   per_pin->non_pcm);
+-		}
+-	}
++	if (!eld->eld_valid && repoll)
++		schedule_delayed_work(&per_pin->work, msecs_to_jiffies(300));
++	else
++		update_eld(codec, per_pin, eld);
+ 
+-	if (eld_changed)
+-		snd_ctl_notify(codec->card,
+-			       SNDRV_CTL_EVENT_MASK_VALUE | SNDRV_CTL_EVENT_MASK_INFO,
+-			       &per_pin->eld_ctl->id);
+- unlock:
+ 	ret = !repoll || !pin_eld->monitor_present || pin_eld->eld_valid;
+ 
+ 	jack = snd_hda_jack_tbl_get(codec, pin_nid);
+@@ -3336,6 +3339,8 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de007d, .name = "GPU 7d HDMI/DP",	.patch = patch_nvhdmi },
++{ .id = 0x10de0082, .name = "GPU 82 HDMI/DP",	.patch = patch_nvhdmi },
++{ .id = 0x10de0083, .name = "GPU 83 HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de8001, .name = "MCP73 HDMI",	.patch = patch_nvhdmi_2ch },
+ { .id = 0x11069f80, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
+ { .id = 0x11069f81, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 91cc6897d595..90cf6168267e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4608,6 +4608,7 @@ enum {
+ 	ALC290_FIXUP_SUBWOOFER,
+ 	ALC290_FIXUP_SUBWOOFER_HSJACK,
+ 	ALC269_FIXUP_THINKPAD_ACPI,
++	ALC269_FIXUP_DMIC_THINKPAD_ACPI,
+ 	ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 	ALC255_FIXUP_HEADSET_MODE,
+@@ -5046,6 +5047,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = hda_fixup_thinkpad_acpi,
+ 	},
++	[ALC269_FIXUP_DMIC_THINKPAD_ACPI] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_inv_dmic,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI,
++	},
+ 	[ALC255_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -5280,6 +5287,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
++	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
++	SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
+@@ -5414,6 +5423,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
++	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
++	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
+index b120925223ae..51a684c6d8e3 100644
+--- a/sound/pci/intel8x0.c
++++ b/sound/pci/intel8x0.c
+@@ -2879,6 +2879,7 @@ static void intel8x0_measure_ac97_clock(struct intel8x0 *chip)
+ 
+ static struct snd_pci_quirk intel8x0_clock_list[] = {
+ 	SND_PCI_QUIRK(0x0e11, 0x008a, "AD1885", 41000),
++	SND_PCI_QUIRK(0x1014, 0x0581, "AD1981B", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x00be, "AD1885", 44100),
+ 	SND_PCI_QUIRK(0x1028, 0x0177, "AD1980", 48000),
+ 	SND_PCI_QUIRK(0x1028, 0x01ad, "AD1981B", 48000),
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 0450593980fd..86cf7b585e01 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -365,13 +365,15 @@ static int snd_usb_audio_create(struct usb_interface *intf,
+ 	}
+ 
+ 	mutex_init(&chip->mutex);
+-	init_rwsem(&chip->shutdown_rwsem);
++	init_waitqueue_head(&chip->shutdown_wait);
+ 	chip->index = idx;
+ 	chip->dev = dev;
+ 	chip->card = card;
+ 	chip->setup = device_setup[idx];
+ 	chip->autoclock = autoclock;
+ 	chip->probing = 1;
++	atomic_set(&chip->usage_count, 0);
++	atomic_set(&chip->shutdown, 0);
+ 
+ 	chip->usb_id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
+ 			      le16_to_cpu(dev->descriptor.idProduct));
+@@ -495,7 +497,7 @@ static int usb_audio_probe(struct usb_interface *intf,
+ 	mutex_lock(&register_mutex);
+ 	for (i = 0; i < SNDRV_CARDS; i++) {
+ 		if (usb_chip[i] && usb_chip[i]->dev == dev) {
+-			if (usb_chip[i]->shutdown) {
++			if (atomic_read(&usb_chip[i]->shutdown)) {
+ 				dev_err(&dev->dev, "USB device is in the shutdown state, cannot create a card instance\n");
+ 				err = -EIO;
+ 				goto __error;
+@@ -585,23 +587,23 @@ static void usb_audio_disconnect(struct usb_interface *intf)
+ 	struct snd_usb_audio *chip = usb_get_intfdata(intf);
+ 	struct snd_card *card;
+ 	struct list_head *p;
+-	bool was_shutdown;
+ 
+ 	if (chip == (void *)-1L)
+ 		return;
+ 
+ 	card = chip->card;
+-	down_write(&chip->shutdown_rwsem);
+-	was_shutdown = chip->shutdown;
+-	chip->shutdown = 1;
+-	up_write(&chip->shutdown_rwsem);
+ 
+ 	mutex_lock(&register_mutex);
+-	if (!was_shutdown) {
++	if (atomic_inc_return(&chip->shutdown) == 1) {
+ 		struct snd_usb_stream *as;
+ 		struct snd_usb_endpoint *ep;
+ 		struct usb_mixer_interface *mixer;
+ 
++		/* wait until all pending tasks are done;
++		 * they are protected by snd_usb_lock_shutdown()
++		 */
++		wait_event(chip->shutdown_wait,
++			   !atomic_read(&chip->usage_count));
+ 		snd_card_disconnect(card);
+ 		/* release the pcm resources */
+ 		list_for_each_entry(as, &chip->pcm_list, list) {
+@@ -631,28 +633,56 @@ static void usb_audio_disconnect(struct usb_interface *intf)
+ 	}
+ }
+ 
+-#ifdef CONFIG_PM
+-
+-int snd_usb_autoresume(struct snd_usb_audio *chip)
++/* lock the shutdown (disconnect) task and autoresume */
++int snd_usb_lock_shutdown(struct snd_usb_audio *chip)
+ {
+-	int err = -ENODEV;
++	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->probing || chip->in_pm)
+-		err = 0;
+-	else if (!chip->shutdown)
+-		err = usb_autopm_get_interface(chip->pm_intf);
+-	up_read(&chip->shutdown_rwsem);
++	atomic_inc(&chip->usage_count);
++	if (atomic_read(&chip->shutdown)) {
++		err = -EIO;
++		goto error;
++	}
++	err = snd_usb_autoresume(chip);
++	if (err < 0)
++		goto error;
++	return 0;
+ 
++ error:
++	if (atomic_dec_and_test(&chip->usage_count))
++		wake_up(&chip->shutdown_wait);
+ 	return err;
+ }
+ 
++/* autosuspend and unlock the shutdown */
++void snd_usb_unlock_shutdown(struct snd_usb_audio *chip)
++{
++	snd_usb_autosuspend(chip);
++	if (atomic_dec_and_test(&chip->usage_count))
++		wake_up(&chip->shutdown_wait);
++}
++
++#ifdef CONFIG_PM
++
++int snd_usb_autoresume(struct snd_usb_audio *chip)
++{
++	if (atomic_read(&chip->shutdown))
++		return -EIO;
++	if (chip->probing)
++		return 0;
++	if (atomic_inc_return(&chip->active) == 1)
++		return usb_autopm_get_interface(chip->pm_intf);
++	return 0;
++}
++
+ void snd_usb_autosuspend(struct snd_usb_audio *chip)
+ {
+-	down_read(&chip->shutdown_rwsem);
+-	if (!chip->shutdown && !chip->probing && !chip->in_pm)
++	if (chip->probing)
++		return;
++	if (atomic_read(&chip->shutdown))
++		return;
++	if (atomic_dec_and_test(&chip->active))
+ 		usb_autopm_put_interface(chip->pm_intf);
+-	up_read(&chip->shutdown_rwsem);
+ }
+ 
+ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
+@@ -705,7 +735,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ 	if (--chip->num_suspended_intf)
+ 		return 0;
+ 
+-	chip->in_pm = 1;
++	atomic_inc(&chip->active); /* avoid autopm */
+ 	/*
+ 	 * ALSA leaves material resumption to user space
+ 	 * we just notify and restart the mixers
+@@ -725,7 +755,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+ 	chip->autosuspended = 0;
+ 
+ err_out:
+-	chip->in_pm = 0;
++	atomic_dec(&chip->active); /* allow autopm after this point */
+ 	return err;
+ }
+ 
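
The card.c changes above replace the old shutdown rwsem with a counting protocol: callers pin the device with snd_usb_lock_shutdown() and release it with snd_usb_unlock_shutdown(), while disconnect latches the atomic shutdown flag and sleeps on shutdown_wait until usage_count drains to zero. A minimal sketch of a caller, modeled on the mixer_quirks.c conversions later in this patch (the vendor request and its values are made up for illustration):

	static int example_vendor_update(struct snd_usb_audio *chip)
	{
		int err;

		/* pin the device: bumps usage_count and autoresumes */
		err = snd_usb_lock_shutdown(chip);
		if (err < 0)
			return err;	/* already disconnected, or resume failed */

		/* safe to talk to chip->dev until the unlock below */
		err = snd_usb_ctl_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
				      0x01, USB_DIR_OUT | USB_TYPE_VENDOR,
				      0, 0, NULL, 0);

		/* autosuspend and, if we were the last user, wake shutdown_wait */
		snd_usb_unlock_shutdown(chip);
		return err;
	}

Note that the error path of snd_usb_lock_shutdown() also performs the dec-and-wake, which is what keeps the disconnect-side wait_event() from stalling when a late caller races with device removal.
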
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 2ed260b10f6d..7ccbcaf6a147 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -285,6 +285,8 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[3];
+ 	int err, crate;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	/* if endpoint doesn't have sampling rate control, bail out */
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 03b074419964..c2131b851602 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -355,8 +355,10 @@ static void snd_complete_urb(struct urb *urb)
+ 	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
+ 		     urb->status == -ENODEV ||		/* device removed */
+ 		     urb->status == -ECONNRESET ||	/* unlinked */
+-		     urb->status == -ESHUTDOWN ||	/* device disabled */
+-		     ep->chip->shutdown))		/* device disconnected */
++		     urb->status == -ESHUTDOWN))	/* device disabled */
++		goto exit_clear;
++	/* device disconnected */
++	if (unlikely(atomic_read(&ep->chip->shutdown)))
+ 		goto exit_clear;
+ 
+ 	if (usb_pipeout(ep->pipe)) {
+@@ -413,6 +415,9 @@ exit_clear:
+  *
+  * New endpoints will be added to chip->ep_list and must be freed by
+  * calling snd_usb_endpoint_free().
++ *
++ * For SND_USB_ENDPOINT_TYPE_SYNC, the caller needs to guarantee that
++ * bNumEndpoints > 1 beforehand.
+  */
+ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
+ 					      struct usb_host_interface *alts,
+@@ -529,7 +534,7 @@ static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
+ {
+ 	unsigned int i;
+ 
+-	if (!force && ep->chip->shutdown) /* to be sure... */
++	if (!force && atomic_read(&ep->chip->shutdown)) /* to be sure... */
+ 		return -EBADFD;
+ 
+ 	clear_bit(EP_FLAG_RUNNING, &ep->flags);
+@@ -868,7 +873,7 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
+ 	int err;
+ 	unsigned int i;
+ 
+-	if (ep->chip->shutdown)
++	if (atomic_read(&ep->chip->shutdown))
+ 		return -EBADFD;
+ 
+ 	/* already running? */
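
The new comment on snd_usb_add_endpoint() shifts endpoint-count validation to its callers; the clock.c hunk above and the pcm.c hunk later in this patch both add the same guard before dereferencing the first endpoint:

	/* caller-side guard, as added in set_sample_rate_v1() and init_pitch_v1() */
	if (get_iface_desc(alts)->bNumEndpoints < 1)
		return -EINVAL;
	ep = get_endpoint(alts, 0)->bEndpointAddress;
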
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index f9a9752d4dbc..e0fc02763024 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -282,6 +282,21 @@ static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
+ 	return val;
+ }
+ 
++static int uac2_ctl_value_size(int val_type)
++{
++	switch (val_type) {
++	case USB_MIXER_S32:
++	case USB_MIXER_U32:
++		return 4;
++	case USB_MIXER_S16:
++	case USB_MIXER_U16:
++		return 2;
++	default:
++		return 1;
++	}
++	return 0; /* unreachable */
++}
++
+ 
+ /*
+  * retrieve a mixer value
+@@ -296,14 +311,11 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
+ 	int timeout = 10;
+ 	int idx = 0, err;
+ 
+-	err = snd_usb_autoresume(chip);
++	err = snd_usb_lock_shutdown(chip);
+ 	if (err < 0)
+ 		return -EIO;
+ 
+-	down_read(&chip->shutdown_rwsem);
+ 	while (timeout-- > 0) {
+-		if (chip->shutdown)
+-			break;
+ 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ 		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
+ 				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+@@ -319,8 +331,7 @@ static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request,
+ 	err = -EINVAL;
+ 
+  out:
+-	up_read(&chip->shutdown_rwsem);
+-	snd_usb_autosuspend(chip);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -328,14 +339,14 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
+ 			    int validx, int *value_ret)
+ {
+ 	struct snd_usb_audio *chip = cval->head.mixer->chip;
+-	unsigned char buf[2 + 3 * sizeof(__u16)]; /* enough space for one range */
++	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
+ 	unsigned char *val;
+ 	int idx = 0, ret, size;
+ 	__u8 bRequest;
+ 
+ 	if (request == UAC_GET_CUR) {
+ 		bRequest = UAC2_CS_CUR;
+-		size = sizeof(__u16);
++		size = uac2_ctl_value_size(cval->val_type);
+ 	} else {
+ 		bRequest = UAC2_CS_RANGE;
+ 		size = sizeof(buf);
+@@ -343,21 +354,15 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
+ 
+ 	memset(buf, 0, sizeof(buf));
+ 
+-	ret = snd_usb_autoresume(chip) ? -EIO : 0;
++	ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
+ 	if (ret)
+ 		goto error;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		ret = -ENODEV;
+-	} else {
+-		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+-		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
++	idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
++	ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
+ 			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ 			      validx, idx, buf, size);
+-	}
+-	up_read(&chip->shutdown_rwsem);
+-	snd_usb_autosuspend(chip);
++	snd_usb_unlock_shutdown(chip);
+ 
+ 	if (ret < 0) {
+ error:
+@@ -446,7 +451,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ 				int request, int validx, int value_set)
+ {
+ 	struct snd_usb_audio *chip = cval->head.mixer->chip;
+-	unsigned char buf[2];
++	unsigned char buf[4];
+ 	int idx = 0, val_len, err, timeout = 10;
+ 
+ 	validx += cval->idx_off;
+@@ -454,8 +459,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ 	if (cval->head.mixer->protocol == UAC_VERSION_1) {
+ 		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
+ 	} else { /* UAC_VERSION_2 */
+-		/* audio class v2 controls are always 2 bytes in size */
+-		val_len = sizeof(__u16);
++		val_len = uac2_ctl_value_size(cval->val_type);
+ 
+ 		/* FIXME */
+ 		if (request != UAC_SET_CUR) {
+@@ -469,13 +473,14 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ 	value_set = convert_bytes_value(cval, value_set);
+ 	buf[0] = value_set & 0xff;
+ 	buf[1] = (value_set >> 8) & 0xff;
+-	err = snd_usb_autoresume(chip);
++	buf[2] = (value_set >> 16) & 0xff;
++	buf[3] = (value_set >> 24) & 0xff;
++
++	err = snd_usb_lock_shutdown(chip);
+ 	if (err < 0)
+ 		return -EIO;
+-	down_read(&chip->shutdown_rwsem);
++
+ 	while (timeout-- > 0) {
+-		if (chip->shutdown)
+-			break;
+ 		idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ 		if (snd_usb_ctl_msg(chip->dev,
+ 				    usb_sndctrlpipe(chip->dev, 0), request,
+@@ -490,8 +495,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
+ 	err = -EINVAL;
+ 
+  out:
+-	up_read(&chip->shutdown_rwsem);
+-	snd_usb_autosuspend(chip);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -798,24 +802,25 @@ static int check_input_term(struct mixer_build *state, int id,
+ /* feature unit control information */
+ struct usb_feature_control_info {
+ 	const char *name;
+-	unsigned int type;	/* control type (mute, volume, etc.) */
++	int type;	/* data type for uac1 */
++	int type_uac2;	/* data type for uac2 if different from uac1, else -1 */
+ };
+ 
+ static struct usb_feature_control_info audio_feature_info[] = {
+-	{ "Mute",			USB_MIXER_INV_BOOLEAN },
+-	{ "Volume",			USB_MIXER_S16 },
+-	{ "Tone Control - Bass",	USB_MIXER_S8 },
+-	{ "Tone Control - Mid",		USB_MIXER_S8 },
+-	{ "Tone Control - Treble",	USB_MIXER_S8 },
+-	{ "Graphic Equalizer",		USB_MIXER_S8 }, /* FIXME: not implemeted yet */
+-	{ "Auto Gain Control",		USB_MIXER_BOOLEAN },
+-	{ "Delay Control",		USB_MIXER_U16 },
+-	{ "Bass Boost",			USB_MIXER_BOOLEAN },
+-	{ "Loudness",			USB_MIXER_BOOLEAN },
++	{ "Mute",			USB_MIXER_INV_BOOLEAN, -1 },
++	{ "Volume",			USB_MIXER_S16, -1 },
++	{ "Tone Control - Bass",	USB_MIXER_S8, -1 },
++	{ "Tone Control - Mid",		USB_MIXER_S8, -1 },
++	{ "Tone Control - Treble",	USB_MIXER_S8, -1 },
++	{ "Graphic Equalizer",		USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
++	{ "Auto Gain Control",		USB_MIXER_BOOLEAN, -1 },
++	{ "Delay Control",		USB_MIXER_U16, USB_MIXER_U32 },
++	{ "Bass Boost",			USB_MIXER_BOOLEAN, -1 },
++	{ "Loudness",			USB_MIXER_BOOLEAN, -1 },
+ 	/* UAC2 specific */
+-	{ "Input Gain Control",		USB_MIXER_U16 },
+-	{ "Input Gain Pad Control",	USB_MIXER_BOOLEAN },
+-	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN },
++	{ "Input Gain Control",		USB_MIXER_S16, -1 },
++	{ "Input Gain Pad Control",	USB_MIXER_S16, -1 },
++	{ "Phase Inverter Control",	USB_MIXER_BOOLEAN, -1 },
+ };
+ 
+ /* private_free callback */
+@@ -1215,6 +1220,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 			      int readonly_mask)
+ {
+ 	struct uac_feature_unit_descriptor *desc = raw_desc;
++	struct usb_feature_control_info *ctl_info;
+ 	unsigned int len = 0;
+ 	int mapped_name = 0;
+ 	int nameid = uac_feature_unit_iFeature(desc);
+@@ -1240,7 +1246,13 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 	snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
+ 	cval->control = control;
+ 	cval->cmask = ctl_mask;
+-	cval->val_type = audio_feature_info[control-1].type;
++	ctl_info = &audio_feature_info[control-1];
++	if (state->mixer->protocol == UAC_VERSION_1)
++		cval->val_type = ctl_info->type;
++	else /* UAC_VERSION_2 */
++		cval->val_type = ctl_info->type_uac2 >= 0 ?
++			ctl_info->type_uac2 : ctl_info->type;
++
+ 	if (ctl_mask == 0) {
+ 		cval->channels = 1;	/* master channel */
+ 		cval->master_readonly = readonly_mask;
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index d3268f0ee2b3..3417ef347e40 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -33,6 +33,8 @@ enum {
+ 	USB_MIXER_U8,
+ 	USB_MIXER_S16,
+ 	USB_MIXER_U16,
++	USB_MIXER_S32,
++	USB_MIXER_U32,
+ };
+ 
+ typedef void (*usb_mixer_elem_dump_func_t)(struct snd_info_buffer *buffer,
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index db9547d04f38..940442848fc8 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -309,11 +309,10 @@ static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer,
+ 	struct snd_usb_audio *chip = mixer->chip;
+ 	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		err = -ENODEV;
+-		goto out;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
++
+ 	if (chip->usb_id == USB_ID(0x041e, 0x3042))
+ 		err = snd_usb_ctl_msg(chip->dev,
+ 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
+@@ -330,8 +329,7 @@ static int snd_audigy2nx_led_update(struct usb_mixer_interface *mixer,
+ 			      usb_sndctrlpipe(chip->dev, 0), 0x24,
+ 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 			      value, index + 2, NULL, 0);
+- out:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -442,16 +440,15 @@ static void snd_audigy2nx_proc_read(struct snd_info_entry *entry,
+ 
+ 	for (i = 0; jacks[i].name; ++i) {
+ 		snd_iprintf(buffer, "%s: ", jacks[i].name);
+-		down_read(&mixer->chip->shutdown_rwsem);
+-		if (mixer->chip->shutdown)
+-			err = 0;
+-		else
+-			err = snd_usb_ctl_msg(mixer->chip->dev,
++		err = snd_usb_lock_shutdown(mixer->chip);
++		if (err < 0)
++			return;
++		err = snd_usb_ctl_msg(mixer->chip->dev,
+ 				      usb_rcvctrlpipe(mixer->chip->dev, 0),
+ 				      UAC_GET_MEM, USB_DIR_IN | USB_TYPE_CLASS |
+ 				      USB_RECIP_INTERFACE, 0,
+ 				      jacks[i].unitid << 8, buf, 3);
+-		up_read(&mixer->chip->shutdown_rwsem);
++		snd_usb_unlock_shutdown(mixer->chip);
+ 		if (err == 3 && (buf[0] == 3 || buf[0] == 6))
+ 			snd_iprintf(buffer, "%02x %02x\n", buf[1], buf[2]);
+ 		else
+@@ -482,11 +479,9 @@ static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
+ 	int err;
+ 	unsigned char buf[2];
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (mixer->chip->shutdown) {
+-		err = -ENODEV;
+-		goto out;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
+ 
+ 	buf[0] = 0x01;
+ 	buf[1] = value ? 0x02 : 0x01;
+@@ -494,8 +489,7 @@ static int snd_emu0204_ch_switch_update(struct usb_mixer_interface *mixer,
+ 		      usb_sndctrlpipe(chip->dev, 0), UAC_SET_CUR,
+ 		      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+ 		      0x0400, 0x0e00, buf, 2);
+- out:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -555,15 +549,14 @@ static int snd_xonar_u1_switch_update(struct usb_mixer_interface *mixer,
+ 	struct snd_usb_audio *chip = mixer->chip;
+ 	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown)
+-		err = -ENODEV;
+-	else
+-		err = snd_usb_ctl_msg(chip->dev,
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
++	err = snd_usb_ctl_msg(chip->dev,
+ 			      usb_sndctrlpipe(chip->dev, 0), 0x08,
+ 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+ 			      50, 0, &status, 1);
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -624,11 +617,9 @@ static int snd_mbox1_switch_update(struct usb_mixer_interface *mixer, int val)
+ 	int err;
+ 	unsigned char buff[3];
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		err = -ENODEV;
+-		goto err;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
+ 
+ 	/* Prepare for magic command to toggle clock source */
+ 	err = snd_usb_ctl_msg(chip->dev,
+@@ -684,7 +675,7 @@ static int snd_mbox1_switch_update(struct usb_mixer_interface *mixer, int val)
+ 		goto err;
+ 
+ err:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -779,15 +770,14 @@ static int snd_ni_update_cur_val(struct usb_mixer_elem_list *list)
+ 	unsigned int pval = list->kctl->private_value;
+ 	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown)
+-		err = -ENODEV;
+-	else
+-		err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
+-				      (pval >> 16) & 0xff,
+-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+-				      pval >> 24, pval & 0xffff, NULL, 0, 1000);
+-	up_read(&chip->shutdown_rwsem);
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
++	err = usb_control_msg(chip->dev, usb_sndctrlpipe(chip->dev, 0),
++			      (pval >> 16) & 0xff,
++			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
++			      pval >> 24, pval & 0xffff, NULL, 0, 1000);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -945,18 +935,17 @@ static int snd_ftu_eff_switch_update(struct usb_mixer_elem_list *list)
+ 	value[0] = pval >> 24;
+ 	value[1] = 0;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown)
+-		err = -ENODEV;
+-	else
+-		err = snd_usb_ctl_msg(chip->dev,
+-				      usb_sndctrlpipe(chip->dev, 0),
+-				      UAC_SET_CUR,
+-				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
+-				      pval & 0xff00,
+-				      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
+-				      value, 2);
+-	up_read(&chip->shutdown_rwsem);
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
++	err = snd_usb_ctl_msg(chip->dev,
++			      usb_sndctrlpipe(chip->dev, 0),
++			      UAC_SET_CUR,
++			      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
++			      pval & 0xff00,
++			      snd_usb_ctrl_intf(chip) | ((pval & 0xff) << 8),
++			      value, 2);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -1520,11 +1509,9 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 	unsigned char data[3];
+ 	int rate;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		err = -ENODEV;
+-		goto end;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
+ 
+ 	ucontrol->value.iec958.status[0] = kcontrol->private_value & 0xff;
+ 	ucontrol->value.iec958.status[1] = (kcontrol->private_value >> 8) & 0xff;
+@@ -1532,7 +1519,11 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	/* use known values for that card: interface#1 altsetting#1 */
+ 	iface = usb_ifnum_to_if(chip->dev, 1);
++	if (!iface || iface->num_altsetting < 2)
++		return -EINVAL;
+ 	alts = &iface->altsetting[1];
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+@@ -1552,7 +1543,7 @@ static int snd_microii_spdif_default_get(struct snd_kcontrol *kcontrol,
+ 
+ 	err = 0;
+  end:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -1563,11 +1554,9 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list)
+ 	u8 reg;
+ 	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		err = -ENODEV;
+-		goto end;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
+ 
+ 	reg = ((pval >> 4) & 0xf0) | (pval & 0x0f);
+ 	err = snd_usb_ctl_msg(chip->dev,
+@@ -1595,7 +1584,7 @@ static int snd_microii_spdif_default_update(struct usb_mixer_elem_list *list)
+ 		goto end;
+ 
+  end:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+@@ -1651,11 +1640,9 @@ static int snd_microii_spdif_switch_update(struct usb_mixer_elem_list *list)
+ 	u8 reg = list->kctl->private_value;
+ 	int err;
+ 
+-	down_read(&chip->shutdown_rwsem);
+-	if (chip->shutdown) {
+-		err = -ENODEV;
+-		goto end;
+-	}
++	err = snd_usb_lock_shutdown(chip);
++	if (err < 0)
++		return err;
+ 
+ 	err = snd_usb_ctl_msg(chip->dev,
+ 			usb_sndctrlpipe(chip->dev, 0),
+@@ -1666,8 +1653,7 @@ static int snd_microii_spdif_switch_update(struct usb_mixer_elem_list *list)
+ 			NULL,
+ 			0);
+ 
+- end:
+-	up_read(&chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(chip);
+ 	return err;
+ }
+ 
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index b4ef410e5a98..a51155197277 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -80,7 +80,7 @@ static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream
+ 	unsigned int hwptr_done;
+ 
+ 	subs = (struct snd_usb_substream *)substream->runtime->private_data;
+-	if (subs->stream->chip->shutdown)
++	if (atomic_read(&subs->stream->chip->shutdown))
+ 		return SNDRV_PCM_POS_XRUN;
+ 	spin_lock(&subs->lock);
+ 	hwptr_done = subs->hwptr_done;
+@@ -159,6 +159,8 @@ static int init_pitch_v1(struct snd_usb_audio *chip, int iface,
+ 	unsigned char data[1];
+ 	int err;
+ 
++	if (get_iface_desc(alts)->bNumEndpoints < 1)
++		return -EINVAL;
+ 	ep = get_endpoint(alts, 0)->bEndpointAddress;
+ 
+ 	data[0] = 1;
+@@ -707,12 +709,11 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	down_read(&subs->stream->chip->shutdown_rwsem);
+-	if (subs->stream->chip->shutdown)
+-		ret = -ENODEV;
+-	else
+-		ret = set_format(subs, fmt);
+-	up_read(&subs->stream->chip->shutdown_rwsem);
++	ret = snd_usb_lock_shutdown(subs->stream->chip);
++	if (ret < 0)
++		return ret;
++	ret = set_format(subs, fmt);
++	snd_usb_unlock_shutdown(subs->stream->chip);
+ 	if (ret < 0)
+ 		return ret;
+ 
+@@ -735,13 +736,12 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
+ 	subs->cur_audiofmt = NULL;
+ 	subs->cur_rate = 0;
+ 	subs->period_bytes = 0;
+-	down_read(&subs->stream->chip->shutdown_rwsem);
+-	if (!subs->stream->chip->shutdown) {
++	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
+ 		stop_endpoints(subs, true);
+ 		snd_usb_endpoint_deactivate(subs->sync_endpoint);
+ 		snd_usb_endpoint_deactivate(subs->data_endpoint);
++		snd_usb_unlock_shutdown(subs->stream->chip);
+ 	}
+-	up_read(&subs->stream->chip->shutdown_rwsem);
+ 	return snd_pcm_lib_free_vmalloc_buffer(substream);
+ }
+ 
+@@ -763,11 +763,9 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 		return -ENXIO;
+ 	}
+ 
+-	down_read(&subs->stream->chip->shutdown_rwsem);
+-	if (subs->stream->chip->shutdown) {
+-		ret = -ENODEV;
+-		goto unlock;
+-	}
++	ret = snd_usb_lock_shutdown(subs->stream->chip);
++	if (ret < 0)
++		return ret;
+ 	if (snd_BUG_ON(!subs->data_endpoint)) {
+ 		ret = -EIO;
+ 		goto unlock;
+@@ -816,7 +814,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
+ 		ret = start_endpoints(subs, true);
+ 
+  unlock:
+-	up_read(&subs->stream->chip->shutdown_rwsem);
++	snd_usb_unlock_shutdown(subs->stream->chip);
+ 	return ret;
+ }
+ 
+@@ -1218,9 +1216,11 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
+ 
+ 	stop_endpoints(subs, true);
+ 
+-	if (!as->chip->shutdown && subs->interface >= 0) {
++	if (subs->interface >= 0 &&
++	    !snd_usb_lock_shutdown(subs->stream->chip)) {
+ 		usb_set_interface(subs->dev, subs->interface, 0);
+ 		subs->interface = -1;
++		snd_usb_unlock_shutdown(subs->stream->chip);
+ 	}
+ 
+ 	subs->pcm_substream = NULL;
+diff --git a/sound/usb/proc.c b/sound/usb/proc.c
+index 5f761ab34c01..0ac89e294d31 100644
+--- a/sound/usb/proc.c
++++ b/sound/usb/proc.c
+@@ -46,14 +46,14 @@ static inline unsigned get_high_speed_hz(unsigned int usb_rate)
+ static void proc_audio_usbbus_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
+ {
+ 	struct snd_usb_audio *chip = entry->private_data;
+-	if (!chip->shutdown)
++	if (!atomic_read(&chip->shutdown))
+ 		snd_iprintf(buffer, "%03d/%03d\n", chip->dev->bus->busnum, chip->dev->devnum);
+ }
+ 
+ static void proc_audio_usbid_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer)
+ {
+ 	struct snd_usb_audio *chip = entry->private_data;
+-	if (!chip->shutdown)
++	if (!atomic_read(&chip->shutdown))
+ 		snd_iprintf(buffer, "%04x:%04x\n", 
+ 			    USB_ID_VENDOR(chip->usb_id),
+ 			    USB_ID_PRODUCT(chip->usb_id));
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index a4d03e5da3e0..5fb308d39e2a 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -177,6 +177,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	}
+ 	alts = &iface->altsetting[fp->altset_idx];
+ 	altsd = get_iface_desc(alts);
++	if (altsd->bNumEndpoints < 1) {
++		kfree(fp);
++		kfree(rate_table);
++		return -EINVAL;
++	}
++
+ 	fp->protocol = altsd->bInterfaceProtocol;
+ 
+ 	if (fp->datainterval == 0)
+@@ -1118,6 +1124,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	switch (chip->usb_id) {
+ 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
+ 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
++	case USB_ID(0x045E, 0x076E): /* MS Lifecam HD-5001 */
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 991aa84491cd..66cba05258af 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -37,11 +37,12 @@ struct snd_usb_audio {
+ 	struct usb_interface *pm_intf;
+ 	u32 usb_id;
+ 	struct mutex mutex;
+-	struct rw_semaphore shutdown_rwsem;
+-	unsigned int shutdown:1;
+ 	unsigned int probing:1;
+-	unsigned int in_pm:1;
+ 	unsigned int autosuspended:1;	
++	atomic_t active;
++	atomic_t shutdown;
++	atomic_t usage_count;
++	wait_queue_head_t shutdown_wait;
+ 	unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
+ 	
+ 	int num_interfaces;
+@@ -116,4 +117,7 @@ struct snd_usb_audio_quirk {
+ #define combine_triple(s)  (combine_word(s) | ((unsigned int)(s)[2] << 16))
+ #define combine_quad(s)    (combine_triple(s) | ((unsigned int)(s)[3] << 24))
+ 
++int snd_usb_lock_shutdown(struct snd_usb_audio *chip);
++void snd_usb_unlock_shutdown(struct snd_usb_audio *chip);
++
+ #endif /* __USBAUDIO_H */
+diff --git a/tools/hv/Makefile b/tools/hv/Makefile
+index a8ab79556926..a8c4644022a6 100644
+--- a/tools/hv/Makefile
++++ b/tools/hv/Makefile
+@@ -5,6 +5,8 @@ PTHREAD_LIBS = -lpthread
+ WARNINGS = -Wall -Wextra
+ CFLAGS = $(WARNINGS) -g $(PTHREAD_LIBS) $(shell getconf LFS_CFLAGS)
+ 
++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
++
+ all: hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ %: %.c
+ 	$(CC) $(CFLAGS) -o $@ $^
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 48411674da0f..8b02a4355659 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -263,13 +263,12 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ {
+ 	struct dirent *evt_ent;
+ 	DIR *event_dir;
+-	int ret = 0;
+ 
+ 	event_dir = opendir(dir);
+ 	if (!event_dir)
+ 		return -EINVAL;
+ 
+-	while (!ret && (evt_ent = readdir(event_dir))) {
++	while ((evt_ent = readdir(event_dir))) {
+ 		char path[PATH_MAX];
+ 		char *name = evt_ent->d_name;
+ 		FILE *file;
+@@ -285,17 +284,19 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
+ 
+ 		snprintf(path, PATH_MAX, "%s/%s", dir, name);
+ 
+-		ret = -EINVAL;
+ 		file = fopen(path, "r");
+-		if (!file)
+-			break;
++		if (!file) {
++			pr_debug("Cannot open %s\n", path);
++			continue;
++		}
+ 
+-		ret = perf_pmu__new_alias(head, dir, name, file);
++		if (perf_pmu__new_alias(head, dir, name, file) < 0)
++			pr_debug("Cannot set up %s\n", name);
+ 		fclose(file);
+ 	}
+ 
+ 	closedir(event_dir);
+-	return ret;
++	return 0;
+ }
+ 
+ /*
+diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
+index 77edcdcc016b..057278448515 100755
+--- a/tools/testing/selftests/efivarfs/efivarfs.sh
++++ b/tools/testing/selftests/efivarfs/efivarfs.sh
+@@ -88,7 +88,11 @@ test_delete()
+ 		exit 1
+ 	fi
+ 
+-	rm $file
++	rm $file 2>/dev/null
++	if [ $? -ne 0 ]; then
++		chattr -i $file
++		rm $file
++	fi
+ 
+ 	if [ -e $file ]; then
+ 		echo "$file couldn't be deleted" >&2
+@@ -111,6 +115,7 @@ test_zero_size_delete()
+ 		exit 1
+ 	fi
+ 
++	chattr -i $file
+ 	printf "$attrs" > $file
+ 
+ 	if [ -e $file ]; then
+@@ -141,7 +146,11 @@ test_valid_filenames()
+ 			echo "$file could not be created" >&2
+ 			ret=1
+ 		else
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 		fi
+ 	done
+ 
+@@ -174,7 +183,11 @@ test_invalid_filenames()
+ 
+ 		if [ -e $file ]; then
+ 			echo "Creating $file should have failed" >&2
+-			rm $file
++			rm $file 2>/dev/null
++			if [ $? -ne 0 ]; then
++				chattr -i $file
++				rm $file
++			fi
+ 			ret=1
+ 		fi
+ 	done
+diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
+index 8c0764407b3c..4af74f733036 100644
+--- a/tools/testing/selftests/efivarfs/open-unlink.c
++++ b/tools/testing/selftests/efivarfs/open-unlink.c
+@@ -1,10 +1,68 @@
++#include <errno.h>
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+ #include <unistd.h>
++#include <sys/ioctl.h>
+ #include <sys/types.h>
+ #include <sys/stat.h>
+ #include <fcntl.h>
++#include <linux/fs.h>
++
++static int set_immutable(const char *path, int immutable)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++
++	if (immutable)
++		flags |= FS_IMMUTABLE_FL;
++	else
++		flags &= ~FS_IMMUTABLE_FL;
++
++	rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
++	error = errno;
++	close(fd);
++	errno = error;
++	return rc;
++}
++
++static int get_immutable(const char *path)
++{
++	unsigned int flags;
++	int fd;
++	int rc;
++	int error;
++
++	fd = open(path, O_RDONLY);
++	if (fd < 0)
++		return fd;
++
++	rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
++	if (rc < 0) {
++		error = errno;
++		close(fd);
++		errno = error;
++		return rc;
++	}
++	close(fd);
++	if (flags & FS_IMMUTABLE_FL)
++		return 1;
++	return 0;
++}
+ 
+ int main(int argc, char **argv)
+ {
+@@ -27,7 +85,7 @@ int main(int argc, char **argv)
+ 	buf[4] = 0;
+ 
+ 	/* create a test variable */
+-	fd = open(path, O_WRONLY | O_CREAT);
++	fd = open(path, O_WRONLY | O_CREAT, 0600);
+ 	if (fd < 0) {
+ 		perror("open(O_WRONLY)");
+ 		return EXIT_FAILURE;
+@@ -41,6 +99,18 @@ int main(int argc, char **argv)
+ 
+ 	close(fd);
+ 
++	rc = get_immutable(path);
++	if (rc < 0) {
++		perror("ioctl(FS_IOC_GETFLAGS)");
++		return EXIT_FAILURE;
++	} else if (rc) {
++		rc = set_immutable(path, 0);
++		if (rc < 0) {
++			perror("ioctl(FS_IOC_SETFLAGS)");
++			return EXIT_FAILURE;
++		}
++	}
++
+ 	fd = open(path, O_RDONLY);
+ 	if (fd < 0) {
+ 		perror("open");
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 85422985235f..c2f87ff0061d 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -457,6 +457,16 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 	if (!kvm)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	spin_lock_init(&kvm->mmu_lock);
++	atomic_inc(&current->mm->mm_count);
++	kvm->mm = current->mm;
++	kvm_eventfd_init(kvm);
++	mutex_init(&kvm->lock);
++	mutex_init(&kvm->irq_lock);
++	mutex_init(&kvm->slots_lock);
++	atomic_set(&kvm->users_count, 1);
++	INIT_LIST_HEAD(&kvm->devices);
++
+ 	r = kvm_arch_init_vm(kvm, type);
+ 	if (r)
+ 		goto out_err_no_disable;
+@@ -494,16 +504,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
+ 			goto out_err;
+ 	}
+ 
+-	spin_lock_init(&kvm->mmu_lock);
+-	kvm->mm = current->mm;
+-	atomic_inc(&kvm->mm->mm_count);
+-	kvm_eventfd_init(kvm);
+-	mutex_init(&kvm->lock);
+-	mutex_init(&kvm->irq_lock);
+-	mutex_init(&kvm->slots_lock);
+-	atomic_set(&kvm->users_count, 1);
+-	INIT_LIST_HEAD(&kvm->devices);
+-
+ 	r = kvm_init_mmu_notifier(kvm);
+ 	if (r)
+ 		goto out_err;
+@@ -525,6 +525,7 @@ out_err_no_disable:
+ 		kfree(kvm->buses[i]);
+ 	kvfree(kvm->memslots);
+ 	kvm_arch_free_vm(kvm);
++	mmdrop(current->mm);
+ 	return ERR_PTR(r);
+ }
+ 
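
The kvm_main.c hunk hoists the mm pinning and the cheap field initialization ahead of kvm_arch_init_vm(), so every later failure exit sees fully initialized locks and lists; the new mmdrop() balances the early mm_count grab on the error path. A condensed model of the resulting shape (a sketch only -- the real function initializes more members and unwinds through several error labels):

	static struct kvm *sketch_create_vm(unsigned long type)
	{
		struct kvm *kvm = kvm_arch_alloc_vm();
		int r;

		if (!kvm)
			return ERR_PTR(-ENOMEM);

		atomic_inc(&current->mm->mm_count);	/* pinned before anything can fail */
		kvm->mm = current->mm;
		mutex_init(&kvm->lock);			/* ...and the other early init */

		r = kvm_arch_init_vm(kvm, type);
		if (r) {
			kvm_arch_free_vm(kvm);
			mmdrop(current->mm);		/* balances the early pin */
			return ERR_PTR(r);
		}
		return kvm;
	}
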


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2018-05-29 10:34 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2018-05-29 10:34 UTC (permalink / raw
  To: gentoo-commits

commit:     b9fd7bdba6609d0fa42485ee3cdee8f607a321ec
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 29 10:33:56 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 29 10:33:56 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b9fd7bdb

Linux patches 4.1.50, 4.1.51, 4.1.52

 0000_README             |    12 +
 1049_linux-4.1.50.patch | 18012 ++++++++++++++++++++++++++++++++++++++++++++++
 1050_linux-4.1.51.patch |  2698 +++++++
 1051_linux-4.1.52.patch | 15535 +++++++++++++++++++++++++++++++++++++++
 4 files changed, 36257 insertions(+)

diff --git a/0000_README b/0000_README
index 3abfafc..431a915 100644
--- a/0000_README
+++ b/0000_README
@@ -239,6 +239,18 @@ Patch:  1048_linux-4.1.49.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.49
 
+Patch:  1049_linux-4.1.50.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.50
+
+Patch:  1050_linux-4.1.51.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.51
+
+Patch:  1051_linux-4.1.52.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.52
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1049_linux-4.1.50.patch b/1049_linux-4.1.50.patch
new file mode 100644
index 0000000..95262b7
--- /dev/null
+++ b/1049_linux-4.1.50.patch
@@ -0,0 +1,18012 @@
+diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
+index c261598164a7..17d43ca27f41 100644
+--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
++++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
+@@ -58,6 +58,6 @@ Example:
+ 		interrupts = <0 35 0x4>;
+ 		status = "disabled";
+ 		dmas = <&dmahost 12 0 1>,
+-			<&dmahost 13 0 1 0>;
++			<&dmahost 13 1 0>;
+ 		dma-names = "rx", "rx";
+ 	};
+diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
+index 6c0108eb0137..2139ea253142 100644
+--- a/Documentation/filesystems/ext4.txt
++++ b/Documentation/filesystems/ext4.txt
+@@ -233,7 +233,7 @@ data_err=ignore(*)	Just print an error message if an error occurs
+ data_err=abort		Abort the journal if an error occurs in a file
+ 			data buffer in ordered mode.
+ 
+-grpid			Give objects the same group ID as their creator.
++grpid			New objects have the group ID of their parent.
+ bsdgroups
+ 
+ nogrpid		(*)	New objects have the group ID of their creator.
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 08dc303d0d47..19e9f2e77bdf 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2435,6 +2435,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 
+ 	nointroute	[IA-64]
+ 
++	noinvpcid	[X86] Disable the INVPCID cpu feature.
++
+ 	nojitter	[IA-64] Disables jitter checking for ITC timers.
+ 
+ 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
+@@ -2469,11 +2471,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 	nopat		[X86] Disable PAT (page attribute table extension of
+ 			pagetables) support.
+ 
++	nopcid		[X86-64] Disable the PCID cpu feature.
++
+ 	norandmaps	Don't use address space randomization.  Equivalent to
+ 			echo 0 > /proc/sys/kernel/randomize_va_space
+ 
+-	noreplace-paravirt	[X86,IA-64,PV_OPS] Don't patch paravirt_ops
+-
+ 	noreplace-smp	[X86-32,SMP] Don't replace SMP instructions
+ 			with UP alternatives
+ 
+diff --git a/Documentation/speculation.txt b/Documentation/speculation.txt
+new file mode 100644
+index 000000000000..e9e6cbae2841
+--- /dev/null
++++ b/Documentation/speculation.txt
+@@ -0,0 +1,90 @@
++This document explains potential effects of speculation, and how undesirable
++effects can be mitigated portably using common APIs.
++
++===========
++Speculation
++===========
++
++To improve performance and minimize average latencies, many contemporary CPUs
++employ speculative execution techniques such as branch prediction, performing
++work which may be discarded at a later stage.
++
++Typically speculative execution cannot be observed from architectural state,
++such as the contents of registers. However, in some cases it is possible to
++observe its impact on microarchitectural state, such as the presence or
++absence of data in caches. Such state may form side-channels which can be
++observed to extract secret information.
++
++For example, in the presence of branch prediction, it is possible for bounds
++checks to be ignored by code which is speculatively executed. Consider the
++following code:
++
++	int load_array(int *array, unsigned int index)
++	{
++		if (index >= MAX_ARRAY_ELEMS)
++			return 0;
++		else
++			return array[index];
++	}
++
++Which, on arm64, may be compiled to an assembly sequence such as:
++
++	CMP	<index>, #MAX_ARRAY_ELEMS
++	B.LT	less
++	MOV	<returnval>, #0
++	RET
++  less:
++	LDR	<returnval>, [<array>, <index>]
++	RET
++
++It is possible that a CPU mis-predicts the conditional branch, and
++speculatively loads array[index], even if index >= MAX_ARRAY_ELEMS. This
++value will subsequently be discarded, but the speculated load may affect
++microarchitectural state which can be subsequently measured.
++
++More complex sequences involving multiple dependent memory accesses may
++result in sensitive information being leaked. Consider the following
++code, building on the prior example:
++
++	int load_dependent_arrays(int *arr1, int *arr2, int index)
++	{
++		int val1, val2,
++		int val1, val2;
++		val1 = load_array(arr1, index);
++		val2 = load_array(arr2, val1);
++
++		return val2;
++	}
++
++Under speculation, the first call to load_array() may return the value
++of an out-of-bounds address, while the second call will influence
++microarchitectural state dependent on this value. This may provide an
++arbitrary read primitive.
++
++====================================
++Mitigating speculation side-channels
++====================================
++
++The kernel provides a generic API to ensure that bounds checks are
++respected even under speculation. Architectures which are affected by
++speculation-based side-channels are expected to implement these
++primitives.
++
++The array_index_nospec() helper in <linux/nospec.h> can be used to
++prevent information from being leaked via side-channels.
++
++A call to array_index_nospec(index, size) returns a sanitized index
++value that is bounded to [0, size) even under cpu speculation
++conditions.
++
++This can be used to protect the earlier load_array() example:
++
++	int load_array(int *array, unsigned int index)
++	{
++		if (index >= MAX_ARRAY_ELEMS)
++			return 0;
++		else {
++			index = array_index_nospec(index, MAX_ARRAY_ELEMS);
++			return array[index];
++		}
++	}
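
The helper itself is left opaque by the document above; its generic fallback in <linux/nospec.h> computes a branch-free mask roughly as below (a sketch of the portable variant -- architectures may override it with barrier-based versions, and the real array_index_nospec() macro wraps this in type-preserving glue):

	/* all-ones when 0 <= index < size (size <= LONG_MAX), all-zeroes
	 * otherwise, with no conditional branch for the CPU to predict */
	static inline unsigned long array_index_mask_nospec(unsigned long index,
							    unsigned long size)
	{
		return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
	}

The protected load then becomes array[index & mask], which is, in effect, what indexing through array_index_nospec() reduces to.
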
+diff --git a/Makefile b/Makefile
+index a51938e99e37..a655f63aedeb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 49
++SUBLEVEL = 50
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -772,6 +772,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+ # disable invalid "can't wrap" optimizations for signed / pointers
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
+ 
++# Make sure -fstack-check isn't enabled (like gentoo apparently did)
++KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
++
+ # conserve stack if available
+ KBUILD_CFLAGS   += $(call cc-option,-fconserve-stack)
+ 
+diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
+index 4c51c05333c6..4cafffa80e2c 100644
+--- a/arch/alpha/include/asm/mmu_context.h
++++ b/arch/alpha/include/asm/mmu_context.h
+@@ -7,6 +7,7 @@
+  * Copyright (C) 1996, Linus Torvalds
+  */
+ 
++#include <linux/sched.h>
+ #include <asm/machvec.h>
+ #include <asm/compiler.h>
+ #include <asm-generic/mm_hooks.h>
+diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
+index 2b0ac429f5eb..412bb3c24f36 100644
+--- a/arch/alpha/kernel/pci_impl.h
++++ b/arch/alpha/kernel/pci_impl.h
+@@ -143,7 +143,8 @@ struct pci_iommu_arena
+ };
+ 
+ #if defined(CONFIG_ALPHA_SRM) && \
+-    (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
++    (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA) || \
++     defined(CONFIG_ALPHA_AVANTI))
+ # define NEED_SRM_SAVE_RESTORE
+ #else
+ # undef NEED_SRM_SAVE_RESTORE
+diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
+index 84d13263ce46..8095fb2c5c94 100644
+--- a/arch/alpha/kernel/process.c
++++ b/arch/alpha/kernel/process.c
+@@ -273,12 +273,13 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
+ 	   application calling fork.  */
+ 	if (clone_flags & CLONE_SETTLS)
+ 		childti->pcb.unique = regs->r20;
++	else
++		regs->r20 = 0;	/* OSF/1 has some strange fork() semantics.  */
+ 	childti->pcb.usp = usp ?: rdusp();
+ 	*childregs = *regs;
+ 	childregs->r0 = 0;
+ 	childregs->r19 = 0;
+ 	childregs->r20 = 1;	/* OSF/1 has some strange fork() semantics.  */
+-	regs->r20 = 0;
+ 	stack = ((struct switch_stack *) regs) - 1;
+ 	*childstack = *stack;
+ 	childstack->r26 = (unsigned long) ret_from_fork;
+diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
+index 156d05efcb70..01288546bda1 100644
+--- a/arch/arm/boot/dts/am335x-evmsk.dts
++++ b/arch/arm/boot/dts/am335x-evmsk.dts
+@@ -646,6 +646,7 @@
+ 	ti,non-removable;
+ 	bus-width = <4>;
+ 	cap-power-off-card;
++	keep-power-in-suspend;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&mmc2_pins>;
+ 
+diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
+index c80a3e233792..96222885ad27 100644
+--- a/arch/arm/boot/dts/am4372.dtsi
++++ b/arch/arm/boot/dts/am4372.dtsi
+@@ -750,7 +750,8 @@
+ 			reg = <0x48038000 0x2000>,
+ 			      <0x46000000 0x400000>;
+ 			reg-names = "mpu", "dat";
+-			interrupts = <80>, <81>;
++			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tx", "rx";
+ 			status = "disabled";
+ 			dmas = <&edma 8>,
+@@ -764,7 +765,8 @@
+ 			reg = <0x4803C000 0x2000>,
+ 			      <0x46400000 0x400000>;
+ 			reg-names = "mpu", "dat";
+-			interrupts = <82>, <83>;
++			interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
++				     <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ 			interrupt-names = "tx", "rx";
+ 			status = "disabled";
+ 			dmas = <&edma 10>,
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index bc04b754fe36..a13618266234 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -216,6 +216,7 @@
+ 				device_type = "pci";
+ 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
+ 					  0x82000000 0 0x20013000 0x13000 0 0xffed000>;
++				bus-range = <0x00 0xff>;
+ 				#interrupt-cells = <1>;
+ 				num-lanes = <1>;
+ 				ti,hwmods = "pcie1";
+@@ -251,6 +252,7 @@
+ 				device_type = "pci";
+ 				ranges = <0x81000000 0 0          0x03000 0 0x00010000
+ 					  0x82000000 0 0x30013000 0x13000 0 0xffed000>;
++				bus-range = <0x00 0xff>;
+ 				#interrupt-cells = <1>;
+ 				num-lanes = <1>;
+ 				ti,hwmods = "pcie2";
+diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+index d5e3bc518968..d57f48543f76 100644
+--- a/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
++++ b/arch/arm/boot/dts/kirkwood-openblocks_a7.dts
+@@ -53,7 +53,8 @@
+ 		};
+ 
+ 		pinctrl: pin-controller@10000 {
+-			pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header>;
++			pinctrl-0 = <&pmx_dip_switches &pmx_gpio_header
++				     &pmx_gpio_header_gpo>;
+ 			pinctrl-names = "default";
+ 
+ 			pmx_uart0: pmx-uart0 {
+@@ -85,11 +86,16 @@
+ 			 * ground.
+ 			 */
+ 			pmx_gpio_header: pmx-gpio-header {
+-				marvell,pins = "mpp17", "mpp7", "mpp29", "mpp28",
++				marvell,pins = "mpp17", "mpp29", "mpp28",
+ 					       "mpp35", "mpp34", "mpp40";
+ 				marvell,function = "gpio";
+ 			};
+ 
++			pmx_gpio_header_gpo: pxm-gpio-header-gpo {
++				marvell,pins = "mpp7";
++				marvell,function = "gpo";
++			};
++
+ 			pmx_gpio_init: pmx-init {
+ 				marvell,pins = "mpp38";
+ 				marvell,function = "gpio";
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index 84be9da74c7e..48c5a3b23d03 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -841,14 +841,12 @@
+ 			usbhsohci: ohci@4a064800 {
+ 				compatible = "ti,ohci-omap3";
+ 				reg = <0x4a064800 0x400>;
+-				interrupt-parent = <&gic>;
+ 				interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ 			};
+ 
+ 			usbhsehci: ehci@4a064c00 {
+ 				compatible = "ti,ehci-omap";
+ 				reg = <0x4a064c00 0x400>;
+-				interrupt-parent = <&gic>;
+ 				interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ 			};
+ 		};
+diff --git a/arch/arm/boot/dts/s5pv210.dtsi b/arch/arm/boot/dts/s5pv210.dtsi
+index 8344a0ee2b86..b03fe747b98c 100644
+--- a/arch/arm/boot/dts/s5pv210.dtsi
++++ b/arch/arm/boot/dts/s5pv210.dtsi
+@@ -461,6 +461,7 @@
+ 			compatible = "samsung,exynos4210-ohci";
+ 			reg = <0xec300000 0x100>;
+ 			interrupts = <23>;
++			interrupt-parent = <&vic1>;
+ 			clocks = <&clocks CLK_USB_HOST>;
+ 			clock-names = "usbhost";
+ 			#address-cells = <1>;
+diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
+index d42c84b1df8d..9cff28d476be 100644
+--- a/arch/arm/boot/dts/spear1310-evb.dts
++++ b/arch/arm/boot/dts/spear1310-evb.dts
+@@ -349,7 +349,7 @@
+ 			spi0: spi@e0100000 {
+ 				status = "okay";
+ 				num-cs = <3>;
+-				cs-gpios = <&gpio1 7 0>, <&spics 0>, <&spics 1>;
++				cs-gpios = <&gpio1 7 0>, <&spics 0 0>, <&spics 1 0>;
+ 
+ 				stmpe610@0 {
+ 					compatible = "st,stmpe610";
+diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
+index 13e1aa33daa2..69bc407b4a5a 100644
+--- a/arch/arm/boot/dts/spear1340.dtsi
++++ b/arch/arm/boot/dts/spear1340.dtsi
+@@ -141,8 +141,8 @@
+ 				reg = <0xb4100000 0x1000>;
+ 				interrupts = <0 105 0x4>;
+ 				status = "disabled";
+-				dmas = <&dwdma0 0x600 0 0 1>, /* 0xC << 11 */
+-					<&dwdma0 0x680 0 1 0>; /* 0xD << 7 */
++				dmas = <&dwdma0 12 0 1>,
++					<&dwdma0 13 1 0>;
+ 				dma-names = "tx", "rx";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
+index 40accc87e3a2..4e5a2770eac8 100644
+--- a/arch/arm/boot/dts/spear13xx.dtsi
++++ b/arch/arm/boot/dts/spear13xx.dtsi
+@@ -100,7 +100,7 @@
+ 			reg = <0xb2800000 0x1000>;
+ 			interrupts = <0 29 0x4>;
+ 			status = "disabled";
+-			dmas = <&dwdma0 0 0 0 0>;
++			dmas = <&dwdma0 0 0 0>;
+ 			dma-names = "data";
+ 		};
+ 
+@@ -288,8 +288,8 @@
+ 				#size-cells = <0>;
+ 				interrupts = <0 31 0x4>;
+ 				status = "disabled";
+-				dmas = <&dwdma0 0x2000 0 0 0>, /* 0x4 << 11 */
+-					<&dwdma0 0x0280 0 0 0>;  /* 0x5 << 7 */
++				dmas = <&dwdma0 4 0 0>,
++					<&dwdma0 5 0 0>;
+ 				dma-names = "tx", "rx";
+ 			};
+ 
+diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
+index 9f60a7b6a42b..bd379034993c 100644
+--- a/arch/arm/boot/dts/spear600.dtsi
++++ b/arch/arm/boot/dts/spear600.dtsi
+@@ -194,6 +194,7 @@
+ 			rtc@fc900000 {
+ 				compatible = "st,spear600-rtc";
+ 				reg = <0xfc900000 0x1000>;
++				interrupt-parent = <&vic0>;
+ 				interrupts = <10>;
+ 				status = "disabled";
+ 			};
+diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
+index aacd6668d1a0..d43122f0993d 100644
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -161,8 +161,7 @@
+ #else
+ #define VTTBR_X		(5 - KVM_T0SZ)
+ #endif
+-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_X)
+ #define VTTBR_VMID_SHIFT  (48LLU)
+ #define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)
+ 
+diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
+index 9b32f76bb0dd..10f662498eb7 100644
+--- a/arch/arm/include/asm/mmu_context.h
++++ b/arch/arm/include/asm/mmu_context.h
+@@ -61,6 +61,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
+ 		cpu_switch_mm(mm->pgd, mm);
+ }
+ 
++#ifndef MODULE
+ #define finish_arch_post_lock_switch \
+ 	finish_arch_post_lock_switch
+ static inline void finish_arch_post_lock_switch(void)
+@@ -82,6 +83,7 @@ static inline void finish_arch_post_lock_switch(void)
+ 		preempt_enable_no_resched();
+ 	}
+ }
++#endif /* !MODULE */
+ 
+ #endif	/* CONFIG_MMU */
+ 
+diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
+index f36b5b1acd1f..05b2f8294968 100644
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -45,7 +45,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ 	ret = kvm_psci_call(vcpu);
+ 	if (ret < 0) {
+-		kvm_inject_undefined(vcpu);
++		vcpu_set_reg(vcpu, 0, ~0UL);
+ 		return 1;
+ 	}
+ 
+@@ -54,7 +54,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+-	kvm_inject_undefined(vcpu);
++	/*
++	 * "If an SMC instruction executed at Non-secure EL1 is
++	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
++	 * Trap exception, not a Secure Monitor Call exception [...]"
++	 *
++	 * We need to advance the PC after the trap, as it would
++	 * otherwise return to the same address...
++	 */
++	vcpu_set_reg(vcpu, 0, ~0UL);
++	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ 	return 1;
+ }
+ 
+diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
+index 974b1c606d04..04e5004b34e1 100644
+--- a/arch/arm/kvm/mmio.c
++++ b/arch/arm/kvm/mmio.c
+@@ -113,7 +113,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		}
+ 
+ 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+-			       data);
++			       &data);
+ 		data = vcpu_data_host_to_guest(vcpu, data, len);
+ 		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+ 	}
+@@ -188,14 +188,14 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	if (is_write) {
+ 		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+ 
+-		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
++		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
+ 		mmio_write_buf(data_buf, len, data);
+ 
+ 		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ 				       data_buf);
+ 	} else {
+ 		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
+-			       fault_ipa, 0);
++			       fault_ipa, NULL);
+ 
+ 		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+ 				      data_buf);
+diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
+index 5ac122e88f67..9ff92050053c 100644
+--- a/arch/arm/mach-omap2/omap-secure.c
++++ b/arch/arm/mach-omap2/omap-secure.c
+@@ -73,6 +73,25 @@ phys_addr_t omap_secure_ram_mempool_base(void)
+ 	return omap_secure_memblock_base;
+ }
+ 
++u32 omap3_save_secure_ram(void __iomem *addr, int size)
++{
++	u32 ret;
++	u32 param[5];
++
++	if (size != OMAP3_SAVE_SECURE_RAM_SZ)
++		return OMAP3_SAVE_SECURE_RAM_SZ;
++
++	param[0] = 4;		/* Number of arguments */
++	param[1] = __pa(addr);	/* Physical address for saving */
++	param[2] = 0;
++	param[3] = 1;
++	param[4] = 1;
++
++	ret = save_secure_ram_context(__pa(param));
++
++	return ret;
++}
++
+ /**
+  * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
+  * @idx: The PPA API index
+diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
+index af2851fbcdf0..ab6ce2597a88 100644
+--- a/arch/arm/mach-omap2/omap-secure.h
++++ b/arch/arm/mach-omap2/omap-secure.h
+@@ -31,6 +31,8 @@
+ /* Maximum Secure memory storage size */
+ #define OMAP_SECURE_RAM_STORAGE	(88 * SZ_1K)
+ 
++#define OMAP3_SAVE_SECURE_RAM_SZ	0x803F
++
+ /* Secure low power HAL API index */
+ #define OMAP4_HAL_SAVESECURERAM_INDEX	0x1a
+ #define OMAP4_HAL_SAVEHW_INDEX		0x1b
+@@ -64,6 +66,8 @@ extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
+ extern u32 omap_smc3(u32 id, u32 process, u32 flag, u32 pargs);
+ extern phys_addr_t omap_secure_ram_mempool_base(void);
+ extern int omap_secure_ram_reserve_memblock(void);
++extern u32 save_secure_ram_context(u32 args_pa);
++extern u32 omap3_save_secure_ram(void __iomem *save_regs, int size);
+ 
+ extern u32 rx51_secure_dispatcher(u32 idx, u32 process, u32 flag, u32 nargs,
+ 				  u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
+index 425bfcd67db6..326218953737 100644
+--- a/arch/arm/mach-omap2/pm.h
++++ b/arch/arm/mach-omap2/pm.h
+@@ -81,10 +81,6 @@ extern unsigned int omap3_do_wfi_sz;
+ /* ... and its pointer from SRAM after copy */
+ extern void (*omap3_do_wfi_sram)(void);
+ 
+-/* save_secure_ram_context function pointer and size, for copy to SRAM */
+-extern int save_secure_ram_context(u32 *addr);
+-extern unsigned int save_secure_ram_context_sz;
+-
+ extern void omap3_save_scratchpad_contents(void);
+ 
+ #define PM_RTA_ERRATUM_i608		(1 << 0)
+diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
+index 87b98bf92366..0b8ab6c00071 100644
+--- a/arch/arm/mach-omap2/pm34xx.c
++++ b/arch/arm/mach-omap2/pm34xx.c
+@@ -48,6 +48,7 @@
+ #include "prm3xxx.h"
+ #include "pm.h"
+ #include "sdrc.h"
++#include "omap-secure.h"
+ #include "sram.h"
+ #include "control.h"
+ #include "vc.h"
+@@ -66,7 +67,6 @@ struct power_state {
+ 
+ static LIST_HEAD(pwrst_list);
+ 
+-static int (*_omap_save_secure_sram)(u32 *addr);
+ void (*omap3_do_wfi_sram)(void);
+ 
+ static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
+@@ -121,8 +121,8 @@ static void omap3_save_secure_ram_context(void)
+ 		 * will hang the system.
+ 		 */
+ 		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
+-		ret = _omap_save_secure_sram((u32 *)(unsigned long)
+-				__pa(omap3_secure_ram_storage));
++		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
++					    OMAP3_SAVE_SECURE_RAM_SZ);
+ 		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
+ 		/* Following is for error tracking, it should not happen */
+ 		if (ret) {
+@@ -431,15 +431,10 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
+  *
+  * The minimum set of functions is pushed to SRAM for execution:
+  * - omap3_do_wfi for erratum i581 WA,
+- * - save_secure_ram_context for security extensions.
+  */
+ void omap_push_sram_idle(void)
+ {
+ 	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
+-
+-	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
+-		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
+-				save_secure_ram_context_sz);
+ }
+ 
+ static void __init pm_errata_configure(void)
+@@ -551,7 +546,7 @@ int __init omap3_pm_init(void)
+ 	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
+ 	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
+ 		omap3_secure_ram_storage =
+-			kmalloc(0x803F, GFP_KERNEL);
++			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
+ 		if (!omap3_secure_ram_storage)
+ 			pr_err("Memory allocation failed when allocating for secure sram context\n");
+ 
+diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
+index dcb5001d77da..973bcd754e1c 100644
+--- a/arch/arm/mach-omap2/prm33xx.c
++++ b/arch/arm/mach-omap2/prm33xx.c
+@@ -176,17 +176,6 @@ static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+ 	return v;
+ }
+ 
+-static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+-{
+-	u32 v;
+-
+-	v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+-	v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
+-	v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
+-
+-	return v;
+-}
+-
+ static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+ {
+ 	am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
+@@ -357,7 +346,6 @@ struct pwrdm_ops am33xx_pwrdm_operations = {
+ 	.pwrdm_set_next_pwrst		= am33xx_pwrdm_set_next_pwrst,
+ 	.pwrdm_read_next_pwrst		= am33xx_pwrdm_read_next_pwrst,
+ 	.pwrdm_read_pwrst		= am33xx_pwrdm_read_pwrst,
+-	.pwrdm_read_prev_pwrst		= am33xx_pwrdm_read_prev_pwrst,
+ 	.pwrdm_set_logic_retst		= am33xx_pwrdm_set_logic_retst,
+ 	.pwrdm_read_logic_pwrst		= am33xx_pwrdm_read_logic_pwrst,
+ 	.pwrdm_read_logic_retst		= am33xx_pwrdm_read_logic_retst,
+diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
+index 1b9f0520dea9..3e0d802c59da 100644
+--- a/arch/arm/mach-omap2/sleep34xx.S
++++ b/arch/arm/mach-omap2/sleep34xx.S
+@@ -93,20 +93,13 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
+ ENDPROC(enable_omap3630_toggle_l2_on_restore)
+ 
+ /*
+- * Function to call rom code to save secure ram context. This gets
+- * relocated to SRAM, so it can be all in .data section. Otherwise
+- * we need to initialize api_params separately.
++ * Function to call rom code to save secure ram context.
++ *
++ * r0 = physical address of the parameters
+  */
+-	.data
+-	.align	3
+ ENTRY(save_secure_ram_context)
+ 	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
+-	adr	r3, api_params		@ r3 points to parameters
+-	str	r0, [r3,#0x4]		@ r0 has sdram address
+-	ldr	r12, high_mask
+-	and	r3, r3, r12
+-	ldr	r12, sram_phy_addr_mask
+-	orr	r3, r3, r12
++	mov	r3, r0			@ physical address of parameters
+ 	mov	r0, #25			@ set service ID for PPA
+ 	mov	r12, r0			@ copy secure service ID in r12
+ 	mov	r1, #0			@ set task id for ROM code in r1
+@@ -120,18 +113,7 @@ ENTRY(save_secure_ram_context)
+ 	nop
+ 	nop
+ 	ldmfd	sp!, {r4 - r11, pc}
+-	.align
+-sram_phy_addr_mask:
+-	.word	SRAM_BASE_P
+-high_mask:
+-	.word	0xffff
+-api_params:
+-	.word	0x4, 0x0, 0x0, 0x1, 0x1
+ ENDPROC(save_secure_ram_context)
+-ENTRY(save_secure_ram_context_sz)
+-	.word	. - save_secure_ram_context
+-
+-	.text
+ 
+ /*
+  * ======================
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 64d7486262e5..e37c04facc1d 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -764,13 +764,31 @@ static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_add
+ 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
+ }
+ 
++/*
++ * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
++ * that the intention is to allow exporting memory allocated via the
++ * coherent DMA APIs through the dma_buf API, which only accepts a
++ * scattertable.  This presents a couple of problems:
++ * 1. Not all memory allocated via the coherent DMA APIs is backed by
++ *    a struct page
++ * 2. Passing coherent DMA memory into the streaming APIs is not allowed
++ *    as we will try to flush the memory through a different alias to that
++ *    actually being used (and the flushes are redundant.)
++ */
+ int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+ 		 void *cpu_addr, dma_addr_t handle, size_t size,
+ 		 struct dma_attrs *attrs)
+ {
+-	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
++	unsigned long pfn = dma_to_pfn(dev, handle);
++	struct page *page;
+ 	int ret;
+ 
++	/* If the PFN is not valid, we do not have a struct page */
++	if (!pfn_valid(pfn))
++		return -ENXIO;
++
++	page = pfn_to_page(pfn);
++
+ 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ 	if (unlikely(ret))
+ 		return ret;
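
The guard above is the entire fix: a coherent DMA handle is only exportable through a scatterlist if a struct page exists for it. A minimal sketch of the same pattern, assuming ARM's dma_to_pfn() helper and <linux/scatterlist.h>; the function name here is invented for illustration:

    /* Hypothetical helper mirroring the pfn_valid() guard added above. */
    static int example_handle_to_sgtable(struct device *dev,
                                         struct sg_table *sgt,
                                         dma_addr_t handle, size_t size)
    {
        unsigned long pfn = dma_to_pfn(dev, handle);
        int ret;

        if (!pfn_valid(pfn))        /* no struct page: refuse to export */
            return -ENXIO;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (ret)
            return ret;

        sg_set_page(sgt->sgl, pfn_to_page(pfn), size, 0);
        return 0;
    }
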
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index a4ec240ee7ba..3eb018fa1a1f 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -433,6 +433,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ 	struct hlist_node *tmp;
+ 	unsigned long flags, orig_ret_address = 0;
+ 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
++	kprobe_opcode_t *correct_ret_addr = NULL;
+ 
+ 	INIT_HLIST_HEAD(&empty_rp);
+ 	kretprobe_hash_lock(current, &head, &flags);
+@@ -455,14 +456,34 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ 			/* another task is sharing our hash bucket */
+ 			continue;
+ 
++		orig_ret_address = (unsigned long)ri->ret_addr;
++
++		if (orig_ret_address != trampoline_address)
++			/*
++			 * This is the real return address. Any other
++			 * instances associated with this task are for
++			 * other calls deeper on the call stack
++			 */
++			break;
++	}
++
++	kretprobe_assert(ri, orig_ret_address, trampoline_address);
++
++	correct_ret_addr = ri->ret_addr;
++	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
++		if (ri->task != current)
++			/* another task is sharing our hash bucket */
++			continue;
++
++		orig_ret_address = (unsigned long)ri->ret_addr;
+ 		if (ri->rp && ri->rp->handler) {
+ 			__this_cpu_write(current_kprobe, &ri->rp->kp);
+ 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
++			ri->ret_addr = correct_ret_addr;
+ 			ri->rp->handler(ri, regs);
+ 			__this_cpu_write(current_kprobe, NULL);
+ 		}
+ 
+-		orig_ret_address = (unsigned long)ri->ret_addr;
+ 		recycle_rp_inst(ri, &empty_rp);
+ 
+ 		if (orig_ret_address != trampoline_address)
+@@ -474,7 +495,6 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+ 			break;
+ 	}
+ 
+-	kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ 	kretprobe_hash_unlock(current, &flags);
+ 
+ 	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
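
The handler is now a two-pass walk over the per-task instance list: the first pass only locates the real return address (deeper instances still point at the trampoline), and the second pass corrects ri->ret_addr before each user handler runs, so handlers observe the caller rather than the trampoline. A simplified, self-contained model of that ordering (plain C over a linked list; hash buckets, task filtering and recycling omitted):

    struct instance {
        struct instance *next;
        void *ret_addr;
        void (*handler)(struct instance *);
    };

    #define TRAMPOLINE ((void *)0x1000)    /* stand-in address */

    static void model_trampoline(struct instance *head)
    {
        struct instance *ri;
        void *correct_ret_addr = NULL;

        /* Pass 1: find the real return address for this frame. */
        for (ri = head; ri; ri = ri->next) {
            correct_ret_addr = ri->ret_addr;
            if (correct_ret_addr != TRAMPOLINE)
                break;
        }

        /* Pass 2: fix up each instance before invoking its handler. */
        for (ri = head; ri; ri = ri->next) {
            void *orig = ri->ret_addr;

            ri->ret_addr = correct_ret_addr;
            if (ri->handler)
                ri->handler(ri);
            if (orig != TRAMPOLINE)
                break;    /* reached the real frame */
        }
    }
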
+diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
+index 9775de22e2ff..a48354de1aa1 100644
+--- a/arch/arm/probes/kprobes/test-core.c
++++ b/arch/arm/probes/kprobes/test-core.c
+@@ -976,7 +976,10 @@ static void coverage_end(void)
+ void __naked __kprobes_test_case_start(void)
+ {
+ 	__asm__ __volatile__ (
+-		"stmdb	sp!, {r4-r11}				\n\t"
++		"mov	r2, sp					\n\t"
++		"bic	r3, r2, #7				\n\t"
++		"mov	sp, r3					\n\t"
++		"stmdb	sp!, {r2-r11}				\n\t"
+ 		"sub	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+ 		"bic	r0, lr, #1  @ r0 = inline data		\n\t"
+ 		"mov	r1, sp					\n\t"
+@@ -996,7 +999,8 @@ void __naked __kprobes_test_case_end_32(void)
+ 		"movne	pc, r0					\n\t"
+ 		"mov	r0, r4					\n\t"
+ 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+-		"ldmia	sp!, {r4-r11}				\n\t"
++		"ldmia	sp!, {r2-r11}				\n\t"
++		"mov	sp, r2					\n\t"
+ 		"mov	pc, r0					\n\t"
+ 	);
+ }
+@@ -1012,7 +1016,8 @@ void __naked __kprobes_test_case_end_16(void)
+ 		"bxne	r0					\n\t"
+ 		"mov	r0, r4					\n\t"
+ 		"add	sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+-		"ldmia	sp!, {r4-r11}				\n\t"
++		"ldmia	sp!, {r2-r11}				\n\t"
++		"mov	sp, r2					\n\t"
+ 		"bx	r0					\n\t"
+ 	);
+ }
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 8bbd57efae78..9322be69ca09 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -46,7 +46,7 @@ static const char *handler[]= {
+ 	"Error"
+ };
+ 
+-int show_unhandled_signals = 1;
++int show_unhandled_signals = 0;
+ 
+ /*
+  * Dump out the contents of some memory nicely...
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 524fa25671fc..2d357aed5e66 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -42,7 +42,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ 	ret = kvm_psci_call(vcpu);
+ 	if (ret < 0) {
+-		kvm_inject_undefined(vcpu);
++		vcpu_set_reg(vcpu, 0, ~0UL);
+ 		return 1;
+ 	}
+ 
+@@ -51,7 +51,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
+-	kvm_inject_undefined(vcpu);
++	vcpu_set_reg(vcpu, 0, ~0UL);
+ 	return 1;
+ }
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index ae8f940152aa..b8ed781807ef 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -177,6 +177,7 @@ void __init arm64_memblock_init(void)
+ 		arm64_dma_phys_limit = max_zone_dma_phys();
+ 	else
+ 		arm64_dma_phys_limit = PHYS_MASK + 1;
++	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
+ 	dma_contiguous_reserve(arm64_dma_phys_limit);
+ 
+ 	memblock_allow_resize();
+@@ -201,7 +202,6 @@ void __init bootmem_init(void)
+ 	sparse_init();
+ 	zone_sizes_init(min, max);
+ 
+-	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
+ 	max_pfn = max_low_pfn = max;
+ }
+ 
+diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
+index 5afbb7b41160..d60986fdc288 100644
+--- a/arch/mips/ar7/platform.c
++++ b/arch/mips/ar7/platform.c
+@@ -577,7 +577,7 @@ static int __init ar7_register_uarts(void)
+ 	uart_port.type		= PORT_AR7;
+ 	uart_port.uartclk	= clk_get_rate(bus_clk) / 2;
+ 	uart_port.iotype	= UPIO_MEM32;
+-	uart_port.flags		= UPF_FIXED_TYPE;
++	uart_port.flags		= UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
+ 	uart_port.regshift	= 2;
+ 
+ 	uart_port.line		= 0;
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index ded8b8ba34fd..18cb5eb10e55 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -631,6 +631,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 	unsigned long switch_count;
+ 	struct task_struct *t;
+ 
++	/* If nothing to change, return right away, successfully.  */
++	if (value == mips_get_process_fp_mode(task))
++		return 0;
++
++	/* Only accept a mode change if 64-bit FP enabled for o32.  */
++	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
++		return -EOPNOTSUPP;
++
++	/* And only for o32 tasks.  */
++	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
++		return -EOPNOTSUPP;
++
+ 	/* Check the value is valid */
+ 	if (value & ~known_bits)
+ 		return -EOPNOTSUPP;
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 2bea5db01b0b..938d7576f455 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -438,63 +438,160 @@ static int gpr64_set(struct task_struct *target,
+ 
+ #endif /* CONFIG_64BIT */
+ 
++/*
++ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
++ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
++ * correspond 1:1 to buffer slots.  Only general registers are copied.
++ */
++static int fpr_get_fpa(struct task_struct *target,
++		       unsigned int *pos, unsigned int *count,
++		       void **kbuf, void __user **ubuf)
++{
++	return user_regset_copyout(pos, count, kbuf, ubuf,
++				   &target->thread.fpu,
++				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
++}
++
++/*
++ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
++ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
++ * general register slots are copied to buffer slots.  Only general
++ * registers are copied.
++ */
++static int fpr_get_msa(struct task_struct *target,
++		       unsigned int *pos, unsigned int *count,
++		       void **kbuf, void __user **ubuf)
++{
++	unsigned int i;
++	u64 fpr_val;
++	int err;
++
++	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
++	for (i = 0; i < NUM_FPU_REGS; i++) {
++		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
++		err = user_regset_copyout(pos, count, kbuf, ubuf,
++					  &fpr_val, i * sizeof(elf_fpreg_t),
++					  (i + 1) * sizeof(elf_fpreg_t));
++		if (err)
++			return err;
++	}
++
++	return 0;
++}
++
++/*
++ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
++ * Choose the appropriate helper for general registers, and then copy
++ * the FCSR register separately.
++ */
+ static int fpr_get(struct task_struct *target,
+ 		   const struct user_regset *regset,
+ 		   unsigned int pos, unsigned int count,
+ 		   void *kbuf, void __user *ubuf)
+ {
+-	unsigned i;
++	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ 	int err;
+-	u64 fpr_val;
+ 
+-	/* XXX fcr31  */
++	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
++		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
++	else
++		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
++	if (err)
++		return err;
+ 
+-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+-		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+-					   &target->thread.fpu,
+-					   0, sizeof(elf_fpregset_t));
++	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
++				  &target->thread.fpu.fcr31,
++				  fcr31_pos, fcr31_pos + sizeof(u32));
+ 
+-	for (i = 0; i < NUM_FPU_REGS; i++) {
+-		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+-		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+-					  &fpr_val, i * sizeof(elf_fpreg_t),
+-					  (i + 1) * sizeof(elf_fpreg_t));
++	return err;
++}
++
++/*
++ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
++ * !CONFIG_CPU_HAS_MSA variant.   Buffer slots correspond 1:1 to FP
++ * context's general register slots.  Only general registers are copied.
++ */
++static int fpr_set_fpa(struct task_struct *target,
++		       unsigned int *pos, unsigned int *count,
++		       const void **kbuf, const void __user **ubuf)
++{
++	return user_regset_copyin(pos, count, kbuf, ubuf,
++				  &target->thread.fpu,
++				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
++}
++
++/*
++ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
++ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
++ * bits only of FP context's general register slots.  Only general
++ * registers are copied.
++ */
++static int fpr_set_msa(struct task_struct *target,
++		       unsigned int *pos, unsigned int *count,
++		       const void **kbuf, const void __user **ubuf)
++{
++	unsigned int i;
++	u64 fpr_val;
++	int err;
++
++	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
++	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
++		err = user_regset_copyin(pos, count, kbuf, ubuf,
++					 &fpr_val, i * sizeof(elf_fpreg_t),
++					 (i + 1) * sizeof(elf_fpreg_t));
+ 		if (err)
+ 			return err;
++		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ 	}
+ 
+ 	return 0;
+ }
+ 
++/*
++ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
++ * Choose the appropriate helper for general registers, and then copy
++ * the FCSR register separately.
++ *
++ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
++ * which is supposed to have been guaranteed by the kernel before
++ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
++ * so that we can safely avoid preinitializing temporaries for
++ * partial register writes.
++ */
+ static int fpr_set(struct task_struct *target,
+ 		   const struct user_regset *regset,
+ 		   unsigned int pos, unsigned int count,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+-	unsigned i;
++	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
++	u32 fcr31;
+ 	int err;
+-	u64 fpr_val;
+ 
+-	/* XXX fcr31  */
++	BUG_ON(count % sizeof(elf_fpreg_t));
++
++	if (pos + count > sizeof(elf_fpregset_t))
++		return -EIO;
+ 
+ 	init_fp_ctx(target);
+ 
+-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+-		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-					  &target->thread.fpu,
+-					  0, sizeof(elf_fpregset_t));
++	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
++		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
++	else
++		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
++	if (err)
++		return err;
+ 
+-	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+-	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
++	if (count > 0) {
+ 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-					 &fpr_val, i * sizeof(elf_fpreg_t),
+-					 (i + 1) * sizeof(elf_fpreg_t));
++					 &fcr31,
++					 fcr31_pos, fcr31_pos + sizeof(u32));
+ 		if (err)
+ 			return err;
+-		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
++
++		ptrace_setfcr31(target, fcr31);
+ 	}
+ 
+-	return 0;
++	return err;
+ }
+ 
+ enum mips_regset {
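
After this rework both directions agree on one NT_PRFPREG layout: 32 eight-byte general FP slots followed by the 32-bit FCSR, i.e. fcr31_pos is byte offset 256. A standalone check of that layout (the struct name is invented; NUM_FPU_REGS is 32 on MIPS):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NUM_FPU_REGS 32

    /* Illustrative view of the regset buffer as copied above. */
    struct prfpreg_view {
        uint64_t fpr[NUM_FPU_REGS];    /* low 64 bits of each FP register */
        uint32_t fcr31;                /* copied separately, 4 bytes */
    };

    int main(void)
    {
        /* fcr31_pos == NUM_FPU_REGS * sizeof(elf_fpreg_t) == 256 */
        assert(offsetof(struct prfpreg_view, fcr31) == 256);
        return 0;
    }
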
+diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
+index b9920b1edd5a..70cef54dc40f 100644
+--- a/arch/mn10300/mm/misalignment.c
++++ b/arch/mn10300/mm/misalignment.c
+@@ -437,7 +437,7 @@ transfer_failed:
+ 
+ 	info.si_signo	= SIGSEGV;
+ 	info.si_errno	= 0;
+-	info.si_code	= 0;
++	info.si_code	= SEGV_MAPERR;
+ 	info.si_addr	= (void *) regs->pc;
+ 	force_sig_info(SIGSEGV, &info, current);
+ 	return;
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index 3d3f6062f49c..605a284922fb 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -302,12 +302,12 @@ asmlinkage void do_unaligned_access(struct pt_regs *regs, unsigned long address)
+ 	siginfo_t info;
+ 
+ 	if (user_mode(regs)) {
+-		/* Send a SIGSEGV */
+-		info.si_signo = SIGSEGV;
++		/* Send a SIGBUS */
++		info.si_signo = SIGBUS;
+ 		info.si_errno = 0;
+-		/* info.si_code has been set above */
+-		info.si_addr = (void *)address;
+-		force_sig_info(SIGSEGV, &info, current);
++		info.si_code = BUS_ADRALN;
++		info.si_addr = (void __user *)address;
++		force_sig_info(SIGBUS, &info, current);
+ 	} else {
+ 		printk("KERNEL: Unaligned Access 0x%.8lx\n", address);
+ 		show_registers(regs);
+diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
+index 8121aa6db2ff..51bb6b8eade6 100644
+--- a/arch/parisc/include/asm/ldcw.h
++++ b/arch/parisc/include/asm/ldcw.h
+@@ -11,6 +11,7 @@
+    for the semaphore.  */
+ 
+ #define __PA_LDCW_ALIGNMENT	16
++#define __PA_LDCW_ALIGN_ORDER	4
+ #define __ldcw_align(a) ({					\
+ 	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
+ 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
+@@ -28,6 +29,7 @@
+    ldcd). */
+ 
+ #define __PA_LDCW_ALIGNMENT	4
++#define __PA_LDCW_ALIGN_ORDER	2
+ #define __ldcw_align(a) (&(a)->slock)
+ #define __LDCW	"ldcw,co"
+ 
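
The new __PA_LDCW_ALIGN_ORDER is simply log2 of __PA_LDCW_ALIGNMENT, letting assembly clear the low bits with a single depi after adding ALIGNMENT - 1; this is the same round-up that __ldcw_align() performs in C. A worked example of that arithmetic for the 16-byte (order-4) case:

    #include <stdio.h>

    #define ALIGNMENT 16UL    /* __PA_LDCW_ALIGNMENT on PA 1.x */

    int main(void)
    {
        unsigned long addr = 0x1234;    /* hypothetical &lock[0] */
        unsigned long aligned = (addr + ALIGNMENT - 1) & ~(ALIGNMENT - 1);

        printf("%#lx -> %#lx\n", addr, aligned);    /* 0x1234 -> 0x1240 */
        return 0;
    }
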
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index c5ef4081b01d..b523fa90a727 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -35,6 +35,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/signal.h>
+ #include <asm/unistd.h>
++#include <asm/ldcw.h>
+ #include <asm/thread_info.h>
+ 
+ #include <linux/linkage.h>
+@@ -46,6 +47,14 @@
+ #endif
+ 
+ 	.import		pa_tlb_lock,data
++	.macro  load_pa_tlb_lock reg
++#if __PA_LDCW_ALIGNMENT > 4
++	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
++	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
++#else
++	load32	PA(pa_tlb_lock), \reg
++#endif
++	.endm
+ 
+ 	/* space_to_prot macro creates a prot id from a space id */
+ 
+@@ -457,7 +466,7 @@
+ 	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+ 	cmpib,COND(=),n	0,\spc,2f
+-	load32		PA(pa_tlb_lock),\tmp
++	load_pa_tlb_lock \tmp
+ 1:	LDCW		0(\tmp),\tmp1
+ 	cmpib,COND(=)	0,\tmp1,1b
+ 	nop
+@@ -480,7 +489,7 @@
+ 	/* Release pa_tlb_lock lock. */
+ 	.macro		tlb_unlock1	spc,tmp
+ #ifdef CONFIG_SMP
+-	load32		PA(pa_tlb_lock),\tmp
++	load_pa_tlb_lock \tmp
+ 	tlb_unlock0	\spc,\tmp
+ #endif
+ 	.endm
+diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
+index b743a80eaba0..ddc2f0cf1c73 100644
+--- a/arch/parisc/kernel/pacache.S
++++ b/arch/parisc/kernel/pacache.S
+@@ -36,6 +36,7 @@
+ #include <asm/assembly.h>
+ #include <asm/pgtable.h>
+ #include <asm/cache.h>
++#include <asm/ldcw.h>
+ #include <linux/linkage.h>
+ 
+ 	.text
+@@ -333,8 +334,12 @@ ENDPROC(flush_data_cache_local)
+ 
+ 	.macro	tlb_lock	la,flags,tmp
+ #ifdef CONFIG_SMP
+-	ldil		L%pa_tlb_lock,%r1
+-	ldo		R%pa_tlb_lock(%r1),\la
++#if __PA_LDCW_ALIGNMENT > 4
++	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
++	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
++#else
++	load32		pa_tlb_lock, \la
++#endif
+ 	rsm		PSW_SM_I,\flags
+ 1:	LDCW		0(\la),\tmp
+ 	cmpib,<>,n	0,\tmp,3f
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 4b8c928a9873..a55a246fc784 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -126,13 +126,14 @@ config PPC
+ 	select IRQ_FORCED_THREADING
+ 	select HAVE_RCU_TABLE_FREE if SMP
+ 	select HAVE_SYSCALL_TRACEPOINTS
+-	select HAVE_BPF_JIT
++	select HAVE_BPF_JIT if CPU_BIG_ENDIAN
+ 	select HAVE_ARCH_JUMP_LABEL
+ 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ 	select ARCH_HAS_GCOV_PROFILE_ALL
+ 	select GENERIC_SMP_IDLE_THREAD
+ 	select GENERIC_CMOS_UPDATE
+ 	select GENERIC_TIME_VSYSCALL_OLD
++	select GENERIC_CPU_VULNERABILITIES	if PPC_BOOK3S_64
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
+index a8b52b61043f..bd8958445336 100644
+--- a/arch/powerpc/include/asm/exception-64e.h
++++ b/arch/powerpc/include/asm/exception-64e.h
+@@ -208,5 +208,11 @@ exc_##label##_book3e:
+ 	ori	r3,r3,interrupt_base_book3e@l;	\
+ 	mtspr	SPRN_IVOR##vector_number,r3;
+ 
++#define RFI_TO_KERNEL							\
++	rfi
++
++#define RFI_TO_USER							\
++	rfi
++
+ #endif /* _ASM_POWERPC_EXCEPTION_64E_H */
+ 
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index 77f52b26dad6..9bddbec441b8 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -50,6 +50,59 @@
+ #define EX_PPR		88	/* SMT thread status register (priority) */
+ #define EX_CTR		96
+ 
++/*
++ * Macros for annotating the expected destination of (h)rfid
++ *
++ * The nop instructions allow us to insert one or more instructions to flush the
++ * L1-D cache when returning to userspace or a guest.
++ */
++#define RFI_FLUSH_SLOT							\
++	RFI_FLUSH_FIXUP_SECTION;					\
++	nop;								\
++	nop;								\
++	nop
++
++#define RFI_TO_KERNEL							\
++	rfid
++
++#define RFI_TO_USER							\
++	RFI_FLUSH_SLOT;							\
++	rfid;								\
++	b	rfi_flush_fallback
++
++#define RFI_TO_USER_OR_KERNEL						\
++	RFI_FLUSH_SLOT;							\
++	rfid;								\
++	b	rfi_flush_fallback
++
++#define RFI_TO_GUEST							\
++	RFI_FLUSH_SLOT;							\
++	rfid;								\
++	b	rfi_flush_fallback
++
++#define HRFI_TO_KERNEL							\
++	hrfid
++
++#define HRFI_TO_USER							\
++	RFI_FLUSH_SLOT;							\
++	hrfid;								\
++	b	hrfi_flush_fallback
++
++#define HRFI_TO_USER_OR_KERNEL						\
++	RFI_FLUSH_SLOT;							\
++	hrfid;								\
++	b	hrfi_flush_fallback
++
++#define HRFI_TO_GUEST							\
++	RFI_FLUSH_SLOT;							\
++	hrfid;								\
++	b	hrfi_flush_fallback
++
++#define HRFI_TO_UNKNOWN							\
++	RFI_FLUSH_SLOT;							\
++	hrfid;								\
++	b	hrfi_flush_fallback
++
+ #ifdef CONFIG_RELOCATABLE
+ #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
+ 	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
+@@ -191,7 +244,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
+ 	mtspr	SPRN_##h##SRR0,r12;					\
+ 	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
+ 	mtspr	SPRN_##h##SRR1,r10;					\
+-	h##rfid;							\
++	h##RFI_TO_KERNEL;						\
+ 	b	.	/* prevent speculative execution */
+ #define EXCEPTION_PROLOG_PSERIES_1(label, h)				\
+ 	__EXCEPTION_PROLOG_PSERIES_1(label, h)
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 9a67a38bf7b9..7068bafbb2d6 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -184,4 +184,19 @@ label##3:					       	\
+ 	FTR_ENTRY_OFFSET label##1b-label##3b;		\
+ 	.popsection;
+ 
++#define RFI_FLUSH_FIXUP_SECTION				\
++951:							\
++	.pushsection __rfi_flush_fixup,"a";		\
++	.align 2;					\
++952:							\
++	FTR_ENTRY_OFFSET 951b-952b;			\
++	.popsection;
++
++
++#ifndef __ASSEMBLY__
++
++extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
++
++#endif
++
+ #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 85bc8c0d257b..449bbb87c257 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -239,6 +239,7 @@
+ #define H_GET_HCA_INFO          0x1B8
+ #define H_GET_PERF_COUNT        0x1BC
+ #define H_MANAGE_TRACE          0x1C0
++#define H_GET_CPU_CHARACTERISTICS 0x1C8
+ #define H_FREE_LOGICAL_LAN_BUFFER 0x1D4
+ #define H_QUERY_INT_STATE       0x1E4
+ #define H_POLL_PENDING		0x1D8
+@@ -285,7 +286,19 @@
+ #define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE	3
+ #define H_SET_MODE_RESOURCE_LE			4
+ 
++/* H_GET_CPU_CHARACTERISTICS return values */
++#define H_CPU_CHAR_SPEC_BAR_ORI31	(1ull << 63) // IBM bit 0
++#define H_CPU_CHAR_BCCTRL_SERIALISED	(1ull << 62) // IBM bit 1
++#define H_CPU_CHAR_L1D_FLUSH_ORI30	(1ull << 61) // IBM bit 2
++#define H_CPU_CHAR_L1D_FLUSH_TRIG2	(1ull << 60) // IBM bit 3
++#define H_CPU_CHAR_L1D_THREAD_PRIV	(1ull << 59) // IBM bit 4
++
++#define H_CPU_BEHAV_FAVOUR_SECURITY	(1ull << 63) // IBM bit 0
++#define H_CPU_BEHAV_L1D_FLUSH_PR	(1ull << 62) // IBM bit 1
++#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR	(1ull << 61) // IBM bit 2
++
+ #ifndef __ASSEMBLY__
++#include <linux/types.h>
+ 
+ /**
+  * plpar_hcall_norets: - Make a pseries hypervisor call with no return arguments
+@@ -423,6 +436,11 @@ extern long pseries_big_endian_exceptions(void);
+ 
+ #endif /* CONFIG_PPC_PSERIES */
+ 
++struct h_cpu_char_result {
++	u64 character;
++	u64 behaviour;
++};
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __KERNEL__ */
+ #endif /* _ASM_POWERPC_HVCALL_H */
+diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
+index 70bd4381f8e6..08e5df3395fa 100644
+--- a/arch/powerpc/include/asm/paca.h
++++ b/arch/powerpc/include/asm/paca.h
+@@ -192,6 +192,15 @@ struct paca_struct {
+ #endif
+ 	struct kvmppc_host_state kvm_hstate;
+ #endif
++#ifdef CONFIG_PPC_BOOK3S_64
++	/*
++	 * rfi fallback flush must be in its own cacheline to prevent
++	 * other paca data leaking into the L1d
++	 */
++	u64 exrfi[13] __aligned(0x80);
++	void *rfi_flush_fallback_area;
++	u64 l1d_flush_size;
++#endif
+ };
+ 
+ extern struct paca_struct *paca;
+diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
+index 67859edbf8fd..6e05cb397a5c 100644
+--- a/arch/powerpc/include/asm/plpar_wrappers.h
++++ b/arch/powerpc/include/asm/plpar_wrappers.h
+@@ -323,4 +323,18 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr
+ 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
+ }
+ 
++static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
++{
++	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
++	long rc;
++
++	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
++	if (rc == H_SUCCESS) {
++		p->character = retbuf[0];
++		p->behaviour = retbuf[1];
++	}
++
++	return rc;
++}
++
+ #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index dd0fc18d8103..160bb2311bbb 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -224,6 +224,16 @@ name: \
+ 	.globl name; \
+ name:
+ 
++#define _KPROBE_TOC(name)			\
++	.section ".kprobes.text","a";		\
++	.align 2 ;				\
++	.type name,@function;			\
++	.globl name;				\
++name:						\
++0:	addis r2,r12,(.TOC.-0b)@ha;		\
++	addi r2,r2,(.TOC.-0b)@l;		\
++	.localentry name,.-name
++
+ #define DOTSYM(a)	a
+ 
+ #else
+@@ -261,6 +271,8 @@ name: \
+ 	.type GLUE(.,name),@function; \
+ GLUE(.,name):
+ 
++#define _KPROBE_TOC(n)	_KPROBE(n)
++
+ #define DOTSYM(a)	GLUE(.,a)
+ 
+ #endif
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index e9d384cbd021..7916b56f2e60 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -26,6 +26,19 @@ void initmem_init(void);
+ void setup_panic(void);
+ #define ARCH_PANIC_TIMEOUT 180
+ 
++void rfi_flush_enable(bool enable);
++
++/* These are bit flags */
++enum l1d_flush_type {
++	L1D_FLUSH_NONE		= 0x1,
++	L1D_FLUSH_FALLBACK	= 0x2,
++	L1D_FLUSH_ORI		= 0x4,
++	L1D_FLUSH_MTTRIG	= 0x8,
++};
++
++void __init setup_rfi_flush(enum l1d_flush_type, bool enable);
++void do_rfi_flush_fixups(enum l1d_flush_type types);
++
+ #endif /* !__ASSEMBLY__ */
+ 
+ #endif	/* _ASM_POWERPC_SETUP_H */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index d8d332e65078..23fe603a98d3 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -244,6 +244,9 @@ int main(void)
+ #ifdef CONFIG_PPC_BOOK3S_64
+ 	DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
+ 	DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
++	DEFINE(PACA_RFI_FLUSH_FALLBACK_AREA, offsetof(struct paca_struct, rfi_flush_fallback_area));
++	DEFINE(PACA_EXRFI, offsetof(struct paca_struct, exrfi));
++	DEFINE(PACA_L1D_FLUSH_SIZE, offsetof(struct paca_struct, l1d_flush_size));
+ #endif
+ 	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+ 	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index de276553cc79..36a8bf3d053b 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -34,6 +34,11 @@
+ #include <asm/ftrace.h>
+ #include <asm/hw_irq.h>
+ #include <asm/context_tracking.h>
++#ifdef CONFIG_PPC_BOOK3S
++#include <asm/exception-64s.h>
++#else
++#include <asm/exception-64e.h>
++#endif
+ 
+ /*
+  * System calls.
+@@ -218,13 +223,23 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+ 	ACCOUNT_CPU_USER_EXIT(r11, r12)
+ 	HMT_MEDIUM_LOW_HAS_PPR
+ 	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
++	ld	r2,GPR2(r1)
++	ld	r1,GPR1(r1)
++	mtlr	r4
++	mtcr	r5
++	mtspr	SPRN_SRR0,r7
++	mtspr	SPRN_SRR1,r8
++	RFI_TO_USER
++	b	.	/* prevent speculative execution */
++
++	/* exit to kernel */
+ 1:	ld	r2,GPR2(r1)
+ 	ld	r1,GPR1(r1)
+ 	mtlr	r4
+ 	mtcr	r5
+ 	mtspr	SPRN_SRR0,r7
+ 	mtspr	SPRN_SRR1,r8
+-	RFI
++	RFI_TO_KERNEL
+ 	b	.	/* prevent speculative execution */
+ 
+ syscall_error:	
+@@ -840,7 +855,7 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	ACCOUNT_CPU_USER_EXIT(r2, r4)
+ 	REST_GPR(13, r1)
+-1:
++
+ 	mtspr	SPRN_SRR1,r3
+ 
+ 	ld	r2,_CCR(r1)
+@@ -853,8 +868,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	ld	r3,GPR3(r1)
+ 	ld	r4,GPR4(r1)
+ 	ld	r1,GPR1(r1)
++	RFI_TO_USER
++	b	.	/* prevent speculative execution */
++
++1:	mtspr	SPRN_SRR1,r3
++
++	ld	r2,_CCR(r1)
++	mtcrf	0xFF,r2
++	ld	r2,_NIP(r1)
++	mtspr	SPRN_SRR0,r2
+ 
+-	rfid
++	ld	r0,GPR0(r1)
++	ld	r2,GPR2(r1)
++	ld	r3,GPR3(r1)
++	ld	r4,GPR4(r1)
++	ld	r1,GPR1(r1)
++	RFI_TO_KERNEL
+ 	b	.	/* prevent speculative execution */
+ 
+ #endif /* CONFIG_PPC_BOOK3E */
+@@ -1030,7 +1059,7 @@ _GLOBAL(enter_rtas)
+ 	
+ 	mtspr	SPRN_SRR0,r5
+ 	mtspr	SPRN_SRR1,r6
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.	/* prevent speculative execution */
+ 
+ rtas_return_loc:
+@@ -1055,7 +1084,7 @@ rtas_return_loc:
+ 
+ 	mtspr	SPRN_SRR0,r3
+ 	mtspr	SPRN_SRR1,r4
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.	/* prevent speculative execution */
+ 
+ 	.align	3
+@@ -1126,7 +1155,7 @@ _GLOBAL(enter_prom)
+ 	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+ 	andc	r11,r11,r12
+ 	mtsrr1	r11
+-	rfid
++	RFI_TO_KERNEL
+ #endif /* CONFIG_PPC_BOOK3E */
+ 
+ 1:	/* Return from OF */
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 7662bfae0493..3b8991df5101 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -46,7 +46,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
+ 	mtspr	SPRN_SRR0,r10 ; 				\
+ 	ld	r10,PACAKMSR(r13) ;				\
+ 	mtspr	SPRN_SRR1,r10 ; 				\
+-	rfid ; 							\
++	RFI_TO_KERNEL ; 							\
+ 	b	. ;	/* prevent speculative execution */
+ 
+ #define SYSCALL_PSERIES_3					\
+@@ -54,7 +54,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)				\
+ 1:	mfspr	r12,SPRN_SRR1 ;					\
+ 	xori	r12,r12,MSR_LE ;				\
+ 	mtspr	SPRN_SRR1,r12 ;					\
+-	rfid ;		/* return to userspace */		\
++	RFI_TO_USER ;		/* return to userspace */		\
+ 	b	. ;	/* prevent speculative execution */
+ 
+ #if defined(CONFIG_RELOCATABLE)
+@@ -508,7 +508,7 @@ BEGIN_FTR_SECTION
+ 	LOAD_HANDLER(r12, machine_check_handle_early)
+ 1:	mtspr	SPRN_SRR0,r12
+ 	mtspr	SPRN_SRR1,r11
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.	/* prevent speculative execution */
+ 2:
+ 	/* Stack overflow. Stay on emergency stack and panic.
+@@ -602,7 +602,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ 	ld	r11,PACA_EXGEN+EX_R11(r13)
+ 	ld	r12,PACA_EXGEN+EX_R12(r13)
+ 	ld	r13,PACA_EXGEN+EX_R13(r13)
+-	HRFID
++	HRFI_TO_UNKNOWN
+ 	b	.
+ #endif
+ 
+@@ -667,7 +667,7 @@ masked_##_H##interrupt:					\
+ 	ld	r10,PACA_EXGEN+EX_R10(r13);		\
+ 	ld	r11,PACA_EXGEN+EX_R11(r13);		\
+ 	GET_SCRATCH0(r13);				\
+-	##_H##rfid;					\
++	##_H##RFI_TO_KERNEL;				\
+ 	b	.
+ 	
+ 	MASKED_INTERRUPT()
+@@ -757,7 +757,7 @@ kvmppc_skip_interrupt:
+ 	addi	r13, r13, 4
+ 	mtspr	SPRN_SRR0, r13
+ 	GET_SCRATCH0(r13)
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.
+ 
+ kvmppc_skip_Hinterrupt:
+@@ -769,7 +769,7 @@ kvmppc_skip_Hinterrupt:
+ 	addi	r13, r13, 4
+ 	mtspr	SPRN_HSRR0, r13
+ 	GET_SCRATCH0(r13)
+-	hrfid
++	HRFI_TO_KERNEL
+ 	b	.
+ #endif
+ 
+@@ -1447,7 +1447,7 @@ machine_check_handle_early:
+ 	li	r3,MSR_ME
+ 	andc	r10,r10,r3		/* Turn off MSR_ME */
+ 	mtspr	SPRN_SRR1,r10
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.
+ 2:
+ 	/*
+@@ -1465,7 +1465,7 @@ machine_check_handle_early:
+ 	 */
+ 	bl	machine_check_queue_event
+ 	MACHINE_CHECK_HANDLER_WINDUP
+-	rfid
++	RFI_TO_USER_OR_KERNEL
+ 9:
+ 	/* Deliver the machine check to host kernel in V mode. */
+ 	MACHINE_CHECK_HANDLER_WINDUP
+@@ -1511,6 +1511,8 @@ slb_miss_realmode:
+ 
+ 	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
+ 	beq-	2f
++	andi.	r10,r12,MSR_PR	/* check for user mode (PR != 0) */
++	bne	1f
+ 
+ .machine	push
+ .machine	"power4"
+@@ -1524,7 +1526,23 @@ slb_miss_realmode:
+ 	ld	r11,PACA_EXSLB+EX_R11(r13)
+ 	ld	r12,PACA_EXSLB+EX_R12(r13)
+ 	ld	r13,PACA_EXSLB+EX_R13(r13)
+-	rfid
++	RFI_TO_KERNEL
++	b	.	/* prevent speculative execution */
++
++1:
++.machine	push
++.machine	"power4"
++	mtcrf	0x80,r9
++	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
++.machine	pop
++
++	RESTORE_PPR_PACA(PACA_EXSLB, r9)
++	ld	r9,PACA_EXSLB+EX_R9(r13)
++	ld	r10,PACA_EXSLB+EX_R10(r13)
++	ld	r11,PACA_EXSLB+EX_R11(r13)
++	ld	r12,PACA_EXSLB+EX_R12(r13)
++	ld	r13,PACA_EXSLB+EX_R13(r13)
++	RFI_TO_USER
+ 	b	.	/* prevent speculative execution */
+ 
+ 2:	mfspr	r11,SPRN_SRR0
+@@ -1533,7 +1551,7 @@ slb_miss_realmode:
+ 	mtspr	SPRN_SRR0,r10
+ 	ld	r10,PACAKMSR(r13)
+ 	mtspr	SPRN_SRR1,r10
+-	rfid
++	RFI_TO_KERNEL
+ 	b	.
+ 
+ unrecov_slb:
+@@ -1554,6 +1572,88 @@ power4_fixup_nap:
+ 	blr
+ #endif
+ 
++	.globl rfi_flush_fallback
++rfi_flush_fallback:
++	SET_SCRATCH0(r13);
++	GET_PACA(r13);
++	std	r9,PACA_EXRFI+EX_R9(r13)
++	std	r10,PACA_EXRFI+EX_R10(r13)
++	std	r11,PACA_EXRFI+EX_R11(r13)
++	mfctr	r9
++	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
++	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
++	mtctr	r11
++	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++	/* order ld/st prior to dcbt stop all streams with flushing */
++	sync
++
++	/*
++	 * The load adresses are at staggered offsets within cachelines,
++	 * The load addresses are at staggered offsets within cachelines,
++	 * hurt).
++	 */
++1:
++	ld	r11,(0x80 + 8)*0(r10)
++	ld	r11,(0x80 + 8)*1(r10)
++	ld	r11,(0x80 + 8)*2(r10)
++	ld	r11,(0x80 + 8)*3(r10)
++	ld	r11,(0x80 + 8)*4(r10)
++	ld	r11,(0x80 + 8)*5(r10)
++	ld	r11,(0x80 + 8)*6(r10)
++	ld	r11,(0x80 + 8)*7(r10)
++	addi	r10,r10,0x80*8
++	bdnz	1b
++
++	mtctr	r9
++	ld	r9,PACA_EXRFI+EX_R9(r13)
++	ld	r10,PACA_EXRFI+EX_R10(r13)
++	ld	r11,PACA_EXRFI+EX_R11(r13)
++	GET_SCRATCH0(r13);
++	rfid
++
++	.globl hrfi_flush_fallback
++hrfi_flush_fallback:
++	SET_SCRATCH0(r13);
++	GET_PACA(r13);
++	std	r9,PACA_EXRFI+EX_R9(r13)
++	std	r10,PACA_EXRFI+EX_R10(r13)
++	std	r11,PACA_EXRFI+EX_R11(r13)
++	mfctr	r9
++	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
++	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
++	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
++	mtctr	r11
++	DCBT_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
++
++	/* order ld/st prior to dcbt stop all streams with flushing */
++	sync
++
++	/*
++	 * The load addresses are at staggered offsets within cachelines,
++	 * which suits some pipelines better (on others it should not
++	 * hurt).
++	 */
++1:
++	ld	r11,(0x80 + 8)*0(r10)
++	ld	r11,(0x80 + 8)*1(r10)
++	ld	r11,(0x80 + 8)*2(r10)
++	ld	r11,(0x80 + 8)*3(r10)
++	ld	r11,(0x80 + 8)*4(r10)
++	ld	r11,(0x80 + 8)*5(r10)
++	ld	r11,(0x80 + 8)*6(r10)
++	ld	r11,(0x80 + 8)*7(r10)
++	addi	r10,r10,0x80*8
++	bdnz	1b
++
++	mtctr	r9
++	ld	r9,PACA_EXRFI+EX_R9(r13)
++	ld	r10,PACA_EXRFI+EX_R10(r13)
++	ld	r11,PACA_EXRFI+EX_R11(r13)
++	GET_SCRATCH0(r13);
++	hrfid
++
+ /*
+  * Hash table stuff
+  */
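
The srdi by (7 + 3) sizes the loop so that each iteration covers eight 128-byte lines (1 KiB) and the whole pass touches exactly l1d_flush_size bytes; within each block the loads sit 0x88 apart, staggered across lines. A C model of the displacement walk under those assumptions (the backing area is allocated at twice the L1d size later in this patch, to absorb prefetch runoff):

    #include <stddef.h>
    #include <stdint.h>

    #define LINE 128    /* L1d line size assumed by the asm */

    /* Model of the fallback flush: one load per line, staggered offsets. */
    static void model_fallback_flush(const unsigned char *area, size_t size)
    {
        volatile uint64_t sink;
        size_t iters = size >> (7 + 3);    /* size / (128 * 8) */

        for (size_t i = 0; i < iters; i++)
            for (size_t j = 0; j < 8; j++)
                sink = *(const uint64_t *)
                        (area + i * LINE * 8 + j * (LINE + 8));
        (void)sink;
    }
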
+diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
+index 4e314b90c75d..1f979d5617a2 100644
+--- a/arch/powerpc/kernel/misc_64.S
++++ b/arch/powerpc/kernel/misc_64.S
+@@ -65,7 +65,7 @@ PPC64_CACHES:
+  *   flush all bytes from start through stop-1 inclusive
+  */
+ 
+-_KPROBE(flush_icache_range)
++_KPROBE_TOC(flush_icache_range)
+ BEGIN_FTR_SECTION
+ 	PURGE_PREFETCHED_INS
+ 	blr
+@@ -116,7 +116,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
+  *
+  *    flush all bytes from start to stop-1 inclusive
+  */
+-_GLOBAL(flush_dcache_range)
++_GLOBAL_TOC(flush_dcache_range)
+ 
+ /*
+  * Flush the data cache to memory 
+@@ -634,31 +634,3 @@ _GLOBAL(kexec_sequence)
+ 	li	r5,0
+ 	blr	/* image->start(physid, image->start, 0); */
+ #endif /* CONFIG_KEXEC */
+-
+-#ifdef CONFIG_MODULES
+-#if defined(_CALL_ELF) && _CALL_ELF == 2
+-
+-#ifdef CONFIG_MODVERSIONS
+-.weak __crc_TOC.
+-.section "___kcrctab+TOC.","a"
+-.globl __kcrctab_TOC.
+-__kcrctab_TOC.:
+-	.llong	__crc_TOC.
+-#endif
+-
+-/*
+- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
+- * Both modpost and depmod strip the leading . so we do the same here.
+- */
+-.section "__ksymtab_strings","a"
+-__kstrtab_TOC.:
+-	.asciz "TOC."
+-
+-.section "___ksymtab+TOC.","a"
+-/* This symbol name is important: it's used by modpost to find exported syms */
+-.globl __ksymtab_TOC.
+-__ksymtab_TOC.:
+-	.llong 0 /* .value */
+-	.llong __kstrtab_TOC.
+-#endif /* ELFv2 */
+-#endif /* MODULES */
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index e4f7d4eed20c..08b7a40de5f8 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
+ 		}
+ }
+ 
+-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
++/*
++ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
++ * seem to be defined (value set later).
++ */
+ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+ {
+ 	unsigned int i;
+@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
+ 	for (i = 1; i < numsyms; i++) {
+ 		if (syms[i].st_shndx == SHN_UNDEF) {
+ 			char *name = strtab + syms[i].st_name;
+-			if (name[0] == '.')
++			if (name[0] == '.') {
++				if (strcmp(name+1, "TOC.") == 0)
++					syms[i].st_shndx = SHN_ABS;
+ 				syms[i].st_name++;
++			}
+ 		}
+ 	}
+ }
+@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
+ 	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
+ 
+ 	for (i = 1; i < numsyms; i++) {
+-		if (syms[i].st_shndx == SHN_UNDEF
++		if (syms[i].st_shndx == SHN_ABS
+ 		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
+ 			return &syms[i];
+ 	}
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index dd023904bac5..9579f9c13315 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -209,7 +209,8 @@ void enable_kernel_vsx(void)
+ 	WARN_ON(preemptible());
+ 
+ #ifdef CONFIG_SMP
+-	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
++	if (current->thread.regs &&
++	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)))
+ 		giveup_vsx(current);
+ 	else
+ 		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
+@@ -231,7 +232,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
+ {
+ 	if (tsk->thread.regs) {
+ 		preempt_disable();
+-		if (tsk->thread.regs->msr & MSR_VSX) {
++		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
+ #ifdef CONFIG_SMP
+ 			BUG_ON(tsk != current);
+ #endif
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index f21897b42057..93f200f14e19 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ 
+ #else
+ 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+-		     offsetof(struct thread_fp_state, fpr[32][0]));
++		     offsetof(struct thread_fp_state, fpr[32]));
+ 
+ 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ 				   &target->thread.fp_state, 0, -1);
+@@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 	return 0;
+ #else
+ 	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+-		     offsetof(struct thread_fp_state, fpr[32][0]));
++		     offsetof(struct thread_fp_state, fpr[32]));
+ 
+ 	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				  &target->thread.fp_state, 0, -1);
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index 8161d66830a2..c6ebe398def1 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -38,6 +38,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/memory.h>
+ #include <linux/nmi.h>
++#include <linux/debugfs.h>
+ 
+ #include <asm/io.h>
+ #include <asm/kdump.h>
+@@ -807,4 +808,131 @@ static int __init disable_hardlockup_detector(void)
+ 	return 0;
+ }
+ early_initcall(disable_hardlockup_detector);
++
++#ifdef CONFIG_PPC_BOOK3S_64
++static enum l1d_flush_type enabled_flush_types;
++static void *l1d_flush_fallback_area;
++static bool no_rfi_flush;
++bool rfi_flush;
++
++static int __init handle_no_rfi_flush(char *p)
++{
++	pr_info("rfi-flush: disabled on command line.\n");
++	no_rfi_flush = true;
++	return 0;
++}
++early_param("no_rfi_flush", handle_no_rfi_flush);
++
++/*
++ * The RFI flush is not KPTI, but because users will see doco that says to use
++ * nopti, we hijack that option here to also disable the RFI flush.
++ */
++static int __init handle_no_pti(char *p)
++{
++	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
++	handle_no_rfi_flush(NULL);
++	return 0;
++}
++early_param("nopti", handle_no_pti);
++
++static void do_nothing(void *unused)
++{
++	/*
++	 * We don't need to do the flush explicitly, just enter+exit kernel is
++	 * sufficient, the RFI exit handlers will do the right thing.
++	 */
++}
++
++void rfi_flush_enable(bool enable)
++{
++	if (rfi_flush == enable)
++		return;
++
++	if (enable) {
++		do_rfi_flush_fixups(enabled_flush_types);
++		on_each_cpu(do_nothing, NULL, 1);
++	} else
++		do_rfi_flush_fixups(L1D_FLUSH_NONE);
++
++	rfi_flush = enable;
++}
++
++static void init_fallback_flush(void)
++{
++	u64 l1d_size, limit;
++	int cpu;
++
++	l1d_size = ppc64_caches.dsize;
++	limit = min(safe_stack_limit(), ppc64_rma_size);
++
++	/*
++	 * Align to L1d size, and size it at 2x L1d size, to catch possible
++	 * hardware prefetch runoff. We don't have a recipe for load patterns to
++	 * reliably avoid the prefetcher.
++	 */
++	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
++	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
++
++	for_each_possible_cpu(cpu) {
++		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
++		paca[cpu].l1d_flush_size = l1d_size;
++	}
++}
++
++void __init setup_rfi_flush(enum l1d_flush_type types, bool enable)
++{
++	if (types & L1D_FLUSH_FALLBACK) {
++		pr_info("rfi-flush: Using fallback displacement flush\n");
++		init_fallback_flush();
++	}
++
++	if (types & L1D_FLUSH_ORI)
++		pr_info("rfi-flush: Using ori type flush\n");
++
++	if (types & L1D_FLUSH_MTTRIG)
++		pr_info("rfi-flush: Using mttrig type flush\n");
++
++	enabled_flush_types = types;
++
++	if (!no_rfi_flush)
++		rfi_flush_enable(enable);
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int rfi_flush_set(void *data, u64 val)
++{
++	if (val == 1)
++		rfi_flush_enable(true);
++	else if (val == 0)
++		rfi_flush_enable(false);
++	else
++		return -EINVAL;
++
++	return 0;
++}
++
++static int rfi_flush_get(void *data, u64 *val)
++{
++	*val = rfi_flush ? 1 : 0;
++	return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
++
++static __init int rfi_flush_debugfs_init(void)
++{
++	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
++	return 0;
++}
++device_initcall(rfi_flush_debugfs_init);
++#endif
++
++ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
++{
++	if (rfi_flush)
++		return sprintf(buf, "Mitigation: RFI Flush\n");
++
++	return sprintf(buf, "Vulnerable\n");
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
+ #endif
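
Under CONFIG_DEBUG_FS the flush state becomes a runtime toggle; powerpc_debugfs_root is the "powerpc" directory under the debugfs mount, so the file normally shows up as /sys/kernel/debug/powerpc/rfi_flush. A hypothetical userspace snippet flipping it on (assumes debugfs is mounted in the usual place and the caller is root):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/debug/powerpc/rfi_flush", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* "1" enables, "0" disables; anything else yields EINVAL. */
        if (write(fd, "1", 1) != 1)
            perror("write");
        close(fd);
        return 0;
    }
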
+diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
+index 79796de11737..3263ee23170d 100644
+--- a/arch/powerpc/kernel/vdso64/datapage.S
++++ b/arch/powerpc/kernel/vdso64/datapage.S
+@@ -57,7 +57,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
+ 	bl	V_LOCAL_FUNC(__get_datapage)
+ 	mtlr	r12
+ 	addi	r3,r3,CFG_SYSCALL_MAP64
+-	cmpli	cr0,r4,0
++	cmpldi	cr0,r4,0
+ 	crclr	cr0*4+so
+ 	beqlr
+ 	li	r0,__NR_syscalls
+diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
+index a76b4af37ef2..382021324883 100644
+--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
++++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
+@@ -145,7 +145,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
+ 	bne	cr0,99f
+ 
+ 	li	r3,0
+-	cmpli	cr0,r4,0
++	cmpldi	cr0,r4,0
+ 	crclr	cr0*4+so
+ 	beqlr
+ 	lis	r5,CLOCK_REALTIME_RES@h
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 1db685104ffc..b542a80477c8 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -72,6 +72,15 @@ SECTIONS
+ 	/* Read-only data */
+ 	RODATA
+ 
++#ifdef CONFIG_PPC64
++	. = ALIGN(8);
++	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
++		__start___rfi_flush_fixup = .;
++		*(__rfi_flush_fixup)
++		__stop___rfi_flush_fixup = .;
++	}
++#endif
++
+ 	EXCEPTION_TABLE(0)
+ 
+ 	NOTES :kernel :notes
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index a3018f109cd3..a7bd4100f158 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -64,7 +64,7 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
+ 	mtmsrd	r0,1		/* clear RI in MSR */
+ 	mtsrr0	r5
+ 	mtsrr1	r6
+-	RFI
++	RFI_TO_KERNEL
+ 
+ kvmppc_call_hv_entry:
+ 	ld	r4, HSTATE_KVM_VCPU(r13)
+@@ -164,7 +164,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	mtsrr0	r8
+ 	mtsrr1	r7
+ 	beq	cr1, 13f		/* machine check */
+-	RFI
++	RFI_TO_KERNEL
+ 
+ 	/* On POWER7, we have external interrupts set to use HSRR0/1 */
+ 11:	mtspr	SPRN_HSRR0, r8
+@@ -877,8 +877,7 @@ BEGIN_FTR_SECTION
+ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	ld	r0, VCPU_GPR(R0)(r4)
+ 	ld	r4, VCPU_GPR(R4)(r4)
+-
+-	hrfid
++	HRFI_TO_GUEST
+ 	b	.
+ 
+ secondary_too_late:
+diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
+index 16c4d88ba27d..a328f99a887c 100644
+--- a/arch/powerpc/kvm/book3s_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_rmhandlers.S
+@@ -46,6 +46,9 @@
+ 
+ #define FUNC(name)		name
+ 
++#define RFI_TO_KERNEL	RFI
++#define RFI_TO_GUEST	RFI
++
+ .macro INTERRUPT_TRAMPOLINE intno
+ 
+ .global kvmppc_trampoline_\intno
+@@ -141,7 +144,7 @@ kvmppc_handler_skip_ins:
+ 	GET_SCRATCH0(r13)
+ 
+ 	/* And get back into the code */
+-	RFI
++	RFI_TO_KERNEL
+ #endif
+ 
+ /*
+@@ -164,6 +167,6 @@ _GLOBAL_TOC(kvmppc_entry_trampoline)
+ 	ori	r5, r5, MSR_EE
+ 	mtsrr0	r7
+ 	mtsrr1	r6
+-	RFI
++	RFI_TO_KERNEL
+ 
+ #include "book3s_segment.S"
+diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
+index acee37cde840..af3a91c00b46 100644
+--- a/arch/powerpc/kvm/book3s_segment.S
++++ b/arch/powerpc/kvm/book3s_segment.S
+@@ -156,7 +156,7 @@ no_dcbz32_on:
+ 	PPC_LL	r9, SVCPU_R9(r3)
+ 	PPC_LL	r3, (SVCPU_R3)(r3)
+ 
+-	RFI
++	RFI_TO_GUEST
+ kvmppc_handler_trampoline_enter_end:
+ 
+ 
+@@ -389,5 +389,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+ 	cmpwi	r12, BOOK3S_INTERRUPT_DOORBELL
+ 	beqa	BOOK3S_INTERRUPT_DOORBELL
+ 
+-	RFI
++	RFI_TO_KERNEL
+ kvmppc_handler_trampoline_exit_end:
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 7ce3870d7ddd..a18d648d31a6 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -20,6 +20,7 @@
+ #include <asm/code-patching.h>
+ #include <asm/page.h>
+ #include <asm/sections.h>
++#include <asm/setup.h>
+ 
+ 
+ struct fixup_entry {
+@@ -113,6 +114,47 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ 	}
+ }
+ 
++#ifdef CONFIG_PPC_BOOK3S_64
++void do_rfi_flush_fixups(enum l1d_flush_type types)
++{
++	unsigned int instrs[3], *dest;
++	long *start, *end;
++	int i;
++
++	start = PTRRELOC(&__start___rfi_flush_fixup);
++	end = PTRRELOC(&__stop___rfi_flush_fixup);
++
++	instrs[0] = 0x60000000; /* nop */
++	instrs[1] = 0x60000000; /* nop */
++	instrs[2] = 0x60000000; /* nop */
++
++	if (types & L1D_FLUSH_FALLBACK)
++		/* b .+16 to fallback flush */
++		instrs[0] = 0x48000010;
++
++	i = 0;
++	if (types & L1D_FLUSH_ORI) {
++		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
++	}
++
++	if (types & L1D_FLUSH_MTTRIG)
++		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++	for (i = 0; start < end; start++, i++) {
++		dest = (void *)start + *start;
++
++		pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++		patch_instruction(dest, instrs[0]);
++		patch_instruction(dest + 1, instrs[1]);
++		patch_instruction(dest + 2, instrs[2]);
++	}
++
++	printk(KERN_DEBUG "rfi-flush: patched %d locations\n", i);
++}
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+ 	long *start, *end;
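
Each RFI_FLUSH_FIXUP_SECTION entry stores a self-relative offset (951b-952b) to its three-nop slot, which is why the loop above recovers the patch site as dest = (void *)start + *start; self-relative entries stay valid wherever the kernel ends up loaded. A generic C sketch of such a relative-pointer table (all names invented):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t slot_a, slot_b;    /* stand-ins for the nop slots */
    static long fixup_table[2];        /* self-relative entries */

    int main(void)
    {
        fixup_table[0] = (char *)&slot_a - (char *)&fixup_table[0];
        fixup_table[1] = (char *)&slot_b - (char *)&fixup_table[1];

        for (long *entry = fixup_table; entry < fixup_table + 2; entry++) {
            /* Same recovery as above: dest = entry + *entry. */
            uint32_t *dest = (uint32_t *)((char *)entry + *entry);
            *dest = 0x60000000;    /* e.g. install a nop */
        }
        printf("%#x %#x\n", slot_a, slot_b);
        return 0;
    }
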
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index d90893b76e7c..b7e1307fe633 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
+ 	int ret;
+ 	__u64 target;
+ 
+-	if (is_kernel_addr(addr))
+-		return branch_target((unsigned int *)addr);
++	if (is_kernel_addr(addr)) {
++		if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
++			return 0;
++
++		return branch_target(&instr);
++	}
+ 
+ 	/* Userspace: need copy instruction here then translate it */
+ 	pagefault_disable();
+@@ -1377,7 +1381,7 @@ static int collect_events(struct perf_event *group, int max_count,
+ 	int n = 0;
+ 	struct perf_event *event;
+ 
+-	if (!is_software_event(group)) {
++	if (group->pmu->task_ctx_nr == perf_hw_context) {
+ 		if (n >= max_count)
+ 			return -1;
+ 		ctrs[n] = group;
+@@ -1385,7 +1389,7 @@ static int collect_events(struct perf_event *group, int max_count,
+ 		events[n++] = group->hw.config;
+ 	}
+ 	list_for_each_entry(event, &group->sibling_list, group_entry) {
+-		if (!is_software_event(event) &&
++		if (event->pmu->task_ctx_nr == perf_hw_context &&
+ 		    event->state != PERF_EVENT_STATE_OFF) {
+ 			if (n >= max_count)
+ 				return -1;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 20974478f8d0..e3c98361e6e4 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -38,14 +38,63 @@
+ #include <asm/cputhreads.h>
+ #include <asm/cpuidle.h>
+ #include <asm/code-patching.h>
++#include <asm/setup.h>
+ 
+ #include "powernv.h"
+ #include "subcore.h"
+ 
++static void pnv_setup_rfi_flush(void)
++{
++	struct device_node *np, *fw_features;
++	enum l1d_flush_type type;
++	int enable;
++
++	/* Default to fallback in case fw-features are not available */
++	type = L1D_FLUSH_FALLBACK;
++	enable = 1;
++
++	np = of_find_node_by_name(NULL, "ibm,opal");
++	fw_features = of_get_child_by_name(np, "fw-features");
++	of_node_put(np);
++
++	if (fw_features) {
++		np = of_get_child_by_name(fw_features, "inst-l1d-flush-trig2");
++		if (np && of_property_read_bool(np, "enabled"))
++			type = L1D_FLUSH_MTTRIG;
++
++		of_node_put(np);
++
++		np = of_get_child_by_name(fw_features, "inst-l1d-flush-ori30,30,0");
++		if (np && of_property_read_bool(np, "enabled"))
++			type = L1D_FLUSH_ORI;
++
++		of_node_put(np);
++
++		/* Enable unless firmware says NOT to */
++		enable = 2;
++		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-hv-1-to-0");
++		if (np && of_property_read_bool(np, "disabled"))
++			enable--;
++
++		of_node_put(np);
++
++		np = of_get_child_by_name(fw_features, "needs-l1d-flush-msr-pr-0-to-1");
++		if (np && of_property_read_bool(np, "disabled"))
++			enable--;
++
++		of_node_put(np);
++		of_node_put(fw_features);
++	}
++
++	setup_rfi_flush(type, enable > 0);
++}
++
+ static void __init pnv_setup_arch(void)
+ {
+ 	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+ 
++	pnv_setup_rfi_flush();
++
+ 	/* Initialize SMP */
+ 	pnv_smp_init();
+ 
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index e6e8b241d717..b4867b4d5a1e 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -507,6 +507,39 @@ static void __init find_and_init_phbs(void)
+ 	}
+ }
+ 
++static void pseries_setup_rfi_flush(void)
++{
++	struct h_cpu_char_result result;
++	enum l1d_flush_type types;
++	bool enable;
++	long rc;
++
++	/* Enable by default */
++	enable = true;
++
++	rc = plpar_get_cpu_characteristics(&result);
++	if (rc == H_SUCCESS) {
++		types = L1D_FLUSH_NONE;
++
++		if (result.character & H_CPU_CHAR_L1D_FLUSH_TRIG2)
++			types |= L1D_FLUSH_MTTRIG;
++		if (result.character & H_CPU_CHAR_L1D_FLUSH_ORI30)
++			types |= L1D_FLUSH_ORI;
++
++		/* Use fallback if nothing set in hcall */
++		if (types == L1D_FLUSH_NONE)
++			types = L1D_FLUSH_FALLBACK;
++
++		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
++			enable = false;
++	} else {
++		/* Default to fallback in case the hcall is not available */
++		types = L1D_FLUSH_FALLBACK;
++	}
++
++	setup_rfi_flush(types, enable);
++}
++
+ static void __init pSeries_setup_arch(void)
+ {
+ 	set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
+@@ -523,7 +556,9 @@ static void __init pSeries_setup_arch(void)
+ 
+ 	fwnmi_init();
+ 
+-	/* By default, only probe PCI (can be overriden by rtas_pci) */
++	pseries_setup_rfi_flush();
++
++	/* By default, only probe PCI (can be overridden by rtas_pci) */
+ 	pci_add_flags(PCI_PROBE_ONLY);
+ 
+ 	/* Find and initialize PCI host bridges */
+diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
+index 437e61159279..86f934255eb6 100644
+--- a/arch/s390/kernel/compat_linux.c
++++ b/arch/s390/kernel/compat_linux.c
+@@ -110,7 +110,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setregid16, u16, rgid, u16, egid)
+ 
+ COMPAT_SYSCALL_DEFINE1(s390_setgid16, u16, gid)
+ {
+-	return sys_setgid((gid_t)gid);
++	return sys_setgid(low2highgid(gid));
+ }
+ 
+ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+@@ -120,7 +120,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setreuid16, u16, ruid, u16, euid)
+ 
+ COMPAT_SYSCALL_DEFINE1(s390_setuid16, u16, uid)
+ {
+-	return sys_setuid((uid_t)uid);
++	return sys_setuid(low2highuid(uid));
+ }
+ 
+ COMPAT_SYSCALL_DEFINE3(s390_setresuid16, u16, ruid, u16, euid, u16, suid)
+@@ -173,12 +173,12 @@ COMPAT_SYSCALL_DEFINE3(s390_getresgid16, u16 __user *, rgidp,
+ 
+ COMPAT_SYSCALL_DEFINE1(s390_setfsuid16, u16, uid)
+ {
+-	return sys_setfsuid((uid_t)uid);
++	return sys_setfsuid(low2highuid(uid));
+ }
+ 
+ COMPAT_SYSCALL_DEFINE1(s390_setfsgid16, u16, gid)
+ {
+-	return sys_setfsgid((gid_t)gid);
++	return sys_setfsgid(low2highgid(gid));
+ }
+ 
+ static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
+@@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis
+ 		return retval;
+ 	}
+ 
++	groups_sort(group_info);
+ 	retval = set_current_groups(group_info);
+ 	put_group_info(group_info);
+ 
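
What these hunks actually fix is the 16-bit "no change"/error sentinel: (u16)-1 is 0xffff, and a plain widening cast turns it into real ID 65535 instead of the 32-bit sentinel (uid_t)-1. low2highuid()/low2highgid() from linux/highuid.h perform the translation; a standalone illustration (the uid type is renamed to avoid clashing with the userspace uid_t):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint16_t old_uid_t;
    typedef uint32_t kuid;    /* stand-in for the kernel's uid_t */

    /* Mirrors the kernel's low2highuid() translation. */
    #define low2highuid(uid) \
        ((uid) == (old_uid_t)-1 ? (kuid)-1 : (kuid)(uid))

    int main(void)
    {
        old_uid_t sentinel = (old_uid_t)-1;    /* 0xffff */

        printf("plain cast:  %u\n", (unsigned)(kuid)sentinel);        /* 65535 */
        printf("low2highuid: %u\n", (unsigned)low2highuid(sentinel)); /* 4294967295 */
        return 0;
    }
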
+diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
+index ff639342a8be..c5b997757988 100644
+--- a/arch/sh/kernel/traps_32.c
++++ b/arch/sh/kernel/traps_32.c
+@@ -607,7 +607,8 @@ asmlinkage void do_divide_error(unsigned long r4)
+ 		break;
+ 	}
+ 
+-	force_sig_info(SIGFPE, &info, current);
++	info.si_signo = SIGFPE;
++	force_sig_info(info.si_signo, &info, current);
+ }
+ #endif
+ 
+diff --git a/arch/um/Makefile b/arch/um/Makefile
+index 17d4460b1af3..01558aeeba50 100644
+--- a/arch/um/Makefile
++++ b/arch/um/Makefile
+@@ -116,7 +116,7 @@ archheaders:
+ archprepare: include/generated/user_constants.h
+ 
+ LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
+-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
++LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
+ 
+ CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+ 	$(call cc-option, -fno-stack-protector,) \
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index a3d283addbde..ffb2cb0495c3 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -982,7 +982,7 @@ config X86_MCE_THRESHOLD
+ 	def_bool y
+ 
+ config X86_MCE_INJECT
+-	depends on X86_MCE
++	depends on X86_MCE && X86_LOCAL_APIC
+ 	tristate "Machine check injector support"
+ 	---help---
+ 	  Provide support for injecting machine checks for testing purposes.
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 78c366462e70..48740eb2910c 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -64,12 +64,13 @@ GCOV_PROFILE := n
+ $(obj)/bzImage: asflags-y  := $(SVGA_MODE)
+ 
+ quiet_cmd_image = BUILD   $@
++silent_redirect_image = >/dev/null
+ cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
+-			       $(obj)/zoffset.h $@
++			       $(obj)/zoffset.h $@ $($(quiet)redirect_image)
+ 
+ $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
+ 	$(call if_changed,image)
+-	@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++	@$(kecho) 'Kernel: $@ is ready' ' (#'`cat .version`')'
+ 
+ OBJCOPYFLAGS_vmlinux.bin := -O binary -R .note -R .comment -S
+ $(obj)/vmlinux.bin: $(obj)/compressed/vmlinux FORCE
+diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+index 1c3b7ceb36d2..e7273a606a07 100644
+--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
++++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+@@ -55,29 +55,31 @@
+ #define RAB1bl %bl
+ #define RAB2bl %cl
+ 
++#define CD0 0x0(%rsp)
++#define CD1 0x8(%rsp)
++#define CD2 0x10(%rsp)
++
++# used only before/after all rounds
+ #define RCD0 %r8
+ #define RCD1 %r9
+ #define RCD2 %r10
+ 
+-#define RCD0d %r8d
+-#define RCD1d %r9d
+-#define RCD2d %r10d
+-
+-#define RX0 %rbp
+-#define RX1 %r11
+-#define RX2 %r12
++# used only during rounds
++#define RX0 %r8
++#define RX1 %r9
++#define RX2 %r10
+ 
+-#define RX0d %ebp
+-#define RX1d %r11d
+-#define RX2d %r12d
++#define RX0d %r8d
++#define RX1d %r9d
++#define RX2d %r10d
+ 
+-#define RY0 %r13
+-#define RY1 %r14
+-#define RY2 %r15
++#define RY0 %r11
++#define RY1 %r12
++#define RY2 %r13
+ 
+-#define RY0d %r13d
+-#define RY1d %r14d
+-#define RY2d %r15d
++#define RY0d %r11d
++#define RY1d %r12d
++#define RY2d %r13d
+ 
+ #define RT0 %rdx
+ #define RT1 %rsi
+@@ -85,6 +87,8 @@
+ #define RT0d %edx
+ #define RT1d %esi
+ 
++#define RT1bl %sil
++
+ #define do16bit_ror(rot, op1, op2, T0, T1, tmp1, tmp2, ab, dst) \
+ 	movzbl ab ## bl,		tmp2 ## d; \
+ 	movzbl ab ## bh,		tmp1 ## d; \
+@@ -92,6 +96,11 @@
+ 	op1##l T0(CTX, tmp2, 4),	dst ## d; \
+ 	op2##l T1(CTX, tmp1, 4),	dst ## d;
+ 
++#define swap_ab_with_cd(ab, cd, tmp)	\
++	movq cd, tmp;			\
++	movq ab, cd;			\
++	movq tmp, ab;
++
+ /*
+  * Combined G1 & G2 function. Reordered with help of rotates to have moves
+  * at beginning.
+@@ -110,15 +119,15 @@
+ 	/* G1,2 && G2,2 */ \
+ 	do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 0, x ## 0); \
+ 	do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 0, y ## 0); \
+-	xchgq cd ## 0, ab ## 0; \
++	swap_ab_with_cd(ab ## 0, cd ## 0, RT0); \
+ 	\
+ 	do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 1, x ## 1); \
+ 	do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 1, y ## 1); \
+-	xchgq cd ## 1, ab ## 1; \
++	swap_ab_with_cd(ab ## 1, cd ## 1, RT0); \
+ 	\
+ 	do16bit_ror(32, xor, xor, Tx2, Tx3, RT0, RT1, ab ## 2, x ## 2); \
+ 	do16bit_ror(16, xor, xor, Ty3, Ty0, RT0, RT1, ab ## 2, y ## 2); \
+-	xchgq cd ## 2, ab ## 2;
++	swap_ab_with_cd(ab ## 2, cd ## 2, RT0);
+ 
+ #define enc_round_end(ab, x, y, n) \
+ 	addl y ## d,			x ## d; \
+@@ -168,6 +177,16 @@
+ 	decrypt_round3(ba, dc, (n*2)+1); \
+ 	decrypt_round3(ba, dc, (n*2));
+ 
++#define push_cd()	\
++	pushq RCD2;	\
++	pushq RCD1;	\
++	pushq RCD0;
++
++#define pop_cd()	\
++	popq RCD0;	\
++	popq RCD1;	\
++	popq RCD2;
++
+ #define inpack3(in, n, xy, m) \
+ 	movq 4*(n)(in),			xy ## 0; \
+ 	xorq w+4*m(CTX),		xy ## 0; \
+@@ -223,11 +242,8 @@ ENTRY(__twofish_enc_blk_3way)
+ 	 *	%rdx: src, RIO
+ 	 *	%rcx: bool, if true: xor output
+ 	 */
+-	pushq %r15;
+-	pushq %r14;
+ 	pushq %r13;
+ 	pushq %r12;
+-	pushq %rbp;
+ 	pushq %rbx;
+ 
+ 	pushq %rcx; /* bool xor */
+@@ -235,40 +251,36 @@ ENTRY(__twofish_enc_blk_3way)
+ 
+ 	inpack_enc3();
+ 
+-	encrypt_cycle3(RAB, RCD, 0);
+-	encrypt_cycle3(RAB, RCD, 1);
+-	encrypt_cycle3(RAB, RCD, 2);
+-	encrypt_cycle3(RAB, RCD, 3);
+-	encrypt_cycle3(RAB, RCD, 4);
+-	encrypt_cycle3(RAB, RCD, 5);
+-	encrypt_cycle3(RAB, RCD, 6);
+-	encrypt_cycle3(RAB, RCD, 7);
++	push_cd();
++	encrypt_cycle3(RAB, CD, 0);
++	encrypt_cycle3(RAB, CD, 1);
++	encrypt_cycle3(RAB, CD, 2);
++	encrypt_cycle3(RAB, CD, 3);
++	encrypt_cycle3(RAB, CD, 4);
++	encrypt_cycle3(RAB, CD, 5);
++	encrypt_cycle3(RAB, CD, 6);
++	encrypt_cycle3(RAB, CD, 7);
++	pop_cd();
+ 
+ 	popq RIO; /* dst */
+-	popq %rbp; /* bool xor */
++	popq RT1; /* bool xor */
+ 
+-	testb %bpl, %bpl;
++	testb RT1bl, RT1bl;
+ 	jnz .L__enc_xor3;
+ 
+ 	outunpack_enc3(mov);
+ 
+ 	popq %rbx;
+-	popq %rbp;
+ 	popq %r12;
+ 	popq %r13;
+-	popq %r14;
+-	popq %r15;
+ 	ret;
+ 
+ .L__enc_xor3:
+ 	outunpack_enc3(xor);
+ 
+ 	popq %rbx;
+-	popq %rbp;
+ 	popq %r12;
+ 	popq %r13;
+-	popq %r14;
+-	popq %r15;
+ 	ret;
+ ENDPROC(__twofish_enc_blk_3way)
+ 
+@@ -278,35 +290,31 @@ ENTRY(twofish_dec_blk_3way)
+ 	 *	%rsi: dst
+ 	 *	%rdx: src, RIO
+ 	 */
+-	pushq %r15;
+-	pushq %r14;
+ 	pushq %r13;
+ 	pushq %r12;
+-	pushq %rbp;
+ 	pushq %rbx;
+ 
+ 	pushq %rsi; /* dst */
+ 
+ 	inpack_dec3();
+ 
+-	decrypt_cycle3(RAB, RCD, 7);
+-	decrypt_cycle3(RAB, RCD, 6);
+-	decrypt_cycle3(RAB, RCD, 5);
+-	decrypt_cycle3(RAB, RCD, 4);
+-	decrypt_cycle3(RAB, RCD, 3);
+-	decrypt_cycle3(RAB, RCD, 2);
+-	decrypt_cycle3(RAB, RCD, 1);
+-	decrypt_cycle3(RAB, RCD, 0);
++	push_cd();
++	decrypt_cycle3(RAB, CD, 7);
++	decrypt_cycle3(RAB, CD, 6);
++	decrypt_cycle3(RAB, CD, 5);
++	decrypt_cycle3(RAB, CD, 4);
++	decrypt_cycle3(RAB, CD, 3);
++	decrypt_cycle3(RAB, CD, 2);
++	decrypt_cycle3(RAB, CD, 1);
++	decrypt_cycle3(RAB, CD, 0);
++	pop_cd();
+ 
+ 	popq RIO; /* dst */
+ 
+ 	outunpack_dec3();
+ 
+ 	popq %rbx;
+-	popq %rbp;
+ 	popq %r12;
+ 	popq %r13;
+-	popq %r14;
+-	popq %r15;
+ 	ret;
+ ENDPROC(twofish_dec_blk_3way)
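
Two things happen in this rewrite: the CD block moves into stack slots
so the code no longer needs %rbp (using %rbp as a scratch register
breaks frame-pointer unwinding) or the callee-saved %r14/%r15, and
xchgq goes away because an xchg against a memory operand carries an
implicit LOCK prefix and would be needlessly slow. swap_ab_with_cd()
is just a three-move swap through a scratch register, the same shape
as this C sketch (tmp plays the role of RT0):

	#include <stdio.h>

	static void swap_words(unsigned long long *ab, unsigned long long *cd)
	{
		unsigned long long tmp = *cd;

		*cd = *ab;
		*ab = tmp;
	}

	int main(void)
	{
		unsigned long long ab = 0x1111, cd = 0x2222;

		swap_words(&ab, &cd);
		printf("%llx %llx\n", ab, cd);	/* 2222 1111 */
		return 0;
	}
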
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index c97effa6c72b..cb8fd023b23f 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -1,6 +1,8 @@
+ #ifndef _ASM_X86_ALTERNATIVE_H
+ #define _ASM_X86_ALTERNATIVE_H
+ 
++#ifndef __ASSEMBLY__
++
+ #include <linux/types.h>
+ #include <linux/stddef.h>
+ #include <linux/stringify.h>
+@@ -132,7 +134,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ 	".popsection\n"							\
+ 	".pushsection .altinstr_replacement, \"ax\"\n"			\
+ 	ALTINSTR_REPLACEMENT(newinstr, feature, 1)			\
+-	".popsection"
++	".popsection\n"
+ 
+ #define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ 	OLDINSTR_2(oldinstr, 1, 2)					\
+@@ -143,7 +145,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ 	".pushsection .altinstr_replacement, \"ax\"\n"			\
+ 	ALTINSTR_REPLACEMENT(newinstr1, feature1, 1)			\
+ 	ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)			\
+-	".popsection"
++	".popsection\n"
+ 
+ /*
+  * This must be included *after* the definition of ALTERNATIVE due to
+@@ -265,4 +267,6 @@ extern void *text_poke(void *addr, const void *opcode, size_t len);
+ extern int poke_int3_handler(struct pt_regs *regs);
+ extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+ 
++#endif /* __ASSEMBLY__ */
++
+ #endif /* _ASM_X86_ALTERNATIVE_H */
+diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
+new file mode 100644
+index 000000000000..44b8762fa0c7
+--- /dev/null
++++ b/arch/x86/include/asm/asm-prototypes.h
+@@ -0,0 +1,16 @@
++#include <asm/ftrace.h>
++#include <asm/uaccess.h>
++#include <asm/string.h>
++#include <asm/page.h>
++#include <asm/checksum.h>
++
++#include <asm-generic/asm-prototypes.h>
++
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/special_insns.h>
++#include <asm/preempt.h>
++
++#ifndef CONFIG_X86_CMPXCHG64
++extern void cmpxchg8b_emu(void);
++#endif
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index e2015452177d..37496d271033 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -88,4 +88,15 @@
+ /* For C file, we already have NOKPROBE_SYMBOL macro */
+ #endif
+ 
++#ifndef __ASSEMBLY__
++/*
++ * This output constraint should be used for any inline asm which has a "call"
++ * instruction.  Otherwise the asm may be inserted before the frame pointer
++ * gets set up by the containing function.  If you forget to do this, objtool
++ * may print a "call without frame pointer save/setup" warning.
++ */
++register unsigned long current_stack_pointer asm(_ASM_SP);
++#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
++#endif
++
+ #endif /* _ASM_X86_ASM_H */
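
current_stack_pointer is now a global register variable bound to the
stack pointer, so reading it costs nothing, and the same variable
doubles as the dummy "+r" operand behind ASM_CALL_CONSTRAINT, which
keeps inline-asm call instructions from being scheduled before the
frame pointer is set up. A user-space sketch of the construct,
assuming x86-64 where _ASM_SP expands to "rsp":

	#include <stdio.h>

	register unsigned long current_stack_pointer asm("rsp");

	int main(void)
	{
		/* compiles to a direct use of %rsp, no helper call needed */
		printf("stack page: %#lx\n", current_stack_pointer & ~(4096UL - 1));
		return 0;
	}
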
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 959e45b81fe2..0295dd893884 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -24,6 +24,30 @@
+ #define wmb()	asm volatile("sfence" ::: "memory")
+ #endif
+ 
++/**
++ * array_index_mask_nospec() - generate a mask that is ~0UL when the
++ * 	bounds check succeeds and 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ *
++ * Returns:
++ *     0 - (index < size)
++ */
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++		unsigned long size)
++{
++	unsigned long mask;
++
++	asm ("cmp %1,%2; sbb %0,%0;"
++			:"=r" (mask)
++			:"r"(size),"r" (index)
++			:"cc");
++	return mask;
++}
++
++/* Override the default implementation from linux/nospec.h. */
++#define array_index_mask_nospec array_index_mask_nospec
++
+ #ifdef CONFIG_X86_PPRO_FENCE
+ #define dma_rmb()	rmb()
+ #else
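
The cmp/sbb pair above computes the mask without any conditional
branch: the compare sets the carry flag exactly when index < size, and
sbb of a register with itself turns that flag into 0 or ~0UL. Callers
(array_index_nospec() in linux/nospec.h) then AND the mask into the
index so that, even under misspeculation past the bounds check, the
access is clamped to element 0. A portable sketch of the same
arithmetic, mirroring the generic fallback; it assumes size stays
below LONG_MAX and an arithmetic right shift:

	#include <stdio.h>

	static unsigned long index_mask(unsigned long index, unsigned long size)
	{
		/* the OR's top bit is clear only when index < size */
		return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
	}

	int main(void)
	{
		unsigned long table[4] = { 10, 20, 30, 40 };
		unsigned long i = 7;	/* attacker-controlled, out of range */

		printf("%lu\n", table[i & index_mask(i, 4)]);	/* reads table[0] */
		return 0;
	}
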
+diff --git a/arch/x86/include/asm/cmdline.h b/arch/x86/include/asm/cmdline.h
+index e01f7f7ccb0c..84ae170bc3d0 100644
+--- a/arch/x86/include/asm/cmdline.h
++++ b/arch/x86/include/asm/cmdline.h
+@@ -2,5 +2,7 @@
+ #define _ASM_X86_CMDLINE_H
+ 
+ int cmdline_find_option_bool(const char *cmdline_ptr, const char *option);
++int cmdline_find_option(const char *cmdline_ptr, const char *option,
++			char *buffer, int bufsize);
+ 
+ #endif /* _ASM_X86_CMDLINE_H */
+diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
+index 3d6606fb97d0..026c0b4ae086 100644
+--- a/arch/x86/include/asm/cpufeature.h
++++ b/arch/x86/include/asm/cpufeature.h
+@@ -348,6 +348,8 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
+ 	set_bit(bit, (unsigned long *)cpu_caps_set);	\
+ } while (0)
+ 
++#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
++
+ #define cpu_has_fpu		boot_cpu_has(X86_FEATURE_FPU)
+ #define cpu_has_de		boot_cpu_has(X86_FEATURE_DE)
+ #define cpu_has_pse		boot_cpu_has(X86_FEATURE_PSE)
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index f226df064660..8b17c2ad1048 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -21,11 +21,13 @@
+ # define DISABLE_K6_MTRR	(1<<(X86_FEATURE_K6_MTRR & 31))
+ # define DISABLE_CYRIX_ARR	(1<<(X86_FEATURE_CYRIX_ARR & 31))
+ # define DISABLE_CENTAUR_MCR	(1<<(X86_FEATURE_CENTAUR_MCR & 31))
++# define DISABLE_PCID		0
+ #else
+ # define DISABLE_VME		0
+ # define DISABLE_K6_MTRR	0
+ # define DISABLE_CYRIX_ARR	0
+ # define DISABLE_CENTAUR_MCR	0
++# define DISABLE_PCID		(1<<(X86_FEATURE_PCID & 31))
+ #endif /* CONFIG_X86_64 */
+ 
+ /*
+@@ -35,7 +37,7 @@
+ #define DISABLED_MASK1	0
+ #define DISABLED_MASK2	0
+ #define DISABLED_MASK3	(DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+-#define DISABLED_MASK4	0
++#define DISABLED_MASK4	(DISABLE_PCID)
+ #define DISABLED_MASK5	0
+ #define DISABLED_MASK6	0
+ #define DISABLED_MASK7	0
+diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
+index 0f5fb6b6567e..ebaf64d0a785 100644
+--- a/arch/x86/include/asm/hardirq.h
++++ b/arch/x86/include/asm/hardirq.h
+@@ -21,10 +21,6 @@ typedef struct {
+ #ifdef CONFIG_SMP
+ 	unsigned int irq_resched_count;
+ 	unsigned int irq_call_count;
+-	/*
+-	 * irq_tlb_count is double-counted in irq_call_count, so it must be
+-	 * subtracted from irq_call_count when displaying irq_call_count
+-	 */
+ 	unsigned int irq_tlb_count;
+ #endif
+ #ifdef CONFIG_X86_THERMAL_VECTOR
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+new file mode 100644
+index 000000000000..6999f7d01a0d
+--- /dev/null
++++ b/arch/x86/include/asm/intel-family.h
+@@ -0,0 +1,68 @@
++#ifndef _ASM_X86_INTEL_FAMILY_H
++#define _ASM_X86_INTEL_FAMILY_H
++
++/*
++ * "Big Core" Processors (Branded as Core, Xeon, etc...)
++ *
++ * The "_X" parts are generally the EP and EX Xeons, or the
++ * "Extreme" ones, like Broadwell-E.
++ *
++ * Things ending in "2" are usually because we have no better
++ * name for them.  There's no processor called "WESTMERE2".
++ */
++
++#define INTEL_FAM6_CORE_YONAH		0x0E
++#define INTEL_FAM6_CORE2_MEROM		0x0F
++#define INTEL_FAM6_CORE2_MEROM_L	0x16
++#define INTEL_FAM6_CORE2_PENRYN		0x17
++#define INTEL_FAM6_CORE2_DUNNINGTON	0x1D
++
++#define INTEL_FAM6_NEHALEM		0x1E
++#define INTEL_FAM6_NEHALEM_EP		0x1A
++#define INTEL_FAM6_NEHALEM_EX		0x2E
++#define INTEL_FAM6_WESTMERE		0x25
++#define INTEL_FAM6_WESTMERE2		0x1F
++#define INTEL_FAM6_WESTMERE_EP		0x2C
++#define INTEL_FAM6_WESTMERE_EX		0x2F
++
++#define INTEL_FAM6_SANDYBRIDGE		0x2A
++#define INTEL_FAM6_SANDYBRIDGE_X	0x2D
++#define INTEL_FAM6_IVYBRIDGE		0x3A
++#define INTEL_FAM6_IVYBRIDGE_X		0x3E
++
++#define INTEL_FAM6_HASWELL_CORE		0x3C
++#define INTEL_FAM6_HASWELL_X		0x3F
++#define INTEL_FAM6_HASWELL_ULT		0x45
++#define INTEL_FAM6_HASWELL_GT3E		0x46
++
++#define INTEL_FAM6_BROADWELL_CORE	0x3D
++#define INTEL_FAM6_BROADWELL_XEON_D	0x56
++#define INTEL_FAM6_BROADWELL_GT3E	0x47
++#define INTEL_FAM6_BROADWELL_X		0x4F
++
++#define INTEL_FAM6_SKYLAKE_MOBILE	0x4E
++#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E
++#define INTEL_FAM6_SKYLAKE_X		0x55
++#define INTEL_FAM6_KABYLAKE_MOBILE	0x8E
++#define INTEL_FAM6_KABYLAKE_DESKTOP	0x9E
++
++/* "Small Core" Processors (Atom) */
++
++#define INTEL_FAM6_ATOM_PINEVIEW	0x1C
++#define INTEL_FAM6_ATOM_LINCROFT	0x26
++#define INTEL_FAM6_ATOM_PENWELL		0x27
++#define INTEL_FAM6_ATOM_CLOVERVIEW	0x35
++#define INTEL_FAM6_ATOM_CEDARVIEW	0x36
++#define INTEL_FAM6_ATOM_SILVERMONT1	0x37 /* BayTrail/BYT / Valleyview */
++#define INTEL_FAM6_ATOM_SILVERMONT2	0x4D /* Avaton/Rangely */
++#define INTEL_FAM6_ATOM_AIRMONT		0x4C /* CherryTrail / Braswell */
++#define INTEL_FAM6_ATOM_MERRIFIELD1	0x4A /* Tangier */
++#define INTEL_FAM6_ATOM_MERRIFIELD2	0x5A /* Annidale */
++#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
++#define INTEL_FAM6_ATOM_DENVERTON	0x5F /* Goldmont Microserver */
++
++/* Xeon Phi */
++
++#define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
++
++#endif /* _ASM_X86_INTEL_FAMILY_H */
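
These model numbers pair with family 6 checks; a consumer typically
compares boot_cpu_data.x86 and .x86_model against one of the defines.
A stand-alone illustration (the struct and values are invented for the
example):

	#include <stdio.h>

	#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E

	struct cpuinfo { unsigned char x86; unsigned char x86_model; };

	static int is_skylake_desktop(const struct cpuinfo *c)
	{
		return c->x86 == 6 && c->x86_model == INTEL_FAM6_SKYLAKE_DESKTOP;
	}

	int main(void)
	{
		struct cpuinfo c = { .x86 = 6, .x86_model = 0x5E };

		printf("%d\n", is_skylake_desktop(&c));	/* 1 */
		return 0;
	}
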
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 606f5fff1989..6e014befd522 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -933,7 +933,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ 			int emulation_type)
+ {
+-	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
++	return x86_emulate_instruction(vcpu, 0,
++			emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
+ }
+ 
+ void kvm_enable_efer_bits(u64);
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 23ba6765b718..4ac06db325a2 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -150,8 +150,8 @@ extern struct cpuinfo_x86	boot_cpu_data;
+ extern struct cpuinfo_x86	new_cpu_data;
+ 
+ extern struct tss_struct	doublefault_tss;
+-extern __u32			cpu_caps_cleared[NCAPINTS];
+-extern __u32			cpu_caps_set[NCAPINTS];
++extern __u32			cpu_caps_cleared[NCAPINTS + NBUGINTS];
++extern __u32			cpu_caps_set[NCAPINTS + NBUGINTS];
+ 
+ #ifdef CONFIG_SMP
+ DECLARE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+@@ -701,7 +701,7 @@ static inline void sync_core(void)
+ {
+ 	int tmp;
+ 
+-#ifdef CONFIG_M486
++#ifdef CONFIG_X86_32
+ 	/*
+ 	 * Do a CPUID if available, otherwise do a jump.  The jump
+ 	 * can conveniently enough be the jump around CPUID.
+diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
+index b4bdec3e9523..b58daa40eae9 100644
+--- a/arch/x86/include/asm/thread_info.h
++++ b/arch/x86/include/asm/thread_info.h
+@@ -184,17 +184,6 @@ static inline struct thread_info *current_thread_info(void)
+ 	return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
+ }
+ 
+-static inline unsigned long current_stack_pointer(void)
+-{
+-	unsigned long sp;
+-#ifdef CONFIG_X86_64
+-	asm("mov %%rsp,%0" : "=g" (sp));
+-#else
+-	asm("mov %%esp,%0" : "=g" (sp));
+-#endif
+-	return sp;
+-}
+-
+ #else /* !__ASSEMBLY__ */
+ 
+ /* Load thread_info address into "reg" */
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index 7e459b7ee708..13c1a094cead 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -7,6 +7,54 @@
+ #include <asm/processor.h>
+ #include <asm/special_insns.h>
+ 
++static inline void __invpcid(unsigned long pcid, unsigned long addr,
++			     unsigned long type)
++{
++	struct { u64 d[2]; } desc = { { pcid, addr } };
++
++	/*
++	 * The memory clobber is because the whole point is to invalidate
++	 * stale TLB entries and, especially if we're flushing global
++	 * mappings, we don't want the compiler to reorder any subsequent
++	 * memory accesses before the TLB flush.
++	 *
++	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
++	 * invpcid (%rcx), %rax in long mode.
++	 */
++	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
++		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
++}
++
++#define INVPCID_TYPE_INDIV_ADDR		0
++#define INVPCID_TYPE_SINGLE_CTXT	1
++#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
++#define INVPCID_TYPE_ALL_NON_GLOBAL	3
++
++/* Flush all mappings for a given pcid and addr, not including globals. */
++static inline void invpcid_flush_one(unsigned long pcid,
++				     unsigned long addr)
++{
++	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
++}
++
++/* Flush all mappings for a given PCID, not including globals. */
++static inline void invpcid_flush_single_context(unsigned long pcid)
++{
++	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
++}
++
++/* Flush all mappings, including globals, for all PCIDs. */
++static inline void invpcid_flush_all(void)
++{
++	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
++}
++
++/* Flush all mappings for all PCIDs except globals. */
++static inline void invpcid_flush_all_nonglobals(void)
++{
++	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
++}
++
+ #ifdef CONFIG_PARAVIRT
+ #include <asm/paravirt.h>
+ #else
+@@ -86,7 +134,14 @@ static inline void cr4_set_bits_and_update_boot(unsigned long mask)
+ 
+ static inline void __native_flush_tlb(void)
+ {
++	/*
++	 * If current->mm == NULL then we borrow a mm which may change during a
++	 * task switch and therefore we must not be preempted while we write CR3
++	 * back:
++	 */
++	preempt_disable();
+ 	native_write_cr3(native_read_cr3());
++	preempt_enable();
+ }
+ 
+ static inline void __native_flush_tlb_global_irq_disabled(void)
+@@ -104,6 +159,15 @@ static inline void __native_flush_tlb_global(void)
+ {
+ 	unsigned long flags;
+ 
++	if (static_cpu_has(X86_FEATURE_INVPCID)) {
++		/*
++		 * Using INVPCID is considerably faster than a pair of writes
++		 * to CR4 sandwiched inside an IRQ flag save/restore.
++		 */
++		invpcid_flush_all();
++		return;
++	}
++
+ 	/*
+ 	 * Read-modify-write to CR4 - protect it from preemption and
+ 	 * from interrupts. (Use the raw variant because this code can
+@@ -127,6 +191,14 @@ static inline void __flush_tlb_all(void)
+ 		__flush_tlb_global();
+ 	else
+ 		__flush_tlb();
++
++	/*
++	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
++	 * we'd end up flushing kernel translations for the current ASID but
++	 * we might fail to flush kernel translations for other cached ASIDs.
++	 *
++	 * To avoid this issue, we force PCID off if PGE is off.
++	 */
+ }
+ 
+ static inline void __flush_tlb_one(unsigned long addr)
+@@ -140,7 +212,6 @@ static inline void __flush_tlb_one(unsigned long addr)
+ /*
+  * TLB flushing:
+  *
+- *  - flush_tlb() flushes the current mm struct TLBs
+  *  - flush_tlb_all() flushes all processes TLBs
+  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+  *  - flush_tlb_page(vma, vmaddr) flushes one page
+@@ -172,11 +243,6 @@ static inline void flush_tlb_all(void)
+ 	__flush_tlb_all();
+ }
+ 
+-static inline void flush_tlb(void)
+-{
+-	__flush_tlb_up();
+-}
+-
+ static inline void local_flush_tlb(void)
+ {
+ 	__flush_tlb_up();
+@@ -238,14 +304,11 @@ static inline void flush_tlb_kernel_range(unsigned long start,
+ 		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+ 
+ extern void flush_tlb_all(void);
+-extern void flush_tlb_current_task(void);
+ extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+ extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 				unsigned long end, unsigned long vmflag);
+ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+ 
+-#define flush_tlb()	flush_tlb_current_task()
+-
+ void native_flush_tlb_others(const struct cpumask *cpumask,
+ 				struct mm_struct *mm,
+ 				unsigned long start, unsigned long end);
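
INVPCID takes its 16-byte descriptor (PCID in the low quadword, the
address in the high one) from memory and the operation type from a
register; the .byte sequence above hand-encodes "invpcid (%rcx), %rax"
because assemblers old enough to build this kernel may not know the
mnemonic. On a toolchain that does, the helper could be written
directly, as in this sketch under that assumption (kernel context;
INVPCID is privileged, so this cannot run in user space):

	static inline void invpcid(unsigned long type, unsigned long pcid,
				   unsigned long addr)
	{
		struct { unsigned long long d[2]; } desc = { { pcid, addr } };

		asm volatile("invpcid %[desc], %[type]"
			     : : [desc] "m" (desc), [type] "r" (type) : "memory");
	}
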
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index d1918a8c4393..9b5f8e6a9864 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -41,17 +41,6 @@ static int __init setup_noreplace_smp(char *str)
+ }
+ __setup("noreplace-smp", setup_noreplace_smp);
+ 
+-#ifdef CONFIG_PARAVIRT
+-static int __initdata_or_module noreplace_paravirt = 0;
+-
+-static int __init setup_noreplace_paravirt(char *str)
+-{
+-	noreplace_paravirt = 1;
+-	return 1;
+-}
+-__setup("noreplace-paravirt", setup_noreplace_paravirt);
+-#endif
+-
+ #define DPRINTK(fmt, args...)						\
+ do {									\
+ 	if (debug_alternative)						\
+@@ -326,9 +315,12 @@ done:
+ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+ {
+ 	unsigned long flags;
++	int i;
+ 
+-	if (instr[0] != 0x90)
+-		return;
++	for (i = 0; i < a->padlen; i++) {
++		if (instr[i] != 0x90)
++			return;
++	}
+ 
+ 	local_irq_save(flags);
+ 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+@@ -571,9 +563,6 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
+ 	struct paravirt_patch_site *p;
+ 	char insnbuf[MAX_PATCH_LEN];
+ 
+-	if (noreplace_paravirt)
+-		return;
+-
+ 	for (p = start; p < end; p++) {
+ 		unsigned int used;
+ 
+diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
+index f4dc2462a1ac..484ffab4d3e8 100644
+--- a/arch/x86/kernel/apic/io_apic.c
++++ b/arch/x86/kernel/apic/io_apic.c
+@@ -2728,8 +2728,8 @@ static struct resource * __init ioapic_setup_resources(void)
+ 		res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ 		snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
+ 		mem += IOAPIC_RESOURCE_NAME_SIZE;
++		ioapics[i].iomem_res = &res[num];
+ 		num++;
+-		ioapics[i].iomem_res = res;
+ 	}
+ 
+ 	ioapic_resources = res;
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 03445346ee0a..4c7dd836304a 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -65,6 +65,14 @@ static void __init check_fpu(void)
+ 
+ void __init check_bugs(void)
+ {
++#ifdef CONFIG_X86_32
++	/*
++	 * Regardless of whether PCID is enumerated, the SDM says
++	 * that it can't be enabled in 32-bit mode.
++	 */
++	setup_clear_cpu_cap(X86_FEATURE_PCID);
++#endif
++
+ 	identify_boot_cpu();
+ #ifndef CONFIG_SMP
+ 	pr_info("CPU: ");
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 5732326ec126..9613a72723cd 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -172,6 +172,40 @@ static int __init x86_xsaves_setup(char *s)
+ }
+ __setup("noxsaves", x86_xsaves_setup);
+ 
++#ifdef CONFIG_X86_64
++static int __init x86_pcid_setup(char *s)
++{
++	/* require an exact match without trailing characters */
++	if (strlen(s))
++		return 0;
++
++	/* do not emit a message if the feature is not present */
++	if (!boot_cpu_has(X86_FEATURE_PCID))
++		return 1;
++
++	setup_clear_cpu_cap(X86_FEATURE_PCID);
++	pr_info("nopcid: PCID feature disabled\n");
++	return 1;
++}
++__setup("nopcid", x86_pcid_setup);
++#endif
++
++static int __init x86_noinvpcid_setup(char *s)
++{
++	/* noinvpcid doesn't accept parameters */
++	if (s)
++		return -EINVAL;
++
++	/* do not emit a message if the feature is not present */
++	if (!boot_cpu_has(X86_FEATURE_INVPCID))
++		return 0;
++
++	setup_clear_cpu_cap(X86_FEATURE_INVPCID);
++	pr_info("noinvpcid: INVPCID feature disabled\n");
++	return 0;
++}
++early_param("noinvpcid", x86_noinvpcid_setup);
++
+ #ifdef CONFIG_X86_32
+ static int cachesize_override = -1;
+ static int disable_x86_serial_nr = 1;
+@@ -305,6 +339,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ 	}
+ }
+ 
++static void setup_pcid(struct cpuinfo_x86 *c)
++{
++	if (cpu_has(c, X86_FEATURE_PCID)) {
++		if (cpu_has(c, X86_FEATURE_PGE)) {
++			cr4_set_bits(X86_CR4_PCIDE);
++		} else {
++			/*
++			 * flush_tlb_all(), as currently implemented, won't
++			 * work if PCID is on but PGE is not.  Since that
++			 * combination doesn't exist on real hardware, there's
++			 * no reason to try to fully support it, but it's
++			 * polite to avoid corrupting data if we're on
++			 * an improperly configured VM.
++			 */
++			clear_cpu_cap(c, X86_FEATURE_PCID);
++		}
++	}
++}
++
+ /*
+  * Some CPU features depend on higher CPUID levels, which may not always
+  * be available due to CPUID level capping or broken virtualization
+@@ -383,8 +436,8 @@ static const char *table_lookup_model(struct cpuinfo_x86 *c)
+ 	return NULL;		/* Not found */
+ }
+ 
+-__u32 cpu_caps_cleared[NCAPINTS];
+-__u32 cpu_caps_set[NCAPINTS];
++__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS];
++__u32 cpu_caps_set[NCAPINTS + NBUGINTS];
+ 
+ void load_percpu_segment(int cpu)
+ {
+@@ -613,6 +666,16 @@ void cpu_detect(struct cpuinfo_x86 *c)
+ 	}
+ }
+ 
++static void apply_forced_caps(struct cpuinfo_x86 *c)
++{
++	int i;
++
++	for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
++		c->x86_capability[i] &= ~cpu_caps_cleared[i];
++		c->x86_capability[i] |= cpu_caps_set[i];
++	}
++}
++
+ void get_cpu_cap(struct cpuinfo_x86 *c)
+ {
+ 	u32 tfms, xlvl;
+@@ -904,11 +967,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+ 	if (this_cpu->c_identify)
+ 		this_cpu->c_identify(c);
+ 
+-	/* Clear/Set all flags overriden by options, after probe */
+-	for (i = 0; i < NCAPINTS; i++) {
+-		c->x86_capability[i] &= ~cpu_caps_cleared[i];
+-		c->x86_capability[i] |= cpu_caps_set[i];
+-	}
++ 	/* Clear/Set all flags overridden by options, after probe */
++	apply_forced_caps(c);
+ 
+ #ifdef CONFIG_X86_64
+ 	c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+@@ -934,6 +994,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+ 	setup_smep(c);
+ 	setup_smap(c);
+ 
++	/* Set up PCID */
++	setup_pcid(c);
++
+ 	/*
+ 	 * The vendor-specific functions might have changed features.
+ 	 * Now we do "generic changes."
+@@ -966,10 +1029,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
+ 	 * Clear/Set all flags overriden by options, need do it
+ 	 * before following smp all cpus cap AND.
+ 	 */
+-	for (i = 0; i < NCAPINTS; i++) {
+-		c->x86_capability[i] &= ~cpu_caps_cleared[i];
+-		c->x86_capability[i] |= cpu_caps_set[i];
+-	}
++	apply_forced_caps(c);
+ 
+ 	/*
+ 	 * On SMP, boot_cpu_data holds the common feature set between
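
apply_forced_caps() just folds the boot-time overrides into a CPU's
capability words, now covering the NBUGINTS bug bits as well. The
operation itself is two bitmask steps, as in this toy example (hex
values invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned int caps    = 0xb;	/* what CPUID reported */
		unsigned int cleared = 0x2;	/* e.g. "nopcid" on the command line */
		unsigned int forced  = 0x4;	/* e.g. a quirk forcing a bit on */

		caps = (caps & ~cleared) | forced;
		printf("effective caps: %#x\n", caps);	/* 0xd */
		return 0;
	}
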
+diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
+index edcb0e28c336..13fb13334f2a 100644
+--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
+@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
+ 		ci_leaf_init(this_leaf++, &id4_regs);
+ 		__cache_cpumap_setup(cpu, idx, &id4_regs);
+ 	}
++	this_cpu_ci->cpu_map_populated = true;
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+index 4cfba4371a71..101bfae369e1 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
+@@ -152,7 +152,6 @@ static void raise_mce(struct mce *m)
+ 	if (context == MCJ_CTX_RANDOM)
+ 		return;
+ 
+-#ifdef CONFIG_X86_LOCAL_APIC
+ 	if (m->inject_flags & (MCJ_IRQ_BROADCAST | MCJ_NMI_BROADCAST)) {
+ 		unsigned long start;
+ 		int cpu;
+@@ -193,9 +192,7 @@ static void raise_mce(struct mce *m)
+ 		raise_local();
+ 		put_cpu();
+ 		put_online_cpus();
+-	} else
+-#endif
+-	{
++	} else {
+ 		preempt_disable();
+ 		raise_local();
+ 		preempt_enable();
+diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
+index 12829c3ced3c..ff422a92f063 100644
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -153,6 +153,7 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ #define F14H_MPB_MAX_SIZE 1824
+ #define F15H_MPB_MAX_SIZE 4096
+ #define F16H_MPB_MAX_SIZE 3458
++#define F17H_MPB_MAX_SIZE 3200
+ 
+ 	switch (family) {
+ 	case 0x14:
+@@ -164,6 +165,9 @@ static unsigned int verify_patch_size(u8 family, u32 patch_size,
+ 	case 0x16:
+ 		max_size = F16H_MPB_MAX_SIZE;
+ 		break;
++	case 0x17:
++		max_size = F17H_MPB_MAX_SIZE;
++		break;
+ 	default:
+ 		max_size = F1XH_MPB_MAX_SIZE;
+ 		break;
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 6ca31bf3ccbd..1009c82088ed 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -276,9 +276,17 @@ static bool is_blacklisted(unsigned int cpu)
+ {
+ 	struct cpuinfo_x86 *c = &cpu_data(cpu);
+ 
+-	if (c->x86 == 6 && c->x86_model == 79) {
+-		pr_err_once("late loading on model 79 is disabled.\n");
+-		return true;
++	/*
++	 * Late loading on model 79 with microcode revision less than 0x0b000021
++	 * may result in a system hang. This behavior is documented in item
++	 * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
++	 */
++	if (c->x86 == 6 &&
++	    c->x86_model == 79 &&
++	    c->x86_mask == 0x01 &&
++	    c->microcode < 0x0b000021) {
++		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
++		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
+ 	}
+ 
+ 	return false;
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 83f33a2e662f..904b31ebc419 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -187,8 +187,8 @@ static void release_pmc_hardware(void) {}
+ 
+ static bool check_hw_exists(void)
+ {
+-	u64 val, val_fail, val_new= ~0;
+-	int i, reg, reg_fail, ret = 0;
++	u64 val, val_fail = -1, val_new= ~0;
++	int i, reg, reg_fail = -1, ret = 0;
+ 	int bios_fail = 0;
+ 	int reg_safe = -1;
+ 
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 7e429c99c728..63dd2c971db8 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -670,14 +670,17 @@ __PAGE_ALIGNED_BSS
+ initial_pg_pmd:
+ 	.fill 1024*KPMDS,4,0
+ #else
+-ENTRY(initial_page_table)
++.globl initial_page_table
++initial_page_table:
+ 	.fill 1024,4,0
+ #endif
+ initial_pg_fixmap:
+ 	.fill 1024,4,0
+-ENTRY(empty_zero_page)
++.globl empty_zero_page
++empty_zero_page:
+ 	.fill 4096,1,0
+-ENTRY(swapper_pg_dir)
++.globl swapper_pg_dir
++swapper_pg_dir:
+ 	.fill 1024,4,0
+ 
+ /*
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index e5952c225532..b6460c5a9cab 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -96,8 +96,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
+ 	seq_puts(p, "  Rescheduling interrupts\n");
+ 	seq_printf(p, "%*s: ", prec, "CAL");
+ 	for_each_online_cpu(j)
+-		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
+-					irq_stats(j)->irq_tlb_count);
++		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+ 	seq_puts(p, "  Function call interrupts\n");
+ 	seq_printf(p, "%*s: ", prec, "TLB");
+ 	for_each_online_cpu(j)
+diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
+index f9fd86a7fcc7..9f4ffc122d9e 100644
+--- a/arch/x86/kernel/irq_32.c
++++ b/arch/x86/kernel/irq_32.c
+@@ -71,7 +71,7 @@ static void call_on_stack(void *func, void *stack)
+ 
+ static inline void *current_stack(void)
+ {
+-	return (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
++	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
+ }
+ 
+ static inline int
+@@ -96,7 +96,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
+ 
+ 	/* Save the next esp at the bottom of the stack */
+ 	prev_esp = (u32 *)irqstk;
+-	*prev_esp = current_stack_pointer();
++	*prev_esp = current_stack_pointer;
+ 
+ 	if (unlikely(overflow))
+ 		call_on_stack(print_stack_overflow, isp);
+@@ -149,7 +149,7 @@ void do_softirq_own_stack(void)
+ 
+ 	/* Push the previous esp onto the stack */
+ 	prev_esp = (u32 *)irqstk;
+-	*prev_esp = current_stack_pointer();
++	*prev_esp = current_stack_pointer;
+ 
+ 	call_on_stack(__do_softirq, isp);
+ }
+diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
+index a1da6737ba5b..a91d9b9b4bde 100644
+--- a/arch/x86/kernel/paravirt_patch_64.c
++++ b/arch/x86/kernel/paravirt_patch_64.c
+@@ -9,7 +9,6 @@ DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
+ DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
+ DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
+-DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
+ DEF_NATIVE(pv_cpu_ops, clts, "clts");
+ DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");
+ 
+@@ -57,7 +56,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
+ 		PATCH_SITE(pv_mmu_ops, read_cr3);
+ 		PATCH_SITE(pv_mmu_ops, write_cr3);
+ 		PATCH_SITE(pv_cpu_ops, clts);
+-		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
+ 		PATCH_SITE(pv_cpu_ops, wbinvd);
+ 
+ 	patch_site:
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 0549ae3cb332..d9ea27ec9dbd 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -93,6 +93,10 @@ void __noreturn machine_real_restart(unsigned int type)
+ 	load_cr3(initial_page_table);
+ #else
+ 	write_cr3(real_mode_header->trampoline_pgd);
++
++	/* Exiting long mode will fail if CR4.PCIDE is set. */
++	if (static_cpu_has(X86_FEATURE_PCID))
++		cr4_clear_bits(X86_CR4_PCIDE);
+ #endif
+ 
+ 	/* Jump to the identity-mapped low memory code */
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 50e547eac8cd..f6911cc90a81 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -107,25 +107,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+ 	spin_lock_irqsave(&rtc_lock, flags);
+ 	CMOS_WRITE(0xa, 0xf);
+ 	spin_unlock_irqrestore(&rtc_lock, flags);
+-	local_flush_tlb();
+-	pr_debug("1.\n");
+ 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
+ 							start_eip >> 4;
+-	pr_debug("2.\n");
+ 	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
+ 							start_eip & 0xf;
+-	pr_debug("3.\n");
+ }
+ 
+ static inline void smpboot_restore_warm_reset_vector(void)
+ {
+ 	unsigned long flags;
+ 
+-	/*
+-	 * Install writable page 0 entry to set BIOS data area.
+-	 */
+-	local_flush_tlb();
+-
+ 	/*
+ 	 * Paranoid:  Set warm reset code and vector here back
+ 	 * to default values.
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 020248f2cec4..e78c6783a2de 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -175,7 +175,7 @@ void ist_begin_non_atomic(struct pt_regs *regs)
+ 	 * from double_fault.
+ 	 */
+ 	BUG_ON((unsigned long)(current_top_of_stack() -
+-			       current_stack_pointer()) >= THREAD_SIZE);
++			       current_stack_pointer) >= THREAD_SIZE);
+ 
+ 	preempt_count_sub(HARDIRQ_OFFSET);
+ }
+diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
+index fc9db6ef2a95..e0ae0a8ad5bd 100644
+--- a/arch/x86/kernel/vm86_32.c
++++ b/arch/x86/kernel/vm86_32.c
+@@ -194,7 +194,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
+ 	pte_unmap_unlock(pte, ptl);
+ out:
+ 	up_write(&mm->mmap_sem);
+-	flush_tlb();
++	flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
+ }
+ 
+ 
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
+index 413a7bf9efbb..64c1920515ea 100644
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -22,7 +22,7 @@ config KVM
+ 	depends on HAVE_KVM
+ 	depends on HIGH_RES_TIMERS
+ 	# for TASKSTATS/TASK_DELAY_ACCT:
+-	depends on NET
++	depends on NET && MULTIUSER
+ 	select PREEMPT_NOTIFIERS
+ 	select MMU_NOTIFIER
+ 	select ANON_INODES
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index d877a59f8de8..dd49efe915e1 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -4480,6 +4480,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ 	bool op_prefix = false;
+ 	bool has_seg_override = false;
+ 	struct opcode opcode;
++	u16 dummy;
++	struct desc_struct desc;
+ 
+ 	ctxt->memop.type = OP_NONE;
+ 	ctxt->memopp = NULL;
+@@ -4498,6 +4500,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+ 	switch (mode) {
+ 	case X86EMUL_MODE_REAL:
+ 	case X86EMUL_MODE_VM86:
++		def_op_bytes = def_ad_bytes = 2;
++		ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
++		if (desc.d)
++			def_op_bytes = def_ad_bytes = 4;
++		break;
+ 	case X86EMUL_MODE_PROT16:
+ 		def_op_bytes = def_ad_bytes = 2;
+ 		break;
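
The case being fixed here is firmware dropping back to real or VM86
mode while the hidden CS descriptor's D bit is still set: the D bit,
not the mode, selects the default operand and address size, so the
decoder must consult it instead of assuming 16-bit. Reduced to its
decision:

	#include <stdio.h>

	/* default operand/address size in bytes, from the CS descriptor D bit */
	static int default_op_bytes(int desc_d)
	{
		return desc_d ? 4 : 2;
	}

	int main(void)
	{
		printf("D=0: %d, D=1: %d\n", default_op_bytes(0), default_op_bytes(1));
		return 0;
	}
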
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c5ecf85227e0..6b87d8bcdcdd 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3947,6 +3947,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
+ 		"mov %%r13, %c[r13](%[svm]) \n\t"
+ 		"mov %%r14, %c[r14](%[svm]) \n\t"
+ 		"mov %%r15, %c[r15](%[svm]) \n\t"
++#endif
++		/*
++		* Clear host registers marked as clobbered to prevent
++		* speculative use.
++		*/
++		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
++		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
++		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
++		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
++		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
++#ifdef CONFIG_X86_64
++		"xor %%r8, %%r8 \n\t"
++		"xor %%r9, %%r9 \n\t"
++		"xor %%r10, %%r10 \n\t"
++		"xor %%r11, %%r11 \n\t"
++		"xor %%r12, %%r12 \n\t"
++		"xor %%r13, %%r13 \n\t"
++		"xor %%r14, %%r14 \n\t"
++		"xor %%r15, %%r15 \n\t"
+ #endif
+ 		"pop %%" _ASM_BP
+ 		:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 84f2825f19b5..ffd5502dd215 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -32,6 +32,7 @@
+ #include <linux/slab.h>
+ #include <linux/tboot.h>
+ #include <linux/hrtimer.h>
++#include <linux/nospec.h>
+ #include "kvm_cache_regs.h"
+ #include "x86.h"
+ 
+@@ -770,13 +771,18 @@ static const unsigned short vmcs_field_to_offset_table[] = {
+ 
+ static inline short vmcs_field_to_offset(unsigned long field)
+ {
+-	BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
++	const size_t size = ARRAY_SIZE(vmcs_field_to_offset_table);
++	unsigned short offset;
+ 
+-	if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
+-	    vmcs_field_to_offset_table[field] == 0)
++	BUILD_BUG_ON(size > SHRT_MAX);
++	if (field >= size)
+ 		return -ENOENT;
+ 
+-	return vmcs_field_to_offset_table[field];
++	field = array_index_nospec(field, size);
++	offset = vmcs_field_to_offset_table[field];
++	if (offset == 0)
++		return -ENOENT;
++	return offset;
+ }
+ 
+ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
+@@ -4356,7 +4362,7 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
+ 	return enable_apicv && irqchip_in_kernel(kvm);
+ }
+ 
+-static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
++static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+ 	int max_irr;
+@@ -4367,19 +4373,15 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ 	    vmx->nested.pi_pending) {
+ 		vmx->nested.pi_pending = false;
+ 		if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+-			return 0;
++			return;
+ 
+ 		max_irr = find_last_bit(
+ 			(unsigned long *)vmx->nested.pi_desc->pir, 256);
+ 
+ 		if (max_irr == 256)
+-			return 0;
++			return;
+ 
+ 		vapic_page = kmap(vmx->nested.virtual_apic_page);
+-		if (!vapic_page) {
+-			WARN_ON(1);
+-			return -ENOMEM;
+-		}
+ 		__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
+ 		kunmap(vmx->nested.virtual_apic_page);
+ 
+@@ -4390,7 +4392,6 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+ 			vmcs_write16(GUEST_INTR_STATUS, status);
+ 		}
+ 	}
+-	return 0;
+ }
+ 
+ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
+@@ -4412,14 +4413,15 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
+ 
+ 	if (is_guest_mode(vcpu) &&
+ 	    vector == vmx->nested.posted_intr_nv) {
+-		/* the PIR and ON have been set by L1. */
+-		kvm_vcpu_trigger_posted_interrupt(vcpu);
+ 		/*
+ 		 * If a posted intr is not recognized by hardware,
+ 		 * we will accomplish it in the next vmentry.
+ 		 */
+ 		vmx->nested.pi_pending = true;
+ 		kvm_make_request(KVM_REQ_EVENT, vcpu);
++		/* the PIR and ON have been set by L1. */
++		if (!kvm_vcpu_trigger_posted_interrupt(vcpu))
++			kvm_vcpu_kick(vcpu);
+ 		return 0;
+ 	}
+ 	return -1;
+@@ -4762,7 +4764,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+ 	vmcs_writel(GUEST_SYSENTER_ESP, 0);
+ 	vmcs_writel(GUEST_SYSENTER_EIP, 0);
+ 
+-	vmcs_writel(GUEST_RFLAGS, 0x02);
++	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ 	kvm_rip_write(vcpu, 0xfff0);
+ 
+ 	vmcs_writel(GUEST_GDTR_BASE, 0);
+@@ -5921,7 +5923,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
+ 		if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+ 			return 1;
+ 
+-		err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
++		err = emulate_instruction(vcpu, 0);
+ 
+ 		if (err == EMULATE_USER_EXIT) {
+ 			++vcpu->stat.mmio_exits;
+@@ -8252,6 +8254,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 		/* Save guest registers, load host registers, keep flags */
+ 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
+ 		"pop %0 \n\t"
++		"setbe %c[fail](%0)\n\t"
+ 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
+ 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
+ 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
+@@ -8268,12 +8271,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ 		"mov %%r13, %c[r13](%0) \n\t"
+ 		"mov %%r14, %c[r14](%0) \n\t"
+ 		"mov %%r15, %c[r15](%0) \n\t"
++		"xor %%r8d,  %%r8d \n\t"
++		"xor %%r9d,  %%r9d \n\t"
++		"xor %%r10d, %%r10d \n\t"
++		"xor %%r11d, %%r11d \n\t"
++		"xor %%r12d, %%r12d \n\t"
++		"xor %%r13d, %%r13d \n\t"
++		"xor %%r14d, %%r14d \n\t"
++		"xor %%r15d, %%r15d \n\t"
+ #endif
+ 		"mov %%cr2, %%" _ASM_AX "   \n\t"
+ 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+ 
++		"xor %%eax, %%eax \n\t"
++		"xor %%ebx, %%ebx \n\t"
++		"xor %%esi, %%esi \n\t"
++		"xor %%edi, %%edi \n\t"
+ 		"pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
+-		"setbe %c[fail](%0) \n\t"
+ 		".pushsection .rodata \n\t"
+ 		".global vmx_return \n\t"
+ 		"vmx_return: " _ASM_PTR " 2b \n\t"
+@@ -8806,11 +8820,6 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
+ 		return false;
+ 	}
+ 	msr_bitmap = (unsigned long *)kmap(page);
+-	if (!msr_bitmap) {
+-		nested_release_page_clean(page);
+-		WARN_ON(1);
+-		return false;
+-	}
+ 
+ 	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
+ 		if (nested_cpu_has_apic_reg_virt(vmcs12))
+@@ -9720,7 +9729,8 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
+ 		return 0;
+ 	}
+ 
+-	return vmx_complete_nested_posted_interrupt(vcpu);
++	vmx_complete_nested_posted_interrupt(vcpu);
++	return 0;
+ }
+ 
+ static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
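
Both the SVM and VMX exit paths above now zero every register a guest
may have controlled before host code runs, limiting what a speculative
gadget could find in registers. Note the 32-bit forms in the VMX
block: on x86-64, writing a 32-bit register zero-extends through the
full 64-bit register, so "xor %r8d, %r8d" clears all of %r8 with a
shorter encoding. A user-space demonstration of that zero-extension:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long r;

		asm("mov %1, %0\n\t"
		    "xor %k0, %k0"	/* %k0 is the 32-bit view of %0 */
		    : "=&r" (r)
		    : "r" (0xdeadbeefcafef00dULL));
		printf("%#llx\n", r);	/* 0: the high half is cleared too */
		return 0;
	}
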
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index e4e7d45fd551..e05cb66b575b 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4188,7 +4188,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
+ 					 addr, n, v))
+ 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
+ 			break;
+-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
++		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
+ 		handled += n;
+ 		addr += n;
+ 		len -= n;
+@@ -4427,7 +4427,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
+ {
+ 	if (vcpu->mmio_read_completed) {
+ 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
+-			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
++			       vcpu->mmio_fragments[0].gpa, val);
+ 		vcpu->mmio_read_completed = 0;
+ 		return 1;
+ 	}
+@@ -4449,14 +4449,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
+ 
+ static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
+ {
+-	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
++	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
+ 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
+ }
+ 
+ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
+ 			  void *val, int bytes)
+ {
+-	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
++	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
+ 	return X86EMUL_IO_NEEDED;
+ }
+ 
+@@ -5187,7 +5187,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
+ 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ 		vcpu->run->internal.ndata = 0;
+-		r = EMULATE_FAIL;
++		r = EMULATE_USER_EXIT;
+ 	}
+ 	kvm_queue_exception(vcpu, UD_VECTOR);
+ 
+@@ -6737,7 +6737,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+ #endif
+ 
+ 	kvm_rip_write(vcpu, regs->rip);
+-	kvm_set_rflags(vcpu, regs->rflags);
++	kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
+ 
+ 	vcpu->arch.exception.pending = false;
+ 
+@@ -7927,6 +7927,13 @@ static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+ 				      sizeof(val));
+ }
+ 
++static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val)
++{
++
++	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val,
++				      sizeof(u32));
++}
++
+ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+ 				     struct kvm_async_pf *work)
+ {
+@@ -7953,21 +7960,32 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+ 				 struct kvm_async_pf *work)
+ {
+ 	struct x86_exception fault;
++	u32 val;
+ 
+-	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+ 	if (work->wakeup_all)
+ 		work->arch.token = ~0; /* broadcast wakeup */
+ 	else
+ 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
++	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+ 
+-	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+-	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+-		fault.vector = PF_VECTOR;
+-		fault.error_code_valid = true;
+-		fault.error_code = 0;
+-		fault.nested_page_fault = false;
+-		fault.address = work->arch.token;
+-		kvm_inject_page_fault(vcpu, &fault);
++	if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED &&
++	    !apf_get_user(vcpu, &val)) {
++		if (val == KVM_PV_REASON_PAGE_NOT_PRESENT &&
++		    vcpu->arch.exception.pending &&
++		    vcpu->arch.exception.nr == PF_VECTOR &&
++		    !apf_put_user(vcpu, 0)) {
++			vcpu->arch.exception.pending = false;
++			vcpu->arch.exception.nr = 0;
++			vcpu->arch.exception.has_error_code = false;
++			vcpu->arch.exception.error_code = 0;
++		} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
++			fault.vector = PF_VECTOR;
++			fault.error_code_valid = true;
++			fault.error_code = 0;
++			fault.nested_page_fault = false;
++			fault.address = work->arch.token;
++			kvm_inject_page_fault(vcpu, &fault);
++		}
+ 	}
+ 	vcpu->arch.apf.halted = false;
+ 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
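
The rework above closes a race where a PAGE_READY wakeup could be
injected while the guest still had the matching PAGE_NOT_PRESENT
exception pending: the host now reads the shared slot back
(apf_get_user()) and, if the not-present event was never delivered,
cancels it by writing 0 instead of stacking a second injection. The
decision, reduced to a simplified sketch (the constants follow the KVM
ABI values):

	#include <stdio.h>

	enum { APF_NONE = 0, APF_PAGE_NOT_PRESENT = 1, APF_PAGE_READY = 2 };

	/* value to write back into the guest's shared slot (simplified) */
	static int apf_complete(int slot_val, int pf_pending)
	{
		if (slot_val == APF_PAGE_NOT_PRESENT && pf_pending)
			return APF_NONE;	/* cancel the undelivered event */
		return APF_PAGE_READY;		/* normal completion */
	}

	int main(void)
	{
		printf("%d %d\n", apf_complete(APF_PAGE_NOT_PRESENT, 1),
				  apf_complete(APF_NONE, 0));	/* 0 2 */
		return 0;
	}
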
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index 9bc944a91274..b7518368492a 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -29,7 +29,8 @@
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-				
++#include <asm/nospec-branch.h>
++
+ /*
+  * computes a partial checksum, e.g. for TCP/UDP fragments
+  */
+@@ -159,7 +160,7 @@ ENTRY(csum_partial)
+ 	negl %ebx
+ 	lea 45f(%ebx,%ebx,2), %ebx
+ 	testl %esi, %esi
+-	jmp *%ebx
++	JMP_NOSPEC %ebx
+ 
+ 	# Handle 2-byte-aligned regions
+ 20:	addw (%esi), %ax
+@@ -446,7 +447,7 @@ ENTRY(csum_partial_copy_generic)
+ 	andl $-32,%edx
+ 	lea 3f(%ebx,%ebx), %ebx
+ 	testl %esi, %esi 
+-	jmp *%ebx
++	JMP_NOSPEC %ebx
+ 1:	addl $64,%esi
+ 	addl $64,%edi 
+ 	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
+diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
+index 422db000d727..a744506856b1 100644
+--- a/arch/x86/lib/cmdline.c
++++ b/arch/x86/lib/cmdline.c
+@@ -82,3 +82,108 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
+ 
+ 	return 0;	/* Buffer overrun */
+ }
++
++/*
++ * Find a non-boolean option (i.e. option=argument). In accordance with
++ * standard Linux practice, if this option is repeated, this returns the
++ * last instance on the command line.
++ *
++ * @cmdline: the cmdline string
++ * @max_cmdline_size: the maximum size of cmdline
++ * @option: option string to look for
++ * @buffer: memory buffer to return the option argument
++ * @bufsize: size of the supplied memory buffer
++ *
++ * Returns the length of the argument (regardless of whether it was
++ * truncated to fit in the buffer), or -1 if not found.
++ */
++static int
++__cmdline_find_option(const char *cmdline, int max_cmdline_size,
++		      const char *option, char *buffer, int bufsize)
++{
++	char c;
++	int pos = 0, len = -1;
++	const char *opptr = NULL;
++	char *bufptr = buffer;
++	enum {
++		st_wordstart = 0,	/* Start of word/after whitespace */
++		st_wordcmp,	/* Comparing this word */
++		st_wordskip,	/* Miscompare, skip */
++		st_bufcpy,	/* Copying this to buffer */
++	} state = st_wordstart;
++
++	if (!cmdline)
++		return -1;      /* No command line */
++
++	/*
++	 * This 'pos' check ensures we do not overrun
++	 * a non-NULL-terminated 'cmdline'
++	 */
++	while (pos++ < max_cmdline_size) {
++		c = *(char *)cmdline++;
++		if (!c)
++			break;
++
++		switch (state) {
++		case st_wordstart:
++			if (myisspace(c))
++				break;
++
++			state = st_wordcmp;
++			opptr = option;
++			/* fall through */
++
++		case st_wordcmp:
++			if ((c == '=') && !*opptr) {
++				/*
++				 * We matched all the way to the end of the
++				 * option we were looking for, prepare to
++				 * copy the argument.
++				 */
++				len = 0;
++				bufptr = buffer;
++				state = st_bufcpy;
++				break;
++			} else if (c == *opptr++) {
++				/*
++				 * We are currently matching, so continue
++				 * to the next character on the cmdline.
++				 */
++				break;
++			}
++			state = st_wordskip;
++			/* fall through */
++
++		case st_wordskip:
++			if (myisspace(c))
++				state = st_wordstart;
++			break;
++
++		case st_bufcpy:
++			if (myisspace(c)) {
++				state = st_wordstart;
++			} else {
++				/*
++				 * Increment len, but don't overrun the
++				 * supplied buffer and leave room for the
++				 * NULL terminator.
++				 */
++				if (++len < bufsize)
++					*bufptr++ = c;
++			}
++			break;
++		}
++	}
++
++	if (bufsize)
++		*bufptr = '\0';
++
++	return len;
++}
++
++int cmdline_find_option(const char *cmdline, const char *option, char *buffer,
++			int bufsize)
++{
++	return __cmdline_find_option(cmdline, COMMAND_LINE_SIZE, option,
++				     buffer, bufsize);
++}
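
A small user-space harness makes the parser's corner cases easy to
poke at (the last instance wins; the return value is the full argument
length even when the copy is truncated). This mirrors the state
machine above, minus the max_cmdline_size bound:

	#include <ctype.h>
	#include <stdio.h>

	static int find_option(const char *cmdline, const char *option,
			       char *buffer, int bufsize)
	{
		enum { WORDSTART, WORDCMP, WORDSKIP, BUFCPY } state = WORDSTART;
		const char *opptr = NULL;
		char *bufptr = buffer;
		int len = -1;
		char c;

		if (!cmdline)
			return -1;

		while ((c = *cmdline++)) {
			switch (state) {
			case WORDSTART:
				if (isspace((unsigned char)c))
					break;
				state = WORDCMP;
				opptr = option;
				/* fall through */
			case WORDCMP:
				if (c == '=' && !*opptr) {
					len = 0;
					bufptr = buffer;
					state = BUFCPY;
					break;
				} else if (c == *opptr++) {
					break;
				}
				state = WORDSKIP;
				/* fall through */
			case WORDSKIP:
				if (isspace((unsigned char)c))
					state = WORDSTART;
				break;
			case BUFCPY:
				if (isspace((unsigned char)c))
					state = WORDSTART;
				else if (++len < bufsize)
					*bufptr++ = c;
				break;
			}
		}
		if (bufsize)
			*bufptr = '\0';
		return len;
	}

	int main(void)
	{
		char buf[8];
		int len = find_option("ro root=/dev/sda1 root=/dev/sdb2",
				      "root", buf, sizeof(buf));

		printf("len=%d arg=%s\n", len, buf);	/* len=9 arg=/dev/sd */
		return 0;
	}
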
+diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
+index a4512359656a..3917307fca99 100644
+--- a/arch/x86/lib/getuser.S
++++ b/arch/x86/lib/getuser.S
+@@ -40,6 +40,8 @@ ENTRY(__get_user_1)
+ 	GET_THREAD_INFO(%_ASM_DX)
+ 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ 	jae bad_get_user
++	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
++	and %_ASM_DX, %_ASM_AX
+ 	ASM_STAC
+ 1:	movzbl (%_ASM_AX),%edx
+ 	xor %eax,%eax
+@@ -55,6 +57,8 @@ ENTRY(__get_user_2)
+ 	GET_THREAD_INFO(%_ASM_DX)
+ 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ 	jae bad_get_user
++	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
++	and %_ASM_DX, %_ASM_AX
+ 	ASM_STAC
+ 2:	movzwl -1(%_ASM_AX),%edx
+ 	xor %eax,%eax
+@@ -70,6 +74,8 @@ ENTRY(__get_user_4)
+ 	GET_THREAD_INFO(%_ASM_DX)
+ 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ 	jae bad_get_user
++	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
++	and %_ASM_DX, %_ASM_AX
+ 	ASM_STAC
+ 3:	movl -3(%_ASM_AX),%edx
+ 	xor %eax,%eax
+@@ -86,6 +92,8 @@ ENTRY(__get_user_8)
+ 	GET_THREAD_INFO(%_ASM_DX)
+ 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ 	jae bad_get_user
++	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
++	and %_ASM_DX, %_ASM_AX
+ 	ASM_STAC
+ 4:	movq -7(%_ASM_AX),%rdx
+ 	xor %eax,%eax
+@@ -97,6 +105,8 @@ ENTRY(__get_user_8)
+ 	GET_THREAD_INFO(%_ASM_DX)
+ 	cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ 	jae bad_get_user_8
++	sbb %_ASM_DX, %_ASM_DX		/* array_index_mask_nospec() */
++	and %_ASM_DX, %_ASM_AX
+ 	ASM_STAC
+ 4:	movl -7(%_ASM_AX),%edx
+ 5:	movl -3(%_ASM_AX),%ecx
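
The added sbb/and pair is the pointer-clamping variant of
array_index_mask_nospec(): the cmp leaves the carry flag set exactly
when the user pointer is below addr_limit, sbb turns that flag into an
all-ones or all-zero mask, and the and forces any pointer that only
looks valid under misspeculation down to NULL. The same sequence in
user-space form (x86-64 assumed):

	#include <stdio.h>

	static unsigned long clamp_below(unsigned long ptr, unsigned long limit)
	{
		unsigned long mask;

		asm ("cmp %2, %1\n\t"	/* CF <- (ptr < limit) */
		     "sbb %0, %0"	/* mask <- CF ? ~0UL : 0 */
		     : "=r" (mask)
		     : "r" (ptr), "r" (limit)
		     : "cc");
		return ptr & mask;
	}

	int main(void)
	{
		unsigned long limit = 0x7ffffffff000UL;

		printf("%#lx\n", clamp_below(0x1000, limit));		    /* 0x1000 */
		printf("%#lx\n", clamp_below(0xffff880000000000UL, limit)); /* 0 */
		return 0;
	}
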
+diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
+index a482d105172b..d893640d5c68 100644
+--- a/arch/x86/mm/Makefile
++++ b/arch/x86/mm/Makefile
+@@ -1,5 +1,5 @@
+ obj-y	:=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+-	    pat.o pgtable.o physaddr.o gup.o setup_nx.o
++	    pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
+ 
+ # Make sure __phys_addr has no stackprotector
+ nostackp := $(call cc-option, -fno-stack-protector)
+@@ -9,7 +9,6 @@ CFLAGS_setup_nx.o		:= $(nostackp)
+ CFLAGS_fault.o := -I$(src)/../include/asm/trace
+ 
+ obj-$(CONFIG_X86_PAT)		+= pat_rbtree.o
+-obj-$(CONFIG_SMP)		+= tlb.o
+ 
+ obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o
+ 
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index 3e1bb1c8daea..6fae65ea51d6 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -758,7 +758,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+ #endif
+ 	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
+ };
+-EXPORT_SYMBOL_GPL(cpu_tlbstate);
++EXPORT_PER_CPU_SYMBOL(cpu_tlbstate);
+ 
+ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+ {
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 70e7444c6835..5f3e167daefd 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -304,11 +304,11 @@ void iounmap(volatile void __iomem *addr)
+ 	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
+ 		return;
+ 
++	mmiotrace_iounmap(addr);
++
+ 	addr = (volatile void __iomem *)
+ 		(PAGE_MASK & (unsigned long __force)addr);
+ 
+-	mmiotrace_iounmap(addr);
+-
+ 	/* Use the vm area unlocked, assuming the caller
+ 	   ensures there isn't another iounmap for the same address
+ 	   in parallel. Reuse of the virtual address is prevented by
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index ddb2244b06a1..76604c8a2a48 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -434,17 +434,18 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ 	unsigned long flags;
+ 	int ret = 0;
+ 	unsigned long size = 0;
++	unsigned long addr = p->addr & PAGE_MASK;
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ 	unsigned int l;
+ 	pte_t *pte;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+-	if (get_kmmio_probe(p->addr)) {
++	if (get_kmmio_probe(addr)) {
+ 		ret = -EEXIST;
+ 		goto out;
+ 	}
+ 
+-	pte = lookup_address(p->addr, &l);
++	pte = lookup_address(addr, &l);
+ 	if (!pte) {
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ 	kmmio_count++;
+ 	list_add_rcu(&p->list, &kmmio_probes);
+ 	while (size < size_lim) {
+-		if (add_kmmio_fault_page(p->addr + size))
++		if (add_kmmio_fault_page(addr + size))
+ 			pr_err("Unable to set page fault.\n");
+ 		size += page_level_size(l);
+ 	}
+@@ -527,19 +528,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ {
+ 	unsigned long flags;
+ 	unsigned long size = 0;
++	unsigned long addr = p->addr & PAGE_MASK;
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ 	struct kmmio_fault_page *release_list = NULL;
+ 	struct kmmio_delayed_release *drelease;
+ 	unsigned int l;
+ 	pte_t *pte;
+ 
+-	pte = lookup_address(p->addr, &l);
++	pte = lookup_address(addr, &l);
+ 	if (!pte)
+ 		return;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	while (size < size_lim) {
+-		release_kmmio_fault_page(p->addr + size, &release_list);
++		release_kmmio_fault_page(addr + size, &release_list);
+ 		size += page_level_size(l);
+ 	}
+ 	list_del_rcu(&p->list);
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 061e0114005e..cd6e3339b19e 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -28,6 +28,8 @@
+  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
+  */
+ 
++#ifdef CONFIG_SMP
++
+ struct flush_tlb_info {
+ 	struct mm_struct *flush_mm;
+ 	unsigned long flush_start;
+@@ -153,23 +155,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+ 	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
+ }
+ 
+-void flush_tlb_current_task(void)
+-{
+-	struct mm_struct *mm = current->mm;
+-
+-	preempt_disable();
+-
+-	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+-
+-	/* This is an implicit full barrier that synchronizes with switch_mm. */
+-	local_flush_tlb();
+-
+-	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+-	preempt_enable();
+-}
+-
+ /*
+  * See Documentation/x86/tlb.txt for details.  We choose 33
+  * because it is large enough to cover the vast majority (at
+@@ -190,6 +175,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+ 
+ 	preempt_disable();
++
++	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
++		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
++	if (base_pages_to_flush > tlb_single_page_flush_ceiling)
++		base_pages_to_flush = TLB_FLUSH_ALL;
++
+ 	if (current->active_mm != mm) {
+ 		/* Synchronize with switch_mm. */
+ 		smp_mb();
+@@ -206,15 +197,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 		goto out;
+ 	}
+ 
+-	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+-		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+-
+ 	/*
+ 	 * Both branches below are implicit full barriers (MOV to CR or
+ 	 * INVLPG) that synchronize with switch_mm.
+ 	 */
+-	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+-		base_pages_to_flush = TLB_FLUSH_ALL;
++	if (base_pages_to_flush == TLB_FLUSH_ALL) {
+ 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+ 		local_flush_tlb();
+ 	} else {
+@@ -346,3 +333,5 @@ static int __init create_tlb_single_page_flush_ceiling(void)
+ 	return 0;
+ }
+ late_initcall(create_tlb_single_page_flush_ceiling);
++
++#endif /* CONFIG_SMP */
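
The tlb.c hunks do two things: flush_tlb_current_task() is removed and the bulk of the file
is compiled only under CONFIG_SMP (uniprocessor 4.1 kernels get simpler inline flush helpers
from the header), and the decision of how much to flush is hoisted to the top of
flush_tlb_mm_range() so every exit path sees the same answer. A condensed, runnable sketch of
the hoisted policy, with TLB_FLUSH_ALL and the page shift spelled out as assumptions:

    #include <stdio.h>

    #define TLB_FLUSH_ALL  (~0UL)
    #define PAGE_SHIFT     12              /* assumed 4 KiB pages */

    /* flush page-by-page up to the ceiling, otherwise flush everything */
    static unsigned long flush_granularity(unsigned long start,
                                           unsigned long end,
                                           unsigned long ceiling, int is_huge)
    {
            unsigned long pages = TLB_FLUSH_ALL;

            if (end != TLB_FLUSH_ALL && !is_huge)
                    pages = (end - start) >> PAGE_SHIFT;
            if (pages > ceiling)
                    pages = TLB_FLUSH_ALL;
            return pages;
    }

    int main(void)
    {
            printf("%lu\n", flush_granularity(0, 8 << PAGE_SHIFT, 33, 0));
            printf("%lu\n", flush_granularity(0, 64 << PAGE_SHIFT, 33, 0));
            return 0;
    }
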
+diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
+index 1d2e6392f5fa..f24bd7249536 100644
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -471,7 +471,7 @@ static int nmi_setup(void)
+ 		goto fail;
+ 
+ 	for_each_possible_cpu(cpu) {
+-		if (!cpu)
++		if (!IS_ENABLED(CONFIG_SMP) || !cpu)
+ 			continue;
+ 
+ 		memcpy(per_cpu(cpu_msrs, cpu).counters,
+diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
+index 55130846ac87..c0533fbc39e3 100644
+--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
++++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
+@@ -196,6 +196,7 @@ static int xo15_sci_remove(struct acpi_device *device)
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
+ static int xo15_sci_resume(struct device *dev)
+ {
+ 	/* Enable all EC events */
+@@ -207,6 +208,7 @@ static int xo15_sci_resume(struct device *dev)
+ 
+ 	return 0;
+ }
++#endif
+ 
+ static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
+ 
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 1ecae556d4ed..809730c09e2b 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -432,6 +432,12 @@ static void __init xen_init_cpuid_mask(void)
+ 		~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
+ 		  (1 << X86_FEATURE_ACC));   /* thermal monitoring */
+ 
++	/*
++	 * Xen PV would need some work to support PCID: CR3 handling as well
++	 * as xen_flush_tlb_others() would need updating.
++	 */
++	cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32));  /* disable PCID */
++
+ 	if (!xen_initial_domain())
+ 		cpuid_leaf1_edx_mask &=
+ 			~((1 << X86_FEATURE_ACPI));  /* disable ACPI */
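
Masking a leaf-1 ECX bit is how a PV guest is kept from ever seeing a CPUID feature; the
patch's own comment gives the reason (CR3 handling and xen_flush_tlb_others() do not handle
PCID tags). A small runnable illustration of the mask arithmetic, using the feature index as
defined in this kernel generation (word 4, bit 17 -- treat the value as an assumption of the
demo):

    #include <stdio.h>

    #define X86_FEATURE_PCID (4*32 + 17)    /* flat index into feature words */

    int main(void)
    {
            unsigned int ecx_mask = ~0u;

            ecx_mask &= ~(1u << (X86_FEATURE_PCID % 32));
            printf("ECX mask with bit %d cleared: %#x\n",
                   X86_FEATURE_PCID % 32, ecx_mask);
            return 0;
    }
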
+diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
+index b39531babec0..72bfc1cbc2b5 100644
+--- a/arch/xtensa/include/asm/futex.h
++++ b/arch/xtensa/include/asm/futex.h
+@@ -109,7 +109,6 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 			      u32 oldval, u32 newval)
+ {
+ 	int ret = 0;
+-	u32 prev;
+ 
+ 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ 		return -EFAULT;
+@@ -120,26 +119,24 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ 
+ 	__asm__ __volatile__ (
+ 	"	# futex_atomic_cmpxchg_inatomic\n"
+-	"1:	l32i	%1, %3, 0\n"
+-	"	mov	%0, %5\n"
+-	"	wsr	%1, scompare1\n"
+-	"2:	s32c1i	%0, %3, 0\n"
+-	"3:\n"
++	"	wsr	%5, scompare1\n"
++	"1:	s32c1i	%1, %4, 0\n"
++	"	s32i	%1, %6, 0\n"
++	"2:\n"
+ 	"	.section .fixup,\"ax\"\n"
+ 	"	.align 4\n"
+-	"4:	.long	3b\n"
+-	"5:	l32r	%1, 4b\n"
+-	"	movi	%0, %6\n"
++	"3:	.long	2b\n"
++	"4:	l32r	%1, 3b\n"
++	"	movi	%0, %7\n"
+ 	"	jx	%1\n"
+ 	"	.previous\n"
+ 	"	.section __ex_table,\"a\"\n"
+-	"	.long 1b,5b,2b,5b\n"
++	"	.long 1b,4b\n"
+ 	"	.previous\n"
+-	: "+r" (ret), "=&r" (prev), "+m" (*uaddr)
+-	: "r" (uaddr), "r" (oldval), "r" (newval), "I" (-EFAULT)
++	: "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
++	: "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
+ 	: "memory");
+ 
+-	*uval = prev;
+ 	return ret;
+ }
+ 
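
The rewritten xtensa assembly drives s32c1i directly: oldval is loaded into SCOMPARE1,
s32c1i stores newval only if the target word still matches, and whatever value was observed
is written back through uval. C11 atomics carry the same "report what you saw" contract, so
a user-space analogue looks like this (a sketch only, without the kernel's uaccess checks):

    #include <stdatomic.h>
    #include <stdio.h>

    /* analogue of futex_atomic_cmpxchg_inatomic(): returns 0 and stores
     * the value observed at uaddr into *uval, swap or no swap */
    static int cmpxchg_inatomic(unsigned *uval, _Atomic unsigned *uaddr,
                                unsigned oldval, unsigned newval)
    {
            unsigned seen = oldval;

            atomic_compare_exchange_strong(uaddr, &seen, newval);
            *uval = seen;   /* on success this equals oldval anyway */
            return 0;
    }

    int main(void)
    {
            _Atomic unsigned futex_word = 5;
            unsigned prev;

            cmpxchg_inatomic(&prev, &futex_word, 5, 9);
            printf("prev=%u now=%u\n", prev, (unsigned)futex_word);
            return 0;
    }
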
+diff --git a/block/bio.c b/block/bio.c
+index d9cf77c6a847..f90b2abe2fa7 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1290,6 +1290,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 	int ret, offset;
+ 	struct iov_iter i;
+ 	struct iovec iov;
++	struct bio_vec *bvec;
+ 
+ 	iov_for_each(iov, i, *iter) {
+ 		unsigned long uaddr = (unsigned long) iov.iov_base;
+@@ -1334,7 +1335,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 		ret = get_user_pages_fast(uaddr, local_nr_pages,
+ 				(iter->type & WRITE) != WRITE,
+ 				&pages[cur_page]);
+-		if (ret < local_nr_pages) {
++		if (unlikely(ret < local_nr_pages)) {
++			for (j = cur_page; j < page_limit; j++) {
++				if (!pages[j])
++					break;
++				put_page(pages[j]);
++			}
+ 			ret = -EFAULT;
+ 			goto out_unmap;
+ 		}
+@@ -1396,10 +1402,8 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 	return bio;
+ 
+  out_unmap:
+-	for (j = 0; j < nr_pages; j++) {
+-		if (!pages[j])
+-			break;
+-		page_cache_release(pages[j]);
++	bio_for_each_segment_all(bvec, bio, j) {
++		put_page(bvec->bv_page);
+ 	}
+  out:
+ 	kfree(pages);
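
Two leaks are closed in bio_map_user_iov(): when get_user_pages_fast() pins fewer pages than
requested, the partial batch is released on the spot, and the out_unmap path now walks the
bio's own segments so only references the bio actually took are dropped. The general shape --
release exactly what you acquired on partial failure -- as a self-contained toy (all names
illustrative):

    #include <stdio.h>

    static int refs[4];

    static int acquire(int n)               /* pretend only 2 can be pinned */
    {
            int i, got = n < 2 ? n : 2;

            for (i = 0; i < got; i++)
                    refs[i]++;
            return got;
    }

    static int pin_all(int n)
    {
            int i, got = acquire(n);

            if (got < n) {                  /* partial success is failure... */
                    for (i = 0; i < got; i++)
                            refs[i]--;      /* ...but taken pins must go back */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            pin_all(4);
            printf("leaked refs: %d %d %d %d\n",
                   refs[0], refs[1], refs[2], refs[3]);
            return 0;
    }
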
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index f9caf0f74199..7006dbfd39bd 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -637,5 +637,16 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
+ }
+ EXPORT_SYMBOL_GPL(ahash_attr_alg);
+ 
++bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
++{
++	struct crypto_alg *alg = &halg->base;
++
++	if (alg->cra_type != &crypto_ahash_type)
++		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
++
++	return __crypto_ahash_alg(alg)->setkey != NULL;
++}
++EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
++
+ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 4e69f3161888..35f5efb2ecff 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -160,6 +160,18 @@ void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
+ 
+ 			spawn->alg = NULL;
+ 			spawns = &inst->alg.cra_users;
++
++			/*
++			 * We may encounter an unregistered instance here, since
++			 * an instance's spawns are set up prior to the instance
++			 * being registered.  An unregistered instance will have
++			 * NULL ->cra_users.next, since ->cra_users isn't
++			 * properly initialized until registration.  But an
++			 * unregistered instance cannot have any users, so treat
++			 * it the same as ->cra_users being empty.
++			 */
++			if (spawns->next == NULL)
++				break;
+ 		}
+ 	} while ((spawns = crypto_more_spawns(alg, &stack, &top,
+ 					      &secondary_spawns)));
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 66c9e8262572..4e76f1db3e68 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -645,7 +645,8 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ 	inst->alg.finup  = cryptd_hash_finup_enqueue;
+ 	inst->alg.export = cryptd_hash_export;
+ 	inst->alg.import = cryptd_hash_import;
+-	inst->alg.setkey = cryptd_hash_setkey;
++	if (crypto_shash_alg_has_setkey(salg))
++		inst->alg.setkey = cryptd_hash_setkey;
+ 	inst->alg.digest = cryptd_hash_digest_enqueue;
+ 
+ 	err = ahash_register_instance(tmpl, inst);
+diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
+index cfb68a889ef6..cbe0135839df 100644
+--- a/crypto/mcryptd.c
++++ b/crypto/mcryptd.c
+@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
+ 		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
+ 		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ 		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
++		spin_lock_init(&cpu_queue->q_lock);
+ 	}
+ 	return 0;
+ }
+@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
+ 	int cpu, err;
+ 	struct mcryptd_cpu_queue *cpu_queue;
+ 
+-	cpu = get_cpu();
+-	cpu_queue = this_cpu_ptr(queue->cpu_queue);
+-	rctx->tag.cpu = cpu;
++	cpu_queue = raw_cpu_ptr(queue->cpu_queue);
++	spin_lock(&cpu_queue->q_lock);
++	cpu = smp_processor_id();
++	rctx->tag.cpu = smp_processor_id();
+ 
+ 	err = crypto_enqueue_request(&cpu_queue->queue, request);
+ 	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
+ 		 cpu, cpu_queue, request);
++	spin_unlock(&cpu_queue->q_lock);
+ 	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
+-	put_cpu();
+ 
+ 	return err;
+ }
+@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
+ 	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
+ 	i = 0;
+ 	while (i < MCRYPTD_BATCH || single_task_running()) {
+-		/*
+-		 * preempt_disable/enable is used to prevent
+-		 * being preempted by mcryptd_enqueue_request()
+-		 */
+-		local_bh_disable();
+-		preempt_disable();
++
++		spin_lock_bh(&cpu_queue->q_lock);
+ 		backlog = crypto_get_backlog(&cpu_queue->queue);
+ 		req = crypto_dequeue_request(&cpu_queue->queue);
+-		preempt_enable();
+-		local_bh_enable();
++		spin_unlock_bh(&cpu_queue->q_lock);
+ 
+ 		if (!req) {
+ 			mcryptd_opportunistic_flush();
+@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
+ 		++i;
+ 	}
+ 	if (cpu_queue->queue.qlen)
+-		queue_work(kcrypto_wq, &cpu_queue->work);
++		queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
+ }
+ 
+ void mcryptd_flusher(struct work_struct *__work)
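
mcryptd previously kept enqueuers and the worker apart with get_cpu()/preempt_disable(),
which only works while both sides are guaranteed to stay on the same CPU; after a CPU goes
down, pending work can run elsewhere. The per-CPU queue therefore gains a real spinlock (the
q_lock field is added in a header hunk not shown here), and queue_work_on() keeps follow-up
work on the CPU that owns the queue. The locking shape, as a runnable pthread stand-in:

    #include <pthread.h>
    #include <stdio.h>

    struct cpu_queue {
            pthread_mutex_t q_lock;
            int qlen;                       /* stand-in for the crypto queue */
    };

    static struct cpu_queue q = { PTHREAD_MUTEX_INITIALIZER, 0 };

    static void enqueue(void)
    {
            pthread_mutex_lock(&q.q_lock);  /* spin_lock() in the patch */
            q.qlen++;                       /* crypto_enqueue_request() */
            pthread_mutex_unlock(&q.q_lock);
            /* then queue_work_on() kicks the worker on this queue's CPU */
    }

    static int dequeue(void)
    {
            int got;

            pthread_mutex_lock(&q.q_lock);  /* spin_lock_bh() in the worker */
            got = q.qlen > 0;
            if (got)
                    q.qlen--;               /* crypto_dequeue_request() */
            pthread_mutex_unlock(&q.q_lock);
            return got;
    }

    int main(void)
    {
            enqueue();
            printf("dequeued: %d\n", dequeue());
            return 0;
    }
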
+diff --git a/drivers/Makefile b/drivers/Makefile
+index d7407f0b0d3b..bffce51498df 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -93,6 +93,7 @@ obj-$(CONFIG_TC)		+= tc/
+ obj-$(CONFIG_UWB)		+= uwb/
+ obj-$(CONFIG_USB_PHY)		+= usb/
+ obj-$(CONFIG_USB)		+= usb/
++obj-$(CONFIG_USB_SUPPORT)	+= usb/
+ obj-$(CONFIG_PCI)		+= usb/
+ obj-$(CONFIG_USB_GADGET)	+= usb/
+ obj-$(CONFIG_OF)		+= usb/
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 568f2b942aac..0272d53d5bcb 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -315,15 +315,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
+ 		pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
+ 
+ 		pr->pblk = object.processor.pblk_address;
+-
+-		/*
+-		 * We don't care about error returns - we just try to mark
+-		 * these reserved so that nobody else is confused into thinking
+-		 * that this region might be unused..
+-		 *
+-		 * (In particular, allocating the IO range for Cardbus)
+-		 */
+-		request_region(pr->throttling.address, 6, "ACPI CPU throttle");
+ 	}
+ 
+ 	/*
+diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
+index ed65e9c4b5b0..ba4930c0e98c 100644
+--- a/drivers/acpi/apei/erst.c
++++ b/drivers/acpi/apei/erst.c
+@@ -1023,7 +1023,7 @@ skip:
+ 	/* The record may be cleared by others, try read next record */
+ 	if (len == -ENOENT)
+ 		goto skip;
+-	else if (len < sizeof(*rcd)) {
++	else if (len < 0 || len < sizeof(*rcd)) {
+ 		rc = -EIO;
+ 		goto out;
+ 	}
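
The added len < 0 test is not redundant: sizeof(*rcd) is a size_t, so comparing a negative
ssize_t against it converts the negative value to a huge unsigned number, and the old check
waved error codes through as "large enough" lengths. Two lines demonstrate the promotion:

    #include <stdio.h>

    int main(void)
    {
            long len = -5;                  /* an errno-style failure code */
            unsigned long need = 16;        /* plays the role of sizeof(*rcd) */

            printf("len < need        -> %d\n", len < need);
            printf("len < 0 || < need -> %d\n", len < 0 || len < need);
            return 0;
    }

The first comparison prints 0 because -5 is converted to ULONG_MAX - 4; the second prints 1.
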
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index 84243c32e29c..f3df4b5e5fc9 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -680,6 +680,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
+ 	if (!pr->flags.throttling)
+ 		return -ENODEV;
+ 
++	/*
++	 * We don't care about error returns - we just try to mark
++	 * these reserved so that nobody else is confused into thinking
++	 * that this region might be unused..
++	 *
++	 * (In particular, allocating the IO range for Cardbus)
++	 */
++	request_region(pr->throttling.address, 6, "ACPI CPU throttle");
++
+ 	pr->throttling.state = 0;
+ 
+ 	duty_mask = pr->throttling.state_count - 1;
+diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
+index bf034f8b7c1a..030ab2f543df 100644
+--- a/drivers/acpi/sbshc.c
++++ b/drivers/acpi/sbshc.c
+@@ -309,8 +309,8 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
+ 	device->driver_data = hc;
+ 
+ 	acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
+-	printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
+-		hc->ec, hc->offset, hc->query_bit);
++	dev_info(&device->dev, "SBS HC: offset = 0x%0x, query_bit = 0x%0x\n",
++		 hc->offset, hc->query_bit);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 235ba1fbabdb..b834278c0c4d 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2618,6 +2618,8 @@ static unsigned int binder_poll(struct file *filp,
+ 	binder_lock(__func__);
+ 
+ 	thread = binder_get_thread(proc);
++	if (!thread)
++		return POLLERR;
+ 
+ 	wait_for_proc_work = thread->transaction_stack == NULL &&
+ 		list_empty(&thread->todo) && thread->return_error == BR_OK;
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 34825d63d483..3b0cebb2122b 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -388,6 +388,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ 	{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
+ 	{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
++	{ PCI_VDEVICE(INTEL, 0x0f22), board_ahci }, /* Bay Trail AHCI */
++	{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci }, /* Bay Trail AHCI */
++	{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci }, /* Cherry Trail AHCI */
++	{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci }, /* Apollo Lake AHCI */
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 4c0dac27882f..b31d6853ba7a 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4145,6 +4145,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ 	 */
+ 	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
++	{ "LITEON EP1-*",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+ 
+ 	/* Devices we expect to fail diagnostics */
+ 
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index fdba441457ec..3f5fb95b0f4c 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -106,6 +106,9 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
+ 	unsigned int index;
+ 	int ret;
+ 
++	if (this_cpu_ci->cpu_map_populated)
++		return 0;
++
+ 	ret = cache_setup_of_node(cpu);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
+index a311cfa4c5bd..a6975795e7f3 100644
+--- a/drivers/base/power/trace.c
++++ b/drivers/base/power/trace.c
+@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
+ }
+ EXPORT_SYMBOL(generate_pm_trace);
+ 
+-extern char __tracedata_start, __tracedata_end;
++extern char __tracedata_start[], __tracedata_end[];
+ static int show_file_hash(unsigned int value)
+ {
+ 	int match;
+ 	char *tracedata;
+ 
+ 	match = 0;
+-	for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
++	for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
+ 			tracedata += 2 + sizeof(unsigned long)) {
+ 		unsigned short lineno = *(unsigned short *)tracedata;
+ 		const char *file = *(const char **)(tracedata + 2);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index ea0c863861b9..b5dbce192c6b 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1338,9 +1338,8 @@ out:
+ 	return err;
+ }
+ 
+-static void lo_release(struct gendisk *disk, fmode_t mode)
++static void __lo_release(struct loop_device *lo)
+ {
+-	struct loop_device *lo = disk->private_data;
+ 	int err;
+ 
+ 	mutex_lock(&lo->lo_ctl_mutex);
+@@ -1368,6 +1367,13 @@ out:
+ 	mutex_unlock(&lo->lo_ctl_mutex);
+ }
+ 
++static void lo_release(struct gendisk *disk, fmode_t mode)
++{
++	mutex_lock(&loop_index_mutex);
++	__lo_release(disk->private_data);
++	mutex_unlock(&loop_index_mutex);
++}
++
+ static const struct block_device_operations lo_fops = {
+ 	.owner =	THIS_MODULE,
+ 	.open =		lo_open,
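
Splitting lo_release() into a locked wrapper and a __lo_release() worker serializes device
teardown against the paths that add and remove loop devices under loop_index_mutex (the
existing driver-wide mutex in the same file). The wrapper pattern, reduced to a runnable
stub (names are stand-ins):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t index_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void __dev_release(int *dev)     /* caller holds index_mutex */
    {
            *dev = 0;                       /* the actual teardown work */
    }

    static void dev_release(int *dev)       /* what the outside world calls */
    {
            pthread_mutex_lock(&index_mutex);
            __dev_release(dev);
            pthread_mutex_unlock(&index_mutex);
    }

    int main(void)
    {
            int dev = 1;

            dev_release(&dev);
            printf("released: %d\n", dev == 0);
            return 0;
    }
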
+diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
+index 09e628dafd9d..46098d236476 100644
+--- a/drivers/block/pktcdvd.c
++++ b/drivers/block/pktcdvd.c
+@@ -2798,7 +2798,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
+ 	pd->pkt_dev = MKDEV(pktdev_major, idx);
+ 	ret = pkt_new_dev(pd, dev);
+ 	if (ret)
+-		goto out_new_dev;
++		goto out_mem2;
+ 
+ 	/* inherit events of the host device */
+ 	disk->events = pd->bdev->bd_disk->events;
+@@ -2816,8 +2816,6 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
+ 	mutex_unlock(&ctl_mutex);
+ 	return 0;
+ 
+-out_new_dev:
+-	blk_cleanup_queue(disk->queue);
+ out_mem2:
+ 	put_disk(disk);
+ out_mem:
+diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
+index 83f6437dd91d..1481a3c9b5af 100644
+--- a/drivers/bluetooth/btsdio.c
++++ b/drivers/bluetooth/btsdio.c
+@@ -31,6 +31,7 @@
+ #include <linux/errno.h>
+ #include <linux/skbuff.h>
+ 
++#include <linux/mmc/host.h>
+ #include <linux/mmc/sdio_ids.h>
+ #include <linux/mmc/sdio_func.h>
+ 
+@@ -303,6 +304,14 @@ static int btsdio_probe(struct sdio_func *func,
+ 		tuple = tuple->next;
+ 	}
+ 
++	/* BCM43341 devices soldered onto the PCB (non-removable) use an
++	 * uart connection for bluetooth, ignore the BT SDIO interface.
++	 */
++	if (func->vendor == SDIO_VENDOR_ID_BROADCOM &&
++	    func->device == SDIO_DEVICE_ID_BROADCOM_43341 &&
++	    !mmc_card_is_removable(func->card->host))
++		return -ENODEV;
++
+ 	data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 5643b65cee20..0ff7682cfb14 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -238,7 +238,10 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
+ 			goto out;
+ 		}
+ 
+-		mutex_lock(&reading_mutex);
++		if (mutex_lock_interruptible(&reading_mutex)) {
++			err = -ERESTARTSYS;
++			goto out_put;
++		}
+ 		if (!data_avail) {
+ 			bytes_read = rng_get_data(rng, rng_buffer,
+ 				rng_buffer_size(),
+@@ -288,6 +291,7 @@ out:
+ 
+ out_unlock_reading:
+ 	mutex_unlock(&reading_mutex);
++out_put:
+ 	put_rng(rng);
+ 	goto out;
+ }
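
Taking reading_mutex interruptibly means a reader stuck behind a stalled hardware RNG can be
killed by a signal instead of sleeping uninterruptibly; the new out_put label unwinds only
what was actually acquired (the rng reference, not the never-taken mutex). The unwind-ladder
shape as a compilable toy (all functions are stand-ins):

    #include <stdio.h>

    static int got_ref, got_lock;
    static int take_ref(void)  { got_ref = 1; return 0; }
    static void drop_ref(void) { got_ref = 0; }
    static int take_lock_interruptible(void) { return -1; /* signal hit */ }
    static void drop_lock(void) { got_lock = 0; }

    static int dev_read(void)
    {
            int err = 0;

            if (take_ref())
                    return -1;
            if (take_lock_interruptible()) {
                    err = -2;               /* -ERESTARTSYS */
                    goto out_put;           /* skip drop_lock(): never taken */
            }
            got_lock = 1;
            /* ... read ... */
            drop_lock();
    out_put:
            drop_ref();
            return err;
    }

    int main(void)
    {
            printf("err=%d ref=%d lock=%d\n", dev_read(), got_ref, got_lock);
            return 0;
    }
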
+diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
+index 659879a56dba..949610360b14 100644
+--- a/drivers/cpufreq/Kconfig
++++ b/drivers/cpufreq/Kconfig
+@@ -236,6 +236,7 @@ endif
+ if MIPS
+ config LOONGSON2_CPUFREQ
+ 	tristate "Loongson2 CPUFreq Driver"
++	depends on LEMOTE_MACH2F
+ 	help
+ 	  This option adds a CPUFreq driver for loongson processors which
+ 	  support software configurable cpu frequency.
+@@ -248,6 +249,7 @@ config LOONGSON2_CPUFREQ
+ 
+ config LOONGSON1_CPUFREQ
+ 	tristate "Loongson1 CPUFreq Driver"
++	depends on LOONGSON1_LS1B
+ 	help
+ 	  This option adds a CPUFreq driver for loongson1 processors which
+ 	  support software configurable cpu frequency.
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 3442764a5293..544e0e330afd 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -153,6 +153,24 @@ static int powernv_cpuidle_driver_init(void)
+ 		drv->state_count += 1;
+ 	}
+ 
++	/*
++	 * On the PowerNV platform cpu_present may be less than cpu_possible in
++	 * cases when firmware detects the CPU, but it is not available to the
++	 * OS.  If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hotplugable at
++	 * run time and hence cpu_devices are not created for those CPUs by the
++	 * generic topology_init().
++	 *
++	 * drv->cpumask defaults to cpu_possible_mask in
++	 * __cpuidle_driver_init().  This breaks cpuidle on PowerNV where
++	 * cpu_devices are not created for CPUs in cpu_possible_mask that
++	 * cannot be hot-added later at run time.
++	 *
++	 * Trying cpuidle_register_device() on a CPU without a cpu_device is
++	 * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
++	 */
++
++	drv->cpumask = (struct cpumask *)cpu_present_mask;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
+index 832a2c3f01ff..9e98a5fbbc1d 100644
+--- a/drivers/cpuidle/sysfs.c
++++ b/drivers/cpuidle/sysfs.c
+@@ -613,6 +613,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
+ 	struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
+ 	int error;
+ 
++	/*
++	 * Return if cpu_device is not setup for this CPU.
++	 *
++	 * This could happen if the arch did not set up cpu_device
++	 * since this CPU is not in cpu_present mask and the
++	 * driver did not send a correct CPU mask during registration.
++	 * Without this check we would end up passing bogus
++	 * value for &cpu_dev->kobj in kobject_init_and_add()
++	 */
++	if (!cpu_dev)
++		return -ENODEV;
++
+ 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ 	if (!kdev)
+ 		return -ENOMEM;
+diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
+index bac0bdeb4b5f..b6529b9fcbe2 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.h
++++ b/drivers/crypto/amcc/crypto4xx_core.h
+@@ -32,12 +32,12 @@
+ #define PPC405EX_CE_RESET                       0x00000008
+ 
+ #define CRYPTO4XX_CRYPTO_PRIORITY		300
+-#define PPC4XX_LAST_PD				63
+-#define PPC4XX_NUM_PD				64
+-#define PPC4XX_LAST_GD				1023
++#define PPC4XX_NUM_PD				256
++#define PPC4XX_LAST_PD				(PPC4XX_NUM_PD - 1)
+ #define PPC4XX_NUM_GD				1024
+-#define PPC4XX_LAST_SD				63
+-#define PPC4XX_NUM_SD				64
++#define PPC4XX_LAST_GD				(PPC4XX_NUM_GD - 1)
++#define PPC4XX_NUM_SD				256
++#define PPC4XX_LAST_SD				(PPC4XX_NUM_SD - 1)
+ #define PPC4XX_SD_BUFFER_SIZE			2048
+ 
+ #define PD_ENTRY_INUSE				1
+diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
+index 10a9aeff1666..32035daae8c9 100644
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -1641,6 +1641,7 @@ static int queue_cache_init(void)
+ 					  CWQ_ENTRY_SIZE, 0, NULL);
+ 	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
+ 		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
++		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
+ 		return -ENOMEM;
+ 	}
+ 	return 0;
+@@ -1650,6 +1651,8 @@ static void queue_cache_destroy(void)
+ {
+ 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
+ 	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
++	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
++	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
+ }
+ 
+ static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
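
Clearing the cache pointers after kmem_cache_destroy() makes the teardown paths idempotent:
after the partial-failure branch in queue_cache_init(), a later queue_cache_destroy() passes
NULL (which kmem_cache_destroy() tolerates) rather than a stale pointer. The free-and-clear
idiom in miniature:

    #include <stdlib.h>

    /* a second destroy of the same slot becomes a harmless no-op,
     * since free(NULL) is defined to do nothing */
    #define DESTROY(slot) do { free(slot); (slot) = NULL; } while (0)

    int main(void)
    {
            char *cache = malloc(32);

            DESTROY(cache);
            DESTROY(cache);         /* safe: slot is NULL now */
            return 0;
    }
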
+diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
+index 4f0c4a3cc5c5..89219806fd8e 100644
+--- a/drivers/crypto/s5p-sss.c
++++ b/drivers/crypto/s5p-sss.c
+@@ -419,16 +419,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
+ 	uint32_t                    aes_control;
+ 	int                         err;
+ 	unsigned long               flags;
++	u8 *iv;
+ 
+ 	aes_control = SSS_AES_KEY_CHANGE_MODE;
+ 	if (mode & FLAGS_AES_DECRYPT)
+ 		aes_control |= SSS_AES_MODE_DECRYPT;
+ 
+-	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
++	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
+ 		aes_control |= SSS_AES_CHAIN_MODE_CBC;
+-	else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
++		iv = req->info;
++	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
+ 		aes_control |= SSS_AES_CHAIN_MODE_CTR;
+-
++		iv = req->info;
++	} else {
++		iv = NULL; /* AES_ECB */
++	}
+ 	if (dev->ctx->keylen == AES_KEYSIZE_192)
+ 		aes_control |= SSS_AES_KEY_SIZE_192;
+ 	else if (dev->ctx->keylen == AES_KEYSIZE_256)
+@@ -458,7 +463,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
+ 		goto outdata_error;
+ 
+ 	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
+-	s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
++	s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+ 
+ 	s5p_set_dma_indata(dev,  req->src);
+ 	s5p_set_dma_outdata(dev, req->dst);
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index ca1b362d77e2..3373561caae8 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -584,7 +584,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev,
+ 	devfreq = devfreq_add_device(dev, profile, governor_name, data);
+ 	if (IS_ERR(devfreq)) {
+ 		devres_free(ptr);
+-		return ERR_PTR(-ENOMEM);
++		return devfreq;
+ 	}
+ 
+ 	*ptr = devfreq;
+diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
+index 7638b24ce8d0..35fc58f4bf4b 100644
+--- a/drivers/dma/dma-jz4740.c
++++ b/drivers/dma/dma-jz4740.c
+@@ -557,7 +557,7 @@ static int jz4740_dma_probe(struct platform_device *pdev)
+ 
+ 	ret = dma_async_device_register(dd);
+ 	if (ret)
+-		return ret;
++		goto err_clk;
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev);
+@@ -570,6 +570,8 @@ static int jz4740_dma_probe(struct platform_device *pdev)
+ 
+ err_unregister:
+ 	dma_async_device_unregister(dd);
++err_clk:
++	clk_disable_unprepare(dmadev->clk);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index 2e9bc49d30ec..5e4fe755a4d8 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -329,7 +329,7 @@ static void dmatest_callback(void *arg)
+ {
+ 	struct dmatest_done *done = arg;
+ 	struct dmatest_thread *thread =
+-		container_of(arg, struct dmatest_thread, done_wait);
++		container_of(done, struct dmatest_thread, test_done);
+ 	if (!thread->done) {
+ 		done->done = true;
+ 		wake_up_all(done->wait);
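
container_of() subtracts the offset of the named member from the pointer it is given, so
naming the wrong member -- done_wait when the callback argument really points at test_done --
produced a shifted, bogus thread pointer. A compile-and-run demonstration with a stand-in
struct:

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct thread {
            int id;
            int done_wait;          /* stand-ins for the real members */
            int test_done;
    };

    int main(void)
    {
            struct thread t = { .id = 42 };
            void *arg = &t.test_done;       /* what the callback receives */

            struct thread *ok  = container_of(arg, struct thread, test_done);
            struct thread *bad = container_of(arg, struct thread, done_wait);

            printf("ok->id=%d, bad is off by %td bytes\n",
                   ok->id, (char *)ok - (char *)bad);
            return 0;
    }
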
+diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
+index cda6dab5067a..6b65a102b49d 100644
+--- a/drivers/edac/octeon_edac-lmc.c
++++ b/drivers/edac/octeon_edac-lmc.c
+@@ -79,6 +79,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
+ 	if (!pvt->inject)
+ 		int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ 	else {
++		int_reg.u64 = 0;
+ 		if (pvt->error_type == 1)
+ 			int_reg.s.sec_err = 1;
+ 		if (pvt->error_type == 2)
+diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
+index 14945fd9d5e1..935fa3bce6d0 100644
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -326,7 +326,7 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+ 	}
+ }
+ 
+-static int intel_gpio_runtime_idle(struct device *dev)
++static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
+ {
+ 	int err = pm_schedule_suspend(dev, 500);
+ 	return err ?: -EBUSY;
+diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
+index 2ed0237a8baf..304e68633d29 100644
+--- a/drivers/gpio/gpio-iop.c
++++ b/drivers/gpio/gpio-iop.c
+@@ -129,3 +129,7 @@ static int __init iop3xx_gpio_init(void)
+ 	return platform_driver_register(&iop3xx_gpio_driver);
+ }
+ arch_initcall(iop3xx_gpio_init);
++
++MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
++MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
+index 18a8182d4fec..7f1f32324504 100644
+--- a/drivers/gpio/gpio-xgene.c
++++ b/drivers/gpio/gpio-xgene.c
+@@ -42,9 +42,7 @@ struct xgene_gpio {
+ 	struct gpio_chip	chip;
+ 	void __iomem		*base;
+ 	spinlock_t		lock;
+-#ifdef CONFIG_PM
+ 	u32			set_dr_val[XGENE_MAX_GPIO_BANKS];
+-#endif
+ };
+ 
+ static inline struct xgene_gpio *to_xgene_gpio(struct gpio_chip *chip)
+@@ -132,8 +130,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM
+-static int xgene_gpio_suspend(struct device *dev)
++static __maybe_unused int xgene_gpio_suspend(struct device *dev)
+ {
+ 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
+ 	unsigned long bank_offset;
+@@ -146,7 +143,7 @@ static int xgene_gpio_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int xgene_gpio_resume(struct device *dev)
++static __maybe_unused int xgene_gpio_resume(struct device *dev)
+ {
+ 	struct xgene_gpio *gpio = dev_get_drvdata(dev);
+ 	unsigned long bank_offset;
+@@ -160,10 +157,6 @@ static int xgene_gpio_resume(struct device *dev)
+ }
+ 
+ static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
+-#define XGENE_GPIO_PM_OPS	(&xgene_gpio_pm)
+-#else
+-#define XGENE_GPIO_PM_OPS	NULL
+-#endif
+ 
+ static int xgene_gpio_probe(struct platform_device *pdev)
+ {
+@@ -230,7 +223,7 @@ static struct platform_driver xgene_gpio_driver = {
+ 	.driver = {
+ 		.name = "xgene-gpio",
+ 		.of_match_table = xgene_gpio_of_match,
+-		.pm     = XGENE_GPIO_PM_OPS,
++		.pm     = &xgene_gpio_pm,
+ 	},
+ 	.probe = xgene_gpio_probe,
+ 	.remove = xgene_gpio_remove,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 530b82c4e78b..7c736e8d7f33 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -189,6 +189,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
+ 
+ 	switch (type) {
+ 	case KFD_QUEUE_TYPE_SDMA:
++		if (dev->dqm->queue_count >=
++			CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
++			pr_err("Over-subscription is not allowed for SDMA.\n");
++			retval = -EPERM;
++			goto err_create_queue;
++		}
++
++		retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
++		if (retval != 0)
++			goto err_create_queue;
++		pqn->q = q;
++		pqn->kq = NULL;
++		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
++						&q->properties.vmid);
++		pr_debug("DQM returned %d for create_queue\n", retval);
++		print_queue(q);
++		break;
++
+ 	case KFD_QUEUE_TYPE_COMPUTE:
+ 		/* check if there is over subscription */
+ 		if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+index d4813e03f5ee..00275c3856ce 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+@@ -821,14 +821,18 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
+ 	struct drm_device *dev = dsi_config->dev;
+ 	struct drm_psb_private *dev_priv = dev->dev_private;
+ 	int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
+-
+ 	u32 pipeconf_reg = PIPEACONF;
+ 	u32 dspcntr_reg = DSPACNTR;
++	u32 pipeconf, dspcntr;
+ 
+-	u32 pipeconf = dev_priv->pipeconf[pipe];
+-	u32 dspcntr = dev_priv->dspcntr[pipe];
+ 	u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
+ 
++	if (WARN_ON(pipe < 0))
++		return;
++
++	pipeconf = dev_priv->pipeconf[pipe];
++	dspcntr = dev_priv->dspcntr[pipe];
++
+ 	if (pipe) {
+ 		pipeconf_reg = PIPECCONF;
+ 		dspcntr_reg = DSPCCNTR;
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+index 89f705c3a5eb..910a2f253990 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+@@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
+ 	return MODE_OK;
+ }
+ 
+-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
+-{
+-	if (mode == connector->dpms)
+-		return;
+-
+-	/*first, execute dpms*/
+-
+-	drm_helper_connector_dpms(connector, mode);
+-}
+-
+ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+ 				struct drm_connector *connector)
+ {
+@@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
+ 
+ /*DSI connector funcs*/
+ static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
+-	.dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
++	.dpms = drm_helper_connector_dpms,
+ 	.save = mdfld_dsi_connector_save,
+ 	.restore = mdfld_dsi_connector_restore,
+ 	.detect = mdfld_dsi_connector_detect,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 58c959265b1a..36000f76e31d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -368,7 +368,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
+ 	struct nouveau_cli *cli = nouveau_cli(file_priv);
+ 	struct drm_device *dev = chan->drm->dev;
+ 	int trycnt = 0;
+-	int ret, i;
++	int ret = -EINVAL, i;
+ 	struct nouveau_bo *res_bo = NULL;
+ 	LIST_HEAD(gart_list);
+ 	LIST_HEAD(vram_list);
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 042038e8a662..6e6634cd1d17 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -610,7 +610,8 @@ static int omap_dmm_probe(struct platform_device *dev)
+ 		match = of_match_node(dmm_of_match, dev->dev.of_node);
+ 		if (!match) {
+ 			dev_err(&dev->dev, "failed to find matching device node\n");
+-			return -ENODEV;
++			ret = -ENODEV;
++			goto fail;
+ 		}
+ 
+ 		omap_dmm->plat_data = match->data;
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index 6edcb5485092..b35ebabd6a9f 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -946,7 +946,7 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
+ 		/* calc dclk divider with current vco freq */
+ 		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
+ 							 pd_min, pd_even);
+-		if (vclk_div > pd_max)
++		if (dclk_div > pd_max)
+ 			break; /* vco is too big, it has to stop */
+ 
+ 		/* calc score with current vco freq */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 824c835330df..de155c77f739 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -511,7 +511,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
+ 	status = rcar_du_crtc_read(rcrtc, DSSR);
+ 	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
+ 
+-	if (status & DSSR_FRM) {
++	if (status & DSSR_VBK) {
+ 		drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ 		rcar_du_crtc_finish_page_flip(rcrtc);
+ 		ret = IRQ_HANDLED;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index d786b48f5d7b..d8638d8221ea 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2010,6 +2010,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+@@ -2349,6 +2350,9 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index e995058ad264..62b337d61fe1 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -559,6 +559,9 @@
+ #define USB_DEVICE_ID_LD_MICROCASSYTIME		0x1033
+ #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE	0x1035
+ #define USB_DEVICE_ID_LD_MICROCASSYPH		0x1038
++#define USB_DEVICE_ID_LD_POWERANALYSERCASSY	0x1040
++#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY	0x1042
++#define USB_DEVICE_ID_LD_MACHINETESTCASSY	0x1043
+ #define USB_DEVICE_ID_LD_JWM		0x1080
+ #define USB_DEVICE_ID_LD_DMMP		0x1081
+ #define USB_DEVICE_ID_LD_UMIP		0x1090
+@@ -1011,6 +1014,7 @@
+ 
+ #define USB_VENDOR_ID_XIN_MO			0x16c0
+ #define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE	0x05e1
++#define USB_DEVICE_ID_THT_2P_ARCADE		0x75e1
+ 
+ #define USB_VENDOR_ID_XIROKU		0x1477
+ #define USB_DEVICE_ID_XIROKU_SPX	0x1006
+diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
+index 7df5227a7e61..9ad7731d2e10 100644
+--- a/drivers/hid/hid-xinmo.c
++++ b/drivers/hid/hid-xinmo.c
+@@ -46,6 +46,7 @@ static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
+ 
+ static const struct hid_device_id xinmo_devices[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
+ 	{ }
+ };
+ 
+diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
+index cccef87963e0..975c43d446f8 100644
+--- a/drivers/hwmon/asus_atk0110.c
++++ b/drivers/hwmon/asus_atk0110.c
+@@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
+ 		else
+ 			err = atk_read_value_new(sensor, value);
+ 
++		if (err)
++			return err;
++
+ 		sensor->is_valid = true;
+ 		sensor->last_updated = jiffies;
+ 		sensor->cached_value = *value;
+diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
+index f2e47c7dd808..1362de353076 100644
+--- a/drivers/hwmon/pmbus/pmbus_core.c
++++ b/drivers/hwmon/pmbus/pmbus_core.c
+@@ -20,6 +20,7 @@
+  */
+ 
+ #include <linux/kernel.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
+ #include <linux/err.h>
+@@ -476,8 +477,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
+ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ 				  struct pmbus_sensor *sensor)
+ {
+-	long val = (s16) sensor->data;
+-	long m, b, R;
++	s64 b, val = (s16)sensor->data;
++	s32 m, R;
+ 
+ 	m = data->info->m[sensor->class];
+ 	b = data->info->b[sensor->class];
+@@ -505,11 +506,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
+ 		R--;
+ 	}
+ 	while (R < 0) {
+-		val = DIV_ROUND_CLOSEST(val, 10);
++		val = div_s64(val + 5LL, 10L);  /* round closest */
+ 		R++;
+ 	}
+ 
+-	return (val - b) / m;
++	val = div_s64(val - b, m);
++	return clamp_val(val, LONG_MIN, LONG_MAX);
+ }
+ 
+ /*
+@@ -621,7 +623,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
+ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ 				 struct pmbus_sensor *sensor, long val)
+ {
+-	long m, b, R;
++	s64 b, val64 = val;
++	s32 m, R;
+ 
+ 	m = data->info->m[sensor->class];
+ 	b = data->info->b[sensor->class];
+@@ -638,18 +641,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
+ 		R -= 3;		/* Adjust R and b for data in milli-units */
+ 		b *= 1000;
+ 	}
+-	val = val * m + b;
++	val64 = val64 * m + b;
+ 
+ 	while (R > 0) {
+-		val *= 10;
++		val64 *= 10;
+ 		R--;
+ 	}
+ 	while (R < 0) {
+-		val = DIV_ROUND_CLOSEST(val, 10);
++		val64 = div_s64(val64 + 5LL, 10L);  /* round closest */
+ 		R++;
+ 	}
+ 
+-	return val;
++	return (u16)clamp_val(val64, S16_MIN, S16_MAX);
+ }
+ 
+ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
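
The PMBus DIRECT format maps between real-world values X and register values Y via
Y = (m * X + b) * 10^R, with chip-specific coefficients m, b and R; the intermediate products
can overflow 32-bit long arithmetic, which is why the conversion now runs in s64 (div_s64)
and clamps before narrowing to the 16-bit register. A condensed user-space version of the
encode direction (coefficients invented for the demo):

    #include <stdio.h>
    #include <stdint.h>

    /* PMBus DIRECT encode: Y = (m*X + b) * 10^R, in 64 bits with clamping */
    static uint16_t data2reg_direct(long x, int32_t m, int64_t b, int32_t R)
    {
            int64_t v = (int64_t)x * m + b;

            for (; R > 0; R--)
                    v *= 10;
            for (; R < 0; R++)
                    v = (v + 5) / 10;       /* round closest, as div_s64(v+5, 10) */
            if (v < INT16_MIN)
                    v = INT16_MIN;
            if (v > INT16_MAX)
                    v = INT16_MAX;
            return (uint16_t)v;
    }

    int main(void)
    {
            /* in 32-bit arithmetic this product would already have overflowed */
            printf("%u\n", data2reg_direct(2000000L, 16000, 0, 0));
            return 0;
    }
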
+diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
+index 90e322959303..42c25aed671d 100644
+--- a/drivers/i2c/i2c-boardinfo.c
++++ b/drivers/i2c/i2c-boardinfo.c
+@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
+  * The board info passed can safely be __initdata, but be careful of embedded
+  * pointers (for platform_data, functions, etc) since that won't be copied.
+  */
+-int __init
+-i2c_register_board_info(int busnum,
+-	struct i2c_board_info const *info, unsigned len)
++int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
+ {
+ 	int status;
+ 
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
+index 1c626a3cc7f2..f3a64a45c512 100644
+--- a/drivers/iio/adc/axp288_adc.c
++++ b/drivers/iio/adc/axp288_adc.c
+@@ -44,7 +44,7 @@ struct axp288_adc_info {
+ 	struct regmap *regmap;
+ };
+ 
+-static const struct iio_chan_spec const axp288_adc_channels[] = {
++static const struct iio_chan_spec axp288_adc_channels[] = {
+ 	{
+ 		.indexed = 1,
+ 		.type = IIO_TEMP,
+diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
+index f53e9a803a0e..93b99bd93738 100644
+--- a/drivers/iio/imu/adis_trigger.c
++++ b/drivers/iio/imu/adis_trigger.c
+@@ -47,6 +47,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+ 	if (adis->trig == NULL)
+ 		return -ENOMEM;
+ 
++	adis->trig->dev.parent = &adis->spi->dev;
++	adis->trig->ops = &adis_trigger_ops;
++	iio_trigger_set_drvdata(adis->trig, adis);
++
+ 	ret = request_irq(adis->spi->irq,
+ 			  &iio_trigger_generic_data_rdy_poll,
+ 			  IRQF_TRIGGER_RISING,
+@@ -55,9 +59,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
+ 	if (ret)
+ 		goto error_free_trig;
+ 
+-	adis->trig->dev.parent = &adis->spi->dev;
+-	adis->trig->ops = &adis_trigger_ops;
+-	iio_trigger_set_drvdata(adis->trig, adis);
+ 	ret = iio_trigger_register(adis->trig);
+ 
+ 	indio_dev->trig = iio_trigger_get(adis->trig);
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index 7fa280b28ecb..ec6b26f008d9 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -150,7 +150,7 @@ unsigned int iio_buffer_poll(struct file *filp,
+ 	struct iio_dev *indio_dev = filp->private_data;
+ 	struct iio_buffer *rb = indio_dev->buffer;
+ 
+-	if (!indio_dev->info)
++	if (!indio_dev->info || rb == NULL)
+ 		return 0;
+ 
+ 	poll_wait(filp, &rb->pollq, wait);
+diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
+index 68ddb3710215..c1e8c01f4ab3 100644
+--- a/drivers/infiniband/hw/cxgb4/cq.c
++++ b/drivers/infiniband/hw/cxgb4/cq.c
+@@ -581,10 +581,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
+ 			ret = -EAGAIN;
+ 			goto skip_cqe;
+ 		}
+-		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
++		if (unlikely(!CQE_STATUS(hw_cqe) &&
++			     CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
+ 			t4_set_wq_in_error(wq);
+-			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
+-			goto proc_cqe;
++			hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
+ 		}
+ 		goto proc_cqe;
+ 	}
+diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
+index 1563ee64a180..640bb7360537 100644
+--- a/drivers/infiniband/hw/mlx4/main.c
++++ b/drivers/infiniband/hw/mlx4/main.c
+@@ -2436,9 +2436,8 @@ err_steer_free_bitmap:
+ 	kfree(ibdev->ib_uc_qpns_bitmap);
+ 
+ err_steer_qp_release:
+-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
+-		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+-				      ibdev->steer_qpn_count);
++	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
++			      ibdev->steer_qpn_count);
+ err_counter:
+ 	for (; i; --i)
+ 		if (ibdev->counters[i - 1] != -1)
+@@ -2540,11 +2539,9 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
+ 		ibdev->iboe.nb.notifier_call = NULL;
+ 	}
+ 
+-	if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+-		mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
+-				      ibdev->steer_qpn_count);
+-		kfree(ibdev->ib_uc_qpns_bitmap);
+-	}
++	mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
++			      ibdev->steer_qpn_count);
++	kfree(ibdev->ib_uc_qpns_bitmap);
+ 
+ 	if (ibdev->iboe.nb_inet.notifier_call) {
+ 		if (unregister_inetaddr_notifier(&ibdev->iboe.nb_inet))
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 416cd07ab87a..6c30192dcb78 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -958,8 +958,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+ 		return -ENOMEM;
+ 
+ 	attr->qp_state = IB_QPS_INIT;
+-	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+-	    IB_ACCESS_REMOTE_WRITE;
++	attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
+ 	attr->port_num = ch->sport->port;
+ 	attr->pkey_index = 0;
+ 
+diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
+index 4e491c1762cf..5c4f7f8f2c20 100644
+--- a/drivers/input/keyboard/tca8418_keypad.c
++++ b/drivers/input/keyboard/tca8418_keypad.c
+@@ -164,11 +164,18 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+ 	int error, col, row;
+ 	u8 reg, state, code;
+ 
+-	/* Initial read of the key event FIFO */
+-	error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
++	do {
++		error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
++		if (error < 0) {
++			dev_err(&keypad_data->client->dev,
++				"unable to read REG_KEY_EVENT_A\n");
++			break;
++		}
++
++		/* Assume that key code 0 signifies empty FIFO */
++		if (reg <= 0)
++			break;
+ 
+-	/* Assume that key code 0 signifies empty FIFO */
+-	while (error >= 0 && reg > 0) {
+ 		state = reg & KEY_EVENT_VALUE;
+ 		code  = reg & KEY_EVENT_CODE;
+ 
+@@ -184,11 +191,7 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+ 
+ 		/* Read for next loop */
+ 		error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+-	}
+-
+-	if (error < 0)
+-		dev_err(&keypad_data->client->dev,
+-			"unable to read REG_KEY_EVENT_A\n");
++	} while (1);
+ 
+ 	input_sync(input);
+ }
+diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
+index 0e0d094df2e6..6caeb1a2670c 100644
+--- a/drivers/input/misc/twl6040-vibra.c
++++ b/drivers/input/misc/twl6040-vibra.c
+@@ -262,7 +262,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
+ 	int vddvibr_uV = 0;
+ 	int error;
+ 
+-	twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
++	twl6040_core_node = of_get_child_by_name(twl6040_core_dev->of_node,
+ 						 "vibra");
+ 	if (!twl6040_core_node) {
+ 		dev_err(&pdev->dev, "parent of node is missing?\n");
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index c3c5d492cba0..07ce8f4314ba 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1550,7 +1550,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+ 		case 5:
+ 			etd->hw_version = 3;
+ 			break;
+-		case 6 ... 14:
++		case 6 ... 15:
+ 			etd->hw_version = 4;
+ 			break;
+ 		default:
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 7e2dc5e56632..0b49f29bf0da 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+ 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
+ 		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ 		button_info = 0x33;
++	} else if (!button_info) {
++		psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
++		button_info = 0x33;
+ 	}
+ 
+ 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
+diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
+index 251ff2aa0633..7a0dbce4dae9 100644
+--- a/drivers/input/touchscreen/88pm860x-ts.c
++++ b/drivers/input/touchscreen/88pm860x-ts.c
+@@ -126,7 +126,7 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+ 	int data, n, ret;
+ 	if (!np)
+ 		return -ENODEV;
+-	np = of_find_node_by_name(np, "touch");
++	np = of_get_child_by_name(np, "touch");
+ 	if (!np) {
+ 		dev_err(&pdev->dev, "Can't find touch node\n");
+ 		return -EINVAL;
+@@ -144,13 +144,13 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+ 	if (data) {
+ 		ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
+ 		if (ret < 0)
+-			return -EINVAL;
++			goto err_put_node;
+ 	}
+ 	/* set tsi prebias time */
+ 	if (!of_property_read_u32(np, "marvell,88pm860x-tsi-prebias", &data)) {
+ 		ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
+ 		if (ret < 0)
+-			return -EINVAL;
++			goto err_put_node;
+ 	}
+ 	/* set prebias & prechg time of pen detect */
+ 	data = 0;
+@@ -161,10 +161,18 @@ static int pm860x_touch_dt_init(struct platform_device *pdev,
+ 	if (data) {
+ 		ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
+ 		if (ret < 0)
+-			return -EINVAL;
++			goto err_put_node;
+ 	}
+ 	of_property_read_u32(np, "marvell,88pm860x-resistor-X", res_x);
++
++	of_node_put(np);
++
+ 	return 0;
++
++err_put_node:
++	of_node_put(np);
++
++	return -EINVAL;
+ }
+ #else
+ #define pm860x_touch_dt_init(x, y, z)	(-1)
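
of_get_child_by_name() both fixes the lookup (of_find_node_by_name() walks the whole tree
from the given node onward, not just its children) and returns the node with its refcount
raised, so every exit -- success or error -- must now drop it, hence the err_put_node ladder.
The get/put discipline as a runnable toy:

    #include <stdio.h>

    static int refcount;

    static int *get_child(void)   { refcount++; return &refcount; }
    static void put_node(int *np) { (void)np; refcount--; }

    static int parse_touch(int fail)
    {
            int *np = get_child();          /* of_get_child_by_name(): +1 ref */
            int ret = 0;

            if (fail)
                    ret = -1;
            put_node(np);                   /* every exit path drops the ref */
            return ret;
    }

    int main(void)
    {
            parse_touch(1);
            parse_touch(0);
            printf("leaked refs: %d\n", refcount);
            return 0;
    }
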
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 9976c37b9c64..f2b3a0152860 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -584,7 +584,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+ 	 * Ensure that stores to Normal memory are visible to the
+ 	 * other CPUs before issuing the IPI.
+ 	 */
+-	smp_wmb();
++	wmb();
+ 
+ 	for_each_cpu(cpu, mask) {
+ 		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
+index 823f6985b260..dd7e38ac29bd 100644
+--- a/drivers/isdn/capi/kcapi.c
++++ b/drivers/isdn/capi/kcapi.c
+@@ -1032,6 +1032,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
+ 						     sizeof(avmb1_carddef))))
+ 				return -EFAULT;
+ 			cdef.cardtype = AVM_CARDTYPE_B1;
++			cdef.cardnr = 0;
+ 		} else {
+ 			if ((retval = copy_from_user(&cdef, data,
+ 						     sizeof(avmb1_extcarddef))))
+diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
+index 7b4ddf0a39ec..2d28530b7e82 100644
+--- a/drivers/isdn/hardware/eicon/message.c
++++ b/drivers/isdn/hardware/eicon/message.c
+@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
+ static void listen_check(DIVA_CAPI_ADAPTER *);
+ static byte AddInfo(byte **, byte **, byte *, byte *);
+ static byte getChannel(API_PARSE *);
+-static void IndParse(PLCI *, word *, byte **, byte);
++static void IndParse(PLCI *, const word *, byte **, byte);
+ static byte ie_compare(byte *, byte *);
+ static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
+ static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
+@@ -4860,7 +4860,7 @@ static void sig_ind(PLCI *plci)
+ 	/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
+ 	/* SMSG is situated at the end because its 0 (for compatibility reasons */
+ 	/* (see Info_Mask Bit 4, first IE. then the message type)           */
+-	word parms_id[] =
++	static const word parms_id[] =
+ 		{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
+ 		 UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
+ 		 RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
+@@ -4868,12 +4868,12 @@ static void sig_ind(PLCI *plci)
+ 	/* 14 FTY repl by ESC_CHI */
+ 	/* 18 PI  repl by ESC_LAW */
+ 	/* removed OAD changed to 0xff for future use, OAD is multiIE now */
+-	word multi_fac_id[] = {1, FTY};
+-	word multi_pi_id[]  = {1, PI};
+-	word multi_CiPN_id[]  = {1, OAD};
+-	word multi_ssext_id[]  = {1, ESC_SSEXT};
++	static const word multi_fac_id[] = {1, FTY};
++	static const word multi_pi_id[]  = {1, PI};
++	static const word multi_CiPN_id[]  = {1, OAD};
++	static const word multi_ssext_id[]  = {1, ESC_SSEXT};
+ 
+-	word multi_vswitch_id[]  = {1, ESC_VSWITCH};
++	static const word multi_vswitch_id[]  = {1, ESC_VSWITCH};
+ 
+ 	byte *cau;
+ 	word ncci;
+@@ -8926,7 +8926,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
+ /* functions for all parameters sent in INDs                        */
+ /*------------------------------------------------------------------*/
+ 
+-static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
++static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
+ {
+ 	word ploc;            /* points to current location within packet */
+ 	byte w;
+diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
+index 358a574d9e8b..46d957c34be1 100644
+--- a/drivers/isdn/icn/icn.c
++++ b/drivers/isdn/icn/icn.c
+@@ -718,7 +718,7 @@ icn_sendbuf(int channel, int ack, struct sk_buff *skb, icn_card *card)
+ 			return 0;
+ 		if (card->sndcount[channel] > ICN_MAX_SQUEUE)
+ 			return 0;
+-#warning TODO test headroom or use skb->nb to flag ACK
++		/* TODO test headroom or use skb->nb to flag ACK */
+ 		nskb = skb_clone(skb, GFP_ATOMIC);
+ 		if (nskb) {
+ 			/* Push ACK flag as one
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index f0b75d54951a..ee2927b460c9 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -808,7 +808,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
+ 	c->shrink.scan_objects = bch_mca_scan;
+ 	c->shrink.seeks = 4;
+ 	c->shrink.batch = c->btree_pages * 2;
+-	register_shrinker(&c->shrink);
++
++	if (register_shrinker(&c->shrink))
++		pr_warn("bcache: %s: could not register shrinker",
++				__func__);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 51dc353f7962..657b8f763754 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1521,7 +1521,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ 	int l;
+ 	struct dm_buffer *b, *tmp;
+ 	unsigned long freed = 0;
+-	unsigned long count = nr_to_scan;
++	unsigned long count = c->n_buffers[LIST_CLEAN] +
++			      c->n_buffers[LIST_DIRTY];
+ 	unsigned long retain_target = get_retain_buffers(c);
+ 
+ 	for (l = 0; l < LIST_SIZE; l++) {
+@@ -1558,6 +1559,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ {
+ 	struct dm_bufio_client *c;
+ 	unsigned long count;
++	unsigned long retain_target;
+ 
+ 	c = container_of(shrink, struct dm_bufio_client, shrinker);
+ 	if (sc->gfp_mask & __GFP_FS)
+@@ -1566,8 +1568,9 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+ 		return 0;
+ 
+ 	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
++	retain_target = get_retain_buffers(c);
+ 	dm_bufio_unlock(c);
+-	return count;
++	return (count < retain_target) ? 0 : (count - retain_target);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index a178134abbe8..c9f51f7c1063 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -81,10 +81,14 @@
+ #define SECTOR_TO_BLOCK_SHIFT 3
+ 
+ /*
++ * For btree insert:
+  *  3 for btree insert +
+  *  2 for btree lookup used within space map
++ * For btree remove:
++ *  2 for shadow spine +
++ *  4 for rebalance 3 child node
+  */
+-#define THIN_MAX_CONCURRENT_LOCKS 5
++#define THIN_MAX_CONCURRENT_LOCKS 6
+ 
+ /* This should be plenty */
+ #define SPACE_MAP_ROOT_SIZE 128
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 1fdcd5735418..03bcc1ab2e9d 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1043,8 +1043,9 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor
+ 	 * (not needed for Linear and RAID0 as metadata doesn't
+ 	 * record this size)
+ 	 */
+-	if (rdev->sectors >= (2ULL << 32) && sb->level >= 1)
+-		rdev->sectors = (2ULL << 32) - 2;
++	if (IS_ENABLED(CONFIG_LBDAF) && (u64)rdev->sectors >= (2ULL << 32) &&
++	    sb->level >= 1)
++		rdev->sectors = (sector_t)(2ULL << 32) - 2;
+ 
+ 	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
+ 		/* "this cannot possibly happen" ... */
+@@ -1337,8 +1338,9 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ 	/* Limit to 4TB as metadata cannot record more than that.
+ 	 * 4TB == 2^32 KB, or 2*2^32 sectors.
+ 	 */
+-	if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
+-		num_sectors = (2ULL << 32) - 2;
++	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
++	    rdev->mddev->level >= 1)
++		num_sectors = (sector_t)(2ULL << 32) - 2;
+ 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ 		       rdev->sb_page);
+ 	md_super_wait(rdev->mddev);
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 360c22d44647..f2a8e4c69d9f 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -572,23 +572,8 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ 	pn->keys[1] = rn->keys[0];
+ 	memcpy_disk(value_ptr(pn, 1), &val, sizeof(__le64));
+ 
+-	/*
+-	 * rejig the spine.  This is ugly, since it knows too
+-	 * much about the spine
+-	 */
+-	if (s->nodes[0] != new_parent) {
+-		unlock_block(s->info, s->nodes[0]);
+-		s->nodes[0] = new_parent;
+-	}
+-	if (key < le64_to_cpu(rn->keys[0])) {
+-		unlock_block(s->info, right);
+-		s->nodes[1] = left;
+-	} else {
+-		unlock_block(s->info, left);
+-		s->nodes[1] = right;
+-	}
+-	s->count = 2;
+-
++	unlock_block(s->info, left);
++	unlock_block(s->info, right);
+ 	return 0;
+ }
+ 
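+
+With the spine "rejig" removed, btree_split_beneath() now just unlocks
+both freshly split children; the insert path appears to be left to
+walk down from the parent again, dropping the special-case knowledge
+of the spine layout that the deleted comment itself apologized for.
+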
+diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
+index de803a11efb4..768ae2115f1a 100644
+--- a/drivers/media/i2c/s5k6aa.c
++++ b/drivers/media/i2c/s5k6aa.c
+@@ -421,6 +421,7 @@ static int s5k6aa_set_ahb_address(struct i2c_client *client)
+ 
+ /**
+  * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
++ * @s5k6aa: pointer to &struct s5k6aa describing the device
+  *
+  * Configure the internal ISP PLL for the required output frequency.
+  * Locking: called with s5k6aa.lock mutex held.
+@@ -669,6 +670,7 @@ static int s5k6aa_set_input_params(struct s5k6aa *s5k6aa)
+ 
+ /**
+  * s5k6aa_configure_video_bus - configure the video output interface
++ * @s5k6aa: pointer to &struct s5k6aa describing the device
+  * @bus_type: video bus type: parallel or MIPI-CSI
+  * @nlanes: number of MIPI lanes to be used (MIPI-CSI only)
+  *
+@@ -724,6 +726,8 @@ static int s5k6aa_new_config_sync(struct i2c_client *client, int timeout,
+ 
+ /**
+  * s5k6aa_set_prev_config - write user preview register set
++ * @s5k6aa: pointer to &struct s5k6aa describing the device
++ * @preset: s5k6aa preset to be applied
+  *
+  * Configure output resolution and color format, pixel clock
+  * frequency range, device frame rate type and frame period range.
+@@ -777,6 +781,7 @@ static int s5k6aa_set_prev_config(struct s5k6aa *s5k6aa,
+ 
+ /**
+  * s5k6aa_initialize_isp - basic ISP MCU initialization
++ * @sd: pointer to V4L2 sub-device descriptor
+  *
+  * Configure AHB addresses for registers read/write; configure PLLs for
+  * required output pixel clock. The ISP power supply needs to be already
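+
+These s5k6aa hunks are documentation-only: kernel-doc expects every
+parameter of a documented function to carry a matching line of the
+form "@name: description", and scripts/kernel-doc warns when one is
+missing. A complete header, as fixed above, reads:
+
+	/**
+	 * s5k6aa_configure_pixel_clock - apply ISP main clock/PLL configuration
+	 * @s5k6aa: pointer to &struct s5k6aa describing the device
+	 */
+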
+diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
+index 8e74fb7f2a07..2d673516a614 100644
+--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
++++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
+@@ -400,3 +400,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
+ 	mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
+ }
+ EXPORT_SYMBOL(soc_camera_calc_client_output);
++
++MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
++MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
++MODULE_LICENSE("GPL");
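+
+The new MODULE_* tags matter because soc_scale_crop.c can be built as
+its own module: loading a module without MODULE_LICENSE() taints the
+kernel, and an untagged (assumed proprietary) module is also refused
+access to EXPORT_SYMBOL_GPL symbols.
+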
+diff --git a/drivers/media/tuners/r820t.c b/drivers/media/tuners/r820t.c
+index 71159a58860f..4bfd64b0c0ad 100644
+--- a/drivers/media/tuners/r820t.c
++++ b/drivers/media/tuners/r820t.c
+@@ -410,9 +410,11 @@ static int r820t_write(struct r820t_priv *priv, u8 reg, const u8 *val,
+ 	return 0;
+ }
+ 
+-static int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
++static inline int r820t_write_reg(struct r820t_priv *priv, u8 reg, u8 val)
+ {
+-	return r820t_write(priv, reg, &val, 1);
++	u8 tmp = val; /* work around GCC PR81715 with asan-stack=1 */
++
++	return r820t_write(priv, reg, &tmp, 1);
+ }
+ 
+ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
+@@ -425,17 +427,18 @@ static int r820t_read_cache_reg(struct r820t_priv *priv, int reg)
+ 		return -EINVAL;
+ }
+ 
+-static int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
++static inline int r820t_write_reg_mask(struct r820t_priv *priv, u8 reg, u8 val,
+ 				u8 bit_mask)
+ {
++	u8 tmp = val;
+ 	int rc = r820t_read_cache_reg(priv, reg);
+ 
+ 	if (rc < 0)
+ 		return rc;
+ 
+-	val = (rc & ~bit_mask) | (val & bit_mask);
++	tmp = (rc & ~bit_mask) | (tmp & bit_mask);
+ 
+-	return r820t_write(priv, reg, &val, 1);
++	return r820t_write(priv, reg, &tmp, 1);
+ }
+ 
+ static int r820t_read(struct r820t_priv *priv, u8 reg, u8 *val, int len)
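+
+Both r820t hunks apply the same workaround for GCC PR81715: under
+KASAN with asan-stack=1, affected compilers reserve a red-zoned stack
+slot for every inlined instance of a by-value parameter whose address
+is taken, bloating the caller's stack frame. Copying the value into a
+local first sidesteps that. A minimal sketch with hypothetical names
+(write_buf stands in for the driver's real block-write helper):
+
+	static int write_buf(struct my_priv *priv, const u8 *val, int len);
+
+	static inline int write_one(struct my_priv *priv, u8 val)
+	{
+		u8 tmp = val;	/* local copy: never pass &val itself */
+
+		return write_buf(priv, &tmp, 1);
+	}
+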
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 5de6f7c04d09..7399bd58e286 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -444,18 +444,23 @@ static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid,
+ 
+ static int lme2510_return_status(struct dvb_usb_device *d)
+ {
+-	int ret = 0;
++	int ret;
+ 	u8 *data;
+ 
+-	data = kzalloc(10, GFP_KERNEL);
++	data = kzalloc(6, GFP_KERNEL);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+-	ret |= usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
+-			0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200);
+-	info("Firmware Status: %x (%x)", ret , data[2]);
++	ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0),
++			      0x06, 0x80, 0x0302, 0x00,
++			      data, 0x6, 200);
++	if (ret != 6)
++		ret = -EINVAL;
++	else
++		ret = data[2];
++
++	info("Firmware Status: %6ph", data);
+ 
+-	ret = (ret < 0) ? -ENODEV : data[2];
+ 	kfree(data);
+ 	return ret;
+ }
+@@ -1029,8 +1034,6 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 		if (adap->fe[0]) {
+ 			info("FE Found M88RS2000");
+-			dvb_attach(ts2020_attach, adap->fe[0], &ts2020_config,
+-					&d->i2c_adap);
+ 			st->i2c_tuner_gate_w = 5;
+ 			st->i2c_tuner_gate_r = 5;
+ 			st->i2c_tuner_addr = 0x60;
+@@ -1096,17 +1099,18 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
+ 			ret = st->tuner_config;
+ 		break;
+ 	case TUNER_RS2000:
+-		ret = st->tuner_config;
++		if (dvb_attach(ts2020_attach, adap->fe[0],
++			       &ts2020_config, &d->i2c_adap))
++			ret = st->tuner_config;
+ 		break;
+ 	default:
+ 		break;
+ 	}
+ 
+-	if (ret)
++	if (ret) {
+ 		info("TUN Found %s tuner", tun_msg[ret]);
+-	else {
+-		info("TUN No tuner found --- resetting device");
+-		lme_coldreset(d);
++	} else {
++		info("TUN No tuner found");
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1150,6 +1154,7 @@ static int lme2510_get_adapter_count(struct dvb_usb_device *d)
+ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
+ {
+ 	struct lme2510_state *st = d->priv;
++	int status;
+ 
+ 	usb_reset_configuration(d->udev);
+ 
+@@ -1158,12 +1163,16 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
+ 
+ 	st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
+ 
+-	if (lme2510_return_status(d) == 0x44) {
++	status = lme2510_return_status(d);
++	if (status == 0x44) {
+ 		*name = lme_firmware_switch(d, 0);
+ 		return COLD;
+ 	}
+ 
+-	return 0;
++	if (status != 0x47)
++		return -EINVAL;
++
++	return WARM;
+ }
+ 
+ static int lme2510_get_stream_config(struct dvb_frontend *fe, u8 *ts_type,
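+
+The rewritten lme2510_return_status() leans on the usb_control_msg()
+contract: it returns the number of bytes actually transferred on
+success, or a negative errno. OR-ing that into an accumulator, as the
+old "ret |=" did, mixes byte counts with error codes; the fix checks
+the transfer length explicitly before trusting the payload. The idiom,
+condensed into a hypothetical helper (request values as in the hunk):
+
+	static int read_status(struct usb_device *udev)
+	{
+		u8 *buf = kmalloc(6, GFP_KERNEL);	/* USB buffers
+							 * must be heap,
+							 * not stack */
+		int ret;
+
+		if (!buf)
+			return -ENOMEM;
+		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+				      0x06, 0x80, 0x0302, 0x00,
+				      buf, 6, 200);
+		/* a short read must not be mistaken for a status byte */
+		ret = (ret == 6) ? buf[2] : -EINVAL;
+		kfree(buf);
+		return ret;
+	}
+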
+diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
+index ffc3704abded..d89de44d94a0 100644
+--- a/drivers/media/usb/dvb-usb/cxusb.c
++++ b/drivers/media/usb/dvb-usb/cxusb.c
+@@ -818,6 +818,8 @@ static int dvico_bluebird_xc2028_callback(void *ptr, int component,
+ 	case XC2028_RESET_CLK:
+ 		deb_info("%s: XC2028_RESET_CLK %d\n", __func__, arg);
+ 		break;
++	case XC2028_I2C_FLUSH:
++		break;
+ 	default:
+ 		deb_info("%s: unknown command %d, arg %d\n", __func__,
+ 			 command, arg);
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index 0d7565158207..97057ae10509 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -431,6 +431,7 @@ static int stk7700ph_xc3028_callback(void *ptr, int component,
+ 		state->dib7000p_ops.set_gpio(adap->fe_adap[0].fe, 8, 0, 1);
+ 		break;
+ 	case XC2028_RESET_CLK:
++	case XC2028_I2C_FLUSH:
+ 		break;
+ 	default:
+ 		err("%s: unknown command %d, arg %d\n", __func__,
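+
+This dib0700 hunk and the cxusb one above are the same fix: the
+xc2028 tuner driver issues an XC2028_I2C_FLUSH callback that these
+boards have nothing to do for, so an explicit empty case keeps the
+harmless event out of the "unknown command" error path.
+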
+diff --git a/drivers/media/usb/em28xx/Kconfig b/drivers/media/usb/em28xx/Kconfig
+index e382210c4ada..75323f5efd0f 100644
+--- a/drivers/media/usb/em28xx/Kconfig
++++ b/drivers/media/usb/em28xx/Kconfig
+@@ -11,7 +11,7 @@ config VIDEO_EM28XX_V4L2
+ 	select VIDEO_SAA711X if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TVP5150 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_MSP3400 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT
++	select VIDEO_MT9V011 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ 
+ 	---help---
+ 	  This is a video4linux driver for Empia 28xx based TV cards.
+diff --git a/drivers/media/usb/go7007/Kconfig b/drivers/media/usb/go7007/Kconfig
+index 95a3af644a92..af1d02430931 100644
+--- a/drivers/media/usb/go7007/Kconfig
++++ b/drivers/media/usb/go7007/Kconfig
+@@ -11,7 +11,7 @@ config VIDEO_GO7007
+ 	select VIDEO_TW2804 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9903 if MEDIA_SUBDRV_AUTOSELECT
+ 	select VIDEO_TW9906 if MEDIA_SUBDRV_AUTOSELECT
+-	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT
++	select VIDEO_OV7640 if MEDIA_SUBDRV_AUTOSELECT && MEDIA_CAMERA_SUPPORT
+ 	select VIDEO_UDA1342 if MEDIA_SUBDRV_AUTOSELECT
+ 	---help---
+ 	  This is a video4linux driver for the WIS GO7007 MPEG
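+
+The "&& MEDIA_CAMERA_SUPPORT" qualifiers in both Kconfig hunks are
+needed because Kconfig's "select" forces a symbol on without checking
+that symbol's own dependencies: MT9V011 and OV7640 are camera sensor
+drivers gated on MEDIA_CAMERA_SUPPORT, so selecting them
+unconditionally could create configurations with unmet dependencies.
+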
+diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
+index 3fc64197b4e6..08f0ca7aa012 100644
+--- a/drivers/media/usb/hdpvr/hdpvr-core.c
++++ b/drivers/media/usb/hdpvr/hdpvr-core.c
+@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
+ 	struct hdpvr_device *dev;
+ 	struct usb_host_interface *iface_desc;
+ 	struct usb_endpoint_descriptor *endpoint;
++#if IS_ENABLED(CONFIG_I2C)
+ 	struct i2c_client *client;
++#endif
+ 	size_t buffer_size;
+ 	int i;
+ 	int retval = -ENOMEM;
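+
+IS_ENABLED() expands to a constant expression, so it is equally valid
+in preprocessor conditionals; guarding the declaration itself, rather
+than only the code that uses it, avoids an unused-variable warning in
+hdpvr_probe() when CONFIG_I2C is off.
+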
+diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
+index a7e1f6f37790..a8f265cee365 100644
+--- a/drivers/media/usb/pwc/pwc-if.c
++++ b/drivers/media/usb/pwc/pwc-if.c
+@@ -1110,8 +1110,10 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
+ 
+ 	return 0;
+ 
++#ifdef CONFIG_USB_PWC_INPUT_EVDEV
+ err_video_unreg:
+ 	video_unregister_device(&pdev->vdev);
++#endif
+ err_unregister_v4l2_dev:
+ 	v4l2_device_unregister(&pdev->v4l2_dev);
+ err_free_controls:
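+
+Same trick in the pwc driver, but for a label: the only goto targeting
+err_video_unreg lives inside CONFIG_USB_PWC_INPUT_EVDEV code, so the
+label is compiled out with it to silence a defined-but-unused-label
+warning on configurations without the input device.
+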
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index 29428bef272c..3bbc77aa6a33 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -127,6 +127,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
+ 
+ static struct usb_device_id usbtv_id_table[] = {
+ 	{ USB_DEVICE(0x1b71, 0x3002) },
++	{ USB_DEVICE(0x1f71, 0x3301) },
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(usb, usbtv_id_table);
+diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
+index 3b3becc5718d..7b12710becac 100644
+--- a/drivers/media/usb/usbvision/usbvision-video.c
++++ b/drivers/media/usb/usbvision/usbvision-video.c
+@@ -1522,6 +1522,13 @@ static int usbvision_probe(struct usb_interface *intf,
+ 	printk(KERN_INFO "%s: %s found\n", __func__,
+ 				usbvision_device_data[model].model_string);
+ 
++	/*
++	 * This is a security check: an exploit using an incorrect
++	 * bInterfaceNumber is known.
++	 */
++	if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
++		return -ENODEV;
++
+ 	if (usbvision_device_data[model].interface >= 0)
+ 		interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
+ 	else if (ifnum < dev->actconfig->desc.bNumInterfaces)
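+
+The bound check matters because bInterfaceNumber comes straight from a
+descriptor the device itself supplies: a crafted device can report an
+ifnum beyond the interfaces actually present in the active
+configuration, so both the USB_MAXINTERFACES bound and the NULL test
+are needed before the interface array is dereferenced below.
+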
+diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig
+index ba7e21a73023..b0372b1908f1 100644
+--- a/drivers/media/v4l2-core/Kconfig
++++ b/drivers/media/v4l2-core/Kconfig
+@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
+ # Used by drivers that need tuner.ko
+ config VIDEO_TUNER
+ 	tristate
+-	depends on MEDIA_TUNER
+ 
+ # Used by drivers that need v4l2-mem2mem.ko
+ config V4L2_MEM2MEM_DEV
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 4f002d0bebb1..e03aa0961360 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -18,8 +18,18 @@
+ #include <linux/videodev2.h>
+ #include <linux/v4l2-subdev.h>
+ #include <media/v4l2-dev.h>
++#include <media/v4l2-fh.h>
++#include <media/v4l2-ctrls.h>
+ #include <media/v4l2-ioctl.h>
+ 
++/* Use the same argument order as copy_in_user */
++#define assign_in_user(to, from)					\
++({									\
++	typeof(*from) __assign_tmp;					\
++									\
++	get_user(__assign_tmp, from) || put_user(__assign_tmp, to);	\
++})
++
+ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+ 	long ret = -ENOIOCTLCMD;
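+
+This file sees the largest rework in the patch. Rather than copying
+each 32-bit structure into an on-stack kernel union and flipping the
+address limit with set_fs(KERNEL_DS) around native_ioctl(), the
+handlers now build a native-layout copy in user-space scratch memory
+(compat_alloc_user_space() via alloc_userspace(), with variable-length
+payloads sized up front by the new bufsize_* helpers) and pass that
+straight through. assign_in_user() above is the workhorse of the
+field-by-field conversion; a sketch of one conversion step, with a
+hypothetical struct foo:
+
+	struct foo32 __user *up;	/* 32-bit layout, from userspace */
+	struct foo __user *kp;		/* native layout, scratch area */
+
+	/* like copy_in_user() for one scalar field: evaluates nonzero
+	 * if either the get_user() or the put_user() faults */
+	if (assign_in_user(&kp->field, &up->field))
+		return -EFAULT;
+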
+@@ -33,117 +43,88 @@ static long native_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 
+ struct v4l2_clip32 {
+ 	struct v4l2_rect        c;
+-	compat_caddr_t 		next;
++	compat_caddr_t		next;
+ };
+ 
+ struct v4l2_window32 {
+ 	struct v4l2_rect        w;
+-	__u32		  	field;	/* enum v4l2_field */
++	__u32			field;	/* enum v4l2_field */
+ 	__u32			chromakey;
+ 	compat_caddr_t		clips; /* actually struct v4l2_clip32 * */
+ 	__u32			clipcount;
+ 	compat_caddr_t		bitmap;
++	__u8                    global_alpha;
+ };
+ 
+-static int get_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
+-{
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_window32)) ||
+-		copy_from_user(&kp->w, &up->w, sizeof(up->w)) ||
+-		get_user(kp->field, &up->field) ||
+-		get_user(kp->chromakey, &up->chromakey) ||
+-		get_user(kp->clipcount, &up->clipcount))
+-			return -EFAULT;
+-	if (kp->clipcount > 2048)
+-		return -EINVAL;
+-	if (kp->clipcount) {
+-		struct v4l2_clip32 __user *uclips;
+-		struct v4l2_clip __user *kclips;
+-		int n = kp->clipcount;
+-		compat_caddr_t p;
+-
+-		if (get_user(p, &up->clips))
+-			return -EFAULT;
+-		uclips = compat_ptr(p);
+-		kclips = compat_alloc_user_space(n * sizeof(struct v4l2_clip));
+-		kp->clips = kclips;
+-		while (--n >= 0) {
+-			if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
+-				return -EFAULT;
+-			if (put_user(n ? kclips + 1 : NULL, &kclips->next))
+-				return -EFAULT;
+-			uclips += 1;
+-			kclips += 1;
+-		}
+-	} else
+-		kp->clips = NULL;
+-	return 0;
+-}
+-
+-static int put_v4l2_window32(struct v4l2_window *kp, struct v4l2_window32 __user *up)
++static int get_v4l2_window32(struct v4l2_window __user *kp,
++			     struct v4l2_window32 __user *up,
++			     void __user *aux_buf, u32 aux_space)
+ {
+-	if (copy_to_user(&up->w, &kp->w, sizeof(kp->w)) ||
+-		put_user(kp->field, &up->field) ||
+-		put_user(kp->chromakey, &up->chromakey) ||
+-		put_user(kp->clipcount, &up->clipcount))
+-			return -EFAULT;
+-	return 0;
+-}
+-
+-static inline int get_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+-{
+-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format)))
+-		return -EFAULT;
+-	return 0;
+-}
+-
+-static inline int get_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+-				struct v4l2_pix_format_mplane __user *up)
+-{
+-	if (copy_from_user(kp, up, sizeof(struct v4l2_pix_format_mplane)))
++	struct v4l2_clip32 __user *uclips;
++	struct v4l2_clip __user *kclips;
++	compat_caddr_t p;
++	u32 clipcount;
++
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    copy_in_user(&kp->w, &up->w, sizeof(up->w)) ||
++	    assign_in_user(&kp->field, &up->field) ||
++	    assign_in_user(&kp->chromakey, &up->chromakey) ||
++	    assign_in_user(&kp->global_alpha, &up->global_alpha) ||
++	    get_user(clipcount, &up->clipcount) ||
++	    put_user(clipcount, &kp->clipcount))
+ 		return -EFAULT;
+-	return 0;
+-}
++	if (clipcount > 2048)
++		return -EINVAL;
++	if (!clipcount)
++		return put_user(NULL, &kp->clips);
+ 
+-static inline int put_v4l2_pix_format(struct v4l2_pix_format *kp, struct v4l2_pix_format __user *up)
+-{
+-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format)))
++	if (get_user(p, &up->clips))
+ 		return -EFAULT;
+-	return 0;
+-}
+-
+-static inline int put_v4l2_pix_format_mplane(struct v4l2_pix_format_mplane *kp,
+-				struct v4l2_pix_format_mplane __user *up)
+-{
+-	if (copy_to_user(up, kp, sizeof(struct v4l2_pix_format_mplane)))
++	uclips = compat_ptr(p);
++	if (aux_space < clipcount * sizeof(*kclips))
+ 		return -EFAULT;
+-	return 0;
+-}
+-
+-static inline int get_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+-{
+-	if (copy_from_user(kp, up, sizeof(struct v4l2_vbi_format)))
++	kclips = aux_buf;
++	if (put_user(kclips, &kp->clips))
+ 		return -EFAULT;
+-	return 0;
+-}
+ 
+-static inline int put_v4l2_vbi_format(struct v4l2_vbi_format *kp, struct v4l2_vbi_format __user *up)
+-{
+-	if (copy_to_user(up, kp, sizeof(struct v4l2_vbi_format)))
+-		return -EFAULT;
++	while (clipcount--) {
++		if (copy_in_user(&kclips->c, &uclips->c, sizeof(uclips->c)))
++			return -EFAULT;
++		if (put_user(clipcount ? kclips + 1 : NULL, &kclips->next))
++			return -EFAULT;
++		uclips++;
++		kclips++;
++	}
+ 	return 0;
+ }
+ 
+-static inline int get_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
++static int put_v4l2_window32(struct v4l2_window __user *kp,
++			     struct v4l2_window32 __user *up)
+ {
+-	if (copy_from_user(kp, up, sizeof(struct v4l2_sliced_vbi_format)))
++	struct v4l2_clip __user *kclips = kp->clips;
++	struct v4l2_clip32 __user *uclips;
++	compat_caddr_t p;
++	u32 clipcount;
++
++	if (copy_in_user(&up->w, &kp->w, sizeof(kp->w)) ||
++	    assign_in_user(&up->field, &kp->field) ||
++	    assign_in_user(&up->chromakey, &kp->chromakey) ||
++	    assign_in_user(&up->global_alpha, &kp->global_alpha) ||
++	    get_user(clipcount, &kp->clipcount) ||
++	    put_user(clipcount, &up->clipcount))
+ 		return -EFAULT;
+-	return 0;
+-}
++	if (!clipcount)
++		return 0;
+ 
+-static inline int put_v4l2_sliced_vbi_format(struct v4l2_sliced_vbi_format *kp, struct v4l2_sliced_vbi_format __user *up)
+-{
+-	if (copy_to_user(up, kp, sizeof(struct v4l2_sliced_vbi_format)))
++	if (get_user(p, &up->clips))
+ 		return -EFAULT;
++	uclips = compat_ptr(p);
++	while (clipcount--) {
++		if (copy_in_user(&uclips->c, &kclips->c, sizeof(uclips->c)))
++			return -EFAULT;
++		uclips++;
++		kclips++;
++	}
+ 	return 0;
+ }
+ 
+@@ -176,91 +157,150 @@ struct v4l2_create_buffers32 {
+ 	__u32			reserved[8];
+ };
+ 
+-static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
++static int __bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
+ {
+-	if (get_user(kp->type, &up->type))
++	u32 type;
++
++	if (get_user(type, &up->type))
+ 		return -EFAULT;
+ 
+-	switch (kp->type) {
++	switch (type) {
++	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
++	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY: {
++		u32 clipcount;
++
++		if (get_user(clipcount, &up->fmt.win.clipcount))
++			return -EFAULT;
++		if (clipcount > 2048)
++			return -EINVAL;
++		*size = clipcount * sizeof(struct v4l2_clip);
++		return 0;
++	}
++	default:
++		*size = 0;
++		return 0;
++	}
++}
++
++static int bufsize_v4l2_format(struct v4l2_format32 __user *up, u32 *size)
++{
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
++		return -EFAULT;
++	return __bufsize_v4l2_format(up, size);
++}
++
++static int __get_v4l2_format32(struct v4l2_format __user *kp,
++			       struct v4l2_format32 __user *up,
++			       void __user *aux_buf, u32 aux_space)
++{
++	u32 type;
++
++	if (get_user(type, &up->type) || put_user(type, &kp->type))
++		return -EFAULT;
++
++	switch (type) {
+ 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+-		return get_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
++		return copy_in_user(&kp->fmt.pix, &up->fmt.pix,
++				    sizeof(kp->fmt.pix)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+-		return get_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+-						  &up->fmt.pix_mp);
++		return copy_in_user(&kp->fmt.pix_mp, &up->fmt.pix_mp,
++				    sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+-		return get_v4l2_window32(&kp->fmt.win, &up->fmt.win);
++		return get_v4l2_window32(&kp->fmt.win, &up->fmt.win,
++					 aux_buf, aux_space);
+ 	case V4L2_BUF_TYPE_VBI_CAPTURE:
+ 	case V4L2_BUF_TYPE_VBI_OUTPUT:
+-		return get_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
++		return copy_in_user(&kp->fmt.vbi, &up->fmt.vbi,
++				    sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ 	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+-		return get_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
++		return copy_in_user(&kp->fmt.sliced, &up->fmt.sliced,
++				    sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
+ 	default:
+-		printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+-								kp->type);
+ 		return -EINVAL;
+ 	}
+ }
+ 
+-static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
++static int get_v4l2_format32(struct v4l2_format __user *kp,
++			     struct v4l2_format32 __user *up,
++			     void __user *aux_buf, u32 aux_space)
+ {
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)))
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
+ 		return -EFAULT;
+-	return __get_v4l2_format32(kp, up);
++	return __get_v4l2_format32(kp, up, aux_buf, aux_space);
+ }
+ 
+-static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
++static int bufsize_v4l2_create(struct v4l2_create_buffers32 __user *up,
++			       u32 *size)
+ {
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) ||
+-	    copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format)))
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
+ 		return -EFAULT;
+-	return __get_v4l2_format32(&kp->format, &up->format);
++	return __bufsize_v4l2_format(&up->format, size);
+ }
+ 
+-static int __put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
++static int get_v4l2_create32(struct v4l2_create_buffers __user *kp,
++			     struct v4l2_create_buffers32 __user *up,
++			     void __user *aux_buf, u32 aux_space)
+ {
+-	if (put_user(kp->type, &up->type))
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    copy_in_user(kp, up,
++			 offsetof(struct v4l2_create_buffers32, format)))
+ 		return -EFAULT;
++	return __get_v4l2_format32(&kp->format, &up->format,
++				   aux_buf, aux_space);
++}
++
++static int __put_v4l2_format32(struct v4l2_format __user *kp,
++			       struct v4l2_format32 __user *up)
++{
++	u32 type;
+ 
+-	switch (kp->type) {
++	if (get_user(type, &kp->type))
++		return -EFAULT;
++
++	switch (type) {
+ 	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+-		return put_v4l2_pix_format(&kp->fmt.pix, &up->fmt.pix);
++		return copy_in_user(&up->fmt.pix, &kp->fmt.pix,
++				    sizeof(kp->fmt.pix)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+-		return put_v4l2_pix_format_mplane(&kp->fmt.pix_mp,
+-						  &up->fmt.pix_mp);
++		return copy_in_user(&up->fmt.pix_mp, &kp->fmt.pix_mp,
++				    sizeof(kp->fmt.pix_mp)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
+ 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
+ 		return put_v4l2_window32(&kp->fmt.win, &up->fmt.win);
+ 	case V4L2_BUF_TYPE_VBI_CAPTURE:
+ 	case V4L2_BUF_TYPE_VBI_OUTPUT:
+-		return put_v4l2_vbi_format(&kp->fmt.vbi, &up->fmt.vbi);
++		return copy_in_user(&up->fmt.vbi, &kp->fmt.vbi,
++				    sizeof(kp->fmt.vbi)) ? -EFAULT : 0;
+ 	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ 	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+-		return put_v4l2_sliced_vbi_format(&kp->fmt.sliced, &up->fmt.sliced);
++		return copy_in_user(&up->fmt.sliced, &kp->fmt.sliced,
++				    sizeof(kp->fmt.sliced)) ? -EFAULT : 0;
+ 	default:
+-		printk(KERN_INFO "compat_ioctl32: unexpected VIDIOC_FMT type %d\n",
+-								kp->type);
+ 		return -EINVAL;
+ 	}
+ }
+ 
+-static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up)
++static int put_v4l2_format32(struct v4l2_format __user *kp,
++			     struct v4l2_format32 __user *up)
+ {
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_format32)))
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
+ 		return -EFAULT;
+ 	return __put_v4l2_format32(kp, up);
+ }
+ 
+-static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
++static int put_v4l2_create32(struct v4l2_create_buffers __user *kp,
++			     struct v4l2_create_buffers32 __user *up)
+ {
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
+-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
+-	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    copy_in_user(up, kp,
++			 offsetof(struct v4l2_create_buffers32, format)) ||
++	    copy_in_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+ 		return -EFAULT;
+ 	return __put_v4l2_format32(&kp->format, &up->format);
+ }
+@@ -274,25 +314,28 @@ struct v4l2_standard32 {
+ 	__u32		     reserved[4];
+ };
+ 
+-static int get_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
++static int get_v4l2_standard32(struct v4l2_standard __user *kp,
++			       struct v4l2_standard32 __user *up)
+ {
+ 	/* other fields are not set by the user, nor used by the driver */
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_standard32)) ||
+-		get_user(kp->index, &up->index))
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    assign_in_user(&kp->index, &up->index))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ 
+-static int put_v4l2_standard32(struct v4l2_standard *kp, struct v4l2_standard32 __user *up)
++static int put_v4l2_standard32(struct v4l2_standard __user *kp,
++			       struct v4l2_standard32 __user *up)
+ {
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_standard32)) ||
+-		put_user(kp->index, &up->index) ||
+-		copy_to_user(up->id, &kp->id, sizeof(__u64)) ||
+-		copy_to_user(up->name, kp->name, 24) ||
+-		copy_to_user(&up->frameperiod, &kp->frameperiod, sizeof(kp->frameperiod)) ||
+-		put_user(kp->framelines, &up->framelines) ||
+-		copy_to_user(up->reserved, kp->reserved, 4 * sizeof(__u32)))
+-			return -EFAULT;
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    assign_in_user(&up->index, &kp->index) ||
++	    copy_in_user(&up->id, &kp->id, sizeof(up->id)) ||
++	    copy_in_user(up->name, kp->name, sizeof(up->name)) ||
++	    copy_in_user(&up->frameperiod, &kp->frameperiod,
++			 sizeof(up->frameperiod)) ||
++	    assign_in_user(&up->framelines, &kp->framelines) ||
++	    copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+@@ -331,134 +374,186 @@ struct v4l2_buffer32 {
+ 	__u32			reserved;
+ };
+ 
+-static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+-				enum v4l2_memory memory)
++static int get_v4l2_plane32(struct v4l2_plane __user *up,
++			    struct v4l2_plane32 __user *up32,
++			    enum v4l2_memory memory)
+ {
+-	void __user *up_pln;
+-	compat_long_t p;
++	compat_ulong_t p;
+ 
+ 	if (copy_in_user(up, up32, 2 * sizeof(__u32)) ||
+-		copy_in_user(&up->data_offset, &up32->data_offset,
+-				sizeof(__u32)))
++	    copy_in_user(&up->data_offset, &up32->data_offset,
++			 sizeof(up->data_offset)))
+ 		return -EFAULT;
+ 
+-	if (memory == V4L2_MEMORY_USERPTR) {
+-		if (get_user(p, &up32->m.userptr))
+-			return -EFAULT;
+-		up_pln = compat_ptr(p);
+-		if (put_user((unsigned long)up_pln, &up->m.userptr))
++	switch (memory) {
++	case V4L2_MEMORY_MMAP:
++	case V4L2_MEMORY_OVERLAY:
++		if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
++				 sizeof(up32->m.mem_offset)))
+ 			return -EFAULT;
+-	} else if (memory == V4L2_MEMORY_DMABUF) {
+-		if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(int)))
++		break;
++	case V4L2_MEMORY_USERPTR:
++		if (get_user(p, &up32->m.userptr) ||
++		    put_user((unsigned long)compat_ptr(p), &up->m.userptr))
+ 			return -EFAULT;
+-	} else {
+-		if (copy_in_user(&up->m.mem_offset, &up32->m.mem_offset,
+-					sizeof(__u32)))
++		break;
++	case V4L2_MEMORY_DMABUF:
++		if (copy_in_user(&up->m.fd, &up32->m.fd, sizeof(up32->m.fd)))
+ 			return -EFAULT;
++		break;
+ 	}
+ 
+ 	return 0;
+ }
+ 
+-static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
+-				enum v4l2_memory memory)
++static int put_v4l2_plane32(struct v4l2_plane __user *up,
++			    struct v4l2_plane32 __user *up32,
++			    enum v4l2_memory memory)
+ {
++	unsigned long p;
++
+ 	if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
+-		copy_in_user(&up32->data_offset, &up->data_offset,
+-				sizeof(__u32)))
++	    copy_in_user(&up32->data_offset, &up->data_offset,
++			 sizeof(up->data_offset)))
+ 		return -EFAULT;
+ 
+-	/* For MMAP, driver might've set up the offset, so copy it back.
+-	 * USERPTR stays the same (was userspace-provided), so no copying. */
+-	if (memory == V4L2_MEMORY_MMAP)
++	switch (memory) {
++	case V4L2_MEMORY_MMAP:
++	case V4L2_MEMORY_OVERLAY:
+ 		if (copy_in_user(&up32->m.mem_offset, &up->m.mem_offset,
+-					sizeof(__u32)))
++				 sizeof(up->m.mem_offset)))
+ 			return -EFAULT;
+-	/* For DMABUF, driver might've set up the fd, so copy it back. */
+-	if (memory == V4L2_MEMORY_DMABUF)
+-		if (copy_in_user(&up32->m.fd, &up->m.fd,
+-					sizeof(int)))
++		break;
++	case V4L2_MEMORY_USERPTR:
++		if (get_user(p, &up->m.userptr) ||
++		    put_user((compat_ulong_t)ptr_to_compat((__force void *)p),
++			     &up32->m.userptr))
++			return -EFAULT;
++		break;
++	case V4L2_MEMORY_DMABUF:
++		if (copy_in_user(&up32->m.fd, &up->m.fd, sizeof(up->m.fd)))
+ 			return -EFAULT;
++		break;
++	}
++
++	return 0;
++}
++
++static int bufsize_v4l2_buffer(struct v4l2_buffer32 __user *up, u32 *size)
++{
++	u32 type;
++	u32 length;
++
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    get_user(type, &up->type) ||
++	    get_user(length, &up->length))
++		return -EFAULT;
+ 
++	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
++		if (length > VIDEO_MAX_PLANES)
++			return -EINVAL;
++
++		/*
++		 * We don't really care if userspace decides to kill itself
++		 * by passing a very big length value
++		 */
++		*size = length * sizeof(struct v4l2_plane);
++	} else {
++		*size = 0;
++	}
+ 	return 0;
+ }
+ 
+-static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
++static int get_v4l2_buffer32(struct v4l2_buffer __user *kp,
++			     struct v4l2_buffer32 __user *up,
++			     void __user *aux_buf, u32 aux_space)
+ {
++	u32 type;
++	u32 length;
++	enum v4l2_memory memory;
+ 	struct v4l2_plane32 __user *uplane32;
+ 	struct v4l2_plane __user *uplane;
+ 	compat_caddr_t p;
+-	int num_planes;
+ 	int ret;
+ 
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_buffer32)) ||
+-		get_user(kp->index, &up->index) ||
+-		get_user(kp->type, &up->type) ||
+-		get_user(kp->flags, &up->flags) ||
+-		get_user(kp->memory, &up->memory) ||
+-		get_user(kp->length, &up->length))
+-			return -EFAULT;
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    assign_in_user(&kp->index, &up->index) ||
++	    get_user(type, &up->type) ||
++	    put_user(type, &kp->type) ||
++	    assign_in_user(&kp->flags, &up->flags) ||
++	    get_user(memory, &up->memory) ||
++	    put_user(memory, &kp->memory) ||
++	    get_user(length, &up->length) ||
++	    put_user(length, &kp->length))
++		return -EFAULT;
+ 
+-	if (V4L2_TYPE_IS_OUTPUT(kp->type))
+-		if (get_user(kp->bytesused, &up->bytesused) ||
+-			get_user(kp->field, &up->field) ||
+-			get_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+-			get_user(kp->timestamp.tv_usec,
+-					&up->timestamp.tv_usec))
++	if (V4L2_TYPE_IS_OUTPUT(type))
++		if (assign_in_user(&kp->bytesused, &up->bytesused) ||
++		    assign_in_user(&kp->field, &up->field) ||
++		    assign_in_user(&kp->timestamp.tv_sec,
++				   &up->timestamp.tv_sec) ||
++		    assign_in_user(&kp->timestamp.tv_usec,
++				   &up->timestamp.tv_usec))
+ 			return -EFAULT;
+ 
+-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		num_planes = kp->length;
++	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
++		u32 num_planes = length;
++
+ 		if (num_planes == 0) {
+-			kp->m.planes = NULL;
+-			/* num_planes == 0 is legal, e.g. when userspace doesn't
+-			 * need planes array on DQBUF*/
+-			return 0;
++			/*
++			 * num_planes == 0 is legal, e.g. when userspace doesn't
++			 * need planes array on DQBUF
++			 */
++			return put_user(NULL, &kp->m.planes);
+ 		}
++		if (num_planes > VIDEO_MAX_PLANES)
++			return -EINVAL;
+ 
+ 		if (get_user(p, &up->m.planes))
+ 			return -EFAULT;
+ 
+ 		uplane32 = compat_ptr(p);
+ 		if (!access_ok(VERIFY_READ, uplane32,
+-				num_planes * sizeof(struct v4l2_plane32)))
++			       num_planes * sizeof(*uplane32)))
+ 			return -EFAULT;
+ 
+-		/* We don't really care if userspace decides to kill itself
+-		 * by passing a very big num_planes value */
+-		uplane = compat_alloc_user_space(num_planes *
+-						sizeof(struct v4l2_plane));
+-		kp->m.planes = (__force struct v4l2_plane *)uplane;
++		/*
++		 * We don't really care if userspace decides to kill itself
++		 * by passing a very big num_planes value
++		 */
++		if (aux_space < num_planes * sizeof(*uplane))
++			return -EFAULT;
++
++		uplane = aux_buf;
++		if (put_user((__force struct v4l2_plane *)uplane,
++			     &kp->m.planes))
++			return -EFAULT;
+ 
+-		while (--num_planes >= 0) {
+-			ret = get_v4l2_plane32(uplane, uplane32, kp->memory);
++		while (num_planes--) {
++			ret = get_v4l2_plane32(uplane, uplane32, memory);
+ 			if (ret)
+ 				return ret;
+-			++uplane;
+-			++uplane32;
++			uplane++;
++			uplane32++;
+ 		}
+ 	} else {
+-		switch (kp->memory) {
++		switch (memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (get_user(kp->m.offset, &up->m.offset))
++		case V4L2_MEMORY_OVERLAY:
++			if (assign_in_user(&kp->m.offset, &up->m.offset))
+ 				return -EFAULT;
+ 			break;
+-		case V4L2_MEMORY_USERPTR:
+-			{
+-			compat_long_t tmp;
++		case V4L2_MEMORY_USERPTR: {
++			compat_ulong_t userptr;
+ 
+-			if (get_user(tmp, &up->m.userptr))
+-				return -EFAULT;
+-
+-			kp->m.userptr = (unsigned long)compat_ptr(tmp);
+-			}
+-			break;
+-		case V4L2_MEMORY_OVERLAY:
+-			if (get_user(kp->m.offset, &up->m.offset))
++			if (get_user(userptr, &up->m.userptr) ||
++			    put_user((unsigned long)compat_ptr(userptr),
++				     &kp->m.userptr))
+ 				return -EFAULT;
+ 			break;
++		}
+ 		case V4L2_MEMORY_DMABUF:
+-			if (get_user(kp->m.fd, &up->m.fd))
++			if (assign_in_user(&kp->m.fd, &up->m.fd))
+ 				return -EFAULT;
+ 			break;
+ 		}
+@@ -467,65 +562,70 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ 	return 0;
+ }
+ 
+-static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user *up)
++static int put_v4l2_buffer32(struct v4l2_buffer __user *kp,
++			     struct v4l2_buffer32 __user *up)
+ {
++	u32 type;
++	u32 length;
++	enum v4l2_memory memory;
+ 	struct v4l2_plane32 __user *uplane32;
+ 	struct v4l2_plane __user *uplane;
+ 	compat_caddr_t p;
+-	int num_planes;
+ 	int ret;
+ 
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_buffer32)) ||
+-		put_user(kp->index, &up->index) ||
+-		put_user(kp->type, &up->type) ||
+-		put_user(kp->flags, &up->flags) ||
+-		put_user(kp->memory, &up->memory))
+-			return -EFAULT;
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    assign_in_user(&up->index, &kp->index) ||
++	    get_user(type, &kp->type) ||
++	    put_user(type, &up->type) ||
++	    assign_in_user(&up->flags, &kp->flags) ||
++	    get_user(memory, &kp->memory) ||
++	    put_user(memory, &up->memory))
++		return -EFAULT;
+ 
+-	if (put_user(kp->bytesused, &up->bytesused) ||
+-		put_user(kp->field, &up->field) ||
+-		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+-		put_user(kp->timestamp.tv_usec, &up->timestamp.tv_usec) ||
+-		copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
+-		put_user(kp->sequence, &up->sequence) ||
+-		put_user(kp->reserved2, &up->reserved2) ||
+-		put_user(kp->reserved, &up->reserved) ||
+-		put_user(kp->length, &up->length))
+-			return -EFAULT;
++	if (assign_in_user(&up->bytesused, &kp->bytesused) ||
++	    assign_in_user(&up->field, &kp->field) ||
++	    assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
++	    assign_in_user(&up->timestamp.tv_usec, &kp->timestamp.tv_usec) ||
++	    copy_in_user(&up->timecode, &kp->timecode, sizeof(kp->timecode)) ||
++	    assign_in_user(&up->sequence, &kp->sequence) ||
++	    assign_in_user(&up->reserved2, &kp->reserved2) ||
++	    assign_in_user(&up->reserved, &kp->reserved) ||
++	    get_user(length, &kp->length) ||
++	    put_user(length, &up->length))
++		return -EFAULT;
++
++	if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
++		u32 num_planes = length;
+ 
+-	if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
+-		num_planes = kp->length;
+ 		if (num_planes == 0)
+ 			return 0;
+ 
+-		uplane = (__force struct v4l2_plane __user *)kp->m.planes;
++		if (get_user(uplane, ((__force struct v4l2_plane __user **)&kp->m.planes)))
++			return -EFAULT;
+ 		if (get_user(p, &up->m.planes))
+ 			return -EFAULT;
+ 		uplane32 = compat_ptr(p);
+ 
+-		while (--num_planes >= 0) {
+-			ret = put_v4l2_plane32(uplane, uplane32, kp->memory);
++		while (num_planes--) {
++			ret = put_v4l2_plane32(uplane, uplane32, memory);
+ 			if (ret)
+ 				return ret;
+ 			++uplane;
+ 			++uplane32;
+ 		}
+ 	} else {
+-		switch (kp->memory) {
++		switch (memory) {
+ 		case V4L2_MEMORY_MMAP:
+-			if (put_user(kp->m.offset, &up->m.offset))
++		case V4L2_MEMORY_OVERLAY:
++			if (assign_in_user(&up->m.offset, &kp->m.offset))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_USERPTR:
+-			if (put_user(kp->m.userptr, &up->m.userptr))
+-				return -EFAULT;
+-			break;
+-		case V4L2_MEMORY_OVERLAY:
+-			if (put_user(kp->m.offset, &up->m.offset))
++			if (assign_in_user(&up->m.userptr, &kp->m.userptr))
+ 				return -EFAULT;
+ 			break;
+ 		case V4L2_MEMORY_DMABUF:
+-			if (put_user(kp->m.fd, &up->m.fd))
++			if (assign_in_user(&up->m.fd, &kp->m.fd))
+ 				return -EFAULT;
+ 			break;
+ 		}
+@@ -537,7 +637,7 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
+ struct v4l2_framebuffer32 {
+ 	__u32			capability;
+ 	__u32			flags;
+-	compat_caddr_t 		base;
++	compat_caddr_t		base;
+ 	struct {
+ 		__u32		width;
+ 		__u32		height;
+@@ -550,30 +650,33 @@ struct v4l2_framebuffer32 {
+ 	} fmt;
+ };
+ 
+-static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
++static int get_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
++				  struct v4l2_framebuffer32 __user *up)
+ {
+-	u32 tmp;
+-
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_framebuffer32)) ||
+-		get_user(tmp, &up->base) ||
+-		get_user(kp->capability, &up->capability) ||
+-		get_user(kp->flags, &up->flags) ||
+-		copy_from_user(&kp->fmt, &up->fmt, sizeof(up->fmt)))
+-			return -EFAULT;
+-	kp->base = (__force void *)compat_ptr(tmp);
++	compat_caddr_t tmp;
++
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    get_user(tmp, &up->base) ||
++	    put_user((__force void *)compat_ptr(tmp), &kp->base) ||
++	    assign_in_user(&kp->capability, &up->capability) ||
++	    assign_in_user(&kp->flags, &up->flags) ||
++	    copy_in_user(&kp->fmt, &up->fmt, sizeof(kp->fmt)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+-static int put_v4l2_framebuffer32(struct v4l2_framebuffer *kp, struct v4l2_framebuffer32 __user *up)
++static int put_v4l2_framebuffer32(struct v4l2_framebuffer __user *kp,
++				  struct v4l2_framebuffer32 __user *up)
+ {
+-	u32 tmp = (u32)((unsigned long)kp->base);
+-
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_framebuffer32)) ||
+-		put_user(tmp, &up->base) ||
+-		put_user(kp->capability, &up->capability) ||
+-		put_user(kp->flags, &up->flags) ||
+-		copy_to_user(&up->fmt, &kp->fmt, sizeof(up->fmt)))
+-			return -EFAULT;
++	void *base;
++
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    get_user(base, &kp->base) ||
++	    put_user(ptr_to_compat(base), &up->base) ||
++	    assign_in_user(&up->capability, &kp->capability) ||
++	    assign_in_user(&up->flags, &kp->flags) ||
++	    copy_in_user(&up->fmt, &kp->fmt, sizeof(kp->fmt)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+@@ -585,31 +688,36 @@ struct v4l2_input32 {
+ 	__u32        tuner;             /*  Associated tuner */
+ 	v4l2_std_id  std;
+ 	__u32	     status;
+-	__u32	     reserved[4];
+-} __attribute__ ((packed));
++	__u32	     capabilities;
++	__u32	     reserved[3];
++};
+ 
+-/* The 64-bit v4l2_input struct has extra padding at the end of the struct.
+-   Otherwise it is identical to the 32-bit version. */
+-static inline int get_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
++/*
++ * The 64-bit v4l2_input struct has extra padding at the end of the struct.
++ * Otherwise it is identical to the 32-bit version.
++ */
++static inline int get_v4l2_input32(struct v4l2_input __user *kp,
++				   struct v4l2_input32 __user *up)
+ {
+-	if (copy_from_user(kp, up, sizeof(struct v4l2_input32)))
++	if (copy_in_user(kp, up, sizeof(*up)))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ 
+-static inline int put_v4l2_input32(struct v4l2_input *kp, struct v4l2_input32 __user *up)
++static inline int put_v4l2_input32(struct v4l2_input __user *kp,
++				   struct v4l2_input32 __user *up)
+ {
+-	if (copy_to_user(up, kp, sizeof(struct v4l2_input32)))
++	if (copy_in_user(up, kp, sizeof(*up)))
+ 		return -EFAULT;
+ 	return 0;
+ }
+ 
+ struct v4l2_ext_controls32 {
+-       __u32 ctrl_class;
+-       __u32 count;
+-       __u32 error_idx;
+-       __u32 reserved[2];
+-       compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
++	__u32 ctrl_class;
++	__u32 count;
++	__u32 error_idx;
++	__u32 reserved[2];
++	compat_caddr_t controls; /* actually struct v4l2_ext_control32 * */
+ };
+ 
+ struct v4l2_ext_control32 {
+@@ -623,57 +731,95 @@ struct v4l2_ext_control32 {
+ 	};
+ } __attribute__ ((packed));
+ 
+-/* The following function really belong in v4l2-common, but that causes
+-   a circular dependency between modules. We need to think about this, but
+-   for now this will do. */
+-
+-/* Return non-zero if this control is a pointer type. Currently only
+-   type STRING is a pointer type. */
+-static inline int ctrl_is_pointer(u32 id)
++/* Return true if this control is a pointer type. */
++static inline bool ctrl_is_pointer(struct file *file, u32 id)
+ {
+-	switch (id) {
+-	case V4L2_CID_RDS_TX_PS_NAME:
+-	case V4L2_CID_RDS_TX_RADIO_TEXT:
+-		return 1;
+-	default:
+-		return 0;
++	struct video_device *vdev = video_devdata(file);
++	struct v4l2_fh *fh = NULL;
++	struct v4l2_ctrl_handler *hdl = NULL;
++	struct v4l2_query_ext_ctrl qec = { id };
++	const struct v4l2_ioctl_ops *ops = vdev->ioctl_ops;
++
++	if (test_bit(V4L2_FL_USES_V4L2_FH, &vdev->flags))
++		fh = file->private_data;
++
++	if (fh && fh->ctrl_handler)
++		hdl = fh->ctrl_handler;
++	else if (vdev->ctrl_handler)
++		hdl = vdev->ctrl_handler;
++
++	if (hdl) {
++		struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, id);
++
++		return ctrl && ctrl->is_ptr;
+ 	}
++
++	if (!ops || !ops->vidioc_query_ext_ctrl)
++		return false;
++
++	return !ops->vidioc_query_ext_ctrl(file, fh, &qec) &&
++		(qec.flags & V4L2_CTRL_FLAG_HAS_PAYLOAD);
++}
++
++static int bufsize_v4l2_ext_controls(struct v4l2_ext_controls32 __user *up,
++				     u32 *size)
++{
++	u32 count;
++
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    get_user(count, &up->count))
++		return -EFAULT;
++	if (count > V4L2_CID_MAX_CTRLS)
++		return -EINVAL;
++	*size = count * sizeof(struct v4l2_ext_control);
++	return 0;
+ }
+ 
+-static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
++static int get_v4l2_ext_controls32(struct file *file,
++				   struct v4l2_ext_controls __user *kp,
++				   struct v4l2_ext_controls32 __user *up,
++				   void __user *aux_buf, u32 aux_space)
+ {
+ 	struct v4l2_ext_control32 __user *ucontrols;
+ 	struct v4l2_ext_control __user *kcontrols;
+-	int n;
++	u32 count;
++	u32 n;
+ 	compat_caddr_t p;
+ 
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_ext_controls32)) ||
+-		get_user(kp->ctrl_class, &up->ctrl_class) ||
+-		get_user(kp->count, &up->count) ||
+-		get_user(kp->error_idx, &up->error_idx) ||
+-		copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+-			return -EFAULT;
+-	n = kp->count;
+-	if (n == 0) {
+-		kp->controls = NULL;
+-		return 0;
+-	}
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    assign_in_user(&kp->ctrl_class, &up->ctrl_class) ||
++	    get_user(count, &up->count) ||
++	    put_user(count, &kp->count) ||
++	    assign_in_user(&kp->error_idx, &up->error_idx) ||
++	    copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
++		return -EFAULT;
++
++	if (count == 0)
++		return put_user(NULL, &kp->controls);
++	if (count > V4L2_CID_MAX_CTRLS)
++		return -EINVAL;
+ 	if (get_user(p, &up->controls))
+ 		return -EFAULT;
+ 	ucontrols = compat_ptr(p);
+-	if (!access_ok(VERIFY_READ, ucontrols,
+-			n * sizeof(struct v4l2_ext_control32)))
++	if (!access_ok(VERIFY_READ, ucontrols, count * sizeof(*ucontrols)))
+ 		return -EFAULT;
+-	kcontrols = compat_alloc_user_space(n * sizeof(struct v4l2_ext_control));
+-	kp->controls = (__force struct v4l2_ext_control *)kcontrols;
+-	while (--n >= 0) {
++	if (aux_space < count * sizeof(*kcontrols))
++		return -EFAULT;
++	kcontrols = aux_buf;
++	if (put_user((__force struct v4l2_ext_control *)kcontrols,
++		     &kp->controls))
++		return -EFAULT;
++
++	for (n = 0; n < count; n++) {
+ 		u32 id;
+ 
+ 		if (copy_in_user(kcontrols, ucontrols, sizeof(*ucontrols)))
+ 			return -EFAULT;
++
+ 		if (get_user(id, &kcontrols->id))
+ 			return -EFAULT;
+-		if (ctrl_is_pointer(id)) {
++
++		if (ctrl_is_pointer(file, id)) {
+ 			void __user *s;
+ 
+ 			if (get_user(p, &ucontrols->string))
+@@ -688,43 +834,55 @@ static int get_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext
+ 	return 0;
+ }
+ 
+-static int put_v4l2_ext_controls32(struct v4l2_ext_controls *kp, struct v4l2_ext_controls32 __user *up)
++static int put_v4l2_ext_controls32(struct file *file,
++				   struct v4l2_ext_controls __user *kp,
++				   struct v4l2_ext_controls32 __user *up)
+ {
+ 	struct v4l2_ext_control32 __user *ucontrols;
+-	struct v4l2_ext_control __user *kcontrols =
+-		(__force struct v4l2_ext_control __user *)kp->controls;
+-	int n = kp->count;
++	struct v4l2_ext_control __user *kcontrols;
++	u32 count;
++	u32 n;
+ 	compat_caddr_t p;
+ 
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_ext_controls32)) ||
+-		put_user(kp->ctrl_class, &up->ctrl_class) ||
+-		put_user(kp->count, &up->count) ||
+-		put_user(kp->error_idx, &up->error_idx) ||
+-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+-			return -EFAULT;
+-	if (!kp->count)
+-		return 0;
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    assign_in_user(&up->ctrl_class, &kp->ctrl_class) ||
++	    get_user(count, &kp->count) ||
++	    put_user(count, &up->count) ||
++	    assign_in_user(&up->error_idx, &kp->error_idx) ||
++	    copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)) ||
++	    get_user(kcontrols, &kp->controls))
++		return -EFAULT;
+ 
++	if (!count)
++		return 0;
+ 	if (get_user(p, &up->controls))
+ 		return -EFAULT;
+ 	ucontrols = compat_ptr(p);
+-	if (!access_ok(VERIFY_WRITE, ucontrols,
+-			n * sizeof(struct v4l2_ext_control32)))
++	if (!access_ok(VERIFY_WRITE, ucontrols, count * sizeof(*ucontrols)))
+ 		return -EFAULT;
+ 
+-	while (--n >= 0) {
+-		unsigned size = sizeof(*ucontrols);
++	for (n = 0; n < count; n++) {
++		unsigned int size = sizeof(*ucontrols);
+ 		u32 id;
+ 
+-		if (get_user(id, &kcontrols->id))
++		if (get_user(id, &kcontrols->id) ||
++		    put_user(id, &ucontrols->id) ||
++		    assign_in_user(&ucontrols->size, &kcontrols->size) ||
++		    copy_in_user(&ucontrols->reserved2, &kcontrols->reserved2,
++				 sizeof(ucontrols->reserved2)))
+ 			return -EFAULT;
+-		/* Do not modify the pointer when copying a pointer control.
+-		   The contents of the pointer was changed, not the pointer
+-		   itself. */
+-		if (ctrl_is_pointer(id))
++
++		/*
++		 * Do not modify the pointer when copying a pointer control.
++		 * The contents of the pointer was changed, not the pointer
++		 * itself.
++		 */
++		if (ctrl_is_pointer(file, id))
+ 			size -= sizeof(ucontrols->value64);
++
+ 		if (copy_in_user(ucontrols, kcontrols, size))
+ 			return -EFAULT;
++
+ 		ucontrols++;
+ 		kcontrols++;
+ 	}
+@@ -743,18 +901,19 @@ struct v4l2_event32 {
+ 	__u32				reserved[8];
+ };
+ 
+-static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *up)
++static int put_v4l2_event32(struct v4l2_event __user *kp,
++			    struct v4l2_event32 __user *up)
+ {
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_event32)) ||
+-		put_user(kp->type, &up->type) ||
+-		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+-		put_user(kp->pending, &up->pending) ||
+-		put_user(kp->sequence, &up->sequence) ||
+-		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
+-		put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
+-		put_user(kp->id, &up->id) ||
+-		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+-			return -EFAULT;
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    assign_in_user(&up->type, &kp->type) ||
++	    copy_in_user(&up->u, &kp->u, sizeof(kp->u)) ||
++	    assign_in_user(&up->pending, &kp->pending) ||
++	    assign_in_user(&up->sequence, &kp->sequence) ||
++	    assign_in_user(&up->timestamp.tv_sec, &kp->timestamp.tv_sec) ||
++	    assign_in_user(&up->timestamp.tv_nsec, &kp->timestamp.tv_nsec) ||
++	    assign_in_user(&up->id, &kp->id) ||
++	    copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+@@ -766,32 +925,35 @@ struct v4l2_edid32 {
+ 	compat_caddr_t edid;
+ };
+ 
+-static int get_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
++static int get_v4l2_edid32(struct v4l2_edid __user *kp,
++			   struct v4l2_edid32 __user *up)
+ {
+-	u32 tmp;
+-
+-	if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_edid32)) ||
+-		get_user(kp->pad, &up->pad) ||
+-		get_user(kp->start_block, &up->start_block) ||
+-		get_user(kp->blocks, &up->blocks) ||
+-		get_user(tmp, &up->edid) ||
+-		copy_from_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
+-			return -EFAULT;
+-	kp->edid = (__force u8 *)compat_ptr(tmp);
++	compat_uptr_t tmp;
++
++	if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
++	    assign_in_user(&kp->pad, &up->pad) ||
++	    assign_in_user(&kp->start_block, &up->start_block) ||
++	    assign_in_user(&kp->blocks, &up->blocks) ||
++	    get_user(tmp, &up->edid) ||
++	    put_user(compat_ptr(tmp), &kp->edid) ||
++	    copy_in_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+-static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
++static int put_v4l2_edid32(struct v4l2_edid __user *kp,
++			   struct v4l2_edid32 __user *up)
+ {
+-	u32 tmp = (u32)((unsigned long)kp->edid);
+-
+-	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_edid32)) ||
+-		put_user(kp->pad, &up->pad) ||
+-		put_user(kp->start_block, &up->start_block) ||
+-		put_user(kp->blocks, &up->blocks) ||
+-		put_user(tmp, &up->edid) ||
+-		copy_to_user(up->reserved, kp->reserved, sizeof(up->reserved)))
+-			return -EFAULT;
++	void *edid;
++
++	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
++	    assign_in_user(&up->pad, &kp->pad) ||
++	    assign_in_user(&up->start_block, &kp->start_block) ||
++	    assign_in_user(&up->blocks, &kp->blocks) ||
++	    get_user(edid, &kp->edid) ||
++	    put_user(ptr_to_compat(edid), &up->edid) ||
++	    copy_in_user(up->reserved, kp->reserved, sizeof(up->reserved)))
++		return -EFAULT;
+ 	return 0;
+ }
+ 
+@@ -807,7 +969,7 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+ #define VIDIOC_ENUMINPUT32	_IOWR('V', 26, struct v4l2_input32)
+ #define VIDIOC_G_EDID32		_IOWR('V', 40, struct v4l2_edid32)
+ #define VIDIOC_S_EDID32		_IOWR('V', 41, struct v4l2_edid32)
+-#define VIDIOC_TRY_FMT32      	_IOWR('V', 64, struct v4l2_format32)
++#define VIDIOC_TRY_FMT32	_IOWR('V', 64, struct v4l2_format32)
+ #define VIDIOC_G_EXT_CTRLS32    _IOWR('V', 71, struct v4l2_ext_controls32)
+ #define VIDIOC_S_EXT_CTRLS32    _IOWR('V', 72, struct v4l2_ext_controls32)
+ #define VIDIOC_TRY_EXT_CTRLS32  _IOWR('V', 73, struct v4l2_ext_controls32)
+@@ -823,22 +985,23 @@ static int put_v4l2_edid32(struct v4l2_edid *kp, struct v4l2_edid32 __user *up)
+ #define VIDIOC_G_OUTPUT32	_IOR ('V', 46, s32)
+ #define VIDIOC_S_OUTPUT32	_IOWR('V', 47, s32)
+ 
++static int alloc_userspace(unsigned int size, u32 aux_space,
++			   void __user **up_native)
++{
++	*up_native = compat_alloc_user_space(size + aux_space);
++	if (!*up_native)
++		return -ENOMEM;
++	if (clear_user(*up_native, size))
++		return -EFAULT;
++	return 0;
++}
++
+ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+-	union {
+-		struct v4l2_format v2f;
+-		struct v4l2_buffer v2b;
+-		struct v4l2_framebuffer v2fb;
+-		struct v4l2_input v2i;
+-		struct v4l2_standard v2s;
+-		struct v4l2_ext_controls v2ecs;
+-		struct v4l2_event v2ev;
+-		struct v4l2_create_buffers v2crt;
+-		struct v4l2_edid v2edid;
+-		unsigned long vx;
+-		int vi;
+-	} karg;
+ 	void __user *up = compat_ptr(arg);
++	void __user *up_native = NULL;
++	void __user *aux_buf;
++	u32 aux_space;
+ 	int compatible_arg = 1;
+ 	long err = 0;
+ 
+@@ -877,30 +1040,52 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
+ 	case VIDIOC_STREAMOFF:
+ 	case VIDIOC_S_INPUT:
+ 	case VIDIOC_S_OUTPUT:
+-		err = get_user(karg.vi, (s32 __user *)up);
++		err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
++		if (!err && assign_in_user((unsigned int __user *)up_native,
++					   (compat_uint_t __user *)up))
++			err = -EFAULT;
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_G_INPUT:
+ 	case VIDIOC_G_OUTPUT:
++		err = alloc_userspace(sizeof(unsigned int), 0, &up_native);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_G_EDID:
+ 	case VIDIOC_S_EDID:
+-		err = get_v4l2_edid32(&karg.v2edid, up);
++		err = alloc_userspace(sizeof(struct v4l2_edid), 0, &up_native);
++		if (!err)
++			err = get_v4l2_edid32(up_native, up);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_G_FMT:
+ 	case VIDIOC_S_FMT:
+ 	case VIDIOC_TRY_FMT:
+-		err = get_v4l2_format32(&karg.v2f, up);
++		err = bufsize_v4l2_format(up, &aux_space);
++		if (!err)
++			err = alloc_userspace(sizeof(struct v4l2_format),
++					      aux_space, &up_native);
++		if (!err) {
++			aux_buf = up_native + sizeof(struct v4l2_format);
++			err = get_v4l2_format32(up_native, up,
++						aux_buf, aux_space);
++		}
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_CREATE_BUFS:
+-		err = get_v4l2_create32(&karg.v2crt, up);
++		err = bufsize_v4l2_create(up, &aux_space);
++		if (!err)
++			err = alloc_userspace(sizeof(struct v4l2_create_buffers),
++					      aux_space, &up_native);
++		if (!err) {
++			aux_buf = up_native + sizeof(struct v4l2_create_buffers);
++			err = get_v4l2_create32(up_native, up,
++						aux_buf, aux_space);
++		}
+ 		compatible_arg = 0;
+ 		break;
+ 
+@@ -908,36 +1093,63 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
+ 	case VIDIOC_QUERYBUF:
+ 	case VIDIOC_QBUF:
+ 	case VIDIOC_DQBUF:
+-		err = get_v4l2_buffer32(&karg.v2b, up);
++		err = bufsize_v4l2_buffer(up, &aux_space);
++		if (!err)
++			err = alloc_userspace(sizeof(struct v4l2_buffer),
++					      aux_space, &up_native);
++		if (!err) {
++			aux_buf = up_native + sizeof(struct v4l2_buffer);
++			err = get_v4l2_buffer32(up_native, up,
++						aux_buf, aux_space);
++		}
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_S_FBUF:
+-		err = get_v4l2_framebuffer32(&karg.v2fb, up);
++		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
++				      &up_native);
++		if (!err)
++			err = get_v4l2_framebuffer32(up_native, up);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_G_FBUF:
++		err = alloc_userspace(sizeof(struct v4l2_framebuffer), 0,
++				      &up_native);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_ENUMSTD:
+-		err = get_v4l2_standard32(&karg.v2s, up);
++		err = alloc_userspace(sizeof(struct v4l2_standard), 0,
++				      &up_native);
++		if (!err)
++			err = get_v4l2_standard32(up_native, up);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_ENUMINPUT:
+-		err = get_v4l2_input32(&karg.v2i, up);
++		err = alloc_userspace(sizeof(struct v4l2_input), 0, &up_native);
++		if (!err)
++			err = get_v4l2_input32(up_native, up);
+ 		compatible_arg = 0;
+ 		break;
+ 
+ 	case VIDIOC_G_EXT_CTRLS:
+ 	case VIDIOC_S_EXT_CTRLS:
+ 	case VIDIOC_TRY_EXT_CTRLS:
+-		err = get_v4l2_ext_controls32(&karg.v2ecs, up);
++		err = bufsize_v4l2_ext_controls(up, &aux_space);
++		if (!err)
++			err = alloc_userspace(sizeof(struct v4l2_ext_controls),
++					      aux_space, &up_native);
++		if (!err) {
++			aux_buf = up_native + sizeof(struct v4l2_ext_controls);
++			err = get_v4l2_ext_controls32(file, up_native, up,
++						      aux_buf, aux_space);
++		}
+ 		compatible_arg = 0;
+ 		break;
+ 	case VIDIOC_DQEVENT:
++		err = alloc_userspace(sizeof(struct v4l2_event), 0, &up_native);
+ 		compatible_arg = 0;
+ 		break;
+ 	}
+@@ -946,22 +1158,26 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
+ 
+ 	if (compatible_arg)
+ 		err = native_ioctl(file, cmd, (unsigned long)up);
+-	else {
+-		mm_segment_t old_fs = get_fs();
++	else
++		err = native_ioctl(file, cmd, (unsigned long)up_native);
+ 
+-		set_fs(KERNEL_DS);
+-		err = native_ioctl(file, cmd, (unsigned long)&karg);
+-		set_fs(old_fs);
+-	}
++	if (err == -ENOTTY)
++		return err;
+ 
+-	/* Special case: even after an error we need to put the
+-	   results back for these ioctls since the error_idx will
+-	   contain information on which control failed. */
++	/*
++	 * Special case: even after an error we need to put the
++	 * results back for these ioctls since the error_idx will
++	 * contain information on which control failed.
++	 */
+ 	switch (cmd) {
+ 	case VIDIOC_G_EXT_CTRLS:
+ 	case VIDIOC_S_EXT_CTRLS:
+ 	case VIDIOC_TRY_EXT_CTRLS:
+-		if (put_v4l2_ext_controls32(&karg.v2ecs, up))
++		if (put_v4l2_ext_controls32(file, up_native, up))
++			err = -EFAULT;
++		break;
++	case VIDIOC_S_EDID:
++		if (put_v4l2_edid32(up_native, up))
+ 			err = -EFAULT;
+ 		break;
+ 	}
+@@ -973,44 +1189,46 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar
+ 	case VIDIOC_S_OUTPUT:
+ 	case VIDIOC_G_INPUT:
+ 	case VIDIOC_G_OUTPUT:
+-		err = put_user(((s32)karg.vi), (s32 __user *)up);
++		if (assign_in_user((compat_uint_t __user *)up,
++				   ((unsigned int __user *)up_native)))
++			err = -EFAULT;
+ 		break;
+ 
+ 	case VIDIOC_G_FBUF:
+-		err = put_v4l2_framebuffer32(&karg.v2fb, up);
++		err = put_v4l2_framebuffer32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_DQEVENT:
+-		err = put_v4l2_event32(&karg.v2ev, up);
++		err = put_v4l2_event32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_G_EDID:
+-	case VIDIOC_S_EDID:
+-		err = put_v4l2_edid32(&karg.v2edid, up);
++		err = put_v4l2_edid32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_G_FMT:
+ 	case VIDIOC_S_FMT:
+ 	case VIDIOC_TRY_FMT:
+-		err = put_v4l2_format32(&karg.v2f, up);
++		err = put_v4l2_format32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_CREATE_BUFS:
+-		err = put_v4l2_create32(&karg.v2crt, up);
++		err = put_v4l2_create32(up_native, up);
+ 		break;
+ 
++	case VIDIOC_PREPARE_BUF:
+ 	case VIDIOC_QUERYBUF:
+ 	case VIDIOC_QBUF:
+ 	case VIDIOC_DQBUF:
+-		err = put_v4l2_buffer32(&karg.v2b, up);
++		err = put_v4l2_buffer32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_ENUMSTD:
+-		err = put_v4l2_standard32(&karg.v2s, up);
++		err = put_v4l2_standard32(up_native, up);
+ 		break;
+ 
+ 	case VIDIOC_ENUMINPUT:
+-		err = put_v4l2_input32(&karg.v2i, up);
++		err = put_v4l2_input32(up_native, up);
+ 		break;
+ 	}
+ 	return err;
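
For illustration (not part of the patch): the v4l2-compat-ioctl32 rework above drops the old kernel-stack union plus set_fs(KERNEL_DS) trick in favour of compat_alloc_user_space(). The compat layer builds a native-layout argument in user-accessible memory, and the native handler then performs ordinary copy_from_user()/copy_to_user() with no widened address limit. A minimal sketch of that shape follows; everything prefixed example_ is invented for illustration and is not from the patch.

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

struct example_native { __u32 index; __u64 ptr; };
struct example_compat { __u32 index; compat_uptr_t ptr; };

/* Assumed to exist: the driver's native ioctl entry point. */
static long example_native_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg);

static long example_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	struct example_compat __user *up = compat_ptr(arg);
	struct example_native __user *up_native;
	compat_uptr_t p;
	__u32 index;

	/* Build a native-layout argument in user-accessible memory. */
	up_native = compat_alloc_user_space(sizeof(*up_native));
	if (!up_native)
		return -ENOMEM;
	if (get_user(index, &up->index) || get_user(p, &up->ptr) ||
	    put_user(index, &up_native->index) ||
	    put_user((u64)(unsigned long)compat_ptr(p), &up_native->ptr))
		return -EFAULT;

	/* The native handler copies from/to userspace as usual; no
	 * set_fs(KERNEL_DS) window is ever opened. */
	return example_native_ioctl(file, cmd, (unsigned long)up_native);
}
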
+diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
+index aa407cb5f830..7004477e7ffc 100644
+--- a/drivers/media/v4l2-core/v4l2-ioctl.c
++++ b/drivers/media/v4l2-core/v4l2-ioctl.c
+@@ -2552,8 +2552,11 @@ video_usercopy(struct file *file, unsigned int cmd, unsigned long arg,
+ 
+ 	/* Handles IOCTL */
+ 	err = func(file, cmd, parg);
+-	if (err == -ENOIOCTLCMD)
++	if (err == -ENOTTY || err == -ENOIOCTLCMD) {
+ 		err = -ENOTTY;
++		goto out;
++	}
++
+ 	if (err == 0) {
+ 		if (cmd == VIDIOC_DQBUF)
+ 			trace_v4l2_dqbuf(video_devdata(file)->minor, parg);
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index fd9b252e2b34..079ee4ae9436 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -2119,6 +2119,11 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool n
+ 	dprintk(1, "dqbuf of buffer %d, with state %d\n",
+ 			vb->v4l2_buf.index, vb->state);
+ 
++	/*
++	 * After calling VIDIOC_DQBUF, V4L2_BUF_FLAG_DONE must be
++	 * cleared.
++	 */
++	b->flags &= ~V4L2_BUF_FLAG_DONE;
+ 	return 0;
+ }
+ 
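
For illustration (not part of the patch): after the videobuf2 hunk, a buffer handed back by VIDIOC_DQBUF never carries V4L2_BUF_FLAG_DONE, so the flag means "driver holds a finished buffer" only between completion and dequeue. A hypothetical userspace sketch of that contract (example_dqbuf and the already-streaming fd are assumptions):

#include <assert.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* fd is assumed to be an open V4L2 capture device with streaming on. */
void example_dqbuf(int fd)
{
	struct v4l2_buffer buf = {
		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};

	if (ioctl(fd, VIDIOC_DQBUF, &buf) == 0)
		/* DONE is consumed by the dequeue itself. */
		assert(!(buf.flags & V4L2_BUF_FLAG_DONE));
}
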
+diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
+index 187f83629f7e..b1c5f02c3cab 100644
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -6872,6 +6872,7 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
+ 	*size = y;
+ }
+ 
++#ifdef CONFIG_PROC_FS
+ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int showlan)
+ {
+ 	char expVer[32];
+@@ -6903,6 +6904,7 @@ static void seq_mpt_print_ioc_summary(MPT_ADAPTER *ioc, struct seq_file *m, int
+ 
+ 	seq_putc(m, '\n');
+ }
++#endif
+ 
+ /**
+  *	mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
+diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
+index 0a1606480023..cc832d309599 100644
+--- a/drivers/mfd/twl4030-audio.c
++++ b/drivers/mfd/twl4030-audio.c
+@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
+ EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
+ 
+ static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
+-			      struct device_node *node)
++			      struct device_node *parent)
+ {
++	struct device_node *node;
++
+ 	if (pdata && pdata->codec)
+ 		return true;
+ 
+-	if (of_find_node_by_name(node, "codec"))
++	node = of_get_child_by_name(parent, "codec");
++	if (node) {
++		of_node_put(node);
+ 		return true;
++	}
+ 
+ 	return false;
+ }
+diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
+index 6aacd205a774..aec1ab253f7f 100644
+--- a/drivers/mfd/twl6040.c
++++ b/drivers/mfd/twl6040.c
+@@ -97,12 +97,16 @@ static struct reg_default twl6040_patch[] = {
+ };
+ 
+ 
+-static bool twl6040_has_vibra(struct device_node *node)
++static bool twl6040_has_vibra(struct device_node *parent)
+ {
+-#ifdef CONFIG_OF
+-	if (of_find_node_by_name(node, "vibra"))
++	struct device_node *node;
++
++	node = of_get_child_by_name(parent, "vibra");
++	if (node) {
++		of_node_put(node);
+ 		return true;
+-#endif
++	}
++
+ 	return false;
+ }
+ 
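
For illustration (not part of the patch): both MFD hunks converge on the same lookup idiom. of_get_child_by_name() searches only direct children and returns the node with an elevated refcount, which must be dropped with of_node_put(); the of_find_node_by_name() it replaces walks the whole tree from the given node and can match an unrelated "codec" or "vibra" node elsewhere. A sketch, with example_has_child hypothetical:

#include <linux/of.h>

static bool example_has_child(struct device_node *parent, const char *name)
{
	struct device_node *child;

	/* Searches direct children only and takes a reference... */
	child = of_get_child_by_name(parent, name);
	if (!child)
		return false;

	of_node_put(child);	/* ...which must be dropped. */
	return true;
}
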
+diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
+index 6bc1f94333a5..e956231de30f 100644
+--- a/drivers/mtd/chips/Kconfig
++++ b/drivers/mtd/chips/Kconfig
+@@ -66,6 +66,10 @@ endchoice
+ config MTD_CFI_GEOMETRY
+ 	bool "Specific CFI Flash geometry selection"
+ 	depends on MTD_CFI_ADV_OPTIONS
++	select MTD_MAP_BANK_WIDTH_1 if  !(MTD_MAP_BANK_WIDTH_2 || \
++		 MTD_MAP_BANK_WIDTH_4  || MTD_MAP_BANK_WIDTH_8 || \
++		 MTD_MAP_BANK_WIDTH_16 || MTD_MAP_BANK_WIDTH_32)
++	select MTD_CFI_I1 if !(MTD_CFI_I2 || MTD_CFI_I4 || MTD_CFI_I8)
+ 	help
+ 	  This option does not affect the code directly, but will enable
+ 	  some other configuration options which would allow you to reduce
+diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c
+index 0455166f05fa..4f206a99164c 100644
+--- a/drivers/mtd/maps/ck804xrom.c
++++ b/drivers/mtd/maps/ck804xrom.c
+@@ -112,8 +112,8 @@ static void ck804xrom_cleanup(struct ck804xrom_window *window)
+ }
+ 
+ 
+-static int ck804xrom_init_one(struct pci_dev *pdev,
+-			      const struct pci_device_id *ent)
++static int __init ck804xrom_init_one(struct pci_dev *pdev,
++				     const struct pci_device_id *ent)
+ {
+ 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ 	u8 byte;
+diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c
+index f784cf0caa13..a47b374b1b0c 100644
+--- a/drivers/mtd/maps/esb2rom.c
++++ b/drivers/mtd/maps/esb2rom.c
+@@ -144,8 +144,8 @@ static void esb2rom_cleanup(struct esb2rom_window *window)
+ 	pci_dev_put(window->pdev);
+ }
+ 
+-static int esb2rom_init_one(struct pci_dev *pdev,
+-			    const struct pci_device_id *ent)
++static int __init esb2rom_init_one(struct pci_dev *pdev,
++				   const struct pci_device_id *ent)
+ {
+ 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ 	struct esb2rom_window *window = &esb2rom_window;
+diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c
+index c7478e18f485..aa83e7b87cfe 100644
+--- a/drivers/mtd/maps/ichxrom.c
++++ b/drivers/mtd/maps/ichxrom.c
+@@ -57,10 +57,12 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
+ {
+ 	struct ichxrom_map_info *map, *scratch;
+ 	u16 word;
++	int ret;
+ 
+ 	/* Disable writes through the rom window */
+-	pci_read_config_word(window->pdev, BIOS_CNTL, &word);
+-	pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
++	ret = pci_read_config_word(window->pdev, BIOS_CNTL, &word);
++	if (!ret)
++		pci_write_config_word(window->pdev, BIOS_CNTL, word & ~1);
+ 	pci_dev_put(window->pdev);
+ 
+ 	/* Free all of the mtd devices */
+@@ -84,8 +86,8 @@ static void ichxrom_cleanup(struct ichxrom_window *window)
+ }
+ 
+ 
+-static int ichxrom_init_one(struct pci_dev *pdev,
+-			    const struct pci_device_id *ent)
++static int __init ichxrom_init_one(struct pci_dev *pdev,
++				   const struct pci_device_id *ent)
+ {
+ 	static char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
+ 	struct ichxrom_window *window = &ichxrom_window;
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index f84113fc7cb7..14a5f559e300 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -1889,6 +1889,7 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
+ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ 			    struct mtd_oob_ops *ops)
+ {
++	unsigned int max_bitflips = 0;
+ 	int page, realpage, chipnr;
+ 	struct nand_chip *chip = mtd->priv;
+ 	struct mtd_ecc_stats stats;
+@@ -1949,6 +1950,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ 				nand_wait_ready(mtd);
+ 		}
+ 
++		max_bitflips = max_t(unsigned int, max_bitflips, ret);
++
+ 		readlen -= len;
+ 		if (!readlen)
+ 			break;
+@@ -1974,7 +1977,7 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+ 	if (mtd->ecc_stats.failed - stats.failed)
+ 		return -EBADMSG;
+ 
+-	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
++	return max_bitflips;
+ }
+ 
+ /**
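
For illustration (not part of the patch): the nand_do_read_oob() change makes the OOB path report the worst-case number of corrected bitflips, like the normal read path, so the MTD core can weigh the result against the bitflip threshold instead of seeing a bare -EUCLEAN. A sketch of the accumulation pattern, with hypothetical example_ helpers:

#include <linux/kernel.h>

/* read_page() is a stand-in for the per-page read: it returns a negative
 * errno on failure or the number of corrected bitflips on success. */
static int example_read_all(int (*read_page)(int page), int npages)
{
	unsigned int max_bitflips = 0;
	int page, ret;

	for (page = 0; page < npages; page++) {
		ret = read_page(page);
		if (ret < 0)
			return ret;
		max_bitflips = max_t(unsigned int, max_bitflips, ret);
	}
	return max_bitflips;	/* caller compares with the threshold */
}
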
+diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
+index c3ce81c1a716..54cf6fce9877 100644
+--- a/drivers/mtd/nand/sh_flctl.c
++++ b/drivers/mtd/nand/sh_flctl.c
+@@ -160,7 +160,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
+ 
+ 	memset(&cfg, 0, sizeof(cfg));
+ 	cfg.direction = DMA_MEM_TO_DEV;
+-	cfg.dst_addr = (dma_addr_t)FLDTFIFO(flctl);
++	cfg.dst_addr = flctl->fifo;
+ 	cfg.src_addr = 0;
+ 	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+ 	if (ret < 0)
+@@ -176,7 +176,7 @@ static void flctl_setup_dma(struct sh_flctl *flctl)
+ 
+ 	cfg.direction = DMA_DEV_TO_MEM;
+ 	cfg.dst_addr = 0;
+-	cfg.src_addr = (dma_addr_t)FLDTFIFO(flctl);
++	cfg.src_addr = flctl->fifo;
+ 	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+ 	if (ret < 0)
+ 		goto err;
+@@ -1095,6 +1095,7 @@ static int flctl_probe(struct platform_device *pdev)
+ 	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+ 	if (IS_ERR(flctl->reg))
+ 		return PTR_ERR(flctl->reg);
++	flctl->fifo = res->start + 0x24; /* FLDTFIFO */
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq < 0) {
+diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
+index 499b8e433d3d..1c8fdc3cec93 100644
+--- a/drivers/mtd/nand/sunxi_nand.c
++++ b/drivers/mtd/nand/sunxi_nand.c
+@@ -933,8 +933,14 @@ static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+ 
+ 	/* Add ECC info retrieval from DT */
+ 	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+-		if (ecc->strength <= strengths[i])
++		if (ecc->strength <= strengths[i]) {
++			/*
++			 * Update ecc->strength value with the actual strength
++			 * that will be used by the ECC engine.
++			 */
++			ecc->strength = strengths[i];
+ 			break;
++		}
+ 	}
+ 
+ 	if (i >= ARRAY_SIZE(strengths)) {
+diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
+index df51d6025a90..0eeb248a287e 100644
+--- a/drivers/net/Kconfig
++++ b/drivers/net/Kconfig
+@@ -374,6 +374,9 @@ config XEN_NETDEV_BACKEND
+ config VMXNET3
+ 	tristate "VMware VMXNET3 ethernet driver"
+ 	depends on PCI && INET
++	depends on !(PAGE_SIZE_64KB || ARM64_64K_PAGES || \
++		     IA64_PAGE_SIZE_64KB || MICROBLAZE_64K_PAGES || \
++		     PARISC_PAGE_SIZE_64KB || PPC_64K_PAGES)
+ 	help
+ 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
+ 	  To compile this driver as a module, choose M here: the
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 3b850f390fd7..7e6f36a59f06 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -430,7 +430,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
+ 		dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
+ 			rc);
+ 
+-	return rc;
++	return (rc > 0) ? 0 : rc;
+ }
+ 
+ static void gs_usb_xmit_callback(struct urb *urb)
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index ce44a033f63b..64cc86a82b2d 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -184,7 +184,7 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+ 	void *cmd_head = pcan_usb_fd_cmd_buffer(dev);
+ 	int err = 0;
+ 	u8 *packet_ptr;
+-	int i, n = 1, packet_len;
++	int packet_len;
+ 	ptrdiff_t cmd_len;
+ 
+ 	/* usb device unregistered? */
+@@ -201,17 +201,13 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+ 	}
+ 
+ 	packet_ptr = cmd_head;
++	packet_len = cmd_len;
+ 
+ 	/* firmware is not able to re-assemble a 512-byte buffer at full speed */
+-	if ((dev->udev->speed != USB_SPEED_HIGH) &&
+-	    (cmd_len > PCAN_UFD_LOSPD_PKT_SIZE)) {
+-		packet_len = PCAN_UFD_LOSPD_PKT_SIZE;
+-		n += cmd_len / packet_len;
+-	} else {
+-		packet_len = cmd_len;
+-	}
++	if (unlikely(dev->udev->speed != USB_SPEED_HIGH))
++		packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE);
+ 
+-	for (i = 0; i < n; i++) {
++	do {
+ 		err = usb_bulk_msg(dev->udev,
+ 				   usb_sndbulkpipe(dev->udev,
+ 						   PCAN_USBPRO_EP_CMDOUT),
+@@ -224,7 +220,12 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
+ 		}
+ 
+ 		packet_ptr += packet_len;
+-	}
++		cmd_len -= packet_len;
++
++		if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE)
++			packet_len = cmd_len;
++
++	} while (packet_len > 0);
+ 
+ 	return err;
+ }
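
For illustration (not part of the patch): the pcan_usb_fd_send_cmd() rewrite replaces the precomputed packet count with a remaining-length loop that can never run past the command tail. The same shape in isolation, with example_send_chunk standing in for usb_bulk_msg():

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical transport callback standing in for usb_bulk_msg(). */
static int example_send_chunk(const u8 *p, int n);

static int example_send(const u8 *buf, int len, int max_chunk)
{
	int chunk = min_t(int, len, max_chunk);
	int err = 0;

	while (chunk > 0) {
		err = example_send_chunk(buf, chunk);
		if (err)
			break;
		buf += chunk;
		len -= chunk;
		/* last (possibly short) chunk, then the loop terminates */
		if (len < max_chunk)
			chunk = len;
	}
	return err;
}
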
+diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
+index 4547a1b8b958..7677c745fb30 100644
+--- a/drivers/net/ethernet/3com/3c509.c
++++ b/drivers/net/ethernet/3com/3c509.c
+@@ -562,7 +562,7 @@ static void el3_common_remove (struct net_device *dev)
+ }
+ 
+ #ifdef CONFIG_EISA
+-static int __init el3_eisa_probe (struct device *device)
++static int el3_eisa_probe(struct device *device)
+ {
+ 	short i;
+ 	int ioaddr, irq, if_port;
+diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
+index 41095ebad97f..8a876e97597c 100644
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -907,7 +907,7 @@ static struct eisa_device_id vortex_eisa_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
+ 
+-static int __init vortex_eisa_probe(struct device *device)
++static int vortex_eisa_probe(struct device *device)
+ {
+ 	void __iomem *ioaddr;
+ 	struct eisa_device *edev;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 714905384900..5feddde71f18 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -553,7 +553,7 @@ static int xgbe_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM
++#ifdef CONFIG_PM_SLEEP
+ static int xgbe_suspend(struct device *dev)
+ {
+ 	struct net_device *netdev = dev_get_drvdata(dev);
+@@ -591,7 +591,7 @@ static int xgbe_resume(struct device *dev)
+ 
+ 	return ret;
+ }
+-#endif /* CONFIG_PM */
++#endif /* CONFIG_PM_SLEEP */
+ 
+ #ifdef CONFIG_ACPI
+ static const struct acpi_device_id xgbe_acpi_match[] = {
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index f9713fe036ef..7b150085e34d 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -14228,7 +14228,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+ 	/* Reset PHY, otherwise the read DMA engine will be in a mode that
+ 	 * breaks all requests to 256 bytes.
+ 	 */
+-	if (tg3_asic_rev(tp) == ASIC_REV_57766)
++	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
++	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
++	    tg3_asic_rev(tp) == ASIC_REV_5719)
+ 		reset_phy = true;
+ 
+ 	err = tg3_restart_hw(tp, reset_phy);
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+index 68f3c13c9ef6..5be892ffdaed 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
++++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+@@ -1948,13 +1948,13 @@ static void
+ bfa_ioc_send_enable(struct bfa_ioc *ioc)
+ {
+ 	struct bfi_ioc_ctrl_req enable_req;
+-	struct timeval tv;
+ 
+ 	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
+ 		    bfa_ioc_portid(ioc));
+ 	enable_req.clscode = htons(ioc->clscode);
+-	do_gettimeofday(&tv);
+-	enable_req.tv_sec = ntohl(tv.tv_sec);
++	enable_req.rsvd = htons(0);
++	/* overflow in 2106 */
++	enable_req.tv_sec = ntohl(ktime_get_real_seconds());
+ 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
+ }
+ 
+@@ -1965,6 +1965,10 @@ bfa_ioc_send_disable(struct bfa_ioc *ioc)
+ 
+ 	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
+ 		    bfa_ioc_portid(ioc));
++	disable_req.clscode = htons(ioc->clscode);
++	disable_req.rsvd = htons(0);
++	/* overflow in 2106 */
++	disable_req.tv_sec = ntohl(ktime_get_real_seconds());
+ 	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
+ }
+ 
+diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
+index badff181e719..37827819ae86 100644
+--- a/drivers/net/ethernet/dec/tulip/de4x5.c
++++ b/drivers/net/ethernet/dec/tulip/de4x5.c
+@@ -1990,7 +1990,7 @@ SetMulticastFilter(struct net_device *dev)
+ 
+ static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
+ 
+-static int __init de4x5_eisa_probe (struct device *gendev)
++static int de4x5_eisa_probe(struct device *gendev)
+ {
+ 	struct eisa_device *edev;
+ 	u_long iobase;
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 3ea651afa63d..6075ed694a6c 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -1413,9 +1413,11 @@ static int gfar_probe(struct platform_device *ofdev)
+ 
+ 	gfar_init_addr_hash_table(priv);
+ 
+-	/* Insert receive time stamps into padding alignment bytes */
++	/* Insert receive time stamps into padding alignment bytes, and
++	 * add 2 bytes of padding to ensure CPU alignment.
++	 */
+ 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+-		priv->padding = 8;
++		priv->padding = 8 + DEFAULT_PADDING;
+ 
+ 	if (dev->features & NETIF_F_IP_CSUM ||
+ 	    priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
+index ae6e30d39f0f..3daf2d4a7ca0 100644
+--- a/drivers/net/ethernet/hp/hp100.c
++++ b/drivers/net/ethernet/hp/hp100.c
+@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
+ };
+ #endif
+ 
+-#ifdef CONFIG_EISA
+ static struct eisa_device_id hp100_eisa_tbl[] = {
+ 	{ "HWPF180" }, /* HP J2577 rev A */
+ 	{ "HWP1920" }, /* HP 27248B */
+@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
+ 	{ "" }	       /* Mandatory final entry ! */
+ };
+ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
+-#endif
+ 
+-#ifdef CONFIG_PCI
+ static const struct pci_device_id hp100_pci_tbl[] = {
+ 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
+ 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
+@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
+ 	{}			/* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
+-#endif
+ 
+ static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
+ static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
+@@ -2842,8 +2838,7 @@ static void cleanup_dev(struct net_device *d)
+ 	free_netdev(d);
+ }
+ 
+-#ifdef CONFIG_EISA
+-static int __init hp100_eisa_probe (struct device *gendev)
++static int hp100_eisa_probe(struct device *gendev)
+ {
+ 	struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
+ 	struct eisa_device *edev = to_eisa_device(gendev);
+@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
+ 		.remove  = hp100_eisa_remove,
+         }
+ };
+-#endif
+ 
+-#ifdef CONFIG_PCI
+ static int hp100_pci_probe(struct pci_dev *pdev,
+ 			   const struct pci_device_id *ent)
+ {
+@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
+ 	.probe		= hp100_pci_probe,
+ 	.remove		= hp100_pci_remove,
+ };
+-#endif
+ 
+ /*
+  *  module section
+@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
+ 	err = hp100_isa_init();
+ 	if (err && err != -ENODEV)
+ 		goto out;
+-#ifdef CONFIG_EISA
+ 	err = eisa_driver_register(&hp100_eisa_driver);
+ 	if (err && err != -ENODEV)
+ 		goto out2;
+-#endif
+-#ifdef CONFIG_PCI
+ 	err = pci_register_driver(&hp100_pci_driver);
+ 	if (err && err != -ENODEV)
+ 		goto out3;
+-#endif
+  out:
+ 	return err;
+  out3:
+-#ifdef CONFIG_EISA
+ 	eisa_driver_unregister (&hp100_eisa_driver);
+  out2:
+-#endif
+ 	hp100_isa_cleanup();
+ 	goto out;
+ }
+@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
+ static void __exit hp100_module_exit(void)
+ {
+ 	hp100_isa_cleanup();
+-#ifdef CONFIG_EISA
+ 	eisa_driver_unregister (&hp100_eisa_driver);
+-#endif
+-#ifdef CONFIG_PCI
+ 	pci_unregister_driver (&hp100_pci_driver);
+-#endif
+ }
+ 
+ module_init(hp100_module_init)
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index e2498dbf3c3b..5e63a8931f2e 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1345,6 +1345,9 @@ out:
+  *  Checks to see of the link status of the hardware has changed.  If a
+  *  change in link status has been detected, then we read the PHY registers
+  *  to get the current speed/duplex if link exists.
++ *
++ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
++ *  up).
+  **/
+ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+ {
+@@ -1360,7 +1363,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+ 	 * Change or Rx Sequence Error interrupt.
+ 	 */
+ 	if (!mac->get_link_status)
+-		return 0;
++		return 1;
+ 
+ 	/* First we want to see if the MII Status Register reports
+ 	 * link.  If so, then we want to get the current speed/duplex
+@@ -1519,10 +1522,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+ 	 * different link partner.
+ 	 */
+ 	ret_val = e1000e_config_fc_after_link_up(hw);
+-	if (ret_val)
++	if (ret_val) {
+ 		e_dbg("Error configuring flow control\n");
++		return ret_val;
++	}
+ 
+-	return ret_val;
++	return 1;
+ }
+ 
+ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+index 5b08e6284a3c..8d8fd5d49d60 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+@@ -126,6 +126,9 @@ process_mbx:
+ 		struct fm10k_mbx_info *mbx = &vf_info->mbx;
+ 		u16 glort = vf_info->glort;
+ 
++		/* process the SM mailbox first to drain outgoing messages */
++		hw->mbx.ops.process(hw, &hw->mbx);
++
+ 		/* verify port mapping is valid, if not reset port */
+ 		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
+ 			hw->iov.ops.reset_lport(hw, vf_info);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 13e0cf90e567..042f3486f79a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3894,8 +3894,12 @@ static void i40e_napi_enable_all(struct i40e_vsi *vsi)
+ 	if (!vsi->netdev)
+ 		return;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+-		napi_enable(&vsi->q_vectors[q_idx]->napi);
++	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++		if (q_vector->rx.ring || q_vector->tx.ring)
++			napi_enable(&q_vector->napi);
++	}
+ }
+ 
+ /**
+@@ -3909,8 +3913,12 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
+ 	if (!vsi->netdev)
+ 		return;
+ 
+-	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
+-		napi_disable(&vsi->q_vectors[q_idx]->napi);
++	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
++		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
++
++		if (q_vector->rx.ring || q_vector->tx.ring)
++			napi_disable(&q_vector->napi);
++	}
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 34f15f56b2a1..cfcafea9d2b6 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2998,6 +2998,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
+ 	/* Setup and initialize a copy of the hw vlan table array */
+ 	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+ 				       GFP_ATOMIC);
++	if (!adapter->shadow_vfta)
++		return -ENOMEM;
+ 
+ 	/* This call may decrease the number of queues */
+ 	if (igb_init_interrupt_scheme(adapter, true)) {
+@@ -3167,7 +3169,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
+ 
+ static int igb_close(struct net_device *netdev)
+ {
+-	if (netif_device_present(netdev))
++	if (netif_device_present(netdev) || netdev->dismantle)
+ 		return __igb_close(netdev, false);
+ 	return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index 06d8f3cfa099..14f789e72c29 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -3609,10 +3609,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ 	fw_cmd.ver_build = build;
+ 	fw_cmd.ver_sub = sub;
+ 	fw_cmd.hdr.checksum = 0;
+-	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+-				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ 	fw_cmd.pad = 0;
+ 	fw_cmd.pad2 = 0;
++	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
++				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+ 
+ 	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ 		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+index cf5cf819a6b8..0e1e63ee6c5e 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+@@ -224,6 +224,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
+ 		/* convert offset from words to bytes */
+ 		buffer.address = cpu_to_be32((offset + current_word) * 2);
+ 		buffer.length = cpu_to_be16(words_to_read * 2);
++		buffer.pad2 = 0;
++		buffer.pad3 = 0;
+ 
+ 		status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ 						      sizeof(buffer),
+diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
+index fc2fb25343f4..c122b3b99cd8 100644
+--- a/drivers/net/ethernet/marvell/mvmdio.c
++++ b/drivers/net/ethernet/marvell/mvmdio.c
+@@ -241,7 +241,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
+ 			dev->regs + MVMDIO_ERR_INT_MASK);
+ 
+ 	} else if (dev->err_interrupt == -EPROBE_DEFER) {
+-		return -EPROBE_DEFER;
++		ret = -EPROBE_DEFER;
++		goto out_mdio;
+ 	}
+ 
+ 	mutex_init(&dev->lock);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 40161dacc9c7..744277984cb8 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -858,6 +858,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
+ 	val &= ~MVNETA_GMAC0_PORT_ENABLE;
+ 	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
+ 
++	pp->link = 0;
++	pp->duplex = -1;
++	pp->speed = 0;
++
+ 	udelay(200);
+ }
+ 
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index 3e8b1bfb1f2e..eda6b0a2faf4 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -5583,6 +5583,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
+ 	int id = port->id;
+ 	bool allmulti = dev->flags & IFF_ALLMULTI;
+ 
++retry:
+ 	mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
+ 	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
+ 	mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
+@@ -5590,9 +5591,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
+ 	/* Remove all port->id's mcast enries */
+ 	mvpp2_prs_mcast_del_all(priv, id);
+ 
+-	if (allmulti && !netdev_mc_empty(dev)) {
+-		netdev_for_each_mc_addr(ha, dev)
+-			mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
++	if (!allmulti) {
++		netdev_for_each_mc_addr(ha, dev) {
++			if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
++				allmulti = true;
++				goto retry;
++			}
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index b75214a80d0e..eb1dcb7e9e96 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -280,6 +280,9 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+ 	u64 in_param = 0;
+ 	int err;
+ 
++	if (!cnt)
++		return;
++
+ 	if (mlx4_is_mfunc(dev)) {
+ 		set_param_l(&in_param, base_qpn);
+ 		set_param_h(&in_param, cnt);
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index dd618d7ed257..1c40c524f0c8 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -3825,7 +3825,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+ 	struct list_head *head = &mbx->cmd_q;
+ 	struct qlcnic_cmd_args *cmd = NULL;
+ 
+-	spin_lock(&mbx->queue_lock);
++	spin_lock_bh(&mbx->queue_lock);
+ 
+ 	while (!list_empty(head)) {
+ 		cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+@@ -3836,7 +3836,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+ 		qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ 	}
+ 
+-	spin_unlock(&mbx->queue_lock);
++	spin_unlock_bh(&mbx->queue_lock);
+ }
+ 
+ static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+@@ -3872,12 +3872,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ {
+ 	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ 
+-	spin_lock(&mbx->queue_lock);
++	spin_lock_bh(&mbx->queue_lock);
+ 
+ 	list_del(&cmd->list);
+ 	mbx->num_cmds--;
+ 
+-	spin_unlock(&mbx->queue_lock);
++	spin_unlock_bh(&mbx->queue_lock);
+ 
+ 	qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+@@ -3942,7 +3942,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ 		init_completion(&cmd->completion);
+ 		cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+ 
+-		spin_lock(&mbx->queue_lock);
++		spin_lock_bh(&mbx->queue_lock);
+ 
+ 		list_add_tail(&cmd->list, &mbx->cmd_q);
+ 		mbx->num_cmds++;
+@@ -3950,7 +3950,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ 		*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ 		queue_work(mbx->work_q, &mbx->work);
+ 
+-		spin_unlock(&mbx->queue_lock);
++		spin_unlock_bh(&mbx->queue_lock);
+ 
+ 		return 0;
+ 	}
+@@ -4046,15 +4046,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ 		spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ 
+-		spin_lock(&mbx->queue_lock);
++		spin_lock_bh(&mbx->queue_lock);
+ 
+ 		if (list_empty(head)) {
+-			spin_unlock(&mbx->queue_lock);
++			spin_unlock_bh(&mbx->queue_lock);
+ 			return;
+ 		}
+ 		cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ 
+-		spin_unlock(&mbx->queue_lock);
++		spin_unlock_bh(&mbx->queue_lock);
+ 
+ 		mbx_ops->encode_cmd(adapter, cmd);
+ 		mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
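
For illustration (not part of the patch): the qlcnic hunks switch queue_lock users to the _bh variants. The rule being applied: if a spinlock is ever taken from bottom-half (softirq) context, process-context users must disable BHs while holding it, or a softirq firing on the same CPU can spin forever on a lock that CPU already holds. A generic sketch (all example_ names invented):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_process_context(void)
{
	spin_lock_bh(&example_lock);	/* blocks local BHs, then locks */
	/* ... touch state shared with the BH handler ... */
	spin_unlock_bh(&example_lock);
}

static void example_bh_handler(void)	/* e.g. run from a tasklet */
{
	spin_lock(&example_lock);	/* plain lock is fine inside BH */
	/* ... */
	spin_unlock(&example_lock);
}
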
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index af4b1f4c24d2..8004de976890 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -1375,7 +1375,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
+ {
+ 	void __iomem *ioaddr = tp->mmio_addr;
+ 
+-	return RTL_R8(IBISR0) & 0x02;
++	return RTL_R8(IBISR0) & 0x20;
+ }
+ 
+ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+@@ -1383,7 +1383,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
+ 	void __iomem *ioaddr = tp->mmio_addr;
+ 
+ 	RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
+-	rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
++	rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
+ 	RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
+ 	RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index e2dd94a91c15..0ae76e419482 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -3171,18 +3171,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
+ 	/* ioremap the TSU registers */
+ 	if (mdp->cd->tsu) {
+ 		struct resource *rtsu;
++
+ 		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+-		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
+-		if (IS_ERR(mdp->tsu_addr)) {
+-			ret = PTR_ERR(mdp->tsu_addr);
++		if (!rtsu) {
++			dev_err(&pdev->dev, "no TSU resource\n");
++			ret = -ENODEV;
++			goto out_release;
++		}
++		/* We can only request the TSU region for the first port
++		 * of the two sharing this TSU for the probe to succeed...
++		 */
++		if (devno % 2 == 0 &&
++		    !devm_request_mem_region(&pdev->dev, rtsu->start,
++					     resource_size(rtsu),
++					     dev_name(&pdev->dev))) {
++			dev_err(&pdev->dev, "can't request TSU resource.\n");
++			ret = -EBUSY;
++			goto out_release;
++		}
++		mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
++					     resource_size(rtsu));
++		if (!mdp->tsu_addr) {
++			dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
++			ret = -ENOMEM;
+ 			goto out_release;
+ 		}
+ 		mdp->port = devno % 2;
+ 		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
+ 	}
+ 
+-	/* initialize first or needed device */
+-	if (!devno || pd->needs_init) {
++	/* Need to init only the first port of the two sharing a TSU */
++	if (devno % 2 == 0) {
+ 		if (mdp->cd->chip_reset)
+ 			mdp->cd->chip_reset(ndev);
+ 
+diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
+index 691ec936e88d..a0f805142d42 100644
+--- a/drivers/net/ethernet/ti/tlan.c
++++ b/drivers/net/ethernet/ti/tlan.c
+@@ -610,8 +610,8 @@ err_out_regions:
+ #ifdef CONFIG_PCI
+ 	if (pdev)
+ 		pci_release_regions(pdev);
+-#endif
+ err_out:
++#endif
+ 	if (pdev)
+ 		pci_disable_device(pdev);
+ 	return rc;
+diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
+index 7b90a5eba099..9d6c252c1911 100644
+--- a/drivers/net/ethernet/xilinx/Kconfig
++++ b/drivers/net/ethernet/xilinx/Kconfig
+@@ -36,6 +36,7 @@ config XILINX_AXI_EMAC
+ config XILINX_LL_TEMAC
+ 	tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
+ 	depends on (PPC || MICROBLAZE)
++	depends on !64BIT || BROKEN
+ 	select PHYLIB
+ 	---help---
+ 	  This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
+diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
+index 95c0b45a68fb..313e006f74fe 100644
+--- a/drivers/net/hippi/rrunner.c
++++ b/drivers/net/hippi/rrunner.c
+@@ -1381,8 +1381,8 @@ static int rr_close(struct net_device *dev)
+ 			    rrpriv->info_dma);
+ 	rrpriv->info = NULL;
+ 
+-	free_irq(pdev->irq, dev);
+ 	spin_unlock_irqrestore(&rrpriv->lock, flags);
++	free_irq(pdev->irq, dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index c30b5c300c05..f3cd85ecd795 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -345,6 +345,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb)
+ 		.flowi4_oif = dev_get_iflink(dev),
+ 		.flowi4_tos = RT_TOS(ip4h->tos),
+ 		.flowi4_flags = FLOWI_FLAG_ANYSRC,
++		.flowi4_mark = skb->mark,
+ 		.daddr = ip4h->daddr,
+ 		.saddr = ip4h->saddr,
+ 	};
+diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
+index a0849f49bbec..c0192f97ecc8 100644
+--- a/drivers/net/irda/vlsi_ir.c
++++ b/drivers/net/irda/vlsi_ir.c
+@@ -418,8 +418,9 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+ 		memset(rd, 0, sizeof(*rd));
+ 		rd->hw = hwmap + i;
+ 		rd->buf = kmalloc(len, GFP_KERNEL|GFP_DMA);
+-		if (rd->buf == NULL ||
+-		    !(busaddr = pci_map_single(pdev, rd->buf, len, dir))) {
++		if (rd->buf)
++			busaddr = pci_map_single(pdev, rd->buf, len, dir);
++		if (rd->buf == NULL || pci_dma_mapping_error(pdev, busaddr)) {
+ 			if (rd->buf) {
+ 				net_err_ratelimited("%s: failed to create PCI-MAP for %p\n",
+ 						    __func__, rd->buf);
+@@ -430,8 +431,7 @@ static struct vlsi_ring *vlsi_alloc_ring(struct pci_dev *pdev, struct ring_descr
+ 				rd = r->rd + j;
+ 				busaddr = rd_get_addr(rd);
+ 				rd_set_addr_status(rd, 0, 0);
+-				if (busaddr)
+-					pci_unmap_single(pdev, busaddr, len, dir);
++				pci_unmap_single(pdev, busaddr, len, dir);
+ 				kfree(rd->buf);
+ 				rd->buf = NULL;
+ 			}
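
For illustration (not part of the patch): the vlsi_ir hunk stops treating a bus address of 0 as "mapping failed", since 0 can be a valid DMA address on some platforms, and tests the result with pci_dma_mapping_error() instead. The canonical shape of that check (example_map is hypothetical):

#include <linux/pci.h>
#include <linux/slab.h>

static void *example_map(struct pci_dev *pdev, size_t len, dma_addr_t *bus)
{
	void *buf = kmalloc(len, GFP_KERNEL | GFP_DMA);

	if (!buf)
		return NULL;

	*bus = pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *bus)) {	/* not "== 0" */
		kfree(buf);
		return NULL;
	}
	return buf;
}
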
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index fabf11d32d27..d4b8ea30cd9d 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -105,7 +105,7 @@ static int at803x_set_wol(struct phy_device *phydev,
+ 		mac = (const u8 *) ndev->dev_addr;
+ 
+ 		if (!is_valid_ether_addr(mac))
+-			return -EFAULT;
++			return -EINVAL;
+ 
+ 		for (i = 0; i < 3; i++) {
+ 			phy_write(phydev, AT803X_MMD_ACCESS_CONTROL,
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index d1c4bc1c4df0..31aa93907b77 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+ 	struct pppoe_hdr *ph;
+ 	struct net_device *dev;
+ 	char *start;
++	int hlen;
+ 
+ 	lock_sock(sk);
+ 	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
+@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
+ 	if (total_len > (dev->mtu + dev->hard_header_len))
+ 		goto end;
+ 
+-
+-	skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
+-			   0, GFP_KERNEL);
++	hlen = LL_RESERVED_SPACE(dev);
++	skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
++			   dev->needed_tailroom, 0, GFP_KERNEL);
+ 	if (!skb) {
+ 		error = -ENOMEM;
+ 		goto end;
+ 	}
+ 
+ 	/* Reserve space for headers. */
+-	skb_reserve(skb, dev->hard_header_len);
++	skb_reserve(skb, hlen);
+ 	skb_reset_network_header(skb);
+ 
+ 	skb->dev = dev;
+@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
+ 	/* Copy the data if there is no space for the header or if it's
+ 	 * read-only.
+ 	 */
+-	if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
++	if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
+ 		goto abort;
+ 
+ 	__skb_push(skb, sizeof(*ph));
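
For illustration (not part of the patch): the pppoe hunks size transmit skbs with LL_RESERVED_SPACE(dev), which covers the device's hard header plus any extra headroom it requested, and honour dev->needed_tailroom, rather than guessing with hard_header_len + 32. A sketch of the allocation idiom (example_alloc_tx is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_tx(struct net_device *dev,
					size_t hdr, size_t payload)
{
	struct sk_buff *skb = alloc_skb(LL_RESERVED_SPACE(dev) + hdr +
					payload + dev->needed_tailroom,
					GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Leave room for the link-layer header the device will push. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	return skb;
}
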
+diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
+index 2b47cbae524c..9a9e0ea05a72 100644
+--- a/drivers/net/usb/Kconfig
++++ b/drivers/net/usb/Kconfig
+@@ -382,6 +382,10 @@ config USB_NET_RNDIS_HOST
+ 	  The protocol specification is incomplete, and is controlled by
+ 	  (and for) Microsoft; it isn't an "Open" ecosystem or market.
+ 
++config USB_NET_CDC_SUBSET_ENABLE
++	tristate
++	depends on USB_NET_CDC_SUBSET
++
+ config USB_NET_CDC_SUBSET
+ 	tristate "Simple USB Network Links (CDC Ethernet subset)"
+ 	depends on USB_USBNET
+@@ -400,6 +404,7 @@ config USB_NET_CDC_SUBSET
+ config USB_ALI_M5632
+ 	bool "ALi M5632 based 'USB 2.0 Data Link' cables"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	help
+ 	  Choose this option if you're using a host-to-host cable
+ 	  based on this design, which supports USB 2.0 high speed.
+@@ -407,6 +412,7 @@ config USB_ALI_M5632
+ config USB_AN2720
+ 	bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	help
+ 	  Choose this option if you're using a host-to-host cable
+ 	  based on this design.  Note that AnchorChips is now a
+@@ -415,6 +421,7 @@ config USB_AN2720
+ config USB_BELKIN
+ 	bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	default y
+ 	help
+ 	  Choose this option if you're using a host-to-host cable
+@@ -424,6 +431,7 @@ config USB_BELKIN
+ config USB_ARMLINUX
+ 	bool "Embedded ARM Linux links (iPaq, ...)"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	default y
+ 	help
+ 	  Choose this option to support the "usb-eth" networking driver
+@@ -441,6 +449,7 @@ config USB_ARMLINUX
+ config USB_EPSON2888
+ 	bool "Epson 2888 based firmware (DEVELOPMENT)"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	help
+ 	  Choose this option to support the usb networking links used
+ 	  by some sample firmware from Epson.
+@@ -448,6 +457,7 @@ config USB_EPSON2888
+ config USB_KC2190
+ 	bool "KT Technology KC2190 based cables (InstaNet)"
+ 	depends on USB_NET_CDC_SUBSET
++	select USB_NET_CDC_SUBSET_ENABLE
+ 	help
+ 	  Choose this option if you're using a host-to-host cable
+ 	  with one of these chips.
+diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
+index e2797f1e1b31..ed5a577c1a61 100644
+--- a/drivers/net/usb/Makefile
++++ b/drivers/net/usb/Makefile
+@@ -22,7 +22,7 @@ obj-$(CONFIG_USB_NET_GL620A)	+= gl620a.o
+ obj-$(CONFIG_USB_NET_NET1080)	+= net1080.o
+ obj-$(CONFIG_USB_NET_PLUSB)	+= plusb.o
+ obj-$(CONFIG_USB_NET_RNDIS_HOST)	+= rndis_host.o
+-obj-$(CONFIG_USB_NET_CDC_SUBSET)	+= cdc_subset.o
++obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE)	+= cdc_subset.o
+ obj-$(CONFIG_USB_NET_ZAURUS)	+= zaurus.o
+ obj-$(CONFIG_USB_NET_MCS7830)	+= mcs7830.o
+ obj-$(CONFIG_USB_USBNET)	+= usbnet.o
+diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
+index e221bfcee76b..947bea81d924 100644
+--- a/drivers/net/usb/cx82310_eth.c
++++ b/drivers/net/usb/cx82310_eth.c
+@@ -293,12 +293,9 @@ static struct sk_buff *cx82310_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ {
+ 	int len = skb->len;
+ 
+-	if (skb_headroom(skb) < 2) {
+-		struct sk_buff *skb2 = skb_copy_expand(skb, 2, 0, flags);
++	if (skb_cow_head(skb, 2)) {
+ 		dev_kfree_skb_any(skb);
+-		skb = skb2;
+-		if (!skb)
+-			return NULL;
++		return NULL;
+ 	}
+ 	skb_push(skb, 2);
+ 
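
For illustration (not part of the patch): this cx82310 hunk, and the smsc75xx and sr9700 hunks later in the series, all adopt skb_cow_head(), which reallocates the header only when headroom is short or the header is shared, so the common case skips the unconditional copy that skb_copy_expand() forced. The shared tx_fixup shape (example_tx_fixup and overhead are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_tx_fixup(struct sk_buff *skb,
					unsigned int overhead)
{
	if (skb_cow_head(skb, overhead)) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	skb_push(skb, overhead);
	/* ... fill in the 'overhead' bytes of driver header ... */
	return skb;
}
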
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 6cf881ce4d4e..3d97fd391793 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -464,6 +464,10 @@ static const struct usb_device_id products[] = {
+ 		USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69),
+ 		.driver_info        = (unsigned long)&qmi_wwan_info,
+ 	},
++	{	/* Motorola Mapphone devices with MDM6600 */
++		USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff),
++		.driver_info        = (unsigned long)&qmi_wwan_info,
++	},
+ 
+ 	/* 2. Combined interface devices matching on class+protocol */
+ 	{	/* Huawei E367 and possibly others in "Windows mode" */
+@@ -775,6 +779,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
+ 	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
+ 	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index e387af61e0d3..55b0129acff7 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -1263,6 +1263,7 @@ static int alloc_all_mem(struct r8152 *tp)
+ 	spin_lock_init(&tp->rx_lock);
+ 	spin_lock_init(&tp->tx_lock);
+ 	INIT_LIST_HEAD(&tp->tx_free);
++	INIT_LIST_HEAD(&tp->rx_done);
+ 	skb_queue_head_init(&tp->tx_queue);
+ 	skb_queue_head_init(&tp->rx_queue);
+ 
+@@ -1928,7 +1929,6 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
+ 	__le32 tmp[2];
+ 	u32 ocp_data;
+ 
+-	clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ 	netif_stop_queue(netdev);
+ 	ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ 	ocp_data &= ~RCR_ACPT_ALL;
+@@ -2363,8 +2363,6 @@ static void rtl_phy_reset(struct r8152 *tp)
+ 	u16 data;
+ 	int i;
+ 
+-	clear_bit(PHY_RESET, &tp->flags);
+-
+ 	data = r8152_mdio_read(tp, MII_BMCR);
+ 
+ 	/* don't reset again before the previous one complete */
+@@ -2859,10 +2857,9 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
+ 	r8152_mdio_write(tp, MII_ADVERTISE, anar);
+ 	r8152_mdio_write(tp, MII_BMCR, bmcr);
+ 
+-	if (test_bit(PHY_RESET, &tp->flags)) {
++	if (test_and_clear_bit(PHY_RESET, &tp->flags)) {
+ 		int i;
+ 
+-		clear_bit(PHY_RESET, &tp->flags);
+ 		for (i = 0; i < 50; i++) {
+ 			msleep(20);
+ 			if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+@@ -2871,7 +2868,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
+ 	}
+ 
+ out:
+-
+ 	return ret;
+ }
+ 
+@@ -2927,7 +2923,6 @@ static void set_carrier(struct r8152 *tp)
+ 	struct net_device *netdev = tp->netdev;
+ 	u8 speed;
+ 
+-	clear_bit(RTL8152_LINK_CHG, &tp->flags);
+ 	speed = rtl8152_get_speed(tp);
+ 
+ 	if (speed & LINK_STATUS) {
+@@ -2968,20 +2963,18 @@ static void rtl_work_func_t(struct work_struct *work)
+ 		goto out1;
+ 	}
+ 
+-	if (test_bit(RTL8152_LINK_CHG, &tp->flags))
++	if (test_and_clear_bit(RTL8152_LINK_CHG, &tp->flags))
+ 		set_carrier(tp);
+ 
+-	if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
++	if (test_and_clear_bit(RTL8152_SET_RX_MODE, &tp->flags))
+ 		_rtl8152_set_rx_mode(tp->netdev);
+ 
+ 	/* don't schedule napi before linking */
+-	if (test_bit(SCHEDULE_NAPI, &tp->flags) &&
+-	    netif_carrier_ok(tp->netdev)) {
+-		clear_bit(SCHEDULE_NAPI, &tp->flags);
++	if (test_and_clear_bit(SCHEDULE_NAPI, &tp->flags) &&
++	    netif_carrier_ok(tp->netdev))
+ 		napi_schedule(&tp->napi);
+-	}
+ 
+-	if (test_bit(PHY_RESET, &tp->flags))
++	if (test_and_clear_bit(PHY_RESET, &tp->flags))
+ 		rtl_phy_reset(tp);
+ 
+ 	mutex_unlock(&tp->control);
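
For illustration (not part of the patch): the r8152 hunks collapse test_bit()/clear_bit() pairs into test_and_clear_bit(). The separate sequence has a window in which a flag set by another CPU between the test and the clear is wiped without ever being handled; the combined atomic read-modify-write consumes each set exactly once. A generic sketch (example_ names invented):

#include <linux/bitops.h>

static void example_handle_event(void);

static void example_work(unsigned long *flags)
{
	if (test_and_clear_bit(0, flags))
		example_handle_event();	/* observed and consumed in one
					 * atomic step: no window for a
					 * concurrent setter to be lost */
}
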
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index d9e7892262fa..2c526ca29cde 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -2198,13 +2198,9 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
+ {
+ 	u32 tx_cmd_a, tx_cmd_b;
+ 
+-	if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
+-		struct sk_buff *skb2 =
+-			skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
++	if (skb_cow_head(skb, SMSC75XX_TX_OVERHEAD)) {
+ 		dev_kfree_skb_any(skb);
+-		skb = skb2;
+-		if (!skb)
+-			return NULL;
++		return NULL;
+ 	}
+ 
+ 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN) | TX_CMD_A_FCS;
+diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
+index 4a1e9c489f1f..aadfe1d1c37e 100644
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -456,14 +456,9 @@ static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ 
+ 	len = skb->len;
+ 
+-	if (skb_headroom(skb) < SR_TX_OVERHEAD) {
+-		struct sk_buff *skb2;
+-
+-		skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
++	if (skb_cow_head(skb, SR_TX_OVERHEAD)) {
+ 		dev_kfree_skb_any(skb);
+-		skb = skb2;
+-		if (!skb)
+-			return NULL;
++		return NULL;
+ 	}
+ 
+ 	__skb_push(skb, SR_TX_OVERHEAD);
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 61c0840c448c..0b9c8d61f7d1 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -1431,7 +1431,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ 					  rq->rx_ring[i].basePA);
+ 			rq->rx_ring[i].base = NULL;
+ 		}
+-		rq->buf_info[i] = NULL;
+ 	}
+ 
+ 	if (rq->comp_ring.base) {
+@@ -1446,6 +1445,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
+ 			(rq->rx_ring[0].size + rq->rx_ring[1].size);
+ 		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+ 				  rq->buf_info_pa);
++		rq->buf_info[0] = rq->buf_info[1] = NULL;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/cw1200/wsm.c b/drivers/net/wireless/cw1200/wsm.c
+index 9e0ca3048657..3dd46c78c1cc 100644
+--- a/drivers/net/wireless/cw1200/wsm.c
++++ b/drivers/net/wireless/cw1200/wsm.c
+@@ -379,7 +379,6 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
+ {
+ 	int ret;
+ 	int count;
+-	int i;
+ 
+ 	count = WSM_GET32(buf);
+ 	if (WARN_ON(count <= 0))
+@@ -395,11 +394,10 @@ static int wsm_multi_tx_confirm(struct cw1200_common *priv,
+ 	}
+ 
+ 	cw1200_debug_txed_multi(priv, count);
+-	for (i = 0; i < count; ++i) {
++	do {
+ 		ret = wsm_tx_confirm(priv, buf, link_id);
+-		if (ret)
+-			return ret;
+-	}
++	} while (!ret && --count);
++
+ 	return ret;
+ 
+ underflow:
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 8a38a5bd34b8..9e8461466534 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -87,6 +87,8 @@ struct netfront_cb {
+ /* IRQ name is queue name with "-tx" or "-rx" appended */
+ #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+ 
++static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
++
+ struct netfront_stats {
+ 	u64			packets;
+ 	u64			bytes;
+@@ -1999,10 +2001,12 @@ static void netback_changed(struct xenbus_device *dev,
+ 		break;
+ 
+ 	case XenbusStateClosed:
++		wake_up_all(&module_unload_q);
+ 		if (dev->state == XenbusStateClosed)
+ 			break;
+ 		/* Missed the backend's CLOSING state -- fallthrough */
+ 	case XenbusStateClosing:
++		wake_up_all(&module_unload_q);
+ 		xenbus_frontend_closed(dev);
+ 		break;
+ 	}
+@@ -2108,6 +2112,20 @@ static int xennet_remove(struct xenbus_device *dev)
+ 
+ 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ 
++	if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
++		xenbus_switch_state(dev, XenbusStateClosing);
++		wait_event(module_unload_q,
++			   xenbus_read_driver_state(dev->otherend) ==
++			   XenbusStateClosing);
++
++		xenbus_switch_state(dev, XenbusStateClosed);
++		wait_event(module_unload_q,
++			   xenbus_read_driver_state(dev->otherend) ==
++			   XenbusStateClosed ||
++			   xenbus_read_driver_state(dev->otherend) ==
++			   XenbusStateUnknown);
++	}
++
+ 	xennet_disconnect_backend(info);
+ 
+ 	unregister_netdev(info->netdev);
+diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
+index 3901ff66d0ee..2f7978204421 100644
+--- a/drivers/parisc/lba_pci.c
++++ b/drivers/parisc/lba_pci.c
+@@ -1654,3 +1654,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
+ 	iounmap(base_addr);
+ }
+ 
++
++/*
++ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
++ * seems rushed, so that many built-in components simply don't work.
++ * The following quirks disable the serial AUX port and the built-in ATI RV100
++ * Radeon 7000 graphics card which both don't have any external connectors and
++ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
++ * such makes those machines the only PARISC machines on which we can't use
++ * ttyS0 as boot console.
++ */
++static void quirk_diva_ati_card(struct pci_dev *dev)
++{
++	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
++	    dev->subsystem_device != 0x1292)
++		return;
++
++	dev_info(&dev->dev, "Hiding Diva built-in ATI card");
++	dev->device = 0;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
++	quirk_diva_ati_card);
++
++static void quirk_diva_aux_disable(struct pci_dev *dev)
++{
++	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
++	    dev->subsystem_device != 0x1291)
++		return;
++
++	dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
++	dev->device = 0;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
++	quirk_diva_aux_disable);
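
For illustration (not part of the patch): both Diva quirks use the header-fixup stage, which runs before driver matching, and zero dev->device so no driver's ID table can ever bind the dead-end function. The pattern in isolation (the 0x1234 device ID and example_hide_device are invented):

#include <linux/pci.h>

static void example_hide_device(struct pci_dev *dev)
{
	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP)	/* scope the quirk */
		return;

	dev_info(&dev->dev, "Hiding example device");
	dev->device = 0;	/* no driver's ID table will match now */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, 0x1234, example_hide_device);
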
+diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
+index 75333b0c4f0a..29b018c4e7e4 100644
+--- a/drivers/pci/host/pci-keystone.c
++++ b/drivers/pci/host/pci-keystone.c
+@@ -179,14 +179,16 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ 	}
+ 
+ 	/* interrupt controller is in a child node */
+-	*np_temp = of_find_node_by_name(np_pcie, controller);
++	*np_temp = of_get_child_by_name(np_pcie, controller);
+ 	if (!(*np_temp)) {
+ 		dev_err(dev, "Node for %s is absent\n", controller);
+ 		goto out;
+ 	}
+ 	temp = of_irq_count(*np_temp);
+-	if (!temp)
++	if (!temp) {
++		of_node_put(*np_temp);
+ 		goto out;
++	}
+ 	if (temp > max_host_irqs)
+ 		dev_warn(dev, "Too many %s interrupts defined %u\n",
+ 			(legacy ? "legacy" : "MSI"), temp);
+@@ -200,6 +202,9 @@ static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+ 		if (!host_irqs[temp])
+ 			break;
+ 	}
++
++	of_node_put(*np_temp);
++
+ 	if (temp) {
+ 		*num_irqs = temp;
+ 		ret = 0;
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index 089a1f41e44e..95db37cf5f08 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -156,7 +156,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
+ 	pci_device_add(virtfn, virtfn->bus);
+ 	mutex_unlock(&iov->dev->sriov->lock);
+ 
+-	pci_bus_add_device(virtfn);
+ 	sprintf(buf, "virtfn%u", id);
+ 	rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
+ 	if (rc)
+@@ -167,6 +166,8 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset)
+ 
+ 	kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
+ 
++	pci_bus_add_device(virtfn);
++
+ 	return 0;
+ 
+ failed2:
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 98101c4118bb..1363fe636281 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -936,7 +936,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
+ 	if (pci_has_legacy_pm_support(pci_dev))
+ 		return pci_legacy_resume_early(dev);
+ 
+-	pci_update_current_state(pci_dev, PCI_D0);
++	/*
++	 * pci_restore_state() requires the device to be in D0 (because of MSI
++	 * restoration among other things), so force it into D0 in case the
++	 * driver's "freeze" callbacks put it into a low-power state directly.
++	 */
++	pci_set_power_state(pci_dev, PCI_D0);
+ 	pci_restore_state(pci_dev);
+ 
+ 	if (drv && drv->pm && drv->pm->thaw_noirq)
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 409f895b5a3d..ffd8fe77b8ae 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3651,6 +3651,10 @@ static bool pci_bus_resetable(struct pci_bus *bus)
+ {
+ 	struct pci_dev *dev;
+ 
++
++	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
++		return false;
++
+ 	list_for_each_entry(dev, &bus->devices, bus_list) {
+ 		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ 		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index b60a325234c5..cca4b4789ac4 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -360,7 +360,14 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
+ 		 * If the error is reported by an end point, we think this
+ 		 * error is related to the upstream link of the end point.
+ 		 */
+-		pci_walk_bus(dev->bus, cb, &result_data);
++		if (state == pci_channel_io_normal)
++			/*
++			 * the error is non-fatal so the bus is ok, just invoke
++			 * the callback for the function that logged the error.
++			 */
++			cb(dev, &result_data);
++		else
++			pci_walk_bus(dev->bus, cb, &result_data);
+ 	}
+ 
+ 	return result_data.result;
+diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
+index 65bf73b70e34..a02a7caf8d4c 100644
+--- a/drivers/pinctrl/pinctrl-st.c
++++ b/drivers/pinctrl/pinctrl-st.c
+@@ -1348,6 +1348,22 @@ static void st_gpio_irq_unmask(struct irq_data *d)
+ 	writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK);
+ }
+ 
++static int st_gpio_irq_request_resources(struct irq_data *d)
++{
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++	st_gpio_direction_input(gc, d->hwirq);
++
++	return gpiochip_lock_as_irq(gc, d->hwirq);
++}
++
++static void st_gpio_irq_release_resources(struct irq_data *d)
++{
++	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++
++	gpiochip_unlock_as_irq(gc, d->hwirq);
++}
++
+ static int st_gpio_irq_set_type(struct irq_data *d, unsigned type)
+ {
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+@@ -1503,12 +1519,14 @@ static struct gpio_chip st_gpio_template = {
+ };
+ 
+ static struct irq_chip st_gpio_irqchip = {
+-	.name		= "GPIO",
+-	.irq_disable	= st_gpio_irq_mask,
+-	.irq_mask	= st_gpio_irq_mask,
+-	.irq_unmask	= st_gpio_irq_unmask,
+-	.irq_set_type	= st_gpio_irq_set_type,
+-	.flags		= IRQCHIP_SKIP_SET_WAKE,
++	.name			= "GPIO",
++	.irq_request_resources	= st_gpio_irq_request_resources,
++	.irq_release_resources	= st_gpio_irq_release_resources,
++	.irq_disable		= st_gpio_irq_mask,
++	.irq_mask		= st_gpio_irq_mask,
++	.irq_unmask		= st_gpio_irq_unmask,
++	.irq_set_type		= st_gpio_irq_set_type,
++	.flags			= IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+ static int st_gpiolib_register_bank(struct st_pinctrl *info,
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+index 1b580ba76453..907d7db3fcee 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun9i-a80.c
+@@ -145,19 +145,19 @@ static const struct sunxi_desc_pin sun9i_a80_pins[] = {
+ 		  SUNXI_FUNCTION(0x0, "gpio_in"),
+ 		  SUNXI_FUNCTION(0x1, "gpio_out"),
+ 		  SUNXI_FUNCTION(0x3, "mcsi"),		/* MCLK */
+-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 14)),	/* PB_EINT14 */
++		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 14)),	/* PB_EINT14 */
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 15),
+ 		  SUNXI_FUNCTION(0x0, "gpio_in"),
+ 		  SUNXI_FUNCTION(0x1, "gpio_out"),
+ 		  SUNXI_FUNCTION(0x3, "mcsi"),		/* SCK */
+ 		  SUNXI_FUNCTION(0x4, "i2c4"),		/* SCK */
+-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 15)),	/* PB_EINT15 */
++		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 15)),	/* PB_EINT15 */
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(B, 16),
+ 		  SUNXI_FUNCTION(0x0, "gpio_in"),
+ 		  SUNXI_FUNCTION(0x1, "gpio_out"),
+ 		  SUNXI_FUNCTION(0x3, "mcsi"),		/* SDA */
+ 		  SUNXI_FUNCTION(0x4, "i2c4"),		/* SDA */
+-		  SUNXI_FUNCTION_IRQ_BANK(0x6, 0, 16)),	/* PB_EINT16 */
++		  SUNXI_FUNCTION_IRQ_BANK(0x6, 1, 16)),	/* PB_EINT16 */
+ 
+ 	/* Hole */
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(C, 0),
+diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
+index f9f205cb1f11..8becddcf130b 100644
+--- a/drivers/platform/x86/Kconfig
++++ b/drivers/platform/x86/Kconfig
+@@ -94,6 +94,7 @@ config DELL_LAPTOP
+ 	tristate "Dell Laptop Extras"
+ 	depends on X86
+ 	depends on DCDBAS
++	depends on DMI
+ 	depends on BACKLIGHT_CLASS_DEVICE
+ 	depends on RFKILL || RFKILL = n
+ 	depends on SERIO_I8042
+@@ -108,6 +109,7 @@ config DELL_LAPTOP
+ config DELL_WMI
+ 	tristate "Dell WMI extras"
+ 	depends on ACPI_WMI
++	depends on DMI
+ 	depends on INPUT
+ 	select INPUT_SPARSEKMAP
+ 	---help---
+diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
+index aeb3f786d2f0..84273a979768 100644
+--- a/drivers/platform/x86/intel_mid_thermal.c
++++ b/drivers/platform/x86/intel_mid_thermal.c
+@@ -416,6 +416,7 @@ static struct thermal_device_info *initialize_sensor(int index)
+ 	return td_info;
+ }
+ 
++#ifdef CONFIG_PM_SLEEP
+ /**
+  * mid_thermal_resume - resume routine
+  * @dev: device structure
+@@ -443,6 +444,7 @@ static int mid_thermal_suspend(struct device *dev)
+ 	 */
+ 	return configure_adc(0);
+ }
++#endif
+ 
+ static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
+ 			 mid_thermal_suspend, mid_thermal_resume);
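
This hunk, like the tc1100-wmi one just below and the mvumi, synaptics and spear_thermal hunks further on, silences "defined but not used" warnings: SIMPLE_DEV_PM_OPS() only references its callback arguments when CONFIG_PM_SLEEP is set, so the callbacks must either sit inside the same #ifdef (as here) or carry __maybe_unused (as in the later hunks). A sketch of both idioms, with a hypothetical driver name (in-tree kernel code):

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Idiom 1: fence the callbacks with the symbol that
	 * SIMPLE_DEV_PM_OPS() itself keys on. */
	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev)  { return 0; }
	#endif

	/* Idiom 2 (alternative): drop the #ifdef and annotate instead:
	 *
	 *	static int __maybe_unused foo_suspend(struct device *dev);
	 */

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
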
+diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
+index e36542564131..e89ac8cd20e8 100644
+--- a/drivers/platform/x86/tc1100-wmi.c
++++ b/drivers/platform/x86/tc1100-wmi.c
+@@ -52,7 +52,9 @@ struct tc1100_data {
+ 	u32 jogdial;
+ };
+ 
++#ifdef CONFIG_PM
+ static struct tc1100_data suspend_data;
++#endif
+ 
+ /* --------------------------------------------------------------------------
+ 				Device Management
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index faa81ac2d481..038da40e4038 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -809,7 +809,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ 	}
+ 
+ 	timerqueue_add(&rtc->timerqueue, &timer->node);
+-	if (!next) {
++	if (!next || ktime_before(timer->node.expires, next->expires)) {
+ 		struct rtc_wkalrm alarm;
+ 		int err;
+ 		alarm.time = rtc_ktime_to_tm(timer->node.expires);
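
The old condition armed the hardware alarm only when the timerqueue had been empty before the add, so a timer queued ahead of the current head silently fired late; the fixed condition also re-arms when the new expiry precedes the head's. A standalone model of that decision, with nanosecond counts standing in for ktime_t:

	#include <stdbool.h>
	#include <stdio.h>

	/* Decide whether a newly queued expiry must reprogram the alarm.
	 * 'head_valid' is false when the queue was empty before the add. */
	static bool need_reprogram(bool head_valid, long long head_ns,
				   long long new_ns)
	{
		return !head_valid || new_ns < head_ns;
	}

	int main(void)
	{
		printf("%d\n", need_reprogram(false, 0, 500));	 /* 1: was empty */
		printf("%d\n", need_reprogram(true, 1000, 500)); /* 1: new head */
		printf("%d\n", need_reprogram(true, 1000, 2000));/* 0: keep alarm */
		return 0;
	}
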
+diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
+index 7061dcae2b09..482af0dda0b0 100644
+--- a/drivers/rtc/rtc-opal.c
++++ b/drivers/rtc/rtc-opal.c
+@@ -58,6 +58,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
+ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+ 	long rc = OPAL_BUSY;
++	int retries = 10;
+ 	u32 y_m_d;
+ 	u64 h_m_s_ms;
+ 	__be32 __y_m_d;
+@@ -67,8 +68,11 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
+ 		if (rc == OPAL_BUSY_EVENT)
+ 			opal_poll_events(NULL);
+-		else
++		else if (retries-- && (rc == OPAL_HARDWARE
++				       || rc == OPAL_INTERNAL_ERROR))
+ 			msleep(10);
++		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
++			break;
+ 	}
+ 
+ 	if (rc != OPAL_SUCCESS)
+@@ -84,6 +88,7 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
+ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+ {
+ 	long rc = OPAL_BUSY;
++	int retries = 10;
+ 	u32 y_m_d = 0;
+ 	u64 h_m_s_ms = 0;
+ 
+@@ -92,8 +97,11 @@ static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
+ 		rc = opal_rtc_write(y_m_d, h_m_s_ms);
+ 		if (rc == OPAL_BUSY_EVENT)
+ 			opal_poll_events(NULL);
+-		else
++		else if (retries-- && (rc == OPAL_HARDWARE
++				       || rc == OPAL_INTERNAL_ERROR))
+ 			msleep(10);
++		else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
++			break;
+ 	}
+ 
+ 	return rc == OPAL_SUCCESS ? 0 : -EIO;
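
Both hunks bound what used to be an unconditional sleep-and-retry: busy codes keep polling, the transient OPAL_HARDWARE/OPAL_INTERNAL_ERROR codes get up to ten retries with a short sleep, and any other code terminates the loop. A standalone model of that retry policy, with made-up status codes and a stubbed firmware call (the control flow is a simplification, not a copy of the driver's):

	#include <stdio.h>
	#include <unistd.h>

	enum { ST_OK, ST_BUSY, ST_HARDWARE };

	/* Stand-in for the firmware call: three transient errors, then OK. */
	static int firmware_call(void)
	{
		static int n;
		return n++ < 3 ? ST_HARDWARE : ST_OK;
	}

	static int read_with_retry(void)
	{
		int retries = 10;
		int rc;

		for (;;) {
			rc = firmware_call();
			if (rc == ST_BUSY)
				continue;		/* still busy: poll again */
			if (rc == ST_HARDWARE && retries-- > 0) {
				usleep(10 * 1000);	/* transient: back off, retry */
				continue;
			}
			break;				/* success or hard failure */
		}
		return rc == ST_OK ? 0 : -1;
	}

	int main(void)
	{
		printf("%d\n", read_with_retry());	/* 0 after 3 transient errors */
		return 0;
	}
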
+diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
+index 6215f6455eb8..7f31087fca31 100644
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -518,10 +518,12 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
+ 	pfxdata->validity.define_extent = 1;
+ 
+ 	/* private uid is kept up to date, conf_data may be outdated */
+-	if (startpriv->uid.type != UA_BASE_DEVICE) {
++	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
+ 		pfxdata->validity.verify_base = 1;
+-		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
+-			pfxdata->validity.hyper_pav = 1;
++
++	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
++		pfxdata->validity.verify_base = 1;
++		pfxdata->validity.hyper_pav = 1;
+ 	}
+ 
+ 	/* define extend data (mostly)*/
+@@ -2964,10 +2966,12 @@ static int prepare_itcw(struct itcw *itcw,
+ 	pfxdata.validity.define_extent = 1;
+ 
+ 	/* private uid is kept up to date, conf_data may be outdated */
+-	if (startpriv->uid.type != UA_BASE_DEVICE) {
++	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
++		pfxdata.validity.verify_base = 1;
++
++	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
+ 		pfxdata.validity.verify_base = 1;
+-		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
+-			pfxdata.validity.hyper_pav = 1;
++		pfxdata.validity.hyper_pav = 1;
+ 	}
+ 
+ 	switch (cmd) {
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index fefd3c512386..6dbf0d5a2a22 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -2790,17 +2790,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
+ 	char daddr[16];
+ 	struct af_iucv_trans_hdr *iucv_hdr;
+ 
+-	skb_pull(skb, 14);
+-	card->dev->header_ops->create(skb, card->dev, 0,
+-				      card->dev->dev_addr, card->dev->dev_addr,
+-				      card->dev->addr_len);
+-	skb_pull(skb, 14);
+-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
+ 	memset(hdr, 0, sizeof(struct qeth_hdr));
+ 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
+ 	hdr->hdr.l3.ext_flags = 0;
+-	hdr->hdr.l3.length = skb->len;
++	hdr->hdr.l3.length = skb->len - ETH_HLEN;
+ 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
++
++	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
+ 	memset(daddr, 0, sizeof(daddr));
+ 	daddr[0] = 0xfe;
+ 	daddr[1] = 0x80;
+@@ -2983,10 +2979,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if ((card->info.type == QETH_CARD_TYPE_IQD) && (!large_send) &&
+ 	    (skb_shinfo(skb)->nr_frags == 0)) {
+ 		new_skb = skb;
+-		if (new_skb->protocol == ETH_P_AF_IUCV)
+-			data_offset = 0;
+-		else
+-			data_offset = ETH_HLEN;
++		data_offset = ETH_HLEN;
+ 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
+ 		if (!hdr)
+ 			goto tx_drop;
+diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
+index ae95e347f37d..42a14c456da0 100644
+--- a/drivers/scsi/advansys.c
++++ b/drivers/scsi/advansys.c
+@@ -6482,18 +6482,17 @@ static uchar AscGetSynPeriodIndex(ASC_DVC_VAR *asc_dvc, uchar syn_time)
+ static uchar
+ AscMsgOutSDTR(ASC_DVC_VAR *asc_dvc, uchar sdtr_period, uchar sdtr_offset)
+ {
+-	EXT_MSG sdtr_buf;
+-	uchar sdtr_period_index;
+-	PortAddr iop_base;
+-
+-	iop_base = asc_dvc->iop_base;
+-	sdtr_buf.msg_type = EXTENDED_MESSAGE;
+-	sdtr_buf.msg_len = MS_SDTR_LEN;
+-	sdtr_buf.msg_req = EXTENDED_SDTR;
+-	sdtr_buf.xfer_period = sdtr_period;
++	PortAddr iop_base = asc_dvc->iop_base;
++	uchar sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
++	EXT_MSG sdtr_buf = {
++		.msg_type = EXTENDED_MESSAGE,
++		.msg_len = MS_SDTR_LEN,
++		.msg_req = EXTENDED_SDTR,
++		.xfer_period = sdtr_period,
++		.req_ack_offset = sdtr_offset,
++	};
+ 	sdtr_offset &= ASC_SYN_MAX_OFFSET;
+-	sdtr_buf.req_ack_offset = sdtr_offset;
+-	sdtr_period_index = AscGetSynPeriodIndex(asc_dvc, sdtr_period);
++
+ 	if (sdtr_period_index <= asc_dvc->max_sdtr_index) {
+ 		AscMemWordCopyPtrToLram(iop_base, ASCV_MSGOUT_BEG,
+ 					(uchar *)&sdtr_buf,
+@@ -11476,6 +11475,9 @@ static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
+ 		ASC_DBG(2, "AdvInitGetConfig()\n");
+ 
+ 		ret = AdvInitGetConfig(pdev, shost) ? -ENODEV : 0;
++#else
++		share_irq = 0;
++		ret = -ENODEV;
+ #endif /* CONFIG_PCI */
+ 	}
+ 
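
The advansys rewrite above swaps field-by-field stores into an on-stack EXT_MSG for a C99 designated initializer; besides being shorter, the initializer zero-fills every member it does not name, whereas the old code left unassigned fields holding stack garbage. A standalone demonstration with a cut-down stand-in struct:

	#include <stdio.h>

	struct ext_msg {
		unsigned char msg_type;
		unsigned char msg_len;
		unsigned char msg_req;
		unsigned char xfer_period;
		unsigned char req_ack_offset;
		unsigned char pad[3];	/* not named below: zero-filled */
	};

	int main(void)
	{
		struct ext_msg m = {
			.msg_type = 0x01,
			.msg_len = 3,
			.xfer_period = 12,
		};

		/* Unnamed members are zero, unlike with field-by-field
		 * assignment of an uninitialized struct. */
		printf("req=%u pad0=%u\n", m.msg_req, m.pad[0]);
		return 0;
	}
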
+diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+index dd00e5fe4a5e..18f782bfc874 100644
+--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
++++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+@@ -1332,6 +1332,7 @@ static void release_offload_resources(struct cxgbi_sock *csk)
+ 		csk, csk->state, csk->flags, csk->tid);
+ 
+ 	cxgbi_sock_free_cpl_skbs(csk);
++	cxgbi_sock_purge_write_queue(csk);
+ 	if (csk->wr_cred != csk->wr_max_cred) {
+ 		cxgbi_sock_purge_wr_queue(csk);
+ 		cxgbi_sock_reset_wr_list(csk);
+diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
+index 2806cfbec2b9..8803bafc48e9 100644
+--- a/drivers/scsi/dpt_i2o.c
++++ b/drivers/scsi/dpt_i2o.c
+@@ -180,11 +180,14 @@ static u8 adpt_read_blink_led(adpt_hba* host)
+  *============================================================================
+  */
+ 
++#ifdef MODULE
+ static struct pci_device_id dptids[] = {
+ 	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ 	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ 	{ 0, }
+ };
++#endif
++
+ MODULE_DEVICE_TABLE(pci,dptids);
+ 
+ static int adpt_detect(struct scsi_host_template* sht)
+diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
+index fff682976c56..466d0a30aae7 100644
+--- a/drivers/scsi/fdomain.c
++++ b/drivers/scsi/fdomain.c
+@@ -1769,7 +1769,7 @@ struct scsi_host_template fdomain_driver_template = {
+ };
+ 
+ #ifndef PCMCIA
+-#ifdef CONFIG_PCI
++#if defined(CONFIG_PCI) && defined(MODULE)
+ 
+ static struct pci_device_id fdomain_pci_tbl[] = {
+ 	{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
+index 8fae03215a85..543c10266984 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.h
++++ b/drivers/scsi/ibmvscsi/ibmvfc.h
+@@ -366,7 +366,7 @@ enum ibmvfc_fcp_rsp_info_codes {
+ };
+ 
+ struct ibmvfc_fcp_rsp_info {
+-	__be16 reserved;
++	u8 reserved[3];
+ 	u8 rsp_code;
+ 	u8 reserved2[4];
+ }__attribute__((packed, aligned (2)));
+diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
+index e5dae7b54d9a..51063177f18e 100644
+--- a/drivers/scsi/initio.c
++++ b/drivers/scsi/initio.c
+@@ -110,11 +110,6 @@
+ #define i91u_MAXQUEUE		2
+ #define i91u_REVID "Initio INI-9X00U/UW SCSI device driver; Revision: 1.04a"
+ 
+-#define I950_DEVICE_ID	0x9500	/* Initio's inic-950 product ID   */
+-#define I940_DEVICE_ID	0x9400	/* Initio's inic-940 product ID   */
+-#define I935_DEVICE_ID	0x9401	/* Initio's inic-935 product ID   */
+-#define I920_DEVICE_ID	0x0002	/* Initio's other product ID      */
+-
+ #ifdef DEBUG_i91u
+ static unsigned int i91u_debug = DEBUG_DEFAULT;
+ #endif
+@@ -127,17 +122,6 @@ static int setup_debug = 0;
+ 
+ static void i91uSCBPost(u8 * pHcb, u8 * pScb);
+ 
+-/* PCI Devices supported by this driver */
+-static struct pci_device_id i91u_pci_devices[] = {
+-	{ PCI_VENDOR_ID_INIT,  I950_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+-	{ PCI_VENDOR_ID_INIT,  I940_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+-	{ PCI_VENDOR_ID_INIT,  I935_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+-	{ PCI_VENDOR_ID_INIT,  I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+-	{ PCI_VENDOR_ID_DOMEX, I920_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+-	{ }
+-};
+-MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
+-
+ #define DEBUG_INTERRUPT 0
+ #define DEBUG_QUEUE     0
+ #define DEBUG_STATE     0
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 39e511216fd9..8826110991eb 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 
+ 	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+ 		reason = FAILURE_SESSION_IN_RECOVERY;
+-		sc->result = DID_REQUEUE;
++		sc->result = DID_REQUEUE << 16;
+ 		goto fault;
+ 	}
+ 
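
The one-line fix reflects how the SCSI midlayer packs sc->result: the host/driver status (DID_*) occupies bits 16-23, so DID_REQUEUE must be shifted left by 16; stored unshifted it lands in the low status byte and the midlayer never sees the requeue request. A standalone demonstration of the packing (host_byte() matches the kernel macro; the status view is simplified):

	#include <stdio.h>

	#define DID_REQUEUE	0x0d			/* host code, as in the kernel */
	#define host_byte(r)	(((r) >> 16) & 0xff)
	#define status_byte(r)	((r) & 0xff)		/* simplified view */

	int main(void)
	{
		int wrong = DID_REQUEUE;		/* lands in the status byte */
		int right = DID_REQUEUE << 16;		/* lands in the host byte */

		printf("wrong: host=%#x status=%#x\n",
		       host_byte(wrong), status_byte(wrong));
		printf("right: host=%#x status=%#x\n",
		       host_byte(right), status_byte(right));
		return 0;
	}
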
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 30f2fe9ba766..9c09ce9b98da 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -6891,7 +6891,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 			did, vport->port_state, ndlp->nlp_flag);
+ 
+ 		phba->fc_stat.elsRcvPRLI++;
+-		if (vport->port_state < LPFC_DISC_AUTH) {
++		if ((vport->port_state < LPFC_DISC_AUTH) &&
++		    (vport->fc_flag & FC_FABRIC)) {
+ 			rjt_err = LSRJT_UNABLE_TPC;
+ 			rjt_exp = LSEXP_NOTHING_MORE;
+ 			break;
+diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
+index 2500f15d437f..574b1a9b2b32 100644
+--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
++++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
+@@ -4767,7 +4767,8 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+ 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+ 	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
+-	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
++	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
++	    phba->sli_rev != LPFC_SLI_REV4) {
+ 		/* For this case we need to cleanup the default rpi
+ 		 * allocated by the firmware.
+ 		 */
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 3757a7399983..d7ee72ef99ed 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -2953,7 +2953,7 @@ struct lpfc_mbx_get_port_name {
+ #define MB_CEQ_STATUS_QUEUE_FLUSHING		0x4
+ #define MB_CQE_STATUS_DMA_FAILED		0x5
+ 
+-#define LPFC_MBX_WR_CONFIG_MAX_BDE		8
++#define LPFC_MBX_WR_CONFIG_MAX_BDE		1
+ struct lpfc_mbx_wr_object {
+ 	struct mbox_header header;
+ 	union {
+diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
+index 3e6b866759fe..c51e1537ed8e 100644
+--- a/drivers/scsi/mvumi.c
++++ b/drivers/scsi/mvumi.c
+@@ -2629,7 +2629,7 @@ static void mvumi_shutdown(struct pci_dev *pdev)
+ 	mvumi_flush_cache(mhba);
+ }
+ 
+-static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
++static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+ {
+ 	struct mvumi_hba *mhba = NULL;
+ 
+@@ -2648,7 +2648,7 @@ static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+ 	return 0;
+ }
+ 
+-static int mvumi_resume(struct pci_dev *pdev)
++static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
+ {
+ 	int ret;
+ 	struct mvumi_hba *mhba = NULL;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 102806a961da..2589a75f0810 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -160,7 +160,6 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
+ 	struct list_head rq_list; /* head of request list */
+ 	struct fasync_struct *async_qp;	/* used by asynchronous notification */
+ 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
+-	char low_dma;		/* as in parent but possibly overridden to 1 */
+ 	char force_packid;	/* 1 -> pack_id input to read(), 0 -> ignored */
+ 	char cmd_q;		/* 1 -> allow command queuing, 0 -> don't */
+ 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
+@@ -926,24 +925,14 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 				/* strange ..., for backward compatibility */
+ 		return sfp->timeout_user;
+ 	case SG_SET_FORCE_LOW_DMA:
+-		result = get_user(val, ip);
+-		if (result)
+-			return result;
+-		if (val) {
+-			sfp->low_dma = 1;
+-			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
+-				val = (int) sfp->reserve.bufflen;
+-				sg_remove_scat(sfp, &sfp->reserve);
+-				sg_build_reserve(sfp, val);
+-			}
+-		} else {
+-			if (atomic_read(&sdp->detaching))
+-				return -ENODEV;
+-			sfp->low_dma = sdp->device->host->unchecked_isa_dma;
+-		}
++		/*
++		 * N.B. This ioctl never worked properly, but failed to
++		 * return an error value. So returning '0' to keep compatibility
++		 * with legacy applications.
++		 */
+ 		return 0;
+ 	case SG_GET_LOW_DMA:
+-		return put_user((int) sfp->low_dma, ip);
++		return put_user((int) sdp->device->host->unchecked_isa_dma, ip);
+ 	case SG_GET_SCSI_ID:
+ 		if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
+ 			return -EFAULT;
+@@ -1864,6 +1853,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
+ 	int sg_tablesize = sfp->parentdp->sg_tablesize;
+ 	int blk_size = buff_size, order;
+ 	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
++	struct sg_device *sdp = sfp->parentdp;
+ 
+ 	if (blk_size < 0)
+ 		return -EFAULT;
+@@ -1889,7 +1879,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
+ 			scatter_elem_sz_prev = num;
+ 	}
+ 
+-	if (sfp->low_dma)
++	if (sdp->device->host->unchecked_isa_dma)
+ 		gfp_mask |= GFP_DMA;
+ 
+ 	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+@@ -2152,8 +2142,6 @@ sg_add_sfp(Sg_device * sdp)
+ 	sfp->timeout = SG_DEFAULT_TIMEOUT;
+ 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+ 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+-	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
+-	    sdp->device->host->unchecked_isa_dma : 1;
+ 	sfp->cmd_q = SG_DEF_COMMAND_Q;
+ 	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
+ 	sfp->parentdp = sdp;
+@@ -2612,7 +2600,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ 			   jiffies_to_msecs(fp->timeout),
+ 			   fp->reserve.bufflen,
+ 			   (int) fp->reserve.k_use_sg,
+-			   (int) fp->low_dma);
++			   (int) sdp->device->host->unchecked_isa_dma);
+ 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
+ 			   (int) fp->cmd_q, (int) fp->force_packid,
+ 			   (int) fp->keep_orphan);
+diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
+index 3b3b56f4a830..82ed99848378 100644
+--- a/drivers/scsi/sim710.c
++++ b/drivers/scsi/sim710.c
+@@ -176,8 +176,7 @@ static struct eisa_device_id sim710_eisa_ids[] = {
+ };
+ MODULE_DEVICE_TABLE(eisa, sim710_eisa_ids);
+ 
+-static __init int
+-sim710_eisa_probe(struct device *dev)
++static int sim710_eisa_probe(struct device *dev)
+ {
+ 	struct eisa_device *edev = to_eisa_device(dev);
+ 	unsigned long io_addr = edev->base_addr;
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 648a44675880..05b76cdfb263 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -4290,12 +4290,15 @@ static int ufshcd_config_vreg(struct device *dev,
+ 		struct ufs_vreg *vreg, bool on)
+ {
+ 	int ret = 0;
+-	struct regulator *reg = vreg->reg;
+-	const char *name = vreg->name;
++	struct regulator *reg;
++	const char *name;
+ 	int min_uV, uA_load;
+ 
+ 	BUG_ON(!vreg);
+ 
++	reg = vreg->reg;
++	name = vreg->name;
++
+ 	if (regulator_count_voltages(reg) > 0) {
+ 		min_uV = on ? vreg->min_uV : 0;
+ 		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index 412b9c86b997..967ba6329a58 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -1221,12 +1221,23 @@ static int spi_imx_remove(struct platform_device *pdev)
+ {
+ 	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
++	int ret;
+ 
+ 	spi_bitbang_stop(&spi_imx->bitbang);
+ 
++	ret = clk_enable(spi_imx->clk_per);
++	if (ret)
++		return ret;
++
++	ret = clk_enable(spi_imx->clk_ipg);
++	if (ret) {
++		clk_disable(spi_imx->clk_per);
++		return ret;
++	}
++
+ 	writel(0, spi_imx->base + MXC_CSPICTRL);
+-	clk_unprepare(spi_imx->clk_ipg);
+-	clk_unprepare(spi_imx->clk_per);
++	clk_disable_unprepare(spi_imx->clk_ipg);
++	clk_disable_unprepare(spi_imx->clk_per);
+ 	spi_imx_sdma_exit(spi_imx);
+ 	spi_master_put(master);
+ 
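
probe() leaves the two clocks prepared but not enabled, and the MXC_CSPICTRL write in remove() touches the register block, which is only safe with the clocks running; the fix enables both (rolling back the first if the second fails) and then uses clk_disable_unprepare() so the probe-time prepare is dropped as well. A sketch of that acquire/rollback/release shape, with hypothetical names (in-tree kernel code):

	#include <linux/clk.h>
	#include <linux/io.h>

	#define FOO_CTRL	0x08		/* placeholder register offset */

	struct foo {
		struct clk *clk_per, *clk_ipg;	/* prepared in probe() */
		void __iomem *base;
	};

	static int foo_quiesce(struct foo *priv)
	{
		int ret;

		ret = clk_enable(priv->clk_per);
		if (ret)
			return ret;

		ret = clk_enable(priv->clk_ipg);
		if (ret) {
			clk_disable(priv->clk_per);	/* undo partial setup */
			return ret;
		}

		writel(0, priv->base + FOO_CTRL);	/* safe: clocks running */

		/* disable_unprepare also drops probe()'s prepare count */
		clk_disable_unprepare(priv->clk_ipg);
		clk_disable_unprepare(priv->clk_per);
		return 0;
	}
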
+diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
+index 39d7c7c70112..2eea3de5a668 100644
+--- a/drivers/spi/spi-sun4i.c
++++ b/drivers/spi/spi-sun4i.c
+@@ -458,7 +458,7 @@ err_free_master:
+ 
+ static int sun4i_spi_remove(struct platform_device *pdev)
+ {
+-	pm_runtime_disable(&pdev->dev);
++	pm_runtime_force_suspend(&pdev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index 7dd6bde4f325..c40bd7fbc210 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -758,10 +758,12 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		break;
+ 	case ASHMEM_SET_SIZE:
+ 		ret = -EINVAL;
++		mutex_lock(&ashmem_mutex);
+ 		if (!asma->file) {
+ 			ret = 0;
+ 			asma->size = (size_t) arg;
+ 		}
++		mutex_unlock(&ashmem_mutex);
+ 		break;
+ 	case ASHMEM_GET_SIZE:
+ 		ret = asma->size;
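
ASHMEM_SET_SIZE is a classic check-then-act: it may change asma->size only while no mapping exists, but without the lock the check of asma->file can race with a concurrent mmap() installing one, letting the size change after the mapping was sized. Holding ashmem_mutex across both steps closes the window. A standalone model of the guarded check-then-act, with simplified stand-in state rather than ashmem internals:

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static void *file;	/* NULL until a "mapping" is created */
	static size_t size;

	/* Resizing is only legal before the first mapping exists. */
	static int set_size(size_t new_size)
	{
		int ret = -1;

		pthread_mutex_lock(&lock);
		if (!file) {		/* check ... */
			size = new_size;/* ... then act, atomically */
			ret = 0;
		}
		pthread_mutex_unlock(&lock);
		return ret;
	}

	static void do_mmap(void)
	{
		pthread_mutex_lock(&lock);
		file = &size;		/* a mapping now exists */
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		printf("%d\n", set_size(4096));	/* 0: no mapping yet */
		do_mmap();
		printf("%d\n", set_size(8192));	/* -1: too late */
		return 0;
	}
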
+diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+index 96c1c2d4a112..6e73f4e130b5 100644
+--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
++++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+@@ -1397,19 +1397,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
+ 	if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ 	    (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
+ 		len = pcur_bss->Ssid.SsidLength;
+-
+-		wrqu->essid.length = len;
+-
+ 		memcpy(extra, pcur_bss->Ssid.Ssid, len);
+-
+-		wrqu->essid.flags = 1;
+ 	} else {
+-		ret = -1;
+-		goto exit;
++		len = 0;
++		*extra = 0;
+ 	}
+-
+-exit:
+-
++	wrqu->essid.length = len;
++	wrqu->essid.flags = 1;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+index 0f524bb7b41d..daff4e76b6d6 100644
+--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
++++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+@@ -1039,7 +1039,6 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM
+ /**
+  * synaptics_rmi4_suspend() - suspend the touch screen controller
+  * @dev: pointer to device structure
+@@ -1047,7 +1046,7 @@ static int synaptics_rmi4_remove(struct i2c_client *client)
+  * This function is used to suspend the
+  * touch panel controller and returns integer
+  */
+-static int synaptics_rmi4_suspend(struct device *dev)
++static int __maybe_unused synaptics_rmi4_suspend(struct device *dev)
+ {
+ 	/* Touch sleep mode */
+ 	int retval;
+@@ -1081,7 +1080,7 @@ static int synaptics_rmi4_suspend(struct device *dev)
+  * This function is used to resume the touch panel
+  * controller and returns integer.
+  */
+-static int synaptics_rmi4_resume(struct device *dev)
++static int __maybe_unused synaptics_rmi4_resume(struct device *dev)
+ {
+ 	int retval;
+ 	unsigned char intr_status;
+@@ -1112,8 +1111,6 @@ static int synaptics_rmi4_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-#endif
+-
+ static SIMPLE_DEV_PM_OPS(synaptics_rmi4_dev_pm_ops, synaptics_rmi4_suspend,
+ 			 synaptics_rmi4_resume);
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2d6b0cf0929e..614811e93298 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1755,7 +1755,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	struct iscsi_tmr_req *tmr_req;
+ 	struct iscsi_tm *hdr;
+ 	int out_of_order_cmdsn = 0, ret;
+-	bool sess_ref = false;
+ 	u8 function, tcm_function = TMR_UNKNOWN;
+ 
+ 	hdr			= (struct iscsi_tm *) buf;
+@@ -1797,18 +1796,17 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 					     buf);
+ 	}
+ 
++	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
++			      conn->sess->se_sess, 0, DMA_NONE,
++			      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
++
++	target_get_sess_cmd(&cmd->se_cmd, true);
++
+ 	/*
+ 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
+ 	 * LIO-Target $FABRIC_MOD
+ 	 */
+ 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+-		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+-				      conn->sess->se_sess, 0, DMA_NONE,
+-				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+-
+-		target_get_sess_cmd(&cmd->se_cmd, true);
+-		sess_ref = true;
+-
+ 		switch (function) {
+ 		case ISCSI_TM_FUNC_ABORT_TASK:
+ 			tcm_function = TMR_ABORT_TASK;
+@@ -1947,12 +1945,8 @@ attach:
+ 	 * For connection recovery, this is also the default action for
+ 	 * TMR TASK_REASSIGN.
+ 	 */
+-	if (sess_ref) {
+-		pr_debug("Handle TMR, using sess_ref=true check\n");
+-		target_put_sess_cmd(&cmd->se_cmd);
+-	}
+-
+ 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
++	target_put_sess_cmd(&cmd->se_cmd);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 39e8f22be68b..b2edb5f6e6b9 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -860,7 +860,7 @@ static int tcmu_configure_device(struct se_device *dev)
+ 	info->version = xstr(TCMU_MAILBOX_VERSION);
+ 
+ 	info->mem[0].name = "tcm-user command & data buffer";
+-	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
++	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
+ 	info->mem[0].size = TCMU_RING_SIZE;
+ 	info->mem[0].memtype = UIO_MEM_VIRTUAL;
+ 
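
The double cast fixes a 32-bit build warning: with a 64-bit phys_addr_t (e.g. ARM LPAE) a direct pointer-to-integer cast of different width draws a diagnostic, while casting through uintptr_t first converts at exactly pointer width and then widens as an ordinary integer conversion. A standalone demonstration:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t phys_addr_t;	/* e.g. a 32-bit kernel with LPAE */

	int main(void)
	{
		int x;
		/* (phys_addr_t)&x alone can warn: pointer and target integer
		 * differ in width.  uintptr_t matches the pointer exactly,
		 * then widens cleanly. */
		phys_addr_t addr = (phys_addr_t)(uintptr_t)&x;

		printf("%llu\n", (unsigned long long)addr);
		return 0;
	}
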
+diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
+index bddb71744a6c..9d42f88a4224 100644
+--- a/drivers/thermal/spear_thermal.c
++++ b/drivers/thermal/spear_thermal.c
+@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
+ 	.get_temp = thermal_get_temp,
+ };
+ 
+-#ifdef CONFIG_PM
+-static int spear_thermal_suspend(struct device *dev)
++static int __maybe_unused spear_thermal_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int spear_thermal_resume(struct device *dev)
++static int __maybe_unused spear_thermal_resume(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
+@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
+ 
+ 	return 0;
+ }
+-#endif
+ 
+ static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
+ 		spear_thermal_resume);
+diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
+index c01f45095877..82c4d2e45319 100644
+--- a/drivers/tty/Kconfig
++++ b/drivers/tty/Kconfig
+@@ -226,7 +226,7 @@ config CYCLADES
+ 
+ config CYZ_INTR
+ 	bool "Cyclades-Z interrupt mode operation"
+-	depends on CYCLADES
++	depends on CYCLADES && PCI
+ 	help
+ 	  The Cyclades-Z family of multiport cards allows 2 (two) driver op
+ 	  modes: polling and interrupt. In polling mode, the driver will check
+diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
+index 7a3d146a5f0e..5cc3ca1dd5c9 100644
+--- a/drivers/tty/hvc/hvc_xen.c
++++ b/drivers/tty/hvc/hvc_xen.c
+@@ -322,6 +322,7 @@ void xen_console_resume(void)
+ 	}
+ }
+ 
++#ifdef CONFIG_HVC_XEN_FRONTEND
+ static void xencons_disconnect_backend(struct xencons_info *info)
+ {
+ 	if (info->irq > 0)
+@@ -362,7 +363,6 @@ static int xen_console_remove(struct xencons_info *info)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_HVC_XEN_FRONTEND
+ static int xencons_remove(struct xenbus_device *dev)
+ {
+ 	return xen_console_remove(dev_get_drvdata(&dev->dev));
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index aba20f66bdd9..66e257b5a5b7 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1808,7 +1808,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+-	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
++	if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
+ 		bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
+ 		ldata->line_start = ldata->read_tail;
+ 		if (!L_ICANON(tty) || !read_cnt(ldata)) {
+@@ -2517,7 +2517,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
+ 	case TIOCINQ:
+ 		down_write(&tty->termios_rwsem);
+-		if (L_ICANON(tty))
++		if (L_ICANON(tty) && !L_EXTPROC(tty))
+ 			retval = inq_canon(ldata);
+ 		else
+ 			retval = read_cnt(ldata);
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 529cc86283e7..9c27ee008dff 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -133,6 +133,12 @@ static void sysrq_handle_crash(int key)
+ {
+ 	char *killer = NULL;
+ 
++	/* we need to release the RCU read lock here,
++	 * otherwise we get an annoying
++	 * 'BUG: sleeping function called from invalid context'
++	 * complaint from the kernel before the panic.
++	 */
++	rcu_read_unlock();
+ 	panic_on_oops = 1;	/* force panic */
+ 	wmb();
+ 	*killer = 1;
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 463d8a3375f5..eeed4b45d35c 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -381,7 +381,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
+ 
+ 	res = usb_submit_urb(acm->read_urbs[index], mem_flags);
+ 	if (res) {
+-		if (res != -EPERM) {
++		if (res != -EPERM && res != -ENODEV) {
+ 			dev_err(&acm->data->dev,
+ 					"%s - usb_submit_urb failed: %d\n",
+ 					__func__, res);
+@@ -1707,6 +1707,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
+ 	.driver_info = SINGLE_RX_URB, /* firmware bug */
+ 	},
++	{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
++	.driver_info = SINGLE_RX_URB,
++	},
+ 	{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
+ 	.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ 	},
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 50010282c010..774c97bb1c08 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* Logitech HD Pro Webcams C920, C920-C and C930e */
++	/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+ 	/* Logitech ConferenceCam CC3000e */
+ 	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
+@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
+ 	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* ELSA MicroLink 56K */
++	{ USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+ 
+@@ -221,6 +225,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Corsair K70 RGB */
++	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Corsair Strafe RGB */
+ 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index ff56aaa00bf7..3ce30909cbe4 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -2376,6 +2376,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
+ 		break;
+ 	}
+ 
++	dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
++
+ 	/* Enable USB2 LPM Capability */
+ 
+ 	if ((dwc->revision > DWC3_REVISION_194A)
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index a01d90fe37d9..54c15622e133 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -103,7 +103,6 @@ int config_ep_by_speed(struct usb_gadget *g,
+ 			struct usb_function *f,
+ 			struct usb_ep *_ep)
+ {
+-	struct usb_composite_dev	*cdev = get_gadget_data(g);
+ 	struct usb_endpoint_descriptor *chosen_desc = NULL;
+ 	struct usb_descriptor_header **speed_desc = NULL;
+ 
+@@ -170,8 +169,12 @@ ep_found:
+ 			_ep->maxburst = comp_desc->bMaxBurst + 1;
+ 			break;
+ 		default:
+-			if (comp_desc->bMaxBurst != 0)
++			if (comp_desc->bMaxBurst != 0) {
++				struct usb_composite_dev *cdev;
++
++				cdev = get_gadget_data(g);
+ 				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
++			}
+ 			_ep->maxburst = 1;
+ 			break;
+ 		}
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 2c25a5dec442..6b62bb5c021c 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2756,10 +2756,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
+ 	struct ffs_data *ffs = func->ffs;
+ 
+ 	const int full = !!func->ffs->fs_descs_count;
+-	const int high = gadget_is_dualspeed(func->gadget) &&
+-		func->ffs->hs_descs_count;
+-	const int super = gadget_is_superspeed(func->gadget) &&
+-		func->ffs->ss_descs_count;
++	const int high = !!func->ffs->hs_descs_count;
++	const int super = !!func->ffs->ss_descs_count;
+ 
+ 	int fs_len, hs_len, ss_len, ret, i;
+ 
+@@ -3486,7 +3484,8 @@ static void ffs_closed(struct ffs_data *ffs)
+ 	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
+ 	ffs_dev_unlock();
+ 
+-	unregister_gadget_item(ci);
++	if (test_bit(FFS_FL_BOUND, &ffs->flags))
++		unregister_gadget_item(ci);
+ 	return;
+ done:
+ 	ffs_dev_unlock();
+diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
+index 7405ce32a690..0e704a857115 100644
+--- a/drivers/usb/gadget/function/f_uvc.c
++++ b/drivers/usb/gadget/function/f_uvc.c
+@@ -611,6 +611,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
+ 	opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U);
+ 	opts->streaming_maxburst = min(opts->streaming_maxburst, 15U);
+ 
++	/* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */
++	if (opts->streaming_maxburst &&
++	    (opts->streaming_maxpacket % 1024) != 0) {
++		opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
++		INFO(cdev, "overriding streaming_maxpacket to %d\n",
++		     opts->streaming_maxpacket);
++	}
++
+ 	/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
+ 	 * module parameters.
+ 	 *
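
For SuperSpeed isochronous endpoints the USB spec ties bursting to full-size packets, so when streaming_maxburst is nonzero the gadget rounds a user-supplied streaming_maxpacket up to the next multiple of 1024 instead of rejecting it. A standalone demonstration of the rounding (the macro mirrors the kernel's roundup() semantics):

	#include <stdio.h>

	/* Round x up to the next multiple of y (y > 0). */
	#define roundup(x, y)	((((x) + (y) - 1) / (y)) * (y))

	int main(void)
	{
		printf("%d\n", roundup(512, 1024));	/* 1024 */
		printf("%d\n", roundup(1024, 1024));	/* 1024 */
		printf("%d\n", roundup(1025, 1024));	/* 2048 */
		return 0;
	}
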
+diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
+index 613547f07828..2e04d6596ac6 100644
+--- a/drivers/usb/gadget/udc/pch_udc.c
++++ b/drivers/usb/gadget/udc/pch_udc.c
+@@ -1534,7 +1534,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
+ 		td = phys_to_virt(addr);
+ 		addr2 = (dma_addr_t)td->next;
+ 		pci_pool_free(dev->data_requests, td, addr);
+-		td->next = 0x00;
+ 		addr = addr2;
+ 	}
+ 	req->chain_len = 1;
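
The deleted line stored to td->next after td had already been returned to the DMA pool, a use-after-free; the loop is correct without it because the successor is saved into addr2 before the free. A standalone demonstration of the safe chain-freeing idiom:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static void free_chain(struct node *n)
	{
		while (n) {
			struct node *next = n->next;	/* save before free */
			free(n);			/* n is dead now ... */
			n = next;			/* ... so never touch it */
		}
	}

	int main(void)
	{
		struct node *a = calloc(1, sizeof(*a));
		struct node *b = calloc(1, sizeof(*b));

		if (!a || !b)
			return 1;
		a->next = b;
		free_chain(a);
		puts("freed");
		return 0;
	}
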
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index 47d2c09e4f35..5cd4b286b198 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -1017,6 +1017,8 @@ skip_ed:
+ 		 * have modified this list.  normally it's just prepending
+ 		 * entries (which we'd ignore), but paranoia won't hurt.
+ 		 */
++		*last = ed->ed_next;
++		ed->ed_next = NULL;
+ 		modified = 0;
+ 
+ 		/* unlink urbs as requested, but rescan the list after
+@@ -1075,21 +1077,22 @@ rescan_this:
+ 			goto rescan_this;
+ 
+ 		/*
+-		 * If no TDs are queued, take ED off the ed_rm_list.
++		 * If no TDs are queued, ED is now idle.
+ 		 * Otherwise, if the HC is running, reschedule.
+-		 * If not, leave it on the list for further dequeues.
++		 * If the HC isn't running, add ED back to the
++		 * start of the list for later processing.
+ 		 */
+ 		if (list_empty(&ed->td_list)) {
+-			*last = ed->ed_next;
+-			ed->ed_next = NULL;
+ 			ed->state = ED_IDLE;
+ 			list_del(&ed->in_use_list);
+ 		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
+-			*last = ed->ed_next;
+-			ed->ed_next = NULL;
+ 			ed_schedule(ohci, ed);
+ 		} else {
+-			last = &ed->ed_next;
++			ed->ed_next = ohci->ed_rm_list;
++			ohci->ed_rm_list = ed;
++			/* Don't loop on the same ED */
++			if (last == &ohci->ed_rm_list)
++				last = &ed->ed_next;
+ 		}
+ 
+ 		if (modified)
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index e91cbf360afe..8a82e14829e9 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -181,6 +181,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 		xhci->quirks |= XHCI_BROKEN_STREAMS;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
++			pdev->device == 0x0014)
++		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 	if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ 			pdev->device == 0x0015)
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 23c5bdab988d..e92b9903faa4 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -266,6 +266,7 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+ static struct platform_driver usb_xhci_driver = {
+ 	.probe	= xhci_plat_probe,
+ 	.remove	= xhci_plat_remove,
++	.shutdown	= usb_hcd_platform_shutdown,
+ 	.driver	= {
+ 		.name = "xhci-hcd",
+ 		.pm = DEV_PM_OPS,
+diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
+index 82503a7ff6c8..2bbca7d674d6 100644
+--- a/drivers/usb/misc/ldusb.c
++++ b/drivers/usb/misc/ldusb.c
+@@ -46,6 +46,9 @@
+ #define USB_DEVICE_ID_LD_MICROCASSYTIME		0x1033	/* USB Product ID of Micro-CASSY Time (reserved) */
+ #define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE	0x1035	/* USB Product ID of Micro-CASSY Temperature */
+ #define USB_DEVICE_ID_LD_MICROCASSYPH		0x1038	/* USB Product ID of Micro-CASSY pH */
++#define USB_DEVICE_ID_LD_POWERANALYSERCASSY	0x1040	/* USB Product ID of Power Analyser CASSY */
++#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY	0x1042	/* USB Product ID of Converter Controller CASSY */
++#define USB_DEVICE_ID_LD_MACHINETESTCASSY	0x1043	/* USB Product ID of Machine Test CASSY */
+ #define USB_DEVICE_ID_LD_JWM		0x1080	/* USB Product ID of Joule and Wattmeter */
+ #define USB_DEVICE_ID_LD_DMMP		0x1081	/* USB Product ID of Digital Multimeter P (reserved) */
+ #define USB_DEVICE_ID_LD_UMIP		0x1090	/* USB Product ID of UMI P */
+@@ -94,6 +97,9 @@ static const struct usb_device_id ld_usb_table[] = {
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
++	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
++	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
++	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
+ 	{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
+diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
+index 64ff5b91752d..b117a1f6bfc3 100644
+--- a/drivers/usb/misc/usb3503.c
++++ b/drivers/usb/misc/usb3503.c
+@@ -292,6 +292,8 @@ static int usb3503_probe(struct usb3503 *hub)
+ 	if (gpio_is_valid(hub->gpio_reset)) {
+ 		err = devm_gpio_request_one(dev, hub->gpio_reset,
+ 				GPIOF_OUT_INIT_LOW, "usb3503 reset");
++		/* Datasheet defines a hardware reset to be at least 100us */
++		usleep_range(100, 10000);
+ 		if (err) {
+ 			dev_err(dev,
+ 				"unable to request GPIO %d as reset pin (%d)\n",
+diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
+index 9a62e89d6dc0..bbec84dd34fb 100644
+--- a/drivers/usb/mon/mon_bin.c
++++ b/drivers/usb/mon/mon_bin.c
+@@ -1000,7 +1000,9 @@ static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg
+ 		break;
+ 
+ 	case MON_IOCQ_RING_SIZE:
++		mutex_lock(&rp->fetch_lock);
+ 		ret = rp->b_size;
++		mutex_unlock(&rp->fetch_lock);
+ 		break;
+ 
+ 	case MON_IOCT_RING_SIZE:
+@@ -1227,12 +1229,16 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 	unsigned long offset, chunk_idx;
+ 	struct page *pageptr;
+ 
++	mutex_lock(&rp->fetch_lock);
+ 	offset = vmf->pgoff << PAGE_SHIFT;
+-	if (offset >= rp->b_size)
++	if (offset >= rp->b_size) {
++		mutex_unlock(&rp->fetch_lock);
+ 		return VM_FAULT_SIGBUS;
++	}
+ 	chunk_idx = offset / CHUNK_SIZE;
+ 	pageptr = rp->b_vec[chunk_idx].pg;
+ 	get_page(pageptr);
++	mutex_unlock(&rp->fetch_lock);
+ 	vmf->page = pageptr;
+ 	return 0;
+ }
+diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
+index e93845c26bdb..c17495e7fcc5 100644
+--- a/drivers/usb/musb/ux500_dma.c
++++ b/drivers/usb/musb/ux500_dma.c
+@@ -207,9 +207,6 @@ static int ux500_dma_channel_program(struct dma_channel *channel,
+ 	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ 		channel->status == MUSB_DMA_STATUS_BUSY);
+ 
+-	if (!ux500_dma_is_compatible(channel, packet_sz, (void *)dma_addr, len))
+-		return false;
+-
+ 	channel->status = MUSB_DMA_STATUS_BUSY;
+ 	channel->actual_len = 0;
+ 	ret = ux500_configure_channel(channel, packet_sz, mode, dma_addr, len);
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 8bb9367ada45..6f37966ea54b 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -999,6 +999,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
+ 	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
+ 		goto usbhsf_pio_prepare_pop;
+ 
++	/* return at this time if the pipe is running */
++	if (usbhs_pipe_is_running(pipe))
++		return 0;
++
+ 	usbhs_pipe_config_change_bfre(pipe, 1);
+ 
+ 	ret = usbhsf_fifo_select(pipe, fifo, 0);
+@@ -1189,6 +1193,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
+ 	usbhsf_fifo_clear(pipe, fifo);
+ 	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
+ 
++	usbhs_pipe_running(pipe, 0);
+ 	usbhsf_dma_stop(pipe, fifo);
+ 	usbhsf_dma_unmap(pkt);
+ 	usbhsf_fifo_unselect(pipe, pipe->fifo);
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index b7cf1982d1d9..941716c1177e 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
+ 		- Google USB serial devices
+ 		- HP4x calculators
+ 		- a number of Motorola phones
++		- Motorola Tetra devices
+ 		- Novatel Wireless GPS receivers
+ 		- Siemens USB/MPI adapter.
+ 		- ViVOtech ViVOpay USB device.
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index b1be08570088..142c876e7b19 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -119,6 +119,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+ 	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
++	{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -168,6 +169,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x18EF, 0xE030) }, /* ELV ALC 8xxx Battery Charger */
+ 	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 749e1b674145..6947985ccfb0 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2219,7 +2219,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
+ 		/* something went wrong */
+ 		dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
+ 			__func__, status);
+-		usb_kill_urb(urb);
+ 		usb_free_urb(urb);
+ 		atomic_dec(&CmdUrbs);
+ 		return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index dc489fb4261b..0d31ca1cbf35 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -277,6 +277,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_ME910			0x1100
++#define TELIT_PRODUCT_ME910_DUAL_MODEM		0x1101
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
+@@ -374,6 +375,9 @@ static void option_instat_callback(struct urb *urb);
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
+ #define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
+ 
++/* Fujisoft products */
++#define FUJISOFT_PRODUCT_FS040U			0x9b02
++
+ /* iBall 3.5G connect wireless modem */
+ #define IBALL_3_5G_CONNECT			0x9605
+ 
+@@ -642,6 +646,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
+ 	.reserved = BIT(1) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(3),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(2),
+@@ -1241,6 +1250,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ 		.driver_info = (kernel_ulong_t)&telit_me910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
++		.driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
+@@ -1874,6 +1885,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
+ 	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+ 	},
++	{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
++	 .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index a51b28379850..3da25ad267a2 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -39,6 +39,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
++	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
+ 	{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 3b5a15d1dc0d..123289085ee2 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -17,6 +17,7 @@
+ #define PL2303_PRODUCT_ID_DCU11		0x1234
+ #define PL2303_PRODUCT_ID_PHAROS	0xaaa0
+ #define PL2303_PRODUCT_ID_RSAQ3		0xaaa2
++#define PL2303_PRODUCT_ID_CHILITAG	0xaaa8
+ #define PL2303_PRODUCT_ID_ALDIGA	0x0611
+ #define PL2303_PRODUCT_ID_MMX		0x0612
+ #define PL2303_PRODUCT_ID_GPRS		0x0609
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index e98b6e57b703..6aa7ff2c1cf7 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
+ 	{ USB_DEVICE(0x22b8, 0x2c64) }	/* Motorola V950 phone */
+ DEVICE(moto_modem, MOTO_IDS);
+ 
++/* Motorola Tetra driver */
++#define MOTOROLA_TETRA_IDS()			\
++	{ USB_DEVICE(0x0cad, 0x9011) }	/* Motorola Solutions TETRA PEI */
++DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
++
+ /* Novatel Wireless GPS driver */
+ #define NOVATEL_IDS()			\
+ 	{ USB_DEVICE(0x09d7, 0x0100) }	/* NovAtel FlexPack GPS */
+@@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&google_device,
+ 	&vivopay_device,
+ 	&moto_modem_device,
++	&motorola_tetra_device,
+ 	&novatel_gps_device,
+ 	&hp4x_device,
+ 	&suunto_device,
+@@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
+ 	GOOGLE_IDS(),
+ 	VIVOPAY_IDS(),
+ 	MOTO_IDS(),
++	MOTOROLA_TETRA_IDS(),
+ 	NOVATEL_IDS(),
+ 	HP4X_IDS(),
+ 	SUUNTO_IDS(),
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index f58ae4a84c11..021d6880a3ed 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -1052,20 +1052,19 @@ static int uas_post_reset(struct usb_interface *intf)
+ 		return 0;
+ 
+ 	err = uas_configure_endpoints(devinfo);
+-	if (err) {
++	if (err && err != -ENODEV)
+ 		shost_printk(KERN_ERR, shost,
+ 			     "%s: alloc streams error %d after reset",
+ 			     __func__, err);
+-		return 1;
+-	}
+ 
++	/* we must unblock the host in every case lest we deadlock */
+ 	spin_lock_irqsave(shost->host_lock, flags);
+ 	scsi_report_bus_reset(shost, 0);
+ 	spin_unlock_irqrestore(shost->host_lock, flags);
+ 
+ 	scsi_unblock_requests(shost);
+ 
+-	return 0;
++	return err ? 1 : 0;
+ }
+ 
+ static int uas_suspend(struct usb_interface *intf, pm_message_t message)
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 2f80163ffb94..8ed80f28416f 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -155,6 +155,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: Icenowy Zheng <icenowy@aosc.io> */
++UNUSUAL_DEV(0x2537, 0x1068, 0x0000, 0x9999,
++		"Norelsys",
++		"NS1068X",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_IGNORE_UAS),
++
+ /* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+ UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+ 		"JMicron",
+diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
+index a3ec49bdc1e6..0931f3271119 100644
+--- a/drivers/usb/usbip/stub_dev.c
++++ b/drivers/usb/usbip/stub_dev.c
+@@ -87,6 +87,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
+ 			goto err;
+ 
+ 		sdev->ud.tcp_socket = socket;
++		sdev->ud.sockfd = sockfd;
+ 
+ 		spin_unlock_irq(&sdev->ud.lock);
+ 
+@@ -163,8 +164,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
+ 	 * step 1?
+ 	 */
+ 	if (ud->tcp_socket) {
+-		dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
+-			ud->tcp_socket);
++		dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
+ 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+ 	}
+ 
+@@ -187,6 +187,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
+ 	if (ud->tcp_socket) {
+ 		sockfd_put(ud->tcp_socket);
+ 		ud->tcp_socket = NULL;
++		ud->sockfd = -1;
+ 	}
+ 
+ 	/* 3. free used data */
+@@ -281,6 +282,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
+ 	sdev->ud.status		= SDEV_ST_AVAILABLE;
+ 	spin_lock_init(&sdev->ud.lock);
+ 	sdev->ud.tcp_socket	= NULL;
++	sdev->ud.sockfd		= -1;
+ 
+ 	INIT_LIST_HEAD(&sdev->priv_init);
+ 	INIT_LIST_HEAD(&sdev->priv_tx);
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index af10f7b131a4..325b4c05acdd 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
+ 	struct stub_priv *priv;
+ 	struct urb *urb;
+ 
+-	dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
++	dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
+ 
+ 	while ((priv = stub_priv_pop(sdev))) {
+ 		urb = priv->urb;
+-		dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
++		dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
++			priv->seqnum);
+ 		usb_kill_urb(urb);
+ 
+ 		kmem_cache_free(stub_priv_cache, priv);
+diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
+index 00e475c51a12..56cacb68040c 100644
+--- a/drivers/usb/usbip/stub_rx.c
++++ b/drivers/usb/usbip/stub_rx.c
+@@ -230,9 +230,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
+ 		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
+ 			continue;
+ 
+-		dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
+-			 priv->urb);
+-
+ 		/*
+ 		 * This matched urb is not completed yet (i.e., be in
+ 		 * flight in usb hcd hardware/driver). Now we are
+@@ -271,8 +268,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
+ 		ret = usb_unlink_urb(priv->urb);
+ 		if (ret != -EINPROGRESS)
+ 			dev_err(&priv->urb->dev->dev,
+-				"failed to unlink a urb %p, ret %d\n",
+-				priv->urb, ret);
++				"failed to unlink a urb # %lu, ret %d\n",
++				priv->seqnum, ret);
+ 
+ 		return 0;
+ 	}
+@@ -341,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
+ 	return priv;
+ }
+ 
+-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
++static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
+ {
+ 	struct usb_device *udev = sdev->udev;
+ 	struct usb_host_endpoint *ep;
+ 	struct usb_endpoint_descriptor *epd = NULL;
++	int epnum = pdu->base.ep;
++	int dir = pdu->base.direction;
++
++	if (epnum < 0 || epnum > 15)
++		goto err_ret;
+ 
+ 	if (dir == USBIP_DIR_IN)
+ 		ep = udev->ep_in[epnum & 0x7f];
+ 	else
+ 		ep = udev->ep_out[epnum & 0x7f];
+-	if (!ep) {
+-		dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
+-			epnum);
+-		BUG();
+-	}
++	if (!ep)
++		goto err_ret;
+ 
+ 	epd = &ep->desc;
++
+ 	if (usb_endpoint_xfer_control(epd)) {
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndctrlpipe(udev, epnum);
+@@ -380,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
+ 	}
+ 
+ 	if (usb_endpoint_xfer_isoc(epd)) {
++		/* validate packet size and number of packets */
++		unsigned int maxp, packets, bytes;
++
++#define USB_EP_MAXP_MULT_SHIFT  11
++#define USB_EP_MAXP_MULT_MASK   (3 << USB_EP_MAXP_MULT_SHIFT)
++#define USB_EP_MAXP_MULT(m) \
++	(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
++
++		maxp = usb_endpoint_maxp(epd);
++		maxp *= (USB_EP_MAXP_MULT(
++				__le16_to_cpu(epd->wMaxPacketSize)) + 1);
++		bytes = pdu->u.cmd_submit.transfer_buffer_length;
++		packets = DIV_ROUND_UP(bytes, maxp);
++
++		if (pdu->u.cmd_submit.number_of_packets < 0 ||
++		    pdu->u.cmd_submit.number_of_packets > packets) {
++			dev_err(&sdev->udev->dev,
++				"CMD_SUBMIT: isoc invalid num packets %d\n",
++				pdu->u.cmd_submit.number_of_packets);
++			return -1;
++		}
+ 		if (dir == USBIP_DIR_OUT)
+ 			return usb_sndisocpipe(udev, epnum);
+ 		else
+ 			return usb_rcvisocpipe(udev, epnum);
+ 	}
+ 
++err_ret:
+ 	/* NOT REACHED */
+-	dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
+-	return 0;
++	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
++	return -1;
+ }
+ 
+ static void masking_bogus_flags(struct urb *urb)
+@@ -452,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
+ 	struct stub_priv *priv;
+ 	struct usbip_device *ud = &sdev->ud;
+ 	struct usb_device *udev = sdev->udev;
+-	int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
++	int pipe = get_pipe(sdev, pdu);
++
++	if (pipe == -1)
++		return;
+ 
+ 	priv = stub_priv_alloc(sdev, pdu);
+ 	if (!priv)
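+
The isochronous branch above now bounds number_of_packets before any URB is built, because a malicious peer controls the whole CMD_SUBMIT header. A userspace sketch of that arithmetic, assuming the USB 2.0 wMaxPacketSize layout (bits 10..0 carry the packet size, bits 12..11 the extra transactions per microframe); names and values are illustrative:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned short wMaxPacketSize = 0x1400; /* 1024 bytes, 2 extra txns */
		unsigned int maxp = wMaxPacketSize & 0x7ff;
		unsigned int mult = ((wMaxPacketSize >> 11) & 3) + 1;
		unsigned int bytes = 20000;  /* transfer_buffer_length from the PDU */
		unsigned int packets = DIV_ROUND_UP(bytes, maxp * mult);

		printf("at most %u packets for %u bytes (maxp %u x %u)\n",
		       packets, bytes, maxp, mult);
		/* a CMD_SUBMIT claiming more than 'packets' packets is rejected */
		return 0;
	}

Returning -1 instead of calling BUG() also turns a remotely triggerable crash in get_pipe() into a clean error return.
+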
+diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
+index af858d52608a..f4dd30c56f36 100644
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -201,8 +201,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
+ 
+ 		/* 1. setup usbip_header */
+ 		setup_ret_submit_pdu(&pdu_header, urb);
+-		usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
+-				  pdu_header.base.seqnum, urb);
++		usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
++				  pdu_header.base.seqnum);
+ 		usbip_header_correct_endian(&pdu_header, 1);
+ 
+ 		iov[iovnum].iov_base = &pdu_header;
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index e40da7759a0e..1838f1b2c2fa 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -103,7 +103,7 @@ static void usbip_dump_usb_device(struct usb_device *udev)
+ 	dev_dbg(dev, "       devnum(%d) devpath(%s) usb speed(%s)",
+ 		udev->devnum, udev->devpath, usb_speed_string(udev->speed));
+ 
+-	pr_debug("tt %p, ttport %d\n", udev->tt, udev->ttport);
++	pr_debug("tt hub ttport %d\n", udev->ttport);
+ 
+ 	dev_dbg(dev, "                    ");
+ 	for (i = 0; i < 16; i++)
+@@ -136,12 +136,8 @@ static void usbip_dump_usb_device(struct usb_device *udev)
+ 	}
+ 	pr_debug("\n");
+ 
+-	dev_dbg(dev, "parent %p, bus %p\n", udev->parent, udev->bus);
+-
+-	dev_dbg(dev,
+-		"descriptor %p, config %p, actconfig %p, rawdescriptors %p\n",
+-		&udev->descriptor, udev->config,
+-		udev->actconfig, udev->rawdescriptors);
++	dev_dbg(dev, "parent %s, bus %s\n", dev_name(&udev->parent->dev),
++		udev->bus->bus_name);
+ 
+ 	dev_dbg(dev, "have_langid %d, string_langid %d\n",
+ 		udev->have_langid, udev->string_langid);
+@@ -249,9 +245,6 @@ void usbip_dump_urb(struct urb *urb)
+ 
+ 	dev = &urb->dev->dev;
+ 
+-	dev_dbg(dev, "   urb                   :%p\n", urb);
+-	dev_dbg(dev, "   dev                   :%p\n", urb->dev);
+-
+ 	usbip_dump_usb_device(urb->dev);
+ 
+ 	dev_dbg(dev, "   pipe                  :%08x ", urb->pipe);
+@@ -260,11 +253,9 @@ void usbip_dump_urb(struct urb *urb)
+ 
+ 	dev_dbg(dev, "   status                :%d\n", urb->status);
+ 	dev_dbg(dev, "   transfer_flags        :%08X\n", urb->transfer_flags);
+-	dev_dbg(dev, "   transfer_buffer       :%p\n", urb->transfer_buffer);
+ 	dev_dbg(dev, "   transfer_buffer_length:%d\n",
+ 						urb->transfer_buffer_length);
+ 	dev_dbg(dev, "   actual_length         :%d\n", urb->actual_length);
+-	dev_dbg(dev, "   setup_packet          :%p\n", urb->setup_packet);
+ 
+ 	if (urb->setup_packet && usb_pipetype(urb->pipe) == PIPE_CONTROL)
+ 		usbip_dump_usb_ctrlrequest(
+@@ -274,8 +265,6 @@ void usbip_dump_urb(struct urb *urb)
+ 	dev_dbg(dev, "   number_of_packets     :%d\n", urb->number_of_packets);
+ 	dev_dbg(dev, "   interval              :%d\n", urb->interval);
+ 	dev_dbg(dev, "   error_count           :%d\n", urb->error_count);
+-	dev_dbg(dev, "   context               :%p\n", urb->context);
+-	dev_dbg(dev, "   complete              :%p\n", urb->complete);
+ }
+ EXPORT_SYMBOL_GPL(usbip_dump_urb);
+ 
+@@ -328,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+ 	struct msghdr msg;
+ 	struct kvec iov;
+ 	int total = 0;
+-
+ 	/* for blocks of if (usbip_dbg_flag_xmit) */
+ 	char *bp = buf;
+ 	int osize = size;
+ 
+-	usbip_dbg_xmit("enter\n");
+-
+-	if (!sock || !buf || !size) {
+-		pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
+-		       size);
++	if (!sock || !buf || !size)
+ 		return -EINVAL;
+-	}
++
++	usbip_dbg_xmit("enter\n");
+ 
+ 	do {
+ 		sock->sk->sk_allocation = GFP_NOIO;
+@@ -352,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
+ 		msg.msg_flags      = MSG_NOSIGNAL;
+ 
+ 		result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
+-		if (result <= 0) {
+-			pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
+-				 sock, buf, size, result, total);
++		if (result <= 0)
+ 			goto err;
+-		}
+ 
+ 		size -= result;
+ 		buf += result;
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index 86b08475c254..f875ccaa55f9 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -261,6 +261,7 @@ struct usbip_device {
+ 	/* lock for status */
+ 	spinlock_t lock;
+ 
++	int sockfd;
+ 	struct socket *tcp_socket;
+ 
+ 	struct task_struct *tcp_rx;
+diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
+index e9ef1eccdace..17498af82b69 100644
+--- a/drivers/usb/usbip/vhci_hcd.c
++++ b/drivers/usb/usbip/vhci_hcd.c
+@@ -462,9 +462,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ 	int ret = 0;
+ 	struct vhci_device *vdev;
+ 
+-	usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
+-			  hcd, urb, mem_flags);
+-
+ 	/* patch to usb_sg_init() is in 2.5.60 */
+ 	BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
+ 
+@@ -620,8 +617,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 	struct vhci_priv *priv;
+ 	struct vhci_device *vdev;
+ 
+-	pr_info("dequeue a urb %p\n", urb);
+-
+ 	spin_lock(&the_controller->lock);
+ 
+ 	priv = urb->hcpriv;
+@@ -649,7 +644,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		/* tcp connection is closed */
+ 		spin_lock(&vdev->priv_lock);
+ 
+-		pr_info("device %p seems to be disconnected\n", vdev);
+ 		list_del(&priv->list);
+ 		kfree(priv);
+ 		urb->hcpriv = NULL;
+@@ -661,8 +655,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		 * vhci_rx will receive RET_UNLINK and give back the URB.
+ 		 * Otherwise, we give back it here.
+ 		 */
+-		pr_info("gives back urb %p\n", urb);
+-
+ 		usb_hcd_unlink_urb_from_ep(hcd, urb);
+ 
+ 		spin_unlock(&the_controller->lock);
+@@ -691,8 +683,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 
+ 		unlink->unlink_seqnum = priv->seqnum;
+ 
+-		pr_info("device %p seems to be still connected\n", vdev);
+-
+ 		/* send cmd_unlink and try to cancel the pending URB in the
+ 		 * peer */
+ 		list_add_tail(&unlink->list, &vdev->unlink_tx);
+@@ -771,7 +761,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ 
+ 	/* need this? see stub_dev.c */
+ 	if (ud->tcp_socket) {
+-		pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
++		pr_debug("shutdown sockfd %d\n", ud->sockfd);
+ 		kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
+ 	}
+ 
+@@ -790,6 +780,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
+ 	if (vdev->ud.tcp_socket) {
+ 		sockfd_put(vdev->ud.tcp_socket);
+ 		vdev->ud.tcp_socket = NULL;
++		vdev->ud.sockfd = -1;
+ 	}
+ 	pr_info("release socket\n");
+ 
+@@ -836,6 +827,7 @@ static void vhci_device_reset(struct usbip_device *ud)
+ 	if (ud->tcp_socket) {
+ 		sockfd_put(ud->tcp_socket);
+ 		ud->tcp_socket = NULL;
++		ud->sockfd = -1;
+ 	}
+ 	ud->status = VDEV_ST_NULL;
+ 
+diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
+index 00e4a54308e4..bc4eb0855314 100644
+--- a/drivers/usb/usbip/vhci_rx.c
++++ b/drivers/usb/usbip/vhci_rx.c
+@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
+ 		urb = priv->urb;
+ 		status = urb->status;
+ 
+-		usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
+-				urb, priv, seqnum);
++		usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
+ 
+ 		switch (status) {
+ 		case -ENOENT:
+ 			/* fall through */
+ 		case -ECONNRESET:
+-			dev_info(&urb->dev->dev,
+-				 "urb %p was unlinked %ssynchronuously.\n", urb,
+-				 status == -ENOENT ? "" : "a");
++			dev_dbg(&urb->dev->dev,
++				 "urb seq# %u was unlinked %ssynchronously\n",
++				 seqnum, status == -ENOENT ? "" : "a");
+ 			break;
+ 		case -EINPROGRESS:
+ 			/* no info output */
+ 			break;
+ 		default:
+-			dev_info(&urb->dev->dev,
+-				 "urb %p may be in a error, status %d\n", urb,
+-				 status);
++			dev_dbg(&urb->dev->dev,
++				 "urb seq# %u may be in error, status %d\n",
++				 seqnum, status);
+ 		}
+ 
+ 		list_del(&priv->list);
+@@ -78,8 +77,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ 	spin_unlock(&vdev->priv_lock);
+ 
+ 	if (!urb) {
+-		pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
+-		pr_info("max seqnum %d\n",
++		pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
++			pdu->base.seqnum,
+ 			atomic_read(&the_controller->seqnum));
+ 		usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
+ 		return;
+@@ -102,7 +101,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
+ 	if (usbip_dbg_flag_vhci_rx)
+ 		usbip_dump_urb(urb);
+ 
+-	usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
++	usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
+ 
+ 	spin_lock(&the_controller->lock);
+ 	usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
+@@ -165,7 +164,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
+ 		pr_info("the urb (seqnum %d) was already given back\n",
+ 			pdu->base.seqnum);
+ 	} else {
+-		usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
++		usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
+ 
+ 		/* If unlink is successful, status is -ECONNRESET */
+ 		urb->status = pdu->u.ret_unlink.status;
+diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
+index 211f43f67ea2..84c21c4ccf46 100644
+--- a/drivers/usb/usbip/vhci_sysfs.c
++++ b/drivers/usb/usbip/vhci_sysfs.c
+@@ -39,16 +39,20 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ 
+ 	/*
+ 	 * output example:
+-	 * prt sta spd dev socket           local_busid
+-	 * 000 004 000 000         c5a7bb80 1-2.3
+-	 * 001 004 000 000         d8cee980 2-3.4
++	 * port sta spd dev      sockfd local_busid
++	 * 0000 004 000 00000000 000003 1-2.3
++	 * 0001 004 000 00000000 000004 2-3.4
+ 	 *
+-	 * IP address can be retrieved from a socket pointer address by looking
+-	 * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
+-	 * port number and its peer IP address.
++	 * Output includes socket fd instead of socket pointer address to
++	 * avoid leaking kernel memory addresses in:
++	 *	/sys/devices/platform/vhci_hcd.0/status and in debug output.
++	 * The socket pointer address is not used at the moment and it was
++	 * made visible as a convenient way to find IP address from socket
++	 * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
++	 * a security hole, the change is made to use sockfd instead.
+ 	 */
+ 	out += sprintf(out,
+-		       "prt sta spd bus dev socket           local_busid\n");
++		       "prt sta spd bus dev sockfd local_busid\n");
+ 
+ 	for (i = 0; i < VHCI_NPORTS; i++) {
+ 		struct vhci_device *vdev = port_to_vdev(i);
+@@ -60,11 +64,11 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+ 			out += sprintf(out, "%03u %08x ",
+ 				       vdev->speed, vdev->devid);
+ 			out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
++			out += sprintf(out, "%06u", vdev->ud.sockfd);
+ 			out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
+ 
+-		} else {
+-			out += sprintf(out, "000 000 000 0000000000000000 0-0");
+-		}
++		} else
++			out += sprintf(out, "000 000 000 000000 0-0");
+ 
+ 		out += sprintf(out, "\n");
+ 		spin_unlock(&vdev->ud.lock);
+@@ -223,6 +227,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
+ 
+ 	vdev->devid         = devid;
+ 	vdev->speed         = speed;
++	vdev->ud.sockfd     = sockfd;
+ 	vdev->ud.tcp_socket = socket;
+ 	vdev->ud.status     = VDEV_ST_NOTASSIGNED;
+ 
+diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
+index 409fd99f3257..3c5796c8633a 100644
+--- a/drivers/usb/usbip/vhci_tx.c
++++ b/drivers/usb/usbip/vhci_tx.c
+@@ -82,7 +82,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
+ 		memset(&msg, 0, sizeof(msg));
+ 		memset(&iov, 0, sizeof(iov));
+ 
+-		usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
++		usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
++				  priv->seqnum);
+ 
+ 		/* 1. setup usbip_header */
+ 		setup_cmd_submit_pdu(&pdu_header, urb);
+diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
+index 7d137a43cc86..14265c4c0203 100644
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -982,6 +982,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
+ 	}
+ 	vhost_net_stop(n, &tx_sock, &rx_sock);
+ 	vhost_net_flush(n);
++	vhost_dev_stop(&n->dev);
+ 	vhost_dev_reset_owner(&n->dev, memory);
+ 	vhost_net_vq_reset(n);
+ done:
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index 6897f1c1bc73..95d01562ffa2 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -79,14 +79,17 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
+ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
+ {
+ 	unsigned int lth = pb->lth_brightness;
+-	int duty_cycle;
++	u64 duty_cycle;
+ 
+ 	if (pb->levels)
+ 		duty_cycle = pb->levels[brightness];
+ 	else
+ 		duty_cycle = brightness;
+ 
+-	return (duty_cycle * (pb->period - lth) / pb->scale) + lth;
++	duty_cycle *= pb->period - lth;
++	do_div(duty_cycle, pb->scale);
++
++	return duty_cycle + lth;
+ }
+ 
+ static int pwm_backlight_update_status(struct backlight_device *bl)
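+
The duty-cycle fix above is a plain 32-bit multiplication overflow: a brightness level times a period in nanoseconds easily exceeds 2^32 before the division by scale brings it back down. A self-contained demonstration with illustrative values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t duty = 65535, period = 100000, lth = 0, scale = 65535;

		uint32_t wrong = duty * (period - lth) / scale + lth;           /* wraps */
		uint64_t right = (uint64_t)duty * (period - lth) / scale + lth; /* exact */

		printf("32-bit: %u ns   64-bit: %llu ns\n",
		       wrong, (unsigned long long)right);
		return 0;
	}

Widening to u64 and dividing with do_div() keeps the intermediate product exact on 32-bit kernels as well.
+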
+diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
+index 0efc52f11ad0..b30e7d87804b 100644
+--- a/drivers/video/console/dummycon.c
++++ b/drivers/video/console/dummycon.c
+@@ -68,7 +68,6 @@ const struct consw dummy_con = {
+     .con_switch =	DUMMY,
+     .con_blank =	DUMMY,
+     .con_font_set =	DUMMY,
+-    .con_font_get =	DUMMY,
+     .con_font_default =	DUMMY,
+     .con_font_copy =	DUMMY,
+     .con_set_palette =	DUMMY,
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 44eb7c737ea2..34af3a26472c 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1506,6 +1506,7 @@ config FB_SIS
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+ 	select FB_BOOT_VESA_SUPPORT if FB_SIS = y
++	select FB_SIS_300 if !FB_SIS_315
+ 	help
+ 	  This is the frame buffer device driver for the SiS 300, 315, 330
+ 	  and 340 series as well as XGI V3XT, V5, V8, Z7 graphics chipsets.
+diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
+index 94a8d04e60f9..b16a1c16e212 100644
+--- a/drivers/video/fbdev/atmel_lcdfb.c
++++ b/drivers/video/fbdev/atmel_lcdfb.c
+@@ -1121,7 +1121,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ 		goto put_display_node;
+ 	}
+ 
+-	timings_np = of_find_node_by_name(display_np, "display-timings");
++	timings_np = of_get_child_by_name(display_np, "display-timings");
+ 	if (!timings_np) {
+ 		dev_err(dev, "failed to find display-timings node\n");
+ 		ret = -ENODEV;
+@@ -1142,6 +1142,12 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
+ 		fb_add_videomode(&fb_vm, &info->modelist);
+ 	}
+ 
++	/*
++	 * FIXME: Make sure we are not referencing any fields in display_np
++	 * and timings_np and drop our references to them before returning to
++	 * avoid leaking the nodes on probe deferral and driver unbind.
++	 */
++
+ 	return 0;
+ 
+ put_timings_node:
+diff --git a/drivers/video/fbdev/auo_k190x.c b/drivers/video/fbdev/auo_k190x.c
+index 8d2499d1cafb..9580374667ba 100644
+--- a/drivers/video/fbdev/auo_k190x.c
++++ b/drivers/video/fbdev/auo_k190x.c
+@@ -773,9 +773,7 @@ static void auok190x_recover(struct auok190xfb_par *par)
+ /*
+  * Power-management
+  */
+-
+-#ifdef CONFIG_PM
+-static int auok190x_runtime_suspend(struct device *dev)
++static int __maybe_unused auok190x_runtime_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct fb_info *info = platform_get_drvdata(pdev);
+@@ -822,7 +820,7 @@ finish:
+ 	return 0;
+ }
+ 
+-static int auok190x_runtime_resume(struct device *dev)
++static int __maybe_unused auok190x_runtime_resume(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct fb_info *info = platform_get_drvdata(pdev);
+@@ -856,7 +854,7 @@ static int auok190x_runtime_resume(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int auok190x_suspend(struct device *dev)
++static int __maybe_unused auok190x_suspend(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct fb_info *info = platform_get_drvdata(pdev);
+@@ -896,7 +894,7 @@ static int auok190x_suspend(struct device *dev)
+ 	return 0;
+ }
+ 
+-static int auok190x_resume(struct device *dev)
++static int __maybe_unused auok190x_resume(struct device *dev)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	struct fb_info *info = platform_get_drvdata(pdev);
+@@ -933,7 +931,6 @@ static int auok190x_resume(struct device *dev)
+ 
+ 	return 0;
+ }
+-#endif
+ 
+ const struct dev_pm_ops auok190x_pm = {
+ 	SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
+index 95873f26e39c..de2f3e793786 100644
+--- a/drivers/video/fbdev/exynos/s6e8ax0.c
++++ b/drivers/video/fbdev/exynos/s6e8ax0.c
+@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM
+-static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
++static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+ {
+ 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
+ 
+@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+ 	return 0;
+ }
+ 
+-static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
++static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+ {
+ 	struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
+ 
+@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+ 
+ 	return 0;
+ }
+-#else
+-#define s6e8ax0_suspend		NULL
+-#define s6e8ax0_resume		NULL
+-#endif
+ 
+ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
+ 	.name = "s6e8ax0",
+@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
+ 	.power_on = s6e8ax0_power_on,
+ 	.set_sequence = s6e8ax0_set_sequence,
+ 	.probe = s6e8ax0_probe,
+-	.suspend = s6e8ax0_suspend,
+-	.resume = s6e8ax0_resume,
++	.suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
++	.resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
+ };
+ 
+ static int s6e8ax0_init(void)
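+
Both fbdev changes above replace #ifdef CONFIG_PM fencing with mechanisms that keep the callbacks visible to the compiler: auok190x marks them __maybe_unused, and s6e8ax0 selects them at initialization time with IS_ENABLED(). A small sketch of why this compiles cleanly either way (hypothetical names; __maybe_unused is spelled out as the GCC attribute it expands to):

	#include <stdio.h>

	#define __maybe_unused __attribute__((unused))

	/* leave CONFIG_PM undefined to model a !PM build */

	static int __maybe_unused demo_suspend(void *dev)
	{
		(void)dev;
		return 0;   /* always compiled, so it cannot silently bit-rot */
	}

	#ifdef CONFIG_PM
	static int (*suspend_hook)(void *) = demo_suspend;
	#else
	static int (*suspend_hook)(void *); /* NULL, hook compiled out */
	#endif

	int main(void)
	{
		printf("suspend hook %s\n",
		       suspend_hook ? "present" : "stubbed out");
		return 0;
	}

The dead callback is still type-checked and then discarded by the compiler, which catches breakage that an #ifdef would hide.
+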
+diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c
+index b847d530471a..e8d1309ccefc 100644
+--- a/drivers/video/fbdev/intelfb/intelfbdrv.c
++++ b/drivers/video/fbdev/intelfb/intelfbdrv.c
+@@ -306,7 +306,7 @@ static __inline__ int get_opt_int(const char *this_opt, const char *name,
+ }
+ 
+ static __inline__ int get_opt_bool(const char *this_opt, const char *name,
+-				   int *ret)
++				   bool *ret)
+ {
+ 	if (!ret)
+ 		return 0;
+diff --git a/drivers/video/fbdev/mmp/core.c b/drivers/video/fbdev/mmp/core.c
+index a0f496049db7..3a6bb6561ba0 100644
+--- a/drivers/video/fbdev/mmp/core.c
++++ b/drivers/video/fbdev/mmp/core.c
+@@ -23,6 +23,7 @@
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/export.h>
++#include <linux/module.h>
+ #include <video/mmp_disp.h>
+ 
+ static struct mmp_overlay *path_get_overlay(struct mmp_path *path,
+@@ -249,3 +250,7 @@ void mmp_unregister_path(struct mmp_path *path)
+ 	mutex_unlock(&disp_lock);
+ }
+ EXPORT_SYMBOL_GPL(mmp_unregister_path);
++
++MODULE_AUTHOR("Zhou Zhu <zzhu3@marvell.com>");
++MODULE_DESCRIPTION("Marvell MMP display framework");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c
+index 295e0dedaf1f..20f7234e809e 100644
+--- a/drivers/video/fbdev/sis/init301.c
++++ b/drivers/video/fbdev/sis/init301.c
+@@ -2151,17 +2151,15 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
+ 		unsigned short RefreshRateTableIndex)
+ {
+   unsigned short CRT2Index, VCLKIndex = 0, VCLKIndexGEN = 0, VCLKIndexGENCRT = 0;
+-  unsigned short modeflag, resinfo, tempbx;
++  unsigned short resinfo, tempbx;
+   const unsigned char *CHTVVCLKPtr = NULL;
+ 
+   if(ModeNo <= 0x13) {
+-     modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
+      resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo;
+      CRT2Index = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC;
+      VCLKIndexGEN = (SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)) >> 2) & 0x03;
+      VCLKIndexGENCRT = VCLKIndexGEN;
+   } else {
+-     modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+      resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
+      CRT2Index = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
+      VCLKIndexGEN = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
+@@ -7270,7 +7268,7 @@ SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift)
+ static void
+ SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
+ {
+-   unsigned short temp, temp1, resinfo = 0;
++   unsigned short temp, temp1;
+    unsigned char  *ROMAddr = SiS_Pr->VirtualRomBase;
+ 
+    if(!(SiS_Pr->SiS_VBType & VB_SIS30xCLV)) return;
+@@ -7282,10 +7280,6 @@ SiS_SetGroup4_C_ELV(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
+       if(!(ROMAddr[0x61] & 0x04)) return;
+    }
+ 
+-   if(ModeNo > 0x13) {
+-      resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
+-   }
+-
+    SiS_SetRegOR(SiS_Pr->SiS_Part4Port,0x3a,0x08);
+    temp = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x3a);
+    if(!(temp & 0x01)) {
+diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
+index f9718f012aae..badee04ef496 100644
+--- a/drivers/video/fbdev/via/viafbdev.c
++++ b/drivers/video/fbdev/via/viafbdev.c
+@@ -1630,16 +1630,14 @@ static void viafb_init_proc(struct viafb_shared *shared)
+ }
+ static void viafb_remove_proc(struct viafb_shared *shared)
+ {
+-	struct proc_dir_entry *viafb_entry = shared->proc_entry,
+-		*iga1_entry = shared->iga1_proc_entry,
+-		*iga2_entry = shared->iga2_proc_entry;
++	struct proc_dir_entry *viafb_entry = shared->proc_entry;
+ 
+ 	if (!viafb_entry)
+ 		return;
+ 
+-	remove_proc_entry("output_devices", iga2_entry);
++	remove_proc_entry("output_devices", shared->iga2_proc_entry);
+ 	remove_proc_entry("iga2", viafb_entry);
+-	remove_proc_entry("output_devices", iga1_entry);
++	remove_proc_entry("output_devices", shared->iga1_proc_entry);
+ 	remove_proc_entry("iga1", viafb_entry);
+ 	remove_proc_entry("supported_output_devices", viafb_entry);
+ 
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 01d15dca940e..7cf26768ea0b 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -239,12 +239,14 @@ static void update_balloon_stats(struct virtio_balloon *vb)
+ 	all_vm_events(events);
+ 	si_meminfo(&i);
+ 
++#ifdef CONFIG_VM_EVENT_COUNTERS
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+ 				pages_to_bytes(events[PSWPIN]));
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+ 				pages_to_bytes(events[PSWPOUT]));
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
++#endif
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+ 				pages_to_bytes(i.freeram));
+ 	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
+index 7cd226da15fe..a4918b00308f 100644
+--- a/drivers/xen/Kconfig
++++ b/drivers/xen/Kconfig
+@@ -239,7 +239,7 @@ config XEN_ACPI_HOTPLUG_CPU
+ 
+ config XEN_ACPI_PROCESSOR
+ 	tristate "Xen ACPI processor"
+-	depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ
++	depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ
+ 	default m
+ 	help
+           This ACPI processor uploads Power Management information to the Xen
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 9dbe5b548fa6..0814dffa30c8 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -1260,7 +1260,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+ 	/* Lock all pages first so we can lock the extent safely. */
+ 	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+ 	if (ret)
+-		goto out;
++		goto out_unlock;
+ 
+ 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
+ 			 0, &cached_state);
+@@ -1353,6 +1353,7 @@ out_nospc_locked:
+ out_nospc:
+ 	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);
+ 
++out_unlock:
+ 	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+ 		up_write(&block_group->data_rwsem);
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d14af5bd13d6..884e90e9622a 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1282,8 +1282,11 @@ next_slot:
+ 		leaf = path->nodes[0];
+ 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+ 			ret = btrfs_next_leaf(root, path);
+-			if (ret < 0)
++			if (ret < 0) {
++				if (cow_start != (u64)-1)
++					cur_offset = cow_start;
+ 				goto error;
++			}
+ 			if (ret > 0)
+ 				break;
+ 			leaf = path->nodes[0];
+@@ -1999,7 +2002,15 @@ again:
+ 		goto out;
+ 	 }
+ 
+-	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state);
++	ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
++					&cached_state);
++	if (ret) {
++		mapping_set_error(page->mapping, ret);
++		end_extent_writepage(page, ret, page_start, page_end);
++		ClearPageChecked(page);
++		goto out;
++	}
++
+ 	ClearPageChecked(page);
+ 	set_page_dirty(page);
+ out:
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2b115c309e1c..b7f6b473cd16 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2216,7 +2216,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
+ 	if (!path)
+ 		return -ENOMEM;
+ 
+-	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX];
++	ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
+ 
+ 	key.objectid = tree_id;
+ 	key.type = BTRFS_ROOT_ITEM_KEY;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6ee954c62fe6..f355bd2d6ad8 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -26,6 +26,7 @@
+ #include "print-tree.h"
+ #include "backref.h"
+ #include "hash.h"
++#include "inode-map.h"
+ 
+ /* magic values for the inode_only field in btrfs_log_inode:
+  *
+@@ -2343,6 +2344,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
+ 							next);
+ 					btrfs_wait_tree_block_writeback(next);
+ 					btrfs_tree_unlock(next);
++				} else {
++					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++						clear_extent_buffer_dirty(next);
+ 				}
+ 
+ 				WARN_ON(root_owner !=
+@@ -2422,6 +2426,9 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
+ 							next);
+ 					btrfs_wait_tree_block_writeback(next);
+ 					btrfs_tree_unlock(next);
++				} else {
++					if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++						clear_extent_buffer_dirty(next);
+ 				}
+ 
+ 				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
+@@ -2498,6 +2505,9 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
+ 				clean_tree_block(trans, log->fs_info, next);
+ 				btrfs_wait_tree_block_writeback(next);
+ 				btrfs_tree_unlock(next);
++			} else {
++				if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
++					clear_extent_buffer_dirty(next);
+ 			}
+ 
+ 			WARN_ON(log->root_key.objectid !=
+@@ -5294,6 +5304,23 @@ again:
+ 						      path);
+ 		}
+ 
++		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
++			struct btrfs_root *root = wc.replay_dest;
++
++			btrfs_release_path(path);
++
++			/*
++			 * We have just replayed everything, and the highest
++			 * objectid of fs roots probably has changed in case
++			 * objectid of fs roots may have changed in case
++			 * some inode_items were replayed.
++			 * root->objectid_mutex is not acquired as log replay
++			 * could only happen during mount.
++			 */
++			ret = btrfs_find_highest_objectid(root,
++						  &root->highest_objectid);
++		}
++
+ 		key.offset = found_key.offset - 1;
+ 		wc.replay_dest->log_root = NULL;
+ 		free_extent_buffer(log->node);
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index 4acbc390a7d6..1d707a67f8ac 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -306,9 +306,8 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
+ {
+ 	int i;
+ 	int rc;
+-	char password_with_pad[CIFS_ENCPWD_SIZE];
++	char password_with_pad[CIFS_ENCPWD_SIZE] = {0};
+ 
+-	memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+ 	if (password)
+ 		strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index a2c100aed4b0..d733df946cc6 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -1635,7 +1635,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ 			tmp_end++;
+ 			if (!(tmp_end < end && tmp_end[1] == delim)) {
+ 				/* No it is not. Set the password to NULL */
+-				kfree(vol->password);
++				kzfree(vol->password);
+ 				vol->password = NULL;
+ 				break;
+ 			}
+@@ -1673,7 +1673,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
+ 					options = end;
+ 			}
+ 
+-			kfree(vol->password);
++			kzfree(vol->password);
+ 			/* Now build new password string */
+ 			temp_len = strlen(value);
+ 			vol->password = kzalloc(temp_len+1, GFP_KERNEL);
+@@ -4038,7 +4038,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 		reset_cifs_unix_caps(0, tcon, NULL, vol_info);
+ out:
+ 	kfree(vol_info->username);
+-	kfree(vol_info->password);
++	kzfree(vol_info->password);
+ 	kfree(vol_info);
+ 
+ 	return tcon;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 47e04038a846..1366d2151389 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3231,20 +3231,18 @@ static struct vm_operations_struct cifs_file_vm_ops = {
+ 
+ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+-	int rc, xid;
++	int xid, rc = 0;
+ 	struct inode *inode = file_inode(file);
+ 
+ 	xid = get_xid();
+ 
+-	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
++	if (!CIFS_CACHE_READ(CIFS_I(inode)))
+ 		rc = cifs_zap_mapping(inode);
+-		if (rc)
+-			return rc;
+-	}
+-
+-	rc = generic_file_mmap(file, vma);
+-	if (rc == 0)
++	if (!rc)
++		rc = generic_file_mmap(file, vma);
++	if (!rc)
+ 		vma->vm_ops = &cifs_file_vm_ops;
++
+ 	free_xid(xid);
+ 	return rc;
+ }
+@@ -3254,16 +3252,16 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+ 	int rc, xid;
+ 
+ 	xid = get_xid();
++
+ 	rc = cifs_revalidate_file(file);
+-	if (rc) {
++	if (rc)
+ 		cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
+ 			 rc);
+-		free_xid(xid);
+-		return rc;
+-	}
+-	rc = generic_file_mmap(file, vma);
+-	if (rc == 0)
++	if (!rc)
++		rc = generic_file_mmap(file, vma);
++	if (!rc)
+ 		vma->vm_ops = &cifs_file_vm_ops;
++
+ 	free_xid(xid);
+ 	return rc;
+ }
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index 8442b8b8e0be..a9b68cb38c12 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -99,14 +99,11 @@ sesInfoFree(struct cifs_ses *buf_to_free)
+ 	kfree(buf_to_free->serverOS);
+ 	kfree(buf_to_free->serverDomain);
+ 	kfree(buf_to_free->serverNOS);
+-	if (buf_to_free->password) {
+-		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
+-		kfree(buf_to_free->password);
+-	}
++	kzfree(buf_to_free->password);
+ 	kfree(buf_to_free->user_name);
+ 	kfree(buf_to_free->domainName);
+-	kfree(buf_to_free->auth_key.response);
+-	kfree(buf_to_free);
++	kzfree(buf_to_free->auth_key.response);
++	kzfree(buf_to_free);
+ }
+ 
+ struct cifs_tcon *
+@@ -136,10 +133,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
+ 	}
+ 	atomic_dec(&tconInfoAllocCount);
+ 	kfree(buf_to_free->nativeFileSystem);
+-	if (buf_to_free->password) {
+-		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
+-		kfree(buf_to_free->password);
+-	}
++	kzfree(buf_to_free->password);
+ 	kfree(buf_to_free);
+ }
+ 
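The kfree-to-kzfree conversions above all guard the same asset: passwords and session keys should not survive in freed heap memory. A userspace analogue, assuming the caller knows the length (the kernel's kzfree() recovers it via ksize()); kzfree_like is a hypothetical name:

	#include <stdlib.h>
	#include <string.h>

	/* zero the secret, then return the memory to the allocator */
	static void kzfree_like(void *p, size_t len)
	{
		if (!p)
			return;
		memset(p, 0, len);
		free(p);
	}

	int main(void)
	{
		char *password = strdup("hunter2");

		if (!password)
			return 1;
		/* ... use the credential ... */
		kzfree_like(password, strlen(password) + 1);
		return 0;
	}

A hardened build would use explicit_bzero() or a volatile store, since a plain memset() immediately before free() may legally be optimised away.
+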
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 0cf4a76e8e94..69422157c71b 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -507,8 +507,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 	}
+ 
+ 	/* check validate negotiate info response matches what we got earlier */
+-	if (pneg_rsp->Dialect !=
+-			cpu_to_le16(tcon->ses->server->vals->protocol_id))
++	if (pneg_rsp->Dialect != cpu_to_le16(tcon->ses->server->dialect))
+ 		goto vneg_out;
+ 
+ 	if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
+diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
+index d6aeb84e90b6..d882d873c5a3 100644
+--- a/fs/ext2/acl.c
++++ b/fs/ext2/acl.c
+@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
+ 	return acl;
+ }
+ 
+-/*
+- * inode->i_mutex: down
+- */
+-int
+-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++static int
++__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+ 	int name_index;
+ 	void *value = NULL;
+@@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	switch(type) {
+ 		case ACL_TYPE_ACCESS:
+ 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+-			if (acl) {
+-				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-				if (error)
+-					return error;
+-				inode->i_ctime = CURRENT_TIME_SEC;
+-				mark_inode_dirty(inode);
+-			}
+ 			break;
+ 
+ 		case ACL_TYPE_DEFAULT:
+@@ -224,6 +214,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	return error;
+ }
+ 
++/*
++ * inode->i_mutex: down
++ */
++int
++ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++{
++	int error;
++
++	if (type == ACL_TYPE_ACCESS && acl) {
++		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++		if (error)
++			return error;
++		inode->i_ctime = CURRENT_TIME_SEC;
++		mark_inode_dirty(inode);
++	}
++	return __ext2_set_acl(inode, acl, type);
++}
++
+ /*
+  * Initialize the ACLs of a new inode. Called from ext2_new_inode.
+  *
+@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
+ 		return error;
+ 
+ 	if (default_acl) {
+-		error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++		error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+ 		posix_acl_release(default_acl);
+ 	}
+ 	if (acl) {
+ 		if (!error)
+-			error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
++			error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ 		posix_acl_release(acl);
+ 	}
+ 	return error;
+diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
+index fded02f72299..b7a39a185d01 100644
+--- a/fs/ext4/crypto_fname.c
++++ b/fs/ext4/crypto_fname.c
+@@ -346,8 +346,9 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
+ 	if (res == 0)
+ 		return NULL;
+ 
+-	if (!ext4_has_encryption_key(inode))
+-		ext4_generate_encryption_key(inode);
++	res = ext4_generate_encryption_key(inode);
++	if (res)
++		return ERR_PTR(res);
+ 
+ 	/* Get a crypto context based on the key.
+ 	 * A new context is allocated if no context matches the requested key.
+diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
+index 52170d0b7c40..4f9818719d61 100644
+--- a/fs/ext4/crypto_key.c
++++ b/fs/ext4/crypto_key.c
+@@ -99,9 +99,17 @@ int ext4_generate_encryption_key(struct inode *inode)
+ 	struct ext4_encryption_context ctx;
+ 	struct user_key_payload *ukp;
+ 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+-	int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+-				 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
+-				 &ctx, sizeof(ctx));
++	int res;
++
++	mutex_lock(&ei->i_encryption_lock);
++	if (ext4_has_encryption_key(inode)) {
++		mutex_unlock(&ei->i_encryption_lock);
++		return 0;
++	}
++
++	res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
++			     EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
++			     &ctx, sizeof(ctx));
+ 
+ 	if (res != sizeof(ctx)) {
+ 		if (res > 0)
+@@ -154,6 +162,7 @@ out:
+ 		key_put(keyring_key);
+ 	if (res < 0)
+ 		crypt_key->mode = EXT4_ENCRYPTION_MODE_INVALID;
++	mutex_unlock(&ei->i_encryption_lock);
+ 	return res;
+ }
+ 
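The new i_encryption_lock serializes key derivation so that two concurrent opens of the same encrypted inode cannot both run ext4_generate_encryption_key(). The shape is a check-under-lock, sketched here with pthreads and illustrative names:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t key_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool have_key;

	static int generate_key_once(void)
	{
		pthread_mutex_lock(&key_lock);
		if (have_key) {                /* another caller already won */
			pthread_mutex_unlock(&key_lock);
			return 0;
		}
		/* ... expensive key derivation happens exactly once ... */
		have_key = true;
		pthread_mutex_unlock(&key_lock);
		return 0;
	}

	int main(void)
	{
		generate_key_once();
		generate_key_once();           /* fast path, nothing re-derived */
		printf("have_key=%d\n", have_key);
		return 0;
	}
+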
+diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
+index a6d6291aea16..591fc37dcd9e 100644
+--- a/fs/ext4/crypto_policy.c
++++ b/fs/ext4/crypto_policy.c
+@@ -85,6 +85,9 @@ static int ext4_create_encryption_context_from_policy(
+ int ext4_process_policy(const struct ext4_encryption_policy *policy,
+ 			struct inode *inode)
+ {
++	if (!inode_owner_or_capable(inode))
++		return -EACCES;
++
+ 	if (policy->version != 0)
+ 		return -EINVAL;
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index df67a6f8582a..01771ed4529d 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -989,6 +989,7 @@ struct ext4_inode_info {
+ #ifdef CONFIG_EXT4_FS_ENCRYPTION
+ 	/* Encryption params */
+ 	struct ext4_encryption_key i_encryption_key;
++	struct mutex i_encryption_lock;
+ #endif
+ };
+ 
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 4196aa567784..dbe1ff511794 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -630,6 +630,9 @@ resizefs_out:
+ 		struct ext4_encryption_policy policy;
+ 		int err = 0;
+ 
++		if (!ext4_sb_has_crypto(sb))
++			return -EOPNOTSUPP;
++
+ 		if (copy_from_user(&policy,
+ 				   (struct ext4_encryption_policy __user *)arg,
+ 				   sizeof(policy))) {
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index b29a7ef4953e..c67056a8c901 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -669,6 +669,7 @@ __acquires(bitlock)
+ 	}
+ 
+ 	ext4_unlock_group(sb, grp);
++	ext4_commit_super(sb, 1);
+ 	ext4_handle_error(sb);
+ 	/*
+ 	 * We only get here in the ERRORS_RO case; relocking the group
+@@ -948,6 +949,9 @@ static void init_once(void *foo)
+ 	init_rwsem(&ei->xattr_sem);
+ 	init_rwsem(&ei->i_data_sem);
+ 	init_rwsem(&ei->i_mmap_sem);
++#ifdef CONFIG_EXT4_FS_ENCRYPTION
++	mutex_init(&ei->i_encryption_lock);
++#endif
+ 	inode_init_once(&ei->vfs_inode);
+ }
+ 
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index 62376451bbce..5df914943d96 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
+ 	int who = arg;
+ 	type = PIDTYPE_PID;
+ 	if (who < 0) {
++		/* avoid overflow below */
++		if (who == INT_MIN)
++			return;
++
+ 		type = PIDTYPE_PGID;
+ 		who = -who;
+ 	}
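+
The INT_MIN guard above closes a signed-overflow hole: F_SETOWN encodes a process group as a negative argument, but -INT_MIN is not representable in an int, so negating it is undefined behaviour. A minimal demonstration of the guarded negation:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int who = INT_MIN;

		if (who == INT_MIN) {
			puts("rejected: -INT_MIN is not representable");
			return 1;
		}
		who = -who; /* safe only after the guard */
		printf("pgid %d\n", who);
		return 0;
	}
+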
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 9ff28bc294c0..5d084638e1f8 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -272,7 +272,7 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
+ {
+ 	struct kernfs_open_file *of = kernfs_of(file);
+ 	const struct kernfs_ops *ops;
+-	size_t len;
++	ssize_t len;
+ 	char *buf;
+ 
+ 	if (of->atomic_write_len) {
+diff --git a/fs/locks.c b/fs/locks.c
+index 3c234b9fbdd9..af6fcf6e0dd0 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2192,10 +2192,12 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by
+-	 * releasing the lock that was just acquired.
++	 * Attempt to detect a close/fcntl race and recover by releasing the
++	 * lock that was just acquired. There is no need to do that when we're
++	 * unlocking though, or for OFD locks.
+ 	 */
+-	if (!error && file_lock->fl_type != F_UNLCK) {
++	if (!error && file_lock->fl_type != F_UNLCK &&
++	    !(file_lock->fl_flags & FL_OFDLCK)) {
+ 		/*
+ 		 * We need that spin_lock here - it prevents reordering between
+ 		 * update of i_flctx->flc_posix and check for it done in
+@@ -2334,10 +2336,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 	error = do_lock_file_wait(filp, cmd, file_lock);
+ 
+ 	/*
+-	 * Attempt to detect a close/fcntl race and recover by
+-	 * releasing the lock that was just acquired.
++	 * Attempt to detect a close/fcntl race and recover by releasing the
++	 * lock that was just acquired. There is no need to do that when we're
++	 * unlocking though, or for OFD locks.
+ 	 */
+-	if (!error && file_lock->fl_type != F_UNLCK) {
++	if (!error && file_lock->fl_type != F_UNLCK &&
++	    !(file_lock->fl_flags & FL_OFDLCK)) {
+ 		/*
+ 		 * We need that spin_lock here - it prevents reordering between
+ 		 * update of i_flctx->flc_posix and check for it done in
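+
Context for the exemption above: traditional POSIX locks are dropped when the process closes any descriptor referring to the file, so a successful F_SETLK must detect a concurrent close() and back the lock out again. OFD locks belong to the open file description itself and survive closes of other descriptors, so that recovery (and the spurious unlock it caused) is unnecessary for them. A minimal OFD lock user, for reference (path and mode are illustrative):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		int fd = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0600);
		struct flock fl;

		if (fd == -1)
			return 1;
		memset(&fl, 0, sizeof(fl));    /* l_pid must be 0 for OFD locks */
		fl.l_type = F_WRLCK;
		fl.l_whence = SEEK_SET;        /* l_start = l_len = 0: whole file */

		if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
			perror("F_OFD_SETLK");
		else
			puts("OFD write lock held by this open file description");
		return 0;
	}
+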
+diff --git a/fs/namei.c b/fs/namei.c
+index c7a6eabc02a5..0d97235019a9 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1894,6 +1894,9 @@ static int path_init(int dfd, const struct filename *name, unsigned int flags,
+ 	int retval = 0;
+ 	const char *s = name->name;
+ 
++	if (!*s)
++		flags &= ~LOOKUP_RCU;
++
+ 	nd->last_type = LAST_ROOT; /* if there are only slashes... */
+ 	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
+ 	nd->depth = 0;
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index 0c2632386f35..d2c969d1d9d2 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -133,12 +133,11 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
+ 		return 0;
+ 
+ 	if (!ncp_case_sensitive(inode)) {
+-		struct super_block *sb = dentry->d_sb;
+ 		struct nls_table *t;
+ 		unsigned long hash;
+ 		int i;
+ 
+-		t = NCP_IO_TABLE(sb);
++		t = NCP_IO_TABLE(dentry->d_sb);
+ 		hash = init_name_hash();
+ 		for (i=0; i<this->len ; i++)
+ 			hash = partial_name_hash(ncp_tolower(t, this->name[i]),
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index 38678d9a5cc4..cb050d1e8146 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -784,10 +784,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
+ 
+ 	spin_lock(&dreq->lock);
+ 
+-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
+-		dreq->flags = 0;
++	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ 		dreq->error = hdr->error;
+-	}
+ 	if (dreq->error == 0) {
+ 		nfs_direct_good_bytes(dreq, hdr);
+ 		if (nfs_write_need_commit(hdr)) {
+diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
+index 2e1737c40a29..27c4970ed32f 100644
+--- a/fs/nfs/nfs4idmap.c
++++ b/fs/nfs/nfs4idmap.c
+@@ -582,9 +582,13 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
+ 	struct idmap_msg *im;
+ 	struct idmap *idmap = (struct idmap *)aux;
+ 	struct key *key = cons->key;
+-	int ret = -ENOMEM;
++	int ret = -ENOKEY;
++
++	if (!aux)
++		goto out1;
+ 
+ 	/* msg and im are freed in idmap_pipe_destroy_msg */
++	ret = -ENOMEM;
+ 	data = kzalloc(sizeof(*data), GFP_KERNEL);
+ 	if (!data)
+ 		goto out1;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 51af4fff890f..209b39ef69dd 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1728,6 +1728,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
+ 		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
+ 	next:
+ 		nfs_unlock_and_release_request(req);
++		/* Latency breaker */
++		cond_resched();
+ 	}
+ 	nfss = NFS_SERVER(data->inode);
+ 	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
+diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
+index ae6e58ea4de5..450954d5b7f6 100644
+--- a/fs/nfs_common/grace.c
++++ b/fs/nfs_common/grace.c
+@@ -85,7 +85,9 @@ grace_exit_net(struct net *net)
+ {
+ 	struct list_head *grace_list = net_generic(net, grace_net_id);
+ 
+-	BUG_ON(!list_empty(grace_list));
++	WARN_ONCE(!list_empty(grace_list),
++		  "net %x %s: grace_list is not empty\n",
++		  net->ns.inum, __func__);
+ }
+ 
+ static struct pernet_operations grace_net_ops = {
+diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
+index 9d46a0bdd9f9..67eb154af881 100644
+--- a/fs/nfsd/auth.c
++++ b/fs/nfsd/auth.c
+@@ -59,7 +59,11 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
+ 				GROUP_AT(gi, i) = exp->ex_anon_gid;
+ 			else
+ 				GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
++
+ 		}
++
++		/* Each thread allocates its own gi, no race */
++		groups_sort(gi);
+ 	} else {
+ 		gi = get_group_info(rqgi);
+ 	}
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 529434f926f1..322cf41b5257 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
+ static const stateid_t currentstateid = {
+ 	.si_generation = 1,
+ };
++static const stateid_t close_stateid = {
++	.si_generation = 0xffffffffU,
++};
+ 
+ static u64 current_sessionid = 1;
+ 
+ #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
+ #define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
+ #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
++#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
+ 
+ /* forward declarations */
+ static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
+@@ -4615,7 +4619,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
+ 	struct nfs4_stid *s;
+ 	__be32 status = nfserr_bad_stateid;
+ 
+-	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++		CLOSE_STATEID(stateid))
+ 		return status;
+ 	/* Client debugging aid. */
+ 	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
+@@ -4673,7 +4678,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 	else if (typemask & NFS4_DELEG_STID)
+ 		typemask |= NFS4_REVOKED_DELEG_STID;
+ 
+-	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
++	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
++		CLOSE_STATEID(stateid))
+ 		return nfserr_bad_stateid;
+ 	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
+ 	if (status == nfserr_stale_clientid) {
+@@ -5107,6 +5113,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 
+ 	nfsd4_close_open_stateid(stp);
+ 
++	/* See RFC5661 section 18.2.4 */
++	if (stp->st_stid.sc_client->cl_minorversion)
++		memcpy(&close->cl_stateid, &close_stateid,
++				sizeof(close->cl_stateid));
++
+ 	/* put reference from nfs4_preprocess_seqid_op */
+ 	nfs4_put_stid(&stp->st_stid);
+ out:
+diff --git a/fs/nsfs.c b/fs/nsfs.c
+index 99521e7c492b..845f29e15ac9 100644
+--- a/fs/nsfs.c
++++ b/fs/nsfs.c
+@@ -94,6 +94,7 @@ slow:
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 	d_instantiate(dentry, inode);
++	dentry->d_flags |= DCACHE_RCUACCESS;
+ 	dentry->d_fsdata = (void *)ns_ops;
+ 	d = atomic_long_cmpxchg(&ns->stashed, 0, (unsigned long)dentry);
+ 	if (d) {
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index adcb1398c481..299a6e1d6b77 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -441,10 +441,14 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
+ 	struct dentry *dentry = file->f_path.dentry;
+ 	struct file *realfile = od->realfile;
+ 
++	/* Nothing to sync for lower */
++	if (!OVL_TYPE_UPPER(ovl_path_type(dentry)))
++		return 0;
++
+ 	/*
+ 	 * Need to check if we started out being a lower dir, but got copied up
+ 	 */
+-	if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
++	if (!od->is_upper) {
+ 		struct inode *inode = file_inode(file);
+ 
+ 		realfile = lockless_dereference(od->upperfile);
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 5916c19dbb02..dbea65d88398 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -999,6 +999,9 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ {
+ 	struct pipe_buffer *bufs;
+ 
++	if (!nr_pages)
++		return -EINVAL;
++
+ 	/*
+ 	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
+ 	 * expect a lot of shrink+grow operations, just free and allocate
+@@ -1043,13 +1046,19 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ 
+ /*
+  * Currently we rely on the pipe array holding a power-of-2 number
+- * of pages.
++ * of pages. Returns 0 on error.
+  */
+ static inline unsigned int round_pipe_size(unsigned int size)
+ {
+ 	unsigned long nr_pages;
+ 
++	if (size < pipe_min_size)
++		size = pipe_min_size;
++
+ 	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++	if (nr_pages == 0)
++		return 0;
++
+ 	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+ }
+ 
+@@ -1060,13 +1069,18 @@ static inline unsigned int round_pipe_size(unsigned int size)
+ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
+ 		 size_t *lenp, loff_t *ppos)
+ {
++	unsigned int rounded_pipe_max_size;
+ 	int ret;
+ 
+ 	ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
+ 	if (ret < 0 || !write)
+ 		return ret;
+ 
+-	pipe_max_size = round_pipe_size(pipe_max_size);
++	rounded_pipe_max_size = round_pipe_size(pipe_max_size);
++	if (rounded_pipe_max_size == 0)
++		return -EINVAL;
++
++	pipe_max_size = rounded_pipe_max_size;
+ 	return ret;
+ }
+ 
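round_pipe_size() previously fed an attacker-chosen size straight into page rounding, where 32-bit arithmetic can wrap the page count to zero. A userspace model of the fixed function, assuming a 4 KiB page and a one-page minimum (illustrative; the kernel clamps to pipe_min_size):

	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define PIPE_MIN_SIZE PAGE_SIZE

	static unsigned long roundup_pow_of_two(unsigned long n)
	{
		unsigned long r = 1;

		while (r < n)
			r <<= 1;
		return r;
	}

	static unsigned int round_pipe_size(unsigned int size)
	{
		unsigned int nr_pages;

		if (size < PIPE_MIN_SIZE)
			size = PIPE_MIN_SIZE;

		nr_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE; /* may wrap */
		if (nr_pages == 0)
			return 0;              /* caller turns this into -EINVAL */

		return roundup_pow_of_two(nr_pages) * PAGE_SIZE;
	}

	int main(void)
	{
		printf("%u -> %u\n", 5000u, round_pipe_size(5000));               /* 8192 */
		printf("%u -> %u\n", 0xffffffffu, round_pipe_size(0xffffffffu)); /* 0 */
		return 0;
	}

pipe_proc_fn() now refuses the sysctl write with -EINVAL instead of installing a zero-slot buffer ring.
+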
+diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
+index 19c777ad0084..4f3b028e3a1f 100644
+--- a/fs/quota/dquot.c
++++ b/fs/quota/dquot.c
+@@ -2881,7 +2881,8 @@ static int __init dquot_init(void)
+ 	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
+ 		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
+ 
+-	register_shrinker(&dqcache_shrinker);
++	if (register_shrinker(&dqcache_shrinker))
++		panic("Cannot register dquot shrinker");
+ 
+ 	return 0;
+ }
+diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
+index dc198bc64c61..edc8ef78b63f 100644
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
+ 			       "inode has negative prealloc blocks count.");
+ #endif
+ 	while (ei->i_prealloc_count > 0) {
+-		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+-		ei->i_prealloc_block++;
++		b_blocknr_t block_to_free;
++
++		/*
++		 * reiserfs_free_prealloc_block can drop the write lock,
++		 * which could allow another caller to free the same block.
++		 * We can protect against it by modifying the prealloc
++		 * state before calling it.
++		 */
++		block_to_free = ei->i_prealloc_block++;
+ 		ei->i_prealloc_count--;
++		reiserfs_free_prealloc_block(th, inode, block_to_free);
+ 		dirty = 1;
+ 	}
+ 	if (dirty)
+@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
+ 	hint->prealloc_size = 0;
+ 
+ 	if (!hint->formatted_node && hint->preallocate) {
+-		if (S_ISREG(hint->inode->i_mode)
++		if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
+ 		    && hint->inode->i_size >=
+ 		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
+ 		    preallocmin * hint->inode->i_sb->s_blocksize)
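+
The reordering above is the whole fix: reiserfs_free_prealloc_block() can drop the per-superblock write lock, and advancing i_prealloc_block/i_prealloc_count only after it returned let a second caller free the same block in that window. The safe shape, publish the new state before calling anything that may drop the lock, sketched with pthreads and hypothetical names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int next_block = 100, count = 3;

	/* stands in for reiserfs_free_prealloc_block(): it may drop and
	 * retake the lock, letting another caller run in the window */
	static void free_block(int block)
	{
		pthread_mutex_unlock(&lock);
		printf("freeing block %d\n", block);
		pthread_mutex_lock(&lock);
	}

	static void discard_prealloc(void)
	{
		pthread_mutex_lock(&lock);
		while (count > 0) {
			int block_to_free = next_block++; /* publish state... */
			count--;
			free_block(block_to_free);        /* ...before the window */
		}
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		discard_prealloc();
		return 0;
	}
+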
+diff --git a/fs/reiserfs/lbalance.c b/fs/reiserfs/lbalance.c
+index 249594a821e0..f5cebd70d903 100644
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -475,7 +475,7 @@ static void leaf_item_bottle(struct buffer_info *dest_bi,
+ 			 * 'cpy_bytes'; create new item header;
+ 			 * n_ih = new item_header;
+ 			 */
+-			memcpy(&n_ih, ih, SHORT_KEY_SIZE);
++			memcpy(&n_ih.ih_key, &ih->ih_key, KEY_SIZE);
+ 
+ 			/* Endian safe, both le */
+ 			n_ih.ih_version = ih->ih_version;
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index 2adcde137c3f..5dcf3ab83886 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -1326,7 +1326,6 @@ struct cpu_key {
+ #define KEY_NOT_FOUND 0
+ 
+ #define KEY_SIZE (sizeof(struct reiserfs_key))
+-#define SHORT_KEY_SIZE (sizeof (__u32) + sizeof (__u32))
+ 
+ /* return values for search_by_key and clones */
+ #define ITEM_FOUND 1
+diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
+index 9b1824f35501..91b036902a17 100644
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+ 	reiserfs_write_unlock(inode->i_sb);
+ 	if (error == 0) {
++		if (type == ACL_TYPE_ACCESS && acl) {
++			error = posix_acl_update_mode(inode, &inode->i_mode,
++						      &acl);
++			if (error)
++				goto unlock;
++		}
+ 		error = __reiserfs_set_acl(&th, inode, type, acl);
++unlock:
+ 		reiserfs_write_lock(inode->i_sb);
+ 		error2 = journal_end(&th);
+ 		reiserfs_write_unlock(inode->i_sb);
+@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+-		if (acl) {
+-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-			if (error)
+-				return error;
+-		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+ 		name = POSIX_ACL_XATTR_DEFAULT;
+diff --git a/fs/select.c b/fs/select.c
+index f684c750e08a..f7e6fc7bf83c 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -29,6 +29,7 @@
+ #include <linux/sched/rt.h>
+ #include <linux/freezer.h>
+ #include <net/busy_poll.h>
++#include <linux/vmalloc.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 	fd_set_bits fds;
+ 	void *bits;
+ 	int ret, max_fds;
+-	unsigned int size;
++	size_t size, alloc_size;
+ 	struct fdtable *fdt;
+ 	/* Allocate small arguments on the stack to save memory and be faster */
+ 	long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
+@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 	if (size > sizeof(stack_fds) / 6) {
+ 		/* Not enough space in on-stack array; must use kmalloc */
+ 		ret = -ENOMEM;
+-		bits = kmalloc(6 * size, GFP_KERNEL);
++		if (size > (SIZE_MAX / 6))
++			goto out_nofds;
++
++		alloc_size = 6 * size;
++		bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
++		if (!bits && alloc_size > PAGE_SIZE)
++			bits = vmalloc(alloc_size);
++
+ 		if (!bits)
+ 			goto out_nofds;
+ 	}
+@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
+ 
+ out:
+ 	if (bits != stack_fds)
+-		kfree(bits);
++		kvfree(bits);
+ out_nofds:
+ 	return ret;
+ }
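+
Two hardening steps happen in core_sys_select() above: the 6 * size multiplication is checked against SIZE_MAX / 6 so a huge nfds cannot wrap the allocation size, and large but legitimate requests fall back from kmalloc() to vmalloc(), freed through kvfree(), which dispatches on the address. The overflow guard in isolation:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t size = SIZE_MAX / 4;    /* hostile, nfds-derived size */
		void *bits;

		if (size > SIZE_MAX / 6) {     /* 6 * size would wrap around */
			puts("rejected: six bitmaps would overflow size_t");
			return 1;
		}
		bits = malloc(6 * size);       /* never reached for this size */
		free(bits);
		return 0;
	}
+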
+diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
+index a56960dd1684..123ec87efac0 100644
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -303,7 +303,7 @@ xfs_map_blocks(
+ 	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+ 	ASSERT(offset <= mp->m_super->s_maxbytes);
+ 
+-	if (offset + count > mp->m_super->s_maxbytes)
++	if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+ 		count = mp->m_super->s_maxbytes - offset;
+ 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+ 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+@@ -1332,7 +1332,7 @@ xfs_map_trim_size(
+ 	if (mapping_size > size)
+ 		mapping_size = size;
+ 	if (offset < i_size_read(inode) &&
+-	    offset + mapping_size >= i_size_read(inode)) {
++	    (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
+ 		/* limit mapping to block that spans EOF */
+ 		mapping_size = roundup_64(i_size_read(inode) - offset,
+ 					  1 << inode->i_blkbits);
+@@ -1387,7 +1387,7 @@ __xfs_get_blocks(
+ 	}
+ 
+ 	ASSERT(offset <= mp->m_super->s_maxbytes);
+-	if (offset + size > mp->m_super->s_maxbytes)
++	if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+ 		size = mp->m_super->s_maxbytes - offset;
+ 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+ 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+diff --git a/include/asm-generic/asm-prototypes.h b/include/asm-generic/asm-prototypes.h
+new file mode 100644
+index 000000000000..df13637e4017
+--- /dev/null
++++ b/include/asm-generic/asm-prototypes.h
+@@ -0,0 +1,7 @@
++#include <linux/bitops.h>
++extern void *__memset(void *, int, __kernel_size_t);
++extern void *__memcpy(void *, const void *, __kernel_size_t);
++extern void *__memmove(void *, const void *, __kernel_size_t);
++extern void *memset(void *, int, __kernel_size_t);
++extern void *memcpy(void *, const void *, __kernel_size_t);
++extern void *memmove(void *, const void *, __kernel_size_t);
+diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
+new file mode 100644
+index 000000000000..43199a049da5
+--- /dev/null
++++ b/include/asm-generic/export.h
+@@ -0,0 +1,94 @@
++#ifndef __ASM_GENERIC_EXPORT_H
++#define __ASM_GENERIC_EXPORT_H
++
++#ifndef KSYM_FUNC
++#define KSYM_FUNC(x) x
++#endif
++#ifdef CONFIG_64BIT
++#define __put .quad
++#ifndef KSYM_ALIGN
++#define KSYM_ALIGN 8
++#endif
++#ifndef KCRC_ALIGN
++#define KCRC_ALIGN 8
++#endif
++#else
++#define __put .long
++#ifndef KSYM_ALIGN
++#define KSYM_ALIGN 4
++#endif
++#ifndef KCRC_ALIGN
++#define KCRC_ALIGN 4
++#endif
++#endif
++
++#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
++#define KSYM(name) _##name
++#else
++#define KSYM(name) name
++#endif
++
++/*
++ * note on .section use: @progbits vs %progbits nastiness doesn't matter,
++ * since we immediately emit into those sections anyway.
++ */
++.macro ___EXPORT_SYMBOL name,val,sec
++#ifdef CONFIG_MODULES
++	.globl KSYM(__ksymtab_\name)
++	.section ___ksymtab\sec+\name,"a"
++	.balign KSYM_ALIGN
++KSYM(__ksymtab_\name):
++	__put \val, KSYM(__kstrtab_\name)
++	.previous
++	.section __ksymtab_strings,"a"
++KSYM(__kstrtab_\name):
++#ifdef CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX
++	.asciz "_\name"
++#else
++	.asciz "\name"
++#endif
++	.previous
++#ifdef CONFIG_MODVERSIONS
++	.section ___kcrctab\sec+\name,"a"
++	.balign KCRC_ALIGN
++KSYM(__kcrctab_\name):
++	__put KSYM(__crc_\name)
++	.weak KSYM(__crc_\name)
++	.previous
++#endif
++#endif
++.endm
++#undef __put
++
++#if defined(__KSYM_DEPS__)
++
++#define __EXPORT_SYMBOL(sym, val, sec)	=== __KSYM_##sym ===
++
++#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
++
++#include <linux/kconfig.h>
++#include <generated/autoksyms.h>
++
++#define __EXPORT_SYMBOL(sym, val, sec)				\
++	__cond_export_sym(sym, val, sec, config_enabled(__KSYM_##sym))
++#define __cond_export_sym(sym, val, sec, conf)			\
++	___cond_export_sym(sym, val, sec, conf)
++#define ___cond_export_sym(sym, val, sec, enabled)		\
++	__cond_export_sym_##enabled(sym, val, sec)
++#define __cond_export_sym_1(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
++#define __cond_export_sym_0(sym, val, sec) /* nothing */
++
++#else
++#define __EXPORT_SYMBOL(sym, val, sec) ___EXPORT_SYMBOL sym, val, sec
++#endif
++
++#define EXPORT_SYMBOL(name)					\
++	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)),)
++#define EXPORT_SYMBOL_GPL(name) 				\
++	__EXPORT_SYMBOL(name, KSYM_FUNC(KSYM(name)), _gpl)
++#define EXPORT_DATA_SYMBOL(name)				\
++	__EXPORT_SYMBOL(name, KSYM(name),)
++#define EXPORT_DATA_SYMBOL_GPL(name)				\
++	__EXPORT_SYMBOL(name, KSYM(name),_gpl)
++
++#endif
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
+index 9779c35f8454..dab9569f22bf 100644
+--- a/include/crypto/internal/hash.h
++++ b/include/crypto/internal/hash.h
+@@ -91,6 +91,8 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
+ 	return alg->setkey != shash_no_setkey;
+ }
+ 
++bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
++
+ int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ 			    struct hash_alg_common *alg,
+ 			    struct crypto_instance *inst);
+diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
+index c23ee1f7ee80..c2ff077168d3 100644
+--- a/include/crypto/mcryptd.h
++++ b/include/crypto/mcryptd.h
+@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
+ 
+ struct mcryptd_cpu_queue {
+ 	struct crypto_queue queue;
++	spinlock_t q_lock;
+ 	struct work_struct work;
+ };
+ 
+diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
+index 3daf5ed392c9..4b97ae264388 100644
+--- a/include/linux/cacheinfo.h
++++ b/include/linux/cacheinfo.h
+@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
+ 	struct cacheinfo *info_list;
+ 	unsigned int num_levels;
+ 	unsigned int num_leaves;
++	bool cpu_map_populated;
+ };
+ 
+ /*
+diff --git a/include/linux/cred.h b/include/linux/cred.h
+index 8b6c083e68a7..536d873ad6e5 100644
+--- a/include/linux/cred.h
++++ b/include/linux/cred.h
+@@ -87,6 +87,7 @@ extern int set_current_groups(struct group_info *);
+ extern void set_groups(struct cred *, struct group_info *);
+ extern int groups_search(const struct group_info *, kgid_t);
+ extern bool may_setgroups(void);
++extern void groups_sort(struct group_info *);
+ 
+ /* access the groups "array" with this macro */
+ #define GROUP_AT(gi, i) \
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 98a1d9748eec..84a1c7e49c51 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -1208,8 +1208,11 @@ do {									\
+ 		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
+ } while (0)
+ #else
+-#define dev_dbg_ratelimited(dev, fmt, ...)			\
+-	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
++#define dev_dbg_ratelimited(dev, fmt, ...)				\
++do {									\
++	if (0)								\
++		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
++} while (0)
+ #endif
+ 
+ #ifdef VERBOSE_DEBUG
+diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
+index 230f87bdf5ad..2c084871e833 100644
+--- a/include/linux/fdtable.h
++++ b/include/linux/fdtable.h
+@@ -9,6 +9,7 @@
+ #include <linux/compiler.h>
+ #include <linux/spinlock.h>
+ #include <linux/rcupdate.h>
++#include <linux/nospec.h>
+ #include <linux/types.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
+@@ -76,8 +77,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i
+ {
+ 	struct fdtable *fdt = rcu_dereference_raw(files->fdt);
+ 
+-	if (fd < fdt->max_fds)
++	if (fd < fdt->max_fds) {
++		fd = array_index_nospec(fd, fdt->max_fds);
+ 		return rcu_dereference_raw(fdt->fd[fd]);
++	}
+ 	return NULL;
+ }
+ 
+diff --git a/include/linux/fscache.h b/include/linux/fscache.h
+index 115bb81912cc..94a8aae8f9e2 100644
+--- a/include/linux/fscache.h
++++ b/include/linux/fscache.h
+@@ -764,7 +764,7 @@ bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+ {
+ 	if (fscache_cookie_valid(cookie) && PageFsCache(page))
+ 		return __fscache_maybe_release_page(cookie, page, gfp);
+-	return false;
++	return true;
+ }
+ 
+ /**
+diff --git a/include/linux/init.h b/include/linux/init.h
+index 21b6d768edd7..5f4d931095ce 100644
+--- a/include/linux/init.h
++++ b/include/linux/init.h
+@@ -4,6 +4,13 @@
+ #include <linux/compiler.h>
+ #include <linux/types.h>
+ 
++/* Built-in __init functions needn't be compiled with retpoline */
++#if defined(RETPOLINE) && !defined(MODULE)
++#define __noretpoline __attribute__((indirect_branch("keep")))
++#else
++#define __noretpoline
++#endif
++
+ /* These macros are used to mark some functions or 
+  * initialized data (doesn't apply to uninitialized data)
+  * as `initialization' functions. The kernel can take this
+@@ -39,7 +46,7 @@
+ 
+ /* These are for everybody (although not all archs will actually
+    discard it in modules) */
+-#define __init		__section(.init.text) __cold notrace
++#define __init		__section(.init.text) __cold notrace __noretpoline
+ #define __initdata	__section(.init.data)
+ #define __initconst	__constsection(.init.rodata)
+ #define __exitdata	__section(.exit.data)
+diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
+index b33c7797eb57..a94b5bf57f51 100644
+--- a/include/linux/kconfig.h
++++ b/include/linux/kconfig.h
+@@ -17,10 +17,11 @@
+  * the last step cherry picks the 2nd arg, we get a zero.
+  */
+ #define __ARG_PLACEHOLDER_1 0,
+-#define config_enabled(cfg) _config_enabled(cfg)
+-#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
+-#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
+-#define ___config_enabled(__ignored, val, ...) val
++#define config_enabled(cfg)		___is_defined(cfg)
++#define __is_defined(x)			___is_defined(x)
++#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
++#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
++#define __take_second_arg(__ignored, val, ...) val
+ 
+ /*
+  * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+@@ -42,7 +43,7 @@
+  * built-in code when CONFIG_FOO is set to 'm'.
+  */
+ #define IS_REACHABLE(option) (config_enabled(option) || \
+-		 (config_enabled(option##_MODULE) && config_enabled(MODULE)))
++		 (config_enabled(option##_MODULE) && __is_defined(MODULE)))
+ 
+ /*
+  * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
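The rename from config_enabled() to __is_defined() keeps the same preprocessor trick: a macro defined to 1 turns __ARG_PLACEHOLDER_##val into "0,", shifting an extra argument into view so the second-argument picker returns 1; anything else collapses into a single first argument and the picker returns 0. A standalone demonstration that compiles with any C compiler:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define __take_second_arg(__ignored, val, ...) val

#define CONFIG_FOO 1
/* CONFIG_BAR deliberately left undefined */

int main(void)
{
	printf("FOO: %d\n", is_defined(CONFIG_FOO));	/* prints 1 */
	printf("BAR: %d\n", is_defined(CONFIG_BAR));	/* prints 0 */
	return 0;
}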
+diff --git a/include/linux/ktime.h b/include/linux/ktime.h
+index 2b6a204bd8d4..3ffc69ebe967 100644
+--- a/include/linux/ktime.h
++++ b/include/linux/ktime.h
+@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
+ #define ktime_add(lhs, rhs) \
+ 		({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+ 
++/*
++ * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
++ * this means that you must check the result for overflow yourself.
++ */
++#define ktime_add_unsafe(lhs, rhs) \
++		({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
++
+ /*
+  * Add a ktime_t variable and a scalar nanosecond value.
+  * res = kt + nsval:
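ktime_add_unsafe() casts one operand to u64 so the sum wraps (unsigned overflow is defined behaviour) rather than triggering signed-overflow undefined behaviour; ktime_add_safe() then clamps the wrapped result. The idiom reduced to plain C, assuming two's-complement conversion semantics as on all kernel-supported compilers:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Add two signed 64-bit values without signed-overflow UB, then clamp. */
static int64_t add_clamped(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);	/* defined wrap */

	/* Two positive operands that wrapped negative mean overflow. */
	if (res < 0 && a > 0 && b > 0)
		res = INT64_MAX;
	return res;
}

int main(void)
{
	printf("%lld\n", (long long)add_clamped(INT64_MAX, 1));	/* clamped */
	return 0;
}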
+diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
+index 70fffeba7495..a4441784503b 100644
+--- a/include/linux/mmu_context.h
++++ b/include/linux/mmu_context.h
+@@ -1,9 +1,16 @@
+ #ifndef _LINUX_MMU_CONTEXT_H
+ #define _LINUX_MMU_CONTEXT_H
+ 
++#include <asm/mmu_context.h>
++
+ struct mm_struct;
+ 
+ void use_mm(struct mm_struct *mm);
+ void unuse_mm(struct mm_struct *mm);
+ 
++/* Architectures that care about IRQ state in switch_mm can override this. */
++#ifndef switch_mm_irqs_off
++# define switch_mm_irqs_off switch_mm
++#endif
++
+ #endif
+diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
+index dfff2baf836b..0deabc012551 100644
+--- a/include/linux/mtd/map.h
++++ b/include/linux/mtd/map.h
+@@ -265,75 +265,67 @@ void map_destroy(struct mtd_info *mtd);
+ #define INVALIDATE_CACHED_RANGE(map, from, size) \
+ 	do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
+ 
+-
+-static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2)
+-{
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++) {
+-		if (val1.x[i] != val2.x[i])
+-			return 0;
+-	}
+-
+-	return 1;
+-}
+-
+-static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2)
+-{
+-	map_word r;
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++)
+-		r.x[i] = val1.x[i] & val2.x[i];
+-
+-	return r;
+-}
+-
+-static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2)
+-{
+-	map_word r;
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++)
+-		r.x[i] = val1.x[i] & ~val2.x[i];
+-
+-	return r;
+-}
+-
+-static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2)
+-{
+-	map_word r;
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++)
+-		r.x[i] = val1.x[i] | val2.x[i];
+-
+-	return r;
+-}
+-
+-static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3)
+-{
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++) {
+-		if ((val1.x[i] & val2.x[i]) != val3.x[i])
+-			return 0;
+-	}
+-
+-	return 1;
+-}
+-
+-static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2)
+-{
+-	int i;
+-
+-	for (i = 0; i < map_words(map); i++) {
+-		if (val1.x[i] & val2.x[i])
+-			return 1;
+-	}
+-
+-	return 0;
+-}
++#define map_word_equal(map, val1, val2)					\
++({									\
++	int i, ret = 1;							\
++	for (i = 0; i < map_words(map); i++)				\
++		if ((val1).x[i] != (val2).x[i]) {			\
++			ret = 0;					\
++			break;						\
++		}							\
++	ret;								\
++})
++
++#define map_word_and(map, val1, val2)					\
++({									\
++	map_word r;							\
++	int i;								\
++	for (i = 0; i < map_words(map); i++)				\
++		r.x[i] = (val1).x[i] & (val2).x[i];			\
++	r;								\
++})
++
++#define map_word_clr(map, val1, val2)					\
++({									\
++	map_word r;							\
++	int i;								\
++	for (i = 0; i < map_words(map); i++)				\
++		r.x[i] = (val1).x[i] & ~(val2).x[i];			\
++	r;								\
++})
++
++#define map_word_or(map, val1, val2)					\
++({									\
++	map_word r;							\
++	int i;								\
++	for (i = 0; i < map_words(map); i++)				\
++		r.x[i] = (val1).x[i] | (val2).x[i];			\
++	r;								\
++})
++
++#define map_word_andequal(map, val1, val2, val3)			\
++({									\
++	int i, ret = 1;							\
++	for (i = 0; i < map_words(map); i++) {				\
++		if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) {	\
++			ret = 0;					\
++			break;						\
++		}							\
++	}								\
++	ret;								\
++})
++
++#define map_word_bitsset(map, val1, val2)				\
++({									\
++	int i, ret = 0;							\
++	for (i = 0; i < map_words(map); i++) {				\
++		if ((val1).x[i] & (val2).x[i]) {			\
++			ret = 1;					\
++			break;						\
++		}							\
++	}								\
++	ret;								\
++})
+ 
+ static inline map_word map_word_load(struct map_info *map, const void *ptr)
+ {
+diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
+index 1c28f8879b1c..067b37aff4a1 100644
+--- a/include/linux/mtd/sh_flctl.h
++++ b/include/linux/mtd/sh_flctl.h
+@@ -148,6 +148,7 @@ struct sh_flctl {
+ 	struct platform_device	*pdev;
+ 	struct dev_pm_qos_request pm_qos;
+ 	void __iomem		*reg;
++	resource_size_t		fifo;
+ 
+ 	uint8_t	done_buff[2048 + 64];	/* max size 2048 + 64 */
+ 	int	read_bytes;
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+new file mode 100644
+index 000000000000..b99bced39ac2
+--- /dev/null
++++ b/include/linux/nospec.h
+@@ -0,0 +1,72 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright(c) 2018 Linus Torvalds. All rights reserved.
++// Copyright(c) 2018 Alexei Starovoitov. All rights reserved.
++// Copyright(c) 2018 Intel Corporation. All rights reserved.
++
++#ifndef _LINUX_NOSPEC_H
++#define _LINUX_NOSPEC_H
++
++/**
++ * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ *
++ * When @index is out of bounds (@index >= @size), the sign bit will be
++ * set.  Extend the sign bit to all bits and invert, giving a result of
++ * zero for an out of bounds index, or ~0 if within bounds [0, @size).
++ */
++#ifndef array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++						    unsigned long size)
++{
++	/*
++	 * Warn developers about inappropriate array_index_nospec() usage.
++	 *
++	 * Even if the CPU speculates past the WARN_ONCE branch, the
++	 * sign bit of @index is taken into account when generating the
++	 * mask.
++	 *
++	 * This warning is compiled out when the compiler can infer that
++	 * @index and @size are less than LONG_MAX.
++	 */
++	if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX,
++			"array_index_nospec() limited to range of [0, LONG_MAX]\n"))
++		return 0;
++
++	/*
++	 * Always calculate and emit the mask even if the compiler
++	 * thinks the mask is not needed. The compiler does not take
++	 * into account the value of @index under speculation.
++	 */
++	OPTIMIZER_HIDE_VAR(index);
++	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
++}
++#endif
++
++/*
++ * array_index_nospec - sanitize an array index after a bounds check
++ *
++ * For a code sequence like:
++ *
++ *     if (index < size) {
++ *         index = array_index_nospec(index, size);
++ *         val = array[index];
++ *     }
++ *
++ * ...if the CPU speculates past the bounds check then
++ * array_index_nospec() will clamp the index within the range of [0,
++ * size).
++ */
++#define array_index_nospec(index, size)					\
++({									\
++	typeof(index) _i = (index);					\
++	typeof(size) _s = (size);					\
++	unsigned long _mask = array_index_mask_nospec(_i, _s);		\
++									\
++	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
++	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
++									\
++	_i &= _mask;							\
++	_i;								\
++})
++#endif /* _LINUX_NOSPEC_H */
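The intended call pattern is exactly the one applied to __fcheck_files() earlier in this patch: bounds check first, then clamp the index before the dereference. A minimal kernel-context sketch (the table and lookup_entry() are illustrative, not from the patch):

#include <linux/nospec.h>

#define TABLE_SIZE 16
static int table[TABLE_SIZE];

static int lookup_entry(unsigned long idx)
{
	if (idx >= TABLE_SIZE)
		return -1;
	/* Clamp idx under speculation so table[idx] cannot read
	 * out of bounds even on a mispredicted branch. */
	idx = array_index_nospec(idx, TABLE_SIZE);
	return table[idx];
}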
+diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
+index 8c9131db2b25..b050ef51e27e 100644
+--- a/include/linux/sh_eth.h
++++ b/include/linux/sh_eth.h
+@@ -16,7 +16,6 @@ struct sh_eth_plat_data {
+ 	unsigned char mac_addr[ETH_ALEN];
+ 	unsigned no_ether_link:1;
+ 	unsigned ether_link_active_low:1;
+-	unsigned needs_init:1;
+ };
+ 
+ #endif
+diff --git a/include/linux/string.h b/include/linux/string.h
+index e40099e585c9..f8902cc0c10d 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -118,6 +118,7 @@ extern char *kstrdup(const char *s, gfp_t gfp);
+ extern const char *kstrdup_const(const char *s, gfp_t gfp);
+ extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
+ extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
++extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
+ 
+ extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
+ extern void argv_free(char **argv);
+diff --git a/include/linux/tcp.h b/include/linux/tcp.h
+index e8bbf403618f..5eeeca0b25f1 100644
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
+ 	return (struct tcphdr *)skb_transport_header(skb);
+ }
+ 
++static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
++{
++	return th->doff * 4;
++}
++
+ static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
+ {
+-	return tcp_hdr(skb)->doff * 4;
++	return __tcp_hdrlen(tcp_hdr(skb));
+ }
+ 
+ static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
+diff --git a/include/net/arp.h b/include/net/arp.h
+index 5e0f891d476c..1b3f86981757 100644
+--- a/include/net/arp.h
++++ b/include/net/arp.h
+@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
+ 
+ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+ {
++	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++		key = INADDR_ANY;
++
+ 	return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
+ }
+ 
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 117bde93995d..80b849cadc35 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -33,6 +33,8 @@
+ #include <net/flow.h>
+ #include <net/flow_keys.h>
+ 
++#define IPV4_MIN_MTU		68			/* RFC 791 */
++
+ struct sock;
+ 
+ struct inet_skb_parm {
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index f733656404de..01af6cd44c67 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -203,6 +203,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+ 	return net1 == net2;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++	return atomic_read(&net->count) != 0;
++}
++
+ void net_drop_ns(void *);
+ 
+ #else
+@@ -227,6 +232,11 @@ int net_eq(const struct net *net1, const struct net *net2)
+ 	return 1;
+ }
+ 
++static inline int check_net(const struct net *net)
++{
++	return 1;
++}
++
+ #define net_drop_ns NULL
+ #endif
+ 
+diff --git a/include/net/netlink.h b/include/net/netlink.h
+index 2a5dbcc90d1c..9bb53469b704 100644
+--- a/include/net/netlink.h
++++ b/include/net/netlink.h
+@@ -745,7 +745,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
+  */
+ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u8), &value);
++	/* temporary variables to work around GCC PR81715 with asan-stack=1 */
++	u8 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u8), &tmp);
+ }
+ 
+ /**
+@@ -756,7 +759,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+  */
+ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u16), &value);
++	u16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u16), &tmp);
+ }
+ 
+ /**
+@@ -767,7 +772,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+  */
+ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__be16), &value);
++	__be16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__be16), &tmp);
+ }
+ 
+ /**
+@@ -778,7 +785,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
+  */
+ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+ {
+-	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
++	__be16 tmp = value;
++
++	return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+ }
+ 
+ /**
+@@ -789,7 +798,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
+  */
+ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__le16), &value);
++	__le16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__le16), &tmp);
+ }
+ 
+ /**
+@@ -800,7 +811,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
+  */
+ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u32), &value);
++	u32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u32), &tmp);
+ }
+ 
+ /**
+@@ -811,7 +824,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+  */
+ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__be32), &value);
++	__be32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__be32), &tmp);
+ }
+ 
+ /**
+@@ -822,7 +837,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
+  */
+ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+ {
+-	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
++	__be32 tmp = value;
++
++	return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+ }
+ 
+ /**
+@@ -833,7 +850,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
+  */
+ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__le32), &value);
++	__le32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__le32), &tmp);
+ }
+ 
+ /**
+@@ -844,7 +863,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
+  */
+ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(u64), &value);
++	u64 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(u64), &tmp);
+ }
+ 
+ /**
+@@ -855,7 +876,9 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+  */
+ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__be64), &value);
++	__be64 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__be64), &tmp);
+ }
+ 
+ /**
+@@ -866,7 +889,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
+  */
+ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+ {
+-	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
++	__be64 tmp = value;
++
++	return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
+ }
+ 
+ /**
+@@ -877,7 +902,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
+  */
+ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(__le64), &value);
++	__le64 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(__le64), &tmp);
+ }
+ 
+ /**
+@@ -888,7 +915,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
+  */
+ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s8), &value);
++	s8 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s8), &tmp);
+ }
+ 
+ /**
+@@ -899,7 +928,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
+  */
+ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s16), &value);
++	s16 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s16), &tmp);
+ }
+ 
+ /**
+@@ -910,7 +941,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
+  */
+ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s32), &value);
++	s32 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s32), &tmp);
+ }
+ 
+ /**
+@@ -921,7 +954,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
+  */
+ static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
+ {
+-	return nla_put(skb, attrtype, sizeof(s64), &value);
++	s64 tmp = value;
++
++	return nla_put(skb, attrtype, sizeof(s64), &tmp);
+ }
+ 
+ /**
+@@ -969,7 +1004,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
+ static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
+ 				  __be32 addr)
+ {
+-	return nla_put_be32(skb, attrtype, addr);
++	__be32 tmp = addr;
++
++	return nla_put_be32(skb, attrtype, tmp);
+ }
+ 
+ /**
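Each nla_put helper above now copies its parameter into a local before passing the address down: with -fsanitize=address stack instrumentation, GCC bug PR81715 could mishandle taking the address of a parameter in these inlined paths. The shape of the workaround, reduced to a standalone example (emit() is a made-up stand-in for nla_put()):

#include <stdio.h>
#include <string.h>

static unsigned char buf[64];
static size_t off;

static void emit(const void *p, size_t len)
{
	memcpy(buf + off, p, len);
	off += len;
}

/* Copy the argument into a local before taking its address (GCC PR81715). */
static void put_u32(unsigned int value)
{
	unsigned int tmp = value;

	emit(&tmp, sizeof(tmp));
}

int main(void)
{
	put_u32(0xdeadbeef);
	printf("%zu bytes emitted\n", off);
	return 0;
}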
+diff --git a/include/net/red.h b/include/net/red.h
+index 76e0b5f922c6..3618cdfec884 100644
+--- a/include/net/red.h
++++ b/include/net/red.h
+@@ -167,6 +167,17 @@ static inline void red_set_vars(struct red_vars *v)
+ 	v->qcount	= -1;
+ }
+ 
++static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
++{
++	if (fls(qth_min) + Wlog > 32)
++		return false;
++	if (fls(qth_max) + Wlog > 32)
++		return false;
++	if (qth_max < qth_min)
++		return false;
++	return true;
++}
++
+ static inline void red_set_parms(struct red_parms *p,
+ 				 u32 qth_min, u32 qth_max, u8 Wlog, u8 Plog,
+ 				 u8 Scell_log, u8 *stab, u32 max_P)
+@@ -178,7 +189,7 @@ static inline void red_set_parms(struct red_parms *p,
+ 	p->qth_max	= qth_max << Wlog;
+ 	p->Wlog		= Wlog;
+ 	p->Plog		= Plog;
+-	if (delta < 0)
++	if (delta <= 0)
+ 		delta = 1;
+ 	p->qth_delta	= delta;
+ 	if (!max_P) {
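red_set_parms() stores qth_min << Wlog and qth_max << Wlog in 32-bit fields, so red_check_params() rejects any pair where the shift would drop bits: fls(x) is the 1-based index of the highest set bit, and fls(x) + Wlog > 32 means x << Wlog cannot fit in a u32. The check in isolation, with a portable stand-in for the kernel's fls():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* fls(x): position of the most significant set bit, 1-based; fls(0) == 0. */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* x << wlog fits in a u32 iff the top set bit stays below bit 32. */
static bool shift_fits_u32(uint32_t x, unsigned int wlog)
{
	return fls32(x) + wlog <= 32;
}

int main(void)
{
	printf("%d\n", shift_fits_u32(0x80000000u, 0));	/* 1: fits */
	printf("%d\n", shift_fits_u32(0x80000000u, 1));	/* 0: overflows */
	return 0;
}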
+diff --git a/include/scsi/sg.h b/include/scsi/sg.h
+index 3afec7032448..20bc71c3e0b8 100644
+--- a/include/scsi/sg.h
++++ b/include/scsi/sg.h
+@@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
+ #define SG_DEFAULT_RETRIES 0
+ 
+ /* Defaults, commented if they differ from original sg driver */
+-#define SG_DEF_FORCE_LOW_DMA 0  /* was 1 -> memory below 16MB on i386 */
+ #define SG_DEF_FORCE_PACK_ID 0
+ #define SG_DEF_KEEP_ORPHAN 0
+ #define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
+diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
+index 758607226bfd..2cd449328aee 100644
+--- a/include/trace/events/clk.h
++++ b/include/trace/events/clk.h
+@@ -134,12 +134,12 @@ DECLARE_EVENT_CLASS(clk_parent,
+ 
+ 	TP_STRUCT__entry(
+ 		__string(        name,           core->name                )
+-		__string(        pname,          parent->name              )
++		__string(        pname, parent ? parent->name : "none"     )
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__assign_str(name, core->name);
+-		__assign_str(pname, parent->name);
++		__assign_str(pname, parent ? parent->name : "none");
+ 	),
+ 
+ 	TP_printk("%s %s", __get_str(name), __get_str(pname))
+diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
+index a44062da684b..c0cf08e9f38a 100644
+--- a/include/trace/events/kvm.h
++++ b/include/trace/events/kvm.h
+@@ -204,7 +204,7 @@ TRACE_EVENT(kvm_ack_irq,
+ 	{ KVM_TRACE_MMIO_WRITE, "write" }
+ 
+ TRACE_EVENT(kvm_mmio,
+-	TP_PROTO(int type, int len, u64 gpa, u64 val),
++	TP_PROTO(int type, int len, u64 gpa, void *val),
+ 	TP_ARGS(type, len, gpa, val),
+ 
+ 	TP_STRUCT__entry(
+@@ -218,7 +218,10 @@ TRACE_EVENT(kvm_mmio,
+ 		__entry->type		= type;
+ 		__entry->len		= len;
+ 		__entry->gpa		= gpa;
+-		__entry->val		= val;
++		__entry->val		= 0;
++		if (val)
++			memcpy(&__entry->val, val,
++			       min_t(u32, sizeof(__entry->val), len));
+ 	),
+ 
+ 	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
+diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
+index bc81fb2e1f0e..6f04cb419115 100644
+--- a/include/uapi/linux/eventpoll.h
++++ b/include/uapi/linux/eventpoll.h
+@@ -26,6 +26,19 @@
+ #define EPOLL_CTL_DEL 2
+ #define EPOLL_CTL_MOD 3
+ 
++/* Epoll event masks */
++#define EPOLLIN		0x00000001
++#define EPOLLPRI	0x00000002
++#define EPOLLOUT	0x00000004
++#define EPOLLERR	0x00000008
++#define EPOLLHUP	0x00000010
++#define EPOLLRDNORM	0x00000040
++#define EPOLLRDBAND	0x00000080
++#define EPOLLWRNORM	0x00000100
++#define EPOLLWRBAND	0x00000200
++#define EPOLLMSG	0x00000400
++#define EPOLLRDHUP	0x00002000
++
+ /*
+  * Request the handling of system wakeup events so as to prevent system suspends
+  * from happening while those events are being processed.
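With the EPOLL* event masks spelled out in the uapi header, userspace that includes <linux/eventpoll.h> directly gets the values without relying on libc definitions. Typical use via the standard epoll interface (a generic example, nothing here is specific to this patch):

#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN | EPOLLRDHUP };
	int n, epfd = epoll_create1(0);

	if (epfd < 0)
		return 1;
	ev.data.fd = STDIN_FILENO;
	if (epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0)
		return 1;
	n = epoll_wait(epfd, &ev, 1, 2000);	/* wait up to 2s for input */
	printf("%d fd(s) ready\n", n);
	close(epfd);
	return 0;
}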
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 3b2b0f5149ab..55730c74a42a 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -740,7 +740,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
+ 	if (*msgtyp == 0)
+ 		return SEARCH_ANY;
+ 	if (*msgtyp < 0) {
+-		*msgtyp = -*msgtyp;
++		if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
++			*msgtyp = LONG_MAX;
++		else
++			*msgtyp = -*msgtyp;
+ 		return SEARCH_LESSEQUAL;
+ 	}
+ 	if (msgflg & MSG_EXCEPT)
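On two's-complement machines LONG_MIN has no positive counterpart, so -*msgtyp is signed-overflow undefined behaviour when *msgtyp == LONG_MIN; the hunk substitutes LONG_MAX, which selects the same "less than or equal" message set. A quick standalone illustration of the guard:

#include <limits.h>
#include <stdio.h>

static long safe_negate(long v)
{
	/* -LONG_MIN overflows; LONG_MAX is the closest representable value. */
	return (v == LONG_MIN) ? LONG_MAX : -v;
}

int main(void)
{
	printf("%ld\n", safe_negate(LONG_MIN));	/* LONG_MAX, no UB */
	printf("%ld\n", safe_negate(-42L));	/* 42 */
	return 0;
}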
+diff --git a/kernel/acct.c b/kernel/acct.c
+index 74963d192c5d..37f1dc696fbd 100644
+--- a/kernel/acct.c
++++ b/kernel/acct.c
+@@ -99,7 +99,7 @@ static int check_free_space(struct bsd_acct_struct *acct)
+ {
+ 	struct kstatfs sbuf;
+ 
+-	if (time_is_before_jiffies(acct->needcheck))
++	if (time_is_after_jiffies(acct->needcheck))
+ 		goto out;
+ 
+ 	/* May block */
+diff --git a/kernel/async.c b/kernel/async.c
+index 4c3773c0bf63..f1fd155abff6 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -84,20 +84,24 @@ static atomic_t entry_count;
+ 
+ static async_cookie_t lowest_in_progress(struct async_domain *domain)
+ {
+-	struct list_head *pending;
++	struct async_entry *first = NULL;
+ 	async_cookie_t ret = ASYNC_COOKIE_MAX;
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&async_lock, flags);
+ 
+-	if (domain)
+-		pending = &domain->pending;
+-	else
+-		pending = &async_global_pending;
++	if (domain) {
++		if (!list_empty(&domain->pending))
++			first = list_first_entry(&domain->pending,
++					struct async_entry, domain_list);
++	} else {
++		if (!list_empty(&async_global_pending))
++			first = list_first_entry(&async_global_pending,
++					struct async_entry, global_list);
++	}
+ 
+-	if (!list_empty(pending))
+-		ret = list_first_entry(pending, struct async_entry,
+-				       domain_list)->cookie;
++	if (first)
++		ret = first->cookie;
+ 
+ 	spin_unlock_irqrestore(&async_lock, flags);
+ 	return ret;
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 54f0e7fcd0e2..199b54e75359 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -361,7 +361,7 @@ select_insn:
+ 		DST = tmp;
+ 		CONT;
+ 	ALU_MOD_X:
+-		if (unlikely(SRC == 0))
++		if (unlikely((u32)SRC == 0))
+ 			return 0;
+ 		tmp = (u32) DST;
+ 		DST = do_div(tmp, (u32) SRC);
+@@ -380,7 +380,7 @@ select_insn:
+ 		DST = div64_u64(DST, SRC);
+ 		CONT;
+ 	ALU_DIV_X:
+-		if (unlikely(SRC == 0))
++		if (unlikely((u32)SRC == 0))
+ 			return 0;
+ 		tmp = (u32) DST;
+ 		do_div(tmp, (u32) SRC);
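The 32-bit ALU paths divide by the truncated (u32)SRC, so the zero check must test the truncated value too: a SRC of 0x100000000 is nonzero as a u64 but becomes zero after truncation. The hazard in isolation (mod32() is an illustrative analogue of the ALU_MOD_X path, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* 32-bit modulo on truncated operands; checking src != 0 alone is not enough. */
static int mod32(uint64_t dst, uint64_t src, uint64_t *out)
{
	if ((uint32_t)src == 0)
		return -1;	/* would divide by zero after truncation */
	*out = (uint32_t)dst % (uint32_t)src;
	return 0;
}

int main(void)
{
	uint64_t r;

	/* src is nonzero as a u64 but zero once truncated to u32. */
	printf("%d\n", mod32(10, 0x100000000ull, &r));	/* -1: rejected */
	return 0;
}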
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 1bdc6f910a1d..03d74868c709 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1020,6 +1020,11 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+ 			return -EINVAL;
+ 		}
+ 
++		if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
++			verbose("BPF_ARSH not supported for 32 bit ALU\n");
++			return -EINVAL;
++		}
++
+ 		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
+ 		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
+ 			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 585cf96dab32..4195616b27d9 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1514,6 +1514,9 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+ 	struct futex_hash_bucket *hb1, *hb2;
+ 	struct futex_q *this, *next;
+ 
++	if (nr_wake < 0 || nr_requeue < 0)
++		return -EINVAL;
++
+ 	if (requeue_pi) {
+ 		/*
+ 		 * Requeue PI only works on two distinct uaddrs. This
+@@ -1831,8 +1834,12 @@ static int unqueue_me(struct futex_q *q)
+ 
+ 	/* In the common case we don't take the spinlock, which is nice. */
+ retry:
+-	lock_ptr = q->lock_ptr;
+-	barrier();
++	/*
++	 * q->lock_ptr can change between this read and the following spin_lock.
++	 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and
++	 * optimizing lock_ptr out of the logic below.
++	 */
++	lock_ptr = READ_ONCE(q->lock_ptr);
+ 	if (lock_ptr != NULL) {
+ 		spin_lock(lock_ptr);
+ 		/*
+diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
+index c92e44855ddd..1276aabaab55 100644
+--- a/kernel/gcov/Kconfig
++++ b/kernel/gcov/Kconfig
+@@ -37,6 +37,7 @@ config ARCH_HAS_GCOV_PROFILE_ALL
+ 
+ config GCOV_PROFILE_ALL
+ 	bool "Profile entire Kernel"
++	depends on !COMPILE_TEST
+ 	depends on GCOV_KERNEL
+ 	depends on ARCH_HAS_GCOV_PROFILE_ALL
+ 	default n
+diff --git a/kernel/groups.c b/kernel/groups.c
+index 74d431d25251..5ea9847f172f 100644
+--- a/kernel/groups.c
++++ b/kernel/groups.c
+@@ -101,7 +101,7 @@ static int groups_from_user(struct group_info *group_info,
+ }
+ 
+ /* a simple Shell sort */
+-static void groups_sort(struct group_info *group_info)
++void groups_sort(struct group_info *group_info)
+ {
+ 	int base, max, stride;
+ 	int gidsetsize = group_info->ngroups;
+@@ -128,6 +128,7 @@ static void groups_sort(struct group_info *group_info)
+ 		stride /= 3;
+ 	}
+ }
++EXPORT_SYMBOL(groups_sort);
+ 
+ /* a simple bsearch */
+ int groups_search(const struct group_info *group_info, kgid_t grp)
+@@ -159,7 +160,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp)
+ void set_groups(struct cred *new, struct group_info *group_info)
+ {
+ 	put_group_info(new->group_info);
+-	groups_sort(group_info);
+ 	get_group_info(group_info);
+ 	new->group_info = group_info;
+ }
+@@ -243,6 +243,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
+ 		return retval;
+ 	}
+ 
++	groups_sort(group_info);
+ 	retval = set_current_groups(group_info);
+ 	put_group_info(group_info);
+ 
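groups_sort() moves out of set_groups() and becomes the callers' responsibility (sorting inside set_groups() could scribble on a group_info shared between creds); every path that builds a list by hand must now sort it before installing, or the bsearch in groups_search() misbehaves. The resulting calling pattern (kernel context; install_groups() is an illustrative helper):

#include <linux/cred.h>
#include <linux/errno.h>

static int install_groups(kgid_t *gids, int n)
{
	struct group_info *gi;
	int i, ret;

	gi = groups_alloc(n);
	if (!gi)
		return -ENOMEM;
	for (i = 0; i < n; i++)
		GROUP_AT(gi, i) = gids[i];

	/* Required since this patch: groups_search() assumes ascending order. */
	groups_sort(gi);
	ret = set_current_groups(gi);
	put_group_info(gi);
	return ret;
}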
+diff --git a/kernel/module.c b/kernel/module.c
+index 6920d1080cdd..c38bf6e486a4 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2233,7 +2233,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info)
+ 	}
+ 	if (sym->st_shndx == SHN_UNDEF)
+ 		return 'U';
+-	if (sym->st_shndx == SHN_ABS)
++	if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
+ 		return 'a';
+ 	if (sym->st_shndx >= SHN_LORESERVE)
+ 		return '?';
+@@ -2262,7 +2262,7 @@ static char elf_type(const Elf_Sym *sym, const struct load_info *info)
+ }
+ 
+ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
+-			unsigned int shnum)
++			unsigned int shnum, unsigned int pcpundx)
+ {
+ 	const Elf_Shdr *sec;
+ 
+@@ -2271,6 +2271,11 @@ static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
+ 	    || !src->st_name)
+ 		return false;
+ 
++#ifdef CONFIG_KALLSYMS_ALL
++	if (src->st_shndx == pcpundx)
++		return true;
++#endif
++
+ 	sec = sechdrs + src->st_shndx;
+ 	if (!(sec->sh_flags & SHF_ALLOC)
+ #ifndef CONFIG_KALLSYMS_ALL
+@@ -2308,7 +2313,8 @@ static void layout_symtab(struct module *mod, struct load_info *info)
+ 	/* Compute total space required for the core symbols' strtab. */
+ 	for (ndst = i = 0; i < nsrc; i++) {
+ 		if (i == 0 ||
+-		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
++				   info->index.pcpu)) {
+ 			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
+ 			ndst++;
+ 		}
+@@ -2366,7 +2372,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
+ 	src = mod->kallsyms->symtab;
+ 	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
+ 		if (i == 0 ||
+-		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
++		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
++				   info->index.pcpu)) {
+ 			dst[ndst] = src[i];
+ 			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+ 			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
+@@ -2726,8 +2733,12 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
+ 		return -ENOEXEC;
+ 	}
+ 
+-	if (!get_modinfo(info, "intree"))
++	if (!get_modinfo(info, "intree")) {
++		if (!test_taint(TAINT_OOT_MODULE))
++			pr_warn("%s: loading out-of-tree module taints kernel.\n",
++				mod->name);
+ 		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
++	}
+ 
+ 	if (get_modinfo(info, "staging")) {
+ 		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
+@@ -2892,6 +2903,8 @@ static int move_module(struct module *mod, struct load_info *info)
+ 
+ static int check_module_license_and_versions(struct module *mod)
+ {
++	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
++
+ 	/*
+ 	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
+ 	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
+@@ -2910,6 +2923,9 @@ static int check_module_license_and_versions(struct module *mod)
+ 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
+ 				 LOCKDEP_NOW_UNRELIABLE);
+ 
++	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
++		pr_warn("%s: module license taints kernel.\n", mod->name);
++
+ #ifdef CONFIG_MODVERSIONS
+ 	if ((mod->num_syms && !mod->crcs)
+ 	    || (mod->num_gpl_syms && !mod->gpl_crcs)
+diff --git a/kernel/profile.c b/kernel/profile.c
+index a7bcd28d6e9f..7ad939c708b9 100644
+--- a/kernel/profile.c
++++ b/kernel/profile.c
+@@ -44,7 +44,7 @@ int prof_on __read_mostly;
+ EXPORT_SYMBOL_GPL(prof_on);
+ 
+ static cpumask_var_t prof_cpu_mask;
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
+ static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
+ static DEFINE_PER_CPU(int, cpu_profile_flip);
+ static DEFINE_MUTEX(profile_flip_mutex);
+@@ -201,7 +201,7 @@ int profile_event_unregister(enum profile_type type, struct notifier_block *n)
+ }
+ EXPORT_SYMBOL_GPL(profile_event_unregister);
+ 
+-#ifdef CONFIG_SMP
++#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
+ /*
+  * Each cpu has a pair of open-addressed hashtables for pending
+  * profile hits. read_profile() IPI's all cpus to request them
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 8fbedeb5553f..9c905bd94ff0 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -32,7 +32,7 @@
+ #include <linux/init.h>
+ #include <linux/uaccess.h>
+ #include <linux/highmem.h>
+-#include <asm/mmu_context.h>
++#include <linux/mmu_context.h>
+ #include <linux/interrupt.h>
+ #include <linux/capability.h>
+ #include <linux/completion.h>
+@@ -2339,7 +2339,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
+ 		atomic_inc(&oldmm->mm_count);
+ 		enter_lazy_tlb(oldmm, next);
+ 	} else
+-		switch_mm(oldmm, mm, next);
++		switch_mm_irqs_off(oldmm, mm, next);
+ 
+ 	if (!prev->mm) {
+ 		prev->active_mm = NULL;
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 525a4cda5598..46a2471173b8 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -72,7 +72,7 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+ 	handler = sig_handler(t, sig);
+ 
+ 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
+-			handler == SIG_DFL && !force)
++	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
+ 		return 1;
+ 
+ 	return sig_handler_ignored(handler, sig);
+@@ -88,13 +88,15 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
+ 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
+ 		return 0;
+ 
+-	if (!sig_task_ignored(t, sig, force))
+-		return 0;
+-
+ 	/*
+-	 * Tracers may want to know about even ignored signals.
++	 * Tracers may want to know about even ignored signals, unless the
++	 * signal is SIGKILL, which can't be reported anyway but can be
++	 * ignored by a SIGNAL_UNKILLABLE task.
+ 	 */
+-	return !t->ptrace;
++	if (t->ptrace && sig != SIGKILL)
++		return 0;
++
++	return sig_task_ignored(t, sig, force);
+ }
+ 
+ /*
+@@ -960,9 +962,9 @@ static void complete_signal(int sig, struct task_struct *p, int group)
+ 	 * then start taking the whole group down immediately.
+ 	 */
+ 	if (sig_fatal(p, sig) &&
+-	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
++	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
+ 	    !sigismember(&t->real_blocked, sig) &&
+-	    (sig == SIGKILL || !t->ptrace)) {
++	    (sig == SIGKILL || !p->ptrace)) {
+ 		/*
+ 		 * This signal will be fatal to the whole group.
+ 		 */
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index 93ef7190bdea..e82a5f40a0ac 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -292,7 +292,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
+  */
+ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
+ {
+-	ktime_t res = ktime_add(lhs, rhs);
++	ktime_t res = ktime_add_unsafe(lhs, rhs);
+ 
+ 	/*
+ 	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
+diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
+index 31ea01f42e1f..2cca2e79c643 100644
+--- a/kernel/time/posix-timers.c
++++ b/kernel/time/posix-timers.c
+@@ -500,17 +500,22 @@ static struct pid *good_sigevent(sigevent_t * event)
+ {
+ 	struct task_struct *rtn = current->group_leader;
+ 
+-	if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
+-		(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
+-		 !same_thread_group(rtn, current) ||
+-		 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
++	switch (event->sigev_notify) {
++	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
++		rtn = find_task_by_vpid(event->sigev_notify_thread_id);
++		if (!rtn || !same_thread_group(rtn, current))
++			return NULL;
++		/* FALLTHRU */
++	case SIGEV_SIGNAL:
++	case SIGEV_THREAD:
++		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
++			return NULL;
++		/* FALLTHRU */
++	case SIGEV_NONE:
++		return task_pid(rtn);
++	default:
+ 		return NULL;
+-
+-	if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
+-	    ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
+-		return NULL;
+-
+-	return task_pid(rtn);
++	}
+ }
+ 
+ void posix_timers_register_clock(const clockid_t clock_id,
+@@ -738,8 +743,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+ 	/* interval timer ? */
+ 	if (iv.tv64)
+ 		cur_setting->it_interval = ktime_to_timespec(iv);
+-	else if (!hrtimer_active(timer) &&
+-		 (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
++	else if (!hrtimer_active(timer) && timr->it_sigev_notify != SIGEV_NONE)
+ 		return;
+ 
+ 	now = timer->base->get_time();
+@@ -750,7 +754,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+ 	 * expiry is > now.
+ 	 */
+ 	if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
+-	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
++			timr->it_sigev_notify == SIGEV_NONE))
+ 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
+ 
+ 	remaining = ktime_sub(hrtimer_get_expires(timer), now);
+@@ -760,7 +764,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
+ 		 * A single shot SIGEV_NONE timer must return 0, when
+ 		 * it is expired !
+ 		 */
+-		if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE)
++		if (timr->it_sigev_notify != SIGEV_NONE)
+ 			cur_setting->it_value.tv_nsec = 1;
+ 	} else
+ 		cur_setting->it_value = ktime_to_timespec(remaining);
+@@ -858,7 +862,7 @@ common_timer_set(struct k_itimer *timr, int flags,
+ 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
+ 
+ 	/* SIGEV_NONE timers are not queued ! See common_timer_get */
+-	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
++	if (timr->it_sigev_notify == SIGEV_NONE) {
+ 		/* Setup correct expiry time for relative timers */
+ 		if (mode == HRTIMER_MODE_REL) {
+ 			hrtimer_add_expires(timer, timer->base->get_time());
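good_sigevent() now switches on the exact sigev_notify value, so only SIGEV_NONE, SIGEV_SIGNAL, SIGEV_THREAD, and SIGEV_SIGNAL | SIGEV_THREAD_ID are accepted, and the signal number is validated for all signalling variants. A userspace timer_create() call that passes the stricter check (Linux-specific; link with -lrt; the _sigev_un._tid field name is glibc's spelling of the kernel's sigev_notify_thread_id):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct sigevent sev;
	timer_t timer;

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL | SIGEV_THREAD_ID;
	sev.sigev_signo = SIGRTMIN;		/* 0 or > SIGRTMAX is rejected */
	sev._sigev_un._tid = syscall(SYS_gettid); /* must be in our thread group */

	if (timer_create(CLOCK_MONOTONIC, &sev, &timer) < 0) {
		perror("timer_create");
		return 1;
	}
	timer_delete(timer);
	puts("timer created and deleted");
	return 0;
}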
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index 483cecfa5c17..1994901ef239 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -57,7 +57,8 @@ static struct tracer_flags blk_tracer_flags = {
+ };
+ 
+ /* Global reference count of probes */
+-static atomic_t blk_probes_ref = ATOMIC_INIT(0);
++static DEFINE_MUTEX(blk_probe_mutex);
++static int blk_probes_ref;
+ 
+ static void blk_register_tracepoints(void);
+ static void blk_unregister_tracepoints(void);
+@@ -300,11 +301,26 @@ static void blk_trace_free(struct blk_trace *bt)
+ 	kfree(bt);
+ }
+ 
++static void get_probe_ref(void)
++{
++	mutex_lock(&blk_probe_mutex);
++	if (++blk_probes_ref == 1)
++		blk_register_tracepoints();
++	mutex_unlock(&blk_probe_mutex);
++}
++
++static void put_probe_ref(void)
++{
++	mutex_lock(&blk_probe_mutex);
++	if (!--blk_probes_ref)
++		blk_unregister_tracepoints();
++	mutex_unlock(&blk_probe_mutex);
++}
++
+ static void blk_trace_cleanup(struct blk_trace *bt)
+ {
+ 	blk_trace_free(bt);
+-	if (atomic_dec_and_test(&blk_probes_ref))
+-		blk_unregister_tracepoints();
++	put_probe_ref();
+ }
+ 
+ int blk_trace_remove(struct request_queue *q)
+@@ -527,8 +543,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ 		goto err;
+ 	}
+ 
+-	if (atomic_inc_return(&blk_probes_ref) == 1)
+-		blk_register_tracepoints();
++	get_probe_ref();
+ 
+ 	return 0;
+ err:
+@@ -1474,9 +1489,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
+ 	if (bt == NULL)
+ 		return -EINVAL;
+ 
+-	if (atomic_dec_and_test(&blk_probes_ref))
+-		blk_unregister_tracepoints();
+-
++	put_probe_ref();
+ 	blk_trace_free(bt);
+ 	return 0;
+ }
+@@ -1510,8 +1523,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
+ 		goto free_bt;
+ 	}
+ 
+-	if (atomic_inc_return(&blk_probes_ref) == 1)
+-		blk_register_tracepoints();
++	get_probe_ref();
+ 	return 0;
+ 
+ free_bt:
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 75c5271a56c2..40bc77080fad 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -332,6 +332,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
+ /* Missed count stored at end */
+ #define RB_MISSED_STORED	(1 << 30)
+ 
++#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
++
+ struct buffer_data_page {
+ 	u64		 time_stamp;	/* page time stamp */
+ 	local_t		 commit;	/* write committed index */
+@@ -383,7 +385,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
+  */
+ size_t ring_buffer_page_len(void *page)
+ {
+-	return local_read(&((struct buffer_data_page *)page)->commit)
++	struct buffer_data_page *bpage = page;
++
++	return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
+ 		+ BUF_PAGE_HDR_SIZE;
+ }
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7a26798ffbf9..d03f7eadc1f3 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5719,7 +5719,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		.spd_release	= buffer_spd_release,
+ 	};
+ 	struct buffer_ref *ref;
+-	int entries, size, i;
++	int entries, i;
+ 	ssize_t ret = 0;
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+@@ -5770,14 +5770,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 			break;
+ 		}
+ 
+-		/*
+-		 * zero out any left over data, this is going to
+-		 * user land.
+-		 */
+-		size = ring_buffer_page_len(ref->page);
+-		if (size < PAGE_SIZE)
+-			memset(ref->page + size, 0, PAGE_SIZE - size);
+-
+ 		page = virt_to_page(ref->page);
+ 
+ 		spd.pages[i] = page;
+@@ -6448,6 +6440,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
+ 	buf->data = alloc_percpu(struct trace_array_cpu);
+ 	if (!buf->data) {
+ 		ring_buffer_free(buf->buffer);
++		buf->buffer = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+@@ -6471,7 +6464,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
+ 				    allocate_snapshot ? size : 1);
+ 	if (WARN_ON(ret)) {
+ 		ring_buffer_free(tr->trace_buffer.buffer);
++		tr->trace_buffer.buffer = NULL;
+ 		free_percpu(tr->trace_buffer.data);
++		tr->trace_buffer.data = NULL;
+ 		return -ENOMEM;
+ 	}
+ 	tr->allocated_snapshot = allocate_snapshot;
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 6459f77e2c72..d19406850b0d 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1819,6 +1819,7 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
+ {
+ 	struct ftrace_event_call *call, *p;
+ 	const char *last_system = NULL;
++	bool first = false;
+ 	int last_i;
+ 	int i;
+ 
+@@ -1826,15 +1827,28 @@ void trace_event_enum_update(struct trace_enum_map **map, int len)
+ 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
+ 		/* events are usually grouped together with systems */
+ 		if (!last_system || call->class->system != last_system) {
++			first = true;
+ 			last_i = 0;
+ 			last_system = call->class->system;
+ 		}
+ 
++		/*
++		 * Since calls are grouped by systems, the likelihood that the
++		 * next call in the iteration belongs to the same system as the
++		 * previous call is high. As an optimization, we skip searching
++		 * for a map[] that matches the call's system if the last call
++		 * was from the same system. That's what last_i is for. If the
++		 * call has the same system as the previous call, then last_i
++		 * will be the index of the first map[] that has a matching
++		 * system.
++		 */
+ 		for (i = last_i; i < len; i++) {
+ 			if (call->class->system == map[i]->system) {
+ 				/* Save the first system if need be */
+-				if (!last_i)
++				if (first) {
+ 					last_i = i;
++					first = false;
++				}
+ 				update_event_printk(call, map[i]);
+ 			}
+ 		}
+diff --git a/kernel/uid16.c b/kernel/uid16.c
+index d58cc4d8f0d1..651aaa5221ec 100644
+--- a/kernel/uid16.c
++++ b/kernel/uid16.c
+@@ -190,6 +190,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
+ 		return retval;
+ 	}
+ 
++	groups_sort(group_info);
+ 	retval = set_current_groups(group_info);
+ 	put_group_info(group_info);
+ 
+diff --git a/lib/oid_registry.c b/lib/oid_registry.c
+index 318f382a010d..150e04d70303 100644
+--- a/lib/oid_registry.c
++++ b/lib/oid_registry.c
+@@ -116,7 +116,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
+ 	int count;
+ 
+ 	if (v >= end)
+-		return -EBADMSG;
++		goto bad;
+ 
+ 	n = *v++;
+ 	ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
+@@ -134,7 +134,7 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
+ 			num = n & 0x7f;
+ 			do {
+ 				if (v >= end)
+-					return -EBADMSG;
++					goto bad;
+ 				n = *v++;
+ 				num <<= 7;
+ 				num |= n & 0x7f;
+@@ -148,6 +148,10 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
+ 	}
+ 
+ 	return ret;
++
++bad:
++	snprintf(buffer, bufsize, "(bad)");
++	return -EBADMSG;
+ }
+ EXPORT_SYMBOL_GPL(sprint_oid);
+ 
+diff --git a/mm/cma.c b/mm/cma.c
+index 3a7a67b93394..3b4e38689202 100644
+--- a/mm/cma.c
++++ b/mm/cma.c
+@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
+ }
+ 
+ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+-					     int align_order)
++					     unsigned int align_order)
+ {
+ 	if (align_order <= cma->order_per_bit)
+ 		return 0;
+@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
+ }
+ 
+ /*
+- * Find a PFN aligned to the specified order and return an offset represented in
+- * order_per_bits.
++ * Find the offset of the base PFN from the specified align_order.
++ * The value returned is represented in order_per_bits.
+  */
+ static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
+-					       int align_order)
++					       unsigned int align_order)
+ {
+-	if (align_order <= cma->order_per_bit)
+-		return 0;
+-
+-	return (ALIGN(cma->base_pfn, (1UL << align_order))
+-		- cma->base_pfn) >> cma->order_per_bit;
++	return (cma->base_pfn & ((1UL << align_order) - 1))
++		>> cma->order_per_bit;
+ }
+ 
+ static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
+diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
+index e10ccd299d66..5edcf1b37fa6 100644
+--- a/mm/early_ioremap.c
++++ b/mm/early_ioremap.c
+@@ -102,7 +102,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+ 	enum fixed_addresses idx;
+ 	int i, slot;
+ 
+-	WARN_ON(system_state != SYSTEM_BOOTING);
++	WARN_ON(system_state >= SYSTEM_RUNNING);
+ 
+ 	slot = -1;
+ 	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 221762e24a68..696b5bbac2e0 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -5641,7 +5641,7 @@ static void uncharge_list(struct list_head *page_list)
+ 		next = page->lru.next;
+ 
+ 		VM_BUG_ON_PAGE(PageLRU(page), page);
+-		VM_BUG_ON_PAGE(page_count(page), page);
++		VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
+ 
+ 		if (!page->mem_cgroup)
+ 			continue;
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 321f5632c17b..7c57635958f2 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -582,6 +582,13 @@ static int delete_from_lru_cache(struct page *p)
+ 		 */
+ 		ClearPageActive(p);
+ 		ClearPageUnevictable(p);
++
++		/*
++		 * Poisoned page might never drop its ref count to 0 so we have
++		 * to uncharge it manually from its memcg.
++		 */
++		mem_cgroup_uncharge(p);
++
+ 		/*
+ 		 * drop the page count elevated by isolate_lru_page()
+ 		 */
+diff --git a/mm/memory.c b/mm/memory.c
+index fc449016d10e..942daab4dc57 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -71,7 +71,7 @@
+ 
+ #include "internal.h"
+ 
+-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
++#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
+ #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
+ #endif
+ 
+diff --git a/mm/mmap.c b/mm/mmap.c
+index 1094833d0e82..d730c1b91a12 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2179,7 +2179,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ 	if (gap_addr < address)
+ 		return -ENOMEM;
+ 	next = vma->vm_next;
+-	if (next && next->vm_start < gap_addr) {
++	if (next && next->vm_start < gap_addr &&
++			(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ 		if (!(next->vm_flags & VM_GROWSUP))
+ 			return -ENOMEM;
+ 		/* Check that both stack segments have the same anon_vma? */
+@@ -2260,7 +2261,8 @@ int expand_downwards(struct vm_area_struct *vma,
+ 	if (gap_addr > address)
+ 		return -ENOMEM;
+ 	prev = vma->vm_prev;
+-	if (prev && prev->vm_end > gap_addr) {
++	if (prev && prev->vm_end > gap_addr &&
++			(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+ 		if (!(prev->vm_flags & VM_GROWSDOWN))
+ 			return -ENOMEM;
+ 		/* Check that both stack segments have the same anon_vma? */
+diff --git a/mm/mmu_context.c b/mm/mmu_context.c
+index f802c2d216a7..6f4d27c5bb32 100644
+--- a/mm/mmu_context.c
++++ b/mm/mmu_context.c
+@@ -4,9 +4,9 @@
+  */
+ 
+ #include <linux/mm.h>
++#include <linux/sched.h>
+ #include <linux/mmu_context.h>
+ #include <linux/export.h>
+-#include <linux/sched.h>
+ 
+ #include <asm/mmu_context.h>
+ 
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index eb59f7eea508..308757ae508d 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -967,6 +967,7 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
+ 	unsigned long pos_ratio;
+ 	unsigned long step;
+ 	unsigned long x;
++	unsigned long shift;
+ 
+ 	/*
+ 	 * The dirty rate will match the writeout rate in long term, except
+@@ -1094,11 +1095,11 @@ static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
+ 	 * rate itself is constantly fluctuating. So decrease the track speed
+ 	 * when it gets close to the target. Helps eliminate pointless tremors.
+ 	 */
+-	step >>= dirty_ratelimit / (2 * step + 1);
+-	/*
+-	 * Limit the tracking speed to avoid overshooting.
+-	 */
+-	step = (step + 7) / 8;
++	shift = dirty_ratelimit / (2 * step + 1);
++	if (shift < BITS_PER_LONG)
++		step = DIV_ROUND_UP(step >> shift, 8);
++	else
++		step = 0;
+ 
+ 	if (dirty_ratelimit < balanced_dirty_ratelimit)
+ 		dirty_ratelimit += step;
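The old code shifted by a quotient that can meet or exceed the word width, and in C a shift count >= the type's width is undefined behaviour; on x86 the hardware masks the count, so `x >> 64` on a 64-bit long often yields x rather than 0. A standalone illustration of the guarded form:

        #include <stdio.h>

        int main(void)
        {
                unsigned long x = 1024, shift = 64;   /* BITS_PER_LONG on LP64 */

                /* undefined: many CPUs mask the count, giving x >> 0 == x */
                /* printf("%lu\n", x >> shift); */

                /* guarded form, as in the fix: treat oversized shifts as 0 */
                unsigned long step = (shift < 64) ? ((x >> shift) + 7) / 8 : 0;

                printf("step=%lu\n", step);           /* step=0 */
                return 0;
        }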
+diff --git a/mm/util.c b/mm/util.c
+index 68ff8a5361e7..4baa25c35af8 100644
+--- a/mm/util.c
++++ b/mm/util.c
+@@ -80,6 +80,8 @@ EXPORT_SYMBOL(kstrdup_const);
+  * @s: the string to duplicate
+  * @max: read at most @max chars from @s
+  * @gfp: the GFP mask used in the kmalloc() call when allocating memory
++ *
++ * Note: Use kmemdup_nul() instead if the size is known exactly.
+  */
+ char *kstrndup(const char *s, size_t max, gfp_t gfp)
+ {
+@@ -117,6 +119,28 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp)
+ }
+ EXPORT_SYMBOL(kmemdup);
+ 
++/**
++ * kmemdup_nul - Create a NUL-terminated string from unterminated data
++ * @s: The data to stringify
++ * @len: The size of the data
++ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
++ */
++char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
++{
++	char *buf;
++
++	if (!s)
++		return NULL;
++
++	buf = kmalloc_track_caller(len + 1, gfp);
++	if (buf) {
++		memcpy(buf, s, len);
++		buf[len] = '\0';
++	}
++	return buf;
++}
++EXPORT_SYMBOL(kmemdup_nul);
++
+ /**
+  * memdup_user - duplicate memory region from user space
+  *
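The difference from kstrndup() is subtle: kstrndup() scans for an embedded NUL and may copy fewer than max bytes, while kmemdup_nul() copies exactly len bytes and then terminates. A userspace analogue for comparison (malloc standing in for kmalloc_track_caller):

        #include <stdlib.h>
        #include <string.h>

        /* userspace analogue of kmemdup_nul(): copy exactly len bytes + NUL */
        char *memdup_nul(const char *s, size_t len)
        {
                char *buf;

                if (!s)
                        return NULL;
                buf = malloc(len + 1);
                if (buf) {
                        memcpy(buf, s, len);
                        buf[len] = '\0';
                }
                return buf;
        }

Given data "ab\0cd" and len 5, a strndup-style copy stops at the embedded NUL and keeps only "ab", whereas memdup_nul() preserves all five bytes.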
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index f16e330e1096..c6033260e739 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -223,10 +223,13 @@ EXPORT_SYMBOL(register_shrinker);
+  */
+ void unregister_shrinker(struct shrinker *shrinker)
+ {
++	if (!shrinker->nr_deferred)
++		return;
+ 	down_write(&shrinker_rwsem);
+ 	list_del(&shrinker->list);
+ 	up_write(&shrinker_rwsem);
+ 	kfree(shrinker->nr_deferred);
++	shrinker->nr_deferred = NULL;
+ }
+ EXPORT_SYMBOL(unregister_shrinker);
+ 
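The nr_deferred pointer doubles as a registration flag here: bail out when it was never set, and clear it after freeing so a second unregister, or an unregister after a failed register, is harmless. The same defensive pattern in miniature, with a made-up resource type:

        #include <stdlib.h>

        struct obj { int *res; };

        void obj_teardown(struct obj *o)
        {
                if (!o->res)
                        return;        /* never set up, or already torn down */
                free(o->res);
                o->res = NULL;         /* makes teardown idempotent */
        }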
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 4f5cd974e11a..9b525cd66ca6 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -1326,7 +1326,9 @@ static int vmstat_show(struct seq_file *m, void *arg)
+ 	unsigned long *l = arg;
+ 	unsigned long off = l - (unsigned long *)m->private;
+ 
+-	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
++	seq_puts(m, vmstat_text[off]);
++	seq_put_decimal_ull(m, ' ', *l);
++	seq_putc(m, '\n');
+ 	return 0;
+ }
+ 
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index ce53c8691604..6ce54eed45e8 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -111,12 +111,7 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
+ 		vlan_gvrp_uninit_applicant(real_dev);
+ 	}
+ 
+-	/* Take it out of our own structures, but be sure to interlock with
+-	 * HW accelerating devices or SW vlan input packet processing if
+-	 * VLAN is not 0 (leave it there for 802.1p).
+-	 */
+-	if (vlan_id)
+-		vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
++	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
+ 
+ 	/* Get rid of the vlan's reference to real_dev */
+ 	dev_put(real_dev);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 9b6b35977f48..915a584b4e19 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -3346,9 +3346,10 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+-			remote_efs = 1;
+-			if (olen == sizeof(efs))
++			if (olen == sizeof(efs)) {
++				remote_efs = 1;
+ 				memcpy(&efs, (void *) val, olen);
++			}
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+@@ -3567,16 +3568,17 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+-			if (olen == sizeof(efs))
++			if (olen == sizeof(efs)) {
+ 				memcpy(&efs, (void *)val, olen);
+ 
+-			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+-			    efs.stype != L2CAP_SERV_NOTRAFIC &&
+-			    efs.stype != chan->local_stype)
+-				return -ECONNREFUSED;
++				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
++				    efs.stype != L2CAP_SERV_NOTRAFIC &&
++				    efs.stype != chan->local_stype)
++					return -ECONNREFUSED;
+ 
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs, endptr - ptr);
++				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
++						   (unsigned long) &efs, endptr - ptr);
++			}
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 62c635f2bcfc..2a55c0ce9490 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -714,13 +714,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ 		goto drop;
+ 
+-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-		      skb->len != CAN_MTU ||
+-		      cfd->len > CAN_MAX_DLEN,
+-		      "PF_CAN: dropped non conform CAN skbuf: "
+-		      "dev type %d, len %d, datalen %d\n",
+-		      dev->type, skb->len, cfd->len))
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
++		     cfd->len > CAN_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
++			     dev->type, skb->len, cfd->len);
+ 		goto drop;
++	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
+@@ -738,13 +737,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ 		goto drop;
+ 
+-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+-		      skb->len != CANFD_MTU ||
+-		      cfd->len > CANFD_MAX_DLEN,
+-		      "PF_CAN: dropped non conform CAN FD skbuf: "
+-		      "dev type %d, len %d, datalen %d\n",
+-		      dev->type, skb->len, cfd->len))
++	if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
++		     cfd->len > CANFD_MAX_DLEN)) {
++		pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
++			     dev->type, skb->len, cfd->len);
+ 		goto drop;
++	}
+ 
+ 	can_receive(skb, dev);
+ 	return NET_RX_SUCCESS;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4d4213b6f7f6..0ccae464b46e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2513,7 +2513,7 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ 
+ 	segs = skb_mac_gso_segment(skb, features);
+ 
+-	if (unlikely(skb_needs_check(skb, tx_path)))
++	if (unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
+ 		skb_warn_bad_offload(skb);
+ 
+ 	return segs;
+@@ -2803,10 +2803,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
+ 		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+ 
+ 		/* + transport layer */
+-		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+-			hdr_len += tcp_hdrlen(skb);
+-		else
+-			hdr_len += sizeof(struct udphdr);
++		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
++			const struct tcphdr *th;
++			struct tcphdr _tcphdr;
++
++			th = skb_header_pointer(skb, skb_transport_offset(skb),
++						sizeof(_tcphdr), &_tcphdr);
++			if (likely(th))
++				hdr_len += __tcp_hdrlen(th);
++		} else {
++			struct udphdr _udphdr;
++
++			if (skb_header_pointer(skb, skb_transport_offset(skb),
++					       sizeof(_udphdr), &_udphdr))
++				hdr_len += sizeof(struct udphdr);
++		}
+ 
+ 		if (shinfo->gso_type & SKB_GSO_DODGY)
+ 			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
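Two fixes in one file: __skb_gso_segment() no longer warns on an error pointer, and qdisc_pkt_len_init() stops assuming the transport header is actually present in linear data before reading its length; skb_header_pointer() copies the claimed header into a stack buffer and returns NULL when the packet is too short. For TCP the length then comes from the data-offset field, which __tcp_hdrlen() reduces to doff * 4; a trivial standalone check of that arithmetic:

        #include <stdio.h>

        int main(void)
        {
                /* the TCP data-offset field counts 32-bit words */
                unsigned int doff = 5;              /* minimal header, no options */
                printf("%u bytes\n", doff * 4);     /* 20 */
                return 0;
        }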
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index cb9a7ab5444c..5fd6c6e699aa 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -492,7 +492,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ 	if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
+ 		nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
+ 
+-	hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
++	hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
+ 
+ 	if (n->parms->dead) {
+ 		rc = ERR_PTR(-EINVAL);
+@@ -504,7 +504,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ 	     n1 != NULL;
+ 	     n1 = rcu_dereference_protected(n1->next,
+ 			lockdep_is_held(&tbl->lock))) {
+-		if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
++		if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
+ 			if (want_ref)
+ 				neigh_hold(n1);
+ 			rc = n1;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 97a1fa140a9b..853e82075ebd 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3694,7 +3694,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+ 	struct sock *sk = skb->sk;
+ 
+ 	if (!skb_may_tx_timestamp(sk, false))
+-		return;
++		goto err;
+ 
+ 	/* Take a reference to prevent skb_orphan() from freeing the socket,
+ 	 * but only if the socket refcount is not zero.
+@@ -3703,7 +3703,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+ 		*skb_hwtstamps(skb) = *hwtstamps;
+ 		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+ 		sock_put(sk);
++		return;
+ 	}
++
++err:
++	kfree_skb(skb);
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
+ 
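Before this change the early return leaked the cloned skb handed in by the driver. The invariant the fix restores: a function that takes ownership of an skb must dispose of it on every path. In miniature (allowed() and try_enqueue() are invented placeholders, not kernel APIs):

        /* fragment, not standalone: sketch of the ownership rule */
        void deliver_or_free(struct sk_buff *skb)
        {
                if (!allowed(skb))
                        goto err;
                if (try_enqueue(skb))
                        return;         /* consumed: the queue owns it now */
        err:
                kfree_skb(skb);         /* every other path frees it */
        }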
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 76e0b874f378..7697148eec4f 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -729,7 +729,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
+ 		val = min_t(u32, val, sysctl_wmem_max);
+ set_sndbuf:
+ 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+-		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
++		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+ 		/* Wake up sending tasks if we upped the value. */
+ 		sk->sk_write_space(sk);
+ 		break;
+@@ -765,7 +765,7 @@ set_rcvbuf:
+ 		 * returning the value we actually used in getsockopt
+ 		 * is the most desirable behavior.
+ 		 */
+-		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
++		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+ 		break;
+ 
+ 	case SO_RCVBUFFORCE:
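The type change matters most for SO_SNDBUFFORCE/SO_RCVBUFFORCE, where val is not clamped to a sysctl limit first: val * 2 can wrap negative, and comparing as u32 smuggles that wrap into the int-typed sk_sndbuf as a huge value, while comparing as int simply raises it to the floor. Worked numbers (compile with -fwrapv; the kernel builds with -fno-strict-overflow, so signed overflow wraps):

        #include <stdio.h>

        int main(void)
        {
                int val = 0x40000000;          /* 1 GiB requested */
                int doubled = val * 2;         /* wraps to INT_MIN under -fwrapv */
                int floor = 4608;              /* illustrative SOCK_MIN_SNDBUF */

                /* u32 comparison: 0x80000000 > 4608, the huge value survives */
                unsigned int as_u32 = (unsigned int)doubled > (unsigned int)floor
                                      ? (unsigned int)doubled : (unsigned int)floor;
                /* int comparison: INT_MIN < 4608, clamped to the floor */
                int as_int = doubled > floor ? doubled : floor;

                printf("u32: %u  int: %d\n", as_u32, as_int);
                return 0;
        }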
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index a6beb7b6ae55..f5ef2115871f 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -360,14 +360,16 @@ static struct ctl_table net_core_table[] = {
+ 		.data		= &sysctl_net_busy_poll,
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &zero,
+ 	},
+ 	{
+ 		.procname	= "busy_read",
+ 		.data		= &sysctl_net_busy_read,
+ 		.maxlen		= sizeof(unsigned int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &zero,
+ 	},
+ #endif
+ #ifdef CONFIG_NET_SCHED
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index 5e3a7302f774..7753681195c1 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
+ 
+ 	ccid2_pr_debug("RTO_EXPIRE\n");
+ 
++	if (sk->sk_state == DCCP_CLOSED)
++		goto out;
++
+ 	/* back-off timer */
+ 	hc->tx_rto <<= 1;
+ 	if (hc->tx_rto > DCCP_RTO_MAX)
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 522658179cca..a20dc23360f9 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -259,6 +259,7 @@ int dccp_disconnect(struct sock *sk, int flags)
+ {
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	struct inet_sock *inet = inet_sk(sk);
++	struct dccp_sock *dp = dccp_sk(sk);
+ 	int err = 0;
+ 	const int old_state = sk->sk_state;
+ 
+@@ -278,6 +279,10 @@ int dccp_disconnect(struct sock *sk, int flags)
+ 		sk->sk_err = ECONNRESET;
+ 
+ 	dccp_clear_xmit_timers(sk);
++	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
++	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
++	dp->dccps_hc_rx_ccid = NULL;
++	dp->dccps_hc_tx_ccid = NULL;
+ 
+ 	__skb_queue_purge(&sk->sk_receive_queue);
+ 	__skb_queue_purge(&sk->sk_write_queue);
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 2783c538ec19..e31f0da7537a 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -1337,6 +1337,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
+ 	lock_sock(sk);
+ 	err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
+ 	release_sock(sk);
++#ifdef CONFIG_NETFILTER
++	/* we need to exclude all possible ENOPROTOOPTs except default case */
++	if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
++	    optname != DSO_STREAM && optname != DSO_SEQPACKET)
++		err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
++#endif
+ 
+ 	return err;
+ }
+@@ -1444,15 +1450,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
+ 		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
+ 		break;
+ 
+-	default:
+-#ifdef CONFIG_NETFILTER
+-		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
+-#endif
+-	case DSO_LINKINFO:
+-	case DSO_STREAM:
+-	case DSO_SEQPACKET:
+-		return -ENOPROTOOPT;
+-
+ 	case DSO_MAXWINDOW:
+ 		if (optlen != sizeof(unsigned long))
+ 			return -EINVAL;
+@@ -1500,6 +1497,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
+ 			return -EINVAL;
+ 		scp->info_loc = u.info;
+ 		break;
++
++	case DSO_LINKINFO:
++	case DSO_STREAM:
++	case DSO_SEQPACKET:
++	default:
++		return -ENOPROTOOPT;
+ 	}
+ 
+ 	return 0;
+@@ -1513,6 +1516,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
+ 	lock_sock(sk);
+ 	err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
+ 	release_sock(sk);
++#ifdef CONFIG_NETFILTER
++	if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
++	    optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
++	    optname != DSO_CONREJECT) {
++		int len;
++
++		if (get_user(len, optlen))
++			return -EFAULT;
++
++		err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
++		if (err >= 0)
++			err = put_user(len, optlen);
++	}
++#endif
+ 
+ 	return err;
+ }
+@@ -1578,26 +1595,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+ 		r_data = &link;
+ 		break;
+ 
+-	default:
+-#ifdef CONFIG_NETFILTER
+-	{
+-		int ret, len;
+-
+-		if (get_user(len, optlen))
+-			return -EFAULT;
+-
+-		ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
+-		if (ret >= 0)
+-			ret = put_user(len, optlen);
+-		return ret;
+-	}
+-#endif
+-	case DSO_STREAM:
+-	case DSO_SEQPACKET:
+-	case DSO_CONACCEPT:
+-	case DSO_CONREJECT:
+-		return -ENOPROTOOPT;
+-
+ 	case DSO_MAXWINDOW:
+ 		if (r_len > sizeof(unsigned long))
+ 			r_len = sizeof(unsigned long);
+@@ -1629,6 +1626,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
+ 			r_len = sizeof(unsigned char);
+ 		r_data = &scp->info_rem;
+ 		break;
++
++	case DSO_STREAM:
++	case DSO_SEQPACKET:
++	case DSO_CONACCEPT:
++	case DSO_CONREJECT:
++	default:
++		return -ENOPROTOOPT;
+ 	}
+ 
+ 	if (r_data) {
+diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
+index a7e74fbf2ce6..24b4174a84bf 100644
+--- a/net/ipv4/arp.c
++++ b/net/ipv4/arp.c
+@@ -221,11 +221,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
+ 
+ static int arp_constructor(struct neighbour *neigh)
+ {
+-	__be32 addr = *(__be32 *)neigh->primary_key;
++	__be32 addr;
+ 	struct net_device *dev = neigh->dev;
+ 	struct in_device *in_dev;
+ 	struct neigh_parms *parms;
++	u32 inaddr_any = INADDR_ANY;
+ 
++	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
++		memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
++
++	addr = *(__be32 *)neigh->primary_key;
+ 	rcu_read_lock();
+ 	in_dev = __in_dev_get_rcu(dev);
+ 	if (!in_dev) {
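This hunk pairs with the net/core/neighbour.c hunks above: __neigh_create() copies the caller's key into n->primary_key and then runs the table constructor, and arp_constructor() may now rewrite that key to INADDR_ANY for loopback and point-to-point devices. Hashing or comparing the caller's pkey after that point would file the entry under a key it no longer has. The ordering, simplified (label name invented):

        /* inside __neigh_create(), simplified: */
        memcpy(n->primary_key, pkey, key_len);
        if (tbl->constructor(n) < 0)    /* may rewrite n->primary_key */
                goto out;
        hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd)
                        >> (32 - nht->hash_shift);  /* hash the final key */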
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index a57056d87a43..1d2fba4aeeb2 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1359,7 +1359,7 @@ skip:
+ 
+ static bool inetdev_valid_mtu(unsigned int mtu)
+ {
+-	return mtu >= 68;
++	return mtu >= IPV4_MIN_MTU;
+ }
+ 
+ static void inetdev_send_gratuitous_arp(struct net_device *dev,
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index a8db70b7fe45..5095491e6141 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1181,7 +1181,7 @@ fail:
+ 
+ static void ip_fib_net_exit(struct net *net)
+ {
+-	unsigned int i;
++	int i;
+ 
+ 	rtnl_lock();
+ #ifdef CONFIG_IP_MULTIPLE_TABLES
+@@ -1189,7 +1189,12 @@ static void ip_fib_net_exit(struct net *net)
+ 	RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
+ 	RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
+ #endif
+-	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
++	/* Destroy the tables in reverse order to guarantee that the
++	 * local table, ID 255, is destroyed before the main table, ID
++	 * 254. This is necessary as the local table may contain
++	 * references to data contained in the main table.
++	 */
++	for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
+ 		struct hlist_head *head = &net->ipv4.fib_table_hash[i];
+ 		struct hlist_node *tmp;
+ 		struct fib_table *tb;
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index e2d3d62297ec..c17485bcb18a 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -89,6 +89,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/times.h>
+ #include <linux/pkt_sched.h>
++#include <linux/byteorder/generic.h>
+ 
+ #include <net/net_namespace.h>
+ #include <net/arp.h>
+@@ -319,6 +320,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
+ 	return scount;
+ }
+ 
++/* source address selection per RFC 3376 section 4.2.13 */
++static __be32 igmpv3_get_srcaddr(struct net_device *dev,
++				 const struct flowi4 *fl4)
++{
++	struct in_device *in_dev = __in_dev_get_rcu(dev);
++
++	if (!in_dev)
++		return htonl(INADDR_ANY);
++
++	for_ifa(in_dev) {
++		if (fl4->saddr == ifa->ifa_local)
++			return fl4->saddr;
++	} endfor_ifa(in_dev);
++
++	return htonl(INADDR_ANY);
++}
++
+ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ {
+ 	struct sk_buff *skb;
+@@ -366,7 +384,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ 	pip->frag_off = htons(IP_DF);
+ 	pip->ttl      = 1;
+ 	pip->daddr    = fl4.daddr;
+-	pip->saddr    = fl4.saddr;
++
++	rcu_read_lock();
++	pip->saddr    = igmpv3_get_srcaddr(dev, &fl4);
++	rcu_read_unlock();
++
+ 	pip->protocol = IPPROTO_IGMP;
+ 	pip->tot_len  = 0;	/* filled in later */
+ 	ip_select_ident(net, skb, NULL);
+@@ -402,16 +424,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
+ }
+ 
+ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
+-	int type, struct igmpv3_grec **ppgr)
++	int type, struct igmpv3_grec **ppgr, unsigned int mtu)
+ {
+ 	struct net_device *dev = pmc->interface->dev;
+ 	struct igmpv3_report *pih;
+ 	struct igmpv3_grec *pgr;
+ 
+-	if (!skb)
+-		skb = igmpv3_newpack(dev, dev->mtu);
+-	if (!skb)
+-		return NULL;
++	if (!skb) {
++		skb = igmpv3_newpack(dev, mtu);
++		if (!skb)
++			return NULL;
++	}
+ 	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
+ 	pgr->grec_type = type;
+ 	pgr->grec_auxwords = 0;
+@@ -433,10 +456,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
+ 	struct igmpv3_grec *pgr = NULL;
+ 	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+ 	int scount, stotal, first, isquery, truncate;
++	unsigned int mtu;
+ 
+ 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
+ 		return skb;
+ 
++	mtu = READ_ONCE(dev->mtu);
++	if (mtu < IPV4_MIN_MTU)
++		return skb;
++
+ 	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
+ 		  type == IGMPV3_MODE_IS_EXCLUDE;
+ 	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
+@@ -457,7 +485,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
+ 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+ 			if (skb)
+ 				igmpv3_sendpack(skb);
+-			skb = igmpv3_newpack(dev, dev->mtu);
++			skb = igmpv3_newpack(dev, mtu);
+ 		}
+ 	}
+ 	first = 1;
+@@ -484,12 +512,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
+ 				pgr->grec_nsrcs = htons(scount);
+ 			if (skb)
+ 				igmpv3_sendpack(skb);
+-			skb = igmpv3_newpack(dev, dev->mtu);
++			skb = igmpv3_newpack(dev, mtu);
+ 			first = 1;
+ 			scount = 0;
+ 		}
+ 		if (first) {
+-			skb = add_grhead(skb, pmc, type, &pgr);
++			skb = add_grhead(skb, pmc, type, &pgr, mtu);
+ 			first = 0;
+ 		}
+ 		if (!skb)
+@@ -523,7 +551,7 @@ empty_source:
+ 				igmpv3_sendpack(skb);
+ 				skb = NULL; /* add_grhead will get a new one */
+ 			}
+-			skb = add_grhead(skb, pmc, type, &pgr);
++			skb = add_grhead(skb, pmc, type, &pgr, mtu);
+ 		}
+ 	}
+ 	if (pgr)
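Two independent hardenings here: the source address of IGMPv3 reports is validated against the device's own addresses (RFC 3376, section 4.2.13), and dev->mtu is sampled exactly once per report. The single READ_ONCE() snapshot keeps every sizing decision within one add_grec() run consistent even if the MTU changes concurrently; re-reading a mutable field mid-loop is the bug pattern being removed:

        /* racy: repeated reads of dev->mtu may disagree within one report */
        skb = igmpv3_newpack(dev, dev->mtu);
        /* ... more sizing decisions against dev->mtu ... */

        /* fixed: one snapshot governs the whole report */
        unsigned int mtu = READ_ONCE(dev->mtu);
        if (mtu < IPV4_MIN_MTU)
                return skb;
        skb = igmpv3_newpack(dev, mtu);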
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index b6c7bdea4853..67c1333422a4 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -1203,11 +1203,8 @@ int ip_setsockopt(struct sock *sk, int level,
+ 	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
+ 			optname != IP_IPSEC_POLICY &&
+ 			optname != IP_XFRM_POLICY &&
+-			!ip_mroute_opt(optname)) {
+-		lock_sock(sk);
++			!ip_mroute_opt(optname))
+ 		err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
+-		release_sock(sk);
+-	}
+ #endif
+ 	return err;
+ }
+@@ -1232,12 +1229,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
+ 	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
+ 			optname != IP_IPSEC_POLICY &&
+ 			optname != IP_XFRM_POLICY &&
+-			!ip_mroute_opt(optname)) {
+-		lock_sock(sk);
+-		err = compat_nf_setsockopt(sk, PF_INET, optname,
+-					   optval, optlen);
+-		release_sock(sk);
+-	}
++			!ip_mroute_opt(optname))
++		err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
++					   optlen);
+ #endif
+ 	return err;
+ }
+@@ -1497,10 +1491,7 @@ int ip_getsockopt(struct sock *sk, int level,
+ 		if (get_user(len, optlen))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		err = nf_getsockopt(sk, PF_INET, optname, optval,
+-				&len);
+-		release_sock(sk);
++		err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
+ 		if (err >= 0)
+ 			err = put_user(len, optlen);
+ 		return err;
+@@ -1532,9 +1523,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
+ 		if (get_user(len, optlen))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+ 		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
+-		release_sock(sk);
+ 		if (err >= 0)
+ 			err = put_user(len, optlen);
+ 		return err;
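Dropping lock_sock() around the nf_{set,get}sockopt() calls pairs with the conntrack hunks below, where getorigdst() and its IPv6 twin now take the socket lock themselves to read the connection tuple safely. Left unchanged, the old callers would self-deadlock, since lock_sock() is not recursive:

        lock_sock(sk);
        err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
                /* -> getorigdst() -> lock_sock(sk) again: deadlock */
        release_sock(sk);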
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 35080a708b59..169bf7d1d8ca 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -393,8 +393,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+ 	dev->needed_headroom = t_hlen + hlen;
+ 	mtu -= (dev->hard_header_len + t_hlen);
+ 
+-	if (mtu < 68)
+-		mtu = 68;
++	if (mtu < IPV4_MIN_MTU)
++		mtu = IPV4_MIN_MTU;
+ 
+ 	return mtu;
+ }
+diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
+index 8e7328c6a390..6389616ccc3f 100644
+--- a/net/ipv4/ipconfig.c
++++ b/net/ipv4/ipconfig.c
+@@ -148,7 +148,11 @@ static char vendor_class_identifier[253] __initdata;
+ 
+ /* Persistent data: */
+ 
++#ifdef IPCONFIG_DYNAMIC
+ static int ic_proto_used;			/* Protocol used, if any */
++#else
++#define ic_proto_used 0
++#endif
+ static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
+ static u8 ic_domain[64];		/* DNS (not NIS) domain name */
+ 
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index ebf5821caefb..7510a851d316 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -1330,8 +1330,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
+ 
+ 	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+index 771ab3d01ad3..d098013855f0 100644
+--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
++++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
+@@ -365,7 +365,7 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
+ 	struct ipt_clusterip_tgt_info *cipinfo = par->targinfo;
+ 	const struct ipt_entry *e = par->entryinfo;
+ 	struct clusterip_config *config;
+-	int ret;
++	int ret, i;
+ 
+ 	if (cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP &&
+ 	    cipinfo->hash_mode != CLUSTERIP_HASHMODE_SIP_SPT &&
+@@ -379,8 +379,18 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
+ 		pr_info("Please specify destination IP\n");
+ 		return -EINVAL;
+ 	}
+-
+-	/* FIXME: further sanity checks */
++	if (cipinfo->num_local_nodes > ARRAY_SIZE(cipinfo->local_nodes)) {
++		pr_info("bad num_local_nodes %u\n", cipinfo->num_local_nodes);
++		return -EINVAL;
++	}
++	for (i = 0; i < cipinfo->num_local_nodes; i++) {
++		if (cipinfo->local_nodes[i] - 1 >=
++		    sizeof(config->local_nodes) * 8) {
++			pr_info("bad local_nodes[%d] %u\n",
++				i, cipinfo->local_nodes[i]);
++			return -EINVAL;
++		}
++	}
+ 
+ 	config = clusterip_config_find_get(par->net, e->ip.dst.s_addr, 1);
+ 	if (!config) {
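The new checks bound attacker-controlled array contents: num_local_nodes must fit the fixed local_nodes[] array, and each node number, used as a 1-based bit index into config->local_nodes, must stay below the bitmap width. The `x - 1 >= nbits` form also rejects x == 0, because the subtraction wraps before the comparison. A standalone illustration (types here are illustrative):

        #include <stdio.h>

        int main(void)
        {
                unsigned int nbits = sizeof(unsigned long) * 8; /* bitmap width */
                unsigned int node;

                for (node = 0; node <= 2; node++)
                        printf("node %u: %s\n", node,
                               node - 1 >= nbits ? "rejected" : "ok");
                /* node 0: 0-1 wraps to UINT_MAX -> rejected; 1 and 2 -> ok */
                return 0;
        }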
+diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+index 30ad9554b5e9..406d69f033ac 100644
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -261,15 +261,19 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+ 	struct nf_conntrack_tuple tuple;
+ 
+ 	memset(&tuple, 0, sizeof(tuple));
++
++	lock_sock(sk);
+ 	tuple.src.u3.ip = inet->inet_rcv_saddr;
+ 	tuple.src.u.tcp.port = inet->inet_sport;
+ 	tuple.dst.u3.ip = inet->inet_daddr;
+ 	tuple.dst.u.tcp.port = inet->inet_dport;
+ 	tuple.src.l3num = PF_INET;
+ 	tuple.dst.protonum = sk->sk_protocol;
++	release_sock(sk);
+ 
+ 	/* We only do TCP and SCTP at the moment: is there a better way? */
+-	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
++	if (tuple.dst.protonum != IPPROTO_TCP &&
++	    tuple.dst.protonum != IPPROTO_SCTP) {
+ 		pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
+ 		return -ENOPROTOOPT;
+ 	}
+diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+index cc626e1b06d3..64a8bbc06f23 100644
+--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+@@ -1260,16 +1260,6 @@ static const struct nf_conntrack_expect_policy snmp_exp_policy = {
+ 	.timeout	= 180,
+ };
+ 
+-static struct nf_conntrack_helper snmp_helper __read_mostly = {
+-	.me			= THIS_MODULE,
+-	.help			= help,
+-	.expect_policy		= &snmp_exp_policy,
+-	.name			= "snmp",
+-	.tuple.src.l3num	= AF_INET,
+-	.tuple.src.u.udp.port	= cpu_to_be16(SNMP_PORT),
+-	.tuple.dst.protonum	= IPPROTO_UDP,
+-};
+-
+ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+ 	.me			= THIS_MODULE,
+ 	.help			= help,
+@@ -1288,17 +1278,10 @@ static struct nf_conntrack_helper snmp_trap_helper __read_mostly = {
+ 
+ static int __init nf_nat_snmp_basic_init(void)
+ {
+-	int ret = 0;
+-
+ 	BUG_ON(nf_nat_snmp_hook != NULL);
+ 	RCU_INIT_POINTER(nf_nat_snmp_hook, help);
+ 
+-	ret = nf_conntrack_helper_register(&snmp_trap_helper);
+-	if (ret < 0) {
+-		nf_conntrack_helper_unregister(&snmp_helper);
+-		return ret;
+-	}
+-	return ret;
++	return nf_conntrack_helper_register(&snmp_trap_helper);
+ }
+ 
+ static void __exit nf_nat_snmp_basic_fini(void)
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index a1de8300cfce..94a4b28e5da6 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2137,6 +2137,9 @@ adjudge_to_death:
+ 			tcp_send_active_reset(sk, GFP_ATOMIC);
+ 			NET_INC_STATS_BH(sock_net(sk),
+ 					LINUX_MIB_TCPABORTONMEMORY);
++		} else if (!check_net(sock_net(sk))) {
++			/* Not possible to send reset; just close */
++			tcp_set_state(sk, TCP_CLOSE);
+ 		}
+ 	}
+ 
+@@ -2232,6 +2235,12 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 
+ 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ 
++	if (sk->sk_frag.page) {
++		put_page(sk->sk_frag.page);
++		sk->sk_frag.page = NULL;
++		sk->sk_frag.offset = 0;
++	}
++
+ 	sk->sk_error_report(sk);
+ 	return err;
+ }
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 2923f7f7932a..38e9dc5b735d 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -814,7 +814,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+ 			0,
+-			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
++			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
+ 					  AF_INET),
+ 			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
+ 			ip_hdr(skb)->tos);
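In both this hunk and its net/ipv6/tcp_ipv6.c twin further down, skb is a segment received from the peer, so the MD5 key for the ACK we send back must be looked up by the packet's source address, not our own:

        /* skb arrived from the peer:
         *   ip_hdr(skb)->saddr = peer's address -> correct key lookup
         *   ip_hdr(skb)->daddr = our address    -> the old, wrong lookup
         */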
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index f8c6b2343301..4a3f230ef91b 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -46,11 +46,19 @@ static void tcp_write_err(struct sock *sk)
+  * to prevent DoS attacks. It is called when a retransmission timeout
+  * or zero probe timeout occurs on orphaned socket.
+  *
++ * Also close if our net namespace is exiting; in that case there is no
++ * hope of ever communicating again since all netns interfaces are already
++ * down (or about to be down), and we need to release our dst references,
++ * which have been moved to the netns loopback interface, so the namespace
++ * can finish exiting.  This condition is only possible if we are a kernel
++ * socket, as those do not hold references to the namespace.
++ *
+  * Criteria is still not confirmed experimentally and may change.
+  * We kill the socket, if:
+  * 1. If number of orphaned sockets exceeds an administratively configured
+  *    limit.
+  * 2. If we have strong memory pressure.
++ * 3. If our net namespace is exiting.
+  */
+ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ {
+@@ -79,6 +87,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+ 		return 1;
+ 	}
++
++	if (!check_net(sock_net(sk))) {
++		/* Not possible to send reset; just close */
++		tcp_done(sk);
++		return 1;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
+index a6cea1d5e20d..33c0879612f5 100644
+--- a/net/ipv4/tcp_vegas.c
++++ b/net/ipv4/tcp_vegas.c
+@@ -158,7 +158,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);
+ 
+ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
+ {
+-	return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
++	return  min(tp->snd_ssthresh, tp->snd_cwnd);
+ }
+ 
+ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 7a6317671d32..97397a3df219 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1217,14 +1217,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ 	v6_cork->tclass = tclass;
+ 	if (rt->dst.flags & DST_XFRM_TUNNEL)
+ 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
++		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
+ 	else
+ 		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
+-		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
++		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
+ 	if (np->frag_size < mtu) {
+ 		if (np->frag_size)
+ 			mtu = np->frag_size;
+ 	}
++	if (mtu < IPV6_MIN_MTU)
++		return -EINVAL;
+ 	cork->base.fragsize = mtu;
+ 	if (dst_allfrag(rt->dst.path))
+ 		cork->base.flags |= IPCORK_ALLFRAG;
+@@ -1757,10 +1759,13 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
+ 	cork.base.flags = 0;
+ 	cork.base.addr = 0;
+ 	cork.base.opt = NULL;
++	cork.base.dst = NULL;
+ 	v6_cork.opt = NULL;
+ 	err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
+-	if (err)
++	if (err) {
++		ip6_cork_release(&cork, &v6_cork);
+ 		return ERR_PTR(err);
++	}
+ 
+ 	if (dontfrag < 0)
+ 		dontfrag = inet6_sk(sk)->dontfrag;
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 1c4db0fe7f88..672dd08dc3dd 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -496,6 +496,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
+ 		return ERR_PTR(-ENOENT);
+ 
+ 	it->mrt = mrt;
++	it->cache = NULL;
+ 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
+ 		: SEQ_START_TOKEN;
+ }
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 4449ad1f8114..2ad727bba3a5 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -896,12 +896,8 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
+ #ifdef CONFIG_NETFILTER
+ 	/* we need to exclude all possible ENOPROTOOPTs except default case */
+ 	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
+-			optname != IPV6_XFRM_POLICY) {
+-		lock_sock(sk);
+-		err = nf_setsockopt(sk, PF_INET6, optname, optval,
+-				optlen);
+-		release_sock(sk);
+-	}
++			optname != IPV6_XFRM_POLICY)
++		err = nf_setsockopt(sk, PF_INET6, optname, optval, optlen);
+ #endif
+ 	return err;
+ }
+@@ -931,12 +927,9 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ #ifdef CONFIG_NETFILTER
+ 	/* we need to exclude all possible ENOPROTOOPTs except default case */
+ 	if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
+-	    optname != IPV6_XFRM_POLICY) {
+-		lock_sock(sk);
+-		err = compat_nf_setsockopt(sk, PF_INET6, optname,
+-					   optval, optlen);
+-		release_sock(sk);
+-	}
++	    optname != IPV6_XFRM_POLICY)
++		err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
++					   optlen);
+ #endif
+ 	return err;
+ }
+@@ -1338,10 +1331,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
+ 		if (get_user(len, optlen))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		err = nf_getsockopt(sk, PF_INET6, optname, optval,
+-				&len);
+-		release_sock(sk);
++		err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
+ 		if (err >= 0)
+ 			err = put_user(len, optlen);
+ 	}
+@@ -1380,10 +1370,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ 		if (get_user(len, optlen))
+ 			return -EFAULT;
+ 
+-		lock_sock(sk);
+-		err = compat_nf_getsockopt(sk, PF_INET6,
+-					   optname, optval, &len);
+-		release_sock(sk);
++		err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
+ 		if (err >= 0)
+ 			err = put_user(len, optlen);
+ 	}
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 9a63110b6548..47de89f57a80 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1668,16 +1668,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
+ }
+ 
+ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+-	int type, struct mld2_grec **ppgr)
++	int type, struct mld2_grec **ppgr, unsigned int mtu)
+ {
+-	struct net_device *dev = pmc->idev->dev;
+ 	struct mld2_report *pmr;
+ 	struct mld2_grec *pgr;
+ 
+-	if (!skb)
+-		skb = mld_newpack(pmc->idev, dev->mtu);
+-	if (!skb)
+-		return NULL;
++	if (!skb) {
++		skb = mld_newpack(pmc->idev, mtu);
++		if (!skb)
++			return NULL;
++	}
+ 	pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
+ 	pgr->grec_type = type;
+ 	pgr->grec_auxwords = 0;
+@@ -1700,10 +1700,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ 	struct mld2_grec *pgr = NULL;
+ 	struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
+ 	int scount, stotal, first, isquery, truncate;
++	unsigned int mtu;
+ 
+ 	if (pmc->mca_flags & MAF_NOREPORT)
+ 		return skb;
+ 
++	mtu = READ_ONCE(dev->mtu);
++	if (mtu < IPV6_MIN_MTU)
++		return skb;
++
+ 	isquery = type == MLD2_MODE_IS_INCLUDE ||
+ 		  type == MLD2_MODE_IS_EXCLUDE;
+ 	truncate = type == MLD2_MODE_IS_EXCLUDE ||
+@@ -1724,7 +1729,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ 		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
+ 			if (skb)
+ 				mld_sendpack(skb);
+-			skb = mld_newpack(idev, dev->mtu);
++			skb = mld_newpack(idev, mtu);
+ 		}
+ 	}
+ 	first = 1;
+@@ -1751,12 +1756,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
+ 				pgr->grec_nsrcs = htons(scount);
+ 			if (skb)
+ 				mld_sendpack(skb);
+-			skb = mld_newpack(idev, dev->mtu);
++			skb = mld_newpack(idev, mtu);
+ 			first = 1;
+ 			scount = 0;
+ 		}
+ 		if (first) {
+-			skb = add_grhead(skb, pmc, type, &pgr);
++			skb = add_grhead(skb, pmc, type, &pgr, mtu);
+ 			first = 0;
+ 		}
+ 		if (!skb)
+@@ -1790,7 +1795,7 @@ empty_source:
+ 				mld_sendpack(skb);
+ 				skb = NULL; /* add_grhead will get a new one */
+ 			}
+-			skb = add_grhead(skb, pmc, type, &pgr);
++			skb = add_grhead(skb, pmc, type, &pgr, mtu);
+ 		}
+ 	}
+ 	if (pgr)
+diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+index 4ba0c34c627b..6bb16657db3a 100644
+--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+@@ -232,20 +232,27 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
+ static int
+ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+ {
+-	const struct inet_sock *inet = inet_sk(sk);
++	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+ 	const struct ipv6_pinfo *inet6 = inet6_sk(sk);
++	const struct inet_sock *inet = inet_sk(sk);
+ 	const struct nf_conntrack_tuple_hash *h;
+ 	struct sockaddr_in6 sin6;
+-	struct nf_conntrack_tuple tuple = { .src.l3num = NFPROTO_IPV6 };
+ 	struct nf_conn *ct;
++	__be32 flow_label;
++	int bound_dev_if;
+ 
++	lock_sock(sk);
+ 	tuple.src.u3.in6 = sk->sk_v6_rcv_saddr;
+ 	tuple.src.u.tcp.port = inet->inet_sport;
+ 	tuple.dst.u3.in6 = sk->sk_v6_daddr;
+ 	tuple.dst.u.tcp.port = inet->inet_dport;
+ 	tuple.dst.protonum = sk->sk_protocol;
++	bound_dev_if = sk->sk_bound_dev_if;
++	flow_label = inet6->flow_label;
++	release_sock(sk);
+ 
+-	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP)
++	if (tuple.dst.protonum != IPPROTO_TCP &&
++	    tuple.dst.protonum != IPPROTO_SCTP)
+ 		return -ENOPROTOOPT;
+ 
+ 	if (*len < 0 || (unsigned int) *len < sizeof(sin6))
+@@ -263,14 +270,13 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+ 
+ 	sin6.sin6_family = AF_INET6;
+ 	sin6.sin6_port = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.tcp.port;
+-	sin6.sin6_flowinfo = inet6->flow_label & IPV6_FLOWINFO_MASK;
++	sin6.sin6_flowinfo = flow_label & IPV6_FLOWINFO_MASK;
+ 	memcpy(&sin6.sin6_addr,
+ 		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.in6,
+ 					sizeof(sin6.sin6_addr));
+ 
+ 	nf_ct_put(ct);
+-	sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
+-						 sk->sk_bound_dev_if);
++	sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr, bound_dev_if);
+ 	return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
+ }
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 2f0f1b415fbe..9f274781ba57 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -940,7 +940,7 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+ 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ 			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
+-			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
++			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
+ 			0, 0);
+ }
+ 
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 39c78c9e1c68..354c43a1c43d 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -401,6 +401,11 @@ static int verify_address_len(const void *p)
+ #endif
+ 	int len;
+ 
++	if (sp->sadb_address_len <
++	    DIV_ROUND_UP(sizeof(*sp) + offsetofend(typeof(*addr), sa_family),
++			 sizeof(uint64_t)))
++		return -EINVAL;
++
+ 	switch (addr->sa_family) {
+ 	case AF_INET:
+ 		len = DIV_ROUND_UP(sizeof(*sp) + sizeof(*sin), sizeof(uint64_t));
+@@ -511,6 +516,9 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
+ 		uint16_t ext_type;
+ 		int ext_len;
+ 
++		if (len < sizeof(*ehdr))
++			return -EINVAL;
++
+ 		ext_len  = ehdr->sadb_ext_len;
+ 		ext_len *= sizeof(uint64_t);
+ 		ext_type = ehdr->sadb_ext_type;
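verify_address_len() previously read addr->sa_family before checking that the extension was large enough to contain it, and parse_exthdrs() similarly reads an extension header that the remaining length may not cover. The new minimum for an address extension works out as follows, assuming the usual PF_KEY v2 layout where struct sadb_address is 8 bytes and sa_family occupies the first 2 bytes of struct sockaddr:

        DIV_ROUND_UP(sizeof(struct sadb_address)                /*  8 bytes */
                     + offsetofend(struct sockaddr, sa_family), /* + 2 bytes */
                     sizeof(uint64_t))                          /* 8-byte units */
            = DIV_ROUND_UP(10, 8) = 2        /* i.e. at least 16 bytes */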
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index 37e0aa4891a2..cd85cbf9bf39 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -2857,7 +2857,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
+ 	}
+ 	if (beacon->probe_resp_len) {
+ 		new_beacon->probe_resp_len = beacon->probe_resp_len;
+-		beacon->probe_resp = pos;
++		new_beacon->probe_resp = pos;
+ 		memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
+ 		pos += beacon->probe_resp_len;
+ 	}
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index 214e63b84e5c..4efc60236cdb 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -763,7 +763,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 	struct mesh_path *mpath;
+ 	u8 ttl, flags, hopcount;
+ 	const u8 *orig_addr;
+-	u32 orig_sn, metric, metric_txsta, interval;
++	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
+ 	bool root_is_gate;
+ 
+ 	ttl = rann->rann_ttl;
+@@ -774,7 +774,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 	interval = le32_to_cpu(rann->rann_interval);
+ 	hopcount = rann->rann_hopcount;
+ 	hopcount++;
+-	metric = le32_to_cpu(rann->rann_metric);
++	orig_metric = le32_to_cpu(rann->rann_metric);
+ 
+ 	/*  Ignore our own RANNs */
+ 	if (ether_addr_equal(orig_addr, sdata->vif.addr))
+@@ -791,7 +791,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 		return;
+ 	}
+ 
+-	metric_txsta = airtime_link_metric_get(local, sta);
++	last_hop_metric = airtime_link_metric_get(local, sta);
++	new_metric = orig_metric + last_hop_metric;
++	if (new_metric < orig_metric)
++		new_metric = MAX_METRIC;
+ 
+ 	mpath = mesh_path_lookup(sdata, orig_addr);
+ 	if (!mpath) {
+@@ -804,7 +807,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	if (!(SN_LT(mpath->sn, orig_sn)) &&
+-	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
++	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
+ 		rcu_read_unlock();
+ 		return;
+ 	}
+@@ -822,7 +825,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 	}
+ 
+ 	mpath->sn = orig_sn;
+-	mpath->rann_metric = metric + metric_txsta;
++	mpath->rann_metric = new_metric;
+ 	mpath->is_root = true;
+ 	/* Recording RANNs sender address to send individually
+ 	 * addressed PREQs destined for root mesh STA */
+@@ -842,7 +845,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
+ 		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
+ 				       orig_sn, 0, NULL, 0, broadcast_addr,
+ 				       hopcount, ttl, interval,
+-				       metric + metric_txsta, 0, sdata);
++				       new_metric, 0, sdata);
+ 	}
+ 
+ 	rcu_read_unlock();
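Renaming aside, the substantive change is saturating the accumulated path metric: orig_metric + last_hop_metric is computed in u32, and `new < orig` after an unsigned add can only mean the sum wrapped, so it is pinned to MAX_METRIC instead of wrapping to a tiny, artificially attractive value. The check in isolation:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t max_metric = 0xffffffff;   /* illustrative MAX_METRIC */
                uint32_t orig = 0xffffff00, hop = 0x200;
                uint32_t sum = orig + hop;          /* wraps to 0x100 */

                if (sum < orig)                     /* wrap detected */
                        sum = max_metric;           /* saturate, don't wrap */
                printf("metric: 0x%x\n", sum);
                return 0;
        }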
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index bc3f791845aa..e951ca98757e 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -695,6 +695,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ 	 * least once for the stats anyway.
+ 	 */
+ 	rcu_read_lock_bh();
++ begin:
+ 	hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
+ 		ct = nf_ct_tuplehash_to_ctrack(h);
+ 		if (ct != ignored_conntrack &&
+@@ -706,6 +707,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ 		}
+ 		NF_CT_STAT_INC(net, searched);
+ 	}
++
++	if (get_nulls_value(n) != hash) {
++		NF_CT_STAT_INC(net, search_restart);
++		goto begin;
++	}
++
+ 	rcu_read_unlock_bh();
+ 
+ 	return 0;
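An RCU walk over an SLAB_DESTROY_BY_RCU hash can be carried onto a different chain mid-traversal when the entry it stands on is freed and reused. The nulls marker at the end of each chain encodes which bucket it terminates, so finishing on a marker that does not match the bucket we started from means the walk strayed and must restart, condensed:

 begin:
        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
                /* ... tuple comparisons ... */
        }
        if (get_nulls_value(n) != hash) /* walk ended on another bucket */
                goto begin;             /* our entry was recycled; rescan */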
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 4f4c88d70a8f..cba342b37b62 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -557,7 +557,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
+ 	helper = rcu_dereference(nfct_help(expect->master)->helper);
+ 	if (helper) {
+ 		seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
+-		if (helper->expect_policy[expect->class].name)
++		if (helper->expect_policy[expect->class].name[0])
+ 			seq_printf(s, "/%s",
+ 				   helper->expect_policy[expect->class].name);
+ 	}
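The old test could never fail: expect_policy[].name is a char array, so `if (...name)` tests the decayed array address, which is always non-NULL. Testing name[0] asks the intended question, whether the string is empty:

        #include <stdio.h>

        struct policy { char name[16]; };

        int main(void)
        {
                struct policy p = { { 0 } };

                if (p.name)         /* always true: address of the array */
                        printf("address test: taken\n");
                if (p.name[0])      /* false for the empty string */
                        printf("content test: taken\n");
                return 0;
        }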
+diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
+index 885b4aba3695..1665c2159e4b 100644
+--- a/net/netfilter/nf_conntrack_sip.c
++++ b/net/netfilter/nf_conntrack_sip.c
+@@ -1434,9 +1434,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
+ 		handler = &sip_handlers[i];
+ 		if (handler->request == NULL)
+ 			continue;
+-		if (*datalen < handler->len ||
++		if (*datalen < handler->len + 2 ||
+ 		    strncasecmp(*dptr, handler->method, handler->len))
+ 			continue;
++		if ((*dptr)[handler->len] != ' ' ||
++		    !isalpha((*dptr)[handler->len+1]))
++			continue;
+ 
+ 		if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
+ 				      &matchoff, &matchlen) <= 0) {
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 6d10002d23f8..ac143ae4f7b6 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -17,6 +17,7 @@
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
++#include <linux/capability.h>
+ #include <net/netlink.h>
+ #include <net/sock.h>
+ 
+@@ -32,6 +33,13 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+ MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+ 
++struct nfnl_cthelper {
++	struct list_head		list;
++	struct nf_conntrack_helper	helper;
++};
++
++static LIST_HEAD(nfnl_cthelper_list);
++
+ static int
+ nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
+ 			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+@@ -205,18 +213,20 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
+ 		     struct nf_conntrack_tuple *tuple)
+ {
+ 	struct nf_conntrack_helper *helper;
++	struct nfnl_cthelper *nfcth;
+ 	int ret;
+ 
+ 	if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
+ 		return -EINVAL;
+ 
+-	helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
+-	if (helper == NULL)
++	nfcth = kzalloc(sizeof(*nfcth), GFP_KERNEL);
++	if (nfcth == NULL)
+ 		return -ENOMEM;
++	helper = &nfcth->helper;
+ 
+ 	ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
+ 	if (ret < 0)
+-		goto err;
++		goto err1;
+ 
+ 	strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+ 	helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+@@ -247,14 +257,100 @@ nfnl_cthelper_create(const struct nlattr * const tb[],
+ 
+ 	ret = nf_conntrack_helper_register(helper);
+ 	if (ret < 0)
+-		goto err;
++		goto err2;
+ 
++	list_add_tail(&nfcth->list, &nfnl_cthelper_list);
+ 	return 0;
+-err:
+-	kfree(helper);
++err2:
++	kfree(helper->expect_policy);
++err1:
++	kfree(nfcth);
+ 	return ret;
+ }
+ 
++static int
++nfnl_cthelper_update_policy_one(const struct nf_conntrack_expect_policy *policy,
++				struct nf_conntrack_expect_policy *new_policy,
++				const struct nlattr *attr)
++{
++	struct nlattr *tb[NFCTH_POLICY_MAX + 1];
++	int err;
++
++	err = nla_parse_nested(tb, NFCTH_POLICY_MAX, attr,
++			       nfnl_cthelper_expect_pol);
++	if (err < 0)
++		return err;
++
++	if (!tb[NFCTH_POLICY_NAME] ||
++	    !tb[NFCTH_POLICY_EXPECT_MAX] ||
++	    !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
++		return -EINVAL;
++
++	if (nla_strcmp(tb[NFCTH_POLICY_NAME], policy->name))
++		return -EBUSY;
++
++	new_policy->max_expected =
++		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
++	new_policy->timeout =
++		ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
++
++	return 0;
++}
++
++static int nfnl_cthelper_update_policy_all(struct nlattr *tb[],
++					   struct nf_conntrack_helper *helper)
++{
++	struct nf_conntrack_expect_policy new_policy[helper->expect_class_max + 1];
++	struct nf_conntrack_expect_policy *policy;
++	int i, err;
++
++	/* Check first that all policy attributes are well-formed, so we don't
++	 * leave things in inconsistent state on errors.
++	 */
++	for (i = 0; i < helper->expect_class_max + 1; i++) {
++
++		if (!tb[NFCTH_POLICY_SET + i])
++			return -EINVAL;
++
++		err = nfnl_cthelper_update_policy_one(&helper->expect_policy[i],
++						      &new_policy[i],
++						      tb[NFCTH_POLICY_SET + i]);
++		if (err < 0)
++			return err;
++	}
++	/* Now we can safely update them. */
++	for (i = 0; i < helper->expect_class_max + 1; i++) {
++		policy = (struct nf_conntrack_expect_policy *)
++				&helper->expect_policy[i];
++		policy->max_expected = new_policy->max_expected;
++		policy->timeout	= new_policy->timeout;
++	}
++
++	return 0;
++}
++
++static int nfnl_cthelper_update_policy(struct nf_conntrack_helper *helper,
++				       const struct nlattr *attr)
++{
++	struct nlattr *tb[NFCTH_POLICY_SET_MAX + 1];
++	unsigned int class_max;
++	int err;
++
++	err = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
++			       nfnl_cthelper_expect_policy_set);
++	if (err < 0)
++		return err;
++
++	if (!tb[NFCTH_POLICY_SET_NUM])
++		return -EINVAL;
++
++	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
++	if (helper->expect_class_max + 1 != class_max)
++		return -EBUSY;
++
++	return nfnl_cthelper_update_policy_all(tb, helper);
++}
++
+ static int
+ nfnl_cthelper_update(const struct nlattr * const tb[],
+ 		     struct nf_conntrack_helper *helper)
+@@ -265,8 +361,7 @@ nfnl_cthelper_update(const struct nlattr * const tb[],
+ 		return -EBUSY;
+ 
+ 	if (tb[NFCTH_POLICY]) {
+-		ret = nfnl_cthelper_parse_expect_policy(helper,
+-							tb[NFCTH_POLICY]);
++		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);
+ 		if (ret < 0)
+ 			return ret;
+ 	}
+@@ -295,7 +390,11 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ 	const char *helper_name;
+ 	struct nf_conntrack_helper *cur, *helper = NULL;
+ 	struct nf_conntrack_tuple tuple;
+-	int ret = 0, i;
++	struct nfnl_cthelper *nlcth;
++	int ret = 0;
++
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
+ 
+ 	if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+ 		return -EINVAL;
+@@ -306,31 +405,22 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	rcu_read_lock();
+-	for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
+-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
++	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
++		cur = &nlcth->helper;
+ 
+-			/* skip non-userspace conntrack helpers. */
+-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+-				continue;
++		if (strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++			continue;
+ 
+-			if (strncmp(cur->name, helper_name,
+-					NF_CT_HELPER_NAME_LEN) != 0)
+-				continue;
++		if ((tuple.src.l3num != cur->tuple.src.l3num ||
++		     tuple.dst.protonum != cur->tuple.dst.protonum))
++			continue;
+ 
+-			if ((tuple.src.l3num != cur->tuple.src.l3num ||
+-			     tuple.dst.protonum != cur->tuple.dst.protonum))
+-				continue;
++		if (nlh->nlmsg_flags & NLM_F_EXCL)
++			return -EEXIST;
+ 
+-			if (nlh->nlmsg_flags & NLM_F_EXCL) {
+-				ret = -EEXIST;
+-				goto err;
+-			}
+-			helper = cur;
+-			break;
+-		}
++		helper = cur;
++		break;
+ 	}
+-	rcu_read_unlock();
+ 
+ 	if (helper == NULL)
+ 		ret = nfnl_cthelper_create(tb, &tuple);
+@@ -338,9 +428,6 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ 		ret = nfnl_cthelper_update(tb, helper);
+ 
+ 	return ret;
+-err:
+-	rcu_read_unlock();
+-	return ret;
+ }
+ 
+ static int
+@@ -504,13 +591,17 @@ static int
+ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ 		  const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+ {
+-	int ret = -ENOENT, i;
++	int ret = -ENOENT;
+ 	struct nf_conntrack_helper *cur;
+ 	struct sk_buff *skb2;
+ 	char *helper_name = NULL;
+ 	struct nf_conntrack_tuple tuple;
++	struct nfnl_cthelper *nlcth;
+ 	bool tuple_set = false;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ 		struct netlink_dump_control c = {
+ 			.dump = nfnl_cthelper_dump_table,
+@@ -529,45 +620,39 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ 		tuple_set = true;
+ 	}
+ 
+-	for (i = 0; i < nf_ct_helper_hsize; i++) {
+-		hlist_for_each_entry_rcu(cur, &nf_ct_helper_hash[i], hnode) {
+-
+-			/* skip non-userspace conntrack helpers. */
+-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+-				continue;
++	list_for_each_entry(nlcth, &nfnl_cthelper_list, list) {
++		cur = &nlcth->helper;
++		if (helper_name &&
++		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++			continue;
+ 
+-			if (helper_name && strncmp(cur->name, helper_name,
+-						NF_CT_HELPER_NAME_LEN) != 0) {
+-				continue;
+-			}
+-			if (tuple_set &&
+-			    (tuple.src.l3num != cur->tuple.src.l3num ||
+-			     tuple.dst.protonum != cur->tuple.dst.protonum))
+-				continue;
++		if (tuple_set &&
++		    (tuple.src.l3num != cur->tuple.src.l3num ||
++		     tuple.dst.protonum != cur->tuple.dst.protonum))
++			continue;
+ 
+-			skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+-			if (skb2 == NULL) {
+-				ret = -ENOMEM;
+-				break;
+-			}
++		skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
++		if (skb2 == NULL) {
++			ret = -ENOMEM;
++			break;
++		}
+ 
+-			ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
+-						nlh->nlmsg_seq,
+-						NFNL_MSG_TYPE(nlh->nlmsg_type),
+-						NFNL_MSG_CTHELPER_NEW, cur);
+-			if (ret <= 0) {
+-				kfree_skb(skb2);
+-				break;
+-			}
++		ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).portid,
++					      nlh->nlmsg_seq,
++					      NFNL_MSG_TYPE(nlh->nlmsg_type),
++					      NFNL_MSG_CTHELPER_NEW, cur);
++		if (ret <= 0) {
++			kfree_skb(skb2);
++			break;
++		}
+ 
+-			ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
+-						MSG_DONTWAIT);
+-			if (ret > 0)
+-				ret = 0;
++		ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).portid,
++				      MSG_DONTWAIT);
++		if (ret > 0)
++			ret = 0;
+ 
+-			/* this avoids a loop in nfnetlink. */
+-			return ret == -EAGAIN ? -ENOBUFS : ret;
+-		}
++		/* this avoids a loop in nfnetlink. */
++		return ret == -EAGAIN ? -ENOBUFS : ret;
+ 	}
+ 	return ret;
+ }
+@@ -578,10 +663,13 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ {
+ 	char *helper_name = NULL;
+ 	struct nf_conntrack_helper *cur;
+-	struct hlist_node *tmp;
+ 	struct nf_conntrack_tuple tuple;
+ 	bool tuple_set = false, found = false;
+-	int i, j = 0, ret;
++	struct nfnl_cthelper *nlcth, *n;
++	int j = 0, ret;
++
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
+ 
+ 	if (tb[NFCTH_NAME])
+ 		helper_name = nla_data(tb[NFCTH_NAME]);
+@@ -594,28 +682,27 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ 		tuple_set = true;
+ 	}
+ 
+-	for (i = 0; i < nf_ct_helper_hsize; i++) {
+-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
+-								hnode) {
+-			/* skip non-userspace conntrack helpers. */
+-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+-				continue;
++	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
++		cur = &nlcth->helper;
++		j++;
+ 
+-			j++;
++		if (helper_name &&
++		    strncmp(cur->name, helper_name, NF_CT_HELPER_NAME_LEN))
++			continue;
+ 
+-			if (helper_name && strncmp(cur->name, helper_name,
+-						NF_CT_HELPER_NAME_LEN) != 0) {
+-				continue;
+-			}
+-			if (tuple_set &&
+-			    (tuple.src.l3num != cur->tuple.src.l3num ||
+-			     tuple.dst.protonum != cur->tuple.dst.protonum))
+-				continue;
++		if (tuple_set &&
++		    (tuple.src.l3num != cur->tuple.src.l3num ||
++		     tuple.dst.protonum != cur->tuple.dst.protonum))
++			continue;
+ 
+-			found = true;
+-			nf_conntrack_helper_unregister(cur);
+-		}
++		found = true;
++		nf_conntrack_helper_unregister(cur);
++		kfree(cur->expect_policy);
++
++		list_del(&nlcth->list);
++		kfree(nlcth);
+ 	}
++
+ 	/* Make sure we return success if we flush and there is no helpers */
+ 	return (found || j == 0) ? 0 : -ENOENT;
+ }
+@@ -664,20 +751,16 @@ err_out:
+ static void __exit nfnl_cthelper_exit(void)
+ {
+ 	struct nf_conntrack_helper *cur;
+-	struct hlist_node *tmp;
+-	int i;
++	struct nfnl_cthelper *nlcth, *n;
+ 
+ 	nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
+ 
+-	for (i=0; i<nf_ct_helper_hsize; i++) {
+-		hlist_for_each_entry_safe(cur, tmp, &nf_ct_helper_hash[i],
+-									hnode) {
+-			/* skip non-userspace conntrack helpers. */
+-			if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+-				continue;
++	list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
++		cur = &nlcth->helper;
+ 
+-			nf_conntrack_helper_unregister(cur);
+-		}
++		nf_conntrack_helper_unregister(cur);
++		kfree(cur->expect_policy);
++		kfree(nlcth);
+ 	}
+ }
+ 
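
The nfnl_cthelper hunks above move the module from walking the global conntrack-helper hash to walking its own nfnl_cthelper_list, and the teardown paths use the _safe list iterator because each matching node is unregistered and freed inside the loop body. A minimal userspace sketch of that iterate-and-free idiom follows; the list type and names are stand-ins, not the kernel's list_head machinery.

#include <stdio.h>
#include <stdlib.h>

struct helper_node {
	int id;
	struct helper_node *next;
};

int main(void)
{
	struct helper_node *head = NULL, *cur, *next;

	for (int i = 0; i < 3; i++) {		/* build a small list */
		cur = malloc(sizeof(*cur));
		if (!cur)
			return 1;
		cur->id = i;
		cur->next = head;
		head = cur;
	}

	/* "safe" traversal: load the successor before freeing cur, which
	 * is exactly the guarantee list_for_each_entry_safe() provides */
	for (cur = head; cur; cur = next) {
		next = cur->next;
		printf("unregistering helper %d\n", cur->id);
		free(cur);
	}
	return 0;
}
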
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 703fc9ba6f20..f4fcd9441561 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -38,8 +38,6 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+ MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
+ 
+-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+-
+ struct compat_delta {
+ 	unsigned int offset; /* offset in kernel */
+ 	int delta; /* delta in 32bit user land */
+@@ -211,6 +209,9 @@ xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
+ {
+ 	struct xt_match *match;
+ 
++	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
++		return ERR_PTR(-EINVAL);
++
+ 	match = xt_find_match(nfproto, name, revision);
+ 	if (IS_ERR(match)) {
+ 		request_module("%st_%s", xt_prefix[nfproto], name);
+@@ -253,6 +254,9 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
+ {
+ 	struct xt_target *target;
+ 
++	if (strnlen(name, XT_EXTENSION_MAXNAMELEN) == XT_EXTENSION_MAXNAMELEN)
++		return ERR_PTR(-EINVAL);
++
+ 	target = xt_find_target(af, name, revision);
+ 	if (IS_ERR(target)) {
+ 		request_module("%st_%s", xt_prefix[af], name);
+@@ -951,7 +955,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
+ 	int cpu;
+ 
+ 	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+-	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
++	if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
+ 		return NULL;
+ 
+ 	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
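
On the two strnlen() checks just added above: strnlen() returning the full buffer size means no terminating NUL was found inside the buffer, so the name is rejected before it can reach request_module()'s format string. A small userspace sketch of the same check, with an illustrative buffer size standing in for XT_EXTENSION_MAXNAMELEN:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 29		/* illustrative, not the kernel constant */

static int check_name(const char *name)
{
	/* equality means the scan hit the limit without seeing a NUL */
	if (strnlen(name, NAME_MAX_LEN) == NAME_MAX_LEN)
		return -EINVAL;
	return 0;
}

int main(void)
{
	char bad[NAME_MAX_LEN];

	memset(bad, 'A', sizeof(bad));	/* deliberately unterminated */
	printf("\"tcp\": %d\n", check_name("tcp"));
	printf("unterminated: %d\n", check_name(bad));
	return 0;
}
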
+diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
+index 604df6fae6fc..0be96f8475f7 100644
+--- a/net/netfilter/xt_RATEEST.c
++++ b/net/netfilter/xt_RATEEST.c
+@@ -40,23 +40,31 @@ static void xt_rateest_hash_insert(struct xt_rateest *est)
+ 	hlist_add_head(&est->list, &rateest_hash[h]);
+ }
+ 
+-struct xt_rateest *xt_rateest_lookup(const char *name)
++static struct xt_rateest *__xt_rateest_lookup(const char *name)
+ {
+ 	struct xt_rateest *est;
+ 	unsigned int h;
+ 
+ 	h = xt_rateest_hash(name);
+-	mutex_lock(&xt_rateest_mutex);
+ 	hlist_for_each_entry(est, &rateest_hash[h], list) {
+ 		if (strcmp(est->name, name) == 0) {
+ 			est->refcnt++;
+-			mutex_unlock(&xt_rateest_mutex);
+ 			return est;
+ 		}
+ 	}
+-	mutex_unlock(&xt_rateest_mutex);
++
+ 	return NULL;
+ }
++
++struct xt_rateest *xt_rateest_lookup(const char *name)
++{
++	struct xt_rateest *est;
++
++	mutex_lock(&xt_rateest_mutex);
++	est = __xt_rateest_lookup(name);
++	mutex_unlock(&xt_rateest_mutex);
++	return est;
++}
+ EXPORT_SYMBOL_GPL(xt_rateest_lookup);
+ 
+ void xt_rateest_put(struct xt_rateest *est)
+@@ -104,8 +112,10 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+ 		rnd_inited = true;
+ 	}
+ 
+-	est = xt_rateest_lookup(info->name);
++	mutex_lock(&xt_rateest_mutex);
++	est = __xt_rateest_lookup(info->name);
+ 	if (est) {
++		mutex_unlock(&xt_rateest_mutex);
+ 		/*
+ 		 * If estimator parameters are specified, they must match the
+ 		 * existing estimator.
+@@ -143,11 +153,13 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
+ 
+ 	info->est = est;
+ 	xt_rateest_hash_insert(est);
++	mutex_unlock(&xt_rateest_mutex);
+ 	return 0;
+ 
+ err2:
+ 	kfree(est);
+ err1:
++	mutex_unlock(&xt_rateest_mutex);
+ 	return ret;
+ }
+ 
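
The xt_RATEEST change above is a lock-scoping fix: the lookup moves into an unlocked __xt_rateest_lookup() helper, and the checkentry path now holds xt_rateest_mutex across both the lookup and the hash insert, so two concurrent checkentry calls can no longer both miss and insert duplicate estimators. A toy pthread sketch of that lookup-or-insert-under-one-lock pattern; the table and names are made up for illustration.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define SLOTS 8

static pthread_mutex_t tbl_mutex = PTHREAD_MUTEX_INITIALIZER;
static char table[SLOTS][32];			/* toy name table */

static int __lookup(const char *name)		/* caller holds tbl_mutex */
{
	for (int i = 0; i < SLOTS; i++)
		if (!strcmp(table[i], name))
			return i;
	return -1;
}

static int lookup_or_insert(const char *name)
{
	int idx;

	pthread_mutex_lock(&tbl_mutex);
	idx = __lookup(name);
	if (idx < 0)		/* miss and insert under the same lock */
		for (int i = 0; i < SLOTS; i++)
			if (!table[i][0]) {
				strncpy(table[i], name, sizeof(table[i]) - 1);
				idx = i;
				break;
			}
	pthread_mutex_unlock(&tbl_mutex);
	return idx;
}

int main(void)
{
	printf("first insert:  slot %d\n", lookup_or_insert("est0"));
	printf("second lookup: slot %d\n", lookup_or_insert("est0"));
	return 0;
}
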
+diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
+index 0778855ea5e7..20f7bd64ad40 100644
+--- a/net/netfilter/xt_osf.c
++++ b/net/netfilter/xt_osf.c
+@@ -19,6 +19,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ 
++#include <linux/capability.h>
+ #include <linux/if.h>
+ #include <linux/inetdevice.h>
+ #include <linux/ip.h>
+@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
+ 	struct xt_osf_finger *kf = NULL, *sf;
+ 	int err = 0;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (!osf_attrs[OSF_ATTR_FINGER])
+ 		return -EINVAL;
+ 
+@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
+ 	struct xt_osf_finger *sf;
+ 	int err = -ENOENT;
+ 
++	if (!capable(CAP_NET_ADMIN))
++		return -EPERM;
++
+ 	if (!osf_attrs[OSF_ATTR_FINGER])
+ 		return -EINVAL;
+ 
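
Both xt_osf callbacks above gain the same guard: the privileged nfnetlink command bails out with -EPERM before touching any state when the caller lacks CAP_NET_ADMIN. A trivial userspace stand-in for the shape of that check, with geteuid() filling in for capable():

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int privileged_command(void)
{
	if (geteuid() != 0)	/* kernel equivalent: !capable(CAP_NET_ADMIN) */
		return -EPERM;
	return 0;		/* ... the actual work would go here ... */
}

int main(void)
{
	printf("command returned %d\n", privileged_command());
	return 0;
}
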
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index a5815be7c81c..66c340bc0553 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -94,6 +94,44 @@ EXPORT_SYMBOL_GPL(nl_table);
+ 
+ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
+ 
++static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS];
++
++static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = {
++	"nlk_cb_mutex-ROUTE",
++	"nlk_cb_mutex-1",
++	"nlk_cb_mutex-USERSOCK",
++	"nlk_cb_mutex-FIREWALL",
++	"nlk_cb_mutex-SOCK_DIAG",
++	"nlk_cb_mutex-NFLOG",
++	"nlk_cb_mutex-XFRM",
++	"nlk_cb_mutex-SELINUX",
++	"nlk_cb_mutex-ISCSI",
++	"nlk_cb_mutex-AUDIT",
++	"nlk_cb_mutex-FIB_LOOKUP",
++	"nlk_cb_mutex-CONNECTOR",
++	"nlk_cb_mutex-NETFILTER",
++	"nlk_cb_mutex-IP6_FW",
++	"nlk_cb_mutex-DNRTMSG",
++	"nlk_cb_mutex-KOBJECT_UEVENT",
++	"nlk_cb_mutex-GENERIC",
++	"nlk_cb_mutex-17",
++	"nlk_cb_mutex-SCSITRANSPORT",
++	"nlk_cb_mutex-ECRYPTFS",
++	"nlk_cb_mutex-RDMA",
++	"nlk_cb_mutex-CRYPTO",
++	"nlk_cb_mutex-SMC",
++	"nlk_cb_mutex-23",
++	"nlk_cb_mutex-24",
++	"nlk_cb_mutex-25",
++	"nlk_cb_mutex-26",
++	"nlk_cb_mutex-27",
++	"nlk_cb_mutex-28",
++	"nlk_cb_mutex-29",
++	"nlk_cb_mutex-30",
++	"nlk_cb_mutex-31",
++	"nlk_cb_mutex-MAX_LINKS"
++};
++
+ static int netlink_dump(struct sock *sk);
+ static void netlink_skb_destructor(struct sk_buff *skb);
+ 
+@@ -221,6 +259,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+ 	struct sock *sk = skb->sk;
+ 	int ret = -ENOMEM;
+ 
++	if (!net_eq(dev_net(dev), sock_net(sk)))
++		return 0;
++
+ 	dev_hold(dev);
+ 
+ 	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
+@@ -1177,6 +1218,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
+ 	} else {
+ 		nlk->cb_mutex = &nlk->cb_def_mutex;
+ 		mutex_init(nlk->cb_mutex);
++		lockdep_set_class_and_name(nlk->cb_mutex,
++					   nlk_cb_mutex_keys + protocol,
++					   nlk_cb_mutex_key_strings[protocol]);
+ 	}
+ 	init_waitqueue_head(&nlk->wait);
+ #ifdef CONFIG_NETLINK_MMAP
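
The string table added above exists only so lockdep can tell the per-socket cb_mutex instances apart: each netlink protocol number selects its own lock class and printable name, which keeps lock reports from conflating, say, GENERIC with ROUTE. A toy illustration of the indexing; MAX_LINKS and the table here are abbreviated stand-ins, not the kernel values.

#include <stdio.h>

#define MAX_LINKS 4		/* abbreviated for the sketch */

static const char *const names[MAX_LINKS + 1] = {
	"nlk_cb_mutex-ROUTE",
	"nlk_cb_mutex-1",
	"nlk_cb_mutex-USERSOCK",
	"nlk_cb_mutex-FIREWALL",
	"nlk_cb_mutex-MAX_LINKS",
};

int main(void)
{
	for (int proto = 0; proto < MAX_LINKS; proto++)
		printf("protocol %d -> lock class \"%s\"\n", proto, names[proto]);
	return 0;
}
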
+diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
+index c691b1a1eee0..a2601b0c4b0f 100644
+--- a/net/openvswitch/flow_netlink.c
++++ b/net/openvswitch/flow_netlink.c
+@@ -1531,14 +1531,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
+ 
+ #define MAX_ACTIONS_BUFSIZE	(32 * 1024)
+ 
+-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
++static struct sw_flow_actions *nla_alloc_flow_actions(int size)
+ {
+ 	struct sw_flow_actions *sfa;
+ 
+-	if (size > MAX_ACTIONS_BUFSIZE) {
+-		OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
+-		return ERR_PTR(-EINVAL);
+-	}
++	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
+ 
+ 	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
+ 	if (!sfa)
+@@ -1571,12 +1568,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
+ 	new_acts_size = ksize(*sfa) * 2;
+ 
+ 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
+-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
++		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
++			OVS_NLERR(log, "Flow action size exceeds max %u",
++				  MAX_ACTIONS_BUFSIZE);
+ 			return ERR_PTR(-EMSGSIZE);
++		}
+ 		new_acts_size = MAX_ACTIONS_BUFSIZE;
+ 	}
+ 
+-	acts = nla_alloc_flow_actions(new_acts_size, log);
++	acts = nla_alloc_flow_actions(new_acts_size);
+ 	if (IS_ERR(acts))
+ 		return (void *)acts;
+ 
+@@ -2170,7 +2170,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr,
+ {
+ 	int err;
+ 
+-	*sfa = nla_alloc_flow_actions(nla_len(attr), log);
++	*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
+ 	if (IS_ERR(*sfa))
+ 		return PTR_ERR(*sfa);
+ 
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index 612c3050d514..b1ec96bca937 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -516,6 +516,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args)
+ 
+ 	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
+ 
++	if (args->nr_local == 0)
++		return -EINVAL;
++
+ 	/* figure out the number of pages in the vector */
+ 	for (i = 0; i < args->nr_local; i++) {
+ 		if (copy_from_user(&vec, &local_vec[i],
+@@ -863,6 +866,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+ err:
+ 	if (page)
+ 		put_page(page);
++	rm->atomic.op_active = 0;
+ 	kfree(rm->atomic.op_notifier);
+ 
+ 	return ret;
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 3f6437db9b0f..ec11aced121d 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -431,6 +431,9 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
+ 
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
++		return -EINVAL;
++
+ 	if (ctl->limit > CHOKE_MAX_QUEUE)
+ 		return -EINVAL;
+ 
+diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
+index 634529e0ce6b..5a476126a8fb 100644
+--- a/net/sched/sch_gred.c
++++ b/net/sched/sch_gred.c
+@@ -388,6 +388,9 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
+ 	struct gred_sched *table = qdisc_priv(sch);
+ 	struct gred_sched_data *q = table->tab[dp];
+ 
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
++		return -EINVAL;
++
+ 	if (!q) {
+ 		table->tab[dp] = q = *prealloc;
+ 		*prealloc = NULL;
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 8c0508c0e287..0505b8408c8b 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -199,6 +199,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
+ 	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+ 
+ 	ctl = nla_data(tb[TCA_RED_PARMS]);
++	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
++		return -EINVAL;
+ 
+ 	if (ctl->limit > 0) {
+ 		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 78d0eaf5de61..0dd1f2b2eb10 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -656,6 +656,9 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	if (ctl->divisor &&
+ 	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
+ 		return -EINVAL;
++	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
++					ctl_v1->Wlog))
++		return -EINVAL;
+ 	if (ctl_v1 && ctl_v1->qth_min) {
+ 		p = kmalloc(sizeof(*p), GFP_KERNEL);
+ 		if (!p)
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 946d1c28f93f..c44e3d208804 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -83,7 +83,7 @@
+ static int sctp_writeable(struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len, struct sock **orig_sk);
++				size_t msg_len);
+ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+ static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+ static int sctp_wait_for_accept(struct sock *sk, long timeo);
+@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	if (len < sizeof (struct sockaddr))
+ 		return NULL;
+ 
++	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
++		return NULL;
++
+ 	/* V4 mapped address are really of AF_INET family */
+ 	if (addr->sa.sa_family == AF_INET6 &&
+-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
+-		if (!opt->pf->af_supported(AF_INET, opt))
+-			return NULL;
+-	} else {
+-		/* Does this PF support this AF? */
+-		if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+-			return NULL;
+-	}
++	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++	    !opt->pf->af_supported(AF_INET, opt))
++		return NULL;
+ 
+ 	/* If we get this far, af is valid. */
+ 	af = sctp_get_af_specific(addr->sa.sa_family);
+@@ -1948,7 +1946,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+ 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ 	if (!sctp_wspace(asoc)) {
+ 		/* sk can be changed by peel off when waiting for buf. */
+-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
++		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+ 		if (err) {
+ 			if (err == -ESRCH) {
+ 				/* asoc is already dead. */
+@@ -4163,7 +4161,7 @@ static int sctp_init_sock(struct sock *sk)
+ 	SCTP_DBG_OBJCNT_INC(sock);
+ 
+ 	local_bh_disable();
+-	percpu_counter_inc(&sctp_sockets_allocated);
++	sk_sockets_allocated_inc(sk);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+ 
+ 	/* Nothing can fail after this block, otherwise
+@@ -4207,7 +4205,7 @@ static void sctp_destroy_sock(struct sock *sk)
+ 	}
+ 	sctp_endpoint_free(sp->ep);
+ 	local_bh_disable();
+-	percpu_counter_dec(&sctp_sockets_allocated);
++	sk_sockets_allocated_dec(sk);
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+ 	local_bh_enable();
+ }
+@@ -6979,12 +6977,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
+ 
+ /* Helper function to wait for space in the sndbuf.  */
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len, struct sock **orig_sk)
++				size_t msg_len)
+ {
+ 	struct sock *sk = asoc->base.sk;
+-	int err = 0;
+ 	long current_timeo = *timeo_p;
+ 	DEFINE_WAIT(wait);
++	int err = 0;
+ 
+ 	pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
+ 		 *timeo_p, msg_len);
+@@ -7014,17 +7012,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		current_timeo = schedule_timeout(current_timeo);
+ 		BUG_ON(sk != asoc->base.sk);
+ 		lock_sock(sk);
+-		if (sk != asoc->base.sk) {
+-			release_sock(sk);
+-			sk = asoc->base.sk;
+-			lock_sock(sk);
+-		}
++		if (sk != asoc->base.sk)
++			goto do_error;
+ 
+ 		*timeo_p = current_timeo;
+ 	}
+ 
+ out:
+-	*orig_sk = sk;
+ 	finish_wait(&asoc->wait, &wait);
+ 
+ 	/* Release the association's refcnt.  */
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index 2410d557ae39..89731c9023f0 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
+ 			goto out_free_groups;
+ 		GROUP_AT(creds->cr_group_info, i) = kgid;
+ 	}
++	groups_sort(creds->cr_group_info);
+ 
+ 	return 0;
+ out_free_groups:
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 033fec307528..036bbf2b44c1 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd,
+ 				goto out;
+ 			GROUP_AT(rsci.cred.cr_group_info, i) = kgid;
+ 		}
++		groups_sort(rsci.cred.cr_group_info);
+ 
+ 		/* mech name */
+ 		len = qword_get(&mesg, buf, mlen);
+diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
+index 621ca7b4a155..98db1715cb17 100644
+--- a/net/sunrpc/svcauth_unix.c
++++ b/net/sunrpc/svcauth_unix.c
+@@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd,
+ 		GROUP_AT(ug.gi, i) = kgid;
+ 	}
+ 
++	groups_sort(ug.gi);
+ 	ugp = unix_gid_lookup(cd, uid);
+ 	if (ugp) {
+ 		struct cache_head *ch;
+@@ -827,6 +828,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
+ 		kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
+ 		GROUP_AT(cred->cr_group_info, i) = kgid;
+ 	}
++	groups_sort(cred->cr_group_info);
+ 	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
+ 		*authp = rpc_autherr_badverf;
+ 		return SVC_DENIED;
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 5e3ad598d3f5..14972988d29d 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2189,6 +2189,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -ECONNREFUSED:
+ 	case -ECONNRESET:
+ 	case -ENETUNREACH:
++	case -EHOSTUNREACH:
+ 	case -EADDRINUSE:
+ 	case -ENOBUFS:
+ 		/* retry with existing socket, after a delay */
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 71e9b84847f3..a0e465845735 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -390,6 +390,8 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
+ 		if (rv)
+ 			goto use_default_name;
+ 	} else {
++		int rv;
++
+ use_default_name:
+ 		/* NOTE:  This is *probably* safe w/out holding rtnl because of
+ 		 * the restrictions on phy names.  Probably this call could
+@@ -397,7 +399,11 @@ use_default_name:
+ 		 * phyX.  But we might want to add some locking and check the return
+ 		 * value, and use a different name if this one exists?
+ 		 */
+-		dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
++		rv = dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
++		if (rv < 0) {
++			kfree(rdev);
++			return NULL;
++		}
+ 	}
+ 
+ 	INIT_LIST_HEAD(&rdev->wdev_list);
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index acb4ccf448ba..f96aa76865de 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -15,6 +15,7 @@
+ #include <linux/nl80211.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/netlink.h>
++#include <linux/nospec.h>
+ #include <linux/etherdevice.h>
+ #include <net/net_namespace.h>
+ #include <net/genetlink.h>
+@@ -1874,20 +1875,22 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
+ static int parse_txq_params(struct nlattr *tb[],
+ 			    struct ieee80211_txq_params *txq_params)
+ {
++	u8 ac;
++
+ 	if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
+ 	    !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
+ 	    !tb[NL80211_TXQ_ATTR_AIFS])
+ 		return -EINVAL;
+ 
+-	txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
++	ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
+ 	txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
+ 	txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
+ 	txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
+ 	txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
+ 
+-	if (txq_params->ac >= NL80211_NUM_ACS)
++	if (ac >= NL80211_NUM_ACS)
+ 		return -EINVAL;
+-
++	txq_params->ac = array_index_nospec(ac, NL80211_NUM_ACS);
+ 	return 0;
+ }
+ 
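
parse_txq_params() above still bounds-checks the AC index, but now also clamps it with array_index_nospec() so a mispredicted branch cannot be used to read past txq_params speculatively. A userspace sketch of the masking idea behind that helper, modeled loosely on the kernel's generic fallback (it relies on arithmetic right shift of a negative long, which mainstream compilers provide but the C standard leaves implementation-defined):

#include <stdio.h>

#define NUM_ACS 4

/* all-ones mask when index < size, zero otherwise */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	const unsigned char table[NUM_ACS] = { 10, 11, 12, 13 };
	unsigned long ac = 2;

	if (ac >= NUM_ACS)
		return 1;
	ac &= index_mask(ac, NUM_ACS);	/* in bounds even under speculation */
	printf("ac %lu -> %u\n", ac, table[ac]);
	return 0;
}
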
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 30593cadd428..84541b35629a 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1353,11 +1353,14 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
+ 
+ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ {
++	u16 prev_family;
+ 	int i;
+ 
+ 	if (nr > XFRM_MAX_DEPTH)
+ 		return -EINVAL;
+ 
++	prev_family = family;
++
+ 	for (i = 0; i < nr; i++) {
+ 		/* We never validated the ut->family value, so many
+ 		 * applications simply leave it at zero.  The check was
+@@ -1369,6 +1372,12 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ 		if (!ut[i].family)
+ 			ut[i].family = family;
+ 
++		if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
++		    (ut[i].family != prev_family))
++			return -EINVAL;
++
++		prev_family = ut[i].family;
++
+ 		switch (ut[i].family) {
+ 		case AF_INET:
+ 			break;
+@@ -1379,6 +1388,21 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
+ 		default:
+ 			return -EINVAL;
+ 		}
++
++		switch (ut[i].id.proto) {
++		case IPPROTO_AH:
++		case IPPROTO_ESP:
++		case IPPROTO_COMP:
++#if IS_ENABLED(CONFIG_IPV6)
++		case IPPROTO_ROUTING:
++		case IPPROTO_DSTOPTS:
++#endif
++		case IPSEC_PROTO_ANY:
++			break;
++		default:
++			return -EINVAL;
++		}
++
+ 	}
+ 
+ 	return 0;
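
validate_tmpl() above now enforces two rules: transport-mode templates may not change address family within a bundle, and the template protocol must be one of the known IPsec protocols. A compact userspace sketch of the whitelist half; the IPSEC_PROTO_ANY value is taken from the kernel UAPI, and the function name is illustrative.

#include <errno.h>
#include <netinet/in.h>		/* IPPROTO_AH, IPPROTO_ESP, IPPROTO_COMP */
#include <stdio.h>

#define IPSEC_PROTO_ANY 255	/* as in linux/ipsec.h */

static int tmpl_proto_ok(int proto)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
	case IPPROTO_COMP:
	case IPSEC_PROTO_ANY:
		return 0;
	default:
		return -EINVAL;	/* everything else is refused up front */
	}
}

int main(void)
{
	printf("ESP: %d, TCP: %d\n",
	       tmpl_proto_ok(IPPROTO_ESP), tmpl_proto_ok(IPPROTO_TCP));
	return 0;
}
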
+diff --git a/scripts/Makefile.build b/scripts/Makefile.build
+index 01df30af4d4a..18209917e379 100644
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -158,7 +158,8 @@ cmd_cc_i_c       = $(CPP) $(c_flags)   -o $@ $<
+ $(obj)/%.i: $(src)/%.c FORCE
+ 	$(call if_changed_dep,cc_i_c)
+ 
+-cmd_gensymtypes =                                                           \
+# These mirror gensymtypes_S and co below; keep them in sync.
++cmd_gensymtypes_c =                                                         \
+     $(CPP) -D__GENKSYMS__ $(c_flags) $< |                                   \
+     $(GENKSYMS) $(if $(1), -T $(2))                                         \
+      $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
+@@ -168,7 +169,7 @@ cmd_gensymtypes =                                                           \
+ quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
+ cmd_cc_symtypes_c =                                                         \
+     set -e;                                                                 \
+-    $(call cmd_gensymtypes,true,$@) >/dev/null;                             \
++    $(call cmd_gensymtypes_c,true,$@) >/dev/null;                           \
+     test -s $@ || rm -f $@
+ 
+ $(obj)/%.symtypes : $(src)/%.c FORCE
+@@ -197,9 +198,10 @@ else
+ #   the actual value of the checksum generated by genksyms
+ 
+ cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $<
+-cmd_modversions =								\
++
++cmd_modversions_c =								\
+ 	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then		\
+-		$(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
++		$(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
+ 		    > $(@D)/.tmp_$(@F:.o=.ver);					\
+ 										\
+ 		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) 			\
+@@ -244,7 +246,7 @@ endif
+ define rule_cc_o_c
+ 	$(call echo-cmd,checksrc) $(cmd_checksrc)			  \
+ 	$(call echo-cmd,cc_o_c) $(cmd_cc_o_c);				  \
+-	$(cmd_modversions)						  \
++	$(cmd_modversions_c)						  \
+ 	$(call echo-cmd,record_mcount)					  \
+ 	$(cmd_record_mcount)						  \
+ 	scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' >    \
+@@ -253,6 +255,15 @@ define rule_cc_o_c
+ 	mv -f $(dot-target).tmp $(dot-target).cmd
+ endef
+ 
++define rule_as_o_S
++       $(call echo-cmd,as_o_S) $(cmd_as_o_S);                            \
++       scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' >    \
++                                                     $(dot-target).tmp;  \
++       $(cmd_modversions_S)						 \
++       rm -f $(depfile);                                                 \
++       mv -f $(dot-target).tmp $(dot-target).cmd
++endef
++
+ # Built-in and composite module parts
+ $(obj)/%.o: $(src)/%.c $(recordmcount_source) FORCE
+ 	$(call cmd,force_checksrc)
+@@ -281,6 +292,38 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)
+ $(real-objs-m)      : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
+ $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
+ 
++# .S file exports must have their C prototypes defined in asm/asm-prototypes.h
++# or a file that it includes, in order to get versioned symbols. We build a
++# dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from
++# the .S file (with trailing ';'), and run genksyms on that, to extract vers.
++#
++# This is convoluted. The .S file must first be preprocessed to run guards and
++# expand names, then the resulting exports must be constructed into plain
++# EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed
++# to make the genksyms input.
++#
++# These mirror gensymtypes_c and co above; keep them in sync.
++cmd_gensymtypes_S =                                                         \
++    (echo "\#include <linux/kernel.h>" ;                                    \
++     echo "\#include <asm/asm-prototypes.h>" ;                              \
++    $(CPP) $(a_flags) $< |                                                  \
++      grep "\<___EXPORT_SYMBOL\>" |                                          \
++      sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \
++    $(CPP) -D__GENKSYMS__ $(c_flags) -xc - |                                \
++    $(GENKSYMS) $(if $(1), -T $(2))                                         \
++     $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX))             \
++     $(if $(KBUILD_PRESERVE),-p)                                            \
++     -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null))
++
++quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@
++cmd_cc_symtypes_S =                                                         \
++    set -e;                                                                 \
++    $(call cmd_gensymtypes_S,true,$@) >/dev/null;                           \
++    test -s $@ || rm -f $@
++
++$(obj)/%.symtypes : $(src)/%.S FORCE
++	$(call cmd,cc_symtypes_S)
++
+ quiet_cmd_as_s_S = CPP $(quiet_modtag) $@
+ cmd_as_s_S       = $(CPP) $(a_flags)   -o $@ $<
+ 
+@@ -288,10 +331,40 @@ $(obj)/%.s: $(src)/%.S FORCE
+ 	$(call if_changed_dep,as_s_S)
+ 
+ quiet_cmd_as_o_S = AS $(quiet_modtag)  $@
+-cmd_as_o_S       = $(CC) $(a_flags) -c -o $@ $<
++
++ifndef CONFIG_MODVERSIONS
++cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
++
++else
++
++ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h)
++
++ifeq ($(ASM_PROTOTYPES),)
++cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
++
++else
++
++# Versioning matches the C process described above, with the difference that
++# we parse the asm-prototypes.h C header to get the function definitions.
++
++cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $<
++
++cmd_modversions_S =								\
++	if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then		\
++		$(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes))	\
++		    > $(@D)/.tmp_$(@F:.o=.ver);					\
++										\
++		$(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) 			\
++			-T $(@D)/.tmp_$(@F:.o=.ver);				\
++		rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver);		\
++	else									\
++		mv -f $(@D)/.tmp_$(@F) $@;					\
++	fi;
++endif
++endif
+ 
+ $(obj)/%.o: $(src)/%.S FORCE
+-	$(call if_changed_dep,as_o_S)
++	$(call if_changed_rule,as_o_S)
+ 
+ targets += $(real-objs-y) $(real-objs-m) $(lib-y)
+ targets += $(extra-y) $(MAKECMDGOALS) $(always)
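
To make the cmd_gensymtypes_S pipeline above concrete: for a hypothetical arch .S file that exports memcpy, the grep/sed stage rewrites the preprocessed ___EXPORT_SYMBOL line into a plain C export, so genksyms effectively sees a dummy translation unit along these lines (a sketch of the intermediate, not captured tool output):

/* hypothetical genksyms input for an EXPORT_SYMBOL(memcpy) living in a
 * .S file; the CRC is derived from the memcpy prototype that
 * asm/asm-prototypes.h must provide */
#include <linux/kernel.h>
#include <asm/asm-prototypes.h>
EXPORT_SYMBOL(memcpy);
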
+diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
+index 88632df4381b..dafaf96e0a34 100644
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -423,13 +423,15 @@ static struct string_list *read_node(FILE *f)
+ 	struct string_list node = {
+ 		.string = buffer,
+ 		.tag = SYM_NORMAL };
+-	int c;
++	int c, in_string = 0;
+ 
+ 	while ((c = fgetc(f)) != EOF) {
+-		if (c == ' ') {
++		if (!in_string && c == ' ') {
+ 			if (node.string == buffer)
+ 				continue;
+ 			break;
++		} else if (c == '"') {
++			in_string = !in_string;
+ 		} else if (c == '\n') {
+ 			if (node.string == buffer)
+ 				return NULL;
+diff --git a/scripts/kernel-doc b/scripts/kernel-doc
+index 9922e66883a5..f936d9e5db91 100755
+--- a/scripts/kernel-doc
++++ b/scripts/kernel-doc
+@@ -2616,4 +2616,4 @@ if ($verbose && $warnings) {
+   print STDERR "$warnings warnings\n";
+ }
+ 
+-exit($errors);
++exit($output_mode eq "none" ? 0 : $errors);
+diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
+index 91ee1b2e0f9a..a9f02fe15ce3 100644
+--- a/scripts/mod/modpost.c
++++ b/scripts/mod/modpost.c
+@@ -593,7 +593,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
+ 		if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
+ 		    strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
+ 		    strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
+-		    strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
++		    strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 ||
++		    strcmp(symname, ".TOC.") == 0)
+ 			return 1;
+ 	/* Do not ignore this symbol */
+ 	return 0;
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 89d5695c51cd..20251ee5c491 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -141,23 +141,22 @@ static int valid_ecryptfs_desc(const char *ecryptfs_desc)
+  */
+ static int valid_master_desc(const char *new_desc, const char *orig_desc)
+ {
+-	if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
+-		if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
+-			goto out;
+-		if (orig_desc)
+-			if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
+-				goto out;
+-	} else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
+-		if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
+-			goto out;
+-		if (orig_desc)
+-			if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
+-				goto out;
+-	} else
+-		goto out;
++	int prefix_len;
++
++	if (!strncmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN))
++		prefix_len = KEY_TRUSTED_PREFIX_LEN;
++	else if (!strncmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN))
++		prefix_len = KEY_USER_PREFIX_LEN;
++	else
++		return -EINVAL;
++
++	if (!new_desc[prefix_len])
++		return -EINVAL;
++
++	if (orig_desc && strncmp(new_desc, orig_desc, prefix_len))
++		return -EINVAL;
++
+ 	return 0;
+-out:
+-	return -EINVAL;
+ }
+ 
+ /*
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 9e2d82070915..31d1d2ebd6f2 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -823,6 +823,9 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
+ 	int index;
+ 	int rc;
+ 
++	if (!ss_initialized)
++		return 0;
++
+ 	read_lock(&policy_rwlock);
+ 
+ 	rc = -EINVAL;
+@@ -1236,27 +1239,25 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
+ 	if (!scontext_len)
+ 		return -EINVAL;
+ 
++	/* Copy the string to allow changes and ensure a NUL terminator */
++	scontext2 = kmemdup_nul(scontext, scontext_len, gfp_flags);
++	if (!scontext2)
++		return -ENOMEM;
++
+ 	if (!ss_initialized) {
+ 		int i;
+ 
+ 		for (i = 1; i < SECINITSID_NUM; i++) {
+-			if (!strcmp(initial_sid_to_string[i], scontext)) {
++			if (!strcmp(initial_sid_to_string[i], scontext2)) {
+ 				*sid = i;
+-				return 0;
++				goto out;
+ 			}
+ 		}
+ 		*sid = SECINITSID_KERNEL;
+-		return 0;
++		goto out;
+ 	}
+ 	*sid = SECSID_NULL;
+ 
+-	/* Copy the string so that we can modify the copy as we parse it. */
+-	scontext2 = kmalloc(scontext_len + 1, gfp_flags);
+-	if (!scontext2)
+-		return -ENOMEM;
+-	memcpy(scontext2, scontext, scontext_len);
+-	scontext2[scontext_len] = 0;
+-
+ 	if (force) {
+ 		/* Save another copy for storing in uninterpreted form */
+ 		rc = -ENOMEM;
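
The SELinux hunk above replaces an open-coded kmalloc/memcpy/terminate sequence with kmemdup_nul(), and takes the copy before the ss_initialized shortcut, so even the early strcmp() against the initial-SID names runs on a buffer with a guaranteed NUL. A userspace sketch of the same copy-then-parse pattern, with POSIX strndup() standing in for kmemdup_nul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	/* deliberately unterminated 7-byte input */
	const char raw[7] = { 's', 'y', 's', 't', 'e', 'm', '!' };
	char *copy = strndup(raw, 6);	/* copies 6 bytes, appends a NUL */

	if (!copy)
		return 1;
	printf("parsed context: %s\n", copy);	/* safe: copy[6] == '\0' */
	free(copy);
	return 0;
}
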
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 33e72c809e50..494b7b533366 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -465,7 +465,6 @@ static int snd_pcm_hw_param_near(struct snd_pcm_substream *pcm,
+ 		v = snd_pcm_hw_param_last(pcm, params, var, dir);
+ 	else
+ 		v = snd_pcm_hw_param_first(pcm, params, var, dir);
+-	snd_BUG_ON(v < 0);
+ 	return v;
+ }
+ 
+@@ -1370,8 +1369,11 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 
+ 	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+ 		return tmp;
+-	mutex_lock(&runtime->oss.params_lock);
+ 	while (bytes > 0) {
++		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
++			tmp = -ERESTARTSYS;
++			break;
++		}
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			tmp = bytes;
+ 			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
+@@ -1415,14 +1417,18 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 			xfer += tmp;
+ 			if ((substream->f_flags & O_NONBLOCK) != 0 &&
+ 			    tmp != runtime->oss.period_bytes)
+-				break;
++				tmp = -EAGAIN;
+ 		}
+-	}
+-	mutex_unlock(&runtime->oss.params_lock);
+-	return xfer;
+-
+  err:
+-	mutex_unlock(&runtime->oss.params_lock);
++		mutex_unlock(&runtime->oss.params_lock);
++		if (tmp < 0)
++			break;
++		if (signal_pending(current)) {
++			tmp = -ERESTARTSYS;
++			break;
++		}
++		tmp = 0;
++	}
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1470,8 +1476,11 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 
+ 	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+ 		return tmp;
+-	mutex_lock(&runtime->oss.params_lock);
+ 	while (bytes > 0) {
++		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
++			tmp = -ERESTARTSYS;
++			break;
++		}
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			if (runtime->oss.buffer_used == 0) {
+ 				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
+@@ -1502,12 +1511,16 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 			bytes -= tmp;
+ 			xfer += tmp;
+ 		}
+-	}
+-	mutex_unlock(&runtime->oss.params_lock);
+-	return xfer;
+-
+  err:
+-	mutex_unlock(&runtime->oss.params_lock);
++		mutex_unlock(&runtime->oss.params_lock);
++		if (tmp < 0)
++			break;
++		if (signal_pending(current)) {
++			tmp = -ERESTARTSYS;
++			break;
++		}
++		tmp = 0;
++	}
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
+index 727ac44d39f4..a84a1d3d23e5 100644
+--- a/sound/core/oss/pcm_plugin.c
++++ b/sound/core/oss/pcm_plugin.c
+@@ -591,18 +591,26 @@ snd_pcm_sframes_t snd_pcm_plug_write_transfer(struct snd_pcm_substream *plug, st
+ 	snd_pcm_sframes_t frames = size;
+ 
+ 	plugin = snd_pcm_plug_first(plug);
+-	while (plugin && frames > 0) {
++	while (plugin) {
++		if (frames <= 0)
++			return frames;
+ 		if ((next = plugin->next) != NULL) {
+ 			snd_pcm_sframes_t frames1 = frames;
+-			if (plugin->dst_frames)
++			if (plugin->dst_frames) {
+ 				frames1 = plugin->dst_frames(plugin, frames);
++				if (frames1 <= 0)
++					return frames1;
++			}
+ 			if ((err = next->client_channels(next, frames1, &dst_channels)) < 0) {
+ 				return err;
+ 			}
+ 			if (err != frames1) {
+ 				frames = err;
+-				if (plugin->src_frames)
++				if (plugin->src_frames) {
+ 					frames = plugin->src_frames(plugin, frames1);
++					if (frames <= 0)
++						return frames;
++				}
+ 			}
+ 		} else
+ 			dst_channels = NULL;
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 9a7cc9a56a21..169df070c22b 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -578,7 +578,6 @@ static inline unsigned int muldiv32(unsigned int a, unsigned int b,
+ {
+ 	u_int64_t n = (u_int64_t) a * b;
+ 	if (c == 0) {
+-		snd_BUG_ON(!n);
+ 		*r = 0;
+ 		return UINT_MAX;
+ 	}
+@@ -1663,7 +1662,7 @@ int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
+ 		return changed;
+ 	if (params->rmask) {
+ 		int err = snd_pcm_hw_refine(pcm, params);
+-		if (snd_BUG_ON(err < 0))
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return snd_pcm_hw_param_value(params, var, dir);
+@@ -1710,7 +1709,7 @@ int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm,
+ 		return changed;
+ 	if (params->rmask) {
+ 		int err = snd_pcm_hw_refine(pcm, params);
+-		if (snd_BUG_ON(err < 0))
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return snd_pcm_hw_param_value(params, var, dir);
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index b450a27588c8..16f8124b1150 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
+ 	return 0;
+ }
+ 
+-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
++static int __snd_rawmidi_info_select(struct snd_card *card,
++				     struct snd_rawmidi_info *info)
+ {
+ 	struct snd_rawmidi *rmidi;
+ 	struct snd_rawmidi_str *pstr;
+ 	struct snd_rawmidi_substream *substream;
+ 
+-	mutex_lock(&register_mutex);
+ 	rmidi = snd_rawmidi_search(card, info->device);
+-	mutex_unlock(&register_mutex);
+ 	if (!rmidi)
+ 		return -ENXIO;
+ 	if (info->stream < 0 || info->stream > 1)
+@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
+ 	}
+ 	return -ENXIO;
+ }
++
++int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
++{
++	int ret;
++
++	mutex_lock(&register_mutex);
++	ret = __snd_rawmidi_info_select(card, info);
++	mutex_unlock(&register_mutex);
++	return ret;
++}
+ EXPORT_SYMBOL(snd_rawmidi_info_select);
+ 
+ static int snd_rawmidi_info_select_user(struct snd_card *card,
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index e3767122dd0b..b9ce5da25938 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -236,6 +236,7 @@ static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
+ 	rwlock_init(&client->ports_lock);
+ 	mutex_init(&client->ports_mutex);
+ 	INIT_LIST_HEAD(&client->ports_list_head);
++	mutex_init(&client->ioctl_mutex);
+ 
+ 	/* find free slot in the client table */
+ 	spin_lock_irqsave(&clients_lock, flags);
+@@ -1011,7 +1012,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ {
+ 	struct snd_seq_client *client = file->private_data;
+ 	int written = 0, len;
+-	int err = -EINVAL;
++	int err;
+ 	struct snd_seq_event event;
+ 
+ 	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
+@@ -1026,11 +1027,15 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 
+ 	/* allocate the pool now if the pool is not allocated yet */ 
+ 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+-		if (snd_seq_pool_init(client->pool) < 0)
++		mutex_lock(&client->ioctl_mutex);
++		err = snd_seq_pool_init(client->pool);
++		mutex_unlock(&client->ioctl_mutex);
++		if (err < 0)
+ 			return -ENOMEM;
+ 	}
+ 
+ 	/* only process whole events */
++	err = -EINVAL;
+ 	while (count >= sizeof(struct snd_seq_event)) {
+ 		/* Read in the event header from the user */
+ 		len = sizeof(event);
+@@ -2195,6 +2200,7 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
+ 			    void __user *arg)
+ {
+ 	struct seq_ioctl_table *p;
++	int ret;
+ 
+ 	switch (cmd) {
+ 	case SNDRV_SEQ_IOCTL_PVERSION:
+@@ -2208,8 +2214,12 @@ static int snd_seq_do_ioctl(struct snd_seq_client *client, unsigned int cmd,
+ 	if (! arg)
+ 		return -EFAULT;
+ 	for (p = ioctl_tables; p->cmd; p++) {
+-		if (p->cmd == cmd)
+-			return p->func(client, arg);
++		if (p->cmd == cmd) {
++			mutex_lock(&client->ioctl_mutex);
++			ret = p->func(client, arg);
++			mutex_unlock(&client->ioctl_mutex);
++			return ret;
++		}
+ 	}
+ 	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
+ 		   cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
+diff --git a/sound/core/seq/seq_clientmgr.h b/sound/core/seq/seq_clientmgr.h
+index 20f0a725ec7d..91f8f165bfdc 100644
+--- a/sound/core/seq/seq_clientmgr.h
++++ b/sound/core/seq/seq_clientmgr.h
+@@ -59,6 +59,7 @@ struct snd_seq_client {
+ 	struct list_head ports_list_head;
+ 	rwlock_t ports_lock;
+ 	struct mutex ports_mutex;
++	struct mutex ioctl_mutex;
+ 	int convert32;		/* convert 32->64bit */
+ 
+ 	/* output pool */
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 7f9126efc1e5..83ae083b192f 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -39,6 +39,7 @@
+ #include <sound/core.h>
+ #include <sound/control.h>
+ #include <sound/pcm.h>
++#include <sound/pcm_params.h>
+ #include <sound/info.h>
+ #include <sound/initval.h>
+ 
+@@ -305,19 +306,6 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ 	return 0;
+ }
+ 
+-static void params_change_substream(struct loopback_pcm *dpcm,
+-				    struct snd_pcm_runtime *runtime)
+-{
+-	struct snd_pcm_runtime *dst_runtime;
+-
+-	if (dpcm == NULL || dpcm->substream == NULL)
+-		return;
+-	dst_runtime = dpcm->substream->runtime;
+-	if (dst_runtime == NULL)
+-		return;
+-	dst_runtime->hw = dpcm->cable->hw;
+-}
+-
+ static void params_change(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+@@ -329,10 +317,6 @@ static void params_change(struct snd_pcm_substream *substream)
+ 	cable->hw.rate_max = runtime->rate;
+ 	cable->hw.channels_min = runtime->channels;
+ 	cable->hw.channels_max = runtime->channels;
+-	params_change_substream(cable->streams[SNDRV_PCM_STREAM_PLAYBACK],
+-				runtime);
+-	params_change_substream(cable->streams[SNDRV_PCM_STREAM_CAPTURE],
+-				runtime);
+ }
+ 
+ static int loopback_prepare(struct snd_pcm_substream *substream)
+@@ -620,26 +604,29 @@ static unsigned int get_cable_index(struct snd_pcm_substream *substream)
+ static int rule_format(struct snd_pcm_hw_params *params,
+ 		       struct snd_pcm_hw_rule *rule)
+ {
++	struct loopback_pcm *dpcm = rule->private;
++	struct loopback_cable *cable = dpcm->cable;
++	struct snd_mask m;
+ 
+-	struct snd_pcm_hardware *hw = rule->private;
+-	struct snd_mask *maskp = hw_param_mask(params, rule->var);
+-
+-	maskp->bits[0] &= (u_int32_t)hw->formats;
+-	maskp->bits[1] &= (u_int32_t)(hw->formats >> 32);
+-	memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX-64) / 8); /* clear rest */
+-	if (! maskp->bits[0] && ! maskp->bits[1])
+-		return -EINVAL;
+-	return 0;
++	snd_mask_none(&m);
++	mutex_lock(&dpcm->loopback->cable_lock);
++	m.bits[0] = (u_int32_t)cable->hw.formats;
++	m.bits[1] = (u_int32_t)(cable->hw.formats >> 32);
++	mutex_unlock(&dpcm->loopback->cable_lock);
++	return snd_mask_refine(hw_param_mask(params, rule->var), &m);
+ }
+ 
+ static int rule_rate(struct snd_pcm_hw_params *params,
+ 		     struct snd_pcm_hw_rule *rule)
+ {
+-	struct snd_pcm_hardware *hw = rule->private;
++	struct loopback_pcm *dpcm = rule->private;
++	struct loopback_cable *cable = dpcm->cable;
+ 	struct snd_interval t;
+ 
+-        t.min = hw->rate_min;
+-        t.max = hw->rate_max;
++	mutex_lock(&dpcm->loopback->cable_lock);
++	t.min = cable->hw.rate_min;
++	t.max = cable->hw.rate_max;
++	mutex_unlock(&dpcm->loopback->cable_lock);
+         t.openmin = t.openmax = 0;
+         t.integer = 0;
+ 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+@@ -648,22 +635,44 @@ static int rule_rate(struct snd_pcm_hw_params *params,
+ static int rule_channels(struct snd_pcm_hw_params *params,
+ 			 struct snd_pcm_hw_rule *rule)
+ {
+-	struct snd_pcm_hardware *hw = rule->private;
++	struct loopback_pcm *dpcm = rule->private;
++	struct loopback_cable *cable = dpcm->cable;
+ 	struct snd_interval t;
+ 
+-        t.min = hw->channels_min;
+-        t.max = hw->channels_max;
++	mutex_lock(&dpcm->loopback->cable_lock);
++	t.min = cable->hw.channels_min;
++	t.max = cable->hw.channels_max;
++	mutex_unlock(&dpcm->loopback->cable_lock);
+         t.openmin = t.openmax = 0;
+         t.integer = 0;
+ 	return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+ }
+ 
++static void free_cable(struct snd_pcm_substream *substream)
++{
++	struct loopback *loopback = substream->private_data;
++	int dev = get_cable_index(substream);
++	struct loopback_cable *cable;
++
++	cable = loopback->cables[substream->number][dev];
++	if (!cable)
++		return;
++	if (cable->streams[!substream->stream]) {
++		/* other stream is still alive */
++		cable->streams[substream->stream] = NULL;
++	} else {
++		/* free the cable */
++		loopback->cables[substream->number][dev] = NULL;
++		kfree(cable);
++	}
++}
++
+ static int loopback_open(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct loopback *loopback = substream->private_data;
+ 	struct loopback_pcm *dpcm;
+-	struct loopback_cable *cable;
++	struct loopback_cable *cable = NULL;
+ 	int err = 0;
+ 	int dev = get_cable_index(substream);
+ 
+@@ -682,7 +691,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ 	if (!cable) {
+ 		cable = kzalloc(sizeof(*cable), GFP_KERNEL);
+ 		if (!cable) {
+-			kfree(dpcm);
+ 			err = -ENOMEM;
+ 			goto unlock;
+ 		}
+@@ -700,19 +708,19 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ 	/* are cached -> they do not reflect the actual state */
+ 	err = snd_pcm_hw_rule_add(runtime, 0,
+ 				  SNDRV_PCM_HW_PARAM_FORMAT,
+-				  rule_format, &runtime->hw,
++				  rule_format, dpcm,
+ 				  SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ 	if (err < 0)
+ 		goto unlock;
+ 	err = snd_pcm_hw_rule_add(runtime, 0,
+ 				  SNDRV_PCM_HW_PARAM_RATE,
+-				  rule_rate, &runtime->hw,
++				  rule_rate, dpcm,
+ 				  SNDRV_PCM_HW_PARAM_RATE, -1);
+ 	if (err < 0)
+ 		goto unlock;
+ 	err = snd_pcm_hw_rule_add(runtime, 0,
+ 				  SNDRV_PCM_HW_PARAM_CHANNELS,
+-				  rule_channels, &runtime->hw,
++				  rule_channels, dpcm,
+ 				  SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+ 	if (err < 0)
+ 		goto unlock;
+@@ -724,6 +732,10 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ 	else
+ 		runtime->hw = cable->hw;
+  unlock:
++	if (err < 0) {
++		free_cable(substream);
++		kfree(dpcm);
++	}
+ 	mutex_unlock(&loopback->cable_lock);
+ 	return err;
+ }
+@@ -732,20 +744,10 @@ static int loopback_close(struct snd_pcm_substream *substream)
+ {
+ 	struct loopback *loopback = substream->private_data;
+ 	struct loopback_pcm *dpcm = substream->runtime->private_data;
+-	struct loopback_cable *cable;
+-	int dev = get_cable_index(substream);
+ 
+ 	loopback_timer_stop(dpcm);
+ 	mutex_lock(&loopback->cable_lock);
+-	cable = loopback->cables[substream->number][dev];
+-	if (cable->streams[!substream->stream]) {
+-		/* other stream is still alive */
+-		cable->streams[substream->stream] = NULL;
+-	} else {
+-		/* free the cable */
+-		loopback->cables[substream->number][dev] = NULL;
+-		kfree(cable);
+-	}
++	free_cable(substream);
+ 	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
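
The aloop changes above factor the cable teardown into free_cable() and run it on loopback_open()'s error path as well, so a half-constructed cable (and the dpcm) is no longer leaked when, for example, a hw-rule registration fails. A toy sketch of that single-teardown-path shape, with made-up types:

#include <stdio.h>
#include <stdlib.h>

struct cable { int users; };

static void free_cable(struct cable **slot)
{
	free(*slot);
	*slot = NULL;
}

static int open_stream(struct cable **slot, int fail)
{
	int err = 0;

	if (!*slot) {
		*slot = calloc(1, sizeof(**slot));
		if (!*slot)
			return -1;
	}
	if (fail)			/* stands in for a failing rule add */
		err = -1;
	if (err)
		free_cable(slot);	/* one teardown path for all failures */
	return err;
}

int main(void)
{
	struct cable *slot = NULL;

	printf("open: %d, leaked: %s\n",
	       open_stream(&slot, 1), slot ? "yes" : "no");
	return 0;
}
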
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 0374bd5b61c8..1fb951225318 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1452,6 +1452,9 @@ static int dspio_scp(struct hda_codec *codec,
+ 		} else if (ret_size != reply_data_size) {
+ 			codec_dbg(codec, "RetLen and HdrLen .NE.\n");
+ 			return -EINVAL;
++		} else if (!reply) {
++			codec_dbg(codec, "NULL reply\n");
++			return -EINVAL;
+ 		} else {
+ 			*reply_len = ret_size*sizeof(unsigned int);
+ 			memcpy(reply, scp_reply.data, *reply_len);
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index aeb054ca9ed9..b3d222d96a1b 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -394,6 +394,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
+ 	/*SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),*/
+ 
+ 	/* codec SSID */
++	SND_PCI_QUIRK(0x106b, 0x0600, "iMac 14,1", CS420X_IMAC27_122),
+ 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
+ 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
+ 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 75c4e14f4156..861dc57cb082 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3119,6 +3119,19 @@ static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
+ 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ }
+ 
++static void alc269_fixup_pincfg_U7x7_headset_mic(struct hda_codec *codec,
++						 const struct hda_fixup *fix,
++						 int action)
++{
++	unsigned int cfg_headphone = snd_hda_codec_get_pincfg(codec, 0x21);
++	unsigned int cfg_headset_mic = snd_hda_codec_get_pincfg(codec, 0x19);
++
++	if (cfg_headphone && cfg_headset_mic == 0x411111f0)
++		snd_hda_codec_set_pincfg(codec, 0x19,
++			(cfg_headphone & ~AC_DEFCFG_DEVICE) |
++			(AC_JACK_MIC_IN << AC_DEFCFG_DEVICE_SHIFT));
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ 			       const struct hda_fixup *fix, int action)
+ {
+@@ -4675,6 +4688,7 @@ enum {
+ 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ 	ALC269_FIXUP_LIFEBOOK_HP_PIN,
+ 	ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
++	ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -4732,6 +4746,7 @@ enum {
+ 	ALC286_FIXUP_HP_GPIO_LED,
+ 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
+ 	ALC280_FIXUP_HP_DOCK_PINS,
++	ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED,
+ 	ALC280_FIXUP_HP_9480M,
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+@@ -4864,6 +4879,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+ 	},
++	[ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_pincfg_U7x7_headset_mic,
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -5270,6 +5289,16 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC280_FIXUP_HP_GPIO4
+ 	},
++	[ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1b, 0x21011020 }, /* line-out */
++			{ 0x18, 0x2181103f }, /* line-in */
++			{ },
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED
++	},
+ 	[ALC280_FIXUP_HP_9480M] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc280_fixup_hp_9480m,
+@@ -5482,6 +5511,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
++	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -5522,7 +5552,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+-	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
++	SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+@@ -5568,6 +5598,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
++	SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ 	SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
+@@ -5684,6 +5715,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC269_FIXUP_HEADSET_MIC, .name = "headset-mic"},
+ 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
++	{.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"},
+ 	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+ 	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
+ 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
+diff --git a/sound/soc/codecs/pcm512x-spi.c b/sound/soc/codecs/pcm512x-spi.c
+index 7b64a9cef704..8adb0912d5f8 100644
+--- a/sound/soc/codecs/pcm512x-spi.c
++++ b/sound/soc/codecs/pcm512x-spi.c
+@@ -71,3 +71,7 @@ static struct spi_driver pcm512x_spi_driver = {
+ };
+ 
+ module_spi_driver(pcm512x_spi_driver);
++
++MODULE_DESCRIPTION("ASoC PCM512x codec driver - SPI");
++MODULE_AUTHOR("Mark Brown <broonie@kernel.org>");
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
+index d04693e9cf9f..3a98c0910560 100644
+--- a/sound/soc/codecs/twl4030.c
++++ b/sound/soc/codecs/twl4030.c
+@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
+ 	struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
+ 	struct device_node *twl4030_codec_node = NULL;
+ 
+-	twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
++	twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
+ 						  "codec");
+ 
+ 	if (!pdata && twl4030_codec_node) {
+@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
+ 				     GFP_KERNEL);
+ 		if (!pdata) {
+ 			dev_err(codec->dev, "Can not allocate memory\n");
++			of_node_put(twl4030_codec_node);
+ 			return NULL;
+ 		}
+ 		twl4030_setup_pdata_of(pdata, twl4030_codec_node);
++		of_node_put(twl4030_codec_node);
+ 	}
+ 
+ 	return pdata;
+diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
+index 4e0c0e502ade..49a1b8050bc7 100644
+--- a/sound/soc/ux500/mop500.c
++++ b/sound/soc/ux500/mop500.c
+@@ -163,3 +163,7 @@ static struct platform_driver snd_soc_mop500_driver = {
+ };
+ 
+ module_platform_driver(snd_soc_mop500_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_DESCRIPTION("ASoC MOP500 board driver");
++MODULE_AUTHOR("Ola Lilja");
+diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
+index 51a66a87305a..b4ab903fca1b 100644
+--- a/sound/soc/ux500/ux500_pcm.c
++++ b/sound/soc/ux500/ux500_pcm.c
+@@ -166,3 +166,8 @@ int ux500_pcm_unregister_platform(struct platform_device *pdev)
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(ux500_pcm_unregister_platform);
++
++MODULE_AUTHOR("Ola Lilja");
++MODULE_AUTHOR("Roger Nilsson");
++MODULE_DESCRIPTION("ASoC UX500 driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 2ee449fbe55f..76f7c95b38af 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -343,17 +343,20 @@ static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request,
+ 			    int validx, int *value_ret)
+ {
+ 	struct snd_usb_audio *chip = cval->head.mixer->chip;
+-	unsigned char buf[4 + 3 * sizeof(__u32)]; /* enough space for one range */
++	/* enough space for one range */
++	unsigned char buf[sizeof(__u16) + 3 * sizeof(__u32)];
+ 	unsigned char *val;
+-	int idx = 0, ret, size;
++	int idx = 0, ret, val_size, size;
+ 	__u8 bRequest;
+ 
++	val_size = uac2_ctl_value_size(cval->val_type);
++
+ 	if (request == UAC_GET_CUR) {
+ 		bRequest = UAC2_CS_CUR;
+-		size = uac2_ctl_value_size(cval->val_type);
++		size = val_size;
+ 	} else {
+ 		bRequest = UAC2_CS_RANGE;
+-		size = sizeof(buf);
++		size = sizeof(__u16) + 3 * val_size;
+ 	}
+ 
+ 	memset(buf, 0, sizeof(buf));
+@@ -386,16 +389,17 @@ error:
+ 		val = buf + sizeof(__u16);
+ 		break;
+ 	case UAC_GET_MAX:
+-		val = buf + sizeof(__u16) * 2;
++		val = buf + sizeof(__u16) + val_size;
+ 		break;
+ 	case UAC_GET_RES:
+-		val = buf + sizeof(__u16) * 3;
++		val = buf + sizeof(__u16) + val_size * 2;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+ 	}
+ 
+-	*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
++	*value_ret = convert_signed_value(cval,
++					  snd_usb_combine_bytes(val, val_size));
+ 
+ 	return 0;
+ }
+@@ -2095,20 +2099,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
+ 	kctl->private_value = (unsigned long)namelist;
+ 	kctl->private_free = usb_mixer_selector_elem_free;
+ 
+-	nameid = uac_selector_unit_iSelector(desc);
++	/* check the static mapping table at first */
+ 	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
+-	if (len)
+-		;
+-	else if (nameid)
+-		len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
+-					 sizeof(kctl->id.name));
+-	else
+-		len = get_term_name(state, &state->oterm,
+-				    kctl->id.name, sizeof(kctl->id.name), 0);
+-
+ 	if (!len) {
+-		strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
++		/* no mapping? */
++		/* if iSelector is given, use it */
++		nameid = uac_selector_unit_iSelector(desc);
++		if (nameid)
++			len = snd_usb_copy_string_desc(state, nameid,
++						       kctl->id.name,
++						       sizeof(kctl->id.name));
++		/* ... or pick up the terminal name next */
++		if (!len)
++			len = get_term_name(state, &state->oterm,
++				    kctl->id.name, sizeof(kctl->id.name), 0);
++		/* ... or use the fixed string "USB" as the last resort */
++		if (!len)
++			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+ 
++		/* and add the proper suffix */
+ 		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
+ 			append_ctl_name(kctl, " Clock Source");
+ 		else if ((state->oterm.type & 0xff00) == 0x0100)
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index a51155197277..3351e2f9656d 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -343,6 +343,15 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ 		ep = 0x81;
+ 		iface = usb_ifnum_to_if(dev, 2);
+ 
++		if (!iface || iface->num_altsetting == 0)
++			return -EINVAL;
++
++		alts = &iface->altsetting[1];
++		goto add_sync_ep;
++	case USB_ID(0x1397, 0x0002):
++		ep = 0x81;
++		iface = usb_ifnum_to_if(dev, 1);
++
+ 		if (!iface || iface->num_altsetting == 0)
+ 			return -EINVAL;
+ 
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index ba5efa4710b5..d464305c7c6c 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -203,6 +203,47 @@ static const char * const numa_usage[] = {
+ 	NULL
+ };
+ 
++/*
++ * To get the number of NUMA nodes present.
++ */
++static int nr_numa_nodes(void)
++{
++	int i, nr_nodes = 0;
++
++	for (i = 0; i < g->p.nr_nodes; i++) {
++		if (numa_bitmask_isbitset(numa_nodes_ptr, i))
++			nr_nodes++;
++	}
++
++	return nr_nodes;
++}
++
++/*
++ * To check if given numa node is present.
++ */
++static int is_node_present(int node)
++{
++	return numa_bitmask_isbitset(numa_nodes_ptr, node);
++}
++
++/*
++ * To check whether a given NUMA node has CPUs.
++ */
++static bool node_has_cpus(int node)
++{
++	struct bitmask *cpu = numa_allocate_cpumask();
++	unsigned int i;
++
++	if (cpu && !numa_node_to_cpus(node, cpu)) {
++		for (i = 0; i < cpu->size; i++) {
++			if (numa_bitmask_isbitset(cpu, i))
++				return true;
++		}
++	}
++
++	return false; /* let's fall back to "no cpus" safely */
++}
++
+ static cpu_set_t bind_to_cpu(int target_cpu)
+ {
+ 	cpu_set_t orig_mask, mask;
+@@ -231,12 +272,12 @@ static cpu_set_t bind_to_cpu(int target_cpu)
+ 
+ static cpu_set_t bind_to_node(int target_node)
+ {
+-	int cpus_per_node = g->p.nr_cpus/g->p.nr_nodes;
++	int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
+ 	cpu_set_t orig_mask, mask;
+ 	int cpu;
+ 	int ret;
+ 
+-	BUG_ON(cpus_per_node*g->p.nr_nodes != g->p.nr_cpus);
++	BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
+ 	BUG_ON(!cpus_per_node);
+ 
+ 	ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
+@@ -636,7 +677,7 @@ static int parse_setup_node_list(void)
+ 			int i;
+ 
+ 			for (i = 0; i < mul; i++) {
+-				if (t >= g->p.nr_tasks) {
++				if (t >= g->p.nr_tasks || !node_has_cpus(bind_node)) {
+ 					printf("\n# NOTE: ignoring bind NODEs starting at NODE#%d\n", bind_node);
+ 					goto out;
+ 				}
+@@ -951,6 +992,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
+ 	sum = 0;
+ 
+ 	for (node = 0; node < g->p.nr_nodes; node++) {
++		if (!is_node_present(node))
++			continue;
+ 		nr = nodes[node];
+ 		nr_min = min(nr, nr_min);
+ 		nr_max = max(nr, nr_max);
+@@ -971,8 +1014,11 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
+ 	process_groups = 0;
+ 
+ 	for (node = 0; node < g->p.nr_nodes; node++) {
+-		int processes = count_node_processes(node);
++		int processes;
+ 
++		if (!is_node_present(node))
++			continue;
++		processes = count_node_processes(node);
+ 		nr = nodes[node];
+ 		tprintf(" %2d/%-2d", nr, processes);
+ 
+@@ -1270,7 +1316,7 @@ static void print_summary(void)
+ 
+ 	printf("\n ###\n");
+ 	printf(" # %d %s will execute (on %d nodes, %d CPUs):\n",
+-		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", g->p.nr_nodes, g->p.nr_cpus);
++		g->p.nr_tasks, g->p.nr_tasks == 1 ? "task" : "tasks", nr_numa_nodes(), g->p.nr_cpus);
+ 	printf(" #      %5dx %5ldMB global  shared mem operations\n",
+ 			g->p.nr_loops, g->p.bytes_global/1024/1024);
+ 	printf(" #      %5dx %5ldMB process shared mem operations\n",
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 65e138019b99..866b911bcda5 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -69,6 +69,7 @@
+ #include <linux/types.h>
+ 
+ static volatile int done;
++static volatile int resize;
+ 
+ #define HEADER_LINE_NR  5
+ 
+@@ -78,10 +79,13 @@ static void perf_top__update_print_entries(struct perf_top *top)
+ }
+ 
+ static void perf_top__sig_winch(int sig __maybe_unused,
+-				siginfo_t *info __maybe_unused, void *arg)
++				siginfo_t *info __maybe_unused, void *arg __maybe_unused)
+ {
+-	struct perf_top *top = arg;
++	resize = 1;
++}
+ 
++static void perf_top__resize(struct perf_top *top)
++{
+ 	get_term_dimensions(&top->winsize);
+ 	perf_top__update_print_entries(top);
+ }
+@@ -460,7 +464,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
+ 					.sa_sigaction = perf_top__sig_winch,
+ 					.sa_flags     = SA_SIGINFO,
+ 				};
+-				perf_top__sig_winch(SIGWINCH, NULL, top);
++				perf_top__resize(top);
+ 				sigaction(SIGWINCH, &act, NULL);
+ 			} else {
+ 				signal(SIGWINCH, SIG_DFL);
+@@ -998,6 +1002,11 @@ static int __cmd_top(struct perf_top *top)
+ 
+ 		if (hits == top->samples)
+ 			ret = perf_evlist__poll(top->evlist, 100);
++
++		if (resize) {
++			perf_top__resize(top);
++			resize = 0;
++		}
+ 	}
+ 
+ 	ret = 0;
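The perf-top change moves real work out of the SIGWINCH handler: the old handler called get_term_dimensions() and recomputed the print entries in signal context, which is not async-signal-safe; now it only sets a flag that the main loop polls. The general pattern in plain C (names are ours):

	#include <signal.h>

	static volatile sig_atomic_t resize_pending;

	static void on_sigwinch(int sig)
	{
		(void)sig;
		resize_pending = 1;	/* flag only; defer the real work */
	}

	/* main loop: if (resize_pending) { resize_pending = 0; handle_resize(); } */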
+diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
+index ac73710473de..8000445ff884 100644
+--- a/tools/usb/usbip/libsrc/usbip_common.c
++++ b/tools/usb/usbip/libsrc/usbip_common.c
+@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
+ 		       struct usbip_usb_interface *uinf)
+ {
+ 	char busid[SYSFS_BUS_ID_SIZE];
++	int size;
+ 	struct udev_device *sif;
+ 
+-	sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
++	size = snprintf(busid, sizeof(busid), "%s:%d.%d",
++			udev->busid, udev->bConfigurationValue, i);
++	if (size < 0 || (unsigned int)size >= sizeof(busid)) {
++		err("busid length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(busid));
++		return -1;
++	}
+ 
+ 	sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
+ 	if (!sif) {
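This and the following usbip hunks all apply the same snprintf discipline: snprintf() returns the length the untruncated output would have had, so a return value that is negative or >= the buffer size means the string was clipped and must not be used as a sysfs path or command. A standalone illustration (function name is ours):

	#include <stdio.h>

	/* Build "<busid>:<config>.<ifnum>" and refuse truncated output. */
	static int build_busid(char *buf, size_t len,
			       const char *busid, int conf, int ifnum)
	{
		int n = snprintf(buf, len, "%s:%d.%d", busid, conf, ifnum);

		return (n < 0 || (size_t)n >= len) ? -1 : 0;
	}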
+diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
+index bef08d5c44e8..071b9ce99420 100644
+--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
++++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
+@@ -39,13 +39,19 @@ struct udev *udev_context;
+ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
+ {
+ 	char status_attr_path[SYSFS_PATH_MAX];
++	int size;
+ 	int fd;
+ 	int length;
+ 	char status;
+ 	int value = 0;
+ 
+-	snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
+-		 udev->path);
++	size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
++			udev->path);
++	if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
++		err("usbip_status path length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(status_attr_path));
++		return -1;
++	}
+ 
+ 	fd = open(status_attr_path, O_RDONLY);
+ 	if (fd < 0) {
+@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
+ {
+ 	char attr_name[] = "usbip_sockfd";
+ 	char sockfd_attr_path[SYSFS_PATH_MAX];
++	int size;
+ 	char sockfd_buff[30];
+ 	int ret;
+ 
+@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
+ 	}
+ 
+ 	/* only the first interface is true */
+-	snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
+-		 edev->udev.path, attr_name);
++	size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
++			edev->udev.path, attr_name);
++	if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
++		err("exported device path length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(sockfd_attr_path));
++		return -1;
++	}
+ 
+-	snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++	size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
++	if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
++		err("socket length %i >= %lu or < 0", size,
++		    (unsigned long)sizeof(sockfd_buff));
++		return -1;
++	}
+ 
+ 	ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
+ 				    strlen(sockfd_buff));
+diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
+index ad9204773533..1274f326242c 100644
+--- a/tools/usb/usbip/libsrc/vhci_driver.c
++++ b/tools/usb/usbip/libsrc/vhci_driver.c
+@@ -55,12 +55,12 @@ static int parse_status(const char *value)
+ 
+ 	while (*c != '\0') {
+ 		int port, status, speed, devid;
+-		unsigned long socket;
++		int sockfd;
+ 		char lbusid[SYSFS_BUS_ID_SIZE];
+ 
+-		ret = sscanf(c, "%d %d %d %x %lx %31s\n",
++		ret = sscanf(c, "%d %d %d %x %u %31s\n",
+ 				&port, &status, &speed,
+-				&devid, &socket, lbusid);
++				&devid, &sockfd, lbusid);
+ 
+ 		if (ret < 5) {
+ 			dbg("sscanf failed: %d", ret);
+@@ -69,7 +69,7 @@ static int parse_status(const char *value)
+ 
+ 		dbg("port %d status %d speed %d devid %x",
+ 				port, status, speed, devid);
+-		dbg("socket %lx lbusid %s", socket, lbusid);
++		dbg("sockfd %u lbusid %s", sockfd, lbusid);
+ 
+ 
+ 		/* if a device is connected, look at it */
+diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
+index d7599d943529..73d8eee8130b 100644
+--- a/tools/usb/usbip/src/usbip.c
++++ b/tools/usb/usbip/src/usbip.c
+@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
+ 			break;
+ 		case '?':
+ 			printf("usbip: invalid option\n");
++			/* Terminate after printing error */
++			/* FALLTHRU */
+ 		default:
+ 			usbip_usage();
+ 			goto out;
+diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
+index fa46141ae68b..e121cfb1746a 100644
+--- a/tools/usb/usbip/src/usbip_bind.c
++++ b/tools/usb/usbip/src/usbip_bind.c
+@@ -144,6 +144,7 @@ static int bind_device(char *busid)
+ 	int rc;
+ 	struct udev *udev;
+ 	struct udev_device *dev;
++	const char *devpath;
+ 
+ 	/* Check whether the device with this bus ID exists. */
+ 	udev = udev_new();
+@@ -152,8 +153,16 @@ static int bind_device(char *busid)
+ 		err("device with the specified bus ID does not exist");
+ 		return -1;
+ 	}
++	devpath = udev_device_get_devpath(dev);
+ 	udev_unref(udev);
+ 
++	/* If the device is already attached to vhci_hcd - bail out */
++	if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++		err("bind loop detected: device: %s is attached to %s\n",
++		    devpath, USBIP_VHCI_DRV_NAME);
++		return -1;
++	}
++
+ 	rc = unbind_other(busid);
+ 	if (rc == UNBIND_ST_FAILED) {
+ 		err("could not unbind driver from device on busid %s", busid);
+diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
+index d5ce34a410e7..ac6081c3db82 100644
+--- a/tools/usb/usbip/src/usbip_list.c
++++ b/tools/usb/usbip/src/usbip_list.c
+@@ -180,6 +180,7 @@ static int list_devices(bool parsable)
+ 	const char *busid;
+ 	char product_name[128];
+ 	int ret = -1;
++	const char *devpath;
+ 
+ 	/* Create libudev context. */
+ 	udev = udev_new();
+@@ -202,6 +203,14 @@ static int list_devices(bool parsable)
+ 		path = udev_list_entry_get_name(dev_list_entry);
+ 		dev = udev_device_new_from_syspath(udev, path);
+ 
++		/* Ignore devices attached to vhci_hcd */
++		devpath = udev_device_get_devpath(dev);
++		if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
++			dbg("Skip the device %s already attached to %s\n",
++			    devpath, USBIP_VHCI_DRV_NAME);
++			continue;
++		}
++
+ 		/* Get device information. */
+ 		idVendor = udev_device_get_sysattr_value(dev, "idVendor");
+ 		idProduct = udev_device_get_sysattr_value(dev, "idProduct");
+diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
+index 2b3d6d235015..3d7b42e77299 100644
+--- a/tools/usb/usbip/src/utils.c
++++ b/tools/usb/usbip/src/utils.c
+@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
+ 	char command[SYSFS_BUS_ID_SIZE + 4];
+ 	char match_busid_attr_path[SYSFS_PATH_MAX];
+ 	int rc;
++	int cmd_size;
+ 
+ 	snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
+ 		 "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
+@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
+ 		 attr_name);
+ 
+ 	if (add)
+-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
++		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
++				    busid);
+ 	else
+-		snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
++		cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
++				    busid);
+ 
+ 	rc = write_sysfs_attribute(match_busid_attr_path, command,
+-				   sizeof(command));
++				   cmd_size);
+ 	if (rc < 0) {
+ 		dbg("failed to write match_busid: %s", strerror(errno));
+ 		return -1;

diff --git a/1050_linux-4.1.51.patch b/1050_linux-4.1.51.patch
new file mode 100644
index 0000000..e7f030a
--- /dev/null
+++ b/1050_linux-4.1.51.patch
@@ -0,0 +1,2698 @@
+diff --git a/Makefile b/Makefile
+index a655f63aedeb..caccc6f16d62 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 50
++SUBLEVEL = 51
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index 642934a5ae9b..fc2acaefafb0 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -28,6 +28,18 @@
+ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
+ 
++static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
++					 u8 reg_num)
++{
++	return *vcpu_reg(vcpu, reg_num);
++}
++
++static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
++				unsigned long val)
++{
++	*vcpu_reg(vcpu, reg_num) = val;
++}
++
+ bool kvm_condition_valid(struct kvm_vcpu *vcpu);
+ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
+ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
+index 04e5004b34e1..387ee2a11e36 100644
+--- a/arch/arm/kvm/mmio.c
++++ b/arch/arm/kvm/mmio.c
+@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
+ 			       &data);
+ 		data = vcpu_data_host_to_guest(vcpu, data, len);
+-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
++		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+ 	}
+ 
+ 	return 0;
+@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ 	rt = vcpu->arch.mmio_decode.rt;
+ 
+ 	if (is_write) {
+-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
++		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
++					       len);
+ 
+ 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
+ 		mmio_write_buf(data_buf, len, data);
+diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
+index 97473168d6b6..60ad79edbc3c 100644
+--- a/arch/arm/mach-mvebu/Kconfig
++++ b/arch/arm/mach-mvebu/Kconfig
+@@ -37,7 +37,7 @@ config MACH_ARMADA_370
+ config MACH_ARMADA_375
+ 	bool "Marvell Armada 375 boards" if ARCH_MULTI_V7
+ 	select ARM_ERRATA_720789
+-	select ARM_ERRATA_753970
++	select PL310_ERRATA_753970
+ 	select ARM_GIC
+ 	select ARMADA_375_CLK
+ 	select HAVE_ARM_SCU
+@@ -52,7 +52,7 @@ config MACH_ARMADA_375
+ config MACH_ARMADA_38X
+ 	bool "Marvell Armada 380/385 boards" if ARCH_MULTI_V7
+ 	select ARM_ERRATA_720789
+-	select ARM_ERRATA_753970
++	select PL310_ERRATA_753970
+ 	select ARM_GIC
+ 	select ARMADA_38X_CLK
+ 	select HAVE_ARM_SCU
+diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
+index 9ff92050053c..fa7f308c9027 100644
+--- a/arch/arm/mach-omap2/omap-secure.c
++++ b/arch/arm/mach-omap2/omap-secure.c
+@@ -73,6 +73,7 @@ phys_addr_t omap_secure_ram_mempool_base(void)
+ 	return omap_secure_memblock_base;
+ }
+ 
++#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
+ u32 omap3_save_secure_ram(void __iomem *addr, int size)
+ {
+ 	u32 ret;
+@@ -91,6 +92,7 @@ u32 omap3_save_secure_ram(void __iomem *addr, int size)
+ 
+ 	return ret;
+ }
++#endif
+ 
+ /**
+  * rx51_secure_dispatcher: Routine to dispatch secure PPA API calls
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 3e3c4c7a5082..0504f1347af0 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -109,6 +109,19 @@ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+ 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ }
+ 
++static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
++					 u8 reg_num)
++{
++	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
++}
++
++static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
++				unsigned long val)
++{
++	if (reg_num != 31)
++		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
++}
++
+ /* Get vcpu SPSR for current mode */
+ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
+ {
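The reason the arm64 accessors special-case register 31 is architectural: in the instruction encodings emulated here, register number 31 is the zero register (XZR/WZR), so reads must yield 0 and writes must be discarded. The raw vcpu_reg() pointer into the saved register file cannot express that, which is what these vcpu_get_reg()/vcpu_set_reg() wrappers, used by the shared MMIO emulation code, provide.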
+diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
+index b8bb78282d6a..7df779f2cb63 100644
+--- a/arch/mips/ath25/board.c
++++ b/arch/mips/ath25/board.c
+@@ -135,6 +135,8 @@ int __init ath25_find_config(phys_addr_t base, unsigned long size)
+ 	}
+ 
+ 	board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
++	if (!board_data)
++		goto error;
+ 	ath25_board.config = (struct ath25_boarddata *)board_data;
+ 	memcpy_fromio(board_data, bcfg, 0x100);
+ 	if (broken_boarddata) {
+diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
+index 10f762557b92..bacefffee16e 100644
+--- a/arch/mips/cavium-octeon/octeon-irq.c
++++ b/arch/mips/cavium-octeon/octeon-irq.c
+@@ -2242,6 +2242,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+ 	}
+ 
+ 	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
++	if (!host_data)
++		return -ENOMEM;
+ 	raw_spin_lock_init(&host_data->lock);
+ 
+ 	addr = of_get_address(ciu_node, 0, NULL, NULL);
+diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
+index 336708ae5c5b..ee306af6f3d9 100644
+--- a/arch/mips/kernel/smp-bmips.c
++++ b/arch/mips/kernel/smp-bmips.c
+@@ -166,11 +166,11 @@ static void bmips_prepare_cpus(unsigned int max_cpus)
+ 		return;
+ 	}
+ 
+-	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+-			"smp_ipi0", NULL))
++	if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
++			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
+ 		panic("Can't request IPI0 interrupt");
+-	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
+-			"smp_ipi1", NULL))
++	if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
++			IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
+ 		panic("Can't request IPI1 interrupt");
+ }
+ 
+diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
+index 658326f44df8..5e0267624d8d 100644
+--- a/arch/sh/boards/mach-se/770x/setup.c
++++ b/arch/sh/boards/mach-se/770x/setup.c
+@@ -8,6 +8,7 @@
+  */
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
++#include <linux/sh_eth.h>
+ #include <mach-se/mach/se.h>
+ #include <mach-se/mach/mrshpc.h>
+ #include <asm/machvec.h>
+@@ -114,6 +115,11 @@ static struct platform_device heartbeat_device = {
+ #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
+ 	defined(CONFIG_CPU_SUBTYPE_SH7712)
+ /* SH771X Ethernet driver */
++static struct sh_eth_plat_data sh_eth_plat = {
++	.phy = PHY_ID,
++	.phy_interface = PHY_INTERFACE_MODE_MII,
++};
++
+ static struct resource sh_eth0_resources[] = {
+ 	[0] = {
+ 		.start = SH_ETH0_BASE,
+@@ -131,7 +137,7 @@ static struct platform_device sh_eth0_device = {
+ 	.name = "sh771x-ether",
+ 	.id = 0,
+ 	.dev = {
+-		.platform_data = PHY_ID,
++		.platform_data = &sh_eth_plat,
+ 	},
+ 	.num_resources = ARRAY_SIZE(sh_eth0_resources),
+ 	.resource = sh_eth0_resources,
+@@ -154,7 +160,7 @@ static struct platform_device sh_eth1_device = {
+ 	.name = "sh771x-ether",
+ 	.id = 1,
+ 	.dev = {
+-		.platform_data = PHY_ID,
++		.platform_data = &sh_eth_plat,
+ 	},
+ 	.num_resources = ARRAY_SIZE(sh_eth1_resources),
+ 	.resource = sh_eth1_resources,
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 8de489937b89..f726068e1804 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -57,6 +57,9 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
+ 			      rcu_read_lock_sched_held() || \
+ 			      lockdep_is_held(&mce_chrdev_read_mutex))
+ 
++/* sysfs synchronization */
++static DEFINE_MUTEX(mce_sysfs_mutex);
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/mce.h>
+ 
+@@ -2183,6 +2186,7 @@ static ssize_t set_ignore_ce(struct device *s,
+ 	if (kstrtou64(buf, 0, &new) < 0)
+ 		return -EINVAL;
+ 
++	mutex_lock(&mce_sysfs_mutex);
+ 	if (mca_cfg.ignore_ce ^ !!new) {
+ 		if (new) {
+ 			/* disable ce features */
+@@ -2195,6 +2199,8 @@ static ssize_t set_ignore_ce(struct device *s,
+ 			on_each_cpu(mce_enable_ce, (void *)1, 1);
+ 		}
+ 	}
++	mutex_unlock(&mce_sysfs_mutex);
++
+ 	return size;
+ }
+ 
+@@ -2207,6 +2213,7 @@ static ssize_t set_cmci_disabled(struct device *s,
+ 	if (kstrtou64(buf, 0, &new) < 0)
+ 		return -EINVAL;
+ 
++	mutex_lock(&mce_sysfs_mutex);
+ 	if (mca_cfg.cmci_disabled ^ !!new) {
+ 		if (new) {
+ 			/* disable cmci */
+@@ -2218,6 +2225,8 @@ static ssize_t set_cmci_disabled(struct device *s,
+ 			on_each_cpu(mce_enable_ce, NULL, 1);
+ 		}
+ 	}
++	mutex_unlock(&mce_sysfs_mutex);
++
+ 	return size;
+ }
+ 
+@@ -2225,8 +2234,19 @@ static ssize_t store_int_with_restart(struct device *s,
+ 				      struct device_attribute *attr,
+ 				      const char *buf, size_t size)
+ {
+-	ssize_t ret = device_store_int(s, attr, buf, size);
++	unsigned long old_check_interval = check_interval;
++	ssize_t ret = device_store_ulong(s, attr, buf, size);
++
++	if (check_interval == old_check_interval)
++		return ret;
++
++	if (check_interval < 1)
++		check_interval = 1;
++
++	mutex_lock(&mce_sysfs_mutex);
+ 	mce_restart();
++	mutex_unlock(&mce_sysfs_mutex);
++
+ 	return ret;
+ }
+ 
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 415480d3ea84..49ab807eca1c 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -517,6 +517,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
+ 				goto overflow;
+ 			break;
+ 		case R_X86_64_PC32:
++		case R_X86_64_PLT32:
+ 			value -= (u64)address;
+ 			*(u32 *)location = value;
+ 			break;
+diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
+index 005c03e93fc5..94779f66bf49 100644
+--- a/arch/x86/kernel/module.c
++++ b/arch/x86/kernel/module.c
+@@ -170,19 +170,28 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 		case R_X86_64_NONE:
+ 			break;
+ 		case R_X86_64_64:
++			if (*(u64 *)loc != 0)
++				goto invalid_relocation;
+ 			*(u64 *)loc = val;
+ 			break;
+ 		case R_X86_64_32:
++			if (*(u32 *)loc != 0)
++				goto invalid_relocation;
+ 			*(u32 *)loc = val;
+ 			if (val != *(u32 *)loc)
+ 				goto overflow;
+ 			break;
+ 		case R_X86_64_32S:
++			if (*(s32 *)loc != 0)
++				goto invalid_relocation;
+ 			*(s32 *)loc = val;
+ 			if ((s64)val != *(s32 *)loc)
+ 				goto overflow;
+ 			break;
+ 		case R_X86_64_PC32:
++		case R_X86_64_PLT32:
++			if (*(u32 *)loc != 0)
++				goto invalid_relocation;
+ 			val -= (u64)loc;
+ 			*(u32 *)loc = val;
+ #if 0
+@@ -198,6 +207,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 	}
+ 	return 0;
+ 
++invalid_relocation:
++	pr_err("x86/modules: Skipping invalid relocation target, existing value is nonzero for type %d, loc %p, val %Lx\n",
++	       (int)ELF64_R_TYPE(rel[i].r_info), loc, val);
++	return -ENOEXEC;
++
+ overflow:
+ 	pr_err("overflow in relocation type %d val %Lx\n",
+ 	       (int)ELF64_R_TYPE(rel[i].r_info), val);
+diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
+index b7518368492a..9bc944a91274 100644
+--- a/arch/x86/lib/checksum_32.S
++++ b/arch/x86/lib/checksum_32.S
+@@ -29,8 +29,7 @@
+ #include <asm/dwarf2.h>
+ #include <asm/errno.h>
+ #include <asm/asm.h>
+-#include <asm/nospec-branch.h>
+-
++				
+ /*
+  * computes a partial checksum, e.g. for TCP/UDP fragments
+  */
+@@ -160,7 +159,7 @@ ENTRY(csum_partial)
+ 	negl %ebx
+ 	lea 45f(%ebx,%ebx,2), %ebx
+ 	testl %esi, %esi
+-	JMP_NOSPEC %ebx
++	jmp *%ebx
+ 
+ 	# Handle 2-byte-aligned regions
+ 20:	addw (%esi), %ax
+@@ -447,7 +446,7 @@ ENTRY(csum_partial_copy_generic)
+ 	andl $-32,%edx
+ 	lea 3f(%ebx,%ebx), %ebx
+ 	testl %esi, %esi 
+-	JMP_NOSPEC %ebx
++	jmp *%ebx
+ 1:	addl $64,%esi
+ 	addl $64,%edi 
+ 	SRC(movb -32(%edx),%bl)	; SRC(movb (%edx),%bl)
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index 73eb7fd4aec4..5b6c8486a0be 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -769,9 +769,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+ 		break;
+ 
+ 	case R_X86_64_PC32:
++	case R_X86_64_PLT32:
+ 		/*
+ 		 * PC relative relocations don't need to be adjusted unless
+ 		 * referencing a percpu symbol.
++		 *
++		 * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32.
+ 		 */
+ 		if (is_percpu_sym(sym, symname))
+ 			add_reloc(&relocs32neg, offset);
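The R_X86_64_PLT32 additions in the last three hunks share one justification: binutils 2.31 began emitting PLT32 instead of PC32 relocations for call/jmp to a symbol even when it is local, and since the kernel, its modules and the kexec-loaded objects are linked without a PLT, the target address stands in for the PLT entry and PLT32 can be resolved exactly like PC32 (symbol value minus place, stored as a 32-bit quantity).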
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 7104d9e1b5f9..4d1cc6982518 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -870,10 +870,8 @@ int blkcg_init_queue(struct request_queue *q)
+ 	if (preloaded)
+ 		radix_tree_preload_end();
+ 
+-	if (IS_ERR(blkg)) {
+-		blkg_free(new_blkg);
++	if (IS_ERR(blkg))
+ 		return PTR_ERR(blkg);
+-	}
+ 
+ 	q->root_blkg = blkg;
+ 	q->root_rl.blkg = blkg;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index b5dbce192c6b..9e72be28ee9f 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -207,7 +207,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
+ 	struct iov_iter i;
+ 	ssize_t bw;
+ 
+-	iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len);
++	iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
+ 
+ 	file_start_write(file);
+ 	bw = vfs_iter_write(file, &i, ppos);
+diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
+index 8d626784cd8d..49e4040eeb55 100644
+--- a/drivers/char/tpm/st33zp24/st33zp24.c
++++ b/drivers/char/tpm/st33zp24/st33zp24.c
+@@ -485,7 +485,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ 			    size_t count)
+ {
+ 	int size = 0;
+-	int expected;
++	u32 expected;
+ 
+ 	if (!chip)
+ 		return -EBUSY;
+@@ -502,7 +502,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+-	if (expected > count) {
++	if (expected > count || expected < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
+index 33c5f360ab01..ff9ed941862a 100644
+--- a/drivers/char/tpm/tpm_i2c_infineon.c
++++ b/drivers/char/tpm/tpm_i2c_infineon.c
+@@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	int size = 0;
+-	int expected, status;
++	int status;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+@@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
+-	if ((size_t) expected > count) {
++	if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
+index eac6dc93589f..134cef873aeb 100644
+--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
+@@ -275,7 +275,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	struct device *dev = chip->pdev;
+ 	struct i2c_client *client = to_i2c_client(dev);
+ 	s32 rc;
+-	int expected, status, burst_count, retries, size = 0;
++	int status;
++	int burst_count;
++	int retries;
++	int size = 0;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		i2c_nuvoton_ready(chip);    /* return to idle */
+@@ -317,7 +321,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 		 * to machine native
+ 		 */
+ 		expected = be32_to_cpu(*(__be32 *) (buf + 2));
+-		if (expected > count) {
++		if (expected > count || expected < size) {
+ 			dev_err(dev, "%s() expected > count\n", __func__);
+ 			size = -EIO;
+ 			continue;
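All three TPM hunks harden the same parse: the response length lives in big-endian bytes 2..5 of the header and comes from the device, so it is now kept unsigned and rejected unless it is at least TPM_HEADER_SIZE (10 bytes) and no larger than the caller's buffer; otherwise a malicious or confused chip could steer later reads out of bounds. A standalone version of the check (function name is ours):

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>	/* ntohl() */

	#define TPM_HEADER_SIZE 10

	/* Extract and bounds-check the length field of a TPM response. */
	static int tpm_response_len(const uint8_t *buf, size_t count, uint32_t *out)
	{
		uint32_t expected;

		if (count < TPM_HEADER_SIZE)
			return -1;
		memcpy(&expected, buf + 2, sizeof(expected));
		expected = ntohl(expected);
		if (expected < TPM_HEADER_SIZE || expected > count)
			return -1;	/* malformed or oversized response */
		*out = expected;
		return 0;
	}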
+diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
+index 733aa5153e74..e23fd9a9b8c4 100644
+--- a/drivers/cpufreq/s3c24xx-cpufreq.c
++++ b/drivers/cpufreq/s3c24xx-cpufreq.c
+@@ -364,7 +364,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
+ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ 	policy->clk = clk_arm;
+-	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
++
++	policy->cpuinfo.transition_latency = cpu_cur.info->latency;
++
++	if (ftab)
++		return cpufreq_table_validate_and_show(policy, ftab);
++
++	return 0;
+ }
+ 
+ static int __init s3c_cpufreq_initclks(void)
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index 2bf5fcb0062a..f80c719642b4 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -3574,35 +3574,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
+ 	case CHIP_KAVERI:
+ 		rdev->config.cik.max_shader_engines = 1;
+ 		rdev->config.cik.max_tile_pipes = 4;
+-		if ((rdev->pdev->device == 0x1304) ||
+-		    (rdev->pdev->device == 0x1305) ||
+-		    (rdev->pdev->device == 0x130C) ||
+-		    (rdev->pdev->device == 0x130F) ||
+-		    (rdev->pdev->device == 0x1310) ||
+-		    (rdev->pdev->device == 0x1311) ||
+-		    (rdev->pdev->device == 0x131C)) {
+-			rdev->config.cik.max_cu_per_sh = 8;
+-			rdev->config.cik.max_backends_per_se = 2;
+-		} else if ((rdev->pdev->device == 0x1309) ||
+-			   (rdev->pdev->device == 0x130A) ||
+-			   (rdev->pdev->device == 0x130D) ||
+-			   (rdev->pdev->device == 0x1313) ||
+-			   (rdev->pdev->device == 0x131D)) {
+-			rdev->config.cik.max_cu_per_sh = 6;
+-			rdev->config.cik.max_backends_per_se = 2;
+-		} else if ((rdev->pdev->device == 0x1306) ||
+-			   (rdev->pdev->device == 0x1307) ||
+-			   (rdev->pdev->device == 0x130B) ||
+-			   (rdev->pdev->device == 0x130E) ||
+-			   (rdev->pdev->device == 0x1315) ||
+-			   (rdev->pdev->device == 0x1318) ||
+-			   (rdev->pdev->device == 0x131B)) {
+-			rdev->config.cik.max_cu_per_sh = 4;
+-			rdev->config.cik.max_backends_per_se = 1;
+-		} else {
+-			rdev->config.cik.max_cu_per_sh = 3;
+-			rdev->config.cik.max_backends_per_se = 1;
+-		}
++		rdev->config.cik.max_cu_per_sh = 8;
++		rdev->config.cik.max_backends_per_se = 2;
+ 		rdev->config.cik.max_sh_per_se = 1;
+ 		rdev->config.cik.max_texture_channel_caches = 4;
+ 		rdev->config.cik.max_gprs = 256;
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 5d8dfe027b30..75d51ec98e06 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -818,6 +818,8 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+ 	pr_info("Initializing pool allocator\n");
+ 
+ 	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
++	if (!_manager)
++		return -ENOMEM;
+ 
+ 	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
+ 
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 81dd84d0b68b..9e7dd06031ae 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1050,6 +1050,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
++	if (cmd.qp_state > IB_QPS_ERR)
++		return -EINVAL;
++
+ 	ctx = ucma_get_ctx(file, cmd.id);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+@@ -1187,6 +1190,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
++	if (unlikely(cmd.optval > KMALLOC_MAX_SIZE))
++		return -EINVAL;
++
+ 	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
+ 			     cmd.optlen);
+ 	if (IS_ERR(optval)) {
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 2ee6b1051975..ca920c633f25 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -959,7 +959,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ 	if (ucmd.reserved0 || ucmd.reserved1)
+ 		return -EINVAL;
+ 
+-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
++	/* check multiplication overflow */
++	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
++		return -EINVAL;
++
++	umem = ib_umem_get(context, ucmd.buf_addr,
++			   (size_t)ucmd.cqe_size * entries,
+ 			   IB_ACCESS_LOCAL_WRITE, 1);
+ 	if (IS_ERR(umem)) {
+ 		err = PTR_ERR(umem);
+diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
+index b370a59cb759..bfa9792b3184 100644
+--- a/drivers/input/keyboard/matrix_keypad.c
++++ b/drivers/input/keyboard/matrix_keypad.c
+@@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev)
+ {
+ 	struct matrix_keypad *keypad = input_get_drvdata(dev);
+ 
++	spin_lock_irq(&keypad->lock);
+ 	keypad->stopped = true;
+-	mb();
++	spin_unlock_irq(&keypad->lock);
++
+ 	flush_work(&keypad->work.work);
+ 	/*
+ 	 * matrix_keypad_scan() will leave IRQs enabled;
+diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
+index 5c4f7f8f2c20..05c3f25dd8c2 100644
+--- a/drivers/input/keyboard/tca8418_keypad.c
++++ b/drivers/input/keyboard/tca8418_keypad.c
+@@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+ 		input_event(input, EV_MSC, MSC_SCAN, code);
+ 		input_report_key(input, keymap[code], state);
+ 
+-		/* Read for next loop */
+-		error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ 	} while (1);
+ 
+ 	input_sync(input);
+diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
+index 9886dace5ad2..ba6db252e1cb 100644
+--- a/drivers/leds/led-core.c
++++ b/drivers/leds/led-core.c
+@@ -76,7 +76,7 @@ void led_blink_set(struct led_classdev *led_cdev,
+ 		   unsigned long *delay_on,
+ 		   unsigned long *delay_off)
+ {
+-	del_timer_sync(&led_cdev->blink_timer);
++	led_stop_software_blink(led_cdev);
+ 
+ 	led_cdev->flags &= ~LED_BLINK_ONESHOT;
+ 	led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index e8b1120f486d..eef3e64ca0a8 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -88,21 +88,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
+ 	down_read(&led_cdev->trigger_lock);
+ 
+ 	if (!led_cdev->trigger)
+-		len += sprintf(buf+len, "[none] ");
++		len += scnprintf(buf+len, PAGE_SIZE - len, "[none] ");
+ 	else
+-		len += sprintf(buf+len, "none ");
++		len += scnprintf(buf+len, PAGE_SIZE - len, "none ");
+ 
+ 	list_for_each_entry(trig, &trigger_list, next_trig) {
+ 		if (led_cdev->trigger && !strcmp(led_cdev->trigger->name,
+ 							trig->name))
+-			len += sprintf(buf+len, "[%s] ", trig->name);
++			len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ",
++					 trig->name);
+ 		else
+-			len += sprintf(buf+len, "%s ", trig->name);
++			len += scnprintf(buf+len, PAGE_SIZE - len, "%s ",
++					 trig->name);
+ 	}
+ 	up_read(&led_cdev->trigger_lock);
+ 	up_read(&triggers_list_lock);
+ 
+-	len += sprintf(len+buf, "\n");
++	len += scnprintf(len+buf, PAGE_SIZE - len, "\n");
+ 	return len;
+ }
+ EXPORT_SYMBOL_GPL(led_trigger_show);
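The led-triggers change swaps sprintf() for scnprintf(): a sysfs show() buffer is exactly one page, and with enough registered triggers the old unbounded appends could run past it. scnprintf() returns the number of characters actually written, never more than the remaining space, so the accumulated len can be fed straight back in. A sketch of the idiom (kernel context assumed; the name variables are hypothetical):

	/* Append pieces into a one-page sysfs buffer without overflow. */
	len += scnprintf(buf + len, PAGE_SIZE - len, "[%s] ", active_name);
	len += scnprintf(buf + len, PAGE_SIZE - len, "%s ", other_name);
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");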
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 6f7bc8a8674b..b8013e386c76 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -966,6 +966,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	uint32_t rtime = cpu_to_le32(get_seconds());
+ 	struct uuid_entry *u;
+ 	char buf[BDEVNAME_SIZE];
++	struct cached_dev *exist_dc, *t;
+ 
+ 	bdevname(dc->bdev, buf);
+ 
+@@ -989,6 +990,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 		return -EINVAL;
+ 	}
+ 
++	/* Check whether already attached */
++	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
++		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
++			pr_err("Tried to attach %s but duplicate UUID already attached",
++				buf);
++
++			return -EINVAL;
++		}
++	}
++
+ 	u = uuid_find(c, dc->sb.uuid);
+ 
+ 	if (u &&
+diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
+index 74adcd2c967e..01d7b5785b8e 100644
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -299,6 +299,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
+ 	else if (rw & REQ_WRITE_SAME)
+ 		special_cmd_max_sectors = q->limits.max_write_same_sectors;
+ 	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
++		atomic_inc(&io->count);
+ 		dec_count(io, region, -EOPNOTSUPP);
+ 		return;
+ 	}
+diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+index 1b8f3500e6d2..d25f2563ffda 100644
+--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
++++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+@@ -1029,9 +1029,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ 		return ret;
+ 	}
+ 
+-	/* handle the block mark swapping */
+-	block_mark_swapping(this, payload_virt, auxiliary_virt);
+-
+ 	/* Loop over status bytes, accumulating ECC status. */
+ 	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ 
+@@ -1047,6 +1044,9 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ 		max_bitflips = max_t(unsigned int, max_bitflips, *status);
+ 	}
+ 
++	/* handle the block mark swapping */
++	block_mark_swapping(this, buf, auxiliary_virt);
++
+ 	if (oob_required) {
+ 		/*
+ 		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index 812ecf2d253a..9965acb8aca0 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -310,6 +310,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+ 			vol->last_eb_bytes = vol->usable_leb_size;
+ 	}
+ 
++	/* Make volume "available" before it becomes accessible via sysfs */
++	spin_lock(&ubi->volumes_lock);
++	ubi->volumes[vol_id] = vol;
++	ubi->vol_count += 1;
++	spin_unlock(&ubi->volumes_lock);
++
+ 	/* Register character device for the volume */
+ 	cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
+ 	vol->cdev.owner = THIS_MODULE;
+@@ -352,11 +358,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
+ 	if (err)
+ 		goto out_sysfs;
+ 
+-	spin_lock(&ubi->volumes_lock);
+-	ubi->volumes[vol_id] = vol;
+-	ubi->vol_count += 1;
+-	spin_unlock(&ubi->volumes_lock);
+-
+ 	ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED);
+ 	self_check_volumes(ubi);
+ 	return err;
+@@ -376,6 +377,10 @@ out_sysfs:
+ out_cdev:
+ 	cdev_del(&vol->cdev);
+ out_mapping:
++	spin_lock(&ubi->volumes_lock);
++	ubi->volumes[vol_id] = NULL;
++	ubi->vol_count -= 1;
++	spin_unlock(&ubi->volumes_lock);
+ 	if (do_free)
+ 		kfree(vol->eba_tbl);
+ out_acc:
+diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
+index abe1eabc0171..9cc5daed13ed 100644
+--- a/drivers/net/ethernet/arc/emac_main.c
++++ b/drivers/net/ethernet/arc/emac_main.c
+@@ -250,39 +250,48 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
+ 			continue;
+ 		}
+ 
+-		pktlen = info & LEN_MASK;
+-		stats->rx_packets++;
+-		stats->rx_bytes += pktlen;
+-		skb = rx_buff->skb;
+-		skb_put(skb, pktlen);
+-		skb->dev = ndev;
+-		skb->protocol = eth_type_trans(skb, ndev);
+-
+-		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+-				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+-
+-		/* Prepare the BD for next cycle */
+-		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
+-							 EMAC_BUFFER_SIZE);
+-		if (unlikely(!rx_buff->skb)) {
++		/* Prepare the BD for the next cycle. Call netif_receive_skb()
++		 * only if a new skb was allocated and mapped, to avoid holes
++		 * in the RX fifo.
++		 */
++		skb = netdev_alloc_skb_ip_align(ndev, EMAC_BUFFER_SIZE);
++		if (unlikely(!skb)) {
++			if (net_ratelimit())
++				netdev_err(ndev, "cannot allocate skb\n");
++			/* Return ownership to EMAC */
++			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+ 			stats->rx_errors++;
+-			/* Because receive_skb is below, increment rx_dropped */
+ 			stats->rx_dropped++;
+ 			continue;
+ 		}
+ 
+-		/* receive_skb only if new skb was allocated to avoid holes */
+-		netif_receive_skb(skb);
+-
+-		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
++		addr = dma_map_single(&ndev->dev, (void *)skb->data,
+ 				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
+ 		if (dma_mapping_error(&ndev->dev, addr)) {
+ 			if (net_ratelimit())
+-				netdev_err(ndev, "cannot dma map\n");
+-			dev_kfree_skb(rx_buff->skb);
++				netdev_err(ndev, "cannot map dma buffer\n");
++			dev_kfree_skb(skb);
++			/* Return ownership to EMAC */
++			rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);
+ 			stats->rx_errors++;
++			stats->rx_dropped++;
+ 			continue;
+ 		}
++
++		/* unmap the previously mapped skb */
++		dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
++				 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
++
++		pktlen = info & LEN_MASK;
++		stats->rx_packets++;
++		stats->rx_bytes += pktlen;
++		skb_put(rx_buff->skb, pktlen);
++		rx_buff->skb->dev = ndev;
++		rx_buff->skb->protocol = eth_type_trans(rx_buff->skb, ndev);
++
++		netif_receive_skb(rx_buff->skb);
++
++		rx_buff->skb = skb;
+ 		dma_unmap_addr_set(rx_buff, addr, addr);
+ 		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 5d9843bc73a1..0eb43586c034 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -2996,7 +2996,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+ 
+ 	del_timer_sync(&bp->timer);
+ 
+-	if (IS_PF(bp)) {
++	if (IS_PF(bp) && !BP_NOMCP(bp)) {
+ 		/* Set ALWAYS_ALIVE bit in shmem */
+ 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+ 		bnx2x_drv_pulse(bp);
+@@ -3078,7 +3078,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
+ 	bp->cnic_loaded = false;
+ 
+ 	/* Clear driver version indication in shmem */
+-	if (IS_PF(bp))
++	if (IS_PF(bp) && !BP_NOMCP(bp))
+ 		bnx2x_update_mng_version(bp);
+ 
+ 	/* Check if there are pending parity attentions. If there are - set
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index a1d149515531..a33580119b7c 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -9488,6 +9488,15 @@ static int bnx2x_init_shmem(struct bnx2x *bp)
+ 
+ 	do {
+ 		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
++
++		/* If we read all 0xFFs, means we are in PCI error state and
++		 * should bail out to avoid crashes on adapter's FW reads.
++		 */
++		if (bp->common.shmem_base == 0xFFFFFFFF) {
++			bp->flags |= NO_MCP_FLAG;
++			return -ENODEV;
++		}
++
+ 		if (bp->common.shmem_base) {
+ 			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+ 			if (val & SHR_MEM_VALIDITY_MB)
+@@ -13791,7 +13800,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+ 		BNX2X_ERR("IO slot reset --> driver unload\n");
+ 
+ 		/* MCP should have been reset; Need to wait for validity */
+-		bnx2x_init_shmem(bp);
++		if (bnx2x_init_shmem(bp)) {
++			rtnl_unlock();
++			return PCI_ERS_RESULT_DISCONNECT;
++		}
+ 
+ 		if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ 			u32 v;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 7b150085e34d..3640d6abc1e6 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10051,6 +10051,16 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
+ 
+ 	tw32(GRC_MODE, tp->grc_mode | val);
+ 
++	/* On one of the AMD platforms, MRRS is restricted to 4000 because of
++	 * a south bridge limitation. As a workaround, the driver sets MRRS
++	 * to 2048 instead of the default 4096.
++	 */
++	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
++	    tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
++		val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
++		tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
++	}
++
+ 	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
+ 	val = tr32(GRC_MISC_CFG);
+ 	val &= ~0xff;
+@@ -14230,7 +14240,8 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+ 	 */
+ 	if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
+ 	    tg3_asic_rev(tp) == ASIC_REV_5717 ||
+-	    tg3_asic_rev(tp) == ASIC_REV_5719)
++	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
++	    tg3_asic_rev(tp) == ASIC_REV_5720)
+ 		reset_phy = true;
+ 
+ 	err = tg3_restart_hw(tp, reset_phy);
+diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
+index 31c9f8295953..19532961e173 100644
+--- a/drivers/net/ethernet/broadcom/tg3.h
++++ b/drivers/net/ethernet/broadcom/tg3.h
+@@ -95,6 +95,7 @@
+ #define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106
+ #define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109
+ #define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a
++#define TG3PCI_SUBDEVICE_ID_DELL_5762		0x07f0
+ #define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ
+ #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c
+ #define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a
+@@ -280,6 +281,9 @@
+ #define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */
+ #define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */
+ /* 0xa8 --> 0xb8 unused */
++#define TG3PCI_DEV_STATUS_CTRL		0x000000b4
++#define  MAX_READ_REQ_SIZE_2048		 0x00004000
++#define  MAX_READ_REQ_MASK		 0x00007000
+ #define TG3PCI_DUAL_MAC_CTRL		0x000000b8
+ #define  DUAL_MAC_CTRL_CH_MASK		 0x00000003
+ #define  DUAL_MAC_CTRL_ID		 0x00000004
+diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
+index 8e3cd77aa347..9e5d64f559a4 100644
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -314,11 +314,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ 	now = tmr_cnt_read(etsects);
+ 	now += delta;
+ 	tmr_cnt_write(etsects, now);
++	set_fipers(etsects);
+ 
+ 	spin_unlock_irqrestore(&etsects->lock, flags);
+ 
+-	set_fipers(etsects);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
+index 69707108d23c..4cd6dac110f0 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000.h
++++ b/drivers/net/ethernet/intel/e1000/e1000.h
+@@ -328,7 +328,8 @@ struct e1000_adapter {
+ enum e1000_state_t {
+ 	__E1000_TESTING,
+ 	__E1000_RESETTING,
+-	__E1000_DOWN
++	__E1000_DOWN,
++	__E1000_DISABLED
+ };
+ 
+ #undef pr_fmt
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 983eb4e6f7aa..4d80c92fa96d 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -940,7 +940,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ {
+ 	struct net_device *netdev;
+-	struct e1000_adapter *adapter;
++	struct e1000_adapter *adapter = NULL;
+ 	struct e1000_hw *hw;
+ 
+ 	static int cards_found = 0;
+@@ -950,6 +950,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	u16 tmp = 0;
+ 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ 	int bars, need_ioport;
++	bool disable_dev = false;
+ 
+ 	/* do not allocate ioport bars when not needed */
+ 	need_ioport = e1000_is_need_ioport(pdev);
+@@ -1250,11 +1251,13 @@ err_mdio_ioremap:
+ 	iounmap(hw->ce4100_gbe_mdio_base_virt);
+ 	iounmap(hw->hw_addr);
+ err_ioremap:
++	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
+ 	free_netdev(netdev);
+ err_alloc_etherdev:
+ 	pci_release_selected_regions(pdev, bars);
+ err_pci_reg:
+-	pci_disable_device(pdev);
++	if (!adapter || disable_dev)
++		pci_disable_device(pdev);
+ 	return err;
+ }
+ 
+@@ -1272,6 +1275,7 @@ static void e1000_remove(struct pci_dev *pdev)
+ 	struct net_device *netdev = pci_get_drvdata(pdev);
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+ 	struct e1000_hw *hw = &adapter->hw;
++	bool disable_dev;
+ 
+ 	e1000_down_and_stop(adapter);
+ 	e1000_release_manageability(adapter);
+@@ -1290,9 +1294,11 @@ static void e1000_remove(struct pci_dev *pdev)
+ 		iounmap(hw->flash_address);
+ 	pci_release_selected_regions(pdev, adapter->bars);
+ 
++	disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
+ 	free_netdev(netdev);
+ 
+-	pci_disable_device(pdev);
++	if (disable_dev)
++		pci_disable_device(pdev);
+ }
+ 
+ /**
+@@ -5137,7 +5143,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ 	if (netif_running(netdev))
+ 		e1000_free_irq(adapter);
+ 
+-	pci_disable_device(pdev);
++	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
++		pci_disable_device(pdev);
+ 
+ 	return 0;
+ }
+@@ -5181,6 +5188,10 @@ static int e1000_resume(struct pci_dev *pdev)
+ 		pr_err("Cannot enable PCI device from suspend\n");
+ 		return err;
+ 	}
++
++	/* flush memory to make sure state is correct */
++	smp_mb__before_atomic();
++	clear_bit(__E1000_DISABLED, &adapter->flags);
+ 	pci_set_master(pdev);
+ 
+ 	pci_enable_wake(pdev, PCI_D3hot, 0);
+@@ -5255,7 +5266,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+ 
+ 	if (netif_running(netdev))
+ 		e1000_down(adapter);
+-	pci_disable_device(pdev);
++
++	if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
++		pci_disable_device(pdev);
+ 
+ 	/* Request a slot slot reset. */
+ 	return PCI_ERS_RESULT_NEED_RESET;
+@@ -5283,6 +5296,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+ 		pr_err("Cannot re-enable PCI device after reset.\n");
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 	}
++
++	/* flush memory to make sure state is correct */
++	smp_mb__before_atomic();
++	clear_bit(__E1000_DISABLED, &adapter->flags);
+ 	pci_set_master(pdev);
+ 
+ 	pci_enable_wake(pdev, PCI_D3hot, 0);
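The e1000 hunks guard pci_disable_device() with the new __E1000_DISABLED flag because shutdown, remove and the PCI error handlers could each reach it, and disabling an already-disabled PCI device unbalances its enable count. test_and_set_bit() makes the "disable at most once" decision atomic; the same shape in portable C11 (names are ours):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_bool device_disabled;

	/* Run the disable step at most once, however many paths race here. */
	static void disable_device_once(void)
	{
		if (!atomic_exchange(&device_disabled, true))
			; /* pci_disable_device(pdev) would go here */
	}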
+diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
+index 15bc7f9ea224..afd76e07088b 100644
+--- a/drivers/net/phy/mdio-sun4i.c
++++ b/drivers/net/phy/mdio-sun4i.c
+@@ -128,8 +128,10 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
+ 
+ 	data->regulator = devm_regulator_get(&pdev->dev, "phy");
+ 	if (IS_ERR(data->regulator)) {
+-		if (PTR_ERR(data->regulator) == -EPROBE_DEFER)
+-			return -EPROBE_DEFER;
++		if (PTR_ERR(data->regulator) == -EPROBE_DEFER) {
++			ret = -EPROBE_DEFER;
++			goto err_out_free_mdiobus;
++		}
+ 
+ 		dev_info(&pdev->dev, "no regulator found\n");
+ 	} else {
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index c30c1fc7889a..ebd3fcf6dc62 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -2912,6 +2912,15 @@ ppp_connect_channel(struct channel *pch, int unit)
+ 		goto outl;
+ 
+ 	ppp_lock(ppp);
++	spin_lock_bh(&pch->downl);
++	if (!pch->chan) {
++		/* Don't connect unregistered channels */
++		spin_unlock_bh(&pch->downl);
++		ppp_unlock(ppp);
++		ret = -ENOTCONN;
++		goto outl;
++	}
++	spin_unlock_bh(&pch->downl);
+ 	if (pch->file.hdrlen > ppp->file.hdrlen)
+ 		ppp->file.hdrlen = pch->file.hdrlen;
+ 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */
+diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
+index 0d7645581f91..4842344a96f1 100644
+--- a/drivers/net/wan/hdlc_ppp.c
++++ b/drivers/net/wan/hdlc_ppp.c
+@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
+ 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+ 				     0, NULL);
+ 			proto->restart_counter--;
+-		} else
++		} else if (netif_carrier_ok(proto->dev))
++			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
++				     0, NULL);
++		else
+ 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
+ 				     0, NULL);
+ 		break;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 9e8461466534..fd9f6ce14e8e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1284,6 +1284,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
+ 
+ 	netif_carrier_off(netdev);
+ 
++	xenbus_switch_state(dev, XenbusStateInitialising);
+ 	return netdev;
+ 
+  exit:
+diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
+index d26134713682..d05c553eb552 100644
+--- a/drivers/s390/block/dasd_3990_erp.c
++++ b/drivers/s390/block/dasd_3990_erp.c
+@@ -2743,6 +2743,16 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
+ 		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
+ 	}
+ 
++
++	/*
++	 * For path verification work we need to stick with the path that was
++	 * originally chosen so that the per-path configuration data is
++	 * assigned correctly.
++	 */
++	if (test_bit(DASD_CQR_VERIFY_PATH, &erp->flags) && cqr->lpm) {
++		erp->lpm = cqr->lpm;
++	}
++
+ 	if (device->features & DASD_FEATURE_ERPLOG) {
+ 		/* print current erp_chain */
+ 		dev_err(&device->cdev->dev,
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index 77336d85a717..02780b8c1c15 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -588,6 +588,11 @@ struct qeth_cmd_buffer {
+ 	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
+ };
+ 
++static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
++{
++	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
++}
++
+ /**
+  * definition of a qeth channel, used for read and write
+  */
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index a81215d87ce1..9e9964ca696b 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2061,7 +2061,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 	unsigned long flags;
+ 	struct qeth_reply *reply = NULL;
+ 	unsigned long timeout, event_timeout;
+-	struct qeth_ipa_cmd *cmd;
++	struct qeth_ipa_cmd *cmd = NULL;
+ 
+ 	QETH_CARD_TEXT(card, 2, "sendctl");
+ 
+@@ -2075,23 +2075,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 	}
+ 	reply->callback = reply_cb;
+ 	reply->param = reply_param;
+-	if (card->state == CARD_STATE_DOWN)
+-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+-	else
+-		reply->seqno = card->seqno.ipa++;
++
+ 	init_waitqueue_head(&reply->wait_q);
+-	spin_lock_irqsave(&card->lock, flags);
+-	list_add_tail(&reply->list, &card->cmd_waiter_list);
+-	spin_unlock_irqrestore(&card->lock, flags);
+ 	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
+ 
+ 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
+-	qeth_prepare_control_data(card, len, iob);
+ 
+-	if (IS_IPA(iob->data))
++	if (IS_IPA(iob->data)) {
++		cmd = __ipa_cmd(iob);
++		cmd->hdr.seqno = card->seqno.ipa++;
++		reply->seqno = cmd->hdr.seqno;
+ 		event_timeout = QETH_IPA_TIMEOUT;
+-	else
++	} else {
++		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+ 		event_timeout = QETH_TIMEOUT;
++	}
++	qeth_prepare_control_data(card, len, iob);
++
++	spin_lock_irqsave(&card->lock, flags);
++	list_add_tail(&reply->list, &card->cmd_waiter_list);
++	spin_unlock_irqrestore(&card->lock, flags);
++
+ 	timeout = jiffies + event_timeout;
+ 
+ 	QETH_CARD_TEXT(card, 6, "noirqpnd");
+@@ -2116,9 +2120,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
+ 
+ 	/* we have only one long running ipassist, since we can ensure
+ 	   process context of this command we can sleep */
+-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+-	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
+-	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
++	if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
++	    cmd->hdr.prot_version == QETH_PROT_IPV4) {
+ 		if (!wait_event_timeout(reply->wait_q,
+ 		    atomic_read(&reply->received), event_timeout))
+ 			goto time_err;
+@@ -2925,7 +2928,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
+ 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
+ 	cmd->hdr.command = command;
+ 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
+-	cmd->hdr.seqno = card->seqno.ipa;
++	/* cmd->hdr.seqno is set by qeth_send_control_data() */
+ 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
+ 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
+ 	if (card->options.layer2)
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 60f9651f2643..ad7170bffc05 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -365,6 +365,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res)
+ 	srb_t *sp = (srb_t *)ptr;
+ 	struct srb_iocb *abt = &sp->u.iocb_cmd;
+ 
++	del_timer(&sp->u.iocb_cmd.timer);
+ 	complete(&abt->u.abt.comp);
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 4de1394ebf22..1be1c2dca118 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -5513,7 +5513,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ 	fc_port_t *fcport;
+ 	int rc;
+ 
+-	fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
++	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+ 	if (!fcport) {
+ 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ 		    "qla_target(%d): Allocation of tmp FC port failed",
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 98b56a7069d3..af4dc8501269 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1067,10 +1067,11 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ 		case TEST_UNIT_READY:
+ 			break;
+ 		default:
+-			set_host_byte(scmnd, DID_TARGET_FAILURE);
++			set_host_byte(scmnd, DID_ERROR);
+ 		}
+ 		break;
+ 	case SRB_STATUS_INVALID_LUN:
++		set_host_byte(scmnd, DID_NO_CONNECT);
+ 		do_work = true;
+ 		process_err_fn = storvsc_remove_lun;
+ 		break;
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index a2f40b1b2225..3fef713f693b 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1423,12 +1423,12 @@ static int atmel_spi_remove(struct platform_device *pdev)
+ 	pm_runtime_get_sync(&pdev->dev);
+ 
+ 	/* reset the hardware and block queue progress */
+-	spin_lock_irq(&as->lock);
+ 	if (as->use_dma) {
+ 		atmel_spi_stop_dma(as);
+ 		atmel_spi_release_dma(as);
+ 	}
+ 
++	spin_lock_irq(&as->lock);
+ 	spi_writel(as, CR, SPI_BIT(SWRST));
+ 	spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ 	spi_readl(as, SR);
+diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
+index c40bd7fbc210..68a5a950bf34 100644
+--- a/drivers/staging/android/ashmem.c
++++ b/drivers/staging/android/ashmem.c
+@@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+ 	mutex_lock(&ashmem_mutex);
+ 
+ 	if (asma->size == 0) {
+-		ret = -EINVAL;
+-		goto out;
++		mutex_unlock(&ashmem_mutex);
++		return -EINVAL;
+ 	}
+ 
+ 	if (!asma->file) {
+-		ret = -EBADF;
+-		goto out;
++		mutex_unlock(&ashmem_mutex);
++		return -EBADF;
+ 	}
+ 
++	mutex_unlock(&ashmem_mutex);
++
+ 	ret = vfs_llseek(asma->file, offset, origin);
+ 	if (ret < 0)
+-		goto out;
++		return ret;
+ 
+ 	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
+ 	file->f_pos = asma->file->f_pos;
+-
+-out:
+-	mutex_unlock(&ashmem_mutex);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 0a1e9f4d9882..e2956ccdd82c 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -5396,6 +5396,17 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 	{	PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
+ 		PCI_ANY_ID, PCI_ANY_ID, 0, 0,    /* 135a.0dc0 */
+ 		pbn_b2_4_115200 },
++	/*
++	 * BrainBoxes UC-260
++	 */
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0D21,
++		PCI_ANY_ID, PCI_ANY_ID,
++		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++		pbn_b2_4_115200 },
++	{	PCI_VENDOR_ID_INTASHIELD, 0x0E34,
++		PCI_ANY_ID, PCI_ANY_ID,
++		PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
++		pbn_b2_4_115200 },
+ 	/*
+ 	 * Perle PCI-RAS cards
+ 	 */
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 1f45a0302b7c..839ba41b6232 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -1687,6 +1687,7 @@ static void atmel_get_ip_name(struct uart_port *port)
+ 		switch (version) {
+ 		case 0x302:
+ 		case 0x10213:
++		case 0x10302:
+ 			dev_dbg(port->dev, "This version is usart\n");
+ 			atmel_port->is_usart = true;
+ 			break;
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index 19316609d4f9..c8c564c71c75 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -736,6 +736,8 @@ static void sci_receive_chars(struct uart_port *port)
+ 		/* Tell the rest of the system the news. New characters! */
+ 		tty_flip_buffer_push(tport);
+ 	} else {
++		/* TTY buffers full; read from RX reg to prevent lockup */
++		serial_port_in(port, SCxRDR);
+ 		serial_port_in(port, SCxSR); /* dummy read */
+ 		serial_port_out(port, SCxSR, SCxSR_RDxF_CLEAR(port));
+ 	}
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index f368d2053da5..251b44300b38 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
+ 
+ 	ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
+ 
++	/* Linger a bit, prior to the next control message. */
++	if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
++		msleep(200);
++
+ 	kfree(dr);
+ 
+ 	return ret;
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 774c97bb1c08..4f1c6f8d4352 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -229,7 +229,8 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+ 	/* Corsair Strafe RGB */
+-	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
++	  USB_QUIRK_DELAY_CTRL_MSG },
+ 
+ 	/* Corsair K70 LUX */
+ 	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 6b62bb5c021c..3f4c6d97a4ff 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1333,7 +1333,6 @@ ffs_fs_kill_sb(struct super_block *sb)
+ 	if (sb->s_fs_info) {
+ 		ffs_release_dev(sb->s_fs_info);
+ 		ffs_data_closed(sb->s_fs_info);
+-		ffs_data_put(sb->s_fs_info);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
+index ad408251d955..108dcc5f5350 100644
+--- a/drivers/usb/mon/mon_text.c
++++ b/drivers/usb/mon/mon_text.c
+@@ -82,6 +82,8 @@ struct mon_reader_text {
+ 
+ 	wait_queue_head_t wait;
+ 	int printf_size;
++	size_t printf_offset;
++	size_t printf_togo;
+ 	char *printf_buf;
+ 	struct mutex printf_lock;
+ 
+@@ -373,73 +375,103 @@ err_alloc:
+ 	return rc;
+ }
+ 
+-/*
+- * For simplicity, we read one record in one system call and throw out
+- * what does not fit. This means that the following does not work:
+- *   dd if=/dbg/usbmon/0t bs=10
+- * Also, we do not allow seeks and do not bother advancing the offset.
+- */
++static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
++    char __user * const buf, const size_t nbytes)
++{
++	const size_t togo = min(nbytes, rp->printf_togo);
++
++	if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
++		return -EFAULT;
++	rp->printf_togo -= togo;
++	rp->printf_offset += togo;
++	return togo;
++}
++
++/* ppos is not advanced since the llseek operation is not permitted. */
+ static ssize_t mon_text_read_t(struct file *file, char __user *buf,
+-				size_t nbytes, loff_t *ppos)
++    size_t nbytes, loff_t *ppos)
+ {
+ 	struct mon_reader_text *rp = file->private_data;
+ 	struct mon_event_text *ep;
+ 	struct mon_text_ptr ptr;
++	ssize_t ret;
+ 
+-	if (IS_ERR(ep = mon_text_read_wait(rp, file)))
+-		return PTR_ERR(ep);
+ 	mutex_lock(&rp->printf_lock);
+-	ptr.cnt = 0;
+-	ptr.pbuf = rp->printf_buf;
+-	ptr.limit = rp->printf_size;
+-
+-	mon_text_read_head_t(rp, &ptr, ep);
+-	mon_text_read_statset(rp, &ptr, ep);
+-	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+-	    " %d", ep->length);
+-	mon_text_read_data(rp, &ptr, ep);
+-
+-	if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
+-		ptr.cnt = -EFAULT;
++
++	if (rp->printf_togo == 0) {
++
++		ep = mon_text_read_wait(rp, file);
++		if (IS_ERR(ep)) {
++			mutex_unlock(&rp->printf_lock);
++			return PTR_ERR(ep);
++		}
++		ptr.cnt = 0;
++		ptr.pbuf = rp->printf_buf;
++		ptr.limit = rp->printf_size;
++
++		mon_text_read_head_t(rp, &ptr, ep);
++		mon_text_read_statset(rp, &ptr, ep);
++		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
++		    " %d", ep->length);
++		mon_text_read_data(rp, &ptr, ep);
++
++		rp->printf_togo = ptr.cnt;
++		rp->printf_offset = 0;
++
++		kmem_cache_free(rp->e_slab, ep);
++	}
++
++	ret = mon_text_copy_to_user(rp, buf, nbytes);
+ 	mutex_unlock(&rp->printf_lock);
+-	kmem_cache_free(rp->e_slab, ep);
+-	return ptr.cnt;
++	return ret;
+ }
+ 
++/* ppos is not advanced since the llseek operation is not permitted. */
+ static ssize_t mon_text_read_u(struct file *file, char __user *buf,
+-				size_t nbytes, loff_t *ppos)
++    size_t nbytes, loff_t *ppos)
+ {
+ 	struct mon_reader_text *rp = file->private_data;
+ 	struct mon_event_text *ep;
+ 	struct mon_text_ptr ptr;
++	ssize_t ret;
+ 
+-	if (IS_ERR(ep = mon_text_read_wait(rp, file)))
+-		return PTR_ERR(ep);
+ 	mutex_lock(&rp->printf_lock);
+-	ptr.cnt = 0;
+-	ptr.pbuf = rp->printf_buf;
+-	ptr.limit = rp->printf_size;
+ 
+-	mon_text_read_head_u(rp, &ptr, ep);
+-	if (ep->type == 'E') {
+-		mon_text_read_statset(rp, &ptr, ep);
+-	} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
+-		mon_text_read_isostat(rp, &ptr, ep);
+-		mon_text_read_isodesc(rp, &ptr, ep);
+-	} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
+-		mon_text_read_intstat(rp, &ptr, ep);
+-	} else {
+-		mon_text_read_statset(rp, &ptr, ep);
++	if (rp->printf_togo == 0) {
++
++		ep = mon_text_read_wait(rp, file);
++		if (IS_ERR(ep)) {
++			mutex_unlock(&rp->printf_lock);
++			return PTR_ERR(ep);
++		}
++		ptr.cnt = 0;
++		ptr.pbuf = rp->printf_buf;
++		ptr.limit = rp->printf_size;
++
++		mon_text_read_head_u(rp, &ptr, ep);
++		if (ep->type == 'E') {
++			mon_text_read_statset(rp, &ptr, ep);
++		} else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
++			mon_text_read_isostat(rp, &ptr, ep);
++			mon_text_read_isodesc(rp, &ptr, ep);
++		} else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
++			mon_text_read_intstat(rp, &ptr, ep);
++		} else {
++			mon_text_read_statset(rp, &ptr, ep);
++		}
++		ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
++		    " %d", ep->length);
++		mon_text_read_data(rp, &ptr, ep);
++
++		rp->printf_togo = ptr.cnt;
++		rp->printf_offset = 0;
++
++		kmem_cache_free(rp->e_slab, ep);
+ 	}
+-	ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
+-	    " %d", ep->length);
+-	mon_text_read_data(rp, &ptr, ep);
+ 
+-	if (copy_to_user(buf, rp->printf_buf, ptr.cnt))
+-		ptr.cnt = -EFAULT;
++	ret = mon_text_copy_to_user(rp, buf, nbytes);
+ 	mutex_unlock(&rp->printf_lock);
+-	kmem_cache_free(rp->e_slab, ep);
+-	return ptr.cnt;
++	return ret;
+ }
+ 
+ static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 021d6880a3ed..b3c7670f0652 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -1052,7 +1052,7 @@ static int uas_post_reset(struct usb_interface *intf)
+ 		return 0;
+ 
+ 	err = uas_configure_endpoints(devinfo);
+-	if (err && err != ENODEV)
++	if (err && err != -ENODEV)
+ 		shost_printk(KERN_ERR, shost,
+ 			     "%s: alloc streams error %d after reset",
+ 			     __func__, err);
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index cd2e880979f2..b9e9eaa8c45c 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2131,6 +2131,13 @@ UNUSUAL_DEV(  0x22b8, 0x3010, 0x0001, 0x0001,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
+ 
++/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
++UNUSUAL_DEV(  0x152d, 0x2567, 0x0117, 0x0117,
++		"JMicron",
++		"USB to ATA/ATAPI Bridge",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA ),
++
+ /* Reported-by George Cherian <george.cherian@cavium.com> */
+ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+ 		"JMicron",
+diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
+index ada3e44f9932..792d5fbad4e5 100644
+--- a/drivers/watchdog/hpwdt.c
++++ b/drivers/watchdog/hpwdt.c
+@@ -51,6 +51,7 @@ static char expect_release;
+ static unsigned long hpwdt_is_open;
+ 
+ static void __iomem *pci_mem_addr;		/* the PCI-memory address */
++static unsigned long __iomem *hpwdt_nmistat;
+ static unsigned long __iomem *hpwdt_timer_reg;
+ static unsigned long __iomem *hpwdt_timer_con;
+ 
+@@ -474,6 +475,11 @@ static int hpwdt_time_left(void)
+ }
+ 
+ #ifdef CONFIG_HPWDT_NMI_DECODING
++static int hpwdt_my_nmi(void)
++{
++	return ioread8(hpwdt_nmistat) & 0x6;
++}
++
+ /*
+  *	NMI Handler
+  */
+@@ -485,6 +491,9 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
+ 	if (!hpwdt_nmi_decoding)
+ 		goto out;
+ 
++	if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi())
++		return NMI_DONE;
++
+ 	spin_lock_irqsave(&rom_lock, rom_pl);
+ 	if (!die_nmi_called && !is_icru && !is_uefi)
+ 		asminline_call(&cmn_regs, cru_rom_addr);
+@@ -686,7 +695,7 @@ static void dmi_find_icru(const struct dmi_header *dm, void *dummy)
+ 		smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
+ 		if (smbios_proliant_ptr->misc_features & 0x01)
+ 			is_icru = 1;
+-		if (smbios_proliant_ptr->misc_features & 0x408)
++		if (smbios_proliant_ptr->misc_features & 0x1400)
+ 			is_uefi = 1;
+ 	}
+ }
+@@ -826,6 +835,7 @@ static int hpwdt_init_one(struct pci_dev *dev,
+ 		retval = -ENOMEM;
+ 		goto error_pci_iomap;
+ 	}
++	hpwdt_nmistat	= pci_mem_addr + 0x6e;
+ 	hpwdt_timer_reg = pci_mem_addr + 0x70;
+ 	hpwdt_timer_con = pci_mem_addr + 0x72;
+ 
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index cea37ee4c615..078982f509e3 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -378,10 +378,8 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
+ 		}
+ 		range = 0;
+ 		while (range < pages) {
+-			if (map->unmap_ops[offset+range].handle == -1) {
+-				range--;
++			if (map->unmap_ops[offset+range].handle == -1)
+ 				break;
+-			}
+ 			range++;
+ 		}
+ 		err = __unmap_grant_pages(map, offset, range);
+@@ -876,8 +874,10 @@ unlock_out:
+ out_unlock_put:
+ 	mutex_unlock(&priv->lock);
+ out_put_map:
+-	if (use_ptemod)
++	if (use_ptemod) {
+ 		map->vma = NULL;
++		unmap_grant_pages(map, 0, map->count);
++	}
+ 	gntdev_put_map(priv, map);
+ 	return err;
+ }
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index fb3e64d37cb4..6b16b8653d98 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -82,12 +82,6 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+-		if (acl) {
+-			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-			if (ret)
+-				return ret;
+-		}
+-		ret = 0;
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+ 		if (!S_ISDIR(inode->i_mode))
+@@ -123,7 +117,18 @@ out:
+ 
+ int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+-	return __btrfs_set_acl(NULL, inode, acl, type);
++	int ret;
++	umode_t old_mode = inode->i_mode;
++
++	if (type == ACL_TYPE_ACCESS && acl) {
++		ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++		if (ret)
++			return ret;
++	}
++	ret = __btrfs_set_acl(NULL, inode, acl, type);
++	if (ret)
++		inode->i_mode = old_mode;
++	return ret;
+ }
+ 
+ /*
+diff --git a/fs/super.c b/fs/super.c
+index 928c20f47af9..3fa6b945a34e 100644
+--- a/fs/super.c
++++ b/fs/super.c
+@@ -481,7 +481,11 @@ retry:
+ 	hlist_add_head(&s->s_instances, &type->fs_supers);
+ 	spin_unlock(&sb_lock);
+ 	get_filesystem(type);
+-	register_shrinker(&s->s_shrink);
++	err = register_shrinker(&s->s_shrink);
++	if (err) {
++		deactivate_locked_super(s);
++		s = ERR_PTR(err);
++	}
+ 	return s;
+ }
+ 
+diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
+index 7d81032a645f..7806b3e0bf18 100644
+--- a/fs/xfs/xfs_qm.c
++++ b/fs/xfs/xfs_qm.c
+@@ -47,7 +47,7 @@
+ STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
+ STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
+ 
+-
++STATIC void	xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
+ STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
+ /*
+  * We use the batch lookup interface to iterate over the dquots as it
+@@ -660,9 +660,17 @@ xfs_qm_init_quotainfo(
+ 	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
+ 	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
+ 	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
+-	register_shrinker(&qinf->qi_shrinker);
++
++	error = register_shrinker(&qinf->qi_shrinker);
++	if (error)
++		goto out_free_inos;
++
+ 	return 0;
+ 
++out_free_inos:
++	mutex_destroy(&qinf->qi_quotaofflock);
++	mutex_destroy(&qinf->qi_tree_lock);
++	xfs_qm_destroy_quotainos(qinf);
+ out_free_lru:
+ 	list_lru_destroy(&qinf->qi_lru);
+ out_free_qinf:
+@@ -671,7 +679,6 @@ out_free_qinf:
+ 	return error;
+ }
+ 
+-
+ /*
+  * Gets called when unmounting a filesystem or when all quotas get
+  * turned off.
+@@ -688,19 +695,8 @@ xfs_qm_destroy_quotainfo(
+ 
+ 	unregister_shrinker(&qi->qi_shrinker);
+ 	list_lru_destroy(&qi->qi_lru);
+-
+-	if (qi->qi_uquotaip) {
+-		IRELE(qi->qi_uquotaip);
+-		qi->qi_uquotaip = NULL; /* paranoia */
+-	}
+-	if (qi->qi_gquotaip) {
+-		IRELE(qi->qi_gquotaip);
+-		qi->qi_gquotaip = NULL;
+-	}
+-	if (qi->qi_pquotaip) {
+-		IRELE(qi->qi_pquotaip);
+-		qi->qi_pquotaip = NULL;
+-	}
++	xfs_qm_destroy_quotainos(qi);
++	mutex_destroy(&qi->qi_tree_lock);
+ 	mutex_destroy(&qi->qi_quotaofflock);
+ 	kmem_free(qi);
+ 	mp->m_quotainfo = NULL;
+@@ -1562,6 +1558,24 @@ error_rele:
+ 	return error;
+ }
+ 
++STATIC void
++xfs_qm_destroy_quotainos(
++	xfs_quotainfo_t	*qi)
++{
++	if (qi->qi_uquotaip) {
++		IRELE(qi->qi_uquotaip);
++		qi->qi_uquotaip = NULL; /* paranoia */
++	}
++	if (qi->qi_gquotaip) {
++		IRELE(qi->qi_gquotaip);
++		qi->qi_gquotaip = NULL;
++	}
++	if (qi->qi_pquotaip) {
++		IRELE(qi->qi_pquotaip);
++		qi->qi_pquotaip = NULL;
++	}
++}
++
+ STATIC void
+ xfs_qm_dqfree_one(
+ 	struct xfs_dquot	*dqp)
+diff --git a/include/linux/nospec.h b/include/linux/nospec.h
+index b99bced39ac2..115381228203 100644
+--- a/include/linux/nospec.h
++++ b/include/linux/nospec.h
+@@ -5,6 +5,7 @@
+ 
+ #ifndef _LINUX_NOSPEC_H
+ #define _LINUX_NOSPEC_H
++#include <asm/barrier.h>
+ 
+ /**
+  * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
+@@ -66,7 +67,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ 	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
+ 	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
+ 									\
+-	_i &= _mask;							\
+-	_i;								\
++	(typeof(_i)) (_i & _mask);					\
+ })
+ #endif /* _LINUX_NOSPEC_H */
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index de2a722fe3cf..ea4f81c2a6d5 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -56,4 +56,7 @@
+  */
+ #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL	BIT(11)
+ 
++/* Device needs a pause after every control message. */
++#define USB_QUIRK_DELAY_CTRL_MSG		BIT(13)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
+index deee212af8e0..e450bdbc4d84 100644
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -449,6 +449,7 @@ extern bool cancel_delayed_work_sync(struct delayed_work *dwork);
+ 
+ extern void workqueue_set_max_active(struct workqueue_struct *wq,
+ 				     int max_active);
++extern struct work_struct *current_work(void);
+ extern bool current_is_workqueue_rescuer(void);
+ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
+ extern unsigned int work_busy(struct work_struct *work);
+diff --git a/include/net/udplite.h b/include/net/udplite.h
+index 80761938b9a7..8228155b305e 100644
+--- a/include/net/udplite.h
++++ b/include/net/udplite.h
+@@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
+ 		UDP_SKB_CB(skb)->cscov = cscov;
+ 		if (skb->ip_summed == CHECKSUM_COMPLETE)
+ 			skb->ip_summed = CHECKSUM_NONE;
++		skb->csum_valid = 0;
+         }
+ 
+ 	return 0;
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index e82a5f40a0ac..20df094556ae 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1148,7 +1148,12 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ 
+ 	cpu_base = raw_cpu_ptr(&hrtimer_bases);
+ 
+-	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
++	/*
++	 * POSIX magic: Relative CLOCK_REALTIME timers are not affected by
++	 * clock modifications, so they need to become CLOCK_MONOTONIC to
++	 * ensure POSIX compliance.
++	 */
++	if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
+ 		clock_id = CLOCK_MONOTONIC;
+ 
+ 	base = hrtimer_clockid_to_base(clock_id);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 376db986db9b..782ba721984b 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4062,6 +4062,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
+ }
+ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
+ 
++/**
++ * current_work - retrieve %current task's work struct
++ *
++ * Determine if %current task is a workqueue worker and what it's working on.
++ * Useful to find out the context that the %current task is running in.
++ *
++ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
++ */
++struct work_struct *current_work(void)
++{
++	struct worker *worker = current_wq_worker();
++
++	return worker ? worker->current_work : NULL;
++}
++EXPORT_SYMBOL(current_work);
++
+ /**
+  * current_is_workqueue_rescuer - is %current workqueue rescuer?
+  *
+diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
+index a89d041592c8..e2851aafd1d3 100644
+--- a/lib/mpi/longlong.h
++++ b/lib/mpi/longlong.h
+@@ -671,7 +671,23 @@ do {						\
+ 	**************  MIPS/64  **************
+ 	***************************************/
+ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
+-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
++#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
++/*
++ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
++ * code below, so we special case MIPS64r6 until the compiler can do better.
++ */
++#define umul_ppmm(w1, w0, u, v)						\
++do {									\
++	__asm__ ("dmulu %0,%1,%2"					\
++		 : "=d" ((UDItype)(w0))					\
++		 : "d" ((UDItype)(u)),					\
++		   "d" ((UDItype)(v)));					\
++	__asm__ ("dmuhu %0,%1,%2"					\
++		 : "=d" ((UDItype)(w1))					\
++		 : "d" ((UDItype)(u)),					\
++		   "d" ((UDItype)(v)));					\
++} while (0)
++#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+ #define umul_ppmm(w1, w0, u, v) \
+ do {									\
+ 	typedef unsigned int __ll_UTItype __attribute__((mode(TI)));	\
+diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
+index 4905845a94e9..b5c4941fc068 100644
+--- a/net/bridge/br_sysfs_if.c
++++ b/net/bridge/br_sysfs_if.c
+@@ -229,6 +229,9 @@ static ssize_t brport_show(struct kobject *kobj,
+ 	struct brport_attribute *brport_attr = to_brport_attr(attr);
+ 	struct net_bridge_port *p = to_brport(kobj);
+ 
++	if (!brport_attr->show)
++		return -EINVAL;
++
+ 	return brport_attr->show(p, buf);
+ }
+ 
+diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
+index 9024283d2bca..9637a681bdda 100644
+--- a/net/bridge/netfilter/ebt_among.c
++++ b/net/bridge/netfilter/ebt_among.c
+@@ -172,18 +172,35 @@ ebt_among_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ 	return true;
+ }
+ 
++static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
++{
++	return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
++}
++
+ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ {
+ 	const struct ebt_among_info *info = par->matchinfo;
+ 	const struct ebt_entry_match *em =
+ 		container_of(par->matchinfo, const struct ebt_entry_match, data);
+-	int expected_length = sizeof(struct ebt_among_info);
++	unsigned int expected_length = sizeof(struct ebt_among_info);
+ 	const struct ebt_mac_wormhash *wh_dst, *wh_src;
+ 	int err;
+ 
++	if (expected_length > em->match_size)
++		return -EINVAL;
++
+ 	wh_dst = ebt_among_wh_dst(info);
+-	wh_src = ebt_among_wh_src(info);
++	if (poolsize_invalid(wh_dst))
++		return -EINVAL;
++
+ 	expected_length += ebt_mac_wormhash_size(wh_dst);
++	if (expected_length > em->match_size)
++		return -EINVAL;
++
++	wh_src = ebt_among_wh_src(info);
++	if (poolsize_invalid(wh_src))
++		return -EINVAL;
++
+ 	expected_length += ebt_mac_wormhash_size(wh_src);
+ 
+ 	if (em->match_size != EBT_ALIGN(expected_length)) {
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 91180a7fc943..3069eafeb33d 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2019,7 +2019,9 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
+ 		if (match_kern)
+ 			match_kern->match_size = ret;
+ 
+-		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
++		if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
++			return -EINVAL;
++
+ 		match32 = (struct compat_ebt_entry_mwt *) buf;
+ 	}
+ 
+@@ -2076,6 +2078,15 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
+ 	 *
+ 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
+ 	 */
++	for (i = 0; i < 4 ; ++i) {
++		if (offsets[i] >= *total)
++			return -EINVAL;
++		if (i == 0)
++			continue;
++		if (offsets[i-1] > offsets[i])
++			return -EINVAL;
++	}
++
+ 	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
+ 		struct compat_ebt_entry_mwt *match32;
+ 		unsigned int size;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0ccae464b46e..c2d927f91a30 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2129,8 +2129,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
+  */
+ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+ {
++	bool disabling;
+ 	int rc;
+ 
++	disabling = txq < dev->real_num_tx_queues;
++
+ 	if (txq < 1 || txq > dev->num_tx_queues)
+ 		return -EINVAL;
+ 
+@@ -2146,15 +2149,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
+ 		if (dev->num_tc)
+ 			netif_setup_tc(dev, txq);
+ 
+-		if (txq < dev->real_num_tx_queues) {
++		dev->real_num_tx_queues = txq;
++
++		if (disabling) {
++			synchronize_net();
+ 			qdisc_reset_all_tx_gt(dev, txq);
+ #ifdef CONFIG_XPS
+ 			netif_reset_xps_queues_gt(dev, txq);
+ #endif
+ 		}
++	} else {
++		dev->real_num_tx_queues = txq;
+ 	}
+ 
+-	dev->real_num_tx_queues = txq;
+ 	return 0;
+ }
+ EXPORT_SYMBOL(netif_set_real_num_tx_queues);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6a9a495aff23..7a6400345cb9 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -122,10 +122,13 @@ static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
+ static int ip_rt_error_cost __read_mostly	= HZ;
+ static int ip_rt_error_burst __read_mostly	= 5 * HZ;
+ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+-static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
++static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
+ static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
++
++static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
++
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -2689,7 +2692,8 @@ static struct ctl_table ipv4_route_table[] = {
+ 		.data		= &ip_rt_min_pmtu,
+ 		.maxlen		= sizeof(int),
+ 		.mode		= 0644,
+-		.proc_handler	= proc_dointvec,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &ip_min_valid_pmtu,
+ 	},
+ 	{
+ 		.procname	= "min_adv_mss",
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 456718921552..75758e3d6c81 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1733,6 +1733,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
+ 		err = udplite_checksum_init(skb, uh);
+ 		if (err)
+ 			return err;
++
++		if (UDP_SKB_CB(skb)->partial_cov) {
++			skb->csum = inet_compute_pseudo(skb, proto);
++			return 0;
++		}
+ 	}
+ 
+ 	return skb_checksum_init_zero_check(skb, proto, uh->check,
+diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
+index 9a4d7322fb22..391a8fedb27e 100644
+--- a/net/ipv6/ip6_checksum.c
++++ b/net/ipv6/ip6_checksum.c
+@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
+ 		err = udplite_checksum_init(skb, uh);
+ 		if (err)
+ 			return err;
++
++		if (UDP_SKB_CB(skb)->partial_cov) {
++			skb->csum = ip6_compute_pseudo(skb, proto);
++			return 0;
++		}
+ 	}
+ 
+ 	/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
+diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+index e76900e0aa92..25df2ce92ad8 100644
+--- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
++++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
+@@ -99,6 +99,10 @@ static bool nf_nat_ipv6_manip_pkt(struct sk_buff *skb,
+ 	    !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff,
+ 				target, maniptype))
+ 		return false;
++
++	/* must reload, offset might have changed */
++	ipv6h = (void *)skb->data + iphdroff;
++
+ manip_addr:
+ 	if (maniptype == NF_NAT_MANIP_SRC)
+ 		ipv6h->saddr = target->src.u3.in6;
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 135fe458bfac..d17efa1b8473 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1379,6 +1379,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
+ 	}
+ 
+ 	rt->dst.flags |= DST_HOST;
++	rt->dst.input = ip6_input;
+ 	rt->dst.output  = ip6_output;
+ 	atomic_set(&rt->dst.__refcnt, 1);
+ 	rt->rt6i_gateway  = fl6->daddr;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 295502b261a8..f4034c4eadf7 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
+ #ifdef CONFIG_IPV6_SIT_6RD
+ 	struct ip_tunnel *t = netdev_priv(dev);
+ 
+-	if (t->dev == sitn->fb_tunnel_dev) {
++	if (dev == sitn->fb_tunnel_dev) {
+ 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
+ 		t->ip6rd.relay_prefix = 0;
+ 		t->ip6rd.prefixlen = 16;
+diff --git a/net/netfilter/nf_nat_proto_common.c b/net/netfilter/nf_nat_proto_common.c
+index fbce552a796e..7d7466dbf663 100644
+--- a/net/netfilter/nf_nat_proto_common.c
++++ b/net/netfilter/nf_nat_proto_common.c
+@@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ 				 const struct nf_conn *ct,
+ 				 u16 *rover)
+ {
+-	unsigned int range_size, min, i;
++	unsigned int range_size, min, max, i;
+ 	__be16 *portptr;
+ 	u_int16_t off;
+ 
+@@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
+ 		}
+ 	} else {
+ 		min = ntohs(range->min_proto.all);
+-		range_size = ntohs(range->max_proto.all) - min + 1;
++		max = ntohs(range->max_proto.all);
++		if (unlikely(max < min))
++			swap(max, min);
++		range_size = max - min + 1;
+ 	}
+ 
+ 	if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
+diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
+index f407ebc13481..95b6dedc5ac7 100644
+--- a/net/netfilter/xt_IDLETIMER.c
++++ b/net/netfilter/xt_IDLETIMER.c
+@@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
+ 		    (unsigned long) info->timer);
+ 	info->timer->refcnt = 1;
+ 
++	INIT_WORK(&info->timer->work, idletimer_tg_work);
++
+ 	mod_timer(&info->timer->timer,
+ 		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+ 
+-	INIT_WORK(&info->timer->work, idletimer_tg_work);
+-
+ 	return 0;
+ 
+ out_free_attr:
+@@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
+ 		pr_debug("timeout value is zero\n");
+ 		return -EINVAL;
+ 	}
+-
++	if (info->timeout >= INT_MAX / 1000) {
++		pr_debug("timeout value is too big\n");
++		return -EINVAL;
++	}
+ 	if (info->label[0] == '\0' ||
+ 	    strnlen(info->label,
+ 		    MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
+diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
+index 3ba31c194cce..0858fe17e14a 100644
+--- a/net/netfilter/xt_LED.c
++++ b/net/netfilter/xt_LED.c
+@@ -141,10 +141,11 @@ static int led_tg_check(const struct xt_tgchk_param *par)
+ 		goto exit_alloc;
+ 	}
+ 
+-	/* See if we need to set up a timer */
+-	if (ledinfo->delay > 0)
+-		setup_timer(&ledinternal->timer, led_timeout_callback,
+-			    (unsigned long)ledinternal);
++	/* Since the ledinternal timer can be shared between multiple targets,
++	 * always set it up, even if the current target does not need it.
++	 */
++	setup_timer(&ledinternal->timer, led_timeout_callback,
++		    (unsigned long)ledinternal);
+ 
+ 	list_add_tail(&ledinternal->list, &xt_led_triggers);
+ 
+@@ -181,8 +182,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
+ 
+ 	list_del(&ledinternal->list);
+ 
+-	if (ledinfo->delay > 0)
+-		del_timer_sync(&ledinternal->timer);
++	del_timer_sync(&ledinternal->timer);
+ 
+ 	led_trigger_unregister(&ledinternal->netfilter_led_trigger);
+ 
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 3d111b053e3e..97c22c818134 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1118,6 +1118,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ {
+ 	struct sk_buff *tmp;
+ 	struct net *net, *prev = NULL;
++	bool delivered = false;
+ 	int err;
+ 
+ 	for_each_net_rcu(net) {
+@@ -1129,14 +1130,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ 			}
+ 			err = nlmsg_multicast(prev->genl_sock, tmp,
+ 					      portid, group, flags);
+-			if (err)
++			if (!err)
++				delivered = true;
++			else if (err != -ESRCH)
+ 				goto error;
+ 		}
+ 
+ 		prev = net;
+ 	}
+ 
+-	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
++	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
++	if (!err)
++		delivered = true;
++	else if (err != -ESRCH)
++		goto error;
++	return delivered ? 0 : -ESRCH;
+  error:
+ 	kfree_skb(skb);
+ 	return err;
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 83a07d468644..5235f56d735d 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1367,10 +1367,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ 	sctp_chunkhdr_t *chunk_hdr;
+ 	struct sk_buff *skb;
+ 	struct sock *sk;
++	int chunklen;
++
++	chunklen = sizeof(*chunk_hdr) + paylen;
++	if (chunklen > SCTP_MAX_CHUNK_LEN)
++		goto nodata;
+ 
+ 	/* No need to allocate LL here, as this is only a chunk. */
+-	skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen),
+-			GFP_ATOMIC);
++	skb = alloc_skb(chunklen, GFP_ATOMIC);
+ 	if (!skb)
+ 		goto nodata;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index c44e3d208804..195b54a19f1e 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -4455,7 +4455,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
+ 	len = sizeof(int);
+ 	if (put_user(len, optlen))
+ 		return -EFAULT;
+-	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
++	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
+ 		return -EFAULT;
+ 	return 0;
+ }
+@@ -5032,6 +5032,9 @@ copy_getaddrs:
+ 		err = -EFAULT;
+ 		goto out;
+ 	}
++	/* XXX: We should have accounted for sizeof(struct sctp_getaddrs) too,
++	 * but we can't change it anymore.
++	 */
+ 	if (put_user(bytes_copied, optlen))
+ 		err = -EFAULT;
+ out:
+@@ -5468,7 +5471,7 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
+ 		params.assoc_id = 0;
+ 	} else if (len >= sizeof(struct sctp_assoc_value)) {
+ 		len = sizeof(struct sctp_assoc_value);
+-		if (copy_from_user(&params, optval, sizeof(params)))
++		if (copy_from_user(&params, optval, len))
+ 			return -EFAULT;
+ 	} else
+ 		return -EINVAL;
+@@ -5637,7 +5640,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ 
+ 	if (len < sizeof(struct sctp_authkeyid))
+ 		return -EINVAL;
+-	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
++
++	len = sizeof(struct sctp_authkeyid);
++	if (copy_from_user(&val, optval, len))
+ 		return -EFAULT;
+ 
+ 	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
+@@ -5649,7 +5654,6 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
+ 	else
+ 		val.scact_keynumber = ep->active_key_id;
+ 
+-	len = sizeof(struct sctp_authkeyid);
+ 	if (put_user(len, optlen))
+ 		return -EFAULT;
+ 	if (copy_to_user(optval, &val, len))
+@@ -5675,7 +5679,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+ 	if (len < sizeof(struct sctp_authchunks))
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
++	if (copy_from_user(&val, optval, sizeof(val)))
+ 		return -EFAULT;
+ 
+ 	to = p->gauth_chunks;
+@@ -5720,7 +5724,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+ 	if (len < sizeof(struct sctp_authchunks))
+ 		return -EINVAL;
+ 
+-	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
++	if (copy_from_user(&val, optval, sizeof(val)))
+ 		return -EFAULT;
+ 
+ 	to = p->gauth_chunks;
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index b9ce5da25938..dac0a54e39de 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -919,7 +919,8 @@ int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
+ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ 					struct snd_seq_event *event,
+ 					struct file *file, int blocking,
+-					int atomic, int hop)
++					int atomic, int hop,
++					struct mutex *mutexp)
+ {
+ 	struct snd_seq_event_cell *cell;
+ 	int err;
+@@ -957,7 +958,8 @@ static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
+ 		return -ENXIO; /* queue is not allocated */
+ 
+ 	/* allocate an event cell */
+-	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic, file);
++	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
++				file, mutexp);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -1026,12 +1028,11 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 		return -ENXIO;
+ 
+ 	/* allocate the pool now if the pool is not allocated yet */ 
++	mutex_lock(&client->ioctl_mutex);
+ 	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+-		mutex_lock(&client->ioctl_mutex);
+ 		err = snd_seq_pool_init(client->pool);
+-		mutex_unlock(&client->ioctl_mutex);
+ 		if (err < 0)
+-			return -ENOMEM;
++			goto out;
+ 	}
+ 
+ 	/* only process whole events */
+@@ -1082,7 +1083,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 		/* ok, enqueue it */
+ 		err = snd_seq_client_enqueue_event(client, &event, file,
+ 						   !(file->f_flags & O_NONBLOCK),
+-						   0, 0);
++						   0, 0, &client->ioctl_mutex);
+ 		if (err < 0)
+ 			break;
+ 
+@@ -1093,6 +1094,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 		written += len;
+ 	}
+ 
++ out:
++	mutex_unlock(&client->ioctl_mutex);
+ 	return written ? written : err;
+ }
+ 
+@@ -1924,6 +1927,9 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
+ 	    (! snd_seq_write_pool_allocated(client) ||
+ 	     info.output_pool != client->pool->size)) {
+ 		if (snd_seq_write_pool_allocated(client)) {
++			/* is the pool in use? */
++			if (atomic_read(&client->pool->counter))
++				return -EBUSY;
+ 			/* remove all existing cells */
+ 			snd_seq_pool_mark_closing(client->pool);
+ 			snd_seq_queue_client_leave_cells(client->number);
+@@ -2348,7 +2354,8 @@ static int kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ 	if (! cptr->accept_output)
+ 		result = -EPERM;
+ 	else /* send it */
+-		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking, atomic, hop);
++		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
++						      atomic, hop, NULL);
+ 
+ 	snd_seq_client_unlock(cptr);
+ 	return result;
+diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
+index 3490d21ab9e7..9acbed1ac982 100644
+--- a/sound/core/seq/seq_fifo.c
++++ b/sound/core/seq/seq_fifo.c
+@@ -123,7 +123,7 @@ int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
+ 		return -EINVAL;
+ 
+ 	snd_use_lock_use(&f->use_lock);
+-	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL); /* always non-blocking */
++	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
+ 	if (err < 0) {
+ 		if ((err == -ENOMEM) || (err == -EAGAIN))
+ 			atomic_inc(&f->overflow);
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 5847c4475bf3..4c8cbcd89887 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -221,7 +221,8 @@ void snd_seq_cell_free(struct snd_seq_event_cell * cell)
+  */
+ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ 			      struct snd_seq_event_cell **cellp,
+-			      int nonblock, struct file *file)
++			      int nonblock, struct file *file,
++			      struct mutex *mutexp)
+ {
+ 	struct snd_seq_event_cell *cell;
+ 	unsigned long flags;
+@@ -245,7 +246,11 @@ static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
+ 		set_current_state(TASK_INTERRUPTIBLE);
+ 		add_wait_queue(&pool->output_sleep, &wait);
+ 		spin_unlock_irq(&pool->lock);
++		if (mutexp)
++			mutex_unlock(mutexp);
+ 		schedule();
++		if (mutexp)
++			mutex_lock(mutexp);
+ 		spin_lock_irq(&pool->lock);
+ 		remove_wait_queue(&pool->output_sleep, &wait);
+ 		/* interrupted? */
+@@ -288,7 +293,7 @@ __error:
+  */
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ 		      struct snd_seq_event_cell **cellp, int nonblock,
+-		      struct file *file)
++		      struct file *file, struct mutex *mutexp)
+ {
+ 	int ncells, err;
+ 	unsigned int extlen;
+@@ -305,7 +310,7 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ 	if (ncells >= pool->total_elements)
+ 		return -ENOMEM;
+ 
+-	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
++	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
+ 	if (err < 0)
+ 		return err;
+ 
+@@ -331,7 +336,8 @@ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+ 			int size = sizeof(struct snd_seq_event);
+ 			if (len < size)
+ 				size = len;
+-			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
++			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
++						 mutexp);
+ 			if (err < 0)
+ 				goto __error;
+ 			if (cell->event.data.ext.ptr == NULL)
+diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
+index 32f959c17786..3abe306c394a 100644
+--- a/sound/core/seq/seq_memory.h
++++ b/sound/core/seq/seq_memory.h
+@@ -66,7 +66,8 @@ struct snd_seq_pool {
+ void snd_seq_cell_free(struct snd_seq_event_cell *cell);
+ 
+ int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
+-		      struct snd_seq_event_cell **cellp, int nonblock, struct file *file);
++		      struct snd_seq_event_cell **cellp, int nonblock,
++		      struct file *file, struct mutex *mutexp);
+ 
+ /* return number of unused (free) cells */
+ static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 861dc57cb082..0fd1402e427b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4615,6 +4615,16 @@ static void alc298_fixup_speaker_volume(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* disable DAC3 (0x06) selection on NID 0x17 as it has no volume amp control */
++static void alc295_fixup_disable_dac3(struct hda_codec *codec,
++				      const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		hda_nid_t conn[2] = { 0x02, 0x03 };
++		snd_hda_override_conn_list(codec, 0x17, 2, conn);
++	}
++}
++
+ /* Hook to update amp GPIO4 for automute */
+ static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
+ 					  struct hda_jack_callback *jack)
+@@ -4764,6 +4774,7 @@ enum {
+ 	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ 	ALC255_FIXUP_DELL_SPK_NOISE,
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++	ALC295_FIXUP_DISABLE_DAC3,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
+@@ -5453,6 +5464,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
+ 	},
++	[ALC295_FIXUP_DISABLE_DAC3] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc295_fixup_disable_dac3,
++	},
+ 	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -5510,6 +5525,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
++	SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
+ 	SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ 	SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 32719f28aa86..c7c8aac1ace6 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3274,4 +3274,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 	}
+ },
+ 
++{
++	/*
++	 * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
++	 * even though they advertise more. The capture interface doesn't work
++	 * even on Windows.
++	 */
++	USB_DEVICE(0x19b5, 0x0021),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_MIXER,
++			},
++			/* Capture */
++			{
++				.ifnum = 1,
++				.type = QUIRK_IGNORE_INTERFACE,
++			},
++			/* Playback */
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
++				.data = &(const struct audioformat) {
++					.formats = SNDRV_PCM_FMTBIT_S16_LE,
++					.channels = 2,
++					.iface = 2,
++					.altsetting = 1,
++					.altset_idx = 1,
++					.attributes = UAC_EP_CS_ATTR_FILL_MAX |
++						UAC_EP_CS_ATTR_SAMPLE_RATE,
++					.endpoint = 0x03,
++					.ep_attr = USB_ENDPOINT_XFER_ISOC,
++					.rates = SNDRV_PCM_RATE_48000,
++					.rate_min = 48000,
++					.rate_max = 48000,
++					.nr_rates = 1,
++					.rate_table = (unsigned int[]) {
++						48000
++					}
++				}
++			},
++		}
++	}
++},
++
+ #undef USB_DEVICE_VENDOR_SPEC

diff --git a/1051_linux-4.1.52.patch b/1051_linux-4.1.52.patch
new file mode 100644
index 0000000..6648ea5
--- /dev/null
+++ b/1051_linux-4.1.52.patch
@@ -0,0 +1,15535 @@
+diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
+index caf297bee1fb..c28d4eb83b76 100644
+--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
++++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
+@@ -35,6 +35,15 @@ Optional properties:
+ - ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode.
+ 	Selection primary or secondary function associated to GPADC_START
+ 	and SYSEN2 pin/pad for DVFS2 interface
++- ti,palmas-override-powerhold: This is applicable for PMICs on which
++	GPIO7 is configured in POWERHOLD mode, which takes priority over
++	the DEV_ON bit and keeps the PMIC supplies on even after the DEV_ON
++	bit is turned off. This property enables the driver to override the
++	POWERHOLD value on GPIO7 so as to turn off the PMIC in power-off
++	scenarios. So if ti,palmas-override-powerhold is set, the GPIO_7
++	field should never be muxed to anything else; it should be set to
++	POWERHOLD by default, and only in power-off scenarios will the
++	driver override the mux value.
+ 
+ This binding uses the following generic properties as defined in
+ pinctrl-bindings.txt:
+diff --git a/Makefile b/Makefile
+index caccc6f16d62..1f5560281f70 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -772,6 +772,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+ # disable invalid "can't wrap" optimizations for signed / pointers
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-strict-overflow)
+ 
++# clang sets -fmerge-all-constants by default as an optimization, but this
++# is non-conforming behavior for C and in fact breaks the kernel, so we
++# need to disable it here generally.
++KBUILD_CFLAGS	+= $(call cc-option,-fno-merge-all-constants)
++
++# For gcc, -fno-merge-all-constants disables everything, but it is fine
++# to have actual conforming behavior enabled.
++KBUILD_CFLAGS	+= $(call cc-option,-fmerge-constants)
++
+ # Make sure -fstack-check isn't enabled (like gentoo apparently did)
+ KBUILD_CFLAGS  += $(call cc-option,-fno-stack-check,)
+ 
+diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
+index 6a61deed4a85..ab228ed45945 100644
+--- a/arch/alpha/kernel/console.c
++++ b/arch/alpha/kernel/console.c
+@@ -20,6 +20,7 @@
+ struct pci_controller *pci_vga_hose;
+ static struct resource alpha_vga = {
+ 	.name	= "alpha-vga+",
++	.flags	= IORESOURCE_IO,
+ 	.start	= 0x3C0,
+ 	.end	= 0x3DF
+ };
+diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
+index 0d35ab64641c..ac3ca3636405 100644
+--- a/arch/arm/boot/dts/am335x-pepper.dts
++++ b/arch/arm/boot/dts/am335x-pepper.dts
+@@ -138,7 +138,7 @@
+ &audio_codec {
+ 	status = "okay";
+ 
+-	gpio-reset = <&gpio1 16 GPIO_ACTIVE_LOW>;
++	reset-gpios = <&gpio1 16 GPIO_ACTIVE_LOW>;
+ 	AVDD-supply = <&ldo3_reg>;
+ 	IOVDD-supply = <&ldo3_reg>;
+ 	DRVDD-supply = <&ldo3_reg>;
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+index e8397879d0a7..825237d03168 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+@@ -299,6 +299,7 @@
+ 		interrupt-controller;
+ 
+ 		ti,system-power-controller;
++		ti,palmas-override-powerhold;
+ 
+ 		tps659038_pmic {
+ 			compatible = "ti,tps659038-pmic";
+diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
+index a7da0dd0c98f..0898213f3bb2 100644
+--- a/arch/arm/boot/dts/at91sam9g25.dtsi
++++ b/arch/arm/boot/dts/at91sam9g25.dtsi
+@@ -21,7 +21,7 @@
+ 				atmel,mux-mask = <
+ 				      /*    A         B          C     */
+ 				       0xffffffff 0xffe0399f 0xc000001c  /* pioA */
+-				       0x0007ffff 0x8000fe3f 0x00000000  /* pioB */
++				       0x0007ffff 0x00047e3f 0x00000000  /* pioB */
+ 				       0x80000000 0x07c0ffff 0xb83fffff  /* pioC */
+ 				       0x003fffff 0x003f8000 0x00000000  /* pioD */
+ 				      >;
+diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
+index 096f68be99e2..1e6369c24645 100644
+--- a/arch/arm/boot/dts/dra7-evm.dts
++++ b/arch/arm/boot/dts/dra7-evm.dts
+@@ -285,6 +285,8 @@
+ 	tps659038: tps659038@58 {
+ 		compatible = "ti,tps659038";
+ 		reg = <0x58>;
++		ti,palmas-override-powerhold;
++		ti,system-power-controller;
+ 
+ 		tps659038_pmic {
+ 			compatible = "ti,tps659038-pmic";
+diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+index 9e096d811bed..7a032dd84bb2 100644
+--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+@@ -88,6 +88,7 @@
+ 		clocks = <&clks 201>;
+ 		VDDA-supply = <&reg_2p5v>;
+ 		VDDIO-supply = <&reg_3p3v>;
++		lrclk-strength = <3>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index c70bb27ac65a..3429ceb1d577 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -128,7 +128,7 @@
+ 		};
+ 
+ 		esdhc: esdhc@1560000 {
+-			compatible = "fsl,esdhc";
++			compatible = "fsl,ls1021a-esdhc", "fsl,esdhc";
+ 			reg = <0x0 0x1560000 0x0 0x10000>;
+ 			interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ 			clock-frequency = <0>;
+diff --git a/arch/arm/boot/dts/moxart-uc7112lx.dts b/arch/arm/boot/dts/moxart-uc7112lx.dts
+index 10d088df0c35..4a962a26482d 100644
+--- a/arch/arm/boot/dts/moxart-uc7112lx.dts
++++ b/arch/arm/boot/dts/moxart-uc7112lx.dts
+@@ -6,7 +6,7 @@
+  */
+ 
+ /dts-v1/;
+-/include/ "moxart.dtsi"
++#include "moxart.dtsi"
+ 
+ / {
+ 	model = "MOXA UC-7112-LX";
+diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
+index 1fd27ed65a01..64f2f44235d0 100644
+--- a/arch/arm/boot/dts/moxart.dtsi
++++ b/arch/arm/boot/dts/moxart.dtsi
+@@ -6,6 +6,7 @@
+  */
+ 
+ /include/ "skeleton.dtsi"
++#include <dt-bindings/interrupt-controller/irq.h>
+ 
+ / {
+ 	compatible = "moxa,moxart";
+@@ -36,8 +37,8 @@
+ 		ranges;
+ 
+ 		intc: interrupt-controller@98800000 {
+-			compatible = "moxa,moxart-ic";
+-			reg = <0x98800000 0x38>;
++			compatible = "moxa,moxart-ic", "faraday,ftintc010";
++			reg = <0x98800000 0x100>;
+ 			interrupt-controller;
+ 			#interrupt-cells = <2>;
+ 			interrupt-mask = <0x00080000>;
+@@ -59,7 +60,7 @@
+ 		timer: timer@98400000 {
+ 			compatible = "moxa,moxart-timer";
+ 			reg = <0x98400000 0x42>;
+-			interrupts = <19 1>;
++			interrupts = <19 IRQ_TYPE_EDGE_FALLING>;
+ 			clocks = <&clk_apb>;
+ 		};
+ 
+@@ -80,7 +81,7 @@
+ 		dma: dma@90500000 {
+ 			compatible = "moxa,moxart-dma";
+ 			reg = <0x90500080 0x40>;
+-			interrupts = <24 0>;
++			interrupts = <24 IRQ_TYPE_LEVEL_HIGH>;
+ 			#dma-cells = <1>;
+ 		};
+ 
+@@ -93,7 +94,7 @@
+ 		sdhci: sdhci@98e00000 {
+ 			compatible = "moxa,moxart-sdhci";
+ 			reg = <0x98e00000 0x5C>;
+-			interrupts = <5 0>;
++			interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clk_apb>;
+ 			dmas =  <&dma 5>,
+ 				<&dma 5>;
+@@ -120,7 +121,7 @@
+ 		mac0: mac@90900000 {
+ 			compatible = "moxa,moxart-mac";
+ 			reg = <0x90900000 0x90>;
+-			interrupts = <25 0>;
++			interrupts = <25 IRQ_TYPE_LEVEL_HIGH>;
+ 			phy-handle = <&ethphy0>;
+ 			phy-mode = "mii";
+ 			status = "disabled";
+@@ -129,7 +130,7 @@
+ 		mac1: mac@92000000 {
+ 			compatible = "moxa,moxart-mac";
+ 			reg = <0x92000000 0x90>;
+-			interrupts = <27 0>;
++			interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
+ 			phy-handle = <&ethphy1>;
+ 			phy-mode = "mii";
+ 			status = "disabled";
+@@ -138,7 +139,7 @@
+ 		uart0: uart@98200000 {
+ 			compatible = "ns16550a";
+ 			reg = <0x98200000 0x20>;
+-			interrupts = <31 8>;
++			interrupts = <31 IRQ_TYPE_LEVEL_HIGH>;
+ 			reg-shift = <2>;
+ 			reg-io-width = <4>;
+ 			clock-frequency = <14745600>;
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index 27cd4abfc74d..731860314ab5 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -488,7 +488,7 @@
+ 	tlv320aic3x: tlv320aic3x@18 {
+ 		compatible = "ti,tlv320aic3x";
+ 		reg = <0x18>;
+-		gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
++		reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
+ 		ai3x-gpio-func = <
+ 			0 /* AIC3X_GPIO1_FUNC_DISABLED */
+ 			5 /* AIC3X_GPIO2_FUNC_DIGITAL_MIC_INPUT */
+@@ -505,7 +505,7 @@
+ 	tlv320aic3x_aux: tlv320aic3x@19 {
+ 		compatible = "ti,tlv320aic3x";
+ 		reg = <0x19>;
+-		gpio-reset = <&gpio2 28 GPIO_ACTIVE_HIGH>; /* 60 */
++		reset-gpios = <&gpio2 28 GPIO_ACTIVE_LOW>; /* 60 */
+ 
+ 		AVDD-supply = <&vmmc2>;
+ 		DRVDD-supply = <&vmmc2>;
+diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
+index 4bb2f4c17321..07f31efec974 100644
+--- a/arch/arm/boot/dts/r8a7790.dtsi
++++ b/arch/arm/boot/dts/r8a7790.dtsi
+@@ -1227,8 +1227,11 @@
+ 			compatible = "renesas,r8a7790-mstp-clocks", "renesas,cpg-mstp-clocks";
+ 			reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
+ 			clocks = <&p_clk>,
+-				<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+-				<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
++				<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7790_CLK_SSI_ALL>, <&mstp10_clks R8A7790_CLK_SSI_ALL>,
+ 				<&p_clk>,
+ 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
+ 				<&mstp10_clks R8A7790_CLK_SCU_ALL>, <&mstp10_clks R8A7790_CLK_SCU_ALL>,
+diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
+index 824ddab9c3ad..19106ceecd3a 100644
+--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
++++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
+@@ -273,7 +273,7 @@
+ 	x2_clk: x2-clock {
+ 		compatible = "fixed-clock";
+ 		#clock-cells = <0>;
+-		clock-frequency = <148500000>;
++		clock-frequency = <74250000>;
+ 	};
+ 
+ 	x13_clk: x13-clock {
+diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
+index 4696062f6dde..96cd539ee4c6 100644
+--- a/arch/arm/boot/dts/r8a7791.dtsi
++++ b/arch/arm/boot/dts/r8a7791.dtsi
+@@ -1232,8 +1232,11 @@
+ 			compatible = "renesas,r8a7791-mstp-clocks", "renesas,cpg-mstp-clocks";
+ 			reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
+ 			clocks = <&p_clk>,
+-				<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+-				<&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
++				<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
++				<&mstp10_clks R8A7791_CLK_SSI_ALL>, <&mstp10_clks R8A7791_CLK_SSI_ALL>,
+ 				<&p_clk>,
+ 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
+ 				<&mstp10_clks R8A7791_CLK_SCU_ALL>, <&mstp10_clks R8A7791_CLK_SCU_ALL>,
+diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
+index 8b1f37bfeeec..b7aadab9b0e8 100644
+--- a/arch/arm/include/asm/xen/events.h
++++ b/arch/arm/include/asm/xen/events.h
+@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
+ 	return raw_irqs_disabled_flags(regs->ARM_cpsr);
+ }
+ 
+-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr),	\
++#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
+ 							    atomic64_t,	\
+ 							    counter), (val))
+ 
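The cast above exists because container_of() recovers the enclosing structure from a pointer to one of its members, and the member pointer's type must match the member's declared type: atomic64_t's counter field is a long long, while the Xen pending-word pointer is not. A minimal user-space sketch of the mechanism (toy container_of() and struct, not the kernel's definitions):

	#include <stddef.h>
	#include <stdio.h>

	/* Toy container_of(): compute the struct address from a member
	 * pointer.  The kernel variant additionally type-checks 'ptr'
	 * against the member, which is what forced the cast above. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct box {
		int pad;
		long long counter;	/* mirrors atomic64_t.counter */
	};

	int main(void)
	{
		struct box b = { .pad = 0, .counter = 42 };
		long long *p = &b.counter;

		printf("%lld\n", container_of(p, struct box, counter)->counter);
		return 0;
	}
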
+diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
+index 709ee1d6d4df..faa9a905826e 100644
+--- a/arch/arm/kernel/ftrace.c
++++ b/arch/arm/kernel/ftrace.c
+@@ -29,11 +29,6 @@
+ #endif
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
+-#ifdef CONFIG_OLD_MCOUNT
+-#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
+-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
+-
+-#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
+ 
+ static int __ftrace_modify_code(void *data)
+ {
+@@ -51,6 +46,12 @@ void arch_ftrace_update_code(int command)
+ 	stop_machine(__ftrace_modify_code, &command, NULL);
+ }
+ 
++#ifdef CONFIG_OLD_MCOUNT
++#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
++#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
++
++#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
++
+ static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
+ {
+ 	return rec->arch.old_mcount ? OLD_NOP : NOP;
+diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
+index 4a86a0133ac3..016c87fc9b69 100644
+--- a/arch/arm/kernel/perf_event.c
++++ b/arch/arm/kernel/perf_event.c
+@@ -311,10 +311,16 @@ validate_group(struct perf_event *event)
+ 	return 0;
+ }
+ 
++static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
++{
++	struct platform_device *pdev = armpmu->plat_device;
++
++	return pdev ? dev_get_platdata(&pdev->dev) : NULL;
++}
++
+ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+ {
+ 	struct arm_pmu *armpmu;
+-	struct platform_device *plat_device;
+ 	struct arm_pmu_platdata *plat;
+ 	int ret;
+ 	u64 start_clock, finish_clock;
+@@ -326,8 +332,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
+ 	 * dereference.
+ 	 */
+ 	armpmu = *(void **)dev;
+-	plat_device = armpmu->plat_device;
+-	plat = dev_get_platdata(&plat_device->dev);
++
++	plat = armpmu_get_platdata(armpmu);
+ 
+ 	start_clock = sched_clock();
+ 	if (plat && plat->handle_irq)
+diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
+index ddfdd820e6f2..6bad2a02a2f9 100644
+--- a/arch/arm/mach-davinci/devices-da8xx.c
++++ b/arch/arm/mach-davinci/devices-da8xx.c
+@@ -827,6 +827,8 @@ static struct platform_device da8xx_dsp = {
+ 	.resource	= da8xx_rproc_resources,
+ };
+ 
++static bool rproc_mem_inited __initdata;
++
+ #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)
+ 
+ static phys_addr_t rproc_base __initdata;
+@@ -865,6 +867,8 @@ void __init da8xx_rproc_reserve_cma(void)
+ 	ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
+ 	if (ret)
+ 		pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
++	else
++		rproc_mem_inited = true;
+ }
+ 
+ #else
+@@ -879,6 +883,12 @@ int __init da8xx_register_rproc(void)
+ {
+ 	int ret;
+ 
++	if (!rproc_mem_inited) {
++		pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
++			__func__);
++		return -ENOMEM;
++	}
++
+ 	ret = platform_device_register(&da8xx_dsp);
+ 	if (ret)
+ 		pr_err("%s: can't register DSP device: %d\n", __func__, ret);
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index a2e8ef3c0bd9..777531ef58d6 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -402,7 +402,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 		clk[IMX6QDL_CLK_GPU2D_CORE] = imx_clk_gate2("gpu2d_core", "gpu2d_core_podf", base + 0x6c, 24);
+ 	clk[IMX6QDL_CLK_GPU3D_CORE]   = imx_clk_gate2("gpu3d_core",    "gpu3d_core_podf",   base + 0x6c, 26);
+ 	clk[IMX6QDL_CLK_HDMI_IAHB]    = imx_clk_gate2("hdmi_iahb",     "ahb",               base + 0x70, 0);
+-	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "video_27m",         base + 0x70, 4);
++	clk[IMX6QDL_CLK_HDMI_ISFR]    = imx_clk_gate2("hdmi_isfr",     "mipi_core_cfg",     base + 0x70, 4);
+ 	clk[IMX6QDL_CLK_I2C1]         = imx_clk_gate2("i2c1",          "ipg_per",           base + 0x70, 6);
+ 	clk[IMX6QDL_CLK_I2C2]         = imx_clk_gate2("i2c2",          "ipg_per",           base + 0x70, 8);
+ 	clk[IMX6QDL_CLK_I2C3]         = imx_clk_gate2("i2c3",          "ipg_per",           base + 0x70, 10);
+diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
+index 7581e036bda6..70e3b711e79c 100644
+--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
++++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
+@@ -524,7 +524,7 @@ static struct clockdomain pcie_7xx_clkdm = {
+ 	.dep_bit	  = DRA7XX_PCIE_STATDEP_SHIFT,
+ 	.wkdep_srcs	  = pcie_wkup_sleep_deps,
+ 	.sleepdep_srcs	  = pcie_wkup_sleep_deps,
+-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
++	.flags		  = CLKDM_CAN_SWSUP,
+ };
+ 
+ static struct clockdomain atl_7xx_clkdm = {
+diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
+index 5f750dc96e0f..49d057eb93d6 100644
+--- a/arch/arm64/include/asm/futex.h
++++ b/arch/arm64/include/asm/futex.h
+@@ -44,16 +44,16 @@
+ 	: "memory")
+ 
+ static inline int
+-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
++futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+ {
+ 	int op = (encoded_op >> 28) & 7;
+ 	int cmp = (encoded_op >> 24) & 15;
+-	int oparg = (encoded_op << 8) >> 20;
+-	int cmparg = (encoded_op << 20) >> 20;
++	int oparg = (int)(encoded_op << 8) >> 20;
++	int cmparg = (int)(encoded_op << 20) >> 20;
+ 	int oldval = 0, ret, tmp;
+ 
+ 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+-		oparg = 1 << oparg;
++		oparg = 1U << (oparg & 0x1f);
+ 
+ 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ 		return -EFAULT;
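The casts in the oparg/cmparg extraction restore arithmetic sign extension: encoded_op is now unsigned, a right shift of an unsigned value is logical, so the signed 12-bit fields would otherwise lose their sign, and masking the shift amount keeps 1 << oparg well defined for any input. A stand-alone illustration in plain C (not the kernel code):

	#include <stdio.h>

	/* The futex oparg field occupies bits 23..12 of encoded_op and is
	 * a signed 12-bit quantity.  Shifting it back down in an unsigned
	 * type loses the sign; casting to int first sign-extends it. */
	int main(void)
	{
		unsigned int encoded_op = 0x00F00000;	/* oparg = 0xF00 = -256 */

		printf("unsigned shift: %u\n", (encoded_op << 8) >> 20);   /* 3840 */
		printf("signed shift:   %d\n", (int)(encoded_op << 8) >> 20); /* -256 */
		return 0;
	}
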
+diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
+index a89bddefdacf..139093fab326 100644
+--- a/arch/frv/include/asm/timex.h
++++ b/arch/frv/include/asm/timex.h
+@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
+ #define vxtime_lock()		do {} while (0)
+ #define vxtime_unlock()		do {} while (0)
+ 
++/* This attribute is used in include/linux/jiffies.h alongside
++ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
++ * for frv does not contain another section specification.
++ */
++#define __jiffy_arch_data	__attribute__((__section__(".data")))
++
+ #endif
+ 
+diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
+index 29754aae5177..4268a78d8a5b 100644
+--- a/arch/ia64/kernel/module.c
++++ b/arch/ia64/kernel/module.c
+@@ -153,7 +153,7 @@ slot (const struct insn *insn)
+ static int
+ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
+ {
+-	if (slot(insn) != 2) {
++	if (slot(insn) != 1 && slot(insn) != 2) {
+ 		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
+ 		       mod->name, slot(insn));
+ 		return 0;
+@@ -165,7 +165,7 @@ apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
+ static int
+ apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
+ {
+-	if (slot(insn) != 2) {
++	if (slot(insn) != 1 && slot(insn) != 2) {
+ 		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
+ 		       mod->name, slot(insn));
+ 		return 0;
+diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
+index daba1f9a4f79..174aedce3167 100644
+--- a/arch/mips/include/asm/kprobes.h
++++ b/arch/mips/include/asm/kprobes.h
+@@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;
+ 
+ #define flush_insn_slot(p)						\
+ do {									\
+-	flush_icache_range((unsigned long)p->addr,			\
++	if (p->addr)							\
++		flush_icache_range((unsigned long)p->addr,		\
+ 			   (unsigned long)p->addr +			\
+ 			   (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)));	\
+ } while (0)
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index d477508450de..805b71ed5129 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -1096,10 +1096,20 @@ repeat:
+ 		}
+ 		break;
+ 
+-	case beql_op:
+-	case bnel_op:
+ 	case blezl_op:
+ 	case bgtzl_op:
++		/*
++		 * For BLEZL and BGTZL, the rt field must be set to 0. If it
++		 * is not, this may be the encoding of a MIPS R6 instruction,
++		 * so return to CPU execution in that case.
++		 */
++		if (MIPSInst_RT(inst)) {
++			err = SIGILL;
++			break;
++		}
++		/* fall through */
++	case beql_op:
++	case bnel_op:
+ 		if (delay_slot(regs)) {
+ 			err = SIGILL;
+ 			break;
+@@ -2329,6 +2339,8 @@ static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+ 	__this_cpu_write((mipsr2bremustats).bgezl, 0);
+ 	__this_cpu_write((mipsr2bremustats).bltzll, 0);
+ 	__this_cpu_write((mipsr2bremustats).bgezll, 0);
++	__this_cpu_write((mipsr2bremustats).bltzall, 0);
++	__this_cpu_write((mipsr2bremustats).bgezall, 0);
+ 	__this_cpu_write((mipsr2bremustats).bltzal, 0);
+ 	__this_cpu_write((mipsr2bremustats).bgezal, 0);
+ 	__this_cpu_write((mipsr2bremustats).beql, 0);
+diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
+index b8e63fd00375..cda33475a481 100644
+--- a/arch/mips/lib/memset.S
++++ b/arch/mips/lib/memset.S
+@@ -218,7 +218,7 @@
+ 1:	PTR_ADDIU	a0, 1			/* fill bytewise */
+ 	R10KCBARRIER(0(ra))
+ 	bne		t1, a0, 1b
+-	sb		a1, -1(a0)
++	 EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+ 
+ 2:	jr		ra			/* done */
+ 	move		a2, zero
+@@ -249,13 +249,18 @@
+ 	PTR_L		t0, TI_TASK($28)
+ 	andi		a2, STORMASK
+ 	LONG_L		t0, THREAD_BUADDR(t0)
+-	LONG_ADDU	a2, t1
++	LONG_ADDU	a2, a0
+ 	jr		ra
+ 	LONG_SUBU	a2, t0
+ 
+ .Llast_fixup\@:
+ 	jr		ra
+-	andi		v1, a2, STORMASK
++	 nop
++
++.Lsmall_fixup\@:
++	PTR_SUBU	a2, t1, a0
++	jr		ra
++	 PTR_ADDIU	a2, 1
+ 
+ 	.endm
+ 
+diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
+index adc6911ba748..b19a3c506b1e 100644
+--- a/arch/mips/mm/pgtable-32.c
++++ b/arch/mips/mm/pgtable-32.c
+@@ -51,15 +51,15 @@ void __init pagetable_init(void)
+ 	/*
+ 	 * Fixed mappings:
+ 	 */
+-	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+-	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
++	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
++	fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
+ 
+ #ifdef CONFIG_HIGHMEM
+ 	/*
+ 	 * Permanent kmaps:
+ 	 */
+ 	vaddr = PKMAP_BASE;
+-	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++	fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ 
+ 	pgd = swapper_pg_dir + __pgd_offset(vaddr);
+ 	pud = pud_offset(pgd, vaddr);
+diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
+index d6d27d51d131..5cf32b77f128 100644
+--- a/arch/mips/net/bpf_jit.c
++++ b/arch/mips/net/bpf_jit.c
+@@ -562,7 +562,8 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+ 	u32 sflags, tmp_flags;
+ 
+ 	/* Adjust the stack pointer */
+-	emit_stack_offset(-align_sp(offset), ctx);
++	if (offset)
++		emit_stack_offset(-align_sp(offset), ctx);
+ 
+ 	if (ctx->flags & SEEN_CALL) {
+ 		/* Argument save area */
+@@ -641,7 +642,8 @@ static void restore_bpf_jit_regs(struct jit_ctx *ctx,
+ 		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
+ 
+ 	/* Restore the sp and discard the scratch memory */
+-	emit_stack_offset(align_sp(offset), ctx);
++	if (offset)
++		emit_stack_offset(align_sp(offset), ctx);
+ }
+ 
+ static unsigned int get_stack_depth(struct jit_ctx *ctx)
+@@ -685,8 +687,14 @@ static void build_prologue(struct jit_ctx *ctx)
+ 	if (ctx->flags & SEEN_X)
+ 		emit_jit_reg_move(r_X, r_zero, ctx);
+ 
+-	/* Do not leak kernel data to userspace */
+-	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
++	/*
++	 * Do not leak kernel data to userspace, we only need to clear
++	 * r_A if it is ever used.  In fact if it is never used, we
++	 * will not save/restore it, so clearing it in this case would
++	 * corrupt the state of the caller.
++	 */
++	if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
++	    (ctx->flags & SEEN_A))
+ 		emit_jit_reg_move(r_A, r_zero, ctx);
+ }
+ 
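The SEEN_A test ties the prologue clear to the same flag that drives the save/restore of r_A: a register the JIT never tracked still belongs to the caller and must not be written. A reduced, runnable model of that invariant (the flag name mirrors the JIT's; everything else is illustrative):

	#include <stdio.h>

	#define SEEN_A	(1 << 0)	/* mirrors the JIT's ctx->flags bit */

	static void emit_clear_a(void) { puts("move r_A, zero"); }

	/* Clear r_A only when it is part of the tracked (saved/restored)
	 * register set; otherwise the prologue would corrupt caller state. */
	static void build_prologue(unsigned int flags, int needs_clear_a)
	{
		if (needs_clear_a && (flags & SEEN_A))
			emit_clear_a();
	}

	int main(void)
	{
		build_prologue(SEEN_A, 1);	/* emits the clear */
		build_prologue(0, 1);		/* untracked: left alone */
		return 0;
	}
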
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index dba508fe1683..4f7060ec6875 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -648,6 +648,10 @@ static int match_pci_device(struct device *dev, int index,
+ 					(modpath->mod == PCI_FUNC(devfn)));
+ 	}
+ 
++	/* index might be out of bounds for bc[] */
++	if (index >= 6)
++		return 0;
++
+ 	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
+ 	return (modpath->bc[index] == id);
+ }
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index a3bf5be111ff..bedaf3e3c558 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -36,7 +36,8 @@
+ 
+ #define set_mb(var, value)	do { var = value; mb(); } while (0)
+ 
+-#ifdef __SUBARCH_HAS_LWSYNC
++/* The sub-arch has lwsync */
++#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+ #    define SMPWMB      LWSYNC
+ #else
+ #    define SMPWMB      eieio
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 042af1abfc4d..e1b164f4a8f0 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -21,6 +21,9 @@
+ /* We calculate number of sg entries based on PAGE_SIZE */
+ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+ 
++/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
++#define OPAL_BUSY_DELAY_MS	10
++
+ /* /sys/firmware/opal */
+ extern struct kobject *opal_kobj;
+ 
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index c50868681f9e..e8d6a842f4bb 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -5,10 +5,6 @@
+ #include <linux/stringify.h>
+ #include <asm/feature-fixups.h>
+ 
+-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+-#define __SUBARCH_HAS_LWSYNC
+-#endif
+-
+ #ifndef __ASSEMBLY__
+ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index c3e0420b8a42..a9fec93c2144 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -766,7 +766,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
+ 	eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+ 
+ 	/* PCI Command: 0x4 */
+-	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
++	eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
++			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ 
+ 	/* Check the PCIe link is ready */
+ 	eeh_bridge_check_link(edev);
+diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
+index 56f44848b044..4094e9013c18 100644
+--- a/arch/powerpc/kernel/time.c
++++ b/arch/powerpc/kernel/time.c
+@@ -685,12 +685,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
+ static void start_cpu_decrementer(void)
+ {
+ #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
++	unsigned int tcr;
++
+ 	/* Clear any pending timer interrupts */
+ 	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+ 
+-	/* Enable decrementer interrupt */
+-	mtspr(SPRN_TCR, TCR_DIE);
+-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
++	tcr = mfspr(SPRN_TCR);
++	/*
++	 * The watchdog may have already been enabled by u-boot. So leave
++	 * TCR[WP] (Watchdog Period) alone.
++	 */
++	tcr &= TCR_WP_MASK;	/* Clear all bits except for TCR[WP] */
++	tcr |= TCR_DIE;		/* Enable decrementer */
++	mtspr(SPRN_TCR, tcr);
++#endif
+ }
+ 
+ void __init generic_calibrate_decr(void)
+diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
+index b982d925c710..c74c9c4134b5 100644
+--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
++++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
+@@ -176,12 +176,15 @@ map_again:
+ 	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
+ 				 hpsize, hpsize, MMU_SEGSIZE_256M);
+ 
+-	if (ret < 0) {
++	if (ret == -1) {
+ 		/* If we couldn't map a primary PTE, try a secondary */
+ 		hash = ~hash;
+ 		vflags ^= HPTE_V_SECONDARY;
+ 		attempt++;
+ 		goto map_again;
++	} else if (ret < 0) {
++		r = -EIO;
++		goto out_unlock;
+ 	} else {
+ 		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
+ 					    vpn, hpaddr, orig_pte);
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index f57383941d03..7e66365cd0c9 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -625,7 +625,11 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ 			kvmppc_mmu_unmap_page(vcpu, &pte);
+ 		}
+ 		/* The guest's PTE is not mapped yet. Map on the host */
+-		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
++		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
++			/* Exit KVM if mapping failed */
++			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
++			return RESUME_HOST;
++		}
+ 		if (data)
+ 			vcpu->stat.sp_storage++;
+ 		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
+index f2c75a1e0536..0d91baf63fed 100644
+--- a/arch/powerpc/kvm/book3s_pr_papr.c
++++ b/arch/powerpc/kvm/book3s_pr_papr.c
+@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
+ 	pteg_addr = get_pteg_addr(vcpu, pte_index);
+ 
+ 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+-	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
++	ret = H_FUNCTION;
++	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
++		goto done;
+ 	hpte = pteg;
+ 
+ 	ret = H_PTEG_FULL;
+@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
+ 	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
+ 	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
+ 	pteg_addr += i * HPTE_SIZE;
+-	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
++	ret = H_FUNCTION;
++	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
++		goto done;
+ 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
+ 	ret = H_SUCCESS;
+ 
+@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
+ 
+ 	pteg = get_pteg_addr(vcpu, pte_index);
+ 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
++	ret = H_FUNCTION;
++	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
++		goto done;
+ 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
+ 	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
+ 		goto done;
+ 
+-	copy_to_user((void __user *)pteg, &v, sizeof(v));
++	ret = H_FUNCTION;
++	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
++		goto done;
+ 
+ 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
+ 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+ 		}
+ 
+ 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
+-		copy_from_user(pte, (void __user *)pteg, sizeof(pte));
++		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
++			ret = H_FUNCTION;
++			break;
++		}
+ 		pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ 		pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
+ 			tsh |= H_BULK_REMOVE_NOT_FOUND;
+ 		} else {
+ 			/* Splat the pteg in (userland) hpt */
+-			copy_to_user((void __user *)pteg, &v, sizeof(v));
++			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
++				ret = H_FUNCTION;
++				break;
++			}
+ 
+ 			rb = compute_tlbie_rb(pte[0], pte[1],
+ 					      tsh & H_BULK_REMOVE_PTEX);
+@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
+ 
+ 	pteg = get_pteg_addr(vcpu, pte_index);
+ 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
+-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
++	ret = H_FUNCTION;
++	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
++		goto done;
+ 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
+ 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
+ 
+@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
+ 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+ 	pte[0] = (__force u64)cpu_to_be64(pte[0]);
+ 	pte[1] = (__force u64)cpu_to_be64(pte[1]);
+-	copy_to_user((void __user *)pteg, pte, sizeof(pte));
++	ret = H_FUNCTION;
++	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
++		goto done;
+ 	ret = H_SUCCESS;
+ 
+  done:
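All of the hunks above enforce the same rule: copy_from_user()/copy_to_user() return the number of bytes that could not be copied, so any non-zero result means the guest-supplied address was bad and the hypercall must fail with H_FUNCTION instead of operating on stale stack contents. The shape of the pattern in isolation (hcall_read_pteg() is a hypothetical illustration, not a kernel function):

	/* Hypothetical sketch of the checked-copy pattern the patch applies.
	 * copy_from_user() returns the number of uncopied bytes; treat any
	 * non-zero result as a failed hypercall rather than using whatever
	 * happens to be in the local buffer. */
	static long hcall_read_pteg(void __user *uptr, u64 pteg[2])
	{
		if (copy_from_user(pteg, uptr, 2 * sizeof(u64)))
			return H_FUNCTION;	/* bad guest address */
		/* ... operate on pteg[0]/pteg[1] ... */
		return H_SUCCESS;
	}
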
+diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
+index b396868d2aa7..f962209a6037 100644
+--- a/arch/powerpc/mm/fault.c
++++ b/arch/powerpc/mm/fault.c
+@@ -293,7 +293,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
+ 	 * can result in fault, which will cause a deadlock when called with
+ 	 * mmap_sem held
+ 	 */
+-	if (user_mode(regs))
++	if (!is_exec && user_mode(regs))
+ 		store_update_sp = store_updates_sp(regs);
+ 
+ 	if (user_mode(regs))
+diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
+index be6212ddbf06..7e42e3ec2142 100644
+--- a/arch/powerpc/platforms/cell/spufs/coredump.c
++++ b/arch/powerpc/platforms/cell/spufs/coredump.c
+@@ -174,6 +174,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
+ 	if (!dump_skip(cprm,
+ 		       roundup(cprm->written - total + sz, 4) - cprm->written))
+ 		goto Eio;
++
++	rc = 0;
+ out:
+ 	free_page((unsigned long)buf);
+ 	return rc;
+diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
+index 9db4398ded5d..1bceb95f422d 100644
+--- a/arch/powerpc/platforms/powernv/opal-nvram.c
++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
+@@ -11,6 +11,7 @@
+ 
+ #define DEBUG
+ 
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+ 
+ 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ 		rc = opal_write_nvram(__pa(buf), count, off);
+-		if (rc == OPAL_BUSY_EVENT)
++		if (rc == OPAL_BUSY_EVENT) {
++			msleep(OPAL_BUSY_DELAY_MS);
+ 			opal_poll_events(NULL);
++		} else if (rc == OPAL_BUSY) {
++			msleep(OPAL_BUSY_DELAY_MS);
++		}
+ 	}
++
++	if (rc)
++		return -EIO;
++
+ 	*index += count;
+ 	return count;
+ }
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 2eeb0a0f506d..8e40530fd39c 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -323,7 +323,7 @@ static void hypfs_kill_super(struct super_block *sb)
+ 
+ 	if (sb->s_root)
+ 		hypfs_delete_tree(sb->s_root);
+-	if (sb_info->update_file)
++	if (sb_info && sb_info->update_file)
+ 		hypfs_remove(sb_info->update_file);
+ 	kfree(sb->s_fs_info);
+ 	sb->s_fs_info = NULL;
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 7963c6aa1196..09548603d782 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -770,6 +770,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
+ 	/* copy and convert to ebcdic */
+ 	memcpy(ipb->hdr.loadparm, buf, lp_len);
+ 	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
++	ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
+ 	return len;
+ }
+ 
+diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
+index 445657fe658c..6c553f6e791a 100644
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -21,8 +21,14 @@ SECTIONS
+ {
+ 	. = 0x00000000;
+ 	.text : {
+-	_text = .;		/* Text and read-only data */
++		/* Text and read-only data */
+ 		HEAD_TEXT
++		/*
++		 * E.g. perf doesn't like symbols starting at address zero,
++		 * therefore skip the initial PSW and channel program located
++		 * at address zero and let _text start at 0x200.
++		 */
++	_text = 0x200;
+ 		TEXT_TEXT
+ 		SCHED_TEXT
+ 		LOCK_TEXT
+diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
+index 7d3ca30fcd15..00e6b6c1dd79 100644
+--- a/arch/sparc/kernel/ldc.c
++++ b/arch/sparc/kernel/ldc.c
+@@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)
+ 
+ 		lp->rcv_nxt = p->seqid;
+ 
++		/*
++		 * If this is a control-only packet, there is nothing
++		 * else to do but advance the rx queue since the packet
++		 * was already processed above.
++		 */
+ 		if (!(p->type & LDC_DATA)) {
+ 			new = rx_advance(lp, new);
+-			goto no_data;
++			break;
+ 		}
+ 		if (p->stype & (LDC_ACK | LDC_NACK)) {
+ 			err = data_ack_nack(lp, p);
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index 7b605e4dfffa..2ac6a7e5a179 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -135,7 +135,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
+ 
+ static void hard_handler(int sig, siginfo_t *si, void *p)
+ {
+-	struct ucontext *uc = p;
++	ucontext_t *uc = p;
+ 	mcontext_t *mc = &uc->uc_mcontext;
+ 	unsigned long pending = 1UL << sig;
+ 
+diff --git a/arch/x86/Makefile b/arch/x86/Makefile
+index 2fda005bb334..696c82f9035d 100644
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -158,6 +158,15 @@ KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr)
+ 
+ LDFLAGS := -m elf_$(UTS_MACHINE)
+ 
++#
++# The 64-bit kernel must be aligned to 2MB.  Pass -z max-page-size=0x200000 to
++# the linker to force 2MB page size regardless of the default page size used
++# by the linker.
++#
++ifdef CONFIG_X86_64
++LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
++endif
++
+ # Speed up the build
+ KBUILD_CFLAGS += -pipe
+ # Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index e28437e0f708..fc48e8e11a95 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -349,6 +349,10 @@ static void parse_elf(void *output)
+ 
+ 		switch (phdr->p_type) {
+ 		case PT_LOAD:
++#ifdef CONFIG_X86_64
++			if ((phdr->p_align % 0x200000) != 0)
++				error("Alignment of LOAD segment isn't a multiple of 2MB");
++#endif
+ #ifdef CONFIG_RELOCATABLE
+ 			dest = output;
+ 			dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
+diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
+index 236c80974457..75385fcf1074 100644
+--- a/arch/x86/crypto/cast5_avx_glue.c
++++ b/arch/x86/crypto/cast5_avx_glue.c
+@@ -67,8 +67,6 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ 	void (*fn)(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
+ 	int err;
+ 
+-	fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
+-
+ 	err = blkcipher_walk_virt(desc, walk);
+ 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+ 
+@@ -80,6 +78,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+ 
+ 		/* Process multi-block batch */
+ 		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
++			fn = (enc) ? cast5_ecb_enc_16way : cast5_ecb_dec_16way;
+ 			do {
+ 				fn(ctx, wdst, wsrc);
+ 
+diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
+index da772edd19ab..b1965b6bbd68 100644
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -306,6 +306,7 @@ enum vmcs_field {
+ #define INTR_TYPE_NMI_INTR		(2 << 8) /* NMI */
+ #define INTR_TYPE_HARD_EXCEPTION	(3 << 8) /* processor exception */
+ #define INTR_TYPE_SOFT_INTR             (4 << 8) /* software interrupt */
++#define INTR_TYPE_PRIV_SW_EXCEPTION	(5 << 8) /* ICE breakpoint - undocumented */
+ #define INTR_TYPE_SOFT_EXCEPTION	(6 << 8) /* software exception */
+ 
+ /* GUEST_INTERRUPTIBILITY_INFO flags. */
+diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
+index 06b407f79b24..da43f226e5a2 100644
+--- a/arch/x86/include/uapi/asm/msr-index.h
++++ b/arch/x86/include/uapi/asm/msr-index.h
+@@ -307,6 +307,9 @@
+ #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
+ #define FAM10H_MMIO_CONF_BASE_SHIFT	20
+ #define MSR_FAM10H_NODE_ID		0xc001100c
++#define MSR_F10H_DECFG			0xc0011029
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
++#define MSR_F10H_DECFG_LFENCE_SERIALIZE		BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
+ 
+ /* K8 MSRs */
+ #define MSR_K8_TOP_MEM1			0xc001001a
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index c6c4248ab138..1d64ba0c9496 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -712,8 +712,32 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 		set_cpu_cap(c, X86_FEATURE_K8);
+ 
+ 	if (cpu_has_xmm2) {
+-		/* MFENCE stops RDTSC speculation */
+-		set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++		unsigned long long val;
++		int ret;
++
++		/*
++		 * A serializing LFENCE has less overhead than MFENCE, so
++		 * use it for execution serialization.  On families which
++		 * don't have that MSR, LFENCE is already serializing.
++		 * msr_set_bit() uses the safe accessors, too, even if the MSR
++		 * is not present.
++		 */
++		msr_set_bit(MSR_F10H_DECFG,
++			    MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
++
++		/*
++		 * Verify that the MSR write was successful (could be running
++		 * under a hypervisor) and only then assume that LFENCE is
++		 * serializing.
++		 */
++		ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
++		if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
++			/* A serializing LFENCE stops RDTSC speculation */
++			set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
++		} else {
++			/* MFENCE stops RDTSC speculation */
++			set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
++		}
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 185ebd2c0c3c..5d77df85c529 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2493,7 +2493,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
+ 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
+ 		if (left < 128)
+ 			left = 128;
+-		left &= ~0x3fu;
++		left &= ~0x3fULL;
+ 	}
+ 	return left;
+ }
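The one-character constant change guards against integer promotion: ~0x3fu is a 32-bit constant (0xffffffc0) that zero-extends when combined with a 64-bit operand, silently clearing bits 32-63, while ~0x3fULL preserves them. Demonstrated in plain C:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t left = 0x1234567890ULL;

		/* 32-bit mask zero-extends and wipes the high word: */
		printf("%#llx\n", (unsigned long long)(left & ~0x3fu));   /* 0x34567880 */
		/* 64-bit mask preserves it: */
		printf("%#llx\n", (unsigned long long)(left & ~0x3fULL)); /* 0x1234567880 */
		return 0;
	}
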
+diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
+index e7cc5370cd2f..6c7e7986939a 100644
+--- a/arch/x86/kernel/i8259.c
++++ b/arch/x86/kernel/i8259.c
+@@ -405,6 +405,7 @@ struct legacy_pic default_legacy_pic = {
+ };
+ 
+ struct legacy_pic *legacy_pic = &default_legacy_pic;
++EXPORT_SYMBOL(legacy_pic);
+ 
+ static int __init i8259A_init_ops(void)
+ {
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 228c233a2f36..106d4ac16a43 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -49,6 +49,7 @@
+ #include <linux/kdebug.h>
+ #include <linux/kallsyms.h>
+ #include <linux/ftrace.h>
++#include <linux/moduleloader.h>
+ 
+ #include <asm/cacheflush.h>
+ #include <asm/desc.h>
+@@ -196,6 +197,8 @@ retry:
+ 		return (opcode != 0x62 && opcode != 0x67);
+ 	case 0x70:
+ 		return 0; /* can't boost conditional jump */
++	case 0x90:
++		return opcode != 0x9a;	/* can't boost call far */
+ 	case 0xc0:
+ 		/* can't boost software-interruptions */
+ 		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
+@@ -400,10 +403,20 @@ int __copy_instruction(u8 *dest, u8 *src)
+ 	return length;
+ }
+ 
++/* Recover page to RW mode before releasing it */
++void free_insn_page(void *page)
++{
++	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
++	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
++	module_memfree(page);
++}
++
+ static int arch_copy_kprobe(struct kprobe *p)
+ {
+ 	int ret;
+ 
++	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
++
+ 	/* Copy an instruction with recovering if other optprobe modifies it.*/
+ 	ret = __copy_instruction(p->ainsn.insn, p->addr);
+ 	if (!ret)
+@@ -418,6 +431,8 @@ static int arch_copy_kprobe(struct kprobe *p)
+ 	else
+ 		p->ainsn.boostable = -1;
+ 
++	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
++
+ 	/* Check whether the instruction modifies Interrupt Flag or not */
+ 	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+ 
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index c9d488f3e4cd..085415d88326 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -349,6 +349,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ 	}
+ 
+ 	buf = (u8 *)op->optinsn.insn;
++	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
+ 
+ 	/* Copy instructions into the out-of-line buffer */
+ 	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
+@@ -371,6 +372,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
+ 	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+ 			   (u8 *)op->kp.addr + op->optinsn.size);
+ 
++	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
++
+ 	flush_icache_range((unsigned long) buf,
+ 			   (unsigned long) buf + TMPL_END_IDX +
+ 			   op->optinsn.size + RELATIVEJUMP_SIZE);
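Both hunks follow the same discipline now that instruction slots live on read-only pages: flip the page writable only for the duration of the copy, then seal it again. Condensed into one helper, this is a sketch assuming the set_memory_rw()/set_memory_ro() semantics used above, not an actual kernel function:

	/* Sketch: patch an instruction slot that normally lives on a
	 * read-only page.  The page is writable only inside this window,
	 * so stray writes elsewhere cannot corrupt the slot. */
	static void write_insn_slot(u8 *slot, const u8 *src, size_t len)
	{
		unsigned long page = (unsigned long)slot & PAGE_MASK;

		set_memory_rw(page, 1);		/* open the window */
		memcpy(slot, src, len);
		set_memory_ro(page, 1);		/* seal it again */
	}
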
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index f6911cc90a81..a92919864e04 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1400,6 +1400,8 @@ static inline void mwait_play_dead(void)
+ 	void *mwait_ptr;
+ 	int i;
+ 
++	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
++		return;
+ 	if (!this_cpu_has(X86_FEATURE_MWAIT))
+ 		return;
+ 	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 21187ebee7d0..8fdcdbf5f309 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -356,6 +356,8 @@ static int __init tsc_setup(char *str)
+ 		tsc_clocksource_reliable = 1;
+ 	if (!strncmp(str, "noirqtime", 9))
+ 		no_sched_irq_time = 1;
++	if (!strcmp(str, "unstable"))
++		mark_tsc_unstable("boot parameter");
+ 	return 1;
+ }
+ 
+@@ -397,7 +399,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
+ 	hpet2 -= hpet1;
+ 	tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
+ 	do_div(tmp, 1000000);
+-	do_div(deltatsc, tmp);
++	deltatsc = div64_u64(deltatsc, tmp);
+ 
+ 	return (unsigned long) deltatsc;
+ }
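do_div() is only defined for a 32-bit divisor; when tmp does not fit in 32 bits, the divisor is truncated and the quotient is garbage, hence the switch to div64_u64(). A user-space model of the difference (toy functions standing in for the kernel helpers):

	#include <stdio.h>
	#include <stdint.h>

	/* Models of the kernel helpers: do_div() divides a u64 by a u32,
	 * so a 64-bit divisor gets truncated before the division. */
	static uint64_t model_do_div(uint64_t n, uint32_t base) { return n / base; }
	static uint64_t model_div64_u64(uint64_t n, uint64_t b) { return n / b; }

	int main(void)
	{
		uint64_t dividend = 1ULL << 40;
		uint64_t divisor  = (1ULL << 32) + 2;	/* needs 33 bits */

		printf("truncated: %llu\n", (unsigned long long)
		       model_do_div(dividend, (uint32_t)divisor));	/* huge */
		printf("exact:     %llu\n", (unsigned long long)
		       model_div64_u64(dividend, divisor));		/* 255 */
		return 0;
	}
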
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 6b87d8bcdcdd..28d48339af32 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1470,6 +1470,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
+ 		 */
+ 		if (var->unusable)
+ 			var->db = 0;
++		/* This is symmetric with svm_set_segment() */
+ 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
+ 		break;
+ 	}
+@@ -1614,18 +1615,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ 	s->base = var->base;
+ 	s->limit = var->limit;
+ 	s->selector = var->selector;
+-	if (var->unusable)
+-		s->attrib = 0;
+-	else {
+-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
+-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
+-	}
++	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
++	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
++	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
++	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
++	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
++	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
++	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
++	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
+ 
+ 	/*
+ 	 * This is always accurate, except if SYSRET returned to a segment
+@@ -1634,7 +1631,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ 	 * would entail passing the CPL to userspace and back.
+ 	 */
+ 	if (seg == VCPU_SREG_SS)
+-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
++		/* This is symmetric with svm_get_segment() */
++		svm->vmcb->save.cpl = (var->dpl & 3);
+ 
+ 	mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index ffd5502dd215..67f2d8e44fda 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -942,6 +942,13 @@ static inline bool is_machine_check(u32 intr_info)
+ 		(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+ }
+ 
++/* Undocumented: icebp/int1 */
++static inline bool is_icebp(u32 intr_info)
++{
++	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
++		== (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
++}
++
+ static inline bool cpu_has_vmx_msr_bitmap(void)
+ {
+ 	return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
+@@ -5151,7 +5158,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+ 			vcpu->arch.dr6 &= ~15;
+ 			vcpu->arch.dr6 |= dr6 | DR6_RTM;
+-			if (!(dr6 & ~DR6_RESERVED)) /* icebp */
++			if (is_icebp(intr_info))
+ 				skip_emulated_instruction(vcpu);
+ 
+ 			kvm_queue_exception(vcpu, DB_VECTOR);
+@@ -7417,11 +7424,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ {
+ 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ 	int cr = exit_qualification & 15;
+-	int reg = (exit_qualification >> 8) & 15;
+-	unsigned long val = kvm_register_readl(vcpu, reg);
++	int reg;
++	unsigned long val;
+ 
+ 	switch ((exit_qualification >> 4) & 3) {
+ 	case 0: /* mov to cr */
++		reg = (exit_qualification >> 8) & 15;
++		val = kvm_register_readl(vcpu, reg);
+ 		switch (cr) {
+ 		case 0:
+ 			if (vmcs12->cr0_guest_host_mask &
+@@ -7476,6 +7485,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
+ 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
+ 		 * cr0. Other attempted changes are ignored, with no exit.
+ 		 */
++		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+ 		if (vmcs12->cr0_guest_host_mask & 0xe &
+ 		    (val ^ vmcs12->cr0_read_shadow))
+ 			return true;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 27bc31f0da52..f6ca8a0d14ee 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -285,7 +285,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ 	if (!pmd_k)
+ 		return -1;
+ 
+-	if (pmd_huge(*pmd_k))
++	if (pmd_large(*pmd_k))
+ 		return 0;
+ 
+ 	pte_k = pte_offset_kernel(pmd_k, address);
+@@ -403,7 +403,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ 	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
+ 		BUG();
+ 
+-	if (pud_huge(*pud))
++	if (pud_large(*pud))
+ 		return 0;
+ 
+ 	pmd = pmd_offset(pud, address);
+@@ -414,7 +414,7 @@ static noinline int vmalloc_fault(unsigned long address)
+ 	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
+ 		BUG();
+ 
+-	if (pmd_huge(*pmd))
++	if (pmd_large(*pmd))
+ 		return 0;
+ 
+ 	pte_ref = pte_offset_kernel(pmd_ref, address);
+diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
+index ddeff4844a10..31657a66e3fd 100644
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -971,7 +971,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+ 	 * may converge on the last pass. In such case do one more
+ 	 * pass to emit the final image
+ 	 */
+-	for (pass = 0; pass < 10 || image; pass++) {
++	for (pass = 0; pass < 20 || image; pass++) {
+ 		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ 		if (proglen <= 0) {
+ 			image = NULL;
+@@ -994,6 +994,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
+ 				goto out;
+ 		}
+ 		oldproglen = proglen;
++		cond_resched();
+ 	}
+ 
+ 	if (bpf_jit_enable > 1)
+diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
+index 1518d2805ae8..fd6825537b97 100644
+--- a/arch/x86/um/stub_segv.c
++++ b/arch/x86/um/stub_segv.c
+@@ -10,7 +10,7 @@
+ void __attribute__ ((__section__ (".__syscall_stub")))
+ stub_segv_handler(int sig, siginfo_t *info, void *p)
+ {
+-	struct ucontext *uc = p;
++	ucontext_t *uc = p;
+ 
+ 	GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
+ 			      &uc->uc_mcontext);
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 39ce74d10e2b..21978fcd877a 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -165,6 +165,9 @@ bool bio_integrity_enabled(struct bio *bio)
+ 	if (!bio_is_rw(bio))
+ 		return false;
+ 
++	if (!bio_sectors(bio))
++		return false;
++
+ 	/* Already protected? */
+ 	if (bio_integrity(bio))
+ 		return false;
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 2dc1fd6c5bdb..0145b2ceafae 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1607,7 +1607,8 @@ static void blk_mq_exit_hctx(struct request_queue *q,
+ {
+ 	unsigned flush_start_tag = set->queue_depth;
+ 
+-	blk_mq_tag_idle(hctx);
++	if (blk_mq_hw_queue_mapped(hctx))
++		blk_mq_tag_idle(hctx);
+ 
+ 	if (set->ops->exit_request)
+ 		set->ops->exit_request(set->driver_data,
+diff --git a/block/blk-throttle.c b/block/blk-throttle.c
+index 5b9c6d5c3636..fd51c8be247d 100644
+--- a/block/blk-throttle.c
++++ b/block/blk-throttle.c
+@@ -648,6 +648,17 @@ static void throtl_dequeue_tg(struct throtl_grp *tg)
+ static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
+ 					  unsigned long expires)
+ {
++	unsigned long max_expire = jiffies + 8 * throtl_slice;
++
++	/*
++	 * Since we are adjusting the throttle limit dynamically, the sleep
++	 * time calculated according to the previous limit might be invalid.
++	 * The cgroup's sleep time can be very long while no other cgroup has
++	 * I/O running, so it would never notice the limit change. Make sure
++	 * the cgroup doesn't sleep too long, so the change is not missed.
++	 */
++	if (time_after(expires, max_expire))
++		expires = max_expire;
+ 	mod_timer(&sq->pending_timer, expires);
+ 	throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
+ 		   expires - jiffies, jiffies);
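In isolation, the clamp is simply "never arm the timer more than a fixed window into the future", with time_after() doing the wraparound-safe comparison. A sketch of the idiom (not the driver code):

	/* Sketch: bound a timer expiry to 'now + window' so a lowered
	 * limit is observed within a known delay.  time_after() compares
	 * jiffies correctly across counter wraparound. */
	static unsigned long bounded_expiry(unsigned long expires,
					    unsigned long window)
	{
		unsigned long max_expire = jiffies + window;

		return time_after(expires, max_expire) ? max_expire : expires;
	}
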
+diff --git a/block/partition-generic.c b/block/partition-generic.c
+index 0d9e5f97f0a8..94de2055365e 100644
+--- a/block/partition-generic.c
++++ b/block/partition-generic.c
+@@ -309,8 +309,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
+ 
+ 	if (info) {
+ 		struct partition_meta_info *pinfo = alloc_part_info(disk);
+-		if (!pinfo)
++		if (!pinfo) {
++			err = -ENOMEM;
+ 			goto out_free_stats;
++		}
+ 		memcpy(pinfo, info, sizeof(*info));
+ 		p->info = pinfo;
+ 	}
+diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
+index 5610cd537da7..7d8d50c11ce7 100644
+--- a/block/partitions/msdos.c
++++ b/block/partitions/msdos.c
+@@ -300,7 +300,9 @@ static void parse_bsd(struct parsed_partitions *state,
+ 			continue;
+ 		bsd_start = le32_to_cpu(p->p_offset);
+ 		bsd_size = le32_to_cpu(p->p_size);
+-		if (memcmp(flavour, "bsd\0", 4) == 0)
++		/* FreeBSD has relative offset if C partition offset is zero */
++		if (memcmp(flavour, "bsd\0", 4) == 0 &&
++		    le32_to_cpu(l->d_partitions[2].p_offset) == 0)
+ 			bsd_start += offset;
+ 		if (offset == bsd_start && size == bsd_size)
+ 			/* full parent partition, we have it already */
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 7006dbfd39bd..6978ad86e516 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -91,13 +91,14 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
+ 
+ 	if (nbytes && walk->offset & alignmask && !err) {
+ 		walk->offset = ALIGN(walk->offset, alignmask + 1);
+-		walk->data += walk->offset;
+-
+ 		nbytes = min(nbytes,
+ 			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
+ 		walk->entrylen -= nbytes;
+ 
+-		return nbytes;
++		if (nbytes) {
++			walk->data += walk->offset;
++			return nbytes;
++		}
+ 	}
+ 
+ 	if (walk->flags & CRYPTO_ALG_ASYNC)
+diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
+index 5d355e0c2633..f3c4f0cd62dd 100644
+--- a/crypto/async_tx/async_pq.c
++++ b/crypto/async_tx/async_pq.c
+@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
+ 	dma_addr_t dma_dest[2];
+ 	int src_off = 0;
+ 
+-	if (submit->flags & ASYNC_TX_FENCE)
+-		dma_flags |= DMA_PREP_FENCE;
+-
+ 	while (src_cnt > 0) {
+ 		submit->flags = flags_orig;
+ 		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
+@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
+ 			if (cb_fn_orig)
+ 				dma_flags |= DMA_PREP_INTERRUPT;
+ 		}
++		if (submit->flags & ASYNC_TX_FENCE)
++			dma_flags |= DMA_PREP_FENCE;
+ 
+ 		/* Drivers force forward progress in case they can not provide
+ 		 * a descriptor
+diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
+index faad911d46b5..fe425951b2d1 100644
+--- a/drivers/acpi/acpica/evxfevnt.c
++++ b/drivers/acpi/acpica/evxfevnt.c
+@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_enable_event);
+ 
++	/* If Hardware Reduced flag is set, there are no fixed events */
++
++	if (acpi_gbl_reduced_hardware) {
++		return_ACPI_STATUS(AE_OK);
++	}
++
+ 	/* Decode the Fixed Event */
+ 
+ 	if (event > ACPI_EVENT_MAX) {
+@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_disable_event);
+ 
++	/* If Hardware Reduced flag is set, there are no fixed events */
++
++	if (acpi_gbl_reduced_hardware) {
++		return_ACPI_STATUS(AE_OK);
++	}
++
+ 	/* Decode the Fixed Event */
+ 
+ 	if (event > ACPI_EVENT_MAX) {
+@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)
+ 
+ 	ACPI_FUNCTION_TRACE(acpi_clear_event);
+ 
++	/* If Hardware Reduced flag is set, there are no fixed events */
++
++	if (acpi_gbl_reduced_hardware) {
++		return_ACPI_STATUS(AE_OK);
++	}
++
+ 	/* Decode the Fixed Event */
+ 
+ 	if (event > ACPI_EVENT_MAX) {
+diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
+index 2f5ddd806c58..7a09290628ec 100644
+--- a/drivers/acpi/acpica/psobject.c
++++ b/drivers/acpi/acpica/psobject.c
+@@ -118,6 +118,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+ 			     (u32)(walk_state->aml_offset +
+ 				   sizeof(struct acpi_table_header)));
+ 
++			ACPI_ERROR((AE_INFO,
++				    "Aborting disassembly, AML byte code is corrupt"));
++
+ 			/* Dump the context surrounding the invalid opcode */
+ 
+ 			acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
+@@ -126,6 +129,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
+ 					     sizeof(struct acpi_table_header) -
+ 					     16));
+ 			acpi_os_printf(" */\n");
++
++			/*
++			 * Just abort the disassembly, cannot continue because the
++			 * Just abort the disassembly; we cannot continue because
++			 * the parser is essentially lost, and continuing would
++			 * produce an ill-constructed parse tree that makes the
++			 * disassembler fail unpredictably.
++			return_ACPI_STATUS(AE_AML_BAD_OPCODE);
+ #endif
+ 		}
+ 
+@@ -290,6 +301,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
+ 	if (status == AE_CTRL_PARSE_CONTINUE) {
+ 		return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ 	}
++	if (ACPI_FAILURE(status)) {
++		return_ACPI_STATUS(status);
++	}
+ 
+ 	/* Create Op structure and append to parent's argument list */
+ 
+diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
+index b1def411c0b8..7249ba6b511f 100644
+--- a/drivers/acpi/pci_irq.c
++++ b/drivers/acpi/pci_irq.c
+@@ -136,9 +136,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry,
+ 		quirk = &prt_quirks[i];
+ 
+ 		/* All current quirks involve link devices, not GSIs */
+-		if (!prt->source)
+-			continue;
+-
+ 		if (dmi_check_system(quirk->system) &&
+ 		    entry->id.segment == quirk->segment &&
+ 		    entry->id.bus == quirk->bus &&
+diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
+index 6a082d4de12c..24a793957bc0 100644
+--- a/drivers/acpi/pmic/intel_pmic_xpower.c
++++ b/drivers/acpi/pmic/intel_pmic_xpower.c
+@@ -28,97 +28,97 @@ static struct pmic_table power_table[] = {
+ 		.address = 0x00,
+ 		.reg = 0x13,
+ 		.bit = 0x05,
+-	},
++	}, /* ALD1 */
+ 	{
+ 		.address = 0x04,
+ 		.reg = 0x13,
+ 		.bit = 0x06,
+-	},
++	}, /* ALD2 */
+ 	{
+ 		.address = 0x08,
+ 		.reg = 0x13,
+ 		.bit = 0x07,
+-	},
++	}, /* ALD3 */
+ 	{
+ 		.address = 0x0c,
+ 		.reg = 0x12,
+ 		.bit = 0x03,
+-	},
++	}, /* DLD1 */
+ 	{
+ 		.address = 0x10,
+ 		.reg = 0x12,
+ 		.bit = 0x04,
+-	},
++	}, /* DLD2 */
+ 	{
+ 		.address = 0x14,
+ 		.reg = 0x12,
+ 		.bit = 0x05,
+-	},
++	}, /* DLD3 */
+ 	{
+ 		.address = 0x18,
+ 		.reg = 0x12,
+ 		.bit = 0x06,
+-	},
++	}, /* DLD4 */
+ 	{
+ 		.address = 0x1c,
+ 		.reg = 0x12,
+ 		.bit = 0x00,
+-	},
++	}, /* ELD1 */
+ 	{
+ 		.address = 0x20,
+ 		.reg = 0x12,
+ 		.bit = 0x01,
+-	},
++	}, /* ELD2 */
+ 	{
+ 		.address = 0x24,
+ 		.reg = 0x12,
+ 		.bit = 0x02,
+-	},
++	}, /* ELD3 */
+ 	{
+ 		.address = 0x28,
+ 		.reg = 0x13,
+ 		.bit = 0x02,
+-	},
++	}, /* FLD1 */
+ 	{
+ 		.address = 0x2c,
+ 		.reg = 0x13,
+ 		.bit = 0x03,
+-	},
++	}, /* FLD2 */
+ 	{
+ 		.address = 0x30,
+ 		.reg = 0x13,
+ 		.bit = 0x04,
+-	},
++	}, /* FLD3 */
+ 	{
+-		.address = 0x38,
++		.address = 0x34,
+ 		.reg = 0x10,
+ 		.bit = 0x03,
+-	},
++	}, /* BUC1 */
+ 	{
+-		.address = 0x3c,
++		.address = 0x38,
+ 		.reg = 0x10,
+ 		.bit = 0x06,
+-	},
++	}, /* BUC2 */
+ 	{
+-		.address = 0x40,
++		.address = 0x3c,
+ 		.reg = 0x10,
+ 		.bit = 0x05,
+-	},
++	}, /* BUC3 */
+ 	{
+-		.address = 0x44,
++		.address = 0x40,
+ 		.reg = 0x10,
+ 		.bit = 0x04,
+-	},
++	}, /* BUC4 */
+ 	{
+-		.address = 0x48,
++		.address = 0x44,
+ 		.reg = 0x10,
+ 		.bit = 0x01,
+-	},
++	}, /* BUC5 */
+ 	{
+-		.address = 0x4c,
++		.address = 0x48,
+ 		.reg = 0x10,
+ 		.bit = 0x00
+-	},
++	}, /* BUC6 */
+ };
+ 
+ /* TMP0 - TMP5 are the same, all from GPADC */
+diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
+index d9f71581b79b..bdc3063f694d 100644
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -231,11 +231,16 @@ static int __acpi_processor_start(struct acpi_device *device)
+ static int acpi_processor_start(struct device *dev)
+ {
+ 	struct acpi_device *device = ACPI_COMPANION(dev);
++	int ret;
+ 
+ 	if (!device)
+ 		return -ENODEV;
+ 
+-	return __acpi_processor_start(device);
++	/* Protect against concurrent CPU hotplug operations */
++	get_online_cpus();
++	ret = __acpi_processor_start(device);
++	put_online_cpus();
++	return ret;
+ }
+ 
+ static int acpi_processor_stop(struct device *dev)
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index f3df4b5e5fc9..cd7398b7aa67 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -66,8 +66,8 @@ struct acpi_processor_throttling_arg {
+ #define THROTTLING_POSTCHANGE      (2)
+ 
+ static int acpi_processor_get_throttling(struct acpi_processor *pr);
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+-						int state, bool force);
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++					   int state, bool force, bool direct);
+ 
+ static int acpi_processor_update_tsd_coord(void)
+ {
+@@ -895,7 +895,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+ 			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ 				"Invalid throttling state, reset\n"));
+ 			state = 0;
+-			ret = acpi_processor_set_throttling(pr, state, true);
++			ret = __acpi_processor_set_throttling(pr, state, true,
++							      true);
+ 			if (ret)
+ 				return ret;
+ 		}
+@@ -905,36 +906,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
+ 	return 0;
+ }
+ 
+-static int acpi_processor_get_throttling(struct acpi_processor *pr)
++static long __acpi_processor_get_throttling(void *data)
+ {
+-	cpumask_var_t saved_mask;
+-	int ret;
++	struct acpi_processor *pr = data;
++
++	return pr->throttling.acpi_processor_get_throttling(pr);
++}
+ 
++static int acpi_processor_get_throttling(struct acpi_processor *pr)
++{
+ 	if (!pr)
+ 		return -EINVAL;
+ 
+ 	if (!pr->flags.throttling)
+ 		return -ENODEV;
+ 
+-	if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+-		return -ENOMEM;
+-
+ 	/*
+-	 * Migrate task to the cpu pointed by pr.
++	 * This is either called from the CPU hotplug callback of
++	 * processor_driver or via the ACPI probe function. In the latter
++	 * case the CPU is not guaranteed to be online. Both call sites are
++	 * protected against CPU hotplug.
+ 	 */
+-	cpumask_copy(saved_mask, &current->cpus_allowed);
+-	/* FIXME: use work_on_cpu() */
+-	if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+-		/* Can't migrate to the target pr->id CPU. Exit */
+-		free_cpumask_var(saved_mask);
++	if (!cpu_online(pr->id))
+ 		return -ENODEV;
+-	}
+-	ret = pr->throttling.acpi_processor_get_throttling(pr);
+-	/* restore the previous state */
+-	set_cpus_allowed_ptr(current, saved_mask);
+-	free_cpumask_var(saved_mask);
+ 
+-	return ret;
++	return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
+ }
+ 
+ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+@@ -1084,8 +1080,15 @@ static long acpi_processor_throttling_fn(void *data)
+ 			arg->target_state, arg->force);
+ }
+ 
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+-						int state, bool force)
++static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
++{
++	if (direct)
++		return fn(arg);
++	return work_on_cpu(cpu, fn, arg);
++}
++
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++					   int state, bool force, bool direct)
+ {
+ 	int ret = 0;
+ 	unsigned int i;
+@@ -1134,7 +1137,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ 		arg.pr = pr;
+ 		arg.target_state = state;
+ 		arg.force = force;
+-		ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
++		ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
++				  direct);
+ 	} else {
+ 		/*
+ 		 * When the T-state coordination is SW_ALL or HW_ALL,
+@@ -1167,8 +1171,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ 			arg.pr = match_pr;
+ 			arg.target_state = state;
+ 			arg.force = force;
+-			ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+-				&arg);
++			ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
++					  &arg, direct);
+ 		}
+ 	}
+ 	/*
+@@ -1186,6 +1190,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
+ 	return ret;
+ }
+ 
++int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
++				  bool force)
++{
++	return __acpi_processor_set_throttling(pr, state, force, false);
++}
++
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+ {
+ 	int result = 0;
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index f0099360039e..1accc01fb0ca 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -68,11 +68,12 @@ static ssize_t driver_override_show(struct device *_dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct amba_device *dev = to_amba_device(_dev);
++	ssize_t len;
+ 
+-	if (!dev->driver_override)
+-		return 0;
+-
+-	return sprintf(buf, "%s\n", dev->driver_override);
++	device_lock(_dev);
++	len = sprintf(buf, "%s\n", dev->driver_override);
++	device_unlock(_dev);
++	return len;
+ }
+ 
+ static ssize_t driver_override_store(struct device *_dev,
+@@ -80,9 +81,10 @@ static ssize_t driver_override_store(struct device *_dev,
+ 				     const char *buf, size_t count)
+ {
+ 	struct amba_device *dev = to_amba_device(_dev);
+-	char *driver_override, *old = dev->driver_override, *cp;
++	char *driver_override, *old, *cp;
+ 
+-	if (count > PATH_MAX)
++	/* We need to keep extra room for a newline */
++	if (count >= (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+ 	driver_override = kstrndup(buf, count, GFP_KERNEL);
+@@ -93,12 +95,15 @@ static ssize_t driver_override_store(struct device *_dev,
+ 	if (cp)
+ 		*cp = '\0';
+ 
++	device_lock(_dev);
++	old = dev->driver_override;
+ 	if (strlen(driver_override)) {
+ 		dev->driver_override = driver_override;
+ 	} else {
+ 	       kfree(driver_override);
+ 	       dev->driver_override = NULL;
+ 	}
++	device_unlock(_dev);
+ 
+ 	kfree(old);
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 3b0cebb2122b..19733cdcd45c 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -542,7 +542,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	  .driver_data = board_ahci_yes_fbs },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
+ 	  .driver_data = board_ahci_yes_fbs },
+-	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
++	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
++	  .driver_data = board_ahci_yes_fbs },
++	{ PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
+ 	  .driver_data = board_ahci_yes_fbs },
+ 
+ 	/* Promise */
+diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
+index d89305d289f6..cf7fdb79c992 100644
+--- a/drivers/ata/libahci_platform.c
++++ b/drivers/ata/libahci_platform.c
+@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
+ 
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq <= 0) {
+-		dev_err(dev, "no irq\n");
+-		return -EINVAL;
++		if (irq != -EPROBE_DEFER)
++			dev_err(dev, "no irq\n");
++		return irq;
+ 	}
+ 
+ 	/* prepare host */
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index b31d6853ba7a..accad0598253 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4226,6 +4226,28 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
++	/* Crucial BX100 SSD 500GB has broken LPM support */
++	{ "CT500BX100SSD1",		NULL,	ATA_HORKAGE_NOLPM },
++
++	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
++	{ "Crucial_CT512MX100*",	"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM |
++						ATA_HORKAGE_NOLPM, },
++	/* 512GB MX100 with newer firmware has only LPM issues */
++	{ "Crucial_CT512MX100*",	NULL,	ATA_HORKAGE_ZERO_AFTER_TRIM |
++						ATA_HORKAGE_NOLPM, },
++
++	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
++	{ "Crucial_CT480M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM |
++						ATA_HORKAGE_NOLPM, },
++	{ "Crucial_CT960M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM |
++						ATA_HORKAGE_NOLPM, },
++
++	/* Sandisk devices which are known to not handle LPM well */
++	{ "SanDisk SD7UB3Q*G1001",	NULL,	ATA_HORKAGE_NOLPM, },
++
+ 	/* devices that don't properly handle queued TRIM commands */
+ 	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+@@ -4237,7 +4259,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Crucial_CT*MX100*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+-	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++	{ "Samsung SSD 840*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "Samsung SSD 850*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+@@ -5078,8 +5102,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
+ 	 * We guarantee to LLDs that they will have at least one
+ 	 * non-zero sg if the command is a data command.
+ 	 */
+-	if (WARN_ON_ONCE(ata_is_data(prot) &&
+-			 (!qc->sg || !qc->n_elem || !qc->nbytes)))
++	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
+ 		goto sys_err;
+ 
+ 	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 4d4cdade9d7e..c471bb8637f9 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -3465,7 +3465,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
+ 		if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+ 			/* relay SCSI command to ATAPI device */
+ 			int len = COMMAND_SIZE(scsi_op);
+-			if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
++			if (unlikely(len > scmd->cmd_len ||
++				     len > dev->cdb_len ||
++				     scmd->cmd_len > ATAPI_CDB_LEN))
+ 				goto bad_cdb_len;
+ 
+ 			xlat_func = atapi_xlat;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 9e72be28ee9f..53d22cc3cd3e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -471,6 +471,9 @@ static int loop_switch(struct loop_device *lo, struct file *file)
+  */
+ static int loop_flush(struct loop_device *lo)
+ {
++	/* loop not yet configured, no running thread, nothing to flush */
++	if (lo->lo_state != Lo_bound)
++		return 0;
+ 	return loop_switch(lo, NULL);
+ }
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 32f5b87fe93c..68a92550b4cd 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -203,7 +203,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
+@@ -236,6 +235,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* QCA ROME chipset */
++	{ USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
+ 	{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
+diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
+index 738612c45266..8f78990205ec 100644
+--- a/drivers/bus/brcmstb_gisb.c
++++ b/drivers/bus/brcmstb_gisb.c
+@@ -33,8 +33,6 @@
+ #define  ARB_ERR_CAP_CLEAR		(1 << 0)
+ #define  ARB_ERR_CAP_STATUS_TIMEOUT	(1 << 12)
+ #define  ARB_ERR_CAP_STATUS_TEA		(1 << 11)
+-#define  ARB_ERR_CAP_STATUS_BS_SHIFT	(1 << 2)
+-#define  ARB_ERR_CAP_STATUS_BS_MASK	0x3c
+ #define  ARB_ERR_CAP_STATUS_WRITE	(1 << 1)
+ #define  ARB_ERR_CAP_STATUS_VALID	(1 << 0)
+ 
+@@ -43,7 +41,6 @@ enum {
+ 	ARB_ERR_CAP_CLR,
+ 	ARB_ERR_CAP_HI_ADDR,
+ 	ARB_ERR_CAP_ADDR,
+-	ARB_ERR_CAP_DATA,
+ 	ARB_ERR_CAP_STATUS,
+ 	ARB_ERR_CAP_MASTER,
+ };
+@@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = {
+ 	[ARB_ERR_CAP_CLR]	= 0x0c4,
+ 	[ARB_ERR_CAP_HI_ADDR]	= -1,
+ 	[ARB_ERR_CAP_ADDR]	= 0x0c8,
+-	[ARB_ERR_CAP_DATA]	= 0x0cc,
+ 	[ARB_ERR_CAP_STATUS]	= 0x0d0,
+ 	[ARB_ERR_CAP_MASTER]	= -1,
+ };
+@@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = {
+ 	[ARB_ERR_CAP_CLR]	= 0x0c8,
+ 	[ARB_ERR_CAP_HI_ADDR]	= -1,
+ 	[ARB_ERR_CAP_ADDR]	= 0x0cc,
+-	[ARB_ERR_CAP_DATA]	= 0x0d0,
+ 	[ARB_ERR_CAP_STATUS]	= 0x0d4,
+ 	[ARB_ERR_CAP_MASTER]	= 0x0d8,
+ };
+@@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = {
+ 	[ARB_ERR_CAP_CLR]	= 0x168,
+ 	[ARB_ERR_CAP_HI_ADDR]	= -1,
+ 	[ARB_ERR_CAP_ADDR]	= 0x16c,
+-	[ARB_ERR_CAP_DATA]	= 0x170,
+ 	[ARB_ERR_CAP_STATUS]	= 0x174,
+ 	[ARB_ERR_CAP_MASTER]	= 0x178,
+ };
+@@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = {
+ 	[ARB_ERR_CAP_CLR]	= 0x7e4,
+ 	[ARB_ERR_CAP_HI_ADDR]	= 0x7e8,
+ 	[ARB_ERR_CAP_ADDR]	= 0x7ec,
+-	[ARB_ERR_CAP_DATA]	= 0x7f0,
+ 	[ARB_ERR_CAP_STATUS]	= 0x7f4,
+ 	[ARB_ERR_CAP_MASTER]	= 0x7f8,
+ };
+@@ -104,13 +97,27 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg)
+ {
+ 	int offset = gdev->gisb_offsets[reg];
+ 
+-	/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
+-	if (offset == -1)
+-		return 1;
++	if (offset < 0) {
++		/* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */
++		if (reg == ARB_ERR_CAP_MASTER)
++			return 1;
++		else
++			return 0;
++	}
+ 
+ 	return ioread32(gdev->base + offset);
+ }
+ 
++static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev)
++{
++	u64 value;
++
++	value = gisb_read(gdev, ARB_ERR_CAP_ADDR);
++	value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
++
++	return value;
++}
++
+ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg)
+ {
+ 	int offset = gdev->gisb_offsets[reg];
+@@ -173,7 +180,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ 					const char *reason)
+ {
+ 	u32 cap_status;
+-	unsigned long arb_addr;
++	u64 arb_addr;
+ 	u32 master;
+ 	const char *m_name;
+ 	char m_fmt[11];
+@@ -185,10 +192,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ 		return 1;
+ 
+ 	/* Read the address and master */
+-	arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff;
+-#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+-	arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32;
+-#endif
++	arb_addr = gisb_read_address(gdev);
+ 	master = gisb_read(gdev, ARB_ERR_CAP_MASTER);
+ 
+ 	m_name = brcmstb_gisb_master_to_str(gdev, master);
+@@ -197,7 +201,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev,
+ 		m_name = m_fmt;
+ 	}
+ 
+-	pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n",
++	pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n",
+ 		__func__, reason, arb_addr,
+ 		cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R',
+ 		cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "",
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 5d28a45d2960..3922ce87c2e4 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2357,7 +2357,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ 	if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
+ 		return media_changed(cdi, 1);
+ 
+-	if ((unsigned int)arg >= cdi->capacity)
++	if (arg >= cdi->capacity)
+ 		return -EINVAL;
+ 
+ 	info = kmalloc(sizeof(*info), GFP_KERNEL);
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index c6dea3f6917b..b38e31221a7e 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
+ 		}
+ 	}
+ 	wmb();
++	if (intel_private.driver->chipset_flush)
++		intel_private.driver->chipset_flush();
+ }
+ EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+ 
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 9156bbd90b56..0166be52aacb 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -408,6 +408,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+ 		ssif_info->ssif_state = SSIF_NORMAL;
++		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+ 
+@@ -430,6 +431,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+ 		ssif_info->ssif_state = SSIF_NORMAL;
++		ipmi_ssif_unlock_cond(ssif_info, flags);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index f335fcee09af..0109d3fb5fbc 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -509,7 +509,7 @@ static void panic_halt_ipmi_heartbeat(void)
+ 	msg.cmd = IPMI_WDOG_RESET_TIMER;
+ 	msg.data = NULL;
+ 	msg.data_len = 0;
+-	atomic_add(2, &panic_done_count);
++	atomic_add(1, &panic_done_count);
+ 	rv = ipmi_request_supply_msgs(watchdog_user,
+ 				      (struct ipmi_addr *) &addr,
+ 				      0,
+@@ -519,7 +519,7 @@ static void panic_halt_ipmi_heartbeat(void)
+ 				      &panic_halt_heartbeat_recv_msg,
+ 				      1);
+ 	if (rv)
+-		atomic_sub(2, &panic_done_count);
++		atomic_sub(1, &panic_done_count);
+ }
+ 
+ static struct ipmi_smi_msg panic_halt_smi_msg = {
+@@ -543,12 +543,12 @@ static void panic_halt_ipmi_set_timeout(void)
+ 	/* Wait for the messages to be free. */
+ 	while (atomic_read(&panic_done_count) != 0)
+ 		ipmi_poll_interface(watchdog_user);
+-	atomic_add(2, &panic_done_count);
++	atomic_add(1, &panic_done_count);
+ 	rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
+ 				&panic_halt_recv_msg,
+ 				&send_heartbeat_now);
+ 	if (rv) {
+-		atomic_sub(2, &panic_done_count);
++		atomic_sub(1, &panic_done_count);
+ 		printk(KERN_WARNING PFX
+ 		       "Unable to extend the watchdog timeout.");
+ 	} else {
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index d55156fc064d..4ba5c7e4e254 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -704,7 +704,7 @@ retry:
+ 
+ static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+-	const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
++	const int nbits_max = r->poolinfo->poolwords * 32;
+ 
+ 	/* Cap the value to avoid overflows */
+ 	nbits = min(nbits,  nbits_max);
+@@ -863,12 +863,16 @@ static void add_interrupt_bench(cycles_t start)
+ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+ {
+ 	__u32 *ptr = (__u32 *) regs;
++	unsigned int idx;
+ 
+ 	if (regs == NULL)
+ 		return 0;
+-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
+-		f->reg_idx = 0;
+-	return *(ptr + f->reg_idx++);
++	idx = READ_ONCE(f->reg_idx);
++	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
++		idx = 0;
++	ptr += idx++;
++	WRITE_ONCE(f->reg_idx, idx);
++	return *ptr;
+ }
+ 
+ void add_interrupt_randomness(int irq, int irq_flags)
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 5809567d3cf0..d696e5c3d079 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -283,7 +283,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
+ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ {
+ 	int size = 0;
+-	int expected, status;
++	int status;
++	u32 expected;
+ 
+ 	if (count < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+@@ -298,7 +299,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+ 	}
+ 
+ 	expected = be32_to_cpu(*(__be32 *) (buf + 2));
+-	if (expected > count) {
++	if (expected > count || expected < TPM_HEADER_SIZE) {
+ 		size = -EIO;
+ 		goto out;
+ 	}
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 374b0006aa7a..a7adfc633db9 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1399,7 +1399,6 @@ static int add_port(struct ports_device *portdev, u32 id)
+ {
+ 	char debugfs_name[16];
+ 	struct port *port;
+-	struct port_buffer *buf;
+ 	dev_t devt;
+ 	unsigned int nr_added_bufs;
+ 	int err;
+@@ -1510,8 +1509,6 @@ static int add_port(struct ports_device *portdev, u32 id)
+ 	return 0;
+ 
+ free_inbufs:
+-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+-		free_buf(buf, true);
+ free_device:
+ 	device_destroy(pdrvdata.class, port->dev->devt);
+ free_cdev:
+@@ -1536,34 +1533,14 @@ static void remove_port(struct kref *kref)
+ 
+ static void remove_port_data(struct port *port)
+ {
+-	struct port_buffer *buf;
+-
+ 	spin_lock_irq(&port->inbuf_lock);
+ 	/* Remove unused data this port might have received. */
+ 	discard_port_data(port);
+ 	spin_unlock_irq(&port->inbuf_lock);
+ 
+-	/* Remove buffers we queued up for the Host to send us data in. */
+-	do {
+-		spin_lock_irq(&port->inbuf_lock);
+-		buf = virtqueue_detach_unused_buf(port->in_vq);
+-		spin_unlock_irq(&port->inbuf_lock);
+-		if (buf)
+-			free_buf(buf, true);
+-	} while (buf);
+-
+ 	spin_lock_irq(&port->outvq_lock);
+ 	reclaim_consumed_buffers(port);
+ 	spin_unlock_irq(&port->outvq_lock);
+-
+-	/* Free pending buffers from the out-queue. */
+-	do {
+-		spin_lock_irq(&port->outvq_lock);
+-		buf = virtqueue_detach_unused_buf(port->out_vq);
+-		spin_unlock_irq(&port->outvq_lock);
+-		if (buf)
+-			free_buf(buf, true);
+-	} while (buf);
+ }
+ 
+ /*
+@@ -1788,13 +1765,24 @@ static void control_work_handler(struct work_struct *work)
+ 	spin_unlock(&portdev->c_ivq_lock);
+ }
+ 
++static void flush_bufs(struct virtqueue *vq, bool can_sleep)
++{
++	struct port_buffer *buf;
++	unsigned int len;
++
++	while ((buf = virtqueue_get_buf(vq, &len)))
++		free_buf(buf, can_sleep);
++}
++
+ static void out_intr(struct virtqueue *vq)
+ {
+ 	struct port *port;
+ 
+ 	port = find_port_by_vq(vq->vdev->priv, vq);
+-	if (!port)
++	if (!port) {
++		flush_bufs(vq, false);
+ 		return;
++	}
+ 
+ 	wake_up_interruptible(&port->waitqueue);
+ }
+@@ -1805,8 +1793,10 @@ static void in_intr(struct virtqueue *vq)
+ 	unsigned long flags;
+ 
+ 	port = find_port_by_vq(vq->vdev->priv, vq);
+-	if (!port)
++	if (!port) {
++		flush_bufs(vq, false);
+ 		return;
++	}
+ 
+ 	spin_lock_irqsave(&port->inbuf_lock, flags);
+ 	port->inbuf = get_inbuf(port);
+@@ -1981,6 +1971,15 @@ static const struct file_operations portdev_fops = {
+ 
+ static void remove_vqs(struct ports_device *portdev)
+ {
++	struct virtqueue *vq;
++
++	virtio_device_for_each_vq(portdev->vdev, vq) {
++		struct port_buffer *buf;
++
++		flush_bufs(vq, true);
++		while ((buf = virtqueue_detach_unused_buf(vq)))
++			free_buf(buf, true);
++	}
+ 	portdev->vdev->config->del_vqs(portdev->vdev);
+ 	kfree(portdev->in_vqs);
+ 	kfree(portdev->out_vqs);
+diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
+index 30335d3b99af..9db6d57f7ccc 100644
+--- a/drivers/clk/clk-si5351.c
++++ b/drivers/clk/clk-si5351.c
+@@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = {
+ 	"xtal", "clkin"
+ };
+ static const char * const si5351_pll_names[] = {
+-	"plla", "pllb", "vxco"
++	"si5351_plla", "si5351_pllb", "si5351_vxco"
+ };
+ static const char * const si5351_msynth_names[] = {
+ 	"ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7"
+diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
+index 8bccf4ecdab6..9ff4ea63932d 100644
+--- a/drivers/clk/mvebu/armada-38x.c
++++ b/drivers/clk/mvebu/armada-38x.c
+@@ -46,10 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
+ }
+ 
+ static const u32 armada_38x_cpu_frequencies[] __initconst = {
+-	0, 0, 0, 0,
+-	1066 * 1000 * 1000, 0, 0, 0,
++	666 * 1000 * 1000,  0, 800 * 1000 * 1000, 0,
++	1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
+ 	1332 * 1000 * 1000, 0, 0, 0,
+-	1600 * 1000 * 1000,
++	1600 * 1000 * 1000, 0, 0, 0,
++	1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
+ };
+ 
+ static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
+@@ -75,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
+ };
+ 
+ static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
+-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+-	{1, 2}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {1, 2}, {0, 1},
++	{1, 2}, {0, 1}, {1, 2}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {0, 1}, {1, 2},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+@@ -90,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+ 	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+-	{0, 1}, {0, 1}, {0, 1}, {0, 1},
++	{1, 2}, {0, 1}, {0, 1}, {7, 15},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+ 	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
+index 86628e22b2a3..719c3d9f07fb 100644
+--- a/drivers/cpufreq/sh-cpufreq.c
++++ b/drivers/cpufreq/sh-cpufreq.c
+@@ -30,54 +30,63 @@
+ 
+ static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+ 
++struct cpufreq_target {
++	struct cpufreq_policy	*policy;
++	unsigned int		freq;
++};
++
+ static unsigned int sh_cpufreq_get(unsigned int cpu)
+ {
+ 	return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+ }
+ 
+-/*
+- * Here we notify other drivers of the proposed change and the final change.
+- */
+-static int sh_cpufreq_target(struct cpufreq_policy *policy,
+-			     unsigned int target_freq,
+-			     unsigned int relation)
++static long __sh_cpufreq_target(void *arg)
+ {
+-	unsigned int cpu = policy->cpu;
++	struct cpufreq_target *target = arg;
++	struct cpufreq_policy *policy = target->policy;
++	int cpu = policy->cpu;
+ 	struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+-	cpumask_t cpus_allowed;
+ 	struct cpufreq_freqs freqs;
+ 	struct device *dev;
+ 	long freq;
+ 
+-	cpus_allowed = current->cpus_allowed;
+-	set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+-	BUG_ON(smp_processor_id() != cpu);
++	if (smp_processor_id() != cpu)
++		return -ENODEV;
+ 
+ 	dev = get_cpu_device(cpu);
+ 
+ 	/* Convert target_freq from kHz to Hz */
+-	freq = clk_round_rate(cpuclk, target_freq * 1000);
++	freq = clk_round_rate(cpuclk, target->freq * 1000);
+ 
+ 	if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ 		return -EINVAL;
+ 
+-	dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
++	dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
+ 
+ 	freqs.old	= sh_cpufreq_get(cpu);
+ 	freqs.new	= (freq + 500) / 1000;
+ 	freqs.flags	= 0;
+ 
+-	cpufreq_freq_transition_begin(policy, &freqs);
+-	set_cpus_allowed_ptr(current, &cpus_allowed);
++	cpufreq_freq_transition_begin(target->policy, &freqs);
+ 	clk_set_rate(cpuclk, freq);
+-	cpufreq_freq_transition_end(policy, &freqs, 0);
++	cpufreq_freq_transition_end(target->policy, &freqs, 0);
+ 
+ 	dev_dbg(dev, "set frequency %lu Hz\n", freq);
+-
+ 	return 0;
+ }
+ 
++/*
++ * Here we notify other drivers of the proposed change and the final change.
++ */
++static int sh_cpufreq_target(struct cpufreq_policy *policy,
++			     unsigned int target_freq,
++			     unsigned int relation)
++{
++	struct cpufreq_target data = { .policy = policy, .freq = target_freq };
++
++	return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
++}
++
+ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+ {
+ 	struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
+diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
+index a5c111b67f37..ea11a33e7fff 100644
+--- a/drivers/cpuidle/dt_idle_states.c
++++ b/drivers/cpuidle/dt_idle_states.c
+@@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
+ 		if (!state_node)
+ 			break;
+ 
+-		if (!of_device_is_available(state_node))
++		if (!of_device_is_available(state_node)) {
++			of_node_put(state_node);
+ 			continue;
++		}
+ 
+ 		if (!idle_state_valid(state_node, i, cpumask)) {
+ 			pr_warn("%s idle state not valid, bailing out\n",
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index c5e6c82516ce..e4b4c5c07037 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1003,10 +1003,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+ 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ 		rmb();
+-		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+-		rmb();
+ 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ 		rmb();
++		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
++		rmb();
+ 		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+ 		rmb();
+ 
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 62bbd79338e0..348259b0db52 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1503,17 +1503,24 @@ static int sdma_probe(struct platform_device *pdev)
+ 	if (IS_ERR(sdma->clk_ahb))
+ 		return PTR_ERR(sdma->clk_ahb);
+ 
+-	clk_prepare(sdma->clk_ipg);
+-	clk_prepare(sdma->clk_ahb);
++	ret = clk_prepare(sdma->clk_ipg);
++	if (ret)
++		return ret;
++
++	ret = clk_prepare(sdma->clk_ahb);
++	if (ret)
++		goto err_clk;
+ 
+ 	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
+ 			       sdma);
+ 	if (ret)
+-		return ret;
++		goto err_irq;
+ 
+ 	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
+-	if (!sdma->script_addrs)
+-		return -ENOMEM;
++	if (!sdma->script_addrs) {
++		ret = -ENOMEM;
++		goto err_irq;
++	}
+ 
+ 	/* initially no scripts available */
+ 	saddr_arr = (s32 *)sdma->script_addrs;
+@@ -1618,6 +1625,10 @@ err_register:
+ 	dma_async_device_unregister(&sdma->dma_device);
+ err_init:
+ 	kfree(sdma->script_addrs);
++err_irq:
++	clk_unprepare(sdma->clk_ahb);
++err_clk:
++	clk_unprepare(sdma->clk_ipg);
+ 	return ret;
+ }
+ 
+@@ -1628,6 +1639,8 @@ static int sdma_remove(struct platform_device *pdev)
+ 
+ 	dma_async_device_unregister(&sdma->dma_device);
+ 	kfree(sdma->script_addrs);
++	clk_unprepare(sdma->clk_ahb);
++	clk_unprepare(sdma->clk_ipg);
+ 	/* Kill the tasklet */
+ 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
+ 		struct sdma_channel *sdmac = &sdma->channel[i];
+diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
+index 0574e1bbe45c..3ce5609b4611 100644
+--- a/drivers/edac/mv64x60_edac.c
++++ b/drivers/edac/mv64x60_edac.c
+@@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
+ 		/* Non-ECC RAM? */
+ 		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ 		res = -ENODEV;
+-		goto err2;
++		goto err;
+ 	}
+ 
+ 	edac_dbg(3, "init mci\n");
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 95752d38b7fe..2ce21d9340cd 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1998,7 +1998,8 @@ struct gpio_desc *__must_check __gpiod_get_index(struct device *dev,
+ 		return desc;
+ 	}
+ 
+-	status = gpiod_request(desc, con_id);
++	/* If a connection label was passed use that, else use the device name as label */
++	status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
+ 	if (status < 0)
+ 		return ERR_PTR(status);
+ 
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+index c25728bc388a..fc924d375d95 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+@@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
+ 	return ret;
+ }
+ 
++static void kfd_topology_kobj_release(struct kobject *kobj)
++{
++	kfree(kobj);
++}
++
+ static const struct sysfs_ops sysprops_ops = {
+ 	.show = sysprops_show,
+ };
+ 
+ static struct kobj_type sysprops_type = {
++	.release = kfd_topology_kobj_release,
+ 	.sysfs_ops = &sysprops_ops,
+ };
+ 
+@@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = {
+ };
+ 
+ static struct kobj_type iolink_type = {
++	.release = kfd_topology_kobj_release,
+ 	.sysfs_ops = &iolink_ops,
+ };
+ 
+@@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = {
+ };
+ 
+ static struct kobj_type mem_type = {
++	.release = kfd_topology_kobj_release,
+ 	.sysfs_ops = &mem_ops,
+ };
+ 
+@@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = {
+ };
+ 
+ static struct kobj_type cache_type = {
++	.release = kfd_topology_kobj_release,
+ 	.sysfs_ops = &cache_ops,
+ };
+ 
+@@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = {
+ };
+ 
+ static struct kobj_type node_type = {
++	.release = kfd_topology_kobj_release,
+ 	.sysfs_ops = &node_ops,
+ };
+ 
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 53bc7a628909..27d5c7867e92 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3103,8 +3103,7 @@ monitor_name(struct detailed_timing *t, void *data)
+  * @edid: EDID to parse
+  *
+  * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The
+- * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to
+- * fill in.
++ * HDCP and Port_ID ELD fields are left for the graphics driver to fill in.
+  */
+ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+ {
+@@ -3177,6 +3176,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+ 	}
+ 	eld[5] |= sad_count << 4;
+ 
++	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
++	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
++		eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP;
++	else
++		eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI;
++
+ 	eld[DRM_ELD_BASELINE_ELD_LEN] =
+ 		DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
+ 
+diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
+index af9662e58272..5ab0f02a2ab1 100644
+--- a/drivers/gpu/drm/drm_irq.c
++++ b/drivers/gpu/drm/drm_irq.c
+@@ -1090,9 +1090,9 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
+ 	if (atomic_dec_and_test(&vblank->refcount)) {
+ 		if (drm_vblank_offdelay == 0)
+ 			return;
+-		else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
++		else if (drm_vblank_offdelay < 0)
+ 			vblank_disable_fn((unsigned long)vblank);
+-		else
++		else if (!dev->vblank_disable_immediate)
+ 			mod_timer(&vblank->disable_timer,
+ 				  jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ 	}
+@@ -1750,6 +1750,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
+ 	wake_up(&vblank->queue);
+ 	drm_handle_vblank_events(dev, crtc);
+ 
++	/* With instant-off, we defer disabling the interrupt until after
++	 * we finish processing the following vblank. The disable has to
++	 * be last (after drm_handle_vblank_events) so that the timestamp
++	 * is always accurate.
++	 */
++	if (dev->vblank_disable_immediate &&
++	    drm_vblank_offdelay > 0 &&
++	    !atomic_read(&vblank->refcount))
++		vblank_disable_fn((unsigned long)vblank);
++
+ 	spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ 
+ 	return true;
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+index 0d5b9698d384..e7d6139528ca 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
+@@ -241,6 +241,15 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
+ 
+ 	exynos_gem_obj->buffer = buf;
+ 
++	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
++		/*
++		 * when no IOMMU is available, all allocated buffers are
++		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
++		 */
++		flags &= ~EXYNOS_BO_NONCONTIG;
++		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
++	}
++
+ 	/* set memory type and cache attribute from user side. */
+ 	exynos_gem_obj->flags = flags;
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
+index 52839769eb6c..e101c2868734 100644
+--- a/drivers/gpu/drm/msm/msm_gem.c
++++ b/drivers/gpu/drm/msm/msm_gem.c
+@@ -89,14 +89,17 @@ static struct page **get_pages(struct drm_gem_object *obj)
+ 			return p;
+ 		}
+ 
++		msm_obj->pages = p;
++
+ 		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ 		if (IS_ERR(msm_obj->sgt)) {
++			void *ptr = ERR_CAST(msm_obj->sgt);
++
+ 			dev_err(dev->dev, "failed to allocate sgt\n");
+-			return ERR_CAST(msm_obj->sgt);
++			msm_obj->sgt = NULL;
++			return ptr;
+ 		}
+ 
+-		msm_obj->pages = p;
+-
+ 		/* For non-cached buffers, ensure the new pages are clean
+ 		 * because display controller, GPU, etc. are not coherent:
+ 		 */
+@@ -119,7 +122,10 @@ static void put_pages(struct drm_gem_object *obj)
+ 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ 			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ 					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+-		sg_free_table(msm_obj->sgt);
++
++		if (msm_obj->sgt)
++			sg_free_table(msm_obj->sgt);
++
+ 		kfree(msm_obj->sgt);
+ 
+ 		if (use_pages(obj))
+diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+index 6e6634cd1d17..9eedb17a6b1b 100644
+--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+@@ -287,7 +287,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+ 				msecs_to_jiffies(1))) {
+ 			dev_err(dmm->dev, "timed out waiting for done\n");
+ 			ret = -ETIMEDOUT;
++			goto cleanup;
+ 		}
++
++		/* Check the engine status before continue */
++		ret = wait_status(engine, DMM_PATSTATUS_READY |
++				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
+ 	}
+ 
+ cleanup:
+diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
+index e9718b99a8a9..ee43b48ded73 100644
+--- a/drivers/gpu/drm/omapdrm/omap_gem.c
++++ b/drivers/gpu/drm/omapdrm/omap_gem.c
+@@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj,
+ 	size_t size = PAGE_SIZE * n;
+ 	loff_t off = mmap_offset(obj) +
+ 			(entry->obj_pgoff << PAGE_SHIFT);
+-	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
++	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
+ 
+ 	if (m > 1) {
+ 		int i;
+@@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj,
+ 	 * into account in some of the math, so figure out virtual stride
+ 	 * in pages
+ 	 */
+-	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
++	const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);
+ 
+ 	/* We don't use vmf->pgoff since that has the fake offset: */
+ 	pgoff = ((unsigned long)vmf->virtual_address -
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 9f699e87320a..6806772f3647 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -89,25 +89,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+ 		/* don't do anything if sink is not display port, i.e.,
+ 		 * passive dp->(dvi|hdmi) adaptor
+ 		 */
+-		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+-			int saved_dpms = connector->dpms;
+-			/* Only turn off the display if it's physically disconnected */
+-			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+-			} else if (radeon_dp_needs_link_train(radeon_connector)) {
+-				/* Don't try to start link training before we
+-				 * have the dpcd */
+-				if (!radeon_dp_getdpcd(radeon_connector))
+-					return;
+-
+-				/* set it to OFF so that drm_helper_connector_dpms()
+-				 * won't return immediately since the current state
+-				 * is ON at this point.
+-				 */
+-				connector->dpms = DRM_MODE_DPMS_OFF;
+-				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+-			}
+-			connector->dpms = saved_dpms;
++		if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
++		    radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
++		    radeon_dp_needs_link_train(radeon_connector)) {
++			/* Don't start link training before we have the DPCD */
++			if (!radeon_dp_getdpcd(radeon_connector))
++				return;
++
++			/* Turn the connector off and back on immediately, which
++			 * will trigger link training
++			 */
++			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ 		}
+ 	}
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index 6743174acdbc..9dad7810d21b 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1321,6 +1321,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
+ 		return ERR_PTR(-ENOENT);
+ 	}
+ 
++	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
++	if (obj->import_attach) {
++		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
++		return ERR_PTR(-EINVAL);
++	}
++
+ 	radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
+ 	if (radeon_fb == NULL) {
+ 		drm_gem_object_unreference_unlocked(obj);
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 741065bd14b3..ad172473f047 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -232,9 +232,10 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 	 * may be slow
+ 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
+ 	 */
+-
++#ifndef CONFIG_COMPILE_TEST
+ #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
+ 	 thanks to write-combining
++#endif
+ 
+ 	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ 		      "better performance thanks to write-combining\n");
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 128bd66b8cb0..f11a37832d78 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -5895,9 +5895,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
+ {
+ 	u32 lane_width;
+ 	u32 new_lane_width =
+-		(radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 	u32 current_lane_width =
+-		(radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
++		((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ 
+ 	if (new_lane_width != current_lane_width) {
+ 		radeon_set_pcie_lanes(rdev, new_lane_width);
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index cd8d183dcfe5..ccb26652198b 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+ 	unsigned long start = vma->vm_start;
+ 	unsigned long size = vma->vm_end - vma->vm_start;
+-	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
++	unsigned long offset;
+ 	unsigned long page, pos;
+ 
+-	if (offset + size > info->fix.smem_len)
++	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
++		return -EINVAL;
++
++	offset = vma->vm_pgoff << PAGE_SHIFT;
++
++	if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
+ 		return -EINVAL;
+ 
+ 	pos = (unsigned long)info->fix.smem_start + offset;
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index d8638d8221ea..8331dfddbd2c 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1308,7 +1308,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+ 	 * of implement() working on 8 byte chunks
+ 	 */
+ 
+-	int len = hid_report_len(report) + 7;
++	u32 len = hid_report_len(report) + 7;
+ 
+ 	return kmalloc(len, flags);
+ }
+@@ -1373,7 +1373,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
+ {
+ 	char *buf;
+ 	int ret;
+-	int len;
++	u32 len;
+ 
+ 	buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ 	if (!buf)
+@@ -1399,14 +1399,14 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(__hid_request);
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+ 		int interrupt)
+ {
+ 	struct hid_report_enum *report_enum = hid->report_enum + type;
+ 	struct hid_report *report;
+ 	struct hid_driver *hdrv;
+ 	unsigned int a;
+-	int rsize, csize = size;
++	u32 rsize, csize = size;
+ 	u8 *cdata = data;
+ 	int ret = 0;
+ 
+@@ -1464,7 +1464,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
+  *
+  * This is data entry for lower layers.
+  */
+-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
++int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
+ {
+ 	struct hid_report_enum *report_enum;
+ 	struct hid_driver *hdrv;
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index d0c8a1c1e1fe..2fc43ad7f0b6 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -42,6 +42,12 @@ static void elo_input_configured(struct hid_device *hdev,
+ {
+ 	struct input_dev *input = hidinput->input;
+ 
++	/*
++	 * ELO devices have one Button usage in GenDesk field, which makes
++	 * hid-input map it to BTN_LEFT; that confuses userspace, which then
++	 * considers the device to be a mouse/touchpad instead of touchscreen.
++	 */
++	clear_bit(BTN_LEFT, input->keybit);
+ 	set_bit(BTN_TOUCH, input->keybit);
+ 	set_bit(ABS_PRESSURE, input->absbit);
+ 	input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0);
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 32d52d29cc68..b4ace7561555 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
+ 
+ 	/*
+ 	 * Ignore out-of-range values as per HID specification,
+-	 * section 5.10 and 6.2.25.
++	 * section 5.10 and 6.2.25, when NULL state bit is present.
++	 * When it's not, clamp the value to match Microsoft's input
++	 * driver as mentioned in "Required HID usages for digitizers":
++	 * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp
+ 	 *
+ 	 * The logical_minimum < logical_maximum check is done so that we
+ 	 * don't unintentionally discard values sent by devices which
+ 	 * don't specify logical min and max.
+ 	 */
+ 	if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
+-	    (field->logical_minimum < field->logical_maximum) &&
+-	    (value < field->logical_minimum ||
+-	     value > field->logical_maximum)) {
+-		dbg_hid("Ignoring out-of-range value %x\n", value);
+-		return;
++	    (field->logical_minimum < field->logical_maximum)) {
++		if (field->flags & HID_MAIN_ITEM_NULL_STATE &&
++		    (value < field->logical_minimum ||
++		     value > field->logical_maximum)) {
++			dbg_hid("Ignoring out-of-range value %x\n", value);
++			return;
++		}
++		value = clamp(value,
++			      field->logical_minimum,
++			      field->logical_maximum);
+ 	}
+ 
+ 	/*
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 9c2d7c23f296..c0c4df198725 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -197,6 +197,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
+ 	int ret = 0, len;
+ 	unsigned char report_number;
+ 
++	if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
++		ret = -ENODEV;
++		goto out;
++	}
++
+ 	dev = hidraw_table[minor]->hid;
+ 
+ 	if (!dev->ll_driver->raw_request) {
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index a9054be9bca2..fd72f894865d 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -137,10 +137,10 @@ struct i2c_hid {
+ 						   * register of the HID
+ 						   * descriptor. */
+ 	unsigned int		bufsize;	/* i2c buffer size */
+-	char			*inbuf;		/* Input buffer */
+-	char			*rawbuf;	/* Raw Input buffer */
+-	char			*cmdbuf;	/* Command buffer */
+-	char			*argsbuf;	/* Command arguments buffer */
++	u8			*inbuf;		/* Input buffer */
++	u8			*rawbuf;	/* Raw Input buffer */
++	u8			*cmdbuf;	/* Command buffer */
++	u8			*argsbuf;	/* Command arguments buffer */
+ 
+ 	unsigned long		flags;		/* device flags */
+ 
+@@ -385,7 +385,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 
+ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ {
+-	int ret, ret_size;
++	int ret;
++	u32 ret_size;
+ 	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+ 
+ 	if (size > ihid->bufsize)
+@@ -410,7 +411,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
+ 		return;
+ 	}
+ 
+-	if (ret_size > size) {
++	if ((ret_size > size) || (ret_size <= 2)) {
+ 		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
+ 			__func__, size, ret_size);
+ 		return;
+diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
+index e5c7a969f28b..0cb78f30696b 100644
+--- a/drivers/hsi/clients/ssi_protocol.c
++++ b/drivers/hsi/clients/ssi_protocol.c
+@@ -976,7 +976,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		goto drop;
+ 	/* Pad to 32-bits - FIXME: Revisit*/
+ 	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
+-		goto drop;
++		goto inc_dropped;
+ 
+ 	/*
+ 	 * Modem sends Phonet messages over SSI with its own endianess...
+@@ -1028,8 +1028,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
+ drop2:
+ 	hsi_free_msg(msg);
+ drop:
+-	dev->stats.tx_dropped++;
+ 	dev_kfree_skb(skb);
++inc_dropped:
++	dev->stats.tx_dropped++;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
+index 3b33af2416bb..12dc09db55e5 100644
+--- a/drivers/hwtracing/coresight/coresight-tpiu.c
++++ b/drivers/hwtracing/coresight/coresight-tpiu.c
+@@ -44,8 +44,11 @@
+ #define TPIU_ITATBCTR0		0xef8
+ 
+ /** register definition **/
++/* FFSR - 0x300 */
++#define FFSR_FT_STOPPED		BIT(1)
+ /* FFCR - 0x304 */
+ #define FFCR_FON_MAN		BIT(6)
++#define FFCR_STOP_FI		BIT(12)
+ 
+ /**
+  * @base:	memory mapped base address for this component.
+@@ -88,10 +91,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+ {
+ 	CS_UNLOCK(drvdata->base);
+ 
+-	/* Clear formatter controle reg. */
+-	writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
++	/* Clear formatter and stop on flush */
++	writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR);
+ 	/* Generate manual flush */
+-	writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
++	writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
++	/* Wait for flush to complete */
++	coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
++	/* Wait for formatter to stop */
++	coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ 
+ 	CS_LOCK(drvdata->base);
+ }
+diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
+index 35e51ce93a5c..62a56654df97 100644
+--- a/drivers/hwtracing/coresight/of_coresight.c
++++ b/drivers/hwtracing/coresight/of_coresight.c
+@@ -150,7 +150,7 @@ struct coresight_platform_data *of_get_coresight_platform_data(
+ 				continue;
+ 
+ 			/* The local out port number */
+-			pdata->outports[i] = endpoint.id;
++			pdata->outports[i] = endpoint.port;
+ 
+ 			/*
+ 			 * Get a handle on the remote port and parent
+diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
+index dfc98df7b1b6..7aa7b9cb6203 100644
+--- a/drivers/i2c/busses/i2c-scmi.c
++++ b/drivers/i2c/busses/i2c-scmi.c
+@@ -18,6 +18,9 @@
+ #define ACPI_SMBUS_HC_CLASS		"smbus"
+ #define ACPI_SMBUS_HC_DEVICE_NAME	"cmi"
+ 
++/* SMBUS HID definition as supported by Microsoft Windows */
++#define ACPI_SMBUS_MS_HID		"SMB0001"
++
+ ACPI_MODULE_NAME("smbus_cmi");
+ 
+ struct smbus_methods_t {
+@@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = {
+ static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
+ 	{"SMBUS01", (kernel_ulong_t)&smbus_methods},
+ 	{ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods},
++	{ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods},
+ 	{"", 0}
+ };
+ MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids);
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index 2ae7150442fc..2f94d1164730 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -535,6 +535,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
+ int st_accel_common_probe(struct iio_dev *indio_dev)
+ {
+ 	struct st_sensor_data *adata = iio_priv(indio_dev);
++	struct st_sensors_platform_data *pdata =
++		(struct st_sensors_platform_data *)adata->dev->platform_data;
+ 	int irq = adata->get_irq_data_ready(indio_dev);
+ 	int err;
+ 
+@@ -559,11 +561,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
+ 					&adata->sensor_settings->fs.fs_avl[0];
+ 	adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
+ 
+-	if (!adata->dev->platform_data)
+-		adata->dev->platform_data =
+-			(struct st_sensors_platform_data *)&default_accel_pdata;
++	if (!pdata)
++		pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
+ 
+-	err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
++	err = st_sensors_init_sensor(indio_dev, pdata);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
+index 7adacf160146..899ed591d666 100644
+--- a/drivers/iio/magnetometer/st_magn_spi.c
++++ b/drivers/iio/magnetometer/st_magn_spi.c
+@@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
+ }
+ 
+ static const struct spi_device_id st_magn_id_table[] = {
+-	{ LSM303DLHC_MAGN_DEV_NAME },
+-	{ LSM303DLM_MAGN_DEV_NAME },
+ 	{ LIS3MDL_MAGN_DEV_NAME },
+ 	{},
+ };
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index 1f7f844bc0b8..c80bc75790e7 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -432,6 +432,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
+ int st_press_common_probe(struct iio_dev *indio_dev)
+ {
+ 	struct st_sensor_data *press_data = iio_priv(indio_dev);
++	struct st_sensors_platform_data *pdata =
++		(struct st_sensors_platform_data *)press_data->dev->platform_data;
+ 	int irq = press_data->get_irq_data_ready(indio_dev);
+ 	int err;
+ 
+@@ -460,12 +462,10 @@ int st_press_common_probe(struct iio_dev *indio_dev)
+ 	press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
+ 
+ 	/* Some devices don't support a data ready pin. */
+-	if (!press_data->dev->platform_data &&
+-				press_data->sensor_settings->drdy_irq.addr)
+-		press_data->dev->platform_data =
+-			(struct st_sensors_platform_data *)&default_press_pdata;
++	if (!pdata && press_data->sensor_settings->drdy_irq.addr)
++		pdata =	(struct st_sensors_platform_data *)&default_press_pdata;
+ 
+-	err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
++	err = st_sensors_init_sensor(indio_dev, pdata);
+ 	if (err < 0)
+ 		return err;
+ 
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 38339d220d7f..33d69b5d70ec 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -86,6 +86,22 @@ int rdma_addr_size(struct sockaddr *addr)
+ }
+ EXPORT_SYMBOL(rdma_addr_size);
+ 
++int rdma_addr_size_in6(struct sockaddr_in6 *addr)
++{
++	int ret = rdma_addr_size((struct sockaddr *) addr);
++
++	return ret <= sizeof(*addr) ? ret : 0;
++}
++EXPORT_SYMBOL(rdma_addr_size_in6);
++
++int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
++{
++	int ret = rdma_addr_size((struct sockaddr *) addr);
++
++	return ret <= sizeof(*addr) ? ret : 0;
++}
++EXPORT_SYMBOL(rdma_addr_size_kss);
++
+ static struct rdma_addr_client self;
+ 
+ void rdma_addr_register_client(struct rdma_addr_client *client)
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index de1c8a78374e..8c89dac48a04 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -3363,6 +3363,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
+ 	struct cma_multicast *mc;
+ 	int ret;
+ 
++	if (!id->device)
++		return -EINVAL;
++
+ 	id_priv = container_of(id, struct rdma_id_private, id);
+ 	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+ 	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
+@@ -3645,7 +3648,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+ 					  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR))
+ 				goto out;
+ 			if (ibnl_put_attr(skb, nlh,
+-					  rdma_addr_size(cma_src_addr(id_priv)),
++					  rdma_addr_size(cma_dst_addr(id_priv)),
+ 					  cma_dst_addr(id_priv),
+ 					  RDMA_NL_RDMA_CM_ATTR_DST_ADDR))
+ 				goto out;
+diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
+index a626795bf9c7..f1c37b7c666f 100644
+--- a/drivers/infiniband/core/iwpm_util.c
++++ b/drivers/infiniband/core/iwpm_util.c
+@@ -654,6 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
+ 	}
+ 	skb_num++;
+ 	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
++	ret = -EINVAL;
+ 	for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
+ 		hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
+ 				     hlist_node) {
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 9e7dd06031ae..2daae8b758f1 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -411,6 +411,9 @@ err1:
+ 	mutex_lock(&mut);
+ 	idr_remove(&ctx_idr, ctx->id);
+ 	mutex_unlock(&mut);
++	mutex_lock(&file->mut);
++	list_del(&ctx->list);
++	mutex_unlock(&file->mut);
+ 	kfree(ctx);
+ 	return ret;
+ }
+@@ -522,6 +525,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
++	if (!rdma_addr_size_in6(&cmd.addr))
++		return -EINVAL;
++
+ 	ctx = ucma_get_ctx(file, cmd.id);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+@@ -535,22 +541,21 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
+ 			 int in_len, int out_len)
+ {
+ 	struct rdma_ucm_bind cmd;
+-	struct sockaddr *addr;
+ 	struct ucma_context *ctx;
+ 	int ret;
+ 
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
+-	addr = (struct sockaddr *) &cmd.addr;
+-	if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
++	if (cmd.reserved || !cmd.addr_size ||
++	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
+ 		return -EINVAL;
+ 
+ 	ctx = ucma_get_ctx(file, cmd.id);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+-	ret = rdma_bind_addr(ctx->cm_id, addr);
++	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ 	ucma_put_ctx(ctx);
+ 	return ret;
+ }
+@@ -566,13 +571,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
++	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
++	    !rdma_addr_size_in6(&cmd.dst_addr))
++		return -EINVAL;
++
+ 	ctx = ucma_get_ctx(file, cmd.id);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+ 	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+-				(struct sockaddr *) &cmd.dst_addr,
+-				cmd.timeout_ms);
++				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ 	ucma_put_ctx(ctx);
+ 	return ret;
+ }
+@@ -582,24 +590,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
+ 				 int in_len, int out_len)
+ {
+ 	struct rdma_ucm_resolve_addr cmd;
+-	struct sockaddr *src, *dst;
+ 	struct ucma_context *ctx;
+ 	int ret;
+ 
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
+-	src = (struct sockaddr *) &cmd.src_addr;
+-	dst = (struct sockaddr *) &cmd.dst_addr;
+-	if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
+-	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
++	if (cmd.reserved ||
++	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
++	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
+ 		return -EINVAL;
+ 
+ 	ctx = ucma_get_ctx(file, cmd.id);
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+-	ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
++	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
++				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ 	ucma_put_ctx(ctx);
+ 	return ret;
+ }
+@@ -1057,6 +1064,11 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
++	if (!ctx->cm_id->device) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	resp.qp_attr_mask = 0;
+ 	memset(&qp_attr, 0, sizeof qp_attr);
+ 	qp_attr.qp_state = cmd.qp_state;
+@@ -1127,6 +1139,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+ 	if (!optlen)
+ 		return -EINVAL;
+ 
++	if (!ctx->cm_id->device)
++		return -EINVAL;
++
+ 	memset(&sa_path, 0, sizeof(sa_path));
+ 	sa_path.vlan_id = 0xffff;
+ 
+@@ -1214,7 +1229,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
+ {
+ 	struct rdma_ucm_notify cmd;
+ 	struct ucma_context *ctx;
+-	int ret;
++	int ret = -EINVAL;
+ 
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+@@ -1223,7 +1238,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
+ 	if (IS_ERR(ctx))
+ 		return PTR_ERR(ctx);
+ 
+-	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
++	if (ctx->cm_id->device)
++		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
++
+ 	ucma_put_ctx(ctx);
+ 	return ret;
+ }
+@@ -1241,7 +1258,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
+ 		return -ENOSPC;
+ 
+ 	addr = (struct sockaddr *) &cmd->addr;
+-	if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
++	if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr)))
+ 		return -EINVAL;
+ 
+ 	ctx = ucma_get_ctx(file, cmd->id);
+@@ -1300,7 +1317,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
+ 	join_cmd.response = cmd.response;
+ 	join_cmd.uid = cmd.uid;
+ 	join_cmd.id = cmd.id;
+-	join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
++	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
++	if (!join_cmd.addr_size)
++		return -EINVAL;
++
+ 	join_cmd.reserved = 0;
+ 	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
+ 
+@@ -1316,6 +1336,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
+ 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ 		return -EFAULT;
+ 
++	if (!rdma_addr_size_kss(&cmd.addr))
++		return -EINVAL;
++
+ 	return ucma_process_join(file, &cmd, out_len);
+ }
+ 
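The ucma hunks above all share one pattern: a user-supplied sockaddr arrives together with a caller-declared size, and the kernel derives the expected size from the address family itself (rdma_addr_size_in6()/rdma_addr_size_kss()) before trusting either. A minimal sketch of that validation shape, using hypothetical helper names rather than the rdma-cm API:

#include <linux/errno.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/socket.h>
#include <linux/types.h>

/* Hypothetical: expected size for a family, 0 if the family is unknown. */
static size_t addr_size_of_family(const struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	default:
		return 0;	/* unknown family: reject */
	}
}

/* Reject before casting: the declared size must match the family. */
static int validate_user_addr(const struct sockaddr_storage *ss,
			      size_t declared_size)
{
	size_t expect = addr_size_of_family(ss);

	if (!expect || declared_size != expect)
		return -EINVAL;
	return 0;
}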
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 38acb3cfc545..bda76b9cf396 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -352,7 +352,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
+ 		return -EINVAL;
+ 	}
+ 
+-	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length,
++	ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length,
+ 				 offset + ib_umem_offset(umem));
+ 
+ 	if (ret < 0)
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index d35f62d4f4c5..5d4ef3567743 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -236,7 +236,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
+ 	} else {
+ 		if (ucmd) {
+ 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
++			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
++				return -EINVAL;
+ 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
++			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
++				return -EINVAL;
+ 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
+ 			qp->rq.max_post = qp->rq.wqe_cnt;
+ 		} else {
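The set_rq_size() hunk defends against a hostile ucmd twice: the user-controlled shift is bounded before 1 << shift is evaluated, and the resulting stride must hold at least wq_sig data segments so the later max_gs subtraction cannot go negative. The same two checks in isolation, as a sketch with an illustrative segment size:

#include <linux/errno.h>
#include <linux/types.h>

#define SEG_SIZE 16	/* illustrative stand-in for sizeof(struct mlx5_wqe_data_seg) */

static int check_user_wqe_shift(u32 shift, u32 wq_sig)
{
	/* Bound the shift first: 1 << shift is undefined for shift >= 32. */
	if (shift >= 32)
		return -EINVAL;
	/* The stride must hold at least wq_sig segments, otherwise
	 * max_gs = stride / SEG_SIZE - wq_sig would underflow. */
	if ((1U << shift) / SEG_SIZE < wq_sig)
		return -EINVAL;
	return 0;
}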
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+index 48d7ef51aa0c..9b8a2c000280 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+@@ -819,7 +819,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
+ 
+ 	dev->reset_stats.type = OCRDMA_RESET_STATS;
+ 	dev->reset_stats.dev = dev;
+-	if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
++	if (!debugfs_create_file("reset_stats", 0200, dev->dir,
+ 				&dev->reset_stats, &ocrdma_dbg_ops))
+ 		goto err;
+ 
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 545c7ef480e8..18f732aa1510 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -963,6 +963,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+ 		 */
+ 		priv->dev->broadcast[8] = priv->pkey >> 8;
+ 		priv->dev->broadcast[9] = priv->pkey & 0xff;
++
++		/*
++		 * Update the broadcast address in the priv->broadcast object,
++		 * if it already exists; otherwise no one will do that.
++		 */
++		if (priv->broadcast) {
++			spin_lock_irq(&priv->lock);
++			memcpy(priv->broadcast->mcmember.mgid.raw,
++			       priv->dev->broadcast + 4,
++			       sizeof(union ib_gid));
++			spin_unlock_irq(&priv->lock);
++		}
++
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index 7599fb0b2d98..17a1bb19b8d1 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -488,6 +488,22 @@ static void path_rec_completion(int status,
+ 	spin_lock_irqsave(&priv->lock, flags);
+ 
+ 	if (!IS_ERR_OR_NULL(ah)) {
++		/*
++		 * pathrec.dgid is used as the database key from the LLADDR,
++		 * it must remain unchanged even if the SA returns a different
++		 * GID to use in the AH.
++		 */
++		if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
++			   sizeof(union ib_gid))) {
++			ipoib_dbg(
++				priv,
++				"%s got PathRec for gid %pI6 while asked for %pI6\n",
++				dev->name, pathrec->dgid.raw,
++				path->pathrec.dgid.raw);
++			memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
++			       sizeof(union ib_gid));
++		}
++
+ 		path->pathrec = *pathrec;
+ 
+ 		old_ah   = path->ah;
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 18e688d68e66..d210df3f7188 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2510,9 +2510,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
+ 		ret = FAST_IO_FAIL;
+ 	else
+ 		ret = FAILED;
+-	srp_free_req(ch, req, scmnd, 0);
+-	scmnd->result = DID_ABORT << 16;
+-	scmnd->scsi_done(scmnd);
++	if (ret == SUCCESS) {
++		srp_free_req(ch, req, scmnd, 0);
++		scmnd->result = DID_ABORT << 16;
++		scmnd->scsi_done(scmnd);
++	}
+ 
+ 	return ret;
+ }
+@@ -3245,12 +3247,10 @@ static ssize_t srp_create_target(struct device *dev,
+ 				      num_online_nodes());
+ 		const int ch_end = ((node_idx + 1) * target->ch_count /
+ 				    num_online_nodes());
+-		const int cv_start = (node_idx * ibdev->num_comp_vectors /
+-				      num_online_nodes() + target->comp_vector)
+-				     % ibdev->num_comp_vectors;
+-		const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
+-				    num_online_nodes() + target->comp_vector)
+-				   % ibdev->num_comp_vectors;
++		const int cv_start = node_idx * ibdev->num_comp_vectors /
++				     num_online_nodes();
++		const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
++				   num_online_nodes();
+ 		int cpu_idx = 0;
+ 
+ 		for_each_online_cpu(cpu) {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 6c30192dcb78..ee696c6a769d 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -2981,12 +2981,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
+ 	}
+ 	spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ 
+-	if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+-		     || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+-		atomic_inc(&ch->req_lim_delta);
+-		srpt_abort_cmd(ioctx);
++	if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
+ 		return;
+-	}
+ 
+ 	dir = ioctx->cmd.data_direction;
+ 
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 009f75d25268..4e5ad9e120d6 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
+ 	if (!haptics)
+ 		return -ENOMEM;
+ 
+-	haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
++	haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ 	haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
+ 
+ 	if (pdata) {
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index fd5068b2542d..152d057ed87c 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -972,6 +972,13 @@ static int elan_probe(struct i2c_client *client,
+ 		return error;
+ 	}
+ 
++	/* Make sure there is something at this address */
++	error = i2c_smbus_read_byte(client);
++	if (error < 0) {
++		dev_dbg(&client->dev, "nothing at this address: %d\n", error);
++		return -ENXIO;
++	}
++
+ 	/* Initialize the touchpad. */
+ 	error = elan_initialize(data);
+ 	if (error)
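The probe hunk above is a cheap liveness test: i2c_smbus_read_byte() performs a one-byte read transaction and returns a negative errno if nothing ACKs the address, so a misdescribed board bails out with -ENXIO instead of half-initializing. The same pattern for a hypothetical driver:

#include <linux/i2c.h>

/* Probe-time sanity check: bail out early if no device ACKs. */
static int probe_presence(struct i2c_client *client)
{
	int ret = i2c_smbus_read_byte(client);

	if (ret < 0) {
		dev_dbg(&client->dev, "no device at 0x%02x: %d\n",
			client->addr, ret);
		return -ENXIO;
	}
	return 0;	/* something answered; safe to initialize */
}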
+diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
+index a0acbbf83bfd..4ea436c173be 100644
+--- a/drivers/input/mouse/elan_i2c_i2c.c
++++ b/drivers/input/mouse/elan_i2c_i2c.c
+@@ -555,7 +555,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client,
+ 	long ret;
+ 	int error;
+ 	int len;
+-	u8 buffer[ETP_I2C_INF_LENGTH];
++	u8 buffer[ETP_I2C_REPORT_LEN];
++
++	len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN);
++	if (len != ETP_I2C_REPORT_LEN) {
++		error = len < 0 ? len : -EIO;
++		dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n",
++			error, len);
++	}
+ 
+ 	reinit_completion(completion);
+ 	enable_irq(client->irq);
+diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
+index b604564dec5c..30328e57fdda 100644
+--- a/drivers/input/mousedev.c
++++ b/drivers/input/mousedev.c
+@@ -15,6 +15,7 @@
+ #define MOUSEDEV_MINORS		31
+ #define MOUSEDEV_MIX		63
+ 
++#include <linux/bitops.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/poll.h>
+@@ -103,7 +104,7 @@ struct mousedev_client {
+ 	spinlock_t packet_lock;
+ 	int pos_x, pos_y;
+ 
+-	signed char ps2[6];
++	u8 ps2[6];
+ 	unsigned char ready, buffer, bufsiz;
+ 	unsigned char imexseq, impsseq;
+ 	enum mousedev_emul mode;
+@@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev,
+ 		}
+ 
+ 		client->pos_x += packet->dx;
+-		client->pos_x = client->pos_x < 0 ?
+-			0 : (client->pos_x >= xres ? xres : client->pos_x);
++		client->pos_x = clamp_val(client->pos_x, 0, xres);
++
+ 		client->pos_y += packet->dy;
+-		client->pos_y = client->pos_y < 0 ?
+-			0 : (client->pos_y >= yres ? yres : client->pos_y);
++		client->pos_y = clamp_val(client->pos_y, 0, yres);
+ 
+ 		p->dx += packet->dx;
+ 		p->dy += packet->dy;
+@@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file)
+ 	return error;
+ }
+ 
+-static inline int mousedev_limit_delta(int delta, int limit)
+-{
+-	return delta > limit ? limit : (delta < -limit ? -limit : delta);
+-}
+-
+-static void mousedev_packet(struct mousedev_client *client,
+-			    signed char *ps2_data)
++static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data)
+ {
+ 	struct mousedev_motion *p = &client->packets[client->tail];
++	s8 dx, dy, dz;
++
++	dx = clamp_val(p->dx, -127, 127);
++	p->dx -= dx;
++
++	dy = clamp_val(p->dy, -127, 127);
++	p->dy -= dy;
+ 
+-	ps2_data[0] = 0x08 |
+-		((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07);
+-	ps2_data[1] = mousedev_limit_delta(p->dx, 127);
+-	ps2_data[2] = mousedev_limit_delta(p->dy, 127);
+-	p->dx -= ps2_data[1];
+-	p->dy -= ps2_data[2];
++	ps2_data[0] = BIT(3);
++	ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2);
++	ps2_data[0] |= p->buttons & 0x07;
++	ps2_data[1] = dx;
++	ps2_data[2] = dy;
+ 
+ 	switch (client->mode) {
+ 	case MOUSEDEV_EMUL_EXPS:
+-		ps2_data[3] = mousedev_limit_delta(p->dz, 7);
+-		p->dz -= ps2_data[3];
+-		ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1);
++		dz = clamp_val(p->dz, -7, 7);
++		p->dz -= dz;
++
++		ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1);
+ 		client->bufsiz = 4;
+ 		break;
+ 
+ 	case MOUSEDEV_EMUL_IMPS:
+-		ps2_data[0] |=
+-			((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
+-		ps2_data[3] = mousedev_limit_delta(p->dz, 127);
+-		p->dz -= ps2_data[3];
++		dz = clamp_val(p->dz, -127, 127);
++		p->dz -= dz;
++
++		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
++			       ((p->buttons & 0x08) >> 1);
++		ps2_data[3] = dz;
++
+ 		client->bufsiz = 4;
+ 		break;
+ 
+ 	case MOUSEDEV_EMUL_PS2:
+ 	default:
+-		ps2_data[0] |=
+-			((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1);
+ 		p->dz = 0;
++
++		ps2_data[0] |= ((p->buttons & 0x10) >> 3) |
++			       ((p->buttons & 0x08) >> 1);
++
+ 		client->bufsiz = 3;
+ 		break;
+ 	}
+@@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
+ {
+ 	struct mousedev_client *client = file->private_data;
+ 	struct mousedev *mousedev = client->mousedev;
+-	signed char data[sizeof(client->ps2)];
++	u8 data[sizeof(client->ps2)];
+ 	int retval = 0;
+ 
+ 	if (!client->ready && !client->buffer && mousedev->exist &&
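The mousedev rewrite swaps hand-rolled limit ternaries for clamp_val() and derives the PS/2 sign bits directly from the clamped s8 deltas. A reduced sketch of that packing step (bare 3-byte protocol only, button handling simplified):

#include <linux/bitops.h>
#include <linux/kernel.h>	/* clamp_val() */
#include <linux/types.h>

/* Emit one 3-byte bare-PS/2 packet from accumulated deltas. */
static void pack_ps2(u8 out[3], int *acc_dx, int *acc_dy, u8 buttons)
{
	s8 dx = clamp_val(*acc_dx, -127, 127);
	s8 dy = clamp_val(*acc_dy, -127, 127);

	*acc_dx -= dx;	/* carry the remainder to the next packet */
	*acc_dy -= dy;

	out[0] = BIT(3);	/* always-set sync bit */
	out[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); /* sign bits */
	out[0] |= buttons & 0x07;
	out[1] = dx;
	out[2] = dy;
}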
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 74d69fdbdec9..10e340943218 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -602,6 +602,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ 		},
+ 	},
++	{
++		/* Lenovo ThinkPad L460 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"),
++		},
++	},
+ 	{
+ 		/* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ 		.matches = {
+diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c
+index f0b954d46a25..b89fbc4d3096 100644
+--- a/drivers/input/touchscreen/ar1021_i2c.c
++++ b/drivers/input/touchscreen/ar1021_i2c.c
+@@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev)
+ static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume);
+ 
+ static const struct i2c_device_id ar1021_i2c_id[] = {
+-	{ "MICROCHIP_AR1021_I2C", 0 },
++	{ "ar1021", 0 },
+ 	{ },
+ };
+ MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id);
+diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
+index ccc8aa615709..0299a2882ff7 100644
+--- a/drivers/input/touchscreen/tsc2007.c
++++ b/drivers/input/touchscreen/tsc2007.c
+@@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client,
+ 
+ 	tsc2007_stop(ts);
+ 
++	/* power down the chip (TSC2007_SETUP does not ACK on I2C) */
++	err = tsc2007_xfer(ts, PWRDOWN);
++	if (err < 0) {
++		dev_err(&client->dev,
++			"Failed to setup chip: %d\n", err);
++		return err;	/* usually, chip does not respond */
++	}
++
+ 	err = input_register_device(input_dev);
+ 	if (err) {
+ 		dev_err(&client->dev,
+diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
+index 9dd8208312c2..3100bc0cc805 100644
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -163,7 +163,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
+ 				break;	/* found a free slot */
+ 		}
+ adjust_limit_pfn:
+-		limit_pfn = curr_iova->pfn_lo - 1;
++		limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0;
+ move_left:
+ 		prev = curr;
+ 		curr = rb_prev(curr);
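The iova change closes an unsigned-underflow hole: when the reserved range starts at PFN 0, pfn_lo - 1 wraps to ULONG_MAX and the downward search would restart from the top of the address space. The guard in isolation:

#include <linux/types.h>

/*
 * Step the search limit below a reserved range without letting an
 * unsigned subtraction wrap: if the range starts at PFN 0 there is
 * nothing below it, so keep the limit at 0.
 */
static unsigned long limit_below(unsigned long range_lo)
{
	return range_lo ? range_lo - 1 : 0;
}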
+diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
+index a22c33d6a486..f604a74cc890 100644
+--- a/drivers/iommu/omap-iommu.c
++++ b/drivers/iommu/omap-iommu.c
+@@ -1386,6 +1386,7 @@ static int __init omap_iommu_init(void)
+ 	const unsigned long flags = SLAB_HWCACHE_ALIGN;
+ 	size_t align = 1 << 10; /* L2 pagetable alignement */
+ 	struct device_node *np;
++	int ret;
+ 
+ 	np = of_find_matching_node(NULL, omap_iommu_of_match);
+ 	if (!np)
+@@ -1399,11 +1400,25 @@ static int __init omap_iommu_init(void)
+ 		return -ENOMEM;
+ 	iopte_cachep = p;
+ 
+-	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
+-
+ 	omap_iommu_debugfs_init();
+ 
+-	return platform_driver_register(&omap_iommu_driver);
++	ret = platform_driver_register(&omap_iommu_driver);
++	if (ret) {
++		pr_err("%s: failed to register driver\n", __func__);
++		goto fail_driver;
++	}
++
++	ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
++	if (ret)
++		goto fail_bus;
++
++	return 0;
++
++fail_bus:
++	platform_driver_unregister(&omap_iommu_driver);
++fail_driver:
++	kmem_cache_destroy(iopte_cachep);
++	return ret;
+ }
+ /* must be ready before omap3isp is probed */
+ subsys_initcall(omap_iommu_init);
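The omap_iommu_init() rework is the usual goto-unwind shape: each step that succeeds gains a cleanup action, executed in reverse order if a later step fails, so bus_set_iommu() can no longer run against an unregistered driver. A generic sketch of the pattern (all function names are placeholders):

#include <linux/init.h>

/* Placeholders for three ordered setup steps. */
int setup_cache(void);
int register_driver(void);
int attach_bus(void);
void unregister_driver(void);
void destroy_cache(void);

static int __init example_init(void)
{
	int ret;

	ret = setup_cache();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = register_driver();
	if (ret)
		goto fail_driver;

	ret = attach_bus();
	if (ret)
		goto fail_bus;

	return 0;

fail_bus:
	unregister_driver();		/* undo step 2 */
fail_driver:
	destroy_cache();		/* undo step 1 */
	return ret;
}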
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 9a791dd52199..eff99f862e83 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -674,7 +674,7 @@ static struct irq_chip its_msi_irq_chip = {
+  * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
+  */
+ #define IRQS_PER_CHUNK_SHIFT	5
+-#define IRQS_PER_CHUNK		(1 << IRQS_PER_CHUNK_SHIFT)
++#define IRQS_PER_CHUNK		(1UL << IRQS_PER_CHUNK_SHIFT)
+ 
+ static unsigned long *lpi_bitmap;
+ static u32 lpi_chunks;
+@@ -1145,11 +1145,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ 
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ 	/*
+-	 * At least one bit of EventID is being used, hence a minimum
+-	 * of two entries. No, the architecture doesn't let you
+-	 * express an ITT with a single entry.
++	 * We allocate at least one chunk worth of LPIs per device,
++	 * and thus that many ITEs. The device may require fewer though.
+ 	 */
+-	nr_ites = max(2UL, roundup_pow_of_two(nvecs));
++	nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
+ 	sz = nr_ites * its->ite_size;
+ 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+ 	itt = kzalloc(sz, GFP_KERNEL);
+diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
+index 9cb4b621fbc3..b92a19a594a1 100644
+--- a/drivers/isdn/mISDN/stack.c
++++ b/drivers/isdn/mISDN/stack.c
+@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
+ 		if (sk->sk_state != MISDN_BOUND)
+ 			continue;
+ 		if (!cskb)
+-			cskb = skb_copy(skb, GFP_KERNEL);
++			cskb = skb_copy(skb, GFP_ATOMIC);
+ 		if (!cskb) {
+ 			printk(KERN_WARNING "%s no skb\n", __func__);
+ 			break;
+diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
+index c3a08b60535b..760deffa9ad3 100644
+--- a/drivers/leds/leds-pca955x.c
++++ b/drivers/leds/leds-pca955x.c
+@@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client,
+ 			"slave address 0x%02x\n",
+ 			id->name, chip->bits, client->addr);
+ 
+-	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
++	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ 		return -EIO;
+ 
+ 	if (pdata) {
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 4d46f2ce606f..aa84fcfd59fc 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -514,15 +514,21 @@ struct open_bucket {
+ 
+ /*
+  * We keep multiple buckets open for writes, and try to segregate different
+- * write streams for better cache utilization: first we look for a bucket where
+- * the last write to it was sequential with the current write, and failing that
+- * we look for a bucket that was last used by the same task.
++ * write streams for better cache utilization: first we try to segregate flash
++ * only volume write streams from cached devices, secondly we look for a bucket
++ * where the last write to it was sequential with the current write, and
++ * failing that we look for a bucket that was last used by the same task.
+  *
+  * The idea is that if you've got multiple tasks pulling data into the cache at the
+  * same time, you'll get better cache utilization if you try to segregate their
+  * data and preserve locality.
+  *
+- * For example, say you've starting Firefox at the same time you're copying a
++ * For example, dirty sectors of a flash-only volume are not reclaimable; if
++ * they are mixed with dirty sectors of a cached device, such buckets will
++ * stay marked dirty and won't be reclaimed, even though the cached device's
++ * dirty data has been written back to the backing device.
++ *
++ * And say you start Firefox at the same time you're copying a
+  * bunch of files. Firefox will likely end up being fairly hot and stay in the
+  * cache awhile, but the data you copied might not be; if you wrote all that
+  * data to the same buckets it'd get invalidated at the same time.
+@@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
+ 	struct open_bucket *ret, *ret_task = NULL;
+ 
+ 	list_for_each_entry_reverse(ret, &c->data_buckets, list)
+-		if (!bkey_cmp(&ret->key, search))
++		if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
++		    UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
++			continue;
++		else if (!bkey_cmp(&ret->key, search))
+ 			goto found;
+ 		else if (ret->last_write_point == write_point)
+ 			ret_task = ret;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index b8013e386c76..80812d05b3e2 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -921,6 +921,12 @@ static void cached_dev_detach_finish(struct work_struct *w)
+ 
+ 	mutex_lock(&bch_register_lock);
+ 
++	cancel_delayed_work_sync(&dc->writeback_rate_update);
++	if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
++		kthread_stop(dc->writeback_thread);
++		dc->writeback_thread = NULL;
++	}
++
+ 	memset(&dc->sb.set_uuid, 0, 16);
+ 	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
+ 
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index df060fd016f6..e8200892ed41 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1773,12 +1773,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
+ 	    cmd == DM_LIST_VERSIONS_CMD)
+ 		return 0;
+ 
+-	if ((cmd == DM_DEV_CREATE_CMD)) {
++	if (cmd == DM_DEV_CREATE_CMD) {
+ 		if (!*param->name) {
+ 			DMWARN("name not supplied when creating device");
+ 			return -EINVAL;
+ 		}
+-	} else if ((*param->uuid && *param->name)) {
++	} else if (*param->uuid && *param->name) {
+ 		DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 4eb5cb18f98d..f490382173b6 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -850,8 +850,10 @@ static int add_new_disk_start(struct mddev *mddev, struct md_rdev *rdev)
+ 	cmsg.raid_slot = rdev->desc_nr;
+ 	lock_comm(cinfo);
+ 	ret = __sendmsg(cinfo, &cmsg);
+-	if (ret)
++	if (ret) {
++		unlock_comm(cinfo);
+ 		return ret;
++	}
+ 	cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
+ 	ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
+ 	cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 4cbc3df79a2a..641259fe891b 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3702,6 +3702,7 @@ static int run(struct mddev *mddev)
+ 
+ 		if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ 			discard_supported = true;
++		first = 0;
+ 	}
+ 
+ 	if (mddev->queue) {
+@@ -4110,6 +4111,7 @@ static int raid10_start_reshape(struct mddev *mddev)
+ 				diff = 0;
+ 			if (first || diff < min_offset_diff)
+ 				min_offset_diff = diff;
++			first = 0;
+ 		}
+ 	}
+ 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 907aa9c6e894..8de0b1684dc6 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
+ static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
+ {
+ 	int i;
+-	local_irq_disable();
+-	spin_lock(conf->hash_locks);
++	spin_lock_irq(conf->hash_locks);
+ 	for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)
+ 		spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
+ 	spin_lock(&conf->device_lock);
+@@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
+ {
+ 	int i;
+ 	spin_unlock(&conf->device_lock);
+-	for (i = NR_STRIPE_HASH_LOCKS; i; i--)
+-		spin_unlock(conf->hash_locks + i - 1);
+-	local_irq_enable();
++	for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
++		spin_unlock(conf->hash_locks + i);
++	spin_unlock_irq(conf->hash_locks);
+ }
+ 
+ /* bio's attached to a stripe+device for I/O are linked together in bi_sector
+@@ -728,12 +727,11 @@ static bool is_full_stripe_write(struct stripe_head *sh)
+ 
+ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ {
+-	local_irq_disable();
+ 	if (sh1 > sh2) {
+-		spin_lock(&sh2->stripe_lock);
++		spin_lock_irq(&sh2->stripe_lock);
+ 		spin_lock_nested(&sh1->stripe_lock, 1);
+ 	} else {
+-		spin_lock(&sh1->stripe_lock);
++		spin_lock_irq(&sh1->stripe_lock);
+ 		spin_lock_nested(&sh2->stripe_lock, 1);
+ 	}
+ }
+@@ -741,8 +739,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
+ {
+ 	spin_unlock(&sh1->stripe_lock);
+-	spin_unlock(&sh2->stripe_lock);
+-	local_irq_enable();
++	spin_unlock_irq(&sh2->stripe_lock);
+ }
+ 
+ /* Only freshly new full stripe normal write stripe can be added to a batch list */
+@@ -3368,9 +3365,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
+ 		BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
+ 		BUG_ON(test_bit(R5_Wantread, &dev->flags));
+ 		BUG_ON(sh->batch_head);
++
++		/*
++		 * In the raid6 case if the only non-uptodate disk is P
++		 * then we already trusted P to compute the other failed
++		 * drives. It is safe to compute rather than re-read P.
++		 * In other cases we only compute blocks from failed
++		 * devices, otherwise check/repair might fail to detect
++		 * a real inconsistency.
++		 */
++
+ 		if ((s->uptodate == disks - 1) &&
++		    ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) ||
+ 		    (s->failed && (disk_idx == s->failed_num[0] ||
+-				   disk_idx == s->failed_num[1]))) {
++				   disk_idx == s->failed_num[1])))) {
+ 			/* have disk failed, and we're requested to fetch it;
+ 			 * do compute it
+ 			 */
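The raid5 locking hunks fold local_irq_disable()/enable() into the outermost lock: spin_lock_irq() on the first lock, spin_lock_nested() (or spin_lock_nest_lock()) for the rest, and the mirror-image order on unlock. A sketch of the two-lock case, using address order as the stable lock order:

#include <linux/kernel.h>	/* swap() */
#include <linux/spinlock.h>

/* Lock two objects in a stable (address) order; irqs ride on lock 1. */
static void lock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a > b)
		swap(a, b);
	spin_lock_irq(a);		/* outermost: disables irqs */
	spin_lock_nested(b, 1);		/* nested class for lockdep */
}

static void unlock_pair(spinlock_t *a, spinlock_t *b)
{
	if (a > b)
		swap(a, b);
	spin_unlock(b);			/* inner first */
	spin_unlock_irq(a);		/* re-enables irqs */
}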
+diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
+index 72937756f60c..c084ad3f2811 100644
+--- a/drivers/media/dvb-core/dvb_ca_en50221.c
++++ b/drivers/media/dvb-core/dvb_ca_en50221.c
+@@ -749,6 +749,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b
+ 		goto exit;
+ 	}
+ 
++	/*
++	 * The CAM may need some time to settle down, or there might be a
++	 * race condition between the CAM, our write of HC and our last
++	 * check for DA. This happens if the CAM asserts DA just after we
++	 * checked DA but before we set HC. In this case it might be a bug
++	 * in the CAM to keep the FR bit, the lower layer/HW communication
++	 * might require a longer timeout, or the CAM might need more time
++	 * internally. But this happens in reality!
++	 * We need to read the status from the HW again and handle DA the
++	 * same way we did in the previous check.
++	 */
++	status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
++	if (status < 0)
++		goto exit;
++
++	if (status & (STATUSREG_DA | STATUSREG_RE)) {
++		if (status & STATUSREG_DA)
++			dvb_ca_en50221_thread_wakeup(ca);
++
++		status = -EAGAIN;
++		goto exit;
++	}
++
+ 	/* send the amount of data */
+ 	if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0)
+ 		goto exit;
+diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
+index 391e98395b41..c2cad898072a 100644
+--- a/drivers/media/dvb-frontends/si2168.c
++++ b/drivers/media/dvb-frontends/si2168.c
+@@ -14,6 +14,8 @@
+  *    GNU General Public License for more details.
+  */
+ 
++#include <linux/delay.h>
++
+ #include "si2168_priv.h"
+ 
+ static const struct dvb_frontend_ops si2168_ops;
+@@ -375,6 +377,7 @@ static int si2168_init(struct dvb_frontend *fe)
+ 		if (ret)
+ 			goto err;
+ 
++		udelay(100);
+ 		memcpy(cmd.args, "\x85", 1);
+ 		cmd.wlen = 1;
+ 		cmd.rlen = 1;
+diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
+index bd496447749a..8e4eccc1d952 100644
+--- a/drivers/media/i2c/cx25840/cx25840-core.c
++++ b/drivers/media/i2c/cx25840/cx25840-core.c
+@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
+ 	INIT_WORK(&state->fw_work, cx25840_work_handler);
+ 	init_waitqueue_head(&state->fw_wait);
+ 	q = create_singlethread_workqueue("cx25840_fw");
+-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+-	queue_work(q, &state->fw_work);
+-	schedule();
+-	finish_wait(&state->fw_wait, &wait);
+-	destroy_workqueue(q);
++	if (q) {
++		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
++		queue_work(q, &state->fw_work);
++		schedule();
++		finish_wait(&state->fw_wait, &wait);
++		destroy_workqueue(q);
++	}
+ 
+ 	/* 6. */
+ 	cx25840_write(client, 0x115, 0x8c);
+@@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client)
+ 	INIT_WORK(&state->fw_work, cx25840_work_handler);
+ 	init_waitqueue_head(&state->fw_wait);
+ 	q = create_singlethread_workqueue("cx25840_fw");
+-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+-	queue_work(q, &state->fw_work);
+-	schedule();
+-	finish_wait(&state->fw_wait, &wait);
+-	destroy_workqueue(q);
++	if (q) {
++		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
++		queue_work(q, &state->fw_work);
++		schedule();
++		finish_wait(&state->fw_wait, &wait);
++		destroy_workqueue(q);
++	}
+ 
+ 	/* Call the cx23888 specific std setup func, we no longer rely on
+ 	 * the generic cx24840 func.
+@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client)
+ 	INIT_WORK(&state->fw_work, cx25840_work_handler);
+ 	init_waitqueue_head(&state->fw_wait);
+ 	q = create_singlethread_workqueue("cx25840_fw");
+-	prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+-	queue_work(q, &state->fw_work);
+-	schedule();
+-	finish_wait(&state->fw_wait, &wait);
+-	destroy_workqueue(q);
++	if (q) {
++		prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
++		queue_work(q, &state->fw_work);
++		schedule();
++		finish_wait(&state->fw_wait, &wait);
++		destroy_workqueue(q);
++	}
+ 
+ 	cx25840_std_setup(client);
+ 
+diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c
+index f4eef2fa6f6f..cd8f8151d834 100644
+--- a/drivers/media/i2c/soc_camera/ov6650.c
++++ b/drivers/media/i2c/soc_camera/ov6650.c
+@@ -1016,7 +1016,7 @@ static int ov6650_probe(struct i2c_client *client,
+ 	priv->code	  = MEDIA_BUS_FMT_YUYV8_2X8;
+ 	priv->colorspace  = V4L2_COLORSPACE_JPEG;
+ 
+-	priv->clk = v4l2_clk_get(&client->dev, "mclk");
++	priv->clk = v4l2_clk_get(&client->dev, NULL);
+ 	if (IS_ERR(priv->clk)) {
+ 		ret = PTR_ERR(priv->clk);
+ 		goto eclkget;
+diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c
+index 8aa726651630..90fcccc05b56 100644
+--- a/drivers/media/pci/bt8xx/bt878.c
++++ b/drivers/media/pci/bt8xx/bt878.c
+@@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
+ 	       bt878_num);
+ 	if (bt878_num >= BT878_MAX) {
+ 		printk(KERN_ERR "bt878: Too many devices inserted\n");
+-		result = -ENOMEM;
+-		goto fail0;
++		return -ENOMEM;
+ 	}
+ 	if (pci_enable_device(dev))
+ 		return -EIO;
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index f838d9c7ed12..0fba4a2c1602 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
+ 		goto rc_dev_fail;
+ 
+ 	/* wire up inbound data handler */
+-	usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+-				mceusb_dev_recv, ir, ep_in->bInterval);
++	if (usb_endpoint_xfer_int(ep_in))
++		usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
++				 mceusb_dev_recv, ir, ep_in->bInterval);
++	else
++		usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
++				  mceusb_dev_recv, ir);
++
+ 	ir->urb_in->transfer_dma = ir->dma_in;
+ 	ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ 
+diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
+index 9caea8344547..d793c630f1dd 100644
+--- a/drivers/media/usb/cpia2/cpia2_v4l.c
++++ b/drivers/media/usb/cpia2/cpia2_v4l.c
+@@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+ 	struct camera_data *cam = video_drvdata(file);
+ 
+ 	if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+-	   buf->index > cam->num_frames)
++	   buf->index >= cam->num_frames)
+ 		return -EINVAL;
+ 
+ 	buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer;
+@@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf)
+ 
+ 	if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ 	   buf->memory != V4L2_MEMORY_MMAP ||
+-	   buf->index > cam->num_frames)
++	   buf->index >= cam->num_frames)
+ 		return -EINVAL;
+ 
+ 	DBG("QBUF #%d\n", buf->index);
+diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
+index 3bbc77aa6a33..483457d4904f 100644
+--- a/drivers/media/usb/usbtv/usbtv-core.c
++++ b/drivers/media/usb/usbtv/usbtv-core.c
+@@ -95,6 +95,8 @@ static int usbtv_probe(struct usb_interface *intf,
+ 	return 0;
+ 
+ usbtv_audio_fail:
++	/* we must not free at this point */
++	usb_get_dev(usbtv->udev);
+ 	usbtv_video_free(usbtv);
+ 
+ usbtv_video_fail:
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index e03aa0961360..0686cbc94675 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
+ static int put_v4l2_window32(struct v4l2_window __user *kp,
+ 			     struct v4l2_window32 __user *up)
+ {
+-	struct v4l2_clip __user *kclips = kp->clips;
++	struct v4l2_clip __user *kclips;
+ 	struct v4l2_clip32 __user *uclips;
+ 	compat_caddr_t p;
+ 	u32 clipcount;
+@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
+ 	if (!clipcount)
+ 		return 0;
+ 
++	if (get_user(kclips, &kp->clips))
++		return -EFAULT;
+ 	if (get_user(p, &up->clips))
+ 		return -EFAULT;
+ 	uclips = compat_ptr(p);
+diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
+index 28cb048f4760..907247bc2501 100644
+--- a/drivers/mfd/palmas.c
++++ b/drivers/mfd/palmas.c
+@@ -430,6 +430,20 @@ static void palmas_power_off(void)
+ {
+ 	unsigned int addr;
+ 	int ret, slave;
++	struct device_node *np = palmas_dev->dev->of_node;
++
++	if (of_property_read_bool(np, "ti,palmas-override-powerhold")) {
++		addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE,
++					  PALMAS_PRIMARY_SECONDARY_PAD2);
++		slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE);
++
++		ret = regmap_update_bits(palmas_dev->regmap[slave], addr,
++				PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0);
++		if (ret)
++			dev_err(palmas_dev->dev,
++				"Unable to write PRIMARY_SECONDARY_PAD2 %d\n",
++				ret);
++	}
+ 
+ 	if (!palmas_dev)
+ 		return;
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index cc91f7b3d90c..eb29113e0bac 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components,
+ 	for (i = 0; i < components; i++) {
+ 		edev->component[i].number = -1;
+ 		edev->component[i].slot = -1;
+-		edev->component[i].power_status = 1;
++		edev->component[i].power_status = -1;
+ 	}
+ 
+ 	mutex_lock(&container_list_lock);
+@@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev,
+ 
+ 	if (edev->cb->get_power_status)
+ 		edev->cb->get_power_status(edev, ecomp);
++
++	/* If still uninitialized, the callback failed or does not exist. */
++	if (ecomp->power_status == -1)
++		return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
++
+ 	return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+ }
+ 
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index e40bcd03bd47..2353ec9dd7d2 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -503,7 +503,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+ 		break;
+ 
+ 	default:
+-		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
+ 		rets = -ENOIOCTLCMD;
+ 	}
+ 
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index f42d9c4e4561..cc277f7849b0 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags)
+ 	size_t pas_size;
+ 	size_t vas_size;
+ 	size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
+-	const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
++	u64 num_pages;
+ 
++	if (size > SIZE_MAX - PAGE_SIZE)
++		return NULL;
++	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ 	if (num_pages >
+ 		 (SIZE_MAX - queue_size) /
+ 		 (sizeof(*queue->kernel_if->u.g.pas) +
+@@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
+ {
+ 	struct vmci_queue *queue;
+ 	size_t queue_page_size;
+-	const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
++	u64 num_pages;
+ 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+ 
++	if (size > SIZE_MAX - PAGE_SIZE)
++		return NULL;
++	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ 	if (num_pages > (SIZE_MAX - queue_size) /
+ 		 sizeof(*queue->kernel_if->u.h.page))
+ 		return NULL;
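Both vmci hunks add the same pre-check: DIV_ROUND_UP(size, PAGE_SIZE) + 1 can wrap for sizes near SIZE_MAX, so anything within a page of the top is rejected before the division. As a standalone helper (a sketch, not the vmci code):

#include <linux/kernel.h>	/* DIV_ROUND_UP, SIZE_MAX */
#include <linux/mm.h>		/* PAGE_SIZE */

/* Pages needed for a payload plus one header page, or 0 on overflow. */
static u64 safe_num_pages(u64 size)
{
	if (size > SIZE_MAX - PAGE_SIZE)
		return 0;	/* DIV_ROUND_UP(size, PAGE_SIZE) + 1 would wrap */
	return DIV_ROUND_UP(size, PAGE_SIZE) + 1;
}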
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index b5d8906ac34f..9368d49d3e83 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -2668,6 +2668,14 @@ int mmc_pm_notify(struct notifier_block *notify_block,
+ 		if (!err)
+ 			break;
+ 
++		if (!mmc_card_is_removable(host)) {
++			dev_warn(mmc_dev(host),
++				 "pre_suspend failed for non-removable host: "
++				 "%d\n", err);
++			/* Avoid removing non-removable hosts */
++			break;
++		}
++
+ 		/* Calling bus_ops->remove() with a claimed host can deadlock */
+ 		host->bus_ops->remove(host);
+ 		mmc_claim_host(host);
+diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
+index 76e8bce6f46e..ad572a0f2124 100644
+--- a/drivers/mmc/host/jz4740_mmc.c
++++ b/drivers/mmc/host/jz4740_mmc.c
+@@ -368,9 +368,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
+ 		host->irq_mask &= ~irq;
+ 	else
+ 		host->irq_mask |= irq;
+-	spin_unlock_irqrestore(&host->lock, flags);
+ 
+ 	writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
++	spin_unlock_irqrestore(&host->lock, flags);
+ }
+ 
+ static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index d0abdffb0d7c..18b716bb5752 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1693,8 +1693,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host)
+ 	 */
+ 	if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) {
+ 		struct pinctrl *p = devm_pinctrl_get(host->dev);
+-		if (!p) {
+-			ret = -ENODEV;
++		if (IS_ERR(p)) {
++			ret = PTR_ERR(p);
+ 			goto err_free_irq;
+ 		}
+ 		if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) {
+diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
+index 286b97a304cf..4509ee0b294a 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0001.c
++++ b/drivers/mtd/chips/cfi_cmdset_0001.c
+@@ -45,6 +45,7 @@
+ #define I82802AB	0x00ad
+ #define I82802AC	0x00ac
+ #define PF38F4476	0x881c
++#define M28F00AP30	0x8963
+ /* STMicroelectronics chips */
+ #define M50LPW080       0x002F
+ #define M50FLW080A	0x0080
+@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
+ 		extp->MinorVersion = '1';
+ }
+ 
++static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
++{
++	/*
++	 * Micron (was Numonyx) 1Gbit bottom-boot parts are buggy w.r.t.
++	 * Erase Suspend for their small Erase Blocks (0x8000)
++	 */
++	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
++		return 1;
++	return 0;
++}
++
+ static inline struct cfi_pri_intelext *
+ read_pri_intelext(struct map_info *map, __u16 adr)
+ {
+@@ -825,21 +837,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
+ 		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
+ 			goto sleep;
+ 
++		/* Do not allow suspend if reading/writing the EB address */
++		if ((adr & chip->in_progress_block_mask) ==
++		    chip->in_progress_block_addr)
++			goto sleep;
++
++		/* do not suspend small EBs, buggy Micron Chips */
++		if (cfi_is_micron_28F00AP30(cfi, chip) &&
++		    (chip->in_progress_block_mask == ~(0x8000-1)))
++			goto sleep;
+ 
+ 		/* Erase suspend */
+-		map_write(map, CMD(0xB0), adr);
++		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
+ 
+ 		/* If the flash has finished erasing, then 'erase suspend'
+ 		 * appears to make some (28F320) flash devices switch to
+ 		 * 'read' mode.  Make sure that we switch to 'read status'
+ 		 * mode so we get the right data. --rmk
+ 		 */
+-		map_write(map, CMD(0x70), adr);
++		map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ 		chip->oldstate = FL_ERASING;
+ 		chip->state = FL_ERASE_SUSPENDING;
+ 		chip->erase_suspended = 1;
+ 		for (;;) {
+-			status = map_read(map, adr);
++			status = map_read(map, chip->in_progress_block_addr);
+ 			if (map_word_andequal(map, status, status_OK, status_OK))
+ 			        break;
+ 
+@@ -1035,8 +1056,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
+ 		   sending the 0x70 (Read Status) command to an erasing
+ 		   chip and expecting it to be ignored, that's what we
+ 		   do. */
+-		map_write(map, CMD(0xd0), adr);
+-		map_write(map, CMD(0x70), adr);
++		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
++		map_write(map, CMD(0x70), chip->in_progress_block_addr);
+ 		chip->oldstate = FL_READY;
+ 		chip->state = FL_ERASING;
+ 		break;
+@@ -1927,6 +1948,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ 	map_write(map, CMD(0xD0), adr);
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
++	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(len - 1);
+ 
+ 	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
+ 				   adr, len,
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index c50d8cf0f60d..16faa97ac3f2 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -814,9 +814,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
+ 		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
+ 			goto sleep;
+ 
+-		/* We could check to see if we're trying to access the sector
+-		 * that is currently being erased. However, no user will try
+-		 * anything like that so we just wait for the timeout. */
++		/* Do not allow suspend if reading/writing the EB address */
++		if ((adr & chip->in_progress_block_mask) ==
++		    chip->in_progress_block_addr)
++			goto sleep;
+ 
+ 		/* Erase suspend */
+ 		/* It's harmless to issue the Erase-Suspend and Erase-Resume
+@@ -2265,6 +2266,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
+ 	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(map->size - 1);
+ 
+ 	INVALIDATE_CACHE_UDELAY(map, chip,
+ 				adr, map->size,
+@@ -2354,6 +2356,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
+ 	chip->state = FL_ERASING;
+ 	chip->erase_suspended = 0;
+ 	chip->in_progress_block_addr = adr;
++	chip->in_progress_block_mask = ~(len - 1);
+ 
+ 	INVALIDATE_CACHE_UDELAY(map, chip,
+ 				adr, len,
+diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
+index 7c0b27d132b1..b479bd81120b 100644
+--- a/drivers/mtd/chips/jedec_probe.c
++++ b/drivers/mtd/chips/jedec_probe.c
+@@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
+ 	do {
+ 		uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
+ 		mask = (1 << (cfi->device_type * 8)) - 1;
++		if (ofs >= map->size)
++			return 0;
+ 		result = map_read(map, base + ofs);
+ 		bank++;
+ 	} while ((result.x[0] & mask) == CFI_MFR_CONTINUATION);
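The cfi_cmdset hunks above record, when an erase starts, both the block address and a mask covering the erase region (~(len - 1)); later accesses mask their address and compare, refusing to suspend the erase when they target the block being erased. The test in isolation:

#include <linux/types.h>

/*
 * in_progress_mask is ~(block_len - 1) for a power-of-two, naturally
 * aligned block; masking an address with it yields that address's
 * block base, so equality means "same block as the running erase".
 */
static bool hits_erasing_block(unsigned long adr,
			       unsigned long in_progress_addr,
			       unsigned long in_progress_mask)
{
	return (adr & in_progress_mask) == in_progress_addr;
}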
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index 14a5f559e300..eb5ff54c9263 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -618,7 +618,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
+ 		chip->cmd_ctrl(mtd, readcmd, ctrl);
+ 		ctrl &= ~NAND_CTRL_CHANGE;
+ 	}
+-	chip->cmd_ctrl(mtd, command, ctrl);
++	if (command != NAND_CMD_NONE)
++		chip->cmd_ctrl(mtd, command, ctrl);
+ 
+ 	/* Address cycle, when necessary */
+ 	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+@@ -647,6 +648,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command,
+ 	 */
+ 	switch (command) {
+ 
++	case NAND_CMD_NONE:
+ 	case NAND_CMD_PAGEPROG:
+ 	case NAND_CMD_ERASE1:
+ 	case NAND_CMD_ERASE2:
+@@ -709,7 +711,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ 	}
+ 
+ 	/* Command latch cycle */
+-	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
++	if (command != NAND_CMD_NONE)
++		chip->cmd_ctrl(mtd, command,
++			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ 
+ 	if (column != -1 || page_addr != -1) {
+ 		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+@@ -742,6 +746,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ 	 */
+ 	switch (command) {
+ 
++	case NAND_CMD_NONE:
+ 	case NAND_CMD_CACHEDPROG:
+ 	case NAND_CMD_PAGEPROG:
+ 	case NAND_CMD_ERASE1:
+diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
+index c9eb78f10a0d..421ae660d579 100644
+--- a/drivers/mtd/ubi/block.c
++++ b/drivers/mtd/ubi/block.c
+@@ -241,7 +241,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
+ 	 * in any case.
+ 	 */
+ 	if (mode & FMODE_WRITE) {
+-		ret = -EPERM;
++		ret = -EROFS;
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 9fd4f7838080..db6957a2b011 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -907,6 +907,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
++	 * MLC NAND is different and needs special care, otherwise UBI or UBIFS
++	 * will die soon and you will lose all your data.
++	 */
++	if (mtd->type == MTD_MLCNANDFLASH) {
++		pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
++			mtd->index);
++		return -EINVAL;
++	}
++
+ 	if (ubi_num == UBI_DEV_NUM_AUTO) {
+ 		/* Search for an empty slot in the @ubi_devices array */
+ 		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index b2a665398bca..4c1d12bacfd0 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -331,7 +331,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ {
+ 	int i;
+ 
+-	flush_work(&ubi->fm_work);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ 	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 16f9c742bc30..32fe93a2d73e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1476,39 +1476,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 			goto err_close;
+ 	}
+ 
+-	/* If the mode uses primary, then the following is handled by
+-	 * bond_change_active_slave().
+-	 */
+-	if (!bond_uses_primary(bond)) {
+-		/* set promiscuity level to new slave */
+-		if (bond_dev->flags & IFF_PROMISC) {
+-			res = dev_set_promiscuity(slave_dev, 1);
+-			if (res)
+-				goto err_close;
+-		}
+-
+-		/* set allmulti level to new slave */
+-		if (bond_dev->flags & IFF_ALLMULTI) {
+-			res = dev_set_allmulti(slave_dev, 1);
+-			if (res)
+-				goto err_close;
+-		}
+-
+-		netif_addr_lock_bh(bond_dev);
+-
+-		dev_mc_sync_multiple(slave_dev, bond_dev);
+-		dev_uc_sync_multiple(slave_dev, bond_dev);
+-
+-		netif_addr_unlock_bh(bond_dev);
+-	}
+-
+-	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+-		/* add lacpdu mc addr to mc list */
+-		u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+-
+-		dev_mc_add(slave_dev, lacpdu_multicast);
+-	}
+-
+ 	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+ 	if (res) {
+ 		netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
+@@ -1633,8 +1600,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 	} /* switch(bond_mode) */
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-	slave_dev->npinfo = bond->dev->npinfo;
+-	if (slave_dev->npinfo) {
++	if (bond->dev->npinfo) {
+ 		if (slave_enable_netpoll(new_slave)) {
+ 			netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
+ 			res = -EBUSY;
+@@ -1665,6 +1631,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 		goto err_upper_unlink;
+ 	}
+ 
++	/* If the mode uses primary, then the following is handled by
++	 * bond_change_active_slave().
++	 */
++	if (!bond_uses_primary(bond)) {
++		/* set promiscuity level to new slave */
++		if (bond_dev->flags & IFF_PROMISC) {
++			res = dev_set_promiscuity(slave_dev, 1);
++			if (res)
++				goto err_sysfs_del;
++		}
++
++		/* set allmulti level to new slave */
++		if (bond_dev->flags & IFF_ALLMULTI) {
++			res = dev_set_allmulti(slave_dev, 1);
++			if (res) {
++				if (bond_dev->flags & IFF_PROMISC)
++					dev_set_promiscuity(slave_dev, -1);
++				goto err_sysfs_del;
++			}
++		}
++
++		netif_addr_lock_bh(bond_dev);
++		dev_mc_sync_multiple(slave_dev, bond_dev);
++		dev_uc_sync_multiple(slave_dev, bond_dev);
++		netif_addr_unlock_bh(bond_dev);
++
++		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
++			/* add lacpdu mc addr to mc list */
++			u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
++
++			dev_mc_add(slave_dev, lacpdu_multicast);
++		}
++	}
++
+ 	bond->slave_cnt++;
+ 	bond_compute_features(bond);
+ 	bond_set_carrier(bond);
+@@ -1688,6 +1688,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 	return 0;
+ 
+ /* Undo stages on error */
++err_sysfs_del:
++	bond_sysfs_slave_del(new_slave);
++
+ err_upper_unlink:
+ 	bond_upper_dev_unlink(bond_dev, slave_dev);
+ 
+@@ -1695,9 +1698,6 @@ err_unregister:
+ 	netdev_rx_handler_unregister(slave_dev);
+ 
+ err_detach:
+-	if (!bond_uses_primary(bond))
+-		bond_hw_addr_flush(bond_dev, slave_dev);
+-
+ 	vlan_vids_del_by_dev(slave_dev, bond_dev);
+ 	if (rcu_access_pointer(bond->primary_slave) == new_slave)
+ 		RCU_INIT_POINTER(bond->primary_slave, NULL);
+@@ -2533,11 +2533,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
+ 	bond_for_each_slave_rcu(bond, slave, iter) {
+ 		unsigned long trans_start = dev_trans_start(slave->dev);
+ 
++		slave->new_link = BOND_LINK_NOCHANGE;
++
+ 		if (slave->link != BOND_LINK_UP) {
+ 			if (bond_time_in_interval(bond, trans_start, 1) &&
+ 			    bond_time_in_interval(bond, slave->last_rx, 1)) {
+ 
+-				slave->link  = BOND_LINK_UP;
++				slave->new_link = BOND_LINK_UP;
+ 				slave_state_changed = 1;
+ 
+ 				/* primary_slave has no meaning in round-robin
+@@ -2564,7 +2566,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
+ 			if (!bond_time_in_interval(bond, trans_start, 2) ||
+ 			    !bond_time_in_interval(bond, slave->last_rx, 2)) {
+ 
+-				slave->link  = BOND_LINK_DOWN;
++				slave->new_link = BOND_LINK_DOWN;
+ 				slave_state_changed = 1;
+ 
+ 				if (slave->link_failure_count < UINT_MAX)
+@@ -2595,6 +2597,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
+ 		if (!rtnl_trylock())
+ 			goto re_arm;
+ 
++		bond_for_each_slave(bond, slave, iter) {
++			if (slave->new_link != BOND_LINK_NOCHANGE)
++				slave->link = slave->new_link;
++		}
++
+ 		if (slave_state_changed) {
+ 			bond_slave_state_change(bond);
+ 			if (BOND_MODE(bond) == BOND_MODE_XOR)
+@@ -3261,12 +3268,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
+ 	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
+ 		u64 nv = new[i];
+ 		u64 ov = old[i];
++		s64 delta = nv - ov;
+ 
+ 		/* detects if this particular field is 32bit only */
+ 		if (((nv | ov) >> 32) == 0)
+-			res[i] += (u32)nv - (u32)ov;
+-		else
+-			res[i] += nv - ov;
++			delta = (s64)(s32)((u32)nv - (u32)ov);
++
++		/* filter anomalies, some drivers reset their stats
++		 * at down/up events.
++		 */
++		if (delta > 0)
++			res[i] += delta;
+ 	}
+ }
+ 
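bond_fold_stats() now folds each counter through a signed delta: if neither sample touches the high 32 bits, the counter is assumed to be 32 bits wide (so a wrap still gives a small positive delta), and negative deltas are discarded because some drivers zero their stats across down/up. One counter's worth of that logic, as a sketch:

#include <linux/types.h>

/* Fold one counter sample into an accumulated total. */
static void fold_counter(u64 *total, u64 nv, u64 ov)
{
	s64 delta = nv - ov;

	/* Heuristic: if neither sample uses the high 32 bits, treat the
	 * counter as 32-bit so a wraparound still yields a small
	 * positive delta instead of a huge one. */
	if (((nv | ov) >> 32) == 0)
		delta = (s64)(s32)((u32)nv - (u32)ov);

	/* Drop negative deltas: some drivers reset stats on down/up. */
	if (delta > 0)
		*total += delta;
}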
+diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
+index c11d44984036..76b3c1462139 100644
+--- a/drivers/net/can/cc770/cc770.c
++++ b/drivers/net/can/cc770/cc770.c
+@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
+ 	return 0;
+ }
+ 
+-static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
++static void cc770_tx(struct net_device *dev, int mo)
+ {
+ 	struct cc770_priv *priv = netdev_priv(dev);
+-	struct net_device_stats *stats = &dev->stats;
+-	struct can_frame *cf = (struct can_frame *)skb->data;
+-	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
++	struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
+ 	u8 dlc, rtr;
+ 	u32 id;
+ 	int i;
+ 
+-	if (can_dropped_invalid_skb(dev, skb))
+-		return NETDEV_TX_OK;
+-
+-	if ((cc770_read_reg(priv,
+-			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+-		netdev_err(dev, "TX register is still occupied!\n");
+-		return NETDEV_TX_BUSY;
+-	}
+-
+-	netif_stop_queue(dev);
+-
+ 	dlc = cf->can_dlc;
+ 	id = cf->can_id;
+-	if (cf->can_id & CAN_RTR_FLAG)
+-		rtr = 0;
+-	else
+-		rtr = MSGCFG_DIR;
++	rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
++
++	cc770_write_reg(priv, msgobj[mo].ctrl0,
++			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ 	cc770_write_reg(priv, msgobj[mo].ctrl1,
+ 			RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+-	cc770_write_reg(priv, msgobj[mo].ctrl0,
+-			MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
++
+ 	if (id & CAN_EFF_FLAG) {
+ 		id &= CAN_EFF_MASK;
+ 		cc770_write_reg(priv, msgobj[mo].config,
+@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	for (i = 0; i < dlc; i++)
+ 		cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+ 
+-	/* Store echo skb before starting the transfer */
+-	can_put_echo_skb(skb, dev, 0);
+-
+ 	cc770_write_reg(priv, msgobj[mo].ctrl1,
+-			RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
++			RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
++	cc770_write_reg(priv, msgobj[mo].ctrl0,
++			MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
++}
+ 
+-	stats->tx_bytes += dlc;
++static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	struct cc770_priv *priv = netdev_priv(dev);
++	unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ 
++	if (can_dropped_invalid_skb(dev, skb))
++		return NETDEV_TX_OK;
+ 
+-	/*
+-	 * HM: We had some cases of repeated IRQs so make sure the
+-	 * INT is acknowledged I know it's already further up, but
+-	 * doing again fixed the issue
+-	 */
+-	cc770_write_reg(priv, msgobj[mo].ctrl0,
+-			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
++	netif_stop_queue(dev);
++
++	if ((cc770_read_reg(priv,
++			    msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
++		netdev_err(dev, "TX register is still occupied!\n");
++		return NETDEV_TX_BUSY;
++	}
++
++	priv->tx_skb = skb;
++	cc770_tx(dev, mo);
+ 
+ 	return NETDEV_TX_OK;
+ }
+@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+ 	struct cc770_priv *priv = netdev_priv(dev);
+ 	struct net_device_stats *stats = &dev->stats;
+ 	unsigned int mo = obj2msgobj(o);
++	struct can_frame *cf;
++	u8 ctrl1;
++
++	ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+ 
+-	/* Nothing more to send, switch off interrupts */
+ 	cc770_write_reg(priv, msgobj[mo].ctrl0,
+ 			MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+-	/*
+-	 * We had some cases of repeated IRQ so make sure the
+-	 * INT is acknowledged
++	cc770_write_reg(priv, msgobj[mo].ctrl1,
++			RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
++
++	if (unlikely(!priv->tx_skb)) {
++		netdev_err(dev, "missing tx skb in tx interrupt\n");
++		return;
++	}
++
++	if (unlikely(ctrl1 & MSGLST_SET)) {
++		stats->rx_over_errors++;
++		stats->rx_errors++;
++	}
++
++	/* When the CC770 is sending an RTR message and it receives a regular
++	 * message that matches the id of the RTR message, it will overwrite the
++	 * outgoing message in the TX register. When this happens we must
++	 * process the received message and try to transmit the outgoing skb
++	 * again.
+ 	 */
+-	cc770_write_reg(priv, msgobj[mo].ctrl0,
+-			MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
++	if (unlikely(ctrl1 & NEWDAT_SET)) {
++		cc770_rx(dev, mo, ctrl1);
++		cc770_tx(dev, mo);
++		return;
++	}
+ 
++	cf = (struct can_frame *)priv->tx_skb->data;
++	stats->tx_bytes += cf->can_dlc;
+ 	stats->tx_packets++;
++
++	can_put_echo_skb(priv->tx_skb, dev, 0);
+ 	can_get_echo_skb(dev, 0);
++	priv->tx_skb = NULL;
++
+ 	netif_wake_queue(dev);
+ }
+ 
+@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
+ 	priv->can.do_set_bittiming = cc770_set_bittiming;
+ 	priv->can.do_set_mode = cc770_set_mode;
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
++	priv->tx_skb = NULL;
+ 
+ 	memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+ 
+diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
+index a1739db98d91..95752e1d1283 100644
+--- a/drivers/net/can/cc770/cc770.h
++++ b/drivers/net/can/cc770/cc770.h
+@@ -193,6 +193,8 @@ struct cc770_priv {
+ 	u8 cpu_interface;	/* CPU interface register */
+ 	u8 clkout;		/* Clock out register */
+ 	u8 bus_config;		/* Bus configuration register */
++
++	struct sk_buff *tx_skb;
+ };
+ 
+ struct net_device *alloc_cc770dev(int sizeof_priv);
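The reworked cc770 flow stashes the outgoing frame in priv->tx_skb and completes it only from the TX interrupt, so a frame clobbered by a matching incoming message can be sent again. A minimal sketch of that hand-off; the start_xmit()/tx_interrupt() helpers and the stubbed hw_* calls stand in for the register-level code and are assumptions for illustration:

    #include <stdbool.h>
    #include <stddef.h>

    struct frame { int id; int len; };

    struct tx_state {
        struct frame *pending;            /* plays the role of priv->tx_skb */
    };

    /* xmit path: refuse while the single TX object is busy, otherwise
     * stash the frame and kick the hardware
     */
    static bool start_xmit(struct tx_state *s, struct frame *f)
    {
        if (s->pending)
            return false;                 /* like NETDEV_TX_BUSY */
        s->pending = f;
        /* hw_send(f): program the message object registers here */
        return true;
    }

    /* TX interrupt: if new data landed in the TX object (an RTR
     * collision), process it and resend the stashed frame; otherwise
     * the frame is done and the queue can be woken
     */
    static void tx_interrupt(struct tx_state *s, bool newdat)
    {
        if (!s->pending)
            return;                       /* spurious interrupt */
        if (newdat) {
            /* hw_receive(); hw_send(s->pending); */
            return;                       /* keep pending for the retry */
        }
        s->pending = NULL;                /* completed: account and wake */
    }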
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+index b927021c6c40..af3db6b146ab 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+@@ -535,6 +535,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
+ 	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
+ 	cb |= CFG_CLE_BYPASS_EN0;
+ 	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
++	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
+ 	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);
+ 
+ 	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
+diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+index d9bc89d69266..2a2b41a4c9bf 100644
+--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
++++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+@@ -145,6 +145,7 @@ enum xgene_enet_rm {
+ #define CFG_TXCLK_MUXSEL0_SET(dst, val)	xgene_set_bits(dst, val, 29, 3)
+ 
+ #define CFG_CLE_IP_PROTOCOL0_SET(dst, val)	xgene_set_bits(dst, val, 16, 2)
++#define CFG_CLE_IP_HDR_LEN_SET(dst, val)	xgene_set_bits(dst, val, 8, 5)
+ #define CFG_CLE_DSTQID0_SET(dst, val)		xgene_set_bits(dst, val, 0, 12)
+ #define CFG_CLE_FPSEL0_SET(dst, val)		xgene_set_bits(dst, val, 16, 4)
+ #define CFG_MACMODE_SET(dst, val)		xgene_set_bits(dst, val, 18, 2)
+diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
+index c31c7407b753..425dae560322 100644
+--- a/drivers/net/ethernet/arc/emac_rockchip.c
++++ b/drivers/net/ethernet/arc/emac_rockchip.c
+@@ -150,8 +150,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
+ 	/* Optional regulator for PHY */
+ 	priv->regulator = devm_regulator_get_optional(dev, "phy");
+ 	if (IS_ERR(priv->regulator)) {
+-		if (PTR_ERR(priv->regulator) == -EPROBE_DEFER)
+-			return -EPROBE_DEFER;
++		if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
++			err = -EPROBE_DEFER;
++			goto out_clk_disable;
++		}
+ 		dev_err(dev, "no regulator found\n");
+ 		priv->regulator = NULL;
+ 	}
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 0eb43586c034..a3348ba658d3 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -2024,6 +2024,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+ 				  ETH_OVREHEAD +
+ 				  mtu +
+ 				  BNX2X_FW_RX_ALIGN_END;
++		fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
+ 		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
+ 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+ 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+@@ -3877,15 +3878,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		/* when transmitting in a vf, start bd must hold the ethertype
+ 		 * for fw to enforce it
+ 		 */
++		u16 vlan_tci = 0;
+ #ifndef BNX2X_STOP_ON_ERROR
+-		if (IS_VF(bp))
++		if (IS_VF(bp)) {
+ #endif
+-			tx_start_bd->vlan_or_ethertype =
+-				cpu_to_le16(ntohs(eth->h_proto));
++			/* Still need to consider inband vlan for enforced */
++			if (__vlan_get_tag(skb, &vlan_tci)) {
++				tx_start_bd->vlan_or_ethertype =
++					cpu_to_le16(ntohs(eth->h_proto));
++			} else {
++				tx_start_bd->bd_flags.as_bitfield |=
++					(X_ETH_INBAND_VLAN <<
++					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
++				tx_start_bd->vlan_or_ethertype =
++					cpu_to_le16(vlan_tci);
++			}
+ #ifndef BNX2X_STOP_ON_ERROR
+-		else
++		} else {
+ 			/* used by FW for packet accounting */
+ 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
++		}
+ #endif
+ 	}
+ 
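For VFs the start BD now carries the inband VLAN TCI when the frame has one, and the ethertype otherwise. A rough userspace sketch of the selection; get_vlan_tag() below is a simplified stand-in for __vlan_get_tag() (which likewise returns 0 on success and fills in the TCI), and the bd layout is assumed:

    #include <stdint.h>
    #include <stdbool.h>

    struct start_bd {
        uint16_t vlan_or_ethertype;
        bool inband_vlan;                 /* stands for X_ETH_INBAND_VLAN */
    };

    /* simplified stand-in for __vlan_get_tag(): 0 on success */
    static int get_vlan_tag(const uint8_t *frame, uint16_t *tci)
    {
        if (((frame[12] << 8) | frame[13]) != 0x8100)
            return -1;                    /* no 802.1Q header */
        *tci = (uint16_t)((frame[14] << 8) | frame[15]);
        return 0;
    }

    static void fill_start_bd(struct start_bd *bd, const uint8_t *frame,
                              uint16_t ethertype)
    {
        uint16_t tci = 0;

        if (get_vlan_tag(frame, &tci) == 0) {
            bd->inband_vlan = true;       /* let fw enforce the tag */
            bd->vlan_or_ethertype = tci;
        } else {
            bd->inband_vlan = false;
            bd->vlan_or_ethertype = ethertype;
        }
    }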
+diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+index 5be892ffdaed..767132ec00f9 100644
+--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
++++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+@@ -2861,7 +2861,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
+ static void
+ bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
+ {
+-	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
++	strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ }
+ 
+ static void
+diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+index 482f6de6817d..ae96b4a32d90 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+@@ -2573,8 +2573,8 @@ void t4vf_sge_stop(struct adapter *adapter)
+ int t4vf_sge_init(struct adapter *adapter)
+ {
+ 	struct sge_params *sge_params = &adapter->params.sge;
+-	u32 fl0 = sge_params->sge_fl_buffer_size[0];
+-	u32 fl1 = sge_params->sge_fl_buffer_size[1];
++	u32 fl_small_pg = sge_params->sge_fl_buffer_size[0];
++	u32 fl_large_pg = sge_params->sge_fl_buffer_size[1];
+ 	struct sge *s = &adapter->sge;
+ 	unsigned int ingpadboundary, ingpackboundary;
+ 
+@@ -2583,9 +2583,20 @@ int t4vf_sge_init(struct adapter *adapter)
+ 	 * the Physical Function Driver.  Ideally we should be able to deal
+ 	 * with _any_ configuration.  Practice is different ...
+ 	 */
+-	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
++
++	/* We only bother using the Large Page logic if the Large Page Buffer
++	 * is larger than our Page Size Buffer.
++	 */
++	if (fl_large_pg <= fl_small_pg)
++		fl_large_pg = 0;
++
++	/* The Page Size Buffer must be exactly equal to our Page Size and the
++	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
++	 */
++	if (fl_small_pg != PAGE_SIZE ||
++	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ 		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+-			fl0, fl1);
++			fl_small_pg, fl_large_pg);
+ 		return -EINVAL;
+ 	}
+ 	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
+@@ -2596,8 +2607,8 @@ int t4vf_sge_init(struct adapter *adapter)
+ 	/*
+ 	 * Now translate the adapter parameters into our internal forms.
+ 	 */
+-	if (fl1)
+-		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
++	if (fl_large_pg)
++		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ 	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
+ 			? 128 : 64);
+ 	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
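The new validation accepts the large-page buffer only when it is zero (disabled, per the check above) or a power of two, since fl_pg_order is later derived with ilog2(). A standalone sketch of the same test; large_pg_ok() is an illustrative name, not the driver's API:

    #include <stdbool.h>
    #include <stdint.h>

    /* A non-zero x is a power of two iff x & (x - 1) == 0.  Zero also
     * passes, matching the driver's use of fl_large_pg == 0 to mean
     * "large pages disabled".
     */
    static bool large_pg_ok(uint32_t fl_large_pg, uint32_t fl_small_pg)
    {
        if (fl_large_pg <= fl_small_pg)
            fl_large_pg = 0;              /* not worth using, disable */
        return (fl_large_pg & (fl_large_pg - 1)) == 0;
    }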
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 6d0c5d5eea6d..58c0fccdd8cb 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -28,6 +28,7 @@
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
++#include <linux/of.h>
+ #include <linux/phy.h>
+ #include <linux/platform_device.h>
+ #include <net/ip.h>
+diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+index 3c40f6b99224..28e97686f6aa 100644
+--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
++++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+@@ -370,7 +370,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
+ {
+ 	const struct of_device_id *id =
+ 		of_match_device(fsl_pq_mdio_match, &pdev->dev);
+-	const struct fsl_pq_mdio_data *data = id->data;
++	const struct fsl_pq_mdio_data *data;
+ 	struct device_node *np = pdev->dev.of_node;
+ 	struct resource res;
+ 	struct device_node *tbi;
+@@ -378,6 +378,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
+ 	struct mii_bus *new_bus;
+ 	int err;
+ 
++	if (!id) {
++		dev_err(&pdev->dev, "Failed to match device\n");
++		return -ENODEV;
++	}
++
++	data = id->data;
++
+ 	dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
+ 
+ 	new_bus = mdiobus_alloc_size(sizeof(*priv));
+diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
+index b9df0cbd0a38..9571f7dad162 100644
+--- a/drivers/net/ethernet/ibm/emac/core.c
++++ b/drivers/net/ethernet/ibm/emac/core.c
+@@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev)
+ {
+ 	struct emac_regs __iomem *p = dev->emacp;
+ 	int n = 20;
++	bool __maybe_unused try_internal_clock = false;
+ 
+ 	DBG(dev, "reset" NL);
+ 
+@@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev)
+ 	}
+ 
+ #ifdef CONFIG_PPC_DCR_NATIVE
++do_retry:
+ 	/*
+ 	 * PPC460EX/GT Embedded Processor Advanced User's Manual
+ 	 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
+@@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev)
+ 	 * of the EMAC. If none is present, select the internal clock
+ 	 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
+ 	 * After a soft reset, select the external clock.
++	 *
++	 * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk
++	 * if the ethernet cable is not attached. This causes the reset to
++	 * time out, and the PHY detection code in emac_init_phy() is unable to
++	 * communicate and detect the AR8035-A PHY. As a result, the emac
++	 * driver bails out early and the user has no ethernet.
++	 * In order to stay compatible with existing configurations, the
++	 * driver temporarily switches to the internal clock after the
++	 * first reset attempt fails.
+ 	 */
+ 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+-		if (dev->phy_address == 0xffffffff &&
+-		    dev->phy_map == 0xffffffff) {
++		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
++					   dev->phy_map == 0xffffffff)) {
+ 			/* No PHY: select internal loop clock before reset */
+ 			dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 				    0, SDR0_ETH_CFG_ECS << dev->cell_index);
+@@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev)
+ 
+ #ifdef CONFIG_PPC_DCR_NATIVE
+ 	if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
+-		if (dev->phy_address == 0xffffffff &&
+-		    dev->phy_map == 0xffffffff) {
++		if (!n && !try_internal_clock) {
++			/* first attempt has timed out. */
++			n = 20;
++			try_internal_clock = true;
++			goto do_retry;
++		}
++
++		if (try_internal_clock || (dev->phy_address == 0xffffffff &&
++					   dev->phy_map == 0xffffffff)) {
+ 			/* No PHY: restore external clock source after reset */
+ 			dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 				    SDR0_ETH_CFG_ECS << dev->cell_index, 0);
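The retry falls back to the internal clock only after the first reset attempt times out, keeping existing setups on the external clock. The control flow, sketched with hypothetical do_reset()/CLK_* stand-ins for the register loop:

    #include <stdbool.h>

    enum clk { CLK_EXTERNAL, CLK_INTERNAL };

    static bool do_reset(enum clk c)      /* stub for the register loop */
    {
        (void)c;
        return false;
    }

    static bool emac_reset_with_fallback(void)
    {
        bool try_internal_clock = false;

        for (;;) {
            enum clk c = try_internal_clock ? CLK_INTERNAL : CLK_EXTERNAL;

            if (do_reset(c))
                return true;              /* reset completed */
            if (try_internal_clock)
                return false;             /* both clocks timed out */
            try_internal_clock = true;    /* retry once internally */
        }
    }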
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 39e9d7db23df..66cbf19b7635 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+ 	struct e1000_hw *hw = &adapter->hw;
+ 
+ 	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
++		struct sk_buff *skb = adapter->tx_hwtstamp_skb;
+ 		struct skb_shared_hwtstamps shhwtstamps;
+ 		u64 txstmp;
+ 
+@@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
+ 
+ 		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);
+ 
+-		skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
+-		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
++		/* Clear the global tx_hwtstamp_skb pointer and force writes
++		 * prior to notifying the stack of a Tx timestamp.
++		 */
+ 		adapter->tx_hwtstamp_skb = NULL;
++		wmb(); /* force write prior to skb_tstamp_tx */
++
++		skb_tstamp_tx(skb, &shhwtstamps);
++		dev_kfree_skb_any(skb);
+ 	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ 			      + adapter->tx_timeout_factor * HZ)) {
+ 		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+@@ -3524,6 +3530,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
+ 
+ 	switch (hw->mac.type) {
+ 	case e1000_pch2lan:
++		/* Stable 96MHz frequency */
++		incperiod = INCPERIOD_96MHz;
++		incvalue = INCVALUE_96MHz;
++		shift = INCVALUE_SHIFT_96MHz;
++		adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
++		break;
+ 	case e1000_pch_lpt:
+ 	case e1000_pch_spt:
+ 		/* On I217, I218 and I219, the clock frequency is 25MHz
+@@ -6375,12 +6387,17 @@ static int e1000e_pm_thaw(struct device *dev)
+ static int e1000e_pm_suspend(struct device *dev)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
++	int rc;
+ 
+ 	e1000e_flush_lpic(pdev);
+ 
+ 	e1000e_pm_freeze(dev);
+ 
+-	return __e1000_shutdown(pdev, false);
++	rc = __e1000_shutdown(pdev, false);
++	if (rc)
++		e1000e_pm_thaw(dev);
++
++	return rc;
+ }
+ 
+ static int e1000e_pm_resume(struct device *dev)
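The timestamp fix above is an instance of the usual publish/consume ordering: clear the shared pointer, fence, and only then hand the buffer on. A userspace sketch of the pattern with C11 atomics in place of the kernel's wmb(); the slot name and buf type are assumptions for illustration:

    #include <stdatomic.h>
    #include <stddef.h>

    struct buf { int data; };

    static _Atomic(struct buf *) slot;    /* plays tx_hwtstamp_skb */

    static void complete_timestamp(void)
    {
        struct buf *b = atomic_load_explicit(&slot, memory_order_acquire);

        if (!b)
            return;
        /* Clear the slot with release ordering, so our read of the
         * pointer is ordered before the slot is seen free again;
         * this corresponds to the wmb() in the driver.
         */
        atomic_store_explicit(&slot, NULL, memory_order_release);
        /* notify(b); free_buf(b); would follow here */
    }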
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+index 4b9d9f88af70..502a54e9ac33 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+@@ -869,7 +869,7 @@ static void fm10k_self_test(struct net_device *dev,
+ 
+ 	memset(data, 0, sizeof(*data) * FM10K_TEST_LEN);
+ 
+-	if (FM10K_REMOVED(hw)) {
++	if (FM10K_REMOVED(hw->hw_addr)) {
+ 		netif_err(interface, drv, dev,
+ 			  "Interface removed - test blocked\n");
+ 		eth_test->flags |= ETH_TEST_FL_FAILED;
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index d9f4498832a1..bc39fd5e22ad 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -5067,7 +5067,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	INIT_WORK(&hw->restart_work, sky2_restart);
+ 
+ 	pci_set_drvdata(pdev, hw);
+-	pdev->d3_delay = 150;
++	pdev->d3_delay = 200;
+ 
+ 	return 0;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+index bd9ea0d01aae..b0d677cad93a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
++++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
+@@ -35,6 +35,7 @@
+ #include <linux/etherdevice.h>
+ 
+ #include <linux/mlx4/cmd.h>
++#include <linux/mlx4/qp.h>
+ #include <linux/export.h>
+ 
+ #include "mlx4.h"
+@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
+ 	if (IS_ERR(mailbox))
+ 		return PTR_ERR(mailbox);
+ 
++	if (!mlx4_qp_lookup(dev, rule->qpn)) {
++		mlx4_err_rule(dev, "QP doesn't exist\n", rule);
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	trans_rule_ctrl_to_hw(rule, mailbox->buf);
+ 
+ 	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ 
+ 	list_for_each_entry(cur, &rule->list, list) {
+ 		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+-		if (ret < 0) {
+-			mlx4_free_cmd_mailbox(dev, mailbox);
+-			return ret;
+-		}
++		if (ret < 0)
++			goto out;
++
+ 		size += ret;
+ 	}
+ 
+@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
+ 		}
+ 	}
+ 
++out:
+ 	mlx4_free_cmd_mailbox(dev, mailbox);
+ 
+ 	return ret;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
+index eb1dcb7e9e96..c20e2d2f911a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
+@@ -381,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+ 		__mlx4_qp_free_icm(dev, qpn);
+ }
+ 
++struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
++{
++	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
++	struct mlx4_qp *qp;
++
++	spin_lock(&qp_table->lock);
++
++	qp = __mlx4_qp_lookup(dev, qpn);
++
++	spin_unlock(&qp_table->lock);
++	return qp;
++}
++
+ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
+ {
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+index b8d5270359cd..e30676515529 100644
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+ 	cmd.req.arg3 = 0;
+ 
+ 	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+-		netxen_issue_cmd(adapter, &cmd);
++		rcode = netxen_issue_cmd(adapter, &cmd);
+ 
+ 	if (rcode != NX_RCODE_SUCCESS)
+ 		return -EIO;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+index 75ee9e4ced51..01e250935787 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+ 			}
+ 			return -EIO;
+ 		}
+-		usleep_range(1000, 1500);
++		udelay(1200);
+ 	}
+ 
+ 	if (id_reg)
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+index e6312465fe45..d732c63cd496 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+@@ -126,6 +126,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+ 		return 0;
+ 
+ 	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
++	if (!pos)
++		return 0;
+ 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ 	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+index be258d90de9e..e3223f2fe2ff 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+ 		sizeof(struct mpi_coredump_global_header);
+ 	mpi_coredump->mpi_global_header.imageSize =
+ 		sizeof(struct ql_mpi_coredump);
+-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
++	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ 		sizeof(mpi_coredump->mpi_global_header.idString));
+ 
+ 	/* Get generic NIC reg dump */
+@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
+ 		sizeof(struct mpi_coredump_global_header);
+ 	mpi_coredump->mpi_global_header.imageSize =
+ 		sizeof(struct ql_reg_dump);
+-	memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
++	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ 		sizeof(mpi_coredump->mpi_global_header.idString));
+ 
+ 
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index cba41860167c..32113fafc07b 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca)
+ 
+ 	/* Allocate rx SKB if we don't have one available. */
+ 	if (!qca->rx_skb) {
+-		qca->rx_skb = netdev_alloc_skb(net_dev,
+-					       net_dev->mtu + VLAN_ETH_HLEN);
++		qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
++							net_dev->mtu +
++							VLAN_ETH_HLEN);
+ 		if (!qca->rx_skb) {
+ 			netdev_dbg(net_dev, "out of RX resources\n");
+ 			qca->stats.out_of_mem++;
+@@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca)
+ 					qca->rx_skb, qca->rx_skb->dev);
+ 				qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 				netif_rx_ni(qca->rx_skb);
+-				qca->rx_skb = netdev_alloc_skb(net_dev,
++				qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+ 					net_dev->mtu + VLAN_ETH_HLEN);
+ 				if (!qca->rx_skb) {
+ 					netdev_dbg(net_dev, "out of RX resources\n");
+@@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev)
+ 	if (!qca->rx_buffer)
+ 		return -ENOBUFS;
+ 
+-	qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
++	qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
++						VLAN_ETH_HLEN);
+ 	if (!qca->rx_skb) {
+ 		kfree(qca->rx_buffer);
+ 		netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 0ae76e419482..c64ed1613928 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -3217,7 +3217,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
+ 	/* MDIO bus init */
+ 	ret = sh_mdio_init(mdp, pd);
+ 	if (ret) {
+-		dev_err(&ndev->dev, "failed to initialise MDIO\n");
++		dev_err(&pdev->dev, "failed to initialise MDIO\n");
+ 		goto out_release;
+ 	}
+ 
+diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
+index b536b4c82752..9d12f7012798 100644
+--- a/drivers/net/ethernet/ti/cpsw.c
++++ b/drivers/net/ethernet/ti/cpsw.c
+@@ -291,6 +291,10 @@ struct cpsw_ss_regs {
+ /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+ #define CPSW_V1_SEQ_ID_OFS_SHIFT	16
+ 
++#define CPSW_MAX_BLKS_TX		15
++#define CPSW_MAX_BLKS_TX_SHIFT		4
++#define CPSW_MAX_BLKS_RX		5
++
+ struct cpsw_host_regs {
+ 	u32	max_blks;
+ 	u32	blk_cnt;
+@@ -1126,11 +1130,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+ 	switch (priv->version) {
+ 	case CPSW_VERSION_1:
+ 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
++		/* Increase RX FIFO size to 5 to support full-duplex
++		 * flow control mode
++		 */
++		slave_write(slave,
++			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
++			    CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ 		break;
+ 	case CPSW_VERSION_2:
+ 	case CPSW_VERSION_3:
+ 	case CPSW_VERSION_4:
+ 		slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
++		/* Increase RX FIFO size to 5 to support full-duplex
++		 * flow control mode
++		 */
++		slave_write(slave,
++			    (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
++			    CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ 		break;
+ 	}
+ 
+diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
+index 49fe59b180a8..a75ce9051a7f 100644
+--- a/drivers/net/hamradio/hdlcdrv.c
++++ b/drivers/net/hamradio/hdlcdrv.c
+@@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ 	case HDLCDRVCTL_CALIBRATE:
+ 		if(!capable(CAP_SYS_RAWIO))
+ 			return -EPERM;
++		if (s->par.bitrate <= 0)
++			return -EINVAL;
+ 		if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ 			return -EINVAL;
+ 		s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index f3cd85ecd795..7439df3dd667 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -275,6 +275,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+ 		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
+ 			success = true;
+ 	} else {
++		if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
++					     ipvlan->phy_dev->dev_addr))
++			skb->pkt_type = PACKET_OTHERHOST;
++
+ 		ret = RX_HANDLER_ANOTHER;
+ 		success = true;
+ 	}
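The added test is a plain destination-MAC compare against the physical device's address; frames for other hosts are tagged so the stack can drop them. A userspace equivalent, assuming 6-byte arrays in place of ether_addr_equal_64bits():

    #include <string.h>
    #include <stdint.h>

    enum pkt_type { PACKET_HOST, PACKET_OTHERHOST };

    /* ether_addr_equal_64bits() is an optimized 6-byte compare; a
     * plain memcmp() expresses the same test
     */
    static enum pkt_type classify(const uint8_t dest[6],
                                  const uint8_t dev_addr[6])
    {
        return memcmp(dest, dev_addr, 6) == 0 ? PACKET_HOST
                                              : PACKET_OTHERHOST;
    }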
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 1ca78b46c01b..0afda59439d5 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -123,6 +123,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
+ 	if (phydev->drv->aneg_done)
+ 		return phydev->drv->aneg_done(phydev);
+ 
++	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
++	 * implement Clause 22 registers
++	 */
++	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
++		return -EINVAL;
++
+ 	return genphy_aneg_done(phydev);
+ }
+ 
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 31aa93907b77..0123d7d1a391 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppox))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OE)
+ 		goto end;
+ 
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index b35199cc8f34..17407494531e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -502,7 +502,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	po->chan.mtu = dst_mtu(&rt->dst);
+ 	if (!po->chan.mtu)
+ 		po->chan.mtu = PPP_MRU;
+-	ip_rt_put(rt);
+ 	po->chan.mtu -= PPTP_HEADER_OVERHEAD;
+ 
+ 	po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 27ed25252aac..cfd81eb1b532 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
+ 		if(x < 0 || x > comp->rslot_limit)
+ 			goto bad;
+ 
++		/* Check if the cstate is initialized */
++		if (!comp->rstate[x].initialized)
++			goto bad;
++
+ 		comp->flags &=~ SLF_TOSS;
+ 		comp->recv_current = x;
+ 	} else {
+@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
+ 	if (cs->cs_tcp.doff > 5)
+ 	  memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
+ 	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
++	cs->initialized = true;
+ 	/* Put headers back on packet
+ 	 * Neither header checksum is recalculated
+ 	 */
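The guard added to the decompressor rejects a COMPRESSED packet that names a slot never primed by slhc_remember(), since such a slot holds uninitialized headers. A sketch of the check over a hypothetical slot table:

    #include <stdbool.h>

    struct cstate {
        bool initialized;                 /* set by the "remember" path */
        /* saved IP/TCP headers omitted */
    };

    /* A COMPRESSED packet may only reference a slot that an earlier
     * UNCOMPRESSED packet has primed; anything else is a bad stream.
     */
    static bool slot_usable(const struct cstate *slots, int limit, int x)
    {
        if (x < 0 || x > limit)
            return false;
        return slots[x].initialized;
    }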
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index a16b054a4fa6..463b8d013deb 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
+ 	}
+ }
+ 
++static bool __team_option_inst_tmp_find(const struct list_head *opts,
++					const struct team_option_inst *needle)
++{
++	struct team_option_inst *opt_inst;
++
++	list_for_each_entry(opt_inst, opts, tmp_list)
++		if (opt_inst == needle)
++			return true;
++	return false;
++}
++
+ static int __team_options_register(struct team *team,
+ 				   const struct team_option *option,
+ 				   size_t option_count)
+@@ -1034,14 +1045,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
+ }
+ 
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int __team_port_enable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np;
+ 	int err;
+ 
+-	if (!team->dev->npinfo)
+-		return 0;
+-
+ 	np = kzalloc(sizeof(*np), GFP_KERNEL);
+ 	if (!np)
+ 		return -ENOMEM;
+@@ -1055,6 +1063,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+ 	return err;
+ }
+ 
++static int team_port_enable_netpoll(struct team_port *port)
++{
++	if (!port->team->dev->npinfo)
++		return 0;
++
++	return __team_port_enable_netpoll(port);
++}
++
+ static void team_port_disable_netpoll(struct team_port *port)
+ {
+ 	struct netpoll *np = port->np;
+@@ -1069,7 +1085,7 @@ static void team_port_disable_netpoll(struct team_port *port)
+ 	kfree(np);
+ }
+ #else
+-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
++static int team_port_enable_netpoll(struct team_port *port)
+ {
+ 	return 0;
+ }
+@@ -1176,7 +1192,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
+ 		goto err_vids_add;
+ 	}
+ 
+-	err = team_port_enable_netpoll(team, port);
++	err = team_port_enable_netpoll(port);
+ 	if (err) {
+ 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ 			   portname);
+@@ -1884,7 +1900,7 @@ static int team_netpoll_setup(struct net_device *dev,
+ 
+ 	mutex_lock(&team->lock);
+ 	list_for_each_entry(port, &team->port_list, list) {
+-		err = team_port_enable_netpoll(team, port);
++		err = __team_port_enable_netpoll(port);
+ 		if (err) {
+ 			__team_netpoll_cleanup(team);
+ 			break;
+@@ -2371,7 +2387,7 @@ send_done:
+ 	if (!nlh) {
+ 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ 		if (err)
+-			goto errout;
++			return err;
+ 		goto send_done;
+ 	}
+ 
+@@ -2535,6 +2551,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+ 			if (err)
+ 				goto team_put;
+ 			opt_inst->changed = true;
++
++			/* dumb/evil user-space can send us duplicate opt,
++			 * keep only the last one
++			 */
++			if (__team_option_inst_tmp_find(&opt_inst_list,
++							opt_inst))
++				continue;
++
+ 			list_add(&opt_inst->tmp_list, &opt_inst_list);
+ 		}
+ 		if (!opt_found) {
+@@ -2651,7 +2675,7 @@ send_done:
+ 	if (!nlh) {
+ 		err = __send_and_alloc_skb(&skb, team, portid, send_func);
+ 		if (err)
+-			goto errout;
++			return err;
+ 		goto send_done;
+ 	}
+ 
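__team_option_inst_tmp_find() is a straightforward identity scan over the temporary list, used above to queue each option instance at most once. The same idea over a plain singly linked list, with illustrative node naming:

    #include <stdbool.h>
    #include <stddef.h>

    struct node { struct node *next; };

    /* true if needle is already linked into the list */
    static bool tmp_find(const struct node *head, const struct node *needle)
    {
        for (const struct node *n = head; n; n = n->next)
            if (n == needle)
                return true;
        return false;
    }

    /* caller pattern, mirroring the option-set loop:
     *     if (tmp_find(head, inst))
     *             continue;         // duplicate from user-space
     *     inst->next = head;
     *     head = inst;
     */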
+diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
+index 4545e78840b0..5243d2797c3d 100644
+--- a/drivers/net/usb/cdc_ether.c
++++ b/drivers/net/usb/cdc_ether.c
+@@ -750,6 +750,12 @@ static const struct usb_device_id	products[] = {
+ 				      USB_CDC_SUBCLASS_ETHERNET,
+ 				      USB_CDC_PROTO_NONE),
+ 	.driver_info = (unsigned long)&wwan_info,
++}, {
++	/* Cinterion AHS3 modem by GEMALTO */
++	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
++				      USB_CDC_SUBCLASS_ETHERNET,
++				      USB_CDC_PROTO_NONE),
++	.driver_info = (unsigned long)&wwan_info,
+ }, {
+ 	/* Telit modules */
+ 	USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 3d97fd391793..23a327e93732 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -674,6 +674,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
++	{QMI_FIXED_INTF(0x05c6, 0x90b2, 3)},    /* ublox R410M */
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ 	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index 2e61a799f32a..611c78be4da9 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -397,6 +397,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
+ 	if (ifmp && (dev->ifindex != 0))
+ 		peer->ifindex = ifmp->ifi_index;
+ 
++	peer->gso_max_size = dev->gso_max_size;
++	peer->gso_max_segs = dev->gso_max_segs;
++
+ 	err = register_netdevice(peer);
+ 	put_net(net);
+ 	net = NULL;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 64ca961bca18..9ecc6ca5e4b4 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -852,7 +852,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 	struct virtio_net_hdr_mrg_rxbuf *hdr;
+ 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
+ 	struct virtnet_info *vi = sq->vq->vdev->priv;
+-	unsigned num_sg;
++	int num_sg;
+ 	unsigned hdr_len = vi->hdr_len;
+ 	bool can_push;
+ 
+@@ -905,11 +905,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+ 	if (can_push) {
+ 		__skb_push(skb, hdr_len);
+ 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
++		if (unlikely(num_sg < 0))
++			return num_sg;
+ 		/* Pull header back to avoid skew in tx bytes calculations. */
+ 		__skb_pull(skb, hdr_len);
+ 	} else {
+ 		sg_set_buf(sq->sg, hdr, hdr_len);
+-		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
++		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
++		if (unlikely(num_sg < 0))
++			return num_sg;
++		num_sg++;
+ 	}
+ 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
+ }
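The virtio change hinges on num_sg being signed, so that skb_to_sgvec() errors propagate instead of becoming huge unsigned counts. A compact sketch of the corrected pattern; fill_sg() is a stand-in stub, not the real API:

    #include <errno.h>

    /* stand-in: returns entries filled, or a negative errno like
     * skb_to_sgvec()
     */
    static int fill_sg(void *sg, int max)
    {
        (void)sg;
        return max > 0 ? 1 : -EINVAL;
    }

    static int build_and_send(void *sg, int max, int has_header)
    {
        int num_sg = fill_sg(sg, max);    /* int, not unsigned */

        if (num_sg < 0)
            return num_sg;                /* propagate, don't enqueue */
        if (has_header)
            num_sg++;                     /* header occupies one entry */
        /* virtqueue_add_outbuf(..., num_sg, ...) would follow */
        return 0;
    }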
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 0b9c8d61f7d1..51998a85e314 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2655,6 +2655,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
+ 	/* we need to enable NAPI, otherwise dev_close will deadlock */
+ 	for (i = 0; i < adapter->num_rx_queues; i++)
+ 		napi_enable(&adapter->rx_queue[i].napi);
++	/*
++	 * Need to clear the quiesce bit to ensure that vmxnet3_close
++	 * can quiesce the device properly
++	 */
++	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
+ 	dev_close(adapter->netdev);
+ }
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 1ad3700ed9c7..8ddefc96fb50 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -1001,7 +1001,7 @@ static bool vxlan_snoop(struct net_device *dev,
+ 			return false;
+ 
+ 		/* Don't migrate static entries, drop packets */
+-		if (f->state & NUD_NOARP)
++		if (f->state & (NUD_PERMANENT | NUD_NOARP))
+ 			return true;
+ 
+ 		if (net_ratelimit())
+diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
+index db363856e0b5..2b064998915f 100644
+--- a/drivers/net/wan/pc300too.c
++++ b/drivers/net/wan/pc300too.c
+@@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
+ 	    card->rambase == NULL) {
+ 		pr_err("ioremap() failed\n");
+ 		pc300_pci_remove_one(pdev);
++		return -ENOMEM;
+ 	}
+ 
+ 	/* PLX PCI 9050 workaround for local configuration register read bug */
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 1b69427fbb29..8b1ab8880113 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1853,6 +1853,15 @@ static ssize_t ath10k_write_simulate_radar(struct file *file,
+ 					   size_t count, loff_t *ppos)
+ {
+ 	struct ath10k *ar = file->private_data;
++	struct ath10k_vif *arvif;
++
++	/* Just check the first vif alone, as all the vifs share the
++	 * same channel; if the channel is disabled, they all share the
++	 * same 'is_started' state.
++	 */
++	arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
++	if (!arvif->is_started)
++		return -EINVAL;
+ 
+ 	ieee80211_radar_detected(ar->hw);
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 6757d9c63bf2..561a777ba8c7 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -4001,9 +4001,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+ 				    sta->addr, smps, err);
+ 	}
+ 
+-	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
+-	    changed & IEEE80211_RC_NSS_CHANGED) {
+-		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
++	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
++		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ 			   sta->addr);
+ 
+ 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
+index c70782e8f07b..b5802e37ab24 100644
+--- a/drivers/net/wireless/ath/ath5k/debug.c
++++ b/drivers/net/wireless/ath/ath5k/debug.c
+@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
+ 	}
+ 
+ 	for (i = 0; i < eesize; ++i) {
+-		AR5K_EEPROM_READ(i, val);
++		if (!ath5k_hw_nvram_read(ah, i, &val)) {
++			ret = -EIO;
++			goto freebuf;
++		}
+ 		buf[i] = val;
+ 	}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
+index 5e15e8e10ed3..bb64d7377a96 100644
+--- a/drivers/net/wireless/ath/ath9k/hw.c
++++ b/drivers/net/wireless/ath/ath9k/hw.c
+@@ -1593,6 +1593,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
+ 	int count = 50;
+ 	u32 reg, last_val;
+ 
++	/* Check if chip failed to wake up */
++	if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
++		return false;
++
+ 	if (AR_SREV_9300(ah))
+ 		return !ath9k_hw_detect_mac_hang(ah);
+ 
+diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
+index 06ea6cc9e30a..62077bda8dde 100644
+--- a/drivers/net/wireless/ath/regd.c
++++ b/drivers/net/wireless/ath/regd.c
+@@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain)
+ EXPORT_SYMBOL(ath_is_49ghz_allowed);
+ 
+ /* Frequency is one where radar detection is required */
+-static bool ath_is_radar_freq(u16 center_freq)
++static bool ath_is_radar_freq(u16 center_freq,
++			      struct ath_regulatory *reg)
++
+ {
++	if (reg->country_code == CTRY_INDIA)
++		return (center_freq >= 5500 && center_freq <= 5700);
+ 	return (center_freq >= 5260 && center_freq <= 5700);
+ }
+ 
+@@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ 				enum nl80211_reg_initiator initiator,
+ 				struct ieee80211_channel *ch)
+ {
+-	if (ath_is_radar_freq(ch->center_freq) ||
++	if (ath_is_radar_freq(ch->center_freq, reg) ||
+ 	    (ch->flags & IEEE80211_CHAN_RADAR))
+ 		return;
+ 
+@@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
+ 	}
+ }
+ 
+-/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */
+-static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
++/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */
++static void ath_reg_apply_radar_flags(struct wiphy *wiphy,
++				      struct ath_regulatory *reg)
+ {
+ 	struct ieee80211_supported_band *sband;
+ 	struct ieee80211_channel *ch;
+@@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
+ 
+ 	for (i = 0; i < sband->n_channels; i++) {
+ 		ch = &sband->channels[i];
+-		if (!ath_is_radar_freq(ch->center_freq))
++		if (!ath_is_radar_freq(ch->center_freq, reg))
+ 			continue;
+ 		/* We always enable radar detection/DFS on this
+ 		 * frequency range. Additionally we also apply on
+@@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
+ 	struct ath_common *common = container_of(reg, struct ath_common,
+ 						 regulatory);
+ 	/* We always apply this */
+-	ath_reg_apply_radar_flags(wiphy);
++	ath_reg_apply_radar_flags(wiphy, reg);
+ 
+ 	/*
+ 	 * This would happen when we have sent a custom regulatory request
+@@ -653,7 +658,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
+ 	}
+ 
+ 	wiphy_apply_custom_regulatory(wiphy, regd);
+-	ath_reg_apply_radar_flags(wiphy);
++	ath_reg_apply_radar_flags(wiphy, reg);
+ 	ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
+ 	return 0;
+ }
+diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
+index a058151f5eed..ec51447365c1 100644
+--- a/drivers/net/wireless/ath/wil6210/main.c
++++ b/drivers/net/wireless/ath/wil6210/main.c
+@@ -118,9 +118,15 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
+ 	u32 *d = dst;
+ 	const volatile u32 __iomem *s = src;
+ 
+-	/* size_t is unsigned, if (count%4 != 0) it will wrap */
+-	for (count += 4; count > 4; count -= 4)
++	for (; count >= 4; count -= 4)
+ 		*d++ = __raw_readl(s++);
++
++	if (unlikely(count)) {
++		/* count can be 1..3 */
++		u32 tmp = __raw_readl(s);
++
++		memcpy(d, &tmp, count);
++	}
+ }
+ 
+ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+@@ -129,8 +135,16 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
+ 	volatile u32 __iomem *d = dst;
+ 	const u32 *s = src;
+ 
+-	for (count += 4; count > 4; count -= 4)
++	for (; count >= 4; count -= 4)
+ 		__raw_writel(*s++, d++);
++
++	if (unlikely(count)) {
++		/* count can be 1..3 */
++		u32 tmp = 0;
++
++		memcpy(&tmp, s, count);
++		__raw_writel(tmp, d);
++	}
+ }
+ 
+ static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
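The rewritten copy loops move whole 32-bit words and then fold a 1-3 byte tail through a temporary word, replacing the old count += 4 trick that wrapped when the size was not a multiple of four. A userspace sketch of the fromio direction, with ordinary loads standing in for __raw_readl() and assuming the trailing full-word read is safe, as it is on this device's MMIO:

    #include <stddef.h>
    #include <string.h>
    #include <stdint.h>

    static void copy_from_io_32(void *dst, const void *src, size_t count)
    {
        uint32_t *d = dst;
        const uint32_t *s = src;

        for (; count >= 4; count -= 4)
            *d++ = *s++;                  /* __raw_readl() in the driver */

        if (count) {                      /* 1..3 trailing bytes */
            uint32_t tmp = *s;            /* one full word read */

            memcpy(d, &tmp, count);       /* store only what was asked */
        }
    }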
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+index 710fbe570eb2..a85ac706f892 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+@@ -460,25 +460,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
+  * @dev_addr: optional device address.
+  *
+  * P2P needs mac addresses for P2P device and interface. If no device
+- * address it specified, these are derived from the primary net device, ie.
+- * the permanent ethernet address of the device.
++ * address is specified, these are derived from a random ethernet
++ * address.
+  */
+ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
+ {
+-	struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
+-	bool local_admin = false;
++	bool random_addr = false;
+ 
+-	if (!dev_addr || is_zero_ether_addr(dev_addr)) {
+-		dev_addr = pri_ifp->mac_addr;
+-		local_admin = true;
+-	}
++	if (!dev_addr || is_zero_ether_addr(dev_addr))
++		random_addr = true;
+ 
+-	/* Generate the P2P Device Address.  This consists of the device's
+-	 * primary MAC address with the locally administered bit set.
++	/* Generate the P2P Device Address by obtaining a random ethernet
++	 * address with the locally administered bit set.
+ 	 */
+-	memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+-	if (local_admin)
+-		p2p->dev_addr[0] |= 0x02;
++	if (random_addr)
++		eth_random_addr(p2p->dev_addr);
++	else
++		memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
+ 
+ 	/* Generate the P2P Interface Address.  If the discovery and connection
+ 	 * BSSCFGs need to simultaneously co-exist, then this address must be
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index aafb97ce080d..eadb9ded7070 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val)
+ 	    val != PS_MANUAL_POLL)
+ 		return -EINVAL;
+ 
+-	old_ps = data->ps;
+-	data->ps = val;
+-
+-	local_bh_disable();
+ 	if (val == PS_MANUAL_POLL) {
++		if (data->ps != PS_ENABLED)
++			return -EINVAL;
++		local_bh_disable();
+ 		ieee80211_iterate_active_interfaces_atomic(
+ 			data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ 			hwsim_send_ps_poll, data);
+-		data->ps_poll_pending = true;
+-	} else if (old_ps == PS_DISABLED && val != PS_DISABLED) {
++		local_bh_enable();
++		return 0;
++	}
++	old_ps = data->ps;
++	data->ps = val;
++
++	local_bh_disable();
++	if (old_ps == PS_DISABLED && val != PS_DISABLED) {
+ 		ieee80211_iterate_active_interfaces_atomic(
+ 			data->hw, IEEE80211_IFACE_ITER_NORMAL,
+ 			hwsim_send_nullfunc_ps, data);
+diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
+index 477f86354dc5..4482debcfe84 100644
+--- a/drivers/net/wireless/ray_cs.c
++++ b/drivers/net/wireless/ray_cs.c
+@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
+ 	0x04, 0x08,		/* Noise gain, limit offset */
+ 	0x28, 0x28,		/* det rssi, med busy offsets */
+ 	7,			/* det sync thresh */
+-	0, 2, 2			/* test mode, min, max */
++	0, 2, 2,		/* test mode, min, max */
++	0,			/* rx/tx delay */
++	0, 0, 0, 0, 0, 0,	/* current BSS id */
++	0			/* hop set */
+ };
+ 
+ /*===========================================================================*/
+@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
+ 	 *    a_beacon_period = hops    a_beacon_period = KuS
+ 	 *//* 64ms = 010000 */
+ 	if (local->fw_ver == 0x55) {
+-		memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
++		memcpy(&local->sparm.b4, b4_default_startup_parms,
+ 		       sizeof(struct b4_startup_params));
+ 		/* Translate sane kus input values to old build 4/5 format */
+ 		/* i = hop time in uS truncated to 3 bytes */
+diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
+index d72ff8e7125d..3f2554fb1a62 100644
+--- a/drivers/net/wireless/rndis_wlan.c
++++ b/drivers/net/wireless/rndis_wlan.c
+@@ -3425,6 +3425,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
+ 
+ 	/* because rndis_command() sleeps we need to use workqueue */
+ 	priv->workqueue = create_singlethread_workqueue("rndis_wlan");
++	if (!priv->workqueue) {
++		wiphy_free(wiphy);
++		return -ENOMEM;
++	}
+ 	INIT_WORK(&priv->work, rndis_wlan_worker);
+ 	INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller);
+ 	INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results);
+diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+index 629ad8cfa17b..6952aaa232f7 100644
+--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
++++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
+@@ -1454,6 +1454,7 @@ static int rtl8187_probe(struct usb_interface *intf,
+ 		goto err_free_dev;
+ 	}
+ 	mutex_init(&priv->io_mutex);
++	mutex_init(&priv->conf_mutex);
+ 
+ 	SET_IEEE80211_DEV(dev, &intf->dev);
+ 	usb_set_intfdata(intf, dev);
+@@ -1627,7 +1628,6 @@ static int rtl8187_probe(struct usb_interface *intf,
+ 		printk(KERN_ERR "rtl8187: Cannot register device\n");
+ 		goto err_free_dmabuf;
+ 	}
+-	mutex_init(&priv->conf_mutex);
+ 	skb_queue_head_init(&priv->b_tx_status.queue);
+ 
+ 	wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 5b4048041147..1f75586ab3a6 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -1576,7 +1576,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+ 				dev_kfree_skb_irq(skb);
+ 				ring->idx = (ring->idx + 1) % ring->entries;
+ 			}
++
++			if (rtlpriv->use_new_trx_flow) {
++				rtlpci->tx_ring[i].cur_tx_rp = 0;
++				rtlpci->tx_ring[i].cur_tx_wp = 0;
++			}
++
+ 			ring->idx = 0;
++			ring->entries = rtlpci->txringcount[i];
+ 		}
+ 	}
+ 	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+index c5d4b8013cde..5a0fffaed0f5 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/fw.c
+@@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ 	struct sk_buff *skb = NULL;
+-
++	bool rtstatus;
+ 	u32 totalpacketlen;
+ 	u8 u1rsvdpageloc[5] = { 0 };
+ 	bool b_dlok = false;
+@@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
+ 	memcpy((u8 *)skb_put(skb, totalpacketlen),
+ 	       &reserved_page_packet, totalpacketlen);
+ 
+-	b_dlok = true;
++	rtstatus = rtl_cmd_send_packet(hw, skb);
++	if (rtstatus)
++		b_dlok = true;
+ 
+ 	if (b_dlok) {
+ 		RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+index 342678d2ed42..19f0db505a50 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/dm.c
+@@ -2490,9 +2490,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
+ 		for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
+ 			rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
+ 
+-			RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+-				 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+-				 rtldm->thermalvalue, thermal_value);
++		RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
++			 "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
++			 rtldm->thermalvalue, thermal_value);
+ 		/*Record last Power Tracking Thermal Value*/
+ 		rtldm->thermalvalue = thermal_value;
+ 	}
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+index 76e52dfb2be5..cf0e54b8846c 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+@@ -1377,6 +1377,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
+ 
+ 	ppsc->wakeup_reason = 0;
+ 
++	do_gettimeofday(&ts);
+ 	rtlhal->last_suspend_sec = ts.tv_sec;
+ 
+ 	switch (fw_reason) {
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
+index 040bf3c66958..0ac639ed4be9 100644
+--- a/drivers/net/wireless/ti/wl1251/main.c
++++ b/drivers/net/wireless/ti/wl1251/main.c
+@@ -1201,8 +1201,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
+ 		WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ 
+ 		enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
+-		wl1251_acx_arp_ip_filter(wl, enable, addr);
+-
++		ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
+ 		if (ret < 0)
+ 			goto out_sleep;
+ 	}
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index fd9f6ce14e8e..58cb86417da0 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1986,7 +1986,10 @@ static void netback_changed(struct xenbus_device *dev,
+ 	case XenbusStateInitialised:
+ 	case XenbusStateReconfiguring:
+ 	case XenbusStateReconfigured:
++		break;
++
+ 	case XenbusStateUnknown:
++		wake_up_all(&module_unload_q);
+ 		break;
+ 
+ 	case XenbusStateInitWait:
+@@ -2117,7 +2120,9 @@ static int xennet_remove(struct xenbus_device *dev)
+ 		xenbus_switch_state(dev, XenbusStateClosing);
+ 		wait_event(module_unload_q,
+ 			   xenbus_read_driver_state(dev->otherend) ==
+-			   XenbusStateClosing);
++			   XenbusStateClosing ||
++			   xenbus_read_driver_state(dev->otherend) ==
++			   XenbusStateUnknown);
+ 
+ 		xenbus_switch_state(dev, XenbusStateClosed);
+ 		wait_event(module_unload_q,
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 493b21bd1199..6601cc62a990 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -210,7 +210,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
+ 			str[i] = '_';
+ 	}
+ 
+-	return tsize;
++	return repend;
+ }
+ EXPORT_SYMBOL_GPL(of_device_get_modalias);
+ 
+diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
+index 53d15b30636a..e914007f5523 100644
+--- a/drivers/parport/parport_pc.c
++++ b/drivers/parport/parport_pc.c
+@@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards {
+ 	netmos_9901,
+ 	netmos_9865,
+ 	quatech_sppxp100,
++	wch_ch382l,
+ };
+ 
+ 
+@@ -2708,6 +2709,7 @@ static struct parport_pc_pci {
+ 	/* netmos_9901 */               { 1, { { 0, -1 }, } },
+ 	/* netmos_9865 */               { 1, { { 0, -1 }, } },
+ 	/* quatech_sppxp100 */		{ 1, { { 0, 1 }, } },
++	/* wch_ch382l */		{ 1, { { 2, -1 }, } },
+ };
+ 
+ static const struct pci_device_id parport_pc_pci_tbl[] = {
+@@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
+ 	/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
+ 	{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
+ 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
++	/* WCH CH382L PCI-E single parallel port card */
++	{ 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l },
+ 	{ 0, } /* terminate list */
+ };
+ MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl);
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index b60309ee80ed..031f64da6151 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -587,6 +587,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ {
+ 	unsigned long long sta = 0;
+ 	struct acpiphp_func *func;
++	u32 dvid;
+ 
+ 	list_for_each_entry(func, &slot->funcs, sibling) {
+ 		if (func->flags & FUNC_HAS_STA) {
+@@ -597,19 +598,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
+ 			if (ACPI_SUCCESS(status) && sta)
+ 				break;
+ 		} else {
+-			u32 dvid;
+-
+-			pci_bus_read_config_dword(slot->bus,
+-						  PCI_DEVFN(slot->device,
+-							    func->function),
+-						  PCI_VENDOR_ID, &dvid);
+-			if (dvid != 0xffffffff) {
++			if (pci_bus_read_dev_vendor_id(slot->bus,
++					PCI_DEVFN(slot->device, func->function),
++					&dvid, 0)) {
+ 				sta = ACPI_STA_ALL;
+ 				break;
+ 			}
+ 		}
+ 	}
+ 
++	if (!sta) {
++		/*
++		 * Check for the slot itself since it may be that the
++		 * ACPI slot is a device below PCIe upstream port so in
++		 * that case it may not even be reachable yet.
++		 */
++		if (pci_bus_read_dev_vendor_id(slot->bus,
++				PCI_DEVFN(slot->device, 0), &dvid, 0)) {
++			sta = ACPI_STA_ALL;
++		}
++	}
++
+ 	return (unsigned int)sta;
+ }
+ 
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 1363fe636281..afb80eb5a528 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -450,8 +450,6 @@ static void pci_device_shutdown(struct device *dev)
+ 
+ 	if (drv && drv->shutdown)
+ 		drv->shutdown(pci_dev);
+-	pci_msi_shutdown(pci_dev);
+-	pci_msix_shutdown(pci_dev);
+ 
+ #ifdef CONFIG_KEXEC
+ 	/*
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index c7dc06636bf6..005fc2478ef4 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -227,7 +227,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 			res->flags |= IORESOURCE_ROM_ENABLE;
+ 		l64 = l & PCI_ROM_ADDRESS_MASK;
+ 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
+-		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
++		mask64 = PCI_ROM_ADDRESS_MASK;
+ 	}
+ 
+ 	if (res->flags & IORESOURCE_MEM_64) {
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 7b9e3564fc43..4de72003515f 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3645,6 +3645,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
+ 			 quirk_dma_func1_alias);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
+ 			 quirk_dma_func1_alias);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
++			 quirk_dma_func1_alias);
+ /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
+ 			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 00f32ff6f74e..c03763d68a95 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -59,7 +59,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
+ 		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ 	} else if (resno == PCI_ROM_RESOURCE) {
+-		mask = (u32)PCI_ROM_ADDRESS_MASK;
++		mask = PCI_ROM_ADDRESS_MASK;
+ 	} else {
+ 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ 		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
+index 18ee2089df4a..db43f8b34e2a 100644
+--- a/drivers/pinctrl/core.c
++++ b/drivers/pinctrl/core.c
+@@ -977,19 +977,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p,
+ EXPORT_SYMBOL_GPL(pinctrl_lookup_state);
+ 
+ /**
+- * pinctrl_select_state() - select/activate/program a pinctrl state to HW
++ * pinctrl_commit_state() - select/activate/program a pinctrl state to HW
+  * @p: the pinctrl handle for the device that requests configuration
+  * @state: the state handle to select/activate/program
+  */
+-int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
++static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state)
+ {
+ 	struct pinctrl_setting *setting, *setting2;
+ 	struct pinctrl_state *old_state = p->state;
+ 	int ret;
+ 
+-	if (p->state == state)
+-		return 0;
+-
+ 	if (p->state) {
+ 		/*
+ 		 * For each pinmux setting in the old state, forget SW's record
+@@ -1053,6 +1050,19 @@ unapply_new_state:
+ 
+ 	return ret;
+ }
++
++/**
++ * pinctrl_select_state() - select/activate/program a pinctrl state to HW
++ * @p: the pinctrl handle for the device that requests configuration
++ * @state: the state handle to select/activate/program
++ */
++int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state)
++{
++	if (p->state == state)
++		return 0;
++
++	return pinctrl_commit_state(p, state);
++}
+ EXPORT_SYMBOL_GPL(pinctrl_select_state);
+ 
+ static void devm_pinctrl_release(struct device *dev, void *res)
+@@ -1221,7 +1231,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
+ int pinctrl_force_sleep(struct pinctrl_dev *pctldev)
+ {
+ 	if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep))
+-		return pinctrl_select_state(pctldev->p, pctldev->hog_sleep);
++		return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
+@@ -1233,7 +1243,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep);
+ int pinctrl_force_default(struct pinctrl_dev *pctldev)
+ {
+ 	if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default))
+-		return pinctrl_select_state(pctldev->p, pctldev->hog_default);
++		return pinctrl_commit_state(pctldev->p, pctldev->hog_default);
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(pinctrl_force_default);
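The pinctrl change above splits the old entry point in two: pinctrl_select_state() keeps
the same-state fast path, while the new pinctrl_commit_state() always programs the
hardware, so the force_sleep/force_default hogs can reapply a state the software cache
already considers current. A standalone sketch of that wrapper/commit split (illustrative
names, not the pinctrl API):

    #include <stdio.h>

    struct dev_state { int current_state; };

    static int commit_state(struct dev_state *d, int state)
    {
        /* always touches the hardware */
        printf("programming hw to state %d\n", state);
        d->current_state = state;
        return 0;
    }

    static int select_state(struct dev_state *d, int state)
    {
        if (d->current_state == state)  /* fast path: nothing to do */
            return 0;
        return commit_state(d, state);
    }

    static int force_sleep(struct dev_state *d)
    {
        /* bypass the cache on purpose, mirroring pinctrl_force_sleep() */
        return commit_state(d, 0 /* sleep */);
    }

    int main(void)
    {
        struct dev_state d = { .current_state = 1 };

        select_state(&d, 1);  /* no-op: cache hit */
        force_sleep(&d);      /* still reprograms the hardware */
        return 0;
    }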
+diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
+index abdaed34c728..5ea4c5a72a66 100644
+--- a/drivers/platform/x86/asus-nb-wmi.c
++++ b/drivers/platform/x86/asus-nb-wmi.c
+@@ -99,6 +99,15 @@ static const struct dmi_system_id asus_quirks[] = {
+ 		 */
+ 		.driver_data = &quirk_asus_wapf4,
+ 	},
++	{
++		.callback = dmi_matched,
++		.ident = "ASUSTeK COMPUTER INC. X302UA",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"),
++		},
++		.driver_data = &quirk_asus_wapf4,
++	},
+ 	{
+ 		.callback = dmi_matched,
+ 		.ident = "ASUSTeK COMPUTER INC. X401U",
+diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
+index dfe1ee89f7c7..922a86787c5c 100644
+--- a/drivers/power/pda_power.c
++++ b/drivers/power/pda_power.c
+@@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res)
+ static struct device *dev;
+ static struct pda_power_pdata *pdata;
+ static struct resource *ac_irq, *usb_irq;
+-static struct timer_list charger_timer;
+-static struct timer_list supply_timer;
+-static struct timer_list polling_timer;
++static struct delayed_work charger_work;
++static struct delayed_work polling_work;
++static struct delayed_work supply_work;
+ static int polling;
+ static struct power_supply *pda_psy_ac, *pda_psy_usb;
+ 
+@@ -140,7 +140,7 @@ static void update_charger(void)
+ 	}
+ }
+ 
+-static void supply_timer_func(unsigned long unused)
++static void supply_work_func(struct work_struct *work)
+ {
+ 	if (ac_status == PDA_PSY_TO_CHANGE) {
+ 		ac_status = new_ac_status;
+@@ -161,11 +161,12 @@ static void psy_changed(void)
+ 	 * Okay, charger set. Now wait a bit before notifying supplicants,
+ 	 * charge power should stabilize.
+ 	 */
+-	mod_timer(&supply_timer,
+-		  jiffies + msecs_to_jiffies(pdata->wait_for_charger));
++	cancel_delayed_work(&supply_work);
++	schedule_delayed_work(&supply_work,
++			      msecs_to_jiffies(pdata->wait_for_charger));
+ }
+ 
+-static void charger_timer_func(unsigned long unused)
++static void charger_work_func(struct work_struct *work)
+ {
+ 	update_status();
+ 	psy_changed();
+@@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply)
+ 	 * Wait a bit before reading ac/usb line status and setting charger,
+ 	 * because ac/usb status readings may lag from irq.
+ 	 */
+-	mod_timer(&charger_timer,
+-		  jiffies + msecs_to_jiffies(pdata->wait_for_status));
++	cancel_delayed_work(&charger_work);
++	schedule_delayed_work(&charger_work,
++			      msecs_to_jiffies(pdata->wait_for_status));
+ 
+ 	return IRQ_HANDLED;
+ }
+ 
+-static void polling_timer_func(unsigned long unused)
++static void polling_work_func(struct work_struct *work)
+ {
+ 	int changed = 0;
+ 
+@@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused)
+ 	if (changed)
+ 		psy_changed();
+ 
+-	mod_timer(&polling_timer,
+-		  jiffies + msecs_to_jiffies(pdata->polling_interval));
++	cancel_delayed_work(&polling_work);
++	schedule_delayed_work(&polling_work,
++			      msecs_to_jiffies(pdata->polling_interval));
+ }
+ 
+ #if IS_ENABLED(CONFIG_USB_PHY)
+@@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb,
+ 	 * Wait a bit before reading ac/usb line status and setting charger,
+ 	 * because ac/usb status readings may lag from irq.
+ 	 */
+-	mod_timer(&charger_timer,
+-		  jiffies + msecs_to_jiffies(pdata->wait_for_status));
++	cancel_delayed_work(&charger_work);
++	schedule_delayed_work(&charger_work,
++			      msecs_to_jiffies(pdata->wait_for_status));
+ 
+ 	return NOTIFY_OK;
+ }
+@@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev)
+ 	if (!pdata->ac_max_uA)
+ 		pdata->ac_max_uA = 500000;
+ 
+-	setup_timer(&charger_timer, charger_timer_func, 0);
+-	setup_timer(&supply_timer, supply_timer_func, 0);
++	INIT_DELAYED_WORK(&charger_work, charger_work_func);
++	INIT_DELAYED_WORK(&supply_work, supply_work_func);
+ 
+ 	ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
+ 	usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
+@@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev)
+ 
+ 	if (polling) {
+ 		dev_dbg(dev, "will poll for status\n");
+-		setup_timer(&polling_timer, polling_timer_func, 0);
+-		mod_timer(&polling_timer,
+-			  jiffies + msecs_to_jiffies(pdata->polling_interval));
++		INIT_DELAYED_WORK(&polling_work, polling_work_func);
++		cancel_delayed_work(&polling_work);
++		schedule_delayed_work(&polling_work,
++				      msecs_to_jiffies(pdata->polling_interval));
+ 	}
+ 
+ 	if (ac_irq || usb_irq)
+@@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev)
+ 		free_irq(ac_irq->start, pda_psy_ac);
+ 
+ 	if (polling)
+-		del_timer_sync(&polling_timer);
+-	del_timer_sync(&charger_timer);
+-	del_timer_sync(&supply_timer);
++		cancel_delayed_work_sync(&polling_work);
++	cancel_delayed_work_sync(&charger_work);
++	cancel_delayed_work_sync(&supply_work);
+ 
+ 	if (pdata->is_usb_online)
+ 		power_supply_unregister(pda_psy_usb);
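The pda_power hunks above are a mechanical timer-to-workqueue conversion: each
struct timer_list becomes a struct delayed_work, mod_timer() becomes
cancel_delayed_work() plus schedule_delayed_work(), and del_timer_sync() becomes
cancel_delayed_work_sync(). A kernel-style sketch of the pattern with hypothetical names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static struct delayed_work status_work;

    static void status_work_func(struct work_struct *work)
    {
        /* runs in process context and may sleep, unlike a timer
         * callback running in softirq context */
    }

    static void setup_once(void)
    {
        INIT_DELAYED_WORK(&status_work, status_work_func);
    }

    static void rearm(unsigned int delay_ms)
    {
        /* mod_timer() equivalent: drop any pending run, queue anew */
        cancel_delayed_work(&status_work);
        schedule_delayed_work(&status_work, msecs_to_jiffies(delay_ms));
    }

    static void teardown(void)
    {
        /* del_timer_sync() equivalent: waits for a running callback */
        cancel_delayed_work_sync(&status_work);
    }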
+diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
+index 84419af16f77..fd12ccc11e26 100644
+--- a/drivers/powercap/powercap_sys.c
++++ b/drivers/powercap/powercap_sys.c
+@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(
+ 
+ 	power_zone->id = result;
+ 	idr_init(&power_zone->idr);
++	result = -ENOMEM;
+ 	power_zone->name = kstrdup(name, GFP_KERNEL);
+ 	if (!power_zone->name)
+ 		goto err_name_alloc;
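The one-line powercap fix above presets the shared error code before the first allocation
whose failure branches to a common cleanup label; without it, a failed kstrdup() would
leave the function returning the stale, non-negative idr result. A hypothetical userspace
illustration of the pitfall:

    #include <string.h>
    #include <errno.h>

    struct zone { int id; char *name; };

    static int register_zone(struct zone *z, const char *name)
    {
        int result;

        result = 42;                 /* imagine: id allocation returned 42 */
        z->id = result;

        result = -ENOMEM;            /* the fix: preset before the alloc */
        z->name = strdup(name);
        if (!z->name)
            goto err_name_alloc;     /* without the preset this path
                                      * would "fail" with result == 42 */
        return 0;

    err_name_alloc:
        /* undo the id allocation here */
        return result;
    }

    int main(void)
    {
        struct zone z;
        return register_zone(&z, "zone0") ? 1 : 0;
    }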
+diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
+index cabd7d8e05cc..3e07855bbea7 100644
+--- a/drivers/pwm/pwm-tegra.c
++++ b/drivers/pwm/pwm-tegra.c
+@@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip);
+ 	unsigned long long c;
+ 	unsigned long rate, hz;
++	unsigned long long ns100 = NSEC_PER_SEC;
+ 	u32 val = 0;
+ 	int err;
+ 
+@@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+ 	 * cycles at the PWM clock rate will take period_ns nanoseconds.
+ 	 */
+ 	rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH;
+-	hz = NSEC_PER_SEC / period_ns;
+ 
+-	rate = (rate + (hz / 2)) / hz;
++	/* Consider precision in PWM_SCALE_WIDTH rate calculation */
++	ns100 *= 100;
++	hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
++	rate = DIV_ROUND_CLOSEST(rate * 100, hz);
+ 
+ 	/*
+ 	 * Since the actual PWM divider is the register's frequency divider
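The pwm-tegra hunk above replaces a truncating integer frequency with an x100 fixed-point
value plus rounded divisions. The truncation is worst at long periods: in the
compile-and-run sketch below (illustrative clock numbers), the old integer hz collapses
2.9999... Hz to 2 Hz and inflates the divider by roughly 50%.

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL
    #define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

    int main(void)
    {
        uint64_t period_ns = 333333400;  /* ~2.9999994 Hz */
        uint64_t rate      = 32768;      /* clock rate after the duty shift */

        /* old: hz truncates to 2, divider comes out ~50% too large */
        uint64_t hz_old  = NSEC_PER_SEC / period_ns;
        uint64_t div_old = DIV_ROUND_CLOSEST(rate, hz_old);

        /* new: hz carried in centihertz (x100), both divisions rounded */
        uint64_t hz_new  = DIV_ROUND_CLOSEST(NSEC_PER_SEC * 100, period_ns);
        uint64_t div_new = DIV_ROUND_CLOSEST(rate * 100, hz_new);

        printf("old: hz=%llu divider=%llu\n",
               (unsigned long long)hz_old, (unsigned long long)div_old);   /* 2, 16384 */
        printf("new: hz(x100)=%llu divider=%llu\n",
               (unsigned long long)hz_new, (unsigned long long)div_new);   /* 300, 10923 */
        return 0;
    }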
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 038da40e4038..f84c0506afd0 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -262,6 +262,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 			missing = year;
+ 	}
+ 
++	/* Can't proceed if alarm is still invalid after replacing
++	 * missing fields.
++	 */
++	err = rtc_valid_tm(&alarm->time);
++	if (err)
++		goto done;
++
+ 	/* with luck, no rollover is needed */
+ 	t_now = rtc_tm_to_time64(&now);
+ 	t_alm = rtc_tm_to_time64(&alarm->time);
+@@ -313,9 +320,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+ 		dev_warn(&rtc->dev, "alarm rollover not handled\n");
+ 	}
+ 
+-done:
+ 	err = rtc_valid_tm(&alarm->time);
+ 
++done:
+ 	if (err) {
+ 		dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
+ 			alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
+diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
+index 167783fa7ac1..216ad22996b8 100644
+--- a/drivers/rtc/rtc-ds1374.c
++++ b/drivers/rtc/rtc-ds1374.c
+@@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
+ 		if (get_user(new_margin, (int __user *)arg))
+ 			return -EFAULT;
+ 
++		/* the hardware's tick rate is 4096 Hz, so
++		 * the counter value needs to be scaled accordingly
++		 */
++		new_margin <<= 12;
+ 		if (new_margin < 1 || new_margin > 16777216)
+ 			return -EINVAL;
+ 
+@@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
+ 		ds1374_wdt_ping();
+ 		/* fallthrough */
+ 	case WDIOC_GETTIMEOUT:
+-		return put_user(wdt_margin, (int __user *)arg);
++		/* when returning the timeout, the inverse scaling (ticks back to seconds) applies */
++		return put_user((wdt_margin >> 12), (int __user *)arg);
+ 	case WDIOC_SETOPTIONS:
+ 		if (copy_from_user(&options, (int __user *)arg, sizeof(int)))
+ 			return -EFAULT;
+@@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd,
+ 		if (options & WDIOS_DISABLECARD) {
+ 			pr_info("disable watchdog\n");
+ 			ds1374_wdt_disable();
++			return 0;
+ 		}
+ 
+ 		if (options & WDIOS_ENABLECARD) {
+ 			pr_info("enable watchdog\n");
+ 			ds1374_wdt_settimeout(wdt_margin);
+ 			ds1374_wdt_ping();
++			return 0;
+ 		}
+-
+ 		return -EINVAL;
+ 	}
+ 	return -ENOTTY;
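The ds1374 hunks above introduce the seconds-to-ticks scaling the hardware expects: the
watchdog counter runs at 4096 Hz, so shifting left by 12 converts user seconds into ticks
and shifting right by 12 converts back, and the 24-bit counter limit of 16777216 ticks
corresponds to 4096 s. A two-line arithmetic check:

    #include <stdio.h>

    int main(void)
    {
        int seconds = 60;
        int ticks   = seconds << 12;        /* 60 s -> 245760 ticks */

        /* 16777216 is the 24-bit counter limit: 16777216 >> 12 = 4096 s */
        printf("%d s = %d ticks (max %d s)\n",
               seconds, ticks, 16777216 >> 12);
        printf("round trip: %d s\n", ticks >> 12);
        return 0;
    }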
+diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
+index 482af0dda0b0..ff217034f6d6 100644
+--- a/drivers/rtc/rtc-opal.c
++++ b/drivers/rtc/rtc-opal.c
+@@ -150,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
+ 
+ 	y_m_d = be32_to_cpu(__y_m_d);
+ 	h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
++
++	/* check if no alarm is set */
++	if (y_m_d == 0 && h_m_s_ms == 0) {
++		pr_debug("No alarm is set\n");
++		rc = -ENOENT;
++		goto exit;
++	} else {
++		pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms);
++	}
++
+ 	opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
+ 
+ exit:
+diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
+index e3bf885f4a6c..d5f02c3da878 100644
+--- a/drivers/s390/cio/chsc.c
++++ b/drivers/s390/cio/chsc.c
+@@ -362,6 +362,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
+ 
+ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ {
++	struct channel_path *chp;
+ 	struct chp_link link;
+ 	struct chp_id chpid;
+ 	int status;
+@@ -374,10 +375,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
+ 	chpid.id = sei_area->rsid;
+ 	/* allocate a new channel path structure, if needed */
+ 	status = chp_get_status(chpid);
+-	if (status < 0)
+-		chp_new(chpid);
+-	else if (!status)
++	if (!status)
+ 		return;
++
++	if (status < 0) {
++		chp_new(chpid);
++	} else {
++		chp = chpid_to_chp(chpid);
++		mutex_lock(&chp->lock);
++		chp_update_desc(chp);
++		mutex_unlock(&chp->lock);
++	}
+ 	memset(&link, 0, sizeof(struct chp_link));
+ 	link.chpid = chpid;
+ 	if ((sei_area->vf & 0xc0) != 0) {
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 848e3b64ea6e..fb7298920c8c 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -126,7 +126,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
+ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
+ 			int start, int count, int auto_ack)
+ {
+-	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
++	int rc, tmp_count = count, tmp_start = start, nr = q->nr;
+ 	unsigned int ccq = 0;
+ 
+ 	qperf_inc(q, eqbs);
+@@ -149,14 +149,7 @@ again:
+ 		qperf_inc(q, eqbs_partial);
+ 		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
+ 			tmp_count);
+-		/*
+-		 * Retry once, if that fails bail out and process the
+-		 * extracted buffers before trying again.
+-		 */
+-		if (!retried++)
+-			goto again;
+-		else
+-			return count - tmp_count;
++		return count - tmp_count;
+ 	}
+ 
+ 	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
+@@ -212,7 +205,10 @@ again:
+ 	return 0;
+ }
+ 
+-/* returns number of examined buffers and their common state in *state */
++/*
++ * Returns number of examined buffers and their common state in *state.
++ * Requested number of buffers-to-examine must be > 0.
++ */
+ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ 				 unsigned char *state, unsigned int count,
+ 				 int auto_ack, int merge_pending)
+@@ -223,17 +219,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
+ 	if (is_qebsm(q))
+ 		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
+ 
+-	for (i = 0; i < count; i++) {
+-		if (!__state) {
+-			__state = q->slsb.val[bufnr];
+-			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
+-				__state = SLSB_P_OUTPUT_EMPTY;
+-		} else if (merge_pending) {
+-			if ((q->slsb.val[bufnr] & __state) != __state)
+-				break;
+-		} else if (q->slsb.val[bufnr] != __state)
+-			break;
++	/* get initial state: */
++	__state = q->slsb.val[bufnr];
++	if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
++		__state = SLSB_P_OUTPUT_EMPTY;
++
++	for (i = 1; i < count; i++) {
+ 		bufnr = next_buf(bufnr);
++
++		/* merge PENDING into EMPTY: */
++		if (merge_pending &&
++		    q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
++		    __state == SLSB_P_OUTPUT_EMPTY)
++			continue;
++
++		/* stop if next state differs from initial state: */
++		if (q->slsb.val[bufnr] != __state)
++			break;
+ 	}
+ 	*state = __state;
+ 	return i;
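The rewritten get_buf_states() loop above fixes the state scan: the reference state is
now taken from the first buffer (folding PENDING into EMPTY once) before the walk starts,
instead of being re-derived inside the loop. A self-contained userspace model of the new
logic:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS 128
    #define OUTPUT_EMPTY     1
    #define OUTPUT_PENDING   2

    static int count_same_state(const unsigned char *slsb, unsigned int bufnr,
                                unsigned int count, int merge_pending,
                                unsigned char *state)
    {
        unsigned char first = slsb[bufnr];
        unsigned int i;

        if (merge_pending && first == OUTPUT_PENDING)
            first = OUTPUT_EMPTY;

        for (i = 1; i < count; i++) {
            bufnr = (bufnr + 1) % QDIO_MAX_BUFFERS;
            if (merge_pending && slsb[bufnr] == OUTPUT_PENDING &&
                first == OUTPUT_EMPTY)
                continue;            /* PENDING counts as EMPTY */
            if (slsb[bufnr] != first)
                break;               /* run of equal states ends */
        }
        *state = first;
        return i;
    }

    int main(void)
    {
        unsigned char ring[QDIO_MAX_BUFFERS] = {
            [0] = OUTPUT_EMPTY, [1] = OUTPUT_PENDING, [2] = OUTPUT_EMPTY,
        };
        unsigned char st;
        int n = count_same_state(ring, 0, 3, 1, &st);

        printf("matched %d buffers in state %d\n", n, st); /* 3, EMPTY */
        return 0;
    }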
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 9e9964ca696b..81d23bbbd316 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -517,8 +517,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
+ 	    queue == card->qdio.no_in_queues - 1;
+ }
+ 
+-
+-static int qeth_issue_next_read(struct qeth_card *card)
++static int __qeth_issue_next_read(struct qeth_card *card)
+ {
+ 	int rc;
+ 	struct qeth_cmd_buffer *iob;
+@@ -549,6 +548,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
+ 	return rc;
+ }
+ 
++static int qeth_issue_next_read(struct qeth_card *card)
++{
++	int ret;
++
++	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++	ret = __qeth_issue_next_read(card);
++	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
++
++	return ret;
++}
++
+ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
+ {
+ 	struct qeth_reply *reply;
+@@ -951,7 +961,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
+ 	spin_lock_irqsave(&card->thread_mask_lock, flags);
+ 	card->thread_running_mask &= ~thread;
+ 	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
+-	wake_up(&card->wait_q);
++	wake_up_all(&card->wait_q);
+ }
+ EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
+ 
+@@ -1155,6 +1165,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		}
+ 		rc = qeth_get_problem(cdev, irb);
+ 		if (rc) {
++			card->read_or_write_problem = 1;
+ 			qeth_clear_ipacmd_list(card);
+ 			qeth_schedule_recovery(card);
+ 			goto out;
+@@ -1173,7 +1184,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
+ 		return;
+ 	if (channel == &card->read &&
+ 	    channel->state == CH_STATE_UP)
+-		qeth_issue_next_read(card);
++		__qeth_issue_next_read(card);
+ 
+ 	iob = channel->iob;
+ 	index = channel->buf_no;
+@@ -5017,8 +5028,6 @@ static void qeth_core_free_card(struct qeth_card *card)
+ 	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
+ 	qeth_clean_channel(&card->read);
+ 	qeth_clean_channel(&card->write);
+-	if (card->dev)
+-		free_netdev(card->dev);
+ 	kfree(card->ip_tbd_list);
+ 	qeth_free_qdio_buffers(card);
+ 	unregister_service_level(&card->qeth_service_level);
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index b0413f5611cf..3f79f83451ae 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -913,8 +913,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l2_set_offline(cgdev);
+ 
+ 	if (card->dev) {
+-		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
++		free_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+ 	return;
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 6dbf0d5a2a22..34c8a4d20498 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3358,8 +3358,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l3_set_offline(cgdev);
+ 
+ 	if (card->dev) {
+-		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
++		free_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+ 
+diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
+index 1346e052e03c..8009158a6639 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc.h
++++ b/drivers/scsi/bnx2fc/bnx2fc.h
+@@ -191,6 +191,7 @@ struct bnx2fc_hba {
+ 	struct bnx2fc_cmd_mgr *cmd_mgr;
+ 	spinlock_t hba_lock;
+ 	struct mutex hba_mutex;
++	struct mutex hba_stats_mutex;
+ 	unsigned long adapter_state;
+ 		#define ADAPTER_STATE_UP		0
+ 		#define ADAPTER_STATE_GOING_DOWN	1
+diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+index 98d06d151958..d477c687af55 100644
+--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
++++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+@@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+ 	if (!fw_stats)
+ 		return NULL;
+ 
++	mutex_lock(&hba->hba_stats_mutex);
++
+ 	bnx2fc_stats = fc_get_host_stats(shost);
+ 
+ 	init_completion(&hba->stat_req_done);
+ 	if (bnx2fc_send_stat_req(hba))
+-		return bnx2fc_stats;
++		goto unlock_stats_mutex;
+ 	rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
+ 	if (!rc) {
+ 		BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
+-		return bnx2fc_stats;
++		goto unlock_stats_mutex;
+ 	}
+ 	BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+ 	bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+@@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
+ 
+ 	memcpy(&hba->prev_stats, hba->stats_buffer,
+ 	       sizeof(struct fcoe_statistics_params));
++
++unlock_stats_mutex:
++	mutex_unlock(&hba->hba_stats_mutex);
+ 	return bnx2fc_stats;
+ }
+ 
+@@ -1303,6 +1308,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
+ 	}
+ 	spin_lock_init(&hba->hba_lock);
+ 	mutex_init(&hba->hba_mutex);
++	mutex_init(&hba->hba_stats_mutex);
+ 
+ 	hba->cnic = cnic;
+ 
+diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
+index 2e66f34ebb79..13580192691f 100644
+--- a/drivers/scsi/csiostor/csio_hw.c
++++ b/drivers/scsi/csiostor/csio_hw.c
+@@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+ 		goto bye;
+ 	}
+ 
+-	mempool_free(mbp, hw->mb_mempool);
+ 	if (finicsum != cfcsum) {
+ 		csio_warn(hw,
+ 		      "Config File checksum mismatch: csum=%#x, computed=%#x\n",
+@@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+ 	rv = csio_hw_validate_caps(hw, mbp);
+ 	if (rv != 0)
+ 		goto bye;
++
++	mempool_free(mbp, hw->mb_mempool);
++	mbp = NULL;
++
+ 	/*
+ 	 * Note that we're operating with parameters
+ 	 * not supplied by the driver, rather than from hard-wired
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index cd52c070701b..3a9648d7f441 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+ 
+ 	qc->err_mask |= AC_ERR_OTHER;
+ 	sata_port->ioasa.status |= ATA_BUSY;
+-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 	ata_qc_complete(qc);
++	if (ipr_cmd->eh_comp)
++		complete(ipr_cmd->eh_comp);
++	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+ 
+ /**
+@@ -5859,8 +5861,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
+ 		res->in_erp = 0;
+ 	}
+ 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
+-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 	scsi_cmd->scsi_done(scsi_cmd);
++	if (ipr_cmd->eh_comp)
++		complete(ipr_cmd->eh_comp);
++	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+ 
+ /**
+@@ -6250,8 +6254,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
+ 	}
+ 
+ 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
+-	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 	scsi_cmd->scsi_done(scsi_cmd);
++	if (ipr_cmd->eh_comp)
++		complete(ipr_cmd->eh_comp);
++	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ }
+ 
+ /**
+@@ -6277,8 +6283,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+ 		scsi_dma_unmap(scsi_cmd);
+ 
+ 		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
+-		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 		scsi_cmd->scsi_done(scsi_cmd);
++		if (ipr_cmd->eh_comp)
++			complete(ipr_cmd->eh_comp);
++		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
+ 	} else {
+ 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 8826110991eb..e14bfcd37692 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
+ 		 */
+ 		switch (session->state) {
+ 		case ISCSI_STATE_FAILED:
++			/*
++			 * cmds should fail during shutdown if the session
++			 * state is bad, allowing completion to happen
++			 */
++			if (unlikely(system_state != SYSTEM_RUNNING)) {
++				reason = FAILURE_SESSION_FAILED;
++				sc->result = DID_NO_CONNECT << 16;
++				break;
++			}
+ 		case ISCSI_STATE_IN_RECOVERY:
+ 			reason = FAILURE_SESSION_IN_RECOVERY;
+ 			sc->result = DID_IMM_RETRY << 16;
+@@ -1979,6 +1988,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
+ 	}
+ 
+ 	if (session->state != ISCSI_STATE_LOGGED_IN) {
++		/*
++		 * During shutdown, if session is prematurely disconnected,
++		 * recovery won't happen and there will be hung cmds. Not
++		 * handling cmds would trigger EH, also bad in this case.
++		 * Instead, handle the cmd, allow completion to happen and let
++		 * the upper layer deal with the result.
++		 */
++		if (unlikely(system_state != SYSTEM_RUNNING)) {
++			sc->result = DID_NO_CONNECT << 16;
++			ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
++			rc = BLK_EH_HANDLED;
++			goto done;
++		}
+ 		/*
+ 		 * We are probably in the middle of iscsi recovery so let
+ 		 * that complete and handle the error.
+@@ -2083,7 +2105,7 @@ done:
+ 		task->last_timeout = jiffies;
+ 	spin_unlock(&session->frwd_lock);
+ 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
+-		     "timer reset" : "nh");
++		     "timer reset" : "shutdown or nh");
+ 	return rc;
+ }
+ 
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index 022bb6e10d98..12886f96b286 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
+ 	phy->phy->minimum_linkrate = dr->pmin_linkrate;
+ 	phy->phy->maximum_linkrate = dr->pmax_linkrate;
+ 	phy->phy->negotiated_linkrate = phy->linkrate;
++	phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
+ 
+  skip:
+ 	if (new_phy)
+@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
+ 	res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
+ 			            resp, RPEL_RESP_SIZE);
+ 
+-	if (!res)
++	if (res)
+ 		goto out;
+ 
+ 	phy->invalid_dword_count = scsi_to_u32(&resp[12]);
+@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
+ 	phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);
+ 
+  out:
++	kfree(req);
+ 	kfree(resp);
+ 	return res;
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 625e3ee877ee..570332956ae7 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -13517,6 +13517,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ 	case LPFC_Q_CREATE_VERSION_1:
+ 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
+ 		       wq->entry_count);
++		bf_set(lpfc_mbox_hdr_version, &shdr->request,
++		       LPFC_Q_CREATE_VERSION_1);
++
+ 		switch (wq->entry_size) {
+ 		default:
+ 		case 64:
+diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
+index 14c0334f41e4..26c67c42985c 100644
+--- a/drivers/scsi/mac_esp.c
++++ b/drivers/scsi/mac_esp.c
+@@ -55,6 +55,7 @@ struct mac_esp_priv {
+ 	int error;
+ };
+ static struct esp *esp_chips[2];
++static DEFINE_SPINLOCK(esp_chips_lock);
+ 
+ #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
+ 			       platform_get_drvdata((struct platform_device *) \
+@@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev)
+ 	}
+ 
+ 	host->irq = IRQ_MAC_SCSI;
+-	esp_chips[dev->id] = esp;
+-	mb();
+-	if (esp_chips[!dev->id] == NULL) {
+-		err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
+-		if (err < 0) {
+-			esp_chips[dev->id] = NULL;
+-			goto fail_free_priv;
+-		}
++
++	/* The request_irq() call is intended to succeed for the first device
++	 * and fail for the second device.
++	 */
++	err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
++	spin_lock(&esp_chips_lock);
++	if (err < 0 && esp_chips[!dev->id] == NULL) {
++		spin_unlock(&esp_chips_lock);
++		goto fail_free_priv;
+ 	}
++	esp_chips[dev->id] = esp;
++	spin_unlock(&esp_chips_lock);
+ 
+ 	err = scsi_esp_register(esp, &dev->dev);
+ 	if (err)
+@@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev)
+ 	return 0;
+ 
+ fail_free_irq:
+-	if (esp_chips[!dev->id] == NULL)
++	spin_lock(&esp_chips_lock);
++	esp_chips[dev->id] = NULL;
++	if (esp_chips[!dev->id] == NULL) {
++		spin_unlock(&esp_chips_lock);
+ 		free_irq(host->irq, esp);
++	} else
++		spin_unlock(&esp_chips_lock);
+ fail_free_priv:
+ 	kfree(mep);
+ fail_free_command_block:
+@@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev)
+ 
+ 	scsi_esp_unregister(esp);
+ 
++	spin_lock(&esp_chips_lock);
+ 	esp_chips[dev->id] = NULL;
+-	if (!(esp_chips[0] || esp_chips[1]))
++	if (esp_chips[!dev->id] == NULL) {
++		spin_unlock(&esp_chips_lock);
+ 		free_irq(irq, NULL);
++	} else
++		spin_unlock(&esp_chips_lock);
+ 
+ 	kfree(mep);
+ 
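The mac_esp changes above put the shared esp_chips[] table under a spinlock and rework
who owns the single IRQ line. The underlying pattern, sketched with illustrative names:
registration and removal update the table under the lock, and the IRQ is released only
once both slots are empty.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static void *chips[2];
    static DEFINE_SPINLOCK(chips_lock);

    static void register_chip(int id, void *chip)
    {
        spin_lock(&chips_lock);
        chips[id] = chip;
        spin_unlock(&chips_lock);
    }

    static bool unregister_chip_was_last(int id)
    {
        bool last;

        spin_lock(&chips_lock);
        chips[id] = NULL;
        last = (chips[!id] == NULL);    /* free the IRQ only when both
                                         * slots are empty */
        spin_unlock(&chips_lock);
        return last;
    }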
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 55df57341858..17c12263dbd1 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -180,7 +180,7 @@ static struct {
+ 	{"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+ 	{"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN},	/* HP VA7400 */
+-	{"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */
++	{"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */
+ 	{"HP", "NetRAID-4M", NULL, BLIST_FORCELUN},
+ 	{"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD},
+ 	{"HP", "C1557A", NULL, BLIST_FORCELUN},
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index a678dd10905f..1977738cb0f5 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1811,6 +1811,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
+ 				break;	/* standby */
+ 			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
+ 				break;	/* unavailable */
++			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
++				break;	/* sanitize in progress */
+ 			/*
+ 			 * Issue command to spin up drive when not ready
+ 			 */
+diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
+index dcb0d76d7312..2ecf9844eb2e 100644
+--- a/drivers/scsi/ses.c
++++ b/drivers/scsi/ses.c
+@@ -528,7 +528,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
+ 					ecomp = &edev->component[components++];
+ 
+ 				if (!IS_ERR(ecomp)) {
+-					ses_get_power_status(edev, ecomp);
+ 					if (addl_desc_ptr)
+ 						ses_process_descriptor(
+ 							ecomp,
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 2589a75f0810..b03ca046c79f 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -535,6 +535,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
+ 	} else
+ 		count = (old_hdr->result == 0) ? 0 : -EIO;
+ 	sg_finish_rem_req(srp);
++	sg_remove_request(sfp, srp);
+ 	retval = count;
+ free_old_hdr:
+ 	kfree(old_hdr);
+@@ -575,6 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+ 	}
+ err_out:
+ 	err2 = sg_finish_rem_req(srp);
++	sg_remove_request(sfp, srp);
+ 	return err ? : err2 ? : count;
+ }
+ 
+@@ -674,18 +676,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	 * is a non-zero input_size, so emit a warning.
+ 	 */
+ 	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
+-		static char cmd[TASK_COMM_LEN];
+-		if (strcmp(current->comm, cmd)) {
+-			printk_ratelimited(KERN_WARNING
+-					   "sg_write: data in/out %d/%d bytes "
+-					   "for SCSI command 0x%x-- guessing "
+-					   "data in;\n   program %s not setting "
+-					   "count and/or reply_len properly\n",
+-					   old_hdr.reply_len - (int)SZ_SG_HEADER,
+-					   input_size, (unsigned int) cmnd[0],
+-					   current->comm);
+-			strcpy(cmd, current->comm);
+-		}
++		printk_ratelimited(KERN_WARNING
++				   "sg_write: data in/out %d/%d bytes "
++				   "for SCSI command 0x%x-- guessing "
++				   "data in;\n   program %s not setting "
++				   "count and/or reply_len properly\n",
++				   old_hdr.reply_len - (int)SZ_SG_HEADER,
++				   input_size, (unsigned int) cmnd[0],
++				   current->comm);
+ 	}
+ 	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
+ 	return (k < 0) ? k : count;
+@@ -764,6 +762,35 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ 	return count;
+ }
+ 
++static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
++{
++	switch (hp->dxfer_direction) {
++	case SG_DXFER_NONE:
++		if (hp->dxferp || hp->dxfer_len > 0)
++			return false;
++		return true;
++	case SG_DXFER_FROM_DEV:
++		/*
++		 * for SG_DXFER_FROM_DEV, dxfer_len is always set to > 0, and
++		 * dxferp may legitimately be either NULL or non-NULL, so there
++		 * is nothing further to check. Just return true.
++		 */
++		return true;
++	case SG_DXFER_TO_DEV:
++	case SG_DXFER_TO_FROM_DEV:
++		if (!hp->dxferp || hp->dxfer_len == 0)
++			return false;
++		return true;
++	case SG_DXFER_UNKNOWN:
++		if ((!hp->dxferp && hp->dxfer_len) ||
++		    (hp->dxferp && hp->dxfer_len == 0))
++			return false;
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static int
+ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ 		unsigned char *cmnd, int timeout, int blocking)
+@@ -784,17 +811,22 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ 			"sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
+ 			(int) cmnd[0], (int) hp->cmd_len));
+ 
++	if (!sg_is_valid_dxfer(hp))
++		return -EINVAL;
++
+ 	k = sg_start_req(srp, cmnd);
+ 	if (k) {
+ 		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
+ 			"sg_common_write: start_req err=%d\n", k));
+ 		sg_finish_rem_req(srp);
++		sg_remove_request(sfp, srp);
+ 		return k;	/* probably out of space --> ENOMEM */
+ 	}
+ 	if (atomic_read(&sdp->detaching)) {
+ 		if (srp->bio)
+ 			blk_end_request_all(srp->rq, -EIO);
+ 		sg_finish_rem_req(srp);
++		sg_remove_request(sfp, srp);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1284,6 +1316,7 @@ sg_rq_end_io_usercontext(struct work_struct *work)
+ 	struct sg_fd *sfp = srp->parentfp;
+ 
+ 	sg_finish_rem_req(srp);
++	sg_remove_request(sfp, srp);
+ 	kref_put(&sfp->f_ref, sg_remove_sfp);
+ }
+ 
+@@ -1828,8 +1861,6 @@ sg_finish_rem_req(Sg_request *srp)
+ 	else
+ 		sg_remove_scat(sfp, req_schp);
+ 
+-	sg_remove_request(sfp, srp);
+-
+ 	return ret;
+ }
+ 
+@@ -2066,11 +2097,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+ 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
+ 			resp->done = 2;	/* guard against other readers */
+-			break;
++			write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++			return resp;
+ 		}
+ 	}
+ 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-	return resp;
++	return NULL;
+ }
+ 
+ /* always adds to end of list */
+@@ -2176,12 +2208,17 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ 	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ 	struct sg_device *sdp = sfp->parentdp;
+ 	Sg_request *srp;
++	unsigned long iflags;
+ 
+ 	/* Cleanup any responses which were never read(). */
++	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ 	while (!list_empty(&sfp->rq_list)) {
+ 		srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
+ 		sg_finish_rem_req(srp);
++		list_del(&srp->entry);
++		srp->parentfp = NULL;
+ 	}
++	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ 
+ 	if (sfp->reserve.bufflen > 0) {
+ 		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
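Two themes run through the sg.c hunks above: sg_finish_rem_req() no longer unlinks the
request, so every caller now pairs it with an explicit sg_remove_request(), and
sg_common_write() rejects malformed transfers up front via sg_is_valid_dxfer(). The
validation rules restated as a standalone predicate (constants inlined for illustration):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    enum { DX_NONE, DX_TO_DEV, DX_FROM_DEV, DX_TO_FROM_DEV, DX_UNKNOWN };

    static bool dxfer_valid(int dir, const void *buf, unsigned int len)
    {
        switch (dir) {
        case DX_NONE:
            return !buf && len == 0;    /* no buffer allowed */
        case DX_FROM_DEV:
            return true;                /* len > 0 is set internally */
        case DX_TO_DEV:
        case DX_TO_FROM_DEV:
            return buf && len > 0;      /* buffer required */
        case DX_UNKNOWN:
            return (buf != NULL) == (len > 0);  /* must agree */
        default:
            return false;
        }
    }

    int main(void)
    {
        char buf[16];

        printf("%d %d\n",
               dxfer_valid(DX_NONE, NULL, 0),   /* 1 */
               dxfer_valid(DX_TO_DEV, buf, 0)); /* 0: length missing */
        return 0;
    }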
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index d836414c920d..ac2b06a7142e 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -28,6 +28,7 @@
+ #include <scsi/scsi_device.h>
+ #include <scsi/scsi_cmnd.h>
+ #include <scsi/scsi_tcq.h>
++#include <scsi/scsi_devinfo.h>
+ #include <linux/seqlock.h>
+ 
+ #define VIRTIO_SCSI_MEMPOOL_SZ 64
+@@ -699,6 +700,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
+ 	return virtscsi_tmf(vscsi, cmd);
+ }
+ 
++static int virtscsi_device_alloc(struct scsi_device *sdevice)
++{
++	/*
++	 * Passed through SCSI targets (e.g. with qemu's 'scsi-block')
++	 * may have transfer limits which come from the host SCSI
++	 * controller or something on the host side other than the
++	 * target itself.
++	 *
++	 * To make this work properly, the hypervisor can adjust the
++	 * target's VPD information to advertise these limits.  But
++	 * for that to work, the guest has to look at the VPD pages,
++	 * which we won't do by default if it is an SPC-2 device, even
++	 * if it does actually support it.
++	 *
++	 * So, set the blist to always try to read the VPD pages.
++	 */
++	sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES;
++
++	return 0;
++}
++
++
+ /**
+  * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth
+  * @sdev:	Virtscsi target whose queue depth to change
+@@ -770,6 +793,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
+ 	.change_queue_depth = virtscsi_change_queue_depth,
+ 	.eh_abort_handler = virtscsi_abort,
+ 	.eh_device_reset_handler = virtscsi_device_reset,
++	.slave_alloc = virtscsi_device_alloc,
+ 
+ 	.can_queue = 1024,
+ 	.dma_boundary = UINT_MAX,
+@@ -790,6 +814,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
+ 	.eh_abort_handler = virtscsi_abort,
+ 	.eh_device_reset_handler = virtscsi_device_reset,
+ 
++	.slave_alloc = virtscsi_device_alloc,
+ 	.can_queue = 1024,
+ 	.dma_boundary = UINT_MAX,
+ 	.use_clustering = ENABLE_CLUSTERING,
+diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
+index 7edede6e024b..d075ee4de8b5 100644
+--- a/drivers/spi/spi-dw-mmio.c
++++ b/drivers/spi/spi-dw-mmio.c
+@@ -121,8 +121,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
+ {
+ 	struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+ 
+-	clk_disable_unprepare(dwsmmio->clk);
+ 	dw_spi_remove_host(&dwsmmio->dws);
++	clk_disable_unprepare(dwsmmio->clk);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
+index d1a5b9fc3eba..f1c1d84f9268 100644
+--- a/drivers/spi/spi-omap2-mcspi.c
++++ b/drivers/spi/spi-omap2-mcspi.c
+@@ -436,6 +436,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 	int			elements = 0;
+ 	int			word_len, element_count;
+ 	struct omap2_mcspi_cs	*cs = spi->controller_state;
++	void __iomem		*chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
++
+ 	mcspi = spi_master_get_devdata(spi->master);
+ 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ 	count = xfer->len;
+@@ -496,8 +498,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 	if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ 		elements--;
+ 
+-		if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+-				   & OMAP2_MCSPI_CHSTAT_RXS)) {
++		if (!mcspi_wait_for_reg_bit(chstat_reg,
++					    OMAP2_MCSPI_CHSTAT_RXS)) {
+ 			u32 w;
+ 
+ 			w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+@@ -515,8 +517,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ 			return count;
+ 		}
+ 	}
+-	if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+-				& OMAP2_MCSPI_CHSTAT_RXS)) {
++	if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) {
+ 		u32 w;
+ 
+ 		w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index e77add01b0e9..48888ab630c2 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -457,7 +457,7 @@ err_free_master:
+ 
+ static int sun6i_spi_remove(struct platform_device *pdev)
+ {
+-	pm_runtime_disable(&pdev->dev);
++	pm_runtime_force_suspend(&pdev->dev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
+index 0211df60004a..4bd93e584075 100644
+--- a/drivers/staging/speakup/kobjects.c
++++ b/drivers/staging/speakup/kobjects.c
+@@ -830,7 +830,9 @@ static ssize_t message_show(struct kobject *kobj,
+ 	struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
+ 	unsigned long flags;
+ 
+-	BUG_ON(!group);
++	if (WARN_ON(!group))
++		return -EINVAL;
++
+ 	spin_lock_irqsave(&speakup_info.spinlock, flags);
+ 	retval = message_show_helper(buf, group->start, group->end);
+ 	spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+@@ -842,7 +844,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr,
+ {
+ 	struct msg_group_t *group = spk_find_msg_group(attr->attr.name);
+ 
+-	BUG_ON(!group);
++	if (WARN_ON(!group))
++		return -EINVAL;
++
+ 	return message_store_helper(buf, count, group);
+ }
+ 
+diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
+index 013a6240f193..c1ad0aea23b9 100644
+--- a/drivers/staging/wlan-ng/prism2mgmt.c
++++ b/drivers/staging/wlan-ng/prism2mgmt.c
+@@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
+ 				     hw->ident_sta_fw.variant) >
+ 	    HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
+ 		if (msg->scantype.data != P80211ENUM_scantype_active)
+-			word = cpu_to_le16(msg->maxchanneltime.data);
++			word = msg->maxchanneltime.data;
+ 		else
+ 			word = 0;
+ 
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index c68fe1222c16..5f3c4f45ab65 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -627,6 +627,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
+ 					    * we just disable hotplug, the
+ 					    * pci-tunnels stay alive.
+ 					    */
++	.thaw_noirq = nhi_resume_noirq,
+ 	.restore_noirq = nhi_resume_noirq,
+ };
+ 
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 2ec337612a79..c41dfe40fd23 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -137,6 +137,9 @@ struct gsm_dlci {
+ 	struct mutex mutex;
+ 
+ 	/* Link layer */
++	int mode;
++#define DLCI_MODE_ABM		0	/* Normal Asynchronous Balanced Mode */
++#define DLCI_MODE_ADM		1	/* Asynchronous Disconnected Mode */
+ 	spinlock_t lock;	/* Protects the internal state */
+ 	struct timer_list t1;	/* Retransmit timer for SABM and UA */
+ 	int retries;
+@@ -1380,7 +1383,13 @@ retry:
+ 	ctrl->data = data;
+ 	ctrl->len = clen;
+ 	gsm->pending_cmd = ctrl;
+-	gsm->cretries = gsm->n2;
++
++	/* If DLCI0 is in ADM mode skip retries, it won't respond */
++	if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
++		gsm->cretries = 1;
++	else
++		gsm->cretries = gsm->n2;
++
+ 	mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
+ 	gsm_control_transmit(gsm, ctrl);
+ 	spin_unlock_irqrestore(&gsm->control_lock, flags);
+@@ -1467,6 +1476,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
+  *	in which case an opening port goes back to closed and a closing port
+  *	is simply put into closed state (any further frames from the other
+  *	end will get a DM response)
++ *
++ *	Some control dlci can stay in ADM mode with other dlci working just
++ *	fine. In that case we can just keep the control dlci open after the
++ *	DLCI_OPENING retries time out.
+  */
+ 
+ static void gsm_dlci_t1(unsigned long data)
+@@ -1480,8 +1493,16 @@ static void gsm_dlci_t1(unsigned long data)
+ 		if (dlci->retries) {
+ 			gsm_command(dlci->gsm, dlci->addr, SABM|PF);
+ 			mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+-		} else
++		} else if (!dlci->addr && gsm->control == (DM | PF)) {
++			if (debug & 8)
++				pr_info("DLCI %d opening in ADM mode.\n",
++					dlci->addr);
++			dlci->mode = DLCI_MODE_ADM;
++			gsm_dlci_open(dlci);
++		} else {
+ 			gsm_dlci_close(dlci);
++		}
++
+ 		break;
+ 	case DLCI_CLOSING:
+ 		dlci->retries--;
+@@ -1499,8 +1520,8 @@ static void gsm_dlci_t1(unsigned long data)
+  *	@dlci: DLCI to open
+  *
+  *	Commence opening a DLCI from the Linux side. We issue SABM messages
+- *	to the modem which should then reply with a UA, at which point we
+- *	will move into open state. Opening is done asynchronously with retry
++ *	to the modem which should then reply with a UA or ADM, at which point
++ *	we will move into open state. Opening is done asynchronously with retry
+  *	running off timers and the responses.
+  */
+ 
+@@ -2871,11 +2892,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
+ static int gsm_carrier_raised(struct tty_port *port)
+ {
+ 	struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
++	struct gsm_mux *gsm = dlci->gsm;
++
+ 	/* Not yet open so no carrier info */
+ 	if (dlci->state != DLCI_OPEN)
+ 		return 0;
+ 	if (debug & 2)
+ 		return 1;
++
++	/*
++	 * Basic mode with control channel in ADM mode may not respond
++	 * to CMD_MSC at all and modem_rx is empty.
++	 */
++	if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
++	    !dlci->modem_rx)
++		return 1;
++
+ 	return dlci->modem_rx & TIOCM_CD;
+ }
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 66e257b5a5b7..4693a1d0151f 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -2259,6 +2259,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 				}
+ 				if (tty_hung_up_p(file))
+ 					break;
++				/*
++				 * Abort readers for ttys which never actually
++				 * get hung up.  See __tty_hangup().
++				 */
++				if (test_bit(TTY_HUPPING, &tty->flags))
++					break;
+ 				if (!timeout)
+ 					break;
+ 				if (file->f_flags & O_NONBLOCK) {
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index 531d76a276e4..89974a112cab 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -612,6 +612,10 @@ static int omap_8250_startup(struct uart_port *port)
+ 	up->lsr_saved_flags = 0;
+ 	up->msr_saved_flags = 0;
+ 
++	/* Disable DMA for console UART */
++	if (uart_console(port))
++		up->dma = NULL;
++
+ 	if (up->dma) {
+ 		ret = serial8250_request_dma(up);
+ 		if (ret) {
+diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
+index fcf803ffad19..cdd2f942317c 100644
+--- a/drivers/tty/serial/sccnxp.c
++++ b/drivers/tty/serial/sccnxp.c
+@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)
+ 
+ 	clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(clk)) {
+-		if (PTR_ERR(clk) == -EPROBE_DEFER) {
+-			ret = -EPROBE_DEFER;
++		ret = PTR_ERR(clk);
++		if (ret == -EPROBE_DEFER)
+ 			goto err_out;
+-		}
++		uartclk = 0;
++	} else {
++		clk_prepare_enable(clk);
++		uartclk = clk_get_rate(clk);
++	}
++
++	if (!uartclk) {
+ 		dev_notice(&pdev->dev, "Using default clock frequency\n");
+ 		uartclk = s->chip->freq_std;
+-	} else
+-		uartclk = clk_get_rate(clk);
++	}
+ 
+ 	/* Check input frequency */
+ 	if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index be96970646a9..152cd369ce84 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -690,6 +690,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
+ 		return;
+ 	}
+ 
++	/*
++	 * Some console devices aren't actually hung up for technical and
++	 * historical reasons, which can lead to indefinite interruptible
++	 * sleep in n_tty_read().  The following explicitly tells
++	 * n_tty_read() to abort readers.
++	 */
++	set_bit(TTY_HUPPING, &tty->flags);
++
+ 	/* inuse_filps is protected by the single tty lock,
+ 	   this really needs to change if we want to flush the
+ 	   workqueue with the lock held */
+@@ -745,6 +753,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
+ 	 * can't yet guarantee all that.
+ 	 */
+ 	set_bit(TTY_HUPPED, &tty->flags);
++	clear_bit(TTY_HUPPING, &tty->flags);
+ 	tty_unlock(tty);
+ 
+ 	if (f)
+@@ -3151,7 +3160,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
+ 
+ 	kref_init(&tty->kref);
+ 	tty->magic = TTY_MAGIC;
+-	tty_ldisc_init(tty);
++	if (tty_ldisc_init(tty)) {
++		kfree(tty);
++		return NULL;
++	}
+ 	tty->session = NULL;
+ 	tty->pgrp = NULL;
+ 	mutex_init(&tty->legacy_mutex);
+diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
+index 3737f55272d2..f4cfe7ca7d6d 100644
+--- a/drivers/tty/tty_ldisc.c
++++ b/drivers/tty/tty_ldisc.c
+@@ -171,12 +171,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
+ 			return ERR_CAST(ldops);
+ 	}
+ 
+-	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
+-	if (ld == NULL) {
+-		put_ldops(ldops);
+-		return ERR_PTR(-ENOMEM);
+-	}
+-
++	/*
++	 * There is no way to handle allocation failure of only 16 bytes.
++	 * Let's simplify error handling and save more memory.
++	 */
++	ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
+ 	ld->ops = ldops;
+ 	ld->tty = tty;
+ 
+@@ -800,12 +799,13 @@ void tty_ldisc_release(struct tty_struct *tty)
+  *	the tty structure is not completely set up when this call is made.
+  */
+ 
+-void tty_ldisc_init(struct tty_struct *tty)
++int tty_ldisc_init(struct tty_struct *tty)
+ {
+ 	struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
+ 	if (IS_ERR(ld))
+-		panic("n_tty: init_tty");
++		return PTR_ERR(ld);
+ 	tty->ldisc = ld;
++	return 0;
+ }
+ 
+ /**
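Together, the tty_io.c and tty_ldisc.c hunks above turn a boot-time panic into an
ordinary error return: tty_ldisc_init() now reports failure and alloc_tty_struct()
unwinds. A hypothetical userspace sketch of that constructor pattern:

    #include <stdlib.h>
    #include <errno.h>

    struct thing { void *disc; };

    static int disc_init(struct thing *t)
    {
        t->disc = malloc(16);
        if (!t->disc)
            return -ENOMEM;         /* was: panic() on failure */
        return 0;
    }

    static struct thing *alloc_thing(void)
    {
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
            return NULL;
        if (disc_init(t)) {         /* new: caller handles failure */
            free(t);
            return NULL;
        }
        return t;
    }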
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 50b67ff2b6ea..c989a6aa2561 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -1305,6 +1305,11 @@ static void csi_m(struct vc_data *vc)
+ 			case 3:
+ 				vc->vc_italic = 1;
+ 				break;
++			case 21:
++				/*
++				 * No console drivers support double underline, so
++				 * convert it to a single underline.
++				 */
+ 			case 4:
+ 				vc->vc_underline = 1;
+ 				break;
+@@ -1341,7 +1346,6 @@ static void csi_m(struct vc_data *vc)
+ 				vc->vc_disp_ctrl = 1;
+ 				vc->vc_toggle_meta = 1;
+ 				break;
+-			case 21:
+ 			case 22:
+ 				vc->vc_intensity = 1;
+ 				break;
+@@ -1711,7 +1715,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
+ 	default_attr(vc);
+ 	update_attr(vc);
+ 
+-	vc->vc_tab_stop[0]	= 0x01010100;
++	vc->vc_tab_stop[0]	=
+ 	vc->vc_tab_stop[1]	=
+ 	vc->vc_tab_stop[2]	=
+ 	vc->vc_tab_stop[3]	=
+@@ -1754,7 +1758,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+ 		vc->vc_pos -= (vc->vc_x << 1);
+ 		while (vc->vc_x < vc->vc_cols - 1) {
+ 			vc->vc_x++;
+-			if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31)))
++			if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
+ 				break;
+ 		}
+ 		vc->vc_pos += (vc->vc_x << 1);
+@@ -1814,7 +1818,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+ 			lf(vc);
+ 			return;
+ 		case 'H':
+-			vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31));
++			vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
+ 			return;
+ 		case 'Z':
+ 			respond_ID(tty);
+@@ -2007,7 +2011,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+ 			return;
+ 		case 'g':
+ 			if (!vc->vc_par[0])
+-				vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31));
++				vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
+ 			else if (vc->vc_par[0] == 3) {
+ 				vc->vc_tab_stop[0] =
+ 					vc->vc_tab_stop[1] =
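The vt.c hunks above bound the tab-stop bitmap index. vc_tab_stop is 8 words of 32 bits,
i.e. 256 columns; x >> 5 picks the word and x & 31 the bit, and masking the word index
with 7 keeps columns >= 256 from writing past the array. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int tab_stop[8] = { 0 };
        unsigned int x = 300;    /* hypothetical very wide console */

        /* old: tab_stop[x >> 5] -> index 9, out of bounds */
        tab_stop[7 & (x >> 5)] |= 1u << (x & 31);   /* index 1, bit 12 */

        printf("word %u, bit %u\n", 7 & (x >> 5), x & 31);
        return 0;
    }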
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 3ad48e1c0c57..f37a908b2884 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -656,7 +656,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
+ {
+ 	ci_hdrc_gadget_destroy(ci);
+ 	ci_hdrc_host_destroy(ci);
+-	if (ci->is_otg)
++	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+ 		ci_hdrc_otg_destroy(ci);
+ }
+ 
+@@ -755,27 +755,35 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 	/* initialize role(s) before the interrupt is requested */
+ 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
+ 		ret = ci_hdrc_host_init(ci);
+-		if (ret)
+-			dev_info(dev, "doesn't support host\n");
++		if (ret) {
++			if (ret == -ENXIO)
++				dev_info(dev, "doesn't support host\n");
++			else
++				goto deinit_phy;
++		}
+ 	}
+ 
+ 	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
+ 		ret = ci_hdrc_gadget_init(ci);
+-		if (ret)
+-			dev_info(dev, "doesn't support gadget\n");
++		if (ret) {
++			if (ret == -ENXIO)
++				dev_info(dev, "doesn't support gadget\n");
++			else
++				goto deinit_host;
++		}
+ 	}
+ 
+ 	if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
+ 		dev_err(dev, "no supported roles\n");
+ 		ret = -ENODEV;
+-		goto deinit_phy;
++		goto deinit_gadget;
+ 	}
+ 
+ 	if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
+ 		ret = ci_hdrc_otg_init(ci);
+ 		if (ret) {
+ 			dev_err(dev, "init otg fails, ret = %d\n", ret);
+-			goto stop;
++			goto deinit_gadget;
+ 		}
+ 	}
+ 
+@@ -835,7 +843,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ 		return 0;
+ 
+ stop:
+-	ci_role_destroy(ci);
++	if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
++		ci_hdrc_otg_destroy(ci);
++deinit_gadget:
++	ci_hdrc_gadget_destroy(ci);
++deinit_host:
++	ci_hdrc_host_destroy(ci);
+ deinit_phy:
+ 	ci_usb_phy_exit(ci);
+ 
+diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
+index 358ca8dd784f..a5240b4d7ab9 100644
+--- a/drivers/usb/core/generic.c
++++ b/drivers/usb/core/generic.c
+@@ -208,8 +208,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
+ 	if (!udev->parent)
+ 		rc = hcd_bus_suspend(udev, msg);
+ 
+-	/* Non-root devices don't need to do anything for FREEZE or PRETHAW */
+-	else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++	/*
++	 * Non-root USB2 devices don't need to do anything for FREEZE
++	 * or PRETHAW. USB3 devices don't support global suspend and
++	 * needs to be selectively suspended.
++	 */
++	else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
++		 && (udev->speed < USB_SPEED_SUPER))
+ 		rc = 0;
+ 	else
+ 		rc = usb_port_suspend(udev, msg);
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index de0843cdeb9f..2a06bd656963 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2288,6 +2288,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
+ 
+ 	spin_lock_irqsave (&hcd_root_hub_lock, flags);
+ 	if (hcd->rh_registered) {
++		pm_wakeup_event(&hcd->self.root_hub->dev, 0);
+ 		set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
+ 		queue_work(pm_wq, &hcd->wakeup_work);
+ 	}
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 1ba74441d7bf..a2686b95c3dd 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -633,12 +633,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
+ 		unsigned int portnum)
+ {
+ 	struct usb_hub *hub;
++	struct usb_port *port_dev;
+ 
+ 	if (!hdev)
+ 		return;
+ 
+ 	hub = usb_hub_to_struct_hub(hdev);
+ 	if (hub) {
++		port_dev = hub->ports[portnum - 1];
++		if (port_dev && port_dev->child)
++			pm_wakeup_event(&port_dev->child->dev, 0);
++
+ 		set_bit(portnum, hub->wakeup_bits);
+ 		kick_hub_wq(hub);
+ 	}
+@@ -3375,8 +3380,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
+ 
+ 	/* Skip the initial Clear-Suspend step for a remote wakeup */
+ 	status = hub_port_status(hub, port1, &portstatus, &portchange);
+-	if (status == 0 && !port_is_suspended(hub, portstatus))
++	if (status == 0 && !port_is_suspended(hub, portstatus)) {
++		if (portchange & USB_PORT_STAT_C_SUSPEND)
++			pm_wakeup_event(&udev->dev, 0);
+ 		goto SuspendCleared;
++	}
+ 
+ 	/* see 7.1.7.7; affects power usage, but not budgeting */
+ 	if (hub_is_superspeed(hub->hdev))
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 4f1c6f8d4352..40ce175655e6 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+ 
++	/* HP v222w 16GB Mini USB Drive */
++	{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
+index fe3b9335a74e..88a5b798b1ea 100644
+--- a/drivers/usb/dwc3/dwc3-keystone.c
++++ b/drivers/usb/dwc3/dwc3-keystone.c
+@@ -112,6 +112,10 @@ static int kdwc3_probe(struct platform_device *pdev)
+ 	dev->dma_mask = &kdwc3_dma_mask;
+ 
+ 	kdwc->clk = devm_clk_get(kdwc->dev, "usb");
++	if (IS_ERR(kdwc->clk)) {
++		dev_err(kdwc->dev, "unable to get usb clock\n");
++		return PTR_ERR(kdwc->clk);
++	}
+ 
+ 	error = clk_prepare_enable(kdwc->clk);
+ 	if (error < 0) {
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index c702f5d941d9..01816e8411fc 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -124,7 +124,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
+ 	ret = platform_device_add_resources(dwc3, res, ARRAY_SIZE(res));
+ 	if (ret) {
+ 		dev_err(dev, "couldn't add resources to dwc3 device\n");
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	pci_set_drvdata(pci, dwc3);
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index 466640afa7be..4a7861022d20 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ 	/* pick the first one */
+ 	list = list_first_entry(&hidg->completed_out_req,
+ 				struct f_hidg_req_list, list);
++
++	/*
++	 * Remove this from the list to protect it from being freed
++	 * while the host disables our function
++	 */
++	list_del(&list->list);
++
+ 	req = list->req;
+ 	count = min_t(unsigned int, count, req->actual - list->pos);
+ 	spin_unlock_irqrestore(&hidg->spinlock, flags);
+@@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer,
+ 	 * call, taking into account its current read position.
+ 	 */
+ 	if (list->pos == req->actual) {
+-		spin_lock_irqsave(&hidg->spinlock, flags);
+-		list_del(&list->list);
+ 		kfree(list);
+-		spin_unlock_irqrestore(&hidg->spinlock, flags);
+ 
+ 		req->length = hidg->report_length;
+ 		ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL);
+-		if (ret < 0)
++		if (ret < 0) {
++			free_ep_req(hidg->out_ep, req);
+ 			return ret;
++		}
++	} else {
++		spin_lock_irqsave(&hidg->spinlock, flags);
++		list_add(&list->list, &hidg->completed_out_req);
++		spin_unlock_irqrestore(&hidg->spinlock, flags);
++
++		wake_up(&hidg->read_queue);
+ 	}
+ 
+ 	return count;
+@@ -490,6 +502,7 @@ static void hidg_disable(struct usb_function *f)
+ {
+ 	struct f_hidg *hidg = func_to_hidg(f);
+ 	struct f_hidg_req_list *list, *next;
++	unsigned long flags;
+ 
+ 	usb_ep_disable(hidg->in_ep);
+ 	hidg->in_ep->driver_data = NULL;
+@@ -497,10 +510,13 @@ static void hidg_disable(struct usb_function *f)
+ 	usb_ep_disable(hidg->out_ep);
+ 	hidg->out_ep->driver_data = NULL;
+ 
++	spin_lock_irqsave(&hidg->spinlock, flags);
+ 	list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) {
++		free_ep_req(hidg->out_ep, list->req);
+ 		list_del(&list->list);
+ 		kfree(list);
+ 	}
++	spin_unlock_irqrestore(&hidg->spinlock, flags);
+ }
+ 
+ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
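The f_hid changes above close a use-after-free between f_hidg_read() and hidg_disable():
the reader detaches the completed request from the list while holding the spinlock, works
on it unlocked, and re-inserts it only if data remains, while disable() frees list
entries under the same lock. A kernel-style sketch of that detach/process/reinsert
pattern (illustrative names):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct req_item {
        struct list_head list;
        unsigned int pos, actual;
    };

    static LIST_HEAD(completed);
    static DEFINE_SPINLOCK(lock);

    static struct req_item *take_first(void)
    {
        struct req_item *it;

        spin_lock(&lock);
        it = list_first_entry_or_null(&completed, struct req_item, list);
        if (it)
            list_del(&it->list);    /* nobody else can free it now */
        spin_unlock(&lock);
        return it;
    }

    static void put_back_if_unfinished(struct req_item *it)
    {
        if (it->pos == it->actual)
            return;                 /* fully consumed: requeue to hw */
        spin_lock(&lock);
        list_add(&it->list, &completed);
        spin_unlock(&lock);
    }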
+diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
+index d97f362b3604..a32ed6359b03 100644
+--- a/drivers/usb/gadget/function/f_midi.c
++++ b/drivers/usb/gadget/function/f_midi.c
+@@ -201,12 +201,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
+ 	return alloc_ep_req(ep, length, length);
+ }
+ 
+-static void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+-{
+-	kfree(req->buf);
+-	usb_ep_free_request(ep, req);
+-}
+-
+ static const uint8_t f_midi_cin_length[] = {
+ 	0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1
+ };
+diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
+index 3a5ae9900b1e..eedea7f093d1 100644
+--- a/drivers/usb/gadget/function/f_sourcesink.c
++++ b/drivers/usb/gadget/function/f_sourcesink.c
+@@ -307,12 +307,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len)
+ 	return alloc_ep_req(ep, len, buflen);
+ }
+ 
+-void free_ep_req(struct usb_ep *ep, struct usb_request *req)
+-{
+-	kfree(req->buf);
+-	usb_ep_free_request(ep, req);
+-}
+-
+ static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep)
+ {
+ 	int			value;
+diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h
+index 15f180904f8a..5ed90b437f18 100644
+--- a/drivers/usb/gadget/function/g_zero.h
++++ b/drivers/usb/gadget/function/g_zero.h
+@@ -59,7 +59,6 @@ void lb_modexit(void);
+ int lb_modinit(void);
+ 
+ /* common utilities */
+-void free_ep_req(struct usb_ep *ep, struct usb_request *req);
+ void disable_endpoints(struct usb_composite_dev *cdev,
+ 		struct usb_ep *in, struct usb_ep *out,
+ 		struct usb_ep *iso_in, struct usb_ep *iso_out);
+diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c
+index c6276f0268ae..907f8144813c 100644
+--- a/drivers/usb/gadget/u_f.c
++++ b/drivers/usb/gadget/u_f.c
+@@ -11,16 +11,18 @@
+  * published by the Free Software Foundation.
+  */
+ 
+-#include <linux/usb/gadget.h>
+ #include "u_f.h"
++#include <linux/usb/ch9.h>
+ 
+-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len)
++struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len)
+ {
+ 	struct usb_request      *req;
+ 
+ 	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+ 	if (req) {
+ 		req->length = len ?: default_len;
++		if (usb_endpoint_dir_out(ep->desc))
++			req->length = usb_ep_align(ep, req->length);
+ 		req->buf = kmalloc(req->length, GFP_ATOMIC);
+ 		if (!req->buf) {
+ 			usb_ep_free_request(ep, req);
+diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
+index 1d5f0eb68552..69a1d10df04f 100644
+--- a/drivers/usb/gadget/u_f.h
++++ b/drivers/usb/gadget/u_f.h
+@@ -16,6 +16,8 @@
+ #ifndef __U_F_H__
+ #define __U_F_H__
+ 
++#include <linux/usb/gadget.h>
++
+ /* Variable Length Array Macros **********************************************/
+ #define vla_group(groupname) size_t groupname##__next = 0
+ #define vla_group_size(groupname) groupname##__next
+@@ -45,8 +47,26 @@
+ struct usb_ep;
+ struct usb_request;
+ 
+-struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);
+-
+-#endif /* __U_F_H__ */
++/**
++ * alloc_ep_req - allocate a usb_request for the gadget driver, along with
++ * the request's buffer.
++ *
++ * @ep: the endpoint for which to allocate the usb_request
++ * @len: suggested size of the usb_request's buffer
++ * @default_len: used if @len is not provided, i.e. is 0
++ *
++ * If the @ep direction is OUT, @len will be aligned to the ep's
++ * wMaxPacketSize. To avoid memory leaks or drops, *always* use the
++ * usb_request's length (req->length) to refer to the allocated buffer size.
++ * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
++ */
++struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len);
+ 
++/* Frees a usb_request previously allocated by alloc_ep_req() */
++static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
++{
++	kfree(req->buf);
++	usb_ep_free_request(ep, req);
++}
+ 
++#endif /* __U_F_H__ */
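
(Illustrative aside, not part of the patch.) The alloc_ep_req() change matters
because an OUT endpoint can receive up to wMaxPacketSize bytes per packet, so
a buffer that is not a multiple of that size can be overrun or force the
controller to drop data. Assuming usb_ep_align() rounds the length up to a
multiple of wMaxPacketSize, the arithmetic reduces to a plain round-up
(standalone userspace sketch):

	#include <stdio.h>

	/* assumed behaviour of usb_ep_align(): round len up to wMaxPacketSize */
	static unsigned int align_len(unsigned int len, unsigned int maxpacket)
	{
		return ((len + maxpacket - 1) / maxpacket) * maxpacket;
	}

	int main(void)
	{
		/* e.g. a 100-byte request on a 64-byte bulk endpoint */
		printf("%u -> %u\n", 100u, align_len(100, 64));	/* 100 -> 128 */
		return 0;
	}
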
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
+index 5c8f4effb62a..caec234822c6 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
+@@ -476,7 +476,7 @@ static int bdc_probe(struct platform_device *pdev)
+ 	bdc->dev = dev;
+ 	dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq);
+ 
+-	temp = bdc_readl(bdc->regs, BDC_BDCSC);
++	temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
+ 	if ((temp & BDC_P64) &&
+ 			!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+ 		dev_dbg(bdc->dev, "Using 64-bit address\n");
+diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
+index 02968842b359..708e36f530d8 100644
+--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
++++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
+@@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
+ 	if (ret) {
+ 		dev_err(&pci->dev,
+ 			"couldn't add resources to bdc device\n");
++		platform_device_put(bdc);
+ 		return ret;
+ 	}
+ 
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 35f730324b63..a5a260b7ff25 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -2026,16 +2026,13 @@ static int dummy_hub_control(
+ 			}
+ 			break;
+ 		case USB_PORT_FEAT_POWER:
+-			if (hcd->speed == HCD_USB3) {
+-				if (dum_hcd->port_status & USB_PORT_STAT_POWER)
+-					dev_dbg(dummy_dev(dum_hcd),
+-						"power-off\n");
+-			} else
+-				if (dum_hcd->port_status &
+-							USB_SS_PORT_STAT_POWER)
+-					dev_dbg(dummy_dev(dum_hcd),
+-						"power-off\n");
+-			/* FALLS THROUGH */
++			dev_dbg(dummy_dev(dum_hcd), "power-off\n");
++			if (hcd->speed == HCD_USB3)
++				dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER;
++			else
++				dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
++			set_link_state(dum_hcd);
++			break;
+ 		default:
+ 			dum_hcd->port_status &= ~(1 << wValue);
+ 			set_link_state(dum_hcd);
+@@ -2206,14 +2203,13 @@ static int dummy_hub_control(
+ 				if ((dum_hcd->port_status &
+ 				     USB_SS_PORT_STAT_POWER) != 0) {
+ 					dum_hcd->port_status |= (1 << wValue);
+-					set_link_state(dum_hcd);
+ 				}
+ 			} else
+ 				if ((dum_hcd->port_status &
+ 				     USB_PORT_STAT_POWER) != 0) {
+ 					dum_hcd->port_status |= (1 << wValue);
+-					set_link_state(dum_hcd);
+ 				}
++			set_link_state(dum_hcd);
+ 		}
+ 		break;
+ 	case GetPortErrorCount:
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index e92b9903faa4..23c5bdab988d 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -266,7 +266,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+ static struct platform_driver usb_xhci_driver = {
+ 	.probe	= xhci_plat_probe,
+ 	.remove	= xhci_plat_remove,
+-	.shutdown	= usb_hcd_platform_shutdown,
+ 	.driver	= {
+ 		.name = "xhci-hcd",
+ 		.pm = DEV_PM_OPS,
+diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
+index 10d30afe4a3c..a0d1417362cd 100644
+--- a/drivers/usb/musb/musb_gadget_ep0.c
++++ b/drivers/usb/musb/musb_gadget_ep0.c
+@@ -114,15 +114,19 @@ static int service_tx_status_request(
+ 		}
+ 
+ 		is_in = epnum & USB_DIR_IN;
+-		if (is_in) {
+-			epnum &= 0x0f;
++		epnum &= 0x0f;
++		if (epnum >= MUSB_C_NUM_EPS) {
++			handled = -EINVAL;
++			break;
++		}
++
++		if (is_in)
+ 			ep = &musb->endpoints[epnum].ep_in;
+-		} else {
++		else
+ 			ep = &musb->endpoints[epnum].ep_out;
+-		}
+ 		regs = musb->endpoints[epnum].regs;
+ 
+-		if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
++		if (!ep->desc) {
+ 			handled = -EINVAL;
+ 			break;
+ 		}
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 1d0c096c1b84..4b707d527855 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -1002,7 +1002,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ 			/* set tx_reinit and schedule the next qh */
+ 			ep->tx_reinit = 1;
+ 		}
+-		musb_start_urb(musb, is_in, next_qh);
++
++		if (next_qh)
++			musb_start_urb(musb, is_in, next_qh);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
+index 941716c1177e..3d09c1037e36 100644
+--- a/drivers/usb/serial/Kconfig
++++ b/drivers/usb/serial/Kconfig
+@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
+ 		- Fundamental Software dongle.
+ 		- Google USB serial devices
+ 		- HP4x calculators
++		- Libtransistor USB console
+ 		- a number of Motorola phones
+ 		- Motorola Tetra devices
+ 		- Novatel Wireless GPS receivers
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 142c876e7b19..1011fc41deb7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -149,6 +149,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
++	{ USB_DEVICE(0x155A, 0x1006) },	/* ELDAT Easywave RX09 */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+ 	{ USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
+ 	{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+@@ -207,6 +208,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
++	{ USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
+ 	{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
+ 	{ } /* Terminating Entry */
+ };
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 252f580cf3e7..00b5cc4c9f38 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+ 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
+ 	{ USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
++	{ USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+@@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_FHE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
+ 	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+@@ -1904,7 +1906,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
+ 		return ftdi_jtag_probe(serial);
+ 
+ 	if (udev->product &&
+-		(!strcmp(udev->product, "BeagleBone/XDS100V2") ||
++		(!strcmp(udev->product, "Arrow USB Blaster") ||
++		 !strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ 		 !strcmp(udev->product, "SNAP Connect E10")))
+ 		return ftdi_jtag_probe(serial);
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 6d847ecb423f..eaaada41359e 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -903,6 +903,9 @@
+ /*
+  * RT Systems programming cables for various ham radios
+  */
++/* This device uses the VID of FTDI */
++#define RTSYSTEMS_USB_VX8_PID   0x9e50  /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */
++
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+ #define RTSYSTEMS_USB_S03_PID	0x9001	/* RTS-03 USB to Serial Adapter */
+ #define RTSYSTEMS_USB_59_PID	0x9e50	/* USB-59 USB to 8 pin plug */
+@@ -1421,6 +1424,12 @@
+  */
+ #define FTDI_CINTERION_MC55I_PID	0xA951
+ 
++/*
++ * Product: FirmwareHubEmulator
++ * Manufacturer: Harman Becker Automotive Systems
++ */
++#define FTDI_FHE_PID		0xA9A0
++
+ /*
+  * Product: Comet Caller ID decoder
+  * Manufacturer: Crucible Technologies
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 6aa7ff2c1cf7..2674da40d9cd 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
+ 					0x01) }
+ DEVICE(google, GOOGLE_IDS);
+ 
++/* Libtransistor USB console */
++#define LIBTRANSISTOR_IDS()			\
++	{ USB_DEVICE(0x1209, 0x8b00) }
++DEVICE(libtransistor, LIBTRANSISTOR_IDS);
++
+ /* ViVOpay USB Serial Driver */
+ #define VIVOPAY_IDS()			\
+ 	{ USB_DEVICE(0x1d5f, 0x1004) }	/* ViVOpay 8800 */
+@@ -113,6 +118,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
+ 	&funsoft_device,
+ 	&flashloader_device,
+ 	&google_device,
++	&libtransistor_device,
+ 	&vivopay_device,
+ 	&moto_modem_device,
+ 	&motorola_tetra_device,
+@@ -129,6 +135,7 @@ static const struct usb_device_id id_table[] = {
+ 	FUNSOFT_IDS(),
+ 	FLASHLOADER_IDS(),
+ 	GOOGLE_IDS(),
++	LIBTRANSISTOR_IDS(),
+ 	VIVOPAY_IDS(),
+ 	MOTO_IDS(),
+ 	MOTOROLA_TETRA_IDS(),
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 337a0be89fcf..dbc3801b43eb 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
+ 		goto exit;
+ 	}
+ 
+-	if (retval == sizeof(*connection_info)) {
+-			connection_info = (struct visor_connection_info *)
+-							transfer_buffer;
+-
+-		num_ports = le16_to_cpu(connection_info->num_ports);
+-		for (i = 0; i < num_ports; ++i) {
+-			switch (
+-			   connection_info->connections[i].port_function_id) {
+-			case VISOR_FUNCTION_GENERIC:
+-				string = "Generic";
+-				break;
+-			case VISOR_FUNCTION_DEBUGGER:
+-				string = "Debugger";
+-				break;
+-			case VISOR_FUNCTION_HOTSYNC:
+-				string = "HotSync";
+-				break;
+-			case VISOR_FUNCTION_CONSOLE:
+-				string = "Console";
+-				break;
+-			case VISOR_FUNCTION_REMOTE_FILE_SYS:
+-				string = "Remote File System";
+-				break;
+-			default:
+-				string = "unknown";
+-				break;
+-			}
+-			dev_info(dev, "%s: port %d, is for %s use\n",
+-				serial->type->description,
+-				connection_info->connections[i].port, string);
+-		}
++	if (retval != sizeof(*connection_info)) {
++		dev_err(dev, "Invalid connection information received from device\n");
++		retval = -ENODEV;
++		goto exit;
+ 	}
+-	/*
+-	* Handle devices that report invalid stuff here.
+-	*/
++
++	connection_info = (struct visor_connection_info *)transfer_buffer;
++
++	num_ports = le16_to_cpu(connection_info->num_ports);
++
++	/* Handle devices that report invalid stuff here. */
+ 	if (num_ports == 0 || num_ports > 2) {
+ 		dev_warn(dev, "%s: No valid connect info available\n",
+ 			serial->type->description);
+ 		num_ports = 2;
+ 	}
+ 
++	for (i = 0; i < num_ports; ++i) {
++		switch (connection_info->connections[i].port_function_id) {
++		case VISOR_FUNCTION_GENERIC:
++			string = "Generic";
++			break;
++		case VISOR_FUNCTION_DEBUGGER:
++			string = "Debugger";
++			break;
++		case VISOR_FUNCTION_HOTSYNC:
++			string = "HotSync";
++			break;
++		case VISOR_FUNCTION_CONSOLE:
++			string = "Console";
++			break;
++		case VISOR_FUNCTION_REMOTE_FILE_SYS:
++			string = "Remote File System";
++			break;
++		default:
++			string = "unknown";
++			break;
++		}
++		dev_info(dev, "%s: port %d, is for %s use\n",
++			serial->type->description,
++			connection_info->connections[i].port, string);
++	}
+ 	dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
+ 		num_ports);
+ 
+diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
+index 26c26e3e21d3..e82e179f3558 100644
+--- a/drivers/usb/storage/ene_ub6250.c
++++ b/drivers/usb/storage/ene_ub6250.c
+@@ -1950,6 +1950,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
+ 	bcb->CDB[0] = 0xEF;
+ 
+ 	result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
++	if (us->srb != NULL)
++		scsi_set_resid(us->srb, 0);
+ 	info->BIN_FLAG = flag;
+ 	kfree(buf);
+ 
+@@ -2303,21 +2305,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
+ 
+ static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
+ {
+-	int result = 0;
++	int result = USB_STOR_XFER_GOOD;
+ 	struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+ 
+ 	/*US_DEBUG(usb_stor_show_command(us, srb)); */
+ 	scsi_set_resid(srb, 0);
+-	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
++	if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
+ 		result = ene_init(us);
+-	} else {
++	if (result == USB_STOR_XFER_GOOD) {
++		result = USB_STOR_TRANSPORT_ERROR;
+ 		if (info->SD_Status.Ready)
+ 			result = sd_scsi_irp(us, srb);
+ 
+ 		if (info->MS_Status.Ready)
+ 			result = ms_scsi_irp(us, srb);
+ 	}
+-	return 0;
++	return result;
+ }
+ 
+ 
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index 325b4c05acdd..f761e02e75c9 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -201,7 +201,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
+ 	if (!bid)
+ 		return -ENODEV;
+ 
++	/* device_attach() callers should hold parent lock for USB */
++	if (bid->udev->dev.parent)
++		device_lock(bid->udev->dev.parent);
+ 	ret = device_attach(&bid->udev->dev);
++	if (bid->udev->dev.parent)
++		device_unlock(bid->udev->dev.parent);
+ 	if (ret < 0) {
+ 		dev_err(&bid->udev->dev, "rebind failed\n");
+ 		return ret;
+diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
+index f875ccaa55f9..0fc5ace57c0e 100644
+--- a/drivers/usb/usbip/usbip_common.h
++++ b/drivers/usb/usbip/usbip_common.h
+@@ -248,7 +248,7 @@ enum usbip_side {
+ #define	SDEV_EVENT_ERROR_SUBMIT	(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+ #define	SDEV_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+ 
+-#define	VDEV_EVENT_REMOVED	(USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
++#define	VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+ #define	VDEV_EVENT_DOWN		(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+ #define	VDEV_EVENT_ERROR_TCP	(USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
+ #define	VDEV_EVENT_ERROR_MALLOC	(USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
+diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
+index ff75ca31a199..a9fc4a6d010a 100644
+--- a/drivers/vfio/pci/vfio_pci_config.c
++++ b/drivers/vfio/pci/vfio_pci_config.c
+@@ -685,6 +685,62 @@ static int __init init_pci_cap_pcix_perm(struct perm_bits *perm)
+ 	return 0;
+ }
+ 
++static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
++				 int count, struct perm_bits *perm,
++				 int offset, __le32 val)
++{
++	__le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
++				  offset + PCI_EXP_DEVCTL);
++	int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
++
++	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
++	if (count < 0)
++		return count;
++
++	/*
++	 * The FLR bit is virtualized; if it is set and the device supports
++	 * PCIe FLR, issue a reset_function.  Regardless, clear the bit: the
++	 * spec requires it to always read as zero.  NB, reset_function might
++	 * not use a PCIe FLR; we don't have that level of granularity.
++	 */
++	if (*ctrl & cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR)) {
++		u32 cap;
++		int ret;
++
++		*ctrl &= ~cpu_to_le16(PCI_EXP_DEVCTL_BCR_FLR);
++
++		ret = pci_user_read_config_dword(vdev->pdev,
++						 pos - offset + PCI_EXP_DEVCAP,
++						 &cap);
++
++		if (!ret && (cap & PCI_EXP_DEVCAP_FLR))
++			pci_try_reset_function(vdev->pdev);
++	}
++
++	/*
++	 * MPS is virtualized to the user; writes do not change the physical
++	 * register, since determining a proper MPS value requires a system-wide
++	 * device view.  The MRRS is largely independent of MPS, but since the
++	 * user does not have that system-wide view, they might set a safe, but
++	 * inefficiently low value.  Here we allow writes through to hardware,
++	 * but we set the floor to the physical device MPS setting, so that
++	 * we can at least use full TLPs, as defined by the MPS value.
++	 *
++	 * NB, if any devices actually depend on an artificially low MRRS
++	 * setting, this will need to be revisited, perhaps with a quirk
++	 * through pcie_set_readrq().
++	 */
++	if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
++		readrq = 128 <<
++			((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
++		readrq = max(readrq, pcie_get_mps(vdev->pdev));
++
++		pcie_set_readrq(vdev->pdev, readrq);
++	}
++
++	return count;
++}
++
+ /* Permissions for PCI Express capability */
+ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
+ {
+@@ -692,26 +748,67 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
+ 	if (alloc_perm_bits(perm, PCI_CAP_EXP_ENDPOINT_SIZEOF_V2))
+ 		return -ENOMEM;
+ 
++	perm->writefn = vfio_exp_config_write;
++
+ 	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+ 
+ 	/*
+-	 * Allow writes to device control fields (includes FLR!)
+-	 * but not to devctl_phantom which could confuse IOMMU
+-	 * or to the ARI bit in devctl2 which is set at probe time
++	 * Allow writes to device control fields, except devctl_phantom,
++	 * which could confuse IOMMU, MPS, which can break communication
++	 * with other physical devices, and the ARI bit in devctl2, which
++	 * is set at probe time.  FLR and MRRS get virtualized via our
++	 * writefn.
+ 	 */
+-	p_setw(perm, PCI_EXP_DEVCTL, NO_VIRT, ~PCI_EXP_DEVCTL_PHANTOM);
++	p_setw(perm, PCI_EXP_DEVCTL,
++	       PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
++	       PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
+ 	p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
+ 	return 0;
+ }
+ 
++static int vfio_af_config_write(struct vfio_pci_device *vdev, int pos,
++				int count, struct perm_bits *perm,
++				int offset, __le32 val)
++{
++	u8 *ctrl = vdev->vconfig + pos - offset + PCI_AF_CTRL;
++
++	count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
++	if (count < 0)
++		return count;
++
++	/*
++	 * The FLR bit is virtualized; if it is set and the device supports
++	 * AF FLR, issue a reset_function.  Regardless, clear the bit: the
++	 * spec requires it to always read as zero.  NB, reset_function might
++	 * not use an AF FLR; we don't have that level of granularity.
++	 */
++	if (*ctrl & PCI_AF_CTRL_FLR) {
++		u8 cap;
++		int ret;
++
++		*ctrl &= ~PCI_AF_CTRL_FLR;
++
++		ret = pci_user_read_config_byte(vdev->pdev,
++						pos - offset + PCI_AF_CAP,
++						&cap);
++
++		if (!ret && (cap & PCI_AF_CAP_FLR) && (cap & PCI_AF_CAP_TP))
++			pci_try_reset_function(vdev->pdev);
++	}
++
++	return count;
++}
++
+ /* Permissions for Advanced Function capability */
+ static int __init init_pci_cap_af_perm(struct perm_bits *perm)
+ {
+ 	if (alloc_perm_bits(perm, pci_cap_length[PCI_CAP_ID_AF]))
+ 		return -ENOMEM;
+ 
++	perm->writefn = vfio_af_config_write;
++
+ 	p_setb(perm, PCI_CAP_LIST_NEXT, (u8)ALL_VIRT, NO_WRITE);
+-	p_setb(perm, PCI_AF_CTRL, NO_VIRT, PCI_AF_CTRL_FLR);
++	p_setb(perm, PCI_AF_CTRL, PCI_AF_CTRL_FLR, PCI_AF_CTRL_FLR);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index fa49d3294cd5..1fd31650e01c 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -96,8 +96,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
+ 	if (mask)
+ 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+ 	if (mask & POLLERR) {
+-		if (poll->wqh)
+-			remove_wait_queue(poll->wqh, &poll->wait);
++		vhost_poll_stop(poll);
+ 		ret = -EINVAL;
+ 	}
+ 
+diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
+index 517f565b65d7..598ec7545e84 100644
+--- a/drivers/video/console/vgacon.c
++++ b/drivers/video/console/vgacon.c
+@@ -409,7 +409,10 @@ static const char *vgacon_startup(void)
+ 		vga_video_port_val = VGA_CRT_DM;
+ 		if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) {
+ 			static struct resource ega_console_resource =
+-			    { .name = "ega", .start = 0x3B0, .end = 0x3BF };
++			    { .name	= "ega",
++			      .flags	= IORESOURCE_IO,
++			      .start	= 0x3B0,
++			      .end	= 0x3BF };
+ 			vga_video_type = VIDEO_TYPE_EGAM;
+ 			vga_vram_size = 0x8000;
+ 			display_desc = "EGA+";
+@@ -417,9 +420,15 @@ static const char *vgacon_startup(void)
+ 					 &ega_console_resource);
+ 		} else {
+ 			static struct resource mda1_console_resource =
+-			    { .name = "mda", .start = 0x3B0, .end = 0x3BB };
++			    { .name	= "mda",
++			      .flags	= IORESOURCE_IO,
++			      .start	= 0x3B0,
++			      .end	= 0x3BB };
+ 			static struct resource mda2_console_resource =
+-			    { .name = "mda", .start = 0x3BF, .end = 0x3BF };
++			    { .name	= "mda",
++			      .flags	= IORESOURCE_IO,
++			      .start	= 0x3BF,
++			      .end	= 0x3BF };
+ 			vga_video_type = VIDEO_TYPE_MDA;
+ 			vga_vram_size = 0x2000;
+ 			display_desc = "*MDA";
+@@ -441,15 +450,21 @@ static const char *vgacon_startup(void)
+ 			vga_vram_size = 0x8000;
+ 
+ 			if (!screen_info.orig_video_isVGA) {
+-				static struct resource ega_console_resource
+-				    = { .name = "ega", .start = 0x3C0, .end = 0x3DF };
++				static struct resource ega_console_resource =
++				    { .name	= "ega",
++				      .flags	= IORESOURCE_IO,
++				      .start	= 0x3C0,
++				      .end	= 0x3DF };
+ 				vga_video_type = VIDEO_TYPE_EGAC;
+ 				display_desc = "EGA";
+ 				request_resource(&ioport_resource,
+ 						 &ega_console_resource);
+ 			} else {
+-				static struct resource vga_console_resource
+-				    = { .name = "vga+", .start = 0x3C0, .end = 0x3DF };
++				static struct resource vga_console_resource =
++				    { .name	= "vga+",
++				      .flags	= IORESOURCE_IO,
++				      .start	= 0x3C0,
++				      .end	= 0x3DF };
+ 				vga_video_type = VIDEO_TYPE_VGAC;
+ 				display_desc = "VGA+";
+ 				request_resource(&ioport_resource,
+@@ -493,7 +508,10 @@ static const char *vgacon_startup(void)
+ 			}
+ 		} else {
+ 			static struct resource cga_console_resource =
+-			    { .name = "cga", .start = 0x3D4, .end = 0x3D5 };
++			    { .name	= "cga",
++			      .flags	= IORESOURCE_IO,
++			      .start	= 0x3D4,
++			      .end	= 0x3D5 };
+ 			vga_video_type = VIDEO_TYPE_CGA;
+ 			vga_vram_size = 0x2000;
+ 			display_desc = "*CGA";
+diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
+index 9362424c2340..924b3d6c3e9b 100644
+--- a/drivers/video/fbdev/amba-clcd.c
++++ b/drivers/video/fbdev/amba-clcd.c
+@@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb)
+ 	if (err)
+ 		return err;
+ 
+-	framesize = fb->panel->mode.xres * fb->panel->mode.yres *
+-			fb->panel->bpp / 8;
++	framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres *
++			fb->panel->bpp / 8);
+ 	fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize,
+ 			&dma, GFP_KERNEL);
+ 	if (!fb->fb.screen_base)
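
(Illustrative aside, not part of the patch.) The clcdfb buffer ends up mapped
to userspace in whole pages, so the allocation is rounded up front; a raw
xres * yres * bpp / 8 product is frequently not page-aligned. Assuming a
4 KiB page:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	int main(void)
	{
		/* 320x240 @ 16bpp = 153600 bytes, not a multiple of 4096 */
		unsigned long framesize = 320UL * 240 * 16 / 8;

		printf("%lu -> %lu\n", framesize, PAGE_ALIGN(framesize)); /* 155648 */
		return 0;
	}
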
+diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c
+index d0a4e2f79a57..d215faacce04 100644
+--- a/drivers/video/fbdev/sm501fb.c
++++ b/drivers/video/fbdev/sm501fb.c
+@@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info,
+ 	info->fbmem = ioremap(res->start, resource_size(res));
+ 	if (info->fbmem == NULL) {
+ 		dev_err(dev, "cannot remap framebuffer\n");
++		ret = -ENXIO;
+ 		goto err_mem_res;
+ 	}
+ 
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index d2a985e59fcd..105a269ff66a 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = {
+ static int dlfb_select_std_channel(struct dlfb_data *dev)
+ {
+ 	int ret;
+-	u8 set_def_chn[] = {	   0x57, 0xCD, 0xDC, 0xA7,
++	void *buf;
++	static const u8 set_def_chn[] = {
++				0x57, 0xCD, 0xDC, 0xA7,
+ 				0x1C, 0x88, 0x5E, 0x15,
+ 				0x60, 0xFE, 0xC6, 0x97,
+ 				0x16, 0x3D, 0x47, 0xF2  };
+ 
++	buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
++
++	if (!buf)
++		return -ENOMEM;
++
+ 	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ 			NR_USB_REQUEST_CHANNEL,
+ 			(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
+-			set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
++			buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
++
++	kfree(buf);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
+index 70a897b1e458..146cc3516f61 100644
+--- a/drivers/video/fbdev/vfb.c
++++ b/drivers/video/fbdev/vfb.c
+@@ -284,8 +284,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
+  */
+ static int vfb_set_par(struct fb_info *info)
+ {
++	switch (info->var.bits_per_pixel) {
++	case 1:
++		info->fix.visual = FB_VISUAL_MONO01;
++		break;
++	case 8:
++		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
++		break;
++	case 16:
++	case 24:
++	case 32:
++		info->fix.visual = FB_VISUAL_TRUECOLOR;
++		break;
++	}
++
+ 	info->fix.line_length = get_line_length(info->var.xres_virtual,
+ 						info->var.bits_per_pixel);
++
+ 	return 0;
+ }
+ 
+@@ -526,6 +541,8 @@ static int vfb_probe(struct platform_device *dev)
+ 		goto err2;
+ 	platform_set_drvdata(dev, info);
+ 
++	vfb_set_par(info);
++
+ 	fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
+ 		videomemorysize >> 10);
+ 	return 0;
+diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
+index 162689227a23..b73520aaf697 100644
+--- a/drivers/video/hdmi.c
++++ b/drivers/video/hdmi.c
+@@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
+ }
+ EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+ 
++static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame)
++{
++	/* for side by side (half) we also need to provide 3D_Ext_Data */
++	if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
++		return 6;
++	else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
++		return 5;
++	else
++		return 4;
++}
++
+ /**
+  * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+  * @frame: HDMI infoframe
+@@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ 	u8 *ptr = buffer;
+ 	size_t length;
+ 
+-	/* empty info frame */
+-	if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
+-		return -EINVAL;
+-
+ 	/* only one of those can be supplied */
+ 	if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ 		return -EINVAL;
+ 
+-	/* for side by side (half) we also need to provide 3D_Ext_Data */
+-	if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+-		frame->length = 6;
+-	else
+-		frame->length = 5;
++	frame->length = hdmi_vendor_infoframe_length(frame);
+ 
+ 	length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
+ 
+@@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
+ 	ptr[5] = 0x0c;
+ 	ptr[6] = 0x00;
+ 
+-	if (frame->vic) {
+-		ptr[7] = 0x1 << 5;	/* video format */
+-		ptr[8] = frame->vic;
+-	} else {
++	if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) {
+ 		ptr[7] = 0x2 << 5;	/* video format */
+ 		ptr[8] = (frame->s3d_struct & 0xf) << 4;
+ 		if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ 			ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
++	} else if (frame->vic) {
++		ptr[7] = 0x1 << 5;	/* video format */
++		ptr[8] = frame->vic;
++	} else {
++		ptr[7] = 0x0 << 5;	/* video format */
+ 	}
+ 
+ 	hdmi_infoframe_set_checksum(buffer, length);
+@@ -1161,7 +1166,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
+ 
+ 	if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR ||
+ 	    ptr[1] != 1 ||
+-	    (ptr[2] != 5 && ptr[2] != 6))
++	    (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6))
+ 		return -EINVAL;
+ 
+ 	length = ptr[2];
+@@ -1189,16 +1194,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame,
+ 
+ 	hvf->length = length;
+ 
+-	if (hdmi_video_format == 0x1) {
+-		hvf->vic = ptr[4];
+-	} else if (hdmi_video_format == 0x2) {
++	if (hdmi_video_format == 0x2) {
++		if (length != 5 && length != 6)
++			return -EINVAL;
+ 		hvf->s3d_struct = ptr[4] >> 4;
+ 		if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) {
+-			if (length == 6)
+-				hvf->s3d_ext_data = ptr[5] >> 4;
+-			else
++			if (length != 6)
+ 				return -EINVAL;
++			hvf->s3d_ext_data = ptr[5] >> 4;
+ 		}
++	} else if (hdmi_video_format == 0x1) {
++		if (length != 5)
++			return -EINVAL;
++		hvf->vic = ptr[4];
++	} else {
++		if (length != 4)
++			return -EINVAL;
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
+index 016bd9355190..aa93df5833dc 100644
+--- a/drivers/watchdog/f71808e_wdt.c
++++ b/drivers/watchdog/f71808e_wdt.c
+@@ -450,7 +450,7 @@ static bool watchdog_is_running(void)
+ 
+ 	is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
+ 		&& (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
+-			& F71808FG_FLAG_WD_EN);
++			& BIT(F71808FG_FLAG_WD_EN));
+ 
+ 	superio_exit(watchdog.sioaddr);
+ 
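
(Illustrative aside, not part of the patch.) The f71808e_wdt fix is a classic
bit-number-versus-bit-mask confusion: F71808FG_FLAG_WD_EN is a bit number
(assumed to be 5 here, as in the driver), so masking with the raw value tests
bits 0 and 2 instead of bit 5, and a running watchdog can be reported as
stopped:

	#include <stdio.h>

	#define BIT(n)			(1U << (n))
	#define F71808FG_FLAG_WD_EN	5	/* a bit *number*, not a mask */

	int main(void)
	{
		unsigned int conf = 0x20;	/* watchdog enabled: bit 5 is set */

		printf("buggy test: %u, fixed test: %d\n",
		       conf & F71808FG_FLAG_WD_EN,		/* 0x20 & 0x05 == 0 */
		       !!(conf & BIT(F71808FG_FLAG_WD_EN)));	/* 1 */
		return 0;
	}
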
+diff --git a/fs/aio.c b/fs/aio.c
+index 480440f4701f..61ada5047da2 100644
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -68,9 +68,9 @@ struct aio_ring {
+ #define AIO_RING_PAGES	8
+ 
+ struct kioctx_table {
+-	struct rcu_head	rcu;
+-	unsigned	nr;
+-	struct kioctx	*table[];
++	struct rcu_head		rcu;
++	unsigned		nr;
++	struct kioctx __rcu	*table[];
+ };
+ 
+ struct kioctx_cpu {
+@@ -115,7 +115,8 @@ struct kioctx {
+ 	struct page		**ring_pages;
+ 	long			nr_pages;
+ 
+-	struct work_struct	free_work;
++	struct rcu_head		free_rcu;
++	struct work_struct	free_work;	/* see free_ioctx() */
+ 
+ 	/*
+ 	 * signals when all in-flight requests are done
+@@ -327,7 +328,7 @@ static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+ 	for (i = 0; i < table->nr; i++) {
+ 		struct kioctx *ctx;
+ 
+-		ctx = table->table[i];
++		ctx = rcu_dereference(table->table[i]);
+ 		if (ctx && ctx->aio_ring_file == file) {
+ 			if (!atomic_read(&ctx->dead)) {
+ 				ctx->user_id = ctx->mmap_base = vma->vm_start;
+@@ -559,6 +560,12 @@ static int kiocb_cancel(struct aio_kiocb *kiocb)
+ 	return cancel(&kiocb->common);
+ }
+ 
++/*
++ * free_ioctx() should be RCU-delayed to synchronize against the
++ * RCU-protected lookup_ioctx() and also needs process context to call
++ * aio_free_ring(); hence the double bounce through kioctx->free_rcu and
++ * ->free_work.
++ */
+ static void free_ioctx(struct work_struct *work)
+ {
+ 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+@@ -572,6 +579,14 @@ static void free_ioctx(struct work_struct *work)
+ 	kmem_cache_free(kioctx_cachep, ctx);
+ }
+ 
++static void free_ioctx_rcufn(struct rcu_head *head)
++{
++	struct kioctx *ctx = container_of(head, struct kioctx, free_rcu);
++
++	INIT_WORK(&ctx->free_work, free_ioctx);
++	schedule_work(&ctx->free_work);
++}
++
+ static void free_ioctx_reqs(struct percpu_ref *ref)
+ {
+ 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+@@ -580,8 +595,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
+ 	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
+ 		complete(&ctx->rq_wait->comp);
+ 
+-	INIT_WORK(&ctx->free_work, free_ioctx);
+-	schedule_work(&ctx->free_work);
++	/* Synchronize against RCU protected table->table[] dereferences */
++	call_rcu(&ctx->free_rcu, free_ioctx_rcufn);
+ }
+ 
+ /*
+@@ -622,9 +637,9 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+ 	while (1) {
+ 		if (table)
+ 			for (i = 0; i < table->nr; i++)
+-				if (!table->table[i]) {
++				if (!rcu_access_pointer(table->table[i])) {
+ 					ctx->id = i;
+-					table->table[i] = ctx;
++					rcu_assign_pointer(table->table[i], ctx);
+ 					spin_unlock(&mm->ioctx_lock);
+ 
+ 					/* While kioctx setup is in progress,
+@@ -799,11 +814,11 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ 	}
+ 
+ 	table = rcu_dereference_raw(mm->ioctx_table);
+-	WARN_ON(ctx != table->table[ctx->id]);
+-	table->table[ctx->id] = NULL;
++	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
++	RCU_INIT_POINTER(table->table[ctx->id], NULL);
+ 	spin_unlock(&mm->ioctx_lock);
+ 
+-	/* percpu_ref_kill() will do the necessary call_rcu() */
++	/* free_ioctx_reqs() will do the necessary RCU synchronization */
+ 	wake_up_all(&ctx->wait);
+ 
+ 	/*
+@@ -845,7 +860,8 @@ void exit_aio(struct mm_struct *mm)
+ 
+ 	skipped = 0;
+ 	for (i = 0; i < table->nr; ++i) {
+-		struct kioctx *ctx = table->table[i];
++		struct kioctx *ctx =
++			rcu_dereference_protected(table->table[i], true);
+ 
+ 		if (!ctx) {
+ 			skipped++;
+@@ -1034,7 +1050,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
+ 	if (!table || id >= table->nr)
+ 		goto out;
+ 
+-	ctx = table->table[id];
++	ctx = rcu_dereference(table->table[id]);
+ 	if (ctx && ctx->user_id == ctx_id) {
+ 		percpu_ref_get(&ctx->users);
+ 		ret = ctx;
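
(Illustrative aside, not part of the patch.) The aio conversion above is the
standard RCU publish/lookup pairing for __rcu pointers: slots are written with
rcu_assign_pointer() under the lock and read with rcu_dereference() inside a
read-side critical section, and teardown unpublishes the slot and defers the
free past a grace period (bounced through a work item here because
aio_free_ring() needs process context). A minimal kernel-style sketch with
hypothetical names:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct obj {
		int id;
	};

	static struct obj __rcu *slot;
	static DEFINE_SPINLOCK(slot_lock);

	static void publish(struct obj *o)
	{
		spin_lock(&slot_lock);
		rcu_assign_pointer(slot, o);	/* pairs with rcu_dereference() */
		spin_unlock(&slot_lock);
	}

	static int lookup_id(void)
	{
		struct obj *o;
		int id = -1;

		rcu_read_lock();
		o = rcu_dereference(slot);
		if (o)
			id = o->id;	/* safe: any free is RCU-delayed */
		rcu_read_unlock();
		return id;
	}
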
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index 7a54c6a867c8..500098cdb960 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -746,7 +746,7 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t m
+ 
+ 	autofs4_del_active(dentry);
+ 
+-	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
++	inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
+ 	if (!inode)
+ 		return -ENOMEM;
+ 	d_add(dentry, inode);
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 885f533a34d9..f179946d67ed 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2466,7 +2466,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
+ 	if (!uptodate) {
+ 		ClearPageUptodate(page);
+ 		SetPageError(page);
+-		ret = ret < 0 ? ret : -EIO;
++		ret = err < 0 ? err : -EIO;
+ 		mapping_set_error(page->mapping, ret);
+ 	}
+ 	return 0;
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index 5fe5314270fd..68ca200b714a 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -4759,13 +4759,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
+ 	while (key.offset < ekey->offset + left_len) {
+ 		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+ 		right_type = btrfs_file_extent_type(eb, ei);
+-		if (right_type != BTRFS_FILE_EXTENT_REG) {
++		if (right_type != BTRFS_FILE_EXTENT_REG &&
++		    right_type != BTRFS_FILE_EXTENT_INLINE) {
+ 			ret = 0;
+ 			goto out;
+ 		}
+ 
+ 		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
+-		right_len = btrfs_file_extent_num_bytes(eb, ei);
++		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
++			right_len = btrfs_file_extent_inline_len(eb, slot, ei);
++			right_len = PAGE_ALIGN(right_len);
++		} else {
++			right_len = btrfs_file_extent_num_bytes(eb, ei);
++		}
+ 		right_offset = btrfs_file_extent_offset(eb, ei);
+ 		right_gen = btrfs_file_extent_generation(eb, ei);
+ 
+@@ -4779,6 +4785,19 @@ static int is_extent_unchanged(struct send_ctx *sctx,
+ 			goto out;
+ 		}
+ 
++		/*
++		 * We only wanted to check that, when we have an inline extent,
++		 * what follows it is a regular extent (i.e. to apply the above
++		 * condition to inline extents too). This should normally not
++		 * happen but it's possible for example when we have an inline
++		 * compressed extent representing data with a size matching
++		 * the page size (currently the same as sector size).
++		 */
++		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
++			ret = 0;
++			goto out;
++		}
++
+ 		left_offset_fixed = left_offset;
+ 		if (key.offset < ekey->offset) {
+ 			/* Fix the right offset for 2a and 7. */
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 18a3573e1444..4a0318ee4ed1 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4370,10 +4370,13 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ 	if (devs_max && ndevs > devs_max)
+ 		ndevs = devs_max;
+ 	/*
+-	 * the primary goal is to maximize the number of stripes, so use as many
+-	 * devices as possible, even if the stripes are not maximum sized.
++	 * The primary goal is to maximize the number of stripes, so use as
++	 * many devices as possible, even if the stripes are not maximum sized.
++	 *
++	 * The DUP profile stores more than one stripe per device; the
++	 * max_avail is the total size, so we have to adjust.
+ 	 */
+-	stripe_size = devices_info[ndevs-1].max_avail;
++	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
+ 	num_stripes = ndevs * dev_stripes;
+ 
+ 	/*
+@@ -4413,8 +4416,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+ 			stripe_size = devices_info[ndevs-1].max_avail;
+ 	}
+ 
+-	stripe_size = div_u64(stripe_size, dev_stripes);
+-
+ 	/* align to BTRFS_STRIPE_LEN */
+ 	stripe_size = div_u64(stripe_size, raid_stripe_len);
+ 	stripe_size *= raid_stripe_len;
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 49a0d6b027c1..76dacd5307b9 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		goto mknod_out;
+ 	}
+ 
++	if (!S_ISCHR(mode) && !S_ISBLK(mode))
++		goto mknod_out;
++
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
+ 		goto mknod_out;
+ 
+@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 
+ 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
+ 	if (buf == NULL) {
+-		kfree(full_path);
+ 		rc = -ENOMEM;
+-		free_xid(xid);
+-		return rc;
++		goto mknod_out;
+ 	}
+ 
+ 	if (backup_cred(cifs_sb))
+@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
+ 		pdev->minor = cpu_to_le64(MINOR(device_number));
+ 		rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
+ 							&bytes_written, iov, 1);
+-	} /* else if (S_ISFIFO) */
++	}
+ 	tcon->ses->server->ops->close(xid, tcon, &fid);
+ 	d_drop(direntry);
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 1366d2151389..6f20a8ca5e7c 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -582,7 +582,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
+ 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ 	int rc = 0;
+ 
+-	down_read(&cinode->lock_sem);
++	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
+ 	if (cinode->can_cache_brlcks) {
+ 		/* can cache locks - no need to relock */
+ 		up_read(&cinode->lock_sem);
+diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
+index abae6dd2c6b9..cc88f4f0325e 100644
+--- a/fs/cifs/netmisc.c
++++ b/fs/cifs/netmisc.c
+@@ -980,10 +980,10 @@ struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
+ 		cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
+ 	days = sd->Day;
+ 	month = sd->Month;
+-	if ((days > 31) || (month > 12)) {
++	if (days < 1 || days > 31 || month < 1 || month > 12) {
+ 		cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, days);
+-		if (month > 12)
+-			month = 12;
++		days = clamp(days, 1, 31);
++		month = clamp(month, 1, 12);
+ 	}
+ 	month -= 1;
+ 	days += total_days_of_prev_months[month];
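
(Illustrative aside, not part of the patch.) The netmisc fix matters because
month is decremented and then used to index total_days_of_prev_months[], so a
month of 0 from a corrupt DOS timestamp would index the array at -1. Clamping
both fields keeps the lookup in bounds; the kernel's clamp() behaves like this
standalone sketch:

	#include <stdio.h>

	#define clamp(v, lo, hi)	((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

	int main(void)
	{
		int days = 0, month = 13;	/* bogus fields from a DOS timestamp */

		printf("days %d, month %d\n",
		       clamp(days, 1, 31), clamp(month, 1, 12));	/* 1, 12 */
		return 0;
	}
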
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 5f9229ddf335..11b562ac8f31 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -344,13 +344,12 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
+ 	/* BB is NTLMV2 session security format easier to use here? */
+ 	flags = NTLMSSP_NEGOTIATE_56 |	NTLMSSP_REQUEST_TARGET |
+ 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
+-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+-	if (ses->server->sign) {
++		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
++		NTLMSSP_NEGOTIATE_SEAL;
++	if (ses->server->sign)
+ 		flags |= NTLMSSP_NEGOTIATE_SIGN;
+-		if (!ses->server->session_estab ||
+-				ses->ntlmssp->sesskey_per_smbsess)
+-			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+-	}
++	if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
++		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ 
+ 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+ 
+@@ -407,13 +406,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ 	flags = NTLMSSP_NEGOTIATE_56 |
+ 		NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
+ 		NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
+-		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+-	if (ses->server->sign) {
++		NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
++		NTLMSSP_NEGOTIATE_SEAL;
++	if (ses->server->sign)
+ 		flags |= NTLMSSP_NEGOTIATE_SIGN;
+-		if (!ses->server->session_estab ||
+-				ses->ntlmssp->sesskey_per_smbsess)
+-			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+-	}
++	if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
++		flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ 
+ 	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 69422157c71b..4b4b1cbc69b2 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -754,10 +754,8 @@ ssetup_exit:
+ 
+ 	if (!rc) {
+ 		mutex_lock(&server->srv_mutex);
+-		if (server->sign && server->ops->generate_signingkey) {
++		if (server->ops->generate_signingkey) {
+ 			rc = server->ops->generate_signingkey(ses);
+-			kfree(ses->auth_key.response);
+-			ses->auth_key.response = NULL;
+ 			if (rc) {
+ 				cifs_dbg(FYI,
+ 					"SMB3 session key generation failed\n");
+@@ -779,10 +777,6 @@ ssetup_exit:
+ 	}
+ 
+ keygen_exit:
+-	if (!server->sign) {
+-		kfree(ses->auth_key.response);
+-		ses->auth_key.response = NULL;
+-	}
+ 	if (spnego_key) {
+ 		key_invalidate(spnego_key);
+ 		key_put(spnego_key);
+@@ -921,15 +915,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
+ 		goto tcon_exit;
+ 	}
+ 
+-	if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
++	switch (rsp->ShareType) {
++	case SMB2_SHARE_TYPE_DISK:
+ 		cifs_dbg(FYI, "connection to disk share\n");
+-	else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
++		break;
++	case SMB2_SHARE_TYPE_PIPE:
+ 		tcon->ipc = true;
+ 		cifs_dbg(FYI, "connection to pipe share\n");
+-	} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
+-		tcon->print = true;
++		break;
++	case SMB2_SHARE_TYPE_PRINT:
++		tcon->ipc = true;
+ 		cifs_dbg(FYI, "connection to printer\n");
+-	} else {
++		break;
++	default:
+ 		cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
+ 		rc = -EOPNOTSUPP;
+ 		goto tcon_error_exit;
+@@ -1353,6 +1351,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ 	} else
+ 		iov[0].iov_len = get_rfc1002_length(req) + 4;
+ 
++	/* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
++	if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
++		req->hdr.Flags |= SMB2_FLAGS_SIGNED;
+ 
+ 	rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
+ 	rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index 6b8e2f091f5b..5e6798a3c9b6 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -811,7 +811,7 @@ static int compat_ioctl_preallocate(struct file *file,
+  */
+ #define XFORM(i) (((i) ^ ((i) << 27) ^ ((i) << 17)) & 0xffffffff)
+ 
+-#define COMPATIBLE_IOCTL(cmd) XFORM(cmd),
++#define COMPATIBLE_IOCTL(cmd) XFORM((u32)cmd),
+ /* ioctl should not be warned about even if it's not implemented.
+    Valid reasons to use this:
+    - It is implemented with ->compat_ioctl on some device, but programs
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 5ca8f0b2b897..c19576fa779e 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -607,11 +607,16 @@ again:
+ 		spin_unlock(&parent->d_lock);
+ 		goto again;
+ 	}
+-	rcu_read_unlock();
+-	if (parent != dentry)
++	if (parent != dentry) {
+ 		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+-	else
++		if (unlikely(dentry->d_lockref.count < 0)) {
++			spin_unlock(&parent->d_lock);
++			parent = NULL;
++		}
++	} else {
+ 		parent = NULL;
++	}
++	rcu_read_unlock();
+ 	return parent;
+ }
+ 
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index f57cf1c42ca3..79f974ba1999 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -351,7 +351,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ 		int i, num;
+ 		unsigned long nr_pages;
+ 
+-		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
++		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
+ 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+ 					  (pgoff_t)num);
+ 		if (nr_pages == 0)
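
(Illustrative aside, not part of the patch.) The one-line ext4 change fixes an
off-by-one: the search range [index, end] is inclusive, so when end == index
there is still one page to look up, yet the old expression computed num = 0
and pagevec_lookup() found nothing. Assuming PAGEVEC_SIZE is 14 on this
kernel, the two expressions differ exactly at that boundary:

	#include <stdio.h>

	#define PAGEVEC_SIZE	14UL
	#define min(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned long index = 5, end = 5;	/* one-page search window */
		unsigned long old_num = min(end - index, PAGEVEC_SIZE);
		unsigned long new_num = min(end - index, PAGEVEC_SIZE - 1) + 1;

		printf("old num = %lu, new num = %lu\n", old_num, new_num); /* 0, 1 */
		return 0;
	}
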
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index c1feaf011515..25fcf7b2bdaa 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1476,6 +1476,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
+ 			BUG_ON(!PageLocked(page));
+ 			BUG_ON(PageWriteback(page));
+ 			if (invalidate) {
++				if (page_mapped(page))
++					clear_page_dirty_for_io(page);
+ 				block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
+ 				ClearPageUptodate(page);
+ 			}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index c67056a8c901..1f5062222425 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2126,6 +2126,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Block bitmap for group %u overlaps "
+ 				 "superblock", i);
++			if (!(sb->s_flags & MS_RDONLY))
++				return 0;
+ 		}
+ 		if (block_bitmap < first_block || block_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2138,6 +2140,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Inode bitmap for group %u overlaps "
+ 				 "superblock", i);
++			if (!(sb->s_flags & MS_RDONLY))
++				return 0;
+ 		}
+ 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -2150,6 +2154,8 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 				 "Inode table for group %u overlaps "
+ 				 "superblock", i);
++			if (!(sb->s_flags & MS_RDONLY))
++				return 0;
+ 		}
+ 		if (inode_table < first_block ||
+ 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index e4d224315a1f..0bb394b4f04b 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -275,11 +275,11 @@ loop:
+ 	goto loop;
+ 
+ end_loop:
+-	write_unlock(&journal->j_state_lock);
+ 	del_timer_sync(&journal->j_commit_timer);
+ 	journal->j_task = NULL;
+ 	wake_up(&journal->j_wait_done_commit);
+ 	jbd_debug(1, "Journal thread exiting.\n");
++	write_unlock(&journal->j_state_lock);
+ 	return 0;
+ }
+ 
+@@ -923,7 +923,7 @@ out:
+ }
+ 
+ /*
+- * This is a variaon of __jbd2_update_log_tail which checks for validity of
++ * This is a variation of __jbd2_update_log_tail which checks for validity of
+  * provided log tail and locks j_checkpoint_mutex. So it is safe against races
+  * with other threads updating log tail.
+  */
+@@ -1399,6 +1399,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 	journal_superblock_t *sb = journal->j_superblock;
+ 	int ret;
+ 
++	if (is_journal_aborted(journal))
++		return -EIO;
++
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+ 		  tail_block, tail_tid);
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index deff03371626..4ddcaf949a16 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -515,6 +515,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
+ 	 */
+ 	ret = start_this_handle(journal, handle, GFP_NOFS);
+ 	if (ret < 0) {
++		handle->h_journal = journal;
+ 		jbd2_journal_free_reserved(handle);
+ 		return ret;
+ 	}
+diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
+index d86c5e3176a1..600da1a4df29 100644
+--- a/fs/jffs2/super.c
++++ b/fs/jffs2/super.c
+@@ -345,7 +345,7 @@ static void jffs2_put_super (struct super_block *sb)
+ static void jffs2_kill_sb(struct super_block *sb)
+ {
+ 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+-	if (!(sb->s_flags & MS_RDONLY))
++	if (c && !(sb->s_flags & MS_RDONLY))
+ 		jffs2_stop_garbage_collect_thread(c);
+ 	kill_mtd_super(sb);
+ 	kfree(c);
+diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
+index 55505cbe11af..375efc1ced83 100644
+--- a/fs/lockd/svc.c
++++ b/fs/lockd/svc.c
+@@ -129,6 +129,8 @@ lockd(void *vrqstp)
+ {
+ 	int		err = 0;
+ 	struct svc_rqst *rqstp = vrqstp;
++	struct net *net = &init_net;
++	struct lockd_net *ln = net_generic(net, lockd_net_id);
+ 
+ 	/* try_to_freeze() is called from svc_recv() */
+ 	set_freezable();
+@@ -173,6 +175,8 @@ lockd(void *vrqstp)
+ 	if (nlmsvc_ops)
+ 		nlmsvc_invalidate_all();
+ 	nlm_shutdown_hosts();
++	cancel_delayed_work_sync(&ln->grace_period_end);
++	locks_end_grace(&ln->lockd_manager);
+ 	return 0;
+ }
+ 
+@@ -267,8 +271,6 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
+ 	if (ln->nlmsvc_users) {
+ 		if (--ln->nlmsvc_users == 0) {
+ 			nlm_shutdown_hosts_net(net);
+-			cancel_delayed_work_sync(&ln->grace_period_end);
+-			locks_end_grace(&ln->lockd_manager);
+ 			svc_shutdown_net(serv, net);
+ 			dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+ 		}
+diff --git a/fs/namei.c b/fs/namei.c
+index 0d97235019a9..4d333d26a028 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -219,9 +219,10 @@ getname_kernel(const char * filename)
+ 	if (len <= EMBEDDED_NAME_MAX) {
+ 		result->name = (char *)result->iname;
+ 	} else if (len <= PATH_MAX) {
++		const size_t size = offsetof(struct filename, iname[1]);
+ 		struct filename *tmp;
+ 
+-		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
++		tmp = kmalloc(size, GFP_KERNEL);
+ 		if (unlikely(!tmp)) {
+ 			__putname(result);
+ 			return ERR_PTR(-ENOMEM);
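
(Illustrative aside, not part of the patch.) The getname_kernel() change sizes
the allocation with offsetof() so the struct carries exactly one byte of its
trailing iname array; for long names the string itself lives in the separately
allocated PATH_MAX buffer that result->name points at. The sizing idiom, using
a simplified stand-in layout (not the kernel's actual struct filename):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	/* simplified stand-in; the real struct filename has more fields */
	struct filename {
		const char *name;	/* points at the actual string */
		int refcnt;
		char iname[];		/* short names are embedded here */
	};

	int main(void)
	{
		/* header plus a single iname byte; sizeof() gives the header only */
		size_t size = offsetof(struct filename, iname[1]);
		struct filename *f = malloc(size);

		if (!f)
			return 1;
		f->iname[0] = '\0';
		printf("allocated %zu bytes (header alone is %zu)\n",
		       size, sizeof(struct filename));
		free(f);
		return 0;
	}
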
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 58b281ad30d5..45fc042b84ce 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1007,7 +1007,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
+ 			goto out_free;
+ 	}
+ 
+-	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
++	mnt->mnt.mnt_flags = old->mnt.mnt_flags;
++	mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
+ 	/* Don't allow unprivileged users to change mount flags */
+ 	if (flag & CL_UNPRIVILEGED) {
+ 		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
+diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
+index 88dbbc9fcf4d..f571570a2e72 100644
+--- a/fs/ncpfs/ncplib_kernel.c
++++ b/fs/ncpfs/ncplib_kernel.c
+@@ -980,6 +980,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
+ 		goto out;
+ 	}
+ 	*bytes_read = ncp_reply_be16(server, 0);
++	if (*bytes_read > to_read) {
++		result = -EINVAL;
++		goto out;
++	}
+ 	source = ncp_reply_data(server, 2 + (offset & 1));
+ 
+ 	memcpy(target, source, *bytes_read);
+diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
+index cb050d1e8146..10b055105b36 100644
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -86,9 +86,9 @@ struct nfs_direct_req {
+ 	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
+ 	int			mirror_count;
+ 
++	loff_t			io_start;	/* Start offset for I/O */
+ 	ssize_t			count,		/* bytes actually processed */
+ 				bytes_left,	/* bytes left to be sent */
+-				io_start,	/* start of IO */
+ 				error;		/* any reported error */
+ 	struct completion	completion;	/* wait for i/o completion */
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index c2abdc7db6c3..4af8e428e4c9 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -307,6 +307,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
+ 			goto out_err_free;
+ 
+ 		/* fh */
++		rc = -EIO;
+ 		p = xdr_inline_decode(&stream, 4);
+ 		if (!p)
+ 			goto out_err_free;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 0fb0dc739fb2..9b6950a5fcc6 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7531,6 +7531,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
+ 		/* fall through */
+ 	case -NFS4ERR_RETRY_UNCACHED_REP:
+ 		return -EAGAIN;
++	case -NFS4ERR_BADSESSION:
++	case -NFS4ERR_DEADSESSION:
++	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++		nfs4_schedule_session_recovery(clp->cl_session,
++				task->tk_status);
++		break;
+ 	default:
+ 		nfs4_schedule_lease_recovery(clp);
+ 	}
+@@ -7609,7 +7615,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
+ 	if (status == 0)
+ 		status = task->tk_status;
+ 	rpc_put_task(task);
+-	return 0;
+ out:
+ 	dprintk("<-- %s status=%d\n", __func__, status);
+ 	return status;
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index f8d2902ec118..0b50bdfbc32f 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1591,13 +1591,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
+ 	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
+ }
+ 
+-static void nfs4_reclaim_complete(struct nfs_client *clp,
++static int nfs4_reclaim_complete(struct nfs_client *clp,
+ 				 const struct nfs4_state_recovery_ops *ops,
+ 				 struct rpc_cred *cred)
+ {
+ 	/* Notify the server we're done reclaiming our state */
+ 	if (ops->reclaim_complete)
+-		(void)ops->reclaim_complete(clp, cred);
++		return ops->reclaim_complete(clp, cred);
++	return 0;
+ }
+ 
+ static void nfs4_clear_reclaim_server(struct nfs_server *server)
+@@ -1644,13 +1645,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
+ {
+ 	const struct nfs4_state_recovery_ops *ops;
+ 	struct rpc_cred *cred;
++	int err;
+ 
+ 	if (!nfs4_state_clear_reclaim_reboot(clp))
+ 		return;
+ 	ops = clp->cl_mvops->reboot_recovery_ops;
+ 	cred = nfs4_get_clid_cred(clp);
+-	nfs4_reclaim_complete(clp, ops, cred);
++	err = nfs4_reclaim_complete(clp, ops, cred);
+ 	put_rpccred(cred);
++	if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
++		set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
+ }
+ 
+ static void nfs_delegation_clear_all(struct nfs_client *clp)
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 93d355c8b467..50d40b129737 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1277,8 +1277,10 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+ 		mirror = &desc->pg_mirrors[midx];
+ 		if (!list_empty(&mirror->pg_list)) {
+ 			prev = nfs_list_entry(mirror->pg_list.prev);
+-			if (index != prev->wb_index + 1)
+-				nfs_pageio_complete_mirror(desc, midx);
++			if (index != prev->wb_index + 1) {
++				nfs_pageio_complete(desc);
++				break;
++			}
+ 		}
+ 	}
+ }
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 1e58fa0a28a3..73c93f2d6353 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1256,14 +1256,14 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
+ 	const struct nfsd4_layout_ops *ops;
+ 	struct nfs4_layout_stateid *ls;
+ 	__be32 nfserr;
+-	int accmode;
++	int accmode = NFSD_MAY_READ_IF_EXEC;
+ 
+ 	switch (lgp->lg_seg.iomode) {
+ 	case IOMODE_READ:
+-		accmode = NFSD_MAY_READ;
++		accmode |= NFSD_MAY_READ;
+ 		break;
+ 	case IOMODE_RW:
+-		accmode = NFSD_MAY_READ | NFSD_MAY_WRITE;
++		accmode |= NFSD_MAY_READ | NFSD_MAY_WRITE;
+ 		break;
+ 	default:
+ 		dprintk("%s: invalid iomode %d\n",
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 5eaee287be23..9bfcd93448dc 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -92,6 +92,12 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
+ 	err = follow_down(&path);
+ 	if (err < 0)
+ 		goto out;
++	if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
++	    nfsd_mountpoint(dentry, exp) == 2) {
++		/* This is only a mountpoint in some other namespace */
++		path_put(&path);
++		goto out;
++	}
+ 
+ 	exp2 = rqst_exp_get_by_name(rqstp, &path);
+ 	if (IS_ERR(exp2)) {
+@@ -165,16 +171,26 @@ static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, st
+ /*
+  * For nfsd purposes, we treat V4ROOT exports as though there was an
+  * export at *every* directory.
++ * We return:
++ * '1' if this dentry *must* be an export point,
++ * '2' if it might be, if there is really a mount here, and
++ * '0' if there is no chance of an export point here.
+  */
+ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
+ {
+-	if (d_mountpoint(dentry))
++	if (!d_inode(dentry))
++		return 0;
++	if (exp->ex_flags & NFSEXP_V4ROOT)
+ 		return 1;
+ 	if (nfsd4_is_junction(dentry))
+ 		return 1;
+-	if (!(exp->ex_flags & NFSEXP_V4ROOT))
+-		return 0;
+-	return d_inode(dentry) != NULL;
++	if (d_mountpoint(dentry))
++		/*
++		 * Might only be a mountpoint in a different namespace,
++		 * but we need to check.
++		 */
++		return 2;
++	return 0;
+ }
+ 
+ __be32
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index e0e5f7c3c99f..8a459b179183 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
+ 				       u32 event_mask,
+ 				       void *data, int data_type)
+ {
+-	__u32 marks_mask, marks_ignored_mask;
++	__u32 marks_mask = 0, marks_ignored_mask = 0;
+ 	struct path *path = data;
+ 
+ 	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
+@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
+ 	    !d_can_lookup(path->dentry))
+ 		return false;
+ 
+-	if (inode_mark && vfsmnt_mark) {
+-		marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
+-		marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
+-	} else if (inode_mark) {
+-		/*
+-		 * if the event is for a child and this inode doesn't care about
+-		 * events on the child, don't send it!
+-		 */
+-		if ((event_mask & FS_EVENT_ON_CHILD) &&
+-		    !(inode_mark->mask & FS_EVENT_ON_CHILD))
+-			return false;
+-		marks_mask = inode_mark->mask;
+-		marks_ignored_mask = inode_mark->ignored_mask;
+-	} else if (vfsmnt_mark) {
+-		marks_mask = vfsmnt_mark->mask;
+-		marks_ignored_mask = vfsmnt_mark->ignored_mask;
+-	} else {
+-		BUG();
++	/*
++	 * if the event is for a child and this inode doesn't care about
++	 * events on the child, don't send it!
++	 */
++	if (inode_mark &&
++	    (!(event_mask & FS_EVENT_ON_CHILD) ||
++	     (inode_mark->mask & FS_EVENT_ON_CHILD))) {
++		marks_mask |= inode_mark->mask;
++		marks_ignored_mask |= inode_mark->ignored_mask;
++	}
++
++	if (vfsmnt_mark) {
++		marks_mask |= vfsmnt_mark->mask;
++		marks_ignored_mask |= vfsmnt_mark->ignored_mask;
+ 	}
+ 
+ 	if (d_is_dir(path->dentry) &&
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index d293034ae2cb..e73f0070a0fc 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -274,6 +274,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
+ 	return vfs_getxattr(realpath.dentry, name, value, size);
+ }
+ 
++static bool ovl_can_list(const char *s)
++{
++	/* List all non-trusted xattrs */
++	if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
++		return true;
++
++	/* Never list trusted.overlay, list other trusted for superuser only */
++	return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
++}
++
+ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+ 	struct path realpath;
+@@ -298,7 +308,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+ 			return -EIO;
+ 
+ 		len -= slen;
+-		if (ovl_is_private_xattr(s)) {
++		if (!ovl_can_list(s)) {
+ 			res -= slen;
+ 			memmove(s, s + slen, len);
+ 		} else {
+diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
+index 9d6486d416a3..00985f9db9f7 100644
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
+ 	 * will be requeued because superblock is being shutdown and doesn't
+ 	 * have MS_ACTIVE set.
+ 	 */
+-	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
++	reiserfs_cancel_old_flush(sb);
+ 	/* wait for all commits to finish */
+ 	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
+ 
+@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
+ 	if (IS_ERR(journal->j_dev_bd)) {
+ 		result = PTR_ERR(journal->j_dev_bd);
+ 		journal->j_dev_bd = NULL;
+-		reiserfs_warning(super,
++		reiserfs_warning(super, "sh-457",
+ 				 "journal_init_dev: Cannot open '%s': %i",
+ 				 jdev_name, result);
+ 		return result;
+diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
+index 5dcf3ab83886..6ca00471afbf 100644
+--- a/fs/reiserfs/reiserfs.h
++++ b/fs/reiserfs/reiserfs.h
+@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
+ 				   struct reiserfs_list_bitmap *, unsigned int);
+ 
+ void reiserfs_schedule_old_flush(struct super_block *s);
++void reiserfs_cancel_old_flush(struct super_block *s);
+ void add_save_link(struct reiserfs_transaction_handle *th,
+ 		   struct inode *inode, int truncate);
+ int remove_save_link(struct inode *inode, int truncate);
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index cf6fa25f884b..45ec0e91010a 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -89,7 +89,9 @@ static void flush_old_commits(struct work_struct *work)
+ 	s = sbi->s_journal->j_work_sb;
+ 
+ 	spin_lock(&sbi->old_work_lock);
+-	sbi->work_queued = 0;
++	/* Avoid clobbering the cancel state... */
++	if (sbi->work_queued == 1)
++		sbi->work_queued = 0;
+ 	spin_unlock(&sbi->old_work_lock);
+ 
+ 	reiserfs_sync_fs(s, 1);
+@@ -116,21 +118,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
+ 	spin_unlock(&sbi->old_work_lock);
+ }
+ 
+-static void cancel_old_flush(struct super_block *s)
++void reiserfs_cancel_old_flush(struct super_block *s)
+ {
+ 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+ 
+-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+ 	spin_lock(&sbi->old_work_lock);
+-	sbi->work_queued = 0;
++	/* Make sure no new flushes will be queued */
++	sbi->work_queued = 2;
+ 	spin_unlock(&sbi->old_work_lock);
++	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+ }
+ 
+ static int reiserfs_freeze(struct super_block *s)
+ {
+ 	struct reiserfs_transaction_handle th;
+ 
+-	cancel_old_flush(s);
++	reiserfs_cancel_old_flush(s);
+ 
+ 	reiserfs_write_lock(s);
+ 	if (!(s->s_flags & MS_RDONLY)) {
+@@ -151,7 +154,13 @@ static int reiserfs_freeze(struct super_block *s)
+ 
+ static int reiserfs_unfreeze(struct super_block *s)
+ {
++	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
++
+ 	reiserfs_allow_writes(s);
++	spin_lock(&sbi->old_work_lock);
++	/* Allow old_work to run again */
++	sbi->work_queued = 0;
++	spin_unlock(&sbi->old_work_lock);
+ 	return 0;
+ }
+ 
+@@ -2177,7 +2186,7 @@ error_unlocked:
+ 	if (sbi->commit_wq)
+ 		destroy_workqueue(sbi->commit_wq);
+ 
+-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
++	reiserfs_cancel_old_flush(s);
+ 
+ 	reiserfs_free_bitmap_cache(s);
+ 	if (SB_BUFFER_WITH_SB(s))
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 75e6f04bb795..48ab0c462f21 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -1724,8 +1724,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
+ 
+ 	dbg_save_space_info(c);
+ 
+-	for (i = 0; i < c->jhead_cnt; i++)
+-		ubifs_wbuf_sync(&c->jheads[i].wbuf);
++	for (i = 0; i < c->jhead_cnt; i++) {
++		err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++		if (err)
++			ubifs_ro_mode(c, err);
++	}
+ 
+ 	c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
+ 	c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+@@ -1791,8 +1794,11 @@ static void ubifs_put_super(struct super_block *sb)
+ 			int err;
+ 
+ 			/* Synchronize write-buffers */
+-			for (i = 0; i < c->jhead_cnt; i++)
+-				ubifs_wbuf_sync(&c->jheads[i].wbuf);
++			for (i = 0; i < c->jhead_cnt; i++) {
++				err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
++				if (err)
++					ubifs_ro_mode(c, err);
++			}
+ 
+ 			/*
+ 			 * We are being cleanly unmounted which means the
+diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
+index d1e49d52b640..de179993e039 100644
+--- a/include/linux/compiler-clang.h
++++ b/include/linux/compiler-clang.h
+@@ -10,3 +10,8 @@
+ #undef uninitialized_var
+ #define uninitialized_var(x) x = *(&(x))
+ #endif
++
++/* same as gcc, this was present in clang-2.6 so we can assume it works
++ * with any version that can compile the kernel
++ */
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
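
This gives clang the same __UNIQUE_ID that gcc builds from __COUNTER__, so each
macro expansion produces a fresh identifier. A standalone sketch with the paste
helpers spelled out (in the kernel they come from compiler.h; any gcc or clang
providing __COUNTER__ will do):

    #include <stdio.h>

    /* Spelled-out equivalents of the kernel's ___PASTE/__PASTE helpers. */
    #define ___PASTE(a, b) a##b
    #define __PASTE(a, b) ___PASTE(a, b)
    #define __UNIQUE_ID(prefix) \
    	__PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

    /* Two uses expand to distinct names, e.g. __UNIQUE_ID_demo0/demo1. */
    static int __attribute__((unused)) __UNIQUE_ID(demo) = 1;
    static int __attribute__((unused)) __UNIQUE_ID(demo) = 2;

    int main(void)
    {
    	printf("no redefinition error: __COUNTER__ advanced per use\n");
    	return 0;
    }
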
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index a91b3b75da0f..bb3a4bb35183 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -661,6 +661,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
+ void free_cpumask_var(cpumask_var_t mask);
+ void free_bootmem_cpumask_var(cpumask_var_t mask);
+ 
++static inline bool cpumask_available(cpumask_var_t mask)
++{
++	return mask != NULL;
++}
++
+ #else
+ typedef struct cpumask cpumask_var_t[1];
+ 
+@@ -701,6 +706,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
+ static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
+ {
+ }
++
++static inline bool cpumask_available(cpumask_var_t mask)
++{
++	return true;
++}
+ #endif /* CONFIG_CPUMASK_OFFSTACK */
+ 
+ /* It's common to want to use cpu_all_mask in struct member initializers,
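
cpumask_available() gives callers one spelling for "was this mask allocated?"
that works in both configurations: with CONFIG_CPUMASK_OFFSTACK the variable is
a pointer that may still be NULL, without it the variable is an on-stack array
and the test is constantly true. A hedged in-kernel sketch of the intended call
pattern (the demo_* names are hypothetical):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static cpumask_var_t demo_mask;

    static int demo_init(void)
    {
    	if (!zalloc_cpumask_var(&demo_mask, GFP_KERNEL))
    		return -ENOMEM;	/* only possible in the offstack build */
    	return 0;
    }

    static void demo_mark_cpu(int cpu)
    {
    	/* Safe in both builds; never dereferences a NULL offstack mask. */
    	if (cpumask_available(demo_mask))
    		cpumask_set_cpu(cpu, demo_mask);
    }
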
+diff --git a/include/linux/hid.h b/include/linux/hid.h
+index 176b43670e5d..123852d873fa 100644
+--- a/include/linux/hid.h
++++ b/include/linux/hid.h
+@@ -793,7 +793,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
+ extern void hidinput_disconnect(struct hid_device *);
+ 
+ int hid_set_field(struct hid_field *, unsigned, __s32);
+-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
++int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
+ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
+ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
+ unsigned int hidinput_count_leds(struct hid_device *hid);
+@@ -1096,13 +1096,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
+  *
+  * @report: the report we want to know the length
+  */
+-static inline int hid_report_len(struct hid_report *report)
++static inline u32 hid_report_len(struct hid_report *report)
+ {
+ 	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
+ 	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ }
+ 
+-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
+ 		int interrupt);
+ 
+ /* HID quirks API */
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 99de81a5a4c6..adbf5b313ff8 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
+  * Returns true if the skb is tagged with multiple vlan headers, regardless
+  * of whether it is hardware accelerated or not.
+  */
+-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
++static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
+ {
+ 	__be16 protocol = skb->protocol;
+ 
+@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+ 			   protocol != htons(ETH_P_8021AD)))
+ 			return false;
+ 
++		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
++			return false;
++
+ 		veh = (struct vlan_ethhdr *)skb->data;
+ 		protocol = veh->h_vlan_encapsulated_proto;
+ 	}
+@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+  *
+  * Returns features without unsafe ones if the skb has multiple tags.
+  */
+-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
++static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
+ 						    netdev_features_t features)
+ {
+ 	if (skb_vlan_tagged_multi(skb)) {
+diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
+index c367cbdf73ab..443dd702537f 100644
+--- a/include/linux/jiffies.h
++++ b/include/linux/jiffies.h
+@@ -1,6 +1,7 @@
+ #ifndef _LINUX_JIFFIES_H
+ #define _LINUX_JIFFIES_H
+ 
++#include <linux/cache.h>
+ #include <linux/math64.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+@@ -62,19 +63,17 @@ extern int register_refined_jiffies(long clock_tick_rate);
+ /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+ 
+-/* some arch's have a small-data section that can be accessed register-relative
+- * but that can only take up to, say, 4-byte variables. jiffies being part of
+- * an 8-byte variable may not be correctly accessed unless we force the issue
+- */
+-#define __jiffy_data  __attribute__((section(".data")))
++#ifndef __jiffy_arch_data
++#define __jiffy_arch_data
++#endif
+ 
+ /*
+  * The 64-bit value is not atomic - you MUST NOT read it
+  * without sampling the sequence number in jiffies_lock.
+  * get_jiffies_64() will do this for you as appropriate.
+  */
+-extern u64 __jiffy_data jiffies_64;
+-extern unsigned long volatile __jiffy_data jiffies;
++extern u64 __cacheline_aligned_in_smp jiffies_64;
++extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
+ 
+ #if (BITS_PER_LONG < 64)
+ u64 get_jiffies_64(void);
+diff --git a/include/linux/llist.h b/include/linux/llist.h
+index fbf10a0bc095..4d86a9d273b3 100644
+--- a/include/linux/llist.h
++++ b/include/linux/llist.h
+@@ -87,6 +87,23 @@ static inline void init_llist_head(struct llist_head *list)
+ #define llist_entry(ptr, type, member)		\
+ 	container_of(ptr, type, member)
+ 
++/**
++ * member_address_is_nonnull - check whether the member address is not NULL
++ * @ptr:	the object pointer (struct type * that contains the llist_node)
++ * @member:	the name of the llist_node within the struct.
++ *
++ * This macro is conceptually the same as
++ *	&ptr->member != NULL
++ * but it works around the fact that compilers can assume that the address
++ * of a struct member is never a NULL pointer.
++ *
++ * Real objects that start at a high address and have a member at NULL are
++ * unlikely to exist, but such pointers may be returned e.g. by the
++ * container_of() macro.
++ */
++#define member_address_is_nonnull(ptr, member)	\
++	((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
++
+ /**
+  * llist_for_each - iterate over some deleted entries of a lock-less list
+  * @pos:	the &struct llist_node to use as a loop cursor
+@@ -121,7 +138,7 @@ static inline void init_llist_head(struct llist_head *list)
+  */
+ #define llist_for_each_entry(pos, node, member)				\
+ 	for ((pos) = llist_entry((node), typeof(*(pos)), member);	\
+-	     &(pos)->member != NULL;					\
++	     member_address_is_nonnull(pos, member);			\
+ 	     (pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
+ 
+ /**
+@@ -143,7 +160,7 @@ static inline void init_llist_head(struct llist_head *list)
+  */
+ #define llist_for_each_entry_safe(pos, n, node, member)			       \
+ 	for (pos = llist_entry((node), typeof(*pos), member);		       \
+-	     &pos->member != NULL &&					       \
++	     member_address_is_nonnull(pos, member) &&			       \
+ 	        (n = llist_entry(pos->member.next, typeof(*n), member), true); \
+ 	     pos = n)
+ 
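
The point of member_address_is_nonnull(): the old loop condition
&pos->member != NULL computes a member address, which the compiler is entitled
to assume is never NULL and may fold to "true", breaking the loop terminator.
The uintptr_t form performs the same comparison as plain integer arithmetic. A
userspace illustration (struct node is hypothetical; builds with gcc/clang,
which provide typeof):

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct node {
    	int payload;
    	struct node *link;	/* plays the role of the llist member */
    };

    #define member_address_is_nonnull(ptr, member) \
    	((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)

    int main(void)
    {
    	/* The pointer container_of() would yield for a NULL member. */
    	struct node *pos = (struct node *)(0 - offsetof(struct node, link));

    	/* The compiler may fold this to 1 despite &pos->link being 0: */
    	printf("naive:  %d\n", &pos->link != NULL);
    	/* The arithmetic form evaluates the address honestly: */
    	printf("robust: %d\n", (int)member_address_is_nonnull(pos, link));
    	return 0;
    }
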
+diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
+index 6fed539e5456..066818f0a0ac 100644
+--- a/include/linux/mlx4/qp.h
++++ b/include/linux/mlx4/qp.h
+@@ -450,6 +450,7 @@ struct mlx4_update_qp_params {
+ 	u16	rate_val;
+ };
+ 
++struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
+ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
+ 		   enum mlx4_update_qp_attr attr,
+ 		   struct mlx4_update_qp_params *params);
+diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
+index b63fa457febd..3529683f691e 100644
+--- a/include/linux/mtd/flashchip.h
++++ b/include/linux/mtd/flashchip.h
+@@ -85,6 +85,7 @@ struct flchip {
+ 	unsigned int write_suspended:1;
+ 	unsigned int erase_suspended:1;
+ 	unsigned long in_progress_block_addr;
++	unsigned long in_progress_block_mask;
+ 
+ 	struct mutex mutex;
+ 	wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index cc615e273f80..b95f00cb6219 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -247,6 +247,8 @@ unsigned int *xt_alloc_entry_offsets(unsigned int size);
+ bool xt_find_jump_offset(const unsigned int *offsets,
+ 			 unsigned int target, unsigned int size);
+ 
++int xt_check_proc_name(const char *name, unsigned int size);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ 		   bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 30a8f531236c..2629fc3e24e0 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -146,7 +146,7 @@ static inline int page_cache_get_speculative(struct page *page)
+ 
+ #ifdef CONFIG_TINY_RCU
+ # ifdef CONFIG_PREEMPT_COUNT
+-	VM_BUG_ON(!in_atomic());
++	VM_BUG_ON(!in_atomic() && !irqs_disabled());
+ # endif
+ 	/*
+ 	 * Preempt must be disabled here - we rely on rcu_read_lock doing
+@@ -184,7 +184,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
+ 
+ #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
+ # ifdef CONFIG_PREEMPT_COUNT
+-	VM_BUG_ON(!in_atomic());
++	VM_BUG_ON(!in_atomic() && !irqs_disabled());
+ # endif
+ 	VM_BUG_ON_PAGE(page_count(page) == 0, page);
+ 	atomic_add(count, &page->_count);
+diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
+index 1419133fa69e..4ac1a070af0a 100644
+--- a/include/linux/platform_data/isl9305.h
++++ b/include/linux/platform_data/isl9305.h
+@@ -24,7 +24,7 @@
+ struct regulator_init_data;
+ 
+ struct isl9305_pdata {
+-	struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
++	struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
+ };
+ 
+ #endif
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 495ad8fbe240..30fb6495315b 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -847,10 +847,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ 				     unsigned int headroom);
+ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
+ 				int newtailroom, gfp_t priority);
+-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+-			int offset, int len);
+-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
+-		 int len);
++int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
++				     int offset, int len);
++int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
++			      int offset, int len);
+ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
+ int skb_pad(struct sk_buff *skb, int pad);
+ #define dev_kfree_skb(a)	consume_skb(a)
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 52baf4089bd2..0d56f919bda3 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -343,6 +343,7 @@ struct tty_file_private {
+ #define TTY_PTY_LOCK 		16	/* pty private */
+ #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
+ #define TTY_HUPPED 		18	/* Post driver->hangup() */
++#define TTY_HUPPING		19	/* Hangup in progress */
+ #define TTY_LDISC_HALTED	22	/* Line discipline is halted */
+ 
+ #define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
+@@ -581,7 +582,7 @@ extern int tty_unregister_ldisc(int disc);
+ extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
+ extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
+ extern void tty_ldisc_release(struct tty_struct *tty);
+-extern void tty_ldisc_init(struct tty_struct *tty);
++extern int __must_check tty_ldisc_init(struct tty_struct *tty);
+ extern void tty_ldisc_deinit(struct tty_struct *tty);
+ extern void tty_ldisc_begin(void);
+ 
+diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
+index 4f3dfb7d0654..96a8870e38fe 100644
+--- a/include/linux/usb/gadget.h
++++ b/include/linux/usb/gadget.h
+@@ -585,9 +585,21 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
+ 	list_for_each_entry(tmp, &(gadget)->ep_list, ep_list)
+ 
+ 
++/**
++ * usb_ep_align - returns @len aligned to ep's maxpacketsize.
++ * @ep: the endpoint whose maxpacketsize is used to align @len
++ * @len: buffer length to align to @ep's maxpacketsize
++ *
++ * This helper is used to align a buffer's size to an ep's maxpacketsize.
++ */
++static inline size_t usb_ep_align(struct usb_ep *ep, size_t len)
++{
++	return round_up(len, (size_t)le16_to_cpu(ep->desc->wMaxPacketSize));
++}
++
+ /**
+  * usb_ep_align_maybe - returns @len aligned to ep's maxpacketsize if gadget
+- *	requires quirk_ep_out_aligned_size, otherwise reguens len.
++ *	requires quirk_ep_out_aligned_size, otherwise returns len.
+  * @g: controller to check for quirk
+  * @ep: the endpoint whose maxpacketsize is used to align @len
+  * @len: buffer size's length to align to @ep's maxpacketsize
+@@ -598,8 +610,7 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev)
+ static inline size_t
+ usb_ep_align_maybe(struct usb_gadget *g, struct usb_ep *ep, size_t len)
+ {
+-	return !g->quirk_ep_out_aligned_size ? len :
+-			round_up(len, (size_t)ep->desc->wMaxPacketSize);
++	return g->quirk_ep_out_aligned_size ? usb_ep_align(ep, len) : len;
+ }
+ 
+ /**
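
usb_ep_align() rounds a length up to the endpoint's wMaxPacketSize, taking care
to byte-swap the little-endian descriptor field first (the le16_to_cpu the older
open-coded version omitted). The rounding itself, sketched in userspace with the
power-of-two round_up spelled out the way the kernel implements it:

    #include <stdio.h>
    #include <stddef.h>

    /* Kernel-style round_up; valid when y is a power of two. */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
    	size_t max_packet = 64;	/* e.g. a full-speed bulk endpoint */
    	size_t lens[] = { 1, 64, 100, 512 };

    	for (size_t i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
    		printf("len %3zu -> aligned %3zu\n", lens[i],
    		       (size_t)round_up(lens[i], max_packet));
    	/* Prints 64, 64, 128 and 512. */
    	return 0;
    }
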
+diff --git a/include/linux/virtio.h b/include/linux/virtio.h
+index 8f4d4bfa6d46..d7844d215381 100644
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -124,6 +124,9 @@ int virtio_device_freeze(struct virtio_device *dev);
+ int virtio_device_restore(struct virtio_device *dev);
+ #endif
+ 
++#define virtio_device_for_each_vq(vdev, vq) \
++	list_for_each_entry(vq, &vdev->vqs, list)
++
+ /**
+  * virtio_driver - operations for a virtio I/O driver
+  * @driver: underlying device driver (populate name and owner).
+diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
+index f8d6813cd5b2..0cd8002cdddd 100644
+--- a/include/net/cfg80211.h
++++ b/include/net/cfg80211.h
+@@ -929,9 +929,9 @@ enum rate_info_flags {
+  * @RATE_INFO_BW_160: 160 MHz bandwidth
+  */
+ enum rate_info_bw {
++	RATE_INFO_BW_20 = 0,
+ 	RATE_INFO_BW_5,
+ 	RATE_INFO_BW_10,
+-	RATE_INFO_BW_20,
+ 	RATE_INFO_BW_40,
+ 	RATE_INFO_BW_80,
+ 	RATE_INFO_BW_160,
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 7682cb2ae237..378238f50208 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -55,6 +55,7 @@ struct inet_timewait_sock {
+ #define tw_family		__tw_common.skc_family
+ #define tw_state		__tw_common.skc_state
+ #define tw_reuse		__tw_common.skc_reuse
++#define tw_reuseport		__tw_common.skc_reuseport
+ #define tw_ipv6only		__tw_common.skc_ipv6only
+ #define tw_bound_dev_if		__tw_common.skc_bound_dev_if
+ #define tw_node			__tw_common.skc_nulls_node
+diff --git a/include/net/nexthop.h b/include/net/nexthop.h
+index 3334dbfa5aa4..7fc78663ec9d 100644
+--- a/include/net/nexthop.h
++++ b/include/net/nexthop.h
+@@ -6,7 +6,7 @@
+ 
+ static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
+ {
+-	return remaining >= sizeof(*rtnh) &&
++	return remaining >= (int)sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len >= sizeof(*rtnh) &&
+ 	       rtnh->rtnh_len <= remaining;
+ }
+diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
+index 8716d5942b65..8fcf8908a694 100644
+--- a/include/net/slhc_vj.h
++++ b/include/net/slhc_vj.h
+@@ -127,6 +127,7 @@ typedef __u32 int32;
+  */
+ struct cstate {
+ 	byte_t	cs_this;	/* connection id number (xmit) */
++	bool	initialized;	/* true if initialized */
+ 	struct cstate *next;	/* next in ring (xmit) */
+ 	struct iphdr cs_ip;	/* ip/tcp hdr from most recent packet */
+ 	struct tcphdr cs_tcp;
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 966d229d4482..dc542603d0f1 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1123,9 +1123,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
+ 
+ static inline int tcp_win_from_space(int space)
+ {
+-	return sysctl_tcp_adv_win_scale<=0 ?
+-		(space>>(-sysctl_tcp_adv_win_scale)) :
+-		space - (space>>sysctl_tcp_adv_win_scale);
++	int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
++
++	return tcp_adv_win_scale <= 0 ?
++		(space>>(-tcp_adv_win_scale)) :
++		space - (space>>tcp_adv_win_scale);
+ }
+ 
+ /* Note: caller must be prepared to deal with negative returns */
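
The tcp_win_from_space() change is a read-once fix: sysctl_tcp_adv_win_scale is
copied into a local so a racing sysctl write cannot be observed with two
different values inside one expression (the old code read the sysctl in both the
condition and the chosen branch, which could yield an inconsistent result). The
arithmetic is unchanged; a sketch with the scale passed explicitly, plus worked
values:

    #include <stdio.h>

    /* Mirror of tcp_win_from_space() with the scale as a parameter. */
    static int win_from_space(int space, int scale)
    {
    	return scale <= 0 ? space >> -scale : space - (space >> scale);
    }

    int main(void)
    {
    	int scales[] = { -2, -1, 0, 1, 2 };

    	/* For 64 KiB of space: 16384, 32768, 65536, 32768, 49152. */
    	for (int i = 0; i < 5; i++)
    		printf("scale %2d -> window %d\n", scales[i],
    		       win_from_space(65536, scales[i]));
    	return 0;
    }
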
+diff --git a/include/net/x25.h b/include/net/x25.h
+index c383aa4edbf0..6d30a01d281d 100644
+--- a/include/net/x25.h
++++ b/include/net/x25.h
+@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);
+ 
+ /* sysctl_net_x25.c */
+ #ifdef CONFIG_SYSCTL
+-void x25_register_sysctl(void);
++int x25_register_sysctl(void);
+ void x25_unregister_sysctl(void);
+ #else
+-static inline void x25_register_sysctl(void) {};
++static inline int x25_register_sysctl(void) { return 0; };
+ static inline void x25_unregister_sysctl(void) {};
+ #endif /* CONFIG_SYSCTL */
+ 
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index e6796dc8c764..561b0ca8cb19 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -109,6 +109,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
+ 	      const unsigned char *dst_dev_addr);
+ 
+ int rdma_addr_size(struct sockaddr *addr);
++int rdma_addr_size_in6(struct sockaddr_in6 *addr);
++int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
+ 
+ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
+ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
+diff --git a/include/sound/control.h b/include/sound/control.h
+index 95aad6d3fd1a..8e752793b94a 100644
+--- a/include/sound/control.h
++++ b/include/sound/control.h
+@@ -22,6 +22,7 @@
+  *
+  */
+ 
++#include <linux/nospec.h>
+ #include <sound/asound.h>
+ 
+ #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
+@@ -147,12 +148,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
+ 
+ static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+ {
+-	return id->numid - kctl->id.numid;
++	unsigned int ioff = id->numid - kctl->id.numid;
++	return array_index_nospec(ioff, kctl->count);
+ }
+ 
+ static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
+ {
+-	return id->index - kctl->id.index;
++	unsigned int ioff = id->index - kctl->id.index;
++	return array_index_nospec(ioff, kctl->count);
+ }
+ 
+ static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
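
Both ioff helpers turn a user-supplied element id into an offset that is later
used to index the control's values, so they are Spectre-v1 gadgets: under
speculation the callers' bounds checks can be bypassed. array_index_nospec()
clamps the offset against kctl->count without a mispredictable branch. The
generic pattern, as an in-kernel sketch (demo_read is hypothetical):

    #include <linux/nospec.h>

    /* idx comes from userspace; table and len are trusted. */
    static int demo_read(const int *table, unsigned int len, unsigned int idx)
    {
    	if (idx >= len)
    		return -1;			/* architectural check */
    	idx = array_index_nospec(idx, len);	/* clamp under speculation */
    	return table[idx];
    }
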
+diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
+index 760c969d885d..12bbf8c81112 100644
+--- a/include/sound/pcm_oss.h
++++ b/include/sound/pcm_oss.h
+@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
+ 	char *buffer;				/* vmallocated period */
+ 	size_t buffer_used;			/* used length from period buffer */
+ 	struct mutex params_lock;
++	atomic_t rw_ref;		/* concurrent read/write accesses */
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
+ 	struct snd_pcm_plugin *plugin_first;
+ 	struct snd_pcm_plugin *plugin_last;
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index 413417f3707b..7aa8cbc23b28 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -106,7 +106,7 @@
+ #define PCI_SUBSYSTEM_ID	0x2e
+ #define PCI_ROM_ADDRESS		0x30	/* Bits 31..11 are address, 10..1 reserved */
+ #define  PCI_ROM_ADDRESS_ENABLE	0x01
+-#define PCI_ROM_ADDRESS_MASK	(~0x7ffUL)
++#define PCI_ROM_ADDRESS_MASK	(~0x7ffU)
+ 
+ #define PCI_CAPABILITY_LIST	0x34	/* Offset of first capability list entry */
+ 
+diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
+index d2314be4f0c0..19f9dc2c06f6 100644
+--- a/include/uapi/linux/usb/audio.h
++++ b/include/uapi/linux/usb/audio.h
+@@ -369,7 +369,7 @@ static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_d
+ {
+ 	return (protocol == UAC_VERSION_1) ?
+ 		desc->baSourceID[desc->bNrInPins + 4] :
+-		desc->baSourceID[desc->bNrInPins + 6];
++		2; /* in UAC2, this value is constant */
+ }
+ 
+ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
+@@ -377,7 +377,7 @@ static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_de
+ {
+ 	return (protocol == UAC_VERSION_1) ?
+ 		&desc->baSourceID[desc->bNrInPins + 5] :
+-		&desc->baSourceID[desc->bNrInPins + 7];
++		&desc->baSourceID[desc->bNrInPins + 6];
+ }
+ 
+ static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
+diff --git a/ipc/shm.c b/ipc/shm.c
+index c2384d0e4fa6..aa3090ddc9eb 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -198,6 +198,12 @@ static int __shm_open(struct vm_area_struct *vma)
+ 	if (IS_ERR(shp))
+ 		return PTR_ERR(shp);
+ 
++	if (shp->shm_file != sfd->file) {
++		/* ID was reused */
++		shm_unlock(shp);
++		return -EINVAL;
++	}
++
+ 	shp->shm_atim = get_seconds();
+ 	shp->shm_lprid = task_tgid_vnr(current);
+ 	shp->shm_nattch++;
+@@ -414,8 +420,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
+ 	int ret;
+ 
+ 	/*
+-	 * In case of remap_file_pages() emulation, the file can represent
+-	 * removed IPC ID: propogate shm_lock() error to caller.
++	 * In case of remap_file_pages() emulation, the file can represent an
++	 * IPC ID that was removed, and possibly even reused by another shm
++	 * segment already.  Propagate this case as an error to the caller.
+ 	 */
+ 	ret =__shm_open(vma);
+ 	if (ret)
+@@ -439,6 +446,7 @@ static int shm_release(struct inode *ino, struct file *file)
+ 	struct shm_file_data *sfd = shm_file_data(file);
+ 
+ 	put_ipc_ns(sfd->ns);
++	fput(sfd->file);
+ 	shm_file_data(file) = NULL;
+ 	kfree(sfd);
+ 	return 0;
+@@ -1198,7 +1206,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
+ 	file->f_mapping = shp->shm_file->f_mapping;
+ 	sfd->id = shp->shm_perm.id;
+ 	sfd->ns = get_ipc_ns(ns);
+-	sfd->file = shp->shm_file;
++	/*
++	 * We need to take a reference to the real shm file to prevent the
++	 * pointer from becoming stale in cases where the lifetime of the outer
++	 * file extends beyond that of the shm segment.  It's not usually
++	 * possible, but it can happen during remap_file_pages() emulation as
++	 * that unmaps the memory, then does ->mmap() via file reference only.
++	 * We'll deny the ->mmap() if the shm segment was since removed, but to
++	 * detect shm ID reuse we need to compare the file pointers.
++	 */
++	sfd->file = get_file(shp->shm_file);
+ 	sfd->vm_ops = NULL;
+ 
+ 	err = security_mmap_file(file, prot, flags);
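
The shm fix is a plain lifetime bug: sfd->file used to borrow shp->shm_file
without a reference, so an outer file that outlived the segment (possible via
remap_file_pages() emulation) was left with a dangling pointer. Taking
get_file() at attach time, dropping it with the fput() added to shm_release(),
and comparing pointers in __shm_open() closes both the use-after-free and the
ID-reuse race. The take/put idiom in isolation (demo_wrap is hypothetical):

    #include <linux/file.h>
    #include <linux/fs.h>

    struct demo_wrap {
    	struct file *file;	/* long-lived borrowed file, now pinned */
    };

    static void demo_attach(struct demo_wrap *w, struct file *f)
    {
    	w->file = get_file(f);	/* +1 on f_count: f cannot be freed */
    }

    static void demo_release(struct demo_wrap *w)
    {
    	fput(w->file);		/* drop our reference */
    	w->file = NULL;
    }
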
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 1c1b8ab34037..6c1783bff424 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -75,7 +75,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
+ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ {
+ 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+-	u32 index = *(u32 *)key;
++	u32 index = key ? *(u32 *)key : U32_MAX;
+ 	u32 *next = (u32 *)next_key;
+ 
+ 	if (index >= array->map.max_entries) {
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 83c209d9b17a..ef7173e82179 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -149,12 +149,15 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ 	struct hlist_head *head;
+ 	struct htab_elem *l, *next_l;
+ 	u32 hash, key_size;
+-	int i;
++	int i = 0;
+ 
+ 	WARN_ON_ONCE(!rcu_read_lock_held());
+ 
+ 	key_size = map->key_size;
+ 
++	if (!key)
++		goto find_first_elem;
++
+ 	hash = htab_map_hash(key, key_size);
+ 
+ 	head = select_bucket(htab, hash);
+@@ -162,10 +165,8 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
+ 	/* lookup the key */
+ 	l = lookup_elem_raw(head, hash, key, key_size);
+ 
+-	if (!l) {
+-		i = 0;
++	if (!l)
+ 		goto find_first_elem;
+-	}
+ 
+ 	/* key was found, get next key in the same bucket */
+ 	next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 3bae6c591914..0fcb43cb2006 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -311,14 +311,18 @@ static int map_get_next_key(union bpf_attr *attr)
+ 	if (IS_ERR(map))
+ 		return PTR_ERR(map);
+ 
+-	err = -ENOMEM;
+-	key = kmalloc(map->key_size, GFP_USER);
+-	if (!key)
+-		goto err_put;
+-
+-	err = -EFAULT;
+-	if (copy_from_user(key, ukey, map->key_size) != 0)
+-		goto free_key;
++	if (ukey) {
++		err = -ENOMEM;
++		key = kmalloc(map->key_size, GFP_USER);
++		if (!key)
++			goto err_put;
++
++		err = -EFAULT;
++		if (copy_from_user(key, ukey, map->key_size) != 0)
++			goto free_key;
++	} else {
++		key = NULL;
++	}
+ 
+ 	err = -ENOMEM;
+ 	next_key = kmalloc(map->key_size, GFP_USER);
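
Together, the three bpf hunks make key == NULL mean "give me the first key":
the syscall path now passes a NULL key through, the array map treats it as
U32_MAX, and the hash map jumps straight to find_first_elem. Userspace can
therefore walk a whole map without guessing a seed key. A hedged sketch using
the raw syscall (assumes 4-byte keys and a kernel with this change; error
handling trimmed):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static void dump_keys(int map_fd)
    {
    	union bpf_attr attr;
    	__u32 key, next;
    	void *cur = NULL;	/* NULL: ask the kernel for the first key */

    	for (;;) {
    		memset(&attr, 0, sizeof(attr));
    		attr.map_fd = map_fd;
    		attr.key = (__u64)(unsigned long)cur;
    		attr.next_key = (__u64)(unsigned long)&next;
    		if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
    			    sizeof(attr)) < 0)
    			break;	/* ENOENT: past the last key */
    		printf("key %u\n", next);
    		key = next;
    		cur = &key;
    	}
    }
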
+diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
+index d659487254d5..d37acf86037a 100644
+--- a/kernel/events/callchain.c
++++ b/kernel/events/callchain.c
+@@ -107,14 +107,8 @@ int get_callchain_buffers(void)
+ 		goto exit;
+ 	}
+ 
+-	if (count > 1) {
+-		/* If the allocation failed, give up */
+-		if (!callchain_cpus_entries)
+-			err = -ENOMEM;
+-		goto exit;
+-	}
+-
+-	err = alloc_callchain_buffers();
++	if (count == 1)
++		err = alloc_callchain_buffers();
+ exit:
+ 	if (err)
+ 		atomic_dec(&nr_callchain_events);
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e5553bdaf6c2..c6e653201737 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5107,9 +5107,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
+ 	__output_copy(handle, values, n * sizeof(u64));
+ }
+ 
+-/*
+- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
+- */
+ static void perf_output_read_group(struct perf_output_handle *handle,
+ 			    struct perf_event *event,
+ 			    u64 enabled, u64 running)
+@@ -5154,6 +5151,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
+ #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+ 				 PERF_FORMAT_TOTAL_TIME_RUNNING)
+ 
++/*
++ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
++ *
++ * The problem is that it's both hard and excessively expensive to iterate the
++ * child list, not to mention that it's impossible to IPI the children running
++ * on another CPU, from interrupt/NMI context.
++ */
+ static void perf_output_read(struct perf_output_handle *handle,
+ 			     struct perf_event *event)
+ {
+@@ -7630,9 +7634,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 	local64_set(&hwc->period_left, hwc->sample_period);
+ 
+ 	/*
+-	 * we currently do not support PERF_FORMAT_GROUP on inherited events
++	 * We currently do not support PERF_SAMPLE_READ on inherited events.
++	 * See perf_output_read().
+ 	 */
+-	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
++	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
+ 		goto err_ns;
+ 
+ 	if (!has_branch_stack(event))
+@@ -7800,9 +7805,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
+ 		 * __u16 sample size limit.
+ 		 */
+ 		if (attr->sample_stack_user >= USHRT_MAX)
+-			ret = -EINVAL;
++			return -EINVAL;
+ 		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
+-			ret = -EINVAL;
++			return -EINVAL;
+ 	}
+ 
+ 	if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
+diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
+index 92ce5f4ccc26..a27245fdcd81 100644
+--- a/kernel/events/hw_breakpoint.c
++++ b/kernel/events/hw_breakpoint.c
+@@ -427,16 +427,9 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
+  * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
+  * @bp: the breakpoint structure to modify
+  * @attr: new breakpoint attributes
+- * @triggered: callback to trigger when we hit the breakpoint
+- * @tsk: pointer to 'task_struct' of the process to which the address belongs
+  */
+ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
+ {
+-	u64 old_addr = bp->attr.bp_addr;
+-	u64 old_len = bp->attr.bp_len;
+-	int old_type = bp->attr.bp_type;
+-	int err = 0;
+-
+ 	/*
+ 	 * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
+ 	 * will not be possible to raise IPIs that invoke __perf_event_disable.
+@@ -451,27 +444,18 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
+ 	bp->attr.bp_addr = attr->bp_addr;
+ 	bp->attr.bp_type = attr->bp_type;
+ 	bp->attr.bp_len = attr->bp_len;
++	bp->attr.disabled = 1;
+ 
+-	if (attr->disabled)
+-		goto end;
+-
+-	err = validate_hw_breakpoint(bp);
+-	if (!err)
+-		perf_event_enable(bp);
++	if (!attr->disabled) {
++		int err = validate_hw_breakpoint(bp);
+ 
+-	if (err) {
+-		bp->attr.bp_addr = old_addr;
+-		bp->attr.bp_type = old_type;
+-		bp->attr.bp_len = old_len;
+-		if (!bp->attr.disabled)
+-			perf_event_enable(bp);
++		if (err)
++			return err;
+ 
+-		return err;
++		perf_event_enable(bp);
++		bp->attr.disabled = 0;
+ 	}
+ 
+-end:
+-	bp->attr.disabled = attr->disabled;
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 4195616b27d9..8944e397cd47 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -400,6 +400,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+ 	unsigned long address = (unsigned long)uaddr;
+ 	struct mm_struct *mm = current->mm;
+ 	struct page *page, *page_head;
++	struct address_space *mapping;
+ 	int err, ro = 0;
+ 
+ 	/*
+@@ -478,7 +479,19 @@ again:
+ 	}
+ #endif
+ 
+-	lock_page(page_head);
++	/*
++	 * The treatment of mapping from this point on is critical. The page
++	 * lock protects many things but in this context the page lock
++	 * stabilizes mapping, prevents inode freeing in the shared
++	 * file-backed region case and guards against movement to swap cache.
++	 *
++	 * Strictly speaking the page lock is not needed in all cases being
++	 * considered here and the page lock forces unnecessary serialization.
++	 * From this point on, mapping will be re-verified if necessary and
++	 * page lock will be acquired only if it is unavoidable
++	 */
++
++	mapping = READ_ONCE(page_head->mapping);
+ 
+ 	/*
+ 	 * If page_head->mapping is NULL, then it cannot be a PageAnon
+@@ -495,18 +508,31 @@ again:
+ 	 * shmem_writepage move it from filecache to swapcache beneath us:
+ 	 * an unlikely race, but we do need to retry for page_head->mapping.
+ 	 */
+-	if (!page_head->mapping) {
+-		int shmem_swizzled = PageSwapCache(page_head);
++	if (unlikely(!mapping)) {
++		int shmem_swizzled;
++
++		/*
++		 * Page lock is required to identify which special case above
++		 * applies. If this is really a shmem page then the page lock
++		 * will prevent unexpected transitions.
++		 */
++		lock_page(page);
++		shmem_swizzled = PageSwapCache(page) || page->mapping;
+ 		unlock_page(page_head);
+ 		put_page(page_head);
++
+ 		if (shmem_swizzled)
+ 			goto again;
++
+ 		return -EFAULT;
+ 	}
+ 
+ 	/*
+ 	 * Private mappings are handled in a simple way.
+ 	 *
++	 * If the futex key is stored on an anonymous page, then the associated
++	 * object is the mm which is implicitly pinned by the calling process.
++	 *
+ 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
+ 	 * it's a read-only handle, it's expected that futexes attach to
+ 	 * the object not the particular process.
+@@ -524,16 +550,74 @@ again:
+ 		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
+ 		key->private.mm = mm;
+ 		key->private.address = address;
++
++		get_futex_key_refs(key); /* implies smp_mb(); (B) */
++
+ 	} else {
++		struct inode *inode;
++
++		/*
++		 * The associated futex object in this case is the inode and
++		 * the page->mapping must be traversed. Ordinarily this should
++		 * be stabilised under page lock but it's not strictly
++		 * necessary in this case as we just want to pin the inode, not
++		 * update the radix tree or anything like that.
++		 *
++		 * The RCU read lock is taken as the inode is finally freed
++		 * under RCU. If the mapping still matches expectations then the
++		 * mapping->host can be safely accessed as being a valid inode.
++		 */
++		rcu_read_lock();
++
++		if (READ_ONCE(page_head->mapping) != mapping) {
++			rcu_read_unlock();
++			put_page(page_head);
++
++			goto again;
++		}
++
++		inode = READ_ONCE(mapping->host);
++		if (!inode) {
++			rcu_read_unlock();
++			put_page(page_head);
++
++			goto again;
++		}
++
++		/*
++		 * Take a reference unless it is about to be freed. Previously
++		 * this reference was taken by ihold under the page lock
++		 * pinning the inode in place so i_lock was unnecessary. The
++		 * only way for this check to fail is if the inode was
++		 * truncated in parallel so warn for now if this happens.
++		 *
++		 * We are not calling into get_futex_key_refs() in file-backed
++		 * cases, therefore a successful atomic_inc return below will
++		 * guarantee that get_futex_key() will still imply smp_mb(); (B).
++		 */
++		if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
++			rcu_read_unlock();
++			put_page(page_head);
++
++			goto again;
++		}
++
++		/* Should be impossible but let's be paranoid for now */
++		if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
++			err = -EFAULT;
++			rcu_read_unlock();
++			iput(inode);
++
++			goto out;
++		}
++
+ 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
+-		key->shared.inode = page_head->mapping->host;
++		key->shared.inode = inode;
+ 		key->shared.pgoff = basepage_index(page);
++		rcu_read_unlock();
+ 	}
+ 
+-	get_futex_key_refs(key); /* implies MB (B) */
+-
+ out:
+-	unlock_page(page_head);
+ 	put_page(page_head);
+ 	return err;
+ }
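
The rewritten shared-futex path is lock-free in the common case: it samples
page->mapping once, and for inode-backed keys pins the inode under
rcu_read_lock() with atomic_inc_not_zero(), retrying from "again" whenever the
mapping changed or the inode was already being freed. The underlying "take a
reference unless the object is dying" idiom, in isolation (the demo types are
hypothetical):

    #include <linux/atomic.h>
    #include <linux/rcupdate.h>

    struct demo_obj {
    	atomic_t refs;		/* freed via RCU once this hits zero */
    };

    /* Returns the object with a reference held, or NULL if it was dying. */
    static struct demo_obj *demo_tryget(struct demo_obj __rcu **slot)
    {
    	struct demo_obj *obj;

    	rcu_read_lock();
    	obj = rcu_dereference(*slot);
    	if (obj && !atomic_inc_not_zero(&obj->refs))
    		obj = NULL;	/* refcount already zero: do not resurrect */
    	rcu_read_unlock();
    	return obj;
    }
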
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index f4b1f0a1dba5..76c9d6f62458 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -1095,8 +1095,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
+ 		 * set the trigger type must match. Also all must
+ 		 * agree on ONESHOT.
+ 		 */
++		unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
++
+ 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
+-		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
++		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
+ 		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
+ 			goto mismatch;
+ 
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 7dd73a3059b8..04486d8e5809 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -125,7 +125,7 @@ static void *alloc_insn_page(void)
+ 	return module_alloc(PAGE_SIZE);
+ }
+ 
+-static void free_insn_page(void *page)
++void __weak free_insn_page(void *page)
+ {
+ 	module_memfree(page);
+ }
+diff --git a/kernel/pid.c b/kernel/pid.c
+index 4fd07d5b7baf..365281244acc 100644
+--- a/kernel/pid.c
++++ b/kernel/pid.c
+@@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
+ 	}
+ 
+ 	if (unlikely(is_child_reaper(pid))) {
+-		if (pid_ns_prepare_proc(ns))
++		if (pid_ns_prepare_proc(ns)) {
++			disable_pid_allocation(ns);
+ 			goto out_free;
++		}
+ 	}
+ 
+ 	get_pid_ns(ns);
+diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
+index 276762f3a460..e035876af5e2 100644
+--- a/kernel/printk/braille.c
++++ b/kernel/printk/braille.c
+@@ -2,12 +2,13 @@
+ 
+ #include <linux/kernel.h>
+ #include <linux/console.h>
++#include <linux/errno.h>
+ #include <linux/string.h>
+ 
+ #include "console_cmdline.h"
+ #include "braille.h"
+ 
+-char *_braille_console_setup(char **str, char **brl_options)
++int _braille_console_setup(char **str, char **brl_options)
+ {
+ 	if (!memcmp(*str, "brl,", 4)) {
+ 		*brl_options = "";
+@@ -15,14 +16,14 @@ char *_braille_console_setup(char **str, char **brl_options)
+ 	} else if (!memcmp(str, "brl=", 4)) {
+ 		*brl_options = *str + 4;
+ 		*str = strchr(*brl_options, ',');
+-		if (!*str)
++		if (!*str) {
+ 			pr_err("need port name after brl=\n");
+-		else
+-			*((*str)++) = 0;
+-	} else
+-		return NULL;
++			return -EINVAL;
++		}
++		*((*str)++) = 0;
++	}
+ 
+-	return *str;
++	return 0;
+ }
+ 
+ int
+diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
+index 769d771145c8..749a6756843a 100644
+--- a/kernel/printk/braille.h
++++ b/kernel/printk/braille.h
+@@ -9,7 +9,14 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
+ 	c->brl_options = brl_options;
+ }
+ 
+-char *
++/*
++ * Setup console according to braille options.
++ * Return -EINVAL on syntax error, 0 on success (or no braille option was
++ * actually given).
++ * Modifies str to point to the serial options.
++ * Sets brl_options to the parsed braille options.
++ */
++int
+ _braille_console_setup(char **str, char **brl_options);
+ 
+ int
+@@ -25,10 +32,10 @@ braille_set_options(struct console_cmdline *c, char *brl_options)
+ {
+ }
+ 
+-static inline char *
++static inline int
+ _braille_console_setup(char **str, char **brl_options)
+ {
+-	return NULL;
++	return 0;
+ }
+ 
+ static inline int
+diff --git a/kernel/resource.c b/kernel/resource.c
+index cbf725c24c3b..39ee5aeaf1e3 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -600,7 +600,8 @@ static int __find_resource(struct resource *root, struct resource *old,
+ 			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
+ 					size, constraint->align);
+ 			alloc.end = alloc.start + size - 1;
+-			if (resource_contains(&avail, &alloc)) {
++			if (alloc.start <= alloc.end &&
++			    resource_contains(&avail, &alloc)) {
+ 				new->start = alloc.start;
+ 				new->end = alloc.end;
+ 				return 0;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9c905bd94ff0..5e7608c5b9ec 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -578,7 +578,8 @@ void resched_cpu(int cpu)
+ 	unsigned long flags;
+ 
+ 	raw_spin_lock_irqsave(&rq->lock, flags);
+-	resched_curr(rq);
++	if (cpu_online(cpu) || cpu == smp_processor_id())
++		resched_curr(rq);
+ 	raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+ 
+diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
+index a26036d37a38..382b159d8592 100644
+--- a/kernel/time/sched_clock.c
++++ b/kernel/time/sched_clock.c
+@@ -205,6 +205,11 @@ sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
+ 
+ 	update_clock_read_data(&rd);
+ 
++	if (sched_clock_timer.function != NULL) {
++		/* update timeout for clock wrap */
++		hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
++	}
++
+ 	r = rate;
+ 	if (r >= 4000000) {
+ 		r /= 1000000;
+diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
+index e878c2e0ba45..e81d45097d05 100644
+--- a/kernel/time/timer_list.c
++++ b/kernel/time/timer_list.c
+@@ -16,6 +16,7 @@
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <linux/kallsyms.h>
++#include <linux/nmi.h>
+ 
+ #include <asm/uaccess.h>
+ 
+@@ -91,6 +92,9 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
+ 
+ next_one:
+ 	i = 0;
++
++	touch_nmi_watchdog();
++
+ 	raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
+ 
+ 	curr = timerqueue_getnext(&base->active);
+@@ -202,6 +206,8 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
+ {
+ 	struct clock_event_device *dev = td->evtdev;
+ 
++	touch_nmi_watchdog();
++
+ 	SEQ_printf(m, "Tick Device: mode:     %d\n", td->mode);
+ 	if (cpu < 0)
+ 		SEQ_printf(m, "Broadcast device\n");
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 9a4aee1d3345..2bdb78ab3bd2 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -611,7 +611,7 @@ static int create_trace_kprobe(int argc, char **argv)
+ 	bool is_return = false, is_delete = false;
+ 	char *symbol = NULL, *event = NULL, *group = NULL;
+ 	char *arg;
+-	unsigned long offset = 0;
++	long offset = 0;
+ 	void *addr = NULL;
+ 	char buf[MAX_EVENT_NAME_LEN];
+ 
+@@ -679,7 +679,7 @@ static int create_trace_kprobe(int argc, char **argv)
+ 		symbol = argv[1];
+ 		/* TODO: support .init module functions */
+ 		ret = traceprobe_split_symbol_offset(symbol, &offset);
+-		if (ret) {
++		if (ret || offset < 0 || offset > UINT_MAX) {
+ 			pr_info("Failed to parse either an address or a symbol.\n");
+ 			return ret;
+ 		}
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index 1769a81da8a7..741c00b90fdc 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -293,7 +293,7 @@ static fetch_func_t get_fetch_size_function(const struct fetch_type *type,
+ }
+ 
+ /* Split symbol and offset. */
+-int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
++int traceprobe_split_symbol_offset(char *symbol, long *offset)
+ {
+ 	char *tmp;
+ 	int ret;
+@@ -301,13 +301,11 @@ int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset)
+ 	if (!offset)
+ 		return -EINVAL;
+ 
+-	tmp = strchr(symbol, '+');
++	tmp = strpbrk(symbol, "+-");
+ 	if (tmp) {
+-		/* skip sign because kstrtoul doesn't accept '+' */
+-		ret = kstrtoul(tmp + 1, 0, offset);
++		ret = kstrtol(tmp, 0, offset);
+ 		if (ret)
+ 			return ret;
+-
+ 		*tmp = '\0';
+ 	} else
+ 		*offset = 0;
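
strpbrk(symbol, "+-") plus kstrtol() is what lets kprobe definitions carry
signed offsets: "func+0x10" and "func-16" both parse, with the sign consumed as
part of the number (kstrtoul rejected a leading '+', hence the old
skip-one-character dance). create_trace_kprobe() then rejects anything outside
0..UINT_MAX. A userspace sketch of the split, with strtol standing in for
kstrtol (which additionally reports parse errors):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Split "symbol+off" or "symbol-off" in place. */
    static void split_symbol_offset(char *symbol, long *offset)
    {
    	char *tmp = strpbrk(symbol, "+-");

    	if (tmp) {
    		*offset = strtol(tmp, NULL, 0);	/* sign parsed with value */
    		*tmp = '\0';
    	} else {
    		*offset = 0;
    	}
    }

    int main(void)
    {
    	char a[] = "vfs_read+0x10", b[] = "vfs_read-16";
    	long off;

    	split_symbol_offset(a, &off);
    	printf("%s %+ld\n", a, off);	/* vfs_read +16 */
    	split_symbol_offset(b, &off);
    	printf("%s %+ld\n", b, off);	/* vfs_read -16 */
    	return 0;
    }
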
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index ab283e146b70..80c4ff36896c 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -335,7 +335,7 @@ extern int traceprobe_conflict_field_name(const char *name,
+ extern void traceprobe_update_arg(struct probe_arg *arg);
+ extern void traceprobe_free_probe_arg(struct probe_arg *arg);
+ 
+-extern int traceprobe_split_symbol_offset(char *symbol, unsigned long *offset);
++extern int traceprobe_split_symbol_offset(char *symbol, long *offset);
+ 
+ extern ssize_t traceprobe_probes_write(struct file *file,
+ 		const char __user *buffer, size_t count, loff_t *ppos,
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 6dd022c7b5bc..1b11c3c21a29 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -149,6 +149,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
+ 		return;
+ 
+ 	ret = strncpy_from_user(dst, src, maxlen);
++	if (ret == maxlen)
++		dst[--ret] = '\0';
+ 
+ 	if (ret < 0) {	/* Failed to fetch string */
+ 		((u8 *)get_rloc_data(dest))[0] = '\0';
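
The uprobe fetch fix handles strncpy_from_user()'s truncation contract: a return
value equal to maxlen means the buffer was filled with no room left for a
trailing NUL, so the code now overwrites the last byte with '\0' and reports the
shortened length. The same contract and fix, modeled in userspace:

    #include <stdio.h>

    /*
     * Models strncpy_from_user(): copies at most maxlen bytes and
     * returns the string length; a return of maxlen means truncation,
     * in which case dst is NOT NUL-terminated.
     */
    static long copy_string(char *dst, const char *src, long maxlen)
    {
    	long i;

    	for (i = 0; i < maxlen; i++) {
    		dst[i] = src[i];
    		if (!src[i])
    			return i;
    	}
    	return maxlen;
    }

    int main(void)
    {
    	char buf[8];
    	long ret = copy_string(buf, "this is too long", sizeof(buf));

    	if (ret == sizeof(buf))
    		buf[--ret] = '\0';	/* the fix: force termination */
    	printf("len=%ld str=\"%s\"\n", ret, buf);	/* len=7 "this is" */
    	return 0;
    }
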
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 3490407dc7b7..4b12034e15b0 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -185,7 +185,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_add(&tp_funcs, func);
+ 	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(1);
++		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ 		return PTR_ERR(old);
+ 	}
+ 
+@@ -218,7 +218,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
+ 			lockdep_is_held(&tracepoints_mutex));
+ 	old = func_remove(&tp_funcs, func);
+ 	if (IS_ERR(old)) {
+-		WARN_ON_ONCE(1);
++		WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ 		return PTR_ERR(old);
+ 	}
+ 
+diff --git a/lib/kobject.c b/lib/kobject.c
+index 3b841b97fccd..bb89e879d3a4 100644
+--- a/lib/kobject.c
++++ b/lib/kobject.c
+@@ -234,14 +234,12 @@ static int kobject_add_internal(struct kobject *kobj)
+ 
+ 		/* be noisy on error issues */
+ 		if (error == -EEXIST)
+-			WARN(1, "%s failed for %s with "
+-			     "-EEXIST, don't try to register things with "
+-			     "the same name in the same directory.\n",
+-			     __func__, kobject_name(kobj));
++			pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
++			       __func__, kobject_name(kobj));
+ 		else
+-			WARN(1, "%s failed for %s (error: %d parent: %s)\n",
+-			     __func__, kobject_name(kobj), error,
+-			     parent ? kobject_name(parent) : "'none'");
++			pr_err("%s failed for %s (error: %d parent: %s)\n",
++			       __func__, kobject_name(kobj), error,
++			       parent ? kobject_name(parent) : "'none'");
+ 	} else
+ 		kobj->state_in_sysfs = 1;
+ 
+diff --git a/mm/percpu.c b/mm/percpu.c
+index a40d5e04a3d1..1b95b9fdd616 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -68,6 +68,7 @@
+ #include <linux/vmalloc.h>
+ #include <linux/workqueue.h>
+ #include <linux/kmemleak.h>
++#include <linux/sched.h>
+ 
+ #include <asm/cacheflush.h>
+ #include <asm/sections.h>
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 93010f34c200..00fabb3424df 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -29,6 +29,7 @@
+ #include <linux/net_tstamp.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
++#include <linux/phy.h>
+ #include <net/arp.h>
+ 
+ #include "vlan.h"
+@@ -559,8 +560,7 @@ static int vlan_dev_init(struct net_device *dev)
+ 			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+ 			   NETIF_F_ALL_FCOE;
+ 
+-	dev->features |= real_dev->vlan_features | NETIF_F_LLTX |
+-			 NETIF_F_GSO_SOFTWARE;
++	dev->features |= dev->hw_features | NETIF_F_LLTX;
+ 	dev->gso_max_size = real_dev->gso_max_size;
+ 	if (dev->features & NETIF_F_VLAN_FEATURES)
+ 		netdev_warn(real_dev, "VLAN features are set incorrectly.  Q-in-Q configurations may not work correctly.\n");
+@@ -655,8 +655,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev,
+ {
+ 	const struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ 	const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops;
++	struct phy_device *phydev = vlan->real_dev->phydev;
+ 
+-	if (ops->get_ts_info) {
++	if (phydev && phydev->drv && phydev->drv->ts_info) {
++		 return phydev->drv->ts_info(phydev, info);
++	} else if (ops->get_ts_info) {
+ 		return ops->get_ts_info(vlan->real_dev, info);
+ 	} else {
+ 		info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index bd3357e69c5c..1732fe952089 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -1589,10 +1589,22 @@ int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
+ 		/* if yes, the client has roamed and we have
+ 		 * to unclaim it.
+ 		 */
+-		batadv_handle_unclaim(bat_priv, primary_if,
+-				      primary_if->net_dev->dev_addr,
+-				      ethhdr->h_source, vid);
+-		goto allow;
++		if (batadv_has_timed_out(claim->lasttime, 100)) {
++			/* only unclaim if the last claim entry is
++			 * older than 100 ms to make sure we really
++			 * have a roaming client here.
++			 */
++			batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Roaming client %pM detected. Unclaim it.\n",
++				   ethhdr->h_source);
++			batadv_handle_unclaim(bat_priv, primary_if,
++					      primary_if->net_dev->dev_addr,
++					      ethhdr->h_source, vid);
++			goto allow;
++		} else {
++			batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_tx(): Race for claim %pM detected. Drop packet.\n",
++				   ethhdr->h_source);
++			goto handled;
++		}
+ 	}
+ 
+ 	/* check if it is a multicast/broadcast frame */
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index c4802f3bd4c5..e0d20501df76 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -663,6 +663,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
+ {
+ 	struct hci_dev *hdev = req->hdev;
+ 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
++	bool changed = false;
+ 
+ 	/* If Connectionless Slave Broadcast master role is supported
+ 	 * enable all necessary events for it.
+@@ -672,6 +673,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
+ 		events[1] |= 0x80;	/* Synchronization Train Complete */
+ 		events[2] |= 0x10;	/* Slave Page Response Timeout */
+ 		events[2] |= 0x20;	/* CSB Channel Map Change */
++		changed = true;
+ 	}
+ 
+ 	/* If Connectionless Slave Broadcast slave role is supported
+@@ -682,13 +684,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
+ 		events[2] |= 0x02;	/* CSB Receive */
+ 		events[2] |= 0x04;	/* CSB Timeout */
+ 		events[2] |= 0x08;	/* Truncated Page Complete */
++		changed = true;
+ 	}
+ 
+ 	/* Enable Authenticated Payload Timeout Expired event if supported */
+-	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
++	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
+ 		events[2] |= 0x80;
++		changed = true;
++	}
+ 
+-	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
++	/* Some Broadcom based controllers indicate support for Set Event
++	 * Mask Page 2 command, but then actually do not support it. Since
++	 * the default value is all bits set to zero, the command is only
++	 * required if the event mask has to be changed. In case no change
++	 * to the event mask is needed, skip this command.
++	 */
++	if (changed)
++		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
++			    sizeof(events), events);
+ }
+ 
+ static void hci_init3_req(struct hci_request *req, unsigned long opt)
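[The hci_core change tracks whether any bit of the page-2 event mask was actually set and skips the command when the mask would remain the all-zero default, to avoid confusing Broadcom controllers that advertise but do not implement it. A compact sketch of the same track-changes pattern; build_mask and the capability flags are hypothetical names.]

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Build an event mask from capability bits; return whether any bit
 * was really set, so the caller can skip the command when false,
 * mirroring the 'changed' flag added above. */
static bool build_mask(uint8_t events[8], bool cap_a, bool cap_b)
{
	bool changed = false;

	memset(events, 0, 8);
	if (cap_a) {
		events[1] |= 0x80;
		changed = true;
	}
	if (cap_b) {
		events[2] |= 0x0f;
		changed = true;
	}
	return changed;
}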
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index e4b56fcb5d4e..e259b9da05f1 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2250,8 +2250,14 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	else
+ 		sec_level = authreq_to_seclevel(auth);
+ 
+-	if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK))
++	if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) {
++		/* If link is already encrypted with sufficient security we
++		 * still need refresh encryption as per Core Spec 5.0 Vol 3,
++		 * Part H 2.4.6
++		 */
++		smp_ltk_encrypt(conn, hcon->sec_level);
+ 		return 0;
++	}
+ 
+ 	if (sec_level > hcon->pending_sec_level)
+ 		hcon->pending_sec_level = sec_level;
+diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
+index 9637a681bdda..9adf16258cab 100644
+--- a/net/bridge/netfilter/ebt_among.c
++++ b/net/bridge/netfilter/ebt_among.c
+@@ -177,6 +177,28 @@ static bool poolsize_invalid(const struct ebt_mac_wormhash *w)
+ 	return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple));
+ }
+ 
++static bool wormhash_offset_invalid(int off, unsigned int len)
++{
++	if (off == 0) /* not present */
++		return false;
++
++	if (off < (int)sizeof(struct ebt_among_info) ||
++	    off % __alignof__(struct ebt_mac_wormhash))
++		return true;
++
++	off += sizeof(struct ebt_mac_wormhash);
++
++	return off > len;
++}
++
++static bool wormhash_sizes_valid(const struct ebt_mac_wormhash *wh, int a, int b)
++{
++	if (a == 0)
++		a = sizeof(struct ebt_among_info);
++
++	return ebt_mac_wormhash_size(wh) + a == b;
++}
++
+ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ {
+ 	const struct ebt_among_info *info = par->matchinfo;
+@@ -189,6 +211,10 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ 	if (expected_length > em->match_size)
+ 		return -EINVAL;
+ 
++	if (wormhash_offset_invalid(info->wh_dst_ofs, em->match_size) ||
++	    wormhash_offset_invalid(info->wh_src_ofs, em->match_size))
++		return -EINVAL;
++
+ 	wh_dst = ebt_among_wh_dst(info);
+ 	if (poolsize_invalid(wh_dst))
+ 		return -EINVAL;
+@@ -201,6 +227,14 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
+ 	if (poolsize_invalid(wh_src))
+ 		return -EINVAL;
+ 
++	if (info->wh_src_ofs < info->wh_dst_ofs) {
++		if (!wormhash_sizes_valid(wh_src, info->wh_src_ofs, info->wh_dst_ofs))
++			return -EINVAL;
++	} else {
++		if (!wormhash_sizes_valid(wh_dst, info->wh_dst_ofs, info->wh_src_ofs))
++			return -EINVAL;
++	}
++
+ 	expected_length += ebt_mac_wormhash_size(wh_src);
+ 
+ 	if (em->match_size != EBT_ALIGN(expected_length)) {
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index d30864a8ed57..1e08c25c43f3 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2518,6 +2518,11 @@ static int try_write(struct ceph_connection *con)
+ 	int ret = 1;
+ 
+ 	dout("try_write start %p state %lu\n", con, con->state);
++	if (con->state != CON_STATE_PREOPEN &&
++	    con->state != CON_STATE_CONNECTING &&
++	    con->state != CON_STATE_NEGOTIATING &&
++	    con->state != CON_STATE_OPEN)
++		return 0;
+ 
+ more:
+ 	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
+@@ -2543,6 +2548,8 @@ more:
+ 	}
+ 
+ more_kvec:
++	BUG_ON(!con->sock);
++
+ 	/* kvec data queued? */
+ 	if (con->out_kvec_left) {
+ 		ret = write_partial_kvec(con);
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index bc95e48d5cfb..378c9ed00d40 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
+ 		u32 yes;
+ 		struct crush_rule *r;
+ 
++		err = -EINVAL;
+ 		ceph_decode_32_safe(p, end, yes, bad);
+ 		if (!yes) {
+ 			dout("crush_decode NO rule %d off %x %p to %p\n",
+diff --git a/net/core/dev.c b/net/core/dev.c
+index c2d927f91a30..1cbbc79b4509 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -957,7 +957,7 @@ bool dev_valid_name(const char *name)
+ {
+ 	if (*name == '\0')
+ 		return false;
+-	if (strlen(name) >= IFNAMSIZ)
++	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
+ 		return false;
+ 	if (!strcmp(name, ".") || !strcmp(name, ".."))
+ 		return false;
+@@ -2430,7 +2430,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
+ 		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
+ 			return 0;
+ 
+-		eth = (struct ethhdr *)skb_mac_header(skb);
++		eth = (struct ethhdr *)skb->data;
+ 		type = eth->h_proto;
+ 	}
+ 
+@@ -2621,7 +2621,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
+ }
+ EXPORT_SYMBOL(passthru_features_check);
+ 
+-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
++static netdev_features_t dflt_features_check(struct sk_buff *skb,
+ 					     struct net_device *dev,
+ 					     netdev_features_t features)
+ {
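[The dev_valid_name() fix swaps an unbounded strlen() for strnlen(), so validation never scans past IFNAMSIZ bytes of a possibly unterminated buffer. A hedged userspace rendering of the function's logic follows; it is abridged, and the character loop follows the kernel function from memory, so details may differ.]

#include <stdbool.h>
#include <string.h>
#include <ctype.h>

#define IFNAMSIZ 16	/* matches the kernel constant */

static bool dev_valid_name_demo(const char *name)
{
	if (*name == '\0')
		return false;
	/* strnlen() stops at IFNAMSIZ, so an unterminated buffer is
	 * rejected instead of being scanned past its end. */
	if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' ||
		    isspace((unsigned char)*name))
			return false;
		name++;
	}
	return true;
}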
+diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
+index c0548d268e1a..e3e6a3e2ca22 100644
+--- a/net/core/dev_addr_lists.c
++++ b/net/core/dev_addr_lists.c
+@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
+ 		return -EINVAL;
+ 
+ 	list_for_each_entry(ha, &list->list, list) {
+-		if (!memcmp(ha->addr, addr, addr_len) &&
+-		    ha->type == addr_type) {
++		if (ha->type == addr_type &&
++		    !memcmp(ha->addr, addr, addr_len)) {
+ 			if (global) {
+ 				/* check if addr is already used as global */
+ 				if (ha->global_use)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 5fd6c6e699aa..c305645b22bc 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -54,7 +54,8 @@ do {						\
+ static void neigh_timer_handler(unsigned long arg);
+ static void __neigh_notify(struct neighbour *n, int type, int flags);
+ static void neigh_update_notify(struct neighbour *neigh);
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev);
+ 
+ #ifdef CONFIG_PROC_FS
+ static const struct file_operations neigh_stat_seq_fops;
+@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ {
+ 	write_lock_bh(&tbl->lock);
+ 	neigh_flush_dev(tbl, dev);
+-	pneigh_ifdown(tbl, dev);
+-	write_unlock_bh(&tbl->lock);
++	pneigh_ifdown_and_unlock(tbl, dev);
+ 
+ 	del_timer_sync(&tbl->proxy_timer);
+ 	pneigh_queue_purge(&tbl->proxy_queue);
+@@ -641,9 +641,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ 	return -ENOENT;
+ }
+ 
+-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
++static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
++				    struct net_device *dev)
+ {
+-	struct pneigh_entry *n, **np;
++	struct pneigh_entry *n, **np, *freelist = NULL;
+ 	u32 h;
+ 
+ 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+@@ -651,16 +652,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ 		while ((n = *np) != NULL) {
+ 			if (!dev || n->dev == dev) {
+ 				*np = n->next;
+-				if (tbl->pdestructor)
+-					tbl->pdestructor(n);
+-				if (n->dev)
+-					dev_put(n->dev);
+-				kfree(n);
++				n->next = freelist;
++				freelist = n;
+ 				continue;
+ 			}
+ 			np = &n->next;
+ 		}
+ 	}
++	write_unlock_bh(&tbl->lock);
++	while ((n = freelist)) {
++		freelist = n->next;
++		n->next = NULL;
++		if (tbl->pdestructor)
++			tbl->pdestructor(n);
++		if (n->dev)
++			dev_put(n->dev);
++		kfree(n);
++	}
+ 	return -ENOENT;
+ }
+ 
+@@ -1127,10 +1135,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 		lladdr = neigh->ha;
+ 	}
+ 
+-	if (new & NUD_CONNECTED)
+-		neigh->confirmed = jiffies;
+-	neigh->updated = jiffies;
+-
+ 	/* If entry was valid and address is not changed,
+ 	   do not change entry state, if new one is STALE.
+ 	 */
+@@ -1154,6 +1158,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 		}
+ 	}
+ 
++	/* Update timestamps only once we know we will make a change to the
++	 * neighbour entry. Otherwise we risk to move the locktime window with
++	 * noop updates and ignore relevant ARP updates.
++	 */
++	if (new != old || lladdr != neigh->ha) {
++		if (new & NUD_CONNECTED)
++			neigh->confirmed = jiffies;
++		neigh->updated = jiffies;
++	}
++
+ 	if (new != old) {
+ 		neigh_del_timer(neigh);
+ 		if (new & NUD_IN_TIMER)
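[pneigh_ifdown_and_unlock() now unlinks matching entries while holding the table lock but defers the destructor, dev_put() and kfree() work until after the lock is dropped, parking victims on a local freelist. A minimal pthread sketch of that unlink-then-free pattern; entry and remove_matching are illustrative names.]

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int key;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table_head;

/* Entries are unlinked under the lock but freed only after it is
 * released, so the (potentially slow) teardown never runs with the
 * lock held. */
static void remove_matching(int key)
{
	struct entry *n, **np = &table_head, *freelist = NULL;

	pthread_mutex_lock(&table_lock);
	while ((n = *np) != NULL) {
		if (n->key == key) {
			*np = n->next;		/* unlink */
			n->next = freelist;	/* park on local list */
			freelist = n;
			continue;
		}
		np = &n->next;
	}
	pthread_mutex_unlock(&table_lock);

	while ((n = freelist)) {		/* teardown outside the lock */
		freelist = n->next;
		free(n);
	}
}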
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 572af0011997..9195a109ea79 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -268,6 +268,25 @@ out_undo:
+ 	goto out;
+ }
+ 
++static int __net_init net_defaults_init_net(struct net *net)
++{
++	net->core.sysctl_somaxconn = SOMAXCONN;
++	return 0;
++}
++
++static struct pernet_operations net_defaults_ops = {
++	.init = net_defaults_init_net,
++};
++
++static __init int net_defaults_init(void)
++{
++	if (register_pernet_subsys(&net_defaults_ops))
++		panic("Cannot initialize net default settings");
++
++	return 0;
++}
++
++core_initcall(net_defaults_init);
+ 
+ #ifdef CONFIG_NET_NS
+ static struct kmem_cache *net_cachep;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 853e82075ebd..ac1436be3cf7 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -875,6 +875,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
+ 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+ 	n->cloned = 1;
+ 	n->nohdr = 0;
++	n->peeked = 0;
+ 	n->destructor = NULL;
+ 	C(tail);
+ 	C(end);
+@@ -2587,7 +2588,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
+ {
+ 	int pos = skb_headlen(skb);
+ 
+-	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
++	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
++				      SKBTX_SHARED_FRAG;
+ 	if (len < pos)	/* Split line is inside header. */
+ 		skb_split_inside_header(skb, skb1, len, pos);
+ 	else		/* Second chunk has no header, nothing to copy. */
+@@ -3133,8 +3135,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ 		skb_copy_from_linear_data_offset(head_skb, offset,
+ 						 skb_put(nskb, hsize), hsize);
+ 
+-		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
+-			SKBTX_SHARED_FRAG;
++		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
++					      SKBTX_SHARED_FRAG;
+ 
+ 		while (pos < offset + len) {
+ 			if (i >= nfrags) {
+@@ -3347,24 +3349,18 @@ void __init skb_init(void)
+ 						NULL);
+ }
+ 
+-/**
+- *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+- *	@skb: Socket buffer containing the buffers to be mapped
+- *	@sg: The scatter-gather list to map into
+- *	@offset: The offset into the buffer's contents to start mapping
+- *	@len: Length of buffer space to be mapped
+- *
+- *	Fill the specified scatter-gather list with mappings/pointers into a
+- *	region of the buffer space attached to a socket buffer.
+- */
+ static int
+-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
++__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
++	       unsigned int recursion_level)
+ {
+ 	int start = skb_headlen(skb);
+ 	int i, copy = start - offset;
+ 	struct sk_buff *frag_iter;
+ 	int elt = 0;
+ 
++	if (unlikely(recursion_level >= 24))
++		return -EMSGSIZE;
++
+ 	if (copy > 0) {
+ 		if (copy > len)
+ 			copy = len;
+@@ -3383,6 +3379,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+ 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ 		if ((copy = end - offset) > 0) {
+ 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
++			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
++				return -EMSGSIZE;
+ 
+ 			if (copy > len)
+ 				copy = len;
+@@ -3397,16 +3395,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+ 	}
+ 
+ 	skb_walk_frags(skb, frag_iter) {
+-		int end;
++		int end, ret;
+ 
+ 		WARN_ON(start > offset + len);
+ 
+ 		end = start + frag_iter->len;
+ 		if ((copy = end - offset) > 0) {
++			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
++				return -EMSGSIZE;
++
+ 			if (copy > len)
+ 				copy = len;
+-			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+-					      copy);
++			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
++					      copy, recursion_level + 1);
++			if (unlikely(ret < 0))
++				return ret;
++			elt += ret;
+ 			if ((len -= copy) == 0)
+ 				return elt;
+ 			offset += copy;
+@@ -3417,6 +3421,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+ 	return elt;
+ }
+ 
++/**
++ *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
++ *	@skb: Socket buffer containing the buffers to be mapped
++ *	@sg: The scatter-gather list to map into
++ *	@offset: The offset into the buffer's contents to start mapping
++ *	@len: Length of buffer space to be mapped
++ *
++ *	Fill the specified scatter-gather list with mappings/pointers into a
++ *	region of the buffer space attached to a socket buffer. Returns either
++ *	the number of scatterlist items used, or -EMSGSIZE if the contents
++ *	could not fit.
++ */
++int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
++{
++	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
++
++	if (nsg <= 0)
++		return nsg;
++
++	sg_mark_end(&sg[nsg - 1]);
++
++	return nsg;
++}
++EXPORT_SYMBOL_GPL(skb_to_sgvec);
++
+ /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
+  * given sglist without marking the sg which contains the last skb data as the
+  * end. So the caller can manipulate the sg list at will when padding new data after
+@@ -3439,19 +3468,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+ int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ 			int offset, int len)
+ {
+-	return __skb_to_sgvec(skb, sg, offset, len);
++	return __skb_to_sgvec(skb, sg, offset, len, 0);
+ }
+ EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
+ 
+-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+-{
+-	int nsg = __skb_to_sgvec(skb, sg, offset, len);
+ 
+-	sg_mark_end(&sg[nsg - 1]);
+-
+-	return nsg;
+-}
+-EXPORT_SYMBOL_GPL(skb_to_sgvec);
+ 
+ /**
+  *	skb_cow_data - Check that a socket buffer's data buffers are writable
+@@ -3589,7 +3610,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+ 
+ 	skb_queue_tail(&sk->sk_error_queue, skb);
+ 	if (!sock_flag(sk, SOCK_DEAD))
+-		sk->sk_data_ready(sk);
++		sk->sk_error_report(sk);
+ 	return 0;
+ }
+ EXPORT_SYMBOL(sock_queue_err_skb);
+@@ -3733,7 +3754,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
+ 		return;
+ 
+ 	if (tsonly) {
+-		skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
++		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
++					     SKBTX_ANY_TSTAMP;
+ 		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
+ 	}
+ 
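[The __skb_to_sgvec() rework threads a recursion_level argument through the fragment walk and fails with -EMSGSIZE beyond 24 levels, instead of recursing without bound through nested fragment lists. A toy sketch of the same depth guard on a generic recursive walk; node and walk are made-up names.]

#include <errno.h>

struct node {
	struct node *next;	/* siblings at this level */
	struct node *child;	/* nested list, like an skb frag_list */
	int len;
};

/* Return the total length, or -EMSGSIZE once nesting exceeds the
 * cap, mirroring the recursion_level guard added above. */
static int walk(const struct node *n, unsigned int recursion_level)
{
	int total = 0;

	if (recursion_level >= 24)
		return -EMSGSIZE;

	for (; n; n = n->next) {
		total += n->len;
		if (n->child) {
			int ret = walk(n->child, recursion_level + 1);

			if (ret < 0)
				return ret;	/* propagate the error */
			total += ret;
		}
	}
	return total;
}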
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index f5ef2115871f..a9a4276609ef 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -423,8 +423,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
+ {
+ 	struct ctl_table *tbl;
+ 
+-	net->core.sysctl_somaxconn = SOMAXCONN;
+-
+ 	tbl = netns_core_table;
+ 	if (!net_eq(net, &init_net)) {
+ 		tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index bafb2223b879..2fa37d32ae3b 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -642,6 +642,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq = inet_rsk(req);
+ 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
+ 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 	ireq->ireq_family = AF_INET;
+ 	ireq->ir_iif = sk->sk_bound_dev_if;
+ 
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index d61027e78e25..a56919d47403 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -375,6 +375,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
+ 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+ 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+ 	ireq->ireq_family = AF_INET6;
++	ireq->ir_mark = inet_request_mark(sk, skb);
+ 
+ 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
+ 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index a20dc23360f9..be71e07ba6f1 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -790,6 +790,11 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (skb == NULL)
+ 		goto out_release;
+ 
++	if (sk->sk_state == DCCP_CLOSED) {
++		rc = -ENOTCONN;
++		goto out_discard;
++	}
++
+ 	skb_reserve(skb, sk->sk_prot->max_header);
+ 	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
+ 	if (rc != 0)
+diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
+index 31cd4fd75486..4b437445c2ea 100644
+--- a/net/dns_resolver/dns_key.c
++++ b/net/dns_resolver/dns_key.c
+@@ -25,6 +25,7 @@
+ #include <linux/moduleparam.h>
+ #include <linux/slab.h>
+ #include <linux/string.h>
++#include <linux/ratelimit.h>
+ #include <linux/kernel.h>
+ #include <linux/keyctl.h>
+ #include <linux/err.h>
+@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 
+ 			next_opt = memchr(opt, '#', end - opt) ?: end;
+ 			opt_len = next_opt - opt;
+-			if (!opt_len) {
+-				printk(KERN_WARNING
+-				       "Empty option to dns_resolver key\n");
++			if (opt_len <= 0 || opt_len > 128) {
++				pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
++						    opt_len);
+ 				return -EINVAL;
+ 			}
+ 
+@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
+ 			}
+ 
+ 		bad_option_value:
+-			printk(KERN_WARNING
+-			       "Option '%*.*s' to dns_resolver key:"
+-			       " bad/missing value\n",
+-			       opt_nlen, opt_nlen, opt);
++			pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
++					    opt_nlen, opt_nlen, opt);
+ 			return -EINVAL;
+ 		} while (opt = next_opt + 1, opt < end);
+ 	}
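[The dns_resolver change bounds each '#'-separated option to 1..128 bytes and rate-limits the warnings, so a malformed key payload can neither overrun parsing nor flood the log. A sketch of the bounded option scan under those assumptions; parse_opts is an illustrative name.]

#include <string.h>
#include <errno.h>

static int parse_opts(const char *opt, const char *end)
{
	while (opt < end) {
		const char *next = memchr(opt, '#', end - opt);
		long len;

		if (!next)
			next = end;
		len = next - opt;
		if (len <= 0 || len > 128)	/* bound each option */
			return -EINVAL;
		/* ... handle opt[0..len) here ... */
		opt = next + 1;
	}
	return 0;
}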
+diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
+index 627a2537634e..7c5040c4fa90 100644
+--- a/net/ieee802154/socket.c
++++ b/net/ieee802154/socket.c
+@@ -310,12 +310,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	skb->sk  = sk;
+ 	skb->protocol = htons(ETH_P_IEEE802154);
+ 
+-	dev_put(dev);
+-
+ 	err = dev_queue_xmit(skb);
+ 	if (err > 0)
+ 		err = net_xmit_errno(err);
+ 
++	dev_put(dev);
++
+ 	return err ?: size;
+ 
+ out_skb:
+@@ -697,12 +697,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
+ 	skb->sk  = sk;
+ 	skb->protocol = htons(ETH_P_IEEE802154);
+ 
+-	dev_put(dev);
+-
+ 	err = dev_queue_xmit(skb);
+ 	if (err > 0)
+ 		err = net_xmit_errno(err);
+ 
++	dev_put(dev);
++
+ 	return err ?: size;
+ 
+ out_skb:
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index bb96c1c4edd6..35ea352a9cef 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -212,6 +212,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ 		tw->tw_dport	    = inet->inet_dport;
+ 		tw->tw_family	    = sk->sk_family;
+ 		tw->tw_reuse	    = sk->sk_reuse;
++		tw->tw_reuseport    = sk->sk_reuseport;
+ 		tw->tw_hash	    = sk->sk_hash;
+ 		tw->tw_ipv6only	    = 0;
+ 		tw->tw_transparent  = inet->transparent;
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 67c1333422a4..d4bdeed4e0a4 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -239,7 +239,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ 			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ 			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+ 				return -EINVAL;
+-			ipc->oif = src_info->ipi6_ifindex;
++			if (src_info->ipi6_ifindex)
++				ipc->oif = src_info->ipi6_ifindex;
+ 			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+ 			continue;
+ 		}
+@@ -262,7 +263,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ 			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
+ 				return -EINVAL;
+ 			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
+-			ipc->oif = info->ipi_ifindex;
++			if (info->ipi_ifindex)
++				ipc->oif = info->ipi_ifindex;
+ 			ipc->addr = info->ipi_spec_dst.s_addr;
+ 			break;
+ 		}
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 169bf7d1d8ca..2385ec7083c6 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -301,13 +301,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
+ 	struct net_device *dev;
+ 	char name[IFNAMSIZ];
+ 
+-	if (parms->name[0])
++	err = -E2BIG;
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else {
+-		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
+-			err = -E2BIG;
++	} else {
++		if (strlen(ops->kind) > (IFNAMSIZ - 3))
+ 			goto failed;
+-		}
+ 		strlcpy(name, ops->kind, IFNAMSIZ);
+ 		strncat(name, "%d", 2);
+ 	}
+diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
+index 574f7ebba0b6..ac8342dcb55e 100644
+--- a/net/ipv4/netfilter/nf_nat_h323.c
++++ b/net/ipv4/netfilter/nf_nat_h323.c
+@@ -252,16 +252,16 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
+ 	if (set_h245_addr(skb, protoff, data, dataoff, taddr,
+ 			  &ct->tuplehash[!dir].tuple.dst.u3,
+ 			  htons((port & htons(1)) ? nated_port + 1 :
+-						    nated_port)) == 0) {
+-		/* Save ports */
+-		info->rtp_port[i][dir] = rtp_port;
+-		info->rtp_port[i][!dir] = htons(nated_port);
+-	} else {
++						    nated_port))) {
+ 		nf_ct_unexpect_related(rtp_exp);
+ 		nf_ct_unexpect_related(rtcp_exp);
+ 		return -1;
+ 	}
+ 
++	/* Save ports */
++	info->rtp_port[i][dir] = rtp_port;
++	info->rtp_port[i][!dir] = htons(nated_port);
++
+ 	/* Success */
+ 	pr_debug("nf_nat_h323: expect RTP %pI4:%hu->%pI4:%hu\n",
+ 		 &rtp_exp->tuple.src.u3.ip,
+@@ -370,15 +370,15 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
+ 	/* Modify signal */
+ 	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
+ 			  &ct->tuplehash[!dir].tuple.dst.u3,
+-			  htons(nated_port)) == 0) {
+-		/* Save ports */
+-		info->sig_port[dir] = port;
+-		info->sig_port[!dir] = htons(nated_port);
+-	} else {
++			  htons(nated_port))) {
+ 		nf_ct_unexpect_related(exp);
+ 		return -1;
+ 	}
+ 
++	/* Save ports */
++	info->sig_port[dir] = port;
++	info->sig_port[!dir] = htons(nated_port);
++
+ 	pr_debug("nf_nat_q931: expect H.245 %pI4:%hu->%pI4:%hu\n",
+ 		 &exp->tuple.src.u3.ip,
+ 		 ntohs(exp->tuple.src.u.tcp.port),
+@@ -462,24 +462,27 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
+ 	/* Modify signal */
+ 	if (set_h225_addr(skb, protoff, data, 0, &taddr[idx],
+ 			  &ct->tuplehash[!dir].tuple.dst.u3,
+-			  htons(nated_port)) == 0) {
+-		/* Save ports */
+-		info->sig_port[dir] = port;
+-		info->sig_port[!dir] = htons(nated_port);
+-
+-		/* Fix for Gnomemeeting */
+-		if (idx > 0 &&
+-		    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
+-		    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
+-			set_h225_addr(skb, protoff, data, 0, &taddr[0],
+-				      &ct->tuplehash[!dir].tuple.dst.u3,
+-				      info->sig_port[!dir]);
+-		}
+-	} else {
++			  htons(nated_port))) {
+ 		nf_ct_unexpect_related(exp);
+ 		return -1;
+ 	}
+ 
++	/* Save ports */
++	info->sig_port[dir] = port;
++	info->sig_port[!dir] = htons(nated_port);
++
++	/* Fix for Gnomemeeting */
++	if (idx > 0 &&
++	    get_h225_addr(ct, *data, &taddr[0], &addr, &port) &&
++	    (ntohl(addr.ip) & 0xff000000) == 0x7f000000) {
++		if (set_h225_addr(skb, protoff, data, 0, &taddr[0],
++				  &ct->tuplehash[!dir].tuple.dst.u3,
++				  info->sig_port[!dir])) {
++			nf_ct_unexpect_related(exp);
++			return -1;
++		}
++	}
++
+ 	/* Success */
+ 	pr_debug("nf_nat_ras: expect Q.931 %pI4:%hu->%pI4:%hu\n",
+ 		 &exp->tuple.src.u3.ip,
+@@ -550,9 +553,9 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
+ 	}
+ 
+ 	/* Modify signal */
+-	if (!set_h225_addr(skb, protoff, data, dataoff, taddr,
+-			   &ct->tuplehash[!dir].tuple.dst.u3,
+-			   htons(nated_port)) == 0) {
++	if (set_h225_addr(skb, protoff, data, dataoff, taddr,
++			  &ct->tuplehash[!dir].tuple.dst.u3,
++			  htons(nated_port))) {
+ 		nf_ct_unexpect_related(exp);
+ 		return -1;
+ 	}
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 9a2294d01b9d..acf09ab17a62 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -496,11 +496,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	int err;
+ 	struct ip_options_data opt_copy;
+ 	struct raw_frag_vec rfv;
++	int hdrincl;
+ 
+ 	err = -EMSGSIZE;
+ 	if (len > 0xFFFF)
+ 		goto out;
+ 
++	/* hdrincl should be READ_ONCE(inet->hdrincl)
++	 * but READ_ONCE() doesn't work with bit fields
++	 */
++	hdrincl = inet->hdrincl;
+ 	/*
+ 	 *	Check the flags.
+ 	 */
+@@ -575,7 +580,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		/* Linux does not mangle headers on raw sockets,
+ 		 * so that IP options + IP_HDRINCL is non-sense.
+ 		 */
+-		if (inet->hdrincl)
++		if (hdrincl)
+ 			goto done;
+ 		if (ipc.opt->opt.srr) {
+ 			if (!daddr)
+@@ -597,12 +602,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
+ 			   RT_SCOPE_UNIVERSE,
+-			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
++			   hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+ 			   inet_sk_flowi_flags(sk) |
+-			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
++			    (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+ 			   daddr, saddr, 0, 0);
+ 
+-	if (!inet->hdrincl) {
++	if (!hdrincl) {
+ 		rfv.msg = msg;
+ 		rfv.hlen = 0;
+ 
+@@ -627,7 +632,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		goto do_confirm;
+ back_from_confirm:
+ 
+-	if (inet->hdrincl)
++	if (hdrincl)
+ 		err = raw_send_hdrinc(sk, &fl4, msg, len,
+ 				      &rt, msg->msg_flags);
+ 
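[raw_sendmsg() snapshots inet->hdrincl into a local variable once, because READ_ONCE() cannot be applied to a bit field, and then uses only the snapshot, so a concurrent setsockopt() cannot flip the flag between the routing decision and the send path. A small sketch of the snapshot idiom; the types and names here are hypothetical.]

#include <stdio.h>

struct inet_flags {
	unsigned int hdrincl : 1;	/* bit field: READ_ONCE() can't apply */
};

/* Read the bit field once into a plain int and use only the local
 * copy, so every branch in the function sees the same value even
 * if another thread flips the flag mid-call. */
static int send_demo(const struct inet_flags *f)
{
	int hdrincl = f->hdrincl;

	if (hdrincl)
		printf("caller supplies the IP header\n");
	else
		printf("stack builds the IP header\n");

	/* ... all later decisions reuse hdrincl, never f->hdrincl ... */
	return hdrincl;
}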
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 94a4b28e5da6..07d65ae018ee 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2416,7 +2416,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 	case TCP_REPAIR_QUEUE:
+ 		if (!tp->repair)
+ 			err = -EPERM;
+-		else if (val < TCP_QUEUES_NR)
++		else if ((unsigned int)val < TCP_QUEUES_NR)
+ 			tp->repair_queue = val;
+ 		else
+ 			err = -EINVAL;
+@@ -2548,8 +2548,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+ #ifdef CONFIG_TCP_MD5SIG
+ 	case TCP_MD5SIG:
+-		/* Read the IP->Key mappings from userspace */
+-		err = tp->af_specific->md5_parse(sk, optval, optlen);
++		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
++			err = tp->af_specific->md5_parse(sk, optval, optlen);
++		else
++			err = -EINVAL;
+ 		break;
+ #endif
+ 	case TCP_USER_TIMEOUT:
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4763c431f7d8..d61371cefaf0 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3819,11 +3819,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
+ 	int length = (th->doff << 2) - sizeof(*th);
+ 	const u8 *ptr = (const u8 *)(th + 1);
+ 
+-	/* If the TCP option is too short, we can short cut */
+-	if (length < TCPOLEN_MD5SIG)
+-		return NULL;
+-
+-	while (length > 0) {
++	/* If not enough data remaining, we can short cut */
++	while (length >= TCPOLEN_MD5SIG) {
+ 		int opcode = *ptr++;
+ 		int opsize;
+ 
+@@ -5417,10 +5414,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+ 	else
+ 		tp->pred_flags = 0;
+ 
+-	if (!sock_flag(sk, SOCK_DEAD)) {
+-		sk->sk_state_change(sk);
+-		sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
+-	}
+ }
+ 
+ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+@@ -5484,6 +5477,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct tcp_fastopen_cookie foc = { .len = -1 };
+ 	int saved_clamp = tp->rx_opt.mss_clamp;
++	bool fastopen_fail;
+ 
+ 	tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
+ 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+@@ -5586,10 +5580,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ 
+ 		tcp_finish_connect(sk, skb);
+ 
+-		if ((tp->syn_fastopen || tp->syn_data) &&
+-		    tcp_rcv_fastopen_synack(sk, skb, &foc))
+-			return -1;
++		fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
++				tcp_rcv_fastopen_synack(sk, skb, &foc);
+ 
++		if (!sock_flag(sk, SOCK_DEAD)) {
++			sk->sk_state_change(sk);
++			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
++		}
++		if (fastopen_fail)
++			return -1;
+ 		if (sk->sk_write_pending ||
+ 		    icsk->icsk_accept_queue.rskq_defer_accept ||
+ 		    icsk->icsk_ack.pingpong) {
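[tcp_parse_md5sig_option() moves the remaining-length test into the loop condition, so every iteration is guaranteed enough bytes before it reads an option. A generic sketch of the same bounded TLV scan; find_opt and the option constants are illustrative, not the kernel parser.]

#include <stddef.h>

#define OPT_EOL	0
#define OPT_NOP	1
#define MIN_TLV	2	/* kind + length bytes */

/* Scan a TLV option block; return a pointer to the value of option
 * 'want' or NULL. The loop condition bounds every header read, and
 * the size check bounds the body, like the tcp_input.c fix. */
static const unsigned char *find_opt(const unsigned char *p, int length,
				     int want, int want_len)
{
	while (length >= MIN_TLV) {
		int kind = *p++;
		int size;

		if (kind == OPT_EOL)
			return NULL;
		if (kind == OPT_NOP) {
			length--;
			continue;
		}
		size = *p++;
		if (size < MIN_TLV || size > length)
			return NULL;	/* malformed or truncated */
		if (kind == want && size == want_len)
			return p;
		p += size - 2;
		length -= size;
	}
	return NULL;
}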
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 8441f9939d49..185ccfd781ed 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -886,7 +886,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
+ 	INIT_HLIST_NODE(&ifa->addr_lst);
+ 	ifa->scope = scope;
+ 	ifa->prefix_len = pfxlen;
+-	ifa->flags = flags | IFA_F_TENTATIVE;
++	ifa->flags = flags;
++	/* No need to add the TENTATIVE flag for addresses with NODAD */
++	if (!(flags & IFA_F_NODAD))
++		ifa->flags |= IFA_F_TENTATIVE;
+ 	ifa->valid_lft = valid_lft;
+ 	ifa->prefered_lft = prefered_lft;
+ 	ifa->cstamp = ifa->tstamp = jiffies;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index b1311da5d7b8..cda3cc6c3535 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -319,11 +319,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
+ 	if (t || !create)
+ 		return t;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			return NULL;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "ip6gre%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6gre_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index c7c2c33aa4af..2219f454c0db 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -316,13 +316,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
+ 	struct net_device *dev;
+ 	struct ip6_tnl *t;
+ 	char name[IFNAMSIZ];
+-	int err = -ENOMEM;
++	int err = -E2BIG;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6tnl%%d");
+-
++	}
++	err = -ENOMEM;
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ip6_tnl_dev_setup);
+ 	if (!dev)
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index c460e653b6a5..9b1c466fd0fd 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
+ 	char name[IFNAMSIZ];
+ 	int err;
+ 
+-	if (p->name[0])
++	if (p->name[0]) {
++		if (!dev_valid_name(p->name))
++			goto failed;
+ 		strlcpy(name, p->name, IFNAMSIZ);
+-	else
++	} else {
+ 		sprintf(name, "ip6_vti%%d");
++	}
+ 
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
+ 	if (!dev)
+@@ -614,6 +617,7 @@ static void vti6_link_config(struct ip6_tnl *t)
+ {
+ 	struct net_device *dev = t->dev;
+ 	struct __ip6_tnl_parm *p = &t->parms;
++	struct net_device *tdev = NULL;
+ 
+ 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
+ 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
+@@ -626,6 +630,25 @@ static void vti6_link_config(struct ip6_tnl *t)
+ 		dev->flags |= IFF_POINTOPOINT;
+ 	else
+ 		dev->flags &= ~IFF_POINTOPOINT;
++
++	if (p->flags & IP6_TNL_F_CAP_XMIT) {
++		int strict = (ipv6_addr_type(&p->raddr) &
++			      (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
++		struct rt6_info *rt = rt6_lookup(t->net,
++						 &p->raddr, &p->laddr,
++						 p->link, strict);
++
++		if (rt)
++			tdev = rt->dst.dev;
++		ip6_rt_put(rt);
++	}
++
++	if (!tdev && p->link)
++		tdev = __dev_get_by_index(t->net, p->link);
++
++	if (tdev)
++		dev->mtu = max_t(int, tdev->mtu - dev->hard_header_len,
++				 IPV6_MIN_MTU);
+ }
+ 
+ /**
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index abb0bdda759a..460f63619552 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1452,7 +1452,8 @@ static void ndisc_fill_redirect_hdr_option(struct sk_buff *skb,
+ 	*(opt++) = (rd_len >> 3);
+ 	opt += 6;
+ 
+-	memcpy(opt, ipv6_hdr(orig_skb), rd_len - 8);
++	skb_copy_bits(orig_skb, skb_network_offset(orig_skb), opt,
++		      rd_len - 8);
+ }
+ 
+ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
+@@ -1655,6 +1656,8 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
+ 	case NETDEV_CHANGEADDR:
+ 		neigh_changeaddr(&nd_tbl, dev);
+ 		fib6_run_gc(0, net, false);
++		/* fallthrough */
++	case NETDEV_UP:
+ 		idev = in6_dev_get(dev);
+ 		if (!idev)
+ 			break;
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index f4034c4eadf7..400548d53a43 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
+ 	if (!create)
+ 		goto failed;
+ 
+-	if (parms->name[0])
++	if (parms->name[0]) {
++		if (!dev_valid_name(parms->name))
++			goto failed;
+ 		strlcpy(name, parms->name, IFNAMSIZ);
+-	else
++	} else {
+ 		strcpy(name, "sit%d");
+-
++	}
+ 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+ 			   ipip6_tunnel_setup);
+ 	if (!dev)
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 123f6f9f854c..8f9493b1bb1f 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -2382,9 +2382,11 @@ static int afiucv_iucv_init(void)
+ 	af_iucv_dev->driver = &af_iucv_driver;
+ 	err = device_register(af_iucv_dev);
+ 	if (err)
+-		goto out_driver;
++		goto out_iucv_dev;
+ 	return 0;
+ 
++out_iucv_dev:
++	put_device(af_iucv_dev);
+ out_driver:
+ 	driver_unregister(&af_iucv_driver);
+ out_iucv:
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 354c43a1c43d..fd4b5a0cb7ee 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -3301,7 +3301,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
+ 		p += pol->sadb_x_policy_len*8;
+ 		sec_ctx = (struct sadb_x_sec_ctx *)p;
+ 		if (len < pol->sadb_x_policy_len*8 +
+-		    sec_ctx->sadb_x_sec_len) {
++		    sec_ctx->sadb_x_sec_len*8) {
+ 			*dir = -EINVAL;
+ 			goto out;
+ 		}
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 0c4de8dd58bf..0b92ff822534 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1521,9 +1521,14 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 		encap = cfg->encap;
+ 
+ 	/* Quick sanity checks */
++	err = -EPROTONOSUPPORT;
++	if (sk->sk_type != SOCK_DGRAM) {
++		pr_debug("tunl %hu: fd %d wrong socket type\n",
++			 tunnel_id, fd);
++		goto err;
++	}
+ 	switch (encap) {
+ 	case L2TP_ENCAPTYPE_UDP:
+-		err = -EPROTONOSUPPORT;
+ 		if (sk->sk_protocol != IPPROTO_UDP) {
+ 			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
+@@ -1531,7 +1536,6 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
+ 		}
+ 		break;
+ 	case L2TP_ENCAPTYPE_IP:
+-		err = -EPROTONOSUPPORT;
+ 		if (sk->sk_protocol != IPPROTO_L2TP) {
+ 			pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
+ 			       tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 1e412ad6ced5..ad2b93aafcd5 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -725,6 +725,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
+ 
+ 	if ((session->ifname[0] &&
+ 	     nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
++	    (session->offset &&
++	     nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
+ 	    (session->cookie_len &&
+ 	     nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
+ 		     &session->cookie[0])) ||
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 2d4d2230f976..ac518cc76c3e 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	lock_sock(sk);
+ 
+ 	error = -EINVAL;
++
++	if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
++	    sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
++		goto end;
++
+ 	if (sp->sa_protocol != PX_PROTO_OL2TP)
+ 		goto end;
+ 
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index c58f242c00f1..f5d8cf1b96f3 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
+ 		llc->laddr.lsap, llc->daddr.lsap);
+ 	if (!llc_send_disc(sk))
+ 		llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
+-	if (!sock_flag(sk, SOCK_ZAPPED))
++	if (!sock_flag(sk, SOCK_ZAPPED)) {
++		struct llc_sap *sap = llc->sap;
++
++		/* Hold this for release_sock(), so that llc_backlog_rcv()
++		 * could still use it.
++		 */
++		llc_sap_hold(sap);
+ 		llc_sap_remove_socket(llc->sap, sk);
+-	release_sock(sk);
++		release_sock(sk);
++		llc_sap_put(sap);
++	} else {
++		release_sock(sk);
++	}
+ 	if (llc->dev)
+ 		dev_put(llc->dev);
+ 	sock_put(sk);
+@@ -309,6 +319,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
+ 	int rc = -EINVAL;
+ 
+ 	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
++
++	lock_sock(sk);
+ 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
+ 		goto out;
+ 	rc = -EAFNOSUPPORT;
+@@ -380,6 +392,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
+ out_put:
+ 	llc_sap_put(sap);
+ out:
++	release_sock(sk);
+ 	return rc;
+ }
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 6e89ab8eac44..83aade477855 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1418,7 +1418,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
+ 		break;
+ 	case NL80211_IFTYPE_UNSPECIFIED:
+ 	case NUM_NL80211_IFTYPES:
+-		BUG();
++		WARN_ON(1);
+ 		break;
+ 	}
+ 
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 844825829992..41d059ec04b4 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4321,6 +4321,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
+ 	if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
+ 		return -EINVAL;
+ 
++	/* If a reconfig is happening, bail out */
++	if (local->in_reconfig)
++		return -EBUSY;
++
+ 	if (assoc) {
+ 		rcu_read_lock();
+ 		have_sta = sta_info_get(sdata, cbss->bssid);
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 005fdbe39a8b..363c82b08d28 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -193,6 +193,7 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+ 	}
+ 
+ 	if (ieee80211_is_action(mgmt->frame_control) &&
++	    !ieee80211_has_protected(mgmt->frame_control) &&
+ 	    mgmt->u.action.category == WLAN_CATEGORY_HT &&
+ 	    mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS &&
+ 	    ieee80211_sdata_running(sdata)) {
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index c0e64d15cf34..ee4dfecdc596 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -855,8 +855,13 @@ restart:
+ 	}
+ out:
+ 	local_bh_enable();
+-	if (last)
++	if (last) {
++		/* nf ct hash resize happened, now clear the leftover. */
++		if ((struct nf_conn *)cb->args[1] == last)
++			cb->args[1] = 0;
++
+ 		nf_ct_put(last);
++	}
+ 
+ 	return skb->len;
+ }
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index 32d0437abdd8..86f7555a98d1 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -993,10 +993,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
+ 	struct net *net = sock_net(ctnl);
+ 	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+ 
+-	queue = instance_lookup(q, queue_num);
+-	if (!queue)
+-		queue = verdict_instance_lookup(q, queue_num,
+-						NETLINK_CB(skb).portid);
++	queue = verdict_instance_lookup(q, queue_num,
++					NETLINK_CB(skb).portid);
+ 	if (IS_ERR(queue))
+ 		return PTR_ERR(queue);
+ 
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index f4fcd9441561..48e36611a869 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -367,6 +367,36 @@ textify_hooks(char *buf, size_t size, unsigned int mask, uint8_t nfproto)
+ 	return buf;
+ }
+ 
++/**
++ * xt_check_proc_name - check that name is suitable for /proc file creation
++ *
++ * @name: file name candidate
++ * @size: length of buffer
++ *
++ * Some x_tables modules wish to create a file in /proc.
++ * This function makes sure that the name is suitable for this
++ * purpose: it checks that the name is NUL terminated and isn't a
++ * 'special' name, like "..".
++ *
++ * Returns a negative number on error or 0 if the name is usable.
++ */
++int xt_check_proc_name(const char *name, unsigned int size)
++{
++	if (name[0] == '\0')
++		return -EINVAL;
++
++	if (strnlen(name, size) == size)
++		return -ENAMETOOLONG;
++
++	if (strcmp(name, ".") == 0 ||
++	    strcmp(name, "..") == 0 ||
++	    strchr(name, '/'))
++		return -EINVAL;
++
++	return 0;
++}
++EXPORT_SYMBOL(xt_check_proc_name);
++
+ int xt_check_match(struct xt_mtchk_param *par,
+ 		   unsigned int size, u_int8_t proto, bool inv_proto)
+ {
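[xt_check_proc_name() centralizes the checks that the two callers below switch to: the name must be non-empty, NUL-terminated within the buffer, and free of ".", ".." and '/', so it is safe as a /proc entry name. A userspace rendering of the same checks, mirroring the code above; check_proc_name is an illustrative name.]

#include <errno.h>
#include <string.h>

/* Returns 0 on success, -EINVAL or -ENAMETOOLONG otherwise. */
static int check_proc_name(const char *name, unsigned int size)
{
	if (name[0] == '\0')
		return -EINVAL;
	if (strnlen(name, size) == size)
		return -ENAMETOOLONG;	/* no NUL inside the buffer */
	if (!strcmp(name, ".") || !strcmp(name, "..") ||
	    strchr(name, '/'))
		return -EINVAL;
	return 0;
}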
+diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
+index 178696852bde..7381be0cdcdf 100644
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -668,8 +668,9 @@ static int hashlimit_mt_check(const struct xt_mtchk_param *par)
+ 
+ 	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
+ 		return -EINVAL;
+-	if (info->name[sizeof(info->name)-1] != '\0')
+-		return -EINVAL;
++	ret = xt_check_proc_name(info->name, sizeof(info->name));
++	if (ret)
++		return ret;
+ 	if (par->family == NFPROTO_IPV4) {
+ 		if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
+ 			return -EINVAL;
+diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
+index 45e1b30e4fb2..f2780ee57163 100644
+--- a/net/netfilter/xt_recent.c
++++ b/net/netfilter/xt_recent.c
+@@ -364,9 +364,9 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
+ 			info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
+ 		return -EINVAL;
+ 	}
+-	if (info->name[0] == '\0' ||
+-	    strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
+-		return -EINVAL;
++	ret = xt_check_proc_name(info->name, sizeof(info->name));
++	if (ret)
++		return ret;
+ 
+ 	if (ip_pkt_list_tot && info->hit_count < ip_pkt_list_tot)
+ 		nstamp_mask = roundup_pow_of_two(ip_pkt_list_tot) - 1;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 66c340bc0553..45ecf1f433ad 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1635,6 +1635,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 	if (addr->sa_family != AF_NETLINK)
+ 		return -EINVAL;
+ 
++	if (alen < sizeof(struct sockaddr_nl))
++		return -EINVAL;
++
+ 	if ((nladdr->nl_groups || nladdr->nl_pid) &&
+ 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 		return -EPERM;
+@@ -2385,6 +2388,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_namelen) {
+ 		err = -EINVAL;
++		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
++			goto out;
+ 		if (addr->nl_family != AF_NETLINK)
+ 			goto out;
+ 		dst_portid = addr->nl_pid;
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 97c22c818134..1c58b0326c54 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1143,7 +1143,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+ 	if (!err)
+ 		delivered = true;
+ 	else if (err != -ESRCH)
+-		goto error;
++		return err;
+ 	return delivered ? 0 : -ESRCH;
+  error:
+ 	kfree_skb(skb);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 7f5d147aff63..b778a3460842 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2652,6 +2652,7 @@ static int packet_release(struct socket *sock)
+ 
+ 	packet_flush_mclist(sk);
+ 
++	lock_sock(sk);
+ 	if (po->rx_ring.pg_vec) {
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 0);
+@@ -2661,6 +2662,7 @@ static int packet_release(struct socket *sock)
+ 		memset(&req_u, 0, sizeof(req_u));
+ 		packet_set_ring(sk, &req_u, 1, 1);
+ 	}
++	release_sock(sk);
+ 
+ 	fanout_release(sk);
+ 
+@@ -3320,6 +3322,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 		union tpacket_req_u req_u;
+ 		int len;
+ 
++		lock_sock(sk);
+ 		switch (po->tp_version) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+@@ -3330,14 +3333,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 			len = sizeof(req_u.req3);
+ 			break;
+ 		}
+-		if (optlen < len)
+-			return -EINVAL;
+-		if (pkt_sk(sk)->has_vnet_hdr)
+-			return -EINVAL;
+-		if (copy_from_user(&req_u.req, optval, len))
+-			return -EFAULT;
+-		return packet_set_ring(sk, &req_u, 0,
+-			optname == PACKET_TX_RING);
++		if (optlen < len) {
++			ret = -EINVAL;
++		} else {
++			if (pkt_sk(sk)->has_vnet_hdr) {
++				ret = -EINVAL;
++			} else {
++				if (copy_from_user(&req_u.req, optval, len))
++					ret = -EFAULT;
++				else
++					ret = packet_set_ring(sk, &req_u, 0,
++							      optname == PACKET_TX_RING);
++			}
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_COPY_THRESH:
+ 	{
+@@ -3847,7 +3857,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
+-	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		WARN(1, "Tx-ring is not supported.\n");
+@@ -3983,7 +3992,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
+ 
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index f226709ebd8f..ca5f3662a485 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -209,7 +209,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
+ 	struct sk_buff *trailer;
+ 	unsigned int len;
+ 	u16 check;
+-	int nsg;
++	int nsg, err;
+ 
+ 	sp = rxrpc_skb(skb);
+ 
+@@ -240,7 +240,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
+ 	len &= ~(call->conn->size_align - 1);
+ 
+ 	sg_init_table(sg, nsg);
+-	skb_to_sgvec(skb, sg, 0, len);
++	err = skb_to_sgvec(skb, sg, 0, len);
++	if (unlikely(err < 0))
++		return err;
+ 	crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+ 
+ 	_leave(" = 0");
+@@ -336,7 +338,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
+ 	struct sk_buff *trailer;
+ 	u32 data_size, buf;
+ 	u16 check;
+-	int nsg;
++	int nsg, ret;
+ 
+ 	_enter("");
+ 
+@@ -348,7 +350,9 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
+ 		goto nomem;
+ 
+ 	sg_init_table(sg, nsg);
+-	skb_to_sgvec(skb, sg, 0, 8);
++	ret = skb_to_sgvec(skb, sg, 0, 8);
++	if (unlikely(ret < 0))
++		return ret;
+ 
+ 	/* start the decryption afresh */
+ 	memset(&iv, 0, sizeof(iv));
+@@ -411,7 +415,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
+ 	struct sk_buff *trailer;
+ 	u32 data_size, buf;
+ 	u16 check;
+-	int nsg;
++	int nsg, ret;
+ 
+ 	_enter(",{%d}", skb->len);
+ 
+@@ -430,7 +434,12 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
+ 	}
+ 
+ 	sg_init_table(sg, nsg);
+-	skb_to_sgvec(skb, sg, 0, skb->len);
++	ret = skb_to_sgvec(skb, sg, 0, skb->len);
++	if (unlikely(ret < 0)) {
++		if (sg != _sg)
++			kfree(sg);
++		return ret;
++	}
+ 
+ 	/* decrypt from the session key */
+ 	token = call->conn->key->payload.data;
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index c9387f62f634..97dbf5775c47 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -93,8 +93,10 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ 			a->order = n_i;
+ 
+ 			nest = nla_nest_start(skb, a->order);
+-			if (nest == NULL)
++			if (nest == NULL) {
++				index--;
+ 				goto nla_put_failure;
++			}
+ 			err = tcf_action_dump_1(skb, a, 0, 0);
+ 			if (err < 0) {
+ 				index--;
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
+index 4cd5cf1aedf8..a40ed3d29988 100644
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -176,6 +176,9 @@ static int tcf_csum_ipv4_tcp(struct sk_buff *skb,
+ 	struct tcphdr *tcph;
+ 	const struct iphdr *iph;
+ 
++	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
++		return 1;
++
+ 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ 	if (tcph == NULL)
+ 		return 0;
+@@ -197,6 +200,9 @@ static int tcf_csum_ipv6_tcp(struct sk_buff *skb,
+ 	struct tcphdr *tcph;
+ 	const struct ipv6hdr *ip6h;
+ 
++	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
++		return 1;
++
+ 	tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ 	if (tcph == NULL)
+ 		return 0;
+@@ -220,6 +226,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb,
+ 	const struct iphdr *iph;
+ 	u16 ul;
+ 
++	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
++		return 1;
++
+ 	/*
+ 	 * Support both UDP and UDPLITE checksum algorithms; don't use
+ 	 * udph->len to get the real length without any protocol check,
+@@ -273,6 +282,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb,
+ 	const struct ipv6hdr *ip6h;
+ 	u16 ul;
+ 
++	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
++		return 1;
++
+ 	/*
+ 	 * Support both UDP and UDPLITE checksum algorithms; don't use
+ 	 * udph->len to get the real length without any protocol check,
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 00db4424faf1..7fee02981619 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -496,46 +496,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ 	addr->v6.sin6_scope_id = 0;
+ }
+ 
+-/* Compare addresses exactly.
+- * v4-mapped-v6 is also in consideration.
+- */
+-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+-			    const union sctp_addr *addr2)
++static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			      const union sctp_addr *addr2)
+ {
+ 	if (addr1->sa.sa_family != addr2->sa.sa_family) {
+ 		if (addr1->sa.sa_family == AF_INET &&
+ 		    addr2->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
+-			if (addr2->v6.sin6_port == addr1->v4.sin_port &&
+-			    addr2->v6.sin6_addr.s6_addr32[3] ==
+-			    addr1->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
++		    addr2->v6.sin6_addr.s6_addr32[3] ==
++		    addr1->v4.sin_addr.s_addr)
++			return 1;
++
+ 		if (addr2->sa.sa_family == AF_INET &&
+ 		    addr1->sa.sa_family == AF_INET6 &&
+-		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
+-			if (addr1->v6.sin6_port == addr2->v4.sin_port &&
+-			    addr1->v6.sin6_addr.s6_addr32[3] ==
+-			    addr2->v4.sin_addr.s_addr)
+-				return 1;
+-		}
++		    ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
++		    addr1->v6.sin6_addr.s6_addr32[3] ==
++		    addr2->v4.sin_addr.s_addr)
++			return 1;
++
+ 		return 0;
+ 	}
+-	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
+-		return 0;
++
+ 	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ 		return 0;
++
+ 	/* If this is a linklocal address, compare the scope_id. */
+-	if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+-		if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+-		    (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
+-			return 0;
+-		}
+-	}
++	if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
++	    addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
++	    addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
++		return 0;
+ 
+ 	return 1;
+ }
+ 
++/* Compare addresses exactly.
++ * v4-mapped-v6 is also in consideration.
++ */
++static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
++			    const union sctp_addr *addr2)
++{
++	return __sctp_v6_cmp_addr(addr1, addr2) &&
++	       addr1->v6.sin6_port == addr2->v6.sin6_port;
++}
++
+ /* Initialize addr struct to INADDR_ANY. */
+ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
+ {
+@@ -700,8 +703,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+ 			sctp_v6_map_v4(addr);
+ 	}
+ 
+-	if (addr->sa.sa_family == AF_INET)
++	if (addr->sa.sa_family == AF_INET) {
++		memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
+ 		return sizeof(struct sockaddr_in);
++	}
+ 	return sizeof(struct sockaddr_in6);
+ }
+ 
+@@ -818,8 +823,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 			       const union sctp_addr *addr2,
+ 			       struct sctp_sock *opt)
+ {
+-	struct sctp_af *af1, *af2;
+ 	struct sock *sk = sctp_opt2sk(opt);
++	struct sctp_af *af1, *af2;
+ 
+ 	af1 = sctp_get_af_specific(addr1->sa.sa_family);
+ 	af2 = sctp_get_af_specific(addr2->sa.sa_family);
+@@ -835,10 +840,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
+ 	if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
+ 		return 1;
+ 
+-	if (addr1->sa.sa_family != addr2->sa.sa_family)
+-		return 0;
+-
+-	return af1->cmp_addr(addr1, addr2);
++	return __sctp_v6_cmp_addr(addr1, addr2);
+ }
+ 
+ /* Verify that the provided sockaddr looks bindable.   Common verification,
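
The two hunks above split SCTP's exact address comparison into a port-agnostic
core (__sctp_v6_cmp_addr) plus a thin wrapper that adds the port check, which
is what lets sctp_inet6_cmp_addr() reuse the core for bind-conflict detection
across native IPv6 and v4-mapped addresses. A minimal user-space sketch of the
same split, with hypothetical helper names and only the standard sockaddr
types (the kernel code also compares scope IDs, which this omits):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

/* Port-agnostic core: do the raw addresses match, including the
 * v4 vs. v4-mapped-v6 case?  (Hypothetical names, illustration only.) */
static int addr_cmp_core(const struct sockaddr_in6 *a,
                         const struct sockaddr_in *b)
{
    /* A v4-mapped-v6 address carries the IPv4 address in the last
     * four bytes of the 16-byte IPv6 address. */
    if (!IN6_IS_ADDR_V4MAPPED(&a->sin6_addr))
        return 0;
    return memcmp(&a->sin6_addr.s6_addr[12], &b->sin_addr, 4) == 0;
}

/* Exact comparison: the core match plus the port check. */
static int addr_cmp_exact(const struct sockaddr_in6 *a,
                          const struct sockaddr_in *b)
{
    return addr_cmp_core(a, b) && a->sin6_port == b->sin_port;
}

int main(void)
{
    struct sockaddr_in6 a = { .sin6_family = AF_INET6, .sin6_port = htons(9) };
    struct sockaddr_in  b = { .sin_family  = AF_INET,  .sin_port  = htons(9) };

    inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
    inet_pton(AF_INET,  "192.0.2.1",        &b.sin_addr);
    printf("core=%d exact=%d\n", addr_cmp_core(&a, &b), addr_cmp_exact(&a, &b));
    return 0;
}
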
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 195b54a19f1e..25127a0aeb3c 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -335,11 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
+ 	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
+ 		return NULL;
+ 
+-	/* V4 mapped address are really of AF_INET family */
+-	if (addr->sa.sa_family == AF_INET6 &&
+-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+-	    !opt->pf->af_supported(AF_INET, opt))
+-		return NULL;
++	if (addr->sa.sa_family == AF_INET6) {
++		if (len < SIN6_LEN_RFC2133)
++			return NULL;
++		/* V4 mapped address are really of AF_INET family */
++		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
++		    !opt->pf->af_supported(AF_INET, opt))
++			return NULL;
++	}
+ 
+ 	/* If we get this far, af is valid. */
+ 	af = sctp_get_af_specific(addr->sa.sa_family);
+@@ -1512,7 +1515,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);
+ 
+-	lock_sock(sk);
++	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+ 	sk->sk_shutdown = SHUTDOWN_MASK;
+ 	sk->sk_state = SCTP_SS_CLOSING;
+ 
+@@ -1563,7 +1566,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	 * held and that should be grabbed before socket lock.
+ 	 */
+ 	spin_lock_bh(&net->sctp.addr_wq_lock);
+-	bh_lock_sock(sk);
++	bh_lock_sock_nested(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+ 	 * and we have just a little more cleanup.
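
Two distinct fixes sit in this socket.c section: sctp_sockaddr_af() now
rejects an AF_INET6 sockaddr whose caller-supplied length is shorter than
SIN6_LEN_RFC2133 before any IPv6 field is read, and sctp_close() switches to
the _nested lock variants to keep lockdep happy about the second locking
level. The first is the generic validate-length-before-dereference pattern;
a user-space sketch with a hypothetical function name (it uses the full
struct sizes for simplicity, where the kernel accepts the shorter RFC 2133
layout for IPv6):

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Return the address family only if `len` covers the structure that
 * the family implies; -1 otherwise.  Sketch, not the kernel code. */
static int sockaddr_family_checked(const struct sockaddr *sa, socklen_t len)
{
    if (len < sizeof(sa->sa_family))
        return -1;
    switch (sa->sa_family) {
    case AF_INET:
        return len >= sizeof(struct sockaddr_in) ? AF_INET : -1;
    case AF_INET6:
        /* Only now is it safe to touch sin6_addr, sin6_port, ... */
        return len >= sizeof(struct sockaddr_in6) ? AF_INET6 : -1;
    default:
        return -1;
    }
}

int main(void)
{
    struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };

    printf("%d %d\n",
           sockaddr_family_checked((struct sockaddr *)&a6, sizeof(a6)),
           sockaddr_family_checked((struct sockaddr *)&a6, 8));
    return 0;
}
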
+diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
+index d81186d34558..9103dd15511c 100644
+--- a/net/sunrpc/rpc_pipe.c
++++ b/net/sunrpc/rpc_pipe.c
+@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
+ 	struct dentry *clnt_dir = pipe_dentry->d_parent;
+ 	struct dentry *gssd_dir = clnt_dir->d_parent;
+ 
++	dget(pipe_dentry);
+ 	__rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
+ 	__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
+ 	__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 14972988d29d..3721a6422610 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2192,7 +2192,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -EHOSTUNREACH:
+ 	case -EADDRINUSE:
+ 	case -ENOBUFS:
+-		/* retry with existing socket, after a delay */
++		/*
++		 * xs_tcp_force_close() wakes tasks with -EIO.
++		 * We need to wake them first to ensure the
++		 * correct error code.
++		 */
++		xprt_wake_pending_tasks(xprt, status);
+ 		xs_tcp_force_close(xprt);
+ 		goto out;
+ 	}
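
The xprtsock change is about error-code ordering: xs_tcp_force_close() ends
up waking every pending RPC task with a generic -EIO, so the fix wakes them
first with the real connect status. Outside the kernel the same discipline
reads as "record the precise error and wake waiters before running the
generic teardown"; a small pthread sketch of that ordering (names and the
status value are illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int conn_error;      /* 0 = still connecting */

static void *waiter(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!conn_error)
        pthread_cond_wait(&cond, &lock);
    printf("request failed with %d\n", conn_error);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void connect_failed(int status)
{
    /* Record the precise error and wake waiters *before* the generic
     * teardown path runs; otherwise they would all observe a bland -EIO. */
    pthread_mutex_lock(&lock);
    conn_error = status;
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    /* generic teardown (which would clobber the status) happens here */
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, waiter, NULL);
    connect_failed(-111 /* ECONNREFUSED-style status, for illustration */);
    pthread_join(t, NULL);
    return 0;
}
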
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index a54f3cbe2246..64ead4f47b70 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -43,7 +43,8 @@
+ 
+ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+ 	[TIPC_NLA_NET_UNSPEC]	= { .type = NLA_UNSPEC },
+-	[TIPC_NLA_NET_ID]	= { .type = NLA_U32 }
++	[TIPC_NLA_NET_ID]	= { .type = NLA_U32 },
++	[TIPC_NLA_NET_ADDR]	= { .type = NLA_U32 },
+ };
+ 
+ /*
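
The one-line policy addition matters because an nla_policy is a table indexed
by attribute type: an attribute with no entry gets no type or length
validation, so TIPC_NLA_NET_ADDR could previously arrive shorter than four
bytes and still be read as a u32. A self-contained sketch of the table-driven
validation idea (simplified: a minimum-length table rather than the real
netlink API, which enforces the exact size for NLA_U32):

#include <stdint.h>
#include <stdio.h>

enum { ATTR_UNSPEC, ATTR_ID, ATTR_ADDR, ATTR_MAX };

/* Minimum payload length required per attribute type; 0 = no rule,
 * which means the payload length is never checked at all. */
static const uint16_t policy[ATTR_MAX] = {
    [ATTR_ID]   = sizeof(uint32_t),
    [ATTR_ADDR] = sizeof(uint32_t),   /* the entry the patch adds */
};

static int attr_ok(unsigned int type, uint16_t payload_len)
{
    if (type >= ATTR_MAX)
        return 0;
    return policy[type] == 0 || payload_len >= policy[type];
}

int main(void)
{
    printf("%d %d\n", attr_ok(ATTR_ADDR, 2), attr_ok(ATTR_ADDR, 4));
    return 0;
}
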
+diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
+index c3ab230e4493..a9072fb5c767 100644
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1794,32 +1794,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
+ 
+ static int __init x25_init(void)
+ {
+-	int rc = proto_register(&x25_proto, 0);
++	int rc;
+ 
+-	if (rc != 0)
++	rc = proto_register(&x25_proto, 0);
++	if (rc)
+ 		goto out;
+ 
+ 	rc = sock_register(&x25_family_ops);
+-	if (rc != 0)
++	if (rc)
+ 		goto out_proto;
+ 
+ 	dev_add_pack(&x25_packet_type);
+ 
+ 	rc = register_netdevice_notifier(&x25_dev_notifier);
+-	if (rc != 0)
++	if (rc)
+ 		goto out_sock;
+ 
+-	pr_info("Linux Version 0.2\n");
++	rc = x25_register_sysctl();
++	if (rc)
++		goto out_dev;
+ 
+-	x25_register_sysctl();
+ 	rc = x25_proc_init();
+-	if (rc != 0)
+-		goto out_dev;
++	if (rc)
++		goto out_sysctl;
++
++	pr_info("Linux Version 0.2\n");
++
+ out:
+ 	return rc;
++out_sysctl:
++	x25_unregister_sysctl();
+ out_dev:
+ 	unregister_netdevice_notifier(&x25_dev_notifier);
+ out_sock:
++	dev_remove_pack(&x25_packet_type);
+ 	sock_unregister(AF_X25);
+ out_proto:
+ 	proto_unregister(&x25_proto);
+diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
+index 43239527a205..703d46aae7a2 100644
+--- a/net/x25/sysctl_net_x25.c
++++ b/net/x25/sysctl_net_x25.c
+@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
+ 	{ 0, },
+ };
+ 
+-void __init x25_register_sysctl(void)
++int __init x25_register_sysctl(void)
+ {
+ 	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
++	if (!x25_table_header)
++		return -ENOMEM;
++	return 0;
+ }
+ 
+ void x25_unregister_sysctl(void)
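
Together, these two x25 hunks convert x25_register_sysctl() to report failure
and rework x25_init() into the kernel's standard error ladder: each step that
can fail jumps to a label that unwinds exactly the steps already completed,
in reverse order, and the success message moves after the last step (the old
code also leaked the packet handler on failure, hence the added
dev_remove_pack()). A runnable sketch of the ladder with stub steps:

#include <stdio.h>

static int step_ok(const char *name) { printf("register %s\n", name); return 0; }
static void undo(const char *name)   { printf("unregister %s\n", name); }

static int module_init_sketch(void)
{
    int rc;

    rc = step_ok("proto");
    if (rc)
        goto out;
    rc = step_ok("socket family");
    if (rc)
        goto out_proto;
    rc = step_ok("netdev notifier");
    if (rc)
        goto out_sock;
    rc = step_ok("sysctl");
    if (rc)
        goto out_dev;

    printf("init complete\n");
out:
    return rc;
out_dev:                         /* fall through: unwind in reverse order */
    undo("netdev notifier");
out_sock:
    undo("socket family");
out_proto:
    undo("proto");
    return rc;
}

int main(void) { return module_init_sketch(); }
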
+diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
+index ccfdc7115a83..a00ec715aa46 100644
+--- a/net/xfrm/xfrm_ipcomp.c
++++ b/net/xfrm/xfrm_ipcomp.c
+@@ -283,7 +283,7 @@ static struct crypto_comp * __percpu *ipcomp_alloc_tfms(const char *alg_name)
+ 		struct crypto_comp *tfm;
+ 
+ 		/* This can be any valid CPU ID so we don't need locking. */
+-		tfm = __this_cpu_read(*pos->tfms);
++		tfm = this_cpu_read(*pos->tfms);
+ 
+ 		if (!strcmp(crypto_comp_name(tfm), alg_name)) {
+ 			pos->users++;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 7306683a7207..94b522fc231e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1299,7 +1299,7 @@ EXPORT_SYMBOL(xfrm_policy_delete);
+ 
+ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
+ {
+-	struct net *net = xp_net(pol);
++	struct net *net = sock_net(sk);
+ 	struct xfrm_policy *old_pol;
+ 
+ #ifdef CONFIG_XFRM_SUB_POLICY
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 96688cd0f6f1..733e8028f54f 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1208,6 +1208,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
+ 	x->curlft.add_time = orig->curlft.add_time;
+ 	x->km.state = orig->km.state;
+ 	x->km.seq = orig->km.seq;
++	x->replay = orig->replay;
++	x->preplay = orig->preplay;
+ 
+ 	return x;
+ 
+@@ -1845,6 +1847,18 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
+ 	struct xfrm_mgr *km;
+ 	struct xfrm_policy *pol = NULL;
+ 
++#ifdef CONFIG_COMPAT
++	if (is_compat_task())
++		return -EOPNOTSUPP;
++#endif
++
++	if (!optval && !optlen) {
++		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
++		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
++		__sk_dst_reset(sk);
++		return 0;
++	}
++
+ 	if (optlen <= 0 || optlen > PAGE_SIZE)
+ 		return -EMSGSIZE;
+ 
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 84541b35629a..0f6285f9674e 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -120,22 +120,17 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
+ 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+ 	struct xfrm_replay_state_esn *rs;
+ 
+-	if (p->flags & XFRM_STATE_ESN) {
+-		if (!rt)
+-			return -EINVAL;
++	if (!rt)
++		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;
+ 
+-		rs = nla_data(rt);
++	rs = nla_data(rt);
+ 
+-		if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+-			return -EINVAL;
+-
+-		if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+-		    nla_len(rt) != sizeof(*rs))
+-			return -EINVAL;
+-	}
++	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
++		return -EINVAL;
+ 
+-	if (!rt)
+-		return 0;
++	if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
++	    nla_len(rt) != sizeof(*rs))
++		return -EINVAL;
+ 
+ 	/* As only ESP and AH support ESN feature. */
+ 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
+@@ -2460,7 +2455,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ #ifdef CONFIG_COMPAT
+ 	if (is_compat_task())
+-		return -ENOTSUPP;
++		return -EOPNOTSUPP;
+ #endif
+ 
+ 	type = nlh->nlmsg_type;
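
The verify_replay() rewrite above flattens a nested `if (flag) { ... }` plus
a trailing `if (!rt)` into guard clauses: handle the attribute-absent case up
front, after which every later line may assume the attribute exists. The same
shape in a runnable sketch (hypothetical validator; the length arithmetic is
simplified relative to xfrm_replay_state_esn_len()):

#include <stddef.h>
#include <stdio.h>

struct esn_attr { size_t bmp_len; size_t nla_len; };

#define BMP_MAX 16

static int verify_replay_sketch(int esn_required, const struct esn_attr *rt)
{
    /* Guard clause: the "attribute missing" case is dealt with first. */
    if (!rt)
        return esn_required ? -1 : 0;

    if (rt->bmp_len > BMP_MAX)
        return -1;
    if (rt->nla_len < rt->bmp_len)   /* stand-in for the real length check */
        return -1;
    return 0;
}

int main(void)
{
    struct esn_attr a = { .bmp_len = 4, .nla_len = 8 };

    printf("%d %d\n", verify_replay_sketch(1, NULL),
           verify_replay_sketch(1, &a));
    return 0;
}
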
+diff --git a/scripts/tags.sh b/scripts/tags.sh
+index cdb491d84503..7056322b53f0 100755
+--- a/scripts/tags.sh
++++ b/scripts/tags.sh
+@@ -106,6 +106,7 @@ all_compiled_sources()
+ 		case "$i" in
+ 			*.[cS])
+ 				j=${i/\.[cS]/\.o}
++				j="${j#$tree}"
+ 				if [ -e $j ]; then
+ 					echo $i
+ 				fi
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index e5f1561439db..b7e269317e0c 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -735,7 +735,7 @@ module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
+ 
+ /* Maximum pathname length before accesses will start getting rejected */
+ unsigned int aa_g_path_max = 2 * PATH_MAX;
+-module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
++module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR);
+ 
+ /* Determines how paranoid loading of policy is and how much verification
+  * on the loaded policy is done.
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 9ee9139b0b07..7f8c0322548c 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -206,7 +206,8 @@ int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ 		if (opened & FILE_CREATED)
+ 			iint->flags |= IMA_NEW_FILE;
+ 		if ((iint->flags & IMA_NEW_FILE) &&
+-		    !(iint->flags & IMA_DIGSIG_REQUIRED))
++		    (!(iint->flags & IMA_DIGSIG_REQUIRED) ||
++		     (inode->i_size == 0)))
+ 			status = INTEGRITY_PASS;
+ 		goto out;
+ 	}
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 0034eb420b0e..de1e43f6adf1 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -321,18 +321,6 @@ static void superblock_free_security(struct super_block *sb)
+ 	kfree(sbsec);
+ }
+ 
+-/* The file system's label must be initialized prior to use. */
+-
+-static const char *labeling_behaviors[7] = {
+-	"uses xattr",
+-	"uses transition SIDs",
+-	"uses task SIDs",
+-	"uses genfs_contexts",
+-	"not configured for labeling",
+-	"uses mountpoint labeling",
+-	"uses native labeling",
+-};
+-
+ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
+ 
+ static inline int inode_doinit(struct inode *inode)
+@@ -444,10 +432,6 @@ static int sb_finish_set_opts(struct super_block *sb)
+ 		}
+ 	}
+ 
+-	if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
+-		printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
+-		       sb->s_id, sb->s_type->name);
+-
+ 	sbsec->flags |= SE_SBINITIALIZED;
+ 	if (selinux_is_sblabel_mnt(sb))
+ 		sbsec->flags |= SBLABEL_MNT;
+@@ -4106,10 +4090,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
+ 		u32 sid, node_perm;
+ 
+ 		if (family == PF_INET) {
++			if (addrlen < sizeof(struct sockaddr_in)) {
++				err = -EINVAL;
++				goto out;
++			}
+ 			addr4 = (struct sockaddr_in *)address;
+ 			snum = ntohs(addr4->sin_port);
+ 			addrp = (char *)&addr4->sin_addr.s_addr;
+ 		} else {
++			if (addrlen < SIN6_LEN_RFC2133) {
++				err = -EINVAL;
++				goto out;
++			}
+ 			addr6 = (struct sockaddr_in6 *)address;
+ 			snum = ntohs(addr6->sin6_port);
+ 			addrp = (char *)&addr6->sin6_addr.s6_addr;
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 31d1d2ebd6f2..f20b2b0a2a54 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -154,7 +154,7 @@ static int selinux_set_mapping(struct policydb *pol,
+ 		}
+ 
+ 		k = 0;
+-		while (p_in->perms && p_in->perms[k]) {
++		while (p_in->perms[k]) {
+ 			/* An empty permission string skips ahead */
+ 			if (!*p_in->perms[k]) {
+ 				k++;
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 494b7b533366..7b2719acbeba 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -833,8 +833,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
+ 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+ 
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+-				     bool trylock)
++/* parameter locking: returns immediately if tried during streaming */
++static int lock_params(struct snd_pcm_runtime *runtime)
++{
++	if (mutex_lock_interruptible(&runtime->oss.params_lock))
++		return -ERESTARTSYS;
++	if (atomic_read(&runtime->oss.rw_ref)) {
++		mutex_unlock(&runtime->oss.params_lock);
++		return -EBUSY;
++	}
++	return 0;
++}
++
++static void unlock_params(struct snd_pcm_runtime *runtime)
++{
++	mutex_unlock(&runtime->oss.params_lock);
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct snd_pcm_hw_params *params, *sparams;
+@@ -848,12 +865,9 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ 	struct snd_mask sformat_mask;
+ 	struct snd_mask mask;
+ 
+-	if (trylock) {
+-		if (!(mutex_trylock(&runtime->oss.params_lock)))
+-			return -EAGAIN;
+-	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+-		return -EINTR;
+-	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
++	if (!runtime->oss.params)
++		return 0;
++	sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
+ 	params = kmalloc(sizeof(*params), GFP_KERNEL);
+ 	sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
+ 	if (!sw_params || !params || !sparams) {
+@@ -991,7 +1005,6 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+ 		goto failure;
+ 	}
+ 
+-	memset(sw_params, 0, sizeof(*sw_params));
+ 	if (runtime->oss.trigger) {
+ 		sw_params->start_threshold = 1;
+ 	} else {
+@@ -1079,6 +1092,23 @@ failure:
+ 	kfree(sw_params);
+ 	kfree(params);
+ 	kfree(sparams);
++	return err;
++}
++
++/* this one takes the lock by itself */
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++				     bool trylock)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	int err;
++
++	if (trylock) {
++		if (!(mutex_trylock(&runtime->oss.params_lock)))
++			return -EAGAIN;
++	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
++		return -ERESTARTSYS;
++
++	err = snd_pcm_oss_change_params_locked(substream);
+ 	mutex_unlock(&runtime->oss.params_lock);
+ 	return err;
+ }
+@@ -1107,6 +1137,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+ 	return 0;
+ }
+ 
++/* call with params_lock held */
++/* NOTE: this always calls PREPARE unconditionally, no matter whether
++ * runtime->oss.prepare is set or not
++ */
+ static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
+ {
+ 	int err;
+@@ -1131,14 +1165,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_runtime *runtime;
+ 	int err;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.params) {
+ 		err = snd_pcm_oss_change_params(substream, false);
+ 		if (err < 0)
+ 			return err;
+ 	}
++	if (runtime->oss.prepare) {
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
++		err = snd_pcm_oss_prepare(substream);
++		mutex_unlock(&runtime->oss.params_lock);
++		if (err < 0)
++			return err;
++	}
++	return 0;
++}
++
++/* call with params_lock held */
++static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
++{
++	struct snd_pcm_runtime *runtime;
++	int err;
++
++	runtime = substream->runtime;
++	if (runtime->oss.params) {
++		err = snd_pcm_oss_change_params_locked(substream);
++		if (err < 0)
++			return err;
++	}
+ 	if (runtime->oss.prepare) {
+ 		err = snd_pcm_oss_prepare(substream);
+ 		if (err < 0)
+@@ -1367,13 +1422,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 	if (atomic_read(&substream->mmap_count))
+ 		return -ENXIO;
+ 
+-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-		return tmp;
++	atomic_inc(&runtime->oss.rw_ref);
+ 	while (bytes > 0) {
+ 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ 			tmp = -ERESTARTSYS;
+ 			break;
+ 		}
++		tmp = snd_pcm_oss_make_ready_locked(substream);
++		if (tmp < 0)
++			goto err;
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			tmp = bytes;
+ 			if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
+@@ -1429,6 +1486,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
+ 		}
+ 		tmp = 0;
+ 	}
++	atomic_dec(&runtime->oss.rw_ref);
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1474,13 +1532,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 	if (atomic_read(&substream->mmap_count))
+ 		return -ENXIO;
+ 
+-	if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
+-		return tmp;
++	atomic_inc(&runtime->oss.rw_ref);
+ 	while (bytes > 0) {
+ 		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
+ 			tmp = -ERESTARTSYS;
+ 			break;
+ 		}
++		tmp = snd_pcm_oss_make_ready_locked(substream);
++		if (tmp < 0)
++			goto err;
+ 		if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
+ 			if (runtime->oss.buffer_used == 0) {
+ 				tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
+@@ -1521,6 +1581,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
+ 		}
+ 		tmp = 0;
+ 	}
++	atomic_dec(&runtime->oss.rw_ref);
+ 	return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
+ }
+ 
+@@ -1536,10 +1597,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
+ 			continue;
+ 		runtime = substream->runtime;
+ 		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.prepare = 1;
+ 		runtime->oss.buffer_used = 0;
+ 		runtime->oss.prev_hw_ptr_period = 0;
+ 		runtime->oss.period_ptr = 0;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 	return 0;
+ }
+@@ -1625,9 +1688,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 			goto __direct;
+ 		if ((err = snd_pcm_oss_make_ready(substream)) < 0)
+ 			return err;
++		atomic_inc(&runtime->oss.rw_ref);
++		if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
++			atomic_dec(&runtime->oss.rw_ref);
++			return -ERESTARTSYS;
++		}
+ 		format = snd_pcm_oss_format_from(runtime->oss.format);
+ 		width = snd_pcm_format_physical_width(format);
+-		mutex_lock(&runtime->oss.params_lock);
+ 		if (runtime->oss.buffer_used > 0) {
+ #ifdef OSS_DEBUG
+ 			pcm_dbg(substream->pcm, "sync: buffer_used\n");
+@@ -1637,10 +1704,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 						   runtime->oss.buffer + runtime->oss.buffer_used,
+ 						   size);
+ 			err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
+-			if (err < 0) {
+-				mutex_unlock(&runtime->oss.params_lock);
+-				return err;
+-			}
++			if (err < 0)
++				goto unlock;
+ 		} else if (runtime->oss.period_ptr > 0) {
+ #ifdef OSS_DEBUG
+ 			pcm_dbg(substream->pcm, "sync: period_ptr\n");
+@@ -1650,10 +1715,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 						   runtime->oss.buffer,
+ 						   size * 8 / width);
+ 			err = snd_pcm_oss_sync1(substream, size);
+-			if (err < 0) {
+-				mutex_unlock(&runtime->oss.params_lock);
+-				return err;
+-			}
++			if (err < 0)
++				goto unlock;
+ 		}
+ 		/*
+ 		 * ALSA's period might be a bit larger than the OSS one.
+@@ -1684,7 +1747,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 				snd_pcm_lib_writev(substream, buffers, size);
+ 			}
+ 		}
++unlock:
+ 		mutex_unlock(&runtime->oss.params_lock);
++		atomic_dec(&runtime->oss.rw_ref);
++		if (err < 0)
++			return err;
+ 		/*
+ 		 * finish sync: drain the buffer
+ 		 */
+@@ -1695,7 +1762,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 		substream->f_flags = saved_f_flags;
+ 		if (err < 0)
+ 			return err;
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.prepare = 1;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 
+ 	substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
+@@ -1706,8 +1775,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
+ 		err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+ 		if (err < 0)
+ 			return err;
++		mutex_lock(&runtime->oss.params_lock);
+ 		runtime->oss.buffer_used = 0;
+ 		runtime->oss.prepare = 1;
++		mutex_unlock(&runtime->oss.params_lock);
+ 	}
+ 	return 0;
+ }
+@@ -1719,6 +1790,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ 		struct snd_pcm_runtime *runtime;
++		int err;
++
+ 		if (substream == NULL)
+ 			continue;
+ 		runtime = substream->runtime;
+@@ -1726,10 +1799,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
+ 			rate = 1000;
+ 		else if (rate > 192000)
+ 			rate = 192000;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
+ 		if (runtime->oss.rate != rate) {
+ 			runtime->oss.params = 1;
+ 			runtime->oss.rate = rate;
+ 		}
++		unlock_params(runtime);
+ 	}
+ 	return snd_pcm_oss_get_rate(pcm_oss_file);
+ }
+@@ -1754,13 +1831,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
+ 		struct snd_pcm_runtime *runtime;
++		int err;
++
+ 		if (substream == NULL)
+ 			continue;
+ 		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
+ 		if (runtime->oss.channels != channels) {
+ 			runtime->oss.params = 1;
+ 			runtime->oss.channels = channels;
+ 		}
++		unlock_params(runtime);
+ 	}
+ 	return snd_pcm_oss_get_channels(pcm_oss_file);
+ }
+@@ -1814,10 +1897,9 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
+ 		return -ENOMEM;
+ 	_snd_pcm_hw_params_any(params);
+ 	err = snd_pcm_hw_refine(substream, params);
+-	format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); 
+-	kfree(params);
+ 	if (err < 0)
+-		return err;
++		goto error;
++	format_mask = *hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ 	for (fmt = 0; fmt < 32; ++fmt) {
+ 		if (snd_mask_test(&format_mask, fmt)) {
+ 			int f = snd_pcm_oss_format_to(fmt);
+@@ -1825,12 +1907,16 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
+ 				formats |= f;
+ 		}
+ 	}
+-	return formats;
++
++ error:
++	kfree(params);
++	return err < 0 ? err : formats;
+ }
+ 
+ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
+ {
+ 	int formats, idx;
++	int err;
+ 	
+ 	if (format != AFMT_QUERY) {
+ 		formats = snd_pcm_oss_get_formats(pcm_oss_file);
+@@ -1844,10 +1930,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
+ 			if (substream == NULL)
+ 				continue;
+ 			runtime = substream->runtime;
++			err = lock_params(runtime);
++			if (err < 0)
++				return err;
+ 			if (runtime->oss.format != format) {
+ 				runtime->oss.params = 1;
+ 				runtime->oss.format = format;
+ 			}
++			unlock_params(runtime);
+ 		}
+ 	}
+ 	return snd_pcm_oss_get_format(pcm_oss_file);
+@@ -1867,8 +1957,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
+ {
+ 	struct snd_pcm_runtime *runtime;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (subdivide == 0) {
+ 		subdivide = runtime->oss.subdivision;
+@@ -1892,9 +1980,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
+ 
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++		struct snd_pcm_runtime *runtime;
++
+ 		if (substream == NULL)
+ 			continue;
+-		if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
++		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
++		err = snd_pcm_oss_set_subdivide1(substream, subdivide);
++		unlock_params(runtime);
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return err;
+@@ -1904,8 +2000,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
+ {
+ 	struct snd_pcm_runtime *runtime;
+ 
+-	if (substream == NULL)
+-		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.subdivision || runtime->oss.fragshift)
+ 		return -EINVAL;
+@@ -1925,9 +2019,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
+ 
+ 	for (idx = 1; idx >= 0; --idx) {
+ 		struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
++		struct snd_pcm_runtime *runtime;
++
+ 		if (substream == NULL)
+ 			continue;
+-		if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
++		runtime = substream->runtime;
++		err = lock_params(runtime);
++		if (err < 0)
++			return err;
++		err = snd_pcm_oss_set_fragment1(substream, val);
++		unlock_params(runtime);
++		if (err < 0)
+ 			return err;
+ 	}
+ 	return err;
+@@ -2011,6 +2113,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 	}
+       	if (psubstream) {
+       		runtime = psubstream->runtime;
++		cmd = 0;
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
+ 		if (trigger & PCM_ENABLE_OUTPUT) {
+ 			if (runtime->oss.trigger)
+ 				goto _skip1;
+@@ -2028,13 +2133,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 			cmd = SNDRV_PCM_IOCTL_DROP;
+ 			runtime->oss.prepare = 1;
+ 		}
+-		err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
+-		if (err < 0)
+-			return err;
+-	}
+  _skip1:
++		mutex_unlock(&runtime->oss.params_lock);
++		if (cmd) {
++			err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
++			if (err < 0)
++				return err;
++		}
++	}
+ 	if (csubstream) {
+       		runtime = csubstream->runtime;
++		cmd = 0;
++		if (mutex_lock_interruptible(&runtime->oss.params_lock))
++			return -ERESTARTSYS;
+ 		if (trigger & PCM_ENABLE_INPUT) {
+ 			if (runtime->oss.trigger)
+ 				goto _skip2;
+@@ -2049,11 +2160,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
+ 			cmd = SNDRV_PCM_IOCTL_DROP;
+ 			runtime->oss.prepare = 1;
+ 		}
+-		err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
+-		if (err < 0)
+-			return err;
+-	}
+  _skip2:
++		mutex_unlock(&runtime->oss.params_lock);
++		if (cmd) {
++			err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
++			if (err < 0)
++				return err;
++		}
++	}
+ 	return 0;
+ }
+ 
+@@ -2305,6 +2419,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
+ 	runtime->oss.maxfrags = 0;
+ 	runtime->oss.subdivision = 0;
+ 	substream->pcm_release = snd_pcm_oss_release_substream;
++	atomic_set(&runtime->oss.rw_ref, 0);
+ }
+ 
+ static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
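
The thread running through the pcm_oss.c changes is one protocol: parameter
setters go through lock_params(), which takes params_lock but bails out with
-EBUSY when the rw_ref counter shows a read/write in flight, while the
read/write paths hold rw_ref across their whole loop and take params_lock per
chunk, so an OSS ioctl can never re-negotiate hw params mid-transfer. A
compressed single-threaded model of that protocol with C11 atomics and a
pthread mutex (a sketch of the idea, not the driver's state machine):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int rw_ref;          /* readers/writers currently active */
static int rate = 44100;

/* Setter side: refuse to change params while I/O is in flight. */
static int set_rate(int new_rate)
{
    pthread_mutex_lock(&params_lock);
    if (atomic_load(&rw_ref) > 0) {
        pthread_mutex_unlock(&params_lock);
        return -EBUSY;
    }
    rate = new_rate;
    pthread_mutex_unlock(&params_lock);
    return 0;
}

/* I/O side: advertise activity first, then take the lock per chunk. */
static void write_chunk(void)
{
    atomic_fetch_add(&rw_ref, 1);
    pthread_mutex_lock(&params_lock);
    printf("writing one period at %d Hz\n", rate);
    pthread_mutex_unlock(&params_lock);
    atomic_fetch_sub(&rw_ref, 1);
}

int main(void)
{
    write_chunk();
    printf("set_rate -> %d\n", set_rate(48000));
    return 0;
}
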
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 1f64ab0c2a95..7ae080bae15c 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -426,6 +426,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
+ 		return -ENOTTY;
+ 	if (substream->stream != dir)
+ 		return -EINVAL;
++	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
++		return -EBADFD;
+ 
+ 	if ((ch = substream->runtime->channels) > 128)
+ 		return -EINVAL;
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index aa999e747c94..889087808ebe 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2729,6 +2729,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
+ 	sync_ptr.s.status.hw_ptr = status->hw_ptr;
+ 	sync_ptr.s.status.tstamp = status->tstamp;
+ 	sync_ptr.s.status.suspended_state = status->suspended_state;
++	sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
+ 	snd_pcm_stream_unlock_irq(substream);
+ 	if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
+ 		return -EFAULT;
+@@ -3410,7 +3411,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
+ 					 area,
+ 					 substream->runtime->dma_area,
+ 					 substream->runtime->dma_addr,
+-					 area->vm_end - area->vm_start);
++					 substream->runtime->dma_bytes);
+ #endif /* CONFIG_X86 */
+ 	/* mmap with fault handler */
+ 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index 09a89094dcf7..4e304a24924a 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+ 	struct snd_rawmidi_params params;
+ 	unsigned int val;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(params.stream, &src->stream) ||
+ 	    get_user(params.buffer_size, &src->buffer_size) ||
+ 	    get_user(params.avail_min, &src->avail_min) ||
+@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
+ 	params.no_active_sensing = val;
+ 	switch (params.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		return snd_rawmidi_output_params(rfile->output, &params);
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		return snd_rawmidi_input_params(rfile->input, &params);
+ 	}
+ 	return -EINVAL;
+@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+ 	int err;
+ 	struct snd_rawmidi_status status;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(status.stream, &src->stream))
+ 		return -EFAULT;
+ 
+ 	switch (status.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		err = snd_rawmidi_output_status(rfile->output, &status);
+ 		break;
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		err = snd_rawmidi_input_status(rfile->input, &status);
+ 		break;
+ 	default:
+@@ -113,16 +117,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
+ 	int err;
+ 	struct snd_rawmidi_status status;
+ 
+-	if (rfile->output == NULL)
+-		return -EINVAL;
+ 	if (get_user(status.stream, &src->stream))
+ 		return -EFAULT;
+ 
+ 	switch (status.stream) {
+ 	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		if (!rfile->output)
++			return -EINVAL;
+ 		err = snd_rawmidi_output_status(rfile->output, &status);
+ 		break;
+ 	case SNDRV_RAWMIDI_STREAM_INPUT:
++		if (!rfile->input)
++			return -EINVAL;
+ 		err = snd_rawmidi_input_status(rfile->input, &status);
+ 		break;
+ 	default:
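
The rawmidi compat fixes move the NULL check from "always the output side"
to "whichever side the ioctl actually targets", since a capture-only file
legitimately has rfile->output == NULL and was wrongly rejected before. A
small sketch of per-direction validation (simplified types):

#include <errno.h>
#include <stdio.h>

enum stream { STREAM_OUTPUT, STREAM_INPUT };

struct midi_file { void *output, *input; };

static int stream_params(const struct midi_file *f, enum stream s)
{
    switch (s) {
    case STREAM_OUTPUT:
        if (!f->output)        /* only reject if *this* side is absent */
            return -EINVAL;
        return 0;
    case STREAM_INPUT:
        if (!f->input)
            return -EINVAL;
        return 0;
    }
    return -EINVAL;
}

int main(void)
{
    int dummy;
    struct midi_file capture_only = { .output = NULL, .input = &dummy };

    printf("%d %d\n", stream_params(&capture_only, STREAM_OUTPUT),
           stream_params(&capture_only, STREAM_INPUT));
    return 0;
}
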
+diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
+index c3908862bc8b..86ca584c27b2 100644
+--- a/sound/core/seq/oss/seq_oss_event.c
++++ b/sound/core/seq/oss/seq_oss_event.c
+@@ -26,6 +26,7 @@
+ #include <sound/seq_oss_legacy.h>
+ #include "seq_oss_readq.h"
+ #include "seq_oss_writeq.h"
++#include <linux/nospec.h>
+ 
+ 
+ /*
+@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
+ {
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (!snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	info = &dp->synths[dev];
+ 	switch (info->arg.event_passing) {
+ 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
+ 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
+@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
+ 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
+ 		}
+ 
++		ch = array_index_nospec(ch, info->nr_voices);
+ 		if (note == 255 && info->ch[ch].note >= 0) {
+ 			/* volume control */
+ 			int type;
+@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ {
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (!snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	info = &dp->synths[dev];
+ 	switch (info->arg.event_passing) {
+ 	case SNDRV_SEQ_OSS_PROCESS_EVENTS:
+ 		if (! info->ch || ch < 0 || ch >= info->nr_voices) {
+@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ 			return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
+ 		}
+ 
++		ch = array_index_nospec(ch, info->nr_voices);
+ 		if (info->ch[ch].note >= 0) {
+ 			note = info->ch[ch].note;
+ 			info->ch[ch].vel = 0;
+@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
+ static int
+ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	if (!snd_seq_oss_synth_info(dp, dev))
+ 		return -ENXIO;
+ 	
+ 	ev->type = type;
+@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
+ static int
+ set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	if (!snd_seq_oss_synth_info(dp, dev))
+ 		return -ENXIO;
+ 	
+ 	ev->type = type;
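
This file and several later ones in the patch (seq_oss_midi.c,
seq_oss_synth.c, opl3, hda_hwdep, hdspm, rme9652) apply the same Spectre-v1
pattern: after the architectural bounds check, the index is passed through
array_index_nospec() so a mispredicted branch cannot speculatively read out
of bounds. array_index_nospec() is the real kernel helper; the following is
only a user-space analogue of its generic mask-based fallback, assuming the
index and size both fit comfortably in a signed long:

#include <stddef.h>
#include <stdio.h>

/* Branchless clamp modelled on the kernel's generic
 * array_index_mask_nospec(): yields ~0 when idx < size, else 0.
 * (Arithmetic right shift of a negative value is what mainstream
 * compilers do; strictly it is implementation-defined.) */
static size_t index_mask_nospec(size_t idx, size_t size)
{
    /* idx < size  =>  idx - size is negative  =>  sign bit set. */
    return (size_t)((long)(idx - size) >> (sizeof(long) * 8 - 1));
}

static size_t index_nospec(size_t idx, size_t size)
{
    return idx & index_mask_nospec(idx, size);
}

int main(void)
{
    int table[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    size_t i = 3;

    if (i < 8)                                    /* architectural check */
        printf("%d\n", table[index_nospec(i, 8)]); /* speculation-safe */
    return 0;
}
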
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index 74c68a0f8abe..28c8e6720a08 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -29,6 +29,7 @@
+ #include "../seq_lock.h"
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+ 
+ 
+ /*
+@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	if (dev < 0 || dev >= dp->max_mididev)
+ 		return NULL;
++	dev = array_index_nospec(dev, dp->max_mididev);
+ 	return get_mdev(dev);
+ }
+ 
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index f38cf91b4faf..ff3fe10555eb 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -26,6 +26,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+ 
+ /*
+  * constants
+@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
+ 	dp->max_synthdev = 0;
+ }
+ 
+-/*
+- * check if the specified device is MIDI mapped device
+- */
+-static int
+-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
++static struct seq_oss_synthinfo *
++get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	if (dev < 0 || dev >= dp->max_synthdev)
+-		return 0;
+-	if (dp->synths[dev].is_midi)
+-		return 1;
+-	return 0;
++		return NULL;
++	dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
++	return &dp->synths[dev];
+ }
+ 
+ /*
+@@ -359,14 +356,20 @@ static struct seq_oss_synth *
+ get_synthdev(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	struct seq_oss_synth *rec;
+-	if (dev < 0 || dev >= dp->max_synthdev)
+-		return NULL;
+-	if (! dp->synths[dev].opened)
++	struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
++
++	if (!info)
+ 		return NULL;
+-	if (dp->synths[dev].is_midi)
+-		return &midi_synth_dev;
+-	if ((rec = get_sdev(dev)) == NULL)
++	if (!info->opened)
+ 		return NULL;
++	if (info->is_midi) {
++		rec = &midi_synth_dev;
++		snd_use_lock_use(&rec->use_lock);
++	} else {
++		rec = get_sdev(dev);
++		if (!rec)
++			return NULL;
++	}
+ 	if (! rec->opened) {
+ 		snd_use_lock_free(&rec->use_lock);
+ 		return NULL;
+@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
+ 	struct seq_oss_synth *rec;
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
+-		return;
+-	info = &dp->synths[dev];
+-	if (! info->opened)
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info || !info->opened)
+ 		return;
+ 	if (info->sysex)
+ 		info->sysex->len = 0; /* reset sysex */
+@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 			    const char __user *buf, int p, int c)
+ {
+ 	struct seq_oss_synth *rec;
++	struct seq_oss_synthinfo *info;
+ 	int rc;
+ 
+-	if (dev < 0 || dev >= dp->max_synthdev)
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	if (is_midi_dev(dp, dev))
++	if (info->is_midi)
+ 		return 0;
+ 	if ((rec = get_synthdev(dp, dev)) == NULL)
+ 		return -ENXIO;
+@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 	if (rec->oper.load_patch == NULL)
+ 		rc = -ENXIO;
+ 	else
+-		rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
++		rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
+ 	snd_use_lock_free(&rec->use_lock);
+ 	return rc;
+ }
+ 
+ /*
+- * check if the device is valid synth device
++ * check if the device is a valid synth device and return its synth info
+  */
+-int
+-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
++struct seq_oss_synthinfo *
++snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
+ {
+ 	struct seq_oss_synth *rec;
++
+ 	rec = get_synthdev(dp, dev);
+ 	if (rec) {
+ 		snd_use_lock_free(&rec->use_lock);
+-		return 1;
++		return get_synthinfo_nospec(dp, dev);
+ 	}
+-	return 0;
++	return NULL;
+ }
+ 
+ 
+@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ 	int i, send;
+ 	unsigned char *dest;
+ 	struct seq_oss_synth_sysex *sysex;
++	struct seq_oss_synthinfo *info;
+ 
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info)
+ 		return -ENXIO;
+ 
+-	sysex = dp->synths[dev].sysex;
++	sysex = info->sysex;
+ 	if (sysex == NULL) {
+ 		sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
+ 		if (sysex == NULL)
+ 			return -ENOMEM;
+-		dp->synths[dev].sysex = sysex;
++		info->sysex = sysex;
+ 	}
+ 
+ 	send = 0;
+@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ int
+ snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev))
++	struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
++
++	if (!info)
+ 		return -EINVAL;
+-	snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
+-			      dp->synths[dev].arg.addr.port);
++	snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
++			      info->arg.addr.port);
+ 	return 0;
+ }
+ 
+@@ -568,16 +576,18 @@ int
+ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
+ {
+ 	struct seq_oss_synth *rec;
++	struct seq_oss_synthinfo *info;
+ 	int rc;
+ 
+-	if (is_midi_dev(dp, dev))
++	info = get_synthinfo_nospec(dp, dev);
++	if (!info || info->is_midi)
+ 		return -ENXIO;
+ 	if ((rec = get_synthdev(dp, dev)) == NULL)
+ 		return -ENXIO;
+ 	if (rec->oper.ioctl == NULL)
+ 		rc = -ENXIO;
+ 	else
+-		rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
++		rc = rec->oper.ioctl(&info->arg, cmd, addr);
+ 	snd_use_lock_free(&rec->use_lock);
+ 	return rc;
+ }
+@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
+ int
+ snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
+ {
+-	if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
++	struct seq_oss_synthinfo *info;
++
++	info = snd_seq_oss_synth_info(dp, dev);
++	if (!info || info->is_midi)
+ 		return -ENXIO;
+ 	ev->type = SNDRV_SEQ_EVENT_OSS;
+ 	memcpy(ev->data.raw8.d, data, 8);
+diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
+index 74ac55f166b6..a63f9e22974d 100644
+--- a/sound/core/seq/oss/seq_oss_synth.h
++++ b/sound/core/seq/oss/seq_oss_synth.h
+@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
+ void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
+ int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
+ 				 const char __user *buf, int p, int c);
+-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
++struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
++						 int dev);
+ int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ 			    struct snd_seq_event *ev);
+ int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index dac0a54e39de..64a1ae720e49 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -270,12 +270,12 @@ static int seq_free_client1(struct snd_seq_client *client)
+ 
+ 	if (!client)
+ 		return 0;
+-	snd_seq_delete_all_ports(client);
+-	snd_seq_queue_client_leave(client->number);
+ 	spin_lock_irqsave(&clients_lock, flags);
+ 	clienttablock[client->number] = 1;
+ 	clienttab[client->number] = NULL;
+ 	spin_unlock_irqrestore(&clients_lock, flags);
++	snd_seq_delete_all_ports(client);
++	snd_seq_queue_client_leave(client->number);
+ 	snd_use_lock_sync(&client->use_lock);
+ 	snd_seq_queue_client_termination(client->number);
+ 	if (client->pool)
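
seq_free_client1() now clears the client's slot in the global lookup table,
under clients_lock, before tearing down its ports and queues, so no other
context can look the client up and race with the destruction. The shape of
that unpublish-before-teardown pattern in a runnable sketch:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_CLIENTS 4

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct client { int id; } *clients[MAX_CLIENTS];

static void free_client(int n)
{
    struct client *c;

    /* 1. Unpublish first: after this, lookups can no longer find it. */
    pthread_mutex_lock(&table_lock);
    c = clients[n];
    clients[n] = NULL;
    pthread_mutex_unlock(&table_lock);

    /* 2. Only then tear down resources nobody else can reach. */
    free(c);
}

int main(void)
{
    clients[0] = calloc(1, sizeof(*clients[0]));
    free_client(0);
    printf("slot 0 is %p\n", (void *)clients[0]);
    return 0;
}
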
+diff --git a/sound/core/seq/seq_prioq.c b/sound/core/seq/seq_prioq.c
+index bc1c8488fc2a..2bc6759e4adc 100644
+--- a/sound/core/seq/seq_prioq.c
++++ b/sound/core/seq/seq_prioq.c
+@@ -87,7 +87,7 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo)
+ 	if (f->cells > 0) {
+ 		/* drain prioQ */
+ 		while (f->cells > 0)
+-			snd_seq_cell_free(snd_seq_prioq_cell_out(f));
++			snd_seq_cell_free(snd_seq_prioq_cell_out(f, NULL));
+ 	}
+ 	
+ 	kfree(f);
+@@ -214,8 +214,18 @@ int snd_seq_prioq_cell_in(struct snd_seq_prioq * f,
+ 	return 0;
+ }
+ 
++/* return 1 if the current time >= event timestamp */
++static int event_is_ready(struct snd_seq_event *ev, void *current_time)
++{
++	if ((ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) == SNDRV_SEQ_TIME_STAMP_TICK)
++		return snd_seq_compare_tick_time(current_time, &ev->time.tick);
++	else
++		return snd_seq_compare_real_time(current_time, &ev->time.time);
++}
++
+ /* dequeue cell from prioq */
+-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
++						  void *current_time)
+ {
+ 	struct snd_seq_event_cell *cell;
+ 	unsigned long flags;
+@@ -227,6 +237,8 @@ struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f)
+ 	spin_lock_irqsave(&f->lock, flags);
+ 
+ 	cell = f->head;
++	if (cell && current_time && !event_is_ready(&cell->event, current_time))
++		cell = NULL;
+ 	if (cell) {
+ 		f->head = cell->next;
+ 
+@@ -252,18 +264,6 @@ int snd_seq_prioq_avail(struct snd_seq_prioq * f)
+ 	return f->cells;
+ }
+ 
+-
+-/* peek at cell at the head of the prioq */
+-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq * f)
+-{
+-	if (f == NULL) {
+-		pr_debug("ALSA: seq: snd_seq_prioq_cell_in() called with NULL prioq\n");
+-		return NULL;
+-	}
+-	return f->head;
+-}
+-
+-
+ static inline int prioq_match(struct snd_seq_event_cell *cell,
+ 			      int client, int timestamp)
+ {
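
The prioq change replaces the peek-then-dequeue pair with a single
snd_seq_prioq_cell_out() that tests the head's timestamp and removes it
inside one spinlock-held critical section; the old split left a window in
which another context could dequeue or reorder the head between the peek and
the pop. A pthread sketch of "dequeue only if due" as one atomic operation:

#include <pthread.h>
#include <stdio.h>

struct cell { long when; struct cell *next; };

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static struct cell *head;

/* Test-and-remove under one lock: no window for another thread to
 * change the head between "is it due?" and "take it". */
static struct cell *dequeue_if_due(long now)
{
    struct cell *c;

    pthread_mutex_lock(&qlock);
    c = head;
    if (c && c->when > now)     /* head exists but is not due yet */
        c = NULL;
    if (c)
        head = c->next;
    pthread_mutex_unlock(&qlock);
    return c;
}

int main(void)
{
    struct cell *c, two = { 20, NULL }, one = { 10, &two };

    head = &one;
    while ((c = dequeue_if_due(15)))
        printf("dispatch event due at %ld\n", c->when);
    return 0;
}
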
+diff --git a/sound/core/seq/seq_prioq.h b/sound/core/seq/seq_prioq.h
+index d38bb78d9345..2c315ca10fc4 100644
+--- a/sound/core/seq/seq_prioq.h
++++ b/sound/core/seq/seq_prioq.h
+@@ -44,14 +44,12 @@ void snd_seq_prioq_delete(struct snd_seq_prioq **fifo);
+ int snd_seq_prioq_cell_in(struct snd_seq_prioq *f, struct snd_seq_event_cell *cell);
+ 
+ /* dequeue cell from prioq */ 
+-struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f);
++struct snd_seq_event_cell *snd_seq_prioq_cell_out(struct snd_seq_prioq *f,
++						  void *current_time);
+ 
+ /* return number of events available in prioq */
+ int snd_seq_prioq_avail(struct snd_seq_prioq *f);
+ 
+-/* peek at cell at the head of the prioq */
+-struct snd_seq_event_cell *snd_seq_prioq_cell_peek(struct snd_seq_prioq *f);
+-
+ /* client left queue */
+ void snd_seq_prioq_leave(struct snd_seq_prioq *f, int client, int timestamp);        
+ 
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index a7bd074f6c0e..b83fdc72011e 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -277,30 +277,20 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
+ 
+       __again:
+ 	/* Process tick queue... */
+-	while ((cell = snd_seq_prioq_cell_peek(q->tickq)) != NULL) {
+-		if (snd_seq_compare_tick_time(&q->timer->tick.cur_tick,
+-					      &cell->event.time.tick)) {
+-			cell = snd_seq_prioq_cell_out(q->tickq);
+-			if (cell)
+-				snd_seq_dispatch_event(cell, atomic, hop);
+-		} else {
+-			/* event remains in the queue */
++	for (;;) {
++		cell = snd_seq_prioq_cell_out(q->tickq,
++					      &q->timer->tick.cur_tick);
++		if (!cell)
+ 			break;
+-		}
++		snd_seq_dispatch_event(cell, atomic, hop);
+ 	}
+ 
+-
+ 	/* Process time queue... */
+-	while ((cell = snd_seq_prioq_cell_peek(q->timeq)) != NULL) {
+-		if (snd_seq_compare_real_time(&q->timer->cur_time,
+-					      &cell->event.time.time)) {
+-			cell = snd_seq_prioq_cell_out(q->timeq);
+-			if (cell)
+-				snd_seq_dispatch_event(cell, atomic, hop);
+-		} else {
+-			/* event remains in the queue */
++	for (;;) {
++		cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++		if (!cell)
+ 			break;
+-		}
++		snd_seq_dispatch_event(cell, atomic, hop);
+ 	}
+ 
+ 	/* free lock */
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 3b126af4a026..ef494ffc1369 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 			}
+ 			return;
+ 		}
++		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+-				return;
++				goto out;
+ 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 		}
+-		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		while (1) {
+ 			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ 			if (count <= 0)
+diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
+index 83ae083b192f..23df6a501648 100644
+--- a/sound/drivers/aloop.c
++++ b/sound/drivers/aloop.c
+@@ -192,6 +192,11 @@ static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
+ 	dpcm->timer.expires = 0;
+ }
+ 
++static inline void loopback_timer_stop_sync(struct loopback_pcm *dpcm)
++{
++	del_timer_sync(&dpcm->timer);
++}
++
+ #define CABLE_VALID_PLAYBACK	(1 << SNDRV_PCM_STREAM_PLAYBACK)
+ #define CABLE_VALID_CAPTURE	(1 << SNDRV_PCM_STREAM_CAPTURE)
+ #define CABLE_VALID_BOTH	(CABLE_VALID_PLAYBACK|CABLE_VALID_CAPTURE)
+@@ -291,6 +296,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ 		cable->pause |= stream;
+ 		loopback_timer_stop(dpcm);
+ 		spin_unlock(&cable->lock);
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			loopback_active_notify(dpcm);
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+@@ -299,6 +306,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
+ 		cable->pause &= ~stream;
+ 		loopback_timer_start(dpcm);
+ 		spin_unlock(&cable->lock);
++		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
++			loopback_active_notify(dpcm);
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -326,6 +335,8 @@ static int loopback_prepare(struct snd_pcm_substream *substream)
+ 	struct loopback_cable *cable = dpcm->cable;
+ 	int bps, salign;
+ 
++	loopback_timer_stop_sync(dpcm);
++
+ 	salign = (snd_pcm_format_width(runtime->format) *
+ 						runtime->channels) / 8;
+ 	bps = salign * runtime->rate;
+@@ -659,7 +670,9 @@ static void free_cable(struct snd_pcm_substream *substream)
+ 		return;
+ 	if (cable->streams[!substream->stream]) {
+ 		/* other stream is still alive */
++		spin_lock_irq(&cable->lock);
+ 		cable->streams[substream->stream] = NULL;
++		spin_unlock_irq(&cable->lock);
+ 	} else {
+ 		/* free the cable */
+ 		loopback->cables[substream->number][dev] = NULL;
+@@ -699,7 +712,6 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ 		loopback->cables[substream->number][dev] = cable;
+ 	}
+ 	dpcm->cable = cable;
+-	cable->streams[substream->stream] = dpcm;
+ 
+ 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+ 
+@@ -731,6 +743,11 @@ static int loopback_open(struct snd_pcm_substream *substream)
+ 		runtime->hw = loopback_pcm_hardware;
+ 	else
+ 		runtime->hw = cable->hw;
++
++	spin_lock_irq(&cable->lock);
++	cable->streams[substream->stream] = dpcm;
++	spin_unlock_irq(&cable->lock);
++
+  unlock:
+ 	if (err < 0) {
+ 		free_cable(substream);
+@@ -745,7 +762,7 @@ static int loopback_close(struct snd_pcm_substream *substream)
+ 	struct loopback *loopback = substream->private_data;
+ 	struct loopback_pcm *dpcm = substream->runtime->private_data;
+ 
+-	loopback_timer_stop(dpcm);
++	loopback_timer_stop_sync(dpcm);
+ 	mutex_lock(&loopback->cable_lock);
+ 	free_cable(substream);
+ 	mutex_unlock(&loopback->cable_lock);
+@@ -815,9 +832,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].rate_shift;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -849,9 +868,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].notify;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -863,12 +884,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol,
+ 	int change = 0;
+ 
+ 	val = ucontrol->value.integer.value[0] ? 1 : 0;
++	mutex_lock(&loopback->cable_lock);
+ 	if (val != loopback->setup[kcontrol->id.subdevice]
+ 				[kcontrol->id.device].notify) {
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			[kcontrol->id.device].notify = val;
+ 		change = 1;
+ 	}
++	mutex_unlock(&loopback->cable_lock);
+ 	return change;
+ }
+ 
+@@ -876,13 +899,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol,
+ 			       struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+-	struct loopback_cable *cable = loopback->cables
+-			[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
++	struct loopback_cable *cable;
++
+ 	unsigned int val = 0;
+ 
+-	if (cable != NULL)
+-		val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
+-									1 : 0;
++	mutex_lock(&loopback->cable_lock);
++	cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
++	if (cable != NULL) {
++		unsigned int running = cable->running ^ cable->pause;
++
++		val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
++	}
++	mutex_unlock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] = val;
+ 	return 0;
+ }
+@@ -925,9 +953,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].rate;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+@@ -947,9 +977,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ 	
++	mutex_lock(&loopback->cable_lock);
+ 	ucontrol->value.integer.value[0] =
+ 		loopback->setup[kcontrol->id.subdevice]
+ 			       [kcontrol->id.device].channels;
++	mutex_unlock(&loopback->cable_lock);
+ 	return 0;
+ }
+ 
+diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
+index ddcc1a325a61..42920a243328 100644
+--- a/sound/drivers/opl3/opl3_synth.c
++++ b/sound/drivers/opl3/opl3_synth.c
+@@ -21,6 +21,7 @@
+ 
+ #include <linux/slab.h>
+ #include <linux/export.h>
++#include <linux/nospec.h>
+ #include <sound/opl3.h>
+ #include <sound/asound_fm.h>
+ 
+@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
+ {
+ 	unsigned short reg_side;
+ 	unsigned char op_offset;
+-	unsigned char voice_offset;
++	unsigned char voice_offset, voice_op;
+ 
+ 	unsigned short opl3_reg;
+ 	unsigned char reg_val;
+@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
+ 		voice_offset = voice->voice - MAX_OPL2_VOICES;
+ 	}
+ 	/* Get register offset of operator */
+-	op_offset = snd_opl3_regmap[voice_offset][voice->op];
++	voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
++	voice_op = array_index_nospec(voice->op, 4);
++	op_offset = snd_opl3_regmap[voice_offset][voice_op];
+ 
+ 	reg_val = 0x00;
+ 	/* Set amplitude modulation (tremolo) effect */
+diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
+index 57df06e76968..cc009a4a3d1d 100644
+--- a/sound/pci/hda/hda_hwdep.c
++++ b/sound/pci/hda/hda_hwdep.c
+@@ -21,6 +21,7 @@
+ #include <linux/init.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/nospec.h>
+ #include <sound/core.h>
+ #include "hda_codec.h"
+ #include "hda_local.h"
+@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
+ 	
+ 	if (get_user(verb, &arg->verb))
+ 		return -EFAULT;
+-	res = get_wcaps(codec, verb >> 24);
++	/* open-code get_wcaps(verb>>24) with nospec */
++	verb >>= 24;
++	if (verb < codec->core.start_nid ||
++	    verb >= codec->core.start_nid + codec->core.num_nodes) {
++		res = 0;
++	} else {
++		verb -= codec->core.start_nid;
++		verb = array_index_nospec(verb, codec->core.num_nodes);
++		res = codec->wcaps[verb];
++	}
+ 	if (put_user(res, &arg->res))
+ 		return -EFAULT;
+ 	return 0;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b8886d493083..8c9345949794 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1393,7 +1393,8 @@ static void azx_check_snoop_available(struct azx *chip)
+ 		 */
+ 		u8 val;
+ 		pci_read_config_byte(chip->pci, 0x42, &val);
+-		if (!(val & 0x80) && chip->pci->revision == 0x30)
++		if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
++				      chip->pci->revision == 0x20))
+ 			snoop = false;
+ 	}
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0fd1402e427b..64214c72a71b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3250,8 +3250,12 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
+ 	pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
+ 	pinval &= ~AC_PINCTL_VREFEN;
+ 	pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
+-	if (spec->mute_led_nid)
++	if (spec->mute_led_nid) {
++		/* temporarily power up/down for setting VREF */
++		snd_hda_power_up_pm(codec);
+ 		snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
++		snd_hda_power_down_pm(codec);
++	}
+ }
+ 
+ /* Make sure the led works even in runtime suspend */
+@@ -6723,6 +6727,7 @@ enum {
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
+ 	ALC662_FIXUP_ASUS_Nx50,
++	ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
+ 	ALC668_FIXUP_ASUS_Nx51,
+ };
+ 
+@@ -6970,14 +6975,21 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC662_FIXUP_BASS_1A
+ 	},
++	[ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_headset_mode_alc668,
++		.chain_id = ALC662_FIXUP_BASS_CHMAP
++	},
+ 	[ALC668_FIXUP_ASUS_Nx51] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{0x1a, 0x90170151}, /* bass speaker */
++			{ 0x19, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
++			{ 0x1a, 0x90170151 }, /* bass speaker */
++			{ 0x1b, 0x03a1113c }, /* use as headset mic, without its own jack detect */
+ 			{}
+ 		},
+ 		.chained = true,
+-		.chain_id = ALC662_FIXUP_BASS_CHMAP,
++		.chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
+ 	},
+ };
+ 
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index 7f6190606f5e..61a8eafc575c 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -137,6 +137,7 @@
+ #include <linux/pci.h>
+ #include <linux/math64.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -5692,40 +5693,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
+ 		struct snd_pcm_channel_info *info)
+ {
+ 	struct hdspm *hdspm = snd_pcm_substream_chip(substream);
++	unsigned int channel = info->channel;
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
++		if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: output channel out of range (%d)\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		if (hdspm->channel_map_out[info->channel] < 0) {
++		channel = array_index_nospec(channel, hdspm->max_channels_out);
++		if (hdspm->channel_map_out[channel] < 0) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: output channel %d mapped out\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		info->offset = hdspm->channel_map_out[info->channel] *
++		info->offset = hdspm->channel_map_out[channel] *
+ 			HDSPM_CHANNEL_BUFFER_BYTES;
+ 	} else {
+-		if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
++		if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: input channel out of range (%d)\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		if (hdspm->channel_map_in[info->channel] < 0) {
++		channel = array_index_nospec(channel, hdspm->max_channels_in);
++		if (hdspm->channel_map_in[channel] < 0) {
+ 			dev_info(hdspm->card->dev,
+ 				 "snd_hdspm_channel_info: input channel %d mapped out\n",
+-				 info->channel);
++				 channel);
+ 			return -EINVAL;
+ 		}
+ 
+-		info->offset = hdspm->channel_map_in[info->channel] *
++		info->offset = hdspm->channel_map_in[channel] *
+ 			HDSPM_CHANNEL_BUFFER_BYTES;
+ 	}
+ 
+diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
+index fdbc0aa2776a..c253bdf92e36 100644
+--- a/sound/pci/rme9652/rme9652.c
++++ b/sound/pci/rme9652/rme9652.c
+@@ -26,6 +26,7 @@
+ #include <linux/pci.h>
+ #include <linux/module.h>
+ #include <linux/io.h>
++#include <linux/nospec.h>
+ 
+ #include <sound/core.h>
+ #include <sound/control.h>
+@@ -2036,9 +2037,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
+ 	if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
+ 		return -EINVAL;
+ 
+-	if ((chn = rme9652->channel_map[info->channel]) < 0) {
++	chn = rme9652->channel_map[array_index_nospec(info->channel,
++						      RME9652_NCHANNELS)];
++	if (chn < 0)
+ 		return -EINVAL;
+-	}
+ 
+ 	info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
+ 	info->first = 0;
+diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
+index 314eaece1b7d..ddf67da394de 100644
+--- a/sound/soc/codecs/ssm2602.c
++++ b/sound/soc/codecs/ssm2602.c
+@@ -54,10 +54,17 @@ struct ssm2602_priv {
+  * using 2 wire for device control, so we cache them instead.
+  * There is no point in caching the reset register
+  */
+-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
+-	0x0097, 0x0097, 0x0079, 0x0079,
+-	0x000a, 0x0008, 0x009f, 0x000a,
+-	0x0000, 0x0000
++static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
++	{ .reg = 0x00, .def = 0x0097 },
++	{ .reg = 0x01, .def = 0x0097 },
++	{ .reg = 0x02, .def = 0x0079 },
++	{ .reg = 0x03, .def = 0x0079 },
++	{ .reg = 0x04, .def = 0x000a },
++	{ .reg = 0x05, .def = 0x0008 },
++	{ .reg = 0x06, .def = 0x009f },
++	{ .reg = 0x07, .def = 0x000a },
++	{ .reg = 0x08, .def = 0x0000 },
++	{ .reg = 0x09, .def = 0x0000 }
+ };
+ 
+ 
+@@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = {
+ 	.volatile_reg = ssm2602_register_volatile,
+ 
+ 	.cache_type = REGCACHE_RBTREE,
+-	.reg_defaults_raw = ssm2602_reg,
+-	.num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
++	.reg_defaults = ssm2602_reg,
++	.num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
+ };
+ EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
+ 
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index 5c7597191e3f..80e3ca115f15 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -143,6 +143,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
+ 
+ 	psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ 
++	/* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
++	if (ratio <= 256) {
++		pm = ratio;
++		fp = 1;
++		goto out;
++	}
++
+ 	/* Set the max fluctuation -- 0.1% of the max divisor */
+ 	savesub = (psr ? 1 : 8)  * 256 * maxfp / 1000;
+ 
+diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c
+index a74c64c7053c..e83da42a8c03 100644
+--- a/sound/soc/intel/atom/sst/sst_stream.c
++++ b/sound/soc/intel/atom/sst/sst_stream.c
+@@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx,
+ 		sst_free_block(sst_drv_ctx, block);
+ out:
+ 	test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id);
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c
+index 20a28b22e30f..5c3a38612c01 100644
+--- a/sound/soc/intel/boards/cht_bsw_rt5645.c
++++ b/sound/soc/intel/boards/cht_bsw_rt5645.c
+@@ -89,6 +89,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = {
+ 	SND_SOC_DAPM_HP("Headphone", NULL),
+ 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ 	SND_SOC_DAPM_MIC("Int Mic", NULL),
++	SND_SOC_DAPM_MIC("Int Analog Mic", NULL),
+ 	SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ 	SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ 			platform_clock_control, SND_SOC_DAPM_POST_PMD),
+@@ -99,6 +100,8 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
+ 	{"IN1N", NULL, "Headset Mic"},
+ 	{"DMIC L1", NULL, "Int Mic"},
+ 	{"DMIC R1", NULL, "Int Mic"},
++	{"IN2P", NULL, "Int Analog Mic"},
++	{"IN2N", NULL, "Int Analog Mic"},
+ 	{"Headphone", NULL, "HPOL"},
+ 	{"Headphone", NULL, "HPOR"},
+ 	{"Ext Spk", NULL, "SPOL"},
+@@ -112,6 +115,9 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
+ 	{"Headphone", NULL, "Platform Clock"},
+ 	{"Headset Mic", NULL, "Platform Clock"},
+ 	{"Int Mic", NULL, "Platform Clock"},
++	{"Int Analog Mic", NULL, "Platform Clock"},
++	{"Int Analog Mic", NULL, "micbias1"},
++	{"Int Analog Mic", NULL, "micbias2"},
+ 	{"Ext Spk", NULL, "Platform Clock"},
+ };
+ 
+@@ -119,6 +125,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = {
+ 	SOC_DAPM_PIN_SWITCH("Headphone"),
+ 	SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Int Mic"),
++	SOC_DAPM_PIN_SWITCH("Int Analog Mic"),
+ 	SOC_DAPM_PIN_SWITCH("Ext Spk"),
+ };
+ 
+diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
+index b6615affe571..fde974d52bb2 100644
+--- a/sound/soc/nuc900/nuc900-ac97.c
++++ b/sound/soc/nuc900/nuc900-ac97.c
+@@ -67,7 +67,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
+ 
+ 	/* polling the AC_R_FINISH */
+ 	while (!(AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_R_FINISH)
+-								&& timeout--)
++								&& --timeout)
+ 		mdelay(1);
+ 
+ 	if (!timeout) {
+@@ -121,7 +121,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+ 
+ 	/* polling the AC_W_FINISH */
+ 	while ((AUDIO_READ(nuc900_audio->mmio + ACTL_ACCON) & AC_W_FINISH)
+-								&& timeout--)
++								&& --timeout)
+ 		mdelay(1);
+ 
+ 	if (!timeout)
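
Both hunks fix the same post-decrement bug: "timeout--" tests the counter
and then decrements it, so when the loop expires the variable holds -1 and
the following "if (!timeout)" error path can never fire. The difference in
miniature (ready() is a hypothetical stand-in for the hardware poll):

int timeout = 0x100;

/* post-decrement: loop exits with timeout == -1 on expiry */
while (!ready() && timeout--)
	mdelay(1);
/* ...so "if (!timeout)" misses the timeout case entirely */

timeout = 0x100;
/* pre-decrement: loop exits with timeout == 0 on expiry */
while (!ready() && --timeout)
	mdelay(1);
if (!timeout)
	pr_err("timed out\n");	/* now reachable */
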
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 4599983cfc8a..c3b9d01d4e91 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -396,6 +396,13 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
+ 		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ 		u32 *buf = (u32 *)(runtime->dma_area +
+ 				   rsnd_dai_pointer_offset(io, 0));
++		int shift = 0;
++
++		switch (runtime->sample_bits) {
++		case 32:
++			shift = 8;
++			break;
++		}
+ 
+ 		/*
+ 		 * 8/16/32 bit data can be accessed via the TDR/RDR registers
+@@ -403,9 +410,9 @@ static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
+ 		 * see rsnd_ssi_init()
+ 		 */
+ 		if (rsnd_io_is_play(io))
+-			rsnd_mod_write(mod, SSITDR, *buf);
++			rsnd_mod_write(mod, SSITDR, (*buf) << shift);
+ 		else
+-			*buf = rsnd_mod_read(mod, SSIRDR);
++			*buf = (rsnd_mod_read(mod, SSIRDR) >> shift);
+ 
+ 		rsnd_dai_pointer_update(io, sizeof(*buf));
+ 	}
+diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
+index cebea9b7f769..6a9be1df7851 100644
+--- a/sound/usb/line6/midi.c
++++ b/sound/usb/line6/midi.c
+@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
+ 	}
+ 
+ 	usb_fill_int_urb(urb, line6->usbdev,
+-			 usb_sndbulkpipe(line6->usbdev,
++			 usb_sndintpipe(line6->usbdev,
+ 					 line6->properties->ep_ctrl_w),
+ 			 transfer_buffer, length, midi_sent, line6,
+ 			 line6->interval);
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 1f8fb0d904e0..f5cf23ffb35b 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -351,8 +351,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ /*
+  * Dell usb dock with ALC4020 codec had a firmware problem where it got
+  * screwed up when zero volume is passed; just skip it as a workaround
++ *
++ * Also the extension unit gives an access error, so skip it as well.
+  */
+ static const struct usbmix_name_map dell_alc4020_map[] = {
++	{ 4, NULL },	/* extension unit */
+ 	{ 16, NULL },
+ 	{ 19, NULL },
+ 	{ 0 }
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index 09b9b74e4c1b..6b169043db1f 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1023,6 +1023,10 @@ static struct syscall_fmt {
+ 	{ .name	    = "mlockall",   .errmsg = true,
+ 	  .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
+ 	{ .name	    = "mmap",	    .hexret = true,
++/* The standard mmap maps to old_mmap on s390x */
++#if defined(__s390x__)
++	.alias = "old_mmap",
++#endif
+ 	  .arg_scnprintf = { [0] = SCA_HEX,	  /* addr */
+ 			     [2] = SCA_MMAP_PROT, /* prot */
+ 			     [3] = SCA_MMAP_FLAGS, /* flags */
+diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
+index f671ec37a7c4..0a9362680aa4 100644
+--- a/tools/perf/tests/code-reading.c
++++ b/tools/perf/tests/code-reading.c
+@@ -140,6 +140,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
+ 	unsigned char buf2[BUFSZ];
+ 	size_t ret_len;
+ 	u64 objdump_addr;
++	const char *objdump_name;
++	char decomp_name[KMOD_DECOMP_LEN];
+ 	int ret;
+ 
+ 	pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
+@@ -200,9 +202,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
+ 		state->done[state->done_cnt++] = al.map->start;
+ 	}
+ 
++	objdump_name = al.map->dso->long_name;
++	if (dso__needs_decompress(al.map->dso)) {
++		if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
++						 decomp_name,
++						 sizeof(decomp_name)) < 0) {
++			pr_debug("decompression failed\n");
++			return -1;
++		}
++
++		objdump_name = decomp_name;
++	}
++
+ 	/* Read the object code using objdump */
+ 	objdump_addr = map__rip_2objdump(al.map, al.addr);
+-	ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
++	ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
++
++	if (dso__needs_decompress(al.map->dso))
++		unlink(objdump_name);
++
+ 	if (ret > 0) {
+ 		/*
+ 		 * The kernel maps are inaccurate - assume objdump is right in
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index 12ad79717d94..36822be05b07 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -221,8 +221,8 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ 	if (machine__is_default_guest(machine))
+ 		return 0;
+ 
+-	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
+-		 machine->root_dir, pid);
++	snprintf(filename, sizeof(filename), "%s/proc/%d/task/%d/maps",
++		 machine->root_dir, pid, pid);
+ 
+ 	fp = fopen(filename, "r");
+ 	if (fp == NULL) {
+diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c
+index 52be201b9b25..90a986740684 100644
+--- a/tools/perf/util/ordered-events.c
++++ b/tools/perf/util/ordered-events.c
+@@ -79,7 +79,7 @@ static union perf_event *dup_event(struct ordered_events *oe,
+ 
+ static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+ {
+-	if (oe->copy_on_queue) {
++	if (event && oe->copy_on_queue) {
+ 		oe->cur_alloc_size -= event->header.size;
+ 		free(event);
+ 	}
+@@ -150,6 +150,7 @@ void ordered_events__delete(struct ordered_events *oe, struct ordered_event *eve
+ 	list_move(&event->list, &oe->cache);
+ 	oe->nr_events--;
+ 	free_dup_event(oe, event->event);
++	event->event = NULL;
+ }
+ 
+ int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
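
The two changes above cooperate: free_dup_event() now tolerates a NULL
event, and ordered_events__delete() clears the pointer after freeing, so
an entry recycled through the cache list cannot be freed twice. The same
defensive pattern in miniature (userspace sketch, hypothetical names):

#include <stdlib.h>

struct entry { void *buf; };

static void release_buf(void *buf)
{
	if (buf)		/* tolerate already-released entries */
		free(buf);
}

static void delete_entry(struct entry *e)
{
	release_buf(e->buf);
	e->buf = NULL;		/* a cached, reused entry won't double-free */
}
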
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 83054ef6c1a1..f947f069449c 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -132,8 +132,14 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
+ 			if (perf_session__open(session) < 0)
+ 				goto out_close;
+ 
+-			perf_session__set_id_hdr_size(session);
+-			perf_session__set_comm_exec(session);
++			/*
++			 * set session attributes that are present in perf.data
++			 * but not in pipe-mode.
++			 */
++			if (!file->is_pipe) {
++				perf_session__set_id_hdr_size(session);
++				perf_session__set_comm_exec(session);
++			}
+ 		}
+ 	}
+ 
+@@ -146,7 +152,11 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
+ 			pr_warning("Cannot read kernel map\n");
+ 	}
+ 
+-	if (tool && tool->ordering_requires_timestamps &&
++	/*
++	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
++	 * processed, so perf_evlist__sample_id_all is not meaningful here.
++	 */
++	if ((!file || !file->is_pipe) && tool && tool->ordering_requires_timestamps &&
+ 	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
+ 		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
+ 		tool->ordered_events = false;
+@@ -1193,6 +1203,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session)
+ 	buf = malloc(cur_size);
+ 	if (!buf)
+ 		return -errno;
++	ordered_events__set_copy_on_queue(oe, true);
+ more:
+ 	event = buf;
+ 	err = readn(fd, event, sizeof(struct perf_event_header));
+diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
+index 2dcfe9a7c8d0..60edec383281 100644
+--- a/tools/perf/util/unwind-libdw.c
++++ b/tools/perf/util/unwind-libdw.c
+@@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip,
+ 		return 0;
+ 
+ 	mod = dwfl_addrmodule(ui->dwfl, ip);
++	if (mod) {
++		Dwarf_Addr s;
++
++		dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
++		if (s != al->map->start)
++			mod = 0;
++	}
++
+ 	if (!mod)
+ 		mod = dwfl_report_elf(ui->dwfl, dso->short_name,
+ 				      dso->long_name, -1, al->map->start,
+diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+index 42d4c8caad81..de8dc82e2567 100644
+--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
++++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+@@ -45,12 +45,12 @@ int test_body(void)
+ 	printf("Check DSCR TM context switch: ");
+ 	fflush(stdout);
+ 	for (;;) {
+-		rv = 1;
+ 		asm __volatile__ (
+ 			/* set a known value into the DSCR */
+ 			"ld      3, %[dscr1];"
+ 			"mtspr   %[sprn_dscr], 3;"
+ 
++			"li      %[rv], 1;"
+ 			/* start and suspend a transaction */
+ 			TBEGIN
+ 			"beq     1f;"
+diff --git a/tools/testing/selftests/rcutorture/bin/configinit.sh b/tools/testing/selftests/rcutorture/bin/configinit.sh
+index 15f1a17ca96e..0b679d8382c7 100755
+--- a/tools/testing/selftests/rcutorture/bin/configinit.sh
++++ b/tools/testing/selftests/rcutorture/bin/configinit.sh
+@@ -51,7 +51,7 @@ then
+ 			mkdir $builddir
+ 		fi
+ 	else
+-		echo Bad build directory: \"$builddir\"
++		echo Bad build directory: \"$buildloc\"
+ 		exit 2
+ 	fi
+ fi
+diff --git a/tools/usb/usbip/src/usbipd.c b/tools/usb/usbip/src/usbipd.c
+index 2a7cd2b8d966..8c5b0faba229 100644
+--- a/tools/usb/usbip/src/usbipd.c
++++ b/tools/usb/usbip/src/usbipd.c
+@@ -451,7 +451,7 @@ static void set_signal(void)
+ 	sigaction(SIGTERM, &act, NULL);
+ 	sigaction(SIGINT, &act, NULL);
+ 	act.sa_handler = SIG_IGN;
+-	sigaction(SIGCLD, &act, NULL);
++	sigaction(SIGCHLD, &act, NULL);
+ }
+ 
+ static const char *pid_file;


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2018-01-23  9:37 Alice Ferrazzi
  0 siblings, 0 replies; 71+ messages in thread
From: Alice Ferrazzi @ 2018-01-23  9:37 UTC (permalink / raw
  To: gentoo-commits

commit:     e50c7e98641a337aac5975ac6fe0fe00036b9714
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 23 09:37:39 2018 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jan 23 09:37:39 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e50c7e98

Linux patch 4.1.49

 0000_README             |    4 +
 1048_linux-4.1.49.patch | 8523 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8527 insertions(+)

diff --git a/0000_README b/0000_README
index 1b3166c..3abfafc 100644
--- a/0000_README
+++ b/0000_README
@@ -235,6 +235,10 @@ Patch:  1047_linux-4.1.48.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.48
 
+Patch:  1048_linux-4.1.49.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.49
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1048_linux-4.1.49.patch b/1048_linux-4.1.49.patch
new file mode 100644
index 0000000..702661c
--- /dev/null
+++ b/1048_linux-4.1.49.patch
@@ -0,0 +1,8523 @@
+diff --git a/Makefile b/Makefile
+index 97edf556bfe4..a51938e99e37 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 48
++SUBLEVEL = 49
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -612,6 +612,10 @@ include arch/$(SRCARCH)/Makefile
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
+ KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
++KBUILD_CFLAGS	+= $(call cc-disable-warning,frame-address,)
++KBUILD_CFLAGS	+= $(call cc-disable-warning, format-truncation)
++KBUILD_CFLAGS	+= $(call cc-disable-warning, format-overflow)
++KBUILD_CFLAGS	+= $(call cc-disable-warning, int-in-bool-context)
+ 
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
+diff --git a/arch/arm/include/asm/floppy.h b/arch/arm/include/asm/floppy.h
+index f4882553fbb0..85a34cc8316a 100644
+--- a/arch/arm/include/asm/floppy.h
++++ b/arch/arm/include/asm/floppy.h
+@@ -17,7 +17,7 @@
+ 
+ #define fd_outb(val,port)			\
+ 	do {					\
+-		if ((port) == FD_DOR)		\
++		if ((port) == (u32)FD_DOR)	\
+ 			fd_setdor((val));	\
+ 		else				\
+ 			outb((val),(port));	\
+diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
+index d995821f1698..aacd6668d1a0 100644
+--- a/arch/arm/include/asm/kvm_arm.h
++++ b/arch/arm/include/asm/kvm_arm.h
+@@ -209,6 +209,7 @@
+ #define HSR_EC_IABT_HYP	(0x21)
+ #define HSR_EC_DABT	(0x24)
+ #define HSR_EC_DABT_HYP	(0x25)
++#define HSR_EC_MAX	(0x3f)
+ 
+ #define HSR_WFI_IS_WFE		(1U << 0)
+ 
+diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
+index 95f12b2ccdcb..f36b5b1acd1f 100644
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -100,7 +100,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	return 1;
+ }
+ 
++static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
++{
++	u32 hsr = kvm_vcpu_get_hsr(vcpu);
++
++	kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
++		      hsr);
++
++	kvm_inject_undefined(vcpu);
++	return 1;
++}
++
+ static exit_handle_fn arm_exit_handlers[] = {
++	[0 ... HSR_EC_MAX]	= kvm_handle_unknown_ec,
+ 	[HSR_EC_WFI]		= kvm_handle_wfx,
+ 	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,
+ 	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,
+@@ -122,13 +134,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
+ {
+ 	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ 
+-	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
+-	    !arm_exit_handlers[hsr_ec]) {
+-		kvm_err("Unknown exception class: hsr: %#08x\n",
+-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
+-		BUG();
+-	}
+-
+ 	return arm_exit_handlers[hsr_ec];
+ }
+ 
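
The "[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec" line uses the GNU C
range-designator extension: every table slot first gets a safe default,
and the specific entries that follow override it, which is why the old
NULL-check-and-BUG() path can be deleted. The idiom on its own
(illustrative types):

typedef int (*handler_fn)(int arg);

static int handle_unknown(int arg) { return -1; }
static int handle_reset(int arg)   { return 0; }

static handler_fn handlers[64] = {
	[0 ... 63] = handle_unknown,	/* default for every slot */
	[7]        = handle_reset,	/* later designators override */
};
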
+diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
+index 4be601b638d7..8129e5f9c94d 100644
+--- a/arch/arm/mach-omap1/dma.c
++++ b/arch/arm/mach-omap1/dma.c
+@@ -31,7 +31,6 @@
+ #include <mach/irqs.h>
+ 
+ #define OMAP1_DMA_BASE			(0xfffed800)
+-#define OMAP1_LOGICAL_DMA_CH_COUNT	17
+ 
+ static u32 enable_1510_mode;
+ 
+@@ -311,8 +310,6 @@ static int __init omap1_system_dma_init(void)
+ 		goto exit_iounmap;
+ 	}
+ 
+-	d->lch_count		= OMAP1_LOGICAL_DMA_CH_COUNT;
+-
+ 	/* Valid attributes for omap1 plus processors */
+ 	if (cpu_is_omap15xx())
+ 		d->dev_caps = ENABLE_1510_MODE;
+@@ -329,13 +326,14 @@ static int __init omap1_system_dma_init(void)
+ 	d->dev_caps		|= CLEAR_CSR_ON_READ;
+ 	d->dev_caps		|= IS_WORD_16;
+ 
+-	if (cpu_is_omap15xx())
+-		d->chan_count = 9;
+-	else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+-		if (!(d->dev_caps & ENABLE_1510_MODE))
+-			d->chan_count = 16;
++	/* available logical channels */
++	if (cpu_is_omap15xx()) {
++		d->lch_count = 9;
++	} else {
++		if (d->dev_caps & ENABLE_1510_MODE)
++			d->lch_count = 9;
+ 		else
+-			d->chan_count = 9;
++			d->lch_count = 16;
+ 	}
+ 
+ 	p = dma_plat_info;
+diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
+index f899e77ff5e6..a9e7733f3c70 100644
+--- a/arch/arm/mach-omap2/gpmc-onenand.c
++++ b/arch/arm/mach-omap2/gpmc-onenand.c
+@@ -363,7 +363,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
+ 	return ret;
+ }
+ 
+-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
++int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+ {
+ 	int err;
+ 	struct device *dev = &gpmc_onenand_device.dev;
+@@ -389,15 +389,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+ 	if (err < 0) {
+ 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
+ 			gpmc_onenand_data->cs, err);
+-		return;
++		return err;
+ 	}
+ 
+ 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
+ 							ONENAND_IO_SIZE - 1;
+ 
+-	if (platform_device_register(&gpmc_onenand_device) < 0) {
++	err = platform_device_register(&gpmc_onenand_device);
++	if (err) {
+ 		dev_err(dev, "Unable to register OneNAND device\n");
+ 		gpmc_cs_free(gpmc_onenand_data->cs);
+-		return;
+ 	}
++
++	return err;
+ }
+diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
+index 2cb587b50905..7ae4f94db1e8 100644
+--- a/arch/arm/mach-ux500/pm.c
++++ b/arch/arm/mach-ux500/pm.c
+@@ -132,8 +132,8 @@ bool prcmu_pending_irq(void)
+  */
+ bool prcmu_is_cpu_in_wfi(int cpu)
+ {
+-	return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
+-		     PRCM_ARM_WFI_STANDBY_WFI0;
++	return readl(PRCM_ARM_WFI_STANDBY) &
++		(cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
+ }
+ 
+ /*
+diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
+index ac6fafb95fe7..4b695bfbb7fd 100644
+--- a/arch/arm64/include/asm/kvm_arm.h
++++ b/arch/arm64/include/asm/kvm_arm.h
+@@ -162,8 +162,7 @@
+ #define VTTBR_X		(37 - VTCR_EL2_T0SZ_40B)
+ #endif
+ 
+-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
+-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
++#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
+ #define VTTBR_VMID_SHIFT  (UL(48))
+ #define VTTBR_VMID_MASK	  (UL(0xFF) << VTTBR_VMID_SHIFT)
+ 
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 4849baa914d8..8f65f969f51c 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -21,6 +21,7 @@
+ /*
+  * User space memory access functions
+  */
++#include <linux/bitops.h>
+ #include <linux/string.h>
+ #include <linux/thread_info.h>
+ 
+@@ -100,6 +101,13 @@ static inline void set_fs(mm_segment_t fs)
+ 	flag;								\
+ })
+ 
++/*
++ * When dealing with data aborts, watchpoints, or instruction traps we may end
++ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
++ * pass on to access_ok(), for instance.
++ */
++#define untagged_addr(addr)		sign_extend64(addr, 55)
++
+ #define access_ok(type, addr, size)	__range_ok(addr, size)
+ #define user_addr_max			get_fs
+ 
+diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
+index e7d934d3afe0..30f92321c00f 100644
+--- a/arch/arm64/kernel/hw_breakpoint.c
++++ b/arch/arm64/kernel/hw_breakpoint.c
+@@ -35,6 +35,7 @@
+ #include <asm/traps.h>
+ #include <asm/cputype.h>
+ #include <asm/system_misc.h>
++#include <asm/uaccess.h>
+ 
+ /* Breakpoint currently in use for each BRP. */
+ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+@@ -688,7 +689,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
+ 
+ 		/* Check if the watchpoint value matches. */
+ 		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
+-		if (val != (addr & ~alignment_mask))
++		if (val != (untagged_addr(addr) & ~alignment_mask))
+ 			goto unlock;
+ 
+ 		/* Possible match, check the byte address select to confirm. */
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index c6b1f3b96f45..a838d628cff7 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -258,6 +258,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ 
+ 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+ 
++	/*
++	 * In case p was allocated the same task_struct pointer as some
++	 * other recently-exited task, make sure p is disassociated from
++	 * any cpu that may have run that now-exited task recently.
++	 * Otherwise we could erroneously skip reloading the FPSIMD
++	 * registers for p.
++	 */
++	fpsimd_flush_task_state(p);
++
+ 	if (likely(!(p->flags & PF_KTHREAD))) {
+ 		*childregs = *current_pt_regs();
+ 		childregs->regs[0] = 0;
+diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
+index af76634f8d98..934573cc1134 100644
+--- a/arch/blackfin/Kconfig
++++ b/arch/blackfin/Kconfig
+@@ -318,11 +318,14 @@ config BF53x
+ 
+ config GPIO_ADI
+ 	def_bool y
++	depends on !PINCTRL
+ 	depends on (BF51x || BF52x || BF53x || BF538 || BF539 || BF561)
+ 
+-config PINCTRL
++config PINCTRL_BLACKFIN_ADI2
+ 	def_bool y
+-	depends on BF54x || BF60x
++	depends on (BF54x || BF60x)
++	select PINCTRL
++	select PINCTRL_ADI2
+ 
+ config MEM_MT48LC64M4A2FB_7E
+ 	bool
+diff --git a/arch/blackfin/Kconfig.debug b/arch/blackfin/Kconfig.debug
+index f3337ee03621..a93cf06a4d6f 100644
+--- a/arch/blackfin/Kconfig.debug
++++ b/arch/blackfin/Kconfig.debug
+@@ -17,6 +17,7 @@ config DEBUG_VERBOSE
+ 
+ config DEBUG_MMRS
+ 	tristate "Generate Blackfin MMR tree"
++	depends on !PINCTRL
+ 	select DEBUG_FS
+ 	help
+ 	  Create a tree of Blackfin MMRs via the debugfs tree.  If
+diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
+index 3c494e84444d..a511ac16a8e3 100644
+--- a/arch/c6x/kernel/ptrace.c
++++ b/arch/c6x/kernel/ptrace.c
+@@ -69,46 +69,6 @@ static int gpr_get(struct task_struct *target,
+ 				   0, sizeof(*regs));
+ }
+ 
+-static int gpr_set(struct task_struct *target,
+-		   const struct user_regset *regset,
+-		   unsigned int pos, unsigned int count,
+-		   const void *kbuf, const void __user *ubuf)
+-{
+-	int ret;
+-	struct pt_regs *regs = task_pt_regs(target);
+-
+-	/* Don't copyin TSR or CSR */
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 0, PT_TSR * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+-					PT_TSR * sizeof(long),
+-					(PT_TSR + 1) * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 (PT_TSR + 1) * sizeof(long),
+-				 PT_CSR * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+-					PT_CSR * sizeof(long),
+-					(PT_CSR + 1) * sizeof(long));
+-	if (ret)
+-		return ret;
+-
+-	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+-				 &regs,
+-				 (PT_CSR + 1) * sizeof(long), -1);
+-	return ret;
+-}
+-
+ enum c6x_regset {
+ 	REGSET_GPR,
+ };
+@@ -120,7 +80,6 @@ static const struct user_regset c6x_regsets[] = {
+ 		.size = sizeof(u32),
+ 		.align = sizeof(u32),
+ 		.get = gpr_get,
+-		.set = gpr_set
+ 	},
+ };
+ 
+diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
+index 7563628822bd..5e2dc7defd2c 100644
+--- a/arch/metag/kernel/ptrace.c
++++ b/arch/metag/kernel/ptrace.c
+@@ -24,6 +24,16 @@
+  * user_regset definitions.
+  */
+ 
++static unsigned long user_txstatus(const struct pt_regs *regs)
++{
++	unsigned long data = (unsigned long)regs->ctx.Flags;
++
++	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
++		data |= USER_GP_REGS_STATUS_CATCH_BIT;
++
++	return data;
++}
++
+ int metag_gp_regs_copyout(const struct pt_regs *regs,
+ 			  unsigned int pos, unsigned int count,
+ 			  void *kbuf, void __user *ubuf)
+@@ -62,9 +72,7 @@ int metag_gp_regs_copyout(const struct pt_regs *regs,
+ 	if (ret)
+ 		goto out;
+ 	/* TXSTATUS */
+-	data = (unsigned long)regs->ctx.Flags;
+-	if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+-		data |= USER_GP_REGS_STATUS_CATCH_BIT;
++	data = user_txstatus(regs);
+ 	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ 				  &data, 4*25, 4*26);
+ 	if (ret)
+@@ -119,6 +127,7 @@ int metag_gp_regs_copyin(struct pt_regs *regs,
+ 	if (ret)
+ 		goto out;
+ 	/* TXSTATUS */
++	data = user_txstatus(regs);
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &data, 4*25, 4*26);
+ 	if (ret)
+@@ -244,6 +253,8 @@ int metag_rp_state_copyin(struct pt_regs *regs,
+ 	unsigned long long *ptr;
+ 	int ret, i;
+ 
++	if (count < 4*13)
++		return -EINVAL;
+ 	/* Read the entire pipeline before making any changes */
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &rp, 0, 4*13);
+@@ -303,7 +314,7 @@ static int metag_tls_set(struct task_struct *target,
+ 			const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	void __user *tls;
++	void __user *tls = target->thread.tls_ptr;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
+index 48e16d98b2cc..6ecc67f3736b 100644
+--- a/arch/mips/configs/ip27_defconfig
++++ b/arch/mips/configs/ip27_defconfig
+@@ -206,7 +206,6 @@ CONFIG_MLX4_EN=m
+ # CONFIG_MLX4_DEBUG is not set
+ CONFIG_TEHUTI=m
+ CONFIG_BNX2X=m
+-CONFIG_QLGE=m
+ CONFIG_SFC=m
+ CONFIG_BE2NET=m
+ CONFIG_LIBERTAS_THINFIRM=m
+diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
+index 8c6f508e59de..554d1da97743 100644
+--- a/arch/mips/dec/int-handler.S
++++ b/arch/mips/dec/int-handler.S
+@@ -146,7 +146,25 @@
+ 		/*
+ 		 * Find irq with highest priority
+ 		 */
+-		 PTR_LA	t1,cpu_mask_nr_tbl
++		# open coded PTR_LA t1, cpu_mask_nr_tbl
++#if (_MIPS_SZPTR == 32)
++		# open coded la t1, cpu_mask_nr_tbl
++		lui	t1, %hi(cpu_mask_nr_tbl)
++		addiu	t1, %lo(cpu_mask_nr_tbl)
++
++#endif
++#if (_MIPS_SZPTR == 64)
++		# open coded dla t1, cpu_mask_nr_tbl
++		.set	push
++		.set	noat
++		lui	t1, %highest(cpu_mask_nr_tbl)
++		lui	AT, %hi(cpu_mask_nr_tbl)
++		daddiu	t1, t1, %higher(cpu_mask_nr_tbl)
++		daddiu	AT, AT, %lo(cpu_mask_nr_tbl)
++		dsll	t1, 32
++		daddu	t1, t1, AT
++		.set	pop
++#endif
+ 1:		lw	t2,(t1)
+ 		nop
+ 		and	t2,t0
+@@ -195,7 +213,25 @@
+ 		/*
+ 		 * Find irq with highest priority
+ 		 */
+-		 PTR_LA	t1,asic_mask_nr_tbl
++		# open coded PTR_LA t1,asic_mask_nr_tbl
++#if (_MIPS_SZPTR == 32)
++		# open coded la t1, asic_mask_nr_tbl
++		lui	t1, %hi(asic_mask_nr_tbl)
++		addiu	t1, %lo(asic_mask_nr_tbl)
++
++#endif
++#if (_MIPS_SZPTR == 64)
++		# open coded dla t1, asic_mask_nr_tbl
++		.set	push
++		.set	noat
++		lui	t1, %highest(asic_mask_nr_tbl)
++		lui	AT, %hi(asic_mask_nr_tbl)
++		daddiu	t1, t1, %higher(asic_mask_nr_tbl)
++		daddiu	AT, AT, %lo(asic_mask_nr_tbl)
++		dsll	t1, 32
++		daddu	t1, t1, AT
++		.set	pop
++#endif
+ 2:		lw	t2,(t1)
+ 		nop
+ 		and	t2,t0
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 81a13fd56d13..2bea5db01b0b 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -484,7 +484,8 @@ static int fpr_set(struct task_struct *target,
+ 					  &target->thread.fpu,
+ 					  0, sizeof(elf_fpregset_t));
+ 
+-	for (i = 0; i < NUM_FPU_REGS; i++) {
++	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
++	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
+ 		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 					 &fpr_val, i * sizeof(elf_fpreg_t),
+ 					 (i + 1) * sizeof(elf_fpreg_t));
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index d01ade63492f..7e2279ac6c0d 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -269,6 +269,11 @@ static void ltq_hw5_irqdispatch(void)
+ DEFINE_HWx_IRQDISPATCH(5)
+ #endif
+ 
++static void ltq_hw_irq_handler(struct irq_desc *desc)
++{
++	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
++}
++
+ #ifdef CONFIG_MIPS_MT_SMP
+ void __init arch_init_ipiirq(int irq, struct irqaction *action)
+ {
+@@ -313,23 +318,19 @@ static struct irqaction irq_call = {
+ asmlinkage void plat_irq_dispatch(void)
+ {
+ 	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+-	unsigned int i;
+-
+-	if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
+-		do_IRQ(MIPS_CPU_TIMER_IRQ);
+-		goto out;
+-	} else {
+-		for (i = 0; i < MAX_IM; i++) {
+-			if (pending & (CAUSEF_IP2 << i)) {
+-				ltq_hw_irqdispatch(i);
+-				goto out;
+-			}
+-		}
++	int irq;
++
++	if (!pending) {
++		spurious_interrupt();
++		return;
+ 	}
+-	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
+ 
+-out:
+-	return;
++	pending >>= CAUSEB_IP;
++	while (pending) {
++		irq = fls(pending) - 1;
++		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
++		pending &= ~BIT(irq);
++	}
+ }
+ 
+ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+@@ -354,11 +355,6 @@ static const struct irq_domain_ops irq_domain_ops = {
+ 	.map = icu_map,
+ };
+ 
+-static struct irqaction cascade = {
+-	.handler = no_action,
+-	.name = "cascade",
+-};
+-
+ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ {
+ 	struct device_node *eiu_node;
+@@ -390,7 +386,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
+ 	mips_cpu_irq_init();
+ 
+ 	for (i = 0; i < MAX_IM; i++)
+-		setup_irq(i + 2, &cascade);
++		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
+ 
+ 	if (cpu_has_vint) {
+ 		pr_info("Setting up vectored interrupts\n");
+diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
+index dc7c5a5214a9..203e4661bc81 100644
+--- a/arch/mips/mm/sc-ip22.c
++++ b/arch/mips/mm/sc-ip22.c
+@@ -31,26 +31,29 @@ static inline void indy_sc_wipe(unsigned long first, unsigned long last)
+ 	unsigned long tmp;
+ 
+ 	__asm__ __volatile__(
+-	".set\tpush\t\t\t# indy_sc_wipe\n\t"
+-	".set\tnoreorder\n\t"
+-	".set\tmips3\n\t"
+-	".set\tnoat\n\t"
+-	"mfc0\t%2, $12\n\t"
+-	"li\t$1, 0x80\t\t\t# Go 64 bit\n\t"
+-	"mtc0\t$1, $12\n\t"
+-
+-	"dli\t$1, 0x9000000080000000\n\t"
+-	"or\t%0, $1\t\t\t# first line to flush\n\t"
+-	"or\t%1, $1\t\t\t# last line to flush\n\t"
+-	".set\tat\n\t"
+-
+-	"1:\tsw\t$0, 0(%0)\n\t"
+-	"bne\t%0, %1, 1b\n\t"
+-	" daddu\t%0, 32\n\t"
+-
+-	"mtc0\t%2, $12\t\t\t# Back to 32 bit\n\t"
+-	"nop; nop; nop; nop;\n\t"
+-	".set\tpop"
++	"	.set	push			# indy_sc_wipe		\n"
++	"	.set	noreorder					\n"
++	"	.set	mips3						\n"
++	"	.set	noat						\n"
++	"	mfc0	%2, $12						\n"
++	"	li	$1, 0x80		# Go 64 bit		\n"
++	"	mtc0	$1, $12						\n"
++	"								\n"
++	"	dli	$1, 0x9000000080000000				\n"
++	"	or	%0, $1			# first line to flush	\n"
++	"	or	%1, $1			# last line to flush	\n"
++	"	.set	at						\n"
++	"								\n"
++	"1:	sw	$0, 0(%0)					\n"
++	"	bne	%0, %1, 1b					\n"
++	"	 daddu	%0, 32						\n"
++	"								\n"
++	"	mtc0	%2, $12			# Back to 32 bit	\n"
++	"	nop				# pipeline hazard	\n"
++	"	nop							\n"
++	"	nop							\n"
++	"	nop							\n"
++	"	.set	pop						\n"
+ 	: "=r" (first), "=r" (last), "=&r" (tmp)
+ 	: "0" (first), "1" (last));
+ }
+diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
+index 09419f67da39..bdfc253958f4 100644
+--- a/arch/mips/ralink/prom.c
++++ b/arch/mips/ralink/prom.c
+@@ -25,8 +25,10 @@ const char *get_system_type(void)
+ 	return soc_info.sys_type;
+ }
+ 
+-static __init void prom_init_cmdline(int argc, char **argv)
++static __init void prom_init_cmdline(void)
+ {
++	int argc;
++	char **argv;
+ 	int i;
+ 
+ 	pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+@@ -55,14 +57,11 @@ static __init void prom_init_cmdline(int argc, char **argv)
+ 
+ void __init prom_init(void)
+ {
+-	int argc;
+-	char **argv;
+-
+ 	prom_soc_init(&soc_info);
+ 
+ 	pr_info("SoC Type: %s\n", get_system_type());
+ 
+-	prom_init_cmdline(argc, argv);
++	prom_init_cmdline();
+ }
+ 
+ void __init prom_free_prom_memory(void)
+diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
+index 738cec865f41..1bd7d704b01b 100644
+--- a/arch/mips/ralink/rt288x.c
++++ b/arch/mips/ralink/rt288x.c
+@@ -40,16 +40,6 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
+ 	{ 0 }
+ };
+ 
+-static void rt288x_wdt_reset(void)
+-{
+-	u32 t;
+-
+-	/* enable WDT reset output on pin SRAM_CS_N */
+-	t = rt_sysc_r32(SYSC_REG_CLKCFG);
+-	t |= CLKCFG_SRAM_CS_N_WDT;
+-	rt_sysc_w32(t, SYSC_REG_CLKCFG);
+-}
+-
+ void __init ralink_clk_init(void)
+ {
+ 	unsigned long cpu_rate, wmac_rate = 40000000;
+diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
+index c40776ab67db..28bf6fbe8b04 100644
+--- a/arch/mips/ralink/rt305x.c
++++ b/arch/mips/ralink/rt305x.c
+@@ -91,17 +91,6 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = {
+ 	{ 0 }
+ };
+ 
+-static void rt305x_wdt_reset(void)
+-{
+-	u32 t;
+-
+-	/* enable WDT reset output on pin SRAM_CS_N */
+-	t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+-	t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
+-		RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
+-	rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
+-}
+-
+ static unsigned long rt5350_get_mem_size(void)
+ {
+ 	void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
+index 5bda39fea483..d7998819954a 100644
+--- a/arch/mips/ralink/rt3883.c
++++ b/arch/mips/ralink/rt3883.c
+@@ -63,16 +63,6 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
+ 	{ 0 }
+ };
+ 
+-static void rt3883_wdt_reset(void)
+-{
+-	u32 t;
+-
+-	/* enable WDT reset output on GPIO 2 */
+-	t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1);
+-	t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT;
+-	rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
+-}
+-
+ void __init ralink_clk_init(void)
+ {
+ 	unsigned long cpu_rate, sys_rate;
+diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
+index b7a4b7e04c38..e8f6b3a42a48 100644
+--- a/arch/mips/sgi-ip22/Platform
++++ b/arch/mips/sgi-ip22/Platform
+@@ -25,7 +25,7 @@ endif
+ # Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
+ #
+ ifdef CONFIG_SGI_IP28
+-  ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n)
++  ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
+       $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
+   endif
+ endif
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index 5cc6b4f1b795..1a836afb636d 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -215,7 +215,7 @@ do {									\
+ 	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
+ 	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
+ 	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
+-	case 8: __get_user_asm2(x, ptr, retval);			\
++	case 8: __get_user_asm2(x, ptr, retval); break;			\
+ 	default: (x) = __get_user_bad();				\
+ 	}								\
+ } while (0)
+diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
+index 5cc2e7af3a7b..b379146de55b 100644
+--- a/arch/powerpc/kvm/emulate.c
++++ b/arch/powerpc/kvm/emulate.c
+@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 			advance = 0;
+ 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
+ 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+-			kvmppc_core_queue_program(vcpu, 0);
+ 		}
+ 	}
+ 
+diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
+index ec2eb20631d1..14ac699d30aa 100644
+--- a/arch/powerpc/perf/hv-24x7.c
++++ b/arch/powerpc/perf/hv-24x7.c
+@@ -503,7 +503,7 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
+ {
+ 	if (s1 < s2)
+ 		return 1;
+-	if (s2 > s1)
++	if (s1 > s2)
+ 		return -1;
+ 
+ 	return memcmp(d1, d2, s1);
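
The one-liner matters because the old second test, "s2 > s1", was the same
condition as the first, leaving the -1 branch dead; two buffers with
s1 > s2 then fell through to memcmp() over s1 bytes, reading past the end
of d2. The repaired contract, stated on its own:

#include <string.h>

/* Order by length first; memcmp() only runs when s1 == s2. */
static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
{
	if (s1 < s2)
		return 1;
	if (s1 > s2)	/* previously "s2 > s1", a dead duplicate */
		return -1;
	return memcmp(d1, d2, s1);
}
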
+diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
+index 693b6cdac691..3fcaa9ef447b 100644
+--- a/arch/powerpc/platforms/powernv/opal-async.c
++++ b/arch/powerpc/platforms/powernv/opal-async.c
+@@ -39,18 +39,18 @@ int __opal_async_get_token(void)
+ 	int token;
+ 
+ 	spin_lock_irqsave(&opal_async_comp_lock, flags);
+-	token = find_first_bit(opal_async_complete_map, opal_max_async_tokens);
++	token = find_first_zero_bit(opal_async_token_map, opal_max_async_tokens);
+ 	if (token >= opal_max_async_tokens) {
+ 		token = -EBUSY;
+ 		goto out;
+ 	}
+ 
+-	if (__test_and_set_bit(token, opal_async_token_map)) {
++	if (!__test_and_clear_bit(token, opal_async_complete_map)) {
+ 		token = -EBUSY;
+ 		goto out;
+ 	}
+ 
+-	__clear_bit(token, opal_async_complete_map);
++	__set_bit(token, opal_async_token_map);
+ 
+ out:
+ 	spin_unlock_irqrestore(&opal_async_comp_lock, flags);
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 16fdcb23f4c3..20974478f8d0 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -469,7 +469,7 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
+ {
+ 	unsigned long ret_freq;
+ 
+-	ret_freq = cpufreq_quick_get(cpu) * 1000ul;
++	ret_freq = cpufreq_get(cpu) * 1000ul;
+ 
+ 	/*
+ 	 * If the backend cpufreq driver does not exist,
+diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
+index ee90db17b097..1629dd5aaacb 100644
+--- a/arch/powerpc/sysdev/axonram.c
++++ b/arch/powerpc/sysdev/axonram.c
+@@ -274,7 +274,9 @@ failed:
+ 			if (bank->disk->major > 0)
+ 				unregister_blkdev(bank->disk->major,
+ 						bank->disk->disk_name);
+-			del_gendisk(bank->disk);
++			if (bank->disk->flags & GENHD_FL_UP)
++				del_gendisk(bank->disk);
++			put_disk(bank->disk);
+ 		}
+ 		device->dev.platform_data = NULL;
+ 		if (bank->io_addr != 0)
+@@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
+ 	device_remove_file(&device->dev, &dev_attr_ecc);
+ 	free_irq(bank->irq_id, device);
+ 	del_gendisk(bank->disk);
++	put_disk(bank->disk);
+ 	iounmap((void __iomem *) bank->io_addr);
+ 	kfree(bank);
+ 
+diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
+index b28733727ed3..2e41a73ad80a 100644
+--- a/arch/powerpc/sysdev/ipic.c
++++ b/arch/powerpc/sysdev/ipic.c
+@@ -843,12 +843,12 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
+ 
+ u32 ipic_get_mcp_status(void)
+ {
+-	return ipic_read(primary_ipic->regs, IPIC_SERMR);
++	return ipic_read(primary_ipic->regs, IPIC_SERSR);
+ }
+ 
+ void ipic_clear_mcp_status(u32 mask)
+ {
+-	ipic_write(primary_ipic->regs, IPIC_SERMR, mask);
++	ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
+ }
+ 
+ /* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
+diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
+index 649eb62c52b3..9e02cb7955c1 100644
+--- a/arch/s390/include/asm/pci_insn.h
++++ b/arch/s390/include/asm/pci_insn.h
+@@ -81,6 +81,6 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+ int zpci_load(u64 *data, u64 req, u64 offset);
+ int zpci_store(u64 data, u64 req, u64 offset);
+ int zpci_store_block(const u64 *data, u64 req, u64 offset);
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+ 
+ #endif
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 1942f22e6694..208511437a28 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -801,10 +801,10 @@ static void __init setup_randomness(void)
+ {
+ 	struct sysinfo_3_2_2 *vmms;
+ 
+-	vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
+-	if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
+-		add_device_randomness(&vmms, vmms->count);
+-	free_page((unsigned long) vmms);
++	vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
++	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
++		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
++	memblock_free((unsigned long) vmms, PAGE_SIZE);
+ }
+ 
+ /*
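
Two bugs are fixed here: alloc_page() returns a struct page pointer, not a
mapped virtual address, and the old call passed "&vmms" -- the address of
the local pointer variable itself -- so the entropy pool was fed stack
bytes rather than VM data. The "&ptr" slip in isolation (hypothetical
names):

#include <stddef.h>

struct blob { char data[64]; };

void feed(const void *p, size_t len);	/* hypothetical sink */

void demo(struct blob *b)
{
	feed(&b, sizeof(b->data));	/* BUG: reads 64 bytes of stack */
	feed(b->data, sizeof(b->data));	/* OK: reads the payload */
}
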
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 50a79a5fc116..666baac18ff0 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -364,7 +364,8 @@ static void zpci_irq_handler(struct airq_struct *airq)
+ 				/* End of second scan with interrupts on. */
+ 				break;
+ 			/* First scan complete, reenable interrupts. */
+-			zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
++			if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC))
++				break;
+ 			si = 0;
+ 			continue;
+ 		}
+@@ -922,7 +923,7 @@ static int __init pci_base_init(void)
+ 	if (!s390_pci_probe)
+ 		return 0;
+ 
+-	if (!test_facility(69) || !test_facility(71) || !test_facility(72))
++	if (!test_facility(69) || !test_facility(71))
+ 		return 0;
+ 
+ 	rc = zpci_debug_init();
+diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
+index 6fd8d5836138..888cc878efaa 100644
+--- a/arch/s390/pci/pci_dma.c
++++ b/arch/s390/pci/pci_dma.c
+@@ -432,7 +432,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 	zdev->dma_table = dma_alloc_cpu_table();
+ 	if (!zdev->dma_table) {
+ 		rc = -ENOMEM;
+-		goto out_clean;
++		goto out;
+ 	}
+ 
+ 	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
+@@ -440,7 +440,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ 	if (!zdev->iommu_bitmap) {
+ 		rc = -ENOMEM;
+-		goto out_reg;
++		goto free_dma_table;
+ 	}
+ 
+ 	rc = zpci_register_ioat(zdev,
+@@ -449,12 +449,16 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
+ 				zdev->start_dma + zdev->iommu_size - 1,
+ 				(u64) zdev->dma_table);
+ 	if (rc)
+-		goto out_reg;
+-	return 0;
++		goto free_bitmap;
+ 
+-out_reg:
++	return 0;
++free_bitmap:
++	vfree(zdev->iommu_bitmap);
++	zdev->iommu_bitmap = NULL;
++free_dma_table:
+ 	dma_free_cpu_table(zdev->dma_table);
+-out_clean:
++	zdev->dma_table = NULL;
++out:
+ 	return rc;
+ }
+ 
+diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
+index 85267c058af8..abccf44c1e65 100644
+--- a/arch/s390/pci/pci_insn.c
++++ b/arch/s390/pci/pci_insn.c
+@@ -7,6 +7,7 @@
+ #include <linux/export.h>
+ #include <linux/errno.h>
+ #include <linux/delay.h>
++#include <asm/facility.h>
+ #include <asm/pci_insn.h>
+ #include <asm/processor.h>
+ 
+@@ -78,11 +79,14 @@ int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
+ }
+ 
+ /* Set Interruption Controls */
+-void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
++int zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+ {
++	if (!test_facility(72))
++		return -EIO;
+ 	asm volatile (
+ 		"	.insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
+ 		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [u] "Q" (*unused));
++	return 0;
+ }
+ 
+ /* PCI Load */
+diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
+index 9ddc4928a089..c1566170964f 100644
+--- a/arch/sparc/kernel/ptrace_64.c
++++ b/arch/sparc/kernel/ptrace_64.c
+@@ -311,7 +311,7 @@ static int genregs64_set(struct task_struct *target,
+ 	}
+ 
+ 	if (!ret) {
+-		unsigned long y;
++		unsigned long y = regs->y;
+ 
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 					 &y,
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 214da575fdc5..49646684b42c 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -2285,9 +2285,16 @@ void __init mem_init(void)
+ {
+ 	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+ 
+-	register_page_bootmem_info();
+ 	free_all_bootmem();
+ 
++	/*
++	 * Must be done after boot memory is put on freelist, because here we
++	 * might set fields in deferred struct pages that have not yet been
++	 * initialized, and free_all_bootmem() initializes all the reserved
++	 * deferred pages for us.
++	 */
++	register_page_bootmem_info();
++
+ 	/*
+ 	 * Set up the zero page, mark it reserved, so that page count
+ 	 * is not manipulated when freeing the page from user ptes.
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index daf8d2b9a217..b7ef6e202bb7 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
+ 	}
+ }
+ 
++static int ghash_async_import(struct ahash_request *req, const void *in)
++{
++	struct ahash_request *cryptd_req = ahash_request_ctx(req);
++	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++	ghash_async_init(req);
++	memcpy(dctx, in, sizeof(*dctx));
++	return 0;
++
++}
++
++static int ghash_async_export(struct ahash_request *req, void *out)
++{
++	struct ahash_request *cryptd_req = ahash_request_ctx(req);
++	struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
++	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
++
++	memcpy(out, dctx, sizeof(*dctx));
++	return 0;
++
++}
++
+ static int ghash_async_digest(struct ahash_request *req)
+ {
+ 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
+ 	.final		= ghash_async_final,
+ 	.setkey		= ghash_async_setkey,
+ 	.digest		= ghash_async_digest,
++	.export		= ghash_async_export,
++	.import		= ghash_async_import,
+ 	.halg = {
+ 		.digestsize	= GHASH_DIGEST_SIZE,
++		.statesize = sizeof(struct ghash_desc_ctx),
+ 		.base = {
+ 			.cra_name		= "ghash",
+ 			.cra_driver_name	= "ghash-clmulni",
+diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
+index 399a29d067d6..cb91a64a99e7 100644
+--- a/arch/x86/crypto/salsa20_glue.c
++++ b/arch/x86/crypto/salsa20_glue.c
+@@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc,
+ 
+ 	salsa20_ivsetup(ctx, walk.iv);
+ 
+-	if (likely(walk.nbytes == nbytes))
+-	{
+-		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+-				      walk.dst.virt.addr, nbytes);
+-		return blkcipher_walk_done(desc, &walk, 0);
+-	}
+-
+ 	while (walk.nbytes >= 64) {
+ 		salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
+ 				      walk.dst.virt.addr,
+diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
+index 5f8f0b3cc674..2c0b0b645a74 100644
+--- a/arch/x86/kernel/kprobes/ftrace.c
++++ b/arch/x86/kernel/kprobes/ftrace.c
+@@ -26,7 +26,7 @@
+ #include "common.h"
+ 
+ static nokprobe_inline
+-int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
++void __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ 		      struct kprobe_ctlblk *kcb, unsigned long orig_ip)
+ {
+ 	/*
+@@ -41,20 +41,21 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ 	__this_cpu_write(current_kprobe, NULL);
+ 	if (orig_ip)
+ 		regs->ip = orig_ip;
+-	return 1;
+ }
+ 
+ int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
+ 		    struct kprobe_ctlblk *kcb)
+ {
+-	if (kprobe_ftrace(p))
+-		return __skip_singlestep(p, regs, kcb, 0);
+-	else
+-		return 0;
++	if (kprobe_ftrace(p)) {
++		__skip_singlestep(p, regs, kcb, 0);
++		preempt_enable_no_resched();
++		return 1;
++	}
++	return 0;
+ }
+ NOKPROBE_SYMBOL(skip_singlestep);
+ 
+-/* Ftrace callback handler for kprobes */
++/* Ftrace callback handler for kprobes -- called under preempt disabled */
+ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 			   struct ftrace_ops *ops, struct pt_regs *regs)
+ {
+@@ -77,13 +78,17 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ 		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
+ 		regs->ip = ip + sizeof(kprobe_opcode_t);
+ 
++		/* To emulate trap based kprobes, preempt_disable here */
++		preempt_disable();
+ 		__this_cpu_write(current_kprobe, p);
+ 		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+-		if (!p->pre_handler || !p->pre_handler(p, regs))
++		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ 			__skip_singlestep(p, regs, kcb, orig_ip);
++			preempt_enable_no_resched();
++		}
+ 		/*
+ 		 * If pre_handler returns !0, it sets regs->ip and
+-		 * resets current kprobe.
++		 * resets current kprobe, and keep preempt count +1.
+ 		 */
+ 	}
+ end:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 665b10a55b30..84f2825f19b5 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3354,7 +3354,7 @@ static void fix_rmode_seg(int seg, struct kvm_segment *save)
+ 	}
+ 
+ 	vmcs_write16(sf->selector, var.selector);
+-	vmcs_write32(sf->base, var.base);
++	vmcs_writel(sf->base, var.base);
+ 	vmcs_write32(sf->limit, var.limit);
+ 	vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
+ }
+@@ -6075,12 +6075,7 @@ static __init int hardware_setup(void)
+ 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
+ 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
+ 
+-	/*
+-	 * Allow direct access to the PC debug port (it is often used for I/O
+-	 * delays, but the vmexits simply slow things down).
+-	 */
+ 	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
+-	clear_bit(0x80, vmx_io_bitmap_a);
+ 
+ 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
+ 
+@@ -10140,8 +10135,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+  */
+ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
+ {
+-	if (is_guest_mode(vcpu))
++	if (is_guest_mode(vcpu)) {
++		to_vmx(vcpu)->nested.nested_run_pending = 0;
+ 		nested_vmx_vmexit(vcpu, -1, 0, 0);
++	}
+ 	free_nested(to_vmx(vcpu));
+ }
+ 
+diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
+index bb461cfd01ab..526536c81ddc 100644
+--- a/arch/x86/pci/broadcom_bus.c
++++ b/arch/x86/pci/broadcom_bus.c
+@@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
+ 	 * We should get host bridge information from ACPI unless the BIOS
+ 	 * doesn't support it.
+ 	 */
+-	if (acpi_os_get_root_pointer())
++	if (!acpi_disabled && acpi_os_get_root_pointer())
+ 		return 0;
+ #endif
+ 
+diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
+index 55b6f15dac90..3356fd91bc61 100644
+--- a/block/scsi_ioctl.c
++++ b/block/scsi_ioctl.c
+@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
+ 	__set_bit(WRITE_16, filter->write_ok);
+ 	__set_bit(WRITE_LONG, filter->write_ok);
+ 	__set_bit(WRITE_LONG_2, filter->write_ok);
++	__set_bit(WRITE_SAME, filter->write_ok);
++	__set_bit(WRITE_SAME_16, filter->write_ok);
++	__set_bit(WRITE_SAME_32, filter->write_ok);
+ 	__set_bit(ERASE, filter->write_ok);
+ 	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
+ 	__set_bit(MODE_SELECT, filter->write_ok);
+diff --git a/crypto/Makefile b/crypto/Makefile
+index 97b7d3ac87e7..16766ced6a44 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -47,6 +47,7 @@ obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
+ obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
+ obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
+ obj-$(CONFIG_CRYPTO_WP512) += wp512.o
++CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
+ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
+ obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
+ obj-$(CONFIG_CRYPTO_ECB) += ecb.o
+@@ -68,6 +69,7 @@ obj-$(CONFIG_CRYPTO_BLOWFISH_COMMON) += blowfish_common.o
+ obj-$(CONFIG_CRYPTO_TWOFISH) += twofish_generic.o
+ obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
+ obj-$(CONFIG_CRYPTO_SERPENT) += serpent_generic.o
++CFLAGS_serpent_generic.o := $(call cc-option,-fsched-pressure)  # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149
+ obj-$(CONFIG_CRYPTO_AES) += aes_generic.o
+ obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia_generic.o
+ obj-$(CONFIG_CRYPTO_CAST_COMMON) += cast_common.o
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index d7a3435280d8..db83dabe5fc9 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -184,7 +184,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	struct hash_ctx *ctx = ask->private;
+ 	struct ahash_request *req = &ctx->req;
+-	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
++	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
+ 	struct sock *sk2;
+ 	struct alg_sock *ask2;
+ 	struct hash_ctx *ctx2;
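
The one-line change above sizes the on-stack state buffer with GCC's two-operand conditional, statesize ? : 1, so a transform reporting a zero state size can no longer create a zero-length variable-length array. A small userspace illustration of the operator (all names invented for the example):

    #include <stdio.h>

    static unsigned int reported_statesize(void)
    {
            return 0;   /* mimics a transform that exports no state */
    }

    int main(void)
    {
            unsigned int sz = reported_statesize();
            char state[sz ? : 1];   /* "a ?: b" is a when a is nonzero, else b */

            state[0] = 0;           /* always at least one byte to touch */
            printf("array length used: %u\n", sz ? : 1u);
            return 0;
    }
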
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index 1995d6d962f5..2eac68e51433 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -399,6 +399,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
+ 	ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA;
+ 
+ 	/* Discard the BIT STRING metadata */
++	if (vlen < 1 || *(const u8 *)value != 0)
++		return -EBADMSG;
+ 	ctx->key = value + 1;
+ 	ctx->key_size = vlen - 1;
+ 	return 0;
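
The added check matches the DER layout of a BIT STRING: the first content octet counts unused trailing bits and must be zero for key material, so an empty value or a nonzero lead octet is malformed and is rejected before the metadata byte is skipped. A hedged userspace sketch of the same validation:

    #include <errno.h>
    #include <stddef.h>

    static int extract_bit_string(const unsigned char *value, size_t vlen,
                                  const unsigned char **key, size_t *key_size)
    {
            /* First octet = number of unused bits; must be 0 here. */
            if (vlen < 1 || value[0] != 0)
                    return -EBADMSG;
            *key = value + 1;       /* skip the unused-bits octet */
            *key_size = vlen - 1;
            return 0;
    }
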
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index 34e4dfafb94f..66c9e8262572 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -633,6 +633,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ 	inst->alg.halg.base.cra_flags = type;
+ 
+ 	inst->alg.halg.digestsize = salg->digestsize;
++	inst->alg.halg.statesize = salg->statesize;
+ 	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
+ 
+ 	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
+diff --git a/crypto/hmac.c b/crypto/hmac.c
+index 72e38c098bb3..ba07fb6221ae 100644
+--- a/crypto/hmac.c
++++ b/crypto/hmac.c
+@@ -194,11 +194,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+ 	salg = shash_attr_alg(tb[1], 0, 0);
+ 	if (IS_ERR(salg))
+ 		return PTR_ERR(salg);
++	alg = &salg->base;
+ 
++	/* The underlying hash algorithm must be unkeyed */
+ 	err = -EINVAL;
++	if (crypto_shash_alg_has_setkey(salg))
++		goto out_put_alg;
++
+ 	ds = salg->digestsize;
+ 	ss = salg->statesize;
+-	alg = &salg->base;
+ 	if (ds > alg->cra_blocksize ||
+ 	    ss < alg->cra_blocksize)
+ 		goto out_put_alg;
+diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
+index fe5b495a434d..cfb68a889ef6 100644
+--- a/crypto/mcryptd.c
++++ b/crypto/mcryptd.c
+@@ -526,6 +526,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+ 	inst->alg.halg.base.cra_flags = type;
+ 
+ 	inst->alg.halg.digestsize = salg->digestsize;
++	inst->alg.halg.statesize = salg->statesize;
+ 	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
+ 
+ 	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
+diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
+index f550b5d94630..d7da0eea5622 100644
+--- a/crypto/salsa20_generic.c
++++ b/crypto/salsa20_generic.c
+@@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc,
+ 
+ 	salsa20_ivsetup(ctx, walk.iv);
+ 
+-	if (likely(walk.nbytes == nbytes))
+-	{
+-		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+-				      walk.src.virt.addr, nbytes);
+-		return blkcipher_walk_done(desc, &walk, 0);
+-	}
+-
+ 	while (walk.nbytes >= 64) {
+ 		salsa20_encrypt_bytes(ctx, walk.dst.virt.addr,
+ 				      walk.src.virt.addr,
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 17510eaf0a36..73c065321867 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -24,11 +24,12 @@
+ 
+ static const struct crypto_type crypto_shash_type;
+ 
+-static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
+-			   unsigned int keylen)
++int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
++		    unsigned int keylen)
+ {
+ 	return -ENOSYS;
+ }
++EXPORT_SYMBOL_GPL(shash_no_setkey);
+ 
+ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
+ 				  unsigned int keylen)
+diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
+index cc79d3fedfb2..493811b895bd 100644
+--- a/drivers/acpi/video.c
++++ b/drivers/acpi/video.c
+@@ -1276,6 +1276,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
+ 	union acpi_object *dod = NULL;
+ 	union acpi_object *obj;
+ 
++	if (!video->cap._DOD)
++		return AE_NOT_EXIST;
++
+ 	status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
+ 	if (!ACPI_SUCCESS(status)) {
+ 		ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index 7dbba387d12a..18de4c457068 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -1480,7 +1480,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
+ 		break;
+ 
+ 	default:
+-		WARN_ON_ONCE(1);
+ 		return AC_ERR_SYSTEM;
+ 	}
+ 
+diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
+index 527bbd595e37..d9b762a62e25 100644
+--- a/drivers/atm/horizon.c
++++ b/drivers/atm/horizon.c
+@@ -2804,7 +2804,7 @@ out:
+ 	return err;
+ 
+ out_free_irq:
+-	free_irq(dev->irq, dev);
++	free_irq(irq, dev);
+ out_free:
+ 	kfree(dev);
+ out_release:
+diff --git a/drivers/base/isa.c b/drivers/base/isa.c
+index 91dba65d7264..901d8185309e 100644
+--- a/drivers/base/isa.c
++++ b/drivers/base/isa.c
+@@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
+ {
+ 	struct isa_driver *isa_driver = dev->platform_data;
+ 
+-	if (isa_driver->probe)
++	if (isa_driver && isa_driver->probe)
+ 		return isa_driver->probe(dev, to_isa_dev(dev)->id);
+ 
+ 	return 0;
+@@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
+ {
+ 	struct isa_driver *isa_driver = dev->platform_data;
+ 
+-	if (isa_driver->remove)
++	if (isa_driver && isa_driver->remove)
+ 		return isa_driver->remove(dev, to_isa_dev(dev)->id);
+ 
+ 	return 0;
+@@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
+ {
+ 	struct isa_driver *isa_driver = dev->platform_data;
+ 
+-	if (isa_driver->shutdown)
++	if (isa_driver && isa_driver->shutdown)
+ 		isa_driver->shutdown(dev, to_isa_dev(dev)->id);
+ }
+ 
+@@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
+ {
+ 	struct isa_driver *isa_driver = dev->platform_data;
+ 
+-	if (isa_driver->suspend)
++	if (isa_driver && isa_driver->suspend)
+ 		return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
+ 
+ 	return 0;
+@@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
+ {
+ 	struct isa_driver *isa_driver = dev->platform_data;
+ 
+-	if (isa_driver->resume)
++	if (isa_driver && isa_driver->resume)
+ 		return isa_driver->resume(dev, to_isa_dev(dev)->id);
+ 
+ 	return 0;
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 0c13dfd1c29d..32f5b87fe93c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -945,6 +945,10 @@ static int btusb_open(struct hci_dev *hdev)
+ 		return err;
+ 
+ 	data->intf->needs_remote_wakeup = 1;
++	/* A device-specific wakeup source is enabled and required for
++	 * USB remote wakeup while the host is suspended.
++	 */
++	device_wakeup_enable(&data->udev->dev);
+ 
+ 	if (test_and_set_bit(HCI_RUNNING, &hdev->flags))
+ 		goto done;
+@@ -1008,6 +1012,7 @@ static int btusb_close(struct hci_dev *hdev)
+ 		goto failed;
+ 
+ 	data->intf->needs_remote_wakeup = 0;
++	device_wakeup_disable(&data->udev->dev);
+ 	usb_autopm_put_interface(data->intf);
+ 
+ failed:
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index 4d523cfe51ce..31342fccd290 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -1157,6 +1157,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
+ 
+ 	/* Perf driver registration */
+ 	ccn->dt.pmu = (struct pmu) {
++		.module = THIS_MODULE,
+ 		.attr_groups = arm_ccn_pmu_attr_groups,
+ 		.task_ctx_nr = perf_invalid_context,
+ 		.event_init = arm_ccn_pmu_event_init,
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index 696ef1d56b4f..5809567d3cf0 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
+ 	iowrite32(intmask,
+ 		  chip->vendor.iobase +
+ 		  TPM_INT_ENABLE(chip->vendor.locality));
+-	free_irq(chip->vendor.irq, chip);
++	devm_free_irq(chip->pdev, chip->vendor.irq, chip);
+ 	chip->vendor.irq = 0;
+ }
+ 
+diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
+index 4b26509fc218..803ef926d682 100644
+--- a/drivers/clk/tegra/clk-tegra30.c
++++ b/drivers/clk/tegra/clk-tegra30.c
+@@ -1064,7 +1064,7 @@ static void __init tegra30_super_clk_init(void)
+ 	 * U71 divider of cclk_lp.
+ 	 */
+ 	clk = tegra_clk_register_divider("pll_p_out3_cclklp", "pll_p_out3",
+-				clk_base + SUPER_CCLKG_DIVIDER, 0,
++				clk_base + SUPER_CCLKLP_DIVIDER, 0,
+ 				TEGRA_DIVIDER_INT, 16, 8, 1, NULL);
+ 	clk_register_clkdev(clk, "pll_p_out3_cclklp", NULL);
+ 
+diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
+index 36d936fb259e..4f0c4a3cc5c5 100644
+--- a/drivers/crypto/s5p-sss.c
++++ b/drivers/crypto/s5p-sss.c
+@@ -682,8 +682,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
+ 		dev_warn(dev, "feed control interrupt is not available.\n");
+ 		goto err_irq;
+ 	}
+-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
+-			       IRQF_SHARED, pdev->name, pdev);
++	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
++					s5p_aes_interrupt, IRQF_ONESHOT,
++					pdev->name, pdev);
+ 	if (err < 0) {
+ 		dev_warn(dev, "feed control interrupt is not available.\n");
+ 		goto err_irq;
+diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
+index 3ddfd1f6c23c..3345a0acc975 100644
+--- a/drivers/dma/dmaengine.c
++++ b/drivers/dma/dmaengine.c
+@@ -1002,12 +1002,14 @@ static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
+ 	switch (order) {
+ 	case 0 ... 1:
+ 		return &unmap_pool[0];
++#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
+ 	case 2 ... 4:
+ 		return &unmap_pool[1];
+ 	case 5 ... 7:
+ 		return &unmap_pool[2];
+ 	case 8:
+ 		return &unmap_pool[3];
++#endif
+ 	default:
+ 		BUG();
+ 		return NULL;
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index 897ec0f8d718..2e9bc49d30ec 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -148,6 +148,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)");
+ #define PATTERN_OVERWRITE	0x20
+ #define PATTERN_COUNT_MASK	0x1f
+ 
++/* poor man's completion - we want to use wait_event_freezable() on it */
++struct dmatest_done {
++	bool			done;
++	wait_queue_head_t	*wait;
++};
++
+ struct dmatest_thread {
+ 	struct list_head	node;
+ 	struct dmatest_info	*info;
+@@ -156,6 +162,8 @@ struct dmatest_thread {
+ 	u8			**srcs;
+ 	u8			**dsts;
+ 	enum dma_transaction_type type;
++	wait_queue_head_t done_wait;
++	struct dmatest_done test_done;
+ 	bool			done;
+ };
+ 
+@@ -316,18 +324,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
+ 	return error_count;
+ }
+ 
+-/* poor man's completion - we want to use wait_event_freezable() on it */
+-struct dmatest_done {
+-	bool			done;
+-	wait_queue_head_t	*wait;
+-};
+ 
+ static void dmatest_callback(void *arg)
+ {
+ 	struct dmatest_done *done = arg;
+-
+-	done->done = true;
+-	wake_up_all(done->wait);
++	struct dmatest_thread *thread =
++		container_of(done, struct dmatest_thread, test_done);
++	if (!thread->done) {
++		done->done = true;
++		wake_up_all(done->wait);
++	} else {
++		/*
++		 * If thread->done is set, this callback occurred after
++		 * the parent thread has cleaned up. This can happen
++		 * when the driver doesn't implement the terminate_all()
++		 * functionality and a DMA operation did not complete
++		 * within the timeout period.
++		 */
++		WARN(1, "dmatest: Kernel memory may be corrupted!!\n");
++	}
+ }
+ 
+ static unsigned int min_odd(unsigned int x, unsigned int y)
+@@ -398,9 +413,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len)
+  */
+ static int dmatest_func(void *data)
+ {
+-	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
+ 	struct dmatest_thread	*thread = data;
+-	struct dmatest_done	done = { .wait = &done_wait };
++	struct dmatest_done	*done = &thread->test_done;
+ 	struct dmatest_info	*info;
+ 	struct dmatest_params	*params;
+ 	struct dma_chan		*chan;
+@@ -605,9 +619,9 @@ static int dmatest_func(void *data)
+ 			continue;
+ 		}
+ 
+-		done.done = false;
++		done->done = false;
+ 		tx->callback = dmatest_callback;
+-		tx->callback_param = &done;
++		tx->callback_param = done;
+ 		cookie = tx->tx_submit(tx);
+ 
+ 		if (dma_submit_error(cookie)) {
+@@ -620,21 +634,12 @@ static int dmatest_func(void *data)
+ 		}
+ 		dma_async_issue_pending(chan);
+ 
+-		wait_event_freezable_timeout(done_wait, done.done,
++		wait_event_freezable_timeout(thread->done_wait, done->done,
+ 					     msecs_to_jiffies(params->timeout));
+ 
+ 		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ 
+-		if (!done.done) {
+-			/*
+-			 * We're leaving the timed out dma operation with
+-			 * dangling pointer to done_wait.  To make this
+-			 * correct, we'll need to allocate wait_done for
+-			 * each test iteration and perform "who's gonna
+-			 * free it this time?" dancing.  For now, just
+-			 * leave it dangling.
+-			 */
+-			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
++		if (!done->done) {
+ 			dmaengine_unmap_put(um);
+ 			result("test timed out", total_tests, src_off, dst_off,
+ 			       len, 0);
+@@ -708,7 +713,7 @@ err_thread_type:
+ 		dmatest_KBs(runtime, total_len), ret);
+ 
+ 	/* terminate all transfers on specified channels */
+-	if (ret)
++	if (ret || failed_tests)
+ 		dmaengine_terminate_all(chan);
+ 
+ 	thread->done = true;
+@@ -766,6 +771,8 @@ static int dmatest_add_threads(struct dmatest_info *info,
+ 		thread->info = info;
+ 		thread->chan = dtc->chan;
+ 		thread->type = type;
++		thread->test_done.wait = &thread->done_wait;
++		init_waitqueue_head(&thread->done_wait);
+ 		smp_wmb();
+ 		thread->task = kthread_create(dmatest_func, thread, "%s-%s%u",
+ 				dma_chan_name(chan), op, i);
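
The dmatest rework above fixes a use-after-return: the completion object that a late DMA callback dereferences now lives in the long-lived dmatest_thread rather than on dmatest_func()'s stack, and a completion arriving after the thread has finished is loudly reported instead of scribbling on freed stack memory. The resulting ownership, sketched with simplified demo_* types:

    #include <linux/kernel.h>
    #include <linux/wait.h>

    struct demo_done {
            bool done;
            wait_queue_head_t *wait;
    };

    struct demo_thread {
            wait_queue_head_t done_wait;    /* valid for the thread's lifetime */
            struct demo_done test_done;     /* what the callback receives */
            bool done;                      /* thread finished its run */
    };

    static void demo_callback(void *arg)
    {
            struct demo_done *done = arg;
            struct demo_thread *t =
                    container_of(done, struct demo_thread, test_done);

            if (!t->done) {
                    done->done = true;
                    wake_up_all(done->wait);
            } else {
                    /* Late completion: memory is still valid, just warn. */
                    WARN(1, "demo: completion after thread exit\n");
            }
    }
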
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 3dabc52b9615..cb731749205a 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -1651,7 +1651,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
+ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ {
+ 	struct pl330_thread *thrd = NULL;
+-	unsigned long flags;
+ 	int chans, i;
+ 
+ 	if (pl330->state == DYING)
+@@ -1659,8 +1658,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ 
+ 	chans = pl330->pcfg.num_chan;
+ 
+-	spin_lock_irqsave(&pl330->lock, flags);
+-
+ 	for (i = 0; i < chans; i++) {
+ 		thrd = &pl330->channels[i];
+ 		if ((thrd->free) && (!_manager_ns(thrd) ||
+@@ -1678,8 +1675,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
+ 		thrd = NULL;
+ 	}
+ 
+-	spin_unlock_irqrestore(&pl330->lock, flags);
+-
+ 	return thrd;
+ }
+ 
+@@ -1697,7 +1692,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
+ static void pl330_release_channel(struct pl330_thread *thrd)
+ {
+ 	struct pl330_dmac *pl330;
+-	unsigned long flags;
+ 
+ 	if (!thrd || thrd->free)
+ 		return;
+@@ -1709,10 +1703,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
+ 
+ 	pl330 = thrd->dmac;
+ 
+-	spin_lock_irqsave(&pl330->lock, flags);
+ 	_free_event(thrd, thrd->ev);
+ 	thrd->free = true;
+-	spin_unlock_irqrestore(&pl330->lock, flags);
+ }
+ 
+ /* Initialize the structure for PL330 configuration, that can be used
+@@ -2077,20 +2069,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
+ 	struct pl330_dmac *pl330 = pch->dmac;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pch->lock, flags);
++	spin_lock_irqsave(&pl330->lock, flags);
+ 
+ 	dma_cookie_init(chan);
+ 	pch->cyclic = false;
+ 
+ 	pch->thread = pl330_request_channel(pl330);
+ 	if (!pch->thread) {
+-		spin_unlock_irqrestore(&pch->lock, flags);
++		spin_unlock_irqrestore(&pl330->lock, flags);
+ 		return -ENOMEM;
+ 	}
+ 
+ 	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
+ 
+-	spin_unlock_irqrestore(&pch->lock, flags);
++	spin_unlock_irqrestore(&pl330->lock, flags);
+ 
+ 	return 1;
+ }
+@@ -2188,12 +2180,13 @@ static int pl330_pause(struct dma_chan *chan)
+ static void pl330_free_chan_resources(struct dma_chan *chan)
+ {
+ 	struct dma_pl330_chan *pch = to_pchan(chan);
++	struct pl330_dmac *pl330 = pch->dmac;
+ 	unsigned long flags;
+ 
+ 	tasklet_kill(&pch->task);
+ 
+ 	pm_runtime_get_sync(pch->dmac->ddma.dev);
+-	spin_lock_irqsave(&pch->lock, flags);
++	spin_lock_irqsave(&pl330->lock, flags);
+ 
+ 	pl330_release_channel(pch->thread);
+ 	pch->thread = NULL;
+@@ -2201,7 +2194,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
+ 	if (pch->cyclic)
+ 		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
+ 
+-	spin_unlock_irqrestore(&pch->lock, flags);
++	spin_unlock_irqrestore(&pl330->lock, flags);
+ 	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
+ 	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
+ }
+diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
+index 72e07e3cf718..16e0eb523439 100644
+--- a/drivers/edac/i5000_edac.c
++++ b/drivers/edac/i5000_edac.c
+@@ -227,7 +227,7 @@
+ #define			NREC_RDWR(x)		(((x)>>11) & 1)
+ #define			NREC_RANK(x)		(((x)>>8) & 0x7)
+ #define		NRECMEMB		0xC0
+-#define			NREC_CAS(x)		(((x)>>16) & 0xFFFFFF)
++#define			NREC_CAS(x)		(((x)>>16) & 0xFFF)
+ #define			NREC_RAS(x)		((x) & 0x7FFF)
+ #define		NRECFGLOG		0xC4
+ #define		NREEECFBDA		0xC8
+@@ -371,7 +371,7 @@ struct i5000_error_info {
+ 	/* These registers are input ONLY if there was a
+ 	 * Non-Recoverable Error */
+ 	u16 nrecmema;		/* Non-Recoverable Mem log A */
+-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
++	u32 nrecmemb;		/* Non-Recoverable Mem log B */
+ 
+ };
+ 
+@@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
+ 				NERR_FAT_FBD, &info->nerr_fat_fbd);
+ 		pci_read_config_word(pvt->branchmap_werrors,
+ 				NRECMEMA, &info->nrecmema);
+-		pci_read_config_word(pvt->branchmap_werrors,
++		pci_read_config_dword(pvt->branchmap_werrors,
+ 				NRECMEMB, &info->nrecmemb);
+ 
+ 		/* Clear the error bits, by writing them back */
+@@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
+ 			dimm->mtype = MEM_FB_DDR2;
+ 
+ 			/* ask what device type on this row */
+-			if (MTR_DRAM_WIDTH(mtr))
++			if (MTR_DRAM_WIDTH(mtr) == 8)
+ 				dimm->dtype = DEV_X8;
+ 			else
+ 				dimm->dtype = DEV_X4;
+diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
+index 6ef6ad1ba16e..2ea2f32e608b 100644
+--- a/drivers/edac/i5400_edac.c
++++ b/drivers/edac/i5400_edac.c
+@@ -368,7 +368,7 @@ struct i5400_error_info {
+ 
+ 	/* These registers are input ONLY if there was a Non-Rec Error */
+ 	u16 nrecmema;		/* Non-Recoverable Mem log A */
+-	u16 nrecmemb;		/* Non-Recoverable Mem log B */
++	u32 nrecmemb;		/* Non-Recoverable Mem log B */
+ 
+ };
+ 
+@@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
+ 				NERR_FAT_FBD, &info->nerr_fat_fbd);
+ 		pci_read_config_word(pvt->branchmap_werrors,
+ 				NRECMEMA, &info->nrecmema);
+-		pci_read_config_word(pvt->branchmap_werrors,
++		pci_read_config_dword(pvt->branchmap_werrors,
+ 				NRECMEMB, &info->nrecmemb);
+ 
+ 		/* Clear the error bits, by writing them back */
+@@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
+ 
+ 			dimm->nr_pages = size_mb << 8;
+ 			dimm->grain = 8;
+-			dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
++			dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
++				      DEV_X8 : DEV_X4;
+ 			dimm->mtype = MEM_FB_DDR2;
+ 			/*
+ 			 * The eccc mechanism is SDDC (aka SECC), with
+ 			 * is similar to Chipkill.
+ 			 */
+-			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
++			dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
+ 					  EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+ 			ndimms++;
+ 		}
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index cb46c468b01e..25e51151b957 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1700,6 +1700,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
+ 			break;
+ 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+ 			pvt->pci_ta = pdev;
++			break;
+ 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+ 			pvt->pci_ras = pdev;
+ 			break;
+diff --git a/drivers/gpio/gpio-74xx-mmio.c b/drivers/gpio/gpio-74xx-mmio.c
+index 0763655cca6c..6ed7c0fb3378 100644
+--- a/drivers/gpio/gpio-74xx-mmio.c
++++ b/drivers/gpio/gpio-74xx-mmio.c
+@@ -129,7 +129,7 @@ static int mmio_74xx_gpio_probe(struct platform_device *pdev)
+ 	if (IS_ERR(dat))
+ 		return PTR_ERR(dat);
+ 
+-	priv->flags = (unsigned)of_id->data;
++	priv->flags = (uintptr_t) of_id->data;
+ 
+ 	err = bgpio_init(&priv->bgc, &pdev->dev,
+ 			 DIV_ROUND_UP(MMIO_74XX_BIT_CNT(priv->flags), 8),
+diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
+index 9e7f93bcd3ea..15031b46c9c4 100644
+--- a/drivers/hv/hv_balloon.c
++++ b/drivers/hv/hv_balloon.c
+@@ -748,7 +748,7 @@ static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+ 		 * If the pfn range we are dealing with is not in the current
+ 		 * "hot add block", move on.
+ 		 */
+-		if ((start_pfn >= has->end_pfn))
++		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ 			continue;
+ 
+ 		/*
+@@ -811,7 +811,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
+ 		 * If the pfn range we are dealing with is not in the current
+ 		 * "hot add block", move on.
+ 		 */
+-		if ((start_pfn >= has->end_pfn))
++		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
+ 			continue;
+ 
+ 		old_covered_state = has->covered_end_pfn;
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index d8803c3bbfdc..16833365475f 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
+ 	}
+ 
+ 	if (riic->is_last || riic->err) {
+-		riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
++		riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
+ 		writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
++	} else {
++		/* Transfer is complete, but do not send STOP */
++		riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
++		complete(&riic->msg_done);
+ 	}
+ 
+ 	return IRQ_HANDLED;
+diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
+index a0e7161f040c..58e1a54fc119 100644
+--- a/drivers/iio/adc/ti_am335x_adc.c
++++ b/drivers/iio/adc/ti_am335x_adc.c
+@@ -121,7 +121,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
+ {
+ 	struct iio_dev *indio_dev = private;
+ 	struct tiadc_device *adc_dev = iio_priv(indio_dev);
+-	unsigned int status, config;
++	unsigned int status, config, adc_fsm;
++	unsigned short count = 0;
++
+ 	status = tiadc_readl(adc_dev, REG_IRQSTATUS);
+ 
+ 	/*
+@@ -135,6 +137,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private)
+ 		tiadc_writel(adc_dev, REG_CTRL, config);
+ 		tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
+ 				| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
++
++		/* Wait for the idle state: the ADC needs to finish
++		 * the current conversion before the module is
++		 * disabled.
++		 */
++		do {
++			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
++		} while (adc_fsm != 0x10 && count++ < 100);
++
+ 		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
+ 		return IRQ_HANDLED;
+ 	} else if (status & IRQENB_FIFO1THRES) {
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 3460dd0e3e99..a8db38db622e 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -49,8 +49,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 			st->report_state.report_id,
+ 			st->report_state.index,
+ 			HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
+-
+-		poll_value = hid_sensor_read_poll_value(st);
+ 	} else {
+ 		int val;
+ 
+@@ -90,7 +88,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ 			       st->power_state.index,
+ 			       sizeof(state_val), &state_val);
+-	if (state && poll_value)
++	if (state)
++		poll_value = hid_sensor_read_poll_value(st);
++	if (poll_value > 0)
+ 		msleep_interruptible(poll_value * 2);
+ 
+ 	return 0;
+diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
+index a0d7deeac62f..3f90985d545e 100644
+--- a/drivers/iio/pressure/mpl115.c
++++ b/drivers/iio/pressure/mpl115.c
+@@ -136,6 +136,7 @@ static const struct iio_chan_spec mpl115_channels[] = {
+ 	{
+ 		.type = IIO_TEMP,
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
++		.info_mask_shared_by_type =
+ 			BIT(IIO_CHAN_INFO_OFFSET) | BIT(IIO_CHAN_INFO_SCALE),
+ 	},
+ };
+diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
+index 01b2e0b18878..0f5b8767ec2e 100644
+--- a/drivers/iio/pressure/mpl3115.c
++++ b/drivers/iio/pressure/mpl3115.c
+@@ -182,7 +182,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
+ 	{
+ 		.type = IIO_PRESSURE,
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-			BIT(IIO_CHAN_INFO_SCALE),
++		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.scan_index = 0,
+ 		.scan_type = {
+ 			.sign = 'u',
+@@ -195,7 +195,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
+ 	{
+ 		.type = IIO_TEMP,
+ 		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+-			BIT(IIO_CHAN_INFO_SCALE),
++		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ 		.scan_index = 1,
+ 		.scan_type = {
+ 			.sign = 's',
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 02fc91c68027..ca24eebb5a35 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -1496,7 +1496,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
+ 			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+ 					      ilog2(dev->dev->caps.max_gso_sz);
+ 		else
+-			context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
++			context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
+ 	} else if (attr_mask & IB_QP_PATH_MTU) {
+ 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
+ 			pr_err("path MTU (%u) is invalid\n",
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index 57c9809e8b87..031801671b80 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1020,6 +1020,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
+ 	qp->real_qp    = qp;
+ 	qp->uobject    = NULL;
+ 	qp->qp_type    = MLX5_IB_QPT_REG_UMR;
++	qp->send_cq    = init_attr->send_cq;
++	qp->recv_cq    = init_attr->recv_cq;
+ 
+ 	attr->qp_state = IB_QPS_INIT;
+ 	attr->port_num = 1;
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+index 63b92cbb29ad..545c7ef480e8 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+@@ -1052,10 +1052,15 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
+ 		ipoib_ib_dev_down(dev);
+ 
+ 	if (level == IPOIB_FLUSH_HEAVY) {
++		rtnl_lock();
+ 		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
+ 			ipoib_ib_dev_stop(dev);
+-		if (ipoib_ib_dev_open(dev) != 0)
++
++		result = ipoib_ib_dev_open(dev);
++		rtnl_unlock();
++		if (result)
+ 			return;
++
+ 		if (netif_queue_stopped(dev))
+ 			netif_start_queue(dev);
+ 	}
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 18fd4cd6d3c7..74d69fdbdec9 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -514,6 +514,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
+ 		},
+ 	},
++	{
++		/* TUXEDO BU1406 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 275f59071f56..9d05a711a2f4 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2018,10 +2018,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 		uint64_t tmp;
+ 
+ 		if (!sg_res) {
++			unsigned int pgoff = sg->offset & ~PAGE_MASK;
++
+ 			sg_res = aligned_nrpages(sg->offset, sg->length);
+-			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
++			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
+ 			sg->dma_length = sg->length;
+-			pteval = page_to_phys(sg_page(sg)) | prot;
++			pteval = (sg_phys(sg) - pgoff) | prot;
+ 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
+ 		}
+ 
+@@ -3324,7 +3326,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
+ 
+ 	for_each_sg(sglist, sg, nelems, i) {
+ 		BUG_ON(!sg_page(sg));
+-		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
++		sg->dma_address = sg_phys(sg);
+ 		sg->dma_length = sg->length;
+ 	}
+ 	return nelems;
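
Both hunks above stop assuming sg->offset fits within one page: sg_phys() already folds the full offset into the physical address, so the mapping path subtracts only the sub-page part to recover a page-aligned PTE value while the DMA address keeps the true byte offset. The arithmetic in isolation (a sketch, using the generic PAGE_SHIFT rather than the driver's VTD constants):

    #include <linux/scatterlist.h>

    static u64 demo_pte_and_dma(struct scatterlist *sg, unsigned long iov_pfn,
                                u64 prot, dma_addr_t *dma)
    {
            unsigned int pgoff = sg->offset & ~PAGE_MASK;   /* within-page part */

            /* sg_phys(sg) == page_to_phys(sg_page(sg)) + sg->offset, so
             * subtracting pgoff leaves a page-aligned frame even when
             * sg->offset spans one or more whole pages. */
            *dma = ((dma_addr_t)iov_pfn << PAGE_SHIFT) + pgoff;
            return (sg_phys(sg) - pgoff) | prot;
    }
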
+diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
+index 2f7dffaae93a..5ba30449ee53 100644
+--- a/drivers/irqchip/irq-crossbar.c
++++ b/drivers/irqchip/irq-crossbar.c
+@@ -194,7 +194,7 @@ static const struct irq_domain_ops crossbar_domain_ops = {
+ static int __init crossbar_of_init(struct device_node *node)
+ {
+ 	int i, size, reserved = 0;
+-	u32 max = 0, entry;
++	u32 max = 0, entry, reg_size;
+ 	const __be32 *irqsr;
+ 	int ret = -ENOMEM;
+ 
+@@ -271,9 +271,9 @@ static int __init crossbar_of_init(struct device_node *node)
+ 	if (!cb->register_offsets)
+ 		goto err_irq_map;
+ 
+-	of_property_read_u32(node, "ti,reg-size", &size);
++	of_property_read_u32(node, "ti,reg-size", &reg_size);
+ 
+-	switch (size) {
++	switch (reg_size) {
+ 	case 1:
+ 		cb->write = crossbar_writeb;
+ 		break;
+@@ -299,7 +299,7 @@ static int __init crossbar_of_init(struct device_node *node)
+ 			continue;
+ 
+ 		cb->register_offsets[i] = reserved;
+-		reserved += size;
++		reserved += reg_size;
+ 	}
+ 
+ 	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index dfdd1908641c..f8a1d20c73f4 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -441,7 +441,7 @@ struct cache {
+ 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
+ 	 * cpu
+ 	 */
+-	unsigned		invalidate_needs_gc:1;
++	unsigned		invalidate_needs_gc;
+ 
+ 	bool			discard; /* Get rid of? */
+ 
+@@ -611,8 +611,8 @@ struct cache_set {
+ 
+ 	/* Counts how many sectors bio_insert has added to the cache */
+ 	atomic_t		sectors_to_gc;
++	wait_queue_head_t	gc_wait;
+ 
+-	wait_queue_head_t	moving_gc_wait;
+ 	struct keybuf		moving_gc_keys;
+ 	/* Number of moving GC bios in flight */
+ 	struct semaphore	moving_in_flight;
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 43829d9493f7..f0b75d54951a 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1762,33 +1762,34 @@ static void bch_btree_gc(struct cache_set *c)
+ 	bch_moving_gc(c);
+ }
+ 
+-static int bch_gc_thread(void *arg)
++static bool gc_should_run(struct cache_set *c)
+ {
+-	struct cache_set *c = arg;
+ 	struct cache *ca;
+ 	unsigned i;
+ 
+-	while (1) {
+-again:
+-		bch_btree_gc(c);
++	for_each_cache(ca, c, i)
++		if (ca->invalidate_needs_gc)
++			return true;
+ 
+-		set_current_state(TASK_INTERRUPTIBLE);
+-		if (kthread_should_stop())
+-			break;
++	if (atomic_read(&c->sectors_to_gc) < 0)
++		return true;
+ 
+-		mutex_lock(&c->bucket_lock);
++	return false;
++}
+ 
+-		for_each_cache(ca, c, i)
+-			if (ca->invalidate_needs_gc) {
+-				mutex_unlock(&c->bucket_lock);
+-				set_current_state(TASK_RUNNING);
+-				goto again;
+-			}
++static int bch_gc_thread(void *arg)
++{
++	struct cache_set *c = arg;
+ 
+-		mutex_unlock(&c->bucket_lock);
++	while (1) {
++		wait_event_interruptible(c->gc_wait,
++			   kthread_should_stop() || gc_should_run(c));
+ 
+-		try_to_freeze();
+-		schedule();
++		if (kthread_should_stop())
++			break;
++
++		set_gc_sectors(c);
++		bch_btree_gc(c);
+ 	}
+ 
+ 	return 0;
+@@ -1796,11 +1797,10 @@ again:
+ 
+ int bch_gc_thread_start(struct cache_set *c)
+ {
+-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
++	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
+ 	if (IS_ERR(c->gc_thread))
+ 		return PTR_ERR(c->gc_thread);
+ 
+-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
+ 	return 0;
+ }
+ 
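
The bcache change above replaces a hand-rolled set_current_state()/schedule() loop with a waitqueue guarded by a gc_should_run() predicate, which also closes the window where a wakeup arriving between the check and the sleep was lost. The general kthread pattern it adopts, with made-up names:

    #include <linux/kthread.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
    static bool demo_work_pending;  /* set by producers before wake_up() */

    static int demo_thread(void *arg)
    {
            while (1) {
                    wait_event_interruptible(demo_wait,
                            kthread_should_stop() || demo_work_pending);

                    if (kthread_should_stop())
                            break;

                    demo_work_pending = false;
                    /* ... one round of work ... */
            }
            return 0;
    }

    /* Producers: demo_work_pending = true; wake_up(&demo_wait);
     * Startup:   kthread_run(demo_thread, NULL, "demo");        */
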
+diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
+index 5c391fa01bed..9b80417cd547 100644
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
+ 
+ static inline void wake_up_gc(struct cache_set *c)
+ {
+-	if (c->gc_thread)
+-		wake_up_process(c->gc_thread);
++	wake_up(&c->gc_wait);
+ }
+ 
+ #define MAP_DONE	0
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index ab43faddb447..eb496309af96 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -193,14 +193,12 @@ static void bch_data_insert_start(struct closure *cl)
+ 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ 	struct bio *bio = op->bio, *n;
+ 
+-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
+-		set_gc_sectors(op->c);
+-		wake_up_gc(op->c);
+-	}
+-
+ 	if (op->bypass)
+ 		return bch_data_invalidate(cl);
+ 
++	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
++		wake_up_gc(op->c);
++
+ 	/*
+ 	 * Journal writes are marked REQ_FLUSH; if the original write was a
+ 	 * flush, it'll wait on the journal write.
+@@ -464,6 +462,7 @@ struct search {
+ 	unsigned		recoverable:1;
+ 	unsigned		write:1;
+ 	unsigned		read_dirty_data:1;
++	unsigned		cache_missed:1;
+ 
+ 	unsigned long		start_time;
+ 
+@@ -646,6 +645,7 @@ static inline struct search *search_alloc(struct bio *bio,
+ 
+ 	s->orig_bio		= bio;
+ 	s->cache_miss		= NULL;
++	s->cache_missed		= 0;
+ 	s->d			= d;
+ 	s->recoverable		= 1;
+ 	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
+@@ -701,7 +701,14 @@ static void cached_dev_read_error(struct closure *cl)
+ 	struct search *s = container_of(cl, struct search, cl);
+ 	struct bio *bio = &s->bio.bio;
+ 
+-	if (s->recoverable) {
++	/*
++	 * If the read request hit dirty data (s->read_dirty_data is
++	 * true), retrying the failed read from the backing device may
++	 * return stale data. So read-failure recovery is only
++	 * permitted when the read request hit clean data in the cache
++	 * device, or when a cache read race happened.
++	 */
++	if (s->recoverable && !s->read_dirty_data) {
+ 		/* Retry from the backing device: */
+ 		trace_bcache_read_retry(s->orig_bio);
+ 
+@@ -762,7 +769,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
+ 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ 
+ 	bch_mark_cache_accounting(s->iop.c, s->d,
+-				  !s->cache_miss, s->iop.bypass);
++				  !s->cache_missed, s->iop.bypass);
+ 	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+ 
+ 	if (s->iop.error)
+@@ -781,6 +788,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
+ 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
+ 	struct bio *miss, *cache_bio;
+ 
++	s->cache_missed = 1;
++
+ 	if (s->cache_miss || s->iop.bypass) {
+ 		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+ 		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 8e5666ac8a6a..6f7bc8a8674b 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1523,6 +1523,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ 	mutex_init(&c->bucket_lock);
+ 	init_waitqueue_head(&c->btree_cache_wait);
+ 	init_waitqueue_head(&c->bucket_wait);
++	init_waitqueue_head(&c->gc_wait);
+ 	sema_init(&c->uuid_write_mutex, 1);
+ 
+ 	spin_lock_init(&c->btree_gc_time.lock);
+@@ -1581,6 +1582,7 @@ static void run_cache_set(struct cache_set *c)
+ 
+ 	for_each_cache(ca, c, i)
+ 		c->nbuckets += ca->sb.nbuckets;
++	set_gc_sectors(c);
+ 
+ 	if (CACHE_SYNC(&c->sb)) {
+ 		LIST_HEAD(journal);
+@@ -2120,6 +2122,7 @@ static void bcache_exit(void)
+ 	if (bcache_major)
+ 		unregister_blkdev(bcache_major, "bcache");
+ 	unregister_reboot_notifier(&reboot);
++	mutex_destroy(&bch_register_lock);
+ }
+ 
+ static int __init bcache_init(void)
+@@ -2138,14 +2141,15 @@ static int __init bcache_init(void)
+ 	bcache_major = register_blkdev(0, "bcache");
+ 	if (bcache_major < 0) {
+ 		unregister_reboot_notifier(&reboot);
++		mutex_destroy(&bch_register_lock);
+ 		return bcache_major;
+ 	}
+ 
+ 	if (!(bcache_wq = create_workqueue("bcache")) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+-	    sysfs_create_files(bcache_kobj, files) ||
+ 	    bch_request_init() ||
+-	    bch_debug_init(bcache_kobj))
++	    bch_debug_init(bcache_kobj) ||
++	    sysfs_create_files(bcache_kobj, files))
+ 		goto err;
+ 
+ 	return 0;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index d192ab2ed17c..c2a13fd9e0ca 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1435,11 +1435,62 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+ }
+ EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
+ 
++/*
++ * Flush current->bio_list when the target map method blocks.
++ * This fixes deadlocks in the snapshot target and possibly in others.
++ */
++struct dm_offload {
++	struct blk_plug plug;
++	struct blk_plug_cb cb;
++};
++
++static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
++{
++	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
++	struct bio_list list;
++	struct bio *bio;
++
++	INIT_LIST_HEAD(&o->cb.list);
++
++	if (unlikely(!current->bio_list))
++		return;
++
++	list = *current->bio_list;
++	bio_list_init(current->bio_list);
++
++	while ((bio = bio_list_pop(&list))) {
++		struct bio_set *bs = bio->bi_pool;
++		if (unlikely(!bs) || bs == fs_bio_set) {
++			bio_list_add(current->bio_list, bio);
++			continue;
++		}
++
++		spin_lock(&bs->rescue_lock);
++		bio_list_add(&bs->rescue_list, bio);
++		queue_work(bs->rescue_workqueue, &bs->rescue_work);
++		spin_unlock(&bs->rescue_lock);
++	}
++}
++
++static void dm_offload_start(struct dm_offload *o)
++{
++	blk_start_plug(&o->plug);
++	o->cb.callback = flush_current_bio_list;
++	list_add(&o->cb.list, &current->plug->cb_list);
++}
++
++static void dm_offload_end(struct dm_offload *o)
++{
++	list_del(&o->cb.list);
++	blk_finish_plug(&o->plug);
++}
++
+ static void __map_bio(struct dm_target_io *tio)
+ {
+ 	int r;
+ 	sector_t sector;
+ 	struct mapped_device *md;
++	struct dm_offload o;
+ 	struct bio *clone = &tio->clone;
+ 	struct dm_target *ti = tio->ti;
+ 
+@@ -1452,7 +1503,11 @@ static void __map_bio(struct dm_target_io *tio)
+ 	 */
+ 	atomic_inc(&tio->io->io_count);
+ 	sector = clone->bi_iter.bi_sector;
++
++	dm_offload_start(&o);
+ 	r = ti->type->map(ti, clone);
++	dm_offload_end(&o);
++
+ 	if (r == DM_MAPIO_REMAPPED) {
+ 		/* the bio has been remapped so dispatch it */
+ 
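
The dm change above leans on the block layer's plug callbacks: dm_offload_start() opens a plug and hooks a blk_plug_cb onto it, so if ti->type->map() blocks and the scheduler flushes the plug, flush_current_bio_list() can push bios queued on current->bio_list to their bio_set rescuer workqueues instead of leaving them stranded behind the sleeping task. The hook-up pattern in isolation (illustrative names):

    #include <linux/blkdev.h>
    #include <linux/sched.h>

    struct demo_offload {
            struct blk_plug plug;
            struct blk_plug_cb cb;
    };

    static void demo_unplug(struct blk_plug_cb *cb, bool from_schedule)
    {
            /* Runs when the plugged task is about to sleep; drain
             * current->bio_list here, as flush_current_bio_list() does. */
    }

    static void demo_offload_start(struct demo_offload *o)
    {
            blk_start_plug(&o->plug);
            o->cb.callback = demo_unplug;
            list_add(&o->cb.list, &current->plug->cb_list);
    }

    static void demo_offload_end(struct demo_offload *o)
    {
            list_del(&o->cb.list);
            blk_finish_plug(&o->plug);
    }
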
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index fcfc4b9b2672..4eb5cb18f98d 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -738,6 +738,7 @@ static int leave(struct mddev *mddev)
+ 	lockres_free(cinfo->sb_lock);
+ 	lockres_free(cinfo->bitmap_lockres);
+ 	dlm_release_lockspace(cinfo->lockspace, 2);
++	kfree(cinfo);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 0ba6c358c6e0..4cbc3df79a2a 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1161,6 +1161,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
+ 	int max_sectors;
+ 	int sectors;
+ 
++	md_write_start(mddev, bio);
++
+ 	/*
+ 	 * Register the new request and wait if the reconstruction
+ 	 * thread has put up a bar for new requests.
+@@ -1559,8 +1561,6 @@ static void make_request(struct mddev *mddev, struct bio *bio)
+ 		return;
+ 	}
+ 
+-	md_write_start(mddev, bio);
+-
+ 	do {
+ 
+ 		/*
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 02e6d335f178..907aa9c6e894 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -1679,8 +1679,11 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
+ 		struct r5dev *dev = &sh->dev[i];
+ 
+ 		if (dev->written || i == pd_idx || i == qd_idx) {
+-			if (!discard && !test_bit(R5_SkipCopy, &dev->flags))
++			if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) {
+ 				set_bit(R5_UPTODATE, &dev->flags);
++				if (test_bit(STRIPE_EXPAND_READY, &sh->state))
++					set_bit(R5_Expanded, &dev->flags);
++			}
+ 			if (fua)
+ 				set_bit(R5_WantFUA, &dev->flags);
+ 			if (sync)
+diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
+index ef3a8f75f82e..7b15aea2723d 100644
+--- a/drivers/media/usb/dvb-usb/dibusb-common.c
++++ b/drivers/media/usb/dvb-usb/dibusb-common.c
+@@ -179,8 +179,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
+ 
+ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
+ {
+-	u8 wbuf[1] = { offs };
+-	return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
++	u8 *buf;
++	int rc;
++
++	buf = kmalloc(2, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	buf[0] = offs;
++
++	rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
++	*val = buf[1];
++	kfree(buf);
++
++	return rc;
+ }
+ EXPORT_SYMBOL(dibusb_read_eeprom_byte);
+ 
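
The dibusb change above follows the USB rule that transfer buffers must be heap memory: data passed down to the USB message helpers may be DMA-mapped, so an on-stack wbuf is not safe. The shape of the fix, generalized (xfer is a stand-in for the driver's I2C message helper):

    #include <linux/slab.h>
    #include <linux/types.h>

    static int demo_read_eeprom_byte(u8 offs, u8 *val,
                    int (*xfer)(u8 *wbuf, int wlen, u8 *rbuf, int rlen))
    {
            u8 *buf;
            int rc;

            buf = kmalloc(2, GFP_KERNEL);   /* DMA-safe, unlike the stack */
            if (!buf)
                    return -ENOMEM;

            buf[0] = offs;
            rc = xfer(&buf[0], 1, &buf[1], 1);
            *val = buf[1];
            kfree(buf);

            return rc;
    }
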
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 5970dd6a1c1c..a41a4f6a51b2 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -1574,6 +1574,114 @@ static const char *uvc_print_chain(struct uvc_video_chain *chain)
+ 	return buffer;
+ }
+ 
++static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
++{
++	struct uvc_video_chain *chain;
++
++	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++	if (chain == NULL)
++		return NULL;
++
++	INIT_LIST_HEAD(&chain->entities);
++	mutex_init(&chain->ctrl_mutex);
++	chain->dev = dev;
++	v4l2_prio_init(&chain->prio);
++
++	return chain;
++}
++
++/*
++ * Fallback heuristic for devices that don't connect units and terminals in a
++ * valid chain.
++ *
++ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
++ * to fail, but if we just take the entities we can find and put them together
++ * in the most sensible chain we can think of, it turns out they do work anyway.
++ * Note: This heuristic assumes there is a single chain.
++ *
++ * At the time of writing, devices known to have such a broken chain are
++ *  - Acer Integrated Camera (5986:055a)
++ *  - Realtek rtl157a7 (0bda:57a7)
++ */
++static int uvc_scan_fallback(struct uvc_device *dev)
++{
++	struct uvc_video_chain *chain;
++	struct uvc_entity *iterm = NULL;
++	struct uvc_entity *oterm = NULL;
++	struct uvc_entity *entity;
++	struct uvc_entity *prev;
++
++	/*
++	 * Start by locating the input and output terminals. We only support
++	 * devices with exactly one of each for now.
++	 */
++	list_for_each_entry(entity, &dev->entities, list) {
++		if (UVC_ENTITY_IS_ITERM(entity)) {
++			if (iterm)
++				return -EINVAL;
++			iterm = entity;
++		}
++
++		if (UVC_ENTITY_IS_OTERM(entity)) {
++			if (oterm)
++				return -EINVAL;
++			oterm = entity;
++		}
++	}
++
++	if (iterm == NULL || oterm == NULL)
++		return -EINVAL;
++
++	/* Allocate the chain and fill it. */
++	chain = uvc_alloc_chain(dev);
++	if (chain == NULL)
++		return -ENOMEM;
++
++	if (uvc_scan_chain_entity(chain, oterm) < 0)
++		goto error;
++
++	prev = oterm;
++
++	/*
++	 * Add all Processing and Extension Units with two pads. The order
++	 * doesn't matter much; use reverse list traversal to connect units in
++	 * UVC descriptor order as we build the chain from output to input. This
++	 * leads to units appearing in the order meant by the manufacturer for
++	 * the cameras known to require this heuristic.
++	 */
++	list_for_each_entry_reverse(entity, &dev->entities, list) {
++		if (entity->type != UVC_VC_PROCESSING_UNIT &&
++		    entity->type != UVC_VC_EXTENSION_UNIT)
++			continue;
++
++		if (entity->num_pads != 2)
++			continue;
++
++		if (uvc_scan_chain_entity(chain, entity) < 0)
++			goto error;
++
++		prev->baSourceID[0] = entity->id;
++		prev = entity;
++	}
++
++	if (uvc_scan_chain_entity(chain, iterm) < 0)
++		goto error;
++
++	prev->baSourceID[0] = iterm->id;
++
++	list_add_tail(&chain->list, &dev->chains);
++
++	uvc_trace(UVC_TRACE_PROBE,
++		  "Found a video chain by fallback heuristic (%s).\n",
++		  uvc_print_chain(chain));
++
++	return 0;
++
++error:
++	kfree(chain);
++	return -EINVAL;
++}
++
+ /*
+  * Scan the device for video chains and register video devices.
+  *
+@@ -1596,15 +1704,10 @@ static int uvc_scan_device(struct uvc_device *dev)
+ 		if (term->chain.next || term->chain.prev)
+ 			continue;
+ 
+-		chain = kzalloc(sizeof(*chain), GFP_KERNEL);
++		chain = uvc_alloc_chain(dev);
+ 		if (chain == NULL)
+ 			return -ENOMEM;
+ 
+-		INIT_LIST_HEAD(&chain->entities);
+-		mutex_init(&chain->ctrl_mutex);
+-		chain->dev = dev;
+-		v4l2_prio_init(&chain->prio);
+-
+ 		term->flags |= UVC_ENTITY_FLAG_DEFAULT;
+ 
+ 		if (uvc_scan_chain(chain, term) < 0) {
+@@ -1618,6 +1721,9 @@ static int uvc_scan_device(struct uvc_device *dev)
+ 		list_add_tail(&chain->list, &dev->chains);
+ 	}
+ 
++	if (list_empty(&dev->chains))
++		uvc_scan_fallback(dev);
++
+ 	if (list_empty(&dev->chains)) {
+ 		uvc_printk(KERN_INFO, "No valid video chain found.\n");
+ 		return -1;
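
The linking step in uvc_scan_fallback() above is worth spelling out: the entity list is walked in reverse so units end up in descriptor order while the chain is assembled from the output terminal back to the input terminal, each unit's baSourceID pointing at its predecessor. A stripped-down sketch of that wiring (demo_entity is a simplified stand-in for struct uvc_entity):

    #include <linux/list.h>
    #include <linux/types.h>

    struct demo_entity {
            struct list_head list;
            u8 id;
            u8 source_id;   /* stands in for baSourceID[0] */
    };

    static void demo_link(struct list_head *entities,
                          struct demo_entity *oterm, struct demo_entity *iterm)
    {
            struct demo_entity *entity, *prev = oterm;

            list_for_each_entry_reverse(entity, entities, list) {
                    if (entity == oterm || entity == iterm)
                            continue;
                    prev->source_id = entity->id;   /* prev pulls from entity */
                    prev = entity;
            }
            prev->source_id = iterm->id;            /* chain ends at the sensor */
    }
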
+diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c
+index 87a19f33e460..6d072cae6134 100644
+--- a/drivers/media/usb/uvc/uvc_queue.c
++++ b/drivers/media/usb/uvc/uvc_queue.c
+@@ -399,7 +399,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
+ 		nextbuf = NULL;
+ 	spin_unlock_irqrestore(&queue->irqlock, flags);
+ 
+-	buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
++	buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE;
+ 	vb2_set_plane_payload(&buf->buf, 0, buf->bytesused);
+ 	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
+ 
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index 2c51acce4b34..fedb19d1516b 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -1892,9 +1892,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
+ 	if (!of_property_read_u32(child, "dma-channel", &val))
+ 		gpmc_onenand_data->dma_channel = val;
+ 
+-	gpmc_onenand_init(gpmc_onenand_data);
+-
+-	return 0;
++	return gpmc_onenand_init(gpmc_onenand_data);
+ }
+ #else
+ static int gpmc_probe_onenand_child(struct platform_device *pdev,
+diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c
+index 744ca5cacc9b..cf54420f943d 100644
+--- a/drivers/mtd/maps/pmcmsp-flash.c
++++ b/drivers/mtd/maps/pmcmsp-flash.c
+@@ -139,15 +139,13 @@ static int __init init_msp_flash(void)
+ 		}
+ 
+ 		msp_maps[i].bankwidth = 1;
+-		msp_maps[i].name = kmalloc(7, GFP_KERNEL);
++		msp_maps[i].name = kstrndup(flash_name, 7, GFP_KERNEL);
+ 		if (!msp_maps[i].name) {
+ 			iounmap(msp_maps[i].virt);
+ 			kfree(msp_parts[i]);
+ 			goto cleanup_loop;
+ 		}
+ 
+-		msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7);
+-
+ 		for (j = 0; j < pcnt; j++) {
+ 			part_name[5] = '0' + i;
+ 			part_name[7] = '0' + j;
+diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
+index e90c6a7333d7..2e4649655181 100644
+--- a/drivers/net/appletalk/ipddp.c
++++ b/drivers/net/appletalk/ipddp.c
+@@ -191,7 +191,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
+  */
+ static int ipddp_create(struct ipddp_route *new_rt)
+ {
+-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
++        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ 
+         if (rt == NULL)
+                 return -ENOMEM;
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index 7bb3cf38f346..2965453853f3 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -652,6 +652,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
+ 		mbx_mask = hecc_read(priv, HECC_CANMIM);
+ 		mbx_mask |= HECC_TX_MBOX_MASK;
+ 		hecc_write(priv, HECC_CANMIM, mbx_mask);
++	} else {
++		/* repoll is done only if the whole budget is used */
++		num_pkts = quota;
+ 	}
+ 
+ 	return num_pkts;
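
The ti_hecc fix above encodes the NAPI contract: a poll handler that used its whole budget must return the budget so the core keeps it scheduled, while one that finishes early calls napi_complete() and re-enables device interrupts itself. In outline (the demo_* helpers are placeholders):

    #include <linux/netdevice.h>

    static int demo_process_rx(struct napi_struct *napi, int budget);
    static void demo_enable_irqs(struct napi_struct *napi);

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            int done = demo_process_rx(napi, budget);

            if (done < budget) {
                    napi_complete(napi);
                    demo_enable_irqs(napi);
            }
            return done;    /* done == budget means "poll me again" */
    }
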
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 339b0c5ce60c..a5b92d8ff345 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -290,6 +290,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ 
+ 	case -ECONNRESET: /* unlink */
+ 	case -ENOENT:
++	case -EPIPE:
++	case -EPROTO:
+ 	case -ESHUTDOWN:
+ 		return;
+ 
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 730a2bac124d..f413c0b7be23 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -396,6 +396,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
+ 		break;
+ 
+ 	case -ENOENT:
++	case -EPIPE:
++	case -EPROTO:
+ 	case -ESHUTDOWN:
+ 		return;
+ 
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index dc77225227c7..699fdaf19895 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -603,8 +603,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
+ 			}
+ 
+ 			if (pos + tmp->len > actual_len) {
+-				dev_err(dev->udev->dev.parent,
+-					"Format error\n");
++				dev_err_ratelimited(dev->udev->dev.parent,
++						    "Format error\n");
+ 				break;
+ 			}
+ 
+@@ -809,6 +809,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
+ 	if (err) {
+ 		netdev_err(netdev, "Error transmitting URB\n");
+ 		usb_unanchor_urb(urb);
++		kfree(buf);
+ 		usb_free_urb(urb);
+ 		return err;
+ 	}
+@@ -1322,6 +1323,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
+ 	case 0:
+ 		break;
+ 	case -ENOENT:
++	case -EPIPE:
++	case -EPROTO:
+ 	case -ESHUTDOWN:
+ 		return;
+ 	default:
+@@ -1330,7 +1333,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
+ 		goto resubmit_urb;
+ 	}
+ 
+-	while (pos <= urb->actual_length - MSG_HEADER_LEN) {
++	while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
+ 		msg = urb->transfer_buffer + pos;
+ 
+ 		/* The Kvaser firmware can only read and write messages that
+@@ -1349,7 +1352,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
+ 		}
+ 
+ 		if (pos + msg->len > urb->actual_length) {
+-			dev_err(dev->udev->dev.parent, "Format error\n");
++			dev_err_ratelimited(dev->udev->dev.parent,
++					    "Format error\n");
+ 			break;
+ 		}
+ 
+@@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
+ 		spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
+ 
+ 		usb_unanchor_urb(urb);
++		kfree(buf);
+ 
+ 		stats->tx_dropped++;
+ 
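
The (int) cast added in kvaser_usb_read_bulk_callback() above fixes a classic signedness trap: urb->actual_length is unsigned, so for a short URB the subtraction wraps to a huge value and the loop condition is always true. A short userspace demonstration:

    #include <stdio.h>

    #define MSG_HEADER_LEN 2u

    int main(void)
    {
            unsigned int actual_length = 1; /* shorter than a header */
            int pos = 0;

            /* Wraps to a huge unsigned value, so the comparison is true: */
            printf("unsigned: %d\n", pos <= actual_length - MSG_HEADER_LEN);
            /* Cast restores the intended signed comparison (false): */
            printf("int cast: %d\n", pos <= (int)(actual_length - MSG_HEADER_LEN));
            return 0;
    }
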
+diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
+index d60a9fcc9896..4f4a5be07634 100644
+--- a/drivers/net/can/usb/usb_8dev.c
++++ b/drivers/net/can/usb/usb_8dev.c
+@@ -528,6 +528,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
+ 		break;
+ 
+ 	case -ENOENT:
++	case -EPIPE:
++	case -EPROTO:
+ 	case -ESHUTDOWN:
+ 		return;
+ 
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 783543ad1fcf..621a970bddf6 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -955,15 +955,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ 		goto out;
+ 	}
+ 
+-	/* Insert TSB and checksum infos */
+-	if (priv->tsb_en) {
+-		skb = bcm_sysport_insert_tsb(skb, dev);
+-		if (!skb) {
+-			ret = NETDEV_TX_OK;
+-			goto out;
+-		}
+-	}
+-
+ 	/* The Ethernet switch we are interfaced with needs packets to be at
+ 	 * least 64 bytes (including FCS) otherwise they will be discarded when
+ 	 * they enter the switch port logic. When Broadcom tags are enabled, we
+@@ -971,13 +962,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ 	 * (including FCS and tag) because the length verification is done after
+ 	 * the Broadcom tag is stripped off the ingress packet.
+ 	 */
+-	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
++	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ 		ret = NETDEV_TX_OK;
+ 		goto out;
+ 	}
+ 
+-	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+-			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
++	/* Insert TSB and checksum infos */
++	if (priv->tsb_en) {
++		skb = bcm_sysport_insert_tsb(skb, dev);
++		if (!skb) {
++			ret = NETDEV_TX_OK;
++			goto out;
++		}
++	}
++
++	skb_len = skb->len;
+ 
+ 	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ 	if (dma_mapping_error(kdev, mapping)) {
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 8a97d28f3d65..a1d149515531 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -13227,7 +13227,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+ 	if (!netif_running(bp->dev)) {
+ 		DP(BNX2X_MSG_PTP,
+ 		   "PTP adjfreq called while the interface is down\n");
+-		return -EFAULT;
++		return -ENETDOWN;
+ 	}
+ 
+ 	if (ppb < 0) {
+@@ -13286,6 +13286,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+ {
+ 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ 
++	if (!netif_running(bp->dev)) {
++		DP(BNX2X_MSG_PTP,
++		   "PTP adjtime called while the interface is down\n");
++		return -ENETDOWN;
++	}
++
+ 	DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
+ 
+ 	timecounter_adjtime(&bp->timecounter, delta);
+@@ -13298,6 +13304,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+ 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ 	u64 ns;
+ 
++	if (!netif_running(bp->dev)) {
++		DP(BNX2X_MSG_PTP,
++		   "PTP gettime called while the interface is down\n");
++		return -ENETDOWN;
++	}
++
+ 	ns = timecounter_read(&bp->timecounter);
+ 
+ 	DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
+@@ -13313,6 +13325,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
+ 	struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ 	u64 ns;
+ 
++	if (!netif_running(bp->dev)) {
++		DP(BNX2X_MSG_PTP,
++		   "PTP settime called while the interface is down\n");
++		return -ENETDOWN;
++	}
++
+ 	ns = timespec64_to_ns(ts);
+ 
+ 	DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
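
The bnx2x hunks above apply one guard pattern across all PTP callbacks: bail out with -ENETDOWN before touching the timecounter unless the interface is running, since the clock state is only initialised while the device is up. The pattern, reduced to a sketch (demo_priv and its members are invented):

    #include <linux/netdevice.h>
    #include <linux/ptp_clock_kernel.h>
    #include <linux/timecounter.h>

    struct demo_priv {
            struct net_device *dev;
            struct ptp_clock_info ptp_info;
            struct timecounter tc;
    };

    static int demo_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
    {
            struct demo_priv *priv =
                    container_of(ptp, struct demo_priv, ptp_info);

            if (!netif_running(priv->dev))
                    return -ENETDOWN;       /* clock only armed while up */

            timecounter_adjtime(&priv->tc, delta);
            return 0;
    }
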
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+index 06b8c0d8fd3b..996d2dc21bde 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+@@ -852,7 +852,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
+ 	struct bnx2x *bp = netdev_priv(dev);
+ 	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
+ 	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+-	int rc, i = 0;
++	int rc = 0, i = 0;
+ 	struct netdev_hw_addr *ha;
+ 
+ 	if (bp->state != BNX2X_STATE_OPEN) {
+@@ -867,6 +867,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
+ 	/* Get Rx mode requested */
+ 	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+ 
++	/* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
++	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
++		DP(NETIF_MSG_IFUP,
++		   "VF supports not more than %d multicast MAC addresses\n",
++		   PFVF_MAX_MULTICAST_PER_VF);
++		rc = -EINVAL;
++		goto out;
++	}
++
+ 	netdev_for_each_mc_addr(ha, dev) {
+ 		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
+ 		   bnx2x_mc_addr(ha));
+@@ -874,16 +883,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
+ 		i++;
+ 	}
+ 
+-	/* We support four PFVF_MAX_MULTICAST_PER_VF mcast
+-	  * addresses tops
+-	  */
+-	if (i >= PFVF_MAX_MULTICAST_PER_VF) {
+-		DP(NETIF_MSG_IFUP,
+-		   "VF supports not more than %d multicast MAC addresses\n",
+-		   PFVF_MAX_MULTICAST_PER_VF);
+-		return -EINVAL;
+-	}
+-
+ 	req->n_multicast = i;
+ 	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
+ 	req->vf_qid = 0;
+@@ -908,7 +907,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
+ out:
+ 	bnx2x_vfpf_finalize(bp, &req->first_tlv);
+ 
+-	return 0;
++	return rc;
+ }
+ 
+ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
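
The vfpf change above moves the limit check ahead of the copy loop (the old
code filled the request first and rejected the list afterwards) and returns
the real rc through the common exit path. A sketch of check-before-copy;
MAX_MCAST and the raw 6-byte addresses are illustrative stand-ins:

    #include <errno.h>
    #include <string.h>

    #define MAX_MCAST 4  /* stand-in for PFVF_MAX_MULTICAST_PER_VF */

    static int set_mcast(const unsigned char (*addrs)[6], int n_addrs,
                         unsigned char (*req)[6])
    {
        /* Validate the count up front so the request buffer can
         * never be overrun. */
        if (n_addrs > MAX_MCAST)
            return -EINVAL;

        for (int i = 0; i < n_addrs; i++)
            memcpy(req[i], addrs[i], 6);
        return 0;
    }
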
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index a9fcac044e9e..77fc04271244 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1,7 +1,7 @@
+ /*
+  * Broadcom GENET (Gigabit Ethernet) controller driver
+  *
+- * Copyright (c) 2014 Broadcom Corporation
++ * Copyright (c) 2014-2017 Broadcom
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -631,8 +631,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ 	/* Misc UniMAC counters */
+ 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+-			UMAC_RBUF_OVFL_CNT),
+-	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
++			UMAC_RBUF_OVFL_CNT_V1),
++	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
++			UMAC_RBUF_ERR_CNT_V1),
+ 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+ 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+ 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+@@ -675,6 +676,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+ 	}
+ }
+ 
++static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
++{
++	u16 new_offset;
++	u32 val;
++
++	switch (offset) {
++	case UMAC_RBUF_OVFL_CNT_V1:
++		if (GENET_IS_V2(priv))
++			new_offset = RBUF_OVFL_CNT_V2;
++		else
++			new_offset = RBUF_OVFL_CNT_V3PLUS;
++
++		val = bcmgenet_rbuf_readl(priv,	new_offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_rbuf_writel(priv, 0, new_offset);
++		break;
++	case UMAC_RBUF_ERR_CNT_V1:
++		if (GENET_IS_V2(priv))
++			new_offset = RBUF_ERR_CNT_V2;
++		else
++			new_offset = RBUF_ERR_CNT_V3PLUS;
++
++		val = bcmgenet_rbuf_readl(priv,	new_offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_rbuf_writel(priv, 0, new_offset);
++		break;
++	default:
++		val = bcmgenet_umac_readl(priv, offset);
++		/* clear if overflowed */
++		if (val == ~0)
++			bcmgenet_umac_writel(priv, 0, offset);
++		break;
++	}
++
++	return val;
++}
++
+ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+ {
+ 	int i, j = 0;
+@@ -690,19 +730,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+ 		case BCMGENET_STAT_NETDEV:
+ 		case BCMGENET_STAT_SOFT:
+ 			continue;
+-		case BCMGENET_STAT_MIB_RX:
+-		case BCMGENET_STAT_MIB_TX:
+ 		case BCMGENET_STAT_RUNT:
+-			if (s->type != BCMGENET_STAT_MIB_RX)
+-				offset = BCMGENET_STAT_OFFSET;
++			offset += BCMGENET_STAT_OFFSET;
++			/* fall through */
++		case BCMGENET_STAT_MIB_TX:
++			offset += BCMGENET_STAT_OFFSET;
++			/* fall through */
++		case BCMGENET_STAT_MIB_RX:
+ 			val = bcmgenet_umac_readl(priv,
+ 						  UMAC_MIB_START + j + offset);
++			offset = 0;	/* Reset Offset */
+ 			break;
+ 		case BCMGENET_STAT_MISC:
+-			val = bcmgenet_umac_readl(priv, s->reg_offset);
+-			/* clear if overflowed */
+-			if (val == ~0)
+-				bcmgenet_umac_writel(priv, 0, s->reg_offset);
++			if (GENET_IS_V1(priv)) {
++				val = bcmgenet_umac_readl(priv, s->reg_offset);
++				/* clear if overflowed */
++				if (val == ~0)
++					bcmgenet_umac_writel(priv, 0,
++							     s->reg_offset);
++			} else {
++				val = bcmgenet_update_stat_misc(priv,
++								s->reg_offset);
++			}
+ 			break;
+ 		}
+ 
+@@ -3018,6 +3067,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+ 	 */
+ 	gphy_rev = reg & 0xffff;
+ 
++	/* This is reserved so should require special treatment */
++	if (gphy_rev == 0 || gphy_rev == 0x01ff) {
++		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
++		return;
++	}
++
+ 	/* This is the good old scheme, just GPHY major, no minor nor patch */
+ 	if ((gphy_rev & 0xf0) != 0)
+ 		priv->gphy_rev = gphy_rev << 8;
+@@ -3026,12 +3081,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+ 	else if ((gphy_rev & 0xff00) != 0)
+ 		priv->gphy_rev = gphy_rev;
+ 
+-	/* This is reserved so should require special treatment */
+-	else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+-		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+-		return;
+-	}
+-
+ #ifdef CONFIG_PHYS_ADDR_T_64BIT
+ 	if (!(params->flags & GENET_HAS_40BITS))
+ 		pr_warn("GENET does not support 40-bits PA\n");
+@@ -3073,6 +3122,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 	const void *macaddr;
+ 	struct resource *r;
+ 	int err = -EIO;
++	const char *phy_mode_str;
+ 
+ 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
+ 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
+@@ -3175,6 +3225,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
+ 		priv->clk_eee = NULL;
+ 	}
+ 
++	/* If this is an internal GPHY, power it on now, before UniMAC is
++	 * brought out of reset as absolutely no UniMAC activity is allowed
++	 */
++	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
++	    !strcasecmp(phy_mode_str, "internal"))
++		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
++
+ 	err = reset_umac(priv);
+ 	if (err)
+ 		goto err_clk_disable;
+@@ -3234,7 +3291,8 @@ static int bcmgenet_suspend(struct device *d)
+ 
+ 	bcmgenet_netif_stop(dev);
+ 
+-	phy_suspend(priv->phydev);
++	if (!device_may_wakeup(d))
++		phy_suspend(priv->phydev);
+ 
+ 	netif_device_detach(dev);
+ 
+@@ -3331,7 +3389,8 @@ static int bcmgenet_resume(struct device *d)
+ 
+ 	netif_device_attach(dev);
+ 
+-	phy_resume(priv->phydev);
++	if (!device_may_wakeup(d))
++		phy_resume(priv->phydev);
+ 
+ 	if (priv->eee.eee_enabled)
+ 		bcmgenet_eee_enable_set(dev, true);
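
The MIB rework above relies on deliberate switch fall-through: runt counters
sit two register banks past the RX block and TX counters one bank past it,
so each case adds one stride and drops into the next. A compilable sketch of
that accumulation; the stride value is an assumption, not the real layout:

    enum stat_type { STAT_MIB_RX, STAT_MIB_TX, STAT_RUNT };

    #define STAT_OFFSET 0x40  /* assumed bank stride, for illustration */

    static unsigned int stat_reg(enum stat_type type, unsigned int base,
                                 unsigned int j)
    {
        unsigned int offset = 0;

        switch (type) {
        case STAT_RUNT:
            offset += STAT_OFFSET;
            /* fall through */
        case STAT_MIB_TX:
            offset += STAT_OFFSET;
            /* fall through */
        case STAT_MIB_RX:
        default:
            break;
        }
        return base + j + offset;
    }
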
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+index 6f2887a5e0be..ae3979a4ca93 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -1,5 +1,5 @@
+ /*
+- * Copyright (c) 2014 Broadcom Corporation
++ * Copyright (c) 2014-2017 Broadcom
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
+ #define  MDIO_REG_SHIFT			16
+ #define  MDIO_REG_MASK			0x1F
+ 
+-#define UMAC_RBUF_OVFL_CNT		0x61C
++#define UMAC_RBUF_OVFL_CNT_V1		0x61C
++#define RBUF_OVFL_CNT_V2		0x80
++#define RBUF_OVFL_CNT_V3PLUS		0x94
+ 
+ #define UMAC_MPD_CTRL			0x620
+ #define  MPD_EN				(1 << 0)
+@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
+ 
+ #define UMAC_MPD_PW_MS			0x624
+ #define UMAC_MPD_PW_LS			0x628
+-#define UMAC_RBUF_ERR_CNT		0x634
++#define UMAC_RBUF_ERR_CNT_V1		0x634
++#define RBUF_ERR_CNT_V2			0x84
++#define RBUF_ERR_CNT_V3PLUS		0x98
+ #define UMAC_MDF_ERR_CNT		0x638
+ #define UMAC_MDF_CTRL			0x650
+ #define UMAC_MDF_ADDR			0x654
+diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
+index ed41559bae77..b553409e04ad 100644
+--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
++++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
+@@ -98,8 +98,7 @@ static int csr0 = 0x01A00000 | 0x4800;
+ #elif defined(__mips__)
+ static int csr0 = 0x00200000 | 0x4000;
+ #else
+-#warning Processor architecture undefined!
+-static int csr0 = 0x00A00000 | 0x4800;
++static int csr0;
+ #endif
+ 
+ /* Operational parameters that usually are not changed. */
+@@ -1982,6 +1981,12 @@ static int __init tulip_init (void)
+ 	pr_info("%s", version);
+ #endif
+ 
++	if (!csr0) {
++		pr_warn("tulip: unknown CPU architecture, using default csr0\n");
++		/* default to 8 longword cache line alignment */
++		csr0 = 0x00A00000 | 0x4800;
++	}
++
+ 	/* copy module parms into globals */
+ 	tulip_rx_copybreak = rx_copybreak;
+ 	tulip_max_interrupt_work = max_interrupt_work;
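
The tulip change above trades a build-breaking #warning for a runtime
fallback: csr0 stays zero on unknown architectures and init fills in a safe
default with a one-time warning. A userspace sketch, with stderr standing in
for pr_warn():

    #include <stdio.h>

    static int csr0;  /* zero means no per-arch value was selected */

    static void tulip_like_init(void)
    {
        if (!csr0) {
            fprintf(stderr, "unknown CPU architecture, using default csr0\n");
            csr0 = 0x00A00000 | 0x4800;  /* 8-longword cache alignment */
        }
    }
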
+diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
+index 9beb3d34d4ba..3c0e4d5c5fef 100644
+--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
++++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
+@@ -907,7 +907,7 @@ static void init_registers(struct net_device *dev)
+ #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) || defined(CONFIG_ARM)
+ 	i |= 0x4800;
+ #else
+-#warning Processor architecture undefined
++	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
+ 	i |= 0x4800;
+ #endif
+ 	iowrite32(i, ioaddr + PCIBusCfg);
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 67aec18dd76c..fe5a7d207b1d 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -2850,6 +2850,7 @@ static void set_multicast_list(struct net_device *ndev)
+ 	struct netdev_hw_addr *ha;
+ 	unsigned int i, bit, data, crc, tmp;
+ 	unsigned char hash;
++	unsigned int hash_high = 0, hash_low = 0;
+ 
+ 	if (ndev->flags & IFF_PROMISC) {
+ 		tmp = readl(fep->hwp + FEC_R_CNTRL);
+@@ -2872,11 +2873,7 @@ static void set_multicast_list(struct net_device *ndev)
+ 		return;
+ 	}
+ 
+-	/* Clear filter and add the addresses in hash register
+-	 */
+-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-
++	/* Add the addresses in hash register */
+ 	netdev_for_each_mc_addr(ha, ndev) {
+ 		/* calculate crc32 value of mac address */
+ 		crc = 0xffffffff;
+@@ -2894,16 +2891,14 @@ static void set_multicast_list(struct net_device *ndev)
+ 		 */
+ 		hash = (crc >> (32 - HASH_BITS)) & 0x3f;
+ 
+-		if (hash > 31) {
+-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-			tmp |= 1 << (hash - 32);
+-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+-		} else {
+-			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-			tmp |= 1 << hash;
+-			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+-		}
++		if (hash > 31)
++			hash_high |= 1 << (hash - 32);
++		else
++			hash_low |= 1 << hash;
+ 	}
++
++	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
++	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+ }
+ 
+ /* Set a MAC change in hardware. */
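
The FEC rework above accumulates the 64-bit group-hash bitmap in locals and
stores each half exactly once, replacing both the up-front register clear
and the read-modify-write per address, so stale bits cannot survive and the
hardware sees one consistent update per register. A runnable sketch; the
bitwise CRC-32 is a stand-in for the driver's inline loop and two u32 stores
model the register writes:

    #include <stdint.h>

    #define HASH_BITS 6

    static uint32_t crc32_le(const uint8_t *p, int len)
    {
        uint32_t crc = 0xffffffff;

        for (int i = 0; i < len; i++) {
            crc ^= p[i];
            for (int b = 0; b < 8; b++)
                crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
        }
        return crc;
    }

    static void set_multicast(const uint8_t (*mc)[6], int n,
                              uint32_t *reg_high, uint32_t *reg_low)
    {
        uint32_t hash_high = 0, hash_low = 0;

        for (int i = 0; i < n; i++) {
            unsigned int hash = (crc32_le(mc[i], 6) >> (32 - HASH_BITS)) & 0x3f;

            if (hash > 31)
                hash_high |= 1u << (hash - 32);
            else
                hash_low |= 1u << hash;
        }
        /* One store per register, no intermediate cleared state. */
        *reg_high = hash_high;
        *reg_low = hash_low;
    }
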
+diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
+index c1bb64d8366f..62e36649ea09 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
++++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
+@@ -83,6 +83,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
+ 	s32 ret_val = 0;
+ 	u16 phy_id;
+ 
++	/* ensure PHY page selection to fix misconfigured i210 */
++	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
++		phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
++
+ 	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+ 	if (ret_val)
+ 		goto out;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index 3756e45d8cec..1ec0b405aa81 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2262,6 +2262,17 @@ static int sync_toggles(struct mlx4_dev *dev)
+ 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
+ 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
+ 			/* PCI might be offline */
++
++			/* If device removal has been requested,
++			 * do not continue retrying.
++			 */
++			if (dev->persist->interface_state &
++			    MLX4_INTERFACE_STATE_NOWAIT) {
++				mlx4_warn(dev,
++					  "communication channel is offline\n");
++				return -EIO;
++			}
++
+ 			msleep(100);
+ 			wr_toggle = swab32(readl(&priv->mfunc.comm->
+ 					   slave_write));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index 115d301f1f61..1f931e6ae3cb 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -1717,6 +1717,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev)
+ 			       (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
+ 		if (!offline_bit)
+ 			return 0;
++
++		/* If device removal has been requested,
++		 * do not continue retrying.
++		 */
++		if (dev->persist->interface_state &
++		    MLX4_INTERFACE_STATE_NOWAIT)
++			break;
++
+ 		/* There are cases as part of AER/Reset flow that PF needs
+ 		 * around 100 msec to load. We therefore sleep for 100 msec
+ 		 * to allow other tasks to make use of that CPU during this
+@@ -3459,6 +3467,9 @@ static void mlx4_remove_one(struct pci_dev *pdev)
+ 	struct mlx4_priv *priv = mlx4_priv(dev);
+ 	int active_vfs = 0;
+ 
++	if (mlx4_is_slave(dev))
++		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
++
+ 	mutex_lock(&persist->interface_state_mutex);
+ 	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
+ 	mutex_unlock(&persist->interface_state_mutex);
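
Both mlx4 hunks above add the same escape hatch to polling loops that used
to sleep through their whole retry budget during hot-unplug. A sketch of a
retry loop honouring a removal flag; the flag, poll and sleep stubs model
interface_state, the offline-bit read and msleep():

    #include <errno.h>
    #include <stdbool.h>

    static volatile bool remove_requested;  /* MLX4_INTERFACE_STATE_NOWAIT */

    static bool channel_online(void) { return false; }  /* poll stub */
    static void sleep_ms(int ms) { (void)ms; }          /* msleep() stub */

    static int wait_for_channel(int retries)
    {
        while (retries--) {
            if (channel_online())
                return 0;
            /* Bail out as soon as teardown is flagged instead of
             * burning the remaining retries 100 ms at a time. */
            if (remove_requested)
                return -EIO;
            sleep_ms(100);
        }
        return -EIO;
    }
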
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index 28425e5ea91f..316fe86040d4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = {
+ 	[2] = {
+ 		.mask		= MLX5_PROF_MASK_QP_SIZE |
+ 				  MLX5_PROF_MASK_MR_CACHE,
+-		.log_max_qp	= 17,
++		.log_max_qp	= 18,
+ 		.mr_cache[0]	= {
+ 			.size	= 500,
+ 			.limit	= 250
+diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
+index dd9430043536..cf5ce371ec21 100644
+--- a/drivers/net/ethernet/ti/cpmac.c
++++ b/drivers/net/ethernet/ti/cpmac.c
+@@ -1235,7 +1235,7 @@ int cpmac_init(void)
+ 		goto fail_alloc;
+ 	}
+ 
+-#warning FIXME: unhardcode gpio&reset bits
++	/* FIXME: unhardcode gpio&reset bits */
+ 	ar7_gpio_disable(26);
+ 	ar7_gpio_disable(27);
+ 	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
+index 4e3d2e7c697c..e8c3a8c32534 100644
+--- a/drivers/net/irda/w83977af_ir.c
++++ b/drivers/net/irda/w83977af_ir.c
+@@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
+ 		
+ 		mtt = irda_get_mtt(skb);
+ 		pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
+-			if (mtt)
++			if (mtt > 1000)
++				mdelay(mtt/1000);
++			else if (mtt)
+ 				udelay(mtt);
+ 
+ 			/* Enable DMA interrupt */
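
The w83977af fix above stops busy-waiting (and avoids possible udelay()
overflow) when the media turnaround time exceeds a millisecond. The
selection logic, sketched with stub delay helpers:

    static void udelay_us(unsigned int us) { (void)us; }  /* busy-wait stub */
    static void mdelay_ms(unsigned int ms) { (void)ms; }  /* coarse stub */

    static void apply_mtt(unsigned int mtt_us)
    {
        if (mtt_us > 1000)
            mdelay_ms(mtt_us / 1000);  /* long waits: millisecond delay */
        else if (mtt_us)
            udelay_us(mtt_us);         /* short waits: microsecond spin */
    }
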
+diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
+index 9f59f17dc317..e10cff854b24 100644
+--- a/drivers/net/macvlan.c
++++ b/drivers/net/macvlan.c
+@@ -440,7 +440,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
+ 					      struct macvlan_dev, list);
+ 	else
+ 		vlan = macvlan_hash_lookup(port, eth->h_dest);
+-	if (vlan == NULL)
++	if (!vlan || vlan->mode == MACVLAN_MODE_SOURCE)
+ 		return RX_HANDLER_PASS;
+ 
+ 	dev = vlan->dev;
+diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
+index 1b1698f98818..9c582da5a08e 100644
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -763,8 +763,6 @@ static int marvell_read_status(struct phy_device *phydev)
+ 		if (adv < 0)
+ 			return adv;
+ 
+-		lpa &= adv;
+-
+ 		if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
+ 			phydev->duplex = DUPLEX_FULL;
+ 		else
+diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
+index 46530159256b..196f04015e8a 100644
+--- a/drivers/net/phy/spi_ks8995.c
++++ b/drivers/net/phy/spi_ks8995.c
+@@ -332,6 +332,7 @@ static int ks8995_probe(struct spi_device *spi)
+ 	if (err)
+ 		return err;
+ 
++	sysfs_attr_init(&ks->regs_attr.attr);
+ 	err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
+ 	if (err) {
+ 		dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 51ba895f0522..c30c1fc7889a 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -918,6 +918,7 @@ static __net_exit void ppp_exit_net(struct net *net)
+ {
+ 	struct ppp_net *pn = net_generic(net, ppp_net_id);
+ 
++	mutex_destroy(&pn->all_ppp_mutex);
+ 	idr_destroy(&pn->units_idr);
+ }
+ 
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 422a9379a644..1ad3700ed9c7 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2432,7 +2432,7 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
+ 
+ 	if (data[IFLA_VXLAN_ID]) {
+ 		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
+-		if (id >= VXLAN_VID_MASK)
++		if (id >= VXLAN_N_VID)
+ 			return -ERANGE;
+ 	}
+ 
+diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
+index e7f5910a6519..f8eb66ef2944 100644
+--- a/drivers/net/wimax/i2400m/usb.c
++++ b/drivers/net/wimax/i2400m/usb.c
+@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
+ 	struct i2400mu *i2400mu;
+ 	struct usb_device *usb_dev = interface_to_usbdev(iface);
+ 
++	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
++		return -ENODEV;
++
+ 	if (usb_dev->speed != USB_SPEED_HIGH)
+ 		dev_err(dev, "device not connected as high speed\n");
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
+index b4e6304afd40..7ee1a3183a06 100644
+--- a/drivers/net/wireless/ath/ath9k/tx99.c
++++ b/drivers/net/wireless/ath/ath9k/tx99.c
+@@ -180,6 +180,9 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ 	ssize_t len;
+ 	int r;
+ 
++	if (count < 1)
++		return -EINVAL;
++
+ 	if (sc->cur_chan->nvifs > 1)
+ 		return -EOPNOTSUPP;
+ 
+@@ -187,6 +190,8 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ 	if (copy_from_user(buf, user_buf, len))
+ 		return -EFAULT;
+ 
++	buf[len] = '\0';
++
+ 	if (strtobool(buf, &start))
+ 		return -EINVAL;
+ 
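
The tx99 hunk above rejects empty writes and NUL-terminates the copied bytes
so strtobool() never parses past the end of the stack buffer. A userspace
sketch of the same hygiene; the 32-byte buffer is an assumption and memcpy
models copy_from_user():

    #include <errno.h>
    #include <string.h>

    static int write_tx99(const char *user_buf, size_t count)
    {
        char buf[32];
        size_t len;

        if (count < 1)
            return -EINVAL;

        len = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
        memcpy(buf, user_buf, len);  /* copy_from_user() in the driver */
        buf[len] = '\0';             /* guarantee termination for parsing */

        return buf[0] == '1';        /* strtobool() stand-in */
    }
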
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index 03097016fd43..aafb97ce080d 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2884,6 +2884,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ {
+ 	struct hwsim_new_radio_params param = { 0 };
+ 	const char *hwname = NULL;
++	int ret;
+ 
+ 	param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ 	param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+@@ -2923,7 +2924,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ 		param.regd = hwsim_world_regdom_custom[idx];
+ 	}
+ 
+-	return mac80211_hwsim_new_radio(info, &param);
++	ret = mac80211_hwsim_new_radio(info, &param);
++	kfree(hwname);
++	return ret;
+ }
+ 
+ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
+index ee0ebff103a4..089a1f41e44e 100644
+--- a/drivers/pci/iov.c
++++ b/drivers/pci/iov.c
+@@ -302,13 +302,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+ 			return rc;
+ 	}
+ 
+-	pci_iov_set_numvfs(dev, nr_virtfn);
+-	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+-	pci_cfg_access_lock(dev);
+-	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+-	msleep(100);
+-	pci_cfg_access_unlock(dev);
+-
+ 	iov->initial_VFs = initial;
+ 	if (nr_virtfn < initial)
+ 		initial = nr_virtfn;
+@@ -319,6 +312,13 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
+ 		return retval;
+ 	}
+ 
++	pci_iov_set_numvfs(dev, nr_virtfn);
++	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
++	pci_cfg_access_lock(dev);
++	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
++	msleep(100);
++	pci_cfg_access_unlock(dev);
++
+ 	for (i = 0; i < initial; i++) {
+ 		rc = virtfn_add(dev, i, 0);
+ 		if (rc)
+@@ -552,21 +552,61 @@ void pci_iov_release(struct pci_dev *dev)
+ }
+ 
+ /**
+- * pci_iov_resource_bar - get position of the SR-IOV BAR
++ * pci_iov_update_resource - update a VF BAR
+  * @dev: the PCI device
+  * @resno: the resource number
+  *
+- * Returns position of the BAR encapsulated in the SR-IOV capability.
++ * Update a VF BAR in the SR-IOV capability of a PF.
+  */
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
++void pci_iov_update_resource(struct pci_dev *dev, int resno)
+ {
+-	if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
+-		return 0;
++	struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
++	struct resource *res = dev->resource + resno;
++	int vf_bar = resno - PCI_IOV_RESOURCES;
++	struct pci_bus_region region;
++	u16 cmd;
++	u32 new;
++	int reg;
++
++	/*
++	 * The generic pci_restore_bars() path calls this for all devices,
++	 * including VFs and non-SR-IOV devices.  If this is not a PF, we
++	 * have nothing to do.
++	 */
++	if (!iov)
++		return;
++
++	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
++	if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
++		dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
++			 vf_bar, res);
++		return;
++	}
++
++	/*
++	 * Ignore unimplemented BARs, unused resource slots for 64-bit
++	 * BARs, and non-movable resources, e.g., those described via
++	 * Enhanced Allocation.
++	 */
++	if (!res->flags)
++		return;
++
++	if (res->flags & IORESOURCE_UNSET)
++		return;
++
++	if (res->flags & IORESOURCE_PCI_FIXED)
++		return;
+ 
+-	BUG_ON(!dev->is_physfn);
++	pcibios_resource_to_bus(dev->bus, &region, res);
++	new = region.start;
++	new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ 
+-	return dev->sriov->pos + PCI_SRIOV_BAR +
+-		4 * (resno - PCI_IOV_RESOURCES);
++	reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
++	pci_write_config_dword(dev, reg, new);
++	if (res->flags & IORESOURCE_MEM_64) {
++		new = region.start >> 16 >> 16;
++		pci_write_config_dword(dev, reg + 4, new);
++	}
+ }
+ 
+ resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
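
pci_iov_update_resource() above programs a VF BAR as two config dwords, low
word first; the driver writes the high half as "region.start >> 16 >> 16" so
the shift stays defined even where resource addresses are 32 bits wide. A
sketch using an explicit 64-bit start, where a plain shift by 32 is safe;
the flag bit and register-file model are illustrative:

    #include <stdint.h>

    #define MEM_64 0x1  /* stand-in for IORESOURCE_MEM_64 */

    static void write_vf_bar(uint32_t *cfg, int reg, uint64_t start,
                             unsigned int flags)
    {
        cfg[reg] = (uint32_t)start;  /* low dword (plus flag bits in the driver) */
        if (flags & MEM_64)
            cfg[reg + 1] = (uint32_t)(start >> 32);  /* high dword */
    }
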
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index b5b80a5560ed..409f895b5a3d 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4270,36 +4270,6 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags)
+ }
+ EXPORT_SYMBOL(pci_select_bars);
+ 
+-/**
+- * pci_resource_bar - get position of the BAR associated with a resource
+- * @dev: the PCI device
+- * @resno: the resource number
+- * @type: the BAR type to be filled in
+- *
+- * Returns BAR position in config space, or 0 if the BAR is invalid.
+- */
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+-{
+-	int reg;
+-
+-	if (resno < PCI_ROM_RESOURCE) {
+-		*type = pci_bar_unknown;
+-		return PCI_BASE_ADDRESS_0 + 4 * resno;
+-	} else if (resno == PCI_ROM_RESOURCE) {
+-		*type = pci_bar_mem32;
+-		return dev->rom_base_reg;
+-	} else if (resno < PCI_BRIDGE_RESOURCES) {
+-		/* device specific resource */
+-		*type = pci_bar_unknown;
+-		reg = pci_iov_resource_bar(dev, resno);
+-		if (reg)
+-			return reg;
+-	}
+-
+-	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+-	return 0;
+-}
+-
+ /* Some architectures require additional programming to enable VGA */
+ static arch_set_vga_state_t arch_set_vga_state;
+ 
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 0c039200ed79..9dac40998336 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -208,7 +208,6 @@ bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *pl,
+ int pci_setup_device(struct pci_dev *dev);
+ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 		    struct resource *res, unsigned int reg);
+-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
+ void pci_configure_ari(struct pci_dev *dev);
+ void __pci_bus_size_bridges(struct pci_bus *bus,
+ 			struct list_head *realloc_head);
+@@ -263,7 +262,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
+ #ifdef CONFIG_PCI_IOV
+ int pci_iov_init(struct pci_dev *dev);
+ void pci_iov_release(struct pci_dev *dev);
+-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
++void pci_iov_update_resource(struct pci_dev *dev, int resno);
+ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
+ void pci_restore_iov_state(struct pci_dev *dev);
+ int pci_iov_bus_range(struct pci_bus *bus);
+@@ -277,10 +276,6 @@ static inline void pci_iov_release(struct pci_dev *dev)
+ 
+ {
+ }
+-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+-{
+-	return 0;
+-}
+ static inline void pci_restore_iov_state(struct pci_dev *dev)
+ {
+ }
+diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
+index 63fc63911295..deb903112974 100644
+--- a/drivers/pci/pcie/pme.c
++++ b/drivers/pci/pcie/pme.c
+@@ -233,6 +233,9 @@ static void pcie_pme_work_fn(struct work_struct *work)
+ 			break;
+ 
+ 		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
++		if (rtsta == (u32) ~0)
++			break;
++
+ 		if (rtsta & PCI_EXP_RTSTA_PME) {
+ 			/*
+ 			 * Clear PME status of the port.  If there are other
+@@ -280,7 +283,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
+ 	spin_lock_irqsave(&data->lock, flags);
+ 	pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
+ 
+-	if (!(rtsta & PCI_EXP_RTSTA_PME)) {
++	if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
+ 		spin_unlock_irqrestore(&data->lock, flags);
+ 		return IRQ_NONE;
+ 	}
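
Both PME paths above now treat an all-ones read as "device gone": config
reads from a surprise-removed device return ~0, so the value must not be
interpreted as status bits. A minimal sketch; the bit position is
illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define RTSTA_PME 0x00010000u  /* illustrative PCI_EXP_RTSTA_PME bit */

    static bool pme_pending(uint32_t rtsta)
    {
        if (rtsta == (uint32_t)~0)  /* removed device: bits are meaningless */
            return false;
        return rtsta & RTSTA_PME;
    }
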
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 5754d7b48c1f..c7dc06636bf6 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -223,7 +223,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ 		}
+ 	} else {
+-		res->flags |= (l & IORESOURCE_ROM_ENABLE);
++		if (l & PCI_ROM_ADDRESS_ENABLE)
++			res->flags |= IORESOURCE_ROM_ENABLE;
+ 		l64 = l & PCI_ROM_ADDRESS_MASK;
+ 		sz64 = sz & PCI_ROM_ADDRESS_MASK;
+ 		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
+diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
+index 8a280e9c2ad1..7e67af2bb366 100644
+--- a/drivers/pci/remove.c
++++ b/drivers/pci/remove.c
+@@ -20,9 +20,9 @@ static void pci_stop_dev(struct pci_dev *dev)
+ 	pci_pme_active(dev, false);
+ 
+ 	if (dev->is_added) {
++		device_release_driver(&dev->dev);
+ 		pci_proc_detach_device(dev);
+ 		pci_remove_sysfs_dev_files(dev);
+-		device_release_driver(&dev->dev);
+ 		dev->is_added = 0;
+ 	}
+ 
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index eb0ad530dc43..3eea7fc5e1a2 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -31,6 +31,11 @@ int pci_enable_rom(struct pci_dev *pdev)
+ 	if (!res->flags)
+ 		return -1;
+ 
++	/*
++	 * Ideally pci_update_resource() would update the ROM BAR address,
++	 * and we would only set the enable bit here.  But apparently some
++	 * devices have buggy ROM BARs that read as zero when disabled.
++	 */
+ 	pcibios_resource_to_bus(pdev->bus, &region, res);
+ 	pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
+ 	rom_addr &= ~PCI_ROM_ADDRESS_MASK;
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index 232f9254c11a..00f32ff6f74e 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -25,15 +25,13 @@
+ #include <linux/slab.h>
+ #include "pci.h"
+ 
+-
+-void pci_update_resource(struct pci_dev *dev, int resno)
++static void pci_std_update_resource(struct pci_dev *dev, int resno)
+ {
+ 	struct pci_bus_region region;
+ 	bool disable;
+ 	u16 cmd;
+ 	u32 new, check, mask;
+ 	int reg;
+-	enum pci_bar_type type;
+ 	struct resource *res = dev->resource + resno;
+ 
+ 	/*
+@@ -55,21 +53,34 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ 		return;
+ 
+ 	pcibios_resource_to_bus(dev->bus, &region, res);
++	new = region.start;
+ 
+-	new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
+-	if (res->flags & IORESOURCE_IO)
++	if (res->flags & IORESOURCE_IO) {
+ 		mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
+-	else
++		new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
++	} else if (resno == PCI_ROM_RESOURCE) {
++		mask = (u32)PCI_ROM_ADDRESS_MASK;
++	} else {
+ 		mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
++		new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
++	}
+ 
+-	reg = pci_resource_bar(dev, resno, &type);
+-	if (!reg)
+-		return;
+-	if (type != pci_bar_unknown) {
++	if (resno < PCI_ROM_RESOURCE) {
++		reg = PCI_BASE_ADDRESS_0 + 4 * resno;
++	} else if (resno == PCI_ROM_RESOURCE) {
++
++		/*
++		 * Apparently some Matrox devices have ROM BARs that read
++		 * as zero when disabled, so don't update ROM BARs unless
++		 * they're enabled.  See https://lkml.org/lkml/2005/8/30/138.
++		 */
+ 		if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ 			return;
++
++		reg = dev->rom_base_reg;
+ 		new |= PCI_ROM_ADDRESS_ENABLE;
+-	}
++	} else
++		return;
+ 
+ 	/*
+ 	 * We can't update a 64-bit BAR atomically, so when possible,
+@@ -105,6 +116,16 @@ void pci_update_resource(struct pci_dev *dev, int resno)
+ 		pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ 
++void pci_update_resource(struct pci_dev *dev, int resno)
++{
++	if (resno <= PCI_ROM_RESOURCE)
++		pci_std_update_resource(dev, resno);
++#ifdef CONFIG_PCI_IOV
++	else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
++		pci_iov_update_resource(dev, resno);
++#endif
++}
++
+ int pci_claim_resource(struct pci_dev *dev, int resource)
+ {
+ 	struct resource *res = &dev->resource[resource];
+diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
+index aeb5729fbda6..029ff74ec53c 100644
+--- a/drivers/pinctrl/Kconfig
++++ b/drivers/pinctrl/Kconfig
+@@ -28,7 +28,8 @@ config DEBUG_PINCTRL
+ 
+ config PINCTRL_ADI2
+ 	bool "ADI pin controller driver"
+-	depends on BLACKFIN
++	depends on (BF54x || BF60x)
++	depends on !GPIO_ADI
+ 	select PINMUX
+ 	select IRQ_DOMAIN
+ 	help
+diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
+index 166fc60d8b55..faa81ac2d481 100644
+--- a/drivers/rtc/interface.c
++++ b/drivers/rtc/interface.c
+@@ -793,9 +793,23 @@ EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+  */
+ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+ {
++	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
++	struct rtc_time tm;
++	ktime_t now;
++
+ 	timer->enabled = 1;
++	__rtc_read_time(rtc, &tm);
++	now = rtc_tm_to_ktime(tm);
++
++	/* Skip over expired timers */
++	while (next) {
++		if (next->expires.tv64 >= now.tv64)
++			break;
++		next = timerqueue_iterate_next(next);
++	}
++
+ 	timerqueue_add(&rtc->timerqueue, &timer->node);
+-	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
++	if (!next) {
+ 		struct rtc_wkalrm alarm;
+ 		int err;
+ 		alarm.time = rtc_ktime_to_tm(timer->node.expires);
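
The rtc_timer_enqueue() fix above reads the current time and walks past
already-expired queue nodes, reprogramming the hardware alarm only when no
live timer precedes the new one; the old head-pointer comparison misfired
whenever an expired entry still sat at the head. The decision rule, sketched
over a sorted expiry array standing in for the timerqueue:

    #include <stdbool.h>

    static bool needs_reprogram(const long long *expires, int n, long long now)
    {
        int i = 0;

        /* Skip expired timers: they no longer correspond to a pending
         * hardware alarm even though they still sit at the head. */
        while (i < n && expires[i] < now)
            i++;

        /* Nothing live ahead of the new timer means the alarm hardware
         * must be programmed for it. */
        return i == n;
    }
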
+diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
+index f40afdd0e5f5..d5b572266a72 100644
+--- a/drivers/rtc/rtc-s35390a.c
++++ b/drivers/rtc/rtc-s35390a.c
+@@ -15,6 +15,7 @@
+ #include <linux/bitrev.h>
+ #include <linux/bcd.h>
+ #include <linux/slab.h>
++#include <linux/delay.h>
+ 
+ #define S35390A_CMD_STATUS1	0
+ #define S35390A_CMD_STATUS2	1
+@@ -34,10 +35,14 @@
+ #define S35390A_ALRM_BYTE_HOURS	1
+ #define S35390A_ALRM_BYTE_MINS	2
+ 
++/* flags for STATUS1 */
+ #define S35390A_FLAG_POC	0x01
+ #define S35390A_FLAG_BLD	0x02
++#define S35390A_FLAG_INT2	0x04
+ #define S35390A_FLAG_24H	0x40
+ #define S35390A_FLAG_RESET	0x80
++
++/* flag for STATUS2 */
+ #define S35390A_FLAG_TEST	0x01
+ 
+ #define S35390A_INT2_MODE_MASK		0xF0
+@@ -94,19 +99,63 @@ static int s35390a_get_reg(struct s35390a *s35390a, int reg, char *buf, int len)
+ 	return 0;
+ }
+ 
+-static int s35390a_reset(struct s35390a *s35390a)
++/*
++ * Returns <0 on error, 0 if the rtc is set up fine and 1 if the chip was
++ * reset. To preserve the information about a pending irq, the value read
++ * from STATUS1 is passed back to the caller.
++ */
++static int s35390a_reset(struct s35390a *s35390a, char *status1)
+ {
+-	char buf[1];
+-
+-	if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
+-		return -EIO;
+-
+-	if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
++	char buf;
++	int ret;
++	unsigned initcount = 0;
++
++	ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
++	if (ret < 0)
++		return ret;
++
++	if (*status1 & S35390A_FLAG_POC)
++		/*
++		 * Do not communicate for 0.5 seconds since the power-on
++		 * detection circuit is in operation.
++		 */
++		msleep(500);
++	else if (!(*status1 & S35390A_FLAG_BLD))
++		/*
++		 * If both POC and BLD are unset everything is fine.
++		 */
+ 		return 0;
+ 
+-	buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
+-	buf[0] &= 0xf0;
+-	return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
++	/*
++	 * At least one of POC and BLD are set, so reinitialise chip. Keeping
++	 * this information in the hardware to know later that the time isn't
++	 * valid is unfortunately not possible because POC and BLD are cleared
++	 * on read. So the reset is best done now.
++	 *
++	 * The 24H bit is kept over reset, so set it already here.
++	 */
++initialize:
++	*status1 = S35390A_FLAG_24H;
++	buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
++	ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
++
++	if (ret < 0)
++		return ret;
++
++	ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
++	if (ret < 0)
++		return ret;
++
++	if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
++		/* Try up to five times to reset the chip */
++		if (initcount < 5) {
++			++initcount;
++			goto initialize;
++		} else
++			return -EIO;
++	}
++
++	return 1;
+ }
+ 
+ static int s35390a_disable_test_mode(struct s35390a *s35390a)
+@@ -242,6 +291,8 @@ static int s35390a_set_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
+ 
+ 	if (alm->time.tm_wday != -1)
+ 		buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
++	else
++		buf[S35390A_ALRM_BYTE_WDAY] = 0;
+ 
+ 	buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
+ 			alm->time.tm_hour) | 0x80;
+@@ -269,23 +320,43 @@ static int s35390a_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alm)
+ 	if (err < 0)
+ 		return err;
+ 
+-	if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
+-		return -EINVAL;
++	if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
++		/*
++		 * When the alarm isn't enabled, the register to configure
++		 * the alarm time isn't accessible.
++		 */
++		alm->enabled = 0;
++		return 0;
++	} else {
++		alm->enabled = 1;
++	}
+ 
+ 	err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
+ 	if (err < 0)
+ 		return err;
+ 
+ 	/* This chip returns the bits of each byte in reverse order */
+-	for (i = 0; i < 3; ++i) {
++	for (i = 0; i < 3; ++i)
+ 		buf[i] = bitrev8(buf[i]);
+-		buf[i] &= ~0x80;
+-	}
+ 
+-	alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
+-	alm->time.tm_hour = s35390a_reg2hr(s35390a,
+-						buf[S35390A_ALRM_BYTE_HOURS]);
+-	alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
++	/*
++	 * B0 of the three matching registers is an enable flag. Iff it is set
++	 * the configured value is used for matching.
++	 */
++	if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
++		alm->time.tm_wday =
++			bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
++
++	if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
++		alm->time.tm_hour =
++			s35390a_reg2hr(s35390a,
++				       buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
++
++	if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
++		alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
++
++	/* alarm triggers always at s=0 */
++	alm->time.tm_sec = 0;
+ 
+ 	dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
+ 			__func__, alm->time.tm_min, alm->time.tm_hour,
+@@ -327,11 +398,11 @@ static struct i2c_driver s35390a_driver;
+ static int s35390a_probe(struct i2c_client *client,
+ 			 const struct i2c_device_id *id)
+ {
+-	int err;
++	int err, err_reset;
+ 	unsigned int i;
+ 	struct s35390a *s35390a;
+ 	struct rtc_time tm;
+-	char buf[1];
++	char buf, status1;
+ 
+ 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ 		err = -ENODEV;
+@@ -360,29 +431,35 @@ static int s35390a_probe(struct i2c_client *client,
+ 		}
+ 	}
+ 
+-	err = s35390a_reset(s35390a);
+-	if (err < 0) {
++	err_reset = s35390a_reset(s35390a, &status1);
++	if (err_reset < 0) {
++		err = err_reset;
+ 		dev_err(&client->dev, "error resetting chip\n");
+ 		goto exit_dummy;
+ 	}
+ 
+-	err = s35390a_disable_test_mode(s35390a);
+-	if (err < 0) {
+-		dev_err(&client->dev, "error disabling test mode\n");
+-		goto exit_dummy;
+-	}
+-
+-	err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+-	if (err < 0) {
+-		dev_err(&client->dev, "error checking 12/24 hour mode\n");
+-		goto exit_dummy;
+-	}
+-	if (buf[0] & S35390A_FLAG_24H)
++	if (status1 & S35390A_FLAG_24H)
+ 		s35390a->twentyfourhour = 1;
+ 	else
+ 		s35390a->twentyfourhour = 0;
+ 
+-	if (s35390a_get_datetime(client, &tm) < 0)
++	if (status1 & S35390A_FLAG_INT2) {
++		/* disable alarm (and maybe test mode) */
++		buf = 0;
++		err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
++		if (err < 0) {
++			dev_err(&client->dev, "error disabling alarm");
++			goto exit_dummy;
++		}
++	} else {
++		err = s35390a_disable_test_mode(s35390a);
++		if (err < 0) {
++			dev_err(&client->dev, "error disabling test mode\n");
++			goto exit_dummy;
++		}
++	}
++
++	if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
+ 		dev_warn(&client->dev, "clock needs to be set\n");
+ 
+ 	device_set_wakeup_capable(&client->dev, 1);
+@@ -395,6 +472,10 @@ static int s35390a_probe(struct i2c_client *client,
+ 		err = PTR_ERR(s35390a->rtc);
+ 		goto exit_dummy;
+ 	}
++
++	if (status1 & S35390A_FLAG_INT2)
++		rtc_update_irq(s35390a->rtc, 1, RTC_AF);
++
+ 	return 0;
+ 
+ exit_dummy:
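
The s35390a probe rework above turns the single reset attempt into a bounded
retry loop and signals, via a positive return, that the time is no longer
valid. A heavily simplified, runnable sketch of that contract; the fake
status byte models STATUS1, and the POC settle delay and 24H handling are
omitted:

    #include <errno.h>

    #define FLAG_POC 0x01
    #define FLAG_BLD 0x02

    static unsigned char fake_status;  /* chip model */

    static int read_status(unsigned char *s) { *s = fake_status; return 0; }
    static int write_reset(void) { fake_status = 0; return 0; }

    static int chip_reset(unsigned char *status1)
    {
        unsigned int tries = 0;

        if (read_status(status1) < 0)
            return -EIO;
        if (!(*status1 & (FLAG_POC | FLAG_BLD)))
            return 0;  /* clock state is fine, nothing to do */

        do {  /* reinitialise, retrying while POC/BLD stay set */
            if (write_reset() < 0 || read_status(status1) < 0)
                return -EIO;
            if (!(*status1 & (FLAG_POC | FLAG_BLD)))
                return 1;  /* reset done, time must be set again */
        } while (++tries < 5);

        return -EIO;
    }
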
+diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
+index 74a307c0a240..8f1c58d4d5b5 100644
+--- a/drivers/scsi/bfa/bfad_debugfs.c
++++ b/drivers/scsi/bfa/bfad_debugfs.c
+@@ -254,7 +254,8 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ 	struct bfad_s *bfad = port->bfad;
+ 	struct bfa_s *bfa = &bfad->bfa;
+ 	struct bfa_ioc_s *ioc = &bfa->ioc;
+-	int addr, len, rc, i;
++	int addr, rc, i;
++	u32 len;
+ 	u32 *regbuf;
+ 	void __iomem *rb, *reg_addr;
+ 	unsigned long flags;
+@@ -265,7 +266,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ 		return PTR_ERR(kern_buf);
+ 
+ 	rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+-	if (rc < 2) {
++	if (rc < 2 || len > (UINT_MAX >> 2)) {
+ 		printk(KERN_INFO
+ 			"bfad[%d]: %s failed to read user buf\n",
+ 			bfad->inst_no, __func__);
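
The bfad_debugfs fix above bounds the user-supplied word count before it is
scaled to a byte count; the driver later computes len << 2 for the
allocation and copy, so anything above UINT_MAX >> 2 would wrap. A sketch of
the parse-and-validate step:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    static int parse_regrd(const char *kern_buf, unsigned int *addr,
                           unsigned int *len)
    {
        if (sscanf(kern_buf, "%x:%x", addr, len) < 2 ||
            *len > (UINT_MAX >> 2))  /* reject counts whose byte size wraps */
            return -EINVAL;
        return 0;
    }
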
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 8eab107b53fb..0ef5d9286e0a 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -4158,7 +4158,7 @@ static void hpsa_scan_complete(struct ctlr_info *h)
+ 
+ 	spin_lock_irqsave(&h->scan_lock, flags);
+ 	h->scan_finished = 1;
+-	wake_up_all(&h->scan_wait_queue);
++	wake_up(&h->scan_wait_queue);
+ 	spin_unlock_irqrestore(&h->scan_lock, flags);
+ }
+ 
+@@ -4176,11 +4176,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
+ 	if (unlikely(lockup_detected(h)))
+ 		return hpsa_scan_complete(h);
+ 
++	/*
++	 * If a scan is already waiting to run, no need to add another
++	 */
++	spin_lock_irqsave(&h->scan_lock, flags);
++	if (h->scan_waiting) {
++		spin_unlock_irqrestore(&h->scan_lock, flags);
++		return;
++	}
++
++	spin_unlock_irqrestore(&h->scan_lock, flags);
++
+ 	/* wait until any scan already in progress is finished. */
+ 	while (1) {
+ 		spin_lock_irqsave(&h->scan_lock, flags);
+ 		if (h->scan_finished)
+ 			break;
++		h->scan_waiting = 1;
+ 		spin_unlock_irqrestore(&h->scan_lock, flags);
+ 		wait_event(h->scan_wait_queue, h->scan_finished);
+ 		/* Note: We don't need to worry about a race between this
+@@ -4190,6 +4202,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh)
+ 		 */
+ 	}
+ 	h->scan_finished = 0; /* mark scan as in progress */
++	h->scan_waiting = 0;
+ 	spin_unlock_irqrestore(&h->scan_lock, flags);
+ 
+ 	if (unlikely(lockup_detected(h)))
+@@ -6936,6 +6949,7 @@ reinit_after_soft_reset:
+ 		goto clean4;
+ 	init_waitqueue_head(&h->scan_wait_queue);
+ 	h->scan_finished = 1; /* no scan currently in progress */
++	h->scan_waiting = 0;
+ 
+ 	pci_set_drvdata(pdev, h);
+ 	h->ndevices = 0;
+diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
+index 657713050349..cbaf416d5456 100644
+--- a/drivers/scsi/hpsa.h
++++ b/drivers/scsi/hpsa.h
+@@ -166,6 +166,7 @@ struct ctlr_info {
+ 	dma_addr_t		errinfo_pool_dhandle;
+ 	unsigned long  		*cmd_pool_bits;
+ 	int			scan_finished;
++	u8			scan_waiting : 1;
+ 	spinlock_t		scan_lock;
+ 	wait_queue_head_t	scan_wait_queue;
+ 
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 4da8963315c7..30f2fe9ba766 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -7286,11 +7286,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+ 			spin_lock_irq(shost->host_lock);
+ 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ 			spin_unlock_irq(shost->host_lock);
+-			if (vport->port_type == LPFC_PHYSICAL_PORT
+-				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
+-				lpfc_issue_init_vfi(vport);
+-			else
++			if (mb->mbxStatus == MBX_NOT_FINISHED)
++				break;
++			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
++			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
++				if (phba->sli_rev == LPFC_SLI_REV4)
++					lpfc_issue_init_vfi(vport);
++				else
++					lpfc_initial_flogi(vport);
++			} else {
+ 				lpfc_initial_fdisc(vport);
++			}
+ 			break;
+ 		}
+ 	} else {
+diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
+index 1813c45946f4..3757a7399983 100644
+--- a/drivers/scsi/lpfc/lpfc_hw4.h
++++ b/drivers/scsi/lpfc/lpfc_hw4.h
+@@ -1180,6 +1180,7 @@ struct lpfc_mbx_wq_create {
+ #define lpfc_mbx_wq_create_page_size_SHIFT	0
+ #define lpfc_mbx_wq_create_page_size_MASK	0x000000FF
+ #define lpfc_mbx_wq_create_page_size_WORD	word1
++#define LPFC_WQ_PAGE_SIZE_4096	0x1
+ #define lpfc_mbx_wq_create_wqe_size_SHIFT	8
+ #define lpfc_mbx_wq_create_wqe_size_MASK	0x0000000F
+ #define lpfc_mbx_wq_create_wqe_size_WORD	word1
+@@ -1251,6 +1252,7 @@ struct rq_context {
+ #define lpfc_rq_context_page_size_SHIFT	0		/* Version 1 Only */
+ #define lpfc_rq_context_page_size_MASK	0x000000FF
+ #define lpfc_rq_context_page_size_WORD	word0
++#define	LPFC_RQ_PAGE_SIZE_4096	0x1
+ 	uint32_t reserved1;
+ 	uint32_t word2;
+ #define lpfc_rq_context_cq_id_SHIFT	16
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 7ca27e5ef079..625e3ee877ee 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -13509,7 +13509,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ 			       LPFC_WQ_WQE_SIZE_128);
+ 			bf_set(lpfc_mbx_wq_create_page_size,
+ 			       &wq_create->u.request_1,
+-			       (PAGE_SIZE/SLI4_PAGE_SIZE));
++			       LPFC_WQ_PAGE_SIZE_4096);
+ 			page = wq_create->u.request_1.page;
+ 			break;
+ 		}
+@@ -13535,8 +13535,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+ 			       LPFC_WQ_WQE_SIZE_128);
+ 			break;
+ 		}
+-		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
+-		       (PAGE_SIZE/SLI4_PAGE_SIZE));
++		bf_set(lpfc_mbx_wq_create_page_size,
++		       &wq_create->u.request_1,
++		       LPFC_WQ_PAGE_SIZE_4096);
+ 		page = wq_create->u.request_1.page;
+ 		break;
+ 	default:
+@@ -13722,7 +13723,7 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+ 		       LPFC_RQE_SIZE_8);
+ 		bf_set(lpfc_rq_context_page_size,
+ 		       &rq_create->u.request.context,
+-		       (PAGE_SIZE/SLI4_PAGE_SIZE));
++		       LPFC_RQ_PAGE_SIZE_4096);
+ 	} else {
+ 		switch (hrq->entry_count) {
+ 		default:
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 9c780740fb82..e712fe745955 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -737,8 +737,8 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
+ 			mv_dprintk("device %016llx not ready.\n",
+ 				SAS_ADDR(dev->sas_addr));
+ 
+-			rc = SAS_PHY_DOWN;
+-			return rc;
++		rc = SAS_PHY_DOWN;
++		return rc;
+ 	}
+ 	tei.port = dev->port->lldd_port;
+ 	if (tei.port && !tei.port->port_attached && !tmf) {
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 42d3f82e75c7..55df57341858 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -160,7 +160,7 @@ static struct {
+ 	{"DGC", "RAID", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, storage on LUN 0 */
+ 	{"DGC", "DISK", NULL, BLIST_SPARSELUN},	/* Dell PV 650F, no storage on LUN 0 */
+ 	{"EMC",  "Invista", "*", BLIST_SPARSELUN | BLIST_LARGELUN},
+-	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_FORCELUN},
++	{"EMC", "SYMMETRIX", NULL, BLIST_SPARSELUN | BLIST_LARGELUN | BLIST_REPORTLUN2},
+ 	{"EMULEX", "MD21/S2     ESDI", NULL, BLIST_SINGLELUN},
+ 	{"easyRAID", "16P", NULL, BLIST_NOREPORTLUN},
+ 	{"easyRAID", "X6P", NULL, BLIST_NOREPORTLUN},
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 17fbf1d3eadc..8ef1d5e6619a 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -1119,7 +1119,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
+ 	bool is_mq = (rq->mq_ctx != NULL);
+ 	int error;
+ 
+-	BUG_ON(!rq->nr_phys_segments);
++	if (WARN_ON_ONCE(!rq->nr_phys_segments))
++		return -EINVAL;
+ 
+ 	error = scsi_init_sgtable(rq, &cmd->sdb);
+ 	if (error)
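
scsi_init_io() above downgrades a driver-bug assertion from BUG_ON() to
WARN_ON_ONCE() plus an error return, failing the one request instead of the
whole machine. The shape of the change, with stderr standing in for the WARN
splat:

    #include <errno.h>
    #include <stdio.h>

    static int init_io(unsigned int nr_phys_segments)
    {
        if (!nr_phys_segments) {
            /* WARN_ON_ONCE(): log it, but keep the system alive */
            fprintf(stderr, "init_io: request without segments\n");
            return -EINVAL;
        }
        return 0;
    }
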
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 80cebe691fee..a678dd10905f 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1383,11 +1383,15 @@ static int media_not_present(struct scsi_disk *sdkp,
+  **/
+ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
+ {
+-	struct scsi_disk *sdkp = scsi_disk(disk);
+-	struct scsi_device *sdp = sdkp->device;
++	struct scsi_disk *sdkp = scsi_disk_get(disk);
++	struct scsi_device *sdp;
+ 	struct scsi_sense_hdr *sshdr = NULL;
+ 	int retval;
+ 
++	if (!sdkp)
++		return 0;
++
++	sdp = sdkp->device;
+ 	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
+ 
+ 	/*
+@@ -1444,6 +1448,7 @@ out:
+ 	kfree(sshdr);
+ 	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
+ 	sdp->changed = 0;
++	scsi_disk_put(sdkp);
+ 	return retval;
+ }
+ 
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 51a0cc047b5f..98b56a7069d3 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -325,8 +325,6 @@ MODULE_PARM_DESC(vcpus_per_sub_channel, "Ratio of VCPUs to subchannels");
+  */
+ static int storvsc_timeout = 180;
+ 
+-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
+-
+ 
+ static void storvsc_on_channel_callback(void *context);
+ 
+@@ -584,17 +582,18 @@ static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
+ 	for (i = 0; i < sg_count; i++) {
+ 		if (i == 0) {
+ 			/* make sure 1st one does not have hole */
+-			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
++			if (sgl->offset + sgl->length != PAGE_SIZE)
+ 				return i;
+ 		} else if (i == sg_count - 1) {
+ 			/* make sure last one does not have hole */
+-			if (sgl[i].offset != 0)
++			if (sgl->offset != 0)
+ 				return i;
+ 		} else {
+ 			/* make sure no hole in the middle */
+-			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
++			if (sgl->length != PAGE_SIZE || sgl->offset != 0)
+ 				return i;
+ 		}
++		sgl = sg_next(sgl);
+ 	}
+ 	return -1;
+ }
+@@ -1416,6 +1415,22 @@ static int storvsc_do_io(struct hv_device *device,
+ 	return ret;
+ }
+ 
++static int storvsc_device_alloc(struct scsi_device *sdevice)
++{
++	/*
++	 * Set blist flag to permit the reading of the VPD pages even when
++	 * the target may claim SPC-2 compliance. MSFT targets currently
++	 * claim SPC-2 compliance while they implement post SPC-2 features.
++	 * With this flag we can correctly handle WRITE_SAME_16 issues.
++	 *
++	 * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
++	 * still supports REPORT LUN.
++	 */
++	sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
++
++	return 0;
++}
++
+ static int storvsc_device_configure(struct scsi_device *sdevice)
+ {
+ 
+@@ -1427,14 +1442,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
+ 
+ 	sdevice->no_write_same = 1;
+ 
+-	/*
+-	 * Add blist flags to permit the reading of the VPD pages even when
+-	 * the target may claim SPC-2 compliance. MSFT targets currently
+-	 * claim SPC-2 compliance while they implement post SPC-2 features.
+-	 * With this patch we can correctly handle WRITE_SAME_16 issues.
+-	 */
+-	sdevice->sdev_bflags |= msft_blist_flags;
+-
+ 	/*
+ 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
+ 	 * if the device is a MSFT virtual device.
+@@ -1722,6 +1729,7 @@ static struct scsi_host_template scsi_driver = {
+ 	.eh_host_reset_handler =	storvsc_host_reset_handler,
+ 	.proc_name =		"storvsc_host",
+ 	.eh_timed_out =		storvsc_eh_timed_out,
++	.slave_alloc =		storvsc_device_alloc,
+ 	.slave_configure =	storvsc_device_configure,
+ 	.cmd_per_lun =		255,
+ 	.this_id =		-1,
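
The storvsc bounce-buffer check above switches from array indexing to
sg_next() traversal, which chained scatterlists require. A runnable sketch
of the hole detection over a linked-node stand-in for struct scatterlist:

    struct sg { unsigned int offset, length; struct sg *next; };

    #define PAGE_SZ 4096u

    /* Return the index of the first entry breaking page contiguity,
     * or -1 if the list has no holes. */
    static int first_hole(struct sg *sgl, int count)
    {
        for (int i = 0; i < count; i++, sgl = sgl->next) {
            if (i == 0 && sgl->offset + sgl->length != PAGE_SZ)
                return i;  /* first entry must end on a page boundary */
            if (i == count - 1 && i != 0 && sgl->offset != 0)
                return i;  /* last entry must start on a page boundary */
            if (i != 0 && i != count - 1 &&
                (sgl->length != PAGE_SZ || sgl->offset != 0))
                return i;  /* middle entries must be whole pages */
        }
        return -1;
    }
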
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index 7872f3c78b51..dc0ffd3dd96a 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -860,7 +860,7 @@ static int sh_msiof_transfer_one(struct spi_master *master,
+ 				break;
+ 			copy32 = copy_bswap32;
+ 		} else if (bits <= 16) {
+-			if (l & 1)
++			if (l & 3)
+ 				break;
+ 			copy32 = copy_wswap32;
+ 		} else {
+diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
+index a90214bb84dd..7511b4dfdfe5 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
++++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
+@@ -404,15 +404,13 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 		result = VM_FAULT_LOCKED;
+ 		break;
+ 	case -ENODATA:
++	case -EAGAIN:
+ 	case -EFAULT:
+ 		result = VM_FAULT_NOPAGE;
+ 		break;
+ 	case -ENOMEM:
+ 		result = VM_FAULT_OOM;
+ 		break;
+-	case -EAGAIN:
+-		result = VM_FAULT_RETRY;
+-		break;
+ 	default:
+ 		result = VM_FAULT_SIGBUS;
+ 		break;
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 376e4a0c15c6..8f1844f0547e 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -1838,10 +1838,11 @@ static int vt6655_suspend(struct pci_dev *pcid, pm_message_t state)
+ 	MACbShutdown(priv->PortOffset);
+ 
+ 	pci_disable_device(pcid);
+-	pci_set_power_state(pcid, pci_choose_state(pcid, state));
+ 
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
++	pci_set_power_state(pcid, pci_choose_state(pcid, state));
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 8df1ff3766c4..2d6b0cf0929e 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -682,6 +682,7 @@ static int iscsit_add_reject_from_cmd(
+ 	unsigned char *buf)
+ {
+ 	struct iscsi_conn *conn;
++	const bool do_put = cmd->se_cmd.se_tfo != NULL;
+ 
+ 	if (!cmd->conn) {
+ 		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
+@@ -712,7 +713,7 @@ static int iscsit_add_reject_from_cmd(
+ 	 * Perform the kref_put now if se_cmd has already been setup by
+ 	 * scsit_setup_scsi_cmd()
+ 	 */
+-	if (cmd->se_cmd.se_tfo != NULL) {
++	if (do_put) {
+ 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+ 		target_put_sess_cmd(&cmd->se_cmd);
+ 	}
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index f6169f722991..bc578d3d2178 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1466,7 +1466,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
+ 	ret = core_tpg_register(&iscsi_ops, wwn, &tpg->tpg_se_tpg,
+ 				tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ 	if (ret < 0)
+-		return NULL;
++		goto free_out;
+ 
+ 	ret = iscsit_tpg_add_portal_group(tiqn, tpg);
+ 	if (ret != 0)
+@@ -1478,6 +1478,7 @@ static struct se_portal_group *lio_target_tiqn_addtpg(
+ 	return &tpg->tpg_se_tpg;
+ out:
+ 	core_tpg_deregister(&tpg->tpg_se_tpg);
++free_out:
+ 	kfree(tpg);
+ 	return NULL;
+ }
+diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
+index 4f8d4d459aa4..edb880faa754 100644
+--- a/drivers/target/target_core_alua.c
++++ b/drivers/target/target_core_alua.c
+@@ -1126,13 +1126,11 @@ static int core_alua_do_transition_tg_pt(
+ 		unsigned long transition_tmo;
+ 
+ 		transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
+-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+-				   &tg_pt_gp->tg_pt_gp_transition_work,
+-				   transition_tmo);
++		schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work,
++				      transition_tmo);
+ 	} else {
+ 		tg_pt_gp->tg_pt_gp_transition_complete = &wait;
+-		queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
+-				   &tg_pt_gp->tg_pt_gp_transition_work, 0);
++		schedule_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work, 0);
+ 		wait_for_completion(&wait);
+ 		tg_pt_gp->tg_pt_gp_transition_complete = NULL;
+ 	}
+diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
+index dee7dfdf203a..5d5e5cd77261 100644
+--- a/drivers/target/target_core_file.c
++++ b/drivers/target/target_core_file.c
+@@ -539,6 +539,10 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
+ 	struct inode *inode = file->f_mapping->host;
+ 	int ret;
+ 
++	if (!nolb) {
++		return 0;
++	}
++
+ 	if (cmd->se_dev->dev_attrib.pi_prot_type) {
+ 		ret = fd_do_prot_unmap(cmd, lba, nolb);
+ 		if (ret)
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 57fd4e14d4eb..770fc5ae26a7 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -58,8 +58,10 @@ void core_pr_dump_initiator_port(
+ 	char *buf,
+ 	u32 size)
+ {
+-	if (!pr_reg->isid_present_at_reg)
++	if (!pr_reg->isid_present_at_reg) {
+ 		buf[0] = '\0';
++		return;
++	}
+ 
+ 	snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
+ }
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index ec4ea5940bf7..96932e88464d 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -31,8 +31,7 @@
+  * If the temperature is higher than a trip point,
+  *    a. if the trend is THERMAL_TREND_RAISING, use higher cooling
+  *       state for this trip point
+- *    b. if the trend is THERMAL_TREND_DROPPING, use lower cooling
+- *       state for this trip point
++ *    b. if the trend is THERMAL_TREND_DROPPING, do nothing
+  *    c. if the trend is THERMAL_TREND_RAISE_FULL, use upper limit
+  *       for this trip point
+  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit
+@@ -94,9 +93,11 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ 			if (!throttle)
+ 				next_target = THERMAL_NO_TARGET;
+ 		} else {
+-			next_target = cur_state - 1;
+-			if (next_target > instance->upper)
+-				next_target = instance->upper;
++			if (!throttle) {
++				next_target = cur_state - 1;
++				if (next_target > instance->upper)
++					next_target = instance->upper;
++			}
+ 		}
+ 		break;
+ 	case THERMAL_TREND_DROP_FULL:
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index 644ddb841d9f..6d1e2f746ab4 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -114,7 +114,7 @@
+ #define DEFAULT_TX_BUF_COUNT 3
+ 
+ struct n_hdlc_buf {
+-	struct n_hdlc_buf *link;
++	struct list_head  list_item;
+ 	int		  count;
+ 	char		  buf[1];
+ };
+@@ -122,8 +122,7 @@ struct n_hdlc_buf {
+ #define	N_HDLC_BUF_SIZE	(sizeof(struct n_hdlc_buf) + maxframe)
+ 
+ struct n_hdlc_buf_list {
+-	struct n_hdlc_buf *head;
+-	struct n_hdlc_buf *tail;
++	struct list_head  list;
+ 	int		  count;
+ 	spinlock_t	  spinlock;
+ };
+@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
+  * @backup_tty - TTY to use if tty gets closed
+  * @tbusy - reentrancy flag for tx wakeup code
+  * @woke_up - FIXME: describe this field
+- * @tbuf - currently transmitting tx buffer
+  * @tx_buf_list - list of pending transmit frame buffers
+  * @rx_buf_list - list of received frame buffers
+  * @tx_free_buf_list - list unused transmit frame buffers
+@@ -149,7 +147,6 @@ struct n_hdlc {
+ 	struct tty_struct	*backup_tty;
+ 	int			tbusy;
+ 	int			woke_up;
+-	struct n_hdlc_buf	*tbuf;
+ 	struct n_hdlc_buf_list	tx_buf_list;
+ 	struct n_hdlc_buf_list	rx_buf_list;
+ 	struct n_hdlc_buf_list	tx_free_buf_list;
+@@ -159,7 +156,8 @@ struct n_hdlc {
+ /*
+  * HDLC buffer list manipulation functions
+  */
+-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++						struct n_hdlc_buf *buf);
+ static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+ 			   struct n_hdlc_buf *buf);
+ static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
+@@ -209,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
+ {
+ 	struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
+ 	struct n_hdlc_buf *buf;
+-	unsigned long flags;
+ 
+ 	while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
+ 		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
+- 	spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
+-	if (n_hdlc->tbuf) {
+-		n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
+-		n_hdlc->tbuf = NULL;
+-	}
+-	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ }
+ 
+ static struct tty_ldisc_ops n_hdlc_ldisc = {
+@@ -284,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
+ 		} else
+ 			break;
+ 	}
+-	kfree(n_hdlc->tbuf);
+ 	kfree(n_hdlc);
+ 	
+ }	/* end of n_hdlc_release() */
+@@ -403,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 	n_hdlc->woke_up = 0;
+ 	spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
+ 
+-	/* get current transmit buffer or get new transmit */
+-	/* buffer from list of pending transmit buffers */
+-		
+-	tbuf = n_hdlc->tbuf;
+-	if (!tbuf)
+-		tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+-		
++	tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
+ 	while (tbuf) {
+ 		if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 			printk("%s(%d)sending frame %p, count=%d\n",
+@@ -421,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 
+ 		/* rollback was possible and has been done */
+ 		if (actual == -ERESTARTSYS) {
+-			n_hdlc->tbuf = tbuf;
++			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+ 			break;
+ 		}
+ 		/* if transmit error, throw frame away by */
+@@ -436,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 					
+ 			/* free current transmit buffer */
+ 			n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
+-			
+-			/* this tx buffer is done */
+-			n_hdlc->tbuf = NULL;
+-			
++
+ 			/* wake up sleeping writers */
+ 			wake_up_interruptible(&tty->write_wait);
+ 	
+@@ -449,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
+ 			if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 				printk("%s(%d)frame %p pending\n",
+ 					__FILE__,__LINE__,tbuf);
+-					
+-			/* buffer not accepted by driver */
+-			/* set this buffer as pending buffer */
+-			n_hdlc->tbuf = tbuf;
++
++			/*
++			 * the buffer was not accepted by the driver,
++			 * so put it back on the tx queue
++			 */
++			n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
+ 			break;
+ 		}
+ 	}
+@@ -750,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 	int error = 0;
+ 	int count;
+ 	unsigned long flags;
+-	
++	struct n_hdlc_buf *buf = NULL;
++
+ 	if (debuglevel >= DEBUG_LEVEL_INFO)	
+ 		printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
+ 			__FILE__,__LINE__,cmd);
+@@ -764,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		/* report count of read data available */
+ 		/* in next available frame (if any) */
+ 		spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
+-		if (n_hdlc->rx_buf_list.head)
+-			count = n_hdlc->rx_buf_list.head->count;
++		buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
++						struct n_hdlc_buf, list_item);
++		if (buf)
++			count = buf->count;
+ 		else
+ 			count = 0;
+ 		spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
+@@ -777,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
+ 		count = tty_chars_in_buffer(tty);
+ 		/* add size of next output frame in queue */
+ 		spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
+-		if (n_hdlc->tx_buf_list.head)
+-			count += n_hdlc->tx_buf_list.head->count;
++		buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
++						struct n_hdlc_buf, list_item);
++		if (buf)
++			count += buf->count;
+ 		spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
+ 		error = put_user(count, (int __user *)arg);
+ 		break;
+@@ -826,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ 		poll_wait(filp, &tty->write_wait, wait);
+ 
+ 		/* set bits for operations that won't block */
+-		if (n_hdlc->rx_buf_list.head)
++		if (!list_empty(&n_hdlc->rx_buf_list.list))
+ 			mask |= POLLIN | POLLRDNORM;	/* readable */
+ 		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ 			mask |= POLLHUP;
+ 		if (tty_hung_up_p(filp))
+ 			mask |= POLLHUP;
+ 		if (!tty_is_writelocked(tty) &&
+-				n_hdlc->tx_free_buf_list.head)
++				!list_empty(&n_hdlc->tx_free_buf_list.list))
+ 			mask |= POLLOUT | POLLWRNORM;	/* writable */
+ 	}
+ 	return mask;
+@@ -853,11 +841,16 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ 	if (!n_hdlc)
+ 		return NULL;
+ 
+-	n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
+-	n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+-	
++	spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
++	spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
++
++	INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
++	INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
++
+ 	/* allocate free rx buffer list */
+ 	for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
+ 		buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
+@@ -885,63 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
+ }	/* end of n_hdlc_alloc() */
+ 
+ /**
+- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
+- * @list - pointer to buffer list
++ * n_hdlc_buf_return - put the HDLC buffer after the head of the specified list
++ * @buf_list - pointer to the buffer list
++ * @buf - pointer to the buffer
+  */
+-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
++static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
++						struct n_hdlc_buf *buf)
+ {
+-	memset(list, 0, sizeof(*list));
+-	spin_lock_init(&list->spinlock);
+-}	/* end of n_hdlc_buf_list_init() */
++	unsigned long flags;
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	list_add(&buf->list_item, &buf_list->list);
++	buf_list->count++;
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
++}
+ 
+ /**
+  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
+- * @list - pointer to buffer list
++ * @buf_list - pointer to buffer list
+  * @buf	- pointer to buffer
+  */
+-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
++static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
+ 			   struct n_hdlc_buf *buf)
+ {
+ 	unsigned long flags;
+-	spin_lock_irqsave(&list->spinlock,flags);
+-	
+-	buf->link=NULL;
+-	if (list->tail)
+-		list->tail->link = buf;
+-	else
+-		list->head = buf;
+-	list->tail = buf;
+-	(list->count)++;
+-	
+-	spin_unlock_irqrestore(&list->spinlock,flags);
+-	
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	list_add_tail(&buf->list_item, &buf_list->list);
++	buf_list->count++;
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ }	/* end of n_hdlc_buf_put() */
+ 
+ /**
+  * n_hdlc_buf_get - remove and return an HDLC buffer from list
+- * @list - pointer to HDLC buffer list
++ * @buf_list - pointer to HDLC buffer list
+  * 
+  * Remove and return an HDLC buffer from the head of the specified HDLC buffer
+  * list.
+  * Returns a pointer to HDLC buffer if available, otherwise %NULL.
+  */
+-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
++static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
+ {
+ 	unsigned long flags;
+ 	struct n_hdlc_buf *buf;
+-	spin_lock_irqsave(&list->spinlock,flags);
+-	
+-	buf = list->head;
++
++	spin_lock_irqsave(&buf_list->spinlock, flags);
++
++	buf = list_first_entry_or_null(&buf_list->list,
++						struct n_hdlc_buf, list_item);
+ 	if (buf) {
+-		list->head = buf->link;
+-		(list->count)--;
++		list_del(&buf->list_item);
++		buf_list->count--;
+ 	}
+-	if (!list->head)
+-		list->tail = NULL;
+-	
+-	spin_unlock_irqrestore(&list->spinlock,flags);
++
++	spin_unlock_irqrestore(&buf_list->spinlock, flags);
+ 	return buf;
+-	
+ }	/* end of n_hdlc_buf_get() */
+ 
+ static char hdlc_banner[] __initdata =
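The conversion above replaces n_hdlc's hand-rolled singly-linked head/tail queue with circular doubly-linked lists, which makes returning a rejected buffer to the front of the tx queue (n_hdlc_buf_return) a plain list_add(). A self-contained userspace sketch of the same pattern; the helpers mirror <linux/list.h> but are re-implemented here purely for illustration:

#include <stddef.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

/* insert at the front, as n_hdlc_buf_return() does */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* insert at the tail, as n_hdlc_buf_put() does */
static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = entry->prev = NULL;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct n_hdlc_buf {
	struct list_head list_item;
	int count;
};

int main(void)
{
	struct list_head q = LIST_HEAD_INIT(q);
	struct n_hdlc_buf a = { .count = 1 }, b = { .count = 2 };

	list_add_tail(&a.list_item, &q);	/* normal queueing */
	list_add(&b.list_item, &q);		/* returned buffer */

	/* n_hdlc_buf_get(): pop from the head */
	struct n_hdlc_buf *first =
		container_of(q.next, struct n_hdlc_buf, list_item);
	list_del(&first->list_item);
	printf("popped count=%d\n", first->count);	/* 2: returned buf first */
	return 0;
}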
+diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c
+index 5815e81b5fc6..a8a6fe6fc924 100644
+--- a/drivers/tty/serial/8250/8250_fintek.c
++++ b/drivers/tty/serial/8250/8250_fintek.c
+@@ -118,7 +118,7 @@ static int fintek_8250_rs485_config(struct uart_port *port,
+ 
+ 	if ((!!(rs485->flags & SER_RS485_RTS_ON_SEND)) ==
+ 			(!!(rs485->flags & SER_RS485_RTS_AFTER_SEND)))
+-		rs485->flags &= SER_RS485_ENABLED;
++		rs485->flags &= ~SER_RS485_ENABLED;
+ 	else
+ 		config |= RS485_URA;
+ 
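The one-character fix above is the classic bitmask bug: `flags &= SER_RS485_ENABLED` keeps only that bit, while the intended `flags &= ~SER_RS485_ENABLED` clears it and preserves everything else. A two-line demonstration:

#include <stdio.h>

#define SER_RS485_ENABLED (1 << 0)

int main(void)
{
	unsigned flags = 0x0f;

	/* buggy: keeps only the ENABLED bit instead of clearing it */
	unsigned kept  = flags & SER_RS485_ENABLED;	/* 0x1 */
	/* fixed: clears the ENABLED bit, preserving the other flags */
	unsigned fixed = flags & ~SER_RS485_ENABLED;	/* 0xe */

	printf("%#x %#x\n", kept, fixed);
	return 0;
}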
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 83a145f61f1f..0a1e9f4d9882 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -58,6 +58,7 @@ struct serial_private {
+ 	unsigned int		nr;
+ 	void __iomem		*remapped_bar[PCI_NUM_BAR_RESOURCES];
+ 	struct pci_serial_quirk	*quirk;
++	const struct pciserial_board *board;
+ 	int			line[0];
+ };
+ 
+@@ -4155,6 +4156,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board)
+ 		}
+ 	}
+ 	priv->nr = i;
++	priv->board = board;
+ 	return priv;
+ 
+ err_deinit:
+@@ -4165,7 +4167,7 @@ err_out:
+ }
+ EXPORT_SYMBOL_GPL(pciserial_init_ports);
+ 
+-void pciserial_remove_ports(struct serial_private *priv)
++void pciserial_detach_ports(struct serial_private *priv)
+ {
+ 	struct pci_serial_quirk *quirk;
+ 	int i;
+@@ -4185,7 +4187,11 @@ void pciserial_remove_ports(struct serial_private *priv)
+ 	quirk = find_quirk(priv->dev);
+ 	if (quirk->exit)
+ 		quirk->exit(priv->dev);
++}
+ 
++void pciserial_remove_ports(struct serial_private *priv)
++{
++	pciserial_detach_ports(priv);
+ 	kfree(priv);
+ }
+ EXPORT_SYMBOL_GPL(pciserial_remove_ports);
+@@ -5908,6 +5914,9 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 	{ PCI_DEVICE(0x1601, 0x0800), .driver_data = pbn_b0_4_1250000 },
+ 	{ PCI_DEVICE(0x1601, 0xa801), .driver_data = pbn_b0_4_1250000 },
+ 
++	/* Amazon PCI serial device */
++	{ PCI_DEVICE(0x1d0f, 0x8250), .driver_data = pbn_b0_1_115200 },
++
+ 	/*
+ 	 * These entries match devices with class COMMUNICATION_SERIAL,
+ 	 * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
+@@ -5936,7 +5945,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev,
+ 		return PCI_ERS_RESULT_DISCONNECT;
+ 
+ 	if (priv)
+-		pciserial_suspend_ports(priv);
++		pciserial_detach_ports(priv);
+ 
+ 	pci_disable_device(dev);
+ 
+@@ -5961,9 +5970,16 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
+ static void serial8250_io_resume(struct pci_dev *dev)
+ {
+ 	struct serial_private *priv = pci_get_drvdata(dev);
++	struct serial_private *new;
+ 
+-	if (priv)
+-		pciserial_resume_ports(priv);
++	if (!priv)
++		return;
++
++	new = pciserial_init_ports(dev, priv->board);
++	if (!IS_ERR(new)) {
++		pci_set_drvdata(dev, new);
++		kfree(priv);
++	}
+ }
+ 
+ static const struct pci_error_handlers serial8250_err_handler = {
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 2f7cfa5c7b8b..529cc86283e7 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -237,8 +237,10 @@ static void sysrq_handle_showallcpus(int key)
+ 	 * architecture has no support for it:
+ 	 */
+ 	if (!trigger_all_cpu_backtrace()) {
+-		struct pt_regs *regs = get_irq_regs();
++		struct pt_regs *regs = NULL;
+ 
++		if (in_irq())
++			regs = get_irq_regs();
+ 		if (regs) {
+ 			pr_info("CPU%d:\n", smp_processor_id());
+ 			show_regs(regs);
+@@ -257,7 +259,10 @@ static struct sysrq_key_op sysrq_showallcpus_op = {
+ 
+ static void sysrq_handle_showregs(int key)
+ {
+-	struct pt_regs *regs = get_irq_regs();
++	struct pt_regs *regs = NULL;
++
++	if (in_irq())
++		regs = get_irq_regs();
+ 	if (regs)
+ 		show_regs(regs);
+ 	perf_event_print_debug();
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 0b99f913d7f2..b868e77f3bfb 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -460,6 +460,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	unsigned iad_num = 0;
+ 
+ 	memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
++	nintf = nintf_orig = config->desc.bNumInterfaces;
++	config->desc.bNumInterfaces = 0;	// Adjusted later
++
+ 	if (config->desc.bDescriptorType != USB_DT_CONFIG ||
+ 	    config->desc.bLength < USB_DT_CONFIG_SIZE ||
+ 	    config->desc.bLength > size) {
+@@ -473,7 +476,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 	buffer += config->desc.bLength;
+ 	size -= config->desc.bLength;
+ 
+-	nintf = nintf_orig = config->desc.bNumInterfaces;
+ 	if (nintf > USB_MAXINTERFACES) {
+ 		dev_warn(ddev, "config %d has too many interfaces: %d, "
+ 		    "using maximum allowed: %d\n",
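The reordering above reads the claimed interface count before any validation and zeroes bNumInterfaces in the in-memory copy, so an early bail-out never leaves behind a count the parser has not vetted. A simplified sketch of the pattern; the struct layout and limits here are stand-ins, not the real USB descriptor handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct cfg_desc {
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bNumInterfaces;
};

#define MAX_INTERFACES 32
#define DT_CONFIG 2

static int parse_config(struct cfg_desc *cfg, const uint8_t *buf, size_t size)
{
	memcpy(cfg, buf, sizeof(*cfg));
	int nintf = cfg->bNumInterfaces;
	cfg->bNumInterfaces = 0;	/* adjusted after validation */

	if (cfg->bDescriptorType != DT_CONFIG || cfg->bLength > size)
		return -1;		/* caller sees zero interfaces */

	if (nintf > MAX_INTERFACES)
		nintf = MAX_INTERFACES;	/* clamp, as the kernel warns and does */

	cfg->bNumInterfaces = (uint8_t)nintf;
	return 0;
}

int main(void)
{
	uint8_t raw[9] = { 9, DT_CONFIG, 2 };
	struct cfg_desc cfg;

	printf("%d %u\n", parse_config(&cfg, raw, sizeof(raw)),
	       cfg.bNumInterfaces);	/* 0 2 */
	return 0;
}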
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 94a15883f8cc..3c31309f06d3 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -113,42 +113,38 @@ enum snoop_when {
+ #define USB_DEVICE_DEV		MKDEV(USB_DEVICE_MAJOR, 0)
+ 
+ /* Limit on the total amount of memory we can allocate for transfers */
+-static unsigned usbfs_memory_mb = 16;
++static u32 usbfs_memory_mb = 16;
+ module_param(usbfs_memory_mb, uint, 0644);
+ MODULE_PARM_DESC(usbfs_memory_mb,
+ 		"maximum MB allowed for usbfs buffers (0 = no limit)");
+ 
+ /* Hard limit, necessary to avoid arithmetic overflow */
+-#define USBFS_XFER_MAX		(UINT_MAX / 2 - 1000000)
++#define USBFS_XFER_MAX         (UINT_MAX / 2 - 1000000)
+ 
+-static atomic_t usbfs_memory_usage;	/* Total memory currently allocated */
++static atomic64_t usbfs_memory_usage;	/* Total memory currently allocated */
+ 
+ /* Check whether it's okay to allocate more memory for a transfer */
+-static int usbfs_increase_memory_usage(unsigned amount)
++static int usbfs_increase_memory_usage(u64 amount)
+ {
+-	unsigned lim;
++	u64 lim;
+ 
+-	/*
+-	 * Convert usbfs_memory_mb to bytes, avoiding overflows.
+-	 * 0 means use the hard limit (effectively unlimited).
+-	 */
+ 	lim = ACCESS_ONCE(usbfs_memory_mb);
+-	if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
+-		lim = USBFS_XFER_MAX;
+-	else
+-		lim <<= 20;
++	lim <<= 20;
+ 
+-	atomic_add(amount, &usbfs_memory_usage);
+-	if (atomic_read(&usbfs_memory_usage) <= lim)
+-		return 0;
+-	atomic_sub(amount, &usbfs_memory_usage);
+-	return -ENOMEM;
++	atomic64_add(amount, &usbfs_memory_usage);
++
++	if (lim > 0 && atomic64_read(&usbfs_memory_usage) > lim) {
++		atomic64_sub(amount, &usbfs_memory_usage);
++		return -ENOMEM;
++	}
++
++	return 0;
+ }
+ 
+ /* Memory for a transfer is being deallocated */
+-static void usbfs_decrease_memory_usage(unsigned amount)
++static void usbfs_decrease_memory_usage(u64 amount)
+ {
+-	atomic_sub(amount, &usbfs_memory_usage);
++	atomic64_sub(amount, &usbfs_memory_usage);
+ }
+ 
+ static int connected(struct usb_dev_state *ps)
+@@ -1077,7 +1073,7 @@ static int proc_bulk(struct usb_dev_state *ps, void __user *arg)
+ 	if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
+ 		return -EINVAL;
+ 	len1 = bulk.len;
+-	if (len1 >= USBFS_XFER_MAX)
++	if (len1 >= (INT_MAX - sizeof(struct urb)))
+ 		return -EINVAL;
+ 	ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+ 	if (ret)
+@@ -1294,13 +1290,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 	int number_of_packets = 0;
+ 	unsigned int stream_id = 0;
+ 	void *buf;
+-
+-	if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
+-				USBDEVFS_URB_SHORT_NOT_OK |
++	unsigned long mask =	USBDEVFS_URB_SHORT_NOT_OK |
+ 				USBDEVFS_URB_BULK_CONTINUATION |
+ 				USBDEVFS_URB_NO_FSBR |
+ 				USBDEVFS_URB_ZERO_PACKET |
+-				USBDEVFS_URB_NO_INTERRUPT))
++				USBDEVFS_URB_NO_INTERRUPT;
++	/* USBDEVFS_URB_ISO_ASAP is a special case */
++	if (uurb->type == USBDEVFS_URB_TYPE_ISO)
++		mask |= USBDEVFS_URB_ISO_ASAP;
++
++	if (uurb->flags & ~mask)
++			return -EINVAL;
++
++	if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
+ 		return -EINVAL;
+ 	if (uurb->buffer_length > 0 && !uurb->buffer)
+ 		return -EINVAL;
+@@ -1420,10 +1422,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 		return -EINVAL;
+ 	}
+ 
+-	if (uurb->buffer_length >= USBFS_XFER_MAX) {
+-		ret = -EINVAL;
+-		goto error;
+-	}
+ 	if (uurb->buffer_length > 0 &&
+ 			!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
+ 				uurb->buffer, uurb->buffer_length)) {
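The usbfs accounting above moves to 64-bit counters but keeps the optimistic add-then-check scheme: add the request size first, then roll it back if the new total exceeds the (racily sampled) limit. A userspace stand-in using C11 atomics, not the kernel's atomic64_t API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t memory_usage;
static int64_t memory_limit_mb = 16;	/* 0 would mean "no limit" */

static int increase_memory_usage(int64_t amount)
{
	int64_t lim = memory_limit_mb << 20;	/* MB -> bytes */

	atomic_fetch_add(&memory_usage, amount);
	if (lim > 0 && atomic_load(&memory_usage) > lim) {
		atomic_fetch_sub(&memory_usage, amount);	/* roll back */
		return -1;	/* -ENOMEM in the kernel */
	}
	return 0;
}

static void decrease_memory_usage(int64_t amount)
{
	atomic_fetch_sub(&memory_usage, amount);
}

int main(void)
{
	printf("%d\n", increase_memory_usage(8 << 20));		/* 0: fits  */
	printf("%d\n", increase_memory_usage(16 << 20));	/* -1: over */
	decrease_memory_usage(8 << 20);
	return 0;
}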
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 8600144a7aab..1ba74441d7bf 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4815,6 +4815,15 @@ loop:
+ 		usb_put_dev(udev);
+ 		if ((status == -ENOTCONN) || (status == -ENOTSUPP))
+ 			break;
++
++		/* When halfway through our retry count, power-cycle the port */
++		if (i == (SET_CONFIG_TRIES / 2) - 1) {
++			dev_info(&port_dev->dev, "attempt power cycle\n");
++			usb_hub_set_port_power(hdev, hub, port1, false);
++			msleep(2 * hub_power_on_good_delay(hub));
++			usb_hub_set_port_power(hdev, hub, port1, true);
++			msleep(hub_power_on_good_delay(hub));
++		}
+ 	}
+ 	if (hub->hdev->parent ||
+ 			!hcd->driver->port_handed_over ||
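The hunk above adds a recovery heuristic to device enumeration: once half of the SET_CONFIG_TRIES attempts are used up, power-cycle the port before continuing to retry. A sketch of the control flow, where try_enumerate() and power_cycle_port() are hypothetical stand-ins for the real hub operations:

#include <stdbool.h>
#include <stdio.h>

#define SET_CONFIG_TRIES 8

static bool try_enumerate(int attempt) { return attempt >= 6; }
static void power_cycle_port(void)     { puts("attempt power cycle"); }

int main(void)
{
	for (int i = 0; i < SET_CONFIG_TRIES; i++) {
		if (try_enumerate(i)) {
			printf("enumerated on attempt %d\n", i);
			return 0;
		}
		/* halfway through the retry budget: power-cycle once */
		if (i == (SET_CONFIG_TRIES / 2) - 1)
			power_cycle_port();
	}
	return 1;
}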
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 37c418e581fb..50010282c010 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -151,6 +151,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* appletouch */
+ 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
++	{ USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ 	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
+index 0495c94a23d7..23330697aff2 100644
+--- a/drivers/usb/gadget/configfs.c
++++ b/drivers/usb/gadget/configfs.c
+@@ -266,6 +266,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct gadget_info *gi,
+ 		ret = unregister_gadget(gi);
+ 		if (ret)
+ 			goto err;
++		kfree(name);
+ 	} else {
+ 		if (gi->udc_name) {
+ 			ret = -EBUSY;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 804b209f4c08..2c25a5dec442 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -791,7 +791,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+ 		}
+ 
+ 		if (io_data->aio) {
+-			req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
++			req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ 			if (unlikely(!req))
+ 				goto error_lock;
+ 
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 63d71400dcaf..ada2a4f5a774 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -1834,8 +1834,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 
+ 	spin_lock_irq (&dev->lock);
+ 	value = -EINVAL;
+-	if (dev->buf)
++	if (dev->buf) {
++		kfree(kbuf);
+ 		goto fail;
++	}
+ 	dev->buf = kbuf;
+ 
+ 	/* full or low speed config */
+diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
+index 524cbf26d992..e37395ef5d49 100644
+--- a/drivers/usb/host/ehci-dbg.c
++++ b/drivers/usb/host/ehci-dbg.c
+@@ -850,7 +850,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+ 			default:		/* unknown */
+ 				break;
+ 			}
+-			temp = (cap >> 8) & 0xff;
++			offset = (cap >> 8) & 0xff;
+ 		}
+ 	}
+ #endif
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 68e37424a26d..3d2b4d1482df 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -981,6 +981,12 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+ 	if (!vdev)
+ 		return;
+ 
++	if (vdev->real_port == 0 ||
++			vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
++		xhci_dbg(xhci, "Bad vdev->real_port.\n");
++		goto out;
++	}
++
+ 	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ 	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ 		/* is this a hub device that added a tt_info to the tts list */
+@@ -994,6 +1000,7 @@ void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
+ 			}
+ 		}
+ 	}
++out:
+ 	/* we are now at a leaf device */
+ 	xhci_free_virt_device(xhci, slot_id);
+ }
+@@ -1010,10 +1017,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 		return 0;
+ 	}
+ 
+-	xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags);
+-	if (!xhci->devs[slot_id])
++	dev = kzalloc(sizeof(*dev), flags);
++	if (!dev)
+ 		return 0;
+-	dev = xhci->devs[slot_id];
+ 
+ 	/* Allocate the (output) device context that will be used in the HC. */
+ 	dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+@@ -1061,9 +1067,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 		 &xhci->dcbaa->dev_context_ptrs[slot_id],
+ 		 le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id]));
+ 
++	xhci->devs[slot_id] = dev;
++
+ 	return 1;
+ fail:
+-	xhci_free_virt_device(xhci, slot_id);
++
++	if (dev->in_ctx)
++		xhci_free_container_ctx(xhci, dev->in_ctx);
++	if (dev->out_ctx)
++		xhci_free_container_ctx(xhci, dev->out_ctx);
++	kfree(dev);
++
+ 	return 0;
+ }
+ 
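The xhci-mem change above stops publishing the half-built virt device in xhci->devs[] before its contexts are allocated: the pointer becomes visible only after everything has succeeded, and the failure path frees exactly what was allocated locally. The same build-locally, publish-on-success pattern in plain C:

#include <stdio.h>
#include <stdlib.h>

struct ctx { int dummy; };
struct virt_dev {
	struct ctx *in_ctx, *out_ctx;
};

#define MAX_SLOTS 4
static struct virt_dev *devs[MAX_SLOTS];

static int alloc_virt_device(int slot_id)
{
	struct virt_dev *dev = calloc(1, sizeof(*dev));
	if (!dev)
		return 0;

	dev->out_ctx = calloc(1, sizeof(*dev->out_ctx));
	if (!dev->out_ctx)
		goto fail;
	dev->in_ctx = calloc(1, sizeof(*dev->in_ctx));
	if (!dev->in_ctx)
		goto fail;

	devs[slot_id] = dev;	/* publish only on full success */
	return 1;
fail:
	free(dev->in_ctx);	/* free(NULL) is a no-op */
	free(dev->out_ctx);
	free(dev);
	return 0;
}

int main(void)
{
	printf("%d\n", alloc_virt_device(1));
	return 0;
}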
+diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
+index 9a9c82a4d35d..d6a8e325950c 100644
+--- a/drivers/usb/musb/da8xx.c
++++ b/drivers/usb/musb/da8xx.c
+@@ -350,7 +350,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
+ 			musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+ 			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
+ 			del_timer(&otg_workaround);
+-		} else {
++		} else if (!(musb->int_usb & MUSB_INTR_BABBLE)) {
++			/*
++			 * When a babble condition happens, a drvvbus
++			 * interrupt is also generated. Ignore this drvvbus
++			 * interrupt and let the babble interrupt handler
++			 * recover the controller; otherwise, the host-mode
++			 * flag is lost due to the MUSB_DEV_MODE() call below
++			 * and the babble recovery logic will not be called.
++			 */
+ 			musb->is_active = 0;
+ 			MUSB_DEV_MODE(musb);
+ 			otg->default_a = 0;
+diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
+index 8a55b37d1a02..d2ed59a38354 100644
+--- a/drivers/usb/phy/phy-isp1301.c
++++ b/drivers/usb/phy/phy-isp1301.c
+@@ -32,6 +32,12 @@ static const struct i2c_device_id isp1301_id[] = {
+ 	{ }
+ };
+ 
++static const struct of_device_id isp1301_of_match[] = {
++	{.compatible = "nxp,isp1301" },
++	{ },
++};
++MODULE_DEVICE_TABLE(of, isp1301_of_match);
++
+ static struct i2c_client *isp1301_i2c_client;
+ 
+ static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
+@@ -129,6 +135,7 @@ static int isp1301_remove(struct i2c_client *client)
+ static struct i2c_driver isp1301_driver = {
+ 	.driver = {
+ 		.name = DRV_NAME,
++		.of_match_table = of_match_ptr(isp1301_of_match),
+ 	},
+ 	.probe = isp1301_probe,
+ 	.remove = isp1301_remove,
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index a155cd02bce2..ecc83c405a8b 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -111,6 +111,10 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ 		}
+ 	}
+ 
++	/* All Seagate disk enclosures have broken ATA pass-through support */
++	if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
++		flags |= US_FL_NO_ATA_1X;
++
+ 	usb_stor_adjust_quirks(udev, &flags);
+ 
+ 	if (flags & US_FL_IGNORE_UAS) {
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 8e80da125b25..cd2e880979f2 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2138,6 +2138,13 @@ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_REPORT_OPCODES),
+ 
++/* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */
++UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
++		"JMicron",
++		"JMS567",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA),
++
+ /*
+  * Patch by Constantin Baranov <const@tltsu.ru>
+  * Report by Andreas Koenecke.
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index a37ed1e59e99..2f80163ffb94 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -141,6 +141,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+ 
++/* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */
++UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
++		"JMicron",
++		"JMS567",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_BROKEN_FUA),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+ 		"VIA",
+diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
+index 021003c4de53..af858d52608a 100644
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -178,6 +178,13 @@ static int stub_send_ret_submit(struct stub_device *sdev)
+ 		memset(&pdu_header, 0, sizeof(pdu_header));
+ 		memset(&msg, 0, sizeof(msg));
+ 
++		if (urb->actual_length > 0 && !urb->transfer_buffer) {
++			dev_err(&sdev->udev->dev,
++				"urb: actual_length %d transfer_buffer null\n",
++				urb->actual_length);
++			return -1;
++		}
++
+ 		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ 			iovnum = 2 + urb->number_of_packets;
+ 		else
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index b97210671a81..106348c6d993 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -1165,6 +1165,8 @@ static void fbcon_free_font(struct display *p, bool freefont)
+ 	p->userfont = 0;
+ }
+ 
++static void set_vc_hi_font(struct vc_data *vc, bool set);
++
+ static void fbcon_deinit(struct vc_data *vc)
+ {
+ 	struct display *p = &fb_display[vc->vc_num];
+@@ -1200,6 +1202,9 @@ finished:
+ 	if (free_font)
+ 		vc->vc_font.data = NULL;
+ 
++	if (vc->vc_hi_font_mask)
++		set_vc_hi_font(vc, false);
++
+ 	if (!con_is_bound(&fb_con))
+ 		fbcon_exit();
+ 
+@@ -2434,32 +2439,10 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
+ 	return 0;
+ }
+ 
+-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+-			     const u8 * data, int userfont)
++/* set/clear vc_hi_font_mask and update vc attrs accordingly */
++static void set_vc_hi_font(struct vc_data *vc, bool set)
+ {
+-	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+-	struct fbcon_ops *ops = info->fbcon_par;
+-	struct display *p = &fb_display[vc->vc_num];
+-	int resize;
+-	int cnt;
+-	char *old_data = NULL;
+-
+-	if (CON_IS_VISIBLE(vc) && softback_lines)
+-		fbcon_set_origin(vc);
+-
+-	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+-	if (p->userfont)
+-		old_data = vc->vc_font.data;
+-	if (userfont)
+-		cnt = FNTCHARCNT(data);
+-	else
+-		cnt = 256;
+-	vc->vc_font.data = (void *)(p->fontdata = data);
+-	if ((p->userfont = userfont))
+-		REFCOUNT(data)++;
+-	vc->vc_font.width = w;
+-	vc->vc_font.height = h;
+-	if (vc->vc_hi_font_mask && cnt == 256) {
++	if (!set) {
+ 		vc->vc_hi_font_mask = 0;
+ 		if (vc->vc_can_do_color) {
+ 			vc->vc_complement_mask >>= 1;
+@@ -2482,7 +2465,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 			    ((c & 0xfe00) >> 1) | (c & 0xff);
+ 			vc->vc_attr >>= 1;
+ 		}
+-	} else if (!vc->vc_hi_font_mask && cnt == 512) {
++	} else {
+ 		vc->vc_hi_font_mask = 0x100;
+ 		if (vc->vc_can_do_color) {
+ 			vc->vc_complement_mask <<= 1;
+@@ -2514,8 +2497,38 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 			} else
+ 				vc->vc_video_erase_char = c & ~0x100;
+ 		}
+-
+ 	}
++}
++
++static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
++			     const u8 * data, int userfont)
++{
++	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct display *p = &fb_display[vc->vc_num];
++	int resize;
++	int cnt;
++	char *old_data = NULL;
++
++	if (CON_IS_VISIBLE(vc) && softback_lines)
++		fbcon_set_origin(vc);
++
++	resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
++	if (p->userfont)
++		old_data = vc->vc_font.data;
++	if (userfont)
++		cnt = FNTCHARCNT(data);
++	else
++		cnt = 256;
++	vc->vc_font.data = (void *)(p->fontdata = data);
++	if ((p->userfont = userfont))
++		REFCOUNT(data)++;
++	vc->vc_font.width = w;
++	vc->vc_font.height = h;
++	if (vc->vc_hi_font_mask && cnt == 256)
++		set_vc_hi_font(vc, false);
++	else if (!vc->vc_hi_font_mask && cnt == 512)
++		set_vc_hi_font(vc, true);
+ 
+ 	if (resize) {
+ 		int cols, rows;
+diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
+index f9507b1894df..789d3f16ff9f 100644
+--- a/drivers/video/fbdev/au1200fb.c
++++ b/drivers/video/fbdev/au1200fb.c
+@@ -1680,8 +1680,10 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+ 
+ 		fbi = framebuffer_alloc(sizeof(struct au1200fb_device),
+ 					&dev->dev);
+-		if (!fbi)
++		if (!fbi) {
++			ret = -ENOMEM;
+ 			goto failed;
++		}
+ 
+ 		_au1200fb_infos[plane] = fbi;
+ 		fbdev = fbi->par;
+@@ -1699,7 +1701,8 @@ static int au1200fb_drv_probe(struct platform_device *dev)
+ 		if (!fbdev->fb_mem) {
+ 			print_err("failed to allocate framebuffer (size: %dK)",
+ 				  fbdev->fb_len / 1024);
+-			return -ENOMEM;
++			ret = -ENOMEM;
++			goto failed;
+ 		}
+ 
+ 		/*
+diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h
+index 6026c60fc100..261522fabdac 100644
+--- a/drivers/video/fbdev/controlfb.h
++++ b/drivers/video/fbdev/controlfb.h
+@@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = {
+ 	{{ 1, 2}},	/* 1152x870, 75Hz */
+ 	{{ 0, 1}},	/* 1280x960, 75Hz */
+ 	{{ 0, 1}},	/* 1280x1024, 75Hz */
++	{{ 1, 2}},	/* 1152x768, 60Hz */
++	{{ 0, 1}},	/* 1600x1024, 60Hz */
+ };
+ 
+diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
+index ff2b8731a2dc..d2a985e59fcd 100644
+--- a/drivers/video/fbdev/udlfb.c
++++ b/drivers/video/fbdev/udlfb.c
+@@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len)
+ 
+ 	for (i = 0; i < len; i++) {
+ 		ret = usb_control_msg(dev->udev,
+-				    usb_rcvctrlpipe(dev->udev, 0), (0x02),
+-				    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
+-				    HZ);
+-		if (ret < 1) {
+-			pr_err("Read EDID byte %d failed err %x\n", i, ret);
++				      usb_rcvctrlpipe(dev->udev, 0), 0x02,
++				      (0x80 | (0x02 << 5)), i << 8, 0xA1,
++				      rbuf, 2, USB_CTRL_GET_TIMEOUT);
++		if (ret < 2) {
++			pr_err("Read EDID byte %d failed: %d\n", i, ret);
+ 			i--;
+ 			break;
+ 		}
+diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
+index b1877d73fa56..42a51c872754 100644
+--- a/drivers/virtio/virtio.c
++++ b/drivers/virtio/virtio.c
+@@ -323,6 +323,8 @@ int register_virtio_device(struct virtio_device *dev)
+ 	/* device_register() causes the bus infrastructure to look for a
+ 	 * matching driver. */
+ 	err = device_register(&dev->dev);
++	if (err)
++		ida_simple_remove(&virtio_index_ida, dev->index);
+ out:
+ 	if (err)
+ 		add_status(dev, VIRTIO_CONFIG_S_FAILED);
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index 7ef637d7f3a5..7d54efd73519 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
+ {
+ 	struct afs_server *server;
+ 	struct afs_vnode *vnode, *xvnode;
+-	time_t now;
++	time64_t now;
+ 	long timeout;
+ 	int ret;
+ 
+@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 
+ 	/* find the first vnode to update */
+ 	spin_lock(&server->cb_lock);
+@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
+ 
+ 	/* and then reschedule */
+ 	_debug("reschedule");
+-	vnode->update_at = get_seconds() + afs_vnode_update_timeout;
++	vnode->update_at = ktime_get_real_seconds() +
++			afs_vnode_update_timeout;
+ 
+ 	spin_lock(&server->cb_lock);
+ 
+diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
+index 4b0eff6da674..83a8a33a0d73 100644
+--- a/fs/afs/cmservice.c
++++ b/fs/afs/cmservice.c
+@@ -115,6 +115,9 @@ bool afs_cm_incoming_call(struct afs_call *call)
+ 	case CBProbe:
+ 		call->type = &afs_SRXCBProbe;
+ 		return true;
++	case CBProbeUuid:
++		call->type = &afs_SRXCBProbeUuid;
++		return true;
+ 	case CBTellMeAboutYourself:
+ 		call->type = &afs_SRXCBTellMeAboutYourself;
+ 		return true;
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 999bc3caec92..cf8a07e282a6 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -29,6 +29,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
+ 
+ const struct file_operations afs_file_operations = {
+ 	.open		= afs_open,
++	.flush		= afs_flush,
+ 	.release	= afs_release,
+ 	.llseek		= generic_file_llseek,
+ 	.read_iter	= generic_file_read_iter,
+diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
+index c2e930ec2888..10ce44214005 100644
+--- a/fs/afs/fsclient.c
++++ b/fs/afs/fsclient.c
+@@ -105,7 +105,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+ 			vnode->vfs_inode.i_mode = mode;
+ 		}
+ 
+-		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_server;
++		vnode->vfs_inode.i_ctime.tv_sec	= status->mtime_client;
+ 		vnode->vfs_inode.i_mtime	= vnode->vfs_inode.i_ctime;
+ 		vnode->vfs_inode.i_atime	= vnode->vfs_inode.i_ctime;
+ 		vnode->vfs_inode.i_version	= data_version;
+@@ -139,7 +139,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
+ 	vnode->cb_version	= ntohl(*bp++);
+ 	vnode->cb_expiry	= ntohl(*bp++);
+ 	vnode->cb_type		= ntohl(*bp++);
+-	vnode->cb_expires	= vnode->cb_expiry + get_seconds();
++	vnode->cb_expires	= vnode->cb_expiry + ktime_get_real_seconds();
+ 	*_bp = bp;
+ }
+ 
+@@ -703,8 +703,8 @@ int afs_fs_create(struct afs_server *server,
+ 		memset(bp, 0, padsz);
+ 		bp = (void *) bp + padsz;
+ 	}
+-	*bp++ = htonl(AFS_SET_MODE);
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
+@@ -981,8 +981,8 @@ int afs_fs_symlink(struct afs_server *server,
+ 		memset(bp, 0, c_padsz);
+ 		bp = (void *) bp + c_padsz;
+ 	}
+-	*bp++ = htonl(AFS_SET_MODE);
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = htonl(S_IRWXUGO); /* unix mode */
+@@ -1192,8 +1192,8 @@ static int afs_fs_store_data64(struct afs_server *server,
+ 	*bp++ = htonl(vnode->fid.vnode);
+ 	*bp++ = htonl(vnode->fid.unique);
+ 
+-	*bp++ = 0; /* mask */
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MTIME); /* mask */
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = 0; /* unix mode */
+@@ -1225,7 +1225,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
+ 	_enter(",%x,{%x:%u},,",
+ 	       key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
+ 
+-	size = to - offset;
++	size = (loff_t)to - (loff_t)offset;
+ 	if (first != last)
+ 		size += (loff_t)(last - first) << PAGE_SHIFT;
+ 	pos = (loff_t)first << PAGE_SHIFT;
+@@ -1269,8 +1269,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
+ 	*bp++ = htonl(vnode->fid.vnode);
+ 	*bp++ = htonl(vnode->fid.unique);
+ 
+-	*bp++ = 0; /* mask */
+-	*bp++ = 0; /* mtime */
++	*bp++ = htonl(AFS_SET_MTIME); /* mask */
++	*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ 	*bp++ = 0; /* owner */
+ 	*bp++ = 0; /* group */
+ 	*bp++ = 0; /* unix mode */
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index e06f5a23352a..f8fa92b1d43c 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -69,9 +69,9 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
+ 
+ 	set_nlink(inode, vnode->status.nlink);
+ 	inode->i_uid		= vnode->status.owner;
+-	inode->i_gid		= GLOBAL_ROOT_GID;
++	inode->i_gid            = vnode->status.group;
+ 	inode->i_size		= vnode->status.size;
+-	inode->i_ctime.tv_sec	= vnode->status.mtime_server;
++	inode->i_ctime.tv_sec	= vnode->status.mtime_client;
+ 	inode->i_ctime.tv_nsec	= 0;
+ 	inode->i_atime		= inode->i_mtime = inode->i_ctime;
+ 	inode->i_blocks		= 0;
+@@ -244,12 +244,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
+ 			vnode->cb_version = 0;
+ 			vnode->cb_expiry = 0;
+ 			vnode->cb_type = 0;
+-			vnode->cb_expires = get_seconds();
++			vnode->cb_expires = ktime_get_real_seconds();
+ 		} else {
+ 			vnode->cb_version = cb->version;
+ 			vnode->cb_expiry = cb->expiry;
+ 			vnode->cb_type = cb->type;
+-			vnode->cb_expires = vnode->cb_expiry + get_seconds();
++			vnode->cb_expires = vnode->cb_expiry +
++				ktime_get_real_seconds();
+ 		}
+ 	}
+ 
+@@ -322,7 +323,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
+ 	    !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
+ 	    !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
+ 	    !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+-		if (vnode->cb_expires < get_seconds() + 10) {
++		if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
+ 			_debug("callback expired");
+ 			set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
+ 		} else {
+diff --git a/fs/afs/internal.h b/fs/afs/internal.h
+index 71d5982312f3..1330b2a695ff 100644
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -11,6 +11,7 @@
+ 
+ #include <linux/compiler.h>
+ #include <linux/kernel.h>
++#include <linux/ktime.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
+ #include <linux/skbuff.h>
+@@ -247,7 +248,7 @@ struct afs_cache_vhash {
+  */
+ struct afs_vlocation {
+ 	atomic_t		usage;
+-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
++	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
+ 	struct list_head	link;		/* link in cell volume location list */
+ 	struct list_head	grave;		/* link in master graveyard list */
+ 	struct list_head	update;		/* link in master update list */
+@@ -258,7 +259,7 @@ struct afs_vlocation {
+ 	struct afs_cache_vlocation vldb;	/* volume information DB record */
+ 	struct afs_volume	*vols[3];	/* volume access record pointer (index by type) */
+ 	wait_queue_head_t	waitq;		/* status change waitqueue */
+-	time_t			update_at;	/* time at which record should be updated */
++	time64_t		update_at;	/* time at which record should be updated */
+ 	spinlock_t		lock;		/* access lock */
+ 	afs_vlocation_state_t	state;		/* volume location state */
+ 	unsigned short		upd_rej_cnt;	/* ENOMEDIUM count during update */
+@@ -271,7 +272,7 @@ struct afs_vlocation {
+  */
+ struct afs_server {
+ 	atomic_t		usage;
+-	time_t			time_of_death;	/* time at which put reduced usage to 0 */
++	time64_t		time_of_death;	/* time at which put reduced usage to 0 */
+ 	struct in_addr		addr;		/* server address */
+ 	struct afs_cell		*cell;		/* cell in which server resides */
+ 	struct list_head	link;		/* link in cell's server list */
+@@ -374,8 +375,8 @@ struct afs_vnode {
+ 	struct rb_node		server_rb;	/* link in server->fs_vnodes */
+ 	struct rb_node		cb_promise;	/* link in server->cb_promises */
+ 	struct work_struct	cb_broken_work;	/* work to be done on callback break */
+-	time_t			cb_expires;	/* time at which callback expires */
+-	time_t			cb_expires_at;	/* time used to order cb_promise */
++	time64_t		cb_expires;	/* time at which callback expires */
++	time64_t		cb_expires_at;	/* time used to order cb_promise */
+ 	unsigned		cb_version;	/* callback version */
+ 	unsigned		cb_expiry;	/* callback expiry time */
+ 	afs_callback_type_t	cb_type;	/* type of callback */
+@@ -749,6 +750,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
+ extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
+ extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
+ extern int afs_writeback_all(struct afs_vnode *);
++extern int afs_flush(struct file *, fl_owner_t);
+ extern int afs_fsync(struct file *, loff_t, loff_t, int);
+ 
+ 
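The time_t to time64_t conversions running through this AFS series exist because a 32-bit seconds counter overflows in January 2038; ktime_get_real_seconds() returns a 64-bit count instead. A small userspace illustration of the wrap with fixed-width types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t now64 = 2147483647LL + 10;	/* ten seconds past the wrap */
	int32_t now32 = (int32_t)now64;		/* what a 32-bit time_t sees */

	printf("64-bit: %" PRId64 "\n", now64);	/* 2147483657 */
	printf("32-bit: %" PRId32 "\n", now32);	/* negative: wrapped */
	return 0;
}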
+diff --git a/fs/afs/security.c b/fs/afs/security.c
+index 8d010422dc89..bfa9d3428383 100644
+--- a/fs/afs/security.c
++++ b/fs/afs/security.c
+@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
+ 	} else {
+ 		if (!(access & AFS_ACE_LOOKUP))
+ 			goto permission_denied;
++		if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
++			goto permission_denied;
+ 		if (mask & (MAY_EXEC | MAY_READ)) {
+ 			if (!(access & AFS_ACE_READ))
+ 				goto permission_denied;
++			if (!(inode->i_mode & S_IRUSR))
++				goto permission_denied;
+ 		} else if (mask & MAY_WRITE) {
+ 			if (!(access & AFS_ACE_WRITE))
+ 				goto permission_denied;
++			if (!(inode->i_mode & S_IWUSR))
++				goto permission_denied;
+ 		}
+ 	}
+ 
+ 	key_put(key);
+-	ret = generic_permission(inode, mask);
+ 	_leave(" = %d", ret);
+ 	return ret;
+ 
+diff --git a/fs/afs/server.c b/fs/afs/server.c
+index f342acf3547d..3bc1a46f0bd6 100644
+--- a/fs/afs/server.c
++++ b/fs/afs/server.c
+@@ -237,7 +237,7 @@ void afs_put_server(struct afs_server *server)
+ 	spin_lock(&afs_server_graveyard_lock);
+ 	if (atomic_read(&server->usage) == 0) {
+ 		list_move_tail(&server->grave, &afs_server_graveyard);
+-		server->time_of_death = get_seconds();
++		server->time_of_death = ktime_get_real_seconds();
+ 		queue_delayed_work(afs_wq, &afs_server_reaper,
+ 				   afs_server_timeout * HZ);
+ 	}
+@@ -272,9 +272,9 @@ static void afs_reap_server(struct work_struct *work)
+ 	LIST_HEAD(corpses);
+ 	struct afs_server *server;
+ 	unsigned long delay, expiry;
+-	time_t now;
++	time64_t now;
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 	spin_lock(&afs_server_graveyard_lock);
+ 
+ 	while (!list_empty(&afs_server_graveyard)) {
+diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
+index 52976785a32c..ee9015c0db5a 100644
+--- a/fs/afs/vlocation.c
++++ b/fs/afs/vlocation.c
+@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
+ 	struct afs_vlocation *xvl;
+ 
+ 	/* wait at least 10 minutes before updating... */
+-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
++	vl->update_at = ktime_get_real_seconds() +
++			afs_vlocation_update_timeout;
+ 
+ 	spin_lock(&afs_vlocation_updates_lock);
+ 
+@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
+ 	if (atomic_read(&vl->usage) == 0) {
+ 		_debug("buried");
+ 		list_move_tail(&vl->grave, &afs_vlocation_graveyard);
+-		vl->time_of_death = get_seconds();
++		vl->time_of_death = ktime_get_real_seconds();
+ 		queue_delayed_work(afs_wq, &afs_vlocation_reap,
+ 				   afs_vlocation_timeout * HZ);
+ 
+@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
+ 	LIST_HEAD(corpses);
+ 	struct afs_vlocation *vl;
+ 	unsigned long delay, expiry;
+-	time_t now;
++	time64_t now;
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 	spin_lock(&afs_vlocation_graveyard_lock);
+ 
+ 	while (!list_empty(&afs_vlocation_graveyard)) {
+@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
+ {
+ 	struct afs_cache_vlocation vldb;
+ 	struct afs_vlocation *vl, *xvl;
+-	time_t now;
++	time64_t now;
+ 	long timeout;
+ 	int ret;
+ 
+ 	_enter("");
+ 
+-	now = get_seconds();
++	now = ktime_get_real_seconds();
+ 
+ 	/* find a record to update */
+ 	spin_lock(&afs_vlocation_updates_lock);
+@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
+ 
+ 	/* and then reschedule */
+ 	_debug("reschedule");
+-	vl->update_at = get_seconds() + afs_vlocation_update_timeout;
++	vl->update_at = ktime_get_real_seconds() +
++			afs_vlocation_update_timeout;
+ 
+ 	spin_lock(&afs_vlocation_updates_lock);
+ 
+diff --git a/fs/afs/write.c b/fs/afs/write.c
+index 0714abcd7f32..5cfc05ca184c 100644
+--- a/fs/afs/write.c
++++ b/fs/afs/write.c
+@@ -148,12 +148,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		kfree(candidate);
+ 		return -ENOMEM;
+ 	}
+-	*pagep = page;
+-	/* page won't leak in error case: it eventually gets cleaned off LRU */
+ 
+ 	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
+ 		ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
+ 		if (ret < 0) {
++			unlock_page(page);
++			put_page(page);
+ 			kfree(candidate);
+ 			_leave(" = %d [prep]", ret);
+ 			return ret;
+@@ -161,6 +161,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
+ 		SetPageUptodate(page);
+ 	}
+ 
++	/* page won't leak in error case: it eventually gets cleaned off LRU */
++	*pagep = page;
++
+ try_again:
+ 	spin_lock(&vnode->writeback_lock);
+ 
+@@ -296,10 +299,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
+ 		ASSERTCMP(pv.nr, ==, count);
+ 
+ 		for (loop = 0; loop < count; loop++) {
+-			ClearPageUptodate(pv.pages[loop]);
++			struct page *page = pv.pages[loop];
++			ClearPageUptodate(page);
+ 			if (error)
+-				SetPageError(pv.pages[loop]);
+-			end_page_writeback(pv.pages[loop]);
++				SetPageError(page);
++			if (PageWriteback(page))
++				end_page_writeback(page);
++			if (page->index >= first)
++				first = page->index + 1;
+ 		}
+ 
+ 		__pagevec_release(&pv);
+@@ -503,6 +510,7 @@ static int afs_writepages_region(struct address_space *mapping,
+ 
+ 		if (PageWriteback(page) || !PageDirty(page)) {
+ 			unlock_page(page);
++			put_page(page);
+ 			continue;
+ 		}
+ 
+@@ -739,6 +747,20 @@ out:
+ 	return ret;
+ }
+ 
++/*
++ * Flush out all outstanding writes on a file opened for writing when it is
++ * closed.
++ */
++int afs_flush(struct file *file, fl_owner_t id)
++{
++	_enter("");
++
++	if ((file->f_mode & FMODE_WRITE) == 0)
++		return 0;
++
++	return vfs_fsync(file, 0);
++}
++
+ /*
+  * notification that a previously read-only page is about to become writable
+  * - if it returns an error, the caller will deliver a bus error signal
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index fe6e7050fe50..98198c57370b 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -174,7 +174,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ 
+ 	mutex_unlock(&sbi->wq_mutex);
+ 
+-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
+ 	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
+ 	case 0:
+ 		break;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index db6115486166..d14af5bd13d6 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -6667,6 +6667,20 @@ static noinline int uncompress_inline(struct btrfs_path *path,
+ 	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
+ 	ret = btrfs_decompress(compress_type, tmp, page,
+ 			       extent_offset, inline_size, max_size);
++
++	/*
++	 * decompression code contains a memset to fill in any space between the end
++	 * of the uncompressed data and the end of max_size in case the decompressed
++	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
++	 * the end of an inline extent and the beginning of the next block, so we
++	 * cover that region here.
++	 */
++
++	if (max_size + pg_offset < PAGE_SIZE) {
++		char *map = kmap(page);
++		memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset);
++		kunmap(page);
++	}
+ 	kfree(tmp);
+ 	return ret;
+ }
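The added memset above zeroes the gap between the end of an inline extent's decompressed data and the end of the page, so stale page contents cannot leak into the hole. Reduced to a standalone helper over a plain buffer:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void zero_tail(char *page, size_t pg_offset, size_t max_size)
{
	/* clear everything past the decompressed region */
	if (max_size + pg_offset < PAGE_SIZE)
		memset(page + pg_offset + max_size, 0,
		       PAGE_SIZE - max_size - pg_offset);
}

int main(void)
{
	static char page[PAGE_SIZE];

	memset(page, 0xAA, sizeof(page));	/* pretend stale contents */
	zero_tail(page, 0, 100);
	printf("%d\n", page[100]);		/* 0 */
	return 0;
}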
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 447d64e47c4c..dbc793e31f35 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4723,6 +4723,7 @@ retry:
+ 						    EXT4_INODE_EOFBLOCKS);
+ 		}
+ 		ext4_mark_inode_dirty(handle, inode);
++		ext4_update_inode_fsync_trans(handle, inode, 1);
+ 		ret2 = ext4_journal_stop(handle);
+ 		if (ret2)
+ 			break;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 443ff49dc36f..c1feaf011515 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3531,6 +3531,10 @@ static int ext4_block_truncate_page(handle_t *handle,
+ 	unsigned blocksize;
+ 	struct inode *inode = mapping->host;
+ 
++	/* If we are processing an encrypted inode during orphan list handling */
++	if (ext4_encrypted_inode(inode) && !ext4_has_encryption_key(inode))
++		return 0;
++
+ 	blocksize = inode->i_sb->s_blocksize;
+ 	length = blocksize - (offset & (blocksize - 1));
+ 
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 03482c01fb3e..aa33db84ccba 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -1412,6 +1412,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
+ 			       "falling back\n"));
+ 	}
+ 	nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
++	if (!nblocks) {
++		ret = NULL;
++		goto cleanup_and_exit;
++	}
+ 	start = EXT4_I(dir)->i_dir_start_lookup;
+ 	if (start >= nblocks)
+ 		start = 0;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 807b1df8e134..b29a7ef4953e 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -774,6 +774,7 @@ static void ext4_put_super(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	struct ext4_super_block *es = sbi->s_es;
++	int aborted = 0;
+ 	int i, err;
+ 
+ 	ext4_unregister_li_request(sb);
+@@ -783,9 +784,10 @@ static void ext4_put_super(struct super_block *sb)
+ 	destroy_workqueue(sbi->rsv_conversion_wq);
+ 
+ 	if (sbi->s_journal) {
++		aborted = is_journal_aborted(sbi->s_journal);
+ 		err = jbd2_journal_destroy(sbi->s_journal);
+ 		sbi->s_journal = NULL;
+-		if (err < 0)
++		if ((err < 0) && !aborted)
+ 			ext4_abort(sb, "Couldn't clean up the journal");
+ 	}
+ 
+@@ -796,7 +798,7 @@ static void ext4_put_super(struct super_block *sb)
+ 	ext4_ext_release(sb);
+ 	ext4_xattr_put_super(sb);
+ 
+-	if (!(sb->s_flags & MS_RDONLY)) {
++	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
+ 		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 		es->s_state = cpu_to_le16(sbi->s_mount_state);
+ 	}
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index 31892871ea87..2f45f0ce5d66 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -255,7 +255,7 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
+ 			goto out;
+ 	}
+ 	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
+-		if (flags & GFS2_DIF_JDATA)
++		if (new_flags & GFS2_DIF_JDATA)
+ 			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
+ 		error = filemap_fdatawrite(inode->i_mapping);
+ 		if (error)
+@@ -263,6 +263,8 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
+ 		error = filemap_fdatawait(inode->i_mapping);
+ 		if (error)
+ 			goto out;
++		if (new_flags & GFS2_DIF_JDATA)
++			gfs2_ordered_del_inode(ip);
+ 	}
+ 	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
+ 	if (error)
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 1484ae1907c6..d2cb63259560 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -2076,7 +2076,7 @@ out:
+ 		if (new_inode != NULL)
+ 			nfs_drop_nlink(new_inode);
+ 		d_move(old_dentry, new_dentry);
+-		nfs_set_verifier(new_dentry,
++		nfs_set_verifier(old_dentry,
+ 					nfs_save_change_attribute(new_dir));
+ 	} else if (error == -ENOENT)
+ 		nfs_dentry_handle_enoent(old_dentry);
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index 5dea913baf46..a03ec3b53d9e 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -899,9 +899,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
+ 	server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
+ 	server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
+ 
+-	if (server->rsize > server_resp_sz)
++	if (!server->rsize || server->rsize > server_resp_sz)
+ 		server->rsize = server_resp_sz;
+-	if (server->wsize > server_rqst_sz)
++	if (!server->wsize || server->wsize > server_rqst_sz)
+ 		server->wsize = server_rqst_sz;
+ #endif /* CONFIG_NFS_V4_1 */
+ }
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 1ce18913762a..0fb0dc739fb2 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -38,7 +38,6 @@
+ #include <linux/mm.h>
+ #include <linux/delay.h>
+ #include <linux/errno.h>
+-#include <linux/file.h>
+ #include <linux/string.h>
+ #include <linux/ratelimit.h>
+ #include <linux/printk.h>
+@@ -5605,7 +5604,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
+ 	p->server = server;
+ 	atomic_inc(&lsp->ls_count);
+ 	p->ctx = get_nfs_open_context(ctx);
+-	get_file(fl->fl_file);
+ 	memcpy(&p->fl, fl, sizeof(p->fl));
+ 	return p;
+ out_free_seqid:
+@@ -5718,7 +5716,6 @@ static void nfs4_lock_release(void *calldata)
+ 		nfs_free_seqid(data->arg.lock_seqid);
+ 	nfs4_put_lock_state(data->lsp);
+ 	put_nfs_open_context(data->ctx);
+-	fput(data->fl.fl_file);
+ 	kfree(data);
+ 	dprintk("%s: done!\n", __func__);
+ }
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 2a9ab265aa32..f8d2902ec118 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1678,7 +1678,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+ 			break;
+ 		case -NFS4ERR_STALE_CLIENTID:
+ 			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+-			nfs4_state_clear_reclaim_reboot(clp);
+ 			nfs4_state_start_reclaim_reboot(clp);
+ 			break;
+ 		case -NFS4ERR_EXPIRED:
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index c493427e9d69..a208c7ea9680 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -151,7 +151,8 @@ int nfsd_vers(int vers, enum vers_op change)
+ 
+ int nfsd_minorversion(u32 minorversion, enum vers_op change)
+ {
+-	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION)
++	if (minorversion > NFSD_SUPPORTED_MINOR_VERSION &&
++	    change != NFSD_AVAIL)
+ 		return -1;
+ 	switch(change) {
+ 	case NFSD_SET:
+@@ -329,23 +330,20 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
+ 
+ void nfsd_reset_versions(void)
+ {
+-	int found_one = 0;
+ 	int i;
+ 
+-	for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++) {
+-		if (nfsd_program.pg_vers[i])
+-			found_one = 1;
+-	}
+-
+-	if (!found_one) {
+-		for (i = NFSD_MINVERS; i < NFSD_NRVERS; i++)
+-			nfsd_program.pg_vers[i] = nfsd_version[i];
+-#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
+-		for (i = NFSD_ACL_MINVERS; i < NFSD_ACL_NRVERS; i++)
+-			nfsd_acl_program.pg_vers[i] =
+-				nfsd_acl_version[i];
+-#endif
+-	}
++	for (i = 0; i < NFSD_NRVERS; i++)
++		if (nfsd_vers(i, NFSD_TEST))
++			return;
++
++	for (i = 0; i < NFSD_NRVERS; i++)
++		if (i != 4)
++			nfsd_vers(i, NFSD_SET);
++		else {
++			int minor = 0;
++			while (nfsd_minorversion(minor, NFSD_SET) >= 0)
++				minor++;
++		}
+ }
+ 
+ /*
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 92de3747ea8b..5eaee287be23 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -369,7 +369,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 	__be32		err;
+ 	int		host_err;
+ 	bool		get_write_count;
+-	int		size_change = 0;
++	bool		size_change = (iap->ia_valid & ATTR_SIZE);
+ 
+ 	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
+ 		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
+@@ -382,11 +382,11 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 	/* Get inode */
+ 	err = fh_verify(rqstp, fhp, ftype, accmode);
+ 	if (err)
+-		goto out;
++		return err;
+ 	if (get_write_count) {
+ 		host_err = fh_want_write(fhp);
+ 		if (host_err)
+-			return nfserrno(host_err);
++			goto out;
+ 	}
+ 
+ 	dentry = fhp->fh_dentry;
+@@ -397,20 +397,28 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 		iap->ia_valid &= ~ATTR_MODE;
+ 
+ 	if (!iap->ia_valid)
+-		goto out;
++		return 0;
+ 
+ 	nfsd_sanitize_attrs(inode, iap);
+ 
++	if (check_guard && guardtime != inode->i_ctime.tv_sec)
++		return nfserr_notsync;
++
+ 	/*
+ 	 * The size case is special, it changes the file in addition to the
+-	 * attributes.
++	 * attributes, and file systems don't expect it to be mixed with
++	 * "random" attribute changes.  We thus split out the size change
++	 * into a separate call to ->setattr, and do the rest as a separate
++	 * setattr call.
+ 	 */
+-	if (iap->ia_valid & ATTR_SIZE) {
++	if (size_change) {
+ 		err = nfsd_get_write_access(rqstp, fhp, iap);
+ 		if (err)
+-			goto out;
+-		size_change = 1;
++			return err;
++	}
+ 
++	fh_lock(fhp);
++	if (size_change) {
+ 		/*
+ 		 * RFC5661, Section 18.30.4:
+ 		 *   Changing the size of a file with SETATTR indirectly
+@@ -418,29 +426,36 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
+ 		 *
+ 		 * (and similar for the older RFCs)
+ 		 */
+-		if (iap->ia_size != i_size_read(inode))
+-			iap->ia_valid |= ATTR_MTIME;
+-	}
++		struct iattr size_attr = {
++			.ia_valid	= ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
++			.ia_size	= iap->ia_size,
++		};
+ 
+-	iap->ia_valid |= ATTR_CTIME;
++		host_err = notify_change(dentry, &size_attr, NULL);
++		if (host_err)
++			goto out_unlock;
++		iap->ia_valid &= ~ATTR_SIZE;
+ 
+-	if (check_guard && guardtime != inode->i_ctime.tv_sec) {
+-		err = nfserr_notsync;
+-		goto out_put_write_access;
++		/*
++		 * Avoid the additional setattr call below if the only other
++		 * attribute that the client sends is the mtime, as we update
++		 * it as part of the size change above.
++		 */
++		if ((iap->ia_valid & ~ATTR_MTIME) == 0)
++			goto out_unlock;
+ 	}
+ 
+-	fh_lock(fhp);
++	iap->ia_valid |= ATTR_CTIME;
+ 	host_err = notify_change(dentry, iap, NULL);
+-	fh_unlock(fhp);
+-	err = nfserrno(host_err);
+ 
+-out_put_write_access:
++out_unlock:
++	fh_unlock(fhp);
+ 	if (size_change)
+ 		put_write_access(inode);
+-	if (!err)
+-		err = nfserrno(commit_metadata(fhp));
+ out:
+-	return err;
++	if (!host_err)
++		host_err = commit_metadata(fhp);
++	return nfserrno(host_err);
+ }
+ 
+ #if defined(CONFIG_NFSD_V4)
+diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
+index 15f327bed8c6..7340c36978a3 100644
+--- a/fs/proc/proc_tty.c
++++ b/fs/proc/proc_tty.c
+@@ -14,6 +14,7 @@
+ #include <linux/tty.h>
+ #include <linux/seq_file.h>
+ #include <linux/bitops.h>
++#include "internal.h"
+ 
+ /*
+  * The /proc/tty directory inodes...
+@@ -164,7 +165,7 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
+ 	if (!ent)
+ 		return;
+ 		
+-	remove_proc_entry(driver->driver_name, proc_tty_driver);
++	remove_proc_entry(ent->name, proc_tty_driver);
+ 	
+ 	driver->proc_entry = NULL;
+ }
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 6299f341967b..97bbcea2978a 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -705,7 +705,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
+ 	else
+ 		sectorsize = sb->s_blocksize;
+ 
+-	sector += (sbi->s_session << sb->s_blocksize_bits);
++	sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+ 
+ 	udf_debug("Starting at sector %u (%ld byte sectors)\n",
+ 		  (unsigned int)(sector >> sb->s_blocksize_bits),
+diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
+index 51c85b6a0c34..cf9029d9a3f3 100644
+--- a/fs/xfs/libxfs/xfs_bmap.c
++++ b/fs/xfs/libxfs/xfs_bmap.c
+@@ -2671,7 +2671,7 @@ xfs_bmap_add_extent_unwritten_real(
+ 					&i)))
+ 				goto done;
+ 			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+-			cur->bc_rec.b.br_state = XFS_EXT_NORM;
++			cur->bc_rec.b.br_state = new->br_state;
+ 			if ((error = xfs_btree_insert(cur, &i)))
+ 				goto done;
+ 			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index a217176fde65..bc151999b68b 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -301,6 +301,14 @@ xfs_dinode_verify(
+ 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
+ 		return false;
+ 
++	/* don't allow invalid i_size */
++	if (be64_to_cpu(dip->di_size) & (1ULL << 63))
++		return false;
++
++	/* No zero-length symlinks. */
++	if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
++		return false;
++
+ 	/* only version 3 or greater inodes are extensively verified here */
+ 	if (dip->di_version < 3)
+ 		return true;
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index fb0eb697a621..bdb68e919b46 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -374,6 +374,7 @@ retry:
+ out_free_pages:
+ 	for (i = 0; i < bp->b_page_count; i++)
+ 		__free_page(bp->b_pages[i]);
++	bp->b_flags &= ~_XBF_PAGES;
+ 	return error;
+ }
+ 
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 4aefff89949d..c0065697498c 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -738,7 +738,7 @@ xlog_find_head(
+ 	 * in the in-core log.  The following number can be made tighter if
+ 	 * we actually look at the block size of the filesystem.
+ 	 */
+-	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
++	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
+ 	if (head_blk >= num_scan_bblks) {
+ 		/*
+ 		 * We are guaranteed that the entire check can be performed
+diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
+index a25414ce2898..9779c35f8454 100644
+--- a/include/crypto/internal/hash.h
++++ b/include/crypto/internal/hash.h
+@@ -83,6 +83,14 @@ int ahash_register_instance(struct crypto_template *tmpl,
+ 			    struct ahash_instance *inst);
+ void ahash_free_instance(struct crypto_instance *inst);
+ 
++int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
++		    unsigned int keylen);
++
++static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
++{
++	return alg->setkey != shash_no_setkey;
++}
++
+ int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
+ 			    struct hash_alg_common *alg,
+ 			    struct crypto_instance *inst);
+diff --git a/include/drm/drmP.h b/include/drm/drmP.h
+index 62c40777c009..8aba0f72aae4 100644
+--- a/include/drm/drmP.h
++++ b/include/drm/drmP.h
+@@ -153,6 +153,26 @@ void drm_err(const char *format, ...);
+ /** \name Macros to make printk easier */
+ /*@{*/
+ 
++#define _DRM_PRINTK(once, level, fmt, ...)				\
++	do {								\
++		printk##once(KERN_##level "[" DRM_NAME "] " fmt,	\
++			     ##__VA_ARGS__);				\
++	} while (0)
++
++#define DRM_INFO(fmt, ...)						\
++	_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
++#define DRM_NOTE(fmt, ...)						\
++	_DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
++#define DRM_WARN(fmt, ...)						\
++	_DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
++
++#define DRM_INFO_ONCE(fmt, ...)						\
++	_DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
++#define DRM_NOTE_ONCE(fmt, ...)						\
++	_DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
++#define DRM_WARN_ONCE(fmt, ...)						\
++	_DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
++
+ /**
+  * Error output.
+  *
+@@ -178,12 +198,6 @@ void drm_err(const char *format, ...);
+ 		drm_err(fmt, ##__VA_ARGS__);				\
+ })
+ 
+-#define DRM_INFO(fmt, ...)				\
+-	printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+-
+-#define DRM_INFO_ONCE(fmt, ...)				\
+-	printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+-
+ /**
+  * Debug output.
+  *
+diff --git a/include/linux/bitops.h b/include/linux/bitops.h
+index 297f5bda4fdf..858020682ac8 100644
+--- a/include/linux/bitops.h
++++ b/include/linux/bitops.h
+@@ -171,6 +171,17 @@ static inline __s32 sign_extend32(__u32 value, int index)
+ 	return (__s32)(value << shift) >> shift;
+ }
+ 
++/**
++ * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
++ * @value: value to sign extend
++ * @index: 0 based bit index (0<=index<64) to sign bit
++ */
++static inline __s64 sign_extend64(__u64 value, int index)
++{
++	__u8 shift = 63 - index;
++	return (__s64)(value << shift) >> shift;
++}
++
+ static inline unsigned fls_long(unsigned long l)
+ {
+ 	if (sizeof(l) == 4)
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index e6797ded700e..696b6c44c564 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -236,12 +236,10 @@ static inline int block_page_mkwrite_return(int err)
+ {
+ 	if (err == 0)
+ 		return VM_FAULT_LOCKED;
+-	if (err == -EFAULT)
++	if (err == -EFAULT || err == -EAGAIN)
+ 		return VM_FAULT_NOPAGE;
+ 	if (err == -ENOMEM)
+ 		return VM_FAULT_OOM;
+-	if (err == -EAGAIN)
+-		return VM_FAULT_RETRY;
+ 	/* -ENOSPC, -EDQUOT, -EIO ... */
+ 	return VM_FAULT_SIGBUS;
+ }
+diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
+index e55c08bc3a96..0abc56140c83 100644
+--- a/include/linux/ceph/osdmap.h
++++ b/include/linux/ceph/osdmap.h
+@@ -49,7 +49,7 @@ static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
+ 	case CEPH_POOL_TYPE_EC:
+ 		return false;
+ 	default:
+-		BUG_ON(1);
++		BUG();
+ 	}
+ }
+ 
+diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
+index 1ccaab44abcc..ec78cd93c0c1 100644
+--- a/include/linux/genalloc.h
++++ b/include/linux/genalloc.h
+@@ -31,6 +31,7 @@
+ #define __GENALLOC_H__
+ 
+ #include <linux/spinlock_types.h>
++#include <linux/atomic.h>
+ 
+ struct device;
+ struct device_node;
+@@ -66,7 +67,7 @@ struct gen_pool {
+  */
+ struct gen_pool_chunk {
+ 	struct list_head next_chunk;	/* next chunk in pool */
+-	atomic_t avail;
++	atomic_long_t avail;
+ 	phys_addr_t phys_addr;		/* physical starting address of memory chunk */
+ 	unsigned long start_addr;	/* start address of memory chunk */
+ 	unsigned long end_addr;		/* end address of memory chunk (inclusive) */
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index 29a57a5b7cee..ef4945c3f713 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -173,8 +173,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ 		    int len, void *val);
+ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 			    int len, struct kvm_io_device *dev);
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			      struct kvm_io_device *dev);
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			       struct kvm_io_device *dev);
+ 
+ #ifdef CONFIG_KVM_ASYNC_PF
+ struct kvm_async_pf {
+diff --git a/include/linux/log2.h b/include/linux/log2.h
+index ef3d4f67118c..c373295f359f 100644
+--- a/include/linux/log2.h
++++ b/include/linux/log2.h
+@@ -15,12 +15,6 @@
+ #include <linux/types.h>
+ #include <linux/bitops.h>
+ 
+-/*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+ /*
+  * non-constant log of base 2 calculators
+  * - the arch may override these in asm/bitops.h if they can be implemented
+@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n)				\
+ (						\
+ 	__builtin_constant_p(n) ? (		\
+-		(n) < 1 ? ____ilog2_NaN() :	\
++		(n) < 2 ? 0 :			\
+ 		(n) & (1ULL << 63) ? 63 :	\
+ 		(n) & (1ULL << 62) ? 62 :	\
+ 		(n) & (1ULL << 61) ? 61 :	\
+@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ 		(n) & (1ULL <<  4) ?  4 :	\
+ 		(n) & (1ULL <<  3) ?  3 :	\
+ 		(n) & (1ULL <<  2) ?  2 :	\
+-		(n) & (1ULL <<  1) ?  1 :	\
+-		(n) & (1ULL <<  0) ?  0 :	\
+-		____ilog2_NaN()			\
+-				   ) :		\
++		1 ) :				\
+ 	(sizeof(n) <= 4) ?			\
+ 	__ilog2_u32(n) :			\
+ 	__ilog2_u64(n)				\
+diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
+index 83e80ab94500..41fe2f858830 100644
+--- a/include/linux/mlx4/device.h
++++ b/include/linux/mlx4/device.h
+@@ -443,6 +443,7 @@ enum {
+ enum {
+ 	MLX4_INTERFACE_STATE_UP		= 1 << 0,
+ 	MLX4_INTERFACE_STATE_DELETION	= 1 << 1,
++	MLX4_INTERFACE_STATE_NOWAIT	= 1 << 2,
+ };
+ 
+ #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+diff --git a/include/linux/mman.h b/include/linux/mman.h
+index 16373c8f5f57..369bc3405a6d 100644
+--- a/include/linux/mman.h
++++ b/include/linux/mman.h
+@@ -63,8 +63,9 @@ static inline int arch_validate_prot(unsigned long prot)
+  * ("bit1" and "bit2" must be single bits)
+  */
+ #define _calc_vm_trans(x, bit1, bit2) \
++  ((!(bit1) || !(bit2)) ? 0 : \
+   ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
+-   : ((x) & (bit1)) / ((bit1) / (bit2)))
++   : ((x) & (bit1)) / ((bit1) / (bit2))))
+ 
+ /*
+  * Combine the mmap "prot" argument into "vm_flags" used internally.
+diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
+index 7dee00143afd..c201e31e9d7e 100644
+--- a/include/linux/omap-gpmc.h
++++ b/include/linux/omap-gpmc.h
+@@ -191,10 +191,11 @@ static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
+ #endif
+ 
+ #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
+-extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
++extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
+ #else
+ #define board_onenand_data	NULL
+-static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
++static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
+ {
++	return 0;
+ }
+ #endif
+diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
+index dae99d7d2bc0..706a7017885c 100644
+--- a/include/scsi/libsas.h
++++ b/include/scsi/libsas.h
+@@ -165,11 +165,11 @@ struct expander_device {
+ 
+ struct sata_device {
+ 	unsigned int class;
+-	struct smp_resp        rps_resp; /* report_phy_sata_resp */
+ 	u8     port_no;        /* port number, if this is a PM (Port) */
+ 
+ 	struct ata_port *ap;
+ 	struct ata_host ata_host;
++	struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
+ 	u8     fis[ATA_RESP_FIS_SIZE];
+ };
+ 
+diff --git a/include/uapi/linux/packet_diag.h b/include/uapi/linux/packet_diag.h
+index d08c63f3dd6f..0c5d5dd61b6a 100644
+--- a/include/uapi/linux/packet_diag.h
++++ b/include/uapi/linux/packet_diag.h
+@@ -64,7 +64,7 @@ struct packet_diag_mclist {
+ 	__u32	pdmc_count;
+ 	__u16	pdmc_type;
+ 	__u16	pdmc_alen;
+-	__u8	pdmc_addr[MAX_ADDR_LEN];
++	__u8	pdmc_addr[32]; /* MAX_ADDR_LEN */
+ };
+ 
+ struct packet_diag_ring {
+diff --git a/kernel/audit.c b/kernel/audit.c
+index 1c13e4267de6..ae05a41d713a 100644
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -80,13 +80,13 @@ static int	audit_initialized;
+ #define AUDIT_OFF	0
+ #define AUDIT_ON	1
+ #define AUDIT_LOCKED	2
+-u32		audit_enabled;
+-u32		audit_ever_enabled;
++u32		audit_enabled = AUDIT_OFF;
++u32		audit_ever_enabled = !!AUDIT_OFF;
+ 
+ EXPORT_SYMBOL_GPL(audit_enabled);
+ 
+ /* Default state when kernel boots without any parameters. */
+-static u32	audit_default;
++static u32	audit_default = AUDIT_OFF;
+ 
+ /* If auditing cannot proceed, audit_failure selects what happens. */
+ static u32	audit_failure = AUDIT_FAIL_PRINTK;
+@@ -1165,8 +1165,6 @@ static int __init audit_init(void)
+ 	skb_queue_head_init(&audit_skb_queue);
+ 	skb_queue_head_init(&audit_skb_hold_queue);
+ 	audit_initialized = AUDIT_INITIALIZED;
+-	audit_enabled = audit_default;
+-	audit_ever_enabled |= !!audit_default;
+ 
+ 	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL, "initialized");
+ 
+@@ -1183,6 +1181,8 @@ static int __init audit_enable(char *str)
+ 	audit_default = !!simple_strtol(str, NULL, 0);
+ 	if (!audit_default)
+ 		audit_initialized = AUDIT_DISABLED;
++	audit_enabled = audit_default;
++	audit_ever_enabled = !!audit_enabled;
+ 
+ 	pr_info("%s\n", audit_default ?
+ 		"enabled (after initialization)" : "disabled (until reboot)");
+diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
+index fc1ef736253c..77777d918676 100644
+--- a/kernel/debug/kdb/kdb_io.c
++++ b/kernel/debug/kdb/kdb_io.c
+@@ -349,7 +349,7 @@ poll_again:
+ 			}
+ 			kdb_printf("\n");
+ 			for (i = 0; i < count; i++) {
+-				if (kallsyms_symbol_next(p_tmp, i) < 0)
++				if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+ 					break;
+ 				kdb_printf("%s ", p_tmp);
+ 				*(p_tmp + len) = '\0';
+diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
+index a744098e4eb7..f850e906564b 100644
+--- a/kernel/gcov/base.c
++++ b/kernel/gcov/base.c
+@@ -92,6 +92,18 @@ void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters)
+ }
+ EXPORT_SYMBOL(__gcov_merge_time_profile);
+ 
++void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters)
++{
++	/* Unused. */
++}
++EXPORT_SYMBOL(__gcov_merge_icall_topn);
++
++void __gcov_exit(void)
++{
++	/* Unused. */
++}
++EXPORT_SYMBOL(__gcov_exit);
++
+ /**
+  * gcov_enable_events - enable event reporting through gcov_event()
+  *
+diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
+index 826ba9fb5e32..46a18e72bce6 100644
+--- a/kernel/gcov/gcc_4_7.c
++++ b/kernel/gcov/gcc_4_7.c
+@@ -18,7 +18,11 @@
+ #include <linux/vmalloc.h>
+ #include "gcov.h"
+ 
+-#if __GNUC__ == 4 && __GNUC_MINOR__ >= 9
++#if (__GNUC__ >= 7)
++#define GCOV_COUNTERS			9
++#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
++#define GCOV_COUNTERS			10
++#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+ #define GCOV_COUNTERS			9
+ #else
+ #define GCOV_COUNTERS			8
+diff --git a/kernel/padata.c b/kernel/padata.c
+index ed6f4f93cc92..ecc7b3f452c7 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -189,19 +189,20 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
+ 
+ 	reorder = &next_queue->reorder;
+ 
++	spin_lock(&reorder->lock);
+ 	if (!list_empty(&reorder->list)) {
+ 		padata = list_entry(reorder->list.next,
+ 				    struct padata_priv, list);
+ 
+-		spin_lock(&reorder->lock);
+ 		list_del_init(&padata->list);
+ 		atomic_dec(&pd->reorder_objects);
+-		spin_unlock(&reorder->lock);
+ 
+ 		pd->processed++;
+ 
++		spin_unlock(&reorder->lock);
+ 		goto out;
+ 	}
++	spin_unlock(&reorder->lock);
+ 
+ 	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
+ 		padata = ERR_PTR(-ENODATA);
+diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
+index 5e95145088fd..e45d441176a8 100644
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -419,13 +419,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
+  *
+  * This function returns true if:
+  *
+- *   runtime / (deadline - t) > dl_runtime / dl_period ,
++ *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
+  *
+  * IOW we can't recycle current parameters.
+  *
+- * Notice that the bandwidth check is done against the period. For
++ * Notice that the bandwidth check is done against the deadline. For
+  * task with deadline equal to period this is the same of using
+- * dl_deadline instead of dl_period in the equation above.
++ * dl_period instead of dl_deadline in the equation above.
+  */
+ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ 			       struct sched_dl_entity *pi_se, u64 t)
+@@ -450,7 +450,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
+ 	 * of anything below microseconds resolution is actually fiction
+ 	 * (but still we want to give the user that illusion >;).
+ 	 */
+-	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
++	left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
+ 	right = ((dl_se->deadline - t) >> DL_SCALE) *
+ 		(pi_se->dl_runtime >> DL_SCALE);
+ 
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index ca1a306ea7e6..7a26798ffbf9 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3376,37 +3376,30 @@ static const struct file_operations show_traces_fops = {
+ 	.llseek		= seq_lseek,
+ };
+ 
+-/*
+- * The tracer itself will not take this lock, but still we want
+- * to provide a consistent cpumask to user-space:
+- */
+-static DEFINE_MUTEX(tracing_cpumask_update_lock);
+-
+-/*
+- * Temporary storage for the character representation of the
+- * CPU bitmask (and one more byte for the newline):
+- */
+-static char mask_str[NR_CPUS + 1];
+-
+ static ssize_t
+ tracing_cpumask_read(struct file *filp, char __user *ubuf,
+ 		     size_t count, loff_t *ppos)
+ {
+ 	struct trace_array *tr = file_inode(filp)->i_private;
++	char *mask_str;
+ 	int len;
+ 
+-	mutex_lock(&tracing_cpumask_update_lock);
++	len = snprintf(NULL, 0, "%*pb\n",
++		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
++	mask_str = kmalloc(len, GFP_KERNEL);
++	if (!mask_str)
++		return -ENOMEM;
+ 
+-	len = snprintf(mask_str, count, "%*pb\n",
++	len = snprintf(mask_str, len, "%*pb\n",
+ 		       cpumask_pr_args(tr->tracing_cpumask));
+ 	if (len >= count) {
+ 		count = -EINVAL;
+ 		goto out_err;
+ 	}
+-	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
++	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+ 
+ out_err:
+-	mutex_unlock(&tracing_cpumask_update_lock);
++	kfree(mask_str);
+ 
+ 	return count;
+ }
+@@ -3426,8 +3419,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ 	if (err)
+ 		goto err_unlock;
+ 
+-	mutex_lock(&tracing_cpumask_update_lock);
+-
+ 	local_irq_disable();
+ 	arch_spin_lock(&tr->max_lock);
+ 	for_each_tracing_cpu(cpu) {
+@@ -3450,8 +3441,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ 	local_irq_enable();
+ 
+ 	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
+-
+-	mutex_unlock(&tracing_cpumask_update_lock);
+ 	free_cpumask_var(tracing_cpumask_new);
+ 
+ 	return count;
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 791f6ebc84a3..376db986db9b 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1484,6 +1484,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	struct timer_list *timer = &dwork->timer;
+ 	struct work_struct *work = &dwork->work;
+ 
++	WARN_ON_ONCE(!wq);
+ 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
+ 		     timer->data != (unsigned long)dwork);
+ 	WARN_ON_ONCE(timer_pending(timer));
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 3bb040e347ec..bbdfbcb912f3 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -421,6 +421,8 @@ next_op:
+ 			else
+ 				act = machine[pc + 1];
+ 			ret = actions[act](context, hdr, 0, data + tdp, len);
++			if (ret < 0)
++				return ret;
+ 		}
+ 		pc += asn1_op_lengths[op];
+ 		goto next_op;
+diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
+index d8f3d3150603..2b382c165571 100644
+--- a/lib/dynamic_debug.c
++++ b/lib/dynamic_debug.c
+@@ -353,6 +353,10 @@ static int ddebug_parse_query(char *words[], int nwords,
+ 				if (parse_lineno(last, &query->last_lineno) < 0)
+ 					return -EINVAL;
+ 
++				/* special case for last lineno not specified */
++				if (query->last_lineno == 0)
++					query->last_lineno = UINT_MAX;
++
+ 				if (query->last_lineno < query->first_lineno) {
+ 					pr_err("last-line:%d < 1st-line:%d\n",
+ 						query->last_lineno,
+diff --git a/lib/genalloc.c b/lib/genalloc.c
+index d214866eeea2..59dafa4ff412 100644
+--- a/lib/genalloc.c
++++ b/lib/genalloc.c
+@@ -193,7 +193,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
+ 	chunk->phys_addr = phys;
+ 	chunk->start_addr = virt;
+ 	chunk->end_addr = virt + size - 1;
+-	atomic_set(&chunk->avail, size);
++	atomic_long_set(&chunk->avail, size);
+ 
+ 	spin_lock(&pool->lock);
+ 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
+@@ -284,7 +284,7 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
+ 	nbits = (size + (1UL << order) - 1) >> order;
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
+-		if (size > atomic_read(&chunk->avail))
++		if (size > atomic_long_read(&chunk->avail))
+ 			continue;
+ 
+ 		end_bit = chunk_size(chunk) >> order;
+@@ -303,7 +303,7 @@ retry:
+ 
+ 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
+ 		size = nbits << order;
+-		atomic_sub(size, &chunk->avail);
++		atomic_long_sub(size, &chunk->avail);
+ 		break;
+ 	}
+ 	rcu_read_unlock();
+@@ -369,7 +369,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
+ 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
+ 			BUG_ON(remain);
+ 			size = nbits << order;
+-			atomic_add(size, &chunk->avail);
++			atomic_long_add(size, &chunk->avail);
+ 			rcu_read_unlock();
+ 			return;
+ 		}
+@@ -443,7 +443,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
+-		avail += atomic_read(&chunk->avail);
++		avail += atomic_long_read(&chunk->avail);
+ 	rcu_read_unlock();
+ 	return avail;
+ }
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 940ba74b297c..d30864a8ed57 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -6,6 +6,7 @@
+ #include <linux/inet.h>
+ #include <linux/kthread.h>
+ #include <linux/net.h>
++#include <linux/sched.h>
+ #include <linux/slab.h>
+ #include <linux/socket.h>
+ #include <linux/string.h>
+@@ -477,11 +478,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
+ {
+ 	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
+ 	struct socket *sock;
++	unsigned int noio_flag;
+ 	int ret;
+ 
+ 	BUG_ON(con->sock);
++
++	/* sock_create_kern() allocates with GFP_KERNEL */
++	noio_flag = memalloc_noio_save();
+ 	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
+ 			       IPPROTO_TCP, &sock);
++	memalloc_noio_restore(noio_flag);
+ 	if (ret)
+ 		return ret;
+ 	sock->sk->sk_allocation = GFP_NOFS;
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index ddc3573894b0..bc95e48d5cfb 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -1265,7 +1265,6 @@ static int decode_new_up_state_weight(void **p, void *end,
+ 		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+ 		    (xorstate & CEPH_OSD_EXISTS)) {
+ 			pr_info("osd%d does not exist\n", osd);
+-			map->osd_weight[osd] = CEPH_OSD_IN;
+ 			ret = set_primary_affinity(map, osd,
+ 						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+ 			if (ret)
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 33674208d325..4d4213b6f7f6 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1268,6 +1268,7 @@ void netdev_notify_peers(struct net_device *dev)
+ {
+ 	rtnl_lock();
+ 	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
++	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
+ 	rtnl_unlock();
+ }
+ EXPORT_SYMBOL(netdev_notify_peers);
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2894bb5b7e0a..97a1fa140a9b 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3696,13 +3696,14 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
+ 	if (!skb_may_tx_timestamp(sk, false))
+ 		return;
+ 
+-	/* take a reference to prevent skb_orphan() from freeing the socket */
+-	sock_hold(sk);
+-
+-	*skb_hwtstamps(skb) = *hwtstamps;
+-	__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
+-
+-	sock_put(sk);
++	/* Take a reference to prevent skb_orphan() from freeing the socket,
++	 * but only if the socket refcount is not zero.
++	 */
++	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
++		*skb_hwtstamps(skb) = *hwtstamps;
++		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
++		sock_put(sk);
++	}
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
+ 
+@@ -3753,7 +3754,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+ {
+ 	struct sock *sk = skb->sk;
+ 	struct sock_exterr_skb *serr;
+-	int err;
++	int err = 1;
+ 
+ 	skb->wifi_acked_valid = 1;
+ 	skb->wifi_acked = acked;
+@@ -3763,14 +3764,15 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
+ 	serr->ee.ee_errno = ENOMSG;
+ 	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
+ 
+-	/* take a reference to prevent skb_orphan() from freeing the socket */
+-	sock_hold(sk);
+-
+-	err = sock_queue_err_skb(sk, skb);
++	/* Take a reference to prevent skb_orphan() from freeing the socket,
++	 * but only if the socket refcount is not zero.
++	 */
++	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
++		err = sock_queue_err_skb(sk, skb);
++		sock_put(sk);
++	}
+ 	if (err)
+ 		kfree_skb(skb);
+-
+-	sock_put(sk);
+ }
+ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index e369262ea57e..76e0b874f378 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1435,6 +1435,11 @@ static void __sk_free(struct sock *sk)
+ 		pr_debug("%s: optmem leakage (%d bytes) detected\n",
+ 			 __func__, atomic_read(&sk->sk_omem_alloc));
+ 
++	if (sk->sk_frag.page) {
++		put_page(sk->sk_frag.page);
++		sk->sk_frag.page = NULL;
++	}
++
+ 	if (sk->sk_peer_cred)
+ 		put_cred(sk->sk_peer_cred);
+ 	put_pid(sk->sk_peer_pid);
+@@ -2622,11 +2627,6 @@ void sk_common_release(struct sock *sk)
+ 
+ 	sk_refcnt_debug_release(sk);
+ 
+-	if (sk->sk_frag.page) {
+-		put_page(sk->sk_frag.page);
+-		sk->sk_frag.page = NULL;
+-	}
+-
+ 	sock_put(sk);
+ }
+ EXPORT_SYMBOL(sk_common_release);
+diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
+index f053198e730c..5e3a7302f774 100644
+--- a/net/dccp/ccids/ccid2.c
++++ b/net/dccp/ccids/ccid2.c
+@@ -749,6 +749,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
+ 	for (i = 0; i < hc->tx_seqbufc; i++)
+ 		kfree(hc->tx_seqbuf[i]);
+ 	hc->tx_seqbufc = 0;
++	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
+ }
+ 
+ static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index fd7ac7895c38..bafb2223b879 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -289,7 +289,8 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
+ 
+ 	switch (type) {
+ 	case ICMP_REDIRECT:
+-		dccp_do_redirect(skb, sk);
++		if (!sock_owned_by_user(sk))
++			dccp_do_redirect(skb, sk);
+ 		goto out;
+ 	case ICMP_SOURCE_QUENCH:
+ 		/* Just silently ignore these. */
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index a0490508d213..d61027e78e25 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -121,10 +121,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	np = inet6_sk(sk);
+ 
+ 	if (type == NDISC_REDIRECT) {
+-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++		if (!sock_owned_by_user(sk)) {
++			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ 
+-		if (dst)
+-			dst->ops->redirect(dst, sk, skb);
++			if (dst)
++				dst->ops->redirect(dst, sk, skb);
++		}
+ 		goto out;
+ 	}
+ 
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index 838f524cf11a..53c38641fe98 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -122,6 +122,7 @@ struct sock *dccp_create_openreq_child(struct sock *sk,
+ 			/* It is still raw copy of parent, so invalidate
+ 			 * destructor and make plain sk_free() */
+ 			newsk->sk_destruct = NULL;
++			bh_unlock_sock(newsk);
+ 			sk_free(newsk);
+ 			return NULL;
+ 		}
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 765909ba781e..a8db70b7fe45 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1033,7 +1033,8 @@ static void nl_fib_input(struct sk_buff *skb)
+ 
+ 	net = sock_net(skb->sk);
+ 	nlh = nlmsg_hdr(skb);
+-	if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
++	if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
++	    skb->len < nlh->nlmsg_len ||
+ 	    nlmsg_len(nlh) < sizeof(*frn))
+ 		return;
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 6141fef3a64b..6a9a495aff23 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -622,9 +622,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ 	struct fnhe_hash_bucket *hash;
+ 	struct fib_nh_exception *fnhe;
+ 	struct rtable *rt;
++	u32 genid, hval;
+ 	unsigned int i;
+ 	int depth;
+-	u32 hval = fnhe_hashfun(daddr);
++
++	genid = fnhe_genid(dev_net(nh->nh_dev));
++	hval = fnhe_hashfun(daddr);
+ 
+ 	spin_lock_bh(&fnhe_lock);
+ 
+@@ -647,12 +650,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ 	}
+ 
+ 	if (fnhe) {
++		if (fnhe->fnhe_genid != genid)
++			fnhe->fnhe_genid = genid;
+ 		if (gw)
+ 			fnhe->fnhe_gw = gw;
+-		if (pmtu) {
++		if (pmtu)
+ 			fnhe->fnhe_pmtu = pmtu;
+-			fnhe->fnhe_expires = max(1UL, expires);
+-		}
++		fnhe->fnhe_expires = max(1UL, expires);
+ 		/* Update all cached dsts too */
+ 		rt = rcu_dereference(fnhe->fnhe_rth_input);
+ 		if (rt)
+@@ -671,7 +675,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ 			fnhe->fnhe_next = hash->chain;
+ 			rcu_assign_pointer(hash->chain, fnhe);
+ 		}
+-		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
++		fnhe->fnhe_genid = genid;
+ 		fnhe->fnhe_daddr = daddr;
+ 		fnhe->fnhe_gw = gw;
+ 		fnhe->fnhe_pmtu = pmtu;
+@@ -1887,6 +1891,7 @@ int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ {
+ 	int res;
+ 
++	tos &= IPTOS_RT_MASK;
+ 	rcu_read_lock();
+ 
+ 	/* Multicast recognition logic is moved from route cache to here.
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 1ba4d0964042..a1de8300cfce 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2227,6 +2227,8 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	tcp_init_send_head(sk);
+ 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
+ 	__sk_dst_reset(sk);
++	dst_release(sk->sk_rx_dst);
++	sk->sk_rx_dst = NULL;
+ 
+ 	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 95f98d2444fa..4763c431f7d8 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4895,7 +4895,7 @@ static void tcp_check_space(struct sock *sk)
+ 	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
+ 		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
+ 		/* pairs with tcp_poll() */
+-		smp_mb__after_atomic();
++		smp_mb();
+ 		if (sk->sk_socket &&
+ 		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ 			tcp_new_space(sk);
+@@ -5388,6 +5388,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 
+ 	tcp_set_state(sk, TCP_ESTABLISHED);
++	icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 
+ 	if (skb) {
+ 		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
+@@ -5600,7 +5601,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
+ 			 * to stand against the temptation 8)     --ANK
+ 			 */
+ 			inet_csk_schedule_ack(sk);
+-			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 			tcp_enter_quickack_mode(sk);
+ 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+ 						  TCP_DELACK_MAX, TCP_RTO_MAX);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 13b92d595138..2923f7f7932a 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -271,10 +271,13 @@ EXPORT_SYMBOL(tcp_v4_connect);
+  */
+ void tcp_v4_mtu_reduced(struct sock *sk)
+ {
+-	struct dst_entry *dst;
+ 	struct inet_sock *inet = inet_sk(sk);
+-	u32 mtu = tcp_sk(sk)->mtu_info;
++	struct dst_entry *dst;
++	u32 mtu;
+ 
++	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
++		return;
++	mtu = tcp_sk(sk)->mtu_info;
+ 	dst = inet_csk_update_pmtu(sk, mtu);
+ 	if (!dst)
+ 		return;
+@@ -417,7 +420,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
+ 
+ 	switch (type) {
+ 	case ICMP_REDIRECT:
+-		do_redirect(icmp_skb, sk);
++		if (!sock_owned_by_user(sk))
++			do_redirect(icmp_skb, sk);
+ 		goto out;
+ 	case ICMP_SOURCE_QUENCH:
+ 		/* Just silently ignore these. */
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index fec2907b85e8..2c58a5d4cba8 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -462,6 +462,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
+ 		newtp->srtt_us = 0;
+ 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
+ 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
++		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+ 
+ 		newtp->packets_out = 0;
+ 		newtp->retrans_out = 0;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index c8f97858d6f6..f8c6b2343301 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -223,7 +223,8 @@ void tcp_delack_timer_handler(struct sock *sk)
+ 
+ 	sk_mem_reclaim_partial(sk);
+ 
+-	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
++	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+ 		goto out;
+ 
+ 	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
+@@ -504,7 +505,8 @@ void tcp_write_timer_handler(struct sock *sk)
+ 	struct inet_connection_sock *icsk = inet_csk(sk);
+ 	int event;
+ 
+-	if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
++	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
++	    !icsk->icsk_pending)
+ 		goto out;
+ 
+ 	if (time_after(icsk->icsk_timeout, jiffies)) {
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index df4edab0ba3a..8441f9939d49 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3068,6 +3068,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ {
+ 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ 	struct inet6_dev *idev = __in6_dev_get(dev);
++	struct net *net = dev_net(dev);
+ 	int run_pending = 0;
+ 	int err;
+ 
+@@ -3170,7 +3171,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
+ 			 * IPV6_MIN_MTU stop IPv6 on this interface.
+ 			 */
+ 			if (dev->mtu < IPV6_MIN_MTU)
+-				addrconf_ifdown(dev, 1);
++				addrconf_ifdown(dev, dev != net->loopback_dev);
+ 		}
+ 		break;
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index bad62fa5e70f..44bae47fa1b9 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -889,12 +889,12 @@ static int __init inet6_init(void)
+ 	err = register_pernet_subsys(&inet6_net_ops);
+ 	if (err)
+ 		goto register_pernet_fail;
+-	err = icmpv6_init();
+-	if (err)
+-		goto icmp_fail;
+ 	err = ip6_mr_init();
+ 	if (err)
+ 		goto ipmr_fail;
++	err = icmpv6_init();
++	if (err)
++		goto icmp_fail;
+ 	err = ndisc_init();
+ 	if (err)
+ 		goto ndisc_fail;
+@@ -1012,10 +1012,10 @@ igmp_fail:
+ 	ndisc_cleanup();
+ ndisc_fail:
+ 	ip6_mr_cleanup();
+-ipmr_fail:
+-	icmpv6_cleanup();
+ icmp_fail:
+ 	unregister_pernet_subsys(&inet6_net_ops);
++ipmr_fail:
++	icmpv6_cleanup();
+ register_pernet_fail:
+ 	sock_unregister(PF_INET6);
+ 	rtnl_unregister_all(PF_INET6);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index e7a60f5de097..0c9a4cac95ee 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -868,6 +868,8 @@ add:
+ 			ins = &rt->dst.rt6_next;
+ 			iter = *ins;
+ 			while (iter) {
++				if (iter->rt6i_metric > rt->rt6i_metric)
++					break;
+ 				if (rt6_qualify_for_ecmp(iter)) {
+ 					*ins = iter->dst.rt6_next;
+ 					fib6_purge_rt(iter, fn, info->nl_net);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 6ac448d8dd76..c460e653b6a5 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
+ 	struct vti6_net *ip6n = net_generic(net, vti6_net_id);
+ 	int err;
+ 
++	dev->rtnl_link_ops = &vti6_link_ops;
+ 	err = register_netdevice(dev);
+ 	if (err < 0)
+ 		goto out;
+ 
+ 	strcpy(t->parms.name, dev->name);
+-	dev->rtnl_link_ops = &vti6_link_ops;
+ 
+ 	dev_hold(dev);
+ 	vti6_tnl_link(ip6n, t);
+@@ -474,11 +474,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	if (!skb->ignore_df && skb->len > mtu) {
+ 		skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+ 
+-		if (skb->protocol == htons(ETH_P_IPV6))
++		if (skb->protocol == htons(ETH_P_IPV6)) {
++			if (mtu < IPV6_MIN_MTU)
++				mtu = IPV6_MIN_MTU;
++
+ 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+-		else
++		} else {
+ 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ 				  htonl(mtu));
++		}
+ 
+ 		return -EMSGSIZE;
+ 	}
+@@ -681,6 +685,10 @@ vti6_parm_to_user(struct ip6_tnl_parm2 *u, const struct __ip6_tnl_parm *p)
+ 	u->link = p->link;
+ 	u->i_key = p->i_key;
+ 	u->o_key = p->o_key;
++	if (u->i_key)
++		u->i_flags |= GRE_KEY;
++	if (u->o_key)
++		u->o_flags |= GRE_KEY;
+ 	u->proto = p->proto;
+ 
+ 	memcpy(u->name, p->name, sizeof(u->name));
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 85c4b2fff504..295502b261a8 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1092,6 +1092,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
+ 	ipip6_tunnel_link(sitn, t);
+ 	t->parms.iph.ttl = p->iph.ttl;
+ 	t->parms.iph.tos = p->iph.tos;
++	t->parms.iph.frag_off = p->iph.frag_off;
+ 	if (t->parms.link != p->link) {
+ 		t->parms.link = p->link;
+ 		ipip6_tunnel_bind_dev(t->dev);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 0b43bcb6e576..2f0f1b415fbe 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -382,10 +382,12 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
+ 	np = inet6_sk(sk);
+ 
+ 	if (type == NDISC_REDIRECT) {
+-		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
++		if (!sock_owned_by_user(sk)) {
++			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+ 
+-		if (dst)
+-			dst->ops->redirect(dst, sk, skb);
++			if (dst)
++				dst->ops->redirect(dst, sk, skb);
++		}
+ 		goto out;
+ 	}
+ 
+diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
+index acbe61c7e683..160dc89335e2 100644
+--- a/net/irda/irqueue.c
++++ b/net/irda/irqueue.c
+@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
+  *    for deallocating this structure if it's complex. If not the user can
+  *    just supply kfree, which should take care of the job.
+  */
+-#ifdef CONFIG_LOCKDEP
+-static int hashbin_lock_depth = 0;
+-#endif
+ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ {
+ 	irda_queue_t* queue;
+@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ 	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);
+ 
+ 	/* Synchronize */
+-	if ( hashbin->hb_type & HB_LOCK ) {
+-		spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
+-					 hashbin_lock_depth++);
+-	}
++	if (hashbin->hb_type & HB_LOCK)
++		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+ 
+ 	/*
+ 	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
+ 	 *  it has been shown to work
+ 	 */
+ 	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
+-		queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+-		while (queue ) {
+-			if (free_func)
+-				(*free_func)(queue);
+-			queue = dequeue_first(
+-				(irda_queue_t**) &hashbin->hb_queue[i]);
++		while (1) {
++			queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
++
++			if (!queue)
++				break;
++
++			if (free_func) {
++				if (hashbin->hb_type & HB_LOCK)
++					spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
++				free_func(queue);
++				if (hashbin->hb_type & HB_LOCK)
++					spin_lock_irqsave(&hashbin->hb_spinlock, flags);
++			}
+ 		}
+ 	}
+ 
+@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
+ 	hashbin->magic = ~HB_MAGIC;
+ 
+ 	/* Release lock */
+-	if ( hashbin->hb_type & HB_LOCK) {
++	if (hashbin->hb_type & HB_LOCK)
+ 		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+-#ifdef CONFIG_LOCKDEP
+-		hashbin_lock_depth--;
+-#endif
+-	}
+ 
+ 	/*
+ 	 *  Free the hashbin structure
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 8a1d1542eb2f..0c4de8dd58bf 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1859,7 +1859,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
+ 
+ 	rcu_read_lock_bh();
+ 	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+-		(void)l2tp_tunnel_delete(tunnel);
++		l2tp_tunnel_delete(tunnel);
+ 	}
+ 	rcu_read_unlock_bh();
+ }
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 44ee0683b14b..13c5a7ca0482 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -381,7 +381,7 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
+ drop:
+ 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS);
+ 	kfree_skb(skb);
+-	return -1;
++	return 0;
+ }
+ 
+ /* Userspace will call sendmsg() on the tunnel socket to send L2TP
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 4c0ce67329ca..1e412ad6ced5 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -285,7 +285,7 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
+ 	l2tp_tunnel_notify(&l2tp_nl_family, info,
+ 			   tunnel, L2TP_CMD_TUNNEL_DELETE);
+ 
+-	(void) l2tp_tunnel_delete(tunnel);
++	l2tp_tunnel_delete(tunnel);
+ 
+ out:
+ 	return ret;
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index 81a61fce3afb..841026e02ce8 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
+ 		 * another trick required to cope with how the PROCOM state
+ 		 * machine works. -acme
+ 		 */
++		skb_orphan(skb);
++		sock_hold(sk);
+ 		skb->sk = sk;
++		skb->destructor = sock_efree;
+ 	}
+ 	if (!sock_owned_by_user(sk))
+ 		llc_conn_rcv(sk, skb);
+diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
+index d0e1e804ebd7..5404d0d195cc 100644
+--- a/net/llc/llc_sap.c
++++ b/net/llc/llc_sap.c
+@@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
+ 
+ 	ev->type   = LLC_SAP_EV_TYPE_PDU;
+ 	ev->reason = 0;
++	skb_orphan(skb);
++	sock_hold(sk);
+ 	skb->sk = sk;
++	skb->destructor = sock_efree;
+ 	llc_sap_state_process(sap, skb);
+ }
+ 
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 48257f17688f..2ce35082a335 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -289,8 +289,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
+ 	/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
+ 	*pos |= ifmsh->ps_peers_deep_sleep ?
+ 			IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
+-	*pos++ = 0x00;
+-
+ 	return 0;
+ }
+ 
+diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
+index 1f93a5978f2a..398375098efb 100644
+--- a/net/mpls/af_mpls.c
++++ b/net/mpls/af_mpls.c
+@@ -1098,6 +1098,7 @@ static void mpls_net_exit(struct net *net)
+ 	for (index = 0; index < platform_labels; index++) {
+ 		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ 		RCU_INIT_POINTER(platform_label[index], NULL);
++		mpls_notify_route(net, index, rt, NULL, NULL);
+ 		mpls_rt_free(rt);
+ 	}
+ 	rtnl_unlock();
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 4cd7e27e3fc4..7f5d147aff63 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1447,13 +1447,16 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
+ 		return -EINVAL;
+ 	}
+ 
++	mutex_lock(&fanout_mutex);
++
++	err = -EINVAL;
+ 	if (!po->running)
+-		return -EINVAL;
++		goto out;
+ 
++	err = -EALREADY;
+ 	if (po->fanout)
+-		return -EALREADY;
++		goto out;
+ 
+-	mutex_lock(&fanout_mutex);
+ 	match = NULL;
+ 	list_for_each_entry(f, &fanout_list, list) {
+ 		if (f->id == id &&
+@@ -1509,17 +1512,16 @@ static void fanout_release(struct sock *sk)
+ 	struct packet_sock *po = pkt_sk(sk);
+ 	struct packet_fanout *f;
+ 
+-	f = po->fanout;
+-	if (!f)
+-		return;
+-
+ 	mutex_lock(&fanout_mutex);
+-	po->fanout = NULL;
++	f = po->fanout;
++	if (f) {
++		po->fanout = NULL;
+ 
+-	if (atomic_dec_and_test(&f->sk_ref)) {
+-		list_del(&f->list);
+-		dev_remove_pack(&f->prot_hook);
+-		kfree(f);
++		if (atomic_dec_and_test(&f->sk_ref)) {
++			list_del(&f->list);
++			dev_remove_pack(&f->prot_hook);
++			kfree(f);
++		}
+ 	}
+ 	mutex_unlock(&fanout_mutex);
+ }
+@@ -2728,6 +2730,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 	if (need_rehook) {
+ 		if (po->running) {
+ 			rcu_read_unlock();
++			/* prevents packet_notifier() from calling
++			 * register_prot_hook()
++			 */
++			po->num = 0;
+ 			__unregister_prot_hook(sk, true);
+ 			rcu_read_lock();
+ 			dev_curr = po->prot_hook.dev;
+@@ -2736,6 +2742,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 								 dev->ifindex);
+ 		}
+ 
++		BUG_ON(po->running);
+ 		po->num = proto;
+ 		po->prot_hook.type = proto;
+ 
+@@ -2779,7 +2786,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 			    int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+-	char name[15];
++	char name[sizeof(uaddr->sa_data) + 1];
+ 
+ 	/*
+ 	 *	Check legality
+@@ -2787,7 +2794,11 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 
+ 	if (addr_len != sizeof(struct sockaddr))
+ 		return -EINVAL;
+-	strlcpy(name, uaddr->sa_data, sizeof(name));
++	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
++	 * zero-terminated.
++	 */
++	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
++	name[sizeof(uaddr->sa_data)] = 0;
+ 
+ 	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
+ }
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index 3738b1920c09..612c3050d514 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -184,7 +184,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
+ 	long i;
+ 	int ret;
+ 
+-	if (rs->rs_bound_addr == 0) {
++	if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
+ 		ret = -ENOTCONN; /* XXX not a great errno */
+ 		goto out;
+ 	}
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index f8d9c2a2c451..c9387f62f634 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -802,10 +802,8 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ 		goto out_module_put;
+ 
+ 	err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
+-	if (err < 0)
++	if (err <= 0)
+ 		goto out_module_put;
+-	if (err == 0)
+-		goto noflush_out;
+ 
+ 	nla_nest_end(skb, nest);
+ 
+@@ -822,7 +820,6 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
+ out_module_put:
+ 	module_put(a.ops->owner);
+ err_out:
+-noflush_out:
+ 	kfree_skb(skb);
+ 	return err;
+ }
+diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
+index 295d14bd6c67..85e3207d047a 100644
+--- a/net/sched/act_connmark.c
++++ b/net/sched/act_connmark.c
+@@ -105,6 +105,9 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
+ 	if (ret < 0)
+ 		return ret;
+ 
++	if (!tb[TCA_CONNMARK_PARMS])
++		return -EINVAL;
++
+ 	parm = nla_data(tb[TCA_CONNMARK_PARMS]);
+ 
+ 	if (!tcf_hash_check(parm->index, a, bind)) {
+diff --git a/net/sctp/debug.c b/net/sctp/debug.c
+index 95d7b15dad21..e371a0d90068 100644
+--- a/net/sctp/debug.c
++++ b/net/sctp/debug.c
+@@ -166,7 +166,7 @@ static const char *const sctp_timer_tbl[] = {
+ /* Lookup timer debug name. */
+ const char *sctp_tname(const sctp_subtype_t id)
+ {
+-	if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
++	if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
+ 		return sctp_timer_tbl[id.timeout];
+ 	return "unknown_timer";
+ }
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index cb7193ed4284..946d1c28f93f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -82,8 +82,8 @@
+ /* Forward declarations for internal helper functions. */
+ static int sctp_writeable(struct sock *sk);
+ static void sctp_wfree(struct sk_buff *skb);
+-static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
+-				size_t msg_len);
++static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
++				size_t msg_len, struct sock **orig_sk);
+ static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
+ static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
+ static int sctp_wait_for_accept(struct sock *sk, long timeo);
+@@ -1947,9 +1947,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
+ 
+ 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ 	if (!sctp_wspace(asoc)) {
+-		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
+-		if (err)
++		/* sk can be changed by peel off when waiting for buf. */
++		err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
++		if (err) {
++			if (err == -ESRCH) {
++				/* asoc is already dead. */
++				new_asoc = NULL;
++				err = -EPIPE;
++			}
+ 			goto out_free;
++		}
+ 	}
+ 
+ 	/* If an address is passed with the sendto/sendmsg call, it is used
+@@ -6972,7 +6979,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
+ 
+ /* Helper function to wait for space in the sndbuf.  */
+ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+-				size_t msg_len)
++				size_t msg_len, struct sock **orig_sk)
+ {
+ 	struct sock *sk = asoc->base.sk;
+ 	int err = 0;
+@@ -6989,10 +6996,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 	for (;;) {
+ 		prepare_to_wait_exclusive(&asoc->wait, &wait,
+ 					  TASK_INTERRUPTIBLE);
++		if (asoc->base.dead)
++			goto do_dead;
+ 		if (!*timeo_p)
+ 			goto do_nonblock;
+-		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
+-		    asoc->base.dead)
++		if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
+ 			goto do_error;
+ 		if (signal_pending(current))
+ 			goto do_interrupted;
+@@ -7006,11 +7014,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
+ 		current_timeo = schedule_timeout(current_timeo);
+ 		BUG_ON(sk != asoc->base.sk);
+ 		lock_sock(sk);
++		if (sk != asoc->base.sk) {
++			release_sock(sk);
++			sk = asoc->base.sk;
++			lock_sock(sk);
++		}
+ 
+ 		*timeo_p = current_timeo;
+ 	}
+ 
+ out:
++	*orig_sk = sk;
+ 	finish_wait(&asoc->wait, &wait);
+ 
+ 	/* Release the association's refcnt.  */
+@@ -7018,6 +7032,10 @@ out:
+ 
+ 	return err;
+ 
++do_dead:
++	err = -ESRCH;
++	goto out;
++
+ do_error:
+ 	err = -EPIPE;
+ 	goto out;
+diff --git a/net/socket.c b/net/socket.c
+index e66e4f357506..12d681c1b66d 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1706,6 +1706,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ 	/* We assume all kernel code knows the size of sockaddr_storage */
+ 	msg.msg_namelen = 0;
+ 	msg.msg_iocb = NULL;
++	msg.msg_flags = 0;
+ 	if (sock->file->f_flags & O_NONBLOCK)
+ 		flags |= MSG_DONTWAIT;
+ 	err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
+@@ -2192,8 +2193,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ 		return err;
+ 
+ 	err = sock_error(sock->sk);
+-	if (err)
++	if (err) {
++		datagrams = err;
+ 		goto out_put;
++	}
+ 
+ 	entry = mmsg;
+ 	compat_entry = (struct compat_mmsghdr __user *)mmsg;
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 337ca851a350..d1907b3cf832 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -273,10 +273,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
+ 
+ static void rpc_set_active(struct rpc_task *task)
+ {
+-	trace_rpc_task_begin(task->tk_client, task, NULL);
+-
+ 	rpc_task_set_debuginfo(task);
+ 	set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
++	trace_rpc_task_begin(task->tk_client, task, NULL);
+ }
+ 
+ /*
+diff --git a/net/tipc/server.c b/net/tipc/server.c
+index d0d619813684..7c52cb5781ab 100644
+--- a/net/tipc/server.c
++++ b/net/tipc/server.c
+@@ -614,14 +614,12 @@ int tipc_server_start(struct tipc_server *s)
+ void tipc_server_stop(struct tipc_server *s)
+ {
+ 	struct tipc_conn *con;
+-	int total = 0;
+ 	int id;
+ 
+ 	spin_lock_bh(&s->idr_lock);
+-	for (id = 0; total < s->idr_in_use; id++) {
++	for (id = 0; s->idr_in_use; id++) {
+ 		con = idr_find(&s->conn_idr, id);
+ 		if (con) {
+-			total++;
+ 			spin_unlock_bh(&s->idr_lock);
+ 			tipc_close_conn(con);
+ 			spin_lock_bh(&s->idr_lock);
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 6a0d48525fcf..c36757e72844 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
++		BUG_ON(!atomic_long_read(&u->inflight));
+ 		BUG_ON(list_empty(&u->link));
+ 
+ 		if (atomic_long_dec_and_test(&u->inflight))
+@@ -341,6 +342,14 @@ void unix_gc(void)
+ 	}
+ 	list_del(&cursor);
+ 
++	/* Now gc_candidates contains only garbage.  Restore original
++	 * inflight counters for these as well, and remove the skbuffs
++	 * which are creating the cycle(s).
++	 */
++	skb_queue_head_init(&hitlist);
++	list_for_each_entry(u, &gc_candidates, link)
++		scan_children(&u->sk, inc_inflight, &hitlist);
++
+ 	/* not_cycle_list contains those sockets which do not make up a
+ 	 * cycle.  Restore these to the inflight list.
+ 	 */
+@@ -350,14 +359,6 @@ void unix_gc(void)
+ 		list_move_tail(&u->link, &gc_inflight_list);
+ 	}
+ 
+-	/* Now gc_candidates contains only garbage.  Restore original
+-	 * inflight counters for these as well, and remove the skbuffs
+-	 * which are creating the cycle(s).
+-	 */
+-	skb_queue_head_init(&hitlist);
+-	list_for_each_entry(u, &gc_candidates, link)
+-	scan_children(&u->sk, inc_inflight, &hitlist);
+-
+ 	spin_unlock(&unix_gc_lock);
+ 
+ 	/* Here we are. Hitlist is filled. Die. */
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 04a025218d13..7306683a7207 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1352,6 +1352,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
+ 		newp->xfrm_nr = old->xfrm_nr;
+ 		newp->index = old->index;
+ 		newp->type = old->type;
++		newp->family = old->family;
+ 		memcpy(newp->xfrm_vec, old->xfrm_vec,
+ 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
+ 		write_lock_bh(&net->xfrm.xfrm_policy_lock);
+@@ -2963,6 +2964,11 @@ static int __net_init xfrm_net_init(struct net *net)
+ {
+ 	int rv;
+ 
++	/* Initialize the per-net locks here */
++	spin_lock_init(&net->xfrm.xfrm_state_lock);
++	rwlock_init(&net->xfrm.xfrm_policy_lock);
++	mutex_init(&net->xfrm.xfrm_cfg_mutex);
++
+ 	rv = xfrm_statistics_init(net);
+ 	if (rv < 0)
+ 		goto out_statistics;
+@@ -2979,11 +2985,6 @@ static int __net_init xfrm_net_init(struct net *net)
+ 	if (rv < 0)
+ 		goto out;
+ 
+-	/* Initialize the per-net locks here */
+-	spin_lock_init(&net->xfrm.xfrm_state_lock);
+-	rwlock_init(&net->xfrm.xfrm_policy_lock);
+-	mutex_init(&net->xfrm.xfrm_cfg_mutex);
+-
+ 	return 0;
+ 
+ out:
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 18d9cf2eb648..30593cadd428 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -386,7 +386,14 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
+ 	up = nla_data(rp);
+ 	ulen = xfrm_replay_state_esn_len(up);
+ 
+-	if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
++	/* Check the overall length and the internal bitmap length to avoid
++	 * potential overflow. */
++	if (nla_len(rp) < ulen ||
++	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
++	    replay_esn->bmp_len != up->bmp_len)
++		return -EINVAL;
++
++	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
+ 		return -EINVAL;
+ 
+ 	return 0;
+diff --git a/scripts/module-common.lds b/scripts/module-common.lds
+index 73a2c7da0e55..53234e85192a 100644
+--- a/scripts/module-common.lds
++++ b/scripts/module-common.lds
+@@ -19,4 +19,6 @@ SECTIONS {
+ 
+ 	. = ALIGN(8);
+ 	.init_array		0 : { *(SORT(.init_array.*)) *(.init_array) }
++
++	__jump_table		0 : ALIGN(8) { KEEP(*(__jump_table)) }
+ }
+diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
+index eeee00dce729..cf837338a85e 100644
+--- a/security/integrity/ima/ima_main.c
++++ b/security/integrity/ima/ima_main.c
+@@ -52,6 +52,8 @@ static int __init hash_setup(char *str)
+ 			ima_hash_algo = HASH_ALGO_SHA1;
+ 		else if (strncmp(str, "md5", 3) == 0)
+ 			ima_hash_algo = HASH_ALGO_MD5;
++		else
++			return 1;
+ 		goto out;
+ 	}
+ 
+@@ -61,6 +63,8 @@ static int __init hash_setup(char *str)
+ 			break;
+ 		}
+ 	}
++	if (i == HASH_ALGO__LAST)
++		return 1;
+ out:
+ 	hash_setup_done = 1;
+ 	return 1;
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index a2d29cca16c6..c804189d0d03 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -99,7 +99,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
+ 	payload = NULL;
+ 
+ 	vm = false;
+-	if (_payload) {
++	if (plen) {
+ 		ret = -ENOMEM;
+ 		payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
+ 		if (!payload) {
+@@ -333,7 +333,7 @@ long keyctl_update_key(key_serial_t id,
+ 
+ 	/* pull the payload in if one was supplied */
+ 	payload = NULL;
+-	if (_payload) {
++	if (plen) {
+ 		ret = -ENOMEM;
+ 		payload = kmalloc(plen, GFP_KERNEL);
+ 		if (!payload)
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index f6e7fdd354de..7c60cb289719 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -149,7 +149,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
+ 				err = -ENXIO;
+ 				goto _error;
+ 			}
++			mutex_lock(&pcm->open_mutex);
+ 			err = snd_pcm_info_user(substream, info);
++			mutex_unlock(&pcm->open_mutex);
+ 		_error:
+ 			mutex_unlock(&register_mutex);
+ 			return err;
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index a2468f1101d1..0e6210000fa9 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
+ 	unsigned long freq;
+ 
+ 	t = tmr->timeri->timer;
+-	if (snd_BUG_ON(!t))
++	if (!t)
+ 		return -EINVAL;
+ 
+ 	freq = tmr->preferred_resolution;
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 3ebfaa527825..2ee449fbe55f 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -203,6 +203,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
+ 				    int index, char *buf, int maxlen)
+ {
+ 	int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
++
++	if (len < 0)
++		return 0;
++
+ 	buf[len] = 0;
+ 	return len;
+ }
+@@ -2096,13 +2100,14 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
+ 	if (len)
+ 		;
+ 	else if (nameid)
+-		snd_usb_copy_string_desc(state, nameid, kctl->id.name,
++		len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
+ 					 sizeof(kctl->id.name));
+-	else {
++	else
+ 		len = get_term_name(state, &state->oterm,
+ 				    kctl->id.name, sizeof(kctl->id.name), 0);
+-		if (!len)
+-			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
++
++	if (!len) {
++		strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
+ 
+ 		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
+ 			append_ctl_name(kctl, " Clock Source");
+diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
+index 408bb076a234..db4a1eb5af7d 100644
+--- a/tools/hv/hv_kvp_daemon.c
++++ b/tools/hv/hv_kvp_daemon.c
+@@ -195,11 +195,14 @@ static void kvp_update_mem_state(int pool)
+ 	for (;;) {
+ 		readp = &record[records_read];
+ 		records_read += fread(readp, sizeof(struct kvp_record),
+-					ENTRIES_PER_BLOCK * num_blocks,
+-					filep);
++				ENTRIES_PER_BLOCK * num_blocks - records_read,
++				filep);
+ 
+ 		if (ferror(filep)) {
+-			syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
++			syslog(LOG_ERR,
++				"Failed to read file, pool: %d; error: %d %s",
++				 pool, errno, strerror(errno));
++			kvp_release_lock(pool);
+ 			exit(EXIT_FAILURE);
+ 		}
+ 
+@@ -212,6 +215,7 @@ static void kvp_update_mem_state(int pool)
+ 
+ 			if (record == NULL) {
+ 				syslog(LOG_ERR, "malloc failed");
++				kvp_release_lock(pool);
+ 				exit(EXIT_FAILURE);
+ 			}
+ 			continue;
+@@ -226,15 +230,11 @@ static void kvp_update_mem_state(int pool)
+ 	fclose(filep);
+ 	kvp_release_lock(pool);
+ }
++
+ static int kvp_file_init(void)
+ {
+ 	int  fd;
+-	FILE *filep;
+-	size_t records_read;
+ 	char *fname;
+-	struct kvp_record *record;
+-	struct kvp_record *readp;
+-	int num_blocks;
+ 	int i;
+ 	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
+ 
+@@ -248,61 +248,19 @@ static int kvp_file_init(void)
+ 
+ 	for (i = 0; i < KVP_POOL_COUNT; i++) {
+ 		fname = kvp_file_info[i].fname;
+-		records_read = 0;
+-		num_blocks = 1;
+ 		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
+ 		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
+ 
+ 		if (fd == -1)
+ 			return 1;
+ 
+-
+-		filep = fopen(fname, "re");
+-		if (!filep) {
+-			close(fd);
+-			return 1;
+-		}
+-
+-		record = malloc(alloc_unit * num_blocks);
+-		if (record == NULL) {
+-			fclose(filep);
+-			close(fd);
+-			return 1;
+-		}
+-		for (;;) {
+-			readp = &record[records_read];
+-			records_read += fread(readp, sizeof(struct kvp_record),
+-					ENTRIES_PER_BLOCK,
+-					filep);
+-
+-			if (ferror(filep)) {
+-				syslog(LOG_ERR, "Failed to read file, pool: %d",
+-				       i);
+-				exit(EXIT_FAILURE);
+-			}
+-
+-			if (!feof(filep)) {
+-				/*
+-				 * We have more data to read.
+-				 */
+-				num_blocks++;
+-				record = realloc(record, alloc_unit *
+-						num_blocks);
+-				if (record == NULL) {
+-					fclose(filep);
+-					close(fd);
+-					return 1;
+-				}
+-				continue;
+-			}
+-			break;
+-		}
+ 		kvp_file_info[i].fd = fd;
+-		kvp_file_info[i].num_blocks = num_blocks;
+-		kvp_file_info[i].records = record;
+-		kvp_file_info[i].num_records = records_read;
+-		fclose(filep);
+-
++		kvp_file_info[i].num_blocks = 1;
++		kvp_file_info[i].records = malloc(alloc_unit);
++		if (kvp_file_info[i].records == NULL)
++			return 1;
++		kvp_file_info[i].num_records = 0;
++		kvp_update_mem_state(i);
+ 	}
+ 
+ 	return 0;
+diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
+index 41446668ccce..d5677d39c1e4 100644
+--- a/tools/include/linux/log2.h
++++ b/tools/include/linux/log2.h
+@@ -12,12 +12,6 @@
+ #ifndef _TOOLS_LINUX_LOG2_H
+ #define _TOOLS_LINUX_LOG2_H
+ 
+-/*
+- * deal with unrepresentable constant logarithms
+- */
+-extern __attribute__((const, noreturn))
+-int ____ilog2_NaN(void);
+-
+ /*
+  * non-constant log of base 2 calculators
+  * - the arch may override these in asm/bitops.h if they can be implemented
+@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ #define ilog2(n)				\
+ (						\
+ 	__builtin_constant_p(n) ? (		\
+-		(n) < 1 ? ____ilog2_NaN() :	\
++		(n) < 2 ? 0 :			\
+ 		(n) & (1ULL << 63) ? 63 :	\
+ 		(n) & (1ULL << 62) ? 62 :	\
+ 		(n) & (1ULL << 61) ? 61 :	\
+@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
+ 		(n) & (1ULL <<  4) ?  4 :	\
+ 		(n) & (1ULL <<  3) ?  3 :	\
+ 		(n) & (1ULL <<  2) ?  2 :	\
+-		(n) & (1ULL <<  1) ?  1 :	\
+-		(n) & (1ULL <<  0) ?  0 :	\
+-		____ilog2_NaN()			\
+-				   ) :		\
++		1 ) :				\
+ 	(sizeof(n) <= 4) ?			\
+ 	__ilog2_u32(n) :			\
+ 	__ilog2_u64(n)				\
+diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c
+index 2dfc9ad0e6f2..8e92b56c610a 100644
+--- a/tools/perf/tests/attr.c
++++ b/tools/perf/tests/attr.c
+@@ -150,7 +150,7 @@ static int run_dir(const char *d, const char *perf)
+ 	snprintf(cmd, 3*PATH_MAX, PYTHON " %s/attr.py -d %s/attr/ -p %s %.*s",
+ 		 d, d, perf, vcnt, v);
+ 
+-	return system(cmd);
++	return system(cmd) ? TEST_FAIL : TEST_OK;
+ }
+ 
+ int test__attr(void)
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 99378a5c57a7..28ee003beee1 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -194,7 +194,7 @@ void symbols__fixup_end(struct rb_root *symbols)
+ 
+ 	/* Last entry */
+ 	if (curr->end == curr->start)
+-		curr->end = roundup(curr->start, 4096);
++		curr->end = roundup(curr->start, 4096) + 4096;
+ }
+ 
+ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
+diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
+index f7997affd143..f45cee80c58b 100644
+--- a/tools/testing/selftests/powerpc/harness.c
++++ b/tools/testing/selftests/powerpc/harness.c
+@@ -109,9 +109,11 @@ int test_harness(int (test_function)(void), char *name)
+ 
+ 	rc = run_test(test_function, name);
+ 
+-	if (rc == MAGIC_SKIP_RETURN_VALUE)
++	if (rc == MAGIC_SKIP_RETURN_VALUE) {
+ 		test_skip(name);
+-	else
++		/* so that skipped test is not marked as failed */
++		rc = 0;
++	} else
+ 		test_finish(name, rc);
+ 
+ 	return rc;
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 79db45336e3a..962bf7371cee 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -862,7 +862,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
+ 			continue;
+ 
+ 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+-		kvm->buses[bus_idx]->ioeventfd_count--;
++		if (kvm->buses[bus_idx])
++			kvm->buses[bus_idx]->ioeventfd_count--;
+ 		ioeventfd_release(p);
+ 		ret = 0;
+ 		break;
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index f9746f29f870..457719410ab4 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -600,8 +600,11 @@ static void kvm_destroy_vm(struct kvm *kvm)
+ 	list_del(&kvm->vm_list);
+ 	spin_unlock(&kvm_lock);
+ 	kvm_free_irq_routing(kvm);
+-	for (i = 0; i < KVM_NR_BUSES; i++)
+-		kvm_io_bus_destroy(kvm->buses[i]);
++	for (i = 0; i < KVM_NR_BUSES; i++) {
++		if (kvm->buses[i])
++			kvm_io_bus_destroy(kvm->buses[i]);
++		kvm->buses[i] = NULL;
++	}
+ 	kvm_coalesced_mmio_free(kvm);
+ #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
+ 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
+@@ -3038,6 +3041,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ 	};
+ 
+ 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
+ 	return r < 0 ? r : 0;
+ }
+@@ -3055,6 +3060,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
+ 	};
+ 
+ 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 
+ 	/* First try the device referenced by cookie. */
+ 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
+@@ -3105,6 +3112,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
+ 	};
+ 
+ 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
++	if (!bus)
++		return -ENOMEM;
+ 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
+ 	return r < 0 ? r : 0;
+ }
+@@ -3117,6 +3126,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm->buses[bus_idx];
++	if (!bus)
++		return -ENOMEM;
++
+ 	/* exclude ioeventfd which is limited by maximum fd */
+ 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
+ 		return -ENOSPC;
+@@ -3136,37 +3148,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ }
+ 
+ /* Caller must hold slots_lock. */
+-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+-			      struct kvm_io_device *dev)
++void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
++			       struct kvm_io_device *dev)
+ {
+-	int i, r;
++	int i;
+ 	struct kvm_io_bus *new_bus, *bus;
+ 
+ 	bus = kvm->buses[bus_idx];
+-	r = -ENOENT;
++	if (!bus)
++		return;
++
+ 	for (i = 0; i < bus->dev_count; i++)
+ 		if (bus->range[i].dev == dev) {
+-			r = 0;
+ 			break;
+ 		}
+ 
+-	if (r)
+-		return r;
++	if (i == bus->dev_count)
++		return;
+ 
+ 	new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
+ 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
+-	if (!new_bus)
+-		return -ENOMEM;
++	if (!new_bus)  {
++		pr_err("kvm: failed to shrink bus, removing it completely\n");
++		goto broken;
++	}
+ 
+ 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ 	new_bus->dev_count--;
+ 	memcpy(new_bus->range + i, bus->range + i + 1,
+ 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+ 
++broken:
+ 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ 	synchronize_srcu_expedited(&kvm->srcu);
+ 	kfree(bus);
+-	return r;
++	return;
+ }
+ 
+ static struct notifier_block kvm_cpu_notifier = {


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-12-15 20:22 Alice Ferrazzi
  0 siblings, 0 replies; 71+ messages in thread
From: Alice Ferrazzi @ 2017-12-15 20:22 UTC (permalink / raw)
  To: gentoo-commits

commit:     4f5e8935a83b7b615e64287b1ba50633ac6e39b1
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Fri Dec 15 20:22:14 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Fri Dec 15 20:22:14 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4f5e8935

Linux kernel 4.1.48

 0000_README             |    4 +
 1047_linux-4.1.48.patch | 3434 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3438 insertions(+)

diff --git a/0000_README b/0000_README
index b37932b..1b3166c 100644
--- a/0000_README
+++ b/0000_README
@@ -231,6 +231,10 @@ Patch:  1046_linux-4.1.47.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.47
 
+Patch:  1047_linux-4.1.48.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.48
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1047_linux-4.1.48.patch b/1047_linux-4.1.48.patch
new file mode 100644
index 0000000..bf1334d
--- /dev/null
+++ b/1047_linux-4.1.48.patch
@@ -0,0 +1,3434 @@
+diff --git a/Makefile b/Makefile
+index c730c9719f6d..97edf556bfe4 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 47
++SUBLEVEL = 48
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
+index 9fe8e241335c..e1f6f0daa847 100644
+--- a/arch/arm/mm/dump.c
++++ b/arch/arm/mm/dump.c
+@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
+ 		.val	= PMD_SECT_USER,
+ 		.set	= "USR",
+ 	}, {
+-		.mask	= L_PMD_SECT_RDONLY,
+-		.val	= L_PMD_SECT_RDONLY,
++		.mask	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
++		.val	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ 		.set	= "ro",
+ 		.clear	= "RW",
+ #elif __LINUX_ARM_ARCH__ >= 6
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index be92fa0f2f35..10058c00ddd9 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -609,8 +609,8 @@ static struct section_perm ro_perms[] = {
+ 		.start  = (unsigned long)_stext,
+ 		.end    = (unsigned long)__init_begin,
+ #ifdef CONFIG_ARM_LPAE
+-		.mask   = ~L_PMD_SECT_RDONLY,
+-		.prot   = L_PMD_SECT_RDONLY,
++		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
++		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ #else
+ 		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+ 		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
+diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
+index 0e4ade342333..5c2e12923327 100644
+--- a/arch/mips/bcm47xx/leds.c
++++ b/arch/mips/bcm47xx/leds.c
+@@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
+ /* Verified on: WRT54GS V1.0 */
+ static const struct gpio_led
+ bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
+-	BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
++	BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ 	BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ 	BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
+ };
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index 5c3aa41a162f..81a13fd56d13 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -561,6 +561,19 @@ static const struct user_regset_view user_mips64_view = {
+ 	.n		= ARRAY_SIZE(mips64_regsets),
+ };
+ 
++#ifdef CONFIG_MIPS32_N32
++
++static const struct user_regset_view user_mipsn32_view = {
++	.name		= "mipsn32",
++	.e_flags	= EF_MIPS_ABI2,
++	.e_machine	= ELF_ARCH,
++	.ei_osabi	= ELF_OSABI,
++	.regsets	= mips64_regsets,
++	.n		= ARRAY_SIZE(mips64_regsets),
++};
++
++#endif /* CONFIG_MIPS32_N32 */
++
+ #endif /* CONFIG_64BIT */
+ 
+ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+@@ -571,6 +584,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+ #ifdef CONFIG_MIPS32_O32
+ 	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ 		return &user_mips_view;
++#endif
++#ifdef CONFIG_MIPS32_N32
++	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
++		return &user_mipsn32_view;
+ #endif
+ 	return &user_mips64_view;
+ #endif
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 51e77841f9f6..2734f6a4857d 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -147,35 +147,6 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add
+ 	add_memory_region(start, size, BOOT_MEM_RAM);
+ }
+ 
+-bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
+-{
+-	int i;
+-	bool in_ram = false, free = true;
+-
+-	for (i = 0; i < boot_mem_map.nr_map; i++) {
+-		phys_addr_t start_, end_;
+-
+-		start_ = boot_mem_map.map[i].addr;
+-		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;
+-
+-		switch (boot_mem_map.map[i].type) {
+-		case BOOT_MEM_RAM:
+-			if (start >= start_ && start + size <= end_)
+-				in_ram = true;
+-			break;
+-		case BOOT_MEM_RESERVED:
+-			if ((start >= start_ && start < end_) ||
+-			    (start < start_ && start + size >= start_))
+-				free = false;
+-			break;
+-		default:
+-			continue;
+-		}
+-	}
+-
+-	return in_ram && free;
+-}
+-
+ static void __init print_memory_map(void)
+ {
+ 	int i;
+@@ -324,19 +295,11 @@ static void __init bootmem_init(void)
+ 
+ #else  /* !CONFIG_SGI_IP27 */
+ 
+-static unsigned long __init bootmap_bytes(unsigned long pages)
+-{
+-	unsigned long bytes = DIV_ROUND_UP(pages, 8);
+-
+-	return ALIGN(bytes, sizeof(long));
+-}
+-
+ static void __init bootmem_init(void)
+ {
+ 	unsigned long reserved_end;
+ 	unsigned long mapstart = ~0UL;
+ 	unsigned long bootmap_size;
+-	bool bootmap_valid = false;
+ 	int i;
+ 
+ 	/*
+@@ -411,43 +374,12 @@ static void __init bootmem_init(void)
+ 		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+ #endif
+ 
+-	/*
+-	 * check that mapstart doesn't overlap with any of
+-	 * memory regions that have been reserved through eg. DTB
+-	 */
+-	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
+-
+-	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
+-						bootmap_size);
+-	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
+-		unsigned long mapstart_addr;
+-
+-		switch (boot_mem_map.map[i].type) {
+-		case BOOT_MEM_RESERVED:
+-			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
+-						boot_mem_map.map[i].size);
+-			if (PHYS_PFN(mapstart_addr) < mapstart)
+-				break;
+-
+-			bootmap_valid = memory_region_available(mapstart_addr,
+-								bootmap_size);
+-			if (bootmap_valid)
+-				mapstart = PHYS_PFN(mapstart_addr);
+-			break;
+-		default:
+-			break;
+-		}
+-	}
+-
+-	if (!bootmap_valid)
+-		panic("No memory area to place a bootmap bitmap");
+-
+ 	/*
+ 	 * Initialize the boot-time allocator with low memory only.
+ 	 */
+-	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
+-					 min_low_pfn, max_low_pfn))
+-		panic("Unexpected memory size required for bootmap");
++	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
++					 min_low_pfn, max_low_pfn);
++
+ 
+ 	for (i = 0; i < boot_mem_map.nr_map; i++) {
+ 		unsigned long start, end;
+diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
+index c57215a66181..e916b14234e6 100644
+--- a/arch/mips/ralink/mt7620.c
++++ b/arch/mips/ralink/mt7620.c
+@@ -132,8 +132,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+ 	FUNC("i2c", 0, 4, 2),
+ };
+ 
+-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
+-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
++static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
++static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
+ static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
+ static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+ 
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 95f090fe385a..196395a0ac91 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -688,15 +688,15 @@ cas_action:
+ 	/* ELF32 Process entry path */
+ lws_compare_and_swap_2:
+ #ifdef CONFIG_64BIT
+-	/* Clip the input registers */
++	/* Clip the input registers. We don't need to clip %r23 as we
++	   only use it for word operations */
+ 	depdi	0, 31, 32, %r26
+ 	depdi	0, 31, 32, %r25
+ 	depdi	0, 31, 32, %r24
+-	depdi	0, 31, 32, %r23
+ #endif
+ 
+ 	/* Check the validity of the size pointer */
+-	subi,>>= 4, %r23, %r0
++	subi,>>= 3, %r23, %r0
+ 	b,n	lws_exit_nosys
+ 
+ 	/* Jump to the functions which will load the old and new values into
+diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
+index cf8c7e4e0b21..984a54c85952 100644
+--- a/arch/powerpc/kernel/signal.c
++++ b/arch/powerpc/kernel/signal.c
+@@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
+ static void do_signal(struct pt_regs *regs)
+ {
+ 	sigset_t *oldset = sigmask_to_save();
+-	struct ksignal ksig;
++	struct ksignal ksig = { .sig = 0 };
+ 	int ret;
+ 	int is32 = is_32bit_task();
+ 
+diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
+new file mode 100644
+index 000000000000..2c3413b0ca52
+--- /dev/null
++++ b/arch/s390/include/asm/asm-prototypes.h
+@@ -0,0 +1,8 @@
++#ifndef _ASM_S390_PROTOTYPES_H
++
++#include <linux/kvm_host.h>
++#include <linux/ftrace.h>
++#include <asm/fpu/api.h>
++#include <asm-generic/asm-prototypes.h>
++
++#endif /* _ASM_S390_PROTOTYPES_H */
+diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
+index 8140d10c6785..d7c277664ec4 100644
+--- a/arch/s390/kernel/dis.c
++++ b/arch/s390/kernel/dis.c
+@@ -1549,6 +1549,7 @@ static struct s390_insn opcode_e7[] = {
+ 	{ "vfsq", 0xce, INSTR_VRR_VV000MM },
+ 	{ "vfs", 0xe2, INSTR_VRR_VVV00MM },
+ 	{ "vftci", 0x4a, INSTR_VRI_VVIMM },
++	{ "", 0, INSTR_INVALID }
+ };
+ 
+ static struct s390_insn opcode_eb[] = {
+@@ -1954,7 +1955,7 @@ void show_code(struct pt_regs *regs)
+ {
+ 	char *mode = user_mode(regs) ? "User" : "Krnl";
+ 	unsigned char code[64];
+-	char buffer[64], *ptr;
++	char buffer[128], *ptr;
+ 	mm_segment_t old_fs;
+ 	unsigned long addr;
+ 	int start, end, opsize, hops, i;
+@@ -2017,7 +2018,7 @@ void show_code(struct pt_regs *regs)
+ 		start += opsize;
+ 		printk(buffer);
+ 		ptr = buffer;
+-		ptr += sprintf(ptr, "\n          ");
++		ptr += sprintf(ptr, "\n\t  ");
+ 		hops++;
+ 	}
+ 	printk("\n");
+diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
+index 26b4ae96fdd7..ddbec1054f75 100644
+--- a/arch/s390/kernel/runtime_instr.c
++++ b/arch/s390/kernel/runtime_instr.c
+@@ -53,12 +53,14 @@ void exit_thread_runtime_instr(void)
+ {
+ 	struct task_struct *task = current;
+ 
++	preempt_disable();
+ 	if (!task->thread.ri_cb)
+ 		return;
+ 	disable_runtime_instr();
+ 	kfree(task->thread.ri_cb);
+ 	task->thread.ri_signum = 0;
+ 	task->thread.ri_cb = NULL;
++	preempt_enable();
+ }
+ 
+ static void runtime_instr_int_handler(struct ext_code ext_code,
+@@ -100,9 +102,7 @@ SYSCALL_DEFINE2(s390_runtime_instr, int, command, int, signum)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (command == S390_RUNTIME_INSTR_STOP) {
+-		preempt_disable();
+ 		exit_thread_runtime_instr();
+-		preempt_enable();
+ 		return 0;
+ 	}
+ 
+diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
+index 3738b138b843..b047734842df 100644
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -3,6 +3,7 @@
+ 
+ #include <asm/i387.h>
+ #include <asm/pgtable.h>
++#include <asm/tlb.h>
+ 
+ /*
+  * We map the EFI regions needed for runtime services non-contiguously,
+@@ -64,6 +65,17 @@ extern u64 asmlinkage efi_call(void *fp, ...);
+ 
+ #define efi_call_phys(f, args...)		efi_call((f), args)
+ 
++/*
++ * Scratch space used for switching the pagetable in the EFI stub
++ */
++struct efi_scratch {
++	u64	r15;
++	u64	prev_cr3;
++	pgd_t	*efi_pgt;
++	bool	use_pgd;
++	u64	phys_stack;
++} __packed;
++
+ #define efi_call_virt(f, ...)						\
+ ({									\
+ 	efi_status_t __s;						\
+@@ -71,7 +83,20 @@ extern u64 asmlinkage efi_call(void *fp, ...);
+ 	efi_sync_low_kernel_mappings();					\
+ 	preempt_disable();						\
+ 	__kernel_fpu_begin();						\
++									\
++	if (efi_scratch.use_pgd) {					\
++		efi_scratch.prev_cr3 = read_cr3();			\
++		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
++		__flush_tlb_all();					\
++	}								\
++									\
+ 	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
++									\
++	if (efi_scratch.use_pgd) {					\
++		write_cr3(efi_scratch.prev_cr3);			\
++		__flush_tlb_all();					\
++	}								\
++									\
+ 	__kernel_fpu_end();						\
+ 	preempt_enable();						\
+ 	__s;								\
+@@ -98,6 +123,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size);
+ extern void __init efi_map_region(efi_memory_desc_t *md);
+ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+ extern void efi_sync_low_kernel_mappings(void);
++extern int __init efi_alloc_page_tables(void);
+ extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init old_map_region(efi_memory_desc_t *md);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 23be7ffebb4b..c5ecf85227e0 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1780,6 +1780,8 @@ static int ud_interception(struct vcpu_svm *svm)
+ 	int er;
+ 
+ 	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
++	if (er == EMULATE_USER_EXIT)
++		return 0;
+ 	if (er != EMULATE_DONE)
+ 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ 	return 1;
+@@ -3196,6 +3198,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ 	u32 ecx = msr->index;
+ 	u64 data = msr->data;
+ 	switch (ecx) {
++	case MSR_IA32_CR_PAT:
++		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
++			return 1;
++		vcpu->arch.pat = data;
++		svm->vmcb->save.g_pat = data;
++		mark_dirty(svm->vmcb, VMCB_NPT);
++		break;
+ 	case MSR_IA32_TSC:
+ 		kvm_write_tsc(vcpu, msr);
+ 		break;
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 33c1b5311b98..665b10a55b30 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5096,6 +5096,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 			return 1;
+ 		}
+ 		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
++		if (er == EMULATE_USER_EXIT)
++			return 0;
+ 		if (er != EMULATE_DONE)
+ 			kvm_queue_exception(vcpu, UD_VECTOR);
+ 		return 1;
+@@ -9966,6 +9968,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
+ 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
+ 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
++	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
++	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
+ 
+ 	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1.  */
+ 	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5f9cf11f9446..e4e7d45fd551 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1697,6 +1697,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
+ 	 */
+ 	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+ 
++	if (guest_hv_clock.version & 1)
++		++guest_hv_clock.version;  /* first time write, random junk */
++
+ 	vcpu->hv_clock.version = guest_hv_clock.version + 1;
+ 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ 				&vcpu->hv_clock,
+@@ -5433,6 +5436,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+ 			if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+ 						emulation_type))
+ 				return EMULATE_DONE;
++			if (ctxt->have_exception && inject_emulated_exception(vcpu))
++				return EMULATE_DONE;
+ 			if (emulation_type & EMULTYPE_SKIP)
+ 				return EMULATE_FAIL;
+ 			return handle_emulation_failure(vcpu);
+diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
+index 816488c0b97e..2dc6a7df16cd 100644
+--- a/arch/x86/lib/x86-opcode-map.txt
++++ b/arch/x86/lib/x86-opcode-map.txt
+@@ -822,7 +822,7 @@ EndTable
+ 
+ GrpTable: Grp3_1
+ 0: TEST Eb,Ib
+-1:
++1: TEST Eb,Ib
+ 2: NOT Eb
+ 3: NEG Eb
+ 4: MUL AL,Eb
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 2dd9b3ad3bb5..1e5a786e75ce 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -884,15 +884,10 @@ static void populate_pte(struct cpa_data *cpa,
+ 	pte = pte_offset_kernel(pmd, start);
+ 
+ 	while (num_pages-- && start < end) {
+-
+-		/* deal with the NX bit */
+-		if (!(pgprot_val(pgprot) & _PAGE_NX))
+-			cpa->pfn &= ~_PAGE_NX;
+-
+-		set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
++		set_pte(pte, pfn_pte(cpa->pfn, pgprot));
+ 
+ 		start	 += PAGE_SIZE;
+-		cpa->pfn += PAGE_SIZE;
++		cpa->pfn++;
+ 		pte++;
+ 	}
+ }
+@@ -948,11 +943,11 @@ static int populate_pmd(struct cpa_data *cpa,
+ 
+ 		pmd = pmd_offset(pud, start);
+ 
+-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
++		set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+ 				   massage_pgprot(pmd_pgprot)));
+ 
+ 		start	  += PMD_SIZE;
+-		cpa->pfn  += PMD_SIZE;
++		cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
+ 		cur_pages += PMD_SIZE >> PAGE_SHIFT;
+ 	}
+ 
+@@ -1021,11 +1016,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ 	 * Map everything starting from the Gb boundary, possibly with 1G pages
+ 	 */
+ 	while (end - start >= PUD_SIZE) {
+-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
++		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
+ 				   massage_pgprot(pud_pgprot)));
+ 
+ 		start	  += PUD_SIZE;
+-		cpa->pfn  += PUD_SIZE;
++		cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
+ 		cur_pages += PUD_SIZE >> PAGE_SHIFT;
+ 		pud++;
+ 	}
+diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
+index d7f997f7c26d..10380c8749b7 100644
+--- a/arch/x86/platform/efi/efi-bgrt.c
++++ b/arch/x86/platform/efi/efi-bgrt.c
+@@ -28,8 +28,7 @@ struct bmp_header {
+ void __init efi_bgrt_init(void)
+ {
+ 	acpi_status status;
+-	void __iomem *image;
+-	bool ioremapped = false;
++	void *image;
+ 	struct bmp_header bmp_header;
+ 
+ 	if (acpi_disabled)
+@@ -65,20 +64,14 @@ void __init efi_bgrt_init(void)
+ 		return;
+ 	}
+ 
+-	image = efi_lookup_mapped_addr(bgrt_tab->image_address);
++	image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header));
+ 	if (!image) {
+-		image = early_ioremap(bgrt_tab->image_address,
+-				       sizeof(bmp_header));
+-		ioremapped = true;
+-		if (!image) {
+-			pr_err("Ignoring BGRT: failed to map image header memory\n");
+-			return;
+-		}
++		pr_err("Ignoring BGRT: failed to map image header memory\n");
++		return;
+ 	}
+ 
+-	memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
+-	if (ioremapped)
+-		early_iounmap(image, sizeof(bmp_header));
++	memcpy(&bmp_header, image, sizeof(bmp_header));
++	early_memunmap(image, sizeof(bmp_header));
+ 	bgrt_image_size = bmp_header.size;
+ 
+ 	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
+@@ -88,18 +81,14 @@ void __init efi_bgrt_init(void)
+ 		return;
+ 	}
+ 
+-	if (ioremapped) {
+-		image = early_ioremap(bgrt_tab->image_address,
+-				       bmp_header.size);
+-		if (!image) {
+-			pr_err("Ignoring BGRT: failed to map image memory\n");
+-			kfree(bgrt_image);
+-			bgrt_image = NULL;
+-			return;
+-		}
++	image = early_memremap(bgrt_tab->image_address, bmp_header.size);
++	if (!image) {
++		pr_err("Ignoring BGRT: failed to map image memory\n");
++		kfree(bgrt_image);
++		bgrt_image = NULL;
++		return;
+ 	}
+ 
+-	memcpy_fromio(bgrt_image, image, bgrt_image_size);
+-	if (ioremapped)
+-		early_iounmap(image, bmp_header.size);
++	memcpy(bgrt_image, image, bgrt_image_size);
++	early_memunmap(image, bmp_header.size);
+ }
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 477384985ac9..9b2fe733e9f7 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -843,7 +843,7 @@ static void __init kexec_enter_virtual_mode(void)
+  * This function will switch the EFI runtime services to virtual mode.
+  * Essentially, we look through the EFI memmap and map every region that
+  * has the runtime attribute bit set in its memory descriptor into the
+- * ->trampoline_pgd page table using a top-down VA allocation scheme.
++ * efi_pgd page table.
+  *
+  * The old method which used to update that memory descriptor with the
+  * virtual address obtained from ioremap() is still supported when the
+@@ -853,8 +853,8 @@ static void __init kexec_enter_virtual_mode(void)
+  *
+  * The new method does a pagetable switch in a preemption-safe manner
+  * so that we're in a different address space when calling a runtime
+- * function. For function arguments passing we do copy the PGDs of the
+- * kernel page table into ->trampoline_pgd prior to each call.
++ * function. For function arguments passing we do copy the PUDs of the
++ * kernel page table into efi_pgd prior to each call.
+  *
+  * Specially for kexec boot, efi runtime maps in previous kernel should
+  * be passed in via setup_data. In that case runtime ranges will be mapped
+@@ -869,6 +869,12 @@ static void __init __efi_enter_virtual_mode(void)
+ 
+ 	efi.systab = NULL;
+ 
++	if (efi_alloc_page_tables()) {
++		pr_err("Failed to allocate EFI page tables\n");
++		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++		return;
++	}
++
+ 	efi_merge_regions();
+ 	new_memmap = efi_map_regions(&count, &pg_shift);
+ 	if (!new_memmap) {
+@@ -928,28 +934,11 @@ static void __init __efi_enter_virtual_mode(void)
+ 	efi_runtime_mkexec();
+ 
+ 	/*
+-	 * We mapped the descriptor array into the EFI pagetable above but we're
+-	 * not unmapping it here. Here's why:
+-	 *
+-	 * We're copying select PGDs from the kernel page table to the EFI page
+-	 * table and when we do so and make changes to those PGDs like unmapping
+-	 * stuff from them, those changes appear in the kernel page table and we
+-	 * go boom.
+-	 *
+-	 * From setup_real_mode():
+-	 *
+-	 * ...
+-	 * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+-	 *
+-	 * In this particular case, our allocation is in PGD 0 of the EFI page
+-	 * table but we've copied that PGD from PGD[272] of the EFI page table:
+-	 *
+-	 *	pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
+-	 *
+-	 * where the direct memory mapping in kernel space is.
+-	 *
+-	 * new_memmap's VA comes from that direct mapping and thus clearing it,
+-	 * it would get cleared in the kernel page table too.
++	 * We mapped the descriptor array into the EFI pagetable above
++	 * but we're not unmapping it here because if we're running in
++	 * EFI mixed mode we need all of memory to be accessible when
++	 * we pass parameters to the EFI runtime services in the
++	 * thunking code.
+ 	 *
+ 	 * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+ 	 */
+diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
+index ed5b67338294..58d669bc8250 100644
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -38,6 +38,11 @@
+  * say 0 - 3G.
+  */
+ 
++int __init efi_alloc_page_tables(void)
++{
++	return 0;
++}
++
+ void efi_sync_low_kernel_mappings(void) {}
+ void __init efi_dump_pagetable(void) {}
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index a0ac0f9c307f..18dfaad71c99 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -40,6 +40,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/realmode.h>
+ #include <asm/time.h>
++#include <asm/pgalloc.h>
+ 
+ /*
+  * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+@@ -47,16 +48,7 @@
+  */
+ static u64 efi_va = EFI_VA_START;
+ 
+-/*
+- * Scratch space used for switching the pagetable in the EFI stub
+- */
+-struct efi_scratch {
+-	u64 r15;
+-	u64 prev_cr3;
+-	pgd_t *efi_pgt;
+-	bool use_pgd;
+-	u64 phys_stack;
+-} __packed;
++struct efi_scratch efi_scratch;
+ 
+ static void __init early_code_mapping_set_exec(int executable)
+ {
+@@ -83,8 +75,11 @@ pgd_t * __init efi_call_phys_prolog(void)
+ 	int pgd;
+ 	int n_pgds;
+ 
+-	if (!efi_enabled(EFI_OLD_MEMMAP))
+-		return NULL;
++	if (!efi_enabled(EFI_OLD_MEMMAP)) {
++		save_pgd = (pgd_t *)read_cr3();
++		write_cr3((unsigned long)efi_scratch.efi_pgt);
++		goto out;
++	}
+ 
+ 	early_code_mapping_set_exec(1);
+ 
+@@ -96,6 +91,7 @@ pgd_t * __init efi_call_phys_prolog(void)
+ 		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+ 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+ 	}
++out:
+ 	__flush_tlb_all();
+ 
+ 	return save_pgd;
+@@ -109,8 +105,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
+ 	int pgd_idx;
+ 	int nr_pgds;
+ 
+-	if (!save_pgd)
++	if (!efi_enabled(EFI_OLD_MEMMAP)) {
++		write_cr3((unsigned long)save_pgd);
++		__flush_tlb_all();
+ 		return;
++	}
+ 
+ 	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+ 
+@@ -123,27 +122,97 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
+ 	early_code_mapping_set_exec(0);
+ }
+ 
++static pgd_t *efi_pgd;
++
++/*
++ * We need our own copy of the higher levels of the page tables
++ * because we want to avoid inserting EFI region mappings (EFI_VA_END
++ * to EFI_VA_START) into the standard kernel page tables. Everything
++ * else can be shared, see efi_sync_low_kernel_mappings().
++ */
++int __init efi_alloc_page_tables(void)
++{
++	pgd_t *pgd;
++	pud_t *pud;
++	gfp_t gfp_mask;
++
++	if (efi_enabled(EFI_OLD_MEMMAP))
++		return 0;
++
++	gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
++	efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
++	if (!efi_pgd)
++		return -ENOMEM;
++
++	pgd = efi_pgd + pgd_index(EFI_VA_END);
++
++	pud = pud_alloc_one(NULL, 0);
++	if (!pud) {
++		free_page((unsigned long)efi_pgd);
++		return -ENOMEM;
++	}
++
++	pgd_populate(NULL, pgd, pud);
++
++	return 0;
++}
++
+ /*
+  * Add low kernel mappings for passing arguments to EFI functions.
+  */
+ void efi_sync_low_kernel_mappings(void)
+ {
+-	unsigned num_pgds;
+-	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
++	unsigned num_entries;
++	pgd_t *pgd_k, *pgd_efi;
++	pud_t *pud_k, *pud_efi;
+ 
+ 	if (efi_enabled(EFI_OLD_MEMMAP))
+ 		return;
+ 
+-	num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
++	/*
++	 * We can share all PGD entries apart from the one entry that
++	 * covers the EFI runtime mapping space.
++	 *
++	 * Make sure the EFI runtime region mappings are guaranteed to
++	 * only span a single PGD entry and that the entry also maps
++	 * other important kernel regions.
++	 */
++	BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
++	BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
++			(EFI_VA_END & PGDIR_MASK));
++
++	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
++	pgd_k = pgd_offset_k(PAGE_OFFSET);
+ 
+-	memcpy(pgd + pgd_index(PAGE_OFFSET),
+-		init_mm.pgd + pgd_index(PAGE_OFFSET),
+-		sizeof(pgd_t) * num_pgds);
++	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
++	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
++
++	/*
++	 * We share all the PUD entries apart from those that map the
++	 * EFI regions. Copy around them.
++	 */
++	BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
++	BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
++
++	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
++	pud_efi = pud_offset(pgd_efi, 0);
++
++	pgd_k = pgd_offset_k(EFI_VA_END);
++	pud_k = pud_offset(pgd_k, 0);
++
++	num_entries = pud_index(EFI_VA_END);
++	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
++
++	pud_efi = pud_offset(pgd_efi, EFI_VA_START);
++	pud_k = pud_offset(pgd_k, EFI_VA_START);
++
++	num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
++	memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+ }
+ 
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+-	unsigned long text;
++	unsigned long pfn, text;
+ 	struct page *page;
+ 	unsigned npages;
+ 	pgd_t *pgd;
+@@ -151,8 +220,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ 	if (efi_enabled(EFI_OLD_MEMMAP))
+ 		return 0;
+ 
+-	efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+-	pgd = __va(efi_scratch.efi_pgt);
++	efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
++	pgd = efi_pgd;
+ 
+ 	/*
+ 	 * It can happen that the physical address of new_memmap lands in memory
+@@ -160,7 +229,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ 	 * and ident-map those pages containing the map before calling
+ 	 * phys_efi_set_virtual_address_map().
+ 	 */
+-	if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
++	pfn = pa_memmap >> PAGE_SHIFT;
++	if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
+ 		pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+ 		return 1;
+ 	}
+@@ -185,8 +255,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ 
+ 	npages = (_end - _text) >> PAGE_SHIFT;
+ 	text = __pa(_text);
++	pfn = text >> PAGE_SHIFT;
+ 
+-	if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
++	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
+ 		pr_err("Failed to map kernel text 1:1\n");
+ 		return 1;
+ 	}
+@@ -196,20 +267,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ 
+ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+-	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-
+-	kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
++	kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
+ }
+ 
+ static void __init __map_region(efi_memory_desc_t *md, u64 va)
+ {
+-	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-	unsigned long pf = 0;
++	unsigned long flags = 0;
++	unsigned long pfn;
++	pgd_t *pgd = efi_pgd;
+ 
+ 	if (!(md->attribute & EFI_MEMORY_WB))
+-		pf |= _PAGE_PCD;
++		flags |= _PAGE_PCD;
+ 
+-	if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
++	pfn = md->phys_addr >> PAGE_SHIFT;
++	if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
+ 		pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+ 			   md->phys_addr, va);
+ }
+@@ -312,9 +383,7 @@ void __init efi_runtime_mkexec(void)
+ void __init efi_dump_pagetable(void)
+ {
+ #ifdef CONFIG_EFI_PGT_DUMP
+-	pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-
+-	ptdump_walk_pgd_level(NULL, pgd);
++	ptdump_walk_pgd_level(NULL, efi_pgd);
+ #endif
+ }
+ 
+diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
+index 86d0f9e08dd9..32020cb8bb08 100644
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -38,41 +38,6 @@
+ 	mov %rsi, %cr0;			\
+ 	mov (%rsp), %rsp
+ 
+-	/* stolen from gcc */
+-	.macro FLUSH_TLB_ALL
+-	movq %r15, efi_scratch(%rip)
+-	movq %r14, efi_scratch+8(%rip)
+-	movq %cr4, %r15
+-	movq %r15, %r14
+-	andb $0x7f, %r14b
+-	movq %r14, %cr4
+-	movq %r15, %cr4
+-	movq efi_scratch+8(%rip), %r14
+-	movq efi_scratch(%rip), %r15
+-	.endm
+-
+-	.macro SWITCH_PGT
+-	cmpb $0, efi_scratch+24(%rip)
+-	je 1f
+-	movq %r15, efi_scratch(%rip)		# r15
+-	# save previous CR3
+-	movq %cr3, %r15
+-	movq %r15, efi_scratch+8(%rip)		# prev_cr3
+-	movq efi_scratch+16(%rip), %r15		# EFI pgt
+-	movq %r15, %cr3
+-	1:
+-	.endm
+-
+-	.macro RESTORE_PGT
+-	cmpb $0, efi_scratch+24(%rip)
+-	je 2f
+-	movq efi_scratch+8(%rip), %r15
+-	movq %r15, %cr3
+-	movq efi_scratch(%rip), %r15
+-	FLUSH_TLB_ALL
+-	2:
+-	.endm
+-
+ ENTRY(efi_call)
+ 	SAVE_XMM
+ 	mov (%rsp), %rax
+@@ -83,16 +48,8 @@ ENTRY(efi_call)
+ 	mov %r8, %r9
+ 	mov %rcx, %r8
+ 	mov %rsi, %rcx
+-	SWITCH_PGT
+ 	call *%rdi
+-	RESTORE_PGT
+ 	addq $48, %rsp
+ 	RESTORE_XMM
+ 	ret
+ ENDPROC(efi_call)
+-
+-	.data
+-ENTRY(efi_scratch)
+-	.fill 3,8,0
+-	.byte 0
+-	.quad 0
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 5ab6fa9cfc2f..5e8c07abf2e7 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -2237,8 +2237,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
+ 		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
+ 			eflags |= ATA_EFLAG_DUBIOUS_XFER;
+ 		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
++		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
+ 	}
+-	trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
+ 	DPRINTK("EXIT\n");
+ }
+ 
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index 0a1df821860f..6fe6e08479e6 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -264,7 +264,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+ 
+ 		/* Get configuration for the ATL instances */
+ 		snprintf(prop, sizeof(prop), "atl%u", i);
+-		cfg_node = of_find_node_by_name(node, prop);
++		cfg_node = of_get_child_by_name(node, prop);
+ 		if (cfg_node) {
+ 			ret = of_property_read_u32(cfg_node, "bws",
+ 						   &cdesc->bws);
+@@ -277,6 +277,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+ 				atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
+ 					  cdesc->aws);
+ 			}
++			of_node_put(cfg_node);
+ 		}
+ 
+ 		cdesc->probed = true;
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 1f2c86d81176..91231143d991 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -237,38 +237,6 @@ err_put:
+ subsys_initcall(efisubsys_init);
+ 
+ 
+-/*
+- * We can't ioremap data in EFI boot services RAM, because we've already mapped
+- * it as RAM.  So, look it up in the existing EFI memory map instead.  Only
+- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
+- */
+-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
+-{
+-	struct efi_memory_map *map;
+-	void *p;
+-	map = efi.memmap;
+-	if (!map)
+-		return NULL;
+-	if (WARN_ON(!map->map))
+-		return NULL;
+-	for (p = map->map; p < map->map_end; p += map->desc_size) {
+-		efi_memory_desc_t *md = p;
+-		u64 size = md->num_pages << EFI_PAGE_SHIFT;
+-		u64 end = md->phys_addr + size;
+-		if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+-		    md->type != EFI_BOOT_SERVICES_CODE &&
+-		    md->type != EFI_BOOT_SERVICES_DATA)
+-			continue;
+-		if (!md->virt_addr)
+-			continue;
+-		if (phys_addr >= md->phys_addr && phys_addr < end) {
+-			phys_addr += md->virt_addr - md->phys_addr;
+-			return (__force void __iomem *)(unsigned long)phys_addr;
+-		}
+-	}
+-	return NULL;
+-}
+-
+ static __initdata efi_config_table_type_t common_tables[] = {
+ 	{ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+ 	{ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
+index d6f43e06150a..c9c619727806 100644
+--- a/drivers/gpu/drm/armada/Makefile
++++ b/drivers/gpu/drm/armada/Makefile
+@@ -5,3 +5,5 @@ armada-y	+= armada_510.o
+ armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
+ 
+ obj-$(CONFIG_DRM_ARMADA) := armada.o
++
++CFLAGS_armada_trace.o := -I$(src)
+diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
+index ae628001fd97..b4891066a369 100644
+--- a/drivers/gpu/drm/i915/intel_i2c.c
++++ b/drivers/gpu/drm/i915/intel_i2c.c
+@@ -394,7 +394,9 @@ static bool
+ gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+ {
+ 	return (i + 1 < num &&
+-		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
++		msgs[i].addr == msgs[i + 1].addr &&
++		!(msgs[i].flags & I2C_M_RD) &&
++		(msgs[i].len == 1 || msgs[i].len == 2) &&
+ 		(msgs[i + 1].flags & I2C_M_RD));
+ }
+ 
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index 30904a9b2a4c..91848659b38e 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -352,6 +352,7 @@ static int panel_simple_remove(struct device *dev)
+ 	drm_panel_remove(&panel->base);
+ 
+ 	panel_simple_disable(&panel->base);
++	panel_simple_unprepare(&panel->base);
+ 
+ 	if (panel->ddc)
+ 		put_device(&panel->ddc->dev);
+@@ -367,6 +368,7 @@ static void panel_simple_shutdown(struct device *dev)
+ 	struct panel_simple *panel = dev_get_drvdata(dev);
+ 
+ 	panel_simple_disable(&panel->base);
++	panel_simple_unprepare(&panel->base);
+ }
+ 
+ static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
+diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
+index 39c8d99cc48e..02107ccf57a1 100644
+--- a/drivers/iio/light/cm3232.c
++++ b/drivers/iio/light/cm3232.c
+@@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
+ 	if (ret < 0)
+ 		dev_err(&chip->client->dev, "Error writing reg_cmd\n");
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index b60fde9d9878..18e688d68e66 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -670,12 +670,19 @@ static void srp_path_rec_completion(int status,
+ static int srp_lookup_path(struct srp_rdma_ch *ch)
+ {
+ 	struct srp_target_port *target = ch->target;
+-	int ret;
++	int ret = -ENODEV;
+ 
+ 	ch->path.numb_path = 1;
+ 
+ 	init_completion(&ch->done);
+ 
++	/*
++	 * Avoid that the SCSI host can be removed by srp_remove_target()
++	 * before srp_path_rec_completion() is called.
++	 */
++	if (!scsi_host_get(target->scsi_host))
++		goto out;
++
+ 	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
+ 					       target->srp_host->srp_dev->dev,
+ 					       target->srp_host->port,
+@@ -689,18 +696,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
+ 					       GFP_KERNEL,
+ 					       srp_path_rec_completion,
+ 					       ch, &ch->path_query);
+-	if (ch->path_query_id < 0)
+-		return ch->path_query_id;
++	ret = ch->path_query_id;
++	if (ret < 0)
++		goto put;
+ 
+ 	ret = wait_for_completion_interruptible(&ch->done);
+ 	if (ret < 0)
+-		return ret;
++		goto put;
+ 
+-	if (ch->status < 0)
++	ret = ch->status;
++	if (ret < 0)
+ 		shost_printk(KERN_WARNING, target->scsi_host,
+ 			     PFX "Path record query failed\n");
+ 
+-	return ch->status;
++put:
++	scsi_host_put(target->scsi_host);
++
++out:
++	return ret;
+ }
+ 
+ static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 27e5b0090e40..416cd07ab87a 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -3516,7 +3516,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+ {
+ 	const char *p;
+ 	unsigned len, count, leading_zero_bytes;
+-	int ret, rc;
++	int ret;
+ 
+ 	p = name;
+ 	if (strncasecmp(p, "0x", 2) == 0)
+@@ -3528,10 +3528,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+ 	count = min(len / 2, 16U);
+ 	leading_zero_bytes = 16 - count;
+ 	memset(i_port_id, 0, leading_zero_bytes);
+-	rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+-	if (rc < 0)
+-		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+-	ret = 0;
++	ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
++	if (ret < 0)
++		pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
+ out:
+ 	return ret;
+ }
+diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
+index 8eeab72b93e2..4d46f2ce606f 100644
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -406,7 +406,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+ 
+ 	finish_wait(&ca->set->bucket_wait, &w);
+ out:
+-	wake_up_process(ca->alloc_thread);
++	if (ca->alloc_thread)
++		wake_up_process(ca->alloc_thread);
+ 
+ 	trace_bcache_alloc(ca, reserve);
+ 
+@@ -478,7 +479,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+ 		if (b == -1)
+ 			goto err;
+ 
+-		k->ptr[i] = PTR(ca->buckets[b].gen,
++		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
+ 				bucket_to_sector(c, b),
+ 				ca->sb.nr_this_dev);
+ 
+diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
+index 243de0bf15cd..4bf15182c4da 100644
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -584,7 +584,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
+ 		return false;
+ 
+ 	for (i = 0; i < KEY_PTRS(l); i++)
+-		if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
++		if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ 		    PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
+ 			return false;
+ 
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index fe080ad0e558..6fb47a2a3d9d 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -508,7 +508,7 @@ static void journal_reclaim(struct cache_set *c)
+ 			continue;
+ 
+ 		ja->cur_idx = next;
+-		k->ptr[n++] = PTR(0,
++		k->ptr[n++] = MAKE_PTR(0,
+ 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+ 				  ca->sb.nr_this_dev);
+ 	}
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 4520bca6dc3f..51dc353f7962 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -922,7 +922,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
+ 		buffers = c->minimum_buffers;
+ 
+ 	*limit_buffers = buffers;
+-	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
++	*threshold_buffers = mult_frac(buffers,
++				       DM_BUFIO_WRITEBACK_PERCENT, 100);
+ }
+ 
+ /*
+@@ -1823,19 +1824,15 @@ static int __init dm_bufio_init(void)
+ 	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
+ 	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
+ 
+-	mem = (__u64)((totalram_pages - totalhigh_pages) *
+-		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
++	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
++			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
+ 
+ 	if (mem > ULONG_MAX)
+ 		mem = ULONG_MAX;
+ 
+ #ifdef CONFIG_MMU
+-	/*
+-	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
+-	 * in fs/proc/internal.h
+-	 */
+-	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
+-		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
++	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
++		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
+ #endif
+ 
+ 	dm_bufio_default_cache_size = mem;
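
The dm-bufio hunks replace open-coded "x * percent / 100" with mult_frac(), whose point is that the intermediate product never overflows: it divides first and folds the remainder back in. A fixed-type sketch of that identity (the real kernel macro is typeof-generic; this one pins the type to uint64_t):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* x * numer / denom without overflowing the intermediate product. */
    static uint64_t mult_frac64(uint64_t x, uint64_t numer, uint64_t denom)
    {
        uint64_t quot = x / denom;
        uint64_t rem  = x % denom;

        return quot * numer + rem * numer / denom;
    }

    int main(void)
    {
        uint64_t x = UINT64_MAX / 50;    /* x * 90 wraps a u64 */

        printf("naive: %" PRIu64 "\n", x * 90 / 100);        /* wrapped */
        printf("safe : %" PRIu64 "\n", mult_frac64(x, 90, 100));
        return 0;
    }
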
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 87de9a0848b7..d192ab2ed17c 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -3516,11 +3516,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
+ 
+ 	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
+ 
+-	if (test_bit(DMF_FREEING, &md->flags) ||
+-	    dm_deleting_md(md))
+-		return NULL;
+-
++	spin_lock(&_minor_lock);
++	if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
++		md = NULL;
++		goto out;
++	}
+ 	dm_get(md);
++out:
++	spin_unlock(&_minor_lock);
++
+ 	return md;
+ }
+ 
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index 4795c31ceebc..d2c1b340cf67 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -289,11 +289,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+ 		if (!dev->max_timeout)
+ 			return -ENOSYS;
+ 
++		/* Check for multiply overflow */
++		if (val > U32_MAX / 1000)
++			return -EINVAL;
++
+ 		tmp = val * 1000;
+ 
+-		if (tmp < dev->min_timeout ||
+-		    tmp > dev->max_timeout)
+-				return -EINVAL;
++		if (tmp < dev->min_timeout || tmp > dev->max_timeout)
++			return -EINVAL;
+ 
+ 		dev->timeout = tmp;
+ 		break;
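
The LIRC ioctl fix checks "val > U32_MAX / 1000" before scaling, because a 32-bit "val * 1000" quietly wraps for large user-supplied timeouts, and the wrapped value could then slip past the min/max range test. The guard in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Scale a user-supplied 32-bit value, refusing inputs that would wrap. */
    static int scale_timeout(uint32_t val, uint32_t *out)
    {
        if (val > UINT32_MAX / 1000)
            return -1;               /* like returning -EINVAL */
        *out = val * 1000;
        return 0;
    }

    int main(void)
    {
        uint32_t out = 0;

        printf("5000     -> %d\n", scale_timeout(5000, &out));     /* ok */
        printf("5000000  -> %d\n", scale_timeout(5000000, &out));  /* rejected */
        return 0;
    }
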
+diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
+index 07d08c49f4d4..b2e16bb67572 100644
+--- a/drivers/media/usb/as102/as102_fw.c
++++ b/drivers/media/usb/as102/as102_fw.c
+@@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
+ 				 unsigned char *cmd,
+ 				 const struct firmware *firmware) {
+ 
+-	struct as10x_fw_pkt_t fw_pkt;
++	struct as10x_fw_pkt_t *fw_pkt;
+ 	int total_read_bytes = 0, errno = 0;
+ 	unsigned char addr_has_changed = 0;
+ 
++	fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
++	if (!fw_pkt)
++		return -ENOMEM;
++
++
+ 	for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
+ 		int read_bytes = 0, data_len = 0;
+ 
+ 		/* parse intel hex line */
+ 		read_bytes = parse_hex_line(
+ 				(u8 *) (firmware->data + total_read_bytes),
+-				fw_pkt.raw.address,
+-				fw_pkt.raw.data,
++				fw_pkt->raw.address,
++				fw_pkt->raw.data,
+ 				&data_len,
+ 				&addr_has_changed);
+ 
+@@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
+ 		/* detect the end of file */
+ 		total_read_bytes += read_bytes;
+ 		if (total_read_bytes == firmware->size) {
+-			fw_pkt.u.request[0] = 0x00;
+-			fw_pkt.u.request[1] = 0x03;
++			fw_pkt->u.request[0] = 0x00;
++			fw_pkt->u.request[1] = 0x03;
+ 
+ 			/* send EOF command */
+ 			errno = bus_adap->ops->upload_fw_pkt(bus_adap,
+ 							     (uint8_t *)
+-							     &fw_pkt, 2, 0);
++							     fw_pkt, 2, 0);
+ 			if (errno < 0)
+ 				goto error;
+ 		} else {
+ 			if (!addr_has_changed) {
+ 				/* prepare command to send */
+-				fw_pkt.u.request[0] = 0x00;
+-				fw_pkt.u.request[1] = 0x01;
++				fw_pkt->u.request[0] = 0x00;
++				fw_pkt->u.request[1] = 0x01;
+ 
+-				data_len += sizeof(fw_pkt.u.request);
+-				data_len += sizeof(fw_pkt.raw.address);
++				data_len += sizeof(fw_pkt->u.request);
++				data_len += sizeof(fw_pkt->raw.address);
+ 
+ 				/* send cmd to device */
+ 				errno = bus_adap->ops->upload_fw_pkt(bus_adap,
+ 								     (uint8_t *)
+-								     &fw_pkt,
++								     fw_pkt,
+ 								     data_len,
+ 								     0);
+ 				if (errno < 0)
+@@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
+ 		}
+ 	}
+ error:
++	kfree(fw_pkt);
+ 	return (errno == 0) ? total_read_bytes : errno;
+ }
+ 
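
The as102 change moves the firmware packet from the caller's stack to the heap (stack memory is not reliably DMA-able for USB transfers) and releases it through a single error label so every exit path frees it exactly once. The shape of that pattern reduced to userspace, with upload_chunk as a placeholder for the bus op rather than the driver's API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct fw_pkt { unsigned char raw[256]; };

    /* Placeholder for the bus upload hook; always succeeds here. */
    static int upload_chunk(const struct fw_pkt *pkt, size_t len)
    {
        (void)pkt;
        (void)len;
        return 0;
    }

    static int firmware_upload(const unsigned char *data, size_t size)
    {
        struct fw_pkt *pkt;
        size_t off = 0;
        int err = 0;

        pkt = malloc(sizeof(*pkt));    /* heap, not the stack frame */
        if (!pkt)
            return -1;

        while (off < size) {
            size_t len = size - off < sizeof(pkt->raw)
                       ? size - off : sizeof(pkt->raw);

            memcpy(pkt->raw, data + off, len);
            err = upload_chunk(pkt, len);
            if (err < 0)
                goto error;            /* one exit, one free */
            off += len;
        }
    error:
        free(pkt);
        return err < 0 ? err : (int)off;
    }

    int main(void)
    {
        unsigned char blob[1000] = { 0 };

        printf("uploaded %d bytes\n", firmware_upload(blob, sizeof(blob)));
        return 0;
    }
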
+diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
+index 3503fcef7b51..2b437d89d32c 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
+@@ -1636,7 +1636,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
+ 	nr = dev->devno;
+ 
+ 	assoc_desc = udev->actconfig->intf_assoc[0];
+-	if (assoc_desc->bFirstInterface != ifnum) {
++	if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
+ 		dev_err(d, "Not found matching IAD interface\n");
+ 		retval = -ENODEV;
+ 		goto err_if;
+diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
+index e3a3468002e6..a8423943ffe4 100644
+--- a/drivers/media/v4l2-core/v4l2-ctrls.c
++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
+@@ -1198,6 +1198,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
+ }
+ EXPORT_SYMBOL(v4l2_ctrl_fill);
+ 
++static u32 user_flags(const struct v4l2_ctrl *ctrl)
++{
++	u32 flags = ctrl->flags;
++
++	if (ctrl->is_ptr)
++		flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
++
++	return flags;
++}
++
+ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
+ {
+ 	memset(ev->reserved, 0, sizeof(ev->reserved));
+@@ -1205,7 +1215,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
+ 	ev->id = ctrl->id;
+ 	ev->u.ctrl.changes = changes;
+ 	ev->u.ctrl.type = ctrl->type;
+-	ev->u.ctrl.flags = ctrl->flags;
++	ev->u.ctrl.flags = user_flags(ctrl);
+ 	if (ctrl->is_ptr)
+ 		ev->u.ctrl.value64 = 0;
+ 	else
+@@ -2549,10 +2559,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
+ 	else
+ 		qc->id = ctrl->id;
+ 	strlcpy(qc->name, ctrl->name, sizeof(qc->name));
+-	qc->flags = ctrl->flags;
++	qc->flags = user_flags(ctrl);
+ 	qc->type = ctrl->type;
+-	if (ctrl->is_ptr)
+-		qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
+ 	qc->elem_size = ctrl->elem_size;
+ 	qc->elems = ctrl->elems;
+ 	qc->nr_of_dims = ctrl->nr_of_dims;
+diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
+index 2d3db81be099..b3b69f0e0dd7 100644
+--- a/drivers/misc/eeprom/at24.c
++++ b/drivers/misc/eeprom/at24.c
+@@ -275,6 +275,9 @@ static ssize_t at24_read(struct at24_data *at24,
+ 	if (unlikely(!count))
+ 		return count;
+ 
++	if (off + count > at24->chip.byte_len)
++		return -EINVAL;
++
+ 	/*
+ 	 * Read data from chip, protecting against concurrent updates
+ 	 * from this host, but not from other I2C masters.
+@@ -329,6 +332,9 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
+ 	unsigned long timeout, write_time;
+ 	unsigned next_page;
+ 
++	if (offset + count > at24->chip.byte_len)
++		return -EINVAL;
++
+ 	/* Get corresponding I2C address and adjust offset */
+ 	client = at24_translate_offset(at24, &offset);
+ 
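
The at24 hunks bound-check the requested window against the EEPROM size before touching the I2C bus. One aside: with attacker-influenced offsets, phrasing the test as "off + count > len" can itself wrap for huge counts; the subtraction form below cannot. BYTE_LEN and the backing array are hypothetical stand-ins for the device:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define BYTE_LEN 256u    /* hypothetical device size */

    static unsigned char eeprom[BYTE_LEN];

    static long ee_read(unsigned char *buf, size_t off, size_t count)
    {
        /* overflow-proof bounds check */
        if (off > BYTE_LEN || count > BYTE_LEN - off)
            return -1;       /* like returning -EINVAL */
        memcpy(buf, eeprom + off, count);
        return (long)count;
    }

    int main(void)
    {
        unsigned char buf[16];

        printf("%ld\n", ee_read(buf, 250, 8));    /* runs past the end */
        printf("%ld\n", ee_read(buf, 250, 6));    /* exactly fits */
        return 0;
    }
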
+diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
+index 972ff844cf5a..cf7c7bc1e940 100644
+--- a/drivers/mmc/core/bus.c
++++ b/drivers/mmc/core/bus.c
+@@ -155,6 +155,9 @@ static int mmc_bus_suspend(struct device *dev)
+ 		return ret;
+ 
+ 	ret = host->bus_ops->suspend(host);
++	if (ret)
++		pm_generic_resume(dev);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index fa5cd51cba38..f84113fc7cb7 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2518,15 +2518,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+ 			    size_t *retlen, const uint8_t *buf)
+ {
+ 	struct nand_chip *chip = mtd->priv;
++	int chipnr = (int)(to >> chip->chip_shift);
+ 	struct mtd_oob_ops ops;
+ 	int ret;
+ 
+-	/* Wait for the device to get ready */
+-	panic_nand_wait(mtd, chip, 400);
+-
+ 	/* Grab the device */
+ 	panic_nand_get_device(chip, mtd, FL_WRITING);
+ 
++	chip->select_chip(mtd, chipnr);
++
++	/* Wait for the device to get ready */
++	panic_nand_wait(mtd, chip, 400);
++
+ 	memset(&ops, 0, sizeof(ops));
+ 	ops.len = len;
+ 	ops.datbuf = (uint8_t *)buf;
+diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
+index 8f8418d2ac4a..a0012c3cb4f6 100644
+--- a/drivers/net/ethernet/3com/typhoon.c
++++ b/drivers/net/ethernet/3com/typhoon.c
+@@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	 * 4) Get the hardware address.
+ 	 * 5) Put the card to sleep.
+ 	 */
+-	if (typhoon_reset(ioaddr, WaitSleep) < 0) {
++	err = typhoon_reset(ioaddr, WaitSleep);
++	if (err < 0) {
+ 		err_msg = "could not reset 3XP";
+-		err = -EIO;
+ 		goto error_out_dma;
+ 	}
+ 
+@@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	typhoon_init_interface(tp);
+ 	typhoon_init_rings(tp);
+ 
+-	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
++	err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
++	if (err < 0) {
+ 		err_msg = "cannot boot 3XP sleep image";
+-		err = -EIO;
+ 		goto error_out_reset;
+ 	}
+ 
+ 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
+-	if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
++	err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
++	if (err < 0) {
+ 		err_msg = "cannot read MAC address";
+-		err = -EIO;
+ 		goto error_out_reset;
+ 	}
+ 
+ 	*(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ 	*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ 
+-	if(!is_valid_ether_addr(dev->dev_addr)) {
++	if (!is_valid_ether_addr(dev->dev_addr)) {
+ 		err_msg = "Could not obtain valid ethernet address, aborting";
++		err = -EIO;
+ 		goto error_out_reset;
+ 	}
+ 
+@@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	 * later when we print out the version reported.
+ 	 */
+ 	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
+-	if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
++	err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
++	if (err < 0) {
+ 		err_msg = "Could not get Sleep Image version";
+ 		goto error_out_reset;
+ 	}
+@@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	if(xp_resp[0].numDesc != 0)
+ 		tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
+ 
+-	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
++	err = typhoon_sleep(tp, PCI_D3hot, 0);
++	if (err < 0) {
+ 		err_msg = "cannot put adapter to sleep";
+-		err = -EIO;
+ 		goto error_out_reset;
+ 	}
+ 
+@@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	dev->features = dev->hw_features |
+ 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
+ 
+-	if(register_netdev(dev) < 0) {
++	err = register_netdev(dev);
++	if (err < 0) {
+ 		err_msg = "unable to register netdev";
+ 		goto error_out_reset;
+ 	}
+diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
+index 30b74d590bee..1c9cb53c25a3 100644
+--- a/drivers/net/ethernet/intel/e1000e/mac.c
++++ b/drivers/net/ethernet/intel/e1000e/mac.c
+@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+  *  Checks to see of the link status of the hardware has changed.  If a
+  *  change in link status has been detected, then we read the PHY registers
+  *  to get the current speed/duplex if link exists.
++ *
++ *  Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
++ *  up).
+  **/
+ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+ {
+@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+ 	 * Change or Rx Sequence Error interrupt.
+ 	 */
+ 	if (!mac->get_link_status)
+-		return 0;
++		return 1;
+ 
+ 	/* First we want to see if the MII Status Register reports
+ 	 * link.  If so, then we want to get the current speed/duplex
+@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+ 	 * different link partner.
+ 	 */
+ 	ret_val = e1000e_config_fc_after_link_up(hw);
+-	if (ret_val)
++	if (ret_val) {
+ 		e_dbg("Error configuring flow control\n");
++		return ret_val;
++	}
+ 
+-	return ret_val;
++	return 1;
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index 68913d103542..39e9d7db23df 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -4866,7 +4866,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
+ 	case e1000_media_type_copper:
+ 		if (hw->mac.get_link_status) {
+ 			ret_val = hw->mac.ops.check_for_link(hw);
+-			link_active = !hw->mac.get_link_status;
++			link_active = ret_val > 0;
+ 		} else {
+ 			link_active = true;
+ 		}
+@@ -4884,7 +4884,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
+ 		break;
+ 	}
+ 
+-	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
++	if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ 	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+ 		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+ 		e_info("Gigabit has been disabled, downgrading speed\n");
+diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
+index b2005e13fb01..0963aa2d5e45 100644
+--- a/drivers/net/ethernet/intel/e1000e/phy.c
++++ b/drivers/net/ethernet/intel/e1000e/phy.c
+@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ 	s32 ret_val = 0;
+ 	u16 i, phy_status;
+ 
++	*success = false;
+ 	for (i = 0; i < iterations; i++) {
+ 		/* Some PHYs require the MII_BMSR register to be read
+ 		 * twice due to the link bit being sticky.  No harm doing
+@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+ 		ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
+ 		if (ret_val)
+ 			break;
+-		if (phy_status & BMSR_LSTATUS)
++		if (phy_status & BMSR_LSTATUS) {
++			*success = true;
+ 			break;
++		}
+ 		if (usec_interval >= 1000)
+ 			msleep(usec_interval / 1000);
+ 		else
+ 			udelay(usec_interval);
+ 	}
+ 
+-	*success = (i < iterations);
+-
+ 	return ret_val;
+ }
+ 
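
The e1000e PHY fix sets *success = false up front and flips it to true only when the link bit is actually observed. The removed "*success = (i < iterations)" inferred the result from the loop counter, so breaking out early on a register-read error also reported the link as up. The same discipline in a reduced poll loop (read_status stands in for e1e_rphy):

    #include <stdbool.h>
    #include <stdio.h>

    #define LINK_BIT 0x4u

    /* Fake register read: fails on attempt 3, link never comes up. */
    static int read_status(int attempt, unsigned *status)
    {
        if (attempt == 3)
            return -5;       /* like an -E1000_ERR_* code */
        *status = 0;
        return 0;
    }

    static int wait_for_link(int iterations, bool *success)
    {
        int ret = 0;

        *success = false;    /* set up front, never derived afterwards */
        for (int i = 0; i < iterations; i++) {
            unsigned status;

            ret = read_status(i, &status);
            if (ret)
                break;                 /* error: *success stays false */
            if (status & LINK_BIT) {
                *success = true;
                break;
            }
        }
        return ret;
    }

    int main(void)
    {
        bool up;
        int ret = wait_for_link(10, &up);

        printf("ret=%d link_up=%d\n", ret, up);    /* ret=-5 link_up=0 */
        return 0;
    }
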
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index c9da1b5d4804..ae0b9300ab08 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -1239,7 +1239,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if DD is not set pending work has not been completed */
+ 		if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 5b5bea159bd5..13e0cf90e567 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3283,7 +3283,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if the descriptor isn't done, no work yet to do */
+ 		if (!(eop_desc->cmd_type_offset_bsz &
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 9d95042d5a0f..c1e8303062de 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -712,7 +712,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* we have caught up to head, no work left to do */
+ 		if (tx_head == tx_desc)
+diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+index 458fbb421090..1045ba3a554d 100644
+--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+@@ -231,7 +231,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* we have caught up to head, no work left to do */
+ 		if (tx_head == tx_desc)
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index b5a8a5e40870..34f15f56b2a1 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -6427,7 +6427,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if DD is not set pending work has not been completed */
+ 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
+index 95af14e139d7..70f8f743a416 100644
+--- a/drivers/net/ethernet/intel/igbvf/netdev.c
++++ b/drivers/net/ethernet/intel/igbvf/netdev.c
+@@ -808,7 +808,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if DD is not set pending work has not been completed */
+ 		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 2462b3c5fa40..e9762d0079eb 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1100,7 +1100,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if DD is not set pending work has not been completed */
+ 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index 1d7b00b038a2..80029560dd11 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -312,7 +312,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
+ 			break;
+ 
+ 		/* prevent any other reads prior to eop_desc */
+-		read_barrier_depends();
++		smp_rmb();
+ 
+ 		/* if DD is not set pending work has not been completed */
+ 		if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
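
The run of read_barrier_depends() -> smp_rmb() conversions in the Intel ethernet drivers all fix the same pattern: the CPU must not read a descriptor's payload fields before it has seen the DD (done) bit, and read_barrier_depends() only orders dependent loads, which these are not. The portable C11 analogue is a release store paired with an acquire load; a two-thread sketch:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;         /* stands in for the descriptor fields */
    static atomic_int done;     /* stands in for the DD bit */

    static void *producer(void *arg)
    {
        (void)arg;
        payload = 42;
        /* release: payload is visible before done reads as 1 */
        atomic_store_explicit(&done, 1, memory_order_release);
        return NULL;
    }

    static void *consumer(void *arg)
    {
        (void)arg;
        /* acquire plays the role of the load + smp_rmb() pair */
        while (!atomic_load_explicit(&done, memory_order_acquire))
            ;
        printf("payload=%d\n", payload);    /* guaranteed to print 42 */
        return NULL;
    }

    int main(void)
    {
        pthread_t p, c;

        pthread_create(&c, NULL, consumer, NULL);
        pthread_create(&p, NULL, producer, NULL);
        pthread_join(p, NULL);
        pthread_join(c, NULL);
        return 0;
    }
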
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 56e0a1de0c37..6757d9c63bf2 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3118,7 +3118,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+ 	lockdep_assert_held(&ar->conf_mutex);
+ 
+ 	list_for_each_entry(arvif, &ar->arvifs, list) {
+-		WARN_ON(arvif->txpower < 0);
++		if (arvif->txpower <= 0)
++			continue;
+ 
+ 		if (txpower == -1)
+ 			txpower = arvif->txpower;
+@@ -3126,8 +3127,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+ 			txpower = min(txpower, arvif->txpower);
+ 	}
+ 
+-	if (WARN_ON(txpower == -1))
+-		return -EINVAL;
++	if (txpower == -1)
++		return 0;
+ 
+ 	ret = ath10k_mac_txpower_setup(ar, txpower);
+ 	if (ret) {
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index ee0c5f602e29..87638a6e7388 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -953,8 +953,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ 		struct ath10k_fw_stats_pdev *dst;
+ 
+ 		src = data;
+-		if (data_len < sizeof(*src))
++		if (data_len < sizeof(*src)) {
++			kfree(tb);
+ 			return -EPROTO;
++		}
+ 
+ 		data += sizeof(*src);
+ 		data_len -= sizeof(*src);
+@@ -974,8 +976,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ 		struct ath10k_fw_stats_vdev *dst;
+ 
+ 		src = data;
+-		if (data_len < sizeof(*src))
++		if (data_len < sizeof(*src)) {
++			kfree(tb);
+ 			return -EPROTO;
++		}
+ 
+ 		data += sizeof(*src);
+ 		data_len -= sizeof(*src);
+@@ -993,8 +997,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ 		struct ath10k_fw_stats_peer *dst;
+ 
+ 		src = data;
+-		if (data_len < sizeof(*src))
++		if (data_len < sizeof(*src)) {
++			kfree(tb);
+ 			return -EPROTO;
++		}
+ 
+ 		data += sizeof(*src);
+ 		data_len -= sizeof(*src);
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 3c4e709cf9a1..5754d7b48c1f 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1319,8 +1319,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+ 
+ static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+ {
+-	if (hpp)
+-		dev_warn(&dev->dev, "PCI-X settings not supported\n");
++	int pos;
++
++	if (!hpp)
++		return;
++
++	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
++	if (!pos)
++		return;
++
++	dev_warn(&dev->dev, "PCI-X settings not supported\n");
+ }
+ 
+ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+@@ -1331,6 +1339,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+ 	if (!hpp)
+ 		return;
+ 
++	if (!pci_is_pcie(dev))
++		return;
++
+ 	if (hpp->revision > 1) {
+ 		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+ 			 hpp->revision);
+diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
+index a2b7ae3329c0..9fe1d5793cee 100644
+--- a/drivers/staging/iio/cdc/ad7150.c
++++ b/drivers/staging/iio/cdc/ad7150.c
+@@ -275,7 +275,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
+ error_ret:
+ 	mutex_unlock(&chip->state_lock);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int ad7150_read_event_value(struct iio_dev *indio_dev,
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 89d01943ca93..8df1ff3766c4 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1919,12 +1919,14 @@ attach:
+ 
+ 	if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
+ 		int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
+-		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
++		if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
+ 			out_of_order_cmdsn = 1;
+-		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
++		} else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
++			target_put_sess_cmd(&cmd->se_cmd);
+ 			return 0;
+-		else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
++		} else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ 			return -1;
++		}
+ 	}
+ 	iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
+ 
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 2209040bff95..46ae8d258669 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -862,6 +862,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+ 		      struct scatterlist *sg, int sg_count)
+ {
+ 	size_t off = iter->iov_offset;
++	struct scatterlist *p = sg;
+ 	int i, ret;
+ 
+ 	for (i = 0; i < iter->nr_segs; i++) {
+@@ -870,8 +871,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+ 
+ 		ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
+ 		if (ret < 0) {
+-			for (i = 0; i < sg_count; i++) {
+-				struct page *page = sg_page(&sg[i]);
++			while (p < sg) {
++				struct page *page = sg_page(p++);
+ 				if (page)
+ 					put_page(page);
+ 			}
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 0e0eb10f82a0..816a0e08ef10 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
+ 			rc = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else if (msg_type == XS_TRANSACTION_END) {
++	} else if (u->u.msg.tx_id != 0) {
+ 		list_for_each_entry(trans, &u->transactions, list)
+ 			if (trans->handle.id == u->u.msg.tx_id)
+ 				break;
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 99c3c4ffe1d9..2df79195652b 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
+ 
+ 	if (v9inode->qid.type != st->qid.type)
+ 		return 0;
++
++	if (v9inode->qid.path != st->qid.path)
++		return 0;
+ 	return 1;
+ }
+ 
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index ce7ab92f7e84..095424bfab5e 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
+ 
+ 	if (v9inode->qid.type != st->qid.type)
+ 		return 0;
++
++	if (v9inode->qid.path != st->qid.path)
++		return 0;
+ 	return 1;
+ }
+ 
+diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
+index 35b755e79c2d..fe6e7050fe50 100644
+--- a/fs/autofs4/waitq.c
++++ b/fs/autofs4/waitq.c
+@@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
+ 		spin_unlock_irqrestore(&current->sighand->siglock, flags);
+ 	}
+ 
+-	return (bytes > 0);
++	/* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
++	return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
+ }
+ 	
+ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+@@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ 	} pkt;
+ 	struct file *pipe = NULL;
+ 	size_t pktsz;
++	int ret;
+ 
+ 	DPRINTK("wait id = 0x%08lx, name = %.*s, type=%d",
+ 		(unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type);
+@@ -173,7 +175,17 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
+ 	mutex_unlock(&sbi->wq_mutex);
+ 
+-	if (autofs4_write(sbi, pipe, &pkt, pktsz))
++	switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
++	case 0:
++		break;
++	case -ENOMEM:
++	case -ERESTARTSYS:
++		/* Just fail this one */
++		autofs4_wait_release(sbi, wq->wait_queue_token, ret);
++		break;
++	default:
+ 		autofs4_catatonic_mode(sbi);
++		break;
++	}
+ 	fput(pipe);
+ }
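
With autofs4_write() now returning a real errno rather than a boolean, the notify path can distinguish transient failures (-ENOMEM, -ERESTARTSYS: give up on this one wait) from a dead daemon pipe (go catatonic). The classification on its own, with -EINTR standing in for the kernel-only -ERESTARTSYS:

    #include <errno.h>
    #include <stdio.h>

    enum action { ACT_OK, ACT_FAIL_ONE, ACT_CATATONIC };

    static enum action classify(int ret)
    {
        switch (ret) {
        case 0:
            return ACT_OK;
        case -ENOMEM:
        case -EINTR:                 /* -ERESTARTSYS in the kernel */
            return ACT_FAIL_ONE;     /* transient: fail this wait only */
        default:
            return ACT_CATATONIC;    /* daemon pipe is gone */
        }
    }

    int main(void)
    {
        printf("0 -> %d, -ENOMEM -> %d, -EPIPE -> %d\n",
               classify(0), classify(-ENOMEM), classify(-EPIPE));
        return 0;
    }
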
+ 
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 1d8b3f8c3654..0fd2354dfd0a 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3260,13 +3260,6 @@ again:
+ 		goto again;
+ 	}
+ 
+-	/* We've already setup this transaction, go ahead and exit */
+-	if (block_group->cache_generation == trans->transid &&
+-	    i_size_read(inode)) {
+-		dcs = BTRFS_DC_SETUP;
+-		goto out_put;
+-	}
+-
+ 	/*
+ 	 * We want to set the generation to 0, that way if anything goes wrong
+ 	 * from here on out we know not to trust this cache when we load up next
+@@ -3290,6 +3283,13 @@ again:
+ 	}
+ 	WARN_ON(ret);
+ 
++	/* We've already setup this transaction, go ahead and exit */
++	if (block_group->cache_generation == trans->transid &&
++	    i_size_read(inode)) {
++		dcs = BTRFS_DC_SETUP;
++		goto out_put;
++	}
++
+ 	if (i_size_read(inode) > 0) {
+ 		ret = btrfs_check_trunc_cache_free_space(root,
+ 					&root->fs_info->global_block_rsv);
+diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
+index 286f10b0363b..4f457d5c4933 100644
+--- a/fs/ecryptfs/messaging.c
++++ b/fs/ecryptfs/messaging.c
+@@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
+ 	}
+ 	if (ecryptfs_daemon_hash) {
+ 		struct ecryptfs_daemon *daemon;
++		struct hlist_node *n;
+ 		int i;
+ 
+ 		mutex_lock(&ecryptfs_daemon_hash_mux);
+ 		for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
+ 			int rc;
+ 
+-			hlist_for_each_entry(daemon,
+-					     &ecryptfs_daemon_hash[i],
+-					     euid_chain) {
++			hlist_for_each_entry_safe(daemon, n,
++						  &ecryptfs_daemon_hash[i],
++						  euid_chain) {
+ 				rc = ecryptfs_exorcise_daemon(daemon);
+ 				if (rc)
+ 					printk(KERN_ERR "%s: Error whilst "
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index a3276bf9ac00..447d64e47c4c 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4795,7 +4795,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	}
+ 
+ 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+-	     offset + len > i_size_read(inode)) {
++	    (offset + len > i_size_read(inode) ||
++	     offset + len > EXT4_I(inode)->i_disksize)) {
+ 		new_size = offset + len;
+ 		ret = inode_newsize_ok(inode, new_size);
+ 		if (ret)
+@@ -4964,7 +4965,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 	}
+ 
+ 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+-	     offset + len > i_size_read(inode)) {
++	    (offset + len > i_size_read(inode) ||
++	     offset + len > EXT4_I(inode)->i_disksize)) {
+ 		new_size = offset + len;
+ 		ret = inode_newsize_ok(inode, new_size);
+ 		if (ret)
+diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
+index 0ac4c1f73fbd..25177e6bd603 100644
+--- a/fs/isofs/isofs.h
++++ b/fs/isofs/isofs.h
+@@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p)
+ 	/* Ignore bigendian datum due to broken mastering programs */
+ 	return get_unaligned_le32(p);
+ }
+-extern int iso_date(char *, int);
++extern int iso_date(u8 *, int);
+ 
+ struct inode;		/* To make gcc happy */
+ 
+diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
+index ed09e2b08637..f835976ce033 100644
+--- a/fs/isofs/rock.h
++++ b/fs/isofs/rock.h
+@@ -65,7 +65,7 @@ struct RR_PL_s {
+ };
+ 
+ struct stamp {
+-	char time[7];
++	__u8 time[7];		/* actually 6 unsigned, 1 signed */
+ } __attribute__ ((packed));
+ 
+ struct RR_TF_s {
+diff --git a/fs/isofs/util.c b/fs/isofs/util.c
+index 005a15cfd30a..37860fea364d 100644
+--- a/fs/isofs/util.c
++++ b/fs/isofs/util.c
+@@ -15,7 +15,7 @@
+  * to GMT.  Thus  we should always be correct.
+  */
+ 
+-int iso_date(char * p, int flag)
++int iso_date(u8 *p, int flag)
+ {
+ 	int year, month, day, hour, minute, second, tz;
+ 	int crtime;
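
The isofs type change is about char signedness: the seven on-disc timestamp bytes hold six unsigned values and one signed timezone, and "years since 1900" reaches 128 in 2028. On an ABI where plain char is signed (x86, for one) that byte sign-extends to a negative year. A demonstration; the first line prints 1772 on a signed-char ABI and 2028 where char is unsigned:

    #include <stdio.h>

    int main(void)
    {
        char          sc = (char)128;    /* year byte for 2028 */
        unsigned char uc = 128;

        printf("plain char: %d\n", 1900 + sc);
        printf("u8        : %d\n", 1900 + uc);
        return 0;
    }
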
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 4227adce3e52..1484ae1907c6 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1279,7 +1279,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
+ 		return 0;
+ 	}
+ 
+-	error = nfs_revalidate_inode(NFS_SERVER(inode), inode);
++	error = nfs_lookup_verify_inode(inode, flags);
+ 	dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
+ 			__func__, inode->i_ino, error ? "invalid" : "valid");
+ 	return !error;
+@@ -1439,6 +1439,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
+ 
+ const struct dentry_operations nfs4_dentry_operations = {
+ 	.d_revalidate	= nfs4_lookup_revalidate,
++	.d_weak_revalidate	= nfs_weak_revalidate,
+ 	.d_delete	= nfs_dentry_delete,
+ 	.d_iput		= nfs_dentry_iput,
+ 	.d_automount	= nfs_d_automount,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index f06af7248be7..1ce18913762a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -242,15 +242,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
+ };
+ 
+ const u32 nfs4_fs_locations_bitmap[3] = {
+-	FATTR4_WORD0_TYPE
+-	| FATTR4_WORD0_CHANGE
++	FATTR4_WORD0_CHANGE
+ 	| FATTR4_WORD0_SIZE
+ 	| FATTR4_WORD0_FSID
+ 	| FATTR4_WORD0_FILEID
+ 	| FATTR4_WORD0_FS_LOCATIONS,
+-	FATTR4_WORD1_MODE
+-	| FATTR4_WORD1_NUMLINKS
+-	| FATTR4_WORD1_OWNER
++	FATTR4_WORD1_OWNER
+ 	| FATTR4_WORD1_OWNER_GROUP
+ 	| FATTR4_WORD1_RAWDEV
+ 	| FATTR4_WORD1_SPACE_USED
+@@ -6212,9 +6209,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+ 				   struct page *page)
+ {
+ 	struct nfs_server *server = NFS_SERVER(dir);
+-	u32 bitmask[3] = {
+-		[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
+-	};
++	u32 bitmask[3];
+ 	struct nfs4_fs_locations_arg args = {
+ 		.dir_fh = NFS_FH(dir),
+ 		.name = name,
+@@ -6233,12 +6228,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
+ 
+ 	dprintk("%s: start\n", __func__);
+ 
++	bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
++	bitmask[1] = nfs4_fattr_bitmap[1];
++
+ 	/* Ask for the fileid of the absent filesystem if mounted_on_fileid
+ 	 * is not supported */
+ 	if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
+-		bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
++		bitmask[0] &= ~FATTR4_WORD0_FILEID;
+ 	else
+-		bitmask[0] |= FATTR4_WORD0_FILEID;
++		bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
+ 
+ 	nfs_fattr_init(&fs_locations->fattr);
+ 	fs_locations->server = server;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index f175b833b6ba..0ee31759ddde 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1319,7 +1319,7 @@ static int nfs_parse_mount_options(char *raw,
+ 			mnt->options |= NFS_OPTION_MIGRATION;
+ 			break;
+ 		case Opt_nomigration:
+-			mnt->options &= NFS_OPTION_MIGRATION;
++			mnt->options &= ~NFS_OPTION_MIGRATION;
+ 			break;
+ 
+ 		/*
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 22e9799323ad..529434f926f1 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -3776,7 +3776,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
+ {
+ 	struct nfs4_stid *ret;
+ 
+-	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
++	ret = find_stateid_by_type(cl, s,
++				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
+ 	if (!ret)
+ 		return NULL;
+ 	return delegstateid(ret);
+@@ -3799,6 +3800,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
+ 	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
+ 	if (deleg == NULL)
+ 		goto out;
++	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
++		nfs4_put_stid(&deleg->dl_stid);
++		if (cl->cl_minorversion)
++			status = nfserr_deleg_revoked;
++		goto out;
++	}
+ 	flags = share_access_to_flags(open->op_share_access);
+ 	status = nfs4_check_delegmode(deleg, flags);
+ 	if (status) {
+@@ -4655,6 +4662,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 		     struct nfs4_stid **s, struct nfsd_net *nn)
+ {
+ 	__be32 status;
++	bool return_revoked = false;
++
++	/*
++	 *  only return revoked delegations if explicitly asked.
++	 *  otherwise we report revoked or bad_stateid status.
++	 */
++	if (typemask & NFS4_REVOKED_DELEG_STID)
++		return_revoked = true;
++	else if (typemask & NFS4_DELEG_STID)
++		typemask |= NFS4_REVOKED_DELEG_STID;
+ 
+ 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ 		return nfserr_bad_stateid;
+@@ -4669,6 +4686,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
+ 	if (!*s)
+ 		return nfserr_bad_stateid;
++	if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
++		nfs4_put_stid(*s);
++		if (cstate->minorversion)
++			return nfserr_deleg_revoked;
++		return nfserr_bad_stateid;
++	}
+ 	return nfs_ok;
+ }
+ 
+diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
+index c6abbad9b8e3..3cdd5cc80c11 100644
+--- a/fs/nilfs2/segment.c
++++ b/fs/nilfs2/segment.c
+@@ -1883,8 +1883,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
+ 					      "failed to get inode block.\n");
+ 				return err;
+ 			}
+-			mark_buffer_dirty(ibh);
+-			nilfs_mdt_mark_dirty(ifile);
+ 			spin_lock(&nilfs->ns_inode_lock);
+ 			if (likely(!ii->i_bh))
+ 				ii->i_bh = ibh;
+@@ -1893,6 +1891,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
+ 			goto retry;
+ 		}
+ 
++		// Always redirty the buffer to avoid race condition
++		mark_buffer_dirty(ii->i_bh);
++		nilfs_mdt_mark_dirty(ifile);
++
+ 		clear_bit(NILFS_I_QUEUED, &ii->i_state);
+ 		set_bit(NILFS_I_BUSY, &ii->i_state);
+ 		list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
+diff --git a/include/linux/netlink.h b/include/linux/netlink.h
+index 6835c1279df7..0a9f48df9c9e 100644
+--- a/include/linux/netlink.h
++++ b/include/linux/netlink.h
+@@ -120,6 +120,7 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
+ struct netlink_callback {
+ 	struct sk_buff		*skb;
+ 	const struct nlmsghdr	*nlh;
++	int			(*start)(struct netlink_callback *);
+ 	int			(*dump)(struct sk_buff * skb,
+ 					struct netlink_callback *cb);
+ 	int			(*done)(struct netlink_callback *cb);
+@@ -142,6 +143,7 @@ struct nlmsghdr *
+ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags);
+ 
+ struct netlink_dump_control {
++	int (*start)(struct netlink_callback *);
+ 	int (*dump)(struct sk_buff *skb, struct netlink_callback *);
+ 	int (*done)(struct netlink_callback *);
+ 	void *data;
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index a9af1cc8c1bc..d76f2da89b5a 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -114,6 +114,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
+  * @flags: flags
+  * @policy: attribute validation policy
+  * @doit: standard command callback
++ * @start: start callback for dumps
+  * @dumpit: callback for dumpers
+  * @done: completion callback for dumps
+  * @ops_list: operations list
+@@ -122,6 +123,7 @@ struct genl_ops {
+ 	const struct nla_policy	*policy;
+ 	int		       (*doit)(struct sk_buff *skb,
+ 				       struct genl_info *info);
++	int		       (*start)(struct netlink_callback *cb);
+ 	int		       (*dumpit)(struct sk_buff *skb,
+ 					 struct netlink_callback *cb);
+ 	int		       (*done)(struct netlink_callback *cb);
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 5664ca07c9c7..a01a076ea060 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv,
+ 	TP_ARGS(rqst, status),
+ 
+ 	TP_STRUCT__entry(
+-		__field(struct sockaddr *, addr)
+ 		__field(__be32, xid)
+ 		__field(int, status)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
+ 		__entry->xid = status > 0 ? rqst->rq_xid : 0;
+ 		__entry->status = status;
+ 		__entry->flags = rqst->rq_flags;
++		memcpy(__get_dynamic_array(addr),
++			&rqst->rq_addr, rqst->rq_addrlen);
+ 	),
+ 
+-	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
++	TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
++			(struct sockaddr *)__get_dynamic_array(addr),
+ 			be32_to_cpu(__entry->xid), __entry->status,
+ 			show_rqstp_flags(__entry->flags))
+ );
+@@ -480,22 +482,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
+ 	TP_ARGS(rqst, status),
+ 
+ 	TP_STRUCT__entry(
+-		__field(struct sockaddr *, addr)
+ 		__field(__be32, xid)
+-		__field(int, dropme)
+ 		__field(int, status)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->addr = (struct sockaddr *)&rqst->rq_addr;
+ 		__entry->xid = rqst->rq_xid;
+ 		__entry->status = status;
+ 		__entry->flags = rqst->rq_flags;
++		memcpy(__get_dynamic_array(addr),
++			&rqst->rq_addr, rqst->rq_addrlen);
+ 	),
+ 
+ 	TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
+-		__entry->addr, be32_to_cpu(__entry->xid),
++		(struct sockaddr *)__get_dynamic_array(addr),
++		be32_to_cpu(__entry->xid),
+ 		__entry->status, show_rqstp_flags(__entry->flags))
+ );
+ 
+diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
+index 22b6ad31c706..8562b1cb776b 100644
+--- a/include/uapi/linux/bcache.h
++++ b/include/uapi/linux/bcache.h
+@@ -90,7 +90,7 @@ PTR_FIELD(PTR_GEN,			0,  8)
+ 
+ #define PTR_CHECK_DEV			((1 << PTR_DEV_BITS) - 1)
+ 
+-#define PTR(gen, offset, dev)						\
++#define MAKE_PTR(gen, offset, dev)					\
+ 	((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+ 
+ /* Bkey utility code */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index f2f8ff54d2c0..8fbedeb5553f 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -577,8 +577,7 @@ void resched_cpu(int cpu)
+ 	struct rq *rq = cpu_rq(cpu);
+ 	unsigned long flags;
+ 
+-	if (!raw_spin_trylock_irqsave(&rq->lock, flags))
+-		return;
++	raw_spin_lock_irqsave(&rq->lock, flags);
+ 	resched_curr(rq);
+ 	raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
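
The scheduler fix drops the trylock from resched_cpu(): if the runqueue lock happened to be contended, the trylock variant returned without setting the resched flag and the request was silently lost. The failure mode, reproduced with a plain pthread mutex (a normal, non-recursive mutex, so trylock from the holding thread reports busy):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int need_resched;

    /* Buggy shape: gives up whenever the lock is contended. */
    static void request_trylock(void)
    {
        if (pthread_mutex_trylock(&lock) != 0)
            return;                  /* request silently dropped */
        need_resched = 1;
        pthread_mutex_unlock(&lock);
    }

    /* Fixed shape: waits, so the request is never lost. */
    static void request_lock(void)
    {
        pthread_mutex_lock(&lock);
        need_resched = 1;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        pthread_mutex_lock(&lock);           /* simulate contention */
        request_trylock();
        pthread_mutex_unlock(&lock);
        printf("after trylock: %d\n", need_resched);    /* 0: lost */

        request_lock();
        printf("after lock   : %d\n", need_resched);    /* 1 */
        return 0;
    }
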
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index bdd6a8dd5797..4aa31c77cbab 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1235,17 +1235,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+ 	VM_BUG_ON_PAGE(!PageHead(page), page);
+ 	if (flags & FOLL_TOUCH) {
+ 		pmd_t _pmd;
+-		/*
+-		 * We should set the dirty bit only for FOLL_WRITE but
+-		 * for now the dirty bit in the pmd is meaningless.
+-		 * And if the dirty bit will become meaningful and
+-		 * we'll only set it with FOLL_WRITE, an atomic
+-		 * set_bit will be required on the pmd to set the
+-		 * young bit, instead of the current set_pmd_at.
+-		 */
+-		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
++		_pmd = pmd_mkyoung(*pmd);
++		if (flags & FOLL_WRITE)
++			_pmd = pmd_mkdirty(_pmd);
+ 		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
+-					  pmd, _pmd,  1))
++					  pmd, _pmd, flags & FOLL_WRITE))
+ 			update_mmu_cache_pmd(vma, addr, pmd);
+ 	}
+ 	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
+diff --git a/mm/madvise.c b/mm/madvise.c
+index d551475517bf..a9c866c9cc0e 100644
+--- a/mm/madvise.c
++++ b/mm/madvise.c
+@@ -221,15 +221,14 @@ static long madvise_willneed(struct vm_area_struct *vma,
+ {
+ 	struct file *file = vma->vm_file;
+ 
++	*prev = vma;
+ #ifdef CONFIG_SWAP
+ 	if (!file) {
+-		*prev = vma;
+ 		force_swapin_readahead(vma, start, end);
+ 		return 0;
+ 	}
+ 
+ 	if (shmem_mapping(file->f_mapping)) {
+-		*prev = vma;
+ 		force_shm_swapin_readahead(vma, start, end,
+ 					file->f_mapping);
+ 		return 0;
+@@ -244,7 +243,6 @@ static long madvise_willneed(struct vm_area_struct *vma,
+ 		return 0;
+ 	}
+ 
+-	*prev = vma;
+ 	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ 	if (end > vma->vm_end)
+ 		end = vma->vm_end;
+diff --git a/net/9p/client.c b/net/9p/client.c
+index fb4ac475311c..77b608dee691 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
+ 	}
+ again:
+ 	/* Wait for the response */
+-	err = wait_event_interruptible(*req->wq,
+-				       req->status >= REQ_STATUS_RCVD);
++	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
+ 
+ 	/*
+ 	 * Make sure our req is coherent with regard to updates in other
+diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
+index 9dd49ca67dbc..202923a36677 100644
+--- a/net/9p/trans_virtio.c
++++ b/net/9p/trans_virtio.c
+@@ -290,8 +290,8 @@ req_retry:
+ 		if (err == -ENOSPC) {
+ 			chan->ring_bufs_avail = 0;
+ 			spin_unlock_irqrestore(&chan->lock, flags);
+-			err = wait_event_interruptible(*chan->vc_wq,
+-							chan->ring_bufs_avail);
++			err = wait_event_killable(*chan->vc_wq,
++						  chan->ring_bufs_avail);
+ 			if (err  == -ERESTARTSYS)
+ 				return err;
+ 
+@@ -331,7 +331,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
+ 		 * Other zc request to finish here
+ 		 */
+ 		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+-			err = wait_event_interruptible(vp_wq,
++			err = wait_event_killable(vp_wq,
+ 			      (atomic_read(&vp_pinned) < chan->p9_max_pages));
+ 			if (err == -ERESTARTSYS)
+ 				return err;
+@@ -475,8 +475,8 @@ req_retry_pinned:
+ 		if (err == -ENOSPC) {
+ 			chan->ring_bufs_avail = 0;
+ 			spin_unlock_irqrestore(&chan->lock, flags);
+-			err = wait_event_interruptible(*chan->vc_wq,
+-						       chan->ring_bufs_avail);
++			err = wait_event_killable(*chan->vc_wq,
++						  chan->ring_bufs_avail);
+ 			if (err  == -ERESTARTSYS)
+ 				goto err_out;
+ 
+@@ -493,8 +493,7 @@ req_retry_pinned:
+ 	virtqueue_kick(chan->vq);
+ 	spin_unlock_irqrestore(&chan->lock, flags);
+ 	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
+-	err = wait_event_interruptible(*req->wq,
+-				       req->status >= REQ_STATUS_RCVD);
++	err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
+ 	/*
+ 	 * Non kernel buffers are pinned, unpin them
+ 	 */
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 897f5941d86b..135fe458bfac 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3056,7 +3056,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
+ 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
+ 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
+ #endif
+-	 } else if (event == NETDEV_UNREGISTER) {
++	 } else if (event == NETDEV_UNREGISTER &&
++		    dev->reg_state != NETREG_UNREGISTERED) {
++		/* NETDEV_UNREGISTER could be fired for multiple times by
++		 * netdev_wait_allrefs(). Make sure we only call this once.
++		 */
+ 		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ 		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index cdf8609a6240..d0f2ce65e474 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -690,7 +690,6 @@ struct ieee80211_if_mesh {
+ 	const struct ieee80211_mesh_sync_ops *sync_ops;
+ 	s64 sync_offset_clockdrift_max;
+ 	spinlock_t sync_offset_lock;
+-	bool adjusting_tbtt;
+ 	/* mesh power save */
+ 	enum nl80211_mesh_power_mode nonpeer_pm;
+ 	int ps_peers_light_sleep;
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index c5af4e3d4497..48257f17688f 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -289,8 +289,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
+ 	/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
+ 	*pos |= ifmsh->ps_peers_deep_sleep ?
+ 			IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
+-	*pos++ |= ifmsh->adjusting_tbtt ?
+-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
+ 	*pos++ = 0x00;
+ 
+ 	return 0;
+@@ -792,7 +790,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
+ 	ifmsh->mesh_cc_id = 0;	/* Disabled */
+ 	/* register sync ops from extensible synchronization framework */
+ 	ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
+-	ifmsh->adjusting_tbtt = false;
+ 	ifmsh->sync_offset_clockdrift_max = 0;
+ 	set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
+ 	ieee80211_mesh_root_setup(ifmsh);
+diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
+index 60d737f144e3..7f758c9aa200 100644
+--- a/net/mac80211/mesh_plink.c
++++ b/net/mac80211/mesh_plink.c
+@@ -452,12 +452,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
+ 
+ 	/* Userspace handles station allocation */
+ 	if (sdata->u.mesh.user_mpm ||
+-	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
+-		cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+-						   elems->ie_start,
+-						   elems->total_len,
+-						   GFP_KERNEL);
+-	else
++	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
++		if (mesh_peer_accepts_plinks(elems) &&
++		    mesh_plink_availables(sdata))
++			cfg80211_notify_new_peer_candidate(sdata->dev, addr,
++							   elems->ie_start,
++							   elems->total_len,
++							   GFP_KERNEL);
++	} else
+ 		sta = __mesh_sta_info_alloc(sdata, addr);
+ 
+ 	return sta;
+diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
+index 09625d6205c3..6e8ece73bfa6 100644
+--- a/net/mac80211/mesh_sync.c
++++ b/net/mac80211/mesh_sync.c
+@@ -119,7 +119,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
+ 	 */
+ 
+ 	if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
+-		clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
+ 		msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
+ 			  sta->sta.addr);
+ 		goto no_sync;
+@@ -168,11 +167,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
+ 					 struct beacon_data *beacon)
+ {
+ 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	u8 cap;
+ 
+ 	WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
+ 	WARN_ON(!rcu_read_lock_held());
+-	cap = beacon->meshconf->meshconf_cap;
+ 
+ 	spin_lock_bh(&ifmsh->sync_offset_lock);
+ 
+@@ -186,21 +183,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
+ 			  "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
+ 			  ifmsh->sync_offset_clockdrift_max);
+ 		set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
+-
+-		ifmsh->adjusting_tbtt = true;
+ 	} else {
+ 		msync_dbg(sdata,
+ 			  "TBTT : max clockdrift=%lld; too small to adjust\n",
+ 			  (long long)ifmsh->sync_offset_clockdrift_max);
+ 		ifmsh->sync_offset_clockdrift_max = 0;
+-
+-		ifmsh->adjusting_tbtt = false;
+ 	}
+ 	spin_unlock_bh(&ifmsh->sync_offset_lock);
+-
+-	beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
+-			IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
+-			~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
+ }
+ 
+ static const struct sync_method sync_methods[] = {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 34ded09317e7..792f6637c83e 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -1925,7 +1925,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
+ 	 * is called on error from nf_tables_newrule().
+ 	 */
+ 	expr = nft_expr_first(rule);
+-	while (expr->ops && expr != nft_expr_last(rule)) {
++	while (expr != nft_expr_last(rule) && expr->ops) {
+ 		nf_tables_expr_destroy(ctx, expr);
+ 		expr = nft_expr_next(expr);
+ 	}
+diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
+index 96805d21d618..b204b7db515f 100644
+--- a/net/netfilter/nft_queue.c
++++ b/net/netfilter/nft_queue.c
+@@ -37,7 +37,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
+ 
+ 	if (priv->queues_total > 1) {
+ 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
+-			int cpu = smp_processor_id();
++			int cpu = raw_smp_processor_id();
+ 
+ 			queue = priv->queuenum + cpu % priv->queues_total;
+ 		} else {
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 1c0d4aee783d..a5815be7c81c 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2821,6 +2821,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 
+ 	cb = &nlk->cb;
+ 	memset(cb, 0, sizeof(*cb));
++	cb->start = control->start;
+ 	cb->dump = control->dump;
+ 	cb->done = control->done;
+ 	cb->nlh = nlh;
+@@ -2834,6 +2835,9 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+ 
++	if (cb->start)
++		cb->start(cb);
++
+ 	ret = netlink_dump(sk);
+ 	sock_put(sk);
+ 
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 2ed5f964772e..3d111b053e3e 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -513,6 +513,20 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
+ }
+ EXPORT_SYMBOL(genlmsg_put);
+ 
++static int genl_lock_start(struct netlink_callback *cb)
++{
++	/* our ops are always const - netlink API doesn't propagate that */
++	const struct genl_ops *ops = cb->data;
++	int rc = 0;
++
++	if (ops->start) {
++		genl_lock();
++		rc = ops->start(cb);
++		genl_unlock();
++	}
++	return rc;
++}
++
+ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	/* our ops are always const - netlink API doesn't propagate that */
+@@ -577,6 +591,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
+ 				.module = family->module,
+ 				/* we have const, but the netlink API doesn't */
+ 				.data = (void *)ops,
++				.start = genl_lock_start,
+ 				.dump = genl_lock_dumpit,
+ 				.done = genl_lock_done,
+ 			};
+@@ -588,6 +603,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
+ 		} else {
+ 			struct netlink_dump_control c = {
+ 				.module = family->module,
++				.start = ops->start,
+ 				.dump = ops->dumpit,
+ 				.done = ops->done,
+ 			};
+diff --git a/net/nfc/core.c b/net/nfc/core.c
+index 54596f609d04..ab4b1e1d186d 100644
+--- a/net/nfc/core.c
++++ b/net/nfc/core.c
+@@ -1093,7 +1093,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+ err_free_dev:
+ 	kfree(dev);
+ 
+-	return ERR_PTR(rc);
++	return NULL;
+ }
+ EXPORT_SYMBOL(nfc_allocate_device);
+ 
+diff --git a/net/rds/send.c b/net/rds/send.c
+index eba0eaf4cc38..37730bfa393f 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -932,6 +932,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
+ 			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
+ 			if (!ret)
+ 				*allocated_mr = 1;
++			else if (ret == -ENODEV)
++				/* Accommodate the get_mr() case which can fail
++				 * if connection isn't established yet.
++				 */
++				ret = -EAGAIN;
+ 			break;
+ 		case RDS_CMSG_ATOMIC_CSWP:
+ 		case RDS_CMSG_ATOMIC_FADD:
+@@ -1039,8 +1044,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 
+ 	/* Parse any control messages the user may have included. */
+ 	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
+-	if (ret)
++	if (ret) {
++		/* Trigger connection so that its ready for the next retry */
++		if (ret ==  -EAGAIN)
++			rds_conn_connect_if_down(conn);
+ 		goto out;
++	}
+ 
+ 	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
+ 		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index e1c69b216db3..dfc542ecf9c4 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1209,10 +1209,14 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 		if (signal_pending(current)) {
+ 			err = sock_intr_errno(timeout);
+-			goto out_wait_error;
++			sk->sk_state = SS_UNCONNECTED;
++			sock->state = SS_UNCONNECTED;
++			goto out_wait;
+ 		} else if (timeout == 0) {
+ 			err = -ETIMEDOUT;
+-			goto out_wait_error;
++			sk->sk_state = SS_UNCONNECTED;
++			sock->state = SS_UNCONNECTED;
++			goto out_wait;
+ 		}
+ 
+ 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+@@ -1220,20 +1224,17 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
+ 
+ 	if (sk->sk_err) {
+ 		err = -sk->sk_err;
+-		goto out_wait_error;
+-	} else
++		sk->sk_state = SS_UNCONNECTED;
++		sock->state = SS_UNCONNECTED;
++	} else {
+ 		err = 0;
++	}
+ 
+ out_wait:
+ 	finish_wait(sk_sleep(sk), &wait);
+ out:
+ 	release_sock(sk);
+ 	return err;
+-
+-out_wait_error:
+-	sk->sk_state = SS_UNCONNECTED;
+-	sock->state = SS_UNCONNECTED;
+-	goto out_wait;
+ }
+ 
+ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+@@ -1270,18 +1271,20 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+ 	       listener->sk_err == 0) {
+ 		release_sock(listener);
+ 		timeout = schedule_timeout(timeout);
++		finish_wait(sk_sleep(listener), &wait);
+ 		lock_sock(listener);
+ 
+ 		if (signal_pending(current)) {
+ 			err = sock_intr_errno(timeout);
+-			goto out_wait;
++			goto out;
+ 		} else if (timeout == 0) {
+ 			err = -EAGAIN;
+-			goto out_wait;
++			goto out;
+ 		}
+ 
+ 		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
+ 	}
++	finish_wait(sk_sleep(listener), &wait);
+ 
+ 	if (listener->sk_err)
+ 		err = -listener->sk_err;
+@@ -1301,19 +1304,15 @@ static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
+ 		 */
+ 		if (err) {
+ 			vconnected->rejected = true;
+-			release_sock(connected);
+-			sock_put(connected);
+-			goto out_wait;
++		} else {
++			newsock->state = SS_CONNECTED;
++			sock_graft(connected, newsock);
+ 		}
+ 
+-		newsock->state = SS_CONNECTED;
+-		sock_graft(connected, newsock);
+ 		release_sock(connected);
+ 		sock_put(connected);
+ 	}
+ 
+-out_wait:
+-	finish_wait(sk_sleep(listener), &wait);
+ out:
+ 	release_sock(listener);
+ 	return err;
+@@ -1513,8 +1512,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	long timeout;
+ 	int err;
+ 	struct vsock_transport_send_notify_data send_data;
+-
+-	DEFINE_WAIT(wait);
+-	DEFINE_WAIT(wait);
+ 
+ 	sk = sock->sk;
+ 	vsk = vsock_sk(sk);
+@@ -1557,11 +1555,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	if (err < 0)
+ 		goto out;
+ 
+-	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+-
+ 	while (total_written < len) {
+ 		ssize_t written;
+ 
++		add_wait_queue(sk_sleep(sk), &wait);
+ 		while (vsock_stream_has_space(vsk) == 0 &&
+ 		       sk->sk_err == 0 &&
+ 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
+@@ -1570,27 +1567,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			/* Don't wait for non-blocking sockets. */
+ 			if (timeout == 0) {
+ 				err = -EAGAIN;
+-				goto out_wait;
++				remove_wait_queue(sk_sleep(sk), &wait);
++				goto out_err;
+ 			}
+ 
+ 			err = transport->notify_send_pre_block(vsk, &send_data);
+-			if (err < 0)
+-				goto out_wait;
++			if (err < 0) {
++				remove_wait_queue(sk_sleep(sk), &wait);
++				goto out_err;
++			}
+ 
+ 			release_sock(sk);
+-			timeout = schedule_timeout(timeout);
++			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
+ 			lock_sock(sk);
+ 			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeout);
+-				goto out_wait;
++				remove_wait_queue(sk_sleep(sk), &wait);
++				goto out_err;
+ 			} else if (timeout == 0) {
+ 				err = -EAGAIN;
+-				goto out_wait;
++				remove_wait_queue(sk_sleep(sk), &wait);
++				goto out_err;
+ 			}
+-
+-			prepare_to_wait(sk_sleep(sk), &wait,
+-					TASK_INTERRUPTIBLE);
+ 		}
++		remove_wait_queue(sk_sleep(sk), &wait);
+ 
+ 		/* These checks occur both as part of and after the loop
+ 		 * conditional since we need to check before and after
+@@ -1598,16 +1598,16 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 		 */
+ 		if (sk->sk_err) {
+ 			err = -sk->sk_err;
+-			goto out_wait;
++			goto out_err;
+ 		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
+ 			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
+ 			err = -EPIPE;
+-			goto out_wait;
++			goto out_err;
+ 		}
+ 
+ 		err = transport->notify_send_pre_enqueue(vsk, &send_data);
+ 		if (err < 0)
+-			goto out_wait;
++			goto out_err;
+ 
+ 		/* Note that enqueue will only write as many bytes as are free
+ 		 * in the produce queue, so we don't need to ensure len is
+@@ -1620,7 +1620,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 				len - total_written);
+ 		if (written < 0) {
+ 			err = -ENOMEM;
+-			goto out_wait;
++			goto out_err;
+ 		}
+ 
+ 		total_written += written;
+@@ -1628,14 +1628,13 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ 		err = transport->notify_send_post_enqueue(
+ 				vsk, written, &send_data);
+ 		if (err < 0)
+-			goto out_wait;
++			goto out_err;
+ 
+ 	}
+ 
+-out_wait:
++out_err:
+ 	if (total_written > 0)
+ 		err = total_written;
+-	finish_wait(sk_sleep(sk), &wait);
+ out:
+ 	release_sock(sk);
+ 	return err;
+@@ -1716,21 +1715,61 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	if (err < 0)
+ 		goto out;
+ 
+-	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ 
+ 	while (1) {
+-		s64 ready = vsock_stream_has_data(vsk);
++		s64 ready;
+ 
+-		if (ready < 0) {
+-			/* Invalid queue pair content. XXX This should be
+-			 * changed to a connection reset in a later change.
+-			 */
++		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
++		ready = vsock_stream_has_data(vsk);
+ 
+-			err = -ENOMEM;
+-			goto out_wait;
+-		} else if (ready > 0) {
++		if (ready == 0) {
++			if (sk->sk_err != 0 ||
++			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
++			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
++				finish_wait(sk_sleep(sk), &wait);
++				break;
++			}
++			/* Don't wait for non-blocking sockets. */
++			if (timeout == 0) {
++				err = -EAGAIN;
++				finish_wait(sk_sleep(sk), &wait);
++				break;
++			}
++
++			err = transport->notify_recv_pre_block(
++					vsk, target, &recv_data);
++			if (err < 0) {
++				finish_wait(sk_sleep(sk), &wait);
++				break;
++			}
++			release_sock(sk);
++			timeout = schedule_timeout(timeout);
++			lock_sock(sk);
++
++			if (signal_pending(current)) {
++				err = sock_intr_errno(timeout);
++				finish_wait(sk_sleep(sk), &wait);
++				break;
++			} else if (timeout == 0) {
++				err = -EAGAIN;
++				finish_wait(sk_sleep(sk), &wait);
++				break;
++			}
++		} else {
+ 			ssize_t read;
+ 
++			finish_wait(sk_sleep(sk), &wait);
++
++			if (ready < 0) {
++				/* Invalid queue pair content. XXX This should
++				* be changed to a connection reset in a later
++				* change.
++				*/
++
++				err = -ENOMEM;
++				goto out;
++			}
++
+ 			err = transport->notify_recv_pre_dequeue(
+ 					vsk, target, &recv_data);
+ 			if (err < 0)
+@@ -1750,42 +1789,12 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 					vsk, target, read,
+ 					!(flags & MSG_PEEK), &recv_data);
+ 			if (err < 0)
+-				goto out_wait;
++				goto out;
+ 
+ 			if (read >= target || flags & MSG_PEEK)
+ 				break;
+ 
+ 			target -= read;
+-		} else {
+-			if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
+-			    || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
+-				break;
+-			}
+-			/* Don't wait for non-blocking sockets. */
+-			if (timeout == 0) {
+-				err = -EAGAIN;
+-				break;
+-			}
+-
+-			err = transport->notify_recv_pre_block(
+-					vsk, target, &recv_data);
+-			if (err < 0)
+-				break;
+-
+-			release_sock(sk);
+-			timeout = schedule_timeout(timeout);
+-			lock_sock(sk);
+-
+-			if (signal_pending(current)) {
+-				err = sock_intr_errno(timeout);
+-				break;
+-			} else if (timeout == 0) {
+-				err = -EAGAIN;
+-				break;
+-			}
+-
+-			prepare_to_wait(sk_sleep(sk), &wait,
+-					TASK_INTERRUPTIBLE);
+ 		}
+ 	}
+ 
+@@ -1797,8 +1806,6 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	if (copied > 0)
+ 		err = copied;
+ 
+-out_wait:
+-	finish_wait(sk_sleep(sk), &wait);
+ out:
+ 	release_sock(sk);
+ 	return err;
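
A note on the vsock wait-loop rework above: the old code armed the wait once with prepare_to_wait() outside the loop, so a wakeup landing between the condition check and schedule_timeout() could be missed, and error paths could leave the entry queued. The connect/accept/recvmsg paths now re-arm inside the loop and always finish_wait(), while sendmsg switches to wait_woken(), which keeps the entry on the queue for the whole loop and latches wakeups in the entry itself. A minimal sketch of the wait_woken() shape, with the vsock condition replaced by a hypothetical ready() helper:

	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(sk_sleep(sk), &wait);
	while (!ready(sk)) {
		/* A wakeup arriving here is recorded in the entry, so
		 * wait_woken() returns immediately instead of sleeping
		 * through it. */
		release_sock(sk);
		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
		lock_sock(sk);
		if (signal_pending(current) || timeout == 0)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
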
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 2091664295ba..18d9cf2eb648 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1622,32 +1622,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
+ 
+ static int xfrm_dump_policy_done(struct netlink_callback *cb)
+ {
+-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+ 	struct net *net = sock_net(cb->skb->sk);
+ 
+ 	xfrm_policy_walk_done(walk, net);
+ 	return 0;
+ }
+ 
++static int xfrm_dump_policy_start(struct netlink_callback *cb)
++{
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
++
++	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
++
++	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
++	return 0;
++}
++
+ static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
+ {
+ 	struct net *net = sock_net(skb->sk);
+-	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
++	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+ 	struct xfrm_dump_info info;
+ 
+-	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
+-		     sizeof(cb->args) - sizeof(cb->args[0]));
+-
+ 	info.in_skb = cb->skb;
+ 	info.out_skb = skb;
+ 	info.nlmsg_seq = cb->nlh->nlmsg_seq;
+ 	info.nlmsg_flags = NLM_F_MULTI;
+ 
+-	if (!cb->args[0]) {
+-		cb->args[0] = 1;
+-		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+-	}
+-
+ 	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
+ 
+ 	return skb->len;
+@@ -2383,6 +2385,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
+ 
+ static const struct xfrm_link {
+ 	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
++	int (*start)(struct netlink_callback *);
+ 	int (*dump)(struct sk_buff *, struct netlink_callback *);
+ 	int (*done)(struct netlink_callback *);
+ 	const struct nla_policy *nla_pol;
+@@ -2396,6 +2399,7 @@ static const struct xfrm_link {
+ 	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
+ 	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
+ 	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
++						   .start = xfrm_dump_policy_start,
+ 						   .dump = xfrm_dump_policy,
+ 						   .done = xfrm_dump_policy_done },
+ 	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
+@@ -2447,6 +2451,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 
+ 		{
+ 			struct netlink_dump_control c = {
++				.start = link->start,
+ 				.dump = link->dump,
+ 				.done = link->done,
+ 			};
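
The xfrm_user.c change is worth calling out: initialising the policy walker lazily inside the first .dump call, keyed off cb->args[0], left a window where a dump that never ran still had its .done callback tear down an uninitialised walker. Moving setup into the netlink .start callback makes the lifecycle explicit: start runs exactly once before any dump, so done can always assume an initialised walker. Roughly, with illustrative my_* names:

	static int my_dump_start(struct netlink_callback *cb)
	{
		struct my_walk *walk = (struct my_walk *)cb->args;

		/* The walker state lives directly in cb->args. */
		BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
		my_walk_init(walk);
		return 0;
	}

	struct netlink_dump_control c = {
		.start = my_dump_start,	/* once, before the first .dump */
		.dump  = my_dump,
		.done  = my_dump_done,
	};
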
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 253a2da05cf0..9a7cc9a56a21 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -264,8 +264,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
+ 				runtime->rate);
+ 		*audio_tstamp = ns_to_timespec(audio_nsecs);
+ 	}
+-	runtime->status->audio_tstamp = *audio_tstamp;
+-	runtime->status->tstamp = *curr_tstamp;
++	if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
++		runtime->status->audio_tstamp = *audio_tstamp;
++		runtime->status->tstamp = *curr_tstamp;
++	}
+ 
+ 	/*
+ 	 * re-take a driver timestamp to let apps detect if the reference tstamp
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index 0b4b028e8e98..de9155eed727 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -40,11 +40,11 @@ static int snd_timer_user_info_compat(struct file *file,
+ 	struct snd_timer *t;
+ 
+ 	tu = file->private_data;
+-	if (snd_BUG_ON(!tu->timeri))
+-		return -ENXIO;
++	if (!tu->timeri)
++		return -EBADFD;
+ 	t = tu->timeri->timer;
+-	if (snd_BUG_ON(!t))
+-		return -ENXIO;
++	if (!t)
++		return -EBADFD;
+ 	memset(&info, 0, sizeof(info));
+ 	info.card = t->card ? t->card->number : -1;
+ 	if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
+@@ -73,8 +73,8 @@ static int snd_timer_user_status_compat(struct file *file,
+ 	struct snd_timer_status32 status;
+ 	
+ 	tu = file->private_data;
+-	if (snd_BUG_ON(!tu->timeri))
+-		return -ENXIO;
++	if (!tu->timeri)
++		return -EBADFD;
+ 	memset(&status, 0, sizeof(status));
+ 	status.tstamp.tv_sec = tu->tstamp.tv_sec;
+ 	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 68bd0ba8bab8..b8886d493083 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2126,6 +2126,9 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* AMD Hudson */
+ 	{ PCI_DEVICE(0x1022, 0x780d),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
++	/* AMD Raven */
++	{ PCI_DEVICE(0x1022, 0x15e3),
++	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ 	/* ATI HDMI */
+ 	{ PCI_DEVICE(0x1002, 0x0002),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 55601ce89e0f..75c4e14f4156 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4297,7 +4297,7 @@ static void alc_no_shutup(struct hda_codec *codec)
+ static void alc_fixup_no_shutup(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+-	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++	if (action == HDA_FIXUP_ACT_PROBE) {
+ 		struct alc_spec *spec = codec->spec;
+ 		spec->shutup = alc_no_shutup;
+ 	}
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index 431d94397219..21a2f1bd7369 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -533,7 +533,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
+ 	const struct wmfw_region *region;
+ 	const struct wm_adsp_region *mem;
+ 	const char *region_name;
+-	char *file, *text;
++	char *file, *text = NULL;
+ 	struct wm_adsp_buf *buf;
+ 	unsigned int reg;
+ 	int regions = 0;
+@@ -678,10 +678,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
+ 			 regions, le32_to_cpu(region->len), offset,
+ 			 region_name);
+ 
++		if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
++		    firmware->size) {
++			adsp_err(dsp,
++				 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
++				 file, regions, region_name,
++				 le32_to_cpu(region->len), firmware->size);
++			ret = -EINVAL;
++			goto out_fw;
++		}
++
+ 		if (text) {
+ 			memcpy(text, region->data, le32_to_cpu(region->len));
+ 			adsp_info(dsp, "%s: %s\n", file, text);
+ 			kfree(text);
++			text = NULL;
+ 		}
+ 
+ 		if (reg) {
+@@ -724,6 +735,7 @@ out_fw:
+ 	regmap_async_complete(regmap);
+ 	wm_adsp_buf_free(&buf_list);
+ 	release_firmware(firmware);
++	kfree(text);
+ out:
+ 	kfree(file);
+ 
+@@ -1312,6 +1324,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
+ 		}
+ 
+ 		if (reg) {
++			if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
++			    firmware->size) {
++				adsp_err(dsp,
++					 "%s.%d: %s region len %d bytes exceeds file length %zu\n",
++					 file, blocks, region_name,
++					 le32_to_cpu(blk->len),
++					 firmware->size);
++				ret = -EINVAL;
++				goto out_fw;
++			}
++
+ 			buf = wm_adsp_buf_alloc(blk->data,
+ 						le32_to_cpu(blk->len),
+ 						&buf_list);
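
Both wm_adsp hunks apply the same rule: a length field parsed out of a firmware blob is untrusted, so the region it describes must be proven to lie inside the loaded file before anything copies from it. (The companion leak fix in the same function, resetting text to NULL after each kfree() and freeing it once more at out_fw, closes the path where an error exits the loop with a live allocation.) A standalone sketch of the bounds check, using a hypothetical region header:

	#include <stddef.h>
	#include <stdint.h>

	struct region { uint32_t len; /* header, followed by len data bytes */ };

	/* True if a region starting at pos fits entirely inside fw_size. */
	static int region_fits(size_t pos, uint32_t len, size_t fw_size)
	{
		size_t hdr = sizeof(struct region);

		/* pos + hdr + len <= fw_size, written so it cannot overflow */
		if (pos > fw_size || hdr > fw_size - pos)
			return 0;
		return len <= fw_size - pos - hdr;
	}
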
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 9f48d75fa992..9a198248e6cb 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -850,10 +850,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
+ 		return -ENOMEM;
+ 
+ 	ret = snd_ctl_add(card, kctrl);
+-	if (ret < 0) {
+-		snd_ctl_free_one(kctrl);
++	if (ret < 0)
+ 		return ret;
+-	}
+ 
+ 	cfg->update = update;
+ 	cfg->card = card;
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 7ccbcaf6a147..66294eb64501 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
+ 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
+ 					     ctrl_iface->extralen,
+ 					     cs, UAC2_CLOCK_SOURCE))) {
+-		if (cs->bClockID == clock_id)
++		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
+ 			return cs;
+ 	}
+ 
+@@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
+ 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
+ 					     ctrl_iface->extralen,
+ 					     cs, UAC2_CLOCK_SELECTOR))) {
+-		if (cs->bClockID == clock_id)
++		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
++			if (cs->bLength < 5 + cs->bNrInPins)
++				return NULL;
+ 			return cs;
++		}
+ 	}
+ 
+ 	return NULL;
+@@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
+ 	while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
+ 					     ctrl_iface->extralen,
+ 					     cs, UAC2_CLOCK_MULTIPLIER))) {
+-		if (cs->bClockID == clock_id)
++		if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
+ 			return cs;
+ 	}
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index cb0ef39adc36..3ebfaa527825 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1391,6 +1391,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
+ 	__u8 *bmaControls;
+ 
+ 	if (state->mixer->protocol == UAC_VERSION_1) {
++		if (hdr->bLength < 7) {
++			usb_audio_err(state->chip,
++				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
++				      unitid);
++			return -EINVAL;
++		}
+ 		csize = hdr->bControlSize;
+ 		if (!csize) {
+ 			usb_audio_dbg(state->chip,
+@@ -1408,6 +1414,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
+ 		}
+ 	} else {
+ 		struct uac2_feature_unit_descriptor *ftr = _ftr;
++		if (hdr->bLength < 6) {
++			usb_audio_err(state->chip,
++				      "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
++				      unitid);
++			return -EINVAL;
++		}
+ 		csize = 4;
+ 		channels = (hdr->bLength - 6) / 4 - 1;
+ 		bmaControls = ftr->bmaControls;
+@@ -2008,7 +2020,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
+ 	const struct usbmix_name_map *map;
+ 	char **namelist;
+ 
+-	if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
++	if (desc->bLength < 5 || !desc->bNrInPins ||
++	    desc->bLength < 5 + desc->bNrInPins) {
+ 		usb_audio_err(state->chip,
+ 			"invalid SELECTOR UNIT descriptor %d\n", unitid);
+ 		return -EINVAL;
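
The usb-audio hunks above, in clock.c and mixer.c alike, enforce one invariant: a class-specific descriptor's bLength arrives from the device, so every fixed field, and every variable-length tail such as the bNrInPins pin array, must be checked against it before being dereferenced. A generic sketch of a descriptor walk with that check, types simplified:

	#include <stddef.h>
	#include <stdint.h>

	struct desc_hdr { uint8_t bLength; uint8_t bDescriptorType; };

	/* Advance through raw descriptor bytes, rejecting any descriptor
	 * that claims to be shorter than its header or to extend past
	 * the end of the buffer. */
	static const struct desc_hdr *next_desc(const uint8_t *buf, size_t len,
						size_t *pos)
	{
		const struct desc_hdr *h;

		if (*pos > len || len - *pos < sizeof(*h))
			return NULL;
		h = (const void *)(buf + *pos);
		if (h->bLength < sizeof(*h) || h->bLength > len - *pos)
			return NULL;
		*pos += h->bLength;
		return h;
	}
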


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-12-08 14:48 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-12-08 14:48 UTC (permalink / raw
  To: gentoo-commits

commit:     ca5583f1a2b0ded411d4436cb5f0a2e6b431d5cd
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Dec  8 14:48:13 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Dec  8 14:48:13 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ca5583f1

Removal of redundant patchset

 0000_README                             |   4 -
 2400_BT-check-L2CAP-buffer-length.patch | 357 --------------------------------
 2 files changed, 361 deletions(-)

diff --git a/0000_README b/0000_README
index 85f04e7..b37932b 100644
--- a/0000_README
+++ b/0000_README
@@ -247,10 +247,6 @@ Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.
 
-Patch:  2400_BT-check-L2CAP-buffer-length.patch
-From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
-Desc:   Validate the output buffer length for L2CAP config reqs and resps to avoid stack buffer overflowing. CVE-2017-1000251. See bug #630840
-
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
deleted file mode 100644
index c6bfdf7..0000000
--- a/2400_BT-check-L2CAP-buffer-length.patch
+++ /dev/null
@@ -1,357 +0,0 @@
-From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
-From: Ben Seri <ben@armis.com>
-Date: Sat, 9 Sep 2017 23:15:59 +0200
-Subject: Bluetooth: Properly check L2CAP config option output buffer length
-
-Validate the output buffer length for L2CAP config requests and responses
-to avoid overflowing the stack buffer used for building the option blocks.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Ben Seri <ben@armis.com>
-Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
-Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
----
- net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
- 1 file changed, 43 insertions(+), 37 deletions(-)
-
-diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
-index 303c779..43ba91c 100644
---- a/net/bluetooth/l2cap_core.c
-+++ b/net/bluetooth/l2cap_core.c
-@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- 				       u8 code, u8 ident, u16 dlen, void *data);
- static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
- 			   void *data);
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
- static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
- 
- static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
-@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
- 
- 			set_bit(CONF_REQ_SENT, &chan->conf_state);
- 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--				       l2cap_build_conf_req(chan, buf), buf);
-+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 			chan->num_conf_req++;
- 		}
- 
-@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
- 	return len;
- }
- 
--static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
-+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
- {
- 	struct l2cap_conf_opt *opt = *ptr;
- 
- 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
- 
-+	if (size < L2CAP_CONF_OPT_SIZE + len)
-+		return;
-+
- 	opt->type = type;
- 	opt->len  = len;
- 
-@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
- 	*ptr += L2CAP_CONF_OPT_SIZE + len;
- }
- 
--static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
-+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
- {
- 	struct l2cap_conf_efs efs;
- 
-@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
- 	}
- 
- 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
--			   (unsigned long) &efs);
-+			   (unsigned long) &efs, size);
- }
- 
- static void l2cap_ack_timeout(struct work_struct *work)
-@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
- 	chan->ack_win = chan->tx_win;
- }
- 
--static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- 	struct l2cap_conf_req *req = data;
- 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
- 	void *ptr = req->data;
-+	void *endptr = data + data_size;
- 	u16 size;
- 
- 	BT_DBG("chan %p", chan);
-@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
- 
- done:
- 	if (chan->imtu != L2CAP_DEFAULT_MTU)
--		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
- 
- 	switch (chan->mode) {
- 	case L2CAP_MODE_BASIC:
-@@ -3239,7 +3243,7 @@ done:
- 		rfc.max_pdu_size    = 0;
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 		break;
- 
- 	case L2CAP_MODE_ERTM:
-@@ -3259,21 +3263,21 @@ done:
- 				       L2CAP_DEFAULT_TX_WINDOW);
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 
- 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
--			l2cap_add_opt_efs(&ptr, chan);
-+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
- 
- 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
--					   chan->tx_win);
-+					   chan->tx_win, endptr - ptr);
- 
- 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- 			if (chan->fcs == L2CAP_FCS_NONE ||
- 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- 				chan->fcs = L2CAP_FCS_NONE;
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
--						   chan->fcs);
-+						   chan->fcs, endptr - ptr);
- 			}
- 		break;
- 
-@@ -3291,17 +3295,17 @@ done:
- 		rfc.max_pdu_size = cpu_to_le16(size);
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 
- 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
--			l2cap_add_opt_efs(&ptr, chan);
-+			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
- 
- 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
- 			if (chan->fcs == L2CAP_FCS_NONE ||
- 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
- 				chan->fcs = L2CAP_FCS_NONE;
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
--						   chan->fcs);
-+						   chan->fcs, endptr - ptr);
- 			}
- 		break;
- 	}
-@@ -3312,10 +3316,11 @@ done:
- 	return ptr - data;
- }
- 
--static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
-+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
- {
- 	struct l2cap_conf_rsp *rsp = data;
- 	void *ptr = rsp->data;
-+	void *endptr = data + data_size;
- 	void *req = chan->conf_req;
- 	int len = chan->conf_len;
- 	int type, hint, olen;
-@@ -3417,7 +3422,7 @@ done:
- 			return -ECONNREFUSED;
- 
- 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--				   (unsigned long) &rfc);
-+				   (unsigned long) &rfc, endptr - ptr);
- 	}
- 
- 	if (result == L2CAP_CONF_SUCCESS) {
-@@ -3430,7 +3435,7 @@ done:
- 			chan->omtu = mtu;
- 			set_bit(CONF_MTU_DONE, &chan->conf_state);
- 		}
--		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
-+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
- 
- 		if (remote_efs) {
- 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
-@@ -3444,7 +3449,7 @@ done:
- 
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- 						   sizeof(efs),
--						   (unsigned long) &efs);
-+						   (unsigned long) &efs, endptr - ptr);
- 			} else {
- 				/* Send PENDING Conf Rsp */
- 				result = L2CAP_CONF_PENDING;
-@@ -3477,7 +3482,7 @@ done:
- 			set_bit(CONF_MODE_DONE, &chan->conf_state);
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
--					   sizeof(rfc), (unsigned long) &rfc);
-+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- 
- 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
- 				chan->remote_id = efs.id;
-@@ -3491,7 +3496,7 @@ done:
- 					le32_to_cpu(efs.sdu_itime);
- 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
- 						   sizeof(efs),
--						   (unsigned long) &efs);
-+						   (unsigned long) &efs, endptr - ptr);
- 			}
- 			break;
- 
-@@ -3505,7 +3510,7 @@ done:
- 			set_bit(CONF_MODE_DONE, &chan->conf_state);
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
--					   (unsigned long) &rfc);
-+					   (unsigned long) &rfc, endptr - ptr);
- 
- 			break;
- 
-@@ -3527,10 +3532,11 @@ done:
- }
- 
- static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
--				void *data, u16 *result)
-+				void *data, size_t size, u16 *result)
- {
- 	struct l2cap_conf_req *req = data;
- 	void *ptr = req->data;
-+	void *endptr = data + size;
- 	int type, olen;
- 	unsigned long val;
- 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
-@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
- 			} else
- 				chan->imtu = val;
--			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
-+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_FLUSH_TO:
- 			chan->flush_to = val;
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
--					   2, chan->flush_to);
-+					   2, chan->flush_to, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_RFC:
-@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 			chan->fcs = 0;
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
--					   sizeof(rfc), (unsigned long) &rfc);
-+					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_EWS:
- 			chan->ack_win = min_t(u16, val, chan->ack_win);
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
--					   chan->tx_win);
-+					   chan->tx_win, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_EFS:
-@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- 				return -ECONNREFUSED;
- 
- 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
--					   (unsigned long) &efs);
-+					   (unsigned long) &efs, endptr - ptr);
- 			break;
- 
- 		case L2CAP_CONF_FCS:
-@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
- 		return;
- 
- 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--		       l2cap_build_conf_req(chan, buf), buf);
-+		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 	chan->num_conf_req++;
- }
- 
-@@ -3900,7 +3906,7 @@ sendresp:
- 		u8 buf[128];
- 		set_bit(CONF_REQ_SENT, &chan->conf_state);
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, buf), buf);
-+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 		chan->num_conf_req++;
- 	}
- 
-@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
- 			break;
- 
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, req), req);
-+			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
- 		chan->num_conf_req++;
- 		break;
- 
-@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- 	}
- 
- 	/* Complete config. */
--	len = l2cap_parse_conf_req(chan, rsp);
-+	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
- 	if (len < 0) {
- 		l2cap_send_disconn_req(chan, ECONNRESET);
- 		goto unlock;
-@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
- 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
- 		u8 buf[64];
- 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
--			       l2cap_build_conf_req(chan, buf), buf);
-+			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 		chan->num_conf_req++;
- 	}
- 
-@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- 			char buf[64];
- 
- 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
--						   buf, &result);
-+						   buf, sizeof(buf), &result);
- 			if (len < 0) {
- 				l2cap_send_disconn_req(chan, ECONNRESET);
- 				goto done;
-@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
- 			/* throw out any old stored conf requests */
- 			result = L2CAP_CONF_SUCCESS;
- 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
--						   req, &result);
-+						   req, sizeof(req), &result);
- 			if (len < 0) {
- 				l2cap_send_disconn_req(chan, ECONNRESET);
- 				goto done;
-@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
- 			set_bit(CONF_REQ_SENT, &chan->conf_state);
- 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
- 				       L2CAP_CONF_REQ,
--				       l2cap_build_conf_req(chan, buf), buf);
-+				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
- 			chan->num_conf_req++;
- 		}
- 	}
-@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
- 				set_bit(CONF_REQ_SENT, &chan->conf_state);
- 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
- 					       L2CAP_CONF_REQ,
--					       l2cap_build_conf_req(chan, buf),
-+					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
- 					       buf);
- 				chan->num_conf_req++;
- 			}
--- 
-cgit v1.1
-
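
For context: the patch deleted above threaded an explicit end-of-buffer pointer through every L2CAP option writer so l2cap_add_conf_opt() could refuse to write past the stack buffer; it is dropped here presumably because the same upstream commit (e860d2c904d1) had by then reached the stable releases carried in this tree, making the standalone copy redundant. The heart of the pattern, reduced to a sketch:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Append a type/len/value option only if it fits before endptr;
	 * otherwise drop it rather than overflow (sketch, 2-byte header). */
	static void add_opt(uint8_t **ptr, const uint8_t *endptr,
			    uint8_t type, uint8_t len, const void *val)
	{
		uint8_t *p = *ptr;

		if (endptr - p < 2 + (ptrdiff_t)len)
			return;
		p[0] = type;
		p[1] = len;
		memcpy(p + 2, val, len);
		*ptr = p + 2 + len;
	}
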


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-12-07 18:53 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-12-07 18:53 UTC (permalink / raw
  To: gentoo-commits

commit:     54acffba0b4c473f2da2098df12e5b6b053602a2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec  7 18:53:29 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec  7 18:53:29 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=54acffba

Linux patch 4.1.46 and 4.1.47

 0000_README             |    8 +
 1045_linux-4.1.46.patch | 6685 +++++++++++++++++++++++++++++++++++++++++++++++
 1046_linux-4.1.47.patch | 5346 +++++++++++++++++++++++++++++++++++++
 3 files changed, 12039 insertions(+)

diff --git a/0000_README b/0000_README
index 43ea8eb..85f04e7 100644
--- a/0000_README
+++ b/0000_README
@@ -223,6 +223,14 @@ Patch:  1044_linux-4.1.45.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.45
 
+Patch:  1045_linux-4.1.46.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.46
+
+Patch:  1046_linux-4.1.47.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.47
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1045_linux-4.1.46.patch b/1045_linux-4.1.46.patch
new file mode 100644
index 0000000..9a7f15d
--- /dev/null
+++ b/1045_linux-4.1.46.patch
@@ -0,0 +1,6685 @@
+diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
+new file mode 100644
+index 000000000000..6ec1a880ac18
+--- /dev/null
++++ b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
+@@ -0,0 +1,46 @@
++THS8135 Video DAC
++-----------------
++
++This is the binding for Texas Instruments THS8135 Video DAC bridge.
++
++Required properties:
++
++- compatible: Must be "ti,ths8135"
++
++Required nodes:
++
++This device has two video ports. Their connections are modelled using the OF
++graph bindings specified in Documentation/devicetree/bindings/graph.txt.
++
++- Video port 0 for RGB input
++- Video port 1 for VGA output
++
++Example
++-------
++
++vga-bridge {
++	compatible = "ti,ths8135";
++	#address-cells = <1>;
++	#size-cells = <0>;
++
++	ports {
++		#address-cells = <1>;
++		#size-cells = <0>;
++
++		port@0 {
++			reg = <0>;
++
++			vga_bridge_in: endpoint {
++				remote-endpoint = <&lcdc_out_vga>;
++			};
++		};
++
++		port@1 {
++			reg = <1>;
++
++			vga_bridge_out: endpoint {
++				remote-endpoint = <&vga_con_in>;
++			};
++		};
++	};
++};
+diff --git a/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
+new file mode 100644
+index 000000000000..b3629405f568
+--- /dev/null
++++ b/Documentation/devicetree/bindings/iio/adc/avia-hx711.txt
+@@ -0,0 +1,18 @@
++* AVIA HX711 ADC chip for weight cells
++  Bit-banging driver
++
++Required properties:
++ - compatible:	Should be "avia,hx711"
++ - sck-gpios:	Definition of the GPIO for the clock
++ - dout-gpios:	Definition of the GPIO for data-out
++		See Documentation/devicetree/bindings/gpio/gpio.txt
++ - avdd-supply:	Definition of the regulator used as analog supply
++
++Example:
++weight@0 {
++	compatible = "avia,hx711";
++	sck-gpios = <&gpio3 10 GPIO_ACTIVE_HIGH>;
++	dout-gpios = <&gpio0 7 GPIO_ACTIVE_HIGH>;
++	avdd-suppy = <&avdd>;
++};
++
+diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
+index 80339192c93e..6a7fa617b07a 100644
+--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
++++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
+@@ -31,6 +31,7 @@ asahi-kasei	Asahi Kasei Corp.
+ atmel	Atmel Corporation
+ auo	AU Optronics Corporation
+ avago	Avago Technologies
++avia	avia semiconductor
+ avic	Shanghai AVIC Optoelectronics Co., Ltd.
+ axis	Axis Communications AB
+ bosch	Bosch Sensortec GmbH
+diff --git a/Makefile b/Makefile
+index d4c064604058..1b4148baf398 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 45
++SUBLEVEL = 46
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
+index aed66d5df7f1..b7576349528c 100644
+--- a/arch/arm/Kconfig-nommu
++++ b/arch/arm/Kconfig-nommu
+@@ -34,8 +34,7 @@ config PROCESSOR_ID
+ 	  used instead of the auto-probing which utilizes the register.
+ 
+ config REMAP_VECTORS_TO_RAM
+-	bool 'Install vectors to the beginning of RAM' if DRAM_BASE
+-	depends on DRAM_BASE
++	bool 'Install vectors to the beginning of RAM'
+ 	help
+ 	  The kernel needs to change the hardware exception vectors.
+ 	  In nommu mode, the hardware exception vectors are normally
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index b346b35f827d..e9bef82bff42 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -502,6 +502,7 @@ ENDPROC(__mmap_switched)
+  * booted in EL1 or EL2 respectively.
+  */
+ ENTRY(el2_setup)
++	msr	SPsel, #1			// We want to use SP_EL{1,2}
+ 	mrs	x0, CurrentEL
+ 	cmp	x0, #CurrentEL_EL2
+ 	b.ne	1f
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index 3b46f7ce9ca7..77733b403c09 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -141,7 +141,7 @@ SECTIONS
+ 	 * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+ 	 * gets that alignment.	 .sbss should be empty, so there will be
+ 	 * no holes after __init_end. */
+-	BSS_SECTION(0, 0x10000, 0)
++	BSS_SECTION(0, 0x10000, 8)
+ 
+ 	_end = . ;
+ 
+diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c
+index ba0c053e25ae..2a2fff407ac4 100644
+--- a/arch/parisc/kernel/perf.c
++++ b/arch/parisc/kernel/perf.c
+@@ -39,7 +39,7 @@
+  *  the PDC INTRIGUE calls.  This is done to eliminate bugs introduced
+  *  in various PDC revisions.  The code is much more maintainable
+  *  and reliable this way vs having to debug on every version of PDC
+- *  on every box. 
++ *  on every box.
+  */
+ 
+ #include <linux/capability.h>
+@@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
+ static int perf_release(struct inode *inode, struct file *file);
+ static int perf_open(struct inode *inode, struct file *file);
+ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
+-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
+-	loff_t *ppos);
++static ssize_t perf_write(struct file *file, const char __user *buf,
++	size_t count, loff_t *ppos);
+ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ static void perf_start_counters(void);
+ static int perf_stop_counters(uint32_t *raddr);
+@@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
+ /*
+  * configure:
+  *
+- * Configure the cpu with a given data image.  First turn off the counters, 
++ * Configure the cpu with a given data image.  First turn off the counters,
+  * then download the image, then turn the counters back on.
+  */
+ static int perf_config(uint32_t *image_ptr)
+@@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
+ 	error = perf_stop_counters(raddr);
+ 	if (error != 0) {
+ 		printk("perf_config: perf_stop_counters = %ld\n", error);
+-		return -EINVAL; 
++		return -EINVAL;
+ 	}
+ 
+ printk("Preparing to write image\n");
+@@ -242,7 +242,7 @@ printk("Preparing to write image\n");
+ 	error = perf_write_image((uint64_t *)image_ptr);
+ 	if (error != 0) {
+ 		printk("perf_config: DOWNLOAD = %ld\n", error);
+-		return -EINVAL; 
++		return -EINVAL;
+ 	}
+ 
+ printk("Preparing to start counters\n");
+@@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
+ }
+ 
+ /*
+- * Open the device and initialize all of its memory.  The device is only 
++ * Open the device and initialize all of its memory.  The device is only
+  * opened once, but can be "queried" by multiple processes that know its
+  * file descriptor.
+  */
+@@ -298,8 +298,8 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
+  * called on the processor that the download should happen
+  * on.
+  */
+-static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, 
+-	loff_t *ppos)
++static ssize_t perf_write(struct file *file, const char __user *buf,
++	size_t count, loff_t *ppos)
+ {
+ 	int err;
+ 	size_t image_size;
+@@ -307,11 +307,11 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
+ 	uint32_t interface_type;
+ 	uint32_t test;
+ 
+-	if (perf_processor_interface == ONYX_INTF) 
++	if (perf_processor_interface == ONYX_INTF)
+ 		image_size = PCXU_IMAGE_SIZE;
+-	else if (perf_processor_interface == CUDA_INTF) 
++	else if (perf_processor_interface == CUDA_INTF)
+ 		image_size = PCXW_IMAGE_SIZE;
+-	else 
++	else
+ 		return -EFAULT;
+ 
+ 	if (!capable(CAP_SYS_ADMIN))
+@@ -331,22 +331,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
+ 
+ 	/* First check the machine type is correct for
+ 	   the requested image */
+-        if (((perf_processor_interface == CUDA_INTF) &&
+-		       (interface_type != CUDA_INTF)) ||
+-	    ((perf_processor_interface == ONYX_INTF) &&
+-	               (interface_type != ONYX_INTF))) 
++	if (((perf_processor_interface == CUDA_INTF) &&
++			(interface_type != CUDA_INTF)) ||
++		((perf_processor_interface == ONYX_INTF) &&
++			(interface_type != ONYX_INTF)))
+ 		return -EINVAL;
+ 
+ 	/* Next check to make sure the requested image
+ 	   is valid */
+-	if (((interface_type == CUDA_INTF) && 
++	if (((interface_type == CUDA_INTF) &&
+ 		       (test >= MAX_CUDA_IMAGES)) ||
+-	    ((interface_type == ONYX_INTF) && 
+-		       (test >= MAX_ONYX_IMAGES))) 
++	    ((interface_type == ONYX_INTF) &&
++		       (test >= MAX_ONYX_IMAGES)))
+ 		return -EINVAL;
+ 
+ 	/* Copy the image into the processor */
+-	if (interface_type == CUDA_INTF) 
++	if (interface_type == CUDA_INTF)
+ 		return perf_config(cuda_images[test]);
+ 	else
+ 		return perf_config(onyx_images[test]);
+@@ -360,7 +360,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
+ static void perf_patch_images(void)
+ {
+ #if 0 /* FIXME!! */
+-/* 
++/*
+  * NOTE:  this routine is VERY specific to the current TLB image.
+  * If the image is changed, this routine might also need to be changed.
+  */
+@@ -368,9 +368,9 @@ static void perf_patch_images(void)
+ 	extern void $i_dtlb_miss_2_0();
+ 	extern void PA2_0_iva();
+ 
+-	/* 
++	/*
+ 	 * We can only use the lower 32-bits, the upper 32-bits should be 0
+-	 * anyway given this is in the kernel 
++	 * anyway given this is in the kernel
+ 	 */
+ 	uint32_t itlb_addr  = (uint32_t)&($i_itlb_miss_2_0);
+ 	uint32_t dtlb_addr  = (uint32_t)&($i_dtlb_miss_2_0);
+@@ -378,21 +378,21 @@ static void perf_patch_images(void)
+ 
+ 	if (perf_processor_interface == ONYX_INTF) {
+ 		/* clear last 2 bytes */
+-		onyx_images[TLBMISS][15] &= 0xffffff00;  
++		onyx_images[TLBMISS][15] &= 0xffffff00;
+ 		/* set 2 bytes */
+ 		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ 		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ 		onyx_images[TLBMISS][17] = itlb_addr;
+ 
+ 		/* clear last 2 bytes */
+-		onyx_images[TLBHANDMISS][15] &= 0xffffff00;  
++		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
+ 		/* set 2 bytes */
+ 		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ 		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
+ 		onyx_images[TLBHANDMISS][17] = itlb_addr;
+ 
+ 		/* clear last 2 bytes */
+-		onyx_images[BIG_CPI][15] &= 0xffffff00;  
++		onyx_images[BIG_CPI][15] &= 0xffffff00;
+ 		/* set 2 bytes */
+ 		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
+ 		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
+@@ -405,24 +405,24 @@ static void perf_patch_images(void)
+ 
+ 	} else if (perf_processor_interface == CUDA_INTF) {
+ 		/* Cuda interface */
+-		cuda_images[TLBMISS][16] =  
++		cuda_images[TLBMISS][16] =
+ 			(cuda_images[TLBMISS][16]&0xffff0000) |
+ 			((dtlb_addr >> 8)&0x0000ffff);
+-		cuda_images[TLBMISS][17] = 
++		cuda_images[TLBMISS][17] =
+ 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ 		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
+ 
+-		cuda_images[TLBHANDMISS][16] = 
++		cuda_images[TLBHANDMISS][16] =
+ 			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
+ 			((dtlb_addr >> 8)&0x0000ffff);
+-		cuda_images[TLBHANDMISS][17] = 
++		cuda_images[TLBHANDMISS][17] =
+ 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ 		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
+ 
+-		cuda_images[BIG_CPI][16] = 
++		cuda_images[BIG_CPI][16] =
+ 			(cuda_images[BIG_CPI][16]&0xffff0000) |
+ 			((dtlb_addr >> 8)&0x0000ffff);
+-		cuda_images[BIG_CPI][17] = 
++		cuda_images[BIG_CPI][17] =
+ 			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
+ 		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
+ 	} else {
+@@ -434,7 +434,7 @@ static void perf_patch_images(void)
+ 
+ /*
+  * ioctl routine
+- * All routines effect the processor that they are executed on.  Thus you 
++ * All routines effect the processor that they are executed on.  Thus you
+  * must be running on the processor that you wish to change.
+  */
+ 
+@@ -460,7 +460,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 			}
+ 
+ 			/* copy out the Counters */
+-			if (copy_to_user((void __user *)arg, raddr, 
++			if (copy_to_user((void __user *)arg, raddr,
+ 					sizeof (raddr)) != 0) {
+ 				error =  -EFAULT;
+ 				break;
+@@ -488,7 +488,7 @@ static const struct file_operations perf_fops = {
+ 	.open = perf_open,
+ 	.release = perf_release
+ };
+-	
++
+ static struct miscdevice perf_dev = {
+ 	MISC_DYNAMIC_MINOR,
+ 	PA_PERF_DEV,
+@@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
+ 		/* OR sticky2 (bit 1496) to counter2 bit 32 */
+ 		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
+ 		raddr[2] = (uint32_t)tmp64;
+-		
++
+ 		/* Counter3 is bits 1497 to 1528 */
+ 		tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
+ 		/* OR sticky3 (bit 1529) to counter3 bit 32 */
+@@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
+ 		userbuf[22] = 0;
+ 		userbuf[23] = 0;
+ 
+-		/* 
++		/*
+ 		 * Write back the zeroed bytes + the image given
+ 		 * the read was destructive.
+ 		 */
+@@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
+ 	} else {
+ 
+ 		/*
+-		 * Read RDR-15 which contains the counters and sticky bits 
++		 * Read RDR-15 which contains the counters and sticky bits
+ 		 */
+ 		if (!perf_rdr_read_ubuf(15, userbuf)) {
+ 			return -13;
+ 		}
+ 
+-		/* 
++		/*
+ 		 * Clear out the counters
+ 		 */
+ 		perf_rdr_clear(15);
+@@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
+ 		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
+ 		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
+ 	}
+- 
++
+ 	return 0;
+ }
+ 
+@@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t	rdr_num, uint64_t *buffer)
+ 	i = tentry->num_words;
+ 	while (i--) {
+ 		buffer[i] = 0;
+-	}	
++	}
+ 
+ 	/* Check for bits an even number of 64 */
+ 	if ((xbits = width & 0x03f) != 0) {
+@@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
+ 	}
+ 
+ 	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
++	if (!runway) {
++		pr_err("perf_write_image: ioremap failed!\n");
++		return -ENOMEM;
++	}
+ 
+ 	/* Merge intrigue bits into Runway STATUS 0 */
+ 	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
+-	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), 
++	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
+ 		     runway + RUNWAY_STATUS);
+-	
++
+ 	/* Write RUNWAY DEBUG registers */
+ 	for (i = 0; i < 8; i++) {
+ 		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
+ 	}
+ 
+-	return 0; 
++	return 0;
+ }
+ 
+ /*
+@@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
+ 			perf_rdr_shift_out_U(rdr_num, buffer[i]);
+ 		} else {
+ 			perf_rdr_shift_out_W(rdr_num, buffer[i]);
+-		}	
++		}
+ 	}
+ printk("perf_rdr_write done\n");
+ }
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 8f13c7facdd7..95f090fe385a 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -479,11 +479,6 @@ lws_start:
+ 	comiclr,>>	__NR_lws_entries, %r20, %r0
+ 	b,n	lws_exit_nosys
+ 
+-	/* WARNING: Trashing sr2 and sr3 */
+-	mfsp	%sr7,%r1			/* get userspace into sr3 */
+-	mtsp	%r1,%sr3
+-	mtsp	%r0,%sr2			/* get kernel space into sr2 */
+-
+ 	/* Load table start */
+ 	ldil	L%lws_table, %r1
+ 	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
+@@ -632,9 +627,9 @@ cas_action:
+ 	stw	%r1, 4(%sr2,%r20)
+ #endif
+ 	/* The load and store could fail */
+-1:	ldw,ma	0(%sr3,%r26), %r28
++1:	ldw,ma	0(%r26), %r28
+ 	sub,<>	%r28, %r25, %r0
+-2:	stw,ma	%r24, 0(%sr3,%r26)
++2:	stw,ma	%r24, 0(%r26)
+ 	/* Free lock */
+ 	stw,ma	%r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+@@ -711,9 +706,9 @@ lws_compare_and_swap_2:
+ 	nop
+ 
+ 	/* 8bit load */
+-4:	ldb	0(%sr3,%r25), %r25
++4:	ldb	0(%r25), %r25
+ 	b	cas2_lock_start
+-5:	ldb	0(%sr3,%r24), %r24
++5:	ldb	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -721,9 +716,9 @@ lws_compare_and_swap_2:
+ 	nop
+ 
+ 	/* 16bit load */
+-6:	ldh	0(%sr3,%r25), %r25
++6:	ldh	0(%r25), %r25
+ 	b	cas2_lock_start
+-7:	ldh	0(%sr3,%r24), %r24
++7:	ldh	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -731,9 +726,9 @@ lws_compare_and_swap_2:
+ 	nop
+ 
+ 	/* 32bit load */
+-8:	ldw	0(%sr3,%r25), %r25
++8:	ldw	0(%r25), %r25
+ 	b	cas2_lock_start
+-9:	ldw	0(%sr3,%r24), %r24
++9:	ldw	0(%r24), %r24
+ 	nop
+ 	nop
+ 	nop
+@@ -742,14 +737,14 @@ lws_compare_and_swap_2:
+ 
+ 	/* 64bit load */
+ #ifdef CONFIG_64BIT
+-10:	ldd	0(%sr3,%r25), %r25
+-11:	ldd	0(%sr3,%r24), %r24
++10:	ldd	0(%r25), %r25
++11:	ldd	0(%r24), %r24
+ #else
+-	/* Load new value into r22/r23 - high/low */
+-10:	ldw	0(%sr3,%r25), %r22
+-11:	ldw	4(%sr3,%r25), %r23
++	/* Load old value into r22/r23 - high/low */
++10:	ldw	0(%r25), %r22
++11:	ldw	4(%r25), %r23
+ 	/* Load new value into fr4 for atomic store later */
+-12:	flddx	0(%sr3,%r24), %fr4
++12:	flddx	0(%r24), %fr4
+ #endif
+ 
+ cas2_lock_start:
+@@ -799,30 +794,30 @@ cas2_action:
+ 	ldo	1(%r0),%r28
+ 
+ 	/* 8bit CAS */
+-13:	ldb,ma	0(%sr3,%r26), %r29
++13:	ldb,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-14:	stb,ma	%r24, 0(%sr3,%r26)
++14:	stb,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+ 	nop
+ 
+ 	/* 16bit CAS */
+-15:	ldh,ma	0(%sr3,%r26), %r29
++15:	ldh,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-16:	sth,ma	%r24, 0(%sr3,%r26)
++16:	sth,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+ 	nop
+ 
+ 	/* 32bit CAS */
+-17:	ldw,ma	0(%sr3,%r26), %r29
++17:	ldw,ma	0(%r26), %r29
+ 	sub,=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-18:	stw,ma	%r24, 0(%sr3,%r26)
++18:	stw,ma	%r24, 0(%r26)
+ 	b	cas2_end
+ 	copy	%r0, %r28
+ 	nop
+@@ -830,22 +825,22 @@ cas2_action:
+ 
+ 	/* 64bit CAS */
+ #ifdef CONFIG_64BIT
+-19:	ldd,ma	0(%sr3,%r26), %r29
++19:	ldd,ma	0(%r26), %r29
+ 	sub,*=	%r29, %r25, %r0
+ 	b,n	cas2_end
+-20:	std,ma	%r24, 0(%sr3,%r26)
++20:	std,ma	%r24, 0(%r26)
+ 	copy	%r0, %r28
+ #else
+ 	/* Compare first word */
+-19:	ldw,ma	0(%sr3,%r26), %r29
++19:	ldw	0(%r26), %r29
+ 	sub,=	%r29, %r22, %r0
+ 	b,n	cas2_end
+ 	/* Compare second word */
+-20:	ldw,ma	4(%sr3,%r26), %r29
++20:	ldw	4(%r26), %r29
+ 	sub,=	%r29, %r23, %r0
+ 	b,n	cas2_end
+ 	/* Perform the store */
+-21:	fstdx	%fr4, 0(%sr3,%r26)
++21:	fstdx	%fr4, 0(%r26)
+ 	copy	%r0, %r28
+ #endif
+ 
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 54cf9bc94dad..3a095670b0c4 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -101,22 +101,17 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ 				   struct kvm_create_spapr_tce *args)
+ {
+ 	struct kvmppc_spapr_tce_table *stt = NULL;
++	struct kvmppc_spapr_tce_table *siter;
+ 	long npages;
+ 	int ret = -ENOMEM;
+ 	int i;
+ 
+-	/* Check this LIOBN hasn't been previously allocated */
+-	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+-		if (stt->liobn == args->liobn)
+-			return -EBUSY;
+-	}
+-
+ 	npages = kvmppc_stt_npages(args->window_size);
+ 
+ 	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
+ 		      GFP_KERNEL);
+ 	if (!stt)
+-		goto fail;
++		return ret;
+ 
+ 	stt->liobn = args->liobn;
+ 	stt->window_size = args->window_size;
+@@ -128,23 +123,36 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
+ 			goto fail;
+ 	}
+ 
+-	kvm_get_kvm(kvm);
+-
+ 	mutex_lock(&kvm->lock);
+-	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
++
++	/* Check this LIOBN hasn't been previously allocated */
++	ret = 0;
++	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
++		if (siter->liobn == args->liobn) {
++			ret = -EBUSY;
++			break;
++		}
++	}
++
++	if (!ret)
++		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
++				       stt, O_RDWR | O_CLOEXEC);
++
++	if (ret >= 0) {
++		list_add(&stt->list, &kvm->arch.spapr_tce_tables);
++		kvm_get_kvm(kvm);
++	}
+ 
+ 	mutex_unlock(&kvm->lock);
+ 
+-	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
+-				stt, O_RDWR | O_CLOEXEC);
++	if (ret >= 0)
++		return ret;
+ 
+-fail:
+-	if (stt) {
+-		for (i = 0; i < npages; i++)
+-			if (stt->pages[i])
+-				__free_page(stt->pages[i]);
++ fail:
++	for (i = 0; i < npages; i++)
++		if (stt->pages[i])
++			__free_page(stt->pages[i]);
+ 
+-		kfree(stt);
+-	}
++	kfree(stt);
+ 	return ret;
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index ceb18d349459..8dd0c8edefd6 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -225,8 +225,10 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+ 		return -ENOENT;
+ 
+ 	dn = dlpar_configure_connector(drc_index, parent_dn);
+-	if (!dn)
++	if (!dn) {
++		of_node_put(parent_dn);
+ 		return -ENOENT;
++	}
+ 
+ 	rc = dlpar_attach_node(dn);
+ 	if (rc)
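
The mobility.c hunk is a plain device-node refcount leak: when dlpar_configure_connector() fails, the reference taken when parent_dn was looked up was never dropped. The rule it restores is that every early return after a successful lookup must balance it, along these lines:

	parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
	if (!parent_dn)
		return -ENOENT;

	dn = dlpar_configure_connector(drc_index, parent_dn);
	if (!dn) {
		of_node_put(parent_dn);	/* balance the lookup above */
		return -ENOENT;
	}
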
+diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
+index be0cc1beed41..3fae200dd251 100644
+--- a/arch/sparc/include/asm/setup.h
++++ b/arch/sparc/include/asm/setup.h
+@@ -59,8 +59,11 @@ extern atomic_t dcpage_flushes;
+ extern atomic_t dcpage_flushes_xcall;
+ 
+ extern int sysctl_tsb_ratio;
+-#endif
+ 
++#ifdef CONFIG_SERIAL_SUNHV
++void sunhv_migrate_hvcons_irq(int cpu);
++#endif
++#endif
+ void sun_do_break(void);
+ extern int stop_a_enabled;
+ extern int scons_pwroff;
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 4511caa3b7e9..46866b2097e8 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -1443,8 +1443,12 @@ void smp_send_stop(void)
+ 	int cpu;
+ 
+ 	if (tlb_type == hypervisor) {
++		int this_cpu = smp_processor_id();
++#ifdef CONFIG_SERIAL_SUNHV
++		sunhv_migrate_hvcons_irq(this_cpu);
++#endif
+ 		for_each_online_cpu(cpu) {
+-			if (cpu == smp_processor_id())
++			if (cpu == this_cpu)
+ 				continue;
+ #ifdef CONFIG_SUN_LDOMS
+ 			if (ldom_domaining_enabled) {
+diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
+index bdf02eeee765..c5f0af61420d 100644
+--- a/arch/x86/include/asm/alternative-asm.h
++++ b/arch/x86/include/asm/alternative-asm.h
+@@ -50,8 +50,10 @@
+ #define new_len2		145f-144f
+ 
+ /*
+- * max without conditionals. Idea adapted from:
++ * gas compatible max based on the idea from:
+  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
++ *
++ * The additional "-" is needed because gas uses a "true" value of -1.
+  */
+ #define alt_max_short(a, b)	((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+ 
+diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
+index ba32af062f61..c97effa6c72b 100644
+--- a/arch/x86/include/asm/alternative.h
++++ b/arch/x86/include/asm/alternative.h
+@@ -96,12 +96,12 @@ static inline int alternatives_text_reserved(void *start, void *end)
+ 	alt_end_marker ":\n"
+ 
+ /*
+- * max without conditionals. Idea adapted from:
++ * gas compatible max based on the idea from:
+  * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+  *
+- * The additional "-" is needed because gas works with s32s.
++ * The additional "-" is needed because gas uses a "true" value of -1.
+  */
+-#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))"
++#define alt_max_short(a, b)	"((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))"
+ 
+ /*
+  * Pad the second replacement alternative with additional NOPs if it is
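
The alt_max_short() fix deserves a note: it is the branchless max from the bit-hacks page, where (a ^ b) & mask selects b when the mask is all ones. The old expression built the mask from a subtraction and was simply not a max; the new one builds it from a comparison. The updated comment's point is that gas evaluates a true comparison to -1, so the extra unary minus is what turns the standard C-oriented form back into the all-ones mask. In C, where true is 1, a single negation suffices, and the identity is easy to check:

	#include <assert.h>

	/* Branchless max: mask is all ones exactly when a < b,
	 * so a ^ ((a ^ b) & mask) yields b; otherwise it yields a. */
	static unsigned max_u(unsigned a, unsigned b)
	{
		unsigned mask = -(unsigned)(a < b);	/* 0 or ~0u */
		return a ^ ((a ^ b) & mask);
	}

	int main(void)
	{
		assert(max_u(3, 5) == 5);
		assert(max_u(5, 3) == 5);
		assert(max_u(4, 4) == 4);
		return 0;
	}
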
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index a41beadb3db9..6ca31bf3ccbd 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -272,6 +272,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
+ 	return 0;
+ }
+ 
++static bool is_blacklisted(unsigned int cpu)
++{
++	struct cpuinfo_x86 *c = &cpu_data(cpu);
++
++	if (c->x86 == 6 && c->x86_model == 79) {
++		pr_err_once("late loading on model 79 is disabled.\n");
++		return true;
++	}
++
++	return false;
++}
++
+ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ 					     bool refresh_fw)
+ {
+@@ -280,6 +292,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
+ 	const struct firmware *firmware;
+ 	enum ucode_state ret;
+ 
++	if (is_blacklisted(cpu))
++		return UCODE_NFOUND;
++
+ 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
+ 		c->x86, c->x86_model, c->x86_mask);
+ 
+@@ -304,6 +319,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
++	if (is_blacklisted(cpu))
++		return UCODE_NFOUND;
++
+ 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
+ }
+ 
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 32e29f926e5a..33c1b5311b98 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9276,6 +9276,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+ 		vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+ 				page_to_phys(vmx->nested.virtual_apic_page));
+ 		vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
++	} else {
++#ifdef CONFIG_X86_64
++		exec_control |= CPU_BASED_CR8_LOAD_EXITING |
++				CPU_BASED_CR8_STORE_EXITING;
++#endif
+ 	}
+ 
+ 	if (cpu_has_vmx_msr_bitmap() &&
+@@ -9936,7 +9941,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ 	 * (KVM doesn't change it)- no reason to call set_cr4_guest_host_mask();
+ 	 */
+ 	vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+-	kvm_set_cr4(vcpu, vmcs12->host_cr4);
++	vmx_set_cr4(vcpu, vmcs12->host_cr4);
+ 
+ 	nested_ept_uninit_mmu_context(vcpu);
+ 
+diff --git a/block/bio.c b/block/bio.c
+index cbce3e2208f4..d9cf77c6a847 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1342,6 +1342,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 		offset = uaddr & ~PAGE_MASK;
+ 		for (j = cur_page; j < page_limit; j++) {
+ 			unsigned int bytes = PAGE_SIZE - offset;
++			unsigned short prev_bi_vcnt = bio->bi_vcnt;
+ 
+ 			if (len <= 0)
+ 				break;
+@@ -1356,6 +1357,13 @@ struct bio *bio_map_user_iov(struct request_queue *q,
+ 					    bytes)
+ 				break;
+ 
++			/*
++			 * check if vector was merged with previous
++			 * drop page reference if needed
++			 */
++			if (bio->bi_vcnt == prev_bi_vcnt)
++				put_page(pages[j]);
++
+ 			len -= bytes;
+ 			offset = 0;
+ 		}
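
The fix above relies on bi_vcnt staying unchanged as the signal that bio_add_pc_page() merged the page into the previous segment, leaving one page reference too many. A toy model of that count-before/count-after pattern (toy_page and vec_add are hypothetical stand-ins, not block-layer APIs):

#include <stdio.h>

/* Toy refcounted page and a vector that may merge adjacent entries. */
struct toy_page { int refs; };

static void get_page_(struct toy_page *p) { p->refs++; }
static void put_page_(struct toy_page *p) { p->refs--; }

struct vec { struct toy_page *entries[8]; int cnt; };

/* Appends p, except when p matches the previous entry, in which case
 * the segment is "merged" and cnt stays unchanged. */
static void vec_add(struct vec *v, struct toy_page *p)
{
	if (v->cnt && v->entries[v->cnt - 1] == p)
		return;			/* merged: no new entry */
	v->entries[v->cnt++] = p;
}

int main(void)
{
	struct toy_page pg = { .refs = 0 };
	struct vec v = { .cnt = 0 };

	for (int i = 0; i < 2; i++) {
		int prev_cnt = v.cnt;

		get_page_(&pg);		/* reference taken for the vector */
		vec_add(&v, &pg);
		if (v.cnt == prev_cnt)	/* entry merged with the previous */
			put_page_(&pg);	/* so drop the duplicate reference */
	}
	printf("entries=%d refs=%d\n", v.cnt, pg.refs);	/* entries=1 refs=1 */
	return 0;
}
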
+diff --git a/block/bsg-lib.c b/block/bsg-lib.c
+index 650f427d915b..341b8d858e67 100644
+--- a/block/bsg-lib.c
++++ b/block/bsg-lib.c
+@@ -147,7 +147,6 @@ static int bsg_create_job(struct device *dev, struct request *req)
+ failjob_rls_rqst_payload:
+ 	kfree(job->request_payload.sg_list);
+ failjob_rls_job:
+-	kfree(job);
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/block/partitions/efi.c b/block/partitions/efi.c
+index 26cb624ace05..d26d0d27f5fd 100644
+--- a/block/partitions/efi.c
++++ b/block/partitions/efi.c
+@@ -293,7 +293,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
+ 	if (!gpt)
+ 		return NULL;
+ 
+-	count = le32_to_cpu(gpt->num_partition_entries) *
++	count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
+                 le32_to_cpu(gpt->sizeof_partition_entry);
+ 	if (!count)
+ 		return NULL;
+@@ -352,7 +352,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
+ 			gpt_header **gpt, gpt_entry **ptes)
+ {
+ 	u32 crc, origcrc;
+-	u64 lastlba;
++	u64 lastlba, pt_size;
+ 
+ 	if (!ptes)
+ 		return 0;
+@@ -434,13 +434,20 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
+ 		goto fail;
+ 	}
+ 
++	/* Sanity check partition table size */
++	pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
++		le32_to_cpu((*gpt)->sizeof_partition_entry);
++	if (pt_size > KMALLOC_MAX_SIZE) {
++		pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
++			 (unsigned long long)pt_size, KMALLOC_MAX_SIZE);
++		goto fail;
++	}
++
+ 	if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
+ 		goto fail;
+ 
+ 	/* Check the GUID Partition Entry Array CRC */
+-	crc = efi_crc32((const unsigned char *) (*ptes),
+-			le32_to_cpu((*gpt)->num_partition_entries) *
+-			le32_to_cpu((*gpt)->sizeof_partition_entry));
++	crc = efi_crc32((const unsigned char *) (*ptes), pt_size);
+ 
+ 	if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
+ 		pr_debug("GUID Partition Entry Array CRC check failed.\n");
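
The new pt_size check widens one operand to u64 before multiplying, so a hostile num_partition_entries cannot wrap the 32-bit product, and it caps the result before the value ever reaches the allocator. A standalone sketch of the same pattern (KMALLOC_MAX_SIZE_ is an illustrative cap, not the kernel's value):

#include <stdint.h>
#include <stdio.h>

#define KMALLOC_MAX_SIZE_ (4UL << 20)	/* illustrative cap only */

/* Returns the byte size of the entry array, or 0 if it is absurd.
 * Widening to u64 before the multiply avoids 32-bit overflow. */
static uint64_t gpt_entries_bytes(uint32_t num_entries, uint32_t entry_size)
{
	uint64_t pt_size = (uint64_t)num_entries * entry_size;

	if (pt_size == 0 || pt_size > KMALLOC_MAX_SIZE_)
		return 0;
	return pt_size;
}

int main(void)
{
	/* would wrap to 0 in 32 bits; in 64 bits it is 4 GiB, rejected */
	printf("%llu\n", (unsigned long long)gpt_entries_bytes(0x01000000, 0x100));
	printf("%llu\n", (unsigned long long)gpt_entries_bytes(128, 128));
	/* prints: 0 then 16384 */
	return 0;
}
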
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index 362905e7c841..1b7480773899 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -290,6 +290,7 @@ config CRYPTO_XTS
+ 	select CRYPTO_BLKCIPHER
+ 	select CRYPTO_MANAGER
+ 	select CRYPTO_GF128MUL
++	select CRYPTO_ECB
+ 	help
+ 	  XTS: IEEE1619/D16 narrow block cipher use with aes-xts-plain,
+ 	  key size 256, 384 or 512 bits. This implementation currently
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 03fbcd4a82c4..17510eaf0a36 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -274,12 +274,14 @@ static int shash_async_finup(struct ahash_request *req)
+ 
+ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
+ {
+-	struct scatterlist *sg = req->src;
+-	unsigned int offset = sg->offset;
+ 	unsigned int nbytes = req->nbytes;
++	struct scatterlist *sg;
++	unsigned int offset;
+ 	int err;
+ 
+-	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
++	if (nbytes &&
++	    (sg = req->src, offset = sg->offset,
++	     nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
+ 		void *data;
+ 
+ 		data = kmap_atomic(sg_page(sg));
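
The reworked condition above is subtle: the comma expressions that read req->src execute only once nbytes has tested non-zero, so an empty request never dereferences its (possibly absent) scatterlist. A compact userspace sketch of that short-circuit idiom (sgent and the 4096-byte page size are assumptions for illustration):

#include <stdio.h>
#include <stddef.h>

struct sgent { unsigned int offset, length; };

/* Mirrors the patched condition: the comma expressions that load from
 * sg run only when nbytes is non-zero, so an empty request never
 * touches a possibly-NULL scatterlist entry. */
static int fast_path_ok(unsigned int nbytes, const struct sgent *sg)
{
	unsigned int offset, length, in_page;

	return nbytes &&
	       (offset = sg->offset, length = sg->length,
		in_page = length < 4096u - offset ? length : 4096u - offset,
		nbytes < in_page);
}

int main(void)
{
	struct sgent sg = { .offset = 0, .length = 64 };

	printf("%d\n", fast_path_ok(0, NULL));	/* 0: sg never dereferenced */
	printf("%d\n", fast_path_ok(16, &sg));	/* 1: fits within one page */
	return 0;
}
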
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index e2d94972962d..7aa10c200ecb 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
+ 
+ static void ata_tport_release(struct device *dev)
+ {
+-	put_device(dev->parent);
+ }
+ 
+ /**
+@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
+ 	device_initialize(dev);
+ 	dev->type = &ata_port_type;
+ 
+-	dev->parent = get_device(parent);
++	dev->parent = parent;
+ 	dev->release = ata_tport_release;
+ 	dev_set_name(dev, "ata%d", ap->print_id);
+ 	transport_setup_device(dev);
+@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
+ 
+ static void ata_tlink_release(struct device *dev)
+ {
+-	put_device(dev->parent);
+ }
+ 
+ /**
+@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
+ 	int error;
+ 
+ 	device_initialize(dev);
+-	dev->parent = get_device(&ap->tdev);
++	dev->parent = &ap->tdev;
+ 	dev->release = ata_tlink_release;
+ 	if (ata_is_host_link(link))
+ 		dev_set_name(dev, "link%d", ap->print_id);
+@@ -588,7 +586,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
+ 
+ static void ata_tdev_release(struct device *dev)
+ {
+-	put_device(dev->parent);
+ }
+ 
+ /**
+@@ -661,7 +658,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
+ 	int error;
+ 
+ 	device_initialize(dev);
+-	dev->parent = get_device(&link->tdev);
++	dev->parent = &link->tdev;
+ 	dev->release = ata_tdev_release;
+ 	if (ata_is_host_link(link))
+ 		dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 29a4ef08e051..018482d7c87b 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -731,7 +731,8 @@ static ssize_t driver_override_store(struct device *dev,
+ 	struct platform_device *pdev = to_platform_device(dev);
+ 	char *driver_override, *old, *cp;
+ 
+-	if (count > PATH_MAX)
++	/* We need to keep extra room for a newline */
++	if (count >= (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+ 	driver_override = kstrndup(buf, count, GFP_KERNEL);
+diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
+index 6f047dcb94c2..155fdb3e3d55 100644
+--- a/drivers/bus/mvebu-mbus.c
++++ b/drivers/bus/mvebu-mbus.c
+@@ -604,7 +604,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
+ 			if (mbus->hw_io_coherency)
+ 				w->mbus_attr |= ATTR_HW_COHERENCY;
+ 			w->base = base & DDR_BASE_CS_LOW_MASK;
+-			w->size = (size | ~DDR_SIZE_MASK) + 1;
++			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
+ 		}
+ 	}
+ 	mvebu_mbus_dram_info.num_cs = cs;
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index eb79d49ab88c..3159bafdbe6b 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1579,9 +1579,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
+ 		req_ctx->swinit = 0;
+ 	} else {
+ 		desc->ptr[1] = zero_entry;
+-		/* Indicate next op is not the first. */
+-		req_ctx->first = 0;
+ 	}
++	/* Indicate next op is not the first. */
++	req_ctx->first = 0;
+ 
+ 	/* HMAC key */
+ 	if (ctx->keylen)
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 7724ddb0f776..fd11a251024d 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1800,6 +1800,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 				return -EINVAL;
+ 			}
+ 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
++			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+ 		} else {
+ 			port = NULL;
+ 			req_payload.num_slots = 0;
+@@ -1815,6 +1816,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			if (req_payload.num_slots) {
+ 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
+ 				mgr->payloads[i].num_slots = req_payload.num_slots;
++				mgr->payloads[i].vcpi = req_payload.vcpi;
+ 			} else if (mgr->payloads[i].num_slots) {
+ 				mgr->payloads[i].num_slots = 0;
+ 				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index fadf9865709e..cb1d140db6b9 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -991,6 +991,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
+ 	is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ 	is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ 
++	if (port == PORT_A && is_dvi) {
++		DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
++			      is_hdmi ? "/HDMI" : "");
++		is_dvi = false;
++		is_hdmi = false;
++	}
++
+ 	info->supports_dvi = is_dvi;
+ 	info->supports_hdmi = is_hdmi;
+ 	info->supports_dp = is_dp;
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index 1ec738292a1a..a9054be9bca2 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -538,7 +538,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
+ {
+ 	/* the worst case is computed from the set_report command with a
+ 	 * reportID > 15 and the maximum report length */
+-	int args_len = sizeof(__u8) + /* optional ReportID byte */
++	int args_len = sizeof(__u8) + /* ReportID */
++		       sizeof(__u8) + /* optional ReportID byte */
+ 		       sizeof(__u16) + /* data register */
+ 		       sizeof(__u16) + /* size of the report */
+ 		       report_size; /* report */
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 49a259fc610e..765aad050e34 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -970,6 +970,8 @@ static int usbhid_parse(struct hid_device *hid)
+ 	unsigned int rsize = 0;
+ 	char *rdesc;
+ 	int ret, n;
++	int num_descriptors;
++	size_t offset = offsetof(struct hid_descriptor, desc);
+ 
+ 	quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
+ 			le16_to_cpu(dev->descriptor.idProduct));
+@@ -992,10 +994,18 @@ static int usbhid_parse(struct hid_device *hid)
+ 		return -ENODEV;
+ 	}
+ 
++	if (hdesc->bLength < sizeof(struct hid_descriptor)) {
++		dbg_hid("hid descriptor is too short\n");
++		return -EINVAL;
++	}
++
+ 	hid->version = le16_to_cpu(hdesc->bcdHID);
+ 	hid->country = hdesc->bCountryCode;
+ 
+-	for (n = 0; n < hdesc->bNumDescriptors; n++)
++	num_descriptors = min_t(int, hdesc->bNumDescriptors,
++	       (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor));
++
++	for (n = 0; n < num_descriptors; n++)
+ 		if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
+ 			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
+ 
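
The parse fix clamps the device-supplied bNumDescriptors to the number of hid_class_descriptor entries that actually fit inside the declared bLength, instead of trusting it outright. A sketch of the clamping, with simplified descriptor shapes that only approximate the real USB structures:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified shapes of the USB HID descriptors, for illustration only. */
struct class_desc { uint8_t bDescriptorType; uint16_t wDescriptorLength; };
struct hid_desc {
	uint8_t bLength;
	uint8_t bNumDescriptors;
	struct class_desc desc[];	/* bNumDescriptors entries claimed */
};

/* Never trust bNumDescriptors alone: clamp it to the number of entries
 * that fit inside the bLength the device also declared. */
static int usable_descriptors(const struct hid_desc *h)
{
	size_t offset = offsetof(struct hid_desc, desc);
	int fit;

	if (h->bLength < offset)
		return 0;
	fit = (h->bLength - offset) / sizeof(struct class_desc);
	return h->bNumDescriptors < fit ? h->bNumDescriptors : fit;
}

int main(void)
{
	uint16_t raw[3] = { 0 };	/* 6 aligned bytes of backing storage */
	struct hid_desc *h = (struct hid_desc *)raw;

	h->bLength = sizeof(raw);
	h->bNumDescriptors = 200;	/* device lies about the count */
	printf("%d\n", usable_descriptors(h));	/* prints 1, not 200 */
	return 0;
}
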
+diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
+index dee93ec87d02..84e0994aafdd 100644
+--- a/drivers/hwmon/gl520sm.c
++++ b/drivers/hwmon/gl520sm.c
+@@ -208,11 +208,13 @@ static ssize_t get_cpu_vid(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR(cpu0_vid, S_IRUGO, get_cpu_vid, NULL);
+ 
+-#define VDD_FROM_REG(val) (((val) * 95 + 2) / 4)
+-#define VDD_TO_REG(val) clamp_val((((val) * 4 + 47) / 95), 0, 255)
++#define VDD_FROM_REG(val)	DIV_ROUND_CLOSEST((val) * 95, 4)
++#define VDD_CLAMP(val)		clamp_val(val, 0, 255 * 95 / 4)
++#define VDD_TO_REG(val)		DIV_ROUND_CLOSEST(VDD_CLAMP(val) * 4, 95)
+ 
+-#define IN_FROM_REG(val) ((val) * 19)
+-#define IN_TO_REG(val) clamp_val((((val) + 9) / 19), 0, 255)
++#define IN_FROM_REG(val)	((val) * 19)
++#define IN_CLAMP(val)		clamp_val(val, 0, 255 * 19)
++#define IN_TO_REG(val)		DIV_ROUND_CLOSEST(IN_CLAMP(val), 19)
+ 
+ static ssize_t get_in_input(struct device *dev, struct device_attribute *attr,
+ 			    char *buf)
+@@ -349,8 +351,13 @@ static SENSOR_DEVICE_ATTR(in4_max, S_IRUGO | S_IWUSR,
+ 
+ #define DIV_FROM_REG(val) (1 << (val))
+ #define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : (480000 / ((val) << (div))))
+-#define FAN_TO_REG(val, div) ((val) <= 0 ? 0 : \
+-	clamp_val((480000 + ((val) << ((div)-1))) / ((val) << (div)), 1, 255))
++
++#define FAN_BASE(div)		(480000 >> (div))
++#define FAN_CLAMP(val, div)	clamp_val(val, FAN_BASE(div) / 255, \
++					  FAN_BASE(div))
++#define FAN_TO_REG(val, div)	((val) == 0 ? 0 : \
++				 DIV_ROUND_CLOSEST(480000, \
++						FAN_CLAMP(val, div) << (div)))
+ 
+ static ssize_t get_fan_input(struct device *dev, struct device_attribute *attr,
+ 			     char *buf)
+@@ -513,9 +520,9 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IRUGO | S_IWUSR,
+ static DEVICE_ATTR(fan1_off, S_IRUGO | S_IWUSR,
+ 		get_fan_off, set_fan_off);
+ 
+-#define TEMP_FROM_REG(val) (((val) - 130) * 1000)
+-#define TEMP_TO_REG(val) clamp_val(((((val) < 0 ? \
+-			(val) - 500 : (val) + 500) / 1000) + 130), 0, 255)
++#define TEMP_FROM_REG(val)	(((val) - 130) * 1000)
++#define TEMP_CLAMP(val)		clamp_val(val, -130000, 125000)
++#define TEMP_TO_REG(val)	(DIV_ROUND_CLOSEST(TEMP_CLAMP(val), 1000) + 130)
+ 
+ static ssize_t get_temp_input(struct device *dev, struct device_attribute *attr,
+ 			      char *buf)
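
The new gl520sm macros clamp in physical units first and only then convert with round-to-nearest division, so an out-of-range sysfs write can no longer overflow the 8-bit register value. A worked userspace version of the VDD pair (div_round_closest() here re-implements the kernel helper for non-negative inputs):

#include <stdio.h>

/* Round-to-nearest division for non-negative values, standing in for
 * the kernel's DIV_ROUND_CLOSEST() in this case. */
static long div_round_closest(long x, long d)
{
	return (x + d / 2) / d;
}

static long clamp_(long v, long lo, long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* VDD scaling from the driver: 1 register LSB = 95/4 mV.  Clamping in
 * millivolts first guarantees the register stays within 0..255. */
static long vdd_to_reg(long mv)
{
	return div_round_closest(clamp_(mv, 0, 255 * 95 / 4) * 4, 95);
}

static long vdd_from_reg(long reg)
{
	return div_round_closest(reg * 95, 4);
}

int main(void)
{
	long reg = vdd_to_reg(3300);

	/* 3300 mV -> reg 139 -> 3301 mV back, within one LSB */
	printf("reg=%ld back=%ld\n", reg, vdd_from_reg(reg));
	printf("clamped=%ld\n", vdd_to_reg(99999));	/* 255, no overflow */
	return 0;
}
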
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index 9bd10a9b4b50..8029a81a3799 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -875,6 +875,7 @@ static int at91_twi_suspend_noirq(struct device *dev)
+ 
+ static int at91_twi_resume_noirq(struct device *dev)
+ {
++	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
+ 	int ret;
+ 
+ 	if (!pm_runtime_status_suspended(dev)) {
+@@ -886,6 +887,8 @@ static int at91_twi_resume_noirq(struct device *dev)
+ 	pm_runtime_mark_last_busy(dev);
+ 	pm_request_autosuspend(dev);
+ 
++	at91_init_twi_bus(twi_dev);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index a9276eeb61d5..1d60af5dbffc 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -339,12 +339,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
+ 			data->word = dma_buffer[0] | (dma_buffer[1] << 8);
+ 			break;
+ 		case I2C_SMBUS_BLOCK_DATA:
+-		case I2C_SMBUS_I2C_BLOCK_DATA:
+ 			if (desc->rxbytes != dma_buffer[0] + 1)
+ 				return -EMSGSIZE;
+ 
+ 			memcpy(data->block, dma_buffer, desc->rxbytes);
+ 			break;
++		case I2C_SMBUS_I2C_BLOCK_DATA:
++			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
++			data->block[0] = desc->rxbytes;
++			break;
+ 		}
+ 		return 0;
+ 	}
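
The split above comes from the data->block[] convention: for SMBus block reads the device's first byte already is the count, while for I2C block reads the DMA buffer holds raw payload and the driver must write block[0] itself. A small illustration of the two layouts (the buffers are made up):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* block[] convention: block[0] = byte count, block[1..] = payload. */
int main(void)
{
	uint8_t block[32];

	/* SMBus block read: the device prepends the count itself */
	const uint8_t smbus_dma[] = { 3, 0xAA, 0xBB, 0xCC };
	memcpy(block, smbus_dma, smbus_dma[0] + 1);

	/* I2C block read: raw payload, the driver supplies the count */
	const uint8_t i2c_dma[] = { 0xAA, 0xBB, 0xCC };
	memcpy(&block[1], i2c_dma, sizeof(i2c_dma));
	block[0] = sizeof(i2c_dma);

	printf("count=%u first=0x%02X\n", block[0], block[1]);
	return 0;
}
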
+diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
+index 5e176adca8e8..ee7685f15d88 100644
+--- a/drivers/i2c/busses/i2c-meson.c
++++ b/drivers/i2c/busses/i2c-meson.c
+@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
+ 		wdata1 |= *buf++ << ((i - 4) * 8);
+ 
+ 	writel(wdata0, i2c->regs + REG_TOK_WDATA0);
+-	writel(wdata0, i2c->regs + REG_TOK_WDATA1);
++	writel(wdata1, i2c->regs + REG_TOK_WDATA1);
+ 
+ 	dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
+ 		wdata0, wdata1, len);
+diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
+index d10bd0c97233..22c4c17cd996 100644
+--- a/drivers/iio/adc/ad_sigma_delta.c
++++ b/drivers/iio/adc/ad_sigma_delta.c
+@@ -177,6 +177,34 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(ad_sd_read_reg);
+ 
++/**
++ * ad_sd_reset() - Reset the serial interface
++ *
++ * @sigma_delta: The sigma delta device
++ * @reset_length: Number of SCLKs with DIN = 1
++ *
++ * Returns 0 on success, an error code otherwise.
++ **/
++int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
++	unsigned int reset_length)
++{
++	uint8_t *buf;
++	unsigned int size;
++	int ret;
++
++	size = DIV_ROUND_UP(reset_length, 8);
++	buf = kcalloc(size, sizeof(*buf), GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	memset(buf, 0xff, size);
++	ret = spi_write(sigma_delta->spi, buf, size);
++	kfree(buf);
++
++	return ret;
++}
++EXPORT_SYMBOL_GPL(ad_sd_reset);
++
+ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ 	unsigned int mode, unsigned int channel)
+ {
+diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
+index 56008a86b78f..1c626a3cc7f2 100644
+--- a/drivers/iio/adc/axp288_adc.c
++++ b/drivers/iio/adc/axp288_adc.c
+@@ -28,8 +28,6 @@
+ #include <linux/iio/driver.h>
+ 
+ #define AXP288_ADC_EN_MASK		0xF1
+-#define AXP288_ADC_TS_PIN_GPADC		0xF2
+-#define AXP288_ADC_TS_PIN_ON		0xF3
+ 
+ enum axp288_adc_id {
+ 	AXP288_ADC_TS,
+@@ -123,16 +121,6 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
+ 	return IIO_VAL_INT;
+ }
+ 
+-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
+-				unsigned long address)
+-{
+-	/* channels other than GPADC do not need to switch TS pin */
+-	if (address != AXP288_GP_ADC_H)
+-		return 0;
+-
+-	return regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+-}
+-
+ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ 			struct iio_chan_spec const *chan,
+ 			int *val, int *val2, long mask)
+@@ -143,16 +131,7 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ 	mutex_lock(&indio_dev->mlock);
+ 	switch (mask) {
+ 	case IIO_CHAN_INFO_RAW:
+-		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+-					chan->address)) {
+-			dev_err(&indio_dev->dev, "GPADC mode\n");
+-			ret = -EINVAL;
+-			break;
+-		}
+ 		ret = axp288_adc_read_channel(val, chan->address, info->regmap);
+-		if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+-						chan->address))
+-			dev_err(&indio_dev->dev, "TS pin restore\n");
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+@@ -162,15 +141,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
+ 	return ret;
+ }
+ 
+-static int axp288_adc_set_state(struct regmap *regmap)
+-{
+-	/* ADC should be always enabled for internal FG to function */
+-	if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
+-		return -EIO;
+-
+-	return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+-}
+-
+ static const struct iio_info axp288_adc_iio_info = {
+ 	.read_raw = &axp288_adc_read_raw,
+ 	.driver_module = THIS_MODULE,
+@@ -199,7 +169,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
+ 	 * Set ADC to enabled state at all time, including system suspend.
+ 	 * otherwise internal fuel gauge functionality may be affected.
+ 	 */
+-	ret = axp288_adc_set_state(axp20x->regmap);
++	ret = regmap_write(info->regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "unable to enable ADC device\n");
+ 		return ret;
+diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
+index 8d9c9b9215dd..e7c68a71eeb8 100644
+--- a/drivers/iio/adc/mcp320x.c
++++ b/drivers/iio/adc/mcp320x.c
+@@ -294,6 +294,7 @@ static int mcp320x_probe(struct spi_device *spi)
+ 	indio_dev->name = spi_get_device_id(spi)->name;
+ 	indio_dev->modes = INDIO_DIRECT_MODE;
+ 	indio_dev->info = &mcp320x_info;
++	spi_set_drvdata(spi, indio_dev);
+ 
+ 	chip_info = &mcp320x_chip_infos[spi_get_device_id(spi)->driver_data];
+ 	indio_dev->channels = chip_info->channels;
+diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
+index ce93bd8e3f68..a483747cdc9b 100644
+--- a/drivers/iio/adc/xilinx-xadc-core.c
++++ b/drivers/iio/adc/xilinx-xadc-core.c
+@@ -1223,7 +1223,7 @@ static int xadc_probe(struct platform_device *pdev)
+ 
+ 	ret = xadc->ops->setup(pdev, indio_dev, irq);
+ 	if (ret)
+-		goto err_free_samplerate_trigger;
++		goto err_clk_disable_unprepare;
+ 
+ 	ret = request_threaded_irq(irq, xadc->ops->interrupt_handler,
+ 				xadc->ops->threaded_interrupt_handler,
+@@ -1284,6 +1284,8 @@ static int xadc_probe(struct platform_device *pdev)
+ 
+ err_free_irq:
+ 	free_irq(irq, indio_dev);
++err_clk_disable_unprepare:
++	clk_disable_unprepare(xadc->clk);
+ err_free_samplerate_trigger:
+ 	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ 		iio_trigger_free(xadc->samplerate_trigger);
+@@ -1293,8 +1295,6 @@ err_free_convst_trigger:
+ err_triggered_buffer_cleanup:
+ 	if (xadc->ops->flags & XADC_FLAGS_BUFFERED)
+ 		iio_triggered_buffer_cleanup(indio_dev);
+-err_clk_disable_unprepare:
+-	clk_disable_unprepare(xadc->clk);
+ err_device_free:
+ 	kfree(indio_dev->channels);
+ 
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
+index 4df97f650e44..b17aaa27c042 100644
+--- a/drivers/iio/industrialio-core.c
++++ b/drivers/iio/industrialio-core.c
+@@ -203,8 +203,10 @@ static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
+ 	ret = indio_dev->info->debugfs_reg_access(indio_dev,
+ 						  indio_dev->cached_reg_addr,
+ 						  0, &val);
+-	if (ret)
++	if (ret) {
+ 		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
++		return ret;
++	}
+ 
+ 	len = snprintf(buf, sizeof(buf), "0x%X\n", val);
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
+index f32b4628e991..55d51aa1bf56 100644
+--- a/drivers/infiniband/hw/qib/qib_iba7322.c
++++ b/drivers/infiniband/hw/qib/qib_iba7322.c
+@@ -7096,7 +7096,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start,
+ 	unsigned long flags;
+ 
+ 	while (wait) {
+-		unsigned long shadow;
++		unsigned long shadow = 0;
+ 		int cstart, previ = -1;
+ 
+ 		/*
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+index c65e62da1502..7599fb0b2d98 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
+@@ -977,7 +977,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
+ 						   rcu_dereference_protected(neigh->hnext,
+ 									     lockdep_is_held(&priv->lock)));
+ 				/* remove from path/mc list */
+-				list_del(&neigh->list);
++				list_del_init(&neigh->list);
+ 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ 			} else {
+ 				np = &neigh->hnext;
+@@ -1140,7 +1140,7 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
+ 					   rcu_dereference_protected(neigh->hnext,
+ 								     lockdep_is_held(&priv->lock)));
+ 			/* remove from parent list */
+-			list_del(&neigh->list);
++			list_del_init(&neigh->list);
+ 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ 			return;
+ 		} else {
+@@ -1225,7 +1225,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
+ 						   rcu_dereference_protected(neigh->hnext,
+ 									     lockdep_is_held(&priv->lock)));
+ 				/* remove from parent list */
+-				list_del(&neigh->list);
++				list_del_init(&neigh->list);
+ 				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ 			} else {
+ 				np = &neigh->hnext;
+@@ -1267,7 +1267,7 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
+ 					   rcu_dereference_protected(neigh->hnext,
+ 								     lockdep_is_held(&priv->lock)));
+ 			/* remove from path/mc list */
+-			list_del(&neigh->list);
++			list_del_init(&neigh->list);
+ 			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
+ 		}
+ 	}
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+index 57a34f87dedf..9b47a437d6c9 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+@@ -160,11 +160,11 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
+ out:
+ 	up_write(&ppriv->vlan_rwsem);
+ 
++	rtnl_unlock();
++
+ 	if (result)
+ 		free_netdev(priv->dev);
+ 
+-	rtnl_unlock();
+-
+ 	return result;
+ }
+ 
+@@ -185,7 +185,6 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
+ 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
+ 		if (priv->pkey == pkey &&
+ 		    priv->child_type == IPOIB_LEGACY_CHILD) {
+-			unregister_netdevice(priv->dev);
+ 			list_del(&priv->list);
+ 			dev = priv->dev;
+ 			break;
+@@ -193,6 +192,11 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
+ 	}
+ 	up_write(&ppriv->vlan_rwsem);
+ 
++	if (dev) {
++		ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
++		unregister_netdevice(dev);
++	}
++
+ 	rtnl_unlock();
+ 
+ 	if (dev) {
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 7c18249d6c8e..8b68a210277b 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -231,13 +231,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ 
+ 	/* Walk this report and pull out the info we need */
+ 	while (i < length) {
+-		prefix = report[i];
+-
+-		/* Skip over prefix */
+-		i++;
++		prefix = report[i++];
+ 
+ 		/* Determine data size and save the data in the proper variable */
+-		size = PREF_SIZE(prefix);
++		size = (1U << PREF_SIZE(prefix)) >> 1;
++		if (i + size > length) {
++			dev_err(ddev,
++				"Not enough data (need %d, have %d)\n",
++				i + size, length);
++			break;
++		}
++
+ 		switch (size) {
+ 		case 1:
+ 			data = report[i];
+@@ -245,8 +249,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
+ 		case 2:
+ 			data16 = get_unaligned_le16(&report[i]);
+ 			break;
+-		case 3:
+-			size = 4;
++		case 4:
+ 			data32 = get_unaligned_le32(&report[i]);
+ 			break;
+ 		}
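
The size computation works because a HID short item encodes its payload size in the low two bits of the prefix as 0, 1, 2 or 4 bytes; (1U << bits) >> 1 produces exactly that mapping without a lookup table, and the added bounds check rejects truncated reports. A sketch, assuming PREF_SIZE() extracts those two bits:

#include <stdio.h>

/* Low two bits of a HID short-item prefix select the payload size. */
static unsigned int hid_item_size(unsigned char prefix)
{
	return (1u << (prefix & 0x03)) >> 1;	/* 0,1,2,3 -> 0,1,2,4 */
}

int main(void)
{
	for (unsigned char bits = 0; bits < 4; bits++)
		printf("bits=%u -> %u bytes\n", bits, hid_item_size(bits));
	return 0;
}
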
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index f0fd5352f8ef..354da81a16bd 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -3380,6 +3380,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ 	mutex_unlock(&domain->api_lock);
+ 
+ 	domain_flush_tlb_pde(domain);
++	domain_flush_complete(domain);
+ 
+ 	return unmap_size;
+ }
+diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
+index c12bb93334ff..2f7dffaae93a 100644
+--- a/drivers/irqchip/irq-crossbar.c
++++ b/drivers/irqchip/irq-crossbar.c
+@@ -193,7 +193,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
+ 
+ static int __init crossbar_of_init(struct device_node *node)
+ {
+-	int i, size, max = 0, reserved = 0, entry;
++	int i, size, reserved = 0;
++	u32 max = 0, entry;
+ 	const __be32 *irqsr;
+ 	int ret = -ENOMEM;
+ 
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index bf3fbd00a091..64b586458d3d 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -828,7 +828,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 	isdn_net_local *lp;
+ 	struct ippp_struct *is;
+ 	int proto;
+-	unsigned char protobuf[4];
+ 
+ 	is = file->private_data;
+ 
+@@ -842,24 +841,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 	if (!lp)
+ 		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
+ 	else {
+-		/*
+-		 * Don't reset huptimer for
+-		 * LCP packets. (Echo requests).
+-		 */
+-		if (copy_from_user(protobuf, buf, 4))
+-			return -EFAULT;
+-		proto = PPP_PROTOCOL(protobuf);
+-		if (proto != PPP_LCP)
+-			lp->huptimer = 0;
++		if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
++			unsigned char protobuf[4];
++			/*
++			 * Don't reset huptimer for
++			 * LCP packets. (Echo requests).
++			 */
++			if (copy_from_user(protobuf, buf, 4))
++				return -EFAULT;
++
++			proto = PPP_PROTOCOL(protobuf);
++			if (proto != PPP_LCP)
++				lp->huptimer = 0;
+ 
+-		if (lp->isdn_device < 0 || lp->isdn_channel < 0)
+ 			return 0;
++		}
+ 
+ 		if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
+ 		    lp->dialstate == 0 &&
+ 		    (lp->flags & ISDN_NET_CONNECTED)) {
+ 			unsigned short hl;
+ 			struct sk_buff *skb;
++			unsigned char *cpy_buf;
+ 			/*
+ 			 * we need to reserve enough space in front of
+ 			 * sk_buff. old call to dev_alloc_skb only reserved
+@@ -872,11 +875,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
+ 				return count;
+ 			}
+ 			skb_reserve(skb, hl);
+-			if (copy_from_user(skb_put(skb, count), buf, count))
++			cpy_buf = skb_put(skb, count);
++			if (copy_from_user(cpy_buf, buf, count))
+ 			{
+ 				kfree_skb(skb);
+ 				return -EFAULT;
+ 			}
++
++			/*
++			 * Don't reset huptimer for
++			 * LCP packets. (Echo requests).
++			 */
++			proto = PPP_PROTOCOL(cpy_buf);
++			if (proto != PPP_LCP)
++				lp->huptimer = 0;
++
+ 			if (is->debug & 0x40) {
+ 				printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
+ 				isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 213c3332d7fb..0ba6c358c6e0 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -1502,11 +1502,24 @@ retry_write:
+ 			mbio->bi_private = r10_bio;
+ 
+ 			atomic_inc(&r10_bio->remaining);
++
++			cb = blk_check_plugged(raid10_unplug, mddev,
++					       sizeof(*plug));
++			if (cb)
++				plug = container_of(cb, struct raid10_plug_cb,
++						    cb);
++			else
++				plug = NULL;
+ 			spin_lock_irqsave(&conf->device_lock, flags);
+-			bio_list_add(&conf->pending_bio_list, mbio);
+-			conf->pending_count++;
++			if (plug) {
++				bio_list_add(&plug->pending, mbio);
++				plug->pending_cnt++;
++			} else {
++				bio_list_add(&conf->pending_bio_list, mbio);
++				conf->pending_count++;
++			}
+ 			spin_unlock_irqrestore(&conf->device_lock, flags);
+-			if (!mddev_check_plugged(mddev))
++			if (!plug)
+ 				md_wakeup_thread(mddev->thread);
+ 		}
+ 	}
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 69542a92e4b0..02e6d335f178 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -816,6 +816,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+ 			spin_unlock(&head->batch_head->batch_lock);
+ 			goto unlock_out;
+ 		}
++		/*
++		 * We must assign batch_head of this stripe within the
++		 * batch_lock; otherwise clear_batch_ready() of the batch
++		 * head stripe could clear the BATCH_READY bit of this
++		 * stripe while this stripe->batch_head is still unassigned,
++		 * which would confuse clear_batch_ready() for this stripe.
++		 */
++		sh->batch_head = head->batch_head;
+ 
+ 		/*
+ 		 * at this point, head's BATCH_READY could be cleared, but we
+@@ -823,8 +831,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
+ 		 */
+ 		list_add(&sh->batch_list, &head->batch_list);
+ 		spin_unlock(&head->batch_head->batch_lock);
+-
+-		sh->batch_head = head->batch_head;
+ 	} else {
+ 		head->batch_head = head;
+ 		sh->batch_head = head->batch_head;
+@@ -4249,7 +4255,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
+ 
+ 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+ 					    (1 << STRIPE_PREREAD_ACTIVE) |
+-					    (1 << STRIPE_DEGRADED)),
++					    (1 << STRIPE_DEGRADED) |
++					    (1 << STRIPE_ON_UNPLUG_LIST)),
+ 			      head_sh->state & (1 << STRIPE_INSYNC));
+ 
+ 		sh->check_state = head_sh->check_state;
+diff --git a/drivers/media/pci/ttpci/av7110_hw.c b/drivers/media/pci/ttpci/av7110_hw.c
+index 300bd3c94738..0992bb0e207e 100644
+--- a/drivers/media/pci/ttpci/av7110_hw.c
++++ b/drivers/media/pci/ttpci/av7110_hw.c
+@@ -56,11 +56,11 @@
+    by Nathan Laredo <laredo@gnu.org> */
+ 
+ int av7110_debiwrite(struct av7110 *av7110, u32 config,
+-		     int addr, u32 val, int count)
++		     int addr, u32 val, unsigned int count)
+ {
+ 	struct saa7146_dev *dev = av7110->dev;
+ 
+-	if (count <= 0 || count > 32764) {
++	if (count > 32764) {
+ 		printk("%s: invalid count %d\n", __func__, count);
+ 		return -1;
+ 	}
+@@ -78,12 +78,12 @@ int av7110_debiwrite(struct av7110 *av7110, u32 config,
+ 	return 0;
+ }
+ 
+-u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, int count)
++u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
+ {
+ 	struct saa7146_dev *dev = av7110->dev;
+ 	u32 result = 0;
+ 
+-	if (count > 32764 || count <= 0) {
++	if (count > 32764) {
+ 		printk("%s: invalid count %d\n", __func__, count);
+ 		return 0;
+ 	}
+diff --git a/drivers/media/pci/ttpci/av7110_hw.h b/drivers/media/pci/ttpci/av7110_hw.h
+index 1634aba5cb84..ccb148059406 100644
+--- a/drivers/media/pci/ttpci/av7110_hw.h
++++ b/drivers/media/pci/ttpci/av7110_hw.h
+@@ -377,14 +377,14 @@ extern int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
+ 
+ /* DEBI (saa7146 data extension bus interface) access */
+ extern int av7110_debiwrite(struct av7110 *av7110, u32 config,
+-			    int addr, u32 val, int count);
++			    int addr, u32 val, unsigned int count);
+ extern u32 av7110_debiread(struct av7110 *av7110, u32 config,
+-			   int addr, int count);
++			   int addr, unsigned int count);
+ 
+ 
+ /* DEBI during interrupt */
+ /* single word writes */
+-static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
++static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+ {
+ 	av7110_debiwrite(av7110, config, addr, val, count);
+ }
+@@ -397,7 +397,7 @@ static inline void mwdebi(struct av7110 *av7110, u32 config, int addr,
+ 	av7110_debiwrite(av7110, config, addr, 0, count);
+ }
+ 
+-static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
++static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+ {
+ 	u32 res;
+ 
+@@ -408,7 +408,7 @@ static inline u32 irdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
+ }
+ 
+ /* DEBI outside interrupts, only for count <= 4! */
+-static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
++static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+ {
+ 	unsigned long flags;
+ 
+@@ -417,7 +417,7 @@ static inline void wdebi(struct av7110 *av7110, u32 config, int addr, u32 val, i
+ 	spin_unlock_irqrestore(&av7110->debilock, flags);
+ }
+ 
+-static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, int count)
++static inline u32 rdebi(struct av7110 *av7110, u32 config, int addr, u32 val, unsigned int count)
+ {
+ 	unsigned long flags;
+ 	u32 res;
+diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
+index fd2891c886a3..668eacade975 100644
+--- a/drivers/media/platform/exynos-gsc/gsc-core.c
++++ b/drivers/media/platform/exynos-gsc/gsc-core.c
+@@ -849,9 +849,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
+ 
+ 	if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
+ 		(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
+-		(frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) ||
+ 		(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
+-		(frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) ||
+ 		(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
+ 		swap(addr->cb, addr->cr);
+ 
+diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
+index bee02e644d62..2900c0d4bd4e 100644
+--- a/drivers/mmc/core/sdio_bus.c
++++ b/drivers/mmc/core/sdio_bus.c
+@@ -259,7 +259,7 @@ static void sdio_release_func(struct device *dev)
+ 	sdio_free_func_cis(func);
+ 
+ 	kfree(func->info);
+-
++	kfree(func->tmpbuf);
+ 	kfree(func);
+ }
+ 
+@@ -274,6 +274,16 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
+ 	if (!func)
+ 		return ERR_PTR(-ENOMEM);
+ 
++	/*
++	 * allocate buffer separately to make sure it's properly aligned for
++	 * DMA usage (incl. 64 bit DMA)
++	 */
++	func->tmpbuf = kmalloc(4, GFP_KERNEL);
++	if (!func->tmpbuf) {
++		kfree(func);
++		return ERR_PTR(-ENOMEM);
++	}
++
+ 	func->card = card;
+ 
+ 	device_initialize(&func->dev);
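
The separate kmalloc() works because the allocator returns storage aligned for any object, whereas a small array embedded in a struct sits wherever its neighbours leave it, which makes it unsuitable as a DMA target. A userspace demonstration of the difference (the struct layout is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct func_like {
	char name[9];
	unsigned char tmpbuf_inline[4];	/* lands on an odd byte offset */
	unsigned char *tmpbuf;		/* points at a separate allocation */
};

int main(void)
{
	struct func_like *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	/* malloc(), like kmalloc(), returns storage aligned for any
	 * object, which a DMA engine can rely on; the embedded array
	 * inherits whatever offset its neighbours dictate. */
	f->tmpbuf = malloc(4);
	if (!f->tmpbuf) {
		free(f);
		return 1;
	}
	printf("inline   %% 8 = %lu\n", (unsigned long)f->tmpbuf_inline % 8);
	printf("separate %% 8 = %lu\n", (unsigned long)f->tmpbuf % 8);
	free(f->tmpbuf);
	free(f);
	return 0;
}
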
+diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
+index 411c1af92c62..730a2bac124d 100644
+--- a/drivers/net/can/usb/esd_usb2.c
++++ b/drivers/net/can/usb/esd_usb2.c
+@@ -335,7 +335,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
+ 		}
+ 
+ 		cf->can_id = id & ESD_IDMASK;
+-		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
++		cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
+ 
+ 		if (id & ESD_EXTID)
+ 			cf->can_id |= CAN_EFF_FLAG;
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index d47ae410ba6c..3b850f390fd7 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -356,6 +356,8 @@ static void gs_usb_recieve_bulk_callback(struct urb *urb)
+ 
+ 		gs_free_tx_context(txc);
+ 
++		atomic_dec(&dev->active_tx_urbs);
++
+ 		netif_wake_queue(netdev);
+ 	}
+ 
+@@ -444,14 +446,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
+ 			  urb->transfer_buffer_length,
+ 			  urb->transfer_buffer,
+ 			  urb->transfer_dma);
+-
+-	atomic_dec(&dev->active_tx_urbs);
+-
+-	if (!netif_device_present(netdev))
+-		return;
+-
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
+index 8b17a9065b0b..dc77225227c7 100644
+--- a/drivers/net/can/usb/kvaser_usb.c
++++ b/drivers/net/can/usb/kvaser_usb.c
+@@ -134,6 +134,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
+ #define CMD_RESET_ERROR_COUNTER		49
+ #define CMD_TX_ACKNOWLEDGE		50
+ #define CMD_CAN_ERROR_EVENT		51
++#define CMD_FLUSH_QUEUE_REPLY		68
+ 
+ #define CMD_LEAF_USB_THROTTLE		77
+ #define CMD_LEAF_LOG_MESSAGE		106
+@@ -1298,6 +1299,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
+ 			goto warn;
+ 		break;
+ 
++	case CMD_FLUSH_QUEUE_REPLY:
++		if (dev->family != KVASER_LEAF)
++			goto warn;
++		break;
++
+ 	default:
+ warn:		dev_warn(dev->udev->dev.parent,
+ 			 "Unhandled message (%d)\n", msg->id);
+@@ -1608,7 +1614,8 @@ static int kvaser_usb_close(struct net_device *netdev)
+ 	if (err)
+ 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
+ 
+-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
++	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
++	if (err)
+ 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
+ 
+ 	err = kvaser_usb_stop_chip(priv);
+diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
+index fdb5cdb3cd15..81abe46c9e0d 100644
+--- a/drivers/net/ethernet/ibm/emac/mal.c
++++ b/drivers/net/ethernet/ibm/emac/mal.c
+@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ 	unsigned long flags;
+ 
+ 	MAL_DBG2(mal, "poll(%d)" NL, budget);
+- again:
++
+ 	/* Process TX skbs */
+ 	list_for_each(l, &mal->poll_list) {
+ 		struct mal_commac *mc =
+@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
+ 			spin_lock_irqsave(&mal->lock, flags);
+ 			mal_disable_eob_irq(mal);
+ 			spin_unlock_irqrestore(&mal->lock, flags);
+-			goto again;
+ 		}
+ 		mc->ops->poll_tx(mc->dev);
+ 	}
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index dae2ebb53af7..d6677de375af 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -88,10 +88,17 @@ void mlx4_en_remove_timestamp(struct mlx4_en_dev *mdev)
+ 	}
+ }
+ 
++#define MLX4_EN_WRAP_AROUND_SEC	10UL
++/* By scheduling the overflow check every 5 seconds, we have a reasonably
++ * good chance we won't miss a wrap around.
++ * TODO: Use a timer instead of a work queue to increase the guarantee.
++ */
++#define MLX4_EN_OVERFLOW_PERIOD (MLX4_EN_WRAP_AROUND_SEC * HZ / 2)
++
+ void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev)
+ {
+ 	bool timeout = time_is_before_jiffies(mdev->last_overflow_check +
+-					      mdev->overflow_period);
++					      MLX4_EN_OVERFLOW_PERIOD);
+ 	unsigned long flags;
+ 
+ 	if (timeout) {
+@@ -236,7 +243,6 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ 	.enable		= mlx4_en_phc_enable,
+ };
+ 
+-#define MLX4_EN_WRAP_AROUND_SEC	10ULL
+ 
+ /* This function calculates the max shift that enables the user range
+  * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
+@@ -258,7 +264,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ 	struct mlx4_dev *dev = mdev->dev;
+ 	unsigned long flags;
+-	u64 ns, zero = 0;
+ 
+ 	rwlock_init(&mdev->clock_lock);
+ 
+@@ -275,13 +280,6 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ 			 ktime_to_ns(ktime_get_real()));
+ 	write_unlock_irqrestore(&mdev->clock_lock, flags);
+ 
+-	/* Calculate period in seconds to call the overflow watchdog - to make
+-	 * sure counter is checked at least once every wrap around.
+-	 */
+-	ns = cyclecounter_cyc2ns(&mdev->cycles, mdev->cycles.mask, zero, &zero);
+-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+-	mdev->overflow_period = ns;
+-
+ 	/* Configure the PHC */
+ 	mdev->ptp_clock_info = mlx4_en_ptp_clock_info;
+ 	snprintf(mdev->ptp_clock_info.name, 16, "mlx4 ptp");
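
The replaced computation boils down to sampling at least twice per wrap: with a cyclecounter that wraps after roughly MLX4_EN_WRAP_AROUND_SEC seconds, checking every half of that interval means two consecutive readings straddle at most one wrap. The arithmetic, with an assumed HZ value:

#include <stdio.h>

#define HZ_ 250				/* illustrative jiffies rate */
#define WRAP_AROUND_SEC 10UL
#define OVERFLOW_PERIOD (WRAP_AROUND_SEC * HZ_ / 2)

int main(void)
{
	/* A counter wrapping every 10 s, sampled at least every 5 s,
	 * can never wrap twice between two consecutive checks. */
	printf("check every %lu jiffies (%lu s)\n",
	       OVERFLOW_PERIOD, OVERFLOW_PERIOD / HZ_);
	return 0;
}
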
+diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
+index ced5ecab5aa7..115d301f1f61 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -768,8 +768,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
+ 		return -ENOSYS;
+ 	}
+ 
+-	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+-
+ 	dev->caps.hca_core_clock = hca_param.hca_core_clock;
+ 
+ 	memset(&dev_cap, 0, sizeof(dev_cap));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 909fcf803c54..239ef9dcc2e0 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -408,7 +408,6 @@ struct mlx4_en_dev {
+ 	struct cyclecounter	cycles;
+ 	struct timecounter	clock;
+ 	unsigned long		last_overflow_check;
+-	unsigned long		overflow_period;
+ 	struct ptp_clock	*ptp_clock;
+ 	struct ptp_clock_info	ptp_clock_info;
+ 	struct notifier_block	nb;
+diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
+index 06dbbe5201cb..2a22ac72cd1b 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.h
++++ b/drivers/net/ethernet/renesas/sh_eth.h
+@@ -328,7 +328,7 @@ enum FELIC_MODE_BIT {
+ 	ECMR_DPAD = 0x00200000, ECMR_RZPF = 0x00100000,
+ 	ECMR_ZPF = 0x00080000, ECMR_PFR = 0x00040000, ECMR_RXF = 0x00020000,
+ 	ECMR_TXF = 0x00010000, ECMR_MCT = 0x00002000, ECMR_PRCEF = 0x00001000,
+-	ECMR_PMDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
++	ECMR_MPDE = 0x00000200, ECMR_RE = 0x00000040, ECMR_TE = 0x00000020,
+ 	ECMR_RTM = 0x00000010, ECMR_ILB = 0x00000008, ECMR_ELB = 0x00000004,
+ 	ECMR_DM = 0x00000002, ECMR_PRM = 0x00000001,
+ };
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 2b45d0168c3c..a16b054a4fa6 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2334,8 +2334,10 @@ start_again:
+ 
+ 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ 			  TEAM_CMD_OPTIONS_GET);
+-	if (!hdr)
++	if (!hdr) {
++		nlmsg_free(skb);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+ 		goto nla_put_failure;
+@@ -2602,8 +2604,10 @@ start_again:
+ 
+ 	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
+ 			  TEAM_CMD_PORT_LIST_GET);
+-	if (!hdr)
++	if (!hdr) {
++		nlmsg_free(skb);
+ 		return -EMSGSIZE;
++	}
+ 
+ 	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
+ 		goto nla_put_failure;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 01f5ff84cf6b..12e67e0eb9c9 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1142,11 +1142,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
+ 	switch (tun->flags & TUN_TYPE_MASK) {
+ 	case IFF_TUN:
+ 		if (tun->flags & IFF_NO_PI) {
+-			switch (skb->data[0] & 0xf0) {
+-			case 0x40:
++			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
++
++			switch (ip_version) {
++			case 4:
+ 				pi.proto = htons(ETH_P_IP);
+ 				break;
+-			case 0x60:
++			case 6:
+ 				pi.proto = htons(ETH_P_IPV6);
+ 				break;
+ 			default:
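
The tun fix reads the IP version from the first nibble of the payload, but only after checking that the frame has a payload at all; previously an empty frame would have dereferenced skb->data[0]. A sketch of the guarded nibble test:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* The first nibble of an IP packet is its version field.  Testing the
 * length first avoids reading data[0] of an empty frame, which is the
 * bug the patch fixes in tun_get_user(). */
static unsigned int ip_version(const uint8_t *data, size_t len)
{
	return len ? data[0] >> 4 : 0;
}

int main(void)
{
	uint8_t v4hdr[] = { 0x45, 0x00 };	/* IPv4, IHL=5 */
	uint8_t v6hdr[] = { 0x60, 0x00 };	/* IPv6 */

	printf("%u %u %u\n",
	       ip_version(v4hdr, sizeof(v4hdr)),
	       ip_version(v6hdr, sizeof(v6hdr)),
	       ip_version(NULL, 0));		/* prints: 4 6 0 */
	return 0;
}
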
+diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
+index 7ba8d0885f12..2b47cbae524c 100644
+--- a/drivers/net/usb/Kconfig
++++ b/drivers/net/usb/Kconfig
+@@ -351,7 +351,7 @@ config USB_NET_NET1080
+ 	  optionally with LEDs that indicate traffic
+ 
+ config USB_NET_PLUSB
+-	tristate "Prolific PL-2301/2302/25A1 based cables"
++	tristate "Prolific PL-2301/2302/25A1/27A1 based cables"
+ 	# if the handshake/init/reset problems, from original 'plusb',
+ 	# are ever resolved ... then remove "experimental"
+ 	depends on USB_USBNET
+diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
+index 1bfe0fcaccf5..7c02231c1a1b 100644
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -102,7 +102,7 @@ static int pl_reset(struct usbnet *dev)
+ }
+ 
+ static const struct driver_info	prolific_info = {
+-	.description =	"Prolific PL-2301/PL-2302/PL-25A1",
++	.description =	"Prolific PL-2301/PL-2302/PL-25A1/PL-27A1",
+ 	.flags =	FLAG_POINTTOPOINT | FLAG_NO_SETINT,
+ 		/* some PL-2302 versions seem to fail usb_set_interface() */
+ 	.reset =	pl_reset,
+@@ -139,6 +139,17 @@ static const struct usb_device_id	products [] = {
+ 					 * Host-to-Host Cable
+ 					 */
+ 	.driver_info =  (unsigned long) &prolific_info,
++
++},
++
++/* super speed cables */
++{
++	USB_DEVICE(0x067b, 0x27a1),     /* PL-27A1, no eeprom
++					 * also: goobay Active USB 3.0
++					 * Data Link,
++					 * Unitek Y-3501
++					 */
++	.driver_info =  (unsigned long) &prolific_info,
+ },
+ 
+ 	{ },		// END
+@@ -158,5 +169,5 @@ static struct usb_driver plusb_driver = {
+ module_usb_driver(plusb_driver);
+ 
+ MODULE_AUTHOR("David Brownell");
+-MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
++MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1/27A1 USB Host to Host Link Driver");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+index c304b66af5c6..ab9f55344acd 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+@@ -869,7 +869,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 
+ 	eth_broadcast_addr(params_le->bssid);
+ 	params_le->bss_type = DOT11_BSSTYPE_ANY;
+-	params_le->scan_type = 0;
++	params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
+ 	params_le->channel_num = 0;
+ 	params_le->nprobes = cpu_to_le32(-1);
+ 	params_le->active_time = cpu_to_le32(-1);
+@@ -877,12 +877,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 	params_le->home_time = cpu_to_le32(-1);
+ 	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
+ 
+-	/* if request is null exit so it will be all channel broadcast scan */
+-	if (!request)
+-		return;
+-
+ 	n_ssids = request->n_ssids;
+ 	n_channels = request->n_channels;
++
+ 	/* Copy channel array if applicable */
+ 	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
+ 		  n_channels);
+@@ -919,16 +916,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
+ 			ptr += sizeof(ssid_le);
+ 		}
+ 	} else {
+-		brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
+-		if ((request->ssids) && request->ssids->ssid_len) {
+-			brcmf_dbg(SCAN, "SSID %s len=%d\n",
+-				  params_le->ssid_le.SSID,
+-				  request->ssids->ssid_len);
+-			params_le->ssid_le.SSID_len =
+-				cpu_to_le32(request->ssids->ssid_len);
+-			memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
+-				request->ssids->ssid_len);
+-		}
++		brcmf_dbg(SCAN, "Performing passive scan\n");
++		params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
+ 	}
+ 	/* Adding mask to channel numbers */
+ 	params_le->channel_num =
+@@ -2826,6 +2815,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
+ 	s32 status;
+ 	struct brcmf_escan_result_le *escan_result_le;
++	u32 escan_buflen;
+ 	struct brcmf_bss_info_le *bss_info_le;
+ 	struct brcmf_bss_info_le *bss = NULL;
+ 	u32 bi_length;
+@@ -2842,11 +2832,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 
+ 	if (status == BRCMF_E_STATUS_PARTIAL) {
+ 		brcmf_dbg(SCAN, "ESCAN Partial result\n");
++		if (e->datalen < sizeof(*escan_result_le)) {
++			brcmf_err("invalid event data length\n");
++			goto exit;
++		}
+ 		escan_result_le = (struct brcmf_escan_result_le *) data;
+ 		if (!escan_result_le) {
+ 			brcmf_err("Invalid escan result (NULL pointer)\n");
+ 			goto exit;
+ 		}
++		escan_buflen = le32_to_cpu(escan_result_le->buflen);
++		if (escan_buflen > WL_ESCAN_BUF_SIZE ||
++		    escan_buflen > e->datalen ||
++		    escan_buflen < sizeof(*escan_result_le)) {
++			brcmf_err("Invalid escan buffer length: %d\n",
++				  escan_buflen);
++			goto exit;
++		}
+ 		if (le16_to_cpu(escan_result_le->bss_count) != 1) {
+ 			brcmf_err("Invalid bss_count %d: ignoring\n",
+ 				  escan_result_le->bss_count);
+@@ -2863,9 +2865,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
+ 		}
+ 
+ 		bi_length = le32_to_cpu(bss_info_le->length);
+-		if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
+-					WL_ESCAN_RESULTS_FIXED_SIZE)) {
+-			brcmf_err("Invalid bss_info length %d: ignoring\n",
++		if (bi_length != escan_buflen -	WL_ESCAN_RESULTS_FIXED_SIZE) {
++			brcmf_err("Ignoring invalid bss_info length: %d\n",
+ 				  bi_length);
+ 			goto exit;
+ 		}
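
The escan handler now validates the firmware-reported buflen against three independent bounds (the static buffer size, the bytes actually received, and the structure's own minimum), since any single check alone leaves a hole. A condensed model of the checks (the sizes and the struct shape are illustrative):

#include <stdio.h>
#include <stdint.h>

#define WL_ESCAN_BUF_SIZE_ 65536u	/* illustrative bound */

struct escan_result { uint32_t buflen; uint16_t bss_count; /* ... */ };

/* A device-reported length is valid only if it passes every bound. */
static int escan_len_ok(uint32_t buflen, uint32_t datalen)
{
	if (buflen > WL_ESCAN_BUF_SIZE_)	/* larger than our buffer */
		return 0;
	if (buflen > datalen)			/* claims more than received */
		return 0;
	if (buflen < sizeof(struct escan_result)) /* shorter than header */
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", escan_len_ok(512, 512));	/* 1 */
	printf("%d\n", escan_len_ok(512, 100));	/* 0: overruns the event */
	printf("%d\n", escan_len_ok(4, 512));	/* 0: truncated header */
	return 0;
}
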
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+index 374920965108..909f4571e5c7 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
++++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+@@ -41,6 +41,11 @@
+ #define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
+ #define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16
+ 
++/* scan type definitions */
++#define BRCMF_SCANTYPE_DEFAULT		0xFF
++#define BRCMF_SCANTYPE_ACTIVE		0
++#define BRCMF_SCANTYPE_PASSIVE		1
++
+ /* primary (ie tx) key */
+ #define BRCMF_PRIMARY_KEY		(1 << 1)
+ #define DOT11_BSSTYPE_ANY		2
+diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+index 99dac9b8a082..c75bfd3f8cb3 100644
+--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
++++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
+ }
+ 
+ static void
+-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
+-		       u8 len)
++wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
++		       const u8 *dlys, u8 len)
+ {
+ 	u32 t1_offset, t2_offset;
+ 	u8 ctr;
+@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
+ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ {
+ 	u16 currband;
+-	s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+-	s8 *lna1_gain_db = NULL;
+-	s8 *lna1_gain_db_2 = NULL;
+-	s8 *lna2_gain_db = NULL;
+-	s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+-	s8 *tia_gain_db;
+-	s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+-	s8 *tia_gainbits;
+-	u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+-	u16 *rfseq_init_gain;
++	static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
++	const s8 *lna1_gain_db = NULL;
++	const s8 *lna1_gain_db_2 = NULL;
++	const s8 *lna2_gain_db = NULL;
++	static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
++	const s8 *tia_gain_db;
++	static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
++	const s8 *tia_gainbits;
++	static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
++	const u16 *rfseq_init_gain;
+ 	u16 init_gaincode;
+ 	u16 clip1hi_gaincode;
+ 	u16 clip1md_gaincode = 0;
+@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 
+ 			if ((freq <= 5080) || (freq == 5825)) {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					11, 17, 22, 25};
+-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
++				static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
++				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ 
+ 				crsminu_th = 0x3e;
+ 				lna1_gain_db = lna1A_gain_db_rev7;
+@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 				lna2_gain_db = lna2A_gain_db_rev7;
+ 			} else if ((freq >= 5500) && (freq <= 5700)) {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					12, 18, 22, 26};
+-				s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
++				static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
++				static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ 
+ 				crsminu_th = 0x45;
+ 				clip1md_gaincode_B = 0x14;
+@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
+ 				lna2_gain_db = lna2A_gain_db_rev7;
+ 			} else {
+ 
+-				s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+-				s8 lna1A_gain_db_2_rev7[] = {
+-					12, 18, 22, 26};
+-				s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
++				static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
++				static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
++				static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ 
+ 				crsminu_th = 0x41;
+ 				lna1_gain_db = lna1A_gain_db_rev7;
+@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
+ 		NPHY_RFSEQ_CMD_SET_HPF_BW
+ 	};
+-	u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+-	s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+-	s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+-	s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+-	s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+-	s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+-	s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+-	s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+-	s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+-	s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+-	s8 *lna1_gain_db = NULL;
+-	s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+-	s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+-	s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+-	s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+-	s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+-	s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+-	s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+-	s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+-	s8 *lna2_gain_db = NULL;
+-	s8 tiaG_gain_db[] = {
++	static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
++	static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
++	static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
++	static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
++	static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
++	static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
++	static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
++	static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
++	static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
++	static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
++	const s8 *lna1_gain_db = NULL;
++	static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
++	static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
++	static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
++	static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
++	static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
++	static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
++	static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
++	static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
++	const s8 *lna2_gain_db = NULL;
++	static const s8 tiaG_gain_db[] = {
+ 		0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
+-	s8 tiaA_gain_db[] = {
++	static const s8 tiaA_gain_db[] = {
+ 		0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
+-	s8 tiaA_gain_db_rev4[] = {
++	static const s8 tiaA_gain_db_rev4[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 tiaA_gain_db_rev5[] = {
++	static const s8 tiaA_gain_db_rev5[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 tiaA_gain_db_rev6[] = {
++	static const s8 tiaA_gain_db_rev6[] = {
+ 		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
+-	s8 *tia_gain_db;
+-	s8 tiaG_gainbits[] = {
++	const s8 *tia_gain_db;
++	static const s8 tiaG_gainbits[] = {
+ 		0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
+-	s8 tiaA_gainbits[] = {
++	static const s8 tiaA_gainbits[] = {
+ 		0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
+-	s8 tiaA_gainbits_rev4[] = {
++	static const s8 tiaA_gainbits_rev4[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 tiaA_gainbits_rev5[] = {
++	static const s8 tiaA_gainbits_rev5[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 tiaA_gainbits_rev6[] = {
++	static const s8 tiaA_gainbits_rev6[] = {
+ 		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
+-	s8 *tia_gainbits;
+-	s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+-	s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+-	u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+-	u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+-	u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+-	u16 rfseqG_init_gain_rev5_elna[] = {
++	const s8 *tia_gainbits;
++	static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
++	static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
++	static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
++	static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
++	static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
++	static const u16 rfseqG_init_gain_rev5_elna[] = {
+ 		0x013f, 0x013f, 0x013f, 0x013f };
+-	u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+-	u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+-	u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+-	u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+-	u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+-	u16 rfseqA_init_gain_rev4_elna[] = {
++	static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
++	static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
++	static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
++	static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
++	static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
++	static const u16 rfseqA_init_gain_rev4_elna[] = {
+ 		0x314f, 0x314f, 0x314f, 0x314f };
+-	u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+-	u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+-	u16 *rfseq_init_gain;
++	static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
++	static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
++	const u16 *rfseq_init_gain;
+ 	u16 initG_gaincode = 0x627e;
+ 	u16 initG_gaincode_rev4 = 0x527e;
+ 	u16 initG_gaincode_rev5 = 0x427e;
+@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 	u16 clip1mdA_gaincode_rev6 = 0x2084;
+ 	u16 clip1md_gaincode = 0;
+ 	u16 clip1loG_gaincode = 0x0074;
+-	u16 clip1loG_gaincode_rev5[] = {
++	static const u16 clip1loG_gaincode_rev5[] = {
+ 		0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
+ 	};
+-	u16 clip1loG_gaincode_rev6[] = {
++	static const u16 clip1loG_gaincode_rev6[] = {
+ 		0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
+ 	};
+ 	u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
+@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
+ 
+ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ {
+-	u8 rfseq_rx2tx_events[] = {
++	static const u8 rfseq_rx2tx_events[] = {
+ 		NPHY_RFSEQ_CMD_NOP,
+ 		NPHY_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_RFSEQ_CMD_TR_SWITCH,
+@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_EXT_PA
+ 	};
+ 	u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
+-	u8 rfseq_tx2rx_events[] = {
++	static const u8 rfseq_tx2rx_events[] = {
+ 		NPHY_RFSEQ_CMD_NOP,
+ 		NPHY_RFSEQ_CMD_EXT_PA,
+ 		NPHY_RFSEQ_CMD_TX_GAIN,
+@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_RFSEQ_CMD_CLR_HIQ_DIS
+ 	};
+-	u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+-	u8 rfseq_tx2rx_events_rev3[] = {
++	static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
++	static const u8 rfseq_tx2rx_events_rev3[] = {
+ 		NPHY_REV3_RFSEQ_CMD_EXT_PA,
+ 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ 		NPHY_REV3_RFSEQ_CMD_TX_GAIN,
+@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
+ 		NPHY_REV3_RFSEQ_CMD_END
+ 	};
+-	u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
++	static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ 	u8 rfseq_rx2tx_events_rev3[] = {
+ 		NPHY_REV3_RFSEQ_CMD_NOP,
+ 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	};
+ 	u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+ 
+-	u8 rfseq_rx2tx_events_rev3_ipa[] = {
++	static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ 		NPHY_REV3_RFSEQ_CMD_NOP,
+ 		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
+ 		NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
+@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
+ 		NPHY_REV3_RFSEQ_CMD_END
+ 	};
+-	u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+-	u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
++	static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
++	static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ 
+ 	s16 alpha0, alpha1, alpha2;
+ 	s16 beta0, beta1, beta2;
+ 	u32 leg_data_weights, ht_data_weights, nss1_data_weights,
+ 	    stbc_data_weights;
+ 	u8 chan_freq_range = 0;
+-	u16 dac_control = 0x0002;
++	static const u16 dac_control = 0x0002;
+ 	u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
+ 	u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
+ 	u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
+@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
+ 	u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
+ 	u16 *aux_adc_gain;
+-	u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+-	u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
++	static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
++	static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ 	s32 min_nvar_val = 0x18d;
+ 	s32 min_nvar_offset_6mbps = 20;
+ 	u8 pdetrange;
+@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
+ 	u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
+ 	u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
+ 	u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
+-	u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+-	u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+-	u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
++	static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
++	static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
++	static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ 	u16 ipalvlshift_3p3_war_en = 0;
+ 	u16 rccal_bcap_val, rccal_scap_val;
+ 	u16 rccal_tx20_11b_bcap = 0;
+@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
+ 	u16 bbmult;
+ 	u16 tblentry;
+ 
+-	struct nphy_txiqcal_ladder ladder_lo[] = {
++	static const struct nphy_txiqcal_ladder ladder_lo[] = {
+ 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ 		{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
+ 		{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
+ 	};
+ 
+-	struct nphy_txiqcal_ladder ladder_iq[] = {
++	static const struct nphy_txiqcal_ladder ladder_iq[] = {
+ 		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
+ 		{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
+ 		{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
+@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
+ 	u16 cal_gain[2];
+ 	struct nphy_iqcal_params cal_params[2];
+ 	u32 tbl_len;
+-	void *tbl_ptr;
++	const void *tbl_ptr;
+ 	bool ladder_updated[2];
+ 	u8 mphase_cal_lastphase = 0;
+ 	int bcmerror = 0;
+ 	bool phyhang_avoid_state = false;
+ 
+-	u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
++	static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 		0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
+ 		0x1902,
+ 		0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
+ 		0x6407
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
++	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 		0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
+ 		0x3200,
+ 		0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
+ 		0x6407
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
++	static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 		0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
+ 		0x1202,
+ 		0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
+ 		0x4707
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
++	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 		0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
+ 		0x2300,
+ 		0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
+ 		0x4707
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_startcoefs[] = {
++	static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 		0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
+ 		0x9123, 0x9264, 0x9086, 0x9245, 0x9056
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_recal[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 		0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
+ 		0x9101, 0x9253, 0x9053, 0x9234, 0x9034
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 		0x0000
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
+ 		0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
+ 	};
+ 
+-	u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
++	static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 		0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
+ 		0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
+ 	};
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 90e8b662e44d..d82d2b787c7c 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -1766,6 +1766,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ 	struct iwl_mvm_mc_iter_data *data = _data;
+ 	struct iwl_mvm *mvm = data->mvm;
+ 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
++	struct iwl_host_cmd hcmd = {
++		.id = MCAST_FILTER_CMD,
++		.flags = CMD_ASYNC,
++		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
++	};
+ 	int ret, len;
+ 
+ 	/* if we don't have free ports, mcast frames will be dropped */
+@@ -1780,7 +1785,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+ 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+ 
+-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
++	hcmd.len[0] = len;
++	hcmd.data[0] = cmd;
++
++	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+ 	if (ret)
+ 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+ }
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index cdbad7d72afa..03097016fd43 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2883,6 +2883,7 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2,
+ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ {
+ 	struct hwsim_new_radio_params param = { 0 };
++	const char *hwname = NULL;
+ 
+ 	param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ 	param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
+@@ -2896,8 +2897,14 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ 	if (info->attrs[HWSIM_ATTR_NO_VIF])
+ 		param.no_vif = true;
+ 
+-	if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+-		param.hwname = nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
++	if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++		hwname = kasprintf(GFP_KERNEL, "%.*s",
++				   nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++				   (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++		if (!hwname)
++			return -ENOMEM;
++		param.hwname = hwname;
++	}
+ 
+ 	if (info->attrs[HWSIM_ATTR_USE_CHANCTX])
+ 		param.use_chanctx = true;
+@@ -2925,11 +2932,15 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ 	s64 idx = -1;
+ 	const char *hwname = NULL;
+ 
+-	if (info->attrs[HWSIM_ATTR_RADIO_ID])
++	if (info->attrs[HWSIM_ATTR_RADIO_ID]) {
+ 		idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]);
+-	else if (info->attrs[HWSIM_ATTR_RADIO_NAME])
+-		hwname = (void *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]);
+-	else
++	} else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
++		hwname = kasprintf(GFP_KERNEL, "%.*s",
++				   nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]),
++				   (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]));
++		if (!hwname)
++			return -ENOMEM;
++	} else
+ 		return -EINVAL;
+ 
+ 	spin_lock_bh(&hwsim_radio_lock);
+@@ -2938,7 +2949,8 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ 			if (data->idx != idx)
+ 				continue;
+ 		} else {
+-			if (strcmp(hwname, wiphy_name(data->hw->wiphy)))
++			if (!hwname ||
++			    strcmp(hwname, wiphy_name(data->hw->wiphy)))
+ 				continue;
+ 		}
+ 
+@@ -2946,10 +2958,12 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
+ 		spin_unlock_bh(&hwsim_radio_lock);
+ 		mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
+ 					 info);
++		kfree(hwname);
+ 		return 0;
+ 	}
+ 	spin_unlock_bh(&hwsim_radio_lock);
+ 
++	kfree(hwname);
+ 	return -ENODEV;
+ }
+ 
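
Both mac80211_hwsim hunks guard the same trap: a netlink string attribute is not guaranteed to be NUL-terminated, so the result of nla_data() must never reach strcmp() directly. The fix copies at most nla_len() bytes into a freshly allocated, terminated buffer, and the hunk is careful to kfree() it on every exit path. Roughly, as a standalone helper (illustrative name, kernel context assumed):

	/* Return a NUL-terminated heap copy of a netlink string attribute,
	 * bounded by the attribute's declared payload length; the caller
	 * owns the result and must kfree() it. */
	static char *copy_nl_string(const struct nlattr *attr)
	{
		return kasprintf(GFP_KERNEL, "%.*s",
				 nla_len(attr), (const char *)nla_data(attr));
	}
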
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 1cfaafc670a8..c8be27dd8e59 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -519,7 +519,7 @@ static ssize_t driver_override_store(struct device *dev,
+ 				     const char *buf, size_t count)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
+-	char *driver_override, *old = pdev->driver_override, *cp;
++	char *driver_override, *old, *cp;
+ 
+ 	/* We need to keep extra room for a newline */
+ 	if (count >= (PAGE_SIZE - 1))
+@@ -533,12 +533,15 @@ static ssize_t driver_override_store(struct device *dev,
+ 	if (cp)
+ 		*cp = '\0';
+ 
++	device_lock(dev);
++	old = pdev->driver_override;
+ 	if (strlen(driver_override)) {
+ 		pdev->driver_override = driver_override;
+ 	} else {
+ 		kfree(driver_override);
+ 		pdev->driver_override = NULL;
+ 	}
++	device_unlock(dev);
+ 
+ 	kfree(old);
+ 
+@@ -549,8 +552,12 @@ static ssize_t driver_override_show(struct device *dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct pci_dev *pdev = to_pci_dev(dev);
++	ssize_t len;
+ 
+-	return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++	device_lock(dev);
++	len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
++	device_unlock(dev);
++	return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
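
The driver_override fix is a textbook read-modify-write race repair: store() used to load the old pointer before taking any lock, so two concurrent writers could both free the same string, and show() could format a pointer mid-free. Taking device_lock() around the pointer swap, and around the read in show(), serializes them; the old string is still freed outside the lock. A condensed sketch of the store side (illustrative helper name, kernel context assumed):

	/* Publish the new string under the device lock; free the old one
	 * only once no locked reader can still observe it. */
	static void set_override(struct pci_dev *pdev, char *new_str)
	{
		char *old;

		device_lock(&pdev->dev);
		old = pdev->driver_override;
		pdev->driver_override = new_str;	/* NULL clears it */
		device_unlock(&pdev->dev);

		kfree(old);
	}
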
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 6de09147e791..ad42fe8e6d23 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -225,7 +225,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 		zfcp_erp_wait(adapter);
+ 		ret = fc_block_scsi_eh(scpnt);
+ 		if (ret) {
+-			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
++			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags);
+ 			return ret;
+ 		}
+ 
+@@ -236,7 +236,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 		}
+ 	}
+ 	if (!fsf_req) {
+-		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
++		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags);
+ 		return FAILED;
+ 	}
+ 
+diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
+index 6ed1caadbc6a..78ff191e9427 100644
+--- a/drivers/scsi/device_handler/scsi_dh_emc.c
++++ b/drivers/scsi/device_handler/scsi_dh_emc.c
+@@ -464,7 +464,7 @@ static int clariion_prep_fn(struct scsi_device *sdev, struct request *req)
+ static int clariion_std_inquiry(struct scsi_device *sdev,
+ 				struct clariion_dh_data *csdev)
+ {
+-	int err;
++	int err = SCSI_DH_OK;
+ 	char *sp_model;
+ 
+ 	err = send_inquiry_cmd(sdev, 0, csdev);
+diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
+index 67d43e35693d..b5a653aed5a4 100644
+--- a/drivers/scsi/scsi_transport_iscsi.c
++++ b/drivers/scsi/scsi_transport_iscsi.c
+@@ -3693,7 +3693,7 @@ iscsi_if_rx(struct sk_buff *skb)
+ 		uint32_t group;
+ 
+ 		nlh = nlmsg_hdr(skb);
+-		if (nlh->nlmsg_len < sizeof(*nlh) ||
++		if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
+ 		    skb->len < nlh->nlmsg_len) {
+ 			break;
+ 		}
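
The one-line iscsi change tightens a length check: validating only sizeof(*nlh) lets a runt message through whose fixed-size event payload is then read past the end of the buffer. The rule for any netlink handler is that nlmsg_len must cover the header plus the smallest payload the handler will dereference, and the buffer must really contain nlmsg_len bytes. Expressed as a predicate with stand-in layouts (the real structs are larger):

	#include <stddef.h>
	#include <stdint.h>

	struct nlmsghdr { uint32_t nlmsg_len; };	/* stand-in */
	struct iscsi_uevent { uint32_t type; };		/* stand-in */

	static int msg_parseable(const struct nlmsghdr *nlh, size_t buf_len)
	{
		/* header + fixed payload must fit, and the buffer must
		 * hold everything the header claims */
		return nlh->nlmsg_len >= sizeof(*nlh) + sizeof(struct iscsi_uevent) &&
		       buf_len >= nlh->nlmsg_len;
	}
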
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index fbdba7925723..102806a961da 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -842,7 +842,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+ 
+ 	val = 0;
+ 	list_for_each_entry(srp, &sfp->rq_list, entry) {
+-		if (val > SG_MAX_QUEUE)
++		if (val >= SG_MAX_QUEUE)
+ 			break;
+ 		rinfo[val].req_state = srp->done + 1;
+ 		rinfo[val].problem =
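
The sg change is a one-character off-by-one: rinfo[] holds SG_MAX_QUEUE entries, so the last valid index is SG_MAX_QUEUE - 1, and a guard of "val > SG_MAX_QUEUE" still lets val == SG_MAX_QUEUE through and writes one element past the array. Reduced to its skeleton (hypothetical filler loop):

	#define SG_MAX_QUEUE 16

	static void fill_table(int *rinfo, int nreq)
	{
		int val;

		for (val = 0; val < nreq; val++) {
			if (val >= SG_MAX_QUEUE)  /* ">" here overruns by one */
				break;
			rinfo[val] = val;
		}
	}
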
+diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
+index fe56fb6c7d30..2684b8adfa50 100644
+--- a/drivers/staging/iio/adc/ad7192.c
++++ b/drivers/staging/iio/adc/ad7192.c
+@@ -206,11 +206,9 @@ static int ad7192_setup(struct ad7192_state *st,
+ 	struct iio_dev *indio_dev = spi_get_drvdata(st->sd.spi);
+ 	unsigned long long scale_uv;
+ 	int i, ret, id;
+-	u8 ones[6];
+ 
+ 	/* reset the serial interface */
+-	memset(&ones, 0xFF, 6);
+-	ret = spi_write(st->sd.spi, &ones, 6);
++	ret = ad_sd_reset(&st->sd, 48);
+ 	if (ret < 0)
+ 		goto out;
+ 	usleep_range(500, 1000); /* Wait for at least 500us */
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index 959a14c9dd5d..10c81298eb0a 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -44,10 +44,8 @@ void iscsit_set_dataout_sequence_values(
+ 	 */
+ 	if (cmd->unsolicited_data) {
+ 		cmd->seq_start_offset = cmd->write_data_done;
+-		cmd->seq_end_offset = (cmd->write_data_done +
+-			((cmd->se_cmd.data_length >
+-			  conn->sess->sess_ops->FirstBurstLength) ?
+-			 conn->sess->sess_ops->FirstBurstLength : cmd->se_cmd.data_length));
++		cmd->seq_end_offset = min(cmd->se_cmd.data_length,
++					conn->sess->sess_ops->FirstBurstLength);
+ 		return;
+ 	}
+ 
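
The iscsi_target hunk replaces a hard-to-parse conditional with min(): the sequence end is simply the smaller of the command's data length and the negotiated FirstBurstLength, and min() states that directly (in the kernel it also warns when the operand types disagree). The equivalence, isolated into a hypothetical helper:

	#include <linux/kernel.h>	/* min() */
	#include <linux/types.h>

	static u32 first_burst_end(u32 data_length, u32 first_burst_length)
	{
		/* same value as:
		 * data_length > first_burst_length ? first_burst_length
		 *                                  : data_length */
		return min(data_length, first_burst_length);
	}
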
+diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
+index 0655fecf8240..ed7831b0f8b5 100644
+--- a/drivers/tty/goldfish.c
++++ b/drivers/tty/goldfish.c
+@@ -293,7 +293,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
+ 	return 0;
+ 
+ err_tty_register_device_failed:
+-	free_irq(irq, pdev);
++	free_irq(irq, qtty);
+ err_request_irq_failed:
+ 	goldfish_tty_current_line_count--;
+ 	if (goldfish_tty_current_line_count == 0)
+diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
+index 534754440fa8..5d5f25600f04 100644
+--- a/drivers/tty/serial/sunhv.c
++++ b/drivers/tty/serial/sunhv.c
+@@ -397,6 +397,12 @@ static struct uart_driver sunhv_reg = {
+ 
+ static struct uart_port *sunhv_port;
+ 
++void sunhv_migrate_hvcons_irq(int cpu)
++{
++	/* Migrate hvcons irq to param cpu */
++	irq_force_affinity(sunhv_port->irq, cpumask_of(cpu));
++}
++
+ /* Copy 's' into the con_write_page, decoding "\n" into
+  * "\r\n" along the way.  We have to return two lengths
+  * because the caller needs to know how much to advance
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index acab64245923..463d8a3375f5 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1774,6 +1774,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
++	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
++	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
++	},
+ 
+ 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
+ 	.driver_info = CLEAR_HALT_CONDITIONS,
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 28085c240160..0b99f913d7f2 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -548,15 +548,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
+ 
+ 		} else if (header->bDescriptorType ==
+ 				USB_DT_INTERFACE_ASSOCIATION) {
++			struct usb_interface_assoc_descriptor *d;
++
++			d = (struct usb_interface_assoc_descriptor *)header;
++			if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
++				dev_warn(ddev,
++					 "config %d has an invalid interface association descriptor of length %d, skipping\n",
++					 cfgno, d->bLength);
++				continue;
++			}
++
+ 			if (iad_num == USB_MAXIADS) {
+ 				dev_warn(ddev, "found more Interface "
+ 					       "Association Descriptors "
+ 					       "than allocated for in "
+ 					       "configuration %d\n", cfgno);
+ 			} else {
+-				config->intf_assoc[iad_num] =
+-					(struct usb_interface_assoc_descriptor
+-					*)header;
++				config->intf_assoc[iad_num] = d;
+ 				iad_num++;
+ 			}
+ 
+@@ -757,7 +765,7 @@ int usb_get_configuration(struct usb_device *dev)
+ 		}
+ 
+ 		if (dev->quirks & USB_QUIRK_DELAY_INIT)
+-			msleep(100);
++			msleep(200);
+ 
+ 		result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
+ 		    bigbuffer, length);
+@@ -857,10 +865,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
+ 	for (i = 0; i < num; i++) {
+ 		buffer += length;
+ 		cap = (struct usb_dev_cap_header *)buffer;
+-		length = cap->bLength;
+ 
+-		if (total_len < length)
++		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
++			dev->bos->desc->bNumDeviceCaps = i;
+ 			break;
++		}
++		length = cap->bLength;
+ 		total_len -= length;
+ 
+ 		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
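
Both usb/core/config.c validation hunks apply the same defensive idea: never trust a device-supplied bLength until you have checked that the remaining buffer can hold at least the descriptor header and that it can hold the full length the descriptor claims. The BOS walk additionally records how many capabilities were actually parsed (bNumDeviceCaps = i) so later readers do not walk the truncated tail. The walk, reduced to a standalone step function with a stand-in header layout:

	#include <stddef.h>
	#include <stdint.h>

	struct cap_header {
		uint8_t bLength;
		uint8_t bDescriptorType;
	} __attribute__((packed));

	/* Advance to the next capability, or NULL if the header does not
	 * fit or claims more bytes than remain. */
	static const uint8_t *next_cap(const uint8_t *p, size_t *remaining)
	{
		const struct cap_header *cap = (const void *)p;

		if (*remaining < sizeof(*cap) || *remaining < cap->bLength)
			return NULL;
		*remaining -= cap->bLength;
		return p + cap->bLength;
	}
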
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index fbf5c57b8251..8600144a7aab 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2670,13 +2670,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ 	if (!(portstatus & USB_PORT_STAT_CONNECTION))
+ 		return -ENOTCONN;
+ 
+-	/* bomb out completely if the connection bounced.  A USB 3.0
+-	 * connection may bounce if multiple warm resets were issued,
++	/* Retry if connect change is set but status is still connected.
++	 * A USB 3.0 connection may bounce if multiple warm resets were issued,
+ 	 * but the device may have successfully re-connected. Ignore it.
+ 	 */
+ 	if (!hub_is_superspeed(hub->hdev) &&
+-			(portchange & USB_PORT_STAT_C_CONNECTION))
+-		return -ENOTCONN;
++	    (portchange & USB_PORT_STAT_C_CONNECTION)) {
++		usb_clear_port_feature(hub->hdev, port1,
++				       USB_PORT_FEAT_C_CONNECTION);
++		return -EAGAIN;
++	}
+ 
+ 	if (!(portstatus & USB_PORT_STAT_ENABLE))
+ 		return -EBUSY;
+@@ -4718,7 +4721,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 			goto loop;
+ 
+ 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
+-			msleep(1000);
++			msleep(2000);
+ 
+ 		/* consecutive bus-powered hubs aren't reliable; they can
+ 		 * violate the voltage drop budget.  if the new child has
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 82806e311202..a6aaf2f193a4 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Corsair Strafe RGB */
+ 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* MIDI keyboard WORLDE MINI */
++	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
++			USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ 	/* Acer C120 LED Projector */
+ 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index d17304ae0b42..a01d90fe37d9 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1867,6 +1867,8 @@ static DEVICE_ATTR_RO(suspended);
+ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+ {
+ 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
++	struct usb_gadget_strings	*gstr = cdev->driver->strings[0];
++	struct usb_string		*dev_str = gstr->strings;
+ 
+ 	/* composite_disconnect() must already have been called
+ 	 * by the underlying peripheral controller driver!
+@@ -1886,6 +1888,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
+ 
+ 	composite_dev_cleanup(cdev);
+ 
++	if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer)
++		dev_str[USB_GADGET_MANUFACTURER_IDX].s = "";
++
+ 	kfree(cdev->def_manufacturer);
+ 	kfree(cdev);
+ 	set_gadget_data(gadget, NULL);
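
The composite.c fix closes a use-after-free of an unusual shape: the gadget driver's string table may point at cdev->def_manufacturer, which is about to be kfree()d, and the table outlives this unbind, so a later bind would hand freed memory to the string machinery. The repair repoints the slot at an empty string before freeing. In miniature (stand-in types, userspace C for brevity):

	#include <stdlib.h>

	struct string_slot { const char *s; };

	static struct string_slot table[1];	/* outlives the heap string */

	static void release_string(char *heap_str)
	{
		/* repoint any slot still referencing the buffer before the
		 * free, so later users see "" instead of freed memory */
		if (table[0].s == heap_str)
			table[0].s = "";
		free(heap_str);
	}
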
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 11f183c7d348..335137d4e892 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -306,8 +306,6 @@ struct fsg_common {
+ 	struct completion	thread_notifier;
+ 	struct task_struct	*thread_task;
+ 
+-	/* Callback functions. */
+-	const struct fsg_operations	*ops;
+ 	/* Gadget's private data. */
+ 	void			*private_data;
+ 
+@@ -2508,6 +2506,7 @@ static void handle_exception(struct fsg_common *common)
+ static int fsg_main_thread(void *common_)
+ {
+ 	struct fsg_common	*common = common_;
++	int			i;
+ 
+ 	/*
+ 	 * Allow the thread to be killed by a signal, but set the signal mask
+@@ -2569,21 +2568,16 @@ static int fsg_main_thread(void *common_)
+ 	common->thread_task = NULL;
+ 	spin_unlock_irq(&common->lock);
+ 
+-	if (!common->ops || !common->ops->thread_exits
+-	 || common->ops->thread_exits(common) < 0) {
+-		int i;
++	/* Eject media from all LUNs */
+ 
+-		down_write(&common->filesem);
+-		for (i = 0; i < ARRAY_SIZE(common->luns); --i) {
+-			struct fsg_lun *curlun = common->luns[i];
+-			if (!curlun || !fsg_lun_is_open(curlun))
+-				continue;
++	down_write(&common->filesem);
++	for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
++		struct fsg_lun *curlun = common->luns[i];
+ 
++		if (curlun && fsg_lun_is_open(curlun))
+ 			fsg_lun_close(curlun);
+-			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+-		}
+-		up_write(&common->filesem);
+ 	}
++	up_write(&common->filesem);
+ 
+ 	/* Let fsg_unbind() know the thread has exited */
+ 	complete_and_exit(&common->thread_notifier, 0);
+@@ -2787,13 +2781,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
+ 	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
+ }
+ 
+-void fsg_common_set_ops(struct fsg_common *common,
+-			const struct fsg_operations *ops)
+-{
+-	common->ops = ops;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_set_ops);
+-
+ void fsg_common_free_buffers(struct fsg_common *common)
+ {
+ 	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
+diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
+index b6a9918eaefb..dfa2176f43c2 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.h
++++ b/drivers/usb/gadget/function/f_mass_storage.h
+@@ -60,17 +60,6 @@ struct fsg_module_parameters {
+ struct fsg_common;
+ 
+ /* FSF callback functions */
+-struct fsg_operations {
+-	/*
+-	 * Callback function to call when thread exits.  If no
+-	 * callback is set or it returns value lower then zero MSF
+-	 * will force eject all LUNs it operates on (including those
+-	 * marked as non-removable or with prevent_medium_removal flag
+-	 * set).
+-	 */
+-	int (*thread_exits)(struct fsg_common *common);
+-};
+-
+ struct fsg_lun_opts {
+ 	struct config_group group;
+ 	struct fsg_lun *lun;
+@@ -141,9 +130,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
+ 
+ void fsg_common_remove_luns(struct fsg_common *common);
+ 
+-void fsg_common_set_ops(struct fsg_common *common,
+-			const struct fsg_operations *ops);
+-
+ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ 			  unsigned int id, const char *name,
+ 			  const char **name_pfx);
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index a60f4c9ea292..63d71400dcaf 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -27,7 +27,7 @@
+ #include <linux/mmu_context.h>
+ #include <linux/aio.h>
+ #include <linux/uio.h>
+-
++#include <linux/delay.h>
+ #include <linux/device.h>
+ #include <linux/moduleparam.h>
+ 
+@@ -116,6 +116,7 @@ enum ep0_state {
+ struct dev_data {
+ 	spinlock_t			lock;
+ 	atomic_t			count;
++	int				udc_usage;
+ 	enum ep0_state			state;		/* P: lock */
+ 	struct usb_gadgetfs_event	event [N_EVENT];
+ 	unsigned			ev_next;
+@@ -512,9 +513,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
+ 		INIT_WORK(&priv->work, ep_user_copy_worker);
+ 		schedule_work(&priv->work);
+ 	}
+-	spin_unlock(&epdata->dev->lock);
+ 
+ 	usb_ep_free_request(ep, req);
++	spin_unlock(&epdata->dev->lock);
+ 	put_ep(epdata);
+ }
+ 
+@@ -935,9 +936,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 			struct usb_request	*req = dev->req;
+ 
+ 			if ((retval = setup_req (ep, req, 0)) == 0) {
++				++dev->udc_usage;
+ 				spin_unlock_irq (&dev->lock);
+ 				retval = usb_ep_queue (ep, req, GFP_KERNEL);
+ 				spin_lock_irq (&dev->lock);
++				--dev->udc_usage;
+ 			}
+ 			dev->state = STATE_DEV_CONNECTED;
+ 
+@@ -979,11 +982,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 				retval = -EIO;
+ 			else {
+ 				len = min (len, (size_t)dev->req->actual);
+-// FIXME don't call this with the spinlock held ...
++				++dev->udc_usage;
++				spin_unlock_irq(&dev->lock);
+ 				if (copy_to_user (buf, dev->req->buf, len))
+ 					retval = -EFAULT;
+ 				else
+ 					retval = len;
++				spin_lock_irq(&dev->lock);
++				--dev->udc_usage;
+ 				clean_req (dev->gadget->ep0, dev->req);
+ 				/* NOTE userspace can't yet choose to stall */
+ 			}
+@@ -1127,6 +1133,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 			retval = setup_req (dev->gadget->ep0, dev->req, len);
+ 			if (retval == 0) {
+ 				dev->state = STATE_DEV_CONNECTED;
++				++dev->udc_usage;
+ 				spin_unlock_irq (&dev->lock);
+ 				if (copy_from_user (dev->req->buf, buf, len))
+ 					retval = -EFAULT;
+@@ -1137,10 +1144,10 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 						dev->gadget->ep0, dev->req,
+ 						GFP_KERNEL);
+ 				}
++				spin_lock_irq(&dev->lock);
++				--dev->udc_usage;
+ 				if (retval < 0) {
+-					spin_lock_irq (&dev->lock);
+ 					clean_req (dev->gadget->ep0, dev->req);
+-					spin_unlock_irq (&dev->lock);
+ 				} else
+ 					retval = len;
+ 
+@@ -1237,9 +1244,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+ 	struct usb_gadget	*gadget = dev->gadget;
+ 	long ret = -ENOTTY;
+ 
+-	if (gadget->ops->ioctl)
++	spin_lock_irq(&dev->lock);
++	if (dev->state == STATE_DEV_OPENED ||
++			dev->state == STATE_DEV_UNBOUND) {
++		/* Not bound to a UDC */
++	} else if (gadget->ops->ioctl) {
++		++dev->udc_usage;
++		spin_unlock_irq(&dev->lock);
++
+ 		ret = gadget->ops->ioctl (gadget, code, value);
+ 
++		spin_lock_irq(&dev->lock);
++		--dev->udc_usage;
++	}
++	spin_unlock_irq(&dev->lock);
++
+ 	return ret;
+ }
+ 
+@@ -1457,10 +1476,12 @@ delegate:
+ 				if (value < 0)
+ 					break;
+ 
++				++dev->udc_usage;
+ 				spin_unlock (&dev->lock);
+ 				value = usb_ep_queue (gadget->ep0, dev->req,
+ 							GFP_KERNEL);
+ 				spin_lock (&dev->lock);
++				--dev->udc_usage;
+ 				if (value < 0) {
+ 					clean_req (gadget->ep0, dev->req);
+ 					break;
+@@ -1484,8 +1505,12 @@ delegate:
+ 		req->length = value;
+ 		req->zero = value < w_length;
+ 
++		++dev->udc_usage;
+ 		spin_unlock (&dev->lock);
+ 		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
++		spin_lock(&dev->lock);
++		--dev->udc_usage;
++		spin_unlock(&dev->lock);
+ 		if (value < 0) {
+ 			DBG (dev, "ep_queue --> %d\n", value);
+ 			req->status = 0;
+@@ -1512,21 +1537,24 @@ static void destroy_ep_files (struct dev_data *dev)
+ 		/* break link to FS */
+ 		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
+ 		list_del_init (&ep->epfiles);
++		spin_unlock_irq (&dev->lock);
++
+ 		dentry = ep->dentry;
+ 		ep->dentry = NULL;
+ 		parent = d_inode(dentry->d_parent);
+ 
+ 		/* break link to controller */
++		mutex_lock(&ep->lock);
+ 		if (ep->state == STATE_EP_ENABLED)
+ 			(void) usb_ep_disable (ep->ep);
+ 		ep->state = STATE_EP_UNBOUND;
+ 		usb_ep_free_request (ep->ep, ep->req);
+ 		ep->ep = NULL;
++		mutex_unlock(&ep->lock);
++
+ 		wake_up (&ep->wait);
+ 		put_ep (ep);
+ 
+-		spin_unlock_irq (&dev->lock);
+-
+ 		/* break link to dcache */
+ 		mutex_lock (&parent->i_mutex);
+ 		d_delete (dentry);
+@@ -1597,6 +1625,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
+ 
+ 	spin_lock_irq (&dev->lock);
+ 	dev->state = STATE_DEV_UNBOUND;
++	while (dev->udc_usage > 0) {
++		spin_unlock_irq(&dev->lock);
++		usleep_range(1000, 2000);
++		spin_lock_irq(&dev->lock);
++	}
+ 	spin_unlock_irq (&dev->lock);
+ 
+ 	destroy_ep_files (dev);
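
The recurring edit through inode.c is one pattern applied many times: every path that must drop dev->lock around a call into the UDC first bumps dev->udc_usage, then decrements it after retaking the lock, and gadgetfs_unbind() polls until the counter drains before tearing anything down, a sleeping stand-in for synchronize_irq(). Condensed to its two halves (field names as in the hunks; this sketch only compiles in the context of the file above):

	/* Caller side: pin the UDC across the unlocked, possibly
	 * sleeping call into the controller. */
	static int queue_pinned(struct dev_data *dev, struct usb_ep *ep,
				struct usb_request *req)
	{
		int value;

		spin_lock_irq(&dev->lock);
		++dev->udc_usage;
		spin_unlock_irq(&dev->lock);

		value = usb_ep_queue(ep, req, GFP_KERNEL);

		spin_lock_irq(&dev->lock);
		--dev->udc_usage;
		spin_unlock_irq(&dev->lock);
		return value;
	}

	/* Unbind side: wait for every pinned caller to finish. */
	static void drain_users(struct dev_data *dev)
	{
		spin_lock_irq(&dev->lock);
		while (dev->udc_usage > 0) {
			spin_unlock_irq(&dev->lock);
			usleep_range(1000, 2000);
			spin_lock_irq(&dev->lock);
		}
		spin_unlock_irq(&dev->lock);
	}

The dummy_hcd hunks further down apply the identical idea with their callback_usage counter around gadget-driver callbacks.
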
+diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
+index 3eb1b9fad5a0..f1347d4492d1 100644
+--- a/drivers/usb/gadget/legacy/mass_storage.c
++++ b/drivers/usb/gadget/legacy/mass_storage.c
+@@ -121,15 +121,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+ 
+ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
+ 
+-static unsigned long msg_registered;
+-static void msg_cleanup(void);
+-
+-static int msg_thread_exits(struct fsg_common *common)
+-{
+-	msg_cleanup();
+-	return 0;
+-}
+-
+ static int msg_do_config(struct usb_configuration *c)
+ {
+ 	struct fsg_opts *opts;
+@@ -168,9 +159,6 @@ static struct usb_configuration msg_config_driver = {
+ 
+ static int msg_bind(struct usb_composite_dev *cdev)
+ {
+-	static const struct fsg_operations ops = {
+-		.thread_exits = msg_thread_exits,
+-	};
+ 	struct fsg_opts *opts;
+ 	struct fsg_config config;
+ 	int status;
+@@ -187,8 +175,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
+ 	if (status)
+ 		goto fail;
+ 
+-	fsg_common_set_ops(opts->common, &ops);
+-
+ 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+ 	if (status)
+ 		goto fail_set_cdev;
+@@ -213,7 +199,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
+ 	usb_composite_overwrite_options(cdev, &coverwrite);
+ 	dev_info(&cdev->gadget->dev,
+ 		 DRIVER_DESC ", version: " DRIVER_VERSION "\n");
+-	set_bit(0, &msg_registered);
+ 	return 0;
+ 
+ fail_string_ids:
+@@ -258,9 +243,8 @@ static int __init msg_init(void)
+ }
+ module_init(msg_init);
+ 
+-static void msg_cleanup(void)
++static void __exit msg_cleanup(void)
+ {
+-	if (test_and_clear_bit(0, &msg_registered))
+-		usb_composite_unregister(&msg_driver);
++	usb_composite_unregister(&msg_driver);
+ }
+ module_exit(msg_cleanup);
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 09e6ff835e1c..35f730324b63 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -173,6 +173,8 @@ struct dummy_hcd {
+ 
+ 	struct usb_device		*udev;
+ 	struct list_head		urbp_list;
++	struct urbp			*next_frame_urbp;
++
+ 	u32				stream_en_ep;
+ 	u8				num_stream[30 / 2];
+ 
+@@ -189,11 +191,13 @@ struct dummy {
+ 	 */
+ 	struct dummy_ep			ep[DUMMY_ENDPOINTS];
+ 	int				address;
++	int				callback_usage;
+ 	struct usb_gadget		gadget;
+ 	struct usb_gadget_driver	*driver;
+ 	struct dummy_request		fifo_req;
+ 	u8				fifo_buf[FIFO_SIZE];
+ 	u16				devstatus;
++	unsigned			ints_enabled:1;
+ 	unsigned			udc_suspended:1;
+ 	unsigned			pullup:1;
+ 
+@@ -352,6 +356,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
+ static void set_link_state(struct dummy_hcd *dum_hcd)
+ {
+ 	struct dummy *dum = dum_hcd->dum;
++	unsigned int power_bit;
+ 
+ 	dum_hcd->active = 0;
+ 	if (dum->pullup)
+@@ -362,32 +367,43 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
+ 			return;
+ 
+ 	set_link_state_by_speed(dum_hcd);
++	power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ?
++			USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER);
+ 
+ 	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
+ 	     dum_hcd->active)
+ 		dum_hcd->resuming = 0;
+ 
+ 	/* Currently !connected or in reset */
+-	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
++	if ((dum_hcd->port_status & power_bit) == 0 ||
+ 			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
+-		unsigned disconnect = USB_PORT_STAT_CONNECTION &
++		unsigned int disconnect = power_bit &
+ 				dum_hcd->old_status & (~dum_hcd->port_status);
+-		unsigned reset = USB_PORT_STAT_RESET &
++		unsigned int reset = USB_PORT_STAT_RESET &
+ 				(~dum_hcd->old_status) & dum_hcd->port_status;
+ 
+ 		/* Report reset and disconnect events to the driver */
+-		if (dum->driver && (disconnect || reset)) {
++		if (dum->ints_enabled && (disconnect || reset)) {
+ 			stop_activity(dum);
++			++dum->callback_usage;
++			spin_unlock(&dum->lock);
+ 			if (reset)
+ 				usb_gadget_udc_reset(&dum->gadget, dum->driver);
+ 			else
+ 				dum->driver->disconnect(&dum->gadget);
++			spin_lock(&dum->lock);
++			--dum->callback_usage;
+ 		}
+-	} else if (dum_hcd->active != dum_hcd->old_active) {
++	} else if (dum_hcd->active != dum_hcd->old_active &&
++			dum->ints_enabled) {
++		++dum->callback_usage;
++		spin_unlock(&dum->lock);
+ 		if (dum_hcd->old_active && dum->driver->suspend)
+ 			dum->driver->suspend(&dum->gadget);
+ 		else if (!dum_hcd->old_active &&  dum->driver->resume)
+ 			dum->driver->resume(&dum->gadget);
++		spin_lock(&dum->lock);
++		--dum->callback_usage;
+ 	}
+ 
+ 	dum_hcd->old_status = dum_hcd->port_status;
+@@ -903,8 +919,11 @@ static int dummy_udc_start(struct usb_gadget *g,
+ 	 * can't enumerate without help from the driver we're binding.
+ 	 */
+ 
++	spin_lock_irq(&dum->lock);
+ 	dum->devstatus = 0;
+ 	dum->driver = driver;
++	dum->ints_enabled = 1;
++	spin_unlock_irq(&dum->lock);
+ 
+ 	return 0;
+ }
+@@ -915,6 +934,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
+ 	struct dummy		*dum = dum_hcd->dum;
+ 
+ 	spin_lock_irq(&dum->lock);
++	dum->ints_enabled = 0;
++	stop_activity(dum);
++
++	/* emulate synchronize_irq(): wait for callbacks to finish */
++	while (dum->callback_usage > 0) {
++		spin_unlock_irq(&dum->lock);
++		usleep_range(1000, 2000);
++		spin_lock_irq(&dum->lock);
++	}
++
+ 	dum->driver = NULL;
+ 	spin_unlock_irq(&dum->lock);
+ 
+@@ -967,7 +996,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
+ 	memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
+ 	dum->gadget.name = gadget_name;
+ 	dum->gadget.ops = &dummy_ops;
+-	dum->gadget.max_speed = USB_SPEED_SUPER;
++	if (mod_data.is_super_speed)
++		dum->gadget.max_speed = USB_SPEED_SUPER;
++	else if (mod_data.is_high_speed)
++		dum->gadget.max_speed = USB_SPEED_HIGH;
++	else
++		dum->gadget.max_speed = USB_SPEED_FULL;
+ 
+ 	dum->gadget.dev.parent = &pdev->dev;
+ 	init_dummy_udc_hw(dum);
+@@ -1176,6 +1210,8 @@ static int dummy_urb_enqueue(
+ 
+ 	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
+ 	urb->hcpriv = urbp;
++	if (!dum_hcd->next_frame_urbp)
++		dum_hcd->next_frame_urbp = urbp;
+ 	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
+ 		urb->error_count = 1;		/* mark as a new urb */
+ 
+@@ -1438,6 +1474,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
+ 	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
+ 			dum->ss_hcd : dum->hs_hcd)))
+ 		return NULL;
++	if (!dum->ints_enabled)
++		return NULL;
+ 	if ((address & ~USB_DIR_IN) == 0)
+ 		return &dum->ep[0];
+ 	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
+@@ -1679,6 +1717,7 @@ static void dummy_timer(unsigned long _dum_hcd)
+ 		spin_unlock_irqrestore(&dum->lock, flags);
+ 		return;
+ 	}
++	dum_hcd->next_frame_urbp = NULL;
+ 
+ 	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
+ 		if (!ep_name[i])
+@@ -1695,6 +1734,10 @@ restart:
+ 		int			type;
+ 		int			status = -EINPROGRESS;
+ 
++		/* stop when we reach URBs queued after the timer interrupt */
++		if (urbp == dum_hcd->next_frame_urbp)
++			break;
++
+ 		urb = urbp->urb;
+ 		if (urb->unlinked)
+ 			goto return_urb;
+@@ -1774,10 +1817,12 @@ restart:
+ 			 * until setup() returns; no reentrancy issues etc.
+ 			 */
+ 			if (value > 0) {
++				++dum->callback_usage;
+ 				spin_unlock(&dum->lock);
+ 				value = dum->driver->setup(&dum->gadget,
+ 						&setup);
+ 				spin_lock(&dum->lock);
++				--dum->callback_usage;
+ 
+ 				if (value >= 0) {
+ 					/* no delays (max 64KB data stage) */
+@@ -2485,8 +2530,6 @@ static struct hc_driver dummy_hcd = {
+ 	.product_desc =		"Dummy host controller",
+ 	.hcd_priv_size =	sizeof(struct dummy_hcd),
+ 
+-	.flags =		HCD_USB3 | HCD_SHARED,
+-
+ 	.reset =		dummy_setup,
+ 	.start =		dummy_start,
+ 	.stop =			dummy_stop,
+@@ -2515,8 +2558,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
+ 	dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
+ 	dum = *((void **)dev_get_platdata(&pdev->dev));
+ 
+-	if (!mod_data.is_super_speed)
++	if (mod_data.is_super_speed)
++		dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
++	else if (mod_data.is_high_speed)
+ 		dummy_hcd.flags = HCD_USB2;
++	else
++		dummy_hcd.flags = HCD_USB11;
+ 	hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
+ 	if (!hs_hcd)
+ 		return -ENOMEM;
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 1fc6f478a02c..89e9494c3245 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -969,7 +969,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+  *
+  * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+  * It signals to the BIOS that the OS wants control of the host controller,
+- * and then waits 5 seconds for the BIOS to hand over control.
++ * and then waits 1 second for the BIOS to hand over control.
+  * If we timeout, assume the BIOS is broken and take control anyway.
+  */
+ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+@@ -1015,9 +1015,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+ 	if (val & XHCI_HC_BIOS_OWNED) {
+ 		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+ 
+-		/* Wait for 5 seconds with 10 microsecond polling interval */
++		/* Wait for 1 second with 10 microsecond polling interval */
+ 		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+-				0, 5000, 10);
++				0, 1000000, 10);
+ 
+ 		/* Assume a buggy BIOS and take HC ownership anyway */
+ 		if (timeout) {
+@@ -1046,7 +1046,7 @@ hc_init:
+ 	 * operational or runtime registers.  Wait 5 seconds and no more.
+ 	 */
+ 	timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
+-			5000, 10);
++			5000000, 10);
+ 	/* Assume a buggy HC and start HC initialization anyway */
+ 	if (timeout) {
+ 		val = readl(op_reg_base + XHCI_STS_OFFSET);
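
The pci-quirks hunk fixes a units bug rather than a policy change: handshake() takes its wait argument in microseconds, so the old literal 5000 waited 5 ms while the comment promised 5 seconds. The BIOS-handoff wait is deliberately shortened to a true 1 second (1000000 us) and the CNR wait becomes a true 5 seconds. Spelling the unit out once avoids the trap (hypothetical helper; handshake() is the file's own function):

	#define USEC_PER_SEC	1000000L

	/* handshake() polls in microsecond units; make the conversion
	 * explicit so "5 seconds" cannot silently become 5 ms again. */
	static inline unsigned long secs_to_handshake_usecs(unsigned long secs)
	{
		return secs * USEC_PER_SEC;
	}

	/* e.g. handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
	 *                secs_to_handshake_usecs(5), 10); */
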
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 25b1cf0b6848..064bdf78286d 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -293,15 +293,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 						     GFP_NOWAIT);
+ 			if (!command) {
+ 				spin_unlock_irqrestore(&xhci->lock, flags);
+-				xhci_free_command(xhci, cmd);
+-				return -ENOMEM;
++				ret = -ENOMEM;
++				goto cmd_cleanup;
++			}
+ 
++			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
++						       i, suspend);
++			if (ret) {
++				spin_unlock_irqrestore(&xhci->lock, flags);
++				xhci_free_command(xhci, command);
++				goto cmd_cleanup;
+ 			}
+-			xhci_queue_stop_endpoint(xhci, command, slot_id, i,
+-						 suspend);
+ 		}
+ 	}
+-	xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
++	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
++	if (ret) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		goto cmd_cleanup;
++	}
++
+ 	xhci_ring_cmd_db(xhci);
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 
+@@ -312,6 +322,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+ 		ret = -ETIME;
+ 	}
++
++cmd_cleanup:
+ 	xhci_free_command(xhci, cmd);
+ 	return ret;
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index f33028642e31..566ba291e1be 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1439,7 +1439,7 @@ struct xhci_bus_state {
+ 
+ static inline unsigned int hcd_index(struct usb_hcd *hcd)
+ {
+-	if (hcd->speed == HCD_USB3)
++	if (hcd->speed >= HCD_USB3)
+ 		return 0;
+ 	else
+ 		return 1;
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 9f65d8477372..3ecc79644b7a 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -863,7 +863,7 @@ b_host:
+ 	 */
+ 	if (int_usb & MUSB_INTR_RESET) {
+ 		handled = IRQ_HANDLED;
+-		if (devctl & MUSB_DEVCTL_HM) {
++		if (is_host_active(musb)) {
+ 			/*
+ 			 * When BABBLE happens what we can depends on which
+ 			 * platform MUSB is running, because some platforms
+@@ -873,9 +873,7 @@ b_host:
+ 			 * drop the session.
+ 			 */
+ 			dev_err(musb->controller, "Babble\n");
+-
+-			if (is_host_active(musb))
+-				musb_recover_from_babble(musb);
++			musb_recover_from_babble(musb);
+ 		} else {
+ 			dev_dbg(musb->controller, "BUS RESET as %s\n",
+ 				usb_otg_state_string(musb->xceiv->otg->state));
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index 36e5b5c530bd..8bb9367ada45 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -285,11 +285,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
+ 			      struct usbhs_fifo *fifo)
+ {
+ 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
++	int ret = 0;
+ 
+-	if (!usbhs_pipe_is_dcp(pipe))
+-		usbhsf_fifo_barrier(priv, fifo);
++	if (!usbhs_pipe_is_dcp(pipe)) {
++		/*
++		 * This driver checks the pipe condition first to avoid -EBUSY
++		 * from usbhsf_fifo_barrier() with about 10 msec delay in
++		 * the interrupt handler if the pipe is RX direction and empty.
++		 */
++		if (usbhs_pipe_is_dir_in(pipe))
++			ret = usbhs_pipe_is_accessible(pipe);
++		if (!ret)
++			ret = usbhsf_fifo_barrier(priv, fifo);
++	}
+ 
+-	usbhs_write(priv, fifo->ctr, BCLR);
++	/*
++	 * if non-DCP pipe, this driver should set BCLR when
++	 * usbhsf_fifo_barrier() returns 0.
++	 */
++	if (!ret)
++		usbhs_write(priv, fifo->ctr, BCLR);
+ }
+ 
+ static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
+@@ -843,9 +858,9 @@ static void xfer_work(struct work_struct *work)
+ 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
+ 
+ 	usbhs_pipe_running(pipe, 1);
+-	usbhsf_dma_start(pipe, fifo);
+ 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
+ 	dma_async_issue_pending(chan);
++	usbhsf_dma_start(pipe, fifo);
+ 	usbhs_pipe_enable(pipe);
+ 
+ xfer_work_end:
+diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
+index 3806e7014199..2938153fe7b1 100644
+--- a/drivers/usb/serial/console.c
++++ b/drivers/usb/serial/console.c
+@@ -189,6 +189,7 @@ static int usb_console_setup(struct console *co, char *options)
+ 	tty_kref_put(tty);
+  reset_open_count:
+ 	port->port.count = 0;
++	info->port = NULL;
+ 	usb_autopm_put_interface(serial->interface);
+  error_get_interface:
+ 	usb_serial_put(serial);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 31cd99f59a6a..b1be08570088 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -168,6 +168,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
+ 	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
+index 39e683096e94..45182c65fa1f 100644
+--- a/drivers/usb/serial/metro-usb.c
++++ b/drivers/usb/serial/metro-usb.c
+@@ -45,6 +45,7 @@ struct metrousb_private {
+ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
+ 	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
+ 	{ }, /* Terminating entry. */
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 3c0552df5e37..02f189e2bda7 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -234,11 +234,16 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum,
+ 
+ 	status = usb_control_msg(usbdev, pipe, request, requesttype, value,
+ 				     index, buf, 1, MOS_WDR_TIMEOUT);
+-	if (status == 1)
++	if (status == 1) {
+ 		*data = *buf;
+-	else if (status < 0)
++	} else {
+ 		dev_err(&usbdev->dev,
+ 			"mos7720: usb_control_msg() failed: %d\n", status);
++		if (status >= 0)
++			status = -EIO;
++		*data = 0;
++	}
++
+ 	kfree(buf);
+ 
+ 	return status;
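
This hunk and the mos7840 ones just below normalize the same contract: usb_control_msg() returns the number of bytes transferred or a negative errno, so a short-but-positive count must be converted into an error and the out-parameter given a defined value; otherwise callers test "status < 0", see success, and consume an uninitialized byte. The conversion as a plain function (illustrative name):

	#include <errno.h>

	/* Normalize a control-transfer result for a 1-byte read: return 1
	 * and fill *data on success, a negative errno otherwise. */
	static int one_byte_result(int transferred, const unsigned char *buf,
				   unsigned char *data)
	{
		if (transferred == 1) {
			*data = *buf;
			return 1;
		}
		*data = 0;	/* defined value even on failure */
		return transferred < 0 ? transferred : -EIO;
	}
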
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index 03813d86a847..f16211a32922 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -285,9 +285,15 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ 			      MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
+ 			      MOS_WDR_TIMEOUT);
++	if (ret < VENDOR_READ_LENGTH) {
++		if (ret >= 0)
++			ret = -EIO;
++		goto out;
++	}
++
+ 	*val = buf[0];
+ 	dev_dbg(&port->dev, "%s offset is %x, return val %x\n", __func__, reg, *val);
+-
++out:
+ 	kfree(buf);
+ 	return ret;
+ }
+@@ -353,8 +359,13 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ 			      MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
+ 			      MOS_WDR_TIMEOUT);
++	if (ret < VENDOR_READ_LENGTH) {
++		if (ret >= 0)
++			ret = -EIO;
++		goto out;
++	}
+ 	*val = buf[0];
+-
++out:
+ 	kfree(buf);
+ 	return ret;
+ }
+@@ -1490,10 +1501,10 @@ static int mos7840_tiocmget(struct tty_struct *tty)
+ 		return -ENODEV;
+ 
+ 	status = mos7840_get_uart_reg(port, MODEM_STATUS_REGISTER, &msr);
+-	if (status != 1)
++	if (status < 0)
+ 		return -EIO;
+ 	status = mos7840_get_uart_reg(port, MODEM_CONTROL_REGISTER, &mcr);
+-	if (status != 1)
++	if (status < 0)
+ 		return -EIO;
+ 	result = ((mcr & MCR_DTR) ? TIOCM_DTR : 0)
+ 	    | ((mcr & MCR_RTS) ? TIOCM_RTS : 0)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a0fbc4e5a272..dc489fb4261b 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -517,6 +517,7 @@ static void option_instat_callback(struct urb *urb);
+ 
+ /* TP-LINK Incorporated products */
+ #define TPLINK_VENDOR_ID			0x2357
++#define TPLINK_PRODUCT_LTE			0x000D
+ #define TPLINK_PRODUCT_MA180			0x0201
+ 
+ /* Changhong products */
+@@ -2008,6 +2009,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+ 	{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) },	/* TP-Link LTE Module */
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000),					/* TP-Link MA260 */
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 23c303b2a3a2..996dc09b00b8 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -172,6 +172,10 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ 	{DEVICE_SWI(0x413c, 0x81b5)},	/* Dell Wireless 5811e QDL */
+ 	{DEVICE_SWI(0x413c, 0x81b6)},	/* Dell Wireless 5811e QDL */
++	{DEVICE_SWI(0x413c, 0x81cf)},   /* Dell Wireless 5819 */
++	{DEVICE_SWI(0x413c, 0x81d0)},   /* Dell Wireless 5819 */
++	{DEVICE_SWI(0x413c, 0x81d1)},   /* Dell Wireless 5818 */
++	{DEVICE_SWI(0x413c, 0x81d2)},   /* Dell Wireless 5818 */
+ 
+ 	/* Huawei devices */
+ 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
+index f58caa9e6a27..a155cd02bce2 100644
+--- a/drivers/usb/storage/uas-detect.h
++++ b/drivers/usb/storage/uas-detect.h
+@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
+ 		intf->desc.bInterfaceProtocol == USB_PR_UAS);
+ }
+ 
+-static int uas_find_uas_alt_setting(struct usb_interface *intf)
++static struct usb_host_interface *uas_find_uas_alt_setting(
++		struct usb_interface *intf)
+ {
+ 	int i;
+ 
+@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
+ 		struct usb_host_interface *alt = &intf->altsetting[i];
+ 
+ 		if (uas_is_interface(alt))
+-			return alt->desc.bAlternateSetting;
++			return alt;
+ 	}
+ 
+-	return -ENODEV;
++	return NULL;
+ }
+ 
+ static int uas_find_endpoints(struct usb_host_interface *alt,
+@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ 	unsigned long flags = id->driver_info;
+-	int r, alt;
+-
++	struct usb_host_interface *alt;
++	int r;
+ 
+ 	alt = uas_find_uas_alt_setting(intf);
+-	if (alt < 0)
++	if (!alt)
+ 		return 0;
+ 
+-	r = uas_find_endpoints(&intf->altsetting[alt], eps);
++	r = uas_find_endpoints(alt, eps);
+ 	if (r < 0)
+ 		return 0;
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 546bb2b1ffc2..f58ae4a84c11 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -851,14 +851,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
+ static int uas_switch_interface(struct usb_device *udev,
+ 				struct usb_interface *intf)
+ {
+-	int alt;
++	struct usb_host_interface *alt;
+ 
+ 	alt = uas_find_uas_alt_setting(intf);
+-	if (alt < 0)
+-		return alt;
++	if (!alt)
++		return -ENODEV;
+ 
+-	return usb_set_interface(udev,
+-			intf->altsetting[0].desc.bInterfaceNumber, alt);
++	return usb_set_interface(udev, alt->desc.bInterfaceNumber,
++			alt->desc.bAlternateSetting);
+ }
+ 
+ static int uas_configure_endpoints(struct uas_dev_info *devinfo)
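
The uas pair changes uas_find_uas_alt_setting() to return the altsetting itself instead of its bAlternateSetting number. The old code then indexed intf->altsetting[alt], conflating the setting's identifier with its array position, which need not agree; returning the pointer removes the ambiguity entirely. The shape of the fix, with stand-in types:

	#include <stddef.h>

	struct alt { unsigned char bAlternateSetting; };
	struct intf { struct alt *altsetting; int num_altsetting; };

	/* An altsetting's array position and its bAlternateSetting value
	 * are independent; hand back the object, not a number a caller
	 * might misuse as an index. */
	static struct alt *find_alt(struct intf *i, int (*match)(struct alt *))
	{
		int k;

		for (k = 0; k < i->num_altsetting; k++)
			if (match(&i->altsetting[k]))
				return &i->altsetting[k];
		return NULL;
	}
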
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 2f40b6150fdc..8e80da125b25 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1379,6 +1379,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_SANE_SENSE ),
+ 
++/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
++UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
++		"Seagate",
++		"External",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_WP_DETECT ),
++
+ UNUSUAL_DEV(  0x0d49, 0x7310, 0x0000, 0x9999,
+ 		"Maxtor",
+ 		"USB to SATA",
+diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
+index e75bbe5a10cd..1212b4b3c5a9 100644
+--- a/drivers/uwb/hwa-rc.c
++++ b/drivers/uwb/hwa-rc.c
+@@ -827,6 +827,8 @@ static int hwarc_probe(struct usb_interface *iface,
+ 
+ 	if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ 		return -ENODEV;
++	if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
++		return -ENODEV;
+ 
+ 	result = -ENOMEM;
+ 	uwb_rc = uwb_rc_alloc();
+diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
+index bdcb13cc1d54..5c9828370217 100644
+--- a/drivers/uwb/uwbd.c
++++ b/drivers/uwb/uwbd.c
+@@ -303,18 +303,22 @@ static int uwbd(void *param)
+ /** Start the UWB daemon */
+ void uwbd_start(struct uwb_rc *rc)
+ {
+-	rc->uwbd.task = kthread_run(uwbd, rc, "uwbd");
+-	if (rc->uwbd.task == NULL)
++	struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
++	if (IS_ERR(task)) {
++		rc->uwbd.task = NULL;
+ 		printk(KERN_ERR "UWB: Cannot start management daemon; "
+ 		       "UWB won't work\n");
+-	else
++	} else {
++		rc->uwbd.task = task;
+ 		rc->uwbd.pid = rc->uwbd.task->pid;
++	}
+ }
+ 
+ /* Stop the UWB daemon and free any unprocessed events */
+ void uwbd_stop(struct uwb_rc *rc)
+ {
+-	kthread_stop(rc->uwbd.task);
++	if (rc->uwbd.task)
++		kthread_stop(rc->uwbd.task);
+ 	uwbd_flush(rc);
+ }
+ 
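
The uwbd fix corrects a misunderstanding of the API: kthread_run() never returns NULL; on failure it returns an error pointer, so the old "== NULL" test always passed and a later kthread_stop() would have dereferenced the ERR_PTR. The pattern is to test with IS_ERR(), record a NULL sentinel on failure, and guard the stop path; a sketch mirroring the hunk (kernel context assumed):

	#include <linux/err.h>
	#include <linux/kthread.h>

	static void daemon_start(struct uwb_rc *rc)
	{
		struct task_struct *task = kthread_run(uwbd, rc, "uwbd");

		if (IS_ERR(task)) {
			rc->uwbd.task = NULL;	/* sentinel: not running */
			return;
		}
		rc->uwbd.task = task;
	}

	static void daemon_stop(struct uwb_rc *rc)
	{
		if (rc->uwbd.task)	/* never kthread_stop(NULL) */
			kthread_stop(rc->uwbd.task);
	}
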
+diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
+index 8789e487b96e..f84b5225aa5f 100644
+--- a/drivers/video/fbdev/aty/atyfb_base.c
++++ b/drivers/video/fbdev/aty/atyfb_base.c
+@@ -1852,7 +1852,7 @@ static int atyfb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
+ #if defined(DEBUG) && defined(CONFIG_FB_ATY_CT)
+ 	case ATYIO_CLKR:
+ 		if (M64_HAS(INTEGRATED)) {
+-			struct atyclk clk;
++			struct atyclk clk = { 0 };
+ 			union aty_pll *pll = &par->pll;
+ 			u32 dsp_config = pll->ct.dsp_config;
+ 			u32 dsp_on_off = pll->ct.dsp_on_off;
+diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
+index 5bf931ce1353..978098f71761 100644
+--- a/drivers/watchdog/kempld_wdt.c
++++ b/drivers/watchdog/kempld_wdt.c
+@@ -140,12 +140,19 @@ static int kempld_wdt_set_stage_timeout(struct kempld_wdt_data *wdt_data,
+ 					unsigned int timeout)
+ {
+ 	struct kempld_device_data *pld = wdt_data->pld;
+-	u32 prescaler = kempld_prescaler[PRESCALER_21];
++	u32 prescaler;
+ 	u64 stage_timeout64;
+ 	u32 stage_timeout;
+ 	u32 remainder;
+ 	u8 stage_cfg;
+ 
++#if GCC_VERSION < 40400
++	/* work around a bug compiling do_div() */
++	prescaler = READ_ONCE(kempld_prescaler[PRESCALER_21]);
++#else
++	prescaler = kempld_prescaler[PRESCALER_21];
++#endif
++
+ 	if (!stage)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index e227eb09b1a0..cea37ee4c615 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -827,6 +827,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 	mutex_unlock(&priv->lock);
+ 
+ 	if (use_ptemod) {
++		map->pages_vm_start = vma->vm_start;
+ 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
+ 					  vma->vm_end - vma->vm_start,
+ 					  find_grant_ptes, map);
+@@ -864,7 +865,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 					    set_grant_ptes_as_special, NULL);
+ 		}
+ #endif
+-		map->pages_vm_start = vma->vm_start;
+ 	}
+ 
+ 	return 0;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index f1feb3123c9c..2b115c309e1c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3948,6 +3948,10 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
+ 		ret = PTR_ERR(new_root);
+ 		goto out;
+ 	}
++	if (!is_fstree(new_root->objectid)) {
++		ret = -ENOENT;
++		goto out;
++	}
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path) {
+diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
+index b2c1ab7cae78..5fe5314270fd 100644
+--- a/fs/btrfs/send.c
++++ b/fs/btrfs/send.c
+@@ -1641,6 +1641,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
+ {
+ 	int ret;
+ 
++	if (ino == BTRFS_FIRST_FREE_OBJECTID)
++		return 1;
++
+ 	ret = get_cur_inode_state(sctx, ino, gen);
+ 	if (ret < 0)
+ 		goto out;
+@@ -1826,7 +1829,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
+ 	 * not deleted and then re-created; if it was, then we have no overwrite
+ 	 * and we can just unlink this entry.
+ 	 */
+-	if (sctx->parent_root) {
++	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
+ 		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
+ 				     NULL, NULL, NULL);
+ 		if (ret < 0 && ret != -ENOENT)
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index be5ea6af8366..2380d6916ea1 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1791,6 +1791,7 @@ static int try_flush_caps(struct inode *inode, unsigned *flush_tid)
+ retry:
+ 	spin_lock(&ci->i_ceph_lock);
+ 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
++		spin_unlock(&ci->i_ceph_lock);
+ 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
+ 		goto out;
+ 	}
+@@ -1808,8 +1809,10 @@ retry:
+ 			mutex_lock(&session->s_mutex);
+ 			goto retry;
+ 		}
+-		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
++		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
++			spin_unlock(&ci->i_ceph_lock);
+ 			goto out;
++		}
+ 
+ 		flushing = __mark_caps_flushing(inode, session);
+ 
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 4f3bf0f527f6..c1105075e3e5 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1803,13 +1803,18 @@ static int build_dentry_path(struct dentry *dentry,
+ 			     int *pfreepath)
+ {
+ 	char *path;
++	struct inode *dir;
+ 
+-	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP) {
+-		*pino = ceph_ino(d_inode(dentry->d_parent));
++	rcu_read_lock();
++	dir = d_inode_rcu(dentry->d_parent);
++	if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
++		*pino = ceph_ino(dir);
++		rcu_read_unlock();
+ 		*ppath = dentry->d_name.name;
+ 		*ppathlen = dentry->d_name.len;
+ 		return 0;
+ 	}
++	rcu_read_unlock();
+ 	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ 	if (IS_ERR(path))
+ 		return PTR_ERR(path);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index cb3406815330..a2c100aed4b0 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3957,6 +3957,14 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ 	cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+ 		 server->sec_mode, server->capabilities, server->timeAdj);
+ 
++	if (ses->auth_key.response) {
++		cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
++			 ses->auth_key.response);
++		kfree(ses->auth_key.response);
++		ses->auth_key.response = NULL;
++		ses->auth_key.len = 0;
++	}
++
+ 	if (server->ops->sess_setup)
+ 		rc = server->ops->sess_setup(xid, ses, nls_info);
+ 
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index fcf986aa2abe..47e04038a846 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -224,6 +224,13 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
+ 	if (backup_cred(cifs_sb))
+ 		create_options |= CREATE_OPEN_BACKUP_INTENT;
+ 
++	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
++	if (f_flags & O_SYNC)
++		create_options |= CREATE_WRITE_THROUGH;
++
++	if (f_flags & O_DIRECT)
++		create_options |= CREATE_NO_BUFFER;
++
+ 	oparms.tcon = tcon;
+ 	oparms.cifs_sb = cifs_sb;
+ 	oparms.desired_access = desired_access;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 2f6f164c83ab..0cf4a76e8e94 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -453,15 +453,22 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
+ 
+ 	/*
+ 	 * validation ioctl must be signed, so no point sending this if we
+-	 * can not sign it.  We could eventually change this to selectively
++	 * cannot sign it (i.e. we are not a known user).  Even if signing is not
++	 * required (enabled but not negotiated), in those cases we selectively
+ 	 * sign just this, the first and only signed request on a connection.
+-	 * This is good enough for now since a user who wants better security
+-	 * would also enable signing on the mount. Having validation of
+-	 * negotiate info for signed connections helps reduce attack vectors
++	 * Having validation of negotiate info helps reduce attack vectors.
+ 	 */
+-	if (tcon->ses->server->sign == false)
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
+ 		return 0; /* validation requires signing */
+ 
++	if (tcon->ses->user_name == NULL) {
++		cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
++		return 0; /* validation requires signing */
++	}
++
++	if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
++		cifs_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
++
+ 	vneg_inbuf.Capabilities =
+ 			cpu_to_le32(tcon->ses->server->vals->req_capabilities);
+ 	memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index d83a021a659f..7484537424a7 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -813,7 +813,8 @@ out:
+ 	 */
+ 	if (sdio->boundary) {
+ 		ret = dio_send_cur_page(dio, sdio, map_bh);
+-		dio_bio_submit(dio, sdio);
++		if (sdio->bio)
++			dio_bio_submit(dio, sdio);
+ 		page_cache_release(sdio->cur_page);
+ 		sdio->cur_page = NULL;
+ 	}
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index c3fe1e323951..ea2ef0eac0c4 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -195,13 +195,6 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+-		if (acl) {
+-			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+-			if (error)
+-				return error;
+-			inode->i_ctime = ext4_current_time(inode);
+-			ext4_mark_inode_dirty(handle, inode);
+-		}
+ 		break;
+ 
+ 	case ACL_TYPE_DEFAULT:
+@@ -234,6 +227,8 @@ ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ {
+ 	handle_t *handle;
+ 	int error, retries = 0;
++	umode_t mode = inode->i_mode;
++	int update_mode = 0;
+ 
+ retry:
+ 	handle = ext4_journal_start(inode, EXT4_HT_XATTR,
+@@ -241,7 +236,20 @@ retry:
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+ 
++	if ((type == ACL_TYPE_ACCESS) && acl) {
++		error = posix_acl_update_mode(inode, &mode, &acl);
++		if (error)
++			goto out_stop;
++		update_mode = 1;
++	}
++
+ 	error = __ext4_set_acl(handle, inode, type, acl);
++	if (!error && update_mode) {
++		inode->i_mode = mode;
++		inode->i_ctime = ext4_current_time(inode);
++		ext4_mark_inode_dirty(handle, inode);
++	}
++out_stop:
+ 	ext4_journal_stop(handle);
+ 	if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+ 		goto retry;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 2ab9fd88a7c3..1cc4bfa49823 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1896,15 +1896,29 @@ static int ext4_writepage(struct page *page,
+ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+ {
+ 	int len;
+-	loff_t size = i_size_read(mpd->inode);
++	loff_t size;
+ 	int err;
+ 
+ 	BUG_ON(page->index != mpd->first_page);
+-	if (page->index == size >> PAGE_CACHE_SHIFT)
+-		len = size & ~PAGE_CACHE_MASK;
+-	else
+-		len = PAGE_CACHE_SIZE;
+ 	clear_page_dirty_for_io(page);
++	/*
++	 * We have to be very careful here!  Nothing protects the writeback path
++	 * against i_size changes, and the page can be writeably mapped into
++	 * page tables. So an application can be growing i_size and writing
++	 * data through mmap while writeback runs. clear_page_dirty_for_io()
++	 * write-protects our page in page tables and the page cannot get
++	 * written to again until we release the page lock. So only after
++	 * clear_page_dirty_for_io() are we safe to sample i_size for
++	 * ext4_bio_write_page() to zero out the tail of the written page. We rely
++	 * on the barrier provided by TestClearPageDirty in
++	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
++	 * after page tables are updated.
++	 */
++	size = i_size_read(mpd->inode);
++	if (page->index == size >> PAGE_SHIFT)
++		len = size & ~PAGE_MASK;
++	else
++		len = PAGE_SIZE;
+ 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
+ 	if (!err)
+ 		mpd->wbc->nr_to_write--;
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 1e1aae669fa8..9242087a73c2 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -1590,7 +1590,12 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
+ 			goto fail;
+ 	}
+ repeat:
+-	page = grab_cache_page_write_begin(mapping, index, flags);
++	/*
++	 * Do not use grab_cache_page_write_begin(), to avoid a deadlock due to
++	 * wait_for_stable_page(). We will wait for that below under our own IO control.
++	 */
++	page = pagecache_get_page(mapping, index,
++				FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
+ 	if (!page) {
+ 		err = -ENOMEM;
+ 		goto fail;
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 88b09a33d117..b795117b3643 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1295,7 +1295,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
+ 			*/
+ 			over = !dir_emit(ctx, dirent->name, dirent->namelen,
+ 				       dirent->ino, dirent->type);
+-			ctx->pos = dirent->off;
++			if (!over)
++				ctx->pos = dirent->off;
+ 		}
+ 
+ 		buf += reclen;
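The fuse hunk enforces a general readdir cursor rule: only advance ctx->pos once dir_emit() has actually delivered the entry; otherwise an entry that did not fit in the user buffer is silently skipped on the next read. A hedged userspace sketch of the same rule, with a made-up entry layout:

#include <stdio.h>

struct dirent_rec { const char *name; long off; };

static long fill_buffer(const struct dirent_rec *ents, int n,
			long pos, int capacity)
{
	for (int i = 0; i < n; i++) {
		if (ents[i].off <= pos)
			continue;	/* already delivered earlier */
		if (capacity-- == 0)
			break;		/* buffer full: do NOT advance pos */
		printf("emit %s\n", ents[i].name);
		pos = ents[i].off;	/* advance only after a successful emit */
	}
	return pos;
}

int main(void)
{
	const struct dirent_rec ents[] = { { "a", 1 }, { "b", 2 }, { "c", 3 } };
	long pos = fill_buffer(ents, 3, 0, 2);	/* room for two entries */

	fill_buffer(ents, 3, pos, 2);		/* resumes at "c"; nothing lost */
	return 0;
}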
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 4a4ac9386d4d..5c22d61869a4 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -531,6 +531,7 @@ void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
+ 	init_waitqueue_head(&res->l_event);
+ 	INIT_LIST_HEAD(&res->l_blocked_list);
+ 	INIT_LIST_HEAD(&res->l_mask_waiters);
++	INIT_LIST_HEAD(&res->l_holders);
+ }
+ 
+ void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
+@@ -748,6 +749,50 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
+ 	res->l_flags = 0UL;
+ }
+ 
++/*
++ * Keep a list of processes that have an interest in a lockres.
++ * Note: this is now only used to check for recursive cluster locking.
++ */
++static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
++				   struct ocfs2_lock_holder *oh)
++{
++	INIT_LIST_HEAD(&oh->oh_list);
++	oh->oh_owner_pid = get_pid(task_pid(current));
++
++	spin_lock(&lockres->l_lock);
++	list_add_tail(&oh->oh_list, &lockres->l_holders);
++	spin_unlock(&lockres->l_lock);
++}
++
++static inline void ocfs2_remove_holder(struct ocfs2_lock_res *lockres,
++				       struct ocfs2_lock_holder *oh)
++{
++	spin_lock(&lockres->l_lock);
++	list_del(&oh->oh_list);
++	spin_unlock(&lockres->l_lock);
++
++	put_pid(oh->oh_owner_pid);
++}
++
++static inline int ocfs2_is_locked_by_me(struct ocfs2_lock_res *lockres)
++{
++	struct ocfs2_lock_holder *oh;
++	struct pid *pid;
++
++	/* look in the list of holders for one with the current task as owner */
++	spin_lock(&lockres->l_lock);
++	pid = task_pid(current);
++	list_for_each_entry(oh, &lockres->l_holders, oh_list) {
++		if (oh->oh_owner_pid == pid) {
++			spin_unlock(&lockres->l_lock);
++			return 1;
++		}
++	}
++	spin_unlock(&lockres->l_lock);
++
++	return 0;
++}
++
+ static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
+ 				     int level)
+ {
+@@ -2343,8 +2388,9 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ 		goto getbh;
+ 	}
+ 
+-	if (ocfs2_mount_local(osb))
+-		goto local;
++	if ((arg_flags & OCFS2_META_LOCK_GETBH) ||
++	    ocfs2_mount_local(osb))
++		goto update;
+ 
+ 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ 		ocfs2_wait_for_recovery(osb);
+@@ -2373,7 +2419,7 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
+ 	if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
+ 		ocfs2_wait_for_recovery(osb);
+ 
+-local:
++update:
+ 	/*
+ 	 * We only see this flag if we're being called from
+ 	 * ocfs2_read_locked_inode(). It means we're locking an inode
+@@ -2515,6 +2561,59 @@ void ocfs2_inode_unlock(struct inode *inode,
+ 		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
+ }
+ 
++/*
++ * These _tracker variants are introduced to deal with the recursive cluster
++ * locking issue. The idea is to keep track of a lock holder on the stack of
++ * the current process. If there's a lock holder on the stack, we know the
++ * task context is already protected by cluster locking. Currently, they're
++ * used in some VFS entry routines.
++ *
++ * Return < 0 on error, 0 if there was no lock holder on the stack
++ * before this call, and 1 if this call is a recursive lock.
++ */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++			     struct buffer_head **ret_bh,
++			     int ex,
++			     struct ocfs2_lock_holder *oh)
++{
++	int status;
++	int arg_flags = 0, has_locked;
++	struct ocfs2_lock_res *lockres;
++
++	lockres = &OCFS2_I(inode)->ip_inode_lockres;
++	has_locked = ocfs2_is_locked_by_me(lockres);
++	/* Just get buffer head if the cluster lock has been taken */
++	if (has_locked)
++		arg_flags = OCFS2_META_LOCK_GETBH;
++
++	if (likely(!has_locked || ret_bh)) {
++		status = ocfs2_inode_lock_full(inode, ret_bh, ex, arg_flags);
++		if (status < 0) {
++			if (status != -ENOENT)
++				mlog_errno(status);
++			return status;
++		}
++	}
++	if (!has_locked)
++		ocfs2_add_holder(lockres, oh);
++
++	return has_locked;
++}
++
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++				int ex,
++				struct ocfs2_lock_holder *oh,
++				int had_lock)
++{
++	struct ocfs2_lock_res *lockres;
++
++	lockres = &OCFS2_I(inode)->ip_inode_lockres;
++	if (!had_lock) {
++		ocfs2_remove_holder(lockres, oh);
++		ocfs2_inode_unlock(inode, ex);
++	}
++}
++
+ int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
+ {
+ 	struct ocfs2_lock_res *lockres;
+diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h
+index d293a22c32c5..a7fc18ba0dc1 100644
+--- a/fs/ocfs2/dlmglue.h
++++ b/fs/ocfs2/dlmglue.h
+@@ -70,6 +70,11 @@ struct ocfs2_orphan_scan_lvb {
+ 	__be32	lvb_os_seqno;
+ };
+ 
++struct ocfs2_lock_holder {
++	struct list_head oh_list;
++	struct pid *oh_owner_pid;
++};
++
+ /* ocfs2_inode_lock_full() 'arg_flags' flags */
+ /* don't wait on recovery. */
+ #define OCFS2_META_LOCK_RECOVERY	(0x01)
+@@ -77,6 +82,8 @@ struct ocfs2_orphan_scan_lvb {
+ #define OCFS2_META_LOCK_NOQUEUE		(0x02)
+ /* don't block waiting for the downconvert thread, instead return -EAGAIN */
+ #define OCFS2_LOCK_NONBLOCK		(0x04)
++/* just get back disk inode bh if we've got cluster lock. */
++#define OCFS2_META_LOCK_GETBH		(0x08)
+ 
+ /* Locking subclasses of inode cluster lock */
+ enum {
+@@ -170,4 +177,15 @@ void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug);
+ 
+ /* To set the locking protocol on module initialization */
+ void ocfs2_set_locking_protocol(void);
++
++/* The _tracker pair is used to avoid cluster recursive locking */
++int ocfs2_inode_lock_tracker(struct inode *inode,
++			     struct buffer_head **ret_bh,
++			     int ex,
++			     struct ocfs2_lock_holder *oh);
++void ocfs2_inode_unlock_tracker(struct inode *inode,
++				int ex,
++				struct ocfs2_lock_holder *oh,
++				int had_lock);
++
+ #endif	/* DLMGLUE_H */
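To make the _tracker scheme concrete, here is a userspace pthreads sketch of the same holder-list idea: each locker records itself on a per-lock list, so a re-entrant call from the same thread can see that the lock is already held and skip taking it again. All names are illustrative, not the ocfs2 API, and holders are assumed to unlock in LIFO order:

#include <pthread.h>
#include <stdio.h>

struct holder {
	pthread_t owner;
	struct holder *next;
};

struct tracked_lock {
	pthread_mutex_t inner;	/* protects the holder list */
	pthread_mutex_t lock;	/* the lock being tracked */
	struct holder *holders;
};

static int locked_by_me(struct tracked_lock *tl)
{
	int found = 0;

	pthread_mutex_lock(&tl->inner);
	for (struct holder *h = tl->holders; h; h = h->next)
		if (pthread_equal(h->owner, pthread_self()))
			found = 1;
	pthread_mutex_unlock(&tl->inner);
	return found;
}

/* Returns 1 if the call was recursive (lock already held by this thread),
 * 0 if we took the lock and registered ourselves as a holder. */
static int lock_tracker(struct tracked_lock *tl, struct holder *oh)
{
	if (locked_by_me(tl))
		return 1;
	pthread_mutex_lock(&tl->lock);
	oh->owner = pthread_self();
	pthread_mutex_lock(&tl->inner);
	oh->next = tl->holders;
	tl->holders = oh;
	pthread_mutex_unlock(&tl->inner);
	return 0;
}

static void unlock_tracker(struct tracked_lock *tl, struct holder *oh,
			   int was_recursive)
{
	if (was_recursive)
		return;			/* only the outermost caller unlocks */
	pthread_mutex_lock(&tl->inner);
	tl->holders = oh->next;		/* LIFO: oh is the newest holder */
	pthread_mutex_unlock(&tl->inner);
	pthread_mutex_unlock(&tl->lock);
}

int main(void)
{
	struct tracked_lock tl = {
		.inner = PTHREAD_MUTEX_INITIALIZER,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct holder oh_outer, oh_inner;
	int outer = lock_tracker(&tl, &oh_outer);	/* 0: took the lock */
	int inner = lock_tracker(&tl, &oh_inner);	/* 1: recursive */

	printf("outer=%d inner=%d\n", outer, inner);
	unlock_tracker(&tl, &oh_inner, inner);
	unlock_tracker(&tl, &oh_outer, outer);
	return 0;
}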
+diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
+index 460c6c37e683..2424adbc4fef 100644
+--- a/fs/ocfs2/ocfs2.h
++++ b/fs/ocfs2/ocfs2.h
+@@ -172,6 +172,7 @@ struct ocfs2_lock_res {
+ 
+ 	struct list_head         l_blocked_list;
+ 	struct list_head         l_mask_waiters;
++	struct list_head	 l_holders;
+ 
+ 	unsigned long		 l_flags;
+ 	char                     l_name[OCFS2_LOCK_ID_MAX_LEN];
+diff --git a/fs/read_write.c b/fs/read_write.c
+index 819ef3faf1bb..bfd1a5dddf6e 100644
+--- a/fs/read_write.c
++++ b/fs/read_write.c
+@@ -112,7 +112,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+ 		 * In the generic case the entire file is data, so as long as
+ 		 * offset isn't at the end of the file then the offset is data.
+ 		 */
+-		if (offset >= eof)
++		if ((unsigned long long)offset >= eof)
+ 			return -ENXIO;
+ 		break;
+ 	case SEEK_HOLE:
+@@ -120,7 +120,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int whence,
+ 		 * There is a virtual hole at the end of the file, so as long as
+ 		 * offset isn't i_size or larger, return i_size.
+ 		 */
+-		if (offset >= eof)
++		if ((unsigned long long)offset >= eof)
+ 			return -ENXIO;
+ 		offset = eof;
+ 		break;
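The read_write.c hunk is a classic signed-comparison fix: as a signed value, a negative offset compares below eof and slips past the check, while the unsigned cast turns it into a huge value that is correctly rejected with -ENXIO. A minimal demonstration:

#include <stdio.h>

static const char *seek_check(long long offset, long long eof)
{
	/* the fixed check: compare as unsigned */
	if ((unsigned long long)offset >= (unsigned long long)eof)
		return "ENXIO";
	return "ok";
}

int main(void)
{
	long long eof = 4096;

	printf("offset 100: %s\n", seek_check(100, eof));
	/* with a plain signed compare, (-1 >= 4096) is false and -1 slips through */
	printf("offset -1:  %s\n", seek_check(-1, eof));
	return 0;
}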
+diff --git a/fs/xattr.c b/fs/xattr.c
+index 81d4695a6115..c7219609bd40 100644
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -163,7 +163,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value,
+ 	}
+ 	memcpy(value, buffer, len);
+ out:
+-	security_release_secctx(buffer, len);
++	kfree(buffer);
+ out_noalloc:
+ 	return len;
+ }
+diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
+index a7a3a63bb360..ab018bafb777 100644
+--- a/fs/xfs/kmem.c
++++ b/fs/xfs/kmem.c
+@@ -24,24 +24,6 @@
+ #include "kmem.h"
+ #include "xfs_message.h"
+ 
+-/*
+- * Greedy allocation.  May fail and may return vmalloced memory.
+- */
+-void *
+-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
+-{
+-	void		*ptr;
+-	size_t		kmsize = maxsize;
+-
+-	while (!(ptr = vzalloc(kmsize))) {
+-		if ((kmsize >>= 1) <= minsize)
+-			kmsize = minsize;
+-	}
+-	if (ptr)
+-		*size = kmsize;
+-	return ptr;
+-}
+-
+ void *
+ kmem_alloc(size_t size, xfs_km_flags_t flags)
+ {
+diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
+index cc6b768fc068..ae45f77ce33b 100644
+--- a/fs/xfs/kmem.h
++++ b/fs/xfs/kmem.h
+@@ -69,8 +69,6 @@ static inline void  kmem_free(const void *ptr)
+ }
+ 
+ 
+-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
+-
+ static inline void *
+ kmem_zalloc(size_t size, xfs_km_flags_t flags)
+ {
+diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
+index b5cab0d1831b..7c89623f891b 100644
+--- a/fs/xfs/xfs_itable.c
++++ b/fs/xfs/xfs_itable.c
+@@ -351,7 +351,6 @@ xfs_bulkstat(
+ 	xfs_agino_t		agino;	/* inode # in allocation group */
+ 	xfs_agnumber_t		agno;	/* allocation group number */
+ 	xfs_btree_cur_t		*cur;	/* btree cursor for ialloc btree */
+-	size_t			irbsize; /* size of irec buffer in bytes */
+ 	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
+ 	int			nirbuf;	/* size of irbuf */
+ 	int			ubcount; /* size of user's buffer */
+@@ -378,11 +377,10 @@ xfs_bulkstat(
+ 	*ubcountp = 0;
+ 	*done = 0;
+ 
+-	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
++	irbuf = kmem_zalloc_large(PAGE_SIZE * 4, KM_SLEEP);
+ 	if (!irbuf)
+ 		return -ENOMEM;
+-
+-	nirbuf = irbsize / sizeof(*irbuf);
++	nirbuf = (PAGE_SIZE * 4) / sizeof(*irbuf);
+ 
+ 	/*
+ 	 * Loop over the allocation groups, starting from the last
+diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
+index 4d9f233c4ba8..7d58ffdacd62 100644
+--- a/include/asm-generic/percpu.h
++++ b/include/asm-generic/percpu.h
+@@ -105,15 +105,35 @@ do {									\
+ 	(__ret);							\
+ })
+ 
+-#define this_cpu_generic_read(pcp)					\
++#define __this_cpu_generic_read_nopreempt(pcp)				\
+ ({									\
+ 	typeof(pcp) __ret;						\
+ 	preempt_disable();						\
+-	__ret = *this_cpu_ptr(&(pcp));					\
++	__ret = READ_ONCE(*raw_cpu_ptr(&(pcp)));			\
+ 	preempt_enable();						\
+ 	__ret;								\
+ })
+ 
++#define __this_cpu_generic_read_noirq(pcp)				\
++({									\
++	typeof(pcp) __ret;						\
++	unsigned long __flags;						\
++	raw_local_irq_save(__flags);					\
++	__ret = *raw_cpu_ptr(&(pcp));					\
++	raw_local_irq_restore(__flags);					\
++	__ret;								\
++})
++
++#define this_cpu_generic_read(pcp)					\
++({									\
++	typeof(pcp) __ret;						\
++	if (__native_word(pcp))						\
++		__ret = __this_cpu_generic_read_nopreempt(pcp);		\
++	else								\
++		__ret = __this_cpu_generic_read_noirq(pcp);		\
++	__ret;								\
++})
++
+ #define this_cpu_generic_to_op(pcp, val, op)				\
+ do {									\
+ 	unsigned long __flags;						\
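The percpu hunk splits the generic read into two paths: variables no wider than a machine word get a READ_ONCE() load with only preemption disabled, while wider types take the irq-off path so the load cannot be torn by an interrupt. A userspace sketch of the sizeof-based dispatch; __native_word() here is a local stand-in for the kernel macro:

#include <stdio.h>

#define __native_word(x) \
	(sizeof(x) == 1 || sizeof(x) == 2 || \
	 sizeof(x) == 4 || sizeof(x) == sizeof(long))

struct pair { long a, b; };	/* too wide for a single load on any arch */

int main(void)
{
	int scalar = 42;
	struct pair wide = { 1, 2 };

	printf("int:  native=%d -> nopreempt path\n", __native_word(scalar));
	printf("pair: native=%d -> irq-off path\n", __native_word(wide));
	return 0;
}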
+diff --git a/include/linux/audit.h b/include/linux/audit.h
+index c2e7e3a83965..d810e6e55d79 100644
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -277,6 +277,20 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
+ 		return __audit_socketcall(nargs, args);
+ 	return 0;
+ }
++
++static inline int audit_socketcall_compat(int nargs, u32 *args)
++{
++	unsigned long a[AUDITSC_ARGS];
++	int i;
++
++	if (audit_dummy_context())
++		return 0;
++
++	for (i = 0; i < nargs; i++)
++		a[i] = (unsigned long)args[i];
++	return __audit_socketcall(nargs, a);
++}
++
+ static inline int audit_sockaddr(int len, void *addr)
+ {
+ 	if (unlikely(!audit_dummy_context()))
+@@ -403,6 +417,12 @@ static inline int audit_socketcall(int nargs, unsigned long *args)
+ {
+ 	return 0;
+ }
++
++static inline int audit_socketcall_compat(int nargs, u32 *args)
++{
++	return 0;
++}
++
+ static inline void audit_fd_pair(int fd1, int fd2)
+ { }
+ static inline int audit_sockaddr(int len, void *addr)
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 1b357997cac5..d7d426c3c9b1 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -40,7 +40,9 @@ static inline void cpuset_dec(void)
+ 
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
++extern void cpuset_force_rebuild(void);
+ extern void cpuset_update_active_cpus(bool cpu_online);
++extern void cpuset_wait_for_hotplug(void);
+ extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
+ extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
+@@ -138,11 +140,15 @@ static inline bool cpusets_enabled(void) { return false; }
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+ 
++static inline void cpuset_force_rebuild(void) { }
++
+ static inline void cpuset_update_active_cpus(bool cpu_online)
+ {
+ 	partition_sched_domains(1, NULL, NULL);
+ }
+ 
++static inline void cpuset_wait_for_hotplug(void) { }
++
+ static inline void cpuset_cpus_allowed(struct task_struct *p,
+ 				       struct cpumask *mask)
+ {
+diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
+index e7fdec4db9da..6cc48ac55fd2 100644
+--- a/include/linux/iio/adc/ad_sigma_delta.h
++++ b/include/linux/iio/adc/ad_sigma_delta.h
+@@ -111,6 +111,9 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ int ad_sd_read_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
+ 	unsigned int size, unsigned int *val);
+ 
++int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
++	unsigned int reset_length);
++
+ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
+ 	const struct iio_chan_spec *chan, int *val);
+ int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
+diff --git a/include/linux/key.h b/include/linux/key.h
+index e1d4715f3222..dcc00a7a5b8d 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -172,6 +172,7 @@ struct key {
+ #define KEY_FLAG_TRUSTED_ONLY	9	/* set if keyring only accepts links to trusted keys */
+ #define KEY_FLAG_BUILTIN	10	/* set if key is builtin */
+ #define KEY_FLAG_ROOT_CAN_INVAL	11	/* set if key can be invalidated by root without permission */
++#define KEY_FLAG_UID_KEYRING	12	/* set if key is a user or user session keyring */
+ 
+ 	/* the key type and key description string
+ 	 * - the desc is used to match a key against search criteria
+@@ -223,6 +224,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_QUOTA_OVERRUN	0x0001	/* add to quota, permit even if overrun */
+ #define KEY_ALLOC_NOT_IN_QUOTA	0x0002	/* not in quota */
+ #define KEY_ALLOC_TRUSTED	0x0004	/* Key should be flagged as trusted */
++#define KEY_ALLOC_UID_KEYRING	0x0010	/* allocating a user or user session keyring */
+ 
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/mbus.h b/include/linux/mbus.h
+index 611b69fa8594..653aa56a13ca 100644
+--- a/include/linux/mbus.h
++++ b/include/linux/mbus.h
+@@ -29,8 +29,8 @@ struct mbus_dram_target_info
+ 	struct mbus_dram_window {
+ 		u8	cs_index;
+ 		u8	mbus_attr;
+-		u32	base;
+-		u32	size;
++		u64	base;
++		u64	size;
+ 	} cs[4];
+ };
+ 
+diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
+index aab032a6ae61..97ca105347a6 100644
+--- a/include/linux/mmc/sdio_func.h
++++ b/include/linux/mmc/sdio_func.h
+@@ -53,7 +53,7 @@ struct sdio_func {
+ 	unsigned int		state;		/* function state */
+ #define SDIO_STATE_PRESENT	(1<<0)		/* present in sysfs */
+ 
+-	u8			tmpbuf[4];	/* DMA:able scratch buffer */
++	u8			*tmpbuf;	/* DMA:able scratch buffer */
+ 
+ 	unsigned		num_info;	/* number of info strings */
+ 	const char		**info;		/* info strings */
+diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
+index 2a330ec9e2af..d1397c8ed94e 100644
+--- a/include/linux/platform_data/mmp_dma.h
++++ b/include/linux/platform_data/mmp_dma.h
+@@ -14,6 +14,7 @@
+ 
+ struct mmp_dma_platdata {
+ 	int dma_channels;
++	int nb_requestors;
+ };
+ 
+ #endif /* MMP_DMA_H */
+diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
+index cccdcfd14973..f348c736e6e0 100644
+--- a/include/net/sctp/ulpevent.h
++++ b/include/net/sctp/ulpevent.h
+@@ -141,8 +141,12 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
+ static inline int sctp_ulpevent_type_enabled(__u16 sn_type,
+ 					     struct sctp_event_subscribe *mask)
+ {
++	int offset = sn_type - SCTP_SN_TYPE_BASE;
+ 	char *amask = (char *) mask;
+-	return amask[sn_type - SCTP_SN_TYPE_BASE];
++
++	if (offset >= sizeof(struct sctp_event_subscribe))
++		return 0;
++	return amask[offset];
+ }
+ 
+ /* Given an event subscription, is this event enabled? */
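The sctp hunk adds a bounds check before indexing the per-event-type byte mask, so an out-of-range event type can no longer read past the end of struct sctp_event_subscribe. A userspace sketch of the guarded lookup (the struct size and base value are illustrative):

#include <stdio.h>
#include <string.h>

#define SN_TYPE_BASE 0x8000	/* stand-in for SCTP_SN_TYPE_BASE */

struct event_subscribe { char on[11]; };	/* one byte per event type */

static int type_enabled(unsigned int type, struct event_subscribe *mask)
{
	unsigned int offset = type - SN_TYPE_BASE;

	if (offset >= sizeof(*mask))	/* the added guard */
		return 0;
	return ((char *)mask)[offset];
}

int main(void)
{
	struct event_subscribe mask;

	memset(&mask, 1, sizeof(mask));
	printf("in range:     %d\n", type_enabled(SN_TYPE_BASE + 3, &mask));
	printf("out of range: %d\n", type_enabled(SN_TYPE_BASE + 999, &mask));
	return 0;
}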
+diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
+index a03acd0d398a..695257ae64ac 100644
+--- a/include/sound/seq_virmidi.h
++++ b/include/sound/seq_virmidi.h
+@@ -60,6 +60,7 @@ struct snd_virmidi_dev {
+ 	int port;			/* created/attached port */
+ 	unsigned int flags;		/* SNDRV_VIRMIDI_* */
+ 	rwlock_t filelist_lock;
++	struct rw_semaphore filelist_sem;
+ 	struct list_head filelist;
+ };
+ 
+diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
+index 07735822a28f..c43dd2b765b1 100644
+--- a/include/uapi/drm/drm_fourcc.h
++++ b/include/uapi/drm/drm_fourcc.h
+@@ -143,6 +143,7 @@
+ 
+ /* Vendor Ids: */
+ #define DRM_FORMAT_MOD_NONE           0
++#define DRM_FORMAT_MOD_VENDOR_NONE    0
+ #define DRM_FORMAT_MOD_VENDOR_INTEL   0x01
+ #define DRM_FORMAT_MOD_VENDOR_AMD     0x02
+ #define DRM_FORMAT_MOD_VENDOR_NV      0x03
+diff --git a/include/uapi/linux/mroute6.h b/include/uapi/linux/mroute6.h
+index ce91215cf7e6..e0b566dc72ef 100644
+--- a/include/uapi/linux/mroute6.h
++++ b/include/uapi/linux/mroute6.h
+@@ -3,6 +3,7 @@
+ 
+ #include <linux/types.h>
+ #include <linux/sockios.h>
++#include <linux/in6.h>		/* For struct sockaddr_in6. */
+ 
+ /*
+  *	Based on the MROUTING 3.5 defines primarily to keep
+diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
+index 91950950aa59..25e865320288 100644
+--- a/include/uapi/linux/rds.h
++++ b/include/uapi/linux/rds.h
+@@ -35,6 +35,7 @@
+ #define _LINUX_RDS_H
+ 
+ #include <linux/types.h>
++#include <linux/socket.h>		/* For __kernel_sockaddr_storage. */
+ 
+ #define RDS_IB_ABI_VERSION		0x301
+ 
+@@ -213,7 +214,7 @@ struct rds_get_mr_args {
+ };
+ 
+ struct rds_get_mr_for_dest_args {
+-	struct sockaddr_storage	dest_addr;
++	struct __kernel_sockaddr_storage dest_addr;
+ 	struct rds_iovec 	vec;
+ 	uint64_t		cookie_addr;
+ 	uint64_t		flags;
+diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
+index dd5f21e75805..856de39d0b89 100644
+--- a/include/uapi/linux/spi/spidev.h
++++ b/include/uapi/linux/spi/spidev.h
+@@ -23,6 +23,7 @@
+ #define SPIDEV_H
+ 
+ #include <linux/types.h>
++#include <linux/ioctl.h>
+ 
+ /* User space versions of kernel symbols for SPI clocking modes,
+  * matching <linux/spi/spi.h>
+diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
+index aa33fd1b2d4f..400196c45b3c 100644
+--- a/include/uapi/linux/usb/ch9.h
++++ b/include/uapi/linux/usb/ch9.h
+@@ -705,6 +705,7 @@ struct usb_interface_assoc_descriptor {
+ 	__u8  iFunction;
+ } __attribute__ ((packed));
+ 
++#define USB_DT_INTERFACE_ASSOCIATION_SIZE	8
+ 
+ /*-------------------------------------------------------------------------*/
+ 
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index efd143dcedf1..1bdc6f910a1d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -926,7 +926,8 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+ 			}
+ 		} else {
+ 			if (insn->src_reg != BPF_REG_0 || insn->off != 0 ||
+-			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) {
++			    (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) ||
++			    BPF_CLASS(insn->code) == BPF_ALU64) {
+ 				verbose("BPF_END uses reserved fields\n");
+ 				return -EINVAL;
+ 			}
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index d222a0c32e34..ec15d7f7ceb9 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2246,6 +2246,13 @@ retry:
+ 	mutex_unlock(&cpuset_mutex);
+ }
+ 
++static bool force_rebuild;
++
++void cpuset_force_rebuild(void)
++{
++	force_rebuild = true;
++}
++
+ /**
+  * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
+  *
+@@ -2320,8 +2327,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
+ 	}
+ 
+ 	/* rebuild sched domains if cpus_allowed has changed */
+-	if (cpus_updated)
++	if (cpus_updated || force_rebuild) {
++		force_rebuild = false;
+ 		rebuild_sched_domains();
++	}
+ }
+ 
+ void cpuset_update_active_cpus(bool cpu_online)
+@@ -2340,6 +2349,11 @@ void cpuset_update_active_cpus(bool cpu_online)
+ 	schedule_work(&cpuset_hotplug_work);
+ }
+ 
++void cpuset_wait_for_hotplug(void)
++{
++	flush_work(&cpuset_hotplug_work);
++}
++
+ /*
+  * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
+  * Call this routine anytime after node_states[N_MEMORY] changes.
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index aaeae885d9af..a42ad8d8a4a5 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -3128,10 +3128,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
+ 	if (depth) {
+ 		hlock = curr->held_locks + depth - 1;
+ 		if (hlock->class_idx == class_idx && nest_lock) {
+-			if (hlock->references)
++			if (hlock->references) {
++				/*
++				 * Check for overflow: "references" is a 12-bit bitfield.
++				 */
++				if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
++					return 0;
++
+ 				hlock->references++;
+-			else
++			} else {
+ 				hlock->references = 2;
++			}
+ 
+ 			return 1;
+ 		}
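The lockdep guard exists because hlock->references is a 12-bit bitfield: without the check it silently wraps to zero after 4095 increments, corrupting the held-lock accounting. A small program showing the wrap point and the guard:

#include <stdio.h>

struct held_lock { unsigned int references : 12; };

int main(void)
{
	struct held_lock hl = { .references = 0 };
	int i;

	for (i = 0; i < 4096; i++) {
		if (hl.references == (1 << 12) - 1) {	/* the added check */
			puts("overflow caught at 4095");
			break;
		}
		hl.references++;
	}
	printf("references = %u after %d increments\n", hl.references, i);
	return 0;
}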
+diff --git a/kernel/power/process.c b/kernel/power/process.c
+index 564f786df470..ba2029a02259 100644
+--- a/kernel/power/process.c
++++ b/kernel/power/process.c
+@@ -18,8 +18,9 @@
+ #include <linux/workqueue.h>
+ #include <linux/kmod.h>
+ #include <trace/events/power.h>
++#include <linux/cpuset.h>
+ 
+-/* 
++/*
+  * Timeout for stopping processes
+  */
+ unsigned int __read_mostly freeze_timeout_msecs = 20 * MSEC_PER_SEC;
+@@ -198,6 +199,8 @@ void thaw_processes(void)
+ 	__usermodehelper_set_disable_depth(UMH_FREEZING);
+ 	thaw_workqueues();
+ 
++	cpuset_wait_for_hotplug();
++
+ 	read_lock(&tasklist_lock);
+ 	for_each_process_thread(g, p) {
+ 		/* No other threads should have PF_SUSPEND_TASK set */
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 976d5fbcd60d..f2f8ff54d2c0 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7024,17 +7024,16 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
+ 		 * operation in the resume sequence, just build a single sched
+ 		 * domain, ignoring cpusets.
+ 		 */
+-		num_cpus_frozen--;
+-		if (likely(num_cpus_frozen)) {
+-			partition_sched_domains(1, NULL, NULL);
++		partition_sched_domains(1, NULL, NULL);
++		if (--num_cpus_frozen)
+ 			break;
+-		}
+ 
+ 		/*
+ 		 * This is the last CPU online operation. So fall through and
+ 		 * restore the original sched domains by considering the
+ 		 * cpuset configurations.
+ 		 */
++		cpuset_force_rebuild();
+ 
+ 	case CPU_ONLINE:
+ 		cpuset_update_active_cpus(true);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 06d0e5712e86..05eeb7bd9f99 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -4286,9 +4286,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+ static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
+ static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
+ 
+-static unsigned long save_global_trampoline;
+-static unsigned long save_global_flags;
+-
+ static int __init set_graph_function(char *str)
+ {
+ 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
+@@ -5879,17 +5876,6 @@ void unregister_ftrace_graph(void)
+ 	unregister_pm_notifier(&ftrace_suspend_notifier);
+ 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+ 
+-#ifdef CONFIG_DYNAMIC_FTRACE
+-	/*
+-	 * Function graph does not allocate the trampoline, but
+-	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
+-	 * if one was used.
+-	 */
+-	global_ops.trampoline = save_global_trampoline;
+-	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
+-		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
+-#endif
+-
+  out:
+ 	mutex_unlock(&ftrace_lock);
+ }
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17213d74540b..ca1a306ea7e6 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3218,11 +3218,17 @@ static int tracing_open(struct inode *inode, struct file *file)
+ 	/* If this file was open for write, then erase contents */
+ 	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ 		int cpu = tracing_get_cpu(inode);
++		struct trace_buffer *trace_buf = &tr->trace_buffer;
++
++#ifdef CONFIG_TRACER_MAX_TRACE
++		if (tr->current_trace->print_max)
++			trace_buf = &tr->max_buffer;
++#endif
+ 
+ 		if (cpu == RING_BUFFER_ALL_CPUS)
+-			tracing_reset_online_cpus(&tr->trace_buffer);
++			tracing_reset_online_cpus(trace_buf);
+ 		else
+-			tracing_reset(&tr->trace_buffer, cpu);
++			tracing_reset(trace_buf, cpu);
+ 	}
+ 
+ 	if (file->f_mode & FMODE_READ) {
+@@ -4668,7 +4674,7 @@ static int tracing_wait_pipe(struct file *filp)
+ 		 *
+ 		 * iter->pos will be 0 if we haven't read anything.
+ 		 */
+-		if (!tracing_is_on() && iter->pos)
++		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
+ 			break;
+ 
+ 		mutex_unlock(&iter->mutex);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9cdf3bfc9178..791f6ebc84a3 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -68,6 +68,7 @@ enum {
+ 	 * attach_mutex to avoid changing binding state while
+ 	 * worker_attach_to_pool() is in progress.
+ 	 */
++	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
+ 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
+ 
+ 	/* worker flags */
+@@ -163,7 +164,6 @@ struct worker_pool {
+ 						/* L: hash of busy workers */
+ 
+ 	/* see manage_workers() for details on the two manager mutexes */
+-	struct mutex		manager_arb;	/* manager arbitration */
+ 	struct worker		*manager;	/* L: purely informational */
+ 	struct mutex		attach_mutex;	/* attach/detach exclusion */
+ 	struct list_head	workers;	/* A: attached workers */
+@@ -300,6 +300,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
+ 
+ static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
+ static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
++static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
+ 
+ static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
+ static bool workqueue_freezing;		/* PL: have wqs started freezing? */
+@@ -813,7 +814,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
+ /* Do we have too many workers and should some go away? */
+ static bool too_many_workers(struct worker_pool *pool)
+ {
+-	bool managing = mutex_is_locked(&pool->manager_arb);
++	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
+ 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
+ 	int nr_busy = pool->nr_workers - nr_idle;
+ 
+@@ -1959,24 +1960,17 @@ static bool manage_workers(struct worker *worker)
+ {
+ 	struct worker_pool *pool = worker->pool;
+ 
+-	/*
+-	 * Anyone who successfully grabs manager_arb wins the arbitration
+-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
+-	 * failure while holding pool->lock reliably indicates that someone
+-	 * else is managing the pool and the worker which failed trylock
+-	 * can proceed to executing work items.  This means that anyone
+-	 * grabbing manager_arb is responsible for actually performing
+-	 * manager duties.  If manager_arb is grabbed and released without
+-	 * actual management, the pool may stall indefinitely.
+-	 */
+-	if (!mutex_trylock(&pool->manager_arb))
++	if (pool->flags & POOL_MANAGER_ACTIVE)
+ 		return false;
++
++	pool->flags |= POOL_MANAGER_ACTIVE;
+ 	pool->manager = worker;
+ 
+ 	maybe_create_worker(pool);
+ 
+ 	pool->manager = NULL;
+-	mutex_unlock(&pool->manager_arb);
++	pool->flags &= ~POOL_MANAGER_ACTIVE;
++	wake_up(&wq_manager_wait);
+ 	return true;
+ }
+ 
+@@ -3156,7 +3150,6 @@ static int init_worker_pool(struct worker_pool *pool)
+ 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
+ 		    (unsigned long)pool);
+ 
+-	mutex_init(&pool->manager_arb);
+ 	mutex_init(&pool->attach_mutex);
+ 	INIT_LIST_HEAD(&pool->workers);
+ 
+@@ -3226,13 +3219,15 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	hash_del(&pool->hash_node);
+ 
+ 	/*
+-	 * Become the manager and destroy all workers.  Grabbing
+-	 * manager_arb prevents @pool's workers from blocking on
+-	 * attach_mutex.
++	 * Become the manager and destroy all workers.  This prevents
++	 * @pool's workers from blocking on attach_mutex.  We're the last
++	 * manager and @pool gets freed with the flag set.
+ 	 */
+-	mutex_lock(&pool->manager_arb);
+-
+ 	spin_lock_irq(&pool->lock);
++	wait_event_lock_irq(wq_manager_wait,
++			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
++	pool->flags |= POOL_MANAGER_ACTIVE;
++
+ 	while ((worker = first_idle_worker(pool)))
+ 		destroy_worker(worker);
+ 	WARN_ON(pool->nr_workers || pool->nr_idle);
+@@ -3246,8 +3241,6 @@ static void put_unbound_pool(struct worker_pool *pool)
+ 	if (pool->detach_completion)
+ 		wait_for_completion(pool->detach_completion);
+ 
+-	mutex_unlock(&pool->manager_arb);
+-
+ 	/* shut down the timers */
+ 	del_timer_sync(&pool->idle_timer);
+ 	del_timer_sync(&pool->mayday_timer);
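The workqueue change replaces the per-pool manager_arb mutex with a POOL_MANAGER_ACTIVE flag tested and set under pool->lock, plus a wait queue so pool teardown can wait for the active manager to finish. A pthreads sketch of the same scheme, with a condition variable standing in for the wait queue (all names are illustrative):

#include <pthread.h>
#include <stdbool.h>

struct pool {
	pthread_mutex_t lock;
	pthread_cond_t manager_wait;
	bool manager_active;
};

/* Called with pool->lock held, like manage_workers(). */
static bool try_become_manager(struct pool *p)
{
	if (p->manager_active)
		return false;		/* someone else is managing */
	p->manager_active = true;
	return true;
}

/* Called with pool->lock held. */
static void done_managing(struct pool *p)
{
	p->manager_active = false;
	pthread_cond_broadcast(&p->manager_wait);
}

/* Teardown path, like put_unbound_pool(): become the last manager and
 * free the pool with the flag still set. */
static void claim_pool_for_destruction(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->manager_active)
		pthread_cond_wait(&p->manager_wait, &p->lock);
	p->manager_active = true;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.manager_wait = PTHREAD_COND_INITIALIZER,
		.manager_active = false,
	};

	pthread_mutex_lock(&p.lock);
	try_become_manager(&p);		/* a worker becomes manager */
	done_managing(&p);		/* and gives the role back */
	pthread_mutex_unlock(&p.lock);

	claim_pool_for_destruction(&p);	/* teardown waits, then claims */
	return 0;
}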
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 59fd7c0b119c..5cd093589c5a 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 		if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
+ 			goto all_leaves_cluster_together;
+ 
+-		/* Otherwise we can just insert a new node ahead of the old
+-		 * one.
++		/* Otherwise all the old leaves cluster in the same slot, but
++		 * the new leaf wants to go into a different slot - so we
++		 * create a new node (n0) to hold the new leaf and a pointer to
++		 * a new node (n1) holding all the old leaves.
++		 *
++		 * This can be done by falling through to the node splitting
++		 * path.
+ 		 */
+-		goto present_leaves_cluster_but_not_new_leaf;
++		pr_devel("present leaves cluster but not new leaf\n");
+ 	}
+ 
+ split_node:
+ 	pr_devel("split node\n");
+ 
+-	/* We need to split the current node; we know that the node doesn't
+-	 * simply contain a full set of leaves that cluster together (it
+-	 * contains meta pointers and/or non-clustering leaves).
++	/* We need to split the current node.  The node may contain anything
++	 * from a single leaf (in the one-leaf case, that leaf will cluster
++	 * with the new leaf) plus meta-pointers for the rest, up to a full
++	 * set of leaves, some of which may cluster.
++	 *
++	 * It won't contain the case in which all the current leaves plus the
++	 * new leaves want to cluster in the same slot.
+ 	 *
+ 	 * We need to expel at least two leaves out of a set consisting of the
+-	 * leaves in the node and the new leaf.
++	 * leaves in the node and the new leaf.  The current meta pointers can
++	 * just be copied as they shouldn't cluster with any of the leaves.
+ 	 *
+ 	 * We need a new node (n0) to replace the current one and a new node to
+ 	 * take the expelled nodes (n1).
+@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
+ 	pr_devel("<--%s() = ok [split node]\n", __func__);
+ 	return true;
+ 
+-present_leaves_cluster_but_not_new_leaf:
+-	/* All the old leaves cluster in the same slot, but the new leaf wants
+-	 * to go into a different slot, so we create a new node to hold the new
+-	 * leaf and a pointer to a new node holding all the old leaves.
+-	 */
+-	pr_devel("present leaves cluster but not new leaf\n");
+-
+-	new_n0->back_pointer = node->back_pointer;
+-	new_n0->parent_slot = node->parent_slot;
+-	new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
+-	new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
+-	new_n1->parent_slot = edit->segment_cache[0];
+-	new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
+-	edit->adjust_count_on = new_n0;
+-
+-	for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
+-		new_n1->slots[i] = node->slots[i];
+-
+-	new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
+-	edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
+-
+-	edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
+-	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
+-	edit->excised_meta[0] = assoc_array_node_to_ptr(node);
+-	pr_devel("<--%s() = ok [insert node before]\n", __func__);
+-	return true;
+-
+ all_leaves_cluster_together:
+ 	/* All the leaves, new and old, want to cluster together in this node
+ 	 * in the same slot, so we have to replace this node with a shortcut to
+diff --git a/lib/digsig.c b/lib/digsig.c
+index ae05ea393fc8..4b8ef0bd315b 100644
+--- a/lib/digsig.c
++++ b/lib/digsig.c
+@@ -86,6 +86,12 @@ static int digsig_verify_rsa(struct key *key,
+ 	down_read(&key->sem);
+ 	ukp = key->payload.data;
+ 
++	if (!ukp) {
++		/* key was revoked before we acquired its semaphore */
++		err = -EKEYREVOKED;
++		goto err1;
++	}
++
+ 	if (ukp->datalen < sizeof(*pkh))
+ 		goto err1;
+ 
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 999bb3424d44..c0395d54aadc 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -228,7 +228,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ {
+ 	struct kmem_cache *s;
+ 
+-	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
++	if (slab_nomerge)
+ 		return NULL;
+ 
+ 	if (ctor)
+@@ -239,6 +239,9 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
+ 	size = ALIGN(size, align);
+ 	flags = kmem_cache_flags(size, flags, name, NULL);
+ 
++	if (flags & SLAB_NEVER_MERGE)
++		return NULL;
++
+ 	list_for_each_entry_reverse(s, &slab_caches, list) {
+ 		if (slab_unmergeable(s))
+ 			continue;
+diff --git a/net/compat.c b/net/compat.c
+index 5cfd26a0006f..0ccf3ecf6bbb 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -22,6 +22,7 @@
+ #include <linux/filter.h>
+ #include <linux/compat.h>
+ #include <linux/security.h>
++#include <linux/audit.h>
+ #include <linux/export.h>
+ 
+ #include <net/scm.h>
+@@ -767,14 +768,24 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+ 
+ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
+ {
+-	int ret;
+-	u32 a[6];
++	u32 a[AUDITSC_ARGS];
++	unsigned int len;
+ 	u32 a0, a1;
++	int ret;
+ 
+ 	if (call < SYS_SOCKET || call > SYS_SENDMMSG)
+ 		return -EINVAL;
+-	if (copy_from_user(a, args, nas[call]))
++	len = nas[call];
++	if (len > sizeof(a))
++		return -EINVAL;
++
++	if (copy_from_user(a, args, len))
+ 		return -EFAULT;
++
++	ret = audit_socketcall_compat(len / sizeof(a[0]), a);
++	if (ret)
++		return ret;
++
+ 	a0 = a[0];
+ 	a1 = a[1];
+ 
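The compat socketcall fix validates the table-driven argument length against the destination buffer before copying anything from userspace. A userspace sketch of the pattern, with memcpy() standing in for copy_from_user() and an illustrative nas[] table:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define NARGS(x) ((x) * sizeof(uint32_t))
static const unsigned char nas[] = { NARGS(2), NARGS(3), NARGS(6) };

static int do_call(unsigned int call, const uint32_t *uargs)
{
	uint32_t a[6];
	unsigned int len;

	if (call >= sizeof(nas) / sizeof(nas[0]))
		return -1;		/* -EINVAL */
	len = nas[call];
	if (len > sizeof(a))		/* the added guard */
		return -1;		/* -EINVAL */
	memcpy(a, uargs, len);		/* stands in for copy_from_user() */
	return 0;
}

int main(void)
{
	uint32_t args[6] = { 0 };

	printf("call 1 -> %d\n", do_call(1, args));
	printf("call 9 -> %d\n", do_call(9, args));
	return 0;
}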
+diff --git a/net/core/dev.c b/net/core/dev.c
+index bb711e5e345b..5d94dadb8df9 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2284,6 +2284,9 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+ {
+ 	unsigned long flags;
+ 
++	if (unlikely(!skb))
++		return;
++
+ 	if (likely(atomic_read(&skb->users) == 1)) {
+ 		smp_rmb();
+ 		atomic_set(&skb->users, 0);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1b70ed7015cb..e369262ea57e 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1497,6 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
+ 
+ 		sock_copy(newsk, sk);
+ 
++		newsk->sk_prot_creator = sk->sk_prot;
++
+ 		/* SANITY */
+ 		get_net(sock_net(newsk));
+ 		sk_node_init(&newsk->sk_node);
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 0c152087ca15..eaa50d69b607 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	struct ip_tunnel_parm *parms = &tunnel->parms;
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net_device *tdev;	/* Device to other host */
++	int pkt_len = skb->len;
+ 	int err;
+ 
+ 	if (!dst) {
+@@ -199,7 +200,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ 
+ 	err = dst_output(skb);
+ 	if (net_xmit_eval(err) == 0)
+-		err = skb->len;
++		err = pkt_len;
+ 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ 	return NETDEV_TX_OK;
+ 
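The vti hunks snapshot skb->len before dst_output() hands the skb to the lower layers, which may free it; the stats then use the saved length instead of touching a possibly-freed skb. A userspace sketch of the save-before-handoff pattern:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

static int transmit(struct buf *b)	/* consumes and frees b */
{
	free(b->data);
	free(b);
	return 0;
}

static long xmit(struct buf *b)
{
	size_t pkt_len = b->len;	/* snapshot while b is still ours */
	int err = transmit(b);		/* b must not be touched after this */

	return err == 0 ? (long)pkt_len : -1;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->len = 1500;
	b->data = malloc(b->len);
	printf("sent %ld bytes\n", xmit(b));
	return 0;
}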
+diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+index 7c676671329d..cc626e1b06d3 100644
+--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
++++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
+@@ -1304,6 +1304,7 @@ static int __init nf_nat_snmp_basic_init(void)
+ static void __exit nf_nat_snmp_basic_fini(void)
+ {
+ 	RCU_INIT_POINTER(nf_nat_snmp_hook, NULL);
++	synchronize_rcu();
+ 	nf_conntrack_helper_unregister(&snmp_trap_helper);
+ }
+ 
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 0224c032dca5..6ac448d8dd76 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -434,6 +434,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 	struct dst_entry *dst = skb_dst(skb);
+ 	struct net_device *tdev;
+ 	struct xfrm_state *x;
++	int pkt_len = skb->len;
+ 	int err = -1;
+ 	int mtu;
+ 
+@@ -487,7 +488,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ 		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+ 
+ 		u64_stats_update_begin(&tstats->syncp);
+-		tstats->tx_bytes += skb->len;
++		tstats->tx_bytes += pkt_len;
+ 		tstats->tx_packets++;
+ 		u64_stats_update_end(&tstats->syncp);
+ 	} else {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 2c1d77bc6979..a6842f1ad3ac 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1003,6 +1003,7 @@ static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
+ 		 */
+ 		offset = skb_transport_offset(skb);
+ 		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
++		csum = skb->csum;
+ 
+ 		skb->ip_summed = CHECKSUM_NONE;
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index cd0a5903376c..8a1d1542eb2f 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1321,6 +1321,9 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 	struct sock *sk = NULL;
+ 
+ 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
++
++	l2tp_tunnel_closeall(tunnel);
++
+ 	sk = l2tp_tunnel_sock_lookup(tunnel);
+ 	if (!sk)
+ 		goto out;
+@@ -1643,15 +1646,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+ 
+ /* This function is used by the netlink TUNNEL_DELETE command.
+  */
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
+-	l2tp_tunnel_inc_refcount(tunnel);
+-	l2tp_tunnel_closeall(tunnel);
+-	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
+-		l2tp_tunnel_dec_refcount(tunnel);
+-		return 1;
++	if (!test_and_set_bit(0, &tunnel->dead)) {
++		l2tp_tunnel_inc_refcount(tunnel);
++		queue_work(l2tp_wq, &tunnel->del_work);
+ 	}
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+ 
+diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
+index cfb50c2acbd9..d23b760d7f35 100644
+--- a/net/l2tp/l2tp_core.h
++++ b/net/l2tp/l2tp_core.h
+@@ -169,6 +169,9 @@ struct l2tp_tunnel_cfg {
+ 
+ struct l2tp_tunnel {
+ 	int			magic;		/* Should be L2TP_TUNNEL_MAGIC */
++
++	unsigned long		dead;
++
+ 	struct rcu_head rcu;
+ 	rwlock_t		hlist_lock;	/* protect session_hlist */
+ 	struct hlist_head	session_hlist[L2TP_HASH_SIZE];
+@@ -253,7 +256,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+ 		       u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
+ 		       struct l2tp_tunnel **tunnelp);
+ void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
+-int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
++void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
+ struct l2tp_session *l2tp_session_create(int priv_size,
+ 					 struct l2tp_tunnel *tunnel,
+ 					 u32 session_id, u32 peer_session_id,
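The l2tp change makes tunnel deletion idempotent: an atomic test-and-set on a "dead" bit guarantees the teardown work is queued exactly once, even if several paths race to delete the tunnel. A userspace sketch using C11 atomic_flag in place of test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

struct tunnel {
	atomic_flag dead;	/* stands in for bit 0 of tunnel->dead */
	int refcount;
};

static void tunnel_delete(struct tunnel *t)
{
	if (!atomic_flag_test_and_set(&t->dead)) {
		t->refcount++;	/* pin the tunnel until the work runs */
		puts("teardown queued");
	} else {
		puts("already dying, nothing to do");
	}
}

int main(void)
{
	struct tunnel t = { .dead = ATOMIC_FLAG_INIT, .refcount = 1 };

	tunnel_delete(&t);	/* the first caller wins */
	tunnel_delete(&t);	/* a racing caller is a no-op */
	return 0;
}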
+diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
+index 683f0e3cb124..d97a8131b6de 100644
+--- a/net/mac80211/offchannel.c
++++ b/net/mac80211/offchannel.c
+@@ -469,6 +469,8 @@ void ieee80211_roc_purge(struct ieee80211_local *local,
+ 	struct ieee80211_roc_work *roc, *tmp;
+ 	LIST_HEAD(tmp_list);
+ 
++	flush_work(&local->hw_roc_start);
++
+ 	mutex_lock(&local->mtx);
+ 	list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ 		if (sdata && roc->sdata != sdata)
+diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
+index 4e78c57b818f..f3b92ce463b0 100644
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -200,6 +200,7 @@ void nf_conntrack_unregister_notifier(struct net *net,
+ 	BUG_ON(notify != new);
+ 	RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
+ 	mutex_unlock(&nf_ct_ecache_mutex);
++	/* synchronize_rcu() is called from ctnetlink_exit. */
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+ 
+@@ -236,6 +237,7 @@ void nf_ct_expect_unregister_notifier(struct net *net,
+ 	BUG_ON(notify != new);
+ 	RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
+ 	mutex_unlock(&nf_ct_ecache_mutex);
++	/* synchronize_rcu() is called from ctnetlink_exit. */
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
+ 
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index b45a4223cb05..4f4c88d70a8f 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -392,7 +392,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
+ 	struct net *net = nf_ct_exp_net(expect);
+ 	struct hlist_node *next;
+ 	unsigned int h;
+-	int ret = 1;
++	int ret = 0;
+ 
+ 	if (!master_help) {
+ 		ret = -ESHUTDOWN;
+@@ -442,7 +442,7 @@ int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
+ 
+ 	spin_lock_bh(&nf_conntrack_expect_lock);
+ 	ret = __nf_ct_expect_check(expect);
+-	if (ret <= 0)
++	if (ret < 0)
+ 		goto out;
+ 
+ 	ret = nf_ct_expect_insert(expect);
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index b6e939a8b099..c0e64d15cf34 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -3281,6 +3281,7 @@ static void __exit ctnetlink_exit(void)
+ #ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ 	RCU_INIT_POINTER(nfq_ct_hook, NULL);
+ #endif
++	synchronize_rcu();
+ }
+ 
+ module_init(ctnetlink_init);
+diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
+index 4e0b47831d43..56e175efb66e 100644
+--- a/net/netfilter/nf_nat_core.c
++++ b/net/netfilter/nf_nat_core.c
+@@ -888,6 +888,8 @@ static void __exit nf_nat_cleanup(void)
+ #ifdef CONFIG_XFRM
+ 	RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
+ #endif
++	synchronize_rcu();
++
+ 	for (i = 0; i < NFPROTO_NUMPROTO; i++)
+ 		kfree(nf_nat_l4protos[i]);
+ 	synchronize_net();
+diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
+index 54330fb5efaf..6d10002d23f8 100644
+--- a/net/netfilter/nfnetlink_cthelper.c
++++ b/net/netfilter/nfnetlink_cthelper.c
+@@ -161,6 +161,7 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+ 	int i, ret;
+ 	struct nf_conntrack_expect_policy *expect_policy;
+ 	struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
++	unsigned int class_max;
+ 
+ 	ret = nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+ 			       nfnl_cthelper_expect_policy_set);
+@@ -170,19 +171,18 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+ 	if (!tb[NFCTH_POLICY_SET_NUM])
+ 		return -EINVAL;
+ 
+-	helper->expect_class_max =
+-		ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+-
+-	if (helper->expect_class_max != 0 &&
+-	    helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
++	class_max = ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
++	if (class_max == 0)
++		return -EINVAL;
++	if (class_max > NF_CT_MAX_EXPECT_CLASSES)
+ 		return -EOVERFLOW;
+ 
+ 	expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
+-				helper->expect_class_max, GFP_KERNEL);
++				class_max, GFP_KERNEL);
+ 	if (expect_policy == NULL)
+ 		return -ENOMEM;
+ 
+-	for (i=0; i<helper->expect_class_max; i++) {
++	for (i = 0; i < class_max; i++) {
+ 		if (!tb[NFCTH_POLICY_SET+i])
+ 			goto err;
+ 
+@@ -191,6 +191,8 @@ nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+ 		if (ret < 0)
+ 			goto err;
+ 	}
++
++	helper->expect_class_max = class_max - 1;
+ 	helper->expect_policy = expect_policy;
+ 	return 0;
+ err:
+@@ -377,10 +379,10 @@ nfnl_cthelper_dump_policy(struct sk_buff *skb,
+ 		goto nla_put_failure;
+ 
+ 	if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
+-			 htonl(helper->expect_class_max)))
++			 htonl(helper->expect_class_max + 1)))
+ 		goto nla_put_failure;
+ 
+-	for (i=0; i<helper->expect_class_max; i++) {
++	for (i = 0; i < helper->expect_class_max + 1; i++) {
+ 		nest_parms2 = nla_nest_start(skb,
+ 				(NFCTH_POLICY_SET+i) | NLA_F_NESTED);
+ 		if (nest_parms2 == NULL)
+diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
+index 476accd17145..3dfe3b7271e4 100644
+--- a/net/netfilter/nfnetlink_cttimeout.c
++++ b/net/netfilter/nfnetlink_cttimeout.c
+@@ -578,6 +578,7 @@ static void __exit cttimeout_exit(void)
+ #ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ 	RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
+ 	RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
++	synchronize_rcu();
+ #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+ }
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index c29070c27073..4cd7e27e3fc4 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2694,13 +2694,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
+ 	int ret = 0;
+ 	bool unlisted = false;
+ 
+-	if (po->fanout)
+-		return -EINVAL;
+-
+ 	lock_sock(sk);
+ 	spin_lock(&po->bind_lock);
+ 	rcu_read_lock();
+ 
++	if (po->fanout) {
++		ret = -EINVAL;
++		goto out_unlock;
++	}
++
+ 	if (name) {
+ 		dev = dev_get_by_name_rcu(sock_net(sk), name);
+ 		if (!dev) {
+@@ -3542,6 +3544,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
+ 	case PACKET_HDRLEN:
+ 		if (len > sizeof(int))
+ 			len = sizeof(int);
++		if (len < sizeof(int))
++			return -EINVAL;
+ 		if (copy_from_user(&val, optval, len))
+ 			return -EFAULT;
+ 		switch (val) {
+diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
+index 8a09ee7db3c1..920b39572edd 100644
+--- a/net/rds/ib_cm.c
++++ b/net/rds/ib_cm.c
+@@ -307,7 +307,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 		ret = PTR_ERR(ic->i_send_cq);
+ 		ic->i_send_cq = NULL;
+ 		rdsdebug("ib_create_cq send failed: %d\n", ret);
+-		goto out;
++		goto rds_ibdev_out;
+ 	}
+ 
+ 	ic->i_recv_cq = ib_create_cq(dev, rds_ib_recv_cq_comp_handler,
+@@ -317,19 +317,19 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 		ret = PTR_ERR(ic->i_recv_cq);
+ 		ic->i_recv_cq = NULL;
+ 		rdsdebug("ib_create_cq recv failed: %d\n", ret);
+-		goto out;
++		goto send_cq_out;
+ 	}
+ 
+ 	ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP);
+ 	if (ret) {
+ 		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
+-		goto out;
++		goto recv_cq_out;
+ 	}
+ 
+ 	ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
+ 	if (ret) {
+ 		rdsdebug("ib_req_notify_cq recv failed: %d\n", ret);
+-		goto out;
++		goto recv_cq_out;
+ 	}
+ 
+ 	/* XXX negotiate max send/recv with remote? */
+@@ -353,7 +353,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr);
+ 	if (ret) {
+ 		rdsdebug("rdma_create_qp failed: %d\n", ret);
+-		goto out;
++		goto recv_cq_out;
+ 	}
+ 
+ 	ic->i_send_hdrs = ib_dma_alloc_coherent(dev,
+@@ -363,7 +363,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	if (!ic->i_send_hdrs) {
+ 		ret = -ENOMEM;
+ 		rdsdebug("ib_dma_alloc_coherent send failed\n");
+-		goto out;
++		goto qp_out;
+ 	}
+ 
+ 	ic->i_recv_hdrs = ib_dma_alloc_coherent(dev,
+@@ -373,7 +373,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	if (!ic->i_recv_hdrs) {
+ 		ret = -ENOMEM;
+ 		rdsdebug("ib_dma_alloc_coherent recv failed\n");
+-		goto out;
++		goto send_hdrs_dma_out;
+ 	}
+ 
+ 	ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
+@@ -381,7 +381,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	if (!ic->i_ack) {
+ 		ret = -ENOMEM;
+ 		rdsdebug("ib_dma_alloc_coherent ack failed\n");
+-		goto out;
++		goto recv_hdrs_dma_out;
+ 	}
+ 
+ 	ic->i_sends = vzalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+@@ -389,7 +389,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	if (!ic->i_sends) {
+ 		ret = -ENOMEM;
+ 		rdsdebug("send allocation failed\n");
+-		goto out;
++		goto ack_dma_out;
+ 	}
+ 
+ 	ic->i_recvs = vzalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+@@ -397,7 +397,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	if (!ic->i_recvs) {
+ 		ret = -ENOMEM;
+ 		rdsdebug("recv allocation failed\n");
+-		goto out;
++		goto sends_out;
+ 	}
+ 
+ 	rds_ib_recv_init_ack(ic);
+@@ -405,8 +405,33 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
+ 	rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
+ 		 ic->i_send_cq, ic->i_recv_cq);
+ 
+-out:
++	return ret;
++
++sends_out:
++	vfree(ic->i_sends);
++ack_dma_out:
++	ib_dma_free_coherent(dev, sizeof(struct rds_header),
++			     ic->i_ack, ic->i_ack_dma);
++recv_hdrs_dma_out:
++	ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr *
++					sizeof(struct rds_header),
++					ic->i_recv_hdrs, ic->i_recv_hdrs_dma);
++send_hdrs_dma_out:
++	ib_dma_free_coherent(dev, ic->i_send_ring.w_nr *
++					sizeof(struct rds_header),
++					ic->i_send_hdrs, ic->i_send_hdrs_dma);
++qp_out:
++	rdma_destroy_qp(ic->i_cm_id);
++recv_cq_out:
++	if (!ib_destroy_cq(ic->i_recv_cq))
++		ic->i_recv_cq = NULL;
++send_cq_out:
++	if (!ib_destroy_cq(ic->i_send_cq))
++		ic->i_send_cq = NULL;
++rds_ibdev_out:
++	rds_ib_remove_conn(rds_ibdev, conn);
+ 	rds_ib_dev_put(rds_ibdev);
++
+ 	return ret;
+ }
+ 
+diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
+index bd3825d38abc..716d0b4d4834 100644
+--- a/net/rds/ib_send.c
++++ b/net/rds/ib_send.c
+@@ -102,16 +102,6 @@ static void rds_ib_send_complete(struct rds_message *rm,
+ 	complete(rm, notify_status);
+ }
+ 
+-static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+-				   struct rm_data_op *op,
+-				   int wc_status)
+-{
+-	if (op->op_nents)
+-		ib_dma_unmap_sg(ic->i_cm_id->device,
+-				op->op_sg, op->op_nents,
+-				DMA_TO_DEVICE);
+-}
+-
+ static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
+ 				   struct rm_rdma_op *op,
+ 				   int wc_status)
+@@ -172,6 +162,21 @@ static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
+ 		rds_ib_stats_inc(s_ib_atomic_fadd);
+ }
+ 
++static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
++				   struct rm_data_op *op,
++				   int wc_status)
++{
++	struct rds_message *rm = container_of(op, struct rds_message, data);
++
++	if (op->op_nents)
++		ib_dma_unmap_sg(ic->i_cm_id->device,
++				op->op_sg, op->op_nents,
++				DMA_TO_DEVICE);
++
++	if (rm->rdma.op_active && rm->data.op_notify)
++		rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
++}
++
+ /*
+  * Unmap the resources associated with a struct send_work.
+  *
+diff --git a/net/rds/rdma.c b/net/rds/rdma.c
+index 40084d843e9f..3738b1920c09 100644
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -625,6 +625,16 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ 		}
+ 		op->op_notifier->n_user_token = args->user_token;
+ 		op->op_notifier->n_status = RDS_RDMA_SUCCESS;
++
++		/* Enable rdma notification on the data operation for composite
++		 * rds messages and make sure notification is enabled only for
++		 * the data operation which follows it, so that the application
++		 * gets notified only after the full message gets delivered.
++		 */
++		if (rm->data.op_sg) {
++			rm->rdma.op_notify = 0;
++			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
++		}
+ 	}
+ 
+ 	/* The cookie contains the R_Key of the remote memory region, and
+diff --git a/net/rds/rds.h b/net/rds/rds.h
+index 0d41155a2258..af68d5941d5f 100644
+--- a/net/rds/rds.h
++++ b/net/rds/rds.h
+@@ -361,6 +361,7 @@ struct rds_message {
+ 		} rdma;
+ 		struct rm_data_op {
+ 			unsigned int		op_active:1;
++			unsigned int		op_notify:1;
+ 			unsigned int		op_nents;
+ 			unsigned int		op_count;
+ 			struct scatterlist	*op_sg;
+diff --git a/net/rds/send.c b/net/rds/send.c
+index 7b30c0f3180d..eba0eaf4cc38 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -454,12 +454,14 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
+ 	struct rm_rdma_op *ro;
+ 	struct rds_notifier *notifier;
+ 	unsigned long flags;
++	unsigned int notify = 0;
+ 
+ 	spin_lock_irqsave(&rm->m_rs_lock, flags);
+ 
++	notify =  rm->rdma.op_notify | rm->data.op_notify;
+ 	ro = &rm->rdma;
+ 	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+-	    ro->op_active && ro->op_notify && ro->op_notifier) {
++	    ro->op_active && notify && ro->op_notifier) {
+ 		notifier = ro->op_notifier;
+ 		rs = rm->m_rs;
+ 		sock_hold(rds_rs_to_sk(rs));
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index e9333147d6f1..7c7ec95459f6 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -525,7 +525,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
+ 		return false;
+ 	if (msg_errcode(msg))
+ 		return false;
+-	*err = -TIPC_ERR_NO_NAME;
++	*err = TIPC_ERR_NO_NAME;
+ 	if (skb_linearize(skb))
+ 		return false;
+ 	if (msg_reroute_cnt(msg))
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index e81e20cbe6dd..acb4ccf448ba 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -9605,6 +9605,9 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+ 	if (err)
+ 		return err;
+ 
++	if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
++	    !tb[NL80211_REKEY_DATA_KCK])
++		return -EINVAL;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+ 		return -ERANGE;
+ 	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+diff --git a/security/keys/internal.h b/security/keys/internal.h
+index 5105c2c2da75..51ffb9cde073 100644
+--- a/security/keys/internal.h
++++ b/security/keys/internal.h
+@@ -136,7 +136,7 @@ extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+ extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
+ 
+-extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
++extern struct key *find_keyring_by_name(const char *name, bool uid_keyring);
+ 
+ extern int install_user_keyrings(void);
+ extern int install_thread_keyring_to_cred(struct cred *);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 72ed9be9da28..c50a926cea54 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -296,6 +296,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ 		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+ 	if (flags & KEY_ALLOC_TRUSTED)
+ 		key->flags |= 1 << KEY_FLAG_TRUSTED;
++	if (flags & KEY_ALLOC_UID_KEYRING)
++		key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+ 
+ #ifdef KEY_DEBUGGING
+ 	key->magic = KEY_DEBUG_MAGIC;
+@@ -905,6 +907,16 @@ error:
+ 	 */
+ 	__key_link_end(keyring, &index_key, edit);
+ 
++	key = key_ref_to_ptr(key_ref);
++	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
++		ret = wait_for_key_construction(key, true);
++		if (ret < 0) {
++			key_ref_put(key_ref);
++			key_ref = ERR_PTR(ret);
++			goto error_free_prep;
++		}
++	}
++
+ 	key_ref = __key_update(key_ref, &prep);
+ 	goto error_free_prep;
+ }
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 0be918844c1e..a2d29cca16c6 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -744,6 +744,11 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+ 	key = key_ref_to_ptr(key_ref);
+ 
++	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
++		ret = -ENOKEY;
++		goto error2;
++	}
++
+ 	/* see if we can read it directly */
+ 	ret = key_permission(key_ref, KEY_NEED_READ);
+ 	if (ret == 0)
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index d33437007ad2..6d913f40b6f0 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -416,7 +416,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
+ }
+ 
+ struct keyring_read_iterator_context {
+-	size_t			qty;
++	size_t			buflen;
+ 	size_t			count;
+ 	key_serial_t __user	*buffer;
+ };
+@@ -428,9 +428,9 @@ static int keyring_read_iterator(const void *object, void *data)
+ 	int ret;
+ 
+ 	kenter("{%s,%d},,{%zu/%zu}",
+-	       key->type->name, key->serial, ctx->count, ctx->qty);
++	       key->type->name, key->serial, ctx->count, ctx->buflen);
+ 
+-	if (ctx->count >= ctx->qty)
++	if (ctx->count >= ctx->buflen)
+ 		return 1;
+ 
+ 	ret = put_user(key->serial, ctx->buffer);
+@@ -465,16 +465,12 @@ static long keyring_read(const struct key *keyring,
+ 		return 0;
+ 
+ 	/* Calculate how much data we could return */
+-	ctx.qty = nr_keys * sizeof(key_serial_t);
+-
+ 	if (!buffer || !buflen)
+-		return ctx.qty;
+-
+-	if (buflen > ctx.qty)
+-		ctx.qty = buflen;
++		return nr_keys * sizeof(key_serial_t);
+ 
+ 	/* Copy the IDs of the subscribed keys into the buffer */
+ 	ctx.buffer = (key_serial_t __user *)buffer;
++	ctx.buflen = buflen;
+ 	ctx.count = 0;
+ 	ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ 	if (ret < 0) {
+@@ -965,15 +961,15 @@ found:
+ /*
+  * Find a keyring with the specified name.
+  *
+- * All named keyrings in the current user namespace are searched, provided they
+- * grant Search permission directly to the caller (unless this check is
+- * skipped).  Keyrings whose usage points have reached zero or who have been
+- * revoked are skipped.
++ * Only keyrings that have nonzero refcount, are not revoked, and are owned by a
++ * user in the current user namespace are considered.  If @uid_keyring is %true,
++ * the keyring additionally must have been allocated as a user or user session
++ * keyring; otherwise, it must grant Search permission directly to the caller.
+  *
+  * Returns a pointer to the keyring with the keyring's refcount having been
+  * incremented on success.  -ENOKEY is returned if a key could not be found.
+  */
+-struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
++struct key *find_keyring_by_name(const char *name, bool uid_keyring)
+ {
+ 	struct key *keyring;
+ 	int bucket;
+@@ -1001,10 +997,15 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
+ 			if (strcmp(keyring->description, name) != 0)
+ 				continue;
+ 
+-			if (!skip_perm_check &&
+-			    key_permission(make_key_ref(keyring, 0),
+-					   KEY_NEED_SEARCH) < 0)
+-				continue;
++			if (uid_keyring) {
++				if (!test_bit(KEY_FLAG_UID_KEYRING,
++					      &keyring->flags))
++					continue;
++			} else {
++				if (key_permission(make_key_ref(keyring, 0),
++						   KEY_NEED_SEARCH) < 0)
++					continue;
++			}
+ 
+ 			/* we've got a match but we might end up racing with
+ 			 * key_cleanup() if the keyring is currently 'dead'
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index 162077db5f81..85b61a3ac981 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -76,7 +76,9 @@ int install_user_keyrings(void)
+ 		if (IS_ERR(uid_keyring)) {
+ 			uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+ 						    cred, user_keyring_perm,
+-						    KEY_ALLOC_IN_QUOTA, NULL);
++						    KEY_ALLOC_UID_KEYRING |
++							KEY_ALLOC_IN_QUOTA,
++						    NULL);
+ 			if (IS_ERR(uid_keyring)) {
+ 				ret = PTR_ERR(uid_keyring);
+ 				goto error;
+@@ -92,7 +94,9 @@ int install_user_keyrings(void)
+ 			session_keyring =
+ 				keyring_alloc(buf, user->uid, INVALID_GID,
+ 					      cred, user_keyring_perm,
+-					      KEY_ALLOC_IN_QUOTA, NULL);
++					      KEY_ALLOC_UID_KEYRING |
++						  KEY_ALLOC_IN_QUOTA,
++					      NULL);
+ 			if (IS_ERR(session_keyring)) {
+ 				ret = PTR_ERR(session_keyring);
+ 				goto error_release;
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index e45f0a3df127..10c0d33afdba 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -1271,7 +1271,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
+  * @inode: the object
+  * @name: attribute name
+  * @buffer: where to put the result
+- * @alloc: unused
++ * @alloc: duplicate memory
+  *
+  * Returns the size of the attribute or an error code
+  */
+@@ -1284,43 +1284,38 @@ static int smack_inode_getsecurity(const struct inode *inode,
+ 	struct super_block *sbp;
+ 	struct inode *ip = (struct inode *)inode;
+ 	struct smack_known *isp;
+-	int ilen;
+-	int rc = 0;
+ 
+-	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
++	if (strcmp(name, XATTR_SMACK_SUFFIX) == 0)
+ 		isp = smk_of_inode(inode);
+-		ilen = strlen(isp->smk_known);
+-		*buffer = isp->smk_known;
+-		return ilen;
+-	}
++	else {
++		/*
++		 * The rest of the Smack xattrs are only on sockets.
++		 */
++		sbp = ip->i_sb;
++		if (sbp->s_magic != SOCKFS_MAGIC)
++			return -EOPNOTSUPP;
+ 
+-	/*
+-	 * The rest of the Smack xattrs are only on sockets.
+-	 */
+-	sbp = ip->i_sb;
+-	if (sbp->s_magic != SOCKFS_MAGIC)
+-		return -EOPNOTSUPP;
++		sock = SOCKET_I(ip);
++		if (sock == NULL || sock->sk == NULL)
++			return -EOPNOTSUPP;
+ 
+-	sock = SOCKET_I(ip);
+-	if (sock == NULL || sock->sk == NULL)
+-		return -EOPNOTSUPP;
+-
+-	ssp = sock->sk->sk_security;
++		ssp = sock->sk->sk_security;
+ 
+-	if (strcmp(name, XATTR_SMACK_IPIN) == 0)
+-		isp = ssp->smk_in;
+-	else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
+-		isp = ssp->smk_out;
+-	else
+-		return -EOPNOTSUPP;
++		if (strcmp(name, XATTR_SMACK_IPIN) == 0)
++			isp = ssp->smk_in;
++		else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
++			isp = ssp->smk_out;
++		else
++			return -EOPNOTSUPP;
++	}
+ 
+-	ilen = strlen(isp->smk_known);
+-	if (rc == 0) {
+-		*buffer = isp->smk_known;
+-		rc = ilen;
++	if (alloc) {
++		*buffer = kstrdup(isp->smk_known, GFP_KERNEL);
++		if (*buffer == NULL)
++			return -ENOMEM;
+ 	}
+ 
+-	return rc;
++	return strlen(isp->smk_known);
+ }
+ 
+ 
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index b554d7f9e3be..6163bf3e8177 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -872,14 +872,13 @@ static const struct file_operations snd_compr_file_ops = {
+ static int snd_compress_dev_register(struct snd_device *device)
+ {
+ 	int ret = -EINVAL;
+-	char str[16];
+ 	struct snd_compr *compr;
+ 
+ 	if (snd_BUG_ON(!device || !device->device_data))
+ 		return -EBADFD;
+ 	compr = device->device_data;
+ 
+-	pr_debug("reg %s for device %s, direction %d\n", str, compr->name,
++	pr_debug("reg device %s, direction %d\n", compr->name,
+ 			compr->direction);
+ 	/* register compressed device */
+ 	ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS,
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index b6f5f47048ba..5d2d3d63abcf 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1260,6 +1260,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+ 	struct snd_seq_client_port *port;
+ 	struct snd_seq_port_info info;
+ 	struct snd_seq_port_callback *callback;
++	int port_idx;
+ 
+ 	if (copy_from_user(&info, arg, sizeof(info)))
+ 		return -EFAULT;
+@@ -1273,7 +1274,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+ 		return -ENOMEM;
+ 
+ 	if (client->type == USER_CLIENT && info.kernel) {
+-		snd_seq_delete_port(client, port->addr.port);
++		port_idx = port->addr.port;
++		snd_seq_port_unlock(port);
++		snd_seq_delete_port(client, port_idx);
+ 		return -EINVAL;
+ 	}
+ 	if (client->type == KERNEL_CLIENT) {
+@@ -1294,6 +1297,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client,
+ 
+ 	snd_seq_set_port_info(port, &info);
+ 	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
++	snd_seq_port_unlock(port);
+ 
+ 	if (copy_to_user(arg, &info, sizeof(info)))
+ 		return -EFAULT;
+diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
+index 12ba83367b1b..ba5752ee9af3 100644
+--- a/sound/core/seq/seq_lock.c
++++ b/sound/core/seq/seq_lock.c
+@@ -23,8 +23,6 @@
+ #include <sound/core.h>
+ #include "seq_lock.h"
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
+-
+ /* wait until all locks are released */
+ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ {
+@@ -42,5 +40,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
+ }
+ 
+ EXPORT_SYMBOL(snd_use_lock_sync_helper);
+-
+-#endif
+diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
+index 54044bc2c9ef..ac38031c370e 100644
+--- a/sound/core/seq/seq_lock.h
++++ b/sound/core/seq/seq_lock.h
+@@ -3,8 +3,6 @@
+ 
+ #include <linux/sched.h>
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
+-
+ typedef atomic_t snd_use_lock_t;
+ 
+ /* initialize lock */
+@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
+ void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
+ #define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
+ 
+-#else /* SMP || CONFIG_SND_DEBUG */
+-
+-typedef spinlock_t snd_use_lock_t;	/* dummy */
+-#define snd_use_lock_init(lockp) /**/
+-#define snd_use_lock_use(lockp) /**/
+-#define snd_use_lock_free(lockp) /**/
+-#define snd_use_lock_sync(lockp) /**/
+-
+-#endif /* SMP || CONFIG_SND_DEBUG */
+-
+ #endif /* __SND_SEQ_LOCK_H */
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index fe686ee41c6d..f04714d70bf7 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp)
+ }
+ 
+ 
+-/* create a port, port number is returned (-1 on failure) */
++/* create a port, port number is returned (-1 on failure);
++ * the caller needs to unref the port via snd_seq_port_unlock() appropriately
++ */
+ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 						int port)
+ {
+@@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 	snd_use_lock_init(&new_port->use_lock);
+ 	port_subs_info_init(&new_port->c_src);
+ 	port_subs_info_init(&new_port->c_dest);
++	snd_use_lock_use(&new_port->use_lock);
+ 
+ 	num = port >= 0 ? port : 0;
+ 	mutex_lock(&client->ports_mutex);
+@@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ 	list_add_tail(&new_port->list, &p->list);
+ 	client->num_ports++;
+ 	new_port->addr.port = num;	/* store the port number in the port */
++	sprintf(new_port->name, "port-%d", num);
+ 	write_unlock_irqrestore(&client->ports_lock, flags);
+ 	mutex_unlock(&client->ports_mutex);
+-	sprintf(new_port->name, "port-%d", num);
+ 
+ 	return new_port;
+ }
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 81134e067184..3b126af4a026 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi,
+  * decode input event and put to read buffer of each opened file
+  */
+ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+-					 struct snd_seq_event *ev)
++					 struct snd_seq_event *ev,
++					 bool atomic)
+ {
+ 	struct snd_virmidi *vmidi;
+ 	unsigned char msg[4];
+ 	int len;
+ 
+-	read_lock(&rdev->filelist_lock);
++	if (atomic)
++		read_lock(&rdev->filelist_lock);
++	else
++		down_read(&rdev->filelist_sem);
+ 	list_for_each_entry(vmidi, &rdev->filelist, list) {
+ 		if (!vmidi->trigger)
+ 			continue;
+@@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
+ 				snd_rawmidi_receive(vmidi->substream, msg, len);
+ 		}
+ 	}
+-	read_unlock(&rdev->filelist_lock);
++	if (atomic)
++		read_unlock(&rdev->filelist_lock);
++	else
++		up_read(&rdev->filelist_sem);
+ 
+ 	return 0;
+ }
+@@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev)
+ 	struct snd_virmidi_dev *rdev;
+ 
+ 	rdev = rmidi->private_data;
+-	return snd_virmidi_dev_receive_event(rdev, ev);
++	return snd_virmidi_dev_receive_event(rdev, ev, true);
+ }
+ #endif  /*  0  */
+ 
+@@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct,
+ 	rdev = private_data;
+ 	if (!(rdev->flags & SNDRV_VIRMIDI_USE))
+ 		return 0; /* ignored */
+-	return snd_virmidi_dev_receive_event(rdev, ev);
++	return snd_virmidi_dev_receive_event(rdev, ev, atomic);
+ }
+ 
+ /*
+@@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
+ 	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 	struct snd_virmidi *vmidi;
+-	unsigned long flags;
+ 
+ 	vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL);
+ 	if (vmidi == NULL)
+@@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream)
+ 	vmidi->client = rdev->client;
+ 	vmidi->port = rdev->port;	
+ 	runtime->private_data = vmidi;
+-	write_lock_irqsave(&rdev->filelist_lock, flags);
++	down_write(&rdev->filelist_sem);
++	write_lock_irq(&rdev->filelist_lock);
+ 	list_add_tail(&vmidi->list, &rdev->filelist);
+-	write_unlock_irqrestore(&rdev->filelist_lock, flags);
++	write_unlock_irq(&rdev->filelist_lock);
++	up_write(&rdev->filelist_sem);
+ 	vmidi->rdev = rdev;
+ 	return 0;
+ }
+@@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
+ 	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+ 
++	down_write(&rdev->filelist_sem);
+ 	write_lock_irq(&rdev->filelist_lock);
+ 	list_del(&vmidi->list);
+ 	write_unlock_irq(&rdev->filelist_lock);
++	up_write(&rdev->filelist_sem);
+ 	snd_midi_event_free(vmidi->parser);
+ 	substream->runtime->private_data = NULL;
+ 	kfree(vmidi);
+@@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi
+ 	rdev->rmidi = rmidi;
+ 	rdev->device = device;
+ 	rdev->client = -1;
++	init_rwsem(&rdev->filelist_sem);
+ 	rwlock_init(&rdev->filelist_lock);
+ 	INIT_LIST_HEAD(&rdev->filelist);
+ 	rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH;
+diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
+index d3125c169684..065a69cf6118 100644
+--- a/sound/pci/au88x0/au88x0_core.c
++++ b/sound/pci/au88x0/au88x0_core.c
+@@ -2279,6 +2279,9 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 	} else {
+ 		int src[2], mix[2];
+ 
++		if (nr_ch < 1)
++			return -EINVAL;
++
+ 		/* Get SRC and MIXER hardware resources. */
+ 		for (i = 0; i < nr_ch; i++) {
+ 			if ((mix[i] =
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 57197bef5f5b..8cd701dbd52c 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -1958,7 +1958,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
+ 			return -1;
+ 		if (*step_to_check && *step_to_check != step) {
+ 			codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
+--				   *step_to_check, step);
++				   *step_to_check, step);
+ 			return -1;
+ 		}
+ 		*step_to_check = step;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a0e45ae0a628..55601ce89e0f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5771,6 +5771,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		ALC225_STANDARD_PINS,
+ 		{0x12, 0xb7a60130},
+ 		{0x1b, 0x90170110}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60140},
++		{0x14, 0x90170110},
++		{0x21, 0x02211020}),
++	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60140},
++		{0x14, 0x90170150},
++		{0x21, 0x02211020}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x40300000},
+diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
+index b871ba407e4e..4458190149d1 100644
+--- a/sound/usb/caiaq/device.c
++++ b/sound/usb/caiaq/device.c
+@@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
+ 
+ 	err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0);
+ 	if (err)
+-		return err;
++		goto err_kill_urb;
+ 
+-	if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ))
+-		return -ENODEV;
++	if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) {
++		err = -ENODEV;
++		goto err_kill_urb;
++	}
+ 
+ 	usb_string(usb_dev, usb_dev->descriptor.iManufacturer,
+ 		   cdev->vendor_name, CAIAQ_USB_STR_LEN);
+@@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev)
+ 
+ 	setup_card(cdev);
+ 	return 0;
++
++ err_kill_urb:
++	usb_kill_urb(&cdev->ep1_in_urb);
++	return err;
+ }
+ 
+ static int snd_probe(struct usb_interface *intf,
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 86cf7b585e01..4bfbbd63ae90 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -218,6 +218,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 	struct usb_interface_descriptor *altsd;
+ 	void *control_header;
+ 	int i, protocol;
++	int rest_bytes;
+ 
+ 	/* find audiocontrol interface */
+ 	host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0];
+@@ -232,6 +233,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 		return -EINVAL;
+ 	}
+ 
++	rest_bytes = (void *)(host_iface->extra + host_iface->extralen) -
++		control_header;
++
++	/* just to be sure -- this shouldn't hit at all */
++	if (rest_bytes <= 0) {
++		dev_err(&dev->dev, "invalid control header\n");
++		return -EINVAL;
++	}
++
+ 	switch (protocol) {
+ 	default:
+ 		dev_warn(&dev->dev,
+@@ -242,11 +252,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
+ 	case UAC_VERSION_1: {
+ 		struct uac1_ac_header_descriptor *h1 = control_header;
+ 
++		if (rest_bytes < sizeof(*h1)) {
++			dev_err(&dev->dev, "too short v1 buffer descriptor\n");
++			return -EINVAL;
++		}
++
+ 		if (!h1->bInCollection) {
+ 			dev_info(&dev->dev, "skipping empty audio interface (v1)\n");
+ 			return -EINVAL;
+ 		}
+ 
++		if (rest_bytes < h1->bLength) {
++			dev_err(&dev->dev, "invalid buffer length (v1)\n");
++			return -EINVAL;
++		}
++
+ 		if (h1->bLength < sizeof(*h1) + h1->bInCollection) {
+ 			dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n");
+ 			return -EINVAL;
+diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
+index 81b7da8e56d3..6ea0350fe097 100644
+--- a/sound/usb/line6/driver.c
++++ b/sound/usb/line6/driver.c
+@@ -586,9 +586,10 @@ int line6_probe(struct usb_interface *interface,
+ 	return 0;
+ 
+  error:
+-	if (line6->disconnect)
+-		line6->disconnect(line6);
+-	snd_card_free(card);
++	/* we can call disconnect callback here because no close-sync is
++	 * needed yet at this point
++	 */
++	line6_disconnect(interface);
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(line6_probe);
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 33c544acf3f6..cb0ef39adc36 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2155,6 +2155,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
+ 
+ static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
+ {
++	/* kill pending URBs */
++	snd_usb_mixer_disconnect(mixer);
++
+ 	kfree(mixer->id_elems);
+ 	if (mixer->urb) {
+ 		kfree(mixer->urb->transfer_buffer);
+@@ -2498,8 +2501,13 @@ _error:
+ 
+ void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer)
+ {
+-	usb_kill_urb(mixer->urb);
+-	usb_kill_urb(mixer->rc_urb);
++	if (mixer->disconnected)
++		return;
++	if (mixer->urb)
++		usb_kill_urb(mixer->urb);
++	if (mixer->rc_urb)
++		usb_kill_urb(mixer->rc_urb);
++	mixer->disconnected = true;
+ }
+ 
+ #ifdef CONFIG_PM
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 2b4b067646ab..545d99b09706 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -22,6 +22,8 @@ struct usb_mixer_interface {
+ 	struct urb *rc_urb;
+ 	struct usb_ctrlrequest *rc_setup_packet;
+ 	u8 rc_buffer[6];
++
++	bool disconnected;
+ };
+ 
+ #define MAX_CHANNELS	16	/* max logical channels */
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 693b2ac6720a..0cb245493d07 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1300,6 +1300,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+ 	case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
++	case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c
+index bf618e1500ac..e7b934f4d837 100644
+--- a/sound/usb/usx2y/usb_stream.c
++++ b/sound/usb/usx2y/usb_stream.c
+@@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
+ 	}
+ 
+ 	pg = get_order(read_size);
+-	sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
++	sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
++					  __GFP_NOWARN, pg);
+ 	if (!sk->s) {
+ 		snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
+ 		goto out;
+@@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk,
+ 	pg = get_order(write_size);
+ 
+ 	sk->write_page =
+-		(void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg);
++		(void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO|
++					 __GFP_NOWARN, pg);
+ 	if (!sk->write_page) {
+ 		snd_printk(KERN_WARNING "couldn't __get_free_pages()\n");
+ 		usb_stream_free(sk);
+diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
+index 88461f09cc86..8717c80f759c 100644
+--- a/tools/include/linux/compiler.h
++++ b/tools/include/linux/compiler.h
+@@ -37,4 +37,12 @@
+ 
+ #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
+ 
++#ifndef __fallthrough
++# if defined(__GNUC__) && __GNUC__ >= 7
++#  define __fallthrough __attribute__ ((fallthrough))
++# else
++#  define __fallthrough
++# endif
++#endif
++
+ #endif /* _TOOLS_LINUX_COMPILER_H */
+diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build
+index 928e110179cb..34faecf774ae 100644
+--- a/tools/perf/scripts/perl/Perf-Trace-Util/Build
++++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build
+@@ -1,3 +1,5 @@
+ libperf-y += Context.o
+ 
+-CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default
++CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
++CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
++CFLAGS_Context.o += -Wno-switch-default -Wno-shadow

diff --git a/1046_linux-4.1.47.patch b/1046_linux-4.1.47.patch
new file mode 100644
index 0000000..30005e4
--- /dev/null
+++ b/1046_linux-4.1.47.patch
@@ -0,0 +1,5346 @@
+diff --git a/Makefile b/Makefile
+index 1b4148baf398..c730c9719f6d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
+index 3743ca221d40..a04518090875 100644
+--- a/arch/arm/configs/omap2plus_defconfig
++++ b/arch/arm/configs/omap2plus_defconfig
+@@ -215,6 +215,7 @@ CONFIG_SERIO=m
+ CONFIG_SERIAL_8250=y
+ CONFIG_SERIAL_8250_CONSOLE=y
+ CONFIG_SERIAL_8250_NR_UARTS=32
++CONFIG_SERIAL_8250_RUNTIME_UARTS=6
+ CONFIG_SERIAL_8250_EXTENDED=y
+ CONFIG_SERIAL_8250_MANY_PORTS=y
+ CONFIG_SERIAL_8250_SHARE_IRQ=y
+diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
+index 3c4596d0ce6c..4f633276452a 100644
+--- a/arch/arm/include/asm/Kbuild
++++ b/arch/arm/include/asm/Kbuild
+@@ -36,4 +36,3 @@ generic-y += termbits.h
+ generic-y += termios.h
+ generic-y += timex.h
+ generic-y += trace_clock.h
+-generic-y += unaligned.h
+diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
+new file mode 100644
+index 000000000000..ab905ffcf193
+--- /dev/null
++++ b/arch/arm/include/asm/unaligned.h
+@@ -0,0 +1,27 @@
++#ifndef __ASM_ARM_UNALIGNED_H
++#define __ASM_ARM_UNALIGNED_H
++
++/*
++ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
++ * but we don't want to use linux/unaligned/access_ok.h since that can lead
++ * to traps on unaligned stm/ldm or strd/ldrd.
++ */
++#include <asm/byteorder.h>
++
++#if defined(__LITTLE_ENDIAN)
++# include <linux/unaligned/le_struct.h>
++# include <linux/unaligned/be_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned	__get_unaligned_le
++# define put_unaligned	__put_unaligned_le
++#elif defined(__BIG_ENDIAN)
++# include <linux/unaligned/be_struct.h>
++# include <linux/unaligned/le_byteshift.h>
++# include <linux/unaligned/generic.h>
++# define get_unaligned	__get_unaligned_be
++# define put_unaligned	__put_unaligned_be
++#else
++# error need to define endianness
++#endif
++
++#endif /* __ASM_ARM_UNALIGNED_H */
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index 3dce1a342030..6da8d9754450 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -132,30 +132,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ 	set_fs(fs);
+ }
+ 
+-static void dump_instr(const char *lvl, struct pt_regs *regs)
++static void __dump_instr(const char *lvl, struct pt_regs *regs)
+ {
+ 	unsigned long addr = instruction_pointer(regs);
+ 	const int thumb = thumb_mode(regs);
+ 	const int width = thumb ? 4 : 8;
+-	mm_segment_t fs;
+ 	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+ 	int i;
+ 
+ 	/*
+-	 * We need to switch to kernel mode so that we can use __get_user
+-	 * to safely read from kernel space.  Note that we now dump the
+-	 * code first, just in case the backtrace kills us.
++	 * Note that we now dump the code first, just in case the backtrace
++	 * kills us.
+ 	 */
+-	fs = get_fs();
+-	set_fs(KERNEL_DS);
+ 
+ 	for (i = -4; i < 1 + !!thumb; i++) {
+ 		unsigned int val, bad;
+ 
+ 		if (thumb)
+-			bad = __get_user(val, &((u16 *)addr)[i]);
++			bad = get_user(val, &((u16 *)addr)[i]);
+ 		else
+-			bad = __get_user(val, &((u32 *)addr)[i]);
++			bad = get_user(val, &((u32 *)addr)[i]);
+ 
+ 		if (!bad)
+ 			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
+@@ -166,8 +162,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ 		}
+ 	}
+ 	printk("%sCode: %s\n", lvl, str);
++}
+ 
+-	set_fs(fs);
++static void dump_instr(const char *lvl, struct pt_regs *regs)
++{
++	mm_segment_t fs;
++
++	if (!user_mode(regs)) {
++		fs = get_fs();
++		set_fs(KERNEL_DS);
++		__dump_instr(lvl, regs);
++		set_fs(fs);
++	} else {
++		__dump_instr(lvl, regs);
++	}
+ }
+ 
+ #ifdef CONFIG_ARM_UNWIND
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index af11511dda50..d9a044ff0a5d 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -372,7 +372,6 @@ static void pdata_quirks_check(struct pdata_init *quirks)
+ 		if (of_machine_is_compatible(quirks->compatible)) {
+ 			if (quirks->fn)
+ 				quirks->fn();
+-			break;
+ 		}
+ 		quirks++;
+ 	}
+diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
+index d897292712eb..4452d45f724f 100644
+--- a/arch/arm/mach-pxa/balloon3.c
++++ b/arch/arm/mach-pxa/balloon3.c
+@@ -17,6 +17,7 @@
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/sched.h>
+ #include <linux/bitops.h>
+ #include <linux/fb.h>
+diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c
+index 3aa264640c9d..caa66c06a7a3 100644
+--- a/arch/arm/mach-pxa/colibri-pxa270-income.c
++++ b/arch/arm/mach-pxa/colibri-pxa270-income.c
+@@ -17,6 +17,7 @@
+ #include <linux/gpio.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/ioport.h>
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
+index 89f790dda93e..d1f12909f740 100644
+--- a/arch/arm/mach-pxa/corgi.c
++++ b/arch/arm/mach-pxa/corgi.c
+@@ -18,6 +18,7 @@
+ #include <linux/major.h>
+ #include <linux/fs.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/mmc/host.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/pm.h>
+diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
+index 066e3a250ee0..5e50c53f1f4b 100644
+--- a/arch/arm/mach-pxa/trizeps4.c
++++ b/arch/arm/mach-pxa/trizeps4.c
+@@ -16,6 +16,7 @@
+ #include <linux/kernel.h>
+ #include <linux/platform_device.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/export.h>
+ #include <linux/sched.h>
+ #include <linux/bitops.h>
+diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
+index 54122a983ae3..2cce92924068 100644
+--- a/arch/arm/mach-pxa/vpac270.c
++++ b/arch/arm/mach-pxa/vpac270.c
+@@ -15,6 +15,7 @@
+ #include <linux/irq.h>
+ #include <linux/gpio_keys.h>
+ #include <linux/input.h>
++#include <linux/leds.h>
+ #include <linux/gpio.h>
+ #include <linux/usb/gpio_vbus.h>
+ #include <linux/mtd/mtd.h>
+diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
+index 6158566fa0f7..dc96cb9b7a89 100644
+--- a/arch/arm/mach-pxa/zeus.c
++++ b/arch/arm/mach-pxa/zeus.c
+@@ -13,6 +13,7 @@
+ 
+ #include <linux/cpufreq.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/irq.h>
+ #include <linux/pm.h>
+ #include <linux/gpio.h>
+diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
+index 77daea478e88..68019c393b83 100644
+--- a/arch/arm/mach-pxa/zylonite.c
++++ b/arch/arm/mach-pxa/zylonite.c
+@@ -16,6 +16,7 @@
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
++#include <linux/leds.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/gpio.h>
+diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
+index 498325074a06..0576be82b533 100644
+--- a/arch/arm/xen/mm.c
++++ b/arch/arm/xen/mm.c
+@@ -180,6 +180,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
+ 	.unmap_page = xen_swiotlb_unmap_page,
+ 	.dma_supported = xen_swiotlb_dma_supported,
+ 	.set_dma_mask = xen_swiotlb_set_dma_mask,
++	.mmap = xen_swiotlb_dma_mmap,
+ };
+ 
+ int __init xen_mm_init(void)
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 1ef2940df13c..8bbd57efae78 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -116,7 +116,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
+ 	for (i = -4; i < 1; i++) {
+ 		unsigned int val, bad;
+ 
+-		bad = __get_user(val, &((u32 *)addr)[i]);
++		bad = get_user(val, &((u32 *)addr)[i]);
+ 
+ 		if (!bad)
+ 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
+index be9ff1673ded..5afbb7b41160 100644
+--- a/arch/mips/ar7/platform.c
++++ b/arch/mips/ar7/platform.c
+@@ -577,6 +577,7 @@ static int __init ar7_register_uarts(void)
+ 	uart_port.type		= PORT_AR7;
+ 	uart_port.uartclk	= clk_get_rate(bus_clk) / 2;
+ 	uart_port.iotype	= UPIO_MEM32;
++	uart_port.flags		= UPF_FIXED_TYPE;
+ 	uart_port.regshift	= 2;
+ 
+ 	uart_port.line		= 0;
+@@ -655,6 +656,10 @@ static int __init ar7_register_devices(void)
+ 	u32 val;
+ 	int res;
+ 
++	res = ar7_gpio_init();
++	if (res)
++		pr_warn("unable to register gpios: %d\n", res);
++
+ 	res = ar7_register_uarts();
+ 	if (res)
+ 		pr_err("unable to setup uart(s): %d\n", res);
+diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
+index a23adc49d50f..36aabee9cba4 100644
+--- a/arch/mips/ar7/prom.c
++++ b/arch/mips/ar7/prom.c
+@@ -246,8 +246,6 @@ void __init prom_init(void)
+ 	ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
+ 	ar7_init_env((struct env_var *)fw_arg2);
+ 	console_config();
+-
+-	ar7_gpio_init();
+ }
+ 
+ #define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
+diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
+index 7c26b28bf252..859cf7048347 100644
+--- a/arch/mips/include/asm/asm.h
++++ b/arch/mips/include/asm/asm.h
+@@ -54,7 +54,8 @@
+ 		.align	2;				\
+ 		.type	symbol, @function;		\
+ 		.ent	symbol, 0;			\
+-symbol:		.frame	sp, 0, ra
++symbol:		.frame	sp, 0, ra;			\
++		.insn
+ 
+ /*
+  * NESTED - declare nested routine entry point
+@@ -63,8 +64,9 @@ symbol:		.frame	sp, 0, ra
+ 		.globl	symbol;				\
+ 		.align	2;				\
+ 		.type	symbol, @function;		\
+-		.ent	symbol, 0;			 \
+-symbol:		.frame	sp, framesize, rpc
++		.ent	symbol, 0;			\
++symbol:		.frame	sp, framesize, rpc;		\
++		.insn
+ 
+ /*
+  * END - mark end of function
+@@ -86,7 +88,7 @@ symbol:
+ #define FEXPORT(symbol)					\
+ 		.globl	symbol;				\
+ 		.type	symbol, @function;		\
+-symbol:
++symbol:		.insn
+ 
+ /*
+  * ABS - export absolute symbol
+diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
+index 59c0901bdd84..cd74e7fae9e4 100644
+--- a/arch/mips/include/asm/mips-cm.h
++++ b/arch/mips/include/asm/mips-cm.h
+@@ -173,8 +173,8 @@ BUILD_CM_Cx_R_(tcid_8_priority,	0x80)
+ #define CM_GCR_BASE_GCRBASE_MSK			(_ULCAST_(0x1ffff) << 15)
+ #define CM_GCR_BASE_CMDEFTGT_SHF		0
+ #define CM_GCR_BASE_CMDEFTGT_MSK		(_ULCAST_(0x3) << 0)
+-#define  CM_GCR_BASE_CMDEFTGT_DISABLED		0
+-#define  CM_GCR_BASE_CMDEFTGT_MEM		1
++#define  CM_GCR_BASE_CMDEFTGT_MEM		0
++#define  CM_GCR_BASE_CMDEFTGT_RESERVED		1
+ #define  CM_GCR_BASE_CMDEFTGT_IOCU0		2
+ #define  CM_GCR_BASE_CMDEFTGT_IOCU1		3
+ 
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index fc537d1b649d..ded8b8ba34fd 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -48,9 +48,7 @@
+ #ifdef CONFIG_HOTPLUG_CPU
+ void arch_cpu_idle_dead(void)
+ {
+-	/* What the heck is this check doing ? */
+-	if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
+-		play_dead();
++	play_dead();
+ }
+ #endif
+ 
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index be73c491182b..51e77841f9f6 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -147,6 +147,35 @@ void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_add
+ 	add_memory_region(start, size, BOOT_MEM_RAM);
+ }
+ 
++bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
++{
++	int i;
++	bool in_ram = false, free = true;
++
++	for (i = 0; i < boot_mem_map.nr_map; i++) {
++		phys_addr_t start_, end_;
++
++		start_ = boot_mem_map.map[i].addr;
++		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;
++
++		switch (boot_mem_map.map[i].type) {
++		case BOOT_MEM_RAM:
++			if (start >= start_ && start + size <= end_)
++				in_ram = true;
++			break;
++		case BOOT_MEM_RESERVED:
++			if ((start >= start_ && start < end_) ||
++			    (start < start_ && start + size >= start_))
++				free = false;
++			break;
++		default:
++			continue;
++		}
++	}
++
++	return in_ram && free;
++}
++
+ static void __init print_memory_map(void)
+ {
+ 	int i;
+@@ -295,11 +324,19 @@ static void __init bootmem_init(void)
+ 
+ #else  /* !CONFIG_SGI_IP27 */
+ 
++static unsigned long __init bootmap_bytes(unsigned long pages)
++{
++	unsigned long bytes = DIV_ROUND_UP(pages, 8);
++
++	return ALIGN(bytes, sizeof(long));
++}
++
+ static void __init bootmem_init(void)
+ {
+ 	unsigned long reserved_end;
+ 	unsigned long mapstart = ~0UL;
+ 	unsigned long bootmap_size;
++	bool bootmap_valid = false;
+ 	int i;
+ 
+ 	/*
+@@ -375,11 +412,42 @@ static void __init bootmem_init(void)
+ #endif
+ 
+ 	/*
+-	 * Initialize the boot-time allocator with low memory only.
++	 * check that mapstart doesn't overlap with any of
++	 * memory regions that have been reserved through eg. DTB
+ 	 */
+-	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
+-					 min_low_pfn, max_low_pfn);
++	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
++
++	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
++						bootmap_size);
++	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
++		unsigned long mapstart_addr;
++
++		switch (boot_mem_map.map[i].type) {
++		case BOOT_MEM_RESERVED:
++			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
++						boot_mem_map.map[i].size);
++			if (PHYS_PFN(mapstart_addr) < mapstart)
++				break;
++
++			bootmap_valid = memory_region_available(mapstart_addr,
++								bootmap_size);
++			if (bootmap_valid)
++				mapstart = PHYS_PFN(mapstart_addr);
++			break;
++		default:
++			break;
++		}
++	}
+ 
++	if (!bootmap_valid)
++		panic("No memory area to place a bootmap bitmap");
++
++	/*
++	 * Initialize the boot-time allocator with low memory only.
++	 */
++	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
++					 min_low_pfn, max_low_pfn))
++		panic("Unexpected memory size required for bootmap");
+ 
+ 	for (i = 0; i < boot_mem_map.nr_map; i++) {
+ 		unsigned long start, end;
+@@ -428,6 +496,10 @@ static void __init bootmem_init(void)
+ 			continue;
+ 		default:
+ 			/* Not usable memory */
++			if (start > min_low_pfn && end < max_low_pfn)
++				reserve_bootmem(boot_mem_map.map[i].addr,
++						boot_mem_map.map[i].size,
++						BOOTMEM_DEFAULT);
+ 			continue;
+ 		}
+ 
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index a0268f61cd57..51d3c301dae9 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -63,6 +63,9 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+ 
++static DECLARE_COMPLETION(cpu_starting);
++static DECLARE_COMPLETION(cpu_running);
++
+ /*
+  * A logical cpu mask containing only one VPE per core to
+  * reduce the number of IPIs on large MT systems.
+@@ -172,9 +175,12 @@ asmlinkage void start_secondary(void)
+ 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ 	notify_cpu_starting(cpu);
+ 
+-	cpumask_set_cpu(cpu, &cpu_callin_map);
++	/* Notify boot CPU that we're starting & ready to sync counters */
++	complete(&cpu_starting);
++
+ 	synchronise_count_slave(cpu);
+ 
++	/* The CPU is running and counters synchronised, now mark it online */
+ 	set_cpu_online(cpu, true);
+ 
+ 	set_cpu_sibling_map(cpu);
+@@ -182,6 +188,12 @@ asmlinkage void start_secondary(void)
+ 
+ 	calculate_cpu_foreign_map();
+ 
++	/*
++	 * Notify boot CPU that we're up & online and it can safely return
++	 * from __cpu_up
++	 */
++	complete(&cpu_running);
++
+ 	/*
+ 	 * irq will be enabled in ->smp_finish(), enabling it too early
+ 	 * is dangerous.
+@@ -250,22 +262,23 @@ void smp_prepare_boot_cpu(void)
+ {
+ 	set_cpu_possible(0, true);
+ 	set_cpu_online(0, true);
+-	cpumask_set_cpu(0, &cpu_callin_map);
+ }
+ 
+ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+ {
+ 	mp_ops->boot_secondary(cpu, tidle);
+ 
+-	/*
+-	 * Trust is futile.  We should really have timeouts ...
+-	 */
+-	while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
+-		udelay(100);
+-		schedule();
++	/* Wait for CPU to start and be ready to sync counters */
++	if (!wait_for_completion_timeout(&cpu_starting,
++					 msecs_to_jiffies(1000))) {
++		pr_crit("CPU%u: failed to start\n", cpu);
++		return -EIO;
+ 	}
+ 
+ 	synchronise_count_master(cpu);
++
++	/* Wait for CPU to finish startup & mark itself online before return */
++	wait_for_completion(&cpu_running);
+ 	return 0;
+ }
+ 
+diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
+index d78178daea4b..e2fe48dd67b5 100644
+--- a/arch/mips/mm/uasm-micromips.c
++++ b/arch/mips/mm/uasm-micromips.c
+@@ -75,7 +75,7 @@ static struct insn insn_table_MM[] = {
+ 	{ insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS },
+ 	{ insn_lb, M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ 	{ insn_ld, 0, 0 },
+-	{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RS | RS | SIMM },
++	{ insn_lh, M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
+ 	{ insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM },
+ 	{ insn_lld, 0, 0 },
+ 	{ insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM },
+diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
+index 5f5d18b0e94d..14e368d88dac 100644
+--- a/arch/mips/netlogic/common/irq.c
++++ b/arch/mips/netlogic/common/irq.c
+@@ -275,7 +275,7 @@ asmlinkage void plat_irq_dispatch(void)
+ 	do_IRQ(nlm_irq_to_xirq(node, i));
+ }
+ 
+-#ifdef CONFIG_OF
++#ifdef CONFIG_CPU_XLP
+ static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+ 	.xlate = irq_domain_xlate_onetwocell,
+ };
+@@ -348,7 +348,7 @@ void __init arch_init_irq(void)
+ #if defined(CONFIG_CPU_XLR)
+ 	nlm_setup_fmn_irq();
+ #endif
+-#if defined(CONFIG_OF)
++#ifdef CONFIG_CPU_XLP
+ 	of_irq_init(xlp_pic_irq_ids);
+ #endif
+ }
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 190cc48abc0c..4b8c928a9873 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -1079,11 +1079,6 @@ source "arch/powerpc/Kconfig.debug"
+ 
+ source "security/Kconfig"
+ 
+-config KEYS_COMPAT
+-	bool
+-	depends on COMPAT && KEYS
+-	default y
+-
+ source "crypto/Kconfig"
+ 
+ config PPC_LIB_RHEAP
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+index 00e45b6d4f24..1c0f762c7613 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
+@@ -282,6 +282,7 @@ static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
+ 		 */
+ 		if (reject && reject != XICS_IPI) {
+ 			arch_spin_unlock(&ics->lock);
++			icp->n_reject++;
+ 			new_irq = reject;
+ 			goto again;
+ 		}
+@@ -613,10 +614,8 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+ 	state = &ics->irq_state[src];
+ 
+ 	/* Still asserted, resend it */
+-	if (state->asserted) {
+-		icp->n_reject++;
++	if (state->asserted)
+ 		icp_rm_deliver_irq(xics, icp, irq);
+-	}
+ 
+ 	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
+ 		icp->rm_action |= XICS_RM_NOTIFY_EOI;
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index b06dc3839268..a37c902c123f 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -338,9 +338,6 @@ config COMPAT
+ config SYSVIPC_COMPAT
+ 	def_bool y if COMPAT && SYSVIPC
+ 
+-config KEYS_COMPAT
+-	def_bool y if COMPAT && KEYS
+-
+ config SMP
+ 	def_bool y
+ 	prompt "Symmetric multi-processing support"
+diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+index 538c10db3537..8dc315b212c2 100644
+--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
++++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+@@ -165,7 +165,6 @@ static struct plat_sci_port scif2_platform_data = {
+ 	.scscr		= SCSCR_TE | SCSCR_RE,
+ 	.type		= PORT_IRDA,
+ 	.ops		= &sh770x_sci_port_ops,
+-	.regshift	= 1,
+ };
+ 
+ static struct resource scif2_resources[] = {
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index a1c2ecc4bec7..6fd39d987b05 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -549,9 +549,6 @@ config SYSVIPC_COMPAT
+ 	depends on COMPAT && SYSVIPC
+ 	default y
+ 
+-config KEYS_COMPAT
+-	def_bool y if COMPAT && KEYS
+-
+ endmenu
+ 
+ source "net/Kconfig"
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 226d5696e1d1..a3d283addbde 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2552,10 +2552,6 @@ config COMPAT_FOR_U64_ALIGNMENT
+ config SYSVIPC_COMPAT
+ 	def_bool y
+ 	depends on SYSVIPC
+-
+-config KEYS_COMPAT
+-	def_bool y
+-	depends on KEYS
+ endif
+ 
+ endmenu
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+index 85c4e1cf7172..e1693457c178 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
++++ b/arch/x86/crypto/sha-mb/sha1_mb_mgr_flush_avx2.S
+@@ -174,8 +174,8 @@ LABEL skip_ %I
+ .endr
+ 
+ 	# Find min length
+-	vmovdqa _lens+0*16(state), %xmm0
+-	vmovdqa _lens+1*16(state), %xmm1
++	vmovdqu _lens+0*16(state), %xmm0
++	vmovdqu _lens+1*16(state), %xmm1
+ 
+ 	vpminud %xmm1, %xmm0, %xmm2     # xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
+@@ -195,8 +195,8 @@ LABEL skip_ %I
+ 	vpsubd  %xmm2, %xmm0, %xmm0
+ 	vpsubd  %xmm2, %xmm1, %xmm1
+ 
+-	vmovdqa %xmm0, _lens+0*16(state)
+-	vmovdqa %xmm1, _lens+1*16(state)
++	vmovdqu %xmm0, _lens+0*16(state)
++	vmovdqu %xmm1, _lens+1*16(state)
+ 
+ 	# "state" and "args" are the same address, arg1
+ 	# len is arg2
+@@ -260,8 +260,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
+ 	jc      .return_null
+ 
+ 	# Find min length
+-	vmovdqa _lens(state), %xmm0
+-	vmovdqa _lens+1*16(state), %xmm1
++	vmovdqu _lens(state), %xmm0
++	vmovdqu _lens+1*16(state), %xmm1
+ 
+ 	vpminud %xmm1, %xmm0, %xmm2        # xmm2 has {D,C,B,A}
+ 	vpalignr $8, %xmm2, %xmm3, %xmm3   # xmm3 has {x,x,D,C}
+diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
+index 57a9d94fe160..cba3ef3c690d 100644
+--- a/arch/x86/include/asm/kvm_emulate.h
++++ b/arch/x86/include/asm/kvm_emulate.h
+@@ -277,6 +277,7 @@ struct x86_emulate_ctxt {
+ 	bool guest_mode; /* guest running a nested guest */
+ 	bool perm_ok; /* do not check permissions if true */
+ 	bool ud;	/* inject an #UD if host doesn't support insn */
++	bool tf;	/* TF value before instruction (after for syscall/sysret) */
+ 
+ 	bool have_exception;
+ 	struct x86_exception exception;
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 630bcb0d7a04..d877a59f8de8 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2400,6 +2400,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
+ 		ctxt->eflags &= ~(X86_EFLAGS_VM | X86_EFLAGS_IF);
+ 	}
+ 
++	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
+ 	return X86EMUL_CONTINUE;
+ }
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9d7ea42482e3..5f9cf11f9446 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -5132,6 +5132,8 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
+ 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+ 
+ 	ctxt->eflags = kvm_get_rflags(vcpu);
++	ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
++
+ 	ctxt->eip = kvm_rip_read(vcpu);
+ 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
+ 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
+@@ -5322,37 +5324,26 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
+ 	return dr6;
+ }
+ 
+-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
++static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
+ {
+ 	struct kvm_run *kvm_run = vcpu->run;
+ 
+-	/*
+-	 * rflags is the old, "raw" value of the flags.  The new value has
+-	 * not been saved yet.
+-	 *
+-	 * This is correct even for TF set by the guest, because "the
+-	 * processor will not generate this exception after the instruction
+-	 * that sets the TF flag".
+-	 */
+-	if (unlikely(rflags & X86_EFLAGS_TF)) {
+-		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+-			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
+-						  DR6_RTM;
+-			kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+-			kvm_run->debug.arch.exception = DB_VECTOR;
+-			kvm_run->exit_reason = KVM_EXIT_DEBUG;
+-			*r = EMULATE_USER_EXIT;
+-		} else {
+-			vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
+-			/*
+-			 * "Certain debug exceptions may clear bit 0-3.  The
+-			 * remaining contents of the DR6 register are never
+-			 * cleared by the processor".
+-			 */
+-			vcpu->arch.dr6 &= ~15;
+-			vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
+-			kvm_queue_exception(vcpu, DB_VECTOR);
+-		}
++	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
++		kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
++		kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
++		kvm_run->debug.arch.exception = DB_VECTOR;
++		kvm_run->exit_reason = KVM_EXIT_DEBUG;
++		*r = EMULATE_USER_EXIT;
++	} else {
++		vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
++		/*
++		 * "Certain debug exceptions may clear bit 0-3.  The
++		 * remaining contents of the DR6 register are never
++		 * cleared by the processor".
++		 */
++		vcpu->arch.dr6 &= ~15;
++		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
++		kvm_queue_exception(vcpu, DB_VECTOR);
+ 	}
+ }
+ 
+@@ -5507,8 +5498,9 @@ restart:
+ 		toggle_interruptibility(vcpu, ctxt->interruptibility);
+ 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+ 		kvm_rip_write(vcpu, ctxt->eip);
+-		if (r == EMULATE_DONE)
+-			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
++		if (r == EMULATE_DONE &&
++		    (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
++			kvm_vcpu_do_singlestep(vcpu, &r);
+ 		if (!ctxt->have_exception ||
+ 		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+ 			__kvm_set_rflags(vcpu, ctxt->eflags);
+diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
+index d90528ea5412..12c051d19e4b 100644
+--- a/arch/x86/oprofile/op_model_ppro.c
++++ b/arch/x86/oprofile/op_model_ppro.c
+@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
+ 	eax.full = cpuid_eax(0xa);
+ 
+ 	/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
+-	if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
+-		__this_cpu_read(cpu_info.x86_model) == 15) {
++	if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
++	    boot_cpu_data.x86_model == 15) {
+ 		eax.split.version_id = 2;
+ 		eax.split.num_counters = 2;
+ 		eax.split.bit_width = 40;
+diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
+index 9dca4b995be0..d9a94bee1905 100644
+--- a/drivers/ata/Kconfig
++++ b/drivers/ata/Kconfig
+@@ -245,6 +245,7 @@ config SATA_SX4
+ 
+ config ATA_BMDMA
+ 	bool "ATA BMDMA support"
++	depends on HAS_DMA
+ 	default y
+ 	help
+ 	  This option adds support for SFF ATA controllers with BMDMA
+@@ -291,6 +292,7 @@ config SATA_DWC_VDEBUG
+ 
+ config SATA_HIGHBANK
+ 	tristate "Calxeda Highbank SATA support"
++	depends on HAS_DMA
+ 	depends on ARCH_HIGHBANK || COMPILE_TEST
+ 	help
+ 	  This option enables support for the Calxeda Highbank SoC's
+@@ -300,6 +302,7 @@ config SATA_HIGHBANK
+ 
+ config SATA_MV
+ 	tristate "Marvell SATA support"
++	depends on HAS_DMA
+ 	depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
+ 		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
+ 	select GENERIC_PHY
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 89ecec13c567..4e83ece479d2 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2712,7 +2712,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
+ 	 * from the parent.
+ 	 */
+ 	page_count = (u32)calc_pages_for(0, length);
+-	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
++	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
+ 	if (IS_ERR(pages)) {
+ 		result = PTR_ERR(pages);
+ 		pages = NULL;
+@@ -2839,7 +2839,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
+ 	 */
+ 	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
+ 	page_count = (u32)calc_pages_for(0, size);
+-	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
++	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
+ 	if (IS_ERR(pages))
+ 		return PTR_ERR(pages);
+ 
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 1d8c6cb89c7f..c5f0e2b7668c 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -1394,33 +1394,34 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
+ static void make_response(struct xen_blkif *blkif, u64 id,
+ 			  unsigned short op, int st)
+ {
+-	struct blkif_response  resp;
++	struct blkif_response *resp;
+ 	unsigned long     flags;
+ 	union blkif_back_rings *blk_rings = &blkif->blk_rings;
+ 	int notify;
+ 
+-	resp.id        = id;
+-	resp.operation = op;
+-	resp.status    = st;
+-
+ 	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+ 	/* Place on the response ring for the relevant domain. */
+ 	switch (blkif->blk_protocol) {
+ 	case BLKIF_PROTOCOL_NATIVE:
+-		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+-		       &resp, sizeof(resp));
++		resp = RING_GET_RESPONSE(&blk_rings->native,
++					 blk_rings->native.rsp_prod_pvt);
+ 		break;
+ 	case BLKIF_PROTOCOL_X86_32:
+-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+-		       &resp, sizeof(resp));
++		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
++					 blk_rings->x86_32.rsp_prod_pvt);
+ 		break;
+ 	case BLKIF_PROTOCOL_X86_64:
+-		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+-		       &resp, sizeof(resp));
++		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
++					 blk_rings->x86_64.rsp_prod_pvt);
+ 		break;
+ 	default:
+ 		BUG();
+ 	}
++
++	resp->id        = id;
++	resp->operation = op;
++	resp->status    = st;
++
+ 	blk_rings->common.rsp_prod_pvt++;
+ 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+ 	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
+diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
+index f620b5d3f77c..5fa081c7ef8e 100644
+--- a/drivers/block/xen-blkback/common.h
++++ b/drivers/block/xen-blkback/common.h
+@@ -64,9 +64,8 @@
+ struct blkif_common_request {
+ 	char dummy;
+ };
+-struct blkif_common_response {
+-	char dummy;
+-};
++
++/* i386 protocol version */
+ 
+ struct blkif_x86_32_request_rw {
+ 	uint8_t        nr_segments;  /* number of segments                   */
+@@ -118,14 +117,6 @@ struct blkif_x86_32_request {
+ 	} u;
+ } __attribute__((__packed__));
+ 
+-/* i386 protocol version */
+-#pragma pack(push, 4)
+-struct blkif_x86_32_response {
+-	uint64_t        id;              /* copied from request */
+-	uint8_t         operation;       /* copied from request */
+-	int16_t         status;          /* BLKIF_RSP_???       */
+-};
+-#pragma pack(pop)
+ /* x86_64 protocol version */
+ 
+ struct blkif_x86_64_request_rw {
+@@ -182,18 +173,12 @@ struct blkif_x86_64_request {
+ 	} u;
+ } __attribute__((__packed__));
+ 
+-struct blkif_x86_64_response {
+-	uint64_t       __attribute__((__aligned__(8))) id;
+-	uint8_t         operation;       /* copied from request */
+-	int16_t         status;          /* BLKIF_RSP_???       */
+-};
+-
+ DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
+-		  struct blkif_common_response);
++		  struct blkif_response);
+ DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
+-		  struct blkif_x86_32_response);
++		  struct blkif_response __packed);
+ DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
+-		  struct blkif_x86_64_response);
++		  struct blkif_response);
+ 
+ union blkif_back_rings {
+ 	struct blkif_back_ring        native;
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 5da703c65d93..e10f28fc50ee 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -4019,7 +4019,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
+ }
+ 
+ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+-			      struct list_head *timeouts, long timeout_period,
++			      struct list_head *timeouts,
++			      unsigned long timeout_period,
+ 			      int slot, unsigned long *flags,
+ 			      unsigned int *waiting_msgs)
+ {
+@@ -4032,8 +4033,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ 	if (!ent->inuse)
+ 		return;
+ 
+-	ent->timeout -= timeout_period;
+-	if (ent->timeout > 0) {
++	if (timeout_period < ent->timeout) {
++		ent->timeout -= timeout_period;
+ 		(*waiting_msgs)++;
+ 		return;
+ 	}
+@@ -4099,7 +4100,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
+ 	}
+ }
+ 
+-static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
++static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
++					 unsigned long timeout_period)
+ {
+ 	struct list_head     timeouts;
+ 	struct ipmi_recv_msg *msg, *msg2;
+diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
+index 220ee49633e4..897ec0f8d718 100644
+--- a/drivers/dma/dmatest.c
++++ b/drivers/dma/dmatest.c
+@@ -634,6 +634,7 @@ static int dmatest_func(void *data)
+ 			 * free it this time?" dancing.  For now, just
+ 			 * leave it dangling.
+ 			 */
++			WARN(1, "dmatest: Kernel stack may be corrupted!!\n");
+ 			dmaengine_unmap_put(um);
+ 			result("test timed out", total_tests, src_off, dst_off,
+ 			       len, 0);
+diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
+index 11c6757b6c40..ccb21f2b8cbf 100644
+--- a/drivers/extcon/extcon-palmas.c
++++ b/drivers/extcon/extcon-palmas.c
+@@ -150,6 +150,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
+ 	struct palmas_usb *palmas_usb;
+ 	int status;
+ 
++	if (!palmas) {
++		dev_err(&pdev->dev, "failed to get valid parent\n");
++		return -EINVAL;
++	}
++
+ 	palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
+ 	if (!palmas_usb)
+ 		return -ENOMEM;
+diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
+index 48f7359e2a6b..2859161f3478 100644
+--- a/drivers/gpu/drm/drm_drv.c
++++ b/drivers/gpu/drm/drm_drv.c
+@@ -325,7 +325,7 @@ static int drm_minor_register(struct drm_device *dev, unsigned int type)
+ 	ret = drm_debugfs_init(minor, minor->index, drm_debugfs_root);
+ 	if (ret) {
+ 		DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
+-		return ret;
++		goto err_debugfs;
+ 	}
+ 
+ 	ret = device_add(minor->kdev);
+diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
+index f6b283b8375e..d8352e47774d 100644
+--- a/drivers/gpu/drm/mgag200/mgag200_main.c
++++ b/drivers/gpu/drm/mgag200/mgag200_main.c
+@@ -138,6 +138,8 @@ static int mga_vram_init(struct mga_device *mdev)
+ 	}
+ 
+ 	mem = pci_iomap(mdev->dev->pdev, 0, 0);
++	if (!mem)
++		return -ENOMEM;
+ 
+ 	mdev->mc.vram_size = mga_probe_vram(mdev, mem);
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 4a45ae01cc3e..258ec8644dee 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -34,10 +34,13 @@ static inline void __user *to_user_ptr(u64 address)
+ }
+ 
+ static struct msm_gem_submit *submit_create(struct drm_device *dev,
+-		struct msm_gpu *gpu, int nr)
++		struct msm_gpu *gpu, uint32_t nr)
+ {
+ 	struct msm_gem_submit *submit;
+-	int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
++	uint64_t sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0]));
++
++	if (sz > SIZE_MAX)
++		return NULL;
+ 
+ 	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ 	if (submit) {
+diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
+index 9564f2568e2c..73d49d0eb05d 100644
+--- a/drivers/gpu/drm/sti/sti_vtg.c
++++ b/drivers/gpu/drm/sti/sti_vtg.c
+@@ -328,6 +328,10 @@ static int vtg_probe(struct platform_device *pdev)
+ 		return -ENOMEM;
+ 	}
+ 	vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
++	if (!vtg->regs) {
++		DRM_ERROR("failed to remap I/O memory\n");
++		return -ENOMEM;
++	}
+ 
+ 	np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
+ 	if (np) {
+diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
+index d7e3af671543..d8803c3bbfdc 100644
+--- a/drivers/i2c/busses/i2c-riic.c
++++ b/drivers/i2c/busses/i2c-riic.c
+@@ -80,6 +80,7 @@
+ #define ICIER_TEIE	0x40
+ #define ICIER_RIE	0x20
+ #define ICIER_NAKIE	0x10
++#define ICIER_SPIE	0x08
+ 
+ #define ICSR2_NACKF	0x10
+ 
+@@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
+ 		return IRQ_NONE;
+ 	}
+ 
+-	if (riic->is_last || riic->err)
++	if (riic->is_last || riic->err) {
++		riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ 		writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+-
+-	writeb(0, riic->base + RIIC_ICIER);
+-	complete(&riic->msg_done);
++	}
+ 
+ 	return IRQ_HANDLED;
+ }
+@@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
+ 
+ 	if (riic->bytes_left == 1) {
+ 		/* STOP must come before we set ACKBT! */
+-		if (riic->is_last)
++		if (riic->is_last) {
++			riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ 			writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
++		}
+ 
+ 		riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
+ 
+-		writeb(0, riic->base + RIIC_ICIER);
+-		complete(&riic->msg_done);
+ 	} else {
+ 		riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
+ 	}
+@@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
+ 	return IRQ_HANDLED;
+ }
+ 
++static irqreturn_t riic_stop_isr(int irq, void *data)
++{
++	struct riic_dev *riic = data;
++
++	/* read back registers to confirm writes have fully propagated */
++	writeb(0, riic->base + RIIC_ICSR2);
++	readb(riic->base + RIIC_ICSR2);
++	writeb(0, riic->base + RIIC_ICIER);
++	readb(riic->base + RIIC_ICIER);
++
++	complete(&riic->msg_done);
++
++	return IRQ_HANDLED;
++}
++
+ static u32 riic_func(struct i2c_adapter *adap)
+ {
+ 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+@@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
+ 	{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
+ 	{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
+ 	{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
++	{ .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
+ 	{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
+ };
+ 
+diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
+index 572bc6f02ca8..e18f12b74610 100644
+--- a/drivers/iio/trigger/iio-trig-interrupt.c
++++ b/drivers/iio/trigger/iio-trig-interrupt.c
+@@ -58,7 +58,7 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
+ 	trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+ 	if (!trig_info) {
+ 		ret = -ENOMEM;
+-		goto error_put_trigger;
++		goto error_free_trigger;
+ 	}
+ 	iio_trigger_set_drvdata(trig, trig_info);
+ 	trig_info->irq = irq;
+@@ -83,8 +83,8 @@ error_release_irq:
+ 	free_irq(irq, trig);
+ error_free_trig_info:
+ 	kfree(trig_info);
+-error_put_trigger:
+-	iio_trigger_put(trig);
++error_free_trigger:
++	iio_trigger_free(trig);
+ error_ret:
+ 	return ret;
+ }
+@@ -99,7 +99,7 @@ static int iio_interrupt_trigger_remove(struct platform_device *pdev)
+ 	iio_trigger_unregister(trig);
+ 	free_irq(trig_info->irq, trig);
+ 	kfree(trig_info);
+-	iio_trigger_put(trig);
++	iio_trigger_free(trig);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
+index 3dfab2bc6d69..202e8b89caf2 100644
+--- a/drivers/iio/trigger/iio-trig-sysfs.c
++++ b/drivers/iio/trigger/iio-trig-sysfs.c
+@@ -174,7 +174,7 @@ static int iio_sysfs_trigger_probe(int id)
+ 	return 0;
+ 
+ out2:
+-	iio_trigger_put(t->trig);
++	iio_trigger_free(t->trig);
+ free_t:
+ 	kfree(t);
+ out1:
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+index 2e61df01c0ac..082c1a2b93af 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+@@ -1362,7 +1362,7 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
+ 
+ 	while (!list_empty(&priv->cm.reap_list)) {
+ 		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
+-		list_del(&p->list);
++		list_del_init(&p->list);
+ 		spin_unlock_irqrestore(&priv->lock, flags);
+ 		netif_tx_unlock_bh(dev);
+ 		ipoib_cm_tx_destroy(p);
+diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
+index 3aa2ec45bcab..b5e7317ee1c1 100644
+--- a/drivers/input/keyboard/mpr121_touchkey.c
++++ b/drivers/input/keyboard/mpr121_touchkey.c
+@@ -87,7 +87,8 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+ 	struct mpr121_touchkey *mpr121 = dev_id;
+ 	struct i2c_client *client = mpr121->client;
+ 	struct input_dev *input = mpr121->input_dev;
+-	unsigned int key_num, key_val, pressed;
++	unsigned long bit_changed;
++	unsigned int key_num;
+ 	int reg;
+ 
+ 	reg = i2c_smbus_read_byte_data(client, ELE_TOUCH_STATUS_1_ADDR);
+@@ -105,18 +106,22 @@ static irqreturn_t mpr_touchkey_interrupt(int irq, void *dev_id)
+ 
+ 	reg &= TOUCH_STATUS_MASK;
+ 	/* use old press bit to figure out which bit changed */
+-	key_num = ffs(reg ^ mpr121->statusbits) - 1;
+-	pressed = reg & (1 << key_num);
++	bit_changed = reg ^ mpr121->statusbits;
+ 	mpr121->statusbits = reg;
++	for_each_set_bit(key_num, &bit_changed, mpr121->keycount) {
++		unsigned int key_val, pressed;
+ 
+-	key_val = mpr121->keycodes[key_num];
++		pressed = reg & BIT(key_num);
++		key_val = mpr121->keycodes[key_num];
+ 
+-	input_event(input, EV_MSC, MSC_SCAN, key_num);
+-	input_report_key(input, key_val, pressed);
+-	input_sync(input);
++		input_event(input, EV_MSC, MSC_SCAN, key_num);
++		input_report_key(input, key_val, pressed);
++
++		dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
++			pressed ? "pressed" : "released");
+ 
+-	dev_dbg(&client->dev, "key %d %d %s\n", key_num, key_val,
+-		pressed ? "pressed" : "released");
++	}
++	input_sync(input);
+ 
+ out:
+ 	return IRQ_HANDLED;
+@@ -231,6 +236,7 @@ static int mpr_touchkey_probe(struct i2c_client *client,
+ 	input_dev->id.bustype = BUS_I2C;
+ 	input_dev->dev.parent = &client->dev;
+ 	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
++	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
+ 
+ 	input_dev->keycode = mpr121->keycodes;
+ 	input_dev->keycodesize = sizeof(mpr121->keycodes[0]);
+diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
+index f4e8fbec6a94..b5304e264881 100644
+--- a/drivers/input/misc/ims-pcu.c
++++ b/drivers/input/misc/ims-pcu.c
+@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
+ 		return NULL;
+ 	}
+ 
+-	while (buflen > 0) {
++	while (buflen >= sizeof(*union_desc)) {
+ 		union_desc = (struct usb_cdc_union_desc *)buf;
+ 
++		if (union_desc->bLength > buflen) {
++			dev_err(&intf->dev, "Too large descriptor\n");
++			return NULL;
++		}
++
+ 		if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
+ 		    union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
+ 			dev_dbg(&intf->dev, "Found union header\n");
+-			return union_desc;
++
++			if (union_desc->bLength >= sizeof(*union_desc))
++				return union_desc;
++
++			dev_err(&intf->dev,
++				"Union descriptor too short (%d vs %zd)\n",
++				union_desc->bLength, sizeof(*union_desc));
++			return NULL;
+ 		}
+ 
+ 		buflen -= union_desc->bLength;
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 5f92ec23bb07..2621a045dfa8 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -2669,6 +2669,9 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
+ 	state->pdata.alt_data_sat = 1;
+ 	state->pdata.op_format_mode_sel = ADV7604_OP_FORMAT_MODE0;
+ 	state->pdata.bus_order = ADV7604_BUS_ORDER_RGB;
++	state->pdata.dr_str_data = ADV76XX_DR_STR_MEDIUM_HIGH;
++	state->pdata.dr_str_clk = ADV76XX_DR_STR_MEDIUM_HIGH;
++	state->pdata.dr_str_sync = ADV76XX_DR_STR_MEDIUM_HIGH;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c
+index d407244fd1bc..bd0f5b195188 100644
+--- a/drivers/media/pci/bt8xx/dvb-bt8xx.c
++++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c
+@@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
+ 		/*	DST is not a frontend, attaching the ASIC	*/
+ 		if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
+ 			pr_err("%s: Could not find a Twinhan DST\n", __func__);
++			kfree(state);
+ 			break;
+ 		}
+ 		/*	Attach other DST peripherals if any		*/
+diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
+index 49658ca39e51..a851f20dca23 100644
+--- a/drivers/media/platform/exynos4-is/fimc-is.c
++++ b/drivers/media/platform/exynos4-is/fimc-is.c
+@@ -815,12 +815,13 @@ static int fimc_is_probe(struct platform_device *pdev)
+ 	is->irq = irq_of_parse_and_map(dev->of_node, 0);
+ 	if (!is->irq) {
+ 		dev_err(dev, "no irq found\n");
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err_iounmap;
+ 	}
+ 
+ 	ret = fimc_is_get_clocks(is);
+ 	if (ret < 0)
+-		return ret;
++		goto err_iounmap;
+ 
+ 	platform_set_drvdata(pdev, is);
+ 
+@@ -880,6 +881,8 @@ err_irq:
+ 	free_irq(is->irq, is);
+ err_clk:
+ 	fimc_is_put_clocks(is);
++err_iounmap:
++	iounmap(is->pmu_regs);
+ 	return ret;
+ }
+ 
+@@ -935,6 +938,7 @@ static int fimc_is_remove(struct platform_device *pdev)
+ 	fimc_is_unregister_subdevs(is);
+ 	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
+ 	fimc_is_put_clocks(is);
++	iounmap(is->pmu_regs);
+ 	fimc_is_debugfs_remove(is);
+ 	release_firmware(is->fw.f_w);
+ 	fimc_is_free_cpu_memory(is);
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index eb9e7feb9b13..7a16e9ea041c 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -2419,6 +2419,11 @@ static int imon_probe(struct usb_interface *interface,
+ 	mutex_lock(&driver_lock);
+ 
+ 	first_if = usb_ifnum_to_if(usbdev, 0);
++	if (!first_if) {
++		ret = -ENODEV;
++		goto fail;
++	}
++
+ 	first_if_ctx = usb_get_intfdata(first_if);
+ 
+ 	if (ifnum == 0) {
+diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c
+index e42bde081cd7..ae055466fc01 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-core.c
++++ b/drivers/media/usb/cx231xx/cx231xx-core.c
+@@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
+ 	 */
+ 	if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
+ 					(ven_req->bRequest == 0x5) ||
+-					(ven_req->bRequest == 0x6))) {
++					(ven_req->bRequest == 0x6) ||
++
++					/* Internal Master 3 Bus can send
++					 * and receive only 4 bytes at a time
++					 */
++					(ven_req->bRequest == 0x2))) {
+ 		unsend_size = 0;
+ 		pdata = ven_req->pBuff;
+ 
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index c170523226aa..0d7565158207 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -292,7 +292,7 @@ static int stk7700P2_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7700d_dib7000p_mt2266_config)
+ 		    != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -326,7 +326,7 @@ static int stk7700d_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7700d_dib7000p_mt2266_config)
+ 		    != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -479,7 +479,7 @@ static int stk7700ph_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &stk7700ph_dib7700_xc3028_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1010,7 +1010,7 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &dib7070p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -1068,7 +1068,7 @@ static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				     &dib7770p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3036,7 +3036,7 @@ static int nim7090_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, &nim7090_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 	adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap, 0x80, &nim7090_dib7000p_config);
+@@ -3089,7 +3089,7 @@ static int tfe7090pvr_frontend0_attach(struct dvb_usb_adapter *adap)
+ 	/* initialize IC 0 */
+ 	if (state->dib7000p_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x20, &tfe7090pvr_dib7000p_config[0]) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3119,7 +3119,7 @@ static int tfe7090pvr_frontend1_attach(struct dvb_usb_adapter *adap)
+ 	i2c = state->dib7000p_ops.get_i2c_master(adap->dev->adapter[0].fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_6_7, 1);
+ 	if (state->dib7000p_ops.i2c_enumeration(i2c, 1, 0x10, &tfe7090pvr_dib7000p_config[1]) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n", __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3194,7 +3194,7 @@ static int tfe7790p_frontend_attach(struct dvb_usb_adapter *adap)
+ 				1, 0x10, &tfe7790p_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 				__func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 	adap->fe_adap[0].fe = state->dib7000p_ops.init(&adap->dev->i2c_adap,
+@@ -3289,7 +3289,7 @@ static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+ 				     stk7070pd_dib7000p_config) != 0) {
+ 		err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 		    __func__);
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+@@ -3364,7 +3364,7 @@ static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+ 					     stk7070pd_dib7000p_config) != 0) {
+ 			err("%s: state->dib7000p_ops.i2c_enumeration failed.  Cannot continue\n",
+ 			    __func__);
+-			dvb_detach(&state->dib7000p_ops);
++			dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 			return -ENODEV;
+ 		}
+ 	}
+@@ -3600,7 +3600,7 @@ static int pctv340e_frontend_attach(struct dvb_usb_adapter *adap)
+ 
+ 	if (state->dib7000p_ops.dib7000pc_detection(&adap->dev->i2c_adap) == 0) {
+ 		/* Demodulator not found for some reason? */
+-		dvb_detach(&state->dib7000p_ops);
++		dvb_detach(state->dib7000p_ops.set_wbd_ref);
+ 		return -ENODEV;
+ 	}
+ 
+diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
+index 0d1825696153..405ce78c1ef4 100644
+--- a/drivers/mfd/ab8500-sysctrl.c
++++ b/drivers/mfd/ab8500-sysctrl.c
+@@ -99,7 +99,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
+ 	u8 bank;
+ 
+ 	if (sysctrl_dev == NULL)
+-		return -EINVAL;
++		return -EPROBE_DEFER;
+ 
+ 	bank = (reg >> 8);
+ 	if (!valid_bank(bank))
+@@ -115,11 +115,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
+ 	u8 bank;
+ 
+ 	if (sysctrl_dev == NULL)
+-		return -EINVAL;
++		return -EPROBE_DEFER;
+ 
+ 	bank = (reg >> 8);
+-	if (!valid_bank(bank))
++	if (!valid_bank(bank)) {
++		pr_err("invalid bank\n");
+ 		return -EINVAL;
++	}
+ 
+ 	return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
+ 		(u8)(reg & 0xFF), mask, value);
+@@ -180,9 +182,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static const struct of_device_id ab8500_sysctrl_match[] = {
++	{ .compatible = "stericsson,ab8500-sysctrl", },
++	{}
++};
++
+ static struct platform_driver ab8500_sysctrl_driver = {
+ 	.driver = {
+ 		.name = "ab8500-sysctrl",
++		.of_match_table = ab8500_sysctrl_match,
+ 	},
+ 	.probe = ab8500_sysctrl_probe,
+ 	.remove = ab8500_sysctrl_remove,
+diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
+index 94cddf381ba3..5186ac611564 100644
+--- a/drivers/mmc/host/s3cmci.c
++++ b/drivers/mmc/host/s3cmci.c
+@@ -21,6 +21,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/seq_file.h>
+ #include <linux/gpio.h>
++#include <linux/interrupt.h>
+ #include <linux/irq.h>
+ #include <linux/io.h>
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 9ba92e23e67f..16f9c742bc30 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3151,7 +3151,7 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
+ 	hash ^= (hash >> 16);
+ 	hash ^= (hash >> 8);
+ 
+-	return hash;
++	return hash >> 1;
+ }
+ 
+ /*-------------------------- Device entry points ----------------------------*/
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index cf7c18947189..d065c0e2d18e 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
+ 		break;
+ 	case BOSCH_D_CAN:
+ 		priv->regs = reg_map_d_can;
+-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 		break;
+ 	default:
+ 		ret = -EINVAL;
+diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
+index e36d10520e24..717530eac70c 100644
+--- a/drivers/net/can/c_can/c_can_platform.c
++++ b/drivers/net/can/c_can/c_can_platform.c
+@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
+ 		break;
+ 	case BOSCH_D_CAN:
+ 		priv->regs = reg_map_d_can;
+-		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ 		priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ 		priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ 		priv->read_reg32 = d_can_plat_read_reg32;
+diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
+index b1b9ebafb354..a3b2e23921bf 100644
+--- a/drivers/net/ethernet/fealnx.c
++++ b/drivers/net/ethernet/fealnx.c
+@@ -257,8 +257,8 @@ enum rx_desc_status_bits {
+ 	RXFSD = 0x00000800,	/* first descriptor */
+ 	RXLSD = 0x00000400,	/* last descriptor */
+ 	ErrorSummary = 0x80,	/* error summary */
+-	RUNT = 0x40,		/* runt packet received */
+-	LONG = 0x20,		/* long packet received */
++	RUNTPKT = 0x40,		/* runt packet received */
++	LONGPKT = 0x20,		/* long packet received */
+ 	FAE = 0x10,		/* frame align error */
+ 	CRC = 0x08,		/* crc error */
+ 	RXER = 0x04,		/* receive error */
+@@ -1633,7 +1633,7 @@ static int netdev_rx(struct net_device *dev)
+ 					       dev->name, rx_status);
+ 
+ 				dev->stats.rx_errors++;	/* end of a packet. */
+-				if (rx_status & (LONG | RUNT))
++				if (rx_status & (LONGPKT | RUNTPKT))
+ 					dev->stats.rx_length_errors++;
+ 				if (rx_status & RXER)
+ 					dev->stats.rx_frame_errors++;
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+index 1b2738380518..a00fd1a44393 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+@@ -1967,9 +1967,10 @@ static void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
+  *  function can also be used to respond to an error as the connection
+  *  resetting would also be a means of dealing with errors.
+  **/
+-static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+-				       struct fm10k_mbx_info *mbx)
++static s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
++				      struct fm10k_mbx_info *mbx)
+ {
++	s32 err = 0;
+ 	const enum fm10k_mbx_state state = mbx->state;
+ 
+ 	switch (state) {
+@@ -1982,6 +1983,7 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ 	case FM10K_STATE_OPEN:
+ 		/* flush any incomplete work */
+ 		fm10k_sm_mbx_connect_reset(mbx);
++		err = FM10K_ERR_RESET_REQUESTED;
+ 		break;
+ 	case FM10K_STATE_CONNECT:
+ 		/* Update remote value to match local value */
+@@ -1991,6 +1993,8 @@ static void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ 	}
+ 
+ 	fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
++
++	return err;
+ }
+ 
+ /**
+@@ -2071,7 +2075,7 @@ static s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
+ 
+ 	switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
+ 	case 0:
+-		fm10k_sm_mbx_process_reset(hw, mbx);
++		err = fm10k_sm_mbx_process_reset(hw, mbx);
+ 		break;
+ 	case FM10K_SM_MBX_VERSION:
+ 		err = fm10k_sm_mbx_process_version_1(hw, mbx);
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+index df9fda38bdd1..0b5938834703 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+@@ -990,6 +990,7 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
+ 	struct fm10k_hw *hw = &interface->hw;
+ 	struct fm10k_mbx_info *mbx = &hw->mbx;
+ 	u32 eicr;
++	s32 err = 0;
+ 
+ 	/* unmask any set bits related to this interrupt */
+ 	eicr = fm10k_read_reg(hw, FM10K_EICR);
+@@ -1005,12 +1006,15 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
+ 
+ 	/* service mailboxes */
+ 	if (fm10k_mbx_trylock(interface)) {
+-		mbx->ops.process(hw, mbx);
++		err = mbx->ops.process(hw, mbx);
+ 		/* handle VFLRE events */
+ 		fm10k_iov_event(interface);
+ 		fm10k_mbx_unlock(interface);
+ 	}
+ 
++	if (err == FM10K_ERR_RESET_REQUESTED)
++		interface->flags |= FM10K_FLAG_RESET_REQUESTED;
++
+ 	/* if switch toggled state we should reset GLORTs */
+ 	if (eicr & FM10K_EICR_SWITCHNOTREADY) {
+ 		/* force link down for at least 4 seconds */
+diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
+index 0f69ef81751a..099bd0af6a4e 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
++++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
+@@ -215,6 +215,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+ 	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+ 			E1000_STATUS_FUNC_SHIFT;
+ 
++	/* Make sure the PHY is in a good state. Several people have reported
++	 * firmware leaving the PHY's page select register set to something
++	 * other than the default of zero, which causes the PHY ID read to
++	 * access something other than the intended register.
++	 */
++	ret_val = hw->phy.ops.reset(hw);
++	if (ret_val) {
++		hw_dbg("Error resetting the PHY.\n");
++		goto out;
++	}
++
+ 	/* Set phy->phy_addr and phy->id. */
+ 	ret_val = igb_get_phy_id_82575(hw);
+ 	if (ret_val)
+diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
+index 65d931669f81..89402fce7d79 100644
+--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
++++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
+@@ -699,9 +699,9 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
+ 
+ 	ret_val = igb_pool_flash_update_done_i210(hw);
+ 	if (ret_val)
+-		hw_dbg("Flash update complete\n");
+-	else
+ 		hw_dbg("Flash update time out\n");
++	else
++		hw_dbg("Flash update complete\n");
+ 
+ out:
+ 	return ret_val;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 7dfbcde34509..b5a8a5e40870 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -3167,7 +3167,9 @@ static int __igb_close(struct net_device *netdev, bool suspending)
+ 
+ static int igb_close(struct net_device *netdev)
+ {
+-	return __igb_close(netdev, false);
++	if (netif_device_present(netdev))
++		return __igb_close(netdev, false);
++	return 0;
+ }
+ 
+ /**
+@@ -7363,12 +7365,14 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ 	int retval = 0;
+ #endif
+ 
++	rtnl_lock();
+ 	netif_device_detach(netdev);
+ 
+ 	if (netif_running(netdev))
+ 		__igb_close(netdev, true);
+ 
+ 	igb_clear_interrupt_scheme(adapter);
++	rtnl_unlock();
+ 
+ #ifdef CONFIG_PM
+ 	retval = pci_save_state(pdev);
+@@ -7487,16 +7491,15 @@ static int igb_resume(struct device *dev)
+ 
+ 	wr32(E1000_WUS, ~0);
+ 
+-	if (netdev->flags & IFF_UP) {
+-		rtnl_lock();
++	rtnl_lock();
++	if (!err && netif_running(netdev))
+ 		err = __igb_open(netdev, true);
+-		rtnl_unlock();
+-		if (err)
+-			return err;
+-	}
+ 
+-	netif_device_attach(netdev);
+-	return 0;
++	if (!err)
++		netif_device_attach(netdev);
++	rtnl_unlock();
++
++	return err;
+ }
+ 
+ static int igb_runtime_idle(struct device *dev)
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+index 68e1e757ecef..2582fa5d9cfc 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+@@ -307,6 +307,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+ 	ixgbe_cache_ring_rss(adapter);
+ }
+ 
++#define IXGBE_RSS_64Q_MASK	0x3F
+ #define IXGBE_RSS_16Q_MASK	0xF
+ #define IXGBE_RSS_8Q_MASK	0x7
+ #define IXGBE_RSS_4Q_MASK	0x3
+@@ -602,6 +603,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+  **/
+ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+ {
++	struct ixgbe_hw *hw = &adapter->hw;
+ 	struct ixgbe_ring_feature *f;
+ 	u16 rss_i;
+ 
+@@ -610,7 +612,11 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+ 	rss_i = f->limit;
+ 
+ 	f->indices = rss_i;
+-	f->mask = IXGBE_RSS_16Q_MASK;
++
++	if (hw->mac.type < ixgbe_mac_X550)
++		f->mask = IXGBE_RSS_16Q_MASK;
++	else
++		f->mask = IXGBE_RSS_64Q_MASK;
+ 
+ 	/* disable ATR by default, it will be configured below */
+ 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 463ff47200f1..2462b3c5fa40 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -5765,7 +5765,8 @@ static int ixgbe_close(struct net_device *netdev)
+ 
+ 	ixgbe_ptp_stop(adapter);
+ 
+-	ixgbe_close_suspend(adapter);
++	if (netif_device_present(netdev))
++		ixgbe_close_suspend(adapter);
+ 
+ 	ixgbe_fdir_filter_exit(adapter);
+ 
+@@ -5810,14 +5811,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
+ 	if (!err && netif_running(netdev))
+ 		err = ixgbe_open(netdev);
+ 
+-	rtnl_unlock();
+-
+-	if (err)
+-		return err;
+ 
+-	netif_device_attach(netdev);
++	if (!err)
++		netif_device_attach(netdev);
++	rtnl_unlock();
+ 
+-	return 0;
++	return err;
+ }
+ #endif /* CONFIG_PM */
+ 
+@@ -5832,14 +5831,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
+ 	int retval = 0;
+ #endif
+ 
++	rtnl_lock();
+ 	netif_device_detach(netdev);
+ 
+-	rtnl_lock();
+ 	if (netif_running(netdev))
+ 		ixgbe_close_suspend(adapter);
+-	rtnl_unlock();
+ 
+ 	ixgbe_clear_interrupt_scheme(adapter);
++	rtnl_unlock();
+ 
+ #ifdef CONFIG_PM
+ 	retval = pci_save_state(pdev);
+@@ -8963,7 +8962,7 @@ skip_bad_vf_detection:
+ 	}
+ 
+ 	if (netif_running(netdev))
+-		ixgbe_down(adapter);
++		ixgbe_close_suspend(adapter);
+ 
+ 	if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+ 		pci_disable_device(pdev);
+@@ -9033,10 +9032,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
+ 	}
+ 
+ #endif
++	rtnl_lock();
+ 	if (netif_running(netdev))
+-		ixgbe_up(adapter);
++		ixgbe_open(netdev);
+ 
+ 	netif_device_attach(netdev);
++	rtnl_unlock();
+ }
+ 
+ static const struct pci_error_handlers ixgbe_err_handler = {
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 2b212f3e140c..1f67ebf0e8ef 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -1059,6 +1059,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ 	case TUNSETSNDBUF:
+ 		if (get_user(s, sp))
+ 			return -EFAULT;
++		if (s <= 0)
++			return -EINVAL;
+ 
+ 		q->sk.sk_sndbuf = s;
+ 		return 0;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 12e67e0eb9c9..0bd2d20d22cb 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1637,6 +1637,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ 
+ 		if (!dev)
+ 			return -ENOMEM;
++		err = dev_get_valid_name(net, dev, name);
++		if (err < 0)
++			goto err_free_dev;
+ 
+ 		dev_net_set(dev, net);
+ 		dev->rtnl_link_ops = &tun_link_ops;
+@@ -2017,6 +2020,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ 			ret = -EFAULT;
+ 			break;
+ 		}
++		if (sndbuf <= 0) {
++			ret = -EINVAL;
++			break;
++		}
+ 
+ 		tun->sndbuf = sndbuf;
+ 		tun_set_sndbuf(tun);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 480c9366d6b6..6cf881ce4d4e 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -316,7 +316,7 @@ next_desc:
+ 	}
+ 
+ 	/* errors aren't fatal - we can live with the dynamic address */
+-	if (cdc_ether) {
++	if (cdc_ether && cdc_ether->wMaxSegmentSize) {
+ 		dev->hard_mtu = le16_to_cpu(cdc_ether->wMaxSegmentSize);
+ 		usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
+ 	}
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+index ab9f55344acd..89d8109a8a47 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+@@ -4207,9 +4207,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
+ 		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
+ 		if (err < 0)
+ 			brcmf_err("setting AP mode failed %d\n", err);
+-		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
+-		if (err < 0)
+-			brcmf_err("setting INFRA mode failed %d\n", err);
+ 		if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
+ 			brcmf_fil_iovar_int_set(ifp, "mbss", 0);
+ 		err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 7bd3c5a8116d..e683925220d1 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -67,6 +67,7 @@ module_param(rx_drain_timeout_msecs, uint, 0444);
+ unsigned int rx_stall_timeout_msecs = 60000;
+ module_param(rx_stall_timeout_msecs, uint, 0444);
+ 
++#define MAX_QUEUES_DEFAULT 8
+ unsigned int xenvif_max_queues;
+ module_param_named(max_queues, xenvif_max_queues, uint, 0644);
+ MODULE_PARM_DESC(max_queues,
+@@ -2011,11 +2012,12 @@ static int __init netback_init(void)
+ 	if (!xen_domain())
+ 		return -ENODEV;
+ 
+-	/* Allow as many queues as there are CPUs if user has not
++	/* Allow as many queues as there are CPUs but max. 8 if user has not
+ 	 * specified a value.
+ 	 */
+ 	if (xenvif_max_queues == 0)
+-		xenvif_max_queues = num_online_cpus();
++		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
++					  num_online_cpus());
+ 
+ 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
+ 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
+index 1ab863551920..4225d3d3198a 100644
+--- a/drivers/pci/host/pci-mvebu.c
++++ b/drivers/pci/host/pci-mvebu.c
+@@ -107,6 +107,12 @@ struct mvebu_pcie {
+ 	int nports;
+ };
+ 
++struct mvebu_pcie_window {
++	phys_addr_t base;
++	phys_addr_t remap;
++	size_t size;
++};
++
+ /* Structure representing one PCIe interface */
+ struct mvebu_pcie_port {
+ 	char *name;
+@@ -125,10 +131,8 @@ struct mvebu_pcie_port {
+ 	struct mvebu_sw_pci_bridge bridge;
+ 	struct device_node *dn;
+ 	struct mvebu_pcie *pcie;
+-	phys_addr_t memwin_base;
+-	size_t memwin_size;
+-	phys_addr_t iowin_base;
+-	size_t iowin_size;
++	struct mvebu_pcie_window memwin;
++	struct mvebu_pcie_window iowin;
+ 	u32 saved_pcie_stat;
+ };
+ 
+@@ -345,23 +349,45 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+ 	}
+ }
+ 
++static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
++				  unsigned int target, unsigned int attribute,
++				  const struct mvebu_pcie_window *desired,
++				  struct mvebu_pcie_window *cur)
++{
++	if (desired->base == cur->base && desired->remap == cur->remap &&
++	    desired->size == cur->size)
++		return;
++
++	if (cur->size != 0) {
++		mvebu_pcie_del_windows(port, cur->base, cur->size);
++		cur->size = 0;
++		cur->base = 0;
++
++		/*
++		 * If something tries to change the window while it is enabled
++		 * the change will not be done atomically. That would be
++		 * difficult to do in the general case.
++		 */
++	}
++
++	if (desired->size == 0)
++		return;
++
++	mvebu_pcie_add_windows(port, target, attribute, desired->base,
++			       desired->size, desired->remap);
++	*cur = *desired;
++}
++
+ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ {
+-	phys_addr_t iobase;
++	struct mvebu_pcie_window desired = {};
+ 
+ 	/* Are the new iobase/iolimit values invalid? */
+ 	if (port->bridge.iolimit < port->bridge.iobase ||
+ 	    port->bridge.iolimitupper < port->bridge.iobaseupper ||
+ 	    !(port->bridge.command & PCI_COMMAND_IO)) {
+-
+-		/* If a window was configured, remove it */
+-		if (port->iowin_base) {
+-			mvebu_pcie_del_windows(port, port->iowin_base,
+-					       port->iowin_size);
+-			port->iowin_base = 0;
+-			port->iowin_size = 0;
+-		}
+-
++		mvebu_pcie_set_window(port, port->io_target, port->io_attr,
++				      &desired, &port->iowin);
+ 		return;
+ 	}
+ 
+@@ -378,32 +404,27 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+ 	 * specifications. iobase is the bus address, port->iowin_base
+ 	 * is the CPU address.
+ 	 */
+-	iobase = ((port->bridge.iobase & 0xF0) << 8) |
+-		(port->bridge.iobaseupper << 16);
+-	port->iowin_base = port->pcie->io.start + iobase;
+-	port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+-			    (port->bridge.iolimitupper << 16)) -
+-			    iobase) + 1;
+-
+-	mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
+-			       port->iowin_base, port->iowin_size,
+-			       iobase);
++	desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
++			(port->bridge.iobaseupper << 16);
++	desired.base = port->pcie->io.start + desired.remap;
++	desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
++			 (port->bridge.iolimitupper << 16)) -
++			desired.remap) +
++		       1;
++
++	mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
++			      &port->iowin);
+ }
+ 
+ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ {
++	struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
++
+ 	/* Are the new membase/memlimit values invalid? */
+ 	if (port->bridge.memlimit < port->bridge.membase ||
+ 	    !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+-
+-		/* If a window was configured, remove it */
+-		if (port->memwin_base) {
+-			mvebu_pcie_del_windows(port, port->memwin_base,
+-					       port->memwin_size);
+-			port->memwin_base = 0;
+-			port->memwin_size = 0;
+-		}
+-
++		mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
++				      &desired, &port->memwin);
+ 		return;
+ 	}
+ 
+@@ -413,14 +434,12 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+ 	 * window to setup, according to the PCI-to-PCI bridge
+ 	 * specifications.
+ 	 */
+-	port->memwin_base  = ((port->bridge.membase & 0xFFF0) << 16);
+-	port->memwin_size  =
+-		(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+-		port->memwin_base + 1;
+-
+-	mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
+-			       port->memwin_base, port->memwin_size,
+-			       MVEBU_MBUS_NO_REMAP);
++	desired.base = ((port->bridge.membase & 0xFFF0) << 16);
++	desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
++		       desired.base + 1;
++
++	mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
++			      &port->memwin);
+ }
+ 
+ /*
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index af2046c87806..847f75601591 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -249,7 +249,7 @@ static int hp_wmi_display_state(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_DISPLAY_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return state;
+ }
+ 
+@@ -259,7 +259,7 @@ static int hp_wmi_hddtemp_state(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_HDDTEMP_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return state;
+ }
+ 
+@@ -269,7 +269,7 @@ static int hp_wmi_als_state(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return state;
+ }
+ 
+@@ -280,7 +280,7 @@ static int hp_wmi_dock_state(void)
+ 				       sizeof(state), sizeof(state));
+ 
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 
+ 	return state & 0x1;
+ }
+@@ -291,7 +291,7 @@ static int hp_wmi_tablet_state(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+ 	if (ret)
+-		return ret;
++		return ret < 0 ? ret : -EINVAL;
+ 
+ 	return (state & 0x4) ? 1 : 0;
+ }
+@@ -324,7 +324,7 @@ static int __init hp_wmi_enable_hotkeys(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+ 				       sizeof(value), 0);
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return 0;
+ }
+ 
+@@ -337,7 +337,7 @@ static int hp_wmi_set_block(void *data, bool blocked)
+ 	ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1,
+ 				   &query, sizeof(query), 0);
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return 0;
+ }
+ 
+@@ -429,7 +429,7 @@ static int hp_wmi_post_code_state(void)
+ 	int ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 	return state;
+ }
+ 
+@@ -495,7 +495,7 @@ static ssize_t set_als(struct device *dev, struct device_attribute *attr,
+ 	int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, 1, &tmp,
+ 				       sizeof(tmp), sizeof(tmp));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 
+ 	return count;
+ }
+@@ -516,7 +516,7 @@ static ssize_t set_postcode(struct device *dev, struct device_attribute *attr,
+ 	ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, 1, &tmp,
+ 				       sizeof(tmp), sizeof(tmp));
+ 	if (ret)
+-		return -EINVAL;
++		return ret < 0 ? ret : -EINVAL;
+ 
+ 	return count;
+ }
+@@ -573,10 +573,12 @@ static void hp_wmi_notify(u32 value, void *context)
+ 
+ 	switch (event_id) {
+ 	case HPWMI_DOCK_EVENT:
+-		input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-				    hp_wmi_dock_state());
+-		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-				    hp_wmi_tablet_state());
++		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_DOCK,
++					    hp_wmi_dock_state());
++		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
++					    hp_wmi_tablet_state());
+ 		input_sync(hp_wmi_input_dev);
+ 		break;
+ 	case HPWMI_PARK_HDD:
+@@ -649,6 +651,7 @@ static int __init hp_wmi_input_setup(void)
+ {
+ 	acpi_status status;
+ 	int err;
++	int val;
+ 
+ 	hp_wmi_input_dev = input_allocate_device();
+ 	if (!hp_wmi_input_dev)
+@@ -659,17 +662,26 @@ static int __init hp_wmi_input_setup(void)
+ 	hp_wmi_input_dev->id.bustype = BUS_HOST;
+ 
+ 	__set_bit(EV_SW, hp_wmi_input_dev->evbit);
+-	__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
+-	__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
++
++	/* Dock */
++	val = hp_wmi_dock_state();
++	if (!(val < 0)) {
++		__set_bit(SW_DOCK, hp_wmi_input_dev->swbit);
++		input_report_switch(hp_wmi_input_dev, SW_DOCK, val);
++	}
++
++	/* Tablet mode */
++	val = hp_wmi_tablet_state();
++	if (!(val < 0)) {
++		__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
++		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
++	}
+ 
+ 	err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
+ 	if (err)
+ 		goto err_free_dev;
+ 
+ 	/* Set initial hardware state */
+-	input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_dock_state());
+-	input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-			    hp_wmi_tablet_state());
+ 	input_sync(hp_wmi_input_dev);
+ 
+ 	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
+@@ -982,10 +994,12 @@ static int hp_wmi_resume_handler(struct device *device)
+ 	 * changed.
+ 	 */
+ 	if (hp_wmi_input_dev) {
+-		input_report_switch(hp_wmi_input_dev, SW_DOCK,
+-				    hp_wmi_dock_state());
+-		input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
+-				    hp_wmi_tablet_state());
++		if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_DOCK,
++					    hp_wmi_dock_state());
++		if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit))
++			input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE,
++					    hp_wmi_tablet_state());
+ 		input_sync(hp_wmi_input_dev);
+ 	}
+ 
+diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
+index 0944e834af8d..aeb3f786d2f0 100644
+--- a/drivers/platform/x86/intel_mid_thermal.c
++++ b/drivers/platform/x86/intel_mid_thermal.c
+@@ -551,6 +551,7 @@ static const struct platform_device_id therm_id_table[] = {
+ 	{ "msic_thermal", 1 },
+ 	{ }
+ };
++MODULE_DEVICE_TABLE(platform, therm_id_table);
+ 
+ static struct platform_driver mid_thermal_driver = {
+ 	.driver = {
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 028d7f76e94e..5f12645f70b8 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1633,8 +1633,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	/* check for attention message */
+ 	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
+ 		device = dasd_device_from_cdev_locked(cdev);
+-		device->discipline->check_attention(device, irb->esw.esw1.lpum);
+-		dasd_put_device(device);
++		if (!IS_ERR(device)) {
++			device->discipline->check_attention(device,
++							    irb->esw.esw1.lpum);
++			dasd_put_device(device);
++		}
+ 	}
+ 
+ 	if (!cqr)
+diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
+index d296847a0a7c..77336d85a717 100644
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -902,7 +902,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
+ int qeth_core_hardsetup_card(struct qeth_card *);
+ void qeth_print_status_message(struct qeth_card *);
+ int qeth_init_qdio_queues(struct qeth_card *);
+-int qeth_send_startlan(struct qeth_card *);
+ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
+ 		  int (*reply_cb)
+ 		  (struct qeth_card *, struct qeth_reply *, unsigned long),
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 2289a2cf7bd8..a81215d87ce1 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -3003,7 +3003,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ }
+ EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
+ 
+-int qeth_send_startlan(struct qeth_card *card)
++static int qeth_send_startlan(struct qeth_card *card)
+ {
+ 	int rc;
+ 	struct qeth_cmd_buffer *iob;
+@@ -3016,7 +3016,6 @@ int qeth_send_startlan(struct qeth_card *card)
+ 	rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
+ 	return rc;
+ }
+-EXPORT_SYMBOL_GPL(qeth_send_startlan);
+ 
+ static int qeth_default_setadapterparms_cb(struct qeth_card *card,
+ 		struct qeth_reply *reply, unsigned long data)
+@@ -5130,6 +5129,20 @@ retriable:
+ 		goto out;
+ 	}
+ 
++	rc = qeth_send_startlan(card);
++	if (rc) {
++		QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
++		if (rc == IPA_RC_LAN_OFFLINE) {
++			dev_warn(&card->gdev->dev,
++				"The LAN is offline\n");
++			card->lan_online = 0;
++		} else {
++			rc = -ENODEV;
++			goto out;
++		}
++	} else
++		card->lan_online = 1;
++
+ 	card->options.ipa4.supported_funcs = 0;
+ 	card->options.adp.supported_funcs = 0;
+ 	card->options.sbp.supported_funcs = 0;
+@@ -5140,14 +5153,14 @@ retriable:
+ 	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ 		rc = qeth_query_setadapterparms(card);
+ 		if (rc < 0) {
+-			QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
++			QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ 			goto out;
+ 		}
+ 	}
+ 	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ 		rc = qeth_query_setdiagass(card);
+ 		if (rc < 0) {
+-			QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
++			QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
+ 			goto out;
+ 		}
+ 	}
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 743673b8b046..b0413f5611cf 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -1035,21 +1035,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+ 	/* softsetup */
+ 	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ 
+-	rc = qeth_send_startlan(card);
+-	if (rc) {
+-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+-		if (rc == 0xe080) {
+-			dev_warn(&card->gdev->dev,
+-				"The LAN is offline\n");
+-			card->lan_online = 0;
+-			goto contin;
+-		}
+-		rc = -ENODEV;
+-		goto out_remove;
+-	} else
+-		card->lan_online = 1;
+-
+-contin:
+ 	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
+ 	    (card->info.type == QETH_CARD_TYPE_OSX)) {
+ 		/* configure isolation level */
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 976c81b32f99..fefd3c512386 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3413,21 +3413,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
+ 	/* softsetup */
+ 	QETH_DBF_TEXT(SETUP, 2, "softsetp");
+ 
+-	rc = qeth_send_startlan(card);
+-	if (rc) {
+-		QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+-		if (rc == 0xe080) {
+-			dev_warn(&card->gdev->dev,
+-				"The LAN is offline\n");
+-			card->lan_online = 0;
+-			goto contin;
+-		}
+-		rc = -ENODEV;
+-		goto out_remove;
+-	} else
+-		card->lan_online = 1;
+-
+-contin:
+ 	rc = qeth_l3_setadapter_parms(card);
+ 	if (rc)
+ 		QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index d65bd178d131..ba82403f2cdf 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -5147,6 +5147,19 @@ lpfc_free_sysfs_attr(struct lpfc_vport *vport)
+  * Dynamic FC Host Attributes Support
+  */
+ 
++/**
++ * lpfc_get_host_symbolic_name - Copy symbolic name into the scsi host
++ * @shost: kernel scsi host pointer.
++ **/
++static void
++lpfc_get_host_symbolic_name(struct Scsi_Host *shost)
++{
++	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
++
++	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
++				      sizeof fc_host_symbolic_name(shost));
++}
++
+ /**
+  * lpfc_get_host_port_id - Copy the vport DID into the scsi host port id
+  * @shost: kernel scsi host pointer.
+@@ -5681,6 +5694,8 @@ struct fc_function_template lpfc_transport_functions = {
+ 	.show_host_supported_fc4s = 1,
+ 	.show_host_supported_speeds = 1,
+ 	.show_host_maxframe_size = 1,
++
++	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
+ 	.show_host_symbolic_name = 1,
+ 
+ 	/* dynamic attributes the driver supports */
+@@ -5748,6 +5763,8 @@ struct fc_function_template lpfc_vport_transport_functions = {
+ 	.show_host_supported_fc4s = 1,
+ 	.show_host_supported_speeds = 1,
+ 	.show_host_maxframe_size = 1,
++
++	.get_host_symbolic_name = lpfc_get_host_symbolic_name,
+ 	.show_host_symbolic_name = 1,
+ 
+ 	/* dynamic attributes the driver supports */
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index edb1a4d648dd..7ca27e5ef079 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -118,6 +118,8 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+ 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
+ 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
+ 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
++	/* ensure the WQE bcopy has completed before the doorbell write */
++	wmb();
+ 
+ 	/* Update the host index before invoking device */
+ 	host_index = q->host_index;
+@@ -9823,6 +9825,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+ 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
+ 
+ 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
++	abtsiocbp->vport = vport;
+ 
+ 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
+ 			 "0339 Abort xri x%x, original iotag x%x, "
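
The wmb() added to lpfc_sli4_wq_put() above orders the copy of the
work-queue entry against the doorbell write that tells the adapter to
fetch it; without the barrier the device could observe the doorbell
before the WQE contents. The fragment below is only a sketch of the same
release-ordering idea in portable C11 -- the struct and function are
hypothetical, and a real driver would also use MMIO accessors for the
doorbell.

#include <stdatomic.h>
#include <stdio.h>

struct queue {
	int entry;		/* staged work-queue entry */
	_Atomic int doorbell;	/* tells the consumer to look */
};

static void post_entry(struct queue *q, int wqe)
{
	q->entry = wqe;				   /* copy the WQE first */
	atomic_thread_fence(memory_order_release); /* like wmb(): order the copy */
	atomic_store_explicit(&q->doorbell, 1,	   /* ...before the doorbell */
			      memory_order_relaxed);
}

int main(void)
{
	struct queue q = { 0 };

	post_entry(&q, 42);
	printf("entry=%d doorbell=%d\n", q.entry, q.doorbell);
	return 0;
}
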
+diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
+index a87ee33f4f2a..1ea4702da5bb 100644
+--- a/drivers/scsi/lpfc/lpfc_vport.c
++++ b/drivers/scsi/lpfc/lpfc_vport.c
+@@ -528,6 +528,12 @@ enable_vport(struct fc_vport *fc_vport)
+ 
+ 	spin_lock_irq(shost->host_lock);
+ 	vport->load_flag |= FC_LOADING;
++	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
++		spin_unlock_irq(shost->host_lock);
++		lpfc_issue_init_vpi(vport);
++		goto out;
++	}
++
+ 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ 	spin_unlock_irq(shost->host_lock);
+ 
+@@ -548,6 +554,8 @@ enable_vport(struct fc_vport *fc_vport)
+ 	} else {
+ 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+ 	}
++
++out:
+ 	lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+ 			 "1827 Vport Enabled.\n");
+ 	return VPORT_OK;
+diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+index 3c1c8c6c4a6c..bb8ab4e8e846 100644
+--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
++++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+@@ -258,7 +258,7 @@ out_free_irq:
+ out1:
+ 	iio_trigger_unregister(st->trig);
+ out:
+-	iio_trigger_put(st->trig);
++	iio_trigger_free(st->trig);
+ 	return ret;
+ }
+ 
+@@ -271,7 +271,7 @@ static int iio_bfin_tmr_trigger_remove(struct platform_device *pdev)
+ 		peripheral_free(st->t->pin);
+ 	free_irq(st->irq, st);
+ 	iio_trigger_unregister(st->trig);
+-	iio_trigger_put(st->trig);
++	iio_trigger_free(st->trig);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+index 89794fdfec9d..3b93270d5146 100644
+--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
++++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+@@ -1066,23 +1066,21 @@ struct hsm_action_item {
+  * \retval buffer
+  */
+ static inline char *hai_dump_data_field(struct hsm_action_item *hai,
+-					char *buffer, int len)
++					char *buffer, size_t len)
+ {
+-	int i, sz, data_len;
++	int i, data_len;
+ 	char *ptr;
+ 
+ 	ptr = buffer;
+-	sz = len;
+ 	data_len = hai->hai_len - sizeof(*hai);
+-	for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
+-		int cnt;
+-
+-		cnt = snprintf(ptr, sz, "%.2X",
+-			       (unsigned char)hai->hai_data[i]);
+-		ptr += cnt;
+-		sz -= cnt;
++	for (i = 0; (i < data_len) && (len > 2); i++) {
++		snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
++		ptr += 2;
++		len -= 2;
+ 	}
++
+ 	*ptr = '\0';
++
+ 	return buffer;
+ }
+ 
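
The hai_dump_data_field() rewrite above drops the snprintf() return-value
bookkeeping in favour of a fixed two-characters-per-byte scheme: each
iteration needs room for two hex digits plus the terminating NUL, hence
the "len > 2" guard. A standalone sketch of the same bounded hex-dump
pattern (function and buffer names are invented for illustration):

#include <stdio.h>

static char *dump_hex(const unsigned char *data, size_t data_len,
		      char *buffer, size_t len)
{
	char *ptr = buffer;
	size_t i;

	for (i = 0; i < data_len && len > 2; i++) {
		snprintf(ptr, 3, "%02X", data[i]);	/* 2 digits + NUL */
		ptr += 2;
		len -= 2;
	}
	*ptr = '\0';
	return buffer;
}

int main(void)
{
	unsigned char data[] = { 0xDE, 0xAD, 0xBE, 0xEF };
	char buf[5];	/* room for two bytes of output only */

	puts(dump_hex(data, sizeof(data), buf, sizeof(buf)));	/* "DEAD" */
	return 0;
}
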
+diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+index 84b111eb48fa..fddf8ee143da 100644
+--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+@@ -572,6 +572,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
+ 	if (lock == NULL)
+ 		return NULL;
+ 
++	if (lock->l_export && lock->l_export->exp_failed) {
++		CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
++		       lock, lock->l_export);
++		LDLM_LOCK_PUT(lock);
++		return NULL;
++	}
++
+ 	/* It's unlikely but possible that someone marked the lock as
+ 	 * destroyed after we did handle2object on it */
+ 	if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
+index c6c824356464..7dc9fd006b12 100644
+--- a/drivers/staging/lustre/lustre/llite/rw26.c
++++ b/drivers/staging/lustre/lustre/llite/rw26.c
+@@ -376,6 +376,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
+ 	if (!lli->lli_has_smd)
+ 		return -EBADF;
+ 
++	/* Check EOF by ourselves */
++	if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
++		return 0;
++
+ 	/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
+ 	if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ 		return -EINVAL;
+diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
+index 8e61421515cb..58997fb0c1dd 100644
+--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
++++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
+@@ -1498,20 +1498,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
+ 		 * it may hit swab race at LU-1044. */
+ 		if (req->rq_ops->hpreq_check) {
+ 			rc = req->rq_ops->hpreq_check(req);
+-			/**
+-			 * XXX: Out of all current
+-			 * ptlrpc_hpreq_ops::hpreq_check(), only
+-			 * ldlm_cancel_hpreq_check() can return an error code;
+-			 * other functions assert in similar places, which seems
+-			 * odd. What also does not seem right is that handlers
+-			 * for those RPCs do not assert on the same checks, but
+-			 * rather handle the error cases. e.g. see
+-			 * ost_rw_hpreq_check(), and ost_brw_read(),
+-			 * ost_brw_write().
++			if (rc == -ESTALE) {
++				req->rq_status = rc;
++				ptlrpc_error(req);
++			}
++			/** hpreq_check() can only return an error,
++			 * 0 for a normal request,
++			 * or 1 for a high-priority request
+ 			 */
+-			if (rc < 0)
+-				return rc;
+-			LASSERT(rc == 0 || rc == 1);
++			LASSERT(rc <= 1);
+ 		}
+ 
+ 		spin_lock_bh(&req->rq_export->exp_rpc_lock);
+diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
+index 971bf457f32d..e75a386344e4 100644
+--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
++++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
+@@ -75,7 +75,7 @@ extern u32 GlobalDebugLevel;
+ #define DBG_88E_LEVEL(_level, fmt, arg...)				\
+ 	do {								\
+ 		if (_level <= GlobalDebugLevel)				\
+-			pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg);	\
++			pr_info(DRIVER_PREFIX fmt, ##arg);	\
+ 	} while (0)
+ 
+ #define DBG_88E(...)							\
+diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
+index 8269be80437a..53c39ca99ee4 100644
+--- a/drivers/staging/rtl8712/ieee80211.h
++++ b/drivers/staging/rtl8712/ieee80211.h
+@@ -142,52 +142,52 @@ struct ieee_ibss_seq {
+ };
+ 
+ struct ieee80211_hdr {
+-	u16 frame_ctl;
+-	u16 duration_id;
++	__le16 frame_ctl;
++	__le16 duration_id;
+ 	u8 addr1[ETH_ALEN];
+ 	u8 addr2[ETH_ALEN];
+ 	u8 addr3[ETH_ALEN];
+-	u16 seq_ctl;
++	__le16 seq_ctl;
+ 	u8 addr4[ETH_ALEN];
+-} __packed;
++} __packed __aligned(2);
+ 
+ struct ieee80211_hdr_3addr {
+-	u16 frame_ctl;
+-	u16 duration_id;
++	__le16 frame_ctl;
++	__le16 duration_id;
+ 	u8 addr1[ETH_ALEN];
+ 	u8 addr2[ETH_ALEN];
+ 	u8 addr3[ETH_ALEN];
+-	u16 seq_ctl;
+-} __packed;
++	__le16 seq_ctl;
++} __packed __aligned(2);
+ 
+ 
+ struct	ieee80211_hdr_qos {
+-	u16 frame_ctl;
+-	u16 duration_id;
++	__le16 frame_ctl;
++	__le16 duration_id;
+ 	u8 addr1[ETH_ALEN];
+ 	u8 addr2[ETH_ALEN];
+ 	u8 addr3[ETH_ALEN];
+-	u16 seq_ctl;
++	__le16 seq_ctl;
+ 	u8 addr4[ETH_ALEN];
+-	u16	qc;
+-}  __packed;
++	__le16	qc;
++} __packed __aligned(2);
+ 
+ struct  ieee80211_hdr_3addr_qos {
+-	u16 frame_ctl;
+-	u16 duration_id;
++	__le16 frame_ctl;
++	__le16 duration_id;
+ 	u8  addr1[ETH_ALEN];
+ 	u8  addr2[ETH_ALEN];
+ 	u8  addr3[ETH_ALEN];
+-	u16 seq_ctl;
+-	u16 qc;
++	__le16 seq_ctl;
++	__le16 qc;
+ }  __packed;
+ 
+ struct eapol {
+ 	u8 snap[6];
+-	u16 ethertype;
++	__be16 ethertype;
+ 	u8 version;
+ 	u8 type;
+-	u16 length;
++	__le16 length;
+ } __packed;
+ 
+ 
+@@ -554,13 +554,13 @@ Total: 28-2340 bytes
+ */
+ 
+ struct ieee80211_header_data {
+-	u16 frame_ctl;
+-	u16 duration_id;
++	__le16 frame_ctl;
++	__le16 duration_id;
+ 	u8 addr1[6];
+ 	u8 addr2[6];
+ 	u8 addr3[6];
+-	u16 seq_ctrl;
+-};
++	__le16 seq_ctrl;
++} __packed __aligned(2);
+ 
+ #define BEACON_PROBE_SSID_ID_POSITION 12
+ 
+@@ -592,18 +592,18 @@ struct ieee80211_info_element {
+ /*
+  * These are the data types that can make up management packets
+  *
+-	u16 auth_algorithm;
+-	u16 auth_sequence;
+-	u16 beacon_interval;
+-	u16 capability;
++	__le16 auth_algorithm;
++	__le16 auth_sequence;
++	__le16 beacon_interval;
++	__le16 capability;
+ 	u8 current_ap[ETH_ALEN];
+-	u16 listen_interval;
++	__le16 listen_interval;
+ 	struct {
+ 		u16 association_id:14, reserved:2;
+ 	} __packed;
+-	u32 time_stamp[2];
+-	u16 reason;
+-	u16 status;
++	__le32 time_stamp[2];
++	__le16 reason;
++	__le16 status;
+ */
+ 
+ #define IEEE80211_DEFAULT_TX_ESSID "Penguin"
+@@ -611,16 +611,16 @@ struct ieee80211_info_element {
+ 
+ struct ieee80211_authentication {
+ 	struct ieee80211_header_data header;
+-	u16 algorithm;
+-	u16 transaction;
+-	u16 status;
++	__le16 algorithm;
++	__le16 transaction;
++	__le16 status;
+ } __packed;
+ 
+ struct ieee80211_probe_response {
+ 	struct ieee80211_header_data header;
+-	u32 time_stamp[2];
+-	u16 beacon_interval;
+-	u16 capability;
++	__le32 time_stamp[2];
++	__le16 beacon_interval;
++	__le16 capability;
+ 	struct ieee80211_info_element info_element;
+ } __packed;
+ 
+@@ -630,16 +630,16 @@ struct ieee80211_probe_request {
+ 
+ struct ieee80211_assoc_request_frame {
+ 	struct ieee80211_hdr_3addr header;
+-	u16 capability;
+-	u16 listen_interval;
++	__le16 capability;
++	__le16 listen_interval;
+ 	struct ieee80211_info_element_hdr info_element;
+ } __packed;
+ 
+ struct ieee80211_assoc_response_frame {
+ 	struct ieee80211_hdr_3addr header;
+-	u16 capability;
+-	u16 status;
+-	u16 aid;
++	__le16 capability;
++	__le16 status;
++	__le16 aid;
+ } __packed;
+ 
+ struct ieee80211_txb {
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 7444640a7453..89d01943ca93 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -1755,7 +1755,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	struct iscsi_tm *hdr;
+ 	int out_of_order_cmdsn = 0, ret;
+ 	bool sess_ref = false;
+-	u8 function;
++	u8 function, tcm_function = TMR_UNKNOWN;
+ 
+ 	hdr			= (struct iscsi_tm *) buf;
+ 	hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
+@@ -1801,10 +1801,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	 * LIO-Target $FABRIC_MOD
+ 	 */
+ 	if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
+-
+-		u8 tcm_function;
+-		int ret;
+-
+ 		transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ 				      conn->sess->se_sess, 0, DMA_NONE,
+ 				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+@@ -1840,15 +1836,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 			return iscsit_add_reject_cmd(cmd,
+ 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ 		}
+-
+-		ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
+-					 tcm_function, GFP_KERNEL);
+-		if (ret < 0)
+-			return iscsit_add_reject_cmd(cmd,
++	}
++	ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
++				 GFP_KERNEL);
++	if (ret < 0)
++		return iscsit_add_reject_cmd(cmd,
+ 				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ 
+-		cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+-	}
++	cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
+ 
+ 	cmd->iscsi_opcode	= ISCSI_OP_SCSI_TMFUNC;
+ 	cmd->i_state		= ISTATE_SEND_TASKMGTRSP;
+@@ -3449,7 +3444,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+ 
+ 			if ((tpg->tpg_attrib.generate_node_acls == 0) &&
+ 			    (tpg->tpg_attrib.demo_mode_discovery == 0) &&
+-			    (!core_tpg_get_initiator_node_acl(&tpg->tpg_se_tpg,
++			    (!target_tpg_has_node_acl(&tpg->tpg_se_tpg,
+ 				cmd->conn->sess->sess_ops->InitiatorName))) {
+ 				continue;
+ 			}
+@@ -3969,6 +3964,8 @@ int iscsi_target_tx_thread(void *arg)
+ {
+ 	int ret = 0;
+ 	struct iscsi_conn *conn = arg;
++	bool conn_freed = false;
++
+ 	/*
+ 	 * Allow ourselves to be interrupted by SIGINT so that a
+ 	 * connection recovery / failure event can be triggered externally.
+@@ -3994,12 +3991,14 @@ get_immediate:
+ 			goto transport_err;
+ 
+ 		ret = iscsit_handle_response_queue(conn);
+-		if (ret == 1)
++		if (ret == 1) {
+ 			goto get_immediate;
+-		else if (ret == -ECONNRESET)
++		} else if (ret == -ECONNRESET) {
++			conn_freed = true;
+ 			goto out;
+-		else if (ret < 0)
++		} else if (ret < 0) {
+ 			goto transport_err;
++		}
+ 	}
+ 
+ transport_err:
+@@ -4009,8 +4008,13 @@ transport_err:
+ 	 * responsible for cleaning up the early connection failure.
+ 	 */
+ 	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
+-		iscsit_take_action_for_connection_exit(conn);
++		iscsit_take_action_for_connection_exit(conn, &conn_freed);
+ out:
++	if (!conn_freed) {
++		while (!kthread_should_stop()) {
++			msleep(100);
++		}
++	}
+ 	return 0;
+ }
+ 
+@@ -4117,6 +4121,7 @@ int iscsi_target_rx_thread(void *arg)
+ 	u32 checksum = 0, digest = 0;
+ 	struct iscsi_conn *conn = arg;
+ 	struct kvec iov;
++	bool conn_freed = false;
+ 	/*
+ 	 * Allow ourselves to be interrupted by SIGINT so that a
+ 	 * connection recovery / failure event can be triggered externally.
+@@ -4128,7 +4133,7 @@ int iscsi_target_rx_thread(void *arg)
+ 	 */
+ 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ 	if (rc < 0 || iscsi_target_check_conn_state(conn))
+-		return 0;
++		goto out;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ 		struct completion comp;
+@@ -4213,7 +4218,13 @@ int iscsi_target_rx_thread(void *arg)
+ transport_err:
+ 	if (!signal_pending(current))
+ 		atomic_set(&conn->transport_failed, 1);
+-	iscsit_take_action_for_connection_exit(conn);
++	iscsit_take_action_for_connection_exit(conn, &conn_freed);
++out:
++	if (!conn_freed) {
++		while (!kthread_should_stop()) {
++			msleep(100);
++		}
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
+index 10c81298eb0a..84c1c36867ba 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.c
++++ b/drivers/target/iscsi/iscsi_target_erl0.c
+@@ -928,8 +928,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
+ 	}
+ }
+ 
+-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
++void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
+ {
++	*conn_freed = false;
++
+ 	spin_lock_bh(&conn->state_lock);
+ 	if (atomic_read(&conn->connection_exit)) {
+ 		spin_unlock_bh(&conn->state_lock);
+@@ -940,6 +942,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+ 	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ 		spin_unlock_bh(&conn->state_lock);
+ 		iscsit_close_connection(conn);
++		*conn_freed = true;
+ 		return;
+ 	}
+ 
+@@ -953,6 +956,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+ 	spin_unlock_bh(&conn->state_lock);
+ 
+ 	iscsit_handle_connection_cleanup(conn);
++	*conn_freed = true;
+ }
+ 
+ /*
+diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
+index 21acc9a06376..42673fb44790 100644
+--- a/drivers/target/iscsi/iscsi_target_erl0.h
++++ b/drivers/target/iscsi/iscsi_target_erl0.h
+@@ -9,7 +9,7 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *);
+ extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
+ extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
+ extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
+-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
++extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
+ extern int iscsit_recover_from_unknown_opcode(struct iscsi_conn *);
+ 
+ #endif   /*** ISCSI_TARGET_ERL0_H ***/
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index ee3a4bb9fba7..0e75ea8b2a69 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1510,5 +1510,9 @@ int iscsi_target_login_thread(void *arg)
+ 			break;
+ 	}
+ 
++	while (!kthread_should_stop()) {
++		msleep(100);
++	}
++
+ 	return 0;
+ }
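
The msleep() loops added to the iSCSI rx, tx and login threads above all
enforce the same kthread contract: a thread started with kthread_run()
must not return before kthread_stop() has been called on it, otherwise
the stopper acts on a task that is already gone, so a thread that
finishes early parks itself until it is told to stop. A rough userspace
analogue with pthreads -- the stop flag stands in for kthread_should_stop()
and everything here is illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool should_stop;

static void *worker(void *arg)
{
	/* ... the thread's real work failed or finished early ... */
	while (!atomic_load(&should_stop))	/* park until stopped */
		usleep(100 * 1000);		/* like msleep(100) */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);
	atomic_store(&should_stop, 1);	/* plays the role of kthread_stop() */
	pthread_join(t, NULL);
	puts("worker stopped cleanly");
	return 0;
}
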
+diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
+index 1f7886bb16bf..65e0a06ed916 100644
+--- a/drivers/target/target_core_fabric_configfs.c
++++ b/drivers/target/target_core_fabric_configfs.c
+@@ -98,6 +98,11 @@ static int target_fabric_mappedlun_link(
+ 				"_tpg does not exist\n");
+ 		return -EINVAL;
+ 	}
++	if (lun->lun_shutdown) {
++		pr_err("Unable to create mappedlun symlink because"
++			" lun->lun_shutdown=true\n");
++		return -EINVAL;
++	}
+ 	se_tpg = lun->lun_sep->sep_tpg;
+ 
+ 	nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 8bf7a06c25a9..5de51c51c37d 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -768,59 +768,6 @@ sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+ 	return 0;
+ }
+ 
+-/**
+- * sbc_parse_verify - parse VERIFY, VERIFY_16 and WRITE VERIFY commands
+- * @cmd:     (in)  structure that describes the SCSI command to be parsed.
+- * @sectors: (out) Number of logical blocks on the storage medium that will be
+- *           affected by the SCSI command.
+- * @bufflen: (out) Expected length of the SCSI Data-Out buffer.
+- */
+-static sense_reason_t sbc_parse_verify(struct se_cmd *cmd, int *sectors,
+-				       u32 *bufflen)
+-{
+-	struct se_device *dev = cmd->se_dev;
+-	u8 *cdb = cmd->t_task_cdb;
+-	u8 bytchk = (cdb[1] >> 1) & 3;
+-	sense_reason_t ret;
+-
+-	switch (cdb[0]) {
+-	case VERIFY:
+-	case WRITE_VERIFY:
+-		*sectors = transport_get_sectors_10(cdb);
+-		cmd->t_task_lba = transport_lba_32(cdb);
+-		break;
+-	case VERIFY_16:
+-		*sectors = transport_get_sectors_16(cdb);
+-		cmd->t_task_lba = transport_lba_64(cdb);
+-		break;
+-	default:
+-		WARN_ON_ONCE(true);
+-		return TCM_UNSUPPORTED_SCSI_OPCODE;
+-	}
+-
+-	if (sbc_check_dpofua(dev, cmd, cdb))
+-		return TCM_INVALID_CDB_FIELD;
+-
+-	ret = sbc_check_prot(dev, cmd, cdb, *sectors, true);
+-	if (ret)
+-		return ret;
+-
+-	switch (bytchk) {
+-	case 0:
+-		*bufflen = 0;
+-		break;
+-	case 1:
+-		*bufflen = sbc_get_size(cmd, *sectors);
+-		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+-		break;
+-	default:
+-		pr_err("Unsupported BYTCHK value %d for SCSI opcode %#x\n",
+-		       bytchk, cdb[0]);
+-		return TCM_INVALID_CDB_FIELD;
+-	}
+-	return TCM_NO_SENSE;
+-}
+-
+ sense_reason_t
+ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ {
+@@ -891,6 +838,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 		cmd->execute_cmd = sbc_execute_rw;
+ 		break;
+ 	case WRITE_10:
++	case WRITE_VERIFY:
+ 		sectors = transport_get_sectors_10(cdb);
+ 		cmd->t_task_lba = transport_lba_32(cdb);
+ 
+@@ -905,12 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 		cmd->execute_rw = ops->execute_rw;
+ 		cmd->execute_cmd = sbc_execute_rw;
+ 		break;
+-	case WRITE_VERIFY:
+-		ret = sbc_parse_verify(cmd, &sectors, &size);
+-		if (ret)
+-			return ret;
+-		cmd->execute_cmd = sbc_execute_rw;
+-		goto check_lba;
+ 	case WRITE_12:
+ 		sectors = transport_get_sectors_12(cdb);
+ 		cmd->t_task_lba = transport_lba_32(cdb);
+@@ -1110,9 +1052,14 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
+ 		break;
+ 	case VERIFY:
+ 	case VERIFY_16:
+-		ret = sbc_parse_verify(cmd, &sectors, &size);
+-		if (ret)
+-			return ret;
++		size = 0;
++		if (cdb[0] == VERIFY) {
++			sectors = transport_get_sectors_10(cdb);
++			cmd->t_task_lba = transport_lba_32(cdb);
++		} else {
++			sectors = transport_get_sectors_16(cdb);
++			cmd->t_task_lba = transport_lba_64(cdb);
++		}
+ 		cmd->execute_cmd = sbc_emulate_noop;
+ 		goto check_lba;
+ 	case REZERO_UNIT:
+diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
+index 47f064415bf6..707717221b49 100644
+--- a/drivers/target/target_core_tpg.c
++++ b/drivers/target/target_core_tpg.c
+@@ -110,9 +110,21 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
+ 	unsigned char *initiatorname)
+ {
+ 	struct se_node_acl *acl;
+-
++	/*
++	 * Obtain se_node_acl->acl_kref using fabric driver provided
++	 * initiatorname[] during node acl endpoint lookup driven by
++	 * new se_session login.
++	 *
++	 * The reference is held until se_session shutdown -> release
++	 * occurs via fabric driver invoked transport_deregister_session()
++	 * or transport_free_session() code.
++	 */
+ 	spin_lock_irq(&tpg->acl_node_lock);
+ 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
++	if (acl) {
++		if (!kref_get_unless_zero(&acl->acl_kref))
++			acl = NULL;
++	}
+ 	spin_unlock_irq(&tpg->acl_node_lock);
+ 
+ 	return acl;
+@@ -254,6 +266,25 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
+ 	return 0;
+ }
+ 
++bool target_tpg_has_node_acl(struct se_portal_group *tpg,
++			     const char *initiatorname)
++{
++	struct se_node_acl *acl;
++	bool found = false;
++
++	spin_lock_irq(&tpg->acl_node_lock);
++	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
++		if (!strcmp(acl->initiatorname, initiatorname)) {
++			found = true;
++			break;
++		}
++	}
++	spin_unlock_irq(&tpg->acl_node_lock);
++
++	return found;
++}
++EXPORT_SYMBOL(target_tpg_has_node_acl);
++
+ /*	core_tpg_check_initiator_node_acl()
+  *
+  *
+@@ -274,10 +305,19 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
+ 	acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ 	if (!acl)
+ 		return NULL;
++	/*
++	 * When allocating a dynamically generated node_acl, go ahead
++	 * and take the extra kref now before returning to the fabric
++	 * driver caller.
++	 *
++	 * Note this reference will be released at session shutdown
++	 * time within transport_free_session() code.
++	 */
++	kref_init(&acl->acl_kref);
++	kref_get(&acl->acl_kref);
+ 
+ 	INIT_LIST_HEAD(&acl->acl_list);
+ 	INIT_LIST_HEAD(&acl->acl_sess_list);
+-	kref_init(&acl->acl_kref);
+ 	init_completion(&acl->acl_free_comp);
+ 	spin_lock_init(&acl->device_list_lock);
+ 	spin_lock_init(&acl->nacl_sess_lock);
+@@ -460,7 +500,7 @@ int core_tpg_del_initiator_node_acl(
+ 	if (acl->dynamic_node_acl) {
+ 		acl->dynamic_node_acl = 0;
+ 	}
+-	list_del(&acl->acl_list);
++	list_del_init(&acl->acl_list);
+ 	tpg->num_node_acls--;
+ 	spin_unlock_irq(&tpg->acl_node_lock);
+ 
+@@ -761,7 +801,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
+ 	spin_lock_irq(&se_tpg->acl_node_lock);
+ 	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
+ 			acl_list) {
+-		list_del(&nacl->acl_list);
++		list_del_init(&nacl->acl_list);
+ 		se_tpg->num_node_acls--;
+ 		spin_unlock_irq(&se_tpg->acl_node_lock);
+ 
+@@ -843,6 +883,8 @@ void core_tpg_remove_lun(
+ 	struct se_portal_group *tpg,
+ 	struct se_lun *lun)
+ {
++	lun->lun_shutdown = true;
++
+ 	core_clear_lun_from_tpg(lun, tpg);
+ 	transport_clear_lun_ref(lun);
+ 
+@@ -850,6 +892,7 @@ void core_tpg_remove_lun(
+ 
+ 	spin_lock(&tpg->tpg_lun_lock);
+ 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
++	lun->lun_shutdown = false;
+ 	spin_unlock(&tpg->tpg_lun_lock);
+ 
+ 	percpu_ref_exit(&lun->lun_ref);
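
The core_tpg_get_initiator_node_acl() hunk above takes the ACL reference
with kref_get_unless_zero() while acl_node_lock is still held, so a
lookup can never hand back an ACL whose final reference has already been
dropped by a concurrent shutdown. A compact sketch of that
lookup-under-lock pattern; the types and helpers are hypothetical
stand-ins for kref:

#include <stdatomic.h>
#include <stdio.h>

struct obj { atomic_int refcount; };

/* Returns 1 on success, 0 if the object is already on its way to free. */
static int get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->refcount);

	while (v != 0)
		if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
			return 1;
	return 0;
}

static struct obj *lookup(struct obj *candidate)
{
	/* the caller is assumed to hold the list lock here */
	if (candidate && !get_unless_zero(candidate))
		candidate = NULL;	/* found, but already being freed */
	return candidate;
}

int main(void)
{
	struct obj live = { 1 }, dying = { 0 };

	printf("live:  %s\n", lookup(&live) ? "got ref" : "NULL");
	printf("dying: %s\n", lookup(&dying) ? "got ref" : "NULL");
	return 0;
}
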
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index b7d27b816359..958b0dc8ada7 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -359,7 +359,6 @@ void __transport_register_session(
+ 					&buf[0], PR_REG_ISID_LEN);
+ 			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
+ 		}
+-		kref_get(&se_nacl->acl_kref);
+ 
+ 		spin_lock_irq(&se_nacl->nacl_sess_lock);
+ 		/*
+@@ -448,14 +447,30 @@ static void target_complete_nacl(struct kref *kref)
+ {
+ 	struct se_node_acl *nacl = container_of(kref,
+ 				struct se_node_acl, acl_kref);
++	struct se_portal_group *se_tpg = nacl->se_tpg;
++	const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
++	unsigned long flags;
++
++	if (!nacl->dynamic_stop) {
++		complete(&nacl->acl_free_comp);
++		return;
++	}
+ 
+-	complete(&nacl->acl_free_comp);
++	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
++	list_del_init(&nacl->acl_list);
++	se_tpg->num_node_acls--;
++	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
++
++	core_tpg_wait_for_nacl_pr_ref(nacl);
++	core_free_device_list_for_node(nacl, se_tpg);
++	se_tfo->tpg_release_fabric_acl(se_tpg, nacl);
+ }
+ 
+ void target_put_nacl(struct se_node_acl *nacl)
+ {
+ 	kref_put(&nacl->acl_kref, target_complete_nacl);
+ }
++EXPORT_SYMBOL(target_put_nacl);
+ 
+ void transport_deregister_session_configfs(struct se_session *se_sess)
+ {
+@@ -488,6 +503,42 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
+ 
+ void transport_free_session(struct se_session *se_sess)
+ {
++	struct se_node_acl *se_nacl = se_sess->se_node_acl;
++
++	/*
++	 * Drop the se_node_acl->nacl_kref obtained from within
++	 * core_tpg_get_initiator_node_acl().
++	 */
++	if (se_nacl) {
++		struct se_portal_group *se_tpg = se_nacl->se_tpg;
++		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
++		unsigned long flags;
++
++		se_sess->se_node_acl = NULL;
++
++		/*
++		 * Also determine if we need to drop the extra ->acl_kref if
++		 * the ACL had been previously dynamically generated, and
++		 * the endpoint is not caching dynamic ACLs.
++		 */
++		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
++		if (se_nacl->dynamic_node_acl &&
++		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
++			spin_lock(&se_nacl->nacl_sess_lock);
++			if (list_empty(&se_nacl->acl_sess_list))
++				se_nacl->dynamic_stop = true;
++			spin_unlock(&se_nacl->nacl_sess_lock);
++
++			if (se_nacl->dynamic_stop)
++				list_del_init(&se_nacl->acl_list);
++		}
++		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
++
++		if (se_nacl->dynamic_stop)
++			target_put_nacl(se_nacl);
++
++		target_put_nacl(se_nacl);
++	}
+ 	if (se_sess->sess_cmd_map) {
+ 		percpu_ida_destroy(&se_sess->sess_tag_pool);
+ 		if (is_vmalloc_addr(se_sess->sess_cmd_map))
+@@ -502,16 +553,12 @@ EXPORT_SYMBOL(transport_free_session);
+ void transport_deregister_session(struct se_session *se_sess)
+ {
+ 	struct se_portal_group *se_tpg = se_sess->se_tpg;
+-	const struct target_core_fabric_ops *se_tfo;
+-	struct se_node_acl *se_nacl;
+ 	unsigned long flags;
+-	bool comp_nacl = true;
+ 
+ 	if (!se_tpg) {
+ 		transport_free_session(se_sess);
+ 		return;
+ 	}
+-	se_tfo = se_tpg->se_tpg_tfo;
+ 
+ 	spin_lock_irqsave(&se_tpg->session_lock, flags);
+ 	list_del(&se_sess->sess_list);
+@@ -519,37 +566,16 @@ void transport_deregister_session(struct se_session *se_sess)
+ 	se_sess->fabric_sess_ptr = NULL;
+ 	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
+ 
+-	/*
+-	 * Determine if we need to do extra work for this initiator node's
+-	 * struct se_node_acl if it had been previously dynamically generated.
+-	 */
+-	se_nacl = se_sess->se_node_acl;
+-
+-	spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+-	if (se_nacl && se_nacl->dynamic_node_acl) {
+-		if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
+-			list_del(&se_nacl->acl_list);
+-			se_tpg->num_node_acls--;
+-			spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+-			core_tpg_wait_for_nacl_pr_ref(se_nacl);
+-			core_free_device_list_for_node(se_nacl, se_tpg);
+-			se_tfo->tpg_release_fabric_acl(se_tpg, se_nacl);
+-
+-			comp_nacl = false;
+-			spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
+-		}
+-	}
+-	spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
+-
+ 	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ 		se_tpg->se_tpg_tfo->get_fabric_name());
+ 	/*
+ 	 * If the last kref is dropping now for an explicit NodeACL, wake the
+ 	 * sleeping ->acl_free_comp caller in the configfs se_node_acl->acl_group
+-	 * removal context.
++	 * removal context from within transport_free_session() code.
++	 *
++	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
++	 * to release all remaining generate_node_acl=1 created ACL resources.
+ 	 */
+-	if (se_nacl && comp_nacl)
+-		target_put_nacl(se_nacl);
+ 
+ 	transport_free_session(se_sess);
+ }
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 6b07b1040dbb..b8e618ab2273 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -708,7 +708,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ 	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+ 		up->efr |= UART_EFR_RTS;
+ 	else
+-		up->efr &= UART_EFR_RTS;
++		up->efr &= ~UART_EFR_RTS;
+ 	serial_out(up, UART_EFR, up->efr);
+ 	serial_out(up, UART_LCR, lcr);
+ 
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index e7d6566fafaf..19316609d4f9 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -171,18 +171,17 @@ static struct plat_sci_reg sci_regmap[SCIx_NR_REGTYPES][SCIx_NR_REGS] = {
+ 	},
+ 
+ 	/*
+-	 * Common definitions for legacy IrDA ports, dependent on
+-	 * regshift value.
++	 * Common definitions for legacy IrDA ports.
+ 	 */
+ 	[SCIx_IRDA_REGTYPE] = {
+ 		[SCSMR]		= { 0x00,  8 },
+-		[SCBRR]		= { 0x01,  8 },
+-		[SCSCR]		= { 0x02,  8 },
+-		[SCxTDR]	= { 0x03,  8 },
+-		[SCxSR]		= { 0x04,  8 },
+-		[SCxRDR]	= { 0x05,  8 },
+-		[SCFCR]		= { 0x06,  8 },
+-		[SCFDR]		= { 0x07, 16 },
++		[SCBRR]		= { 0x02,  8 },
++		[SCSCR]		= { 0x04,  8 },
++		[SCxTDR]	= { 0x06,  8 },
++		[SCxSR]		= { 0x08, 16 },
++		[SCxRDR]	= { 0x0a,  8 },
++		[SCFCR]		= { 0x0c,  8 },
++		[SCFDR]		= { 0x0e, 16 },
+ 		[SCTFDR]	= sci_reg_invalid,
+ 		[SCRFDR]	= sci_reg_invalid,
+ 		[SCSPTR]	= sci_reg_invalid,
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a235e9ab932c..94a15883f8cc 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1649,6 +1649,18 @@ static int proc_unlinkurb(struct usb_dev_state *ps, void __user *arg)
+ 	return 0;
+ }
+ 
++static void compute_isochronous_actual_length(struct urb *urb)
++{
++	unsigned int i;
++
++	if (urb->number_of_packets > 0) {
++		urb->actual_length = 0;
++		for (i = 0; i < urb->number_of_packets; i++)
++			urb->actual_length +=
++					urb->iso_frame_desc[i].actual_length;
++	}
++}
++
+ static int processcompl(struct async *as, void __user * __user *arg)
+ {
+ 	struct urb *urb = as->urb;
+@@ -1656,6 +1668,7 @@ static int processcompl(struct async *as, void __user * __user *arg)
+ 	void __user *addr = as->userurb;
+ 	unsigned int i;
+ 
++	compute_isochronous_actual_length(urb);
+ 	if (as->userbuffer && urb->actual_length) {
+ 		if (copy_urb_data_to_user(as->userbuffer, urb))
+ 			goto err_out;
+@@ -1825,6 +1838,7 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
+ 	void __user *addr = as->userurb;
+ 	unsigned int i;
+ 
++	compute_isochronous_actual_length(urb);
+ 	if (as->userbuffer && urb->actual_length) {
+ 		if (copy_urb_data_to_user(as->userbuffer, urb))
+ 			return -EFAULT;
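
The compute_isochronous_actual_length() helper added above exists because
the top-level urb->actual_length is not maintained for isochronous
transfers; only the per-packet iso_frame_desc[].actual_length fields are,
so the total must be rebuilt before completion data is copied back to
userspace. A self-contained sketch of that summation (the struct below is
a trimmed stand-in, not the real struct urb):

#include <stdio.h>

struct iso_frame { unsigned int actual_length; };

struct urb {
	int number_of_packets;
	unsigned int actual_length;
	struct iso_frame iso_frame_desc[8];
};

static void compute_iso_actual_length(struct urb *urb)
{
	int i;

	if (urb->number_of_packets <= 0)
		return;
	urb->actual_length = 0;
	for (i = 0; i < urb->number_of_packets; i++)
		urb->actual_length += urb->iso_frame_desc[i].actual_length;
}

int main(void)
{
	struct urb u = { .number_of_packets = 3,
			 .iso_frame_desc = { {192}, {192}, {188} } };

	compute_iso_actual_length(&u);
	printf("total = %u\n", u.actual_length);	/* 572 */
	return 0;
}
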
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 78f357b1a8fd..de0843cdeb9f 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2927,6 +2927,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
+ 	}
+ 
+ 	usb_put_invalidate_rhdev(hcd);
++	hcd->flags = 0;
+ }
+ EXPORT_SYMBOL_GPL(usb_remove_hcd);
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index a6aaf2f193a4..37c418e581fb 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -221,6 +221,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Corsair Strafe RGB */
+ 	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* Corsair K70 LUX */
++	{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* MIDI keyboard WORLDE MINI */
+ 	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 58b4c2828ee9..9176837442e9 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -183,12 +183,13 @@ found:
+ 			return tmp;
+ 	}
+ 
+-	if (in) {
++	if (in)
+ 		dev->in_pipe = usb_rcvbulkpipe(udev,
+ 			in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
++	if (out)
+ 		dev->out_pipe = usb_sndbulkpipe(udev,
+ 			out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+-	}
++
+ 	if (iso_in) {
+ 		dev->iso_in = &iso_in->desc;
+ 		dev->in_iso_pipe = usb_rcvisocpipe(udev,
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index 37d0e8cc7af6..2220c1b9df10 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -138,6 +138,7 @@ struct garmin_data {
+ 	__u8   privpkt[4*6];
+ 	spinlock_t lock;
+ 	struct list_head pktlist;
++	struct usb_anchor write_urbs;
+ };
+ 
+ 
+@@ -906,13 +907,19 @@ static int garmin_init_session(struct usb_serial_port *port)
+ 					sizeof(GARMIN_START_SESSION_REQ), 0);
+ 
+ 			if (status < 0)
+-				break;
++				goto err_kill_urbs;
+ 		}
+ 
+ 		if (status > 0)
+ 			status = 0;
+ 	}
+ 
++	return status;
++
++err_kill_urbs:
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
++	usb_kill_urb(port->interrupt_in_urb);
++
+ 	return status;
+ }
+ 
+@@ -931,7 +938,6 @@ static int garmin_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	spin_unlock_irqrestore(&garmin_data_p->lock, flags);
+ 
+ 	/* shutdown any bulk reads that might be going on */
+-	usb_kill_urb(port->write_urb);
+ 	usb_kill_urb(port->read_urb);
+ 
+ 	if (garmin_data_p->state == STATE_RESET)
+@@ -954,7 +960,7 @@ static void garmin_close(struct usb_serial_port *port)
+ 
+ 	/* shutdown our urbs */
+ 	usb_kill_urb(port->read_urb);
+-	usb_kill_urb(port->write_urb);
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ 
+ 	/* keep reset state so we know that we must start a new session */
+ 	if (garmin_data_p->state != STATE_RESET)
+@@ -1038,12 +1044,14 @@ static int garmin_write_bulk(struct usb_serial_port *port,
+ 	}
+ 
+ 	/* send it down the pipe */
++	usb_anchor_urb(urb, &garmin_data_p->write_urbs);
+ 	status = usb_submit_urb(urb, GFP_ATOMIC);
+ 	if (status) {
+ 		dev_err(&port->dev,
+ 		   "%s - usb_submit_urb(write bulk) failed with status = %d\n",
+ 				__func__, status);
+ 		count = status;
++		usb_unanchor_urb(urb);
+ 		kfree(buffer);
+ 	}
+ 
+@@ -1402,9 +1410,16 @@ static int garmin_port_probe(struct usb_serial_port *port)
+ 	garmin_data_p->state = 0;
+ 	garmin_data_p->flags = 0;
+ 	garmin_data_p->count = 0;
++	init_usb_anchor(&garmin_data_p->write_urbs);
+ 	usb_set_serial_port_data(port, garmin_data_p);
+ 
+ 	status = garmin_init_session(port);
++	if (status)
++		goto err_free;
++
++	return 0;
++err_free:
++	kfree(garmin_data_p);
+ 
+ 	return status;
+ }
+@@ -1414,6 +1429,7 @@ static int garmin_port_remove(struct usb_serial_port *port)
+ {
+ 	struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
+ 
++	usb_kill_anchored_urbs(&garmin_data_p->write_urbs);
+ 	usb_kill_urb(port->interrupt_in_urb);
+ 	del_timer_sync(&garmin_data_p->timer);
+ 	kfree(garmin_data_p);
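
The garmin_gps change above switches from killing a single remembered
write URB to anchoring every submitted write on a usb_anchor, so the
open, close, probe-failure and remove paths can cancel all outstanding
writes at once. A toy userspace version of that bookkeeping idea -- a
singly linked "anchor" of in-flight requests, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int id;
};

struct anchor { struct req *head; };

static void anchor_req(struct anchor *a, struct req *r)
{
	r->next = a->head;	/* track the request while it is in flight */
	a->head = r;
}

static void kill_anchored(struct anchor *a)
{
	while (a->head) {	/* cancel everything still outstanding */
		struct req *r = a->head;

		a->head = r->next;
		printf("cancel req %d\n", r->id);
		free(r);
	}
}

int main(void)
{
	struct anchor a = { NULL };
	int i;

	for (i = 0; i < 3; i++) {
		struct req *r = malloc(sizeof(*r));

		r->id = i;
		anchor_req(&a, r);
	}
	kill_anchored(&a);
	return 0;
}
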
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 996dc09b00b8..7b3ce6e42fc6 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -147,6 +147,7 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x114f, 0x68a2)},	/* Sierra Wireless MC7750 */
+ 	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
+ 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
++	{DEVICE_SWI(0x1199, 0x901e)},	/* Sierra Wireless EM7355 QDL */
+ 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
+ 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
+diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
+index dd88ba1d71ce..35373e2065b2 100644
+--- a/drivers/video/backlight/adp5520_bl.c
++++ b/drivers/video/backlight/adp5520_bl.c
+@@ -332,10 +332,18 @@ static int adp5520_bl_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	platform_set_drvdata(pdev, bl);
+-	ret |= adp5520_bl_setup(bl);
++	ret = adp5520_bl_setup(bl);
++	if (ret) {
++		dev_err(&pdev->dev, "failed to setup\n");
++		if (data->pdata->en_ambl_sens)
++			sysfs_remove_group(&bl->dev.kobj,
++					&adp5520_bl_attr_group);
++		return ret;
++	}
++
+ 	backlight_update_status(bl);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static int adp5520_bl_remove(struct platform_device *pdev)
+diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
+index 7de847df224f..4b40c6a4d441 100644
+--- a/drivers/video/backlight/lcd.c
++++ b/drivers/video/backlight/lcd.c
+@@ -226,6 +226,8 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+ 	dev_set_name(&new_ld->dev, "%s", name);
+ 	dev_set_drvdata(&new_ld->dev, devdata);
+ 
++	new_ld->ops = ops;
++
+ 	rc = device_register(&new_ld->dev);
+ 	if (rc) {
+ 		put_device(&new_ld->dev);
+@@ -238,8 +240,6 @@ struct lcd_device *lcd_device_register(const char *name, struct device *parent,
+ 		return ERR_PTR(rc);
+ 	}
+ 
+-	new_ld->ops = ops;
+-
+ 	return new_ld;
+ }
+ EXPORT_SYMBOL(lcd_device_register);
+diff --git a/drivers/video/fbdev/pmag-ba-fb.c b/drivers/video/fbdev/pmag-ba-fb.c
+index 914a52ba8477..77837665ce89 100644
+--- a/drivers/video/fbdev/pmag-ba-fb.c
++++ b/drivers/video/fbdev/pmag-ba-fb.c
+@@ -129,7 +129,7 @@ static struct fb_ops pmagbafb_ops = {
+ /*
+  * Turn the hardware cursor off.
+  */
+-static void __init pmagbafb_erase_cursor(struct fb_info *info)
++static void pmagbafb_erase_cursor(struct fb_info *info)
+ {
+ 	struct pmagbafb_par *par = info->par;
+ 
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index 9e6a85104a20..adc4444ff458 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -275,8 +275,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
+ 	err = xenbus_transaction_start(&xbt);
+ 	if (err)
+ 		return;
+-	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
+-		pr_err("Unable to read sysrq code in control/sysrq\n");
++	err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
++	if (err < 0) {
++		/*
++		 * The Xenstore watch fires directly after registering it and
++		 * after a suspend/resume cycle. So ENOENT is no error but
++		 * might happen in those cases.
++		 */
++		if (err != -ENOENT)
++			pr_err("Error %d reading sysrq code in control/sysrq\n",
++			       err);
+ 		xenbus_transaction_end(xbt, 1);
+ 		return;
+ 	}
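
The sysrq_handler() fix above replaces a boolean test of xenbus_scanf()
with a proper return-code check: scanf-style helpers return a negative
errno on failure or a match count on success, and here -ENOENT is an
expected, benign outcome. A small sketch of that calling convention,
where scan_key() is an invented stand-in for xenbus_scanf():

#include <errno.h>
#include <stdio.h>

/* hypothetical: negative errno on failure, match count on success */
static int scan_key(const char *val, char *out)
{
	if (!val)
		return -ENOENT;
	return sscanf(val, "%c", out);
}

int main(void)
{
	char key;
	int err = scan_key(NULL, &key);	/* node absent */

	if (err < 0) {
		if (err != -ENOENT)	/* ENOENT is benign here */
			fprintf(stderr, "error %d reading key\n", err);
		return 0;
	}
	printf("key = %c\n", key);
	return 0;
}
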
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 3a0e6a031174..004d0e663d9d 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -687,3 +687,22 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
++
++/*
++ * Create userspace mapping for the DMA-coherent memory.
++ * This function should be called with the pages from the current domain only,
++ * passing pages mapped from other domains would lead to memory corruption.
++ */
++int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
++		     unsigned long attrs)
++{
++#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
++	if (__generic_dma_ops(dev)->mmap)
++		return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
++						    dma_addr, size, attrs);
++#endif
++	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 297e05c9e2b0..49a0d6b027c1 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ 	int i;
+ 
+-	if (unlikely(direntry->d_name.len >
++	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
++		     direntry->d_name.len >
+ 		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ 		return -ENAMETOOLONG;
+ 
+@@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ 
+ 	rc = check_name(direntry, tcon);
+ 	if (rc)
+-		goto out_free_xid;
++		goto out;
+ 
+ 	server = tcon->ses->server;
+ 
+diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
+index 9b1ffaa0572e..e362c5dad208 100644
+--- a/fs/coda/upcall.c
++++ b/fs/coda/upcall.c
+@@ -446,8 +446,7 @@ int venus_fsync(struct super_block *sb, struct CodaFid *fid)
+ 	UPARG(CODA_FSYNC);
+ 
+ 	inp->coda_fsync.VFid = *fid;
+-	error = coda_upcall(coda_vcp(sb), sizeof(union inputArgs),
+-			    &outsize, inp);
++	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
+ 
+ 	CODA_FREE(inp, insize);
+ 	return error;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 1cc4bfa49823..443ff49dc36f 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -652,6 +652,20 @@ has_zeroout:
+ 		ret = check_block_validity(inode, map);
+ 		if (ret != 0)
+ 			return ret;
++
++		/*
++		 * Inodes with freshly allocated blocks where contents will be
++		 * visible after transaction commit must be on the transaction's
++		 * ordered data list.
++		 */
++		if (map->m_flags & EXT4_MAP_NEW &&
++		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
++		    !IS_NOQUOTA(inode) &&
++		    ext4_should_order_data(inode)) {
++			ret = ext4_jbd2_file_inode(handle, inode);
++			if (ret)
++				return ret;
++		}
+ 	}
+ 	return retval;
+ }
+@@ -1146,15 +1160,6 @@ static int ext4_write_end(struct file *file,
+ 	int i_size_changed = 0;
+ 
+ 	trace_ext4_write_end(inode, pos, len, copied);
+-	if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
+-		ret = ext4_jbd2_file_inode(handle, inode);
+-		if (ret) {
+-			unlock_page(page);
+-			page_cache_release(page);
+-			goto errout;
+-		}
+-	}
+-
+ 	if (ext4_has_inline_data(inode)) {
+ 		ret = ext4_write_inline_data_end(inode, pos, len,
+ 						 copied, page);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 2524057fd880..07ba42156a6a 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2113,8 +2113,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ 	 * We search using buddy data only if the order of the request
+ 	 * is greater than or equal to sbi->s_mb_order2_reqs.
+ 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
++	 * We also support searching for power-of-two requests only for
++	 * requests up to the maximum buddy size we have constructed.
+ 	 */
+-	if (i >= sbi->s_mb_order2_reqs) {
++	if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
+ 		/*
+ 		 * This should tell if fe_len is exactly power of 2
+ 		 */
+@@ -2176,7 +2178,7 @@ repeat:
+ 			}
+ 
+ 			ac->ac_groups_scanned++;
+-			if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
++			if (cr == 0)
+ 				ext4_mb_simple_scan_group(ac, &e4b);
+ 			else if (cr == 1 && sbi->s_stripe &&
+ 					!(ac->ac_g_ex.fe_len % sbi->s_stripe))
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ccc43e2f07e2..807b1df8e134 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2469,9 +2469,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
+ 
+ 	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
+ 		ret = sbi->s_stripe;
+-	else if (stripe_width <= sbi->s_blocks_per_group)
++	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
+ 		ret = stripe_width;
+-	else if (stride <= sbi->s_blocks_per_group)
++	else if (stride && stride <= sbi->s_blocks_per_group)
+ 		ret = stride;
+ 	else
+ 		ret = 0;
+diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
+index 51dde817e1f2..11cf71ac1cb5 100644
+--- a/fs/fscache/object-list.c
++++ b/fs/fscache/object-list.c
+@@ -330,6 +330,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
+ 	rcu_read_lock();
+ 
+ 	confkey = key->payload.data;
++	if (!confkey) {
++		/* key was revoked */
++		rcu_read_unlock();
++		key_put(key);
++		goto no_config;
++	}
++
+ 	buf = confkey->data;
+ 
+ 	for (len = confkey->datalen - 1; len >= 0; len--) {
+diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
+index 2d7f76e52c37..28d273eb5102 100644
+--- a/fs/ocfs2/alloc.c
++++ b/fs/ocfs2/alloc.c
+@@ -7245,13 +7245,24 @@ out:
+ 
+ static int ocfs2_trim_extent(struct super_block *sb,
+ 			     struct ocfs2_group_desc *gd,
+-			     u32 start, u32 count)
++			     u64 group, u32 start, u32 count)
+ {
+ 	u64 discard, bcount;
++	struct ocfs2_super *osb = OCFS2_SB(sb);
+ 
+ 	bcount = ocfs2_clusters_to_blocks(sb, count);
+-	discard = le64_to_cpu(gd->bg_blkno) +
+-			ocfs2_clusters_to_blocks(sb, start);
++	discard = ocfs2_clusters_to_blocks(sb, start);
++
++	/*
++	 * For the first cluster group, the gd->bg_blkno is not at the start
++	 * of the group, but at an offset from the start. If we add it while
++	 * calculating discard for the first group, we will wrongly start fstrim
++	 * a few blocks after the desired start block and the range can cross
++	 * over into the next cluster group. So, add it only if this is not
++	 * the first cluster group.
++	 */
++	if (group != osb->first_cluster_group_blkno)
++		discard += le64_to_cpu(gd->bg_blkno);
+ 
+ 	trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+ 
+@@ -7259,7 +7270,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
+ }
+ 
+ static int ocfs2_trim_group(struct super_block *sb,
+-			    struct ocfs2_group_desc *gd,
++			    struct ocfs2_group_desc *gd, u64 group,
+ 			    u32 start, u32 max, u32 minbits)
+ {
+ 	int ret = 0, count = 0, next;
+@@ -7278,7 +7289,7 @@ static int ocfs2_trim_group(struct super_block *sb,
+ 		next = ocfs2_find_next_bit(bitmap, max, start);
+ 
+ 		if ((next - start) >= minbits) {
+-			ret = ocfs2_trim_extent(sb, gd,
++			ret = ocfs2_trim_extent(sb, gd, group,
+ 						start, next - start);
+ 			if (ret < 0) {
+ 				mlog_errno(ret);
+@@ -7376,7 +7387,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+ 		}
+ 
+ 		gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+-		cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
++		cnt = ocfs2_trim_group(sb, gd, group,
++				       first_bit, last_bit, minlen);
+ 		brelse(gd_bh);
+ 		gd_bh = NULL;
+ 		if (cnt < 0) {
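
The ocfs2_trim_extent() fix above changes the discard-start arithmetic:
the first cluster group's descriptor block (bg_blkno) sits at an offset
inside the group rather than at its start, so unconditionally adding it
shifted the trim range forward and could spill into the next group. The
arithmetic in isolation, with an invented block layout:

#include <stdio.h>

static unsigned long long trim_start(unsigned long long bg_blkno,
				     unsigned long long first_group_blkno,
				     unsigned long long start_blocks)
{
	unsigned long long discard = start_blocks;

	/* add the group base only when this is not the first group */
	if (bg_blkno != first_group_blkno)
		discard += bg_blkno;
	return discard;
}

int main(void)
{
	/* hypothetical layout: first group descriptor at block 8 */
	printf("%llu\n", trim_start(8, 8, 16));		/* 16, not 24 */
	printf("%llu\n", trim_start(4096, 8, 16));	/* 4112 */
	return 0;
}
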
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index bc06b982e9ea..024128c7f3f2 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1158,6 +1158,13 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 		dquot_initialize(inode);
+ 	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
+ 	if (size_change) {
++		/*
++		 * Here we should wait for dio to finish before taking the
++		 * inode lock, to avoid a deadlock between ocfs2_setattr() and
++		 * ocfs2_dio_end_io_write()
++		 */
++		inode_dio_wait(inode);
++
+ 		status = ocfs2_rw_lock(inode, 1);
+ 		if (status < 0) {
+ 			mlog_errno(status);
+@@ -1177,8 +1184,6 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 		if (status)
+ 			goto bail_unlock;
+ 
+-		inode_dio_wait(inode);
+-
+ 		if (i_size_read(inode) >= attr->ia_size) {
+ 			if (ocfs2_should_order_data(inode)) {
+ 				status = ocfs2_begin_ordered_truncate(inode,
+diff --git a/include/dt-bindings/pinctrl/omap.h b/include/dt-bindings/pinctrl/omap.h
+index 13949259705a..0d4fe32b3ae2 100644
+--- a/include/dt-bindings/pinctrl/omap.h
++++ b/include/dt-bindings/pinctrl/omap.h
+@@ -45,8 +45,8 @@
+ #define PIN_OFF_NONE		0
+ #define PIN_OFF_OUTPUT_HIGH	(OFF_EN | OFFOUT_EN | OFFOUT_VAL)
+ #define PIN_OFF_OUTPUT_LOW	(OFF_EN | OFFOUT_EN)
+-#define PIN_OFF_INPUT_PULLUP	(OFF_EN | OFF_PULL_EN | OFF_PULL_UP)
+-#define PIN_OFF_INPUT_PULLDOWN	(OFF_EN | OFF_PULL_EN)
++#define PIN_OFF_INPUT_PULLUP	(OFF_EN | OFFOUT_EN | OFF_PULL_EN | OFF_PULL_UP)
++#define PIN_OFF_INPUT_PULLDOWN	(OFF_EN | OFFOUT_EN | OFF_PULL_EN)
+ #define PIN_OFF_WAKEUPENABLE	WAKEUP_EN
+ 
+ /*
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index c11f9d1963c3..0fe33734ef41 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3380,6 +3380,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+ 				    unsigned char name_assign_type,
+ 				    void (*setup)(struct net_device *),
+ 				    unsigned int txqs, unsigned int rxqs);
++int dev_get_valid_name(struct net *net, struct net_device *dev,
++		       const char *name);
++
+ #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+ 	alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
+ 
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index d164045e296c..af193347dc4e 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -135,11 +135,7 @@ static inline const char *phy_modes(phy_interface_t interface)
+ /* Used when trying to connect to a specific phy (mii bus id:phy device id) */
+ #define PHY_ID_FMT "%s:%02x"
+ 
+-/*
+- * Need to be a little smaller than phydev->dev.bus_id to leave room
+- * for the ":%02x"
+- */
+-#define MII_BUS_ID_SIZE	(20 - 3)
++#define MII_BUS_ID_SIZE	61
+ 
+ /* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+    IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
+@@ -587,7 +583,7 @@ struct phy_driver {
+ /* A Structure for boards to register fixups with the PHY Lib */
+ struct phy_fixup {
+ 	struct list_head list;
+-	char bus_id[20];
++	char bus_id[MII_BUS_ID_SIZE + 3];
+ 	u32 phy_uid;
+ 	u32 phy_uid_mask;
+ 	int (*run)(struct phy_device *phydev);
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index ca2e26a486ee..495ad8fbe240 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3230,6 +3230,13 @@ static inline void nf_reset_trace(struct sk_buff *skb)
+ #endif
+ }
+ 
++static inline void ipvs_reset(struct sk_buff *skb)
++{
++#if IS_ENABLED(CONFIG_IP_VS)
++	skb->ipvs_property = 0;
++#endif
++}
++
+ /* Note: This doesn't put any conntrack and bridge info in dst. */
+ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
+ 			     bool copy)
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 3d3a365233f0..966d229d4482 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1535,12 +1535,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
+ 	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
+ }
+ 
+-/* Called when old skb is about to be deleted (to be combined with new skb) */
+-static inline void tcp_highest_sack_combine(struct sock *sk,
++/* Called when old skb is about to be deleted and replaced by new skb */
++static inline void tcp_highest_sack_replace(struct sock *sk,
+ 					    struct sk_buff *old,
+ 					    struct sk_buff *new)
+ {
+-	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
++	if (old == tcp_highest_sack(sk))
+ 		tcp_sk(sk)->highest_sack = new;
+ }
+ 
+diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
+index feb58d455560..4b9ee3009aa0 100644
+--- a/include/sound/seq_kernel.h
++++ b/include/sound/seq_kernel.h
+@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
+ #define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS	200
+ 
+ /* max delivery path length */
+-#define SNDRV_SEQ_MAX_HOPS		10
++/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
++#define SNDRV_SEQ_MAX_HOPS		8
+ 
+ /* max size of event size */
+ #define SNDRV_SEQ_MAX_EVENT_LEN		0x3fffffff
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 2b40a1fab293..ad3c146bda32 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -228,6 +228,7 @@ enum tcm_tmreq_table {
+ 	TMR_LUN_RESET		= 5,
+ 	TMR_TARGET_WARM_RESET	= 6,
+ 	TMR_TARGET_COLD_RESET	= 7,
++	TMR_UNKNOWN		= 0xff,
+ };
+ 
+ /* fabric independent task management response values */
+@@ -590,6 +591,7 @@ struct se_node_acl {
+ 	/* Used to signal demo mode created ACL, disabled by default */
+ 	bool			dynamic_node_acl;
+ 	bool			acl_stop:1;
++	bool			dynamic_stop;
+ 	u32			queue_depth;
+ 	u32			acl_index;
+ 	enum target_prot_type	saved_prot_type;
+@@ -723,6 +725,7 @@ struct se_lun {
+ 	u32			lun_access;
+ 	u32			lun_flags;
+ 	u32			unpacked_lun;
++	bool			lun_shutdown;
+ 	atomic_t		lun_acl_count;
+ 	spinlock_t		lun_acl_lock;
+ 	spinlock_t		lun_sep_lock;
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index 24c8d9d0d946..419a8c8d2f3e 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -171,6 +171,8 @@ int	transport_lookup_tmr_lun(struct se_cmd *, u32);
+ 
+ struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ 		unsigned char *);
++bool	target_tpg_has_node_acl(struct se_portal_group *tpg,
++		const char *);
+ struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
+ 		unsigned char *);
+ void	core_tpg_clear_object_luns(struct se_portal_group *);
+diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
+index 8b2eb93ae8ba..fab4fb9c6442 100644
+--- a/include/xen/swiotlb-xen.h
++++ b/include/xen/swiotlb-xen.h
+@@ -58,4 +58,9 @@ xen_swiotlb_dma_supported(struct device *hwdev, u64 mask);
+ 
+ extern int
+ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask);
++
++extern int
++xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
++		     unsigned long attrs);
+ #endif /* __LINUX_SWIOTLB_XEN_H */
+diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
+index 1a000bb050f9..3bb040e347ec 100644
+--- a/lib/asn1_decoder.c
++++ b/lib/asn1_decoder.c
+@@ -219,7 +219,7 @@ next_op:
+ 		hdr = 2;
+ 
+ 		/* Extract a tag from the data */
+-		if (unlikely(dp >= datalen - 1))
++		if (unlikely(datalen - dp < 2))
+ 			goto data_overrun_error;
+ 		tag = data[dp++];
+ 		if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
+@@ -265,7 +265,7 @@ next_op:
+ 				int n = len - 0x80;
+ 				if (unlikely(n > 2))
+ 					goto length_too_long;
+-				if (unlikely(dp >= datalen - n))
++				if (unlikely(n > datalen - dp))
+ 					goto data_overrun_error;
+ 				hdr += n;
+ 				for (len = 0; n > 0; n--) {
+@@ -275,6 +275,9 @@ next_op:
+ 				if (unlikely(len > datalen - dp))
+ 					goto data_overrun_error;
+ 			}
++		} else {
++			if (unlikely(len > datalen - dp))
++				goto data_overrun_error;
+ 		}
+ 
+ 		if (flags & FLAG_CONS) {
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 86374c1c49a4..841191061816 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -65,14 +65,19 @@ static ssize_t trigger_request_store(struct device *dev,
+ 	release_firmware(test_firmware);
+ 	test_firmware = NULL;
+ 	rc = request_firmware(&test_firmware, name, dev);
+-	if (rc)
++	if (rc) {
+ 		pr_info("load of '%s' failed: %d\n", name, rc);
+-	pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0);
++		goto out;
++	}
++	pr_info("loaded: %zu\n", test_firmware->size);
++	rc = count;
++
++out:
+ 	mutex_unlock(&test_fw_mutex);
+ 
+ 	kfree(name);
+ 
+-	return count;
++	return rc;
+ }
+ static DEVICE_ATTR_WO(trigger_request);
+ 
+diff --git a/mm/page_ext.c b/mm/page_ext.c
+index d86fd2f5353f..0380a9d2507d 100644
+--- a/mm/page_ext.c
++++ b/mm/page_ext.c
+@@ -102,7 +102,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	struct page_ext *base;
+ 
+ 	base = NODE_DATA(page_to_nid(page))->node_page_ext;
+-#ifdef CONFIG_DEBUG_VM
+ 	/*
+ 	 * The sanity checks the page allocator does upon freeing a
+ 	 * page can reach here before the page_ext arrays are
+@@ -111,7 +110,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	 */
+ 	if (unlikely(!base))
+ 		return NULL;
+-#endif
+ 	offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
+ 					MAX_ORDER_NR_PAGES);
+ 	return base + offset;
+@@ -176,7 +174,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ {
+ 	unsigned long pfn = page_to_pfn(page);
+ 	struct mem_section *section = __pfn_to_section(pfn);
+-#ifdef CONFIG_DEBUG_VM
+ 	/*
+ 	 * The sanity checks the page allocator does upon freeing a
+ 	 * page can reach here before the page_ext arrays are
+@@ -185,7 +182,6 @@ struct page_ext *lookup_page_ext(struct page *page)
+ 	 */
+ 	if (!section->page_ext)
+ 		return NULL;
+-#endif
+ 	return section->page_ext + pfn;
+ }
+ 
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index 29f2f8b853ae..c2cbd2620169 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
+ 	do {
+ 		next = hugetlb_entry_end(h, addr, end);
+ 		pte = huge_pte_offset(walk->mm, addr & hmask);
+-		if (pte && walk->hugetlb_entry)
++
++		if (pte)
+ 			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
++		else if (walk->pte_hole)
++			err = walk->pte_hole(addr, next, walk);
++
+ 		if (err)
+ 			break;
+ 	} while (addr = next, addr != end);
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index dfecba30d83a..ce53c8691604 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -376,6 +376,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 			dev->name);
+ 		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
+ 	}
++	if (event == NETDEV_DOWN &&
++	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
++		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+ 
+ 	vlan_info = rtnl_dereference(dev->vlan_info);
+ 	if (!vlan_info)
+@@ -423,9 +426,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
+ 		struct net_device *tmp;
+ 		LIST_HEAD(close_list);
+ 
+-		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+-			vlan_vid_del(dev, htons(ETH_P_8021Q), 0);
+-
+ 		/* Put all VLANs for this dev in the down state too.  */
+ 		vlan_group_for_each_dev(grp, i, vlandev) {
+ 			flgs = vlandev->flags;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 5d94dadb8df9..33674208d325 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1079,9 +1079,8 @@ static int dev_alloc_name_ns(struct net *net,
+ 	return ret;
+ }
+ 
+-static int dev_get_valid_name(struct net *net,
+-			      struct net_device *dev,
+-			      const char *name)
++int dev_get_valid_name(struct net *net, struct net_device *dev,
++		       const char *name)
+ {
+ 	BUG_ON(!net);
+ 
+@@ -1097,6 +1096,7 @@ static int dev_get_valid_name(struct net *net,
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL(dev_get_valid_name);
+ 
+ /**
+  *	dev_change_name - change name of a device
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index c9793c6c5005..2894bb5b7e0a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4159,6 +4159,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+ 	if (!xnet)
+ 		return;
+ 
++	ipvs_reset(skb);
+ 	skb_orphan(skb);
+ 	skb->mark = 0;
+ }
+diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
+index ff7736f7ff42..fc0c09e770e6 100644
+--- a/net/dsa/Kconfig
++++ b/net/dsa/Kconfig
+@@ -1,12 +1,13 @@
+ config HAVE_NET_DSA
+ 	def_bool y
+-	depends on NETDEVICES && !S390
++	depends on INET && NETDEVICES && !S390
+ 
+ # Drivers must select NET_DSA and the appropriate tagging format
+ 
+ config NET_DSA
+ 	tristate "Distributed Switch Architecture"
+-	depends on HAVE_NET_DSA && NET_SWITCHDEV
++	depends on HAVE_NET_DSA
++	select NET_SWITCHDEV
+ 	select PHYLIB
+ 	---help---
+ 	  Say Y if you want to enable support for the hardware switches supported
+diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
+index ac9a32ec3ee4..0157f09c0de9 100644
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
+ 	int ihl = ip_hdrlen(skb);
+ 	int ah_hlen = (ah->hdrlen + 2) << 2;
+ 
++	if (err)
++		goto out;
++
+ 	work_iph = AH_SKB_CB(skb)->tmp;
+ 	auth_data = ah_tmp_auth(work_iph, ihl);
+ 	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
+diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
+index ff96396ebec5..80d70716dbc1 100644
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -129,42 +129,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
+ static int ipip_err(struct sk_buff *skb, u32 info)
+ {
+ 
+-/* All the routers (except for Linux) return only
+-   8 bytes of packet payload. It means, that precise relaying of
+-   ICMP in the real Internet is absolutely infeasible.
+- */
++	/* All the routers (except for Linux) return only
++	   8 bytes of packet payload. It means, that precise relaying of
++	   ICMP in the real Internet is absolutely infeasible.
++	 */
+ 	struct net *net = dev_net(skb->dev);
+ 	struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
+ 	const struct iphdr *iph = (const struct iphdr *)skb->data;
+-	struct ip_tunnel *t;
+-	int err;
+ 	const int type = icmp_hdr(skb)->type;
+ 	const int code = icmp_hdr(skb)->code;
++	struct ip_tunnel *t;
++	int err = 0;
++
++	switch (type) {
++	case ICMP_DEST_UNREACH:
++		switch (code) {
++		case ICMP_SR_FAILED:
++			/* Impossible event. */
++			goto out;
++		default:
++			/* All others are translated to HOST_UNREACH.
++			 * rfc2003 contains "deep thoughts" about NET_UNREACH,
++			 * I believe they are just ether pollution. --ANK
++			 */
++			break;
++		}
++		break;
++
++	case ICMP_TIME_EXCEEDED:
++		if (code != ICMP_EXC_TTL)
++			goto out;
++		break;
++
++	case ICMP_REDIRECT:
++		break;
++
++	default:
++		goto out;
++	}
+ 
+-	err = -ENOENT;
+ 	t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ 			     iph->daddr, iph->saddr, 0);
+-	if (!t)
++	if (!t) {
++		err = -ENOENT;
+ 		goto out;
++	}
+ 
+ 	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+-		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+-				 t->parms.link, 0, IPPROTO_IPIP, 0);
+-		err = 0;
++		ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
++				 iph->protocol, 0);
+ 		goto out;
+ 	}
+ 
+ 	if (type == ICMP_REDIRECT) {
+-		ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+-			      IPPROTO_IPIP, 0);
+-		err = 0;
++		ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
+ 		goto out;
+ 	}
+ 
+-	if (t->parms.iph.daddr == 0)
++	if (t->parms.iph.daddr == 0) {
++		err = -ENOENT;
+ 		goto out;
++	}
+ 
+-	err = 0;
+ 	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
+ 		goto out;
+ 
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index f23590a85ce4..4e48774ea851 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1962,6 +1962,7 @@ static int tcp_mtu_probe(struct sock *sk)
+ 	nskb->ip_summed = skb->ip_summed;
+ 
+ 	tcp_insert_write_queue_before(nskb, skb, sk);
++	tcp_highest_sack_replace(sk, skb, nskb);
+ 
+ 	len = 0;
+ 	tcp_for_write_queue_from_safe(skb, next, sk) {
+@@ -2465,7 +2466,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
+ 
+ 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
+ 
+-	tcp_highest_sack_combine(sk, next_skb, skb);
++	tcp_highest_sack_replace(sk, next_skb, skb);
+ 
+ 	tcp_unlink_write_queue(next_skb, sk);
+ 
+@@ -3004,13 +3005,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+ 	tcp_ecn_make_synack(req, th, sk);
+ 	th->source = htons(ireq->ir_num);
+ 	th->dest = ireq->ir_rmt_port;
+-	/* Setting of flags are superfluous here for callers (and ECE is
+-	 * not even correctly set)
+-	 */
+-	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
+-			     TCPHDR_SYN | TCPHDR_ACK);
+-
+-	th->seq = htonl(TCP_SKB_CB(skb)->seq);
++	skb->ip_summed = CHECKSUM_PARTIAL;
++	th->seq = htonl(tcp_rsk(req)->snt_isn);
+ 	/* XXX data is queued and acked as is. No buffer/window check */
+ 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index db939e4ac68a..bcd3cbe0a02f 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ 	}
+ 	opt_space->dst1opt = fopt->dst1opt;
+ 	opt_space->opt_flen = fopt->opt_flen;
++	opt_space->tot_len = fopt->tot_len;
+ 	return opt_space;
+ }
+ EXPORT_SYMBOL_GPL(fl6_merge_options);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 29a1ffa72cd0..7a6317671d32 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1180,11 +1180,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
+ 		if (WARN_ON(v6_cork->opt))
+ 			return -EINVAL;
+ 
+-		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
++		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
+ 		if (unlikely(!v6_cork->opt))
+ 			return -ENOBUFS;
+ 
+-		v6_cork->opt->tot_len = opt->tot_len;
++		v6_cork->opt->tot_len = sizeof(*opt);
+ 		v6_cork->opt->opt_flen = opt->opt_flen;
+ 		v6_cork->opt->opt_nflen = opt->opt_nflen;
+ 
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index afbb50e378ec..2d4d2230f976 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1015,6 +1015,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
+ 		 session->name, cmd, arg);
+ 
+ 	sk = ps->sock;
++	if (!sk)
++		return -EBADR;
++
+ 	sock_hold(sk);
+ 
+ 	switch (cmd) {
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 81e9785f38bc..2fb91a7a11f0 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -4,6 +4,7 @@
+  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
+  * Copyright 2007-2008	Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
++ * Copyright 2017	Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -18,6 +19,7 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <net/mac80211.h>
++#include <crypto/algapi.h>
+ #include <asm/unaligned.h>
+ #include "ieee80211_i.h"
+ #include "driver-ops.h"
+@@ -603,6 +605,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
+ 	ieee80211_key_free_common(key);
+ }
+ 
++static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
++				    struct ieee80211_key *old,
++				    struct ieee80211_key *new)
++{
++	u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
++	u8 *tk_old, *tk_new;
++
++	if (!old || new->conf.keylen != old->conf.keylen)
++		return false;
++
++	tk_old = old->conf.key;
++	tk_new = new->conf.key;
++
++	/*
++	 * In station mode, don't compare the TX MIC key, as it's never used
++	 * and offloaded rekeying may not care to send it to the host. This
++	 * is the case in iwlwifi, for example.
++	 */
++	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
++	    new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
++	    new->conf.keylen == WLAN_KEY_LEN_TKIP &&
++	    !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
++		memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
++		memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
++		memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
++		memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
++		tk_old = tkip_old;
++		tk_new = tkip_new;
++	}
++
++	return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
++}
++
+ int ieee80211_key_link(struct ieee80211_key *key,
+ 		       struct ieee80211_sub_if_data *sdata,
+ 		       struct sta_info *sta)
+@@ -614,9 +649,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 
+ 	pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ 	idx = key->conf.keyidx;
+-	key->local = sdata->local;
+-	key->sdata = sdata;
+-	key->sta = sta;
+ 
+ 	mutex_lock(&sdata->local->key_mtx);
+ 
+@@ -627,6 +659,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 	else
+ 		old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ 
++	/*
++	 * Silently accept key re-installation without really installing the
++	 * new version of the key to avoid nonce reuse or replay issues.
++	 */
++	if (ieee80211_key_identical(sdata, old_key, key)) {
++		ieee80211_key_free_unused(key);
++		ret = 0;
++		goto out;
++	}
++
++	key->local = sdata->local;
++	key->sdata = sdata;
++	key->sta = sta;
++
+ 	increment_tailroom_need_count(sdata);
+ 
+ 	ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
+@@ -642,6 +688,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
+ 		ret = 0;
+ 	}
+ 
++ out:
+ 	mutex_unlock(&sdata->local->key_mtx);
+ 
+ 	return ret;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index dbc32b19c574..1c0d4aee783d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2684,7 +2684,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
+ 	struct module *module;
+-	int len, err = -ENOBUFS;
++	int err = -ENOBUFS;
+ 	int alloc_min_size;
+ 	int alloc_size;
+ 
+@@ -2734,9 +2734,11 @@ static int netlink_dump(struct sock *sk)
+ 	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+ 	netlink_skb_set_owner_r(skb, sk);
+ 
+-	len = cb->dump(skb, cb);
++	if (nlk->dump_done_errno > 0)
++		nlk->dump_done_errno = cb->dump(skb, cb);
+ 
+-	if (len > 0) {
++	if (nlk->dump_done_errno > 0 ||
++	    skb_tailroom(skb) < nlmsg_total_size(sizeof(nlk->dump_done_errno))) {
+ 		mutex_unlock(nlk->cb_mutex);
+ 
+ 		if (sk_filter(sk, skb))
+@@ -2746,13 +2748,15 @@ static int netlink_dump(struct sock *sk)
+ 		return 0;
+ 	}
+ 
+-	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
+-	if (!nlh)
++	nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
++			       sizeof(nlk->dump_done_errno), NLM_F_MULTI);
++	if (WARN_ON(!nlh))
+ 		goto errout_skb;
+ 
+ 	nl_dump_check_consistent(cb, nlh);
+ 
+-	memcpy(nlmsg_data(nlh), &len, sizeof(len));
++	memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
++	       sizeof(nlk->dump_done_errno));
+ 
+ 	if (sk_filter(sk, skb))
+ 		kfree_skb(skb);
+@@ -2826,6 +2830,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+ 	cb->skb = skb;
+ 
+ 	nlk->cb_running = true;
++	nlk->dump_done_errno = INT_MAX;
+ 
+ 	mutex_unlock(nlk->cb_mutex);
+ 
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 14437d9b1965..34dc71b0fe56 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -37,6 +37,7 @@ struct netlink_sock {
+ 	wait_queue_head_t	wait;
+ 	bool			bound;
+ 	bool			cb_running;
++	int			dump_done_errno;
+ 	struct netlink_callback	cb;
+ 	struct mutex		*cb_mutex;
+ 	struct mutex		cb_def_mutex;
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index 2d7859c03fd2..71c2ef84c5b0 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -420,7 +420,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
+ {
+ 	struct dst_entry *dst;
+ 
+-	if (!t)
++	if (sock_owned_by_user(sk) || !t)
+ 		return;
+ 	dst = sctp_transport_dst_check(t);
+ 	if (dst)
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 2bb7240c6f8b..00db4424faf1 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -787,6 +787,8 @@ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
+ 		if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+ 			struct sctp_ulpevent *ev = sctp_skb2event(skb);
+ 			addr->v6.sin6_scope_id = ev->iif;
++		} else {
++			addr->v6.sin6_scope_id = 0;
+ 		}
+ 	}
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 34d3d4056a11..cb7193ed4284 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
+ 	sk_mem_charge(sk, chunk->skb->truesize);
+ }
+ 
++static void sctp_clear_owner_w(struct sctp_chunk *chunk)
++{
++	skb_orphan(chunk->skb);
++}
++
++static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
++				       void (*cb)(struct sctp_chunk *))
++
++{
++	struct sctp_outq *q = &asoc->outqueue;
++	struct sctp_transport *t;
++	struct sctp_chunk *chunk;
++
++	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
++		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
++			cb(chunk);
++
++	list_for_each_entry(chunk, &q->retransmit, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->sacked, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->abandoned, list)
++		cb(chunk);
++
++	list_for_each_entry(chunk, &q->out_chunk_list, list)
++		cb(chunk);
++}
++
+ /* Verify that this is a valid address. */
+ static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
+ 				   int len)
+@@ -4433,6 +4463,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
+ 	struct socket *sock;
+ 	int err = 0;
+ 
++	/* Do not peel off from one netns to another one. */
++	if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
++		return -EINVAL;
++
+ 	if (!asoc)
+ 		return -EINVAL;
+ 
+@@ -7358,7 +7392,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	 * paths won't try to lock it and then oldsk.
+ 	 */
+ 	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
++	sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
+ 	sctp_assoc_migrate(assoc, newsk);
++	sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
+ 
+ 	/* If the association on the newsk is already closed before accept()
+ 	 * is called, set RCV_SHUTDOWN flag.
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index 4d9679701a6d..384c84e83462 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
+ 	err = -ENOENT;
+ 	if (sk == NULL)
+ 		goto out_nosk;
++	if (!net_eq(sock_net(sk), net))
++		goto out;
+ 
+ 	err = sock_diag_check_cookie(sk, req->udiag_cookie);
+ 	if (err)
+diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
+index 880a7d1d27d2..4ccff66523c9 100644
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
+ }
+ 
+ static DEFINE_MUTEX(thread_mutex);
++static int simple_thread_cnt;
+ 
+ void foo_bar_reg(void)
+ {
++	mutex_lock(&thread_mutex);
++	if (simple_thread_cnt++)
++		goto out;
++
+ 	pr_info("Starting thread for foo_bar_fn\n");
+ 	/*
+ 	 * We shouldn't be able to start a trace when the module is
+ 	 * unloading (there's other locks to prevent that). But
+ 	 * for consistency sake, we still take the thread_mutex.
+ 	 */
+-	mutex_lock(&thread_mutex);
+ 	simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
++ out:
+ 	mutex_unlock(&thread_mutex);
+ }
+ 
+ void foo_bar_unreg(void)
+ {
+-	pr_info("Killing thread for foo_bar_fn\n");
+-	/* protect against module unloading */
+ 	mutex_lock(&thread_mutex);
++	if (--simple_thread_cnt)
++		goto out;
++
++	pr_info("Killing thread for foo_bar_fn\n");
+ 	if (simple_tsk_fn)
+ 		kthread_stop(simple_tsk_fn);
+ 	simple_tsk_fn = NULL;
++ out:
+ 	mutex_unlock(&thread_mutex);
+ }
+ 
+diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
+index 5970161625a5..9ee9139b0b07 100644
+--- a/security/integrity/ima/ima_appraise.c
++++ b/security/integrity/ima/ima_appraise.c
+@@ -297,6 +297,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+ 	if (iint->flags & IMA_DIGSIG)
+ 		return;
+ 
++	if (iint->ima_file_status != INTEGRITY_PASS)
++		return;
++
+ 	rc = ima_collect_measurement(iint, file, NULL, NULL);
+ 	if (rc < 0)
+ 		return;
+diff --git a/security/keys/Kconfig b/security/keys/Kconfig
+index 72483b8f1be5..1edb37eea81d 100644
+--- a/security/keys/Kconfig
++++ b/security/keys/Kconfig
+@@ -20,6 +20,10 @@ config KEYS
+ 
+ 	  If you are unsure as to whether this is required, answer N.
+ 
++config KEYS_COMPAT
++	def_bool y
++	depends on COMPAT && KEYS
++
+ config PERSISTENT_KEYRINGS
+ 	bool "Enable register of persistent per-UID keyrings"
+ 	depends on KEYS
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 8e1c0099bb66..89d5695c51cd 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
+ 
+ 	down_read(&ukey->sem);
+ 	upayload = ukey->payload.data;
++	if (!upayload) {
++		/* key was revoked before we acquired its semaphore */
++		up_read(&ukey->sem);
++		key_put(ukey);
++		ukey = ERR_PTR(-EKEYREVOKED);
++		goto error;
++	}
+ 	*master_key = upayload->data;
+ 	*master_keylen = upayload->datalen;
+ error:
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index 6d913f40b6f0..ac424781d54d 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring,
+ 			 char __user *buffer, size_t buflen)
+ {
+ 	struct keyring_read_iterator_context ctx;
+-	unsigned long nr_keys;
+-	int ret;
++	long ret;
+ 
+ 	kenter("{%d},,%zu", key_serial(keyring), buflen);
+ 
+ 	if (buflen & (sizeof(key_serial_t) - 1))
+ 		return -EINVAL;
+ 
+-	nr_keys = keyring->keys.nr_leaves_on_tree;
+-	if (nr_keys == 0)
+-		return 0;
+-
+-	/* Calculate how much data we could return */
+-	if (!buffer || !buflen)
+-		return nr_keys * sizeof(key_serial_t);
+-
+-	/* Copy the IDs of the subscribed keys into the buffer */
+-	ctx.buffer = (key_serial_t __user *)buffer;
+-	ctx.buflen = buflen;
+-	ctx.count = 0;
+-	ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+-	if (ret < 0) {
+-		kleave(" = %d [iterate]", ret);
+-		return ret;
++	/* Copy as many key IDs as fit into the buffer */
++	if (buffer && buflen) {
++		ctx.buffer = (key_serial_t __user *)buffer;
++		ctx.buflen = buflen;
++		ctx.count = 0;
++		ret = assoc_array_iterate(&keyring->keys,
++					  keyring_read_iterator, &ctx);
++		if (ret < 0) {
++			kleave(" = %ld [iterate]", ret);
++			return ret;
++		}
+ 	}
+ 
+-	kleave(" = %zu [ok]", ctx.count);
+-	return ctx.count;
++	/* Return the size of the buffer needed */
++	ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
++	if (ret <= buflen)
++		kleave("= %ld [ok]", ret);
++	else
++		kleave("= %ld [buffer too small]", ret);
++	return ret;
+ }
+ 
+ /*
+diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
+index 96e8395ae586..74c68a0f8abe 100644
+--- a/sound/core/seq/oss/seq_oss_midi.c
++++ b/sound/core/seq/oss/seq_oss_midi.c
+@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
+ 	if (!dp->timer->running)
+ 		len = snd_seq_oss_timer_start(dp->timer);
+ 	if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
+-		if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+-			snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
+-					       ev->data.ext.ptr, ev->data.ext.len);
++		snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+ 	} else {
+ 		len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
+ 		if (len > 0)
+diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
+index c080c73cea04..ed7c7bc038aa 100644
+--- a/sound/core/seq/oss/seq_oss_readq.c
++++ b/sound/core/seq/oss/seq_oss_readq.c
+@@ -118,6 +118,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
+ 	return 0;
+ }
+ 
++/*
++ * put MIDI sysex bytes; the event buffer may be chained, thus it has
++ * to be expanded via snd_seq_dump_var_event().
++ */
++struct readq_sysex_ctx {
++	struct seq_oss_readq *readq;
++	int dev;
++};
++
++static int readq_dump_sysex(void *ptr, void *buf, int count)
++{
++	struct readq_sysex_ctx *ctx = ptr;
++
++	return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
++}
++
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++			    struct snd_seq_event *ev)
++{
++	struct readq_sysex_ctx ctx = {
++		.readq = q,
++		.dev = dev
++	};
++
++	if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
++		return 0;
++	return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
++}
++
+ /*
+  * copy an event to input queue:
+  * return zero if enqueued
+diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
+index f1463f1f449e..8d033ca2d23f 100644
+--- a/sound/core/seq/oss/seq_oss_readq.h
++++ b/sound/core/seq/oss/seq_oss_readq.h
+@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
+ void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
+ unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
+ int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
++int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
++			    struct snd_seq_event *ev);
+ int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
+ int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
+ int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 5d2d3d63abcf..e3767122dd0b 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -676,7 +676,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ 	if (atomic)
+ 		read_lock(&grp->list_lock);
+ 	else
+-		down_read(&grp->list_mutex);
++		down_read_nested(&grp->list_mutex, hop);
+ 	list_for_each_entry(subs, &grp->list_head, src_list) {
+ 		/* both ports ready? */
+ 		if (atomic_read(&subs->ref_count) != 2)
+diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
+index d99f99d61983..59ec91589ad9 100644
+--- a/sound/core/seq/seq_device.c
++++ b/sound/core/seq/seq_device.c
+@@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void)
+ 	flush_work(&autoload_work);
+ }
+ EXPORT_SYMBOL(snd_seq_device_load_drivers);
++#define cancel_autoload_drivers()	cancel_work_sync(&autoload_work)
+ #else
+ #define queue_autoload_drivers() /* NOP */
++#define cancel_autoload_drivers() /* NOP */
+ #endif
+ 
+ /*
+@@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device)
+ {
+ 	struct snd_seq_device *dev = device->device_data;
+ 
++	cancel_autoload_drivers();
+ 	put_device(&dev->dev);
+ 	return 0;
+ }
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index 2e908225d754..0b4b028e8e98 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -106,7 +106,8 @@ enum {
+ #endif /* CONFIG_X86_X32 */
+ };
+ 
+-static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
++static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++					  unsigned long arg)
+ {
+ 	void __user *argp = compat_ptr(arg);
+ 
+@@ -127,7 +128,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 	case SNDRV_TIMER_IOCTL_PAUSE:
+ 	case SNDRV_TIMER_IOCTL_PAUSE_OLD:
+ 	case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
+-		return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
++		return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
+ 	case SNDRV_TIMER_IOCTL_INFO32:
+ 		return snd_timer_user_info_compat(file, argp);
+ 	case SNDRV_TIMER_IOCTL_STATUS32:
+@@ -139,3 +140,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
++
++static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
++					unsigned long arg)
++{
++	struct snd_timer_user *tu = file->private_data;
++	long ret;
++
++	mutex_lock(&tu->ioctl_lock);
++	ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
++	mutex_unlock(&tu->ioctl_lock);
++	return ret;
++}
+diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
+index 11467272089e..ea7b377f0378 100644
+--- a/sound/drivers/vx/vx_pcm.c
++++ b/sound/drivers/vx/vx_pcm.c
+@@ -1015,7 +1015,7 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
+ 	int size, space, count;
+ 	struct snd_pcm_runtime *runtime = subs->runtime;
+ 
+-	if (! pipe->prepared || (chip->chip_status & VX_STAT_IS_STALE))
++	if (!pipe->running || (chip->chip_status & VX_STAT_IS_STALE))
+ 		return;
+ 
+ 	size = runtime->buffer_size - snd_pcm_capture_avail(runtime);
+@@ -1048,8 +1048,10 @@ static void vx_pcm_capture_update(struct vx_core *chip, struct snd_pcm_substream
+ 		/* ok, let's accelerate! */
+ 		int align = pipe->align * 3;
+ 		space = (count / align) * align;
+-		vx_pseudo_dma_read(chip, runtime, pipe, space);
+-		count -= space;
++		if (space > 0) {
++			vx_pseudo_dma_read(chip, runtime, pipe, space);
++			count -= space;
++		}
+ 	}
+ 	/* read the rest of bytes */
+ 	while (count > 0) {
+diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
+index af83b3b38052..8e457ea27f89 100644
+--- a/sound/pci/vx222/vx222_ops.c
++++ b/sound/pci/vx222/vx222_ops.c
+@@ -269,12 +269,12 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 
+ 	/* Transfer using pseudo-dma.
+ 	 */
+-	if (offset + count > pipe->buffer_bytes) {
++	if (offset + count >= pipe->buffer_bytes) {
+ 		int length = pipe->buffer_bytes - offset;
+ 		count -= length;
+ 		length >>= 2; /* in 32bit words */
+ 		/* Transfer using pseudo-dma. */
+-		while (length-- > 0) {
++		for (; length > 0; length--) {
+ 			outl(cpu_to_le32(*addr), port);
+ 			addr++;
+ 		}
+@@ -284,7 +284,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	pipe->hw_ptr += count;
+ 	count >>= 2; /* in 32bit words */
+ 	/* Transfer using pseudo-dma. */
+-	while (count-- > 0) {
++	for (; count > 0; count--) {
+ 		outl(cpu_to_le32(*addr), port);
+ 		addr++;
+ 	}
+@@ -307,12 +307,12 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	vx2_setup_pseudo_dma(chip, 0);
+ 	/* Transfer using pseudo-dma.
+ 	 */
+-	if (offset + count > pipe->buffer_bytes) {
++	if (offset + count >= pipe->buffer_bytes) {
+ 		int length = pipe->buffer_bytes - offset;
+ 		count -= length;
+ 		length >>= 2; /* in 32bit words */
+ 		/* Transfer using pseudo-dma. */
+-		while (length-- > 0)
++		for (; length > 0; length--)
+ 			*addr++ = le32_to_cpu(inl(port));
+ 		addr = (u32 *)runtime->dma_area;
+ 		pipe->hw_ptr = 0;
+@@ -320,7 +320,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	pipe->hw_ptr += count;
+ 	count >>= 2; /* in 32bit words */
+ 	/* Transfer using pseudo-dma. */
+-	while (count-- > 0)
++	for (; count > 0; count--)
+ 		*addr++ = le32_to_cpu(inl(port));
+ 
+ 	vx2_release_pseudo_dma(chip);
+diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
+index 281972913c32..56aa1ba73ccc 100644
+--- a/sound/pcmcia/vx/vxp_ops.c
++++ b/sound/pcmcia/vx/vxp_ops.c
+@@ -369,12 +369,12 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	unsigned short *addr = (unsigned short *)(runtime->dma_area + offset);
+ 
+ 	vx_setup_pseudo_dma(chip, 1);
+-	if (offset + count > pipe->buffer_bytes) {
++	if (offset + count >= pipe->buffer_bytes) {
+ 		int length = pipe->buffer_bytes - offset;
+ 		count -= length;
+ 		length >>= 1; /* in 16bit words */
+ 		/* Transfer using pseudo-dma. */
+-		while (length-- > 0) {
++		for (; length > 0; length--) {
+ 			outw(cpu_to_le16(*addr), port);
+ 			addr++;
+ 		}
+@@ -384,7 +384,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	pipe->hw_ptr += count;
+ 	count >>= 1; /* in 16bit words */
+ 	/* Transfer using pseudo-dma. */
+-	while (count-- > 0) {
++	for (; count > 0; count--) {
+ 		outw(cpu_to_le16(*addr), port);
+ 		addr++;
+ 	}
+@@ -411,12 +411,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	if (snd_BUG_ON(count % 2))
+ 		return;
+ 	vx_setup_pseudo_dma(chip, 0);
+-	if (offset + count > pipe->buffer_bytes) {
++	if (offset + count >= pipe->buffer_bytes) {
+ 		int length = pipe->buffer_bytes - offset;
+ 		count -= length;
+ 		length >>= 1; /* in 16bit words */
+ 		/* Transfer using pseudo-dma. */
+-		while (length-- > 0)
++		for (; length > 0; length--)
+ 			*addr++ = le16_to_cpu(inw(port));
+ 		addr = (unsigned short *)runtime->dma_area;
+ 		pipe->hw_ptr = 0;
+@@ -424,7 +424,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
+ 	pipe->hw_ptr += count;
+ 	count >>= 1; /* in 16bit words */
+ 	/* Transfer using pseudo-dma. */
+-	while (count-- > 1)
++	for (; count > 1; count--)
+ 		*addr++ = le16_to_cpu(inw(port));
+ 	/* Disable DMA */
+ 	pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
+diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
+index fa2e690e51c8..12024799fda1 100644
+--- a/sound/soc/codecs/adau17x1.c
++++ b/sound/soc/codecs/adau17x1.c
+@@ -89,6 +89,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
++static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
++	struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++	struct adau *adau = snd_soc_codec_get_drvdata(codec);
++
++	/*
++	 * If we are capturing, toggle the ADOSR bit in Converter Control 0 to
++	 * avoid losing SNR (workaround from ADI). This must be done after
++	 * the ADC(s) have been enabled. According to the data sheet, it is
++	 * normally illegal to set this bit when the sampling rate is 96 kHz,
++	 * but according to ADI it is acceptable for this workaround.
++	 */
++	regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++		ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
++	regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
++		ADAU17X1_CONVERTER0_ADOSR, 0);
++
++	return 0;
++}
++
+ static const char * const adau17x1_mono_stereo_text[] = {
+ 	"Stereo",
+ 	"Mono Left Channel (L+R)",
+@@ -120,7 +141,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
+ 	SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
+ 		&adau17x1_dac_mode_mux),
+ 
+-	SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
++	SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
++			   adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
+ 	SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
+ 	SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
+ 	SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
+diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
+index e13583e6ff56..6b46461cdc03 100644
+--- a/sound/soc/codecs/adau17x1.h
++++ b/sound/soc/codecs/adau17x1.h
+@@ -123,5 +123,7 @@ bool adau17x1_has_dsp(struct adau *adau);
+ 
+ #define ADAU17X1_CONVERTER0_CONVSR_MASK		0x7
+ 
++#define ADAU17X1_CONVERTER0_ADOSR		BIT(3)
++
+ 
+ #endif
+diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
+index 3fc6c10c2479..874694e647ae 100755
+--- a/tools/testing/selftests/firmware/fw_filesystem.sh
++++ b/tools/testing/selftests/firmware/fw_filesystem.sh
+@@ -35,8 +35,16 @@ echo "ABCD0123" >"$FW"
+ 
+ NAME=$(basename "$FW")
+ 
++if printf '\000' >"$DIR"/trigger_request 2> /dev/null; then
++	echo "$0: empty filename should not succeed" >&2
++	exit 1
++fi
++
+ # Request a firmware that doesn't exist, it should fail.
+-echo -n "nope-$NAME" >"$DIR"/trigger_request
++if echo -n "nope-$NAME" >"$DIR"/trigger_request 2> /dev/null; then
++	echo "$0: firmware shouldn't have loaded" >&2
++	exit 1
++fi
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ 	echo "$0: firmware was not expected to match" >&2
+ 	exit 1
+diff --git a/tools/testing/selftests/firmware/fw_userhelper.sh b/tools/testing/selftests/firmware/fw_userhelper.sh
+index 6efbade12139..97126d30ff04 100755
+--- a/tools/testing/selftests/firmware/fw_userhelper.sh
++++ b/tools/testing/selftests/firmware/fw_userhelper.sh
+@@ -54,9 +54,33 @@ trap "test_finish" EXIT
+ echo "ABCD0123" >"$FW"
+ NAME=$(basename "$FW")
+ 
++DEVPATH="$DIR"/"nope-$NAME"/loading
++
+ # Test failure when doing nothing (timeout works).
+-echo 1 >/sys/class/firmware/timeout
+-echo -n "$NAME" >"$DIR"/trigger_request
++echo -n 2 >/sys/class/firmware/timeout
++echo -n "nope-$NAME" >"$DIR"/trigger_request 2>/dev/null &
++
++# Give the kernel some time to load the loading file, must be less
++# than the timeout above.
++sleep 1
++if [ ! -f $DEVPATH ]; then
++	echo "$0: fallback mechanism immediately cancelled"
++	echo ""
++	echo "The file never appeared: $DEVPATH"
++	echo ""
++	echo "This might be a distribution udev rule setup by your distribution"
++	echo "to immediately cancel all fallback requests, this must be"
++	echo "removed before running these tests. To confirm look for"
++	echo "a firmware rule like /lib/udev/rules.d/50-firmware.rules"
++	echo "and see if you have something like this:"
++	echo ""
++	echo "SUBSYSTEM==\"firmware\", ACTION==\"add\", ATTR{loading}=\"-1\""
++	echo ""
++	echo "If you do remove this file or comment out this line before"
++	echo "proceeding with these tests."
++	exit 1
++fi
++
+ if diff -q "$FW" /dev/test_firmware >/dev/null ; then
+ 	echo "$0: firmware was not expected to match" >&2
+ 	exit 1


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-10-18 11:51 Mike Pagano
From: Mike Pagano @ 2017-10-18 11:51 UTC (permalink / raw
  To: gentoo-commits

commit:     f3c55170f5ab7f92a5cef3ea0b6c5f9f910d1145
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 18 11:51:15 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 18 11:51:15 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f3c55170

Linux patches 4.1.44 and 4.1.45

 0000_README             |    8 +
 1043_linux-4.1.44.patch | 4971 +++++++++++++++++++++++++++++++++++++++++++++++
 1044_linux-4.1.45.patch | 4031 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 9010 insertions(+)

diff --git a/0000_README b/0000_README
index 959795e..43ea8eb 100644
--- a/0000_README
+++ b/0000_README
@@ -215,6 +215,14 @@ Patch:  1042_linux-4.1.43.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.43
 
+Patch:  1043_linux-4.1.44.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.44
+
+Patch:  1044_linux-4.1.45.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.45
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1043_linux-4.1.44.patch b/1043_linux-4.1.44.patch
new file mode 100644
index 0000000..962183f
--- /dev/null
+++ b/1043_linux-4.1.44.patch
@@ -0,0 +1,4971 @@
+diff --git a/Makefile b/Makefile
+index 50d0a93fa343..9c7aa08c70b7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 43
++SUBLEVEL = 44
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 757ac079e7f2..bcf4f1b6b2bc 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -91,7 +91,7 @@
+ 					pinctrl-names = "default";
+ 					pinctrl-0 = <&pca0_pins>;
+ 					interrupt-parent = <&gpio0>;
+-					interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++					interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ 					gpio-controller;
+ 					#gpio-cells = <2>;
+ 					interrupt-controller;
+@@ -103,7 +103,7 @@
+ 					compatible = "nxp,pca9555";
+ 					pinctrl-names = "default";
+ 					interrupt-parent = <&gpio0>;
+-					interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
++					interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ 					gpio-controller;
+ 					#gpio-cells = <2>;
+ 					interrupt-controller;
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index 5f5e0f3d5b64..27cd4abfc74d 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -697,6 +697,8 @@
+ 	vmmc_aux-supply = <&vsim>;
+ 	bus-width = <8>;
+ 	non-removable;
++	no-sdio;
++	no-sd;
+ };
+ 
+ &mmc3 {
+diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
+index f3142369f594..01116ee1284b 100644
+--- a/arch/arm/configs/s3c2410_defconfig
++++ b/arch/arm/configs/s3c2410_defconfig
+@@ -87,9 +87,9 @@ CONFIG_IPV6_TUNNEL=m
+ CONFIG_NETFILTER=y
+ CONFIG_NF_CONNTRACK=m
+ CONFIG_NF_CONNTRACK_EVENTS=y
+-CONFIG_NF_CT_PROTO_DCCP=m
+-CONFIG_NF_CT_PROTO_SCTP=m
+-CONFIG_NF_CT_PROTO_UDPLITE=m
++CONFIG_NF_CT_PROTO_DCCP=y
++CONFIG_NF_CT_PROTO_SCTP=y
++CONFIG_NF_CT_PROTO_UDPLITE=y
+ CONFIG_NF_CONNTRACK_AMANDA=m
+ CONFIG_NF_CONNTRACK_FTP=m
+ CONFIG_NF_CONNTRACK_H323=m
+diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
+index bfe2a2f5a644..22b73112b75f 100644
+--- a/arch/arm/include/asm/ftrace.h
++++ b/arch/arm/include/asm/ftrace.h
+@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
+ 
+ #define ftrace_return_address(n) return_address(n)
+ 
++#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
++
++static inline bool arch_syscall_match_sym_name(const char *sym,
++					       const char *name)
++{
++	if (!strcmp(sym, "sys_mmap2"))
++		sym = "sys_mmap_pgoff";
++	else if (!strcmp(sym, "sys_statfs64_wrapper"))
++		sym = "sys_statfs64";
++	else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
++		sym = "sys_fstatfs64";
++	else if (!strcmp(sym, "sys_arm_fadvise64_64"))
++		sym = "sys_fadvise64_64";
++
++	/* Ignore case since sym may start with "SyS" instead of "sys" */
++	return !strcasecmp(sym, name);
++}
++
+ #endif /* ifndef __ASSEMBLY__ */
+ 
+ #endif /* _ASM_ARM_FTRACE */
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 4e15eed87074..3ca19cdb0eac 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1611,12 +1611,16 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+ 
+ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+ {
++	if (!kvm->arch.pgd)
++		return 0;
+ 	trace_kvm_age_hva(start, end);
+ 	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+ }
+ 
+ int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+ {
++	if (!kvm->arch.pgd)
++		return 0;
+ 	trace_kvm_test_age_hva(hva);
+ 	return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
+ }
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+index 0a3f40ecd06d..96235d2b135d 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
++++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+@@ -26,7 +26,7 @@
+ 		stdout-path = "serial0:115200n8";
+ 	};
+ 
+-	memory {
++	memory@0 {
+ 		device_type = "memory";
+ 		reg = <0x0 0x0 0x40000000>;
+ 	};
+diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+index 11e0b00045cf..0cb2cdfd7309 100644
+--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+@@ -71,7 +71,7 @@
+ 			     <1 10 0xf01>;
+ 	};
+ 
+-	amba_apu {
++	amba_apu: amba_apu@0 {
+ 		compatible = "simple-bus";
+ 		#address-cells = <2>;
+ 		#size-cells = <1>;
+@@ -251,7 +251,7 @@
+ 		};
+ 
+ 		i2c0: i2c@ff020000 {
+-			compatible = "cdns,i2c-r1p10";
++			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+ 			status = "disabled";
+ 			interrupt-parent = <&gic>;
+ 			interrupts = <0 17 4>;
+@@ -262,7 +262,7 @@
+ 		};
+ 
+ 		i2c1: i2c@ff030000 {
+-			compatible = "cdns,i2c-r1p10";
++			compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
+ 			status = "disabled";
+ 			interrupt-parent = <&gic>;
+ 			interrupts = <0 18 4>;
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index fa5efaa5c3ac..16523fbd9671 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -62,21 +62,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
+ 			break;
+ 
+ 		pud = pud_offset(pgd, addr);
+-		printk(", *pud=%016llx", pud_val(*pud));
++		pr_cont(", *pud=%016llx", pud_val(*pud));
+ 		if (pud_none(*pud) || pud_bad(*pud))
+ 			break;
+ 
+ 		pmd = pmd_offset(pud, addr);
+-		printk(", *pmd=%016llx", pmd_val(*pmd));
++		pr_cont(", *pmd=%016llx", pmd_val(*pmd));
+ 		if (pmd_none(*pmd) || pmd_bad(*pmd))
+ 			break;
+ 
+ 		pte = pte_offset_map(pmd, addr);
+-		printk(", *pte=%016llx", pte_val(*pte));
++		pr_cont(", *pte=%016llx", pte_val(*pte));
+ 		pte_unmap(pte);
+ 	} while(0);
+ 
+-	printk("\n");
++	pr_cont("\n");
+ }
+ 
+ /*
+diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
+index de781cf54bc7..da80878f2c0d 100644
+--- a/arch/mips/include/asm/branch.h
++++ b/arch/mips/include/asm/branch.h
+@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
+ 			return __microMIPS_compute_return_epc(regs);
+ 		if (cpu_has_mips16)
+ 			return __MIPS16e_compute_return_epc(regs);
+-		return regs->cp0_epc;
+-	}
+-
+-	if (!delay_slot(regs)) {
++	} else if (!delay_slot(regs)) {
+ 		regs->cp0_epc += 4;
+ 		return 0;
+ 	}
+diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
+index fe376aa705c5..13254da66ce8 100644
+--- a/arch/mips/kernel/branch.c
++++ b/arch/mips/kernel/branch.c
+@@ -399,7 +399,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+  *
+  * @regs:	Pointer to pt_regs
+  * @insn:	branch instruction to decode
+- * @returns:	-EFAULT on error and forces SIGBUS, and on success
++ * @returns:	-EFAULT on error and forces SIGILL, and on success
+  *		returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
+  *		evaluating the branch.
+  *
+@@ -556,6 +556,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
+ 	/*
+ 	 * These are unconditional and in j_format.
+ 	 */
++	case jalx_op:
+ 	case jal_op:
+ 		regs->regs[31] = regs->cp0_epc + 8;
+ 	case j_op:
+@@ -843,8 +844,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
+ 	return ret;
+ 
+ sigill_dsp:
+-	printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
+-	force_sig(SIGBUS, current);
++	pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
++		current->comm);
++	force_sig(SIGILL, current);
+ 	return -EFAULT;
+ sigill_r6:
+ 	pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
+index 298b2b773d12..f1fab6ff53e6 100644
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -83,7 +83,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
+ 	}
+ 
+ 	seq_printf(m, "isa\t\t\t:"); 
+-	if (cpu_has_mips_r1)
++	if (cpu_has_mips_1)
+ 		seq_printf(m, " mips1");
+ 	if (cpu_has_mips_2)
+ 		seq_printf(m, "%s", " mips2");
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index f7968b5149b0..5c3aa41a162f 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -838,7 +838,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+ 	audit_syscall_exit(regs);
+ 
+ 	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+-		trace_sys_exit(regs, regs->regs[2]);
++		trace_sys_exit(regs, regs_return_value(regs));
+ 
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE))
+ 		tracehook_report_syscall_exit(regs, 0);
+diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
+index 6e8de80bb446..d516765ce320 100644
+--- a/arch/mips/kernel/scall32-o32.S
++++ b/arch/mips/kernel/scall32-o32.S
+@@ -362,7 +362,7 @@ EXPORT(sys_call_table)
+ 	PTR	sys_writev
+ 	PTR	sys_cacheflush
+ 	PTR	sys_cachectl
+-	PTR	sys_sysmips
++	PTR	__sys_sysmips
+ 	PTR	sys_ni_syscall			/* 4150 */
+ 	PTR	sys_getsid
+ 	PTR	sys_fdatasync
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index a6f6b762c47a..a60edb497da3 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -318,7 +318,7 @@ EXPORT(sys_call_table)
+ 	PTR	sys_sched_getaffinity
+ 	PTR	sys_cacheflush
+ 	PTR	sys_cachectl
+-	PTR	sys_sysmips
++	PTR	__sys_sysmips
+ 	PTR	sys_io_setup			/* 5200 */
+ 	PTR	sys_io_destroy
+ 	PTR	sys_io_getevents
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 97fa4c7b9a5e..5de53e4b9607 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -307,7 +307,7 @@ EXPORT(sysn32_call_table)
+ 	PTR	compat_sys_sched_getaffinity
+ 	PTR	sys_cacheflush
+ 	PTR	sys_cachectl
+-	PTR	sys_sysmips
++	PTR	__sys_sysmips
+ 	PTR	compat_sys_io_setup			/* 6200 */
+ 	PTR	sys_io_destroy
+ 	PTR	compat_sys_io_getevents
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index 80e39776e377..185092b9ecc1 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -359,7 +359,7 @@ EXPORT(sys32_call_table)
+ 	PTR	compat_sys_writev
+ 	PTR	sys_cacheflush
+ 	PTR	sys_cachectl
+-	PTR	sys_sysmips
++	PTR	__sys_sysmips
+ 	PTR	sys_ni_syscall			/* 4150 */
+ 	PTR	sys_getsid
+ 	PTR	sys_fdatasync
+diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
+index 53a7ef9a8f32..4234b2d726c5 100644
+--- a/arch/mips/kernel/syscall.c
++++ b/arch/mips/kernel/syscall.c
+@@ -28,6 +28,7 @@
+ #include <linux/elf.h>
+ 
+ #include <asm/asm.h>
++#include <asm/asm-eva.h>
+ #include <asm/branch.h>
+ #include <asm/cachectl.h>
+ #include <asm/cacheflush.h>
+@@ -138,10 +139,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ 		__asm__ __volatile__ (
+ 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
+ 		"	li	%[err], 0				\n"
+-		"1:	ll	%[old], (%[addr])			\n"
++		"1:							\n"
++		user_ll("%[old]", "(%[addr])")
+ 		"	move	%[tmp], %[new]				\n"
+-		"2:	sc	%[tmp], (%[addr])			\n"
+-		"	bnez	%[tmp], 4f				\n"
++		"2:							\n"
++		user_sc("%[tmp]", "(%[addr])")
++		"	beqz	%[tmp], 4f				\n"
+ 		"3:							\n"
+ 		"	.insn						\n"
+ 		"	.subsection 2					\n"
+@@ -199,6 +202,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+ 	unreachable();
+ }
+ 
++/*
++ * mips_atomic_set() normally returns directly via syscall_exit potentially
++ * clobbering static registers, so be sure to preserve them.
++ */
++save_static_function(sys_sysmips);
++
+ SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
+ {
+ 	switch (cmd) {
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 81f645973eb3..62ad117675b3 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -2140,6 +2140,35 @@ dcopuop:
+ 	return 0;
+ }
+ 
++/*
++ * Emulate FPU instructions.
++ *
++ * If we use FPU hardware, then we have been typically called to handle
++ * an unimplemented operation, such as where an operand is a NaN or
++ * denormalized.  In that case exit the emulation loop after a single
++ * iteration so as to let hardware execute any subsequent instructions.
++ *
++ * If we have no FPU hardware or it has been disabled, then continue
++ * emulating floating-point instructions until one of these conditions
++ * has occurred:
++ *
++ * - a non-FPU instruction has been encountered,
++ *
++ * - an attempt to emulate has ended with a signal,
++ *
++ * - the ISA mode has been switched.
++ *
++ * We need to terminate the emulation loop if we got switched to the
++ * MIPS16 mode, whether supported or not, so that we do not attempt
++ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
++ * Similarly if we got switched to the microMIPS mode and only the
++ * regular MIPS mode is supported, so that we do not attempt to emulate
++ * a microMIPS instruction as a regular MIPS FPU instruction.  Or if
++ * we got switched to the regular MIPS mode and only the microMIPS mode
++ * is supported, so that we do not attempt to emulate a regular MIPS
++ * instruction that should cause an Address Error exception instead.
++ * For simplicity we always terminate upon an ISA mode switch.
++ */
+ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 	int has_fpu, void *__user *fault_addr)
+ {
+@@ -2225,6 +2254,15 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
+ 			break;
+ 		if (sig)
+ 			break;
++		/*
++		 * We have to check for the ISA bit explicitly here,
++		 * because `get_isa16_mode' may return 0 if support
++		 * for code compression has been globally disabled,
++		 * or otherwise we may produce the wrong signal or
++		 * even proceed successfully where we must not.
++		 */
++		if ((xcp->cp0_epc ^ prevepc) & 0x1)
++			break;
+ 
+ 		cond_resched();
+ 	} while (xcp->cp0_epc > prevepc);
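
The low-bit test added above works because MIPS encodes the compressed-ISA
execution mode (MIPS16/microMIPS) in bit 0 of the PC.  A tiny illustrative
helper mirroring the check in the hunk (the name is hypothetical):

	/* A flip of PC bit 0 between loop iterations means the ISA
	 * mode switched, so FPU emulation must stop. */
	static inline int isa_mode_switched(unsigned long prev_epc,
					    unsigned long epc)
	{
		return (epc ^ prev_epc) & 0x1;
	}
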
+diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
+index 2d69a853b742..3a08b55609b6 100644
+--- a/arch/openrisc/kernel/vmlinux.lds.S
++++ b/arch/openrisc/kernel/vmlinux.lds.S
+@@ -38,6 +38,8 @@ SECTIONS
+         /* Read-only sections, merged into text segment: */
+         . = LOAD_BASE ;
+ 
++	_text = .;
++
+ 	/* _s_kernel_ro must be page aligned */
+ 	. = ALIGN(PAGE_SIZE);
+ 	_s_kernel_ro = .;
+diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
+index 512d2782b043..0d6670056cd2 100644
+--- a/arch/powerpc/include/asm/atomic.h
++++ b/arch/powerpc/include/asm/atomic.h
+@@ -453,7 +453,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+  * Atomically increments @v by 1, so long as @v is non-zero.
+  * Returns non-zero if @v was non-zero, and zero otherwise.
+  */
+-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
++static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+ {
+ 	long t1, t2;
+ 
+@@ -472,7 +472,7 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+ 	: "r" (&v->counter)
+ 	: "cc", "xer", "memory");
+ 
+-	return t1;
++	return t1 != 0;
+ }
+ 
+ #endif /* __powerpc64__ */
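
The return-type change above guards against a narrowing hazard: a 64-bit
counter can be non-zero while its low 32 bits are all zero, so handing the
raw value to a caller that keeps only an int would misreport failure.  A
sketch of the hazard (illustrative values):

	#include <stdint.h>

	static int inc_not_zero_result(int64_t t1)
	{
		/* For t1 == 0x100000000, (int)t1 is 0 even though the
		 * increment succeeded; comparing first is safe. */
		return t1 != 0;
	}
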
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index a4bf6e0eb813..e97e58e28668 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1237,7 +1237,7 @@ static inline unsigned long mfvtb (void)
+ 				"	.llong 0\n"			\
+ 				".previous"				\
+ 			: "=r" (rval) \
+-			: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
++			: "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
+ 			rval;})
+ #else
+ #define mftb()		({unsigned long rval;	\
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index c1e10ffadd17..8e7a6c8efd27 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2232,6 +2232,10 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ {
+ 	int r;
+ 	int srcu_idx;
++	unsigned long ebb_regs[3] = {};	/* shut up GCC */
++	unsigned long user_tar = 0;
++	unsigned long proc_fscr = 0;
++	unsigned int user_vrsave;
+ 
+ 	if (!vcpu->arch.sane) {
+ 		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+@@ -2281,6 +2285,17 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 	flush_fp_to_thread(current);
+ 	flush_altivec_to_thread(current);
+ 	flush_vsx_to_thread(current);
++
++	/* Save userspace EBB and other register values */
++	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
++		ebb_regs[0] = mfspr(SPRN_EBBHR);
++		ebb_regs[1] = mfspr(SPRN_EBBRR);
++		ebb_regs[2] = mfspr(SPRN_BESCR);
++		user_tar = mfspr(SPRN_TAR);
++		proc_fscr = mfspr(SPRN_FSCR);
++	}
++	user_vrsave = mfspr(SPRN_VRSAVE);
++
+ 	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
+ 	vcpu->arch.pgdir = current->mm->pgd;
+ 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+@@ -2302,6 +2317,16 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 		}
+ 	} while (is_kvmppc_resume_guest(r));
+ 
++	/* Restore userspace EBB and other register values */
++	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
++		mtspr(SPRN_EBBHR, ebb_regs[0]);
++		mtspr(SPRN_EBBRR, ebb_regs[1]);
++		mtspr(SPRN_BESCR, ebb_regs[2]);
++		mtspr(SPRN_TAR, user_tar);
++		mtspr(SPRN_FSCR, proc_fscr);
++	}
++	mtspr(SPRN_VRSAVE, user_vrsave);
++
+  out:
+ 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
+ 	atomic_dec(&vcpu->kvm->arch.vcpus_running);
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 70eaf547703e..a3018f109cd3 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -36,6 +36,13 @@
+ #define NAPPING_CEDE	1
+ #define NAPPING_NOVCPU	2
+ 
++/* Stack frame offsets for kvmppc_hv_entry */
++#define SFS			112
++#define STACK_SLOT_TRAP		(SFS-4)
++#define STACK_SLOT_CIABR	(SFS-16)
++#define STACK_SLOT_DAWR		(SFS-24)
++#define STACK_SLOT_DAWRX	(SFS-32)
++
+ /*
+  * Call kvmppc_hv_entry in real mode.
+  * Must be called with interrupts hard-disabled.
+@@ -265,10 +272,10 @@ kvm_novcpu_exit:
+ 	bl	kvmhv_accumulate_time
+ #endif
+ 13:	mr	r3, r12
+-	stw	r12, 112-4(r1)
++	stw	r12, STACK_SLOT_TRAP(r1)
+ 	bl	kvmhv_commence_exit
+ 	nop
+-	lwz	r12, 112-4(r1)
++	lwz	r12, STACK_SLOT_TRAP(r1)
+ 	b	kvmhv_switch_to_host
+ 
+ /*
+@@ -404,7 +411,7 @@ kvmppc_hv_entry:
+ 	 */
+ 	mflr	r0
+ 	std	r0, PPC_LR_STKOFF(r1)
+-	stdu	r1, -112(r1)
++	stdu	r1, -SFS(r1)
+ 
+ 	/* Save R1 in the PACA */
+ 	std	r1, HSTATE_HOST_R1(r13)
+@@ -558,6 +565,16 @@ kvmppc_got_guest:
+ 	mtspr	SPRN_PURR,r7
+ 	mtspr	SPRN_SPURR,r8
+ 
++	/* Save host values of some registers */
++BEGIN_FTR_SECTION
++	mfspr	r5, SPRN_CIABR
++	mfspr	r6, SPRN_DAWR
++	mfspr	r7, SPRN_DAWRX
++	std	r5, STACK_SLOT_CIABR(r1)
++	std	r6, STACK_SLOT_DAWR(r1)
++	std	r7, STACK_SLOT_DAWRX(r1)
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
++
+ BEGIN_FTR_SECTION
+ 	/* Set partition DABR */
+ 	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+@@ -1169,8 +1186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 	 */
+ 	li	r0, 0
+ 	mtspr	SPRN_IAMR, r0
+-	mtspr	SPRN_CIABR, r0
+-	mtspr	SPRN_DAWRX, r0
++	mtspr	SPRN_PSPB, r0
+ 	mtspr	SPRN_TCSCR, r0
+ 	mtspr	SPRN_WORT, r0
+ 	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
+@@ -1186,6 +1202,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 	std	r6,VCPU_UAMOR(r9)
+ 	li	r6,0
+ 	mtspr	SPRN_AMR,r6
++	mtspr	SPRN_UAMOR, r6
+ 
+ 	/* Switch DSCR back to host value */
+ 	mfspr	r8, SPRN_DSCR
+@@ -1327,6 +1344,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	slbia
+ 	ptesync
+ 
++	/* Restore host values of some registers */
++BEGIN_FTR_SECTION
++	ld	r5, STACK_SLOT_CIABR(r1)
++	ld	r6, STACK_SLOT_DAWR(r1)
++	ld	r7, STACK_SLOT_DAWRX(r1)
++	mtspr	SPRN_CIABR, r5
++	mtspr	SPRN_DAWR, r6
++	mtspr	SPRN_DAWRX, r7
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
++
+ 	/*
+ 	 * POWER7/POWER8 guest -> host partition switch code.
+ 	 * We don't have to lock against tlbies but we do
+@@ -1431,8 +1458,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	li	r0, KVM_GUEST_MODE_NONE
+ 	stb	r0, HSTATE_IN_GUEST(r13)
+ 
+-	ld	r0, 112+PPC_LR_STKOFF(r1)
+-	addi	r1, r1, 112
++	ld	r0, SFS+PPC_LR_STKOFF(r1)
++	addi	r1, r1, SFS
+ 	mtlr	r0
+ 	blr
+ 
+diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
+index 4014881e9843..e37162d356d8 100644
+--- a/arch/powerpc/lib/sstep.c
++++ b/arch/powerpc/lib/sstep.c
+@@ -687,8 +687,10 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+ 	case 19:
+ 		switch ((instr >> 1) & 0x3ff) {
+ 		case 0:		/* mcrf */
+-			rd = (instr >> 21) & 0x1c;
+-			ra = (instr >> 16) & 0x1c;
++			rd = 7 - ((instr >> 23) & 0x7);
++			ra = 7 - ((instr >> 18) & 0x7);
++			rd *= 4;
++			ra *= 4;
+ 			val = (regs->ccr >> ra) & 0xf;
+ 			regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
+ 			goto instr_done;
+@@ -967,6 +969,19 @@ int __kprobes analyse_instr(struct instruction_op *op, struct pt_regs *regs,
+ #endif
+ 
+ 		case 19:	/* mfcr */
++			if ((instr >> 20) & 1) {
++				imm = 0xf0000000UL;
++				for (sh = 0; sh < 8; ++sh) {
++					if (instr & (0x80000 >> sh)) {
++						regs->gpr[rd] = regs->ccr & imm;
++						break;
++					}
++					imm >>= 4;
++				}
++
++				goto instr_done;
++			}
++
+ 			regs->gpr[rd] = regs->ccr;
+ 			regs->gpr[rd] &= 0xffffffffUL;
+ 			goto instr_done;
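
Both hunks above decode condition-register fields out of the instruction
word.  The corrected arithmetic maps CR field n to bit offset 4*(7-n)
within the 32-bit CCR image; a hedged sketch of the mcrf decode:

	#include <stdint.h>

	/* Extract the destination (BF, bits 23-25) and source (BFA,
	 * bits 18-20) fields of an mcrf instruction and turn them into
	 * shift amounts, where CR field n occupies CCR bits
	 * [4*(7-n), 4*(7-n)+3]. */
	static void mcrf_shifts(uint32_t instr, unsigned int *rd_shift,
				unsigned int *ra_shift)
	{
		*rd_shift = (7 - ((instr >> 23) & 0x7)) * 4;
		*ra_shift = (7 - ((instr >> 18) & 0x7)) * 4;
	}
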
+diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
+index 0f319521e002..14392b4e4693 100644
+--- a/arch/powerpc/platforms/pseries/reconfig.c
++++ b/arch/powerpc/platforms/pseries/reconfig.c
+@@ -112,7 +112,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
+ 
+ 	of_detach_node(np);
+ 	of_node_put(parent);
+-	of_node_put(np); /* Must decrement the refcount */
+ 	return 0;
+ }
+ 
+diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
+index 6ba0bf928909..6bc941be6921 100644
+--- a/arch/s390/include/asm/syscall.h
++++ b/arch/s390/include/asm/syscall.h
+@@ -64,6 +64,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ {
+ 	unsigned long mask = -1UL;
+ 
++	/*
++	 * No arguments for this syscall; there's nothing to do.
++	 */
++	if (!n)
++		return;
++
+ 	BUG_ON(i + n > 6);
+ #ifdef CONFIG_COMPAT
+ 	if (test_tsk_thread_flag(task, TIF_31BIT))
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index dc2d7aa56440..a3b51d30e8d8 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1139,7 +1139,8 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp)
+ 		insn_count = bpf_jit_insn(jit, fp, i);
+ 		if (insn_count < 0)
+ 			return -1;
+-		jit->addrs[i + 1] = jit->prg; /* Next instruction address */
++		/* Next instruction address */
++		jit->addrs[i + insn_count] = jit->prg;
+ 	}
+ 	bpf_jit_epilogue(jit);
+ 
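
The one-line fix above matters for eBPF instructions that occupy more than
one instruction slot (the 64-bit immediate load spans two), where the host
address of the next instruction belongs at i + insn_count rather than
unconditionally at i + 1.  A hedged sketch, assuming a translation loop of
this general shape (emit_one_insn is a hypothetical stand-in for
bpf_jit_insn):

	int i, insn_count;

	for (i = 0; i < prog_len; i += insn_count) {
		insn_count = emit_one_insn(jit, prog, i);  /* 1 or 2 slots */
		if (insn_count < 0)
			return -1;
		/* host address of the *next* eBPF instruction */
		jit->addrs[i + insn_count] = jit->prg;
	}
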
+diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
+index 349dd23e2876..0cdeb2b483a0 100644
+--- a/arch/sparc/include/asm/mmu_context_64.h
++++ b/arch/sparc/include/asm/mmu_context_64.h
+@@ -25,9 +25,11 @@ void destroy_context(struct mm_struct *mm);
+ void __tsb_context_switch(unsigned long pgd_pa,
+ 			  struct tsb_config *tsb_base,
+ 			  struct tsb_config *tsb_huge,
+-			  unsigned long tsb_descr_pa);
++			  unsigned long tsb_descr_pa,
++			  unsigned long secondary_ctx);
+ 
+-static inline void tsb_context_switch(struct mm_struct *mm)
++static inline void tsb_context_switch_ctx(struct mm_struct *mm,
++					  unsigned long ctx)
+ {
+ 	__tsb_context_switch(__pa(mm->pgd),
+ 			     &mm->context.tsb_block[0],
+@@ -38,9 +40,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
+ #else
+ 			     NULL
+ #endif
+-			     , __pa(&mm->context.tsb_descr[0]));
++			     , __pa(&mm->context.tsb_descr[0]),
++			     ctx);
+ }
+ 
++#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
++
+ void tsb_grow(struct mm_struct *mm,
+ 	      unsigned long tsb_index,
+ 	      unsigned long mm_rss);
+@@ -110,8 +115,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
+ 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
+ 	 * only had cpu1 set in it.
+ 	 */
+-	load_secondary_context(mm);
+-	tsb_context_switch(mm);
++	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+ 
+ 	/* Any time a processor runs a context on an address space
+ 	 * for the first time, we must flush that context out of the
+diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
+index ec9c04de3664..ff05992dae7a 100644
+--- a/arch/sparc/include/asm/trap_block.h
++++ b/arch/sparc/include/asm/trap_block.h
+@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
+ void init_cur_cpu_trap(struct thread_info *);
+ void setup_tba(void);
+ extern int ncpus_probed;
++extern u64 cpu_mondo_counter[NR_CPUS];
+ 
+ unsigned long real_hard_smp_processor_id(void);
+ 
+diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
+index 95a9fa0d2195..4511caa3b7e9 100644
+--- a/arch/sparc/kernel/smp_64.c
++++ b/arch/sparc/kernel/smp_64.c
+@@ -617,22 +617,48 @@ retry:
+ 	}
+ }
+ 
+-/* Multi-cpu list version.  */
++#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
++#define	MONDO_USEC_WAIT_MIN		2
++#define	MONDO_USEC_WAIT_MAX		100
++#define	MONDO_RETRY_LIMIT		500000
++
++/* Multi-cpu list version.
++ *
++ * Deliver xcalls to the 'cnt' cpus listed in 'cpu_list'.
++ * Sometimes not all cpus receive the mondo, requiring us to re-send
++ * it until every cpu has received it, or until the remaining cpus
++ * are truly stuck and unable to receive the mondo, at which point we
++ * time out.
++ * Occasionally a target cpu strand is borrowed briefly by the
++ * hypervisor to perform guest service, such as PCIe error handling.
++ * Considering that service time, an overall wait of 1 second is
++ * reasonable for 1 cpu.
++ * Two in-between mondo check wait times are defined: 2 usec for a
++ * quick single-cpu turnaround and up to 100 usec for large cpu
++ * counts.  Delivering the mondo to a large number of cpus can take
++ * longer, so we adjust the retry count as long as the target cpus
++ * are making forward progress.
++ */
+ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ {
+-	int retries, this_cpu, prev_sent, i, saw_cpu_error;
++	int this_cpu, tot_cpus, prev_sent, i, rem;
++	int usec_wait, retries, tot_retries;
++	u16 first_cpu = 0xffff;
++	unsigned long xc_rcvd = 0;
+ 	unsigned long status;
++	int ecpuerror_id = 0;
++	int enocpu_id = 0;
+ 	u16 *cpu_list;
++	u16 cpu;
+ 
+ 	this_cpu = smp_processor_id();
+-
+ 	cpu_list = __va(tb->cpu_list_pa);
+-
+-	saw_cpu_error = 0;
+-	retries = 0;
++	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
++	if (usec_wait > MONDO_USEC_WAIT_MAX)
++		usec_wait = MONDO_USEC_WAIT_MAX;
++	retries = tot_retries = 0;
++	tot_cpus = cnt;
+ 	prev_sent = 0;
++
+ 	do {
+-		int forward_progress, n_sent;
++		int n_sent, mondo_delivered, target_cpu_busy;
+ 
+ 		status = sun4v_cpu_mondo_send(cnt,
+ 					      tb->cpu_list_pa,
+@@ -640,94 +666,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
+ 
+ 		/* HV_EOK means all cpus received the xcall, we're done.  */
+ 		if (likely(status == HV_EOK))
+-			break;
++			goto xcall_done;
++
++		/* If not these non-fatal errors, panic */
++		if (unlikely((status != HV_EWOULDBLOCK) &&
++			(status != HV_ECPUERROR) &&
++			(status != HV_ENOCPU)))
++			goto fatal_errors;
+ 
+ 		/* First, see if we made any forward progress.
++		 *
++		 * Go through the cpu_list, count the target cpus that have
++		 * received our mondo (n_sent), and those that did not (rem).
++		 * Re-pack cpu_list with the cpus that remain to be retried
++		 * at the front - this simplifies tracking the truly stalled
++		 * cpus.
+ 		 *
+ 		 * The hypervisor indicates successful sends by setting
+ 		 * cpu list entries to the value 0xffff.
++		 *
++		 * EWOULDBLOCK means some target cpus did not receive the
++		 * mondo and retry usually helps.
++		 *
++		 * ECPUERROR means at least one target cpu is in error state,
++		 * it's usually safe to skip the faulty cpu and retry.
++		 *
++		 * ENOCPU means one of the target cpus doesn't belong to the
++		 * domain, perhaps because it was offlined, which is
++		 * unexpected but not fatal; it's okay to skip the offlined
++		 * cpu.
+ 		 */
++		rem = 0;
+ 		n_sent = 0;
+ 		for (i = 0; i < cnt; i++) {
+-			if (likely(cpu_list[i] == 0xffff))
++			cpu = cpu_list[i];
++			if (likely(cpu == 0xffff)) {
+ 				n_sent++;
++			} else if ((status == HV_ECPUERROR) &&
++				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
++				ecpuerror_id = cpu + 1;
++			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
++				enocpu_id = cpu + 1;
++			} else {
++				cpu_list[rem++] = cpu;
++			}
+ 		}
+ 
+-		forward_progress = 0;
+-		if (n_sent > prev_sent)
+-			forward_progress = 1;
++		/* No cpu remained, we're done. */
++		if (rem == 0)
++			break;
+ 
+-		prev_sent = n_sent;
++		/* Otherwise, update the cpu count for retry. */
++		cnt = rem;
+ 
+-		/* If we get a HV_ECPUERROR, then one or more of the cpus
+-		 * in the list are in error state.  Use the cpu_state()
+-		 * hypervisor call to find out which cpus are in error state.
++		/* Record the overall number of mondos received by the
++		 * first of the remaining cpus.
+ 		 */
+-		if (unlikely(status == HV_ECPUERROR)) {
+-			for (i = 0; i < cnt; i++) {
+-				long err;
+-				u16 cpu;
++		if (first_cpu != cpu_list[0]) {
++			first_cpu = cpu_list[0];
++			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
++		}
+ 
+-				cpu = cpu_list[i];
+-				if (cpu == 0xffff)
+-					continue;
++		/* Was any mondo delivered successfully? */
++		mondo_delivered = (n_sent > prev_sent);
++		prev_sent = n_sent;
+ 
+-				err = sun4v_cpu_state(cpu);
+-				if (err == HV_CPU_STATE_ERROR) {
+-					saw_cpu_error = (cpu + 1);
+-					cpu_list[i] = 0xffff;
+-				}
+-			}
+-		} else if (unlikely(status != HV_EWOULDBLOCK))
+-			goto fatal_mondo_error;
++		/* or, was any target cpu busy processing other mondos? */
++		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
++		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ 
+-		/* Don't bother rewriting the CPU list, just leave the
+-		 * 0xffff and non-0xffff entries in there and the
+-		 * hypervisor will do the right thing.
+-		 *
+-		 * Only advance timeout state if we didn't make any
+-		 * forward progress.
++		/* The retry count only tracks iterations with no progress.
++		 * If we're making progress, reset the retry count.
+ 		 */
+-		if (unlikely(!forward_progress)) {
+-			if (unlikely(++retries > 10000))
+-				goto fatal_mondo_timeout;
+-
+-			/* Delay a little bit to let other cpus catch up
+-			 * on their cpu mondo queue work.
+-			 */
+-			udelay(2 * cnt);
++		if (likely(mondo_delivered || target_cpu_busy)) {
++			tot_retries += retries;
++			retries = 0;
++		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
++			goto fatal_mondo_timeout;
+ 		}
+-	} while (1);
+ 
+-	if (unlikely(saw_cpu_error))
+-		goto fatal_mondo_cpu_error;
++		/* Delay a little bit to let other cpus catch up on
++		 * their cpu mondo queue work.
++		 */
++		if (!mondo_delivered)
++			udelay(usec_wait);
+ 
+-	return;
++		retries++;
++	} while (1);
+ 
+-fatal_mondo_cpu_error:
+-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
+-	       "(including %d) were in error state\n",
+-	       this_cpu, saw_cpu_error - 1);
++xcall_done:
++	if (unlikely(ecpuerror_id > 0)) {
++		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
++		       this_cpu, ecpuerror_id - 1);
++	} else if (unlikely(enocpu_id > 0)) {
++		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
++		       this_cpu, enocpu_id - 1);
++	}
+ 	return;
+ 
++fatal_errors:
++	/* fatal errors include bad alignment, etc */
++	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
++	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
++	panic("Unexpected SUN4V mondo error %lu\n", status);
++
+ fatal_mondo_timeout:
+-	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
+-	       " progress after %d retries.\n",
+-	       this_cpu, retries);
+-	goto dump_cpu_list_and_out;
+-
+-fatal_mondo_error:
+-	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
+-	       this_cpu, status);
+-	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
+-	       "mondo_block_pa(%lx)\n",
+-	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+-
+-dump_cpu_list_and_out:
+-	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
+-	for (i = 0; i < cnt; i++)
+-		printk("%u ", cpu_list[i]);
+-	printk("]\n");
++	/* some cpus were non-responsive to the cpu mondo */
++	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
++	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
++	panic("SUN4V mondo timeout panic\n");
+ }
+ 
+ static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
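
A minimal sketch of the re-pack idiom the rewritten loop relies on, with
types and the delivery marker simplified for illustration:

	/* Compact the not-yet-delivered cpus to the front of the list;
	 * 0xffff is the hypervisor's "delivered" marker.  Afterwards
	 * only the first 'rem' entries need to be retried. */
	static int repack_undelivered(unsigned short *cpu_list, int cnt)
	{
		int i, rem = 0;

		for (i = 0; i < cnt; i++)
			if (cpu_list[i] != 0xffff)
				cpu_list[rem++] = cpu_list[i];
		return rem;
	}
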
+diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
+index 559bc5e9c199..34631995859a 100644
+--- a/arch/sparc/kernel/sun4v_ivec.S
++++ b/arch/sparc/kernel/sun4v_ivec.S
+@@ -26,6 +26,21 @@ sun4v_cpu_mondo:
+ 	ldxa	[%g0] ASI_SCRATCHPAD, %g4
+ 	sub	%g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ 
++	/* Get smp_processor_id() into %g3 */
++	sethi	%hi(trap_block), %g5
++	or	%g5, %lo(trap_block), %g5
++	sub	%g4, %g5, %g3
++	srlx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
++
++	/* Increment cpu_mondo_counter[smp_processor_id()] */
++	sethi	%hi(cpu_mondo_counter), %g5
++	or	%g5, %lo(cpu_mondo_counter), %g5
++	sllx	%g3, 3, %g3
++	add	%g5, %g3, %g5
++	ldx	[%g5], %g3
++	add	%g3, 1, %g3
++	stx	%g3, [%g5]
++
+ 	/* Get CPU mondo queue base phys address into %g7.  */
+ 	ldx	[%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
+ 
+diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
+index cc97a43268ee..d883c5951e8b 100644
+--- a/arch/sparc/kernel/traps_64.c
++++ b/arch/sparc/kernel/traps_64.c
+@@ -2659,6 +2659,7 @@ void do_getpsr(struct pt_regs *regs)
+ 	}
+ }
+ 
++u64 cpu_mondo_counter[NR_CPUS] = {0};
+ struct trap_per_cpu trap_block[NR_CPUS];
+ EXPORT_SYMBOL(trap_block);
+ 
+diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
+index 8e920d152565..12fe20c9042c 100644
+--- a/arch/sparc/kernel/tsb.S
++++ b/arch/sparc/kernel/tsb.S
+@@ -367,6 +367,7 @@ tsb_flush:
+ 	 * %o1:	TSB base config pointer
+ 	 * %o2:	TSB huge config pointer, or NULL if none
+ 	 * %o3:	Hypervisor TSB descriptor physical address
++	 * %o4: Secondary context to load, if non-zero
+ 	 *
+ 	 * We have to run this whole thing with interrupts
+ 	 * disabled so that the current cpu doesn't change
+@@ -379,6 +380,17 @@ __tsb_context_switch:
+ 	rdpr	%pstate, %g1
+ 	wrpr	%g1, PSTATE_IE, %pstate
+ 
++	brz,pn	%o4, 1f
++	 mov	SECONDARY_CONTEXT, %o5
++
++661:	stxa	%o4, [%o5] ASI_DMMU
++	.section .sun4v_1insn_patch, "ax"
++	.word	661b
++	stxa	%o4, [%o5] ASI_MMU
++	.previous
++	flush	%g6
++
++1:
+ 	TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
+ 
+ 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
+diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
+index 17bd2e167e07..df707a8ad311 100644
+--- a/arch/sparc/power/hibernate.c
++++ b/arch/sparc/power/hibernate.c
+@@ -35,6 +35,5 @@ void restore_processor_state(void)
+ {
+ 	struct mm_struct *mm = current->active_mm;
+ 
+-	load_secondary_context(mm);
+-	tsb_context_switch(mm);
++	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
+ }
+diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
+index 318b8465d302..06ceddb3a22e 100644
+--- a/arch/x86/boot/string.c
++++ b/arch/x86/boot/string.c
+@@ -14,6 +14,7 @@
+ 
+ #include <linux/types.h>
+ #include "ctype.h"
++#include "string.h"
+ 
+ int memcmp(const void *s1, const void *s2, size_t len)
+ {
+diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
+index 725e820602b1..113588ddb43f 100644
+--- a/arch/x86/boot/string.h
++++ b/arch/x86/boot/string.h
+@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
+ #define memset(d,c,l) __builtin_memset(d,c,l)
+ #define memcmp	__builtin_memcmp
+ 
++extern int strcmp(const char *str1, const char *str2);
++extern int strncmp(const char *cs, const char *ct, size_t count);
++extern size_t strlen(const char *s);
++extern char *strstr(const char *s1, const char *s2);
++extern size_t strnlen(const char *s, size_t maxlen);
++extern unsigned int atou(const char *s);
++extern unsigned long long simple_strtoull(const char *cp, char **endp,
++					  unsigned int base);
++
+ #endif /* BOOT_STRING_H */
+diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
+index ca08a27b90b3..4ad5a91aea79 100644
+--- a/arch/x86/include/asm/xen/hypercall.h
++++ b/arch/x86/include/asm/xen/hypercall.h
+@@ -43,6 +43,7 @@
+ 
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
++#include <asm/smap.h>
+ 
+ #include <xen/interface/xen.h>
+ #include <xen/interface/sched.h>
+@@ -213,10 +214,12 @@ privcmd_call(unsigned call,
+ 	__HYPERCALL_DECLS;
+ 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ 
++	stac();
+ 	asm volatile("call *%[call]"
+ 		     : __HYPERCALL_5PARAM
+ 		     : [call] "a" (&hypercall_page[call])
+ 		     : __HYPERCALL_CLOBBER5);
++	clac();
+ 
+ 	return (long)__res;
+ }
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index 07bea80223f6..60aa02503b48 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -328,6 +328,14 @@ static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
+ 	int pin;
+ 	struct mpc_intsrc mp_irq;
+ 
++	/*
++	 * Check bus_irq boundary.
++	 */
++	if (bus_irq >= NR_IRQS_LEGACY) {
++		pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
++		return;
++	}
++
+ 	/*
+ 	 * Convert 'gsi' to 'ioapic.pin'.
+ 	 */
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+index df61c2d0cb56..bd7e7d6c29c5 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
+@@ -581,6 +581,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
+ 	const char *name = th_names[bank];
+ 	int err = 0;
+ 
++	if (!dev)
++		return -ENODEV;
++
+ 	if (is_shared_bank(bank)) {
+ 		nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ 
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 27e63c1770e6..916e84aa5447 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -151,6 +151,8 @@ void kvm_async_pf_task_wait(u32 token)
+ 		if (hlist_unhashed(&n.link))
+ 			break;
+ 
++		rcu_irq_exit();
++
+ 		if (!n.halted) {
+ 			local_irq_enable();
+ 			schedule();
+@@ -159,11 +161,11 @@ void kvm_async_pf_task_wait(u32 token)
+ 			/*
+ 			 * We cannot reschedule. So halt.
+ 			 */
+-			rcu_irq_exit();
+ 			native_safe_halt();
+ 			local_irq_disable();
+-			rcu_irq_enter();
+ 		}
++
++		rcu_irq_enter();
+ 	}
+ 	if (!n.halted)
+ 		finish_wait(&n.wq, &wait);
+diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
+index 39c485b0c25c..db89f4b8b966 100644
+--- a/drivers/acpi/glue.c
++++ b/drivers/acpi/glue.c
+@@ -97,7 +97,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
+ 	if (check_children && list_empty(&adev->children))
+ 		return -ENODEV;
+ 
+-	return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
++	/*
++	 * If the device has a _HID (or _CID) returning a valid ACPI/PNP
++	 * device ID, it is better to make it look less attractive here, so that
++	 * the other device with the same _ADR value (that may not have a valid
++	 * device ID) can be matched going forward.  [This means a second spec
++	 * violation in a row, so whatever we do here is best effort anyway.]
++	 */
++	return sta_present && list_empty(&adev->pnp.ids) ?
++			FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ }
+ 
+ struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index ae7cfcb562dc..4d4cdade9d7e 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2834,10 +2834,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
+ static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+ {
+ 	if (!sata_pmp_attached(ap)) {
+-		if (likely(devno < ata_link_max_devices(&ap->link)))
++		if (likely(devno >= 0 &&
++			   devno < ata_link_max_devices(&ap->link)))
+ 			return &ap->link.device[devno];
+ 	} else {
+-		if (likely(devno < ap->nr_pmp_links))
++		if (likely(devno >= 0 &&
++			   devno < ap->nr_pmp_links))
+ 			return &ap->pmp_link[devno].device[0];
+ 	}
+ 
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 2327613d4539..75e29733af54 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -1440,7 +1440,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
+ 	}
+ 
+ 	dev->power.subsys_data->domain_data = &gpd_data->base;
+-	dev->pm_domain = &genpd->domain;
+ 
+ 	spin_unlock_irq(&dev->power.lock);
+ 
+@@ -1459,7 +1458,6 @@ static void genpd_free_dev_data(struct device *dev,
+ {
+ 	spin_lock_irq(&dev->power.lock);
+ 
+-	dev->pm_domain = NULL;
+ 	dev->power.subsys_data->domain_data = NULL;
+ 
+ 	spin_unlock_irq(&dev->power.lock);
+@@ -1500,6 +1498,8 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ 	if (ret)
+ 		goto out;
+ 
++	dev->pm_domain = &genpd->domain;
++
+ 	genpd->device_count++;
+ 	genpd->max_off_time_changed = true;
+ 
+@@ -1563,6 +1563,8 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ 	if (genpd->detach_dev)
+ 		genpd->detach_dev(genpd, dev);
+ 
++	dev->pm_domain = NULL;
++
+ 	list_del_init(&pdd->list_node);
+ 
+ 	genpd_release_lock(genpd);
+@@ -1673,7 +1675,7 @@ int pm_genpd_add_subdomain_names(const char *master_name,
+ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ 			      struct generic_pm_domain *subdomain)
+ {
+-	struct gpd_link *link;
++	struct gpd_link *l, *link;
+ 	int ret = -EINVAL;
+ 
+ 	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+@@ -1682,7 +1684,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+  start:
+ 	genpd_acquire_lock(genpd);
+ 
+-	list_for_each_entry(link, &genpd->master_links, master_node) {
++	list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
+ 		if (link->slave != subdomain)
+ 			continue;
+ 
+@@ -2062,10 +2064,10 @@ EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
+  */
+ void of_genpd_del_provider(struct device_node *np)
+ {
+-	struct of_genpd_provider *cp;
++	struct of_genpd_provider *cp, *tmp;
+ 
+ 	mutex_lock(&of_genpd_mutex);
+-	list_for_each_entry(cp, &of_genpd_providers, link) {
++	list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
+ 		if (cp->node == np) {
+ 			list_del(&cp->link);
+ 			of_node_put(cp->node);
+diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
+index 5ea2f0bbbc7c..071c3ea70882 100644
+--- a/drivers/block/virtio_blk.c
++++ b/drivers/block/virtio_blk.c
+@@ -642,11 +642,12 @@ static int virtblk_probe(struct virtio_device *vdev)
+ 	if (err)
+ 		goto out_put_disk;
+ 
+-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
++	q = blk_mq_init_queue(&vblk->tag_set);
+ 	if (IS_ERR(q)) {
+ 		err = -ENOMEM;
+ 		goto out_free_tags;
+ 	}
++	vblk->disk->queue = q;
+ 
+ 	q->queuedata = vblk;
+ 
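
The reordering above follows a common kernel pattern: never publish an
ERR_PTR-encoded value into a long-lived field, or a later cleanup path may
dereference the poisoned pointer.  Condensed from the hunk:

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;	/* disk->queue is still NULL here */
	}
	vblk->disk->queue = q;		/* publish only a valid pointer */
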
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 3e9ec9523f73..1d8c6cb89c7f 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -588,8 +588,6 @@ int xen_blkif_schedule(void *arg)
+ 	unsigned long timeout;
+ 	int ret;
+ 
+-	xen_blkif_get(blkif);
+-
+ 	while (!kthread_should_stop()) {
+ 		if (try_to_freeze())
+ 			continue;
+@@ -643,7 +641,6 @@ purge_gnt_list:
+ 		print_stats(blkif);
+ 
+ 	blkif->xenblkd = NULL;
+-	xen_blkif_put(blkif);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
+index 6ab69ad61ee1..b8c48da3b19f 100644
+--- a/drivers/block/xen-blkback/xenbus.c
++++ b/drivers/block/xen-blkback/xenbus.c
+@@ -256,7 +256,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
+ 	if (blkif->xenblkd) {
+ 		kthread_stop(blkif->xenblkd);
+ 		wake_up(&blkif->shutdown_wq);
+-		blkif->xenblkd = NULL;
+ 	}
+ 
+ 	/* The above kthread_stop() guarantees that at this point we
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 4bc508c14900..5da703c65d93 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3871,6 +3871,9 @@ static void smi_recv_tasklet(unsigned long val)
+ 	 * because the lower layer is allowed to hold locks while calling
+ 	 * message delivery.
+ 	 */
++
++	rcu_read_lock();
++
+ 	if (!run_to_completion)
+ 		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+ 	if (intf->curr_msg == NULL && !intf->in_shutdown) {
+@@ -3893,6 +3896,8 @@ static void smi_recv_tasklet(unsigned long val)
+ 	if (newmsg)
+ 		intf->handlers->sender(intf->send_info, newmsg);
+ 
++	rcu_read_unlock();
++
+ 	handle_new_recv_msgs(intf);
+ }
+ 
+diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
+index 9df92eda8749..9156bbd90b56 100644
+--- a/drivers/char/ipmi/ipmi_ssif.c
++++ b/drivers/char/ipmi/ipmi_ssif.c
+@@ -757,6 +757,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
+ 			       result, len, data[2]);
+ 		} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
+ 			   || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
++			/*
++			 * Don't abort here, maybe it was a queued
++			 * response to a previous command.
++			 */
++			ipmi_ssif_unlock_cond(ssif_info, flags);
+ 			pr_warn(PFX "Invalid response getting flags: %x %x\n",
+ 				data[0], data[1]);
+ 		} else {
+diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
+index 37b8be7cba95..f335fcee09af 100644
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -1156,10 +1156,11 @@ static int wdog_reboot_handler(struct notifier_block *this,
+ 			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
+ 			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ 		} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
+-			/* Set a long timer to let the reboot happens, but
+-			   reboot if it hangs, but only if the watchdog
++			/* Set a long timer to let the reboot happen or
++			   reset if it hangs, but only if the watchdog
+ 			   timer was already running. */
+-			timeout = 120;
++			if (timeout < 120)
++				timeout = 120;
+ 			pretimeout = 0;
+ 			ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
+ 			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
+index ee66fd4673f3..62a6117b57d7 100644
+--- a/drivers/char/tpm/tpm-sysfs.c
++++ b/drivers/char/tpm/tpm-sysfs.c
+@@ -38,6 +38,8 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr,
+ 
+ 	struct tpm_chip *chip = dev_get_drvdata(dev);
+ 
++	memset(&tpm_cmd, 0, sizeof(tpm_cmd));
++
+ 	tpm_cmd.header.in = tpm_readpubek_header;
+ 	err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
+ 			       "attempting to read the PUBEK");
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index f557695a2409..7724ddb0f776 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
+ 			return false;
+ 		}
+ 
++		/*
++		 * ignore out-of-order messages or messages that are part of a
++		 * failed transaction
++		 */
++		if (!recv_hdr.somt && !msg->have_somt)
++			return false;
++
+ 		/* get length contained in this portion */
+ 		msg->curchunk_len = recv_hdr.msg_len;
+ 		msg->curchunk_hdrlen = hdrlen;
+@@ -2161,7 +2168,7 @@ out_unlock:
+ }
+ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+ 
+-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
++static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+ {
+ 	int len;
+ 	u8 replyblock[32];
+@@ -2176,12 +2183,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+ 			       replyblock, len);
+ 	if (ret != len) {
+ 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
+-		return;
++		return false;
+ 	}
+ 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+ 	if (!ret) {
+ 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+-		return;
++		return false;
+ 	}
+ 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
+ 
+@@ -2193,21 +2200,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+ 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
+ 				    replyblock, len);
+ 		if (ret != len) {
+-			DRM_DEBUG_KMS("failed to read a chunk\n");
++			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
++				      len, ret);
++			return false;
+ 		}
++
+ 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+-		if (ret == false)
++		if (!ret) {
+ 			DRM_DEBUG_KMS("failed to build sideband msg\n");
++			return false;
++		}
++
+ 		curreply += len;
+ 		replylen -= len;
+ 	}
++	return true;
+ }
+ 
+ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ {
+ 	int ret = 0;
+ 
+-	drm_dp_get_one_sb_msg(mgr, false);
++	if (!drm_dp_get_one_sb_msg(mgr, false)) {
++		memset(&mgr->down_rep_recv, 0,
++		       sizeof(struct drm_dp_sideband_msg_rx));
++		return 0;
++	}
+ 
+ 	if (mgr->down_rep_recv.have_eomt) {
+ 		struct drm_dp_sideband_msg_tx *txmsg;
+@@ -2263,7 +2281,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
+ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ {
+ 	int ret = 0;
+-	drm_dp_get_one_sb_msg(mgr, true);
++
++	if (!drm_dp_get_one_sb_msg(mgr, true)) {
++		memset(&mgr->up_req_recv, 0,
++		       sizeof(struct drm_dp_sideband_msg_rx));
++		return 0;
++	}
+ 
+ 	if (mgr->up_req_recv.have_eomt) {
+ 		struct drm_dp_sideband_msg_req_body msg;
+@@ -2315,7 +2338,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ 		}
+ 
+-		drm_dp_put_mst_branch_device(mstb);
++		if (mstb)
++			drm_dp_put_mst_branch_device(mstb);
++
+ 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ 	}
+ 	return ret;
+diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+index bbdcab0a56c1..3401df5b44db 100644
+--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+@@ -193,7 +193,14 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ void adreno_flush(struct msm_gpu *gpu)
+ {
+ 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+-	uint32_t wptr = get_wptr(gpu->rb);
++	uint32_t wptr;
++
++	/*
++	 * Mask the calculated wptr value so that it fits in the HW range.
++	 * This accounts for the possibility that the last command fit
++	 * exactly into the ringbuffer and rb->next hasn't wrapped to zero
++	 * yet.
++	 */
++	wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
+ 
+ 	/* ensure writes to ringbuffer have hit system memory: */
+ 	mb();
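
The masking above is the standard power-of-two ring trick, which is also
why the msm_ringbuffer hunk further down starts rejecting sizes that are
not a power of two.  A hedged sketch:

	#include <stdint.h>

	/* Wrapping an index with (slots - 1) is equivalent to
	 * index % slots only when slots is a power of two - hence the
	 * is_power_of_2() check at ring creation time. */
	static uint32_t ring_wrap(uint32_t index, uint32_t slots)
	{
		return index & (slots - 1);
	}
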
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index 4ff8c334e7c8..4a45ae01cc3e 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 			pagefault_disable();
+ 		}
+ 
+-		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
++		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
++			!(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
+ 			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
+ 			ret = -EINVAL;
+ 			goto out_unlock;
+diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
+index 1f14b908b221..ae317271cf81 100644
+--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
++++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
+@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+ 	struct msm_ringbuffer *ring;
+ 	int ret;
+ 
+-	size = ALIGN(size, 4);   /* size should be dword aligned */
++	if (WARN_ON(!is_power_of_2(size)))
++		return ERR_PTR(-EINVAL);
+ 
+ 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ 	if (!ring) {
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index d4ac8c837314..8e86cf7da614 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -30,6 +30,7 @@
+ #include "radeon_audio.h"
+ #include "atom.h"
+ #include <linux/backlight.h>
++#include <linux/dmi.h>
+ 
+ extern int atom_debug;
+ 
+@@ -2183,9 +2184,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx)
+ 		goto assigned;
+ 	}
+ 
+-	/* on DCE32 and encoder can driver any block so just crtc id */
++	/*
++	 * On DCE32 any encoder can drive any block so usually just use crtc id,
++	 * but Apple thinks different at least on iMac10,1, so there use linkb,
++	 * otherwise the internal eDP panel will stay dark.
++	 */
+ 	if (ASIC_IS_DCE32(rdev)) {
+-		enc_idx = radeon_crtc->crtc_id;
++		if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
++			enc_idx = (dig->linkb) ? 1 : 0;
++		else
++			enc_idx = radeon_crtc->crtc_id;
++
+ 		goto assigned;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 7d0b8ef9bea2..7c6f15d284e3 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -277,26 +277,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
+  * Page Flip
+  */
+ 
+-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+-				   struct drm_file *file)
+-{
+-	struct drm_pending_vblank_event *event;
+-	struct drm_device *dev = rcrtc->crtc.dev;
+-	unsigned long flags;
+-
+-	/* Destroy the pending vertical blanking event associated with the
+-	 * pending page flip, if any, and disable vertical blanking interrupts.
+-	 */
+-	spin_lock_irqsave(&dev->event_lock, flags);
+-	event = rcrtc->event;
+-	if (event && event->base.file_priv == file) {
+-		rcrtc->event = NULL;
+-		event->base.destroy(&event->base);
+-		drm_crtc_vblank_put(&rcrtc->crtc);
+-	}
+-	spin_unlock_irqrestore(&dev->event_lock, flags);
+-}
+-
+ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
+ {
+ 	struct drm_pending_vblank_event *event;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+index 5d9aa9b33769..0d61a813054a 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+@@ -53,8 +53,6 @@ enum rcar_du_output {
+ 
+ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
+ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
+-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
+-				   struct drm_file *file);
+ void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
+ void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index da1216a73969..94133c3ffe20 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -205,15 +205,6 @@ done:
+ 	return ret;
+ }
+ 
+-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
+-{
+-	struct rcar_du_device *rcdu = dev->dev_private;
+-	unsigned int i;
+-
+-	for (i = 0; i < rcdu->num_crtcs; ++i)
+-		rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
+-}
+-
+ static void rcar_du_lastclose(struct drm_device *dev)
+ {
+ 	struct rcar_du_device *rcdu = dev->dev_private;
+@@ -256,7 +247,6 @@ static struct drm_driver rcar_du_driver = {
+ 				| DRIVER_ATOMIC,
+ 	.load			= rcar_du_load,
+ 	.unload			= rcar_du_unload,
+-	.preclose		= rcar_du_preclose,
+ 	.lastclose		= rcar_du_lastclose,
+ 	.set_busid		= drm_platform_set_busid,
+ 	.get_vblank_counter	= drm_vblank_count,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index aee1c6ccc52d..6c312b584802 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -285,7 +285,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
+ 			   struct vmw_sw_context *sw_context,
+ 			   SVGA3dCmdHeader *header)
+ {
+-	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
++	return -EINVAL;
+ }
+ 
+ static int vmw_cmd_ok(struct vmw_private *dev_priv,
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 07a963039b60..d786b48f5d7b 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -2391,6 +2391,7 @@ static const struct hid_device_id hid_ignore_list[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+ #if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE)
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 7ce93d927f62..e995058ad264 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -760,6 +760,9 @@
+ #define USB_VENDOR_ID_PETALYNX		0x18b1
+ #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE	0x0037
+ 
++#define USB_VENDOR_ID_PETZL		0x2122
++#define USB_DEVICE_ID_PETZL_HEADLAMP	0x1234
++
+ #define USB_VENDOR_ID_PHILIPS		0x0471
+ #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
+ 
+diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
+index 56292ae4538d..9bcad9a444f7 100644
+--- a/drivers/iio/adc/vf610_adc.c
++++ b/drivers/iio/adc/vf610_adc.c
+@@ -71,7 +71,7 @@
+ #define VF610_ADC_ADSTS_MASK		0x300
+ #define VF610_ADC_ADLPC_EN		0x80
+ #define VF610_ADC_ADHSC_EN		0x400
+-#define VF610_ADC_REFSEL_VALT		0x100
++#define VF610_ADC_REFSEL_VALT		0x800
+ #define VF610_ADC_REFSEL_VBG		0x1000
+ #define VF610_ADC_ADTRG_HARD		0x2000
+ #define VF610_ADC_AVGS_8		0x4000
+diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
+index 94daa9fc1247..6a135effb7c5 100644
+--- a/drivers/iio/light/tsl2563.c
++++ b/drivers/iio/light/tsl2563.c
+@@ -626,7 +626,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
+ 	struct tsl2563_chip *chip = iio_priv(dev_info);
+ 
+ 	iio_push_event(dev_info,
+-		       IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
++		       IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
+ 					    0,
+ 					    IIO_EV_TYPE_THRESH,
+ 					    IIO_EV_DIR_EITHER),
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index b52a704c3449..2d515a544f33 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1586,7 +1586,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
+ 		     struct isert_conn *isert_conn,
+ 		     u32 xfer_len)
+ {
+-	struct ib_device *ib_dev = isert_conn->cm_id->device;
++	struct ib_device *ib_dev = isert_conn->device->ib_device;
+ 	struct iscsi_hdr *hdr;
+ 	u64 rx_dma;
+ 	int rx_buflen, outstanding;
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index 4cfb0ac797ef..6f15cdf5ff40 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -397,8 +397,10 @@ static int i8042_start(struct serio *serio)
+ {
+ 	struct i8042_port *port = serio->port_data;
+ 
++	spin_lock_irq(&i8042_lock);
+ 	port->exists = true;
+-	mb();
++	spin_unlock_irq(&i8042_lock);
++
+ 	return 0;
+ }
+ 
+@@ -411,16 +413,20 @@ static void i8042_stop(struct serio *serio)
+ {
+ 	struct i8042_port *port = serio->port_data;
+ 
++	spin_lock_irq(&i8042_lock);
+ 	port->exists = false;
++	port->serio = NULL;
++	spin_unlock_irq(&i8042_lock);
+ 
+ 	/*
++	 * We need to make sure that the interrupt handler finishes using
++	 * our serio port before we return from this function.
+ 	 * We synchronize with both AUX and KBD IRQs because there is
+ 	 * a (very unlikely) chance that AUX IRQ is raised for KBD port
+ 	 * and vice versa.
+ 	 */
+ 	synchronize_irq(I8042_AUX_IRQ);
+ 	synchronize_irq(I8042_KBD_IRQ);
+-	port->serio = NULL;
+ }
+ 
+ /*
+@@ -537,7 +543,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id)
+ 
+ 	spin_unlock_irqrestore(&i8042_lock, flags);
+ 
+-	if (likely(port->exists && !filtered))
++	if (likely(serio && !filtered))
+ 		serio_interrupt(serio, data, dfl);
+ 
+  out:
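
The i8042 change replaces a bare memory barrier with the very lock the
interrupt handler takes, which is the reliable way to retract a pointer an
IRQ handler might be using.  The teardown ordering, condensed from the
hunks above:

	/* 1. Retract the pointer under the handler's own lock, so no
	 *    handler entered after this point can see it. */
	spin_lock_irq(&i8042_lock);
	port->exists = false;
	port->serio = NULL;
	spin_unlock_irq(&i8042_lock);

	/* 2. Wait out any handler already past the lock, so nothing is
	 *    still using the old serio when teardown returns. */
	synchronize_irq(I8042_AUX_IRQ);
	synchronize_irq(I8042_KBD_IRQ);
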
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index 9c1e8adaf4fc..bf3fbd00a091 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
+ 		       id);
+ 		return NULL;
+ 	} else {
+-		rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
++		rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
+ 		if (!rs)
+ 			return NULL;
+ 		rs->state = CCPResetIdle;
+diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
+index 19b491d2964f..ac6087f77e08 100644
+--- a/drivers/mailbox/mailbox.c
++++ b/drivers/mailbox/mailbox.c
+@@ -104,11 +104,14 @@ static void tx_tick(struct mbox_chan *chan, int r)
+ 	/* Submit next message */
+ 	msg_submit(chan);
+ 
++	if (!mssg)
++		return;
++
+ 	/* Notify the client */
+-	if (mssg && chan->cl->tx_done)
++	if (chan->cl->tx_done)
+ 		chan->cl->tx_done(chan->cl, mssg, r);
+ 
+-	if (chan->cl->tx_block)
++	if (r != -ETIME && chan->cl->tx_block)
+ 		complete(&chan->tx_complete);
+ }
+ 
+@@ -258,7 +261,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
+ 
+ 	msg_submit(chan);
+ 
+-	if (chan->cl->tx_block && chan->active_req) {
++	if (chan->cl->tx_block) {
+ 		unsigned long wait;
+ 		int ret;
+ 
+@@ -269,8 +272,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
+ 
+ 		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
+ 		if (ret == 0) {
+-			t = -EIO;
+-			tx_tick(chan, -EIO);
++			t = -ETIME;
++			tx_tick(chan, t);
+ 		}
+ 	}
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 2b4e51c0544c..bf29edd8e8ee 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1118,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
+ 		 */
+ 		DEFINE_WAIT(w);
+ 		for (;;) {
+-			flush_signals(current);
++			sigset_t full, old;
+ 			prepare_to_wait(&conf->wait_barrier,
+ 					&w, TASK_INTERRUPTIBLE);
+ 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
+@@ -1127,7 +1127,10 @@ static void make_request(struct mddev *mddev, struct bio * bio)
+ 			     !md_cluster_ops->area_resyncing(mddev,
+ 				     bio->bi_iter.bi_sector, bio_end_sector(bio))))
+ 				break;
++			sigfillset(&full);
++			sigprocmask(SIG_BLOCK, &full, &old);
+ 			schedule();
++			sigprocmask(SIG_SETMASK, &old, NULL);
+ 		}
+ 		finish_wait(&conf->wait_barrier, &w);
+ 	}
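
Both the raid1 hunk above and the raid5 hunk below replace flush_signals()
- which discards pending signals outright - with temporarily blocking them
across the sleep, so the signals are delivered after the wait instead of
being lost.  The idiom, condensed from the hunks:

	sigset_t full, old;

	sigfillset(&full);
	sigprocmask(SIG_BLOCK, &full, &old);	/* park signals */
	schedule();				/* sleep undisturbed */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* pending ones fire now */
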
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index d7942cbaa1b0..69542a92e4b0 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5275,12 +5275,15 @@ static void make_request(struct mddev *mddev, struct bio * bi)
+ 				 * userspace, we want an interruptible
+ 				 * wait.
+ 				 */
+-				flush_signals(current);
+ 				prepare_to_wait(&conf->wait_for_overlap,
+ 						&w, TASK_INTERRUPTIBLE);
+ 				if (logical_sector >= mddev->suspend_lo &&
+ 				    logical_sector < mddev->suspend_hi) {
++					sigset_t full, old;
++					sigfillset(&full);
++					sigprocmask(SIG_BLOCK, &full, &old);
+ 					schedule();
++					sigprocmask(SIG_SETMASK, &old, NULL);
+ 					do_prepare = true;
+ 				}
+ 				goto retry;
+@@ -5796,6 +5799,8 @@ static void raid5_do_work(struct work_struct *work)
+ 	pr_debug("%d stripes handled\n", handled);
+ 
+ 	spin_unlock_irq(&conf->device_lock);
++
++	async_tx_issue_pending_all();
+ 	blk_finish_plug(&plug);
+ 
+ 	pr_debug("--- raid5worker inactive\n");
+@@ -7441,12 +7446,10 @@ static void end_reshape(struct r5conf *conf)
+ {
+ 
+ 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
+-		struct md_rdev *rdev;
+ 
+ 		spin_lock_irq(&conf->device_lock);
+ 		conf->previous_raid_disks = conf->raid_disks;
+-		rdev_for_each(rdev, conf->mddev)
+-			rdev->data_offset = rdev->new_data_offset;
++		md_finish_reshape(conf->mddev);
+ 		smp_wmb();
+ 		conf->reshape_progress = MaxSector;
+ 		spin_unlock_irq(&conf->device_lock);
+diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+index 8001cde1db1e..503135a4f47a 100644
+--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
++++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+@@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
+ 	}
+ 
+ 	if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
+-		ret = s5c73m3_af_run(state, ~af_lock);
++		ret = s5c73m3_af_run(state, !af_lock);
+ 
+ 	return ret;
+ }
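
This fix, like the imon pad_mouse change further down, corrects a classic
C slip: bitwise ~ on a flag inverts every bit, while logical ! produces
the 0/1 toggle that was intended.  Illustrative values:

	unsigned int af_lock = 1;

	unsigned int wrong = ~af_lock;	/* 0xfffffffe - all bits flipped */
	unsigned int right = !af_lock;	/* 0 - the intended boolean toggle */
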
+diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
+index 8f2556ec3971..61611d1682d1 100644
+--- a/drivers/media/pci/cx88/cx88-cards.c
++++ b/drivers/media/pci/cx88/cx88-cards.c
+@@ -3691,7 +3691,14 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
+ 	core->nr = nr;
+ 	sprintf(core->name, "cx88[%d]", core->nr);
+ 
+-	core->tvnorm = V4L2_STD_NTSC_M;
++	/*
++	 * Note: Setting the initial standard here would cause the first
++	 * call to cx88_set_tvnorm() to return without programming any
++	 * registers.  Leave it blank at this point and it will get set
++	 * later in cx8800_initdev().
++	 */
++	core->tvnorm  = 0;
++
+ 	core->width   = 320;
+ 	core->height  = 240;
+ 	core->field   = V4L2_FIELD_INTERLACED;
+diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
+index c9decd80bf61..53073def2bec 100644
+--- a/drivers/media/pci/cx88/cx88-video.c
++++ b/drivers/media/pci/cx88/cx88-video.c
+@@ -1429,7 +1429,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev,
+ 
+ 	/* initial device configuration */
+ 	mutex_lock(&core->lock);
+-	cx88_set_tvnorm(core, core->tvnorm);
++	cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
+ 	v4l2_ctrl_handler_setup(&core->video_hdl);
+ 	v4l2_ctrl_handler_setup(&core->audio_hdl);
+ 	cx88_video_mux(core, 0);
+diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
+index 6c73f5b155f6..1c779ea8b5ec 100644
+--- a/drivers/media/pci/saa7164/saa7164-bus.c
++++ b/drivers/media/pci/saa7164/saa7164-bus.c
+@@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ 	msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
+ 	msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
+ 	msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
++	memcpy(msg, &msg_tmp, sizeof(*msg));
+ 
+ 	/* No need to update the read positions, because this was a peek */
+ 	/* If the caller specifically want to peek, return */
+ 	if (peekonly) {
+-		memcpy(msg, &msg_tmp, sizeof(*msg));
+ 		goto peekout;
+ 	}
+ 
+@@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ 		space_rem = bus->m_dwSizeGetRing - curr_grp;
+ 
+ 		if (space_rem < sizeof(*msg)) {
+-			/* msg wraps around the ring */
+-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
+-			memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
+-				sizeof(*msg) - space_rem);
+ 			if (buf)
+ 				memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
+ 					space_rem, buf_size);
+ 
+ 		} else if (space_rem == sizeof(*msg)) {
+-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ 			if (buf)
+ 				memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
+ 		} else {
+ 			/* Additional data wraps around the ring */
+-			memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ 			if (buf) {
+ 				memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
+ 					sizeof(*msg), space_rem - sizeof(*msg));
+@@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
+ 
+ 	} else {
+ 		/* No wrapping */
+-		memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
+ 		if (buf)
+ 			memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
+ 				buf_size);
+ 	}
+-	/* Convert from little endian to CPU */
+-	msg->size = le16_to_cpu((__force __le16)msg->size);
+-	msg->command = le32_to_cpu((__force __le32)msg->command);
+-	msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
+ 
+ 	/* Update the read positions, adjusting the ring */
+ 	saa7164_writel(bus->m_dwGetReadPos, new_grp);
+diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
+index ccfcf3f528d3..445e17aeb8b2 100644
+--- a/drivers/media/platform/davinci/vpfe_capture.c
++++ b/drivers/media/platform/davinci/vpfe_capture.c
+@@ -1706,27 +1706,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
+ 
+ 	switch (cmd) {
+ 	case VPFE_CMD_S_CCDC_RAW_PARAMS:
++		ret = -EINVAL;
+ 		v4l2_warn(&vpfe_dev->v4l2_dev,
+-			  "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
+-		if (ccdc_dev->hw_ops.set_params) {
+-			ret = ccdc_dev->hw_ops.set_params(param);
+-			if (ret) {
+-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-					"Error setting parameters in CCDC\n");
+-				goto unlock_out;
+-			}
+-			ret = vpfe_get_ccdc_image_format(vpfe_dev,
+-							 &vpfe_dev->fmt);
+-			if (ret < 0) {
+-				v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-					"Invalid image format at CCDC\n");
+-				goto unlock_out;
+-			}
+-		} else {
+-			ret = -EINVAL;
+-			v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
+-				"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+-		}
++			"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
+ 		break;
+ 	default:
+ 		ret = -ENOTTY;
+diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
+index 65f80b8b9f7a..eb9e7feb9b13 100644
+--- a/drivers/media/rc/imon.c
++++ b/drivers/media/rc/imon.c
+@@ -1629,7 +1629,7 @@ static void imon_incoming_packet(struct imon_context *ictx,
+ 	if (kc == KEY_KEYBOARD && !ictx->release_code) {
+ 		ictx->last_keycode = kc;
+ 		if (!nomouse) {
+-			ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
++			ictx->pad_mouse = !ictx->pad_mouse;
+ 			dev_dbg(dev, "toggling to %s mode\n",
+ 				ictx->pad_mouse ? "mouse" : "keyboard");
+ 			spin_unlock_irqrestore(&ictx->kc_lock, flags);
+diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
+index 98893a8332c7..4795c31ceebc 100644
+--- a/drivers/media/rc/ir-lirc-codec.c
++++ b/drivers/media/rc/ir-lirc-codec.c
+@@ -257,7 +257,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
+ 		return 0;
+ 
+ 	case LIRC_GET_REC_RESOLUTION:
+-		val = dev->rx_resolution;
++		val = dev->rx_resolution / 1000;
+ 		break;
+ 
+ 	case LIRC_SET_WIDEBAND_RECEIVER:
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 65fed7146e9b..cc91f7b3d90c 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
+ 			 struct device *dev)
+ {
+ 	struct enclosure_component *cdev;
++	int err;
+ 
+ 	if (!edev || component >= edev->components)
+ 		return -EINVAL;
+@@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
+ 	if (cdev->dev == dev)
+ 		return -EEXIST;
+ 
+-	if (cdev->dev)
++	if (cdev->dev) {
+ 		enclosure_remove_links(cdev);
+-
+-	put_device(cdev->dev);
++		put_device(cdev->dev);
++	}
+ 	cdev->dev = get_device(dev);
+-	return enclosure_add_links(cdev);
++	err = enclosure_add_links(cdev);
++	if (err) {
++		put_device(cdev->dev);
++		cdev->dev = NULL;
++	}
++	return err;
+ }
+ EXPORT_SYMBOL_GPL(enclosure_add_device);
+ 
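
The enclosure hunk above is a textbook reference-balancing fix: the error
path of enclosure_add_device() previously leaked the reference taken with
get_device(). A minimal userspace sketch of the same get/put discipline,
with get_ref()/put_ref() as toy stand-ins for the driver-core APIs:

#include <stdio.h>

struct obj { int refs; };

struct obj *get_ref(struct obj *o) { if (o) o->refs++; return o; }
void put_ref(struct obj *o)        { if (o) o->refs--; }

struct slot { struct obj *dev; };

/* Hypothetical follow-up step; always fails to exercise the unwind. */
int add_links(struct slot *s) { (void)s; return -1; }

int add_device(struct slot *s, struct obj *dev)
{
	int err;

	if (s->dev)			/* replacing: drop the old reference */
		put_ref(s->dev);

	s->dev = get_ref(dev);
	err = add_links(s);
	if (err) {			/* unwind: drop what we just took */
		put_ref(s->dev);
		s->dev = NULL;
	}
	return err;
}

int main(void)
{
	struct obj dev = { .refs = 1 };
	struct slot s = { 0 };

	add_device(&s, &dev);
	printf("refcount after failed add: %d\n", dev.refs); /* still 1 */
	return 0;
}
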
+diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
+index 5d5d36272bb5..448123268e3b 100644
+--- a/drivers/mtd/spi-nor/fsl-quadspi.c
++++ b/drivers/mtd/spi-nor/fsl-quadspi.c
+@@ -140,15 +140,15 @@
+ #define LUT_MODE		4
+ #define LUT_MODE2		5
+ #define LUT_MODE4		6
+-#define LUT_READ		7
+-#define LUT_WRITE		8
++#define LUT_FSL_READ		7
++#define LUT_FSL_WRITE		8
+ #define LUT_JMP_ON_CS		9
+ #define LUT_ADDR_DDR		10
+ #define LUT_MODE_DDR		11
+ #define LUT_MODE2_DDR		12
+ #define LUT_MODE4_DDR		13
+-#define LUT_READ_DDR		14
+-#define LUT_WRITE_DDR		15
++#define LUT_FSL_READ_DDR		14
++#define LUT_FSL_WRITE_DDR		15
+ #define LUT_DATA_LEARN		16
+ 
+ /*
+@@ -312,7 +312,7 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ 
+ 	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ 			base + QUADSPI_LUT(lut_base));
+-	writel(LUT0(DUMMY, PAD1, dummy) | LUT1(READ, PAD4, rxfifo),
++	writel(LUT0(DUMMY, PAD1, dummy) | LUT1(FSL_READ, PAD4, rxfifo),
+ 			base + QUADSPI_LUT(lut_base + 1));
+ 
+ 	/* Write enable */
+@@ -333,11 +333,11 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ 
+ 	writel(LUT0(CMD, PAD1, cmd) | LUT1(ADDR, PAD1, addrlen),
+ 			base + QUADSPI_LUT(lut_base));
+-	writel(LUT0(WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
++	writel(LUT0(FSL_WRITE, PAD1, 0), base + QUADSPI_LUT(lut_base + 1));
+ 
+ 	/* Read Status */
+ 	lut_base = SEQID_RDSR * 4;
+-	writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(READ, PAD1, 0x1),
++	writel(LUT0(CMD, PAD1, SPINOR_OP_RDSR) | LUT1(FSL_READ, PAD1, 0x1),
+ 			base + QUADSPI_LUT(lut_base));
+ 
+ 	/* Erase a sector */
+@@ -362,17 +362,17 @@ static void fsl_qspi_init_lut(struct fsl_qspi *q)
+ 
+ 	/* READ ID */
+ 	lut_base = SEQID_RDID * 4;
+-	writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(READ, PAD1, 0x8),
++	writel(LUT0(CMD, PAD1, SPINOR_OP_RDID) | LUT1(FSL_READ, PAD1, 0x8),
+ 			base + QUADSPI_LUT(lut_base));
+ 
+ 	/* Write Register */
+ 	lut_base = SEQID_WRSR * 4;
+-	writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(WRITE, PAD1, 0x2),
++	writel(LUT0(CMD, PAD1, SPINOR_OP_WRSR) | LUT1(FSL_WRITE, PAD1, 0x2),
+ 			base + QUADSPI_LUT(lut_base));
+ 
+ 	/* Read Configuration Register */
+ 	lut_base = SEQID_RDCR * 4;
+-	writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(READ, PAD1, 0x1),
++	writel(LUT0(CMD, PAD1, SPINOR_OP_RDCR) | LUT1(FSL_READ, PAD1, 0x1),
+ 			base + QUADSPI_LUT(lut_base));
+ 
+ 	/* Write disable */
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 7896f0f1fa05..f9713fe036ef 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -8722,11 +8722,14 @@ static void tg3_free_consistent(struct tg3 *tp)
+ 	tg3_mem_rx_release(tp);
+ 	tg3_mem_tx_release(tp);
+ 
++	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
++	tg3_full_lock(tp, 0);
+ 	if (tp->hw_stats) {
+ 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+ 				  tp->hw_stats, tp->stats_mapping);
+ 		tp->hw_stats = NULL;
+ 	}
++	tg3_full_unlock(tp);
+ }
+ 
+ /*
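
The tg3 hunk above closes a use-after-free race: tg3_get_stats64() could
dereference tp->hw_stats while tg3_free_consistent() was freeing it, so the
free now happens under the same lock the reader takes. A toy pthread
analogue of that discipline (compile with -pthread; the kernel uses
tg3_full_lock(), not a mutex):

#include <pthread.h>
#include <stdlib.h>

pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
int *hw_stats;

void free_stats(void)
{
	pthread_mutex_lock(&stats_lock);	/* excludes concurrent readers */
	free(hw_stats);
	hw_stats = NULL;
	pthread_mutex_unlock(&stats_lock);
}

int read_stats(void)
{
	int v = 0;

	pthread_mutex_lock(&stats_lock);
	if (hw_stats)		/* cannot be freed while we hold the lock */
		v = *hw_stats;
	pthread_mutex_unlock(&stats_lock);
	return v;
}

int main(void)
{
	hw_stats = calloc(1, sizeof(*hw_stats));
	free_stats();
	return read_stats();	/* safely returns 0 after the free */
}
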
+diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
+index 2a9dd460a95f..e1f9e7cebf8f 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
+@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
++	if (offset_in_page(buf)) {
++		dma_free_coherent(dev, PAGE_SIZE << order,
++				  buf, sg_dma_address(mem));
++		return -ENOMEM;
++	}
++
+ 	sg_set_buf(mem, buf, PAGE_SIZE << order);
+-	BUG_ON(mem->offset);
+ 	sg_dma_len(mem) = PAGE_SIZE << order;
+ 	return 0;
+ }
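
The mlx4 hunk above downgrades a BUG_ON() into graceful error handling: if
the coherent allocation is ever not page-aligned, the buffer is released
and -ENOMEM returned instead of crashing the machine. A standalone sketch,
where alloc_coherent()/free_coherent() are hypothetical stand-ins for the
DMA API (real coherent allocations are normally page-aligned, so this
branch is defensive):

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

void *alloc_coherent(size_t size) { return malloc(size); }
void free_coherent(void *buf)     { free(buf); }

int alloc_chunk(size_t size, void **out)
{
	void *buf = alloc_coherent(size);

	if (!buf)
		return -1;

	/* offset_in_page(buf) != 0: not page-aligned, which the mapping
	 * code relies on.  Unwind instead of BUG_ON(). */
	if ((uintptr_t)buf & (PAGE_SIZE - 1)) {
		free_coherent(buf);
		return -1;
	}

	*out = buf;
	return 0;
}

int main(void)
{
	void *p = NULL;

	if (alloc_chunk(PAGE_SIZE, &p) == 0)
		free_coherent(p);
	return 0;
}
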
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index 3df51faf18ae..af4b1f4c24d2 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -326,6 +326,7 @@ enum cfg_version {
+ static const struct pci_device_id rtl8169_pci_tbl[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), 0, 0, RTL_CFG_0 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, RTL_CFG_2 },
++	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8161), 0, 0, RTL_CFG_1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, RTL_CFG_0 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8168), 0, 0, RTL_CFG_1 },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, RTL_CFG_0 },
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index c93a458f96f7..e2dd94a91c15 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -726,6 +726,7 @@ static struct sh_eth_cpu_data sh7734_data = {
+ 	.tsu		= 1,
+ 	.hw_crc		= 1,
+ 	.select_mii	= 1,
++	.shift_rd0	= 1,
+ };
+ 
+ /* SH7763 */
+@@ -794,6 +795,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
+ 	.rpadir_value   = 2 << 16,
+ 	.no_trimd	= 1,
+ 	.no_ade		= 1,
++	.hw_crc		= 1,
+ 	.tsu		= 1,
+ 	.select_mii	= 1,
+ 	.shift_rd0	= 1,
+diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
+index bca6a1e72d1d..e1bb802d4a4d 100644
+--- a/drivers/net/irda/mcs7780.c
++++ b/drivers/net/irda/mcs7780.c
+@@ -141,9 +141,19 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
+ static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
+ {
+ 	struct usb_device *dev = mcs->usbdev;
+-	int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+-				  MCS_RD_RTYPE, 0, reg, val, 2,
+-				  msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++	void *dmabuf;
++	int ret;
++
++	dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
++	if (!dmabuf)
++		return -ENOMEM;
++
++	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
++			      MCS_RD_RTYPE, 0, reg, dmabuf, 2,
++			      msecs_to_jiffies(MCS_CTRL_TIMEOUT));
++
++	memcpy(val, dmabuf, sizeof(__u16));
++	kfree(dmabuf);
+ 
+ 	return ret;
+ }
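
The mcs7780 hunk above fixes a common USB mistake: usb_control_msg() DMAs
into the supplied buffer, so the buffer must be separately kmalloc'd rather
than a caller-provided pointer that may live on the stack. Sketch of the
allocate/transfer/copy-out/free pattern, with a hypothetical
do_control_read() in place of usb_control_msg():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Pretend control transfer: fills 'len' bytes and returns the count. */
int do_control_read(void *buf, size_t len)
{
	memset(buf, 0xab, len);
	return (int)len;
}

int get_reg(uint16_t *val)
{
	void *dmabuf;
	int ret;

	dmabuf = malloc(sizeof(*val));	/* kmalloc(..., GFP_KERNEL) in the driver */
	if (!dmabuf)
		return -1;

	ret = do_control_read(dmabuf, sizeof(*val));

	memcpy(val, dmabuf, sizeof(*val));	/* hand the result back */
	free(dmabuf);
	return ret;
}

int main(void)
{
	uint16_t v;

	return get_reg(&v) == 2 ? 0 : 1;
}
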
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+index 21a668faacd7..1ca78b46c01b 100644
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -512,6 +512,9 @@ void phy_stop_machine(struct phy_device *phydev)
+ 	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
+ 		phydev->state = PHY_UP;
+ 	mutex_unlock(&phydev->lock);
++
++	/* Now we can run the state machine synchronously */
++	phy_state_machine(&phydev->state_queue.work);
+ }
+ 
+ /**
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index d551df62e61a..afb87840f853 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1281,6 +1281,8 @@ static int phy_remove(struct device *dev)
+ {
+ 	struct phy_device *phydev = to_phy_device(dev);
+ 
++	cancel_delayed_work_sync(&phydev->state_queue);
++
+ 	mutex_lock(&phydev->lock);
+ 	phydev->state = PHY_DOWN;
+ 	mutex_unlock(&phydev->lock);
+@@ -1355,7 +1357,7 @@ static struct phy_driver genphy_driver[] = {
+ 	.phy_id		= 0xffffffff,
+ 	.phy_id_mask	= 0xffffffff,
+ 	.name		= "Generic PHY",
+-	.soft_reset	= genphy_soft_reset,
++	.soft_reset	= genphy_no_soft_reset,
+ 	.config_init	= genphy_config_init,
+ 	.features	= PHY_GBIT_FEATURES | SUPPORTED_MII |
+ 			  SUPPORTED_AUI | SUPPORTED_FIBRE |
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index 1e9cdca37014..c626971096d4 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1009,6 +1009,7 @@ static int kaweth_probe(
+ 	struct net_device *netdev;
+ 	const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+ 	int result = 0;
++	int rv = -EIO;
+ 
+ 	dev_dbg(dev,
+ 		"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
+@@ -1029,6 +1030,7 @@ static int kaweth_probe(
+ 	kaweth = netdev_priv(netdev);
+ 	kaweth->dev = udev;
+ 	kaweth->net = netdev;
++	kaweth->intf = intf;
+ 
+ 	spin_lock_init(&kaweth->device_lock);
+ 	init_waitqueue_head(&kaweth->term_wait);
+@@ -1048,6 +1050,10 @@ static int kaweth_probe(
+ 		/* Download the firmware */
+ 		dev_info(dev, "Downloading firmware...\n");
+ 		kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
++		if (!kaweth->firmware_buf) {
++			rv = -ENOMEM;
++			goto err_free_netdev;
++		}
+ 		if ((result = kaweth_download_firmware(kaweth,
+ 						      "kaweth/new_code.bin",
+ 						      100,
+@@ -1139,8 +1145,6 @@ err_fw:
+ 
+ 	dev_dbg(dev, "Initializing net device.\n");
+ 
+-	kaweth->intf = intf;
+-
+ 	kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
+ 	if (!kaweth->tx_urb)
+ 		goto err_free_netdev;
+@@ -1210,7 +1214,7 @@ err_only_tx:
+ err_free_netdev:
+ 	free_netdev(netdev);
+ 
+-	return -EIO;
++	return rv;
+ }
+ 
+ /****************************************************************
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
+index c8b64e7a6089..deed8dcfd91a 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
+@@ -562,6 +562,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+ 	struct sk_buff *skb;
+ 	u32 cmd_id;
+ 
++	if (!ar->wmi.ops->gen_vdev_spectral_conf)
++		return -EOPNOTSUPP;
++
+ 	skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+ 	if (IS_ERR(skb))
+ 		return PTR_ERR(skb);
+@@ -577,6 +580,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+ 	struct sk_buff *skb;
+ 	u32 cmd_id;
+ 
++	if (!ar->wmi.ops->gen_vdev_spectral_enable)
++		return -EOPNOTSUPP;
++
+ 	skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+ 						    enable);
+ 	if (IS_ERR(skb))
+diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+index 1ad66b76749b..c1b661e5c8c4 100644
+--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
++++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+@@ -1816,8 +1816,6 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
+ static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
+ {
+ 	REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+-	REG_SET_BIT(ah, 0x9864, 0x7f000);
+-	REG_SET_BIT(ah, 0x9924, 0x7f00fe);
+ 	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
+ 	REG_WRITE(ah, AR_CR, AR_CR_RXD);
+ 	REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
+diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
+index ac4781f37e78..b4e6304afd40 100644
+--- a/drivers/net/wireless/ath/ath9k/tx99.c
++++ b/drivers/net/wireless/ath/ath9k/tx99.c
+@@ -190,22 +190,27 @@ static ssize_t write_file_tx99(struct file *file, const char __user *user_buf,
+ 	if (strtobool(buf, &start))
+ 		return -EINVAL;
+ 
++	mutex_lock(&sc->mutex);
++
+ 	if (start == sc->tx99_state) {
+ 		if (!start)
+-			return count;
++			goto out;
+ 		ath_dbg(common, XMIT, "Resetting TX99\n");
+ 		ath9k_tx99_deinit(sc);
+ 	}
+ 
+ 	if (!start) {
+ 		ath9k_tx99_deinit(sc);
+-		return count;
++		goto out;
+ 	}
+ 
+ 	r = ath9k_tx99_init(sc);
+-	if (r)
++	if (r) {
++		mutex_unlock(&sc->mutex);
+ 		return r;
+-
++	}
++out:
++	mutex_unlock(&sc->mutex);
+ 	return count;
+ }
+ 
+diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
+index c2a238426425..a058151f5eed 100644
+--- a/drivers/net/wireless/ath/wil6210/main.c
++++ b/drivers/net/wireless/ath/wil6210/main.c
+@@ -323,18 +323,19 @@ static void wil_fw_error_worker(struct work_struct *work)
+ 
+ 	wil->last_fw_recovery = jiffies;
+ 
++	wil_info(wil, "fw error recovery requested (try %d)...\n",
++		 wil->recovery_count);
++	if (!no_fw_recovery)
++		wil->recovery_state = fw_recovery_running;
++	if (wil_wait_for_recovery(wil) != 0)
++		return;
++
+ 	mutex_lock(&wil->mutex);
+ 	switch (wdev->iftype) {
+ 	case NL80211_IFTYPE_STATION:
+ 	case NL80211_IFTYPE_P2P_CLIENT:
+ 	case NL80211_IFTYPE_MONITOR:
+-		wil_info(wil, "fw error recovery requested (try %d)...\n",
+-			 wil->recovery_count);
+-		if (!no_fw_recovery)
+-			wil->recovery_state = fw_recovery_running;
+-		if (0 != wil_wait_for_recovery(wil))
+-			break;
+-
++		/* silent recovery, upper layers will see disconnect */
+ 		__wil_down(wil);
+ 		__wil_up(wil);
+ 		break;
+diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
+index 8a495b318b6f..aa8400e3839c 100644
+--- a/drivers/net/xen-netback/common.h
++++ b/drivers/net/xen-netback/common.h
+@@ -195,6 +195,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
+ 	unsigned long   remaining_credit;
+ 	struct timer_list credit_timeout;
+ 	u64 credit_window_start;
++	bool rate_limited;
+ 
+ 	/* Statistics */
+ 	struct xenvif_stats stats;
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index 1a83e190fc15..e34527071260 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -99,7 +99,11 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
+ 
+ 	if (work_done < budget) {
+ 		napi_complete(napi);
+-		xenvif_napi_schedule_or_enable_events(queue);
++		/* If the queue is rate-limited, it shall be
++		 * rescheduled in the timer callback.
++		 */
++		if (likely(!queue->rate_limited))
++			xenvif_napi_schedule_or_enable_events(queue);
+ 	}
+ 
+ 	return work_done;
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 5e5b6184e720..7bd3c5a8116d 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -640,6 +640,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
+ 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
+ 
+ 	queue->remaining_credit = min(max_credit, max_burst);
++	queue->rate_limited = false;
+ }
+ 
+ void xenvif_tx_credit_callback(unsigned long data)
+@@ -1152,8 +1153,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+ 		msecs_to_jiffies(queue->credit_usec / 1000);
+ 
+ 	/* Timer could already be pending in rare cases. */
+-	if (timer_pending(&queue->credit_timeout))
++	if (timer_pending(&queue->credit_timeout)) {
++		queue->rate_limited = true;
+ 		return true;
++	}
+ 
+ 	/* Passed the point where we can replenish credit? */
+ 	if (time_after_eq64(now, next_credit)) {
+@@ -1168,6 +1171,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
+ 		mod_timer(&queue->credit_timeout,
+ 			  next_credit);
+ 		queue->credit_window_start = next_credit;
++		queue->rate_limited = true;
+ 
+ 		return true;
+ 	}
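
The three xen-netback hunks above form a small hand-off protocol: a queue
without enough credit is flagged rate_limited, the poll loop then stops
rescheduling it, and the credit timer clears the flag when it replenishes
credit. A toy model of the protocol in plain C (no NAPI or timers, just the
state transitions):

#include <stdbool.h>
#include <stdio.h>

struct queue {
	unsigned long remaining_credit;
	bool rate_limited;
};

bool tx_credit_exceeded(struct queue *q, unsigned int size)
{
	if (size > q->remaining_credit) {
		q->rate_limited = true;	/* timer will reschedule us */
		return true;
	}
	q->remaining_credit -= size;
	return false;
}

void tx_add_credit(struct queue *q, unsigned long credit)
{
	q->remaining_credit = credit;
	q->rate_limited = false;	/* polling may reschedule again */
}

void poll_complete(struct queue *q)
{
	if (!q->rate_limited)		/* else: hands off to the timer */
		printf("reschedule queue\n");
}

int main(void)
{
	struct queue q = { .remaining_credit = 100 };

	tx_credit_exceeded(&q, 200);	/* insufficient credit, sets the flag */
	poll_complete(&q);		/* prints nothing: rate-limited */
	tx_add_credit(&q, 100);		/* timer path clears the flag */
	poll_complete(&q);		/* prints "reschedule queue" */
	return 0;
}
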
+diff --git a/drivers/of/device.c b/drivers/of/device.c
+index 20c1332a0018..493b21bd1199 100644
+--- a/drivers/of/device.c
++++ b/drivers/of/device.c
+@@ -212,6 +212,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
+ 
+ 	return tsize;
+ }
++EXPORT_SYMBOL_GPL(of_device_get_modalias);
+ 
+ /**
+  * of_device_uevent - Display OF related uevent information
+@@ -274,3 +275,4 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+ 
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 74f4a26e16b5..98101c4118bb 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -937,6 +937,7 @@ static int pci_pm_thaw_noirq(struct device *dev)
+ 		return pci_legacy_resume_early(dev);
+ 
+ 	pci_update_current_state(pci_dev, PCI_D0);
++	pci_restore_state(pci_dev);
+ 
+ 	if (drv && drv->pm && drv->pm->thaw_noirq)
+ 		error = drv->pm->thaw_noirq(dev);
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
+index 0b7afa50121a..390feee4d47c 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
+@@ -194,8 +194,6 @@ static int exynos_irq_request_resources(struct irq_data *irqd)
+ 
+ 	spin_unlock_irqrestore(&bank->slock, flags);
+ 
+-	exynos_irq_unmask(irqd);
+-
+ 	return 0;
+ }
+ 
+@@ -216,8 +214,6 @@ static void exynos_irq_release_resources(struct irq_data *irqd)
+ 	shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
+ 	mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
+ 
+-	exynos_irq_mask(irqd);
+-
+ 	spin_lock_irqsave(&bank->slock, flags);
+ 
+ 	con = readl(d->virt_base + reg_con);
+diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+index 7376a97b5e65..727ce62de0bd 100644
+--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
++++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+@@ -800,6 +800,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
+ 		  SUNXI_FUNCTION(0x2, "lcd1"),		/* D16 */
+ 		  SUNXI_FUNCTION(0x3, "pata"),		/* ATAD12 */
+ 		  SUNXI_FUNCTION(0x4, "keypad"),	/* IN6 */
++		  SUNXI_FUNCTION(0x5, "sim"),		/* DET */
+ 		  SUNXI_FUNCTION_IRQ(0x6, 16),		/* EINT16 */
+ 		  SUNXI_FUNCTION(0x7, "csi1")),		/* D16 */
+ 	SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
+diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
+index ce129e595b55..5c935847599c 100644
+--- a/drivers/scsi/fnic/fnic.h
++++ b/drivers/scsi/fnic/fnic.h
+@@ -248,6 +248,7 @@ struct fnic {
+ 	struct completion *remove_wait; /* device remove thread blocks */
+ 
+ 	atomic_t in_flight;		/* io counter */
++	bool internal_reset_inprogress;
+ 	u32 _reserved;			/* fill hole */
+ 	unsigned long state_flags;	/* protected by host lock */
+ 	enum fnic_state state;
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 25436cd2860c..eaf29b18fb7a 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -2517,6 +2517,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
+ 	unsigned long wait_host_tmo;
+ 	struct Scsi_Host *shost = sc->device->host;
+ 	struct fc_lport *lp = shost_priv(shost);
++	struct fnic *fnic = lport_priv(lp);
++	unsigned long flags;
++
++	spin_lock_irqsave(&fnic->fnic_lock, flags);
++	if (fnic->internal_reset_inprogress == 0) {
++		fnic->internal_reset_inprogress = 1;
++	} else {
++		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
++		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
++			"host reset in progress, skipping another host reset\n");
++		return SUCCESS;
++	}
++	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ 
+ 	/*
+ 	 * If fnic_reset is successful, wait for fabric login to complete
+@@ -2537,6 +2550,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
+ 		}
+ 	}
+ 
++	spin_lock_irqsave(&fnic->fnic_lock, flags);
++	fnic->internal_reset_inprogress = 0;
++	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ 	return ret;
+ }
+ 
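
The fnic hunks above serialize host resets: the flag is tested and set
under fnic_lock, so a second concurrent reset sees
internal_reset_inprogress and bails out early instead of racing the first.
Sketch of the guard with a pthread mutex standing in for the spinlock
(compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
bool reset_in_progress;

int host_reset(void)
{
	pthread_mutex_lock(&lock);
	if (reset_in_progress) {	/* another reset is already running */
		pthread_mutex_unlock(&lock);
		printf("reset in progress, skipping\n");
		return 0;
	}
	reset_in_progress = true;
	pthread_mutex_unlock(&lock);

	/* ... perform the actual reset outside the lock ... */

	pthread_mutex_lock(&lock);
	reset_in_progress = false;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	return host_reset();
}
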
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index 14a781b6b88d..093f7b4847df 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -4410,14 +4410,13 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ static int
+ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+ {
+-	int r, i;
++	int r, i, index;
+ 	unsigned long	flags;
+ 	u32 reply_address;
+ 	u16 smid;
+ 	struct _tr_list *delayed_tr, *delayed_tr_next;
+ 	struct adapter_reply_queue *reply_q;
+-	long reply_post_free;
+-	u32 reply_post_free_sz, index = 0;
++	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
+ 
+ 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ 	    __func__));
+@@ -4488,27 +4487,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+ 		_base_assign_reply_queues(ioc);
+ 
+ 	/* initialize Reply Post Free Queue */
+-	reply_post_free_sz = ioc->reply_post_queue_depth *
+-	    sizeof(Mpi2DefaultReplyDescriptor_t);
+-	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
++	index = 0;
++	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
+ 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
++		/*
++		 * If RDPQ is enabled, switch to the next allocation.
++		 * Otherwise advance within the contiguous region.
++		 */
++		if (ioc->rdpq_array_enable) {
++			reply_q->reply_post_free =
++				ioc->reply_post[index++].reply_post_free;
++		} else {
++			reply_q->reply_post_free = reply_post_free_contig;
++			reply_post_free_contig += ioc->reply_post_queue_depth;
++		}
++
+ 		reply_q->reply_post_host_index = 0;
+-		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+-		    reply_post_free;
+ 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ 			reply_q->reply_post_free[i].Words =
+ 			    cpu_to_le64(ULLONG_MAX);
+ 		if (!_base_is_controller_msix_enabled(ioc))
+ 			goto skip_init_reply_post_free_queue;
+-		/*
+-		 * If RDPQ is enabled, switch to the next allocation.
+-		 * Otherwise advance within the contiguous region.
+-		 */
+-		if (ioc->rdpq_array_enable)
+-			reply_post_free = (long)
+-			    ioc->reply_post[++index].reply_post_free;
+-		else
+-			reply_post_free += reply_post_free_sz;
+ 	}
+  skip_init_reply_post_free_queue:
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 82b92c414a9c..c1b2e86839ae 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -329,12 +329,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
+ 	struct qla_hw_data *ha = vha->hw;
+ 	ssize_t rval = 0;
+ 
++	mutex_lock(&ha->optrom_mutex);
++
+ 	if (ha->optrom_state != QLA_SREADING)
+-		return 0;
++		goto out;
+ 
+-	mutex_lock(&ha->optrom_mutex);
+ 	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
+ 	    ha->optrom_region_size);
++
++out:
+ 	mutex_unlock(&ha->optrom_mutex);
+ 
+ 	return rval;
+@@ -349,14 +352,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
+ 	    struct device, kobj)));
+ 	struct qla_hw_data *ha = vha->hw;
+ 
+-	if (ha->optrom_state != QLA_SWRITING)
++	mutex_lock(&ha->optrom_mutex);
++
++	if (ha->optrom_state != QLA_SWRITING) {
++		mutex_unlock(&ha->optrom_mutex);
+ 		return -EINVAL;
+-	if (off > ha->optrom_region_size)
++	}
++	if (off > ha->optrom_region_size) {
++		mutex_unlock(&ha->optrom_mutex);
+ 		return -ERANGE;
++	}
+ 	if (off + count > ha->optrom_region_size)
+ 		count = ha->optrom_region_size - off;
+ 
+-	mutex_lock(&ha->optrom_mutex);
+ 	memcpy(&ha->optrom_buffer[off], buf, count);
+ 	mutex_unlock(&ha->optrom_mutex);
+ 
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 4fbfcdc5cb24..ebaefecd6e82 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -113,7 +113,10 @@ static const struct file_operations dw_spi_regs_ops = {
+ 
+ static int dw_spi_debugfs_init(struct dw_spi *dws)
+ {
+-	dws->debugfs = debugfs_create_dir("dw_spi", NULL);
++	char name[128];
++
++	snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
++	dws->debugfs = debugfs_create_dir(name, NULL);
+ 	if (!dws->debugfs)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
+index 94938436aef9..2f9f2958c203 100644
+--- a/drivers/spmi/spmi.c
++++ b/drivers/spmi/spmi.c
+@@ -348,11 +348,23 @@ static int spmi_drv_remove(struct device *dev)
+ 	return 0;
+ }
+ 
++static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++	int ret;
++
++	ret = of_device_uevent_modalias(dev, env);
++	if (ret != -ENODEV)
++		return ret;
++
++	return 0;
++}
++
+ static struct bus_type spmi_bus_type = {
+ 	.name		= "spmi",
+ 	.match		= spmi_device_match,
+ 	.probe		= spmi_drv_probe,
+ 	.remove		= spmi_drv_remove,
++	.uevent		= spmi_drv_uevent,
+ };
+ 
+ /**
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index a503132f91e8..ab6139b5472f 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2875,9 +2875,6 @@ static int __init comedi_init(void)
+ 
+ 	comedi_class->dev_groups = comedi_dev_groups;
+ 
+-	/* XXX requires /proc interface */
+-	comedi_proc_init();
+-
+ 	/* create devices files for legacy/manual use */
+ 	for (i = 0; i < comedi_num_legacy_minors; i++) {
+ 		struct comedi_device *dev;
+@@ -2895,6 +2892,9 @@ static int __init comedi_init(void)
+ 		mutex_unlock(&dev->mutex);
+ 	}
+ 
++	/* XXX requires /proc interface */
++	comedi_proc_init();
++
+ 	return 0;
+ }
+ module_init(comedi_init);
+diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
+index 7bc3e4a73834..16af77d20bdb 100644
+--- a/drivers/staging/iio/resolver/ad2s1210.c
++++ b/drivers/staging/iio/resolver/ad2s1210.c
+@@ -468,7 +468,7 @@ static int ad2s1210_read_raw(struct iio_dev *indio_dev,
+ 			     long m)
+ {
+ 	struct ad2s1210_state *st = iio_priv(indio_dev);
+-	bool negative;
++	u16 negative;
+ 	int ret = 0;
+ 	u16 pos;
+ 	s16 vel;
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index ef3c73e38172..4273e34ff3ea 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -48,6 +48,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+ 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
++	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ 	{}	/* Terminating entry */
+ };
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2fc3a231c2b6..7444640a7453 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -426,6 +426,7 @@ int iscsit_reset_np_thread(
+ 		return 0;
+ 	}
+ 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
++	atomic_inc(&np->np_reset_count);
+ 
+ 	if (np->np_thread) {
+ 		spin_unlock_bh(&np->np_thread_lock);
+@@ -1992,6 +1993,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+ 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+ 	cmd->data_direction	= DMA_NONE;
++	kfree(cmd->text_in_ptr);
+ 	cmd->text_in_ptr	= NULL;
+ 
+ 	return 0;
+@@ -4589,8 +4591,11 @@ static void iscsit_logout_post_handler_closesession(
+ 	 * always sleep waiting for RX/TX thread shutdown to complete
+ 	 * within iscsit_close_connection().
+ 	 */
+-	if (conn->conn_transport->transport_type == ISCSI_TCP)
++	if (conn->conn_transport->transport_type == ISCSI_TCP) {
+ 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
++		if (!sleep)
++			return;
++	}
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+@@ -4606,8 +4611,11 @@ static void iscsit_logout_post_handler_samecid(
+ {
+ 	int sleep = 1;
+ 
+-	if (conn->conn_transport->transport_type == ISCSI_TCP)
++	if (conn->conn_transport->transport_type == ISCSI_TCP) {
+ 		sleep = cmpxchg(&conn->tx_thread_active, true, false);
++		if (!sleep)
++			return;
++	}
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index c5cbd702e7cd..ee3a4bb9fba7 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1290,9 +1290,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	flush_signals(current);
+ 
+ 	spin_lock_bh(&np->np_thread_lock);
+-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
++		spin_unlock_bh(&np->np_thread_lock);
+ 		complete(&np->np_restart_comp);
++		return 1;
+ 	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+ 		spin_unlock_bh(&np->np_thread_lock);
+ 		goto exit;
+@@ -1325,7 +1327,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 		goto exit;
+ 	} else if (rc < 0) {
+ 		spin_lock_bh(&np->np_thread_lock);
+-		if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
++		if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
++			np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ 			spin_unlock_bh(&np->np_thread_lock);
+ 			complete(&np->np_restart_comp);
+ 			iscsit_put_transport(conn->conn_transport);
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 95c1c4ecf336..b7d27b816359 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -733,6 +733,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ 	if (cmd->transport_state & CMD_T_ABORTED ||
+ 	    cmd->transport_state & CMD_T_STOP) {
+ 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		/*
++		 * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
++		 * release se_device->caw_sem obtained by sbc_compare_and_write()
++		 * since target_complete_ok_work() or target_complete_failure_work()
++		 * won't be called to invoke the normal CAW completion callbacks.
++		 */
++		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
++			up(&dev->caw_sem);
++		}
+ 		complete_all(&cmd->t_transport_stop_comp);
+ 		return;
+ 	} else if (!success) {
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 7983298ab32c..acab64245923 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1771,6 +1771,9 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
++	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
++	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
++	},
+ 
+ 	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
+ 	.driver_info = CLEAR_HALT_CONDITIONS,
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 029fa26d2ac9..78f357b1a8fd 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2397,6 +2397,8 @@ void usb_hc_died (struct usb_hcd *hcd)
+ 	}
+ 	if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
+ 		hcd = hcd->shared_hcd;
++		clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
++		set_bit(HCD_FLAG_DEAD, &hcd->flags);
+ 		if (hcd->rh_registered) {
+ 			clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index e479c7d47a9f..fbf5c57b8251 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4618,7 +4618,8 @@ hub_power_remaining (struct usb_hub *hub)
+ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 		u16 portchange)
+ {
+-	int status, i;
++	int status = -ENODEV;
++	int i;
+ 	unsigned unit_load;
+ 	struct usb_device *hdev = hub->hdev;
+ 	struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+@@ -4822,9 +4823,10 @@ loop:
+ 
+ done:
+ 	hub_port_disable(hub, port1, 1);
+-	if (hcd->driver->relinquish_port && !hub->hdev->parent)
+-		hcd->driver->relinquish_port(hcd, port1);
+-
++	if (hcd->driver->relinquish_port && !hub->hdev->parent) {
++		if (status != -ENOTCONN && status != -ENODEV)
++			hcd->driver->relinquish_port(hcd, port1);
++	}
+ }
+ 
+ /* Handle physical or logical connection change events.
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 3116edfcdc18..574da2b4529c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -150,6 +150,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* appletouch */
+ 	{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
++	{ USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Avision AV600U */
+ 	{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
+ 	  USB_QUIRK_STRING_FETCH_255 },
+@@ -249,6 +252,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
+ 	{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
+ 	{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
+ 	{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
++	{ USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+ 	/* Logitech Optical Mouse M90/M100 */
+ 	{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
+diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
+index f7f35a36c09a..466640afa7be 100644
+--- a/drivers/usb/gadget/function/f_hid.c
++++ b/drivers/usb/gadget/function/f_hid.c
+@@ -544,7 +544,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+ 		}
+ 		status = usb_ep_enable(hidg->out_ep);
+ 		if (status < 0) {
+-			ERROR(cdev, "Enable IN endpoint FAILED!\n");
++			ERROR(cdev, "Enable OUT endpoint FAILED!\n");
+ 			goto fail;
+ 		}
+ 		hidg->out_ep->driver_data = hidg;
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index f9400564cb72..03b9a372636f 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -89,6 +89,7 @@ enum amd_chipset_gen {
+ 	AMD_CHIPSET_HUDSON2,
+ 	AMD_CHIPSET_BOLTON,
+ 	AMD_CHIPSET_YANGTZE,
++	AMD_CHIPSET_TAISHAN,
+ 	AMD_CHIPSET_UNKNOWN,
+ };
+ 
+@@ -132,6 +133,11 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ 		else if (rev >= 0x40 && rev <= 0x4f)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
++	}
++	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++					  0x145c, NULL);
++	if (pinfo->smbus_dev) {
++		pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ 	} else {
+ 		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+@@ -251,11 +257,12 @@ int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
+ {
+ 	/* Make sure amd chipset type has already been initialized */
+ 	usb_amd_find_chipset_info();
+-	if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
+-		return 0;
+-
+-	dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+-	return 1;
++	if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
++	    amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
++		dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
++		return 1;
++	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 2dd322e92951..25b1cf0b6848 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -651,6 +651,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 			clear_bit(wIndex, &bus_state->resuming_ports);
+ 
+ 			set_bit(wIndex, &bus_state->rexit_ports);
++
++			xhci_test_and_clear_bit(xhci, port_array, wIndex,
++						PORT_PLC);
+ 			xhci_set_link_state(xhci, port_array, wIndex,
+ 					XDEV_U0);
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index fbb77e2b288d..639419066ec4 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -789,13 +789,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
+ 			(ep->ep_state & EP_GETTING_NO_STREAMS)) {
+ 		int stream_id;
+ 
+-		for (stream_id = 0; stream_id < ep->stream_info->num_streams;
++		for (stream_id = 1; stream_id < ep->stream_info->num_streams;
+ 				stream_id++) {
++			ring = ep->stream_info->stream_rings[stream_id];
++			if (!ring)
++				continue;
++
+ 			xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 					"Killing URBs for slot ID %u, ep index %u, stream %u",
+-					slot_id, ep_index, stream_id + 1);
+-			xhci_kill_ring_urbs(xhci,
+-					ep->stream_info->stream_rings[stream_id]);
++					slot_id, ep_index, stream_id);
++			xhci_kill_ring_urbs(xhci, ring);
+ 		}
+ 	} else {
+ 		ring = ep->ring;
+diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
+index 0f7e850fd4aa..61a898e4a010 100644
+--- a/drivers/usb/renesas_usbhs/common.c
++++ b/drivers/usb/renesas_usbhs/common.c
+@@ -731,8 +731,10 @@ static int usbhsc_resume(struct device *dev)
+ 	struct usbhs_priv *priv = dev_get_drvdata(dev);
+ 	struct platform_device *pdev = usbhs_priv_to_pdev(priv);
+ 
+-	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
++	if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
+ 		usbhsc_power_ctrl(priv, 1);
++		usbhs_mod_autonomy_mode(priv);
++	}
+ 
+ 	usbhs_platform_call(priv, phy_reset, pdev);
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 69040e9069e0..31cd99f59a6a 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -133,6 +133,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
++	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 5d841485bbe3..f08b35819666 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2022,6 +2022,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index 1db4b61bdf7b..a51b28379850 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
++	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index 09d9be88209e..3b5a15d1dc0d 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID		0x0557
+ #define ATEN_VENDOR_ID2		0x0547
+ #define ATEN_PRODUCT_ID		0x2008
++#define ATEN_PRODUCT_UC485	0x2021
+ #define ATEN_PRODUCT_ID2	0x2118
+ 
+ #define IODATA_VENDOR_ID	0x04bb
+diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
+index 076178645ba4..45b18df9fef1 100644
+--- a/drivers/usb/storage/isd200.c
++++ b/drivers/usb/storage/isd200.c
+@@ -1522,8 +1522,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us)
+ 
+ 	/* Make sure driver was initialized */
+ 
+-	if (us->extra == NULL)
++	if (us->extra == NULL) {
+ 		usb_stor_dbg(us, "ERROR Driver not initialized\n");
++		srb->result = DID_ERROR << 16;
++		return;
++	}
+ 
+ 	scsi_set_resid(srb, 0);
+ 	/* scsi_bufflen might change in protocol translation to ata */
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index 53341a77d89f..a37ed1e59e99 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -123,9 +123,9 @@ UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
+ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
+ 		"Initio Corporation",
+-		"",
++		"INIC-3069",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+-		US_FL_NO_ATA_1X),
++		US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
+ 
+ /* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
+ UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index ae90cf8867b5..f85e3a17cf5a 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -902,6 +902,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
+ 			return ret;
+ 
+ 		vdev->barmap[index] = pci_iomap(pdev, index, 0);
++		if (!vdev->barmap[index]) {
++			pci_release_selected_regions(pdev, 1 << index);
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	vma->vm_private_data = vdev;
+diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
+index 210db24d2204..4d39f7959adf 100644
+--- a/drivers/vfio/pci/vfio_pci_rdwr.c
++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
+@@ -190,7 +190,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
+ 	if (!vdev->has_vga)
+ 		return -EINVAL;
+ 
+-	switch (pos) {
++	if (pos > 0xbfffful)
++		return -EINVAL;
++
++	switch ((u32)pos) {
+ 	case 0xa0000 ... 0xbffff:
+ 		count = min(count, (size_t)(0xc0000 - pos));
+ 		iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
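
The vfio hunk above adds a bounds check before the offset is truncated:
once pos is cast to u32 for the switch, a large offset such as 0x1000a0000
could alias into the 0xa0000-0xbffff VGA window, so anything beyond 0xbffff
is rejected first. A compact sketch (the case-range syntax is the same GCC
extension the kernel uses):

#include <stdint.h>
#include <stdio.h>

int vga_range_ok(uint64_t pos)
{
	if (pos > 0xbffffull)		/* beyond the legacy VGA window */
		return 0;

	switch ((uint32_t)pos) {	/* truncation is safe after the check */
	case 0xa0000 ... 0xbffff:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d %d\n", vga_range_ok(0xa0000),
	       vga_range_ok(0x1000a0000ull));	/* 1 0 */
	return 0;
}
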
+diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
+index e1278fe04b1e..8b2354536bbb 100644
+--- a/drivers/vfio/vfio.c
++++ b/drivers/vfio/vfio.c
+@@ -295,6 +295,34 @@ static void vfio_group_put(struct vfio_group *group)
+ 	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
+ }
+ 
++struct vfio_group_put_work {
++	struct work_struct work;
++	struct vfio_group *group;
++};
++
++static void vfio_group_put_bg(struct work_struct *work)
++{
++	struct vfio_group_put_work *do_work;
++
++	do_work = container_of(work, struct vfio_group_put_work, work);
++
++	vfio_group_put(do_work->group);
++	kfree(do_work);
++}
++
++static void vfio_group_schedule_put(struct vfio_group *group)
++{
++	struct vfio_group_put_work *do_work;
++
++	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
++	if (WARN_ON(!do_work))
++		return;
++
++	INIT_WORK(&do_work->work, vfio_group_put_bg);
++	do_work->group = group;
++	schedule_work(&do_work->work);
++}
++
+ /* Assume group_lock or group reference is held */
+ static void vfio_group_get(struct vfio_group *group)
+ {
+@@ -601,7 +629,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
+ 		break;
+ 	}
+ 
+-	vfio_group_put(group);
++	/*
++	 * If we're the last reference to the group, the group will be
++	 * released, which includes unregistering the iommu group notifier.
++	 * We hold a read-lock on that notifier list, unregistering needs
++	 * a write-lock... deadlock.  Release our reference asynchronously
++	 * to avoid that situation.
++	 */
++	vfio_group_schedule_put(group);
+ 	return NOTIFY_OK;
+ }
+ 
+@@ -1504,6 +1539,15 @@ void vfio_group_put_external_user(struct vfio_group *group)
+ }
+ EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+ 
++bool vfio_external_group_match_file(struct vfio_group *test_group,
++				    struct file *filep)
++{
++	struct vfio_group *group = filep->private_data;
++
++	return (filep->f_op == &vfio_group_fops) && (group == test_group);
++}
++EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
++
+ int vfio_external_user_iommu_id(struct vfio_group *group)
+ {
+ 	return iommu_group_id(group->iommu_group);
+diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
+index 07675d6f323e..d4530b54479c 100644
+--- a/drivers/video/fbdev/cobalt_lcdfb.c
++++ b/drivers/video/fbdev/cobalt_lcdfb.c
+@@ -350,6 +350,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
+ 	info->screen_size = resource_size(res);
+ 	info->screen_base = devm_ioremap(&dev->dev, res->start,
+ 					 info->screen_size);
++	if (!info->screen_base) {
++		framebuffer_release(info);
++		return -ENOMEM;
++	}
++
+ 	info->fbops = &cobalt_lcd_fbops;
+ 	info->fix = cobalt_lcdfb_fix;
+ 	info->fix.smem_start = res->start;
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index ece4982ee593..f57cf1c42ca3 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -391,6 +391,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ 				lastoff = page_offset(page);
+ 				bh = head = page_buffers(page);
+ 				do {
++					if (lastoff + bh->b_size <= startoff)
++						goto next;
+ 					if (buffer_uptodate(bh) ||
+ 					    buffer_unwritten(bh)) {
+ 						if (whence == SEEK_DATA)
+@@ -405,6 +407,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
+ 						unlock_page(page);
+ 						goto out;
+ 					}
++next:
+ 					lastoff += bh->b_size;
+ 					bh = bh->b_this_page;
+ 				} while (bh != head);
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 0e783b9f7007..b63e308e2545 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1932,7 +1932,8 @@ retry:
+ 			n_desc_blocks = o_desc_blocks +
+ 				le16_to_cpu(es->s_reserved_gdt_blocks);
+ 			n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
+-			n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
++			n_blocks_count = (ext4_fsblk_t)n_group *
++				EXT4_BLOCKS_PER_GROUP(sb);
+ 			n_group--; /* set to last group number */
+ 		}
+ 
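
The ext4 resize hunk above is a 32-bit overflow fix: n_group and
EXT4_BLOCKS_PER_GROUP() are both 32-bit, so their product wrapped before
being stored in the 64-bit n_blocks_count; casting one operand first forces
a 64-bit multiply. The effect in miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t groups = 1u << 20;	/* 1Mi groups */
	uint32_t blocks_per_group = 32768;
	uint64_t wrong = groups * blocks_per_group;		/* 32-bit multiply, wraps to 0 */
	uint64_t right = (uint64_t)groups * blocks_per_group;	/* 64-bit multiply */

	printf("wrong: %llu  right: %llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
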
+diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
+index c5e4a1856a0f..4147d83e6fdd 100644
+--- a/fs/f2fs/acl.c
++++ b/fs/f2fs/acl.c
+@@ -213,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
+ 	switch (type) {
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+-		if (acl) {
++		if (acl && !ipage) {
+ 			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
+ 			if (error)
+ 				return error;
+diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
+index 660183e9ab7c..f2ada269feb7 100644
+--- a/fs/f2fs/super.c
++++ b/fs/f2fs/super.c
+@@ -979,6 +979,8 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 	unsigned int total, fsmeta;
+ 	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
+ 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
++	unsigned int main_segs, blocks_per_seg;
++	int i;
+ 
+ 	total = le32_to_cpu(raw_super->segment_count);
+ 	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
+@@ -990,6 +992,20 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+ 	if (unlikely(fsmeta >= total))
+ 		return 1;
+ 
++	main_segs = le32_to_cpu(raw_super->segment_count_main);
++	blocks_per_seg = sbi->blocks_per_seg;
++
++	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
++		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
++			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
++			return 1;
++	}
++	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
++		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
++			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
++			return 1;
++	}
++
+ 	if (unlikely(f2fs_cp_error(sbi))) {
+ 		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
+ 		return 1;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 1f03f0a36e35..cacf95ac49fe 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -46,7 +46,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
+ {
+ 	struct fuse_file *ff;
+ 
+-	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
++	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ 	if (unlikely(!ff))
+ 		return NULL;
+ 
+diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
+index f31fd0dd92c6..b1daeafbea92 100644
+--- a/fs/nfs/Kconfig
++++ b/fs/nfs/Kconfig
+@@ -121,6 +121,7 @@ config PNFS_FILE_LAYOUT
+ config PNFS_BLOCK
+ 	tristate
+ 	depends on NFS_V4_1 && BLK_DEV_DM
++	depends on 64BIT || LBDAF
+ 	default NFS_V4
+ 
+ config PNFS_OBJLAYOUT
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index b6d97dfa9cb6..4227adce3e52 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1154,11 +1154,13 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+ 	/* Force a full look up iff the parent directory has changed */
+ 	if (!nfs_is_exclusive_create(dir, flags) &&
+ 	    nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
+-
+-		if (nfs_lookup_verify_inode(inode, flags)) {
++		error = nfs_lookup_verify_inode(inode, flags);
++		if (error) {
+ 			if (flags & LOOKUP_RCU)
+ 				return -ECHILD;
+-			goto out_zap_parent;
++			if (error == -ESTALE)
++				goto out_zap_parent;
++			goto out_error;
+ 		}
+ 		goto out_valid;
+ 	}
+@@ -1182,8 +1184,10 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+ 	trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
+ 	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ 	trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
+-	if (error)
++	if (error == -ESTALE || error == -ENOENT)
+ 		goto out_bad;
++	if (error)
++		goto out_error;
+ 	if (nfs_compare_fh(NFS_FH(inode), fhandle))
+ 		goto out_bad;
+ 	if ((error = nfs_refresh_inode(inode, fattr)) != 0)
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index b28fa4cbea52..a84dd247b13a 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -30,6 +30,7 @@ void nfs4_ff_layout_free_deviceid(struct nfs4_ff_layout_ds *mirror_ds)
+ {
+ 	nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
+ 	nfs4_pnfs_ds_put(mirror_ds->ds);
++	kfree(mirror_ds->ds_versions);
+ 	kfree_rcu(mirror_ds, id_node.rcu);
+ }
+ 
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 723b8922d76b..8ddff9a72b34 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1227,9 +1227,9 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
+ 		return 0;
+ 	/* Has the inode gone and changed behind our back? */
+ 	if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+-		return -EIO;
++		return -ESTALE;
+ 	if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
+-		return -EIO;
++		return -ESTALE;
+ 
+ 	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
+ 			inode->i_version != fattr->change_attr)
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index 4408057d1dc8..4dcf9d28f022 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -62,9 +62,10 @@ int seq_open(struct file *file, const struct seq_operations *op)
+ 	memset(p, 0, sizeof(*p));
+ 	mutex_init(&p->lock);
+ 	p->op = op;
+-#ifdef CONFIG_USER_NS
+-	p->user_ns = file->f_cred->user_ns;
+-#endif
++
++	// No refcounting: the lifetime of 'p' is constrained
++	// to the lifetime of the file.
++	p->file = file;
+ 
+ 	/*
+ 	 * Wrappers around seq_open(e.g. swaps_open) need to be
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 78a40ef0c463..9635cd478cc9 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -1235,8 +1235,8 @@ int udf_setsize(struct inode *inode, loff_t newsize)
+ 			return err;
+ 		}
+ set_size:
+-		truncate_setsize(inode, newsize);
+ 		up_write(&iinfo->i_data_sem);
++		truncate_setsize(inode, newsize);
+ 	} else {
+ 		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ 			down_write(&iinfo->i_data_sem);
+@@ -1253,9 +1253,9 @@ set_size:
+ 					  udf_get_block);
+ 		if (err)
+ 			return err;
++		truncate_setsize(inode, newsize);
+ 		down_write(&iinfo->i_data_sem);
+ 		udf_clear_extent_cache(inode);
+-		truncate_setsize(inode, newsize);
+ 		udf_truncate_extents(inode);
+ 		up_write(&iinfo->i_data_sem);
+ 	}
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+index 685809835b5c..d164045e296c 100644
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -751,6 +751,10 @@ int genphy_read_status(struct phy_device *phydev);
+ int genphy_suspend(struct phy_device *phydev);
+ int genphy_resume(struct phy_device *phydev);
+ int genphy_soft_reset(struct phy_device *phydev);
++static inline int genphy_no_soft_reset(struct phy_device *phydev)
++{
++	return 0;
++}
+ void phy_driver_unregister(struct phy_driver *drv);
+ void phy_drivers_unregister(struct phy_driver *drv, int n);
+ int phy_driver_register(struct phy_driver *new_driver);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index af99802ce7fe..b6c033430b15 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -774,6 +774,16 @@ struct signal_struct {
+ 
+ #define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */
+ 
++#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
++			  SIGNAL_STOP_CONTINUED)
++
++static inline void signal_set_stop_flags(struct signal_struct *sig,
++					 unsigned int flags)
++{
++	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
++	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
++}
++
+ /* If true, all threads except ->group_exit_task have pending SIGKILL */
+ static inline int signal_group_exit(const struct signal_struct *sig)
+ {
+diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
+index 7848473a5bc8..f36c3a27f7f6 100644
+--- a/include/linux/seq_file.h
++++ b/include/linux/seq_file.h
+@@ -7,13 +7,10 @@
+ #include <linux/mutex.h>
+ #include <linux/cpumask.h>
+ #include <linux/nodemask.h>
++#include <linux/fs.h>
++#include <linux/cred.h>
+ 
+ struct seq_operations;
+-struct file;
+-struct path;
+-struct inode;
+-struct dentry;
+-struct user_namespace;
+ 
+ struct seq_file {
+ 	char *buf;
+@@ -27,9 +24,7 @@ struct seq_file {
+ 	struct mutex lock;
+ 	const struct seq_operations *op;
+ 	int poll_event;
+-#ifdef CONFIG_USER_NS
+-	struct user_namespace *user_ns;
+-#endif
++	const struct file *file;
+ 	void *private;
+ };
+ 
+@@ -141,7 +136,7 @@ int seq_put_decimal_ll(struct seq_file *m, char delimiter,
+ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+ {
+ #ifdef CONFIG_USER_NS
+-	return seq->user_ns;
++	return seq->file->f_cred->user_ns;
+ #else
+ 	extern struct user_namespace init_user_ns;
+ 	return &init_user_ns;
+diff --git a/include/linux/slab.h b/include/linux/slab.h
+index ffd24c830151..ef441d93cea0 100644
+--- a/include/linux/slab.h
++++ b/include/linux/slab.h
+@@ -185,7 +185,7 @@ size_t ksize(const void *);
+  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
+  */
+ #define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
+-#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
++#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
+ #ifndef KMALLOC_SHIFT_LOW
+ #define KMALLOC_SHIFT_LOW	3
+ #endif
+@@ -198,7 +198,7 @@ size_t ksize(const void *);
+  * be allocated from the same page.
+  */
+ #define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
+-#define KMALLOC_SHIFT_MAX	30
++#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
+ #ifndef KMALLOC_SHIFT_LOW
+ #define KMALLOC_SHIFT_LOW	3
+ #endif
+diff --git a/include/linux/vfio.h b/include/linux/vfio.h
+index ddb440975382..34851bf2e2c8 100644
+--- a/include/linux/vfio.h
++++ b/include/linux/vfio.h
+@@ -85,6 +85,8 @@ extern void vfio_unregister_iommu_driver(
+  */
+ extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
+ extern void vfio_group_put_external_user(struct vfio_group *group);
++extern bool vfio_external_group_match_file(struct vfio_group *group,
++					   struct file *filep);
+ extern int vfio_external_user_iommu_id(struct vfio_group *group);
+ extern long vfio_external_check_extension(struct vfio_group *group,
+ 					  unsigned long arg);
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
+index e0f4109e64c6..c2aa73e5e6bb 100644
+--- a/include/net/iw_handler.h
++++ b/include/net/iw_handler.h
+@@ -556,7 +556,8 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
+ 		memcpy(stream + lcp_len,
+ 		       ((char *) &iwe->u) + IW_EV_POINT_OFF,
+ 		       IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
+-		memcpy(stream + point_len, extra, iwe->u.data.length);
++		if (iwe->u.data.length && extra)
++			memcpy(stream + point_len, extra, iwe->u.data.length);
+ 		stream += event_len;
+ 	}
+ 	return stream;
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index ce13cf20f625..d33b17ba51d2 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -444,6 +444,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
+ 
+ #define _sctp_walk_params(pos, chunk, end, member)\
+ for (pos.v = chunk->member;\
++     (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
++      (void *)chunk + end) &&\
+      pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
+      ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
+      pos.v += WORD_ROUND(ntohs(pos.p->length)))
+@@ -454,6 +456,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
+ #define _sctp_walk_errors(err, chunk_hdr, end)\
+ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
+ 	    sizeof(sctp_chunkhdr_t));\
++     ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
++      (void *)chunk_hdr + end) &&\
+      (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
+      ntohs(err->length) >= sizeof(sctp_errhdr_t); \
+      err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
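Both new clauses above enforce the same invariant: the bytes holding the length field must lie inside the chunk before ntohs() reads them, otherwise a truncated parameter lets the walker read past the buffer to fetch the very length it would bound itself with. Restated as a standalone predicate (illustrative sketch only, reusing the names from the hunk):

	static inline bool sctp_param_len_in_bounds(const void *chunk, int end,
						    const struct sctp_paramhdr *p)
	{
		/* The length field ends at p + offsetof(length) + sizeof(length);
		 * it is only safe to read when that end stays within chunk + end.
		 */
		return (const void *)p + offsetof(struct sctp_paramhdr, length) +
		       sizeof(p->length) <= chunk + end;
	}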
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index e37059c901e2..e12eec076cd5 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -785,6 +785,7 @@ struct iscsi_np {
+ 	int			np_sock_type;
+ 	enum np_thread_state_table np_thread_state;
+ 	bool                    enabled;
++	atomic_t		np_reset_count;
+ 	enum iscsi_timer_flags_table np_login_timer_flags;
+ 	u32			np_exports;
+ 	enum np_flags_table	np_flags;
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 10e9eec3e228..e871080bc44e 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6111,21 +6111,6 @@ static void perf_log_itrace_start(struct perf_event *event)
+ 	perf_output_end(&handle);
+ }
+ 
+-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
+-{
+-	/*
+-	 * Due to interrupt latency (AKA "skid"), we may enter the
+-	 * kernel before taking an overflow, even if the PMU is only
+-	 * counting user events.
+-	 * To avoid leaking information to userspace, we must always
+-	 * reject kernel samples when exclude_kernel is set.
+-	 */
+-	if (event->attr.exclude_kernel && !user_mode(regs))
+-		return false;
+-
+-	return true;
+-}
+-
+ /*
+  * Generic event overflow handling, sampling.
+  */
+@@ -6172,12 +6157,6 @@ static int __perf_event_overflow(struct perf_event *event,
+ 			perf_adjust_period(event, delta, hwc->last_period, true);
+ 	}
+ 
+-	/*
+-	 * For security, drop the skid kernel samples if necessary.
+-	 */
+-	if (!sample_is_allowed(event, regs))
+-		return ret;
+-
+ 	/*
+ 	 * XXX event_limit might not quite work as expected on inherited
+ 	 * events
+diff --git a/kernel/resource.c b/kernel/resource.c
+index a7c27cb71fc5..cbf725c24c3b 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -105,16 +105,25 @@ static int r_show(struct seq_file *m, void *v)
+ {
+ 	struct resource *root = m->private;
+ 	struct resource *r = v, *p;
++	unsigned long long start, end;
+ 	int width = root->end < 0x10000 ? 4 : 8;
+ 	int depth;
+ 
+ 	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
+ 		if (p->parent == root)
+ 			break;
++
++	if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
++		start = r->start;
++		end = r->end;
++	} else {
++		start = end = 0;
++	}
++
+ 	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
+ 			depth * 2, "",
+-			width, (unsigned long long) r->start,
+-			width, (unsigned long long) r->end,
++			width, start,
++			width, end,
+ 			r->name ? r->name : "<BAD>");
+ 	return 0;
+ }
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 0206be728dac..525a4cda5598 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
+ 	 * fresh group stop.  Read comment in do_signal_stop() for details.
+ 	 */
+ 	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
+-		sig->flags = SIGNAL_STOP_STOPPED;
++		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
+ 		return true;
+ 	}
+ 	return false;
+@@ -888,7 +888,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
+ 			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
+ 			 * notify its parent. See get_signal_to_deliver().
+ 			 */
+-			signal->flags = why | SIGNAL_STOP_CONTINUED;
++			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
+ 			signal->group_stop_count = 0;
+ 			signal->group_exit_code = 0;
+ 		}
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index d0efe9295a0e..9cdf3bfc9178 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3854,6 +3854,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
+ 	struct workqueue_struct *wq;
+ 	struct pool_workqueue *pwq;
+ 
++	/*
++	 * Unbound && max_active == 1 used to imply ordered, which is no
++	 * longer the case on NUMA machines due to per-node pools.  While
++	 * alloc_ordered_workqueue() is the right way to create an ordered
++	 * workqueue, keep the previous behavior to avoid subtle breakages
++	 * on NUMA.
++	 */
++	if ((flags & WQ_UNBOUND) && max_active == 1)
++		flags |= __WQ_ORDERED;
++
+ 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
+ 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
+ 		flags |= WQ_UNBOUND;
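As the restored comment explains, WQ_UNBOUND with max_active == 1 once again implies __WQ_ORDERED so that existing callers keep their expected FIFO behaviour on NUMA systems. New code should not lean on that implication; the explicit interface is alloc_ordered_workqueue(), e.g. (illustrative, queue name made up):

	struct workqueue_struct *wq;

	wq = alloc_ordered_workqueue("example_wq", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;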
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index ba2b0c87e65b..c986a6198b0e 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED
+ 
+ config DEBUG_INFO_SPLIT
+ 	bool "Produce split debuginfo in .dwo files"
+-	depends on DEBUG_INFO
++	depends on DEBUG_INFO && !FRV
+ 	help
+ 	  Generate debug info into separate .dwo files. This significantly
+ 	  reduces the build directory size for builds with DEBUG_INFO,
+diff --git a/mm/mempool.c b/mm/mempool.c
+index 2cc08de8b1db..70cccdcff860 100644
+--- a/mm/mempool.c
++++ b/mm/mempool.c
+@@ -135,8 +135,8 @@ static void *remove_element(mempool_t *pool)
+ 	void *element = pool->elements[--pool->curr_nr];
+ 
+ 	BUG_ON(pool->curr_nr < 0);
+-	check_element(pool, element);
+ 	kasan_unpoison_element(pool, element);
++	check_element(pool, element);
+ 	return element;
+ }
+ 
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4f1ff71074c7..35bda77211ea 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1106,14 +1106,14 @@ int move_freepages(struct zone *zone,
+ #endif
+ 
+ 	for (page = start_page; page <= end_page;) {
+-		/* Make sure we are not inadvertently changing nodes */
+-		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+-
+ 		if (!pfn_valid_within(page_to_pfn(page))) {
+ 			page++;
+ 			continue;
+ 		}
+ 
++		/* Make sure we are not inadvertently changing nodes */
++		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
++
+ 		if (!PageBuddy(page)) {
+ 			page++;
+ 			continue;
+@@ -5555,8 +5555,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
+ 	}
+ 
+ 	if (pages && s)
+-		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
+-			s, pages << (PAGE_SHIFT - 10), start, end);
++		pr_info("Freeing %s memory: %ldK\n",
++			s, pages << (PAGE_SHIFT - 10));
+ 
+ 	return pages;
+ }
+@@ -6513,7 +6513,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
+ 
+ 	/* Make sure the range is really isolated. */
+ 	if (test_pages_isolated(outer_start, end, false)) {
+-		pr_info("%s: [%lx, %lx) PFNs busy\n",
++		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+ 			__func__, outer_start, end);
+ 		ret = -EBUSY;
+ 		goto done;
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index d45e590e8f10..dfecba30d83a 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -292,6 +292,10 @@ static void vlan_sync_address(struct net_device *dev,
+ 	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
+ 		return;
+ 
++	/* vlan continues to inherit address of lower device */
++	if (vlan_dev_inherit_address(vlandev, dev))
++		goto out;
++
+ 	/* vlan address was different from the old address and is equal to
+ 	 * the new address */
+ 	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
+@@ -304,6 +308,7 @@ static void vlan_sync_address(struct net_device *dev,
+ 	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
+ 		dev_uc_add(dev, vlandev->dev_addr);
+ 
++out:
+ 	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
+ }
+ 
+diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
+index 9d010a09ab98..cc1557978066 100644
+--- a/net/8021q/vlan.h
++++ b/net/8021q/vlan.h
+@@ -109,6 +109,8 @@ int vlan_check_real_dev(struct net_device *real_dev,
+ void vlan_setup(struct net_device *dev);
+ int register_vlan_dev(struct net_device *dev);
+ void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
++bool vlan_dev_inherit_address(struct net_device *dev,
++			      struct net_device *real_dev);
+ 
+ static inline u32 vlan_get_ingress_priority(struct net_device *dev,
+ 					    u16 vlan_tci)
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index 01d7ba840df8..93010f34c200 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -244,6 +244,17 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
+ 	strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
+ }
+ 
++bool vlan_dev_inherit_address(struct net_device *dev,
++			      struct net_device *real_dev)
++{
++	if (dev->addr_assign_type != NET_ADDR_STOLEN)
++		return false;
++
++	ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
++	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
++	return true;
++}
++
+ static int vlan_dev_open(struct net_device *dev)
+ {
+ 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+@@ -254,7 +265,8 @@ static int vlan_dev_open(struct net_device *dev)
+ 	    !(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
+ 		return -ENETDOWN;
+ 
+-	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
++	if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
++	    !vlan_dev_inherit_address(dev, real_dev)) {
+ 		err = dev_uc_add(real_dev, dev->dev_addr);
+ 		if (err < 0)
+ 			goto out;
+@@ -558,8 +570,10 @@ static int vlan_dev_init(struct net_device *dev)
+ 	/* ipv6 shared card related stuff */
+ 	dev->dev_id = real_dev->dev_id;
+ 
+-	if (is_zero_ether_addr(dev->dev_addr))
+-		eth_hw_addr_inherit(dev, real_dev);
++	if (is_zero_ether_addr(dev->dev_addr)) {
++		ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
++		dev->addr_assign_type = NET_ADDR_STOLEN;
++	}
+ 	if (is_zero_ether_addr(dev->broadcast))
+ 		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+ 
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 69ad5091e2ce..e4b56fcb5d4e 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -23,6 +23,7 @@
+ #include <linux/debugfs.h>
+ #include <linux/crypto.h>
+ #include <linux/scatterlist.h>
++#include <crypto/algapi.h>
+ #include <crypto/b128ops.h>
+ 
+ #include <net/bluetooth/bluetooth.h>
+@@ -506,7 +507,7 @@ bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16],
+ 	if (err)
+ 		return false;
+ 
+-	return !memcmp(bdaddr->b, hash, 3);
++	return !crypto_memneq(bdaddr->b, hash, 3);
+ }
+ 
+ int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
+@@ -559,7 +560,7 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
+ 			/* This is unlikely, but we need to check that
+ 			 * we didn't accidentally generate a debug key.
+ 			 */
+-			if (memcmp(smp->local_sk, debug_sk, 32))
++			if (crypto_memneq(smp->local_sk, debug_sk, 32))
+ 				break;
+ 		}
+ 		smp->debug_key = false;
+@@ -973,7 +974,7 @@ static u8 smp_random(struct smp_chan *smp)
+ 	if (ret)
+ 		return SMP_UNSPECIFIED;
+ 
+-	if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
++	if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
+ 		BT_ERR("Pairing failed (confirmation values mismatch)");
+ 		return SMP_CONFIRM_FAILED;
+ 	}
+@@ -1490,7 +1491,7 @@ static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op)
+ 			   smp->rrnd, r, cfm))
+ 			return SMP_UNSPECIFIED;
+ 
+-		if (memcmp(smp->pcnf, cfm, 16))
++		if (crypto_memneq(smp->pcnf, cfm, 16))
+ 			return SMP_CONFIRM_FAILED;
+ 
+ 		smp->passkey_round++;
+@@ -1874,7 +1875,7 @@ static u8 sc_send_public_key(struct smp_chan *smp)
+ 			/* This is unlikely, but we need to check that
+ 			 * we didn't accidentally generate a debug key.
+ 			 */
+-			if (memcmp(smp->local_sk, debug_sk, 32))
++			if (crypto_memneq(smp->local_sk, debug_sk, 32))
+ 				break;
+ 		}
+ 	}
+@@ -2139,7 +2140,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
+ 		if (err)
+ 			return SMP_UNSPECIFIED;
+ 
+-		if (memcmp(smp->pcnf, cfm, 16))
++		if (crypto_memneq(smp->pcnf, cfm, 16))
+ 			return SMP_CONFIRM_FAILED;
+ 	} else {
+ 		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
+@@ -2594,7 +2595,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 		if (err)
+ 			return SMP_UNSPECIFIED;
+ 
+-		if (memcmp(cfm.confirm_val, smp->pcnf, 16))
++		if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
+ 			return SMP_CONFIRM_FAILED;
+ 	}
+ 
+@@ -2627,7 +2628,7 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	else
+ 		hcon->pending_sec_level = BT_SECURITY_FIPS;
+ 
+-	if (!memcmp(debug_pk, smp->remote_pk, 64))
++	if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
+ 		set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
+ 
+ 	if (smp->method == DSP_PASSKEY) {
+@@ -2726,7 +2727,7 @@ static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb)
+ 	if (err)
+ 		return SMP_UNSPECIFIED;
+ 
+-	if (memcmp(check->e, e, 16))
++	if (crypto_memneq(check->e, e, 16))
+ 		return SMP_DHKEY_CHECK_FAILED;
+ 
+ 	if (!hcon->out) {
+@@ -3336,7 +3337,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 3))
++	if (crypto_memneq(res, exp, 3))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3366,7 +3367,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 16))
++	if (crypto_memneq(res, exp, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3391,7 +3392,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 16))
++	if (crypto_memneq(res, exp, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3423,7 +3424,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 16))
++	if (crypto_memneq(res, exp, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3457,10 +3458,10 @@ static int __init test_f5(struct crypto_hash *tfm_cmac)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(mackey, exp_mackey, 16))
++	if (crypto_memneq(mackey, exp_mackey, 16))
+ 		return -EINVAL;
+ 
+-	if (memcmp(ltk, exp_ltk, 16))
++	if (crypto_memneq(ltk, exp_ltk, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3493,7 +3494,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 16))
++	if (crypto_memneq(res, exp, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
+@@ -3547,7 +3548,7 @@ static int __init test_h6(struct crypto_hash *tfm_cmac)
+ 	if (err)
+ 		return err;
+ 
+-	if (memcmp(res, exp, 16))
++	if (crypto_memneq(res, exp, 16))
+ 		return -EINVAL;
+ 
+ 	return 0;
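Every comparison of secret or attacker-influenced material in smp.c above moves from memcmp() to crypto_memneq(), whose run time does not depend on where the buffers first differ, closing a timing side channel against pairing confirm values and keys. The conversion is mechanical (sketch, assuming the <crypto/algapi.h> include added by the first hunk):

	/* memcmp(a, b, n) != 0  becomes  crypto_memneq(a, b, n)
	 * !memcmp(a, b, n)     becomes  !crypto_memneq(a, b, n)
	 */
	static bool confirm_matches(const u8 a[16], const u8 b[16])
	{
		return !crypto_memneq(a, b, 16);	/* constant-time */
	}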
+diff --git a/net/core/dev.c b/net/core/dev.c
+index bd47736b689e..bb711e5e345b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2465,9 +2465,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
+ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ {
+ 	if (tx_path)
+-		return skb->ip_summed != CHECKSUM_PARTIAL;
+-	else
+-		return skb->ip_summed == CHECKSUM_NONE;
++		return skb->ip_summed != CHECKSUM_PARTIAL &&
++		       skb->ip_summed != CHECKSUM_UNNECESSARY;
++
++	return skb->ip_summed == CHECKSUM_NONE;
+ }
+ 
+ /**
+@@ -2486,11 +2487,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ 				  netdev_features_t features, bool tx_path)
+ {
++	struct sk_buff *segs;
++
+ 	if (unlikely(skb_needs_check(skb, tx_path))) {
+ 		int err;
+ 
+-		skb_warn_bad_offload(skb);
+-
++		/* We're going to init ->check field in TCP or UDP header */
+ 		err = skb_cow_head(skb, 0);
+ 		if (err < 0)
+ 			return ERR_PTR(err);
+@@ -2505,7 +2507,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ 	skb_reset_mac_header(skb);
+ 	skb_reset_mac_len(skb);
+ 
+-	return skb_mac_gso_segment(skb, features);
++	segs = skb_mac_gso_segment(skb, features);
++
++	if (unlikely(skb_needs_check(skb, tx_path)))
++		skb_warn_bad_offload(skb);
++
++	return segs;
+ }
+ EXPORT_SYMBOL(__skb_gso_segment);
+ 
+diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
+index b94b1d293506..151e047ce072 100644
+--- a/net/core/dev_ioctl.c
++++ b/net/core/dev_ioctl.c
+@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)
+ 
+ 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ 		return -EFAULT;
++	ifr.ifr_name[IFNAMSIZ-1] = 0;
+ 
+ 	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+ 	if (error)
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 7a0d98628137..3936683486e9 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1628,7 +1628,8 @@ static int do_setlink(const struct sk_buff *skb,
+ 		struct sockaddr *sa;
+ 		int len;
+ 
+-		len = sizeof(sa_family_t) + dev->addr_len;
++		len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
++						  sizeof(*sa));
+ 		sa = kmalloc(len, GFP_KERNEL);
+ 		if (!sa) {
+ 			err = -ENOMEM;
+diff --git a/net/dccp/feat.c b/net/dccp/feat.c
+index 1704948e6a12..f227f002c73d 100644
+--- a/net/dccp/feat.c
++++ b/net/dccp/feat.c
+@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
+ 	 * singleton values (which always leads to failure).
+ 	 * These settings can still (later) be overridden via sockopts.
+ 	 */
+-	if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
+-	    ccid_get_builtin_ccids(&rx.val, &rx.len))
++	if (ccid_get_builtin_ccids(&tx.val, &tx.len))
+ 		return -ENOBUFS;
++	if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
++		kfree(tx.val);
++		return -ENOBUFS;
++	}
+ 
+ 	if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
+ 	    !dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
+diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
+index ccf4c5629b3c..fd7ac7895c38 100644
+--- a/net/dccp/ipv4.c
++++ b/net/dccp/ipv4.c
+@@ -661,6 +661,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
+ 		goto drop_and_free;
+ 
+ 	inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
++	reqsk_put(req);
+ 	return 0;
+ 
+ drop_and_free:
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 513b6aabc5b7..765909ba781e 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -1247,13 +1247,14 @@ static struct pernet_operations fib_net_ops = {
+ 
+ void __init ip_fib_init(void)
+ {
+-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
++	fib_trie_init();
+ 
+ 	register_pernet_subsys(&fib_net_ops);
++
+ 	register_netdevice_notifier(&fib_netdev_notifier);
+ 	register_inetaddr_notifier(&fib_inetaddr_notifier);
+ 
+-	fib_trie_init();
++	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
++	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
++	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ }
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index 51573f8a39bc..adbb28b39413 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -891,7 +891,7 @@ static int __ip_append_data(struct sock *sk,
+ 		csummode = CHECKSUM_PARTIAL;
+ 
+ 	cork->length += length;
+-	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
++	if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+ 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+ 	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 4a3a17ff046d..767ee7471c9b 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2536,8 +2536,8 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 
+ 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
+-	if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
+-	    (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
++	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
++	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
+ 		tp->snd_cwnd = tp->snd_ssthresh;
+ 		tp->snd_cwnd_stamp = tcp_time_stamp;
+ 	}
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index dfcab88c3e74..8f27dce93f71 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -231,7 +231,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ 	if (uh->check == 0)
+ 		uh->check = CSUM_MANGLED_0;
+ 
+-	skb->ip_summed = CHECKSUM_NONE;
++	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 	/* Fragment the skb. IP headers of the fragments are updated in
+ 	 * inet_gso_segment()
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index eefb8759cfa4..29a1ffa72cd0 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1340,7 +1340,7 @@ emsgsize:
+ 	 */
+ 
+ 	cork->length += length;
+-	if (((length > mtu) ||
++	if ((((length + fragheaderlen) > mtu) ||
+ 	     (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+ 	    (rt->dst.dev->features & NETIF_F_UFO) &&
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 3f6ee4138cab..292ef2e584db 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -76,7 +76,7 @@ EXPORT_SYMBOL(ipv6_select_ident);
+ 
+ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ {
+-	u16 offset = sizeof(struct ipv6hdr);
++	unsigned int offset = sizeof(struct ipv6hdr);
+ 	unsigned int packet_len = skb_tail_pointer(skb) -
+ 		skb_network_header(skb);
+ 	int found_rhdr = 0;
+@@ -84,6 +84,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+ 	while (offset <= packet_len) {
+ 		struct ipv6_opt_hdr *exthdr;
++		unsigned int len;
+ 
+ 		switch (**nexthdr) {
+ 
+@@ -109,7 +110,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+ 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+ 						 offset);
+-		offset += ipv6_optlen(exthdr);
++		len = ipv6_optlen(exthdr);
++		if (len + offset >= IPV6_MAXPLEN)
++			return -EINVAL;
++		offset += len;
+ 		*nexthdr = &exthdr->nexthdr;
+ 	}
+ 
+diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
+index 01582966ffa0..2e3c12eeca07 100644
+--- a/net/ipv6/udp_offload.c
++++ b/net/ipv6/udp_offload.c
+@@ -86,7 +86,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ 		if (uh->check == 0)
+ 			uh->check = CSUM_MANGLED_0;
+ 
+-		skb->ip_summed = CHECKSUM_NONE;
++		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 		/* Check if there is enough headroom to insert fragment header. */
+ 		tnl_hlen = skb_tnl_header_len(skb);
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 9a556e434f59..39c78c9e1c68 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -63,8 +63,13 @@ struct pfkey_sock {
+ 		} u;
+ 		struct sk_buff	*skb;
+ 	} dump;
++	struct mutex dump_lock;
+ };
+ 
++static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
++			       xfrm_address_t *saddr, xfrm_address_t *daddr,
++			       u16 *family);
++
+ static inline struct pfkey_sock *pfkey_sk(struct sock *sk)
+ {
+ 	return (struct pfkey_sock *)sk;
+@@ -139,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
+ {
+ 	struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
+ 	struct sock *sk;
++	struct pfkey_sock *pfk;
+ 	int err;
+ 
+ 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+@@ -153,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
+ 	if (sk == NULL)
+ 		goto out;
+ 
++	pfk = pfkey_sk(sk);
++	mutex_init(&pfk->dump_lock);
++
+ 	sock->ops = &pfkey_ops;
+ 	sock_init_data(sock, sk);
+ 
+@@ -281,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
+ 	struct sadb_msg *hdr;
+ 	int rc;
+ 
++	mutex_lock(&pfk->dump_lock);
++	if (!pfk->dump.dump) {
++		rc = 0;
++		goto out;
++	}
++
+ 	rc = pfk->dump.dump(pfk);
+-	if (rc == -ENOBUFS)
+-		return 0;
++	if (rc == -ENOBUFS) {
++		rc = 0;
++		goto out;
++	}
+ 
+ 	if (pfk->dump.skb) {
+-		if (!pfkey_can_dump(&pfk->sk))
+-			return 0;
++		if (!pfkey_can_dump(&pfk->sk)) {
++			rc = 0;
++			goto out;
++		}
+ 
+ 		hdr = (struct sadb_msg *) pfk->dump.skb->data;
+ 		hdr->sadb_msg_seq = 0;
+@@ -298,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
+ 	}
+ 
+ 	pfkey_terminate_dump(pfk);
++
++out:
++	mutex_unlock(&pfk->dump_lock);
+ 	return rc;
+ }
+ 
+@@ -1801,19 +1823,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ 	struct xfrm_address_filter *filter = NULL;
+ 	struct pfkey_sock *pfk = pfkey_sk(sk);
+ 
+-	if (pfk->dump.dump != NULL)
++	mutex_lock(&pfk->dump_lock);
++	if (pfk->dump.dump != NULL) {
++		mutex_unlock(&pfk->dump_lock);
+ 		return -EBUSY;
++	}
+ 
+ 	proto = pfkey_satype2proto(hdr->sadb_msg_satype);
+-	if (proto == 0)
++	if (proto == 0) {
++		mutex_unlock(&pfk->dump_lock);
+ 		return -EINVAL;
++	}
+ 
+ 	if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ 		struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+ 
+ 		filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+-		if (filter == NULL)
++		if (filter == NULL) {
++			mutex_unlock(&pfk->dump_lock);
+ 			return -ENOMEM;
++		}
+ 
+ 		memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
+ 		       sizeof(xfrm_address_t));
+@@ -1829,6 +1858,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ 	pfk->dump.dump = pfkey_dump_sa;
+ 	pfk->dump.done = pfkey_dump_sa_done;
+ 	xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
++	mutex_unlock(&pfk->dump_lock);
+ 
+ 	return pfkey_do_dump(pfk);
+ }
+@@ -1921,19 +1951,14 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+ 
+ 	/* addresses present only in tunnel mode */
+ 	if (t->mode == XFRM_MODE_TUNNEL) {
+-		u8 *sa = (u8 *) (rq + 1);
+-		int family, socklen;
++		int err;
+ 
+-		family = pfkey_sockaddr_extract((struct sockaddr *)sa,
+-						&t->saddr);
+-		if (!family)
+-			return -EINVAL;
+-
+-		socklen = pfkey_sockaddr_len(family);
+-		if (pfkey_sockaddr_extract((struct sockaddr *)(sa + socklen),
+-					   &t->id.daddr) != family)
+-			return -EINVAL;
+-		t->encap_family = family;
++		err = parse_sockaddr_pair(
++			(struct sockaddr *)(rq + 1),
++			rq->sadb_x_ipsecrequest_len - sizeof(*rq),
++			&t->saddr, &t->id.daddr, &t->encap_family);
++		if (err)
++			return err;
+ 	} else
+ 		t->encap_family = xp->family;
+ 
+@@ -1953,7 +1978,11 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
+ 	if (pol->sadb_x_policy_len * 8 < sizeof(struct sadb_x_policy))
+ 		return -EINVAL;
+ 
+-	while (len >= sizeof(struct sadb_x_ipsecrequest)) {
++	while (len >= sizeof(*rq)) {
++		if (len < rq->sadb_x_ipsecrequest_len ||
++		    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
++			return -EINVAL;
++
+ 		if ((err = parse_ipsecrequest(xp, rq)) < 0)
+ 			return err;
+ 		len -= rq->sadb_x_ipsecrequest_len;
+@@ -2416,7 +2445,6 @@ out:
+ 	return err;
+ }
+ 
+-#ifdef CONFIG_NET_KEY_MIGRATE
+ static int pfkey_sockaddr_pair_size(sa_family_t family)
+ {
+ 	return PFKEY_ALIGN8(pfkey_sockaddr_len(family) * 2);
+@@ -2428,7 +2456,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+ {
+ 	int af, socklen;
+ 
+-	if (ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
++	if (ext_len < 2 || ext_len < pfkey_sockaddr_pair_size(sa->sa_family))
+ 		return -EINVAL;
+ 
+ 	af = pfkey_sockaddr_extract(sa, saddr);
+@@ -2444,6 +2472,7 @@ static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_NET_KEY_MIGRATE
+ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
+ 				    struct xfrm_migrate *m)
+ {
+@@ -2451,13 +2480,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
+ 	struct sadb_x_ipsecrequest *rq2;
+ 	int mode;
+ 
+-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
+-	    len < rq1->sadb_x_ipsecrequest_len)
++	if (len < sizeof(*rq1) ||
++	    len < rq1->sadb_x_ipsecrequest_len ||
++	    rq1->sadb_x_ipsecrequest_len < sizeof(*rq1))
+ 		return -EINVAL;
+ 
+ 	/* old endpoints */
+ 	err = parse_sockaddr_pair((struct sockaddr *)(rq1 + 1),
+-				  rq1->sadb_x_ipsecrequest_len,
++				  rq1->sadb_x_ipsecrequest_len - sizeof(*rq1),
+ 				  &m->old_saddr, &m->old_daddr,
+ 				  &m->old_family);
+ 	if (err)
+@@ -2466,13 +2496,14 @@ static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
+ 	rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
+ 	len -= rq1->sadb_x_ipsecrequest_len;
+ 
+-	if (len <= sizeof(struct sadb_x_ipsecrequest) ||
+-	    len < rq2->sadb_x_ipsecrequest_len)
++	if (len <= sizeof(*rq2) ||
++	    len < rq2->sadb_x_ipsecrequest_len ||
++	    rq2->sadb_x_ipsecrequest_len < sizeof(*rq2))
+ 		return -EINVAL;
+ 
+ 	/* new endpoints */
+ 	err = parse_sockaddr_pair((struct sockaddr *)(rq2 + 1),
+-				  rq2->sadb_x_ipsecrequest_len,
++				  rq2->sadb_x_ipsecrequest_len - sizeof(*rq2),
+ 				  &m->new_saddr, &m->new_daddr,
+ 				  &m->new_family);
+ 	if (err)
+@@ -2687,14 +2718,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
+ {
+ 	struct pfkey_sock *pfk = pfkey_sk(sk);
+ 
+-	if (pfk->dump.dump != NULL)
++	mutex_lock(&pfk->dump_lock);
++	if (pfk->dump.dump != NULL) {
++		mutex_unlock(&pfk->dump_lock);
+ 		return -EBUSY;
++	}
+ 
+ 	pfk->dump.msg_version = hdr->sadb_msg_version;
+ 	pfk->dump.msg_portid = hdr->sadb_msg_pid;
+ 	pfk->dump.dump = pfkey_dump_sp;
+ 	pfk->dump.done = pfkey_dump_sp_done;
+ 	xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
++	mutex_unlock(&pfk->dump_lock);
+ 
+ 	return pfkey_do_dump(pfk);
+ }
+diff --git a/net/nfc/core.c b/net/nfc/core.c
+index cff3f1614ad4..54596f609d04 100644
+--- a/net/nfc/core.c
++++ b/net/nfc/core.c
+@@ -969,6 +969,8 @@ static void nfc_release(struct device *d)
+ 			kfree(se);
+ 	}
+ 
++	ida_simple_remove(&nfc_index_ida, dev->idx);
++
+ 	kfree(dev);
+ }
+ 
+@@ -1043,6 +1045,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+ 				    int tx_headroom, int tx_tailroom)
+ {
+ 	struct nfc_dev *dev;
++	int rc;
+ 
+ 	if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
+ 	    !ops->deactivate_target || !ops->im_transceive)
+@@ -1055,6 +1058,15 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+ 	if (!dev)
+ 		return NULL;
+ 
++	rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
++	if (rc < 0)
++		goto err_free_dev;
++	dev->idx = rc;
++
++	dev->dev.class = &nfc_class;
++	dev_set_name(&dev->dev, "nfc%d", dev->idx);
++	device_initialize(&dev->dev);
++
+ 	dev->ops = ops;
+ 	dev->supported_protocols = supported_protocols;
+ 	dev->tx_headroom = tx_headroom;
+@@ -1077,6 +1089,11 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
+ 	}
+ 
+ 	return dev;
++
++err_free_dev:
++	kfree(dev);
++
++	return ERR_PTR(rc);
+ }
+ EXPORT_SYMBOL(nfc_allocate_device);
+ 
+@@ -1091,14 +1108,6 @@ int nfc_register_device(struct nfc_dev *dev)
+ 
+ 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+ 
+-	dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+-	if (dev->idx < 0)
+-		return dev->idx;
+-
+-	dev->dev.class = &nfc_class;
+-	dev_set_name(&dev->dev, "nfc%d", dev->idx);
+-	device_initialize(&dev->dev);
+-
+ 	mutex_lock(&nfc_devlist_mutex);
+ 	nfc_devlist_generation++;
+ 	rc = device_add(&dev->dev);
+@@ -1136,12 +1145,10 @@ EXPORT_SYMBOL(nfc_register_device);
+  */
+ void nfc_unregister_device(struct nfc_dev *dev)
+ {
+-	int rc, id;
++	int rc;
+ 
+ 	pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+ 
+-	id = dev->idx;
+-
+ 	if (dev->rfkill) {
+ 		rfkill_unregister(dev->rfkill);
+ 		rfkill_destroy(dev->rfkill);
+@@ -1166,8 +1173,6 @@ void nfc_unregister_device(struct nfc_dev *dev)
+ 	nfc_devlist_generation++;
+ 	device_del(&dev->dev);
+ 	mutex_unlock(&nfc_devlist_mutex);
+-
+-	ida_simple_remove(&nfc_index_ida, id);
+ }
+ EXPORT_SYMBOL(nfc_unregister_device);
+ 
+diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
+index 9578bd6a4f3e..5a6b76f8d157 100644
+--- a/net/nfc/llcp_sock.c
++++ b/net/nfc/llcp_sock.c
+@@ -76,7 +76,8 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
+ 	struct sockaddr_nfc_llcp llcp_addr;
+ 	int len, ret = 0;
+ 
+-	if (!addr || addr->sa_family != AF_NFC)
++	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
++	    addr->sa_family != AF_NFC)
+ 		return -EINVAL;
+ 
+ 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+@@ -150,7 +151,8 @@ static int llcp_raw_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	struct sockaddr_nfc_llcp llcp_addr;
+ 	int len, ret = 0;
+ 
+-	if (!addr || addr->sa_family != AF_NFC)
++	if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
++	    addr->sa_family != AF_NFC)
+ 		return -EINVAL;
+ 
+ 	pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+@@ -655,8 +657,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
+ 
+ 	pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
+ 
+-	if (!addr || len < sizeof(struct sockaddr_nfc) ||
+-	    addr->sa_family != AF_NFC)
++	if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
+ 		return -EINVAL;
+ 
+ 	if (addr->service_name_len == 0 && addr->dsap == 0)
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 49ff32106080..a776fb53d66d 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -981,8 +981,7 @@ struct nci_dev *nci_allocate_device(struct nci_ops *ops,
+ 	return ndev;
+ 
+ free_nfc:
+-	kfree(ndev->nfc_dev);
+-
++	nfc_free_device(ndev->nfc_dev);
+ free_nci:
+ 	kfree(ndev);
+ 	return NULL;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 3763036710ae..2f2a2a0e56ec 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -865,7 +865,9 @@ static int nfc_genl_activate_target(struct sk_buff *skb, struct genl_info *info)
+ 	u32 device_idx, target_idx, protocol;
+ 	int rc;
+ 
+-	if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
++	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
++	    !info->attrs[NFC_ATTR_TARGET_INDEX] ||
++	    !info->attrs[NFC_ATTR_PROTOCOLS])
+ 		return -EINVAL;
+ 
+ 	device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 93c9a70d046e..c29070c27073 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3370,14 +3370,19 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		if (val > INT_MAX)
+ 			return -EINVAL;
+-		po->tp_reserve = val;
+-		return 0;
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_reserve = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_LOSS:
+ 	{
+@@ -3954,7 +3959,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		register_prot_hook(sk);
+ 	}
+ 	spin_unlock(&po->bind_lock);
+-	if (closing && (po->tp_version > TPACKET_V2)) {
++	if (pg_vec && (po->tp_version > TPACKET_V2)) {
+ 		/* Because we don't support block-based V3 on tx-ring */
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 9b5dd2ac60b6..a0e45ae0a628 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2228,6 +2228,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
+index 51c4713ac6e3..468fdf21be4f 100644
+--- a/sound/soc/codecs/tlv320aic3x.c
++++ b/sound/soc/codecs/tlv320aic3x.c
+@@ -125,6 +125,16 @@ static const struct reg_default aic3x_reg[] = {
+ 	{ 108, 0x00 }, { 109, 0x00 },
+ };
+ 
++static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
++{
++	switch (reg) {
++	case AIC3X_RESET:
++		return true;
++	default:
++		return false;
++	}
++}
++
+ static const struct regmap_config aic3x_regmap = {
+ 	.reg_bits = 8,
+ 	.val_bits = 8,
+@@ -132,6 +142,9 @@ static const struct regmap_config aic3x_regmap = {
+ 	.max_register = DAC_ICC_ADJ,
+ 	.reg_defaults = aic3x_reg,
+ 	.num_reg_defaults = ARRAY_SIZE(aic3x_reg),
++
++	.volatile_reg = aic3x_volatile_reg,
++
+ 	.cache_type = REGCACHE_RBTREE,
+ };
+ 
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 1874cf0e6cab..35805d7e2bc2 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -68,7 +68,8 @@ out:
+ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
+ {
+ 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+-	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
++	struct snd_pcm_substream *fe_substream =
++		 fe->pcm->streams[cstream->direction].substream;
+ 	struct snd_soc_platform *platform = fe->platform;
+ 	struct snd_soc_dpcm *dpcm;
+ 	struct snd_soc_dapm_widget_list *list;
+@@ -412,7 +413,8 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
+ 					struct snd_compr_params *params)
+ {
+ 	struct snd_soc_pcm_runtime *fe = cstream->private_data;
+-	struct snd_pcm_substream *fe_substream = fe->pcm->streams[0].substream;
++	struct snd_pcm_substream *fe_substream =
++		 fe->pcm->streams[cstream->direction].substream;
+ 	struct snd_soc_platform *platform = fe->platform;
+ 	int ret = 0, stream;
+ 
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 52fe7eb2dea1..c99e18cb2ba7 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -163,6 +163,10 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
+ 		dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
+ 				be->dai_link->name, event, dir);
+ 
++		if ((event == SND_SOC_DAPM_STREAM_STOP) &&
++		    (be->dpcm[dir].users >= 1))
++			continue;
++
+ 		snd_soc_dapm_stream_event(be, dir, event);
+ 	}
+ 
+@@ -1991,9 +1995,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
+ 		break;
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+-	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ 		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
+ 		break;
++	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
++		break;
+ 	}
+ 
+ out:
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index c2131b851602..5fed093fd447 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -361,6 +361,9 @@ static void snd_complete_urb(struct urb *urb)
+ 	if (unlikely(atomic_read(&ep->chip->shutdown)))
+ 		goto exit_clear;
+ 
++	if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
++		goto exit_clear;
++
+ 	if (usb_pipeout(ep->pipe)) {
+ 		retire_outbound_urb(ep, ctx);
+ 		/* can be stopped during retire callback */
+diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
+index f1ce60065258..ec30c2fcbac0 100644
+--- a/tools/lib/traceevent/plugin_sched_switch.c
++++ b/tools/lib/traceevent/plugin_sched_switch.c
+@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
+ 	trace_seq_printf(s, "%lld ", val);
+ 
+ 	if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
+-		trace_seq_printf(s, "[%lld] ", val);
++		trace_seq_printf(s, "[%d] ", (int) val);
+ 
+ 	if (pevent_get_field_val(s,  event, "prev_state", record, &val, 0) == 0)
+ 		write_state(s, val);
+@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
+ 	trace_seq_printf(s, "%lld", val);
+ 
+ 	if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
+-		trace_seq_printf(s, " [%lld]", val);
++		trace_seq_printf(s, " [%d]", (int) val);
+ 
+ 	return 0;
+ }
+diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
+index 6680fa5cb9dd..d9f04239a12a 100644
+--- a/tools/perf/ui/browser.c
++++ b/tools/perf/ui/browser.c
+@@ -673,7 +673,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
+ 		ui_browser__gotorc(browser, row, column + 1);
+ 		SLsmg_draw_hline(2);
+ 
+-		if (row++ == 0)
++		if (++row == 0)
+ 			goto out;
+ 	} else
+ 		row = 0;
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index 3ddfab315e19..ec35cb33e46b 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -488,6 +488,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
+ 				break;
+ 		} else {
+ 			int n = namesz + descsz;
++
++			if (n > (int)sizeof(bf)) {
++				n = sizeof(bf);
++				pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
++					 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
++			}
+ 			if (read(fd, bf, n) != n)
+ 				break;
+ 		}
+diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
+index 620e37f741b8..6ddd3c742555 100644
+--- a/virt/kvm/vfio.c
++++ b/virt/kvm/vfio.c
+@@ -47,6 +47,22 @@ static struct vfio_group *kvm_vfio_group_get_external_user(struct file *filep)
+ 	return vfio_group;
+ }
+ 
++static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
++					       struct file *filep)
++{
++	bool ret, (*fn)(struct vfio_group *, struct file *);
++
++	fn = symbol_get(vfio_external_group_match_file);
++	if (!fn)
++		return false;
++
++	ret = fn(group, filep);
++
++	symbol_put(vfio_external_group_match_file);
++
++	return ret;
++}
++
+ static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
+ {
+ 	void (*fn)(struct vfio_group *);
+@@ -169,18 +185,13 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+ 		if (!f.file)
+ 			return -EBADF;
+ 
+-		vfio_group = kvm_vfio_group_get_external_user(f.file);
+-		fdput(f);
+-
+-		if (IS_ERR(vfio_group))
+-			return PTR_ERR(vfio_group);
+-
+ 		ret = -ENOENT;
+ 
+ 		mutex_lock(&kv->lock);
+ 
+ 		list_for_each_entry(kvg, &kv->group_list, node) {
+-			if (kvg->vfio_group != vfio_group)
++			if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
++								f.file))
+ 				continue;
+ 
+ 			list_del(&kvg->node);
+@@ -192,7 +203,7 @@ static int kvm_vfio_set_group(struct kvm_device *dev, long attr, u64 arg)
+ 
+ 		mutex_unlock(&kv->lock);
+ 
+-		kvm_vfio_group_put_external_user(vfio_group);
++		fdput(f);
+ 
+ 		kvm_vfio_update_coherency(dev);
+ 

diff --git a/1044_linux-4.1.45.patch b/1044_linux-4.1.45.patch
new file mode 100644
index 0000000..eb4d11d
--- /dev/null
+++ b/1044_linux-4.1.45.patch
@@ -0,0 +1,4031 @@
+diff --git a/Makefile b/Makefile
+index 9c7aa08c70b7..d4c064604058 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 44
++SUBLEVEL = 45
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
+index 4cb4b6d3452c..0bc66e1d3a7e 100644
+--- a/arch/alpha/include/asm/types.h
++++ b/arch/alpha/include/asm/types.h
+@@ -1,6 +1,6 @@
+ #ifndef _ALPHA_TYPES_H
+ #define _ALPHA_TYPES_H
+ 
+-#include <asm-generic/int-ll64.h>
++#include <uapi/asm/types.h>
+ 
+ #endif /* _ALPHA_TYPES_H */
+diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
+index 9fd3cd459777..8d1024d7be05 100644
+--- a/arch/alpha/include/uapi/asm/types.h
++++ b/arch/alpha/include/uapi/asm/types.h
+@@ -9,8 +9,18 @@
+  * need to be careful to avoid a name clashes.
+  */
+ 
+-#ifndef __KERNEL__
++/*
++ * This is here because we used to use l64 for alpha
++ * and we don't want to impact user mode with our change to ll64
++ * in the kernel.
++ *
++ * However, some user programs are fine with this.  They can
++ * define __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
++ */
++#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
+ #include <asm-generic/int-l64.h>
++#else
++#include <asm-generic/int-ll64.h>
+ #endif
+ 
+ #endif /* _UAPI_ALPHA_TYPES_H */
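After this change the kernel always sees the ll64 model (u64 == unsigned long long) while old alpha userspace keeps l64 by default; a userspace build that wants the ll64 types can opt in before including the header (illustrative):

	#define __SANE_USERSPACE_TYPES__	/* request int-ll64.h */
	#include <asm/types.h>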
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index d868289c5a26..da600d814035 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -315,6 +315,12 @@ ENTRY(EV_MachineCheck)
+ 	lr  r0, [efa]
+ 	mov r1, sp
+ 
++	; hardware auto-disables MMU, re-enable it to allow kernel vaddr
++	; access (e.g. for stack unwinding of modules in crash dumps)
++	lr	r3, [ARC_REG_PID]
++	or	r3, r3, MMU_ENABLE
++	sr	r3, [ARC_REG_PID]
++
+ 	lsr  	r3, r2, 8
+ 	bmsk 	r3, r3, 7
+ 	brne    r3, ECR_C_MCHK_DUP_TLB, 1f
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index 7f47d2a56f44..b7a0c44785c1 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -689,9 +689,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ 
+ 	local_irq_save(flags);
+ 
+-	/* re-enable the MMU */
+-	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+-
+ 	/* loop thru all sets of TLB */
+ 	for (set = 0; set < mmu->sets; set++) {
+ 
+diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
+index 6333d9c17875..9c521f9959a9 100644
+--- a/arch/arm/mm/fault.c
++++ b/arch/arm/mm/fault.c
+@@ -314,8 +314,11 @@ retry:
+ 	 * signal first. We do not need to release the mmap_sem because
+ 	 * it would already be released in __lock_page_or_retry in
+ 	 * mm/filemap.c. */
+-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++		if (!user_mode(regs))
++			goto no_context;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Major/minor page fault accounting is only done on the
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index c31e59fe2cb8..7b4e9ea0b1a4 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -156,9 +156,11 @@ void fpsimd_thread_switch(struct task_struct *next)
+ 
+ void fpsimd_flush_thread(void)
+ {
++	preempt_disable();
+ 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ 	fpsimd_flush_task_state(current);
+ 	set_thread_flag(TIF_FOREIGN_FPSTATE);
++	preempt_enable();
+ }
+ 
+ /*
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 16523fbd9671..d0e42f6fcddd 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -253,8 +253,11 @@ retry:
+ 	 * signal first. We do not need to release the mmap_sem because it
+ 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
+ 	 */
+-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
++	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
++		if (!user_mode(regs))
++			goto no_context;
+ 		return 0;
++	}
+ 
+ 	/*
+ 	 * Major/minor page fault accounting is only done on the initial
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index 91e5c1758b5c..64e016abb2a5 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
+ 
+ #define SWIZ_PTR(p)		((unsigned char __user *)((p) ^ swiz))
+ 
++#define __get_user_or_set_dar(_regs, _dest, _addr)		\
++	({							\
++		int rc = 0;					\
++		typeof(_addr) __addr = (_addr);			\
++		if (__get_user_inatomic(_dest, __addr)) {	\
++			_regs->dar = (unsigned long)__addr;	\
++			rc = -EFAULT;				\
++		}						\
++		rc;						\
++	})
++
++#define __put_user_or_set_dar(_regs, _src, _addr)		\
++	({							\
++		int rc = 0;					\
++		typeof(_addr) __addr = (_addr);			\
++		if (__put_user_inatomic(_src, __addr)) {	\
++			_regs->dar = (unsigned long)__addr;	\
++			rc = -EFAULT;				\
++		}						\
++		rc;						\
++	})
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 			    unsigned int reg, unsigned int nb,
+ 			    unsigned int flags, unsigned int instr,
+@@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 		} else {
+ 			unsigned long pc = regs->nip ^ (swiz & 4);
+ 
+-			if (__get_user_inatomic(instr,
+-						(unsigned int __user *)pc))
++			if (__get_user_or_set_dar(regs, instr,
++						  (unsigned int __user *)pc))
+ 				return -EFAULT;
++
+ 			if (swiz == 0 && (flags & SW))
+ 				instr = cpu_to_le32(instr);
+ 			nb = (instr >> 11) & 0x1f;
+@@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ 			       ((nb0 + 3) / 4) * sizeof(unsigned long));
+ 
+ 		for (i = 0; i < nb; ++i, ++p)
+-			if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-						SWIZ_PTR(p)))
++			if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++						  SWIZ_PTR(p)))
+ 				return -EFAULT;
+ 		if (nb0 > 0) {
+ 			rptr = &regs->gpr[0];
+ 			addr += nb;
+ 			for (i = 0; i < nb0; ++i, ++p)
+-				if (__get_user_inatomic(REG_BYTE(rptr,
+-								 i ^ bswiz),
+-							SWIZ_PTR(p)))
++				if (__get_user_or_set_dar(regs,
++							  REG_BYTE(rptr, i ^ bswiz),
++							  SWIZ_PTR(p)))
+ 					return -EFAULT;
+ 		}
+ 
+ 	} else {
+ 		for (i = 0; i < nb; ++i, ++p)
+-			if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+-						SWIZ_PTR(p)))
++			if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++						  SWIZ_PTR(p)))
+ 				return -EFAULT;
+ 		if (nb0 > 0) {
+ 			rptr = &regs->gpr[0];
+ 			addr += nb;
+ 			for (i = 0; i < nb0; ++i, ++p)
+-				if (__put_user_inatomic(REG_BYTE(rptr,
+-								 i ^ bswiz),
+-							SWIZ_PTR(p)))
++				if (__put_user_or_set_dar(regs,
++							  REG_BYTE(rptr, i ^ bswiz),
++							  SWIZ_PTR(p)))
+ 					return -EFAULT;
+ 		}
+ 	}
+@@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+  * Only POWER6 has these instructions, and it does true little-endian,
+  * so we don't need the address swizzling.
+  */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+-			   unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++			   unsigned int reg, unsigned int flags)
+ {
+ 	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ 	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+-	int i, ret, sw = 0;
++	int i, sw = 0;
+ 
+ 	if (reg & 1)
+ 		return 0;	/* invalid form: FRS/FRT must be even */
+ 	if (flags & SW)
+ 		sw = 7;
+-	ret = 0;
++
+ 	for (i = 0; i < 8; ++i) {
+ 		if (!(flags & ST)) {
+-			ret |= __get_user(ptr0[i^sw], addr + i);
+-			ret |= __get_user(ptr1[i^sw], addr + i + 8);
++			if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		} else {
+-			ret |= __put_user(ptr0[i^sw], addr + i);
+-			ret |= __put_user(ptr1[i^sw], addr + i + 8);
++			if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		}
+ 	}
+-	if (ret)
+-		return -EFAULT;
++
+ 	return 1;	/* exception handled and fixed up */
+ }
+ 
+@@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
+ {
+ 	char *ptr0 = (char *)&regs->gpr[reg];
+ 	char *ptr1 = (char *)&regs->gpr[reg+1];
+-	int i, ret, sw = 0;
++	int i, sw = 0;
+ 
+ 	if (reg & 1)
+ 		return 0;	/* invalid form: GPR must be even */
+ 	if (flags & SW)
+ 		sw = 7;
+-	ret = 0;
++
+ 	for (i = 0; i < 8; ++i) {
+ 		if (!(flags & ST)) {
+-			ret |= __get_user(ptr0[i^sw], addr + i);
+-			ret |= __get_user(ptr1[i^sw], addr + i + 8);
++			if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		} else {
+-			ret |= __put_user(ptr0[i^sw], addr + i);
+-			ret |= __put_user(ptr1[i^sw], addr + i + 8);
++			if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++				return -EFAULT;
++			if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++				return -EFAULT;
+ 		}
+ 	}
+-	if (ret)
+-		return -EFAULT;
++
+ 	return 1;	/* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ 	for (j = 0; j < length; j += elsize) {
+ 		for (i = 0; i < elsize; ++i) {
+ 			if (flags & ST)
+-				ret |= __put_user(ptr[i^sw], addr + i);
++				ret = __put_user_or_set_dar(regs, ptr[i^sw],
++							    addr + i);
+ 			else
+-				ret |= __get_user(ptr[i^sw], addr + i);
++				ret = __get_user_or_set_dar(regs, ptr[i^sw],
++							    addr + i);
++
++			if (ret)
++				return ret;
+ 		}
+ 		ptr  += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
+ 	unsigned int dsisr;
+ 	unsigned char __user *addr;
+ 	unsigned long p, swiz;
+-	int ret, i;
++	int i;
+ 	union data {
+ 		u64 ll;
+ 		double dd;
+@@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
+ 		if (flags & F) {
+ 			/* Special case for 16-byte FP loads and stores */
+ 			PPC_WARN_ALIGNMENT(fp_pair, regs);
+-			return emulate_fp_pair(addr, reg, flags);
++			return emulate_fp_pair(regs, addr, reg, flags);
+ 		} else {
+ #ifdef CONFIG_PPC64
+ 			/* Special case for 16-byte loads and stores */
+@@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
+ 		}
+ 
+ 		data.ll = 0;
+-		ret = 0;
+ 		p = (unsigned long)addr;
+ 
+ 		for (i = 0; i < nb; i++)
+-			ret |= __get_user_inatomic(data.v[start + i],
+-						   SWIZ_PTR(p++));
+-
+-		if (unlikely(ret))
+-			return -EFAULT;
++			if (__get_user_or_set_dar(regs, data.v[start + i],
++						  SWIZ_PTR(p++)))
++				return -EFAULT;
+ 
+ 	} else if (flags & F) {
+ 		data.ll = current->thread.TS_FPR(reg);
+@@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
+ 			break;
+ 		}
+ 
+-		ret = 0;
+ 		p = (unsigned long)addr;
+ 
+ 		for (i = 0; i < nb; i++)
+-			ret |= __put_user_inatomic(data.v[start + i],
+-						   SWIZ_PTR(p++));
++			if (__put_user_or_set_dar(regs, data.v[start + i],
++						  SWIZ_PTR(p++)))
++				return -EFAULT;
+ 
+-		if (unlikely(ret))
+-			return -EFAULT;
+ 	} else if (flags & F)
+ 		current->thread.TS_FPR(reg) = data.ll;
+ 	else
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 2903ff34174c..a8bd57d5ef43 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -204,6 +204,7 @@ void set_personality_ia32(bool);
+ 
+ #define ELF_CORE_COPY_REGS(pr_reg, regs)			\
+ do {								\
++	unsigned long base;					\
+ 	unsigned v;						\
+ 	(pr_reg)[0] = (regs)->r15;				\
+ 	(pr_reg)[1] = (regs)->r14;				\
+@@ -226,8 +227,8 @@ do {								\
+ 	(pr_reg)[18] = (regs)->flags;				\
+ 	(pr_reg)[19] = (regs)->sp;				\
+ 	(pr_reg)[20] = (regs)->ss;				\
+-	(pr_reg)[21] = current->thread.fs;			\
+-	(pr_reg)[22] = current->thread.gs;			\
++	rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base;		\
++	rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base;	\
+ 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
+ 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
+ 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index 34a5b93704d3..b36deb1d9561 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -301,13 +301,13 @@ static inline unsigned type in##bwl##_p(int port)			\
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ {									\
+ 	asm volatile("rep; outs" #bwl					\
+-		     : "+S"(addr), "+c"(count) : "d"(port));		\
++		     : "+S"(addr), "+c"(count) : "d"(port) : "memory");	\
+ }									\
+ 									\
+ static inline void ins##bwl(int port, void *addr, unsigned long count)	\
+ {									\
+ 	asm volatile("rep; ins" #bwl					\
+-		     : "+D"(addr), "+c"(count) : "d"(port));		\
++		     : "+D"(addr), "+c"(count) : "d"(port) : "memory");	\
+ }
+ 
+ BUILDIO(b, b, char)
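+
+The "memory" clobbers added above tell the compiler the asm reads (outs)
+or writes (ins) memory beyond its listed operands, so buffer accesses may
+not be reordered or cached across the instruction.  A minimal standalone
+sketch of the idea (GCC/Clang extended asm, x86 only, illustrative):
+
+	static void outsb_sketch(unsigned short port, const void *addr,
+				 unsigned long count)
+	{
+	#if defined(__x86_64__) || defined(__i386__)
+		/* without "memory", stores into the buffer could legally
+		 * be sunk below the asm and the device would see stale data
+		 */
+		asm volatile("rep; outsb"
+			     : "+S"(addr), "+c"(count)
+			     : "d"(port)
+			     : "memory");
+	#endif
+	}
+
+	static void send_two(unsigned short port, unsigned char *buf)
+	{
+		buf[0] = 0x10;	/* must be emitted before the rep outsb */
+		buf[1] = 0x20;
+		outsb_sketch(port, buf, 2);
+	}
+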
+diff --git a/block/blk-core.c b/block/blk-core.c
+index bbbf36e6066b..a891e1f19f7b 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -194,7 +194,7 @@ EXPORT_SYMBOL(blk_delay_queue);
+  **/
+ void blk_start_queue(struct request_queue *q)
+ {
+-	WARN_ON(!irqs_disabled());
++	WARN_ON(!in_interrupt() && !irqs_disabled());
+ 
+ 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ 	__blk_run_queue(q);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index c0f03562a145..3734c5591d07 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -94,8 +94,13 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
+ 	}
+ 	sgl = sreq->tsg;
+ 	n = sg_nents(sgl);
+-	for_each_sg(sgl, sg, n, i)
+-		put_page(sg_page(sg));
++	for_each_sg(sgl, sg, n, i) {
++		struct page *page = sg_page(sg);
++
++		/* some SGs may not have a page mapped */
++		if (page && atomic_read(&page->_count))
++			put_page(page);
++	}
+ 
+ 	kfree(sreq->tsg);
+ }
+diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
+index e82d0976a5d0..568120eee7d9 100644
+--- a/drivers/acpi/apei/ghes.c
++++ b/drivers/acpi/apei/ghes.c
+@@ -1064,6 +1064,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
+ 		if (list_empty(&ghes_sci))
+ 			unregister_acpi_hed_notifier(&ghes_notifier_sci);
+ 		mutex_unlock(&ghes_list_mutex);
++		synchronize_rcu();
+ 		break;
+ 	case ACPI_HEST_NOTIFY_NMI:
+ 		ghes_nmi_remove(ghes);
+diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
+index ccdc8db16bb8..fa2cf2dc4e33 100644
+--- a/drivers/acpi/ioapic.c
++++ b/drivers/acpi/ioapic.c
+@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
+ 	struct resource *res = data;
+ 	struct resource_win win;
+ 
++	/*
++	 * We might assign this to 'res' later; make sure all pointers are
++	 * cleared before the resource is added to the global list
++	 */
++	memset(&win, 0, sizeof(win));
++
+ 	res->flags = 0;
+ 	if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
+ 		return AE_OK;
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 6f086415727c..235ba1fbabdb 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2865,7 +2865,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	const char *failure_string;
+ 	struct binder_buffer *buffer;
+ 
+-	if (proc->tsk != current)
++	if (proc->tsk != current->group_leader)
+ 		return -EINVAL;
+ 
+ 	if ((vma->vm_end - vma->vm_start) > SZ_4M)
+diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
+index 8d4d959a821c..8706533db57b 100644
+--- a/drivers/ata/pata_amd.c
++++ b/drivers/ata/pata_amd.c
+@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
+ 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE),	8 },
+ 	{ PCI_VDEVICE(NVIDIA,	PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE),	8 },
+ 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE),		9 },
++	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE),	9 },
+ 
+ 	{ },
+ };
+diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
+index 6c15a554efbe..dc1255294628 100644
+--- a/drivers/ata/pata_cs5536.c
++++ b/drivers/ata/pata_cs5536.c
+@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+ 
+ static const struct pci_device_id cs5536[] = {
+ 	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_IDE), },
++	{ PCI_VDEVICE(AMD,	PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
+ 	{ },
+ };
+ 
+diff --git a/drivers/base/bus.c b/drivers/base/bus.c
+index 79bc203f51ef..07ea8608fb0b 100644
+--- a/drivers/base/bus.c
++++ b/drivers/base/bus.c
+@@ -722,7 +722,7 @@ int bus_add_driver(struct device_driver *drv)
+ 
+ out_unregister:
+ 	kobject_put(&priv->kobj);
+-	kfree(drv->p);
++	/* drv->p is freed in driver_release() */
+ 	drv->p = NULL;
+ out_put_bus:
+ 	bus_put(bus);
+diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
+index 1e46eb2305c0..f928e698f659 100644
+--- a/drivers/block/skd_main.c
++++ b/drivers/block/skd_main.c
+@@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
+ 		 */
+ 		qcmd |= FIT_QCMD_MSGSIZE_64;
+ 
++	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
++	smp_wmb();
++
+ 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+ 
+ }
+@@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
+ 	qcmd = skspcl->mb_dma_address;
+ 	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ 
++	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
++	smp_wmb();
++
+ 	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+ }
+ 
+@@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
+ {
+ 	struct gendisk *disk = skdev->disk;
+ 
+-	if (disk != NULL) {
+-		struct request_queue *q = disk->queue;
++	if (disk && (disk->flags & GENHD_FL_UP))
++		del_gendisk(disk);
+ 
+-		if (disk->flags & GENHD_FL_UP)
+-			del_gendisk(disk);
+-		if (q)
+-			blk_cleanup_queue(q);
+-		put_disk(disk);
++	if (skdev->queue) {
++		blk_cleanup_queue(skdev->queue);
++		skdev->queue = NULL;
++		disk->queue = NULL;
+ 	}
++
++	put_disk(disk);
+ 	skdev->disk = NULL;
+ }
+ 
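+
+Both smp_wmb() insertions above follow the usual producer/doorbell rule:
+publish the message buffer before ringing the doorbell.  Rough userspace
+analogy with C11 release semantics (an analogy only; the driver itself
+pairs smp_wmb() with an MMIO write):
+
+	#include <stdatomic.h>
+
+	struct ring {
+		unsigned char msg[64];
+		_Atomic unsigned int doorbell;
+	};
+
+	static void ring_send(struct ring *r, const unsigned char *buf, int n)
+	{
+		int i;
+
+		for (i = 0; i < n; i++)
+			r->msg[i] = buf[i];
+		/* release store: all msg[] writes above become visible
+		 * before the doorbell, like smp_wmb() + SKD_WRITEQ()
+		 */
+		atomic_store_explicit(&r->doorbell, 1, memory_order_release);
+	}
+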
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 3956fd646bf2..0c13dfd1c29d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -323,6 +323,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
+ 	{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
++	{ USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
+ 
+ 	/* Additional Realtek 8821AE Bluetooth devices */
+ 	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
+index 6e3b78ee7d16..be9b1c8b9209 100644
+--- a/drivers/gpu/drm/drm_atomic.c
++++ b/drivers/gpu/drm/drm_atomic.c
+@@ -996,6 +996,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ 	if (config->funcs->atomic_check)
+ 		ret = config->funcs->atomic_check(state->dev, state);
+ 
++	if (ret)
++		return ret;
++
+ 	if (!state->allow_modeset) {
+ 		for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ 			if (crtc_state->mode_changed ||
+@@ -1007,7 +1010,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
+ 		}
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ EXPORT_SYMBOL(drm_atomic_check_only);
+ 
+diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
+index 16a164770713..9b2de3ff66d9 100644
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -710,13 +710,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
+ 	struct drm_gem_object *obj = ptr;
+ 	struct drm_device *dev = obj->dev;
+ 
++	if (dev->driver->gem_close_object)
++		dev->driver->gem_close_object(obj, file_priv);
++
+ 	if (drm_core_check_feature(dev, DRIVER_PRIME))
+ 		drm_gem_remove_prime_handles(obj, file_priv);
+ 	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+ 
+-	if (dev->driver->gem_close_object)
+-		dev->driver->gem_close_object(obj, file_priv);
+-
+ 	drm_gem_object_handle_unreference_unlocked(obj);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
+index b728523e194f..bfdbfc431e07 100644
+--- a/drivers/gpu/drm/i2c/adv7511.c
++++ b/drivers/gpu/drm/i2c/adv7511.c
+@@ -48,6 +48,10 @@ struct adv7511 {
+ 	struct gpio_desc *gpio_pd;
+ };
+ 
++static const int edid_i2c_addr = 0x7e;
++static const int packet_i2c_addr = 0x70;
++static const int cec_i2c_addr = 0x78;
++
+ static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
+ {
+ 	return to_encoder_slave(encoder)->slave_priv;
+@@ -362,12 +366,19 @@ static void adv7511_power_on(struct adv7511 *adv7511)
+ {
+ 	adv7511->current_edid_segment = -1;
+ 
+-	regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+-		     ADV7511_INT0_EDID_READY);
+-	regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+-		     ADV7511_INT1_DDC_ERROR);
+ 	regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ 			   ADV7511_POWER_POWER_DOWN, 0);
++	if (adv7511->i2c_main->irq) {
++		/*
++		 * Documentation says the INT_ENABLE registers are reset in
++		 * POWER_DOWN mode. My 7511w preserved the bits, however.
++		 * Still, let's be safe and stick to the documentation.
++		 */
++		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
++			     ADV7511_INT0_EDID_READY);
++		regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
++			     ADV7511_INT1_DDC_ERROR);
++	}
+ 
+ 	/*
+ 	 * Per spec it is allowed to pulse the HPD signal to indicate that the
+@@ -567,13 +578,18 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
+ 
+ 	/* Reading the EDID only works if the device is powered */
+ 	if (!adv7511->powered) {
+-		regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
+-			     ADV7511_INT0_EDID_READY);
+-		regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+-			     ADV7511_INT1_DDC_ERROR);
+ 		regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
+ 				   ADV7511_POWER_POWER_DOWN, 0);
++		if (adv7511->i2c_main->irq) {
++			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
++				     ADV7511_INT0_EDID_READY);
++			regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
++				     ADV7511_INT1_DDC_ERROR);
++		}
+ 		adv7511->current_edid_segment = -1;
++		/* Reset the EDID_I2C_ADDR register as it might be cleared */
++		regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
++				edid_i2c_addr);
+ 	}
+ 
+ 	edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
+@@ -849,10 +865,6 @@ static int adv7511_parse_dt(struct device_node *np,
+ 	return 0;
+ }
+ 
+-static const int edid_i2c_addr = 0x7e;
+-static const int packet_i2c_addr = 0x70;
+-static const int cec_i2c_addr = 0x78;
+-
+ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+ {
+ 	struct adv7511_link_config link_config;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 7c6f15d284e3..824c835330df 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -148,8 +148,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
+ 
+ 	/* Signal polarities */
+-	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
+-	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
++	value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
++	      | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
+ 	      | DSMR_DIPM_DE | DSMR_CSPM;
+ 	rcar_du_crtc_write(rcrtc, DSMR, value);
+ 
+@@ -171,7 +171,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ 					mode->crtc_vsync_start - 1);
+ 	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);
+ 
+-	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start);
++	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start - 1);
+ 	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
+ }
+ 
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+index 85043c5bad03..873e04aa9352 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+@@ -56,11 +56,11 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+ 		return ret;
+ 
+ 	/* PLL clock configuration */
+-	if (freq <= 38000)
++	if (freq < 39000)
+ 		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+-	else if (freq <= 60000)
++	else if (freq < 61000)
+ 		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+-	else if (freq <= 121000)
++	else if (freq < 121000)
+ 		pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+ 	else
+ 		pllcr = LVDPLLCR_PLLDLYCNT_150M;
+@@ -102,7 +102,7 @@ static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+ 	/* Turn the PLL on, wait for the startup delay, and turn the output
+ 	 * on.
+ 	 */
+-	lvdcr0 |= LVDCR0_PLLEN;
++	lvdcr0 |= LVDCR0_PLLON;
+ 	rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ 
+ 	usleep_range(100, 150);
+diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+index 77cf9289ab65..b1eafd097a79 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+@@ -18,7 +18,7 @@
+ #define LVDCR0_DMD			(1 << 12)
+ #define LVDCR0_LVMD_MASK		(0xf << 8)
+ #define LVDCR0_LVMD_SHIFT		8
+-#define LVDCR0_PLLEN			(1 << 4)
++#define LVDCR0_PLLON			(1 << 4)
+ #define LVDCR0_BEN			(1 << 2)
+ #define LVDCR0_LVEN			(1 << 1)
+ #define LVDCR0_LVRES			(1 << 0)
+diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+index 025c429050c0..5d8dfe027b30 100644
+--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
+@@ -612,7 +612,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
+ 		} else {
+ 			pr_err("Failed to fill pool (%p)\n", pool);
+ 			/* If we have any pages left put them to the pool. */
+-			list_for_each_entry(p, &pool->list, lru) {
++			list_for_each_entry(p, &new_pages, lru) {
+ 				++cpages;
+ 			}
+ 			list_splice(&new_pages, &pool->list);
+diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
+index f994712d0904..a9276eeb61d5 100644
+--- a/drivers/i2c/busses/i2c-ismt.c
++++ b/drivers/i2c/busses/i2c-ismt.c
+@@ -340,8 +340,10 @@ static int ismt_process_desc(const struct ismt_desc *desc,
+ 			break;
+ 		case I2C_SMBUS_BLOCK_DATA:
+ 		case I2C_SMBUS_I2C_BLOCK_DATA:
+-			memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+-			data->block[0] = desc->rxbytes;
++			if (desc->rxbytes != dma_buffer[0] + 1)
++				return -EMSGSIZE;
++
++			memcpy(data->block, dma_buffer, desc->rxbytes);
+ 			break;
+ 		}
+ 		return 0;
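+
+The block-read fix above stops trusting desc->rxbytes on its own: the
+first DMA byte is the device's count, and the two must agree before the
+memcpy().  The same defensive shape in standalone form (hypothetical
+buffer layout, POSIX errno, illustration only):
+
+	#include <string.h>
+	#include <errno.h>
+
+	#define BLK_MAX	32	/* cf. I2C_SMBUS_BLOCK_MAX */
+
+	static int read_block(unsigned char *out, const unsigned char *dma,
+			      size_t rxbytes)
+	{
+		/* dma[0] is the device-reported payload length */
+		if (rxbytes < 1 || rxbytes != (size_t)dma[0] + 1 ||
+		    rxbytes > BLK_MAX + 1)
+			return -EMSGSIZE;
+		memcpy(out, dma, rxbytes);	/* count byte + payload */
+		return 0;
+	}
+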
+diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
+index 19b2d689a5ef..4880aae98b4c 100644
+--- a/drivers/i2c/busses/i2c-jz4780.c
++++ b/drivers/i2c/busses/i2c-jz4780.c
+@@ -783,10 +783,6 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
+ 
+ 	jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
+ 
+-	i2c->cmd = 0;
+-	memset(i2c->cmd_buf, 0, BUFSIZE);
+-	memset(i2c->data_buf, 0, BUFSIZE);
+-
+ 	i2c->irq = platform_get_irq(pdev, 0);
+ 	ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
+ 			       dev_name(&pdev->dev), i2c);
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 595511022795..3460dd0e3e99 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 	s32 poll_value = 0;
+ 
+ 	if (state) {
+-		if (!atomic_read(&st->user_requested_state))
+-			return 0;
+ 		if (sensor_hub_device_open(st->hsdev))
+ 			return -EIO;
+ 
+@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 				       &report_val);
+ 	}
+ 
++	pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
++		 st->pdev->name, state_val, report_val);
++
+ 	sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+ 			       st->power_state.index,
+ 			       sizeof(state_val), &state_val);
+@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 		ret = pm_runtime_get_sync(&st->pdev->dev);
+ 	else {
+ 		pm_runtime_mark_last_busy(&st->pdev->dev);
++		pm_runtime_use_autosuspend(&st->pdev->dev);
+ 		ret = pm_runtime_put_autosuspend(&st->pdev->dev);
+ 	}
+ 	if (ret < 0) {
+@@ -175,8 +177,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
+ 	/* Default to 3 seconds, but can be changed from sysfs */
+ 	pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
+ 					 3000);
+-	pm_runtime_use_autosuspend(&attrb->pdev->dev);
+-
+ 	return ret;
+ error_unreg_trigger:
+ 	iio_trigger_unregister(trig);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index b94bfd3f595b..7a9c50842d8b 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ 		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
+ 		.gyro_max_scale = 450,
+ 		.accel_max_val = IIO_M_S_2_TO_G(12500),
+-		.accel_max_scale = 5,
++		.accel_max_scale = 10,
+ 	},
+ 	[ADIS16485] = {
+ 		.channels = adis16485_channels,
+diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
+index 354d47ecd66a..7e2dc5e56632 100644
+--- a/drivers/input/mouse/trackpoint.c
++++ b/drivers/input/mouse/trackpoint.c
+@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
+ 	if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
+ 		return -1;
+ 
+-	if (param[0] != TP_MAGIC_IDENT)
++	/* add new TP ID. */
++	if (!(param[0] & TP_MAGIC_IDENT))
+ 		return -1;
+ 
+ 	if (firmware_id)
+@@ -380,8 +381,8 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+ 		return 0;
+ 
+ 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
+-		psmouse_warn(psmouse, "failed to get extended button data\n");
+-		button_info = 0;
++		psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
++		button_info = 0x33;
+ 	}
+ 
+ 	psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
+diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
+index 5617ed3a7d7a..88055755f82e 100644
+--- a/drivers/input/mouse/trackpoint.h
++++ b/drivers/input/mouse/trackpoint.h
+@@ -21,8 +21,9 @@
+ #define TP_COMMAND		0xE2	/* Commands start with this */
+ 
+ #define TP_READ_ID		0xE1	/* Sent for device identification */
+-#define TP_MAGIC_IDENT		0x01	/* Sent after a TP_READ_ID followed */
++#define TP_MAGIC_IDENT		0x03	/* Sent after a TP_READ_ID followed */
+ 					/* by the firmware ID */
++					/* Firmware ID includes 0x1, 0x2, 0x3 */
+ 
+ 
+ /*
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 1f40cdc1b357..18fd4cd6d3c7 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -814,6 +814,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ 		},
+ 	},
++	{
++		/* Gigabyte P57 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
++		},
++	},
+ 	{
+ 		/* Schenker XMG C504 - Elantech touchpad */
+ 		.matches = {
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 869d01dd4063..af20eac63ad4 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -148,9 +148,9 @@ void __init aic_common_rtc_irq_fixup(struct device_node *root)
+ 	struct device_node *np;
+ 	void __iomem *regs;
+ 
+-	np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
++	np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
+ 	if (!np)
+-		np = of_find_compatible_node(root, NULL,
++		np = of_find_compatible_node(NULL, NULL,
+ 					     "atmel,at91sam9x5-rtc");
+ 
+ 	if (!np)
+@@ -202,7 +202,6 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
+ 		return;
+ 
+ 	match = of_match_node(matches, root);
+-	of_node_put(root);
+ 
+ 	if (match) {
+ 		void (*fixup)(struct device_node *) = match->data;
+diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
+index 269c2354c431..e1d71574bdb5 100644
+--- a/drivers/irqchip/irq-mips-gic.c
++++ b/drivers/irqchip/irq-mips-gic.c
+@@ -861,8 +861,11 @@ static int __init gic_of_init(struct device_node *node,
+ 		gic_len = resource_size(&res);
+ 	}
+ 
+-	if (mips_cm_present())
++	if (mips_cm_present()) {
+ 		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
++		/* Ensure GIC region is enabled before trying to access it */
++		__sync();
++	}
+ 	gic_present = true;
+ 
+ 	__gic_init(gic_base, gic_len, cpu_vec, 0, node);
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 04f7bc28ef83..dfdd1908641c 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -348,6 +348,7 @@ struct cached_dev {
+ 	/* Limit number of writeback bios in flight */
+ 	struct semaphore	in_flight;
+ 	struct task_struct	*writeback_thread;
++	struct workqueue_struct	*writeback_write_wq;
+ 
+ 	struct keybuf		writeback_keys;
+ 
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index a7a03a21d78a..8e5666ac8a6a 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1054,7 +1054,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ 	}
+ 
+ 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+-		bch_sectors_dirty_init(dc);
++		bch_sectors_dirty_init(&dc->disk);
+ 		atomic_set(&dc->has_dirty, 1);
+ 		atomic_inc(&dc->count);
+ 		bch_writeback_queue(dc);
+@@ -1087,6 +1087,8 @@ static void cached_dev_free(struct closure *cl)
+ 	cancel_delayed_work_sync(&dc->writeback_rate_update);
+ 	if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ 		kthread_stop(dc->writeback_thread);
++	if (dc->writeback_write_wq)
++		destroy_workqueue(dc->writeback_write_wq);
+ 
+ 	mutex_lock(&bch_register_lock);
+ 
+@@ -1258,6 +1260,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+ 		goto err;
+ 
+ 	bcache_device_attach(d, c, u - c->uuids);
++	bch_sectors_dirty_init(d);
+ 	bch_flash_dev_request_init(d);
+ 	add_disk(d->disk);
+ 
+@@ -1996,6 +1999,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			else
+ 				err = "device busy";
+ 			mutex_unlock(&bch_register_lock);
++			if (!IS_ERR(bdev))
++				bdput(bdev);
+ 			if (attr == &ksysfs_register_quiet)
+ 				goto out;
+ 		}
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b3ff57d61dde..4fbb5532f24c 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -191,7 +191,7 @@ STORE(__cached_dev)
+ {
+ 	struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ 					     disk.kobj);
+-	unsigned v = size;
++	ssize_t v = size;
+ 	struct cache_set *c;
+ 	struct kobj_uevent_env *env;
+ 
+@@ -226,7 +226,7 @@ STORE(__cached_dev)
+ 		bch_cached_dev_run(dc);
+ 
+ 	if (attr == &sysfs_cache_mode) {
+-		ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
++		v = bch_read_string_list(buf, bch_cache_modes + 1);
+ 
+ 		if (v < 0)
+ 			return v;
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index db3ae4c2b223..6c18e3ec3e48 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
+ STRTO_H(strtoll, long long)
+ STRTO_H(strtoull, unsigned long long)
+ 
++/**
++ * bch_hprint() - formats @v to human readable string for sysfs.
++ *
++ * @v - signed 64 bit integer
++ * @buf - the (at least 8 byte) buffer to format the result into.
++ *
++ * Returns the number of bytes used by format.
++ */
+ ssize_t bch_hprint(char *buf, int64_t v)
+ {
+ 	static const char units[] = "?kMGTPEZY";
+-	char dec[4] = "";
+-	int u, t = 0;
+-
+-	for (u = 0; v >= 1024 || v <= -1024; u++) {
+-		t = v & ~(~0 << 10);
+-		v >>= 10;
+-	}
+-
+-	if (!u)
+-		return sprintf(buf, "%llu", v);
+-
+-	if (v < 100 && v > -100)
+-		snprintf(dec, sizeof(dec), ".%i", t / 100);
+-
+-	return sprintf(buf, "%lli%s%c", v, dec, units[u]);
++	int u = 0, t;
++
++	uint64_t q;
++
++	if (v < 0)
++		q = -v;
++	else
++		q = v;
++
++	/* For as long as the number is more than 3 digits, but at least
++	 * once, shift right / divide by 1024.  Keep the remainder for
++	 * a digit after the decimal point.
++	 */
++	do {
++		u++;
++
++		t = q & ~(~0 << 10);
++		q >>= 10;
++	} while (q >= 1000);
++
++	if (v < 0)
++		/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
++		 * yields 8 bytes.
++		 */
++		return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
++	else
++		return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
+ }
+ 
+ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
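+
+The rewritten bch_hprint() above divides by 1024 until at most three
+digits remain, keeping the final 10-bit remainder for one decimal digit
+(t * 10 / 1024).  A userspace port of the same algorithm, handy for
+checking the output format (sketch, not kernel code):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	static int hprint(char *buf, int64_t v)
+	{
+		static const char units[] = "?kMGTPEZY";
+		uint64_t q = v < 0 ? 0 - (uint64_t)v : (uint64_t)v;
+		int u = 0, t = 0;
+
+		do {
+			u++;
+			t = (int)(q & 1023);	/* remainder, 0..1023 */
+			q >>= 10;
+		} while (q >= 1000);
+
+		/* worst case "-999.9Y" plus NUL: 8 bytes, as documented */
+		return sprintf(buf, "%s%llu.%d%c", v < 0 ? "-" : "",
+			       (unsigned long long)q, t * 10 / 1024,
+			       units[u]);
+	}
+
+	/* e.g. hprint(buf, 1536) writes "1.5k" */
+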
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 540256a0df4f..b0667b321a3f 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -21,7 +21,8 @@
+ static void __update_writeback_rate(struct cached_dev *dc)
+ {
+ 	struct cache_set *c = dc->disk.c;
+-	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
++	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
++				bcache_flash_devs_sectors_dirty(c);
+ 	uint64_t cache_dirty_target =
+ 		div_u64(cache_sectors * dc->writeback_percent, 100);
+ 
+@@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
+ 
+ 	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ 
+-	continue_at(cl, write_dirty_finish, system_wq);
++	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
+ }
+ 
+ static void read_dirty_endio(struct bio *bio, int error)
+@@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
+ 
+ 	closure_bio_submit(&io->bio, cl, &io->dc->disk);
+ 
+-	continue_at(cl, write_dirty, system_wq);
++	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ }
+ 
+ static void read_dirty(struct cached_dev *dc)
+@@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ 	return MAP_CONTINUE;
+ }
+ 
+-void bch_sectors_dirty_init(struct cached_dev *dc)
++void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ 	struct sectors_dirty_init op;
+ 
+ 	bch_btree_op_init(&op.op, -1);
+-	op.inode = dc->disk.id;
++	op.inode = d->id;
+ 
+-	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
++	bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
+ 			   sectors_dirty_init_fn, 0);
+ 
+-	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
++	d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
+ }
+ 
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+@@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+ 
+ int bch_cached_dev_writeback_start(struct cached_dev *dc)
+ {
++	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
++						WQ_MEM_RECLAIM, 0);
++	if (!dc->writeback_write_wq)
++		return -ENOMEM;
++
+ 	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ 					      "bcache_writeback");
+ 	if (IS_ERR(dc->writeback_thread))
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 073a042aed24..daec4fd782ea 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+ 	return ret;
+ }
+ 
++static inline uint64_t  bcache_flash_devs_sectors_dirty(struct cache_set *c)
++{
++	uint64_t i, ret = 0;
++
++	mutex_lock(&bch_register_lock);
++
++	for (i = 0; i < c->nr_uuids; i++) {
++		struct bcache_device *d = c->devices[i];
++
++		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
++			continue;
++		ret += bcache_dev_sectors_dirty(d);
++	}
++
++	mutex_unlock(&bch_register_lock);
++
++	return ret;
++}
++
+ static inline unsigned offset_to_stripe(struct bcache_device *d,
+ 					uint64_t offset)
+ {
+@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+ 
+ void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
+ 
+-void bch_sectors_dirty_init(struct cached_dev *dc);
++void bch_sectors_dirty_init(struct bcache_device *);
+ void bch_cached_dev_writeback_init(struct cached_dev *);
+ int bch_cached_dev_writeback_start(struct cached_dev *);
+ 
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index a7621a258936..7078447c8cd7 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1965,6 +1965,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 	long pages;
+ 	struct bitmap_page *new_bp;
+ 
++	if (bitmap->storage.file && !init) {
++		pr_info("md: cannot resize file-based bitmap\n");
++		return -EINVAL;
++	}
++
+ 	if (chunksize == 0) {
+ 		/* If there is enough space, leave the chunk size unchanged,
+ 		 * else increase by factor of two until there is enough space.
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index 3e59b288b8a8..618e4e2b4207 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2001,6 +2001,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ 		goto done;
+ 	}
+ 
++	/* Validate the user-provided bit-size and offset */
++	if (mapping->size > 32 ||
++	    mapping->offset + mapping->size > ctrl->info.size * 8) {
++		ret = -EINVAL;
++		goto done;
++	}
++
+ 	list_for_each_entry(map, &ctrl->info.mappings, list) {
+ 		if (mapping->id == map->id) {
+ 			uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 4b777be714a4..4f002d0bebb1 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -750,7 +750,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
+ 		copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ 		put_user(kp->pending, &up->pending) ||
+ 		put_user(kp->sequence, &up->sequence) ||
+-		compat_put_timespec(&kp->timestamp, &up->timestamp) ||
++		put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
++		put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
+ 		put_user(kp->id, &up->id) ||
+ 		copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ 			return -EFAULT;
+diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
+index 4ee080d49bc0..3ea651afa63d 100644
+--- a/drivers/net/ethernet/freescale/gianfar.c
++++ b/drivers/net/ethernet/freescale/gianfar.c
+@@ -3512,7 +3512,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
+ 		u32 tempval1 = gfar_read(&regs->maccfg1);
+ 		u32 tempval = gfar_read(&regs->maccfg2);
+ 		u32 ecntrl = gfar_read(&regs->ecntrl);
+-		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
++		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
+ 
+ 		if (phydev->duplex != priv->oldduplex) {
+ 			if (!(phydev->duplex))
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+index 829be21f97b2..be258d90de9e 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
+ 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
+ 	seg_hdr->segNum = seg_number;
+ 	seg_hdr->segSize = seg_size;
+-	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
++	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+ }
+ 
+ /*
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 34a59e79a33c..480c9366d6b6 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -750,6 +750,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
+ 	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
++	{QMI_FIXED_INTF(0x2001, 0x7e35, 4)},	/* D-Link DWM-222 */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+ 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index c0e454bb6a8d..e0e23470a380 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -1040,6 +1040,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+ 		goto err_wmi_detach;
+ 	}
+ 
++	/* If firmware indicates Full Rx Reorder support it must be used in a
++	 * slightly different manner. Let HTT code know.
++	 */
++	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
++						ar->wmi.svc_map));
++
+ 	status = ath10k_htt_rx_alloc(&ar->htt);
+ 	if (status) {
+ 		ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+@@ -1104,12 +1110,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+ 		goto err_hif_stop;
+ 	}
+ 
+-	/* If firmware indicates Full Rx Reorder support it must be used in a
+-	 * slightly different manner. Let HTT code know.
+-	 */
+-	ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+-						ar->wmi.svc_map));
+-
+ 	status = ath10k_htt_rx_ring_refill(ar);
+ 	if (status) {
+ 		ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
+index 275408eaf95e..8a11dab8f4b3 100644
+--- a/drivers/net/wireless/p54/fwio.c
++++ b/drivers/net/wireless/p54/fwio.c
+@@ -489,7 +489,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
+ 
+ 			entry += sizeof(__le16);
+ 			chan->pa_points_per_curve = 8;
+-			memset(chan->curve_data, 0, sizeof(*chan->curve_data));
++			memset(chan->curve_data, 0, sizeof(chan->curve_data));
+ 			memcpy(chan->curve_data, entry,
+ 			       sizeof(struct p54_pa_curve_data_sample) *
+ 			       min((u8)8, curve_data->points_per_channel));
+diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
+index 5d54d16a59e7..040bf3c66958 100644
+--- a/drivers/net/wireless/ti/wl1251/main.c
++++ b/drivers/net/wireless/ti/wl1251/main.c
+@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
+ 
+ 	wl->state = WL1251_STATE_OFF;
+ 	mutex_init(&wl->mutex);
++	spin_lock_init(&wl->wl_lock);
+ 
+ 	wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
+ 	wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index bf89754fe973..308a95ead432 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -413,7 +413,7 @@ static void __unflatten_device_tree(void *blob,
+ 	/* Allocate memory for the expanded device tree */
+ 	mem = dt_alloc(size + 4, __alignof__(struct device_node));
+ 	if (!mem)
+-		return NULL;
++		return;
+ 
+ 	memset(mem, 0, size);
+ 
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index 7b0ca1551d7b..005ea632ba53 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -954,7 +954,7 @@ static int __init dino_probe(struct parisc_device *dev)
+ 
+ 	dino_dev->hba.dev = dev;
+ 	dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
+-	dino_dev->hba.lmmio_space_offset = 0;	/* CPU addrs == bus addrs */
++	dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
+ 	spin_lock_init(&dino_dev->dinosaur_pen);
+ 	dino_dev->hba.iommu = ccio_get_iommu(dev);
+ 
+diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
+index 7d223e9080ef..77dddee2753a 100644
+--- a/drivers/pci/hotplug/shpchp_hpc.c
++++ b/drivers/pci/hotplug/shpchp_hpc.c
+@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
+ 		if (rc) {
+ 			ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ 			ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
++		} else {
++			pci_set_master(pdev);
+ 		}
+ 
+ 		rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index 5d7fbe4e907e..296889dc193f 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -418,8 +418,8 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+ 	rec->scsi_retries = sc->retries;
+ 	rec->scsi_allowed = sc->allowed;
+ 	rec->scsi_id = sc->device->id;
+-	/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
+ 	rec->scsi_lun = (u32)sc->device->lun;
++	rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+ 	rec->host_scribble = (unsigned long)sc->host_scribble;
+ 
+ 	memcpy(rec->scsi_opcode, sc->cmnd,
+@@ -427,19 +427,32 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
+ 
+ 	if (fsf) {
+ 		rec->fsf_req_id = fsf->req_id;
++		rec->pl_len = FCP_RESP_WITH_EXT;
+ 		fcp_rsp = (struct fcp_resp_with_ext *)
+ 				&(fsf->qtcb->bottom.io.fcp_rsp);
++		/* mandatory parts of FCP_RSP IU in this SCSI record */
+ 		memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+ 		if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+ 			fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ 			rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
++			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+ 		}
+ 		if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+-			rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+-					  (u16)ZFCP_DBF_PAY_MAX_REC);
+-			zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+-					  "fcp_sns", fsf->req_id);
++			rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
+ 		}
++		/* complete FCP_RSP IU in associated PAYload record
++		 * but only if there are optional parts
++		 */
++		if (fcp_rsp->resp.fr_flags != 0)
++			zfcp_dbf_pl_write(
++				dbf, fcp_rsp,
++				/* at least one full PAY record
++				 * but not beyond hardware response field
++				 */
++				min_t(u16, max_t(u16, rec->pl_len,
++						 ZFCP_DBF_PAY_MAX_REC),
++				      FSF_FCP_RSP_SIZE),
++				"fcp_riu", fsf->req_id);
+ 	}
+ 
+ 	debug_event(dbf->scsi, 1, rec, sizeof(*rec));
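+
+The record now carries the full 64-bit LUN as two 32-bit halves
+(scsi_lun plus the new scsi_lun_64_hi).  The packing is plain hi/lo
+splitting; trivial standalone sketch:
+
+	#include <stdint.h>
+
+	static void split_lun(uint64_t lun, uint32_t *lo, uint32_t *hi)
+	{
+		*lo = (uint32_t)lun;		/* -> rec->scsi_lun */
+		*hi = (uint32_t)(lun >> 32);	/* -> rec->scsi_lun_64_hi */
+	}
+
+	static uint64_t join_lun(uint32_t lo, uint32_t hi)
+	{
+		return ((uint64_t)hi << 32) | lo;	/* trace reader side */
+	}
+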
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index 0be3d48681ae..2039e7510a30 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -196,7 +196,7 @@ enum zfcp_dbf_scsi_id {
+  * @id: unique number of recovery record type
+  * @tag: identifier string specifying the location of initiation
+  * @scsi_id: scsi device id
+- * @scsi_lun: scsi device logical unit number
++ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
+  * @scsi_result: scsi result
+  * @scsi_retries: current retry number of scsi request
+  * @scsi_allowed: allowed retries
+@@ -206,6 +206,7 @@ enum zfcp_dbf_scsi_id {
+  * @host_scribble: LLD specific data attached to SCSI request
+  * @pl_len: length of payload stored as zfcp_dbf_pay
+  * @fsf_rsp: response for fsf request
++ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
+  */
+ struct zfcp_dbf_scsi {
+ 	u8 id;
+@@ -222,6 +223,7 @@ struct zfcp_dbf_scsi {
+ 	u64 host_scribble;
+ 	u16 pl_len;
+ 	struct fcp_resp_with_ext fcp_rsp;
++	u32 scsi_lun_64_hi;
+ } __packed;
+ 
+ /**
+@@ -291,7 +293,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+ {
+ 	struct fsf_qtcb *qtcb = req->qtcb;
+ 
+-	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
++	if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
++				    ZFCP_STATUS_FSFREQ_ERROR))) {
++		zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
++
++	} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ 	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+ 
+diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
+index df2b541c8287..a2275825186f 100644
+--- a/drivers/s390/scsi/zfcp_fc.h
++++ b/drivers/s390/scsi/zfcp_fc.h
+@@ -4,7 +4,7 @@
+  * Fibre Channel related definitions and inline functions for the zfcp
+  * device driver
+  *
+- * Copyright IBM Corp. 2009
++ * Copyright IBM Corp. 2009, 2017
+  */
+ 
+ #ifndef ZFCP_FC_H
+@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ 		     !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ 		     fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ 			set_host_byte(scsi, DID_ERROR);
++	} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
++		/* FCP_DL was not sufficient for SCSI data length */
++		if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
++			set_host_byte(scsi, DID_ERROR);
+ 	}
+ }
+ 
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 21ec5e2f584c..7d77c318cc16 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -2246,7 +2246,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+ 	fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+ 
+-	if (scsi_prot_sg_count(scsi_cmnd)) {
++	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
++	    scsi_prot_sg_count(scsi_cmnd)) {
+ 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ 				       scsi_prot_sg_count(scsi_cmnd));
+ 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 75f4bfc2b98a..6de09147e791 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -224,8 +224,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 
+ 		zfcp_erp_wait(adapter);
+ 		ret = fc_block_scsi_eh(scpnt);
+-		if (ret)
++		if (ret) {
++			zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
+ 			return ret;
++		}
+ 
+ 		if (!(atomic_read(&adapter->status) &
+ 		      ZFCP_STATUS_COMMON_RUNNING)) {
+@@ -233,8 +235,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ 			return SUCCESS;
+ 		}
+ 	}
+-	if (!fsf_req)
++	if (!fsf_req) {
++		zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
+ 		return FAILED;
++	}
+ 
+ 	wait_for_completion(&fsf_req->completion);
+ 
+diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
+index 1910100638a2..00602abec0ea 100644
+--- a/drivers/scsi/isci/remote_node_context.c
++++ b/drivers/scsi/isci/remote_node_context.c
+@@ -66,6 +66,9 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
+ {
+ 	static const char * const strings[] = RNC_STATES;
+ 
++	if (state >= ARRAY_SIZE(strings))
++		return "UNKNOWN";
++
+ 	return strings[state];
+ }
+ #undef C
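+
+rnc_state_name() now bounds-checks the lookup, the usual guard for
+enum-indexed string tables.  Generic shape of the guard (hypothetical
+table, sketch only):
+
+	static const char *state_name(unsigned int state)
+	{
+		static const char * const names[] = { "idle", "busy", "dead" };
+
+		/* reject out-of-range values rather than read past the end */
+		if (state >= sizeof(names) / sizeof(names[0]))
+			return "UNKNOWN";
+		return names[state];
+	}
+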
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 0e5b3584e918..4da8963315c7 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -1068,7 +1068,10 @@ stop_rr_fcf_flogi:
+ 					lpfc_sli4_unreg_all_rpis(vport);
+ 				}
+ 			}
+-			lpfc_issue_reg_vfi(vport);
++
++			/* Do not register VFI if the driver aborted FLOGI */
++			if (!lpfc_error_lost_link(irsp))
++				lpfc_issue_reg_vfi(vport);
+ 			lpfc_nlp_put(ndlp);
+ 			goto out;
+ 		}
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index a991690167aa..b66a7a6a601d 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1709,9 +1709,12 @@ void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
+ 			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ 				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ 				if (cmd_mfi->sync_cmd &&
+-					cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
++				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
++					cmd_mfi->frame->hdr.cmd_status =
++							MFI_STAT_WRONG_STATE;
+ 					megasas_complete_cmd(instance,
+ 							     cmd_mfi, DID_OK);
++				}
+ 			}
+ 		}
+ 	} else {
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index c1b2e86839ae..e9cd3013dcd0 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		return -EINVAL;
+ 	if (start > ha->optrom_size)
+ 		return -EINVAL;
++	if (size > ha->optrom_size - start)
++		size = ha->optrom_size - start;
+ 
+ 	mutex_lock(&ha->optrom_mutex);
+ 	switch (val) {
+@@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		}
+ 
+ 		ha->optrom_region_start = start;
+-		ha->optrom_region_size = start + size > ha->optrom_size ?
+-		    ha->optrom_size - start : size;
++		ha->optrom_region_size = start + size;
+ 
+ 		ha->optrom_state = QLA_SREADING;
+ 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+@@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ 		}
+ 
+ 		ha->optrom_region_start = start;
+-		ha->optrom_region_size = start + size > ha->optrom_size ?
+-		    ha->optrom_size - start : size;
++		ha->optrom_region_size = start + size;
+ 
+ 		ha->optrom_state = QLA_SWRITING;
+ 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index c94191369452..fbdba7925723 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -133,7 +133,7 @@ struct sg_device;		/* forward declarations */
+ struct sg_fd;
+ 
+ typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */
+-	struct sg_request *nextrp;	/* NULL -> tail request (slist) */
++	struct list_head entry;	/* list entry */
+ 	struct sg_fd *parentfp;	/* NULL -> not in use */
+ 	Sg_scatter_hold data;	/* hold buffer, perhaps scatter list */
+ 	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
+@@ -153,11 +153,11 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
+ 	struct sg_device *parentdp;	/* owning device */
+ 	wait_queue_head_t read_wait;	/* queue read until command done */
+ 	rwlock_t rq_list_lock;	/* protect access to list in req_arr */
++	struct mutex f_mutex;	/* protect against changes in this fd */
+ 	int timeout;		/* defaults to SG_DEFAULT_TIMEOUT      */
+ 	int timeout_user;	/* defaults to SG_DEFAULT_TIMEOUT_USER */
+ 	Sg_scatter_hold reserve;	/* buffer held for this file descriptor */
+-	unsigned save_scat_len;	/* original length of trunc. scat. element */
+-	Sg_request *headrp;	/* head of request slist, NULL->empty */
++	struct list_head rq_list; /* head of request list */
+ 	struct fasync_struct *async_qp;	/* used by asynchronous notification */
+ 	Sg_request req_arr[SG_MAX_QUEUE];	/* used as singly-linked list */
+ 	char low_dma;		/* as in parent but possibly overridden to 1 */
+@@ -166,6 +166,7 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */
+ 	unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
+ 	char keep_orphan;	/* 0 -> drop orphan (def), 1 -> keep for read() */
+ 	char mmap_called;	/* 0 -> mmap() never called on this fd */
++	char res_in_use;	/* 1 -> 'reserve' array in use */
+ 	struct kref f_ref;
+ 	struct execute_work ew;
+ } Sg_fd;
+@@ -209,7 +210,6 @@ static void sg_remove_sfp(struct kref *);
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+-static int sg_res_in_use(Sg_fd * sfp);
+ static Sg_device *sg_get_dev(int dev);
+ static void sg_device_destroy(struct kref *kref);
+ 
+@@ -625,6 +625,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	}
+ 	buf += SZ_SG_HEADER;
+ 	__get_user(opcode, buf);
++	mutex_lock(&sfp->f_mutex);
+ 	if (sfp->next_cmd_len > 0) {
+ 		cmd_size = sfp->next_cmd_len;
+ 		sfp->next_cmd_len = 0;	/* reset so only this write() effected */
+@@ -633,6 +634,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 		if ((opcode >= 0xc0) && old_hdr.twelve_byte)
+ 			cmd_size = 12;
+ 	}
++	mutex_unlock(&sfp->f_mutex);
+ 	SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+ 		"sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
+ /* Determine buffer size.  */
+@@ -732,7 +734,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+ 			sg_remove_request(sfp, srp);
+ 			return -EINVAL;	/* either MMAP_IO or DIRECT_IO (not both) */
+ 		}
+-		if (sg_res_in_use(sfp)) {
++		if (sfp->res_in_use) {
+ 			sg_remove_request(sfp, srp);
+ 			return -EBUSY;	/* reserve buffer already being used */
+ 		}
+@@ -831,6 +833,39 @@ static int max_sectors_bytes(struct request_queue *q)
+ 	return max_sectors << 9;
+ }
+ 
++static void
++sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
++{
++	Sg_request *srp;
++	int val;
++	unsigned int ms;
++
++	val = 0;
++	list_for_each_entry(srp, &sfp->rq_list, entry) {
++		if (val >= SG_MAX_QUEUE)
++			break;
++		rinfo[val].req_state = srp->done + 1;
++		rinfo[val].problem =
++			srp->header.masked_status &
++			srp->header.host_status &
++			srp->header.driver_status;
++		if (srp->done)
++			rinfo[val].duration =
++				srp->header.duration;
++		else {
++			ms = jiffies_to_msecs(jiffies);
++			rinfo[val].duration =
++				(ms > srp->header.duration) ?
++				(ms - srp->header.duration) : 0;
++		}
++		rinfo[val].orphan = srp->orphan;
++		rinfo[val].sg_io_owned = srp->sg_io_owned;
++		rinfo[val].pack_id = srp->header.pack_id;
++		rinfo[val].usr_ptr = srp->header.usr_ptr;
++		val++;
++	}
++}
++
+ static long
+ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ {
+@@ -896,7 +931,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 			return result;
+ 		if (val) {
+ 			sfp->low_dma = 1;
+-			if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
++			if ((0 == sfp->low_dma) && !sfp->res_in_use) {
+ 				val = (int) sfp->reserve.bufflen;
+ 				sg_remove_scat(sfp, &sfp->reserve);
+ 				sg_build_reserve(sfp, val);
+@@ -942,7 +977,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 		if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
+ 			return -EFAULT;
+ 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-		for (srp = sfp->headrp; srp; srp = srp->nextrp) {
++		list_for_each_entry(srp, &sfp->rq_list, entry) {
+ 			if ((1 == srp->done) && (!srp->sg_io_owned)) {
+ 				read_unlock_irqrestore(&sfp->rq_list_lock,
+ 						       iflags);
+@@ -955,7 +990,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 		return 0;
+ 	case SG_GET_NUM_WAITING:
+ 		read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-		for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
++		val = 0;
++		list_for_each_entry(srp, &sfp->rq_list, entry) {
+ 			if ((1 == srp->done) && (!srp->sg_io_owned))
+ 				++val;
+ 		}
+@@ -971,12 +1007,18 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+                         return -EINVAL;
+ 		val = min_t(int, val,
+ 			    max_sectors_bytes(sdp->device->request_queue));
++		mutex_lock(&sfp->f_mutex);
+ 		if (val != sfp->reserve.bufflen) {
+-			if (sg_res_in_use(sfp) || sfp->mmap_called)
++			if (sfp->mmap_called ||
++			    sfp->res_in_use) {
++				mutex_unlock(&sfp->f_mutex);
+ 				return -EBUSY;
++			}
++
+ 			sg_remove_scat(sfp, &sfp->reserve);
+ 			sg_build_reserve(sfp, val);
+ 		}
++		mutex_unlock(&sfp->f_mutex);
+ 		return 0;
+ 	case SG_GET_RESERVED_SIZE:
+ 		val = min_t(int, sfp->reserve.bufflen,
+@@ -1017,42 +1059,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ 			return -EFAULT;
+ 		else {
+ 			sg_req_info_t *rinfo;
+-			unsigned int ms;
+ 
+-			rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+-								GFP_KERNEL);
++			rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
++					GFP_KERNEL);
+ 			if (!rinfo)
+ 				return -ENOMEM;
+ 			read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-			for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
+-			     ++val, srp = srp ? srp->nextrp : srp) {
+-				memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
+-				if (srp) {
+-					rinfo[val].req_state = srp->done + 1;
+-					rinfo[val].problem =
+-					    srp->header.masked_status & 
+-					    srp->header.host_status & 
+-					    srp->header.driver_status;
+-					if (srp->done)
+-						rinfo[val].duration =
+-							srp->header.duration;
+-					else {
+-						ms = jiffies_to_msecs(jiffies);
+-						rinfo[val].duration =
+-						    (ms > srp->header.duration) ?
+-						    (ms - srp->header.duration) : 0;
+-					}
+-					rinfo[val].orphan = srp->orphan;
+-					rinfo[val].sg_io_owned =
+-							srp->sg_io_owned;
+-					rinfo[val].pack_id =
+-							srp->header.pack_id;
+-					rinfo[val].usr_ptr =
+-							srp->header.usr_ptr;
+-				}
+-			}
++			sg_fill_request_table(sfp, rinfo);
+ 			read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-			result = __copy_to_user(p, rinfo, 
++			result = __copy_to_user(p, rinfo,
+ 						SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+ 			result = result ? -EFAULT : 0;
+ 			kfree(rinfo);
+@@ -1158,7 +1173,7 @@ sg_poll(struct file *filp, poll_table * wait)
+ 		return POLLERR;
+ 	poll_wait(filp, &sfp->read_wait, wait);
+ 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
++	list_for_each_entry(srp, &sfp->rq_list, entry) {
+ 		/* if any read waiting, flag it */
+ 		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
+ 			res = POLLIN | POLLRDNORM;
+@@ -1239,6 +1254,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	unsigned long req_sz, len, sa;
+ 	Sg_scatter_hold *rsv_schp;
+ 	int k, length;
++	int ret = 0;
+ 
+ 	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
+ 		return -ENXIO;
+@@ -1249,8 +1265,11 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	if (vma->vm_pgoff)
+ 		return -EINVAL;	/* want no offset */
+ 	rsv_schp = &sfp->reserve;
+-	if (req_sz > rsv_schp->bufflen)
+-		return -ENOMEM;	/* cannot map more than reserved buffer */
++	mutex_lock(&sfp->f_mutex);
++	if (req_sz > rsv_schp->bufflen) {
++		ret = -ENOMEM;	/* cannot map more than reserved buffer */
++		goto out;
++	}
+ 
+ 	sa = vma->vm_start;
+ 	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
+@@ -1264,7 +1283,9 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_private_data = sfp;
+ 	vma->vm_ops = &sg_mmap_vm_ops;
+-	return 0;
++out:
++	mutex_unlock(&sfp->f_mutex);
++	return ret;
+ }
+ 
+ static void
+@@ -1731,13 +1752,25 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 		md = &map_data;
+ 
+ 	if (md) {
+-		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
++		mutex_lock(&sfp->f_mutex);
++		if (dxfer_len <= rsv_schp->bufflen &&
++		    !sfp->res_in_use) {
++			sfp->res_in_use = 1;
+ 			sg_link_reserve(sfp, srp, dxfer_len);
+-		else {
++		} else if (hp->flags & SG_FLAG_MMAP_IO) {
++			res = -EBUSY; /* sfp->res_in_use == 1 */
++			if (dxfer_len > rsv_schp->bufflen)
++				res = -ENOMEM;
++			mutex_unlock(&sfp->f_mutex);
++			return res;
++		} else {
+ 			res = sg_build_indirect(req_schp, sfp, dxfer_len);
+-			if (res)
++			if (res) {
++				mutex_unlock(&sfp->f_mutex);
+ 				return res;
++			}
+ 		}
++		mutex_unlock(&sfp->f_mutex);
+ 
+ 		md->pages = req_schp->pages;
+ 		md->page_order = req_schp->page_order;
+@@ -2026,8 +2059,9 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
+ 	req_schp->pages = NULL;
+ 	req_schp->page_order = 0;
+ 	req_schp->sglist_len = 0;
+-	sfp->save_scat_len = 0;
+ 	srp->res_used = 0;
++	/* Called without mutex lock to avoid deadlock */
++	sfp->res_in_use = 0;
+ }
+ 
+ static Sg_request *
+@@ -2037,7 +2071,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+ 	unsigned long iflags;
+ 
+ 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
++	list_for_each_entry(resp, &sfp->rq_list, entry) {
+ 		/* look for requests that are ready + not SG_IO owned */
+ 		if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ 		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
+@@ -2055,70 +2089,45 @@ sg_add_request(Sg_fd * sfp)
+ {
+ 	int k;
+ 	unsigned long iflags;
+-	Sg_request *resp;
+ 	Sg_request *rp = sfp->req_arr;
+ 
+ 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	resp = sfp->headrp;
+-	if (!resp) {
+-		memset(rp, 0, sizeof (Sg_request));
+-		rp->parentfp = sfp;
+-		resp = rp;
+-		sfp->headrp = resp;
+-	} else {
+-		if (0 == sfp->cmd_q)
+-			resp = NULL;	/* command queuing disallowed */
+-		else {
+-			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+-				if (!rp->parentfp)
+-					break;
+-			}
+-			if (k < SG_MAX_QUEUE) {
+-				memset(rp, 0, sizeof (Sg_request));
+-				rp->parentfp = sfp;
+-				while (resp->nextrp)
+-					resp = resp->nextrp;
+-				resp->nextrp = rp;
+-				resp = rp;
+-			} else
+-				resp = NULL;
++	if (!list_empty(&sfp->rq_list)) {
++		if (!sfp->cmd_q)
++			goto out_unlock;
++
++		for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
++			if (!rp->parentfp)
++				break;
+ 		}
++		if (k >= SG_MAX_QUEUE)
++			goto out_unlock;
+ 	}
+-	if (resp) {
+-		resp->nextrp = NULL;
+-		resp->header.duration = jiffies_to_msecs(jiffies);
+-	}
++	memset(rp, 0, sizeof (Sg_request));
++	rp->parentfp = sfp;
++	rp->header.duration = jiffies_to_msecs(jiffies);
++	list_add_tail(&rp->entry, &sfp->rq_list);
+ 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-	return resp;
++	return rp;
++out_unlock:
++	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++	return NULL;
+ }
+ 
+ /* Return of 1 for found; 0 for not found */
+ static int
+ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
+ {
+-	Sg_request *prev_rp;
+-	Sg_request *rp;
+ 	unsigned long iflags;
+ 	int res = 0;
+ 
+-	if ((!sfp) || (!srp) || (!sfp->headrp))
++	if (!sfp || !srp || list_empty(&sfp->rq_list))
+ 		return res;
+ 	write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	prev_rp = sfp->headrp;
+-	if (srp == prev_rp) {
+-		sfp->headrp = prev_rp->nextrp;
+-		prev_rp->parentfp = NULL;
++	if (!list_empty(&srp->entry)) {
++		list_del(&srp->entry);
++		srp->parentfp = NULL;
+ 		res = 1;
+-	} else {
+-		while ((rp = prev_rp->nextrp)) {
+-			if (srp == rp) {
+-				prev_rp->nextrp = rp->nextrp;
+-				rp->parentfp = NULL;
+-				res = 1;
+-				break;
+-			}
+-			prev_rp = rp;
+-		}
+ 	}
+ 	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ 	return res;
+@@ -2137,8 +2146,9 @@ sg_add_sfp(Sg_device * sdp)
+ 
+ 	init_waitqueue_head(&sfp->read_wait);
+ 	rwlock_init(&sfp->rq_list_lock);
+-
++	INIT_LIST_HEAD(&sfp->rq_list);
+ 	kref_init(&sfp->f_ref);
++	mutex_init(&sfp->f_mutex);
+ 	sfp->timeout = SG_DEFAULT_TIMEOUT;
+ 	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+ 	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2177,10 +2187,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ {
+ 	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ 	struct sg_device *sdp = sfp->parentdp;
++	Sg_request *srp;
+ 
+ 	/* Cleanup any responses which were never read(). */
+-	while (sfp->headrp)
+-		sg_finish_rem_req(sfp->headrp);
++	while (!list_empty(&sfp->rq_list)) {
++		srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
++		sg_finish_rem_req(srp);
++	}
+ 
+ 	if (sfp->reserve.bufflen > 0) {
+ 		SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+@@ -2214,20 +2227,6 @@ sg_remove_sfp(struct kref *kref)
+ 	schedule_work(&sfp->ew.work);
+ }
+ 
+-static int
+-sg_res_in_use(Sg_fd * sfp)
+-{
+-	const Sg_request *srp;
+-	unsigned long iflags;
+-
+-	read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-	for (srp = sfp->headrp; srp; srp = srp->nextrp)
+-		if (srp->res_used)
+-			break;
+-	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-	return srp ? 1 : 0;
+-}
+-
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int
+ sg_idr_max_id(int id, void *p, void *data)
+@@ -2597,7 +2596,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
+ /* must be called while holding sg_index_lock */
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ {
+-	int k, m, new_interface, blen, usg;
++	int k, new_interface, blen, usg;
+ 	Sg_request *srp;
+ 	Sg_fd *fp;
+ 	const sg_io_hdr_t *hp;
+@@ -2617,13 +2616,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ 		seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
+ 			   (int) fp->cmd_q, (int) fp->force_packid,
+ 			   (int) fp->keep_orphan);
+-		for (m = 0, srp = fp->headrp;
+-				srp != NULL;
+-				++m, srp = srp->nextrp) {
++		list_for_each_entry(srp, &fp->rq_list, entry) {
+ 			hp = &srp->header;
+ 			new_interface = (hp->interface_id == '\0') ? 0 : 1;
+ 			if (srp->res_used) {
+-				if (new_interface && 
++				if (new_interface &&
+ 				    (SG_FLAG_MMAP_IO & hp->flags))
+ 					cp = "     mmap>> ";
+ 				else
+@@ -2654,7 +2651,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ 			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
+ 				   (int) srp->data.cmd_opcode);
+ 		}
+-		if (0 == m)
++		if (list_empty(&fp->rq_list))
+ 			seq_puts(s, "     No requests active\n");
+ 		read_unlock(&fp->rq_list_lock);
+ 	}
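
The sg hunks above replace the driver's hand-rolled headrp/nextrp singly
linked list with the kernel's intrusive list_head API (INIT_LIST_HEAD,
list_add_tail, list_for_each_entry, list_del). A minimal userspace sketch
of the same intrusive-list pattern follows; the struct names are
illustrative only and the kernel macros are re-implemented in simplified
form:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	e->next = e->prev = e;	/* like list_del_init(): safe to test later */
}

struct request {
	int id;
	struct list_head entry;	/* links this request into rq_list */
};

int main(void)
{
	struct list_head rq_list;
	struct request r1 = { .id = 1 }, r2 = { .id = 2 };
	struct list_head *pos;

	INIT_LIST_HEAD(&rq_list);
	list_add_tail(&r1.entry, &rq_list);	/* sg_add_request() analogue */
	list_add_tail(&r2.entry, &rq_list);

	/* list_for_each_entry() analogue, as in sg_get_rq_mark() */
	for (pos = rq_list.next; pos != &rq_list; pos = pos->next)
		printf("request %d\n",
		       container_of(pos, struct request, entry)->id);

	list_del(&r1.entry);	/* sg_remove_request() analogue */
	return 0;
}
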
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index 6c52d1411a73..51a0cc047b5f 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1699,6 +1699,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ 	ret = storvsc_do_io(dev, cmd_request);
+ 
+ 	if (ret == -EAGAIN) {
++		if (payload_sz > sizeof(cmd_request->mpb))
++			kfree(payload);
+ 		/* no more space */
+ 
+ 		if (cmd_request->bounce_sgl_count)
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index 4273e34ff3ea..9af6ce2b6782 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -50,6 +50,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ 	{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
+ 	{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++	{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ 	{}	/* Terminating entry */
+ };
+ 
+diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
+index 8a5d6a8e780f..ba32ac8d1747 100644
+--- a/drivers/staging/rts5208/rtsx_scsi.c
++++ b/drivers/staging/rts5208/rtsx_scsi.c
+@@ -414,7 +414,7 @@ void set_sense_data(struct rtsx_chip *chip, unsigned int lun, u8 err_code,
+ 	sense->ascq = ascq;
+ 	if (sns_key_info0 != 0) {
+ 		sense->sns_key_info[0] = SKSV | sns_key_info0;
+-		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
++		sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
+ 		sense->sns_key_info[2] = sns_key_info1 & 0x0f;
+ 	}
+ }
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index aa9fad4f35b9..25c15910af77 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -355,6 +355,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
+ }
+ EXPORT_SYMBOL(tty_insert_flip_string_flags);
+ 
++/**
++ *	__tty_insert_flip_char   -	Add one character to the tty buffer
++ *	@port: tty port
++ *	@ch: character
++ *	@flag: flag byte
++ *
++ *	Queue a single byte to the tty buffering, with an optional flag.
++ *	This is the slow path of tty_insert_flip_char.
++ */
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
++{
++	struct tty_buffer *tb;
++	int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
++
++	if (!__tty_buffer_request_room(port, 1, flags))
++		return 0;
++
++	tb = port->buf.tail;
++	if (~tb->flags & TTYB_NORMAL)
++		*flag_buf_ptr(tb, tb->used) = flag;
++	*char_buf_ptr(tb, tb->used++) = ch;
++
++	return 1;
++}
++EXPORT_SYMBOL(__tty_insert_flip_char);
++
+ /**
+  *	tty_schedule_flip	-	push characters to ldisc
+  *	@port: tty port to push from
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index eb8fdc75843b..a235e9ab932c 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -519,6 +519,8 @@ static void async_completed(struct urb *urb)
+ 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
+ 			as->status != -ENOENT)
+ 		cancel_bulk_urbs(ps, as->bulk_addr);
++
++	wake_up(&ps->wait);
+ 	spin_unlock(&ps->lock);
+ 
+ 	if (signr) {
+@@ -526,8 +528,6 @@ static void async_completed(struct urb *urb)
+ 		put_pid(pid);
+ 		put_cred(cred);
+ 	}
+-
+-	wake_up(&ps->wait);
+ }
+ 
+ static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 574da2b4529c..82806e311202 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -57,8 +57,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Microsoft LifeCam-VX700 v2.0 */
+ 	{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+-	/* Logitech HD Pro Webcams C920 and C930e */
++	/* Logitech HD Pro Webcams C920, C920-C and C930e */
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
+ 	/* Logitech ConferenceCam CC3000e */
+@@ -217,6 +218,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Corsair Strafe RGB */
++	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Acer C120 LED Projector */
+ 	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+ 
+diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
+index 2776cfe64c09..ef9cf4a21afe 100644
+--- a/drivers/usb/core/usb-acpi.c
++++ b/drivers/usb/core/usb-acpi.c
+@@ -127,6 +127,22 @@ out:
+  */
+ #define USB_ACPI_LOCATION_VALID (1 << 31)
+ 
++static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
++					      int raw)
++{
++	struct acpi_device *adev;
++
++	if (!parent)
++		return NULL;
++
++	list_for_each_entry(adev, &parent->children, node) {
++		if (acpi_device_adr(adev) == raw)
++			return adev;
++	}
++
++	return acpi_find_child_device(parent, raw, false);
++}
++
+ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ {
+ 	struct usb_device *udev;
+@@ -174,8 +190,10 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ 			int raw;
+ 
+ 			raw = usb_hcd_find_raw_port_number(hcd, port1);
+-			adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
+-					raw, false);
++
++			adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
++						  raw);
++
+ 			if (!adev)
+ 				return NULL;
+ 		} else {
+@@ -186,7 +204,9 @@ static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+ 				return NULL;
+ 
+ 			acpi_bus_get_device(parent_handle, &adev);
+-			adev = acpi_find_child_device(adev, port1, false);
++
++			adev = usb_acpi_find_port(adev, port1);
++
+ 			if (!adev)
+ 				return NULL;
+ 		}
+diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
+index 03b9a372636f..1fc6f478a02c 100644
+--- a/drivers/usb/host/pci-quirks.c
++++ b/drivers/usb/host/pci-quirks.c
+@@ -133,29 +133,30 @@ static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB700;
+ 		else if (rev >= 0x40 && rev <= 0x4f)
+ 			pinfo->sb_type.gen = AMD_CHIPSET_SB800;
+-	}
+-	pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+-					  0x145c, NULL);
+-	if (pinfo->smbus_dev) {
+-		pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ 	} else {
+ 		pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 				PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
+ 
+-		if (!pinfo->smbus_dev) {
+-			pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+-			return 0;
++		if (pinfo->smbus_dev) {
++			rev = pinfo->smbus_dev->revision;
++			if (rev >= 0x11 && rev <= 0x14)
++				pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
++			else if (rev >= 0x15 && rev <= 0x18)
++				pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
++			else if (rev >= 0x39 && rev <= 0x3a)
++				pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
++		} else {
++			pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
++							  0x145c, NULL);
++			if (pinfo->smbus_dev) {
++				rev = pinfo->smbus_dev->revision;
++				pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
++			} else {
++				pinfo->sb_type.gen = NOT_AMD_CHIPSET;
++				return 0;
++			}
+ 		}
+-
+-		rev = pinfo->smbus_dev->revision;
+-		if (rev >= 0x11 && rev <= 0x14)
+-			pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+-		else if (rev >= 0x15 && rev <= 0x18)
+-			pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+-		else if (rev >= 0x39 && rev <= 0x3a)
+-			pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ 	}
+-
+ 	pinfo->sb_type.rev = rev;
+ 	return 1;
+ }
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f08b35819666..a0fbc4e5a272 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2020,6 +2020,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) },			/* D-Link DWM-157 C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),			/* D-Link DWM-222 */
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index a40b454aea44..4f6a3afc45f4 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -1593,6 +1593,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
+ 			goto restore;
+ 		}
+ 
++		btrfs_qgroup_rescan_resume(fs_info);
++
+ 		if (!fs_info->uuid_root) {
+ 			btrfs_info(fs_info, "creating UUID tree");
+ 			ret = btrfs_create_uuid_tree(fs_info);
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 26a3b389a265..297e05c9e2b0 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -183,15 +183,20 @@ cifs_bp_rename_retry:
+ }
+ 
+ /*
++ * Don't allow path components longer than the server max.
+  * Don't allow the separator character in a path component.
+  * The VFS will not allow "/", but "\" is allowed by posix.
+  */
+ static int
+-check_name(struct dentry *direntry)
++check_name(struct dentry *direntry, struct cifs_tcon *tcon)
+ {
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ 	int i;
+ 
++	if (unlikely(direntry->d_name.len >
++		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
++		return -ENAMETOOLONG;
++
+ 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ 		for (i = 0; i < direntry->d_name.len; i++) {
+ 			if (direntry->d_name.name[i] == '\\') {
+@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ 		return finish_no_open(file, res);
+ 	}
+ 
+-	rc = check_name(direntry);
+-	if (rc)
+-		return rc;
+-
+ 	xid = get_xid();
+ 
+ 	cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ 	}
+ 
+ 	tcon = tlink_tcon(tlink);
++
++	rc = check_name(direntry, tcon);
++	if (rc)
++		goto out_free_xid;
++
+ 	server = tcon->ses->server;
+ 
+ 	if (server->ops->new_lease_key)
+@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
+ 	}
+ 	pTcon = tlink_tcon(tlink);
+ 
+-	rc = check_name(direntry);
++	rc = check_name(direntry, pTcon);
+ 	if (rc)
+ 		goto lookup_out;
+ 
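
The check_name() change above adds an upper bound on path-component
length, taken from the server's advertised limit, before walking the name
for illegal separator characters. A small standalone sketch of that
validation order, assuming a hypothetical max_component limit in place of
the tcon attribute:

#include <stdio.h>
#include <string.h>
#include <errno.h>

static int check_name(const char *name, size_t max_component)
{
	if (strlen(name) > max_component)
		return -ENAMETOOLONG;	/* reject length before content */
	if (strchr(name, '\\'))		/* separator not allowed in a component */
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_name("notes.txt", 255));	/* 0 */
	printf("%d\n", check_name("a\\b", 255));	/* -EINVAL */
	return 0;
}
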
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index f8ae041d60fe..2f6f164c83ab 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2554,8 +2554,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
+ 	kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
+ 			  le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
+ 	kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
+-	kst->f_bfree  = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
+-	kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
++	kst->f_bfree  = kst->f_bavail =
++			le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ 	return;
+ }
+ 
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 70867d54fb8b..31acb20d0b6e 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -82,8 +82,8 @@
+ 
+ #define NUMBER_OF_SMB2_COMMANDS	0x0013
+ 
+-/* BB FIXME - analyze following length BB */
+-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
++/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
++#define MAX_SMB2_HDR_SIZE 0x00b0
+ 
+ #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+ 
+diff --git a/fs/dlm/user.c b/fs/dlm/user.c
+index fb85f32e9eca..0221731a9462 100644
+--- a/fs/dlm/user.c
++++ b/fs/dlm/user.c
+@@ -355,6 +355,10 @@ static int dlm_device_register(struct dlm_ls *ls, char *name)
+ 	error = misc_register(&ls->ls_device);
+ 	if (error) {
+ 		kfree(ls->ls_device.name);
++		/* this has to be set to NULL
++		 * to avoid a double-free in dlm_device_deregister
++		 */
++		ls->ls_device.name = NULL;
+ 	}
+ fail:
+ 	return error;
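
The dlm fix above (like the tipc_nl_compat_dumpit() fix later in this
patch) applies one rule: after freeing a stored pointer on an error path,
clear it so a later cleanup path cannot free it a second time. A toy
illustration with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct device { char *name; };

static void register_device(struct device *d, int fail)
{
	d->name = malloc(16);
	if (fail) {
		free(d->name);
		d->name = NULL;	/* the fix: deregister won't double-free */
	}
}

static void deregister_device(struct device *d)
{
	free(d->name);		/* free(NULL) is a no-op */
	d->name = NULL;
}

int main(void)
{
	struct device d = { NULL };

	register_device(&d, 1);	/* failure path already freed the name */
	deregister_device(&d);	/* safe: no double free */
	printf("clean shutdown\n");
	return 0;
}
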
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index 1e009cad8d5c..1b08556776ce 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -518,8 +518,13 @@ static void ep_remove_wait_queue(struct eppoll_entry *pwq)
+ 	wait_queue_head_t *whead;
+ 
+ 	rcu_read_lock();
+-	/* If it is cleared by POLLFREE, it should be rcu-safe */
+-	whead = rcu_dereference(pwq->whead);
++	/*
++	 * If it is cleared by POLLFREE, it should be rcu-safe.
++	 * If we read NULL we need a barrier paired with
++	 * smp_store_release() in ep_poll_callback(), otherwise
++	 * we rely on whead->lock.
++	 */
++	whead = smp_load_acquire(&pwq->whead);
+ 	if (whead)
+ 		remove_wait_queue(whead, &pwq->wait);
+ 	rcu_read_unlock();
+@@ -1003,17 +1008,6 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
+ 	struct epitem *epi = ep_item_from_wait(wait);
+ 	struct eventpoll *ep = epi->ep;
+ 
+-	if ((unsigned long)key & POLLFREE) {
+-		ep_pwq_from_wait(wait)->whead = NULL;
+-		/*
+-		 * whead = NULL above can race with ep_remove_wait_queue()
+-		 * which can do another remove_wait_queue() after us, so we
+-		 * can't use __remove_wait_queue(). whead->lock is held by
+-		 * the caller.
+-		 */
+-		list_del_init(&wait->task_list);
+-	}
+-
+ 	spin_lock_irqsave(&ep->lock, flags);
+ 
+ 	/*
+@@ -1078,6 +1072,23 @@ out_unlock:
+ 	if (pwake)
+ 		ep_poll_safewake(&ep->poll_wait);
+ 
++
++	if ((unsigned long)key & POLLFREE) {
++		/*
++		 * If we race with ep_remove_wait_queue() it can miss
++		 * ->whead = NULL and do another remove_wait_queue() after
++		 * us, so we can't use __remove_wait_queue().
++		 */
++		list_del_init(&wait->task_list);
++		/*
++		 * ->whead != NULL protects us from the race with ep_free()
++		 * or ep_remove(), ep_remove_wait_queue() takes whead->lock
++		 * held by the caller. Once we nullify it, nothing protects
++		 * ep/epi or even wait.
++		 */
++		smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
++	}
++
+ 	return 1;
+ }
+ 
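
The eventpoll change above pairs smp_store_release() in ep_poll_callback()
with smp_load_acquire() in ep_remove_wait_queue(): everything done before
the NULL pointer is published is guaranteed visible to a reader that
observes NULL. A userspace analogue using C11 atomics; the whead and
teardown_done names are illustrative, not the epoll structures:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static int real_head = 42;
static int teardown_done;		/* ordinary int, ordered by the release */
static _Atomic(int *) whead = &real_head;

static void *unregister_side(void *arg)
{
	(void)arg;
	/* smp_load_acquire() analogue */
	int *h = atomic_load_explicit(&whead, memory_order_acquire);

	if (h)
		printf("still registered, head = %d\n", *h);
	else	/* acquire pairs with release: guaranteed to see 1 */
		printf("torn down, teardown_done = %d\n", teardown_done);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, unregister_side, NULL);

	teardown_done = 1;	/* finish all teardown work first ... */
	/* ... then smp_store_release() analogue: publish NULL last */
	atomic_store_explicit(&whead, NULL, memory_order_release);

	pthread_join(t, NULL);
	return 0;
}
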
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 97aa8be40175..ccc43e2f07e2 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2233,7 +2233,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ 	/* Needed for iput() to work correctly and not trash data */
+ 	sb->s_flags |= MS_ACTIVE;
+-	/* Turn on quotas so that they are updated correctly */
++	/* Turn on journaled quotas so that they are updated correctly */
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ 		if (EXT4_SB(sb)->s_qf_names[i]) {
+ 			int ret = ext4_quota_on_mount(sb, i);
+@@ -2299,9 +2299,9 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+ 		       PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+-	/* Turn quotas off */
++	/* Turn off journaled quotas if they were enabled for orphan cleanup */
+ 	for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+-		if (sb_dqopt(sb)->files[i])
++		if (EXT4_SB(sb)->s_qf_names[i] && sb_dqopt(sb)->files[i])
+ 			dquot_quota_off(sb, i);
+ 	}
+ #endif
+diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
+index 8d8ea99f2156..e195cc5e3590 100644
+--- a/fs/f2fs/recovery.c
++++ b/fs/f2fs/recovery.c
+@@ -265,7 +265,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
+ 		return 0;
+ 
+ 	/* Get the previous summary */
+-	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
++	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
+ 		struct curseg_info *curseg = CURSEG_I(sbi, i);
+ 		if (curseg->segno == segno) {
+ 			sum = curseg->sum_blk->entries[blkoff];
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 16fcfdd6011c..280cd3d9151f 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -128,7 +128,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
+ 	argp->p = page_address(argp->pagelist[0]);
+ 	argp->pagelist++;
+ 	if (argp->pagelen < PAGE_SIZE) {
+-		argp->end = argp->p + (argp->pagelen>>2);
++		argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
+ 		argp->pagelen = 0;
+ 	} else {
+ 		argp->end = argp->p + (PAGE_SIZE>>2);
+@@ -1245,9 +1245,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
+ 		argp->pagelen -= pages * PAGE_SIZE;
+ 		len -= pages * PAGE_SIZE;
+ 
+-		argp->p = (__be32 *)page_address(argp->pagelist[0]);
+-		argp->pagelist++;
+-		argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
++		next_decode_page(argp);
+ 	}
+ 	argp->p += XDR_QUADLEN(len);
+ 
+diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
+index 7c7842c85a08..530c2f9c47c7 100644
+--- a/fs/xfs/xfs_linux.h
++++ b/fs/xfs/xfs_linux.h
+@@ -376,7 +376,14 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+ #endif /* DEBUG */
+ 
+ #ifdef CONFIG_XFS_RT
+-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
++
++/*
++ * make sure we ignore the inode flag if the filesystem doesn't have a
++ * configured realtime device.
++ */
++#define XFS_IS_REALTIME_INODE(ip)			\
++	(((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) &&	\
++	 (ip)->i_mount->m_rtdev_targp)
+ #else
+ #define XFS_IS_REALTIME_INODE(ip) (0)
+ #endif
+diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
+index fc824e2828f3..5d2add1a6c96 100644
+--- a/include/asm-generic/topology.h
++++ b/include/asm-generic/topology.h
+@@ -48,7 +48,11 @@
+ #define parent_node(node)	((void)(node),0)
+ #endif
+ #ifndef cpumask_of_node
+-#define cpumask_of_node(node)	((void)node, cpu_online_mask)
++  #ifdef CONFIG_NEED_MULTIPLE_NODES
++    #define cpumask_of_node(node)	((node) == 0 ? cpu_online_mask : cpu_none_mask)
++  #else
++    #define cpumask_of_node(node)	((void)node, cpu_online_mask)
++  #endif
+ #endif
+ #ifndef pcibus_to_node
+ #define pcibus_to_node(bus)	((void)(bus), -1)
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 9b6f5dc58732..d57b902407dd 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -573,6 +573,7 @@
+ #define PCI_DEVICE_ID_AMD_CS5536_EHC    0x2095
+ #define PCI_DEVICE_ID_AMD_CS5536_UDC    0x2096
+ #define PCI_DEVICE_ID_AMD_CS5536_UOC    0x2097
++#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE    0x2092
+ #define PCI_DEVICE_ID_AMD_CS5536_IDE    0x209A
+ #define PCI_DEVICE_ID_AMD_LX_VIDEO  0x2081
+ #define PCI_DEVICE_ID_AMD_LX_AES    0x2082
+diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
+index c28dd523f96e..d43837f2ce3a 100644
+--- a/include/linux/tty_flip.h
++++ b/include/linux/tty_flip.h
+@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
+ 		unsigned char **chars, size_t size);
+ extern void tty_flip_buffer_push(struct tty_port *port);
+ void tty_schedule_flip(struct tty_port *port);
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
+ 
+ static inline int tty_insert_flip_char(struct tty_port *port,
+ 					unsigned char ch, char flag)
+@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
+ 		*char_buf_ptr(tb, tb->used++) = ch;
+ 		return 1;
+ 	}
+-	return tty_insert_flip_string_flags(port, &ch, &flag, 1);
++	return __tty_insert_flip_char(port, ch, flag);
+ }
+ 
+ static inline int tty_insert_flip_string(struct tty_port *port,
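
Together with the tty_buffer.c hunk earlier in this patch, this keeps
tty_insert_flip_char() as an inline fast path and moves the rare no-room
case into the out-of-line __tty_insert_flip_char(). A compressed userspace
sketch of that fast-path/slow-path split, with a stand-in byte FIFO
instead of real tty buffers:

#include <stdio.h>

#define BUFSZ 4

static unsigned char buf[BUFSZ];
static int used;

/* Slow path: in the kernel this would allocate a fresh tty_buffer;
 * here a flush stands in for that work. */
static int slow_insert(unsigned char ch)
{
	printf("flush: %.*s\n", used, buf);
	used = 0;
	buf[used++] = ch;
	return 1;
}

/* Fast path: mirrors the inline tty_insert_flip_char() shape. */
static inline int insert(unsigned char ch)
{
	if (used < BUFSZ) {		/* room in the tail buffer */
		buf[used++] = ch;
		return 1;
	}
	return slow_insert(ch);		/* rare case, kept out of line */
}

int main(void)
{
	const char *s = "hello";

	while (*s)
		insert((unsigned char)*s++);
	printf("pending: %.*s\n", used, buf);
	return 0;
}
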
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 530bdca19803..35fdedac3e25 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -701,8 +701,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+ 	old = *pold;
+ 	*pold = new;
+ 	if (old != NULL) {
+-		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++		unsigned int qlen = old->q.qlen;
++		unsigned int backlog = old->qstats.backlog;
++
+ 		qdisc_reset(old);
++		qdisc_tree_reduce_backlog(old, qlen, backlog);
+ 	}
+ 	sch_tree_unlock(sch);
+ 
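
The qdisc_replace() hunk above is purely an ordering fix: qdisc_reset()
zeroes q.qlen and qstats.backlog, so the counters must be snapshotted
before the reset and reported afterwards. The same mistake in miniature,
with hypothetical names:

#include <stdio.h>

struct queue { unsigned int qlen, backlog; };

static void queue_reset(struct queue *q) { q->qlen = 0; q->backlog = 0; }

static void report_drop(unsigned int qlen, unsigned int backlog)
{
	printf("dropped %u packets, %u bytes\n", qlen, backlog);
}

int main(void)
{
	struct queue old = { .qlen = 3, .backlog = 4500 };

	/* Buggy order: queue_reset(&old); report_drop(old.qlen, old.backlog);
	 * always reports zero.  Save first, then reset: */
	unsigned int qlen = old.qlen;
	unsigned int backlog = old.backlog;

	queue_reset(&old);
	report_drop(qlen, backlog);
	return 0;
}
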
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 6e30024d9aac..d53c6e284e87 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -455,13 +455,15 @@ void audit_remove_watch_rule(struct audit_krule *krule)
+ 	list_del(&krule->rlist);
+ 
+ 	if (list_empty(&watch->rules)) {
++		/*
++		 * audit_remove_watch() drops our reference to 'parent' which
++		 * can get freed. Grab our own reference to be safe.
++		 */
++		audit_get_parent(parent);
+ 		audit_remove_watch(watch);
+-
+-		if (list_empty(&parent->watches)) {
+-			audit_get_parent(parent);
++		if (list_empty(&parent->watches))
+ 			fsnotify_destroy_mark(&parent->mark, audit_watch_group);
+-			audit_put_parent(parent);
+-		}
++		audit_put_parent(parent);
+ 	}
+ }
+ 
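
The audit_watch fix above pins 'parent' with its own reference before
calling a function that may drop the last one, and releases the pin only
when finished with the object. A minimal refcounting sketch of that
grab-before-drop pattern; all names are invented:

#include <stdio.h>
#include <stdlib.h>

struct parent { int refs; };

static struct parent *get(struct parent *p) { p->refs++; return p; }

static void put(struct parent *p)
{
	if (--p->refs == 0) {
		printf("parent freed\n");
		free(p);
	}
}

/* Stand-in for audit_remove_watch(): may drop the caller's only ref. */
static void remove_watch(struct parent *p) { put(p); }

int main(void)
{
	struct parent *p = malloc(sizeof(*p));

	p->refs = 1;
	get(p);			/* pin the parent across remove_watch() */
	remove_watch(p);	/* may have dropped the last external ref */
	printf("still safe to inspect parent (refs=%d)\n", p->refs);
	put(p);			/* now drop our pin */
	return 0;
}
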
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e871080bc44e..e5553bdaf6c2 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -8102,28 +8102,27 @@ SYSCALL_DEFINE5(perf_event_open,
+ 			goto err_context;
+ 
+ 		/*
+-		 * Do not allow to attach to a group in a different
+-		 * task or CPU context:
++		 * Make sure we're both events for the same CPU;
++		 * grouping events for different CPUs is broken; since
++		 * you can never concurrently schedule them anyhow.
+ 		 */
+-		if (move_group) {
+-			/*
+-			 * Make sure we're both on the same task, or both
+-			 * per-cpu events.
+-			 */
+-			if (group_leader->ctx->task != ctx->task)
+-				goto err_context;
++		if (group_leader->cpu != event->cpu)
++			goto err_context;
+ 
+-			/*
+-			 * Make sure we're both events for the same CPU;
+-			 * grouping events for different CPUs is broken; since
+-			 * you can never concurrently schedule them anyhow.
+-			 */
+-			if (group_leader->cpu != event->cpu)
+-				goto err_context;
+-		} else {
+-			if (group_leader->ctx != ctx)
+-				goto err_context;
+-		}
++		/*
++		 * Make sure we're both on the same task, or both
++		 * per-CPU events.
++		 */
++		if (group_leader->ctx->task != ctx->task)
++			goto err_context;
++
++		/*
++		 * Do not allow to attach to a group in a different task
++		 * or CPU context. If we're moving SW events, we'll fix
++		 * this up later, so allow that.
++		 */
++		if (!move_group && group_leader->ctx != ctx)
++			goto err_context;
+ 
+ 		/*
+ 		 * Only a group leader can be exclusive or pinned
+diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
+index ec8cce259779..a25e3a11f1b3 100644
+--- a/kernel/locking/locktorture.c
++++ b/kernel/locking/locktorture.c
+@@ -630,6 +630,8 @@ static void lock_torture_cleanup(void)
+ 	else
+ 		lock_torture_print_module_parms(cxt.cur_ops,
+ 						"End of test: SUCCESS");
++	kfree(cxt.lwsa);
++	kfree(cxt.lrsa);
+ 	torture_cleanup_end();
+ }
+ 
+@@ -763,6 +765,8 @@ static int __init lock_torture_init(void)
+ 				       GFP_KERNEL);
+ 		if (reader_tasks == NULL) {
+ 			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
++			kfree(writer_tasks);
++			writer_tasks = NULL;
+ 			firsterr = -ENOMEM;
+ 			goto unwind;
+ 		}
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index eb11011b5292..06d0e5712e86 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2657,13 +2657,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 
+ 	if (!command || !ftrace_enabled) {
+ 		/*
+-		 * If these are control ops, they still need their
+-		 * per_cpu field freed. Since, function tracing is
++		 * If these are dynamic or control ops, they still
++		 * need their data freed. Since, function tracing is
+ 		 * not currently active, we can just free them
+ 		 * without synchronizing all CPUs.
+ 		 */
+-		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+-			control_ops_free(ops);
++		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
++			goto free_ops;
++
+ 		return 0;
+ 	}
+ 
+@@ -2718,6 +2719,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+ 		schedule_on_each_cpu(ftrace_sync);
+ 
++ free_ops:
+ 		arch_ftrace_trampoline_free(ops);
+ 
+ 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 591b3b4f5337..17213d74540b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5204,7 +5204,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
+ 	tracing_reset_online_cpus(&tr->trace_buffer);
+ 
+ #ifdef CONFIG_TRACER_MAX_TRACE
+-	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
++	if (tr->max_buffer.buffer)
+ 		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+ 	tracing_reset_online_cpus(&tr->max_buffer);
+ #endif
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 52adf02d7619..f186066f8b87 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1928,6 +1928,10 @@ static int create_filter(struct ftrace_event_call *call,
+ 		if (err && set_str)
+ 			append_filter_err(ps, filter);
+ 	}
++	if (err && !set_str) {
++		free_event_filter(filter);
++		filter = NULL;
++	}
+ 	create_filter_finish(ps);
+ 
+ 	*filterp = filter;
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index b0f86ea77881..ca70d11b8aa7 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
+ 		goto out_free;
+ 	if (cnt > 1) {
+ 		if (trace_selftest_test_global_cnt == 0)
+-			goto out;
++			goto out_free;
+ 	}
+ 	if (trace_selftest_test_dyn_cnt == 0)
+ 		goto out_free;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index ea06282f8a3e..dacd2e9a5b68 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -897,11 +897,6 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
+ 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
+ 	}
+ 
+-	if (vma) {
+-		up_read(&current->mm->mmap_sem);
+-		vma = NULL;
+-	}
+-
+ 	err = 0;
+ 	if (nmask) {
+ 		if (mpol_store_user_nodemask(pol)) {
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index 1641367e54ca..69f56073b337 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -484,16 +484,16 @@ static int bnep_session(void *arg)
+ 	struct net_device *dev = s->dev;
+ 	struct sock *sk = s->sock->sk;
+ 	struct sk_buff *skb;
+-	wait_queue_t wait;
++	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
+ 	BT_DBG("");
+ 
+ 	set_user_nice(current, -15);
+ 
+-	init_waitqueue_entry(&wait, current);
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+-		set_current_state(TASK_INTERRUPTIBLE);
++		/* Ensure session->terminate is updated */
++		smp_mb__before_atomic();
+ 
+ 		if (atomic_read(&s->terminate))
+ 			break;
+@@ -515,9 +515,8 @@ static int bnep_session(void *arg)
+ 				break;
+ 		netif_wake_queue(dev);
+ 
+-		schedule();
++		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ 	}
+-	__set_current_state(TASK_RUNNING);
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 
+ 	/* Cleanup session */
+@@ -663,7 +662,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
+ 	s = __bnep_get_session(req->dst);
+ 	if (s) {
+ 		atomic_inc(&s->terminate);
+-		wake_up_process(s->task);
++		wake_up_interruptible(sk_sleep(s->sock->sk));
+ 	} else
+ 		err = -ENOENT;
+ 
+diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
+index 298ed37010e6..3a39fd523e40 100644
+--- a/net/bluetooth/cmtp/core.c
++++ b/net/bluetooth/cmtp/core.c
+@@ -281,16 +281,16 @@ static int cmtp_session(void *arg)
+ 	struct cmtp_session *session = arg;
+ 	struct sock *sk = session->sock->sk;
+ 	struct sk_buff *skb;
+-	wait_queue_t wait;
++	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
+ 	BT_DBG("session %p", session);
+ 
+ 	set_user_nice(current, -15);
+ 
+-	init_waitqueue_entry(&wait, current);
+ 	add_wait_queue(sk_sleep(sk), &wait);
+ 	while (1) {
+-		set_current_state(TASK_INTERRUPTIBLE);
++		/* Ensure session->terminate is updated */
++		smp_mb__before_atomic();
+ 
+ 		if (atomic_read(&session->terminate))
+ 			break;
+@@ -307,9 +307,8 @@ static int cmtp_session(void *arg)
+ 
+ 		cmtp_process_transmit(session);
+ 
+-		schedule();
++		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ 	}
+-	__set_current_state(TASK_RUNNING);
+ 	remove_wait_queue(sk_sleep(sk), &wait);
+ 
+ 	down_write(&cmtp_session_sem);
+@@ -394,7 +393,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
+ 		err = cmtp_attach_device(session);
+ 		if (err < 0) {
+ 			atomic_inc(&session->terminate);
+-			wake_up_process(session->task);
++			wake_up_interruptible(sk_sleep(session->sock->sk));
+ 			up_write(&cmtp_session_sem);
+ 			return err;
+ 		}
+@@ -432,7 +431,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
+ 
+ 		/* Stop session thread */
+ 		atomic_inc(&session->terminate);
+-		wake_up_process(session->task);
++
++		/* Ensure session->terminate is updated */
++		smp_mb__after_atomic();
++
++		wake_up_interruptible(sk_sleep(session->sock->sk));
+ 	} else
+ 		err = -ENOENT;
+ 
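
The bnep and cmtp hunks above (and the hidp ones that follow) replace the
open-coded set_current_state()/schedule() sleep with wait_woken(), closing
the window where a wakeup arriving between testing session->terminate and
going to sleep could be lost. The closest userspace analogue is the
standard mutex-protected condition-variable loop; a sketch with invented
names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int terminate;

static void *session(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!terminate)			/* the flag test and the sleep */
		pthread_cond_wait(&cond, &lock); /* are atomic w.r.t. wakeup */
	pthread_mutex_unlock(&lock);
	printf("session: terminating\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, session, NULL);

	pthread_mutex_lock(&lock);
	terminate = 1;			/* like atomic_inc(&s->terminate) */
	pthread_cond_signal(&cond);	/* like wake_up_interruptible() */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
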
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 4a0015e16d4f..b9eb90109f7c 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -36,6 +36,7 @@
+ #define VERSION "1.2"
+ 
+ static DECLARE_RWSEM(hidp_session_sem);
++static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
+ static LIST_HEAD(hidp_session_list);
+ 
+ static unsigned char hidp_keycode[256] = {
+@@ -1067,12 +1068,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
+  * Wake up session thread and notify it to stop. This is asynchronous and
+  * returns immediately. Call this whenever a runtime error occurs and you want
+  * the session to stop.
+- * Note: wake_up_process() performs any necessary memory-barriers for us.
++ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
+  */
+ static void hidp_session_terminate(struct hidp_session *session)
+ {
+ 	atomic_inc(&session->terminate);
+-	wake_up_process(session->task);
++	wake_up_interruptible(&hidp_session_wq);
+ }
+ 
+ /*
+@@ -1179,7 +1180,9 @@ static void hidp_session_run(struct hidp_session *session)
+ 	struct sock *ctrl_sk = session->ctrl_sock->sk;
+ 	struct sock *intr_sk = session->intr_sock->sk;
+ 	struct sk_buff *skb;
++	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ 
++	add_wait_queue(&hidp_session_wq, &wait);
+ 	for (;;) {
+ 		/*
+ 		 * This thread can be woken up two ways:
+@@ -1187,12 +1190,10 @@ static void hidp_session_run(struct hidp_session *session)
+ 		 *    session->terminate flag and wakes this thread up.
+ 		 *  - Via modifying the socket state of ctrl/intr_sock. This
+ 		 *    thread is woken up by ->sk_state_changed().
+-		 *
+-		 * Note: set_current_state() performs any necessary
+-		 * memory-barriers for us.
+ 		 */
+-		set_current_state(TASK_INTERRUPTIBLE);
+ 
++		/* Ensure session->terminate is updated */
++		smp_mb__before_atomic();
+ 		if (atomic_read(&session->terminate))
+ 			break;
+ 
+@@ -1226,11 +1227,22 @@ static void hidp_session_run(struct hidp_session *session)
+ 		hidp_process_transmit(session, &session->ctrl_transmit,
+ 				      session->ctrl_sock);
+ 
+-		schedule();
++		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ 	}
++	remove_wait_queue(&hidp_session_wq, &wait);
+ 
+ 	atomic_inc(&session->terminate);
+-	set_current_state(TASK_RUNNING);
++
++	/* Ensure session->terminate is updated */
++	smp_mb__after_atomic();
++}
++
++static int hidp_session_wake_function(wait_queue_t *wait,
++				      unsigned int mode,
++				      int sync, void *key)
++{
++	wake_up_interruptible(&hidp_session_wq);
++	return false;
+ }
+ 
+ /*
+@@ -1243,7 +1255,8 @@ static void hidp_session_run(struct hidp_session *session)
+ static int hidp_session_thread(void *arg)
+ {
+ 	struct hidp_session *session = arg;
+-	wait_queue_t ctrl_wait, intr_wait;
++	DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
++	DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
+ 
+ 	BT_DBG("session %p", session);
+ 
+@@ -1253,8 +1266,6 @@ static int hidp_session_thread(void *arg)
+ 	set_user_nice(current, -15);
+ 	hidp_set_timer(session);
+ 
+-	init_waitqueue_entry(&ctrl_wait, current);
+-	init_waitqueue_entry(&intr_wait, current);
+ 	add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
+ 	add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
+ 	/* This memory barrier is paired with wq_has_sleeper(). See
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index dad419782a12..9b6b35977f48 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -57,7 +57,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ 				       u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+ 
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1462,7 +1462,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 
+@@ -2970,12 +2970,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ 	return len;
+ }
+ 
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ 	struct l2cap_conf_opt *opt = *ptr;
+ 
+ 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ 
++	if (size < L2CAP_CONF_OPT_SIZE + len)
++		return;
++
+ 	opt->type = type;
+ 	opt->len  = len;
+ 
+@@ -3000,7 +3003,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ 	*ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+ 
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ 	struct l2cap_conf_efs efs;
+ 
+@@ -3028,7 +3031,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ 	}
+ 
+ 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+-			   (unsigned long) &efs);
++			   (unsigned long) &efs, size);
+ }
+ 
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3174,11 +3177,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ 	chan->ack_win = chan->tx_win;
+ }
+ 
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ 	void *ptr = req->data;
++	void *endptr = data + data_size;
+ 	u16 size;
+ 
+ 	BT_DBG("chan %p", chan);
+@@ -3203,7 +3207,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ done:
+ 	if (chan->imtu != L2CAP_DEFAULT_MTU)
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 
+ 	switch (chan->mode) {
+ 	case L2CAP_MODE_BASIC:
+@@ -3222,7 +3226,7 @@ done:
+ 		rfc.max_pdu_size    = 0;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 		break;
+ 
+ 	case L2CAP_MODE_ERTM:
+@@ -3242,21 +3246,21 @@ done:
+ 				       L2CAP_DEFAULT_TX_WINDOW);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 
+@@ -3274,17 +3278,17 @@ done:
+ 		rfc.max_pdu_size = cpu_to_le16(size);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 	}
+@@ -3295,10 +3299,11 @@ done:
+ 	return ptr - data;
+ }
+ 
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_rsp *rsp = data;
+ 	void *ptr = rsp->data;
++	void *endptr = data + data_size;
+ 	void *req = chan->conf_req;
+ 	int len = chan->conf_len;
+ 	int type, hint, olen;
+@@ -3400,7 +3405,7 @@ done:
+ 			return -ECONNREFUSED;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 	}
+ 
+ 	if (result == L2CAP_CONF_SUCCESS) {
+@@ -3413,7 +3418,7 @@ done:
+ 			chan->omtu = mtu;
+ 			set_bit(CONF_MTU_DONE, &chan->conf_state);
+ 		}
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+ 
+ 		if (remote_efs) {
+ 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3427,7 +3432,7 @@ done:
+ 
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			} else {
+ 				/* Send PENDING Conf Rsp */
+ 				result = L2CAP_CONF_PENDING;
+@@ -3460,7 +3465,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+ 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ 				chan->remote_id = efs.id;
+@@ -3474,7 +3479,7 @@ done:
+ 					le32_to_cpu(efs.sdu_itime);
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			}
+ 			break;
+ 
+@@ -3488,7 +3493,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-					   (unsigned long) &rfc);
++					   (unsigned long) &rfc, endptr - ptr);
+ 
+ 			break;
+ 
+@@ -3510,10 +3515,11 @@ done:
+ }
+ 
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+-				void *data, u16 *result)
++				void *data, size_t size, u16 *result)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	void *ptr = req->data;
++	void *endptr = data + size;
+ 	int type, olen;
+ 	unsigned long val;
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3531,13 +3537,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ 			} else
+ 				chan->imtu = val;
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FLUSH_TO:
+ 			chan->flush_to = val;
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+-					   2, chan->flush_to);
++					   2, chan->flush_to, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_RFC:
+@@ -3551,13 +3557,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			chan->fcs = 0;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+ 			chan->ack_win = min_t(u16, val, chan->ack_win);
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+@@ -3570,7 +3576,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				return -ECONNREFUSED;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs);
++					   (unsigned long) &efs, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+@@ -3675,7 +3681,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ 		return;
+ 
+ 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-		       l2cap_build_conf_req(chan, buf), buf);
++		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 	chan->num_conf_req++;
+ }
+ 
+@@ -3883,7 +3889,7 @@ sendresp:
+ 		u8 buf[128];
+ 		set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -3961,7 +3967,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 			break;
+ 
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, req), req);
++			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ 		chan->num_conf_req++;
+ 		break;
+ 
+@@ -4073,7 +4079,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	}
+ 
+ 	/* Complete config. */
+-	len = l2cap_parse_conf_req(chan, rsp);
++	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ 	if (len < 0) {
+ 		l2cap_send_disconn_req(chan, ECONNRESET);
+ 		goto unlock;
+@@ -4107,7 +4113,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ 		u8 buf[64];
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -4167,7 +4173,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			char buf[64];
+ 
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   buf, &result);
++						   buf, sizeof(buf), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4197,7 +4203,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			/* throw out any old stored conf requests */
+ 			result = L2CAP_CONF_SUCCESS;
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   req, &result);
++						   req, sizeof(req), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4774,7 +4780,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ 				       L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 	}
+@@ -7430,7 +7436,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 				set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ 					       L2CAP_CONF_REQ,
+-					       l2cap_build_conf_req(chan, buf),
++					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ 					       buf);
+ 				chan->num_conf_req++;
+ 			}
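
Every l2cap_add_conf_opt() call above now carries the remaining buffer
space (endptr - ptr), and the helper refuses to append an option that
would not fit. A self-contained sketch of that bounds-checked TLV append
over a fixed buffer, with made-up option types:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define HDR 2	/* 1 byte type + 1 byte length */

static void add_opt(uint8_t **ptr, uint8_t type, uint8_t len,
		    const void *val, size_t remaining)
{
	if (remaining < HDR + (size_t)len)	/* the added guard */
		return;				/* drop, never overrun */
	(*ptr)[0] = type;
	(*ptr)[1] = len;
	memcpy(*ptr + HDR, val, len);
	*ptr += HDR + len;
}

int main(void)
{
	uint8_t buf[8];
	uint8_t *ptr = buf, *end = buf + sizeof(buf);
	uint16_t mtu = 672;
	uint8_t big[16] = { 0 };

	add_opt(&ptr, 0x01, sizeof(mtu), &mtu, end - ptr);	/* fits */
	add_opt(&ptr, 0x02, sizeof(big), big, end - ptr);	/* rejected */
	printf("wrote %zu of %zu bytes\n",
	       (size_t)(ptr - buf), sizeof(buf));
	return 0;
}
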
+diff --git a/net/dccp/proto.c b/net/dccp/proto.c
+index 52a94016526d..522658179cca 100644
+--- a/net/dccp/proto.c
++++ b/net/dccp/proto.c
+@@ -24,6 +24,7 @@
+ #include <net/checksum.h>
+ 
+ #include <net/inet_sock.h>
++#include <net/inet_common.h>
+ #include <net/sock.h>
+ #include <net/xfrm.h>
+ 
+@@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
+ 
+ EXPORT_SYMBOL_GPL(dccp_packet_name);
+ 
++static void dccp_sk_destruct(struct sock *sk)
++{
++	struct dccp_sock *dp = dccp_sk(sk);
++
++	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
++	dp->dccps_hc_tx_ccid = NULL;
++	inet_sock_destruct(sk);
++}
++
+ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+ {
+ 	struct dccp_sock *dp = dccp_sk(sk);
+@@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
+ 	icsk->icsk_syn_retries	= sysctl_dccp_request_retries;
+ 	sk->sk_state		= DCCP_CLOSED;
+ 	sk->sk_write_space	= dccp_write_space;
++	sk->sk_destruct		= dccp_sk_destruct;
+ 	icsk->icsk_sync_mss	= dccp_sync_mss;
+ 	dp->dccps_mss_cache	= 536;
+ 	dp->dccps_rate_last	= jiffies;
+@@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
+ {
+ 	struct dccp_sock *dp = dccp_sk(sk);
+ 
+-	/*
+-	 * DCCP doesn't use sk_write_queue, just sk_send_head
+-	 * for retransmissions
+-	 */
++	__skb_queue_purge(&sk->sk_write_queue);
+ 	if (sk->sk_send_head != NULL) {
+ 		kfree_skb(sk->sk_send_head);
+ 		sk->sk_send_head = NULL;
+@@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
+ 		dp->dccps_hc_rx_ackvec = NULL;
+ 	}
+ 	ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
+-	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+-	dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
++	dp->dccps_hc_rx_ccid = NULL;
+ 
+ 	/* clean up feature negotiation state */
+ 	dccp_feat_list_purge(&dp->dccps_featneg);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index d935c9815564..1ba4d0964042 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2220,6 +2220,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+ 	tcp_set_ca_state(sk, TCP_CA_Open);
+ 	tcp_clear_retrans(tp);
+ 	inet_csk_delack_init(sk);
++	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
++	 * issue in __tcp_select_window()
++	 */
++	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
+ 	tcp_init_send_head(sk);
+ 	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
+ 	__sk_dst_reset(sk);
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 767ee7471c9b..95f98d2444fa 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2989,8 +2989,7 @@ void tcp_rearm_rto(struct sock *sk)
+ 			/* delta may not be positive if the socket is locked
+ 			 * when the retrans timer fires and is rescheduled.
+ 			 */
+-			if (delta > 0)
+-				rto = delta;
++			rto = max(delta, 1);
+ 		}
+ 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
+ 					  TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index bde57b113009..e7a60f5de097 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -160,6 +160,12 @@ static void rt6_release(struct rt6_info *rt)
+ 		dst_free(&rt->dst);
+ }
+ 
++static void fib6_free_table(struct fib6_table *table)
++{
++	inetpeer_invalidate_tree(&table->tb6_peers);
++	kfree(table);
++}
++
+ static void fib6_link_table(struct net *net, struct fib6_table *tb)
+ {
+ 	unsigned int h;
+@@ -853,6 +859,8 @@ add:
+ 		}
+ 		nsiblings = iter->rt6i_nsiblings;
+ 		fib6_purge_rt(iter, fn, info->nl_net);
++		if (fn->rr_ptr == iter)
++			fn->rr_ptr = NULL;
+ 		rt6_release(iter);
+ 
+ 		if (nsiblings) {
+@@ -863,6 +871,8 @@ add:
+ 				if (rt6_qualify_for_ecmp(iter)) {
+ 					*ins = iter->dst.rt6_next;
+ 					fib6_purge_rt(iter, fn, info->nl_net);
++					if (fn->rr_ptr == iter)
++						fn->rr_ptr = NULL;
+ 					rt6_release(iter);
+ 					nsiblings--;
+ 				} else {
+@@ -1818,15 +1828,22 @@ out_timer:
+ 
+ static void fib6_net_exit(struct net *net)
+ {
++	unsigned int i;
++
+ 	rt6_ifdown(net, NULL);
+ 	del_timer_sync(&net->ipv6.ip6_fib_timer);
+ 
+-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+-	inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
+-	kfree(net->ipv6.fib6_local_tbl);
+-#endif
+-	inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
+-	kfree(net->ipv6.fib6_main_tbl);
++	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
++		struct hlist_head *head = &net->ipv6.fib_table_hash[i];
++		struct hlist_node *tmp;
++		struct fib6_table *tb;
++
++		hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
++			hlist_del(&tb->tb6_hlist);
++			fib6_free_table(tb);
++		}
++	}
++
+ 	kfree(net->ipv6.fib_table_hash);
+ 	kfree(net->ipv6.rt6_stats);
+ }
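
fib6_net_exit() above walks each hash chain with
hlist_for_each_entry_safe(), which caches the next node before the current
one is freed. The same idea in plain C over a toy singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *tmp;
	int i;

	for (i = 0; i < 3; i++) {
		n = malloc(sizeof(*n));
		n->id = i;
		n->next = head;
		head = n;
	}

	/* "_safe" iteration: 'tmp' survives free(n) */
	for (n = head; n; n = tmp) {
		tmp = n->next;
		printf("freeing table %d\n", n->id);
		free(n);
	}
	return 0;
}
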
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 606a07890c68..1cb68e01c301 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -261,19 +261,6 @@ out:
+ 	return pp;
+ }
+ 
+-static struct sk_buff **sit_gro_receive(struct sk_buff **head,
+-					struct sk_buff *skb)
+-{
+-	if (NAPI_GRO_CB(skb)->encap_mark) {
+-		NAPI_GRO_CB(skb)->flush = 1;
+-		return NULL;
+-	}
+-
+-	NAPI_GRO_CB(skb)->encap_mark = 1;
+-
+-	return ipv6_gro_receive(head, skb);
+-}
+-
+ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+ 	const struct net_offload *ops;
+diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
+index 292ef2e584db..7c6159b1481a 100644
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -84,7 +84,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+ 	while (offset <= packet_len) {
+ 		struct ipv6_opt_hdr *exthdr;
+-		unsigned int len;
+ 
+ 		switch (**nexthdr) {
+ 
+@@ -110,10 +109,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
+ 
+ 		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
+ 						 offset);
+-		len = ipv6_optlen(exthdr);
+-		if (len + offset >= IPV6_MAXPLEN)
++		offset += ipv6_optlen(exthdr);
++		if (offset > IPV6_MAXPLEN)
+ 			return -EINVAL;
+-		offset += len;
+ 		*nexthdr = &exthdr->nexthdr;
+ 	}
+ 
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index eca46d3d3ff3..d7637c9218bd 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -2228,7 +2228,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
+ {
+ 	struct sock *sk = sock->sk;
+ 	struct irda_sock *self = irda_sk(sk);
+-	struct irda_device_list list;
++	struct irda_device_list list = { 0 };
+ 	struct irda_device_info *discoveries;
+ 	struct irda_ias_set *	ias_opt;	/* IAS get/query params */
+ 	struct ias_object *	ias_obj;	/* Object in IAS */
+diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
+index 1a9545965c0d..531ca55f1af6 100644
+--- a/net/netfilter/nf_conntrack_extend.c
++++ b/net/netfilter/nf_conntrack_extend.c
+@@ -53,7 +53,11 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+ 
+ 	rcu_read_lock();
+ 	t = rcu_dereference(nf_ct_ext_types[id]);
+-	BUG_ON(t == NULL);
++	if (!t) {
++		rcu_read_unlock();
++		return NULL;
++	}
++
+ 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
+ 	len = off + t->len + var_alloc_len;
+ 	alloc_size = t->alloc_size + var_alloc_len;
+@@ -88,7 +92,10 @@ void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+ 
+ 	rcu_read_lock();
+ 	t = rcu_dereference(nf_ct_ext_types[id]);
+-	BUG_ON(t == NULL);
++	if (!t) {
++		rcu_read_unlock();
++		return NULL;
++	}
+ 
+ 	newoff = ALIGN(old->len, t->align);
+ 	newlen = newoff + t->len + var_alloc_len;
+@@ -186,6 +193,6 @@ void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
+ 	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
+ 	update_alloc_size(type);
+ 	mutex_unlock(&nf_ct_ext_type_mutex);
+-	rcu_barrier(); /* Wait for completion of call_rcu()'s */
++	synchronize_rcu();
+ }
+ EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index fdcced6aa71d..78d0eaf5de61 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -457,6 +457,7 @@ congestion_drop:
+ 		qdisc_drop(head, sch);
+ 
+ 		slot_queue_add(slot, skb);
++		qdisc_tree_reduce_backlog(sch, 0, delta);
+ 		return NET_XMIT_CN;
+ 	}
+ 
+@@ -488,8 +489,10 @@ enqueue:
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+ 	 */
+-	if (qlen != slot->qlen)
++	if (qlen != slot->qlen) {
++		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
+ 		return NET_XMIT_CN;
++	}
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+ 	qdisc_tree_reduce_backlog(sch, 1, dropped);
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 29fa707d61fd..2bb7240c6f8b 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -491,7 +491,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+ {
+ 	addr->sa.sa_family = AF_INET6;
+ 	addr->v6.sin6_port = port;
++	addr->v6.sin6_flowinfo = 0;
+ 	addr->v6.sin6_addr = *saddr;
++	addr->v6.sin6_scope_id = 0;
+ }
+ 
+ /* Compare addresses exactly.
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index e5ec86dd8dc1..a8dd585fcc38 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -256,13 +256,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ 	arg = nlmsg_new(0, GFP_KERNEL);
+ 	if (!arg) {
+ 		kfree_skb(msg->rep);
++		msg->rep = NULL;
+ 		return -ENOMEM;
+ 	}
+ 
+ 	err = __tipc_nl_compat_dumpit(cmd, msg, arg);
+-	if (err)
++	if (err) {
+ 		kfree_skb(msg->rep);
+-
++		msg->rep = NULL;
++	}
+ 	kfree_skb(arg);
+ 
+ 	return err;
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 155070f500aa..04a025218d13 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -3255,9 +3255,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
+ 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
+ 	struct xfrm_migrate *mp;
+ 
++	/* Stage 0 - sanity checks */
+ 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
+ 		goto out;
+ 
++	if (dir >= XFRM_POLICY_MAX) {
++		err = -EINVAL;
++		goto out;
++	}
++
+ 	/* Stage 1 - find policy */
+ 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
+ 		err = -ENOENT;
+diff --git a/sound/core/control.c b/sound/core/control.c
+index b4fe9b002512..bd01d492f46a 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1126,7 +1126,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
+ 		mutex_lock(&ue->card->user_ctl_lock);
+ 		change = ue->tlv_data_size != size;
+ 		if (!change)
+-			change = memcmp(ue->tlv_data, new_data, size);
++			change = memcmp(ue->tlv_data, new_data, size) != 0;
+ 		kfree(ue->tlv_data);
+ 		ue->tlv_data = new_data;
+ 		ue->tlv_data_size = size;
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index 8158ba354b48..b6f5f47048ba 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1530,19 +1530,14 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
+ 				      void __user *arg)
+ {
+ 	struct snd_seq_queue_info info;
+-	int result;
+ 	struct snd_seq_queue *q;
+ 
+ 	if (copy_from_user(&info, arg, sizeof(info)))
+ 		return -EFAULT;
+ 
+-	result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
+-	if (result < 0)
+-		return result;
+-
+-	q = queueptr(result);
+-	if (q == NULL)
+-		return -EINVAL;
++	q = snd_seq_queue_alloc(client->number, info.locked, info.flags);
++	if (IS_ERR(q))
++		return PTR_ERR(q);
+ 
+ 	info.queue = q->queue;
+ 	info.locked = q->locked;
+@@ -1552,7 +1547,7 @@ static int snd_seq_ioctl_create_queue(struct snd_seq_client *client,
+ 	if (! info.name[0])
+ 		snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
+ 	strlcpy(q->name, info.name, sizeof(q->name));
+-	queuefree(q);
++	snd_use_lock_free(&q->use_lock);
+ 
+ 	if (copy_to_user(arg, &info, sizeof(info)))
+ 		return -EFAULT;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index f676ae53c477..a7bd074f6c0e 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -184,22 +184,26 @@ void __exit snd_seq_queues_delete(void)
+ static void queue_use(struct snd_seq_queue *queue, int client, int use);
+ 
+ /* allocate a new queue -
+- * return queue index value or negative value for error
++ * return pointer to new queue or ERR_PTR(-errno) for error
++ * The new queue's use_lock is set to 1. It is the caller's responsibility to
++ * call snd_use_lock_free(&q->use_lock).
+  */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ {
+ 	struct snd_seq_queue *q;
+ 
+ 	q = queue_new(client, locked);
+ 	if (q == NULL)
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 	q->info_flags = info_flags;
+ 	queue_use(q, client, 1);
++	snd_use_lock_use(&q->use_lock);
+ 	if (queue_list_add(q) < 0) {
++		snd_use_lock_free(&q->use_lock);
+ 		queue_delete(q);
+-		return -ENOMEM;
++		return ERR_PTR(-ENOMEM);
+ 	}
+-	return q->queue;
++	return q;
+ }
+ 
+ /* delete a queue - queue must be owned by the client */
+diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
+index 30c8111477f6..719093489a2c 100644
+--- a/sound/core/seq/seq_queue.h
++++ b/sound/core/seq/seq_queue.h
+@@ -71,7 +71,7 @@ void snd_seq_queues_delete(void);
+ 
+ 
+ /* create new queue (constructor) */
+-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
++struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+ 
+ /* delete queue (destructor) */
+ int snd_seq_queue_delete(int client, int queueid);
+diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
+index ffc67fd80c23..58e59cd3c95c 100644
+--- a/sound/isa/msnd/msnd_midi.c
++++ b/sound/isa/msnd/msnd_midi.c
+@@ -120,24 +120,24 @@ void snd_msndmidi_input_read(void *mpuv)
+ 	unsigned long flags;
+ 	struct snd_msndmidi *mpu = mpuv;
+ 	void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
++	u16 head, tail, size;
+ 
+ 	spin_lock_irqsave(&mpu->input_lock, flags);
+-	while (readw(mpu->dev->MIDQ + JQS_wTail) !=
+-	       readw(mpu->dev->MIDQ + JQS_wHead)) {
+-		u16 wTmp, val;
+-		val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
+-
+-			if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
+-				     &mpu->mode))
+-				snd_rawmidi_receive(mpu->substream_input,
+-						    (unsigned char *)&val, 1);
+-
+-		wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
+-		if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
+-			writew(0,  mpu->dev->MIDQ + JQS_wHead);
+-		else
+-			writew(wTmp,  mpu->dev->MIDQ + JQS_wHead);
++	head = readw(mpu->dev->MIDQ + JQS_wHead);
++	tail = readw(mpu->dev->MIDQ + JQS_wTail);
++	size = readw(mpu->dev->MIDQ + JQS_wSize);
++	if (head > size || tail > size)
++		goto out;
++	while (head != tail) {
++		unsigned char val = readw(pwMIDQData + 2 * head);
++
++		if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
++			snd_rawmidi_receive(mpu->substream_input, &val, 1);
++		if (++head > size)
++			head = 0;
++		writew(head, mpu->dev->MIDQ + JQS_wHead);
+ 	}
++ out:
+ 	spin_unlock_irqrestore(&mpu->input_lock, flags);
+ }
+ EXPORT_SYMBOL(snd_msndmidi_input_read);
+diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
+index 4c072666115d..a31ea6c22d19 100644
+--- a/sound/isa/msnd/msnd_pinnacle.c
++++ b/sound/isa/msnd/msnd_pinnacle.c
+@@ -170,23 +170,24 @@ static irqreturn_t snd_msnd_interrupt(int irq, void *dev_id)
+ {
+ 	struct snd_msnd *chip = dev_id;
+ 	void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
++	u16 head, tail, size;
+ 
+ 	/* Send ack to DSP */
+ 	/* inb(chip->io + HP_RXL); */
+ 
+ 	/* Evaluate queued DSP messages */
+-	while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
+-		u16 wTmp;
+-
+-		snd_msnd_eval_dsp_msg(chip,
+-			readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
+-
+-		wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
+-		if (wTmp > readw(chip->DSPQ + JQS_wSize))
+-			writew(0, chip->DSPQ + JQS_wHead);
+-		else
+-			writew(wTmp, chip->DSPQ + JQS_wHead);
++	head = readw(chip->DSPQ + JQS_wHead);
++	tail = readw(chip->DSPQ + JQS_wTail);
++	size = readw(chip->DSPQ + JQS_wSize);
++	if (head > size || tail > size)
++		goto out;
++	while (head != tail) {
++		snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
++		if (++head > size)
++			head = 0;
++		writew(head, chip->DSPQ + JQS_wHead);
+ 	}
++ out:
+ 	/* Send ack to DSP */
+ 	inb(chip->io + HP_RXL);
+ 	return IRQ_HANDLED;
+diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
+index 74177189063c..d3125c169684 100644
+--- a/sound/pci/au88x0/au88x0_core.c
++++ b/sound/pci/au88x0/au88x0_core.c
+@@ -2150,8 +2150,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 							   stream->resources, en,
+ 							   VORTEX_RESOURCE_SRC)) < 0) {
+ 					memset(stream->resources, 0,
+-					       sizeof(unsigned char) *
+-					       VORTEX_RESOURCE_LAST);
++					       sizeof(stream->resources));
+ 					return -EBUSY;
+ 				}
+ 				if (stream->type != VORTEX_PCM_A3D) {
+@@ -2161,7 +2160,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 								   VORTEX_RESOURCE_MIXIN)) < 0) {
+ 						memset(stream->resources,
+ 						       0,
+-						       sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
++						       sizeof(stream->resources));
+ 						return -EBUSY;
+ 					}
+ 				}
+@@ -2174,8 +2173,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 						   stream->resources, en,
+ 						   VORTEX_RESOURCE_A3D)) < 0) {
+ 				memset(stream->resources, 0,
+-				       sizeof(unsigned char) *
+-				       VORTEX_RESOURCE_LAST);
++				       sizeof(stream->resources));
+ 				dev_err(vortex->card->dev,
+ 					"out of A3D sources. Sorry\n");
+ 				return -EBUSY;
+@@ -2289,8 +2287,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 						   VORTEX_RESOURCE_MIXOUT))
+ 			    < 0) {
+ 				memset(stream->resources, 0,
+-				       sizeof(unsigned char) *
+-				       VORTEX_RESOURCE_LAST);
++				       sizeof(stream->resources));
+ 				return -EBUSY;
+ 			}
+ 			if ((src[i] =
+@@ -2298,8 +2295,7 @@ vortex_adb_allocroute(vortex_t *vortex, int dma, int nr_ch, int dir,
+ 						   stream->resources, en,
+ 						   VORTEX_RESOURCE_SRC)) < 0) {
+ 				memset(stream->resources, 0,
+-				       sizeof(unsigned char) *
+-				       VORTEX_RESOURCE_LAST);
++				       sizeof(stream->resources));
+ 				return -EBUSY;
+ 			}
+ 		}
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 91b77bad03ea..a780540b7d4f 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -827,6 +827,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
++	SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
+ 	SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
+diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
+index 7bb9c087f3dc..4599983cfc8a 100644
+--- a/sound/soc/sh/rcar/ssi.c
++++ b/sound/soc/sh/rcar/ssi.c
+@@ -39,6 +39,7 @@
+ #define	SCKP		(1 << 13)	/* Serial Bit Clock Polarity */
+ #define	SWSP		(1 << 12)	/* Serial WS Polarity */
+ #define	SDTA		(1 << 10)	/* Serial Data Alignment */
++#define	PDTA		(1 <<  9)	/* Parallel Data Alignment */
+ #define	DEL		(1 <<  8)	/* Serial Data Delay */
+ #define	CKDV(v)		(v <<  4)	/* Serial Clock Division Ratio */
+ #define	TRMD		(1 <<  1)	/* Transmit/Receive Mode Select */
+@@ -278,7 +279,7 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
+ 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ 	u32 cr;
+ 
+-	cr = FORCE;
++	cr = FORCE | PDTA;
+ 
+ 	/*
+ 	 * always use 32bit system word for easy clock calculation.
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 4a033cbbd361..33c544acf3f6 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -541,6 +541,8 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
+ 
+ 	if (size < sizeof(scale))
+ 		return -ENOMEM;
++	if (cval->min_mute)
++		scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
+ 	scale[2] = cval->dBmin;
+ 	scale[3] = cval->dBmax;
+ 	if (copy_to_user(_tlv, scale, sizeof(scale)))
+diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
+index 3417ef347e40..2b4b067646ab 100644
+--- a/sound/usb/mixer.h
++++ b/sound/usb/mixer.h
+@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
+ 	int cached;
+ 	int cache_val[MAX_CHANNELS];
+ 	u8 initialized;
++	u8 min_mute;
+ 	void *private_data;
+ };
+ 
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 940442848fc8..de3f18059213 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -1863,6 +1863,12 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
+ 		if (unitid == 7 && cval->min == 0 && cval->max == 50)
+ 			snd_dragonfly_quirk_db_scale(mixer, kctl);
+ 		break;
++	/* lowest playback value is muted on C-Media devices */
++	case USB_ID(0x0d8c, 0x000c):
++	case USB_ID(0x0d8c, 0x0014):
++		if (strstr(kctl->id.name, "Playback"))
++			cval->min_mute = 1;
++		break;
+ 	}
+ }
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 2c71e5682716..693b2ac6720a 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1138,6 +1138,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
+ 	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ 	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-09-13 19:38 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-09-13 19:38 UTC (permalink / raw
  To: gentoo-commits

commit:     853af618e0ee4cee610f17b65bf4d1d60a7f3da2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 13 19:38:16 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 13 19:38:16 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=853af618

Validate the output buffer length for L2CAP config requests and responses to avoid overflowing the stack buffer used to build the option blocks. CVE-2017-1000251. See bug #630840
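
The fix works by threading the destination buffer's size down to the option writer, so each append is bounds-checked before any bytes are copied. Below is a minimal stand-alone C sketch of that pattern; the names (add_opt, OPT_HDR_SIZE) and sizes are illustrative assumptions rather than the kernel's l2cap_add_conf_opt() API, but the endptr - ptr accounting matches what the patch introduces.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OPT_HDR_SIZE 2	/* one type byte + one length byte, as in L2CAP TLV options */

/* Append a type/length/value option only if it fits in the remaining room. */
static size_t add_opt(uint8_t *ptr, size_t room, uint8_t type, uint8_t len,
		      const void *val)
{
	if (room < OPT_HDR_SIZE + (size_t)len)
		return 0;	/* drop the option instead of overflowing */
	ptr[0] = type;
	ptr[1] = len;
	memcpy(ptr + OPT_HDR_SIZE, val, len);
	return OPT_HDR_SIZE + len;
}

int main(void)
{
	uint8_t buf[64];	/* fixed-size stack buffer, like buf[64] in l2cap_config_req() */
	uint8_t *ptr = buf, *endptr = buf + sizeof(buf);
	uint16_t mtu = 672;

	/* endptr - ptr is the remaining room, recomputed before every append */
	ptr += add_opt(ptr, (size_t)(endptr - ptr), 0x01 /* MTU */, sizeof(mtu), &mtu);
	printf("wrote %zu bytes\n", (size_t)(ptr - buf));
	return 0;
}

The kernel version returns void and simply skips any option that no longer fits, which is enough to close the overflow: the response may be incomplete, but the stack can no longer be clobbered by attacker-controlled config exchanges.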

 0000_README                             |   4 +
 2400_BT-check-L2CAP-buffer-length.patch | 357 ++++++++++++++++++++++++++++++++
 2 files changed, 361 insertions(+)

diff --git a/0000_README b/0000_README
index 60f0ad1..959795e 100644
--- a/0000_README
+++ b/0000_README
@@ -231,6 +231,10 @@ Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.
 
+Patch:  2400_BT-check-L2CAP-buffer-length.patch
+From:   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3
+Desc:   Validate the output buffer length for L2CAP config reqs and resps to avoid stack buffer overflowing. CVE-2017-1000251. See bug #630840
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/2400_BT-check-L2CAP-buffer-length.patch b/2400_BT-check-L2CAP-buffer-length.patch
new file mode 100644
index 0000000..c6bfdf7
--- /dev/null
+++ b/2400_BT-check-L2CAP-buffer-length.patch
@@ -0,0 +1,357 @@
+From e860d2c904d1a9f38a24eb44c9f34b8f915a6ea3 Mon Sep 17 00:00:00 2001
+From: Ben Seri <ben@armis.com>
+Date: Sat, 9 Sep 2017 23:15:59 +0200
+Subject: Bluetooth: Properly check L2CAP config option output buffer length
+
+Validate the output buffer length for L2CAP config requests and responses
+to avoid overflowing the stack buffer used for building the option blocks.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ben Seri <ben@armis.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ net/bluetooth/l2cap_core.c | 80 +++++++++++++++++++++++++---------------------
+ 1 file changed, 43 insertions(+), 37 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 303c779..43ba91c 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -58,7 +58,7 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
+ 				       u8 code, u8 ident, u16 dlen, void *data);
+ static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+ 			   void *data);
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
+ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
+ 
+ static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+@@ -1473,7 +1473,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
+ 
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 
+@@ -2987,12 +2987,15 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
+ 	return len;
+ }
+ 
+-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
++static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
+ {
+ 	struct l2cap_conf_opt *opt = *ptr;
+ 
+ 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ 
++	if (size < L2CAP_CONF_OPT_SIZE + len)
++		return;
++
+ 	opt->type = type;
+ 	opt->len  = len;
+ 
+@@ -3017,7 +3020,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+ 	*ptr += L2CAP_CONF_OPT_SIZE + len;
+ }
+ 
+-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
++static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
+ {
+ 	struct l2cap_conf_efs efs;
+ 
+@@ -3045,7 +3048,7 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+ 	}
+ 
+ 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
+-			   (unsigned long) &efs);
++			   (unsigned long) &efs, size);
+ }
+ 
+ static void l2cap_ack_timeout(struct work_struct *work)
+@@ -3191,11 +3194,12 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
+ 	chan->ack_win = chan->tx_win;
+ }
+ 
+-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
+ 	void *ptr = req->data;
++	void *endptr = data + data_size;
+ 	u16 size;
+ 
+ 	BT_DBG("chan %p", chan);
+@@ -3220,7 +3224,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+ 
+ done:
+ 	if (chan->imtu != L2CAP_DEFAULT_MTU)
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 
+ 	switch (chan->mode) {
+ 	case L2CAP_MODE_BASIC:
+@@ -3239,7 +3243,7 @@ done:
+ 		rfc.max_pdu_size    = 0;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 		break;
+ 
+ 	case L2CAP_MODE_ERTM:
+@@ -3259,21 +3263,21 @@ done:
+ 				       L2CAP_DEFAULT_TX_WINDOW);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 
+@@ -3291,17 +3295,17 @@ done:
+ 		rfc.max_pdu_size = cpu_to_le16(size);
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 
+ 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
+-			l2cap_add_opt_efs(&ptr, chan);
++			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
+ 
+ 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
+ 			if (chan->fcs == L2CAP_FCS_NONE ||
+ 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
+ 				chan->fcs = L2CAP_FCS_NONE;
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
+-						   chan->fcs);
++						   chan->fcs, endptr - ptr);
+ 			}
+ 		break;
+ 	}
+@@ -3312,10 +3316,11 @@ done:
+ 	return ptr - data;
+ }
+ 
+-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
++static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
+ {
+ 	struct l2cap_conf_rsp *rsp = data;
+ 	void *ptr = rsp->data;
++	void *endptr = data + data_size;
+ 	void *req = chan->conf_req;
+ 	int len = chan->conf_len;
+ 	int type, hint, olen;
+@@ -3417,7 +3422,7 @@ done:
+ 			return -ECONNREFUSED;
+ 
+ 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-				   (unsigned long) &rfc);
++				   (unsigned long) &rfc, endptr - ptr);
+ 	}
+ 
+ 	if (result == L2CAP_CONF_SUCCESS) {
+@@ -3430,7 +3435,7 @@ done:
+ 			chan->omtu = mtu;
+ 			set_bit(CONF_MTU_DONE, &chan->conf_state);
+ 		}
+-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
++		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
+ 
+ 		if (remote_efs) {
+ 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+@@ -3444,7 +3449,7 @@ done:
+ 
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			} else {
+ 				/* Send PENDING Conf Rsp */
+ 				result = L2CAP_CONF_PENDING;
+@@ -3477,7 +3482,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 
+ 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
+ 				chan->remote_id = efs.id;
+@@ -3491,7 +3496,7 @@ done:
+ 					le32_to_cpu(efs.sdu_itime);
+ 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
+ 						   sizeof(efs),
+-						   (unsigned long) &efs);
++						   (unsigned long) &efs, endptr - ptr);
+ 			}
+ 			break;
+ 
+@@ -3505,7 +3510,7 @@ done:
+ 			set_bit(CONF_MODE_DONE, &chan->conf_state);
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+-					   (unsigned long) &rfc);
++					   (unsigned long) &rfc, endptr - ptr);
+ 
+ 			break;
+ 
+@@ -3527,10 +3532,11 @@ done:
+ }
+ 
+ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+-				void *data, u16 *result)
++				void *data, size_t size, u16 *result)
+ {
+ 	struct l2cap_conf_req *req = data;
+ 	void *ptr = req->data;
++	void *endptr = data + size;
+ 	int type, olen;
+ 	unsigned long val;
+ 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
+@@ -3548,13 +3554,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
+ 			} else
+ 				chan->imtu = val;
+-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
++			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FLUSH_TO:
+ 			chan->flush_to = val;
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+-					   2, chan->flush_to);
++					   2, chan->flush_to, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_RFC:
+@@ -3568,13 +3574,13 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 			chan->fcs = 0;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+-					   sizeof(rfc), (unsigned long) &rfc);
++					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EWS:
+ 			chan->ack_win = min_t(u16, val, chan->ack_win);
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
+-					   chan->tx_win);
++					   chan->tx_win, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_EFS:
+@@ -3587,7 +3593,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
+ 				return -ECONNREFUSED;
+ 
+ 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+-					   (unsigned long) &efs);
++					   (unsigned long) &efs, endptr - ptr);
+ 			break;
+ 
+ 		case L2CAP_CONF_FCS:
+@@ -3692,7 +3698,7 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+ 		return;
+ 
+ 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-		       l2cap_build_conf_req(chan, buf), buf);
++		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 	chan->num_conf_req++;
+ }
+ 
+@@ -3900,7 +3906,7 @@ sendresp:
+ 		u8 buf[128];
+ 		set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -3978,7 +3984,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
+ 			break;
+ 
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, req), req);
++			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
+ 		chan->num_conf_req++;
+ 		break;
+ 
+@@ -4090,7 +4096,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	}
+ 
+ 	/* Complete config. */
+-	len = l2cap_parse_conf_req(chan, rsp);
++	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
+ 	if (len < 0) {
+ 		l2cap_send_disconn_req(chan, ECONNRESET);
+ 		goto unlock;
+@@ -4124,7 +4130,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
+ 		u8 buf[64];
+ 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+-			       l2cap_build_conf_req(chan, buf), buf);
++			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 		chan->num_conf_req++;
+ 	}
+ 
+@@ -4184,7 +4190,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			char buf[64];
+ 
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   buf, &result);
++						   buf, sizeof(buf), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4214,7 +4220,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
+ 			/* throw out any old stored conf requests */
+ 			result = L2CAP_CONF_SUCCESS;
+ 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+-						   req, &result);
++						   req, sizeof(req), &result);
+ 			if (len < 0) {
+ 				l2cap_send_disconn_req(chan, ECONNRESET);
+ 				goto done;
+@@ -4791,7 +4797,7 @@ static void l2cap_do_create(struct l2cap_chan *chan, int result,
+ 			set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
+ 				       L2CAP_CONF_REQ,
+-				       l2cap_build_conf_req(chan, buf), buf);
++				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
+ 			chan->num_conf_req++;
+ 		}
+ 	}
+@@ -7465,7 +7471,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
+ 				set_bit(CONF_REQ_SENT, &chan->conf_state);
+ 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ 					       L2CAP_CONF_REQ,
+-					       l2cap_build_conf_req(chan, buf),
++					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
+ 					       buf);
+ 				chan->num_conf_req++;
+ 			}
+-- 
+cgit v1.1
+


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-08-06 18:01 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-08-06 18:01 UTC (permalink / raw
  To: gentoo-commits

commit:     5a402b476904049d5733853f1cee6216b9a21424
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Aug  6 18:01:10 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Aug  6 18:01:10 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5a402b47

Linux patch 4.1.43

 0000_README             |    4 +
 1042_linux-4.1.43.patch | 5611 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5615 insertions(+)

diff --git a/0000_README b/0000_README
index 85eb55c..60f0ad1 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch:  1041_linux-4.1.42.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.42
 
+Patch:  1042_linux-4.1.43.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.43
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1042_linux-4.1.43.patch b/1042_linux-4.1.43.patch
new file mode 100644
index 0000000..ad0b111
--- /dev/null
+++ b/1042_linux-4.1.43.patch
@@ -0,0 +1,5611 @@
+diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
+index c831001c45f1..4c88aa047790 100644
+--- a/Documentation/sysctl/kernel.txt
++++ b/Documentation/sysctl/kernel.txt
+@@ -798,14 +798,13 @@ via the /proc/sys interface:
+        Each write syscall must fully contain the sysctl value to be
+        written, and multiple writes on the same sysctl file descriptor
+        will rewrite the sysctl value, regardless of file position.
+-   0 - (default) Same behavior as above, but warn about processes that
+-       perform writes to a sysctl file descriptor when the file position
+-       is not 0.
+-   1 - Respect file position when writing sysctl strings. Multiple writes
+-       will append to the sysctl value buffer. Anything past the max length
+-       of the sysctl value buffer will be ignored. Writes to numeric sysctl
+-       entries must always be at file position 0 and the value must be
+-       fully contained in the buffer sent in the write syscall.
++   0 - Same behavior as above, but warn about processes that perform writes
++       to a sysctl file descriptor when the file position is not 0.
++   1 - (default) Respect file position when writing sysctl strings. Multiple
++       writes will append to the sysctl value buffer. Anything past the max
++       length of the sysctl value buffer will be ignored. Writes to numeric
++       sysctl entries must always be at file position 0 and the value must
++       be fully contained in the buffer sent in the write syscall.
+ 
+ ==============================================================
+ 
+diff --git a/Makefile b/Makefile
+index 0c3313f14ff0..50d0a93fa343 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 42
++SUBLEVEL = 43
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -622,6 +622,12 @@ endif
+ # Tell gcc to never replace conditional load with a non-conditional one
+ KBUILD_CFLAGS	+= $(call cc-option,--param=allow-store-data-races=0)
+ 
++# check for 'asm goto'
++ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
++	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
++	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
++endif
++
+ ifdef CONFIG_READABLE_ASM
+ # Disable optimizations that make assembler listings hard to read.
+ # reorder blocks reorders the control in the function
+@@ -777,12 +783,6 @@ KBUILD_CFLAGS   += $(call cc-option,-Werror=date-time)
+ # use the deterministic mode of AR if available
+ KBUILD_ARFLAGS := $(call ar-option,D)
+ 
+-# check for 'asm goto'
+-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+-	KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+-	KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+-endif
+-
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+ 
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index 78aec6270c2f..90fbda066122 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -54,14 +54,14 @@
+ 		timer@0200 {
+ 			compatible = "arm,cortex-a9-global-timer";
+ 			reg = <0x0200 0x100>;
+-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&clk_periph>;
+ 		};
+ 
+ 		local-timer@0600 {
+ 			compatible = "arm,cortex-a9-twd-timer";
+ 			reg = <0x0600 0x100>;
+-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
++			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
+ 			clocks = <&clk_periph>;
+ 		};
+ 
+diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
+index f94bf72832af..d6da4cc23920 100644
+--- a/arch/arm/boot/dts/imx6dl.dtsi
++++ b/arch/arm/boot/dts/imx6dl.dtsi
+@@ -30,7 +30,7 @@
+ 				/* kHz    uV */
+ 				996000  1250000
+ 				792000  1175000
+-				396000  1075000
++				396000  1150000
+ 			>;
+ 			fsl,soc-operating-points = <
+ 				/* ARM kHz  SOC-PU uV */
+diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
+index d2315ffd8f12..f13ae153fb24 100644
+--- a/arch/arm/include/asm/elf.h
++++ b/arch/arm/include/asm/elf.h
+@@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
+ #define CORE_DUMP_USE_REGSET
+ #define ELF_EXEC_PAGESIZE	4096
+ 
+-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+-   use of this is to invoke "./ld.so someprog" to test out a new version of
+-   the loader.  We need to make sure that it is out of the way of the program
+-   that it will "exec", and that there is sufficient room for the brk.  */
+-
+-#define ELF_ET_DYN_BASE	(TASK_SIZE / 3 * 2)
++/* This is the base location for PIE (ET_DYN with INTERP) loads. */
++#define ELF_ET_DYN_BASE		0x400000UL
+ 
+ /* When the program starts, a1 contains a pointer to a function to be 
+    registered with atexit, as per the SVR4 ABI.  A value of 0 means we 
+diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
+index 7186382672b5..d89d35b40e47 100644
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -1136,15 +1136,15 @@ void __init sanity_check_meminfo(void)
+ 
+ 	high_memory = __va(arm_lowmem_limit - 1) + 1;
+ 
++	if (!memblock_limit)
++		memblock_limit = arm_lowmem_limit;
++
+ 	/*
+ 	 * Round the memblock limit down to a pmd size.  This
+ 	 * helps to ensure that we will allocate memory from the
+ 	 * last full pmd, which should be mapped.
+ 	 */
+-	if (memblock_limit)
+-		memblock_limit = round_down(memblock_limit, PMD_SIZE);
+-	if (!memblock_limit)
+-		memblock_limit = arm_lowmem_limit;
++	memblock_limit = round_down(memblock_limit, PMD_SIZE);
+ 
+ 	memblock_set_current_limit(memblock_limit);
+ }
+diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
+index 71f19c4dc0de..ffe7850afdbd 100644
+--- a/arch/arm64/include/asm/barrier.h
++++ b/arch/arm64/include/asm/barrier.h
+@@ -63,23 +63,33 @@ do {									\
+ 
+ #define smp_store_release(p, v)						\
+ do {									\
++	union { typeof(*p) __val; char __c[1]; } __u =			\
++		{ .__val = (__force typeof(*p)) (v) }; 			\
+ 	compiletime_assert_atomic_type(*p);				\
+ 	switch (sizeof(*p)) {						\
+ 	case 1:								\
+ 		asm volatile ("stlrb %w1, %0"				\
+-				: "=Q" (*p) : "r" (v) : "memory");	\
++				: "=Q" (*p)				\
++				: "r" (*(__u8 *)__u.__c)		\
++				: "memory");				\
+ 		break;							\
+ 	case 2:								\
+ 		asm volatile ("stlrh %w1, %0"				\
+-				: "=Q" (*p) : "r" (v) : "memory");	\
++				: "=Q" (*p)				\
++				: "r" (*(__u16 *)__u.__c)		\
++				: "memory");				\
+ 		break;							\
+ 	case 4:								\
+ 		asm volatile ("stlr %w1, %0"				\
+-				: "=Q" (*p) : "r" (v) : "memory");	\
++				: "=Q" (*p)				\
++				: "r" (*(__u32 *)__u.__c)		\
++				: "memory");				\
+ 		break;							\
+ 	case 8:								\
+ 		asm volatile ("stlr %1, %0"				\
+-				: "=Q" (*p) : "r" (v) : "memory");	\
++				: "=Q" (*p)				\
++				: "r" (*(__u64 *)__u.__c)		\
++				: "memory");				\
+ 		break;							\
+ 	}								\
+ } while (0)
+diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
+index 7ac3920b1356..802dd71ed0b3 100644
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -298,7 +298,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
+ 	"	.quad		1b, 4b\n"			\
+ 	"	.popsection\n"					\
+ 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
+-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
++	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
++	  "i" (-EFAULT)						\
+ 	: "memory")
+ 
+ #define __user_swp_asm(data, addr, res, temp) \
+diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
+index 7791840cf22c..db07793f7b43 100644
+--- a/arch/mips/kernel/entry.S
++++ b/arch/mips/kernel/entry.S
+@@ -11,6 +11,7 @@
+ #include <asm/asm.h>
+ #include <asm/asmmacro.h>
+ #include <asm/compiler.h>
++#include <asm/irqflags.h>
+ #include <asm/regdef.h>
+ #include <asm/mipsregs.h>
+ #include <asm/stackframe.h>
+@@ -137,6 +138,7 @@ work_pending:
+ 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
+ 	beqz	t0, work_notifysig
+ work_resched:
++	TRACE_IRQS_OFF
+ 	jal	schedule
+ 
+ 	local_irq_disable		# make sure need_resched and
+@@ -173,6 +175,7 @@ syscall_exit_work:
+ 	beqz	t0, work_pending	# trace bit set?
+ 	local_irq_enable		# could let syscall_trace_leave()
+ 					# call schedule() instead
++	TRACE_IRQS_ON
+ 	move	a0, sp
+ 	jal	syscall_trace_leave
+ 	b	resume_userspace
+diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
+index 06147179a175..cd25b616075d 100644
+--- a/arch/mips/kernel/pm-cps.c
++++ b/arch/mips/kernel/pm-cps.c
+@@ -55,7 +55,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+  * state. Actually per-core rather than per-CPU.
+  */
+ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
+ 
+ /* Indicates online CPUs coupled with the current CPU */
+ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+@@ -624,7 +623,6 @@ static int __init cps_gen_core_entries(unsigned cpu)
+ {
+ 	enum cps_pm_state state;
+ 	unsigned core = cpu_data[cpu].core;
+-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
+ 	void *entry_fn, *core_rc;
+ 
+ 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+@@ -644,16 +642,11 @@ static int __init cps_gen_core_entries(unsigned cpu)
+ 	}
+ 
+ 	if (!per_cpu(ready_count, core)) {
+-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
++		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
+ 		if (!core_rc) {
+ 			pr_err("Failed allocate core %u ready_count\n", core);
+ 			return -ENOMEM;
+ 		}
+-		per_cpu(ready_count_alloc, core) = core_rc;
+-
+-		/* Ensure ready_count is aligned to a cacheline boundary */
+-		core_rc += dlinesz - 1;
+-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
+ 		per_cpu(ready_count, core) = core_rc;
+ 	}
+ 
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 74403953e407..2e29b1aed924 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -193,6 +193,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
+ {
+ 	struct pt_regs regs;
+ 	mm_segment_t old_fs = get_fs();
++
++	regs.cp0_status = KSU_KERNEL;
+ 	if (sp) {
+ 		regs.regs[29] = (unsigned long)sp;
+ 		regs.regs[31] = 0;
+diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
+index 2ea5ff6dc22e..c57215a66181 100644
+--- a/arch/mips/ralink/mt7620.c
++++ b/arch/mips/ralink/mt7620.c
+@@ -98,31 +98,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
+ };
+ 
+ static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
+-	FUNC("sdcx", 3, 19, 1),
++	FUNC("sdxc d6", 3, 19, 1),
+ 	FUNC("utif", 2, 19, 1),
+ 	FUNC("gpio", 1, 19, 1),
+-	FUNC("pwm", 0, 19, 1),
++	FUNC("pwm1", 0, 19, 1),
+ };
+ 
+ static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
+-	FUNC("sdcx", 3, 18, 1),
++	FUNC("sdxc d7", 3, 18, 1),
+ 	FUNC("utif", 2, 18, 1),
+ 	FUNC("gpio", 1, 18, 1),
+-	FUNC("pwm", 0, 18, 1),
++	FUNC("pwm0", 0, 18, 1),
+ };
+ 
+ static struct rt2880_pmx_func uart2_grp_mt7628[] = {
+-	FUNC("sdcx", 3, 20, 2),
++	FUNC("sdxc d5 d4", 3, 20, 2),
+ 	FUNC("pwm", 2, 20, 2),
+ 	FUNC("gpio", 1, 20, 2),
+-	FUNC("uart", 0, 20, 2),
++	FUNC("uart2", 0, 20, 2),
+ };
+ 
+ static struct rt2880_pmx_func uart1_grp_mt7628[] = {
+-	FUNC("sdcx", 3, 45, 2),
++	FUNC("sw_r", 3, 45, 2),
+ 	FUNC("pwm", 2, 45, 2),
+ 	FUNC("gpio", 1, 45, 2),
+-	FUNC("uart", 0, 45, 2),
++	FUNC("uart1", 0, 45, 2),
+ };
+ 
+ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+@@ -134,21 +134,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
+ 
+ static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
+ static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
+-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
++static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
+ static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
+ 
+ static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
+ 	FUNC("jtag", 3, 22, 8),
+ 	FUNC("utif", 2, 22, 8),
+ 	FUNC("gpio", 1, 22, 8),
+-	FUNC("sdcx", 0, 22, 8),
++	FUNC("sdxc", 0, 22, 8),
+ };
+ 
+ static struct rt2880_pmx_func uart0_grp_mt7628[] = {
+ 	FUNC("-", 3, 12, 2),
+ 	FUNC("-", 2, 12, 2),
+ 	FUNC("gpio", 1, 12, 2),
+-	FUNC("uart", 0, 12, 2),
++	FUNC("uart0", 0, 12, 2),
+ };
+ 
+ static struct rt2880_pmx_func i2s_grp_mt7628[] = {
+@@ -162,7 +162,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
+ 	FUNC("-", 3, 6, 1),
+ 	FUNC("refclk", 2, 6, 1),
+ 	FUNC("gpio", 1, 6, 1),
+-	FUNC("spi", 0, 6, 1),
++	FUNC("spi cs1", 0, 6, 1),
+ };
+ 
+ static struct rt2880_pmx_func spis_grp_mt7628[] = {
+@@ -179,28 +179,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
+ 	FUNC("gpio", 0, 11, 1),
+ };
+ 
+-#define MT7628_GPIO_MODE_MASK	0x3
+-
+-#define MT7628_GPIO_MODE_PWM1	30
+-#define MT7628_GPIO_MODE_PWM0	28
+-#define MT7628_GPIO_MODE_UART2	26
+-#define MT7628_GPIO_MODE_UART1	24
+-#define MT7628_GPIO_MODE_I2C	20
+-#define MT7628_GPIO_MODE_REFCLK	18
+-#define MT7628_GPIO_MODE_PERST	16
+-#define MT7628_GPIO_MODE_WDT	14
+-#define MT7628_GPIO_MODE_SPI	12
+-#define MT7628_GPIO_MODE_SDMODE	10
+-#define MT7628_GPIO_MODE_UART0	8
+-#define MT7628_GPIO_MODE_I2S	6
+-#define MT7628_GPIO_MODE_CS1	4
+-#define MT7628_GPIO_MODE_SPIS	2
+-#define MT7628_GPIO_MODE_GPIO	0
++static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
++	FUNC("rsvd", 3, 35, 1),
++	FUNC("rsvd", 2, 35, 1),
++	FUNC("gpio", 1, 35, 1),
++	FUNC("wled_kn", 0, 35, 1),
++};
++
++static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
++	FUNC("rsvd", 3, 44, 1),
++	FUNC("rsvd", 2, 44, 1),
++	FUNC("gpio", 1, 44, 1),
++	FUNC("wled_an", 0, 44, 1),
++};
++
++#define MT7628_GPIO_MODE_MASK		0x3
++
++#define MT7628_GPIO_MODE_WLED_KN	48
++#define MT7628_GPIO_MODE_WLED_AN	32
++#define MT7628_GPIO_MODE_PWM1		30
++#define MT7628_GPIO_MODE_PWM0		28
++#define MT7628_GPIO_MODE_UART2		26
++#define MT7628_GPIO_MODE_UART1		24
++#define MT7628_GPIO_MODE_I2C		20
++#define MT7628_GPIO_MODE_REFCLK		18
++#define MT7628_GPIO_MODE_PERST		16
++#define MT7628_GPIO_MODE_WDT		14
++#define MT7628_GPIO_MODE_SPI		12
++#define MT7628_GPIO_MODE_SDMODE		10
++#define MT7628_GPIO_MODE_UART0		8
++#define MT7628_GPIO_MODE_I2S		6
++#define MT7628_GPIO_MODE_CS1		4
++#define MT7628_GPIO_MODE_SPIS		2
++#define MT7628_GPIO_MODE_GPIO		0
+ 
+ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
+-	GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
++	GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 				1, MT7628_GPIO_MODE_PWM1),
+-	GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
++	GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 				1, MT7628_GPIO_MODE_PWM0),
+ 	GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 				1, MT7628_GPIO_MODE_UART2),
+@@ -224,6 +240,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
+ 				1, MT7628_GPIO_MODE_SPIS),
+ 	GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 				1, MT7628_GPIO_MODE_GPIO),
++	GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
++				1, MT7628_GPIO_MODE_WLED_AN),
++	GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
++				1, MT7628_GPIO_MODE_WLED_KN),
+ 	{ 0 }
+ };
+ 
+diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
+index d0eae5f2bd87..4fb62add2636 100644
+--- a/arch/parisc/include/asm/dma-mapping.h
++++ b/arch/parisc/include/asm/dma-mapping.h
+@@ -39,6 +39,8 @@ struct hppa_dma_ops {
+ ** flush/purge and allocate "regular" cacheable pages for everything.
+ */
+ 
++#define DMA_ERROR_CODE	(~(dma_addr_t)0)
++
+ #ifdef CONFIG_PA11
+ extern struct hppa_dma_ops pcxl_dma_ops;
+ extern struct hppa_dma_ops pcx_dma_ops;
+@@ -209,12 +211,13 @@ parisc_walk_tree(struct device *dev)
+ 			break;
+ 		}
+ 	}
+-	BUG_ON(!dev->platform_data);
+ 	return dev->platform_data;
+ }
+-		
+-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
+-	
++
++#define GET_IOC(dev) ({					\
++	void *__pdata = parisc_walk_tree(dev);		\
++	__pdata ? HBA_DATA(__pdata)->iommu : NULL;	\
++})
+ 
+ #ifdef CONFIG_IOMMU_CCIO
+ struct parisc_device;
+diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
+index 8eefb12d1d33..3781b8c0fad9 100644
+--- a/arch/parisc/kernel/syscall_table.S
++++ b/arch/parisc/kernel/syscall_table.S
+@@ -361,7 +361,7 @@
+ 	ENTRY_SAME(ni_syscall)	/* 263: reserved for vserver */
+ 	ENTRY_SAME(add_key)
+ 	ENTRY_SAME(request_key)		/* 265 */
+-	ENTRY_SAME(keyctl)
++	ENTRY_COMP(keyctl)
+ 	ENTRY_SAME(ioprio_set)
+ 	ENTRY_SAME(ioprio_get)
+ 	ENTRY_SAME(inotify_init)
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index 50d64a7fc672..3b7c02f9b726 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -303,7 +303,7 @@ bad_area:
+ 		case 15:	/* Data TLB miss fault/Data page fault */
+ 			/* send SIGSEGV when outside of vma */
+ 			if (!vma ||
+-			    address < vma->vm_start || address > vma->vm_end) {
++			    address < vma->vm_start || address >= vma->vm_end) {
+ 				si.si_signo = SIGSEGV;
+ 				si.si_code = SEGV_MAPERR;
+ 				break;
+diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
+index ee46ffef608e..743ad7a400d6 100644
+--- a/arch/powerpc/include/asm/elf.h
++++ b/arch/powerpc/include/asm/elf.h
+@@ -23,12 +23,13 @@
+ #define CORE_DUMP_USE_REGSET
+ #define ELF_EXEC_PAGESIZE	PAGE_SIZE
+ 
+-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+-   use of this is to invoke "./ld.so someprog" to test out a new version of
+-   the loader.  We need to make sure that it is out of the way of the program
+-   that it will "exec", and that there is sufficient room for the brk.  */
+-
+-#define ELF_ET_DYN_BASE	0x20000000
++/*
++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * space open for things that want to use the area for 32-bit pointers.
++ */
++#define ELF_ET_DYN_BASE		(is_32bit_task() ? 0x000400000UL : \
++						   0x100000000UL)
+ 
+ #define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+ 
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index b264937bba68..9340d05bcdc9 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -306,9 +306,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
+ 	 *
+ 	 * For pHyp, we have to enable IO for log retrieval. Otherwise,
+ 	 * 0xFF's is always returned from PCI config space.
++	 *
++	 * When the @severity is EEH_LOG_PERM, the PE is going to be
++	 * removed. Prior to that, the drivers for devices included in
++	 * the PE will be closed. The drivers rely on working IO path
++	 * to bring the devices to quiet state. Otherwise, PCI traffic
+	 * from those devices after they are removed is likely to cause
++	 * another unexpected EEH error.
+ 	 */
+ 	if (!(pe->type & EEH_PE_PHB)) {
+-		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
++		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
++		    severity == EEH_LOG_PERM)
+ 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+ 
+ 		/*
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 0a4f23a070ab..ffca0bf5b8b4 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -651,7 +651,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
+  */
+ #define MAX_WAIT_FOR_RECOVERY 300
+ 
+-static void eeh_handle_normal_event(struct eeh_pe *pe)
++static bool eeh_handle_normal_event(struct eeh_pe *pe)
+ {
+ 	struct pci_bus *frozen_bus;
+ 	int rc = 0;
+@@ -661,7 +661,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
+ 	if (!frozen_bus) {
+ 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
+ 			__func__, pe->phb->global_number, pe->addr);
+-		return;
++		return false;
+ 	}
+ 
+ 	eeh_pe_update_time_stamp(pe);
+@@ -778,7 +778,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
+ 	pr_info("EEH: Notify device driver to resume\n");
+ 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
+ 
+-	return;
++	return false;
+ 
+ excess_failures:
+ 	/*
+@@ -819,7 +819,11 @@ perm_error:
+ 		pci_lock_rescan_remove();
+ 		pcibios_remove_pci_devices(frozen_bus);
+ 		pci_unlock_rescan_remove();
++
++		/* The passed PE should no longer be used */
++		return true;
+ 	}
++	return false;
+ }
+ 
+ static void eeh_handle_special_event(void)
+@@ -885,7 +889,14 @@ static void eeh_handle_special_event(void)
+ 		 */
+ 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
+ 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
+-			eeh_handle_normal_event(pe);
++			/*
++			 * eeh_handle_normal_event() can make the PE stale if it
++			 * determines that the PE cannot possibly be recovered.
++			 * Don't modify the PE state if that's the case.
++			 */
++			if (eeh_handle_normal_event(pe))
++				continue;
++
+ 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
+ 		} else {
+ 			pci_lock_rescan_remove();
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 7c053f281406..1138fec3dd65 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -514,6 +514,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+ #endif
+ #endif
+ 
++	/*
++	 * jprobes use jprobe_return() which skips the normal return
++	 * path of the function, and this messes up the accounting of the
++	 * function graph tracer.
++	 *
++	 * Pause function graph tracing while performing the jprobe function.
++	 */
++	pause_graph_tracing();
++
+ 	return 1;
+ }
+ 
+@@ -536,6 +545,8 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+ 	 * saved regs...
+ 	 */
+ 	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
++	/* It's OK to start function graph tracing again */
++	unpause_graph_tracing();
+ 	preempt_enable_no_resched();
+ 	return 1;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 63c37fd2b7a6..c1e10ffadd17 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2238,6 +2238,27 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ 		return -EINVAL;
+ 	}
+ 
++	/*
++	 * Don't allow entry with a suspended transaction, because
++	 * the guest entry/exit code will lose it.
++	 * If the guest has TM enabled, save away their TM-related SPRs
++	 * (they will get restored by the TM unavailable interrupt).
++	 */
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
++	    (current->thread.regs->msr & MSR_TM)) {
++		if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
++			run->exit_reason = KVM_EXIT_FAIL_ENTRY;
++			run->fail_entry.hardware_entry_failure_reason = 0;
++			return -EINVAL;
++		}
++		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
++		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
++		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
++		current->thread.regs->msr &= ~MSR_TM;
++	}
++#endif
++
+ 	kvmppc_core_prepare_to_enter(vcpu);
+ 
+ 	/* No need to go into the guest when all we'll do is come back out */
+diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
+index d7697ab802f6..8e136b88cdf4 100644
+--- a/arch/s390/include/asm/ctl_reg.h
++++ b/arch/s390/include/asm/ctl_reg.h
+@@ -15,7 +15,9 @@
+ 	BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
+ 	asm volatile(							\
+ 		"	lctlg	%1,%2,%0\n"				\
+-		: : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
++		:							\
++		: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)	\
++		: "memory");						\
+ }
+ 
+ #define __ctl_store(array, low, high) {					\
+diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
+index 3ad48f22de78..f133ce08b270 100644
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -154,14 +154,13 @@ extern unsigned int vdso_enabled;
+ #define CORE_DUMP_USE_REGSET
+ #define ELF_EXEC_PAGESIZE	4096
+ 
+-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+-   use of this is to invoke "./ld.so someprog" to test out a new version of
+-   the loader.  We need to make sure that it is out of the way of the program
+-   that it will "exec", and that there is sufficient room for the brk. 64-bit
+-   tasks are aligned to 4GB. */
+-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
+-				(STACK_TOP / 3 * 2) : \
+-				(STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
++/*
++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * space open for things that want to use the area for 32-bit pointers.
++ */
++#define ELF_ET_DYN_BASE		(is_compat_task() ? 0x000400000UL : \
++						    0x100000000UL)
+ 
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports. */
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index ef7d6c8fea66..f354fd84adeb 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -372,7 +372,7 @@ void __init vmem_map_init(void)
+ 	ro_end = (unsigned long)&_eshared & PAGE_MASK;
+ 	for_each_memblock(memory, reg) {
+ 		start = reg->base;
+-		end = reg->base + reg->size - 1;
++		end = reg->base + reg->size;
+ 		if (start >= ro_end || end <= ro_start)
+ 			vmem_add_mem(start, end - start, 0);
+ 		else if (start >= ro_start && end <= ro_end)
+diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
+index 3b5b7a9c866d..2903ff34174c 100644
+--- a/arch/x86/include/asm/elf.h
++++ b/arch/x86/include/asm/elf.h
+@@ -245,12 +245,13 @@ extern int force_personality32;
+ #define CORE_DUMP_USE_REGSET
+ #define ELF_EXEC_PAGESIZE	4096
+ 
+-/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+-   use of this is to invoke "./ld.so someprog" to test out a new version of
+-   the loader.  We need to make sure that it is out of the way of the program
+-   that it will "exec", and that there is sufficient room for the brk.  */
+-
+-#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
++/*
++ * This is the base location for PIE (ET_DYN with INTERP) loads. On
++ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
++ * space open for things that want to use the area for 32-bit pointers.
++ */
++#define ELF_ET_DYN_BASE		(mmap_is_ia32() ? 0x000400000UL : \
++						  0x100000000UL)
+ 
+ /* This yields a mask that user programs can use to figure out what
+    instruction set this CPU supports.  This could be done in user space,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 99a15e38fa06..32e29f926e5a 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2118,7 +2118,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
+ 	if (!(vmcs12->exception_bitmap & (1u << nr)))
+ 		return 0;
+ 
+-	nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
++	nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ 			  vmcs_read32(VM_EXIT_INTR_INFO),
+ 			  vmcs_readl(EXIT_QUALIFICATION));
+ 	return 1;
+@@ -6153,7 +6153,6 @@ static __init int hardware_setup(void)
+ 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+-	vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+ 
+ 	memcpy(vmx_msr_bitmap_legacy_x2apic,
+ 			vmx_msr_bitmap_legacy, PAGE_SIZE);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c730e4708c7d..9d7ea42482e3 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4910,6 +4910,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
+ 
+ 	if (var.unusable) {
+ 		memset(desc, 0, sizeof(*desc));
++		if (base3)
++			*base3 = 0;
+ 		return false;
+ 	}
+ 
+@@ -6049,7 +6051,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
+ 
+ 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
+ 
+-	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
++	return emulator_write_emulated(ctxt, rip, instruction, 3,
++		&ctxt->exception);
+ }
+ 
+ /*
+diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
+index fa997dfaef24..2f1c52e252b0 100644
+--- a/arch/x86/lib/copy_user_64.S
++++ b/arch/x86/lib/copy_user_64.S
+@@ -112,7 +112,7 @@ ENTRY(copy_user_generic_unrolled)
+ 	movl %edx,%ecx
+ 	andl $63,%edx
+ 	shrl $6,%ecx
+-	jz 17f
++	jz .L_copy_short_string
+ 1:	movq (%rsi),%r8
+ 2:	movq 1*8(%rsi),%r9
+ 3:	movq 2*8(%rsi),%r10
+@@ -133,7 +133,8 @@ ENTRY(copy_user_generic_unrolled)
+ 	leaq 64(%rdi),%rdi
+ 	decl %ecx
+ 	jnz 1b
+-17:	movl %edx,%ecx
++.L_copy_short_string:
++	movl %edx,%ecx
+ 	andl $7,%edx
+ 	shrl $3,%ecx
+ 	jz 20f
+@@ -251,6 +252,8 @@ ENDPROC(copy_user_generic_string)
+ ENTRY(copy_user_enhanced_fast_string)
+ 	CFI_STARTPROC
+ 	ASM_STAC
++	cmpl $64,%edx
++	jb .L_copy_short_string	/* less than 64 bytes, avoid the costly 'rep' */
+ 	movl %edx,%ecx
+ 1:	rep
+ 	movsb
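
The added "cmpl $64" routes copies shorter than 64 bytes to the unrolled short-string path, since the startup latency of "rep movsb" dominates for small sizes. A hedged C analogue of that dispatch (the 64-byte cut-off mirrors the patch; real thresholds are microarchitecture-specific):

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	static void copy_dispatch(void *dst, const void *src, size_t len)
	{
		if (len < 64) {			/* short path: avoid bulk-copy startup cost */
			unsigned char *d = dst;
			const unsigned char *s = src;

			while (len--)
				*d++ = *s++;
		} else {
			memcpy(dst, src, len);	/* long path: setup cost is amortized */
		}
	}

	int main(void)
	{
		char src[128] = "short copies take the byte loop", dst[128];

		copy_dispatch(dst, src, sizeof(src));
		printf("%s\n", dst);
		return 0;
	}
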
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index 6a3c774eaff6..c2fea3af515d 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -312,7 +312,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
+ 	 * We were not able to extract an address from the instruction,
+ 	 * probably because there was something invalid in it.
+ 	 */
+-	if (info->si_addr == (void *)-1) {
++	if (info->si_addr == (void __user *)-1) {
+ 		err = -EINVAL;
+ 		goto err_out;
+ 	}
+diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
+index 0c2fae8d929d..73eb7fd4aec4 100644
+--- a/arch/x86/tools/relocs.c
++++ b/arch/x86/tools/relocs.c
+@@ -992,11 +992,12 @@ static void emit_relocs(int as_text, int use_real_mode)
+ 		die("Segment relocations found but --realmode not specified\n");
+ 
+ 	/* Order the relocations for more efficient processing */
+-	sort_relocs(&relocs16);
+ 	sort_relocs(&relocs32);
+ #if ELF_BITS == 64
+ 	sort_relocs(&relocs32neg);
+ 	sort_relocs(&relocs64);
++#else
++	sort_relocs(&relocs16);
+ #endif
+ 
+ 	/* Print the relocations */
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 21d13038534e..ed29f61d1338 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -1981,7 +1981,11 @@ void device_shutdown(void)
+ 		pm_runtime_get_noresume(dev);
+ 		pm_runtime_barrier(dev);
+ 
+-		if (dev->bus && dev->bus->shutdown) {
++		if (dev->class && dev->class->shutdown) {
++			if (initcall_debug)
++				dev_info(dev, "shutdown\n");
++			dev->class->shutdown(dev);
++		} else if (dev->bus && dev->bus->shutdown) {
+ 			if (initcall_debug)
+ 				dev_info(dev, "shutdown\n");
+ 			dev->bus->shutdown(dev);
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 7403de94832c..29a4ef08e051 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -729,7 +729,7 @@ static ssize_t driver_override_store(struct device *dev,
+ 				     const char *buf, size_t count)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
+-	char *driver_override, *old = pdev->driver_override, *cp;
++	char *driver_override, *old, *cp;
+ 
+ 	if (count > PATH_MAX)
+ 		return -EINVAL;
+@@ -742,12 +742,15 @@ static ssize_t driver_override_store(struct device *dev,
+ 	if (cp)
+ 		*cp = '\0';
+ 
++	device_lock(dev);
++	old = pdev->driver_override;
+ 	if (strlen(driver_override)) {
+ 		pdev->driver_override = driver_override;
+ 	} else {
+ 		kfree(driver_override);
+ 		pdev->driver_override = NULL;
+ 	}
++	device_unlock(dev);
+ 
+ 	kfree(old);
+ 
+@@ -758,8 +761,12 @@ static ssize_t driver_override_show(struct device *dev,
+ 				    struct device_attribute *attr, char *buf)
+ {
+ 	struct platform_device *pdev = to_platform_device(dev);
++	ssize_t len;
+ 
+-	return sprintf(buf, "%s\n", pdev->driver_override);
++	device_lock(dev);
++	len = sprintf(buf, "%s\n", pdev->driver_override);
++	device_unlock(dev);
++	return len;
+ }
+ static DEVICE_ATTR_RW(driver_override);
+ 
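
The store/show pair now swaps the override string under the device lock and frees the old copy only after dropping it, so a concurrent reader can never dereference freed memory. A userspace analogue of the same pattern, with a pthread mutex standing in for device_lock():

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static char *driver_override;		/* protected by lock */

	static void store_override(const char *buf)
	{
		char *new = strlen(buf) ? strdup(buf) : NULL;
		char *old;

		pthread_mutex_lock(&lock);
		old = driver_override;
		driver_override = new;
		pthread_mutex_unlock(&lock);

		free(old);			/* safe: no reader still holds it */
	}

	static void show_override(char *out, size_t len)
	{
		pthread_mutex_lock(&lock);
		snprintf(out, len, "%s\n", driver_override ? driver_override : "");
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		char buf[64];

		store_override("vfio-platform");
		show_override(buf, sizeof(buf));
		printf("%s", buf);
		store_override("");		/* clears the override */
		return 0;
	}
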
+diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
+index d2be3f9c211c..dcc09e3e5778 100644
+--- a/drivers/base/power/sysfs.c
++++ b/drivers/base/power/sysfs.c
+@@ -268,6 +268,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev,
+ 			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+ 		else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
+ 			value = PM_QOS_LATENCY_ANY;
++		else
++			return -EINVAL;
+ 	}
+ 	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
+ 	return ret < 0 ? ret : n;
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 9cd6968e2f92..d55156fc064d 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1714,13 +1714,15 @@ int random_int_secret_init(void)
+ 	return 0;
+ }
+ 
++static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
++		__aligned(sizeof(unsigned long));
++
+ /*
+  * Get a random word for internal kernel use only. Similar to urandom but
+  * with the goal of minimal entropy pool depletion. As a result, the random
+  * value is not cryptographically secure but for several uses the cost of
+  * depleting entropy is too high
+  */
+-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
+ unsigned int get_random_int(void)
+ {
+ 	__u32 *hash;
+@@ -1741,6 +1743,28 @@ unsigned int get_random_int(void)
+ EXPORT_SYMBOL(get_random_int);
+ 
+ /*
++ * Same as get_random_int(), but returns unsigned long.
++ */
++unsigned long get_random_long(void)
++{
++	__u32 *hash;
++	unsigned long ret;
++
++	if (arch_get_random_long(&ret))
++		return ret;
++
++	hash = get_cpu_var(get_random_int_hash);
++
++	hash[0] += current->pid + jiffies + random_get_entropy();
++	md5_transform(hash, random_int_secret);
++	ret = *(unsigned long *)hash;
++	put_cpu_var(get_random_int_hash);
++
++	return ret;
++}
++EXPORT_SYMBOL(get_random_long);
++
++/*
+  * randomize_range() returns a start address such that
+  *
+  *    [...... <range> .....]
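
get_random_long() is added because building a 64-bit value from get_random_int() either leaves the top half zero or costs two hash transforms. A userspace illustration of the difference; rand() merely stands in for the kernel generator, and an LP64 (64-bit long) platform is assumed:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	static uint32_t get_random_int_example(void)	/* stand-in, not the kernel API */
	{
		return (uint32_t)rand();
	}

	int main(void)
	{
		srand(42);
		unsigned long narrow = get_random_int_example();	/* top 32 bits zero */
		unsigned long wide   = ((unsigned long)get_random_int_example() << 32)
				       | get_random_int_example();	/* all 64 bits filled */

		printf("narrow: %#018lx\nwide:   %#018lx\n", narrow, wide);
		return 0;
	}
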
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 810b171b55b7..374b0006aa7a 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work)
+ {
+ 	struct ports_device *portdev;
+ 
+-	portdev = container_of(work, struct ports_device, control_work);
++	portdev = container_of(work, struct ports_device, config_work);
+ 	if (!use_multiport(portdev)) {
+ 		struct virtio_device *vdev;
+ 		struct port *port;
+diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
+index 25a70d06c5bf..55836a538a68 100644
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -204,8 +204,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
+ 	int ret;
+ 	ret = sscanf(buf, "%u", &input);
+ 
+-	/* cannot be lower than 11 otherwise freq will not fall */
+-	if (ret != 1 || input < 11 || input > 100 ||
++	/* cannot be lower than 1 otherwise freq will not fall */
++	if (ret != 1 || input < 1 || input > 100 ||
+ 			input >= cs_tuners->up_threshold)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
+index d6d425773fa4..5b2db3c6568f 100644
+--- a/drivers/cpufreq/s3c2416-cpufreq.c
++++ b/drivers/cpufreq/s3c2416-cpufreq.c
+@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+ 	rate = clk_get_rate(s3c_freq->hclk);
+ 	if (rate < 133 * 1000 * 1000) {
+ 		pr_err("cpufreq: HCLK not at 133MHz\n");
+-		clk_put(s3c_freq->hclk);
+ 		ret = -EINVAL;
+ 		goto err_armclk;
+ 	}
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 3178f84d2757..6bff78c5c032 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -963,7 +963,9 @@ static int atmel_sha_finup(struct ahash_request *req)
+ 	ctx->flags |= SHA_FLAGS_FINUP;
+ 
+ 	err1 = atmel_sha_update(req);
+-	if (err1 == -EINPROGRESS || err1 == -EBUSY)
++	if (err1 == -EINPROGRESS ||
++	    (err1 == -EBUSY && (ahash_request_flags(req) &
++				CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ 		return err1;
+ 
+ 	/*
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 9742b3d66288..fc6d2d568541 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -490,7 +490,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ 	if (!ret) {
+ 		/* in progress */
+-		wait_for_completion_interruptible(&result.completion);
++		wait_for_completion(&result.completion);
+ 		ret = result.err;
+ #ifdef DEBUG
+ 		print_hex_dump(KERN_ERR,
+diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
+index e1eaf4ff9762..3ce1d5cdcbd2 100644
+--- a/drivers/crypto/caam/key_gen.c
++++ b/drivers/crypto/caam/key_gen.c
+@@ -103,7 +103,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
+ 	ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
+ 	if (!ret) {
+ 		/* in progress */
+-		wait_for_completion_interruptible(&result.completion);
++		wait_for_completion(&result.completion);
+ 		ret = result.err;
+ #ifdef DEBUG
+ 		print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index f062158d4dc9..eb79d49ab88c 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -634,7 +634,7 @@ static void talitos_unregister_rng(struct device *dev)
+  * crypto alg
+  */
+ #define TALITOS_CRA_PRIORITY		3000
+-#define TALITOS_MAX_KEY_SIZE		96
++#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+ 
+ struct talitos_ctx {
+@@ -1322,6 +1322,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
+ {
+ 	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ 
++	if (keylen > TALITOS_MAX_KEY_SIZE) {
++		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
++		return -EINVAL;
++	}
++
+ 	memcpy(&ctx->key, key, keylen);
+ 	ctx->keylen = keylen;
+ 
+diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
+index 1638d39af595..023c08708b56 100644
+--- a/drivers/dma/ep93xx_dma.c
++++ b/drivers/dma/ep93xx_dma.c
+@@ -201,7 +201,6 @@ struct ep93xx_dma_engine {
+ 	struct dma_device	dma_dev;
+ 	bool			m2m;
+ 	int			(*hw_setup)(struct ep93xx_dma_chan *);
+-	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
+ 	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
+ 	void			(*hw_submit)(struct ep93xx_dma_chan *);
+ 	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
+@@ -336,27 +335,21 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+ 	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+ }
+ 
+-static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
++static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+ {
+-	unsigned long flags;
+ 	u32 control;
+ 
+-	spin_lock_irqsave(&edmac->lock, flags);
+ 	control = readl(edmac->regs + M2P_CONTROL);
+ 	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ 	m2p_set_control(edmac, control);
+-	spin_unlock_irqrestore(&edmac->lock, flags);
+ 
+ 	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+-		schedule();
+-}
++		cpu_relax();
+ 
+-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+-{
+ 	m2p_set_control(edmac, 0);
+ 
+-	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+-		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
++	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
++		cpu_relax();
+ }
+ 
+ static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+@@ -1172,26 +1165,6 @@ fail:
+ }
+ 
+ /**
+- * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+- * current context.
+- * @chan: channel
+- *
+- * Synchronizes the DMA channel termination to the current context. When this
+- * function returns it is guaranteed that all transfers for previously issued
+- * descriptors have stopped and it is safe to free the memory associated
+- * with them. Furthermore it is guaranteed that all complete callback functions
+- * for a previously submitted descriptor have finished running and it is safe to
+- * free resources accessed from within the complete callbacks.
+- */
+-static void ep93xx_dma_synchronize(struct dma_chan *chan)
+-{
+-	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+-
+-	if (edmac->edma->hw_synchronize)
+-		edmac->edma->hw_synchronize(edmac);
+-}
+-
+-/**
+  * ep93xx_dma_terminate_all - terminate all transactions
+  * @chan: channel
+  *
+@@ -1354,7 +1327,6 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
+ 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ 	dma_dev->device_config = ep93xx_dma_slave_config;
+-	dma_dev->device_synchronize = ep93xx_dma_synchronize;
+ 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
+ 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
+@@ -1372,7 +1344,6 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
+ 	} else {
+ 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+ 
+-		edma->hw_synchronize = m2p_hw_synchronize;
+ 		edma->hw_setup = m2p_hw_setup;
+ 		edma->hw_shutdown = m2p_hw_shutdown;
+ 		edma->hw_submit = m2p_hw_submit;
+diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
+index d33ea7ff8614..20e26b3a5a3d 100644
+--- a/drivers/gpu/drm/ast/ast_drv.h
++++ b/drivers/gpu/drm/ast/ast_drv.h
+@@ -113,7 +113,11 @@ struct ast_private {
+ 	struct ttm_bo_kmap_obj cache_kmap;
+ 	int next_cursor;
+ 	bool support_wide_screen;
+-	bool DisableP2A;
++	enum {
++		ast_use_p2a,
++		ast_use_dt,
++		ast_use_defaults
++	} config_mode;
+ 
+ 	enum ast_tx_chip tx_chip_type;
+ 	u8 dp501_maxclk;
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index dacfe512a93f..fd9a738ff882 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+ 	return ret;
+ }
+ 
++static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
++{
++	struct device_node *np = dev->pdev->dev.of_node;
++	struct ast_private *ast = dev->dev_private;
++	uint32_t data, jregd0, jregd1;
++
++	/* Defaults */
++	ast->config_mode = ast_use_defaults;
++	*scu_rev = 0xffffffff;
++
++	/* Check if we have device-tree properties */
++	if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
++					scu_rev)) {
++		/* We do, disable P2A access */
++		ast->config_mode = ast_use_dt;
++		DRM_INFO("Using device-tree for configuration\n");
++		return;
++	}
++
++	/* Not all families have a P2A bridge */
++	if (dev->pdev->device != PCI_CHIP_AST2000)
++		return;
++
++	/*
++	 * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
++	 * is disabled. We force using P2A if VGA only mode bit
++	 * is set D[7]
++	 */
++	jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
++	jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
++	if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
++		/* Double check it's actually working */
++		data = ast_read32(ast, 0xf004);
++		if (data != 0xFFFFFFFF) {
++			/* P2A works, grab silicon revision */
++			ast->config_mode = ast_use_p2a;
++
++			DRM_INFO("Using P2A bridge for configuration\n");
++
++			/* Read SCU7c (silicon revision register) */
++			ast_write32(ast, 0xf004, 0x1e6e0000);
++			ast_write32(ast, 0xf000, 0x1);
++			*scu_rev = ast_read32(ast, 0x1207c);
++			return;
++		}
++	}
++
++	/* We have a P2A bridge but it's disabled */
++	DRM_INFO("P2A bridge disabled, using default configuration\n");
++}
+ 
+ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ {
+ 	struct ast_private *ast = dev->dev_private;
+-	uint32_t data, jreg;
++	uint32_t jreg, scu_rev;
++
++	/*
++	 * If VGA isn't enabled, we need to enable now or subsequent
++	 * access to the scratch registers will fail. We also inform
++	 * our caller that it needs to POST the chip
++	 * (Assumption: VGA not enabled -> need to POST)
++	 */
++	if (!ast_is_vga_enabled(dev)) {
++		ast_enable_vga(dev);
++		DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
++		*need_post = true;
++	} else
++		*need_post = false;
++
++
++	/* Enable extended register access */
++	ast_enable_mmio(dev);
+ 	ast_open_key(ast);
+ 
++	/* Find out whether P2A works or whether to use device-tree */
++	ast_detect_config_mode(dev, &scu_rev);
++
++	/* Identify chipset */
+ 	if (dev->pdev->device == PCI_CHIP_AST1180) {
+ 		ast->chip = AST1100;
+ 		DRM_INFO("AST 1180 detected\n");
+@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ 			ast->chip = AST2300;
+ 			DRM_INFO("AST 2300 detected\n");
+ 		} else if (dev->pdev->revision >= 0x10) {
+-			uint32_t data;
+-			ast_write32(ast, 0xf004, 0x1e6e0000);
+-			ast_write32(ast, 0xf000, 0x1);
+-
+-			data = ast_read32(ast, 0x1207c);
+-			switch (data & 0x0300) {
++			switch (scu_rev & 0x0300) {
+ 			case 0x0200:
+ 				ast->chip = AST1100;
+ 				DRM_INFO("AST 1100 detected\n");
+@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ 		}
+ 	}
+ 
+-	/*
+-	 * If VGA isn't enabled, we need to enable now or subsequent
+-	 * access to the scratch registers will fail. We also inform
+-	 * our caller that it needs to POST the chip
+-	 * (Assumption: VGA not enabled -> need to POST)
+-	 */
+-	if (!ast_is_vga_enabled(dev)) {
+-		ast_enable_vga(dev);
+-		ast_enable_mmio(dev);
+-		DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+-		*need_post = true;
+-	} else
+-		*need_post = false;
+-
+-	/* Check P2A Access */
+-	ast->DisableP2A = true;
+-	data = ast_read32(ast, 0xf004);
+-	if (data != 0xFFFFFFFF)
+-		ast->DisableP2A = false;
+-
+ 	/* Check if we support wide screen */
+ 	switch (ast->chip) {
+ 	case AST1180:
+@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ 			ast->support_wide_screen = true;
+ 		else {
+ 			ast->support_wide_screen = false;
+-			if (ast->DisableP2A == false) {
+-				/* Read SCU7c (silicon revision register) */
+-				ast_write32(ast, 0xf004, 0x1e6e0000);
+-				ast_write32(ast, 0xf000, 0x1);
+-				data = ast_read32(ast, 0x1207c);
+-				data &= 0x300;
+-				if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+-					ast->support_wide_screen = true;
+-				if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+-					ast->support_wide_screen = true;
+-			}
++			if (ast->chip == AST2300 &&
++			    (scu_rev & 0x300) == 0x0) /* ast1300 */
++				ast->support_wide_screen = true;
++			if (ast->chip == AST2400 &&
++			    (scu_rev & 0x300) == 0x100) /* ast1400 */
++				ast->support_wide_screen = true;
+ 		}
+ 		break;
+ 	}
+@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
+ 
+ static int ast_get_dram_info(struct drm_device *dev)
+ {
++	struct device_node *np = dev->pdev->dev.of_node;
+ 	struct ast_private *ast = dev->dev_private;
+-	uint32_t data, data2;
+-	uint32_t denum, num, div, ref_pll;
++	uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
++	uint32_t denum, num, div, ref_pll, dsel;
+ 
+-	if (ast->DisableP2A)
+-	{
++	switch (ast->config_mode) {
++	case ast_use_dt:
++		/*
++		 * If some properties are missing, use reasonable
++		 * defaults for AST2400
++		 */
++		if (of_property_read_u32(np, "aspeed,mcr-configuration",
++					 &mcr_cfg))
++			mcr_cfg = 0x00000577;
++		if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
++					 &mcr_scu_mpll))
++			mcr_scu_mpll = 0x000050C0;
++		if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
++					 &mcr_scu_strap))
++			mcr_scu_strap = 0;
++		break;
++	case ast_use_p2a:
++		ast_write32(ast, 0xf004, 0x1e6e0000);
++		ast_write32(ast, 0xf000, 0x1);
++		mcr_cfg = ast_read32(ast, 0x10004);
++		mcr_scu_mpll = ast_read32(ast, 0x10120);
++		mcr_scu_strap = ast_read32(ast, 0x10170);
++		break;
++	case ast_use_defaults:
++	default:
+ 		ast->dram_bus_width = 16;
+ 		ast->dram_type = AST_DRAM_1Gx16;
+ 		ast->mclk = 396;
++		return 0;
+ 	}
+-	else
+-	{
+-		ast_write32(ast, 0xf004, 0x1e6e0000);
+-		ast_write32(ast, 0xf000, 0x1);
+-		data = ast_read32(ast, 0x10004);
+-
+-		if (data & 0x40)
+-			ast->dram_bus_width = 16;
+-		else
+-			ast->dram_bus_width = 32;
+ 
+-		if (ast->chip == AST2300 || ast->chip == AST2400) {
+-			switch (data & 0x03) {
+-			case 0:
+-				ast->dram_type = AST_DRAM_512Mx16;
+-				break;
+-			default:
+-			case 1:
+-				ast->dram_type = AST_DRAM_1Gx16;
+-				break;
+-			case 2:
+-				ast->dram_type = AST_DRAM_2Gx16;
+-				break;
+-			case 3:
+-				ast->dram_type = AST_DRAM_4Gx16;
+-				break;
+-			}
+-		} else {
+-			switch (data & 0x0c) {
+-			case 0:
+-			case 4:
+-				ast->dram_type = AST_DRAM_512Mx16;
+-				break;
+-			case 8:
+-				if (data & 0x40)
+-					ast->dram_type = AST_DRAM_1Gx16;
+-				else
+-					ast->dram_type = AST_DRAM_512Mx32;
+-				break;
+-			case 0xc:
+-				ast->dram_type = AST_DRAM_1Gx32;
+-				break;
+-			}
+-		}
++	if (mcr_cfg & 0x40)
++		ast->dram_bus_width = 16;
++	else
++		ast->dram_bus_width = 32;
+ 
+-		data = ast_read32(ast, 0x10120);
+-		data2 = ast_read32(ast, 0x10170);
+-		if (data2 & 0x2000)
+-			ref_pll = 14318;
+-		else
+-			ref_pll = 12000;
+-
+-		denum = data & 0x1f;
+-		num = (data & 0x3fe0) >> 5;
+-		data = (data & 0xc000) >> 14;
+-		switch (data) {
+-		case 3:
+-			div = 0x4;
++	if (ast->chip == AST2300 || ast->chip == AST2400) {
++		switch (mcr_cfg & 0x03) {
++		case 0:
++			ast->dram_type = AST_DRAM_512Mx16;
+ 			break;
+-		case 2:
++		default:
+ 		case 1:
+-			div = 0x2;
++			ast->dram_type = AST_DRAM_1Gx16;
+ 			break;
+-		default:
+-			div = 0x1;
++		case 2:
++			ast->dram_type = AST_DRAM_2Gx16;
++			break;
++		case 3:
++			ast->dram_type = AST_DRAM_4Gx16;
++			break;
++		}
++	} else {
++		switch (mcr_cfg & 0x0c) {
++		case 0:
++		case 4:
++			ast->dram_type = AST_DRAM_512Mx16;
++			break;
++		case 8:
++			if (mcr_cfg & 0x40)
++				ast->dram_type = AST_DRAM_1Gx16;
++			else
++				ast->dram_type = AST_DRAM_512Mx32;
++			break;
++		case 0xc:
++			ast->dram_type = AST_DRAM_1Gx32;
+ 			break;
+ 		}
+-		ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
+ 	}
++
++	if (mcr_scu_strap & 0x2000)
++		ref_pll = 14318;
++	else
++		ref_pll = 12000;
++
++	denum = mcr_scu_mpll & 0x1f;
++	num = (mcr_scu_mpll & 0x3fe0) >> 5;
++	dsel = (mcr_scu_mpll & 0xc000) >> 14;
++	switch (dsel) {
++	case 3:
++		div = 0x4;
++		break;
++	case 2:
++	case 1:
++		div = 0x2;
++		break;
++	default:
++		div = 0x1;
++		break;
++	}
++	ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
+index 270e8fb2803f..c7c58becb25d 100644
+--- a/drivers/gpu/drm/ast/ast_post.c
++++ b/drivers/gpu/drm/ast/ast_post.c
+@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
+ 	ast_enable_mmio(dev);
+ 	ast_set_def_ext_reg(dev);
+ 
+-	if (ast->DisableP2A == false)
+-	{
++	if (ast->config_mode == ast_use_p2a) {
+ 		if (ast->chip == AST2300 || ast->chip == AST2400)
+ 			ast_init_dram_2300(dev);
+ 		else
+ 			ast_init_dram_reg(dev);
+ 
+ 		ast_init_3rdtx(dev);
+-	}
+-	else
+-	{
++	} else {
+ 		if (ast->tx_chip_type != AST_TX_NONE)
+ 			ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);	/* Enable DVO */
+ 	}
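
Taken together, the ast changes reduce to a three-way fallback: trust device-tree properties when the BMC provides them, probe the P2A bridge otherwise, and use conservative defaults when neither is available. A compact sketch of that decision order (illustrative names, not the driver's API):

	#include <stdbool.h>
	#include <stdio.h>

	enum config_mode { USE_DT, USE_P2A, USE_DEFAULTS };

	static enum config_mode detect_config_mode(bool have_dt_props, bool p2a_responds)
	{
		if (have_dt_props)
			return USE_DT;		/* BMC-provided values, no P2A pokes */
		if (p2a_responds)
			return USE_P2A;		/* bridge readable: scrape SCU registers */
		return USE_DEFAULTS;		/* neither: safe AST2400-style values */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       detect_config_mode(true, true),		/* 0: USE_DT */
		       detect_config_mode(false, true),		/* 1: USE_P2A */
		       detect_config_mode(false, false));	/* 2: USE_DEFAULTS */
		return 0;
	}
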
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index a9b01bcf7d0a..fcecaf5b5526 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3394,6 +3394,13 @@ void radeon_combios_asic_init(struct drm_device *dev)
+ 	    rdev->pdev->subsystem_vendor == 0x103c &&
+ 	    rdev->pdev->subsystem_device == 0x280a)
+ 		return;
++	/* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
++	 * - it hangs on resume inside the dynclk 1 table.
++	 */
++	if (rdev->family == CHIP_RS400 &&
++	    rdev->pdev->subsystem_vendor == 0x1179 &&
++	    rdev->pdev->subsystem_device == 0xff31)
++	        return;
+ 
+ 	/* DYN CLK 1 */
+ 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 83b3eb2e444a..3c74c60fb8ea 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -127,6 +127,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
+ 	 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
++	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
++	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
++	 */
++	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ 	/* macbook pro 8.2 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
+ 	{ 0, 0, 0, 0, 0 },
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+index 21e9b7f8dad0..c3b8ebac18c2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+@@ -317,6 +317,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
+ 	list_for_each_entry_safe(entry, next, &man->list, head)
+ 		vmw_cmdbuf_res_free(man, entry);
+ 
++	drm_ht_remove(&man->resources);
+ 	kfree(man);
+ }
+ 
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bf039dbaa7eb..07a963039b60 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1228,6 +1228,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
+ 		/* Ignore report if ErrorRollOver */
+ 		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ 		    value[n] >= min && value[n] <= max &&
++		    value[n] - min < field->maxusage &&
+ 		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
+ 			goto exit;
+ 	}
+@@ -1240,11 +1241,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
+ 		}
+ 
+ 		if (field->value[n] >= min && field->value[n] <= max
++			&& field->value[n] - min < field->maxusage
+ 			&& field->usage[field->value[n] - min].hid
+ 			&& search(value, field->value[n], count))
+ 				hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
+ 
+ 		if (value[n] >= min && value[n] <= max
++			&& value[n] - min < field->maxusage
+ 			&& field->usage[value[n] - min].hid
+ 			&& search(field->value, value[n], count))
+ 				hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
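
Each hunk adds the same guard: a device-supplied value may only index field->usage[] after checking it against maxusage, because the declared min/max range from a broken or malicious device can overstate the array. The pattern in isolation:

	#include <stdio.h>

	#define MAXUSAGE 8

	static int usage[MAXUSAGE];	/* stand-in for field->usage[] */

	static int lookup(int value, int min, int max)
	{
		if (value < min || value > max)
			return -1;		/* outside the declared range */
		if (value - min >= MAXUSAGE)
			return -1;		/* range overstates the array: reject */
		return usage[value - min];
	}

	int main(void)
	{
		printf("%d %d\n", lookup(3, 0, 255), lookup(100, 0, 255));
		return 0;
	}
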
+diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
+index c4c9d9523694..1ec738292a1a 100644
+--- a/drivers/hid/i2c-hid/i2c-hid.c
++++ b/drivers/hid/i2c-hid/i2c-hid.c
+@@ -362,6 +362,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 	if (ret)
+ 		return ret;
+ 
++	/*
++	 * The HID over I2C specification states that if a DEVICE needs time
++	 * after the PWR_ON request, it should utilise CLOCK stretching.
++	 * However, it has been observered that the Windows driver provides a
++	 * 1ms sleep between the PWR_ON and RESET requests and that some devices
++	 * rely on this.
++	 */
++	usleep_range(1000, 5000);
++
+ 	i2c_hid_dbg(ihid, "resetting...\n");
+ 
+ 	ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
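
A toy sequence showing where the new delay sits; both command helpers are stand-ins, not the driver's functions:

	#include <stdio.h>
	#include <unistd.h>

	static int send_pwr_on(void) { puts("PWR_ON sent"); return 0; }	/* stand-in */
	static int send_reset(void)  { puts("RESET sent");  return 0; }	/* stand-in */

	int main(void)
	{
		if (send_pwr_on())
			return 1;
		usleep(1000);	/* >= 1 ms settle time, mirroring the Windows driver */
		return send_reset();
	}
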
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 785570272505..1f40cdc1b357 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -698,6 +698,13 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
+ 		},
+ 	},
++	{
++		/* Fujitsu UH554 laptop */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
+index 45087c3e5c57..d29c499375cb 100644
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -675,9 +675,9 @@ out_clear_state:
+ 
+ out_unregister:
+ 	mmu_notifier_unregister(&pasid_state->mn, mm);
++	mmput(mm);
+ 
+ out_free:
+-	mmput(mm);
+ 	free_pasid_state(pasid_state);
+ 
+ out:
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index b85a8614c128..275f59071f56 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -965,7 +965,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
+ 		if (!dma_pte_present(pte) || dma_pte_superpage(pte))
+ 			goto next;
+ 
+-		level_pfn = pfn & level_mask(level - 1);
++		level_pfn = pfn & level_mask(level);
+ 		level_pte = phys_to_virt(dma_pte_addr(pte));
+ 
+ 		if (level > 2)
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index f1b15a0b3774..9976c37b9c64 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -612,6 +612,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ 	int enabled;
+ 	u64 val;
+ 
++	if (cpu >= nr_cpu_ids)
++		return -EINVAL;
++
+ 	if (gic_irq_in_rdist(d))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7453c3ed4b8f..1fdcd5735418 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1852,7 +1852,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
+ 	}
+ 	sb = page_address(rdev->sb_page);
+ 	sb->data_size = cpu_to_le64(num_sectors);
+-	sb->super_offset = rdev->sb_start;
++	sb->super_offset = cpu_to_le64(rdev->sb_start);
+ 	sb->sb_csum = calc_sb_1_csum(sb);
+ 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+ 		       rdev->sb_page);
+diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
+index f4da674e7f26..ae651007ee0f 100644
+--- a/drivers/media/pci/saa7134/saa7134-i2c.c
++++ b/drivers/media/pci/saa7134/saa7134-i2c.c
+@@ -350,12 +350,43 @@ static struct i2c_client saa7134_client_template = {
+ 
+ /* ----------------------------------------------------------- */
+ 
++/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
++static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
++{
++	u8 subaddr = 0x7, dmdregval;
++	u8 data[2];
++	int ret;
++	struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
++					   .buf = &subaddr, .len = 1},
++					  {.addr = 0x08,
++					   .flags = I2C_M_RD,
++					   .buf = &dmdregval, .len = 1}
++					};
++	struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
++					   .buf = data, .len = 2} };
++
++	ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
++	if ((ret == 2) && (dmdregval & 0x2)) {
++		pr_debug("%s: DVB-T demod i2c gate was left closed\n",
++			 dev->name);
++
++		data[0] = subaddr;
++		data[1] = (dmdregval & ~0x2);
++		if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
++			pr_err("%s: EEPROM i2c gate open failure\n",
++			  dev->name);
++	}
++}
++
+ static int
+ saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
+ {
+ 	unsigned char buf;
+ 	int i,err;
+ 
++	if (dev->board == SAA7134_BOARD_MD7134)
++		saa7134_i2c_eeprom_md7134_gate(dev);
++
+ 	dev->i2c_client.addr = 0xa0 >> 1;
+ 	buf = 0;
+ 	if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
+diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
+index 5abab8800891..9190057535e6 100644
+--- a/drivers/mtd/bcm47xxpart.c
++++ b/drivers/mtd/bcm47xxpart.c
+@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
+ {
+ 	uint32_t buf;
+ 	size_t bytes_read;
++	int err;
+ 
+-	if (mtd_read(master, offset, sizeof(buf), &bytes_read,
+-		     (uint8_t *)&buf) < 0) {
+-		pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+-			offset);
++	err  = mtd_read(master, offset, sizeof(buf), &bytes_read,
++			(uint8_t *)&buf);
++	if (err && !mtd_is_bitflip(err)) {
++		pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
++			offset, err);
+ 		goto out_default;
+ 	}
+ 
+@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 	int trx_part = -1;
+ 	int last_trx_part = -1;
+ 	int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
++	int err;
+ 
+ 	/*
+ 	 * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
+@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 	/* Parse block by block looking for magics */
+ 	for (offset = 0; offset <= master->size - blocksize;
+ 	     offset += blocksize) {
+-		/* Nothing more in higher memory */
+-		if (offset >= 0x2000000)
++		/* Nothing more in higher memory on BCM47XX (MIPS) */
++		if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
+ 			break;
+ 
+ 		if (curr_part >= BCM47XXPART_MAX_PARTS) {
+@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 		}
+ 
+ 		/* Read beginning of the block */
+-		if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+-			     &bytes_read, (uint8_t *)buf) < 0) {
+-			pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+-			       offset);
++		err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
++			       &bytes_read, (uint8_t *)buf);
++		if (err && !mtd_is_bitflip(err)) {
++			pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
++			       offset, err);
+ 			continue;
+ 		}
+ 
+@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 		}
+ 
+ 		/* Read middle of the block */
+-		if (mtd_read(master, offset + 0x8000, 0x4,
+-			     &bytes_read, (uint8_t *)buf) < 0) {
+-			pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
+-			       offset);
++		err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
++			       (uint8_t *)buf);
++		if (err && !mtd_is_bitflip(err)) {
++			pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
++			       offset, err);
+ 			continue;
+ 		}
+ 
+@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
+ 		}
+ 
+ 		offset = master->size - possible_nvram_sizes[i];
+-		if (mtd_read(master, offset, 0x4, &bytes_read,
+-			     (uint8_t *)buf) < 0) {
+-			pr_err("mtd_read error while reading at offset 0x%X!\n",
+-			       offset);
++		err = mtd_read(master, offset, 0x4, &bytes_read,
++			       (uint8_t *)buf);
++		if (err && !mtd_is_bitflip(err)) {
++			pr_err("mtd_read error while reading (offset 0x%X): %d\n",
++			       offset, err);
+ 			continue;
+ 		}
+ 
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 14a5d2325dac..ac2d68d6d446 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -900,6 +900,13 @@ static int spansion_quad_enable(struct spi_nor *nor)
+ 		return -EINVAL;
+ 	}
+ 
++	ret = spi_nor_wait_till_ready(nor);
++	if (ret) {
++		dev_err(nor->dev,
++			"timeout while writing configuration register\n");
++		return ret;
++	}
++
+ 	/* read back and check it */
+ 	ret = read_cr(nor);
+ 	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 21d9497518fd..8b3c60b1f486 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -2779,8 +2779,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ 
+ 	/* Flush Tx queues */
+ 	ret = xgbe_flush_tx_queues(pdata);
+-	if (ret)
++	if (ret) {
++		netdev_err(pdata->netdev, "error flushing TX queues\n");
+ 		return ret;
++	}
+ 
+ 	/*
+ 	 * Initialize DMA related features
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 9fd6c69a8bac..eea5b58496a3 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -951,7 +951,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+ 
+ 	DBGPR("-->xgbe_start\n");
+ 
+-	hw_if->init(pdata);
++	ret = hw_if->init(pdata);
++	if (ret)
++		return ret;
+ 
+ 	phy_start(pdata->phydev);
+ 
+diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
+index 21e3c38c7c75..6f0aad85c524 100644
+--- a/drivers/net/ethernet/broadcom/bgmac.c
++++ b/drivers/net/ethernet/broadcom/bgmac.c
+@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
+ 	while (ring->start != ring->end) {
+ 		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
+ 		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
+-		u32 ctl1;
++		u32 ctl0, ctl1;
+ 		int len;
+ 
+ 		if (slot_idx == empty_slot)
+ 			break;
+ 
++		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
+ 		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
+ 		len = ctl1 & BGMAC_DESC_CTL1_LEN;
+-		if (ctl1 & BGMAC_DESC_CTL0_SOF)
++		if (ctl0 & BGMAC_DESC_CTL0_SOF)
+ 			/* Unmap no longer used buffer */
+ 			dma_unmap_single(dma_dev, slot->dma_addr, len,
+ 					 DMA_TO_DEVICE);
+@@ -466,6 +467,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ 			len -= ETH_FCS_LEN;
+ 
+ 			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
++			if (unlikely(!skb)) {
++				bgmac_err(bgmac, "build_skb failed\n");
++				put_page(virt_to_head_page(buf));
++				break;
++			}
+ 			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
+ 				BGMAC_RX_BUF_OFFSET + len);
+ 			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
+@@ -1299,7 +1305,8 @@ static int bgmac_open(struct net_device *net_dev)
+ 
+ 	phy_start(bgmac->phy_dev);
+ 
+-	netif_carrier_on(net_dev);
++	netif_start_queue(net_dev);
++
+ 	return 0;
+ }
+ 
+@@ -1564,6 +1571,11 @@ static int bgmac_probe(struct bcma_device *core)
+ 		dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
+ 	}
+ 
++	/* This (reset &) enable is not present in specs or reference driver but
++	 * Broadcom does it in arch PCI code when enabling fake PCI device.
++	 */
++	bcma_core_enable(core, 0);
++
+ 	/* Allocation and references */
+ 	net_dev = alloc_etherdev(sizeof(*bgmac));
+ 	if (!net_dev)
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index c5e1d0ac75f9..8f3ef77902b9 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -1017,7 +1017,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+ err:
+ 	spin_unlock_bh(&adapter->mcc_lock);
+ 
+-	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
++	 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ 		status = -EPERM;
+ 
+ 	return status;
+diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
+index d74f5f4e5782..07eabf72c480 100644
+--- a/drivers/net/ethernet/korina.c
++++ b/drivers/net/ethernet/korina.c
+@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
+ 				DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
+ 				&lp->rx_dma_regs->dmasm);
+ 
+-	korina_free_ring(dev);
+-
+ 	napi_disable(&lp->napi);
+ 
++	korina_free_ring(dev);
++
+ 	if (korina_init(dev) < 0) {
+ 		printk(KERN_ERR "%s: cannot restart device\n", dev->name);
+ 		return;
+@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
+ 	tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
+ 	writel(tmp, &lp->rx_dma_regs->dmasm);
+ 
+-	korina_free_ring(dev);
+-
+ 	napi_disable(&lp->napi);
+ 
+ 	cancel_work_sync(&lp->restart_task);
+ 
++	korina_free_ring(dev);
++
+ 	free_irq(lp->rx_irq, dev);
+ 	free_irq(lp->tx_irq, dev);
+ 	free_irq(lp->ovr_irq, dev);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 337811d208bd..fdc129151b18 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -514,8 +514,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 			break;
+ 
+ 		case MLX4_EVENT_TYPE_SRQ_LIMIT:
+-			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+-				 __func__);
++			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
++				 __func__, be32_to_cpu(eqe->event.srq.srqn),
++				 eq->eqn);
+ 		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
+ 			if (mlx4_is_master(dev)) {
+ 				/* forward only to slave owning the SRQ */
+@@ -530,15 +531,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 						  eq->eqn, eq->cons_index, ret);
+ 					break;
+ 				}
+-				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+-					  __func__, slave,
+-					  be32_to_cpu(eqe->event.srq.srqn),
+-					  eqe->type, eqe->subtype);
++				if (eqe->type ==
++				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
++					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
++						  __func__, slave,
++						  be32_to_cpu(eqe->event.srq.srqn),
++						  eqe->type, eqe->subtype);
+ 
+ 				if (!ret && slave != dev->caps.function) {
+-					mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+-						  __func__, eqe->type,
+-						  eqe->subtype, slave);
++					if (eqe->type ==
++					    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
++						mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
++							  __func__, eqe->type,
++							  eqe->subtype, slave);
+ 					mlx4_slave_event(dev, slave, eqe);
+ 					break;
+ 				}
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index c56cf0b86f2c..09e3e0d8412e 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -895,7 +895,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 	if (overflow) {
+ 		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+ 		while (skb) {
+-			skb_complete_tx_timestamp(skb, NULL);
++			kfree_skb(skb);
+ 			skb = skb_dequeue(&dp83640->tx_queue);
+ 		}
+ 		return;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index ef36e8c70b4d..64ca961bca18 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
+  */
+ #define RECEIVE_AVG_WEIGHT 64
+ 
++/* With mergeable buffers we align buffer address and use the low bits to
++ * encode its true size. Buffer size is up to 1 page so we need to align to
++ * square root of page size to ensure we reserve enough bits to encode the true
++ * size.
++ */
++#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
++
+ /* Minimum alignment for mergeable packet buffers. */
+-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
++#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
++				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
+ 
+ #define VIRTNET_DRIVER_VERSION "1.0.0"
+ 
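
The comment above describes a pointer-tagging scheme: buffers are aligned to at least 2^((PAGE_SHIFT + 1) / 2) bytes, so the low bits of the address are free to carry the buffer's true size in alignment-sized units. A simplified round-trip showing only the tagging idea (the real driver packs the size slightly differently; 4 KiB pages assumed):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define ALIGN_SHIFT	((PAGE_SHIFT + 1) / 2)	/* 6 -> 64-byte alignment */
	#define ALIGN_BYTES	(1UL << ALIGN_SHIFT)

	int main(void)
	{
		uintptr_t buf = 0x7f0000001000UL;	/* 64-byte-aligned buffer address */
		size_t truesize = 1536;			/* multiple of ALIGN_BYTES */

		uintptr_t ctx  = buf | (truesize >> ALIGN_SHIFT);		/* encode */
		uintptr_t addr = ctx & ~(uintptr_t)(ALIGN_BYTES - 1);		/* address back */
		size_t    size = (size_t)(ctx & (ALIGN_BYTES - 1)) << ALIGN_SHIFT; /* size back */

		printf("addr=%#lx size=%zu\n", (unsigned long)addr, size);
		return 0;
	}
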
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index d9e873c3a273..422a9379a644 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2136,7 +2136,7 @@ static void vxlan_cleanup(unsigned long arg)
+ 				= container_of(p, struct vxlan_fdb, hlist);
+ 			unsigned long timeout;
+ 
+-			if (f->state & NUD_PERMANENT)
++			if (f->state & (NUD_PERMANENT | NUD_NOARP))
+ 				continue;
+ 
+ 			timeout = f->used + vxlan->age_interval * HZ;
+diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+index 8a15ebbce4a3..c304b66af5c6 100644
+--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
++++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+@@ -4384,6 +4384,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ 		cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true,
+ 					GFP_KERNEL);
+ 	} else if (ieee80211_is_action(mgmt->frame_control)) {
++		if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) {
++			brcmf_err("invalid action frame length\n");
++			err = -EINVAL;
++			goto exit;
++		}
+ 		af_params = kzalloc(sizeof(*af_params), GFP_KERNEL);
+ 		if (af_params == NULL) {
+ 			brcmf_err("unable to allocate frame\n");
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index eafaeb01aa3e..cdbad7d72afa 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2538,7 +2538,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
+ 
+ 	tasklet_hrtimer_init(&data->beacon_timer,
+ 			     mac80211_hwsim_beacon,
+-			     CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
++			     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ 
+ 	spin_lock_bh(&hwsim_radio_lock);
+ 	list_add_tail(&data->list, &hwsim_radios);
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index ea1be52f5515..8a38a5bd34b8 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -305,7 +305,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+ 		queue->rx_skbs[id] = skb;
+ 
+ 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
+-		BUG_ON((signed short)ref < 0);
++		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
+ 		queue->grant_rx_ref[id] = ref;
+ 
+ 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+@@ -323,7 +323,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
+ 	queue->rx.req_prod_pvt = req_prod;
+ 
+ 	/* Not enough requests? Try again later. */
+-	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
++	if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
+ 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
+ 		return;
+ 	}
+@@ -429,7 +429,7 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
+ 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+ 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
+ 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
+-	BUG_ON((signed short)ref < 0);
++	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
+ 
+ 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
+ 					page_to_mfn(page), GNTMAP_readonly);
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 63ea1e5b1c95..bf89754fe973 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -618,9 +618,12 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node,
+ 	const char *pathp;
+ 	int offset, rc = 0, depth = -1;
+ 
+-        for (offset = fdt_next_node(blob, -1, &depth);
+-             offset >= 0 && depth >= 0 && !rc;
+-             offset = fdt_next_node(blob, offset, &depth)) {
++	if (!blob)
++		return 0;
++
++	for (offset = fdt_next_node(blob, -1, &depth);
++	     offset >= 0 && depth >= 0 && !rc;
++	     offset = fdt_next_node(blob, offset, &depth)) {
+ 
+ 		pathp = fdt_get_name(blob, offset, NULL);
+ 		if (*pathp == '/')
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index 02ff84fcfa61..635e7c3a24ad 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -743,6 +743,8 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
+ 
+ 	BUG_ON(!dev);
+ 	ioc = GET_IOC(dev);
++	if (!ioc)
++		return DMA_ERROR_CODE;
+ 
+ 	BUG_ON(size <= 0);
+ 
+@@ -807,6 +809,10 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+ 	
+ 	BUG_ON(!dev);
+ 	ioc = GET_IOC(dev);
++	if (!ioc) {
++		WARN_ON(!ioc);
++		return;
++	}
+ 
+ 	DBG_RUN("%s() iovp 0x%lx/%x\n",
+ 		__func__, (long)iova, size);
+@@ -910,6 +916,8 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ 	
+ 	BUG_ON(!dev);
+ 	ioc = GET_IOC(dev);
++	if (!ioc)
++		return 0;
+ 	
+ 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
+ 
+@@ -982,6 +990,10 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ 
+ 	BUG_ON(!dev);
+ 	ioc = GET_IOC(dev);
++	if (!ioc) {
++		WARN_ON(!ioc);
++		return;
++	}
+ 
+ 	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
+ 		__func__, nents, sg_virt(sglist), sglist->length);
+diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
+index a0580afe1713..7b0ca1551d7b 100644
+--- a/drivers/parisc/dino.c
++++ b/drivers/parisc/dino.c
+@@ -154,7 +154,10 @@ struct dino_device
+ };
+ 
+ /* Looks nice and keeps the compiler happy */
+-#define DINO_DEV(d) ((struct dino_device *) d)
++#define DINO_DEV(d) ({				\
++	void *__pdata = d;			\
++	BUG_ON(!__pdata);			\
++	(struct dino_device *)__pdata; })
+ 
+ 
+ /*
+diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
+index a32c1f6c252c..3901ff66d0ee 100644
+--- a/drivers/parisc/lba_pci.c
++++ b/drivers/parisc/lba_pci.c
+@@ -111,8 +111,10 @@ static u32 lba_t32;
+ 
+ 
+ /* Looks nice and keeps the compiler happy */
+-#define LBA_DEV(d) ((struct lba_device *) (d))
+-
++#define LBA_DEV(d) ({				\
++	void *__pdata = d;			\
++	BUG_ON(!__pdata);			\
++	(struct lba_device *)__pdata; })
+ 
+ /*
+ ** Only allow 8 subsidiary busses per LBA
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index f1441e466c06..d3243071509a 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -691,6 +691,8 @@ static int sba_dma_supported( struct device *dev, u64 mask)
+ 		return 0;
+ 
+ 	ioc = GET_IOC(dev);
++	if (!ioc)
++		return 0;
+ 
+ 	/*
+ 	 * check if mask is >= than the current max IO Virt Address
+@@ -722,6 +724,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
+ 	int pide;
+ 
+ 	ioc = GET_IOC(dev);
++	if (!ioc)
++		return DMA_ERROR_CODE;
+ 
+ 	/* save offset bits */
+ 	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
+@@ -803,6 +807,10 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+ 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
+ 
+ 	ioc = GET_IOC(dev);
++	if (!ioc) {
++		WARN_ON(!ioc);
++		return;
++	}
+ 	offset = iova & ~IOVP_MASK;
+ 	iova ^= offset;        /* clear offset bits */
+ 	size += offset;
+@@ -942,6 +950,8 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
+ 
+ 	ioc = GET_IOC(dev);
++	if (!ioc)
++		return 0;
+ 
+ 	/* Fast path single entry scatterlists. */
+ 	if (nents == 1) {
+@@ -1027,6 +1037,10 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ 		__func__, nents, sg_virt(sglist), sglist->length);
+ 
+ 	ioc = GET_IOC(dev);
++	if (!ioc) {
++		WARN_ON(!ioc);
++		return;
++	}
+ 
+ #ifdef SBA_COLLECT_STATS
+ 	ioc->usg_calls++;
+diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
+index 646d5c244af1..496075928af9 100644
+--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
++++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
+@@ -195,6 +195,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+ 	return 0;
+ }
+ 
++static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
++{
++	u32 tmp;
++
++	tmp = readl(reg);
++	tmp &= ~(mask << shift);
++	tmp |= value << shift;
++	writel(tmp, reg);
++}
++
+ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ 			       unsigned group)
+ {
+@@ -212,8 +222,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
+ 		reg += bank * 0x20 + pin / 16 * 0x10;
+ 		shift = pin % 16 * 2;
+ 
+-		writel(0x3 << shift, reg + CLR);
+-		writel(g->muxsel[i] << shift, reg + SET);
++		mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
+ 	}
+ 
+ 	return 0;
+@@ -280,8 +289,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
+ 			/* mA */
+ 			if (config & MA_PRESENT) {
+ 				shift = pin % 8 * 4;
+-				writel(0x3 << shift, reg + CLR);
+-				writel(ma << shift, reg + SET);
++				mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
+ 			}
+ 
+ 			/* vol */
+diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
+index 9677807db364..b505b87661f8 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
++++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
+@@ -732,8 +732,8 @@ static const char * const sdxc_c_groups[] = {
+ static const char * const nand_groups[] = {
+ 	"nand_io", "nand_io_ce0", "nand_io_ce1",
+ 	"nand_io_rb0", "nand_ale", "nand_cle",
+-	"nand_wen_clk", "nand_ren_clk", "nand_dqs0",
+-	"nand_dqs1"
++	"nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
++	"nand_dqs_1"
+ };
+ 
+ static const char * const nor_groups[] = {
+diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
+index 7b2c9495c383..a2b021958213 100644
+--- a/drivers/pinctrl/sh-pfc/core.c
++++ b/drivers/pinctrl/sh-pfc/core.c
+@@ -529,6 +529,9 @@ static int sh_pfc_probe(struct platform_device *pdev)
+ 		ret = info->ops->init(pfc);
+ 		if (ret < 0)
+ 			return ret;
++
++		/* .init() may have overridden pfc->info */
++		info = pfc->info;
+ 	}
+ 
+ 	pinctrl_provide_dummies();
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index 8cad6c165680..a100d58dbfd7 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -800,6 +800,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
+ 			case 11:
+ 			case 7:
+ 			case 6:
++			case 1:
+ 				ideapad_input_report(priv, vpc_bit);
+ 				break;
+ 			case 5:
+diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
+index 851e8efe364e..0e5b3584e918 100644
+--- a/drivers/scsi/lpfc/lpfc_els.c
++++ b/drivers/scsi/lpfc/lpfc_els.c
+@@ -3600,12 +3600,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
+ 		} else {
+ 			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
+ 			lpfc_els_free_data(phba, buf_ptr1);
++			elsiocb->context2 = NULL;
+ 		}
+ 	}
+ 
+ 	if (elsiocb->context3) {
+ 		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
+ 		lpfc_els_free_bpl(phba, buf_ptr);
++		elsiocb->context3 = NULL;
+ 	}
+ 	lpfc_sli_release_iocbq(phba, elsiocb);
+ 	return 0;
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index 56f73682d4bd..edb1a4d648dd 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
+ 
+  free_vfi_bmask:
+ 	kfree(phba->sli4_hba.vfi_bmask);
++	phba->sli4_hba.vfi_bmask = NULL;
+  free_xri_ids:
+ 	kfree(phba->sli4_hba.xri_ids);
++	phba->sli4_hba.xri_ids = NULL;
+  free_xri_bmask:
+ 	kfree(phba->sli4_hba.xri_bmask);
++	phba->sli4_hba.xri_bmask = NULL;
+  free_vpi_ids:
+ 	kfree(phba->vpi_ids);
++	phba->vpi_ids = NULL;
+  free_vpi_bmask:
+ 	kfree(phba->vpi_bmask);
++	phba->vpi_bmask = NULL;
+  free_rpi_ids:
+ 	kfree(phba->sli4_hba.rpi_ids);
++	phba->sli4_hba.rpi_ids = NULL;
+  free_rpi_bmask:
+ 	kfree(phba->sli4_hba.rpi_bmask);
++	phba->sli4_hba.rpi_bmask = NULL;
+  err_exit:
+ 	return rc;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 1f3991ba7580..b33762f1013f 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2434,6 +2434,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
+ 	if (pkt->entry_status & RF_BUSY)
+ 		res = DID_BUS_BUSY << 16;
+ 
++	if (pkt->entry_type == NOTIFY_ACK_TYPE &&
++	    pkt->handle == QLA_TGT_SKIP_HANDLE)
++		return;
++
+ 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+ 	if (sp) {
+ 		sp->done(ha, sp, res);
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index df6193b48177..4de1394ebf22 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -2872,7 +2872,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ 
+ 	pkt->entry_type = NOTIFY_ACK_TYPE;
+ 	pkt->entry_count = 1;
+-	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++	pkt->handle = QLA_TGT_SKIP_HANDLE;
+ 
+ 	nack = (struct nack_to_isp *)pkt;
+ 	nack->ox_id = ntfy->ox_id;
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 8a2cba63b5ff..80cebe691fee 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2454,7 +2454,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
+ 		if (sdp->broken_fua) {
+ 			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+ 			sdkp->DPOFUA = 0;
+-		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
++		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
++			   !sdkp->device->use_16_for_rw) {
+ 			sd_first_printk(KERN_NOTICE, sdkp,
+ 				  "Uses READ/WRITE(6), disabling FUA\n");
+ 			sdkp->DPOFUA = 0;
+diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
+index f164f24a4a55..d836414c920d 100644
+--- a/drivers/scsi/virtio_scsi.c
++++ b/drivers/scsi/virtio_scsi.c
+@@ -531,7 +531,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+ {
+ 	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ 	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
++	unsigned long flags;
+ 	int req_size;
++	int ret;
+ 
+ 	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+ 
+@@ -556,8 +558,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
+ 		req_size = sizeof(cmd->req.cmd);
+ 	}
+ 
+-	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
++	ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
++	if (ret == -EIO) {
++		cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
++		spin_lock_irqsave(&req_vq->vq_lock, flags);
++		virtscsi_complete_cmd(vscsi, cmd);
++		spin_unlock_irqrestore(&req_vq->vq_lock, flags);
++	} else if (ret != 0) {
+ 		return SCSI_MLQUEUE_HOST_BUSY;
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
+index 5e991065f5b0..4e7110351e8c 100644
+--- a/drivers/spi/spi-davinci.c
++++ b/drivers/spi/spi-davinci.c
+@@ -655,7 +655,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+ 			buf = t->rx_buf;
+ 		t->rx_dma = dma_map_single(&spi->dev, buf,
+ 				t->len, DMA_FROM_DEVICE);
+-		if (!t->rx_dma) {
++		if (dma_mapping_error(&spi->dev, t->rx_dma)) {
+ 			ret = -EFAULT;
+ 			goto err_rx_map;
+ 		}
+@@ -669,7 +669,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+ 			buf = (void *)t->tx_buf;
+ 		t->tx_dma = dma_map_single(&spi->dev, buf,
+ 				t->len, DMA_TO_DEVICE);
+-		if (!t->tx_dma) {
++		if (dma_mapping_error(&spi->dev, t->tx_dma)) {
+ 			ret = -EFAULT;
+ 			goto err_tx_map;
+ 		}
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index e78ddbe5a954..a503132f91e8 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2885,6 +2885,7 @@ static int __init comedi_init(void)
+ 		dev = comedi_alloc_board_minor(NULL);
+ 		if (IS_ERR(dev)) {
+ 			comedi_cleanup_board_minors();
++			class_destroy(comedi_class);
+ 			cdev_del(&comedi_cdev);
+ 			unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ 						 COMEDI_NUM_MINORS);
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index f7bcefd46b5e..c50b304ce0b4 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -2120,7 +2120,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
+ 				return -ETIME;
+ 			}
+ 			d += signbits;
+-			data[n] = d;
++			data[n] = d & 0xffff;
+ 		}
+ 	} else if (devpriv->is_6143) {
+ 		for (n = 0; n < insn->n; n++) {
+@@ -2163,8 +2163,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
+ 				data[n] = dl;
+ 			} else {
+ 				d = ni_readw(dev, ADC_FIFO_Data_Register);
+-				d += signbits;	/* subtle: needs to be short addition */
+-				data[n] = d;
++				d += signbits;
++				data[n] = d & 0xffff;
+ 			}
+ 		}
+ 	}
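
The ni_mio_common change replaces an implicit 16-bit wraparound (the old "subtle: needs to be short addition" comment) with an explicit mask, which stays correct even when the arithmetic is done in a wider type. A standalone demonstration with made-up sample values:

#include <stdio.h>

int main(void)
{
	unsigned int d = 0x9000;	/* raw ADC FIFO sample */
	unsigned int signbits = 0x8000;	/* offset for bipolar ranges */

	/* Plain int addition overflows 16 bits: 0x9000 + 0x8000 = 0x11000. */
	printf("unmasked: 0x%x\n", d + signbits);

	/* Masking reproduces the old 16-bit wraparound explicitly. */
	printf("masked:   0x%x\n", (d + signbits) & 0xffff);	/* 0x1000 */
	return 0;
}
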
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 766fdcece074..4c6b479a34c2 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -534,6 +534,9 @@ static int vnt_start(struct ieee80211_hw *hw)
+ 		goto free_all;
+ 	}
+ 
++	if (vnt_key_init_table(priv))
++		goto free_all;
++
+ 	priv->int_interval = 1;  /* bInterval is set to 1 */
+ 
+ 	vnt_int_start_interrupt(priv);
+diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
+index 68bd7f5d9f73..6d561e1170f4 100644
+--- a/drivers/target/target_core_internal.h
++++ b/drivers/target/target_core_internal.h
+@@ -67,7 +67,7 @@ int	init_se_kmem_caches(void);
+ void	release_se_kmem_caches(void);
+ u32	scsi_get_new_index(scsi_index_t);
+ void	transport_subsystem_check_init(void);
+-void	transport_cmd_finish_abort(struct se_cmd *, int);
++int	transport_cmd_finish_abort(struct se_cmd *, int);
+ unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+ void	transport_dump_dev_state(struct se_device *, char *, int *);
+ void	transport_dump_dev_info(struct se_device *, struct se_lun *,
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index eed7c5a31b15..44510bd74963 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -78,7 +78,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+ 	kfree(tmr);
+ }
+ 
+-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
++static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+ 	unsigned long flags;
+ 	bool remove = true, send_tas;
+@@ -94,7 +94,7 @@ static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ 		transport_send_task_abort(cmd);
+ 	}
+ 
+-	transport_cmd_finish_abort(cmd, remove);
++	return transport_cmd_finish_abort(cmd, remove);
+ }
+ 
+ static int target_check_cdb_and_preempt(struct list_head *list,
+@@ -190,8 +190,8 @@ void core_tmr_abort_task(
+ 		cancel_work_sync(&se_cmd->work);
+ 		transport_wait_for_tasks(se_cmd);
+ 
+-		transport_cmd_finish_abort(se_cmd, true);
+-		target_put_sess_cmd(se_cmd);
++		if (!transport_cmd_finish_abort(se_cmd, true))
++			target_put_sess_cmd(se_cmd);
+ 
+ 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ 				" ref_tag: %d\n", ref_tag);
+@@ -291,8 +291,8 @@ static void core_tmr_drain_tmr_list(
+ 		cancel_work_sync(&cmd->work);
+ 		transport_wait_for_tasks(cmd);
+ 
+-		transport_cmd_finish_abort(cmd, 1);
+-		target_put_sess_cmd(cmd);
++		if (!transport_cmd_finish_abort(cmd, 1))
++			target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
+@@ -390,8 +390,8 @@ static void core_tmr_drain_state_list(
+ 		cancel_work_sync(&cmd->work);
+ 		transport_wait_for_tasks(cmd);
+ 
+-		core_tmr_handle_tas_abort(cmd, tas);
+-		target_put_sess_cmd(cmd);
++		if (!core_tmr_handle_tas_abort(cmd, tas))
++			target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 1cf3c0819b81..95c1c4ecf336 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -644,9 +644,10 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+ 		percpu_ref_put(&lun->lun_ref);
+ }
+ 
+-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
++int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
+ 	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++	int ret = 0;
+ 
+ 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ 		transport_lun_remove_cmd(cmd);
+@@ -658,9 +659,11 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ 		cmd->se_tfo->aborted_task(cmd);
+ 
+ 	if (transport_cmd_check_stop_to_fabric(cmd))
+-		return;
++		return 1;
+ 	if (remove && ack_kref)
+-		transport_put_cmd(cmd);
++		ret = transport_put_cmd(cmd);
++
++	return ret;
+ }
+ 
+ static void target_complete_failure_work(struct work_struct *work)
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 2df90a54509a..50b67ff2b6ea 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -2693,13 +2693,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ 	 * related to the kernel should not use this.
+ 	 */
+ 			data = vt_get_shift_state();
+-			ret = __put_user(data, p);
++			ret = put_user(data, p);
+ 			break;
+ 		case TIOCL_GETMOUSEREPORTING:
+ 			console_lock();	/* May be overkill */
+ 			data = mouse_reporting();
+ 			console_unlock();
+-			ret = __put_user(data, p);
++			ret = put_user(data, p);
+ 			break;
+ 		case TIOCL_SETVESABLANK:
+ 			console_lock();
+@@ -2708,7 +2708,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
+ 			break;
+ 		case TIOCL_GETKMSGREDIRECT:
+ 			data = vt_get_kmsg_redirect();
+-			ret = __put_user(data, p);
++			ret = put_user(data, p);
+ 			break;
+ 		case TIOCL_SETKMSGREDIRECT:
+ 			if (!capable(CAP_SYS_ADMIN)) {
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 96b21b0dac1e..3116edfcdc18 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -223,6 +223,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Blackmagic Design UltraStudio SDI */
+ 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* Hauppauge HVR-950q */
++	{ USB_DEVICE(0x2040, 0x7200), .driver_info =
++			USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ 	/* INTEL VALUE SSD */
+ 	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
+index 4a1a543deeda..da885c3bc33f 100644
+--- a/drivers/usb/dwc3/dwc3-st.c
++++ b/drivers/usb/dwc3/dwc3-st.c
+@@ -227,7 +227,7 @@ static int st_dwc3_probe(struct platform_device *pdev)
+ 
+ 	dwc3_data->syscfg_reg_off = res->start;
+ 
+-	dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
++	dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
+ 		 dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
+ 
+ 	dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 48412e4afb1b..ff56aaa00bf7 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1202,7 +1202,7 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
+ 		goto out;
+ 	}
+ 
+-	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
++	if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
+ 				request, req->dep->name)) {
+ 		ret = -EINVAL;
+ 		goto out;
+@@ -1249,7 +1249,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
+ 			dwc3_stop_active_transfer(dwc, dep->number, true);
+ 			goto out1;
+ 		}
+-		dev_err(dwc->dev, "request %p was not queued to %s\n",
++		dev_err(dwc->dev, "request %pK was not queued to %s\n",
+ 				request, ep->name);
+ 		ret = -EINVAL;
+ 		goto out0;
+@@ -1854,7 +1854,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 		 * would help. Lets hope that if this occurs, someone
+ 		 * fixes the root cause instead of looking away :)
+ 		 */
+-		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
++		dev_err(dwc->dev, "%s's TRB (%pK) still owned by HW\n",
+ 				dep->name, trb);
+ 
+ 	count = trb->size & DWC3_TRB_SIZE_MASK;
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 921dd8b0733f..804b209f4c08 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -1668,12 +1668,12 @@ static int ffs_func_eps_enable(struct ffs_function *func)
+ 		ep->ep->driver_data = ep;
+ 		ep->ep->desc = ds;
+ 
+-		comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
+-				USB_DT_ENDPOINT_SIZE);
+-		ep->ep->maxburst = comp_desc->bMaxBurst + 1;
+-
+-		if (needs_comp_desc)
++		if (needs_comp_desc) {
++			comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
++					USB_DT_ENDPOINT_SIZE);
++			ep->ep->maxburst = comp_desc->bMaxBurst + 1;
+ 			ep->ep->comp_desc = comp_desc;
++		}
+ 
+ 		ret = usb_ep_enable(ep->ep);
+ 		if (likely(!ret)) {
+@@ -3459,6 +3459,7 @@ static void ffs_closed(struct ffs_data *ffs)
+ {
+ 	struct ffs_dev *ffs_obj;
+ 	struct f_fs_opts *opts;
++	struct config_item *ci;
+ 
+ 	ENTER();
+ 	ffs_dev_lock();
+@@ -3482,8 +3483,11 @@ static void ffs_closed(struct ffs_data *ffs)
+ 	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ 		goto done;
+ 
+-	unregister_gadget_item(ffs_obj->opts->
+-			       func_inst.group.cg_item.ci_parent->ci_parent);
++	ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
++	ffs_dev_unlock();
++
++	unregister_gadget_item(ci);
++	return;
+ done:
+ 	ffs_dev_unlock();
+ }
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f36f964a9a37..69040e9069e0 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -132,6 +132,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
+ 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
++	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 80d93bccc09b..5d841485bbe3 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1874,6 +1874,10 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
+ 	},
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
+ 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 4f91868736a5..23c303b2a3a2 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -156,6 +156,7 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9063)},	/* Sierra Wireless EM7305 */
+ 	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx */
+ 	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
+ 	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
+diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
+index 44ab43fc4fcc..af10f7b131a4 100644
+--- a/drivers/usb/usbip/stub_main.c
++++ b/drivers/usb/usbip/stub_main.c
+@@ -262,7 +262,11 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
+ 		kmem_cache_free(stub_priv_cache, priv);
+ 
+ 		kfree(urb->transfer_buffer);
++		urb->transfer_buffer = NULL;
++
+ 		kfree(urb->setup_packet);
++		urb->setup_packet = NULL;
++
+ 		usb_free_urb(urb);
+ 	}
+ }
+diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
+index dbcabc9dbe0d..021003c4de53 100644
+--- a/drivers/usb/usbip/stub_tx.c
++++ b/drivers/usb/usbip/stub_tx.c
+@@ -28,7 +28,11 @@ static void stub_free_priv_and_urb(struct stub_priv *priv)
+ 	struct urb *urb = priv->urb;
+ 
+ 	kfree(urb->setup_packet);
++	urb->setup_packet = NULL;
++
+ 	kfree(urb->transfer_buffer);
++	urb->transfer_buffer = NULL;
++
+ 	list_del(&priv->list);
+ 	kmem_cache_free(stub_priv_cache, priv);
+ 	usb_free_urb(urb);
+diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
+index 22d8ae65772a..35af1d15c7ef 100644
+--- a/drivers/watchdog/bcm_kona_wdt.c
++++ b/drivers/watchdog/bcm_kona_wdt.c
+@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
+ 	if (!wdt)
+ 		return -ENOMEM;
+ 
++	spin_lock_init(&wdt->lock);
++
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	wdt->base = devm_ioremap_resource(dev, res);
+ 	if (IS_ERR(wdt->base))
+@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
+-	spin_lock_init(&wdt->lock);
+ 	platform_set_drvdata(pdev, wdt);
+ 	watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
+ 
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index 4c549323c605..3a0e6a031174 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -416,9 +416,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
+ 	if (map == SWIOTLB_MAP_ERROR)
+ 		return DMA_ERROR_CODE;
+ 
++	dev_addr = xen_phys_to_bus(map);
+ 	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
+ 					dev_addr, map & ~PAGE_MASK, size, dir, attrs);
+-	dev_addr = xen_phys_to_bus(map);
+ 
+ 	/*
+ 	 * Ensure that the address returned is DMA'ble
+@@ -574,13 +574,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+ 				sg_dma_len(sgl) = 0;
+ 				return 0;
+ 			}
++			dev_addr = xen_phys_to_bus(map);
+ 			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
+ 						dev_addr,
+ 						map & ~PAGE_MASK,
+ 						sg->length,
+ 						dir,
+ 						attrs);
+-			sg->dma_address = xen_phys_to_bus(map);
++			sg->dma_address = dev_addr;
+ 		} else {
+ 			/* we are not interested in the dma_addr returned by
+ 			 * xen_dma_map_page, only in the potential cache flushes executed
+diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
+index ac7d921ed984..257425511d10 100644
+--- a/fs/autofs4/dev-ioctl.c
++++ b/fs/autofs4/dev-ioctl.c
+@@ -331,7 +331,7 @@ static int autofs_dev_ioctl_fail(struct file *fp,
+ 	int status;
+ 
+ 	token = (autofs_wqt_t) param->fail.token;
+-	status = param->fail.status ? param->fail.status : -ENOENT;
++	status = param->fail.status < 0 ? param->fail.status : -ENOENT;
+ 	return autofs4_wait_release(sbi, token, status);
+ }
+ 
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index cd46e4158830..90f20f8ce87e 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -904,17 +904,60 @@ static int load_elf_binary(struct linux_binprm *bprm)
+ 		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
+ 
+ 		vaddr = elf_ppnt->p_vaddr;
++		/*
++		 * If we are loading ET_EXEC or we have already performed
++		 * the ET_DYN load_addr calculations, proceed normally.
++		 */
+ 		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
+ 			elf_flags |= MAP_FIXED;
+ 		} else if (loc->elf_ex.e_type == ET_DYN) {
+-			/* Try and get dynamic programs out of the way of the
+-			 * default mmap base, as well as whatever program they
+-			 * might try to exec.  This is because the brk will
+-			 * follow the loader, and is not movable.  */
+-			load_bias = ELF_ET_DYN_BASE - vaddr;
+-			if (current->flags & PF_RANDOMIZE)
+-				load_bias += arch_mmap_rnd();
+-			load_bias = ELF_PAGESTART(load_bias);
++			/*
++			 * This logic is run once for the first LOAD Program
++			 * Header for ET_DYN binaries to calculate the
++			 * randomization (load_bias) for all the LOAD
++			 * Program Headers, and to calculate the entire
++			 * size of the ELF mapping (total_size). (Note that
++			 * load_addr_set is set to true later once the
++			 * initial mapping is performed.)
++			 *
++			 * There are effectively two types of ET_DYN
++			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
++			 * and loaders (ET_DYN without INTERP, since they
++			 * _are_ the ELF interpreter). The loaders must
++			 * be loaded away from programs since the program
++			 * may otherwise collide with the loader (especially
++			 * for ET_EXEC which does not have a randomized
++			 * position). For example to handle invocations of
++			 * "./ld.so someprog" to test out a new version of
++			 * the loader, the subsequent program that the
++			 * loader loads must avoid the loader itself, so
++			 * they cannot share the same load range. Sufficient
++			 * room for the brk must be allocated with the
++			 * loader as well, since brk must be available with
++			 * the loader.
++			 *
++			 * Therefore, programs are loaded offset from
++			 * ELF_ET_DYN_BASE and loaders are loaded into the
++			 * independently randomized mmap region (0 load_bias
++			 * without MAP_FIXED).
++			 */
++			if (elf_interpreter) {
++				load_bias = ELF_ET_DYN_BASE;
++				if (current->flags & PF_RANDOMIZE)
++					load_bias += arch_mmap_rnd();
++				elf_flags |= MAP_FIXED;
++			} else
++				load_bias = 0;
++
++			/*
++			 * Since load_bias is used for all subsequent loading
++			 * calculations, we must lower it by the first vaddr
++			 * so that the remaining calculations based on the
++			 * ELF vaddrs will be correctly offset. The result
++			 * is then page aligned.
++			 */
++			load_bias = ELF_PAGESTART(load_bias - vaddr);
++
+ 			total_size = total_mapping_size(elf_phdata,
+ 							loc->elf_ex.e_phnum);
+ 			if (!total_size) {
+@@ -2285,6 +2328,7 @@ static int elf_core_dump(struct coredump_params *cprm)
+ 				goto end_coredump;
+ 		}
+ 	}
++	dump_truncate(cprm);
+ 
+ 	if (!elf_core_write_extra_data(cprm))
+ 		goto end_coredump;
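
The ET_DYN comment block above describes lowering load_bias by the first segment's p_vaddr and page-aligning the result, so "vaddr + load_bias" lands at the chosen base. A small arithmetic sketch of that step; ELF_PAGESTART is restated here and the addresses are invented for illustration:

#include <stdio.h>

#define PAGE_MASK (~0xfffUL)
/* Round an address down to the start of its page, as ELF_PAGESTART does. */
#define ELF_PAGESTART(a) ((a) & PAGE_MASK)

int main(void)
{
	unsigned long base = 0x555555000000UL;	/* chosen base, e.g. ELF_ET_DYN_BASE + rnd */
	unsigned long vaddr = 0x710UL;		/* p_vaddr of the first PT_LOAD */

	/*
	 * Subtract vaddr first, then page-align; the segment keeps its
	 * correct in-page offset and starts within a page of the base.
	 */
	unsigned long load_bias = ELF_PAGESTART(base - vaddr);

	printf("load_bias     = %#lx\n", load_bias);
	printf("segment start = %#lx\n", load_bias + vaddr);
	return 0;
}
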
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 04997ac958c4..db6115486166 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4358,8 +4358,19 @@ search_again:
+ 		if (found_type > min_type) {
+ 			del_item = 1;
+ 		} else {
+-			if (item_end < new_size)
++			if (item_end < new_size) {
++				/*
++				 * With NO_HOLES mode, for the following mapping
++				 *
++				 * [0-4k][hole][8k-12k]
++				 *
++				 * if isize is truncated down to 6k, it ends
++				 * up being 8k.
++				 */
++				if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
++					last_size = new_size;
+ 				break;
++			}
+ 			if (found_key.offset >= new_size)
+ 				del_item = 1;
+ 			else
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7d7bd466520b..cb3406815330 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -401,6 +401,9 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ 		}
+ 	} while (server->tcpStatus == CifsNeedReconnect);
+ 
++	if (server->tcpStatus == CifsNeedNegotiate)
++		mod_delayed_work(cifsiod_wq, &server->echo, 0);
++
+ 	return rc;
+ }
+ 
+@@ -410,18 +413,27 @@ cifs_echo_request(struct work_struct *work)
+ 	int rc;
+ 	struct TCP_Server_Info *server = container_of(work,
+ 					struct TCP_Server_Info, echo.work);
++	unsigned long echo_interval;
++
++	/*
++	 * If we need to renegotiate, set echo interval to zero to
++	 * immediately call echo service where we can renegotiate.
++	 */
++	if (server->tcpStatus == CifsNeedNegotiate)
++		echo_interval = 0;
++	else
++		echo_interval = SMB_ECHO_INTERVAL;
+ 
+ 	/*
+-	 * We cannot send an echo if it is disabled or until the
+-	 * NEGOTIATE_PROTOCOL request is done, which is indicated by
+-	 * server->ops->need_neg() == true. Also, no need to ping if
+-	 * we got a response recently.
++	 * We cannot send an echo if it is disabled.
++	 * Also, no need to ping if we got a response recently.
+ 	 */
+ 
+ 	if (server->tcpStatus == CifsNeedReconnect ||
+-	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
++	    server->tcpStatus == CifsExiting ||
++	    server->tcpStatus == CifsNew ||
+ 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
+-	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
++	    time_before(jiffies, server->lstrp + echo_interval - HZ))
+ 		goto requeue_echo;
+ 
+ 	rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index 87b87e091e8e..efd72e1fae74 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -849,8 +849,13 @@ cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 		     struct cifs_fid *fid, __u16 search_flags,
+ 		     struct cifs_search_info *srch_inf)
+ {
+-	return CIFSFindFirst(xid, tcon, path, cifs_sb,
+-			     &fid->netfid, search_flags, srch_inf, true);
++	int rc;
++
++	rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
++			   &fid->netfid, search_flags, srch_inf, true);
++	if (rc)
++		cifs_dbg(FYI, "find first failed=%d\n", rc);
++	return rc;
+ }
+ 
+ static int
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 57aeae6116d6..16212dab81d5 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -843,7 +843,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
+ 	kfree(utf16_path);
+ 	if (rc) {
+-		cifs_dbg(VFS, "open dir failed\n");
++		cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
+ 		return rc;
+ 	}
+ 
+@@ -853,7 +853,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
+ 	rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
+ 				  fid->volatile_fid, 0, srch_inf);
+ 	if (rc) {
+-		cifs_dbg(VFS, "query directory failed\n");
++		cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
+ 		SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
+ 	}
+ 	return rc;
+diff --git a/fs/coredump.c b/fs/coredump.c
+index 26d05e3bc6db..e07cbb629f1c 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -803,3 +803,21 @@ int dump_align(struct coredump_params *cprm, int align)
+ 	return mod ? dump_skip(cprm, align - mod) : 1;
+ }
+ EXPORT_SYMBOL(dump_align);
++
++/*
++ * Ensures that file size is big enough to contain the current file
++ * postion. This prevents gdb from complaining about a truncated file
++ * if the last "write" to the file was dump_skip.
++ */
++void dump_truncate(struct coredump_params *cprm)
++{
++	struct file *file = cprm->file;
++	loff_t offset;
++
++	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
++		offset = file->f_op->llseek(file, 0, SEEK_CUR);
++		if (i_size_read(file->f_mapping->host) < offset)
++			do_truncate(file->f_path.dentry, offset, 0, file);
++	}
++}
++EXPORT_SYMBOL(dump_truncate);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 11d466bbfb0b..5ca8f0b2b897 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1128,11 +1128,12 @@ void shrink_dcache_sb(struct super_block *sb)
+ 		LIST_HEAD(dispose);
+ 
+ 		freed = list_lru_walk(&sb->s_dentry_lru,
+-			dentry_lru_isolate_shrink, &dispose, UINT_MAX);
++			dentry_lru_isolate_shrink, &dispose, 1024);
+ 
+ 		this_cpu_sub(nr_dentry_unused, freed);
+ 		shrink_dentry_list(&dispose);
+-	} while (freed > 0);
++		cond_resched();
++	} while (list_lru_count(&sb->s_dentry_lru) > 0);
+ }
+ EXPORT_SYMBOL(shrink_dcache_sb);
+ 
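
The shrink_dcache_sb change walks the dentry LRU in batches of 1024 with a cond_resched() between batches, and keys the loop off the remaining LRU count rather than the last pass's progress. A userspace analogue of the batched-drain shape, with sched_yield() standing in for cond_resched():

#include <stdio.h>
#include <sched.h>	/* sched_yield() */

#define BATCH 1024

/* Pretend to free up to 'limit' entries; returns how many were freed. */
static unsigned long shrink_batch(unsigned long *remaining, unsigned long limit)
{
	unsigned long n = *remaining < limit ? *remaining : limit;

	*remaining -= n;
	return n;
}

int main(void)
{
	unsigned long entries = 10000;	/* illustrative LRU population */

	do {
		unsigned long freed = shrink_batch(&entries, BATCH);

		printf("freed %lu, %lu left\n", freed, entries);
		sched_yield();	/* bound the latency of each pass */
	} while (entries > 0);
	return 0;
}
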
+diff --git a/fs/exec.c b/fs/exec.c
+index 04c9cab4d4d3..3ba35c21726e 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -199,7 +199,24 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ 
+ 	if (write) {
+ 		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+-		struct rlimit *rlim;
++		unsigned long ptr_size, limit;
++
++		/*
++		 * Since the stack will hold pointers to the strings, we
++		 * must account for them as well.
++		 *
++		 * The size calculation is the entire vma while each arg page is
++		 * built, so each time we get here it's calculating how far it
++		 * is currently (rather than each call being just the newly
++		 * added size from the arg page).  As a result, we need to
++		 * always add the entire size of the pointers, so that on the
++		 * last call to get_arg_page() we'll actually have the entire
++		 * correct size.
++		 */
++		ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
++		if (ptr_size > ULONG_MAX - size)
++			goto fail;
++		size += ptr_size;
+ 
+ 		acct_arg_size(bprm, size / PAGE_SIZE);
+ 
+@@ -211,20 +228,24 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
+ 			return page;
+ 
+ 		/*
+-		 * Limit to 1/4-th the stack size for the argv+env strings.
++		 * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
++		 * (whichever is smaller) for the argv+env strings.
+ 		 * This ensures that:
+ 		 *  - the remaining binfmt code will not run out of stack space,
+ 		 *  - the program will have a reasonable amount of stack left
+ 		 *    to work from.
+ 		 */
+-		rlim = current->signal->rlim;
+-		if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
+-			put_page(page);
+-			return NULL;
+-		}
++		limit = _STK_LIM / 4 * 3;
++		limit = min(limit, rlimit(RLIMIT_STACK) / 4);
++		if (size > limit)
++			goto fail;
+ 	}
+ 
+ 	return page;
++
++fail:
++	put_page(page);
++	return NULL;
+ }
+ 
+ static void put_arg_page(struct page *page)
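
The get_arg_page() hunk accounts for the argv/envp pointer array on top of the strings, and rejects the addition outright if it would wrap before comparing against the cap. The overflow check in isolation (the sizes and limit here are illustrative):

#include <stdio.h>
#include <limits.h>

/* Returns 0 if size + nptrs pointers fit within limit, -1 otherwise. */
static int check_arg_size(unsigned long size, unsigned long nptrs,
			  unsigned long limit)
{
	unsigned long ptr_size = nptrs * sizeof(void *);

	/* Reject first if the addition itself would wrap around. */
	if (ptr_size > ULONG_MAX - size)
		return -1;
	size += ptr_size;

	return size > limit ? -1 : 0;
}

int main(void)
{
	/* ~1 MiB of strings, 100 pointers, 8 MiB / 4 cap: fits. */
	printf("%d\n", check_arg_size(1UL << 20, 100, (8UL << 20) / 4));
	/* A size crafted near ULONG_MAX is caught by the wrap check. */
	printf("%d\n", check_arg_size(ULONG_MAX - 8, 100, (8UL << 20) / 4));
	return 0;
}
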
+diff --git a/fs/fcntl.c b/fs/fcntl.c
+index ee85cd4e136a..62376451bbce 100644
+--- a/fs/fcntl.c
++++ b/fs/fcntl.c
+@@ -740,16 +740,10 @@ static int __init fcntl_init(void)
+ 	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
+ 	 * is defined as O_NONBLOCK on some platforms and not on others.
+ 	 */
+-	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+-		O_RDONLY	| O_WRONLY	| O_RDWR	|
+-		O_CREAT		| O_EXCL	| O_NOCTTY	|
+-		O_TRUNC		| O_APPEND	| /* O_NONBLOCK	| */
+-		__O_SYNC	| O_DSYNC	| FASYNC	|
+-		O_DIRECT	| O_LARGEFILE	| O_DIRECTORY	|
+-		O_NOFOLLOW	| O_NOATIME	| O_CLOEXEC	|
+-		__FMODE_EXEC	| O_PATH	| __O_TMPFILE	|
+-		__FMODE_NONOTIFY
+-		));
++	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
++		HWEIGHT32(
++			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
++			__FMODE_EXEC | __FMODE_NONOTIFY));
+ 
+ 	fasync_cache = kmem_cache_create("fasync_cache",
+ 		sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
+diff --git a/fs/mount.h b/fs/mount.h
+index 32cabd55a787..bae2b0943019 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -57,6 +57,7 @@ struct mount {
+ 	struct mnt_namespace *mnt_ns;	/* containing namespace */
+ 	struct mountpoint *mnt_mp;	/* where is it mounted */
+ 	struct hlist_node mnt_mp_list;	/* list mounts with the same mountpoint */
++	struct list_head mnt_umounting; /* list entry for umount propagation */
+ #ifdef CONFIG_FSNOTIFY
+ 	struct hlist_head mnt_fsnotify_marks;
+ 	__u32 mnt_fsnotify_mask;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index df20ee946f7c..58b281ad30d5 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -237,6 +237,7 @@ static struct mount *alloc_vfsmnt(const char *name)
+ 		INIT_LIST_HEAD(&mnt->mnt_slave_list);
+ 		INIT_LIST_HEAD(&mnt->mnt_slave);
+ 		INIT_HLIST_NODE(&mnt->mnt_mp_list);
++		INIT_LIST_HEAD(&mnt->mnt_umounting);
+ #ifdef CONFIG_FSNOTIFY
+ 		INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+ #endif
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index aadb4af4a0fe..b6d97dfa9cb6 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -2446,6 +2446,20 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
+ }
+ EXPORT_SYMBOL_GPL(nfs_may_open);
+ 
++static int nfs_execute_ok(struct inode *inode, int mask)
++{
++	struct nfs_server *server = NFS_SERVER(inode);
++	int ret;
++
++	if (mask & MAY_NOT_BLOCK)
++		ret = nfs_revalidate_inode_rcu(server, inode);
++	else
++		ret = nfs_revalidate_inode(server, inode);
++	if (ret == 0 && !execute_ok(inode))
++		ret = -EACCES;
++	return ret;
++}
++
+ int nfs_permission(struct inode *inode, int mask)
+ {
+ 	struct rpc_cred *cred;
+@@ -2463,6 +2477,9 @@ int nfs_permission(struct inode *inode, int mask)
+ 		case S_IFLNK:
+ 			goto out;
+ 		case S_IFREG:
++			if ((mask & MAY_OPEN) &&
++			   nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
++				return 0;
+ 			break;
+ 		case S_IFDIR:
+ 			/*
+@@ -2495,8 +2512,8 @@ force_lookup:
+ 			res = PTR_ERR(cred);
+ 	}
+ out:
+-	if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
+-		res = -EACCES;
++	if (!res && (mask & MAY_EXEC))
++		res = nfs_execute_ok(inode, mask);
+ 
+ 	dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
+ 		inode->i_sb->s_id, inode->i_ino, mask, res);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 5d8c7e978c33..f06af7248be7 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2068,8 +2068,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
+ 	if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
+ 		return 0;
+ 
+-	/* even though OPEN succeeded, access is denied. Close the file */
+-	nfs4_close_state(state, fmode);
+ 	return -EACCES;
+ }
+ 
+diff --git a/fs/open.c b/fs/open.c
+index ff80b2542989..d0169e52d7fe 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -881,6 +881,12 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
+ 	int lookup_flags = 0;
+ 	int acc_mode;
+ 
++	/*
++	 * Clear out all open flags we don't know about so that we don't report
++	 * them in fcntl(F_GETFD) or similar interfaces.
++	 */
++	flags &= VALID_OPEN_FLAGS;
++
+ 	if (flags & (O_CREAT | __O_TMPFILE))
+ 		op->mode = (mode & S_IALLUGO) | S_IFREG;
+ 	else
+diff --git a/fs/pnode.c b/fs/pnode.c
+index b394ca5307ec..d15c63e97ef1 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -24,6 +24,11 @@ static inline struct mount *first_slave(struct mount *p)
+ 	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
+ }
+ 
++static inline struct mount *last_slave(struct mount *p)
++{
++	return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
++}
++
+ static inline struct mount *next_slave(struct mount *p)
+ {
+ 	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
+@@ -164,6 +169,19 @@ static struct mount *propagation_next(struct mount *m,
+ 	}
+ }
+ 
++static struct mount *skip_propagation_subtree(struct mount *m,
++						struct mount *origin)
++{
++	/*
++	 * Advance m such that propagation_next will not return
++	 * the slaves of m.
++	 */
++	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
++		m = last_slave(m);
++
++	return m;
++}
++
+ static struct mount *next_group(struct mount *m, struct mount *origin)
+ {
+ 	while (1) {
+@@ -415,65 +433,104 @@ void propagate_mount_unlock(struct mount *mnt)
+ 	}
+ }
+ 
+-/*
+- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
+- */
+-static void mark_umount_candidates(struct mount *mnt)
++static void umount_one(struct mount *mnt, struct list_head *to_umount)
+ {
+-	struct mount *parent = mnt->mnt_parent;
+-	struct mount *m;
+-
+-	BUG_ON(parent == mnt);
+-
+-	for (m = propagation_next(parent, parent); m;
+-			m = propagation_next(m, parent)) {
+-		struct mount *child = __lookup_mnt(&m->mnt,
+-						mnt->mnt_mountpoint);
+-		if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
+-			continue;
+-		if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
+-			SET_MNT_MARK(child);
+-		}
+-	}
++	CLEAR_MNT_MARK(mnt);
++	mnt->mnt.mnt_flags |= MNT_UMOUNT;
++	list_del_init(&mnt->mnt_child);
++	list_del_init(&mnt->mnt_umounting);
++	list_move_tail(&mnt->mnt_list, to_umount);
+ }
+ 
+ /*
+  * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
+  * parent propagates to.
+  */
+-static void __propagate_umount(struct mount *mnt)
++static bool __propagate_umount(struct mount *mnt,
++			       struct list_head *to_umount,
++			       struct list_head *to_restore)
+ {
+-	struct mount *parent = mnt->mnt_parent;
+-	struct mount *m;
++	bool progress = false;
++	struct mount *child;
+ 
+-	BUG_ON(parent == mnt);
++	/*
++	 * The state of the parent won't change if this mount is
++	 * already unmounted or marked as without children.
++	 */
++	if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
++		goto out;
+ 
+-	for (m = propagation_next(parent, parent); m;
+-			m = propagation_next(m, parent)) {
+-		struct mount *topper;
+-		struct mount *child = __lookup_mnt(&m->mnt,
+-						mnt->mnt_mountpoint);
+-		/*
+-		 * umount the child only if the child has no children
+-		 * and the child is marked safe to unmount.
+-		 */
+-		if (!child || !IS_MNT_MARKED(child))
++	/* Verify topper is the only grandchild that has not been
++	 * speculatively unmounted.
++	 */
++	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
++		if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+ 			continue;
+-		CLEAR_MNT_MARK(child);
++		if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
++			continue;
++		/* Found a mounted child */
++		goto children;
++	}
+ 
+-		/* If there is exactly one mount covering all of child
+-		 * replace child with that mount.
+-		 */
+-		topper = find_topper(child);
+-		if (topper)
+-			mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
+-					      topper);
++	/* Mark mounts that can be unmounted if not locked */
++	SET_MNT_MARK(mnt);
++	progress = true;
++
++	/* If a mount is without children and not locked umount it. */
++	if (!IS_MNT_LOCKED(mnt)) {
++		umount_one(mnt, to_umount);
++	} else {
++children:
++		list_move_tail(&mnt->mnt_umounting, to_restore);
++	}
++out:
++	return progress;
++}
++
++static void umount_list(struct list_head *to_umount,
++			struct list_head *to_restore)
++{
++	struct mount *mnt, *child, *tmp;
++	list_for_each_entry(mnt, to_umount, mnt_list) {
++		list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
++			/* topper? */
++			if (child->mnt_mountpoint == mnt->mnt.mnt_root)
++				list_move_tail(&child->mnt_umounting, to_restore);
++			else
++				umount_one(child, to_umount);
++		}
++	}
++}
+ 
+-		if (list_empty(&child->mnt_mounts)) {
+-			list_del_init(&child->mnt_child);
+-			child->mnt.mnt_flags |= MNT_UMOUNT;
+-			list_move_tail(&child->mnt_list, &mnt->mnt_list);
++static void restore_mounts(struct list_head *to_restore)
++{
++	/* Restore mounts to a clean working state */
++	while (!list_empty(to_restore)) {
++		struct mount *mnt, *parent;
++		struct mountpoint *mp;
++
++		mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
++		CLEAR_MNT_MARK(mnt);
++		list_del_init(&mnt->mnt_umounting);
++
++		/* Should this mount be reparented? */
++		mp = mnt->mnt_mp;
++		parent = mnt->mnt_parent;
++		while (parent->mnt.mnt_flags & MNT_UMOUNT) {
++			mp = parent->mnt_mp;
++			parent = parent->mnt_parent;
+ 		}
++		if (parent != mnt->mnt_parent)
++			mnt_change_mountpoint(parent, mp, mnt);
++	}
++}
++
++static void cleanup_umount_visitations(struct list_head *visited)
++{
++	while (!list_empty(visited)) {
++		struct mount *mnt =
++			list_first_entry(visited, struct mount, mnt_umounting);
++		list_del_init(&mnt->mnt_umounting);
+ 	}
+ }
+ 
+@@ -487,11 +544,68 @@ static void __propagate_umount(struct mount *mnt)
+ int propagate_umount(struct list_head *list)
+ {
+ 	struct mount *mnt;
++	LIST_HEAD(to_restore);
++	LIST_HEAD(to_umount);
++	LIST_HEAD(visited);
++
++	/* Find candidates for unmounting */
++	list_for_each_entry_reverse(mnt, list, mnt_list) {
++		struct mount *parent = mnt->mnt_parent;
++		struct mount *m;
++
++		/*
++		 * If this mount has already been visited it is known that it's
++		 * If this mount has already been visited it is known that its
++		 * entire peer group and all of their slaves in the propagation
++		 * tree for the mountpoint have already been visited and there is
++		 */
++		if (!list_empty(&mnt->mnt_umounting))
++			continue;
++
++		list_add_tail(&mnt->mnt_umounting, &visited);
++		for (m = propagation_next(parent, parent); m;
++		     m = propagation_next(m, parent)) {
++			struct mount *child = __lookup_mnt(&m->mnt,
++							   mnt->mnt_mountpoint);
++			if (!child)
++				continue;
++
++			if (!list_empty(&child->mnt_umounting)) {
++				/*
++				 * If the child has already been visited it is
++				 * known that its entire peer group and all of
++				 * their slaves in the propagation tree for the
++				 * mountpoint have already been visited and there
++				 * is no need to visit this subtree again.
++				 */
++				m = skip_propagation_subtree(m, parent);
++				continue;
++			} else if (child->mnt.mnt_flags & MNT_UMOUNT) {
++				/*
++				 * We have come across a partially unmounted
++				 * mount in the list that has not been visited yet.
++				 * Remember it has been visited and continue
++				 * about our merry way.
++				 */
++				list_add_tail(&child->mnt_umounting, &visited);
++				continue;
++			}
++
++			/* Check the child and parents while progress is made */
++			while (__propagate_umount(child,
++						  &to_umount, &to_restore)) {
++				/* Is the parent a umount candidate? */
++				child = child->mnt_parent;
++				if (list_empty(&child->mnt_umounting))
++					break;
++			}
++		}
++	}
+ 
+-	list_for_each_entry_reverse(mnt, list, mnt_list)
+-		mark_umount_candidates(mnt);
++	umount_list(&to_umount, &to_restore);
++	restore_mounts(&to_restore);
++	cleanup_umount_visitations(&visited);
++	list_splice_tail(&to_umount, list);
+ 
+-	list_for_each_entry(mnt, list, mnt_list)
+-		__propagate_umount(mnt);
+ 	return 0;
+ }
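
The reworked propagate_umount() uses list membership itself as the "visited" marker: an initialized-but-empty mnt_umounting means not yet visited, so no extra flag bit is needed, and cleanup is a list_del_init() sweep over the visited list. A userspace sketch of that idiom with a minimal intrusive list (types and names invented for the example):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

struct node {
	const char *name;
	struct list_head visit;	/* empty <=> not yet visited */
};

int main(void)
{
	struct list_head visited;
	struct node a = { "a", { 0, 0 } }, b = { "b", { 0, 0 } };

	INIT_LIST_HEAD(&visited);
	INIT_LIST_HEAD(&a.visit);
	INIT_LIST_HEAD(&b.visit);

	list_add_tail(&a.visit, &visited);	/* mark "a" visited */

	printf("a visited: %d\n", !list_empty(&a.visit));	/* 1 */
	printf("b visited: %d\n", !list_empty(&b.visit));	/* 0 */

	list_del_init(&a.visit);	/* cleanup restores the empty state */
	printf("a visited: %d\n", !list_empty(&a.visit));	/* 0 */
	return 0;
}
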
+diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
+index dd4824589470..234331227c0c 100644
+--- a/fs/xfs/xfs_attr.h
++++ b/fs/xfs/xfs_attr.h
+@@ -112,6 +112,7 @@ typedef struct attrlist_cursor_kern {
+  *========================================================================*/
+ 
+ 
++/* Return 0 on success, or -errno; other state communicated via *context */
+ typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
+ 			      unsigned char *, int, int, unsigned char *);
+ 
+diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
+index 8f7e09d0d0f0..36db8b21969f 100644
+--- a/fs/xfs/xfs_attr_list.c
++++ b/fs/xfs/xfs_attr_list.c
+@@ -108,16 +108,14 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
+ 					   (int)sfe->namelen,
+ 					   (int)sfe->valuelen,
+ 					   &sfe->nameval[sfe->namelen]);
+-
++			if (error)
++				return error;
+ 			/*
+ 			 * Either search callback finished early or
+ 			 * didn't fit it all in the buffer after all.
+ 			 */
+ 			if (context->seen_enough)
+ 				break;
+-
+-			if (error)
+-				return error;
+ 			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
+ 		}
+ 		trace_xfs_attr_list_sf_all(context);
+@@ -581,7 +579,7 @@ xfs_attr_put_listent(
+ 		trace_xfs_attr_list_full(context);
+ 		alist->al_more = 1;
+ 		context->seen_enough = 1;
+-		return 1;
++		return 0;
+ 	}
+ 
+ 	aep = (attrlist_ent_t *)&context->alist[context->firstu];
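
The put_listent convention changes here and in the xfs_xattr hunks below: callbacks now return 0 or a negative errno, and "buffer full, stop early" travels through context->seen_enough instead of a positive return value, so the caller checks the error before the stop flag. A sketch of the convention with illustrative names:

#include <stdio.h>
#include <errno.h>

struct list_ctx {
	int count;
	int capacity;
	int seen_enough;	/* early-stop flag, not an error */
};

/* Return 0 or -errno; set seen_enough to stop without an error. */
static int put_listent(struct list_ctx *ctx, int size)
{
	if (size < 0)
		return -EINVAL;	/* real error: abort the walk */
	if (ctx->count + size > ctx->capacity) {
		ctx->seen_enough = 1;	/* out of room: stop, but succeed */
		return 0;
	}
	ctx->count += size;
	return 0;
}

int main(void)
{
	struct list_ctx ctx = { 0, 10, 0 };
	int sizes[] = { 4, 4, 4 };

	for (int i = 0; i < 3; i++) {
		int error = put_listent(&ctx, sizes[i]);

		if (error)	/* check errors before the stop flag */
			return 1;
		if (ctx.seen_enough)
			break;
	}
	printf("stored %d bytes, full=%d\n", ctx.count, ctx.seen_enough);
	return 0;
}
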
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index e69a0899bc05..3b7985991823 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -402,6 +402,7 @@ xfs_attrlist_by_handle(
+ {
+ 	int			error = -ENOMEM;
+ 	attrlist_cursor_kern_t	*cursor;
++	struct xfs_fsop_attrlist_handlereq __user	*p = arg;
+ 	xfs_fsop_attrlist_handlereq_t al_hreq;
+ 	struct dentry		*dentry;
+ 	char			*kbuf;
+@@ -434,6 +435,11 @@ xfs_attrlist_by_handle(
+ 	if (error)
+ 		goto out_kfree;
+ 
++	if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
++		error = -EFAULT;
++		goto out_kfree;
++	}
++
+ 	if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
+ 		error = -EFAULT;
+ 
+diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
+index c036815183cb..ead53c933de6 100644
+--- a/fs/xfs/xfs_xattr.c
++++ b/fs/xfs/xfs_xattr.c
+@@ -151,7 +151,8 @@ xfs_xattr_put_listent(
+ 	arraytop = context->count + prefix_len + namelen + 1;
+ 	if (arraytop > context->firstu) {
+ 		context->count = -1;	/* insufficient space */
+-		return 1;
++		context->seen_enough = 1;
++		return 0;
+ 	}
+ 	offset = (char *)context->alist + context->count;
+ 	strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
+@@ -193,12 +194,15 @@ list_one_attr(const char *name, const size_t len, void *data,
+ }
+ 
+ ssize_t
+-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
++xfs_vn_listxattr(
++	struct dentry	*dentry,
++	char		*data,
++	size_t		size)
+ {
+ 	struct xfs_attr_list_context context;
+ 	struct attrlist_cursor_kern cursor = { 0 };
+-	struct inode		*inode = d_inode(dentry);
+-	int			error;
++	struct inode	*inode = d_inode(dentry);
++	int		error;
+ 
+ 	/*
+ 	 * First read the regular on-disk attributes.
+@@ -216,7 +220,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
+ 	else
+ 		context.put_listent = xfs_xattr_put_listent_sizes;
+ 
+-	xfs_attr_list_int(&context);
++	error = xfs_attr_list_int(&context);
++	if (error)
++		return error;
+ 	if (context.count < 0)
+ 		return -ERANGE;
+ 
+diff --git a/include/linux/coredump.h b/include/linux/coredump.h
+index d016a121a8c4..28ffa94aed6b 100644
+--- a/include/linux/coredump.h
++++ b/include/linux/coredump.h
+@@ -14,6 +14,7 @@ struct coredump_params;
+ extern int dump_skip(struct coredump_params *cprm, size_t nr);
+ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
+ extern int dump_align(struct coredump_params *cprm, int align);
++extern void dump_truncate(struct coredump_params *cprm);
+ #ifdef CONFIG_COREDUMP
+ extern void do_coredump(const siginfo_t *siginfo);
+ #else
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 6558af90c8fe..98a1d9748eec 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -338,6 +338,7 @@ int subsys_virtual_register(struct bus_type *subsys,
+  * @suspend:	Used to put the device to sleep mode, usually to a low power
+  *		state.
+  * @resume:	Used to bring the device from the sleep mode.
++ * @shutdown:	Called at shut-down time to quiesce the device.
+  * @ns_type:	Callbacks so sysfs can determine namespaces.
+  * @namespace:	Namespace of the device belongs to this class.
+  * @pm:		The default device power management operations of this class.
+@@ -366,6 +367,7 @@ struct class {
+ 
+ 	int (*suspend)(struct device *dev, pm_message_t state);
+ 	int (*resume)(struct device *dev);
++	int (*shutdown)(struct device *dev);
+ 
+ 	const struct kobj_ns_type_operations *ns_type;
+ 	const void *(*namespace)(struct device *dev);
+diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
+index 76ce329e656d..1b48d9c9a561 100644
+--- a/include/linux/fcntl.h
++++ b/include/linux/fcntl.h
+@@ -3,6 +3,12 @@
+ 
+ #include <uapi/linux/fcntl.h>
+ 
++/* list of all valid flags for the open/openat flags argument: */
++#define VALID_OPEN_FLAGS \
++	(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
++	 O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
++	 FASYNC	| O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
++	 O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+ 
+ #ifndef force_o_largefile
+ #define force_o_largefile() (BITS_PER_LONG != 32)
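
The rewritten BUILD_BUG_ON in fcntl_init() counts the bits of VALID_OPEN_FLAGS (with the O_NONBLOCK/O_NDELAY special cases masked out), so the popcount check tracks the flag list automatically instead of duplicating it. The same style of check in plain C, with made-up flag values and the GCC/Clang popcount builtin:

#include <stdio.h>

#define F_A 0x01
#define F_B 0x02
#define F_C 0x08
#define VALID_FLAGS (F_A | F_B | F_C)

int main(void)
{
	/* Each flag must be a distinct bit, so the popcount of the OR
	 * equals the number of flags (3 here, 21 in the kernel check). */
	int bits = __builtin_popcount(VALID_FLAGS);

	printf("distinct flag bits: %d (expect 3)\n", bits);
	return bits == 3 ? 0 : 1;
}
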
+diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
+index 2a6b9947aaa3..743b34f56f2b 100644
+--- a/include/linux/list_lru.h
++++ b/include/linux/list_lru.h
+@@ -44,6 +44,7 @@ struct list_lru_node {
+ 	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
+ 	struct list_lru_memcg	*memcg_lrus;
+ #endif
++	long nr_items;
+ } ____cacheline_aligned_in_smp;
+ 
+ struct list_lru {
+diff --git a/include/linux/random.h b/include/linux/random.h
+index b05856e16b75..0fe49a14daa5 100644
+--- a/include/linux/random.h
++++ b/include/linux/random.h
+@@ -23,6 +23,7 @@ extern const struct file_operations random_fops, urandom_fops;
+ #endif
+ 
+ unsigned int get_random_int(void);
++unsigned long get_random_long(void);
+ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
+ 
+ u32 prandom_u32(void);
+diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
+index fb86963859c7..866cb3c596f9 100644
+--- a/include/linux/timekeeper_internal.h
++++ b/include/linux/timekeeper_internal.h
+@@ -29,7 +29,6 @@
+  */
+ struct tk_read_base {
+ 	struct clocksource	*clock;
+-	cycle_t			(*read)(struct clocksource *cs);
+ 	cycle_t			mask;
+ 	cycle_t			cycle_last;
+ 	u32			mult;
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index c00c7393ce8c..e70cea22f093 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -548,9 +548,9 @@ extern void usb_ep0_reinit(struct usb_device *);
+ 	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ 
+ #define EndpointRequest \
+-	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
++	((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
+ #define EndpointOutRequest \
+-	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
++	((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
+ 
+ /* class requests from the USB 2.0 hub spec, table 11-15 */
+ /* GetBusState and SetHubDescriptor are optional, omitted */
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 36ac102c97c7..3dac7ac61f48 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -945,10 +945,6 @@ struct xfrm_dst {
+ 	struct flow_cache_object flo;
+ 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
+ 	int num_pols, num_xfrms;
+-#ifdef CONFIG_XFRM_SUB_POLICY
+-	struct flowi *origin;
+-	struct xfrm_selector *partner;
+-#endif
+ 	u32 xfrm_genid;
+ 	u32 policy_genid;
+ 	u32 route_mtu_cached;
+@@ -964,12 +960,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
+ 	dst_release(xdst->route);
+ 	if (likely(xdst->u.dst.xfrm))
+ 		xfrm_state_put(xdst->u.dst.xfrm);
+-#ifdef CONFIG_XFRM_SUB_POLICY
+-	kfree(xdst->origin);
+-	xdst->origin = NULL;
+-	kfree(xdst->partner);
+-	xdst->partner = NULL;
+-#endif
+ }
+ #endif
+ 
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index c3fc5c2b63f3..e4e8b6080b33 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1239,8 +1239,10 @@ retry:
+ 
+ 			timeo = MAX_SCHEDULE_TIMEOUT;
+ 			ret = netlink_attachskb(sock, nc, &timeo, NULL);
+-			if (ret == 1)
++			if (ret == 1) {
++				sock = NULL;
+ 				goto retry;
++			}
+ 			if (ret) {
+ 				sock = NULL;
+ 				nc = NULL;
+diff --git a/kernel/extable.c b/kernel/extable.c
+index c98f926277a8..818019777503 100644
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -67,7 +67,7 @@ static inline int init_kernel_text(unsigned long addr)
+ 	return 0;
+ }
+ 
+-int core_kernel_text(unsigned long addr)
++int notrace core_kernel_text(unsigned long addr)
+ {
+ 	if (addr >= (unsigned long)_stext &&
+ 	    addr < (unsigned long)_etext)
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 8209fa2d36ef..edc1916e89ee 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -361,7 +361,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
+ 	set_task_stack_end_magic(tsk);
+ 
+ #ifdef CONFIG_CC_STACKPROTECTOR
+-	tsk->stack_canary = get_random_int();
++	tsk->stack_canary = get_random_long();
+ #endif
+ 
+ 	/*
+diff --git a/kernel/panic.c b/kernel/panic.c
+index a4f7820f5930..10e28b8d1ac9 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -166,7 +166,7 @@ void panic(const char *fmt, ...)
+ 		 * Delay timeout seconds before rebooting the machine.
+ 		 * We can't use the "normal" timers since we just panicked.
+ 		 */
+-		pr_emerg("Rebooting in %d seconds..", panic_timeout);
++		pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
+ 
+ 		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
+ 			touch_nmi_watchdog();
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 6cb5f00696f5..976d5fbcd60d 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5863,6 +5863,9 @@ enum s_alloc {
+  * Build an iteration mask that can exclude certain CPUs from the upwards
+  * domain traversal.
+  *
++ * Only CPUs that can arrive at this group should be considered to continue
++ * balancing.
++ *
+  * Asymmetric node setups can result in situations where the domain tree is of
+  * unequal depth, make sure to skip domains that already cover the entire
+  * range.
+@@ -5874,18 +5877,31 @@ enum s_alloc {
+  */
+ static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
+ {
+-	const struct cpumask *span = sched_domain_span(sd);
++	const struct cpumask *sg_span = sched_group_cpus(sg);
+ 	struct sd_data *sdd = sd->private;
+ 	struct sched_domain *sibling;
+ 	int i;
+ 
+-	for_each_cpu(i, span) {
++	for_each_cpu(i, sg_span) {
+ 		sibling = *per_cpu_ptr(sdd->sd, i);
+-		if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
++
++		/*
++		 * Can happen in the asymmetric case, where these siblings are
++		 * unused. The mask will not be empty because those CPUs that
++		 * do have the top domain _should_ span the domain.
++		 */
++		if (!sibling->child)
++			continue;
++
++		/* If we would not end up here, we can't continue from here */
++		if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
+ 			continue;
+ 
+ 		cpumask_set_cpu(i, sched_group_mask(sg));
+ 	}
++
++	/* We must not have empty masks here */
++	WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
+ }
+ 
+ /*
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 1431089b8a67..d59551865035 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -173,7 +173,7 @@ extern int no_unaligned_warning;
+ #define SYSCTL_WRITES_WARN	 0
+ #define SYSCTL_WRITES_STRICT	 1
+ 
+-static int sysctl_writes_strict = SYSCTL_WRITES_WARN;
++static int sysctl_writes_strict = SYSCTL_WRITES_STRICT;
+ 
+ static int proc_do_cad_pid(struct ctl_table *table, int write,
+ 		  void __user *buffer, size_t *lenp, loff_t *ppos);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d296b904685b..308f8f019594 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -116,6 +116,26 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
+ 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
+ }
+ 
++/*
++ * tk_clock_read - atomic clocksource read() helper
++ *
++ * This helper is necessary to use in the read paths because, while the
++ * seqlock ensures we don't return a bad value while structures are updated,
++ * it doesn't protect from potential crashes. There is the possibility that
++ * the tkr's clocksource may change between the read reference and the
++ * clock reference passed to the read function.  This can cause crashes if
++ * the wrong clocksource is passed to the wrong read function.
++ * This isn't necessary to use when holding the timekeeper_lock or doing
++ * a read of the fast-timekeeper tkrs (which is protected by its own locking
++ * and update logic).
++ */
++static inline u64 tk_clock_read(struct tk_read_base *tkr)
++{
++	struct clocksource *clock = READ_ONCE(tkr->clock);
++
++	return clock->read(clock);
++}
++
+ #ifdef CONFIG_DEBUG_TIMEKEEPING
+ #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
+ /*
+@@ -184,7 +204,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+ 	 */
+ 	do {
+ 		seq = read_seqcount_begin(&tk_core.seq);
+-		now = tkr->read(tkr->clock);
++		now = tk_clock_read(tkr);
+ 		last = tkr->cycle_last;
+ 		mask = tkr->mask;
+ 		max = tkr->clock->max_cycles;
+@@ -218,7 +238,7 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
+ 	cycle_t cycle_now, delta;
+ 
+ 	/* read clocksource */
+-	cycle_now = tkr->read(tkr->clock);
++	cycle_now = tk_clock_read(tkr);
+ 
+ 	/* calculate the delta since the last update_wall_time */
+ 	delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
+@@ -246,12 +266,10 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
+ 
+ 	old_clock = tk->tkr_mono.clock;
+ 	tk->tkr_mono.clock = clock;
+-	tk->tkr_mono.read = clock->read;
+ 	tk->tkr_mono.mask = clock->mask;
+-	tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
++	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
+ 
+ 	tk->tkr_raw.clock = clock;
+-	tk->tkr_raw.read = clock->read;
+ 	tk->tkr_raw.mask = clock->mask;
+ 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
+ 
+@@ -440,7 +458,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+ 
+ 		now += timekeeping_delta_to_ns(tkr,
+ 				clocksource_delta(
+-					tkr->read(tkr->clock),
++					tk_clock_read(tkr),
+ 					tkr->cycle_last,
+ 					tkr->mask));
+ 	} while (read_seqcount_retry(&tkf->seq, seq));
+@@ -468,6 +486,10 @@ static cycle_t dummy_clock_read(struct clocksource *cs)
+ 	return cycles_at_suspend;
+ }
+ 
++static struct clocksource dummy_clock = {
++	.read = dummy_clock_read,
++};
++
+ /**
+  * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
+  * @tk: Timekeeper to snapshot.
+@@ -484,13 +506,13 @@ static void halt_fast_timekeeper(struct timekeeper *tk)
+ 	struct tk_read_base *tkr = &tk->tkr_mono;
+ 
+ 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+-	cycles_at_suspend = tkr->read(tkr->clock);
+-	tkr_dummy.read = dummy_clock_read;
++	cycles_at_suspend = tk_clock_read(tkr);
++	tkr_dummy.clock = &dummy_clock;
+ 	update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
+ 
+ 	tkr = &tk->tkr_raw;
+ 	memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
+-	tkr_dummy.read = dummy_clock_read;
++	tkr_dummy.clock = &dummy_clock;
+ 	update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
+ }
+ 
+@@ -635,11 +657,10 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
+  */
+ static void timekeeping_forward_now(struct timekeeper *tk)
+ {
+-	struct clocksource *clock = tk->tkr_mono.clock;
+ 	cycle_t cycle_now, delta;
+ 	s64 nsec;
+ 
+-	cycle_now = tk->tkr_mono.read(clock);
++	cycle_now = tk_clock_read(&tk->tkr_mono);
+ 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+ 	tk->tkr_mono.cycle_last = cycle_now;
+ 	tk->tkr_raw.cycle_last  = cycle_now;
+@@ -1406,7 +1427,7 @@ void timekeeping_resume(void)
+ 	 * The less preferred source will only be tried if there is no better
+ 	 * usable source. The rtc part is handled separately in rtc core code.
+ 	 */
+-	cycle_now = tk->tkr_mono.read(clock);
++	cycle_now = tk_clock_read(&tk->tkr_mono);
+ 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
+ 		cycle_now > tk->tkr_mono.cycle_last) {
+ 		u64 num, max = ULLONG_MAX;
+@@ -1801,7 +1822,7 @@ void update_wall_time(void)
+ #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+ 	offset = real_tk->cycle_interval;
+ #else
+-	offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
++	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
+ 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
+ #endif
+ 
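
The timekeeping hunks route every clock read through tk_clock_read(), which snapshots tkr->clock once and calls through that snapshot, instead of loading ->read and ->clock separately and possibly mixing two clocksources mid-update. A userspace analogue of the single-snapshot pattern, using a C11 atomic load in place of READ_ONCE():

#include <stdio.h>
#include <stdatomic.h>

struct clock {
	const char *name;
	unsigned long (*read)(const struct clock *c);
};

static unsigned long read_a(const struct clock *c) { (void)c; return 100; }

static struct clock clock_a = { "a", read_a };

/* Current clock; an updater may swap this pointer at any time. */
static _Atomic(struct clock *) cur_clock = &clock_a;

/*
 * Snapshot the pointer once, then derive both the function and its
 * argument from that one snapshot, so the read function can never be
 * handed a different clock than the one it belongs to.
 */
static unsigned long clock_read(void)
{
	struct clock *c = atomic_load_explicit(&cur_clock,
						memory_order_acquire);

	return c->read(c);
}

int main(void)
{
	printf("now = %lu\n", clock_read());
	return 0;
}
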
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index a4c0ae70c6dd..591b3b4f5337 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1638,7 +1638,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
+ 		TRACE_FLAG_IRQS_NOSUPPORT |
+ #endif
+ 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+-		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
++		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+ 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+ }
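
The one-character trace.c fix matters because SOFTIRQ_MASK covers the whole softirq count field, including the amount added by local_bh_disable(), while the SOFTIRQ_OFFSET bit is only set while a softirq is actually being served. A standalone demonstration; the field layout mirrors the kernel's preempt count but is restated here for illustration:

#include <stdio.h>

#define SOFTIRQ_SHIFT	8
#define SOFTIRQ_BITS	8
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_MASK	(((1UL << SOFTIRQ_BITS) - 1) << SOFTIRQ_SHIFT)
/* local_bh_disable() adds twice the offset in this scheme. */
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

int main(void)
{
	unsigned long pc = SOFTIRQ_DISABLE_OFFSET; /* bh disabled, not in softirq */

	printf("MASK test:   %d (false positive)\n", !!(pc & SOFTIRQ_MASK));
	printf("OFFSET test: %d (correct)\n", !!(pc & SOFTIRQ_OFFSET));
	return 0;
}
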
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 28b291f83a4c..9a4aee1d3345 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -671,30 +671,25 @@ static int create_trace_kprobe(int argc, char **argv)
+ 		pr_info("Probe point is not specified.\n");
+ 		return -EINVAL;
+ 	}
+-	if (isdigit(argv[1][0])) {
+-		if (is_return) {
+-			pr_info("Return probe point must be a symbol.\n");
+-			return -EINVAL;
+-		}
+-		/* an address specified */
+-		ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
+-		if (ret) {
+-			pr_info("Failed to parse address.\n");
+-			return ret;
+-		}
+-	} else {
++
++	/* try to parse an address. if that fails, try to read the
++	 * input as a symbol. */
++	if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
+ 		/* a symbol specified */
+ 		symbol = argv[1];
+ 		/* TODO: support .init module functions */
+ 		ret = traceprobe_split_symbol_offset(symbol, &offset);
+ 		if (ret) {
+-			pr_info("Failed to parse symbol.\n");
++			pr_info("Failed to parse either an address or a symbol.\n");
+ 			return ret;
+ 		}
+ 		if (offset && is_return) {
+ 			pr_info("Return probe must be used without offset.\n");
+ 			return -EINVAL;
+ 		}
++	} else if (is_return) {
++		pr_info("Return probe point must be a symbol.\n");
++		return -EINVAL;
+ 	}
+ 	argc -= 2; argv += 2;
+ 
+diff --git a/lib/cmdline.c b/lib/cmdline.c
+index 8f13cf73c2ec..79069d7938ea 100644
+--- a/lib/cmdline.c
++++ b/lib/cmdline.c
+@@ -22,14 +22,14 @@
+  *	the values[M, M+1, ..., N] into the ints array in get_options.
+  */
+ 
+-static int get_range(char **str, int *pint)
++static int get_range(char **str, int *pint, int n)
+ {
+ 	int x, inc_counter, upper_range;
+ 
+ 	(*str)++;
+ 	upper_range = simple_strtol((*str), NULL, 0);
+ 	inc_counter = upper_range - *pint;
+-	for (x = *pint; x < upper_range; x++)
++	for (x = *pint; n && x < upper_range; x++, n--)
+ 		*pint++ = x;
+ 	return inc_counter;
+ }
+@@ -96,7 +96,7 @@ char *get_options(const char *str, int nints, int *ints)
+ 			break;
+ 		if (res == 3) {
+ 			int range_nums;
+-			range_nums = get_range((char **)&str, ints + i);
++			range_nums = get_range((char **)&str, ints + i, nints - i);
+ 			if (range_nums < 0)
+ 				break;
+ 			/*
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 3c365ab6cf5f..87a203e439f8 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -452,11 +452,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
+ 
+ 	/*
+-	 * For mappings greater than a page, we limit the stride (and
+-	 * hence alignment) to a page size.
++	 * For mappings greater than or equal to a page, we limit the stride
++	 * (and hence alignment) to a page size.
+ 	 */
+ 	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+-	if (size > PAGE_SIZE)
++	if (size >= PAGE_SIZE)
+ 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
+ 	else
+ 		stride = 1;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index a58270f60602..bdd6a8dd5797 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1294,8 +1294,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	 */
+ 	if (unlikely(pmd_trans_migrating(*pmdp))) {
+ 		page = pmd_page(*pmdp);
++		if (!get_page_unless_zero(page))
++			goto out_unlock;
+ 		spin_unlock(ptl);
+ 		wait_on_page_locked(page);
++		put_page(page);
+ 		goto out;
+ 	}
+ 
+@@ -1327,8 +1330,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ 	/* Migration could have started since the pmd_trans_migrating check */
+ 	if (!page_locked) {
++		if (!get_page_unless_zero(page))
++			goto out_unlock;
+ 		spin_unlock(ptl);
+ 		wait_on_page_locked(page);
++		put_page(page);
+ 		page_nid = -1;
+ 		goto out;
+ 	}
+diff --git a/mm/list_lru.c b/mm/list_lru.c
+index 84b4c21d78d7..2a6a2e4b64ba 100644
+--- a/mm/list_lru.c
++++ b/mm/list_lru.c
+@@ -103,6 +103,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
+ 	if (list_empty(item)) {
+ 		list_add_tail(item, &l->list);
+ 		l->nr_items++;
++		nlru->nr_items++;
+ 		spin_unlock(&nlru->lock);
+ 		return true;
+ 	}
+@@ -122,6 +123,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
+ 	if (!list_empty(item)) {
+ 		list_del_init(item);
+ 		l->nr_items--;
++		nlru->nr_items--;
+ 		spin_unlock(&nlru->lock);
+ 		return true;
+ 	}
+@@ -169,15 +171,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
+ 
+ unsigned long list_lru_count_node(struct list_lru *lru, int nid)
+ {
+-	long count = 0;
+-	int memcg_idx;
++	struct list_lru_node *nlru;
+ 
+-	count += __list_lru_count_one(lru, nid, -1);
+-	if (list_lru_memcg_aware(lru)) {
+-		for_each_memcg_cache_index(memcg_idx)
+-			count += __list_lru_count_one(lru, nid, memcg_idx);
+-	}
+-	return count;
++	nlru = &lru->node[nid];
++	return nlru->nr_items;
+ }
+ EXPORT_SYMBOL_GPL(list_lru_count_node);
+ 
+@@ -212,6 +209,7 @@ restart:
+ 			assert_spin_locked(&nlru->lock);
+ 		case LRU_REMOVED:
+ 			isolated++;
++			nlru->nr_items--;
+ 			/*
+ 			 * If the lru lock has been dropped, our list
+ 			 * traversal is now invalid and so we have to
+diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
+index 40dd0f9b00d6..09f733b0424a 100644
+--- a/mm/swap_cgroup.c
++++ b/mm/swap_cgroup.c
+@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type)
+ 			struct page *page = map[i];
+ 			if (page)
+ 				__free_page(page);
++			if (!(i % SWAP_CLUSTER_MAX))
++				cond_resched();
+ 		}
+ 		vfree(map);
+ 	}
+diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
+index 59555f0f8fc8..d45e590e8f10 100644
+--- a/net/8021q/vlan.c
++++ b/net/8021q/vlan.c
+@@ -278,7 +278,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+ 	return 0;
+ 
+ out_free_newdev:
+-	free_netdev(new_dev);
++	if (new_dev->reg_state == NETREG_UNINITIALIZED)
++		free_netdev(new_dev);
+ 	return err;
+ }
+ 
+diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
+index f6c3b2137eea..bcb62e10a99c 100644
+--- a/net/caif/cfpkt_skbuff.c
++++ b/net/caif/cfpkt_skbuff.c
+@@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
+ {
+ 	struct sk_buff *skb;
+ 
+-	if (likely(in_interrupt()))
+-		skb = alloc_skb(len + pfx, GFP_ATOMIC);
+-	else
+-		skb = alloc_skb(len + pfx, GFP_KERNEL);
+-
++	skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ 	if (unlikely(skb == NULL))
+ 		return NULL;
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 0f9289ff0f2a..bd47736b689e 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1214,8 +1214,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+ 	if (!new_ifalias)
+ 		return -ENOMEM;
+ 	dev->ifalias = new_ifalias;
++	memcpy(dev->ifalias, alias, len);
++	dev->ifalias[len] = 0;
+ 
+-	strlcpy(dev->ifalias, alias, len+1);
+ 	return len;
+ }
+ 
+@@ -6726,8 +6727,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
+ 	} else {
+ 		netdev_stats_to_stats64(storage, &dev->stats);
+ 	}
+-	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
+-	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
++	storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
++	storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
+ 	return storage;
+ }
+ EXPORT_SYMBOL(dev_get_stats);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index 540066cb33ef..4d385b292f5d 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -373,6 +373,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
+ 		spin_lock_bh(&dst_garbage.lock);
+ 		dst = dst_garbage.list;
+ 		dst_garbage.list = NULL;
++		/* The code in dst_ifdown places a hold on the loopback device.
++		 * If the gc entry processing is set to expire after a lengthy
++		 * interval, this hold can cause netdev_wait_allrefs() to hang
++		 * out and wait for a long time -- until the loopback
++		 * interface is released.  If we're really unlucky, it'll emit
++		 * pr_emerg messages to console too.  Reset the interval here,
++		 * so dst cleanups occur in a more timely fashion.
++		 */
++		if (dst_garbage.timer_inc > DST_GC_INC) {
++			dst_garbage.timer_inc = DST_GC_INC;
++			dst_garbage.timer_expires = DST_GC_MIN;
++			mod_delayed_work(system_wq, &dst_gc_work,
++					 dst_garbage.timer_expires);
++		}
+ 		spin_unlock_bh(&dst_garbage.lock);
+ 
+ 		if (last)
+diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
+index 76d3bf70c31a..53b9099c331f 100644
+--- a/net/decnet/dn_route.c
++++ b/net/decnet/dn_route.c
+@@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
+ 	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
+ }
+ 
+-static inline void dnrt_drop(struct dn_route *rt)
+-{
+-	dst_release(&rt->dst);
+-	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
+-}
+-
+ static void dn_dst_check_expire(unsigned long dummy)
+ {
+ 	int i;
+@@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
+ 			}
+ 			*rtp = rt->dst.dn_next;
+ 			rt->dst.dn_next = NULL;
+-			dnrt_drop(rt);
++			dnrt_free(rt);
+ 			break;
+ 		}
+ 		spin_unlock_bh(&dn_rt_hash_table[i].lock);
+@@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
+ 			dst_use(&rth->dst, now);
+ 			spin_unlock_bh(&dn_rt_hash_table[hash].lock);
+ 
+-			dnrt_drop(rt);
++			dst_free(&rt->dst);
+ 			*rp = rth;
+ 			return 0;
+ 		}
+@@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
+ 		for(; rt; rt = next) {
+ 			next = rcu_dereference_raw(rt->dst.dn_next);
+ 			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
+-			dst_free((struct dst_entry *)rt);
++			dnrt_free(rt);
+ 		}
+ 
+ nothing_to_declare:
+@@ -1189,7 +1183,7 @@ make_route:
+ 	if (dev_out->flags & IFF_LOOPBACK)
+ 		flags |= RTCF_LOCAL;
+ 
+-	rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
++	rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
+ 	if (rt == NULL)
+ 		goto e_nobufs;
+ 
+diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
+index af34fc9bdf69..2fe45762ca70 100644
+--- a/net/decnet/netfilter/dn_rtmsg.c
++++ b/net/decnet/netfilter/dn_rtmsg.c
+@@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
+ {
+ 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
+ 
+-	if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
++	if (skb->len < sizeof(*nlh) ||
++	    nlh->nlmsg_len < sizeof(*nlh) ||
++	    skb->len < nlh->nlmsg_len)
+ 		return;
+ 
+ 	if (!netlink_capable(skb, CAP_NET_ADMIN))
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 57978c5b2c91..fe2758c72dbf 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -734,10 +734,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
+ 	/* Use already configured phy mode */
+ 	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
+ 		p->phy_interface = p->phy->interface;
+-	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+-			   p->phy_interface);
+-
+-	return 0;
++	return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
++				  p->phy_interface);
+ }
+ 
+ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index 218abf9fb1ed..e2d3d62297ec 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1080,6 +1080,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
+ 	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
+ 	if (!pmc)
+ 		return;
++	spin_lock_init(&pmc->lock);
+ 	spin_lock_bh(&im->lock);
+ 	pmc->interface = im->interface;
+ 	in_dev_hold(in_dev);
+@@ -1832,21 +1833,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
+ 
+ static void ip_mc_clear_src(struct ip_mc_list *pmc)
+ {
+-	struct ip_sf_list *psf, *nextpsf;
++	struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
+ 
+-	for (psf = pmc->tomb; psf; psf = nextpsf) {
++	spin_lock_bh(&pmc->lock);
++	tomb = pmc->tomb;
++	pmc->tomb = NULL;
++	sources = pmc->sources;
++	pmc->sources = NULL;
++	pmc->sfmode = MCAST_EXCLUDE;
++	pmc->sfcount[MCAST_INCLUDE] = 0;
++	pmc->sfcount[MCAST_EXCLUDE] = 1;
++	spin_unlock_bh(&pmc->lock);
++
++	for (psf = tomb; psf; psf = nextpsf) {
+ 		nextpsf = psf->sf_next;
+ 		kfree(psf);
+ 	}
+-	pmc->tomb = NULL;
+-	for (psf = pmc->sources; psf; psf = nextpsf) {
++	for (psf = sources; psf; psf = nextpsf) {
+ 		nextpsf = psf->sf_next;
+ 		kfree(psf);
+ 	}
+-	pmc->sources = NULL;
+-	pmc->sfmode = MCAST_EXCLUDE;
+-	pmc->sfcount[MCAST_INCLUDE] = 0;
+-	pmc->sfcount[MCAST_EXCLUDE] = 1;
+ }
+ 
+ /* Join a multicast group
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index e31af0c23e56..df4edab0ba3a 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -286,9 +286,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
+ static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
+ 				   unsigned long delay)
+ {
+-	if (!delayed_work_pending(&ifp->dad_work))
+-		in6_ifa_hold(ifp);
+-	mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
++	in6_ifa_hold(ifp);
++	if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
++		in6_ifa_put(ifp);
+ }
+ 
+ static int snmp6_alloc_dev(struct inet6_dev *idev)
+@@ -1675,17 +1675,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
+ 
+ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
+ {
+-	if (ifp->flags&IFA_F_PERMANENT) {
+-		spin_lock_bh(&ifp->lock);
+-		addrconf_del_dad_work(ifp);
+-		ifp->flags |= IFA_F_TENTATIVE;
+-		if (dad_failed)
+-			ifp->flags |= IFA_F_DADFAILED;
+-		spin_unlock_bh(&ifp->lock);
+-		if (dad_failed)
+-			ipv6_ifa_notify(0, ifp);
+-		in6_ifa_put(ifp);
+-	} else if (ifp->flags&IFA_F_TEMPORARY) {
++	if (ifp->flags&IFA_F_TEMPORARY) {
+ 		struct inet6_ifaddr *ifpub;
+ 		spin_lock_bh(&ifp->lock);
+ 		ifpub = ifp->ifpub;
+@@ -1698,6 +1688,16 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
+ 			spin_unlock_bh(&ifp->lock);
+ 		}
+ 		ipv6_del_addr(ifp);
++	} else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
++		spin_lock_bh(&ifp->lock);
++		addrconf_del_dad_work(ifp);
++		ifp->flags |= IFA_F_TENTATIVE;
++		if (dad_failed)
++			ifp->flags |= IFA_F_DADFAILED;
++		spin_unlock_bh(&ifp->lock);
++		if (dad_failed)
++			ipv6_ifa_notify(0, ifp);
++		in6_ifa_put(ifp);
+ 	} else {
+ 		ipv6_del_addr(ifp);
+ 	}
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f91ee783a5fd..eefb8759cfa4 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -978,8 +978,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
+ 	}
+ #endif
+ 	if (ipv6_addr_v4mapped(&fl6->saddr) &&
+-	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
+-		return -EAFNOSUPPORT;
++	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
++		err = -EAFNOSUPPORT;
++		goto out_err_release;
++	}
+ 
+ 	return 0;
+ 
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index f0d52d721b3a..9a556e434f59 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 			goto out;
+ 	}
+ 
++	err = -ENOBUFS;
+ 	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
+ 	if (sa->sadb_sa_auth) {
+ 		int keysize = 0;
+@@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 		if (key)
+ 			keysize = (key->sadb_key_bits + 7) / 8;
+ 		x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
+-		if (!x->aalg)
++		if (!x->aalg) {
++			err = -ENOMEM;
+ 			goto out;
++		}
+ 		strcpy(x->aalg->alg_name, a->name);
+ 		x->aalg->alg_key_len = 0;
+ 		if (key) {
+@@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 				goto out;
+ 			}
+ 			x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
+-			if (!x->calg)
++			if (!x->calg) {
++				err = -ENOMEM;
+ 				goto out;
++			}
+ 			strcpy(x->calg->alg_name, a->name);
+ 			x->props.calgo = sa->sadb_sa_encrypt;
+ 		} else {
+@@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 			if (key)
+ 				keysize = (key->sadb_key_bits + 7) / 8;
+ 			x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
+-			if (!x->ealg)
++			if (!x->ealg) {
++				err = -ENOMEM;
+ 				goto out;
++			}
+ 			strcpy(x->ealg->alg_name, a->name);
+ 			x->ealg->alg_key_len = 0;
+ 			if (key) {
+@@ -1226,8 +1233,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
+ 		struct xfrm_encap_tmpl *natt;
+ 
+ 		x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
+-		if (!x->encap)
++		if (!x->encap) {
++			err = -ENOMEM;
+ 			goto out;
++		}
+ 
+ 		natt = x->encap;
+ 		n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index e86daed83c6f..0ddf23971b50 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -887,12 +887,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ 		supp_vht = supp_vht || sband->vht_cap.vht_supported;
+ 
+-		if (sband->ht_cap.ht_supported)
+-			local->rx_chains =
+-				max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+-				    local->rx_chains);
++		if (!sband->ht_cap.ht_supported)
++			continue;
+ 
+ 		/* TODO: consider VHT for RX chains, hopefully it's the same */
++		local->rx_chains =
++			max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
++			    local->rx_chains);
++
++		/* no need to mask, SM_PS_DISABLED has all bits set */
++		sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
++			             IEEE80211_HT_CAP_SM_PS_SHIFT;
+ 	}
+ 
+ 	/* if low-level driver supports AP, we also support VLAN */
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index a26bd6532829..a837e405a8ab 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -822,10 +822,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ {
+ 	unsigned int verdict = NF_DROP;
+ 
+-	if (IP_VS_FWD_METHOD(cp) != 0) {
+-		pr_err("shouldn't reach here, because the box is on the "
+-		       "half connection in the tun/dr module.\n");
+-	}
++	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
++		goto ignore_cp;
+ 
+ 	/* Ensure the checksum is correct */
+ 	if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
+@@ -859,6 +857,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
+ 		ip_vs_notrack(skb);
+ 	else
+ 		ip_vs_update_conntrack(skb, cp, 0);
++
++ignore_cp:
+ 	verdict = NF_ACCEPT;
+ 
+ out:
+@@ -1229,8 +1229,11 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ 	 */
+ 	cp = pp->conn_out_get(af, skb, &iph, 0);
+ 
+-	if (likely(cp))
++	if (likely(cp)) {
++		if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
++			goto ignore_cp;
+ 		return handle_response(af, skb, pd, cp, &iph, hooknum);
++	}
+ 	if (sysctl_nat_icmp_send(net) &&
+ 	    (pp->protocol == IPPROTO_TCP ||
+ 	     pp->protocol == IPPROTO_UDP ||
+@@ -1272,9 +1275,15 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
+ 			}
+ 		}
+ 	}
++
++out:
+ 	IP_VS_DBG_PKT(12, af, pp, skb, 0,
+ 		      "ip_vs_out: packet continues traversal as normal");
+ 	return NF_ACCEPT;
++
++ignore_cp:
++	__ip_vs_conn_put(cp);
++	goto out;
+ }
+ 
+ /*
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 6b8b0abbfab4..b6e939a8b099 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -45,6 +45,8 @@
+ #include <net/netfilter/nf_conntrack_zones.h>
+ #include <net/netfilter/nf_conntrack_timestamp.h>
+ #include <net/netfilter/nf_conntrack_labels.h>
++#include <net/netfilter/nf_conntrack_seqadj.h>
++#include <net/netfilter/nf_conntrack_synproxy.h>
+ #ifdef CONFIG_NF_NAT_NEEDED
+ #include <net/netfilter/nf_nat_core.h>
+ #include <net/netfilter/nf_nat_l4proto.h>
+@@ -1727,6 +1729,8 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
+ 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
+ 	nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
+ 	nf_ct_labels_ext_add(ct);
++	nfct_seqadj_ext_add(ct);
++	nfct_synproxy_ext_add(ct);
+ 
+ 	/* we must add conntrack extensions before confirmation. */
+ 	ct->status |= IPS_CONFIRMED;
+diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
+index e762de5ee89b..6531d7039b11 100644
+--- a/net/netfilter/xt_TCPMSS.c
++++ b/net/netfilter/xt_TCPMSS.c
+@@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ 	tcp_hdrlen = tcph->doff * 4;
+ 
+-	if (len < tcp_hdrlen)
++	if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
+ 		return -1;
+ 
+ 	if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+@@ -156,6 +156,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
+ 	if (len > tcp_hdrlen)
+ 		return 0;
+ 
++	/* tcph->doff has 4 bits, do not wrap it to 0 */
++	if (tcp_hdrlen >= 15 * 4)
++		return 0;
++
+ 	/*
+ 	 * MSS Option not found ?! add it..
+ 	 */
+diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
+index db0f39f5ef96..6851a6d98fce 100644
+--- a/net/rxrpc/ar-key.c
++++ b/net/rxrpc/ar-key.c
+@@ -215,7 +215,7 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ 				       unsigned int *_toklen)
+ {
+ 	const __be32 *xdr = *_xdr;
+-	unsigned int toklen = *_toklen, n_parts, loop, tmp;
++	unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
+ 
+ 	/* there must be at least one name, and at least #names+1 length
+ 	 * words */
+@@ -245,16 +245,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ 		toklen -= 4;
+ 		if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
+ 			return -EINVAL;
+-		if (tmp > toklen)
++		paddedlen = (tmp + 3) & ~3;
++		if (paddedlen > toklen)
+ 			return -EINVAL;
+ 		princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
+ 		if (!princ->name_parts[loop])
+ 			return -ENOMEM;
+ 		memcpy(princ->name_parts[loop], xdr, tmp);
+ 		princ->name_parts[loop][tmp] = 0;
+-		tmp = (tmp + 3) & ~3;
+-		toklen -= tmp;
+-		xdr += tmp >> 2;
++		toklen -= paddedlen;
++		xdr += paddedlen >> 2;
+ 	}
+ 
+ 	if (toklen < 4)
+@@ -263,16 +263,16 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ 	toklen -= 4;
+ 	if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
+ 		return -EINVAL;
+-	if (tmp > toklen)
++	paddedlen = (tmp + 3) & ~3;
++	if (paddedlen > toklen)
+ 		return -EINVAL;
+ 	princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
+ 	if (!princ->realm)
+ 		return -ENOMEM;
+ 	memcpy(princ->realm, xdr, tmp);
+ 	princ->realm[tmp] = 0;
+-	tmp = (tmp + 3) & ~3;
+-	toklen -= tmp;
+-	xdr += tmp >> 2;
++	toklen -= paddedlen;
++	xdr += paddedlen >> 2;
+ 
+ 	_debug("%s/...@%s", princ->name_parts[0], princ->realm);
+ 
+@@ -291,7 +291,7 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
+ 					 unsigned int *_toklen)
+ {
+ 	const __be32 *xdr = *_xdr;
+-	unsigned int toklen = *_toklen, len;
++	unsigned int toklen = *_toklen, len, paddedlen;
+ 
+ 	/* there must be at least one tag and one length word */
+ 	if (toklen <= 8)
+@@ -305,15 +305,17 @@ static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
+ 	toklen -= 8;
+ 	if (len > max_data_size)
+ 		return -EINVAL;
++	paddedlen = (len + 3) & ~3;
++	if (paddedlen > toklen)
++		return -EINVAL;
+ 	td->data_len = len;
+ 
+ 	if (len > 0) {
+ 		td->data = kmemdup(xdr, len, GFP_KERNEL);
+ 		if (!td->data)
+ 			return -ENOMEM;
+-		len = (len + 3) & ~3;
+-		toklen -= len;
+-		xdr += len >> 2;
++		toklen -= paddedlen;
++		xdr += paddedlen >> 2;
+ 	}
+ 
+ 	_debug("tag %x len %x", td->tag, td->data_len);
+@@ -385,7 +387,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ 				    const __be32 **_xdr, unsigned int *_toklen)
+ {
+ 	const __be32 *xdr = *_xdr;
+-	unsigned int toklen = *_toklen, len;
++	unsigned int toklen = *_toklen, len, paddedlen;
+ 
+ 	/* there must be at least one length word */
+ 	if (toklen <= 4)
+@@ -397,6 +399,9 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ 	toklen -= 4;
+ 	if (len > AFSTOKEN_K5_TIX_MAX)
+ 		return -EINVAL;
++	paddedlen = (len + 3) & ~3;
++	if (paddedlen > toklen)
++		return -EINVAL;
+ 	*_tktlen = len;
+ 
+ 	_debug("ticket len %u", len);
+@@ -405,9 +410,8 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ 		*_ticket = kmemdup(xdr, len, GFP_KERNEL);
+ 		if (!*_ticket)
+ 			return -ENOMEM;
+-		len = (len + 3) & ~3;
+-		toklen -= len;
+-		xdr += len >> 2;
++		toklen -= paddedlen;
++		xdr += paddedlen >> 2;
+ 	}
+ 
+ 	*_xdr = xdr;
+@@ -550,7 +554,7 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
+ {
+ 	const __be32 *xdr = prep->data, *token;
+ 	const char *cp;
+-	unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
++	unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
+ 	size_t datalen = prep->datalen;
+ 	int ret;
+ 
+@@ -576,22 +580,21 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
+ 	if (len < 1 || len > AFSTOKEN_CELL_MAX)
+ 		goto not_xdr;
+ 	datalen -= 4;
+-	tmp = (len + 3) & ~3;
+-	if (tmp > datalen)
++	paddedlen = (len + 3) & ~3;
++	if (paddedlen > datalen)
+ 		goto not_xdr;
+ 
+ 	cp = (const char *) xdr;
+ 	for (loop = 0; loop < len; loop++)
+ 		if (!isprint(cp[loop]))
+ 			goto not_xdr;
+-	if (len < tmp)
+-		for (; loop < tmp; loop++)
+-			if (cp[loop])
+-				goto not_xdr;
++	for (; loop < paddedlen; loop++)
++		if (cp[loop])
++			goto not_xdr;
+ 	_debug("cellname: [%u/%u] '%*.*s'",
+-	       len, tmp, len, len, (const char *) xdr);
+-	datalen -= tmp;
+-	xdr += tmp >> 2;
++	       len, paddedlen, len, len, (const char *) xdr);
++	datalen -= paddedlen;
++	xdr += paddedlen >> 2;
+ 
+ 	/* get the token count */
+ 	if (datalen < 12)
+@@ -612,10 +615,11 @@ static int rxrpc_preparse_xdr(struct key_preparsed_payload *prep)
+ 		sec_ix = ntohl(*xdr);
+ 		datalen -= 4;
+ 		_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
+-		if (toklen < 20 || toklen > datalen)
++		paddedlen = (toklen + 3) & ~3;
++		if (toklen < 20 || toklen > datalen || paddedlen > datalen)
+ 			goto not_xdr;
+-		datalen -= (toklen + 3) & ~3;
+-		xdr += (toklen + 3) >> 2;
++		datalen -= paddedlen;
++		xdr += paddedlen >> 2;
+ 
+ 	} while (--loop > 0);
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index c244a49ae4ac..25353056439d 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1004,6 +1004,9 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+ 
+ 		return sch;
+ 	}
++	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
++	if (ops->destroy)
++		ops->destroy(sch);
+ err_out3:
+ 	dev_put(dev);
+ 	kfree((char *) sch - sch->padded);
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 792c6f330f77..c072305068e3 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -644,7 +644,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+ 			q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
+ 						      sizeof(u32));
+ 			if (!q->hhf_arrays[i]) {
+-				hhf_destroy(sch);
++				/* Note: hhf_destroy() will be called
++				 * by our caller.
++				 */
+ 				return -ENOMEM;
+ 			}
+ 		}
+@@ -655,7 +657,9 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt)
+ 			q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
+ 							  BITS_PER_BYTE);
+ 			if (!q->hhf_valid_bits[i]) {
+-				hhf_destroy(sch);
++				/* Note: hhf_destroy() will be called
++				 * by our caller.
++				 */
+ 				return -ENOMEM;
+ 			}
+ 		}
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index 3e82f047caaf..d9c84328e7eb 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -52,7 +52,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+ 	/* pre-allocate qdiscs, attachment can't fail */
+ 	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ 			       GFP_KERNEL);
+-	if (priv->qdiscs == NULL)
++	if (!priv->qdiscs)
+ 		return -ENOMEM;
+ 
+ 	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+@@ -60,18 +60,14 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+ 		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
+ 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
+ 						    TC_H_MIN(ntx + 1)));
+-		if (qdisc == NULL)
+-			goto err;
++		if (!qdisc)
++			return -ENOMEM;
+ 		priv->qdiscs[ntx] = qdisc;
+ 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+ 
+ 	sch->flags |= TCQ_F_MQROOT;
+ 	return 0;
+-
+-err:
+-	mq_destroy(sch);
+-	return -ENOMEM;
+ }
+ 
+ static void mq_attach(struct Qdisc *sch)
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index ad70ecf57ce7..66bccc5ff4ea 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -117,20 +117,17 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ 	/* pre-allocate qdisc, attachment can't fail */
+ 	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ 			       GFP_KERNEL);
+-	if (priv->qdiscs == NULL) {
+-		err = -ENOMEM;
+-		goto err;
+-	}
++	if (!priv->qdiscs)
++		return -ENOMEM;
+ 
+ 	for (i = 0; i < dev->num_tx_queues; i++) {
+ 		dev_queue = netdev_get_tx_queue(dev, i);
+ 		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
+ 					  TC_H_MAKE(TC_H_MAJ(sch->handle),
+ 						    TC_H_MIN(i + 1)));
+-		if (qdisc == NULL) {
+-			err = -ENOMEM;
+-			goto err;
+-		}
++		if (!qdisc)
++			return -ENOMEM;
++
+ 		priv->qdiscs[i] = qdisc;
+ 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+@@ -143,7 +140,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ 		priv->hw_owned = 1;
+ 		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
+ 		if (err)
+-			goto err;
++			return err;
+ 	} else {
+ 		netdev_set_num_tc(dev, qopt->num_tc);
+ 		for (i = 0; i < qopt->num_tc; i++)
+@@ -157,10 +154,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch->flags |= TCQ_F_MQROOT;
+ 	return 0;
+-
+-err:
+-	mqprio_destroy(sch);
+-	return err;
+ }
+ 
+ static void mqprio_attach(struct Qdisc *sch)
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index 4417fb25166f..fdcced6aa71d 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -765,9 +765,10 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
+ 	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
+ 	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
+ 	if (!q->ht || !q->slots) {
+-		sfq_destroy(sch);
++		/* Note: sfq_destroy() will be called by our caller */
+ 		return -ENOMEM;
+ 	}
++
+ 	for (i = 0; i < q->divisor; i++)
+ 		q->ht[i] = SFQ_EMPTY_SLOT;
+ 
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 6c880961554f..34d3d4056a11 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
+ 	union sctp_addr *laddr = (union sctp_addr *)addr;
+ 	struct sctp_transport *transport;
+ 
+-	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
++	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ 		return NULL;
+ 
+ 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 03da879008d7..ce6f2bff5208 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -967,7 +967,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct dentry *dentry;
+ 
+ 	err = -EINVAL;
+-	if (sunaddr->sun_family != AF_UNIX)
++	if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
++	    sunaddr->sun_family != AF_UNIX)
+ 		goto out;
+ 
+ 	if (addr_len == sizeof(short)) {
+@@ -1098,6 +1099,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
+ 	unsigned int hash;
+ 	int err;
+ 
++	err = -EINVAL;
++	if (alen < offsetofend(struct sockaddr, sa_family))
++		goto out;
++
+ 	if (addr->sa_family != AF_UNSPEC) {
+ 		err = unix_mkname(sunaddr, alen, &hash);
+ 		if (err < 0)
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 81203bbb2eef..e81e20cbe6dd 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -301,8 +301,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_PID] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
+-	[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
+-				 .len = WLAN_PMKID_LEN },
++	[NL80211_ATTR_PMKID] = { .len = WLAN_PMKID_LEN },
+ 	[NL80211_ATTR_DURATION] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
+ 	[NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
+@@ -358,6 +357,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
+ 	[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
+ 	[NL80211_ATTR_P2P_CTWINDOW] = { .type = NLA_U8 },
+ 	[NL80211_ATTR_P2P_OPPPS] = { .type = NLA_U8 },
++	[NL80211_ATTR_LOCAL_MESH_POWER_MODE] = {. type = NLA_U32 },
+ 	[NL80211_ATTR_ACL_POLICY] = {. type = NLA_U32 },
+ 	[NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
+ 	[NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
+@@ -5678,6 +5678,10 @@ static int validate_scan_freqs(struct nlattr *freqs)
+ 	struct nlattr *attr1, *attr2;
+ 	int n_channels = 0, tmp1, tmp2;
+ 
++	nla_for_each_nested(attr1, freqs, tmp1)
++		if (nla_len(attr1) != sizeof(u32))
++			return 0;
++
+ 	nla_for_each_nested(attr1, freqs, tmp1) {
+ 		n_channels++;
+ 		/*
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 4cd2076ff84b..155070f500aa 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -1757,43 +1757,6 @@ free_dst:
+ 	goto out;
+ }
+ 
+-#ifdef CONFIG_XFRM_SUB_POLICY
+-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
+-{
+-	if (!*target) {
+-		*target = kmalloc(size, GFP_ATOMIC);
+-		if (!*target)
+-			return -ENOMEM;
+-	}
+-
+-	memcpy(*target, src, size);
+-	return 0;
+-}
+-#endif
+-
+-static int xfrm_dst_update_parent(struct dst_entry *dst,
+-				  const struct xfrm_selector *sel)
+-{
+-#ifdef CONFIG_XFRM_SUB_POLICY
+-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+-	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
+-				   sel, sizeof(*sel));
+-#else
+-	return 0;
+-#endif
+-}
+-
+-static int xfrm_dst_update_origin(struct dst_entry *dst,
+-				  const struct flowi *fl)
+-{
+-#ifdef CONFIG_XFRM_SUB_POLICY
+-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+-	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
+-#else
+-	return 0;
+-#endif
+-}
+-
+ static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+ 				struct xfrm_policy **pols,
+ 				int *num_pols, int *num_xfrms)
+@@ -1865,16 +1828,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
+ 
+ 	xdst = (struct xfrm_dst *)dst;
+ 	xdst->num_xfrms = err;
+-	if (num_pols > 1)
+-		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
+-	else
+-		err = xfrm_dst_update_origin(dst, fl);
+-	if (unlikely(err)) {
+-		dst_free(dst);
+-		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
+-		return ERR_PTR(err);
+-	}
+-
+ 	xdst->num_pols = num_pols;
+ 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
+ 	xdst->policy_genid = atomic_read(&pols[0]->genid);
+diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
+index c5ec977b9c37..928c1cf8606b 100755
+--- a/scripts/checkpatch.pl
++++ b/scripts/checkpatch.pl
+@@ -3085,7 +3085,7 @@ sub process {
+ 				$fixedline =~ s/\s*=\s*$/ = {/;
+ 				fix_insert_line($fixlinenr, $fixedline);
+ 				$fixedline = $line;
+-				$fixedline =~ s/^(.\s*){\s*/$1/;
++				$fixedline =~ s/^(.\s*)\{\s*/$1/;
+ 				fix_insert_line($fixlinenr, $fixedline);
+ 			}
+ 		}
+@@ -3435,7 +3435,7 @@ sub process {
+ 				my $fixedline = rtrim($prevrawline) . " {";
+ 				fix_insert_line($fixlinenr, $fixedline);
+ 				$fixedline = $rawline;
+-				$fixedline =~ s/^(.\s*){\s*/$1\t/;
++				$fixedline =~ s/^(.\s*)\{\s*/$1\t/;
+ 				if ($fixedline !~ /^\+\s*$/) {
+ 					fix_insert_line($fixlinenr, $fixedline);
+ 				}
+@@ -3924,7 +3924,7 @@ sub process {
+ 			if (ERROR("SPACING",
+ 				  "space required before the open brace '{'\n" . $herecurr) &&
+ 			    $fix) {
+-				$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
++				$fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
+ 			}
+ 		}
+ 
+diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
+index 0a374a2ce030..8e1c0099bb66 100644
+--- a/security/keys/encrypted-keys/encrypted.c
++++ b/security/keys/encrypted-keys/encrypted.c
+@@ -428,7 +428,7 @@ static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
+ static struct key *request_master_key(struct encrypted_key_payload *epayload,
+ 				      u8 **master_key, size_t *master_keylen)
+ {
+-	struct key *mkey = NULL;
++	struct key *mkey = ERR_PTR(-EINVAL);
+ 
+ 	if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+ 		     KEY_TRUSTED_PREFIX_LEN)) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 0fda7b4901dd..e998aaf14338 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -3188,6 +3188,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
+ 						spec->input_paths[i][nums]);
+ 					spec->input_paths[i][nums] =
+ 						spec->input_paths[i][n];
++					spec->input_paths[i][n] = 0;
+ 				}
+ 			}
+ 			nums++;
+diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
+index c1552c28507e..908a9c6fecf0 100644
+--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
++++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
+@@ -8,7 +8,7 @@
+ #include <linux/utsname.h>
+ 
+ 
+-#define MAX_LOCK_DEPTH 2000UL
++#define MAX_LOCK_DEPTH 255UL
+ 
+ #define asmlinkage
+ #define __visible
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 58f10b8e6ff2..7ee9c19e8466 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1061,21 +1061,19 @@ static int is_directory(const char *base_path, const struct dirent *dent)
+ 	return S_ISDIR(st.st_mode);
+ }
+ 
+-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
+-	while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) &&	\
+-	       lang_next)						\
+-		if ((lang_dirent.d_type == DT_DIR ||			\
+-		     (lang_dirent.d_type == DT_UNKNOWN &&		\
+-		      is_directory(scripts_path, &lang_dirent))) &&	\
+-		    (strcmp(lang_dirent.d_name, ".")) &&		\
+-		    (strcmp(lang_dirent.d_name, "..")))
+-
+-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
+-	while (!readdir_r(lang_dir, &script_dirent, &script_next) &&	\
+-	       script_next)						\
+-		if (script_dirent.d_type != DT_DIR &&			\
+-		    (script_dirent.d_type != DT_UNKNOWN ||		\
+-		     !is_directory(lang_path, &script_dirent)))
++#define for_each_lang(scripts_path, scripts_dir, lang_dirent)		\
++	while ((lang_dirent = readdir(scripts_dir)) != NULL)		\
++		if ((lang_dirent->d_type == DT_DIR ||			\
++		     (lang_dirent->d_type == DT_UNKNOWN &&		\
++		      is_directory(scripts_path, lang_dirent))) &&	\
++		    (strcmp(lang_dirent->d_name, ".")) &&		\
++		    (strcmp(lang_dirent->d_name, "..")))
++
++#define for_each_script(lang_path, lang_dir, script_dirent)		\
++	while ((script_dirent = readdir(lang_dir)) != NULL)		\
++		if (script_dirent->d_type != DT_DIR &&			\
++		    (script_dirent->d_type != DT_UNKNOWN ||		\
++		     !is_directory(lang_path, script_dirent)))
+ 
+ 
+ #define RECORD_SUFFIX			"-record"
+@@ -1221,7 +1219,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
+ 				  const char *s __maybe_unused,
+ 				  int unset __maybe_unused)
+ {
+-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
++	struct dirent *script_dirent, *lang_dirent;
+ 	char scripts_path[MAXPATHLEN];
+ 	DIR *scripts_dir, *lang_dir;
+ 	char script_path[MAXPATHLEN];
+@@ -1236,19 +1234,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
+ 	if (!scripts_dir)
+ 		return -1;
+ 
+-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
++	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+ 		snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+-			 lang_dirent.d_name);
++			 lang_dirent->d_name);
+ 		lang_dir = opendir(lang_path);
+ 		if (!lang_dir)
+ 			continue;
+ 
+-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+-			script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
++		for_each_script(lang_path, lang_dir, script_dirent) {
++			script_root = get_script_root(script_dirent, REPORT_SUFFIX);
+ 			if (script_root) {
+ 				desc = script_desc__findnew(script_root);
+ 				snprintf(script_path, MAXPATHLEN, "%s/%s",
+-					 lang_path, script_dirent.d_name);
++					 lang_path, script_dirent->d_name);
+ 				read_script_info(desc, script_path);
+ 				free(script_root);
+ 			}
+@@ -1336,7 +1334,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
+  */
+ int find_scripts(char **scripts_array, char **scripts_path_array)
+ {
+-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
++	struct dirent *script_dirent, *lang_dirent;
+ 	char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
+ 	DIR *scripts_dir, *lang_dir;
+ 	struct perf_session *session;
+@@ -1359,9 +1357,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
+ 		return -1;
+ 	}
+ 
+-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
++	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+ 		snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
+-			 lang_dirent.d_name);
++			 lang_dirent->d_name);
+ #ifdef NO_LIBPERL
+ 		if (strstr(lang_path, "perl"))
+ 			continue;
+@@ -1375,16 +1373,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
+ 		if (!lang_dir)
+ 			continue;
+ 
+-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
++		for_each_script(lang_path, lang_dir, script_dirent) {
+ 			/* Skip those real time scripts: xxxtop.p[yl] */
+-			if (strstr(script_dirent.d_name, "top."))
++			if (strstr(script_dirent->d_name, "top."))
+ 				continue;
+ 			sprintf(scripts_path_array[i], "%s/%s", lang_path,
+-				script_dirent.d_name);
+-			temp = strchr(script_dirent.d_name, '.');
++				script_dirent->d_name);
++			temp = strchr(script_dirent->d_name, '.');
+ 			snprintf(scripts_array[i],
+-				(temp - script_dirent.d_name) + 1,
+-				"%s", script_dirent.d_name);
++				(temp - script_dirent->d_name) + 1,
++				"%s", script_dirent->d_name);
+ 
+ 			if (check_ev_match(lang_path,
+ 					scripts_array[i], session))
+@@ -1402,7 +1400,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
+ 
+ static char *get_script_path(const char *script_root, const char *suffix)
+ {
+-	struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
++	struct dirent *script_dirent, *lang_dirent;
+ 	char scripts_path[MAXPATHLEN];
+ 	char script_path[MAXPATHLEN];
+ 	DIR *scripts_dir, *lang_dir;
+@@ -1415,21 +1413,21 @@ static char *get_script_path(const char *script_root, const char *suffix)
+ 	if (!scripts_dir)
+ 		return NULL;
+ 
+-	for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
++	for_each_lang(scripts_path, scripts_dir, lang_dirent) {
+ 		snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
+-			 lang_dirent.d_name);
++			 lang_dirent->d_name);
+ 		lang_dir = opendir(lang_path);
+ 		if (!lang_dir)
+ 			continue;
+ 
+-		for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+-			__script_root = get_script_root(&script_dirent, suffix);
++		for_each_script(lang_path, lang_dir, script_dirent) {
++			__script_root = get_script_root(script_dirent, suffix);
+ 			if (__script_root && !strcmp(script_root, __script_root)) {
+ 				free(__script_root);
+ 				closedir(lang_dir);
+ 				closedir(scripts_dir);
+ 				snprintf(script_path, MAXPATHLEN, "%s/%s",
+-					 lang_path, script_dirent.d_name);
++					 lang_path, script_dirent->d_name);
+ 				return strdup(script_path);
+ 			}
+ 			free(__script_root);
+diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
+index 6a4d5d41c671..65e138019b99 100644
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -627,7 +627,7 @@ repeat:
+ 		case -1:
+ 			if (errno == EINTR)
+ 				continue;
+-			/* Fall trhu */
++			__fallthrough;
+ 		default:
+ 			c = getc(stdin);
+ 			tcsetattr(0, TCSAFLUSH, &save);
+diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
+index e122970361f2..09b9b74e4c1b 100644
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -1404,6 +1404,7 @@ static int trace__process_event(struct trace *trace, struct machine *machine,
+ 		color_fprintf(trace->output, PERF_COLOR_RED,
+ 			      "LOST %" PRIu64 " events!\n", event->lost.lost);
+ 		ret = machine__process_lost_event(machine, event, sample);
++		break;
+ 	default:
+ 		ret = machine__process_event(machine, event, sample);
+ 		break;
+diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
+index 3de744961739..024583871237 100644
+--- a/tools/perf/tests/parse-events.c
++++ b/tools/perf/tests/parse-events.c
+@@ -1677,15 +1677,14 @@ static int test_pmu_events(void)
+ 	}
+ 
+ 	while (!ret && (ent = readdir(dir))) {
+-#define MAX_NAME 100
+ 		struct evlist_test e;
+-		char name[MAX_NAME];
++		char name[2 * NAME_MAX + 1 + 12 + 3];
+ 
+ 		if (!strcmp(ent->d_name, ".") ||
+ 		    !strcmp(ent->d_name, ".."))
+ 			continue;
+ 
+-		snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
++		snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
+ 
+ 		e.name  = name;
+ 		e.check = test__checkevent_pmu_events;
+@@ -1693,11 +1692,10 @@ static int test_pmu_events(void)
+ 		ret = test_event(&e);
+ 		if (ret)
+ 			break;
+-		snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
++		snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ 		e.name  = name;
+ 		e.check = test__checkevent_pmu_events_mix;
+ 		ret = test_event(&e);
+-#undef MAX_NAME
+ 	}
+ 
+ 	closedir(dir);
+diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
+index e5250eb2dd57..96b5638c3342 100644
+--- a/tools/perf/ui/browsers/annotate.c
++++ b/tools/perf/ui/browsers/annotate.c
+@@ -716,11 +716,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
+ 				nd = browser->curr_hot;
+ 			break;
+ 		case K_UNTAB:
+-			if (nd != NULL)
++			if (nd != NULL) {
+ 				nd = rb_next(nd);
+ 				if (nd == NULL)
+ 					nd = rb_first(&browser->entries);
+-			else
++			} else
+ 				nd = browser->curr_hot;
+ 			break;
+ 		case K_F1:
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index d18a59ab4ed5..12ad79717d94 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -385,7 +385,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
+ {
+ 	char filename[PATH_MAX];
+ 	DIR *tasks;
+-	struct dirent dirent, *next;
++	struct dirent *dirent;
+ 	pid_t tgid, ppid;
+ 	int rc = 0;
+ 
+@@ -413,11 +413,11 @@ static int __event__synthesize_thread(union perf_event *comm_event,
+ 		return 0;
+ 	}
+ 
+-	while (!readdir_r(tasks, &dirent, &next) && next) {
++	while ((dirent = readdir(tasks)) != NULL) {
+ 		char *end;
+ 		pid_t _pid;
+ 
+-		_pid = strtol(dirent.d_name, &end, 10);
++		_pid = strtol(dirent->d_name, &end, 10);
+ 		if (*end)
+ 			continue;
+ 
+@@ -523,7 +523,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
+ {
+ 	DIR *proc;
+ 	char proc_path[PATH_MAX];
+-	struct dirent dirent, *next;
++	struct dirent *dirent;
+ 	union perf_event *comm_event, *mmap_event, *fork_event;
+ 	int err = -1;
+ 
+@@ -548,9 +548,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
+ 	if (proc == NULL)
+ 		goto out_free_fork;
+ 
+-	while (!readdir_r(proc, &dirent, &next) && next) {
++	while ((dirent = readdir(proc)) != NULL) {
+ 		char *end;
+-		pid_t pid = strtol(dirent.d_name, &end, 10);
++		pid_t pid = strtol(dirent->d_name, &end, 10);
+ 
+ 		if (*end) /* only interested in proper numerical dirents */
+ 			continue;
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index 8b02a4355659..3297d7e85dd7 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -148,7 +148,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
+ 	if (fd == -1)
+ 		return -1;
+ 
+-		sret = read(fd, alias->unit, UNIT_MAX_LEN);
++	sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ 	if (sret < 0)
+ 		goto error;
+ 
+diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
+index 6516e220c247..82d28c67e0f3 100644
+--- a/tools/perf/util/scripting-engines/Build
++++ b/tools/perf/util/scripting-engines/Build
+@@ -1,6 +1,6 @@
+ libperf-$(CONFIG_LIBPERL)   += trace-event-perl.o
+ libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
+ 
+-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
++CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
+ 
+ CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
+diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
+index 6afd6106ceb5..3b2d7fdde6a6 100644
+--- a/tools/perf/util/string.c
++++ b/tools/perf/util/string.c
+@@ -21,6 +21,8 @@ s64 perf_atoll(const char *str)
+ 		case 'b': case 'B':
+ 			if (*p)
+ 				goto out_err;
++
++			__fallthrough;
+ 		case '\0':
+ 			return length;
+ 		default:
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index 1c8fbc9588c5..c0b7e17e3167 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -217,7 +217,7 @@ void thread__find_cpumode_addr_location(struct thread *thread,
+ 					struct addr_location *al)
+ {
+ 	size_t i;
+-	const u8 const cpumodes[] = {
++	const u8 cpumodes[] = {
+ 		PERF_RECORD_MISC_USER,
+ 		PERF_RECORD_MISC_KERNEL,
+ 		PERF_RECORD_MISC_GUEST_USER,
+diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
+index f93b9734735b..905a55401842 100644
+--- a/tools/perf/util/thread_map.c
++++ b/tools/perf/util/thread_map.c
+@@ -63,7 +63,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
+ {
+ 	DIR *proc;
+ 	int max_threads = 32, items, i;
+-	char path[256];
++	char path[NAME_MAX + 1 + 6];
+ 	struct dirent dirent, *next, **namelist = NULL;
+ 	struct thread_map *threads = malloc(sizeof(*threads) +
+ 					    max_threads * sizeof(pid_t));


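A side note on the create_trace_kprobe() rework in the patch above: the
new flow simply tries to parse the probe point as a numeric address and
falls back to symbol handling when that fails. The minimal userspace
sketch below mirrors that flow under stated assumptions;
parse_probe_point() is a hypothetical name, and strtoul() plus an
end-pointer check stands in for the kernel's kstrtoul(), which rejects
trailing characters.

	#include <stdio.h>
	#include <stdlib.h>

	/* Illustrative stand-in for the reworked create_trace_kprobe()
	 * parsing: try an address first, treat anything else as a symbol. */
	static int parse_probe_point(const char *arg, int is_return)
	{
		char *end;
		unsigned long addr = strtoul(arg, &end, 0);

		if (end != arg && *end == '\0') {
			/* parsed cleanly as an address */
			if (is_return) {
				fprintf(stderr, "return probe point must be a symbol\n");
				return -1;
			}
			printf("address probe at 0x%lx\n", addr);
			return 0;
		}
		/* anything non-numeric falls through to symbol handling */
		printf("symbol probe at %s\n", arg);
		return 0;
	}

	int main(void)
	{
		parse_probe_point("0xffffffff81000000", 0); /* address */
		parse_probe_point("do_sys_open+0x10", 0);   /* symbol */
		parse_probe_point("0x1234", 1);             /* rejected */
		return 0;
	}

The address-first ordering is what lets numeric return probes be
rejected with a clear message while every non-numeric string still
reaches the existing symbol/offset parser.
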
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-04-14 19:17 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-04-14 19:17 UTC (permalink / raw
  To: gentoo-commits

commit:     b8d213a1983935e8741527f7a87ff63f1a44e648
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 14 19:17:28 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 14 19:17:28 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b8d213a1

Fix for CVE-2016-10229. Unsafe second checksum calculation in udp.c. See bug #615480.

 0000_README                                        |  4 +
 ...udp-prop-suprt-MSG-PEEK-wth-trunc-buffers.patch | 94 ++++++++++++++++++++++
 2 files changed, 98 insertions(+)

diff --git a/0000_README b/0000_README
index 80a401b..c91ff69 100644
--- a/0000_README
+++ b/0000_README
@@ -211,6 +211,10 @@ Patch:  1520_CVE-2017-6074-dccp-skb-freeing-fix.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=610600
 Desc:   dccp: fix freeing skb too early for IPV6_RECVPKTINFO. CVE-2017-6074
 
+Patch:  1530_udp-prop-suprt-MSG-PEEK-wth-trunc-buffers.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=615480
+Desc:   Fixes CVE-2016-10229. Unsafe second checksum calculation in udp.c
+
 Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.

diff --git a/1530_udp-prop-suprt-MSG-PEEK-wth-trunc-buffers.patch b/1530_udp-prop-suprt-MSG-PEEK-wth-trunc-buffers.patch
new file mode 100644
index 0000000..1d12eaa
--- /dev/null
+++ b/1530_udp-prop-suprt-MSG-PEEK-wth-trunc-buffers.patch
@@ -0,0 +1,94 @@
+From 197c949e7798fbf28cfadc69d9ca0c2abbf93191 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 30 Dec 2015 08:51:12 -0500
+Subject: udp: properly support MSG_PEEK with truncated buffers
+
+Backporting this upstream commit into stable kernels:
+89c22d8c3b27 ("net: Fix skb csum races when peeking")
+exposed a bug in the udp stack's MSG_PEEK support when the user
+provides a buffer smaller than the skb payload.
+
+In this case,
+skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr),
+                                 msg->msg_iov);
+returns -EFAULT.
+
+This bug does not happen in upstream kernels, since Al Viro did a great
+job replacing this call with:
+skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
+This variant is safe with short buffers.
+
+For the time being, instead of reverting Herbert Xu's patch and adding
+back the invalid skb->ip_summed changes, simply store the result of
+udp_lib_checksum_complete() so that we avoid computing the checksum a
+second time, and avoid the problematic
+skb_copy_and_csum_datagram_iovec() call.
+
+This patch can be applied to recent kernels, where it avoids a double
+checksum computation, and then backported to stable kernels as a bug fix.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/udp.c | 6 ++++--
+ net/ipv6/udp.c | 6 ++++--
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 8841e98..ac14ae4 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1271,6 +1271,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
+ 	int peeked, off = 0;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
++	bool checksum_valid = false;
+ 	bool slow;
+ 
+ 	if (flags & MSG_ERRQUEUE)
+@@ -1296,11 +1297,12 @@ try_again:
+ 	 */
+ 
+ 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+-		if (udp_lib_checksum_complete(skb))
++		checksum_valid = !udp_lib_checksum_complete(skb);
++		if (!checksum_valid)
+ 			goto csum_copy_err;
+ 	}
+ 
+-	if (skb_csum_unnecessary(skb))
++	if (checksum_valid || skb_csum_unnecessary(skb))
+ 		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+ 					    msg, copied);
+ 	else {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 9da3287..00775ee 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -402,6 +402,7 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	int peeked, off = 0;
+ 	int err;
+ 	int is_udplite = IS_UDPLITE(sk);
++	bool checksum_valid = false;
+ 	int is_udp4;
+ 	bool slow;
+ 
+@@ -433,11 +434,12 @@ try_again:
+ 	 */
+ 
+ 	if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
+-		if (udp_lib_checksum_complete(skb))
++		checksum_valid = !udp_lib_checksum_complete(skb);
++		if (!checksum_valid)
+ 			goto csum_copy_err;
+ 	}
+ 
+-	if (skb_csum_unnecessary(skb))
++	if (checksum_valid || skb_csum_unnecessary(skb))
+ 		err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
+ 					    msg, copied);
+ 	else {
+-- 
+cgit v1.1
+

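The essence of the fix above -- checksum once, cache the verdict, and
let a short MSG_PEEK copy reuse it instead of re-checksumming -- can be
modelled outside the kernel. The sketch below is a userspace toy, not
the kernel API: pkt_recv() and csum_state are illustrative stand-ins
for udp_recvmsg() and the cached udp_lib_checksum_complete() result.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	/* Toy model of a queued datagram. */
	struct pkt {
		uint8_t data[32];
		size_t len;
		int csum_state;	/* -1 unknown, 0 bad, 1 good */
	};

	/* RFC 1071-style ones' complement checksum, illustration only. */
	static uint16_t csum16(const uint8_t *p, size_t n)
	{
		uint32_t s = 0;

		while (n > 1) {
			s += (uint32_t)((p[0] << 8) | p[1]);
			p += 2;
			n -= 2;
		}
		if (n)
			s += (uint32_t)p[0] << 8;
		while (s >> 16)
			s = (s & 0xffff) + (s >> 16);
		return (uint16_t)~s;
	}

	/* Copy up to buflen bytes; with peek the packet stays queued.
	 * The checksum runs at most once; later calls, including a
	 * truncated peek, reuse the cached result. */
	static ssize_t pkt_recv(struct pkt *pkt, uint8_t *buf,
				size_t buflen, bool peek)
	{
		size_t n = buflen < pkt->len ? buflen : pkt->len;

		if (pkt->csum_state < 0)
			pkt->csum_state = (csum16(pkt->data, pkt->len) == 0);
		if (!pkt->csum_state)
			return -1;		/* csum_copy_err analogue */
		memcpy(buf, pkt->data, n);
		if (!peek)
			pkt->len = 0;		/* consume the datagram */
		return (ssize_t)n;
	}

	int main(void)
	{
		struct pkt p = { .len = 4, .csum_state = -1 };
		uint8_t buf[4];
		uint16_t c;

		/* craft a payload whose trailing checksum verifies */
		p.data[0] = 0x12;
		p.data[1] = 0x34;
		c = csum16(p.data, 2);
		p.data[2] = (uint8_t)(c >> 8);
		p.data[3] = (uint8_t)c;

		/* truncated peek, then full read: one checksum total */
		printf("peek: %zd\n", pkt_recv(&p, buf, 1, true));
		printf("read: %zd\n", pkt_recv(&p, buf, 4, false));
		return 0;
	}

A valid datagram sums (payload plus trailing checksum) to zero in ones'
complement, so csum16() returning 0 plays the role of a successful
udp_lib_checksum_complete(); the second pkt_recv() call hits the cached
csum_state and never touches the checksum again.
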

* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-03-14 11:39 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-03-14 11:39 UTC (permalink / raw
  To: gentoo-commits

commit:     0ad43040a8e5b8482af4024d8a4d0128942d4a6c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 14 11:39:22 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 14 11:39:22 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=0ad43040

Linux patch 4.1.39

 0000_README             |    4 +
 1038_linux-4.1.39.patch | 4379 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4383 insertions(+)

diff --git a/0000_README b/0000_README
index 9c6c940..80a401b 100644
--- a/0000_README
+++ b/0000_README
@@ -195,6 +195,10 @@ Patch:  1037_linux-4.1.38.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.38
 
+Patch:  1038_linux-4.1.39.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.39
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1038_linux-4.1.39.patch b/1038_linux-4.1.39.patch
new file mode 100644
index 0000000..8c4add5
--- /dev/null
+++ b/1038_linux-4.1.39.patch
@@ -0,0 +1,4379 @@
+diff --git a/Makefile b/Makefile
+index 1aba5352e0bd..88cc36d6b469 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 38
++SUBLEVEL = 39
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
+index 74db59b6f392..86970eef24e5 100644
+--- a/arch/arc/kernel/unaligned.c
++++ b/arch/arc/kernel/unaligned.c
+@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ 	if (state.fault)
+ 		goto fault;
+ 
++	/* clear any remnants of delay slot */
+ 	if (delay_mode(regs)) {
+-		regs->ret = regs->bta;
++		regs->ret = regs->bta & ~1U;
+ 		regs->status32 &= ~STATUS_DE_MASK;
+ 	} else {
+ 		regs->ret += state.instr_len;
+diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
+index 4f935ad9f27b..6881757b03e8 100644
+--- a/arch/arm/boot/dts/da850-evm.dts
++++ b/arch/arm/boot/dts/da850-evm.dts
+@@ -85,6 +85,7 @@
+ 				#size-cells = <1>;
+ 				compatible = "m25p64";
+ 				spi-max-frequency = <30000000>;
++				m25p,fast-read;
+ 				reg = <0>;
+ 				partition@0 {
+ 					label = "U-Boot-SPL";
+diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
+index 85e374f873ac..e9d04f475929 100644
+--- a/arch/arm/include/asm/cputype.h
++++ b/arch/arm/include/asm/cputype.h
+@@ -81,6 +81,9 @@
+ #define ARM_CPU_XSCALE_ARCH_V2		0x4000
+ #define ARM_CPU_XSCALE_ARCH_V3		0x6000
+ 
++/* Qualcomm implemented cores */
++#define ARM_CPU_PART_SCORPION		0x510002d0
++
+ extern unsigned int processor_id;
+ 
+ #ifdef CONFIG_CPU_CP15
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index dc7d0a95bd36..c02c06b1f755 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1067,6 +1067,22 @@ static int __init arch_hw_breakpoint_init(void)
+ 		return 0;
+ 	}
+ 
++	/*
++	 * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
++	 * whenever a WFI is issued, even if the core is not powered down, in
++	 * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
++	 * breakpoint and watchpoint registers are treated as undefined, so
++	 * this results in boot time and runtime failures when these are
++	 * accessed and we unexpectedly take a trap.
++	 *
++	 * It's not clear if/how this can be worked around, so we blacklist
++	 * Scorpion CPUs to avoid these issues.
++	*/
++	if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
++		pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
++		return 0;
++	}
++
+ 	has_ossr = core_has_os_save_restore();
+ 
+ 	/* Determine how many BRPs/WRPs are available. */
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index 4d9375814b53..d54c53b7ab63 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct pt_regs newregs;
++	struct pt_regs newregs = *task_pt_regs(target);
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ 				 &newregs,
+diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
+index 8ecfd15c3a02..df73914e81c8 100644
+--- a/arch/arm/lib/getuser.S
++++ b/arch/arm/lib/getuser.S
+@@ -67,7 +67,7 @@ ENTRY(__get_user_4)
+ ENDPROC(__get_user_4)
+ 
+ ENTRY(__get_user_8)
+-	check_uaccess r0, 8, r1, r2, __get_user_bad
++	check_uaccess r0, 8, r1, r2, __get_user_bad8
+ #ifdef CONFIG_THUMB2_KERNEL
+ 5: TUSER(ldr)	r2, [r0]
+ 6: TUSER(ldr)	r3, [r0, #4]
+diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
+index 45ce065e7170..1180a75cd707 100644
+--- a/arch/arm/mach-davinci/da850.c
++++ b/arch/arm/mach-davinci/da850.c
+@@ -297,6 +297,16 @@ static struct clk emac_clk = {
+ 	.gpsc		= 1,
+ };
+ 
++/*
++ * In order to avoid adding the emac_clk to the clock lookup table twice (and
++ * screwing up the linked list in the process) create a separate clock for
++ * mdio inheriting the rate from emac_clk.
++ */
++static struct clk mdio_clk = {
++	.name		= "mdio",
++	.parent		= &emac_clk,
++};
++
+ static struct clk mcasp_clk = {
+ 	.name		= "mcasp",
+ 	.parent		= &pll0_sysclk2,
+@@ -461,7 +471,7 @@ static struct clk_lookup da850_clks[] = {
+ 	CLK(NULL,		"arm",		&arm_clk),
+ 	CLK(NULL,		"rmii",		&rmii_clk),
+ 	CLK("davinci_emac.1",	NULL,		&emac_clk),
+-	CLK("davinci_mdio.0",	"fck",		&emac_clk),
++	CLK("davinci_mdio.0",	"fck",		&mdio_clk),
+ 	CLK("davinci-mcasp.0",	NULL,		&mcasp_clk),
+ 	CLK("da8xx_lcdc.0",	"fck",		&lcdc_clk),
+ 	CLK("da830-mmc.0",	NULL,		&mmcsd0_clk),
+diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
+index f6e372c528eb..94b5669ee4d3 100644
+--- a/arch/arm64/crypto/aes-modes.S
++++ b/arch/arm64/crypto/aes-modes.S
+@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)
+ 	cbz		w6, .Lcbcencloop
+ 
+ 	ld1		{v0.16b}, [x5]			/* get iv */
+-	enc_prepare	w3, x2, x5
++	enc_prepare	w3, x2, x6
+ 
+ .Lcbcencloop:
+ 	ld1		{v1.16b}, [x1], #16		/* get next pt block */
+ 	eor		v0.16b, v0.16b, v1.16b		/* ..and xor with iv */
+-	encrypt_block	v0, w3, x2, x5, w6
++	encrypt_block	v0, w3, x2, x6, w7
+ 	st1		{v0.16b}, [x0], #16
+ 	subs		w4, w4, #1
+ 	bne		.Lcbcencloop
++	st1		{v0.16b}, [x5]			/* return iv */
+ 	ret
+ AES_ENDPROC(aes_cbc_encrypt)
+ 
+@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ 	cbz		w6, .LcbcdecloopNx
+ 
+ 	ld1		{v7.16b}, [x5]			/* get iv */
+-	dec_prepare	w3, x2, x5
++	dec_prepare	w3, x2, x6
+ 
+ .LcbcdecloopNx:
+ #if INTERLEAVE >= 2
+@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ .Lcbcdecloop:
+ 	ld1		{v1.16b}, [x1], #16		/* get next ct block */
+ 	mov		v0.16b, v1.16b			/* ...and copy to v0 */
+-	decrypt_block	v0, w3, x2, x5, w6
++	decrypt_block	v0, w3, x2, x6, w7
+ 	eor		v0.16b, v0.16b, v7.16b		/* xor with iv => pt */
+ 	mov		v7.16b, v1.16b			/* ct is next iv */
+ 	st1		{v0.16b}, [x0], #16
+@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)
+ 	bne		.Lcbcdecloop
+ .Lcbcdecout:
+ 	FRAME_POP
++	st1		{v7.16b}, [x5]			/* return iv */
+ 	ret
+ AES_ENDPROC(aes_cbc_decrypt)
+ 
+@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)
+ 
+ AES_ENTRY(aes_ctr_encrypt)
+ 	FRAME_PUSH
+-	cbnz		w6, .Lctrfirst		/* 1st time around? */
+-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
+-	rev		x5, x5
+-#if INTERLEAVE >= 2
+-	cmn		w5, w4			/* 32 bit overflow? */
+-	bcs		.Lctrinc
+-	add		x5, x5, #1		/* increment BE ctr */
+-	b		.LctrincNx
+-#else
+-	b		.Lctrinc
+-#endif
+-.Lctrfirst:
++	cbz		w6, .Lctrnotfirst	/* 1st time around? */
+ 	enc_prepare	w3, x2, x6
+ 	ld1		{v4.16b}, [x5]
+-	umov		x5, v4.d[1]		/* keep swabbed ctr in reg */
+-	rev		x5, x5
++
++.Lctrnotfirst:
++	umov		x8, v4.d[1]		/* keep swabbed ctr in reg */
++	rev		x8, x8
+ #if INTERLEAVE >= 2
+-	cmn		w5, w4			/* 32 bit overflow? */
++	cmn		w8, w4			/* 32 bit overflow? */
+ 	bcs		.Lctrloop
+ .LctrloopNx:
+ 	subs		w4, w4, #INTERLEAVE
+@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)
+ #if INTERLEAVE == 2
+ 	mov		v0.8b, v4.8b
+ 	mov		v1.8b, v4.8b
+-	rev		x7, x5
+-	add		x5, x5, #1
++	rev		x7, x8
++	add		x8, x8, #1
+ 	ins		v0.d[1], x7
+-	rev		x7, x5
+-	add		x5, x5, #1
++	rev		x7, x8
++	add		x8, x8, #1
+ 	ins		v1.d[1], x7
+ 	ld1		{v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
+ 	do_encrypt_block2x
+@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)
+ 	st1		{v0.16b-v1.16b}, [x0], #32
+ #else
+ 	ldr		q8, =0x30000000200000001	/* addends 1,2,3[,0] */
+-	dup		v7.4s, w5
++	dup		v7.4s, w8
+ 	mov		v0.16b, v4.16b
+ 	add		v7.4s, v7.4s, v8.4s
+ 	mov		v1.16b, v4.16b
+@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)
+ 	eor		v2.16b, v7.16b, v2.16b
+ 	eor		v3.16b, v5.16b, v3.16b
+ 	st1		{v0.16b-v3.16b}, [x0], #64
+-	add		x5, x5, #INTERLEAVE
++	add		x8, x8, #INTERLEAVE
+ #endif
+-	cbz		w4, .LctroutNx
+-.LctrincNx:
+-	rev		x7, x5
++	rev		x7, x8
+ 	ins		v4.d[1], x7
++	cbz		w4, .Lctrout
+ 	b		.LctrloopNx
+-.LctroutNx:
+-	sub		x5, x5, #1
+-	rev		x7, x5
+-	ins		v4.d[1], x7
+-	b		.Lctrout
+ .Lctr1x:
+ 	adds		w4, w4, #INTERLEAVE
+ 	beq		.Lctrout
+@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)
+ .Lctrloop:
+ 	mov		v0.16b, v4.16b
+ 	encrypt_block	v0, w3, x2, x6, w7
++
++	adds		x8, x8, #1		/* increment BE ctr */
++	rev		x7, x8
++	ins		v4.d[1], x7
++	bcs		.Lctrcarry		/* overflow? */
++
++.Lctrcarrydone:
+ 	subs		w4, w4, #1
+ 	bmi		.Lctrhalfblock		/* blocks < 0 means 1/2 block */
+ 	ld1		{v3.16b}, [x1], #16
+ 	eor		v3.16b, v0.16b, v3.16b
+ 	st1		{v3.16b}, [x0], #16
+-	beq		.Lctrout
+-.Lctrinc:
+-	adds		x5, x5, #1		/* increment BE ctr */
+-	rev		x7, x5
+-	ins		v4.d[1], x7
+-	bcc		.Lctrloop		/* no overflow? */
+-	umov		x7, v4.d[0]		/* load upper word of ctr  */
+-	rev		x7, x7			/* ... to handle the carry */
+-	add		x7, x7, #1
+-	rev		x7, x7
+-	ins		v4.d[0], x7
+-	b		.Lctrloop
++	bne		.Lctrloop
++
++.Lctrout:
++	st1		{v4.16b}, [x5]		/* return next CTR value */
++	FRAME_POP
++	ret
++
+ .Lctrhalfblock:
+ 	ld1		{v3.8b}, [x1]
+ 	eor		v3.8b, v0.8b, v3.8b
+ 	st1		{v3.8b}, [x0]
+-.Lctrout:
+ 	FRAME_POP
+ 	ret
++
++.Lctrcarry:
++	umov		x7, v4.d[0]		/* load upper word of ctr  */
++	rev		x7, x7			/* ... to handle the carry */
++	add		x7, x7, #1
++	rev		x7, x7
++	ins		v4.d[0], x7
++	b		.Lctrcarrydone
+ AES_ENDPROC(aes_ctr_encrypt)
+ 	.ltorg
+ 
+diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
+index 6913643bbe54..c136fd53c847 100644
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -75,6 +75,7 @@ struct user_fpsimd_state {
+ 	__uint128_t	vregs[32];
+ 	__u32		fpsr;
+ 	__u32		fpcr;
++	__u32		__reserved[2];
+ };
+ 
+ struct user_hwdebug_state {
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 7038b9a3b42c..f7738bbc8c3f 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -450,6 +450,8 @@ static int hw_break_set(struct task_struct *target,
+ 	/* (address, ctrl) registers */
+ 	limit = regset->n * regset->size;
+ 	while (count && offset < limit) {
++		if (count < PTRACE_HBP_ADDR_SZ)
++			return -EINVAL;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ 					 offset, offset + PTRACE_HBP_ADDR_SZ);
+ 		if (ret)
+@@ -459,6 +461,8 @@ static int hw_break_set(struct task_struct *target,
+ 			return ret;
+ 		offset += PTRACE_HBP_ADDR_SZ;
+ 
++		if (!count)
++			break;
+ 		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ 					 offset, offset + PTRACE_HBP_CTRL_SZ);
+ 		if (ret)
+@@ -495,7 +499,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_pt_regs newregs;
++	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+ 	if (ret)
+@@ -525,7 +529,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct user_fpsimd_state newstate;
++	struct user_fpsimd_state newstate =
++		target->thread.fpsimd_state.user_fpsimd;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
+ 	if (ret)
+@@ -549,7 +554,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
+ 		   const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	unsigned long tls;
++	unsigned long tls = target->thread.tp_value;
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ 	if (ret)
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 485fdc462243..02afe4b9755a 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -322,8 +322,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ #endif
+ 
+ 	/* Invalidate the icache for these ranges */
+-	local_flush_icache_range((unsigned long)gebase,
+-				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
++	flush_icache_range((unsigned long)gebase,
++			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+ 
+ 	/*
+ 	 * Allocate comm page for guest kernel, a TLB will be reserved for
+diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
+index 3f9406d9b9d6..da87943328a5 100644
+--- a/arch/parisc/include/asm/bitops.h
++++ b/arch/parisc/include/asm/bitops.h
+@@ -6,7 +6,7 @@
+ #endif
+ 
+ #include <linux/compiler.h>
+-#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
++#include <asm/types.h>
+ #include <asm/byteorder.h>
+ #include <asm/barrier.h>
+ #include <linux/atomic.h>
+@@ -17,6 +17,12 @@
+  * to include/asm-i386/bitops.h or kerneldoc
+  */
+ 
++#if __BITS_PER_LONG == 64
++#define SHIFT_PER_LONG 6
++#else
++#define SHIFT_PER_LONG 5
++#endif
++
+ #define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
+ 
+ 
+diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
+index e0a23c7bdd43..07fa7e50bdc0 100644
+--- a/arch/parisc/include/uapi/asm/bitsperlong.h
++++ b/arch/parisc/include/uapi/asm/bitsperlong.h
+@@ -3,10 +3,8 @@
+ 
+ #if defined(__LP64__)
+ #define __BITS_PER_LONG 64
+-#define SHIFT_PER_LONG 6
+ #else
+ #define __BITS_PER_LONG 32
+-#define SHIFT_PER_LONG 5
+ #endif
+ 
+ #include <asm-generic/bitsperlong.h>
+diff --git a/arch/parisc/include/uapi/asm/swab.h b/arch/parisc/include/uapi/asm/swab.h
+index e78403b129ef..928e1bbac98f 100644
+--- a/arch/parisc/include/uapi/asm/swab.h
++++ b/arch/parisc/include/uapi/asm/swab.h
+@@ -1,6 +1,7 @@
+ #ifndef _PARISC_SWAB_H
+ #define _PARISC_SWAB_H
+ 
++#include <asm/bitsperlong.h>
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ 
+@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+ }
+ #define __arch_swab32 __arch_swab32
+ 
+-#if BITS_PER_LONG > 32
++#if __BITS_PER_LONG > 32
+ /*
+ ** From "PA-RISC 2.0 Architecture", HP Professional Books.
+ ** See Appendix I page 8 , "Endian Byte Swapping".
+@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
+ 	return x;
+ }
+ #define __arch_swab64 __arch_swab64
+-#endif /* BITS_PER_LONG > 32 */
++#endif /* __BITS_PER_LONG > 32 */
+ 
+ #endif /* _PARISC_SWAB_H */
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 6d04c9efb496..0a4f23a070ab 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -483,7 +483,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
+ static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
+ {
+ 	struct eeh_pe *pe = (struct eeh_pe *)data;
+-	bool *clear_sw_state = flag;
++	bool clear_sw_state = *(bool *)flag;
+ 	int i, rc = 1;
+ 
+ 	for (i = 0; rc && i < 3; i++)
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index ae97ba211d8e..f409bfc0ae42 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -2633,6 +2633,9 @@ static void __init prom_find_boot_cpu(void)
+ 
+ 	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
+ 
++	if (!PHANDLE_VALID(cpu_pkg))
++		return;
++
+ 	prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
+ 	prom.cpu = be32_to_cpu(rval);
+ 
+diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
+index f84eed8243da..1231bff72582 100644
+--- a/arch/tile/kernel/ptrace.c
++++ b/arch/tile/kernel/ptrace.c
+@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
+ 			  const void *kbuf, const void __user *ubuf)
+ {
+ 	int ret;
+-	struct pt_regs regs;
++	struct pt_regs regs = *task_pt_regs(target);
+ 
+ 	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
+ 				 sizeof(regs));
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 41b06fca39f7..606f5fff1989 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -718,7 +718,7 @@ struct kvm_x86_ops {
+ 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ 
+ 	void (*update_db_bp_intercept)(struct kvm_vcpu *vcpu);
+-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
++	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ 	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
+ 	void (*get_segment)(struct kvm_vcpu *vcpu,
+@@ -938,7 +938,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+ 
+ void kvm_enable_efer_bits(u64);
+ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
+-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
++int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ 
+ struct x86_emulate_ctxt;
+@@ -967,7 +967,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
+ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
+ 
+-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
++int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
+ 
+ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 7dd9a8d3911a..b9cf3ee6514f 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -2080,3 +2080,9 @@ void kvm_lapic_init(void)
+ 	jump_label_rate_limit(&apic_hw_disabled, HZ);
+ 	jump_label_rate_limit(&apic_sw_disabled, HZ);
+ }
++
++void kvm_lapic_exit(void)
++{
++	static_key_deferred_flush(&apic_hw_disabled);
++	static_key_deferred_flush(&apic_sw_disabled);
++}
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index c4ea87eedf8a..5773f1eaf46e 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -95,6 +95,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
+ 
+ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+ void kvm_lapic_init(void);
++void kvm_lapic_exit(void);
+ 
+ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
+ {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 0d039cd268a8..23be7ffebb4b 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3069,42 +3069,42 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
+ 		svm_scale_tsc(vcpu, host_tsc);
+ }
+ 
+-static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
++static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	switch (ecx) {
++	switch (msr_info->index) {
+ 	case MSR_IA32_TSC: {
+-		*data = svm->vmcb->control.tsc_offset +
++		msr_info->data = svm->vmcb->control.tsc_offset +
+ 			svm_scale_tsc(vcpu, native_read_tsc());
+ 
+ 		break;
+ 	}
+ 	case MSR_STAR:
+-		*data = svm->vmcb->save.star;
++		msr_info->data = svm->vmcb->save.star;
+ 		break;
+ #ifdef CONFIG_X86_64
+ 	case MSR_LSTAR:
+-		*data = svm->vmcb->save.lstar;
++		msr_info->data = svm->vmcb->save.lstar;
+ 		break;
+ 	case MSR_CSTAR:
+-		*data = svm->vmcb->save.cstar;
++		msr_info->data = svm->vmcb->save.cstar;
+ 		break;
+ 	case MSR_KERNEL_GS_BASE:
+-		*data = svm->vmcb->save.kernel_gs_base;
++		msr_info->data = svm->vmcb->save.kernel_gs_base;
+ 		break;
+ 	case MSR_SYSCALL_MASK:
+-		*data = svm->vmcb->save.sfmask;
++		msr_info->data = svm->vmcb->save.sfmask;
+ 		break;
+ #endif
+ 	case MSR_IA32_SYSENTER_CS:
+-		*data = svm->vmcb->save.sysenter_cs;
++		msr_info->data = svm->vmcb->save.sysenter_cs;
+ 		break;
+ 	case MSR_IA32_SYSENTER_EIP:
+-		*data = svm->sysenter_eip;
++		msr_info->data = svm->sysenter_eip;
+ 		break;
+ 	case MSR_IA32_SYSENTER_ESP:
+-		*data = svm->sysenter_esp;
++		msr_info->data = svm->sysenter_esp;
+ 		break;
+ 	/*
+ 	 * Nobody will change the following 5 values in the VMCB so we can
+@@ -3112,31 +3112,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+ 	 * implemented.
+ 	 */
+ 	case MSR_IA32_DEBUGCTLMSR:
+-		*data = svm->vmcb->save.dbgctl;
++		msr_info->data = svm->vmcb->save.dbgctl;
+ 		break;
+ 	case MSR_IA32_LASTBRANCHFROMIP:
+-		*data = svm->vmcb->save.br_from;
++		msr_info->data = svm->vmcb->save.br_from;
+ 		break;
+ 	case MSR_IA32_LASTBRANCHTOIP:
+-		*data = svm->vmcb->save.br_to;
++		msr_info->data = svm->vmcb->save.br_to;
+ 		break;
+ 	case MSR_IA32_LASTINTFROMIP:
+-		*data = svm->vmcb->save.last_excp_from;
++		msr_info->data = svm->vmcb->save.last_excp_from;
+ 		break;
+ 	case MSR_IA32_LASTINTTOIP:
+-		*data = svm->vmcb->save.last_excp_to;
++		msr_info->data = svm->vmcb->save.last_excp_to;
+ 		break;
+ 	case MSR_VM_HSAVE_PA:
+-		*data = svm->nested.hsave_msr;
++		msr_info->data = svm->nested.hsave_msr;
+ 		break;
+ 	case MSR_VM_CR:
+-		*data = svm->nested.vm_cr_msr;
++		msr_info->data = svm->nested.vm_cr_msr;
+ 		break;
+ 	case MSR_IA32_UCODE_REV:
+-		*data = 0x01000065;
++		msr_info->data = 0x01000065;
+ 		break;
+ 	default:
+-		return kvm_get_msr_common(vcpu, ecx, data);
++		return kvm_get_msr_common(vcpu, msr_info);
+ 	}
+ 	return 0;
+ }
+@@ -3144,16 +3144,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
+ static int rdmsr_interception(struct vcpu_svm *svm)
+ {
+ 	u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+-	u64 data;
++	struct msr_data msr_info;
+ 
+-	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
++	msr_info.index = ecx;
++	msr_info.host_initiated = false;
++	if (svm_get_msr(&svm->vcpu, &msr_info)) {
+ 		trace_kvm_msr_read_ex(ecx);
+ 		kvm_inject_gp(&svm->vcpu, 0);
+ 	} else {
+-		trace_kvm_msr_read(ecx, data);
++		trace_kvm_msr_read(ecx, msr_info.data);
+ 
+-		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, data & 0xffffffff);
+-		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX, data >> 32);
++		kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
++				   msr_info.data & 0xffffffff);
++		kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
++				   msr_info.data >> 32);
+ 		svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
+ 		skip_emulated_instruction(&svm->vcpu);
+ 	}
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 341ea55d2e85..048830f2927d 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2640,76 +2640,69 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
+  * Returns 0 on success, non-0 otherwise.
+  * Assumes vcpu_load() was already called.
+  */
+-static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
++static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+-	u64 data;
+ 	struct shared_msr_entry *msr;
+ 
+-	if (!pdata) {
+-		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
+-		return -EINVAL;
+-	}
+-
+-	switch (msr_index) {
++	switch (msr_info->index) {
+ #ifdef CONFIG_X86_64
+ 	case MSR_FS_BASE:
+-		data = vmcs_readl(GUEST_FS_BASE);
++		msr_info->data = vmcs_readl(GUEST_FS_BASE);
+ 		break;
+ 	case MSR_GS_BASE:
+-		data = vmcs_readl(GUEST_GS_BASE);
++		msr_info->data = vmcs_readl(GUEST_GS_BASE);
+ 		break;
+ 	case MSR_KERNEL_GS_BASE:
+ 		vmx_load_host_state(to_vmx(vcpu));
+-		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
++		msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+ 		break;
+ #endif
+ 	case MSR_EFER:
+-		return kvm_get_msr_common(vcpu, msr_index, pdata);
++		return kvm_get_msr_common(vcpu, msr_info);
+ 	case MSR_IA32_TSC:
+-		data = guest_read_tsc();
++		msr_info->data = guest_read_tsc();
+ 		break;
+ 	case MSR_IA32_SYSENTER_CS:
+-		data = vmcs_read32(GUEST_SYSENTER_CS);
++		msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
+ 		break;
+ 	case MSR_IA32_SYSENTER_EIP:
+-		data = vmcs_readl(GUEST_SYSENTER_EIP);
++		msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
+ 		break;
+ 	case MSR_IA32_SYSENTER_ESP:
+-		data = vmcs_readl(GUEST_SYSENTER_ESP);
++		msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
+ 		break;
+ 	case MSR_IA32_BNDCFGS:
+ 		if (!vmx_mpx_supported())
+ 			return 1;
+-		data = vmcs_read64(GUEST_BNDCFGS);
++		msr_info->data = vmcs_read64(GUEST_BNDCFGS);
+ 		break;
+ 	case MSR_IA32_FEATURE_CONTROL:
+ 		if (!nested_vmx_allowed(vcpu))
+ 			return 1;
+-		data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
++		msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ 		break;
+ 	case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ 		if (!nested_vmx_allowed(vcpu))
+ 			return 1;
+-		return vmx_get_vmx_msr(vcpu, msr_index, pdata);
++		return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
+ 	case MSR_IA32_XSS:
+ 		if (!vmx_xsaves_supported())
+ 			return 1;
+-		data = vcpu->arch.ia32_xss;
++		msr_info->data = vcpu->arch.ia32_xss;
+ 		break;
+ 	case MSR_TSC_AUX:
+-		if (!to_vmx(vcpu)->rdtscp_enabled)
++		if (!to_vmx(vcpu)->rdtscp_enabled && !msr_info->host_initiated)
+ 			return 1;
+ 		/* Otherwise falls through */
+ 	default:
+-		msr = find_msr_entry(to_vmx(vcpu), msr_index);
++		msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
+ 		if (msr) {
+-			data = msr->data;
++			msr_info->data = msr->data;
+ 			break;
+ 		}
+-		return kvm_get_msr_common(vcpu, msr_index, pdata);
++		return kvm_get_msr_common(vcpu, msr_info);
+ 	}
+ 
+-	*pdata = data;
+ 	return 0;
+ }
+ 
+@@ -2804,7 +2797,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 			clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ 		break;
+ 	case MSR_TSC_AUX:
+-		if (!vmx->rdtscp_enabled)
++		if (!vmx->rdtscp_enabled && !msr_info->host_initiated)
+ 			return 1;
+ 		/* Check reserved bit, higher 32 bits should be zero */
+ 		if ((data >> 32) != 0)
+@@ -5496,19 +5489,21 @@ static int handle_cpuid(struct kvm_vcpu *vcpu)
+ static int handle_rdmsr(struct kvm_vcpu *vcpu)
+ {
+ 	u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+-	u64 data;
++	struct msr_data msr_info;
+ 
+-	if (vmx_get_msr(vcpu, ecx, &data)) {
++	msr_info.index = ecx;
++	msr_info.host_initiated = false;
++	if (vmx_get_msr(vcpu, &msr_info)) {
+ 		trace_kvm_msr_read_ex(ecx);
+ 		kvm_inject_gp(vcpu, 0);
+ 		return 1;
+ 	}
+ 
+-	trace_kvm_msr_read(ecx, data);
++	trace_kvm_msr_read(ecx, msr_info.data);
+ 
+ 	/* FIXME: handling of bits 32:63 of rax, rdx */
+-	vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
+-	vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
++	vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
++	vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
+ 	skip_emulated_instruction(vcpu);
+ 	return 1;
+ }
+@@ -9038,6 +9033,7 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+ 	struct vmx_msr_entry e;
+ 
+ 	for (i = 0; i < count; i++) {
++		struct msr_data msr_info;
+ 		if (kvm_read_guest(vcpu->kvm,
+ 				   gpa + i * sizeof(e),
+ 				   &e, 2 * sizeof(u32))) {
+@@ -9052,7 +9048,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+ 				__func__, i, e.index, e.reserved);
+ 			return -EINVAL;
+ 		}
+-		if (kvm_get_msr(vcpu, e.index, &e.value)) {
++		msr_info.host_initiated = false;
++		msr_info.index = e.index;
++		if (kvm_get_msr(vcpu, &msr_info)) {
+ 			pr_warn_ratelimited(
+ 				"%s cannot read MSR (%u, 0x%x)\n",
+ 				__func__, i, e.index);
+@@ -9061,10 +9059,10 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+ 		if (kvm_write_guest(vcpu->kvm,
+ 				    gpa + i * sizeof(e) +
+ 					offsetof(struct vmx_msr_entry, value),
+-				    &e.value, sizeof(e.value))) {
++				    &msr_info.data, sizeof(msr_info.data))) {
+ 			pr_warn_ratelimited(
+ 				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+-				__func__, i, e.index, e.value);
++				__func__, i, e.index, msr_info.data);
+ 			return -EINVAL;
+ 		}
+ 	}
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index bd84d2226ca1..012820f6acb9 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -1045,6 +1045,21 @@ EXPORT_SYMBOL_GPL(kvm_set_msr);
+ /*
+  * Adapt set_msr() to msr_io()'s calling convention
+  */
++static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
++{
++	struct msr_data msr;
++	int r;
++
++	msr.index = index;
++	msr.host_initiated = true;
++	r = kvm_get_msr(vcpu, &msr);
++	if (r)
++		return r;
++
++	*data = msr.data;
++	return 0;
++}
++
+ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
+ {
+ 	struct msr_data msr;
+@@ -2374,9 +2389,9 @@ EXPORT_SYMBOL_GPL(kvm_set_msr_common);
+  * Returns 0 on success, non-0 otherwise.
+  * Assumes vcpu_load() was already called.
+  */
+-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
++int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ {
+-	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
++	return kvm_x86_ops->get_msr(vcpu, msr);
+ }
+ EXPORT_SYMBOL_GPL(kvm_get_msr);
+ 
+@@ -2513,11 +2528,9 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 	return 0;
+ }
+ 
+-int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
++int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ {
+-	u64 data;
+-
+-	switch (msr) {
++	switch (msr_info->index) {
+ 	case MSR_IA32_PLATFORM_ID:
+ 	case MSR_IA32_EBL_CR_POWERON:
+ 	case MSR_IA32_DEBUGCTLMSR:
+@@ -2540,26 +2553,26 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 	case MSR_AMD64_NB_CFG:
+ 	case MSR_FAM10H_MMIO_CONF_BASE:
+ 	case MSR_AMD64_BU_CFG2:
+-		data = 0;
++		msr_info->data = 0;
+ 		break;
+ 	case MSR_P6_PERFCTR0:
+ 	case MSR_P6_PERFCTR1:
+ 	case MSR_P6_EVNTSEL0:
+ 	case MSR_P6_EVNTSEL1:
+-		if (kvm_pmu_msr(vcpu, msr))
+-			return kvm_pmu_get_msr(vcpu, msr, pdata);
+-		data = 0;
++		if (kvm_pmu_msr(vcpu, msr_info->index))
++			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
++		msr_info->data = 0;
+ 		break;
+ 	case MSR_IA32_UCODE_REV:
+-		data = 0x100000000ULL;
++		msr_info->data = 0x100000000ULL;
+ 		break;
+ 	case MSR_MTRRcap:
+-		data = 0x500 | KVM_NR_VAR_MTRR;
++		msr_info->data = 0x500 | KVM_NR_VAR_MTRR;
+ 		break;
+ 	case 0x200 ... 0x2ff:
+-		return get_msr_mtrr(vcpu, msr, pdata);
++		return get_msr_mtrr(vcpu, msr_info->index, &msr_info->data);
+ 	case 0xcd: /* fsb frequency */
+-		data = 3;
++		msr_info->data = 3;
+ 		break;
+ 		/*
+ 		 * MSR_EBC_FREQUENCY_ID
+@@ -2573,48 +2586,48 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 		 * multiplying by zero otherwise.
+ 		 */
+ 	case MSR_EBC_FREQUENCY_ID:
+-		data = 1 << 24;
++		msr_info->data = 1 << 24;
+ 		break;
+ 	case MSR_IA32_APICBASE:
+-		data = kvm_get_apic_base(vcpu);
++		msr_info->data = kvm_get_apic_base(vcpu);
+ 		break;
+ 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+-		return kvm_x2apic_msr_read(vcpu, msr, pdata);
++		return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
+ 		break;
+ 	case MSR_IA32_TSCDEADLINE:
+-		data = kvm_get_lapic_tscdeadline_msr(vcpu);
++		msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
+ 		break;
+ 	case MSR_IA32_TSC_ADJUST:
+-		data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
++		msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+ 		break;
+ 	case MSR_IA32_MISC_ENABLE:
+-		data = vcpu->arch.ia32_misc_enable_msr;
++		msr_info->data = vcpu->arch.ia32_misc_enable_msr;
+ 		break;
+ 	case MSR_IA32_PERF_STATUS:
+ 		/* TSC increment by tick */
+-		data = 1000ULL;
++		msr_info->data = 1000ULL;
+ 		/* CPU multiplier */
+-		data |= (((uint64_t)4ULL) << 40);
++		msr_info->data |= (((uint64_t)4ULL) << 40);
+ 		break;
+ 	case MSR_EFER:
+-		data = vcpu->arch.efer;
++		msr_info->data = vcpu->arch.efer;
+ 		break;
+ 	case MSR_KVM_WALL_CLOCK:
+ 	case MSR_KVM_WALL_CLOCK_NEW:
+-		data = vcpu->kvm->arch.wall_clock;
++		msr_info->data = vcpu->kvm->arch.wall_clock;
+ 		break;
+ 	case MSR_KVM_SYSTEM_TIME:
+ 	case MSR_KVM_SYSTEM_TIME_NEW:
+-		data = vcpu->arch.time;
++		msr_info->data = vcpu->arch.time;
+ 		break;
+ 	case MSR_KVM_ASYNC_PF_EN:
+-		data = vcpu->arch.apf.msr_val;
++		msr_info->data = vcpu->arch.apf.msr_val;
+ 		break;
+ 	case MSR_KVM_STEAL_TIME:
+-		data = vcpu->arch.st.msr_val;
++		msr_info->data = vcpu->arch.st.msr_val;
+ 		break;
+ 	case MSR_KVM_PV_EOI_EN:
+-		data = vcpu->arch.pv_eoi.msr_val;
++		msr_info->data = vcpu->arch.pv_eoi.msr_val;
+ 		break;
+ 	case MSR_IA32_P5_MC_ADDR:
+ 	case MSR_IA32_P5_MC_TYPE:
+@@ -2622,7 +2635,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 	case MSR_IA32_MCG_CTL:
+ 	case MSR_IA32_MCG_STATUS:
+ 	case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
+-		return get_msr_mce(vcpu, msr, pdata);
++		return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
+ 	case MSR_K7_CLK_CTL:
+ 		/*
+ 		 * Provide expected ramp-up count for K7. All other
+@@ -2633,17 +2646,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 		 * type 6, model 8 and higher from exploding due to
+ 		 * the rdmsr failing.
+ 		 */
+-		data = 0x20000000;
++		msr_info->data = 0x20000000;
+ 		break;
+ 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+-		if (kvm_hv_msr_partition_wide(msr)) {
++		if (kvm_hv_msr_partition_wide(msr_info->index)) {
+ 			int r;
+ 			mutex_lock(&vcpu->kvm->lock);
+-			r = get_msr_hyperv_pw(vcpu, msr, pdata);
++			r = get_msr_hyperv_pw(vcpu, msr_info->index, &msr_info->data);
+ 			mutex_unlock(&vcpu->kvm->lock);
+ 			return r;
+ 		} else
+-			return get_msr_hyperv(vcpu, msr, pdata);
++			return get_msr_hyperv(vcpu, msr_info->index, &msr_info->data);
+ 		break;
+ 	case MSR_IA32_BBL_CR_CTL3:
+ 		/* This legacy MSR exists but isn't fully documented in current
+@@ -2656,31 +2669,30 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ 		 * L2 cache control register 3: 64GB range, 256KB size,
+ 		 * enabled, latency 0x1, configured
+ 		 */
+-		data = 0xbe702111;
++		msr_info->data = 0xbe702111;
+ 		break;
+ 	case MSR_AMD64_OSVW_ID_LENGTH:
+ 		if (!guest_cpuid_has_osvw(vcpu))
+ 			return 1;
+-		data = vcpu->arch.osvw.length;
++		msr_info->data = vcpu->arch.osvw.length;
+ 		break;
+ 	case MSR_AMD64_OSVW_STATUS:
+ 		if (!guest_cpuid_has_osvw(vcpu))
+ 			return 1;
+-		data = vcpu->arch.osvw.status;
++		msr_info->data = vcpu->arch.osvw.status;
+ 		break;
+ 	default:
+-		if (kvm_pmu_msr(vcpu, msr))
+-			return kvm_pmu_get_msr(vcpu, msr, pdata);
++		if (kvm_pmu_msr(vcpu, msr_info->index))
++			return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data);
+ 		if (!ignore_msrs) {
+-			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
++			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr_info->index);
+ 			return 1;
+ 		} else {
+-			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
+-			data = 0;
++			vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
++			msr_info->data = 0;
+ 		}
+ 		break;
+ 	}
+-	*pdata = data;
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+@@ -3453,7 +3465,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ 		break;
+ 	}
+ 	case KVM_GET_MSRS:
+-		r = msr_io(vcpu, argp, kvm_get_msr, 1);
++		r = msr_io(vcpu, argp, do_get_msr, 1);
+ 		break;
+ 	case KVM_SET_MSRS:
+ 		r = msr_io(vcpu, argp, do_set_msr, 0);
+@@ -4948,7 +4960,17 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
+ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
+ 			    u32 msr_index, u64 *pdata)
+ {
+-	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
++	struct msr_data msr;
++	int r;
++
++	msr.index = msr_index;
++	msr.host_initiated = false;
++	r = kvm_get_msr(emul_to_vcpu(ctxt), &msr);
++	if (r)
++		return r;
++
++	*pdata = msr.data;
++	return 0;
+ }
+ 
+ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
+@@ -5848,6 +5870,7 @@ out:
+ 
+ void kvm_arch_exit(void)
+ {
++	kvm_lapic_exit();
+ 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+ 
+ 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index ff9911707160..3b29bd7e0342 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -115,6 +115,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
+ 		},
+ 	},
++	/* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
++	{
++		.callback = set_nouse_crs,
++		.ident = "Supermicro X8DTH",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
++			DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
++		},
++	},
+ 
+ 	/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
+ 	{
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index dda720c6ab08..4e69f3161888 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -349,6 +349,7 @@ int crypto_register_alg(struct crypto_alg *alg)
+ 	struct crypto_larval *larval;
+ 	int err;
+ 
++	alg->cra_flags &= ~CRYPTO_ALG_DEAD;
+ 	err = crypto_check_alg(alg);
+ 	if (err)
+ 		return err;
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 9daf46bf3a28..4c0dac27882f 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -1692,6 +1692,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
+ 
+ 		if (qc->err_mask & ~AC_ERR_OTHER)
+ 			qc->err_mask &= ~AC_ERR_OTHER;
++	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
++		qc->result_tf.command |= ATA_SENSE;
+ 	}
+ 
+ 	/* finish up */
+@@ -4139,10 +4141,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
+ 
+ 	/*
+-	 * Device times out with higher max sects.
++	 * These devices time out with higher max sects.
+ 	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+ 	 */
+-	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
++	{ "LITEON CX1-JB*-HP",	NULL,		ATA_HORKAGE_MAX_SEC_1024 },
+ 
+ 	/* Devices we expect to fail diagnostics */
+ 
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index bd74ee555278..729f26322095 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -4121,6 +4121,9 @@ static int mv_platform_probe(struct platform_device *pdev)
+ 	host->iomap = NULL;
+ 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ 				   resource_size(res));
++	if (!hpriv->base)
++		return -ENOMEM;
++
+ 	hpriv->base -= SATAHC0_REG_BASE;
+ 
+ 	hpriv->clk = clk_get(&pdev->dev, NULL);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index ef773bf58a25..f557695a2409 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1810,7 +1810,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 				mgr->payloads[i].num_slots = req_payload.num_slots;
+ 			} else if (mgr->payloads[i].num_slots) {
+ 				mgr->payloads[i].num_slots = 0;
+-				drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
++				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
+ 				req_payload.payload_state = mgr->payloads[i].payload_state;
+ 				mgr->payloads[i].start_slot = 0;
+ 			}
+diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
+index 213b11ea69b5..76a1461149fb 100644
+--- a/drivers/gpu/drm/drm_modes.c
++++ b/drivers/gpu/drm/drm_modes.c
+@@ -1401,6 +1401,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
+ 		return NULL;
+ 
+ 	mode->type |= DRM_MODE_TYPE_USERDEF;
++	/* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
++	if (cmd->xres == 1366 && mode->hdisplay == 1368) {
++		mode->hdisplay = 1366;
++		mode->hsync_start--;
++		mode->hsync_end--;
++		drm_mode_set_name(mode);
++	}
+ 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ 	return mode;
+ }
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 0542c252dde5..131874409d84 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -506,6 +506,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
+ 	struct edid *edid;
+ 	struct i2c_adapter *i2c;
++	bool ret = false;
+ 
+ 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
+ 
+@@ -522,17 +523,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
+ 		 */
+ 		if (!is_digital) {
+ 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+-			return true;
++			ret = true;
++		} else {
++			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ 		}
+-
+-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ 	} else {
+ 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ 	}
+ 
+ 	kfree(edid);
+ 
+-	return false;
++	return ret;
+ }
+ 
+ static enum drm_connector_status
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 56323732c748..78d82689b8d6 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -3785,10 +3785,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
+ 	drm_crtc_vblank_put(&intel_crtc->base);
+ 
+ 	wake_up_all(&dev_priv->pending_flip_queue);
+-	queue_work(dev_priv->wq, &work->work);
+-
+ 	trace_i915_flip_complete(intel_crtc->plane,
+ 				 work->pending_flip_obj);
++
++	queue_work(dev_priv->wq, &work->work);
+ }
+ 
+ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
+diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+index 42e07afc4c2b..9666e2b4c6d6 100644
+--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
++++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
+@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ 		uint32_t mpllP;
+ 
+ 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
++		mpllP = (mpllP >> 8) & 0xf;
+ 		if (!mpllP)
+ 			mpllP = 4;
+ 
+@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
+ 		uint32_t clock;
+ 
+ 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
+-		return clock;
++		return clock / 1000;
+ 	}
+ 
+ 	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index cba23008eca4..57f0082edb5f 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -186,8 +186,8 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 	}
+ 
+ 	if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
+-	    x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
+-	    y >= (crtc->y + crtc->mode.crtc_vdisplay))
++	    x >= (crtc->x + crtc->mode.hdisplay) ||
++	    y >= (crtc->y + crtc->mode.vdisplay))
+ 		goto out_of_bounds;
+ 
+ 	x += xorigin;
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index a717da729fb8..23cf21215f42 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2957,19 +2957,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->device == 0x6817) ||
+ 		    (rdev->pdev->device == 0x6806))
+ 			max_mclk = 120000;
+-	} else if (rdev->family == CHIP_VERDE) {
+-		if ((rdev->pdev->revision == 0x81) ||
+-		    (rdev->pdev->revision == 0x83) ||
+-		    (rdev->pdev->revision == 0x87) ||
+-		    (rdev->pdev->device == 0x6820) ||
+-		    (rdev->pdev->device == 0x6821) ||
+-		    (rdev->pdev->device == 0x6822) ||
+-		    (rdev->pdev->device == 0x6823) ||
+-		    (rdev->pdev->device == 0x682A) ||
+-		    (rdev->pdev->device == 0x682B)) {
+-			max_sclk = 75000;
+-			max_mclk = 80000;
+-		}
+ 	} else if (rdev->family == CHIP_OLAND) {
+ 		if ((rdev->pdev->revision == 0xC7) ||
+ 		    (rdev->pdev->revision == 0x80) ||
+diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
+index c4ef3bc726e3..e299576004ce 100644
+--- a/drivers/hid/hid-cypress.c
++++ b/drivers/hid/hid-cypress.c
+@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ 	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
+ 		return rdesc;
+ 
++	if (*rsize < 4)
++		return rdesc;
++
+ 	for (i = 0; i < *rsize - 4; i++)
+ 		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
+ 			__u8 tmp;
+diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
+index 0efd766a545b..880b04b7bab3 100644
+--- a/drivers/input/touchscreen/elants_i2c.c
++++ b/drivers/input/touchscreen/elants_i2c.c
+@@ -889,9 +889,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
+ 
+ 		case QUEUE_HEADER_NORMAL:
+ 			report_count = ts->buf[FW_HDR_COUNT];
+-			if (report_count > 3) {
++			if (report_count == 0 || report_count > 3) {
+ 				dev_err(&client->dev,
+-					"too large report count: %*ph\n",
++					"bad report count: %*ph\n",
+ 					HEADER_SIZE, ts->buf);
+ 				break;
+ 			}
+diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
+index d7c286656a25..7b4ddf0a39ec 100644
+--- a/drivers/isdn/hardware/eicon/message.c
++++ b/drivers/isdn/hardware/eicon/message.c
+@@ -11304,7 +11304,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
+ 				((CAPI_MSG *) msg)->header.ncci = 0;
+ 				((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
+ 				((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
+-				PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
++				((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
++				((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
+ 				((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
+ 				w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
+ 				if (w != _QUEUE_FULL)
+diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
+index c945e4c2fbd4..ec30a004f319 100644
+--- a/drivers/media/usb/siano/smsusb.c
++++ b/drivers/media/usb/siano/smsusb.c
+@@ -200,22 +200,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
+ static int smsusb_sendrequest(void *context, void *buffer, size_t size)
+ {
+ 	struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
+-	struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
+-	int dummy;
++	struct sms_msg_hdr *phdr;
++	int dummy, ret;
+ 
+ 	if (dev->state != SMSUSB_ACTIVE) {
+ 		pr_debug("Device not active yet\n");
+ 		return -ENOENT;
+ 	}
+ 
++	phdr = kmalloc(size, GFP_KERNEL);
++	if (!phdr)
++		return -ENOMEM;
++	memcpy(phdr, buffer, size);
++
+ 	pr_debug("sending %s(%d) size: %d\n",
+ 		  smscore_translate_msg(phdr->msg_type), phdr->msg_type,
+ 		  phdr->msg_length);
+ 
+ 	smsendian_handle_tx_message((struct sms_msg_data *) phdr);
+-	smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
+-	return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
+-			    buffer, size, &dummy, 1000);
++	smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
++	ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
++			    phdr, size, &dummy, 1000);
++
++	kfree(phdr);
++	return ret;
+ }
+ 
+ static char *smsusb1_fw_lkup[] = {
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 3ccc89d4c473..1cc1e51a1866 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1415,10 +1415,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
+ 		err = mmc_select_hs400(card);
+ 		if (err)
+ 			goto free_card;
+-	} else if (mmc_card_hs(card)) {
++	} else {
+ 		/* Select the desired bus width optionally */
+ 		err = mmc_select_bus_width(card);
+-		if (!IS_ERR_VALUE(err)) {
++		if (!IS_ERR_VALUE(err) && mmc_card_hs(card)) {
+ 			err = mmc_select_hs_ddr(card);
+ 			if (err)
+ 				goto free_card;
+diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
+index a82411a2c024..7a178215866c 100644
+--- a/drivers/mmc/host/mxs-mmc.c
++++ b/drivers/mmc/host/mxs-mmc.c
+@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
+ 	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
+ 	cmd1 = cmd->arg;
+ 
++	if (cmd->opcode == MMC_STOP_TRANSMISSION)
++		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
++
+ 	if (host->sdio_irq_en) {
+ 		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
+ 		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
+@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
+ 		       ssp->base + HW_SSP_BLOCK_SIZE);
+ 	}
+ 
+-	if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
+-	    (cmd->opcode == SD_IO_RW_EXTENDED))
++	if (cmd->opcode == SD_IO_RW_EXTENDED)
+ 		cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+ 
+ 	cmd1 = cmd->arg;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index fdc44c8200ba..84e3f7a43f47 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2650,7 +2650,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
+ 			pr_err("%s: Card is consuming too much power!\n",
+ 				mmc_hostname(host->mmc));
+ 
+-		if (intmask & SDHCI_INT_CARD_INT) {
++		if ((intmask & SDHCI_INT_CARD_INT) &&
++		    (host->ier & SDHCI_INT_CARD_INT)) {
+ 			sdhci_enable_sdio_irq_nolock(host, false);
+ 			host->thread_isr |= SDHCI_INT_CARD_INT;
+ 			result = IRQ_WAKE_THREAD;
+diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
+index 5897d8d8fa5a..e60105dd1654 100644
+--- a/drivers/mtd/nand/Kconfig
++++ b/drivers/mtd/nand/Kconfig
+@@ -511,7 +511,7 @@ config MTD_NAND_FSMC
+ 	  Flexible Static Memory Controller (FSMC)
+ 
+ config MTD_NAND_XWAY
+-	tristate "Support for NAND on Lantiq XWAY SoC"
++	bool "Support for NAND on Lantiq XWAY SoC"
+ 	depends on LANTIQ && SOC_TYPE_XWAY
+ 	select MTD_NAND_PLATFORM
+ 	help
+diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
+index 7be393c96b1a..cf7c18947189 100644
+--- a/drivers/net/can/c_can/c_can_pci.c
++++ b/drivers/net/can/c_can/c_can_pci.c
+@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
+ 
+ 	dev->irq = pdev->irq;
+ 	priv->base = addr;
++	priv->device = &pdev->dev;
+ 
+ 	if (!c_can_pci_data->freq) {
+ 		dev_err(&pdev->dev, "no clock frequency defined\n");
+diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
+index e95a9e1a889f..7bb3cf38f346 100644
+--- a/drivers/net/can/ti_hecc.c
++++ b/drivers/net/can/ti_hecc.c
+@@ -951,7 +951,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
+ 	netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
+ 		HECC_DEF_NAPI_WEIGHT);
+ 
+-	clk_enable(priv->clk);
++	err = clk_prepare_enable(priv->clk);
++	if (err) {
++		dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
++		goto probe_exit_clk;
++	}
++
+ 	err = register_candev(ndev);
+ 	if (err) {
+ 		dev_err(&pdev->dev, "register_candev() failed\n");
+@@ -984,7 +989,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
+ 	struct ti_hecc_priv *priv = netdev_priv(ndev);
+ 
+ 	unregister_candev(ndev);
+-	clk_disable(priv->clk);
++	clk_disable_unprepare(priv->clk);
+ 	clk_put(priv->clk);
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	iounmap(priv->base);
+@@ -1009,7 +1014,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
+ 	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ 	priv->can.state = CAN_STATE_SLEEPING;
+ 
+-	clk_disable(priv->clk);
++	clk_disable_unprepare(priv->clk);
+ 
+ 	return 0;
+ }
+@@ -1018,8 +1023,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
+ {
+ 	struct net_device *dev = platform_get_drvdata(pdev);
+ 	struct ti_hecc_priv *priv = netdev_priv(dev);
++	int err;
+ 
+-	clk_enable(priv->clk);
++	err = clk_prepare_enable(priv->clk);
++	if (err)
++		return err;
+ 
+ 	hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index fd51626e859e..ea1be52f5515 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1344,6 +1344,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
+ 	for (i = 0; i < num_queues; ++i) {
+ 		struct netfront_queue *queue = &info->queues[i];
+ 
++		del_timer_sync(&queue->rx_refill_timer);
++
+ 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
+ 			unbind_from_irqhandler(queue->tx_irq, queue);
+ 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
+@@ -1699,7 +1701,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
+ 
+ 		if (netif_running(info->netdev))
+ 			napi_disable(&queue->napi);
+-		del_timer_sync(&queue->rx_refill_timer);
+ 		netif_napi_del(&queue->napi);
+ 	}
+ 
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 993ff22df7ec..3c4e709cf9a1 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -982,6 +982,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ 	if (!pos)
+ 		return;
++
+ 	pdev->pcie_cap = pos;
+ 	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ 	pdev->pcie_flags_reg = reg16;
+@@ -989,13 +990,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
+ 	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
+ 
+ 	/*
+-	 * A Root Port is always the upstream end of a Link.  No PCIe
+-	 * component has two Links.  Two Links are connected by a Switch
+-	 * that has a Port on each Link and internal logic to connect the
+-	 * two Ports.
++	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
++	 * of a Link.  No PCIe component has two Links.  Two Links are
++	 * connected by a Switch that has a Port on each Link and internal
++	 * logic to connect the two Ports.
+ 	 */
+ 	type = pci_pcie_type(pdev);
+-	if (type == PCI_EXP_TYPE_ROOT_PORT)
++	if (type == PCI_EXP_TYPE_ROOT_PORT ||
++	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
+ 		pdev->has_secondary_link = 1;
+ 	else if (type == PCI_EXP_TYPE_UPSTREAM ||
+ 		 type == PCI_EXP_TYPE_DOWNSTREAM) {
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index e736ecb3b8a4..3c96f23e2896 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -52,9 +52,13 @@ struct aac_common aac_config = {
+ 
+ static inline int aac_is_msix_mode(struct aac_dev *dev)
+ {
+-	u32 status;
++	u32 status = 0;
+ 
+-	status = src_readl(dev, MUnit.OMR);
++	if (dev->pdev->device == PMC_DEVICE_S6 ||
++		dev->pdev->device == PMC_DEVICE_S7 ||
++		dev->pdev->device == PMC_DEVICE_S8) {
++		status = src_readl(dev, MUnit.OMR);
++	}
+ 	return (status & AAC_INT_MODE_MSIX);
+ }
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index d00725574577..6a5e63460953 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -3365,7 +3365,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 				sizeof(struct ct6_dsd), 0,
+ 				SLAB_HWCACHE_ALIGN, NULL);
+ 			if (!ctx_cachep)
+-				goto fail_free_gid_list;
++				goto fail_free_srb_mempool;
+ 		}
+ 		ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
+ 			ctx_cachep);
+@@ -3518,7 +3518,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 	ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
+ 	    GFP_KERNEL);
+ 	if (!ha->loop_id_map)
+-		goto fail_async_pd;
++		goto fail_loop_id_map;
+ 	else {
+ 		qla2x00_set_reserved_loop_ids(ha);
+ 		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
+@@ -3527,6 +3527,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ 
+ 	return 0;
+ 
++fail_loop_id_map:
++	dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
+ fail_async_pd:
+ 	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
+ fail_ex_init_cb:
+@@ -3554,6 +3556,10 @@ fail_free_ms_iocb:
+ 	dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
+ 	ha->ms_iocb = NULL;
+ 	ha->ms_iocb_dma = 0;
++
++	if (ha->sns_cmd)
++		dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
++		    ha->sns_cmd, ha->sns_cmd_dma);
+ fail_dma_pool:
+ 	if (IS_QLA82XX(ha) || ql2xenabledif) {
+ 		dma_pool_destroy(ha->fcp_cmnd_dma_pool);
+@@ -3571,10 +3577,12 @@ fail_free_nvram:
+ 	kfree(ha->nvram);
+ 	ha->nvram = NULL;
+ fail_free_ctx_mempool:
+-	mempool_destroy(ha->ctx_mempool);
++	if (ha->ctx_mempool)
++		mempool_destroy(ha->ctx_mempool);
+ 	ha->ctx_mempool = NULL;
+ fail_free_srb_mempool:
+-	mempool_destroy(ha->srb_mempool);
++	if (ha->srb_mempool)
++		mempool_destroy(ha->srb_mempool);
+ 	ha->srb_mempool = NULL;
+ fail_free_gid_list:
+ 	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 26bc4e9c7441..0cc067ce5715 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1757,6 +1757,10 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
+ 			return res;
+ 
+ 		iov_iter_truncate(&i, hp->dxfer_len);
++		if (!iov_iter_count(&i)) {
++			kfree(iov);
++			return -EINVAL;
++		}
+ 
+ 		res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC);
+ 		kfree(iov);
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 46b966d09af2..35180fbaa460 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -390,6 +390,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 					     int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
++	sense_reason_t ret = TCM_NO_SENSE;
+ 
+ 	/*
+ 	 * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
+@@ -397,9 +398,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 	 * sent to the backend driver.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
+-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
++	if (cmd->transport_state & CMD_T_SENT) {
+ 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ 		*post_ret = 1;
++
++		if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
++			ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ 	}
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+@@ -409,7 +413,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
+ 	 */
+ 	up(&dev->caw_sem);
+ 
+-	return TCM_NO_SENSE;
++	return ret;
+ }
+ 
+ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index ddb0d6bc45f2..71778b1e4c7b 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -849,7 +849,7 @@ out:
+ 			" CHECK_CONDITION -> sending response\n", rc);
+ 		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
+ 	}
+-	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
++	target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
+ }
+ 
+ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 9ffdfcf2ec6e..2f7cfa5c7b8b 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -929,8 +929,8 @@ static const struct input_device_id sysrq_ids[] = {
+ 	{
+ 		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ 				INPUT_DEVICE_ID_MATCH_KEYBIT,
+-		.evbit = { BIT_MASK(EV_KEY) },
+-		.keybit = { BIT_MASK(KEY_LEFTALT) },
++		.evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
++		.keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
+ 	},
+ 	{ },
+ };
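
The sysrq table fix is subtle: KEY_LEFTALT is 56, which lands outside the
first word of a 32-bit bitmap, so without the designated [BIT_WORD(...)]
index the initializer sets the mask in word 0 and the match never fires.
A small standalone demonstration of the word/mask arithmetic (the macro
definitions mirror the kernel's; the rest is illustrative):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	/* KEY_LEFTALT == 56; the designated index puts the mask in the
	 * right word of the bitmap on both 32-bit and 64-bit longs. */
	unsigned long keybit[2] = {
		[BIT_WORD(56)] = BIT_MASK(56),
	};

	printf("word %zu, mask %#lx\n", (size_t)BIT_WORD(56), BIT_MASK(56));
	printf("KEY_LEFTALT set: %d\n",
	       !!(keybit[BIT_WORD(56)] & BIT_MASK(56)));
	return 0;
}

On 64-bit longs BIT_WORD(56) is 0, so the old initializer happened to be
correct there; the bug only bit 32-bit builds, which is why it survived.
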
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 81336acc7040..70af0804465c 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -178,6 +178,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	if (ifp->desc.bNumEndpoints >= num_ep)
+ 		goto skip_to_next_endpoint_or_interface_descriptor;
+ 
++	/* Check for duplicate endpoint addresses */
++	for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
++		if (ifp->endpoint[i].desc.bEndpointAddress ==
++		    d->bEndpointAddress) {
++			dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
++			    cfgno, inum, asnum, d->bEndpointAddress);
++			goto skip_to_next_endpoint_or_interface_descriptor;
++		}
++	}
++
+ 	endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
+ 	++ifp->desc.bNumEndpoints;
+ 
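usb_parse_endpoint() now skips any endpoint descriptor whose address
duplicates one already parsed for the same altsetting, which keeps a
malformed device from corrupting the endpoint array. The check is a plain
linear scan over what has been accepted so far; a hypothetical standalone
version of it:

#include <stdio.h>

/* Illustrative stand-in for the new loop in usb_parse_endpoint():
 * scan the endpoints parsed so far and skip a duplicate address. */
static int ep_is_duplicate(const unsigned char *seen, int n,
			   unsigned char addr)
{
	int i;

	for (i = 0; i < n; i++)
		if (seen[i] == addr)
			return 1;
	return 0;
}

int main(void)
{
	unsigned char seen[] = { 0x81, 0x02 };
	unsigned char next = 0x81;	/* descriptor repeats an address */

	if (ep_is_duplicate(seen, 2, next))
		printf("skipping duplicate endpoint 0x%02X\n", next);
	return 0;
}
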
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index d2e50a27140c..24f9f98968a5 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* CBM - Flash disk */
+ 	{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* WORLDE easy key (easykey.25) MIDI controller  */
++	{ USB_DEVICE(0x0218, 0x0401), .driver_info =
++			USB_QUIRK_CONFIG_INTF_STRINGS },
++
+ 	/* HP 5300/5370C scanner */
+ 	{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index a5e1b8b39ff5..1e51ded8607a 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -259,11 +259,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
+ 	if (req->request.status == -EINPROGRESS)
+ 		req->request.status = status;
+ 
+-	if (dwc->ep0_bounced && dep->number == 0)
++	if (dwc->ep0_bounced && dep->number <= 1)
+ 		dwc->ep0_bounced = false;
+-	else
+-		usb_gadget_unmap_request(&dwc->gadget, &req->request,
+-				req->direction);
++
++	usb_gadget_unmap_request(&dwc->gadget, &req->request,
++			req->direction);
+ 
+ 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
+ 			req, dep->name, req->request.actual,
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 9cd76cc8c0d9..d17304ae0b42 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1563,9 +1563,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		value = min(w_length, (u16) 1);
+ 		break;
+ 
+-	/* function drivers must handle get/set altsetting; if there's
+-	 * no get() method, we know only altsetting zero works.
+-	 */
++	/* function drivers must handle get/set altsetting */
+ 	case USB_REQ_SET_INTERFACE:
+ 		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
+ 			goto unknown;
+@@ -1574,7 +1572,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ 		f = cdev->config->interface[intf];
+ 		if (!f)
+ 			break;
+-		if (w_value && !f->set_alt)
++
++		/*
++		 * If there's no get_alt() method, we know only altsetting zero
++		 * works. There is no need to check if set_alt() is not NULL
++		 * as we check this in usb_add_function().
++		 */
++		if (w_value && !f->get_alt)
+ 			break;
+ 		value = f->set_alt(f, w_index, w_value);
+ 		if (value == USB_GADGET_DELAYED_STATUS) {
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index db9433eed2cc..71c7d1db784f 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -2079,6 +2079,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ 		if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
+ 			return -EINVAL;
+ 		length = le32_to_cpu(d->dwSize);
++		if (len < length)
++			return -EINVAL;
+ 		type = le32_to_cpu(d->dwPropertyDataType);
+ 		if (type < USB_EXT_PROP_UNICODE ||
+ 		    type > USB_EXT_PROP_UNICODE_MULTI) {
+@@ -2087,6 +2089,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
+ 			return -EINVAL;
+ 		}
+ 		pnl = le16_to_cpu(d->wPropertyNameLength);
++		if (length < 14 + pnl) {
++			pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
++				  length, pnl, type);
++			return -EINVAL;
++		}
+ 		pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
+ 		if (length != 14 + pnl + pdl) {
+ 			pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
+@@ -2171,6 +2178,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
+ 		}
+ 	}
+ 	if (flags & (1 << i)) {
++		if (len < 4) {
++			goto error;
++		}
+ 		os_descs_count = get_unaligned_le32(data);
+ 		data += 4;
+ 		len -= 4;
+@@ -2243,7 +2253,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
+ 
+ 	ENTER();
+ 
+-	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
++	if (unlikely(len < 16 ||
++		     get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ 		     get_unaligned_le32(data + 4) != len))
+ 		goto error;
+ 	str_count  = get_unaligned_le32(data + 8);
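
The f_fs changes are all bounds checks on length-prefixed OS descriptor
records: the declared dwSize must fit in the remaining buffer, and the
name length must fit inside dwSize before the data length is read at
offset 10 + pnl. A self-contained sketch of the same validation, assuming
a little-endian host and the 14-byte fixed header implied by the hunk
(4-byte size, 4-byte type, 2-byte name length, name, 4-byte data length):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative parser for a record laid out like the ExtProp descriptor:
 * u32 size, u32 type, u16 name_len, name, u32 data_len, data.
 * Every declared length is validated against the bytes available. */
static int parse_record(const uint8_t *buf, size_t avail)
{
	uint32_t size, data_len;
	uint16_t name_len;

	if (avail < 14)
		return -1;
	memcpy(&size, buf, 4);		/* assume little-endian host */
	if (size < 14 || size > avail)
		return -1;		/* declared size exceeds buffer */
	memcpy(&name_len, buf + 8, 2);
	if (size < 14u + name_len)
		return -1;		/* name would run past the record */
	memcpy(&data_len, buf + 10 + name_len, 4);
	if (size != 14u + name_len + data_len)
		return -1;		/* trailing data length mismatch */
	return (int)size;
}

int main(void)
{
	uint8_t rec[16] = { 16, 0, 0, 0,  1, 0, 0, 0,
			    2, 0, 'A', 0,  0, 0, 0, 0 };

	printf("parsed %d bytes\n", parse_record(rec, sizeof(rec)));
	return 0;
}
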
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index bccc5788bb98..44e74fd3a80a 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -1122,7 +1122,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	/* data and/or status stage for control request */
+ 	} else if (dev->state == STATE_DEV_SETUP) {
+ 
+-		/* IN DATA+STATUS caller makes len <= wLength */
++		len = min_t(size_t, len, dev->setup_wLength);
+ 		if (dev->setup_in) {
+ 			retval = setup_req (dev->gadget->ep0, dev->req, len);
+ 			if (retval == 0) {
+@@ -1752,10 +1752,12 @@ static struct usb_gadget_driver probe_driver = {
+  * such as configuration notifications.
+  */
+ 
+-static int is_valid_config (struct usb_config_descriptor *config)
++static int is_valid_config(struct usb_config_descriptor *config,
++		unsigned int total)
+ {
+ 	return config->bDescriptorType == USB_DT_CONFIG
+ 		&& config->bLength == USB_DT_CONFIG_SIZE
++		&& total >= USB_DT_CONFIG_SIZE
+ 		&& config->bConfigurationValue != 0
+ 		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
+ 		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
+@@ -1780,7 +1782,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	}
+ 	spin_unlock_irq(&dev->lock);
+ 
+-	if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4))
++	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
++	    (len > PAGE_SIZE * 4))
+ 		return -EINVAL;
+ 
+ 	/* we might need to change message format someday */
+@@ -1804,7 +1807,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	/* full or low speed config */
+ 	dev->config = (void *) kbuf;
+ 	total = le16_to_cpu(dev->config->wTotalLength);
+-	if (!is_valid_config (dev->config) || total >= length)
++	if (!is_valid_config(dev->config, total) ||
++			total > length - USB_DT_DEVICE_SIZE)
+ 		goto fail;
+ 	kbuf += total;
+ 	length -= total;
+@@ -1813,10 +1817,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
+ 	if (kbuf [1] == USB_DT_CONFIG) {
+ 		dev->hs_config = (void *) kbuf;
+ 		total = le16_to_cpu(dev->hs_config->wTotalLength);
+-		if (!is_valid_config (dev->hs_config) || total >= length)
++		if (!is_valid_config(dev->hs_config, total) ||
++				total > length - USB_DT_DEVICE_SIZE)
+ 			goto fail;
+ 		kbuf += total;
+ 		length -= total;
++	} else {
++		dev->hs_config = NULL;
+ 	}
+ 
+ 	/* could support multiple configs, using another encoding! */
+diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
+index 181112c88f43..3300091e0e2a 100644
+--- a/drivers/usb/gadget/udc/dummy_hcd.c
++++ b/drivers/usb/gadget/udc/dummy_hcd.c
+@@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep)
+ /* caller must hold lock */
+ static void stop_activity(struct dummy *dum)
+ {
+-	struct dummy_ep	*ep;
++	int i;
+ 
+ 	/* prevent any more requests */
+ 	dum->address = 0;
+@@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum)
+ 	/* The timer is left running so that outstanding URBs can fail */
+ 
+ 	/* nuke any pending requests first, so driver i/o is quiesced */
+-	list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
+-		nuke(dum, ep);
++	for (i = 0; i < DUMMY_ENDPOINTS; ++i)
++		nuke(dum, &dum->ep[i]);
+ 
+ 	/* driver now does any non-usb quiescing necessary */
+ }
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 5ab70afd5624..bd06e3a907fa 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -964,6 +964,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
+ 	xhci->devs[slot_id] = NULL;
+ }
+ 
++/*
++ * Free a virt_device structure.
++ * If the virt_device added a tt_info (a hub) and has children pointing to
++ * that tt_info, then free the child first. Recursive.
++ * We can't rely on udev at this point to find child-parent relationships.
++ */
++void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id)
++{
++	struct xhci_virt_device *vdev;
++	struct list_head *tt_list_head;
++	struct xhci_tt_bw_info *tt_info, *next;
++	int i;
++
++	vdev = xhci->devs[slot_id];
++	if (!vdev)
++		return;
++
++	tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
++	list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
++		/* is this a hub device that added a tt_info to the tts list? */
++		if (tt_info->slot_id == slot_id) {
++			/* are any devices using this tt_info? */
++			for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) {
++				vdev = xhci->devs[i];
++				if (vdev && (vdev->tt_info == tt_info))
++					xhci_free_virt_devices_depth_first(
++						xhci, i);
++			}
++		}
++	}
++	/* we are now at a leaf device */
++	xhci_free_virt_device(xhci, slot_id);
++}
++
+ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
+ 		struct usb_device *udev, gfp_t flags)
+ {
+@@ -1825,8 +1859,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 		}
+ 	}
+ 
+-	for (i = 1; i < MAX_HC_SLOTS; ++i)
+-		xhci_free_virt_device(xhci, i);
++	for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--)
++		xhci_free_virt_devices_depth_first(xhci, i);
+ 
+ 	if (xhci->segment_pool)
+ 		dma_pool_destroy(xhci->segment_pool);
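
xhci_mem_cleanup() now tears slots down with the new depth-first helper so
that devices hanging off a hub's tt_info are freed before the hub itself.
A toy model of that ordering, with parent links by slot number standing in
for the tt_info references (everything here is illustrative):

#include <stdio.h>
#include <stdlib.h>

#define MAX_SLOTS 4

/* Illustrative only: devices point at a parent hub by slot number;
 * freeing depth-first releases children before the hub they hang off. */
struct dev {
	int parent;	/* slot id of the parent hub, or 0 for none */
};

static struct dev *slots[MAX_SLOTS + 1];

static void free_depth_first(int slot)
{
	int i;

	if (!slots[slot])
		return;
	for (i = 1; i <= MAX_SLOTS; i++)
		if (slots[i] && slots[i]->parent == slot)
			free_depth_first(i);
	printf("freeing slot %d\n", slot);
	free(slots[slot]);
	slots[slot] = NULL;
}

int main(void)
{
	int i;

	for (i = 1; i <= MAX_SLOTS; i++) {
		slots[i] = malloc(sizeof(*slots[i]));
		if (!slots[i])
			return 1;
		slots[i]->parent = (i == 1) ? 0 : 1;	/* slot 1 is a hub */
	}
	/* hitting the hub first shows the recursion: 2, 3, 4, then 1 */
	for (i = 1; i <= MAX_SLOTS; i++)
		free_depth_first(i);
	return 0;
}
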
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index fc60a9e8a129..bb47ea2c9c39 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -159,7 +159,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 2d8e77ff7821..7c08c7175c01 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -846,17 +846,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+ 	ep->stop_cmds_pending--;
+-	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
+-		spin_unlock_irqrestore(&xhci->lock, flags);
+-		return;
+-	}
+-	if (xhci->xhc_state & XHCI_STATE_DYING) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+-				"Stop EP timer ran, but another timer marked "
+-				"xHCI as DYING, exiting.");
+-		spin_unlock_irqrestore(&xhci->lock, flags);
+-		return;
+-	}
+ 	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 				"Stop EP timer ran, but no command pending, "
+@@ -1268,41 +1257,54 @@ void xhci_handle_command_timeout(unsigned long data)
+ 	bool second_timeout = false;
+ 	xhci = (struct xhci_hcd *) data;
+ 
+-	/* mark this command to be cancelled */
+ 	spin_lock_irqsave(&xhci->lock, flags);
+-	if (xhci->current_cmd) {
+-		if (xhci->current_cmd->status == COMP_CMD_ABORT)
+-			second_timeout = true;
+-		xhci->current_cmd->status = COMP_CMD_ABORT;
++
++	/*
++	 * If timeout work is pending, or current_cmd is NULL, it means we
++	 * raced with command completion. Command is handled so just return.
++	 */
++	if (!xhci->current_cmd || timer_pending(&xhci->cmd_timer)) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		return;
+ 	}
+ 
++	/* mark this command to be cancelled */
++	if (xhci->current_cmd->status == COMP_CMD_ABORT)
++		second_timeout = true;
++	xhci->current_cmd->status = COMP_CMD_ABORT;
++
+ 	/* Make sure command ring is running before aborting it */
+ 	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ 	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+ 	    (hw_ring_state & CMD_RING_RUNNING))  {
+-		spin_unlock_irqrestore(&xhci->lock, flags);
+ 		xhci_dbg(xhci, "Command timeout\n");
+ 		ret = xhci_abort_cmd_ring(xhci);
+ 		if (unlikely(ret == -ESHUTDOWN)) {
+ 			xhci_err(xhci, "Abort command ring failed\n");
+ 			xhci_cleanup_command_queue(xhci);
++			spin_unlock_irqrestore(&xhci->lock, flags);
+ 			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+ 			xhci_dbg(xhci, "xHCI host controller is dead.\n");
++
++			return;
+ 		}
+-		return;
++
++		goto time_out_completed;
+ 	}
+ 
+ 	/* command ring failed to restart, or host removed. Bail out */
+ 	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+-		spin_unlock_irqrestore(&xhci->lock, flags);
+ 		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+ 		xhci_cleanup_command_queue(xhci);
+-		return;
++
++		goto time_out_completed;
+ 	}
+ 
+ 	/* command timeout on stopped ring, ring can't be aborted */
+ 	xhci_dbg(xhci, "Command timeout on stopped ring\n");
+ 	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
++
++time_out_completed:
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 	return;
+ }
+@@ -1361,8 +1363,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 	 */
+ 	if (cmd_comp_code == COMP_CMD_ABORT) {
+ 		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+-		if (cmd->status == COMP_CMD_ABORT)
++		if (cmd->status == COMP_CMD_ABORT) {
++			if (xhci->current_cmd == cmd)
++				xhci->current_cmd = NULL;
+ 			goto event_handled;
++		}
+ 	}
+ 
+ 	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+@@ -1424,6 +1429,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 		xhci->current_cmd = list_entry(cmd->cmd_list.next,
+ 					       struct xhci_command, cmd_list);
+ 		mod_timer(&xhci->cmd_timer, jiffies + XHCI_CMD_DEFAULT_TIMEOUT);
++	} else if (xhci->current_cmd == cmd) {
++		xhci->current_cmd = NULL;
+ 	}
+ 
+ event_handled:
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 896b928f7412..8ae4f9f50b6f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1576,19 +1576,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		xhci_urb_free_priv(urb_priv);
+ 		return ret;
+ 	}
+-	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+-			(xhci->xhc_state & XHCI_STATE_HALTED)) {
+-		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+-				"Ep 0x%x: URB %p to be canceled on "
+-				"non-responsive xHCI host.",
+-				urb->ep->desc.bEndpointAddress, urb);
+-		/* Let the stop endpoint command watchdog timer (which set this
+-		 * state) finish cleaning up the endpoint TD lists.  We must
+-		 * have caught it in the middle of dropping a lock and giving
+-		 * back an URB.
+-		 */
+-		goto done;
+-	}
+ 
+ 	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
+ 	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
+diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
+index 3877249a8b2d..4a1182354e98 100644
+--- a/drivers/usb/musb/musb_core.h
++++ b/drivers/usb/musb/musb_core.h
+@@ -205,6 +205,7 @@ struct musb_platform_ops {
+ 	int	(*adjust_channel_params)(struct dma_channel *channel,
+ 				u16 packet_sz, u8 *mode,
+ 				dma_addr_t *dma_addr, u32 *len);
++	void	(*clear_ep_rxintr)(struct musb *musb, int epnum);
+ };
+ 
+ /*
+@@ -590,4 +591,10 @@ static inline int musb_platform_exit(struct musb *musb)
+ 	return musb->ops->exit(musb);
+ }
+ 
++static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
++{
++	if (musb->ops->clear_ep_rxintr)
++		musb->ops->clear_ep_rxintr(musb, epnum);
++}
++
+ #endif	/* __MUSB_CORE_H__ */
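
The new clear_ep_rxintr op follows the usual optional-callback convention:
the inline wrapper checks for NULL, so only platforms that need the hook
(here, dsps) provide one. A minimal illustration of the shape, with
made-up types standing in for struct musb and its ops:

#include <stdio.h>

/* Illustrative only: an optional hook in an ops table, invoked through
 * a NULL-checking wrapper so most platforms can leave it unset. */
struct ops {
	void (*clear_ep_rxintr)(int epnum);
};

static void wrapper_clear_ep_rxintr(const struct ops *ops, int epnum)
{
	if (ops->clear_ep_rxintr)
		ops->clear_ep_rxintr(epnum);
}

static void dsps_clear(int epnum)
{
	printf("clearing rx interrupt for ep %d\n", epnum);
}

int main(void)
{
	struct ops dsps = { .clear_ep_rxintr = dsps_clear };
	struct ops plain = { 0 };

	wrapper_clear_ep_rxintr(&dsps, 1);
	wrapper_clear_ep_rxintr(&plain, 1);	/* safe no-op */
	return 0;
}
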
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index dcac5e7f19e0..fad6d5b92df9 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -301,6 +301,17 @@ static void otg_timer(unsigned long _musb)
+ 	spin_unlock_irqrestore(&musb->lock, flags);
+ }
+ 
++void dsps_musb_clear_ep_rxintr(struct musb *musb, int epnum)
++{
++	u32 epintr;
++	struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
++	const struct dsps_musb_wrapper *wrp = glue->wrp;
++
++	/* musb->lock might already be held */
++	epintr = (1 << epnum) << wrp->rxep_shift;
++	musb_writel(musb->ctrl_base, wrp->epintr_status, epintr);
++}
++
+ static irqreturn_t dsps_interrupt(int irq, void *hci)
+ {
+ 	struct musb  *musb = hci;
+@@ -647,6 +658,7 @@ static struct musb_platform_ops dsps_ops = {
+ 	.try_idle	= dsps_musb_try_idle,
+ 	.set_mode	= dsps_musb_set_mode,
+ 	.recover	= dsps_musb_recover,
++	.clear_ep_rxintr = dsps_musb_clear_ep_rxintr,
+ };
+ 
+ static u64 musb_dmamask = DMA_BIT_MASK(32);
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index 06853d7c89fd..1d0c096c1b84 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -2273,12 +2273,11 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
+ 	int			is_in = usb_pipein(urb->pipe);
+ 	int			status = 0;
+ 	u16			csr;
++	struct dma_channel	*dma = NULL;
+ 
+ 	musb_ep_select(regs, hw_end);
+ 
+ 	if (is_dma_capable()) {
+-		struct dma_channel	*dma;
+-
+ 		dma = is_in ? ep->rx_channel : ep->tx_channel;
+ 		if (dma) {
+ 			status = ep->musb->dma_controller->channel_abort(dma);
+@@ -2295,10 +2294,9 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
+ 		/* giveback saves bulk toggle */
+ 		csr = musb_h_flush_rxfifo(ep, 0);
+ 
+-		/* REVISIT we still get an irq; should likely clear the
+-		 * endpoint's irq status here to avoid bogus irqs.
+-		 * clearing that status is platform-specific...
+-		 */
++		/* clear the endpoint's irq status here to avoid bogus irqs */
++		if (is_dma_capable() && dma)
++			musb_platform_clear_ep_rxintr(musb, ep->epnum);
+ 	} else if (ep->epnum) {
+ 		musb_h_tx_flush_fifo(ep);
+ 		csr = musb_readw(epio, MUSB_TXCSR);
+diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
+index c73808f095bb..a9fb5838e717 100644
+--- a/drivers/usb/serial/ch341.c
++++ b/drivers/usb/serial/ch341.c
+@@ -99,6 +99,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
+ 	r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
+ 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 			    value, index, NULL, 0, DEFAULT_TIMEOUT);
++	if (r < 0)
++		dev_err(&dev->dev, "failed to send control message: %d\n", r);
+ 
+ 	return r;
+ }
+@@ -116,7 +118,20 @@ static int ch341_control_in(struct usb_device *dev,
+ 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
+ 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
+-	return r;
++	if (r < bufsize) {
++		if (r >= 0) {
++			dev_err(&dev->dev,
++				"short control message received (%d < %u)\n",
++				r, bufsize);
++			r = -EIO;
++		}
++
++		dev_err(&dev->dev, "failed to receive control message: %d\n",
++			r);
++		return r;
++	}
++
++	return 0;
+ }
+ 
+ static int ch341_set_baudrate(struct usb_device *dev,
+@@ -158,9 +173,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
+ 
+ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
+ {
++	const unsigned int size = 2;
+ 	char *buffer;
+ 	int r;
+-	const unsigned size = 8;
+ 	unsigned long flags;
+ 
+ 	buffer = kmalloc(size, GFP_KERNEL);
+@@ -171,14 +186,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
+ 	if (r < 0)
+ 		goto out;
+ 
+-	/* setup the private status if available */
+-	if (r == 2) {
+-		r = 0;
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+-		spin_unlock_irqrestore(&priv->lock, flags);
+-	} else
+-		r = -EPROTO;
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+ out:	kfree(buffer);
+ 	return r;
+@@ -188,9 +198,9 @@ out:	kfree(buffer);
+ 
+ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
+ {
++	const unsigned int size = 2;
+ 	char *buffer;
+ 	int r;
+-	const unsigned size = 8;
+ 
+ 	buffer = kmalloc(size, GFP_KERNEL);
+ 	if (!buffer)
+@@ -253,7 +263,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
+ 
+ 	spin_lock_init(&priv->lock);
+ 	priv->baud_rate = DEFAULT_BAUD_RATE;
+-	priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
+ 
+ 	r = ch341_configure(port->serial->dev, priv);
+ 	if (r < 0)
+@@ -315,7 +324,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 
+ 	r = ch341_configure(serial->dev, priv);
+ 	if (r)
+-		goto out;
++		return r;
+ 
+ 	if (tty)
+ 		ch341_set_termios(tty, port, NULL);
+@@ -325,12 +334,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	if (r) {
+ 		dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
+ 			__func__, r);
+-		goto out;
++		return r;
+ 	}
+ 
+ 	r = usb_serial_generic_open(tty, port);
++	if (r)
++		goto err_kill_interrupt_urb;
++
++	return 0;
++
++err_kill_interrupt_urb:
++	usb_kill_urb(port->interrupt_in_urb);
+ 
+-out:	return r;
++	return r;
+ }
+ 
+ /* Old_termios contains the original termios settings and
+@@ -345,26 +361,25 @@ static void ch341_set_termios(struct tty_struct *tty,
+ 
+ 	baud_rate = tty_get_baud_rate(tty);
+ 
+-	priv->baud_rate = baud_rate;
+-
+ 	if (baud_rate) {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+-		spin_unlock_irqrestore(&priv->lock, flags);
++		priv->baud_rate = baud_rate;
+ 		ch341_set_baudrate(port->serial->dev, priv);
+-	} else {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+-		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 
+-	ch341_set_handshake(port->serial->dev, priv->line_control);
+-
+ 	/* Unimplemented:
+ 	 * (cflag & CSIZE) : data bits [5, 8]
+ 	 * (cflag & PARENB) : parity {NONE, EVEN, ODD}
+ 	 * (cflag & CSTOPB) : stop bits [1, 2]
+ 	 */
++
++	spin_lock_irqsave(&priv->lock, flags);
++	if (C_BAUD(tty) == B0)
++		priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
++	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
++		priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
++	spin_unlock_irqrestore(&priv->lock, flags);
++
++	ch341_set_handshake(port->serial->dev, priv->line_control);
+ }
+ 
+ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
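
ch341_control_in() now treats a short control transfer as a hard -EIO
instead of passing a small positive count back to callers that assume a
full buffer. The wrapper pattern, sketched standalone with a fake transfer
function (the names are stand-ins, not the driver's API):

#include <errno.h>
#include <stdio.h>

/* Illustrative only: report a short read as -EIO rather than as a
 * small positive count, as the ch341 hunk does for control-in. */
static int read_exact(int (*xfer)(void *, unsigned int),
		      void *buf, unsigned int want)
{
	int r = xfer(buf, want);

	if (r < 0)
		return r;		/* transport error, pass through */
	if ((unsigned int)r < want)
		return -EIO;		/* short transfer is an error too */
	return 0;
}

static int fake_xfer(void *buf, unsigned int len)
{
	(void)buf;
	return len > 1 ? 1 : (int)len;	/* always returns one byte */
}

int main(void)
{
	char b[2];

	printf("want 2 -> %d\n", read_exact(fake_xfer, b, 2));	/* -EIO */
	printf("want 1 -> %d\n", read_exact(fake_xfer, b, 1));	/* 0 */
	return 0;
}
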
+diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
+index 2916dea3ede8..8948f375e75d 100644
+--- a/drivers/usb/serial/cyberjack.c
++++ b/drivers/usb/serial/cyberjack.c
+@@ -50,6 +50,7 @@
+ #define CYBERJACK_PRODUCT_ID	0x0100
+ 
+ /* Function prototypes */
++static int cyberjack_attach(struct usb_serial *serial);
+ static int cyberjack_port_probe(struct usb_serial_port *port);
+ static int cyberjack_port_remove(struct usb_serial_port *port);
+ static int  cyberjack_open(struct tty_struct *tty,
+@@ -77,6 +78,7 @@ static struct usb_serial_driver cyberjack_device = {
+ 	.description =		"Reiner SCT Cyberjack USB card reader",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		cyberjack_attach,
+ 	.port_probe =		cyberjack_port_probe,
+ 	.port_remove =		cyberjack_port_remove,
+ 	.open =			cyberjack_open,
+@@ -100,6 +102,14 @@ struct cyberjack_private {
+ 	short		wrsent;		/* Data already sent */
+ };
+ 
++static int cyberjack_attach(struct usb_serial *serial)
++{
++	if (serial->num_bulk_out < serial->num_ports)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int cyberjack_port_probe(struct usb_serial_port *port)
+ {
+ 	struct cyberjack_private *priv;
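
cyberjack_attach() is the first of a long run of hunks in this patch that
add the same hardening to USB serial drivers: attach() verifies the
interface really provides the endpoints the driver is about to use and
returns -ENODEV otherwise. One hedged sketch of the pattern covers them
all (the struct here is a stand-in for usb_serial, not the real type):

#include <errno.h>
#include <stdio.h>

/* Illustrative only: reject a device whose interface lacks the
 * endpoints the driver would otherwise dereference. */
struct fake_serial {
	int num_ports;
	int num_bulk_in;
	int num_bulk_out;
};

static int attach(const struct fake_serial *s)
{
	if (s->num_bulk_in < s->num_ports ||
	    s->num_bulk_out < s->num_ports) {
		fprintf(stderr, "missing endpoints\n");
		return -ENODEV;
	}
	return 0;
}

int main(void)
{
	struct fake_serial bad = { 1, 0, 1 };
	struct fake_serial good = { 1, 1, 1 };

	printf("bad: %d, good: %d\n", attach(&bad), attach(&good));
	return 0;
}
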
+diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
+index db591d19d416..37d0e8cc7af6 100644
+--- a/drivers/usb/serial/garmin_gps.c
++++ b/drivers/usb/serial/garmin_gps.c
+@@ -1044,6 +1044,7 @@ static int garmin_write_bulk(struct usb_serial_port *port,
+ 		   "%s - usb_submit_urb(write bulk) failed with status = %d\n",
+ 				__func__, status);
+ 		count = status;
++		kfree(buffer);
+ 	}
+ 
+ 	/* we are done with this urb, so let the host driver
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index 1947ea0e0988..b63a6c3899c5 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2761,6 +2761,11 @@ static int edge_startup(struct usb_serial *serial)
+ 					EDGE_COMPATIBILITY_MASK1,
+ 					EDGE_COMPATIBILITY_MASK2 };
+ 
++	if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	dev = serial->dev;
+ 
+ 	/* create our private serial structure */
+diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
+index ddbb8fe1046d..2ef757f35162 100644
+--- a/drivers/usb/serial/io_ti.c
++++ b/drivers/usb/serial/io_ti.c
+@@ -2378,6 +2378,13 @@ static int edge_startup(struct usb_serial *serial)
+ 	struct edgeport_serial *edge_serial;
+ 	int status;
+ 
++	/* Make sure we have the required endpoints when in download mode. */
++	if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) {
++		if (serial->num_bulk_in < serial->num_ports ||
++				serial->num_bulk_out < serial->num_ports)
++			return -ENODEV;
++	}
++
+ 	/* create our private serial structure */
+ 	edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL);
+ 	if (!edge_serial)
+diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
+index 5ad4a0fb4b26..7ed7d33d6c10 100644
+--- a/drivers/usb/serial/iuu_phoenix.c
++++ b/drivers/usb/serial/iuu_phoenix.c
+@@ -68,6 +68,16 @@ struct iuu_private {
+ 	u32 clk;
+ };
+ 
++static int iuu_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int iuu_port_probe(struct usb_serial_port *port)
+ {
+ 	struct iuu_private *priv;
+@@ -1196,6 +1206,7 @@ static struct usb_serial_driver iuu_device = {
+ 	.tiocmset = iuu_tiocmset,
+ 	.set_termios = iuu_set_termios,
+ 	.init_termios = iuu_init_termios,
++	.attach = iuu_attach,
+ 	.port_probe = iuu_port_probe,
+ 	.port_remove = iuu_port_remove,
+ };
+diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
+index 4f7e072e4e00..930be98d59b3 100644
+--- a/drivers/usb/serial/keyspan_pda.c
++++ b/drivers/usb/serial/keyspan_pda.c
+@@ -699,6 +699,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+ MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+ #endif
+ 
++static int keyspan_pda_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int keyspan_pda_port_probe(struct usb_serial_port *port)
+ {
+ 
+@@ -776,6 +789,7 @@ static struct usb_serial_driver keyspan_pda_device = {
+ 	.break_ctl =		keyspan_pda_break_ctl,
+ 	.tiocmget =		keyspan_pda_tiocmget,
+ 	.tiocmset =		keyspan_pda_tiocmset,
++	.attach =		keyspan_pda_attach,
+ 	.port_probe =		keyspan_pda_port_probe,
+ 	.port_remove =		keyspan_pda_port_remove,
+ };
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index 53c90131764d..28cd1d05ed58 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
+ 			     status_buf, KLSI_STATUSBUF_LEN,
+ 			     10000
+ 			     );
+-	if (rc < 0)
+-		dev_err(&port->dev, "Reading line status failed (error = %d)\n",
+-			rc);
+-	else {
++	if (rc != KLSI_STATUSBUF_LEN) {
++		dev_err(&port->dev, "reading line status failed: %d\n", rc);
++		if (rc >= 0)
++			rc = -EIO;
++	} else {
+ 		status = get_unaligned_le16(status_buf);
+ 
+ 		dev_info(&port->serial->dev->dev, "read status %x %x\n",
+diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
+index 2363654cafc9..813035f51fe7 100644
+--- a/drivers/usb/serial/kobil_sct.c
++++ b/drivers/usb/serial/kobil_sct.c
+@@ -51,6 +51,7 @@
+ 
+ 
+ /* Function prototypes */
++static int kobil_attach(struct usb_serial *serial);
+ static int kobil_port_probe(struct usb_serial_port *probe);
+ static int kobil_port_remove(struct usb_serial_port *probe);
+ static int  kobil_open(struct tty_struct *tty, struct usb_serial_port *port);
+@@ -86,6 +87,7 @@ static struct usb_serial_driver kobil_device = {
+ 	.description =		"KOBIL USB smart card terminal",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		kobil_attach,
+ 	.port_probe =		kobil_port_probe,
+ 	.port_remove =		kobil_port_remove,
+ 	.ioctl =		kobil_ioctl,
+@@ -113,6 +115,16 @@ struct kobil_private {
+ };
+ 
+ 
++static int kobil_attach(struct usb_serial *serial)
++{
++	if (serial->num_interrupt_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int kobil_port_probe(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial = port->serial;
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 9a0c610d85a9..3c0552df5e37 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -65,8 +65,6 @@ struct moschip_port {
+ 	struct urb		*write_urb_pool[NUM_URBS];
+ };
+ 
+-static struct usb_serial_driver moschip7720_2port_driver;
+-
+ #define USB_VENDOR_ID_MOSCHIP		0x9710
+ #define MOSCHIP_DEVICE_ID_7720		0x7720
+ #define MOSCHIP_DEVICE_ID_7715		0x7715
+@@ -962,25 +960,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
+ 		tty_port_tty_wakeup(&mos7720_port->port->port);
+ }
+ 
+-/*
+- * mos77xx_probe
+- *	this function installs the appropriate read interrupt endpoint callback
+- *	depending on whether the device is a 7720 or 7715, thus avoiding costly
+- *	run-time checks in the high-frequency callback routine itself.
+- */
+-static int mos77xx_probe(struct usb_serial *serial,
+-			 const struct usb_device_id *id)
+-{
+-	if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+-		moschip7720_2port_driver.read_int_callback =
+-			mos7715_interrupt_callback;
+-	else
+-		moschip7720_2port_driver.read_int_callback =
+-			mos7720_interrupt_callback;
+-
+-	return 0;
+-}
+-
+ static int mos77xx_calc_num_ports(struct usb_serial *serial)
+ {
+ 	u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+@@ -1897,6 +1876,11 @@ static int mos7720_startup(struct usb_serial *serial)
+ 	u16 product;
+ 	int ret_val;
+ 
++	if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing bulk endpoints\n");
++		return -ENODEV;
++	}
++
+ 	product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ 	dev = serial->dev;
+ 
+@@ -1921,19 +1905,18 @@ static int mos7720_startup(struct usb_serial *serial)
+ 			tmp->interrupt_in_endpointAddress;
+ 		serial->port[1]->interrupt_in_urb = NULL;
+ 		serial->port[1]->interrupt_in_buffer = NULL;
++
++		if (serial->port[0]->interrupt_in_urb) {
++			struct urb *urb = serial->port[0]->interrupt_in_urb;
++
++			urb->complete = mos7715_interrupt_callback;
++		}
+ 	}
+ 
+ 	/* setting configuration feature to one */
+ 	usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
+ 			(__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000);
+ 
+-	/* start the interrupt urb */
+-	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
+-	if (ret_val)
+-		dev_err(&dev->dev,
+-			"%s - Error %d submitting control urb\n",
+-			__func__, ret_val);
+-
+ #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ 	if (product == MOSCHIP_DEVICE_ID_7715) {
+ 		ret_val = mos7715_parport_init(serial);
+@@ -1941,6 +1924,13 @@ static int mos7720_startup(struct usb_serial *serial)
+ 			return ret_val;
+ 	}
+ #endif
++	/* start the interrupt urb */
++	ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL);
++	if (ret_val) {
++		dev_err(&dev->dev, "failed to submit interrupt urb: %d\n",
++			ret_val);
++	}
++
+ 	/* LSR For Port 1 */
+ 	read_mos_reg(serial, 0, LSR, &data);
+ 	dev_dbg(&dev->dev, "LSR:%x\n", data);
+@@ -1950,6 +1940,8 @@ static int mos7720_startup(struct usb_serial *serial)
+ 
+ static void mos7720_release(struct usb_serial *serial)
+ {
++	usb_kill_urb(serial->port[0]->interrupt_in_urb);
++
+ #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT
+ 	/* close the parallel port */
+ 
+@@ -2032,7 +2024,6 @@ static struct usb_serial_driver moschip7720_2port_driver = {
+ 	.close			= mos7720_close,
+ 	.throttle		= mos7720_throttle,
+ 	.unthrottle		= mos7720_unthrottle,
+-	.probe			= mos77xx_probe,
+ 	.attach			= mos7720_startup,
+ 	.release		= mos7720_release,
+ 	.port_probe		= mos7720_port_probe,
+@@ -2046,7 +2037,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
+ 	.chars_in_buffer	= mos7720_chars_in_buffer,
+ 	.break_ctl		= mos7720_break,
+ 	.read_bulk_callback	= mos7720_bulk_in_callback,
+-	.read_int_callback	= NULL  /* dynamically assigned in probe() */
++	.read_int_callback	= mos7720_interrupt_callback,
+ };
+ 
+ static struct usb_serial_driver * const serial_drivers[] = {
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index f5ab4cd9e7a1..d9f7b3bae09a 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -2116,6 +2116,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
+ 	return mos7840_num_ports;
+ }
+ 
++static int mos7840_attach(struct usb_serial *serial)
++{
++	if (serial->num_bulk_in < serial->num_ports ||
++			serial->num_bulk_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int mos7840_port_probe(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial = port->serial;
+@@ -2394,6 +2405,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
+ 	.tiocmset = mos7840_tiocmset,
+ 	.tiocmiwait = usb_serial_generic_tiocmiwait,
+ 	.get_icount = usb_serial_generic_get_icount,
++	.attach = mos7840_attach,
+ 	.port_probe = mos7840_port_probe,
+ 	.port_remove = mos7840_port_remove,
+ 	.read_bulk_callback = mos7840_bulk_in_callback,
+diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
+index f6c6900bccf0..a180b17d2432 100644
+--- a/drivers/usb/serial/omninet.c
++++ b/drivers/usb/serial/omninet.c
+@@ -38,6 +38,7 @@ static int  omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 				const unsigned char *buf, int count);
+ static int  omninet_write_room(struct tty_struct *tty);
+ static void omninet_disconnect(struct usb_serial *serial);
++static int omninet_attach(struct usb_serial *serial);
+ static int omninet_port_probe(struct usb_serial_port *port);
+ static int omninet_port_remove(struct usb_serial_port *port);
+ 
+@@ -56,6 +57,7 @@ static struct usb_serial_driver zyxel_omninet_device = {
+ 	.description =		"ZyXEL - omni.net lcd plus usb",
+ 	.id_table =		id_table,
+ 	.num_ports =		1,
++	.attach =		omninet_attach,
+ 	.port_probe =		omninet_port_probe,
+ 	.port_remove =		omninet_port_remove,
+ 	.open =			omninet_open,
+@@ -104,6 +106,17 @@ struct omninet_data {
+ 	__u8	od_outseq;	/* Sequence number for bulk_out URBs */
+ };
+ 
++static int omninet_attach(struct usb_serial *serial)
++{
++	/* The second bulk-out endpoint is used for writing. */
++	if (serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int omninet_port_probe(struct usb_serial_port *port)
+ {
+ 	struct omninet_data *od;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 248dac170f39..cfa46e2a8482 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -2019,6 +2019,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
+index a4b88bc038b6..b8bf52bf7a94 100644
+--- a/drivers/usb/serial/oti6858.c
++++ b/drivers/usb/serial/oti6858.c
+@@ -134,6 +134,7 @@ static int oti6858_chars_in_buffer(struct tty_struct *tty);
+ static int oti6858_tiocmget(struct tty_struct *tty);
+ static int oti6858_tiocmset(struct tty_struct *tty,
+ 				unsigned int set, unsigned int clear);
++static int oti6858_attach(struct usb_serial *serial);
+ static int oti6858_port_probe(struct usb_serial_port *port);
+ static int oti6858_port_remove(struct usb_serial_port *port);
+ 
+@@ -158,6 +159,7 @@ static struct usb_serial_driver oti6858_device = {
+ 	.write_bulk_callback =	oti6858_write_bulk_callback,
+ 	.write_room =		oti6858_write_room,
+ 	.chars_in_buffer =	oti6858_chars_in_buffer,
++	.attach =		oti6858_attach,
+ 	.port_probe =		oti6858_port_probe,
+ 	.port_remove =		oti6858_port_remove,
+ };
+@@ -324,6 +326,20 @@ static void send_data(struct work_struct *work)
+ 	usb_serial_port_softint(port);
+ }
+ 
++static int oti6858_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int oti6858_port_probe(struct usb_serial_port *port)
+ {
+ 	struct oti6858_private *priv;
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index ae682e4eeaef..1db4b61bdf7b 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
+ 	{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
++	{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
+ 	{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
+ 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
+ 	{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
+@@ -220,9 +221,17 @@ static int pl2303_probe(struct usb_serial *serial,
+ static int pl2303_startup(struct usb_serial *serial)
+ {
+ 	struct pl2303_serial_private *spriv;
++	unsigned char num_ports = serial->num_ports;
+ 	enum pl2303_type type = TYPE_01;
+ 	unsigned char *buf;
+ 
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports ||
++			serial->num_interrupt_in < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+ 	if (!spriv)
+ 		return -ENOMEM;
+diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
+index e3b7af8adfb7..09d9be88209e 100644
+--- a/drivers/usb/serial/pl2303.h
++++ b/drivers/usb/serial/pl2303.h
+@@ -27,6 +27,7 @@
+ #define ATEN_VENDOR_ID		0x0557
+ #define ATEN_VENDOR_ID2		0x0547
+ #define ATEN_PRODUCT_ID		0x2008
++#define ATEN_PRODUCT_ID2	0x2118
+ 
+ #define IODATA_VENDOR_ID	0x04bb
+ #define IODATA_PRODUCT_ID	0x0a03
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index f0a2ad15a992..e08ae0505ad2 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x1410, 0xa021)},	/* Novatel Gobi 3000 Composite */
+ 	{USB_DEVICE(0x413c, 0x8193)},	/* Dell Gobi 3000 QDL */
+ 	{USB_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
++	{USB_DEVICE(0x413c, 0x81a6)},	/* Dell DW5570 QDL (MC8805) */
+ 	{USB_DEVICE(0x1199, 0x68a4)},	/* Sierra Wireless QDL */
+ 	{USB_DEVICE(0x1199, 0x68a5)},	/* Sierra Wireless Modem */
+ 	{USB_DEVICE(0x1199, 0x68a8)},	/* Sierra Wireless QDL */
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index b18974cbd995..a3ed07c58754 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -408,16 +408,12 @@ static void qt2_close(struct usb_serial_port *port)
+ {
+ 	struct usb_serial *serial;
+ 	struct qt2_port_private *port_priv;
+-	unsigned long flags;
+ 	int i;
+ 
+ 	serial = port->serial;
+ 	port_priv = usb_get_serial_port_data(port);
+ 
+-	spin_lock_irqsave(&port_priv->urb_lock, flags);
+ 	usb_kill_urb(port_priv->write_urb);
+-	port_priv->urb_in_use = false;
+-	spin_unlock_irqrestore(&port_priv->urb_lock, flags);
+ 
+ 	/* flush the port transmit buffer */
+ 	i = usb_control_msg(serial->dev,
+diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
+index ef0dbf0703c5..475e6c31b266 100644
+--- a/drivers/usb/serial/spcp8x5.c
++++ b/drivers/usb/serial/spcp8x5.c
+@@ -154,6 +154,19 @@ static int spcp8x5_probe(struct usb_serial *serial,
+ 	return 0;
+ }
+ 
++static int spcp8x5_attach(struct usb_serial *serial)
++{
++	unsigned char num_ports = serial->num_ports;
++
++	if (serial->num_bulk_in < num_ports ||
++			serial->num_bulk_out < num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
++	return 0;
++}
++
+ static int spcp8x5_port_probe(struct usb_serial_port *port)
+ {
+ 	const struct usb_device_id *id = usb_get_serial_data(port->serial);
+@@ -477,6 +490,7 @@ static struct usb_serial_driver spcp8x5_device = {
+ 	.tiocmget		= spcp8x5_tiocmget,
+ 	.tiocmset		= spcp8x5_tiocmset,
+ 	.probe			= spcp8x5_probe,
++	.attach			= spcp8x5_attach,
+ 	.port_probe		= spcp8x5_port_probe,
+ 	.port_remove		= spcp8x5_port_remove,
+ };
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index 2694df2f4559..535fcfafc097 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -339,6 +339,13 @@ static int ti_startup(struct usb_serial *serial)
+ 		goto free_tdev;
+ 	}
+ 
++	if (serial->num_bulk_in < serial->num_ports ||
++			serial->num_bulk_out < serial->num_ports) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		status = -ENODEV;
++		goto free_tdev;
++	}
++
+ 	return 0;
+ 
+ free_tdev:
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 4095824c8c6d..2f40b6150fdc 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2124,6 +2124,13 @@ UNUSUAL_DEV(  0x22b8, 0x3010, 0x0001, 0x0001,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ),
+ 
+/* Reported-by: George Cherian <george.cherian@cavium.com> */
++UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
++		"JMicron",
++		"JMS56x",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_OPCODES),
++
+ /*
+  * Patch by Constantin Baranov <const@tltsu.ru>
+  * Report by Andreas Koenecke.
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b8ba8e..68a113594808 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
+ 
+ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
+ {
+-	int tooff = 0, fromoff = 0;
+-	int size;
++	unsigned int tooff = 0, fromoff = 0;
++	size_t size;
+ 
+ 	if (to->start > from->start)
+ 		fromoff = to->start - from->start;
+ 	else
+ 		tooff = from->start - to->start;
+-	size = to->len - tooff;
+-	if (size > (int) (from->len - fromoff))
+-		size = from->len - fromoff;
+-	if (size <= 0)
++	if (fromoff >= from->len || tooff >= to->len)
++		return -EINVAL;
++
++	size = min_t(size_t, to->len - tooff, from->len - fromoff);
++	if (size == 0)
+ 		return -EINVAL;
+ 	size *= sizeof(u16);
+ 
+@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
+ 
+ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
+ {
+-	int tooff = 0, fromoff = 0;
+-	int size;
++	unsigned int tooff = 0, fromoff = 0;
++	size_t size;
+ 
+ 	if (to->start > from->start)
+ 		fromoff = to->start - from->start;
+ 	else
+ 		tooff = from->start - to->start;
+-	size = to->len - tooff;
+-	if (size > (int) (from->len - fromoff))
+-		size = from->len - fromoff;
+-	if (size <= 0)
++	if (fromoff >= from->len || tooff >= to->len)
++		return -EINVAL;
++
++	size = min_t(size_t, to->len - tooff, from->len - fromoff);
++	if (size == 0)
+ 		return -EINVAL;
+ 	size *= sizeof(u16);
+ 
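
The fbcmap rework replaces signed int length arithmetic, where
to->len - tooff can go negative and slip past the signed comparisons,
with unsigned offsets validated up front and a min_t() clamp. A
standalone version of the computation (names and the zero-means-EINVAL
convention are stand-ins):

#include <stdio.h>

/* Illustrative only: validate offsets first, then clamp the copy
 * length with an unsigned min, as the reworked fb_copy_cmap() does. */
static size_t copy_len(unsigned int to_len, unsigned int tooff,
		       unsigned int from_len, unsigned int fromoff)
{
	size_t a, b;

	if (tooff >= to_len || fromoff >= from_len)
		return 0;	/* caller treats 0 as -EINVAL */
	a = to_len - tooff;
	b = from_len - fromoff;
	return a < b ? a : b;
}

int main(void)
{
	printf("%zu\n", copy_len(4, 0, 8, 2));	/* 4 */
	printf("%zu\n", copy_len(4, 6, 8, 0));	/* 0: offset past end */
	return 0;
}
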
+diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
+index 18078ecbfcc6..78271c8721f8 100644
+--- a/drivers/vme/bridges/vme_ca91cx42.c
++++ b/drivers/vme/bridges/vme_ca91cx42.c
+@@ -468,7 +468,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
+ 	vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
+ 	pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
+ 
+-	*pci_base = (dma_addr_t)vme_base + pci_offset;
++	*pci_base = (dma_addr_t)*vme_base + pci_offset;
+ 	*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
+ 
+ 	*enabled = 0;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index ff742d30ba60..f1feb3123c9c 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -5508,6 +5508,10 @@ long btrfs_ioctl(struct file *file, unsigned int
+ #ifdef CONFIG_COMPAT
+ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
++	/*
++	 * These all access 32-bit values anyway so no further
++	 * handling is necessary.
++	 */
+ 	switch (cmd) {
+ 	case FS_IOC32_GETFLAGS:
+ 		cmd = FS_IOC_GETFLAGS;
+@@ -5518,8 +5522,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case FS_IOC32_GETVERSION:
+ 		cmd = FS_IOC_GETVERSION;
+ 		break;
+-	default:
+-		return -ENOIOCTLCMD;
+ 	}
+ 
+ 	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 1e99b29650a9..4f3bf0f527f6 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -273,12 +273,13 @@ static int parse_reply_info_extra(void **p, void *end,
+ 				  struct ceph_mds_reply_info_parsed *info,
+ 				  u64 features)
+ {
+-	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
++	u32 op = le32_to_cpu(info->head->op);
++
++	if (op == CEPH_MDS_OP_GETFILELOCK)
+ 		return parse_reply_info_filelock(p, end, info, features);
+-	else if (info->head->op == CEPH_MDS_OP_READDIR ||
+-		 info->head->op == CEPH_MDS_OP_LSSNAP)
++	else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
+ 		return parse_reply_info_dir(p, end, info, features);
+-	else if (info->head->op == CEPH_MDS_OP_CREATE)
++	else if (op == CEPH_MDS_OP_CREATE)
+ 		return parse_reply_info_create(p, end, info, features);
+ 	else
+ 		return -EIO;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 660857431b1c..11d466bbfb0b 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1322,8 +1322,11 @@ int d_set_mounted(struct dentry *dentry)
+ 	}
+ 	spin_lock(&dentry->d_lock);
+ 	if (!d_unlinked(dentry)) {
+-		dentry->d_flags |= DCACHE_MOUNTED;
+-		ret = 0;
++		ret = -EBUSY;
++		if (!d_mountpoint(dentry)) {
++			dentry->d_flags |= DCACHE_MOUNTED;
++			ret = 0;
++		}
+ 	}
+  	spin_unlock(&dentry->d_lock);
+ out:
+diff --git a/fs/exec.c b/fs/exec.c
+index d392c8ad0de0..04c9cab4d4d3 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1254,7 +1254,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
+ 	unsigned n_fs;
+ 
+ 	if (p->ptrace) {
+-		if (p->ptrace & PT_PTRACE_CAP)
++		if (ptracer_capable(p, current_user_ns()))
+ 			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
+ 		else
+ 			bprm->unsafe |= LSM_UNSAFE_PTRACE;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index f853aaf92ec9..c46921daca22 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -735,26 +735,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
+ 	return NULL;
+ }
+ 
+-static struct mountpoint *new_mountpoint(struct dentry *dentry)
++static struct mountpoint *get_mountpoint(struct dentry *dentry)
+ {
+-	struct hlist_head *chain = mp_hash(dentry);
+-	struct mountpoint *mp;
++	struct mountpoint *mp, *new = NULL;
+ 	int ret;
+ 
+-	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+-	if (!mp)
++	if (d_mountpoint(dentry)) {
++mountpoint:
++		read_seqlock_excl(&mount_lock);
++		mp = lookup_mountpoint(dentry);
++		read_sequnlock_excl(&mount_lock);
++		if (mp)
++			goto done;
++	}
++
++	if (!new)
++		new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
++	if (!new)
+ 		return ERR_PTR(-ENOMEM);
+ 
++
++	/* Exactly one process may set d_mounted */
+ 	ret = d_set_mounted(dentry);
+-	if (ret) {
+-		kfree(mp);
+-		return ERR_PTR(ret);
+-	}
+ 
+-	mp->m_dentry = dentry;
+-	mp->m_count = 1;
+-	hlist_add_head(&mp->m_hash, chain);
+-	INIT_HLIST_HEAD(&mp->m_list);
++	/* Someone else set d_mounted? */
++	if (ret == -EBUSY)
++		goto mountpoint;
++
++	/* The dentry is not available as a mountpoint? */
++	mp = ERR_PTR(ret);
++	if (ret)
++		goto done;
++
++	/* Add the new mountpoint to the hash table */
++	read_seqlock_excl(&mount_lock);
++	new->m_dentry = dentry;
++	new->m_count = 1;
++	hlist_add_head(&new->m_hash, mp_hash(dentry));
++	INIT_HLIST_HEAD(&new->m_list);
++	read_sequnlock_excl(&mount_lock);
++
++	mp = new;
++	new = NULL;
++done:
++	kfree(new);
+ 	return mp;
+ }
+ 
+@@ -1557,11 +1581,11 @@ void __detach_mounts(struct dentry *dentry)
+ 	struct mount *mnt;
+ 
+ 	namespace_lock();
++	lock_mount_hash();
+ 	mp = lookup_mountpoint(dentry);
+ 	if (IS_ERR_OR_NULL(mp))
+ 		goto out_unlock;
+ 
+-	lock_mount_hash();
+ 	event++;
+ 	while (!hlist_empty(&mp->m_list)) {
+ 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+@@ -1571,9 +1595,9 @@ void __detach_mounts(struct dentry *dentry)
+ 		}
+ 		else umount_tree(mnt, UMOUNT_CONNECTED);
+ 	}
+-	unlock_mount_hash();
+ 	put_mountpoint(mp);
+ out_unlock:
++	unlock_mount_hash();
+ 	namespace_unlock();
+ }
+ 
+@@ -1994,9 +2018,7 @@ retry:
+ 	namespace_lock();
+ 	mnt = lookup_mnt(path);
+ 	if (likely(!mnt)) {
+-		struct mountpoint *mp = lookup_mountpoint(dentry);
+-		if (!mp)
+-			mp = new_mountpoint(dentry);
++		struct mountpoint *mp = get_mountpoint(dentry);
+ 		if (IS_ERR(mp)) {
+ 			namespace_unlock();
+ 			mutex_unlock(&dentry->d_inode->i_mutex);
+@@ -2015,7 +2037,11 @@ retry:
+ static void unlock_mount(struct mountpoint *where)
+ {
+ 	struct dentry *dentry = where->m_dentry;
++
++	read_seqlock_excl(&mount_lock);
+ 	put_mountpoint(where);
++	read_sequnlock_excl(&mount_lock);
++
+ 	namespace_unlock();
+ 	mutex_unlock(&dentry->d_inode->i_mutex);
+ }
+@@ -3091,9 +3117,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
+ 	touch_mnt_namespace(current->nsproxy->mnt_ns);
+ 	/* A moved mount should not expire automatically */
+ 	list_del_init(&new_mnt->mnt_expire);
++	put_mountpoint(root_mp);
+ 	unlock_mount_hash();
+ 	chroot_fs_refs(&root, &new);
+-	put_mountpoint(root_mp);
+ 	error = 0;
+ out4:
+ 	unlock_mount(old_mp);
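
get_mountpoint() above is the lookup/allocate/retry idiom: look up an
existing mountpoint, otherwise allocate one, and if d_set_mounted()
reports -EBUSY because another task won the race, loop back to the
lookup; the preallocated node is released by the unconditional kfree(new)
at done. A simplified single-threaded sketch of that control flow, where
claim() stands in for d_set_mounted() and the "hash table" is one slot:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int claimed;
static void *table_entry;	/* stand-in for the hash table slot */

static void *lookup(void) { return claimed ? table_entry : NULL; }

static int claim(void) { return claimed ? -EBUSY : (claimed = 1, 0); }

static void *get_entry(void)
{
	void *mp, *new = NULL;

again:
	mp = lookup();
	if (mp)
		goto done;
	if (!new)
		new = malloc(1);
	if (!new)
		return NULL;
	if (claim() == -EBUSY)
		goto again;	/* someone else inserted; find theirs */
	table_entry = new;
	mp = new;
	new = NULL;
done:
	free(new);		/* free(NULL) is a no-op, like kfree() */
	return mp;
}

int main(void)
{
	void *a = get_entry();
	void *b = get_entry();	/* second call finds the first entry */

	printf("same entry: %d\n", a == b);
	free(a);
	return 0;
}
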
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index ddef1dc80cf7..2a9ab265aa32 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1073,6 +1073,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
+ 		case -NFS4ERR_BADXDR:
+ 		case -NFS4ERR_RESOURCE:
+ 		case -NFS4ERR_NOFILEHANDLE:
++		case -NFS4ERR_MOVED:
+ 			/* Non-seqid mutating errors */
+ 			return;
+ 	};
+diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
+index 6904213a4363..8b6ec3eab627 100644
+--- a/fs/nfsd/nfs4layouts.c
++++ b/fs/nfsd/nfs4layouts.c
+@@ -189,10 +189,11 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
+ 	struct nfs4_layout_stateid *ls;
+ 	struct nfs4_stid *stp;
+ 
+-	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache);
++	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
++					nfsd4_free_layout_stateid);
+ 	if (!stp)
+ 		return NULL;
+-	stp->sc_free = nfsd4_free_layout_stateid;
++
+ 	get_nfs4_file(fp);
+ 	stp->sc_file = fp;
+ 
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index bb6c324f1f3d..22e9799323ad 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -553,8 +553,8 @@ out:
+ 	return co;
+ }
+ 
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+-					 struct kmem_cache *slab)
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++				  void (*sc_free)(struct nfs4_stid *))
+ {
+ 	struct nfs4_stid *stid;
+ 	int new_id;
+@@ -570,6 +570,8 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+ 	idr_preload_end();
+ 	if (new_id < 0)
+ 		goto out_free;
++
++	stid->sc_free = sc_free;
+ 	stid->sc_client = cl;
+ 	stid->sc_stateid.si_opaque.so_id = new_id;
+ 	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
+@@ -594,15 +596,12 @@ out_free:
+ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
+ {
+ 	struct nfs4_stid *stid;
+-	struct nfs4_ol_stateid *stp;
+ 
+-	stid = nfs4_alloc_stid(clp, stateid_slab);
++	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
+ 	if (!stid)
+ 		return NULL;
+ 
+-	stp = openlockstateid(stid);
+-	stp->st_stid.sc_free = nfs4_free_ol_stateid;
+-	return stp;
++	return openlockstateid(stid);
+ }
+ 
+ static void nfs4_free_deleg(struct nfs4_stid *stid)
+@@ -700,11 +699,10 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
+ 		goto out_dec;
+ 	if (delegation_blocked(&current_fh->fh_handle))
+ 		goto out_dec;
+-	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
++	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
+ 	if (dp == NULL)
+ 		goto out_dec;
+ 
+-	dp->dl_stid.sc_free = nfs4_free_deleg;
+ 	/*
+ 	 * delegation seqid's are never incremented.  The 4.1 special
+ 	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
+@@ -5309,7 +5307,6 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ 	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+ 	get_nfs4_file(fp);
+ 	stp->st_stid.sc_file = fp;
+-	stp->st_stid.sc_free = nfs4_free_lock_stateid;
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = open_stp->st_deny_bmap;
+ 	stp->st_openstp = open_stp;
+@@ -5352,7 +5349,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+ 	lst = find_lock_stateid(lo, fi);
+ 	if (lst == NULL) {
+ 		spin_unlock(&clp->cl_lock);
+-		ns = nfs4_alloc_stid(clp, stateid_slab);
++		ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+ 		if (ns == NULL)
+ 			return NULL;
+ 
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index 67685b6cfef3..fa2430e3d6a8 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -589,8 +589,8 @@ extern __be32 nfs4_preprocess_stateid_op(struct net *net,
+ __be32 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 		     stateid_t *stateid, unsigned char typemask,
+ 		     struct nfs4_stid **s, struct nfsd_net *nn);
+-struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
+-		struct kmem_cache *slab);
++struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
++				  void (*sc_free)(struct nfs4_stid *));
+ void nfs4_unhash_stid(struct nfs4_stid *s);
+ void nfs4_put_stid(struct nfs4_stid *s);
+ void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *);
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 3623ab6fa97f..4a4ac9386d4d 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -3322,6 +3322,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
+ 	mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
+ 	     lockres->l_level, new_level);
+ 
++	/*
++	 * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always
++	 * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that
++	 * we can recover correctly from node failure. Otherwise, we may get
++	 * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
++	 */
++	if (!ocfs2_is_o2cb_active() &&
++	    lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
++		lvb = 1;
++
+ 	if (lvb)
+ 		dlm_flags |= DLM_LKF_VALBLK;
+ 
+diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
+index 5d965e83bd43..783bcdce5666 100644
+--- a/fs/ocfs2/stackglue.c
++++ b/fs/ocfs2/stackglue.c
+@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
+  */
+ static struct ocfs2_stack_plugin *active_stack;
+ 
++inline int ocfs2_is_o2cb_active(void)
++{
++	return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
++}
++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
++
+ static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
+ {
+ 	struct ocfs2_stack_plugin *p;
+diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
+index 66334a30cea8..e1b30931974d 100644
+--- a/fs/ocfs2/stackglue.h
++++ b/fs/ocfs2/stackglue.h
+@@ -298,4 +298,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
+ int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
+ void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+ 
++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
++int ocfs2_is_o2cb_active(void);
++
+ #endif  /* STACKGLUE_H */
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 0dea606074c7..d38541256287 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -703,7 +703,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
+ 	ctl_dir = container_of(head, struct ctl_dir, header);
+ 
+ 	if (!dir_emit_dots(file, ctx))
+-		return 0;
++		goto out;
+ 
+ 	pos = 2;
+ 
+@@ -713,6 +713,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
+ 			break;
+ 		}
+ 	}
++out:
+ 	sysctl_head_finish(head);
+ 	return 0;
+ }
+diff --git a/fs/splice.c b/fs/splice.c
+index e7522c486068..6ec6054df138 100644
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -211,6 +211,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
+ 			buf->len = spd->partial[page_nr].len;
+ 			buf->private = spd->partial[page_nr].private;
+ 			buf->ops = spd->ops;
++			buf->flags = 0;
+ 			if (spd->flags & SPLICE_F_GIFT)
+ 				buf->flags |= PIPE_BUF_FLAG_GIFT;
+ 
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 957f5757f374..a18574237034 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -34,6 +34,11 @@
+ #include <linux/slab.h>
+ #include "ubifs.h"
+ 
++static int try_read_node(const struct ubifs_info *c, void *buf, int type,
++			 int len, int lnum, int offs);
++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
++			      struct ubifs_zbranch *zbr, void *node);
++
+ /*
+  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
+  * @NAME_LESS: name corresponding to the first argument is less than second
+@@ -403,7 +408,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
+ 		return 0;
+ 	}
+ 
+-	err = ubifs_tnc_read_node(c, zbr, node);
++	if (c->replaying) {
++		err = fallible_read_node(c, &zbr->key, zbr, node);
++		/*
++		 * When the node was not found, return -ENOENT, 0 otherwise.
++		 * Negative return codes stay as-is.
++		 */
++		if (err == 0)
++			err = -ENOENT;
++		else if (err == 1)
++			err = 0;
++	} else {
++		err = ubifs_tnc_read_node(c, zbr, node);
++	}
+ 	if (err)
+ 		return err;
+ 
+@@ -2767,7 +2784,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
+ 	if (nm->name) {
+ 		if (err) {
+ 			/* Handle collisions */
+-			err = resolve_collision(c, key, &znode, &n, nm);
++			if (c->replaying)
++				err = fallible_resolve_collision(c, key, &znode, &n,
++							 nm, 0);
++			else
++				err = resolve_collision(c, key, &znode, &n, nm);
+ 			dbg_tnc("rc returned %d, znode %p, n %d",
+ 				err, znode, n);
+ 			if (unlikely(err < 0))
+diff --git a/include/linux/capability.h b/include/linux/capability.h
+index af9f0b9e80e6..99d19db09908 100644
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -244,6 +244,7 @@ static inline bool ns_capable(struct user_namespace *ns, int cap)
+ #endif /* CONFIG_MULTIUSER */
+ extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
+ extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
++extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);
+ 
+ /* audit system wants to get cap info from files as well */
+ extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
+diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
+index 59915ea5373c..a91b3b75da0f 100644
+--- a/include/linux/cpumask.h
++++ b/include/linux/cpumask.h
+@@ -556,7 +556,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
+ static inline int cpumask_parse_user(const char __user *buf, int len,
+ 				     struct cpumask *dstp)
+ {
+-	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
++	return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+ 
+ /**
+@@ -571,7 +571,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
+ 				     struct cpumask *dstp)
+ {
+ 	return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
+-				     nr_cpu_ids);
++				     nr_cpumask_bits);
+ }
+ 
+ /**
+@@ -586,7 +586,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+ 	char *nl = strchr(buf, '\n');
+ 	unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
+ 
+-	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
++	return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+ 
+ /**
+@@ -598,7 +598,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
+  */
+ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
+ {
+-	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
++	return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
+ }
+ 
+ /**
+diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
+index 089f70f83e97..23da3af459fe 100644
+--- a/include/linux/jump_label_ratelimit.h
++++ b/include/linux/jump_label_ratelimit.h
+@@ -14,6 +14,7 @@ struct static_key_deferred {
+ 
+ #ifdef HAVE_JUMP_LABEL
+ extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
++extern void static_key_deferred_flush(struct static_key_deferred *key);
+ extern void
+ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+ 
+@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ 	STATIC_KEY_CHECK_USE();
+ 	static_key_slow_dec(&key->key);
+ }
++static inline void static_key_deferred_flush(struct static_key_deferred *key)
++{
++	STATIC_KEY_CHECK_USE();
++}
+ static inline void
+ jump_label_rate_limit(struct static_key_deferred *key,
+ 		unsigned long rl)
+diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
+index 32201c269890..84662efaf657 100644
+--- a/include/linux/nfs4.h
++++ b/include/linux/nfs4.h
+@@ -265,7 +265,7 @@ enum nfsstat4 {
+ 
+ static inline bool seqid_mutating_err(u32 err)
+ {
+-	/* rfc 3530 section 8.1.5: */
++	/* See RFC 7530, section 9.1.7 */
+ 	switch (err) {
+ 	case NFS4ERR_STALE_CLIENTID:
+ 	case NFS4ERR_STALE_STATEID:
+@@ -274,6 +274,7 @@ static inline bool seqid_mutating_err(u32 err)
+ 	case NFS4ERR_BADXDR:
+ 	case NFS4ERR_RESOURCE:
+ 	case NFS4ERR_NOFILEHANDLE:
++	case NFS4ERR_MOVED:
+ 		return false;
+ 	};
+ 	return true;
+diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
+index 12c9b485beb7..abd7c01c84db 100644
+--- a/include/linux/percpu-refcount.h
++++ b/include/linux/percpu-refcount.h
+@@ -206,7 +206,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ {
+ 	unsigned long __percpu *percpu_count;
+-	int ret;
++	bool ret;
+ 
+ 	rcu_read_lock_sched();
+ 
+@@ -240,7 +240,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
+ {
+ 	unsigned long __percpu *percpu_count;
+-	int ret = false;
++	bool ret = false;
+ 
+ 	rcu_read_lock_sched();
+ 
+diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
+index 998c098dd172..e615c7553959 100644
+--- a/include/linux/ptrace.h
++++ b/include/linux/ptrace.h
+@@ -19,7 +19,6 @@
+ #define PT_SEIZED	0x00010000	/* SEIZE used, enable new behavior */
+ #define PT_PTRACED	0x00000001
+ #define PT_DTRACE	0x00000002	/* delayed trace (used on m68k, i386) */
+-#define PT_PTRACE_CAP	0x00000004	/* ptracer can follow suid-exec */
+ 
+ #define PT_OPT_FLAG_SHIFT	3
+ /* PT_TRACE_* event enable flags */
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 9e39deaeddd6..c68ecb17a7e1 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1454,6 +1454,7 @@ struct task_struct {
+ 	struct list_head cpu_timers[3];
+ 
+ /* process credentials */
++	const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
+ 	const struct cred __rcu *real_cred; /* objective and real subjective task
+ 					 * credentials (COW) */
+ 	const struct cred __rcu *cred;	/* effective (overridable) subjective task
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index ee29cb43470f..d4bb3f429645 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -179,5 +179,6 @@ const char	*rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
+ int		rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
+ 
+ const char *rpc_proc_name(const struct rpc_task *task);
++void rpc_cleanup_clids(void);
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_SUNRPC_CLNT_H */
+diff --git a/kernel/capability.c b/kernel/capability.c
+index 45432b54d5c6..022df097a6bc 100644
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -447,3 +447,23 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
+ 		kgid_has_mapping(ns, inode->i_gid);
+ }
+ EXPORT_SYMBOL(capable_wrt_inode_uidgid);
++
++/**
++ * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
++ * @tsk: The task that may be ptraced
++ * @ns: The user namespace to search for CAP_SYS_PTRACE in
++ *
++ * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
++ * in the specified user namespace.
++ */
++bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
++{
++	int ret = 0;  /* An absent tracer adds no restrictions */
++	const struct cred *cred;
++	rcu_read_lock();
++	cred = rcu_dereference(tsk->ptracer_cred);
++	if (cred)
++		ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
++	rcu_read_unlock();
++	return (ret == 0);
++}
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6da64f0d0630..34d4db5cd984 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -198,8 +198,11 @@ void update_perf_cpu_limits(void)
+ 	u64 tmp = perf_sample_period_ns;
+ 
+ 	tmp *= sysctl_perf_cpu_time_max_percent;
+-	do_div(tmp, 100);
+-	ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
++	tmp = div_u64(tmp, 100);
++	if (!tmp)
++		tmp = 1;
++
++	WRITE_ONCE(perf_sample_allowed_ns, tmp);
+ }
+ 
+ static int perf_rotate_context(struct perf_cpu_context *cpuctx);
+@@ -213,6 +216,13 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
+ 	if (ret || !write)
+ 		return ret;
+ 
++	/*
++	 * If throttling is disabled don't allow the write:
++	 */
++	if (sysctl_perf_cpu_time_max_percent == 100 ||
++	    sysctl_perf_cpu_time_max_percent == 0)
++		return -EINVAL;
++
+ 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+ 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
+ 	update_perf_cpu_limits();
+@@ -226,12 +236,19 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ 				void __user *buffer, size_t *lenp,
+ 				loff_t *ppos)
+ {
+-	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
++	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ 
+ 	if (ret || !write)
+ 		return ret;
+ 
+-	update_perf_cpu_limits();
++	if (sysctl_perf_cpu_time_max_percent == 100 ||
++	    sysctl_perf_cpu_time_max_percent == 0) {
++		printk(KERN_WARNING
++		       "perf: Dynamic interrupt throttling disabled, can hang your system!\n");
++		WRITE_ONCE(perf_sample_allowed_ns, 0);
++	} else {
++		update_perf_cpu_limits();
++	}
+ 
+ 	return 0;
+ }
+@@ -245,62 +262,68 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+ #define NR_ACCUMULATED_SAMPLES 128
+ static DEFINE_PER_CPU(u64, running_sample_length);
+ 
++static u64 __report_avg;
++static u64 __report_allowed;
++
+ static void perf_duration_warn(struct irq_work *w)
+ {
+-	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+-	u64 avg_local_sample_len;
+-	u64 local_samples_len;
+-
+-	local_samples_len = __this_cpu_read(running_sample_length);
+-	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+-
+ 	printk_ratelimited(KERN_WARNING
+-			"perf interrupt took too long (%lld > %lld), lowering "
+-			"kernel.perf_event_max_sample_rate to %d\n",
+-			avg_local_sample_len, allowed_ns >> 1,
+-			sysctl_perf_event_sample_rate);
++		"perf: interrupt took too long (%lld > %lld), lowering "
++		"kernel.perf_event_max_sample_rate to %d\n",
++		__report_avg, __report_allowed,
++		sysctl_perf_event_sample_rate);
+ }
+ 
+ static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
+ 
+ void perf_sample_event_took(u64 sample_len_ns)
+ {
+-	u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+-	u64 avg_local_sample_len;
+-	u64 local_samples_len;
++	u64 max_len = READ_ONCE(perf_sample_allowed_ns);
++	u64 running_len;
++	u64 avg_len;
++	u32 max;
+ 
+-	if (allowed_ns == 0)
++	if (max_len == 0)
+ 		return;
+ 
+-	/* decay the counter by 1 average sample */
+-	local_samples_len = __this_cpu_read(running_sample_length);
+-	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
+-	local_samples_len += sample_len_ns;
+-	__this_cpu_write(running_sample_length, local_samples_len);
++	/* Decay the counter by 1 average sample. */
++	running_len = __this_cpu_read(running_sample_length);
++	running_len -= running_len/NR_ACCUMULATED_SAMPLES;
++	running_len += sample_len_ns;
++	__this_cpu_write(running_sample_length, running_len);
+ 
+ 	/*
+-	 * note: this will be biased artifically low until we have
+-	 * seen NR_ACCUMULATED_SAMPLES.  Doing it this way keeps us
++	 * Note: this will be biased artifically low until we have
++	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
+ 	 * from having to maintain a count.
+ 	 */
+-	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+-
+-	if (avg_local_sample_len <= allowed_ns)
++	avg_len = running_len/NR_ACCUMULATED_SAMPLES;
++	if (avg_len <= max_len)
+ 		return;
+ 
+-	if (max_samples_per_tick <= 1)
+-		return;
++	__report_avg = avg_len;
++	__report_allowed = max_len;
+ 
+-	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
+-	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
+-	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
++	/*
++	 * Compute a throttle threshold 25% below the current duration.
++	 */
++	avg_len += avg_len / 4;
++	max = (TICK_NSEC / 100) * sysctl_perf_cpu_time_max_percent;
++	if (avg_len < max)
++		max /= (u32)avg_len;
++	else
++		max = 1;
+ 
+-	update_perf_cpu_limits();
++	WRITE_ONCE(perf_sample_allowed_ns, avg_len);
++	WRITE_ONCE(max_samples_per_tick, max);
++
++	sysctl_perf_event_sample_rate = max * HZ;
++	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
+ 
+ 	if (!irq_work_queue(&perf_duration_work)) {
+-		early_printk("perf interrupt took too long (%lld > %lld), lowering "
++		early_printk("perf: interrupt took too long (%lld > %lld), lowering "
+ 			     "kernel.perf_event_max_sample_rate to %d\n",
+-			     avg_local_sample_len, allowed_ns >> 1,
++			     __report_avg, __report_allowed,
+ 			     sysctl_perf_event_sample_rate);
+ 	}
+ }
+@@ -5833,6 +5856,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ 	char *buf = NULL;
+ 	char *name;
+ 
++	if (vma->vm_flags & VM_READ)
++		prot |= PROT_READ;
++	if (vma->vm_flags & VM_WRITE)
++		prot |= PROT_WRITE;
++	if (vma->vm_flags & VM_EXEC)
++		prot |= PROT_EXEC;
++
++	if (vma->vm_flags & VM_MAYSHARE)
++		flags = MAP_SHARED;
++	else
++		flags = MAP_PRIVATE;
++
++	if (vma->vm_flags & VM_DENYWRITE)
++		flags |= MAP_DENYWRITE;
++	if (vma->vm_flags & VM_MAYEXEC)
++		flags |= MAP_EXECUTABLE;
++	if (vma->vm_flags & VM_LOCKED)
++		flags |= MAP_LOCKED;
++	if (vma->vm_flags & VM_HUGETLB)
++		flags |= MAP_HUGETLB;
++
+ 	if (file) {
+ 		struct inode *inode;
+ 		dev_t dev;
+@@ -5859,27 +5903,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
+ 		maj = MAJOR(dev);
+ 		min = MINOR(dev);
+ 
+-		if (vma->vm_flags & VM_READ)
+-			prot |= PROT_READ;
+-		if (vma->vm_flags & VM_WRITE)
+-			prot |= PROT_WRITE;
+-		if (vma->vm_flags & VM_EXEC)
+-			prot |= PROT_EXEC;
+-
+-		if (vma->vm_flags & VM_MAYSHARE)
+-			flags = MAP_SHARED;
+-		else
+-			flags = MAP_PRIVATE;
+-
+-		if (vma->vm_flags & VM_DENYWRITE)
+-			flags |= MAP_DENYWRITE;
+-		if (vma->vm_flags & VM_MAYEXEC)
+-			flags |= MAP_EXECUTABLE;
+-		if (vma->vm_flags & VM_LOCKED)
+-			flags |= MAP_LOCKED;
+-		if (vma->vm_flags & VM_HUGETLB)
+-			flags |= MAP_HUGETLB;
+-
+ 		goto got_name;
+ 	} else {
+ 		if (vma->vm_ops && vma->vm_ops->name) {
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2214b70f1910..01ee2e8859c4 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -3074,4 +3074,4 @@ static int __init futex_init(void)
+ 
+ 	return 0;
+ }
+-__initcall(futex_init);
++core_initcall(futex_init);
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 9019f15deab2..7d4d0a917d13 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -116,6 +116,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+ 
++void static_key_deferred_flush(struct static_key_deferred *key)
++{
++	STATIC_KEY_CHECK_USE();
++	flush_delayed_work(&key->work);
++}
++EXPORT_SYMBOL_GPL(static_key_deferred_flush);
++
+ void jump_label_rate_limit(struct static_key_deferred *key,
+ 		unsigned long rl)
+ {
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3c1aca0c3543..59dc17ba820b 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1394,7 +1394,7 @@ static void call_console_drivers(int level, const char *text, size_t len)
+ {
+ 	struct console *con;
+ 
+-	trace_console(text, len);
++	trace_console_rcuidle(text, len);
+ 
+ 	if (level >= console_loglevel && !ignore_loglevel)
+ 		return;
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 9650e7aee267..c67aab541ee2 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -40,6 +40,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+ 	BUG_ON(!list_empty(&child->ptrace_entry));
+ 	list_add(&child->ptrace_entry, &new_parent->ptraced);
+ 	child->parent = new_parent;
++	rcu_read_lock();
++	child->ptracer_cred = get_cred(__task_cred(new_parent));
++	rcu_read_unlock();
+ }
+ 
+ /**
+@@ -72,11 +75,15 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+  */
+ void __ptrace_unlink(struct task_struct *child)
+ {
++	const struct cred *old_cred;
+ 	BUG_ON(!child->ptrace);
+ 
+ 	child->ptrace = 0;
+ 	child->parent = child->real_parent;
+ 	list_del_init(&child->ptrace_entry);
++	old_cred = child->ptracer_cred;
++	child->ptracer_cred = NULL;
++	put_cred(old_cred);
+ 
+ 	spin_lock(&child->sighand->siglock);
+ 
+@@ -366,10 +373,6 @@ static int ptrace_attach(struct task_struct *task, long request,
+ 
+ 	if (seize)
+ 		flags |= PT_SEIZED;
+-	rcu_read_lock();
+-	if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
+-		flags |= PT_PTRACE_CAP;
+-	rcu_read_unlock();
+ 	task->ptrace = flags;
+ 
+ 	__ptrace_link(task, current);
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index cebbff5f34fe..1431089b8a67 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -2349,6 +2349,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
+ 				break;
+ 			if (neg)
+ 				continue;
++			val = convmul * val / convdiv;
+ 			if ((min && val < *min) || (max && val > *max))
+ 				continue;
+ 			*i = val;
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 1ffef05f1c1f..fc5165c744a8 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1485,6 +1485,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+ 
+ 		cond_resched();
+ find_page:
++		if (fatal_signal_pending(current)) {
++			error = -EINTR;
++			goto out;
++		}
++
+ 		page = find_get_page(mapping, index);
+ 		if (!page) {
+ 			page_cache_sync_readahead(mapping,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 88b472bec71d..2f47f0332a2c 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1387,23 +1387,32 @@ free:
+ }
+ 
+ /*
+- * When releasing a hugetlb pool reservation, any surplus pages that were
+- * allocated to satisfy the reservation must be explicitly freed if they were
+- * never used.
+- * Called with hugetlb_lock held.
++ * This routine has two main purposes:
++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
++ *    in unused_resv_pages.  This corresponds to the prior adjustments made
++ *    to the associated reservation map.
++ * 2) Free any unused surplus pages that may have been allocated to satisfy
++ *    the reservation.  As many as unused_resv_pages may be freed.
++ *
++ * Called with hugetlb_lock held.  However, the lock could be dropped (and
++ * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
++ * we must make sure nobody else can claim pages we are in the process of
++ * freeing.  Do this by ensuring resv_huge_page always is greater than the
++ * number of huge pages we plan to free when dropping the lock.
+  */
+ static void return_unused_surplus_pages(struct hstate *h,
+ 					unsigned long unused_resv_pages)
+ {
+ 	unsigned long nr_pages;
+ 
+-	/* Uncommit the reservation */
+-	h->resv_huge_pages -= unused_resv_pages;
+-
+ 	/* Cannot return gigantic pages currently */
+ 	if (hstate_is_gigantic(h))
+-		return;
++		goto out;
+ 
++	/*
++	 * Part (or even all) of the reservation could have been backed
++	 * by pre-allocated pages. Only free surplus pages.
++	 */
+ 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
+ 
+ 	/*
+@@ -1413,12 +1422,22 @@ static void return_unused_surplus_pages(struct hstate *h,
+ 	 * when the nodes with surplus pages have no free pages.
+ 	 * free_pool_huge_page() will balance the the freed pages across the
+ 	 * on-line nodes with memory and will handle the hstate accounting.
++	 *
++	 * Note that we decrement resv_huge_pages as we free the pages.  If
++	 * we drop the lock, resv_huge_pages will still be sufficiently large
++	 * to cover subsequent pages we may free.
+ 	 */
+ 	while (nr_pages--) {
++		h->resv_huge_pages--;
++		unused_resv_pages--;
+ 		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
+-			break;
++			goto out;
+ 		cond_resched_lock(&hugetlb_lock);
+ 	}
++
++out:
++	/* Fully uncommit the reservation */
++	h->resv_huge_pages -= unused_resv_pages;
+ }
+ 
+ /*
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 99d4c1d0b858..18c63b754e49 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -2009,8 +2009,8 @@ retry_cpuset:
+ 
+ 	nmask = policy_nodemask(gfp, pol);
+ 	zl = policy_zonelist(gfp, pol, node);
+-	mpol_cond_put(pol);
+ 	page = __alloc_pages_nodemask(gfp, order, zl, nmask);
++	mpol_cond_put(pol);
+ out:
+ 	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
+ 		goto retry_cpuset;
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index a1ba6875c2a2..2d9ffc2ac376 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -705,14 +705,23 @@ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
+ 
+ static void bcm_remove_op(struct bcm_op *op)
+ {
+-	hrtimer_cancel(&op->timer);
+-	hrtimer_cancel(&op->thrtimer);
+-
+-	if (op->tsklet.func)
+-		tasklet_kill(&op->tsklet);
++	if (op->tsklet.func) {
++		while (test_bit(TASKLET_STATE_SCHED, &op->tsklet.state) ||
++		       test_bit(TASKLET_STATE_RUN, &op->tsklet.state) ||
++		       hrtimer_active(&op->timer)) {
++			hrtimer_cancel(&op->timer);
++			tasklet_kill(&op->tsklet);
++		}
++	}
+ 
+-	if (op->thrtsklet.func)
+-		tasklet_kill(&op->thrtsklet);
++	if (op->thrtsklet.func) {
++		while (test_bit(TASKLET_STATE_SCHED, &op->thrtsklet.state) ||
++		       test_bit(TASKLET_STATE_RUN, &op->thrtsklet.state) ||
++		       hrtimer_active(&op->thrtimer)) {
++			hrtimer_cancel(&op->thrtimer);
++			tasklet_kill(&op->thrtsklet);
++		}
++	}
+ 
+ 	if ((op->frames) && (op->frames != &op->sframe))
+ 		kfree(op->frames);
+diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+index eeeba5adee6d..2410d557ae39 100644
+--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
++++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
+@@ -260,7 +260,7 @@ static int gssx_dec_option_array(struct xdr_stream *xdr,
+ 	if (!oa->data)
+ 		return -ENOMEM;
+ 
+-	creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL);
++	creds = kzalloc(sizeof(struct svc_cred), GFP_KERNEL);
+ 	if (!creds) {
+ 		kfree(oa->data);
+ 		return -ENOMEM;
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 4605dc73def6..033fec307528 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1481,7 +1481,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
+ 	case RPC_GSS_PROC_DESTROY:
+ 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
+ 			goto auth_err;
+-		rsci->h.expiry_time = get_seconds();
++		rsci->h.expiry_time = seconds_since_boot();
+ 		set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ 		if (resv->iov_len + 4 > PAGE_SIZE)
+ 			goto drop;
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 16e831dcfde0..c67d3627cf01 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -337,6 +337,11 @@ out:
+ 
+ static DEFINE_IDA(rpc_clids);
+ 
++void rpc_cleanup_clids(void)
++{
++	ida_destroy(&rpc_clids);
++}
++
+ static int rpc_alloc_clid(struct rpc_clnt *clnt)
+ {
+ 	int clid;
+diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
+index ee5d3d253102..3142f38d1104 100644
+--- a/net/sunrpc/sunrpc_syms.c
++++ b/net/sunrpc/sunrpc_syms.c
+@@ -119,6 +119,7 @@ out:
+ static void __exit
+ cleanup_sunrpc(void)
+ {
++	rpc_cleanup_clids();
+ 	rpcauth_remove_module();
+ 	cleanup_socket_xprt();
+ 	svc_cleanup_xprt_sock();
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 978d7f91ca91..81203bbb2eef 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12784,13 +12784,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 
+ 	list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
+ 		bool schedule_destroy_work = false;
+-		bool schedule_scan_stop = false;
+ 		struct cfg80211_sched_scan_request *sched_scan_req =
+ 			rcu_dereference(rdev->sched_scan_req);
+ 
+ 		if (sched_scan_req && notify->portid &&
+-		    sched_scan_req->owner_nlportid == notify->portid)
+-			schedule_scan_stop = true;
++		    sched_scan_req->owner_nlportid == notify->portid) {
++			sched_scan_req->owner_nlportid = 0;
++
++			if (rdev->ops->sched_scan_stop &&
++			    rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
++				schedule_work(&rdev->sched_scan_stop_wk);
++		}
+ 
+ 		list_for_each_entry_rcu(wdev, &rdev->wdev_list, list) {
+ 			cfg80211_mlme_unregister_socket(wdev, notify->portid);
+@@ -12821,12 +12825,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 				spin_unlock(&rdev->destroy_list_lock);
+ 				schedule_work(&rdev->destroy_work);
+ 			}
+-		} else if (schedule_scan_stop) {
+-			sched_scan_req->owner_nlportid = 0;
+-
+-			if (rdev->ops->sched_scan_stop &&
+-			    rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+-				schedule_work(&rdev->sched_scan_stop_wk);
+ 		}
+ 	}
+ 
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 280235cc3a98..0034eb420b0e 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -5626,7 +5626,7 @@ static int selinux_setprocattr(struct task_struct *p,
+ 		return error;
+ 
+ 	/* Obtain a SID for the context, if one was specified. */
+-	if (size && str[1] && str[1] != '\n') {
++	if (size && str[0] && str[0] != '\n') {
+ 		if (str[size-1] == '\n') {
+ 			str[size-1] = 0;
+ 			size--;
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index c850345c43b5..dfa5156f3585 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ {
+ 	unsigned long flags;
+ 	struct snd_seq_event_cell *ptr;
+-	int max_count = 5 * HZ;
+ 
+ 	if (snd_BUG_ON(!pool))
+ 		return -EINVAL;
+@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
+ 	if (waitqueue_active(&pool->output_sleep))
+ 		wake_up(&pool->output_sleep);
+ 
+-	while (atomic_read(&pool->counter) > 0) {
+-		if (max_count == 0) {
+-			pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
+-			break;
+-		}
++	while (atomic_read(&pool->counter) > 0)
+ 		schedule_timeout_uninterruptible(1);
+-		max_count--;
+-	}
+ 	
+ 	/* release all resources */
+ 	spin_lock_irqsave(&pool->lock, flags);
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index 77ec21420355..f676ae53c477 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
+ 	}
+ }
+ 
++static void queue_use(struct snd_seq_queue *queue, int client, int use);
++
+ /* allocate a new queue -
+  * return queue index value or negative value for error
+  */
+@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+ 	if (q == NULL)
+ 		return -ENOMEM;
+ 	q->info_flags = info_flags;
++	queue_use(q, client, 1);
+ 	if (queue_list_add(q) < 0) {
+ 		queue_delete(q);
+ 		return -ENOMEM;
+ 	}
+-	snd_seq_queue_use(q->queue, client, 1); /* use this queue */
+ 	return q->queue;
+ }
+ 
+@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
+ 	return result;
+ }
+ 
+-
+-/* use or unuse this queue -
+- * if it is the first client, starts the timer.
+- * if it is not longer used by any clients, stop the timer.
+- */
+-int snd_seq_queue_use(int queueid, int client, int use)
++/* use or unuse this queue */
++static void queue_use(struct snd_seq_queue *queue, int client, int use)
+ {
+-	struct snd_seq_queue *queue;
+-
+-	queue = queueptr(queueid);
+-	if (queue == NULL)
+-		return -EINVAL;
+-	mutex_lock(&queue->timer_mutex);
+ 	if (use) {
+ 		if (!test_and_set_bit(client, queue->clients_bitmap))
+ 			queue->clients++;
+@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
+ 	} else {
+ 		snd_seq_timer_close(queue);
+ 	}
++}
++
++/* use or unuse this queue -
++ * if it is the first client, starts the timer.
++ * if it is not longer used by any clients, stop the timer.
++ */
++int snd_seq_queue_use(int queueid, int client, int use)
++{
++	struct snd_seq_queue *queue;
++
++	queue = queueptr(queueid);
++	if (queue == NULL)
++		return -EINVAL;
++	mutex_lock(&queue->timer_mutex);
++	queue_use(queue, client, use);
+ 	mutex_unlock(&queue->timer_mutex);
+ 	queuefree(queue);
+ 	return 0;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b93458698335..3adf376092dd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2225,6 +2225,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
+ 	SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
+ 	SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
++	SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+@@ -6939,6 +6940,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+ 	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
++	SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 4a083433944e..2c71e5682716 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1132,6 +1132,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++	case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
+ 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index f76830643086..c98c53f272c1 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -70,7 +70,7 @@ ifdef INSTALL_PATH
+ 	done;
+ 
+ 	@# Ask all targets to emit their test scripts
+-	echo "#!/bin/bash" > $(ALL_SCRIPT)
++	echo "#!/bin/sh" > $(ALL_SCRIPT)
+ 	echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
+ 	echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
+ 
+diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
+index c09a682df56a..16058bbea7a8 100755
+--- a/tools/testing/selftests/net/run_netsocktests
++++ b/tools/testing/selftests/net/run_netsocktests
+@@ -1,4 +1,4 @@
+-#!/bin/bash
++#!/bin/sh
+ 
+ echo "--------------------"
+ echo "running socket test"


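One design choice in the nfsd hunks above deserves a note: nfs4_alloc_stid() now
takes the sc_free destructor as a parameter instead of letting each caller assign
stp->sc_free after allocation, so every stateid carries its release function from
the moment it exists and error paths can free a partially initialized stateid
through the normal put path. A minimal sketch of the pattern, using simplified
hypothetical types rather than the real nfsd structures:

	#include <stdlib.h>

	struct stid {
		int id;
		void (*sc_free)(struct stid *); /* set at allocation, never NULL afterwards */
	};

	/* The allocator takes the destructor as a required argument, so there
	 * is no window where the object exists without a release callback. */
	static struct stid *alloc_stid(void (*sc_free)(struct stid *))
	{
		struct stid *s = calloc(1, sizeof(*s));
		if (!s)
			return NULL;
		s->sc_free = sc_free;
		return s;
	}

	static void put_stid(struct stid *s)
	{
		s->sc_free(s); /* safe even if later setup of s failed */
	}

The kernel version additionally registers the stateid in an IDR between
allocation and first use, which is exactly the kind of intermediate step the
refactor protects.
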
^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-03-02 16:31 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-03-02 16:31 UTC (permalink / raw
  To: gentoo-commits

commit:     dd4e296ce656f9d84cda4d366ddadd9404b7039e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar  2 16:16:08 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  2 16:31:51 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=dd4e296c

Enable crypto API for systemd, as it is required for systemd versions >= 233. See bug #611368.

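The three CRYPTO_* selects added below exist because, reportedly, systemd 233
began computing digests through the kernel's AF_ALG userspace crypto interface
instead of linking a crypto library. A minimal sketch of that interface,
assuming a kernel built with CRYPTO_SHA256 and CRYPTO_USER_API_HASH (the
program and its error handling are illustrative, not systemd's actual code):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/if_alg.h>

	int main(void)
	{
		struct sockaddr_alg sa = {
			.salg_family = AF_ALG,
			.salg_type   = "hash",
			.salg_name   = "sha256",
		};
		unsigned char digest[32];
		int tfm, op;

		/* One socket names the transform; accept() yields an operation fd. */
		tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
		if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
			return 1;
		op = accept(tfm, NULL, 0);
		if (op < 0)
			return 1;

		/* Write the message, then read back the 32-byte SHA-256 digest. */
		if (send(op, "hello", 5, 0) < 0)
			return 1;
		if (read(op, digest, sizeof(digest)) != sizeof(digest))
			return 1;
		for (int i = 0; i < 32; i++)
			printf("%02x", digest[i]);
		printf("\n");
		return 0;
	}

Without these options the socket() or bind() step fails, which is how the
missing kernel support surfaces to userspace at runtime.
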
 4567_distro-Gentoo-Kconfig.patch | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 4a88040..5555b8a 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null	2017-02-18 04:25:56.900821893 -0500
-+++ b/distro/Kconfig	2017-02-18 10:41:16.512328155 -0500
-@@ -0,0 +1,142 @@
+--- /dev/null	2017-03-02 01:55:04.096566155 -0500
++++ b/distro/Kconfig	2017-03-02 11:12:05.049448255 -0500
+@@ -0,0 +1,145 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -114,6 +114,9 @@
 +	select BLK_DEV_BSG
 +	select CGROUPS
 +	select CHECKPOINT_RESTORE
++	select CRYPTO_HMAC
++	select CRYPTO_SHA256
++	select CRYPTO_USER_API_HASH
 +	select DEVPTS_MULTIPLE_INSTANCES
 +	select DMIID if X86_32 || X86_64 || X86
 +	select EPOLL


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-03-02 16:31 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-03-02 16:31 UTC (permalink / raw
  To: gentoo-commits

commit:     590c1d9ccf1174bc04ee8c2e6156fe06ddac9db4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Feb 18 20:36:37 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar  2 16:30:03 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=590c1d9c

For GENTOO_LINUX_INIT_SYSTEMD, don't add DMIID for non-X86 architectures. See bug #609590.

 4567_distro-Gentoo-Kconfig.patch | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index acb0972..4a88040 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,8 +7,8 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null	2016-11-15 00:56:18.320838834 -0500
-+++ b/distro/Kconfig	2016-11-16 06:24:29.457357409 -0500
+--- /dev/null	2017-02-18 04:25:56.900821893 -0500
++++ b/distro/Kconfig	2017-02-18 10:41:16.512328155 -0500
 @@ -0,0 +1,142 @@
 +menu "Gentoo Linux"
 +
@@ -115,7 +115,7 @@
 +	select CGROUPS
 +	select CHECKPOINT_RESTORE
 +	select DEVPTS_MULTIPLE_INSTANCES
-+	select DMIID
++	select DMIID if X86_32 || X86_64 || X86
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-02-24 16:11 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2017-02-24 16:11 UTC (permalink / raw
  To: gentoo-commits

commit:     6ed8b62e018b2d75b73c9d848d64b1394c1e362a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Feb 24 16:11:39 2017 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Feb 24 16:11:39 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6ed8b62e

dccp: fix freeing skb too early for IPV6_RECVPKTINFO. CVE-2017-6074

 0000_README                                      |  4 ++
 1520_CVE-2017-6074-dccp-fix-early-skb-free.patch | 47 ++++++++++++++++++++++++
 2 files changed, 51 insertions(+)

diff --git a/0000_README b/0000_README
index 061bd5e..9c6c940 100644
--- a/0000_README
+++ b/0000_README
@@ -203,6 +203,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_CVE-2017-6074-dccp-fix-early-skb-free.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=610600
+Desc:   dccp: fix freeing skb too early for IPV6_RECVPKTINFO. CVE-2017-6074
+
 Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.

diff --git a/1520_CVE-2017-6074-dccp-fix-early-skb-free.patch b/1520_CVE-2017-6074-dccp-fix-early-skb-free.patch
new file mode 100644
index 0000000..433fd4b
--- /dev/null
+++ b/1520_CVE-2017-6074-dccp-fix-early-skb-free.patch
@@ -0,0 +1,47 @@
+From 5edabca9d4cff7f1f2b68f0bac55ef99d9798ba4 Mon Sep 17 00:00:00 2001
+From: Andrey Konovalov <andreyknvl@google.com>
+Date: Thu, 16 Feb 2017 17:22:46 +0100
+Subject: dccp: fix freeing skb too early for IPV6_RECVPKTINFO
+
+In the current DCCP implementation an skb for a DCCP_PKT_REQUEST packet
+is forcibly freed via __kfree_skb in dccp_rcv_state_process if
+dccp_v6_conn_request successfully returns.
+
+However, if IPV6_RECVPKTINFO is set on a socket, the address of the skb
+is saved to ireq->pktopts and the ref count for skb is incremented in
+dccp_v6_conn_request, so skb is still in use. Nevertheless, it gets freed
+in dccp_rcv_state_process.
+
+Fix by calling consume_skb instead of doing goto discard and therefore
+calling __kfree_skb.
+
+Similar fixes for TCP:
+
+fb7e2399ec17f1004c0e0ccfd17439f8759ede01 [TCP]: skb is unexpectedly freed.
+0aea76d35c9651d55bbaf746e7914e5f9ae5a25d tcp: SYN packets are now
+simply consumed
+
+Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/dccp/input.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/dccp/input.c b/net/dccp/input.c
+index ba34718..8fedc2d 100644
+--- a/net/dccp/input.c
++++ b/net/dccp/input.c
+@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
+ 			if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
+ 								    skb) < 0)
+ 				return 1;
+-			goto discard;
++			consume_skb(skb);
++			return 0;
+ 		}
+ 		if (dh->dccph_type == DCCP_PKT_RESET)
+ 			goto discard;
+-- 
+cgit v0.12
+

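The subtlety the patch above fixes is the difference between dropping a
reference and forcing a free: kfree_skb()/consume_skb() decrement skb->users
and only release the buffer when the count reaches zero, while __kfree_skb()
frees unconditionally, so calling it after dccp_v6_conn_request() has taken an
extra reference for ireq->pktopts leaves a dangling pointer. A toy refcount
model of the two behaviors (hypothetical types, not the real skb API):

	#include <stdlib.h>

	struct buf {
		int users;        /* reference count, like skb->users */
	};

	static struct buf *buf_get(struct buf *b) { b->users++; return b; }

	/* Like consume_skb()/kfree_skb(): drop one reference, free at zero. */
	static void buf_put(struct buf *b)
	{
		if (--b->users == 0)
			free(b);
	}

	/* Like __kfree_skb(): frees immediately, ignoring b->users. */
	static void buf_force_free(struct buf *b) { free(b); }

	int main(void)
	{
		struct buf *b = calloc(1, sizeof(*b));
		if (!b)
			return 1;
		b->users = 1;

		buf_get(b);   /* second owner, like ireq->pktopts in the bug */
		buf_put(b);   /* fixed path: buffer survives for the other owner */
		buf_put(b);   /* last reference dropped, buffer freed */

		/* Bug pattern: calling buf_force_free(b) in place of the first
		 * buf_put() would free memory the second owner still uses. */
		return 0;
	}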

^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-01-18 23:50 Alice Ferrazzi
  0 siblings, 0 replies; 71+ messages in thread
From: Alice Ferrazzi @ 2017-01-18 23:50 UTC (permalink / raw
  To: gentoo-commits

commit:     d98fbf46c2bf583f517b78dda8df9422fdffe1b2
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 18 23:49:33 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Jan 18 23:49:33 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d98fbf46

Linux patch 4.1.38

 0000_README             |    4 +
 1037_linux-4.1.38.patch | 2415 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2419 insertions(+)

diff --git a/0000_README b/0000_README
index e28d8f1..061bd5e 100644
--- a/0000_README
+++ b/0000_README
@@ -191,6 +191,10 @@ Patch:  1036_linux-4.1.37.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.37
 
+Patch:  1037_linux-4.1.38.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.38
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1037_linux-4.1.38.patch b/1037_linux-4.1.38.patch
new file mode 100644
index 0000000..a0b5049
--- /dev/null
+++ b/1037_linux-4.1.38.patch
@@ -0,0 +1,2415 @@
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 9fa2bf8c3f6f..7830f1c34a7d 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -1955,6 +1955,7 @@ registers, find a list below:
+   PPC   | KVM_REG_PPC_TM_VSCR           | 32
+   PPC   | KVM_REG_PPC_TM_DSCR           | 64
+   PPC   | KVM_REG_PPC_TM_TAR            | 64
++  PPC   | KVM_REG_PPC_TM_XER            | 64
+         |                               |
+   MIPS  | KVM_REG_MIPS_R0               | 64
+           ...
+diff --git a/Makefile b/Makefile
+index df72b644f78c..1aba5352e0bd 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 37
++SUBLEVEL = 38
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
+index 7d0f07020c80..ab19044815f5 100644
+--- a/arch/arm/xen/enlighten.c
++++ b/arch/arm/xen/enlighten.c
+@@ -218,8 +218,7 @@ static int __init xen_guest_init(void)
+ 	 * for secondary CPUs as they are brought up.
+ 	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
+ 	 */
+-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
+-			                       sizeof(struct vcpu_info));
++	xen_vcpu_info = alloc_percpu(struct vcpu_info);
+ 	if (xen_vcpu_info == NULL)
+ 		return -ENOMEM;
+ 
+diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S
+index b6fcbaf5027b..3dc44b05fb97 100644
+--- a/arch/powerpc/boot/ps3-head.S
++++ b/arch/powerpc/boot/ps3-head.S
+@@ -57,11 +57,6 @@ __system_reset_overlay:
+ 	bctr
+ 
+ 1:
+-	/* Save the value at addr zero for a null pointer write check later. */
+-
+-	li	r4, 0
+-	lwz	r3, 0(r4)
+-
+ 	/* Primary delays then goes to _zimage_start in wrapper. */
+ 
+ 	or	31, 31, 31 /* db16cyc */
+diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
+index 4ec2d86d3c50..a05558a7e51a 100644
+--- a/arch/powerpc/boot/ps3.c
++++ b/arch/powerpc/boot/ps3.c
+@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
+ 	flush_cache((void *)0x100, 512);
+ }
+ 
+-void platform_init(unsigned long null_check)
++void platform_init(void)
+ {
+ 	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
+ 	void *chosen;
+ 	unsigned long ft_addr;
+ 	u64 rm_size;
+-	unsigned long val;
+ 
+ 	console_ops.write = ps3_console_write;
+ 	platform_ops.exit = ps3_exit;
+@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)
+ 
+ 	printf(" flat tree at 0x%lx\n\r", ft_addr);
+ 
+-	val = *(unsigned long *)0;
+-
+-	if (val != null_check)
+-		printf("null check failed: %lx != %lx\n\r", val, null_check);
+-
+ 	((kernel_entry_t)0)(ft_addr, 0, NULL);
+ 
+ 	ps3_exit();
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index a193a13cf08b..7fe65af0035d 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -532,6 +532,7 @@ struct kvm_vcpu_arch {
+ 	u64 tfiar;
+ 
+ 	u32 cr_tm;
++	u64 xer_tm;
+ 	u64 lr_tm;
+ 	u64 ctr_tm;
+ 	u64 amr_tm;
+diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
+index ab4d4732c492..720b71a636c8 100644
+--- a/arch/powerpc/include/uapi/asm/kvm.h
++++ b/arch/powerpc/include/uapi/asm/kvm.h
+@@ -587,6 +587,7 @@ struct kvm_get_htab_header {
+ #define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+ #define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+ #define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
++#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)
+ 
+ /* PPC64 eXternal Interrupt Controller Specification */
+ #define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 0034b6b3556a..d8d332e65078 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -583,6 +583,7 @@ int main(void)
+ 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
+ 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
+ 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
++	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
+ 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
+ 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
+ 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index f5b3de7f7fa2..63c37fd2b7a6 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -1171,6 +1171,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ 	case KVM_REG_PPC_TM_CR:
+ 		*val = get_reg_val(id, vcpu->arch.cr_tm);
+ 		break;
++	case KVM_REG_PPC_TM_XER:
++		*val = get_reg_val(id, vcpu->arch.xer_tm);
++		break;
+ 	case KVM_REG_PPC_TM_LR:
+ 		*val = get_reg_val(id, vcpu->arch.lr_tm);
+ 		break;
+@@ -1378,6 +1381,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
+ 	case KVM_REG_PPC_TM_CR:
+ 		vcpu->arch.cr_tm = set_reg_val(id, *val);
+ 		break;
++	case KVM_REG_PPC_TM_XER:
++		vcpu->arch.xer_tm = set_reg_val(id, *val);
++		break;
+ 	case KVM_REG_PPC_TM_LR:
+ 		vcpu->arch.lr_tm = set_reg_val(id, *val);
+ 		break;
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index c6d601cc9764..bee16411de73 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -619,6 +619,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
+ 					      HPTE_V_ABSENT);
+ 			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
+ 				  true);
++			/* Don't lose R/C bit updates done by hardware */
++			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
+ 			hpte[1] = cpu_to_be64(r);
+ 		}
+ 	}
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 11d82b91aa4f..70eaf547703e 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2399,11 +2399,13 @@ kvmppc_save_tm:
+ 	mfctr	r7
+ 	mfspr	r8, SPRN_AMR
+ 	mfspr	r10, SPRN_TAR
++	mfxer	r11
+ 	std	r5, VCPU_LR_TM(r9)
+ 	stw	r6, VCPU_CR_TM(r9)
+ 	std	r7, VCPU_CTR_TM(r9)
+ 	std	r8, VCPU_AMR_TM(r9)
+ 	std	r10, VCPU_TAR_TM(r9)
++	std	r11, VCPU_XER_TM(r9)
+ 
+ 	/* Restore r12 as trap number. */
+ 	lwz	r12, VCPU_TRAP(r9)
+@@ -2496,11 +2498,13 @@ kvmppc_restore_tm:
+ 	ld	r7, VCPU_CTR_TM(r4)
+ 	ld	r8, VCPU_AMR_TM(r4)
+ 	ld	r9, VCPU_TAR_TM(r4)
++	ld	r10, VCPU_XER_TM(r4)
+ 	mtlr	r5
+ 	mtcr	r6
+ 	mtctr	r7
+ 	mtspr	SPRN_AMR, r8
+ 	mtspr	SPRN_TAR, r9
++	mtxer	r10
+ 
+ 	/*
+ 	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 08f9d9230b94..341ea55d2e85 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1172,10 +1172,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
+ 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
+ }
+ 
+-static inline bool is_exception(u32 intr_info)
++static inline bool is_nmi(u32 intr_info)
+ {
+ 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
+-		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
++		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+ }
+ 
+ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+@@ -5089,7 +5089,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 	if (is_machine_check(intr_info))
+ 		return handle_machine_check(vcpu);
+ 
+-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
++	if (is_nmi(intr_info))
+ 		return 1;  /* already handled by vmx_vcpu_run() */
+ 
+ 	if (is_no_device(intr_info)) {
+@@ -7519,7 +7519,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
+ 
+ 	switch (exit_reason) {
+ 	case EXIT_REASON_EXCEPTION_NMI:
+-		if (!is_exception(intr_info))
++		if (is_nmi(intr_info))
+ 			return false;
+ 		else if (is_page_fault(intr_info))
+ 			return enable_ept;
+@@ -7967,8 +7967,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
+ 		kvm_machine_check();
+ 
+ 	/* We need to handle NMIs before interrupts are enabled */
+-	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
+-	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
++	if (is_nmi(exit_intr_info)) {
+ 		kvm_before_handle_nmi(&vmx->vcpu);
+ 		asm("int $2");
+ 		kvm_after_handle_nmi(&vmx->vcpu);
+diff --git a/block/bsg.c b/block/bsg.c
+index d214e929ce18..b9a53615bdef 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ 
+ 	dprintk("%s: write %Zd bytes\n", bd->name, count);
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	bsg_set_block(bd, file);
+ 
+ 	bytes_written = 0;
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 4d1d9de4f9bf..8b0b95014465 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -923,13 +923,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
+ 		timeout = MAX_JIFFY_OFFSET;
+ 	}
+ 
+-	retval = wait_for_completion_interruptible_timeout(&buf->completion,
++	timeout = wait_for_completion_interruptible_timeout(&buf->completion,
+ 			timeout);
+-	if (retval == -ERESTARTSYS || !retval) {
++	if (timeout == -ERESTARTSYS || !timeout) {
++		retval = timeout;
+ 		mutex_lock(&fw_lock);
+ 		fw_load_abort(fw_priv);
+ 		mutex_unlock(&fw_lock);
+-	} else if (retval > 0) {
++	} else if (timeout > 0) {
+ 		retval = 0;
+ 	}
+ 
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index cef6fa83a274..ea0c863861b9 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1436,7 +1436,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	blk_mq_start_request(bd->rq);
+ 
+ 	if (lo->lo_state != Lo_bound)
+-		return -EIO;
++		return BLK_MQ_RQ_QUEUE_ERROR;
+ 
+ 	if (cmd->rq->cmd_flags & REQ_WRITE) {
+ 		struct loop_device *lo = cmd->rq->q->queuedata;
+diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
+index 3111f2778079..849f2e29c243 100644
+--- a/drivers/char/tpm/xen-tpmfront.c
++++ b/drivers/char/tpm/xen-tpmfront.c
+@@ -305,7 +305,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
+ 	rv = setup_ring(dev, priv);
+ 	if (rv) {
+ 		chip = dev_get_drvdata(&dev->dev);
+-		tpm_chip_unregister(chip);
+ 		ring_free(priv);
+ 		return rv;
+ 	}
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 29071a156cbe..c9202f6feda8 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -703,7 +703,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
+ 
+ 	/* Will read cryptlen */
+ 	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
++	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
++			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
++	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+ 
+ 	/* Write ICV */
+ 	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index fd5c5f3370f6..e53dbc90fcb6 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
+ 	ast_write32(ast, 0x10000, 0xfc600309);
+ 
+ 	do {
+-		;
++		if (pci_channel_offline(dev->pdev))
++			return -EIO;
+ 	} while (ast_read32(ast, 0x10000) != 0x01);
+ 	data = ast_read32(ast, 0x10004);
+ 
+@@ -429,7 +430,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
+ 	ast_detect_chip(dev, &need_post);
+ 
+ 	if (ast->chip != AST1180) {
+-		ast_get_dram_info(dev);
++		ret = ast_get_dram_info(dev);
++		if (ret)
++			goto out_free;
+ 		ast->vram_size = ast_get_vram_info(dev);
+ 		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
+ 	}
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index 92e7e5795398..db98ab5cde3d 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -484,6 +484,9 @@ static const struct file_operations psb_gem_fops = {
+ 	.open = drm_open,
+ 	.release = drm_release,
+ 	.unlocked_ioctl = psb_unlocked_ioctl,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl = drm_compat_ioctl,
++#endif
+ 	.mmap = drm_gem_mmap,
+ 	.poll = drm_poll,
+ 	.read = drm_read,
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 7d53d7e15455..c7a7cc17db32 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2006,21 +2006,19 @@ struct drm_i915_gem_object {
+ 	/** Record of address bit 17 of each page at last unbind. */
+ 	unsigned long *bit_17;
+ 
+-	union {
+-		/** for phy allocated objects */
+-		struct drm_dma_handle *phys_handle;
+-
+-		struct i915_gem_userptr {
+-			uintptr_t ptr;
+-			unsigned read_only :1;
+-			unsigned workers :4;
++	struct i915_gem_userptr {
++		uintptr_t ptr;
++		unsigned read_only :1;
++		unsigned workers :4;
+ #define I915_GEM_USERPTR_MAX_WORKERS 15
+ 
+-			struct i915_mm_struct *mm;
+-			struct i915_mmu_object *mmu_object;
+-			struct work_struct *work;
+-		} userptr;
+-	};
++		struct i915_mm_struct *mm;
++		struct i915_mmu_object *mmu_object;
++		struct work_struct *work;
++	} userptr;
++
++	/** for phys allocated objects */
++	struct drm_dma_handle *phys_handle;
+ };
+ #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
+ 
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 00bc49835e09..52c703be7882 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -1849,32 +1849,34 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
+ 				GEN9_MEM_LATENCY_LEVEL_MASK;
+ 
+ 		/*
++		 * If a level n (n > 1) has a 0us latency, all levels m (m >= n)
++		 * need to be disabled. We make sure to sanitize the values out
++		 * of the punit to satisfy this requirement.
++		 */
++		for (level = 1; level <= max_level; level++) {
++			if (wm[level] == 0) {
++				for (i = level + 1; i <= max_level; i++)
++					wm[i] = 0;
++				break;
++			}
++		}
++
++		/*
+ 		 * WaWmMemoryReadLatency:skl
+ 		 *
+ 		 * punit doesn't take into account the read latency so we need
+-		 * to add 2us to the various latency levels we retrieve from
+-		 * the punit.
+-		 *   - W0 is a bit special in that it's the only level that
+-		 *   can't be disabled if we want to have display working, so
+-		 *   we always add 2us there.
+-		 *   - For levels >=1, punit returns 0us latency when they are
+-		 *   disabled, so we respect that and don't add 2us then
+-		 *
+-		 * Additionally, if a level n (n > 1) has a 0us latency, all
+-		 * levels m (m >= n) need to be disabled. We make sure to
+-		 * sanitize the values out of the punit to satisfy this
+-		 * requirement.
++		 * to add 2us to the various latency levels we retrieve from the
++		 * punit when level 0 response data us 0us.
+ 		 */
+-		wm[0] += 2;
+-		for (level = 1; level <= max_level; level++)
+-			if (wm[level] != 0)
++		if (wm[0] == 0) {
++			wm[0] += 2;
++			for (level = 1; level <= max_level; level++) {
++				if (wm[level] == 0)
++					break;
+ 				wm[level] += 2;
+-			else {
+-				for (i = level + 1; i <= max_level; i++)
+-					wm[i] = 0;
+-
+-				break;
+ 			}
++		}
++
+ 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ 		uint64_t sskpd = I915_READ64(MCH_SSKPD);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index fa661744a1f5..cba23008eca4 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct radeon_device *rdev = crtc->dev->dev_private;
+ 
++	if (radeon_crtc->cursor_out_of_bounds)
++		return;
++
+ 	if (ASIC_IS_DCE4(rdev)) {
+ 		WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ 		WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+@@ -124,21 +127,25 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 	int xorigin = 0, yorigin = 0;
+ 	int w = radeon_crtc->cursor_width;
+ 
++	radeon_crtc->cursor_x = x;
++	radeon_crtc->cursor_y = y;
++
+ 	if (ASIC_IS_AVIVO(rdev)) {
+ 		/* avivo cursor are offset into the total surface */
+ 		x += crtc->x;
+ 		y += crtc->y;
+ 	}
+-	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+ 
+-	if (x < 0) {
++	if (x < 0)
+ 		xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
+-		x = 0;
+-	}
+-	if (y < 0) {
++	if (y < 0)
+ 		yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
+-		y = 0;
++
++	if (!ASIC_IS_AVIVO(rdev)) {
++		x += crtc->x;
++		y += crtc->y;
+ 	}
++	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+ 
+ 	/* fixed on DCE6 and newer */
+ 	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
+@@ -161,27 +168,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 		if (i > 1) {
+ 			int cursor_end, frame_end;
+ 
+-			cursor_end = x - xorigin + w;
++			cursor_end = x + w;
+ 			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
+ 			if (cursor_end >= frame_end) {
+ 				w = w - (cursor_end - frame_end);
+ 				if (!(frame_end & 0x7f))
+ 					w--;
+-			} else {
+-				if (!(cursor_end & 0x7f))
+-					w--;
++			} else if (cursor_end <= 0) {
++				goto out_of_bounds;
++			} else if (!(cursor_end & 0x7f)) {
++				w--;
+ 			}
+ 			if (w <= 0) {
+-				w = 1;
+-				cursor_end = x - xorigin + w;
+-				if (!(cursor_end & 0x7f)) {
+-					x--;
+-					WARN_ON_ONCE(x < 0);
+-				}
++				goto out_of_bounds;
+ 			}
+ 		}
+ 	}
+ 
++	if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
++	    x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
++	    y >= (crtc->y + crtc->mode.crtc_vdisplay))
++		goto out_of_bounds;
++
++	x += xorigin;
++	y += yorigin;
++
+ 	if (ASIC_IS_DCE4(rdev)) {
+ 		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
+ 		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+@@ -193,6 +204,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
+ 		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ 	} else {
++		x -= crtc->x;
++		y -= crtc->y;
++
+ 		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
+ 			y *= 2;
+ 
+@@ -210,10 +224,20 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 		       yorigin * 256);
+ 	}
+ 
+-	radeon_crtc->cursor_x = x;
+-	radeon_crtc->cursor_y = y;
++	if (radeon_crtc->cursor_out_of_bounds) {
++		radeon_crtc->cursor_out_of_bounds = false;
++		if (radeon_crtc->cursor_bo)
++			radeon_show_cursor(crtc);
++	}
+ 
+ 	return 0;
++
++ out_of_bounds:
++	if (!radeon_crtc->cursor_out_of_bounds) {
++		radeon_hide_cursor(crtc);
++		radeon_crtc->cursor_out_of_bounds = true;
++	}
++	return 0;
+ }
+ 
+ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+@@ -306,22 +330,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ 		return ret;
+ 	}
+ 
+-	radeon_crtc->cursor_width = width;
+-	radeon_crtc->cursor_height = height;
+-
+ 	radeon_lock_cursor(crtc, true);
+ 
+-	if (hot_x != radeon_crtc->cursor_hot_x ||
++	if (width != radeon_crtc->cursor_width ||
++	    height != radeon_crtc->cursor_height ||
++	    hot_x != radeon_crtc->cursor_hot_x ||
+ 	    hot_y != radeon_crtc->cursor_hot_y) {
+ 		int x, y;
+ 
+ 		x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+ 		y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+ 
+-		radeon_cursor_move_locked(crtc, x, y);
+-
++		radeon_crtc->cursor_width = width;
++		radeon_crtc->cursor_height = height;
+ 		radeon_crtc->cursor_hot_x = hot_x;
+ 		radeon_crtc->cursor_hot_y = hot_y;
++
++		radeon_cursor_move_locked(crtc, x, y);
+ 	}
+ 
+ 	radeon_set_cursor(crtc);
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index 43ba333949c7..3974e03bd82f 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -330,6 +330,7 @@ struct radeon_crtc {
+ 	u16 lut_r[256], lut_g[256], lut_b[256];
+ 	bool enabled;
+ 	bool can_tile;
++	bool cursor_out_of_bounds;
+ 	uint32_t crtc_offset;
+ 	struct drm_gem_object *cursor_bo;
+ 	uint64_t cursor_addr;
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index a808ba001ee7..a717da729fb8 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2975,6 +2975,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		    (rdev->pdev->revision == 0x80) ||
+ 		    (rdev->pdev->revision == 0x81) ||
+ 		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
+ 		    (rdev->pdev->device == 0x6604) ||
+ 		    (rdev->pdev->device == 0x6605)) {
+ 			max_sclk = 75000;
+diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
+index fa17b552ff78..aab811d4c0e1 100644
+--- a/drivers/infiniband/core/multicast.c
++++ b/drivers/infiniband/core/multicast.c
+@@ -526,8 +526,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
+ 		process_join_error(group, status);
+ 	else {
+ 		int mgids_changed, is_mgid0;
+-		ib_find_pkey(group->port->dev->device, group->port->port_num,
+-			     be16_to_cpu(rec->pkey), &pkey_index);
++
++		if (ib_find_pkey(group->port->dev->device,
++				 group->port->port_num, be16_to_cpu(rec->pkey),
++				 &pkey_index))
++			pkey_index = MCAST_INVALID_PKEY_INDEX;
+ 
+ 		spin_lock_irq(&group->port->lock);
+ 		if (group->state == MCAST_BUSY &&
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+index 0d23e0568deb..b9705395b0aa 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+@@ -522,8 +522,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
+ 	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ 		return;
+ 
+-	if (ib_query_port(priv->ca, priv->port, &port_attr) ||
+-	    port_attr.state != IB_PORT_ACTIVE) {
++	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
++		ipoib_dbg(priv, "ib_query_port() failed\n");
++		return;
++	}
++	if (port_attr.state != IB_PORT_ACTIVE) {
+ 		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
+ 			  port_attr.state);
+ 		return;
+diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
+index 599578042ea0..009f75d25268 100644
+--- a/drivers/input/misc/drv260x.c
++++ b/drivers/input/misc/drv260x.c
+@@ -597,7 +597,6 @@ static int drv260x_probe(struct i2c_client *client,
+ 	}
+ 
+ 	haptics->input_dev->name = "drv260x:haptics";
+-	haptics->input_dev->dev.parent = client->dev.parent;
+ 	haptics->input_dev->close = drv260x_close;
+ 	input_set_drvdata(haptics->input_dev, haptics);
+ 	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index ce507a405d05..ab16f33b743b 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1482,12 +1482,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
+ 	if (!cc->key_size && strcmp(key, "-"))
+ 		goto out;
+ 
++	/* clear the flag since following operations may invalidate previously valid key */
++	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
++
+ 	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
+ 		goto out;
+ 
+-	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+-
+ 	r = crypt_setkey_allcpus(cc);
++	if (!r)
++		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+ 
+ out:
+ 	/* Hex key string not needed after here, so wipe it. */
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index 53091295fce9..4ca451e679a3 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -766,17 +766,15 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
+ 
+ 	r = sm_ll_new_metadata(&smm->ll, tm);
++	if (!r) {
++		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
++			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
++		r = sm_ll_extend(&smm->ll, nr_blocks);
++	}
++	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+ 	if (r)
+ 		return r;
+ 
+-	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+-		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+-	r = sm_ll_extend(&smm->ll, nr_blocks);
+-	if (r)
+-		return r;
+-
+-	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+-
+ 	/*
+ 	 * Now we need to update the newly created data structures with the
+ 	 * allocated blocks that they were built from.
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ef0a99a3a779..e6d689c0a175 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -6918,6 +6918,15 @@ static int run(struct mddev *mddev)
+ 			stripe = (stripe | (stripe-1)) + 1;
+ 		mddev->queue->limits.discard_alignment = stripe;
+ 		mddev->queue->limits.discard_granularity = stripe;
++
++		/*
++		 * We use 16-bit counter of active stripes in bi_phys_segments
++		 * (minus one for over-loaded initialization)
++		 */
++		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
++		blk_queue_max_discard_sectors(mddev->queue,
++					      0xfffe * STRIPE_SECTORS);
++
+ 		/*
+ 		 * unaligned part of discard request will be ignored, so can't
+ 		 * guarantee discard_zeroes_data
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 5eb23ae82def..fdc44c8200ba 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2055,7 +2055,27 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ 			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ 			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ 
++			sdhci_do_reset(host, SDHCI_RESET_CMD);
++			sdhci_do_reset(host, SDHCI_RESET_DATA);
++
+ 			err = -EIO;
++
++			if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
++				goto out;
++
++			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
++			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
++
++			spin_unlock_irqrestore(&host->lock, flags);
++
++			memset(&cmd, 0, sizeof(cmd));
++			cmd.opcode = MMC_STOP_TRANSMISSION;
++			cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
++			cmd.busy_timeout = 50;
++			mmc_wait_for_cmd(mmc, &cmd, 0);
++
++			spin_lock_irqsave(&host->lock, flags);
++
+ 			goto out;
+ 		}
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index 7cdaf40c3057..ea7b8c25955f 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -27,7 +27,6 @@ static const struct pci_device_id ath_pci_id_table[] = {
+ 	{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI   */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
+-	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
+ 
+ #ifdef CONFIG_ATH9K_PCOEM
+ 	/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
+@@ -38,7 +37,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
+ 	  .driver_data = ATH9K_PCI_LED_ACT_HI },
+ #endif
+ 
+-	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
++	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
+ 
+ #ifdef CONFIG_ATH9K_PCOEM
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+@@ -86,7 +85,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
+ 			 0x10CF, /* Fujitsu */
+ 			 0x1536),
+ 	  .driver_data = ATH9K_PCI_D3_L1_WAR },
++#endif
+ 
++	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
++
++#ifdef CONFIG_ATH9K_PCOEM
+ 	/* AR9285 card for Asus */
+ 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 			 0x002B,
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 4e720ed402ef..66c12c8f968b 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1993,6 +1993,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
+ 	if (!dev->pme_support)
+ 		return false;
+ 
++	/* PME-capable in principle, but not from the intended sleep state */
++	if (!pci_pme_capable(dev, pci_target_state(dev)))
++		return false;
++
+ 	while (bus->parent) {
+ 		struct pci_dev *bridge = bus->self;
+ 
+diff --git a/drivers/regulator/stw481x-vmmc.c b/drivers/regulator/stw481x-vmmc.c
+index 7d2ae3e9e942..342f5da79975 100644
+--- a/drivers/regulator/stw481x-vmmc.c
++++ b/drivers/regulator/stw481x-vmmc.c
+@@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = {
+ 	.volt_table = stw481x_vmmc_voltages,
+ 	.enable_time = 200, /* FIXME: look this up */
+ 	.enable_reg = STW_CONF1,
+-	.enable_mask = STW_CONF1_PDN_VMMC,
++	.enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS,
++	.enable_val = STW_CONF1_PDN_VMMC,
+ 	.vsel_reg = STW_CONF1,
+ 	.vsel_mask = STW_CONF1_VMMC_MASK,
+ };
+diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
+index 9bb48d70957c..4d20f7298cb8 100644
+--- a/drivers/s390/char/vmlogrdr.c
++++ b/drivers/s390/char/vmlogrdr.c
+@@ -872,7 +872,7 @@ static int __init vmlogrdr_init(void)
+ 		goto cleanup;
+ 
+ 	for (i=0; i < MAXMINOR; ++i ) {
+-		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
++		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ 		if (!sys_ser[i].buffer) {
+ 			rc = -ENOMEM;
+ 			break;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+index 5a0800d19970..bd6f71b97710 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
+@@ -2490,6 +2490,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
+ 		printk("megaraid_sas: pending commands remain after waiting, "
+ 		       "will reset adapter scsi%d.\n",
+ 		       instance->host->host_no);
++		*convert = 1;
+ 		retval = 1;
+ 	}
+ out:
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 9ad41168d26d..72699ac0a0c4 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -1005,10 +1005,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
+ 	struct request_queue *rq = sdev->request_queue;
+ 	struct scsi_target *starget = sdev->sdev_target;
+ 
+-	error = scsi_device_set_state(sdev, SDEV_RUNNING);
+-	if (error)
+-		return error;
+-
+ 	error = scsi_target_add(starget);
+ 	if (error)
+ 		return error;
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index ec192939750e..26bc4e9c7441 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -592,6 +592,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+ 	sg_io_hdr_t *hp;
+ 	unsigned char cmnd[SG_MAX_CDB_SIZE];
+ 
++	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
++		return -EINVAL;
++
+ 	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ 		return -ENXIO;
+ 	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
+diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
+index 0f28c08fcb3c..77b551da5728 100644
+--- a/drivers/ssb/pci.c
++++ b/drivers/ssb/pci.c
+@@ -909,6 +909,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
+ 			if (err) {
+ 				ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
+ 					 err);
++				goto out_free;
+ 			} else {
+ 				ssb_dbg("Using SPROM revision %d provided by platform\n",
+ 					sprom->revision);
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 4b8da862cd7e..f7bcefd46b5e 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -2079,7 +2079,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
+ 			   unsigned int *data)
+ {
+ 	struct ni_private *devpriv = dev->private;
+-	unsigned int mask = (s->maxdata + 1) >> 1;
++	unsigned int mask = s->maxdata;
+ 	int i, n;
+ 	unsigned signbits;
+ 	unsigned int d;
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 07d2996d8c1f..39e8f22be68b 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -605,8 +605,6 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
+ 	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
+ 	cmd->se_cmd = NULL;
+ 
+-	kmem_cache_free(tcmu_cmd_cache, cmd);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
+index 1967bee4f076..9035fbc5e98d 100644
+--- a/drivers/thermal/thermal_hwmon.c
++++ b/drivers/thermal/thermal_hwmon.c
+@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf)
+ 	long temperature;
+ 	int ret;
+ 
+-	ret = tz->ops->get_trip_temp(tz, 0, &temperature);
++	ret = tz->ops->get_crit_temp(tz, &temperature);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 41dcefe67b43..84532dc93801 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1720,6 +1720,7 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
+ 	.driver_info = QUIRK_CONTROL_LINE_STATE, },
+ 	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
++	{ USB_DEVICE(0x2184, 0x0036) },	/* GW Instek AFG-125 */
+ 	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
+ 	},
+ 	/* Motorola H24 HSPA module: */
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index c98f78b0bf11..7602eceb5403 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
+ 
+ static void hub_release(struct kref *kref);
+ static int usb_reset_and_verify_device(struct usb_device *udev);
++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++					  struct usb_port *port_dev);
+ 
+ static inline char *portspeed(struct usb_hub *hub, int portstatus)
+ {
+@@ -882,82 +884,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
+ }
+ 
+ /*
+- * If USB 3.0 ports are placed into the Disabled state, they will no longer
+- * detect any device connects or disconnects.  This is generally not what the
+- * USB core wants, since it expects a disabled port to produce a port status
+- * change event when a new device connects.
+- *
+- * Instead, set the link state to Disabled, wait for the link to settle into
+- * that state, clear any change bits, and then put the port into the RxDetect
+- * state.
++ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
++ * a connection with a plugged-in cable but will signal the host when the cable
++ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
+  */
+-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
+-{
+-	int ret;
+-	int total_time;
+-	u16 portchange, portstatus;
+-
+-	if (!hub_is_superspeed(hub->hdev))
+-		return -EINVAL;
+-
+-	ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-	if (ret < 0)
+-		return ret;
+-
+-	/*
+-	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
+-	 * Controller [1022:7814] will have spurious result making the following
+-	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
+-	 * as high-speed device if we set the usb 3.0 port link state to
+-	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
+-	 * check the state here to avoid the bug.
+-	 */
+-	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_RX_DETECT) {
+-		dev_dbg(&hub->ports[port1 - 1]->dev,
+-			 "Not disabling port; link state is RxDetect\n");
+-		return ret;
+-	}
+-
+-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
+-	if (ret)
+-		return ret;
+-
+-	/* Wait for the link to enter the disabled state. */
+-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
+-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
+-		if (ret < 0)
+-			return ret;
+-
+-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+-				USB_SS_PORT_LS_SS_DISABLED)
+-			break;
+-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-			break;
+-		msleep(HUB_DEBOUNCE_STEP);
+-	}
+-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
+-		dev_warn(&hub->ports[port1 - 1]->dev,
+-				"Could not disable after %d ms\n", total_time);
+-
+-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+-}
+-
+ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
+ {
+ 	struct usb_port *port_dev = hub->ports[port1 - 1];
+ 	struct usb_device *hdev = hub->hdev;
+ 	int ret = 0;
+ 
+-	if (port_dev->child && set_state)
+-		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ 	if (!hub->error) {
+-		if (hub_is_superspeed(hub->hdev))
+-			ret = hub_usb3_port_disable(hub, port1);
+-		else
++		if (hub_is_superspeed(hub->hdev)) {
++			hub_usb3_port_prepare_disable(hub, port_dev);
++			ret = hub_set_port_link_state(hub, port_dev->portnum,
++						      USB_SS_PORT_LS_U3);
++		} else {
+ 			ret = usb_clear_port_feature(hdev, port1,
+ 					USB_PORT_FEAT_ENABLE);
++		}
+ 	}
++	if (port_dev->child && set_state)
++		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
+ 	if (ret && ret != -ENODEV)
+ 		dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
+ 	return ret;
+@@ -4036,6 +3984,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
+ }
+ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ 
++/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
++static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++					  struct usb_port *port_dev)
++{
++	struct usb_device *udev = port_dev->child;
++	int ret;
++
++	if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
++		ret = hub_set_port_link_state(hub, port_dev->portnum,
++					      USB_SS_PORT_LS_U0);
++		if (!ret) {
++			msleep(USB_RESUME_TIMEOUT);
++			ret = usb_disable_remote_wakeup(udev);
++		}
++		if (ret)
++			dev_warn(&udev->dev,
++				 "Port disable: can't disable remote wake\n");
++		udev->do_remote_wakeup = 0;
++	}
++}
+ 
+ #else	/* CONFIG_PM */
+ 
+@@ -4043,6 +4011,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
+ #define hub_resume		NULL
+ #define hub_reset_resume	NULL
+ 
++static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
++						 struct usb_port *port_dev) { }
++
+ int usb_disable_lpm(struct usb_device *udev)
+ {
+ 	return 0;
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 58b4657fc721..9cd76cc8c0d9 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -143,7 +143,7 @@ int config_ep_by_speed(struct usb_gadget *g,
+ 
+ ep_found:
+ 	/* commit results */
+-	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
++	_ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
+ 	_ep->desc = chosen_desc;
+ 	_ep->comp_desc = NULL;
+ 	_ep->maxburst = 0;
+diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c
+index 940304c33224..02260cfdedb1 100644
+--- a/drivers/usb/host/uhci-pci.c
++++ b/drivers/usb/host/uhci-pci.c
+@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
+ 	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
+ 		uhci->wait_for_hp = 1;
+ 
++	/* Intel controllers use non-PME wakeup signalling */
++	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
++		device_set_run_wake(uhci_dev(uhci), 1);
++
+ 	/* Set up pointers to PCI-specific functions */
+ 	uhci->reset_hc = uhci_pci_reset_hc;
+ 	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
+diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
+index e020ad28a00c..53c90131764d 100644
+--- a/drivers/usb/serial/kl5kusb105.c
++++ b/drivers/usb/serial/kl5kusb105.c
+@@ -296,7 +296,7 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 	rc = usb_serial_generic_open(tty, port);
+ 	if (rc) {
+ 		retval = rc;
+-		goto exit;
++		goto err_free_cfg;
+ 	}
+ 
+ 	rc = usb_control_msg(port->serial->dev,
+@@ -315,17 +315,32 @@ static int  klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
+ 
+ 	rc = klsi_105_get_line_state(port, &line_state);
+-	if (rc >= 0) {
+-		spin_lock_irqsave(&priv->lock, flags);
+-		priv->line_state = line_state;
+-		spin_unlock_irqrestore(&priv->lock, flags);
+-		dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
+-		retval = 0;
+-	} else
++	if (rc < 0) {
+ 		retval = rc;
++		goto err_disable_read;
++	}
++
++	spin_lock_irqsave(&priv->lock, flags);
++	priv->line_state = line_state;
++	spin_unlock_irqrestore(&priv->lock, flags);
++	dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
++			line_state);
++
++	return 0;
+ 
+-exit:
++err_disable_read:
++	usb_control_msg(port->serial->dev,
++			     usb_sndctrlpipe(port->serial->dev, 0),
++			     KL5KUSB105A_SIO_CONFIGURE,
++			     USB_TYPE_VENDOR | USB_DIR_OUT,
++			     KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
++			     0, /* index */
++			     NULL, 0,
++			     KLSI_TIMEOUT);
++	usb_serial_generic_close(port);
++err_free_cfg:
+ 	kfree(cfg);
++
+ 	return retval;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index a599e8a841b0..248dac170f39 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -271,6 +271,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
++#define TELIT_PRODUCT_LE922_USBCFG1		0x1040
++#define TELIT_PRODUCT_LE922_USBCFG2		0x1041
+ #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+@@ -1222,6 +1224,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
++		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
+@@ -2001,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },			/* D-Link DWM-158 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index ee71baddbb10..e227eb09b1a0 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 
+ 	vma->vm_ops = &gntdev_vmops;
+ 
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
++	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ 
+ 	if (use_ptemod)
+ 		vma->vm_flags |= VM_DONTCOPY;
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index ccfd31f1df3a..300566ea9b74 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -729,7 +729,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
+ 		return true;	 /* already a holder */
+ 	else if (bdev->bd_holder != NULL)
+ 		return false; 	 /* held by someone else */
+-	else if (bdev->bd_contains == bdev)
++	else if (whole == bdev)
+ 		return true;  	 /* is a whole device which isn't held */
+ 
+ 	else if (whole->bd_holder == bd_may_claim)
+@@ -1772,6 +1772,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 	spin_lock(&inode_sb_list_lock);
+ 	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
+ 		struct address_space *mapping = inode->i_mapping;
++		struct block_device *bdev;
+ 
+ 		spin_lock(&inode->i_lock);
+ 		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+@@ -1792,8 +1793,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+ 		 */
+ 		iput(old_inode);
+ 		old_inode = inode;
++		bdev = I_BDEV(inode);
+ 
+-		func(I_BDEV(inode), arg);
++		mutex_lock(&bdev->bd_mutex);
++		if (bdev->bd_openers)
++			func(bdev, arg);
++		mutex_unlock(&bdev->bd_mutex);
+ 
+ 		spin_lock(&inode_sb_list_lock);
+ 	}
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 1848705506ff..0ce4de6430ef 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -64,6 +64,20 @@ void btrfs_##name(struct work_struct *arg)				\
+ 	normal_work_helper(work);					\
+ }
+ 
++bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
++{
++	/*
++	 * We could compare wq->normal->pending with num_online_cpus()
++	 * to support "thresh == NO_THRESHOLD" case, but it requires
++	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
++	 * postpone it until someone needs the support of that case.
++	 */
++	if (wq->normal->thresh == NO_THRESHOLD)
++		return false;
++
++	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
++}
++
+ BTRFS_WORK_HELPER(worker_helper);
+ BTRFS_WORK_HELPER(delalloc_helper);
+ BTRFS_WORK_HELPER(flush_delalloc_helper);
+diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
+index ec2ee477f8ba..8c4564204f25 100644
+--- a/fs/btrfs/async-thread.h
++++ b/fs/btrfs/async-thread.h
+@@ -78,4 +78,5 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
+ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
+ void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
+ void btrfs_set_work_high_priority(struct btrfs_work *work);
++bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
+ #endif
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index bc2d048a9eb9..8265b0754dca 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1371,7 +1371,8 @@ release_path:
+ 	total_done++;
+ 
+ 	btrfs_release_prepared_delayed_node(delayed_node);
+-	if (async_work->nr == 0 || total_done < async_work->nr)
++	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
++	    total_done < async_work->nr)
+ 		goto again;
+ 
+ free_path:
+@@ -1387,7 +1388,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+ {
+ 	struct btrfs_async_delayed_work *async_work;
+ 
+-	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
++	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
++	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
+ 		return 0;
+ 
+ 	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 32ecb95f6214..6ee954c62fe6 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1821,12 +1821,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
+ next:
+ 	/* check the next slot in the tree to see if it is a valid item */
+ 	nritems = btrfs_header_nritems(path->nodes[0]);
++	path->slots[0]++;
+ 	if (path->slots[0] >= nritems) {
+ 		ret = btrfs_next_leaf(root, path);
+ 		if (ret)
+ 			goto out;
+-	} else {
+-		path->slots[0]++;
+ 	}
+ 
+ 	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+@@ -4984,6 +4983,7 @@ process_leaf:
+ 			if (di_key.type == BTRFS_ROOT_ITEM_KEY)
+ 				continue;
+ 
++			btrfs_release_path(path);
+ 			di_inode = btrfs_iget(root->fs_info->sb, &di_key,
+ 					      root, NULL);
+ 			if (IS_ERR(di_inode)) {
+@@ -4993,13 +4993,12 @@ process_leaf:
+ 
+ 			if (btrfs_inode_in_log(di_inode, trans->transid)) {
+ 				iput(di_inode);
+-				continue;
++				break;
+ 			}
+ 
+ 			ctx->log_new_dentries = false;
+ 			if (type == BTRFS_FT_DIR)
+ 				log_mode = LOG_INODE_ALL;
+-			btrfs_release_path(path);
+ 			ret = btrfs_log_inode(trans, root, di_inode,
+ 					      log_mode, 0, LLONG_MAX, ctx);
+ 			iput(di_inode);
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 22b289a3b1c4..9d0e4fef8ee1 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -615,6 +615,8 @@ struct TCP_Server_Info {
+ #ifdef CONFIG_CIFS_SMB2
+ 	unsigned int	max_read;
+ 	unsigned int	max_write;
++	struct delayed_work reconnect; /* reconnect workqueue job */
++	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
+ #endif /* CONFIG_CIFS_SMB2 */
+ };
+ 
+@@ -814,6 +816,7 @@ cap_unix(struct cifs_ses *ses)
+ struct cifs_tcon {
+ 	struct list_head tcon_list;
+ 	int tc_count;
++	struct list_head rlist; /* reconnect list */
+ 	struct list_head openFileList;
+ 	struct cifs_ses *ses;	/* pointer to session associated with */
+ 	char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index f730c065df34..5ee60b50d8eb 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -207,6 +207,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
+ 					 struct tcon_link *tlink,
+ 					 struct cifs_pending_open *open);
+ extern void cifs_del_pending_open(struct cifs_pending_open *open);
++extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
++				 int from_reconnect);
++extern void cifs_put_tcon(struct cifs_tcon *tcon);
+ 
+ #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
+ extern void cifs_dfs_release_automount_timer(void);
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index cd9d50e4f5f4..7d7bd466520b 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -52,6 +52,9 @@
+ #include "nterr.h"
+ #include "rfc1002pdu.h"
+ #include "fscache.h"
++#ifdef CONFIG_CIFS_SMB2
++#include "smb2proto.h"
++#endif
+ 
+ #define CIFS_PORT 445
+ #define RFC1001_PORT 139
+@@ -2069,8 +2072,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
+ 	return NULL;
+ }
+ 
+-static void
+-cifs_put_tcp_session(struct TCP_Server_Info *server)
++void
++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+ {
+ 	struct task_struct *task;
+ 
+@@ -2087,6 +2090,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
+ 
+ 	cancel_delayed_work_sync(&server->echo);
+ 
++#ifdef CONFIG_CIFS_SMB2
++	if (from_reconnect)
++		/*
++		 * Avoid deadlock here: reconnect work calls
++		 * cifs_put_tcp_session() at its end. Need to be sure
++		 * that reconnect work does nothing with server pointer after
++		 * that step.
++		 */
++		cancel_delayed_work(&server->reconnect);
++	else
++		cancel_delayed_work_sync(&server->reconnect);
++#endif
++
+ 	spin_lock(&GlobalMid_Lock);
+ 	server->tcpStatus = CifsExiting;
+ 	spin_unlock(&GlobalMid_Lock);
+@@ -2151,6 +2167,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
+ 	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
+ 	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
+ 	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
++#ifdef CONFIG_CIFS_SMB2
++	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
++	mutex_init(&tcp_ses->reconnect_mutex);
++#endif
+ 	memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
+ 	       sizeof(tcp_ses->srcaddr));
+ 	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
+@@ -2303,7 +2323,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
+ 	spin_unlock(&cifs_tcp_ses_lock);
+ 
+ 	sesInfoFree(ses);
+-	cifs_put_tcp_session(server);
++	cifs_put_tcp_session(server, 0);
+ }
+ 
+ #ifdef CONFIG_KEYS
+@@ -2476,7 +2496,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
+ 		mutex_unlock(&ses->session_mutex);
+ 
+ 		/* existing SMB ses has a server reference already */
+-		cifs_put_tcp_session(server);
++		cifs_put_tcp_session(server, 0);
+ 		free_xid(xid);
+ 		return ses;
+ 	}
+@@ -2566,7 +2586,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
+ 	return NULL;
+ }
+ 
+-static void
++void
+ cifs_put_tcon(struct cifs_tcon *tcon)
+ {
+ 	unsigned int xid;
+@@ -3673,7 +3693,7 @@ mount_fail_check:
+ 		else if (ses)
+ 			cifs_put_smb_ses(ses);
+ 		else
+-			cifs_put_tcp_session(server);
++			cifs_put_tcp_session(server, 0);
+ 		bdi_destroy(&cifs_sb->bdi);
+ 	}
+ 
+@@ -3984,7 +4004,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
+ 	ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
+ 	if (IS_ERR(ses)) {
+ 		tcon = (struct cifs_tcon *)ses;
+-		cifs_put_tcp_session(master_tcon->ses->server);
++		cifs_put_tcp_session(master_tcon->ses->server, 0);
+ 		goto out;
+ 	}
+ 
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 2ab297dae5a7..1bdfd7c5309c 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
+ 	 * and check it for zero before using.
+ 	 */
+ 	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
+-	if (!max_buf) {
++	if (max_buf < sizeof(struct smb2_lock_element)) {
+ 		free_xid(xid);
+ 		return -EINVAL;
+ 	}
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 8f527c867f78..e3cf4a5fb35a 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -264,7 +264,7 @@ out:
+ 	case SMB2_CHANGE_NOTIFY:
+ 	case SMB2_QUERY_INFO:
+ 	case SMB2_SET_INFO:
+-		return -EAGAIN;
++		rc = -EAGAIN;
+ 	}
+ 	unload_nls(nls_codepage);
+ 	return rc;
+@@ -1615,6 +1615,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
+ 	add_credits(server, credits_received, CIFS_ECHO_OP);
+ }
+ 
++void smb2_reconnect_server(struct work_struct *work)
++{
++	struct TCP_Server_Info *server = container_of(work,
++					struct TCP_Server_Info, reconnect.work);
++	struct cifs_ses *ses;
++	struct cifs_tcon *tcon, *tcon2;
++	struct list_head tmp_list;
++	int tcon_exist = false;
++
++	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
++	mutex_lock(&server->reconnect_mutex);
++
++	INIT_LIST_HEAD(&tmp_list);
++	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++
++	spin_lock(&cifs_tcp_ses_lock);
++	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
++			if (tcon->need_reconnect) {
++				tcon->tc_count++;
++				list_add_tail(&tcon->rlist, &tmp_list);
++				tcon_exist = true;
++			}
++		}
++	}
++	/*
++	 * Get the reference to server struct to be sure that the last call of
++	 * cifs_put_tcon() in the loop below won't release the server pointer.
++	 */
++	if (tcon_exist)
++		server->srv_count++;
++
++	spin_unlock(&cifs_tcp_ses_lock);
++
++	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
++		smb2_reconnect(SMB2_ECHO, tcon);
++		list_del_init(&tcon->rlist);
++		cifs_put_tcon(tcon);
++	}
++
++	cifs_dbg(FYI, "Reconnecting tcons finished\n");
++	mutex_unlock(&server->reconnect_mutex);
++
++	/* now we can safely release srv struct */
++	if (tcon_exist)
++		cifs_put_tcp_session(server, 1);
++}
++
+ int
+ SMB2_echo(struct TCP_Server_Info *server)
+ {
+@@ -1627,32 +1675,11 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
+ 	if (server->tcpStatus == CifsNeedNegotiate) {
+-		struct list_head *tmp, *tmp2;
+-		struct cifs_ses *ses;
+-		struct cifs_tcon *tcon;
+-
+-		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+-		spin_lock(&cifs_tcp_ses_lock);
+-		list_for_each(tmp, &server->smb_ses_list) {
+-			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+-			list_for_each(tmp2, &ses->tcon_list) {
+-				tcon = list_entry(tmp2, struct cifs_tcon,
+-						  tcon_list);
+-				/* add check for persistent handle reconnect */
+-				if (tcon && tcon->need_reconnect) {
+-					spin_unlock(&cifs_tcp_ses_lock);
+-					rc = smb2_reconnect(SMB2_ECHO, tcon);
+-					spin_lock(&cifs_tcp_ses_lock);
+-				}
+-			}
+-		}
+-		spin_unlock(&cifs_tcp_ses_lock);
++		/* No need to send echo on newly established connections */
++		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
++		return rc;
+ 	}
+ 
+-	/* if no session, renegotiate failed above */
+-	if (server->tcpStatus == CifsNeedNegotiate)
+-		return -EIO;
+-
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 9bc59f9c12fb..0a406ae78129 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -95,6 +95,7 @@ extern int smb2_open_file(const unsigned int xid,
+ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
+ 			     struct file_lock *flock, const unsigned int xid);
+ extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
++extern void smb2_reconnect_server(struct work_struct *work);
+ 
+ /*
+  * SMB2 Worker functions - most of protocol specific implementation details
+diff --git a/fs/exec.c b/fs/exec.c
+index 1977c2a553ac..d392c8ad0de0 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -19,7 +19,7 @@
+  * current->executable is only used by the procfs.  This allows a dispatch
+  * table to check for several different types  of binary formats.  We keep
+  * trying until we recognize the file or we run out of supported binary
+- * formats. 
++ * formats.
+  */
+ 
+ #include <linux/slab.h>
+@@ -1108,6 +1108,13 @@ int flush_old_exec(struct linux_binprm * bprm)
+ 	flush_thread();
+ 	current->personality &= ~bprm->per_clear;
+ 
++	/*
++	 * We have to apply CLOEXEC before we change whether the process is
++	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
++	 * trying to access the should-be-closed file descriptors of a process
++	 * undergoing exec(2).
++	 */
++	do_close_on_exec(current->files);
+ 	return 0;
+ 
+ out:
+@@ -1157,7 +1164,6 @@ void setup_new_exec(struct linux_binprm * bprm)
+ 	   group */
+ 	current->self_exec_id++;
+ 	flush_signal_handlers(current, 0);
+-	do_close_on_exec(current->files);
+ }
+ EXPORT_SYMBOL(setup_new_exec);
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 145d6ba4117d..df67a6f8582a 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -224,6 +224,7 @@ struct ext4_io_submit {
+ #define	EXT4_MAX_BLOCK_SIZE		65536
+ #define EXT4_MIN_BLOCK_LOG_SIZE		10
+ #define EXT4_MAX_BLOCK_LOG_SIZE		16
++#define EXT4_MAX_CLUSTER_LOG_SIZE	30
+ #ifdef __KERNEL__
+ # define EXT4_BLOCK_SIZE(s)		((s)->s_blocksize)
+ #else
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 095c7a258d97..d77d542c2ed5 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -336,8 +336,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
+ 
+ 	len -= EXT4_MIN_INLINE_DATA_SIZE;
+ 	value = kzalloc(len, GFP_NOFS);
+-	if (!value)
++	if (!value) {
++		error = -ENOMEM;
+ 		goto out;
++	}
+ 
+ 	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
+ 				     value, len);
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 5b58e266892b..e6798ca34928 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
+ 	ext4_grpblk_t min;
+ 	ext4_grpblk_t max;
+ 	ext4_grpblk_t chunk;
+-	unsigned short border;
++	unsigned int border;
+ 
+ 	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
+ 
+@@ -2254,7 +2254,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
+ 	struct ext4_group_info *grinfo;
+ 	struct sg {
+ 		struct ext4_group_info info;
+-		ext4_grpblk_t counters[16];
++		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
+ 	} sg;
+ 
+ 	group--;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 4723d8b02747..97aa8be40175 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3361,10 +3361,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
+ 			ext4_set_bit(s++, buf);
+ 			count++;
+ 		}
+-		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
+-			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+-			count++;
++		j = ext4_bg_num_gdb(sb, grp);
++		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
++			ext4_error(sb, "Invalid number of block group "
++				   "descriptor blocks: %d", j);
++			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
+ 		}
++		count += j;
++		for (; j > 0; j--)
++			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
+ 	}
+ 	if (!count)
+ 		return 0;
+@@ -3736,7 +3741,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ 	    blocksize > EXT4_MAX_BLOCK_SIZE) {
+ 		ext4_msg(sb, KERN_ERR,
+-		       "Unsupported filesystem blocksize %d", blocksize);
++		       "Unsupported filesystem blocksize %d (%d log_block_size)",
++			 blocksize, le32_to_cpu(es->s_log_block_size));
++		goto failed_mount;
++	}
++	if (le32_to_cpu(es->s_log_block_size) >
++	    (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++		ext4_msg(sb, KERN_ERR,
++			 "Invalid log block size: %u",
++			 le32_to_cpu(es->s_log_block_size));
+ 		goto failed_mount;
+ 	}
+ 
+@@ -3832,12 +3845,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
+ 	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
+-	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
+-		goto cantfind_ext4;
+ 
+ 	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
+ 	if (sbi->s_inodes_per_block == 0)
+ 		goto cantfind_ext4;
++	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
++	    sbi->s_inodes_per_group > blocksize * 8) {
++		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
++			 sbi->s_blocks_per_group);
++		goto failed_mount;
++	}
+ 	sbi->s_itb_per_group = sbi->s_inodes_per_group /
+ 					sbi->s_inodes_per_block;
+ 	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
+@@ -3878,6 +3895,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 				 "block size (%d)", clustersize, blocksize);
+ 			goto failed_mount;
+ 		}
++		if (le32_to_cpu(es->s_log_cluster_size) >
++		    (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
++			ext4_msg(sb, KERN_ERR,
++				 "Invalid log cluster size: %u",
++				 le32_to_cpu(es->s_log_cluster_size));
++			goto failed_mount;
++		}
+ 		sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
+ 			le32_to_cpu(es->s_log_block_size);
+ 		sbi->s_clusters_per_group =
+@@ -3914,13 +3938,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 	sbi->s_cluster_ratio = clustersize / blocksize;
+ 
+-	if (sbi->s_inodes_per_group > blocksize * 8) {
+-		ext4_msg(sb, KERN_ERR,
+-		       "#inodes per group too big: %lu",
+-		       sbi->s_inodes_per_group);
+-		goto failed_mount;
+-	}
+-
+ 	/* Do we have standard group size of clustersize * 8 blocks ? */
+ 	if (sbi->s_blocks_per_group == clustersize << 3)
+ 		set_opt2(sb, STD_GROUP_SIZE);
+diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
+index f5388f37217e..bb73d0a0f387 100644
+--- a/fs/f2fs/debug.c
++++ b/fs/f2fs/debug.c
+@@ -339,6 +339,7 @@ static int stat_open(struct inode *inode, struct file *file)
+ }
+ 
+ static const struct file_operations stat_fops = {
++	.owner = THIS_MODULE,
+ 	.open = stat_open,
+ 	.read = seq_read,
+ 	.llseek = seq_lseek,
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index 8b8d83a526ce..ddf5f9fd719f 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -414,7 +414,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
+ 	 */
+ 	if (!PageUptodate(page)) {
+ 		unsigned pglen = nfs_page_length(page);
+-		unsigned end = offset + len;
++		unsigned end = offset + copied;
+ 
+ 		if (pglen == 0) {
+ 			zero_user_segments(page, 0, offset,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 1114afdd5a6b..4aefff89949d 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3864,6 +3864,7 @@ xlog_recover_clear_agi_bucket(
+ 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
+ 	offset = offsetof(xfs_agi_t, agi_unlinked) +
+ 		 (sizeof(xfs_agino_t) * bucket);
++	xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
+ 	xfs_trans_log_buf(tp, agibp, offset,
+ 			  (offset + sizeof(xfs_agino_t) - 1));
+ 
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index ddd47c3a757d..c11f9d1963c3 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1940,8 +1940,8 @@ struct napi_gro_cb {
+ 	/* This is non-zero if the packet may be of the same flow. */
+ 	u8	same_flow:1;
+ 
+-	/* Used in udp_gro_receive */
+-	u8	udp_mark:1;
++	/* Used in tunnel GRO receive */
++	u8	encap_mark:1;
+ 
+ 	/* GRO checksum is valid */
+ 	u8	csum_valid:1;
+diff --git a/include/net/ip.h b/include/net/ip.h
+index f41fc497b21b..117bde93995d 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -234,6 +234,8 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
+ }
+ #endif
+ 
++__be32 inet_current_timestamp(void);
++
+ /* From inetpeer.c */
+ extern int inet_peer_threshold;
+ extern int inet_peer_minttl;
+diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
+index ac54c27a2bfd..e6796dc8c764 100644
+--- a/include/rdma/ib_addr.h
++++ b/include/rdma/ib_addr.h
+@@ -183,10 +183,12 @@ static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
+ 
+ 	dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
+ 	if (dev) {
+-		ip4 = (struct in_device *)dev->ip_ptr;
+-		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address)
++		ip4 = in_dev_get(dev);
++		if (ip4 && ip4->ifa_list && ip4->ifa_list->ifa_address) {
+ 			ipv6_addr_set_v4mapped(ip4->ifa_list->ifa_address,
+ 					       (struct in6_addr *)gid);
++			in_dev_put(ip4);
++		}
+ 		dev_put(dev);
+ 	}
+ }
+diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
+index 0874e2edd275..79517e5549f1 100644
+--- a/kernel/debug/debug_core.c
++++ b/kernel/debug/debug_core.c
+@@ -598,11 +598,11 @@ return_normal:
+ 	/*
+ 	 * Wait for the other CPUs to be notified and be waiting for us:
+ 	 */
+-	time_left = loops_per_jiffy * HZ;
++	time_left = MSEC_PER_SEC;
+ 	while (kgdb_do_roundup && --time_left &&
+ 	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+ 		   online_cpus)
+-		cpu_relax();
++		udelay(1000);
+ 	if (!time_left)
+ 		pr_crit("Timed out waiting for secondary CPUs.\n");
+ 
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index d9f112bd42a7..d296b904685b 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -309,10 +309,10 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
+ static inline u32 arch_gettimeoffset(void) { return 0; }
+ #endif
+ 
+-static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
++static inline u64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
+ 					  cycle_t delta)
+ {
+-	s64 nsec;
++	u64 nsec;
+ 
+ 	nsec = delta * tkr->mult + tkr->xtime_nsec;
+ 	nsec >>= tkr->shift;
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index a51e79688455..972ce5b596f4 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -779,6 +779,10 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+ 
+ 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ 
++		/* If a graph tracer ignored set_graph_notrace */
++		if (call->depth < -1)
++			call->depth += FTRACE_NOTRACE_DEPTH;
++
+ 		/*
+ 		 * Comments display at + 1 to depth. Since
+ 		 * this is a leaf function, keep the comments
+@@ -787,7 +791,8 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+ 		cpu_data->depth = call->depth - 1;
+ 
+ 		/* No need to keep this function around for this depth */
+-		if (call->depth < FTRACE_RETFUNC_DEPTH)
++		if (call->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(call->depth < 0))
+ 			cpu_data->enter_funcs[call->depth] = 0;
+ 	}
+ 
+@@ -816,11 +821,16 @@ print_graph_entry_nested(struct trace_iterator *iter,
+ 		struct fgraph_cpu_data *cpu_data;
+ 		int cpu = iter->cpu;
+ 
++		/* If a graph tracer ignored set_graph_notrace */
++		if (call->depth < -1)
++			call->depth += FTRACE_NOTRACE_DEPTH;
++
+ 		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+ 		cpu_data->depth = call->depth;
+ 
+ 		/* Save this function pointer to see if the exit matches */
+-		if (call->depth < FTRACE_RETFUNC_DEPTH)
++		if (call->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(call->depth < 0))
+ 			cpu_data->enter_funcs[call->depth] = call->func;
+ 	}
+ 
+@@ -1048,7 +1058,8 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
+ 		 */
+ 		cpu_data->depth = trace->depth - 1;
+ 
+-		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
++		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
++		    !WARN_ON_ONCE(trace->depth < 0)) {
+ 			if (cpu_data->enter_funcs[trace->depth] != trace->func)
+ 				func_match = 0;
+ 			cpu_data->enter_funcs[trace->depth] = 0;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index e1a95dbcd5f8..f16e330e1096 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -246,6 +246,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ 	int nid = shrinkctl->nid;
+ 	long batch_size = shrinker->batch ? shrinker->batch
+ 					  : SHRINK_BATCH;
++	long scanned = 0, next_deferred;
+ 
+ 	freeable = shrinker->count_objects(shrinker, shrinkctl);
+ 	if (freeable == 0)
+@@ -267,7 +268,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
+ 		       shrinker->scan_objects, total_scan);
+ 		total_scan = freeable;
+-	}
++		next_deferred = nr;
++	} else
++		next_deferred = total_scan;
+ 
+ 	/*
+ 	 * We need to avoid excessive windup on filesystem shrinkers
+@@ -324,17 +327,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+ 
+ 		count_vm_events(SLABS_SCANNED, nr_to_scan);
+ 		total_scan -= nr_to_scan;
++		scanned += nr_to_scan;
+ 
+ 		cond_resched();
+ 	}
+ 
++	if (next_deferred >= scanned)
++		next_deferred -= scanned;
++	else
++		next_deferred = 0;
+ 	/*
+ 	 * move the unused scan count back into the shrinker in a
+ 	 * manner that handles concurrent updates. If we exhausted the
+ 	 * scan, there is no need to do an update.
+ 	 */
+-	if (total_scan > 0)
+-		new_nr = atomic_long_add_return(total_scan,
++	if (next_deferred > 0)
++		new_nr = atomic_long_add_return(next_deferred,
+ 						&shrinker->nr_deferred[nid]);
+ 	else
+ 		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 84201c21705e..940ba74b297c 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -2029,6 +2029,19 @@ static int process_connect(struct ceph_connection *con)
+ 
+ 	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+ 
++	if (con->auth_reply_buf) {
++		/*
++		 * Any connection that defines ->get_authorizer()
++		 * should also define ->verify_authorizer_reply().
++		 * See get_connect_authorizer().
++		 */
++		ret = con->ops->verify_authorizer_reply(con, 0);
++		if (ret < 0) {
++			con->error_msg = "bad authorize reply";
++			return ret;
++		}
++	}
++
+ 	switch (con->in_reply.tag) {
+ 	case CEPH_MSGR_TAG_FEATURES:
+ 		pr_err("%s%lld %s feature set mismatch,"
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 56d820fc2707..0f9289ff0f2a 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4059,8 +4059,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ 		NAPI_GRO_CB(skb)->same_flow = 0;
+ 		NAPI_GRO_CB(skb)->flush = 0;
+ 		NAPI_GRO_CB(skb)->free = 0;
+-		NAPI_GRO_CB(skb)->udp_mark = 0;
+ 		NAPI_GRO_CB(skb)->recursion_counter = 0;
++		NAPI_GRO_CB(skb)->encap_mark = 0;
+ 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+ 
+ 		/* Setup for GRO checksum validation */
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 2095cd6c31fd..84e46837610b 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1388,6 +1388,45 @@ out:
+ 	return pp;
+ }
+ 
++static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
++					 struct sk_buff *skb)
++{
++	if (NAPI_GRO_CB(skb)->encap_mark) {
++		NAPI_GRO_CB(skb)->flush = 1;
++		return NULL;
++	}
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
++	return inet_gro_receive(head, skb);
++}
++
++#define SECONDS_PER_DAY	86400
++
++/* inet_current_timestamp - Return IP network timestamp
++ *
++ * Return milliseconds since midnight in network byte order.
++ */
++__be32 inet_current_timestamp(void)
++{
++	u32 secs;
++	u32 msecs;
++	struct timespec64 ts;
++
++	ktime_get_real_ts64(&ts);
++
++	/* Get secs since midnight. */
++	(void)div_u64_rem(ts.tv_sec, SECONDS_PER_DAY, &secs);
++	/* Convert to msecs. */
++	msecs = secs * MSEC_PER_SEC;
++	/* Convert nsec to msec. */
++	msecs += (u32)ts.tv_nsec / NSEC_PER_MSEC;
++
++	/* Convert to network byte order. */
++	return htonl(msecs);
++}
++EXPORT_SYMBOL(inet_current_timestamp);
++
+ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ {
+ 	if (sk->sk_family == AF_INET)
+@@ -1430,6 +1469,13 @@ out_unlock:
+ 	return err;
+ }
+ 
++static int ipip_gro_complete(struct sk_buff *skb, int nhoff)
++{
++	skb->encapsulation = 1;
++	skb_shinfo(skb)->gso_type |= SKB_GSO_IPIP;
++	return inet_gro_complete(skb, nhoff);
++}
++
+ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
+ 			 unsigned short type, unsigned char protocol,
+ 			 struct net *net)
+@@ -1646,8 +1692,8 @@ static struct packet_offload ip_packet_offload __read_mostly = {
+ static const struct net_offload ipip_offload = {
+ 	.callbacks = {
+ 		.gso_segment	= inet_gso_segment,
+-		.gro_receive	= inet_gro_receive,
+-		.gro_complete	= inet_gro_complete,
++		.gro_receive	= ipip_gro_receive,
++		.gro_complete	= ipip_gro_complete,
+ 	},
+ };
+ 
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index b22a75c0a3d9..7841b35e5ab0 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -182,6 +182,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
+ 	u8 proto = NAPI_GRO_CB(skb)->proto;
+ 	const struct net_offload **offloads;
+ 
++	/* We can clear the encap_mark for FOU as we are essentially doing
++	 * one of two possible things.  We are either adding an L4 tunnel
++	 * header to the outer L3 tunnel header, or we are simply
++	 * treating the GRE tunnel header as though it is a UDP protocol
++	 * specific header such as VXLAN or GENEVE.
++	 */
++	NAPI_GRO_CB(skb)->encap_mark = 0;
++
+ 	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[proto]);
+@@ -349,6 +357,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
+ 		}
+ 	}
+ 
++	/* We can clear the encap_mark for GUE as we are essentially doing
++	 * one of two possible things.  We are either adding an L4 tunnel
++	 * header to the outer L3 tunnel header, or we are simply
++	 * treating the GRE tunnel header as though it is a UDP protocol
++	 * specific header such as VXLAN or GENEVE.
++	 */
++	NAPI_GRO_CB(skb)->encap_mark = 0;
++
+ 	rcu_read_lock();
+ 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
+ 	ops = rcu_dereference(offloads[guehdr->proto_ctype]);
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 53300b88d569..79ae0d7becbf 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ 	struct packet_offload *ptype;
+ 	__be16 type;
+ 
++	if (NAPI_GRO_CB(skb)->encap_mark)
++		goto out;
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
+ 	off = skb_gro_offset(skb);
+ 	hlen = off + sizeof(*greh);
+ 	greh = skb_gro_header_fast(skb, off);
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index f5203fba6236..2ba975272ff6 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -907,7 +907,6 @@ static bool icmp_echo(struct sk_buff *skb)
+  */
+ static bool icmp_timestamp(struct sk_buff *skb)
+ {
+-	struct timespec tv;
+ 	struct icmp_bxm icmp_param;
+ 	/*
+ 	 *	Too short.
+@@ -918,9 +917,7 @@ static bool icmp_timestamp(struct sk_buff *skb)
+ 	/*
+ 	 *	Fill in the current time as ms since midnight UT:
+ 	 */
+-	getnstimeofday(&tv);
+-	icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
+-					 tv.tv_nsec / NSEC_PER_MSEC);
++	icmp_param.data.times[1] = inet_current_timestamp();
+ 	icmp_param.data.times[2] = icmp_param.data.times[1];
+ 	if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
+ 		BUG();
+diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
+index bd246792360b..4d158ff1def1 100644
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -58,10 +58,9 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
+ 		if (opt->ts_needaddr)
+ 			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
+ 		if (opt->ts_needtime) {
+-			struct timespec tv;
+ 			__be32 midtime;
+-			getnstimeofday(&tv);
+-			midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
++
++			midtime = inet_current_timestamp();
+ 			memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
+ 		}
+ 		return;
+@@ -415,11 +414,10 @@ int ip_options_compile(struct net *net,
+ 					break;
+ 				}
+ 				if (timeptr) {
+-					struct timespec tv;
+-					u32  midtime;
+-					getnstimeofday(&tv);
+-					midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+-					put_unaligned_be32(midtime, timeptr);
++					__be32 midtime;
++
++					midtime = inet_current_timestamp();
++					memcpy(timeptr, &midtime, 4);
+ 					opt->is_changed = 1;
+ 				}
+ 			} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 2af7b7e1a0f6..dfcab88c3e74 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ 	unsigned int off = skb_gro_offset(skb);
+ 	int flush = 1;
+ 
+-	if (NAPI_GRO_CB(skb)->udp_mark ||
++	if (NAPI_GRO_CB(skb)->encap_mark ||
+ 	    (skb->ip_summed != CHECKSUM_PARTIAL &&
+ 	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+ 	     !NAPI_GRO_CB(skb)->csum_valid))
+ 		goto out;
+ 
+-	/* mark that this skb passed once through the udp gro layer */
+-	NAPI_GRO_CB(skb)->udp_mark = 1;
++	/* mark that this skb passed once through the tunnel gro layer */
++	NAPI_GRO_CB(skb)->encap_mark = 1;
+ 
+ 	rcu_read_lock();
+ 	uo_priv = rcu_dereference(udp_offload_base);
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index db0b8428d248..9b01da54d475 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -258,6 +258,19 @@ out:
+ 	return pp;
+ }
+ 
++static struct sk_buff **sit_gro_receive(struct sk_buff **head,
++					struct sk_buff *skb)
++{
++	if (NAPI_GRO_CB(skb)->encap_mark) {
++		NAPI_GRO_CB(skb)->flush = 1;
++		return NULL;
++	}
++
++	NAPI_GRO_CB(skb)->encap_mark = 1;
++
++	return ipv6_gro_receive(head, skb);
++}
++
+ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
+ {
+ 	const struct net_offload *ops;
+diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
+index dace13d7638e..b7569238a410 100644
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -539,9 +539,13 @@ gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
+ 		return gss_new;
+ 	gss_msg = gss_add_msg(gss_new);
+ 	if (gss_msg == gss_new) {
+-		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
++		int res;
++		atomic_inc(&gss_msg->count);
++		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
+ 		if (res) {
+ 			gss_unhash_msg(gss_new);
++			atomic_dec(&gss_msg->count);
++			gss_release_msg(gss_new);
+ 			gss_msg = ERR_PTR(res);
+ 		}
+ 	} else
+@@ -834,6 +838,7 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
+ 			warn_gssd();
+ 		gss_release_msg(gss_msg);
+ 	}
++	gss_release_msg(gss_msg);
+ }
+ 
+ static void gss_pipe_dentry_destroy(struct dentry *dir,
+diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
+index 8275f0e55106..4b2f44c20caf 100644
+--- a/scripts/kconfig/nconf.gui.c
++++ b/scripts/kconfig/nconf.gui.c
+@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window,
+ 	WINDOW *prompt_win;
+ 	WINDOW *form_win;
+ 	PANEL *panel;
+-	int i, x, y;
++	int i, x, y, lines, columns, win_lines, win_cols;
+ 	int res = -1;
+ 	int cursor_position = strlen(init);
+ 	int cursor_form_win;
+ 	char *result = *resultp;
+ 
++	getmaxyx(stdscr, lines, columns);
++
+ 	if (strlen(init)+1 > *result_len) {
+ 		*result_len = strlen(init)+1;
+ 		*resultp = result = realloc(result, *result_len);
+@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window,
+ 	if (title)
+ 		prompt_width = max(prompt_width, strlen(title));
+ 
++	win_lines = min(prompt_lines+6, lines-2);
++	win_cols = min(prompt_width+7, columns-2);
++	prompt_lines = max(win_lines-6, 0);
++	prompt_width = max(win_cols-7, 0);
++
+ 	/* place dialog in middle of screen */
+-	y = (getmaxy(stdscr)-(prompt_lines+4))/2;
+-	x = (getmaxx(stdscr)-(prompt_width+4))/2;
++	y = (lines-win_lines)/2;
++	x = (columns-win_cols)/2;
+ 
+ 	strncpy(result, init, *result_len);
+ 
+ 	/* create the windows */
+-	win = newwin(prompt_lines+6, prompt_width+7, y, x);
++	win = newwin(win_lines, win_cols, y, x);
+ 	prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
+ 	form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
+ 	keypad(form_win, TRUE);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 2a9ec9706db8..b93458698335 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5876,6 +5876,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x12, 0x90a60180},
+ 		{0x14, 0x90170120},
+ 		{0x21, 0x02211030}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x1b, 0x01011020},
++		{0x21, 0x02211010}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x90a60160},
+diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+index 2fbaf2c75d17..5f43e1ced179 100644
+--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
++++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+@@ -739,6 +739,9 @@ static int sst_soc_prepare(struct device *dev)
+ 	struct sst_data *drv = dev_get_drvdata(dev);
+ 	int i;
+ 
++	if (!drv->soc_card)
++		return 0;
++
+ 	/* suspend all pcms first */
+ 	snd_soc_suspend(drv->soc_card->dev);
+ 	snd_soc_poweroff(drv->soc_card->dev);
+@@ -761,6 +764,9 @@ static void sst_soc_complete(struct device *dev)
+ 	struct sst_data *drv = dev_get_drvdata(dev);
+ 	int i;
+ 
++	if (!drv->soc_card)
++		return;
++
+ 	/* restart SSPs */
+ 	for (i = 0; i < drv->soc_card->num_rtd; i++) {
+ 		struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai;
+diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
+index 2c44139b4041..33db205dd12b 100644
+--- a/sound/usb/hiface/pcm.c
++++ b/sound/usb/hiface/pcm.c
+@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)
+ 
+ 	mutex_lock(&rt->stream_mutex);
+ 
++	hiface_pcm_stream_stop(rt);
++
+ 	sub->dma_off = 0;
+ 	sub->period_off = 0;
+ 
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index e0fc02763024..4a033cbbd361 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -925,9 +925,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
+ 	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+ 	case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
+ 	case USB_ID(0x046d, 0x0991):
++	case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
+ 	/* Most audio usb devices lie about volume resolution.
+ 	 * Most Logitech webcams have res = 384.
+-	 * Proboly there is some logitech magic behind this number --fishor
++	 * Probably there is some logitech magic behind this number --fishor
+ 	 */
+ 		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+ 			usb_audio_info(chip,


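The common thread in the 4.1.22 networking hunks above is replacing the UDP-only udp_mark with a generic encap_mark in the GRO control block: the first tunnel layer (ipip, sit, or GRE) to see a packet claims the mark, and any nested tunnel layer that finds it already set flushes the packet to the normal receive path instead of recursing; FOU and GUE deliberately clear it again, per the comments in net/ipv4/fou.c. A minimal user-space sketch of that guard, with simplified stand-in types (gro_cb here is hypothetical, not the kernel's NAPI_GRO_CB):

#include <stdio.h>

/* Stand-in for NAPI_GRO_CB(skb): one shared bit per packet. */
struct gro_cb {
	unsigned int encap_mark : 1;	/* set once any encap layer claims the packet */
	unsigned int flush      : 1;	/* tell GRO to give up and pass the skb through */
};

/* Hypothetical inner handler standing in for inet_gro_receive() etc. */
static int inner_gro_receive(struct gro_cb *cb)
{
	(void)cb;
	return 0;	/* would attempt aggregation here */
}

/* Mirrors the ipip/sit/gre pattern added in 4.1.22: the first tunnel
 * layer claims the mark, a stacked tunnel sees it set and flushes. */
static int tunnel_gro_receive(struct gro_cb *cb)
{
	if (cb->encap_mark) {
		cb->flush = 1;	/* nested encapsulation: don't aggregate */
		return -1;
	}
	cb->encap_mark = 1;
	return inner_gro_receive(cb);
}

int main(void)
{
	struct gro_cb cb = { 0 };

	tunnel_gro_receive(&cb);	/* outer tunnel: mark claimed */
	tunnel_gro_receive(&cb);	/* stacked tunnel: flushed */
	printf("flush=%u\n", cb.flush);	/* prints flush=1 */
	return 0;
}

The single shared bit is what bounds the work per packet: however many encapsulation layers are stacked, GRO attempts aggregation at most once.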
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2017-01-10  4:02 Alice Ferrazzi
  0 siblings, 0 replies; 71+ messages in thread
From: Alice Ferrazzi @ 2017-01-10  4:02 UTC (permalink / raw
  To: gentoo-commits

commit:     3f861dc13e810781883d0f1cfe33a821ee3504f8
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Tue Jan 10 04:07:56 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Tue Jan 10 04:07:56 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3f861dc1

Linux patch 4.1.37

 0000_README                                      |    8 +-
 1036_linux-4.1.37.patch                          | 2732 ++++++++++++++++++++++
 1520_fix-race-condition-in-packet-set-ring.patch |   62 -
 3 files changed, 2736 insertions(+), 66 deletions(-)

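Most of the 1036_linux-4.1.37.patch body below is the backport of the VFS rename inode_change_ok(inode, attr) -> setattr_prepare(dentry, attr) (see the fs/attr.c hunk), and it is not purely mechanical: passing the dentry lets the helper run the security_inode_killpriv() handling for ATTR_KILL_PRIV itself, so notify_change() no longer strips that flag up front. A sketch of the calling convention before and after, using a hypothetical foofs filesystem (the foofs_* names are illustrative only; this is not a buildable module on its own):

#include <linux/fs.h>

/* Old-style ->setattr prologue, as removed throughout the patch below: */
static int foofs_setattr_old(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = inode_change_ok(inode, attr);	/* permission/rlimit checks only */
	if (err)
		return err;
	/* ... apply size/owner/time changes ... */
	return 0;
}

/* New-style prologue, as added: */
static int foofs_setattr_new(struct dentry *dentry, struct iattr *attr)
{
	int err;

	err = setattr_prepare(dentry, attr);	/* also handles ATTR_KILL_PRIV */
	if (err)
		return err;
	/* ... apply changes ... */
	return 0;
}
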
diff --git a/0000_README b/0000_README
index 7e1cb6f..e28d8f1 100644
--- a/0000_README
+++ b/0000_README
@@ -187,6 +187,10 @@ Patch:  1035_linux-4.1.36.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.36
 
+Patch:  1036_linux-4.1.37.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.37
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
@@ -195,10 +199,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1520_fix-race-condition-in-packet-set-ring.patch
-From:   https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=84ac7260236a49c79eede91617700174c2c19b0c
-Desc:   packet: fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.
-
 Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.

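The other recurring change in the fs/ ACL hunks below (9p, btrfs, ceph, ext2/3/4, f2fs, gfs2, hfsplus, jffs2, jfs, ...) swaps open-coded posix_acl_equiv_mode() handling for posix_acl_update_mode(). The helper's own definition is not part of this excerpt; reconstructed from its callers, it is essentially the following (the setgid clearing addresses CVE-2016-7097):

#include <linux/posix_acl.h>

/* Sketch of the upstream helper, not a line-for-line copy. */
int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
			  struct posix_acl **acl)
{
	umode_t mode = inode->i_mode;
	int error;

	error = posix_acl_equiv_mode(*acl, &mode);
	if (error < 0)
		return error;
	if (error == 0)
		*acl = NULL;	/* ACL is fully representable in the mode bits */
	if (!in_group_p(inode->i_gid) &&
	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
		mode &= ~S_ISGID;	/* drop setgid on unprivileged ACL changes */
	*mode_p = mode;
	return 0;
}

Callers thus drop the repeated "if (rc == 0) acl = NULL;" boilerplate and, more importantly, gain the SGID clearing they were all missing.
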
diff --git a/1036_linux-4.1.37.patch b/1036_linux-4.1.37.patch
new file mode 100644
index 0000000..c9fbe6d
--- /dev/null
+++ b/1036_linux-4.1.37.patch
@@ -0,0 +1,2732 @@
+diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
+index 0632b3aad83e..715776f06df6 100644
+--- a/Documentation/arm/CCN.txt
++++ b/Documentation/arm/CCN.txt
+@@ -38,7 +38,7 @@ Example of perf tool use:
+ / # perf list | grep ccn
+   ccn/cycles/                                        [Kernel PMU event]
+ <...>
+-  ccn/xp_valid_flit/                                 [Kernel PMU event]
++  ccn/xp_valid_flit,xp=?,port=?,vc=?,dir=?/          [Kernel PMU event]
+ <...>
+ 
+ / # perf stat -C 0 -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \
+diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
+index e69274de8d0c..0500895b768f 100644
+--- a/Documentation/filesystems/porting
++++ b/Documentation/filesystems/porting
+@@ -287,8 +287,8 @@ implementing on-disk size changes.  Start with a copy of the old inode_setattr
+ and vmtruncate, and the reorder the vmtruncate + foofs_vmtruncate sequence to
+ be in order of zeroing blocks using block_truncate_page or similar helpers,
+ size update and on finally on-disk truncation which should not fail.
+-inode_change_ok now includes the size checks for ATTR_SIZE and must be called
+-in the beginning of ->setattr unconditionally.
++setattr_prepare (which used to be inode_change_ok) now includes the size checks
++for ATTR_SIZE and must be called in the beginning of ->setattr unconditionally.
+ 
+ [mandatory]
+ 
+diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
+index 302b5ed616a6..35e17f748ca7 100644
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -265,6 +265,13 @@ aio-nr can grow to.
+ 
+ ==============================================================
+ 
++mount-max:
++
++This denotes the maximum number of mounts that may exist
++in a mount namespace.
++
++==============================================================
++
+ 
+ 2. /proc/sys/fs/binfmt_misc
+ ----------------------------------------------------------
+diff --git a/Makefile b/Makefile
+index aa9fbee620ff..df72b644f78c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 36
++SUBLEVEL = 37
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index b52101d37ec7..ee21eecbe0d2 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -117,7 +117,7 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+ /* The ARM override for dma_max_pfn() */
+ static inline unsigned long dma_max_pfn(struct device *dev)
+ {
+-	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
++	return dma_to_pfn(dev, *dev->dma_mask);
+ }
+ #define dma_max_pfn(dev) dma_max_pfn(dev)
+ 
+diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
+index cd791948b286..7e459b7ee708 100644
+--- a/arch/x86/include/asm/tlbflush.h
++++ b/arch/x86/include/asm/tlbflush.h
+@@ -32,7 +32,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+ /* Initialize cr4 shadow for this CPU. */
+ static inline void cr4_init_shadow(void)
+ {
+-	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
++	this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+ }
+ 
+ /* Set in this cpu's CR4. */
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 2c835e356349..d445c5f1aeb1 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -12,6 +12,7 @@ targets += purgatory.ro
+ 
+ KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
+ KBUILD_CFLAGS += -m$(BITS)
++KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
+ 
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ 		$(call if_changed,ld)
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index 27fd0dacad5f..4d523cfe51ce 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -183,6 +183,7 @@ struct arm_ccn {
+ 	struct arm_ccn_component *xp;
+ 
+ 	struct arm_ccn_dt dt;
++	int mn_id;
+ };
+ 
+ 
+@@ -322,6 +323,7 @@ struct arm_ccn_pmu_event {
+ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ 		struct device_attribute *attr, char *buf)
+ {
++	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
+ 	struct arm_ccn_pmu_event *event = container_of(attr,
+ 			struct arm_ccn_pmu_event, attr);
+ 	ssize_t res;
+@@ -336,6 +338,26 @@ static ssize_t arm_ccn_pmu_event_show(struct device *dev,
+ 	if (event->mask)
+ 		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
+ 				event->mask);
++
++	/* Arguments required by an event */
++	switch (event->type) {
++	case CCN_TYPE_CYCLES:
++		break;
++	case CCN_TYPE_XP:
++		res += snprintf(buf + res, PAGE_SIZE - res,
++				",xp=?,port=?,vc=?,dir=?");
++		if (event->event == CCN_EVENT_WATCHPOINT)
++			res += snprintf(buf + res, PAGE_SIZE - res,
++					",cmp_l=?,cmp_h=?,mask=?");
++		break;
++	case CCN_TYPE_MN:
++		res += snprintf(buf + res, PAGE_SIZE - res, ",node=%d", ccn->mn_id);
++		break;
++	default:
++		res += snprintf(buf + res, PAGE_SIZE - res, ",node=?");
++		break;
++	}
++
+ 	res += snprintf(buf + res, PAGE_SIZE - res, "\n");
+ 
+ 	return res;
+@@ -360,9 +382,9 @@ static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
+ }
+ 
+ static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
+-	CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
+-	CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
+-	CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(eobarrier, "dir=1,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(ecbarrier, "dir=1,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
++	CCN_EVENT_MN(dvmop, "dir=1,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
+ 	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
+ 	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
+ 	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
+@@ -649,6 +671,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
+ 
+ 	/* Validate node/xp vs topology */
+ 	switch (type) {
++	case CCN_TYPE_MN:
++		if (node_xp != ccn->mn_id) {
++			dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp);
++			return -EINVAL;
++		}
++		break;
+ 	case CCN_TYPE_XP:
+ 		if (node_xp >= ccn->num_xps) {
+ 			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
+@@ -1214,6 +1242,8 @@ static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
+ 
+ 	switch (type) {
+ 	case CCN_TYPE_MN:
++		ccn->mn_id = id;
++		return 0;
+ 	case CCN_TYPE_DT:
+ 		return 0;
+ 	case CCN_TYPE_XP:
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index cd0554f68316..4ff8c334e7c8 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -55,6 +55,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ 	return submit;
+ }
+ 
++static inline unsigned long __must_check
++copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
++{
++	if (access_ok(VERIFY_READ, from, n))
++		return __copy_from_user_inatomic(to, from, n);
++	return -EFAULT;
++}
++
+ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		struct drm_msm_gem_submit *args, struct drm_file *file)
+ {
+@@ -62,6 +70,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	int ret = 0;
+ 
+ 	spin_lock(&file->table_lock);
++	pagefault_disable();
+ 
+ 	for (i = 0; i < args->nr_bos; i++) {
+ 		struct drm_msm_gem_submit_bo submit_bo;
+@@ -70,10 +79,15 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 		void __user *userptr =
+ 			to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ 
+-		ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+-		if (ret) {
+-			ret = -EFAULT;
+-			goto out_unlock;
++		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
++		if (unlikely(ret)) {
++			pagefault_enable();
++			spin_unlock(&file->table_lock);
++			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
++			if (ret)
++				goto out;
++			spin_lock(&file->table_lock);
++			pagefault_disable();
+ 		}
+ 
+ 		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+@@ -113,9 +127,12 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
+ 	}
+ 
+ out_unlock:
+-	submit->nr_bos = i;
++	pagefault_enable();
+ 	spin_unlock(&file->table_lock);
+ 
++out:
++	submit->nr_bos = i;
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
+index feb6d18de78d..16970fccc5cd 100644
+--- a/drivers/mtd/nand/davinci_nand.c
++++ b/drivers/mtd/nand/davinci_nand.c
+@@ -241,6 +241,9 @@ static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+ 	unsigned long flags;
+ 	u32 val;
+ 
++	/* Reset ECC hardware */
++	davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
++
+ 	spin_lock_irqsave(&davinci_nand_lock, flags);
+ 
+ 	/* Start 4-bit ECC calculation for read/write */
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index ad535a854e5c..eab132778e67 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -21,6 +21,7 @@
+ #include <linux/slab.h>
+ #include <linux/netdevice.h>
+ #include <linux/if_arp.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/dev.h>
+ #include <linux/can/skb.h>
+@@ -471,9 +472,8 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
+ /*
+  * CAN device restart for bus-off recovery
+  */
+-static void can_restart(unsigned long data)
++static void can_restart(struct net_device *dev)
+ {
+-	struct net_device *dev = (struct net_device *)data;
+ 	struct can_priv *priv = netdev_priv(dev);
+ 	struct net_device_stats *stats = &dev->stats;
+ 	struct sk_buff *skb;
+@@ -513,6 +513,14 @@ restart:
+ 		netdev_err(dev, "Error %d during restart", err);
+ }
+ 
++static void can_restart_work(struct work_struct *work)
++{
++	struct delayed_work *dwork = to_delayed_work(work);
++	struct can_priv *priv = container_of(dwork, struct can_priv, restart_work);
++
++	can_restart(priv->dev);
++}
++
+ int can_restart_now(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+@@ -526,8 +534,8 @@ int can_restart_now(struct net_device *dev)
+ 	if (priv->state != CAN_STATE_BUS_OFF)
+ 		return -EBUSY;
+ 
+-	/* Runs as soon as possible in the timer context */
+-	mod_timer(&priv->restart_timer, jiffies);
++	cancel_delayed_work_sync(&priv->restart_work);
++	can_restart(dev);
+ 
+ 	return 0;
+ }
+@@ -548,8 +556,8 @@ void can_bus_off(struct net_device *dev)
+ 	netif_carrier_off(dev);
+ 
+ 	if (priv->restart_ms)
+-		mod_timer(&priv->restart_timer,
+-			  jiffies + (priv->restart_ms * HZ) / 1000);
++		schedule_delayed_work(&priv->restart_work,
++				      msecs_to_jiffies(priv->restart_ms));
+ }
+ EXPORT_SYMBOL_GPL(can_bus_off);
+ 
+@@ -658,6 +666,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 		return NULL;
+ 
+ 	priv = netdev_priv(dev);
++	priv->dev = dev;
+ 
+ 	if (echo_skb_max) {
+ 		priv->echo_skb_max = echo_skb_max;
+@@ -667,7 +676,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
+ 
+ 	priv->state = CAN_STATE_STOPPED;
+ 
+-	init_timer(&priv->restart_timer);
++	INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
+ 
+ 	return dev;
+ }
+@@ -748,8 +757,6 @@ int open_candev(struct net_device *dev)
+ 	if (!netif_carrier_ok(dev))
+ 		netif_carrier_on(dev);
+ 
+-	setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(open_candev);
+@@ -764,7 +771,7 @@ void close_candev(struct net_device *dev)
+ {
+ 	struct can_priv *priv = netdev_priv(dev);
+ 
+-	del_timer_sync(&priv->restart_timer);
++	cancel_delayed_work_sync(&priv->restart_work);
+ 	can_flush_echo_skb(dev);
+ }
+ EXPORT_SYMBOL_GPL(close_candev);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 940f78e41993..d9e873c3a273 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -635,7 +635,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
+ 		}
+ 	}
+ 
+-	pp = eth_gro_receive(head, skb);
++	pp = call_gro_receive(eth_gro_receive, head, skb);
+ 
+ out:
+ 	skb_gro_remcsum_cleanup(skb, &grc);
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 2926295a936d..c9f87cdc85c1 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2300,7 +2300,8 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 	}
+ 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
+ 		unsigned char *ver_addr;
+-		int32_t user_len, cnt2end;
++		uint32_t user_len;
++		int32_t cnt2end;
+ 		uint8_t *pQbuffer, *ptmpuserbuffer;
+ 		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
+ 		if (!ver_addr) {
+@@ -2309,6 +2310,11 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ 		}
+ 		ptmpuserbuffer = ver_addr;
+ 		user_len = pcmdmessagefld->cmdmessage.Length;
++		if (user_len > ARCMSR_API_DATA_BUFLEN) {
++			retvalue = ARCMSR_MESSAGE_FAIL;
++			kfree(ver_addr);
++			goto message_out;
++		}
+ 		memcpy(ptmpuserbuffer,
+ 			pcmdmessagefld->messagedatabuffer, user_len);
+ 		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
+index 14e5c7cea929..1fcd31c6b37b 100644
+--- a/drivers/scsi/megaraid/megaraid_sas.h
++++ b/drivers/scsi/megaraid/megaraid_sas.h
+@@ -1862,7 +1862,7 @@ struct megasas_instance_template {
+ };
+ 
+ #define MEGASAS_IS_LOGICAL(scp)						\
+-	(scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
++	((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
+ 
+ #define MEGASAS_DEV_INDEX(inst, scp)					\
+ 	((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) + 	\
+diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
+index a27af7882170..d60425996948 100644
+--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
++++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
+@@ -1323,7 +1323,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
+ 		attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
+ 	}
+ 
+-	/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
++	/* POSIX: check before ATTR_*TIME_SET set (from setattr_prepare) */
+ 	if (attr->ia_valid & TIMES_SET_FLAGS) {
+ 		if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
+ 		    !capable(CFS_CAP_FOWNER))
+diff --git a/fs/9p/acl.c b/fs/9p/acl.c
+index 31c010372660..de59b4892bfb 100644
+--- a/fs/9p/acl.c
++++ b/fs/9p/acl.c
+@@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			retval = posix_acl_equiv_mode(acl, &mode);
+-			if (retval < 0)
++			struct iattr iattr;
++
++			retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
++			if (retval)
+ 				goto err_out;
+-			else {
+-				struct iattr iattr;
+-				if (retval == 0) {
+-					/*
+-					 * ACL can be represented
+-					 * by the mode bits. So don't
+-					 * update ACL.
+-					 */
+-					acl = NULL;
+-					value = NULL;
+-					size = 0;
+-				}
+-				/* Updte the mode bits */
+-				iattr.ia_mode = ((mode & S_IALLUGO) |
+-						 (inode->i_mode & ~S_IALLUGO));
+-				iattr.ia_valid = ATTR_MODE;
+-				/* FIXME should we update ctime ?
+-				 * What is the following setxattr update the
+-				 * mode ?
++			if (!acl) {
++				/*
++				 * ACL can be represented
++				 * by the mode bits. So don't
++				 * update ACL.
+ 				 */
+-				v9fs_vfs_setattr_dotl(dentry, &iattr);
++				value = NULL;
++				size = 0;
+ 			}
++			iattr.ia_valid = ATTR_MODE;
++			/* FIXME should we update ctime ?
++			 * What is the following setxattr update the
++			 * mode ?
++			 */
++			v9fs_vfs_setattr_dotl(dentry, &iattr);
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 53f1e8a21707..99c3c4ffe1d9 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -1094,7 +1094,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct p9_wstat wstat;
+ 
+ 	p9_debug(P9_DEBUG_VFS, "\n");
+-	retval = inode_change_ok(d_inode(dentry), iattr);
++	retval = setattr_prepare(dentry, iattr);
+ 	if (retval)
+ 		return retval;
+ 
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 4d3ecfb55fcf..ce7ab92f7e84 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -560,7 +560,7 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+ 
+ 	p9_debug(P9_DEBUG_VFS, "\n");
+ 
+-	retval = inode_change_ok(inode, iattr);
++	retval = setattr_prepare(dentry, iattr);
+ 	if (retval)
+ 		return retval;
+ 
+diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
+index 335055d828e4..f57baaa511aa 100644
+--- a/fs/adfs/inode.c
++++ b/fs/adfs/inode.c
+@@ -303,7 +303,7 @@ adfs_notify_change(struct dentry *dentry, struct iattr *attr)
+ 	unsigned int ia_valid = attr->ia_valid;
+ 	int error;
+ 	
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 
+ 	/*
+ 	 * we can't change the UID or GID of any file -
+diff --git a/fs/affs/inode.c b/fs/affs/inode.c
+index a022f4accd76..87953b94a5ae 100644
+--- a/fs/affs/inode.c
++++ b/fs/affs/inode.c
+@@ -218,7 +218,7 @@ affs_notify_change(struct dentry *dentry, struct iattr *attr)
+ 
+ 	pr_debug("notify_change(%lu,0x%x)\n", inode->i_ino, attr->ia_valid);
+ 
+-	error = inode_change_ok(inode,attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		goto out;
+ 
+diff --git a/fs/attr.c b/fs/attr.c
+index 6530ced19697..ee697ddc6c2e 100644
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -17,19 +17,22 @@
+ #include <linux/ima.h>
+ 
+ /**
+- * inode_change_ok - check if attribute changes to an inode are allowed
+- * @inode:	inode to check
++ * setattr_prepare - check if attribute changes to a dentry are allowed
++ * @dentry:	dentry to check
+  * @attr:	attributes to change
+  *
+  * Check if we are allowed to change the attributes contained in @attr
+- * in the given inode.  This includes the normal unix access permission
+- * checks, as well as checks for rlimits and others.
++ * in the given dentry.  This includes the normal unix access permission
++ * checks, as well as checks for rlimits and others. The function also clears
++ * SGID bit from mode if user is not allowed to set it. Also file capabilities
++ * and IMA extended attributes are cleared if ATTR_KILL_PRIV is set.
+  *
+  * Should be called as the first thing in ->setattr implementations,
+  * possibly after taking additional locks.
+  */
+-int inode_change_ok(const struct inode *inode, struct iattr *attr)
++int setattr_prepare(struct dentry *dentry, struct iattr *attr)
+ {
++	struct inode *inode = d_inode(dentry);
+ 	unsigned int ia_valid = attr->ia_valid;
+ 
+ 	/*
+@@ -44,7 +47,7 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
+ 
+ 	/* If force is set do it anyway. */
+ 	if (ia_valid & ATTR_FORCE)
+-		return 0;
++		goto kill_priv;
+ 
+ 	/* Make sure a caller can chown. */
+ 	if ((ia_valid & ATTR_UID) &&
+@@ -77,9 +80,19 @@ int inode_change_ok(const struct inode *inode, struct iattr *attr)
+ 			return -EPERM;
+ 	}
+ 
++kill_priv:
++	/* User has permission for the change */
++	if (ia_valid & ATTR_KILL_PRIV) {
++		int error;
++
++		error = security_inode_killpriv(dentry);
++		if (error)
++			return error;
++	}
++
+ 	return 0;
+ }
+-EXPORT_SYMBOL(inode_change_ok);
++EXPORT_SYMBOL(setattr_prepare);
+ 
+ /**
+  * inode_newsize_ok - may this inode be truncated to a given size
+@@ -217,13 +230,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
+ 	if (!(ia_valid & ATTR_MTIME_SET))
+ 		attr->ia_mtime = now;
+ 	if (ia_valid & ATTR_KILL_PRIV) {
+-		attr->ia_valid &= ~ATTR_KILL_PRIV;
+-		ia_valid &= ~ATTR_KILL_PRIV;
+ 		error = security_inode_need_killpriv(dentry);
+-		if (error > 0)
+-			error = security_inode_killpriv(dentry);
+-		if (error)
++		if (error < 0)
+ 			return error;
++		if (error == 0)
++			ia_valid = attr->ia_valid &= ~ATTR_KILL_PRIV;
+ 	}
+ 
+ 	/*
+diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
+index 9a0124a95851..fb3e64d37cb4 100644
+--- a/fs/btrfs/acl.c
++++ b/fs/btrfs/acl.c
+@@ -83,11 +83,9 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			ret = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (ret < 0)
++			ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (ret)
+ 				return ret;
+-			if (ret == 0)
+-				acl = NULL;
+ 		}
+ 		ret = 0;
+ 		break;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index c4771af7fd6f..757a34bdd2b9 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4975,7 +4975,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (btrfs_root_readonly(root))
+ 		return -EROFS;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
+index 64fa248343f6..d7496e3dbfc4 100644
+--- a/fs/ceph/acl.c
++++ b/fs/ceph/acl.c
+@@ -94,11 +94,9 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			ret = posix_acl_equiv_mode(acl, &new_mode);
+-			if (ret < 0)
++			ret = posix_acl_update_mode(inode, &new_mode, &acl);
++			if (ret)
+ 				goto out;
+-			if (ret == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index e876e1944519..4484aaf5c478 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1728,7 +1728,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (ceph_snap(inode) != CEPH_NOSNAP)
+ 		return -EROFS;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err != 0)
+ 		return err;
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 9fb3bc26a2ab..f82dfe7ae3e8 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2134,7 +2134,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
+ 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ 		attrs->ia_valid |= ATTR_FORCE;
+ 
+-	rc = inode_change_ok(inode, attrs);
++	rc = setattr_prepare(direntry, attrs);
+ 	if (rc < 0)
+ 		goto out;
+ 
+@@ -2274,7 +2274,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
+ 		attrs->ia_valid |= ATTR_FORCE;
+ 
+-	rc = inode_change_ok(inode, attrs);
++	rc = setattr_prepare(direntry, attrs);
+ 	if (rc < 0) {
+ 		free_xid(xid);
+ 		return rc;
+diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
+index fc850b55db67..661dd53f0040 100644
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -942,7 +942,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
+ 	}
+ 	mutex_unlock(&crypt_stat->cs_mutex);
+ 
+-	rc = inode_change_ok(inode, ia);
++	rc = setattr_prepare(dentry, ia);
+ 	if (rc)
+ 		goto out;
+ 	if (ia->ia_valid & ATTR_SIZE) {
+diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
+index 786e4cc8c889..159c30c18395 100644
+--- a/fs/exofs/inode.c
++++ b/fs/exofs/inode.c
+@@ -1038,7 +1038,7 @@ int exofs_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	if (unlikely(error))
+ 		return error;
+ 
+-	error = inode_change_ok(inode, iattr);
++	error = setattr_prepare(dentry, iattr);
+ 	if (unlikely(error))
+ 		return error;
+ 
+diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
+index 27695e6f4e46..d6aeb84e90b6 100644
+--- a/fs/ext2/acl.c
++++ b/fs/ext2/acl.c
+@@ -193,15 +193,11 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 		case ACL_TYPE_ACCESS:
+ 			name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 			if (acl) {
+-				error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-				if (error < 0)
++				error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++				if (error)
+ 					return error;
+-				else {
+-					inode->i_ctime = CURRENT_TIME_SEC;
+-					mark_inode_dirty(inode);
+-					if (error == 0)
+-						acl = NULL;
+-				}
++				inode->i_ctime = CURRENT_TIME_SEC;
++				mark_inode_dirty(inode);
+ 			}
+ 			break;
+ 
+diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
+index f460ae36d5b7..f08604366cb5 100644
+--- a/fs/ext2/inode.c
++++ b/fs/ext2/inode.c
+@@ -1547,7 +1547,7 @@ int ext2_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, iattr);
++	error = setattr_prepare(dentry, iattr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
+index 2ee2dc4351d1..3613e87c688f 100644
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -3244,7 +3244,7 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
+ 	int error, rc = 0;
+ 	const unsigned int ia_valid = attr->ia_valid;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
+index 69b1e73026a5..c3fe1e323951 100644
+--- a/fs/ext4/acl.c
++++ b/fs/ext4/acl.c
+@@ -196,15 +196,11 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+-			else {
+-				inode->i_ctime = ext4_current_time(inode);
+-				ext4_mark_inode_dirty(handle, inode);
+-				if (error == 0)
+-					acl = NULL;
+-			}
++			inode->i_ctime = ext4_current_time(inode);
++			ext4_mark_inode_dirty(handle, inode);
+ 		}
+ 		break;
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 15213a567301..145d6ba4117d 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1008,6 +1008,7 @@ struct ext4_inode_info {
+ /*
+  * Mount flags set via mount options or defaults
+  */
++#define EXT4_MOUNT_NO_MBCACHE		0x00001 /* Disable mbcache */
+ #define EXT4_MOUNT_GRPID		0x00004	/* Create files with directory's group */
+ #define EXT4_MOUNT_DEBUG		0x00008	/* Some debugging messages */
+ #define EXT4_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 9b55c6f71bf2..5beca5c5413e 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -4751,7 +4751,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ 	int orphan = 0;
+ 	const unsigned int ia_valid = attr->ia_valid;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index afb3eb3e8b0f..4723d8b02747 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1144,6 +1144,7 @@ enum {
+ 	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
+ 	Opt_inode_readahead_blks, Opt_journal_ioprio,
+ 	Opt_dioread_nolock, Opt_dioread_lock,
++	Opt_no_mbcache,
+ 	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
+ 	Opt_max_dir_size_kb, Opt_nojournal_checksum,
+ };
+@@ -1222,6 +1223,7 @@ static const match_table_t tokens = {
+ 	{Opt_discard, "discard"},
+ 	{Opt_nodiscard, "nodiscard"},
+ 	{Opt_init_itable, "init_itable=%u"},
++	{Opt_no_mbcache, "no_mbcache"},
+ 	{Opt_init_itable, "init_itable"},
+ 	{Opt_noinit_itable, "noinit_itable"},
+ 	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
+@@ -1385,6 +1387,7 @@ static const struct mount_opts {
+ 	{Opt_noauto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_SET},
+ 	{Opt_auto_da_alloc, EXT4_MOUNT_NO_AUTO_DA_ALLOC, MOPT_CLEAR},
+ 	{Opt_noinit_itable, EXT4_MOUNT_INIT_INODE_TABLE, MOPT_CLEAR},
++	{Opt_no_mbcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
+ 	{Opt_commit, 0, MOPT_GTE0},
+ 	{Opt_max_batch_time, 0, MOPT_GTE0},
+ 	{Opt_min_batch_time, 0, MOPT_GTE0},
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 16e28c08d1e8..cdc26e54400f 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -80,7 +80,7 @@
+ # define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
+ #endif
+ 
+-static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
++static void ext4_xattr_cache_insert(struct inode *, struct buffer_head *);
+ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
+ 						 struct ext4_xattr_header *,
+ 						 struct mb_cache_entry **);
+@@ -278,7 +278,6 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
+ 	struct ext4_xattr_entry *entry;
+ 	size_t size;
+ 	int error;
+-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+ 
+ 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
+ 		  name_index, name, buffer, (long)buffer_size);
+@@ -300,7 +299,7 @@ bad_block:
+ 		error = -EIO;
+ 		goto cleanup;
+ 	}
+-	ext4_xattr_cache_insert(ext4_mb_cache, bh);
++	ext4_xattr_cache_insert(inode, bh);
+ 	entry = BFIRST(bh);
+ 	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
+ 	if (error == -EIO)
+@@ -426,7 +425,6 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 	struct inode *inode = d_inode(dentry);
+ 	struct buffer_head *bh = NULL;
+ 	int error;
+-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+ 
+ 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
+ 		  buffer, (long)buffer_size);
+@@ -448,7 +446,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+ 		error = -EIO;
+ 		goto cleanup;
+ 	}
+-	ext4_xattr_cache_insert(ext4_mb_cache, bh);
++	ext4_xattr_cache_insert(inode, bh);
+ 	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
+ 
+ cleanup:
+@@ -547,7 +545,8 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
+ 	int error = 0;
+ 	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+ 
+-	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
++	if (!test_opt(inode->i_sb, NO_MBCACHE))
++		ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
+ 	BUFFER_TRACE(bh, "get_write_access");
+ 	error = ext4_journal_get_write_access(handle, bh);
+ 	if (error)
+@@ -788,8 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 	if (i->value && i->value_len > sb->s_blocksize)
+ 		return -ENOSPC;
+ 	if (s->base) {
+-		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
+-					bs->bh->b_blocknr);
++		if (!test_opt(inode->i_sb, NO_MBCACHE))
++			ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
++						bs->bh->b_blocknr);
+ 		BUFFER_TRACE(bs->bh, "get_write_access");
+ 		error = ext4_journal_get_write_access(handle, bs->bh);
+ 		if (error)
+@@ -807,7 +807,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
+ 				if (!IS_LAST_ENTRY(s->first))
+ 					ext4_xattr_rehash(header(s->base),
+ 							  s->here);
+-				ext4_xattr_cache_insert(ext4_mb_cache,
++				ext4_xattr_cache_insert(inode,
+ 					bs->bh);
+ 			}
+ 			unlock_buffer(bs->bh);
+@@ -892,7 +892,8 @@ inserted:
+ 				if (error)
+ 					goto cleanup_dquot;
+ 			}
+-			mb_cache_entry_release(ce);
++			if (ce)
++				mb_cache_entry_release(ce);
+ 			ce = NULL;
+ 		} else if (bs->bh && s->base == bs->bh->b_data) {
+ 			/* We were modifying this block in-place. */
+@@ -939,7 +940,7 @@ getblk_failed:
+ 			memcpy(new_bh->b_data, s->base, new_bh->b_size);
+ 			set_buffer_uptodate(new_bh);
+ 			unlock_buffer(new_bh);
+-			ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
++			ext4_xattr_cache_insert(inode, new_bh);
+ 			error = ext4_handle_dirty_xattr_block(handle,
+ 							      inode, new_bh);
+ 			if (error)
+@@ -1529,12 +1530,17 @@ ext4_xattr_put_super(struct super_block *sb)
+  * Returns 0, or a negative error number on failure.
+  */
+ static void
+-ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
++ext4_xattr_cache_insert(struct inode *inode, struct buffer_head *bh)
+ {
++	struct super_block *sb = inode->i_sb;
++	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+ 	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+ 	struct mb_cache_entry *ce;
+ 	int error;
+ 
++	if (test_opt(sb, NO_MBCACHE))
++		return;
++
+ 	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
+ 	if (!ce) {
+ 		ea_bdebug(bh, "out of memory");
+@@ -1609,6 +1615,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
+ 	struct mb_cache_entry *ce;
+ 	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+ 
++	if (test_opt(inode->i_sb, NO_MBCACHE))
++		return NULL;
+ 	if (!header->h_hash)
+ 		return NULL;  /* never share */
+ 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
+diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
+index 4320ffab3495..c5e4a1856a0f 100644
+--- a/fs/f2fs/acl.c
++++ b/fs/f2fs/acl.c
+@@ -214,12 +214,10 @@ static int __f2fs_set_acl(struct inode *inode, int type,
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+ 			set_acl_inode(fi, inode->i_mode);
+-			if (error == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 2b52e48d7482..85e40c0fdcc4 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -617,7 +617,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct f2fs_inode_info *fi = F2FS_I(inode);
+ 	int err;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/fat/file.c b/fs/fat/file.c
+index 442d50a0e33e..5d37650483c6 100644
+--- a/fs/fat/file.c
++++ b/fs/fat/file.c
+@@ -388,7 +388,7 @@ int fat_setattr(struct dentry *dentry, struct iattr *attr)
+ 			attr->ia_valid &= ~TIMES_SET_FLAGS;
+ 	}
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	attr->ia_valid = ia_valid;
+ 	if (error) {
+ 		if (sbi->options.quiet)
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 0572bca49f15..88b09a33d117 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1602,9 +1602,10 @@ int fuse_flush_times(struct inode *inode, struct fuse_file *ff)
+  * vmtruncate() doesn't allow for this case, so do the rlimit checking
+  * and the actual truncation by hand.
+  */
+-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
++int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ 		    struct file *file)
+ {
++	struct inode *inode = d_inode(dentry);
+ 	struct fuse_conn *fc = get_fuse_conn(inode);
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
+ 	FUSE_ARGS(args);
+@@ -1619,7 +1620,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
+ 	if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
+ 		attr->ia_valid |= ATTR_FORCE;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+@@ -1718,9 +1719,9 @@ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
+ 		return -EACCES;
+ 
+ 	if (attr->ia_valid & ATTR_FILE)
+-		return fuse_do_setattr(inode, attr, attr->ia_file);
++		return fuse_do_setattr(entry, attr, attr->ia_file);
+ 	else
+-		return fuse_do_setattr(inode, attr, NULL);
++		return fuse_do_setattr(entry, attr, NULL);
+ }
+ 
+ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index d8f29ef2d819..1f03f0a36e35 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -2797,7 +2797,7 @@ static void fuse_do_truncate(struct file *file)
+ 	attr.ia_file = file;
+ 	attr.ia_valid |= ATTR_FILE;
+ 
+-	fuse_do_setattr(inode, &attr, file);
++	fuse_do_setattr(file->f_path.dentry, &attr, file);
+ }
+ 
+ static inline loff_t fuse_round_up(loff_t off)
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index 85f9d8273455..30d2bde45f68 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -913,7 +913,7 @@ bool fuse_write_update_size(struct inode *inode, loff_t pos);
+ int fuse_flush_times(struct inode *inode, struct fuse_file *ff);
+ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
+ 
+-int fuse_do_setattr(struct inode *inode, struct iattr *attr,
++int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ 		    struct file *file);
+ 
+ void fuse_set_initialized(struct fuse_conn *fc);
+diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
+index 1be3b061c05c..ff0ac96a8e7b 100644
+--- a/fs/gfs2/acl.c
++++ b/fs/gfs2/acl.c
+@@ -79,17 +79,11 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	if (type == ACL_TYPE_ACCESS) {
+ 		umode_t mode = inode->i_mode;
+ 
+-		error = posix_acl_equiv_mode(acl, &mode);
+-		if (error < 0)
++		error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++		if (error)
+ 			return error;
+-
+-		if (error == 0)
+-			acl = NULL;
+-
+-		if (mode != inode->i_mode) {
+-			inode->i_mode = mode;
++		if (mode != inode->i_mode)
+ 			mark_inode_dirty(inode);
+-		}
+ 	}
+ 
+ 	if (acl) {
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index 1b3ca7a2e3fc..6f7f848a3c4e 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -1759,7 +1759,7 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ 		goto out;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		goto out;
+ 
+diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
+index b99ebddb10cb..6409b8b4afd4 100644
+--- a/fs/hfs/inode.c
++++ b/fs/hfs/inode.c
+@@ -604,7 +604,7 @@ int hfs_inode_setattr(struct dentry *dentry, struct iattr * attr)
+ 	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr); /* basic permission checks */
++	error = setattr_prepare(dentry, attr); /* basic permission checks */
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
+index 6dd107d7421e..d87c8a27e063 100644
+--- a/fs/hfsplus/inode.c
++++ b/fs/hfsplus/inode.c
+@@ -246,7 +246,7 @@ static int hfsplus_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c
+index df0c9af68d05..71b3087b7e32 100644
+--- a/fs/hfsplus/posix_acl.c
++++ b/fs/hfsplus/posix_acl.c
+@@ -68,8 +68,8 @@ int hfsplus_set_posix_acl(struct inode *inode, struct posix_acl *acl,
+ 	case ACL_TYPE_ACCESS:
+ 		xattr_name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			err = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (err < 0)
++			err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (err)
+ 				return err;
+ 		}
+ 		err = 0;
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index f895a85d9304..81ce4e4ad0f9 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -812,7 +812,7 @@ static int hostfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	int fd = HOSTFS_I(inode)->fd;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
+index 933c73780813..efbed9520fdc 100644
+--- a/fs/hpfs/inode.c
++++ b/fs/hpfs/inode.c
+@@ -272,7 +272,7 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
+ 		goto out_unlock;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		goto out_unlock;
+ 
+diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
+index 87724c1d7be6..a533d8c66489 100644
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -400,7 +400,7 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	BUG_ON(!inode);
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
+index 2f7a3c090489..f9f86f87d32b 100644
+--- a/fs/jffs2/acl.c
++++ b/fs/jffs2/acl.c
+@@ -235,9 +235,10 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 	case ACL_TYPE_ACCESS:
+ 		xprefix = JFFS2_XPREFIX_ACL_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			rc = posix_acl_equiv_mode(acl, &mode);
+-			if (rc < 0)
++			umode_t mode;
++
++			rc = posix_acl_update_mode(inode, &mode, &acl);
++			if (rc)
+ 				return rc;
+ 			if (inode->i_mode != mode) {
+ 				struct iattr attr;
+@@ -249,8 +250,6 @@ int jffs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 				if (rc < 0)
+ 					return rc;
+ 			}
+-			if (rc == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
+index fe5ea080b4ec..6273abad377f 100644
+--- a/fs/jffs2/fs.c
++++ b/fs/jffs2/fs.c
+@@ -193,7 +193,7 @@ int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int rc;
+ 
+-	rc = inode_change_ok(inode, iattr);
++	rc = setattr_prepare(dentry, iattr);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
+index 0c8ca830b113..9fad9f4fe883 100644
+--- a/fs/jfs/acl.c
++++ b/fs/jfs/acl.c
+@@ -84,13 +84,11 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
+ 	case ACL_TYPE_ACCESS:
+ 		ea_name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			rc = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (rc < 0)
++			rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (rc)
+ 				return rc;
+ 			inode->i_ctime = CURRENT_TIME;
+ 			mark_inode_dirty(inode);
+-			if (rc == 0)
+-				acl = NULL;
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/jfs/file.c b/fs/jfs/file.c
+index e98d39d75cf4..66d6362a9007 100644
+--- a/fs/jfs/file.c
++++ b/fs/jfs/file.c
+@@ -103,7 +103,7 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int rc;
+ 
+-	rc = inode_change_ok(inode, iattr);
++	rc = setattr_prepare(dentry, iattr);
+ 	if (rc)
+ 		return rc;
+ 
+diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
+index 756dd56aaf60..a17c850a4958 100644
+--- a/fs/kernfs/inode.c
++++ b/fs/kernfs/inode.c
+@@ -119,7 +119,7 @@ int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr)
+ 		return -EINVAL;
+ 
+ 	mutex_lock(&kernfs_mutex);
+-	error = inode_change_ok(inode, iattr);
++	error = setattr_prepare(dentry, iattr);
+ 	if (error)
+ 		goto out;
+ 
+diff --git a/fs/libfs.c b/fs/libfs.c
+index f4641fd27bda..50edbdc23cef 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -371,7 +371,7 @@ int simple_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, iattr);
++	error = setattr_prepare(dentry, iattr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/logfs/file.c b/fs/logfs/file.c
+index 1a6f0167b16a..3abe1414c3f4 100644
+--- a/fs/logfs/file.c
++++ b/fs/logfs/file.c
+@@ -244,7 +244,7 @@ static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int err = 0;
+ 
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/minix/file.c b/fs/minix/file.c
+index 94f0eb9a6e2c..a6a4797aa0d4 100644
+--- a/fs/minix/file.c
++++ b/fs/minix/file.c
+@@ -26,7 +26,7 @@ static int minix_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/mount.h b/fs/mount.h
+index 6a61c2b3e385..2152c16ddf74 100644
+--- a/fs/mount.h
++++ b/fs/mount.h
+@@ -13,6 +13,8 @@ struct mnt_namespace {
+ 	u64			seq;	/* Sequence number to prevent loops */
+ 	wait_queue_head_t poll;
+ 	u64 event;
++	unsigned int		mounts; /* # of mounts in the namespace */
++	unsigned int		pending_mounts;
+ };
+ 
+ struct mnt_pcp {
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 556721fb0cf6..f853aaf92ec9 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -27,6 +27,9 @@
+ #include "pnode.h"
+ #include "internal.h"
+ 
++/* Maximum number of mounts in a mount namespace */
++unsigned int sysctl_mount_max __read_mostly = 100000;
++
+ static unsigned int m_hash_mask __read_mostly;
+ static unsigned int m_hash_shift __read_mostly;
+ static unsigned int mp_hash_mask __read_mostly;
+@@ -888,6 +891,9 @@ static void commit_tree(struct mount *mnt, struct mount *shadows)
+ 
+ 	list_splice(&head, n->list.prev);
+ 
++	n->mounts += n->pending_mounts;
++	n->pending_mounts = 0;
++
+ 	attach_shadowed(mnt, parent, shadows);
+ 	touch_mnt_namespace(n);
+ }
+@@ -1408,11 +1414,16 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ 		propagate_umount(&tmp_list);
+ 
+ 	while (!list_empty(&tmp_list)) {
++		struct mnt_namespace *ns;
+ 		bool disconnect;
+ 		p = list_first_entry(&tmp_list, struct mount, mnt_list);
+ 		list_del_init(&p->mnt_expire);
+ 		list_del_init(&p->mnt_list);
+-		__touch_mnt_namespace(p->mnt_ns);
++		ns = p->mnt_ns;
++		if (ns) {
++			ns->mounts--;
++			__touch_mnt_namespace(ns);
++		}
+ 		p->mnt_ns = NULL;
+ 		if (how & UMOUNT_SYNC)
+ 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+@@ -1821,6 +1832,28 @@ static int invent_group_ids(struct mount *mnt, bool recurse)
+ 	return 0;
+ }
+ 
++int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
++{
++	unsigned int max = READ_ONCE(sysctl_mount_max);
++	unsigned int mounts = 0, old, pending, sum;
++	struct mount *p;
++
++	for (p = mnt; p; p = next_mnt(p, mnt))
++		mounts++;
++
++	old = ns->mounts;
++	pending = ns->pending_mounts;
++	sum = old + pending;
++	if ((old > sum) ||
++	    (pending > sum) ||
++	    (max < sum) ||
++	    (mounts > (max - sum)))
++		return -ENOSPC;
++
++	ns->pending_mounts = pending + mounts;
++	return 0;
++}
++
+ /*
+  *  @source_mnt : mount tree to be attached
+  *  @nd         : place the mount tree @source_mnt is attached
+@@ -1890,10 +1923,18 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+ 			struct path *parent_path)
+ {
+ 	HLIST_HEAD(tree_list);
++	struct mnt_namespace *ns = dest_mnt->mnt_ns;
+ 	struct mount *child, *p;
+ 	struct hlist_node *n;
+ 	int err;
+ 
++	/* Is there space to add these mounts to the mount namespace? */
++	if (!parent_path) {
++		err = count_mounts(ns, source_mnt);
++		if (err)
++			goto out;
++	}
++
+ 	if (IS_MNT_SHARED(dest_mnt)) {
+ 		err = invent_group_ids(source_mnt, true);
+ 		if (err)
+@@ -1930,11 +1971,13 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+  out_cleanup_ids:
+ 	while (!hlist_empty(&tree_list)) {
+ 		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
++		child->mnt_parent->mnt_ns->pending_mounts = 0;
+ 		umount_tree(child, UMOUNT_SYNC);
+ 	}
+ 	unlock_mount_hash();
+ 	cleanup_group_ids(source_mnt, NULL);
+  out:
++	ns->pending_mounts = 0;
+ 	return err;
+ }
+ 
+@@ -2758,6 +2801,8 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
+ 	init_waitqueue_head(&new_ns->poll);
+ 	new_ns->event = 0;
+ 	new_ns->user_ns = get_user_ns(user_ns);
++	new_ns->mounts = 0;
++	new_ns->pending_mounts = 0;
+ 	return new_ns;
+ }
+ 
+@@ -2807,6 +2852,7 @@ struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
+ 	q = new;
+ 	while (p) {
+ 		q->mnt_ns = new_ns;
++		new_ns->mounts++;
+ 		if (new_fs) {
+ 			if (&p->mnt == new_fs->root.mnt) {
+ 				new_fs->root.mnt = mntget(&q->mnt);
+@@ -2845,6 +2891,7 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
+ 		struct mount *mnt = real_mount(m);
+ 		mnt->mnt_ns = new_ns;
+ 		new_ns->root = mnt;
++		new_ns->mounts++;
+ 		list_add(&mnt->mnt_list, &new_ns->list);
+ 	} else {
+ 		mntput(m);
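As an aside on the count_mounts() hunk above: the capacity test is written so the
unsigned arithmetic can never wrap. A minimal standalone sketch of the same check
in plain C (illustrative names, not the kernel's code):

/*
 * Wrap-safe capacity test, mirroring count_mounts() above: reject when
 * old + pending + mounts could exceed max, without ever letting an
 * unsigned addition overflow unnoticed.
 */
static int mounts_would_exceed(unsigned int old, unsigned int pending,
			       unsigned int mounts, unsigned int max)
{
	unsigned int sum = old + pending;

	if (old > sum || pending > sum)	/* old + pending wrapped around */
		return 1;
	if (max < sum)			/* namespace already at or over max */
		return 1;
	if (mounts > max - sum)		/* the new mount tree does not fit */
		return 1;
	return 0;
}

Comparing mounts against max - sum, rather than computing sum + mounts, is the key
move: once max >= sum has been established, the subtraction cannot wrap.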
+diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
+index 9605a2f63549..7b1261bc2dee 100644
+--- a/fs/ncpfs/inode.c
++++ b/fs/ncpfs/inode.c
+@@ -884,7 +884,7 @@ int ncp_notify_change(struct dentry *dentry, struct iattr *attr)
+ 	/* ageing the dentry to force validation */
+ 	ncp_age_dentry(server, dentry);
+ 
+-	result = inode_change_ok(inode, attr);
++	result = setattr_prepare(dentry, attr);
+ 	if (result < 0)
+ 		goto out;
+ 
+diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
+index aecbcd34d336..44f6f4f5eee0 100644
+--- a/fs/nfsd/nfsproc.c
++++ b/fs/nfsd/nfsproc.c
+@@ -59,13 +59,59 @@ static __be32
+ nfsd_proc_setattr(struct svc_rqst *rqstp, struct nfsd_sattrargs *argp,
+ 					  struct nfsd_attrstat  *resp)
+ {
++	struct iattr *iap = &argp->attrs;
++	struct svc_fh *fhp;
+ 	__be32 nfserr;
++
+ 	dprintk("nfsd: SETATTR  %s, valid=%x, size=%ld\n",
+ 		SVCFH_fmt(&argp->fh),
+ 		argp->attrs.ia_valid, (long) argp->attrs.ia_size);
+ 
+-	fh_copy(&resp->fh, &argp->fh);
+-	nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs,0, (time_t)0);
++	fhp = fh_copy(&resp->fh, &argp->fh);
++
++	/*
++	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
++	 * which only requires access, and "set-[ac]time-to-X" which
++	 * requires ownership.
++	 * So if it looks like it might be "set both to the same time which
++	 * is close to now", and if setattr_prepare fails, then we
++	 * convert to "set to now" instead of "set to explicit time"
++	 *
++	 * We only call setattr_prepare as the last test as technically
++	 * it is not an interface that we should be using.
++	 */
++#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
++#define	MAX_TOUCH_TIME_ERROR (30*60)
++	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
++	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
++		/*
++		 * Looks probable.
++		 *
++		 * Now just make sure time is in the right ballpark.
++		 * Solaris, at least, doesn't seem to care what the time
++		 * request is.  We require it be within 30 minutes of now.
++		 */
++		time_t delta = iap->ia_atime.tv_sec - get_seconds();
++
++		nfserr = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
++		if (nfserr)
++			goto done;
++
++		if (delta < 0)
++			delta = -delta;
++		if (delta < MAX_TOUCH_TIME_ERROR &&
++		    setattr_prepare(fhp->fh_dentry, iap) != 0) {
++			/*
++			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
++			 * This will cause notify_change to set these times
++			 * to "now"
++			 */
++			iap->ia_valid &= ~BOTH_TIME_SET;
++		}
++	}
++
++	nfserr = nfsd_setattr(rqstp, fhp, iap, 0, (time_t)0);
++done:
+ 	return nfsd_return_attrs(nfserr, resp);
+ }
+ 
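The heuristic being moved into nfsd_proc_setattr() above boils down to a single
time comparison. A standalone sketch of that decision, assuming plain time_t
seconds (illustrative, not the kernel code):

#include <time.h>

#define MAX_TOUCH_TIME_ERROR (30 * 60)	/* 30 minutes, as in the patch */

/*
 * Return nonzero when an equal atime/mtime pair looks like a "touch":
 * within 30 minutes of now, in either direction. The caller then drops
 * ATTR_[AM]TIME_SET so notify_change() sets the times to "now", which
 * only requires access rather than ownership.
 */
static int looks_like_touch(time_t requested, time_t now)
{
	time_t delta = requested - now;

	if (delta < 0)
		delta = -delta;
	return delta < MAX_TOUCH_TIME_ERROR;
}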
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 84d770be056e..92de3747ea8b 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -302,42 +302,6 @@ commit_metadata(struct svc_fh *fhp)
+ static void
+ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
+ {
+-	/*
+-	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
+-	 * which only requires access, and "set-[ac]time-to-X" which
+-	 * requires ownership.
+-	 * So if it looks like it might be "set both to the same time which
+-	 * is close to now", and if inode_change_ok fails, then we
+-	 * convert to "set to now" instead of "set to explicit time"
+-	 *
+-	 * We only call inode_change_ok as the last test as technically
+-	 * it is not an interface that we should be using.
+-	 */
+-#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
+-#define	MAX_TOUCH_TIME_ERROR (30*60)
+-	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
+-	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
+-		/*
+-		 * Looks probable.
+-		 *
+-		 * Now just make sure time is in the right ballpark.
+-		 * Solaris, at least, doesn't seem to care what the time
+-		 * request is.  We require it be within 30 minutes of now.
+-		 */
+-		time_t delta = iap->ia_atime.tv_sec - get_seconds();
+-		if (delta < 0)
+-			delta = -delta;
+-		if (delta < MAX_TOUCH_TIME_ERROR &&
+-		    inode_change_ok(inode, iap) != 0) {
+-			/*
+-			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
+-			 * This will cause notify_change to set these times
+-			 * to "now"
+-			 */
+-			iap->ia_valid &= ~BOTH_TIME_SET;
+-		}
+-	}
+-
+ 	/* sanitize the mode change */
+ 	if (iap->ia_valid & ATTR_MODE) {
+ 		iap->ia_mode &= S_IALLUGO;
+diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
+index 258d9fe2521a..b40df2bb5ee4 100644
+--- a/fs/nilfs2/inode.c
++++ b/fs/nilfs2/inode.c
+@@ -839,7 +839,7 @@ int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
+ 	struct super_block *sb = inode->i_sb;
+ 	int err;
+ 
+-	err = inode_change_ok(inode, iattr);
++	err = setattr_prepare(dentry, iattr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
+index d284f07eda77..c178763893f3 100644
+--- a/fs/ntfs/inode.c
++++ b/fs/ntfs/inode.c
+@@ -2893,7 +2893,7 @@ int ntfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	int err;
+ 	unsigned int ia_valid = attr->ia_valid;
+ 
+-	err = inode_change_ok(vi, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		goto out;
+ 	/* We do not support NTFS ACLs yet. */
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index 762e5a3aecd3..c7641f656494 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -241,13 +241,11 @@ int ocfs2_set_acl(handle_t *handle,
+ 	case ACL_TYPE_ACCESS:
+ 		name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS;
+ 		if (acl) {
+-			umode_t mode = inode->i_mode;
+-			ret = posix_acl_equiv_mode(acl, &mode);
+-			if (ret < 0)
+-				return ret;
++			umode_t mode;
+ 
+-			if (ret == 0)
+-				acl = NULL;
++			ret = posix_acl_update_mode(inode, &mode, &acl);
++			if (ret)
++				return ret;
+ 
+ 			ret = ocfs2_acl_set_mode(inode, di_bh,
+ 						 handle, mode);
+diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
+index b5cf27dcb18a..43ac2289c613 100644
+--- a/fs/ocfs2/dlmfs/dlmfs.c
++++ b/fs/ocfs2/dlmfs/dlmfs.c
+@@ -211,7 +211,7 @@ static int dlmfs_file_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 
+ 	attr->ia_valid &= ~ATTR_SIZE;
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index e00be7f509db..bc06b982e9ea 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1150,7 +1150,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
+ 		return 0;
+ 
+-	status = inode_change_ok(inode, attr);
++	status = setattr_prepare(dentry, attr);
+ 	if (status)
+ 		return status;
+ 
+diff --git a/fs/omfs/file.c b/fs/omfs/file.c
+index d9e26cfbb793..bf83e6644333 100644
+--- a/fs/omfs/file.c
++++ b/fs/omfs/file.c
+@@ -349,7 +349,7 @@ static int omfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 0bb8347c0d8b..d293034ae2cb 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -54,7 +54,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 	 * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
+ 	 * check for a swapfile (which this won't be anyway).
+ 	 */
+-	err = inode_change_ok(dentry->d_inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 99899705b105..234a9ac49958 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -259,7 +259,7 @@ static int propagate_one(struct mount *m)
+ 		read_sequnlock_excl(&mount_lock);
+ 	}
+ 	hlist_add_head(&child->mnt_hash, list);
+-	return 0;
++	return count_mounts(m->mnt_ns, child);
+ }
+ 
+ /*
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 0fcdbe7ca648..550f5a8b4fcf 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -52,4 +52,5 @@ void mnt_set_mountpoint(struct mount *, struct mountpoint *,
+ struct mount *copy_tree(struct mount *, struct dentry *, int);
+ bool is_path_reachable(struct mount *, struct dentry *,
+ 			 const struct path *root);
++int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
+ #endif /* _LINUX_PNODE_H */
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index a9dafa83678c..0ef1c3722504 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -598,6 +598,37 @@ no_mem:
+ }
+ EXPORT_SYMBOL_GPL(posix_acl_create);
+ 
++/**
++ * posix_acl_update_mode  -  update mode in set_acl
++ *
++ * Update the file mode when setting an ACL: compute the new file permission
++ * bits based on the ACL.  In addition, if the ACL is equivalent to the new
++ * file mode, set *acl to NULL to indicate that no ACL should be set.
++ *
++ * As with chmod, clear the setgid bit if the caller is not in the owning group
++ * or capable of CAP_FSETID (see inode_change_ok).
++ *
++ * Called from set_acl inode operations.
++ */
++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
++			  struct posix_acl **acl)
++{
++	umode_t mode = inode->i_mode;
++	int error;
++
++	error = posix_acl_equiv_mode(*acl, &mode);
++	if (error < 0)
++		return error;
++	if (error == 0)
++		*acl = NULL;
++	if (!in_group_p(inode->i_gid) &&
++	    !capable_wrt_inode_uidgid(inode, CAP_FSETID))
++		mode &= ~S_ISGID;
++	*mode_p = mode;
++	return 0;
++}
++EXPORT_SYMBOL(posix_acl_update_mode);
++
+ /*
+  * Fix up the uids and gids in posix acl extended attributes in place.
+  */
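One detail of the new helper worth calling out: beyond computing the equivalent
mode, it clears S_ISGID for callers outside the owning group, matching chmod
semantics. A standalone sketch of just that rule, with stand-in predicates for
in_group_p() and capable_wrt_inode_uidgid():

#include <sys/stat.h>

/*
 * SGID rule enforced by posix_acl_update_mode() above: a caller that is
 * neither in the owning group nor CAP_FSETID-capable must not be able to
 * keep the setgid bit set via an ACL update.
 */
static mode_t apply_sgid_rule(mode_t mode, int caller_in_group,
			      int caller_has_fsetid)
{
	if (!caller_in_group && !caller_has_fsetid)
		mode &= ~S_ISGID;
	return mode;
}

This is the hole the series closes: previously a filesystem could write back a
setgid mode computed by posix_acl_equiv_mode() without any chmod-style
permission check.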
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 239dca3fb676..fab32ad5d96d 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -517,7 +517,7 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (attr->ia_valid & ATTR_MODE)
+ 		return -EPERM;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index e5dee5c3188e..d99099fe62d4 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -105,7 +105,7 @@ static int proc_notify_change(struct dentry *dentry, struct iattr *iattr)
+ 	struct proc_dir_entry *de = PDE(inode);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, iattr);
++	error = setattr_prepare(dentry, iattr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index fdda62e6115e..0dea606074c7 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -753,7 +753,7 @@ static int proc_sys_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ 		return -EPERM;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
+index ba1323a94924..aab2593f3179 100644
+--- a/fs/ramfs/file-nommu.c
++++ b/fs/ramfs/file-nommu.c
+@@ -168,7 +168,7 @@ static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
+ 	int ret = 0;
+ 
+ 	/* POSIX UID/GID verification for setting inode attributes */
+-	ret = inode_change_ok(inode, ia);
++	ret = setattr_prepare(dentry, ia);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
+index f6f2fbad9777..7da1232a78e3 100644
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -3312,7 +3312,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	unsigned int ia_valid;
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
+index 4b34b9dc03dd..9b1824f35501 100644
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -246,13 +246,9 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
+ 	case ACL_TYPE_ACCESS:
+ 		name = POSIX_ACL_XATTR_ACCESS;
+ 		if (acl) {
+-			error = posix_acl_equiv_mode(acl, &inode->i_mode);
+-			if (error < 0)
++			error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
++			if (error)
+ 				return error;
+-			else {
+-				if (error == 0)
+-					acl = NULL;
+-			}
+ 		}
+ 		break;
+ 	case ACL_TYPE_DEFAULT:
+diff --git a/fs/sysv/file.c b/fs/sysv/file.c
+index 82ddc09061e2..7ba997e31aeb 100644
+--- a/fs/sysv/file.c
++++ b/fs/sysv/file.c
+@@ -33,7 +33,7 @@ static int sysv_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 75e9b2db14ab..2dc8ce485c51 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -1263,7 +1263,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
+ 
+ 	dbg_gen("ino %lu, mode %#x, ia_valid %#x",
+ 		inode->i_ino, inode->i_mode, attr->ia_valid);
+-	err = inode_change_ok(inode, attr);
++	err = setattr_prepare(dentry, attr);
+ 	if (err)
+ 		return err;
+ 
+diff --git a/fs/udf/file.c b/fs/udf/file.c
+index 7a95b8fed302..889f1e5da507 100644
+--- a/fs/udf/file.c
++++ b/fs/udf/file.c
+@@ -252,7 +252,7 @@ static int udf_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct inode *inode = d_inode(dentry);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c
+index 21154704c168..a958b36f40bb 100644
+--- a/fs/ufs/truncate.c
++++ b/fs/ufs/truncate.c
+@@ -496,7 +496,7 @@ int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+ 	unsigned int ia_valid = attr->ia_valid;
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/fs/utimes.c b/fs/utimes.c
+index aa138d64560a..61abc3051377 100644
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -81,7 +81,7 @@ static int utimes_common(struct path *path, struct timespec *times)
+ 			newattrs.ia_valid |= ATTR_MTIME_SET;
+ 		}
+ 		/*
+-		 * Tell inode_change_ok(), that this is an explicit time
++		 * Tell setattr_prepare(), that this is an explicit time
+ 		 * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
+ 		 * were used.
+ 		 */
+@@ -90,7 +90,7 @@ static int utimes_common(struct path *path, struct timespec *times)
+ 		/*
+ 		 * If times is NULL (or both times are UTIME_NOW),
+ 		 * then we need to check permissions, because
+-		 * inode_change_ok() won't do it.
++		 * setattr_prepare() won't do it.
+ 		 */
+ 		error = -EACCES;
+                 if (IS_IMMUTABLE(inode))
+diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
+index 4b641676f258..e80dbfa2a7b9 100644
+--- a/fs/xfs/xfs_acl.c
++++ b/fs/xfs/xfs_acl.c
+@@ -284,16 +284,11 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ 		return error;
+ 
+ 	if (type == ACL_TYPE_ACCESS) {
+-		umode_t mode = inode->i_mode;
+-		error = posix_acl_equiv_mode(acl, &mode);
+-
+-		if (error <= 0) {
+-			acl = NULL;
+-
+-			if (error < 0)
+-				return error;
+-		}
++		umode_t mode;
+ 
++		error = posix_acl_update_mode(inode, &mode, &acl);
++		if (error)
++			return error;
+ 		error = xfs_set_mode(inode, mode);
+ 		if (error)
+ 			return error;
+diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
+index 3b7591224f4a..550f8c4733ee 100644
+--- a/fs/xfs/xfs_file.c
++++ b/fs/xfs/xfs_file.c
+@@ -973,7 +973,7 @@ xfs_file_fallocate(
+ 
+ 		iattr.ia_valid = ATTR_SIZE;
+ 		iattr.ia_size = new_size;
+-		error = xfs_setattr_size(ip, &iattr);
++		error = xfs_vn_setattr_size(file->f_path.dentry, &iattr);
+ 		if (error)
+ 			goto out_unlock;
+ 	}
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index c29f34253e2b..6b67d617c092 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -1766,7 +1766,7 @@ xfs_inactive_truncate(
+ 	/*
+ 	 * Log the inode size first to prevent stale data exposure in the event
+ 	 * of a system crash before the truncate completes. See the related
+-	 * comment in xfs_setattr_size() for details.
++	 * comment in xfs_vn_setattr_size() for details.
+ 	 */
+ 	ip->i_d.di_size = 0;
+ 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
+index 87f67c6b654c..82e49109d0b6 100644
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -720,7 +720,7 @@ xfs_ioc_space(
+ 		iattr.ia_valid = ATTR_SIZE;
+ 		iattr.ia_size = bf->l_start;
+ 
+-		error = xfs_setattr_size(ip, &iattr);
++		error = xfs_vn_setattr_size(filp->f_path.dentry, &iattr);
+ 		break;
+ 	default:
+ 		ASSERT(0);
+diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
+index f4cd7204e236..4e4d6511185b 100644
+--- a/fs/xfs/xfs_iops.c
++++ b/fs/xfs/xfs_iops.c
+@@ -537,6 +537,30 @@ xfs_setattr_time(
+ 	}
+ }
+ 
++static int
++xfs_vn_change_ok(
++	struct dentry	*dentry,
++	struct iattr	*iattr)
++{
++	struct inode		*inode = d_inode(dentry);
++	struct xfs_inode	*ip = XFS_I(inode);
++	struct xfs_mount	*mp = ip->i_mount;
++
++	if (mp->m_flags & XFS_MOUNT_RDONLY)
++		return -EROFS;
++
++	if (XFS_FORCED_SHUTDOWN(mp))
++		return -EIO;
++
++	return setattr_prepare(dentry, iattr);
++}
++
++/*
++ * Set non-size attributes of an inode.
++ *
++ * Caution: The caller of this function is responsible for calling
++ * setattr_prepare() or otherwise verifying the change is fine.
++ */
+ int
+ xfs_setattr_nonsize(
+ 	struct xfs_inode	*ip,
+@@ -553,21 +577,6 @@ xfs_setattr_nonsize(
+ 	struct xfs_dquot	*udqp = NULL, *gdqp = NULL;
+ 	struct xfs_dquot	*olddquot1 = NULL, *olddquot2 = NULL;
+ 
+-	trace_xfs_setattr(ip);
+-
+-	/* If acls are being inherited, we already have this checked */
+-	if (!(flags & XFS_ATTR_NOACL)) {
+-		if (mp->m_flags & XFS_MOUNT_RDONLY)
+-			return -EROFS;
+-
+-		if (XFS_FORCED_SHUTDOWN(mp))
+-			return -EIO;
+-
+-		error = inode_change_ok(inode, iattr);
+-		if (error)
+-			return error;
+-	}
+-
+ 	ASSERT((mask & ATTR_SIZE) == 0);
+ 
+ 	/*
+@@ -741,8 +750,27 @@ out_dqrele:
+ 	return error;
+ }
+ 
++int
++xfs_vn_setattr_nonsize(
++	struct dentry		*dentry,
++	struct iattr		*iattr)
++{
++	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
++	int error;
++
++	trace_xfs_setattr(ip);
++
++	error = xfs_vn_change_ok(dentry, iattr);
++	if (error)
++		return error;
++	return xfs_setattr_nonsize(ip, iattr, 0);
++}
++
+ /*
+  * Truncate file.  Must have write permission and not be a directory.
++ *
++ * Caution: The caller of this function is responsible for calling
++ * setattr_prepare() or otherwise verifying the change is fine.
+  */
+ int
+ xfs_setattr_size(
+@@ -758,18 +786,6 @@ xfs_setattr_size(
+ 	uint			commit_flags = 0;
+ 	bool			did_zeroing = false;
+ 
+-	trace_xfs_setattr(ip);
+-
+-	if (mp->m_flags & XFS_MOUNT_RDONLY)
+-		return -EROFS;
+-
+-	if (XFS_FORCED_SHUTDOWN(mp))
+-		return -EIO;
+-
+-	error = inode_change_ok(inode, iattr);
+-	if (error)
+-		return error;
+-
+ 	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+ 	ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
+ 	ASSERT(S_ISREG(ip->i_d.di_mode));
+@@ -941,16 +957,32 @@ out_trans_cancel:
+ 	goto out_unlock;
+ }
+ 
++int
++xfs_vn_setattr_size(
++	struct dentry		*dentry,
++	struct iattr		*iattr)
++{
++	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
++	int error;
++
++	trace_xfs_setattr(ip);
++
++	error = xfs_vn_change_ok(dentry, iattr);
++	if (error)
++		return error;
++	return xfs_setattr_size(ip, iattr);
++}
++
+ STATIC int
+ xfs_vn_setattr(
+ 	struct dentry		*dentry,
+ 	struct iattr		*iattr)
+ {
+-	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
+ 	int			error;
+ 
+ 	if (iattr->ia_valid & ATTR_SIZE) {
+-		uint		iolock = XFS_IOLOCK_EXCL;
++		struct xfs_inode	*ip = XFS_I(d_inode(dentry));
++		uint			iolock = XFS_IOLOCK_EXCL;
+ 
+ 		xfs_ilock(ip, iolock);
+ 		error = xfs_break_layouts(d_inode(dentry), &iolock, true);
+@@ -958,11 +990,11 @@ xfs_vn_setattr(
+ 			xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+ 			iolock |= XFS_MMAPLOCK_EXCL;
+ 
+-			error = xfs_setattr_size(ip, iattr);
++			error = xfs_vn_setattr_size(dentry, iattr);
+ 		}
+ 		xfs_iunlock(ip, iolock);
+ 	} else {
+-		error = xfs_setattr_nonsize(ip, iattr, 0);
++		error = xfs_vn_setattr_nonsize(dentry, iattr);
+ 	}
+ 
+ 	return error;
+diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h
+index a0f84abb0d09..0259a383721a 100644
+--- a/fs/xfs/xfs_iops.h
++++ b/fs/xfs/xfs_iops.h
+@@ -33,6 +33,7 @@ extern ssize_t xfs_vn_listxattr(struct dentry *, char *data, size_t size);
+ extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
+ extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
+ 			       int flags);
+-extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
++extern int xfs_vn_setattr_nonsize(struct dentry *dentry, struct iattr *vap);
++extern int xfs_vn_setattr_size(struct dentry *dentry, struct iattr *vap);
+ 
+ #endif /* __XFS_IOPS_H__ */
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index 5e13b987d9e2..678e97fa3869 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -31,6 +31,7 @@ enum can_mode {
+  * CAN common private data
+  */
+ struct can_priv {
++	struct net_device *dev;
+ 	struct can_device_stats can_stats;
+ 
+ 	struct can_bittiming bittiming, data_bittiming;
+@@ -46,7 +47,7 @@ struct can_priv {
+ 	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
+ 
+ 	int restart_ms;
+-	struct timer_list restart_timer;
++	struct delayed_work restart_work;
+ 
+ 	int (*do_set_bittiming)(struct net_device *dev);
+ 	int (*do_set_data_bittiming)(struct net_device *dev);
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index ae327f6a53f6..31c3d818c981 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2816,7 +2816,7 @@ extern int buffer_migrate_page(struct address_space *,
+ #define buffer_migrate_page NULL
+ #endif
+ 
+-extern int inode_change_ok(const struct inode *, struct iattr *);
++extern int setattr_prepare(struct dentry *, struct iattr *);
+ extern int inode_newsize_ok(const struct inode *, loff_t offset);
+ extern void setattr_copy(struct inode *inode, const struct iattr *attr);
+ 
+diff --git a/include/linux/mount.h b/include/linux/mount.h
+index f822c3c11377..dc6cd800cd5d 100644
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -95,4 +95,6 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
+ 
+ extern dev_t name_to_dev_t(const char *name);
+ 
++extern unsigned int sysctl_mount_max;
++
+ #endif /* _LINUX_MOUNT_H */
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 6c86c7edafa7..ddd47c3a757d 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1957,7 +1957,10 @@ struct napi_gro_cb {
+ 	/* Used in foo-over-udp, set in udp[46]_gro_receive */
+ 	u8	is_ipv6:1;
+ 
+-	/* 7 bit hole */
++	/* Number of gro_receive callbacks this packet already went through */
++	u8 recursion_counter:4;
++
++	/* 3 bit hole */
+ 
+ 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
+ 	__wsum	csum;
+@@ -1968,6 +1971,25 @@ struct napi_gro_cb {
+ 
+ #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
+ 
++#define GRO_RECURSION_LIMIT 15
++static inline int gro_recursion_inc_test(struct sk_buff *skb)
++{
++	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
++}
++
++typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
++static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
++						struct sk_buff **head,
++						struct sk_buff *skb)
++{
++	if (gro_recursion_inc_test(skb)) {
++		NAPI_GRO_CB(skb)->flush |= 1;
++		return NULL;
++	}
++
++	return cb(head, skb);
++}
++
+ struct packet_type {
+ 	__be16			type;	/* This is really htons(ether_type). */
+ 	struct net_device	*dev;	/* NULL is wildcarded here	     */
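For context on the call_gro_receive() wrapper above: each layer of encapsulation
re-enters GRO, so a crafted stack of tunnels could otherwise recurse without
bound. A tiny standalone model of the cap (simplified types, illustrative only):

#define GRO_RECURSION_LIMIT 15

struct pkt {
	unsigned char recursion_counter;	/* a 4-bit field in the real cb */
	unsigned char flush;
};

/*
 * Nonzero once this packet has passed through 15 nested gro_receive
 * callbacks; the caller then marks the packet for flush instead of
 * recursing further.
 */
static int recursion_inc_test(struct pkt *p)
{
	return ++p->recursion_counter == GRO_RECURSION_LIMIT;
}

The rest of the patch is mechanical: each tunnel offload path (eth, inet, fou,
gue, gre, ipv6, udp) is converted to dispatch through this check.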
+diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h
+index 3e96a6a76103..d1a8ad7e5ae4 100644
+--- a/include/linux/posix_acl.h
++++ b/include/linux/posix_acl.h
+@@ -95,6 +95,7 @@ extern int set_posix_acl(struct inode *, int, struct posix_acl *);
+ extern int posix_acl_chmod(struct inode *, umode_t);
+ extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **,
+ 		struct posix_acl **);
++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **);
+ 
+ extern int simple_set_acl(struct inode *, struct posix_acl *, int);
+ extern int simple_acl_create(struct inode *, struct inode *);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 6d204f3f9df8..3d3a365233f0 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1434,6 +1434,8 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ {
+ 	if (sk->sk_send_head == skb_unlinked)
+ 		sk->sk_send_head = NULL;
++	if (tcp_sk(sk)->highest_sack == skb_unlinked)
++		tcp_sk(sk)->highest_sack = NULL;
+ }
+ 
+ static inline void tcp_init_send_head(struct sock *sk)
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index 388fc6f78c6f..71403502411b 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -323,8 +323,7 @@ static struct file_system_type cpuset_fs_type = {
+ /*
+  * Return in pmask the portion of a cpusets's cpus_allowed that
+  * are online.  If none are online, walk up the cpuset hierarchy
+- * until we find one that does have some online cpus.  The top
+- * cpuset always has some cpus online.
++ * until we find one that does have some online cpus.
+  *
+  * One way or another, we guarantee to return some non-empty subset
+  * of cpu_online_mask.
+@@ -333,8 +332,20 @@ static struct file_system_type cpuset_fs_type = {
+  */
+ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
+ {
+-	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
++	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
+ 		cs = parent_cs(cs);
++		if (unlikely(!cs)) {
++			/*
++			 * The top cpuset doesn't have any online cpu as a
++			 * consequence of a race between cpuset_hotplug_work
++			 * and cpu hotplug notifier.  But we know the top
++			 * cpuset's effective_cpus is on its way to be
++			 * identical to cpu_online_mask.
++			 */
++			cpumask_copy(pmask, cpu_online_mask);
++			return;
++		}
++	}
+ 	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
+ }
+ 
+diff --git a/kernel/ptrace.c b/kernel/ptrace.c
+index 261ee21e62db..9650e7aee267 100644
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -20,6 +20,7 @@
+ #include <linux/uio.h>
+ #include <linux/audit.h>
+ #include <linux/pid_namespace.h>
++#include <linux/user_namespace.h>
+ #include <linux/syscalls.h>
+ #include <linux/uaccess.h>
+ #include <linux/regset.h>
+@@ -207,12 +208,34 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
+ 	return ret;
+ }
+ 
+-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
++static bool ptrace_has_cap(const struct cred *tcred, unsigned int mode)
+ {
++	struct user_namespace *tns = tcred->user_ns;
++
++	/* When a root-owned process enters a user namespace created by a
++	 * malicious user, the user shouldn't be able to execute code under
++	 * uid 0 by attaching to the root-owned process via ptrace.
++	 * Therefore, similar to the capable_wrt_inode_uidgid() check,
++	 * verify that all the uids and gids of the target process are
++	 * mapped into a namespace below the current one in which the caller
++	 * is capable.
++	 * No fsuid/fsgid check because __ptrace_may_access doesn't do it
++	 * either.
++	 */
++	while (
++	    !kuid_has_mapping(tns, tcred->euid) ||
++	    !kuid_has_mapping(tns, tcred->suid) ||
++	    !kuid_has_mapping(tns, tcred->uid)  ||
++	    !kgid_has_mapping(tns, tcred->egid) ||
++	    !kgid_has_mapping(tns, tcred->sgid) ||
++	    !kgid_has_mapping(tns, tcred->gid)) {
++		tns = tns->parent;
++	}
++
+ 	if (mode & PTRACE_MODE_NOAUDIT)
+-		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
++		return has_ns_capability_noaudit(current, tns, CAP_SYS_PTRACE);
+ 	else
+-		return has_ns_capability(current, ns, CAP_SYS_PTRACE);
++		return has_ns_capability(current, tns, CAP_SYS_PTRACE);
+ }
+ 
+ /* Returns 0 on success, -errno on denial. */
+@@ -264,7 +287,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
+ 	    gid_eq(caller_gid, tcred->sgid) &&
+ 	    gid_eq(caller_gid, tcred->gid))
+ 		goto ok;
+-	if (ptrace_has_cap(tcred->user_ns, mode))
++	if (ptrace_has_cap(tcred, mode))
+ 		goto ok;
+ 	rcu_read_unlock();
+ 	return -EPERM;
+@@ -275,7 +298,7 @@ ok:
+ 		dumpable = get_dumpable(task->mm);
+ 	rcu_read_lock();
+ 	if (dumpable != SUID_DUMP_USER &&
+-	    !ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
++	    !ptrace_has_cap(__task_cred(task), mode)) {
+ 		rcu_read_unlock();
+ 		return -EPERM;
+ 	}
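The loop added to ptrace_has_cap() always terminates because the initial user
namespace maps every kuid and kgid. A userspace-flavoured sketch of the walk,
where struct ns and ids_mapped() are stand-ins for struct user_namespace and the
k[ug]id_has_mapping() checks:

struct ns {
	struct ns *parent;	/* NULL only above init_user_ns */
};

/*
 * Ascend from the target's user namespace until one is found that maps
 * all of the target's uids and gids; CAP_SYS_PTRACE is then required in
 * that namespace rather than in the target's own.
 */
static struct ns *find_mapping_ns(struct ns *tns,
				  int (*ids_mapped)(struct ns *))
{
	while (!ids_mapped(tns))
		tns = tns->parent;	/* init_user_ns maps everything */
	return tns;
}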
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 7d4900404c94..cebbff5f34fe 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -64,6 +64,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/sched/sysctl.h>
+ #include <linux/kexec.h>
++#include <linux/mount.h>
+ 
+ #include <asm/uaccess.h>
+ #include <asm/processor.h>
+@@ -1709,6 +1710,14 @@ static struct ctl_table fs_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_doulongvec_minmax,
+ 	},
++	{
++		.procname	= "mount-max",
++		.data		= &sysctl_mount_max,
++		.maxlen		= sizeof(unsigned int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
++	},
+ 	{ }
+ };
+ 
+diff --git a/mm/ksm.c b/mm/ksm.c
+index 7ee101eaacdf..22a2883eb822 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -283,7 +283,8 @@ static inline struct rmap_item *alloc_rmap_item(void)
+ {
+ 	struct rmap_item *rmap_item;
+ 
+-	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
++	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
++						__GFP_NORETRY | __GFP_NOWARN);
+ 	if (rmap_item)
+ 		ksm_rmap_items++;
+ 	return rmap_item;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 46511ad90bc5..feaaf6ea1b86 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -548,7 +548,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
+ 	struct shmem_inode_info *info = SHMEM_I(inode);
+ 	int error;
+ 
+-	error = inode_change_ok(inode, attr);
++	error = setattr_prepare(dentry, attr);
+ 	if (error)
+ 		return error;
+ 
+diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
+index 825e8fb5114b..f9e9a8148a43 100644
+--- a/net/bluetooth/rfcomm/sock.c
++++ b/net/bluetooth/rfcomm/sock.c
+@@ -334,16 +334,19 @@ static int rfcomm_sock_create(struct net *net, struct socket *sock,
+ 
+ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ {
+-	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
++	struct sockaddr_rc sa;
+ 	struct sock *sk = sock->sk;
+-	int chan = sa->rc_channel;
+-	int err = 0;
+-
+-	BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
++	int len, err = 0;
+ 
+ 	if (!addr || addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
++	memset(&sa, 0, sizeof(sa));
++	len = min_t(unsigned int, sizeof(sa), addr_len);
++	memcpy(&sa, addr, len);
++
++	BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr);
++
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_OPEN) {
+@@ -358,12 +361,13 @@ static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr
+ 
+ 	write_lock(&rfcomm_sk_list.lock);
+ 
+-	if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
++	if (sa.rc_channel &&
++	    __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) {
+ 		err = -EADDRINUSE;
+ 	} else {
+ 		/* Save source address */
+-		bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
+-		rfcomm_pi(sk)->channel = chan;
++		bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr);
++		rfcomm_pi(sk)->channel = sa.rc_channel;
+ 		sk->sk_state = BT_BOUND;
+ 	}
+ 
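The rfcomm fix above replaces direct pointer casts of the caller-supplied
sockaddr with a zeroed, length-bounded stack copy. The generic pattern, as a
standalone sketch:

#include <string.h>

/*
 * Copy at most dst_len bytes of a caller-supplied address into a zeroed
 * local buffer, so a short src_len can never leave later field accesses
 * reading uninitialized or out-of-bounds memory.
 */
static void copy_sockaddr_bounded(void *dst, size_t dst_len,
				  const void *src, size_t src_len)
{
	size_t n = src_len < dst_len ? src_len : dst_len;

	memset(dst, 0, dst_len);
	memcpy(dst, src, n);
}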
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 185a3398c651..56d820fc2707 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4060,6 +4060,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
+ 		NAPI_GRO_CB(skb)->flush = 0;
+ 		NAPI_GRO_CB(skb)->free = 0;
+ 		NAPI_GRO_CB(skb)->udp_mark = 0;
++		NAPI_GRO_CB(skb)->recursion_counter = 0;
+ 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
+ 
+ 		/* Setup for GRO checksum validation */
+diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
+index f3bad41d725f..76f8389eacd2 100644
+--- a/net/ethernet/eth.c
++++ b/net/ethernet/eth.c
+@@ -434,7 +434,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_pull(skb, sizeof(*eh));
+ 	skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 0cc98b135b8f..2095cd6c31fd 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1377,7 +1377,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
+ 	skb_gro_pull(skb, sizeof(*iph));
+ 	skb_set_transport_header(skb, skb_gro_offset(skb));
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 4b67937692c9..b22a75c0a3d9 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -188,7 +188,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
+ 	if (!ops || !ops->callbacks.gro_receive)
+ 		goto out_unlock;
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+@@ -355,7 +355,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
+ 	if (WARN_ON(!ops || !ops->callbacks.gro_receive))
+ 		goto out_unlock;
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 5a8ee3282550..53300b88d569 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -214,7 +214,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+ 	/* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
+ 	skb_gro_postpull_rcsum(skb, greh, grehlen);
+ 
+-	pp = ptype->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index f6ee0d561aab..3dac3d4aa26f 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -659,6 +659,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
+ 	if (len > 0xFFFF)
+ 		return -EMSGSIZE;
+ 
++	/* Must have at least a full ICMP header. */
++	if (len < icmph_len)
++		return -EINVAL;
++
+ 	/*
+ 	 *	Check the flags.
+ 	 */
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index f9386160cbee..2af7b7e1a0f6 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -339,8 +339,13 @@ unflush:
+ 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
+ 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+ 	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
+-	pp = uo_priv->offload->callbacks.gro_receive(head, skb,
+-						     uo_priv->offload);
++
++	if (gro_recursion_inc_test(skb)) {
++		pp = NULL;
++	} else {
++		pp = uo_priv->offload->callbacks.gro_receive(head, skb,
++							     uo_priv->offload);
++	}
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 08b62047c67f..db0b8428d248 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+ 
+ 	skb_gro_postpull_rcsum(skb, iph, nlen);
+ 
+-	pp = ops->callbacks.gro_receive(head, skb);
++	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+ 
+ out_unlock:
+ 	rcu_read_unlock();
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index a3654d929814..b9d1baaa8bdc 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3344,19 +3344,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		switch (val) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+ 		case TPACKET_V3:
+-			po->tp_version = val;
+-			return 0;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_version = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_RESERVE:
+ 	{
+@@ -3819,6 +3825,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
++	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		WARN(1, "Tx-ring is not supported.\n");
+@@ -3900,7 +3907,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 			goto out;
+ 	}
+ 
+-	lock_sock(sk);
+ 
+ 	/* Detach socket from network */
+ 	spin_lock(&po->bind_lock);
+@@ -3949,11 +3955,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
+ 	}
+-	release_sock(sk);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++	release_sock(sk);
+ 	return err;
+ }
+ 
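The af_packet change is at heart a check-then-act race: tp_version and the ring
state must be examined and updated under one lock. A simplified userspace model
of the fixed PACKET_VERSION path, with a pthread mutex standing in for
lock_sock() (illustrative only):

#include <pthread.h>

struct psock {
	pthread_mutex_t lock;		/* stands in for lock_sock(sk) */
	int tp_version;
	void *rx_pg_vec, *tx_pg_vec;	/* non-NULL once a ring exists */
};

static int set_version(struct psock *po, int val)
{
	int ret;

	pthread_mutex_lock(&po->lock);
	if (po->rx_pg_vec || po->tx_pg_vec) {
		ret = -1;		/* -EBUSY: ring already mapped */
	} else {
		po->tp_version = val;
		ret = 0;
	}
	pthread_mutex_unlock(&po->lock);
	return ret;
}

packet_set_ring() takes the same lock around its whole body, so the version can
no longer change between the validity check and the ring setup.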
+diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
+index 70e3dacbf84a..bf7d6a44c6f2 100644
+--- a/net/tipc/bearer.c
++++ b/net/tipc/bearer.c
+@@ -386,6 +386,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
+ 	dev = dev_get_by_name(net, driver_name);
+ 	if (!dev)
+ 		return -ENODEV;
++	if (tipc_mtu_bad(dev, 0)) {
++		dev_put(dev);
++		return -EINVAL;
++	}
+ 
+ 	/* Associate TIPC bearer with L2 bearer */
+ 	rcu_assign_pointer(b->media_ptr, dev);
+@@ -524,14 +528,17 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
+ 	if (!b_ptr)
+ 		return NOTIFY_DONE;
+ 
+-	b_ptr->mtu = dev->mtu;
+-
+ 	switch (evt) {
+ 	case NETDEV_CHANGE:
+ 		if (netif_carrier_ok(dev))
+ 			break;
+ 	case NETDEV_DOWN:
+ 	case NETDEV_CHANGEMTU:
++		if (tipc_mtu_bad(dev, 0)) {
++			bearer_disable(net, b_ptr, false);
++			break;
++		}
++		b_ptr->mtu = dev->mtu;
+ 		tipc_reset_bearer(net, b_ptr);
+ 		break;
+ 	case NETDEV_CHANGEADDR:
+diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
+index 5cad243ee8fc..b7302b012624 100644
+--- a/net/tipc/bearer.h
++++ b/net/tipc/bearer.h
+@@ -38,6 +38,7 @@
+ #define _TIPC_BEARER_H
+ 
+ #include "netlink.h"
++#include "msg.h"
+ #include <net/genetlink.h>
+ 
+ #define MAX_BEARERS	2
+@@ -61,6 +62,9 @@
+ #define TIPC_MEDIA_TYPE_IB	2
+ #define TIPC_MEDIA_TYPE_UDP	3
+ 
++/* minimum bearer MTU */
++#define TIPC_MIN_BEARER_MTU	(MAX_H_SIZE + INT_H_SIZE)
++
+ /**
+  * struct tipc_node_map - set of node identifiers
+  * @count: # of nodes in set
+@@ -218,4 +222,13 @@ void tipc_bearer_stop(struct net *net);
+ void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
+ 		      struct tipc_media_addr *dest);
+ 
++/* check if device MTU is too low for tipc headers */
++static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
++{
++	if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
++		return false;
++	netdev_warn(dev, "MTU too low for tipc bearer\n");
++	return true;
++}
++
+ #endif	/* _TIPC_BEARER_H */
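The new tipc_mtu_bad() floor above is parameterised by a per-media reserve:
zero for raw L2 bearers, the IP-plus-UDP header size for the UDP media. Reduced
to its arithmetic (illustrative sketch):

/*
 * A bearer is unusable when its device MTU cannot carry even a maximal
 * TIPC header plus whatever encapsulation the media prepends.
 */
static int mtu_too_low(unsigned int dev_mtu, unsigned int min_tipc_hdr,
		       unsigned int media_reserve)
{
	return dev_mtu < min_tipc_hdr + media_reserve;
}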
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index f8dfee5072c0..e14f23542a1a 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -374,6 +374,11 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
+ 		udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ 		udp_conf.use_udp_checksums = false;
+ 		ub->ifindex = dev->ifindex;
++		if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
++				      sizeof(struct udphdr))) {
++			err = -EINVAL;
++			goto err;
++		}
+ 		b->mtu = dev->mtu - sizeof(struct iphdr)
+ 			- sizeof(struct udphdr);
+ #if IS_ENABLED(CONFIG_IPV6)
+diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
+index 973e8c141567..17867e723a51 100755
+--- a/scripts/gcc-x86_64-has-stack-protector.sh
++++ b/scripts/gcc-x86_64-has-stack-protector.sh
+@@ -1,6 +1,6 @@
+ #!/bin/sh
+ 
+-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
++echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+ if [ "$?" -eq "0" ] ; then
+ 	echo y
+ else

diff --git a/1520_fix-race-condition-in-packet-set-ring.patch b/1520_fix-race-condition-in-packet-set-ring.patch
deleted file mode 100644
index d85527f..0000000
--- a/1520_fix-race-condition-in-packet-set-ring.patch
+++ /dev/null
@@ -1,62 +0,0 @@
---- a/net/packet/af_packet.c	2016-12-07 18:10:25.785812861 -0500
-+++ b/net/packet/af_packet.c	2016-12-07 18:18:45.597933525 -0500
-@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, i
- 
- 		if (optlen != sizeof(val))
- 			return -EINVAL;
--		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
--			return -EBUSY;
- 		if (copy_from_user(&val, optval, sizeof(val)))
- 			return -EFAULT;
- 		switch (val) {
- 		case TPACKET_V1:
- 		case TPACKET_V2:
- 		case TPACKET_V3:
--			po->tp_version = val;
--			return 0;
-+			break;
- 		default:
- 			return -EINVAL;
- 		}
-+		lock_sock(sk);
-+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
-+			ret = -EBUSY;
-+		} else {
-+			po->tp_version = val;
-+			ret = 0;
-+		}
-+		release_sock(sk);
-+		return ret;
- 	}
- 	case PACKET_RESERVE:
- 	{
-@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *
- 	/* Added to avoid minimal code churn */
- 	struct tpacket_req *req = &req_u->req;
- 
-+	lock_sock(sk);
- 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
- 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
- 		net_warn_ratelimited("Tx-ring is not supported.\n");
-@@ -4245,8 +4252,6 @@ static int packet_set_ring(struct sock *
- 			goto out;
- 	}
- 
--	lock_sock(sk);
--
- 	/* Detach socket from network */
- 	spin_lock(&po->bind_lock);
- 	was_running = po->running;
-@@ -4294,11 +4299,11 @@ static int packet_set_ring(struct sock *
- 		if (!tx_ring)
- 			prb_shutdown_retire_blk_timer(po, rb_queue);
- 	}
--	release_sock(sk);
- 
- 	if (pg_vec)
- 		free_pg_vec(pg_vec, order, req->tp_block_nr);
- out:
-+	release_sock(sk);
- 	return err;
- }
- 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-12-08  0:43 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-12-08  0:43 UTC (permalink / raw
  To: gentoo-commits

commit:     ef9c8970cb2c6c361f43617cc4de9528a6159b8a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec  8 00:43:10 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec  8 00:43:10 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ef9c8970

Fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.

 0000_README                                      |  4 ++
 1520_fix-race-condition-in-packet-set-ring.patch | 62 ++++++++++++++++++++++++
 2 files changed, 66 insertions(+)

diff --git a/0000_README b/0000_README
index 87cf515..7e1cb6f 100644
--- a/0000_README
+++ b/0000_README
@@ -195,6 +195,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_fix-race-condition-in-packet-set-ring.patch
+From:   https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=84ac7260236a49c79eede91617700174c2c19b0c
+Desc:   packet: fix race condition in packet_set_ring. CVE-2016-8655. Bug #601926.
+
 Patch:  1800_fix-lru-cache-add-oom-regression.patch
 From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
 Desc:   Revert commit 8f182270dfec mm/swap.c: flush lru pvecs on compound page arrival to fix OOM error.

diff --git a/1520_fix-race-condition-in-packet-set-ring.patch b/1520_fix-race-condition-in-packet-set-ring.patch
new file mode 100644
index 0000000..d85527f
--- /dev/null
+++ b/1520_fix-race-condition-in-packet-set-ring.patch
@@ -0,0 +1,62 @@
+--- a/net/packet/af_packet.c	2016-12-07 18:10:25.785812861 -0500
++++ b/net/packet/af_packet.c	2016-12-07 18:18:45.597933525 -0500
+@@ -3648,19 +3648,25 @@ packet_setsockopt(struct socket *sock, i
+ 
+ 		if (optlen != sizeof(val))
+ 			return -EINVAL;
+-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
+-			return -EBUSY;
+ 		if (copy_from_user(&val, optval, sizeof(val)))
+ 			return -EFAULT;
+ 		switch (val) {
+ 		case TPACKET_V1:
+ 		case TPACKET_V2:
+ 		case TPACKET_V3:
+-			po->tp_version = val;
+-			return 0;
++			break;
+ 		default:
+ 			return -EINVAL;
+ 		}
++		lock_sock(sk);
++		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
++			ret = -EBUSY;
++		} else {
++			po->tp_version = val;
++			ret = 0;
++		}
++		release_sock(sk);
++		return ret;
+ 	}
+ 	case PACKET_RESERVE:
+ 	{
+@@ -4164,6 +4170,7 @@ static int packet_set_ring(struct sock *
+ 	/* Added to avoid minimal code churn */
+ 	struct tpacket_req *req = &req_u->req;
+ 
++	lock_sock(sk);
+ 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
+ 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
+ 		net_warn_ratelimited("Tx-ring is not supported.\n");
+@@ -4245,8 +4252,6 @@ static int packet_set_ring(struct sock *
+ 			goto out;
+ 	}
+ 
+-	lock_sock(sk);
+-
+ 	/* Detach socket from network */
+ 	spin_lock(&po->bind_lock);
+ 	was_running = po->running;
+@@ -4294,11 +4299,11 @@ static int packet_set_ring(struct sock *
+ 		if (!tx_ring)
+ 			prb_shutdown_retire_blk_timer(po, rb_queue);
+ 	}
+-	release_sock(sk);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->tp_block_nr);
+ out:
++	release_sock(sk);
+ 	return err;
+ }
+ 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-11-30 11:45 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-11-30 11:45 UTC (permalink / raw
  To: gentoo-commits

commit:     5eab8276b7a473c02bb93990416e06e7c0bb7f5f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 30 11:45:51 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 30 11:45:51 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=5eab8276

Linux patch 4.1.36

 0000_README             |    4 +
 1035_linux-4.1.36.patch | 2346 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2350 insertions(+)

diff --git a/0000_README b/0000_README
index 5bb6b6b..87cf515 100644
--- a/0000_README
+++ b/0000_README
@@ -183,6 +183,10 @@ Patch:  1034_linux-4.1.35.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.35
 
+Patch:  1035_linux-4.1.36.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.36
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1035_linux-4.1.36.patch b/1035_linux-4.1.36.patch
new file mode 100644
index 0000000..57e0271
--- /dev/null
+++ b/1035_linux-4.1.36.patch
@@ -0,0 +1,2346 @@
+diff --git a/Documentation/x86/exception-tables.txt b/Documentation/x86/exception-tables.txt
+index 32901aa36f0a..e396bcd8d830 100644
+--- a/Documentation/x86/exception-tables.txt
++++ b/Documentation/x86/exception-tables.txt
+@@ -290,3 +290,38 @@ Due to the way that the exception table is built and needs to be ordered,
+ only use exceptions for code in the .text section.  Any other section
+ will cause the exception table to not be sorted correctly, and the
+ exceptions will fail.
++
++Things changed when 64-bit support was added to x86 Linux. Rather than
++double the size of the exception table by expanding the two entries
++from 32 bits to 64 bits, a clever trick was used to store addresses
++as relative offsets from the table itself. The assembly code changed
++from:
++	.long 1b,3b
++to:
++        .long (from) - .
++        .long (to) - .
++
++and the C-code that uses these values converts back to absolute addresses
++like this:
++
++	ex_insn_addr(const struct exception_table_entry *x)
++	{
++		return (unsigned long)&x->insn + x->insn;
++	}
++
++In v4.6 the exception table entry was expanded with a new field "handler".
++This is also 32-bits wide and contains a third relative function
++pointer which points to one of:
++
++1) int ex_handler_default(const struct exception_table_entry *fixup)
++   This is the legacy case that just jumps to the fixup code
++2) int ex_handler_fault(const struct exception_table_entry *fixup)
++   This case provides the fault number of the trap that occurred at
++   entry->insn. It is used to distinguish page faults from machine
++   check.
++3) int ex_handler_ext(const struct exception_table_entry *fixup)
++   This case is used for uaccess_err ... we need to set a flag
++   in the task structure. Before the handler functions existed this
++   case was handled by adding a large offset to the fixup to tag
++   it as special.
++More functions can easily be added.
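The relative-offset encoding described above can be demonstrated in a few lines of user-space C. This is an illustrative stand-alone sketch, not kernel code; it leans on the same assumption the kernel makes, namely that the entry and its target live within +/-2GB of each other (here, .data and .text of one small binary):

	#include <stdio.h>

	/* one field of a kernel-style exception table entry: it holds
	 * (target - &field) as a 32-bit delta, not a full pointer */
	struct rel_entry {
		int insn;
	};

	static int some_code(void) { return 42; }

	static struct rel_entry entry;	/* .data, near .text */

	/* same recovery as ex_insn_addr(): field address + stored delta */
	static unsigned long rel_addr(const struct rel_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;
	}

	int main(void)
	{
		entry.insn = (int)((unsigned long)some_code -
				   (unsigned long)&entry.insn);
		printf("delta %d, recovered %#lx, real %p\n",
		       entry.insn, rel_addr(&entry), (void *)some_code);
		return 0;
	}

Halving the field size is the point: a 64-bit kernel keeps three 32-bit deltas per entry instead of three 64-bit pointers.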
+diff --git a/Makefile b/Makefile
+index 21f657f2c4e6..aa9fbee620ff 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 35
++SUBLEVEL = 36
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -610,6 +610,8 @@ all: vmlinux
+ include arch/$(SRCARCH)/Makefile
+ 
+ KBUILD_CFLAGS	+= $(call cc-option,-fno-delete-null-pointer-checks,)
++KBUILD_CFLAGS	+= $(call cc-option,-fno-PIE)
++KBUILD_AFLAGS	+= $(call cc-option,-fno-PIE)
+ 
+ ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ KBUILD_CFLAGS	+= -Os $(call cc-disable-warning,maybe-uninitialized,)
+diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
+index a9c80a2ea1a7..642934a5ae9b 100644
+--- a/arch/arm/include/asm/kvm_emulate.h
++++ b/arch/arm/include/asm/kvm_emulate.h
+@@ -131,6 +131,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
+ }
+ 
++static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
++{
++	return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
++}
++
+ /* Get Access Size from a data abort */
+ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index a33af44230da..4681b6832d9f 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1435,6 +1435,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 		}
+ 
+ 		/*
++		 * Check for a cache maintenance operation. Since we
++		 * ended-up here, we know it is outside of any memory
++		 * slot. But we can't find out if that is for a device,
++		 * or if the guest is just being stupid. The only thing
++		 * we know for sure is that this range cannot be cached.
++		 *
++		 * So let's assume that the guest is just being
++		 * cautious, and skip the instruction.
++		 */
++		if (kvm_vcpu_dabt_is_cm(vcpu)) {
++			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
++			ret = 1;
++			goto out_unlock;
++		}
++
++		/*
+ 		 * The IPA is reported as [MAX:12], so we need to
+ 		 * complement it with the bottom 12 bits from the
+ 		 * faulting VA. This is always 12 bits, irrespective
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 3ca894ecf699..3e3c4c7a5082 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -153,11 +153,6 @@ static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+ }
+ 
+-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+-{
+-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
+-}
+-
+ static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
+ {
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+@@ -178,6 +173,17 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+ 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+ }
+ 
++static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
++{
++	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
++		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
++}
++
++static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
++{
++	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
++}
++
+ static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+ {
+ 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
+index 4fde8c1df97f..9d096bc89287 100644
+--- a/arch/arm64/include/asm/percpu.h
++++ b/arch/arm64/include/asm/percpu.h
+@@ -52,48 +52,44 @@ static inline unsigned long __percpu_##op(void *ptr,			\
+ 									\
+ 	switch (size) {							\
+ 	case 1:								\
+-		do {							\
+-			asm ("//__per_cpu_" #op "_1\n"			\
+-			"ldxrb	  %w[ret], %[ptr]\n"			\
++		asm ("//__per_cpu_" #op "_1\n"				\
++		"1:	ldxrb	  %w[ret], %[ptr]\n"			\
+ 			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
+-			"stxrb	  %w[loop], %w[ret], %[ptr]\n"		\
+-			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
+-			  [ptr] "+Q"(*(u8 *)ptr)			\
+-			: [val] "Ir" (val));				\
+-		} while (loop);						\
++		"	stxrb	  %w[loop], %w[ret], %[ptr]\n"		\
++		"	cbnz	  %w[loop], 1b"				\
++		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
++		  [ptr] "+Q"(*(u8 *)ptr)				\
++		: [val] "Ir" (val));					\
+ 		break;							\
+ 	case 2:								\
+-		do {							\
+-			asm ("//__per_cpu_" #op "_2\n"			\
+-			"ldxrh	  %w[ret], %[ptr]\n"			\
++		asm ("//__per_cpu_" #op "_2\n"				\
++		"1:	ldxrh	  %w[ret], %[ptr]\n"			\
+ 			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
+-			"stxrh	  %w[loop], %w[ret], %[ptr]\n"		\
+-			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
+-			  [ptr]  "+Q"(*(u16 *)ptr)			\
+-			: [val] "Ir" (val));				\
+-		} while (loop);						\
++		"	stxrh	  %w[loop], %w[ret], %[ptr]\n"		\
++		"	cbnz	  %w[loop], 1b"				\
++		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
++		  [ptr]  "+Q"(*(u16 *)ptr)				\
++		: [val] "Ir" (val));					\
+ 		break;							\
+ 	case 4:								\
+-		do {							\
+-			asm ("//__per_cpu_" #op "_4\n"			\
+-			"ldxr	  %w[ret], %[ptr]\n"			\
++		asm ("//__per_cpu_" #op "_4\n"				\
++		"1:	ldxr	  %w[ret], %[ptr]\n"			\
+ 			#asm_op " %w[ret], %w[ret], %w[val]\n"		\
+-			"stxr	  %w[loop], %w[ret], %[ptr]\n"		\
+-			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
+-			  [ptr] "+Q"(*(u32 *)ptr)			\
+-			: [val] "Ir" (val));				\
+-		} while (loop);						\
++		"	stxr	  %w[loop], %w[ret], %[ptr]\n"		\
++		"	cbnz	  %w[loop], 1b"				\
++		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
++		  [ptr] "+Q"(*(u32 *)ptr)				\
++		: [val] "Ir" (val));					\
+ 		break;							\
+ 	case 8:								\
+-		do {							\
+-			asm ("//__per_cpu_" #op "_8\n"			\
+-			"ldxr	  %[ret], %[ptr]\n"			\
++		asm ("//__per_cpu_" #op "_8\n"				\
++		"1:	ldxr	  %[ret], %[ptr]\n"			\
+ 			#asm_op " %[ret], %[ret], %[val]\n"		\
+-			"stxr	  %w[loop], %[ret], %[ptr]\n"		\
+-			: [loop] "=&r" (loop), [ret] "=&r" (ret),	\
+-			  [ptr] "+Q"(*(u64 *)ptr)			\
+-			: [val] "Ir" (val));				\
+-		} while (loop);						\
++		"	stxr	  %w[loop], %[ret], %[ptr]\n"		\
++		"	cbnz	  %w[loop], 1b"				\
++		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
++		  [ptr] "+Q"(*(u64 *)ptr)				\
++		: [val] "Ir" (val));					\
+ 		break;							\
+ 	default:							\
+ 		BUILD_BUG();						\
+@@ -158,44 +154,40 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+ 
+ 	switch (size) {
+ 	case 1:
+-		do {
+-			asm ("//__percpu_xchg_1\n"
+-			"ldxrb %w[ret], %[ptr]\n"
+-			"stxrb %w[loop], %w[val], %[ptr]\n"
+-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
+-			  [ptr] "+Q"(*(u8 *)ptr)
+-			: [val] "r" (val));
+-		} while (loop);
++		asm ("//__percpu_xchg_1\n"
++		"1:	ldxrb	%w[ret], %[ptr]\n"
++		"	stxrb	%w[loop], %w[val], %[ptr]\n"
++		"	cbnz	%w[loop], 1b"
++		: [loop] "=&r"(loop), [ret] "=&r"(ret),
++		  [ptr] "+Q"(*(u8 *)ptr)
++		: [val] "r" (val));
+ 		break;
+ 	case 2:
+-		do {
+-			asm ("//__percpu_xchg_2\n"
+-			"ldxrh %w[ret], %[ptr]\n"
+-			"stxrh %w[loop], %w[val], %[ptr]\n"
+-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
+-			  [ptr] "+Q"(*(u16 *)ptr)
+-			: [val] "r" (val));
+-		} while (loop);
++		asm ("//__percpu_xchg_2\n"
++		"1:	ldxrh	%w[ret], %[ptr]\n"
++		"	stxrh	%w[loop], %w[val], %[ptr]\n"
++		"	cbnz	%w[loop], 1b"
++		: [loop] "=&r"(loop), [ret] "=&r"(ret),
++		  [ptr] "+Q"(*(u16 *)ptr)
++		: [val] "r" (val));
+ 		break;
+ 	case 4:
+-		do {
+-			asm ("//__percpu_xchg_4\n"
+-			"ldxr %w[ret], %[ptr]\n"
+-			"stxr %w[loop], %w[val], %[ptr]\n"
+-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
+-			  [ptr] "+Q"(*(u32 *)ptr)
+-			: [val] "r" (val));
+-		} while (loop);
++		asm ("//__percpu_xchg_4\n"
++		"1:	ldxr	%w[ret], %[ptr]\n"
++		"	stxr	%w[loop], %w[val], %[ptr]\n"
++		"	cbnz	%w[loop], 1b"
++		: [loop] "=&r"(loop), [ret] "=&r"(ret),
++		  [ptr] "+Q"(*(u32 *)ptr)
++		: [val] "r" (val));
+ 		break;
+ 	case 8:
+-		do {
+-			asm ("//__percpu_xchg_8\n"
+-			"ldxr %[ret], %[ptr]\n"
+-			"stxr %w[loop], %[val], %[ptr]\n"
+-			: [loop] "=&r"(loop), [ret] "=&r"(ret),
+-			  [ptr] "+Q"(*(u64 *)ptr)
+-			: [val] "r" (val));
+-		} while (loop);
++		asm ("//__percpu_xchg_8\n"
++		"1:	ldxr	%[ret], %[ptr]\n"
++		"	stxr	%w[loop], %[val], %[ptr]\n"
++		"	cbnz	%w[loop], 1b"
++		: [loop] "=&r"(loop), [ret] "=&r"(ret),
++		  [ptr] "+Q"(*(u64 *)ptr)
++		: [val] "r" (val));
+ 		break;
+ 	default:
+ 		BUILD_BUG();
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index cc7435c9676e..b346b35f827d 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -572,8 +572,9 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
+ 	b.lt	4f				// Skip if no PMU present
+ 	mrs	x0, pmcr_el0			// Disable debug access traps
+ 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
+-	msr	mdcr_el2, x0			// all PMU counters from EL1
+ 4:
++	csel	x0, xzr, x0, lt			// all PMU counters from EL1
++	msr	mdcr_el2, x0			// (if they exist)
+ 
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index ab518d14b7b0..2ab11c31d77c 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -398,7 +398,10 @@ struct kvm_vcpu_arch {
+ 	/* Host KSEG0 address of the EI/DI offset */
+ 	void *kseg0_commpage;
+ 
+-	u32 io_gpr;		/* GPR used as IO source/target */
++	/* Resume PC after MMIO completion */
++	unsigned long io_pc;
++	/* GPR used as IO source/target */
++	u32 io_gpr;
+ 
+ 	struct hrtimer comparecount_timer;
+ 	/* Count timer control KVM register */
+@@ -420,8 +423,6 @@ struct kvm_vcpu_arch {
+ 	/* Bitmask of pending exceptions to be cleared */
+ 	unsigned long pending_exceptions_clr;
+ 
+-	unsigned long pending_load_cause;
+-
+ 	/* Save/Restore the entryhi register when are are preempted/scheduled back in */
+ 	unsigned long preempt_entryhi;
+ 
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index d6476d11212e..7f3183494e69 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+ 	struct mips_coproc *cop0 = vcpu->arch.cop0;
+ 	enum emulation_result er = EMULATE_DONE;
+ 
+-	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
++	if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
++		kvm_clear_c0_guest_status(cop0, ST0_ERL);
++		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
++	} else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+ 		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+ 			  kvm_read_c0_guest_epc(cop0));
+ 		kvm_clear_c0_guest_status(cop0, ST0_EXL);
+ 		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+ 
+-	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+-		kvm_clear_c0_guest_status(cop0, ST0_ERL);
+-		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+ 	} else {
+ 		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+ 			vcpu->arch.pc);
+@@ -1430,6 +1430,7 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ 					    struct kvm_vcpu *vcpu)
+ {
+ 	enum emulation_result er = EMULATE_DO_MMIO;
++	unsigned long curr_pc;
+ 	int32_t op, base, rt, offset;
+ 	uint32_t bytes;
+ 
+@@ -1438,7 +1439,18 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+ 	offset = inst & 0xffff;
+ 	op = (inst >> 26) & 0x3f;
+ 
+-	vcpu->arch.pending_load_cause = cause;
++	/*
++	 * Find the resume PC now while we have safe and easy access to the
++	 * prior branch instruction, and save it for
++	 * kvm_mips_complete_mmio_load() to restore later.
++	 */
++	curr_pc = vcpu->arch.pc;
++	er = update_pc(vcpu, cause);
++	if (er == EMULATE_FAIL)
++		return er;
++	vcpu->arch.io_pc = vcpu->arch.pc;
++	vcpu->arch.pc = curr_pc;
++
+ 	vcpu->arch.io_gpr = rt;
+ 
+ 	switch (op) {
+@@ -2418,9 +2430,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ 		goto done;
+ 	}
+ 
+-	er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+-	if (er == EMULATE_FAIL)
+-		return er;
++	/* Restore saved resume PC */
++	vcpu->arch.pc = vcpu->arch.io_pc;
+ 
+ 	switch (run->mmio.len) {
+ 	case 4:
+@@ -2442,11 +2453,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ 		break;
+ 	}
+ 
+-	if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+-		kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+-			  vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+-			  vcpu->mmio_needed);
+-
+ done:
+ 	return er;
+ }
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 099c23616901..8f13c7facdd7 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -106,8 +106,6 @@ linux_gateway_entry:
+ 	mtsp	%r0,%sr4			/* get kernel space into sr4 */
+ 	mtsp	%r0,%sr5			/* get kernel space into sr5 */
+ 	mtsp	%r0,%sr6			/* get kernel space into sr6 */
+-	mfsp    %sr7,%r1                        /* save user sr7 */
+-	mtsp    %r1,%sr3                        /* and store it in sr3 */
+ 
+ #ifdef CONFIG_64BIT
+ 	/* for now we can *always* set the W bit on entry to the syscall
+@@ -133,6 +131,14 @@ linux_gateway_entry:
+ 	depdi	0, 31, 32, %r21
+ 1:	
+ #endif
++
++	/* We use a rsm/ssm pair to prevent sr3 from being clobbered
++	 * by external interrupts.
++	 */
++	mfsp    %sr7,%r1                        /* save user sr7 */
++	rsm	PSW_SM_I, %r0			/* disable interrupts */
++	mtsp    %r1,%sr3                        /* and store it in sr3 */
++
+ 	mfctl   %cr30,%r1
+ 	xor     %r1,%r30,%r30                   /* ye olde xor trick */
+ 	xor     %r1,%r30,%r1
+@@ -147,6 +153,7 @@ linux_gateway_entry:
+ 	 */
+ 
+ 	mtsp	%r0,%sr7			/* get kernel space into sr7 */
++	ssm	PSW_SM_I, %r0			/* enable interrupts */
+ 	STREGM	%r1,FRAME_SIZE(%r30)		/* save r1 (usp) here for now */
+ 	mfctl	%cr30,%r1			/* get task ptr in %r1 */
+ 	LDREG	TI_TASK(%r1),%r1
+diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
+index 112ccf497562..73f638789a38 100644
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -44,7 +44,7 @@
+ 	std	r0,0(r1);					\
+ 	ptesync;						\
+ 	ld	r0,0(r1);					\
+-1:	cmp	cr0,r0,r0;					\
++1:	cmpd	cr0,r0,r0;					\
+ 	bne	1b;						\
+ 	IDLE_INST;						\
+ 	b	.
+diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
+index f031a47d7701..6fb1b3774b11 100644
+--- a/arch/powerpc/mm/copro_fault.c
++++ b/arch/powerpc/mm/copro_fault.c
+@@ -106,6 +106,8 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
+ 	switch (REGION_ID(ea)) {
+ 	case USER_REGION_ID:
+ 		pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
++		if (mm == NULL)
++			return 1;
+ 		psize = get_slice_psize(mm, ea);
+ 		ssize = user_segment_size(ea);
+ 		vsid = get_vsid(mm->context.id, ea, ssize);
+diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
+index 7730c1c5c83a..e2015452177d 100644
+--- a/arch/x86/include/asm/asm.h
++++ b/arch/x86/include/asm/asm.h
+@@ -44,19 +44,22 @@
+ 
+ /* Exception table entry */
+ #ifdef __ASSEMBLY__
+-# define _ASM_EXTABLE(from,to)					\
++# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
+ 	.pushsection "__ex_table","a" ;				\
+-	.balign 8 ;						\
++	.balign 4 ;						\
+ 	.long (from) - . ;					\
+ 	.long (to) - . ;					\
++	.long (handler) - . ;					\
+ 	.popsection
+ 
+-# define _ASM_EXTABLE_EX(from,to)				\
+-	.pushsection "__ex_table","a" ;				\
+-	.balign 8 ;						\
+-	.long (from) - . ;					\
+-	.long (to) - . + 0x7ffffff0 ;				\
+-	.popsection
++# define _ASM_EXTABLE(from, to)					\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
++
++# define _ASM_EXTABLE_FAULT(from, to)				\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
++
++# define _ASM_EXTABLE_EX(from, to)				\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
+ 
+ # define _ASM_NOKPROBE(entry)					\
+ 	.pushsection "_kprobe_blacklist","aw" ;			\
+@@ -64,19 +67,24 @@
+ 	_ASM_PTR (entry);					\
+ 	.popsection
+ #else
+-# define _ASM_EXTABLE(from,to)					\
++# define _EXPAND_EXTABLE_HANDLE(x) #x
++# define _ASM_EXTABLE_HANDLE(from, to, handler)			\
+ 	" .pushsection \"__ex_table\",\"a\"\n"			\
+-	" .balign 8\n"						\
++	" .balign 4\n"						\
+ 	" .long (" #from ") - .\n"				\
+ 	" .long (" #to ") - .\n"				\
++	" .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n"	\
+ 	" .popsection\n"
+ 
+-# define _ASM_EXTABLE_EX(from,to)				\
+-	" .pushsection \"__ex_table\",\"a\"\n"			\
+-	" .balign 8\n"						\
+-	" .long (" #from ") - .\n"				\
+-	" .long (" #to ") - . + 0x7ffffff0\n"			\
+-	" .popsection\n"
++# define _ASM_EXTABLE(from, to)					\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
++
++# define _ASM_EXTABLE_FAULT(from, to)				\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
++
++# define _ASM_EXTABLE_EX(from, to)				\
++	_ASM_EXTABLE_HANDLE(from, to, ex_handler_ext)
++
+ /* For C file, we already have NOKPROBE_SYMBOL macro */
+ #endif
+ 
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index d081e7e42fb3..81782dc54193 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -89,12 +89,11 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+ 	likely(!__range_not_ok(addr, size, user_addr_max()))
+ 
+ /*
+- * The exception table consists of pairs of addresses relative to the
+- * exception table enty itself: the first is the address of an
+- * instruction that is allowed to fault, and the second is the address
+- * at which the program should continue.  No registers are modified,
+- * so it is entirely up to the continuation code to figure out what to
+- * do.
++ * The exception table consists of triples of addresses relative to the
++ * exception table entry itself. The first address is of an instruction
++ * that is allowed to fault, the second is the target at which the program
++ * should continue. The third is a handler function to deal with the fault
++ * caused by the instruction in the first field.
+  *
+  * All the routines below use bits of fixup code that are out of line
+  * with the main instruction path.  This means when everything is well,
+@@ -103,13 +102,14 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
+  */
+ 
+ struct exception_table_entry {
+-	int insn, fixup;
++	int insn, fixup, handler;
+ };
+ /* This is not the generic standard exception_table_entry format */
+ #define ARCH_HAS_SORT_EXTABLE
+ #define ARCH_HAS_SEARCH_EXTABLE
+ 
+-extern int fixup_exception(struct pt_regs *regs);
++extern int fixup_exception(struct pt_regs *regs, int trapnr);
++extern bool ex_has_fault_handler(unsigned long ip);
+ extern int early_fixup_exception(unsigned long *ip);
+ 
+ /*
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 023c442c33bb..e1d1f6cbaf11 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -1000,7 +1000,7 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+ 		 * In case the user-specified fault handler returned
+ 		 * zero, try to fix up.
+ 		 */
+-		if (fixup_exception(regs))
++		if (fixup_exception(regs, trapnr))
+ 			return 1;
+ 
+ 		/*
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 324ab5247687..020248f2cec4 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -208,7 +208,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
+ 	}
+ 
+ 	if (!user_mode(regs)) {
+-		if (!fixup_exception(regs)) {
++		if (!fixup_exception(regs, trapnr)) {
+ 			tsk->thread.error_code = error_code;
+ 			tsk->thread.trap_nr = trapnr;
+ 			die(str, regs, error_code);
+@@ -469,7 +469,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
+ 
+ 	tsk = current;
+ 	if (!user_mode(regs)) {
+-		if (fixup_exception(regs))
++		if (fixup_exception(regs, X86_TRAP_GP))
+ 			goto exit;
+ 
+ 		tsk->thread.error_code = error_code;
+@@ -720,7 +720,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ 
+ 	if (!user_mode(regs))
+ 	{
+-		if (!fixup_exception(regs)) {
++		if (!fixup_exception(regs, trapnr)) {
+ 			task->thread.error_code = error_code;
+ 			task->thread.trap_nr = trapnr;
+ 			die(str, regs, error_code);
+diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
+index 28146f03c514..74c3285dfdcf 100644
+--- a/arch/x86/kvm/ioapic.c
++++ b/arch/x86/kvm/ioapic.c
+@@ -596,7 +596,7 @@ static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+ 	ioapic->irr = 0;
+ 	ioapic->irr_delivered = 0;
+ 	ioapic->id = 0;
+-	memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
++	memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
+ 	rtc_irq_eoi_tracking_reset(ioapic);
+ 	update_handled_vectors(ioapic);
+ }
+diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
+index 903ec1e9c326..9dd7e4b7fcde 100644
+--- a/arch/x86/mm/extable.c
++++ b/arch/x86/mm/extable.c
+@@ -3,6 +3,9 @@
+ #include <linux/sort.h>
+ #include <asm/uaccess.h>
+ 
++typedef bool (*ex_handler_t)(const struct exception_table_entry *,
++			    struct pt_regs *, int);
++
+ static inline unsigned long
+ ex_insn_addr(const struct exception_table_entry *x)
+ {
+@@ -13,11 +16,56 @@ ex_fixup_addr(const struct exception_table_entry *x)
+ {
+ 	return (unsigned long)&x->fixup + x->fixup;
+ }
++static inline ex_handler_t
++ex_fixup_handler(const struct exception_table_entry *x)
++{
++	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
++}
+ 
+-int fixup_exception(struct pt_regs *regs)
++bool ex_handler_default(const struct exception_table_entry *fixup,
++		       struct pt_regs *regs, int trapnr)
+ {
+-	const struct exception_table_entry *fixup;
+-	unsigned long new_ip;
++	regs->ip = ex_fixup_addr(fixup);
++	return true;
++}
++EXPORT_SYMBOL(ex_handler_default);
++
++bool ex_handler_fault(const struct exception_table_entry *fixup,
++		     struct pt_regs *regs, int trapnr)
++{
++	regs->ip = ex_fixup_addr(fixup);
++	regs->ax = trapnr;
++	return true;
++}
++EXPORT_SYMBOL_GPL(ex_handler_fault);
++
++bool ex_handler_ext(const struct exception_table_entry *fixup,
++		   struct pt_regs *regs, int trapnr)
++{
++	/* Special hack for uaccess_err */
++	current_thread_info()->uaccess_err = 1;
++	regs->ip = ex_fixup_addr(fixup);
++	return true;
++}
++EXPORT_SYMBOL(ex_handler_ext);
++
++bool ex_has_fault_handler(unsigned long ip)
++{
++	const struct exception_table_entry *e;
++	ex_handler_t handler;
++
++	e = search_exception_tables(ip);
++	if (!e)
++		return false;
++	handler = ex_fixup_handler(e);
++
++	return handler == ex_handler_fault;
++}
++
++int fixup_exception(struct pt_regs *regs, int trapnr)
++{
++	const struct exception_table_entry *e;
++	ex_handler_t handler;
+ 
+ #ifdef CONFIG_PNPBIOS
+ 	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
+@@ -33,42 +81,34 @@ int fixup_exception(struct pt_regs *regs)
+ 	}
+ #endif
+ 
+-	fixup = search_exception_tables(regs->ip);
+-	if (fixup) {
+-		new_ip = ex_fixup_addr(fixup);
+-
+-		if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+-			/* Special hack for uaccess_err */
+-			current_thread_info()->uaccess_err = 1;
+-			new_ip -= 0x7ffffff0;
+-		}
+-		regs->ip = new_ip;
+-		return 1;
+-	}
++	e = search_exception_tables(regs->ip);
++	if (!e)
++		return 0;
+ 
+-	return 0;
++	handler = ex_fixup_handler(e);
++	return handler(e, regs, trapnr);
+ }
+ 
+ /* Restricted version used during very early boot */
+ int __init early_fixup_exception(unsigned long *ip)
+ {
+-	const struct exception_table_entry *fixup;
++	const struct exception_table_entry *e;
+ 	unsigned long new_ip;
++	ex_handler_t handler;
+ 
+-	fixup = search_exception_tables(*ip);
+-	if (fixup) {
+-		new_ip = ex_fixup_addr(fixup);
++	e = search_exception_tables(*ip);
++	if (!e)
++		return 0;
+ 
+-		if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
+-			/* uaccess handling not supported during early boot */
+-			return 0;
+-		}
++	new_ip  = ex_fixup_addr(e);
++	handler = ex_fixup_handler(e);
+ 
+-		*ip = new_ip;
+-		return 1;
+-	}
++	/* special handling not supported during early boot */
++	if (handler != ex_handler_default)
++		return 0;
+ 
+-	return 0;
++	*ip = new_ip;
++	return 1;
+ }
+ 
+ /*
+@@ -133,6 +173,8 @@ void sort_extable(struct exception_table_entry *start,
+ 		i += 4;
+ 		p->fixup += i;
+ 		i += 4;
++		p->handler += i;
++		i += 4;
+ 	}
+ 
+ 	sort(start, finish - start, sizeof(struct exception_table_entry),
+@@ -145,6 +187,8 @@ void sort_extable(struct exception_table_entry *start,
+ 		i += 4;
+ 		p->fixup -= i;
+ 		i += 4;
++		p->handler -= i;
++		i += 4;
+ 	}
+ }
+ 
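The sort_extable() changes above extend an existing dance to the third field. The stored values are self-relative, so physically moving an entry during the sort would silently retarget them. The first loop therefore rebases every delta to be relative to the start of the table (i is the field's byte offset from the table start, advancing 4 bytes per field), the sort runs on position-independent values, and the second loop converts back. A worked example with hypothetical numbers, table base T:

	before sort:  entry at T+16, insn = 0x100   target = (T+16) + 0x100 = T+0x110
	rebase:       insn += 16                    insn = 0x110 = target - T
	sort:         entry moves to T+32           target unchanged
	restore:      insn -= 32                    insn = 0xf0
	check:        (T+32) + 0xf0 = T+0x110       same target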
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 62855ac37ab7..27bc31f0da52 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -659,7 +659,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
+ 	int sig;
+ 
+ 	/* Are we prepared to handle this kernel fault? */
+-	if (fixup_exception(regs)) {
++	if (fixup_exception(regs, X86_TRAP_PF)) {
+ 		/*
+ 		 * Any interrupt that takes a fault gets the fixup. This makes
+ 		 * the below recursive fault logic only apply to a faults from
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index f1a26d937d98..6f086415727c 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
+ 
+ 
+ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+-					 uint32_t desc)
++					 u32 desc, bool need_strong_ref)
+ {
+ 	struct rb_node *n = proc->refs_by_desc.rb_node;
+ 	struct binder_ref *ref;
+@@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ 	while (n) {
+ 		ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ 
+-		if (desc < ref->desc)
++		if (desc < ref->desc) {
+ 			n = n->rb_left;
+-		else if (desc > ref->desc)
++		} else if (desc > ref->desc) {
+ 			n = n->rb_right;
+-		else
++		} else if (need_strong_ref && !ref->strong) {
++			binder_user_error("tried to use weak ref as strong ref\n");
++			return NULL;
++		} else {
+ 			return ref;
++		}
+ 	}
+ 	return NULL;
+ }
+@@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
+ 		} break;
+ 		case BINDER_TYPE_HANDLE:
+ 		case BINDER_TYPE_WEAK_HANDLE: {
+-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++			struct binder_ref *ref;
++
++			ref = binder_get_ref(proc, fp->handle,
++					     fp->type == BINDER_TYPE_HANDLE);
+ 
+ 			if (ref == NULL) {
+ 				pr_err("transaction release %d bad handle %d\n",
+@@ -1380,7 +1387,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 		if (tr->target.handle) {
+ 			struct binder_ref *ref;
+ 
+-			ref = binder_get_ref(proc, tr->target.handle);
++			ref = binder_get_ref(proc, tr->target.handle, true);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d got transaction to invalid handle\n",
+ 					proc->pid, thread->pid);
+@@ -1571,7 +1578,9 @@ static void binder_transaction(struct binder_proc *proc,
+ 				fp->type = BINDER_TYPE_HANDLE;
+ 			else
+ 				fp->type = BINDER_TYPE_WEAK_HANDLE;
++			fp->binder = 0;
+ 			fp->handle = ref->desc;
++			fp->cookie = 0;
+ 			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ 				       &thread->todo);
+ 
+@@ -1583,7 +1592,10 @@ static void binder_transaction(struct binder_proc *proc,
+ 		} break;
+ 		case BINDER_TYPE_HANDLE:
+ 		case BINDER_TYPE_WEAK_HANDLE: {
+-			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
++			struct binder_ref *ref;
++
++			ref = binder_get_ref(proc, fp->handle,
++					     fp->type == BINDER_TYPE_HANDLE);
+ 
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+@@ -1618,7 +1630,9 @@ static void binder_transaction(struct binder_proc *proc,
+ 					return_error = BR_FAILED_REPLY;
+ 					goto err_binder_get_ref_for_node_failed;
+ 				}
++				fp->binder = 0;
+ 				fp->handle = new_ref->desc;
++				fp->cookie = 0;
+ 				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ 				trace_binder_transaction_ref_to_ref(t, ref,
+ 								    new_ref);
+@@ -1672,6 +1686,7 @@ static void binder_transaction(struct binder_proc *proc,
+ 			binder_debug(BINDER_DEBUG_TRANSACTION,
+ 				     "        fd %d -> %d\n", fp->handle, target_fd);
+ 			/* TODO: fput? */
++			fp->binder = 0;
+ 			fp->handle = target_fd;
+ 		} break;
+ 
+@@ -1794,7 +1809,9 @@ static int binder_thread_write(struct binder_proc *proc,
+ 						ref->desc);
+ 				}
+ 			} else
+-				ref = binder_get_ref(proc, target);
++				ref = binder_get_ref(proc, target,
++						     cmd == BC_ACQUIRE ||
++						     cmd == BC_RELEASE);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d refcount change on invalid ref %d\n",
+ 					proc->pid, thread->pid, target);
+@@ -1990,7 +2007,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ 				return -EFAULT;
+ 			ptr += sizeof(binder_uintptr_t);
+-			ref = binder_get_ref(proc, target);
++			ref = binder_get_ref(proc, target, false);
+ 			if (ref == NULL) {
+ 				binder_user_error("%d:%d %s invalid ref %d\n",
+ 					proc->pid, thread->pid,
+diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
+index 50754d203310..8cc67132d55d 100644
+--- a/drivers/char/virtio_console.c
++++ b/drivers/char/virtio_console.c
+@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
+ 	spin_lock_irq(&port->inbuf_lock);
+ 	/* Remove unused data this port might have received. */
+ 	discard_port_data(port);
++	spin_unlock_irq(&port->inbuf_lock);
+ 
+ 	/* Remove buffers we queued up for the Host to send us data in. */
+-	while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+-		free_buf(buf, true);
+-	spin_unlock_irq(&port->inbuf_lock);
++	do {
++		spin_lock_irq(&port->inbuf_lock);
++		buf = virtqueue_detach_unused_buf(port->in_vq);
++		spin_unlock_irq(&port->inbuf_lock);
++		if (buf)
++			free_buf(buf, true);
++	} while (buf);
+ 
+ 	spin_lock_irq(&port->outvq_lock);
+ 	reclaim_consumed_buffers(port);
++	spin_unlock_irq(&port->outvq_lock);
+ 
+ 	/* Free pending buffers from the out-queue. */
+-	while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
+-		free_buf(buf, true);
+-	spin_unlock_irq(&port->outvq_lock);
++	do {
++		spin_lock_irq(&port->outvq_lock);
++		buf = virtqueue_detach_unused_buf(port->out_vq);
++		spin_unlock_irq(&port->outvq_lock);
++		if (buf)
++			free_buf(buf, true);
++	} while (buf);
+ }
+ 
+ /*
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index f4ea80d602f7..b9d2f76a0cf7 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -73,13 +73,13 @@ struct rfc2734_header {
+ 
+ #define fwnet_get_hdr_lf(h)		(((h)->w0 & 0xc0000000) >> 30)
+ #define fwnet_get_hdr_ether_type(h)	(((h)->w0 & 0x0000ffff))
+-#define fwnet_get_hdr_dg_size(h)	(((h)->w0 & 0x0fff0000) >> 16)
++#define fwnet_get_hdr_dg_size(h)	((((h)->w0 & 0x0fff0000) >> 16) + 1)
+ #define fwnet_get_hdr_fg_off(h)		(((h)->w0 & 0x00000fff))
+ #define fwnet_get_hdr_dgl(h)		(((h)->w1 & 0xffff0000) >> 16)
+ 
+-#define fwnet_set_hdr_lf(lf)		((lf)  << 30)
++#define fwnet_set_hdr_lf(lf)		((lf) << 30)
+ #define fwnet_set_hdr_ether_type(et)	(et)
+-#define fwnet_set_hdr_dg_size(dgs)	((dgs) << 16)
++#define fwnet_set_hdr_dg_size(dgs)	(((dgs) - 1) << 16)
+ #define fwnet_set_hdr_fg_off(fgo)	(fgo)
+ 
+ #define fwnet_set_hdr_dgl(dgl)		((dgl) << 16)
+@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 	int retval;
+ 	u16 ether_type;
+ 
++	if (len <= RFC2374_UNFRAG_HDR_SIZE)
++		return 0;
++
+ 	hdr.w0 = be32_to_cpu(buf[0]);
+ 	lf = fwnet_get_hdr_lf(&hdr);
+ 	if (lf == RFC2374_HDR_UNFRAG) {
+@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 		return fwnet_finish_incoming_packet(net, skb, source_node_id,
+ 						    is_broadcast, ether_type);
+ 	}
++
+ 	/* A datagram fragment has been received, now the fun begins. */
++
++	if (len <= RFC2374_FRAG_HDR_SIZE)
++		return 0;
++
+ 	hdr.w1 = ntohl(buf[1]);
+ 	buf += 2;
+ 	len -= RFC2374_FRAG_HDR_SIZE;
+@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
+ 		fg_off = fwnet_get_hdr_fg_off(&hdr);
+ 	}
+ 	datagram_label = fwnet_get_hdr_dgl(&hdr);
+-	dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
++	dg_size = fwnet_get_hdr_dg_size(&hdr);
++
++	if (fg_off + len > dg_size)
++		return 0;
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
+ 	fw_send_response(card, r, rcode);
+ }
+ 
++static int gasp_source_id(__be32 *p)
++{
++	return be32_to_cpu(p[0]) >> 16;
++}
++
++static u32 gasp_specifier_id(__be32 *p)
++{
++	return (be32_to_cpu(p[0]) & 0xffff) << 8 |
++	       (be32_to_cpu(p[1]) & 0xff000000) >> 24;
++}
++
++static u32 gasp_version(__be32 *p)
++{
++	return be32_to_cpu(p[1]) & 0xffffff;
++}
++
+ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 		u32 cycle, size_t header_length, void *header, void *data)
+ {
+@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 	__be32 *buf_ptr;
+ 	int retval;
+ 	u32 length;
+-	u16 source_node_id;
+-	u32 specifier_id;
+-	u32 ver;
+ 	unsigned long offset;
+ 	unsigned long flags;
+ 
+@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
+-	specifier_id =    (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
+-			| (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
+-	ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
+-	source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
+-
+-	if (specifier_id == IANA_SPECIFIER_ID &&
+-	    (ver == RFC2734_SW_VERSION
++	if (length > IEEE1394_GASP_HDR_SIZE &&
++	    gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
++	    (gasp_version(buf_ptr) == RFC2734_SW_VERSION
+ #if IS_ENABLED(CONFIG_IPV6)
+-	     || ver == RFC3146_SW_VERSION
++	     || gasp_version(buf_ptr) == RFC3146_SW_VERSION
+ #endif
+-	    )) {
+-		buf_ptr += 2;
+-		length -= IEEE1394_GASP_HDR_SIZE;
+-		fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
++	    ))
++		fwnet_incoming_packet(dev, buf_ptr + 2,
++				      length - IEEE1394_GASP_HDR_SIZE,
++				      gasp_source_id(buf_ptr),
+ 				      context->card->generation, true);
+-	}
+ 
+ 	packet.payload_length = dev->rcv_buffer_size;
+ 	packet.interrupt = 1;
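The firewire net fixes above tighten RFC 2734 parsing: short packets are rejected before the header words are read, the dg_size field is decoded as the wire value plus one (RFC 2734 stores datagram_size - 1, which the removed "??? + 1" comment had been hinting at), each fragment is checked against the datagram size it claims, and the GASP broadcast path now validates its header length as well. A hedged sketch of the fragment bounds check, mirroring the masks in the fwnet_get_hdr_* macros (helper name hypothetical):

	/* w0 is the first big-endian word of a fragment header,
	 * len the fragment payload length */
	static int frag_in_bounds(u32 w0, unsigned int len)
	{
		unsigned int dg_size = ((w0 & 0x0fff0000) >> 16) + 1; /* wire: size - 1 */
		unsigned int fg_off  =  (w0 & 0x00000fff);

		return fg_off + len <= dg_size;
	}

Without that check, a peer could claim a fragment offset that places payload bytes past the end of the reassembly buffer sized from dg_size.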
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 52dea773bb1b..ef773bf58a25 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref)
+ 		/* no need to clean up vcpi
+ 		 * as if we have no connector we never setup a vcpi */
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
++		port->pdt = DP_PEER_DEVICE_NONE;
+ 	}
+ 	kfree(port);
+ }
+@@ -1155,7 +1156,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 			drm_dp_put_port(port);
+ 			goto out;
+ 		}
+-		if (port->port_num >= 8) {
++		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
++		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
++		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ 		}
+ 	}
+@@ -2860,6 +2863,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 		mgr->cbs->destroy_connector(mgr, port->connector);
+ 
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
++		port->pdt = DP_PEER_DEVICE_NONE;
+ 
+ 		if (!port->input && port->vcpi.vcpi > 0) {
+ 			drm_dp_mst_reset_vcpi_slots(mgr, port);
+diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
+index 64d3a771920d..fc305ee22471 100644
+--- a/drivers/gpu/drm/radeon/ni.c
++++ b/drivers/gpu/drm/radeon/ni.c
+@@ -1371,9 +1371,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
+ void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ 			      int ring, u32 cp_int_cntl)
+ {
+-	u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+-
+-	WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
++	WREG32(SRBM_GFX_CNTL, RINGID(ring));
+ 	WREG32(CP_INT_CNTL, cp_int_cntl);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
+index fa2154493cf1..470af4aa4a6a 100644
+--- a/drivers/gpu/drm/radeon/r600_dpm.c
++++ b/drivers/gpu/drm/radeon/r600_dpm.c
+@@ -156,19 +156,20 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
+ 	struct drm_device *dev = rdev->ddev;
+ 	struct drm_crtc *crtc;
+ 	struct radeon_crtc *radeon_crtc;
+-	u32 line_time_us, vblank_lines;
++	u32 vblank_in_pixels;
+ 	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ 
+ 	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
+ 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ 			radeon_crtc = to_radeon_crtc(crtc);
+ 			if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+-				line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
+-					radeon_crtc->hw_mode.clock;
+-				vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
+-					radeon_crtc->hw_mode.crtc_vdisplay +
+-					(radeon_crtc->v_border * 2);
+-				vblank_time_us = vblank_lines * line_time_us;
++				vblank_in_pixels =
++					radeon_crtc->hw_mode.crtc_htotal *
++					(radeon_crtc->hw_mode.crtc_vblank_end -
++					 radeon_crtc->hw_mode.crtc_vdisplay +
++					 (radeon_crtc->v_border * 2));
++
++				vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
+ 				break;
+ 			}
+ 		}
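The r600_dpm_get_vblank_time() rewrite above is a precision fix: computing a per-line time in whole microseconds first truncates, and that truncation error is then multiplied by the number of blanking lines. Multiplying pixels first and dividing once keeps the result exact to the final microsecond. With hypothetical 1080p timings (crtc_htotal = 2200, 45 blanking lines, clock = 148500 kHz):

	old: line_time_us     = 2200 * 1000 / 148500     = 14    (truncated from 14.8)
	     vblank_time_us   = 45 * 14                  = 630
	new: vblank_in_pixels = 2200 * 45                = 99000
	     vblank_time_us   = 99000 * 1000 / 148500    = 666   (exact: 666.7)

A roughly 5% underestimate of the vblank time matters here because the driver (see ni_dpm_vblank_too_short() below) uses it to decide whether memory reclocking fits inside the blanking window.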
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index db64e0062689..3b0c229d7dcd 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+ 
+ 	tmp &= AUX_HPD_SEL(0x7);
+ 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
+-	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
++	tmp |= AUX_EN | AUX_LS_READ_EN;
+ 
+ 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index f666277a8993..a808ba001ee7 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2948,6 +2948,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 	int i;
+ 	struct si_dpm_quirk *p = si_dpm_quirk_list;
+ 
++	/* limit all SI kickers */
++	if (rdev->family == CHIP_PITCAIRN) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->device == 0x6810) ||
++		    (rdev->pdev->device == 0x6811) ||
++		    (rdev->pdev->device == 0x6816) ||
++		    (rdev->pdev->device == 0x6817) ||
++		    (rdev->pdev->device == 0x6806))
++			max_mclk = 120000;
++	} else if (rdev->family == CHIP_VERDE) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0x87) ||
++		    (rdev->pdev->device == 0x6820) ||
++		    (rdev->pdev->device == 0x6821) ||
++		    (rdev->pdev->device == 0x6822) ||
++		    (rdev->pdev->device == 0x6823) ||
++		    (rdev->pdev->device == 0x682A) ||
++		    (rdev->pdev->device == 0x682B)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	} else if (rdev->family == CHIP_OLAND) {
++		if ((rdev->pdev->revision == 0xC7) ||
++		    (rdev->pdev->revision == 0x80) ||
++		    (rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->device == 0x6604) ||
++		    (rdev->pdev->device == 0x6605)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	} else if (rdev->family == CHIP_HAINAN) {
++		if ((rdev->pdev->revision == 0x81) ||
++		    (rdev->pdev->revision == 0x83) ||
++		    (rdev->pdev->revision == 0xC3) ||
++		    (rdev->pdev->device == 0x6664) ||
++		    (rdev->pdev->device == 0x6665) ||
++		    (rdev->pdev->device == 0x6667)) {
++			max_sclk = 75000;
++			max_mclk = 80000;
++		}
++	}
+ 	/* Apply dpm quirks */
+ 	while (p && p->chip_device != 0) {
+ 		if (rdev->pdev->vendor == p->chip_vendor &&
+@@ -2960,10 +3003,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		}
+ 		++p;
+ 	}
+-	/* limit mclk on all R7 370 parts for stability */
+-	if (rdev->pdev->device == 0x6811 &&
+-	    rdev->pdev->revision == 0x81)
+-		max_mclk = 120000;
+ 
+ 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ 	    ni_dpm_vblank_too_short(rdev))
+diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
+index 7994ec2e4151..41f5896224bd 100644
+--- a/drivers/hv/hv_util.c
++++ b/drivers/hv/hv_util.c
+@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
+ 	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
+ 	struct icmsg_negotiate *negop = NULL;
+ 
+-	vmbus_recvpacket(channel, hbeat_txf_buf,
+-			 PAGE_SIZE, &recvlen, &requestid);
++	while (1) {
++
++		vmbus_recvpacket(channel, hbeat_txf_buf,
++				 PAGE_SIZE, &recvlen, &requestid);
++
++		if (!recvlen)
++			break;
+ 
+-	if (recvlen > 0) {
+ 		icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
+ 				sizeof(struct vmbuspipe_hdr)];
+ 
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 68f5f4a0f1e7..418701947800 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -793,6 +793,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ 		},
+ 	},
++	{
++		/* Schenker XMG C504 - Elantech touchpad */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
++		},
++	},
+ 	{ }
+ };
+ 
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 1dbae580e8ca..f1b15a0b3774 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -180,7 +180,7 @@ static void gic_enable_redist(bool enable)
+ 			return;	/* No PM support in this redistributor */
+ 	}
+ 
+-	while (count--) {
++	while (--count) {
+ 		val = readl_relaxed(rbase + GICR_WAKER);
+ 		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
+ 			break;
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index e411ccba0af6..94533bdcbef2 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -700,37 +700,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ 
+ 	tgt->type = dm_get_target_type(type);
+ 	if (!tgt->type) {
+-		DMERR("%s: %s: unknown target type", dm_device_name(t->md),
+-		      type);
++		DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if (dm_target_needs_singleton(tgt->type)) {
+ 		if (t->num_targets) {
+-			DMERR("%s: target type %s must appear alone in table",
+-			      dm_device_name(t->md), type);
+-			return -EINVAL;
++			tgt->error = "singleton target type must appear alone in table";
++			goto bad;
+ 		}
+ 		t->singleton = 1;
+ 	}
+ 
+ 	if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
+-		DMERR("%s: target type %s may not be included in read-only tables",
+-		      dm_device_name(t->md), type);
+-		return -EINVAL;
++		tgt->error = "target type may not be included in a read-only table";
++		goto bad;
+ 	}
+ 
+ 	if (t->immutable_target_type) {
+ 		if (t->immutable_target_type != tgt->type) {
+-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
+-			      dm_device_name(t->md), t->immutable_target_type->name);
+-			return -EINVAL;
++			tgt->error = "immutable target type cannot be mixed with other target types";
++			goto bad;
+ 		}
+ 	} else if (dm_target_is_immutable(tgt->type)) {
+ 		if (t->num_targets) {
+-			DMERR("%s: immutable target type %s cannot be mixed with other target types",
+-			      dm_device_name(t->md), tgt->type->name);
+-			return -EINVAL;
++			tgt->error = "immutable target type cannot be mixed with other target types";
++			goto bad;
+ 		}
+ 		t->immutable_target_type = tgt->type;
+ 	}
+@@ -745,7 +740,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
+ 	 */
+ 	if (!adjoin(t, tgt)) {
+ 		tgt->error = "Gap in table";
+-		r = -EINVAL;
+ 		goto bad;
+ 	}
+ 
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 72dc91de80f8..7453c3ed4b8f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7847,6 +7847,9 @@ void md_do_sync(struct md_thread *thread)
+ 			break;
+ 
+ 		j += sectors;
++		if (j > max_sectors)
++			/* when skipping, extra large numbers can be returned. */
++			j = max_sectors;
+ 		if (j > 2)
+ 			mddev->curr_resync = j;
+ 		if (mddev_is_clustered(mddev))
+@@ -7915,6 +7918,12 @@ void md_do_sync(struct md_thread *thread)
+ 	blk_finish_plug(&plug);
+ 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
+ 
++	if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
++	    !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
++	    mddev->curr_resync > 3) {
++		mddev->curr_resync_completed = mddev->curr_resync;
++		sysfs_notify(&mddev->kobj, NULL, "sync_completed");
++	}
+ 	/* tell personality that we are finished */
+ 	mddev->pers->sync_request(mddev, max_sectors, &skipped);
+ 
+@@ -7922,7 +7931,7 @@ void md_do_sync(struct md_thread *thread)
+ 		md_cluster_ops->resync_finish(mddev);
+ 
+ 	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
+-	    mddev->curr_resync > 2) {
++	    mddev->curr_resync > 3) {
+ 		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ 			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+ 				if (mddev->curr_resync >= mddev->recovery_cp) {
+diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c
+index 1105db2355d2..83bfb1659abe 100644
+--- a/drivers/memstick/host/rtsx_usb_ms.c
++++ b/drivers/memstick/host/rtsx_usb_ms.c
+@@ -524,6 +524,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
+ 	int rc;
+ 
+ 	if (!host->req) {
++		pm_runtime_get_sync(ms_dev(host));
+ 		do {
+ 			rc = memstick_next_req(msh, &host->req);
+ 			dev_dbg(ms_dev(host), "next req %d\n", rc);
+@@ -544,6 +545,7 @@ static void rtsx_usb_ms_handle_req(struct work_struct *work)
+ 						host->req->error);
+ 			}
+ 		} while (!rc);
++		pm_runtime_put(ms_dev(host));
+ 	}
+ 
+ }
+@@ -570,6 +572,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
+ 	dev_dbg(ms_dev(host), "%s: param = %d, value = %d\n",
+ 			__func__, param, value);
+ 
++	pm_runtime_get_sync(ms_dev(host));
+ 	mutex_lock(&ucr->dev_mutex);
+ 
+ 	err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_MS_CARD);
+@@ -635,6 +638,7 @@ static int rtsx_usb_ms_set_param(struct memstick_host *msh,
+ 	}
+ out:
+ 	mutex_unlock(&ucr->dev_mutex);
++	pm_runtime_put(ms_dev(host));
+ 
+ 	/* power-on delay */
+ 	if (param == MEMSTICK_POWER && value == MEMSTICK_POWER_ON)
+@@ -681,6 +685,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
+ 	int err;
+ 
+ 	for (;;) {
++		pm_runtime_get_sync(ms_dev(host));
+ 		mutex_lock(&ucr->dev_mutex);
+ 
+ 		/* Check pending MS card changes */
+@@ -703,6 +708,7 @@ static int rtsx_usb_detect_ms_card(void *__host)
+ 		}
+ 
+ poll_again:
++		pm_runtime_put(ms_dev(host));
+ 		if (host->eject)
+ 			break;
+ 
+diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
+index 1ca94e6fa8fb..e9e6f7d61a71 100644
+--- a/drivers/misc/genwqe/card_utils.c
++++ b/drivers/misc/genwqe/card_utils.c
+@@ -351,17 +351,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
+ 		if (copy_from_user(sgl->lpage, user_addr + user_size -
+ 				   sgl->lpage_size, sgl->lpage_size)) {
+ 			rc = -EFAULT;
+-			goto err_out1;
++			goto err_out2;
+ 		}
+ 	}
+ 	return 0;
+ 
++ err_out2:
++	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
++				 sgl->lpage_dma_addr);
++	sgl->lpage = NULL;
++	sgl->lpage_dma_addr = 0;
+  err_out1:
+ 	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
+ 				 sgl->fpage_dma_addr);
++	sgl->fpage = NULL;
++	sgl->fpage_dma_addr = 0;
+  err_out:
+ 	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
+ 				 sgl->sgl_dma_addr);
++	sgl->sgl = NULL;
++	sgl->sgl_dma_addr = 0;
++	sgl->sgl_size = 0;
+ 	return -ENOMEM;
+ }
+ 
+diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
+index bae680c648ff..396d75d9fb11 100644
+--- a/drivers/misc/mei/hw-txe.c
++++ b/drivers/misc/mei/hw-txe.c
+@@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
+ 	hisr = mei_txe_br_reg_read(hw, HISR_REG);
+ 
+ 	aliveness = mei_txe_aliveness_get(dev);
+-	if (hhisr & IPC_HHIER_SEC && aliveness)
++	if (hhisr & IPC_HHIER_SEC && aliveness) {
+ 		ipc_isr = mei_txe_sec_reg_read_silent(hw,
+ 				SEC_IPC_HOST_INT_STATUS_REG);
+-	else
++	} else {
+ 		ipc_isr = 0;
++		hhisr &= ~IPC_HHIER_SEC;
++	}
+ 
+ 	generated = generated ||
+ 		(hisr & HISR_INT_STS_MSK) ||
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index ccefd6ca9c99..25939928d8fe 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1652,7 +1652,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 	struct mmc_blk_data *md = mq->data;
+ 	struct mmc_packed *packed = mqrq->packed;
+ 	bool do_rel_wr, do_data_tag;
+-	u32 *packed_cmd_hdr;
++	__le32 *packed_cmd_hdr;
+ 	u8 hdr_blocks;
+ 	u8 i = 1;
+ 
+diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
+index 99e6521e6169..f42c11293dd8 100644
+--- a/drivers/mmc/card/queue.h
++++ b/drivers/mmc/card/queue.h
+@@ -24,7 +24,7 @@ enum mmc_packed_type {
+ 
+ struct mmc_packed {
+ 	struct list_head	list;
+-	u32			cmd_hdr[1024];
++	__le32			cmd_hdr[1024];
+ 	unsigned int		blocks;
+ 	u8			nr_entries;
+ 	u8			retries;
+diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
+index 88af827e086b..a9e97a138f3d 100644
+--- a/drivers/mmc/host/rtsx_usb_sdmmc.c
++++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
+@@ -1138,11 +1138,6 @@ static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+ 	dev_dbg(sdmmc_dev(host), "%s\n", __func__);
+ 	mutex_lock(&ucr->dev_mutex);
+ 
+-	if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) {
+-		mutex_unlock(&ucr->dev_mutex);
+-		return;
+-	}
+-
+ 	sd_set_power_mode(host, ios->power_mode);
+ 	sd_set_bus_width(host, ios->bus_width);
+ 	sd_set_timing(host, ios->timing, &host->ddr_mode);
+@@ -1314,6 +1309,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
+ 		container_of(work, struct rtsx_usb_sdmmc, led_work);
+ 	struct rtsx_ucr *ucr = host->ucr;
+ 
++	pm_runtime_get_sync(sdmmc_dev(host));
+ 	mutex_lock(&ucr->dev_mutex);
+ 
+ 	if (host->led.brightness == LED_OFF)
+@@ -1322,6 +1318,7 @@ static void rtsx_usb_update_led(struct work_struct *work)
+ 		rtsx_usb_turn_on_led(ucr);
+ 
+ 	mutex_unlock(&ucr->dev_mutex);
++	pm_runtime_put(sdmmc_dev(host));
+ }
+ #endif
+ 
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c60dde917e49..5eb23ae82def 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -690,7 +690,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+ 			 * host->clock is in Hz.  target_timeout is in us.
+ 			 * Hence, us = 1000000 * cycles / Hz.  Round up.
+ 			 */
+-			val = 1000000 * data->timeout_clks;
++			val = 1000000ULL * data->timeout_clks;
+ 			if (do_div(val, host->clock))
+ 				target_timeout++;
+ 			target_timeout += val;
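The sdhci change above is a classic 32-bit overflow: even though val is a 64-bit variable for do_div(), the right-hand side 1000000 * data->timeout_clks is still evaluated in 32-bit arithmetic before the widening assignment, so it wraps once timeout_clks exceeds 4294 (2^32 / 10^6). The ULL suffix forces the multiplication itself into 64 bits. With hypothetical values timeout_clks = 65535 and host->clock = 400000 Hz:

	32-bit: 1000000 * 65535     wraps to 1110490560   (65535000000 mod 2^32)
	64-bit: 1000000ULL * 65535         = 65535000000
	        65535000000 / 400000       = 163837 us    (+1 for the remainder)

so the unsuffixed version computes a target timeout roughly 60x shorter than intended.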
+diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
+index 914c39f9f388..2926295a936d 100644
+--- a/drivers/scsi/arcmsr/arcmsr_hba.c
++++ b/drivers/scsi/arcmsr/arcmsr_hba.c
+@@ -2540,18 +2540,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
+ 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
+ 	struct CommandControlBlock *ccb;
+ 	int target = cmd->device->id;
+-	int lun = cmd->device->lun;
+-	uint8_t scsicmd = cmd->cmnd[0];
+ 	cmd->scsi_done = done;
+ 	cmd->host_scribble = NULL;
+ 	cmd->result = 0;
+-	if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
+-		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
+-    			cmd->result = (DID_NO_CONNECT << 16);
+-		}
+-		cmd->scsi_done(cmd);
+-		return 0;
+-	}
+ 	if (target == 16) {
+ 		/* virtual device for iop message transfer */
+ 		arcmsr_handle_virtual_command(acb, cmd);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7a1c4b4e764b..a991690167aa 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1622,16 +1622,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
+ 		goto out_done;
+ 	}
+ 
+-	switch (scmd->cmnd[0]) {
+-	case SYNCHRONIZE_CACHE:
+-		/*
+-		 * FW takes care of flush cache on its own
+-		 * No need to send it down
+-		 */
++	/*
++	 * FW takes care of flush cache on its own for Virtual Disk.
++	 * No need to send it down for VD. For JBOD send SYNCHRONIZE_CACHE to FW.
++	 */
++	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
+ 		scmd->result = DID_OK << 16;
+ 		goto out_done;
+-	default:
+-		break;
+ 	}
+ 
+ 	if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 1f8e2dc9c616..c07d1cd28e91 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -4993,6 +4993,7 @@ static void __exit scsi_debug_exit(void)
+ 	if (dif_storep)
+ 		vfree(dif_storep);
+ 
++	vfree(map_storep);
+ 	vfree(fake_storep);
+ }
+ 
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 6efab1c455e1..f7aa434811b5 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1516,12 +1516,12 @@ retry:
+  out_err:
+ 	kfree(lun_data);
+  out:
+-	scsi_device_put(sdev);
+ 	if (scsi_device_created(sdev))
+ 		/*
+ 		 * the sdev we used didn't appear in the report luns scan
+ 		 */
+ 		__scsi_remove_device(sdev);
++	scsi_device_put(sdev);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 7580abe7cb45..1cf3c0819b81 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2495,8 +2495,10 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ 	 * fabric acknowledgement that requires two target_put_sess_cmd()
+ 	 * invocations before se_cmd descriptor release.
+ 	 */
+-	if (ack_kref)
++	if (ack_kref) {
+ 		kref_get(&se_cmd->cmd_kref);
++		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
++	}
+ 
+ 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ 	if (se_sess->sess_tearing_down) {
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 4609305a1591..ddb0d6bc45f2 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -675,6 +675,7 @@ static int target_xcopy_read_source(
+ 	rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
+ 				remote_port, true);
+ 	if (rc < 0) {
++		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ 		transport_generic_free_cmd(se_cmd, 0);
+ 		return rc;
+ 	}
+@@ -686,6 +687,7 @@ static int target_xcopy_read_source(
+ 
+ 	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ 	if (rc < 0) {
++		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ 		transport_generic_free_cmd(se_cmd, 0);
+ 		return rc;
+ 	}
+@@ -736,6 +738,7 @@ static int target_xcopy_write_destination(
+ 				remote_port, false);
+ 	if (rc < 0) {
+ 		struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
++		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ 		/*
+ 		 * If the failure happened before the t_mem_list hand-off in
+ 		 * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
+@@ -751,6 +754,7 @@ static int target_xcopy_write_destination(
+ 
+ 	rc = target_xcopy_issue_pt_cmd(xpt_cmd);
+ 	if (rc < 0) {
++		ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
+ 		se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ 		transport_generic_free_cmd(se_cmd, 0);
+ 		return rc;
+@@ -837,9 +841,14 @@ static void target_xcopy_do_work(struct work_struct *work)
+ out:
+ 	xcopy_pt_undepend_remotedev(xop);
+ 	kfree(xop);
+-
+-	pr_warn("target_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n");
+-	ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
++	/*
++	 * Don't override an error scsi status if it has already been set
++	 */
++	if (ec_cmd->scsi_status == SAM_STAT_GOOD) {
++		pr_warn_ratelimited("target_xcopy_do_work: rc: %d, Setting X-COPY"
++			" CHECK_CONDITION -> sending response\n", rc);
++		ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
++	}
+ 	target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
+ }
+ 
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index ba86956ef4b5..2df90a54509a 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -865,10 +865,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
+ 	if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
+ 		return 0;
+ 
++	if (new_screen_size > (4 << 20))
++		return -EINVAL;
+ 	newscreen = kmalloc(new_screen_size, GFP_USER);
+ 	if (!newscreen)
+ 		return -ENOMEM;
+ 
++	if (vc == sel_cons)
++		clear_selection();
++
+ 	old_rows = vc->vc_rows;
+ 	old_row_size = vc->vc_size_row;
+ 
+diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
+index f1fd777ef4ec..82e63f73bfd5 100644
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -591,8 +591,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+ 
+ 	/* throttle high/super speed IRQ rate back slightly */
+ 	if (gadget_is_dualspeed(dev->gadget))
+-		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+-				     dev->gadget->speed == USB_SPEED_SUPER)
++		req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
++				       dev->gadget->speed == USB_SPEED_SUPER)) &&
++					!list_empty(&dev->tx_reqs))
+ 			? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
+ 			: 0;
+ 
+diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
+index 1dab9dfbca6a..c5de2e24c9e7 100644
+--- a/drivers/usb/host/ohci-hcd.c
++++ b/drivers/usb/host/ohci-hcd.c
+@@ -72,7 +72,7 @@
+ static const char	hcd_name [] = "ohci_hcd";
+ 
+ #define	STATECHANGE_DELAY	msecs_to_jiffies(300)
+-#define	IO_WATCHDOG_DELAY	msecs_to_jiffies(250)
++#define	IO_WATCHDOG_DELAY	msecs_to_jiffies(275)
+ 
+ #include "ohci.h"
+ #include "pci-quirks.h"
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 9d781d3ccc09..2dd322e92951 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1224,6 +1224,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ 	return 0;
+ }
+ 
++/*
++ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
++ * Warm reset a USB3 device stuck in polling or compliance mode after resume.
++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
++ */
++static bool xhci_port_missing_cas_quirk(int port_index,
++					     __le32 __iomem **port_array)
++{
++	u32 portsc;
++
++	portsc = readl(port_array[port_index]);
++
++	/* if any of these are set we are not stuck */
++	if (portsc & (PORT_CONNECT | PORT_CAS))
++		return false;
++
++	if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
++	    ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
++		return false;
++
++	/* clear wakeup/change bits, and do a warm port reset */
++	portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
++	portsc |= PORT_WR;
++	writel(portsc, port_array[port_index]);
++	/* flush write */
++	readl(port_array[port_index]);
++	return true;
++}
++
+ int xhci_bus_resume(struct usb_hcd *hcd)
+ {
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+@@ -1258,6 +1287,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
+ 		int slot_id;
+ 
+ 		temp = readl(port_array[port_index]);
++
++		/* warm reset CAS limited ports stuck in polling/compliance */
++		if ((xhci->quirks & XHCI_MISSING_CAS) &&
++		    (hcd->speed >= HCD_USB3) &&
++		    xhci_port_missing_cas_quirk(port_index, port_array)) {
++			xhci_dbg(xhci, "reset stuck port %d\n", port_index);
++			continue;
++		}
+ 		if (DEV_SUPERSPEED(temp))
+ 			temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
+ 		else
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 54caaf87c567..fc60a9e8a129 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -45,11 +45,13 @@
+ 
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI	0x8c31
+ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI	0x9c31
++#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI	0x9cb1
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
+ #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
++#define PCI_DEVICE_ID_INTEL_APL_XHCI			0x5aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -147,7 +149,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+-		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
++		(pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
+ 		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ 	}
+@@ -163,6 +166,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ 		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++	    (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++	     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
++		xhci->quirks |= XHCI_MISSING_CAS;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_EJ168) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index c5d6963e9cbe..f33028642e31 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -286,6 +286,8 @@ struct xhci_op_regs {
+ #define XDEV_U2		(0x2 << 5)
+ #define XDEV_U3		(0x3 << 5)
+ #define XDEV_INACTIVE	(0x6 << 5)
++#define XDEV_POLLING	(0x7 << 5)
++#define XDEV_COMP_MODE  (0xa << 5)
+ #define XDEV_RESUME	(0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER	(1 << 9)
+@@ -1573,6 +1575,7 @@ struct xhci_hcd {
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
+ #define XHCI_NO_64BIT_SUPPORT	(1 << 23)
++#define XHCI_MISSING_CAS	(1 << 24)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index f49e859ac5ce..6d4d8b828971 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -844,7 +844,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
+ 	unsigned int control;
+ 	int result;
+ 
+-	cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
++	result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
++	if (result)
++		return result;
+ 
+ 	result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
+ 		|((control & CONTROL_RTS) ? TIOCM_RTS : 0)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8c48c9d83d48..494167fe6a2c 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	/* ekey Devices */
+ 	{ USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
+ 	/* Infineon Devices */
+-	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
++	{ USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
+ 	/* GE Healthcare devices */
+ 	{ USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
+ 	/* Active Research (Actisense) devices */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 48db84f25cc9..db1a9b3a5f38 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -626,8 +626,9 @@
+ /*
+  * Infineon Technologies
+  */
+-#define INFINEON_VID		0x058b
+-#define INFINEON_TRIBOARD_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_VID		        0x058b
++#define INFINEON_TRIBOARD_TC1798_PID	0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
++#define INFINEON_TRIBOARD_TC2X7_PID	0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
+ 
+ /*
+  * Acton Research Corp.
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index a0ca291bc07f..e7e29c797824 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface,
+ 
+ 	serial->disconnected = 0;
+ 
+-	usb_serial_console_init(serial->port[0]->minor);
++	if (num_ports > 0)
++		usb_serial_console_init(serial->port[0]->minor);
+ exit:
+ 	module_put(type->driver.owner);
+ 	return 0;
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index bd3c92b4bcee..32ecb95f6214 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2596,14 +2596,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
+ 					     int index, int error)
+ {
+ 	struct btrfs_log_ctx *ctx;
++	struct btrfs_log_ctx *safe;
+ 
+-	if (!error) {
+-		INIT_LIST_HEAD(&root->log_ctxs[index]);
+-		return;
+-	}
+-
+-	list_for_each_entry(ctx, &root->log_ctxs[index], list)
++	list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
++		list_del_init(&ctx->list);
+ 		ctx->log_ret = error;
++	}
+ 
+ 	INIT_LIST_HEAD(&root->log_ctxs[index]);
+ }
+@@ -2842,13 +2840,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 	mutex_unlock(&root->log_mutex);
+ 
+ out_wake_log_root:
+-	/*
+-	 * We needn't get log_mutex here because we are sure all
+-	 * the other tasks are blocked.
+-	 */
++	mutex_lock(&log_root_tree->log_mutex);
+ 	btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
+ 
+-	mutex_lock(&log_root_tree->log_mutex);
+ 	log_root_tree->log_transid_committed++;
+ 	atomic_set(&log_root_tree->log_commit[index2], 0);
+ 	mutex_unlock(&log_root_tree->log_mutex);
+@@ -2856,10 +2850,8 @@ out_wake_log_root:
+ 	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
+ 		wake_up(&log_root_tree->log_commit_wait[index2]);
+ out:
+-	/* See above. */
+-	btrfs_remove_all_log_ctxs(root, index1, ret);
+-
+ 	mutex_lock(&root->log_mutex);
++	btrfs_remove_all_log_ctxs(root, index1, ret);
+ 	root->log_transid_committed++;
+ 	atomic_set(&root->log_commit[index1], 0);
+ 	mutex_unlock(&root->log_mutex);
+diff --git a/fs/ceph/file.c b/fs/ceph/file.c
+index 3b6b522b4b31..5ae447cbd2b5 100644
+--- a/fs/ceph/file.c
++++ b/fs/ceph/file.c
+@@ -868,7 +868,8 @@ again:
+ 		statret = __ceph_do_getattr(inode, page,
+ 					    CEPH_STAT_CAP_INLINE_DATA, !!page);
+ 		if (statret < 0) {
+-			 __free_page(page);
++			if (page)
++				__free_page(page);
+ 			if (statret == -ENODATA) {
+ 				BUG_ON(retry_op != READ_INLINE);
+ 				goto again;
+diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
+index d67a16f2a45d..350f67fb5b9c 100644
+--- a/fs/isofs/inode.c
++++ b/fs/isofs/inode.c
+@@ -690,6 +690,11 @@ static int isofs_fill_super(struct super_block *s, void *data, int silent)
+ 	pri_bh = NULL;
+ 
+ root_found:
++	/* We don't support read-write mounts */
++	if (!(s->s_flags & MS_RDONLY)) {
++		error = -EACCES;
++		goto out_freebh;
++	}
+ 
+ 	if (joliet_level && (pri == NULL || !opt.rock)) {
+ 		/* This is the case of Joliet with the norock mount flag.
+@@ -1503,9 +1508,6 @@ struct inode *__isofs_iget(struct super_block *sb,
+ static struct dentry *isofs_mount(struct file_system_type *fs_type,
+ 	int flags, const char *dev_name, void *data)
+ {
+-	/* We don't support read-write mounts */
+-	if (!(flags & MS_RDONLY))
+-		return ERR_PTR(-EACCES);
+ 	return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
+ }
+ 
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index ff2f2e6ad311..2abbb2babcae 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1087,6 +1087,7 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+ 		JBUFFER_TRACE(jh, "file as BJ_Reserved");
+ 		spin_lock(&journal->j_list_lock);
+ 		__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
++		spin_unlock(&journal->j_list_lock);
+ 	} else if (jh->b_transaction == journal->j_committing_transaction) {
+ 		/* first access by this transaction */
+ 		jh->b_modified = 0;
+@@ -1094,8 +1095,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
+ 		JBUFFER_TRACE(jh, "set next transaction");
+ 		spin_lock(&journal->j_list_lock);
+ 		jh->b_next_transaction = transaction;
++		spin_unlock(&journal->j_list_lock);
+ 	}
+-	spin_unlock(&journal->j_list_lock);
+ 	jbd_unlock_bh_state(bh);
+ 
+ 	/*
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 4d8aa749d9b2..c57cd417802b 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -129,6 +129,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
+ 		len -= bytes;
+ 	}
+ 
++	if (!error)
++		error = vfs_fsync(new_file, 0);
+ 	fput(new_file);
+ out_fput:
+ 	fput(old_file);
+diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
+index 27060fc855d4..e0af247f4740 100644
+--- a/fs/ubifs/dir.c
++++ b/fs/ubifs/dir.c
+@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
+  */
+ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ {
+-	int err;
++	int err = 0;
+ 	struct qstr nm;
+ 	union ubifs_key key;
+ 	struct ubifs_dent_node *dent;
+@@ -449,16 +449,23 @@ static int ubifs_readdir(struct file *file, struct dir_context *ctx)
+ 	}
+ 
+ out:
+-	if (err != -ENOENT) {
+-		ubifs_err(c, "cannot find next direntry, error %d", err);
+-		return err;
+-	}
+-
+ 	kfree(file->private_data);
+ 	file->private_data = NULL;
++
++	if (err != -ENOENT)
++		ubifs_err(c, "cannot find next direntry, error %d", err);
++	else
++		/*
++		 * -ENOENT is a non-fatal error in this context; the TNC uses
++		 * it to indicate that the cursor moved past the current directory
++		 * and readdir() has to stop.
++		 */
++		err = 0;
++
++
+ 	/* 2 is a special value indicating that there are no more direntries */
+ 	ctx->pos = 2;
+-	return 0;
++	return err;
+ }
+ 
+ /* Free saved readdir() state when the directory is closed */
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index fd65b3f1923c..7270162b72e4 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -173,6 +173,7 @@ out_cancel:
+ 	host_ui->xattr_cnt -= 1;
+ 	host_ui->xattr_size -= CALC_DENT_SIZE(nm->len);
+ 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
++	host_ui->xattr_names -= nm->len;
+ 	mutex_unlock(&host_ui->ui_mutex);
+ out_free:
+ 	make_bad_inode(inode);
+@@ -527,6 +528,7 @@ out_cancel:
+ 	host_ui->xattr_cnt += 1;
+ 	host_ui->xattr_size += CALC_DENT_SIZE(nm->len);
+ 	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
++	host_ui->xattr_names += nm->len;
+ 	mutex_unlock(&host_ui->ui_mutex);
+ 	ubifs_release_budget(c, &req);
+ 	make_bad_inode(inode);
+diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
+index f48c3040c9ce..6021c322316c 100644
+--- a/fs/xfs/libxfs/xfs_dquot_buf.c
++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
+@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
+ 	if (mp->m_quotainfo)
+ 		ndquots = mp->m_quotainfo->qi_dqperchunk;
+ 	else
+-		ndquots = xfs_calc_dquots_per_chunk(
+-					XFS_BB_TO_FSB(mp, bp->b_length));
++		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
+ 
+ 	for (i = 0; i < ndquots; i++, d++) {
+ 		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
+diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
+index 523f04c90dea..cede0a45b9c0 100644
+--- a/include/drm/drm_dp_helper.h
++++ b/include/drm/drm_dp_helper.h
+@@ -568,6 +568,10 @@
+ #define MODE_I2C_READ	4
+ #define MODE_I2C_STOP	8
+ 
++/* DP 1.2 MST PORTs - Section 2.5.1 v1.2a spec */
++#define DP_MST_PHYSICAL_PORT_0 0
++#define DP_MST_LOGICAL_PORT_0 8
++
+ #define DP_LINK_STATUS_SIZE	   6
+ bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
+ 			  int lane_count);
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index cbf1ce800fd1..5ef99b18966d 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -218,7 +218,7 @@
+ #define GITS_BASER_TYPE_SHIFT		(56)
+ #define GITS_BASER_TYPE(r)		(((r) >> GITS_BASER_TYPE_SHIFT) & 7)
+ #define GITS_BASER_ENTRY_SIZE_SHIFT	(48)
+-#define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
++#define GITS_BASER_ENTRY_SIZE(r)	((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+ #define GITS_BASER_NonShareable		(0UL << 10)
+ #define GITS_BASER_InnerShareable	(1UL << 10)
+ #define GITS_BASER_OuterShareable	(2UL << 10)
+diff --git a/mm/list_lru.c b/mm/list_lru.c
+index 909eca2c820e..84b4c21d78d7 100644
+--- a/mm/list_lru.c
++++ b/mm/list_lru.c
+@@ -532,6 +532,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
+ 	err = memcg_init_list_lru(lru, memcg_aware);
+ 	if (err) {
+ 		kfree(lru->node);
++		/* Do this so a list_lru_destroy() doesn't crash: */
++		lru->node = NULL;
+ 		goto out;
+ 	}
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 3073164a6fcf..06d1732e2094 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2179,16 +2179,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
+ 	if (!(status->rx_flags & IEEE80211_RX_AMSDU))
+ 		return RX_CONTINUE;
+ 
+-	if (ieee80211_has_a4(hdr->frame_control) &&
+-	    rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-	    !rx->sdata->u.vlan.sta)
+-		return RX_DROP_UNUSABLE;
++	if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
++		switch (rx->sdata->vif.type) {
++		case NL80211_IFTYPE_AP_VLAN:
++			if (!rx->sdata->u.vlan.sta)
++				return RX_DROP_UNUSABLE;
++			break;
++		case NL80211_IFTYPE_STATION:
++			if (!rx->sdata->u.mgd.use_4addr)
++				return RX_DROP_UNUSABLE;
++			break;
++		default:
++			return RX_DROP_UNUSABLE;
++		}
++	}
+ 
+-	if (is_multicast_ether_addr(hdr->addr1) &&
+-	    ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+-	      rx->sdata->u.vlan.sta) ||
+-	     (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+-	      rx->sdata->u.mgd.use_4addr)))
++	if (is_multicast_ether_addr(hdr->addr1))
+ 		return RX_DROP_UNUSABLE;
+ 
+ 	skb->dev = dev;
+diff --git a/scripts/sortextable.c b/scripts/sortextable.c
+index 1052d4834a44..4dd16a72a2ef 100644
+--- a/scripts/sortextable.c
++++ b/scripts/sortextable.c
+@@ -205,6 +205,35 @@ static int compare_relative_table(const void *a, const void *b)
+ 	return 0;
+ }
+ 
++static void x86_sort_relative_table(char *extab_image, int image_size)
++{
++	int i;
++
++	i = 0;
++	while (i < image_size) {
++		uint32_t *loc = (uint32_t *)(extab_image + i);
++
++		w(r(loc) + i, loc);
++		w(r(loc + 1) + i + 4, loc + 1);
++		w(r(loc + 2) + i + 8, loc + 2);
++
++		i += sizeof(uint32_t) * 3;
++	}
++
++	qsort(extab_image, image_size / 12, 12, compare_relative_table);
++
++	i = 0;
++	while (i < image_size) {
++		uint32_t *loc = (uint32_t *)(extab_image + i);
++
++		w(r(loc) - i, loc);
++		w(r(loc + 1) - (i + 4), loc + 1);
++		w(r(loc + 2) - (i + 8), loc + 2);
++
++		i += sizeof(uint32_t) * 3;
++	}
++}
++
+ static void sort_relative_table(char *extab_image, int image_size)
+ {
+ 	int i;
+@@ -277,6 +306,9 @@ do_file(char const *const fname)
+ 		break;
+ 	case EM_386:
+ 	case EM_X86_64:
++		custom_sort = x86_sort_relative_table;
++		break;
++
+ 	case EM_S390:
+ 		custom_sort = sort_relative_table;
+ 		break;
+diff --git a/security/keys/proc.c b/security/keys/proc.c
+index f0611a6368cd..b9f531c9e4fa 100644
+--- a/security/keys/proc.c
++++ b/security/keys/proc.c
+@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
+ 	struct timespec now;
+ 	unsigned long timo;
+ 	key_ref_t key_ref, skey_ref;
+-	char xbuf[12];
++	char xbuf[16];
+ 	int rc;
+ 
+ 	struct keyring_search_context ctx = {
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 26ce990592a0..40072d630b49 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1738,7 +1738,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
+ 	status = azx_readb(chip, RIRBSTS);
+ 	if (status & RIRB_INT_MASK) {
+ 		if (status & RIRB_INT_RESPONSE) {
+-			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
++			if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ 				udelay(80);
+ 			azx_update_rirb(chip);
+ 		}
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 0efdb094d21c..6bb5340cf842 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -158,7 +158,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
+ #define AZX_DCAPS_SNOOP_MASK	(3 << 10)	/* snoop type mask */
+ #define AZX_DCAPS_SNOOP_OFF	(1 << 12)	/* snoop default off */
+ #define AZX_DCAPS_RIRB_DELAY	(1 << 13)	/* Long delay in read loop */
+-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)	/* Put a delay before read */
++/* 14 unused */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)	/* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB	(1 << 16)	/* Use LPIB as default */
+ #define AZX_DCAPS_POSFIX_VIA	(1 << 17)	/* Use VIACOMBO as default */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c8506496826a..16d09825a995 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2220,14 +2220,12 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ 	  .class_mask = 0xffffff,
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+-	  AZX_DCAPS_NO_64BIT |
+-	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
++	  AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
+ #else
+ 	/* this entry seems still valid -- i.e. without emu20kx chip */
+ 	{ PCI_DEVICE(0x1102, 0x0009),
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+-	  AZX_DCAPS_NO_64BIT |
+-	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
++	  AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+ 	/* CM8888 */
+ 	{ PCI_DEVICE(0x13f6, 0x5011),
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index ecc2a4ea014d..32719f28aa86 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2898,6 +2898,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
+ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ 
++/* Syntek STK1160 */
++{
++	.match_flags = USB_DEVICE_ID_MATCH_DEVICE |
++		       USB_DEVICE_ID_MATCH_INT_CLASS |
++		       USB_DEVICE_ID_MATCH_INT_SUBCLASS,
++	.idVendor = 0x05e1,
++	.idProduct = 0x0408,
++	.bInterfaceClass = USB_CLASS_AUDIO,
++	.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "Syntek",
++		.product_name = "STK1160",
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_AUDIO_ALIGN_TRANSFER
++	}
++},
++
+ /* Digidesign Mbox */
+ {
+ 	/* Thanks to Clemens Ladisch <clemens@ladisch.de> */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-11-23 11:25 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-11-23 11:25 UTC (permalink / raw
  To: gentoo-commits

commit:     bde6e65e58795d11ff0054877b4a2363df426327
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Nov 23 11:25:01 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Nov 23 11:25:01 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bde6e65e

Update gentoo kconfig patch for GENTOO_LINUX_INIT_SYSTEMD. See bug #598623

 4567_distro-Gentoo-Kconfig.patch | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 499b21f..acb0972 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null	2016-07-01 11:23:26.087932647 -0400
-+++ b/distro/Kconfig	2016-07-01 19:32:35.581415519 -0400
-@@ -0,0 +1,134 @@
+--- /dev/null	2016-11-15 00:56:18.320838834 -0500
++++ b/distro/Kconfig	2016-11-16 06:24:29.457357409 -0500
+@@ -0,0 +1,142 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -33,6 +33,7 @@
 +
 +	select DEVTMPFS
 +	select TMPFS
++	select UNIX
 +
 +	select MMU
 +	select SHMEM
@@ -112,17 +113,24 @@
 +	select AUTOFS4_FS
 +	select BLK_DEV_BSG
 +	select CGROUPS
++	select CHECKPOINT_RESTORE
 +	select DEVPTS_MULTIPLE_INSTANCES
++	select DMIID
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE
 +	select INOTIFY_USER
++	select IPV6
 +	select NET
 +	select NET_NS
 +	select PROC_FS
++	select SECCOMP
++	select SECCOMP_FILTER
 +	select SIGNALFD
 +	select SYSFS
 +	select TIMERFD
++	select TMPFS_POSIX_ACL
++	select TMPFS_XATTR
 +
 +	select ANON_INODES
 +	select BLOCK


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-10-28 10:19 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-10-28 10:19 UTC (permalink / raw
  To: gentoo-commits

commit:     880d575a3330d9626698dd16935b7b664d62739d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct 28 10:19:17 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct 28 10:19:17 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=880d575a

Linux patch 4.1.35

 0000_README             |   4 +
 1034_linux-4.1.35.patch | 215 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 219 insertions(+)

diff --git a/0000_README b/0000_README
index 72df015..5bb6b6b 100644
--- a/0000_README
+++ b/0000_README
@@ -179,6 +179,10 @@ Patch:  1033_linux-4.1.34.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.34
 
+Patch:  1034_linux-4.1.35.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.35
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1034_linux-4.1.35.patch b/1034_linux-4.1.35.patch
new file mode 100644
index 0000000..cfb75c7
--- /dev/null
+++ b/1034_linux-4.1.35.patch
@@ -0,0 +1,215 @@
+diff --git a/Makefile b/Makefile
+index 2d4dea4b3107..21f657f2c4e6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 34
++SUBLEVEL = 35
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index cc1993c5556e..9d781d3ccc09 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -619,8 +619,30 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		if ((raw_port_status & PORT_RESET) ||
+ 				!(raw_port_status & PORT_PE))
+ 			return 0xffffffff;
+-		if (time_after_eq(jiffies,
+-					bus_state->resume_done[wIndex])) {
++		/* did port event handler already start resume timing? */
++		if (!bus_state->resume_done[wIndex]) {
++			/* If not, maybe we are in a host initiated resume? */
++			if (test_bit(wIndex, &bus_state->resuming_ports)) {
++				/* Host initiated resume doesn't time the resume
++				 * signalling using resume_done[].
++				 * It manually sets RESUME state, sleeps 20ms
++				 * and sets U0 state. This should probably be
++				 * changed, but not right now.
++				 */
++			} else {
++				/* port resume was discovered now and here,
++				 * start resume timing
++				 */
++				unsigned long timeout = jiffies +
++					msecs_to_jiffies(USB_RESUME_TIMEOUT);
++
++				set_bit(wIndex, &bus_state->resuming_ports);
++				bus_state->resume_done[wIndex] = timeout;
++				mod_timer(&hcd->rh_timer, timeout);
++			}
++		/* Has resume been signalled for USB_RESUME_TIME yet? */
++		} else if (time_after_eq(jiffies,
++					 bus_state->resume_done[wIndex])) {
+ 			int time_left;
+ 
+ 			xhci_dbg(xhci, "Resume USB2 port %d\n",
+@@ -661,13 +683,24 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		} else {
+ 			/*
+ 			 * The resume has been signaling for less than
+-			 * 20ms. Report the port status as SUSPEND,
+-			 * let the usbcore check port status again
+-			 * and clear resume signaling later.
++			 * USB_RESUME_TIME. Report the port status as SUSPEND,
++			 * let the usbcore check port status again and clear
++			 * resume signaling later.
+ 			 */
+ 			status |= USB_PORT_STAT_SUSPEND;
+ 		}
+ 	}
++	/*
++	 * Clear stale usb2 resume signalling variables in case port changed
++	 * state during resume signalling, for example on error.
++	 */
++	if ((bus_state->resume_done[wIndex] ||
++	     test_bit(wIndex, &bus_state->resuming_ports)) &&
++	    (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
++	    (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
++		bus_state->resume_done[wIndex] = 0;
++		clear_bit(wIndex, &bus_state->resuming_ports);
++	}
+ 	if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
+ 			&& (raw_port_status & PORT_POWER)
+ 			&& (bus_state->suspended_ports & (1 << wIndex))) {
+@@ -998,6 +1031,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				if ((temp & PORT_PE) == 0)
+ 					goto error;
+ 
++				set_bit(wIndex, &bus_state->resuming_ports);
+ 				xhci_set_link_state(xhci, port_array, wIndex,
+ 							XDEV_RESUME);
+ 				spin_unlock_irqrestore(&xhci->lock, flags);
+@@ -1005,6 +1039,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ 				spin_lock_irqsave(&xhci->lock, flags);
+ 				xhci_set_link_state(xhci, port_array, wIndex,
+ 							XDEV_U0);
++				clear_bit(wIndex, &bus_state->resuming_ports);
+ 			}
+ 			bus_state->port_c_suspend |= 1 << wIndex;
+ 
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6a2911743829..2d8e77ff7821 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1605,7 +1605,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 			 */
+ 			bogus_port_status = true;
+ 			goto cleanup;
+-		} else {
++		} else if (!test_bit(faked_port_index,
++				     &bus_state->resuming_ports)) {
+ 			xhci_dbg(xhci, "resume HS port %d\n", port_id);
+ 			bus_state->resume_done[faked_port_index] = jiffies +
+ 				msecs_to_jiffies(USB_RESUME_TIMEOUT);
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 6b85ec64d302..7cadf0a660e7 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2064,6 +2064,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
+ #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
+ #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
+ #define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
++#define FOLL_COW	0x4000	/* internal GUP flag */
+ 
+ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ 			void *data);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 8fcc801fde15..d9f112bd42a7 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -309,17 +309,34 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
+ static inline u32 arch_gettimeoffset(void) { return 0; }
+ #endif
+ 
++static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr,
++					  cycle_t delta)
++{
++	s64 nsec;
++
++	nsec = delta * tkr->mult + tkr->xtime_nsec;
++	nsec >>= tkr->shift;
++
++	/* If arch requires, add in get_arch_timeoffset() */
++	return nsec + arch_gettimeoffset();
++}
++
+ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+ {
+ 	cycle_t delta;
+-	s64 nsec;
+ 
+ 	delta = timekeeping_get_delta(tkr);
++	return timekeeping_delta_to_ns(tkr, delta);
++}
+ 
+-	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
++static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr,
++					    cycle_t cycles)
++{
++	cycle_t delta;
+ 
+-	/* If arch requires, add in get_arch_timeoffset() */
+-	return nsec + arch_gettimeoffset();
++	/* calculate the delta since the last update_wall_time */
++	delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask);
++	return timekeeping_delta_to_ns(tkr, delta);
+ }
+ 
+ /**
+@@ -421,8 +438,11 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+ 		tkr = tkf->base + (seq & 0x01);
+ 		now = ktime_to_ns(tkr->base);
+ 
+-		now += clocksource_delta(tkr->read(tkr->clock),
+-					 tkr->cycle_last, tkr->mask);
++		now += timekeeping_delta_to_ns(tkr,
++				clocksource_delta(
++					tkr->read(tkr->clock),
++					tkr->cycle_last,
++					tkr->mask));
+ 	} while (read_seqcount_retry(&tkf->seq, seq));
+ 
+ 	return now;
+diff --git a/mm/gup.c b/mm/gup.c
+index 6297f6bccfb1..e6de9e74e4ae 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -32,6 +32,16 @@ static struct page *no_page_table(struct vm_area_struct *vma,
+ 	return NULL;
+ }
+ 
++/*
++ * FOLL_FORCE can write to even unwritable pte's, but only
++ * after we've gone through a COW cycle and they are dirty.
++ */
++static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
++{
++	return pte_write(pte) ||
++		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
++}
++
+ static struct page *follow_page_pte(struct vm_area_struct *vma,
+ 		unsigned long address, pmd_t *pmd, unsigned int flags)
+ {
+@@ -66,7 +76,7 @@ retry:
+ 	}
+ 	if ((flags & FOLL_NUMA) && pte_protnone(pte))
+ 		goto no_page;
+-	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
++	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
+ 		pte_unmap_unlock(ptep, ptl);
+ 		return NULL;
+ 	}
+@@ -315,7 +325,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
+ 	 * reCOWed by userspace write).
+ 	 */
+ 	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
+-		*flags &= ~FOLL_WRITE;
++	        *flags |= FOLL_COW;
+ 	return 0;
+ }
+ 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-10-12 19:52 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-10-12 19:52 UTC (permalink / raw
  To: gentoo-commits

commit:     4635b6f052c8eab94dec6e8fbb7dbbc8905f83eb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Oct 12 19:52:44 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Oct 12 19:52:44 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4635b6f0

Linux patch 4.1.34

 0000_README             |    4 +
 1033_linux-4.1.34.patch | 3255 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3259 insertions(+)

diff --git a/0000_README b/0000_README
index 9a2fbfa..72df015 100644
--- a/0000_README
+++ b/0000_README
@@ -175,6 +175,10 @@ Patch:  1032_linux-4.1.33.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.33
 
+Patch:  1033_linux-4.1.34.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.34
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1033_linux-4.1.34.patch b/1033_linux-4.1.34.patch
new file mode 100644
index 0000000..fffc6fa
--- /dev/null
+++ b/1033_linux-4.1.34.patch
@@ -0,0 +1,3255 @@
+diff --git a/Makefile b/Makefile
+index 47c47d7c0926..2d4dea4b3107 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 33
++SUBLEVEL = 34
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h
+index 9b0d40093c9a..c0ddbbf73400 100644
+--- a/arch/alpha/include/asm/uaccess.h
++++ b/arch/alpha/include/asm/uaccess.h
+@@ -371,14 +371,6 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len)
+ 	return __cu_len;
+ }
+ 
+-extern inline long
+-__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
+-{
+-	if (__access_ok((unsigned long)validate, len, get_fs()))
+-		len = __copy_tofrom_user_nocheck(to, from, len);
+-	return len;
+-}
+-
+ #define __copy_to_user(to, from, n)					\
+ ({									\
+ 	__chk_user_ptr(to);						\
+@@ -393,17 +385,22 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+ 
+-
+ extern inline long
+ copy_to_user(void __user *to, const void *from, long n)
+ {
+-	return __copy_tofrom_user((__force void *)to, from, n, to);
++	if (likely(__access_ok((unsigned long)to, n, get_fs())))
++		n = __copy_tofrom_user_nocheck((__force void *)to, from, n);
++	return n;
+ }
+ 
+ extern inline long
+ copy_from_user(void *to, const void __user *from, long n)
+ {
+-	return __copy_tofrom_user(to, (__force void *)from, n, from);
++	if (likely(__access_ok((unsigned long)from, n, get_fs())))
++		n = __copy_tofrom_user_nocheck(to, (__force void *)from, n);
++	else
++		memset(to, 0, n);
++	return n;
+ }
+ 
+ extern void __do_clear_user(void);
+diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
+index 30c9baffa96f..08770c750696 100644
+--- a/arch/arc/include/asm/uaccess.h
++++ b/arch/arc/include/asm/uaccess.h
+@@ -83,7 +83,10 @@
+ 	"2:	;nop\n"				\
+ 	"	.section .fixup, \"ax\"\n"	\
+ 	"	.align 4\n"			\
+-	"3:	mov %0, %3\n"			\
++	"3:	# return -EFAULT\n"		\
++	"	mov %0, %3\n"			\
++	"	# zero out dst ptr\n"		\
++	"	mov %1,  0\n"			\
+ 	"	j   2b\n"			\
+ 	"	.previous\n"			\
+ 	"	.section __ex_table, \"a\"\n"	\
+@@ -101,7 +104,11 @@
+ 	"2:	;nop\n"				\
+ 	"	.section .fixup, \"ax\"\n"	\
+ 	"	.align 4\n"			\
+-	"3:	mov %0, %3\n"			\
++	"3:	# return -EFAULT\n"		\
++	"	mov %0, %3\n"			\
++	"	# zero out dst ptr\n"		\
++	"	mov %1,  0\n"			\
++	"	mov %R1, 0\n"			\
+ 	"	j   2b\n"			\
+ 	"	.previous\n"			\
+ 	"	.section __ex_table, \"a\"\n"	\
+diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
+index bfa5edde179c..2c1e7f09205f 100644
+--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
++++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
+@@ -113,7 +113,7 @@
+ 
+ 	partition@e0000 {
+ 		label = "u-boot environment";
+-		reg = <0xe0000 0x100000>;
++		reg = <0xe0000 0x20000>;
+ 	};
+ 
+ 	partition@100000 {
+diff --git a/arch/arm/boot/dts/stih410.dtsi b/arch/arm/boot/dts/stih410.dtsi
+index 208b5e89036a..f7b2c8defe56 100644
+--- a/arch/arm/boot/dts/stih410.dtsi
++++ b/arch/arm/boot/dts/stih410.dtsi
+@@ -33,7 +33,8 @@
+ 			compatible = "st,st-ohci-300x";
+ 			reg = <0x9a03c00 0x100>;
+ 			interrupts = <GIC_SPI 180 IRQ_TYPE_NONE>;
+-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ 			reset-names = "power", "softreset";
+@@ -47,7 +48,8 @@
+ 			interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pinctrl_usb0>;
+-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ 			resets = <&powerdown STIH407_USB2_PORT0_POWERDOWN>,
+ 				 <&softreset STIH407_USB2_PORT0_SOFTRESET>;
+ 			reset-names = "power", "softreset";
+@@ -59,7 +61,8 @@
+ 			compatible = "st,st-ohci-300x";
+ 			reg = <0x9a83c00 0x100>;
+ 			interrupts = <GIC_SPI 181 IRQ_TYPE_NONE>;
+-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ 			reset-names = "power", "softreset";
+@@ -73,7 +76,8 @@
+ 			interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&pinctrl_usb1>;
+-			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>;
++			clocks = <&clk_s_c0_flexgen CLK_TX_ICN_DISP_0>,
++				 <&clk_s_c0_flexgen CLK_RX_ICN_DISP_0>;
+ 			resets = <&powerdown STIH407_USB2_PORT1_POWERDOWN>,
+ 				 <&softreset STIH407_USB2_PORT1_SOFTRESET>;
+ 			reset-names = "power", "softreset";
+diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
+index b445a5d56f43..593da7ffb449 100644
+--- a/arch/arm/crypto/aes-ce-glue.c
++++ b/arch/arm/crypto/aes-ce-glue.c
+@@ -279,7 +279,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ 		err = blkcipher_walk_done(desc, &walk,
+ 					  walk.nbytes % AES_BLOCK_SIZE);
+ 	}
+-	if (nbytes) {
++	if (walk.nbytes % AES_BLOCK_SIZE) {
+ 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 5414081c0bbf..87b2663a5564 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -154,8 +154,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+ 	int i;
+ 
+-	kvm_free_stage2_pgd(kvm);
+-
+ 	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ 		if (kvm->vcpus[i]) {
+ 			kvm_arch_vcpu_free(kvm->vcpus[i]);
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 691ea94897fd..a33af44230da 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1850,6 +1850,7 @@ void kvm_arch_memslots_updated(struct kvm *kvm)
+ 
+ void kvm_arch_flush_shadow_all(struct kvm *kvm)
+ {
++	kvm_free_stage2_pgd(kvm);
+ }
+ 
+ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c
+index 6a7c6fc780cc..4627c862beac 100644
+--- a/arch/arm/mach-imx/pm-imx6.c
++++ b/arch/arm/mach-imx/pm-imx6.c
+@@ -288,7 +288,7 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
+ 		val |= 0x3 << BP_CLPCR_STBY_COUNT;
+ 		val |= BM_CLPCR_VSTBY;
+ 		val |= BM_CLPCR_SBYOS;
+-		if (cpu_is_imx6sl())
++		if (cpu_is_imx6sl() || cpu_is_imx6sx())
+ 			val |= BM_CLPCR_BYPASS_PMIC_READY;
+ 		if (cpu_is_imx6sl() || cpu_is_imx6sx())
+ 			val |= BM_CLPCR_BYP_MMDC_CH0_LPM_HS;
+diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+index 4e8e93c398db..808c8e59000d 100644
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -724,8 +724,20 @@ static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
+  * display serial interface controller
+  */
+ 
++static struct omap_hwmod_class_sysconfig omap3xxx_dsi_sysc = {
++	.rev_offs	= 0x0000,
++	.sysc_offs	= 0x0010,
++	.syss_offs	= 0x0014,
++	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
++			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
++			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
++	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
++	.sysc_fields	= &omap_hwmod_sysc_type1,
++};
++
+ static struct omap_hwmod_class omap3xxx_dsi_hwmod_class = {
+ 	.name = "dsi",
++	.sysc	= &omap3xxx_dsi_sysc,
+ };
+ 
+ static struct omap_hwmod_irq_info omap3xxx_dsi1_irqs[] = {
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 05d9e16c0dfd..6a51dfccfe71 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -211,7 +211,7 @@ static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ 		err = blkcipher_walk_done(desc, &walk,
+ 					  walk.nbytes % AES_BLOCK_SIZE);
+ 	}
+-	if (nbytes) {
++	if (walk.nbytes % AES_BLOCK_SIZE) {
+ 		u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+ 		u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
+index cee128732435..d155a9bbfab6 100644
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -231,4 +231,14 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
+ #define arch_read_relax(lock)	cpu_relax()
+ #define arch_write_relax(lock)	cpu_relax()
+ 
++/*
++ * Accesses appearing in program order before a spin_lock() operation
++ * can be reordered with accesses inside the critical section, by virtue
++ * of arch_spin_lock being constructed using acquire semantics.
++ *
++ * In cases where this is problematic (e.g. try_to_wake_up), an
++ * smp_mb__before_spinlock() can restore the required ordering.
++ */
++#define smp_mb__before_spinlock()	smp_mb()
++
+ #endif /* __ASM_SPINLOCK_H */
+diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
+index a46f7cf3e1ea..20b52c40bcd2 100644
+--- a/arch/avr32/include/asm/uaccess.h
++++ b/arch/avr32/include/asm/uaccess.h
+@@ -74,7 +74,7 @@ extern __kernel_size_t __copy_user(void *to, const void *from,
+ 
+ extern __kernel_size_t copy_to_user(void __user *to, const void *from,
+ 				    __kernel_size_t n);
+-extern __kernel_size_t copy_from_user(void *to, const void __user *from,
++extern __kernel_size_t ___copy_from_user(void *to, const void __user *from,
+ 				      __kernel_size_t n);
+ 
+ static inline __kernel_size_t __copy_to_user(void __user *to, const void *from,
+@@ -88,6 +88,15 @@ static inline __kernel_size_t __copy_from_user(void *to,
+ {
+ 	return __copy_user(to, (const void __force *)from, n);
+ }
++static inline __kernel_size_t copy_from_user(void *to,
++					       const void __user *from,
++					       __kernel_size_t n)
++{
++	size_t res = ___copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
++}
+ 
+ #define __copy_to_user_inatomic __copy_to_user
+ #define __copy_from_user_inatomic __copy_from_user
+diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c
+index d93ead02daed..7c6cf14f0985 100644
+--- a/arch/avr32/kernel/avr32_ksyms.c
++++ b/arch/avr32/kernel/avr32_ksyms.c
+@@ -36,7 +36,7 @@ EXPORT_SYMBOL(copy_page);
+ /*
+  * Userspace access stuff.
+  */
+-EXPORT_SYMBOL(copy_from_user);
++EXPORT_SYMBOL(___copy_from_user);
+ EXPORT_SYMBOL(copy_to_user);
+ EXPORT_SYMBOL(__copy_user);
+ EXPORT_SYMBOL(strncpy_from_user);
+diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S
+index ea59c04b07de..075373471da1 100644
+--- a/arch/avr32/lib/copy_user.S
++++ b/arch/avr32/lib/copy_user.S
+@@ -23,13 +23,13 @@
+ 	 */
+ 	.text
+ 	.align	1
+-	.global	copy_from_user
+-	.type	copy_from_user, @function
+-copy_from_user:
++	.global	___copy_from_user
++	.type	___copy_from_user, @function
++___copy_from_user:
+ 	branch_if_kernel r8, __copy_user
+ 	ret_if_privileged r8, r11, r10, r10
+ 	rjmp	__copy_user
+-	.size	copy_from_user, . - copy_from_user
++	.size	___copy_from_user, . - ___copy_from_user
+ 
+ 	.global	copy_to_user
+ 	.type	copy_to_user, @function
+diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
+index 90612a7f2cf3..8cd0184ea9ef 100644
+--- a/arch/blackfin/include/asm/uaccess.h
++++ b/arch/blackfin/include/asm/uaccess.h
+@@ -177,11 +177,12 @@ static inline int bad_user_access_length(void)
+ static inline unsigned long __must_check
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n))) {
+ 		memcpy(to, (const void __force *)from, n);
+-	else
+-		return n;
+-	return 0;
++		return 0;
++	}
++	memset(to, 0, n);
++	return n;
+ }
+ 
+ static inline unsigned long __must_check
+diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h
+index e3530d0f13ee..56c7d5750abd 100644
+--- a/arch/cris/include/asm/uaccess.h
++++ b/arch/cris/include/asm/uaccess.h
+@@ -194,30 +194,6 @@ extern unsigned long __copy_user(void __user *to, const void *from, unsigned lon
+ extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n);
+ extern unsigned long __do_clear_user(void __user *to, unsigned long n);
+ 
+-static inline unsigned long
+-__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
+-{
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __copy_user(to, from, n);
+-	return n;
+-}
+-
+-static inline unsigned long
+-__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
+-{
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_user_zeroing(to, from, n);
+-	return n;
+-}
+-
+-static inline unsigned long
+-__generic_clear_user(void __user *to, unsigned long n)
+-{
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __do_clear_user(to, n);
+-	return n;
+-}
+-
+ static inline long
+ __strncpy_from_user(char *dst, const char __user *src, long count)
+ {
+@@ -282,7 +258,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_copy_from_user_24(to, from, ret);
+ 	else
+-		ret = __generic_copy_from_user(to, from, n);
++		ret = __copy_user_zeroing(to, from, n);
+ 
+ 	return ret;
+ }
+@@ -333,7 +309,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_copy_to_user_24(to, from, ret);
+ 	else
+-		ret = __generic_copy_to_user(to, from, n);
++		ret = __copy_user(to, from, n);
+ 
+ 	return ret;
+ }
+@@ -366,26 +342,43 @@ __constant_clear_user(void __user *to, unsigned long n)
+ 	else if (n == 24)
+ 		__asm_clear_24(to, ret);
+ 	else
+-		ret = __generic_clear_user(to, n);
++		ret = __do_clear_user(to, n);
+ 
+ 	return ret;
+ }
+ 
+ 
+-#define clear_user(to, n)				\
+-	(__builtin_constant_p(n) ?			\
+-	 __constant_clear_user(to, n) :			\
+-	 __generic_clear_user(to, n))
++static inline size_t clear_user(void __user *to, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++		return n;
++	if (__builtin_constant_p(n))
++		return __constant_clear_user(to, n);
++	else
++		return __do_clear_user(to, n);
++}
+ 
+-#define copy_from_user(to, from, n)			\
+-	(__builtin_constant_p(n) ?			\
+-	 __constant_copy_from_user(to, from, n) :	\
+-	 __generic_copy_from_user(to, from, n))
++static inline size_t copy_from_user(void *to, const void __user *from, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_READ, from, n))) {
++		memset(to, 0, n);
++		return n;
++	}
++	if (__builtin_constant_p(n))
++		return __constant_copy_from_user(to, from, n);
++	else
++		return __copy_user_zeroing(to, from, n);
++}
+ 
+-#define copy_to_user(to, from, n)			\
+-	(__builtin_constant_p(n) ?			\
+-	 __constant_copy_to_user(to, from, n) :		\
+-	 __generic_copy_to_user(to, from, n))
++static inline size_t copy_to_user(void __user *to, const void *from, size_t n)
++{
++	if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
++		return n;
++	if (__builtin_constant_p(n))
++		return __constant_copy_to_user(to, from, n);
++	else
++		return __copy_user(to, from, n);
++}
+ 
+ /* We let the __ versions of copy_from/to_user inline, because they're often
+  * used in fast paths and have only a small space overhead.
+diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
+index 3ac9a59d65d4..87d9e34c5df8 100644
+--- a/arch/frv/include/asm/uaccess.h
++++ b/arch/frv/include/asm/uaccess.h
+@@ -263,19 +263,25 @@ do {							\
+ extern long __memset_user(void *dst, unsigned long count);
+ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
+ 
+-#define clear_user(dst,count)			__memset_user(____force(dst), (count))
++#define __clear_user(dst,count)			__memset_user(____force(dst), (count))
+ #define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
+ #define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))
+ 
+ #else
+ 
+-#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
++#define __clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
+ #define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
+ #define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)
+ 
+ #endif
+ 
+-#define __clear_user clear_user
++static inline unsigned long __must_check
++clear_user(void __user *to, unsigned long n)
++{
++	if (likely(__access_ok(to, n)))
++		n = __clear_user(to, n);
++	return n;
++}
+ 
+ static inline unsigned long __must_check
+ __copy_to_user(void __user *to, const void *from, unsigned long n)
+diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
+index e4127e4d6a5b..25fc9049db8a 100644
+--- a/arch/hexagon/include/asm/uaccess.h
++++ b/arch/hexagon/include/asm/uaccess.h
+@@ -102,7 +102,8 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
+ {
+ 	long res = __strnlen_user(src, n);
+ 
+-	/* return from strnlen can't be zero -- that would be rubbish. */
++	if (unlikely(!res))
++		return -EFAULT;
+ 
+ 	if (res > n) {
+ 		copy_from_user(dst, src, n);
+diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
+index 4f3fb6ccbf21..40c2027a2bf4 100644
+--- a/arch/ia64/include/asm/uaccess.h
++++ b/arch/ia64/include/asm/uaccess.h
+@@ -263,17 +263,15 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
+ 	__cu_len;									\
+ })
+ 
+-#define copy_from_user(to, from, n)							\
+-({											\
+-	void *__cu_to = (to);								\
+-	const void __user *__cu_from = (from);						\
+-	long __cu_len = (n);								\
+-											\
+-	__chk_user_ptr(__cu_from);							\
+-	if (__access_ok(__cu_from, __cu_len, get_fs()))					\
+-		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
+-	__cu_len;									\
+-})
++static inline unsigned long
++copy_from_user(void *to, const void __user *from, unsigned long n)
++{
++	if (likely(__access_ok(from, n, get_fs())))
++		n = __copy_user((__force void __user *) to, from, n);
++	else
++		memset(to, 0, n);
++	return n;
++}
+ 
+ #define __copy_in_user(to, from, size)	__copy_user((to), (from), (size))
+ 
+diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
+index 71adff209405..c66a38d0a895 100644
+--- a/arch/m32r/include/asm/uaccess.h
++++ b/arch/m32r/include/asm/uaccess.h
+@@ -215,7 +215,7 @@ extern int fixup_exception(struct pt_regs *regs);
+ #define __get_user_nocheck(x, ptr, size)				\
+ ({									\
+ 	long __gu_err = 0;						\
+-	unsigned long __gu_val;						\
++	unsigned long __gu_val = 0;					\
+ 	might_fault();							\
+ 	__get_user_size(__gu_val, (ptr), (size), __gu_err);		\
+ 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
+index 8282cbce7e39..273e61225c27 100644
+--- a/arch/metag/include/asm/uaccess.h
++++ b/arch/metag/include/asm/uaccess.h
+@@ -204,8 +204,9 @@ extern unsigned long __must_check __copy_user_zeroing(void *to,
+ static inline unsigned long
+ copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n)))
+ 		return __copy_user_zeroing(to, from, n);
++	memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
+index 62942fd12672..0c0a5cfbf79a 100644
+--- a/arch/microblaze/include/asm/uaccess.h
++++ b/arch/microblaze/include/asm/uaccess.h
+@@ -226,7 +226,7 @@ extern long __user_bad(void);
+ 
+ #define __get_user(x, ptr)						\
+ ({									\
+-	unsigned long __gu_val;						\
++	unsigned long __gu_val = 0;					\
+ 	/*unsigned long __gu_ptr = (unsigned long)(ptr);*/		\
+ 	long __gu_err;							\
+ 	switch (sizeof(*(ptr))) {					\
+@@ -371,10 +371,13 @@ extern long __user_bad(void);
+ static inline long copy_from_user(void *to,
+ 		const void __user *from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	might_fault();
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_from_user(to, from, n);
+-	return n;
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ #define __copy_to_user(to, from, n)	\
+diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
+index 6156ac8c4cfb..ab49b14a4be0 100644
+--- a/arch/mips/include/asm/asmmacro.h
++++ b/arch/mips/include/asm/asmmacro.h
+@@ -135,6 +135,7 @@
+ 	ldc1	$f28, THREAD_FPR28(\thread)
+ 	ldc1	$f30, THREAD_FPR30(\thread)
+ 	ctc1	\tmp, fcr31
++	.set	pop
+ 	.endm
+ 
+ 	.macro	fpu_restore_16odd thread
+diff --git a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+index 2f82bfa3a773..c9f5769dfc8f 100644
+--- a/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
++++ b/arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
+@@ -11,11 +11,13 @@
+ #define CP0_EBASE $15, 1
+ 
+ 	.macro  kernel_entry_setup
++#ifdef CONFIG_SMP
+ 	mfc0	t0, CP0_EBASE
+ 	andi	t0, t0, 0x3ff		# CPUNum
+ 	beqz	t0, 1f
+ 	# CPUs other than zero goto smp_bootstrap
+ 	j	smp_bootstrap
++#endif /* CONFIG_SMP */
+ 
+ 1:
+ 	.endm
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index bf8b32450ef6..bc2f5164ce51 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -14,6 +14,7 @@
+ #include <linux/kernel.h>
+ #include <linux/errno.h>
+ #include <linux/thread_info.h>
++#include <linux/string.h>
+ #include <asm/asm-eva.h>
+ 
+ /*
+@@ -1136,6 +1137,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
+ 			__cu_len = __invoke_copy_from_user(__cu_to,	\
+ 							   __cu_from,	\
+ 							   __cu_len);   \
++		} else {						\
++			memset(__cu_to, 0, __cu_len);			\
+ 		}							\
+ 	}								\
+ 	__cu_len;							\
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index e19fa363c8fe..488e50dd2fe6 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -1163,7 +1163,9 @@ fpu_emul:
+ 		regs->regs[31] = r31;
+ 		regs->cp0_epc = epc;
+ 		if (!used_math()) {     /* First time FPU user.  */
++			preempt_disable();
+ 			err = init_fpu();
++			preempt_enable();
+ 			set_used_math();
+ 		}
+ 		lose_fpu(1);    /* Save FPU state for the emulator. */
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index 89847bee2b53..44a6f25e902e 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -593,14 +593,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Avoid inadvertently triggering emulation */
+-	if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+-	    !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
++	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
++	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ 		return -EOPNOTSUPP;
+-	if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
++	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* FR = 0 not supported in MIPS R6 */
+-	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
++	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ 		return -EOPNOTSUPP;
+ 
+ 	/* Proceed with the mode switch */
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index 3cef551908f4..a0268f61cd57 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -172,6 +172,9 @@ asmlinkage void start_secondary(void)
+ 	cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ 	notify_cpu_starting(cpu);
+ 
++	cpumask_set_cpu(cpu, &cpu_callin_map);
++	synchronise_count_slave(cpu);
++
+ 	set_cpu_online(cpu, true);
+ 
+ 	set_cpu_sibling_map(cpu);
+@@ -179,10 +182,6 @@ asmlinkage void start_secondary(void)
+ 
+ 	calculate_cpu_foreign_map();
+ 
+-	cpumask_set_cpu(cpu, &cpu_callin_map);
+-
+-	synchronise_count_slave(cpu);
+-
+ 	/*
+ 	 * irq will be enabled in ->smp_finish(), enabling it too early
+ 	 * is dangerous.
+diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
+index 7a7ed9ca01bb..eff71c75dc27 100644
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -152,7 +152,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+ 	srcu_idx = srcu_read_lock(&kvm->srcu);
+ 	pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+ 
+-	if (kvm_mips_is_error_pfn(pfn)) {
++	if (is_error_noslot_pfn(pfn)) {
+ 		kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+ 		err = -EFAULT;
+ 		goto out;
+diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
+index 537278746a15..4af43d9ba495 100644
+--- a/arch/mn10300/include/asm/uaccess.h
++++ b/arch/mn10300/include/asm/uaccess.h
+@@ -181,6 +181,7 @@ struct __large_struct { unsigned long buf[100]; };
+ 		"2:\n"						\
+ 		"	.section	.fixup,\"ax\"\n"	\
+ 		"3:\n\t"					\
++		"	mov		0,%1\n"			\
+ 		"	mov		%3,%0\n"		\
+ 		"	jmp		2b\n"			\
+ 		"	.previous\n"				\
+diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
+index 7826e6c364e7..ce8899e5e171 100644
+--- a/arch/mn10300/lib/usercopy.c
++++ b/arch/mn10300/lib/usercopy.c
+@@ -9,7 +9,7 @@
+  * as published by the Free Software Foundation; either version
+  * 2 of the Licence, or (at your option) any later version.
+  */
+-#include <asm/uaccess.h>
++#include <linux/uaccess.h>
+ 
+ unsigned long
+ __generic_copy_to_user(void *to, const void *from, unsigned long n)
+@@ -24,6 +24,8 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
+ {
+ 	if (access_ok(VERIFY_READ, from, n))
+ 		__copy_user_zeroing(to, from, n);
++	else
++		memset(to, 0, n);
+ 	return n;
+ }
+ 
+diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
+index caa51ff85a3c..0ab82324c817 100644
+--- a/arch/nios2/include/asm/uaccess.h
++++ b/arch/nios2/include/asm/uaccess.h
+@@ -102,9 +102,12 @@ extern long __copy_to_user(void __user *to, const void *from, unsigned long n);
+ static inline long copy_from_user(void *to, const void __user *from,
+ 				unsigned long n)
+ {
+-	if (!access_ok(VERIFY_READ, from, n))
+-		return n;
+-	return __copy_from_user(to, from, n);
++	unsigned long res = n;
++	if (access_ok(VERIFY_READ, from, n))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline long copy_to_user(void __user *to, const void *from,
+@@ -139,7 +142,7 @@ extern long strnlen_user(const char __user *s, long n);
+ 
+ #define __get_user_unknown(val, size, ptr, err) do {			\
+ 	err = 0;							\
+-	if (copy_from_user(&(val), ptr, size)) {			\
++	if (__copy_from_user(&(val), ptr, size)) {			\
+ 		err = -EFAULT;						\
+ 	}								\
+ 	} while (0)
+@@ -166,7 +169,7 @@ do {									\
+ 	({								\
+ 	long __gu_err = -EFAULT;					\
+ 	const __typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
+-	unsigned long __gu_val;						\
++	unsigned long __gu_val = 0;					\
+ 	__get_user_common(__gu_val, sizeof(*(ptr)), __gu_ptr, __gu_err);\
+ 	(x) = (__force __typeof__(x))__gu_val;				\
+ 	__gu_err;							\
+diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
+index a6bd07ca3d6c..5cc6b4f1b795 100644
+--- a/arch/openrisc/include/asm/uaccess.h
++++ b/arch/openrisc/include/asm/uaccess.h
+@@ -273,28 +273,20 @@ __copy_tofrom_user(void *to, const void *from, unsigned long size);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
+-	return n;
++	unsigned long res = n;
++
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_tofrom_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_WRITE, to, n))
+-		return __copy_tofrom_user(to, from, n);
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, n - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, to, n)))
++		n = __copy_tofrom_user(to, from, n);
+ 	return n;
+ }
+ 
+@@ -303,13 +295,8 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
+ static inline __must_check unsigned long
+ clear_user(void *addr, unsigned long size)
+ {
+-
+-	if (access_ok(VERIFY_WRITE, addr, size))
+-		return __clear_user(addr, size);
+-	if ((unsigned long)addr < TASK_SIZE) {
+-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+-		return __clear_user(addr, size - over) + over;
+-	}
++	if (likely(access_ok(VERIFY_WRITE, addr, size)))
++		size = __clear_user(addr, size);
+ 	return size;
+ }
+ 
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 1960b87c1c8b..4ad51465890b 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -10,6 +10,7 @@
+ #include <asm-generic/uaccess-unaligned.h>
+ 
+ #include <linux/bug.h>
++#include <linux/string.h>
+ 
+ #define VERIFY_READ 0
+ #define VERIFY_WRITE 1
+@@ -245,13 +246,14 @@ static inline unsigned long __must_check copy_from_user(void *to,
+                                           unsigned long n)
+ {
+         int sz = __compiletime_object_size(to);
+-        int ret = -EFAULT;
++        unsigned long ret = n;
+ 
+         if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
+                 ret = __copy_from_user(to, from, n);
+         else
+                 copy_from_user_overflow();
+-
++	if (unlikely(ret))
++		memset(to + (n - ret), 0, ret);
+         return ret;
+ }
+ 
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index a0c071d24e0e..6fbea25d8c78 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -323,30 +323,17 @@ extern unsigned long __copy_tofrom_user(void __user *to,
+ static inline unsigned long copy_from_user(void *to,
+ 		const void __user *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_READ, from, n))
++	if (likely(access_ok(VERIFY_READ, from, n)))
+ 		return __copy_tofrom_user((__force void __user *)to, from, n);
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + n - TASK_SIZE;
+-		return __copy_tofrom_user((__force void __user *)to, from,
+-				n - over) + over;
+-	}
++	memset(to, 0, n);
+ 	return n;
+ }
+ 
+ static inline unsigned long copy_to_user(void __user *to,
+ 		const void *from, unsigned long n)
+ {
+-	unsigned long over;
+-
+ 	if (access_ok(VERIFY_WRITE, to, n))
+ 		return __copy_tofrom_user(to, (__force void __user *)from, n);
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + n - TASK_SIZE;
+-		return __copy_tofrom_user(to, (__force void __user *)from,
+-				n - over) + over;
+-	}
+ 	return n;
+ }
+ 
+@@ -437,10 +424,6 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+ 	might_fault();
+ 	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+ 		return __clear_user(addr, size);
+-	if ((unsigned long)addr < TASK_SIZE) {
+-		unsigned long over = (unsigned long)addr + size - TASK_SIZE;
+-		return __clear_user(addr, size - over) + over;
+-	}
+ 	return size;
+ }
+ 
+diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
+index 736d18b3cefd..4c48b487698c 100644
+--- a/arch/powerpc/mm/slb_low.S
++++ b/arch/powerpc/mm/slb_low.S
+@@ -113,7 +113,12 @@ BEGIN_FTR_SECTION
+ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
+ 	b	slb_finish_load_1T
+ 
+-0:
++0:	/*
++	 * For userspace addresses, make sure this is region 0.
++	 */
++	cmpdi	r9, 0
++	bne	8f
++
+ 	/* when using slices, we extract the psize off the slice bitmaps
+ 	 * and then we need to get the sllp encoding off the mmu_psize_defs
+ 	 * array.
+diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
+index d64a7a62164f..f6ac1d7e7ed8 100644
+--- a/arch/s390/include/asm/uaccess.h
++++ b/arch/s390/include/asm/uaccess.h
+@@ -213,28 +213,28 @@ int __put_user_bad(void) __attribute__((noreturn));
+ 	__chk_user_ptr(ptr);					\
+ 	switch (sizeof(*(ptr))) {				\
+ 	case 1: {						\
+-		unsigned char __x;				\
++		unsigned char __x = 0;				\
+ 		__gu_err = __get_user_fn(&__x, ptr,		\
+ 					 sizeof(*(ptr)));	\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 2: {						\
+-		unsigned short __x;				\
++		unsigned short __x = 0;				\
+ 		__gu_err = __get_user_fn(&__x, ptr,		\
+ 					 sizeof(*(ptr)));	\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 4: {						\
+-		unsigned int __x;				\
++		unsigned int __x = 0;				\
+ 		__gu_err = __get_user_fn(&__x, ptr,		\
+ 					 sizeof(*(ptr)));	\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+ 		break;						\
+ 	};							\
+ 	case 8: {						\
+-		unsigned long long __x;				\
++		unsigned long long __x = 0;			\
+ 		__gu_err = __get_user_fn(&__x, ptr,		\
+ 					 sizeof(*(ptr)));	\
+ 		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
+diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
+index ab66ddde777b..69326dfb894d 100644
+--- a/arch/score/include/asm/uaccess.h
++++ b/arch/score/include/asm/uaccess.h
+@@ -158,7 +158,7 @@ do {									\
+ 		__get_user_asm(val, "lw", ptr);				\
+ 		 break;							\
+ 	case 8: 							\
+-		if ((copy_from_user((void *)&val, ptr, 8)) == 0)	\
++		if (__copy_from_user((void *)&val, ptr, 8) == 0)	\
+ 			__gu_err = 0;					\
+ 		else							\
+ 			__gu_err = -EFAULT;				\
+@@ -183,6 +183,8 @@ do {									\
+ 									\
+ 	if (likely(access_ok(VERIFY_READ, __gu_ptr, size)))		\
+ 		__get_user_common((x), size, __gu_ptr);			\
++	else								\
++		(x) = 0;						\
+ 									\
+ 	__gu_err;							\
+ })
+@@ -196,6 +198,7 @@ do {									\
+ 		"2:\n"							\
+ 		".section .fixup,\"ax\"\n"				\
+ 		"3:li	%0, %4\n"					\
++		"li	%1, 0\n"					\
+ 		"j	2b\n"						\
+ 		".previous\n"						\
+ 		".section __ex_table,\"a\"\n"				\
+@@ -293,35 +296,34 @@ extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
+ static inline unsigned long
+ copy_from_user(void *to, const void *from, unsigned long len)
+ {
+-	unsigned long over;
++	unsigned long res = len;
+ 
+-	if (access_ok(VERIFY_READ, from, len))
+-		return __copy_tofrom_user(to, from, len);
++	if (likely(access_ok(VERIFY_READ, from, len)))
++		res = __copy_tofrom_user(to, from, len);
+ 
+-	if ((unsigned long)from < TASK_SIZE) {
+-		over = (unsigned long)from + len - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, len - over) + over;
+-	}
+-	return len;
++	if (unlikely(res))
++		memset(to + (len - res), 0, res);
++
++	return res;
+ }
+ 
+ static inline unsigned long
+ copy_to_user(void *to, const void *from, unsigned long len)
+ {
+-	unsigned long over;
+-
+-	if (access_ok(VERIFY_WRITE, to, len))
+-		return __copy_tofrom_user(to, from, len);
++	if (likely(access_ok(VERIFY_WRITE, to, len)))
++		len = __copy_tofrom_user(to, from, len);
+ 
+-	if ((unsigned long)to < TASK_SIZE) {
+-		over = (unsigned long)to + len - TASK_SIZE;
+-		return __copy_tofrom_user(to, from, len - over) + over;
+-	}
+ 	return len;
+ }
+ 
+-#define __copy_from_user(to, from, len)	\
+-		__copy_tofrom_user((to), (from), (len))
++static inline unsigned long
++__copy_from_user(void *to, const void *from, unsigned long len)
++{
++	unsigned long left = __copy_tofrom_user(to, from, len);
++	if (unlikely(left))
++		memset(to + (len - left), 0, left);
++	return left;
++}
+ 
+ #define __copy_to_user(to, from, len)		\
+ 		__copy_tofrom_user((to), (from), (len))
+@@ -335,17 +337,17 @@ __copy_to_user_inatomic(void *to, const void *from, unsigned long len)
+ static inline unsigned long
+ __copy_from_user_inatomic(void *to, const void *from, unsigned long len)
+ {
+-	return __copy_from_user(to, from, len);
++	return __copy_tofrom_user(to, from, len);
+ }
+ 
+-#define __copy_in_user(to, from, len)	__copy_from_user(to, from, len)
++#define __copy_in_user(to, from, len)	__copy_tofrom_user(to, from, len)
+ 
+ static inline unsigned long
+ copy_in_user(void *to, const void *from, unsigned long len)
+ {
+ 	if (access_ok(VERIFY_READ, from, len) &&
+ 		      access_ok(VERIFY_WRITE, to, len))
+-		return copy_from_user(to, from, len);
++		return __copy_tofrom_user(to, from, len);
+ }
+ 
+ /*
+diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
+index a49635c51266..92ade79ac427 100644
+--- a/arch/sh/include/asm/uaccess.h
++++ b/arch/sh/include/asm/uaccess.h
+@@ -151,7 +151,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
+ 	__kernel_size_t __copy_size = (__kernel_size_t) n;
+ 
+ 	if (__copy_size && __access_ok(__copy_from, __copy_size))
+-		return __copy_user(to, from, __copy_size);
++		__copy_size = __copy_user(to, from, __copy_size);
++
++	if (unlikely(__copy_size))
++		memset(to + (n - __copy_size), 0, __copy_size);
+ 
+ 	return __copy_size;
+ }
+diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
+index c01376c76b86..ca5073dd4596 100644
+--- a/arch/sh/include/asm/uaccess_64.h
++++ b/arch/sh/include/asm/uaccess_64.h
+@@ -24,6 +24,7 @@
+ #define __get_user_size(x,ptr,size,retval)			\
+ do {								\
+ 	retval = 0;						\
++	x = 0;							\
+ 	switch (size) {						\
+ 	case 1:							\
+ 		retval = __get_user_asm_b((void *)&x,		\
+diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
+index 64ee103dc29d..dfb542c7cc71 100644
+--- a/arch/sparc/include/asm/uaccess_32.h
++++ b/arch/sparc/include/asm/uaccess_32.h
+@@ -328,8 +328,10 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, un
+ {
+ 	if (n && __access_ok((unsigned long) from, n))
+ 		return __copy_user((__force void __user *) to, from, n);
+-	else
++	else {
++		memset(to, 0, n);
+ 		return n;
++	}
+ }
+ 
+ static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
+index ace9dec050b1..d081e7e42fb3 100644
+--- a/arch/x86/include/asm/uaccess.h
++++ b/arch/x86/include/asm/uaccess.h
+@@ -391,7 +391,11 @@ do {									\
+ #define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
+ 	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
+ 		     "2:\n"						\
+-		     _ASM_EXTABLE_EX(1b, 2b)				\
++		     ".section .fixup,\"ax\"\n"				\
++                     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
++		     "  jmp 2b\n"					\
++		     ".previous\n"					\
++		     _ASM_EXTABLE_EX(1b, 3b)				\
+ 		     : ltype(x) : "m" (__m(addr)))
+ 
+ #define __put_user_nocheck(x, ptr, size)			\
+diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
+index 0122bec38564..f25799f351f7 100644
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -233,6 +233,8 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		return blkcipher_walk_done(desc, walk, -EINVAL);
+ 	}
+ 
++	bsize = min(walk->walk_blocksize, n);
++
+ 	walk->flags &= ~(BLKCIPHER_WALK_SLOW | BLKCIPHER_WALK_COPY |
+ 			 BLKCIPHER_WALK_DIFF);
+ 	if (!scatterwalk_aligned(&walk->in, walk->alignmask) ||
+@@ -245,7 +247,6 @@ static int blkcipher_walk_next(struct blkcipher_desc *desc,
+ 		}
+ 	}
+ 
+-	bsize = min(walk->walk_blocksize, n);
+ 	n = scatterwalk_clamp(&walk->in, n);
+ 	n = scatterwalk_clamp(&walk->out, n);
+ 
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index b0602ba03111..34e4dfafb94f 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -585,9 +585,14 @@ static int cryptd_hash_export(struct ahash_request *req, void *out)
+ 
+ static int cryptd_hash_import(struct ahash_request *req, const void *in)
+ {
+-	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
++	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
++	struct shash_desc *desc = cryptd_shash_desc(req);
++
++	desc->tfm = ctx->child;
++	desc->flags = req->base.flags;
+ 
+-	return crypto_shash_import(&rctx->desc, in);
++	return crypto_shash_import(desc, in);
+ }
+ 
+ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index 60397ec77ff7..27fd0dacad5f 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -804,6 +804,10 @@ static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
+ 	struct arm_ccn_component *xp;
+ 	u32 val, dt_cfg;
+ 
++	/* Nothing to do for cycle counter */
++	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
++		return;
++
+ 	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
+ 		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
+ 	else
+@@ -901,7 +905,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+ 
+ 	/* Comparison values */
+ 	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
+-	writel((cmp_l >> 32) & 0xefffffff,
++	writel((cmp_l >> 32) & 0x7fffffff,
+ 			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
+ 	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
+ 	writel((cmp_h >> 32) & 0x0fffffff,
+@@ -909,7 +913,7 @@ static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
+ 
+ 	/* Mask */
+ 	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
+-	writel((mask_l >> 32) & 0xefffffff,
++	writel((mask_l >> 32) & 0x7fffffff,
+ 			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
+ 	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
+ 	writel((mask_h >> 32) & 0x0fffffff,
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index 767d0eaabe97..3101f57492c0 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -316,19 +316,19 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
+ 			u32 *coeff_tab = heo_upscaling_ycoef;
+ 			u32 max_memsize;
+ 
+-			if (state->crtc_w < state->src_w)
++			if (state->crtc_h < state->src_h)
+ 				coeff_tab = heo_downscaling_ycoef;
+ 			for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
+ 				atmel_hlcdc_layer_update_cfg(&plane->layer,
+ 							     33 + i,
+ 							     0xffffffff,
+ 							     coeff_tab[i]);
+-			factor = ((8 * 256 * state->src_w) - (256 * 4)) /
+-				 state->crtc_w;
++			factor = ((8 * 256 * state->src_h) - (256 * 4)) /
++				 state->crtc_h;
+ 			factor++;
+-			max_memsize = ((factor * state->crtc_w) + (256 * 4)) /
++			max_memsize = ((factor * state->crtc_h) + (256 * 4)) /
+ 				      2048;
+-			if (max_memsize > state->src_w)
++			if (max_memsize > state->src_h)
+ 				factor--;
+ 			factor_reg |= (factor << 16) | 0x80000000;
+ 		}
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index 9cfcd0aef0df..92c4698e8427 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -1018,6 +1018,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	return 0;
+ }
+ 
++#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ typedef struct drm_mode_fb_cmd232 {
+ 	u32 fb_id;
+ 	u32 width;
+@@ -1074,6 +1075,7 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
+ 
+ 	return 0;
+ }
++#endif
+ 
+ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+@@ -1107,7 +1109,9 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ 	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ #endif
+ 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+ 	[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
++#endif
+ };
+ 
+ /**
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 78c911be115d..6bf81d95a3f4 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -166,6 +166,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+ 		if (ret < 0)
+ 			goto error_ret;
++		*val = 0;
+ 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ 		ret = IIO_VAL_INT_PLUS_MICRO;
+ 		break;
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index ad0a7e8c2c2b..8b8cacbaf20d 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -1273,11 +1273,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ 	struct flexcan_priv *priv = netdev_priv(dev);
+ 	int err;
+ 
+-	err = flexcan_chip_disable(priv);
+-	if (err)
+-		return err;
+-
+ 	if (netif_running(dev)) {
++		err = flexcan_chip_disable(priv);
++		if (err)
++			return err;
+ 		netif_stop_queue(dev);
+ 		netif_device_detach(dev);
+ 	}
+@@ -1290,13 +1289,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
+ {
+ 	struct net_device *dev = dev_get_drvdata(device);
+ 	struct flexcan_priv *priv = netdev_priv(dev);
++	int err;
+ 
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 	if (netif_running(dev)) {
+ 		netif_device_attach(dev);
+ 		netif_start_queue(dev);
++		err = flexcan_chip_enable(priv);
++		if (err)
++			return err;
+ 	}
+-	return flexcan_chip_enable(priv);
++	return 0;
+ }
+ 
+ static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);
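The flexcan fix restores suspend/resume symmetry: the chip is disabled on suspend only when the interface is running, so resume must enable it under the same netif_running() condition instead of unconditionally. A tiny sketch of the pairing rule, names illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool chip_on = true;

static void suspend(bool running)
{
	if (running)
		chip_on = false;	/* disable only if we were active */
}

static void resume(bool running)
{
	if (running)
		chip_on = true;		/* enable under the same condition */
}

int main(void)
{
	suspend(false);
	resume(false);
	printf("chip_on=%d (idle interface left untouched)\n", chip_on);
	return 0;
}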
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index d5f2fbf62d72..534b2b87bd5d 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -1541,13 +1541,13 @@ static int ath9k_sta_state(struct ieee80211_hw *hw,
+ 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ 	int ret = 0;
+ 
+-	if (old_state == IEEE80211_STA_AUTH &&
+-	    new_state == IEEE80211_STA_ASSOC) {
++	if (old_state == IEEE80211_STA_NOTEXIST &&
++	    new_state == IEEE80211_STA_NONE) {
+ 		ret = ath9k_sta_add(hw, vif, sta);
+ 		ath_dbg(common, CONFIG,
+ 			"Add station: %pM\n", sta->addr);
+-	} else if (old_state == IEEE80211_STA_ASSOC &&
+-		   new_state == IEEE80211_STA_AUTH) {
++	} else if (old_state == IEEE80211_STA_NONE &&
++		   new_state == IEEE80211_STA_NOTEXIST) {
+ 		ret = ath9k_sta_remove(hw, vif, sta);
+ 		ath_dbg(common, CONFIG,
+ 			"Remove station: %pM\n", sta->addr);
+diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
+index fa09d4be2b53..2b456ca69d5c 100644
+--- a/drivers/scsi/constants.c
++++ b/drivers/scsi/constants.c
+@@ -1181,8 +1181,9 @@ static const char * const snstext[] = {
+ 
+ /* Get sense key string or NULL if not available */
+ const char *
+-scsi_sense_key_string(unsigned char key) {
+-	if (key <= 0xE)
++scsi_sense_key_string(unsigned char key)
++{
++	if (key < ARRAY_SIZE(snstext))
+ 		return snstext[key];
+ 	return NULL;
+ }
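The constants.c change swaps a hand-maintained bound (key <= 0xE) for one derived from the table itself, so the check can never go stale when entries are added. A self-contained sketch of the idiom:

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

static const char *const names[] = {
	"No Sense", "Recovered Error", "Not Ready",
};

static const char *name_string(unsigned char key)
{
	if (key < ARRAY_SIZE(names))
		return names[key];
	return NULL;	/* out of range: never read past the table */
}

int main(void)
{
	printf("%s\n", name_string(1));			/* Recovered Error */
	printf("%p\n", (void *)name_string(200));	/* (nil) */
	return 0;
}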
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index d93e43cfb6f8..ebcf40c2f12b 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -939,6 +939,15 @@ static int isr_setup_status_phase(struct ci_hdrc *ci)
+ 	int retval;
+ 	struct ci_hw_ep *hwep;
+ 
++	/*
++	 * Unexpected USB controller behavior, caused by bad signal integrity
++	 * or ground reference problems, can lead to isr_setup_status_phase
++	 * being called with ci->status equal to NULL.
++	 * If this situation occurs, you should review your USB hardware design.
++	 */
++	if (WARN_ON_ONCE(!ci->status))
++		return -EPIPE;
++
+ 	hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
+ 	ci->status->context = ci;
+ 	ci->status->complete = isr_setup_status_complete;
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index 894894f2ff93..81336acc7040 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -184,8 +184,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 	memcpy(&endpoint->desc, d, n);
+ 	INIT_LIST_HEAD(&endpoint->urb_list);
+ 
+-	/* Fix up bInterval values outside the legal range. Use 32 ms if no
+-	 * proper value can be guessed. */
++	/*
++	 * Fix up bInterval values outside the legal range.
++	 * Use 10 or 8 ms if no proper value can be guessed.
++	 */
+ 	i = 0;		/* i = min, j = max, n = default */
+ 	j = 255;
+ 	if (usb_endpoint_xfer_int(d)) {
+@@ -193,13 +195,15 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 		switch (to_usb_device(ddev)->speed) {
+ 		case USB_SPEED_SUPER:
+ 		case USB_SPEED_HIGH:
+-			/* Many device manufacturers are using full-speed
++			/*
++			 * Many device manufacturers are using full-speed
+ 			 * bInterval values in high-speed interrupt endpoint
+-			 * descriptors. Try to fix those and fall back to a
+-			 * 32 ms default value otherwise. */
++			 * descriptors. Try to fix those and fall back to an
++			 * 8-ms default value otherwise.
++			 */
+ 			n = fls(d->bInterval*8);
+ 			if (n == 0)
+-				n = 9;	/* 32 ms = 2^(9-1) uframes */
++				n = 7;	/* 8 ms = 2^(7-1) uframes */
+ 			j = 16;
+ 
+ 			/*
+@@ -214,10 +218,12 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 			}
+ 			break;
+ 		default:		/* USB_SPEED_FULL or _LOW */
+-			/* For low-speed, 10 ms is the official minimum.
++			/*
++			 * For low-speed, 10 ms is the official minimum.
+ 			 * But some "overclocked" devices might want faster
+-			 * polling so we'll allow it. */
+-			n = 32;
++			 * polling so we'll allow it.
++			 */
++			n = 10;
+ 			break;
+ 		}
+ 	} else if (usb_endpoint_xfer_isoc(d)) {
+@@ -225,10 +231,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
+ 		j = 16;
+ 		switch (to_usb_device(ddev)->speed) {
+ 		case USB_SPEED_HIGH:
+-			n = 9;		/* 32 ms = 2^(9-1) uframes */
++			n = 7;		/* 8 ms = 2^(7-1) uframes */
+ 			break;
+ 		default:		/* USB_SPEED_FULL */
+-			n = 6;		/* 32 ms = 2^(6-1) frames */
++			n = 4;		/* 8 ms = 2^(4-1) frames */
+ 			break;
+ 		}
+ 	}
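The new bInterval defaults above follow from the encoding: for high-speed interrupt and isochronous endpoints the period is 2^(bInterval-1) microframes of 125 us each, so the old default of 9 meant 2^8 * 125 us = 32 ms while the new 7 gives 2^6 * 125 us = 8 ms. A quick check of the arithmetic:

#include <stdio.h>

int main(void)
{
	for (int n = 4; n <= 9; n++) {
		unsigned long uframes = 1UL << (n - 1);

		printf("bInterval=%d -> %lu uframes = %lu us\n",
		       n, uframes, uframes * 125);
	}
	return 0;
}

For full speed the unit is a 1 ms frame, which is why the isochronous default drops from 6 (2^5 = 32 ms) to 4 (2^3 = 8 ms).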
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index f7e917866e05..6a2911743829 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -846,6 +846,10 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+ 	ep->stop_cmds_pending--;
++	if (xhci->xhc_state & XHCI_STATE_REMOVING) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		return;
++	}
+ 	if (xhci->xhc_state & XHCI_STATE_DYING) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 				"Stop EP timer ran, but another timer marked "
+@@ -899,7 +903,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
+ 	spin_unlock_irqrestore(&xhci->lock, flags);
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 			"Calling usb_hc_died()");
+-	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
++	usb_hc_died(xhci_to_hcd(xhci));
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 			"xHCI host controller is dead.");
+ }
+diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
+index 9a705b15b3a1..cf274b8c63fe 100644
+--- a/drivers/usb/renesas_usbhs/mod.c
++++ b/drivers/usb/renesas_usbhs/mod.c
+@@ -277,9 +277,16 @@ static irqreturn_t usbhs_interrupt(int irq, void *data)
+ 	usbhs_write(priv, INTSTS0, ~irq_state.intsts0 & INTSTS0_MAGIC);
+ 	usbhs_write(priv, INTSTS1, ~irq_state.intsts1 & INTSTS1_MAGIC);
+ 
+-	usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
++	/*
++	 * The driver should not clear the xxxSTS after the line of
++	 * "call irq callback functions" because each "if" statement is
++	 * possible to call the callback function for avoiding any side effects.
++	 */
++	if (irq_state.intsts0 & BRDY)
++		usbhs_write(priv, BRDYSTS, ~irq_state.brdysts);
+ 	usbhs_write(priv, NRDYSTS, ~irq_state.nrdysts);
+-	usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
++	if (irq_state.intsts0 & BEMP)
++		usbhs_write(priv, BEMPSTS, ~irq_state.bempsts);
+ 
+ 	/*
+ 	 * call irq callback functions
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index a204782ae530..e98b6e57b703 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -54,7 +54,8 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS()		\
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+-	{ USB_DEVICE(0x8087, 0x0716) }
++	{ USB_DEVICE(0x8087, 0x0716) }, \
++	{ USB_DEVICE(0x8087, 0x0801) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
+ /* Google Serial USB SubClass */
+diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
+index 5b700ef1e59d..6196b5eaf9a5 100644
+--- a/fs/autofs4/autofs_i.h
++++ b/fs/autofs4/autofs_i.h
+@@ -79,9 +79,13 @@ struct autofs_info {
+ };
+ 
+ #define AUTOFS_INF_EXPIRING	(1<<0) /* dentry is in the process of expiring */
+-#define AUTOFS_INF_NO_RCU	(1<<1) /* the dentry is being considered
++#define AUTOFS_INF_WANT_EXPIRE	(1<<1) /* the dentry is being considered
+ 					* for expiry, so RCU_walk is
+-					* not permitted
++					* not permitted.  If it progresses to
++					* actual expiry attempt, the flag is
++					* not cleared when EXPIRING is set -
++					* in that case it gets cleared only
++					* when it comes to clearing EXPIRING.
+ 					*/
+ #define AUTOFS_INF_PENDING	(1<<2) /* dentry pending mount */
+ 
+diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
+index 1cebc3c52fa5..7a5a598a2d94 100644
+--- a/fs/autofs4/expire.c
++++ b/fs/autofs4/expire.c
+@@ -315,19 +315,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
+ 	if (ino->flags & AUTOFS_INF_PENDING)
+ 		goto out;
+ 	if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+-		ino->flags |= AUTOFS_INF_NO_RCU;
++		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
+ 		synchronize_rcu();
+ 		spin_lock(&sbi->fs_lock);
+ 		if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
+ 			ino->flags |= AUTOFS_INF_EXPIRING;
+-			smp_mb();
+-			ino->flags &= ~AUTOFS_INF_NO_RCU;
+ 			init_completion(&ino->expire_complete);
+ 			spin_unlock(&sbi->fs_lock);
+ 			return root;
+ 		}
+-		ino->flags &= ~AUTOFS_INF_NO_RCU;
++		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+ 	}
+ out:
+ 	spin_unlock(&sbi->fs_lock);
+@@ -417,6 +415,7 @@ static struct dentry *should_expire(struct dentry *dentry,
+ 	}
+ 	return NULL;
+ }
++
+ /*
+  * Find an eligible tree to time-out
+  * A tree is eligible if :-
+@@ -432,6 +431,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
+ 	struct dentry *root = sb->s_root;
+ 	struct dentry *dentry;
+ 	struct dentry *expired;
++	struct dentry *found;
+ 	struct autofs_info *ino;
+ 
+ 	if (!root)
+@@ -442,48 +442,54 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
+ 
+ 	dentry = NULL;
+ 	while ((dentry = get_next_positive_subdir(dentry, root))) {
++		int flags = how;
++
+ 		spin_lock(&sbi->fs_lock);
+ 		ino = autofs4_dentry_ino(dentry);
+-		if (ino->flags & AUTOFS_INF_NO_RCU)
+-			expired = NULL;
+-		else
+-			expired = should_expire(dentry, mnt, timeout, how);
+-		if (!expired) {
++		if (ino->flags & AUTOFS_INF_WANT_EXPIRE) {
+ 			spin_unlock(&sbi->fs_lock);
+ 			continue;
+ 		}
++		spin_unlock(&sbi->fs_lock);
++
++		expired = should_expire(dentry, mnt, timeout, flags);
++		if (!expired)
++			continue;
++
++		spin_lock(&sbi->fs_lock);
+ 		ino = autofs4_dentry_ino(expired);
+-		ino->flags |= AUTOFS_INF_NO_RCU;
++		ino->flags |= AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
+ 		synchronize_rcu();
+-		spin_lock(&sbi->fs_lock);
+-		if (should_expire(expired, mnt, timeout, how)) {
+-			if (expired != dentry)
+-				dput(dentry);
+-			goto found;
+-		}
+ 
+-		ino->flags &= ~AUTOFS_INF_NO_RCU;
++		/* Make sure a reference is not taken on found if
++		 * things have changed.
++		 */
++		flags &= ~AUTOFS_EXP_LEAVES;
++		found = should_expire(expired, mnt, timeout, how);
++		if (!found || found != expired)
++			/* Something has changed, continue */
++			goto next;
++
+ 		if (expired != dentry)
+-			dput(expired);
++			dput(dentry);
++
++		spin_lock(&sbi->fs_lock);
++		goto found;
++next:
++		spin_lock(&sbi->fs_lock);
++		ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
+ 		spin_unlock(&sbi->fs_lock);
++		if (expired != dentry)
++			dput(expired);
+ 	}
+ 	return NULL;
+ 
+ found:
+ 	DPRINTK("returning %p %pd", expired, expired);
+ 	ino->flags |= AUTOFS_INF_EXPIRING;
+-	smp_mb();
+-	ino->flags &= ~AUTOFS_INF_NO_RCU;
+ 	init_completion(&ino->expire_complete);
+ 	spin_unlock(&sbi->fs_lock);
+-	spin_lock(&sbi->lookup_lock);
+-	spin_lock(&expired->d_parent->d_lock);
+-	spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
+-	list_move(&expired->d_parent->d_subdirs, &expired->d_child);
+-	spin_unlock(&expired->d_lock);
+-	spin_unlock(&expired->d_parent->d_lock);
+-	spin_unlock(&sbi->lookup_lock);
+ 	return expired;
+ }
+ 
+@@ -492,15 +498,27 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
+ 	struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
+ 	struct autofs_info *ino = autofs4_dentry_ino(dentry);
+ 	int status;
++	int state;
+ 
+ 	/* Block on any pending expire */
+-	if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
++	if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
+ 		return 0;
+ 	if (rcu_walk)
+ 		return -ECHILD;
+ 
++retry:
+ 	spin_lock(&sbi->fs_lock);
+-	if (ino->flags & AUTOFS_INF_EXPIRING) {
++	state = ino->flags & (AUTOFS_INF_WANT_EXPIRE | AUTOFS_INF_EXPIRING);
++	if (state == AUTOFS_INF_WANT_EXPIRE) {
++		spin_unlock(&sbi->fs_lock);
++		/*
++		 * Possibly being selected for expire, wait until
++		 * it's selected or not.
++		 */
++		schedule_timeout_uninterruptible(HZ/10);
++		goto retry;
++	}
++	if (state & AUTOFS_INF_EXPIRING) {
+ 		spin_unlock(&sbi->fs_lock);
+ 
+ 		DPRINTK("waiting for expire %p name=%pd", dentry, dentry);
+@@ -551,7 +569,7 @@ int autofs4_expire_run(struct super_block *sb,
+ 	ino = autofs4_dentry_ino(dentry);
+ 	/* avoid rapid-fire expire attempts if expiry fails */
+ 	ino->last_used = now;
+-	ino->flags &= ~AUTOFS_INF_EXPIRING;
++	ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
+ 	complete_all(&ino->expire_complete);
+ 	spin_unlock(&sbi->fs_lock);
+ 
+@@ -579,7 +597,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
+ 		spin_lock(&sbi->fs_lock);
+ 		/* avoid rapid-fire expire attempts if expiry fails */
+ 		ino->last_used = now;
+-		ino->flags &= ~AUTOFS_INF_EXPIRING;
++		ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
+ 		complete_all(&ino->expire_complete);
+ 		spin_unlock(&sbi->fs_lock);
+ 		dput(dentry);
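The retry loop added to autofs4_expire_wait() handles the window in which only AUTOFS_INF_WANT_EXPIRE is set: the dentry is merely a candidate, so the waiter backs off briefly and re-reads the flags until the candidate is either promoted to EXPIRING or dropped. A userspace model of that polling protocol, with flag values and helpers invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define EXPIRING	(1 << 0)
#define WANT_EXPIRE	(1 << 1)

static atomic_int flags;

static int expire_wait_sketch(void)
{
	for (;;) {
		int state = atomic_load(&flags) & (WANT_EXPIRE | EXPIRING);

		if (state == WANT_EXPIRE) {	/* candidate only: poll again */
			usleep(100 * 1000);	/* ~HZ/10 back-off */
			continue;
		}
		return (state & EXPIRING) ? 1 : 0;
	}
}

static void *promote(void *arg)
{
	usleep(250 * 1000);
	/* as in the patch, WANT_EXPIRE stays set alongside EXPIRING */
	atomic_store(&flags, WANT_EXPIRE | EXPIRING);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&flags, WANT_EXPIRE);
	pthread_create(&t, NULL, promote, NULL);
	printf("expiring=%d\n", expire_wait_sketch());	/* prints 1 */
	pthread_join(t, NULL);
	return 0;
}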
+diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
+index c6d7d3dbd52a..7a54c6a867c8 100644
+--- a/fs/autofs4/root.c
++++ b/fs/autofs4/root.c
+@@ -455,7 +455,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
+ 		 * a mount-trap.
+ 		 */
+ 		struct inode *inode;
+-		if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
++		if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
+ 			return 0;
+ 		if (d_mountpoint(dentry))
+ 			return 0;
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 2eca30adb3e3..ff742d30ba60 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1610,6 +1610,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 	int namelen;
+ 	int ret = 0;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	ret = mnt_want_write_file(file);
+ 	if (ret)
+ 		goto out;
+@@ -1667,6 +1670,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
+ 	struct btrfs_ioctl_vol_args *vol_args;
+ 	int ret;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -1690,6 +1696,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
+ 	bool readonly = false;
+ 	struct btrfs_qgroup_inherit *inherit = NULL;
+ 
++	if (!S_ISDIR(file_inode(file)->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
+@@ -2318,6 +2327,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 	int ret;
+ 	int err = 0;
+ 
++	if (!S_ISDIR(dir->i_mode))
++		return -ENOTDIR;
++
+ 	vol_args = memdup_user(arg, sizeof(*vol_args));
+ 	if (IS_ERR(vol_args))
+ 		return PTR_ERR(vol_args);
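The btrfs hunks add the same guard to four ioctls that operate on a directory: reject a file descriptor that is not a directory with -ENOTDIR before doing any work. The check itself is ordinary VFS bookkeeping, sketched here in userspace terms:

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

static int require_dir(int fd)
{
	struct stat st;

	if (fstat(fd, &st) != 0)
		return -errno;
	if (!S_ISDIR(st.st_mode))
		return -ENOTDIR;	/* mirrors the kernel's new check */
	return 0;
}

int main(void)
{
	/* stdin is typically a tty or pipe, so this prints -20 (-ENOTDIR) */
	printf("fd 0: %d\n", require_dir(0));
	return 0;
}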
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 5c222f3c4841..bd3c92b4bcee 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -2748,6 +2748,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
+ 
+ 	if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
+ 		blk_finish_plug(&plug);
++		list_del_init(&root_log_ctx.list);
+ 		mutex_unlock(&log_root_tree->log_mutex);
+ 		ret = root_log_ctx.log_ret;
+ 		goto out;
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 2cb9e178d1c5..4196aa567784 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -637,7 +637,13 @@ resizefs_out:
+ 			goto encryption_policy_out;
+ 		}
+ 
++		err = mnt_want_write_file(filp);
++		if (err)
++			goto encryption_policy_out;
++
+ 		err = ext4_process_policy(&policy, inode);
++
++		mnt_drop_write_file(filp);
+ encryption_policy_out:
+ 		return err;
+ #else
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 5cae35490b37..d8f29ef2d819 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -540,13 +540,13 @@ void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
+ 	req->out.args[0].size = count;
+ }
+ 
+-static void fuse_release_user_pages(struct fuse_req *req, int write)
++static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty)
+ {
+ 	unsigned i;
+ 
+ 	for (i = 0; i < req->num_pages; i++) {
+ 		struct page *page = req->pages[i];
+-		if (write)
++		if (should_dirty)
+ 			set_page_dirty_lock(page);
+ 		put_page(page);
+ 	}
+@@ -1331,6 +1331,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ 		       loff_t *ppos, int flags)
+ {
+ 	int write = flags & FUSE_DIO_WRITE;
++	bool should_dirty = !write && iter_is_iovec(iter);
+ 	int cuse = flags & FUSE_DIO_CUSE;
+ 	struct file *file = io->file;
+ 	struct inode *inode = file->f_mapping->host;
+@@ -1375,7 +1376,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
+ 			nres = fuse_send_read(req, io, pos, nbytes, owner);
+ 
+ 		if (!io->async)
+-			fuse_release_user_pages(req, !write);
++			fuse_release_user_pages(req, should_dirty);
+ 		if (req->out.h.error) {
+ 			if (!res)
+ 				res = req->out.h.error;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index eef16ec0638a..319ba6814899 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -7281,12 +7281,20 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
+ 	status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
+ 	trace_nfs4_create_session(clp, status);
+ 
++	switch (status) {
++	case -NFS4ERR_STALE_CLIENTID:
++	case -NFS4ERR_DELAY:
++	case -ETIMEDOUT:
++	case -EACCES:
++	case -EAGAIN:
++		goto out;
++	};
++
++	clp->cl_seqid++;
+ 	if (!status) {
+ 		/* Verify the session's negotiated channel_attrs values */
+ 		status = nfs4_verify_channel_attrs(&args, &res);
+ 		/* Increment the clientid slot sequence id */
+-		if (clp->cl_seqid == res.seqid)
+-			clp->cl_seqid++;
+ 		if (status)
+ 			goto out;
+ 		nfs4_update_session(session, &res);
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index d2f97ecca6a5..e0e5f7c3c99f 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -67,18 +67,7 @@ static int fanotify_get_response(struct fsnotify_group *group,
+ 
+ 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+ 
+-	wait_event(group->fanotify_data.access_waitq, event->response ||
+-				atomic_read(&group->fanotify_data.bypass_perm));
+-
+-	if (!event->response) {	/* bypass_perm set */
+-		/*
+-		 * Event was canceled because group is being destroyed. Remove
+-		 * it from group's event list because we are responsible for
+-		 * freeing the permission event.
+-		 */
+-		fsnotify_remove_event(group, &event->fae.fse);
+-		return 0;
+-	}
++	wait_event(group->fanotify_data.access_waitq, event->response);
+ 
+ 	/* userspace responded, convert to something usable */
+ 	switch (event->response) {
+diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
+index cf275500a665..45ca844d1323 100644
+--- a/fs/notify/fanotify/fanotify_user.c
++++ b/fs/notify/fanotify/fanotify_user.c
+@@ -358,16 +358,20 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ 
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ 	struct fanotify_perm_event_info *event, *next;
++	struct fsnotify_event *fsn_event;
+ 
+ 	/*
+-	 * There may be still new events arriving in the notification queue
+-	 * but since userspace cannot use fanotify fd anymore, no event can
+-	 * enter or leave access_list by now.
++	 * Stop new events from arriving in the notification queue. Since
++	 * userspace cannot use fanotify fd anymore, no event can enter or
++	 * leave access_list by now either.
+ 	 */
+-	spin_lock(&group->fanotify_data.access_lock);
+-
+-	atomic_inc(&group->fanotify_data.bypass_perm);
++	fsnotify_group_stop_queueing(group);
+ 
++	/*
++	 * Process all permission events on access_list and notification queue
++	 * and simulate reply from userspace.
++	 */
++	spin_lock(&group->fanotify_data.access_lock);
+ 	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
+ 				 fae.fse.list) {
+ 		pr_debug("%s: found group=%p event=%p\n", __func__, group,
+@@ -379,12 +383,21 @@ static int fanotify_release(struct inode *ignored, struct file *file)
+ 	spin_unlock(&group->fanotify_data.access_lock);
+ 
+ 	/*
+-	 * Since bypass_perm is set, newly queued events will not wait for
+-	 * access response. Wake up the already sleeping ones now.
+-	 * synchronize_srcu() in fsnotify_destroy_group() will wait for all
+-	 * processes sleeping in fanotify_handle_event() waiting for access
+-	 * response and thus also for all permission events to be freed.
++	 * Destroy all non-permission events. For permission events just
++	 * dequeue them and set the response. They will be freed once the
++	 * response is consumed and fanotify_get_response() returns.
+ 	 */
++	mutex_lock(&group->notification_mutex);
++	while (!fsnotify_notify_queue_is_empty(group)) {
++		fsn_event = fsnotify_remove_first_event(group);
++		if (!(fsn_event->mask & FAN_ALL_PERM_EVENTS))
++			fsnotify_destroy_event(group, fsn_event);
++		else
++			FANOTIFY_PE(fsn_event)->response = FAN_ALLOW;
++	}
++	mutex_unlock(&group->notification_mutex);
++
++	/* Response for all permission events is set, wake up waiters */
+ 	wake_up(&group->fanotify_data.access_waitq);
+ #endif
+ 
+@@ -751,7 +764,6 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
+ 	spin_lock_init(&group->fanotify_data.access_lock);
+ 	init_waitqueue_head(&group->fanotify_data.access_waitq);
+ 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
+-	atomic_set(&group->fanotify_data.bypass_perm, 0);
+ #endif
+ 	switch (flags & FAN_ALL_CLASS_BITS) {
+ 	case FAN_CLASS_NOTIF:
+diff --git a/fs/notify/group.c b/fs/notify/group.c
+index d16b62cb2854..18eb30c6bd8f 100644
+--- a/fs/notify/group.c
++++ b/fs/notify/group.c
+@@ -40,6 +40,17 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
+ }
+ 
+ /*
++ * Stop queueing new events for this group. Once this function returns
++ * fsnotify_add_event() will not add any new events to the group's queue.
++ */
++void fsnotify_group_stop_queueing(struct fsnotify_group *group)
++{
++	mutex_lock(&group->notification_mutex);
++	group->shutdown = true;
++	mutex_unlock(&group->notification_mutex);
++}
++
++/*
+  * Trying to get rid of a group. Remove all marks, flush all events and release
+  * the group reference.
+  * Note that another thread calling fsnotify_clear_marks_by_group() may still
+@@ -47,6 +58,14 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
+  */
+ void fsnotify_destroy_group(struct fsnotify_group *group)
+ {
++	/*
++	 * Stop queueing new events. The code below is careful enough to not
++	 * require this but fanotify needs to stop queuing events even before
++	 * fsnotify_destroy_group() is called and this makes the other callers
++	 * of fsnotify_destroy_group() see the same behavior.
++	 */
++	fsnotify_group_stop_queueing(group);
++
+ 	/* clear all inode marks for this group */
+ 	fsnotify_clear_marks_by_group(group);
+ 
+diff --git a/fs/notify/notification.c b/fs/notify/notification.c
+index a95d8e037aeb..e455e83ceeeb 100644
+--- a/fs/notify/notification.c
++++ b/fs/notify/notification.c
+@@ -82,7 +82,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
+  * Add an event to the group notification queue.  The group can later pull this
+  * event off the queue to deal with.  The function returns 0 if the event was
+  * added to the queue, 1 if the event was merged with some other queued event,
+- * 2 if the queue of events has overflown.
++ * 2 if the event was not queued - either the queue of events has overflown
++ * or the group is shutting down.
+  */
+ int fsnotify_add_event(struct fsnotify_group *group,
+ 		       struct fsnotify_event *event,
+@@ -96,6 +97,11 @@ int fsnotify_add_event(struct fsnotify_group *group,
+ 
+ 	mutex_lock(&group->notification_mutex);
+ 
++	if (group->shutdown) {
++		mutex_unlock(&group->notification_mutex);
++		return 2;
++	}
++
+ 	if (group->q_len >= group->max_events) {
+ 		ret = 2;
+ 		/* Queue overflow event only if it isn't already queued */
+@@ -126,21 +132,6 @@ queue:
+ }
+ 
+ /*
+- * Remove @event from group's notification queue. It is the responsibility of
+- * the caller to destroy the event.
+- */
+-void fsnotify_remove_event(struct fsnotify_group *group,
+-			   struct fsnotify_event *event)
+-{
+-	mutex_lock(&group->notification_mutex);
+-	if (!list_empty(&event->list)) {
+-		list_del_init(&event->list);
+-		group->q_len--;
+-	}
+-	mutex_unlock(&group->notification_mutex);
+-}
+-
+-/*
+  * Remove and return the first event from the notification list.  It is the
+  * responsibility of the caller to destroy the obtained event
+  */
+diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
+index f90931335c6b..2e11658676eb 100644
+--- a/fs/ocfs2/dlm/dlmconvert.c
++++ b/fs/ocfs2/dlm/dlmconvert.c
+@@ -262,7 +262,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 				  struct dlm_lock *lock, int flags, int type)
+ {
+ 	enum dlm_status status;
+-	u8 old_owner = res->owner;
+ 
+ 	mlog(0, "type=%d, convert_type=%d, busy=%d\n", lock->ml.type,
+ 	     lock->ml.convert_type, res->state & DLM_LOCK_RES_IN_PROGRESS);
+@@ -329,7 +328,6 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 
+ 	spin_lock(&res->spinlock);
+ 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
+-	lock->convert_pending = 0;
+ 	/* if it failed, move it back to granted queue.
+ 	 * if master returns DLM_NORMAL and then down before sending ast,
+ 	 * it may have already been moved to granted queue, reset to
+@@ -338,12 +336,14 @@ enum dlm_status dlmconvert_remote(struct dlm_ctxt *dlm,
+ 		if (status != DLM_NOTQUEUED)
+ 			dlm_error(status);
+ 		dlm_revert_pending_convert(res, lock);
+-	} else if ((res->state & DLM_LOCK_RES_RECOVERING) ||
+-			(old_owner != res->owner)) {
+-		mlog(0, "res %.*s is in recovering or has been recovered.\n",
+-				res->lockname.len, res->lockname.name);
++	} else if (!lock->convert_pending) {
++		mlog(0, "%s: res %.*s, owner died and lock has been moved back "
++				"to granted list, retry convert.\n",
++				dlm->name, res->lockname.len, res->lockname.name);
+ 		status = DLM_RECOVERING;
+ 	}
++
++	lock->convert_pending = 0;
+ bail:
+ 	spin_unlock(&res->spinlock);
+ 
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index 3f1ee404f40f..e00be7f509db 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1522,7 +1522,8 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 				       u64 start, u64 len)
+ {
+ 	int ret = 0;
+-	u64 tmpend, end = start + len;
++	u64 tmpend = 0;
++	u64 end = start + len;
+ 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+ 	unsigned int csize = osb->s_clustersize;
+ 	handle_t *handle;
+@@ -1554,18 +1555,31 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
+ 	}
+ 
+ 	/*
+-	 * We want to get the byte offset of the end of the 1st cluster.
++	 * If start is on a cluster boundary and end is somewhere in another
++	 * cluster, we have not COWed the cluster starting at start, unless
++	 * end is also within the same cluster. So, in this case, we skip this
++	 * first call to ocfs2_zero_range_for_truncate() truncate and move on
++	 * to the next one.
+ 	 */
+-	tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
+-	if (tmpend > end)
+-		tmpend = end;
++	if ((start & (csize - 1)) != 0) {
++		/*
++		 * We want to get the byte offset of the end of the 1st
++		 * cluster.
++		 */
++		tmpend = (u64)osb->s_clustersize +
++			(start & ~(osb->s_clustersize - 1));
++		if (tmpend > end)
++			tmpend = end;
+ 
+-	trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
+-						 (unsigned long long)tmpend);
++		trace_ocfs2_zero_partial_clusters_range1(
++			(unsigned long long)start,
++			(unsigned long long)tmpend);
+ 
+-	ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
+-	if (ret)
+-		mlog_errno(ret);
++		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
++						    tmpend);
++		if (ret)
++			mlog_errno(ret);
++	}
+ 
+ 	if (tmpend < end) {
+ 		/*
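The ocfs2 hunk hinges on power-of-two cluster arithmetic: with cluster size csize, start & ~(csize - 1) rounds start down to its cluster, so the first-cluster zeroing pass is only needed when (start & (csize - 1)) != 0, i.e. when start is not already on a cluster boundary. A small worked check of that math, sizes illustrative:

#include <stdio.h>

int main(void)
{
	unsigned long long csize = 4096;
	unsigned long long starts[] = { 0, 300, 4096, 5000 };

	for (int i = 0; i < 4; i++) {
		unsigned long long s = starts[i];
		unsigned long long tmpend = csize + (s & ~(csize - 1));

		printf("start=%llu aligned=%d first-cluster-end=%llu\n",
		       s, (int)((s & (csize - 1)) == 0), tmpend);
	}
	return 0;
}

For start=0 and start=4096 the "aligned" column is 1, and the patch now skips the first ocfs2_zero_range_for_truncate() call in exactly those cases.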
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index c9b740111526..f60b2c745d02 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -679,11 +679,11 @@ retry:
+ 			goto out_dput;
+ 
+ 		err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
+-		if (err && err != -ENODATA)
++		if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ 			goto out_dput;
+ 
+ 		err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
+-		if (err && err != -ENODATA)
++		if (err && err != -ENODATA && err != -EOPNOTSUPP)
+ 			goto out_dput;
+ 
+ 		/* Clear any inherited mode bits */
+diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
+index 72d8803832ff..32901d11f8c4 100644
+--- a/include/asm-generic/uaccess.h
++++ b/include/asm-generic/uaccess.h
+@@ -163,9 +163,10 @@ static inline __must_check long __copy_to_user(void __user *to,
+ 
+ #define put_user(x, ptr)					\
+ ({								\
++	void *__p = (ptr);					\
+ 	might_fault();						\
+-	access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ?		\
+-		__put_user(x, ptr) :				\
++	access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ?		\
++		__put_user((x), ((__typeof__(*(ptr)) *)__p)) :	\
+ 		-EFAULT;					\
+ })
+ 
+@@ -225,17 +226,22 @@ extern int __put_user_bad(void) __attribute__((noreturn));
+ 
+ #define get_user(x, ptr)					\
+ ({								\
++	const void *__p = (ptr);				\
+ 	might_fault();						\
+-	access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ?		\
+-		__get_user(x, ptr) :				\
+-		-EFAULT;					\
++	access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?		\
++		__get_user((x), (__typeof__(*(ptr)) *)__p) :	\
++		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
+ })
+ 
+ #ifndef __get_user_fn
+ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+ {
+-	size = __copy_from_user(x, ptr, size);
+-	return size ? -EFAULT : size;
++	size_t n = __copy_from_user(x, ptr, size);
++	if (unlikely(n)) {
++		memset(x + (size - n), 0, n);
++		return -EFAULT;
++	}
++	return 0;
+ }
+ 
+ #define __get_user_fn(sz, u, k)	__get_user_fn(sz, u, k)
+@@ -255,11 +261,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
+ static inline long copy_from_user(void *to,
+ 		const void __user * from, unsigned long n)
+ {
++	unsigned long res = n;
+ 	might_fault();
+-	if (access_ok(VERIFY_READ, from, n))
+-		return __copy_from_user(to, from, n);
+-	else
+-		return n;
++	if (likely(access_ok(VERIFY_READ, from, n)))
++		res = __copy_from_user(to, from, n);
++	if (unlikely(res))
++		memset(to + (n - res), 0, res);
++	return res;
+ }
+ 
+ static inline long copy_to_user(void __user *to,
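Besides the zeroing, the get_user()/put_user() rewrite above fixes macro hygiene: capturing ptr in a local (__p) means the argument is evaluated exactly once, so callers passing an expression with side effects stay safe, and a failed get_user() now also writes 0 through (x). A sketch of the single-evaluation idiom using a GNU statement expression, as the kernel macros do:

#include <stdio.h>

#define GET_ONCE(x, ptr) ({				\
	__typeof__(ptr) __p = (ptr);	/* evaluated once */	\
	(x) = *__p;					\
	0;						\
})

int main(void)
{
	int buf[2] = { 10, 20 };
	int *p = buf;
	int v;

	GET_ONCE(v, p++);	/* p advances exactly once */
	printf("v=%d p-buf=%ld\n", v, (long)(p - buf));	/* v=10 p-buf=1 */
	return 0;
}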
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 0f313f93c586..46dde3a3c891 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -150,6 +150,7 @@ struct fsnotify_group {
+ 	#define FS_PRIO_1	1 /* fanotify content based access control */
+ 	#define FS_PRIO_2	2 /* fanotify pre-content access */
+ 	unsigned int priority;
++	bool shutdown;		/* group is being shut down, don't queue more events */
+ 
+ 	/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ 	struct mutex mark_mutex;	/* protect marks_list */
+@@ -181,7 +182,6 @@ struct fsnotify_group {
+ 			spinlock_t access_lock;
+ 			struct list_head access_list;
+ 			wait_queue_head_t access_waitq;
+-			atomic_t bypass_perm;
+ #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
+ 			int f_flags;
+ 			unsigned int max_marks;
+@@ -301,6 +301,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
+ extern void fsnotify_get_group(struct fsnotify_group *group);
+ /* drop reference on a group from fsnotify_alloc_group */
+ extern void fsnotify_put_group(struct fsnotify_group *group);
++/* group destruction begins, stop queuing new events */
++extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
+ /* destroy group */
+ extern void fsnotify_destroy_group(struct fsnotify_group *group);
+ /* fasync handler function */
+@@ -313,8 +315,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
+ 			      struct fsnotify_event *event,
+ 			      int (*merge)(struct list_head *,
+ 					   struct fsnotify_event *));
+-/* Remove passed event from groups notification queue */
+-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
+ /* true if the group notification queue is empty */
+ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+ /* return, but do not dequeue the first event on the notification queue */
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 3532dca843f4..33475a37f1bb 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -852,6 +852,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
+ static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+ #endif
+ 
++/*
++ * The irqsave variants are for usage in non-interrupt code. Do not use
++ * them in irq_chip callbacks. Use irq_gc_lock() instead.
++ */
++#define irq_gc_lock_irqsave(gc, flags)	\
++	raw_spin_lock_irqsave(&(gc)->lock, flags)
++
++#define irq_gc_unlock_irqrestore(gc, flags)	\
++	raw_spin_unlock_irqrestore(&(gc)->lock, flags)
++
+ static inline void irq_reg_writel(struct irq_chip_generic *gc,
+ 				  u32 val, int reg_offset)
+ {
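
The new irq_gc_lock_irqsave() helpers exist for process-context paths (for example suspend/resume of a generic chip) that cannot assume interrupts are already disabled, which the plain irq_gc_lock() does. A non-compilable usage sketch — my_gc_suspend() and the saved-state handling are hypothetical, not from this patch:

  static void my_gc_suspend(struct irq_chip_generic *gc)
  {
  	unsigned long flags;

  	irq_gc_lock_irqsave(gc, flags);
  	/* snapshot generic-chip register state while the chip is quiet */
  	irq_gc_unlock_irqrestore(gc, flags);
  }

Inside irq_chip callbacks the core already runs with interrupts off, so the irq_gc_lock()/irq_gc_unlock() pair remains the right choice there.
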
+diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
+index 4b3736f7065c..30a8f531236c 100644
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -594,56 +594,56 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+  */
+ static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
+ {
+-	int ret = 0;
+ 	char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
++	if (unlikely(uaddr > end))
++		return -EFAULT;
+ 	/*
+ 	 * Writing zeroes into userspace here is OK, because we know that if
+ 	 * the zero gets there, we'll be overwriting it.
+ 	 */
+-	while (uaddr <= end) {
+-		ret = __put_user(0, uaddr);
+-		if (ret != 0)
+-			return ret;
++	do {
++		if (unlikely(__put_user(0, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK))
+-		ret = __put_user(0, end);
++		return __put_user(0, end);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static inline int fault_in_multipages_readable(const char __user *uaddr,
+ 					       int size)
+ {
+ 	volatile char c;
+-	int ret = 0;
+ 	const char __user *end = uaddr + size - 1;
+ 
+ 	if (unlikely(size == 0))
+-		return ret;
++		return 0;
+ 
+-	while (uaddr <= end) {
+-		ret = __get_user(c, uaddr);
+-		if (ret != 0)
+-			return ret;
++	if (unlikely(uaddr > end))
++		return -EFAULT;
++
++	do {
++		if (unlikely(__get_user(c, uaddr) != 0))
++			return -EFAULT;
+ 		uaddr += PAGE_SIZE;
+-	}
++	} while (uaddr <= end);
+ 
+ 	/* Check whether the range spilled into the next page. */
+ 	if (((unsigned long)uaddr & PAGE_MASK) ==
+ 			((unsigned long)end & PAGE_MASK)) {
+-		ret = __get_user(c, end);
+-		(void)c;
++		return __get_user(c, end);
+ 	}
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
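
Besides converting the while loops to do/while, the pagemap.h rework adds an explicit uaddr > end guard: if uaddr + size - 1 wraps past the top of the address space, the old loop condition could misbehave on the wrapped range. A runnable model of the wrap check, using plain integers in place of real user pointers:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
  	uintptr_t uaddr = UINTPTR_MAX - 100;	/* near the top of the space */
  	uintptr_t size = 4096;
  	uintptr_t end = uaddr + size - 1;	/* wraps to a small value */

  	/* the patched helpers bail out instead of looping on this range */
  	if (uaddr > end)
  		printf("wrapped: would return -EFAULT (end=%#lx)\n",
  		       (unsigned long)end);
  	return 0;
  }
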
+diff --git a/include/linux/uio.h b/include/linux/uio.h
+index 8b01e1c3c614..5f9c59da978b 100644
+--- a/include/linux/uio.h
++++ b/include/linux/uio.h
+@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
+ 		struct iov_iter *i, unsigned long offset, size_t bytes);
+ void iov_iter_advance(struct iov_iter *i, size_t bytes);
+ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
++#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
+ size_t iov_iter_single_seg_count(const struct iov_iter *i);
+ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+ 			 struct iov_iter *i);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 98e607121d09..6cb5f00696f5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1672,6 +1672,28 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ 	success = 1; /* we're going to change ->state */
+ 	cpu = task_cpu(p);
+ 
++	/*
++	 * Ensure we load p->on_rq _after_ p->state, otherwise it would
++	 * be possible to, falsely, observe p->on_rq == 0 and get stuck
++	 * in smp_cond_load_acquire() below.
++	 *
++	 * sched_ttwu_pending()                 try_to_wake_up()
++	 *   [S] p->on_rq = 1;                  [L] p->state
++	 *       UNLOCK rq->lock  -----.
++	 *                              \
++	 *				 +---   RMB
++	 * schedule()                   /
++	 *       LOCK rq->lock    -----'
++	 *       UNLOCK rq->lock
++	 *
++	 * [task p]
++	 *   [S] p->state = UNINTERRUPTIBLE     [L] p->on_rq
++	 *
++	 * Pairs with the UNLOCK+LOCK on rq->lock from the
++	 * last wakeup of our task and the schedule that got our task
++	 * current.
++	 */
++	smp_rmb();
+ 	if (p->on_rq && ttwu_remote(p, wake_flags))
+ 		goto stat;
+ 
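
The new smp_rmb() enforces load-load ordering: the waker must not read p->on_rq before it reads p->state, or it can wrongly conclude the task is not queued. A userspace model of the same pairing with C11 atomics — the names are hypothetical, and release/acquire here stands in for what the kernel gets from the rq->lock UNLOCK+LOCK plus this barrier:

  #include <stdatomic.h>
  #include <stdio.h>

  static _Atomic int task_state;	/* 0 = RUNNING, 1 = UNINTERRUPTIBLE */
  static _Atomic int on_rq;

  static void sleeper(void)
  {
  	atomic_store_explicit(&task_state, 1, memory_order_release);
  }

  static int waker(void)
  {
  	int st = atomic_load_explicit(&task_state, memory_order_acquire);
  	/* the acquire above keeps this load from moving before the
  	 * state load -- the role smp_rmb() plays in the patch */
  	int rq = atomic_load_explicit(&on_rq, memory_order_relaxed);
  	return st && rq;
  }

  int main(void)
  {
  	atomic_store(&on_rq, 1);
  	sleeper();
  	printf("wakeup needed: %d\n", waker());
  	return 0;
  }
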
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index de6ea94c41bb..61ea7e8cdde5 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4694,19 +4694,20 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+ 	struct trace_iterator *iter = filp->private_data;
+ 	ssize_t sret;
+ 
+-	/* return any leftover data */
+-	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+-	if (sret != -EBUSY)
+-		return sret;
+-
+-	trace_seq_init(&iter->seq);
+-
+ 	/*
+ 	 * Avoid more than one consumer on a single file descriptor
+ 	 * This is just a matter of traces coherency, the ring buffer itself
+ 	 * is protected.
+ 	 */
+ 	mutex_lock(&iter->mutex);
++
++	/* return any leftover data */
++	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
++	if (sret != -EBUSY)
++		goto out;
++
++	trace_seq_init(&iter->seq);
++
+ 	if (iter->trace->read) {
+ 		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
+ 		if (sret)
+@@ -5731,9 +5732,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		return -EBUSY;
+ #endif
+ 
+-	if (splice_grow_spd(pipe, &spd))
+-		return -ENOMEM;
+-
+ 	if (*ppos & (PAGE_SIZE - 1))
+ 		return -EINVAL;
+ 
+@@ -5743,6 +5741,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 		len &= PAGE_MASK;
+ 	}
+ 
++	if (splice_grow_spd(pipe, &spd))
++		return -ENOMEM;
++
+  again:
+ 	trace_access_lock(iter->cpu_file);
+ 	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
+@@ -5800,19 +5801,21 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
+ 	/* did we read anything? */
+ 	if (!spd.nr_pages) {
+ 		if (ret)
+-			return ret;
++			goto out;
+ 
++		ret = -EAGAIN;
+ 		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
+-			return -EAGAIN;
++			goto out;
+ 
+ 		ret = wait_on_pipe(iter, true);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 
+ 		goto again;
+ 	}
+ 
+ 	ret = splice_to_pipe(pipe, &spd);
++out:
+ 	splice_shrink_spd(&spd);
+ 
+ 	return ret;
+diff --git a/lib/iov_iter.c b/lib/iov_iter.c
+index 75232ad0a5e7..daca582a8ed0 100644
+--- a/lib/iov_iter.c
++++ b/lib/iov_iter.c
+@@ -298,33 +298,13 @@ done:
+ }
+ 
+ /*
+- * Fault in the first iovec of the given iov_iter, to a maximum length
+- * of bytes. Returns 0 on success, or non-zero if the memory could not be
+- * accessed (ie. because it is an invalid address).
+- *
+- * writev-intensive code may want this to prefault several iovecs -- that
+- * would be possible (callers must not rely on the fact that _only_ the
+- * first iovec will be faulted with the current implementation).
+- */
+-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+-{
+-	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
+-		char __user *buf = i->iov->iov_base + i->iov_offset;
+-		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+-		return fault_in_pages_readable(buf, bytes);
+-	}
+-	return 0;
+-}
+-EXPORT_SYMBOL(iov_iter_fault_in_readable);
+-
+-/*
+  * Fault in one or more iovecs of the given iov_iter, to a maximum length of
+  * bytes.  For each iovec, fault in each page that constitutes the iovec.
+  *
+  * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
+  * because it is an invalid address).
+  */
+-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
++int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+ {
+ 	size_t skip = i->iov_offset;
+ 	const struct iovec *iov;
+@@ -341,7 +321,7 @@ int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes)
+ 	}
+ 	return 0;
+ }
+-EXPORT_SYMBOL(iov_iter_fault_in_multipages_readable);
++EXPORT_SYMBOL(iov_iter_fault_in_readable);
+ 
+ void iov_iter_init(struct iov_iter *i, int direction,
+ 			const struct iovec *iov, unsigned long nr_segs,
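
Folding iov_iter_fault_in_multipages_readable() into iov_iter_fault_in_readable() means every caller now pre-faults all pages of the segments it will touch, not just the first page. The classic caller shape pre-faults user memory before taking page-cache locks, so a fault never has to be serviced while they are held. A non-compilable sketch modeled on the generic write path (names are illustrative):

  /* sketch only: modeled on generic_perform_write()-style callers */
  if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
  	status = -EFAULT;
  	break;
  }
  status = a_ops->write_begin(file, mapping, pos, bytes, flags,
  			     &page, &fsdata);
  /* the later atomic copy from user space now has pages resident;
   * if it still faults, the loop shrinks bytes and retries */
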
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f555f4fc1d62..c66b7d325a39 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -1801,6 +1801,7 @@ errdad:
+ 	spin_unlock_bh(&ifp->lock);
+ 
+ 	addrconf_mod_dad_work(ifp, 0);
++	in6_ifa_put(ifp);
+ }
+ 
+ /* Join to solicited addr multicast group.
+@@ -3526,6 +3527,7 @@ static void addrconf_dad_work(struct work_struct *w)
+ 		addrconf_dad_begin(ifp);
+ 		goto out;
+ 	} else if (action == DAD_ABORT) {
++		in6_ifa_hold(ifp);
+ 		addrconf_dad_stop(ifp, 1);
+ 		goto out;
+ 	}
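
Both addrconf hunks balance the address refcount across deferred DAD work: the worker takes a reference (the in6_ifa_hold() added here) before a path that later drops one (the in6_ifa_put() added in the first hunk), keeping ifp alive as long as anything still queued points at it. A small runnable model of the general hold-before-defer invariant, with hypothetical names:

  #include <stdio.h>

  struct obj { int refs; };

  static void get(struct obj *o) { o->refs++; }
  static void put(struct obj *o)
  {
  	if (--o->refs == 0)
  		printf("freed\n");
  }

  /* the deferred handler always drops the reference it was handed */
  static void deferred_stop(struct obj *o) { put(o); }

  int main(void)
  {
  	struct obj ifp = { .refs = 1 };

  	get(&ifp);		/* the in6_ifa_hold() the patch adds */
  	deferred_stop(&ifp);	/* pairs with the in6_ifa_put() above */
  	printf("refs=%d (object still owned by its list)\n", ifp.refs);
  	return 0;
  }

Without the extra get(), deferred_stop() would drop the last reference while the address is still linked on the interface.
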
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index f24138681b80..978d7f91ca91 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -6466,7 +6466,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+ 
+ 		params.n_counter_offsets_presp = len / sizeof(u16);
+ 		if (rdev->wiphy.max_num_csa_counters &&
+-		    (params.n_counter_offsets_beacon >
++		    (params.n_counter_offsets_presp >
+ 		     rdev->wiphy.max_num_csa_counters))
+ 			return -EINVAL;
+ 
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index c753211cb83f..b50ee5d622e1 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -955,29 +955,8 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ 			return private(dev, iwr, cmd, info, handler);
+ 	}
+ 	/* Old driver API : call driver ioctl handler */
+-	if (dev->netdev_ops->ndo_do_ioctl) {
+-#ifdef CONFIG_COMPAT
+-		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
+-			int ret = 0;
+-			struct iwreq iwr_lcl;
+-			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
+-
+-			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
+-			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
+-			iwr_lcl.u.data.length = iwp_compat->length;
+-			iwr_lcl.u.data.flags = iwp_compat->flags;
+-
+-			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
+-
+-			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
+-			iwp_compat->length = iwr_lcl.u.data.length;
+-			iwp_compat->flags = iwr_lcl.u.data.flags;
+-
+-			return ret;
+-		} else
+-#endif
+-			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+-	}
++	if (dev->netdev_ops->ndo_do_ioctl)
++		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
+ 	return -EOPNOTSUPP;
+ }
+ 
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index 795437b10082..b450a27588c8 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -1633,11 +1633,13 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 		return -EBUSY;
+ 	}
+ 	list_add_tail(&rmidi->list, &snd_rawmidi_devices);
++	mutex_unlock(&register_mutex);
+ 	err = snd_register_device(SNDRV_DEVICE_TYPE_RAWMIDI,
+ 				  rmidi->card, rmidi->device,
+ 				  &snd_rawmidi_f_ops, rmidi, &rmidi->dev);
+ 	if (err < 0) {
+ 		rmidi_err(rmidi, "unable to register\n");
++		mutex_lock(&register_mutex);
+ 		list_del(&rmidi->list);
+ 		mutex_unlock(&register_mutex);
+ 		return err;
+@@ -1645,6 +1647,7 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 	if (rmidi->ops && rmidi->ops->dev_register &&
+ 	    (err = rmidi->ops->dev_register(rmidi)) < 0) {
+ 		snd_unregister_device(&rmidi->dev);
++		mutex_lock(&register_mutex);
+ 		list_del(&rmidi->list);
+ 		mutex_unlock(&register_mutex);
+ 		return err;
+@@ -1677,7 +1680,6 @@ static int snd_rawmidi_dev_register(struct snd_device *device)
+ 		}
+ 	}
+ #endif /* CONFIG_SND_OSSEMUL */
+-	mutex_unlock(&register_mutex);
+ 	sprintf(name, "midi%d", rmidi->device);
+ 	entry = snd_info_create_card_entry(rmidi->card, name, rmidi->card->proc_root);
+ 	if (entry) {
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 43e785a79eb3..8800d237369a 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -35,6 +35,9 @@
+ #include <sound/initval.h>
+ #include <linux/kmod.h>
+ 
++/* internal flags */
++#define SNDRV_TIMER_IFLG_PAUSED		0x00010000
++
+ #if IS_ENABLED(CONFIG_SND_HRTIMER)
+ #define DEFAULT_TIMER_LIMIT 4
+ #elif IS_ENABLED(CONFIG_SND_RTCTIMER)
+@@ -296,8 +299,21 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 		get_device(&timer->card->card_dev);
+ 	timeri->slave_class = tid->dev_sclass;
+ 	timeri->slave_id = slave_id;
+-	if (list_empty(&timer->open_list_head) && timer->hw.open)
+-		timer->hw.open(timer);
++
++	if (list_empty(&timer->open_list_head) && timer->hw.open) {
++		int err = timer->hw.open(timer);
++		if (err) {
++			kfree(timeri->owner);
++			kfree(timeri);
++
++			if (timer->card)
++				put_device(&timer->card->card_dev);
++			module_put(timer->module);
++			mutex_unlock(&register_mutex);
++			return err;
++		}
++	}
++
+ 	list_add_tail(&timeri->open_list, &timer->open_list_head);
+ 	snd_timer_check_master(timeri);
+ 	mutex_unlock(&register_mutex);
+@@ -305,8 +321,6 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 	return 0;
+ }
+ 
+-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
+-
+ /*
+  * close a timer instance
+  */
+@@ -395,7 +409,6 @@ unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
+ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ {
+ 	struct snd_timer *timer;
+-	unsigned long flags;
+ 	unsigned long resolution = 0;
+ 	struct snd_timer_instance *ts;
+ 	struct timespec tstamp;
+@@ -419,34 +432,66 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ 		return;
+ 	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ 		return;
+-	spin_lock_irqsave(&timer->lock, flags);
+ 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ 		if (ts->ccallback)
+ 			ts->ccallback(ts, event + 100, &tstamp, resolution);
+-	spin_unlock_irqrestore(&timer->lock, flags);
+ }
+ 
+-static int snd_timer_start1(struct snd_timer *timer, struct snd_timer_instance *timeri,
+-			    unsigned long sticks)
++/* start/continue a master timer */
++static int snd_timer_start1(struct snd_timer_instance *timeri,
++			    bool start, unsigned long ticks)
+ {
++	struct snd_timer *timer;
++	int result;
++	unsigned long flags;
++
++	timer = timeri->timer;
++	if (!timer)
++		return -EINVAL;
++
++	spin_lock_irqsave(&timer->lock, flags);
++	if (timer->card && timer->card->shutdown) {
++		result = -ENODEV;
++		goto unlock;
++	}
++	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++			     SNDRV_TIMER_IFLG_START)) {
++		result = -EBUSY;
++		goto unlock;
++	}
++
++	if (start)
++		timeri->ticks = timeri->cticks = ticks;
++	else if (!timeri->cticks)
++		timeri->cticks = 1;
++	timeri->pticks = 0;
++
+ 	list_move_tail(&timeri->active_list, &timer->active_list_head);
+ 	if (timer->running) {
+ 		if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
+ 			goto __start_now;
+ 		timer->flags |= SNDRV_TIMER_FLG_RESCHED;
+ 		timeri->flags |= SNDRV_TIMER_IFLG_START;
+-		return 1;	/* delayed start */
++		result = 1; /* delayed start */
+ 	} else {
+-		timer->sticks = sticks;
++		if (start)
++			timer->sticks = ticks;
+ 		timer->hw.start(timer);
+ 	      __start_now:
+ 		timer->running++;
+ 		timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+-		return 0;
++		result = 0;
+ 	}
++	snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
++			  SNDRV_TIMER_EVENT_CONTINUE);
++ unlock:
++	spin_unlock_irqrestore(&timer->lock, flags);
++	return result;
+ }
+ 
+-static int snd_timer_start_slave(struct snd_timer_instance *timeri)
++/* start/continue a slave timer */
++static int snd_timer_start_slave(struct snd_timer_instance *timeri,
++				 bool start)
+ {
+ 	unsigned long flags;
+ 
+@@ -460,88 +505,37 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ 		spin_lock(&timeri->timer->lock);
+ 		list_add_tail(&timeri->active_list,
+ 			      &timeri->master->slave_active_head);
++		snd_timer_notify1(timeri, start ? SNDRV_TIMER_EVENT_START :
++				  SNDRV_TIMER_EVENT_CONTINUE);
+ 		spin_unlock(&timeri->timer->lock);
+ 	}
+ 	spin_unlock_irqrestore(&slave_active_lock, flags);
+ 	return 1; /* delayed start */
+ }
+ 
+-/*
+- *  start the timer instance
+- */
+-int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
++/* stop/pause a master timer */
++static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
+ {
+ 	struct snd_timer *timer;
+-	int result = -EINVAL;
++	int result = 0;
+ 	unsigned long flags;
+ 
+-	if (timeri == NULL || ticks < 1)
+-		return -EINVAL;
+-	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+-		result = snd_timer_start_slave(timeri);
+-		if (result >= 0)
+-			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+-		return result;
+-	}
+-	timer = timeri->timer;
+-	if (timer == NULL)
+-		return -EINVAL;
+-	if (timer->card && timer->card->shutdown)
+-		return -ENODEV;
+-	spin_lock_irqsave(&timer->lock, flags);
+-	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+-			     SNDRV_TIMER_IFLG_START)) {
+-		result = -EBUSY;
+-		goto unlock;
+-	}
+-	timeri->ticks = timeri->cticks = ticks;
+-	timeri->pticks = 0;
+-	result = snd_timer_start1(timer, timeri, ticks);
+- unlock:
+-	spin_unlock_irqrestore(&timer->lock, flags);
+-	if (result >= 0)
+-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+-	return result;
+-}
+-
+-static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+-{
+-	struct snd_timer *timer;
+-	unsigned long flags;
+-
+-	if (snd_BUG_ON(!timeri))
+-		return -ENXIO;
+-
+-	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+-		spin_lock_irqsave(&slave_active_lock, flags);
+-		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+-			spin_unlock_irqrestore(&slave_active_lock, flags);
+-			return -EBUSY;
+-		}
+-		if (timeri->timer)
+-			spin_lock(&timeri->timer->lock);
+-		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+-		list_del_init(&timeri->ack_list);
+-		list_del_init(&timeri->active_list);
+-		if (timeri->timer)
+-			spin_unlock(&timeri->timer->lock);
+-		spin_unlock_irqrestore(&slave_active_lock, flags);
+-		goto __end;
+-	}
+ 	timer = timeri->timer;
+ 	if (!timer)
+ 		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+ 			       SNDRV_TIMER_IFLG_START))) {
+-		spin_unlock_irqrestore(&timer->lock, flags);
+-		return -EBUSY;
++		result = -EBUSY;
++		goto unlock;
+ 	}
+ 	list_del_init(&timeri->ack_list);
+ 	list_del_init(&timeri->active_list);
+-	if (timer->card && timer->card->shutdown) {
+-		spin_unlock_irqrestore(&timer->lock, flags);
+-		return 0;
++	if (timer->card && timer->card->shutdown)
++		goto unlock;
++	if (stop) {
++		timeri->cticks = timeri->ticks;
++		timeri->pticks = 0;
+ 	}
+ 	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+ 	    !(--timer->running)) {
+@@ -556,35 +550,64 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ 		}
+ 	}
+ 	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
++	if (stop)
++		timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
++	else
++		timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
++	snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
++			  SNDRV_TIMER_EVENT_CONTINUE);
++ unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+-      __end:
+-	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
+-		snd_timer_notify1(timeri, event);
++	return result;
++}
++
++/* stop/pause a slave timer */
++static int snd_timer_stop_slave(struct snd_timer_instance *timeri, bool stop)
++{
++	unsigned long flags;
++
++	spin_lock_irqsave(&slave_active_lock, flags);
++	if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
++		spin_unlock_irqrestore(&slave_active_lock, flags);
++		return -EBUSY;
++	}
++	timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++	if (timeri->timer) {
++		spin_lock(&timeri->timer->lock);
++		list_del_init(&timeri->ack_list);
++		list_del_init(&timeri->active_list);
++		snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
++				  SNDRV_TIMER_EVENT_CONTINUE);
++		spin_unlock(&timeri->timer->lock);
++	}
++	spin_unlock_irqrestore(&slave_active_lock, flags);
+ 	return 0;
+ }
+ 
+ /*
++ *  start the timer instance
++ */
++int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
++{
++	if (timeri == NULL || ticks < 1)
++		return -EINVAL;
++	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++		return snd_timer_start_slave(timeri, true);
++	else
++		return snd_timer_start1(timeri, true, ticks);
++}
++
++/*
+  * stop the timer instance.
+  *
+  * do not call this from the timer callback!
+  */
+ int snd_timer_stop(struct snd_timer_instance *timeri)
+ {
+-	struct snd_timer *timer;
+-	unsigned long flags;
+-	int err;
+-
+-	err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
+-	if (err < 0)
+-		return err;
+-	timer = timeri->timer;
+-	if (!timer)
+-		return -EINVAL;
+-	spin_lock_irqsave(&timer->lock, flags);
+-	timeri->cticks = timeri->ticks;
+-	timeri->pticks = 0;
+-	spin_unlock_irqrestore(&timer->lock, flags);
+-	return 0;
++	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++		return snd_timer_stop_slave(timeri, true);
++	else
++		return snd_timer_stop1(timeri, true);
+ }
+ 
+ /*
+@@ -592,32 +615,14 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
+  */
+ int snd_timer_continue(struct snd_timer_instance *timeri)
+ {
+-	struct snd_timer *timer;
+-	int result = -EINVAL;
+-	unsigned long flags;
++	/* timer can continue only after pause */
++	if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
++		return -EINVAL;
+ 
+-	if (timeri == NULL)
+-		return result;
+ 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
+-		return snd_timer_start_slave(timeri);
+-	timer = timeri->timer;
+-	if (! timer)
+-		return -EINVAL;
+-	if (timer->card && timer->card->shutdown)
+-		return -ENODEV;
+-	spin_lock_irqsave(&timer->lock, flags);
+-	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+-		result = -EBUSY;
+-		goto unlock;
+-	}
+-	if (!timeri->cticks)
+-		timeri->cticks = 1;
+-	timeri->pticks = 0;
+-	result = snd_timer_start1(timer, timeri, timer->sticks);
+- unlock:
+-	spin_unlock_irqrestore(&timer->lock, flags);
+-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
+-	return result;
++		return snd_timer_start_slave(timeri, false);
++	else
++		return snd_timer_start1(timeri, false, 0);
+ }
+ 
+ /*
+@@ -625,7 +630,10 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+  */
+ int snd_timer_pause(struct snd_timer_instance * timeri)
+ {
+-	return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
++	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
++		return snd_timer_stop_slave(timeri, false);
++	else
++		return snd_timer_stop1(timeri, false);
+ }
+ 
+ /*
+@@ -837,6 +845,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
+ 	timer->tmr_subdevice = tid->subdevice;
+ 	if (id)
+ 		strlcpy(timer->id, id, sizeof(timer->id));
++	timer->sticks = 1;
+ 	INIT_LIST_HEAD(&timer->device_list);
+ 	INIT_LIST_HEAD(&timer->open_list_head);
+ 	INIT_LIST_HEAD(&timer->active_list_head);
+@@ -1826,6 +1835,9 @@ static int snd_timer_user_continue(struct file *file)
+ 	tu = file->private_data;
+ 	if (!tu->timeri)
+ 		return -EBADFD;
++	/* start timer instead of continue if it's not used before */
++	/* start the timer instead of continuing if it hasn't been used before */
++		return snd_timer_user_start(file);
+ 	tu->timeri->lost = 0;
+ 	return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
+ }
+@@ -1967,6 +1979,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 		tu->qused--;
+ 		spin_unlock_irq(&tu->qlock);
+ 
++		mutex_lock(&tu->ioctl_lock);
+ 		if (tu->tread) {
+ 			if (copy_to_user(buffer, &tu->tqueue[qhead],
+ 					 sizeof(struct snd_timer_tread)))
+@@ -1976,6 +1989,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 					 sizeof(struct snd_timer_read)))
+ 				err = -EFAULT;
+ 		}
++		mutex_unlock(&tu->ioctl_lock);
+ 
+ 		spin_lock_irq(&tu->qlock);
+ 		if (err < 0)
+diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
+index 084d414b228c..b431c34b2d74 100644
+--- a/sound/firewire/fireworks/fireworks.h
++++ b/sound/firewire/fireworks/fireworks.h
+@@ -106,7 +106,6 @@ struct snd_efw {
+ 	u8 *resp_buf;
+ 	u8 *pull_ptr;
+ 	u8 *push_ptr;
+-	unsigned int resp_queues;
+ };
+ 
+ int snd_efw_transaction_cmd(struct fw_unit *unit,
+diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
+index 33df8655fe81..2e1d9a23920c 100644
+--- a/sound/firewire/fireworks/fireworks_hwdep.c
++++ b/sound/firewire/fireworks/fireworks_hwdep.c
+@@ -25,6 +25,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
+ {
+ 	unsigned int length, till_end, type;
+ 	struct snd_efw_transaction *t;
++	u8 *pull_ptr;
+ 	long count = 0;
+ 
+ 	if (remained < sizeof(type) + sizeof(struct snd_efw_transaction))
+@@ -38,8 +39,17 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
+ 	buf += sizeof(type);
+ 
+ 	/* write into buffer as many responses as possible */
+-	while (efw->resp_queues > 0) {
+-		t = (struct snd_efw_transaction *)(efw->pull_ptr);
++	spin_lock_irq(&efw->lock);
++
++	/*
++	 * When another task reaches here during this task's access to user
++	 * space, it picks up the current position in the buffer and can read
++	 * the same series of responses.
++	 */
++	pull_ptr = efw->pull_ptr;
++
++	while (efw->push_ptr != pull_ptr) {
++		t = (struct snd_efw_transaction *)(pull_ptr);
+ 		length = be32_to_cpu(t->length) * sizeof(__be32);
+ 
+ 		/* confirm enough space for this response */
+@@ -49,26 +59,39 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
+ 		/* copy from ring buffer to user buffer */
+ 		while (length > 0) {
+ 			till_end = snd_efw_resp_buf_size -
+-				(unsigned int)(efw->pull_ptr - efw->resp_buf);
++				(unsigned int)(pull_ptr - efw->resp_buf);
+ 			till_end = min_t(unsigned int, length, till_end);
+ 
+-			if (copy_to_user(buf, efw->pull_ptr, till_end))
++			spin_unlock_irq(&efw->lock);
++
++			if (copy_to_user(buf, pull_ptr, till_end))
+ 				return -EFAULT;
+ 
+-			efw->pull_ptr += till_end;
+-			if (efw->pull_ptr >= efw->resp_buf +
+-					     snd_efw_resp_buf_size)
+-				efw->pull_ptr -= snd_efw_resp_buf_size;
++			spin_lock_irq(&efw->lock);
++
++			pull_ptr += till_end;
++			if (pull_ptr >= efw->resp_buf + snd_efw_resp_buf_size)
++				pull_ptr -= snd_efw_resp_buf_size;
+ 
+ 			length -= till_end;
+ 			buf += till_end;
+ 			count += till_end;
+ 			remained -= till_end;
+ 		}
+-
+-		efw->resp_queues--;
+ 	}
+ 
++	/*
++	 * All tasks can read from the buffer nearly simultaneously, but the
++	 * final position of each task differs with the length of the given
++	 * buffer. Here, for simplicity, the buffer position is set by the
++	 * latest task. A listening application is better off letting a single
++	 * thread read from the buffer; otherwise each task can read a
++	 * different sequence of responses depending on its buffer length.
++	 */
++	efw->pull_ptr = pull_ptr;
++
++	spin_unlock_irq(&efw->lock);
++
+ 	return count;
+ }
+ 
+@@ -76,14 +99,17 @@ static long
+ hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
+ 		  loff_t *offset)
+ {
+-	union snd_firewire_event event;
++	union snd_firewire_event event = {
++		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
++	};
+ 
+-	memset(&event, 0, sizeof(event));
++	spin_lock_irq(&efw->lock);
+ 
+-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
+ 	event.lock_status.status = (efw->dev_lock_count > 0);
+ 	efw->dev_lock_changed = false;
+ 
++	spin_unlock_irq(&efw->lock);
++
+ 	count = min_t(long, count, sizeof(event.lock_status));
+ 
+ 	if (copy_to_user(buf, &event, count))
+@@ -98,10 +124,15 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ {
+ 	struct snd_efw *efw = hwdep->private_data;
+ 	DEFINE_WAIT(wait);
++	bool dev_lock_changed;
++	bool queued;
+ 
+ 	spin_lock_irq(&efw->lock);
+ 
+-	while ((!efw->dev_lock_changed) && (efw->resp_queues == 0)) {
++	dev_lock_changed = efw->dev_lock_changed;
++	queued = efw->push_ptr != efw->pull_ptr;
++
++	while (!dev_lock_changed && !queued) {
+ 		prepare_to_wait(&efw->hwdep_wait, &wait, TASK_INTERRUPTIBLE);
+ 		spin_unlock_irq(&efw->lock);
+ 		schedule();
+@@ -109,15 +140,17 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
+ 		if (signal_pending(current))
+ 			return -ERESTARTSYS;
+ 		spin_lock_irq(&efw->lock);
++		dev_lock_changed = efw->dev_lock_changed;
++		queued = efw->push_ptr != efw->pull_ptr;
+ 	}
+ 
+-	if (efw->dev_lock_changed)
++	spin_unlock_irq(&efw->lock);
++
++	if (dev_lock_changed)
+ 		count = hwdep_read_locked(efw, buf, count, offset);
+-	else if (efw->resp_queues > 0)
++	else if (queued)
+ 		count = hwdep_read_resp_buf(efw, buf, count, offset);
+ 
+-	spin_unlock_irq(&efw->lock);
+-
+ 	return count;
+ }
+ 
+@@ -160,7 +193,7 @@ hwdep_poll(struct snd_hwdep *hwdep, struct file *file, poll_table *wait)
+ 	poll_wait(file, &efw->hwdep_wait, wait);
+ 
+ 	spin_lock_irq(&efw->lock);
+-	if (efw->dev_lock_changed || (efw->resp_queues > 0))
++	if (efw->dev_lock_changed || efw->pull_ptr != efw->push_ptr)
+ 		events = POLLIN | POLLRDNORM;
+ 	else
+ 		events = 0;
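
The hwdep rework replaces the resp_queues counter with a direct push/pull pointer comparison and, more importantly, drops efw->lock around copy_to_user(), which may fault and sleep — something that must never happen under a spinlock. The trick is a local cursor: snapshot the pull pointer under the lock, release the lock for each copy, re-acquire, and commit the cursor once at the end. A runnable userspace model of that shape (a mutex stands in for the spinlock, a plain store for copy_to_user):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static char ring[16] = "0123456789abcde";
  static int pull, push = 8;	/* 8 bytes queued */

  static int read_some(char *dst, int want)
  {
  	pthread_mutex_lock(&lock);
  	int cursor = pull;			/* local snapshot */
  	int n = 0;
  	while (cursor != push && n < want) {
  		char c = ring[cursor];
  		pthread_mutex_unlock(&lock);	/* copy may "fault"/sleep */
  		dst[n++] = c;			/* stands in for copy_to_user */
  		pthread_mutex_lock(&lock);
  		cursor = (cursor + 1) % (int)sizeof(ring);
  	}
  	pull = cursor;				/* commit once, like the patch */
  	pthread_mutex_unlock(&lock);
  	return n;
  }

  int main(void)
  {
  	char buf[9] = { 0 };
  	printf("read %d bytes: %s\n", read_some(buf, 8), buf);
  	return 0;
  }
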
+diff --git a/sound/firewire/fireworks/fireworks_proc.c b/sound/firewire/fireworks/fireworks_proc.c
+index 0639dcb13f7d..beb0a0ffee57 100644
+--- a/sound/firewire/fireworks/fireworks_proc.c
++++ b/sound/firewire/fireworks/fireworks_proc.c
+@@ -188,8 +188,8 @@ proc_read_queues_state(struct snd_info_entry *entry,
+ 	else
+ 		consumed = (unsigned int)(efw->push_ptr - efw->pull_ptr);
+ 
+-	snd_iprintf(buffer, "%d %d/%d\n",
+-		    efw->resp_queues, consumed, snd_efw_resp_buf_size);
++	snd_iprintf(buffer, "%d/%d\n",
++		    consumed, snd_efw_resp_buf_size);
+ }
+ 
+ static void
+diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
+index f550808d1784..36a08ba51ec7 100644
+--- a/sound/firewire/fireworks/fireworks_transaction.c
++++ b/sound/firewire/fireworks/fireworks_transaction.c
+@@ -121,11 +121,11 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
+ 	size_t capacity, till_end;
+ 	struct snd_efw_transaction *t;
+ 
+-	spin_lock_irq(&efw->lock);
+-
+ 	t = (struct snd_efw_transaction *)data;
+ 	length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
+ 
++	spin_lock_irq(&efw->lock);
++
+ 	if (efw->push_ptr < efw->pull_ptr)
+ 		capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
+ 	else
+@@ -155,7 +155,6 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
+ 	}
+ 
+ 	/* for hwdep */
+-	efw->resp_queues++;
+ 	wake_up(&efw->hwdep_wait);
+ 
+ 	*rcode = RCODE_COMPLETE;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 429697a93a71..2a9ec9706db8 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4750,6 +4750,7 @@ enum {
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
+ 	ALC298_FIXUP_SPK_VOLUME,
++	ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5411,6 +5412,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	},
++	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x1b, 0x90170151 },
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5455,6 +5465,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index e27df0d3898b..4a083433944e 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1137,6 +1137,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
+ 	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+ 	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-09-18 12:47 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-09-18 12:47 UTC (permalink / raw
  To: gentoo-commits

commit:     859d270f730c3d0b708c6cc88ddfc3ed97fc5b3a
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Sep 18 12:47:05 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Sep 18 12:47:05 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=859d270f

Linux patches 4.1.32 and 4.1.33

 0000_README             |    8 +
 1031_linux-4.1.32.patch | 1489 +++++++++++++++++++++++++++++++++++++++++++++++
 1032_linux-4.1.33.patch |  982 +++++++++++++++++++++++++++++++
 3 files changed, 2479 insertions(+)

diff --git a/0000_README b/0000_README
index 0ce7ab6..9a2fbfa 100644
--- a/0000_README
+++ b/0000_README
@@ -167,6 +167,14 @@ Patch:  1030_linux-4.1.31.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.31
 
+Patch:  1031_linux-4.1.32.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.32
+
+Patch:  1032_linux-4.1.33.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.33
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1031_linux-4.1.32.patch b/1031_linux-4.1.32.patch
new file mode 100644
index 0000000..87bef87
--- /dev/null
+++ b/1031_linux-4.1.32.patch
@@ -0,0 +1,1489 @@
+diff --git a/Makefile b/Makefile
+index bea5ca2ca2b0..e995f28b6d37 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 31
++SUBLEVEL = 32
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
+index 884081099f80..81cdbc36699b 100644
+--- a/arch/arc/include/asm/entry.h
++++ b/arch/arc/include/asm/entry.h
+@@ -143,8 +143,6 @@
+ 	POP	r13
+ .endm
+ 
+-#define OFF_USER_R25_FROM_R24	(SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
+-
+ /*--------------------------------------------------------------
+  * Collect User Mode callee regs as struct callee_regs - needed by
+  * fork/do_signal/unaligned-access-emulation.
+@@ -157,12 +155,13 @@
+  *-------------------------------------------------------------*/
+ .macro SAVE_CALLEE_SAVED_USER
+ 
++	mov	r12, sp		; save SP as ref to pt_regs
+ 	SAVE_R13_TO_R24
+ 
+ #ifdef CONFIG_ARC_CURR_IN_REG
+-	; Retrieve orig r25 and save it on stack
+-	ld.as   r12, [sp, OFF_USER_R25_FROM_R24]
+-	st.a    r12, [sp, -4]
++	; Retrieve orig r25 and save it with rest of callee_regs
++	ld	r12, [r12, PT_user_r25]
++	PUSH	r12
+ #else
+ 	PUSH	r25
+ #endif
+@@ -209,12 +208,16 @@
+ .macro RESTORE_CALLEE_SAVED_USER
+ 
+ #ifdef CONFIG_ARC_CURR_IN_REG
+-	ld.ab   r12, [sp, 4]
+-	st.as   r12, [sp, OFF_USER_R25_FROM_R24]
++	POP	r12
+ #else
+ 	POP	r25
+ #endif
+ 	RESTORE_R24_TO_R13
++
++	; SP is back to start of pt_regs
++#ifdef CONFIG_ARC_CURR_IN_REG
++	st	r12, [sp, PT_user_r25]
++#endif
+ .endm
+ 
+ /*--------------------------------------------------------------
+diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
+index 27ecc6975a58..853a80ee7aaa 100644
+--- a/arch/arc/include/asm/irqflags.h
++++ b/arch/arc/include/asm/irqflags.h
+@@ -168,10 +168,10 @@ static inline int arch_irqs_disabled(void)
+ .endm
+ 
+ .macro IRQ_ENABLE  scratch
++	TRACE_ASM_IRQ_ENABLE
+ 	lr	\scratch, [status32]
+ 	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ 	flag	\scratch
+-	TRACE_ASM_IRQ_ENABLE
+ .endm
+ 
+ #endif	/* __ASSEMBLY__ */
+diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
+index 0f99ac8fcbb2..0037a587320d 100644
+--- a/arch/arc/include/uapi/asm/elf.h
++++ b/arch/arc/include/uapi/asm/elf.h
+@@ -13,8 +13,15 @@
+ 
+ /* Machine specific ELF Hdr flags */
+ #define EF_ARC_OSABI_MSK	0x00000f00
+-#define EF_ARC_OSABI_ORIG	0x00000000   /* MUST be zero for back-compat */
+-#define EF_ARC_OSABI_CURRENT	0x00000300   /* v3 (no legacy syscalls) */
++
++#define EF_ARC_OSABI_V3		0x00000300   /* v3 (no legacy syscalls) */
++#define EF_ARC_OSABI_V4		0x00000400   /* v4 (64bit data any reg align) */
++
++#if __GNUC__ < 6
++#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V3
++#else
++#define EF_ARC_OSABI_CURRENT	EF_ARC_OSABI_V4
++#endif
+ 
+ typedef unsigned long elf_greg_t;
+ typedef unsigned long elf_fpregset_t;
+diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
+index 4d9e77724bed..000dd041ab42 100644
+--- a/arch/arc/kernel/arcksyms.c
++++ b/arch/arc/kernel/arcksyms.c
+@@ -28,6 +28,7 @@ extern void __muldf3(void);
+ extern void __divdf3(void);
+ extern void __floatunsidf(void);
+ extern void __floatunsisf(void);
++extern void __udivdi3(void);
+ 
+ EXPORT_SYMBOL(__ashldi3);
+ EXPORT_SYMBOL(__ashrdi3);
+@@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
+ EXPORT_SYMBOL(__divdf3);
+ EXPORT_SYMBOL(__floatunsidf);
+ EXPORT_SYMBOL(__floatunsisf);
++EXPORT_SYMBOL(__udivdi3);
+ 
+ /* ARC optimised assembler routines */
+ EXPORT_SYMBOL(memset);
+diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
+index 6c3aa0edb9b5..5f14311ce59e 100644
+--- a/arch/arc/kernel/asm-offsets.c
++++ b/arch/arc/kernel/asm-offsets.c
+@@ -59,5 +59,7 @@ int main(void)
+ 
+ 	DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
+ 	DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
++	DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
++
+ 	return 0;
+ }
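
asm-offsets.c is how assembly learns C struct layout: each DEFINE(name, offsetof(...)) is compiled into a marker that kbuild scrapes into a generated header, so entry.S can write [r12, PT_user_r25] instead of hand-counted byte offsets like the removed OFF_USER_R25_FROM_R24 arithmetic. The mechanism boils down to offsetof(); a runnable model with a stand-in struct (pt_regs_model is hypothetical, not the real ARC layout):

  #include <stddef.h>
  #include <stdio.h>

  struct pt_regs_model {
  	unsigned long bta;
  	unsigned long lp_start, lp_end, lp_count;
  	unsigned long user_r25;
  };

  int main(void)
  {
  	/* this is the number the generated header would carry */
  	printf("PT_user_r25 = %zu\n", offsetof(struct pt_regs_model, user_r25));
  	return 0;
  }

Because the offset is emitted by the compiler from the real struct definition, the assembly can never drift out of sync with the C layout — exactly the drift the old hand-computed offset risked.
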
+diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
+index e095c557afdd..72d3990799d5 100644
+--- a/arch/arc/kernel/process.c
++++ b/arch/arc/kernel/process.c
+@@ -201,7 +201,7 @@ int elf_check_arch(const struct elf32_hdr *x)
+ 		return 0;
+ 
+ 	eflags = x->e_flags;
+-	if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
++	if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
+ 		pr_err("ABI mismatch - you need newer toolchain\n");
+ 		force_sigsegv(SIGSEGV, current);
+ 		return 0;
+diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
+index 1d167c6df8ca..321a4b67bfa0 100644
+--- a/arch/arc/kernel/setup.c
++++ b/arch/arc/kernel/setup.c
+@@ -237,8 +237,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+ 			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
+ 			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+ 
+-	n += scnprintf(buf + n, len - n,
+-		       "OS ABI [v3]\t: no-legacy-syscalls\n");
++	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
++			EF_ARC_OSABI_CURRENT >> 8,
++			EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
++			"no-legacy-syscalls" : "64-bit data any register aligned");
+ 
+ 	return buf;
+ }
+diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
+index 12b2100db073..9cfaae9887b9 100644
+--- a/arch/arc/mm/cache_arc700.c
++++ b/arch/arc/mm/cache_arc700.c
+@@ -155,6 +155,15 @@ void arc_cache_init(void)
+ 
+ 	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ 
++	/*
++	 * Only the master CPU needs to execute the rest of this function:
++	 *  - Assume SMP, so all cores have the same cache config and
++	 *    any geometry checks will be the same for all
++	 *  - IOC setup / DMA callbacks only need to be set up once
++	 */
++	if (cpu)
++		return;
++
+ 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
+ 		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ 
+diff --git a/arch/parisc/include/uapi/asm/errno.h b/arch/parisc/include/uapi/asm/errno.h
+index c0ae62520d15..274d5bc6ecce 100644
+--- a/arch/parisc/include/uapi/asm/errno.h
++++ b/arch/parisc/include/uapi/asm/errno.h
+@@ -97,10 +97,10 @@
+ #define	ENOTCONN	235	/* Transport endpoint is not connected */
+ #define	ESHUTDOWN	236	/* Cannot send after transport endpoint shutdown */
+ #define	ETOOMANYREFS	237	/* Too many references: cannot splice */
+-#define EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
+ #define	ETIMEDOUT	238	/* Connection timed out */
+ #define	ECONNREFUSED	239	/* Connection refused */
+-#define EREMOTERELEASE	240	/* Remote peer released connection */
++#define	EREFUSED	ECONNREFUSED	/* for HP's NFS apparently */
++#define	EREMOTERELEASE	240	/* Remote peer released connection */
+ #define	EHOSTDOWN	241	/* Host is down */
+ #define	EHOSTUNREACH	242	/* No route to host */
+ 
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 9a4e71261fca..b264937bba68 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -678,7 +678,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
+ 	/* Check if the request is finished successfully */
+ 	if (active_flag) {
+ 		rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+-		if (rc <= 0)
++		if (rc < 0)
+ 			return rc;
+ 
+ 		if (rc & active_flag)
+diff --git a/arch/um/include/asm/common.lds.S b/arch/um/include/asm/common.lds.S
+index 1dd5bd8a8c59..133055311dce 100644
+--- a/arch/um/include/asm/common.lds.S
++++ b/arch/um/include/asm/common.lds.S
+@@ -81,7 +81,7 @@
+   .altinstr_replacement : { *(.altinstr_replacement) }
+   /* .exit.text is discard at runtime, not link time, to deal with references
+      from .altinstructions and .eh_frame */
+-  .exit.text : { *(.exit.text) }
++  .exit.text : { EXIT_TEXT }
+   .exit.data : { *(.exit.data) }
+ 
+   .preinit_array : {
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 307a49828826..def2e2e523f1 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1583,6 +1583,9 @@ void __init enable_IR_x2apic(void)
+ 	unsigned long flags;
+ 	int ret, ir_stat;
+ 
++	if (skip_ioapic_setup)
++		return;
++
+ 	ir_stat = irq_remapping_prepare();
+ 	if (ir_stat < 0 && !x2apic_supported())
+ 		return;
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 7f29dc0237d1..bbbf36e6066b 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -475,7 +475,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+ 
+ void blk_set_queue_dying(struct request_queue *q)
+ {
+-	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
++	spin_lock_irq(q->queue_lock);
++	queue_flag_set(QUEUE_FLAG_DYING, q);
++	spin_unlock_irq(q->queue_lock);
+ 
+ 	if (q->mq_ops)
+ 		blk_mq_wake_waiters(q);
+diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
+index 1928a8912584..b03d7919f919 100644
+--- a/drivers/clocksource/sun4i_timer.c
++++ b/drivers/clocksource/sun4i_timer.c
+@@ -120,12 +120,16 @@ static struct clock_event_device sun4i_clockevent = {
+ 	.set_next_event = sun4i_clkevt_next_event,
+ };
+ 
++static void sun4i_timer_clear_interrupt(void)
++{
++	writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
++}
+ 
+ static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
+ {
+ 	struct clock_event_device *evt = (struct clock_event_device *)dev_id;
+ 
+-	writel(0x1, timer_base + TIMER_IRQ_ST_REG);
++	sun4i_timer_clear_interrupt();
+ 	evt->event_handler(evt);
+ 
+ 	return IRQ_HANDLED;
+@@ -190,6 +194,9 @@ static void __init sun4i_timer_init(struct device_node *node)
+ 	/* Make sure timer is stopped before playing with interrupts */
+ 	sun4i_clkevt_time_stop(0);
+ 
++	/* clear timer0 interrupt */
++	sun4i_timer_clear_interrupt();
++
+ 	sun4i_clockevent.cpumask = cpu_possible_mask;
+ 	sun4i_clockevent.irq = irq;
+ 
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 0436997e054b..9742b3d66288 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -1862,6 +1862,7 @@ caam_hash_alloc(struct caam_hash_template *template,
+ 			 template->name);
+ 		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ 			 template->driver_name);
++		t_alg->ahash_alg.setkey = NULL;
+ 	}
+ 	alg->cra_module = THIS_MODULE;
+ 	alg->cra_init = caam_hash_cra_init;
+diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
+index caefe806db5e..c88b01bbf9a3 100644
+--- a/drivers/gpio/Kconfig
++++ b/drivers/gpio/Kconfig
+@@ -50,6 +50,7 @@ config GPIO_DEVRES
+ config OF_GPIO
+ 	def_bool y
+ 	depends on OF
++	depends on HAS_IOMEM
+ 
+ config GPIO_ACPI
+ 	def_bool y
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 800a025dd062..8a9f49b7391b 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -4923,6 +4923,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
+ 	unsigned long flags;
+ 	int ret = -EINVAL;
+ 
++	if (!drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+ 	    page_flip->reserved != 0)
+ 		return -EINVAL;
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index ad90fa3045e5..084dcae37a3d 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -2115,6 +2115,7 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
+ 		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
+ 
+ 		ppgtt->base.cleanup(&ppgtt->base);
++		kfree(ppgtt);
+ 	}
+ 
+ 	if (drm_mm_initialized(&vm->mm)) {
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index 79bab6fd76bb..eb2a5bac215a 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -586,7 +586,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 		if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
+ 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ 		/* use frac fb div on RS780/RS880 */
+-		if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
++		if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
++		    && !radeon_crtc->ss_enabled)
+ 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ 		if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
+ 			radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+@@ -623,7 +624,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
+ 			if (radeon_crtc->ss.refdiv) {
+ 				radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
+ 				radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
+-				if (ASIC_IS_AVIVO(rdev))
++				if (ASIC_IS_AVIVO(rdev) &&
++				    rdev->family != CHIP_RS780 &&
++				    rdev->family != CHIP_RS880)
+ 					radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ 			}
+ 		}
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index 50ce26a3b314..f27d81b61536 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -263,8 +263,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
+ 
+ 	rdev = radeon_get_rdev(bo->bdev);
+ 	ridx = radeon_copy_ring_index(rdev);
+-	old_start = old_mem->start << PAGE_SHIFT;
+-	new_start = new_mem->start << PAGE_SHIFT;
++	old_start = (u64)old_mem->start << PAGE_SHIFT;
++	new_start = (u64)new_mem->start << PAGE_SHIFT;
+ 
+ 	switch (old_mem->mem_type) {
+ 	case TTM_PL_VRAM:
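
The radeon change is a classic shift-overflow fix: mem->start is a page frame number held in an unsigned long, and on 32-bit kernels start << PAGE_SHIFT is evaluated in 32 bits before the assignment widens it, truncating any offset at or past 4 GiB. Casting first makes the shift happen in 64 bits. A runnable demonstration:

  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT 12

  int main(void)
  {
  	uint32_t start = 0x200000;	/* page number of the 8 GiB mark */
  	uint64_t wrong = start << PAGE_SHIFT;		/* 32-bit shift */
  	uint64_t right = (uint64_t)start << PAGE_SHIFT;	/* 64-bit shift */

  	printf("wrong=%#llx right=%#llx\n",
  	       (unsigned long long)wrong, (unsigned long long)right);
  	return 0;
  }

This prints wrong=0 right=0x200000000: the unpatched code would hand the GPU a wildly wrong source or destination address for any buffer living above 4 GiB.
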
+diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
+index f97c73bd14f8..f04fbd3ef9f3 100644
+--- a/drivers/input/keyboard/tegra-kbc.c
++++ b/drivers/input/keyboard/tegra-kbc.c
+@@ -376,7 +376,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
+ 	/* Reset the KBC controller to clear all previous status.*/
+ 	reset_control_assert(kbc->rst);
+ 	udelay(100);
+-	reset_control_assert(kbc->rst);
++	reset_control_deassert(kbc->rst);
+ 	udelay(100);
+ 
+ 	tegra_kbc_config_pins(kbc);
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index e7d7230a7e31..4cfb0ac797ef 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -1277,6 +1277,7 @@ static int __init i8042_create_aux_port(int idx)
+ 	serio->write		= i8042_aux_write;
+ 	serio->start		= i8042_start;
+ 	serio->stop		= i8042_stop;
++	serio->ps2_cmd_mutex	= &i8042_mutex;
+ 	serio->port_data	= port;
+ 	serio->dev.parent	= &i8042_platform_device->dev;
+ 	if (idx < 0) {
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 2a102834c2ee..a7a03a21d78a 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1851,7 +1851,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
+ 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
+ 
+ 	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
+-	    !init_fifo(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
++	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
+ 	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
+ 	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
+ 	    !init_fifo(&ca->free_inc,	free << 2, GFP_KERNEL) ||
+@@ -1876,7 +1876,7 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ 				struct block_device *bdev, struct cache *ca)
+ {
+ 	char name[BDEVNAME_SIZE];
+-	const char *err = NULL;
++	const char *err = NULL; /* must be set for any error case */
+ 	int ret = 0;
+ 
+ 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
+@@ -1893,8 +1893,13 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
+ 		ca->discard = CACHE_DISCARD(&ca->sb);
+ 
+ 	ret = cache_alloc(sb, ca);
+-	if (ret != 0)
++	if (ret != 0) {
++		if (ret == -ENOMEM)
++			err = "cache_alloc(): -ENOMEM";
++		else
++			err = "cache_alloc(): unknown error";
+ 		goto err;
++	}
+ 
+ 	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
+ 		err = "error calling kobject_add";
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index 0f5e1820c92d..768c9fdf0df8 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -287,15 +287,13 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		pb->bio_submitted = true;
+ 
+ 		/*
+-		 * Map reads as normal only if corrupt_bio_byte set.
++		 * Error reads if neither corrupt_bio_byte nor drop_writes is set.
++		 * Otherwise, flakey_end_io() will decide if the reads should be modified.
+ 		 */
+ 		if (bio_data_dir(bio) == READ) {
+-			/* If flags were specified, only corrupt those that match. */
+-			if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
+-			    all_corrupt_bio_flags_match(bio, fc))
+-				goto map_bio;
+-			else
++			if (!fc->corrupt_bio_byte && !test_bit(DROP_WRITES, &fc->flags))
+ 				return -EIO;
++			goto map_bio;
+ 		}
+ 
+ 		/*
+@@ -332,14 +330,21 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ 	struct flakey_c *fc = ti->private;
+ 	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ 
+-	/*
+-	 * Corrupt successful READs while in down state.
+-	 */
+ 	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
+-		if (fc->corrupt_bio_byte)
++		if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++		    all_corrupt_bio_flags_match(bio, fc)) {
++			/*
++			 * Corrupt successful matching READs while in down state.
++			 */
+ 			corrupt_bio_data(bio, fc);
+-		else
++
++		} else if (!test_bit(DROP_WRITES, &fc->flags)) {
++			/*
++			 * Error read during the down_interval if drop_writes
++			 * Error reads during the down_interval if drop_writes
++			 */
+ 			return -EIO;
++		}
+ 	}
+ 
+ 	return error;
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 92618686604c..9cd27b703dd6 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -1369,10 +1369,10 @@ int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev)
+ 	if (!sysfs_initialized)
+ 		return -EACCES;
+ 
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ 	if (retval)
+ 		goto err;
+ 
+@@ -1424,10 +1424,10 @@ err_rom_file:
+ err_resource_files:
+ 	pci_remove_resource_files(pdev);
+ err_config_file:
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ err:
+ 	return retval;
+ }
+@@ -1461,10 +1461,10 @@ void pci_remove_sysfs_dev_files(struct pci_dev *pdev)
+ 
+ 	pci_remove_capabilities_sysfs(pdev);
+ 
+-	if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE)
+-		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+-	else
++	if (pdev->cfg_size > PCI_CFG_SPACE_SIZE)
+ 		sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr);
++	else
++		sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr);
+ 
+ 	pci_remove_resource_files(pdev);
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index ae12c0317645..5d8c049fe503 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -288,6 +288,18 @@ static void quirk_citrine(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
+ 
++/*
++ * This chip can cause bus lockups if config addresses above 0x600
++ * are read or written.
++ */
++static void quirk_nfp6000(struct pci_dev *dev)
++{
++	dev->cfg_size = 0x600;
++}
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP4000,	quirk_nfp6000);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000,	quirk_nfp6000);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME,	PCI_DEVICE_ID_NETRONOME_NFP6000_VF,	quirk_nfp6000);
++
+ /*  On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+ static void quirk_extend_bar_to_page(struct pci_dev *dev)
+ {
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 7de3b64bf142..4e1b3bf58093 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -48,17 +48,6 @@ static int amd_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+ 
+ 	spin_lock_irqsave(&gpio_dev->lock, flags);
+ 	pin_reg = readl(gpio_dev->base + offset * 4);
+-	/*
+-	 * Suppose BIOS or Bootloader sets specific debounce for the
+-	 * GPIO. if not, set debounce to be  2.75ms and remove glitch.
+-	*/
+-	if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
+-		pin_reg |= 0xf;
+-		pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+-		pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
+-		pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+-	}
+-
+ 	pin_reg &= ~BIT(OUTPUT_ENABLE_OFF);
+ 	writel(pin_reg, gpio_dev->base + offset * 4);
+ 	spin_unlock_irqrestore(&gpio_dev->lock, flags);
+@@ -331,15 +320,6 @@ static void amd_gpio_irq_enable(struct irq_data *d)
+ 
+ 	spin_lock_irqsave(&gpio_dev->lock, flags);
+ 	pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
+-	/*
+-		Suppose BIOS or Bootloader sets specific debounce for the
+-		GPIO. if not, set debounce to be  2.75ms.
+-	*/
+-	if ((pin_reg & DB_TMR_OUT_MASK) == 0) {
+-		pin_reg |= 0xf;
+-		pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
+-		pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
+-	}
+ 	pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
+ 	pin_reg |= BIT(INTERRUPT_MASK_OFF);
+ 	writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
+diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
+index 57fd66357b95..028d7f76e94e 100644
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -1582,9 +1582,18 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	unsigned long long now;
+ 	int expires;
+ 
++	cqr = (struct dasd_ccw_req *) intparm;
+ 	if (IS_ERR(irb)) {
+ 		switch (PTR_ERR(irb)) {
+ 		case -EIO:
++			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
++				device = (struct dasd_device *) cqr->startdev;
++				cqr->status = DASD_CQR_CLEARED;
++				dasd_device_clear_timer(device);
++				wake_up(&dasd_flush_wq);
++				dasd_schedule_device_bh(device);
++				return;
++			}
+ 			break;
+ 		case -ETIMEDOUT:
+ 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
+@@ -1600,7 +1609,6 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
+ 	}
+ 
+ 	now = get_tod_clock();
+-	cqr = (struct dasd_ccw_req *) intparm;
+ 	/* check for conditions that should be handled immediately */
+ 	if (!cqr ||
+ 	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
+diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
+index 54195a117f72..f78cc943d230 100644
+--- a/drivers/scsi/aacraid/commctrl.c
++++ b/drivers/scsi/aacraid/commctrl.c
+@@ -63,7 +63,7 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 	struct fib *fibptr;
+ 	struct hw_fib * hw_fib = (struct hw_fib *)0;
+ 	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
+-	unsigned size;
++	unsigned int size, osize;
+ 	int retval;
+ 
+ 	if (dev->in_reset) {
+@@ -87,7 +87,8 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 	 *	will not overrun the buffer when we copy the memory. Return
+ 	 *	an error if we would.
+ 	 */
+-	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
++	osize = size = le16_to_cpu(kfib->header.Size) +
++		sizeof(struct aac_fibhdr);
+ 	if (size < le16_to_cpu(kfib->header.SenderSize))
+ 		size = le16_to_cpu(kfib->header.SenderSize);
+ 	if (size > dev->max_fib_size) {
+@@ -118,6 +119,14 @@ static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+ 		goto cleanup;
+ 	}
+ 
++	/* Sanity check the second copy */
++	if ((osize != le16_to_cpu(kfib->header.Size) +
++		sizeof(struct aac_fibhdr))
++		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
++		retval = -EINVAL;
++		goto cleanup;
++	}
++
+ 	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
+ 		aac_adapter_interrupt(dev);
+ 		/*
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 09084c9da8b7..41dcefe67b43 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1346,7 +1346,6 @@ made_compressed_probe:
+ 	spin_lock_init(&acm->write_lock);
+ 	spin_lock_init(&acm->read_lock);
+ 	mutex_init(&acm->mutex);
+-	acm->rx_endpoint = usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress);
+ 	acm->is_int_ep = usb_endpoint_xfer_int(epread);
+ 	if (acm->is_int_ep)
+ 		acm->bInterval = epread->bInterval;
+@@ -1386,14 +1385,14 @@ made_compressed_probe:
+ 		urb->transfer_dma = rb->dma;
+ 		if (acm->is_int_ep) {
+ 			usb_fill_int_urb(urb, acm->dev,
+-					 acm->rx_endpoint,
++					 usb_rcvintpipe(usb_dev, epread->bEndpointAddress),
+ 					 rb->base,
+ 					 acm->readsize,
+ 					 acm_read_bulk_callback, rb,
+ 					 acm->bInterval);
+ 		} else {
+ 			usb_fill_bulk_urb(urb, acm->dev,
+-					  acm->rx_endpoint,
++					  usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress),
+ 					  rb->base,
+ 					  acm->readsize,
+ 					  acm_read_bulk_callback, rb);
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index ac830e0ae38b..bae1e3717d20 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -95,7 +95,6 @@ struct acm {
+ 	struct urb *read_urbs[ACM_NR];
+ 	struct acm_rb read_buffers[ACM_NR];
+ 	int rx_buflimit;
+-	int rx_endpoint;
+ 	spinlock_t read_lock;
+ 	int write_used;					/* number of non-empty write buffers */
+ 	int transmitting;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 563d84eb484d..c98f78b0bf11 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1298,8 +1298,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
+ 	struct usb_device *hdev = hub->hdev;
+ 	int i;
+ 
+-	cancel_delayed_work_sync(&hub->init_work);
+-
+ 	/* hub_wq and related activity won't re-trigger */
+ 	hub->quiescing = 1;
+ 
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index 830f020230c4..c702f5d941d9 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -24,14 +24,17 @@
+ 
+ #include "platform_data.h"
+ 
+-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3	0xabcd
+-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
+-#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+-#define PCI_DEVICE_ID_INTEL_BYT		0x0f37
+-#define PCI_DEVICE_ID_INTEL_MRFLD	0x119e
+-#define PCI_DEVICE_ID_INTEL_BSW		0x22B7
+-#define PCI_DEVICE_ID_INTEL_SPTLP	0x9d30
+-#define PCI_DEVICE_ID_INTEL_SPTH	0xa130
++#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3		0xabcd
++#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI	0xabce
++#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31	0xabcf
++#define PCI_DEVICE_ID_INTEL_BYT			0x0f37
++#define PCI_DEVICE_ID_INTEL_MRFLD		0x119e
++#define PCI_DEVICE_ID_INTEL_BSW			0x22b7
++#define PCI_DEVICE_ID_INTEL_SPTLP		0x9d30
++#define PCI_DEVICE_ID_INTEL_SPTH		0xa130
++#define PCI_DEVICE_ID_INTEL_BXT			0x0aaa
++#define PCI_DEVICE_ID_INTEL_APL			0x5aaa
++#define PCI_DEVICE_ID_INTEL_KBP			0xa2b0
+ 
+ static int dwc3_pci_quirks(struct pci_dev *pdev)
+ {
+@@ -166,6 +169,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), },
++	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
+ 	{  }	/* Terminating Entry */
+ };
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index f4c6e81df034..a5e1b8b39ff5 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1810,7 +1810,8 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
+ 
+ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 		struct dwc3_request *req, struct dwc3_trb *trb,
+-		const struct dwc3_event_depevt *event, int status)
++		const struct dwc3_event_depevt *event, int status,
++		int chain)
+ {
+ 	unsigned int		count;
+ 	unsigned int		s_pkt = 0;
+@@ -1818,6 +1819,19 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 
+ 	trace_dwc3_complete_trb(dep, trb);
+ 
++	/*
++	 * If we're in the middle of a series of chained TRBs and we
++	 * receive a short transfer along the way, DWC3 will skip
++	 * through all TRBs including the last TRB in the chain (the
++	 * one where the CHN bit is zero). DWC3 will also avoid
++	 * clearing the HWO bit, and SW has to do it manually.
++	 *
++	 * We're going to do that here to avoid problems of HW trying
++	 * to use bogus TRBs for transfers.
++	 */
++	if (chain && (trb->ctrl & DWC3_TRB_CTRL_HWO))
++		trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
++
+ 	if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ 		/*
+ 		 * We continue despite the error. There is not much we
+@@ -1829,6 +1843,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 		 */
+ 		dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
+ 				dep->name, trb);
++
+ 	count = trb->size & DWC3_TRB_SIZE_MASK;
+ 
+ 	if (dep->direction) {
+@@ -1866,15 +1881,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 			s_pkt = 1;
+ 	}
+ 
+-	/*
+-	 * We assume here we will always receive the entire data block
+-	 * which we should receive. Meaning, if we program RX to
+-	 * receive 4K but we receive only 2K, we assume that's all we
+-	 * should receive and we simply bounce the request back to the
+-	 * gadget driver for further processing.
+-	 */
+-	req->request.actual += req->request.length - count;
+-	if (s_pkt)
++	if (s_pkt && !chain)
+ 		return 1;
+ 	if ((event->status & DEPEVT_STATUS_LST) &&
+ 			(trb->ctrl & (DWC3_TRB_CTRL_LST |
+@@ -1893,14 +1900,19 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 	struct dwc3_trb		*trb;
+ 	unsigned int		slot;
+ 	unsigned int		i;
++	int			count = 0;
+ 	int			ret;
+ 
+ 	do {
++		int chain;
++
+ 		req = next_request(&dep->req_queued);
+ 		if (!req) {
+ 			WARN_ON_ONCE(1);
+ 			return 1;
+ 		}
++
++		chain = req->request.num_mapped_sgs > 0;
+ 		i = 0;
+ 		do {
+ 			slot = req->start_slot + i;
+@@ -1909,13 +1921,22 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 				slot++;
+ 			slot %= DWC3_TRB_NUM;
+ 			trb = &dep->trb_pool[slot];
++			count += trb->size & DWC3_TRB_SIZE_MASK;
+ 
+ 			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+-					event, status);
++					event, status, chain);
+ 			if (ret)
+ 				break;
+ 		} while (++i < req->request.num_mapped_sgs);
+ 
++		/*
++		 * We assume here we will always receive the entire data block
++		 * which we should receive. Meaning, if we program RX to
++		 * receive 4K but we receive only 2K, we assume that's all we
++		 * should receive and we simply bounce the request back to the
++		 * gadget driver for further processing.
++		 */
++		req->request.actual += req->request.length - count;
+ 		dwc3_gadget_giveback(dep, req, status);
+ 
+ 		if (ret)
+diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
+index f4d88dfb26a7..53946d107bc2 100644
+--- a/drivers/usb/host/ehci-hcd.c
++++ b/drivers/usb/host/ehci-hcd.c
+@@ -331,11 +331,11 @@ static void ehci_turn_off_all_ports(struct ehci_hcd *ehci)
+ 	int	port = HCS_N_PORTS(ehci->hcs_params);
+ 
+ 	while (port--) {
+-		ehci_writel(ehci, PORT_RWC_BITS,
+-				&ehci->regs->port_status[port]);
+ 		spin_unlock_irq(&ehci->lock);
+ 		ehci_port_power(ehci, port, false);
+ 		spin_lock_irq(&ehci->lock);
++		ehci_writel(ehci, PORT_RWC_BITS,
++				&ehci->regs->port_status[port]);
+ 	}
+ }
+ 
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index ee07ba41c8db..cc1993c5556e 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -276,6 +276,9 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
+ 
+ 	ret = 0;
+ 	virt_dev = xhci->devs[slot_id];
++	if (!virt_dev)
++		return -ENODEV;
++
+ 	cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ 	if (!cmd) {
+ 		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6ef255142e01..f7e917866e05 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1331,12 +1331,6 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 
+ 	cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+ 
+-	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+-		xhci_err(xhci,
+-			 "Command completion event does not match command\n");
+-		return;
+-	}
+-
+ 	del_timer(&xhci->cmd_timer);
+ 
+ 	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
+@@ -1348,6 +1342,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
+ 		xhci_handle_stopped_cmd_ring(xhci, cmd);
+ 		return;
+ 	}
++
++	if (cmd->command_trb != xhci->cmd_ring->dequeue) {
++		xhci_err(xhci,
++			 "Command completion event does not match command\n");
++		return;
++	}
++
+ 	/*
+ 	 * Host aborted the command ring, check if the current command was
+ 	 * supposed to be aborted, otherwise continue normally.
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index bbddc44ce8bc..c33ad2181b33 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -545,7 +545,6 @@ static void sg_timeout(unsigned long _req)
+ {
+ 	struct usb_sg_request	*req = (struct usb_sg_request *) _req;
+ 
+-	req->status = -ETIMEDOUT;
+ 	usb_sg_cancel(req);
+ }
+ 
+@@ -576,8 +575,10 @@ static int perform_sglist(
+ 		mod_timer(&sg_timer, jiffies +
+ 				msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
+ 		usb_sg_wait(req);
+-		del_timer_sync(&sg_timer);
+-		retval = req->status;
++		if (!del_timer_sync(&sg_timer))
++			retval = -ETIMEDOUT;
++		else
++			retval = req->status;
+ 
+ 		/* FIXME check resulting data pattern */
+ 
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index db565f620f82..36e5b5c530bd 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -869,7 +869,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ 
+ 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
+ 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
+-	    usbhs_pipe_is_dcp(pipe))
++	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
+ 		goto usbhsf_pio_prepare_push;
+ 
+ 	/* check data length if this driver don't use USB-DMAC */
+@@ -974,7 +974,7 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
+ 
+ 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
+ 	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
+-	    usbhs_pipe_is_dcp(pipe))
++	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
+ 		goto usbhsf_pio_prepare_pop;
+ 
+ 	fifo = usbhsf_get_dma_fifo(priv, pkt);
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 74af77a022a8..275c9aebc4c9 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -614,10 +614,13 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 		 * use dmaengine if possible.
+ 		 * It will use pio handler if impossible.
+ 		 */
+-		if (usb_endpoint_dir_in(desc))
++		if (usb_endpoint_dir_in(desc)) {
+ 			pipe->handler = &usbhs_fifo_dma_push_handler;
+-		else
++		} else {
+ 			pipe->handler = &usbhs_fifo_dma_pop_handler;
++			usbhs_xxxsts_clear(priv, BRDYSTS,
++					   usbhs_pipe_number(pipe));
++		}
+ 
+ 		ret = 0;
+ 	}
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index b61f12160d37..8c48c9d83d48 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -648,6 +648,8 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_TFD128_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_FM3RX_PID) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_ELV_WS777_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_PALMSENS_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_IVIUM_XSTAT_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_SDMUSBQSS_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_MASTERDEVEL2_PID) },
+ 	{ USB_DEVICE(FTDI_VID, LINX_FUTURE_0_PID) },
+@@ -1008,6 +1010,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
+ 	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
++	{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 334bc600282d..48db84f25cc9 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -406,6 +406,12 @@
+ #define FTDI_4N_GALAXY_DE_3_PID	0xF3C2
+ 
+ /*
++ * Ivium Technologies product IDs
++ */
++#define FTDI_PALMSENS_PID	0xf440
++#define FTDI_IVIUM_XSTAT_PID	0xf441
++
++/*
+  * Linx Technologies product ids
+  */
+ #define LINX_SDMUSBQSS_PID	0xF448	/* Linx SDM-USB-QS-S */
+@@ -673,6 +679,12 @@
+ #define INTREPID_NEOVI_PID	0x0701
+ 
+ /*
++ * WICED USB UART
++ */
++#define WICED_VID		0x0A5C
++#define WICED_USB20706V2_PID	0x6422
++
++/*
+  * Definitions for ID TECH (www.idt-net.com) devices
+  */
+ #define IDTECH_VID		0x0ACD	/* ID TECH Vendor ID */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 2a7bf26c68e6..ca03fbfa2a32 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -277,6 +277,12 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ #define TELIT_PRODUCT_LE910_USBCFG4		0x1206
++#define TELIT_PRODUCT_LE920A4_1207		0x1207
++#define TELIT_PRODUCT_LE920A4_1208		0x1208
++#define TELIT_PRODUCT_LE920A4_1211		0x1211
++#define TELIT_PRODUCT_LE920A4_1212		0x1212
++#define TELIT_PRODUCT_LE920A4_1213		0x1213
++#define TELIT_PRODUCT_LE920A4_1214		0x1214
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -636,6 +642,11 @@ static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ 	.reserved = BIT(8) | BIT(10) | BIT(11),
+ };
+ 
++static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1),
++};
++
+ static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+ 	.sendsetup = BIT(2),
+ 	.reserved = BIT(0) | BIT(1) | BIT(3),
+@@ -1215,6 +1226,16 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
++		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
++		.driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+@@ -1978,6 +1999,7 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 46f1f13b41f1..a0ca291bc07f 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1432,7 +1432,7 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+ 
+ 	rc = usb_register(udriver);
+ 	if (rc)
+-		return rc;
++		goto failed_usb_register;
+ 
+ 	for (sd = serial_drivers; *sd; ++sd) {
+ 		(*sd)->usb_driver = udriver;
+@@ -1450,6 +1450,8 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
+ 	while (sd-- > serial_drivers)
+ 		usb_serial_deregister(*sd);
+ 	usb_deregister(udriver);
++failed_usb_register:
++	kfree(udriver);
+ 	return rc;
+ }
+ EXPORT_SYMBOL_GPL(usb_serial_register_drivers);
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index 636435b41293..2209040bff95 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -89,7 +89,7 @@ struct vhost_scsi_cmd {
+ 	struct scatterlist *tvc_prot_sgl;
+ 	struct page **tvc_upages;
+ 	/* Pointer to response header iovec */
+-	struct iovec *tvc_resp_iov;
++	struct iovec tvc_resp_iov;
+ 	/* Pointer to vhost_scsi for our device */
+ 	struct vhost_scsi *tvc_vhost;
+ 	/* Pointer to vhost_virtqueue for the cmd */
+@@ -716,7 +716,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
+ 		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
+ 		       se_cmd->scsi_sense_length);
+ 
+-		iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
++		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
+ 			      cmd->tvc_in_iovs, sizeof(v_rsp));
+ 		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+ 		if (likely(ret == sizeof(v_rsp))) {
+@@ -1212,7 +1212,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+ 		}
+ 		cmd->tvc_vhost = vs;
+ 		cmd->tvc_vq = vq;
+-		cmd->tvc_resp_iov = &vq->iov[out];
++		cmd->tvc_resp_iov = vq->iov[out];
+ 		cmd->tvc_in_iovs = in;
+ 
+ 		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 531e76474983..0e0eb10f82a0 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
+ 			rc = -ENOMEM;
+ 			goto out;
+ 		}
+-	} else {
++	} else if (msg_type == XS_TRANSACTION_END) {
+ 		list_for_each_entry(trans, &u->transactions, list)
+ 			if (trans->handle.id == u->u.msg.tx_id)
+ 				break;
+diff --git a/fs/seq_file.c b/fs/seq_file.c
+index 555f82155be8..4408057d1dc8 100644
+--- a/fs/seq_file.c
++++ b/fs/seq_file.c
+@@ -212,8 +212,10 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
+ 		size -= n;
+ 		buf += n;
+ 		copied += n;
+-		if (!m->count)
++		if (!m->count) {
++			m->from = 0;
+ 			m->index++;
++		}
+ 		if (!size)
+ 			goto Done;
+ 	}
+diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
+index b45345d701e7..51157da3f76e 100644
+--- a/fs/ubifs/tnc_commit.c
++++ b/fs/ubifs/tnc_commit.c
+@@ -370,7 +370,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
+ 
+ 	p = c->gap_lebs;
+ 	do {
+-		ubifs_assert(p < c->gap_lebs + sizeof(int) * c->lst.idx_lebs);
++		ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
+ 		written = layout_leb_in_gaps(c, p);
+ 		if (written < 0) {
+ 			err = written;
+diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
+index 2f7b9a40f627..9b6f5dc58732 100644
+--- a/include/linux/pci_ids.h
++++ b/include/linux/pci_ids.h
+@@ -2483,6 +2483,13 @@
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF2	0x1700
+ #define PCI_DEVICE_ID_KORENIX_JETCARDF3	0x17ff
+ 
++#define PCI_VENDOR_ID_NETRONOME		0x19ee
++#define PCI_DEVICE_ID_NETRONOME_NFP3200	0x3200
++#define PCI_DEVICE_ID_NETRONOME_NFP3240	0x3240
++#define PCI_DEVICE_ID_NETRONOME_NFP4000	0x4000
++#define PCI_DEVICE_ID_NETRONOME_NFP6000	0x6000
++#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF	0x6003
++
+ #define PCI_VENDOR_ID_QMI		0x1a32
+ 
+ #define PCI_VENDOR_ID_AZWAVE		0x1a3b
+diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
+index cb346f26a22d..a89bca964b1f 100644
+--- a/kernel/events/uprobes.c
++++ b/kernel/events/uprobes.c
+@@ -179,8 +179,10 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ 	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
+ 	err = -EAGAIN;
+ 	ptep = page_check_address(page, mm, addr, &ptl, 0);
+-	if (!ptep)
++	if (!ptep) {
++		mem_cgroup_cancel_charge(kpage, memcg);
+ 		goto unlock;
++	}
+ 
+ 	get_page(kpage);
+ 	page_add_new_anon_rmap(kpage, vma, addr);
+@@ -207,7 +209,6 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
+ 
+ 	err = 0;
+  unlock:
+-	mem_cgroup_cancel_charge(kpage, memcg);
+ 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
+ 	unlock_page(page);
+ 	return err;
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 65dbf8aee751..8fcc801fde15 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -419,7 +419,10 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
+ 	do {
+ 		seq = raw_read_seqcount(&tkf->seq);
+ 		tkr = tkf->base + (seq & 0x01);
+-		now = ktime_to_ns(tkr->base) + timekeeping_get_ns(tkr);
++		now = ktime_to_ns(tkr->base);
++
++		now += clocksource_delta(tkr->read(tkr->clock),
++					 tkr->cycle_last, tkr->mask);
+ 	} while (read_seqcount_retry(&tkf->seq, seq));
+ 
+ 	return now;
+diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c
+index f6bd65236712..107310a6f36f 100644
+--- a/kernel/time/timekeeping_debug.c
++++ b/kernel/time/timekeeping_debug.c
+@@ -23,7 +23,9 @@
+ 
+ #include "timekeeping_internal.h"
+ 
+-static unsigned int sleep_time_bin[32] = {0};
++#define NUM_BINS 32
++
++static unsigned int sleep_time_bin[NUM_BINS] = {0};
+ 
+ static int tk_debug_show_sleep_time(struct seq_file *s, void *data)
+ {
+@@ -69,6 +71,9 @@ late_initcall(tk_debug_sleep_time_init);
+ 
+ void tk_debug_account_sleep_time(struct timespec64 *t)
+ {
+-	sleep_time_bin[fls(t->tv_sec)]++;
++	/* Cap bin index so we don't overflow the array */
++	int bin = min(fls(t->tv_sec), NUM_BINS-1);
++
++	sleep_time_bin[bin]++;
+ }
+ 
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 87463c814896..36b93adfd7da 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+ 
+ /* rfc5961 challenge ack rate limiting */
+-int sysctl_tcp_challenge_ack_limit = 100;
++int sysctl_tcp_challenge_ack_limit = 1000;
+ 
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+@@ -3343,6 +3343,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
+ 	return flag;
+ }
+ 
++static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
++				   u32 *last_oow_ack_time)
++{
++	if (*last_oow_ack_time) {
++		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
++
++		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
++			NET_INC_STATS(net, mib_idx);
++			return true;	/* rate-limited: don't send yet! */
++		}
++	}
++
++	*last_oow_ack_time = tcp_time_stamp;
++
++	return false;	/* not rate-limited: go ahead, send dupack now! */
++}
++
+ /* Return true if we're currently rate-limiting out-of-window ACKs and
+  * thus shouldn't send a dupack right now. We rate-limit dupacks in
+  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
+@@ -3356,21 +3373,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
+ 	/* Data packets without SYNs are not likely part of an ACK loop. */
+ 	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
+ 	    !tcp_hdr(skb)->syn)
+-		goto not_rate_limited;
+-
+-	if (*last_oow_ack_time) {
+-		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+-
+-		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+-			NET_INC_STATS_BH(net, mib_idx);
+-			return true;	/* rate-limited: don't send yet! */
+-		}
+-	}
+-
+-	*last_oow_ack_time = tcp_time_stamp;
++		return false;
+ 
+-not_rate_limited:
+-	return false;	/* not rate-limited: go ahead, send dupack now! */
++	return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
+ }
+ 
+ /* RFC 5961 7 [ACK Throttling] */
+@@ -3380,21 +3385,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
+ 	static u32 challenge_timestamp;
+ 	static unsigned int challenge_count;
+ 	struct tcp_sock *tp = tcp_sk(sk);
+-	u32 now;
++	u32 count, now;
+ 
+ 	/* First check our per-socket dupack rate limit. */
+-	if (tcp_oow_rate_limited(sock_net(sk), skb,
+-				 LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+-				 &tp->last_oow_ack_time))
++	if (__tcp_oow_rate_limited(sock_net(sk),
++				   LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
++				   &tp->last_oow_ack_time))
+ 		return;
+ 
+-	/* Then check the check host-wide RFC 5961 rate limit. */
++	/* Then check host-wide RFC 5961 rate limit. */
+ 	now = jiffies / HZ;
+ 	if (now != challenge_timestamp) {
++		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
++
+ 		challenge_timestamp = now;
+-		challenge_count = 0;
++		WRITE_ONCE(challenge_count, half +
++			   prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ 	}
+-	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++	count = READ_ONCE(challenge_count);
++	if (count > 0) {
++		WRITE_ONCE(challenge_count, count - 1);
+ 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ 		tcp_send_ack(sk);
+ 	}
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index f06d42267306..89ed0206882c 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -907,7 +907,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
+ 
+ 	/* free all potentially still buffered bcast frames */
+ 	local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps.bc_buf);
+-	skb_queue_purge(&sdata->u.ap.ps.bc_buf);
++	ieee80211_purge_tx_queue(&local->hw, &sdata->u.ap.ps.bc_buf);
+ 
+ 	mutex_lock(&local->mtx);
+ 	ieee80211_vif_copy_chanctx_to_vlans(sdata, true);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 5787f15a3a12..8dbdbaea70fd 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -355,7 +355,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
+ 		skb = skb_dequeue(&ps->bc_buf);
+ 		if (skb) {
+ 			purged++;
+-			dev_kfree_skb(skb);
++			ieee80211_free_txskb(&local->hw, skb);
+ 		}
+ 		total += skb_queue_len(&ps->bc_buf);
+ 	}
+@@ -438,7 +438,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
+ 	if (skb_queue_len(&ps->bc_buf) >= AP_MAX_BC_BUFFER) {
+ 		ps_dbg(tx->sdata,
+ 		       "BC TX buffer full - dropping the oldest frame\n");
+-		dev_kfree_skb(skb_dequeue(&ps->bc_buf));
++		ieee80211_free_txskb(&tx->local->hw, skb_dequeue(&ps->bc_buf));
+ 	} else
+ 		tx->local->total_ps_buffered++;
+ 
+@@ -3247,7 +3247,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
+ 			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
+ 		if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb))
+ 			break;
+-		dev_kfree_skb_any(skb);
++		ieee80211_free_txskb(hw, skb);
+ 	}
+ 
+ 	info = IEEE80211_SKB_CB(skb);
+diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
+index 841d05946b88..5420f2fe717e 100644
+--- a/sound/soc/atmel/atmel_ssc_dai.c
++++ b/sound/soc/atmel/atmel_ssc_dai.c
+@@ -298,8 +298,9 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
+ 	clk_enable(ssc_p->ssc->clk);
+ 	ssc_p->mck_rate = clk_get_rate(ssc_p->ssc->clk);
+ 
+-	/* Reset the SSC to keep it at a clean status */
+-	ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
++	/* Reset the SSC unless initialized to keep it in a clean state */
++	if (!ssc_p->initialized)
++		ssc_writel(ssc_p->ssc->regs, CR, SSC_BIT(CR_SWRST));
+ 
+ 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		dir = 0;
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index 204cc074adb9..41aa3355e920 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -55,7 +55,6 @@ static int snd_line6_impulse_volume_put(struct snd_kcontrol *kcontrol,
+ 		err = line6_pcm_acquire(line6pcm, LINE6_STREAM_IMPULSE);
+ 		if (err < 0) {
+ 			line6pcm->impulse_volume = 0;
+-			line6_pcm_release(line6pcm, LINE6_STREAM_IMPULSE);
+ 			return err;
+ 		}
+ 	} else {
+@@ -211,7 +210,9 @@ static void line6_stream_stop(struct snd_line6_pcm *line6pcm, int direction,
+ 	spin_lock_irqsave(&pstr->lock, flags);
+ 	clear_bit(type, &pstr->running);
+ 	if (!pstr->running) {
++		spin_unlock_irqrestore(&pstr->lock, flags);
+ 		line6_unlink_audio_urbs(line6pcm, pstr);
++		spin_lock_irqsave(&pstr->lock, flags);
+ 		if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ 			line6pcm->prev_fbuf = NULL;
+ 			line6pcm->prev_fsize = 0;
+diff --git a/sound/usb/line6/pod.c b/sound/usb/line6/pod.c
+index daf81d169a42..45dd34874f43 100644
+--- a/sound/usb/line6/pod.c
++++ b/sound/usb/line6/pod.c
+@@ -244,8 +244,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
+ static ssize_t serial_number_show(struct device *dev,
+ 				  struct device_attribute *attr, char *buf)
+ {
+-	struct usb_interface *interface = to_usb_interface(dev);
+-	struct usb_line6_pod *pod = usb_get_intfdata(interface);
++	struct snd_card *card = dev_to_snd_card(dev);
++	struct usb_line6_pod *pod = card->private_data;
+ 
+ 	return sprintf(buf, "%u\n", pod->serial_number);
+ }
+@@ -256,8 +256,8 @@ static ssize_t serial_number_show(struct device *dev,
+ static ssize_t firmware_version_show(struct device *dev,
+ 				     struct device_attribute *attr, char *buf)
+ {
+-	struct usb_interface *interface = to_usb_interface(dev);
+-	struct usb_line6_pod *pod = usb_get_intfdata(interface);
++	struct snd_card *card = dev_to_snd_card(dev);
++	struct usb_line6_pod *pod = card->private_data;
+ 
+ 	return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
+ 		       pod->firmware_version % 100);
+@@ -269,8 +269,8 @@ static ssize_t firmware_version_show(struct device *dev,
+ static ssize_t device_id_show(struct device *dev,
+ 			      struct device_attribute *attr, char *buf)
+ {
+-	struct usb_interface *interface = to_usb_interface(dev);
+-	struct usb_line6_pod *pod = usb_get_intfdata(interface);
++	struct snd_card *card = dev_to_snd_card(dev);
++	struct usb_line6_pod *pod = card->private_data;
+ 
+ 	return sprintf(buf, "%d\n", pod->device_id);
+ }

diff --git a/1032_linux-4.1.33.patch b/1032_linux-4.1.33.patch
new file mode 100644
index 0000000..17c9a86
--- /dev/null
+++ b/1032_linux-4.1.33.patch
@@ -0,0 +1,982 @@
+diff --git a/Makefile b/Makefile
+index e995f28b6d37..47c47d7c0926 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 32
++SUBLEVEL = 33
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 09c549826c5f..9615fe1701c6 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -96,7 +96,7 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+ 
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_SPECIAL)
++#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+ 
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE     __pgprot(___DEF)
+diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
+index b83f3b7737fb..087acb569b63 100644
+--- a/arch/arm/kernel/sys_oabi-compat.c
++++ b/arch/arm/kernel/sys_oabi-compat.c
+@@ -193,15 +193,44 @@ struct oabi_flock64 {
+ 	pid_t	l_pid;
+ } __attribute__ ((packed,aligned(4)));
+ 
+-asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
++static long do_locks(unsigned int fd, unsigned int cmd,
+ 				 unsigned long arg)
+ {
+-	struct oabi_flock64 user;
+ 	struct flock64 kernel;
+-	mm_segment_t fs = USER_DS; /* initialized to kill a warning */
+-	unsigned long local_arg = arg;
+-	int ret;
++	struct oabi_flock64 user;
++	mm_segment_t fs;
++	long ret;
++
++	if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
++			   sizeof(user)))
++		return -EFAULT;
++	kernel.l_type	= user.l_type;
++	kernel.l_whence	= user.l_whence;
++	kernel.l_start	= user.l_start;
++	kernel.l_len	= user.l_len;
++	kernel.l_pid	= user.l_pid;
++
++	fs = get_fs();
++	set_fs(KERNEL_DS);
++	ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel);
++	set_fs(fs);
++
++	if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) {
++		user.l_type	= kernel.l_type;
++		user.l_whence	= kernel.l_whence;
++		user.l_start	= kernel.l_start;
++		user.l_len	= kernel.l_len;
++		user.l_pid	= kernel.l_pid;
++		if (copy_to_user((struct oabi_flock64 __user *)arg,
++				 &user, sizeof(user)))
++			ret = -EFAULT;
++	}
++	return ret;
++}
+ 
++asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
++				 unsigned long arg)
++{
+ 	switch (cmd) {
+ 	case F_OFD_GETLK:
+ 	case F_OFD_SETLK:
+@@ -209,39 +238,11 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
+ 	case F_GETLK64:
+ 	case F_SETLK64:
+ 	case F_SETLKW64:
+-		if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
+-				   sizeof(user)))
+-			return -EFAULT;
+-		kernel.l_type	= user.l_type;
+-		kernel.l_whence	= user.l_whence;
+-		kernel.l_start	= user.l_start;
+-		kernel.l_len	= user.l_len;
+-		kernel.l_pid	= user.l_pid;
+-		local_arg = (unsigned long)&kernel;
+-		fs = get_fs();
+-		set_fs(KERNEL_DS);
+-	}
+-
+-	ret = sys_fcntl64(fd, cmd, local_arg);
++		return do_locks(fd, cmd, arg);
+ 
+-	switch (cmd) {
+-	case F_GETLK64:
+-		if (!ret) {
+-			user.l_type	= kernel.l_type;
+-			user.l_whence	= kernel.l_whence;
+-			user.l_start	= kernel.l_start;
+-			user.l_len	= kernel.l_len;
+-			user.l_pid	= kernel.l_pid;
+-			if (copy_to_user((struct oabi_flock64 __user *)arg,
+-					 &user, sizeof(user)))
+-				ret = -EFAULT;
+-		}
+-	case F_SETLK64:
+-	case F_SETLKW64:
+-		set_fs(fs);
++	default:
++		return sys_fcntl64(fd, cmd, arg);
+ 	}
+-
+-	return ret;
+ }
+ 
+ struct oabi_epoll_event {
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index e4cf63301ff4..c6c4248ab138 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -618,6 +618,17 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
+ 		set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
+ }
+ 
++#define MSR_AMD64_DE_CFG	0xC0011029
++
++static void init_amd_ln(struct cpuinfo_x86 *c)
++{
++	/*
++	 * Apply erratum 665 fix unconditionally so machines without a BIOS
++	 * fix work.
++	 */
++	msr_set_bit(MSR_AMD64_DE_CFG, 31);
++}
++
+ static void init_amd_bd(struct cpuinfo_x86 *c)
+ {
+ 	u64 value;
+@@ -675,6 +686,7 @@ static void init_amd(struct cpuinfo_x86 *c)
+ 	case 6:	   init_amd_k7(c); break;
+ 	case 0xf:  init_amd_k8(c); break;
+ 	case 0x10: init_amd_gh(c); break;
++	case 0x12: init_amd_ln(c); break;
+ 	case 0x15: init_amd_bd(c); break;
+ 	}
+ 
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index 1f316f066c49..68cf2ec816d5 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -55,12 +55,12 @@ asm (".pushsection .entry.text, \"ax\"\n"
+      ".popsection");
+ 
+ /* identity function, which can be inlined */
+-u32 _paravirt_ident_32(u32 x)
++u32 notrace _paravirt_ident_32(u32 x)
+ {
+ 	return x;
+ }
+ 
+-u64 _paravirt_ident_64(u64 x)
++u64 notrace _paravirt_ident_64(u64 x)
+ {
+ 	return x;
+ }
+diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
+index ebd8a5f398b0..900cf63d0420 100644
+--- a/drivers/dma/sh/usb-dmac.c
++++ b/drivers/dma/sh/usb-dmac.c
+@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
+ {
+ 	struct usb_dmac_chan *chan = dev;
+ 	irqreturn_t ret = IRQ_NONE;
+-	u32 mask = USB_DMACHCR_TE;
+-	u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
++	u32 mask = 0;
+ 	u32 chcr;
++	bool xfer_end = false;
+ 
+ 	spin_lock(&chan->vc.lock);
+ 
+ 	chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
+-	if (chcr & check_bits)
+-		mask |= USB_DMACHCR_DE | check_bits;
++	if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
++		mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
++		if (chcr & USB_DMACHCR_DE)
++			xfer_end = true;
++		ret |= IRQ_HANDLED;
++	}
+ 	if (chcr & USB_DMACHCR_NULL) {
+ 		/* An interruption of TE will happen after we set FTE */
+ 		mask |= USB_DMACHCR_NULL;
+ 		chcr |= USB_DMACHCR_FTE;
+ 		ret |= IRQ_HANDLED;
+ 	}
+-	usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
++	if (mask)
++		usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+ 
+-	if (chcr & check_bits) {
++	if (xfer_end)
+ 		usb_dmac_isr_transfer_end(chan);
+-		ret |= IRQ_HANDLED;
+-	}
+ 
+ 	spin_unlock(&chan->vc.lock);
+ 
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index d8738d4f8df3..78c911be115d 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -160,6 +160,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		if (ret < 0)
+ 			goto error_ret;
+ 		*val = ret;
++		ret = IIO_VAL_INT;
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 049282e6482f..ce507a405d05 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -1433,7 +1433,7 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
+ 	unsigned i;
+ 	int err;
+ 
+-	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
++	cc->tfms = kzalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+ 			   GFP_KERNEL);
+ 	if (!cc->tfms)
+ 		return -ENOMEM;
+@@ -1898,6 +1898,13 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
+ 		return DM_MAPIO_REMAPPED;
+ 	}
+ 
++	/*
++	 * Check if bio is too large, split as needed.
++	 */
++	if (unlikely(bio->bi_iter.bi_size > (BIO_MAX_PAGES << PAGE_SHIFT)) &&
++	    bio_data_dir(bio) == WRITE)
++		dm_accept_partial_bio(bio, ((BIO_MAX_PAGES << PAGE_SHIFT) >> SECTOR_SHIFT));
++
+ 	io = dm_per_bio_data(bio, cc->per_bio_data_size);
+ 	crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
+ 	io->ctx.req = (struct ablkcipher_request *)(io + 1);
+diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
+index 93e08446a87d..9e136baf5fc5 100644
+--- a/drivers/md/dm-log-writes.c
++++ b/drivers/md/dm-log-writes.c
+@@ -259,12 +259,12 @@ static int log_one_block(struct log_writes_c *lc,
+ 		goto out;
+ 	sector++;
+ 
+-	bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
++	atomic_inc(&lc->io_blocks);
++	bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt, BIO_MAX_PAGES));
+ 	if (!bio) {
+ 		DMERR("Couldn't alloc log bio");
+ 		goto error;
+ 	}
+-	atomic_inc(&lc->io_blocks);
+ 	bio->bi_iter.bi_size = 0;
+ 	bio->bi_iter.bi_sector = sector;
+ 	bio->bi_bdev = lc->logdev->bdev;
+@@ -282,7 +282,7 @@ static int log_one_block(struct log_writes_c *lc,
+ 		if (ret != block->vecs[i].bv_len) {
+ 			atomic_inc(&lc->io_blocks);
+ 			submit_bio(WRITE, bio);
+-			bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
++			bio = bio_alloc(GFP_KERNEL, min(block->vec_cnt - i, BIO_MAX_PAGES));
+ 			if (!bio) {
+ 				DMERR("Couldn't alloc log bio");
+ 				goto error;
+@@ -456,7 +456,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	}
+ 
+ 	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
+-	if (!lc->log_kthread) {
++	if (IS_ERR(lc->log_kthread)) {
+ 		ti->error = "Couldn't alloc kthread";
+ 		dm_put_device(ti, lc->dev);
+ 		dm_put_device(ti, lc->logdev);
+diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
+index 80d613c0fbc6..6fd2c17125fc 100644
+--- a/drivers/staging/comedi/drivers/comedi_test.c
++++ b/drivers/staging/comedi/drivers/comedi_test.c
+@@ -55,10 +55,6 @@ zero volts).
+ 
+ #define N_CHANS 8
+ 
+-enum waveform_state_bits {
+-	WAVEFORM_AI_RUNNING = 0
+-};
+-
+ /* Data unique to this driver */
+ struct waveform_private {
+ 	struct timer_list timer;
+@@ -67,7 +63,6 @@ struct waveform_private {
+ 	unsigned long usec_period;	/* waveform period in microseconds */
+ 	unsigned long usec_current;	/* current time (mod waveform period) */
+ 	unsigned long usec_remainder;	/* usec since last scan */
+-	unsigned long state_bits;
+ 	unsigned int scan_period;	/* scan period in usec */
+ 	unsigned int convert_period;	/* conversion period in usec */
+ 	unsigned int ao_loopbacks[N_CHANS];
+@@ -177,10 +172,6 @@ static void waveform_ai_interrupt(unsigned long arg)
+ 	unsigned int num_scans;
+ 	ktime_t now;
+ 
+-	/* check command is still active */
+-	if (!test_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits))
+-		return;
+-
+ 	now = ktime_get();
+ 
+ 	elapsed_time = ktime_to_us(ktime_sub(now, devpriv->last));
+@@ -322,10 +313,6 @@ static int waveform_ai_cmd(struct comedi_device *dev,
+ 	devpriv->usec_remainder = 0;
+ 
+ 	devpriv->timer.expires = jiffies + 1;
+-	/* mark command as active */
+-	smp_mb__before_atomic();
+-	set_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+-	smp_mb__after_atomic();
+ 	add_timer(&devpriv->timer);
+ 	return 0;
+ }
+@@ -335,11 +322,12 @@ static int waveform_ai_cancel(struct comedi_device *dev,
+ {
+ 	struct waveform_private *devpriv = dev->private;
+ 
+-	/* mark command as no longer active */
+-	clear_bit(WAVEFORM_AI_RUNNING, &devpriv->state_bits);
+-	smp_mb__after_atomic();
+-	/* cannot call del_timer_sync() as may be called from timer routine */
+-	del_timer(&devpriv->timer);
++	if (in_softirq()) {
++		/* Assume we were called from the timer routine itself. */
++		del_timer(&devpriv->timer);
++	} else {
++		del_timer_sync(&devpriv->timer);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
+index f97d18d92255..63c338223548 100644
+--- a/drivers/staging/comedi/drivers/daqboard2000.c
++++ b/drivers/staging/comedi/drivers/daqboard2000.c
+@@ -636,7 +636,7 @@ static const void *daqboard2000_find_boardinfo(struct comedi_device *dev,
+ 	const struct daq200_boardtype *board;
+ 	int i;
+ 
+-	if (pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH)
++	if (pcidev->subsystem_vendor != PCI_VENDOR_ID_IOTECH)
+ 		return NULL;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(boardtypes); i++) {
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index 34b3a522668f..4b8da862cd7e 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -2987,7 +2987,15 @@ static int ni_ao_inttrig(struct comedi_device *dev,
+ 	int i;
+ 	static const int timeout = 1000;
+ 
+-	if (trig_num != cmd->start_arg)
++	/*
++	 * Require trig_num == cmd->start_arg when cmd->start_src == TRIG_INT.
++	 * For backwards compatibility, also allow trig_num == 0 when
++	 * cmd->start_src != TRIG_INT (i.e. when cmd->start_src == TRIG_EXT);
++	 * in that case, the internal trigger is being used as a pre-trigger
++	 * before the external trigger.
++	 */
++	if (!(trig_num == cmd->start_arg ||
++	      (trig_num == 0 && cmd->start_src != TRIG_INT)))
+ 		return -EINVAL;
+ 
+ 	/* Null trig at beginning prevent ao start trigger from executing more than
+@@ -5635,7 +5643,7 @@ static int ni_E_init(struct comedi_device *dev,
+ 		s->maxdata	= (devpriv->is_m_series) ? 0xffffffff
+ 							 : 0x00ffffff;
+ 		s->insn_read	= ni_tio_insn_read;
+-		s->insn_write	= ni_tio_insn_read;
++		s->insn_write	= ni_tio_insn_write;
+ 		s->insn_config	= ni_tio_insn_config;
+ #ifdef PCIDMA
+ 		if (dev->irq && devpriv->mite) {
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index a78a62bf0c96..6057a8a2d57d 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2016,6 +2016,43 @@ pci_wch_ch38x_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_PERICOM_PI7C9X7954	0x7954
+ #define PCI_DEVICE_ID_PERICOM_PI7C9X7958	0x7958
+ 
++#define PCI_VENDOR_ID_ACCESIO			0x494f
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB	0x1051
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S	0x1053
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB	0x105C
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S	0x105E
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB	0x1091
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2	0x1093
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB	0x1099
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4	0x109B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB	0x10D1
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM	0x10D3
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB	0x10DA
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM	0x10DC
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1	0x1108
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2	0x1110
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2	0x1111
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4	0x1118
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4	0x1119
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S	0x1152
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S	0x115A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2	0x1190
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2	0x1191
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4	0x1198
++#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4	0x1199
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM	0x11D0
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4	0x105A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4	0x105B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8	0x106A
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8	0x106B
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4	0x1098
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8	0x10A9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM	0x10D9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM	0x10E9
++#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM	0x11D8
++
++
++
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588
+@@ -5217,6 +5254,108 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		0,
+ 		0, pbn_pericom_PI7C9X7958 },
+ 	/*
++	 * ACCES I/O Products quad
++	 */
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7954 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	{	PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
++		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
++		pbn_pericom_PI7C9X7958 },
++	/*
+ 	 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ 	 */
+ 	{	PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 92937c14f818..d93e43cfb6f8 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1578,8 +1578,11 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
+ {
+ 	struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ 
+-	/* Data+ pullup controlled by OTG state machine in OTG fsm mode */
+-	if (ci_otg_is_fsm_mode(ci))
++	/*
++	 * Data+ pullup controlled by OTG state machine in OTG fsm mode;
++	 * and don't touch Data+ in host mode for dual role config.
++	 */
++	if (ci_otg_is_fsm_mode(ci) || ci->role == CI_ROLE_HOST)
+ 		return 0;
+ 
+ 	pm_runtime_get_sync(&ci->gadget.dev);
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 986abde07683..3d46b0bae75c 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1526,11 +1526,17 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ 	as->urb->start_frame = uurb->start_frame;
+ 	as->urb->number_of_packets = number_of_packets;
+ 	as->urb->stream_id = stream_id;
+-	if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
+-			ps->dev->speed == USB_SPEED_HIGH)
+-		as->urb->interval = 1 << min(15, ep->desc.bInterval - 1);
+-	else
+-		as->urb->interval = ep->desc.bInterval;
++
++	if (ep->desc.bInterval) {
++		if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
++				ps->dev->speed == USB_SPEED_HIGH ||
++				ps->dev->speed >= USB_SPEED_SUPER)
++			as->urb->interval = 1 <<
++					min(15, ep->desc.bInterval - 1);
++		else
++			as->urb->interval = ep->desc.bInterval;
++	}
++
+ 	as->urb->context = as;
+ 	as->urb->complete = async_completed;
+ 	for (totlen = u = 0; u < number_of_packets; u++) {
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 4f70df33975a..9a0c610d85a9 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1239,7 +1239,7 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 
+ 	if (urb->transfer_buffer == NULL) {
+ 		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+-					       GFP_KERNEL);
++					       GFP_ATOMIC);
+ 		if (!urb->transfer_buffer)
+ 			goto exit;
+ 	}
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index e4473a9109cf..f5ab4cd9e7a1 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1340,8 +1340,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ 	}
+ 
+ 	if (urb->transfer_buffer == NULL) {
+-		urb->transfer_buffer =
+-		    kmalloc(URB_TRANSFER_BUFFER_SIZE, GFP_KERNEL);
++		urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
++					       GFP_ATOMIC);
+ 		if (!urb->transfer_buffer)
+ 			goto exit;
+ 	}
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ca03fbfa2a32..a599e8a841b0 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -528,6 +528,12 @@ static void option_instat_callback(struct urb *urb);
+ #define VIATELECOM_VENDOR_ID			0x15eb
+ #define VIATELECOM_PRODUCT_CDS7			0x0001
+ 
++/* WeTelecom products */
++#define WETELECOM_VENDOR_ID			0x22de
++#define WETELECOM_PRODUCT_WMD200		0x6801
++#define WETELECOM_PRODUCT_6802			0x6802
++#define WETELECOM_PRODUCT_WMD300		0x6803
++
+ struct option_blacklist_info {
+ 	/* bitmask of interface numbers blacklisted for send_setup */
+ 	const unsigned long sendsetup;
+@@ -2003,6 +2009,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+ 	{ } /* Terminating entry */
+ };
+ MODULE_DEVICE_TABLE(usb, option_ids);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index f01dd19adb7b..afb3eb3e8b0f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2082,6 +2082,7 @@ void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
+ 
+ /* Called at mount-time, super-block is locked */
+ static int ext4_check_descriptors(struct super_block *sb,
++				  ext4_fsblk_t sb_block,
+ 				  ext4_group_t *first_not_zeroed)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+@@ -2112,6 +2113,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			grp = i;
+ 
+ 		block_bitmap = ext4_block_bitmap(sb, gdp);
++		if (block_bitmap == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Block bitmap for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (block_bitmap < first_block || block_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 			       "Block bitmap for group %u not in group "
+@@ -2119,6 +2125,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			return 0;
+ 		}
+ 		inode_bitmap = ext4_inode_bitmap(sb, gdp);
++		if (inode_bitmap == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Inode bitmap for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (inode_bitmap < first_block || inode_bitmap > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ 			       "Inode bitmap for group %u not in group "
+@@ -2126,6 +2137,11 @@ static int ext4_check_descriptors(struct super_block *sb,
+ 			return 0;
+ 		}
+ 		inode_table = ext4_inode_table(sb, gdp);
++		if (inode_table == sb_block) {
++			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
++				 "Inode table for group %u overlaps "
++				 "superblock", i);
++		}
+ 		if (inode_table < first_block ||
+ 		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
+ 			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+@@ -3989,7 +4005,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 			goto failed_mount2;
+ 		}
+ 	}
+-	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
++	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
+ 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
+ 		goto failed_mount2;
+ 	}
+diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
+index 2bacb9988566..9ff28bc294c0 100644
+--- a/fs/kernfs/file.c
++++ b/fs/kernfs/file.c
+@@ -834,21 +834,35 @@ repeat:
+ 	mutex_lock(&kernfs_mutex);
+ 
+ 	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
++		struct kernfs_node *parent;
+ 		struct inode *inode;
+-		struct dentry *dentry;
+ 
++		/*
++		 * We want fsnotify_modify() on @kn but as the
++		 * modifications aren't originating from userland, we don't
++		 * have the matching @file available.  Look up the inodes
++		 * and generate the events manually.
++		 */
+ 		inode = ilookup(info->sb, kn->ino);
+ 		if (!inode)
+ 			continue;
+ 
+-		dentry = d_find_any_alias(inode);
+-		if (dentry) {
+-			fsnotify_parent(NULL, dentry, FS_MODIFY);
+-			fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
+-				 NULL, 0);
+-			dput(dentry);
++		parent = kernfs_get_parent(kn);
++		if (parent) {
++			struct inode *p_inode;
++
++			p_inode = ilookup(info->sb, parent->ino);
++			if (p_inode) {
++				fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
++					 inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
++				iput(p_inode);
++			}
++
++			kernfs_put(parent);
+ 		}
+ 
++		fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
++			 kn->name, 0);
+ 		iput(inode);
+ 	}
+ 
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 8d129bb7355a..a78558a25035 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -303,6 +303,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, struct n
+ err_socks:
+ 	svc_rpcb_cleanup(serv, net);
+ err_bind:
++	nn->cb_users[minorversion]--;
+ 	dprintk("NFS: Couldn't create callback socket: err = %d; "
+ 			"net = %p\n", ret, net);
+ 	return ret;
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index d47c188682b1..ea4fe630cdac 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1472,6 +1472,7 @@ pnfs_update_layout(struct inode *ino,
+ 		goto out;
+ 
+ lookup_again:
++	nfs4_client_recover_expired_lease(clp);
+ 	first = false;
+ 	spin_lock(&ino->i_lock);
+ 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 758012bfd5f0..4d8aa749d9b2 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -48,6 +48,8 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ 	}
+ 
+ 	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
++		if (ovl_is_private_xattr(name))
++			continue;
+ retry:
+ 		size = vfs_getxattr(old, name, value, value_size);
+ 		if (size == -ERANGE)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 97fd65700ae2..0bb8347c0d8b 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -221,8 +221,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+ 	return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
+ }
+ 
+-
+-static bool ovl_is_private_xattr(const char *name)
++bool ovl_is_private_xattr(const char *name)
+ {
+ 	return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
+ }
+@@ -280,7 +279,8 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+ 	struct path realpath;
+ 	enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ 	ssize_t res;
+-	int off;
++	size_t len;
++	char *s;
+ 
+ 	res = vfs_listxattr(realpath.dentry, list, size);
+ 	if (res <= 0 || size == 0)
+@@ -290,17 +290,19 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+ 		return res;
+ 
+ 	/* filter out private xattrs */
+-	for (off = 0; off < res;) {
+-		char *s = list + off;
+-		size_t slen = strlen(s) + 1;
++	for (s = list, len = res; len;) {
++		size_t slen = strnlen(s, len) + 1;
+ 
+-		BUG_ON(off + slen > res);
++		/* underlying fs providing us with a broken xattr list? */
++		if (WARN_ON(slen > len))
++			return -EIO;
+ 
++		len -= slen;
+ 		if (ovl_is_private_xattr(s)) {
+ 			res -= slen;
+-			memmove(s, s + slen, res - off);
++			memmove(s, s + slen, len);
+ 		} else {
+-			off += slen;
++			s += slen;
+ 		}
+ 	}
+ 
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 983540910ba8..32b077b07085 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -174,6 +174,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
+ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
+ int ovl_removexattr(struct dentry *dentry, const char *name);
+ struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
++bool ovl_is_private_xattr(const char *name);
+ 
+ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+ 			    struct ovl_entry *oe);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 39266655d2bd..c9b740111526 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -658,6 +658,10 @@ retry:
+ 		struct kstat stat = {
+ 			.mode = S_IFDIR | 0,
+ 		};
++		struct iattr attr = {
++			.ia_valid = ATTR_MODE,
++			.ia_mode = stat.mode,
++		};
+ 
+ 		if (work->d_inode) {
+ 			err = -EEXIST;
+@@ -673,6 +677,21 @@ retry:
+ 		err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
+ 		if (err)
+ 			goto out_dput;
++
++		err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_DEFAULT);
++		if (err && err != -ENODATA)
++			goto out_dput;
++
++		err = vfs_removexattr(work, XATTR_NAME_POSIX_ACL_ACCESS);
++		if (err && err != -ENODATA)
++			goto out_dput;
++
++		/* Clear any inherited mode bits */
++		mutex_lock(&work->d_inode->i_mutex);
++		err = notify_change(work, &attr, NULL);
++		mutex_unlock(&work->d_inode->i_mutex);
++		if (err)
++			goto out_dput;
+ 	}
+ out_unlock:
+ 	mutex_unlock(&dir->i_mutex);
+diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
+index 7c2867b44141..167f80e8f08d 100644
+--- a/fs/sysfs/file.c
++++ b/fs/sysfs/file.c
+@@ -108,14 +108,22 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
+ {
+ 	const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
+ 	struct kobject *kobj = of->kn->parent->priv;
++	size_t len;
+ 
+ 	/*
+ 	 * If buf != of->prealloc_buf, we don't know how
+ 	 * large it is, so cannot safely pass it to ->show
+ 	 */
+-	if (pos || WARN_ON_ONCE(buf != of->prealloc_buf))
++	if (WARN_ON_ONCE(buf != of->prealloc_buf))
+ 		return 0;
+-	return ops->show(kobj, of->kn->priv, buf);
++	len = ops->show(kobj, of->kn->priv, buf);
++	if (pos) {
++		if (len <= pos)
++			return 0;
++		len -= pos;
++		memmove(buf, buf + pos, len);
++	}
++	return min(count, len);
+ }
+ 
+ /* kernfs write callback for regular sysfs files */
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index 535bd843f2f4..901f11b30174 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -539,7 +539,8 @@ xfs_sb_verify(
+ 	 * Only check the in progress field for the primary superblock as
+ 	 * mkfs.xfs doesn't clear it from secondary superblocks.
+ 	 */
+-	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
++	return xfs_mount_validate_sb(mp, &sb,
++				     bp->b_maps[0].bm_bn == XFS_SB_DADDR,
+ 				     check_version);
+ }
+ 
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 7dd64bf98c56..6e2b892b58ee 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -1537,7 +1537,7 @@ xfs_wait_buftarg(
+ 	 * ensure here that all reference counts have been dropped before we
+ 	 * start walking the LRU list.
+ 	 */
+-	drain_workqueue(btp->bt_mount->m_buf_workqueue);
++	flush_workqueue(btp->bt_mount->m_buf_workqueue);
+ 
+ 	/* loop until there is nothing left on the lru list. */
+ 	while (list_lru_count(&btp->bt_lru)) {
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index f0acff0f66c9..388fc6f78c6f 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -2042,6 +2042,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
+ 	mutex_unlock(&cpuset_mutex);
+ }
+ 
++/*
++ * Make sure the new task conforms to the current state of its parent,
++ * which could have been changed by cpuset just after it inherits the
++ * state from the parent and before it sits on the cgroup's task list.
++ */
++void cpuset_fork(struct task_struct *task)
++{
++	if (task_css_is_root(task, cpuset_cgrp_id))
++		return;
++
++	set_cpus_allowed_ptr(task, &current->cpus_allowed);
++	task->mems_allowed = current->mems_allowed;
++}
++
+ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.css_alloc	= cpuset_css_alloc,
+ 	.css_online	= cpuset_css_online,
+@@ -2051,6 +2065,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
+ 	.cancel_attach	= cpuset_cancel_attach,
+ 	.attach		= cpuset_attach,
+ 	.bind		= cpuset_bind,
++	.fork		= cpuset_fork,
+ 	.legacy_cftypes	= files,
+ 	.early_init	= 1,
+ };


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-08-22 23:29 Mike Pagano
From: Mike Pagano @ 2016-08-22 23:29 UTC
  To: gentoo-commits

commit:     d944130d7f18157ffd062622fe16340d4f456677
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 22 23:28:53 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug 22 23:28:53 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d944130d

Linux patch 4.1.31

 0000_README             |    4 +
 1030_linux-4.1.31.patch | 6882 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6886 insertions(+)

diff --git a/0000_README b/0000_README
index f04d74d..0ce7ab6 100644
--- a/0000_README
+++ b/0000_README
@@ -163,6 +163,10 @@ Patch:  1029_linux-4.1.30.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.30
 
+Patch:  1030_linux-4.1.31.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.31
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1030_linux-4.1.31.patch b/1030_linux-4.1.31.patch
new file mode 100644
index 0000000..c3197a6
--- /dev/null
+++ b/1030_linux-4.1.31.patch
@@ -0,0 +1,6882 @@
+diff --git a/.mailmap b/.mailmap
+index 6287004040e7..d4b4748bab36 100644
+--- a/.mailmap
++++ b/.mailmap
+@@ -80,6 +80,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
+ Linas Vepstas <linas@austin.ibm.com>
+ Mark Brown <broonie@sirena.org.uk>
+ Matthieu CASTET <castet.matthieu@free.fr>
++Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
+ Mayuresh Janorkar <mayur@ti.com>
+ Michael Buesch <m@bues.ch>
+ Michel Dänzer <michel@tungstengraphics.com>
+diff --git a/CREDITS b/CREDITS
+index ec7e6c7fdd1b..a830a8622417 100644
+--- a/CREDITS
++++ b/CREDITS
+@@ -644,6 +644,7 @@ D: Configure, Menuconfig, xconfig
+ 
+ N: Mauro Carvalho Chehab
+ E: m.chehab@samsung.org
++E: mchehab@osg.samsung.com
+ E: mchehab@infradead.org
+ D: Media subsystem (V4L/DVB) drivers and core
+ D: EDAC drivers and EDAC 3.0 core rework
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+index 6708c5e264aa..33e96f740639 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
++++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+@@ -1,4 +1,4 @@
+-What		/sys/bus/iio/devices/iio:deviceX/in_proximity_raw
++What		/sys/bus/iio/devices/iio:deviceX/in_proximity_input
+ Date:		March 2014
+ KernelVersion:	3.15
+ Contact:	Matt Ranostay <mranostay@gmail.com>
+diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
+index c72702ec1ded..aacf1ce73b8a 100644
+--- a/Documentation/module-signing.txt
++++ b/Documentation/module-signing.txt
+@@ -239,3 +239,9 @@ Since the private key is used to sign modules, viruses and malware could use
+ the private key to sign modules and compromise the operating system.  The
+ private key must be either destroyed or moved to a secure location and not kept
+ in the root node of the kernel source tree.
++
++If you use the same private key to sign modules for multiple kernel
++configurations, you must ensure that the module version information is
++sufficient to prevent loading a module into a different kernel.  Either
++set CONFIG_MODVERSIONS=y or ensure that each configuration has a different
++kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION.
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a1d127a83a48..ecd0eb88f51a 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -1913,7 +1913,8 @@ F:	include/net/ax25.h
+ F:	net/ax25/
+ 
+ AZ6007 DVB DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -2318,7 +2319,8 @@ F:	Documentation/filesystems/btrfs.txt
+ F:	fs/btrfs/
+ 
+ BTTV VIDEO4LINUX DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -2882,7 +2884,8 @@ F:	drivers/media/common/cx2341x*
+ F:	include/media/cx2341x*
+ 
+ CX88 VIDEO4LINUX DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -3614,7 +3617,8 @@ F:	fs/ecryptfs/
+ EDAC-CORE
+ M:	Doug Thompson <dougthompson@xmission.com>
+ M:	Borislav Petkov <bp@alien8.de>
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ T:	git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git#for-next
+@@ -3665,7 +3669,8 @@ S:	Maintained
+ F:	drivers/edac/e7xxx_edac.c
+ 
+ EDAC-GHES
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ S:	Maintained
+@@ -3693,21 +3698,24 @@ S:	Maintained
+ F:	drivers/edac/i5000_edac.c
+ 
+ EDAC-I5400
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ S:	Maintained
+ F:	drivers/edac/i5400_edac.c
+ 
+ EDAC-I7300
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ S:	Maintained
+ F:	drivers/edac/i7300_edac.c
+ 
+ EDAC-I7CORE
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ S:	Maintained
+@@ -3750,7 +3758,8 @@ S:	Maintained
+ F:	drivers/edac/r82600_edac.c
+ 
+ EDAC-SBRIDGE
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-edac@vger.kernel.org
+ W:	bluesmoke.sourceforge.net
+ S:	Maintained
+@@ -3810,7 +3819,8 @@ S:	Maintained
+ F:	drivers/net/ethernet/ibm/ehea/
+ 
+ EM28XX VIDEO4LINUX DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -6345,7 +6355,8 @@ S:	Maintained
+ F:	drivers/media/radio/radio-maxiradio*
+ 
+ MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ P:	LinuxTV.org Project
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+@@ -8502,7 +8513,8 @@ S:	Odd Fixes
+ F:	drivers/media/i2c/saa6588*
+ 
+ SAA7134 VIDEO4LINUX DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -8947,7 +8959,8 @@ S:	Maintained
+ F:	drivers/media/radio/si4713/radio-usb-si4713.c
+ 
+ SIANO DVB DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -9695,7 +9708,8 @@ S:	Maintained
+ F:	drivers/media/i2c/tda9840*
+ 
+ TEA5761 TUNER DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -9703,7 +9717,8 @@ S:	Odd fixes
+ F:	drivers/media/tuners/tea5761.*
+ 
+ TEA5767 TUNER DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -10042,7 +10057,8 @@ F:	include/linux/shmem_fs.h
+ F:	mm/shmem.c
+ 
+ TM6000 VIDEO4LINUX DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+@@ -10900,7 +10916,8 @@ S:	Maintained
+ F:	arch/x86/vdso/
+ 
+ XC2028/3028 TUNER DRIVER
+-M:	Mauro Carvalho Chehab <mchehab@osg.samsung.com>
++M:	Mauro Carvalho Chehab <mchehab@s-opensource.com>
++M:	Mauro Carvalho Chehab <mchehab@kernel.org>
+ L:	linux-media@vger.kernel.org
+ W:	http://linuxtv.org
+ T:	git git://linuxtv.org/media_tree.git
+diff --git a/Makefile b/Makefile
+index 137679c0cc49..bea5ca2ca2b0 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 30
++SUBLEVEL = 31
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
+index 9615fe1701c6..09c549826c5f 100644
+--- a/arch/arc/include/asm/pgtable.h
++++ b/arch/arc/include/asm/pgtable.h
+@@ -96,7 +96,7 @@
+ #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
+ 
+ /* Set of bits not changed in pte_modify */
+-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
++#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_SPECIAL)
+ 
+ /* More Abbrevaited helpers */
+ #define PAGE_U_NONE     __pgprot(___DEF)
+diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts
+index b67e5be618cf..28b6b0e46e63 100644
+--- a/arch/arm/boot/dts/sun4i-a10-a1000.dts
++++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts
+@@ -178,6 +178,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+index d3f73ea25567..864acaa00c9e 100644
+--- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts
++++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts
+@@ -159,6 +159,7 @@
+ 		regulator-name = "emac-3v3";
+ 		regulator-min-microvolt = <3300000>;
+ 		regulator-max-microvolt = <3300000>;
++		startup-delay-us = <20000>;
+ 		enable-active-high;
+ 		gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>;
+ 	};
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index d4264bb0a409..7cc281d94769 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -58,6 +58,7 @@
+ #define COMPAT_PSR_Z_BIT	0x40000000
+ #define COMPAT_PSR_N_BIT	0x80000000
+ #define COMPAT_PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
++#define COMPAT_PSR_GE_MASK	0x000f0000
+ 
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define COMPAT_PSR_ENDSTATE	COMPAT_PSR_E_BIT
+@@ -116,6 +117,8 @@ struct pt_regs {
+ 	};
+ 	u64 orig_x0;
+ 	u64 syscallno;
++	u64 orig_addr_limit;
++	u64 unused;	// maintain 16 byte alignment
+ };
+ 
+ #define arch_has_single_step()	(1)
+@@ -151,35 +154,9 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
+ 	return regs->regs[0];
+ }
+ 
+-/*
+- * Are the current registers suitable for user mode? (used to maintain
+- * security in signal handlers)
+- */
+-static inline int valid_user_regs(struct user_pt_regs *regs)
+-{
+-	if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) {
+-		regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT);
+-
+-		/* The T bit is reserved for AArch64 */
+-		if (!(regs->pstate & PSR_MODE32_BIT))
+-			regs->pstate &= ~COMPAT_PSR_T_BIT;
+-
+-		return 1;
+-	}
+-
+-	/*
+-	 * Force PSR to something logical...
+-	 */
+-	regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \
+-			COMPAT_PSR_T_BIT | PSR_MODE32_BIT;
+-
+-	if (!(regs->pstate & PSR_MODE32_BIT)) {
+-		regs->pstate &= ~COMPAT_PSR_T_BIT;
+-		regs->pstate |= PSR_MODE_EL0t;
+-	}
+-
+-	return 0;
+-}
++/* We must avoid circular header include via sched.h */
++struct task_struct;
++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
+ 
+ #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
+ 
+diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
+index da675cc5dfae..4106ac64f95e 100644
+--- a/arch/arm64/kernel/asm-offsets.c
++++ b/arch/arm64/kernel/asm-offsets.c
+@@ -58,6 +58,7 @@ int main(void)
+   DEFINE(S_PC,			offsetof(struct pt_regs, pc));
+   DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
+   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
++  DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
+   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+   BLANK();
+   DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index 0d1d675f2cce..00ced919fa5a 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -150,7 +150,6 @@ static int debug_monitors_init(void)
+ 	/* Clear the OS lock. */
+ 	on_each_cpu(clear_os_lock, NULL, 1);
+ 	isb();
+-	local_dbg_enable();
+ 
+ 	/* Register hotplug handler. */
+ 	__register_cpu_notifier(&os_lock_nb);
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index bddd04d031db..05012cdb555f 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -27,6 +27,7 @@
+ #include <asm/cpufeature.h>
+ #include <asm/errno.h>
+ #include <asm/esr.h>
++#include <asm/memory.h>
+ #include <asm/thread_info.h>
+ #include <asm/unistd.h>
+ 
+@@ -93,7 +94,13 @@
+ 	disable_step_tsk x19, x20		// exceptions when scheduling.
+ 	.else
+ 	add	x21, sp, #S_FRAME_SIZE
+-	.endif
++	get_thread_info tsk
++	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
++	ldr	x20, [tsk, #TI_ADDR_LIMIT]
++	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
++	mov	x20, #TASK_SIZE_64
++	str	x20, [tsk, #TI_ADDR_LIMIT]
++	.endif /* \el == 0 */
+ 	mrs	x22, elr_el1
+ 	mrs	x23, spsr_el1
+ 	stp	lr, x21, [sp, #S_LR]
+@@ -117,6 +124,12 @@
+ 	.endm
+ 
+ 	.macro	kernel_exit, el, ret = 0
++	.if	\el != 0
++	/* Restore the task's original addr_limit. */
++	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
++	str	x20, [tsk, #TI_ADDR_LIMIT]
++	.endif
++
+ 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+ 	.if	\el == 0
+ 	ct_user_enter
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 608ac6aa497b..7038b9a3b42c 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -39,6 +39,7 @@
+ #include <linux/elf.h>
+ 
+ #include <asm/compat.h>
++#include <asm/cpufeature.h>
+ #include <asm/debug-monitors.h>
+ #include <asm/pgtable.h>
+ #include <asm/syscall.h>
+@@ -500,7 +501,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!valid_user_regs(&newregs))
++	if (!valid_user_regs(&newregs, target))
+ 		return -EINVAL;
+ 
+ 	task_pt_regs(target)->user_regs = newregs;
+@@ -770,7 +771,7 @@ static int compat_gpr_set(struct task_struct *target,
+ 
+ 	}
+ 
+-	if (valid_user_regs(&newregs.user_regs))
++	if (valid_user_regs(&newregs.user_regs, target))
+ 		*task_pt_regs(target) = newregs;
+ 	else
+ 		ret = -EINVAL;
+@@ -1182,3 +1183,79 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE))
+ 		tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
+ }
++
++/*
++ * Bits which are always architecturally RES0 per ARM DDI 0487A.h
++ * Userspace cannot use these until they have an architectural meaning.
++ * We also reserve IL for the kernel; SS is handled dynamically.
++ */
++#define SPSR_EL1_AARCH64_RES0_BITS \
++	(GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
++	 GENMASK_ULL(5, 5))
++#define SPSR_EL1_AARCH32_RES0_BITS \
++	(GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
++
++static int valid_compat_regs(struct user_pt_regs *regs)
++{
++	regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
++
++	if (!system_supports_mixed_endian_el0()) {
++		if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
++			regs->pstate |= COMPAT_PSR_E_BIT;
++		else
++			regs->pstate &= ~COMPAT_PSR_E_BIT;
++	}
++
++	if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
++	    (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
++	    (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
++	    (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
++		return 1;
++	}
++
++	/*
++	 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
++	 * arch/arm.
++	 */
++	regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
++			COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
++			COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
++			COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
++			COMPAT_PSR_T_BIT;
++	regs->pstate |= PSR_MODE32_BIT;
++
++	return 0;
++}
++
++static int valid_native_regs(struct user_pt_regs *regs)
++{
++	regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
++
++	if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
++	    (regs->pstate & PSR_D_BIT) == 0 &&
++	    (regs->pstate & PSR_A_BIT) == 0 &&
++	    (regs->pstate & PSR_I_BIT) == 0 &&
++	    (regs->pstate & PSR_F_BIT) == 0) {
++		return 1;
++	}
++
++	/* Force PSR to a valid 64-bit EL0t */
++	regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
++
++	return 0;
++}
++
++/*
++ * Are the current registers suitable for user mode? (used to maintain
++ * security in signal handlers)
++ */
++int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
++{
++	if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
++		regs->pstate &= ~DBG_SPSR_SS;
++
++	if (is_compat_thread(task_thread_info(task)))
++		return valid_compat_regs(regs);
++	else
++		return valid_native_regs(regs);
++}
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index e18c48cb6db1..a8eafdbc7cb8 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -115,7 +115,7 @@ static int restore_sigframe(struct pt_regs *regs,
+ 	 */
+ 	regs->syscallno = ~0UL;
+ 
+-	err |= !valid_user_regs(&regs->user_regs);
++	err |= !valid_user_regs(&regs->user_regs, current);
+ 
+ 	if (err == 0) {
+ 		struct fpsimd_context *fpsimd_ctx =
+@@ -307,7 +307,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 	/*
+ 	 * Check that the resulting registers are actually sane.
+ 	 */
+-	ret |= !valid_user_regs(&regs->user_regs);
++	ret |= !valid_user_regs(&regs->user_regs, current);
+ 
+ 	/*
+ 	 * Fast forward the stepping logic so we step into the signal
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index c58aee062590..bb3ba4aab30a 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -356,7 +356,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
+ 	 */
+ 	regs->syscallno = ~0UL;
+ 
+-	err |= !valid_user_regs(&regs->user_regs);
++	err |= !valid_user_regs(&regs->user_regs, current);
+ 
+ 	aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
+ 	if (err == 0)
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index d3a202b85ba6..edf73a6e16a8 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -179,7 +179,6 @@ asmlinkage void secondary_start_kernel(void)
+ 	set_cpu_online(cpu, true);
+ 	complete(&cpu_running);
+ 
+-	local_dbg_enable();
+ 	local_irq_enable();
+ 	local_async_enable();
+ 
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index d253908a988d..262c8ec55790 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -205,6 +205,8 @@ ENTRY(__cpu_setup)
+ 	msr	cpacr_el1, x0			// Enable FP/ASIMD
+ 	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
+ 	msr	mdscr_el1, x0			// access to the DCC from EL0
++	isb					// Unmask debug exceptions now,
++	enable_dbg				// since this is per-cpu
+ 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	/*
+ 	 * Memory region attributes for LPAE:
+diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h
+index 0154e2807ebb..2369ad394876 100644
+--- a/arch/metag/include/asm/cmpxchg_lnkget.h
++++ b/arch/metag/include/asm/cmpxchg_lnkget.h
+@@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
+ 		      "	DCACHE	[%2], %0\n"
+ #endif
+ 		      "2:\n"
+-		      : "=&d" (temp), "=&da" (retval)
++		      : "=&d" (temp), "=&d" (retval)
+ 		      : "da" (m), "bd" (old), "da" (new)
+ 		      : "cc"
+ 		      );
+diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
+index e5ed7ada1433..3a317e1ede71 100644
+--- a/arch/mips/kernel/csrc-r4k.c
++++ b/arch/mips/kernel/csrc-r4k.c
+@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
+ 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+ 
+-static u64 notrace r4k_read_sched_clock(void)
++static u64 __maybe_unused notrace r4k_read_sched_clock(void)
+ {
+ 	return read_c0_count();
+ }
+@@ -38,7 +38,9 @@ int __init init_r4k_clocksource(void)
+ 
+ 	clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+ 
++#ifndef CONFIG_CPU_FREQ
+ 	sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
++#endif
+ 
+ 	return 0;
+ }
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 4b2010654c46..97fa4c7b9a5e 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -353,7 +353,7 @@ EXPORT(sysn32_call_table)
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key
+ 	PTR	sys_request_key
+-	PTR	sys_keyctl			/* 6245 */
++	PTR	compat_sys_keyctl		/* 6245 */
+ 	PTR	sys_set_thread_area
+ 	PTR	sys_inotify_init
+ 	PTR	sys_inotify_add_watch
+diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
+index d07b210fbeff..80e39776e377 100644
+--- a/arch/mips/kernel/scall64-o32.S
++++ b/arch/mips/kernel/scall64-o32.S
+@@ -492,7 +492,7 @@ EXPORT(sys32_call_table)
+ 	PTR	sys_ni_syscall			/* available, was setaltroot */
+ 	PTR	sys_add_key			/* 4280 */
+ 	PTR	sys_request_key
+-	PTR	sys_keyctl
++	PTR	compat_sys_keyctl
+ 	PTR	sys_set_thread_area
+ 	PTR	sys_inotify_init
+ 	PTR	sys_inotify_add_watch		/* 4285 */
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index dc10c77b7500..d6476d11212e 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -1629,8 +1629,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+ 
+ 	preempt_disable();
+ 	if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+-		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
+-			kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
++		if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
++		    kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
++			kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
++				__func__, va, vcpu, read_c0_entryhi());
++			er = EMULATE_FAIL;
++			preempt_enable();
++			goto done;
++		}
+ 	} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+ 		   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ 		int index;
+@@ -1665,14 +1671,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+ 								run, vcpu);
+ 				preempt_enable();
+ 				goto dont_update_pc;
+-			} else {
+-				/*
+-				 * We fault an entry from the guest tlb to the
+-				 * shadow host TLB
+-				 */
+-				kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+-								     NULL,
+-								     NULL);
++			}
++			/*
++			 * We fault an entry from the guest tlb to the
++			 * shadow host TLB
++			 */
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++				preempt_enable();
++				goto done;
+ 			}
+ 		}
+ 	} else {
+@@ -2633,8 +2644,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+ 			 * OK we have a Guest TLB entry, now inject it into the
+ 			 * shadow host TLB
+ 			 */
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+-							     NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
++								 NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, va, index, vcpu,
++					read_c0_entryhi());
++				er = EMULATE_FAIL;
++			}
+ 		}
+ 	}
+ 
+diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
+index aed0ac2a4972..7a7ed9ca01bb 100644
+--- a/arch/mips/kvm/tlb.c
++++ b/arch/mips/kvm/tlb.c
+@@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ 	}
+ 
+ 	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+-	if (gfn >= kvm->arch.guest_pmap_npages) {
++	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
+ 		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ 			gfn, badvaddr);
+ 		kvm_mips_dump_host_tlbs();
+@@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ 	struct kvm *kvm = vcpu->kvm;
+ 	pfn_t pfn0, pfn1;
+-
+-	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+-		pfn0 = 0;
+-		pfn1 = 0;
+-	} else {
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+-					   >> PAGE_SHIFT) < 0)
+-			return -1;
+-
+-		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+-					   >> PAGE_SHIFT) < 0)
+-			return -1;
+-
+-		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+-					    >> PAGE_SHIFT];
+-		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+-					    >> PAGE_SHIFT];
++	gfn_t gfn0, gfn1;
++	long tlb_lo[2];
++
++	tlb_lo[0] = tlb->tlb_lo0;
++	tlb_lo[1] = tlb->tlb_lo1;
++
++	/*
++	 * The commpage address must not be mapped to anything else if the guest
++	 * TLB contains entries nearby, or commpage accesses will break.
++	 */
++	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
++			VPN2_MASK & (PAGE_MASK << 1)))
++		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;
++
++	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
++	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
++	if (gfn0 >= kvm->arch.guest_pmap_npages ||
++	    gfn1 >= kvm->arch.guest_pmap_npages) {
++		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
++			__func__, gfn0, gfn1, tlb->tlb_hi);
++		kvm_mips_dump_guest_tlbs(vcpu);
++		return -1;
+ 	}
+ 
++	if (kvm_mips_map_page(kvm, gfn0) < 0)
++		return -1;
++
++	if (kvm_mips_map_page(kvm, gfn1) < 0)
++		return -1;
++
++	pfn0 = kvm->arch.guest_pmap[gfn0];
++	pfn1 = kvm->arch.guest_pmap[gfn1];
++
+ 	if (hpa0)
+ 		*hpa0 = pfn0 << PAGE_SHIFT;
+ 
+@@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ 					       kvm_mips_get_kernel_asid(vcpu) :
+ 					       kvm_mips_get_user_asid(vcpu));
+ 	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+-		   (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
++		   (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V);
+ 	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+-		   (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
++		   (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V);
+ 
+ 	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ 		  tlb->tlb_lo0, tlb->tlb_lo1);
+@@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+ 				local_irq_restore(flags);
+ 				return KVM_INVALID_INST;
+ 			}
+-			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+-							     &vcpu->arch.
+-							     guest_tlb[index],
+-							     NULL, NULL);
++			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
++						&vcpu->arch.guest_tlb[index],
++						NULL, NULL)) {
++				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
++					__func__, opc, index, vcpu,
++					read_c0_entryhi());
++				kvm_mips_dump_guest_tlbs(vcpu);
++				local_irq_restore(flags);
++				return KVM_INVALID_INST;
++			}
+ 			inst = *(opc);
+ 		}
+ 		local_irq_restore(flags);
+diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
+index b4a837893562..5abe51cad899 100644
+--- a/arch/mips/mm/uasm-mips.c
++++ b/arch/mips/mm/uasm-mips.c
+@@ -65,7 +65,7 @@ static struct insn insn_table[] = {
+ #ifndef CONFIG_CPU_MIPSR6
+ 	{ insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+ #else
+-	{ insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
++	{ insn_cache,  M6(spec3_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+ #endif
+ 	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ 	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
+diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
+index 5754b226da7e..521aef95e6b7 100644
+--- a/arch/powerpc/kernel/tm.S
++++ b/arch/powerpc/kernel/tm.S
+@@ -110,17 +110,11 @@ _GLOBAL(tm_reclaim)
+ 	std	r3, STK_PARAM(R3)(r1)
+ 	SAVE_NVGPRS(r1)
+ 
+-	/* We need to setup MSR for VSX register save instructions.  Here we
+-	 * also clear the MSR RI since when we do the treclaim, we won't have a
+-	 * valid kernel pointer for a while.  We clear RI here as it avoids
+-	 * adding another mtmsr closer to the treclaim.  This makes the region
+-	 * maked as non-recoverable wider than it needs to be but it saves on
+-	 * inserting another mtmsrd later.
+-	 */
++	/* We need to setup MSR for VSX register save instructions. */
+ 	mfmsr	r14
+ 	mr	r15, r14
+ 	ori	r15, r15, MSR_FP
+-	li	r16, MSR_RI
++	li	r16, 0
+ 	ori	r16, r16, MSR_EE /* IRQs hard off */
+ 	andc	r15, r15, r16
+ 	oris	r15, r15, MSR_VEC@h
+@@ -176,7 +170,17 @@ dont_backup_fp:
+ 1:	tdeqi   r6, 0
+ 	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
+ 
+-	/* The moment we treclaim, ALL of our GPRs will switch
++	/* Clear MSR RI since we are about to change r1, EE is already off. */
++	li	r4, 0
++	mtmsrd	r4, 1
++
++	/*
++	 * BE CAREFUL HERE:
++	 * At this point we can't take an SLB miss since we have MSR_RI
++	 * off. Load only to/from the stack/paca which are in SLB bolted regions
++	 * until we turn MSR RI back on.
++	 *
++	 * The moment we treclaim, ALL of our GPRs will switch
+ 	 * to user register state.  (FPRs, CCR etc. also!)
+ 	 * Use an sprg and a tm_scratch in the PACA to shuffle.
+ 	 */
+@@ -197,6 +201,11 @@ dont_backup_fp:
+ 
+ 	/* Store the PPR in r11 and reset to decent value */
+ 	std	r11, GPR11(r1)			/* Temporary stash */
++
++	/* Reset MSR RI so we can take SLB faults again */
++	li	r11, MSR_RI
++	mtmsrd	r11, 1
++
+ 	mfspr	r11, SPRN_PPR
+ 	HMT_MEDIUM
+ 
+@@ -329,8 +338,6 @@ _GLOBAL(__tm_recheckpoint)
+ 	 */
+ 	subi	r7, r7, STACK_FRAME_OVERHEAD
+ 
+-	SET_SCRATCH0(r1)
+-
+ 	mfmsr	r6
+ 	/* R4 = original MSR to indicate whether thread used FP/Vector etc. */
+ 
+@@ -397,11 +404,6 @@ restore_gprs:
+ 	ld	r5, THREAD_TM_DSCR(r3)
+ 	ld	r6, THREAD_TM_PPR(r3)
+ 
+-	/* Clear the MSR RI since we are about to change R1.  EE is already off
+-	 */
+-	li	r4, 0
+-	mtmsrd	r4, 1
+-
+ 	REST_GPR(0, r7)				/* GPR0 */
+ 	REST_2GPRS(2, r7)			/* GPR2-3 */
+ 	REST_GPR(4, r7)				/* GPR4 */
+@@ -439,10 +441,34 @@ restore_gprs:
+ 	ld	r6, _CCR(r7)
+ 	mtcr    r6
+ 
+-	REST_GPR(1, r7)				/* GPR1 */
+-	REST_GPR(5, r7)				/* GPR5-7 */
+ 	REST_GPR(6, r7)
+-	ld	r7, GPR7(r7)
++
++	/*
++	 * Store r1 and r5 on the stack so that we can access them
++	 * after we clear MSR RI.
++	 */
++
++	REST_GPR(5, r7)
++	std	r5, -8(r1)
++	ld	r5, GPR1(r7)
++	std	r5, -16(r1)
++
++	REST_GPR(7, r7)
++
++	/* Clear MSR RI since we are about to change r1. EE is already off */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/*
++	 * BE CAREFUL HERE:
++	 * At this point we can't take an SLB miss since we have MSR_RI
++	 * off. Load only to/from the stack/paca which are in SLB bolted regions
++	 * until we turn MSR RI back on.
++	 */
++
++	SET_SCRATCH0(r1)
++	ld	r5, -8(r1)
++	ld	r1, -16(r1)
+ 
+ 	/* Commit register state as checkpointed state: */
+ 	TRECHKPT
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index a34e43eec658..11d82b91aa4f 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -570,112 +570,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	skip_tm
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-
+-	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+-	mfmsr	r5
+-	li	r6, MSR_TM >> 32
+-	sldi	r6, r6, 32
+-	or	r5, r5, r6
+-	ori	r5, r5, MSR_FP
+-	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+-	mtmsrd	r5
+-
+-	/*
+-	 * The user may change these outside of a transaction, so they must
+-	 * always be context switched.
+-	 */
+-	ld	r5, VCPU_TFHAR(r4)
+-	ld	r6, VCPU_TFIAR(r4)
+-	ld	r7, VCPU_TEXASR(r4)
+-	mtspr	SPRN_TFHAR, r5
+-	mtspr	SPRN_TFIAR, r6
+-	mtspr	SPRN_TEXASR, r7
+-
+-	ld	r5, VCPU_MSR(r4)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	skip_tm	/* TM not active in guest */
+-
+-	/* Make sure the failure summary is set, otherwise we'll program check
+-	 * when we trechkpt.  It's possible that this might have been not set
+-	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+-	 * host.
+-	 */
+-	oris	r7, r7, (TEXASR_FS)@h
+-	mtspr	SPRN_TEXASR, r7
+-
+-	/*
+-	 * We need to load up the checkpointed state for the guest.
+-	 * We need to do this early as it will blow away any GPRs, VSRs and
+-	 * some SPRs.
+-	 */
+-
+-	mr	r31, r4
+-	addi	r3, r31, VCPU_FPRS_TM
+-	bl	load_fp_state
+-	addi	r3, r31, VCPU_VRS_TM
+-	bl	load_vr_state
+-	mr	r4, r31
+-	lwz	r7, VCPU_VRSAVE_TM(r4)
+-	mtspr	SPRN_VRSAVE, r7
+-
+-	ld	r5, VCPU_LR_TM(r4)
+-	lwz	r6, VCPU_CR_TM(r4)
+-	ld	r7, VCPU_CTR_TM(r4)
+-	ld	r8, VCPU_AMR_TM(r4)
+-	ld	r9, VCPU_TAR_TM(r4)
+-	mtlr	r5
+-	mtcr	r6
+-	mtctr	r7
+-	mtspr	SPRN_AMR, r8
+-	mtspr	SPRN_TAR, r9
+-
+-	/*
+-	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+-	 * till the last moment to avoid running with userspace PPR and DSCR for
+-	 * too long.
+-	 */
+-	ld	r29, VCPU_DSCR_TM(r4)
+-	ld	r30, VCPU_PPR_TM(r4)
+-
+-	std	r2, PACATMSCRATCH(r13) /* Save TOC */
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* Load GPRs r0-r28 */
+-	reg = 0
+-	.rept	29
+-	ld	reg, VCPU_GPRS_TM(reg)(r31)
+-	reg = reg + 1
+-	.endr
+-
+-	mtspr	SPRN_DSCR, r29
+-	mtspr	SPRN_PPR, r30
+-
+-	/* Load final GPRs */
+-	ld	29, VCPU_GPRS_TM(29)(r31)
+-	ld	30, VCPU_GPRS_TM(30)(r31)
+-	ld	31, VCPU_GPRS_TM(31)(r31)
+-
+-	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
+-	TRECHKPT
+-
+-	/* Now let's get back the state we need. */
+-	HMT_MEDIUM
+-	GET_PACA(r13)
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-	ld	r4, HSTATE_KVM_VCPU(r13)
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATMSCRATCH(r13)
+-
+-	/* Set the MSR RI since we have our registers back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-skip_tm:
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Load guest PMU registers */
+@@ -756,12 +652,6 @@ BEGIN_FTR_SECTION
+ 	/* Skip next section on POWER7 */
+ 	b	8f
+ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+ 	/* Load up POWER8-specific registers */
+ 	ld	r5, VCPU_IAMR(r4)
+ 	lwz	r6, VCPU_PSPB(r4)
+@@ -1339,106 +1229,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ BEGIN_FTR_SECTION
+-	b	2f
+-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+-	/* Turn on TM. */
+-	mfmsr	r8
+-	li	r0, 1
+-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+-	mtmsrd	r8
+-
+-	ld	r5, VCPU_MSR(r9)
+-	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+-	beq	1f	/* TM not active in guest. */
+-
+-	li	r3, TM_CAUSE_KVM_RESCHED
+-
+-	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+-	li	r5, 0
+-	mtmsrd	r5, 1
+-
+-	/* All GPRs are volatile at this point. */
+-	TRECLAIM(R3)
+-
+-	/* Temporarily store r13 and r9 so we have some regs to play with */
+-	SET_SCRATCH0(r13)
+-	GET_PACA(r13)
+-	std	r9, PACATMSCRATCH(r13)
+-	ld	r9, HSTATE_KVM_VCPU(r13)
+-
+-	/* Get a few more GPRs free. */
+-	std	r29, VCPU_GPRS_TM(29)(r9)
+-	std	r30, VCPU_GPRS_TM(30)(r9)
+-	std	r31, VCPU_GPRS_TM(31)(r9)
+-
+-	/* Save away PPR and DSCR soon so don't run with user values. */
+-	mfspr	r31, SPRN_PPR
+-	HMT_MEDIUM
+-	mfspr	r30, SPRN_DSCR
+-	ld	r29, HSTATE_DSCR(r13)
+-	mtspr	SPRN_DSCR, r29
+-
+-	/* Save all but r9, r13 & r29-r31 */
+-	reg = 0
+-	.rept	29
+-	.if (reg != 9) && (reg != 13)
+-	std	reg, VCPU_GPRS_TM(reg)(r9)
+-	.endif
+-	reg = reg + 1
+-	.endr
+-	/* ... now save r13 */
+-	GET_SCRATCH0(r4)
+-	std	r4, VCPU_GPRS_TM(13)(r9)
+-	/* ... and save r9 */
+-	ld	r4, PACATMSCRATCH(r13)
+-	std	r4, VCPU_GPRS_TM(9)(r9)
+-
+-	/* Reload stack pointer and TOC. */
+-	ld	r1, HSTATE_HOST_R1(r13)
+-	ld	r2, PACATOC(r13)
+-
+-	/* Set MSR RI now we have r1 and r13 back. */
+-	li	r5, MSR_RI
+-	mtmsrd	r5, 1
+-
+-	/* Save away checkpinted SPRs. */
+-	std	r31, VCPU_PPR_TM(r9)
+-	std	r30, VCPU_DSCR_TM(r9)
+-	mflr	r5
+-	mfcr	r6
+-	mfctr	r7
+-	mfspr	r8, SPRN_AMR
+-	mfspr	r10, SPRN_TAR
+-	std	r5, VCPU_LR_TM(r9)
+-	stw	r6, VCPU_CR_TM(r9)
+-	std	r7, VCPU_CTR_TM(r9)
+-	std	r8, VCPU_AMR_TM(r9)
+-	std	r10, VCPU_TAR_TM(r9)
+-
+-	/* Restore r12 as trap number. */
+-	lwz	r12, VCPU_TRAP(r9)
+-
+-	/* Save FP/VSX. */
+-	addi	r3, r9, VCPU_FPRS_TM
+-	bl	store_fp_state
+-	addi	r3, r9, VCPU_VRS_TM
+-	bl	store_vr_state
+-	mfspr	r6, SPRN_VRSAVE
+-	stw	r6, VCPU_VRSAVE_TM(r9)
+-1:
+-	/*
+-	 * We need to save these SPRs after the treclaim so that the software
+-	 * error code is recorded correctly in the TEXASR.  Also the user may
+-	 * change these outside of a transaction, so they must always be
+-	 * context switched.
+-	 */
+-	mfspr	r5, SPRN_TFHAR
+-	mfspr	r6, SPRN_TFIAR
+-	mfspr	r7, SPRN_TEXASR
+-	std	r5, VCPU_TFHAR(r9)
+-	std	r6, VCPU_TFIAR(r9)
+-	std	r7, VCPU_TEXASR(r9)
+-2:
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ #endif
+ 
+ 	/* Increment yield count if they have a VPA */
+@@ -2139,6 +1931,13 @@ _GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
+ 	/* save FP state */
+ 	bl	kvmppc_save_fp
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	ld	r9, HSTATE_KVM_VCPU(r13)
++	bl	kvmppc_save_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/*
+ 	 * Set DEC to the smaller of DEC and HDEC, so that we wake
+ 	 * no later than the end of our timeslice (HDEC interrupts
+@@ -2215,6 +2014,12 @@ kvm_end_cede:
+ 	bl	kvmhv_accumulate_time
+ #endif
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++BEGIN_FTR_SECTION
++	bl	kvmppc_restore_tm
++END_FTR_SECTION_IFSET(CPU_FTR_TM)
++#endif
++
+ 	/* load up FP state */
+ 	bl	kvmppc_load_fp
+ 
+@@ -2514,6 +2319,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ 	mr	r4,r31
+ 	blr
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++/*
++ * Save transactional state and TM-related registers.
++ * Called with r9 pointing to the vcpu struct.
++ * This can modify all checkpointed registers, but
++ * restores r1, r2 and r9 (vcpu pointer) before exit.
++ */
++kvmppc_save_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM. */
++	mfmsr	r8
++	li	r0, 1
++	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
++	mtmsrd	r8
++
++	ld	r5, VCPU_MSR(r9)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beq	1f	/* TM not active in guest. */
++
++	std	r1, HSTATE_HOST_R1(r13)
++	li	r3, TM_CAUSE_KVM_RESCHED
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* All GPRs are volatile at this point. */
++	TRECLAIM(R3)
++
++	/* Temporarily store r13 and r9 so we have some regs to play with */
++	SET_SCRATCH0(r13)
++	GET_PACA(r13)
++	std	r9, PACATMSCRATCH(r13)
++	ld	r9, HSTATE_KVM_VCPU(r13)
++
++	/* Get a few more GPRs free. */
++	std	r29, VCPU_GPRS_TM(29)(r9)
++	std	r30, VCPU_GPRS_TM(30)(r9)
++	std	r31, VCPU_GPRS_TM(31)(r9)
++
++	/* Save away PPR and DSCR soon so don't run with user values. */
++	mfspr	r31, SPRN_PPR
++	HMT_MEDIUM
++	mfspr	r30, SPRN_DSCR
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++
++	/* Save all but r9, r13 & r29-r31 */
++	reg = 0
++	.rept	29
++	.if (reg != 9) && (reg != 13)
++	std	reg, VCPU_GPRS_TM(reg)(r9)
++	.endif
++	reg = reg + 1
++	.endr
++	/* ... now save r13 */
++	GET_SCRATCH0(r4)
++	std	r4, VCPU_GPRS_TM(13)(r9)
++	/* ... and save r9 */
++	ld	r4, PACATMSCRATCH(r13)
++	std	r4, VCPU_GPRS_TM(9)(r9)
++
++	/* Reload stack pointer and TOC. */
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATOC(r13)
++
++	/* Set MSR RI now we have r1 and r13 back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	/* Save away checkpointed SPRs. */
++	std	r31, VCPU_PPR_TM(r9)
++	std	r30, VCPU_DSCR_TM(r9)
++	mflr	r5
++	mfcr	r6
++	mfctr	r7
++	mfspr	r8, SPRN_AMR
++	mfspr	r10, SPRN_TAR
++	std	r5, VCPU_LR_TM(r9)
++	stw	r6, VCPU_CR_TM(r9)
++	std	r7, VCPU_CTR_TM(r9)
++	std	r8, VCPU_AMR_TM(r9)
++	std	r10, VCPU_TAR_TM(r9)
++
++	/* Restore r12 as trap number. */
++	lwz	r12, VCPU_TRAP(r9)
++
++	/* Save FP/VSX. */
++	addi	r3, r9, VCPU_FPRS_TM
++	bl	store_fp_state
++	addi	r3, r9, VCPU_VRS_TM
++	bl	store_vr_state
++	mfspr	r6, SPRN_VRSAVE
++	stw	r6, VCPU_VRSAVE_TM(r9)
++1:
++	/*
++	 * We need to save these SPRs after the treclaim so that the software
++	 * error code is recorded correctly in the TEXASR.  Also the user may
++	 * change these outside of a transaction, so they must always be
++	 * context switched.
++	 */
++	mfspr	r5, SPRN_TFHAR
++	mfspr	r6, SPRN_TFIAR
++	mfspr	r7, SPRN_TEXASR
++	std	r5, VCPU_TFHAR(r9)
++	std	r6, VCPU_TFIAR(r9)
++	std	r7, VCPU_TEXASR(r9)
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++
++/*
++ * Restore transactional state and TM-related registers.
++ * Called with r4 pointing to the vcpu struct.
++ * This potentially modifies all checkpointed registers.
++ * It restores r1, r2, r4 from the PACA.
++ */
++kvmppc_restore_tm:
++	mflr	r0
++	std	r0, PPC_LR_STKOFF(r1)
++
++	/* Turn on TM/FP/VSX/VMX so we can restore them. */
++	mfmsr	r5
++	li	r6, MSR_TM >> 32
++	sldi	r6, r6, 32
++	or	r5, r5, r6
++	ori	r5, r5, MSR_FP
++	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
++	mtmsrd	r5
++
++	/*
++	 * The user may change these outside of a transaction, so they must
++	 * always be context switched.
++	 */
++	ld	r5, VCPU_TFHAR(r4)
++	ld	r6, VCPU_TFIAR(r4)
++	ld	r7, VCPU_TEXASR(r4)
++	mtspr	SPRN_TFHAR, r5
++	mtspr	SPRN_TFIAR, r6
++	mtspr	SPRN_TEXASR, r7
++
++	ld	r5, VCPU_MSR(r4)
++	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++	beqlr		/* TM not active in guest */
++	std	r1, HSTATE_HOST_R1(r13)
++
++	/* Make sure the failure summary is set, otherwise we'll program check
++	 * when we trechkpt.  It's possible that this might have been not set
++	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
++	 * host.
++	 */
++	oris	r7, r7, (TEXASR_FS)@h
++	mtspr	SPRN_TEXASR, r7
++
++	/*
++	 * We need to load up the checkpointed state for the guest.
++	 * We need to do this early as it will blow away any GPRs, VSRs and
++	 * some SPRs.
++	 */
++
++	mr	r31, r4
++	addi	r3, r31, VCPU_FPRS_TM
++	bl	load_fp_state
++	addi	r3, r31, VCPU_VRS_TM
++	bl	load_vr_state
++	mr	r4, r31
++	lwz	r7, VCPU_VRSAVE_TM(r4)
++	mtspr	SPRN_VRSAVE, r7
++
++	ld	r5, VCPU_LR_TM(r4)
++	lwz	r6, VCPU_CR_TM(r4)
++	ld	r7, VCPU_CTR_TM(r4)
++	ld	r8, VCPU_AMR_TM(r4)
++	ld	r9, VCPU_TAR_TM(r4)
++	mtlr	r5
++	mtcr	r6
++	mtctr	r7
++	mtspr	SPRN_AMR, r8
++	mtspr	SPRN_TAR, r9
++
++	/*
++	 * Load up PPR and DSCR values but don't put them in the actual SPRs
++	 * till the last moment to avoid running with userspace PPR and DSCR for
++	 * too long.
++	 */
++	ld	r29, VCPU_DSCR_TM(r4)
++	ld	r30, VCPU_PPR_TM(r4)
++
++	std	r2, PACATMSCRATCH(r13) /* Save TOC */
++
++	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
++	li	r5, 0
++	mtmsrd	r5, 1
++
++	/* Load GPRs r0-r28 */
++	reg = 0
++	.rept	29
++	ld	reg, VCPU_GPRS_TM(reg)(r31)
++	reg = reg + 1
++	.endr
++
++	mtspr	SPRN_DSCR, r29
++	mtspr	SPRN_PPR, r30
++
++	/* Load final GPRs */
++	ld	29, VCPU_GPRS_TM(29)(r31)
++	ld	30, VCPU_GPRS_TM(30)(r31)
++	ld	31, VCPU_GPRS_TM(31)(r31)
++
++	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
++	TRECHKPT
++
++	/* Now let's get back the state we need. */
++	HMT_MEDIUM
++	GET_PACA(r13)
++	ld	r29, HSTATE_DSCR(r13)
++	mtspr	SPRN_DSCR, r29
++	ld	r4, HSTATE_KVM_VCPU(r13)
++	ld	r1, HSTATE_HOST_R1(r13)
++	ld	r2, PACATMSCRATCH(r13)
++
++	/* Set the MSR RI since we have our registers back. */
++	li	r5, MSR_RI
++	mtmsrd	r5, 1
++
++	ld	r0, PPC_LR_STKOFF(r1)
++	mtlr	r0
++	blr
++#endif
++
+ /*
+  * We come here if we get any exception or interrupt while we are
+  * executing host real mode code while in guest MMU context.
+diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
+index 61d5a17f45c0..e8b05e635595 100644
+--- a/arch/powerpc/platforms/pseries/iommu.c
++++ b/arch/powerpc/platforms/pseries/iommu.c
+@@ -825,7 +825,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
+ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_query_response *query)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -836,11 +837,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
+ 		  cfg_addr, BUID_HI(buid), BUID_LO(buid));
+@@ -854,7 +854,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 			struct ddw_create_response *create, int page_shift,
+ 			int window_shift)
+ {
+-	struct eeh_dev *edev;
++	struct device_node *dn;
++	struct pci_dn *pdn;
+ 	u32 cfg_addr;
+ 	u64 buid;
+ 	int ret;
+@@ -865,11 +866,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
+ 	 * Retrieve them from the pci device, not the node with the
+ 	 * dma-window property
+ 	 */
+-	edev = pci_dev_to_eeh_dev(dev);
+-	cfg_addr = edev->config_addr;
+-	if (edev->pe_config_addr)
+-		cfg_addr = edev->pe_config_addr;
+-	buid = edev->phb->buid;
++	dn = pci_device_to_OF_node(dev);
++	pdn = PCI_DN(dn);
++	buid = pdn->phb->buid;
++	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
+ 
+ 	do {
+ 		/* extra outputs are LIOBN and dma-addr (hi, lo) */
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index ebf82a99df45..9ac703cfdb21 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(gmap_alloc);
+ static void gmap_flush_tlb(struct gmap *gmap)
+ {
+ 	if (MACHINE_HAS_IDTE)
+-		__tlb_flush_asce(gmap->mm, gmap->asce);
++		__tlb_flush_idte(gmap->asce);
+ 	else
+ 		__tlb_flush_global();
+ }
+@@ -212,7 +212,7 @@ void gmap_free(struct gmap *gmap)
+ 
+ 	/* Flush tlb. */
+ 	if (MACHINE_HAS_IDTE)
+-		__tlb_flush_asce(gmap->mm, gmap->asce);
++		__tlb_flush_idte(gmap->asce);
+ 	else
+ 		__tlb_flush_global();
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 22212615a137..185ebd2c0c3c 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -3332,7 +3332,7 @@ __init int intel_pmu_init(void)
+ 				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ 			}
+ 			c->idxmsk64 &=
+-				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
++				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+ 			c->weight = hweight64(c->idxmsk64);
+ 		}
+ 	}
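
The constraint-mask change above fixes a classic integer-width bug: on a 32-bit build, unsigned long is only 32 bits wide, so shifting ~0UL by INTEL_PMC_IDX_FIXED + num_counters_fixed (which can exceed 31) is undefined and the upper half of the 64-bit mask is lost. A minimal userspace sketch, where the shift count of 35 is a hypothetical stand-in:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int shift = 35;	/* stands in for INTEL_PMC_IDX_FIXED + num_counters_fixed */

	/* With ~0UL on an ILP32 target the operand is only 32 bits wide,
	 * so a shift of 35 is undefined behaviour and the upper half of
	 * the mask is lost; ~0ULL always shifts a full 64-bit value. */
	uint64_t mask = ~(~0ULL << shift);

	printf("mask = %#llx\n", (unsigned long long)mask);
	return 0;
}
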
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 1274fac7c28f..08f9d9230b94 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -7770,6 +7770,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
+ 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ 			(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
+ 			exit_reason != EXIT_REASON_EPT_VIOLATION &&
++			exit_reason != EXIT_REASON_PML_FULL &&
+ 			exit_reason != EXIT_REASON_TASK_SWITCH)) {
+ 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
+@@ -8377,6 +8378,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+ 	put_cpu();
+ }
+ 
++/*
++ * Ensure that the current vmcs of the logical processor is the
++ * vmcs01 of the vcpu before calling free_nested().
++ */
++static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
++{
++       struct vcpu_vmx *vmx = to_vmx(vcpu);
++       int r;
++
++       r = vcpu_load(vcpu);
++       BUG_ON(r);
++       vmx_load_vmcs01(vcpu);
++       free_nested(vmx);
++       vcpu_put(vcpu);
++}
++
+ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -8385,8 +8402,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
+ 		vmx_disable_pml(vmx);
+ 	free_vpid(vmx);
+ 	leave_guest_mode(vcpu);
+-	vmx_load_vmcs01(vcpu);
+-	free_nested(vmx);
++	vmx_free_vcpu_nested(vcpu);
+ 	free_loaded_vmcs(vmx->loaded_vmcs);
+ 	kfree(vmx->guest_msrs);
+ 	kvm_vcpu_uninit(vcpu);
+diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
+index ef8187f9d28d..f3a443dad74c 100644
+--- a/arch/x86/syscalls/syscall_32.tbl
++++ b/arch/x86/syscalls/syscall_32.tbl
+@@ -294,7 +294,7 @@
+ # 285 sys_setaltroot
+ 286	i386	add_key			sys_add_key
+ 287	i386	request_key		sys_request_key
+-288	i386	keyctl			sys_keyctl
++288	i386	keyctl			sys_keyctl			compat_sys_keyctl
+ 289	i386	ioprio_set		sys_ioprio_set
+ 290	i386	ioprio_get		sys_ioprio_get
+ 291	i386	inotify_init		sys_inotify_init
+diff --git a/block/genhd.c b/block/genhd.c
+index ea982eadaf63..f5d12185d631 100644
+--- a/block/genhd.c
++++ b/block/genhd.c
+@@ -828,6 +828,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v)
+ 	if (iter) {
+ 		class_dev_iter_exit(iter);
+ 		kfree(iter);
++		seqf->private = NULL;
+ 	}
+ }
+ 
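
The one-line genhd fix applies the free-and-clear idiom: seq_file may invoke the ->stop callback again without an intervening ->start, so clearing the stashed pointer turns the repeated call into a no-op instead of a double free. A minimal sketch of the pattern:

#include <stdlib.h>

/* Free the object in *slot and clear the slot, so that a second
 * cleanup call on the same slot is harmless rather than a double free. */
static void cleanup(void **slot)
{
	free(*slot);
	*slot = NULL;
}
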
+diff --git a/crypto/gcm.c b/crypto/gcm.c
+index 2e403f6138c1..ee3c29bd7ddb 100644
+--- a/crypto/gcm.c
++++ b/crypto/gcm.c
+@@ -716,7 +716,9 @@ static struct crypto_instance *crypto_gcm_alloc_common(struct rtattr **tb,
+ 
+ 	ghash_alg = crypto_find_alg(ghash_name, &crypto_ahash_type,
+ 				    CRYPTO_ALG_TYPE_HASH,
+-				    CRYPTO_ALG_TYPE_AHASH_MASK);
++				    CRYPTO_ALG_TYPE_AHASH_MASK |
++				    crypto_requires_sync(algt->type,
++							 algt->mask));
+ 	if (IS_ERR(ghash_alg))
+ 		return ERR_CAST(ghash_alg);
+ 
+diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
+index 3bd749c7bb70..46dcbbc4f32e 100644
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -68,7 +68,8 @@ static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
+ 
+ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
+ {
+-	if (!(scatterwalk_pagelen(walk) & (PAGE_SIZE - 1)) || !more)
++	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
++	    !(walk->offset & (PAGE_SIZE - 1)))
+ 		scatterwalk_pagedone(walk, out, more);
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_done);
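
The corrected condition flushes not only on page boundaries but also when the current scatterlist entry is exhausted, which matters when an entry ends mid-page. A condensed sketch of the new test, with simplified stand-in types:

#define PAGE_SIZE 4096u

struct sg_entry { unsigned int offset, length; };
struct sg_walk  { struct sg_entry *sg; unsigned int offset; };

/* Flush when the walk is finished, when the current scatterlist entry
 * is exhausted, or when the walk sits exactly on a page boundary. */
static int walk_should_flush(const struct sg_walk *w, int more)
{
	if (!more)
		return 1;
	if (w->offset >= w->sg->offset + w->sg->length)
		return 1;
	return !(w->offset & (PAGE_SIZE - 1));
}
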
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 0beaa52df66b..c31980079507 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -122,6 +122,8 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
+ 	{ USB_DEVICE(0x13d3, 0x3472) },
+ 	{ USB_DEVICE(0x13d3, 0x3474) },
++	{ USB_DEVICE(0x13d3, 0x3487) },
++	{ USB_DEVICE(0x13d3, 0x3490) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -188,6 +190,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index ac553f997a1c..81721ad6fcb0 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -224,6 +224,8 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
+index aa28c65eb6b4..14945fd9d5e1 100644
+--- a/drivers/gpio/gpio-intel-mid.c
++++ b/drivers/gpio/gpio-intel-mid.c
+@@ -17,7 +17,6 @@
+  * Moorestown platform Langwell chip.
+  * Medfield platform Penwell chip.
+  * Clovertrail platform Cloverview chip.
+- * Merrifield platform Tangier chip.
+  */
+ 
+ #include <linux/module.h>
+@@ -64,10 +63,6 @@ enum GPIO_REG {
+ /* intel_mid gpio driver data */
+ struct intel_mid_gpio_ddata {
+ 	u16 ngpio;		/* number of gpio pins */
+-	u32 gplr_offset;	/* offset of first GPLR register from base */
+-	u32 flis_base;		/* base address of FLIS registers */
+-	u32 flis_len;		/* length of FLIS registers */
+-	u32 (*get_flis_offset)(int gpio);
+ 	u32 chip_irq_type;	/* chip interrupt type */
+ };
+ 
+@@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = {
+ 	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+ };
+ 
+-static const struct intel_mid_gpio_ddata gpio_tangier = {
+-	.ngpio = 192,
+-	.gplr_offset = 4,
+-	.flis_base = 0xff0c0000,
+-	.flis_len = 0x8000,
+-	.get_flis_offset = NULL,
+-	.chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE,
+-};
+-
+ static const struct pci_device_id intel_gpio_ids[] = {
+ 	{
+ 		/* Lincroft */
+@@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7),
+ 		.driver_data = (kernel_ulong_t)&gpio_cloverview_core,
+ 	},
+-	{
+-		/* Tangier */
+-		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199),
+-		.driver_data = (kernel_ulong_t)&gpio_tangier,
+-	},
+ 	{ 0 }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index 16f7c4f2d8c8..6e2720ea7d45 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(i2c, pca953x_id);
+ #define MAX_BANK 5
+ #define BANK_SZ 8
+ 
+-#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ)
++#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ)
+ 
+ struct pca953x_chip {
+ 	unsigned gpio_start;
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 5250596a612e..56323732c748 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7129,14 +7129,12 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *encoder;
+-	int i;
+ 	u32 val, final;
+ 	bool has_lvds = false;
+ 	bool has_cpu_edp = false;
+ 	bool has_panel = false;
+ 	bool has_ck505 = false;
+ 	bool can_ssc = false;
+-	bool using_ssc_source = false;
+ 
+ 	/* We need to take the global config into account */
+ 	for_each_intel_encoder(dev, encoder) {
+@@ -7163,22 +7161,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		can_ssc = true;
+ 	}
+ 
+-	/* Check if any DPLLs are using the SSC source */
+-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+-		u32 temp = I915_READ(PCH_DPLL(i));
+-
+-		if (!(temp & DPLL_VCO_ENABLE))
+-			continue;
+-
+-		if ((temp & PLL_REF_INPUT_MASK) ==
+-		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+-			using_ssc_source = true;
+-			break;
+-		}
+-	}
+-
+-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+-		      has_panel, has_lvds, has_ck505, using_ssc_source);
++	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
++		      has_panel, has_lvds, has_ck505);
+ 
+ 	/* Ironlake: try to setup display ref clock before DPLL
+ 	 * enabling. This is only under driver's control after
+@@ -7215,9 +7199,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ 		} else
+ 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+-	} else if (using_ssc_source) {
+-		final |= DREF_SSC_SOURCE_ENABLE;
+-		final |= DREF_SSC1_ENABLE;
++	} else {
++		final |= DREF_SSC_SOURCE_DISABLE;
++		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+ 	}
+ 
+ 	if (final == val)
+@@ -7263,7 +7247,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 	} else {
+-		DRM_DEBUG_KMS("Disabling CPU source output\n");
++		DRM_DEBUG_KMS("Disabling SSC entirely\n");
+ 
+ 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ 
+@@ -7274,20 +7258,16 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 
+-		if (!using_ssc_source) {
+-			DRM_DEBUG_KMS("Disabling SSC source\n");
+-
+-			/* Turn off the SSC source */
+-			val &= ~DREF_SSC_SOURCE_MASK;
+-			val |= DREF_SSC_SOURCE_DISABLE;
++		/* Turn off the SSC source */
++		val &= ~DREF_SSC_SOURCE_MASK;
++		val |= DREF_SSC_SOURCE_DISABLE;
+ 
+-			/* Turn off SSC1 */
+-			val &= ~DREF_SSC1_ENABLE;
++		/* Turn off SSC1 */
++		val &= ~DREF_SSC1_ENABLE;
+ 
+-			I915_WRITE(PCH_DREF_CONTROL, val);
+-			POSTING_READ(PCH_DREF_CONTROL);
+-			udelay(200);
+-		}
++		I915_WRITE(PCH_DREF_CONTROL, val);
++		POSTING_READ(PCH_DREF_CONTROL);
++		udelay(200);
+ 	}
+ 
+ 	BUG_ON(val != final);
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+index 7a92d15d474e..1ff5ca37dd62 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 			 ((image->dx + image->width) & 0xffff));
+ 	OUT_RING(chan, bg);
+ 	OUT_RING(chan, fg);
+-	OUT_RING(chan, (image->height << 16) | image->width);
++	OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
+ 	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ 
+-	dsize = ALIGN(image->width * image->height, 32) >> 5;
++	dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dsize) {
+ 		int iter_len = dsize > 128 ? 128 : dsize;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+index cb2a71ada99e..8462f72e8819 100644
+--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING(chan, 0);
+ 	OUT_RING(chan, image->dy);
+ 
+-	dwords = ALIGN(image->width * image->height, 32) >> 5;
++	dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+index 69f760e8c54f..90552420c217 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING  (chan, 0);
+ 	OUT_RING  (chan, image->dy);
+ 
+-	dwords = ALIGN(image->width * image->height, 32) >> 5;
++	dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+index dcc84eb54fb6..183d1701ae94 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+@@ -88,8 +88,8 @@ nv30_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ 		nv_wo32(chan, i, 0x00040004);
+ 	for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ 		nv_wo32(chan, i + 0, 0x10700ff9);
+-		nv_wo32(chan, i + 1, 0x0436086c);
+-		nv_wo32(chan, i + 2, 0x000c001b);
++		nv_wo32(chan, i + 4, 0x0436086c);
++		nv_wo32(chan, i + 8, 0x000c001b);
+ 	}
+ 	for (i = 0x30b8; i < 0x30c8; i += 4)
+ 		nv_wo32(chan, i, 0x0000ffff);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+index 985b7f3306ae..720c97135e85 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+@@ -86,8 +86,8 @@ nv34_gr_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ 		nv_wo32(chan, i, 0x00040004);
+ 	for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ 		nv_wo32(chan, i + 0, 0x10700ff9);
+-		nv_wo32(chan, i + 1, 0x0436086c);
+-		nv_wo32(chan, i + 2, 0x000c001b);
++		nv_wo32(chan, i + 4, 0x0436086c);
++		nv_wo32(chan, i + 8, 0x000c001b);
+ 	}
+ 	for (i = 0x274c; i < 0x275c; i += 4)
+ 		nv_wo32(chan, i, 0x0000ffff);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 0b04b9282f56..d4ac8c837314 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
+ 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
++		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
+ 			if (dig->backlight_level == 0)
+ 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+ 			else {
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index de9a2ffcf5f7..0c5b3eeff82d 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		    le16_to_cpu(firmware_info->info.usReferenceClock);
+ 		p1pll->reference_div = 0;
+ 
+-		if (crev < 2)
++		if ((frev < 2) && (crev < 2))
+ 			p1pll->pll_out_min =
+ 				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
+ 		else
+@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		p1pll->pll_out_max =
+ 		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ 
+-		if (crev >= 4) {
++		if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
+ 			p1pll->lcd_pll_out_min =
+ 				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ 			if (p1pll->lcd_pll_out_min == 0)
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 8bc7d0bbd3c8..868247c22de4 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -10,6 +10,7 @@
+ #include <linux/slab.h>
+ #include <linux/acpi.h>
+ #include <linux/pci.h>
++#include <linux/delay.h>
+ 
+ #include "radeon_acpi.h"
+ 
+@@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
+ 		if (!info)
+ 			return -EIO;
+ 		kfree(info);
++
++		/* 200ms delay is required after off */
++		if (state == 0)
++			msleep(200);
+ 	}
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index f5c96fb7e8d0..9f699e87320a 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -2039,7 +2039,6 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 							   RADEON_OUTPUT_CSC_BYPASS);
+ 			/* no HPD on analog connectors */
+ 			radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 			connector->interlace_allowed = true;
+ 			connector->doublescan_allowed = true;
+ 			break;
+@@ -2289,8 +2288,10 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
+ 
+@@ -2366,7 +2367,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 					      1);
+ 		/* no HPD on analog connectors */
+ 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ 		connector->interlace_allowed = true;
+ 		connector->doublescan_allowed = true;
+ 		break;
+@@ -2451,10 +2451,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
+ 	}
+ 
+ 	if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
+-		if (i2c_bus->valid)
+-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
++		if (i2c_bus->valid) {
++			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
++			                    DRM_CONNECTOR_POLL_DISCONNECT;
++		}
+ 	} else
+ 		connector->polled = DRM_CONNECTOR_POLL_HPD;
++
+ 	connector->display_info.subpixel_order = subpixel_order;
+ 	drm_connector_register(connector);
+ }
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index e094c572b86e..1a2032c2c1fb 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -51,10 +51,26 @@ struct uhid_device {
+ 	u32 report_id;
+ 	u32 report_type;
+ 	struct uhid_event report_buf;
++	struct work_struct worker;
+ };
+ 
+ static struct miscdevice uhid_misc;
+ 
++static void uhid_device_add_worker(struct work_struct *work)
++{
++	struct uhid_device *uhid = container_of(work, struct uhid_device, worker);
++	int ret;
++
++	ret = hid_add_device(uhid->hid);
++	if (ret) {
++		hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
++
++		hid_destroy_device(uhid->hid);
++		uhid->hid = NULL;
++		uhid->running = false;
++	}
++}
++
+ static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+ {
+ 	__u8 newhead;
+@@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid,
+ 	uhid->hid = hid;
+ 	uhid->running = true;
+ 
+-	ret = hid_add_device(hid);
+-	if (ret) {
+-		hid_err(hid, "Cannot register HID device\n");
+-		goto err_hid;
+-	}
++	/* Adding a HID device is done through a worker so that HID drivers
++	 * which use feature requests during .probe can work; otherwise they
++	 * would block on devlock, which is held by uhid_char_write.
++	 */
++	schedule_work(&uhid->worker);
+ 
+ 	return 0;
+ 
+-err_hid:
+-	hid_destroy_device(hid);
+-	uhid->hid = NULL;
+-	uhid->running = false;
+ err_free:
+ 	kfree(uhid->rd_data);
+ 	uhid->rd_data = NULL;
+@@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
+ 	uhid->running = false;
+ 	wake_up_interruptible(&uhid->report_wait);
+ 
++	cancel_work_sync(&uhid->worker);
++
+ 	hid_destroy_device(uhid->hid);
+ 	kfree(uhid->rd_data);
+ 
+@@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file)
+ 	init_waitqueue_head(&uhid->waitq);
+ 	init_waitqueue_head(&uhid->report_wait);
+ 	uhid->running = false;
++	INIT_WORK(&uhid->worker, uhid_device_add_worker);
+ 
+ 	file->private_data = uhid;
+ 	nonseekable_open(inode, file);
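
The uhid change defers hid_add_device() to a workqueue because registration can re-enter the driver with feature requests while the ioctl path still holds devlock. A condensed sketch of the deferral pattern (names abbreviated, not the exact uhid code):

#include <linux/workqueue.h>

struct mydev {
	struct work_struct worker;
	/* device state ... */
};

static void mydev_add_worker(struct work_struct *work)
{
	struct mydev *d = container_of(work, struct mydev, worker);

	/* Registration may call back into the driver; that is safe here
	 * because the caller's lock has long been dropped by the time
	 * this work runs. */
	(void)d;	/* the registration call would go here */
}

static int mydev_create(struct mydev *d)
{
	INIT_WORK(&d->worker, mydev_add_worker);
	schedule_work(&d->worker);	/* defer instead of blocking */
	return 0;
}

/* Teardown must call cancel_work_sync(&d->worker) before freeing d. */
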
+diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
+index 98ba761cbb9c..d8738d4f8df3 100644
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
+ 
+ 	mutex_lock(&st->buf_lock);
+ 	ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
+ 	st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
+@@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
+ 		break;
+ 	case IIO_CHAN_INFO_SCALE:
+ 		ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
+-		if (ret)
++		if (ret < 0)
+ 			goto error_ret;
+ 		*val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
+ 		ret = IIO_VAL_INT_PLUS_MICRO;
+diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
+index 70f78c3062a7..8e2b9e70511d 100644
+--- a/drivers/iio/adc/ad7266.c
++++ b/drivers/iio/adc/ad7266.c
+@@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 	st = iio_priv(indio_dev);
+ 
+-	st->reg = devm_regulator_get(&spi->dev, "vref");
+-	if (!IS_ERR_OR_NULL(st->reg)) {
++	st->reg = devm_regulator_get_optional(&spi->dev, "vref");
++	if (!IS_ERR(st->reg)) {
+ 		ret = regulator_enable(st->reg);
+ 		if (ret)
+ 			return ret;
+@@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
+ 
+ 		st->vref_mv = ret / 1000;
+ 	} else {
++		/* Any other error indicates that the regulator does exist */
++		if (PTR_ERR(st->reg) != -ENODEV)
++			return PTR_ERR(st->reg);
+ 		/* Use internal reference */
+ 		st->vref_mv = 2500;
+ 	}
+diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
+index d31098e0c43f..ae824d40195a 100644
+--- a/drivers/iio/industrialio-trigger.c
++++ b/drivers/iio/industrialio-trigger.c
+@@ -203,22 +203,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
+ 
+ 	/* Prevent the module from being removed whilst attached to a trigger */
+ 	__module_get(pf->indio_dev->info->driver_module);
++
++	/* Get irq number */
+ 	pf->irq = iio_trigger_get_irq(trig);
++	if (pf->irq < 0)
++		goto out_put_module;
++
++	/* Request irq */
+ 	ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
+ 				   pf->type, pf->name,
+ 				   pf);
+-	if (ret < 0) {
+-		module_put(pf->indio_dev->info->driver_module);
+-		return ret;
+-	}
++	if (ret < 0)
++		goto out_put_irq;
+ 
++	/* Enable trigger in driver */
+ 	if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+ 		ret = trig->ops->set_trigger_state(trig, true);
+ 		if (ret < 0)
+-			module_put(pf->indio_dev->info->driver_module);
++			goto out_free_irq;
+ 	}
+ 
+ 	return ret;
++
++out_free_irq:
++	free_irq(pf->irq, pf);
++out_put_irq:
++	iio_trigger_put_irq(trig, pf->irq);
++out_put_module:
++	module_put(pf->indio_dev->info->driver_module);
++	return ret;
+ }
+ 
+ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
+diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
+index e881fa6291e9..1f7f844bc0b8 100644
+--- a/drivers/iio/pressure/st_pressure_core.c
++++ b/drivers/iio/pressure/st_pressure_core.c
+@@ -28,15 +28,21 @@
+ #include <linux/iio/common/st_sensors.h>
+ #include "st_pressure.h"
+ 
++#define MCELSIUS_PER_CELSIUS			1000
++
++/* Default pressure sensitivity */
+ #define ST_PRESS_LSB_PER_MBAR			4096UL
+ #define ST_PRESS_KPASCAL_NANO_SCALE		(100000000UL / \
+ 						 ST_PRESS_LSB_PER_MBAR)
++
++/* Default temperature sensitivity */
+ #define ST_PRESS_LSB_PER_CELSIUS		480UL
+-#define ST_PRESS_CELSIUS_NANO_SCALE		(1000000000UL / \
+-						 ST_PRESS_LSB_PER_CELSIUS)
++#define ST_PRESS_MILLI_CELSIUS_OFFSET		42500UL
++
+ #define ST_PRESS_NUMBER_DATA_CHANNELS		1
+ 
+ /* FULLSCALE */
++#define ST_PRESS_FS_AVL_1100MB			1100
+ #define ST_PRESS_FS_AVL_1260MB			1260
+ 
+ #define ST_PRESS_1_OUT_XL_ADDR			0x28
+@@ -54,18 +60,20 @@
+ #define ST_PRESS_LPS331AP_PW_MASK		0x80
+ #define ST_PRESS_LPS331AP_FS_ADDR		0x23
+ #define ST_PRESS_LPS331AP_FS_MASK		0x30
+-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL	0x00
+-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN	ST_PRESS_KPASCAL_NANO_SCALE
+-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN	ST_PRESS_CELSIUS_NANO_SCALE
+ #define ST_PRESS_LPS331AP_BDU_ADDR		0x20
+ #define ST_PRESS_LPS331AP_BDU_MASK		0x04
+ #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR		0x22
+ #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK	0x04
+ #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK	0x20
+ #define ST_PRESS_LPS331AP_MULTIREAD_BIT		true
+-#define ST_PRESS_LPS331AP_TEMP_OFFSET		42500
+ 
+ /* CUSTOM VALUES FOR LPS001WP SENSOR */
++
++/* LPS001WP pressure resolution */
++#define ST_PRESS_LPS001WP_LSB_PER_MBAR		16UL
++/* LPS001WP temperature resolution */
++#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS	64UL
++
+ #define ST_PRESS_LPS001WP_WAI_EXP		0xba
+ #define ST_PRESS_LPS001WP_ODR_ADDR		0x20
+ #define ST_PRESS_LPS001WP_ODR_MASK		0x30
+@@ -74,6 +82,8 @@
+ #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL	0x03
+ #define ST_PRESS_LPS001WP_PW_ADDR		0x20
+ #define ST_PRESS_LPS001WP_PW_MASK		0x40
++#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
++	(100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
+ #define ST_PRESS_LPS001WP_BDU_ADDR		0x20
+ #define ST_PRESS_LPS001WP_BDU_MASK		0x04
+ #define ST_PRESS_LPS001WP_MULTIREAD_BIT		true
+@@ -90,18 +100,12 @@
+ #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL	0x04
+ #define ST_PRESS_LPS25H_PW_ADDR			0x20
+ #define ST_PRESS_LPS25H_PW_MASK			0x80
+-#define ST_PRESS_LPS25H_FS_ADDR			0x00
+-#define ST_PRESS_LPS25H_FS_MASK			0x00
+-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL		0x00
+-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN	ST_PRESS_KPASCAL_NANO_SCALE
+-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN	ST_PRESS_CELSIUS_NANO_SCALE
+ #define ST_PRESS_LPS25H_BDU_ADDR		0x20
+ #define ST_PRESS_LPS25H_BDU_MASK		0x04
+ #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR		0x23
+ #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK	0x01
+ #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK	0x10
+ #define ST_PRESS_LPS25H_MULTIREAD_BIT		true
+-#define ST_PRESS_LPS25H_TEMP_OFFSET		42500
+ #define ST_PRESS_LPS25H_OUT_XL_ADDR		0x28
+ #define ST_TEMP_LPS25H_OUT_L_ADDR		0x2b
+ 
+@@ -153,7 +157,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ 			.storagebits = 16,
+ 			.endianness = IIO_LE,
+ 		},
+-		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
++		.info_mask_separate =
++			BIT(IIO_CHAN_INFO_RAW) |
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.modified = 0,
+ 	},
+ 	{
+@@ -169,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
+ 		},
+ 		.info_mask_separate =
+ 			BIT(IIO_CHAN_INFO_RAW) |
+-			BIT(IIO_CHAN_INFO_OFFSET),
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.modified = 0,
+ 	},
+ 	IIO_CHAN_SOFT_TIMESTAMP(1)
+@@ -203,11 +209,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.addr = ST_PRESS_LPS331AP_FS_ADDR,
+ 			.mask = ST_PRESS_LPS331AP_FS_MASK,
+ 			.fs_avl = {
++				/*
++				 * Pressure and temperature sensitivity values
++				 * as defined in table 3 of LPS331AP datasheet.
++				 */
+ 				[0] = {
+ 					.num = ST_PRESS_FS_AVL_1260MB,
+-					.value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
+-					.gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
+-					.gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
++					.gain = ST_PRESS_KPASCAL_NANO_SCALE,
++					.gain2 = ST_PRESS_LSB_PER_CELSIUS,
+ 				},
+ 			},
+ 		},
+@@ -246,7 +255,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ 		},
+ 		.fs = {
+-			.addr = 0,
++			.fs_avl = {
++				/*
++				 * Pressure and temperature resolution values
++				 * as defined in table 3 of LPS001WP datasheet.
++				 */
++				[0] = {
++					.num = ST_PRESS_FS_AVL_1100MB,
++					.gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
++					.gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
++				},
++			},
+ 		},
+ 		.bdu = {
+ 			.addr = ST_PRESS_LPS001WP_BDU_ADDR,
+@@ -282,14 +301,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
+ 			.value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ 		},
+ 		.fs = {
+-			.addr = ST_PRESS_LPS25H_FS_ADDR,
+-			.mask = ST_PRESS_LPS25H_FS_MASK,
+ 			.fs_avl = {
++				/*
++				 * Pressure and temperature sensitivity values
++				 * as defined in table 3 of LPS25H datasheet.
++				 */
+ 				[0] = {
+ 					.num = ST_PRESS_FS_AVL_1260MB,
+-					.value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
+-					.gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
+-					.gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
++					.gain = ST_PRESS_KPASCAL_NANO_SCALE,
++					.gain2 = ST_PRESS_LSB_PER_CELSIUS,
+ 				},
+ 			},
+ 		},
+@@ -343,26 +363,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
+ 
+ 		return IIO_VAL_INT;
+ 	case IIO_CHAN_INFO_SCALE:
+-		*val = 0;
+-
+ 		switch (ch->type) {
+ 		case IIO_PRESSURE:
++			*val = 0;
+ 			*val2 = press_data->current_fullscale->gain;
+-			break;
++			return IIO_VAL_INT_PLUS_NANO;
+ 		case IIO_TEMP:
++			*val = MCELSIUS_PER_CELSIUS;
+ 			*val2 = press_data->current_fullscale->gain2;
+-			break;
++			return IIO_VAL_FRACTIONAL;
+ 		default:
+ 			err = -EINVAL;
+ 			goto read_error;
+ 		}
+ 
+-		return IIO_VAL_INT_PLUS_NANO;
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		switch (ch->type) {
+ 		case IIO_TEMP:
+-			*val = 425;
+-			*val2 = 10;
++			*val = ST_PRESS_MILLI_CELSIUS_OFFSET *
++			       press_data->current_fullscale->gain2;
++			*val2 = MCELSIUS_PER_CELSIUS;
+ 			break;
+ 		default:
+ 			err = -EINVAL;
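
The reworked scale/offset reporting encodes the LPS331AP datasheet formula T = 42.5 + raw / 480 (degrees C). A worked example of how IIO recovers it from the values the driver now returns (gain2 = 480 LSB per degree C):

	scale  = 1000 / gain2          = 1000 / 480  (millidegrees C per LSB)
	offset = 42500 * gain2 / 1000  = 20400       (LSB)

	processed = (raw + offset) * scale
	          = raw * 1000 / 480 + 42500         (millidegrees C)

so raw = 4800 gives 10000 + 42500 = 52500 millidegrees C, i.e. 52.5 degrees C, matching the datasheet formula.
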
+diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
+index bc0d68efd455..c96ddaf00fa6 100644
+--- a/drivers/iio/proximity/as3935.c
++++ b/drivers/iio/proximity/as3935.c
+@@ -64,6 +64,7 @@ struct as3935_state {
+ 	struct delayed_work work;
+ 
+ 	u32 tune_cap;
++	u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
+ 	u8 buf[2] ____cacheline_aligned;
+ };
+ 
+@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
+ 		.type           = IIO_PROXIMITY,
+ 		.info_mask_separate =
+ 			BIT(IIO_CHAN_INFO_RAW) |
+-			BIT(IIO_CHAN_INFO_PROCESSED),
++			BIT(IIO_CHAN_INFO_PROCESSED) |
++			BIT(IIO_CHAN_INFO_SCALE),
+ 		.scan_index     = 0,
+ 		.scan_type = {
+ 			.sign           = 'u',
+@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
+ 		/* storm out of range */
+ 		if (*val == AS3935_DATA_MASK)
+ 			return -EINVAL;
+-		*val *= 1000;
++
++		if (m == IIO_CHAN_INFO_PROCESSED)
++			*val *= 1000;
++		break;
++	case IIO_CHAN_INFO_SCALE:
++		*val = 1000;
+ 		break;
+ 	default:
+ 		return -EINVAL;
+@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
+ 	ret = as3935_read(st, AS3935_DATA, &val);
+ 	if (ret)
+ 		goto err_read;
+-	val &= AS3935_DATA_MASK;
+-	val *= 1000;
+ 
+-	iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
++	st->buffer[0] = val & AS3935_DATA_MASK;
++	iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
++					   pf->timestamp);
+ err_read:
+ 	iio_trigger_notify_done(indio_dev->trig);
+ 
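
iio_push_to_buffers_with_timestamp() appends an s64 timestamp at the next naturally aligned offset past the scan data, so the driver must hand it a buffer large enough for data, padding, and timestamp rather than a bare value on the stack. The layout behind the new 16-byte driver buffer:

	byte  0       8-bit proximity sample
	bytes 1..7    padding (alignment for the timestamp)
	bytes 8..15   s64 timestamp
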
+diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
+index cb5ece77fd7d..e7d7230a7e31 100644
+--- a/drivers/input/serio/i8042.c
++++ b/drivers/input/serio/i8042.c
+@@ -1249,6 +1249,7 @@ static int __init i8042_create_kbd_port(void)
+ 	serio->start		= i8042_start;
+ 	serio->stop		= i8042_stop;
+ 	serio->close		= i8042_port_close;
++	serio->ps2_cmd_mutex	= &i8042_mutex;
+ 	serio->port_data	= port;
+ 	serio->dev.parent	= &i8042_platform_device->dev;
+ 	strlcpy(serio->name, "i8042 KBD port", sizeof(serio->name));
+@@ -1345,21 +1346,6 @@ static void i8042_unregister_ports(void)
+ 	}
+ }
+ 
+-/*
+- * Checks whether port belongs to i8042 controller.
+- */
+-bool i8042_check_port_owner(const struct serio *port)
+-{
+-	int i;
+-
+-	for (i = 0; i < I8042_NUM_PORTS; i++)
+-		if (i8042_ports[i].serio == port)
+-			return true;
+-
+-	return false;
+-}
+-EXPORT_SYMBOL(i8042_check_port_owner);
+-
+ static void i8042_free_irqs(void)
+ {
+ 	if (i8042_aux_irq_registered)
+diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
+index 75516996db20..ded0c6f65c9f 100644
+--- a/drivers/input/serio/libps2.c
++++ b/drivers/input/serio/libps2.c
+@@ -56,19 +56,17 @@ EXPORT_SYMBOL(ps2_sendbyte);
+ 
+ void ps2_begin_command(struct ps2dev *ps2dev)
+ {
+-	mutex_lock(&ps2dev->cmd_mutex);
++	struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
+ 
+-	if (i8042_check_port_owner(ps2dev->serio))
+-		i8042_lock_chip();
++	mutex_lock(m);
+ }
+ EXPORT_SYMBOL(ps2_begin_command);
+ 
+ void ps2_end_command(struct ps2dev *ps2dev)
+ {
+-	if (i8042_check_port_owner(ps2dev->serio))
+-		i8042_unlock_chip();
++	struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex;
+ 
+-	mutex_unlock(&ps2dev->cmd_mutex);
++	mutex_unlock(m);
+ }
+ EXPORT_SYMBOL(ps2_end_command);
+ 
+diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
+index b257e46876d3..0f5e1820c92d 100644
+--- a/drivers/md/dm-flakey.c
++++ b/drivers/md/dm-flakey.c
+@@ -287,10 +287,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
+ 		pb->bio_submitted = true;
+ 
+ 		/*
+-		 * Map reads as normal.
++		 * Map reads as normal only if corrupt_bio_byte is set.
+ 		 */
+-		if (bio_data_dir(bio) == READ)
+-			goto map_bio;
++		if (bio_data_dir(bio) == READ) {
++			/* If flags were specified, only corrupt those that match. */
++			if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
++			    all_corrupt_bio_flags_match(bio, fc))
++				goto map_bio;
++			else
++				return -EIO;
++		}
+ 
+ 		/*
+ 		 * Drop writes?
+@@ -328,12 +334,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
+ 
+ 	/*
+ 	 * Corrupt successful READs while in down state.
+-	 * If flags were specified, only corrupt those that match.
+ 	 */
+-	if (fc->corrupt_bio_byte && !error && pb->bio_submitted &&
+-	    (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
+-	    all_corrupt_bio_flags_match(bio, fc))
+-		corrupt_bio_data(bio, fc);
++	if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
++		if (fc->corrupt_bio_byte)
++			corrupt_bio_data(bio, fc);
++		else
++			return -EIO;
++	}
+ 
+ 	return error;
+ }
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 1f37781f7765..87de9a0848b7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1200,8 +1200,14 @@ static void stop_queue(struct request_queue *q)
+ {
+ 	if (!q->mq_ops)
+ 		old_stop_queue(q);
+-	else
++	else {
++		spin_lock_irq(q->queue_lock);
++		queue_flag_set(QUEUE_FLAG_STOPPED, q);
++		spin_unlock_irq(q->queue_lock);
++
++		blk_mq_cancel_requeue_work(q);
+ 		blk_mq_stop_hw_queues(q);
++	}
+ }
+ 
+ static void old_start_queue(struct request_queue *q)
+@@ -1218,8 +1224,10 @@ static void start_queue(struct request_queue *q)
+ {
+ 	if (!q->mq_ops)
+ 		old_start_queue(q);
+-	else
++	else {
++		queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
+ 		blk_mq_start_stopped_hw_queues(q, true);
++	}
+ }
+ 
+ static void dm_done(struct request *clone, int error, bool mapped)
+@@ -2139,7 +2147,7 @@ static void dm_request_fn(struct request_queue *q)
+ 	goto out;
+ 
+ delay_and_out:
+-	blk_delay_queue(q, HZ / 100);
++	blk_delay_queue(q, 10);
+ out:
+ 	dm_put_live_table(md, srcu_idx);
+ }
+@@ -2731,6 +2739,17 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 	}
+ 	dm_put_live_table(md, srcu_idx);
+ 
++	/*
++	 * On suspend dm_stop_queue() handles stopping the blk-mq
++	 * request_queue BUT: even though the hw_queues are marked
++	 * BLK_MQ_S_STOPPED at that point there is still a race that
++	 * is allowing block/blk-mq.c to call ->queue_rq against a
++	 * hctx that it really shouldn't.  The following check guards
++	 * against this rarity (albeit _not_ race-free).
++	 */
++	if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
++		return BLK_MQ_RQ_QUEUE_BUSY;
++
+ 	if (ti->type->busy && ti->type->busy(ti))
+ 		return BLK_MQ_RQ_QUEUE_BUSY;
+ 
+@@ -3130,7 +3149,8 @@ static void unlock_fs(struct mapped_device *md)
+  * Caller must hold md->suspend_lock
+  */
+ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+-			unsigned suspend_flags, int interruptible)
++			unsigned suspend_flags, int interruptible,
++			int dmf_suspended_flag)
+ {
+ 	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
+ 	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
+@@ -3197,6 +3217,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
+ 	 * to finish.
+ 	 */
+ 	r = dm_wait_for_completion(md, interruptible);
++	if (!r)
++		set_bit(dmf_suspended_flag, &md->flags);
+ 
+ 	if (noflush)
+ 		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
+@@ -3258,12 +3280,10 @@ retry:
+ 
+ 	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+ 
+-	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
++	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
+ 	if (r)
+ 		goto out_unlock;
+ 
+-	set_bit(DMF_SUSPENDED, &md->flags);
+-
+ 	dm_table_postsuspend_targets(map);
+ 
+ out_unlock:
+@@ -3357,9 +3377,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
+ 	 * would require changing .presuspend to return an error -- avoid this
+ 	 * until there is a need for more elaborate variants of internal suspend.
+ 	 */
+-	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);
+-
+-	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
++	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
++			    DMF_SUSPENDED_INTERNALLY);
+ 
+ 	dm_table_postsuspend_targets(map);
+ }
+diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
+index 1100e98a7b1d..7df7fb3738a0 100644
+--- a/drivers/media/dvb-core/dvb_ringbuffer.c
++++ b/drivers/media/dvb-core/dvb_ringbuffer.c
+@@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
+ 
+ int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf)
+ {
+-	return (rbuf->pread==rbuf->pwrite);
++	/* smp_load_acquire() to load write pointer on reader side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
++	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
++	 *
++	 * for memory barriers also see Documentation/circular-buffers.txt
++	 */
++	return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
+ }
+ 
+ 
+@@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf)
+ {
+ 	ssize_t free;
+ 
+-	free = rbuf->pread - rbuf->pwrite;
++	/* ACCESS_ONCE() to load read pointer on writer side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_read(),
++	 * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(),
++	 * or dvb_ringbuffer_reset()
++	 */
++	free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite;
+ 	if (free <= 0)
+ 		free += rbuf->size;
+ 	return free-1;
+@@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
+ {
+ 	ssize_t avail;
+ 
+-	avail = rbuf->pwrite - rbuf->pread;
++	/* smp_load_acquire() to load write pointer on reader side
++	 * this pairs with smp_store_release() in dvb_ringbuffer_write(),
++	 * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset()
++	 */
++	avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
+ 	if (avail < 0)
+ 		avail += rbuf->size;
+ 	return avail;
+@@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf)
+ 
+ void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf)
+ {
+-	rbuf->pread = rbuf->pwrite;
++	/* dvb_ringbuffer_flush() counts as a read operation
++	 * smp_load_acquire() to load write pointer
++	 * smp_store_release() to update read pointer, this ensures that the
++	 * correct pointer is visible for subsequent dvb_ringbuffer_free()
++	 * calls on other cpu cores
++	 */
++	smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
+ 	rbuf->error = 0;
+ }
+ EXPORT_SYMBOL(dvb_ringbuffer_flush);
+ 
+ void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf)
+ {
+-	rbuf->pread = rbuf->pwrite = 0;
++	/* dvb_ringbuffer_reset() counts as a read and a write operation
++	 * smp_store_release() to update read pointer
++	 */
++	smp_store_release(&rbuf->pread, 0);
++	/* smp_store_release() to update write pointer */
++	smp_store_release(&rbuf->pwrite, 0);
+ 	rbuf->error = 0;
+ }
+ 
+@@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si
+ 			return -EFAULT;
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pread = 0;
++		/* smp_store_release() for read pointer update to ensure
++		 * that buf is not overwritten until read is complete,
++		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
++		 */
++		smp_store_release(&rbuf->pread, 0);
+ 	}
+ 	if (copy_to_user(buf, rbuf->data+rbuf->pread, todo))
+ 		return -EFAULT;
+ 
+-	rbuf->pread = (rbuf->pread + todo) % rbuf->size;
++	/* smp_store_release() to update read pointer, see above */
++	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
+ 
+ 	return len;
+ }
+@@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
+ 		memcpy(buf, rbuf->data+rbuf->pread, split);
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pread = 0;
++		/* smp_store_release() for read pointer update to ensure
++		 * that buf is not overwritten until read is complete,
++		 * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free()
++		 */
++		smp_store_release(&rbuf->pread, 0);
+ 	}
+ 	memcpy(buf, rbuf->data+rbuf->pread, todo);
+ 
+-	rbuf->pread = (rbuf->pread + todo) % rbuf->size;
++	/* smp_store_release() to update read pointer, see above */
++	smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size);
+ }
+ 
+ 
+@@ -158,10 +194,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t
+ 		memcpy(rbuf->data+rbuf->pwrite, buf, split);
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pwrite = 0;
++		/* smp_store_release() for write pointer update to ensure that
++		 * written data is visible on other cpu cores before the pointer
++		 * update, this pairs with smp_load_acquire() in
++		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
++		 */
++		smp_store_release(&rbuf->pwrite, 0);
+ 	}
+ 	memcpy(rbuf->data+rbuf->pwrite, buf, todo);
+-	rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
++	/* smp_store_release() for write pointer update, see above */
++	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
+ 
+ 	return len;
+ }
+@@ -181,12 +223,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
+ 			return len - todo;
+ 		buf += split;
+ 		todo -= split;
+-		rbuf->pwrite = 0;
++		/* smp_store_release() for write pointer update to ensure that
++		 * written data is visible on other cpu cores before the pointer
++		 * update, this pairs with smp_load_acquire() in
++		 * dvb_ringbuffer_empty() or dvb_ringbuffer_avail()
++		 */
++		smp_store_release(&rbuf->pwrite, 0);
+ 	}
+ 	status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo);
+ 	if (status)
+ 		return len - todo;
+-	rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size;
++	/* smp_store_release() for write pointer update, see above */
++	smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size);
+ 
+ 	return len;
+ }
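
Taken together, the new ringbuffer comments describe the standard single-producer/single-consumer discipline: the writer must publish the payload before releasing the new write pointer, the reader must acquire the write pointer before touching the payload, and the same applies symmetrically to the read pointer so slots are never reused while still being read. A userspace sketch in C11 atomics, assuming a power-of-two buffer size:

#include <stdatomic.h>

#define RB_SIZE 1024u			/* power of two, hypothetical */

struct ring {
	unsigned char data[RB_SIZE];
	_Atomic unsigned int pread, pwrite;
};

static int ring_put(struct ring *rb, unsigned char b)
{
	unsigned int w = atomic_load_explicit(&rb->pwrite, memory_order_relaxed);
	unsigned int r = atomic_load_explicit(&rb->pread, memory_order_acquire);

	if (((w + 1) & (RB_SIZE - 1)) == r)
		return -1;		/* full */
	rb->data[w] = b;
	/* release: payload must be visible before the new write pointer */
	atomic_store_explicit(&rb->pwrite, (w + 1) & (RB_SIZE - 1),
			      memory_order_release);
	return 0;
}

static int ring_get(struct ring *rb, unsigned char *b)
{
	unsigned int r = atomic_load_explicit(&rb->pread, memory_order_relaxed);
	/* acquire: pairs with the writer's release store above */
	unsigned int w = atomic_load_explicit(&rb->pwrite, memory_order_acquire);

	if (r == w)
		return -1;		/* empty */
	*b = rb->data[r];
	/* release: the slot may be reused only after this store */
	atomic_store_explicit(&rb->pread, (r + 1) & (RB_SIZE - 1),
			      memory_order_release);
	return 0;
}
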
+diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+index 8333fbc2fe96..835dbb8f5970 100644
+--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
+@@ -1022,6 +1022,11 @@ static int match_child(struct device *dev, void *data)
+ 	return !strcmp(dev_name(dev), (char *)data);
+ }
+ 
++static void s5p_mfc_memdev_release(struct device *dev)
++{
++	dma_release_declared_memory(dev);
++}
++
+ static void *mfc_get_drv_data(struct platform_device *pdev);
+ 
+ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+@@ -1034,6 +1039,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l");
++	dev->mem_dev_l->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_l);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-l", mem_info, 2);
+@@ -1051,6 +1059,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
+ 		mfc_err("Not enough memory\n");
+ 		return -ENOMEM;
+ 	}
++
++	dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r");
++	dev->mem_dev_r->release = s5p_mfc_memdev_release;
+ 	device_initialize(dev->mem_dev_r);
+ 	of_property_read_u32_array(dev->plat_dev->dev.of_node,
+ 			"samsung,mfc-r", mem_info, 2);
+diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
+index 84fa6e9b59a1..67314c034cdb 100644
+--- a/drivers/media/rc/ir-rc5-decoder.c
++++ b/drivers/media/rc/ir-rc5-decoder.c
+@@ -29,7 +29,7 @@
+ #define RC5_BIT_START		(1 * RC5_UNIT)
+ #define RC5_BIT_END		(1 * RC5_UNIT)
+ #define RC5X_SPACE		(4 * RC5_UNIT)
+-#define RC5_TRAILER		(10 * RC5_UNIT) /* In reality, approx 100 */
++#define RC5_TRAILER		(6 * RC5_UNIT) /* In reality, approx 100 */
+ 
+ enum rc5_state {
+ 	STATE_INACTIVE,
+diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c
+index 78c12d22dfbb..5dab02432e82 100644
+--- a/drivers/media/usb/usbtv/usbtv-audio.c
++++ b/drivers/media/usb/usbtv/usbtv-audio.c
+@@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work)
+ {
+ 	struct usbtv *chip = container_of(work, struct usbtv, snd_trigger);
+ 
++	if (!chip->snd)
++		return;
++
+ 	if (atomic_read(&chip->snd_stream))
+ 		usbtv_audio_start(chip);
+ 	else
+@@ -378,6 +381,8 @@ err:
+ 
+ void usbtv_audio_free(struct usbtv *usbtv)
+ {
++	cancel_work_sync(&usbtv->snd_trigger);
++
+ 	if (usbtv->snd && usbtv->udev) {
+ 		snd_card_free(usbtv->snd);
+ 		usbtv->snd = NULL;
+diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
+index 12e324319573..798761f2252f 100644
+--- a/drivers/mfd/qcom_rpm.c
++++ b/drivers/mfd/qcom_rpm.c
+@@ -34,7 +34,13 @@ struct qcom_rpm_resource {
+ struct qcom_rpm_data {
+ 	u32 version;
+ 	const struct qcom_rpm_resource *resource_table;
+-	unsigned n_resources;
++	unsigned int n_resources;
++	unsigned int req_ctx_off;
++	unsigned int req_sel_off;
++	unsigned int ack_ctx_off;
++	unsigned int ack_sel_off;
++	unsigned int req_sel_size;
++	unsigned int ack_sel_size;
+ };
+ 
+ struct qcom_rpm {
+@@ -61,11 +67,7 @@ struct qcom_rpm {
+ 
+ #define RPM_REQUEST_TIMEOUT	(5 * HZ)
+ 
+-#define RPM_REQUEST_CONTEXT	3
+-#define RPM_REQ_SELECT		11
+-#define RPM_ACK_CONTEXT		15
+-#define RPM_ACK_SELECTOR	23
+-#define RPM_SELECT_SIZE		7
++#define RPM_MAX_SEL_SIZE	7
+ 
+ #define RPM_NOTIFICATION	BIT(30)
+ #define RPM_REJECTED		BIT(31)
+@@ -156,6 +158,12 @@ static const struct qcom_rpm_data apq8064_template = {
+ 	.version = 3,
+ 	.resource_table = apq8064_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(apq8064_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = {
+@@ -239,6 +247,12 @@ static const struct qcom_rpm_data msm8660_template = {
+ 	.version = 2,
+ 	.resource_table = msm8660_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(msm8660_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 19,
++	.ack_sel_off = 27,
++	.req_sel_size = 7,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = {
+@@ -321,6 +335,12 @@ static const struct qcom_rpm_data msm8960_template = {
+ 	.version = 3,
+ 	.resource_table = msm8960_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(msm8960_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = {
+@@ -361,6 +381,12 @@ static const struct qcom_rpm_data ipq806x_template = {
+ 	.version = 3,
+ 	.resource_table = ipq806x_rpm_resource_table,
+ 	.n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table),
++	.req_ctx_off = 3,
++	.req_sel_off = 11,
++	.ack_ctx_off = 15,
++	.ack_sel_off = 23,
++	.req_sel_size = 4,
++	.ack_sel_size = 7,
+ };
+ 
+ static const struct of_device_id qcom_rpm_of_match[] = {
+@@ -379,7 +405,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ {
+ 	const struct qcom_rpm_resource *res;
+ 	const struct qcom_rpm_data *data = rpm->data;
+-	u32 sel_mask[RPM_SELECT_SIZE] = { 0 };
++	u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 };
+ 	int left;
+ 	int ret = 0;
+ 	int i;
+@@ -397,12 +423,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm,
+ 		writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i));
+ 
+ 	bitmap_set((unsigned long *)sel_mask, res->select_id, 1);
+-	for (i = 0; i < ARRAY_SIZE(sel_mask); i++) {
++	for (i = 0; i < rpm->data->req_sel_size; i++) {
+ 		writel_relaxed(sel_mask[i],
+-			       RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i));
++			       RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i));
+ 	}
+ 
+-	writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT));
++	writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off));
+ 
+ 	reinit_completion(&rpm->ack);
+ 	regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit));
+@@ -425,10 +451,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev)
+ 	u32 ack;
+ 	int i;
+ 
+-	ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
+-	for (i = 0; i < RPM_SELECT_SIZE; i++)
+-		writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i));
+-	writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT));
++	ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
++	for (i = 0; i < rpm->data->ack_sel_size; i++)
++		writel_relaxed(0,
++			RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i));
++	writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off));
+ 
+ 	if (ack & RPM_NOTIFICATION) {
+ 		dev_warn(rpm->dev, "ignoring notification!\n");
+diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
+index c2e1232cd45c..fa5cd51cba38 100644
+--- a/drivers/mtd/nand/nand_base.c
++++ b/drivers/mtd/nand/nand_base.c
+@@ -2441,7 +2441,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+ 		int cached = writelen > bytes && page != blockmask;
+ 		uint8_t *wbuf = buf;
+ 		int use_bufpoi;
+-		int part_pagewr = (column || writelen < (mtd->writesize - 1));
++		int part_pagewr = (column || writelen < mtd->writesize);
+ 
+ 		if (part_pagewr)
+ 			use_bufpoi = 1;
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index b7f824d5ee88..9fd4f7838080 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -887,7 +887,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ 		ubi = ubi_devices[i];
+ 		if (ubi && mtd->index == ubi->mtd->index) {
+-			ubi_err(ubi, "mtd%d is already attached to ubi%d",
++			pr_err("ubi: mtd%d is already attached to ubi%d",
+ 				mtd->index, i);
+ 			return -EEXIST;
+ 		}
+@@ -902,7 +902,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	 * no sense to attach emulated MTD devices, so we prohibit this.
+ 	 */
+ 	if (mtd->type == MTD_UBIVOLUME) {
+-		ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
++		pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI",
+ 			mtd->index);
+ 		return -EINVAL;
+ 	}
+@@ -913,7 +913,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 			if (!ubi_devices[ubi_num])
+ 				break;
+ 		if (ubi_num == UBI_MAX_DEVICES) {
+-			ubi_err(ubi, "only %d UBI devices may be created",
++			pr_err("ubi: only %d UBI devices may be created",
+ 				UBI_MAX_DEVICES);
+ 			return -ENFILE;
+ 		}
+@@ -923,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 
+ 		/* Make sure ubi_num is not busy */
+ 		if (ubi_devices[ubi_num]) {
+-			ubi_err(ubi, "already exists");
++			pr_err("ubi: ubi%i already exists", ubi_num);
+ 			return -EEXIST;
+ 		}
+ 	}
+@@ -1005,6 +1005,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 			goto out_detach;
+ 	}
+ 
++	/* Make device "available" before it becomes accessible via sysfs */
++	ubi_devices[ubi_num] = ubi;
++
+ 	err = uif_init(ubi, &ref);
+ 	if (err)
+ 		goto out_detach;
+@@ -1049,7 +1052,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
+ 	wake_up_process(ubi->bgt_thread);
+ 	spin_unlock(&ubi->wl_lock);
+ 
+-	ubi_devices[ubi_num] = ubi;
+ 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
+ 	return ubi_num;
+ 
+@@ -1060,6 +1062,7 @@ out_uif:
+ 	ubi_assert(ref);
+ 	uif_close(ubi);
+ out_detach:
++	ubi_devices[ubi_num] = NULL;
+ 	ubi_wl_close(ubi);
+ 	ubi_free_internal_volumes(ubi);
+ 	vfree(ubi->vtbl);
+diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
+index ff4d97848d1c..812ecf2d253a 100644
+--- a/drivers/mtd/ubi/vmt.c
++++ b/drivers/mtd/ubi/vmt.c
+@@ -536,13 +536,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
+-	/* Change volume table record */
+-	vtbl_rec = ubi->vtbl[vol_id];
+-	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
+-	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+-	if (err)
+-		goto out_acc;
+-
+ 	if (pebs < 0) {
+ 		for (i = 0; i < -pebs; i++) {
+ 			err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
+@@ -560,6 +553,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
+ 		spin_unlock(&ubi->volumes_lock);
+ 	}
+ 
++	/*
++	 * When we shrink a volume we have to flush all pending (erase) work.
++	 * Otherwise it can happen that upon next attach UBI finds a LEB with
++	 * lnum > highest_lnum and refuses to attach.
++	 */
++	if (pebs < 0) {
++		err = ubi_wl_flush(ubi, vol_id, UBI_ALL);
++		if (err)
++			goto out_acc;
++	}
++
++	/* Change volume table record */
++	vtbl_rec = ubi->vtbl[vol_id];
++	vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs);
++	err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
++	if (err)
++		goto out_acc;
++
+ 	vol->reserved_pebs = reserved_pebs;
+ 	if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
+ 		vol->used_ebs = reserved_pebs;
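
The vmt.c hunk is purely an ordering fix: when shrinking, the trimmed LEBs are unmapped and the queued erase work flushed before the smaller reserved_pebs value is committed to the volume table, so a crash in between can never leave LEBs beyond the recorded size. Schematically, with stub functions standing in for the UBI internals:

#include <stdio.h>

struct vol {
	int reserved_pebs;
};

/* stubs, illustrative only */
static int unmap_trimmed_lebs(struct vol *v, int n) { (void)v; (void)n; return 0; }
static int flush_pending_work(struct vol *v)        { (void)v; return 0; }
static int commit_volume_table(struct vol *v, int n){ v->reserved_pebs = n; return 0; }

static int shrink_volume(struct vol *vol, int new_pebs)
{
	int err;

	err = unmap_trimmed_lebs(vol, new_pebs);	/* queues erase work */
	if (err)
		return err;

	err = flush_pending_work(vol);	/* quiesce before committing */
	if (err)
		return err;

	/* only now is the smaller size made durable; a crash earlier
	 * leaves the old table, which still covers every existing LEB */
	return commit_volume_table(vol, new_pebs);
}

int main(void)
{
	struct vol v = { .reserved_pebs = 8 };

	return shrink_volume(&v, 4);
}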
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 5db25e46a962..5dbc1744eba5 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -858,6 +858,13 @@ advance:
+ 	if (cdc_ncm_init(dev))
+ 		goto error2;
+ 
++	/* Some firmwares need a pause here or they will silently fail
++	 * to set up the interface properly.  This value was decided
++	 * empirically on a Sierra Wireless MC7455 running 02.08.02.00
++	 * firmware.
++	 */
++	usleep_range(10000, 20000);
++
+ 	/* configure data interface */
+ 	temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
+ 	if (temp) {
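
The cdc_ncm change inserts a bounded settle delay between device init and the altsetting switch; usleep_range(min, max) sleeps at least min microseconds while letting the scheduler coalesce timers up to max. A rough user-space analogue of the at-least-this-long part (the max bound is a kernel scheduler hint with no direct equivalent here):

#include <errno.h>
#include <time.h>

/* sleep at least min_us microseconds; user-space stand-in for the
 * kernel's usleep_range() */
static void settle_delay(long min_us)
{
	struct timespec ts = {
		.tv_sec  = min_us / 1000000,
		.tv_nsec = (min_us % 1000000) * 1000L,
	};

	while (nanosleep(&ts, &ts) == -1 && errno == EINTR)
		;	/* resume after a signal interruption */
}

int main(void)
{
	settle_delay(10000);	/* the 10 ms lower bound from the hunk */
	return 0;
}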
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 9faf69875fab..2babc39f66a7 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -422,6 +422,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ /* 8000 Series */
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+@@ -444,6 +445,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index 5ed97246c2e7..20f0b00dda05 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+ 	return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
+ }
+ 
++/* always return newly allocated name, caller must free after use */
+ static const char *safe_name(struct kobject *kobj, const char *orig_name)
+ {
+ 	const char *name = orig_name;
+@@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name)
+ 		name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
+ 	}
+ 
+-	if (name != orig_name)
++	if (name == orig_name) {
++		name = kstrdup(orig_name, GFP_KERNEL);
++	} else {
+ 		pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+ 			kobject_name(kobj), name);
++	}
+ 	return name;
+ }
+ 
+@@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+ int __of_attach_node_sysfs(struct device_node *np)
+ {
+ 	const char *name;
++	struct kobject *parent;
+ 	struct property *pp;
+ 	int rc;
+ 
+@@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np)
+ 	np->kobj.kset = of_kset;
+ 	if (!np->parent) {
+ 		/* Nodes without parents are new top level trees */
+-		rc = kobject_add(&np->kobj, NULL, "%s",
+-				 safe_name(&of_kset->kobj, "base"));
++		name = safe_name(&of_kset->kobj, "base");
++		parent = NULL;
+ 	} else {
+ 		name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+-		if (!name || !name[0])
+-			return -EINVAL;
+-
+-		rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
++		parent = &np->parent->kobj;
+ 	}
++	if (!name)
++		return -ENOMEM;
++	rc = kobject_add(&np->kobj, parent, "%s", name);
++	kfree(name);
+ 	if (rc)
+ 		return rc;
+ 
+@@ -1756,6 +1762,12 @@ int __of_remove_property(struct device_node *np, struct property *prop)
+ 	return 0;
+ }
+ 
++void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop)
++{
++	sysfs_remove_bin_file(&np->kobj, &prop->attr);
++	kfree(prop->attr.attr.name);
++}
++
+ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+ {
+ 	if (!IS_ENABLED(CONFIG_SYSFS))
+@@ -1763,7 +1775,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
+ 
+ 	/* at early boot, bail here and defer setup to of_init() */
+ 	if (of_kset && of_node_is_attached(np))
+-		sysfs_remove_bin_file(&np->kobj, &prop->attr);
++		__of_sysfs_remove_bin_file(np, prop);
+ }
+ 
+ /**
+@@ -1833,7 +1845,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop
+ 		return;
+ 
+ 	if (oldprop)
+-		sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
++		__of_sysfs_remove_bin_file(np, oldprop);
+ 	__of_add_property_sysfs(np, newprop);
+ }
+ 
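
The common thread of the three drivers/of changes is ownership: safe_name() now returns freshly allocated memory in every case, kstrdup() when the name was already unique and kasprintf() when it had to be suffixed, so the new __of_sysfs_remove_bin_file() helper can free it unconditionally. A user-space analogue of that always-allocate convention, with the collision check stubbed out:

#define _GNU_SOURCE	/* for asprintf() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stub: pretend exactly the name "base" is already taken */
static int name_taken(const char *name)
{
	return strcmp(name, "base") == 0;
}

/* always returns heap memory (or NULL), so callers free() blindly */
static char *safe_name(const char *orig)
{
	char *name = NULL;
	int i = 0;

	while (name_taken(name ? name : orig)) {
		free(name);
		if (asprintf(&name, "%s#%d", orig, ++i) < 0)
			return NULL;
	}
	return name ? name : strdup(orig);
}

int main(void)
{
	char *n = safe_name("base");

	printf("%s\n", n ? n : "(alloc failed)");	/* prints "base#1" */
	free(n);	/* unconditional: the string is always ours */
	return 0;
}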
+diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
+index 53826b84e0ec..2d72ddcf534f 100644
+--- a/drivers/of/dynamic.c
++++ b/drivers/of/dynamic.c
+@@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np)
+ 	/* only remove properties if on sysfs */
+ 	if (of_node_is_attached(np)) {
+ 		for_each_property_of_node(np, pp)
+-			sysfs_remove_bin_file(&np->kobj, &pp->attr);
++			__of_sysfs_remove_bin_file(np, pp);
+ 		kobject_del(&np->kobj);
+ 	}
+ 
+diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
+index 8e882e706cd8..46ddbee22ce3 100644
+--- a/drivers/of/of_private.h
++++ b/drivers/of/of_private.h
+@@ -81,6 +81,9 @@ extern int __of_attach_node_sysfs(struct device_node *np);
+ extern void __of_detach_node(struct device_node *np);
+ extern void __of_detach_node_sysfs(struct device_node *np);
+ 
++extern void __of_sysfs_remove_bin_file(struct device_node *np,
++				       struct property *prop);
++
+ /* iterators for transactions, used for overlays */
+ /* forward iterator */
+ #define for_each_transaction_entry(_oft, _te) \
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 4a6933f02cd0..ae12c0317645 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3108,13 +3108,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
+ }
+ 
+ /*
+- * Atheros AR93xx chips do not behave after a bus reset.  The device will
+- * throw a Link Down error on AER-capable systems and regardless of AER,
+- * config space of the device is never accessible again and typically
+- * causes the system to hang or reset when access is attempted.
++ * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
++ * The device will throw a Link Down error on AER-capable systems and
++ * regardless of AER, config space of the device is never accessible again
++ * and typically causes the system to hang or reset when access is attempted.
+  * http://www.spinics.net/lists/linux-pci/msg34797.html
+  */
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+ 
+ static void quirk_no_pm_reset(struct pci_dev *dev)
+ {
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 732ff757a95f..688f6b08c70f 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -160,7 +160,6 @@ struct chv_pin_context {
+  * @pctldev: Pointer to the pin controller device
+  * @chip: GPIO chip in this pin controller
+  * @regs: MMIO registers
+- * @lock: Lock to serialize register accesses
+  * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO
+  *		offset (in GPIO number space)
+  * @community: Community this pinctrl instance represents
+@@ -174,7 +173,6 @@ struct chv_pinctrl {
+ 	struct pinctrl_dev *pctldev;
+ 	struct gpio_chip chip;
+ 	void __iomem *regs;
+-	spinlock_t lock;
+ 	unsigned intr_lines[16];
+ 	const struct chv_community *community;
+ 	u32 saved_intmask;
+@@ -659,6 +657,17 @@ static const struct chv_community *chv_communities[] = {
+ 	&southeast_community,
+ };
+ 
++/*
++ * Lock to serialize register accesses
++ *
++ * Due to a silicon issue, a shared lock must be used to prevent
++ * concurrent accesses across the 4 GPIO controllers.
++ *
++ * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
++ * errata #CHT34, for further information.
++ */
++static DEFINE_RAW_SPINLOCK(chv_lock);
++
+ static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset,
+ 				unsigned reg)
+ {
+@@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ 	u32 ctrl0, ctrl1;
+ 	bool locked;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ 	ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1));
+ 	locked = chv_pad_locked(pctrl, offset);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	if (ctrl0 & CHV_PADCTRL0_GPIOEN) {
+ 		seq_puts(s, "GPIO ");
+@@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ 
+ 	grp = &pctrl->community->groups[group];
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	/* Check first that the pad is not locked */
+ 	for (i = 0; i < grp->npins; i++) {
+ 		if (chv_pad_locked(pctrl, grp->pins[i])) {
+ 			dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
+ 				 grp->pins[i]);
+-			spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EBUSY;
+ 		}
+ 	}
+@@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
+ 			pin, altfunc->mode, altfunc->invert_oe ? "" : "not ");
+ 	}
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 	void __iomem *reg;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	if (chv_pad_locked(pctrl, offset)) {
+ 		value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0));
+ 		if (!(value & CHV_PADCTRL0_GPIOEN)) {
+ 			/* Locked so cannot enable */
+-			spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EBUSY;
+ 		}
+ 	} else {
+@@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
+ 		chv_writel(value, reg);
+ 	}
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev,
+ 	void __iomem *reg;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	reg = chv_padreg(pctrl, offset, CHV_PADCTRL0);
+ 	value = readl(reg) & ~CHV_PADCTRL0_GPIOEN;
+ 	chv_writel(value, reg);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+@@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+ 	unsigned long flags;
+ 	u32 ctrl0;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK;
+ 	if (input)
+@@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev,
+ 		ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT;
+ 	chv_writel(ctrl0, reg);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -965,10 +974,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin,
+ 	u16 arg = 0;
+ 	u32 term;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT;
+ 
+@@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 	unsigned long flags;
+ 	u32 ctrl0, pull;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(reg);
+ 
+ 	switch (param) {
+@@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ 			break;
+ 		default:
+-			spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 			pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT;
+ 			break;
+ 		default:
+-			spin_unlock_irqrestore(&pctrl->lock, flags);
++			raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 			return -EINVAL;
+ 		}
+ 
+@@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin,
+ 		break;
+ 
+ 	default:
+-		spin_unlock_irqrestore(&pctrl->lock, flags);
++		raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 		return -EINVAL;
+ 	}
+ 
+ 	chv_writel(ctrl0, reg);
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1169,9 +1178,12 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset)
+ {
+ 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ 	int pin = chv_gpio_offset_to_pin(pctrl, offset);
++	unsigned long flags;
+ 	u32 ctrl0, cfg;
+ 
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ 	cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1189,7 +1201,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 	void __iomem *reg;
+ 	u32 ctrl0;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	reg = chv_padreg(pctrl, pin, CHV_PADCTRL0);
+ 	ctrl0 = readl(reg);
+@@ -1201,7 +1213,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 
+ 	chv_writel(ctrl0, reg);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+@@ -1209,8 +1221,11 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
+ 	struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(chip);
+ 	unsigned pin = chv_gpio_offset_to_pin(pctrl, offset);
+ 	u32 ctrl0, direction;
++	unsigned long flags;
+ 
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 	ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK;
+ 	direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT;
+@@ -1248,14 +1263,14 @@ static void chv_gpio_irq_ack(struct irq_data *d)
+ 	int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d));
+ 	u32 intr_line;
+ 
+-	spin_lock(&pctrl->lock);
++	raw_spin_lock(&chv_lock);
+ 
+ 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+ 	intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT;
+ 	chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT);
+ 
+-	spin_unlock(&pctrl->lock);
++	raw_spin_unlock(&chv_lock);
+ }
+ 
+ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+@@ -1266,7 +1281,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+ 	u32 value, intr_line;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 	intr_line &= CHV_PADCTRL0_INTSEL_MASK;
+@@ -1279,7 +1294,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+ 		value |= BIT(intr_line);
+ 	chv_writel(value, pctrl->regs + CHV_INTMASK);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ }
+ 
+ static void chv_gpio_irq_mask(struct irq_data *d)
+@@ -1313,6 +1328,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
+ 		unsigned long flags;
+ 		u32 intsel, value;
+ 
++		raw_spin_lock_irqsave(&chv_lock, flags);
+ 		intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+ 		intsel &= CHV_PADCTRL0_INTSEL_MASK;
+ 		intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+@@ -1323,12 +1339,11 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
+ 		else
+ 			handler = handle_edge_irq;
+ 
+-		spin_lock_irqsave(&pctrl->lock, flags);
+ 		if (!pctrl->intr_lines[intsel]) {
+ 			__irq_set_handler_locked(d->irq, handler);
+ 			pctrl->intr_lines[intsel] = offset;
+ 		}
+-		spin_unlock_irqrestore(&pctrl->lock, flags);
++		raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 	}
+ 
+ 	chv_gpio_irq_unmask(d);
+@@ -1344,7 +1359,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+ 	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&pctrl->lock, flags);
++	raw_spin_lock_irqsave(&chv_lock, flags);
+ 
+ 	/*
+ 	 * Pins which can be used as shared interrupt are configured in
+@@ -1393,7 +1408,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
+ 	else if (type & IRQ_TYPE_LEVEL_MASK)
+ 		__irq_set_handler_locked(d->irq, handle_level_irq);
+ 
+-	spin_unlock_irqrestore(&pctrl->lock, flags);
++	raw_spin_unlock_irqrestore(&chv_lock, flags);
+ 
+ 	return 0;
+ }
+@@ -1505,7 +1520,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
+ 	if (i == ARRAY_SIZE(chv_communities))
+ 		return -ENODEV;
+ 
+-	spin_lock_init(&pctrl->lock);
+ 	pctrl->dev = &pdev->dev;
+ 
+ #ifdef CONFIG_PM_SLEEP
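
The pinctrl-cherryview rework replaces each instance's spinlock with one file-scope raw spinlock, because erratum CHT34 forbids concurrent register access even across the four controllers, which a per-device lock can never enforce; it also adds the previously missing locking around bare readl() paths such as chv_gpio_get(). A user-space sketch of the shared-versus-per-instance distinction, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdint.h>

/* one lock for *all* controller instances, mirroring chv_lock;
 * a lock inside struct controller would only serialize one of them */
static pthread_mutex_t chv_lock = PTHREAD_MUTEX_INITIALIZER;

struct controller {
	volatile uint32_t *regs;
};

static uint32_t reg_read(struct controller *c, unsigned int off)
{
	uint32_t v;

	pthread_mutex_lock(&chv_lock);	/* serializes every instance */
	v = c->regs[off / 4];
	pthread_mutex_unlock(&chv_lock);
	return v;
}

int main(void)
{
	static uint32_t fake_regs[16];	/* stand-in for MMIO space */
	struct controller c = { .regs = fake_regs };

	return (int)reg_read(&c, 0);
}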
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index fb4dd7b3ee71..af2046c87806 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device)
+ 	if (err)
+ 		return err;
+ 
++	err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless,
++				   sizeof(wireless), 0);
++	if (err)
++		return err;
++
+ 	if (wireless & 0x1) {
+ 		wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev,
+ 					   RFKILL_TYPE_WLAN,
+@@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
+ 	gps_rfkill = NULL;
+ 	rfkill2_count = 0;
+ 
+-	if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device))
++	if (hp_wmi_rfkill_setup(device))
+ 		hp_wmi_rfkill2_setup(device);
+ 
+ 	err = device_create_file(&device->dev, &dev_attr_display);
+diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
+index c5a2523b0185..463003d2529c 100644
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
+ 	if (!is_power_of_2(freq))
+ 		return -EINVAL;
+ 
++	s3c_rtc_enable_clk(info);
+ 	spin_lock_irq(&info->pie_lock);
+ 
+ 	if (info->data->set_freq)
+ 		info->data->set_freq(info, freq);
+ 
+ 	spin_unlock_irq(&info->pie_lock);
++	s3c_rtc_disable_clk(info);
+ 
+ 	return 0;
+ }
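
The rtc-s3c fix brackets the frequency update with clock enable/disable, since the register write inside set_freq() only works with the peripheral clock running. The bracket shape, reduced to stubs (nothing below is the actual driver code):

static void enable_clk(void)  { /* clk_enable() in the real driver */ }
static void disable_clk(void) { /* clk_disable() likewise */ }
static void write_freq_reg(int freq) { (void)freq; /* register write */ }

static int set_freq(int freq)
{
	enable_clk();		/* the register is only live with the clock on */
	write_freq_reg(freq);
	disable_clk();		/* balanced on every exit path */
	return 0;
}

int main(void)
{
	return set_freq(64);
}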
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index f089082c00e1..e6b77049c756 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -477,7 +477,14 @@ static void reset_sccr1(struct driver_data *drv_data)
+ 	u32 sccr1_reg;
+ 
+ 	sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
+-	sccr1_reg &= ~SSCR1_RFT;
++	switch (drv_data->ssp_type) {
++	case QUARK_X1000_SSP:
++		sccr1_reg &= ~QUARK_X1000_SSCR1_RFT;
++		break;
++	default:
++		sccr1_reg &= ~SSCR1_RFT;
++		break;
++	}
+ 	sccr1_reg |= chip->threshold;
+ 	pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+ }
+diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
+index b614f272b5f4..851fcfa98068 100644
+--- a/drivers/staging/iio/accel/sca3000_core.c
++++ b/drivers/staging/iio/accel/sca3000_core.c
+@@ -595,7 +595,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
+ 		goto error_ret_mut;
+ 	ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+ 	mutex_unlock(&st->lock);
+-	if (ret)
++	if (ret < 0)
+ 		goto error_ret;
+ 	val = ret;
+ 	if (base_freq > 0)
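
The one-line sca3000 fix is the classic sign test for the negative-errno-or-data convention: sca3000_read_ctrl_reg() returns the register value (usually non-zero) on success, so "if (ret)" treated valid data as an error. A small self-contained illustration of the convention:

#include <stdio.h>

/* returns a register value (>= 0) on success, -errno on failure;
 * the 0x2a is hypothetical, standing in for a hardware read */
static int read_ctrl_reg(void)
{
	return 0x2a;	/* valid data, and non-zero */
}

static int read_frequency(unsigned int *out)
{
	int ret = read_ctrl_reg();

	if (ret < 0)	/* only negative values are errors */
		return ret;
	*out = (unsigned int)ret;
	return 0;
}

int main(void)
{
	unsigned int val = 0;
	int err = read_frequency(&val);

	printf("err=%d val=%#x\n", err, val);	/* err=0 val=0x2a */
	return 0;
}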
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 6f50e9d958de..6fad3e9fd389 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -500,7 +500,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+ 	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+ 
+ 	spin_lock_bh(&conn->cmd_lock);
+-	if (!list_empty(&cmd->i_conn_node))
++	if (!list_empty(&cmd->i_conn_node) &&
++	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
+ 		list_del_init(&cmd->i_conn_node);
+ 	spin_unlock_bh(&conn->cmd_lock);
+ 
+@@ -4215,6 +4216,7 @@ transport_err:
+ 
+ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ {
++	LIST_HEAD(tmp_list);
+ 	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;
+ 	struct iscsi_session *sess = conn->sess;
+ 	/*
+@@ -4223,18 +4225,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
+ 	 * has been reset -> returned sleeping pre-handler state.
+ 	 */
+ 	spin_lock_bh(&conn->cmd_lock);
+-	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
++	list_splice_init(&conn->conn_cmd_list, &tmp_list);
+ 
++	list_for_each_entry(cmd, &tmp_list, i_conn_node) {
++		struct se_cmd *se_cmd = &cmd->se_cmd;
++
++		if (se_cmd->se_tfo != NULL) {
++			spin_lock(&se_cmd->t_state_lock);
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			spin_unlock(&se_cmd->t_state_lock);
++		}
++	}
++	spin_unlock_bh(&conn->cmd_lock);
++
++	list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) {
+ 		list_del_init(&cmd->i_conn_node);
+-		spin_unlock_bh(&conn->cmd_lock);
+ 
+ 		iscsit_increment_maxcmdsn(cmd, sess);
+-
+ 		iscsit_free_cmd(cmd, true);
+ 
+-		spin_lock_bh(&conn->cmd_lock);
+ 	}
+-	spin_unlock_bh(&conn->cmd_lock);
+ }
+ 
+ static void iscsit_stop_timers_for_cmds(
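
The iscsit_release_commands_from_conn() rewrite replaces the drop-the-lock-inside-the-loop walk with the standard splice-and-drain shape: move the entire list to a private head while holding the lock (marking the commands on the way), then free everything without the lock held. A user-space reduction of that shape, with a plain mutex and singly linked list in place of the kernel primitives:

#include <pthread.h>
#include <stdlib.h>

struct cmd {
	struct cmd *next;
};

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *conn_cmd_list;	/* shared, lock-protected */

static void free_cmd(struct cmd *c)
{
	free(c);
}

static void release_all_commands(void)
{
	struct cmd *tmp_list, *c;

	pthread_mutex_lock(&cmd_lock);
	tmp_list = conn_cmd_list;	/* splice: steal the whole list */
	conn_cmd_list = NULL;		/* the shared head is now empty */
	pthread_mutex_unlock(&cmd_lock);

	while ((c = tmp_list) != NULL) {	/* walk without the lock */
		tmp_list = c->next;
		free_cmd(c);
	}
}

int main(void)
{
	release_all_commands();	/* safe on an empty list too */
	return 0;
}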
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 39654e917cd8..e929205e28c6 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -1415,8 +1415,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	}
+ 	login->zero_tsih = zero_tsih;
+ 
+-	conn->sess->se_sess->sup_prot_ops =
+-		conn->conn_transport->iscsit_get_sup_prot_ops(conn);
++	if (conn->sess)
++		conn->sess->se_sess->sup_prot_ops =
++			conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+ 
+ 	tpg = conn->tpg;
+ 	if (!tpg) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index e8848e7fe5d4..7580abe7cb45 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -2531,15 +2531,9 @@ static void target_release_cmd_kref(struct kref *kref)
+ 	struct se_session *se_sess = se_cmd->se_sess;
+ 	bool fabric_stop;
+ 
+-	if (list_empty(&se_cmd->se_cmd_list)) {
+-		spin_unlock(&se_sess->sess_cmd_lock);
+-		target_free_cmd_mem(se_cmd);
+-		se_cmd->se_tfo->release_cmd(se_cmd);
+-		return;
+-	}
+-
+ 	spin_lock(&se_cmd->t_state_lock);
+-	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) &&
++		      (se_cmd->transport_state & CMD_T_ABORTED);
+ 	spin_unlock(&se_cmd->t_state_lock);
+ 
+ 	if (se_cmd->cmd_wait_set || fabric_stop) {
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 85323ff75edf..baa888caa964 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -88,37 +88,6 @@ static void atmel_stop_rx(struct uart_port *port);
+ 
+ #define ATMEL_ISR_PASS_LIMIT	256
+ 
+-/* UART registers. CR is write-only, hence no GET macro */
+-#define UART_PUT_CR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_CR)
+-#define UART_GET_MR(port)	__raw_readl((port)->membase + ATMEL_US_MR)
+-#define UART_PUT_MR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_MR)
+-#define UART_PUT_IER(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_IER)
+-#define UART_PUT_IDR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_IDR)
+-#define UART_GET_IMR(port)	__raw_readl((port)->membase + ATMEL_US_IMR)
+-#define UART_GET_CSR(port)	__raw_readl((port)->membase + ATMEL_US_CSR)
+-#define UART_GET_CHAR(port)	__raw_readl((port)->membase + ATMEL_US_RHR)
+-#define UART_PUT_CHAR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_THR)
+-#define UART_GET_BRGR(port)	__raw_readl((port)->membase + ATMEL_US_BRGR)
+-#define UART_PUT_BRGR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_BRGR)
+-#define UART_PUT_RTOR(port,v)	__raw_writel(v, (port)->membase + ATMEL_US_RTOR)
+-#define UART_PUT_TTGR(port, v)	__raw_writel(v, (port)->membase + ATMEL_US_TTGR)
+-#define UART_GET_IP_NAME(port)	__raw_readl((port)->membase + ATMEL_US_NAME)
+-#define UART_GET_IP_VERSION(port) __raw_readl((port)->membase + ATMEL_US_VERSION)
+-
+- /* PDC registers */
+-#define UART_PUT_PTCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
+-#define UART_GET_PTSR(port)	__raw_readl((port)->membase + ATMEL_PDC_PTSR)
+-
+-#define UART_PUT_RPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
+-#define UART_GET_RPR(port)	__raw_readl((port)->membase + ATMEL_PDC_RPR)
+-#define UART_PUT_RCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RCR)
+-#define UART_PUT_RNPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RNPR)
+-#define UART_PUT_RNCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_RNCR)
+-
+-#define UART_PUT_TPR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TPR)
+-#define UART_PUT_TCR(port,v)	__raw_writel(v, (port)->membase + ATMEL_PDC_TCR)
+-#define UART_GET_TCR(port)	__raw_readl((port)->membase + ATMEL_PDC_TCR)
+-
+ struct atmel_dma_buffer {
+ 	unsigned char	*buf;
+ 	dma_addr_t	dma_addr;
+@@ -211,6 +180,16 @@ to_atmel_uart_port(struct uart_port *uart)
+ 	return container_of(uart, struct atmel_uart_port, uart);
+ }
+ 
++static inline u32 atmel_uart_readl(struct uart_port *port, u32 reg)
++{
++	return __raw_readl(port->membase + reg);
++}
++
++static inline void atmel_uart_writel(struct uart_port *port, u32 reg, u32 value)
++{
++	__raw_writel(value, port->membase + reg);
++}
++
+ #ifdef CONFIG_SERIAL_ATMEL_PDC
+ static bool atmel_use_pdc_rx(struct uart_port *port)
+ {
+@@ -256,7 +235,7 @@ static unsigned int atmel_get_lines_status(struct uart_port *port)
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 	unsigned int status, ret = 0;
+ 
+-	status = UART_GET_CSR(port);
++	status = atmel_uart_readl(port, ATMEL_US_CSR);
+ 
+ 	mctrl_gpio_get(atmel_port->gpios, &ret);
+ 
+@@ -303,9 +282,9 @@ static int atmel_config_rs485(struct uart_port *port,
+ 	unsigned int mode;
+ 
+ 	/* Disable interrupts */
+-	UART_PUT_IDR(port, atmel_port->tx_done_mask);
++	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+ 
+-	mode = UART_GET_MR(port);
++	mode = atmel_uart_readl(port, ATMEL_US_MR);
+ 
+ 	/* Resetting serial mode to RS232 (0x0) */
+ 	mode &= ~ATMEL_US_USMODE;
+@@ -315,7 +294,8 @@ static int atmel_config_rs485(struct uart_port *port,
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		dev_dbg(port->dev, "Setting UART to RS485\n");
+ 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+-		UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
++		atmel_uart_writel(port, ATMEL_US_TTGR,
++				  rs485conf->delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else {
+ 		dev_dbg(port->dev, "Setting UART to RS232\n");
+@@ -325,10 +305,10 @@ static int atmel_config_rs485(struct uart_port *port,
+ 		else
+ 			atmel_port->tx_done_mask = ATMEL_US_TXRDY;
+ 	}
+-	UART_PUT_MR(port, mode);
++	atmel_uart_writel(port, ATMEL_US_MR, mode);
+ 
+ 	/* Enable interrupts */
+-	UART_PUT_IER(port, atmel_port->tx_done_mask);
++	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+ 
+ 	return 0;
+ }
+@@ -338,7 +318,9 @@ static int atmel_config_rs485(struct uart_port *port,
+  */
+ static u_int atmel_tx_empty(struct uart_port *port)
+ {
+-	return (UART_GET_CSR(port) & ATMEL_US_TXEMPTY) ? TIOCSER_TEMT : 0;
++	return (atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXEMPTY) ?
++		TIOCSER_TEMT :
++		0;
+ }
+ 
+ /*
+@@ -347,13 +329,14 @@ static u_int atmel_tx_empty(struct uart_port *port)
+ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+ {
+ 	unsigned int control = 0;
+-	unsigned int mode = UART_GET_MR(port);
++	unsigned int mode = atmel_uart_readl(port, ATMEL_US_MR);
+ 	unsigned int rts_paused, rts_ready;
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 
+ 	/* override mode to RS485 if needed, otherwise keep the current mode */
+ 	if (port->rs485.flags & SER_RS485_ENABLED) {
+-		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++		atmel_uart_writel(port, ATMEL_US_TTGR,
++				  port->rs485.delay_rts_after_send);
+ 		mode &= ~ATMEL_US_USMODE;
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	}
+@@ -383,7 +366,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+ 	else
+ 		control |= ATMEL_US_DTRDIS;
+ 
+-	UART_PUT_CR(port, control);
++	atmel_uart_writel(port, ATMEL_US_CR, control);
+ 
+ 	mctrl_gpio_set(atmel_port->gpios, mctrl);
+ 
+@@ -394,7 +377,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+ 	else
+ 		mode |= ATMEL_US_CHMODE_NORMAL;
+ 
+-	UART_PUT_MR(port, mode);
++	atmel_uart_writel(port, ATMEL_US_MR, mode);
+ }
+ 
+ /*
+@@ -405,7 +388,7 @@ static u_int atmel_get_mctrl(struct uart_port *port)
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 	unsigned int ret = 0, status;
+ 
+-	status = UART_GET_CSR(port);
++	status = atmel_uart_readl(port, ATMEL_US_CSR);
+ 
+ 	/*
+ 	 * The control signals are active low.
+@@ -431,10 +414,10 @@ static void atmel_stop_tx(struct uart_port *port)
+ 
+ 	if (atmel_use_pdc_tx(port)) {
+ 		/* disable PDC transmit */
+-		UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ 	}
+ 	/* Disable interrupts */
+-	UART_PUT_IDR(port, atmel_port->tx_done_mask);
++	atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
+ 
+ 	if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 	    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+@@ -448,21 +431,23 @@ static void atmel_start_tx(struct uart_port *port)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 
+-	if (atmel_use_pdc_tx(port)) {
+-		if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
+-			/* The transmitter is already running.  Yes, we
+-			   really need this.*/
+-			return;
++	if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
++				       & ATMEL_PDC_TXTEN))
++		/* The transmitter is already running.  Yes, we
++		   really need this.*/
++		return;
+ 
++	if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
+ 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ 			atmel_stop_rx(port);
+ 
++	if (atmel_use_pdc_tx(port))
+ 		/* re-enable PDC transmit */
+-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
+-	}
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
++
+ 	/* Enable interrupts */
+-	UART_PUT_IER(port, atmel_port->tx_done_mask);
++	atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+ }
+ 
+ /*
+@@ -470,17 +455,19 @@ static void atmel_start_tx(struct uart_port *port)
+  */
+ static void atmel_start_rx(struct uart_port *port)
+ {
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
++	/* reset status and receiver */
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+ 
+-	UART_PUT_CR(port, ATMEL_US_RXEN);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXEN);
+ 
+ 	if (atmel_use_pdc_rx(port)) {
+ 		/* enable PDC controller */
+-		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+-			port->read_status_mask);
+-		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
++		atmel_uart_writel(port, ATMEL_US_IER,
++				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
++				  port->read_status_mask);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
+ 	} else {
+-		UART_PUT_IER(port, ATMEL_US_RXRDY);
++		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
+ 	}
+ }
+ 
+@@ -489,15 +476,16 @@ static void atmel_start_rx(struct uart_port *port)
+  */
+ static void atmel_stop_rx(struct uart_port *port)
+ {
+-	UART_PUT_CR(port, ATMEL_US_RXDIS);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RXDIS);
+ 
+ 	if (atmel_use_pdc_rx(port)) {
+ 		/* disable PDC receive */
+-		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
+-		UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
+-			port->read_status_mask);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS);
++		atmel_uart_writel(port, ATMEL_US_IDR,
++				  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
++				  port->read_status_mask);
+ 	} else {
+-		UART_PUT_IDR(port, ATMEL_US_RXRDY);
++		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXRDY);
+ 	}
+ }
+ 
+@@ -537,7 +525,7 @@ static void atmel_enable_ms(struct uart_port *port)
+ 	else
+ 		ier |= ATMEL_US_DCDIC;
+ 
+-	UART_PUT_IER(port, ier);
++	atmel_uart_writel(port, ATMEL_US_IER, ier);
+ }
+ 
+ /*
+@@ -576,7 +564,7 @@ static void atmel_disable_ms(struct uart_port *port)
+ 	else
+ 		idr |= ATMEL_US_DCDIC;
+ 
+-	UART_PUT_IDR(port, idr);
++	atmel_uart_writel(port, ATMEL_US_IDR, idr);
+ }
+ 
+ /*
+@@ -585,9 +573,11 @@ static void atmel_disable_ms(struct uart_port *port)
+ static void atmel_break_ctl(struct uart_port *port, int break_state)
+ {
+ 	if (break_state != 0)
+-		UART_PUT_CR(port, ATMEL_US_STTBRK);	/* start break */
++		/* start break */
++		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTBRK);
+ 	else
+-		UART_PUT_CR(port, ATMEL_US_STPBRK);	/* stop break */
++		/* stop break */
++		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STPBRK);
+ }
+ 
+ /*
+@@ -621,7 +611,7 @@ atmel_buffer_rx_char(struct uart_port *port, unsigned int status,
+ static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status)
+ {
+ 	/* clear error */
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+ 
+ 	if (status & ATMEL_US_RXBRK) {
+ 		/* ignore side-effect */
+@@ -644,9 +634,9 @@ static void atmel_rx_chars(struct uart_port *port)
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 	unsigned int status, ch;
+ 
+-	status = UART_GET_CSR(port);
++	status = atmel_uart_readl(port, ATMEL_US_CSR);
+ 	while (status & ATMEL_US_RXRDY) {
+-		ch = UART_GET_CHAR(port);
++		ch = atmel_uart_readl(port, ATMEL_US_RHR);
+ 
+ 		/*
+ 		 * note that the error handling code is
+@@ -657,12 +647,13 @@ static void atmel_rx_chars(struct uart_port *port)
+ 			     || atmel_port->break_active)) {
+ 
+ 			/* clear error */
+-			UART_PUT_CR(port, ATMEL_US_RSTSTA);
++			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
+ 
+ 			if (status & ATMEL_US_RXBRK
+ 			    && !atmel_port->break_active) {
+ 				atmel_port->break_active = 1;
+-				UART_PUT_IER(port, ATMEL_US_RXBRK);
++				atmel_uart_writel(port, ATMEL_US_IER,
++						  ATMEL_US_RXBRK);
+ 			} else {
+ 				/*
+ 				 * This is either the end-of-break
+@@ -671,14 +662,15 @@ static void atmel_rx_chars(struct uart_port *port)
+ 				 * being set. In both cases, the next
+ 				 * RXBRK will indicate start-of-break.
+ 				 */
+-				UART_PUT_IDR(port, ATMEL_US_RXBRK);
++				atmel_uart_writel(port, ATMEL_US_IDR,
++						  ATMEL_US_RXBRK);
+ 				status &= ~ATMEL_US_RXBRK;
+ 				atmel_port->break_active = 0;
+ 			}
+ 		}
+ 
+ 		atmel_buffer_rx_char(port, status, ch);
+-		status = UART_GET_CSR(port);
++		status = atmel_uart_readl(port, ATMEL_US_CSR);
+ 	}
+ 
+ 	tasklet_schedule(&atmel_port->tasklet);
+@@ -693,16 +685,18 @@ static void atmel_tx_chars(struct uart_port *port)
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 
+-	if (port->x_char && UART_GET_CSR(port) & atmel_port->tx_done_mask) {
+-		UART_PUT_CHAR(port, port->x_char);
++	if (port->x_char &&
++	    (atmel_uart_readl(port, ATMEL_US_CSR) & atmel_port->tx_done_mask)) {
++		atmel_uart_writel(port, ATMEL_US_THR, port->x_char);
+ 		port->icount.tx++;
+ 		port->x_char = 0;
+ 	}
+ 	if (uart_circ_empty(xmit) || uart_tx_stopped(port))
+ 		return;
+ 
+-	while (UART_GET_CSR(port) & atmel_port->tx_done_mask) {
+-		UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
++	while (atmel_uart_readl(port, ATMEL_US_CSR) &
++	       atmel_port->tx_done_mask) {
++		atmel_uart_writel(port, ATMEL_US_THR, xmit->buf[xmit->tail]);
+ 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ 		port->icount.tx++;
+ 		if (uart_circ_empty(xmit))
+@@ -714,7 +708,8 @@ static void atmel_tx_chars(struct uart_port *port)
+ 
+ 	if (!uart_circ_empty(xmit))
+ 		/* Enable interrupts */
+-		UART_PUT_IER(port, atmel_port->tx_done_mask);
++		atmel_uart_writel(port, ATMEL_US_IER,
++				  atmel_port->tx_done_mask);
+ }
+ 
+ static void atmel_complete_tx_dma(void *arg)
+@@ -934,14 +929,14 @@ static void atmel_rx_from_dma(struct uart_port *port)
+ 
+ 
+ 	/* Reset the UART timeout early so that we don't miss one */
+-	UART_PUT_CR(port, ATMEL_US_STTTO);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
+ 	dmastat = dmaengine_tx_status(chan,
+ 				atmel_port->cookie_rx,
+ 				&state);
+ 	/* Restart a new tasklet if DMA status is error */
+ 	if (dmastat == DMA_ERROR) {
+ 		dev_dbg(port->dev, "Get residue error, restart tasklet\n");
+-		UART_PUT_IER(port, ATMEL_US_TIMEOUT);
++		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
+ 		tasklet_schedule(&atmel_port->tasklet);
+ 		return;
+ 	}
+@@ -1007,7 +1002,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
+ 	tty_flip_buffer_push(tport);
+ 	spin_lock(&port->lock);
+ 
+-	UART_PUT_IER(port, ATMEL_US_TIMEOUT);
++	atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
+ }
+ 
+ static int atmel_prepare_rx_dma(struct uart_port *port)
+@@ -1117,8 +1112,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
+ 		 * the moment.
+ 		 */
+ 		if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
+-			UART_PUT_IDR(port, (ATMEL_US_ENDRX
+-						| ATMEL_US_TIMEOUT));
++			atmel_uart_writel(port, ATMEL_US_IDR,
++					  (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
+ 			tasklet_schedule(&atmel_port->tasklet);
+ 		}
+ 
+@@ -1129,7 +1124,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
+ 
+ 	if (atmel_use_dma_rx(port)) {
+ 		if (pending & ATMEL_US_TIMEOUT) {
+-			UART_PUT_IDR(port, ATMEL_US_TIMEOUT);
++			atmel_uart_writel(port, ATMEL_US_IDR,
++					  ATMEL_US_TIMEOUT);
+ 			tasklet_schedule(&atmel_port->tasklet);
+ 		}
+ 	}
+@@ -1142,8 +1138,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
+ 		 * End of break detected. If it came along with a
+ 		 * character, atmel_rx_chars will handle it.
+ 		 */
+-		UART_PUT_CR(port, ATMEL_US_RSTSTA);
+-		UART_PUT_IDR(port, ATMEL_US_RXBRK);
++		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
++		atmel_uart_writel(port, ATMEL_US_IDR, ATMEL_US_RXBRK);
+ 		atmel_port->break_active = 0;
+ 	}
+ }
+@@ -1158,7 +1154,8 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
+ 
+ 	if (pending & atmel_port->tx_done_mask) {
+ 		/* Either PDC or interrupt transmission */
+-		UART_PUT_IDR(port, atmel_port->tx_done_mask);
++		atmel_uart_writel(port, ATMEL_US_IDR,
++				  atmel_port->tx_done_mask);
+ 		tasklet_schedule(&atmel_port->tasklet);
+ 	}
+ }
+@@ -1193,7 +1190,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
+ 
+ 	do {
+ 		status = atmel_get_lines_status(port);
+-		mask = UART_GET_IMR(port);
++		mask = atmel_uart_readl(port, ATMEL_US_IMR);
+ 		pending = status & mask;
+ 		if (!gpio_handled) {
+ 			/*
+@@ -1219,7 +1216,7 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
+ 		if (atmel_port->suspended) {
+ 			atmel_port->pending |= pending;
+ 			atmel_port->pending_status = status;
+-			UART_PUT_IDR(port, mask);
++			atmel_uart_writel(port, ATMEL_US_IDR, mask);
+ 			pm_system_wakeup();
+ 			break;
+ 		}
+@@ -1256,7 +1253,7 @@ static void atmel_tx_pdc(struct uart_port *port)
+ 	int count;
+ 
+ 	/* nothing left to transmit? */
+-	if (UART_GET_TCR(port))
++	if (atmel_uart_readl(port, ATMEL_PDC_TCR))
+ 		return;
+ 
+ 	xmit->tail += pdc->ofs;
+@@ -1268,7 +1265,7 @@ static void atmel_tx_pdc(struct uart_port *port)
+ 	/* more to transmit - setup next transfer */
+ 
+ 	/* disable PDC transmit */
+-	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
++	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ 
+ 	if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
+ 		dma_sync_single_for_device(port->dev,
+@@ -1279,12 +1276,14 @@ static void atmel_tx_pdc(struct uart_port *port)
+ 		count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ 		pdc->ofs = count;
+ 
+-		UART_PUT_TPR(port, pdc->dma_addr + xmit->tail);
+-		UART_PUT_TCR(port, count);
++		atmel_uart_writel(port, ATMEL_PDC_TPR,
++				  pdc->dma_addr + xmit->tail);
++		atmel_uart_writel(port, ATMEL_PDC_TCR, count);
+ 		/* re-enable PDC transmit */
+-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+ 		/* Enable interrupts */
+-		UART_PUT_IER(port, atmel_port->tx_done_mask);
++		atmel_uart_writel(port, ATMEL_US_IER,
++				  atmel_port->tx_done_mask);
+ 	} else {
+ 		if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ 		    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+@@ -1410,10 +1409,10 @@ static void atmel_rx_from_pdc(struct uart_port *port)
+ 
+ 	do {
+ 		/* Reset the UART timeout early so that we don't miss one */
+-		UART_PUT_CR(port, ATMEL_US_STTTO);
++		atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
+ 
+ 		pdc = &atmel_port->pdc_rx[rx_idx];
+-		head = UART_GET_RPR(port) - pdc->dma_addr;
++		head = atmel_uart_readl(port, ATMEL_PDC_RPR) - pdc->dma_addr;
+ 		tail = pdc->ofs;
+ 
+ 		/* If the PDC has switched buffers, RPR won't contain
+@@ -1456,8 +1455,8 @@ static void atmel_rx_from_pdc(struct uart_port *port)
+ 		 */
+ 		if (head >= pdc->dma_size) {
+ 			pdc->ofs = 0;
+-			UART_PUT_RNPR(port, pdc->dma_addr);
+-			UART_PUT_RNCR(port, pdc->dma_size);
++			atmel_uart_writel(port, ATMEL_PDC_RNPR, pdc->dma_addr);
++			atmel_uart_writel(port, ATMEL_PDC_RNCR, pdc->dma_size);
+ 
+ 			rx_idx = !rx_idx;
+ 			atmel_port->pdc_rx_idx = rx_idx;
+@@ -1472,7 +1471,8 @@ static void atmel_rx_from_pdc(struct uart_port *port)
+ 	tty_flip_buffer_push(tport);
+ 	spin_lock(&port->lock);
+ 
+-	UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
++	atmel_uart_writel(port, ATMEL_US_IER,
++			  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ }
+ 
+ static int atmel_prepare_rx_pdc(struct uart_port *port)
+@@ -1505,11 +1505,12 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
+ 
+ 	atmel_port->pdc_rx_idx = 0;
+ 
+-	UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
+-	UART_PUT_RCR(port, PDC_BUFFER_SIZE);
++	atmel_uart_writel(port, ATMEL_PDC_RPR, atmel_port->pdc_rx[0].dma_addr);
++	atmel_uart_writel(port, ATMEL_PDC_RCR, PDC_BUFFER_SIZE);
+ 
+-	UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
+-	UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
++	atmel_uart_writel(port, ATMEL_PDC_RNPR,
++			  atmel_port->pdc_rx[1].dma_addr);
++	atmel_uart_writel(port, ATMEL_PDC_RNCR, PDC_BUFFER_SIZE);
+ 
+ 	return 0;
+ }
+@@ -1666,7 +1667,7 @@ static void atmel_set_ops(struct uart_port *port)
+ static void atmel_get_ip_name(struct uart_port *port)
+ {
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+-	int name = UART_GET_IP_NAME(port);
++	int name = atmel_uart_readl(port, ATMEL_US_NAME);
+ 	u32 version;
+ 	int usart, uart;
+ 	/* usart and uart ascii */
+@@ -1683,7 +1684,7 @@ static void atmel_get_ip_name(struct uart_port *port)
+ 		atmel_port->is_usart = false;
+ 	} else {
+ 		/* fallback for older SoCs: use version field */
+-		version = UART_GET_IP_VERSION(port);
++		version = atmel_uart_readl(port, ATMEL_US_VERSION);
+ 		switch (version) {
+ 		case 0x302:
+ 		case 0x10213:
+@@ -1755,7 +1756,7 @@ static int atmel_startup(struct uart_port *port)
+ 	 * request_irq() is called we could get stuck trying to
+ 	 * handle an unexpected interrupt
+ 	 */
+-	UART_PUT_IDR(port, -1);
++	atmel_uart_writel(port, ATMEL_US_IDR, -1);
+ 	atmel_port->ms_irq_enabled = false;
+ 
+ 	/*
+@@ -1803,9 +1804,9 @@ static int atmel_startup(struct uart_port *port)
+ 	/*
+ 	 * Finally, enable the serial port
+ 	 */
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+ 	/* enable xmit & rcvr */
+-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ 
+ 	setup_timer(&atmel_port->uart_timer,
+ 			atmel_uart_timer_callback,
+@@ -1818,13 +1819,14 @@ static int atmel_startup(struct uart_port *port)
+ 					jiffies + uart_poll_timeout(port));
+ 		/* set USART timeout */
+ 		} else {
+-			UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+-			UART_PUT_CR(port, ATMEL_US_STTTO);
++			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
++			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
+ 
+-			UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
++			atmel_uart_writel(port, ATMEL_US_IER,
++					  ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ 		}
+ 		/* enable PDC controller */
+-		UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
+ 	} else if (atmel_use_dma_rx(port)) {
+ 		/* set UART timeout */
+ 		if (!atmel_port->is_usart) {
+@@ -1832,14 +1834,15 @@ static int atmel_startup(struct uart_port *port)
+ 					jiffies + uart_poll_timeout(port));
+ 		/* set USART timeout */
+ 		} else {
+-			UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+-			UART_PUT_CR(port, ATMEL_US_STTTO);
++			atmel_uart_writel(port, ATMEL_US_RTOR, PDC_RX_TIMEOUT);
++			atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_STTTO);
+ 
+-			UART_PUT_IER(port, ATMEL_US_TIMEOUT);
++			atmel_uart_writel(port, ATMEL_US_IER,
++					  ATMEL_US_TIMEOUT);
+ 		}
+ 	} else {
+ 		/* enable receive only */
+-		UART_PUT_IER(port, ATMEL_US_RXRDY);
++		atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_RXRDY);
+ 	}
+ 
+ 	return 0;
+@@ -1859,7 +1862,7 @@ static void atmel_flush_buffer(struct uart_port *port)
+ 	struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ 
+ 	if (atmel_use_pdc_tx(port)) {
+-		UART_PUT_TCR(port, 0);
++		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
+ 		atmel_port->pdc_tx.ofs = 0;
+ 	}
+ }
+@@ -1891,8 +1894,8 @@ static void atmel_shutdown(struct uart_port *port)
+ 	atmel_stop_rx(port);
+ 	atmel_stop_tx(port);
+ 
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA);
+-	UART_PUT_IDR(port, -1);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
++	atmel_uart_writel(port, ATMEL_US_IDR, -1);
+ 
+ 
+ 	/*
+@@ -1937,12 +1940,12 @@ static void atmel_serial_pm(struct uart_port *port, unsigned int state,
+ 		clk_prepare_enable(atmel_port->clk);
+ 
+ 		/* re-enable interrupts if we disabled some on suspend */
+-		UART_PUT_IER(port, atmel_port->backup_imr);
++		atmel_uart_writel(port, ATMEL_US_IER, atmel_port->backup_imr);
+ 		break;
+ 	case 3:
+ 		/* Back up the interrupt mask and disable all interrupts */
+-		atmel_port->backup_imr = UART_GET_IMR(port);
+-		UART_PUT_IDR(port, -1);
++		atmel_port->backup_imr = atmel_uart_readl(port, ATMEL_US_IMR);
++		atmel_uart_writel(port, ATMEL_US_IDR, -1);
+ 
+ 		/*
+ 		 * Disable the peripheral clock for this serial port.
+@@ -1965,7 +1968,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	unsigned int old_mode, mode, imr, quot, baud;
+ 
+ 	/* save the current mode register */
+-	mode = old_mode = UART_GET_MR(port);
++	mode = old_mode = atmel_uart_readl(port, ATMEL_US_MR);
+ 
+ 	/* reset the mode, clock divisor, parity, stop bits and data size */
+ 	mode &= ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP |
+@@ -2024,7 +2027,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 
+ 	if (atmel_use_pdc_rx(port))
+ 		/* need to enable error interrupts */
+-		UART_PUT_IER(port, port->read_status_mask);
++		atmel_uart_writel(port, ATMEL_US_IER, port->read_status_mask);
+ 
+ 	/*
+ 	 * Characters to ignore
+@@ -2051,15 +2054,16 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	 * transmitter is empty if requested by the caller, so there's
+ 	 * no need to wait for it here.
+ 	 */
+-	imr = UART_GET_IMR(port);
+-	UART_PUT_IDR(port, -1);
++	imr = atmel_uart_readl(port, ATMEL_US_IMR);
++	atmel_uart_writel(port, ATMEL_US_IDR, -1);
+ 
+ 	/* disable receiver and transmitter */
+-	UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS | ATMEL_US_RXDIS);
+ 
+ 	/* mode */
+ 	if (port->rs485.flags & SER_RS485_ENABLED) {
+-		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++		atmel_uart_writel(port, ATMEL_US_TTGR,
++				  port->rs485.delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else if (termios->c_cflag & CRTSCTS) {
+ 		/* RS232 with hardware handshake (RTS/CTS) */
+@@ -2070,7 +2074,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	}
+ 
+ 	/* set the mode, clock divisor, parity, stop bits and data size */
+-	UART_PUT_MR(port, mode);
++	atmel_uart_writel(port, ATMEL_US_MR, mode);
+ 
+ 	/*
+ 	 * when switching the mode, set the RTS line state according to the
+@@ -2087,16 +2091,16 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 			rts_state = ATMEL_US_RTSEN;
+ 		}
+ 
+-		UART_PUT_CR(port, rts_state);
++		atmel_uart_writel(port, ATMEL_US_CR, rts_state);
+ 	}
+ 
+ 	/* set the baud rate */
+-	UART_PUT_BRGR(port, quot);
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
++	atmel_uart_writel(port, ATMEL_US_BRGR, quot);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ 
+ 	/* restore interrupts */
+-	UART_PUT_IER(port, imr);
++	atmel_uart_writel(port, ATMEL_US_IER, imr);
+ 
+ 	/* CTS flow-control and modem-status interrupts */
+ 	if (UART_ENABLE_MS(port, termios->c_cflag))
+@@ -2207,18 +2211,18 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
+ #ifdef CONFIG_CONSOLE_POLL
+ static int atmel_poll_get_char(struct uart_port *port)
+ {
+-	while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
++	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_RXRDY))
+ 		cpu_relax();
+ 
+-	return UART_GET_CHAR(port);
++	return atmel_uart_readl(port, ATMEL_US_RHR);
+ }
+ 
+ static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
+ {
+-	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
++	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
+ 		cpu_relax();
+ 
+-	UART_PUT_CHAR(port, ch);
++	atmel_uart_writel(port, ATMEL_US_THR, ch);
+ }
+ #endif
+ 
+@@ -2323,9 +2327,9 @@ struct platform_device *atmel_default_console_device;	/* the serial console devi
+ #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
+ static void atmel_console_putchar(struct uart_port *port, int ch)
+ {
+-	while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
++	while (!(atmel_uart_readl(port, ATMEL_US_CSR) & ATMEL_US_TXRDY))
+ 		cpu_relax();
+-	UART_PUT_CHAR(port, ch);
++	atmel_uart_writel(port, ATMEL_US_THR, ch);
+ }
+ 
+ /*
+@@ -2341,12 +2345,13 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
+ 	/*
+ 	 * First, save IMR and then disable interrupts
+ 	 */
+-	imr = UART_GET_IMR(port);
+-	UART_PUT_IDR(port, ATMEL_US_RXRDY | atmel_port->tx_done_mask);
++	imr = atmel_uart_readl(port, ATMEL_US_IMR);
++	atmel_uart_writel(port, ATMEL_US_IDR,
++			  ATMEL_US_RXRDY | atmel_port->tx_done_mask);
+ 
+ 	/* Store PDC transmit status and disable it */
+-	pdc_tx = UART_GET_PTSR(port) & ATMEL_PDC_TXTEN;
+-	UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
++	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
++	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ 
+ 	uart_console_write(port, s, count, atmel_console_putchar);
+ 
+@@ -2355,15 +2360,15 @@ static void atmel_console_write(struct console *co, const char *s, u_int count)
+ 	 * and restore IMR
+ 	 */
+ 	do {
+-		status = UART_GET_CSR(port);
++		status = atmel_uart_readl(port, ATMEL_US_CSR);
+ 	} while (!(status & ATMEL_US_TXRDY));
+ 
+ 	/* Restore PDC transmit status */
+ 	if (pdc_tx)
+-		UART_PUT_PTCR(port, ATMEL_PDC_TXTEN);
++		atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
+ 
+ 	/* set interrupts back the way they were */
+-	UART_PUT_IER(port, imr);
++	atmel_uart_writel(port, ATMEL_US_IER, imr);
+ }
+ 
+ /*
+@@ -2379,17 +2384,17 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
+ 	 * If the baud rate generator isn't running, the port wasn't
+ 	 * initialized by the boot loader.
+ 	 */
+-	quot = UART_GET_BRGR(port) & ATMEL_US_CD;
++	quot = atmel_uart_readl(port, ATMEL_US_BRGR) & ATMEL_US_CD;
+ 	if (!quot)
+ 		return;
+ 
+-	mr = UART_GET_MR(port) & ATMEL_US_CHRL;
++	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_CHRL;
+ 	if (mr == ATMEL_US_CHRL_8)
+ 		*bits = 8;
+ 	else
+ 		*bits = 7;
+ 
+-	mr = UART_GET_MR(port) & ATMEL_US_PAR;
++	mr = atmel_uart_readl(port, ATMEL_US_MR) & ATMEL_US_PAR;
+ 	if (mr == ATMEL_US_PAR_EVEN)
+ 		*parity = 'e';
+ 	else if (mr == ATMEL_US_PAR_ODD)
+@@ -2422,9 +2427,9 @@ static int __init atmel_console_setup(struct console *co, char *options)
+ 	if (ret)
+ 		return ret;
+ 
+-	UART_PUT_IDR(port, -1);
+-	UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
+-	UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
++	atmel_uart_writel(port, ATMEL_US_IDR, -1);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
++	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);
+ 
+ 	if (options)
+ 		uart_parse_options(options, &baud, &parity, &bits, &flow);
+@@ -2531,7 +2536,8 @@ static int atmel_serial_suspend(struct platform_device *pdev,
+ 
+ 	if (atmel_is_console_port(port) && console_suspend_enabled) {
+ 		/* Drain the TX shifter */
+-		while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY))
++		while (!(atmel_uart_readl(port, ATMEL_US_CSR) &
++			 ATMEL_US_TXEMPTY))
+ 			cpu_relax();
+ 	}
+ 
+@@ -2683,8 +2689,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 	clk_prepare_enable(port->clk);
+ 
+ 	if (rs485_enabled) {
+-		UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
+-		UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
++		atmel_uart_writel(&port->uart, ATMEL_US_MR,
++				  ATMEL_US_USMODE_NORMAL);
++		atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
+ 	}
+ 
+ 	/*
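
Most of the atmel_serial churn is mechanical: the per-register UART_PUT_*/UART_GET_* macros collapse into the single atmel_uart_readl()/atmel_uart_writel() pair shown near the top of the file. Beyond readability, one accessor pair gives a single choke point for cross-cutting changes; a user-space illustration, with a purely hypothetical trace line in the read path:

#include <stdint.h>
#include <stdio.h>

struct port {
	volatile uint32_t *membase;
};

/* every register read funnels through here, so instrumentation,
 * endianness handling, etc. need exactly one change; the trace
 * line is purely illustrative, not something the driver does */
static inline uint32_t uart_readl(struct port *p, unsigned int reg)
{
	uint32_t v = p->membase[reg / 4];

	fprintf(stderr, "uart rd %#x -> %#x\n", reg, (unsigned int)v);
	return v;
}

static inline void uart_writel(struct port *p, unsigned int reg, uint32_t v)
{
	p->membase[reg / 4] = v;
}

int main(void)
{
	static uint32_t fake_regs[32];	/* stand-in for MMIO space */
	struct port p = { .membase = fake_regs };

	uart_writel(&p, 0x1c, 0x41);
	return uart_readl(&p, 0x1c) == 0x41 ? 0 : 1;
}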
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index e42cb6bdd31d..10f83076826d 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1701,7 +1701,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 		return -ENODEV;
+ 
+ 	if (port->mapbase != 0)
+-		return 0;
++		return -EINVAL;
+ 
+ 	/* setup info for port */
+ 	port->dev	= &platdev->dev;
+@@ -1755,22 +1755,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 		ourport->dma = devm_kzalloc(port->dev,
+ 					    sizeof(*ourport->dma),
+ 					    GFP_KERNEL);
+-		if (!ourport->dma)
+-			return -ENOMEM;
++		if (!ourport->dma) {
++			ret = -ENOMEM;
++			goto err;
++		}
+ 	}
+ 
+ 	ourport->clk	= clk_get(&platdev->dev, "uart");
+ 	if (IS_ERR(ourport->clk)) {
+ 		pr_err("%s: Controller clock not found\n",
+ 				dev_name(&platdev->dev));
+-		return PTR_ERR(ourport->clk);
++		ret = PTR_ERR(ourport->clk);
++		goto err;
+ 	}
+ 
+ 	ret = clk_prepare_enable(ourport->clk);
+ 	if (ret) {
+ 		pr_err("uart: clock failed to prepare+enable: %d\n", ret);
+ 		clk_put(ourport->clk);
+-		return ret;
++		goto err;
+ 	}
+ 
+ 	/* Keep all interrupts masked and cleared */
+@@ -1786,7 +1789,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
+ 
+ 	/* reset the fifos (and setup the uart) */
+ 	s3c24xx_serial_resetport(port, cfg);
++
+ 	return 0;
++
++err:
++	port->mapbase = 0;
++	return ret;
+ }
+ 
+ /* Device driver serial port probe */
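
The s3c24xx_serial_init_port() hunk above does two things: a port that is
already mapped now reports -EINVAL instead of silently claiming success, and
every later failure funnels through a single err: label that clears
port->mapbase, so a half-initialized port cannot be mistaken for a ready one
on a later probe. A minimal userspace sketch of the same rollback pattern
(all names below are illustrative, not kernel APIs):

	#include <stdlib.h>

	struct port { void *base; void *dma; void *clk; };

	/* Returns 0 on success.  On failure the "initialized" marker
	 * (p->base) is reset so a retry starts from a clean slate. */
	static int port_init(struct port *p)
	{
		p->base = malloc(64);		/* marks the port initialized */
		if (!p->base)
			return -1;

		p->dma = malloc(64);
		if (!p->dma)
			goto err;

		p->clk = malloc(64);
		if (!p->clk)
			goto err;

		return 0;

	err:
		free(p->dma);			/* free(NULL) is a no-op */
		free(p->base);
		p->base = NULL;			/* undo the "initialized" marker */
		return -1;
	}
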
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f28b5375e2c8..d2e50a27140c 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -128,6 +128,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x04f3, 0x016f), .driver_info =
+ 			USB_QUIRK_DEVICE_QUALIFIER },
+ 
++	{ USB_DEVICE(0x04f3, 0x0381), .driver_info =
++			USB_QUIRK_NO_LPM },
++
++	{ USB_DEVICE(0x04f3, 0x21b8), .driver_info =
++			USB_QUIRK_DEVICE_QUALIFIER },
++
+ 	/* Roland SC-8820 */
+ 	{ USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b886226be241..f4c6e81df034 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1939,6 +1939,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 		return 1;
+ 	}
+ 
++	if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
++		if ((event->status & DEPEVT_STATUS_IOC) &&
++				(trb->ctrl & DWC3_TRB_CTRL_IOC))
++			return 0;
+ 	return 1;
+ }
+ 
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index fe1d5fc7da2d..47d2c09e4f35 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
+ {
+ 	int	branch;
+ 
+-	ed->state = ED_OPER;
+ 	ed->ed_prev = NULL;
+ 	ed->ed_next = NULL;
+ 	ed->hwNextED = 0;
+@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
+ 	/* the HC may not see the schedule updates yet, but if it does
+ 	 * then they'll be properly ordered.
+ 	 */
++
++	ed->state = ED_OPER;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index f1893e08e51a..db565f620f82 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -808,20 +808,27 @@ static void xfer_work(struct work_struct *work)
+ {
+ 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
+ 	struct usbhs_pipe *pipe = pkt->pipe;
+-	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
++	struct usbhs_fifo *fifo;
+ 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+ 	struct dma_async_tx_descriptor *desc;
+-	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
++	struct dma_chan *chan;
+ 	struct device *dev = usbhs_priv_to_dev(priv);
+ 	enum dma_transfer_direction dir;
++	unsigned long flags;
+ 
++	usbhs_lock(priv, flags);
++	fifo = usbhs_pipe_to_fifo(pipe);
++	if (!fifo)
++		goto xfer_work_end;
++
++	chan = usbhsf_dma_chan_get(fifo, pkt);
+ 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+ 
+ 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
+ 					pkt->trans, dir,
+ 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ 	if (!desc)
+-		return;
++		goto xfer_work_end;
+ 
+ 	desc->callback		= usbhsf_dma_complete;
+ 	desc->callback_param	= pipe;
+@@ -829,7 +836,7 @@ static void xfer_work(struct work_struct *work)
+ 	pkt->cookie = dmaengine_submit(desc);
+ 	if (pkt->cookie < 0) {
+ 		dev_err(dev, "Failed to submit dma descriptor\n");
+-		return;
++		goto xfer_work_end;
+ 	}
+ 
+ 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
+@@ -840,6 +847,9 @@ static void xfer_work(struct work_struct *work)
+ 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
+ 	dma_async_issue_pending(chan);
+ 	usbhs_pipe_enable(pipe);
++
++xfer_work_end:
++	usbhs_unlock(priv, flags);
+ }
+ 
+ /*
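
The xfer_work() change above closes a race: the work item used to look up
the pipe's fifo and DMA channel without holding the driver lock, so a
concurrent disconnect could free them underneath it. It now takes
usbhs_lock(), re-reads the fifo pointer under the lock, and bails out
through xfer_work_end if the fifo is already gone. A rough pthread sketch
of the same "revalidate shared state inside deferred work" pattern
(names are illustrative):

	#include <pthread.h>
	#include <stddef.h>

	struct resource { int busy; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct resource *shared;	/* may be torn down concurrently */

	static void deferred_work(void)
	{
		struct resource *res;

		pthread_mutex_lock(&lock);
		res = shared;		/* re-read under the lock ... */
		if (!res)
			goto out;	/* ... and bail if it vanished */
		res->busy = 1;
		/* the actual transfer setup would go here */
	out:
		pthread_mutex_unlock(&lock);
	}
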
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index 046529656465..74af77a022a8 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -582,6 +582,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
+ 	struct usbhs_pipe *pipe;
+ 	int ret = -EIO;
++	unsigned long flags;
++
++	usbhs_lock(priv, flags);
+ 
+ 	/*
+ 	 * if it already have pipe,
+@@ -590,7 +593,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 	if (uep->pipe) {
+ 		usbhs_pipe_clear(uep->pipe);
+ 		usbhs_pipe_sequence_data0(uep->pipe);
+-		return 0;
++		ret = 0;
++		goto usbhsg_ep_enable_end;
+ 	}
+ 
+ 	pipe = usbhs_pipe_malloc(priv,
+@@ -618,6 +622,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
+ 		ret = 0;
+ 	}
+ 
++usbhsg_ep_enable_end:
++	usbhs_unlock(priv, flags);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f00919d579e0..2a7bf26c68e6 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -276,6 +276,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
++#define TELIT_PRODUCT_LE910_USBCFG4		0x1206
+ 
+ /* ZTE PRODUCTS */
+ #define ZTE_VENDOR_ID				0x19d2
+@@ -1210,6 +1211,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+ 		.driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 89bac470f04e..56f7e2521202 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -73,7 +73,7 @@ struct virtio_balloon {
+ 
+ 	/* The array of pfns we tell the Host about. */
+ 	unsigned int num_pfns;
+-	u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
++	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
+ 
+ 	/* Memory statistics */
+ 	int need_stats_update;
+@@ -125,14 +125,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
+ 	wait_event(vb->acked, virtqueue_get_buf(vq, &len));
+ }
+ 
+-static void set_page_pfns(u32 pfns[], struct page *page)
++static void set_page_pfns(struct virtio_balloon *vb,
++			  __virtio32 pfns[], struct page *page)
+ {
+ 	unsigned int i;
+ 
+ 	/* Set balloon pfns pointing at this page.
+ 	 * Note that the first pfn points at start of the page. */
+ 	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
+-		pfns[i] = page_to_balloon_pfn(page) + i;
++		pfns[i] = cpu_to_virtio32(vb->vdev,
++					  page_to_balloon_pfn(page) + i);
+ }
+ 
+ static void fill_balloon(struct virtio_balloon *vb, size_t num)
+@@ -155,9 +157,11 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
+ 			msleep(200);
+ 			break;
+ 		}
+-		set_page_pfns(vb->pfns + vb->num_pfns, page);
++		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
+-		adjust_managed_page_count(page, -1);
++		if (!virtio_has_feature(vb->vdev,
++					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
++			adjust_managed_page_count(page, -1);
+ 	}
+ 
+ 	/* Did we get any? */
+@@ -169,11 +173,15 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
+ static void release_pages_balloon(struct virtio_balloon *vb)
+ {
+ 	unsigned int i;
++	struct page *page;
+ 
+ 	/* Find pfns pointing at start of each page, get pages and free them. */
+ 	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+-		struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+-		adjust_managed_page_count(page, 1);
++		page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
++							   vb->pfns[i]));
++		if (!virtio_has_feature(vb->vdev,
++					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
++			adjust_managed_page_count(page, 1);
+ 		put_page(page); /* balloon reference */
+ 	}
+ }
+@@ -188,12 +196,14 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ 	num = min(num, ARRAY_SIZE(vb->pfns));
+ 
+ 	mutex_lock(&vb->balloon_lock);
++	/* We can't release more pages than taken */
++	num = min(num, (size_t)vb->num_pages);
+ 	for (vb->num_pfns = 0; vb->num_pfns < num;
+ 	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+ 		page = balloon_page_dequeue(vb_dev_info);
+ 		if (!page)
+ 			break;
+-		set_page_pfns(vb->pfns + vb->num_pfns, page);
++		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
+ 		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
+ 	}
+ 
+@@ -461,13 +471,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
+ 	__count_vm_event(BALLOON_MIGRATE);
+ 	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
+ 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+-	set_page_pfns(vb->pfns, newpage);
++	set_page_pfns(vb, vb->pfns, newpage);
+ 	tell_host(vb, vb->inflate_vq);
+ 
+ 	/* balloon's page migration 2nd step -- deflate "page" */
+ 	balloon_page_delete(page);
+ 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
+-	set_page_pfns(vb->pfns, page);
++	set_page_pfns(vb, vb->pfns, page);
+ 	tell_host(vb, vb->deflate_vq);
+ 
+ 	mutex_unlock(&vb->balloon_lock);
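
Two independent hardenings in the virtio_balloon hunks: the pfn array handed
to the host is now declared __virtio32 and filled via cpu_to_virtio32(),
making the device byte order explicit at the boundary instead of assuming it
matches the CPU; and leak_balloon() clamps num to the pages actually taken,
so the host cannot ask the guest to release more than it holds. A small
userspace endianness sketch (illustrative names, not the virtio API):

	#include <endian.h>
	#include <stdint.h>

	/* Values shared with a little-endian device are converted once,
	 * at the boundary, and kept in a distinct-looking type so a raw
	 * CPU value is less likely to be mixed in by accident. */
	typedef uint32_t le32;			/* stand-in for __virtio32 */

	static le32 cpu_to_dev32(uint32_t v)	{ return (le32)htole32(v); }
	static uint32_t dev32_to_cpu(le32 v)	{ return le32toh((uint32_t)v); }

	static void fill_pfns(le32 *pfns, uint32_t first_pfn, unsigned int n)
	{
		for (unsigned int i = 0; i < n; i++)
			pfns[i] = cpu_to_dev32(first_pfn + i);
	}
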
+diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
+index 3182273a3407..1418daa03d95 100644
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -46,6 +46,9 @@
+ #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
+ #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
+ #define CIFS_MOUNT_MAP_SFM_CHR	0x800000 /* SFM/MAC mapping for illegal chars */
++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
++					      * root mountable
++					      */
+ 
+ struct cifs_sb_info {
+ 	struct rb_root tlink_tree;
+@@ -67,5 +70,6 @@ struct cifs_sb_info {
+ 	struct backing_dev_info bdi;
+ 	struct delayed_work prune_tlinks;
+ 	struct rcu_head rcu;
++	char *prepath;
+ };
+ #endif				/* _CIFS_FS_SB_H */
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index e682b36a210f..4acbc390a7d6 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -731,24 +731,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+ 
++	mutex_lock(&ses->server->srv_mutex);
++
+ 	rc = crypto_hmacmd5_alloc(ses->server);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate ntlmv2_hash */
+ 	rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* calculate first part of the client response (CR1) */
+ 	rc = CalcNTLMv2_response(ses, ntlmv2_hash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	/* now calculate the session key for NTLMv2 */
+@@ -757,13 +759,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
+ 			 __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -771,7 +773,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 		CIFS_HMAC_MD5_HASH_SIZE);
+ 	if (rc) {
+ 		cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
+-		goto setup_ntlmv2_rsp_ret;
++		goto unlock;
+ 	}
+ 
+ 	rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
+@@ -779,6 +781,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	if (rc)
+ 		cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+ 
++unlock:
++	mutex_unlock(&ses->server->srv_mutex);
+ setup_ntlmv2_rsp_ret:
+ 	kfree(tiblob);
+ 
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index fe24e22fc154..9f205a6159d3 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -679,6 +679,14 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 		goto out_cifs_sb;
+ 	}
+ 
++	if (volume_info->prepath) {
++		cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
++		if (cifs_sb->prepath == NULL) {
++			root = ERR_PTR(-ENOMEM);
++			goto out_cifs_sb;
++		}
++	}
++
+ 	cifs_setup_cifs_sb(volume_info, cifs_sb);
+ 
+ 	rc = cifs_mount(cifs_sb, volume_info);
+@@ -717,7 +725,11 @@ cifs_do_mount(struct file_system_type *fs_type,
+ 		sb->s_flags |= MS_ACTIVE;
+ 	}
+ 
+-	root = cifs_get_root(volume_info, sb);
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		root = dget(sb->s_root);
++	else
++		root = cifs_get_root(volume_info, sb);
++
+ 	if (IS_ERR(root))
+ 		goto out_super;
+ 
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 17998d19b166..cd9d50e4f5f4 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -3436,6 +3436,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
+ 	return volume_info;
+ }
+ 
++static int
++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
++					unsigned int xid,
++					struct cifs_tcon *tcon,
++					struct cifs_sb_info *cifs_sb,
++					char *full_path)
++{
++	int rc;
++	char *s;
++	char sep, tmp;
++
++	sep = CIFS_DIR_SEP(cifs_sb);
++	s = full_path;
++
++	rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
++	while (rc == 0) {
++		/* skip separators */
++		while (*s == sep)
++			s++;
++		if (!*s)
++			break;
++		/* next separator */
++		while (*s && *s != sep)
++			s++;
++
++		/*
++		 * temporarily null-terminate the path at the end of
++		 * the current component
++		 */
++		tmp = *s;
++		*s = 0;
++		rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
++						     full_path);
++		*s = tmp;
++	}
++	return rc;
++}
++
+ int
+ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
+ {
+@@ -3562,6 +3600,16 @@ remote_path_check:
+ 			kfree(full_path);
+ 			goto mount_fail_check;
+ 		}
++
++		rc = cifs_are_all_path_components_accessible(server,
++							     xid, tcon, cifs_sb,
++							     full_path);
++		if (rc != 0) {
++			cifs_dbg(VFS, "cannot query dirs between root and final path, "
++				 "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
++			cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
++			rc = 0;
++		}
+ 		kfree(full_path);
+ 	}
+ 
+@@ -3831,6 +3879,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
+ 
+ 	bdi_destroy(&cifs_sb->bdi);
+ 	kfree(cifs_sb->mountdata);
++	kfree(cifs_sb->prepath);
+ 	call_rcu(&cifs_sb->rcu, delayed_free);
+ }
+ 
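
cifs_are_all_path_components_accessible() above probes every intermediate
directory by temporarily writing a NUL at each separator, querying the
prefix, and restoring the byte, so no per-component allocation is needed;
if any prefix is unreachable the mount falls back to
CIFS_MOUNT_USE_PREFIX_PATH instead of failing. A standalone sketch of the
same in-place prefix walk (the path must be writable; check_prefix() is a
made-up probe):

	#include <stdio.h>

	static int check_prefix(const char *prefix)
	{
		printf("probe <%s>\n", prefix);
		return 0;		/* pretend it is accessible */
	}

	static int check_all_components(char *path)
	{
		char *s = path;
		int rc = check_prefix("");	/* the root itself */

		while (rc == 0) {
			while (*s == '/')	/* skip separators */
				s++;
			if (!*s)
				break;
			while (*s && *s != '/')	/* end of this component */
				s++;
			char tmp = *s;		/* cut, probe, restore */
			*s = '\0';
			rc = check_prefix(path);
			*s = tmp;
		}
		return rc;
	}

On a writable copy of "a/b/c" this probes "", "a", "a/b", then "a/b/c",
matching the loop in the hunk above.
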
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index c3eb998a99bd..26a3b389a265 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
+ 	struct dentry *temp;
+ 	int namelen;
+ 	int dfsplen;
++	int pplen = 0;
+ 	char *full_path;
+ 	char dirsep;
+ 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
+ 		dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
+ 	else
+ 		dfsplen = 0;
++
++	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++		pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
++
+ cifs_bp_rename_retry:
+-	namelen = dfsplen;
++	namelen = dfsplen + pplen;
+ 	seq = read_seqbegin(&rename_lock);
+ 	rcu_read_lock();
+ 	for (temp = direntry; !IS_ROOT(temp);) {
+@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
+ 		}
+ 	}
+ 	rcu_read_unlock();
+-	if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
++	if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
+ 		cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
+ 			 namelen, dfsplen);
+ 		/* presumably this is only possible if racing with a rename
+@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
+ 	   those safely to '/' if any are found in the middle of the prepath */
+ 	/* BB test paths to Windows with '/' in the midst of prepath */
+ 
++	if (pplen) {
++		int i;
++
++		cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
++		memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
++		full_path[dfsplen] = '\\';
++		for (i = 0; i < pplen-1; i++)
++			if (full_path[dfsplen+1+i] == '/')
++				full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
++	}
++
+ 	if (dfsplen) {
+ 		strncpy(full_path, tcon->treeName, dfsplen);
+ 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+@@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
+ 				goto cifs_create_get_file_info;
+ 			}
+ 
++			if (S_ISDIR(newinode->i_mode)) {
++				CIFSSMBClose(xid, tcon, fid->netfid);
++				iput(newinode);
++				rc = -EISDIR;
++				goto out;
++			}
++
+ 			if (!S_ISREG(newinode->i_mode)) {
+ 				/*
+ 				 * The server may allow us to open things like
+@@ -399,10 +422,14 @@ cifs_create_set_dentry:
+ 	if (rc != 0) {
+ 		cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
+ 			 rc);
+-		if (server->ops->close)
+-			server->ops->close(xid, tcon, fid);
+-		goto out;
++		goto out_err;
+ 	}
++
++	if (S_ISDIR(newinode->i_mode)) {
++		rc = -EISDIR;
++		goto out_err;
++	}
++
+ 	d_drop(direntry);
+ 	d_add(direntry, newinode);
+ 
+@@ -410,6 +437,13 @@ out:
+ 	kfree(buf);
+ 	kfree(full_path);
+ 	return rc;
++
++out_err:
++	if (server->ops->close)
++		server->ops->close(xid, tcon, fid);
++	if (newinode)
++		iput(newinode);
++	goto out;
+ }
+ 
+ int
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index 6b66dd5d1540..9fb3bc26a2ab 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -982,10 +982,26 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 	struct inode *inode = NULL;
+ 	long rc;
+ 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
++	char *path = NULL;
++	int len;
++
++	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
++	    && cifs_sb->prepath) {
++		len = strlen(cifs_sb->prepath);
++		path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++		path[0] = '/';
++		memcpy(path+1, cifs_sb->prepath, len);
++	} else {
++		path = kstrdup("", GFP_KERNEL);
++		if (path == NULL)
++			return ERR_PTR(-ENOMEM);
++	}
+ 
+ 	xid = get_xid();
+ 	if (tcon->unix_ext) {
+-		rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
++		rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
+ 		/* some servers mistakenly claim POSIX support */
+ 		if (rc != -EOPNOTSUPP)
+ 			goto iget_no_retry;
+@@ -993,7 +1009,8 @@ struct inode *cifs_root_iget(struct super_block *sb)
+ 		tcon->unix_ext = false;
+ 	}
+ 
+-	rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
++	convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
++	rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
+ 
+ iget_no_retry:
+ 	if (!inode) {
+@@ -1022,6 +1039,7 @@ iget_no_retry:
+ 	}
+ 
+ out:
++	kfree(path);
+ 	/* can not call macro free_xid here since in a void func
+ 	 * TODO: This is no longer true
+ 	 */
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 1678b9cb94c7..57aeae6116d6 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -973,6 +973,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
+ 	get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
+ }
+ 
++#define SMB2_SYMLINK_STRUCT_SIZE \
++	(sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
++
+ static int
+ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		   const char *full_path, char **target_path,
+@@ -985,7 +988,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 	struct cifs_fid fid;
+ 	struct smb2_err_rsp *err_buf = NULL;
+ 	struct smb2_symlink_err_rsp *symlink;
+-	unsigned int sub_len, sub_offset;
++	unsigned int sub_len;
++	unsigned int sub_offset;
++	unsigned int print_len;
++	unsigned int print_offset;
+ 
+ 	cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
+ 
+@@ -1006,11 +1012,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		kfree(utf16_path);
+ 		return -ENOENT;
+ 	}
++
++	if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
++	    get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	/* open must fail on symlink - reset rc */
+ 	rc = 0;
+ 	symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
+ 	sub_len = le16_to_cpu(symlink->SubstituteNameLength);
+ 	sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
++	print_len = le16_to_cpu(symlink->PrintNameLength);
++	print_offset = le16_to_cpu(symlink->PrintNameOffset);
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
++	if (get_rfc1002_length(err_buf) + 4 <
++			SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
++		kfree(utf16_path);
++		return -ENOENT;
++	}
++
+ 	*target_path = cifs_strndup_from_utf16(
+ 				(char *)symlink->PathBuffer + sub_offset,
+ 				sub_len, true, cifs_sb->local_nls);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 2c75b393d31a..660857431b1c 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -578,7 +578,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)
+ 
+ failed:
+ 	spin_unlock(&dentry->d_lock);
+-	cpu_relax();
+ 	return dentry; /* try again with same dentry */
+ }
+ 
+@@ -752,6 +751,8 @@ void dput(struct dentry *dentry)
+ 		return;
+ 
+ repeat:
++	might_sleep();
++
+ 	rcu_read_lock();
+ 	if (likely(fast_dput(dentry))) {
+ 		rcu_read_unlock();
+@@ -783,8 +784,10 @@ repeat:
+ 
+ kill_it:
+ 	dentry = dentry_kill(dentry);
+-	if (dentry)
++	if (dentry) {
++		cond_resched();
+ 		goto repeat;
++	}
+ }
+ EXPORT_SYMBOL(dput);
+ 
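
The dput() change swaps a cpu_relax() busy hint for cond_resched(): when
dentry_kill() hands back a dentry to retry, the loop now yields the CPU
between iterations, and the new might_sleep() documents that dput() may now
block. A loose userspace analogue of yielding inside a long teardown loop:

	#include <sched.h>
	#include <stdlib.h>

	struct node { struct node *parent; };

	static struct node *release_one(struct node *n)
	{
		struct node *parent = n->parent;
		free(n);
		return parent;		/* caller continues with the parent */
	}

	static void put_chain(struct node *n)
	{
		while (n) {
			n = release_one(n);
			sched_yield();	/* rough analogue of cond_resched() */
		}
	}
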
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index 955bf49a7945..41117e51a2e9 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
+ 	memset(bh->b_data, 0, sb->s_blocksize);
+ 
+ 	bit_max = ext4_num_base_meta_clusters(sb, block_group);
++	if ((bit_max >> 3) >= bh->b_size)
++		return -EIO;
++
+ 	for (bit = 0; bit < bit_max; bit++)
+ 		ext4_set_bit(bit, bh->b_data);
+ 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index ea12f565be24..a3276bf9ac00 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -375,9 +375,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
+ 	ext4_fsblk_t block = ext4_ext_pblock(ext);
+ 	int len = ext4_ext_get_actual_len(ext);
+ 	ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+-	ext4_lblk_t last = lblock + len - 1;
+ 
+-	if (len == 0 || lblock > last)
++	/*
++	 * We allow neither:
++	 *  - zero length
++	 *  - overflow/wrap-around
++	 */
++	if (lblock + len <= lblock)
+ 		return 0;
+ 	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
+ }
+@@ -468,6 +472,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
+ 		error_msg = "invalid extent entries";
+ 		goto corrupted;
+ 	}
++	if (unlikely(depth > 32)) {
++		error_msg = "too large eh_depth";
++		goto corrupted;
++	}
+ 	/* Verify checksum on non-root extent tree nodes */
+ 	if (ext_depth(inode) != depth &&
+ 	    !ext4_extent_block_csum_verify(inode, eh)) {
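
ext4_valid_extent() above replaces "len == 0 || lblock > last" with a single
unsigned test: "lblock + len <= lblock" is true exactly when the length is
zero or the addition wraps around, so both bad cases are caught without
computing "last" at all; the new cap on eh_depth is a similar defensive
bound. The test in isolation, as a runnable sketch:

	#include <stdint.h>
	#include <stdbool.h>
	#include <assert.h>

	/* Non-empty and no wrap-around: with unsigned arithmetic,
	 * lblock + len > lblock holds iff len >= 1 and the sum fits. */
	static bool extent_range_ok(uint32_t lblock, uint32_t len)
	{
		return lblock + len > lblock;
	}

	int main(void)
	{
		assert(extent_range_ok(100, 8));
		assert(!extent_range_ok(100, 0));		/* empty */
		assert(!extent_range_ok(0xfffffff0u, 0x20));	/* wraps */
		return 0;
	}
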
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index ba12e2953aec..9b55c6f71bf2 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -204,9 +204,9 @@ void ext4_evict_inode(struct inode *inode)
+ 		 * Note that directories do not have this problem because they
+ 		 * don't use page cache.
+ 		 */
+-		if (ext4_should_journal_data(inode) &&
+-		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
+-		    inode->i_ino != EXT4_JOURNAL_INO) {
++		if (inode->i_ino != EXT4_JOURNAL_INO &&
++		    ext4_should_journal_data(inode) &&
++		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
+ 			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
+ 			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
+ 
+@@ -2557,13 +2557,36 @@ retry:
+ 				done = true;
+ 			}
+ 		}
+-		ext4_journal_stop(handle);
++		/*
++		 * Caution: If the handle is synchronous,
++		 * ext4_journal_stop() can wait for transaction commit
++		 * to finish which may depend on writeback of pages to
++		 * complete or on page lock to be released.  In that
++		 * case, we have to wait until after we have
++		 * submitted all the IO, released page locks we hold,
++		 * and dropped io_end reference (for extent conversion
++		 * to be able to complete) before stopping the handle.
++		 */
++		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
++			ext4_journal_stop(handle);
++			handle = NULL;
++		}
+ 		/* Submit prepared bio */
+ 		ext4_io_submit(&mpd.io_submit);
+ 		/* Unlock pages we didn't use */
+ 		mpage_release_unused_pages(&mpd, give_up_on_write);
+-		/* Drop our io_end reference we got from init */
+-		ext4_put_io_end(mpd.io_submit.io_end);
++		/*
++		 * Drop our io_end reference we got from init. We have
++		 * to be careful and use deferred io_end finishing if
++		 * we are still holding the transaction as we can
++		 * release the last reference to io_end which may end
++		 * up doing unwritten extent conversion.
++		 */
++		if (handle) {
++			ext4_put_io_end_defer(mpd.io_submit.io_end);
++			ext4_journal_stop(handle);
++		} else
++			ext4_put_io_end(mpd.io_submit.io_end);
+ 
+ 		if (ret == -ENOSPC && sbi->s_journal) {
+ 			/*
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index aedff7963468..f01dd19adb7b 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2229,6 +2229,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ 	while (es->s_last_orphan) {
+ 		struct inode *inode;
+ 
++		/*
++		 * We may have encountered an error during cleanup; if
++		 * so, skip the rest.
++		 */
++		if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
++			jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
++			es->s_last_orphan = 0;
++			break;
++		}
++
+ 		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+ 		if (IS_ERR(inode)) {
+ 			es->s_last_orphan = 0;
+@@ -3711,6 +3721,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto failed_mount;
+ 	}
+ 
++	if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
++		ext4_msg(sb, KERN_ERR,
++			 "Number of reserved GDT blocks insanely large: %d",
++			 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
++		goto failed_mount;
++	}
++
+ 	if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
+ 		if (blocksize != PAGE_SIZE) {
+ 			ext4_msg(sb, KERN_ERR,
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 3227091c2a64..5cae35490b37 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -417,6 +417,15 @@ static int fuse_flush(struct file *file, fl_owner_t id)
+ 	fuse_sync_writes(inode);
+ 	mutex_unlock(&inode->i_mutex);
+ 
++	if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
++	    test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
++		err = -ENOSPC;
++	if (test_bit(AS_EIO, &file->f_mapping->flags) &&
++	    test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
++		err = -EIO;
++	if (err)
++		return err;
++
+ 	req = fuse_get_req_nofail_nopages(fc, file);
+ 	memset(&inarg, 0, sizeof(inarg));
+ 	inarg.fh = ff->fh;
+@@ -462,6 +471,21 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
+ 		goto out;
+ 
+ 	fuse_sync_writes(inode);
++
++	/*
++	 * Due to the implementation of fuse writeback,
++	 * filemap_write_and_wait_range() does not catch errors.
++	 * We have to do this directly after fuse_sync_writes()
++	 */
++	if (test_bit(AS_ENOSPC, &file->f_mapping->flags) &&
++	    test_and_clear_bit(AS_ENOSPC, &file->f_mapping->flags))
++		err = -ENOSPC;
++	if (test_bit(AS_EIO, &file->f_mapping->flags) &&
++	    test_and_clear_bit(AS_EIO, &file->f_mapping->flags))
++		err = -EIO;
++	if (err)
++		goto out;
++
+ 	err = sync_inode_metadata(inode, 1);
+ 	if (err)
+ 		goto out;
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 708d697113fc..846bb19d605a 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -910,7 +910,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
+ 	arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
+ 		FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
+ 		FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
+-		FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
++		FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ 		FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
+ 		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
+ 	req->in.h.opcode = FUSE_INIT;
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index f98cd9adbc0d..51af4fff890f 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1241,6 +1241,9 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
+ 		file, count, (long long)(page_file_offset(page) + offset));
+ 
++	if (!count)
++		goto out;
++
+ 	if (nfs_can_extend_write(file, page, inode)) {
+ 		count = max(count + offset, nfs_page_length(page));
+ 		offset = 0;
+@@ -1251,7 +1254,7 @@ int nfs_updatepage(struct file *file, struct page *page,
+ 		nfs_set_pageerror(page);
+ 	else
+ 		__set_page_dirty_nobuffers(page);
+-
++out:
+ 	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
+ 			status, (long long)i_size_read(inode));
+ 	return status;
+diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
+index d54701f6dc78..076a9c096a6c 100644
+--- a/fs/nfsd/nfs2acl.c
++++ b/fs/nfsd/nfs2acl.c
+@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
+ 		goto out;
+ 
+ 	inode = d_inode(fh->fh_dentry);
+-	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
+-		error = -EOPNOTSUPP;
+-		goto out_errno;
+-	}
+ 
+ 	error = fh_want_write(fh);
+ 	if (error)
+ 		goto out_errno;
+ 
+-	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
++	fh_lock(fh);
++
++	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ 	if (error)
+-		goto out_drop_write;
+-	error = inode->i_op->set_acl(inode, argp->acl_default,
+-				     ACL_TYPE_DEFAULT);
++		goto out_drop_lock;
++	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ 	if (error)
+-		goto out_drop_write;
++		goto out_drop_lock;
++
++	fh_unlock(fh);
+ 
+ 	fh_drop_write(fh);
+ 
+@@ -131,7 +130,8 @@ out:
+ 	posix_acl_release(argp->acl_access);
+ 	posix_acl_release(argp->acl_default);
+ 	return nfserr;
+-out_drop_write:
++out_drop_lock:
++	fh_unlock(fh);
+ 	fh_drop_write(fh);
+ out_errno:
+ 	nfserr = nfserrno(error);
+diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
+index 882b1a14bc3e..3911aa07a95c 100644
+--- a/fs/nfsd/nfs3acl.c
++++ b/fs/nfsd/nfs3acl.c
+@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
+ 		goto out;
+ 
+ 	inode = d_inode(fh->fh_dentry);
+-	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
+-		error = -EOPNOTSUPP;
+-		goto out_errno;
+-	}
+ 
+ 	error = fh_want_write(fh);
+ 	if (error)
+ 		goto out_errno;
+ 
+-	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
++	fh_lock(fh);
++
++	error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
+ 	if (error)
+-		goto out_drop_write;
+-	error = inode->i_op->set_acl(inode, argp->acl_default,
+-				     ACL_TYPE_DEFAULT);
++		goto out_drop_lock;
++	error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
+ 
+-out_drop_write:
++out_drop_lock:
++	fh_unlock(fh);
+ 	fh_drop_write(fh);
+ out_errno:
+ 	nfserr = nfserrno(error);
+diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
+index 67242bf7c6cc..7de3b41b43b6 100644
+--- a/fs/nfsd/nfs4acl.c
++++ b/fs/nfsd/nfs4acl.c
+@@ -782,9 +782,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	dentry = fhp->fh_dentry;
+ 	inode = d_inode(dentry);
+ 
+-	if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
+-		return nfserr_attrnotsupp;
+-
+ 	if (S_ISDIR(inode->i_mode))
+ 		flags = NFS4_ACL_DIR;
+ 
+@@ -794,16 +791,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ 	if (host_error < 0)
+ 		goto out_nfserr;
+ 
+-	host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
++	fh_lock(fhp);
++
++	host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
+ 	if (host_error < 0)
+-		goto out_release;
++		goto out_drop_lock;
+ 
+ 	if (S_ISDIR(inode->i_mode)) {
+-		host_error = inode->i_op->set_acl(inode, dpacl,
+-						  ACL_TYPE_DEFAULT);
++		host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
+ 	}
+ 
+-out_release:
++out_drop_lock:
++	fh_unlock(fhp);
++
+ 	posix_acl_release(pacl);
+ 	posix_acl_release(dpacl);
+ out_nfserr:
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 977236a46aa2..ba5ef733951f 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -590,21 +590,25 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
+ {
+ 	struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
+ 	struct inode *dir = upperdir->d_inode;
+-	struct dentry *upper = ovl_dentry_upper(dentry);
++	struct dentry *upper;
+ 	int err;
+ 
+ 	mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
++	upper = lookup_one_len(dentry->d_name.name, upperdir,
++			       dentry->d_name.len);
++	err = PTR_ERR(upper);
++	if (IS_ERR(upper))
++		goto out_unlock;
++
+ 	err = -ESTALE;
+-	if (upper->d_parent == upperdir) {
+-		/* Don't let d_delete() think it can reset d_inode */
+-		dget(upper);
++	if (upper == ovl_dentry_upper(dentry)) {
+ 		if (is_dir)
+ 			err = vfs_rmdir(dir, upper);
+ 		else
+ 			err = vfs_unlink(dir, upper, NULL);
+-		dput(upper);
+ 		ovl_dentry_version_inc(dentry->d_parent);
+ 	}
++	dput(upper);
+ 
+ 	/*
+ 	 * Keeping this dentry hashed would mean having to release
+@@ -614,6 +618,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
+ 	 */
+ 	if (!err)
+ 		d_drop(dentry);
++out_unlock:
+ 	mutex_unlock(&dir->i_mutex);
+ 
+ 	return err;
+@@ -834,29 +839,39 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
+ 
+ 	trap = lock_rename(new_upperdir, old_upperdir);
+ 
+-	olddentry = ovl_dentry_upper(old);
+-	newdentry = ovl_dentry_upper(new);
+-	if (newdentry) {
++
++	olddentry = lookup_one_len(old->d_name.name, old_upperdir,
++				   old->d_name.len);
++	err = PTR_ERR(olddentry);
++	if (IS_ERR(olddentry))
++		goto out_unlock;
++
++	err = -ESTALE;
++	if (olddentry != ovl_dentry_upper(old))
++		goto out_dput_old;
++
++	newdentry = lookup_one_len(new->d_name.name, new_upperdir,
++				   new->d_name.len);
++	err = PTR_ERR(newdentry);
++	if (IS_ERR(newdentry))
++		goto out_dput_old;
++
++	err = -ESTALE;
++	if (ovl_dentry_upper(new)) {
+ 		if (opaquedir) {
+-			newdentry = opaquedir;
+-			opaquedir = NULL;
++			if (newdentry != opaquedir)
++				goto out_dput;
+ 		} else {
+-			dget(newdentry);
++			if (newdentry != ovl_dentry_upper(new))
++				goto out_dput;
+ 		}
+ 	} else {
+ 		new_create = true;
+-		newdentry = lookup_one_len(new->d_name.name, new_upperdir,
+-					   new->d_name.len);
+-		err = PTR_ERR(newdentry);
+-		if (IS_ERR(newdentry))
+-			goto out_unlock;
++		if (!d_is_negative(newdentry) &&
++		    (!new_opaque || !ovl_is_whiteout(newdentry)))
++			goto out_dput;
+ 	}
+ 
+-	err = -ESTALE;
+-	if (olddentry->d_parent != old_upperdir)
+-		goto out_dput;
+-	if (newdentry->d_parent != new_upperdir)
+-		goto out_dput;
+ 	if (olddentry == trap)
+ 		goto out_dput;
+ 	if (newdentry == trap)
+@@ -919,6 +934,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
+ 
+ out_dput:
+ 	dput(newdentry);
++out_dput_old:
++	dput(olddentry);
+ out_unlock:
+ 	unlock_rename(new_upperdir, old_upperdir);
+ out_revert_creds:
+diff --git a/fs/posix_acl.c b/fs/posix_acl.c
+index 84bb65b83570..a9dafa83678c 100644
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -791,38 +791,42 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
+ 	return error;
+ }
+ 
+-static int
+-posix_acl_xattr_set(struct dentry *dentry, const char *name,
+-		const void *value, size_t size, int flags, int type)
++int
++set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
+ {
+-	struct inode *inode = d_backing_inode(dentry);
+-	struct posix_acl *acl = NULL;
+-	int ret;
+-
+ 	if (!IS_POSIXACL(inode))
+ 		return -EOPNOTSUPP;
+ 	if (!inode->i_op->set_acl)
+ 		return -EOPNOTSUPP;
+ 
+ 	if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+-		return value ? -EACCES : 0;
++		return acl ? -EACCES : 0;
+ 	if (!inode_owner_or_capable(inode))
+ 		return -EPERM;
+ 
++	if (acl) {
++		int ret = posix_acl_valid(acl);
++		if (ret)
++			return ret;
++	}
++	return inode->i_op->set_acl(inode, acl, type);
++}
++EXPORT_SYMBOL(set_posix_acl);
++
++static int
++posix_acl_xattr_set(struct dentry *dentry, const char *name,
++		const void *value, size_t size, int flags, int type)
++{
++	struct inode *inode = d_backing_inode(dentry);
++	struct posix_acl *acl = NULL;
++	int ret;
++
+ 	if (value) {
+ 		acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ 		if (IS_ERR(acl))
+ 			return PTR_ERR(acl);
+-
+-		if (acl) {
+-			ret = posix_acl_valid(acl);
+-			if (ret)
+-				goto out;
+-		}
+ 	}
+-
+-	ret = inode->i_op->set_acl(inode, acl, type);
+-out:
++	ret = set_posix_acl(inode, type, acl);
+ 	posix_acl_release(acl);
+ 	return ret;
+ }
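
The posix_acl.c refactor hoists the IS_POSIXACL, ownership, and
posix_acl_valid() checks out of the xattr handler into an exported
set_posix_acl(), which the nfsd hunks above then call, so the in-kernel NFS
paths can no longer bypass the validation the syscall path always had. A
compact sketch of such a single validating choke point (stub types and
checks, not the kernel API):

	#include <errno.h>

	struct inode { int owner_ok; };
	struct posix_acl_stub { int count; };

	static int acl_valid(const struct posix_acl_stub *acl)
	{
		return acl->count >= 0 ? 0 : -EINVAL;
	}

	static int backend_set_acl(struct inode *inode,
				   struct posix_acl_stub *acl, int type)
	{
		(void)inode; (void)acl; (void)type;
		return 0;
	}

	/* Every caller - local or remote - passes the same gate. */
	static int set_acl_checked(struct inode *inode, int type,
				   struct posix_acl_stub *acl)
	{
		if (!inode->owner_ok)
			return -EPERM;
		if (acl) {
			int ret = acl_valid(acl);
			if (ret)
				return ret;
		}
		return backend_set_acl(inode, acl, type);
	}
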
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 6dee68d013ff..32287fb146db 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1257,7 +1257,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
+ 	if (!pm.buffer)
+ 		goto out_task;
+ 
+-	mm = mm_access(task, PTRACE_MODE_READ);
++	mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
+ 	ret = PTR_ERR(mm);
+ 	if (!mm || IS_ERR(mm))
+ 		goto out_free;
+diff --git a/include/linux/i8042.h b/include/linux/i8042.h
+index 0f9bafa17a02..d98780ca9604 100644
+--- a/include/linux/i8042.h
++++ b/include/linux/i8042.h
+@@ -62,7 +62,6 @@ struct serio;
+ void i8042_lock_chip(void);
+ void i8042_unlock_chip(void);
+ int i8042_command(unsigned char *param, int command);
+-bool i8042_check_port_owner(const struct serio *);
+ int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ 					struct serio *serio));
+ int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
+@@ -83,11 +82,6 @@ static inline int i8042_command(unsigned char *param, int command)
+ 	return -ENODEV;
+ }
+ 
+-static inline bool i8042_check_port_owner(const struct serio *serio)
+-{
+-	return false;
+-}
+-
+ static inline int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
+ 					struct serio *serio))
+ {
+diff --git a/include/linux/serio.h b/include/linux/serio.h
+index 9f779c7a2da4..27ae809edd70 100644
+--- a/include/linux/serio.h
++++ b/include/linux/serio.h
+@@ -29,7 +29,8 @@ struct serio {
+ 
+ 	struct serio_device_id id;
+ 
+-	spinlock_t lock;		/* protects critical sections from port's interrupt handler */
++	/* Protects critical sections from port's interrupt handler */
++	spinlock_t lock;
+ 
+ 	int (*write)(struct serio *, unsigned char);
+ 	int (*open)(struct serio *);
+@@ -38,16 +39,29 @@ struct serio {
+ 	void (*stop)(struct serio *);
+ 
+ 	struct serio *parent;
+-	struct list_head child_node;	/* Entry in parent->children list */
++	/* Entry in parent->children list */
++	struct list_head child_node;
+ 	struct list_head children;
+-	unsigned int depth;		/* level of nesting in serio hierarchy */
++	/* Level of nesting in serio hierarchy */
++	unsigned int depth;
+ 
+-	struct serio_driver *drv;	/* accessed from interrupt, must be protected by serio->lock and serio->sem */
+-	struct mutex drv_mutex;		/* protects serio->drv so attributes can pin driver */
++	/*
++	 * serio->drv is accessed from interrupt handlers; when modifying
++	 * it, the caller should acquire serio->drv_mutex and serio->lock.
++	 */
++	struct serio_driver *drv;
++	/* Protects serio->drv so attributes can pin current driver */
++	struct mutex drv_mutex;
+ 
+ 	struct device dev;
+ 
+ 	struct list_head node;
++
++	/*
++	 * For use by PS/2 layer when several ports share hardware and
++	 * may get indigestion when exposed to concurrent access (i8042).
++	 */
++	struct mutex *ps2_cmd_mutex;
+ };
+ #define to_serio_port(d)	container_of(d, struct serio, dev)
+ 
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index 003dca933803..5664ca07c9c7 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(int, pid)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->pid = rqst? rqst->rq_task->pid : 0;
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++				&xprt->xpt_remote,
++				xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		__entry->pid, show_svc_xprt_flags(__entry->flags))
+ );
+ 
+@@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+-		__entry->xprt = xprt,
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		__entry->xprt = xprt;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++					&xprt->xpt_remote,
++					xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		show_svc_xprt_flags(__entry->flags))
+ );
+ 
+@@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt,
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+ 		__field(int, len)
+-		__field_struct(struct sockaddr_storage, ss)
+ 		__field(unsigned long, flags)
++		__dynamic_array(unsigned char, addr, xprt != NULL ?
++			xprt->xpt_remotelen : 0)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->len = len;
+-		__entry->flags = xprt ? xprt->xpt_flags : 0;
++		if (xprt) {
++			memcpy(__get_dynamic_array(addr),
++					&xprt->xpt_remote,
++					xprt->xpt_remotelen);
++			__entry->flags = xprt->xpt_flags;
++		} else
++			__entry->flags = 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->ss,
++		__get_dynamic_array_len(addr) != 0 ?
++			(struct sockaddr *)__get_dynamic_array(addr) : NULL,
+ 		__entry->len, show_svc_xprt_flags(__entry->flags))
+ );
+ #endif /* _TRACE_SUNRPC_H */
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 652540613d26..3b2b0f5149ab 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -678,7 +678,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
+ 		rcu_read_lock();
+ 		ipc_lock_object(&msq->q_perm);
+ 
+-		ipc_rcu_putref(msq, ipc_rcu_free);
++		ipc_rcu_putref(msq, msg_rcu_free);
+ 		/* raced with RMID? */
+ 		if (!ipc_valid_object(&msq->q_perm)) {
+ 			err = -EIDRM;
+diff --git a/ipc/sem.c b/ipc/sem.c
+index c50aa5755c62..534caee6bf33 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -442,7 +442,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
+ static inline void sem_lock_and_putref(struct sem_array *sma)
+ {
+ 	sem_lock(sma, NULL, -1);
+-	ipc_rcu_putref(sma, ipc_rcu_free);
++	ipc_rcu_putref(sma, sem_rcu_free);
+ }
+ 
+ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
+@@ -1385,7 +1385,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ 			rcu_read_unlock();
+ 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ 			if (sem_io == NULL) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				return -ENOMEM;
+ 			}
+ 
+@@ -1419,20 +1419,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
+ 		if (nsems > SEMMSL_FAST) {
+ 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
+ 			if (sem_io == NULL) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				return -ENOMEM;
+ 			}
+ 		}
+ 
+ 		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
+-			ipc_rcu_putref(sma, ipc_rcu_free);
++			ipc_rcu_putref(sma, sem_rcu_free);
+ 			err = -EFAULT;
+ 			goto out_free;
+ 		}
+ 
+ 		for (i = 0; i < nsems; i++) {
+ 			if (sem_io[i] > SEMVMX) {
+-				ipc_rcu_putref(sma, ipc_rcu_free);
++				ipc_rcu_putref(sma, sem_rcu_free);
+ 				err = -ERANGE;
+ 				goto out_free;
+ 			}
+@@ -1722,7 +1722,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
+ 	/* step 2: allocate new undo structure */
+ 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
+ 	if (!new) {
+-		ipc_rcu_putref(sma, ipc_rcu_free);
++		ipc_rcu_putref(sma, sem_rcu_free);
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+diff --git a/kernel/auditsc.c b/kernel/auditsc.c
+index 9fb9d1cb83ce..b1943039aab6 100644
+--- a/kernel/auditsc.c
++++ b/kernel/auditsc.c
+@@ -73,6 +73,7 @@
+ #include <linux/compat.h>
+ #include <linux/ctype.h>
+ #include <linux/string.h>
++#include <linux/uaccess.h>
+ #include <uapi/linux/limits.h>
+ 
+ #include "audit.h"
+@@ -82,7 +83,8 @@
+ #define AUDITSC_SUCCESS 1
+ #define AUDITSC_FAILURE 2
+ 
+-/* no execve audit message should be longer than this (userspace limits) */
++/* no execve audit message should be longer than this (userspace limits),
++ * see the note near the top of audit_log_execve_info() about this value */
+ #define MAX_EXECVE_AUDIT_LEN 7500
+ 
+ /* max length to print of cmdline/proctitle value during audit */
+@@ -987,185 +989,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
+ 	return rc;
+ }
+ 
+-/*
+- * to_send and len_sent accounting are very loose estimates.  We aren't
+- * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
+- * within about 500 bytes (next page boundary)
+- *
+- * why snprintf?  an int is up to 12 digits long.  if we just assumed when
+- * logging that a[%d]= was going to be 16 characters long we would be wasting
+- * space in every audit message.  In one 7500 byte message we can log up to
+- * about 1000 min size arguments.  That comes down to about 50% waste of space
+- * if we didn't do the snprintf to find out how long arg_num_len was.
+- */
+-static int audit_log_single_execve_arg(struct audit_context *context,
+-					struct audit_buffer **ab,
+-					int arg_num,
+-					size_t *len_sent,
+-					const char __user *p,
+-					char *buf)
++static void audit_log_execve_info(struct audit_context *context,
++				  struct audit_buffer **ab)
+ {
+-	char arg_num_len_buf[12];
+-	const char __user *tmp_p = p;
+-	/* how many digits are in arg_num? 5 is the length of ' a=""' */
+-	size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5;
+-	size_t len, len_left, to_send;
+-	size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN;
+-	unsigned int i, has_cntl = 0, too_long = 0;
+-	int ret;
+-
+-	/* strnlen_user includes the null we don't want to send */
+-	len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1;
+-
+-	/*
+-	 * We just created this mm, if we can't find the strings
+-	 * we just copied into it something is _very_ wrong. Similar
+-	 * for strings that are too long, we should not have created
+-	 * any.
+-	 */
+-	if (unlikely((len == -1) || len > MAX_ARG_STRLEN - 1)) {
+-		WARN_ON(1);
+-		send_sig(SIGKILL, current, 0);
+-		return -1;
++	long len_max;
++	long len_rem;
++	long len_full;
++	long len_buf;
++	long len_abuf;
++	long len_tmp;
++	bool require_data;
++	bool encode;
++	unsigned int iter;
++	unsigned int arg;
++	char *buf_head;
++	char *buf;
++	const char __user *p = (const char __user *)current->mm->arg_start;
++
++	/* NOTE: this buffer needs to be large enough to hold all the non-arg
++	 *       data we put in the audit record for this argument (see the
++	 *       code below) ... at this point in time 96 is plenty */
++	char abuf[96];
++
++	/* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the
++	 *       current value of 7500 is not as important as the fact that it
++	 *       is less than 8k, a setting of 7500 gives us plenty of wiggle
++	 *       room if we go over a little bit in the logging below */
++	WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500);
++	len_max = MAX_EXECVE_AUDIT_LEN;
++
++	/* scratch buffer to hold the userspace args */
++	buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
++	if (!buf_head) {
++		audit_panic("out of memory for argv string");
++		return;
+ 	}
++	buf = buf_head;
+ 
+-	/* walk the whole argument looking for non-ascii chars */
++	audit_log_format(*ab, "argc=%d", context->execve.argc);
++
++	len_rem = len_max;
++	len_buf = 0;
++	len_full = 0;
++	require_data = true;
++	encode = false;
++	iter = 0;
++	arg = 0;
+ 	do {
+-		if (len_left > MAX_EXECVE_AUDIT_LEN)
+-			to_send = MAX_EXECVE_AUDIT_LEN;
+-		else
+-			to_send = len_left;
+-		ret = copy_from_user(buf, tmp_p, to_send);
+-		/*
+-		 * There is no reason for this copy to be short. We just
+-		 * copied them here, and the mm hasn't been exposed to user-
+-		 * space yet.
+-		 */
+-		if (ret) {
+-			WARN_ON(1);
+-			send_sig(SIGKILL, current, 0);
+-			return -1;
+-		}
+-		buf[to_send] = '\0';
+-		has_cntl = audit_string_contains_control(buf, to_send);
+-		if (has_cntl) {
+-			/*
+-			 * hex messages get logged as 2 bytes, so we can only
+-			 * send half as much in each message
+-			 */
+-			max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2;
+-			break;
+-		}
+-		len_left -= to_send;
+-		tmp_p += to_send;
+-	} while (len_left > 0);
+-
+-	len_left = len;
+-
+-	if (len > max_execve_audit_len)
+-		too_long = 1;
+-
+-	/* rewalk the argument actually logging the message */
+-	for (i = 0; len_left > 0; i++) {
+-		int room_left;
+-
+-		if (len_left > max_execve_audit_len)
+-			to_send = max_execve_audit_len;
+-		else
+-			to_send = len_left;
+-
+-		/* do we have space left to send this argument in this ab? */
+-		room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent;
+-		if (has_cntl)
+-			room_left -= (to_send * 2);
+-		else
+-			room_left -= to_send;
+-		if (room_left < 0) {
+-			*len_sent = 0;
+-			audit_log_end(*ab);
+-			*ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE);
+-			if (!*ab)
+-				return 0;
+-		}
++		/* NOTE: we don't ever want to trust this value for anything
++		 *       serious, but the audit record format insists we
++		 *       provide an argument length for really long arguments,
++		 *       e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but
++		 *       to use strncpy_from_user() to obtain this value for
++		 *       recording in the log, although we don't use it
++		 *       anywhere here to avoid a double-fetch problem */
++		if (len_full == 0)
++			len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1;
++
++		/* read more data from userspace */
++		if (require_data) {
++			/* can we make more room in the buffer? */
++			if (buf != buf_head) {
++				memmove(buf_head, buf, len_buf);
++				buf = buf_head;
++			}
++
++			/* fetch as much as we can of the argument */
++			len_tmp = strncpy_from_user(&buf_head[len_buf], p,
++						    len_max - len_buf);
++			if (len_tmp == -EFAULT) {
++				/* unable to copy from userspace */
++				send_sig(SIGKILL, current, 0);
++				goto out;
++			} else if (len_tmp == (len_max - len_buf)) {
++				/* buffer is not large enough */
++				require_data = true;
++				/* NOTE: if we are going to span multiple
++				 *       buffers force the encoding so we stand
++				 *       a chance at a sane len_full value and
++				 *       consistent record encoding */
++				encode = true;
++				len_full = len_full * 2;
++				p += len_tmp;
++			} else {
++				require_data = false;
++				if (!encode)
++					encode = audit_string_contains_control(
++								buf, len_tmp);
++				/* try to use a trusted value for len_full */
++				if (len_full < len_max)
++					len_full = (encode ?
++						    len_tmp * 2 : len_tmp);
++				p += len_tmp + 1;
++			}
++			len_buf += len_tmp;
++			buf_head[len_buf] = '\0';
+ 
+-		/*
+-		 * first record needs to say how long the original string was
+-		 * so we can be sure nothing was lost.
+-		 */
+-		if ((i == 0) && (too_long))
+-			audit_log_format(*ab, " a%d_len=%zu", arg_num,
+-					 has_cntl ? 2*len : len);
+-
+-		/*
+-		 * normally arguments are small enough to fit and we already
+-		 * filled buf above when we checked for control characters
+-		 * so don't bother with another copy_from_user
+-		 */
+-		if (len >= max_execve_audit_len)
+-			ret = copy_from_user(buf, p, to_send);
+-		else
+-			ret = 0;
+-		if (ret) {
+-			WARN_ON(1);
+-			send_sig(SIGKILL, current, 0);
+-			return -1;
++			/* length of the buffer in the audit record? */
++			len_abuf = (encode ? len_buf * 2 : len_buf + 2);
+ 		}
+-		buf[to_send] = '\0';
+-
+-		/* actually log it */
+-		audit_log_format(*ab, " a%d", arg_num);
+-		if (too_long)
+-			audit_log_format(*ab, "[%d]", i);
+-		audit_log_format(*ab, "=");
+-		if (has_cntl)
+-			audit_log_n_hex(*ab, buf, to_send);
+-		else
+-			audit_log_string(*ab, buf);
+-
+-		p += to_send;
+-		len_left -= to_send;
+-		*len_sent += arg_num_len;
+-		if (has_cntl)
+-			*len_sent += to_send * 2;
+-		else
+-			*len_sent += to_send;
+-	}
+-	/* include the null we didn't log */
+-	return len + 1;
+-}
+ 
+-static void audit_log_execve_info(struct audit_context *context,
+-				  struct audit_buffer **ab)
+-{
+-	int i, len;
+-	size_t len_sent = 0;
+-	const char __user *p;
+-	char *buf;
++		/* write as much as we can to the audit log */
++		if (len_buf > 0) {
++			/* NOTE: some magic numbers here - basically if we
++			 *       can't fit a reasonable amount of data into the
++			 *       existing audit buffer, flush it and start with
++			 *       a new buffer */
++			if ((sizeof(abuf) + 8) > len_rem) {
++				len_rem = len_max;
++				audit_log_end(*ab);
++				*ab = audit_log_start(context,
++						      GFP_KERNEL, AUDIT_EXECVE);
++				if (!*ab)
++					goto out;
++			}
+ 
+-	p = (const char __user *)current->mm->arg_start;
++			/* create the non-arg portion of the arg record */
++			len_tmp = 0;
++			if (require_data || (iter > 0) ||
++			    ((len_abuf + sizeof(abuf)) > len_rem)) {
++				if (iter == 0) {
++					len_tmp += snprintf(&abuf[len_tmp],
++							sizeof(abuf) - len_tmp,
++							" a%d_len=%lu",
++							arg, len_full);
++				}
++				len_tmp += snprintf(&abuf[len_tmp],
++						    sizeof(abuf) - len_tmp,
++						    " a%d[%d]=", arg, iter++);
++			} else
++				len_tmp += snprintf(&abuf[len_tmp],
++						    sizeof(abuf) - len_tmp,
++						    " a%d=", arg);
++			WARN_ON(len_tmp >= sizeof(abuf));
++			abuf[sizeof(abuf) - 1] = '\0';
++
++			/* log the arg in the audit record */
++			audit_log_format(*ab, "%s", abuf);
++			len_rem -= len_tmp;
++			len_tmp = len_buf;
++			if (encode) {
++				if (len_abuf > len_rem)
++					len_tmp = len_rem / 2; /* encoding */
++				audit_log_n_hex(*ab, buf, len_tmp);
++				len_rem -= len_tmp * 2;
++				len_abuf -= len_tmp * 2;
++			} else {
++				if (len_abuf > len_rem)
++					len_tmp = len_rem - 2; /* quotes */
++				audit_log_n_string(*ab, buf, len_tmp);
++				len_rem -= len_tmp + 2;
++				/* don't subtract the "2" because we still need
++				 * to add quotes to the remaining string */
++				len_abuf -= len_tmp;
++			}
++			len_buf -= len_tmp;
++			buf += len_tmp;
++		}
+ 
+-	audit_log_format(*ab, "argc=%d", context->execve.argc);
++		/* ready to move to the next argument? */
++		if ((len_buf == 0) && !require_data) {
++			arg++;
++			iter = 0;
++			len_full = 0;
++			require_data = true;
++			encode = false;
++		}
++	} while (arg < context->execve.argc);
+ 
+-	/*
+-	 * we need some kernel buffer to hold the userspace args.  Just
+-	 * allocate one big one rather than allocating one of the right size
+-	 * for every single argument inside audit_log_single_execve_arg()
+-	 * should be <8k allocation so should be pretty safe.
+-	 */
+-	buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
+-	if (!buf) {
+-		audit_panic("out of memory for argv string");
+-		return;
+-	}
++	/* NOTE: the caller handles the final audit_log_end() call */
+ 
+-	for (i = 0; i < context->execve.argc; i++) {
+-		len = audit_log_single_execve_arg(context, ab, i,
+-						  &len_sent, p, buf);
+-		if (len <= 0)
+-			break;
+-		p += len;
+-	}
+-	kfree(buf);
++out:
++	kfree(buf_head);
+ }
+ 
+ static void show_special(struct audit_context *context, int *call_panic)
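
The rewritten audit_log_execve_info() above budgets each argument before
logging it: hex encoding emits two output characters per input byte, while
plain logging wraps the string in two quote characters, which is exactly the
len_abuf = (encode ? len_buf * 2 : len_buf + 2) computation. A minimal
userspace sketch of that accounting, with a stand-in for the kernel's
control-character scan:

	#include <stdbool.h>
	#include <stddef.h>

	/* stand-in for the kernel's control-character test */
	static bool needs_hex(const char *buf, size_t len)
	{
		for (size_t i = 0; i < len; i++) {
			unsigned char c = buf[i];

			if (c == '"' || c < 0x21 || c > 0x7e)
				return true;
		}
		return false;
	}

	/* bytes the argument's value will occupy in the audit record */
	static size_t audit_arg_len(const char *buf, size_t len)
	{
		return needs_hex(buf, len) ? len * 2	/* 2 hex chars/byte */
					   : len + 2;	/* plus two quotes */
	}
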
+diff --git a/kernel/module.c b/kernel/module.c
+index be8971d817ed..6920d1080cdd 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2451,13 +2451,18 @@ static inline void kmemleak_load_module(const struct module *mod,
+ #endif
+ 
+ #ifdef CONFIG_MODULE_SIG
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	int err = -ENOKEY;
+ 	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
+ 	const void *mod = info->hdr;
+ 
+-	if (info->len > markerlen &&
++	/*
++	 * Require flags == 0, as a module with version information
++	 * removed is no longer the module that was signed
++	 */
++	if (flags == 0 &&
++	    info->len > markerlen &&
+ 	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
+ 		/* We truncate the module to discard the signature */
+ 		info->len -= markerlen;
+@@ -2476,7 +2481,7 @@ static int module_sig_check(struct load_info *info)
+ 	return err;
+ }
+ #else /* !CONFIG_MODULE_SIG */
+-static int module_sig_check(struct load_info *info)
++static int module_sig_check(struct load_info *info, int flags)
+ {
+ 	return 0;
+ }
+@@ -3277,7 +3282,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ 	long err;
+ 	char *after_dashes;
+ 
+-	err = module_sig_check(info);
++	err = module_sig_check(info, flags);
+ 	if (err)
+ 		goto free_copy;
+ 
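
The extra flags check above exists because the signature is an opaque blob
appended to the module image, terminated by the kernel's MODULE_SIG_STRING
marker; if the loader was asked to strip sections (flags != 0), the image no
longer matches the bytes that were signed, so the signature must not be
honored. A minimal sketch of the trailing-marker test itself, assuming the
marker string the kernel uses:

	#include <stdbool.h>
	#include <stddef.h>
	#include <string.h>

	#define SIG_MARKER "~Module signature appended~\n"

	/*
	 * True if buf ends with the marker; on success *len is truncated so
	 * the caller can parse the signature block preceding the marker.
	 */
	static bool strip_sig_marker(const char *buf, size_t *len)
	{
		const size_t mlen = sizeof(SIG_MARKER) - 1;

		if (*len <= mlen)
			return false;
		if (memcmp(buf + *len - mlen, SIG_MARKER, mlen) != 0)
			return false;
		*len -= mlen;
		return true;
	}
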
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 9d724c0383d2..88b472bec71d 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1756,6 +1756,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
+ 		 * and reducing the surplus.
+ 		 */
+ 		spin_unlock(&hugetlb_lock);
++
++		/* yield cpu to avoid soft lockup */
++		cond_resched();
++
+ 		if (hstate_is_gigantic(h))
+ 			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
+ 		else
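
The cond_resched() added above is the standard defense against the
soft-lockup watchdog in long kernel loops: on a non-preemptible kernel a
loop that never sleeps can monopolize a CPU for tens of seconds. Here the
spinlock is already dropped at that point, so yielding is safe. The shape
of the pattern, with hypothetical helpers standing in for the real work:

	while (more_pages_needed()) {		/* hypothetical condition */
		spin_unlock(&hugetlb_lock);
		cond_resched();			/* no locks held: safe to yield */
		allocate_one_huge_page();	/* hypothetical slow step */
		spin_lock(&hugetlb_lock);
	}
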
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a7278f05eafb..289c96d3e47d 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
+ 			break;
+ 		}
+ 
+-		if (get_user(opt, (u32 __user *) optval)) {
++		if (get_user(opt, (u16 __user *) optval)) {
+ 			err = -EFAULT;
+ 			break;
+ 		}
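
The one-character fix above matters because this socket option is declared
as a 16-bit value: get_user() through a u32 pointer fetches four bytes,
which can read past the two-byte buffer userspace supplied and, on
big-endian systems, leaves the wrong half of the word in the option. The
general rule, sketched as a userspace analog:

	#include <errno.h>
	#include <stdint.h>
	#include <string.h>

	/* copy exactly the option's declared width, never more */
	static int get_opt_u16(uint16_t *dst, const void *optval, size_t optlen)
	{
		if (optlen < sizeof(*dst))
			return -EINVAL;
		memcpy(dst, optval, sizeof(*dst));	/* 2 bytes, not 4 */
		return 0;
	}
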
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index 9a1edcde4ba5..eca46d3d3ff3 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1024,8 +1024,11 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
+ 	}
+ 
+ 	/* Check if we have opened a local TSAP */
+-	if (!self->tsap)
+-		irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++	if (!self->tsap) {
++		err = irda_open_tsap(self, LSAP_ANY, addr->sir_name);
++		if (err)
++			goto out;
++	}
+ 
+ 	/* Move to connecting socket, start sending Connect Requests */
+ 	sock->state = SS_CONNECTING;
+diff --git a/net/netfilter/nf_nat_redirect.c b/net/netfilter/nf_nat_redirect.c
+index 97b75f9bfbcd..d43869879fcf 100644
+--- a/net/netfilter/nf_nat_redirect.c
++++ b/net/netfilter/nf_nat_redirect.c
+@@ -55,7 +55,7 @@ nf_nat_redirect_ipv4(struct sk_buff *skb,
+ 
+ 		rcu_read_lock();
+ 		indev = __in_dev_get_rcu(skb->dev);
+-		if (indev != NULL) {
++		if (indev && indev->ifa_list) {
+ 			ifa = indev->ifa_list;
+ 			newdst = ifa->ifa_local;
+ 		}
+diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
+index 28cddc85b700..bfa2b6d5b5cf 100644
+--- a/net/netlabel/netlabel_kapi.c
++++ b/net/netlabel/netlabel_kapi.c
+@@ -824,7 +824,11 @@ socket_setattr_return:
+  */
+ void netlbl_sock_delattr(struct sock *sk)
+ {
+-	cipso_v4_sock_delattr(sk);
++	switch (sk->sk_family) {
++	case AF_INET:
++		cipso_v4_sock_delattr(sk);
++		break;
++	}
+ }
+ 
+ /**
+@@ -987,7 +991,11 @@ req_setattr_return:
+ */
+ void netlbl_req_delattr(struct request_sock *req)
+ {
+-	cipso_v4_req_delattr(req);
++	switch (req->rsk_ops->family) {
++	case AF_INET:
++		cipso_v4_req_delattr(req);
++		break;
++	}
+ }
+ 
+ /**
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index e00bcd129336..5dffe37f300e 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -33,10 +33,17 @@
+ #include <string.h>
+ #include <unistd.h>
+ 
++/*
++ * glibc synced up and added the metag number but didn't add the relocations.
++ * Work around this in a crude manner for now.
++ */
+ #ifndef EM_METAG
+-/* Remove this when these make it to the standard system elf.h. */
+ #define EM_METAG      174
++#endif
++#ifndef R_METAG_ADDR32
+ #define R_METAG_ADDR32                   2
++#endif
++#ifndef R_METAG_NONE
+ #define R_METAG_NONE                     3
+ #endif
+ 
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 7e3020c1e9d3..43e785a79eb3 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1247,6 +1247,7 @@ static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
+ 		tu->tstamp = *tstamp;
+ 	if ((tu->filter & (1 << event)) == 0 || !tu->tread)
+ 		return;
++	memset(&r1, 0, sizeof(r1));
+ 	r1.event = event;
+ 	r1.tstamp = *tstamp;
+ 	r1.val = resolution;
+@@ -1281,6 +1282,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
+ 	}
+ 	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
+ 	    tu->last_resolution != resolution) {
++		memset(&r1, 0, sizeof(r1));
+ 		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
+ 		r1.tstamp = tstamp;
+ 		r1.val = resolution;
+@@ -1746,6 +1748,7 @@ static int snd_timer_user_params(struct file *file,
+ 	if (tu->timeri->flags & SNDRV_TIMER_IFLG_EARLY_EVENT) {
+ 		if (tu->tread) {
+ 			struct snd_timer_tread tread;
++			memset(&tread, 0, sizeof(tread));
+ 			tread.event = SNDRV_TIMER_EVENT_EARLY;
+ 			tread.tstamp.tv_sec = 0;
+ 			tread.tstamp.tv_nsec = 0;
+diff --git a/sound/hda/array.c b/sound/hda/array.c
+index 516795baa7db..5dfa610e4471 100644
+--- a/sound/hda/array.c
++++ b/sound/hda/array.c
+@@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array)
+ 		return NULL;
+ 	if (array->used >= array->alloced) {
+ 		int num = array->alloced + array->alloc_align;
++		int oldsize = array->alloced * array->elem_size;
+ 		int size = (num + 1) * array->elem_size;
+ 		void *nlist;
+ 		if (snd_BUG_ON(num >= 4096))
+ 			return NULL;
+-		nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO);
++		nlist = krealloc(array->list, size, GFP_KERNEL);
+ 		if (!nlist)
+ 			return NULL;
++		memset(nlist + oldsize, 0, size - oldsize);
+ 		array->list = nlist;
+ 		array->alloced = num;
+ 	}
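
The fix above works around a krealloc() subtlety: when the existing
allocation can simply be grown in place, __GFP_ZERO does not zero the newly
exposed tail, so the caller has to clear it explicitly. Plain realloc()
behaves the same way; a userspace sketch:

	#include <stdlib.h>
	#include <string.h>

	/* grow an array and guarantee the new tail reads as zero */
	static void *grow_zeroed(void *p, size_t oldsize, size_t newsize)
	{
		void *n = realloc(p, newsize);

		if (!n)
			return NULL;	/* caller still owns p */
		memset((char *)n + oldsize, 0, newsize - oldsize);
		return n;
	}
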
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index f24a69db0dd8..c8506496826a 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2114,6 +2114,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_DEVICE(0x1022, 0x780d),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ 	/* ATI HDMI */
++	{ PCI_DEVICE(0x1002, 0x0002),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x1308),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x157a),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index b3234321aa4b..429697a93a71 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4584,6 +4584,71 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc298_fixup_speaker_volume(struct hda_codec *codec,
++					const struct hda_fixup *fix, int action)
++{
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		/* The speaker is routed to Node 0x06 by mistake; as a result
++		   we can't adjust the speaker's volume, since that node has no
++		   Amp-out capability. Change the speaker's route to:
++		   Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) ->
++		   Node 0x17 (Pin Complex). Since Node 0x02 has Amp-out caps,
++		   the speaker's volume can now be adjusted. */
++
++		hda_nid_t conn1[1] = { 0x0c };
++		snd_hda_override_conn_list(codec, 0x17, 1, conn1);
++	}
++}
++
++/* Hook to update amp GPIO4 for automute */
++static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec,
++					  struct hda_jack_callback *jack)
++{
++	struct alc_spec *spec = codec->spec;
++
++	snd_hda_gen_hp_automute(codec, jack);
++	/* mute_led_polarity is set to 0, so we pass inverted value here */
++	alc_update_gpio_led(codec, 0x10, !spec->gen.hp_jack_present);
++}
++
++/* Manage GPIOs for HP EliteBook Folio 9480m.
++ *
++ * GPIO4 is the headphone amplifier power control
++ * GPIO3 is the audio output mute indicator LED
++ */
++
++static void alc280_fixup_hp_9480m(struct hda_codec *codec,
++				  const struct hda_fixup *fix,
++				  int action)
++{
++	struct alc_spec *spec = codec->spec;
++	static const struct hda_verb gpio_init[] = {
++		{ 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
++		{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
++		{}
++	};
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		/* Set the hooks to turn the headphone amp on/off
++		 * as needed
++		 */
++		spec->gen.vmaster_mute.hook = alc_fixup_gpio_mute_hook;
++		spec->gen.hp_automute_hook = alc280_hp_gpio4_automute_hook;
++
++		/* The GPIOs are currently off */
++		spec->gpio_led = 0;
++
++		/* GPIO3 is connected to the output mute LED,
++		 * high is on, low is off
++		 */
++		spec->mute_led_polarity = 0;
++		spec->gpio_mute_led_mask = 0x08;
++
++		/* Initialize GPIO configuration */
++		snd_hda_add_verbs(codec, gpio_init);
++	}
++}
++
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+ 
+@@ -4665,6 +4730,7 @@ enum {
+ 	ALC286_FIXUP_HP_GPIO_LED,
+ 	ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
+ 	ALC280_FIXUP_HP_DOCK_PINS,
++	ALC280_FIXUP_HP_9480M,
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC288_FIXUP_DELL_XPS_13_GPIO6,
+@@ -4683,6 +4749,7 @@ enum {
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
+ 	ALC221_FIXUP_HP_FRONT_MIC,
+ 	ALC292_FIXUP_TPT460,
++	ALC298_FIXUP_SPK_VOLUME,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5199,6 +5266,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC280_FIXUP_HP_GPIO4
+ 	},
++	[ALC280_FIXUP_HP_9480M] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc280_fixup_hp_9480m,
++	},
+ 	[ALC288_FIXUP_DELL_HEADSET_MODE] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_headset_mode_dell_alc288,
+@@ -5334,6 +5405,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
+ 	},
++	[ALC298_FIXUP_SPK_VOLUME] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc298_fixup_speaker_volume,
++		.chained = true,
++		.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5380,6 +5457,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -5407,6 +5485,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
+ 	SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
+ 	/* ALC290 */
+@@ -5700,6 +5779,32 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1e, 0x411111f0},
+ 		{0x21, 0x0221103f}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x40000000},
++		{0x14, 0x90170130},
++		{0x1b, 0x02011020},
++		{0x21, 0x0221103f}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x14, 0x90170150},
++		{0x17, 0x411111f0},
++		{0x18, 0x411111f0},
++		{0x19, 0x411111f0},
++		{0x1a, 0x411111f0},
++		{0x1b, 0x02011020},
++		{0x1d, 0x4054c029},
++		{0x1e, 0x411111f0},
++		{0x21, 0x0221105f}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x40000000},
++		{0x14, 0x90170110},
++		{0x17, 0x411111f0},
++		{0x18, 0x411111f0},
++		{0x19, 0x411111f0},
++		{0x1a, 0x411111f0},
++		{0x1b, 0x01014020},
++		{0x1d, 0x4054c029},
++		{0x1e, 0x411111f0},
++		{0x21, 0x0221101f}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60160},
+ 		{0x14, 0x90170120},
+ 		{0x17, 0x90170140},
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index d93deb5ce4f2..f9746f29f870 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -134,6 +134,7 @@ int vcpu_load(struct kvm_vcpu *vcpu)
+ 	put_cpu();
+ 	return 0;
+ }
++EXPORT_SYMBOL_GPL(vcpu_load);
+ 
+ void vcpu_put(struct kvm_vcpu *vcpu)
+ {
+@@ -143,6 +144,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
+ 	preempt_enable();
+ 	mutex_unlock(&vcpu->mutex);
+ }
++EXPORT_SYMBOL_GPL(vcpu_put);
+ 
+ static void ack_flush(void *_completed)
+ {


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-08-10 12:55 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-08-10 12:55 UTC (permalink / raw
  To: gentoo-commits

commit:     77a30b98a82b92f4edee8de7f15ac9022397d4a7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 10 12:55:32 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 10 12:55:32 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=77a30b98

Linux patch 4.1.30

 0000_README             |    4 +
 1029_linux-4.1.30.patch | 1516 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1520 insertions(+)

diff --git a/0000_README b/0000_README
index a9101d3..f04d74d 100644
--- a/0000_README
+++ b/0000_README
@@ -159,6 +159,10 @@ Patch:  1028_linux-4.1.29.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.29
 
+Patch:  1029_linux-4.1.30.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.30
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1029_linux-4.1.30.patch b/1029_linux-4.1.30.patch
new file mode 100644
index 0000000..18a058f
--- /dev/null
+++ b/1029_linux-4.1.30.patch
@@ -0,0 +1,1516 @@
+diff --git a/Makefile b/Makefile
+index 76fa21fa16b8..137679c0cc49 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 29
++SUBLEVEL = 30
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
+index e46e9ea1e187..0ee54cd9bf0b 100644
+--- a/arch/arm/mach-mvebu/coherency.c
++++ b/arch/arm/mach-mvebu/coherency.c
+@@ -116,22 +116,16 @@ static void __init armada_370_coherency_init(struct device_node *np)
+ }
+ 
+ /*
+- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
+- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
+- * is needed as a workaround for a deadlock issue between the PCIe
+- * interface and the cache controller.
++ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
++ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
++ * needed for the HW I/O coherency mechanism to work properly without
++ * deadlock.
+  */
+ static void __iomem *
+-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+-			      unsigned int mtype, void *caller)
++armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
++			 unsigned int mtype, void *caller)
+ {
+-	struct resource pcie_mem;
+-
+-	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
+-
+-	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
+-		mtype = MT_UNCACHED;
+-
++	mtype = MT_UNCACHED;
+ 	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+ }
+ 
+@@ -140,7 +134,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
+ 	struct device_node *cache_dn;
+ 
+ 	coherency_cpu_base = of_iomap(np, 0);
+-	arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
++	arch_ioremap_caller = armada_wa_ioremap_caller;
+ 
+ 	/*
+ 	 * We should switch the PL310 to I/O coherency mode only if
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 49b52035226c..be73c491182b 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -686,9 +686,6 @@ static void __init arch_mem_init(char **cmdline_p)
+ 	for_each_memblock(reserved, reg)
+ 		if (reg->size != 0)
+ 			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+-
+-	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
+-			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
+ }
+ 
+ static void __init resource_init(void)
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 71c7ace855d7..1d71181dcc04 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1301,18 +1301,10 @@ static int __init numa_parse_sun4u(void)
+ 
+ static int __init bootmem_init_numa(void)
+ {
+-	int i, j;
+ 	int err = -1;
+ 
+ 	numadbg("bootmem_init_numa()\n");
+ 
+-	/* Some sane defaults for numa latency values */
+-	for (i = 0; i < MAX_NUMNODES; i++) {
+-		for (j = 0; j < MAX_NUMNODES; j++)
+-			numa_latency[i][j] = (i == j) ?
+-				LOCAL_DISTANCE : REMOTE_DISTANCE;
+-	}
+-
+ 	if (numa_enabled) {
+ 		if (tlb_type == hypervisor)
+ 			err = numa_parse_mdesc();
+diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
+index fe9f0b79a18b..1390ecd8392a 100644
+--- a/arch/x86/kernel/early-quirks.c
++++ b/arch/x86/kernel/early-quirks.c
+@@ -11,7 +11,11 @@
+ 
+ #include <linux/pci.h>
+ #include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/pci_ids.h>
++#include <linux/bcma/bcma.h>
++#include <linux/bcma/bcma_regs.h>
+ #include <drm/i915_drm.h>
+ #include <asm/pci-direct.h>
+ #include <asm/dma.h>
+@@ -21,6 +25,9 @@
+ #include <asm/iommu.h>
+ #include <asm/gart.h>
+ #include <asm/irq_remapping.h>
++#include <asm/early_ioremap.h>
++
++#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
+ 
+ static void __init fix_hypertransport_config(int num, int slot, int func)
+ {
+@@ -76,6 +83,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
+ #ifdef CONFIG_ACPI
+ #ifdef CONFIG_X86_IO_APIC
+ 	/*
++	 * Only applies to Nvidia root ports (bus 0) and not to
++	 * Nvidia graphics cards with PCI ports on secondary buses.
++	 */
++	if (num)
++		return;
++
++	/*
+ 	 * All timer overrides on Nvidia are
+ 	 * wrong unless HPET is enabled.
+ 	 * Unfortunately that's not true on many Asus boards.
+@@ -588,6 +602,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
+ #endif
+ }
+ 
++#define BCM4331_MMIO_SIZE	16384
++#define BCM4331_PM_CAP		0x40
++#define bcma_aread32(reg)	ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
++#define bcma_awrite32(reg, val)	iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
++
++static void __init apple_airport_reset(int bus, int slot, int func)
++{
++	void __iomem *mmio;
++	u16 pmcsr;
++	u64 addr;
++	int i;
++
++	if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
++		return;
++
++	/* Card may have been put into PCI_D3hot by grub quirk */
++	pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
++
++	if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
++		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
++		write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
++		mdelay(10);
++
++		pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
++		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
++			dev_err("Cannot power up Apple AirPort card\n");
++			return;
++		}
++	}
++
++	addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
++	addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
++	addr &= PCI_BASE_ADDRESS_MEM_MASK;
++
++	mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
++	if (!mmio) {
++		dev_err("Cannot iomap Apple AirPort card\n");
++		return;
++	}
++
++	pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
++
++	for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
++		udelay(10);
++
++	bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
++	bcma_aread32(BCMA_RESET_CTL);
++	udelay(1);
++
++	bcma_awrite32(BCMA_RESET_CTL, 0);
++	bcma_aread32(BCMA_RESET_CTL);
++	udelay(10);
++
++	early_iounmap(mmio, BCM4331_MMIO_SIZE);
++}
+ 
+ #define QFLAG_APPLY_ONCE 	0x1
+ #define QFLAG_APPLIED		0x2
+@@ -601,12 +670,6 @@ struct chipset {
+ 	void (*f)(int num, int slot, int func);
+ };
+ 
+-/*
+- * Only works for devices on the root bus. If you add any devices
+- * not on bus 0 readd another loop level in early_quirks(). But
+- * be careful because at least the Nvidia quirk here relies on
+- * only matching on bus 0.
+- */
+ static struct chipset early_qrk[] __initdata = {
+ 	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ 	  PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
+@@ -632,9 +695,13 @@ static struct chipset early_qrk[] __initdata = {
+ 	 */
+ 	{ PCI_VENDOR_ID_INTEL, 0x0f00,
+ 		PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
++	{ PCI_VENDOR_ID_BROADCOM, 0x4331,
++	  PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
+ 	{}
+ };
+ 
++static void __init early_pci_scan_bus(int bus);
++
+ /**
+  * check_dev_quirk - apply early quirks to a given PCI device
+  * @num: bus number
+@@ -643,7 +710,7 @@ static struct chipset early_qrk[] __initdata = {
+  *
+  * Check the vendor & device ID against the early quirks table.
+  *
+- * If the device is single function, let early_quirks() know so we don't
++ * If the device is single function, let early_pci_scan_bus() know so we don't
+  * poke at this device again.
+  */
+ static int __init check_dev_quirk(int num, int slot, int func)
+@@ -652,6 +719,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
+ 	u16 vendor;
+ 	u16 device;
+ 	u8 type;
++	u8 sec;
+ 	int i;
+ 
+ 	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
+@@ -679,25 +747,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
+ 
+ 	type = read_pci_config_byte(num, slot, func,
+ 				    PCI_HEADER_TYPE);
++
++	if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
++		sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
++		if (sec > num)
++			early_pci_scan_bus(sec);
++	}
++
+ 	if (!(type & 0x80))
+ 		return -1;
+ 
+ 	return 0;
+ }
+ 
+-void __init early_quirks(void)
++static void __init early_pci_scan_bus(int bus)
+ {
+ 	int slot, func;
+ 
+-	if (!early_pci_allowed())
+-		return;
+-
+ 	/* Poor man's PCI discovery */
+-	/* Only scan the root bus */
+ 	for (slot = 0; slot < 32; slot++)
+ 		for (func = 0; func < 8; func++) {
+ 			/* Only probe function 0 on single fn devices */
+-			if (check_dev_quirk(0, slot, func))
++			if (check_dev_quirk(bus, slot, func))
+ 				break;
+ 		}
+ }
++
++void __init early_quirks(void)
++{
++	if (!early_pci_allowed())
++		return;
++
++	early_pci_scan_bus(0);
++}
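
The restructuring above lets the early quirk pass descend through
PCI-to-PCI bridges instead of scanning only bus 0: a header-type-1 device
exposes its secondary bus number, and that bus is scanned recursively, with
the sec > num guard keeping a misprogrammed bridge from sending the walk in
circles. Reduced to its skeleton, using the same config-space accessors the
patch itself uses:

	static void __init scan_bus(int bus)
	{
		int slot, func;
		u8 type, sec;

		for (slot = 0; slot < 32; slot++)
			for (func = 0; func < 8; func++) {
				type = read_pci_config_byte(bus, slot, func,
							    PCI_HEADER_TYPE);
				if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
					sec = read_pci_config_byte(bus, slot, func,
								   PCI_SECONDARY_BUS);
					if (sec > bus)
						scan_bus(sec);	/* descend */
				}
				if (func == 0 && !(type & 0x80))
					break;	/* single-function device */
			}
	}
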
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index e0064d180f04..9daf46bf3a28 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4138,6 +4138,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	 */
+ 	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
+ 
++	/*
++	 * Device times out with higher max sects.
++	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
++	 */
++	{ "LITEON CX1-JB256-HP", NULL,		ATA_HORKAGE_MAX_SEC_1024 },
++
+ 	/* Devices we expect to fail diagnostics */
+ 
+ 	/* Devices where NCQ should be avoided */
+diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
+index 15f2b2e242ea..d492c2eddd7b 100644
+--- a/drivers/bcma/bcma_private.h
++++ b/drivers/bcma/bcma_private.h
+@@ -8,8 +8,6 @@
+ #include <linux/bcma/bcma.h>
+ #include <linux/delay.h>
+ 
+-#define BCMA_CORE_SIZE		0x1000
+-
+ #define bcma_err(bus, fmt, ...) \
+ 	pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+ #define bcma_warn(bus, fmt, ...) \
+diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+index f7929e769250..7ab9cc456dd2 100644
+--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
++++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
+@@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
+ 	if (!mutex_is_locked(mutex))
+ 		return false;
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
++#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
+ 	return mutex->owner == task;
+ #else
+ 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
+diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
+index 8d9b7de25613..0c4fd830d64b 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo.c
++++ b/drivers/gpu/drm/ttm/ttm_bo.c
+@@ -1000,9 +1000,9 @@ out_unlock:
+ 	return ret;
+ }
+ 
+-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+-			      struct ttm_mem_reg *mem,
+-			      uint32_t *new_flags)
++bool ttm_bo_mem_compat(struct ttm_placement *placement,
++		       struct ttm_mem_reg *mem,
++		       uint32_t *new_flags)
+ {
+ 	int i;
+ 
+@@ -1034,6 +1034,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ 
+ 	return false;
+ }
++EXPORT_SYMBOL(ttm_bo_mem_compat);
+ 
+ int ttm_bo_validate(struct ttm_buffer_object *bo,
+ 			struct ttm_placement *placement,
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 61c761156371..a450c4ee1217 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -1025,6 +1025,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
+ 	int ep_irq_in_idx;
+ 	int i, error;
+ 
++	if (intf->cur_altsetting->desc.bNumEndpoints != 2)
++		return -ENODEV;
++
+ 	for (i = 0; xpad_device[i].idVendor; i++) {
+ 		if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
+ 		    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
+diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
+index a50750ce511d..ce51bf19cef3 100644
+--- a/drivers/media/usb/airspy/airspy.c
++++ b/drivers/media/usb/airspy/airspy.c
+@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
+ 	if (ret) {
+ 		dev_err(s->dev, "Failed to register as video device (%d)\n",
+ 				ret);
+-		goto err_unregister_v4l2_dev;
++		goto err_free_controls;
+ 	}
+ 	dev_info(s->dev, "Registered as %s\n",
+ 			video_device_node_name(&s->vdev));
+@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
+ 
+ err_free_controls:
+ 	v4l2_ctrl_handler_free(&s->hdl);
+-err_unregister_v4l2_dev:
+ 	v4l2_device_unregister(&s->v4l2_dev);
+ err_free_mem:
+ 	kfree(s);
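
The airspy fix is a textbook goto-ladder repair: once the control handler
has been initialized, any later failure has to jump to the label that frees
it, and the labels must unwind in exactly the reverse of setup order. The
idiom, with placeholder init/fini pairs:

	static int probe(void)
	{
		int err;

		err = init_a();			/* placeholder steps */
		if (err)
			return err;
		err = init_b();
		if (err)
			goto undo_a;
		err = init_c();
		if (err)
			goto undo_b;	/* not undo_a: b is live here */
		return 0;

	undo_b:
		fini_b();
	undo_a:
		fini_a();
		return err;
	}
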
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 3705c7e63521..ccefd6ca9c99 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -1664,8 +1664,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 
+ 	packed_cmd_hdr = packed->cmd_hdr;
+ 	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+-	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+-		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
++	packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
++		(PACKED_CMD_WR << 8) | PACKED_CMD_VER);
+ 	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+ 
+ 	/*
+@@ -1679,14 +1679,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ 			((brq->data.blocks * brq->data.blksz) >=
+ 			 card->ext_csd.data_tag_unit_size);
+ 		/* Argument of CMD23 */
+-		packed_cmd_hdr[(i * 2)] =
++		packed_cmd_hdr[(i * 2)] = cpu_to_le32(
+ 			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ 			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+-			blk_rq_sectors(prq);
++			blk_rq_sectors(prq));
+ 		/* Argument of CMD18 or CMD25 */
+-		packed_cmd_hdr[((i * 2)) + 1] =
++		packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
+ 			mmc_card_blockaddr(card) ?
+-			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
++			blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
+ 		packed->blocks += blk_rq_sectors(prq);
+ 		i++;
+ 	}
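
The packed command header is parsed by the eMMC device as little-endian
32-bit words, so every word has to pass through cpu_to_le32() rather than
being stored in host byte order; on big-endian machines the unconverted
header was garbage on the medium. The same discipline in portable userspace
form, assembling the bytes explicitly:

	#include <stdint.h>

	/* store a 32-bit value little-endian regardless of host order */
	static void put_le32(uint8_t *p, uint32_t v)
	{
		p[0] = v & 0xff;
		p[1] = (v >> 8) & 0xff;
		p[2] = (v >> 16) & 0xff;
		p[3] = (v >> 24) & 0xff;
	}
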
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index e4c079612100..40161dacc9c7 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -222,7 +222,7 @@
+ /* Various constants */
+ 
+ /* Coalescing */
+-#define MVNETA_TXDONE_COAL_PKTS		1
++#define MVNETA_TXDONE_COAL_PKTS		0	/* interrupt per packet */
+ #define MVNETA_RX_COAL_PKTS		32
+ #define MVNETA_RX_COAL_USEC		100
+ 
+diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
+index 38a8bbe74810..83797d89c30f 100644
+--- a/drivers/pps/clients/pps_parport.c
++++ b/drivers/pps/clients/pps_parport.c
+@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
+ 	struct pps_client_pp *device;
+ 
+ 	/* FIXME: oooh, this is ugly! */
+-	if (strcmp(pardev->name, KBUILD_MODNAME))
++	if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
+ 		/* not our port */
+ 		return;
+ 
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index 8a89f6e7715d..8353ce1991b8 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -365,34 +365,22 @@ static void to_utf8(struct vc_data *vc, uint c)
+ 
+ static void do_compute_shiftstate(void)
+ {
+-	unsigned int i, j, k, sym, val;
++	unsigned int k, sym, val;
+ 
+ 	shift_state = 0;
+ 	memset(shift_down, 0, sizeof(shift_down));
+ 
+-	for (i = 0; i < ARRAY_SIZE(key_down); i++) {
+-
+-		if (!key_down[i])
++	for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
++		sym = U(key_maps[0][k]);
++		if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+ 			continue;
+ 
+-		k = i * BITS_PER_LONG;
+-
+-		for (j = 0; j < BITS_PER_LONG; j++, k++) {
+-
+-			if (!test_bit(k, key_down))
+-				continue;
++		val = KVAL(sym);
++		if (val == KVAL(K_CAPSSHIFT))
++			val = KVAL(K_SHIFT);
+ 
+-			sym = U(key_maps[0][k]);
+-			if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
+-				continue;
+-
+-			val = KVAL(sym);
+-			if (val == KVAL(K_CAPSSHIFT))
+-				val = KVAL(K_SHIFT);
+-
+-			shift_down[val]++;
+-			shift_state |= (1 << val);
+-		}
++		shift_down[val]++;
++		shift_state |= BIT(val);
+ 	}
+ }
+ 
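
The keyboard rewrite above collapses a hand-rolled two-level scan of the
key_down bitmap (outer loop over words, inner loop over bits) into
for_each_set_bit(), which visits only the set bits below the given limit
and skips all-zero words via find_next_bit(). A naive standalone rendering
of what the helper iterates, with a caller-supplied callback:

	#include <limits.h>

	#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

	/* call fn(k) for every set bit k in map[0..nbits) */
	static void visit_set_bits(const unsigned long *map, unsigned int nbits,
				   void (*fn)(unsigned int bit))
	{
		unsigned int k;

		for (k = 0; k < nbits; k++)
			if (map[k / BITS_PER_LONG] & (1UL << (k % BITS_PER_LONG)))
				fn(k);
	}
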
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index a2b1d7ce3e1a..977236a46aa2 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
+ 	struct dentry *upper;
+ 	struct dentry *opaquedir = NULL;
+ 	int err;
++	int flags = 0;
+ 
+ 	if (WARN_ON(!workdir))
+ 		return -EROFS;
+@@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
+ 	if (err)
+ 		goto out_dput;
+ 
+-	whiteout = ovl_whiteout(workdir, dentry);
+-	err = PTR_ERR(whiteout);
+-	if (IS_ERR(whiteout))
++	upper = lookup_one_len(dentry->d_name.name, upperdir,
++			       dentry->d_name.len);
++	err = PTR_ERR(upper);
++	if (IS_ERR(upper))
+ 		goto out_unlock;
+ 
+-	upper = ovl_dentry_upper(dentry);
+-	if (!upper) {
+-		upper = lookup_one_len(dentry->d_name.name, upperdir,
+-				       dentry->d_name.len);
+-		err = PTR_ERR(upper);
+-		if (IS_ERR(upper))
+-			goto kill_whiteout;
+-
+-		err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
+-		dput(upper);
+-		if (err)
+-			goto kill_whiteout;
+-	} else {
+-		int flags = 0;
++	err = -ESTALE;
++	if ((opaquedir && upper != opaquedir) ||
++	    (!opaquedir && ovl_dentry_upper(dentry) &&
++	     upper != ovl_dentry_upper(dentry))) {
++		goto out_dput_upper;
++	}
+ 
+-		if (opaquedir)
+-			upper = opaquedir;
+-		err = -ESTALE;
+-		if (upper->d_parent != upperdir)
+-			goto kill_whiteout;
++	whiteout = ovl_whiteout(workdir, dentry);
++	err = PTR_ERR(whiteout);
++	if (IS_ERR(whiteout))
++		goto out_dput_upper;
+ 
+-		if (is_dir)
+-			flags |= RENAME_EXCHANGE;
++	if (d_is_dir(upper))
++		flags = RENAME_EXCHANGE;
+ 
+-		err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+-		if (err)
+-			goto kill_whiteout;
++	err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
++	if (err)
++		goto kill_whiteout;
++	if (flags)
++		ovl_cleanup(wdir, upper);
+ 
+-		if (is_dir)
+-			ovl_cleanup(wdir, upper);
+-	}
+ 	ovl_dentry_version_inc(dentry->d_parent);
+ out_d_drop:
+ 	d_drop(dentry);
+ 	dput(whiteout);
++out_dput_upper:
++	dput(upper);
+ out_unlock:
+ 	unlock_rename(workdir, upperdir);
+ out_dput:
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index edd2a4a5fd3c..97fd65700ae2 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -67,6 +67,10 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 		upperdentry = ovl_dentry_upper(dentry);
+ 
+ 		mutex_lock(&upperdentry->d_inode->i_mutex);
++
++		if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
++			attr->ia_valid &= ~ATTR_MODE;
++
+ 		err = notify_change(upperdentry, attr, NULL);
+ 		if (!err)
+ 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+@@ -411,12 +415,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+ 	if (!inode)
+ 		return NULL;
+ 
+-	mode &= S_IFMT;
+-
+ 	inode->i_ino = get_next_ino();
+ 	inode->i_mode = mode;
+ 	inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ 
++	mode &= S_IFMT;
+ 	switch (mode) {
+ 	case S_IFDIR:
+ 		inode->i_private = oe;
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index ea5a40b06e3a..983540910ba8 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -181,6 +181,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
+ {
+ 	to->i_uid = from->i_uid;
+ 	to->i_gid = from->i_gid;
++	to->i_mode = from->i_mode;
+ }
+ 
+ /* dir.c */
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 8bd374d3cf21..13671bc9a288 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -521,15 +521,19 @@
+ 
+ #define INIT_TEXT							\
+ 	*(.init.text)							\
++	*(.text.startup)						\
+ 	MEM_DISCARD(init.text)
+ 
+ #define EXIT_DATA							\
+ 	*(.exit.data)							\
++	*(.fini_array)							\
++	*(.dtors)							\
+ 	MEM_DISCARD(exit.data)						\
+ 	MEM_DISCARD(exit.rodata)
+ 
+ #define EXIT_TEXT							\
+ 	*(.exit.text)							\
++	*(.text.exit)							\
+ 	MEM_DISCARD(exit.text)
+ 
+ #define EXIT_CALL							\
+diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
+index c768ddfbe53c..b7bfa513e6ed 100644
+--- a/include/drm/ttm/ttm_bo_api.h
++++ b/include/drm/ttm/ttm_bo_api.h
+@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
+  */
+ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
+ 		       bool interruptible, bool no_wait);
++
++/**
++ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
++ *
++ * @placement:  The placement to check @mem against
++ * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
++ * @new_flags: Describes compatible placement found
++ *
++ * Returns true if the placement is compatible
++ */
++extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
++			      struct ttm_mem_reg *mem,
++			      uint32_t *new_flags);
++
+ /**
+  * ttm_bo_validate
+  *
+diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
+index e34f906647d3..567fb450fbfe 100644
+--- a/include/linux/bcma/bcma.h
++++ b/include/linux/bcma/bcma.h
+@@ -154,6 +154,7 @@ struct bcma_host_ops {
+ #define BCMA_CORE_DEFAULT		0xFFF
+ 
+ #define BCMA_MAX_NR_CORES		16
++#define BCMA_CORE_SIZE			0x1000
+ 
+ /* Chip IDs of PCIe devices */
+ #define BCMA_CHIP_ID_BCM4313	0x4313
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index 7741efa43b35..cc615e273f80 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
+ 			   unsigned int target_offset,
+ 			   unsigned int next_offset);
+ 
++unsigned int *xt_alloc_entry_offsets(unsigned int size);
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ 		   bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 5d5174b59802..673dee29a9b9 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -382,6 +382,7 @@ static inline __must_check
+ void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+ {
+ 	iter->next_index = iter->index;
++	iter->tags = 0;
+ 	return NULL;
+ }
+ 
+diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
+index 0075da74abf0..57d1acb91c56 100644
+--- a/kernel/time/posix-cpu-timers.c
++++ b/kernel/time/posix-cpu-timers.c
+@@ -784,6 +784,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
+ 			timer->it.cpu.expires = 0;
+ 			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+ 					   &itp->it_value);
++			return;
+ 		} else {
+ 			cpu_timer_sample_group(timer->it_clock, p, &now);
+ 			unlock_task_sighand(p, &flags);
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 32c719a4bc3d..f93ada7403bf 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -948,13 +948,10 @@ static void isolate_freepages(struct compact_control *cc)
+ 	 * pages on cc->migratepages. We stop searching if the migrate
+ 	 * and free page scanners meet or enough free pages are isolated.
+ 	 */
+-	for (; block_start_pfn >= low_pfn &&
+-			cc->nr_migratepages > cc->nr_freepages;
++	for (; block_start_pfn >= low_pfn;
+ 				block_end_pfn = block_start_pfn,
+ 				block_start_pfn -= pageblock_nr_pages,
+ 				isolate_start_pfn = block_start_pfn) {
+-		unsigned long isolated;
+-
+ 		/*
+ 		 * This can iterate a massively long zone without finding any
+ 		 * suitable migration targets, so periodically check if we need
+@@ -978,43 +975,43 @@ static void isolate_freepages(struct compact_control *cc)
+ 			continue;
+ 
+ 		/* Found a block suitable for isolating free pages from. */
+-		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+-						block_end_pfn, freelist, false);
+-		/* If isolation failed early, do not continue needlessly */
+-		if (!isolated && isolate_start_pfn < block_end_pfn &&
+-		    cc->nr_migratepages > cc->nr_freepages)
+-			break;
+-
+-		/*
+-		 * Remember where the free scanner should restart next time,
+-		 * which is where isolate_freepages_block() left off.
+-		 * But if it scanned the whole pageblock, isolate_start_pfn
+-		 * now points at block_end_pfn, which is the start of the next
+-		 * pageblock.
+-		 * In that case we will however want to restart at the start
+-		 * of the previous pageblock.
+-		 */
+-		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
+-				isolate_start_pfn :
+-				block_start_pfn - pageblock_nr_pages;
++		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
++					freelist, false);
+ 
+ 		/*
+-		 * isolate_freepages_block() might have aborted due to async
+-		 * compaction being contended
++		 * If we isolated enough freepages, or aborted due to lock
++		 * contention, terminate.
+ 		 */
+-		if (cc->contended)
++		if ((cc->nr_freepages >= cc->nr_migratepages)
++							|| cc->contended) {
++			if (isolate_start_pfn >= block_end_pfn) {
++				/*
++				 * Restart at previous pageblock if more
++				 * freepages can be isolated next time.
++				 */
++				isolate_start_pfn =
++					block_start_pfn - pageblock_nr_pages;
++			}
+ 			break;
++		} else if (isolate_start_pfn < block_end_pfn) {
++			/*
++			 * If isolation failed early, do not continue
++			 * needlessly.
++			 */
++			break;
++		}
+ 	}
+ 
+ 	/* split_free_page does not map the pages */
+ 	map_pages(freelist);
+ 
+ 	/*
+-	 * If we crossed the migrate scanner, we want to keep it that way
+-	 * so that compact_finished() may detect this
++	 * Record where the free scanner will restart next time. Either we
++	 * broke from the loop and set isolate_start_pfn based on the last
++	 * call to isolate_freepages_block(), or we met the migration scanner
++	 * and the loop terminated due to isolate_start_pfn < low_pfn
+ 	 */
+-	if (block_start_pfn < low_pfn)
+-		cc->free_pfn = cc->migrate_pfn;
++	cc->free_pfn = isolate_start_pfn;
+ }
+ 
+ /*
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index 4a3125836b64..ddc3573894b0 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -1192,6 +1192,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
+ }
+ 
+ /*
++ * Encoding order is (new_up_client, new_state, new_weight).  Need to
++ * apply in the (new_weight, new_state, new_up_client) order, because
++ * an incremental map may look like e.g.
++ *
++ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
++ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
++ */
++static int decode_new_up_state_weight(void **p, void *end,
++				      struct ceph_osdmap *map)
++{
++	void *new_up_client;
++	void *new_state;
++	void *new_weight_end;
++	u32 len;
++
++	new_up_client = *p;
++	ceph_decode_32_safe(p, end, len, e_inval);
++	len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
++	ceph_decode_need(p, end, len, e_inval);
++	*p += len;
++
++	new_state = *p;
++	ceph_decode_32_safe(p, end, len, e_inval);
++	len *= sizeof(u32) + sizeof(u8);
++	ceph_decode_need(p, end, len, e_inval);
++	*p += len;
++
++	/* new_weight */
++	ceph_decode_32_safe(p, end, len, e_inval);
++	while (len--) {
++		s32 osd;
++		u32 w;
++
++		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
++		osd = ceph_decode_32(p);
++		w = ceph_decode_32(p);
++		BUG_ON(osd >= map->max_osd);
++		pr_info("osd%d weight 0x%x %s\n", osd, w,
++		     w == CEPH_OSD_IN ? "(in)" :
++		     (w == CEPH_OSD_OUT ? "(out)" : ""));
++		map->osd_weight[osd] = w;
++
++		/*
++		 * If we are marking in, set the EXISTS, and clear the
++		 * AUTOOUT and NEW bits.
++		 */
++		if (w) {
++			map->osd_state[osd] |= CEPH_OSD_EXISTS;
++			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
++						 CEPH_OSD_NEW);
++		}
++	}
++	new_weight_end = *p;
++
++	/* new_state (up/down) */
++	*p = new_state;
++	len = ceph_decode_32(p);
++	while (len--) {
++		s32 osd;
++		u8 xorstate;
++		int ret;
++
++		osd = ceph_decode_32(p);
++		xorstate = ceph_decode_8(p);
++		if (xorstate == 0)
++			xorstate = CEPH_OSD_UP;
++		BUG_ON(osd >= map->max_osd);
++		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
++		    (xorstate & CEPH_OSD_UP))
++			pr_info("osd%d down\n", osd);
++		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
++		    (xorstate & CEPH_OSD_EXISTS)) {
++			pr_info("osd%d does not exist\n", osd);
++			map->osd_weight[osd] = CEPH_OSD_IN;
++			ret = set_primary_affinity(map, osd,
++						   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
++			if (ret)
++				return ret;
++			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
++			map->osd_state[osd] = 0;
++		} else {
++			map->osd_state[osd] ^= xorstate;
++		}
++	}
++
++	/* new_up_client */
++	*p = new_up_client;
++	len = ceph_decode_32(p);
++	while (len--) {
++		s32 osd;
++		struct ceph_entity_addr addr;
++
++		osd = ceph_decode_32(p);
++		ceph_decode_copy(p, &addr, sizeof(addr));
++		ceph_decode_addr(&addr);
++		BUG_ON(osd >= map->max_osd);
++		pr_info("osd%d up\n", osd);
++		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
++		map->osd_addr[osd] = addr;
++	}
++
++	*p = new_weight_end;
++	return 0;
++
++e_inval:
++	return -EINVAL;
++}
++
++/*
+  * decode and apply an incremental map update.
+  */
+ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+@@ -1290,49 +1399,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ 			__remove_pg_pool(&map->pg_pools, pi);
+ 	}
+ 
+-	/* new_up */
+-	ceph_decode_32_safe(p, end, len, e_inval);
+-	while (len--) {
+-		u32 osd;
+-		struct ceph_entity_addr addr;
+-		ceph_decode_32_safe(p, end, osd, e_inval);
+-		ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
+-		ceph_decode_addr(&addr);
+-		pr_info("osd%d up\n", osd);
+-		BUG_ON(osd >= map->max_osd);
+-		map->osd_state[osd] |= CEPH_OSD_UP;
+-		map->osd_addr[osd] = addr;
+-	}
+-
+-	/* new_state */
+-	ceph_decode_32_safe(p, end, len, e_inval);
+-	while (len--) {
+-		u32 osd;
+-		u8 xorstate;
+-		ceph_decode_32_safe(p, end, osd, e_inval);
+-		xorstate = **(u8 **)p;
+-		(*p)++;  /* clean flag */
+-		if (xorstate == 0)
+-			xorstate = CEPH_OSD_UP;
+-		if (xorstate & CEPH_OSD_UP)
+-			pr_info("osd%d down\n", osd);
+-		if (osd < map->max_osd)
+-			map->osd_state[osd] ^= xorstate;
+-	}
+-
+-	/* new_weight */
+-	ceph_decode_32_safe(p, end, len, e_inval);
+-	while (len--) {
+-		u32 osd, off;
+-		ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
+-		osd = ceph_decode_32(p);
+-		off = ceph_decode_32(p);
+-		pr_info("osd%d weight 0x%x %s\n", osd, off,
+-		     off == CEPH_OSD_IN ? "(in)" :
+-		     (off == CEPH_OSD_OUT ? "(out)" : ""));
+-		if (osd < map->max_osd)
+-			map->osd_weight[osd] = off;
+-	}
++	/* new_up_client, new_state, new_weight */
++	err = decode_new_up_state_weight(p, end, map);
++	if (err)
++		goto bad;
+ 
+ 	/* new_pg_temp */
+ 	err = decode_new_pg_temp(p, end, map);
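
decode_new_up_state_weight() above solves an ordering mismatch without
buffering: it walks the stream once to record where each section starts and
to validate its length, then rewinds the cursor to each saved position in
the required apply order, and finally parks the cursor past the last byte
consumed. The technique reduced to two sections, with hypothetical
skip/apply helpers:

	/* stream layout is A then B, but B must be applied before A */
	const void *sec_a = p;
	p = skip_section(p, end);	/* hypothetical: validate + skip */
	const void *sec_b = p;
	p = skip_section(p, end);
	const void *after = p;		/* first byte past both sections */

	apply_section(sec_b);		/* hypothetical: decode + apply */
	apply_section(sec_a);
	p = after;			/* resume normal decoding */
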
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index 2953ee9e5fa0..ebf5821caefb 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -362,23 +362,12 @@ static inline bool unconditional(const struct arpt_entry *e)
+ 	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct arpt_entry *target)
+-{
+-	struct arpt_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+  * there are loops.  Puts hook bitmask in comefrom.
+  */
+ static int mark_source_chains(const struct xt_table_info *newinfo,
+-			      unsigned int valid_hooks, void *entry0)
++			      unsigned int valid_hooks, void *entry0,
++			      unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -467,10 +456,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct arpt_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -630,6 +620,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+                            const struct arpt_replace *repl)
+ {
+ 	struct arpt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -643,6 +634,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 
+ 	/* Walk through entries, checking offsets. */
+@@ -653,20 +647,21 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			break;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(arpt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 	duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
+-	if (ret != 0)
+-		return ret;
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -677,17 +672,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -714,6 +712,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void get_counters(const struct xt_table_info *t,
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 3bcf28bf1525..8e729cba1e59 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -438,23 +438,12 @@ ipt_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct ipt_entry *target)
+-{
+-	struct ipt_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -547,10 +536,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct ipt_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -797,6 +787,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+                 const struct ipt_replace *repl)
+ {
+ 	struct ipt_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -810,6 +801,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -819,17 +813,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ipt_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -840,17 +837,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -877,6 +877,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 5254d76dfce8..98e99fa833f1 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -451,23 +451,12 @@ ip6t_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
+-static bool find_jump_target(const struct xt_table_info *t,
+-			     const struct ip6t_entry *target)
+-{
+-	struct ip6t_entry *iter;
+-
+-	xt_entry_foreach(iter, t->entries, t->size) {
+-		 if (iter == target)
+-			return true;
+-	}
+-	return false;
+-}
+-
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+ mark_source_chains(const struct xt_table_info *newinfo,
+-		   unsigned int valid_hooks, void *entry0)
++		   unsigned int valid_hooks, void *entry0,
++		   unsigned int *offsets)
+ {
+ 	unsigned int hook;
+ 
+@@ -560,10 +549,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					if (!xt_find_jump_offset(offsets, newpos,
++								 newinfo->number))
++						return 0;
+ 					e = (struct ip6t_entry *)
+ 						(entry0 + newpos);
+-					if (!find_jump_target(newinfo, e))
+-						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
+@@ -810,6 +800,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+                 const struct ip6t_replace *repl)
+ {
+ 	struct ip6t_entry *iter;
++	unsigned int *offsets;
+ 	unsigned int i;
+ 	int ret = 0;
+ 
+@@ -823,6 +814,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	duprintf("translate_table: size %u\n", newinfo->size);
++	offsets = xt_alloc_entry_offsets(newinfo->number);
++	if (!offsets)
++		return -ENOMEM;
+ 	i = 0;
+ 	/* Walk through entries, checking offsets. */
+ 	xt_entry_foreach(iter, entry0, newinfo->size) {
+@@ -832,17 +826,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 						 repl->underflow,
+ 						 repl->valid_hooks);
+ 		if (ret != 0)
+-			return ret;
++			goto out_free;
++		if (i < repl->num_entries)
++			offsets[i] = (void *)iter - entry0;
+ 		++i;
+ 		if (strcmp(ip6t_get_target(iter)->u.user.name,
+ 		    XT_ERROR_TARGET) == 0)
+ 			++newinfo->stacksize;
+ 	}
+ 
++	ret = -EINVAL;
+ 	if (i != repl->num_entries) {
+ 		duprintf("translate_table: %u not %u entries\n",
+ 			 i, repl->num_entries);
+-		return -EINVAL;
++		goto out_free;
+ 	}
+ 
+ 	/* Check hooks all assigned */
+@@ -853,17 +850,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid hook entry %u %u\n",
+ 				 i, repl->hook_entry[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
+ 			duprintf("Invalid underflow %u %u\n",
+ 				 i, repl->underflow[i]);
+-			return -EINVAL;
++			goto out_free;
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+-		return -ELOOP;
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
++		ret = -ELOOP;
++		goto out_free;
++	}
++	kvfree(offsets);
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -890,6 +890,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
+ 	}
+ 
+ 	return ret;
++ out_free:
++	kvfree(offsets);
++	return ret;
+ }
+ 
+ static void
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 4b850c639ac5..703fc9ba6f20 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -704,6 +704,56 @@ int xt_check_entry_offsets(const void *base,
+ }
+ EXPORT_SYMBOL(xt_check_entry_offsets);
+ 
++/**
++ * xt_alloc_entry_offsets - allocate array to store rule head offsets
++ *
++ * @size: number of entries
++ *
++ * Return: NULL or kmalloc'd or vmalloc'd array
++ */
++unsigned int *xt_alloc_entry_offsets(unsigned int size)
++{
++	unsigned int *off;
++
++	off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
++
++	if (off)
++		return off;
++
++	if (size < (SIZE_MAX / sizeof(unsigned int)))
++		off = vmalloc(size * sizeof(unsigned int));
++
++	return off;
++}
++EXPORT_SYMBOL(xt_alloc_entry_offsets);
++
++/**
++ * xt_find_jump_offset - check if target is a valid jump offset
++ *
++ * @offsets: array containing all valid rule start offsets of a rule blob
++ * @target: the jump target to search for
++ * @size: entries in @offsets
++ */
++bool xt_find_jump_offset(const unsigned int *offsets,
++			 unsigned int target, unsigned int size)
++{
++	int m, low = 0, hi = size;
++
++	while (hi > low) {
++		m = (low + hi) / 2u;
++
++		if (offsets[m] > target)
++			hi = m;
++		else if (offsets[m] < target)
++			low = m + 1;
++		else
++			return true;
++	}
++
++	return false;
++}
++EXPORT_SYMBOL(xt_find_jump_offset);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
+diff --git a/sound/core/control.c b/sound/core/control.c
+index a85d45595d02..b4fe9b002512 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
+ 	
+ 	if (snd_BUG_ON(!card || !id))
+ 		return;
++	if (card->shutdown)
++		return;
+ 	read_lock(&card->ctl_files_rwlock);
+ #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
+ 	card->mixer_oss_change_count++;
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index dfed728d8c87..f6e7fdd354de 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
+ }
+ EXPORT_SYMBOL(snd_pcm_new_internal);
+ 
++static void free_chmap(struct snd_pcm_str *pstr)
++{
++	if (pstr->chmap_kctl) {
++		snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
++		pstr->chmap_kctl = NULL;
++	}
++}
++
+ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
+ {
+ 	struct snd_pcm_substream *substream, *substream_next;
+@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
+ 		kfree(setup);
+ 	}
+ #endif
++	free_chmap(pstr);
+ 	if (pstr->substream_count)
+ 		put_device(&pstr->dev);
+ }
+@@ -1138,10 +1147,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
+ 	for (cidx = 0; cidx < 2; cidx++) {
+ 		if (!pcm->internal)
+ 			snd_unregister_device(&pcm->streams[cidx].dev);
+-		if (pcm->streams[cidx].chmap_kctl) {
+-			snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
+-			pcm->streams[cidx].chmap_kctl = NULL;
+-		}
++		free_chmap(&pcm->streams[cidx]);
+ 	}
+ 	mutex_unlock(&pcm->open_mutex);
+ 	mutex_unlock(&register_mutex);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 9e113bc3b02d..f24a69db0dd8 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1123,8 +1123,10 @@ static int azx_free(struct azx *chip)
+ 	if (use_vga_switcheroo(hda)) {
+ 		if (chip->disabled && chip->bus)
+ 			snd_hda_unlock_devices(chip->bus);
+-		if (hda->vga_switcheroo_registered)
++		if (hda->vga_switcheroo_registered) {
+ 			vga_switcheroo_unregister_client(chip->pci);
++			vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
++		}
+ 	}
+ 
+ 	if (chip->initialized) {
+@@ -2116,6 +2118,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x157a),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0x15b3),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x793b),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x7919),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 707bc5405d9f..b3234321aa4b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5590,7 +5590,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+-	{0x12, 0xb7a60130}, \
+ 	{0x21, 0x04211020}
+ 
+ #define ALC255_STANDARD_PINS \
+@@ -5650,10 +5649,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
+ 		{0x14, 0x901701a0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
+ 		{0x14, 0x901701b0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60150},
++		{0x14, 0x901701a0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60150},
++		{0x14, 0x901701b0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x12, 0xb7a60130},
++		{0x1b, 0x90170110}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x40300000},


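The netfilter hunks are the substance of this patch: mark_source_chains()
used to validate each jump by linearly rescanning the whole rule blob for
the target (the removed find_jump_target()), which is quadratic in the
number of rules. The replacement records every rule head offset while
translate_table() walks the entries (the array comes out sorted because
the walk proceeds in address order) and then validates jumps with a binary
search. A minimal userspace sketch of that search, in plain C with
illustrative names rather than the kernel's exported symbols:

    #include <stdbool.h>

    /* offsets[] holds every valid rule start offset, sorted ascending. */
    static bool find_jump_offset(const unsigned int *offsets,
                                 unsigned int target, unsigned int size)
    {
        unsigned int low = 0, hi = size;

        while (hi > low) {
            unsigned int m = low + (hi - low) / 2;

            if (offsets[m] > target)
                hi = m;
            else if (offsets[m] < target)
                low = m + 1;
            else
                return true;  /* jump lands on a rule head */
        }
        return false;  /* mid-rule jump: reject the ruleset */
    }

The kernel's xt_find_jump_offset() is the same loop with the midpoint
written as (low + hi) / 2u; both forms are fine here, since no practical
ruleset has enough entries for low + hi to overflow.
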
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-07-31 16:01 Mike Pagano
From: Mike Pagano @ 2016-07-31 16:01 UTC
  To: gentoo-commits

commit:     1f9b6e4fcf4c1ec883981ad2404d0361ea65643d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 31 16:01:43 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 31 16:01:43 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1f9b6e4f

Linux kernel 4.1.29

 0000_README             |   4 +
 1028_linux-4.1.29.patch | 456 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 460 insertions(+)

diff --git a/0000_README b/0000_README
index 88ef9f3..a9101d3 100644
--- a/0000_README
+++ b/0000_README
@@ -155,6 +155,10 @@ Patch:  1027_linux-4.1.28.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.28
 
+Patch:  1028_linux-4.1.29.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.29
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1028_linux-4.1.29.patch b/1028_linux-4.1.29.patch
new file mode 100644
index 0000000..998db23
--- /dev/null
+++ b/1028_linux-4.1.29.patch
@@ -0,0 +1,456 @@
+diff --git a/Makefile b/Makefile
+index 241237cd4ca6..76fa21fa16b8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 28
++SUBLEVEL = 29
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index f33206e27d8d..2b697db33ca6 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -603,7 +603,8 @@ static inline struct page *pmd_page(pmd_t pmd)
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+-	pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
++	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
++		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
+ 	return pmd;
+ }
+ 
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 28dbbb0d12c4..9b19d96e9562 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -166,8 +166,7 @@ static struct ibm_pa_feature {
+ 	 * we don't want to turn on TM here, so we use the *_COMP versions
+ 	 * which are 0 if the kernel doesn't support TM.
+ 	 */
+-	{CPU_FTR_TM_COMP, 0, 0,
+-	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
++	{CPU_FTR_TM_COMP, 0, 0, PPC_FEATURE2_HTM_COMP, 22, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
+index 5caed1dd7ccf..80bb8c0349a7 100644
+--- a/arch/x86/kernel/amd_nb.c
++++ b/arch/x86/kernel/amd_nb.c
+@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
+ 	while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
+ 		i++;
+ 
+-	if (i == 0)
+-		return 0;
++	if (!i)
++		return -ENODEV;
+ 
+ 	nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
+ 	if (!nb)
+diff --git a/block/ioprio.c b/block/ioprio.c
+index 31666c92b46a..563435684c3c 100644
+--- a/block/ioprio.c
++++ b/block/ioprio.c
+@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p)
+ 	if (ret)
+ 		goto out;
+ 	ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
++	task_lock(p);
+ 	if (p->io_context)
+ 		ret = p->io_context->ioprio;
++	task_unlock(p);
+ out:
+ 	return ret;
+ }
+diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
+index b72906f5b999..2d4766bd61c3 100644
+--- a/drivers/gpio/gpio-sch.c
++++ b/drivers/gpio/gpio-sch.c
+@@ -63,9 +63,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
+ 	return gpio % 8;
+ }
+ 
+-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
++static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
+ {
+-	struct sch_gpio *sch = to_sch_gpio(gc);
+ 	unsigned short offset, bit;
+ 	u8 reg_val;
+ 
+@@ -77,10 +76,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+ 	return reg_val;
+ }
+ 
+-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
++static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
+ 			     int val)
+ {
+-	struct sch_gpio *sch = to_sch_gpio(gc);
+ 	unsigned short offset, bit;
+ 	u8 reg_val;
+ 
+@@ -100,14 +98,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
+ 	struct sch_gpio *sch = to_sch_gpio(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GIO, 1);
++	sch_gpio_reg_set(sch, gpio_num, GIO, 1);
+ 	spin_unlock(&sch->lock);
+ 	return 0;
+ }
+ 
+ static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
+ {
+-	return sch_gpio_reg_get(gc, gpio_num, GLV);
++	struct sch_gpio *sch = to_sch_gpio(gc);
++	return sch_gpio_reg_get(sch, gpio_num, GLV);
+ }
+ 
+ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
+@@ -115,7 +114,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
+ 	struct sch_gpio *sch = to_sch_gpio(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GLV, val);
++	sch_gpio_reg_set(sch, gpio_num, GLV, val);
+ 	spin_unlock(&sch->lock);
+ }
+ 
+@@ -125,7 +124,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
+ 	struct sch_gpio *sch = to_sch_gpio(gc);
+ 
+ 	spin_lock(&sch->lock);
+-	sch_gpio_reg_set(gc, gpio_num, GIO, 0);
++	sch_gpio_reg_set(sch, gpio_num, GIO, 0);
+ 	spin_unlock(&sch->lock);
+ 
+ 	/*
+@@ -184,13 +183,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
+ 		 * GPIO7 is configured by the CMC as SLPIOVR
+ 		 * Enable GPIO[9:8] core powered gpios explicitly
+ 		 */
+-		sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
+-		sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
++		sch_gpio_reg_set(sch, 8, GEN, 1);
++		sch_gpio_reg_set(sch, 9, GEN, 1);
+ 		/*
+ 		 * SUS_GPIO[2:0] enabled by default
+ 		 * Enable SUS_GPIO3 resume powered gpio explicitly
+ 		 */
+-		sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
++		sch_gpio_reg_set(sch, 13, GEN, 1);
+ 		break;
+ 
+ 	case PCI_DEVICE_ID_INTEL_ITC_LPC:
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 0ea0869120cf..187bea44d123 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -902,6 +902,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l2_set_offline(cgdev);
+ 
+ 	if (card->dev) {
++		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 04e42c649134..3f94738feb45 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -3362,6 +3362,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
+ 		qeth_l3_set_offline(cgdev);
+ 
+ 	if (card->dev) {
++		netif_napi_del(&card->napi);
+ 		unregister_netdev(card->dev);
+ 		card->dev = NULL;
+ 	}
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index cccab6188328..cd52c070701b 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -9966,6 +9966,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
+ 		ioa_cfg->intr_flag = IPR_USE_MSI;
+ 	else {
+ 		ioa_cfg->intr_flag = IPR_USE_LSI;
++		ioa_cfg->clear_isr = 1;
+ 		ioa_cfg->nvectors = 1;
+ 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ 	}
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 59fc190f1e92..b96e207bf250 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -423,36 +423,7 @@ upload:
+ 
+ 	return 0;
+ }
+-static int __init check_prereq(void)
+-{
+-	struct cpuinfo_x86 *c = &cpu_data(0);
+-
+-	if (!xen_initial_domain())
+-		return -ENODEV;
+-
+-	if (!acpi_gbl_FADT.smi_command)
+-		return -ENODEV;
+-
+-	if (c->x86_vendor == X86_VENDOR_INTEL) {
+-		if (!cpu_has(c, X86_FEATURE_EST))
+-			return -ENODEV;
+ 
+-		return 0;
+-	}
+-	if (c->x86_vendor == X86_VENDOR_AMD) {
+-		/* Copied from powernow-k8.h, can't include ../cpufreq/powernow
+-		 * as we get compile warnings for the static functions.
+-		 */
+-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
+-#define USE_HW_PSTATE                   0x00000080
+-		u32 eax, ebx, ecx, edx;
+-		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
+-		if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
+-			return -ENODEV;
+-		return 0;
+-	}
+-	return -ENODEV;
+-}
+ /* acpi_perf_data is a pointer to percpu data. */
+ static struct acpi_processor_performance __percpu *acpi_perf_data;
+ 
+@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
+ static int __init xen_acpi_processor_init(void)
+ {
+ 	unsigned int i;
+-	int rc = check_prereq();
++	int rc;
+ 
+-	if (rc)
+-		return rc;
++	if (!xen_initial_domain())
++		return -ENODEV;
+ 
+ 	nr_acpi_bits = get_max_acpi_id() + 1;
+ 	acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
+diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
+index 9433e46518c8..531e76474983 100644
+--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
++++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
+@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
+ 			rc = -ENOMEM;
+ 			goto out;
+ 		}
++	} else {
++		list_for_each_entry(trans, &u->transactions, list)
++			if (trans->handle.id == u->u.msg.tx_id)
++				break;
++		if (&trans->list == &u->transactions)
++			return -ESRCH;
+ 	}
+ 
+ 	reply = xenbus_dev_request_and_reply(&u->u.msg);
+ 	if (IS_ERR(reply)) {
+-		kfree(trans);
++		if (msg_type == XS_TRANSACTION_START)
++			kfree(trans);
+ 		rc = PTR_ERR(reply);
+ 		goto out;
+ 	}
+@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
+ 			list_add(&trans->list, &u->transactions);
+ 		}
+ 	} else if (u->u.msg.type == XS_TRANSACTION_END) {
+-		list_for_each_entry(trans, &u->transactions, list)
+-			if (trans->handle.id == u->u.msg.tx_id)
+-				break;
+-		BUG_ON(&trans->list == &u->transactions);
+ 		list_del(&trans->list);
+-
+ 		kfree(trans);
+ 	}
+ 
+diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
+index ba804f3d8278..ce65591b4168 100644
+--- a/drivers/xen/xenbus/xenbus_xs.c
++++ b/drivers/xen/xenbus/xenbus_xs.c
+@@ -250,9 +250,6 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
+ 
+ 	mutex_unlock(&xs_state.request_mutex);
+ 
+-	if (IS_ERR(ret))
+-		return ret;
+-
+ 	if ((msg->type == XS_TRANSACTION_END) ||
+ 	    ((req_msg.type == XS_TRANSACTION_START) &&
+ 	     (msg->type == XS_ERROR)))
+diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
+index 72afcc629d7b..f56a4216d081 100644
+--- a/fs/ecryptfs/file.c
++++ b/fs/ecryptfs/file.c
+@@ -170,6 +170,19 @@ out:
+ 	return rc;
+ }
+ 
++static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct file *lower_file = ecryptfs_file_to_lower(file);
++	/*
++	 * Don't allow mmap on top of file systems that don't support it
++	 * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
++	 * allows recursive mounting, this will need to be extended.
++	 */
++	if (!lower_file->f_op->mmap)
++		return -ENODEV;
++	return generic_file_mmap(file, vma);
++}
++
+ /**
+  * ecryptfs_open
+  * @inode: inode specifying file to open
+@@ -365,7 +378,7 @@ const struct file_operations ecryptfs_main_fops = {
+ #ifdef CONFIG_COMPAT
+ 	.compat_ioctl = ecryptfs_compat_ioctl,
+ #endif
+-	.mmap = generic_file_mmap,
++	.mmap = ecryptfs_mmap,
+ 	.open = ecryptfs_open,
+ 	.flush = ecryptfs_flush,
+ 	.release = ecryptfs_release,
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index e818f5ac7a26..866bb18efefe 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -25,7 +25,6 @@
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/mount.h>
+-#include <linux/file.h>
+ #include "ecryptfs_kernel.h"
+ 
+ struct ecryptfs_open_req {
+@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
+ 	(*lower_file) = dentry_open(&req.path, flags, cred);
+ 	if (!IS_ERR(*lower_file))
+-		goto have_file;
++		goto out;
+ 	if ((flags & O_ACCMODE) == O_RDONLY) {
+ 		rc = PTR_ERR((*lower_file));
+ 		goto out;
+@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	mutex_unlock(&ecryptfs_kthread_ctl.mux);
+ 	wake_up(&ecryptfs_kthread_ctl.wait);
+ 	wait_for_completion(&req.done);
+-	if (IS_ERR(*lower_file)) {
++	if (IS_ERR(*lower_file))
+ 		rc = PTR_ERR(*lower_file);
+-		goto out;
+-	}
+-have_file:
+-	if ((*lower_file)->f_op->mmap == NULL) {
+-		fput(*lower_file);
+-		*lower_file = NULL;
+-		rc = -EMEDIUMTYPE;
+-	}
+ out:
+ 	return rc;
+ }
+diff --git a/mm/swap.c b/mm/swap.c
+index b523f0a4cbfb..ab3b9c2dd783 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -631,7 +631,7 @@ static void __lru_cache_add(struct page *page)
+ 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ 
+ 	page_cache_get(page);
+-	if (!pagevec_space(pvec) || PageCompound(page))
++	if (!pagevec_add(pvec, page) || PageCompound(page))
+ 		__pagevec_lru_add(pvec);
+ 	put_cpu_var(lru_add_pvec);
+ }
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 1782555fcaca..7e3020c1e9d3 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1961,6 +1961,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 
+ 		qhead = tu->qhead++;
+ 		tu->qhead %= tu->queue_size;
++		tu->qused--;
+ 		spin_unlock_irq(&tu->qlock);
+ 
+ 		if (tu->tread) {
+@@ -1974,7 +1975,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 		}
+ 
+ 		spin_lock_irq(&tu->qlock);
+-		tu->qused--;
+ 		if (err < 0)
+ 			goto _error;
+ 		result += unit;
+diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
+index 4667c3232b7f..74177189063c 100644
+--- a/sound/pci/au88x0/au88x0_core.c
++++ b/sound/pci/au88x0/au88x0_core.c
+@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
+ 	int page, p, pp, delta, i;
+ 
+ 	page =
+-	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
+-	     WT_SUBBUF_MASK)
+-	    >> WT_SUBBUF_SHIFT;
++	    (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
++	     >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
+ 	if (dma->nr_periods >= 4)
+ 		delta = (page - dma->period_real) & 3;
+ 	else {
+diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
+index 862db9a0b041..f3e5cfa4f25d 100644
+--- a/sound/pci/echoaudio/echoaudio.c
++++ b/sound/pci/echoaudio/echoaudio.c
+@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
+ 	u32 pipe_alloc_mask;
+ 	int err;
+ 
+-	commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
++	commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
+ 	if (commpage_bak == NULL)
+ 		return -ENOMEM;
+ 	commpage = chip->comm_page;
+-	memcpy(commpage_bak, commpage, sizeof(struct comm_page));
++	memcpy(commpage_bak, commpage, sizeof(*commpage));
+ 
+ 	err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
+ 	if (err < 0) {
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 16e0ebacbdb0..0fda7b4901dd 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -3992,6 +3992,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
+ 
+ 	for (n = 0; n < spec->paths.used; n++) {
+ 		path = snd_array_elem(&spec->paths, n);
++		if (!path->depth)
++			continue;
+ 		if (path->path[0] == nid ||
+ 		    path->path[path->depth - 1] == nid) {
+ 			bool pin_old = path->pin_enabled;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index abf8d342f1f4..707bc5405d9f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5505,6 +5505,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
++	SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),


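Two hunks above repay a closer look. The echoaudio resume fix corrects an
allocation sized by the wrong type: the backup buffer was allocated with
sizeof(struct echoaudio) while sizeof(struct comm_page) bytes were copied
into it, which overruns the heap whenever the comm page is the larger
structure. Deriving both sizes from the pointer, as the fix does, keeps
allocation and copy in lock-step. A userspace sketch of the idiom (the
struct is a stand-in, not the driver's real layout):

    #include <stdlib.h>
    #include <string.h>

    struct comm_page { char regs[4096]; };  /* illustrative size only */

    /* sizeof(*src) ties the allocation and the copy to the object being
     * duplicated, so the two sizes can never drift apart. */
    static struct comm_page *backup_comm_page(const struct comm_page *src)
    {
        struct comm_page *bak = malloc(sizeof(*src));

        if (!bak)
            return NULL;
        memcpy(bak, src, sizeof(*src));
        return bak;
    }

The snd_timer_user_read hunk is the same spirit in miniature: decrementing
tu->qused before qlock is released, rather than after it is re-taken,
closes a window in which the queue looked fuller than it really was.
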
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-07-15 14:18 Mike Pagano
From: Mike Pagano @ 2016-07-15 14:18 UTC
  To: gentoo-commits

commit:     73336b381c437fb71c4316d2cffcae360c2fe541
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 15 14:19:00 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 15 14:19:00 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=73336b38

Revert commit 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival") to fix an OOM regression.

 0000_README                                 |  4 ++++
 1800_fix-lru-cache-add-oom-regression.patch | 17 +++++++++++++++++
 2 files changed, 21 insertions(+)

diff --git a/0000_README b/0000_README
index 24c0a3c..88ef9f3 100644
--- a/0000_README
+++ b/0000_README
@@ -163,6 +163,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1800_fix-lru-cache-add-oom-regression.patch
+From:   http://thread.gmane.org/gmane.linux.kernel.stable/184384
+Desc:   Revert commit 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival") to fix an OOM regression.
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1800_fix-lru-cache-add-oom-regression.patch b/1800_fix-lru-cache-add-oom-regression.patch
new file mode 100644
index 0000000..f1ca64b
--- /dev/null
+++ b/1800_fix-lru-cache-add-oom-regression.patch
@@ -0,0 +1,17 @@
+Revert commit 8f182270dfec ("mm/swap.c: flush lru pvecs on compound page arrival") to fix an OOM regression.
+
+Signed-off-by: Steven Rostedt <rostedt <at> goodmis.org>
+---
+diff --git a/mm/swap.c b/mm/swap.c
+index b523f0a4cbfb..ab3b9c2dd783 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -631,7 +631,7 @@ static void __lru_cache_add(struct page *page)
+    struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+
+    page_cache_get(page);
+-   if (!pagevec_space(pvec) || PageCompound(page))
++   if (!pagevec_add(pvec, page) || PageCompound(page))
+        __pagevec_lru_add(pvec);
+    put_cpu_var(lru_add_pvec);
+ }

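The regression this reverts is visible in the hunk itself: in the 4.1.28
form of __lru_cache_add(), pagevec_space() merely reports free slots and
nothing in the function ever stores the page, so every page pinned by
page_cache_get() misses the LRU pagevec entirely, consistent with the OOM
reports behind this revert. pagevec_add() both stores the page and returns
the slots remaining afterwards. A simplified model of the restored
ordering, with the struct layout standing in for the kernel's
(illustrative, not the real API):

    #define PAGEVEC_SIZE 14

    struct pagevec {
        unsigned int nr;
        void *pages[PAGEVEC_SIZE];
    };

    /* Store the page, then report slots left; a return of 0 means the
     * pagevec is now full and must be drained before the next add. */
    static unsigned int pagevec_add(struct pagevec *pv, void *page)
    {
        pv->pages[pv->nr++] = page;
        return PAGEVEC_SIZE - pv->nr;
    }

    /* Restored 4.1 behaviour: add first, drain when full or compound.
     * drain() stands in for __pagevec_lru_add() and resets pv->nr. */
    static void lru_cache_add_sketch(struct pagevec *pv, void *page,
                                     int page_is_compound,
                                     void (*drain)(struct pagevec *pv))
    {
        if (!pagevec_add(pv, page) || page_is_compound)
            drain(pv);
    }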

* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-07-13 23:38 Mike Pagano
From: Mike Pagano @ 2016-07-13 23:38 UTC
  To: gentoo-commits

commit:     051f9f633cfed6438da621f5ba235c50e1073ccf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 13 23:38:26 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 13 23:38:26 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=051f9f63

Linux patch 4.1.28

 0000_README             |     4 +
 1027_linux-4.1.28.patch | 12868 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 12872 insertions(+)

diff --git a/0000_README b/0000_README
index b592a97..24c0a3c 100644
--- a/0000_README
+++ b/0000_README
@@ -151,6 +151,10 @@ Patch:  1026_linux-4.1.27.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.27
 
+Patch:  1027_linux-4.1.28.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.28
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1027_linux-4.1.28.patch b/1027_linux-4.1.28.patch
new file mode 100644
index 0000000..71127b1
--- /dev/null
+++ b/1027_linux-4.1.28.patch
@@ -0,0 +1,12868 @@
+diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
+index 8638f61c8c9d..37eca00796ee 100644
+--- a/Documentation/scsi/scsi_eh.txt
++++ b/Documentation/scsi/scsi_eh.txt
+@@ -263,19 +263,23 @@ scmd->allowed.
+ 
+  3. scmd recovered
+     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
+-	- shost->host_failed--
+ 	- clear scmd->eh_eflags
+ 	- scsi_setup_cmd_retry()
+ 	- move from local eh_work_q to local eh_done_q
+     LOCKING: none
++    CONCURRENCY: at most one thread per separate eh_work_q to
++		 keep queue manipulation lockless
+ 
+  4. EH completes
+     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
+-	    layer of failure.
++	    layer of failure. May be called concurrently but must have
++	    no more than one thread per separate eh_work_q to
++	    manipulate the queue locklessly
+ 	- scmd is removed from eh_done_q and scmd->eh_entry is cleared
+ 	- if retry is necessary, scmd is requeued using
+           scsi_queue_insert()
+ 	- otherwise, scsi_finish_command() is invoked for scmd
++	- zero shost->host_failed
+     LOCKING: queue or finish function performs appropriate locking
+ 
+ 
+diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
+index 88152f214f48..302b5ed616a6 100644
+--- a/Documentation/sysctl/fs.txt
++++ b/Documentation/sysctl/fs.txt
+@@ -32,6 +32,8 @@ Currently, these files are in /proc/sys/fs:
+ - nr_open
+ - overflowuid
+ - overflowgid
++- pipe-user-pages-hard
++- pipe-user-pages-soft
+ - protected_hardlinks
+ - protected_symlinks
+ - suid_dumpable
+@@ -159,6 +161,27 @@ The default is 65534.
+ 
+ ==============================================================
+ 
++pipe-user-pages-hard:
++
++Maximum total number of pages a non-privileged user may allocate for pipes.
++Once this limit is reached, no new pipes may be allocated until usage goes
++below the limit again. When set to 0, no limit is applied, which is the default
++setting.
++
++==============================================================
++
++pipe-user-pages-soft:
++
++Maximum total number of pages a non-privileged user may allocate for pipes
++before the pipe size gets limited to a single page. Once this limit is reached,
++new pipes will be limited to a single page in size for this user in order to
++limit total memory usage, and trying to increase them using fcntl() will be
++denied until usage goes below the limit again. The default value allows
++allocating up to 1024 pipes at their default size. When set to 0, no limit is
++applied.
++
++==============================================================
++
+ protected_hardlinks:
+ 
+ A long-standing class of security issues is the hardlink-based
+diff --git a/Makefile b/Makefile
+index 54b3d8ae8624..241237cd4ca6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 27
++SUBLEVEL = 28
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index 2f21e1e0ecf7..305dbdf6c944 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -34,7 +34,6 @@ cflags-$(atleast_gcc44)			+= -fsection-anchors
+ cflags-$(CONFIG_ARC_HAS_LLSC)		+= -mlock
+ cflags-$(CONFIG_ARC_HAS_SWAPE)		+= -mswape
+ cflags-$(CONFIG_ARC_HAS_RTSC)		+= -mrtsc
+-cflags-$(CONFIG_ARC_DW2_UNWIND)		+= -fasynchronous-unwind-tables
+ 
+ # By default gcc 4.8 generates dwarf4 which kernel unwinder can't grok
+ ifeq ($(atleast_gcc48),y)
+diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
+index 92320d6f737c..5086cc767c0b 100644
+--- a/arch/arc/kernel/stacktrace.c
++++ b/arch/arc/kernel/stacktrace.c
+@@ -144,7 +144,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ 	 * prelogue is setup (callee regs saved and then fp set and not other
+ 	 * way around
+ 	 */
+-	pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
++	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ 	return 0;
+ 
+ #endif
+diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
+index bfd662e49a25..89b5a0a00dc9 100644
+--- a/arch/arm/include/asm/pgtable-2level.h
++++ b/arch/arm/include/asm/pgtable-2level.h
+@@ -164,6 +164,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 
+ #define pmd_large(pmd)		(pmd_val(pmd) & 2)
+ #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
++#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define copy_pmd(pmdpd,pmdps)		\
+ 	do {				\
+diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
+index a745a2a53853..fd929b5ded9e 100644
+--- a/arch/arm/include/asm/pgtable-3level.h
++++ b/arch/arm/include/asm/pgtable-3level.h
+@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+ 						: !!(pmd_val(pmd) & (val)))
+ #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
+ 
++#define pmd_present(pmd)	(pmd_isset((pmd), L_PMD_SECT_VALID))
+ #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
+ #define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
+ static inline pte_t pte_mkspecial(pte_t pte)
+@@ -257,10 +258,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
+ #define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+ #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
+ 
+-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
++/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
+ static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+ {
+-	return __pmd(0);
++	return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
+ }
+ 
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
+index f40354198bad..7fa12e0f1bc9 100644
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+ #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+ {
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index d6223cbcb661..5414081c0bbf 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -257,6 +257,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ 	kvm_mmu_free_memory_caches(vcpu);
+ 	kvm_timer_vcpu_terminate(vcpu);
+ 	kvm_vgic_vcpu_destroy(vcpu);
++	kvm_vcpu_uninit(vcpu);
+ 	kmem_cache_free(kvm_vcpu_cache, vcpu);
+ }
+ 
+diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
+index aa7b379e2661..2a3db0bd9e15 100644
+--- a/arch/arm/mach-omap2/cpuidle34xx.c
++++ b/arch/arm/mach-omap2/cpuidle34xx.c
+@@ -34,6 +34,7 @@
+ #include "pm.h"
+ #include "control.h"
+ #include "common.h"
++#include "soc.h"
+ 
+ /* Mach specific information to be recorded in the C-state driver_data */
+ struct omap3_idle_statedata {
+@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
+ 	.safe_state_index = 0,
+ };
+ 
++/*
++ * Numbers based on measurements made in October 2009 for PM optimized kernel
++ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
++ * and worst case latencies).
++ */
++static struct cpuidle_driver omap3430_idle_driver = {
++	.name             = "omap3430_idle",
++	.owner            = THIS_MODULE,
++	.states = {
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 110 + 162,
++			.target_residency = 5,
++			.name		  = "C1",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 106 + 180,
++			.target_residency = 309,
++			.name		  = "C2",
++			.desc		  = "MPU ON + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 107 + 410,
++			.target_residency = 46057,
++			.name		  = "C3",
++			.desc		  = "MPU RET + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 121 + 3374,
++			.target_residency = 46057,
++			.name		  = "C4",
++			.desc		  = "MPU OFF + CORE ON",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 855 + 1146,
++			.target_residency = 46057,
++			.name		  = "C5",
++			.desc		  = "MPU RET + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7580 + 4134,
++			.target_residency = 484329,
++			.name		  = "C6",
++			.desc		  = "MPU OFF + CORE RET",
++		},
++		{
++			.enter		  = omap3_enter_idle_bm,
++			.exit_latency	  = 7505 + 15274,
++			.target_residency = 484329,
++			.name		  = "C7",
++			.desc		  = "MPU OFF + CORE OFF",
++		},
++	},
++	.state_count = ARRAY_SIZE(omap3_idle_data),
++	.safe_state_index = 0,
++};
++
+ /* Public functions */
+ 
+ /**
+@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
+ 	if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
+ 		return -ENODEV;
+ 
+-	return cpuidle_register(&omap3_idle_driver, NULL);
++	if (cpu_is_omap3430())
++		return cpuidle_register(&omap3430_idle_driver, NULL);
++	else
++		return cpuidle_register(&omap3_idle_driver, NULL);
+ }
+diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
+index ff780a8d8366..9a42736ef4ac 100644
+--- a/arch/arm/mach-s3c64xx/dev-audio.c
++++ b/arch/arm/mach-s3c64xx/dev-audio.c
+@@ -54,12 +54,12 @@ static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
+ 
+ static struct resource s3c64xx_iis0_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_IIS0, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_I2S0_OUT),
+-	[2] = DEFINE_RES_DMA(DMACH_I2S0_IN),
+ };
+ 
+-static struct s3c_audio_pdata i2sv3_pdata = {
++static struct s3c_audio_pdata i2s0_pdata = {
+ 	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
++	.dma_playback = DMACH_I2S0_OUT,
++	.dma_capture = DMACH_I2S0_IN,
+ };
+ 
+ struct platform_device s3c64xx_device_iis0 = {
+@@ -68,15 +68,19 @@ struct platform_device s3c64xx_device_iis0 = {
+ 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis0_resource),
+ 	.resource	  = s3c64xx_iis0_resource,
+ 	.dev = {
+-		.platform_data = &i2sv3_pdata,
++		.platform_data = &i2s0_pdata,
+ 	},
+ };
+ EXPORT_SYMBOL(s3c64xx_device_iis0);
+ 
+ static struct resource s3c64xx_iis1_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_IIS1, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_I2S1_OUT),
+-	[2] = DEFINE_RES_DMA(DMACH_I2S1_IN),
++};
++
++static struct s3c_audio_pdata i2s1_pdata = {
++	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
++	.dma_playback = DMACH_I2S1_OUT,
++	.dma_capture = DMACH_I2S1_IN,
+ };
+ 
+ struct platform_device s3c64xx_device_iis1 = {
+@@ -85,19 +89,19 @@ struct platform_device s3c64xx_device_iis1 = {
+ 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis1_resource),
+ 	.resource	  = s3c64xx_iis1_resource,
+ 	.dev = {
+-		.platform_data = &i2sv3_pdata,
++		.platform_data = &i2s1_pdata,
+ 	},
+ };
+ EXPORT_SYMBOL(s3c64xx_device_iis1);
+ 
+ static struct resource s3c64xx_iisv4_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_IISV4, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_TX),
+-	[2] = DEFINE_RES_DMA(DMACH_HSI_I2SV40_RX),
+ };
+ 
+ static struct s3c_audio_pdata i2sv4_pdata = {
+ 	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
++	.dma_playback = DMACH_HSI_I2SV40_TX,
++	.dma_capture = DMACH_HSI_I2SV40_RX,
+ 	.type = {
+ 		.i2s = {
+ 			.quirks = QUIRK_PRI_6CHAN,
+@@ -142,12 +146,12 @@ static int s3c64xx_pcm_cfg_gpio(struct platform_device *pdev)
+ 
+ static struct resource s3c64xx_pcm0_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_PCM0, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_PCM0_TX),
+-	[2] = DEFINE_RES_DMA(DMACH_PCM0_RX),
+ };
+ 
+ static struct s3c_audio_pdata s3c_pcm0_pdata = {
+ 	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
++	.dma_capture = DMACH_PCM0_RX,
++	.dma_playback = DMACH_PCM0_TX,
+ };
+ 
+ struct platform_device s3c64xx_device_pcm0 = {
+@@ -163,12 +167,12 @@ EXPORT_SYMBOL(s3c64xx_device_pcm0);
+ 
+ static struct resource s3c64xx_pcm1_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_PCM1, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_PCM1_TX),
+-	[2] = DEFINE_RES_DMA(DMACH_PCM1_RX),
+ };
+ 
+ static struct s3c_audio_pdata s3c_pcm1_pdata = {
+ 	.cfg_gpio = s3c64xx_pcm_cfg_gpio,
++	.dma_playback = DMACH_PCM1_TX,
++	.dma_capture = DMACH_PCM1_RX,
+ };
+ 
+ struct platform_device s3c64xx_device_pcm1 = {
+@@ -196,13 +200,14 @@ static int s3c64xx_ac97_cfg_gpe(struct platform_device *pdev)
+ 
+ static struct resource s3c64xx_ac97_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C64XX_PA_AC97, SZ_256),
+-	[1] = DEFINE_RES_DMA(DMACH_AC97_PCMOUT),
+-	[2] = DEFINE_RES_DMA(DMACH_AC97_PCMIN),
+-	[3] = DEFINE_RES_DMA(DMACH_AC97_MICIN),
+-	[4] = DEFINE_RES_IRQ(IRQ_AC97),
++	[1] = DEFINE_RES_IRQ(IRQ_AC97),
+ };
+ 
+-static struct s3c_audio_pdata s3c_ac97_pdata;
++static struct s3c_audio_pdata s3c_ac97_pdata = {
++	.dma_playback = DMACH_AC97_PCMOUT,
++	.dma_capture = DMACH_AC97_PCMIN,
++	.dma_capture_mic = DMACH_AC97_MICIN,
++};
+ 
+ static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
+ 
+diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
+index 096e14073bd9..9c739eafe95c 100644
+--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
++++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
+@@ -14,38 +14,38 @@
+ #define S3C64XX_DMA_CHAN(name)		((unsigned long)(name))
+ 
+ /* DMA0/SDMA0 */
+-#define DMACH_UART0		S3C64XX_DMA_CHAN("uart0_tx")
+-#define DMACH_UART0_SRC2	S3C64XX_DMA_CHAN("uart0_rx")
+-#define DMACH_UART1		S3C64XX_DMA_CHAN("uart1_tx")
+-#define DMACH_UART1_SRC2	S3C64XX_DMA_CHAN("uart1_rx")
+-#define DMACH_UART2		S3C64XX_DMA_CHAN("uart2_tx")
+-#define DMACH_UART2_SRC2	S3C64XX_DMA_CHAN("uart2_rx")
+-#define DMACH_UART3		S3C64XX_DMA_CHAN("uart3_tx")
+-#define DMACH_UART3_SRC2	S3C64XX_DMA_CHAN("uart3_rx")
+-#define DMACH_PCM0_TX		S3C64XX_DMA_CHAN("pcm0_tx")
+-#define DMACH_PCM0_RX		S3C64XX_DMA_CHAN("pcm0_rx")
+-#define DMACH_I2S0_OUT		S3C64XX_DMA_CHAN("i2s0_tx")
+-#define DMACH_I2S0_IN		S3C64XX_DMA_CHAN("i2s0_rx")
++#define DMACH_UART0		"uart0_tx"
++#define DMACH_UART0_SRC2	"uart0_rx"
++#define DMACH_UART1		"uart1_tx"
++#define DMACH_UART1_SRC2	"uart1_rx"
++#define DMACH_UART2		"uart2_tx"
++#define DMACH_UART2_SRC2	"uart2_rx"
++#define DMACH_UART3		"uart3_tx"
++#define DMACH_UART3_SRC2	"uart3_rx"
++#define DMACH_PCM0_TX		"pcm0_tx"
++#define DMACH_PCM0_RX		"pcm0_rx"
++#define DMACH_I2S0_OUT		"i2s0_tx"
++#define DMACH_I2S0_IN		"i2s0_rx"
+ #define DMACH_SPI0_TX		S3C64XX_DMA_CHAN("spi0_tx")
+ #define DMACH_SPI0_RX		S3C64XX_DMA_CHAN("spi0_rx")
+-#define DMACH_HSI_I2SV40_TX	S3C64XX_DMA_CHAN("i2s2_tx")
+-#define DMACH_HSI_I2SV40_RX	S3C64XX_DMA_CHAN("i2s2_rx")
++#define DMACH_HSI_I2SV40_TX	"i2s2_tx"
++#define DMACH_HSI_I2SV40_RX	"i2s2_rx"
+ 
+ /* DMA1/SDMA1 */
+-#define DMACH_PCM1_TX		S3C64XX_DMA_CHAN("pcm1_tx")
+-#define DMACH_PCM1_RX		S3C64XX_DMA_CHAN("pcm1_rx")
+-#define DMACH_I2S1_OUT		S3C64XX_DMA_CHAN("i2s1_tx")
+-#define DMACH_I2S1_IN		S3C64XX_DMA_CHAN("i2s1_rx")
++#define DMACH_PCM1_TX		"pcm1_tx"
++#define DMACH_PCM1_RX		"pcm1_rx"
++#define DMACH_I2S1_OUT		"i2s1_tx"
++#define DMACH_I2S1_IN		"i2s1_rx"
+ #define DMACH_SPI1_TX		S3C64XX_DMA_CHAN("spi1_tx")
+ #define DMACH_SPI1_RX		S3C64XX_DMA_CHAN("spi1_rx")
+-#define DMACH_AC97_PCMOUT	S3C64XX_DMA_CHAN("ac97_out")
+-#define DMACH_AC97_PCMIN	S3C64XX_DMA_CHAN("ac97_in")
+-#define DMACH_AC97_MICIN	S3C64XX_DMA_CHAN("ac97_mic")
+-#define DMACH_PWM		S3C64XX_DMA_CHAN("pwm")
+-#define DMACH_IRDA		S3C64XX_DMA_CHAN("irda")
+-#define DMACH_EXTERNAL		S3C64XX_DMA_CHAN("external")
+-#define DMACH_SECURITY_RX	S3C64XX_DMA_CHAN("sec_rx")
+-#define DMACH_SECURITY_TX	S3C64XX_DMA_CHAN("sec_tx")
++#define DMACH_AC97_PCMOUT	"ac97_out"
++#define DMACH_AC97_PCMIN	"ac97_in"
++#define DMACH_AC97_MICIN	"ac97_mic"
++#define DMACH_PWM		"pwm"
++#define DMACH_IRDA		"irda"
++#define DMACH_EXTERNAL		"external"
++#define DMACH_SECURITY_RX	"sec_rx"
++#define DMACH_SECURITY_TX	"sec_tx"
+ 
+ enum dma_ch {
+ 	DMACH_MAX = 32
+diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
+index 83c7d154bde0..8b67db8c1213 100644
+--- a/arch/arm/plat-samsung/devs.c
++++ b/arch/arm/plat-samsung/devs.c
+@@ -65,6 +65,7 @@
+ #include <linux/platform_data/usb-ohci-s3c2410.h>
+ #include <plat/usb-phy.h>
+ #include <plat/regs-spi.h>
++#include <linux/platform_data/asoc-s3c.h>
+ #include <linux/platform_data/spi-s3c64xx.h>
+ 
+ static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+@@ -74,9 +75,12 @@ static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+ static struct resource s3c_ac97_resource[] = {
+ 	[0] = DEFINE_RES_MEM(S3C2440_PA_AC97, S3C2440_SZ_AC97),
+ 	[1] = DEFINE_RES_IRQ(IRQ_S3C244X_AC97),
+-	[2] = DEFINE_RES_DMA_NAMED(DMACH_PCM_OUT, "PCM out"),
+-	[3] = DEFINE_RES_DMA_NAMED(DMACH_PCM_IN, "PCM in"),
+-	[4] = DEFINE_RES_DMA_NAMED(DMACH_MIC_IN, "Mic in"),
++};
++
++static struct s3c_audio_pdata s3c_ac97_pdata = {
++	.dma_playback = (void *)DMACH_PCM_OUT,
++	.dma_capture = (void *)DMACH_PCM_IN,
++	.dma_capture_mic = (void *)DMACH_MIC_IN,
+ };
+ 
+ struct platform_device s3c_device_ac97 = {
+@@ -87,6 +91,7 @@ struct platform_device s3c_device_ac97 = {
+ 	.dev		= {
+ 		.dma_mask		= &samsung_device_dma_mask,
+ 		.coherent_dma_mask	= DMA_BIT_MASK(32),
++		.platform_data		= &s3c_ac97_pdata,
+ 	}
+ };
+ #endif /* CONFIG_CPU_S3C2440 */
+diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
+index b6f14e8d2121..bfb8eb168f2d 100644
+--- a/arch/arm64/mm/flush.c
++++ b/arch/arm64/mm/flush.c
+@@ -74,10 +74,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
+ {
+ 	struct page *page = pte_page(pte);
+ 
+-	/* no flushing needed for anonymous pages */
+-	if (!page_mapping(page))
+-		return;
+-
+ 	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
+ 		__flush_dcache_area(page_address(page),
+ 				PAGE_SIZE << compound_order(page));
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 3585af093576..ab518d14b7b0 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -370,6 +370,7 @@ struct kvm_mips_tlb {
+ #define KVM_MIPS_GUEST_TLB_SIZE	64
+ struct kvm_vcpu_arch {
+ 	void *host_ebase, *guest_ebase;
++	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ 	unsigned long host_stack;
+ 	unsigned long host_gp;
+ 
+diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
+index 9b3b48e21c22..6ec90a19be70 100644
+--- a/arch/mips/include/asm/processor.h
++++ b/arch/mips/include/asm/processor.h
+@@ -51,7 +51,7 @@ extern unsigned int vced_count, vcei_count;
+  * User space process size: 2GB. This is hardcoded into a few places,
+  * so don't change it unless you know what you are doing.
+  */
+-#define TASK_SIZE	0x7fff8000UL
++#define TASK_SIZE	0x80000000UL
+ #endif
+ 
+ #define STACK_TOP_MAX	TASK_SIZE
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index be73c491182b..49b52035226c 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -686,6 +686,9 @@ static void __init arch_mem_init(char **cmdline_p)
+ 	for_each_memblock(reserved, reg)
+ 		if (reg->size != 0)
+ 			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
++
++	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
++			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
+ }
+ 
+ static void __init resource_init(void)
+diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
+index 4ab4bdfad703..2143884709e4 100644
+--- a/arch/mips/kvm/interrupt.h
++++ b/arch/mips/kvm/interrupt.h
+@@ -28,6 +28,7 @@
+ #define MIPS_EXC_MAX                12
+ /* XXXSL More to follow */
+ 
++extern char __kvm_mips_vcpu_run_end[];
+ extern char mips32_exception[], mips32_exceptionEnd[];
+ extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+ 
+diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
+index d1ee95a7f7dd..ae01e7fe4e1c 100644
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -235,6 +235,7 @@ FEXPORT(__kvm_mips_load_k0k1)
+ 
+ 	/* Jump to guest */
+ 	eret
++EXPORT(__kvm_mips_vcpu_run_end)
+ 
+ VECTOR(MIPSX(exception), unknown)
+ /* Find out what mode we came from and jump to the proper handler. */
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index ace4ed7d41c6..485fdc462243 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -312,6 +312,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 	memcpy(gebase + offset, mips32_GuestException,
+ 	       mips32_GuestExceptionEnd - mips32_GuestException);
+ 
++#ifdef MODULE
++	offset += mips32_GuestExceptionEnd - mips32_GuestException;
++	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
++	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
++	vcpu->arch.vcpu_run = gebase + offset;
++#else
++	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
++#endif
++
+ 	/* Invalidate the icache for these ranges */
+ 	local_flush_icache_range((unsigned long)gebase,
+ 				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+@@ -401,7 +410,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ 	/* Disable hardware page table walking while in guest */
+ 	htw_stop();
+ 
+-	r = __kvm_mips_vcpu_run(run, vcpu);
++	r = vcpu->arch.vcpu_run(run, vcpu);
+ 
+ 	/* Re-enable HTW before enabling interrupts */
+ 	htw_start();
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index c8c8275765e7..dd023904bac5 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -1240,6 +1240,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
+ 		current->thread.regs = regs - 1;
+ 	}
+ 
++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
++	/*
++	 * Clear any transactional state, we're exec()ing. The cause is
++	 * not important as there will never be a recheckpoint so it's not
++	 * user visible.
++	 */
++	if (MSR_TM_SUSPENDED(mfmsr()))
++		tm_reclaim_current(0);
++#endif
++
+ 	memset(regs->gpr, 0, sizeof(regs->gpr));
+ 	regs->ctr = 0;
+ 	regs->link = 0;
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index abe9cdc390a5..28dbbb0d12c4 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -162,11 +162,12 @@ static struct ibm_pa_feature {
+ 	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
+ 	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ 	/*
+-	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+-	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+-	 * which is 0 if the kernel doesn't support TM.
++	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
++	 * we don't want to turn on TM here, so we use the *_COMP versions
++	 * which are 0 if the kernel doesn't support TM.
+ 	 */
+-	{CPU_FTR_TM_COMP, 0, 0, 0,		22, 0, 0},
++	{CPU_FTR_TM_COMP, 0, 0,
++	 PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
+index 3385e3d0506e..4d29154a4987 100644
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -472,13 +472,13 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
+ {
+ 	struct hugepd_freelist **batchp;
+ 
+-	batchp = this_cpu_ptr(&hugepd_freelist_cur);
++	batchp = &get_cpu_var(hugepd_freelist_cur);
+ 
+ 	if (atomic_read(&tlb->mm->mm_users) < 2 ||
+ 	    cpumask_equal(mm_cpumask(tlb->mm),
+ 			  cpumask_of(smp_processor_id()))) {
+ 		kmem_cache_free(hugepte_cache, hugepte);
+-        put_cpu_var(hugepd_freelist_cur);
++		put_cpu_var(hugepd_freelist_cur);
+ 		return;
+ 	}
+ 
+diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
+index d29ad9545b41..081b2ad99d73 100644
+--- a/arch/s390/include/asm/mmu.h
++++ b/arch/s390/include/asm/mmu.h
+@@ -11,7 +11,7 @@ typedef struct {
+ 	spinlock_t list_lock;
+ 	struct list_head pgtable_list;
+ 	struct list_head gmap_list;
+-	unsigned long asce_bits;
++	unsigned long asce;
+ 	unsigned long asce_limit;
+ 	unsigned long vdso_base;
+ 	/* The mmu context allocates 4K page tables. */
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index e485817f7b1a..22877c9440ea 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
+ 	mm->context.has_pgste = 0;
+ 	mm->context.use_skey = 0;
+ #endif
+-	if (mm->context.asce_limit == 0) {
++	switch (mm->context.asce_limit) {
++	case 1UL << 42:
++		/*
++		 * forked 3-level task, fall through to set new asce with new
++		 * mm->pgd
++		 */
++	case 0:
+ 		/* context created by exec, set asce limit to 4TB */
+-		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+-			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ 		mm->context.asce_limit = STACK_TOP_MAX;
+-	} else if (mm->context.asce_limit == (1UL << 31)) {
++		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
++				   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
++		break;
++	case 1UL << 53:
++		/* forked 4-level task, set new asce with new mm->pgd */
++		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
++				   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
++		break;
++	case 1UL << 31:
++		/* forked 2-level compat task, set new asce with new mm->pgd */
++		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
++				   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
++		/* pgd_alloc() did not increase mm->nr_pmds */
+ 		mm_inc_nr_pmds(mm);
+ 	}
+ 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
+ 
+ static inline void set_user_asce(struct mm_struct *mm)
+ {
+-	S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
++	S390_lowcore.user_asce = mm->context.asce;
+ 	if (current->thread.mm_segment.ar4)
+ 		__ctl_load(S390_lowcore.user_asce, 7, 7);
+ 	set_cpu_flag(CIF_ASCE);
+@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ {
+ 	int cpu = smp_processor_id();
+ 
+-	S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
++	S390_lowcore.user_asce = next->context.asce;
+ 	if (prev == next)
+ 		return;
+ 	if (MACHINE_HAS_TLB_LC)
+diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
+index d7cc79fb6191..5991cdcb5b40 100644
+--- a/arch/s390/include/asm/pgalloc.h
++++ b/arch/s390/include/asm/pgalloc.h
+@@ -56,8 +56,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
+ 	return _REGION2_ENTRY_EMPTY;
+ }
+ 
+-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
+-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
++int crst_table_upgrade(struct mm_struct *);
++void crst_table_downgrade(struct mm_struct *);
+ 
+ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
+ {
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index dedb6218544b..7ce53f682ec9 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -155,7 +155,7 @@ struct stack_frame {
+ 	regs->psw.mask	= PSW_USER_BITS | PSW_MASK_BA;			\
+ 	regs->psw.addr	= new_psw | PSW_ADDR_AMODE;			\
+ 	regs->gprs[15]	= new_stackp;					\
+-	crst_table_downgrade(current->mm, 1UL << 31);			\
++	crst_table_downgrade(current->mm);				\
+ 	execve_tail();							\
+ } while (0)
+ 
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index ca148f7c3eaa..a2e6ef32e054 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+ static inline void __tlb_flush_kernel(void)
+ {
+ 	if (MACHINE_HAS_IDTE)
+-		__tlb_flush_idte((unsigned long) init_mm.pgd |
+-				 init_mm.context.asce_bits);
++		__tlb_flush_idte(init_mm.context.asce);
+ 	else
+ 		__tlb_flush_global();
+ }
+@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+ static inline void __tlb_flush_kernel(void)
+ {
+ 	if (MACHINE_HAS_TLB_LC)
+-		__tlb_flush_idte_local((unsigned long) init_mm.pgd |
+-				       init_mm.context.asce_bits);
++		__tlb_flush_idte_local(init_mm.context.asce);
+ 	else
+ 		__tlb_flush_local();
+ }
+@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
+ 	 * only ran on the local cpu.
+ 	 */
+ 	if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
+-		__tlb_flush_asce(mm, (unsigned long) mm->pgd |
+-				 mm->context.asce_bits);
++		__tlb_flush_asce(mm, mm->context.asce);
+ 	else
+ 		__tlb_flush_full(mm);
+ }
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index 52fbef91d1d9..7963c6aa1196 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -2044,13 +2044,6 @@ void s390_reset_system(void (*fn_pre)(void),
+ 	S390_lowcore.program_new_psw.addr =
+ 		PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+ 
+-	/*
+-	 * Clear subchannel ID and number to signal new kernel that no CCW or
+-	 * SCSI IPL has been done (for kexec and kdump)
+-	 */
+-	S390_lowcore.subchannel_id = 0;
+-	S390_lowcore.subchannel_nr = 0;
+-
+ 	/* Store status at absolute zero */
+ 	store_status();
+ 
+diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
+index 80875c43a4a4..728744918a07 100644
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -112,7 +112,8 @@ void __init paging_init(void)
+ 		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
+ 		pgd_type = _REGION3_ENTRY_EMPTY;
+ 	}
+-	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
++	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
++	S390_lowcore.kernel_asce = init_mm.context.asce;
+ 	clear_table((unsigned long *) init_mm.pgd, pgd_type,
+ 		    sizeof(unsigned long)*2048);
+ 	vmem_map_init();
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index 6e552af08c76..e2f8685d9981 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -184,7 +184,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
+ 	if (!(flags & MAP_FIXED))
+ 		addr = 0;
+ 	if ((addr + len) >= TASK_SIZE)
+-		return crst_table_upgrade(current->mm, 1UL << 53);
++		return crst_table_upgrade(current->mm);
+ 	return 0;
+ }
+ 
+@@ -201,7 +201,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ 		return area;
+ 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+ 		/* Upgrade the page table to 4 levels and retry. */
+-		rc = crst_table_upgrade(mm, 1UL << 53);
++		rc = crst_table_upgrade(mm);
+ 		if (rc)
+ 			return (unsigned long) rc;
+ 		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+@@ -223,7 +223,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ 		return area;
+ 	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+ 		/* Upgrade the page table to 4 levels and retry. */
+-		rc = crst_table_upgrade(mm, 1UL << 53);
++		rc = crst_table_upgrade(mm);
+ 		if (rc)
+ 			return (unsigned long) rc;
+ 		area = arch_get_unmapped_area_topdown(filp, addr, len,
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index b33f66110ca9..ebf82a99df45 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -56,81 +56,52 @@ static void __crst_table_upgrade(void *arg)
+ 	__tlb_flush_local();
+ }
+ 
+-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
++int crst_table_upgrade(struct mm_struct *mm)
+ {
+ 	unsigned long *table, *pgd;
+-	unsigned long entry;
+-	int flush;
+ 
+-	BUG_ON(limit > (1UL << 53));
+-	flush = 0;
+-repeat:
++	/* upgrade should only happen from 3 to 4 levels */
++	BUG_ON(mm->context.asce_limit != (1UL << 42));
++
+ 	table = crst_table_alloc(mm);
+ 	if (!table)
+ 		return -ENOMEM;
++
+ 	spin_lock_bh(&mm->page_table_lock);
+-	if (mm->context.asce_limit < limit) {
+-		pgd = (unsigned long *) mm->pgd;
+-		if (mm->context.asce_limit <= (1UL << 31)) {
+-			entry = _REGION3_ENTRY_EMPTY;
+-			mm->context.asce_limit = 1UL << 42;
+-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+-						_ASCE_USER_BITS |
+-						_ASCE_TYPE_REGION3;
+-		} else {
+-			entry = _REGION2_ENTRY_EMPTY;
+-			mm->context.asce_limit = 1UL << 53;
+-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+-						_ASCE_USER_BITS |
+-						_ASCE_TYPE_REGION2;
+-		}
+-		crst_table_init(table, entry);
+-		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+-		mm->pgd = (pgd_t *) table;
+-		mm->task_size = mm->context.asce_limit;
+-		table = NULL;
+-		flush = 1;
+-	}
++	pgd = (unsigned long *) mm->pgd;
++	crst_table_init(table, _REGION2_ENTRY_EMPTY);
++	pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
++	mm->pgd = (pgd_t *) table;
++	mm->context.asce_limit = 1UL << 53;
++	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
++			   _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
++	mm->task_size = mm->context.asce_limit;
+ 	spin_unlock_bh(&mm->page_table_lock);
+-	if (table)
+-		crst_table_free(mm, table);
+-	if (mm->context.asce_limit < limit)
+-		goto repeat;
+-	if (flush)
+-		on_each_cpu(__crst_table_upgrade, mm, 0);
++
++	on_each_cpu(__crst_table_upgrade, mm, 0);
+ 	return 0;
+ }
+ 
+-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
++void crst_table_downgrade(struct mm_struct *mm)
+ {
+ 	pgd_t *pgd;
+ 
++	/* downgrade should only happen from 3 to 2 levels (compat only) */
++	BUG_ON(mm->context.asce_limit != (1UL << 42));
++
+ 	if (current->active_mm == mm) {
+ 		clear_user_asce();
+ 		__tlb_flush_mm(mm);
+ 	}
+-	while (mm->context.asce_limit > limit) {
+-		pgd = mm->pgd;
+-		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+-		case _REGION_ENTRY_TYPE_R2:
+-			mm->context.asce_limit = 1UL << 42;
+-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+-						_ASCE_USER_BITS |
+-						_ASCE_TYPE_REGION3;
+-			break;
+-		case _REGION_ENTRY_TYPE_R3:
+-			mm->context.asce_limit = 1UL << 31;
+-			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
+-						_ASCE_USER_BITS |
+-						_ASCE_TYPE_SEGMENT;
+-			break;
+-		default:
+-			BUG();
+-		}
+-		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+-		mm->task_size = mm->context.asce_limit;
+-		crst_table_free(mm, (unsigned long *) pgd);
+-	}
++
++	pgd = mm->pgd;
++	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
++	mm->context.asce_limit = 1UL << 31;
++	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
++			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
++	mm->task_size = mm->context.asce_limit;
++	crst_table_free(mm, (unsigned long *) pgd);
++
+ 	if (current->active_mm == mm)
+ 		set_user_asce(mm);
+ }
+diff --git a/arch/sparc/include/asm/head_64.h b/arch/sparc/include/asm/head_64.h
+index 10e9dabc4c41..f0700cfeedd7 100644
+--- a/arch/sparc/include/asm/head_64.h
++++ b/arch/sparc/include/asm/head_64.h
+@@ -15,6 +15,10 @@
+ 
+ #define	PTREGS_OFF	(STACK_BIAS + STACKFRAME_SZ)
+ 
++#define	RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
++#define	RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
++#define RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
++
+ #define __CHEETAH_ID	0x003e0014
+ #define __JALAPENO_ID	0x003e0016
+ #define __SERRANO_ID	0x003e0022
+diff --git a/arch/sparc/include/asm/ttable.h b/arch/sparc/include/asm/ttable.h
+index 71b5a67522ab..781b9f1dbdc2 100644
+--- a/arch/sparc/include/asm/ttable.h
++++ b/arch/sparc/include/asm/ttable.h
+@@ -589,8 +589,8 @@ user_rtt_fill_64bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop; nop; nop;			\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+@@ -652,8 +652,8 @@ user_rtt_fill_32bit:					\
+ 	 restored;					\
+ 	nop; nop; nop; nop; nop;			\
+ 	nop; nop; nop;					\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
+-	ba,a,pt	%xcc, user_rtt_fill_fixup;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_dax;		\
++	ba,a,pt	%xcc, user_rtt_fill_fixup_mna;		\
+ 	ba,a,pt	%xcc, user_rtt_fill_fixup;
+ 
+ 
+diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
+index 7cf9c6ea3f1f..fdb13327fded 100644
+--- a/arch/sparc/kernel/Makefile
++++ b/arch/sparc/kernel/Makefile
+@@ -21,6 +21,7 @@ CFLAGS_REMOVE_perf_event.o := -pg
+ CFLAGS_REMOVE_pcr.o := -pg
+ endif
+ 
++obj-$(CONFIG_SPARC64)   += urtt_fill.o
+ obj-$(CONFIG_SPARC32)   += entry.o wof.o wuf.o
+ obj-$(CONFIG_SPARC32)   += etrap_32.o
+ obj-$(CONFIG_SPARC32)   += rtrap_32.o
+diff --git a/arch/sparc/kernel/cherrs.S b/arch/sparc/kernel/cherrs.S
+index 4ee1ad420862..655628def68e 100644
+--- a/arch/sparc/kernel/cherrs.S
++++ b/arch/sparc/kernel/cherrs.S
+@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1		! Next cacheline
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_dcpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
+ 	mov		0x2, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_dcpe_tl1,.-do_dcpe_tl1
+ 
+ 	.globl		do_icpe_tl1
+@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:	/* Ok we may use interrupt globals safely. */
+ 	subcc		%g1, %g2, %g1
+ 	bge,pt		%icc, 1b
+ 	 nop
+-	ba,pt		%xcc, dcpe_icpe_tl1_common
+-	 nop
++	ba,a,pt		%xcc, dcpe_icpe_tl1_common
+ 
+ do_icpe_tl1_fatal:
+ 	sethi		%hi(1f), %g7
+@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
+ 	mov		0x3, %o0
+ 	call		cheetah_plus_parity_error
+ 	 add		%sp, PTREGS_OFF, %o1
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_icpe_tl1,.-do_icpe_tl1
+ 	
+ 	.type		dcpe_icpe_tl1_common,#function
+@@ -456,7 +452,7 @@ __cheetah_log_error:
+ 	 cmp		%g2, 0x63
+ 	be		c_cee
+ 	 nop
+-	ba,pt		%xcc, c_deferred
++	ba,a,pt		%xcc, c_deferred
+ 	.size		__cheetah_log_error,.-__cheetah_log_error
+ 
+ 	/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
+index 33c02b15f478..a83707c83be8 100644
+--- a/arch/sparc/kernel/entry.S
++++ b/arch/sparc/kernel/entry.S
+@@ -948,7 +948,24 @@ linux_syscall_trace:
+ 	cmp	%o0, 0
+ 	bne	3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ld	[%sp + STACKFRAME_SZ + PT_G1], %g1
++	sethi	%hi(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I0], %i0
++	or	%l7, %lo(sys_call_table), %l7
++	ld	[%sp + STACKFRAME_SZ + PT_I1], %i1
++	ld	[%sp + STACKFRAME_SZ + PT_I2], %i2
++	ld	[%sp + STACKFRAME_SZ + PT_I3], %i3
++	ld	[%sp + STACKFRAME_SZ + PT_I4], %i4
++	ld	[%sp + STACKFRAME_SZ + PT_I5], %i5
++	cmp	%g1, NR_syscalls
++	bgeu	3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	ld	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/fpu_traps.S b/arch/sparc/kernel/fpu_traps.S
+index a6864826a4bd..336d2750fe78 100644
+--- a/arch/sparc/kernel/fpu_traps.S
++++ b/arch/sparc/kernel/fpu_traps.S
+@@ -100,8 +100,8 @@ do_fpdis:
+ 	fmuld		%f0, %f2, %f26
+ 	faddd		%f0, %f2, %f28
+ 	fmuld		%f0, %f2, %f30
+-	b,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 2:	andcc		%g5, FPRS_DU, %g0
+ 	bne,pt		%icc, 3f
+ 	 fzero		%f32
+@@ -144,8 +144,8 @@ do_fpdis:
+ 	fmuld		%f32, %f34, %f58
+ 	faddd		%f32, %f34, %f60
+ 	fmuld		%f32, %f34, %f62
+-	ba,pt		%xcc, fpdis_exit
+-	 nop
++	ba,a,pt		%xcc, fpdis_exit
++
+ 3:	mov		SECONDARY_CONTEXT, %g3
+ 	add		%g6, TI_FPREGS, %g1
+ 
+@@ -197,8 +197,7 @@ fpdis_exit2:
+ fp_other_bounce:
+ 	call		do_fpother
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		fp_other_bounce,.-fp_other_bounce
+ 
+ 	.align		32
+diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
+index 3d61fcae7ee3..8ff57630a486 100644
+--- a/arch/sparc/kernel/head_64.S
++++ b/arch/sparc/kernel/head_64.S
+@@ -461,9 +461,8 @@ sun4v_chip_type:
+ 	subcc	%g3, 1, %g3
+ 	bne,pt	%xcc, 41b
+ 	add	%g1, 1, %g1
+-	mov	SUN4V_CHIP_SPARC64X, %g4
+ 	ba,pt	%xcc, 5f
+-	nop
++	 mov	SUN4V_CHIP_SPARC64X, %g4
+ 
+ 49:
+ 	mov	SUN4V_CHIP_UNKNOWN, %g4
+@@ -548,8 +547,7 @@ sun4u_init:
+ 	stxa		%g0, [%g7] ASI_DMMU
+ 	membar	#Sync
+ 
+-	ba,pt		%xcc, sun4u_continue
+-	 nop
++	ba,a,pt		%xcc, sun4u_continue
+ 
+ sun4v_init:
+ 	/* Set ctx 0 */
+@@ -560,14 +558,12 @@ sun4v_init:
+ 	mov		SECONDARY_CONTEXT, %g7
+ 	stxa		%g0, [%g7] ASI_MMU
+ 	membar		#Sync
+-	ba,pt		%xcc, niagara_tlb_fixup
+-	 nop
++	ba,a,pt		%xcc, niagara_tlb_fixup
+ 
+ sun4u_continue:
+ 	BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
+ 
+-	ba,pt	%xcc, spitfire_tlb_fixup
+-	 nop
++	ba,a,pt	%xcc, spitfire_tlb_fixup
+ 
+ niagara_tlb_fixup:
+ 	mov	3, %g2		/* Set TLB type to hypervisor. */
+@@ -639,8 +635,7 @@ niagara_patch:
+ 	call	hypervisor_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ cheetah_tlb_fixup:
+ 	mov	2, %g2		/* Set TLB type to cheetah+. */
+@@ -659,8 +654,7 @@ cheetah_tlb_fixup:
+ 	call	cheetah_patch_cachetlbops
+ 	 nop
+ 
+-	ba,pt	%xcc, tlb_fixup_done
+-	 nop
++	ba,a,pt	%xcc, tlb_fixup_done
+ 
+ spitfire_tlb_fixup:
+ 	/* Set TLB type to spitfire. */
+@@ -782,8 +776,7 @@ setup_trap_table:
+ 	call	%o1
+ 	 add	%sp, (2047 + 128), %o0
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 1:	sethi	%hi(sparc64_ttable_tl0), %o0
+ 	set	prom_set_trap_table_name, %g2
+@@ -822,8 +815,7 @@ setup_trap_table:
+ 
+ 	BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
+ 
+-	ba,pt	%xcc, 2f
+-	 nop
++	ba,a,pt	%xcc, 2f
+ 
+ 	/* Disable STICK_INT interrupts. */
+ 1:
+diff --git a/arch/sparc/kernel/misctrap.S b/arch/sparc/kernel/misctrap.S
+index 753b4f031bfb..34b4933900bf 100644
+--- a/arch/sparc/kernel/misctrap.S
++++ b/arch/sparc/kernel/misctrap.S
+@@ -18,8 +18,7 @@ __do_privact:
+ 109:	or		%g7, %lo(109b), %g7
+ 	call		do_privact
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__do_privact,.-__do_privact
+ 
+ 	.type		do_mna,#function
+@@ -46,8 +45,7 @@ do_mna:
+ 	mov		%l5, %o2
+ 	call		mem_address_unaligned
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_mna,.-do_mna
+ 
+ 	.type		do_lddfmna,#function
+@@ -65,8 +63,7 @@ do_lddfmna:
+ 	mov		%l5, %o2
+ 	call		handle_lddfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_lddfmna,.-do_lddfmna
+ 
+ 	.type		do_stdfmna,#function
+@@ -84,8 +81,7 @@ do_stdfmna:
+ 	mov		%l5, %o2
+ 	call		handle_stdfmna
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		do_stdfmna,.-do_stdfmna
+ 
+ 	.type		breakpoint_trap,#function
+diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
+index c928bc64b4ba..991ba90beb04 100644
+--- a/arch/sparc/kernel/pci.c
++++ b/arch/sparc/kernel/pci.c
+@@ -994,6 +994,23 @@ void pcibios_set_master(struct pci_dev *dev)
+ 	/* No special bus mastering setup handling */
+ }
+ 
++#ifdef CONFIG_PCI_IOV
++int pcibios_add_device(struct pci_dev *dev)
++{
++	struct pci_dev *pdev;
++
++	/* Add sriov arch specific initialization here.
++	 * Copy dev_archdata from PF to VF
++	 */
++	if (dev->is_virtfn) {
++		pdev = dev->physfn;
++		memcpy(&dev->dev.archdata, &pdev->dev.archdata,
++		       sizeof(struct dev_archdata));
++	}
++	return 0;
++}
++#endif /* CONFIG_PCI_IOV */
++
+ static int __init pcibios_init(void)
+ {
+ 	pci_dfl_cache_line_size = 64 >> 2;
+diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
+index 39f0c662f4c8..8de386dc8150 100644
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -14,10 +14,6 @@
+ #include <asm/visasm.h>
+ #include <asm/processor.h>
+ 
+-#define		RTRAP_PSTATE		(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
+-#define		RTRAP_PSTATE_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
+-#define		RTRAP_PSTATE_AG_IRQOFF	(PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
+-
+ #ifdef CONFIG_CONTEXT_TRACKING
+ # define SCHEDULE_USER schedule_user
+ #else
+@@ -236,52 +232,17 @@ rt_continue:	ldx			[%sp + PTREGS_OFF + PT_V9_G1], %g1
+ 		 wrpr			%g1, %cwp
+ 		ba,a,pt			%xcc, user_rtt_fill_64bit
+ 
+-user_rtt_fill_fixup:
+-		rdpr	%cwp, %g1
+-		add	%g1, 1, %g1
+-		wrpr	%g1, 0x0, %cwp
+-
+-		rdpr	%wstate, %g2
+-		sll	%g2, 3, %g2
+-		wrpr	%g2, 0x0, %wstate
+-
+-		/* We know %canrestore and %otherwin are both zero.  */
+-
+-		sethi	%hi(sparc64_kern_pri_context), %g2
+-		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
+-		mov	PRIMARY_CONTEXT, %g1
+-
+-661:		stxa	%g2, [%g1] ASI_DMMU
+-		.section .sun4v_1insn_patch, "ax"
+-		.word	661b
+-		stxa	%g2, [%g1] ASI_MMU
+-		.previous
+-
+-		sethi	%hi(KERNBASE), %g1
+-		flush	%g1
++user_rtt_fill_fixup_dax:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	1, %g3
+ 
+-		or	%g4, FAULT_CODE_WINFIXUP, %g4
+-		stb	%g4, [%g6 + TI_FAULT_CODE]
+-		stx	%g5, [%g6 + TI_FAULT_ADDR]
++user_rtt_fill_fixup_mna:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 mov	2, %g3
+ 
+-		mov	%g6, %l1
+-		wrpr	%g0, 0x0, %tl
+-
+-661:		nop
+-		.section		.sun4v_1insn_patch, "ax"
+-		.word			661b
+-		SET_GL(0)
+-		.previous
+-
+-		wrpr	%g0, RTRAP_PSTATE, %pstate
+-
+-		mov	%l1, %g6
+-		ldx	[%g6 + TI_TASK], %g4
+-		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
+-		call	do_sparc64_fault
+-		 add	%sp, PTREGS_OFF, %o0
+-		ba,pt	%xcc, rtrap
+-		 nop
++user_rtt_fill_fixup:
++		ba,pt	%xcc, user_rtt_fill_fixup_common
++		 clr	%g3
+ 
+ user_rtt_pre_restore:
+ 		add			%g1, 1, %g1
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 4eed773a7735..77655f0f0fc7 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -138,12 +138,24 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 	return 0;
+ }
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) ||
++	    ((unsigned long)fp) > 0x100000000ULL - fplen)
++		return true;
++	return false;
++}
++
+ void do_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct signal_frame32 __user *sf;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+-	unsigned int psr;
++	unsigned int psr, ufp;
+ 	unsigned pc, npc;
+ 	sigset_t set;
+ 	compat_sigset_t seta;
+@@ -158,11 +170,16 @@ void do_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->info.si_regs.pc) ||
++	if (__get_user(pc, &sf->info.si_regs.pc) ||
+ 	    __get_user(npc, &sf->info.si_regs.npc))
+ 		goto segv;
+ 
+@@ -227,7 +244,7 @@ segv:
+ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame32 __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	compat_uptr_t fpu_save;
+ 	compat_uptr_t rwin_save;
+ 	sigset_t set;
+@@ -242,11 +259,16 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ 	sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 3))
++	if (invalid_frame_pointer(sf, sizeof(*sf)))
+ 		goto segv;
+ 
+-	if (get_user(pc, &sf->regs.pc) || 
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
++		goto segv;
++
++	if (__get_user(pc, &sf->regs.pc) ||
+ 	    __get_user(npc, &sf->regs.npc))
+ 		goto segv;
+ 
+@@ -307,14 +329,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
+-		return 1;
+-	return 0;
+-}
+-
+ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp;
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index 52aa5e4ce5e7..c3c12efe0bc0 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -60,10 +60,22 @@ struct rt_signal_frame {
+ #define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
+ #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+ 
++/* Checks if the fp is valid.  We always build signal frames which are
++ * 16-byte aligned, therefore we can always enforce that the restore
++ * frame has that property as well.
++ */
++static inline bool invalid_frame_pointer(void __user *fp, int fplen)
++{
++	if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
++		return true;
++
++	return false;
++}
++
+ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long up_psr, pc, npc, ufp;
+ 	struct signal_frame __user *sf;
+-	unsigned long up_psr, pc, npc;
+ 	sigset_t set;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+@@ -77,10 +89,13 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv_and_exit;
++
++	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+ 		goto segv_and_exit;
+ 
+-	if (((unsigned long) sf) & 3)
++	if (ufp & 0x7)
+ 		goto segv_and_exit;
+ 
+ 	err = __get_user(pc,  &sf->info.si_regs.pc);
+@@ -127,7 +142,7 @@ segv_and_exit:
+ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ {
+ 	struct rt_signal_frame __user *sf;
+-	unsigned int psr, pc, npc;
++	unsigned int psr, pc, npc, ufp;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -135,8 +150,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ 
+ 	synchronize_user_stack();
+ 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+-	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
+-	    (((unsigned long) sf) & 0x03))
++	if (!invalid_frame_pointer(sf, sizeof(*sf)))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++		goto segv;
++
++	if (ufp & 0x7)
+ 		goto segv;
+ 
+ 	err = __get_user(pc, &sf->regs.pc);
+@@ -178,15 +198,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static inline int invalid_frame_pointer(void __user *fp, int fplen)
+-{
+-	if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
+-		return 1;
+-
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP];
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index d88beff47bab..5ee930c48f4c 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
+ 	unsigned char fenab;
+ 	int err;
+ 
+-	flush_user_windows();
++	synchronize_user_stack();
+ 	if (get_thread_wsaved()					||
+ 	    (((unsigned long)ucp) & (sizeof(unsigned long)-1))	||
+ 	    (!__access_ok(ucp, sizeof(*ucp))))
+@@ -234,6 +234,17 @@ do_sigsegv:
+ 	goto out;
+ }
+ 
++/* Checks if the fp is valid.  We always build rt signal frames which
++ * are 16-byte aligned, therefore we can always enforce that the
++ * restore frame has that property as well.
++ */
++static bool invalid_frame_pointer(void __user *fp)
++{
++	if (((unsigned long) fp) & 15)
++		return true;
++	return false;
++}
++
+ struct rt_signal_frame {
+ 	struct sparc_stackf	ss;
+ 	siginfo_t		info;
+@@ -246,8 +257,8 @@ struct rt_signal_frame {
+ 
+ void do_rt_sigreturn(struct pt_regs *regs)
+ {
++	unsigned long tpc, tnpc, tstate, ufp;
+ 	struct rt_signal_frame __user *sf;
+-	unsigned long tpc, tnpc, tstate;
+ 	__siginfo_fpu_t __user *fpu_save;
+ 	__siginfo_rwin_t __user *rwin_save;
+ 	sigset_t set;
+@@ -261,10 +272,16 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ 		(regs->u_regs [UREG_FP] + STACK_BIAS);
+ 
+ 	/* 1. Make sure we are not getting garbage from the user */
+-	if (((unsigned long) sf) & 3)
++	if (invalid_frame_pointer(sf))
++		goto segv;
++
++	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+ 		goto segv;
+ 
+-	err = get_user(tpc, &sf->regs.tpc);
++	if ((ufp + STACK_BIAS) & 0x7)
++		goto segv;
++
++	err = __get_user(tpc, &sf->regs.tpc);
+ 	err |= __get_user(tnpc, &sf->regs.tnpc);
+ 	if (test_thread_flag(TIF_32BIT)) {
+ 		tpc &= 0xffffffff;
+@@ -308,14 +325,6 @@ segv:
+ 	force_sig(SIGSEGV, current);
+ }
+ 
+-/* Checks if the fp is valid */
+-static int invalid_frame_pointer(void __user *fp)
+-{
+-	if (((unsigned long) fp) & 15)
+-		return 1;
+-	return 0;
+-}
+-
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ 	unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
+index 0f6eebe71e6c..e5fe8cef9a69 100644
+--- a/arch/sparc/kernel/sigutil_32.c
++++ b/arch/sparc/kernel/sigutil_32.c
+@@ -48,6 +48,10 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ {
+ 	int err;
++
++	if (((unsigned long) fpu) & 3)
++		return -EFAULT;
++
+ #ifdef CONFIG_SMP
+ 	if (test_tsk_thread_flag(current, TIF_USEDFPU))
+ 		regs->psr &= ~PSR_EF;
+@@ -97,7 +101,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 3)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 387834a9c56a..36aadcbeac69 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -37,7 +37,10 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ 	unsigned long fprs;
+ 	int err;
+ 
+-	err = __get_user(fprs, &fpu->si_fprs);
++	if (((unsigned long) fpu) & 7)
++		return -EFAULT;
++
++	err = get_user(fprs, &fpu->si_fprs);
+ 	fprs_write(0);
+ 	regs->tstate &= ~TSTATE_PEF;
+ 	if (fprs & FPRS_DL)
+@@ -72,7 +75,10 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ 	struct thread_info *t = current_thread_info();
+ 	int i, wsaved, err;
+ 
+-	__get_user(wsaved, &rp->wsaved);
++	if (((unsigned long) rp) & 7)
++		return -EFAULT;
++
++	get_user(wsaved, &rp->wsaved);
+ 	if (wsaved > NSWINS)
+ 		return -EFAULT;
+ 
+diff --git a/arch/sparc/kernel/spiterrs.S b/arch/sparc/kernel/spiterrs.S
+index c357e40ffd01..4a73009f66a5 100644
+--- a/arch/sparc/kernel/spiterrs.S
++++ b/arch/sparc/kernel/spiterrs.S
+@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
+ 	ba,pt		%xcc, etraptl1
+ 	 rd		%pc, %g7
+ 
+-	ba,pt		%xcc, 2f
+-	 nop
++	ba,a,pt		%xcc, 2f
+ 
+ 1:	ba,pt		%xcc, etrap_irq
+ 	 rd		%pc, %g7
+@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
+ 	mov		%l5, %o2
+ 	call		spitfire_access_error
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_access_error,.-__spitfire_access_error
+ 
+ 	/* This is the trap handler entry point for ECC correctable
+@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
+ 
+ 	.type		__spitfire_data_access_exception,#function
+@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_data_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_data_access_exception,.-__spitfire_data_access_exception
+ 
+ 	.type		__spitfire_insn_access_exception_tl1,#function
+@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception_tl1
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
+ 
+ 	.type		__spitfire_insn_access_exception,#function
+@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
+ 	mov		%l5, %o2
+ 	call		spitfire_insn_access_exception
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 	.size		__spitfire_insn_access_exception,.-__spitfire_insn_access_exception
+diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
+index bb0008927598..c4a1b5c40e4e 100644
+--- a/arch/sparc/kernel/syscalls.S
++++ b/arch/sparc/kernel/syscalls.S
+@@ -158,7 +158,25 @@ linux_syscall_trace32:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table32), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	srl	%i0, 0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	srl	%i4, 0, %o4
+ 	srl	%i1, 0, %o1
+ 	srl	%i2, 0, %o2
+@@ -170,7 +188,25 @@ linux_syscall_trace:
+ 	 add	%sp, PTREGS_OFF, %o0
+ 	brnz,pn	%o0, 3f
+ 	 mov	-ENOSYS, %o0
++
++	/* Syscall tracing can modify the registers.  */
++	ldx	[%sp + PTREGS_OFF + PT_V9_G1], %g1
++	sethi	%hi(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I0], %i0
++	or	%l7, %lo(sys_call_table64), %l7
++	ldx	[%sp + PTREGS_OFF + PT_V9_I1], %i1
++	ldx	[%sp + PTREGS_OFF + PT_V9_I2], %i2
++	ldx	[%sp + PTREGS_OFF + PT_V9_I3], %i3
++	ldx	[%sp + PTREGS_OFF + PT_V9_I4], %i4
++	ldx	[%sp + PTREGS_OFF + PT_V9_I5], %i5
++
++	cmp	%g1, NR_syscalls
++	bgeu,pn	%xcc, 3f
++	 mov	-ENOSYS, %o0
++
++	sll	%g1, 2, %l4
+ 	mov	%i0, %o0
++	lduw	[%l7 + %l4], %l7
+ 	mov	%i1, %o1
+ 	mov	%i2, %o2
+ 	mov	%i3, %o3
+diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
+new file mode 100644
+index 000000000000..5604a2b051d4
+--- /dev/null
++++ b/arch/sparc/kernel/urtt_fill.S
+@@ -0,0 +1,98 @@
++#include <asm/thread_info.h>
++#include <asm/trap_block.h>
++#include <asm/spitfire.h>
++#include <asm/ptrace.h>
++#include <asm/head.h>
++
++		.text
++		.align	8
++		.globl	user_rtt_fill_fixup_common
++user_rtt_fill_fixup_common:
++		rdpr	%cwp, %g1
++		add	%g1, 1, %g1
++		wrpr	%g1, 0x0, %cwp
++
++		rdpr	%wstate, %g2
++		sll	%g2, 3, %g2
++		wrpr	%g2, 0x0, %wstate
++
++		/* We know %canrestore and %otherwin are both zero.  */
++
++		sethi	%hi(sparc64_kern_pri_context), %g2
++		ldx	[%g2 + %lo(sparc64_kern_pri_context)], %g2
++		mov	PRIMARY_CONTEXT, %g1
++
++661:		stxa	%g2, [%g1] ASI_DMMU
++		.section .sun4v_1insn_patch, "ax"
++		.word	661b
++		stxa	%g2, [%g1] ASI_MMU
++		.previous
++
++		sethi	%hi(KERNBASE), %g1
++		flush	%g1
++
++		mov	%g4, %l4
++		mov	%g5, %l5
++		brnz,pn	%g3, 1f
++		 mov	%g3, %l3
++
++		or	%g4, FAULT_CODE_WINFIXUP, %g4
++		stb	%g4, [%g6 + TI_FAULT_CODE]
++		stx	%g5, [%g6 + TI_FAULT_ADDR]
++1:
++		mov	%g6, %l1
++		wrpr	%g0, 0x0, %tl
++
++661:		nop
++		.section		.sun4v_1insn_patch, "ax"
++		.word			661b
++		SET_GL(0)
++		.previous
++
++		wrpr	%g0, RTRAP_PSTATE, %pstate
++
++		mov	%l1, %g6
++		ldx	[%g6 + TI_TASK], %g4
++		LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
++
++		brnz,pn	%l3, 1f
++		 nop
++
++		call	do_sparc64_fault
++		 add	%sp, PTREGS_OFF, %o0
++		ba,pt	%xcc, rtrap
++		 nop
++
++1:		cmp	%g3, 2
++		bne,pn	%xcc, 2f
++		 nop
++
++		sethi	%hi(tlb_type), %g1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		mov	%l4, %o2
++		call	sun4v_do_mna
++		 mov	%l5, %o1
++		ba,a,pt	%xcc, rtrap
++1:		mov	%l4, %o1
++		mov	%l5, %o2
++		call	mem_address_unaligned
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++2:		sethi	%hi(tlb_type), %g1
++		mov	%l4, %o1
++		lduw	[%g1 + %lo(tlb_type)], %g1
++		mov	%l5, %o2
++		cmp	%g1, 3
++		bne,pt	%icc, 1f
++		 add	%sp, PTREGS_OFF, %o0
++		call	sun4v_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
++
++1:		call	spitfire_data_access_exception
++		 nop
++		ba,a,pt	%xcc, rtrap
+diff --git a/arch/sparc/kernel/utrap.S b/arch/sparc/kernel/utrap.S
+index b7f0f3f3a909..c731e8023d3e 100644
+--- a/arch/sparc/kernel/utrap.S
++++ b/arch/sparc/kernel/utrap.S
+@@ -11,8 +11,7 @@ utrap_trap:		/* %g3=handler,%g4=level */
+ 	mov		%l4, %o1
+         call		bad_trap
+ 	 add		%sp, PTREGS_OFF, %o0
+-	ba,pt		%xcc, rtrap
+-	 nop
++	ba,a,pt		%xcc, rtrap
+ 
+ invoke_utrap:
+ 	sllx		%g3, 3, %g3
+diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
+index f1a2f688b28a..4a41d412dd3d 100644
+--- a/arch/sparc/kernel/vmlinux.lds.S
++++ b/arch/sparc/kernel/vmlinux.lds.S
+@@ -33,6 +33,10 @@ ENTRY(_start)
+ jiffies = jiffies_64;
+ #endif
+ 
++#ifdef CONFIG_SPARC64
++ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
++#endif
++
+ SECTIONS
+ {
+ #ifdef CONFIG_SPARC64
+diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S
+index 1e67ce958369..855019a8590e 100644
+--- a/arch/sparc/kernel/winfixup.S
++++ b/arch/sparc/kernel/winfixup.S
+@@ -32,8 +32,7 @@ fill_fixup:
+ 	 rd	%pc, %g7
+ 	call	do_sparc64_fault
+ 	 add	%sp, PTREGS_OFF, %o0
+-	ba,pt	%xcc, rtrap
+-	 nop
++	ba,a,pt	%xcc, rtrap
+ 
+ 	/* Be very careful about usage of the trap globals here.
+ 	 * You cannot touch %g5 as that has the fault information.
+diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
+index 559cb744112c..71c7ace855d7 100644
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1301,10 +1301,18 @@ static int __init numa_parse_sun4u(void)
+ 
+ static int __init bootmem_init_numa(void)
+ {
++	int i, j;
+ 	int err = -1;
+ 
+ 	numadbg("bootmem_init_numa()\n");
+ 
++	/* Some sane defaults for numa latency values */
++	for (i = 0; i < MAX_NUMNODES; i++) {
++		for (j = 0; j < MAX_NUMNODES; j++)
++			numa_latency[i][j] = (i == j) ?
++				LOCAL_DISTANCE : REMOTE_DISTANCE;
++	}
++
+ 	if (numa_enabled) {
+ 		if (tlb_type == hypervisor)
+ 			err = numa_parse_mdesc();
+@@ -2762,9 +2770,10 @@ void hugetlb_setup(struct pt_regs *regs)
+ 	 * the Data-TLB for huge pages.
+ 	 */
+ 	if (tlb_type == cheetah_plus) {
++		bool need_context_reload = false;
+ 		unsigned long ctx;
+ 
+-		spin_lock(&ctx_alloc_lock);
++		spin_lock_irq(&ctx_alloc_lock);
+ 		ctx = mm->context.sparc64_ctx_val;
+ 		ctx &= ~CTX_PGSZ_MASK;
+ 		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+@@ -2783,9 +2792,12 @@ void hugetlb_setup(struct pt_regs *regs)
+ 			 * also executing in this address space.
+ 			 */
+ 			mm->context.sparc64_ctx_val = ctx;
+-			on_each_cpu(context_reload, mm, 0);
++			need_context_reload = true;
+ 		}
+-		spin_unlock(&ctx_alloc_lock);
++		spin_unlock_irq(&ctx_alloc_lock);
++
++		if (need_context_reload)
++			on_each_cpu(context_reload, mm, 0);
+ 	}
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+index 1af51b1586d7..56270f0f05e6 100644
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void)
+ {
+ 	__u64 msr_val;
+ 
++	if (static_cpu_has(X86_FEATURE_HWP))
++		wrmsrl_safe(MSR_HWP_STATUS, 0);
++
+ 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+ 
+ 	/* Check for violation of core thermal thresholds*/
+diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
+index 1deffe6cc873..023c442c33bb 100644
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -959,7 +959,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+ 		 * normal page fault.
+ 		 */
+ 		regs->ip = (unsigned long)cur->addr;
++		/*
++		 * Trap flag (TF) has been set here because this fault
++		 * happened where the single stepping will be done.
++		 * So clear it by resetting the current kprobe:
++		 */
++		regs->flags &= ~X86_EFLAGS_TF;
++
++		/*
++		 * If the TF flag was set before the kprobe hit,
++		 * don't touch it:
++		 */
+ 		regs->flags |= kcb->kprobe_old_flags;
++
+ 		if (kcb->kprobe_status == KPROBE_REENTER)
+ 			restore_previous_kprobe(kcb);
+ 		else
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index 1d08ad3582d0..090aa5c1d6b1 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -501,6 +501,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ 			do_cpuid_1_ent(&entry[i], function, idx);
+ 			if (idx == 1) {
+ 				entry[i].eax &= kvm_supported_word10_x86_features;
++				cpuid_mask(&entry[i].eax, 10);
+ 				entry[i].ebx = 0;
+ 				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
+ 					entry[i].ebx =
+diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
+index 637ab34ed632..ddb2244b06a1 100644
+--- a/arch/x86/mm/kmmio.c
++++ b/arch/x86/mm/kmmio.c
+@@ -33,7 +33,7 @@
+ struct kmmio_fault_page {
+ 	struct list_head list;
+ 	struct kmmio_fault_page *release_next;
+-	unsigned long page; /* location of the fault page */
++	unsigned long addr; /* the requested address */
+ 	pteval_t old_presence; /* page presence prior to arming */
+ 	bool armed;
+ 
+@@ -70,9 +70,16 @@ unsigned int kmmio_count;
+ static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+ static LIST_HEAD(kmmio_probes);
+ 
+-static struct list_head *kmmio_page_list(unsigned long page)
++static struct list_head *kmmio_page_list(unsigned long addr)
+ {
+-	return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++
++	return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
+ }
+ 
+ /* Accessed per-cpu */
+@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+ }
+ 
+ /* You must be holding RCU read lock. */
+-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
++static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct list_head *head;
+ 	struct kmmio_fault_page *f;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
+ 
+-	page &= PAGE_MASK;
+-	head = kmmio_page_list(page);
++	if (!pte)
++		return NULL;
++	addr &= page_level_mask(l);
++	head = kmmio_page_list(addr);
+ 	list_for_each_entry_rcu(f, head, list) {
+-		if (f->page == page)
++		if (f->addr == addr)
+ 			return f;
+ 	}
+ 	return NULL;
+@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
+ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ {
+ 	unsigned int level;
+-	pte_t *pte = lookup_address(f->page, &level);
++	pte_t *pte = lookup_address(f->addr, &level);
+ 
+ 	if (!pte) {
+-		pr_err("no pte for page 0x%08lx\n", f->page);
++		pr_err("no pte for addr 0x%08lx\n", f->addr);
+ 		return -1;
+ 	}
+ 
+@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
+ 		return -1;
+ 	}
+ 
+-	__flush_tlb_one(f->page);
++	__flush_tlb_one(f->addr);
+ 	return 0;
+ }
+ 
+@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
+ 	int ret;
+ 	WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
+ 	if (f->armed) {
+-		pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
+-			   f->page, f->count, !!f->old_presence);
++		pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
++			   f->addr, f->count, !!f->old_presence);
+ 	}
+ 	ret = clear_page_presence(f, true);
+-	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
+-		  f->page);
++	WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
++		  f->addr);
+ 	f->armed = true;
+ 	return ret;
+ }
+@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
+ {
+ 	int ret = clear_page_presence(f, false);
+ 	WARN_ONCE(ret < 0,
+-			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
++			KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
+ 	f->armed = false;
+ }
+ 
+@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	struct kmmio_context *ctx;
+ 	struct kmmio_fault_page *faultpage;
+ 	int ret = 0; /* default to fault not handled */
++	unsigned long page_base = addr;
++	unsigned int l;
++	pte_t *pte = lookup_address(addr, &l);
++	if (!pte)
++		return -EINVAL;
++	page_base &= page_level_mask(l);
+ 
+ 	/*
+ 	 * Preemption is now disabled to prevent process switch during
+@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	preempt_disable();
+ 	rcu_read_lock();
+ 
+-	faultpage = get_kmmio_fault_page(addr);
++	faultpage = get_kmmio_fault_page(page_base);
+ 	if (!faultpage) {
+ 		/*
+ 		 * Either this page fault is not caused by kmmio, or
+@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 
+ 	ctx = &get_cpu_var(kmmio_ctx);
+ 	if (ctx->active) {
+-		if (addr == ctx->addr) {
++		if (page_base == ctx->addr) {
+ 			/*
+ 			 * A second fault on the same page means some other
+ 			 * condition needs handling by do_page_fault(), the
+@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+ 	ctx->active++;
+ 
+ 	ctx->fpage = faultpage;
+-	ctx->probe = get_kmmio_probe(addr);
++	ctx->probe = get_kmmio_probe(page_base);
+ 	ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+-	ctx->addr = addr;
++	ctx->addr = page_base;
+ 
+ 	if (ctx->probe && ctx->probe->pre_handler)
+ 		ctx->probe->pre_handler(ctx->probe, regs, addr);
+@@ -354,12 +371,11 @@ out:
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static int add_kmmio_fault_page(unsigned long page)
++static int add_kmmio_fault_page(unsigned long addr)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (f) {
+ 		if (!f->count)
+ 			arm_kmmio_fault_page(f);
+@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
+ 		return -1;
+ 
+ 	f->count = 1;
+-	f->page = page;
++	f->addr = addr;
+ 
+ 	if (arm_kmmio_fault_page(f)) {
+ 		kfree(f);
+ 		return -1;
+ 	}
+ 
+-	list_add_rcu(&f->list, kmmio_page_list(f->page));
++	list_add_rcu(&f->list, kmmio_page_list(f->addr));
+ 
+ 	return 0;
+ }
+ 
+ /* You must be holding kmmio_lock. */
+-static void release_kmmio_fault_page(unsigned long page,
++static void release_kmmio_fault_page(unsigned long addr,
+ 				struct kmmio_fault_page **release_list)
+ {
+ 	struct kmmio_fault_page *f;
+ 
+-	page &= PAGE_MASK;
+-	f = get_kmmio_fault_page(page);
++	f = get_kmmio_fault_page(addr);
+ 	if (!f)
+ 		return;
+ 
+@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
+ 	int ret = 0;
+ 	unsigned long size = 0;
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
++	unsigned int l;
++	pte_t *pte;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	if (get_kmmio_probe(p->addr)) {
+ 		ret = -EEXIST;
+ 		goto out;
+ 	}
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte) {
++		ret = -EINVAL;
++		goto out;
++	}
++
+ 	kmmio_count++;
+ 	list_add_rcu(&p->list, &kmmio_probes);
+ 	while (size < size_lim) {
+ 		if (add_kmmio_fault_page(p->addr + size))
+ 			pr_err("Unable to set page fault.\n");
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ out:
+ 	spin_unlock_irqrestore(&kmmio_lock, flags);
+@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
+ 	const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ 	struct kmmio_fault_page *release_list = NULL;
+ 	struct kmmio_delayed_release *drelease;
++	unsigned int l;
++	pte_t *pte;
++
++	pte = lookup_address(p->addr, &l);
++	if (!pte)
++		return;
+ 
+ 	spin_lock_irqsave(&kmmio_lock, flags);
+ 	while (size < size_lim) {
+ 		release_kmmio_fault_page(p->addr + size, &release_list);
+-		size += PAGE_SIZE;
++		size += page_level_size(l);
+ 	}
+ 	list_del_rcu(&p->list);
+ 	kmmio_count--;
+diff --git a/crypto/asymmetric_keys/pkcs7_trust.c b/crypto/asymmetric_keys/pkcs7_trust.c
+index 0f6463b6692b..44c8447eb97e 100644
+--- a/crypto/asymmetric_keys/pkcs7_trust.c
++++ b/crypto/asymmetric_keys/pkcs7_trust.c
+@@ -174,6 +174,8 @@ int pkcs7_validate_trust(struct pkcs7_message *pkcs7,
+ 	int cached_ret = -ENOKEY;
+ 	int ret;
+ 
++	*_trusted = false;
++
+ 	for (p = pkcs7->certs; p; p = p->next)
+ 		p->seen = false;
+ 
+diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
+index 58f335ca2e75..568f2b942aac 100644
+--- a/drivers/acpi/acpi_processor.c
++++ b/drivers/acpi/acpi_processor.c
+@@ -475,6 +475,58 @@ static void acpi_processor_remove(struct acpi_device *device)
+ }
+ #endif /* CONFIG_ACPI_HOTPLUG_CPU */
+ 
++#ifdef CONFIG_X86
++static bool acpi_hwp_native_thermal_lvt_set;
++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle,
++							  u32 lvl,
++							  void *context,
++							  void **rv)
++{
++	u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953";
++	u32 capbuf[2];
++	struct acpi_osc_context osc_context = {
++		.uuid_str = sb_uuid_str,
++		.rev = 1,
++		.cap.length = 8,
++		.cap.pointer = capbuf,
++	};
++
++	if (acpi_hwp_native_thermal_lvt_set)
++		return AE_CTRL_TERMINATE;
++
++	capbuf[0] = 0x0000;
++	capbuf[1] = 0x1000; /* set bit 12 */
++
++	if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) {
++		if (osc_context.ret.pointer && osc_context.ret.length > 1) {
++			u32 *capbuf_ret = osc_context.ret.pointer;
++
++			if (capbuf_ret[1] & 0x1000) {
++				acpi_handle_info(handle,
++					"_OSC native thermal LVT Acked\n");
++				acpi_hwp_native_thermal_lvt_set = true;
++			}
++		}
++		kfree(osc_context.ret.pointer);
++	}
++
++	return AE_OK;
++}
++
++void __init acpi_early_processor_osc(void)
++{
++	if (boot_cpu_has(X86_FEATURE_HWP)) {
++		acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
++				    ACPI_UINT32_MAX,
++				    acpi_hwp_native_thermal_lvt_osc,
++				    NULL, NULL, NULL);
++		acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID,
++				 acpi_hwp_native_thermal_lvt_osc,
++				 NULL, NULL);
++	}
++}
++#endif
++
+ /*
+  * The following ACPI IDs are known to be suitable for representing as
+  * processor devices.
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index 513e7230e3d0..fd6053908d24 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -612,6 +612,9 @@ static int __init acpi_bus_init(void)
+ 		goto error1;
+ 	}
+ 
++	/* Set capability bits for _OSC under processor scope */
++	acpi_early_processor_osc();
++
+ 	/*
+ 	 * _OSC method may exist in module level code,
+ 	 * so it must be run after ACPI_FULL_INITIALIZATION
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index ba4a61e964be..7db7f9dd7c47 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -121,6 +121,12 @@ void acpi_early_processor_set_pdc(void);
+ static inline void acpi_early_processor_set_pdc(void) {}
+ #endif
+ 
++#ifdef CONFIG_X86
++void acpi_early_processor_osc(void);
++#else
++static inline void acpi_early_processor_osc(void) {}
++#endif
++
+ /* --------------------------------------------------------------------------
+                                   Embedded Controller
+    -------------------------------------------------------------------------- */
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index cb0508af1459..5ab6fa9cfc2f 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
+ 	ata_scsi_port_error_handler(host, ap);
+ 
+ 	/* finish or retry handled scmd's and clean up */
+-	WARN_ON(host->host_failed || !list_empty(&eh_work_q));
++	WARN_ON(!list_empty(&eh_work_q));
+ 
+ 	DPRINTK("EXIT\n");
+ }
+diff --git a/drivers/base/module.c b/drivers/base/module.c
+index db930d3ee312..2a215780eda2 100644
+--- a/drivers/base/module.c
++++ b/drivers/base/module.c
+@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
+ 
+ static void module_create_drivers_dir(struct module_kobject *mk)
+ {
+-	if (!mk || mk->drivers_dir)
+-		return;
++	static DEFINE_MUTEX(drivers_dir_mutex);
+ 
+-	mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_lock(&drivers_dir_mutex);
++	if (mk && !mk->drivers_dir)
++		mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
++	mutex_unlock(&drivers_dir_mutex);
+ }
+ 
+ void module_add_driver(struct module *mod, struct device_driver *drv)
+diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
+index 2af8b29656af..eecaa02ec222 100644
+--- a/drivers/block/mtip32xx/mtip32xx.c
++++ b/drivers/block/mtip32xx/mtip32xx.c
+@@ -2946,9 +2946,15 @@ static int mtip_service_thread(void *data)
+ 		 * is in progress nor error handling is active
+ 		 */
+ 		wait_event_interruptible(port->svc_wait, (port->flags) &&
+-			!(port->flags & MTIP_PF_PAUSE_IO));
++			(port->flags & MTIP_PF_SVC_THD_WORK));
+ 
+-		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++		if (kthread_should_stop() ||
++			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
++			goto st_out;
++
++		if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
++				&dd->dd_flag)))
++			goto st_out;
+ 
+ 		if (kthread_should_stop() ||
+ 			test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
+@@ -2962,6 +2968,8 @@ static int mtip_service_thread(void *data)
+ 				&dd->dd_flag)))
+ 			goto st_out;
+ 
++		set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
++
+ restart_eh:
+ 		/* Demux bits: start with error handling */
+ 		if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) {
+@@ -3004,10 +3012,8 @@ restart_eh:
+ 		}
+ 
+ 		if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+-			if (mtip_ftl_rebuild_poll(dd) < 0)
+-				set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+-							&dd->dd_flag);
+-			clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
++			if (mtip_ftl_rebuild_poll(dd) == 0)
++				clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
+ 		}
+ 	}
+ 
+@@ -3887,7 +3893,6 @@ static int mtip_block_initialize(struct driver_data *dd)
+ 
+ 	mtip_hw_debugfs_init(dd);
+ 
+-skip_create_disk:
+ 	memset(&dd->tags, 0, sizeof(dd->tags));
+ 	dd->tags.ops = &mtip_mq_ops;
+ 	dd->tags.nr_hw_queues = 1;
+@@ -3917,6 +3922,7 @@ skip_create_disk:
+ 	dd->disk->queue		= dd->queue;
+ 	dd->queue->queuedata	= dd;
+ 
++skip_create_disk:
+ 	/* Initialize the protocol layer. */
+ 	wait_for_rebuild = mtip_hw_get_identify(dd);
+ 	if (wait_for_rebuild < 0) {
+@@ -4078,7 +4084,8 @@ static int mtip_block_remove(struct driver_data *dd)
+ 		dd->bdev = NULL;
+ 	}
+ 	if (dd->disk) {
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+@@ -4119,7 +4126,8 @@ static int mtip_block_shutdown(struct driver_data *dd)
+ 		dev_info(&dd->pdev->dev,
+ 			"Shutting down %s ...\n", dd->disk->disk_name);
+ 
+-		del_gendisk(dd->disk);
++		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
++			del_gendisk(dd->disk);
+ 		if (dd->disk->queue) {
+ 			blk_cleanup_queue(dd->queue);
+ 			blk_mq_free_tag_set(&dd->tags);
+diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
+index 76695265dffb..578ad36c9913 100644
+--- a/drivers/block/mtip32xx/mtip32xx.h
++++ b/drivers/block/mtip32xx/mtip32xx.h
+@@ -145,6 +145,11 @@ enum {
+ 	MTIP_PF_SR_CLEANUP_BIT      = 7,
+ 	MTIP_PF_SVC_THD_STOP_BIT    = 8,
+ 
++	MTIP_PF_SVC_THD_WORK	= ((1 << MTIP_PF_EH_ACTIVE_BIT) |
++				  (1 << MTIP_PF_ISSUE_CMDS_BIT) |
++				  (1 << MTIP_PF_REBUILD_BIT) |
++				  (1 << MTIP_PF_SVC_THD_STOP_BIT)),
++
+ 	/* below are bit numbers in 'dd_flag' defined in driver_data */
+ 	MTIP_DDF_SEC_LOCK_BIT	    = 0,
+ 	MTIP_DDF_REMOVE_PENDING_BIT = 1,
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 39e5f7fae3ef..9911b2067286 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -557,8 +557,8 @@ static void do_nbd_request(struct request_queue *q)
+ 			req, req->cmd_type);
+ 
+ 		if (unlikely(!nbd->sock)) {
+-			dev_err(disk_to_dev(nbd->disk),
+-				"Attempted send on closed socket\n");
++			dev_err_ratelimited(disk_to_dev(nbd->disk),
++					    "Attempted send on closed socket\n");
+ 			req->errors++;
+ 			nbd_end_request(nbd, req);
+ 			spin_lock_irq(q->queue_lock);
+diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
+index d48715b287e6..b0414702e61a 100644
+--- a/drivers/block/paride/pd.c
++++ b/drivers/block/paride/pd.c
+@@ -126,7 +126,7 @@
+ */
+ #include <linux/types.h>
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PD_MAJOR;
+ static char *name = PD_NAME;
+ static int cluster = 64;
+@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+ static DEFINE_MUTEX(pd_mutex);
+ static DEFINE_SPINLOCK(pd_lock);
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param(cluster, int, 0);
+diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
+index 2596042eb987..ada45058e04d 100644
+--- a/drivers/block/paride/pt.c
++++ b/drivers/block/paride/pt.c
+@@ -117,7 +117,7 @@
+ 
+ */
+ 
+-static bool verbose = 0;
++static int verbose = 0;
+ static int major = PT_MAJOR;
+ static char *name = PT_NAME;
+ static int disable = 0;
+@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+ 
+ #include <asm/uaccess.h>
+ 
+-module_param(verbose, bool, 0);
++module_param(verbose, int, 0);
+ module_param(major, int, 0);
+ module_param(name, charp, 0);
+ module_param_array(drive0, int, NULL, 0);
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index bf75f6361773..4bc508c14900 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -3813,6 +3813,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
+ 	while (!list_empty(&intf->waiting_rcv_msgs)) {
+ 		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
+ 				     struct ipmi_smi_msg, link);
++		list_del(&smi_msg->link);
+ 		if (!run_to_completion)
+ 			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+ 					       flags);
+@@ -3822,11 +3823,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
+ 		if (rv > 0) {
+ 			/*
+ 			 * To preserve message order, quit if we
+-			 * can't handle a message.
++			 * can't handle a message.  Add the message
++			 * back at the head, this is safe because this
++			 * tasklet is the only thing that pulls the
++			 * messages.
+ 			 */
++			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
+ 			break;
+ 		} else {
+-			list_del(&smi_msg->link);
+ 			if (rv == 0)
+ 				/* Message handled */
+ 				ipmi_free_smi_msg(smi_msg);
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 1082d4bb016a..591629cc32d5 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -133,6 +133,8 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ 	chip->cdev.owner = chip->pdev->driver->owner;
+ 	chip->cdev.kobj.parent = &chip->dev.kobj;
+ 
++	devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
++
+ 	return chip;
+ }
+ EXPORT_SYMBOL_GPL(tpmm_chip_alloc);
+@@ -168,7 +170,7 @@ static int tpm_dev_add_device(struct tpm_chip *chip)
+ static void tpm_dev_del_device(struct tpm_chip *chip)
+ {
+ 	cdev_del(&chip->cdev);
+-	device_unregister(&chip->dev);
++	device_del(&chip->dev);
+ }
+ 
+ static int tpm1_chip_register(struct tpm_chip *chip)
+diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
+index 5d75bffab141..0f721998b2d4 100644
+--- a/drivers/clk/qcom/gcc-msm8916.c
++++ b/drivers/clk/qcom/gcc-msm8916.c
+@@ -1961,6 +1961,7 @@ static struct clk_branch gcc_crypto_ahb_clk = {
+ 				"pcnoc_bfdcd_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+@@ -1996,6 +1997,7 @@ static struct clk_branch gcc_crypto_clk = {
+ 				"crypto_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
+index eb6a4f9fa107..476d2090c96b 100644
+--- a/drivers/clk/qcom/gcc-msm8960.c
++++ b/drivers/clk/qcom/gcc-msm8960.c
+@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = {
+ 	},
+ 	.freq_tbl = clk_tbl_ce3,
+ 	.clkr = {
+-		.enable_reg = 0x2c08,
++		.enable_reg = 0x36c0,
+ 		.enable_mask = BIT(7),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_src",
+@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = {
+ 	.halt_reg = 0x2fdc,
+ 	.halt_bit = 5,
+ 	.clkr = {
+-		.enable_reg = 0x36c4,
++		.enable_reg = 0x36cc,
+ 		.enable_mask = BIT(4),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "ce3_core_clk",
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index c842e3b60f21..2c111c2cdccc 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -131,6 +131,7 @@ struct clk *rockchip_clk_register_mmc(const char *name,
+ 	if (!mmc_clock)
+ 		return NULL;
+ 
++	init.flags = 0;
+ 	init.num_parents = num_parents;
+ 	init.parent_names = parent_names;
+ 	init.ops = &rockchip_mmc_clk_ops;
+diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
+index edb5d489ae61..3b9de3264534 100644
+--- a/drivers/clk/rockchip/clk.c
++++ b/drivers/clk/rockchip/clk.c
+@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (gate_offset >= 0) {
+ 		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+ 		if (!gate)
+-			return ERR_PTR(-ENOMEM);
++			goto err_gate;
+ 
+ 		gate->flags = gate_flags;
+ 		gate->reg = base + gate_offset;
+@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 	if (div_width > 0) {
+ 		div = kzalloc(sizeof(*div), GFP_KERNEL);
+ 		if (!div)
+-			return ERR_PTR(-ENOMEM);
++			goto err_div;
+ 
+ 		div->flags = div_flags;
+ 		div->reg = base + muxdiv_offset;
+@@ -100,6 +100,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
+ 				     flags);
+ 
+ 	return clk;
++err_div:
++	kfree(gate);
++err_gate:
++	kfree(mux);
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ static struct clk *rockchip_clk_register_frac_branch(const char *name,
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index 5122ef25f595..e63c3ef9b5ec 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -141,6 +141,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	const char *parent_names[2];
+ 	char name[12];
+ 	struct clk_init_data init;
++	static int instance;
+ 	int i;
+ 
+ 	if (!sp810) {
+@@ -172,7 +173,7 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	init.num_parents = ARRAY_SIZE(parent_names);
+ 
+ 	for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) {
+-		snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
++		snprintf(name, sizeof(name), "sp810_%d_%d", instance, i);
+ 
+ 		sp810->timerclken[i].sp810 = sp810;
+ 		sp810->timerclken[i].channel = i;
+@@ -184,5 +185,6 @@ void __init clk_sp810_of_setup(struct device_node *node)
+ 	}
+ 
+ 	of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810);
++	instance++;
+ }
+ CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup);
+diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
+index 5f5f360628fc..c5e6222a6ca2 100644
+--- a/drivers/crypto/ux500/hash/hash_core.c
++++ b/drivers/crypto/ux500/hash/hash_core.c
+@@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
+ 						&device_data->state);
+ 				memmove(req_ctx->state.buffer,
+ 					device_data->state.buffer,
+-					HASH_BLOCK_SIZE / sizeof(u32));
++					HASH_BLOCK_SIZE);
+ 				if (ret) {
+ 					dev_err(device_data->dev,
+ 						"%s: hash_resume_state() failed!\n",
+@@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
+ 
+ 			memmove(device_data->state.buffer,
+ 				req_ctx->state.buffer,
+-				HASH_BLOCK_SIZE / sizeof(u32));
++				HASH_BLOCK_SIZE);
+ 			if (ret) {
+ 				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
+ 					__func__);
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index c8e7f653e5d3..f508cea02039 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -167,7 +167,7 @@ struct crypto_alg p8_aes_cbc_alg = {
+     .cra_name = "cbc(aes)",
+     .cra_driver_name = "p8_aes_cbc",
+     .cra_module = THIS_MODULE,
+-    .cra_priority = 1000,
++    .cra_priority = 2000,
+     .cra_type = &crypto_blkcipher_type,
+     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+     .cra_alignmask = 0,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 266e708d63df..d8fa3b4ec17f 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -151,7 +151,7 @@ struct crypto_alg p8_aes_ctr_alg = {
+     .cra_name = "ctr(aes)",
+     .cra_driver_name = "p8_aes_ctr",
+     .cra_module = THIS_MODULE,
+-    .cra_priority = 1000,
++    .cra_priority = 2000,
+     .cra_type = &crypto_blkcipher_type,
+     .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+     .cra_alignmask = 0,
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index ffa809f30b19..c5e6c82516ce 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -238,7 +238,7 @@ struct at_xdmac_lld {
+ 	u32		mbr_cfg;	/* Configuration Register */
+ };
+ 
+-
++/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
+ struct at_xdmac_desc {
+ 	struct at_xdmac_lld		lld;
+ 	enum dma_transfer_direction	direction;
+@@ -249,7 +249,7 @@ struct at_xdmac_desc {
+ 	unsigned int			xfer_size;
+ 	struct list_head		descs_list;
+ 	struct list_head		xfer_node;
+-};
++} __aligned(sizeof(u64));
+ 
+ static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
+ {
+@@ -930,6 +930,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	u32			cur_nda, check_nda, cur_ubc, mask, value;
+ 	u8			dwidth = 0;
+ 	unsigned long		flags;
++	bool			initd;
+ 
+ 	ret = dma_cookie_status(chan, cookie, txstate);
+ 	if (ret == DMA_COMPLETE)
+@@ -954,7 +955,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	residue = desc->xfer_size;
+ 	/*
+ 	 * Flush FIFO: only relevant when the transfer is source peripheral
+-	 * synchronized.
++	 * synchronized. Flush is needed before reading CUBC because data in
++	 * the FIFO are not reported by CUBC. Reporting a residue of the
++	 * transfer length while we have data in the FIFO can cause issues.
++	 * Use case: the Atmel USART has a timeout, meaning characters have
++	 * been received but no further character has arrived for a while.
++	 * On timeout, it requests the residue. If the data are in the DMA
++	 * FIFO, we will return a residue of the transfer length, meaning no
++	 * data were received. If an application is waiting for these data,
++	 * it will hang since we won't have another USART timeout without
++	 * receiving new data.
+ 	 */
+ 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
+ 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
+@@ -965,34 +975,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	}
+ 
+ 	/*
+-	 * When processing the residue, we need to read two registers but we
+-	 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
+-	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
+-	 * to know how many data are remaining for the current descriptor.
+-	 * Since the dma channel is not paused to not loose data, between the
+-	 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
+-	 * descriptor.
+-	 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
+-	 * still using the same descriptor by reading a second time
+-	 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
+-	 * read again AT_XDMAC_CUBC.
++	 * The easiest way to compute the residue would be to pause the DMA,
++	 * but doing so can lead to data loss as some devices don't
++	 * have a FIFO.
++	 * We need to read several registers because:
++	 * - DMA is running therefore a descriptor change is possible while
++	 * reading these registers
++	 * - When the block transfer is done, the value of the CUBC register
++	 * is set to its initial value until the fetch of the next descriptor.
++	 * This value will corrupt the residue calculation so we have to skip
++	 * it.
++	 *
++	 * INITD --------                    ------------
++	 *              |____________________|
++	 *       _______________________  _______________
++	 * NDA       @desc2             \/   @desc3
++	 *       _______________________/\_______________
++	 *       __________  ___________  _______________
++	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
++	 *       __________/\___________/\_______________
++	 *
++	 * Since descriptors are aligned on 64 bits, we can assume that
++	 * the update of NDA and CUBC is atomic.
+ 	 * Memory barriers are used to ensure the read order of the registers.
+-	 * A max number of retries is set because unlikely it can never ends if
+-	 * we are transferring a lot of data with small buffers.
++	 * A max number of retries is set because, although unlikely, it could never end otherwise.
+ 	 */
+-	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+-	rmb();
+-	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+ 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
+-		rmb();
+ 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+-
+-		if (likely(cur_nda == check_nda))
+-			break;
+-
+-		cur_nda = check_nda;
++		rmb();
++		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
+ 		rmb();
+ 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++		rmb();
++		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++		rmb();
++
++		if ((check_nda == cur_nda) && initd)
++			break;
+ 	}
+ 
+ 	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
+@@ -1001,6 +1020,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	}
+ 
+ 	/*
++	 * Flush FIFO: only relevant when the transfer is source peripheral
++	 * synchronized. Another flush is needed here because CUBC is updated
++	 * when the controller sends the data write command. It can lead to
++	 * report data that are not written in the memory or the device. The
++	 * FIFO flush ensures that data are really written.
++	 */
++	if ((desc->lld.mbr_cfg & mask) == value) {
++		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
++		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
++			cpu_relax();
++	}
++
++	/*
+ 	 * Remove size of all microblocks already transferred and the current
+ 	 * one. Then add the remaining size to transfer of the current
+ 	 * microblock.
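[Illustrative aside, not part of the patch] The ordering comment in the at_xdmac hunk above describes a lock-free snapshot: read NDA, the INITD flag and CUBC in a fixed order (with rmb() between the reads) and accept the sample only when NDA is unchanged and the descriptor fetch had completed. A minimal userspace model of that retry loop, with stubbed registers and the barriers omitted; the register layout and bit positions here are placeholders, not the real AT_XDMAC ones:

#include <stdbool.h>
#include <stdio.h>

#define MAX_RETRIES 5

static volatile unsigned int reg_nda = 0x1000;	/* next descriptor addr */
static volatile unsigned int reg_cc  = 0x1;	/* bit 0 models INITD */
static volatile unsigned int reg_ubc = 256;	/* remaining byte count */

static bool snapshot_residue(unsigned int *nda, unsigned int *ubc)
{
	for (int retry = 0; retry < MAX_RETRIES; retry++) {
		unsigned int check_nda = reg_nda & ~0x3u;
		bool initd = reg_cc & 0x1;	/* descriptor fetch done? */
		unsigned int cur_ubc = reg_ubc;
		unsigned int cur_nda = reg_nda & ~0x3u;

		/* Accept only a self-consistent sample. */
		if (check_nda == cur_nda && initd) {
			*nda = cur_nda;
			*ubc = cur_ubc;
			return true;
		}
	}
	return false;
}

int main(void)
{
	unsigned int nda, ubc;

	if (snapshot_residue(&nda, &ubc))
		printf("nda=0x%x, %u bytes left in current block\n", nda, ubc);
	return 0;
}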
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 63226e9036a1..1f2c86d81176 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -170,6 +170,7 @@ static int generic_ops_register(void)
+ {
+ 	generic_ops.get_variable = efi.get_variable;
+ 	generic_ops.set_variable = efi.set_variable;
++	generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ 	generic_ops.get_next_variable = efi.get_next_variable;
+ 	generic_ops.query_variable_store = efi_query_variable_store;
+ 
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+index be9fa8220499..767d0eaabe97 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+@@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
+ 
+ 		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
+ 					     factor_reg);
++	} else {
++		atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
+index 71dcbc64ae98..7f0356ea0bbf 100644
+--- a/drivers/gpu/drm/drm_dp_helper.c
++++ b/drivers/gpu/drm/drm_dp_helper.c
+@@ -432,7 +432,7 @@ static u32 drm_dp_i2c_functionality(struct i2c_adapter *adapter)
+  */
+ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ {
+-	unsigned int retry;
++	unsigned int retry, defer_i2c;
+ 	int ret;
+ 
+ 	/*
+@@ -440,7 +440,7 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ 	 * is required to retry at least seven times upon receiving AUX_DEFER
+ 	 * before giving up the AUX transaction.
+ 	 */
+-	for (retry = 0; retry < 7; retry++) {
++	for (retry = 0, defer_i2c = 0; retry < (7 + defer_i2c); retry++) {
+ 		mutex_lock(&aux->hw_mutex);
+ 		ret = aux->transfer(aux, msg);
+ 		mutex_unlock(&aux->hw_mutex);
+@@ -499,7 +499,13 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ 
+ 		case DP_AUX_I2C_REPLY_DEFER:
+ 			DRM_DEBUG_KMS("I2C defer\n");
++			/* DP Compliance Test 4.2.2.5 Requirement:
++			 * Must have at least 7 retries for I2C defers on the
++			 * transaction to pass this test
++			 */
+ 			aux->i2c_defer_count++;
++			if (defer_i2c < 7)
++				defer_i2c++;
+ 			usleep_range(400, 500);
+ 			continue;
+ 
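[Illustrative aside, not part of the patch] The change above grows the retry budget by one for every I2C-side defer, capped at seven extra attempts, so the loop makes at most fourteen passes while still guaranteeing the seven retries DP compliance requires. A compilable sketch of just that loop-bound arithmetic, where the do_transfer() stub stands in for aux->transfer():

#include <stdio.h>

enum reply { REPLY_ACK, REPLY_DEFER_I2C };

static enum reply do_transfer(unsigned int attempt)
{
	/* Pretend the sink defers the first nine attempts. */
	return attempt < 9 ? REPLY_DEFER_I2C : REPLY_ACK;
}

int main(void)
{
	unsigned int retry, defer_i2c;

	for (retry = 0, defer_i2c = 0; retry < 7 + defer_i2c; retry++) {
		if (do_transfer(retry) == REPLY_ACK) {
			printf("ack on attempt %u\n", retry + 1);
			return 0;
		}
		if (defer_i2c < 7)	/* each defer buys one extra try */
			defer_i2c++;
	}
	printf("gave up after %u attempts\n", retry);
	return 1;
}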
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 10b8839cbd0c..52dea773bb1b 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2862,11 +2862,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+ 
+ 		if (!port->input && port->vcpi.vcpi > 0) {
+-			if (mgr->mst_state) {
+-				drm_dp_mst_reset_vcpi_slots(mgr, port);
+-				drm_dp_update_payload_part1(mgr);
+-				drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+-			}
++			drm_dp_mst_reset_vcpi_slots(mgr, port);
++			drm_dp_update_payload_part1(mgr);
++			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ 		}
+ 
+ 		kref_put(&port->kref, drm_dp_free_mst_port);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 56323732c748..5250596a612e 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -7129,12 +7129,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ {
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct intel_encoder *encoder;
++	int i;
+ 	u32 val, final;
+ 	bool has_lvds = false;
+ 	bool has_cpu_edp = false;
+ 	bool has_panel = false;
+ 	bool has_ck505 = false;
+ 	bool can_ssc = false;
++	bool using_ssc_source = false;
+ 
+ 	/* We need to take the global config into account */
+ 	for_each_intel_encoder(dev, encoder) {
+@@ -7161,8 +7163,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		can_ssc = true;
+ 	}
+ 
+-	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
+-		      has_panel, has_lvds, has_ck505);
++	/* Check if any DPLLs are using the SSC source */
++	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
++		u32 temp = I915_READ(PCH_DPLL(i));
++
++		if (!(temp & DPLL_VCO_ENABLE))
++			continue;
++
++		if ((temp & PLL_REF_INPUT_MASK) ==
++		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
++			using_ssc_source = true;
++			break;
++		}
++	}
++
++	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
++		      has_panel, has_lvds, has_ck505, using_ssc_source);
+ 
+ 	/* Ironlake: try to setup display ref clock before DPLL
+ 	 * enabling. This is only under driver's control after
+@@ -7199,9 +7215,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
+ 		} else
+ 			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+-	} else {
+-		final |= DREF_SSC_SOURCE_DISABLE;
+-		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
++	} else if (using_ssc_source) {
++		final |= DREF_SSC_SOURCE_ENABLE;
++		final |= DREF_SSC1_ENABLE;
+ 	}
+ 
+ 	if (final == val)
+@@ -7247,7 +7263,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 	} else {
+-		DRM_DEBUG_KMS("Disabling SSC entirely\n");
++		DRM_DEBUG_KMS("Disabling CPU source output\n");
+ 
+ 		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
+ 
+@@ -7258,16 +7274,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
+ 		POSTING_READ(PCH_DREF_CONTROL);
+ 		udelay(200);
+ 
+-		/* Turn off the SSC source */
+-		val &= ~DREF_SSC_SOURCE_MASK;
+-		val |= DREF_SSC_SOURCE_DISABLE;
++		if (!using_ssc_source) {
++			DRM_DEBUG_KMS("Disabling SSC source\n");
+ 
+-		/* Turn off SSC1 */
+-		val &= ~DREF_SSC1_ENABLE;
++			/* Turn off the SSC source */
++			val &= ~DREF_SSC_SOURCE_MASK;
++			val |= DREF_SSC_SOURCE_DISABLE;
+ 
+-		I915_WRITE(PCH_DREF_CONTROL, val);
+-		POSTING_READ(PCH_DREF_CONTROL);
+-		udelay(200);
++			/* Turn off SSC1 */
++			val &= ~DREF_SSC1_ENABLE;
++
++			I915_WRITE(PCH_DREF_CONTROL, val);
++			POSTING_READ(PCH_DREF_CONTROL);
++			udelay(200);
++		}
+ 	}
+ 
+ 	BUG_ON(val != final);
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index b7e20dee64c4..09844b5fe250 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -1814,6 +1814,17 @@ i915_dispatch_execbuffer(struct intel_engine_cs *ring,
+ 	return 0;
+ }
+ 
++static void cleanup_phys_status_page(struct intel_engine_cs *ring)
++{
++	struct drm_i915_private *dev_priv = to_i915(ring->dev);
++
++	if (!dev_priv->status_page_dmah)
++		return;
++
++	drm_pci_free(ring->dev, dev_priv->status_page_dmah);
++	ring->status_page.page_addr = NULL;
++}
++
+ static void cleanup_status_page(struct intel_engine_cs *ring)
+ {
+ 	struct drm_i915_gem_object *obj;
+@@ -1830,9 +1841,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
+ 
+ static int init_status_page(struct intel_engine_cs *ring)
+ {
+-	struct drm_i915_gem_object *obj;
++	struct drm_i915_gem_object *obj = ring->status_page.obj;
+ 
+-	if ((obj = ring->status_page.obj) == NULL) {
++	if (obj == NULL) {
+ 		unsigned flags;
+ 		int ret;
+ 
+@@ -1985,7 +1996,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
+ 		if (ret)
+ 			goto error;
+ 	} else {
+-		BUG_ON(ring->id != RCS);
++		WARN_ON(ring->id != RCS);
+ 		ret = init_phys_status_page(ring);
+ 		if (ret)
+ 			goto error;
+@@ -2049,7 +2060,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
+ 	if (ring->cleanup)
+ 		ring->cleanup(ring);
+ 
+-	cleanup_status_page(ring);
++	if (I915_NEED_GFX_HWS(ring->dev)) {
++		cleanup_status_page(ring);
++	} else {
++		WARN_ON(ring->id != RCS);
++		cleanup_phys_status_page(ring);
++	}
+ 
+ 	i915_cmd_parser_fini_ring(ring);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index cf43f77be254..bb29f1e482d7 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -572,7 +572,8 @@ nouveau_fbcon_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto fini;
+ 
+-	fbcon->helper.fbdev->pixmap.buf_align = 4;
++	if (fbcon->helper.fbdev)
++		fbcon->helper.fbdev->pixmap.buf_align = 4;
+ 	return 0;
+ 
+ fini:
+diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
+index dac78ad24b31..79bab6fd76bb 100644
+--- a/drivers/gpu/drm/radeon/atombios_crtc.c
++++ b/drivers/gpu/drm/radeon/atombios_crtc.c
+@@ -1739,6 +1739,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
+ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 
+@@ -1748,6 +1749,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* for DP use the same PLL for all */
+ 			if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
+ 				return test_radeon_crtc->pll_id;
+@@ -1769,6 +1774,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct drm_device *dev = crtc->dev;
++	struct radeon_device *rdev = dev->dev_private;
+ 	struct drm_crtc *test_crtc;
+ 	struct radeon_crtc *test_radeon_crtc;
+ 	u32 adjusted_clock, test_adjusted_clock;
+@@ -1784,6 +1790,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
+ 		test_radeon_crtc = to_radeon_crtc(test_crtc);
+ 		if (test_radeon_crtc->encoder &&
+ 		    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
++			/* PPLL2 is exclusive to UNIPHYA on DCE61 */
++			if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
++			    test_radeon_crtc->pll_id == ATOM_PPLL2)
++				continue;
+ 			/* check if we are already driving this connector with another crtc */
+ 			if (test_radeon_crtc->connector == radeon_crtc->connector) {
+ 				/* if we are, return that pll */
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 604c44d88e7a..83b3eb2e444a 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+ /*
+  * GPU helpers function.
+  */
++
++/**
++ * radeon_device_is_virtual - check if we are running in a virtual environment
++ *
++ * Check if the asic has been passed through to a VM (all asics).
++ * Used at driver startup.
++ * Returns true if virtual or false if not.
++ */
++static bool radeon_device_is_virtual(void)
++{
++#ifdef CONFIG_X86
++	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
++#else
++	return false;
++#endif
++}
++
+ /**
+  * radeon_card_posted - check if the hw has already been initialized
+  *
+@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
+ {
+ 	uint32_t reg;
+ 
++	/* for pass through, always force asic_init */
++	if (radeon_device_is_virtual())
++		return false;
++
+ 	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
+ 	if (efi_enabled(EFI_BOOT) &&
+ 	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index c4e0e69b688d..f666277a8993 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
+@@ -2959,6 +2960,10 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
+ 		}
+ 		++p;
+ 	}
++	/* limit mclk on all R7 370 parts for stability */
++	if (rdev->pdev->device == 0x6811 &&
++	    rdev->pdev->revision == 0x81)
++		max_mclk = 120000;
+ 
+ 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ 	    ni_dpm_vblank_too_short(rdev))
+diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
+index 5fc16cecd3ba..cd8d183dcfe5 100644
+--- a/drivers/gpu/drm/udl/udl_fb.c
++++ b/drivers/gpu/drm/udl/udl_fb.c
+@@ -546,7 +546,7 @@ static int udlfb_create(struct drm_fb_helper *helper,
+ 
+ 	return ret;
+ out_gfree:
+-	drm_gem_object_unreference(&ufbdev->ufb.obj->base);
++	drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
+ out:
+ 	return ret;
+ }
+diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
+index 2a0a784ab6ee..d7528e0d8442 100644
+--- a/drivers/gpu/drm/udl/udl_gem.c
++++ b/drivers/gpu/drm/udl/udl_gem.c
+@@ -52,7 +52,7 @@ udl_gem_create(struct drm_file *file,
+ 		return ret;
+ 	}
+ 
+-	drm_gem_object_unreference(&obj->base);
++	drm_gem_object_unreference_unlocked(&obj->base);
+ 	*handle_p = handle;
+ 	return 0;
+ }
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bc23db196930..bf039dbaa7eb 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1864,6 +1864,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_ELITE_KBD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_EXTREME_3D) },
++	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DUAL_ACTION) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD) },
+ 	{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
+diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
+index 4e49462870ab..d0c8a1c1e1fe 100644
+--- a/drivers/hid/hid-elo.c
++++ b/drivers/hid/hid-elo.c
+@@ -259,7 +259,7 @@ static void elo_remove(struct hid_device *hdev)
+ 	struct elo_priv *priv = hid_get_drvdata(hdev);
+ 
+ 	hid_hw_stop(hdev);
+-	flush_workqueue(wq);
++	cancel_delayed_work_sync(&priv->work);
+ 	kfree(priv);
+ }
+ 
+diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
+index 2f1ddca6f2e0..700145b15088 100644
+--- a/drivers/hid/usbhid/hiddev.c
++++ b/drivers/hid/usbhid/hiddev.c
+@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
+ 					goto inval;
+ 			} else if (uref->usage_index >= field->report_count)
+ 				goto inval;
+-
+-			else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+-				 (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+-				  uref->usage_index + uref_multi->num_values > field->report_count))
+-				goto inval;
+ 		}
+ 
++		if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
++		    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
++		     uref->usage_index + uref_multi->num_values > field->report_count))
++			goto inval;
++
+ 		switch (cmd) {
+ 		case HIDIOCGUSAGE:
+ 			uref->value = field->value[uref->usage_index];
+diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
+index f67d71ee8386..159f50d0ae39 100644
+--- a/drivers/hwmon/max1111.c
++++ b/drivers/hwmon/max1111.c
+@@ -85,6 +85,9 @@ static struct max1111_data *the_max1111;
+ 
+ int max1111_read_channel(int channel)
+ {
++	if (!the_max1111 || !the_max1111->spi)
++		return -ENODEV;
++
+ 	return max1111_read(&the_max1111->spi->dev, channel);
+ }
+ EXPORT_SYMBOL(max1111_read_channel);
+@@ -258,6 +261,9 @@ static int max1111_remove(struct spi_device *spi)
+ {
+ 	struct max1111_data *data = spi_get_drvdata(spi);
+ 
++#ifdef CONFIG_SHARPSL_PM
++	the_max1111 = NULL;
++#endif
+ 	hwmon_device_unregister(data->hwmon_dev);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1110_attr_group);
+ 	sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
+diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
+index b29c7500461a..f54ece8fce78 100644
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 		return -EIO;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	for (i = 0; i < num; i++, msgs++) {
+ 		stop = (i == num - 1);
+@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
+ 	}
+ 
+  out:
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	return ret;
+ }
+ 
+@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 		return -ENOENT;
+ 	}
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, i2c);
+ 
++	clk_disable(i2c->clk);
++
++	return 0;
++
+  err_clk:
+ 	clk_disable_unprepare(i2c->clk);
+ 	return ret;
+@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
+ 
+ 	i2c_del_adapter(&i2c->adap);
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
+ 
+ 	i2c->suspended = 1;
+ 
++	clk_unprepare(i2c->clk);
++
+ 	return 0;
+ }
+ 
+@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
+ 	int ret = 0;
+ 
+-	clk_prepare_enable(i2c->clk);
++	ret = clk_prepare_enable(i2c->clk);
++	if (ret)
++		return ret;
+ 
+ 	ret = exynos5_hsi2c_clock_setup(i2c);
+ 	if (ret) {
+@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
+ 	}
+ 
+ 	exynos5_i2c_init(i2c);
+-	clk_disable_unprepare(i2c->clk);
++	clk_disable(i2c->clk);
+ 	i2c->suspended = 0;
+ 
+ 	return 0;
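[Illustrative aside, not part of the patch] The exynos5 hunk above splits clk_prepare_enable() so the potentially sleeping prepare step happens once at probe, while only the cheap enable/disable pair brackets each transfer, and the prepare count is dropped across suspend. A userspace model of that lifecycle with stubbed clk_* calls, success paths only:

#include <stdio.h>

static int clk_prepare(void)    { puts("prepare (may sleep)"); return 0; }
static void clk_unprepare(void) { puts("unprepare"); }
static int clk_enable(void)     { puts("enable (fast)"); return 0; }
static void clk_disable(void)   { puts("disable (fast)"); }

static int xfer(int n)
{
	if (clk_enable())
		return -1;
	printf("transfer %d\n", n);
	clk_disable();
	return 0;
}

int main(void)
{
	if (clk_prepare())		/* once, at probe time */
		return 1;
	for (int n = 0; n < 2; n++)	/* per-transfer enable/disable */
		xfer(n);
	clk_unprepare();		/* remove (or suspend) */
	return 0;
}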
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index fd780bbcd07e..f2a7f72f7aa6 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
+ 	int eoc_gpio;
+ 	int err;
+ 	const char *name = NULL;
+-	enum asahi_compass_chipset chipset;
++	enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ 
+ 	/* Grab and set up the supplied GPIO. */
+ 	if (client->dev.platform_data)
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index 33fdd50123f7..9fa27b0cda32 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 
+ 	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
+ 	ah->av.ib.g_slid  = ah_attr->src_path_bits;
++	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 	if (ah_attr->ah_flags & IB_AH_GRH) {
+ 		ah->av.ib.g_slid   |= 0x80;
+ 		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
+@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
+ 		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
+ 			--ah->av.ib.stat_rate;
+ 	}
+-	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ 
+ 	return &ah->ibah;
+ }
+diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
+index c4ca20e63221..b6d14bba6645 100644
+--- a/drivers/input/misc/pmic8xxx-pwrkey.c
++++ b/drivers/input/misc/pmic8xxx-pwrkey.c
+@@ -92,7 +92,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
+ 		kpd_delay = 15625;
+ 
+-	if (kpd_delay > 62500 || kpd_delay == 0) {
++	/* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
++	if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
+ 		dev_err(&pdev->dev, "invalid power key trigger delay\n");
+ 		return -EINVAL;
+ 	}
+@@ -122,8 +123,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+ 	pwr->name = "pmic8xxx_pwrkey";
+ 	pwr->phys = "pmic8xxx_pwrkey/input0";
+ 
+-	delay = (kpd_delay << 10) / USEC_PER_SEC;
+-	delay = 1 + ilog2(delay);
++	delay = (kpd_delay << 6) / USEC_PER_SEC;
++	delay = ilog2(delay);
+ 
+ 	err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
+ 	if (err < 0) {
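[Illustrative aside, not part of the patch] With the corrected encoding above, the register field stores log2 of the delay expressed in 1/64-second units, which is why the accepted range is 1/64 s (field 0) through 2 s (field 7). A small worked example; ilog2_u() is a local stand-in for the kernel's ilog2():

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

static unsigned int ilog2_u(unsigned long v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long delays_us[] = { 15625, 1000000, 2000000 };

	for (int i = 0; i < 3; i++) {
		unsigned long d = (delays_us[i] << 6) / USEC_PER_SEC;

		printf("%7lu us -> field %u\n", delays_us[i], ilog2_u(d));
	}
	return 0;	/* prints fields 0, 6 and 7 */
}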
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index 0f5b400706d7..c3c5d492cba0 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1550,13 +1550,7 @@ static int elantech_set_properties(struct elantech_data *etd)
+ 		case 5:
+ 			etd->hw_version = 3;
+ 			break;
+-		case 6:
+-		case 7:
+-		case 8:
+-		case 9:
+-		case 10:
+-		case 13:
+-		case 14:
++		case 6 ... 14:
+ 			etd->hw_version = 4;
+ 			break;
+ 		default:
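[Illustrative aside, not part of the patch] The collapsed cases above use the GCC/Clang case-range extension, equivalent to listing every value from 6 through 14 individually. A minimal standalone use, mirroring only the two mappings visible in this hunk:

/* Case ranges are a GCC/Clang extension, not standard C. */
#include <stdio.h>

static int hw_version(int ic_type)
{
	switch (ic_type) {
	case 5:
		return 3;
	case 6 ... 14:		/* same as listing 6,7,...,14 one by one */
		return 4;
	default:
		return -1;
	}
}

int main(void)
{
	printf("ic_type 9 -> hw_version %d\n", hw_version(9));
	return 0;
}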
+diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
+index a3f0f5a47490..0f586780ceb4 100644
+--- a/drivers/input/mouse/vmmouse.c
++++ b/drivers/input/mouse/vmmouse.c
+@@ -355,18 +355,11 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+ 		return -ENXIO;
+ 	}
+ 
+-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
+-		return -EBUSY;
+-	}
+-
+ 	/* Check if the device is present */
+ 	response = ~VMMOUSE_PROTO_MAGIC;
+ 	VMMOUSE_CMD(GETVERSION, 0, version, response, dummy1, dummy2);
+-	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU) {
+-		release_region(VMMOUSE_PROTO_PORT, 4);
++	if (response != VMMOUSE_PROTO_MAGIC || version == 0xffffffffU)
+ 		return -ENXIO;
+-	}
+ 
+ 	if (set_properties) {
+ 		psmouse->vendor = VMMOUSE_VENDOR;
+@@ -374,8 +367,6 @@ int vmmouse_detect(struct psmouse *psmouse, bool set_properties)
+ 		psmouse->model = version;
+ 	}
+ 
+-	release_region(VMMOUSE_PROTO_PORT, 4);
+-
+ 	return 0;
+ }
+ 
+@@ -394,7 +385,6 @@ static void vmmouse_disconnect(struct psmouse *psmouse)
+ 	psmouse_reset(psmouse);
+ 	input_unregister_device(priv->abs_dev);
+ 	kfree(priv);
+-	release_region(VMMOUSE_PROTO_PORT, 4);
+ }
+ 
+ /**
+@@ -438,15 +428,10 @@ int vmmouse_init(struct psmouse *psmouse)
+ 	struct input_dev *rel_dev = psmouse->dev, *abs_dev;
+ 	int error;
+ 
+-	if (!request_region(VMMOUSE_PROTO_PORT, 4, "vmmouse")) {
+-		psmouse_dbg(psmouse, "VMMouse port in use.\n");
+-		return -EBUSY;
+-	}
+-
+ 	psmouse_reset(psmouse);
+ 	error = vmmouse_enable(psmouse);
+ 	if (error)
+-		goto release_region;
++		return error;
+ 
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	abs_dev = input_allocate_device();
+@@ -502,8 +487,5 @@ init_fail:
+ 	kfree(priv);
+ 	psmouse->private = NULL;
+ 
+-release_region:
+-	release_region(VMMOUSE_PROTO_PORT, 4);
+-
+ 	return error;
+ }
+diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
+index 2792ca397dd0..3ed0ce1e4dcb 100644
+--- a/drivers/input/touchscreen/wacom_w8001.c
++++ b/drivers/input/touchscreen/wacom_w8001.c
+@@ -27,7 +27,7 @@ MODULE_AUTHOR("Jaya Kumar <jayakumar.lkml@gmail.com>");
+ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+ 
+-#define W8001_MAX_LENGTH	11
++#define W8001_MAX_LENGTH	13
+ #define W8001_LEAD_MASK		0x80
+ #define W8001_LEAD_BYTE		0x80
+ #define W8001_TAB_MASK		0x40
+diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
+index 19880c7385e3..d618f67a9f48 100644
+--- a/drivers/input/touchscreen/zforce_ts.c
++++ b/drivers/input/touchscreen/zforce_ts.c
+@@ -359,8 +359,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
+ 			point.coord_x = point.coord_y = 0;
+ 		}
+ 
+-		point.state = payload[9 * i + 5] & 0x03;
+-		point.id = (payload[9 * i + 5] & 0xfc) >> 2;
++		point.state = payload[9 * i + 5] & 0x0f;
++		point.id = (payload[9 * i + 5] & 0xf0) >> 4;
+ 
+ 		/* determine touch major, minor and orientation */
+ 		point.area_major = max(payload[9 * i + 6],
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index 11ec9d2a27df..38f375516ae6 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1099,6 +1099,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 	int i;
+ 	int r = -EINVAL;
+ 	char *origin_path, *cow_path;
++	dev_t origin_dev, cow_dev;
+ 	unsigned args_used, num_flush_bios = 1;
+ 	fmode_t origin_mode = FMODE_READ;
+ 
+@@ -1129,11 +1130,19 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ 		ti->error = "Cannot get origin device";
+ 		goto bad_origin;
+ 	}
++	origin_dev = s->origin->bdev->bd_dev;
+ 
+ 	cow_path = argv[0];
+ 	argv++;
+ 	argc--;
+ 
++	cow_dev = dm_get_dev_t(cow_path);
++	if (cow_dev && cow_dev == origin_dev) {
++		ti->error = "COW device cannot be the same as origin device";
++		r = -EINVAL;
++		goto bad_cow;
++	}
++
+ 	r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
+ 	if (r) {
+ 		ti->error = "Cannot get COW device";
+diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
+index 16ba55ad7089..e411ccba0af6 100644
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -365,6 +365,26 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+ }
+ 
+ /*
++ * Convert the path to a device
++ */
++dev_t dm_get_dev_t(const char *path)
++{
++	dev_t uninitialized_var(dev);
++	struct block_device *bdev;
++
++	bdev = lookup_bdev(path);
++	if (IS_ERR(bdev))
++		dev = name_to_dev_t(path);
++	else {
++		dev = bdev->bd_dev;
++		bdput(bdev);
++	}
++
++	return dev;
++}
++EXPORT_SYMBOL_GPL(dm_get_dev_t);
++
++/*
+  * Add a device to the list, or just increment the usage count if
+  * it's already present.
+  */
+@@ -372,23 +392,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ 		  struct dm_dev **result)
+ {
+ 	int r;
+-	dev_t uninitialized_var(dev);
++	dev_t dev;
+ 	struct dm_dev_internal *dd;
+ 	struct dm_table *t = ti->table;
+-	struct block_device *bdev;
+ 
+ 	BUG_ON(!t);
+ 
+-	/* convert the path to a device */
+-	bdev = lookup_bdev(path);
+-	if (IS_ERR(bdev)) {
+-		dev = name_to_dev_t(path);
+-		if (!dev)
+-			return -ENODEV;
+-	} else {
+-		dev = bdev->bd_dev;
+-		bdput(bdev);
+-	}
++	dev = dm_get_dev_t(path);
++	if (!dev)
++		return -ENODEV;
+ 
+ 	dd = find_device(&t->devices, dev);
+ 	if (!dd) {
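[Illustrative aside, not part of the patch] dm_get_dev_t() above accepts either a filesystem path to a block device node or a textual device number, trying lookup_bdev() first and falling back to name_to_dev_t(). A rough userspace analogue of those two resolution paths; the "7:0" string is an arbitrary example:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

static dev_t path_to_dev(const char *path)
{
	struct stat st;
	unsigned int ma, mi;

	/* First try: the path names an existing block device node. */
	if (stat(path, &st) == 0 && S_ISBLK(st.st_mode))
		return st.st_rdev;

	/* Fallback: parse a "major:minor" style name. */
	if (sscanf(path, "%u:%u", &ma, &mi) == 2)
		return makedev(ma, mi);

	return 0;
}

int main(void)
{
	dev_t dev = path_to_dev("7:0");

	printf("resolved to %u:%u\n", major(dev), minor(dev));
	return 0;
}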
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 8e6fe0200117..e14b1a19f4e6 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -2100,14 +2100,12 @@ static int coda_probe(struct platform_device *pdev)
+ 
+ 	pdev_id = of_id ? of_id->data : platform_get_device_id(pdev);
+ 
+-	if (of_id) {
++	if (of_id)
+ 		dev->devtype = of_id->data;
+-	} else if (pdev_id) {
++	else if (pdev_id)
+ 		dev->devtype = &coda_devdata[pdev_id->driver_data];
+-	} else {
+-		ret = -EINVAL;
+-		goto err_v4l2_register;
+-	}
++	else
++		return -EINVAL;
+ 
+ 	spin_lock_init(&dev->irqlock);
+ 	INIT_LIST_HEAD(&dev->instances);
+diff --git a/drivers/media/platform/vsp1/vsp1_sru.c b/drivers/media/platform/vsp1/vsp1_sru.c
+index 6310acab60e7..d41ae950d1a1 100644
+--- a/drivers/media/platform/vsp1/vsp1_sru.c
++++ b/drivers/media/platform/vsp1/vsp1_sru.c
+@@ -154,6 +154,7 @@ static int sru_s_stream(struct v4l2_subdev *subdev, int enable)
+ 	mutex_lock(sru->ctrls.lock);
+ 	ctrl0 |= vsp1_sru_read(sru, VI6_SRU_CTRL0)
+ 	       & (VI6_SRU_CTRL0_PARAM0_MASK | VI6_SRU_CTRL0_PARAM1_MASK);
++	vsp1_sru_write(sru, VI6_SRU_CTRL0, ctrl0);
+ 	mutex_unlock(sru->ctrls.lock);
+ 
+ 	vsp1_sru_write(sru, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
+diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
+index c94ea0d68746..2c51acce4b34 100644
+--- a/drivers/memory/omap-gpmc.c
++++ b/drivers/memory/omap-gpmc.c
+@@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+ 			   GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
+-			   GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
++			   GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
+ 	gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
+ 			   GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
+ 			   p->cycle2cyclesamecsen);
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index 006242c8bca0..b3c10b7dae1f 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -429,7 +429,7 @@ config ARM_CHARLCD
+ 	  still useful.
+ 
+ config BMP085
+-	bool
++	tristate
+ 	depends on SYSFS
+ 
+ config BMP085_I2C
+diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
+index 15e88078ba1e..f1a0b99f5a9a 100644
+--- a/drivers/misc/ad525x_dpot.c
++++ b/drivers/misc/ad525x_dpot.c
+@@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
+ 			 */
+ 			value = swab16(value);
+ 
+-			if (dpot->uid == DPOT_UID(AD5271_ID))
++			if (dpot->uid == DPOT_UID(AD5274_ID))
+ 				value = value >> 2;
+ 		return value;
+ 	default:
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 1c73ba6efdbd..f109aeed9883 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
+ 	int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
+ 	struct ubi_volume *vol = ubi->volumes[idx];
+ 	struct ubi_vid_hdr *vid_hdr;
++	uint32_t crc;
+ 
+ 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ 	if (!vid_hdr)
+@@ -599,14 +600,8 @@ retry:
+ 		goto out_put;
+ 	}
+ 
+-	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+-	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+-	if (err) {
+-		up_read(&ubi->fm_eba_sem);
+-		goto write_error;
+-	}
++	ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
+ 
+-	data_size = offset + len;
+ 	mutex_lock(&ubi->buf_mutex);
+ 	memset(ubi->peb_buf + offset, 0xFF, len);
+ 
+@@ -621,6 +616,19 @@ retry:
+ 
+ 	memcpy(ubi->peb_buf + offset, buf, len);
+ 
++	data_size = offset + len;
++	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
++	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
++	vid_hdr->copy_flag = 1;
++	vid_hdr->data_size = cpu_to_be32(data_size);
++	vid_hdr->data_crc = cpu_to_be32(crc);
++	err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
++	if (err) {
++		mutex_unlock(&ubi->buf_mutex);
++		up_read(&ubi->fm_eba_sem);
++		goto write_error;
++	}
++
+ 	err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
+ 	if (err) {
+ 		mutex_unlock(&ubi->buf_mutex);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index bd744e31c434..9ba92e23e67f 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3246,6 +3246,30 @@ static int bond_close(struct net_device *bond_dev)
+ 	return 0;
+ }
+ 
++/* Fold stats, assuming all rtnl_link_stats64 fields are u64, while
++ * allowing for drivers that can provide 32bit values only.
++ */
++static void bond_fold_stats(struct rtnl_link_stats64 *_res,
++			    const struct rtnl_link_stats64 *_new,
++			    const struct rtnl_link_stats64 *_old)
++{
++	const u64 *new = (const u64 *)_new;
++	const u64 *old = (const u64 *)_old;
++	u64 *res = (u64 *)_res;
++	int i;
++
++	for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
++		u64 nv = new[i];
++		u64 ov = old[i];
++
++		/* detects if this particular field is 32bit only */
++		if (((nv | ov) >> 32) == 0)
++			res[i] += (u32)nv - (u32)ov;
++		else
++			res[i] += nv - ov;
++	}
++}
++
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 						struct rtnl_link_stats64 *stats)
+ {
+@@ -3254,43 +3278,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 	struct list_head *iter;
+ 	struct slave *slave;
+ 
++	spin_lock(&bond->stats_lock);
+ 	memcpy(stats, &bond->bond_stats, sizeof(*stats));
+ 
+-	bond_for_each_slave(bond, slave, iter) {
+-		const struct rtnl_link_stats64 *sstats =
++	rcu_read_lock();
++	bond_for_each_slave_rcu(bond, slave, iter) {
++		const struct rtnl_link_stats64 *new =
+ 			dev_get_stats(slave->dev, &temp);
+-		struct rtnl_link_stats64 *pstats = &slave->slave_stats;
+-
+-		stats->rx_packets +=  sstats->rx_packets - pstats->rx_packets;
+-		stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
+-		stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
+-		stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+-
+-		stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
+-		stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
+-		stats->tx_errors += sstats->tx_errors - pstats->tx_errors;
+-		stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped;
+-
+-		stats->multicast += sstats->multicast - pstats->multicast;
+-		stats->collisions += sstats->collisions - pstats->collisions;
+-
+-		stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors;
+-		stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors;
+-		stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors;
+-		stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors;
+-		stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors;
+-		stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors;
+-
+-		stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors;
+-		stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors;
+-		stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors;
+-		stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors;
+-		stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors;
++
++		bond_fold_stats(stats, new, &slave->slave_stats);
+ 
+ 		/* save off the slave stats for the next run */
+-		memcpy(pstats, sstats, sizeof(*sstats));
++		memcpy(&slave->slave_stats, new, sizeof(*new));
+ 	}
++	rcu_read_unlock();
++
+ 	memcpy(&bond->bond_stats, stats, sizeof(*stats));
++	spin_unlock(&bond->stats_lock);
+ 
+ 	return stats;
+ }
+@@ -4102,6 +4106,7 @@ void bond_setup(struct net_device *bond_dev)
+ 	struct bonding *bond = netdev_priv(bond_dev);
+ 
+ 	spin_lock_init(&bond->mode_lock);
++	spin_lock_init(&bond->stats_lock);
+ 	bond->params = bonding_defaults;
+ 
+ 	/* Initialize pointers */
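[Illustrative aside, not part of the patch] bond_fold_stats() above accumulates per-slave deltas while tolerating drivers that only ever populate the low 32 bits of a counter: if neither sample has high bits set, the delta is computed in 32-bit arithmetic so a wrapped counter still yields the small positive difference rather than a near-2^64 value. A standalone model of that fold for a single field:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void fold_one(uint64_t *res, uint64_t nv, uint64_t ov)
{
	if (((nv | ov) >> 32) == 0)	/* counter only ever 32 bits wide? */
		*res += (uint32_t)nv - (uint32_t)ov;	/* delta mod 2^32 */
	else
		*res += nv - ov;
}

int main(void)
{
	uint64_t total = 0;

	fold_one(&total, 5, 0xfffffff0u);	/* counter wrapped past 2^32 */
	printf("delta after wrap: %" PRIu64 "\n", total);	/* prints 21 */
	return 0;
}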
+diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
+index f4e40aa4d2a2..35233aa5a88a 100644
+--- a/drivers/net/can/at91_can.c
++++ b/drivers/net/can/at91_can.c
+@@ -733,9 +733,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
+ 
+ 	/* upper group completed, look again in lower */
+ 	if (priv->rx_next > get_mb_rx_low_last(priv) &&
+-	    quota > 0 && mb > get_mb_rx_last(priv)) {
++	    mb > get_mb_rx_last(priv)) {
+ 		priv->rx_next = get_mb_rx_first(priv);
+-		goto again;
++		if (quota > 0)
++			goto again;
+ 	}
+ 
+ 	return received;
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 5d214d135332..c076414103d2 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
+ 
+ 	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
+ 
+-	for (i = 0; i < frame->can_dlc; i += 2) {
+-		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+-				frame->data[i] | (frame->data[i + 1] << 8));
++	if (priv->type == BOSCH_D_CAN) {
++		u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
++
++		for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
++			data = (u32)frame->data[i];
++			data |= (u32)frame->data[i + 1] << 8;
++			data |= (u32)frame->data[i + 2] << 16;
++			data |= (u32)frame->data[i + 3] << 24;
++			priv->write_reg32(priv, dreg, data);
++		}
++	} else {
++		for (i = 0; i < frame->can_dlc; i += 2) {
++			priv->write_reg(priv,
++					C_CAN_IFACE(DATA1_REG, iface) + i / 2,
++					frame->data[i] |
++					(frame->data[i + 1] << 8));
++		}
+ 	}
+ }
+ 
+@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
+ 	} else {
+ 		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
+ 
+-		for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
+-			data = priv->read_reg(priv, dreg);
+-			frame->data[i] = data;
+-			frame->data[i + 1] = data >> 8;
++		if (priv->type == BOSCH_D_CAN) {
++			for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
++				data = priv->read_reg32(priv, dreg);
++				frame->data[i] = data;
++				frame->data[i + 1] = data >> 8;
++				frame->data[i + 2] = data >> 16;
++				frame->data[i + 3] = data >> 24;
++			}
++		} else {
++			for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
++				data = priv->read_reg(priv, dreg);
++				frame->data[i] = data;
++				frame->data[i + 1] = data >> 8;
++			}
+ 		}
+ 	}
+ 
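[Illustrative aside, not part of the patch] For D_CAN controllers the hunk above assembles four payload bytes little-endian into a single 32-bit register write instead of two 16-bit writes. The packing in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t data[4] = { 0x11, 0x22, 0x33, 0x44 };
	uint32_t word = 0;

	for (int i = 0; i < 4; i++)
		word |= (uint32_t)data[i] << (8 * i);	/* little-endian */

	printf("register word: 0x%08x\n", (unsigned)word);	/* 0x44332211 */
	return 0;
}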
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 910c12e2638e..ad535a854e5c 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+ 	 * - control mode with CAN_CTRLMODE_FD set
+ 	 */
+ 
++	if (!data)
++		return 0;
++
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ 
+@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
+ 	return -EOPNOTSUPP;
+ }
+ 
++static void can_dellink(struct net_device *dev, struct list_head *head)
++{
++	return;
++}
++
+ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.kind		= "can",
+ 	.maxtype	= IFLA_CAN_MAX,
+@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.validate	= can_validate,
+ 	.newlink	= can_newlink,
+ 	.changelink	= can_changelink,
++	.dellink	= can_dellink,
+ 	.get_size	= can_get_size,
+ 	.fill_info	= can_fill_info,
+ 	.get_xstats_size = can_get_xstats_size,
+diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
+index 46a535318c7a..972ee645fac6 100644
+--- a/drivers/net/ethernet/atheros/atlx/atl2.c
++++ b/drivers/net/ethernet/atheros/atlx/atl2.c
+@@ -1412,7 +1412,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 
+ 	err = -EIO;
+ 
+-	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX;
++	netdev->hw_features = NETIF_F_HW_VLAN_CTAG_RX;
+ 	netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
+ 
+ 	/* Init PHY as early as possible due to power saving issue  */
+diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+index 6043734ea613..a9fcac044e9e 100644
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -1048,7 +1048,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+ 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ 			dma_unmap_single(&dev->dev,
+ 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
+-					 tx_cb_ptr->skb->len,
++					 dma_unmap_len(tx_cb_ptr, dma_len),
+ 					 DMA_TO_DEVICE);
+ 			bcmgenet_free_cb(tx_cb_ptr);
+ 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+@@ -1159,7 +1159,7 @@ static int bcmgenet_xmit_single(struct net_device *dev,
+ 	}
+ 
+ 	dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+-	dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
++	dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
+ 	length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ 			(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ 			DMA_TX_APPEND_CRC;
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 570390b5cd42..67aec18dd76c 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1546,9 +1546,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
+ 	struct fec_enet_private *fep = netdev_priv(ndev);
+ 
+ 	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
+-		clear_bit(queue_id, &fep->work_rx);
+-		pkt_received += fec_enet_rx_queue(ndev,
++		int ret;
++
++		ret = fec_enet_rx_queue(ndev,
+ 					budget - pkt_received, queue_id);
++
++		if (ret < budget - pkt_received)
++			clear_bit(queue_id, &fep->work_rx);
++
++		pkt_received += ret;
+ 	}
+ 	return pkt_received;
+ }
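[Illustrative aside, not part of the patch] The fec change above stops clearing a queue's work_rx bit unconditionally: the bit is cleared only when the queue handed back fewer packets than its remaining budget (i.e. it was fully drained), otherwise it stays set so the NAPI poller comes back. A toy model of that repoll decision:

#include <stdbool.h>
#include <stdio.h>

static int backlog = 7;			/* packets waiting in the queue */

static int rx_queue(int budget)
{
	int n = backlog < budget ? backlog : budget;

	backlog -= n;
	return n;
}

int main(void)
{
	bool pending = true;
	int budget = 4, received = 0;

	while (pending && received < budget) {
		int ret = rx_queue(budget - received);

		if (ret < budget - received)	/* fully drained */
			pending = false;
		received += ret;
	}
	/* budget exhausted with work left: stays pending for a repoll */
	printf("received=%d pending=%s\n", received, pending ? "yes" : "no");
	return 0;
}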
+diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
+index 6e9a792097d3..46e8d5b12c1a 100644
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
+ }
+ 
+ static inline void
+-jme_clear_pm(struct jme_adapter *jme)
++jme_clear_pm_enable_wol(struct jme_adapter *jme)
+ {
+ 	jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
+ }
+ 
++static inline void
++jme_clear_pm_disable_wol(struct jme_adapter *jme)
++{
++	jwrite32(jme, JME_PMCS, PMCS_STMASK);
++}
++
+ static int
+ jme_reload_eeprom(struct jme_adapter *jme)
+ {
+@@ -1857,7 +1863,7 @@ jme_open(struct net_device *netdev)
+ 	struct jme_adapter *jme = netdev_priv(netdev);
+ 	int rc;
+ 
+-	jme_clear_pm(jme);
++	jme_clear_pm_disable_wol(jme);
+ 	JME_NAPI_ENABLE(jme);
+ 
+ 	tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+@@ -1929,11 +1935,11 @@ jme_wait_link(struct jme_adapter *jme)
+ static void
+ jme_powersave_phy(struct jme_adapter *jme)
+ {
+-	if (jme->reg_pmcs) {
++	if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
+ 		jme_set_100m_half(jme);
+ 		if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
+ 			jme_wait_link(jme);
+-		jme_clear_pm(jme);
++		jme_clear_pm_enable_wol(jme);
+ 	} else {
+ 		jme_phy_off(jme);
+ 	}
+@@ -2650,9 +2656,6 @@ jme_set_wol(struct net_device *netdev,
+ 	if (wol->wolopts & WAKE_MAGIC)
+ 		jme->reg_pmcs |= PMCS_MFEN;
+ 
+-	jwrite32(jme, JME_PMCS, jme->reg_pmcs);
+-	device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
+-
+ 	return 0;
+ }
+ 
+@@ -3176,8 +3179,8 @@ jme_init_one(struct pci_dev *pdev,
+ 	jme->mii_if.mdio_read = jme_mdio_read;
+ 	jme->mii_if.mdio_write = jme_mdio_write;
+ 
+-	jme_clear_pm(jme);
+-	device_set_wakeup_enable(&pdev->dev, true);
++	jme_clear_pm_disable_wol(jme);
++	device_init_wakeup(&pdev->dev, true);
+ 
+ 	jme_set_phyfifo_5level(jme);
+ 	jme->pcirev = pdev->revision;
+@@ -3308,7 +3311,7 @@ jme_resume(struct device *dev)
+ 	if (!netif_running(netdev))
+ 		return 0;
+ 
+-	jme_clear_pm(jme);
++	jme_clear_pm_disable_wol(jme);
+ 	jme_phy_on(jme);
+ 	if (test_bit(JME_FLAG_SSET, &jme->flags))
+ 		jme_set_settings(netdev, &jme->old_ecmd);
+@@ -3316,13 +3319,14 @@ jme_resume(struct device *dev)
+ 		jme_reset_phy_processor(jme);
+ 	jme_phy_calibration(jme);
+ 	jme_phy_setEA(jme);
+-	jme_start_irq(jme);
+ 	netif_device_attach(netdev);
+ 
+ 	atomic_inc(&jme->link_changing);
+ 
+ 	jme_reset_link(jme);
+ 
++	jme_start_irq(jme);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 80aac20104de..f6095d2b77de 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -710,7 +710,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ 
+ 	if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
+ 		return -1;
+-	hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
++	hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
+ 
+ 	csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
+ 				       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index c10d98f6ad96..a1b4301f719a 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -400,7 +400,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 	u32 packets = 0;
+ 	u32 bytes = 0;
+ 	int factor = priv->cqe_factor;
+-	u64 timestamp = 0;
+ 	int done = 0;
+ 	int budget = priv->tx_work_limit;
+ 	u32 last_nr_txbb;
+@@ -440,9 +439,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+ 
+ 		do {
++			u64 timestamp = 0;
++
+ 			txbbs_skipped += last_nr_txbb;
+ 			ring_index = (ring_index + last_nr_txbb) & size_mask;
+-			if (ring->tx_info[ring_index].ts_requested)
++
++			if (unlikely(ring->tx_info[ring_index].ts_requested))
+ 				timestamp = mlx4_en_get_cqe_ts(cqe);
+ 
+ 			/* free next descriptor */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index bafe2180cf0c..e662ab39499e 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -2960,7 +2960,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 		case QP_TRANS_RTS2RTS:
+ 		case QP_TRANS_SQD2SQD:
+ 		case QP_TRANS_SQD2RTS:
+-			if (slave != mlx4_master_func_num(dev))
++			if (slave != mlx4_master_func_num(dev)) {
+ 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
+ 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
+ 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
+@@ -2979,6 +2979,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
+ 					if (qp_ctx->alt_path.mgid_index >= num_gids)
+ 						return -EINVAL;
+ 				}
++			}
+ 			break;
+ 		default:
+ 			break;
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+index f221126a5c4e..d0992825c47c 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+@@ -567,6 +567,7 @@ struct qlcnic_adapter_stats {
+ 	u64  tx_dma_map_error;
+ 	u64  spurious_intr;
+ 	u64  mac_filter_limit_overrun;
++	u64  mbx_spurious_intr;
+ };
+ 
+ /*
+@@ -1099,7 +1100,7 @@ struct qlcnic_mailbox {
+ 	unsigned long		status;
+ 	spinlock_t		queue_lock;	/* Mailbox queue lock */
+ 	spinlock_t		aen_lock;	/* Mailbox response/AEN lock */
+-	atomic_t		rsp_status;
++	u32			rsp_status;
+ 	u32			num_cmds;
+ };
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+index 840bf36b5e9d..dd618d7ed257 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+@@ -489,7 +489,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+ 
+ static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+ {
+-	atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
++	mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	complete(&mbx->completion);
+ }
+ 
+@@ -508,7 +508,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+ 	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+ 	} else {
+-		if (atomic_read(&mbx->rsp_status) != rsp_status)
++		if (mbx->rsp_status != rsp_status)
+ 			qlcnic_83xx_notify_mbx_response(mbx);
+ 	}
+ out:
+@@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+ 		if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 			__qlcnic_83xx_process_aen(adapter);
+ 		} else {
+-			if (atomic_read(&mbx->rsp_status) != rsp_status)
++			if (mbx->rsp_status != rsp_status)
+ 				qlcnic_83xx_notify_mbx_response(mbx);
+ 		}
+ 	}
+@@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ 
+ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ {
++	u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ 	struct qlcnic_adapter *adapter = data;
+ 	struct qlcnic_mailbox *mbx;
+-	u32 mask, resp, event;
+ 	unsigned long flags;
+ 
+ 	mbx = adapter->ahw->mailbox;
+@@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+ 		goto out;
+ 
+ 	event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+-	if (event &  QLCNIC_MBX_ASYNC_EVENT)
++	if (event &  QLCNIC_MBX_ASYNC_EVENT) {
+ 		__qlcnic_83xx_process_aen(adapter);
+-	else
+-		qlcnic_83xx_notify_mbx_response(mbx);
++	} else {
++		if (mbx->rsp_status != rsp_status)
++			qlcnic_83xx_notify_mbx_response(mbx);
++		else
++			adapter->stats.mbx_spurious_intr++;
++	}
+ 
+ out:
+ 	mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+@@ -4025,10 +4029,10 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 	struct qlcnic_adapter *adapter = mbx->adapter;
+ 	struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ 	struct device *dev = &adapter->pdev->dev;
+-	atomic_t *rsp_status = &mbx->rsp_status;
+ 	struct list_head *head = &mbx->cmd_q;
+ 	struct qlcnic_hardware_context *ahw;
+ 	struct qlcnic_cmd_args *cmd = NULL;
++	unsigned long flags;
+ 
+ 	ahw = adapter->ahw;
+ 
+@@ -4038,7 +4042,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+ 			return;
+ 		}
+ 
+-		atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
++		spin_lock_irqsave(&mbx->aen_lock, flags);
++		mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
++		spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ 
+ 		spin_lock(&mbx->queue_lock);
+ 
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+index 494e8105adee..0a2318cad34d 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+@@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ 	 QLC_OFF(stats.mac_filter_limit_overrun)},
+ 	{"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ 	 QLC_OFF(stats.spurious_intr)},
+-
++	{"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
++	 QLC_OFF(stats.mbx_spurious_intr)},
+ };
+ 
+ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+index 25800a1dedcb..b915de060a42 100644
+--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
++++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+@@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ 		return;
+ 	}
+ 	skb_reserve(new_skb, NET_IP_ALIGN);
++
++	pci_dma_sync_single_for_cpu(qdev->pdev,
++				    dma_unmap_addr(sbq_desc, mapaddr),
++				    dma_unmap_len(sbq_desc, maplen),
++				    PCI_DMA_FROMDEVICE);
++
+ 	memcpy(skb_put(new_skb, length), skb->data, length);
++
++	pci_dma_sync_single_for_device(qdev->pdev,
++				       dma_unmap_addr(sbq_desc, mapaddr),
++				       dma_unmap_len(sbq_desc, maplen),
++				       PCI_DMA_FROMDEVICE);
+ 	skb = new_skb;
+ 
+ 	/* Frame error, so drop the packet. */
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 97e4df9bf407..cba41860167c 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
+ 	dev->netdev_ops = &qcaspi_netdev_ops;
+ 	qcaspi_set_ethtool_ops(dev);
+ 	dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
+-	dev->flags = IFF_MULTICAST;
++	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ 	dev->tx_queue_len = 100;
+ 
+ 	qca = netdev_priv(dev);
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 13463c4acc86..c93a458f96f7 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1148,8 +1148,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 
+ 		/* RX descriptor */
+ 		rxdesc = &mdp->rx_ring[i];
+-		/* The size of the buffer is a multiple of 16 bytes. */
+-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
++		/* The size of the buffer is a multiple of 32 bytes. */
++		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
+ 		dma_addr = dma_map_single(&ndev->dev, skb->data,
+ 					  rxdesc->buffer_length,
+ 					  DMA_FROM_DEVICE);
+@@ -1173,7 +1173,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
+ 	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
+ 
+ 	/* Mark the last entry as wrapping the ring. */
+-	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
++	if (rxdesc)
++		rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
+ 
+ 	memset(mdp->tx_ring, 0, tx_ringsize);
+ 
+@@ -1506,7 +1507,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 			if (mdp->cd->rpadir)
+ 				skb_reserve(skb, NET_IP_ALIGN);
+ 			dma_unmap_single(&ndev->dev, rxdesc->addr,
+-					 ALIGN(mdp->rx_buf_sz, 16),
++					 ALIGN(mdp->rx_buf_sz, 32),
+ 					 DMA_FROM_DEVICE);
+ 			skb_put(skb, pkt_len);
+ 			skb->protocol = eth_type_trans(skb, ndev);
+@@ -1524,8 +1525,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
+ 		entry = mdp->dirty_rx % mdp->num_rx_ring;
+ 		rxdesc = &mdp->rx_ring[entry];
+-		/* The size of the buffer is 16 byte boundary. */
+-		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
++		/* The size of the buffer is 32 byte boundary. */
++		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
+ 
+ 		if (mdp->rx_skbuff[entry] == NULL) {
+ 			skb = netdev_alloc_skb(ndev, skbuff_size);
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index feca46efa12f..c642e201a45e 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -452,6 +452,17 @@ fail:
+ 	return rc;
+ }
+ 
++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
++{
++	struct efx_channel *channel;
++	struct efx_tx_queue *tx_queue;
++
++	/* All our existing PIO buffers went away */
++	efx_for_each_channel(channel, efx)
++		efx_for_each_channel_tx_queue(tx_queue, channel)
++			tx_queue->piobuf = NULL;
++}
++
+ #else /* !EFX_USE_PIO */
+ 
+ static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
+@@ -468,6 +479,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
+ {
+ }
+ 
++static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
++{
++}
++
+ #endif /* EFX_USE_PIO */
+ 
+ static void efx_ef10_remove(struct efx_nic *efx)
+@@ -699,6 +714,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
+ 	nic_data->must_realloc_vis = true;
+ 	nic_data->must_restore_filters = true;
+ 	nic_data->must_restore_piobufs = true;
++	efx_ef10_forget_old_piobufs(efx);
+ 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+ }
+ 
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 4dba5fbc735e..2b212f3e140c 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -710,6 +710,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
+ 		if (copylen > good_linear)
+ 			copylen = good_linear;
++		else if (copylen < ETH_HLEN)
++			copylen = ETH_HLEN;
+ 		linear = copylen;
+ 		i = *from;
+ 		iov_iter_advance(&i, copylen);
+@@ -719,10 +721,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+ 
+ 	if (!zerocopy) {
+ 		copylen = len;
+-		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear)
++		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		if (linear > good_linear)
+ 			linear = good_linear;
+-		else
+-			linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
++		else if (linear < ETH_HLEN)
++			linear = ETH_HLEN;
+ 	}
+ 
+ 	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index cfe49a07c7c1..51ba895f0522 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -563,7 +563,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
+ 
+ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ {
+-	struct ppp_file *pf = file->private_data;
++	struct ppp_file *pf;
+ 	struct ppp *ppp;
+ 	int err = -EFAULT, val, val2, i;
+ 	struct ppp_idle idle;
+@@ -573,9 +573,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	void __user *argp = (void __user *)arg;
+ 	int __user *p = argp;
+ 
+-	if (!pf)
+-		return ppp_unattached_ioctl(current->nsproxy->net_ns,
+-					pf, file, cmd, arg);
++	mutex_lock(&ppp_mutex);
++
++	pf = file->private_data;
++	if (!pf) {
++		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
++					   pf, file, cmd, arg);
++		goto out;
++	}
+ 
+ 	if (cmd == PPPIOCDETACH) {
+ 		/*
+@@ -590,7 +595,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		 * this fd and reopening /dev/ppp.
+ 		 */
+ 		err = -EINVAL;
+-		mutex_lock(&ppp_mutex);
+ 		if (pf->kind == INTERFACE) {
+ 			ppp = PF_TO_PPP(pf);
+ 			if (file == ppp->owner)
+@@ -602,15 +606,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		} else
+ 			pr_warn("PPPIOCDETACH file->f_count=%ld\n",
+ 				atomic_long_read(&file->f_count));
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind == CHANNEL) {
+ 		struct channel *pch;
+ 		struct ppp_channel *chan;
+ 
+-		mutex_lock(&ppp_mutex);
+ 		pch = PF_TO_CHANNEL(pf);
+ 
+ 		switch (cmd) {
+@@ -632,17 +634,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 				err = chan->ops->ioctl(chan, cmd, arg);
+ 			up_read(&pch->chan_sem);
+ 		}
+-		mutex_unlock(&ppp_mutex);
+-		return err;
++		goto out;
+ 	}
+ 
+ 	if (pf->kind != INTERFACE) {
+ 		/* can't happen */
+ 		pr_err("PPP: not interface or channel??\n");
+-		return -EINVAL;
++		err = -EINVAL;
++		goto out;
+ 	}
+ 
+-	mutex_lock(&ppp_mutex);
+ 	ppp = PF_TO_PPP(pf);
+ 	switch (cmd) {
+ 	case PPPIOCSMRU:
+@@ -817,7 +818,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	default:
+ 		err = -ENOTTY;
+ 	}
++
++out:
+ 	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -830,7 +834,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	struct ppp_net *pn;
+ 	int __user *p = (int __user *)arg;
+ 
+-	mutex_lock(&ppp_mutex);
+ 	switch (cmd) {
+ 	case PPPIOCNEWUNIT:
+ 		/* Create a new ppp unit */
+@@ -881,7 +884,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
+ 	default:
+ 		err = -ENOTTY;
+ 	}
+-	mutex_unlock(&ppp_mutex);
++
+ 	return err;
+ }
+ 
+@@ -2244,7 +2247,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
+ 
+ 	pch->ppp = NULL;
+ 	pch->chan = chan;
+-	pch->chan_net = net;
++	pch->chan_net = get_net(net);
+ 	chan->ppp = pch;
+ 	init_ppp_file(&pch->file, CHANNEL);
+ 	pch->file.hdrlen = chan->hdrlen;
+@@ -2341,6 +2344,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ 	spin_lock_bh(&pn->all_channels_lock);
+ 	list_del(&pch->list);
+ 	spin_unlock_bh(&pn->all_channels_lock);
++	put_net(pch->chan_net);
++	pch->chan_net = NULL;
+ 
+ 	pch->file.dead = 1;
+ 	wake_up_interruptible(&pch->file.rwait);
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index e470ae59d405..01f5ff84cf6b 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -516,11 +516,13 @@ static void tun_detach_all(struct net_device *dev)
+ 	for (i = 0; i < n; i++) {
+ 		tfile = rtnl_dereference(tun->tfiles[i]);
+ 		BUG_ON(!tfile);
++		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
+ 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+ 		RCU_INIT_POINTER(tfile->tun, NULL);
+ 		--tun->numqueues;
+ 	}
+ 	list_for_each_entry(tfile, &tun->disabled, next) {
++		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
+ 		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
+ 		RCU_INIT_POINTER(tfile->tun, NULL);
+ 	}
+@@ -575,6 +577,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
+ 			goto out;
+ 	}
+ 	tfile->queue_index = tun->numqueues;
++	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
+ 	rcu_assign_pointer(tfile->tun, tun);
+ 	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
+ 	tun->numqueues++;
+@@ -1357,9 +1360,6 @@ static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
+ 	if (!iov_iter_count(to))
+ 		return 0;
+ 
+-	if (tun->dev->reg_state != NETREG_REGISTERED)
+-		return -EIO;
+-
+ 	/* Read frames from queue */
+ 	skb = __skb_recv_datagram(tfile->socket.sk, noblock ? MSG_DONTWAIT : 0,
+ 				  &peeked, &off, &err);
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 0b481c30979b..5db25e46a962 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -843,7 +843,11 @@ advance:
+ 
+ 	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+ 
+-	/* reset data interface */
++	/* Reset data interface. Some devices will not reset properly
++	 * unless they are configured first.  Toggle the altsetting to
++	 * force a reset.
++	 */
++	usb_set_interface(dev->udev, iface_no, data_altsetting);
+ 	temp = usb_set_interface(dev->udev, iface_no, 0);
+ 	if (temp) {
+ 		dev_dbg(&intf->dev, "set interface failed\n");
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index cffb25280a3b..8153e97408e7 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -749,6 +749,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x19d2, 0x1426, 2)},	/* ZTE MF91 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x1428, 2)},	/* Telewell TW-LTE 4G v2 */
+ 	{QMI_FIXED_INTF(0x19d2, 0x2002, 4)},	/* ZTE (Vodafone) K3765-Z */
++	{QMI_FIXED_INTF(0x2001, 0x7e19, 4)},	/* D-Link DWM-221 B1 */
+ 	{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
+ 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
+ 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+@@ -767,8 +768,10 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
+ 	{QMI_FIXED_INTF(0x1199, 0x9070, 8)},	/* Sierra Wireless MC74xx/EM74xx */
+ 	{QMI_FIXED_INTF(0x1199, 0x9070, 10)},	/* Sierra Wireless MC74xx/EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx/EM74xx */
+-	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx/EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index e0498571ae26..edbb2f389337 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1754,6 +1754,13 @@ out3:
+ 	if (info->unbind)
+ 		info->unbind (dev, udev);
+ out1:
++	/* subdrivers must undo all they did in bind() if they
++	 * fail it, but we may fail later and a deferred kevent
++	 * may trigger an error resubmitting itself and, worse,
++	 * schedule a timer. So we kill it all just in case.
++	 */
++	cancel_work_sync(&dev->kevent);
++	del_timer_sync(&dev->delay);
+ 	free_netdev(net);
+ out:
+ 	return status;
+diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
+index 44541dbc5c28..69b994f3b8c5 100644
+--- a/drivers/net/wan/farsync.c
++++ b/drivers/net/wan/farsync.c
+@@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+                 dev->mem_start   = card->phys_mem
+                                  + BUF_OFFSET ( txBuffer[i][0][0]);
+                 dev->mem_end     = card->phys_mem
+-                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
++                                 + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
+                 dev->base_addr   = card->pci_conf;
+                 dev->irq         = card->irq;
+ 
+diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
+index cc81482c934d..113a43fca9cf 100644
+--- a/drivers/net/wireless/ath/ath9k/eeprom.c
++++ b/drivers/net/wireless/ath/ath9k/eeprom.c
+@@ -403,10 +403,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 
+ 	if (match) {
+ 		if (AR_SREV_9287(ah)) {
+-			/* FIXME: array overrun? */
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_9287[idxL].pwrPdg[i],
+ 						data_9287[idxL].vpdPdg[i],
+@@ -416,7 +415,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else if (eeprom_4k) {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_4k[idxL].pwrPdg[i],
+ 						data_4k[idxL].vpdPdg[i],
+@@ -426,7 +425,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
+ 		} else {
+ 			for (i = 0; i < numXpdGains; i++) {
+ 				minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
+-				maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
++				maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1];
+ 				ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
+ 						data_def[idxL].pwrPdg[i],
+ 						data_def[idxL].vpdPdg[i],
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index c44393f26fd3..4e720ed402ef 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4520,8 +4520,10 @@ int pci_get_new_domain_nr(void)
+ void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
+ {
+ 	static int use_dt_domains = -1;
+-	int domain = of_get_pci_domain_nr(parent->of_node);
++	int domain = -1;
+ 
++	if (parent)
++		domain = of_get_pci_domain_nr(parent->of_node);
+ 	/*
+ 	 * Check DT domain and use_dt_domains values.
+ 	 *
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
+index e261f1cf85c6..09c05bffe026 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
+@@ -205,9 +205,9 @@ static int imx_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ 		pin_reg = &info->pin_regs[pin_id];
+ 
+ 		if (pin_reg->mux_reg == -1) {
+-			dev_err(ipctl->dev, "Pin(%s) does not support mux function\n",
++			dev_dbg(ipctl->dev, "Pin(%s) does not support mux function\n",
+ 				info->pins[pin_id].name);
+-			return -EINVAL;
++			continue;
+ 		}
+ 
+ 		if (info->flags & SHARE_MUX_CONF_REG) {
+diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+index a6a22054c0ba..f4b1dac45aca 100644
+--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
++++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+@@ -1025,7 +1025,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
+ 		int pullidx = 0;
+ 
+ 		if (pull)
+-			pullidx = data_out ? 1 : 2;
++			pullidx = data_out ? 2 : 1;
+ 
+ 		seq_printf(s, " gpio-%-3d (%-20.20s) in  %s %s",
+ 			   gpio,
+diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
+index 13b45f297727..f2e4232ea98d 100644
+--- a/drivers/pinctrl/pinctrl-single.c
++++ b/drivers/pinctrl/pinctrl-single.c
+@@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
+ 
+ 		/* Parse pins in each row from LSB */
+ 		while (mask) {
+-			bit_pos = ffs(mask);
++			bit_pos = __ffs(mask);
+ 			pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
+-			mask_pos = ((pcs->fmask) << (bit_pos - 1));
++			mask_pos = ((pcs->fmask) << bit_pos);
+ 			val_pos = val & mask_pos;
+ 			submask = mask & mask_pos;
+ 
+@@ -1576,6 +1576,9 @@ static inline void pcs_irq_set(struct pcs_soc_data *pcs_soc,
+ 		else
+ 			mask &= ~soc_mask;
+ 		pcs->write(mask, pcswi->reg);
++
++		/* flush posted write */
++		mask = pcs->read(pcswi->reg);
+ 		raw_spin_unlock(&pcs->lock);
+ 	}
+ 
+@@ -1851,7 +1854,7 @@ static int pcs_probe(struct platform_device *pdev)
+ 	ret = of_property_read_u32(np, "pinctrl-single,function-mask",
+ 				   &pcs->fmask);
+ 	if (!ret) {
+-		pcs->fshift = ffs(pcs->fmask) - 1;
++		pcs->fshift = __ffs(pcs->fmask);
+ 		pcs->fmax = pcs->fmask >> pcs->fshift;
+ 	} else {
+ 		/* If mask property doesn't exist, function mux is invalid. */
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 4bc0c7f459a5..b2bf48c7dc36 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -526,11 +526,12 @@ static int power_supply_read_temp(struct thermal_zone_device *tzd,
+ 
+ 	WARN_ON(tzd == NULL);
+ 	psy = tzd->devdata;
+-	ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
++	ret = power_supply_get_property(psy, POWER_SUPPLY_PROP_TEMP, &val);
++	if (ret)
++		return ret;
+ 
+ 	/* Convert tenths of degree Celsius to milli degree Celsius. */
+-	if (!ret)
+-		*temp = val.intval * 100;
++	*temp = val.intval * 100;
+ 
+ 	return ret;
+ }
+@@ -573,10 +574,12 @@ static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
+ 	int ret;
+ 
+ 	psy = tcd->devdata;
+-	ret = psy->desc->get_property(psy,
+-		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
+-	if (!ret)
+-		*state = val.intval;
++	ret = power_supply_get_property(psy,
++			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
++	if (ret)
++		return ret;
++
++	*state = val.intval;
+ 
+ 	return ret;
+ }
+@@ -589,10 +592,12 @@ static int ps_get_cur_chrage_cntl_limit(struct thermal_cooling_device *tcd,
+ 	int ret;
+ 
+ 	psy = tcd->devdata;
+-	ret = psy->desc->get_property(psy,
+-		POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
+-	if (!ret)
+-		*state = val.intval;
++	ret = power_supply_get_property(psy,
++			POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
++	if (ret)
++		return ret;
++
++	*state = val.intval;
+ 
+ 	return ret;
+ }
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 58f5d3b8e981..27343e1c43ef 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
+ 		}
+ 	}
+ 
+-	if (i < s5m8767->num_regulators)
+-		*enable_ctrl =
+-		s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
++	if (i >= s5m8767->num_regulators)
++		return -EINVAL;
++
++	*enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
+ 
+ 	return 0;
+ }
+@@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
+ 			else
+ 				regulators[id].vsel_mask = 0xff;
+ 
+-			s5m8767_get_register(s5m8767, id, &enable_reg,
++			ret = s5m8767_get_register(s5m8767, id, &enable_reg,
+ 					     &enable_val);
++			if (ret) {
++				dev_err(s5m8767->dev, "error reading registers\n");
++				return ret;
++			}
+ 			regulators[id].enable_reg = enable_reg;
+ 			regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
+ 			regulators[id].enable_val = enable_val;
+diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
+index 818a3635a8c8..86865881ce4b 100644
+--- a/drivers/rtc/rtc-ds1685.c
++++ b/drivers/rtc/rtc-ds1685.c
+@@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
+  * Only use this where you are certain another lock will not be held.
+  */
+ static inline void
+-ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
++ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
+ {
+-	spin_lock_irqsave(&rtc->lock, flags);
++	spin_lock_irqsave(&rtc->lock, *flags);
+ 	ds1685_rtc_switch_to_bank1(rtc);
+ }
+ 
+@@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ {
+ 	struct ds1685_priv *rtc = dev_get_drvdata(dev);
+ 	u8 reg = 0, bit = 0, tmp;
+-	unsigned long flags = 0;
++	unsigned long flags;
+ 	long int val = 0;
+ 	const struct ds1685_rtc_ctrl_regs *reg_info =
+ 		ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
+@@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
+ 	bit = reg_info->bit;
+ 
+ 	/* Safe to spinlock during a write. */
+-	ds1685_rtc_begin_ctrl_access(rtc, flags);
++	ds1685_rtc_begin_ctrl_access(rtc, &flags);
+ 	tmp = rtc->read(rtc, reg);
+ 	rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
+ 	ds1685_rtc_end_ctrl_access(rtc, flags);
+diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
+index 0f710e98538f..3db1557e5394 100644
+--- a/drivers/rtc/rtc-hym8563.c
++++ b/drivers/rtc/rtc-hym8563.c
+@@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ 	 * it does not seem to carry it over a subsequent write/read.
+ 	 * So we'll limit ourself to 100 years, starting at 2000 for now.
+ 	 */
+-	buf[6] = tm->tm_year - 100;
++	buf[6] = bin2bcd(tm->tm_year - 100);
+ 
+ 	/*
+ 	 * CTL1 only contains TEST-mode bits apart from stop,
+diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
+index 7632a87784c3..d42cef0ca939 100644
+--- a/drivers/rtc/rtc-max77686.c
++++ b/drivers/rtc/rtc-max77686.c
+@@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
+ 
+ 	info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
+ 					 MAX77686_RTCIRQ_RTCA1);
+-	if (!info->virq) {
++	if (info->virq <= 0) {
+ 		ret = -ENXIO;
+ 		goto err_rtc;
+ 	}
+diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
+index f64c282275b3..e1b86bb01062 100644
+--- a/drivers/rtc/rtc-vr41xx.c
++++ b/drivers/rtc/rtc-vr41xx.c
+@@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
+ }
+ 
+ static const struct rtc_class_ops vr41xx_rtc_ops = {
+-	.release	= vr41xx_rtc_release,
+-	.ioctl		= vr41xx_rtc_ioctl,
+-	.read_time	= vr41xx_rtc_read_time,
+-	.set_time	= vr41xx_rtc_set_time,
+-	.read_alarm	= vr41xx_rtc_read_alarm,
+-	.set_alarm	= vr41xx_rtc_set_alarm,
++	.release		= vr41xx_rtc_release,
++	.ioctl			= vr41xx_rtc_ioctl,
++	.read_time		= vr41xx_rtc_read_time,
++	.set_time		= vr41xx_rtc_set_time,
++	.read_alarm		= vr41xx_rtc_read_alarm,
++	.set_alarm		= vr41xx_rtc_set_alarm,
++	.alarm_irq_enable	= vr41xx_rtc_alarm_irq_enable,
+ };
+ 
+ static int rtc_probe(struct platform_device *pdev)
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index e8c8c1ecc1f5..bf8fd38abbbd 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -2848,7 +2848,7 @@ lpfc_online(struct lpfc_hba *phba)
+ 	}
+ 
+ 	vports = lpfc_create_vport_work_array(phba);
+-	if (vports != NULL)
++	if (vports != NULL) {
+ 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+ 			struct Scsi_Host *shost;
+ 			shost = lpfc_shost_from_vport(vports[i]);
+@@ -2865,7 +2865,8 @@ lpfc_online(struct lpfc_hba *phba)
+ 			}
+ 			spin_unlock_irq(shost->host_lock);
+ 		}
+-		lpfc_destroy_vport_work_array(phba, vports);
++	}
++	lpfc_destroy_vport_work_array(phba, vports);
+ 
+ 	lpfc_unblock_mgmt_io(phba);
+ 	return 0;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 890637fdd61e..7a1c4b4e764b 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -6203,12 +6203,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
+ 	}
+ 
+ 	for (i = 0; i < ioc->sge_count; i++) {
+-		if (kbuff_arr[i])
++		if (kbuff_arr[i]) {
+ 			dma_free_coherent(&instance->pdev->dev,
+ 					  le32_to_cpu(kern_sge32[i].length),
+ 					  kbuff_arr[i],
+ 					  le32_to_cpu(kern_sge32[i].phys_addr));
+ 			kbuff_arr[i] = NULL;
++		}
+ 	}
+ 
+ 	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index c6b93d273799..841fdf745fcf 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1122,7 +1122,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
+  */
+ void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
+ {
+-	scmd->device->host->host_failed--;
+ 	scmd->eh_eflags = 0;
+ 	list_move_tail(&scmd->eh_entry, done_q);
+ }
+@@ -2216,6 +2215,9 @@ int scsi_error_handler(void *data)
+ 		else
+ 			scsi_unjam_host(shost);
+ 
++		/* All scmds have been handled */
++		shost->host_failed = 0;
++
+ 		/*
+ 		 * Note - if the above fails completely, the action is to take
+ 		 * individual devices offline and flush the queue of any
+diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
+index 68e7efeb9a27..1d308cba29b1 100644
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
+ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ 	u32 ser;
+-	struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
++	struct spi_master *master = spi->master;
++	struct rockchip_spi *rs = spi_master_get_devdata(master);
++
++	pm_runtime_get_sync(rs->dev);
+ 
+ 	ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+ 
+@@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+ 		ser &= ~(1 << spi->chip_select);
+ 
+ 	writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
++
++	pm_runtime_put_sync(rs->dev);
+ }
+ 
+ static int rockchip_spi_prepare_message(struct spi_master *master,
+diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
+index fbb0a4d74e91..39d7c7c70112 100644
+--- a/drivers/spi/spi-sun4i.c
++++ b/drivers/spi/spi-sun4i.c
+@@ -170,13 +170,17 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+ {
+ 	struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ 	unsigned int mclk_rate, div, timeout;
++	unsigned int start, end, tx_time;
+ 	unsigned int tx_len = 0;
+ 	int ret = 0;
+ 	u32 reg;
+ 
+ 	/* We don't support transfer larger than the FIFO */
+ 	if (tfr->len > SUN4I_FIFO_DEPTH)
+-		return -EINVAL;
++		return -EMSGSIZE;
++
++	if (tfr->tx_buf && tfr->len >= SUN4I_FIFO_DEPTH)
++		return -EMSGSIZE;
+ 
+ 	reinit_completion(&sspi->done);
+ 	sspi->tx_buf = tfr->tx_buf;
+@@ -269,8 +273,12 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+ 	sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ 	sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+ 
+-	/* Fill the TX FIFO */
+-	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
++	/*
++	 * Fill the TX FIFO.
++	 * Filling the FIFO fully causes a timeout for some reason,
++	 * at least on spi2 on the A10s.
++	 */
++	sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH - 1);
+ 
+ 	/* Enable the interrupts */
+ 	sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+@@ -279,9 +287,16 @@ static int sun4i_spi_transfer_one(struct spi_master *master,
+ 	reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ 	sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+ 
++	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
++	start = jiffies;
+ 	timeout = wait_for_completion_timeout(&sspi->done,
+-					      msecs_to_jiffies(1000));
++					      msecs_to_jiffies(tx_time));
++	end = jiffies;
+ 	if (!timeout) {
++		dev_warn(&master->dev,
++			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
++			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
++			 jiffies_to_msecs(end - start), tx_time);
+ 		ret = -ETIMEDOUT;
+ 		goto out;
+ 	}
+diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
+index ac48f59705a8..e77add01b0e9 100644
+--- a/drivers/spi/spi-sun6i.c
++++ b/drivers/spi/spi-sun6i.c
+@@ -160,6 +160,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ {
+ 	struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ 	unsigned int mclk_rate, div, timeout;
++	unsigned int start, end, tx_time;
+ 	unsigned int tx_len = 0;
+ 	int ret = 0;
+ 	u32 reg;
+@@ -269,9 +270,16 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
+ 	reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ 	sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+ 
++	tx_time = max(tfr->len * 8 * 2 / (tfr->speed_hz / 1000), 100U);
++	start = jiffies;
+ 	timeout = wait_for_completion_timeout(&sspi->done,
+-					      msecs_to_jiffies(1000));
++					      msecs_to_jiffies(tx_time));
++	end = jiffies;
+ 	if (!timeout) {
++		dev_warn(&master->dev,
++			 "%s: timeout transferring %u bytes@%iHz for %i(%i)ms",
++			 dev_name(&spi->dev), tfr->len, tfr->speed_hz,
++			 jiffies_to_msecs(end - start), tx_time);
+ 		ret = -ETIMEDOUT;
+ 		goto out;
+ 	}
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index b4fd8debf941..a64d53f7b1d1 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -791,22 +791,16 @@ static int size_fifo(struct uart_8250_port *up)
+  */
+ static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
+ {
+-	unsigned char old_dll, old_dlm, old_lcr;
+-	unsigned int id;
++	unsigned char old_lcr;
++	unsigned int id, old_dl;
+ 
+ 	old_lcr = serial_in(p, UART_LCR);
+ 	serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
++	old_dl = serial_dl_read(p);
++	serial_dl_write(p, 0);
++	id = serial_dl_read(p);
++	serial_dl_write(p, old_dl);
+ 
+-	old_dll = serial_in(p, UART_DLL);
+-	old_dlm = serial_in(p, UART_DLM);
+-
+-	serial_out(p, UART_DLL, 0);
+-	serial_out(p, UART_DLM, 0);
+-
+-	id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
+-
+-	serial_out(p, UART_DLL, old_dll);
+-	serial_out(p, UART_DLM, old_dlm);
+ 	serial_out(p, UART_LCR, old_lcr);
+ 
+ 	return id;
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index 1e0d9b8c48c9..e42cb6bdd31d 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1288,6 +1288,8 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 	/* check to see if we need  to change clock source */
+ 
+ 	if (ourport->baudclk != clk) {
++		clk_prepare_enable(clk);
++
+ 		s3c24xx_serial_setsource(port, clk_sel);
+ 
+ 		if (!IS_ERR(ourport->baudclk)) {
+@@ -1295,8 +1297,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
+ 			ourport->baudclk = ERR_PTR(-EINVAL);
+ 		}
+ 
+-		clk_prepare_enable(clk);
+-
+ 		ourport->baudclk = clk;
+ 		ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
+ 	}
+diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
+index 4a24eb2b0ede..ba86956ef4b5 100644
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3587,9 +3587,10 @@ static int do_register_con_driver(const struct consw *csw, int first, int last)
+ 		goto err;
+ 
+ 	desc = csw->con_startup();
+-
+-	if (!desc)
++	if (!desc) {
++		retval = -ENODEV;
+ 		goto err;
++	}
+ 
+ 	retval = -EINVAL;
+ 
+diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
+index 61d538aa2346..4f4f06a5889f 100644
+--- a/drivers/usb/common/usb-otg-fsm.c
++++ b/drivers/usb/common/usb-otg-fsm.c
+@@ -21,6 +21,7 @@
+  * 675 Mass Ave, Cambridge, MA 02139, USA.
+  */
+ 
++#include <linux/module.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/mutex.h>
+@@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm)
+ 	return state_changed;
+ }
+ EXPORT_SYMBOL_GPL(otg_statemachine);
++MODULE_LICENSE("GPL");
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index e47cfcd5640c..3a49ba2910df 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -2522,26 +2522,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
+  * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
+  * deallocated.
+  *
+- * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
+- * freed.  When hcd_release() is called for either hcd in a peer set
+- * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
+- * block new peering attempts
++ * Make sure to deallocate the bandwidth_mutex only when the last HCD is
++ * freed.  When hcd_release() is called for either hcd in a peer set,
++ * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
+  */
+ static void hcd_release(struct kref *kref)
+ {
+ 	struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
+ 
+ 	mutex_lock(&usb_port_peer_mutex);
+-	if (usb_hcd_is_primary_hcd(hcd)) {
+-		kfree(hcd->address0_mutex);
+-		kfree(hcd->bandwidth_mutex);
+-	}
+ 	if (hcd->shared_hcd) {
+ 		struct usb_hcd *peer = hcd->shared_hcd;
+ 
+ 		peer->shared_hcd = NULL;
+-		if (peer->primary_hcd == hcd)
+-			peer->primary_hcd = NULL;
++		peer->primary_hcd = NULL;
++	} else {
++		kfree(hcd->address0_mutex);
++		kfree(hcd->bandwidth_mutex);
+ 	}
+ 	mutex_unlock(&usb_port_peer_mutex);
+ 	kfree(hcd);
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 017c1de53aa5..f28b5375e2c8 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Creative SB Audigy 2 NX */
+ 	{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* USB3503 */
++	{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Microsoft Wireless Laser Mouse 6000 Receiver */
+ 	{ USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -170,6 +173,10 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* MAYA44USB sound device */
+ 	{ USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* ASUS Base Station(T100) */
++	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
++			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
++
+ 	/* Action Semiconductor flash disk */
+ 	{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
+ 			USB_QUIRK_STRING_FETCH_255 },
+@@ -185,26 +192,22 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1908, 0x1315), .driver_info =
+ 			USB_QUIRK_HONOR_BNUMINTERFACES },
+ 
+-	/* INTEL VALUE SSD */
+-	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+-	/* USB3503 */
+-	{ USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+-
+-	/* ASUS Base Station(T100) */
+-	{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+-			USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+-
+ 	/* Protocol and OTG Electrical Test Device */
+ 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Acer C120 LED Projector */
++	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	/* Blackmagic Design Intensity Shuttle */
+ 	{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
+ 
+ 	/* Blackmagic Design UltraStudio SDI */
+ 	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ 
++	/* INTEL VALUE SSD */
++	{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
+index 7bd0a95b2815..0a465a90f0d6 100644
+--- a/drivers/usb/dwc3/dwc3-exynos.c
++++ b/drivers/usb/dwc3/dwc3-exynos.c
+@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
+ 
+ 	platform_set_drvdata(pdev, exynos);
+ 
+-	ret = dwc3_exynos_register_phys(exynos);
+-	if (ret) {
+-		dev_err(dev, "couldn't register PHYs\n");
+-		return ret;
+-	}
+-
+ 	exynos->dev	= dev;
+ 
+ 	exynos->clk = devm_clk_get(dev, "usbdrd30");
+@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
+ 		goto err3;
+ 	}
+ 
++	ret = dwc3_exynos_register_phys(exynos);
++	if (ret) {
++		dev_err(dev, "couldn't register PHYs\n");
++		goto err4;
++	}
++
+ 	if (node) {
+ 		ret = of_platform_populate(node, NULL, NULL, dev);
+ 		if (ret) {
+ 			dev_err(dev, "failed to add dwc3 core\n");
+-			goto err4;
++			goto err5;
+ 		}
+ 	} else {
+ 		dev_err(dev, "no device node, failed to add dwc3 core\n");
+ 		ret = -ENODEV;
+-		goto err4;
++		goto err5;
+ 	}
+ 
+ 	return 0;
+ 
++err5:
++	platform_device_unregister(exynos->usb2_phy);
++	platform_device_unregister(exynos->usb3_phy);
+ err4:
+ 	regulator_disable(exynos->vdd10);
+ err3:
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 82240dbdf6dd..db9433eed2cc 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -651,7 +651,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 	if (io_data->read && ret > 0) {
+ 		use_mm(io_data->mm);
+ 		ret = copy_to_iter(io_data->buf, ret, &io_data->data);
+-		if (iov_iter_count(&io_data->data))
++		if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
+ 			ret = -EFAULT;
+ 		unuse_mm(io_data->mm);
+ 	}
+diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
+index 2030565c6789..bccc5788bb98 100644
+--- a/drivers/usb/gadget/legacy/inode.c
++++ b/drivers/usb/gadget/legacy/inode.c
+@@ -934,8 +934,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
+ 			struct usb_ep		*ep = dev->gadget->ep0;
+ 			struct usb_request	*req = dev->req;
+ 
+-			if ((retval = setup_req (ep, req, 0)) == 0)
+-				retval = usb_ep_queue (ep, req, GFP_ATOMIC);
++			if ((retval = setup_req (ep, req, 0)) == 0) {
++				spin_unlock_irq (&dev->lock);
++				retval = usb_ep_queue (ep, req, GFP_KERNEL);
++				spin_lock_irq (&dev->lock);
++			}
+ 			dev->state = STATE_DEV_CONNECTED;
+ 
+ 			/* assume that was SET_CONFIGURATION */
+@@ -1453,8 +1456,11 @@ delegate:
+ 							w_length);
+ 				if (value < 0)
+ 					break;
++
++				spin_unlock (&dev->lock);
+ 				value = usb_ep_queue (gadget->ep0, dev->req,
+-							GFP_ATOMIC);
++							GFP_KERNEL);
++				spin_lock (&dev->lock);
+ 				if (value < 0) {
+ 					clean_req (gadget->ep0, dev->req);
+ 					break;
+@@ -1477,11 +1483,14 @@ delegate:
+ 	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
+ 		req->length = value;
+ 		req->zero = value < w_length;
+-		value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
++
++		spin_unlock (&dev->lock);
++		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
+ 		if (value < 0) {
+ 			DBG (dev, "ep_queue --> %d\n", value);
+ 			req->status = 0;
+ 		}
++		return value;
+ 	}
+ 
+ 	/* device stalls when value < 0 */
+diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
+index ff9af29b4e9f..d888a00195ac 100644
+--- a/drivers/usb/host/ehci-tegra.c
++++ b/drivers/usb/host/ehci-tegra.c
+@@ -89,7 +89,7 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
+ 	if (!usb1_reset_attempted) {
+ 		struct reset_control *usb1_reset;
+ 
+-		usb1_reset = of_reset_control_get(phy_np, "usb");
++		usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+ 		if (IS_ERR(usb1_reset)) {
+ 			dev_warn(&pdev->dev,
+ 				 "can't get utmi-pads reset from the PHY\n");
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index c6027acb6263..54caaf87c567 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -37,6 +37,7 @@
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
++#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009	0x1009
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400	0x1400
+ 
+ #define PCI_VENDOR_ID_ETRON		0x1b6f
+@@ -108,6 +109,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ 	}
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
++			pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
++		xhci->quirks |= XHCI_BROKEN_STREAMS;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_NEC)
+ 		xhci->quirks |= XHCI_NEC_HOST;
+ 
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index 783e819139a7..7606710baf43 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -116,6 +116,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
+ 		ret = clk_prepare_enable(clk);
+ 		if (ret)
+ 			goto put_hcd;
++	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
++		ret = -EPROBE_DEFER;
++		goto put_hcd;
+ 	}
+ 
+ 	if (of_device_is_compatible(pdev->dev.of_node,
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 6fe0377ec5cf..6ef255142e01 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -289,6 +289,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+ 
+ 	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ 	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
++
++	/*
++	 * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
++	 * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
++	 * but the completion event is never sent. Use the cmd timeout timer to
++	 * handle those cases. Use twice the time to cover the bit polling retry.
++	 */
++	mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
+ 	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ 			&xhci->op_regs->cmd_ring);
+ 
+@@ -313,6 +321,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+ 
+ 		xhci_err(xhci, "Stopped the command ring failed, "
+ 				"maybe the host is dead\n");
++		del_timer(&xhci->cmd_timer);
+ 		xhci->xhc_state |= XHCI_STATE_DYING;
+ 		xhci_quiesce(xhci);
+ 		xhci_halt(xhci);
+@@ -1252,22 +1261,21 @@ void xhci_handle_command_timeout(unsigned long data)
+ 	int ret;
+ 	unsigned long flags;
+ 	u64 hw_ring_state;
+-	struct xhci_command *cur_cmd = NULL;
++	bool second_timeout = false;
+ 	xhci = (struct xhci_hcd *) data;
+ 
+ 	/* mark this command to be cancelled */
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 	if (xhci->current_cmd) {
+-		cur_cmd = xhci->current_cmd;
+-		cur_cmd->status = COMP_CMD_ABORT;
++		if (xhci->current_cmd->status == COMP_CMD_ABORT)
++			second_timeout = true;
++		xhci->current_cmd->status = COMP_CMD_ABORT;
+ 	}
+ 
+-
+ 	/* Make sure command ring is running before aborting it */
+ 	hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+ 	if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
+ 	    (hw_ring_state & CMD_RING_RUNNING))  {
+-
+ 		spin_unlock_irqrestore(&xhci->lock, flags);
+ 		xhci_dbg(xhci, "Command timeout\n");
+ 		ret = xhci_abort_cmd_ring(xhci);
+@@ -1279,6 +1287,15 @@ void xhci_handle_command_timeout(unsigned long data)
+ 		}
+ 		return;
+ 	}
++
++	/* command ring failed to restart, or host removed. Bail out */
++	if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
++		spin_unlock_irqrestore(&xhci->lock, flags);
++		xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
++		xhci_cleanup_command_queue(xhci);
++		return;
++	}
++
+ 	/* command timeout on stopped ring, ring can't be aborted */
+ 	xhci_dbg(xhci, "Command timeout on stopped ring\n");
+ 	xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
+diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
+index c3d5fc9dfb5b..06853d7c89fd 100644
+--- a/drivers/usb/musb/musb_host.c
++++ b/drivers/usb/musb/musb_host.c
+@@ -583,14 +583,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
+ 		musb_writew(ep->regs, MUSB_TXCSR, 0);
+ 
+ 	/* scrub all previous state, clearing toggle */
+-	} else {
+-		csr = musb_readw(ep->regs, MUSB_RXCSR);
+-		if (csr & MUSB_RXCSR_RXPKTRDY)
+-			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+-				musb_readw(ep->regs, MUSB_RXCOUNT));
+-
+-		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ 	}
++	csr = musb_readw(ep->regs, MUSB_RXCSR);
++	if (csr & MUSB_RXCSR_RXPKTRDY)
++		WARNING("rx%d, packet/%d ready?\n", ep->epnum,
++			musb_readw(ep->regs, MUSB_RXCOUNT));
++
++	musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
+ 
+ 	/* target addr and (for multipoint) hub addr/port */
+ 	if (musb->is_multipoint) {
+@@ -950,9 +949,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ 	if (is_in) {
+ 		dma = is_dma_capable() ? ep->rx_channel : NULL;
+ 
+-		/* clear nak timeout bit */
++		/*
++		 * Need to stop the transaction by clearing REQPKT first,
++		 * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
++		 * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
++		 */
+ 		rx_csr = musb_readw(epio, MUSB_RXCSR);
+ 		rx_csr |= MUSB_RXCSR_H_WZC_BITS;
++		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
++		musb_writew(epio, MUSB_RXCSR, rx_csr);
+ 		rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ 		musb_writew(epio, MUSB_RXCSR, rx_csr);
+ 
+diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
+index facaaf003f19..e40da7759a0e 100644
+--- a/drivers/usb/usbip/usbip_common.c
++++ b/drivers/usb/usbip/usbip_common.c
+@@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
+ 	if (!(size > 0))
+ 		return 0;
+ 
++	if (size > urb->transfer_buffer_length) {
++		/* should not happen, probably malicious packet */
++		if (ud->side == USBIP_STUB) {
++			usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
++			return 0;
++		} else {
++			usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
++			return -EPIPE;
++		}
++	}
++
+ 	ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
+ 	if (ret != size) {
+ 		dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index d1e1e1704da1..44eb7c737ea2 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -2246,7 +2246,6 @@ config XEN_FBDEV_FRONTEND
+ 	select FB_SYS_IMAGEBLIT
+ 	select FB_SYS_FOPS
+ 	select FB_DEFERRED_IO
+-	select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
+ 	select XEN_XENBUS_FRONTEND
+ 	default y
+ 	help
+diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
+index 0081725c6b5b..d00510029c93 100644
+--- a/drivers/video/fbdev/da8xx-fb.c
++++ b/drivers/video/fbdev/da8xx-fb.c
+@@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 0,
+ 		.vsync_len      = 0,
+-		.sync           = FB_SYNC_CLK_INVERT |
+-			FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = FB_SYNC_CLK_INVERT,
+ 	},
+ 	/* Sharp LK043T1DG01 */
+ 	[1] = {
+@@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 2,
+ 		.hsync_len      = 41,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[2] = {
+@@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
+ 		.lower_margin   = 10,
+ 		.hsync_len      = 10,
+ 		.vsync_len      = 10,
+-		.sync           = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
++		.sync           = 0,
+ 		.flag           = 0,
+ 	},
+ 	[3] = {
+diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
+index 9c234209d8b5..47a4177b16d2 100644
+--- a/drivers/xen/xen-pciback/conf_space.c
++++ b/drivers/xen/xen-pciback/conf_space.c
+@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ 		field_start = OFFSET(cfg_entry);
+ 		field_end = OFFSET(cfg_entry) + field->size;
+ 
+-		if ((req_start >= field_start && req_start < field_end)
+-		    || (req_end > field_start && req_end <= field_end)) {
++		if (req_end > field_start && field_end > req_start) {
+ 			err = conf_space_read(dev, cfg_entry, field_start,
+ 					      &tmp_val);
+ 			if (err)
+@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+ 		field_start = OFFSET(cfg_entry);
+ 		field_end = OFFSET(cfg_entry) + field->size;
+ 
+-		if ((req_start >= field_start && req_start < field_end)
+-		    || (req_end > field_start && req_end <= field_end)) {
++		if (req_end > field_start && field_end > req_start) {
+ 			tmp_val = 0;
+ 
+ 			err = xen_pcibk_config_read(dev, field_start,
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 0f11ebc92f02..844c883a7169 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -1548,6 +1548,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ 		       trans->transid, root->fs_info->generation);
+ 
+ 	if (!should_cow_block(trans, root, buf)) {
++		trans->dirty = true;
+ 		*cow_ret = buf;
+ 		return 0;
+ 	}
+@@ -2767,8 +2768,10 @@ again:
+ 			 * then we don't want to set the path blocking,
+ 			 * so we test it here
+ 			 */
+-			if (!should_cow_block(trans, root, b))
++			if (!should_cow_block(trans, root, b)) {
++				trans->dirty = true;
+ 				goto cow_done;
++			}
+ 
+ 			/*
+ 			 * must have write locks on this node and the
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index d1ae1322648a..2771bc32dbd9 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -7504,7 +7504,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+ 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
+ 			 buf->start + buf->len - 1, GFP_NOFS);
+ 	}
+-	trans->blocks_used++;
++	trans->dirty = true;
+ 	/* this returns a buffer locked for blocking */
+ 	return buf;
+ }
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 43502247e176..2eca30adb3e3 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -1639,7 +1639,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
+ 
+ 		src_inode = file_inode(src.file);
+ 		if (src_inode->i_sb != file_inode(file)->i_sb) {
+-			btrfs_info(BTRFS_I(src_inode)->root->fs_info,
++			btrfs_info(BTRFS_I(file_inode(file))->root->fs_info,
+ 				   "Snapshot src from another FS");
+ 			ret = -EXDEV;
+ 		} else if (!inode_owner_or_capable(src_inode)) {
+diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
+index 70734d89193a..a40b454aea44 100644
+--- a/fs/btrfs/super.c
++++ b/fs/btrfs/super.c
+@@ -262,7 +262,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
+ 	trans->aborted = errno;
+ 	/* Nothing used. The other threads that have joined this
+ 	 * transaction may be able to continue. */
+-	if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
++	if (!trans->dirty && list_empty(&trans->new_bgs)) {
+ 		const char *errstr;
+ 
+ 		errstr = btrfs_decode_error(errno);
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 00d18c2bdb0f..6d43b2ab183b 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -507,7 +507,6 @@ again:
+ 
+ 	h->transid = cur_trans->transid;
+ 	h->transaction = cur_trans;
+-	h->blocks_used = 0;
+ 	h->bytes_reserved = 0;
+ 	h->root = root;
+ 	h->delayed_ref_updates = 0;
+diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
+index 0b24755596ba..4ce102be6d6b 100644
+--- a/fs/btrfs/transaction.h
++++ b/fs/btrfs/transaction.h
+@@ -105,7 +105,6 @@ struct btrfs_trans_handle {
+ 	u64 qgroup_reserved;
+ 	unsigned long use_count;
+ 	unsigned long blocks_reserved;
+-	unsigned long blocks_used;
+ 	unsigned long delayed_ref_updates;
+ 	struct btrfs_transaction *transaction;
+ 	struct btrfs_block_rsv *block_rsv;
+@@ -115,6 +114,7 @@ struct btrfs_trans_handle {
+ 	bool allocating_chunk;
+ 	bool reloc_reserved;
+ 	bool sync;
++	bool dirty;
+ 	unsigned int type;
+ 	/*
+ 	 * this root is only needed to validate that the root passed to
+diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
+index 5a53ac6b1e02..02b071bf3732 100644
+--- a/fs/cifs/cifs_unicode.c
++++ b/fs/cifs/cifs_unicode.c
+@@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
+ 	case SFM_SLASH:
+ 		*target = '\\';
+ 		break;
++	case SFM_SPACE:
++		*target = ' ';
++		break;
++	case SFM_PERIOD:
++		*target = '.';
++		break;
+ 	default:
+ 		return false;
+ 	}
+@@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
+ 	return dest_char;
+ }
+ 
+-static __le16 convert_to_sfm_char(char src_char)
++static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
+ {
+ 	__le16 dest_char;
+ 
+@@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
+ 	case '|':
+ 		dest_char = cpu_to_le16(SFM_PIPE);
+ 		break;
++	case '.':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_PERIOD);
++		else
++			dest_char = 0;
++		break;
++	case ' ':
++		if (end_of_string)
++			dest_char = cpu_to_le16(SFM_SPACE);
++		else
++			dest_char = 0;
++		break;
+ 	default:
+ 		dest_char = 0;
+ 	}
+@@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
+ 		/* see if we must remap this char */
+ 		if (map_chars == SFU_MAP_UNI_RSVD)
+ 			dst_char = convert_to_sfu_char(src_char);
+-		else if (map_chars == SFM_MAP_UNI_RSVD)
+-			dst_char = convert_to_sfm_char(src_char);
+-		else
++		else if (map_chars == SFM_MAP_UNI_RSVD) {
++			bool end_of_string;
++
++			if (i == srclen - 1)
++				end_of_string = true;
++			else
++				end_of_string = false;
++
++			dst_char = convert_to_sfm_char(src_char, end_of_string);
++		} else
+ 			dst_char = 0;
+ 		/*
+ 		 * FIXME: We can not handle remapping backslash (UNI_SLASH)
+diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
+index bdc52cb9a676..479bc0a941f3 100644
+--- a/fs/cifs/cifs_unicode.h
++++ b/fs/cifs/cifs_unicode.h
+@@ -64,6 +64,8 @@
+ #define SFM_LESSTHAN    ((__u16) 0xF023)
+ #define SFM_PIPE        ((__u16) 0xF027)
+ #define SFM_SLASH       ((__u16) 0xF026)
++#define SFM_PERIOD	((__u16) 0xF028)
++#define SFM_SPACE	((__u16) 0xF029)
+ 
+ /*
+  * Mapping mechanism to use when one of the seven reserved characters is
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index de626b939811..17998d19b166 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -414,7 +414,9 @@ cifs_echo_request(struct work_struct *work)
+ 	 * server->ops->need_neg() == true. Also, no need to ping if
+ 	 * we got a response recently.
+ 	 */
+-	if (!server->ops->need_neg || server->ops->need_neg(server) ||
++
++	if (server->tcpStatus == CifsNeedReconnect ||
++	    server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+ 	    (server->ops->can_echo && !server->ops->can_echo(server)) ||
+ 	    time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+ 		goto requeue_echo;
+diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
+index 848249fa120f..3079b38f0afb 100644
+--- a/fs/cifs/ntlmssp.h
++++ b/fs/cifs/ntlmssp.h
+@@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
+ 
+ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
+ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
+-int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
++int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
+ 			struct cifs_ses *ses,
+ 			const struct nls_table *nls_cp);
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 8ffda5084dbf..5f9229ddf335 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
+ 	sec_blob->DomainName.MaximumLength = 0;
+ }
+ 
+-/* We do not malloc the blob, it is passed in pbuffer, because its
+-   maximum possible size is fixed and small, making this approach cleaner.
+-   This function returns the length of the data in the blob */
+-int build_ntlmssp_auth_blob(unsigned char *pbuffer,
++static int size_of_ntlmssp_blob(struct cifs_ses *ses)
++{
++	int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
++		- CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
++
++	if (ses->domainName)
++		sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
++	else
++		sz += 2;
++
++	if (ses->user_name)
++		sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
++	else
++		sz += 2;
++
++	return sz;
++}
++
++int build_ntlmssp_auth_blob(unsigned char **pbuffer,
+ 					u16 *buflen,
+ 				   struct cifs_ses *ses,
+ 				   const struct nls_table *nls_cp)
+ {
+ 	int rc;
+-	AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
++	AUTHENTICATE_MESSAGE *sec_blob;
+ 	__u32 flags;
+ 	unsigned char *tmp;
+ 
++	rc = setup_ntlmv2_rsp(ses, nls_cp);
++	if (rc) {
++		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++		*buflen = 0;
++		goto setup_ntlmv2_ret;
++	}
++	*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
++	sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
++
+ 	memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+ 	sec_blob->MessageType = NtLmAuthenticate;
+ 
+@@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 			flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ 	}
+ 
+-	tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
++	tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
+ 	sec_blob->NegotiateFlags = cpu_to_le32(flags);
+ 
+ 	sec_blob->LmChallengeResponse.BufferOffset =
+@@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.Length = 0;
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+-	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
++	sec_blob->NtChallengeResponse.BufferOffset =
++				cpu_to_le32(tmp - *pbuffer);
+ 	if (ses->user_name != NULL) {
+-		rc = setup_ntlmv2_rsp(ses, nls_cp);
+-		if (rc) {
+-			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-			goto setup_ntlmv2_ret;
+-		}
+ 		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ 				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ 		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+@@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	}
+ 
+ 	if (ses->domainName == NULL) {
+-		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->DomainName.Length = 0;
+ 		sec_blob->DomainName.MaximumLength = 0;
+ 		tmp += 2;
+@@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
+ 				      CIFS_MAX_USERNAME_LEN, nls_cp);
+ 		len *= 2; /* unicode is 2 bytes each */
+-		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->DomainName.Length = cpu_to_le16(len);
+ 		sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
+ 		tmp += len;
+ 	}
+ 
+ 	if (ses->user_name == NULL) {
+-		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->UserName.Length = 0;
+ 		sec_blob->UserName.MaximumLength = 0;
+ 		tmp += 2;
+@@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
+ 				      CIFS_MAX_USERNAME_LEN, nls_cp);
+ 		len *= 2; /* unicode is 2 bytes each */
+-		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->UserName.Length = cpu_to_le16(len);
+ 		sec_blob->UserName.MaximumLength = cpu_to_le16(len);
+ 		tmp += len;
+ 	}
+ 
+-	sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
++	sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 	sec_blob->WorkstationName.Length = 0;
+ 	sec_blob->WorkstationName.MaximumLength = 0;
+ 	tmp += 2;
+@@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 		(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
+ 			&& !calc_seckey(ses)) {
+ 		memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
+ 		sec_blob->SessionKey.MaximumLength =
+ 				cpu_to_le16(CIFS_CPHTXT_SIZE);
+ 		tmp += CIFS_CPHTXT_SIZE;
+ 	} else {
+-		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
++		sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
+ 		sec_blob->SessionKey.Length = 0;
+ 		sec_blob->SessionKey.MaximumLength = 0;
+ 	}
+ 
++	*buflen = tmp - *pbuffer;
+ setup_ntlmv2_ret:
+-	*buflen = tmp - pbuffer;
+ 	return rc;
+ }
+ 
+@@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+ 	struct cifs_ses *ses = sess_data->ses;
+ 	__u16 bytes_remaining;
+ 	char *bcc_ptr;
+-	char *ntlmsspblob = NULL;
++	unsigned char *ntlmsspblob = NULL;
+ 	u16 blob_len;
+ 
+ 	cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
+@@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
+ 	/* Build security blob before we assemble the request */
+ 	pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
+ 	smb_buf = (struct smb_hdr *)pSMB;
+-	/*
+-	 * 5 is an empirical value, large enough to hold
+-	 * authenticate message plus max 10 of av pairs,
+-	 * domain, user, workstation names, flags, etc.
+-	 */
+-	ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
+-				GFP_KERNEL);
+-	if (!ntlmsspblob) {
+-		rc = -ENOMEM;
+-		goto out;
+-	}
+-
+-	rc = build_ntlmssp_auth_blob(ntlmsspblob,
++	rc = build_ntlmssp_auth_blob(&ntlmsspblob,
+ 					&blob_len, ses, sess_data->nls_cp);
+ 	if (rc)
+ 		goto out_free_ntlmsspblob;
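
The allocation change above swaps a guessed upper bound (the "5 *
sizeof(struct _AUTHENTICATE_MESSAGE)" kzalloc removed just above) for an
exact worst-case size. As a minimal userspace restatement of that
arithmetic, with stand-in constants where the kernel uses
sizeof(AUTHENTICATE_MESSAGE), CIFS_SESS_KEY_SIZE and CIFS_CPHTXT_SIZE:

#include <stddef.h>
#include <string.h>

enum { AUTH_MSG = 88, SESS_KEY = 16, CPHTXT = 16 };	/* stand-ins */

/* mirrors size_of_ntlmssp_blob(): fixed header, NTLMv2 response,
 * session key, empty workstation name, UTF-16LE domain and user
 * (the kernel caps the string lengths with strnlen; plain strlen
 * is used here for brevity) */
static size_t ntlmssp_blob_size(size_t auth_key_len,
				const char *domain, const char *user)
{
	size_t sz = AUTH_MSG + auth_key_len - SESS_KEY + CPHTXT + 2;

	sz += domain ? 2 * strlen(domain) : 2;
	sz += user ? 2 * strlen(user) : 2;
	return sz;
}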
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 14e845e8996f..8f527c867f78 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -532,7 +532,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	u16 blob_length = 0;
+ 	struct key *spnego_key = NULL;
+ 	char *security_blob = NULL;
+-	char *ntlmssp_blob = NULL;
++	unsigned char *ntlmssp_blob = NULL;
+ 	bool use_spnego = false; /* else use raw ntlmssp */
+ 
+ 	cifs_dbg(FYI, "Session Setup\n");
+@@ -657,13 +657,7 @@ ssetup_ntlmssp_authenticate:
+ 		iov[1].iov_len = blob_length;
+ 	} else if (phase == NtLmAuthenticate) {
+ 		req->hdr.SessionId = ses->Suid;
+-		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
+-				       GFP_KERNEL);
+-		if (ntlmssp_blob == NULL) {
+-			rc = -ENOMEM;
+-			goto ssetup_exit;
+-		}
+-		rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
++		rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
+ 					     nls_cp);
+ 		if (rc) {
+ 			cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
+@@ -1632,6 +1626,33 @@ SMB2_echo(struct TCP_Server_Info *server)
+ 
+ 	cifs_dbg(FYI, "In echo request\n");
+ 
++	if (server->tcpStatus == CifsNeedNegotiate) {
++		struct list_head *tmp, *tmp2;
++		struct cifs_ses *ses;
++		struct cifs_tcon *tcon;
++
++		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
++		spin_lock(&cifs_tcp_ses_lock);
++		list_for_each(tmp, &server->smb_ses_list) {
++			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
++			list_for_each(tmp2, &ses->tcon_list) {
++				tcon = list_entry(tmp2, struct cifs_tcon,
++						  tcon_list);
++				/* add check for persistent handle reconnect */
++				if (tcon && tcon->need_reconnect) {
++					spin_unlock(&cifs_tcp_ses_lock);
++					rc = smb2_reconnect(SMB2_ECHO, tcon);
++					spin_lock(&cifs_tcp_ses_lock);
++				}
++			}
++		}
++		spin_unlock(&cifs_tcp_ses_lock);
++	}
++
++	/* if no session, renegotiate failed above */
++	if (server->tcpStatus == CifsNeedNegotiate)
++		return -EIO;
++
+ 	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
+ 	if (rc)
+ 		return rc;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index f43996884242..ba12e2953aec 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5094,6 +5094,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 	might_sleep();
+ 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
+ 	err = ext4_reserve_inode_write(handle, inode, &iloc);
++	if (err)
++		return err;
+ 	if (ext4_handle_valid(handle) &&
+ 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+ 	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
+@@ -5124,9 +5126,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+ 			}
+ 		}
+ 	}
+-	if (!err)
+-		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+-	return err;
++	return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ }
+ 
+ /*
+diff --git a/fs/locks.c b/fs/locks.c
+index 8501eecb2af0..3c234b9fbdd9 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -1596,7 +1596,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr
+ {
+ 	struct file_lock *fl, *my_fl = NULL, *lease;
+ 	struct dentry *dentry = filp->f_path.dentry;
+-	struct inode *inode = dentry->d_inode;
++	struct inode *inode = file_inode(filp);
+ 	struct file_lock_context *ctx;
+ 	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
+ 	int error;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 6257268147ee..556721fb0cf6 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1551,6 +1551,7 @@ void __detach_mounts(struct dentry *dentry)
+ 		goto out_unlock;
+ 
+ 	lock_mount_hash();
++	event++;
+ 	while (!hlist_empty(&mp->m_list)) {
+ 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+ 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index b2c8b31b2be7..aadb4af4a0fe 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1542,9 +1542,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ 		err = PTR_ERR(inode);
+ 		trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
+ 		put_nfs_open_context(ctx);
++		d_drop(dentry);
+ 		switch (err) {
+ 		case -ENOENT:
+-			d_drop(dentry);
+ 			d_add(dentry, NULL);
+ 			nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ 			break;
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 84706204cc33..eef16ec0638a 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2715,12 +2715,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
+ 			call_close |= is_wronly;
+ 		else if (is_wronly)
+ 			calldata->arg.fmode |= FMODE_WRITE;
++		if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
++			call_close |= is_rdwr;
+ 	} else if (is_rdwr)
+ 		calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
+ 
+-	if (calldata->arg.fmode == 0)
+-		call_close |= is_rdwr;
+-
+ 	if (!nfs4_valid_open_stateid(state))
+ 		call_close = 0;
+ 	spin_unlock(&state->owner->so_lock);
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index 1705c78ee2d8..19c1bcf70e3e 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -124,11 +124,12 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
+ 	if (ret) {
+ 		cinfo->ds->nwritten -= ret;
+ 		cinfo->ds->ncommitting += ret;
+-		bucket->clseg = bucket->wlseg;
+-		if (list_empty(src))
++		if (bucket->clseg == NULL)
++			bucket->clseg = pnfs_get_lseg(bucket->wlseg);
++		if (list_empty(src)) {
++			pnfs_put_lseg_locked(bucket->wlseg);
+ 			bucket->wlseg = NULL;
+-		else
+-			pnfs_get_lseg(bucket->clseg);
++		}
+ 	}
+ 	return ret;
+ }
+@@ -182,19 +183,23 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
+ 	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
+ 	struct pnfs_commit_bucket *bucket;
+ 	struct pnfs_layout_segment *freeme;
++	LIST_HEAD(pages);
+ 	int i;
+ 
++	spin_lock(cinfo->lock);
+ 	for (i = idx; i < fl_cinfo->nbuckets; i++) {
+ 		bucket = &fl_cinfo->buckets[i];
+ 		if (list_empty(&bucket->committing))
+ 			continue;
+-		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
+-		spin_lock(cinfo->lock);
+ 		freeme = bucket->clseg;
+ 		bucket->clseg = NULL;
++		list_splice_init(&bucket->committing, &pages);
+ 		spin_unlock(cinfo->lock);
++		nfs_retry_commit(&pages, freeme, cinfo, i);
+ 		pnfs_put_lseg(freeme);
++		spin_lock(cinfo->lock);
+ 	}
++	spin_unlock(cinfo->lock);
+ }
+ 
+ static unsigned int
+@@ -216,10 +221,6 @@ pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
+ 		if (!data)
+ 			break;
+ 		data->ds_commit_index = i;
+-		spin_lock(cinfo->lock);
+-		data->lseg = bucket->clseg;
+-		bucket->clseg = NULL;
+-		spin_unlock(cinfo->lock);
+ 		list_add(&data->pages, list);
+ 		nreq++;
+ 	}
+@@ -229,6 +230,47 @@ pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
+ 	return nreq;
+ }
+ 
++static inline
++void pnfs_fetch_commit_bucket_list(struct list_head *pages,
++		struct nfs_commit_data *data,
++		struct nfs_commit_info *cinfo)
++{
++	struct pnfs_commit_bucket *bucket;
++
++	bucket = &cinfo->ds->buckets[data->ds_commit_index];
++	spin_lock(cinfo->lock);
++	list_splice_init(pages, &bucket->committing);
++	data->lseg = bucket->clseg;
++	bucket->clseg = NULL;
++	spin_unlock(cinfo->lock);
++
++}
++
++/* Helper function for pnfs_generic_commit_pagelist to catch an empty
++ * page list. This can happen when two commits race.
++ *
++ * This must be called instead of nfs_init_commit - call one or the other, but
++ * not both!
++ */
++static bool
++pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
++					  struct nfs_commit_data *data,
++					  struct nfs_commit_info *cinfo)
++{
++	if (list_empty(pages)) {
++		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
++			wake_up_atomic_t(&cinfo->mds->rpcs_out);
++		/* don't call nfs_commitdata_release - it tries to put
++		 * the open_context which is not acquired until nfs_init_commit
++		 * which has not been called on @data */
++		WARN_ON_ONCE(data->context);
++		nfs_commit_free(data);
++		return true;
++	}
++
++	return false;
++}
++
+ /* This follows nfs_commit_list pretty closely */
+ int
+ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+@@ -243,7 +285,7 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ 	if (!list_empty(mds_pages)) {
+ 		data = nfs_commitdata_alloc();
+ 		if (data != NULL) {
+-			data->lseg = NULL;
++			data->ds_commit_index = -1;
+ 			list_add(&data->pages, &list);
+ 			nreq++;
+ 		} else {
+@@ -265,19 +307,27 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
+ 
+ 	list_for_each_entry_safe(data, tmp, &list, pages) {
+ 		list_del_init(&data->pages);
+-		if (!data->lseg) {
++		if (data->ds_commit_index < 0) {
++			/* another commit raced with us */
++			if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
++				data, cinfo))
++				continue;
++
+ 			nfs_init_commit(data, mds_pages, NULL, cinfo);
+ 			nfs_initiate_commit(NFS_CLIENT(inode), data,
+ 					    NFS_PROTO(data->inode),
+ 					    data->mds_ops, how, 0);
+ 		} else {
+-			struct pnfs_commit_bucket *buckets;
++			LIST_HEAD(pages);
++
++			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
++
++			/* another commit raced with us */
++			if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
++				data, cinfo))
++				continue;
+ 
+-			buckets = cinfo->ds->buckets;
+-			nfs_init_commit(data,
+-					&buckets[data->ds_commit_index].committing,
+-					data->lseg,
+-					cinfo);
++			nfs_init_commit(data, &pages, data->lseg, cinfo);
+ 			initiate_commit(data, how);
+ 		}
+ 	}
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index d9851a6a2813..f98cd9adbc0d 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1653,6 +1653,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
+ {
+ 	struct nfs_commit_data	*data;
+ 
++	/* another commit raced with us */
++	if (list_empty(head))
++		return 0;
++
+ 	data = nfs_commitdata_alloc();
+ 
+ 	if (!data)
+diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
+index 5694cfb7a47b..29c4bff1e6e1 100644
+--- a/fs/nfsd/nfs4callback.c
++++ b/fs/nfsd/nfs4callback.c
+@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
+ 	}
+ }
+ 
+-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
+-{
+-	struct rpc_xprt *xprt;
+-
+-	if (args->protocol != XPRT_TRANSPORT_BC_TCP)
+-		return rpc_create(args);
+-
+-	xprt = args->bc_xprt->xpt_bc_xprt;
+-	if (xprt) {
+-		xprt_get(xprt);
+-		return rpc_create_xprt(args, xprt);
+-	}
+-
+-	return rpc_create(args);
+-}
+-
+ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
+ {
+ 	int maxtime = max_cb_time(clp->net);
+@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
+ 		args.authflavor = ses->se_cb_sec.flavor;
+ 	}
+ 	/* Create RPC client */
+-	client = create_backchannel_client(&args);
++	client = rpc_create(&args);
+ 	if (IS_ERR(client)) {
+ 		dprintk("NFSD: couldn't create callback client: %ld\n",
+ 			PTR_ERR(client));
+diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
+index 69bd801afb53..37e49cb2ac4c 100644
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -443,7 +443,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
+ 	if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
+ 		return 0;
+ 	bytes = le16_to_cpu(sbp->s_bytes);
+-	if (bytes > BLOCK_SIZE)
++	if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
+ 		return 0;
+ 	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
+ 		       sumoff);
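
The extra lower bound matters because s_bytes comes straight off the
disk and is later used in length arithmetic relative to sumoff; the new
check keeps that unsigned arithmetic from wrapping below zero and
reading far out of bounds. The validation pattern in isolation
(offsets illustrative):

#include <stdbool.h>
#include <stdint.h>

/* an untrusted on-disk length is only safe once it covers every
 * field we index through it (here a 4-byte checksum at sumoff)
 * and still fits inside the block */
static bool sb_bytes_valid(uint16_t bytes, size_t sumoff, size_t blocksize)
{
	return bytes >= sumoff + 4 && bytes <= blocksize;
}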
+diff --git a/fs/pipe.c b/fs/pipe.c
+index 8865f7963700..5916c19dbb02 100644
+--- a/fs/pipe.c
++++ b/fs/pipe.c
+@@ -38,6 +38,12 @@ unsigned int pipe_max_size = 1048576;
+  */
+ unsigned int pipe_min_size = PAGE_SIZE;
+ 
++/* Maximum allocatable pages per user. Hard limit is unset by default, soft
++ * matches default values.
++ */
++unsigned long pipe_user_pages_hard;
++unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
++
+ /*
+  * We use a start+len construction, which provides full use of the 
+  * allocated memory.
+@@ -584,20 +590,49 @@ pipe_fasync(int fd, struct file *filp, int on)
+ 	return retval;
+ }
+ 
++static void account_pipe_buffers(struct pipe_inode_info *pipe,
++                                 unsigned long old, unsigned long new)
++{
++	atomic_long_add(new - old, &pipe->user->pipe_bufs);
++}
++
++static bool too_many_pipe_buffers_soft(struct user_struct *user)
++{
++	return pipe_user_pages_soft &&
++	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_soft;
++}
++
++static bool too_many_pipe_buffers_hard(struct user_struct *user)
++{
++	return pipe_user_pages_hard &&
++	       atomic_long_read(&user->pipe_bufs) >= pipe_user_pages_hard;
++}
++
+ struct pipe_inode_info *alloc_pipe_info(void)
+ {
+ 	struct pipe_inode_info *pipe;
+ 
+ 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
+ 	if (pipe) {
+-		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
++		unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
++		struct user_struct *user = get_current_user();
++
++		if (!too_many_pipe_buffers_hard(user)) {
++			if (too_many_pipe_buffers_soft(user))
++				pipe_bufs = 1;
++			pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
++		}
++
+ 		if (pipe->bufs) {
+ 			init_waitqueue_head(&pipe->wait);
+ 			pipe->r_counter = pipe->w_counter = 1;
+-			pipe->buffers = PIPE_DEF_BUFFERS;
++			pipe->buffers = pipe_bufs;
++			pipe->user = user;
++			account_pipe_buffers(pipe, 0, pipe_bufs);
+ 			mutex_init(&pipe->mutex);
+ 			return pipe;
+ 		}
++		free_uid(user);
+ 		kfree(pipe);
+ 	}
+ 
+@@ -608,6 +643,8 @@ void free_pipe_info(struct pipe_inode_info *pipe)
+ {
+ 	int i;
+ 
++	account_pipe_buffers(pipe, pipe->buffers, 0);
++	free_uid(pipe->user);
+ 	for (i = 0; i < pipe->buffers; i++) {
+ 		struct pipe_buffer *buf = pipe->bufs + i;
+ 		if (buf->ops)
+@@ -996,6 +1033,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
+ 			memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
+ 	}
+ 
++	account_pipe_buffers(pipe, pipe->buffers, nr_pages);
+ 	pipe->curbuf = 0;
+ 	kfree(pipe->bufs);
+ 	pipe->bufs = bufs;
+@@ -1067,6 +1105,11 @@ long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
+ 			ret = -EPERM;
+ 			goto out;
++		} else if ((too_many_pipe_buffers_hard(pipe->user) ||
++			    too_many_pipe_buffers_soft(pipe->user)) &&
++		           !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
++			ret = -EPERM;
++			goto out;
+ 		}
+ 		ret = pipe_set_size(pipe, nr_pages);
+ 		break;
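
The scheme above charges every allocated pipe page to the owning user
and consults two thresholds at pipe creation and resize time. A compact
userspace model, using C11 atomics in place of the kernel's
atomic_long_t; the soft default of PIPE_DEF_BUFFERS * INR_OPEN_CUR is
written out assuming the usual values 16 and 1024:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long pipe_bufs;		/* pages charged to this user */
static long soft = 16 * 1024;		/* PIPE_DEF_BUFFERS * INR_OPEN_CUR */
static long hard;			/* 0 = no hard cap, as in the patch */

static void account(long old, long new)
{
	atomic_fetch_add(&pipe_bufs, new - old);
}

static bool over_soft(void) { return soft && atomic_load(&pipe_bufs) >= soft; }
static bool over_hard(void) { return hard && atomic_load(&pipe_bufs) >= hard; }

/* allocation policy from alloc_pipe_info(): over the hard cap the
 * pipe fails outright, over the soft cap it falls back to a single
 * buffer instead of the default sixteen */
static long bufs_for_new_pipe(void)
{
	if (over_hard())
		return -1;
	return over_soft() ? 1 : 16;
}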
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 68d51ed1666f..239dca3fb676 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -759,7 +759,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
+ 	int ret = 0;
+ 	struct mm_struct *mm = file->private_data;
+ 
+-	if (!mm)
++	/* Ensure the process spawned far enough to have an environment. */
++	if (!mm || !mm->env_end)
+ 		return 0;
+ 
+ 	page = (char *)__get_free_page(GFP_TEMPORARY);
+diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
+index 35efc103c39c..75e9b2db14ab 100644
+--- a/fs/ubifs/file.c
++++ b/fs/ubifs/file.c
+@@ -53,6 +53,7 @@
+ #include <linux/mount.h>
+ #include <linux/namei.h>
+ #include <linux/slab.h>
++#include <linux/migrate.h>
+ 
+ static int read_block(struct inode *inode, void *addr, unsigned int block,
+ 		      struct ubifs_data_node *dn)
+@@ -1420,6 +1421,26 @@ static int ubifs_set_page_dirty(struct page *page)
+ 	return ret;
+ }
+ 
++#ifdef CONFIG_MIGRATION
++static int ubifs_migrate_page(struct address_space *mapping,
++		struct page *newpage, struct page *page, enum migrate_mode mode)
++{
++	int rc;
++
++	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
++	if (rc != MIGRATEPAGE_SUCCESS)
++		return rc;
++
++	if (PagePrivate(page)) {
++		ClearPagePrivate(page);
++		SetPagePrivate(newpage);
++	}
++
++	migrate_page_copy(newpage, page);
++	return MIGRATEPAGE_SUCCESS;
++}
++#endif
++
+ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
+ {
+ 	/*
+@@ -1556,6 +1577,9 @@ const struct address_space_operations ubifs_file_address_operations = {
+ 	.write_end      = ubifs_write_end,
+ 	.invalidatepage = ubifs_invalidatepage,
+ 	.set_page_dirty = ubifs_set_page_dirty,
++#ifdef CONFIG_MIGRATION
++	.migratepage	= ubifs_migrate_page,
++#endif
+ 	.releasepage    = ubifs_releasepage,
+ };
+ 
+diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
+index 516162be1398..e1b1c8278294 100644
+--- a/fs/xfs/libxfs/xfs_alloc.c
++++ b/fs/xfs/libxfs/xfs_alloc.c
+@@ -519,6 +519,7 @@ xfs_agfl_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_agfl_buf_ops = {
++	.name = "xfs_agfl",
+ 	.verify_read = xfs_agfl_read_verify,
+ 	.verify_write = xfs_agfl_write_verify,
+ };
+@@ -2276,6 +2277,7 @@ xfs_agf_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_agf_buf_ops = {
++	.name = "xfs_agf",
+ 	.verify_read = xfs_agf_read_verify,
+ 	.verify_write = xfs_agf_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
+index 59d521c09a17..13629ad8a60c 100644
+--- a/fs/xfs/libxfs/xfs_alloc_btree.c
++++ b/fs/xfs/libxfs/xfs_alloc_btree.c
+@@ -379,6 +379,7 @@ xfs_allocbt_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_allocbt_buf_ops = {
++	.name = "xfs_allocbt",
+ 	.verify_read = xfs_allocbt_read_verify,
+ 	.verify_write = xfs_allocbt_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index e9d401ce93bb..0532561a6010 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -325,6 +325,7 @@ xfs_attr3_leaf_read_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
++	.name = "xfs_attr3_leaf",
+ 	.verify_read = xfs_attr3_leaf_read_verify,
+ 	.verify_write = xfs_attr3_leaf_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
+index dd714037c322..c3db53d1bdb3 100644
+--- a/fs/xfs/libxfs/xfs_attr_remote.c
++++ b/fs/xfs/libxfs/xfs_attr_remote.c
+@@ -201,6 +201,7 @@ xfs_attr3_rmt_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
++	.name = "xfs_attr3_rmt",
+ 	.verify_read = xfs_attr3_rmt_read_verify,
+ 	.verify_write = xfs_attr3_rmt_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
+index 2c44c8e50782..225f2a8c0436 100644
+--- a/fs/xfs/libxfs/xfs_bmap_btree.c
++++ b/fs/xfs/libxfs/xfs_bmap_btree.c
+@@ -719,6 +719,7 @@ xfs_bmbt_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_bmbt_buf_ops = {
++	.name = "xfs_bmbt",
+ 	.verify_read = xfs_bmbt_read_verify,
+ 	.verify_write = xfs_bmbt_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
+index 2385f8cd08ab..5d1827056efb 100644
+--- a/fs/xfs/libxfs/xfs_da_btree.c
++++ b/fs/xfs/libxfs/xfs_da_btree.c
+@@ -241,6 +241,7 @@ xfs_da3_node_read_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_da3_node_buf_ops = {
++	.name = "xfs_da3_node",
+ 	.verify_read = xfs_da3_node_read_verify,
+ 	.verify_write = xfs_da3_node_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
+index 9354e190b82e..a02ee011c8da 100644
+--- a/fs/xfs/libxfs/xfs_dir2_block.c
++++ b/fs/xfs/libxfs/xfs_dir2_block.c
+@@ -120,6 +120,7 @@ xfs_dir3_block_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_dir3_block_buf_ops = {
++	.name = "xfs_dir3_block",
+ 	.verify_read = xfs_dir3_block_read_verify,
+ 	.verify_write = xfs_dir3_block_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
+index 534bbf283d6b..e020a2c3a73f 100644
+--- a/fs/xfs/libxfs/xfs_dir2_data.c
++++ b/fs/xfs/libxfs/xfs_dir2_data.c
+@@ -302,11 +302,13 @@ xfs_dir3_data_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_dir3_data_buf_ops = {
++	.name = "xfs_dir3_data",
+ 	.verify_read = xfs_dir3_data_read_verify,
+ 	.verify_write = xfs_dir3_data_write_verify,
+ };
+ 
+ static const struct xfs_buf_ops xfs_dir3_data_reada_buf_ops = {
++	.name = "xfs_dir3_data_reada",
+ 	.verify_read = xfs_dir3_data_reada_verify,
+ 	.verify_write = xfs_dir3_data_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
+index 106119955400..eb66ae07428a 100644
+--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
++++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
+@@ -242,11 +242,13 @@ xfs_dir3_leafn_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops = {
++	.name = "xfs_dir3_leaf1",
+ 	.verify_read = xfs_dir3_leaf1_read_verify,
+ 	.verify_write = xfs_dir3_leaf1_write_verify,
+ };
+ 
+ const struct xfs_buf_ops xfs_dir3_leafn_buf_ops = {
++	.name = "xfs_dir3_leafn",
+ 	.verify_read = xfs_dir3_leafn_read_verify,
+ 	.verify_write = xfs_dir3_leafn_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
+index 06bb4218b362..f6e591edbb98 100644
+--- a/fs/xfs/libxfs/xfs_dir2_node.c
++++ b/fs/xfs/libxfs/xfs_dir2_node.c
+@@ -147,6 +147,7 @@ xfs_dir3_free_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_dir3_free_buf_ops = {
++	.name = "xfs_dir3_free",
+ 	.verify_read = xfs_dir3_free_read_verify,
+ 	.verify_write = xfs_dir3_free_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
+index 48aff071591d..f48c3040c9ce 100644
+--- a/fs/xfs/libxfs/xfs_dquot_buf.c
++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
+@@ -301,6 +301,7 @@ xfs_dquot_buf_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_dquot_buf_ops = {
++	.name = "xfs_dquot",
+ 	.verify_read = xfs_dquot_buf_read_verify,
+ 	.verify_write = xfs_dquot_buf_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
+index 1c9e75521250..fe20c2670f6c 100644
+--- a/fs/xfs/libxfs/xfs_ialloc.c
++++ b/fs/xfs/libxfs/xfs_ialloc.c
+@@ -2117,6 +2117,7 @@ xfs_agi_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_agi_buf_ops = {
++	.name = "xfs_agi",
+ 	.verify_read = xfs_agi_read_verify,
+ 	.verify_write = xfs_agi_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
+index 964c465ca69c..216a6f0997f6 100644
+--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
++++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
+@@ -295,6 +295,7 @@ xfs_inobt_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_inobt_buf_ops = {
++	.name = "xfs_inobt",
+ 	.verify_read = xfs_inobt_read_verify,
+ 	.verify_write = xfs_inobt_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index 7da6d0b2c2ed..a217176fde65 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -138,11 +138,13 @@ xfs_inode_buf_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_inode_buf_ops = {
++	.name = "xfs_inode",
+ 	.verify_read = xfs_inode_buf_read_verify,
+ 	.verify_write = xfs_inode_buf_write_verify,
+ };
+ 
+ const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
++	.name = "xfs_inode_ra",
+ 	.verify_read = xfs_inode_buf_readahead_verify,
+ 	.verify_write = xfs_inode_buf_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
+index dc4bfc5d88fc..535bd843f2f4 100644
+--- a/fs/xfs/libxfs/xfs_sb.c
++++ b/fs/xfs/libxfs/xfs_sb.c
+@@ -637,11 +637,13 @@ xfs_sb_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_sb_buf_ops = {
++	.name = "xfs_sb",
+ 	.verify_read = xfs_sb_read_verify,
+ 	.verify_write = xfs_sb_write_verify,
+ };
+ 
+ const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
++	.name = "xfs_sb_quiet",
+ 	.verify_read = xfs_sb_quiet_read_verify,
+ 	.verify_write = xfs_sb_write_verify,
+ };
+diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
+index e7e26bd6468f..4caff91ced51 100644
+--- a/fs/xfs/libxfs/xfs_symlink_remote.c
++++ b/fs/xfs/libxfs/xfs_symlink_remote.c
+@@ -164,6 +164,7 @@ xfs_symlink_write_verify(
+ }
+ 
+ const struct xfs_buf_ops xfs_symlink_buf_ops = {
++	.name = "xfs_symlink",
+ 	.verify_read = xfs_symlink_read_verify,
+ 	.verify_write = xfs_symlink_write_verify,
+ };
+diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
+index 75ff5d5a7d2e..110cb85e04f3 100644
+--- a/fs/xfs/xfs_buf.h
++++ b/fs/xfs/xfs_buf.h
+@@ -131,6 +131,7 @@ struct xfs_buf_map {
+ 	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
+ 
+ struct xfs_buf_ops {
++	char *name;
+ 	void (*verify_read)(struct xfs_buf *);
+ 	void (*verify_write)(struct xfs_buf *);
+ };
+diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
+index 338e50bbfd1e..63db1cc2091a 100644
+--- a/fs/xfs/xfs_error.c
++++ b/fs/xfs/xfs_error.c
+@@ -164,9 +164,9 @@ xfs_verifier_error(
+ {
+ 	struct xfs_mount *mp = bp->b_target->bt_mount;
+ 
+-	xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx",
++	xfs_alert(mp, "Metadata %s detected at %pF, %s block 0x%llx",
+ 		  bp->b_error == -EFSBADCRC ? "CRC error" : "corruption",
+-		  __return_address, bp->b_bn);
++		  __return_address, bp->b_ops->name, bp->b_bn);
+ 
+ 	xfs_alert(mp, "Unmount and run xfs_repair");
+ 
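
With the verifier names wired up above, xfs_verifier_error() can say
which on-disk structure failed, not just where. Going by the new format
string, a corrupt AGF would now be reported along these lines (device,
address and block number invented for illustration):

	XFS (sda1): Metadata CRC error detected at xfs_agf_read_verify+0x5e/0x100, xfs_agf block 0x1
	XFS (sda1): Unmount and run xfs_repair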
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index 51cc1deb7af3..30c8971bc615 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -127,6 +127,8 @@ struct dm_dev {
+ 	char name[16];
+ };
+ 
++dev_t dm_get_dev_t(const char *path);
++
+ /*
+  * Constructors should call these functions to ensure destination devices
+  * are opened/closed correctly.
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 05b9a694e213..6c86c7edafa7 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -265,6 +265,7 @@ struct header_ops {
+ 	void	(*cache_update)(struct hh_cache *hh,
+ 				const struct net_device *dev,
+ 				const unsigned char *haddr);
++	bool	(*validate)(const char *ll_header, unsigned int len);
+ };
+ 
+ /* These flag bits are private to the generic network queueing
+@@ -1372,7 +1373,7 @@ enum netdev_priv_flags {
+  *	@dma:		DMA channel
+  *	@mtu:		Interface MTU value
+  *	@type:		Interface hardware type
+- *	@hard_header_len: Hardware header length
++ *	@hard_header_len: Maximum hardware header length.
+  *
+  *	@needed_headroom: Extra headroom the hardware may need, but not in all
+  *			  cases can this be guaranteed
+@@ -2416,6 +2417,24 @@ static inline int dev_parse_header(const struct sk_buff *skb,
+ 	return dev->header_ops->parse(skb, haddr);
+ }
+ 
++/* ll_header must have at least hard_header_len allocated */
++static inline bool dev_validate_header(const struct net_device *dev,
++				       char *ll_header, int len)
++{
++	if (likely(len >= dev->hard_header_len))
++		return true;
++
++	if (capable(CAP_SYS_RAWIO)) {
++		memset(ll_header + len, 0, dev->hard_header_len - len);
++		return true;
++	}
++
++	if (dev->header_ops && dev->header_ops->validate)
++		return dev->header_ops->validate(ll_header, len);
++
++	return false;
++}
++
+ typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
+ int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
+ static inline int unregister_gifconf(unsigned int family)
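
The new dev_validate_header() lets a link layer accept short headers
from unprivileged senders when they are still well formed, instead of
the old all-or-nothing hard_header_len comparison. A hypothetical
->validate() callback for a protocol with two legal header sizes,
purely illustrative and not taken from any real driver:

#include <stdbool.h>

#define FULL_HDR_LEN	14
#define SHORT_HDR_LEN	6

static bool demo_header_validate(const char *ll_header, unsigned int len)
{
	/* a real implementation would also sanity-check the fields
	 * inside ll_header, not just its length */
	(void)ll_header;
	return len == FULL_HDR_LEN || len == SHORT_HDR_LEN;
}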
+diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
+index a3e215bb0241..7741efa43b35 100644
+--- a/include/linux/netfilter/x_tables.h
++++ b/include/linux/netfilter/x_tables.h
+@@ -239,11 +239,18 @@ void xt_unregister_match(struct xt_match *target);
+ int xt_register_matches(struct xt_match *match, unsigned int n);
+ void xt_unregister_matches(struct xt_match *match, unsigned int n);
+ 
++int xt_check_entry_offsets(const void *base, const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset);
++
+ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
+ 		   bool inv_proto);
+ int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
+ 		    bool inv_proto);
+ 
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat);
++
+ struct xt_table *xt_register_table(struct net *net,
+ 				   const struct xt_table *table,
+ 				   struct xt_table_info *bootstrap,
+@@ -421,7 +428,7 @@ void xt_compat_init_offsets(u_int8_t af, unsigned int number);
+ int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
+ 
+ int xt_compat_match_offset(const struct xt_match *match);
+-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 			      unsigned int *size);
+ int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 			    void __user **dstptr, unsigned int *size);
+@@ -431,6 +438,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 				unsigned int *size);
+ int xt_compat_target_to_user(const struct xt_entry_target *t,
+ 			     void __user **dstptr, unsigned int *size);
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset);
+ 
+ #endif /* CONFIG_COMPAT */
+ #endif /* _X_TABLES_H */
+diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
+index eb8b8ac6df3c..24f5470d3944 100644
+--- a/include/linux/pipe_fs_i.h
++++ b/include/linux/pipe_fs_i.h
+@@ -42,6 +42,7 @@ struct pipe_buffer {
+  *	@fasync_readers: reader side fasync
+  *	@fasync_writers: writer side fasync
+  *	@bufs: the circular array of pipe buffers
++ *	@user: the user who created this pipe
+  **/
+ struct pipe_inode_info {
+ 	struct mutex mutex;
+@@ -57,6 +58,7 @@ struct pipe_inode_info {
+ 	struct fasync_struct *fasync_readers;
+ 	struct fasync_struct *fasync_writers;
+ 	struct pipe_buffer *bufs;
++	struct user_struct *user;
+ };
+ 
+ /*
+@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *);
+ void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
+ 
+ extern unsigned int pipe_max_size, pipe_min_size;
++extern unsigned long pipe_user_pages_hard;
++extern unsigned long pipe_user_pages_soft;
+ int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
+ 
+ 
+diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
+index 5e0bc779e6c5..33f88b4479e4 100644
+--- a/include/linux/platform_data/asoc-s3c.h
++++ b/include/linux/platform_data/asoc-s3c.h
+@@ -39,6 +39,10 @@ struct samsung_i2s {
+  */
+ struct s3c_audio_pdata {
+ 	int (*cfg_gpio)(struct platform_device *);
++	void *dma_playback;
++	void *dma_capture;
++	void *dma_play_sec;
++	void *dma_capture_mic;
+ 	union {
+ 		struct samsung_i2s i2s;
+ 	} type;
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 9128b4e9f541..9e39deaeddd6 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -803,6 +803,7 @@ struct user_struct {
+ #endif
+ 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
+ 	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
++	atomic_long_t pipe_bufs;  /* how many pages are allocated in pipe buffers */
+ 
+ #ifdef CONFIG_KEYS
+ 	struct key *uid_keyring;	/* UID specific keyring */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 6633b0cd3fb9..ca2e26a486ee 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1781,6 +1781,30 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
+ 	skb->tail += len;
+ }
+ 
++/**
++ *	skb_tailroom_reserve - adjust reserved_tailroom
++ *	@skb: buffer to alter
++ *	@mtu: maximum amount of headlen permitted
++ *	@needed_tailroom: minimum amount of reserved_tailroom
++ *
++ *	Set reserved_tailroom so that headlen can be as large as possible but
++ *	not larger than mtu and tailroom cannot be smaller than
++ *	needed_tailroom.
++ *	The required headroom should already have been reserved before using
++ *	this function.
++ */
++static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
++					unsigned int needed_tailroom)
++{
++	SKB_LINEAR_ASSERT(skb);
++	if (mtu < skb_tailroom(skb) - needed_tailroom)
++		/* use at most mtu */
++		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
++	else
++		/* use up to all available space */
++		skb->reserved_tailroom = needed_tailroom;
++}
++
+ #define ENCAP_TYPE_ETHER	0
+ #define ENCAP_TYPE_IPPROTO	1
+ 
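
A worked instance of the helper's arithmetic: with 2000 bytes of
tailroom, a 1500-byte mtu and 16 bytes of needed_tailroom, 1500 is less
than 2000 - 16, so reserved_tailroom becomes 500 and headlen is capped
at exactly mtu. The same computation as standalone C:

#include <stdio.h>

static unsigned int reserved_tailroom(unsigned int tailroom,
				      unsigned int mtu,
				      unsigned int needed)
{
	if (mtu < tailroom - needed)
		return tailroom - mtu;	/* headlen capped at mtu */
	return needed;			/* headlen gets all spare room */
}

int main(void)
{
	printf("%u\n", reserved_tailroom(2000, 1500, 16));	/* 500 */
	return 0;
}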
+diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
+index 598ba80ec30c..ee29cb43470f 100644
+--- a/include/linux/sunrpc/clnt.h
++++ b/include/linux/sunrpc/clnt.h
+@@ -134,8 +134,6 @@ struct rpc_create_args {
+ #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT	(1UL << 9)
+ 
+ struct rpc_clnt *rpc_create(struct rpc_create_args *args);
+-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+-					struct rpc_xprt *xprt);
+ struct rpc_clnt	*rpc_bind_new_program(struct rpc_clnt *,
+ 				const struct rpc_program *, u32);
+ void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
+diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
+index 966889a20ea3..e479033bd782 100644
+--- a/include/linux/usb/ehci_def.h
++++ b/include/linux/usb/ehci_def.h
+@@ -180,11 +180,11 @@ struct ehci_regs {
+  * PORTSCx
+  */
+ 	/* HOSTPC: offset 0x84 */
+-	u32		hostpc[1];	/* HOSTPC extension */
++	u32		hostpc[0];	/* HOSTPC extension */
+ #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
+ #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
+ 
+-	u32		reserved5[16];
++	u32		reserved5[17];
+ 
+ 	/* USBMODE_EX: offset 0xc8 */
+ 	u32		usbmode_ex;	/* USB Device mode extension */
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 78ed135e9dea..5cba8f3c3fe4 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -211,6 +211,7 @@ struct bonding {
+ 	 * ALB mode (6) - to sync the use and modifications of its hash table
+ 	 */
+ 	spinlock_t mode_lock;
++	spinlock_t stats_lock;
+ 	u8	 send_peer_notif;
+ 	u8       igmp_retrans;
+ #ifdef CONFIG_PROC_FS
+diff --git a/include/net/codel.h b/include/net/codel.h
+index 1e18005f7f65..0ee76108e741 100644
+--- a/include/net/codel.h
++++ b/include/net/codel.h
+@@ -160,11 +160,13 @@ struct codel_vars {
+  * struct codel_stats - contains codel shared variables and stats
+  * @maxpacket:	largest packet we've seen so far
+  * @drop_count:	temp count of dropped packets in dequeue()
++ * @drop_len:	bytes of dropped packets in dequeue()
+  * @ecn_mark:	number of packets we ECN marked instead of dropping
+  */
+ struct codel_stats {
+ 	u32		maxpacket;
+ 	u32		drop_count;
++	u32		drop_len;
+ 	u32		ecn_mark;
+ };
+ 
+@@ -301,6 +303,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 								  vars->rec_inv_sqrt);
+ 					goto end;
+ 				}
++				stats->drop_len += qdisc_pkt_len(skb);
+ 				qdisc_drop(skb, sch);
+ 				stats->drop_count++;
+ 				skb = dequeue_func(vars, sch);
+@@ -323,6 +326,7 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch,
+ 		if (params->ecn && INET_ECN_set_ce(skb)) {
+ 			stats->ecn_mark++;
+ 		} else {
++			stats->drop_len += qdisc_pkt_len(skb);
+ 			qdisc_drop(skb, sch);
+ 			stats->drop_count++;
+ 
+diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
+index 4e3731ee4eac..cd8594528d4b 100644
+--- a/include/net/ip_vs.h
++++ b/include/net/ip_vs.h
+@@ -1584,6 +1584,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
+ }
+ #endif /* CONFIG_IP_VS_NFCT */
+ 
++/* Really using conntrack? */
++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
++					     struct sk_buff *skb)
++{
++#ifdef CONFIG_IP_VS_NFCT
++	enum ip_conntrack_info ctinfo;
++	struct nf_conn *ct;
++
++	if (!(cp->flags & IP_VS_CONN_F_NFCT))
++		return false;
++	ct = nf_ct_get(skb, &ctinfo);
++	if (ct && !nf_ct_is_untracked(ct))
++		return true;
++#endif
++	return false;
++}
++
+ static inline int
+ ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+ {
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 080b657ef8fb..530bdca19803 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -395,7 +395,8 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ 			      struct Qdisc *qdisc);
+ void qdisc_reset(struct Qdisc *qdisc);
+ void qdisc_destroy(struct Qdisc *qdisc);
+-void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
++void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
++			       unsigned int len);
+ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ 			  const struct Qdisc_ops *ops);
+ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+@@ -691,6 +692,23 @@ static inline void qdisc_reset_queue(struct Qdisc *sch)
+ 	sch->qstats.backlog = 0;
+ }
+ 
++static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
++					  struct Qdisc **pold)
++{
++	struct Qdisc *old;
++
++	sch_tree_lock(sch);
++	old = *pold;
++	*pold = new;
++	if (old != NULL) {
++		qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
++		qdisc_reset(old);
++	}
++	sch_tree_unlock(sch);
++
++	return old;
++}
++
+ static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+ 					      struct sk_buff_head *list)
+ {
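
qdisc_replace() folds a pattern that was open-coded in each classful
qdisc: swap the child under the tree lock, then charge the parent
hierarchy for everything the old child still held -- now both qlen and
backlog, which is the point of replacing qdisc_tree_decrease_qlen()
with qdisc_tree_reduce_backlog(). A userspace model of that sequence
(a mutex and two plain counters stand in for the tree lock and the
parent qdisc's statistics):

#include <pthread.h>
#include <stddef.h>

struct q { unsigned int qlen, backlog; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int parent_qlen, parent_backlog;

static struct q *q_replace(struct q **pold, struct q *new)
{
	struct q *old;

	pthread_mutex_lock(&tree_lock);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		parent_qlen -= old->qlen;	/* reduce_backlog step */
		parent_backlog -= old->backlog;
		old->qlen = old->backlog = 0;	/* qdisc_reset() stand-in */
	}
	pthread_mutex_unlock(&tree_lock);
	return old;
}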
+diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
+index df705908480a..2f48d648e051 100644
+--- a/include/sound/hda_regmap.h
++++ b/include/sound/hda_regmap.h
+@@ -17,6 +17,8 @@ int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
+ 				    unsigned int verb);
+ int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ 			     unsigned int *val);
++int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
++				      unsigned int reg, unsigned int *val);
+ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ 			      unsigned int val);
+ int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 6582410a71c7..efd143dcedf1 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1227,6 +1227,7 @@ static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn)
+ 	}
+ 
+ 	if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
++	    BPF_SIZE(insn->code) == BPF_DW ||
+ 	    (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
+ 		verbose("BPF_LD_ABS uses reserved fields\n");
+ 		return -EINVAL;
+@@ -1864,7 +1865,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+ 			if (IS_ERR(map)) {
+ 				verbose("fd %d is not pointing to valid bpf_map\n",
+ 					insn->imm);
+-				fdput(f);
+ 				return PTR_ERR(map);
+ 			}
+ 
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 359da3abb004..3abce1e0f910 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4378,14 +4378,15 @@ static void css_free_work_fn(struct work_struct *work)
+ 
+ 	if (ss) {
+ 		/* css free path */
++		struct cgroup_subsys_state *parent = css->parent;
+ 		int id = css->id;
+ 
+-		if (css->parent)
+-			css_put(css->parent);
+-
+ 		ss->css_free(css);
+ 		cgroup_idr_remove(&ss->css_idr, id);
+ 		cgroup_put(cgrp);
++
++		if (parent)
++			css_put(parent);
+ 	} else {
+ 		/* cgroup free path */
+ 		atomic_dec(&cgrp->root->nr_cgrps);
+@@ -4478,6 +4479,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
+ 	memset(css, 0, sizeof(*css));
+ 	css->cgroup = cgrp;
+ 	css->ss = ss;
++	css->id = -1;
+ 	INIT_LIST_HEAD(&css->sibling);
+ 	INIT_LIST_HEAD(&css->children);
+ 	css->serial_nr = css_serial_nr_next++;
+@@ -4563,7 +4565,7 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
+ 
+ 	err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
+ 	if (err < 0)
+-		goto err_free_percpu_ref;
++		goto err_free_css;
+ 	css->id = err;
+ 
+ 	if (visible) {
+@@ -4595,9 +4597,6 @@ err_list_del:
+ 	list_del_rcu(&css->sibling);
+ 	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+ err_free_id:
+-	cgroup_idr_remove(&ss->css_idr, css->id);
+-err_free_percpu_ref:
+-	percpu_ref_exit(&css->refcnt);
+ err_free_css:
+ 	call_rcu(&css->rcu_head, css_free_rcu_fn);
+ 	return err;
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 46b168e19c98..2214b70f1910 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1381,8 +1381,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
+ 	if (likely(&hb1->chain != &hb2->chain)) {
+ 		plist_del(&q->list, &hb1->chain);
+ 		hb_waiters_dec(hb1);
+-		plist_add(&q->list, &hb2->chain);
+ 		hb_waiters_inc(hb2);
++		plist_add(&q->list, &hb2->chain);
+ 		q->lock_ptr = &hb2->lock;
+ 	}
+ 	get_futex_key_refs(key2);
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 3b0f4c09ab92..98e607121d09 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4568,14 +4568,16 @@ void show_state_filter(unsigned long state_filter)
+ 		/*
+ 		 * reset the NMI-timeout, listing all files on a slow
+ 		 * console might take a lot of time:
++		 * Also, reset softlockup watchdogs on all CPUs, because
++		 * another CPU might be blocked waiting for us to process
++		 * an IPI.
+ 		 */
+ 		touch_nmi_watchdog();
++		touch_all_softlockup_watchdogs();
+ 		if (!state_filter || (p->state & state_filter))
+ 			sched_show_task(p);
+ 	}
+ 
+-	touch_all_softlockup_watchdogs();
+-
+ #ifdef CONFIG_SCHED_DEBUG
+ 	sysrq_sched_debug_show();
+ #endif
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index c3eee4c6d6c1..7d4900404c94 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1695,6 +1695,20 @@ static struct ctl_table fs_table[] = {
+ 		.proc_handler	= &pipe_proc_fn,
+ 		.extra1		= &pipe_min_size,
+ 	},
++	{
++		.procname	= "pipe-user-pages-hard",
++		.data		= &pipe_user_pages_hard,
++		.maxlen		= sizeof(pipe_user_pages_hard),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax,
++	},
++	{
++		.procname	= "pipe-user-pages-soft",
++		.data		= &pipe_user_pages_soft,
++		.maxlen		= sizeof(pipe_user_pages_soft),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax,
++	},
+ 	{ }
+ };
+ 
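
The two entries surface the limits as
/proc/sys/fs/pipe-user-pages-{soft,hard}. A small reader, with the
paths inferred from the procnames in the table above:

#include <stdio.h>

static unsigned long read_knob(const char *path)
{
	unsigned long v = 0;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%lu", &v) != 1)
			v = 0;
		fclose(f);
	}
	return v;
}

int main(void)
{
	printf("soft=%lu hard=%lu (0 means no hard cap)\n",
	       read_knob("/proc/sys/fs/pipe-user-pages-soft"),
	       read_knob("/proc/sys/fs/pipe-user-pages-hard"));
	return 0;
}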
+diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
+index 6d6c0411cbe8..fc0c74f18288 100644
+--- a/kernel/trace/trace_printk.c
++++ b/kernel/trace/trace_printk.c
+@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
+ static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
+ {
+ 	struct trace_bprintk_fmt *pos;
++
++	if (!fmt)
++		return ERR_PTR(-EINVAL);
++
+ 	list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
+ 		if (!strcmp(pos->fmt, fmt))
+ 			return pos;
+@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
+ 	for (iter = start; iter < end; iter++) {
+ 		struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
+ 		if (tb_fmt) {
+-			*iter = tb_fmt->fmt;
++			if (!IS_ERR(tb_fmt))
++				*iter = tb_fmt->fmt;
+ 			continue;
+ 		}
+ 
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 3dcf93cd622b..32c719a4bc3d 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -431,6 +431,24 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 
+ 		if (!valid_page)
+ 			valid_page = page;
++
++		/*
++		 * For compound pages such as THP and hugetlbfs, we can save
++		 * potentially a lot of iterations if we skip them at once.
++		 * The check is racy, but we can consider only valid values
++		 * and the only danger is skipping too much.
++		 */
++		if (PageCompound(page)) {
++			unsigned int comp_order = compound_order(page);
++
++			if (likely(comp_order < MAX_ORDER)) {
++				blockpfn += (1UL << comp_order) - 1;
++				cursor += (1UL << comp_order) - 1;
++			}
++
++			goto isolate_fail;
++		}
++
+ 		if (!PageBuddy(page))
+ 			goto isolate_fail;
+ 
+@@ -462,25 +480,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
+ 
+ 		/* Found a free page, break it into order-0 pages */
+ 		isolated = split_free_page(page);
++		if (!isolated)
++			break;
++
+ 		total_isolated += isolated;
++		cc->nr_freepages += isolated;
+ 		for (i = 0; i < isolated; i++) {
+ 			list_add(&page->lru, freelist);
+ 			page++;
+ 		}
+-
+-		/* If a page was split, advance to the end of it */
+-		if (isolated) {
+-			cc->nr_freepages += isolated;
+-			if (!strict &&
+-				cc->nr_migratepages <= cc->nr_freepages) {
+-				blockpfn += isolated;
+-				break;
+-			}
+-
+-			blockpfn += isolated - 1;
+-			cursor += isolated - 1;
+-			continue;
++		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
++			blockpfn += isolated;
++			break;
+ 		}
++		/* Advance to the end of split page */
++		blockpfn += isolated - 1;
++		cursor += isolated - 1;
++		continue;
+ 
+ isolate_fail:
+ 		if (strict)
+@@ -490,6 +506,16 @@ isolate_fail:
+ 
+ 	}
+ 
++	if (locked)
++		spin_unlock_irqrestore(&cc->zone->lock, flags);
++
++	/*
++	 * There is a tiny chance that we have read bogus compound_order(),
++	 * so be careful to not go outside of the pageblock.
++	 */
++	if (unlikely(blockpfn > end_pfn))
++		blockpfn = end_pfn;
++
+ 	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
+ 					nr_scanned, total_isolated);
+ 
+@@ -504,9 +530,6 @@ isolate_fail:
+ 	if (strict && blockpfn < end_pfn)
+ 		total_isolated = 0;
+ 
+-	if (locked)
+-		spin_unlock_irqrestore(&cc->zone->lock, flags);
+-
+ 	/* Update the pageblock-skip if the whole pageblock was scanned */
+ 	if (blockpfn == end_pfn)
+ 		update_pageblock_skip(cc, valid_page, total_isolated, false);
+@@ -930,6 +953,7 @@ static void isolate_freepages(struct compact_control *cc)
+ 				block_end_pfn = block_start_pfn,
+ 				block_start_pfn -= pageblock_nr_pages,
+ 				isolate_start_pfn = block_start_pfn) {
++		unsigned long isolated;
+ 
+ 		/*
+ 		 * This can iterate a massively long zone without finding any
+@@ -954,8 +978,12 @@ static void isolate_freepages(struct compact_control *cc)
+ 			continue;
+ 
+ 		/* Found a block suitable for isolating free pages from. */
+-		isolate_freepages_block(cc, &isolate_start_pfn,
+-					block_end_pfn, freelist, false);
++		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
++						block_end_pfn, freelist, false);
++		/* If isolation failed early, do not continue needlessly */
++		if (!isolated && isolate_start_pfn < block_end_pfn &&
++		    cc->nr_migratepages > cc->nr_freepages)
++			break;
+ 
+ 		/*
+ 		 * Remember where the free scanner should restart next time,
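
The compound-page fast-forward and the closing clamp in the hunks above
belong together: compound_order() is read without the zone lock, so a
racy value could otherwise send the scanner past the pageblock. A
sketch of just that arithmetic (MAX_ORDER value illustrative):

#define MAX_ORDER 11	/* illustrative */

static unsigned long skip_compound(unsigned long pfn, unsigned int order,
				   unsigned long end_pfn)
{
	if (order < MAX_ORDER)		/* only trust sane racy reads */
		pfn += (1UL << order) - 1;
	return pfn > end_pfn ? end_pfn : pfn;	/* never leave the block */
}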
+diff --git a/mm/migrate.c b/mm/migrate.c
+index fe71f91c7b27..2599977221aa 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -389,6 +389,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
+ 
+ 	return MIGRATEPAGE_SUCCESS;
+ }
++EXPORT_SYMBOL(migrate_page_move_mapping);
+ 
+ /*
+  * The expected number of remaining references is the same as that
+@@ -549,6 +550,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
+ 	if (PageWriteback(newpage))
+ 		end_page_writeback(newpage);
+ }
++EXPORT_SYMBOL(migrate_page_copy);
+ 
+ /************************************************************
+  *                    Migration functions
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 551923097bbc..f6f6831cec52 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5926,7 +5926,7 @@ int __meminit init_per_zone_wmark_min(void)
+ 	setup_per_zone_inactive_ratio();
+ 	return 0;
+ }
+-module_init(init_per_zone_wmark_min)
++core_initcall(init_per_zone_wmark_min)
+ 
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index 303c908790ef..4b640c20f5c5 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -300,11 +300,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
+ 	 * now as a simple work-around, we use the next node for destination.
+ 	 */
+ 	if (PageHuge(page)) {
+-		nodemask_t src = nodemask_of_node(page_to_nid(page));
+-		nodemask_t dst;
+-		nodes_complement(dst, src);
++		int node = next_online_node(page_to_nid(page));
++		if (node == MAX_NUMNODES)
++			node = first_online_node;
+ 		return alloc_huge_page_node(page_hstate(compound_head(page)),
+-					    next_node(page_to_nid(page), dst));
++					    node);
+ 	}
+ 
+ 	if (PageHighMem(page))
+diff --git a/mm/percpu.c b/mm/percpu.c
+index 2dd74487a0af..b97617587620 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -110,7 +110,7 @@ struct pcpu_chunk {
+ 	int			map_used;	/* # of map entries used before the sentry */
+ 	int			map_alloc;	/* # of map entries allocated */
+ 	int			*map;		/* allocation map */
+-	struct work_struct	map_extend_work;/* async ->map[] extension */
++	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
+ 
+ 	void			*data;		/* chunk data */
+ 	int			first_free;	/* no free below this */
+@@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
+ static int pcpu_reserved_chunk_limit;
+ 
+ static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
+-static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
++static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
+ 
+ static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+ 
++/* chunks which need their map areas extended, protected by pcpu_lock */
++static LIST_HEAD(pcpu_map_extend_chunks);
++
+ /*
+  * The number of empty populated pages, protected by pcpu_lock.  The
+  * reserved chunk doesn't contribute to the count.
+@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
+ {
+ 	int margin, new_alloc;
+ 
++	lockdep_assert_held(&pcpu_lock);
++
+ 	if (is_atomic) {
+ 		margin = 3;
+ 
+ 		if (chunk->map_alloc <
+-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
+-		    pcpu_async_enabled)
+-			schedule_work(&chunk->map_extend_work);
++		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
++			if (list_empty(&chunk->map_extend_list)) {
++				list_add_tail(&chunk->map_extend_list,
++					      &pcpu_map_extend_chunks);
++				pcpu_schedule_balance_work();
++			}
++		}
+ 	} else {
+ 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
+ 	}
+@@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+ 	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+ 	unsigned long flags;
+ 
++	lockdep_assert_held(&pcpu_alloc_mutex);
++
+ 	new = pcpu_mem_zalloc(new_size);
+ 	if (!new)
+ 		return -ENOMEM;
+@@ -469,20 +480,6 @@ out_unlock:
+ 	return 0;
+ }
+ 
+-static void pcpu_map_extend_workfn(struct work_struct *work)
+-{
+-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
+-						map_extend_work);
+-	int new_alloc;
+-
+-	spin_lock_irq(&pcpu_lock);
+-	new_alloc = pcpu_need_to_extend(chunk, false);
+-	spin_unlock_irq(&pcpu_lock);
+-
+-	if (new_alloc)
+-		pcpu_extend_area_map(chunk, new_alloc);
+-}
+-
+ /**
+  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
+  * @chunk: chunk the candidate area belongs to
+@@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
+ 	chunk->map_used = 1;
+ 
+ 	INIT_LIST_HEAD(&chunk->list);
+-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&chunk->map_extend_list);
+ 	chunk->free_size = pcpu_unit_size;
+ 	chunk->contig_hint = pcpu_unit_size;
+ 
+@@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
+ 		return NULL;
+ 	}
+ 
++	if (!is_atomic)
++		mutex_lock(&pcpu_alloc_mutex);
++
+ 	spin_lock_irqsave(&pcpu_lock, flags);
+ 
+ 	/* serve reserved allocations from the reserved chunk if available */
+@@ -969,12 +969,9 @@ restart:
+ 	if (is_atomic)
+ 		goto fail;
+ 
+-	mutex_lock(&pcpu_alloc_mutex);
+-
+ 	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
+ 		chunk = pcpu_create_chunk();
+ 		if (!chunk) {
+-			mutex_unlock(&pcpu_alloc_mutex);
+ 			err = "failed to allocate new chunk";
+ 			goto fail;
+ 		}
+@@ -985,7 +982,6 @@ restart:
+ 		spin_lock_irqsave(&pcpu_lock, flags);
+ 	}
+ 
+-	mutex_unlock(&pcpu_alloc_mutex);
+ 	goto restart;
+ 
+ area_found:
+@@ -995,8 +991,6 @@ area_found:
+ 	if (!is_atomic) {
+ 		int page_start, page_end, rs, re;
+ 
+-		mutex_lock(&pcpu_alloc_mutex);
+-
+ 		page_start = PFN_DOWN(off);
+ 		page_end = PFN_UP(off + size);
+ 
+@@ -1007,7 +1001,6 @@ area_found:
+ 
+ 			spin_lock_irqsave(&pcpu_lock, flags);
+ 			if (ret) {
+-				mutex_unlock(&pcpu_alloc_mutex);
+ 				pcpu_free_area(chunk, off, &occ_pages);
+ 				err = "failed to populate";
+ 				goto fail_unlock;
+@@ -1047,6 +1040,8 @@ fail:
+ 		/* see the flag handling in pcpu_balance_workfn() */
+ 		pcpu_atomic_alloc_failed = true;
+ 		pcpu_schedule_balance_work();
++	} else {
++		mutex_unlock(&pcpu_alloc_mutex);
+ 	}
+ 	return NULL;
+ }
+@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
+ 			continue;
+ 
++		list_del_init(&chunk->map_extend_list);
+ 		list_move(&chunk->list, &to_free);
+ 	}
+ 
+@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
+ 		pcpu_destroy_chunk(chunk);
+ 	}
+ 
++	/* service chunks which requested async area map extension */
++	do {
++		int new_alloc = 0;
++
++		spin_lock_irq(&pcpu_lock);
++
++		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
++					struct pcpu_chunk, map_extend_list);
++		if (chunk) {
++			list_del_init(&chunk->map_extend_list);
++			new_alloc = pcpu_need_to_extend(chunk, false);
++		}
++
++		spin_unlock_irq(&pcpu_lock);
++
++		if (new_alloc)
++			pcpu_extend_area_map(chunk, new_alloc);
++	} while (chunk);
++
+ 	/*
+ 	 * Ensure there are certain number of free populated pages for
+ 	 * atomic allocs.  Fill up from the most packed so that atomic
+@@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	 */
+ 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 	INIT_LIST_HEAD(&schunk->list);
+-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
++	INIT_LIST_HEAD(&schunk->map_extend_list);
+ 	schunk->base_addr = base_addr;
+ 	schunk->map = smap;
+ 	schunk->map_alloc = ARRAY_SIZE(smap);
+@@ -1676,7 +1691,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
+ 	if (dyn_size) {
+ 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
+ 		INIT_LIST_HEAD(&dchunk->list);
+-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
++		INIT_LIST_HEAD(&dchunk->map_extend_list);
+ 		dchunk->base_addr = base_addr;
+ 		dchunk->map = dmap;
+ 		dchunk->map_alloc = ARRAY_SIZE(dmap);
+diff --git a/mm/shmem.c b/mm/shmem.c
+index 47d536e59fc0..46511ad90bc5 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -2137,9 +2137,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
+ 									NULL);
+ 		if (error) {
+ 			/* Remove the !PageUptodate pages we added */
+-			shmem_undo_range(inode,
+-				(loff_t)start << PAGE_CACHE_SHIFT,
+-				(loff_t)index << PAGE_CACHE_SHIFT, true);
++			if (index > start) {
++				shmem_undo_range(inode,
++				    (loff_t)start << PAGE_CACHE_SHIFT,
++				    ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
++			}
+ 			goto undone;
+ 		}
+ 
+diff --git a/mm/swap.c b/mm/swap.c
+index a7251a8ed532..b523f0a4cbfb 100644
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -483,7 +483,7 @@ void rotate_reclaimable_page(struct page *page)
+ 		page_cache_get(page);
+ 		local_irq_save(flags);
+ 		pvec = this_cpu_ptr(&lru_rotate_pvecs);
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_move_tail(pvec);
+ 		local_irq_restore(flags);
+ 	}
+@@ -539,7 +539,7 @@ void activate_page(struct page *page)
+ 		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
+ 
+ 		page_cache_get(page);
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, __activate_page, NULL);
+ 		put_cpu_var(activate_page_pvecs);
+ 	}
+@@ -631,9 +631,8 @@ static void __lru_cache_add(struct page *page)
+ 	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+ 
+ 	page_cache_get(page);
+-	if (!pagevec_space(pvec))
++	if (!pagevec_add(pvec, page) || PageCompound(page))
+ 		__pagevec_lru_add(pvec);
+-	pagevec_add(pvec, page);
+ 	put_cpu_var(lru_add_pvec);
+ }
+ 
+@@ -846,7 +845,7 @@ void deactivate_file_page(struct page *page)
+ 	if (likely(get_page_unless_zero(page))) {
+ 		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
+ 
+-		if (!pagevec_add(pvec, page))
++		if (!pagevec_add(pvec, page) || PageCompound(page))
+ 			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
+ 		put_cpu_var(lru_deactivate_file_pvecs);
+ 	}
+diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
+index 7c646bb2c6f7..98d206b22653 100644
+--- a/net/ax25/ax25_ip.c
++++ b/net/ax25/ax25_ip.c
+@@ -229,8 +229,23 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
+ }
+ #endif
+ 
++static bool ax25_validate_header(const char *header, unsigned int len)
++{
++	ax25_digi digi;
++
++	if (!len)
++		return false;
++
++	if (header[0])
++		return true;
++
++	return ax25_addr_parse(header + 1, len - 1, NULL, NULL, &digi, NULL,
++			       NULL);
++}
++
+ const struct header_ops ax25_header_ops = {
+ 	.create = ax25_hard_header,
++	.validate = ax25_validate_header,
+ };
+ 
+ EXPORT_SYMBOL(ax25_header_ops);
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index da83982bf974..f77dafc114a6 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -88,6 +88,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ 		neigh_node = NULL;
+ 
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
++	/* curr_router used earlier may not be the current orig_ifinfo->router
++	 * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replaced the current
++	 * best neighbor, the reference counter needs to decrease. Consequently,
++	 * the code needs to ensure the curr_router variable contains a pointer
++	 * to the replaced best neighbor.
++	 */
++	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
++
+ 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 	batadv_orig_ifinfo_free_ref(orig_ifinfo);
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index 3d64ed20c393..6004c2de7b2a 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -611,6 +611,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->bcast_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+@@ -638,6 +641,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ 
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->batman_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index a0b1b861b968..b38a8aa3cce8 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -377,11 +377,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	 */
+ 	nf_reset(skb);
+ 
++	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++		goto dropped;
++
+ 	vid = batadv_get_vid(skb, 0);
+ 	ethhdr = eth_hdr(skb);
+ 
+ 	switch (ntohs(ethhdr->h_proto)) {
+ 	case ETH_P_8021Q:
++		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++			goto dropped;
++
+ 		vhdr = (struct vlan_ethhdr *)skb->data;
+ 
+ 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+@@ -393,8 +399,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	}
+ 
+ 	/* skb->dev & skb->pkt_type are set here */
+-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+-		goto dropped;
+ 	skb->protocol = eth_type_trans(skb, soft_iface);
+ 
+ 	/* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index 8d423bc649b9..f876f707fd9e 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -21,18 +21,19 @@
+ #include <asm/uaccess.h>
+ #include "br_private.h"
+ 
+-/* called with RTNL */
+ static int get_bridge_ifindices(struct net *net, int *indices, int num)
+ {
+ 	struct net_device *dev;
+ 	int i = 0;
+ 
+-	for_each_netdev(net, dev) {
++	rcu_read_lock();
++	for_each_netdev_rcu(net, dev) {
+ 		if (i >= num)
+ 			break;
+ 		if (dev->priv_flags & IFF_EBRIDGE)
+ 			indices[i++] = dev->ifindex;
+ 	}
++	rcu_read_unlock();
+ 
+ 	return i;
+ }
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index fe95cb704aaa..e9c4b51525de 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -884,7 +884,9 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ 	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ 	       + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ 	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_PORT_ID */
+-	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN); /* IFLA_PHYS_SWITCH_ID */
++	       + nla_total_size(MAX_PHYS_ITEM_ID_LEN) /* IFLA_PHYS_SWITCH_ID */
++	       + nla_total_size(IFNAMSIZ); /* IFLA_PHYS_PORT_NAME */
++
+ }
+ 
+ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
+@@ -1070,14 +1072,16 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ 		goto nla_put_failure;
+ 
+ 	if (1) {
+-		struct rtnl_link_ifmap map = {
+-			.mem_start   = dev->mem_start,
+-			.mem_end     = dev->mem_end,
+-			.base_addr   = dev->base_addr,
+-			.irq         = dev->irq,
+-			.dma         = dev->dma,
+-			.port        = dev->if_port,
+-		};
++		struct rtnl_link_ifmap map;
++
++		memset(&map, 0, sizeof(map));
++		map.mem_start   = dev->mem_start;
++		map.mem_end     = dev->mem_end;
++		map.base_addr   = dev->base_addr;
++		map.irq         = dev->irq;
++		map.dma         = dev->dma;
++		map.port        = dev->if_port;
++
+ 		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+ 			goto nla_put_failure;
+ 	}
+diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
+index 03227ffd19ce..76d3bf70c31a 100644
+--- a/net/decnet/dn_route.c
++++ b/net/decnet/dn_route.c
+@@ -1036,10 +1036,13 @@ source_ok:
+ 	if (!fld.daddr) {
+ 		fld.daddr = fld.saddr;
+ 
+-		err = -EADDRNOTAVAIL;
+ 		if (dev_out)
+ 			dev_put(dev_out);
++		err = -EINVAL;
+ 		dev_out = init_net.loopback_dev;
++		if (!dev_out->dn_ptr)
++			goto out;
++		err = -EADDRNOTAVAIL;
+ 		dev_hold(dev_out);
+ 		if (!fld.daddr) {
+ 			fld.daddr =
+@@ -1112,6 +1115,8 @@ source_ok:
+ 		if (dev_out == NULL)
+ 			goto out;
+ 		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
++		if (!dn_db)
++			goto e_inval;
+ 		/* Possible improvement - check all devices for local addr */
+ 		if (dn_dev_islocal(dev_out, fld.daddr)) {
+ 			dev_put(dev_out);
+@@ -1153,6 +1158,8 @@ select_source:
+ 			dev_put(dev_out);
+ 		dev_out = init_net.loopback_dev;
+ 		dev_hold(dev_out);
++		if (!dev_out->dn_ptr)
++			goto e_inval;
+ 		fld.flowidn_oif = dev_out->ifindex;
+ 		if (res.fi)
+ 			dn_fib_info_put(res.fi);
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 280d46f947ea..a57056d87a43 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -334,6 +334,9 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 
+ 	ASSERT_RTNL();
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* 1. Deleting primary ifaddr forces deletion all secondaries
+ 	 * unless alias promotion is set
+ 	 **/
+@@ -380,6 +383,7 @@ static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
+ 			fib_del_ifaddr(ifa, ifa1);
+ 	}
+ 
++no_promotions:
+ 	/* 2. Unlink it */
+ 
+ 	*ifap = ifa1->ifa_next;
+diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
+index 872494e6e6eb..513b6aabc5b7 100644
+--- a/net/ipv4/fib_frontend.c
++++ b/net/ipv4/fib_frontend.c
+@@ -861,7 +861,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 	if (ifa->ifa_flags & IFA_F_SECONDARY) {
+ 		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
+ 		if (!prim) {
+-			pr_warn("%s: bug: prim == NULL\n", __func__);
++			/* if the device has been deleted, we don't perform
++			 * address promotion
++			 */
++			if (!in_dev->dead)
++				pr_warn("%s: bug: prim == NULL\n", __func__);
+ 			return;
+ 		}
+ 		if (iprim && iprim != prim) {
+@@ -876,6 +880,9 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		subnet = 1;
+ 	}
+ 
++	if (in_dev->dead)
++		goto no_promotions;
++
+ 	/* Deletion is more complicated than add.
+ 	 * We should take care of not to delete too much :-)
+ 	 *
+@@ -951,6 +958,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
+ 		}
+ 	}
+ 
++no_promotions:
+ 	if (!(ok & BRD_OK))
+ 		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
+ 	if (subnet && ifa->ifa_prefixlen < 31) {
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
+index a3a697f5ffba..218abf9fb1ed 100644
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -353,9 +353,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
+ 	skb_dst_set(skb, &rt->dst);
+ 	skb->dev = dev;
+ 
+-	skb->reserved_tailroom = skb_end_offset(skb) -
+-				 min(mtu, skb_end_offset(skb));
+ 	skb_reserve(skb, hlen);
++	skb_tailroom_reserve(skb, mtu, tlen);
+ 
+ 	skb_reset_network_header(skb);
+ 	pip = ip_hdr(skb);
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 626d9e56a6bd..35080a708b59 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -652,6 +652,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 	inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
+ 	connected = (tunnel->parms.iph.daddr != 0);
+ 
++	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
++
+ 	dst = tnl_params->daddr;
+ 	if (dst == 0) {
+ 		/* NBMA tunnel */
+@@ -749,7 +751,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
+ 			tunnel->err_count--;
+ 
+-			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 			dst_link_failure(skb);
+ 		} else
+ 			tunnel->err_count = 0;
+diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
+index a61200754f4b..2953ee9e5fa0 100644
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -354,11 +354,24 @@ unsigned int arpt_do_table(struct sk_buff *skb,
+ }
+ 
+ /* All zeroes == unconditional rule. */
+-static inline bool unconditional(const struct arpt_arp *arp)
++static inline bool unconditional(const struct arpt_entry *e)
+ {
+ 	static const struct arpt_arp uncond;
+ 
+-	return memcmp(arp, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct arpt_entry) &&
++	       memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
++}
++
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct arpt_entry *target)
++{
++	struct arpt_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		if (iter == target)
++			return true;
++	}
++	return false;
+ }
+ 
+ /* Figures out from what hook each rule can be called: returns 0 if
+@@ -397,11 +410,10 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				|= ((1 << hook) | (1 << NF_ARP_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct arpt_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 && unconditional(&e->arp)) ||
+-			    visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -434,6 +446,8 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct arpt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -453,9 +467,15 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct arpt_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct arpt_entry *)
+ 					(entry0 + newpos);
+@@ -469,25 +489,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
+ 	return 1;
+ }
+ 
+-static inline int check_entry(const struct arpt_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!arp_checkentry(&e->arp)) {
+-		duprintf("arp_tables: arp check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
+-		return -EINVAL;
+-
+-	t = arpt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static inline int check_target(struct arpt_entry *e, const char *name)
+ {
+ 	struct xt_entry_target *t = arpt_get_target(e);
+@@ -517,10 +518,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
+ 	struct xt_target *target;
+ 	int ret;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	t = arpt_get_target(e);
+ 	target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
+ 					t->u.user.revision);
+@@ -546,7 +543,7 @@ static bool check_underflow(const struct arpt_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->arp))
++	if (!unconditional(e))
+ 		return false;
+ 	t = arpt_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -565,9 +562,11 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 					     unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -579,6 +578,14 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -587,9 +594,9 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -679,10 +686,8 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
+ 		}
+ 	}
+ 
+-	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) {
+-		duprintf("Looping hook\n");
++	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
+ 		return -ELOOP;
+-	}
+ 
+ 	/* Finally, each sanity check must pass */
+ 	i = 0;
+@@ -1118,56 +1123,18 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 	struct arpt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+ 
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+-
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, NFPROTO_ARP, name);
++	t = xt_find_table_lock(net, NFPROTO_ARP, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1175,7 +1142,7 @@ static int do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1201,6 +1168,18 @@ static int do_add_counters(struct net *net, const void __user *user,
+ }
+ 
+ #ifdef CONFIG_COMPAT
++struct compat_arpt_replace {
++	char				name[XT_TABLE_MAXNAMELEN];
++	u32				valid_hooks;
++	u32				num_entries;
++	u32				size;
++	u32				hook_entry[NF_ARP_NUMHOOKS];
++	u32				underflow[NF_ARP_NUMHOOKS];
++	u32				num_counters;
++	compat_uptr_t			counters;
++	struct compat_arpt_entry	entries[0];
++};
++
+ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ {
+ 	struct xt_entry_target *t;
+@@ -1209,24 +1188,22 @@ static inline void compat_release_entry(struct compat_arpt_entry *e)
+ 	module_put(t->u.kernel.target->me);
+ }
+ 
+-static inline int
++static int
+ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_arpt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_arpt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1238,8 +1215,11 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct arpt_entry *)e, name);
++	if (!arp_checkentry(&e->arp))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems, e->target_offset,
++					    e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1263,17 +1243,6 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
+ 	if (ret)
+ 		goto release_target;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ release_target:
+@@ -1282,18 +1251,17 @@ out:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct arpt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct arpt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct arpt_entry));
+@@ -1314,144 +1282,81 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+ }
+ 
+-static int translate_compat_table(const char *name,
+-				  unsigned int valid_hooks,
+-				  struct xt_table_info **pinfo,
++static int translate_compat_table(struct xt_table_info **pinfo,
+ 				  void **pentry0,
+-				  unsigned int total_size,
+-				  unsigned int number,
+-				  unsigned int *hook_entries,
+-				  unsigned int *underflows)
++				  const struct compat_arpt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_arpt_entry *iter0;
+-	struct arpt_entry *iter1;
++	struct arpt_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(NFPROTO_ARP);
+-	xt_compat_init_offsets(NFPROTO_ARP, number);
++	xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
+ 		newinfo->hook_entry[i] = info->hook_entry[i];
+ 		newinfo->underflow[i] = info->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone */
++
+ 	xt_compat_flush_offsets(NFPROTO_ARP);
+ 	xt_compat_unlock(NFPROTO_ARP);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = check_target(iter1, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(arpt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1460,31 +1365,18 @@ static int translate_compat_table(const char *name,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(NFPROTO_ARP);
++	xt_compat_unlock(NFPROTO_ARP);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(NFPROTO_ARP);
+-	xt_compat_unlock(NFPROTO_ARP);
+-	goto out;
+ }
+ 
+-struct compat_arpt_replace {
+-	char				name[XT_TABLE_MAXNAMELEN];
+-	u32				valid_hooks;
+-	u32				num_entries;
+-	u32				size;
+-	u32				hook_entry[NF_ARP_NUMHOOKS];
+-	u32				underflow[NF_ARP_NUMHOOKS];
+-	u32				num_counters;
+-	compat_uptr_t			counters;
+-	struct compat_arpt_entry	entries[0];
+-};
+-
+ static int compat_do_replace(struct net *net, void __user *user,
+ 			     unsigned int len)
+ {
+@@ -1518,10 +1410,7 @@ static int compat_do_replace(struct net *net, void __user *user,
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 2d0e265fef6e..3bcf28bf1525 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -168,11 +168,12 @@ get_entry(const void *base, unsigned int offset)
+ 
+ /* All zeroes == unconditional rule. */
+ /* Mildly perf critical (only if packet tracing is on) */
+-static inline bool unconditional(const struct ipt_ip *ip)
++static inline bool unconditional(const struct ipt_entry *e)
+ {
+ 	static const struct ipt_ip uncond;
+ 
+-	return memcmp(ip, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct ipt_entry) &&
++	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
+ #undef FWINV
+ }
+ 
+@@ -229,11 +230,10 @@ get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
+ 	} else if (s == e) {
+ 		(*rulenum)++;
+ 
+-		if (s->target_offset == sizeof(struct ipt_entry) &&
++		if (unconditional(s) &&
+ 		    strcmp(t->target.u.kernel.target->name,
+ 			   XT_STANDARD_TARGET) == 0 &&
+-		   t->verdict < 0 &&
+-		   unconditional(&s->ip)) {
++		   t->verdict < 0) {
+ 			/* Tail of chains: STANDARD target (return/policy) */
+ 			*comment = *chainname == hookname
+ 				? comments[NF_IP_TRACE_COMMENT_POLICY]
+@@ -438,6 +438,18 @@ ipt_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct ipt_entry *target)
++{
++	struct ipt_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		if (iter == target)
++			return true;
++	}
++	return false;
++}
++
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+@@ -471,11 +483,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct ipt_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 && unconditional(&e->ip)) ||
+-			    visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -516,6 +527,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ipt_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -534,9 +547,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct ipt_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ipt_entry *)
+ 					(entry0 + newpos);
+@@ -564,27 +583,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ }
+ 
+ static int
+-check_entry(const struct ipt_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip_checkentry(&e->ip)) {
+-		duprintf("ip check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ipt_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+-static int
+ check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ipt_ip *ip = par->entryinfo;
+@@ -661,10 +659,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	j = 0;
+ 	mtpar.net	= net;
+ 	mtpar.table     = name;
+@@ -708,7 +702,7 @@ static bool check_underflow(const struct ipt_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->ip))
++	if (!unconditional(e))
+ 		return false;
+ 	t = ipt_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -728,9 +722,11 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 			   unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -742,6 +738,14 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -750,9 +754,9 @@ check_entry_size_and_hooks(struct ipt_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -1306,56 +1310,18 @@ do_add_counters(struct net *net, const void __user *user,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	const char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	void *loc_cpu_entry;
+ 	struct ipt_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+-
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+ 
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
+ 
+-	t = xt_find_table_lock(net, AF_INET, name);
++	t = xt_find_table_lock(net, AF_INET, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1363,7 +1329,7 @@ do_add_counters(struct net *net, const void __user *user,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1442,7 +1408,6 @@ compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ipt_ip *ip,
+ 		       unsigned int hookmask,
+ 		       int *size)
+@@ -1478,21 +1443,19 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1504,8 +1467,11 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ipt_entry *)e, name);
++	if (!ip_checkentry(&e->ip))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1513,8 +1479,8 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name,
+-					     &e->ip, e->comefrom, &off);
++		ret = compat_find_calc_match(ematch, &e->ip, e->comefrom,
++					     &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1537,17 +1503,6 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1561,19 +1516,18 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	struct ipt_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ipt_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ipt_entry));
+@@ -1582,198 +1536,104 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ipt_entry);
+ 	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ipt_get_target(e);
+ 	target = t->u.kernel.target;
+ 	xt_compat_target_from_user(t, dstptr, size);
+ 
+ 	de->next_offset = e->next_offset - (origsize - *size);
++
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
+ 			newinfo->hook_entry[h] -= origsize - *size;
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int
+-compat_check_entry(struct ipt_entry *e, struct net *net, const char *name)
+-{
+-	struct xt_entry_match *ematch;
+-	struct xt_mtchk_param mtpar;
+-	unsigned int j;
+-	int ret = 0;
+-
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ip;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV4;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ipt_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ipt_entry *iter0;
+-	struct ipt_entry *iter1;
++	struct ipt_replace repl;
+ 	unsigned int size;
+ 	int ret;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET);
+-	xt_compat_init_offsets(AF_INET, number);
++	xt_compat_init_offsets(AF_INET, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone.
++	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
++	 * if generated by 64bit userspace.
++	 *
++	 * Call standard translate_table() to validate all hook entries,
++	 * underflows, check for loops, etc.
++	 */
+ 	xt_compat_flush_offsets(AF_INET);
+ 	xt_compat_unlock(AF_INET);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ipt_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1782,17 +1642,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET);
++	xt_compat_unlock(AF_INET);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET);
+-	xt_compat_unlock(AF_INET);
+-	goto out;
+ }
+ 
+ static int
+@@ -1829,10 +1688,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
+diff --git a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+index c6eb42100e9a..ea91058b5f6f 100644
+--- a/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
++++ b/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
+@@ -108,10 +108,18 @@ static int masq_inet_event(struct notifier_block *this,
+ 			   unsigned long event,
+ 			   void *ptr)
+ {
+-	struct net_device *dev = ((struct in_ifaddr *)ptr)->ifa_dev->dev;
++	struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
+ 	struct netdev_notifier_info info;
+ 
+-	netdev_notifier_info_init(&info, dev);
++	/* The masq_dev_notifier will catch the case of the device going
++	 * down.  So if the inetdev is dead and being destroyed we have
++	 * no work to do.  Otherwise this is an individual address removal
++	 * and we have to perform the flush.
++	 */
++	if (idev->dead)
++		return NOTIFY_DONE;
++
++	netdev_notifier_info_init(&info, idev->dev);
+ 	return masq_device_event(this, event, &info);
+ }
+ 
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 1d3cdb4d4ebc..eb1d9839a257 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1976,6 +1976,18 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		 */
+ 		if (fi && res->prefixlen < 4)
+ 			fi = NULL;
++	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
++		   (orig_oif != dev_out->ifindex)) {
++		/* For local routes that require a particular output interface
++		 * we do not want to cache the result.  Caching the result
++		 * causes incorrect behaviour when there are multiple source
++		 * addresses on the interface, the end result being that if the
++		 * intended recipient is waiting on that interface for the
++		 * packet he won't receive it because it will be delivered on
++		 * packet, he won't receive it because it will be delivered on
++		 * be set to the loopback interface as well.
++		 */
++		fi = NULL;
+ 	}
+ 
+ 	fnhe = NULL;
+diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
+index a51d63a43e33..9c840c5c6047 100644
+--- a/net/ipv4/tcp_metrics.c
++++ b/net/ipv4/tcp_metrics.c
+@@ -566,7 +566,7 @@ reset:
+ 	 */
+ 	if (crtt > tp->srtt_us) {
+ 		/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
+-		crtt /= 8 * USEC_PER_MSEC;
++		crtt /= 8 * USEC_PER_SEC / HZ;
+ 		inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
+ 	} else if (tp->srtt_us == 0) {
+ 		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 1ea4322c3b0c..ae66c8426ad0 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2627,8 +2627,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+ 	 */
+ 	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
+ 		     skb_headroom(skb) >= 0xFFFF)) {
+-		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
+-						   GFP_ATOMIC);
++		struct sk_buff *nskb;
++
++		skb_mstamp_get(&skb->skb_mstamp);
++		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
+ 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ 			     -ENOBUFS;
+ 	} else {
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index a390174b96de..031752efe1ab 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1979,10 +1979,14 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 		if (!in_dev)
+ 			return;
+ 
+-		ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+-				       iph->protocol);
+-		if (!ours)
+-			return;
++		/* we are supposed to accept bcast packets */
++		if (skb->pkt_type == PACKET_MULTICAST) {
++			ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++					       iph->protocol);
++			if (!ours)
++				return;
++		}
++
+ 		sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ 						   uh->source, iph->saddr, dif);
+ 	} else if (skb->pkt_type == PACKET_HOST) {
+diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
+index 6bb98cc193c9..7b534ac04056 100644
+--- a/net/ipv4/udp_tunnel.c
++++ b/net/ipv4/udp_tunnel.c
+@@ -90,6 +90,8 @@ int udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
+ 	uh->source = src_port;
+ 	uh->len = htons(skb->len);
+ 
++	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
++
+ 	udp_set_csum(nocheck, skb, src, dst, skb->len);
+ 
+ 	return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP,
+diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
+index 5c5d23e59da5..9508a20fbf61 100644
+--- a/net/ipv6/exthdrs_core.c
++++ b/net/ipv6/exthdrs_core.c
+@@ -257,7 +257,11 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
+ 						*fragoff = _frag_off;
+ 					return hp->nexthdr;
+ 				}
+-				return -ENOENT;
++				if (!found)
++					return -ENOENT;
++				if (fragoff)
++					*fragoff = _frag_off;
++				break;
+ 			}
+ 			hdrlen = 8;
+ 		} else if (nexthdr == NEXTHDR_AUTH) {
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 76be7d311cc4..b1311da5d7b8 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -783,6 +783,8 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
+ 	__u32 mtu;
+ 	int err;
+ 
++	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
++
+ 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+ 		encap_limit = t->parms.encap_limit;
+ 
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 5cafd92c2312..c7c2c33aa4af 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -284,12 +284,12 @@ static int ip6_tnl_create2(struct net_device *dev)
+ 
+ 	t = netdev_priv(dev);
+ 
++	dev->rtnl_link_ops = &ip6_link_ops;
+ 	err = register_netdevice(dev);
+ 	if (err < 0)
+ 		goto out;
+ 
+ 	strcpy(t->parms.name, dev->name);
+-	dev->rtnl_link_ops = &ip6_link_ops;
+ 
+ 	dev_hold(dev);
+ 	ip6_tnl_link(ip6n, t);
+@@ -1124,6 +1124,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	u8 tproto;
+ 	int err;
+ 
++	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
++
+ 	tproto = ACCESS_ONCE(t->parms.proto);
+ 	if (tproto != IPPROTO_IPIP && tproto != 0)
+ 		return -1;
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 41e3b5ee8d0b..9a63110b6548 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1574,9 +1574,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
+ 		return NULL;
+ 
+ 	skb->priority = TC_PRIO_CONTROL;
+-	skb->reserved_tailroom = skb_end_offset(skb) -
+-				 min(mtu, skb_end_offset(skb));
+ 	skb_reserve(skb, hlen);
++	skb_tailroom_reserve(skb, mtu, tlen);
+ 
+ 	if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
+ 		/* <draft-ietf-magma-mld-source-05.txt>:
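The mcast.c hunk replaces an open-coded reserved-tailroom computation that was based on skb_end_offset() and ignored the hlen/tlen reservations made at allocation time, so an MLD report could spill into space reserved for the driver. skb_tailroom_reserve() is a helper introduced by the same upstream series in include/linux/skbuff.h; its body is reproduced here from memory as a sketch, so treat the details as approximate:

static inline void skb_tailroom_reserve(struct sk_buff *skb, unsigned int mtu,
					unsigned int needed_tailroom)
{
	if (mtu < skb_tailroom(skb) - needed_tailroom)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use up to all available space */
		skb->reserved_tailroom = needed_tailroom;
}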
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 62f5b0d0bc9b..5254d76dfce8 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -198,11 +198,12 @@ get_entry(const void *base, unsigned int offset)
+ 
+ /* All zeroes == unconditional rule. */
+ /* Mildly perf critical (only if packet tracing is on) */
+-static inline bool unconditional(const struct ip6t_ip6 *ipv6)
++static inline bool unconditional(const struct ip6t_entry *e)
+ {
+ 	static const struct ip6t_ip6 uncond;
+ 
+-	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
++	return e->target_offset == sizeof(struct ip6t_entry) &&
++	       memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
+ }
+ 
+ static inline const struct xt_entry_target *
+@@ -258,11 +259,10 @@ get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
+ 	} else if (s == e) {
+ 		(*rulenum)++;
+ 
+-		if (s->target_offset == sizeof(struct ip6t_entry) &&
++		if (unconditional(s) &&
+ 		    strcmp(t->target.u.kernel.target->name,
+ 			   XT_STANDARD_TARGET) == 0 &&
+-		    t->verdict < 0 &&
+-		    unconditional(&s->ipv6)) {
++		    t->verdict < 0) {
+ 			/* Tail of chains: STANDARD target (return/policy) */
+ 			*comment = *chainname == hookname
+ 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
+@@ -451,6 +451,18 @@ ip6t_do_table(struct sk_buff *skb,
+ #endif
+ }
+ 
++static bool find_jump_target(const struct xt_table_info *t,
++			     const struct ip6t_entry *target)
++{
++	struct ip6t_entry *iter;
++
++	xt_entry_foreach(iter, t->entries, t->size) {
++		 if (iter == target)
++			return true;
++	}
++	return false;
++}
++
+ /* Figures out from what hook each rule can be called: returns 0 if
+    there are loops.  Puts hook bitmask in comefrom. */
+ static int
+@@ -484,11 +496,10 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
+ 
+ 			/* Unconditional return/END. */
+-			if ((e->target_offset == sizeof(struct ip6t_entry) &&
++			if ((unconditional(e) &&
+ 			     (strcmp(t->target.u.user.name,
+ 				     XT_STANDARD_TARGET) == 0) &&
+-			     t->verdict < 0 &&
+-			     unconditional(&e->ipv6)) || visited) {
++			     t->verdict < 0) || visited) {
+ 				unsigned int oldpos, size;
+ 
+ 				if ((strcmp(t->target.u.user.name,
+@@ -529,6 +540,8 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 				size = e->next_offset;
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + pos + size);
++				if (pos + size >= newinfo->size)
++					return 0;
+ 				e->counters.pcnt = pos;
+ 				pos += size;
+ 			} else {
+@@ -547,9 +560,15 @@ mark_source_chains(const struct xt_table_info *newinfo,
+ 					/* This a jump; chase it. */
+ 					duprintf("Jump rule %u -> %u\n",
+ 						 pos, newpos);
++					e = (struct ip6t_entry *)
++						(entry0 + newpos);
++					if (!find_jump_target(newinfo, e))
++						return 0;
+ 				} else {
+ 					/* ... this is a fallthru */
+ 					newpos = pos + e->next_offset;
++					if (newpos >= newinfo->size)
++						return 0;
+ 				}
+ 				e = (struct ip6t_entry *)
+ 					(entry0 + newpos);
+@@ -576,27 +595,6 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
+ 	module_put(par.match->me);
+ }
+ 
+-static int
+-check_entry(const struct ip6t_entry *e, const char *name)
+-{
+-	const struct xt_entry_target *t;
+-
+-	if (!ip6_checkentry(&e->ipv6)) {
+-		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
+-		return -EINVAL;
+-	}
+-
+-	if (e->target_offset + sizeof(struct xt_entry_target) >
+-	    e->next_offset)
+-		return -EINVAL;
+-
+-	t = ip6t_get_target_c(e);
+-	if (e->target_offset + t->u.target_size > e->next_offset)
+-		return -EINVAL;
+-
+-	return 0;
+-}
+-
+ static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
+ {
+ 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
+@@ -675,10 +673,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
+ 	struct xt_mtchk_param mtpar;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = check_entry(e, name);
+-	if (ret)
+-		return ret;
+-
+ 	j = 0;
+ 	mtpar.net	= net;
+ 	mtpar.table     = name;
+@@ -722,7 +716,7 @@ static bool check_underflow(const struct ip6t_entry *e)
+ 	const struct xt_entry_target *t;
+ 	unsigned int verdict;
+ 
+-	if (!unconditional(&e->ipv6))
++	if (!unconditional(e))
+ 		return false;
+ 	t = ip6t_get_target_c(e);
+ 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+@@ -742,9 +736,11 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 			   unsigned int valid_hooks)
+ {
+ 	unsigned int h;
++	int err;
+ 
+ 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p\n", e);
+ 		return -EINVAL;
+ 	}
+@@ -756,6 +752,14 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
++				     e->next_offset);
++	if (err)
++		return err;
++
+ 	/* Check hooks & underflows */
+ 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ 		if (!(valid_hooks & (1 << h)))
+@@ -764,9 +768,9 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
+ 			newinfo->hook_entry[h] = hook_entries[h];
+ 		if ((unsigned char *)e - base == underflows[h]) {
+ 			if (!check_underflow(e)) {
+-				pr_err("Underflows must be unconditional and "
+-				       "use the STANDARD target with "
+-				       "ACCEPT/DROP\n");
++				pr_debug("Underflows must be unconditional and "
++					 "use the STANDARD target with "
++					 "ACCEPT/DROP\n");
+ 				return -EINVAL;
+ 			}
+ 			newinfo->underflow[h] = underflows[h];
+@@ -1319,56 +1323,17 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 	unsigned int i, curcpu;
+ 	struct xt_counters_info tmp;
+ 	struct xt_counters *paddc;
+-	unsigned int num_counters;
+-	char *name;
+-	int size;
+-	void *ptmp;
+ 	struct xt_table *t;
+ 	const struct xt_table_info *private;
+ 	int ret = 0;
+ 	const void *loc_cpu_entry;
+ 	struct ip6t_entry *iter;
+ 	unsigned int addend;
+-#ifdef CONFIG_COMPAT
+-	struct compat_xt_counters_info compat_tmp;
+-
+-	if (compat) {
+-		ptmp = &compat_tmp;
+-		size = sizeof(struct compat_xt_counters_info);
+-	} else
+-#endif
+-	{
+-		ptmp = &tmp;
+-		size = sizeof(struct xt_counters_info);
+-	}
+-
+-	if (copy_from_user(ptmp, user, size) != 0)
+-		return -EFAULT;
+ 
+-#ifdef CONFIG_COMPAT
+-	if (compat) {
+-		num_counters = compat_tmp.num_counters;
+-		name = compat_tmp.name;
+-	} else
+-#endif
+-	{
+-		num_counters = tmp.num_counters;
+-		name = tmp.name;
+-	}
+-
+-	if (len != size + num_counters * sizeof(struct xt_counters))
+-		return -EINVAL;
+-
+-	paddc = vmalloc(len - size);
+-	if (!paddc)
+-		return -ENOMEM;
+-
+-	if (copy_from_user(paddc, user + size, len - size) != 0) {
+-		ret = -EFAULT;
+-		goto free;
+-	}
+-
+-	t = xt_find_table_lock(net, AF_INET6, name);
++	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
++	if (IS_ERR(paddc))
++		return PTR_ERR(paddc);
++	t = xt_find_table_lock(net, AF_INET6, tmp.name);
+ 	if (IS_ERR_OR_NULL(t)) {
+ 		ret = t ? PTR_ERR(t) : -ENOENT;
+ 		goto free;
+@@ -1377,7 +1342,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
+ 
+ 	local_bh_disable();
+ 	private = t->private;
+-	if (private->number != num_counters) {
++	if (private->number != tmp.num_counters) {
+ 		ret = -EINVAL;
+ 		goto unlock_up_free;
+ 	}
+@@ -1457,7 +1422,6 @@ compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
+ 
+ static int
+ compat_find_calc_match(struct xt_entry_match *m,
+-		       const char *name,
+ 		       const struct ip6t_ip6 *ipv6,
+ 		       unsigned int hookmask,
+ 		       int *size)
+@@ -1493,21 +1457,19 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 				  struct xt_table_info *newinfo,
+ 				  unsigned int *size,
+ 				  const unsigned char *base,
+-				  const unsigned char *limit,
+-				  const unsigned int *hook_entries,
+-				  const unsigned int *underflows,
+-				  const char *name)
++				  const unsigned char *limit)
+ {
+ 	struct xt_entry_match *ematch;
+ 	struct xt_entry_target *t;
+ 	struct xt_target *target;
+ 	unsigned int entry_offset;
+ 	unsigned int j;
+-	int ret, off, h;
++	int ret, off;
+ 
+ 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
+ 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
+-	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
++	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit ||
++	    (unsigned char *)e + e->next_offset > limit) {
+ 		duprintf("Bad offset %p, limit = %p\n", e, limit);
+ 		return -EINVAL;
+ 	}
+@@ -1519,8 +1481,11 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 		return -EINVAL;
+ 	}
+ 
+-	/* For purposes of check_entry casting the compat entry is fine */
+-	ret = check_entry((struct ip6t_entry *)e, name);
++	if (!ip6_checkentry(&e->ipv6))
++		return -EINVAL;
++
++	ret = xt_compat_check_entry_offsets(e, e->elems,
++					    e->target_offset, e->next_offset);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1528,8 +1493,8 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	entry_offset = (void *)e - (void *)base;
+ 	j = 0;
+ 	xt_ematch_foreach(ematch, e) {
+-		ret = compat_find_calc_match(ematch, name,
+-					     &e->ipv6, e->comefrom, &off);
++		ret = compat_find_calc_match(ematch, &e->ipv6, e->comefrom,
++					     &off);
+ 		if (ret != 0)
+ 			goto release_matches;
+ 		++j;
+@@ -1552,17 +1517,6 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
+ 	if (ret)
+ 		goto out;
+ 
+-	/* Check hooks & underflows */
+-	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+-		if ((unsigned char *)e - base == hook_entries[h])
+-			newinfo->hook_entry[h] = hook_entries[h];
+-		if ((unsigned char *)e - base == underflows[h])
+-			newinfo->underflow[h] = underflows[h];
+-	}
+-
+-	/* Clear counters and comefrom */
+-	memset(&e->counters, 0, sizeof(e->counters));
+-	e->comefrom = 0;
+ 	return 0;
+ 
+ out:
+@@ -1576,18 +1530,17 @@ release_matches:
+ 	return ret;
+ }
+ 
+-static int
++static void
+ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+-			    unsigned int *size, const char *name,
++			    unsigned int *size,
+ 			    struct xt_table_info *newinfo, unsigned char *base)
+ {
+ 	struct xt_entry_target *t;
+ 	struct ip6t_entry *de;
+ 	unsigned int origsize;
+-	int ret, h;
++	int h;
+ 	struct xt_entry_match *ematch;
+ 
+-	ret = 0;
+ 	origsize = *size;
+ 	de = (struct ip6t_entry *)*dstptr;
+ 	memcpy(de, e, sizeof(struct ip6t_entry));
+@@ -1596,11 +1549,9 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 	*dstptr += sizeof(struct ip6t_entry);
+ 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
+ 
+-	xt_ematch_foreach(ematch, e) {
+-		ret = xt_compat_match_from_user(ematch, dstptr, size);
+-		if (ret != 0)
+-			return ret;
+-	}
++	xt_ematch_foreach(ematch, e)
++		xt_compat_match_from_user(ematch, dstptr, size);
++
+ 	de->target_offset = e->target_offset - (origsize - *size);
+ 	t = compat_ip6t_get_target(e);
+ 	xt_compat_target_from_user(t, dstptr, size);
+@@ -1612,181 +1563,82 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
+ 		if ((unsigned char *)de - base < newinfo->underflow[h])
+ 			newinfo->underflow[h] -= origsize - *size;
+ 	}
+-	return ret;
+-}
+-
+-static int compat_check_entry(struct ip6t_entry *e, struct net *net,
+-			      const char *name)
+-{
+-	unsigned int j;
+-	int ret = 0;
+-	struct xt_mtchk_param mtpar;
+-	struct xt_entry_match *ematch;
+-
+-	j = 0;
+-	mtpar.net	= net;
+-	mtpar.table     = name;
+-	mtpar.entryinfo = &e->ipv6;
+-	mtpar.hook_mask = e->comefrom;
+-	mtpar.family    = NFPROTO_IPV6;
+-	xt_ematch_foreach(ematch, e) {
+-		ret = check_match(ematch, &mtpar);
+-		if (ret != 0)
+-			goto cleanup_matches;
+-		++j;
+-	}
+-
+-	ret = check_target(e, net, name);
+-	if (ret)
+-		goto cleanup_matches;
+-	return 0;
+-
+- cleanup_matches:
+-	xt_ematch_foreach(ematch, e) {
+-		if (j-- == 0)
+-			break;
+-		cleanup_match(ematch, net);
+-	}
+-	return ret;
+ }
+ 
+ static int
+ translate_compat_table(struct net *net,
+-		       const char *name,
+-		       unsigned int valid_hooks,
+ 		       struct xt_table_info **pinfo,
+ 		       void **pentry0,
+-		       unsigned int total_size,
+-		       unsigned int number,
+-		       unsigned int *hook_entries,
+-		       unsigned int *underflows)
++		       const struct compat_ip6t_replace *compatr)
+ {
+ 	unsigned int i, j;
+ 	struct xt_table_info *newinfo, *info;
+ 	void *pos, *entry0, *entry1;
+ 	struct compat_ip6t_entry *iter0;
+-	struct ip6t_entry *iter1;
++	struct ip6t_replace repl;
+ 	unsigned int size;
+ 	int ret = 0;
+ 
+ 	info = *pinfo;
+ 	entry0 = *pentry0;
+-	size = total_size;
+-	info->number = number;
+-
+-	/* Init all hooks to impossible value. */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		info->hook_entry[i] = 0xFFFFFFFF;
+-		info->underflow[i] = 0xFFFFFFFF;
+-	}
++	size = compatr->size;
++	info->number = compatr->num_entries;
+ 
+ 	duprintf("translate_compat_table: size %u\n", info->size);
+ 	j = 0;
+ 	xt_compat_lock(AF_INET6);
+-	xt_compat_init_offsets(AF_INET6, number);
++	xt_compat_init_offsets(AF_INET6, compatr->num_entries);
+ 	/* Walk through entries, checking offsets. */
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
+ 							entry0,
+-							entry0 + total_size,
+-							hook_entries,
+-							underflows,
+-							name);
++							entry0 + compatr->size);
+ 		if (ret != 0)
+ 			goto out_unlock;
+ 		++j;
+ 	}
+ 
+ 	ret = -EINVAL;
+-	if (j != number) {
++	if (j != compatr->num_entries) {
+ 		duprintf("translate_compat_table: %u not %u entries\n",
+-			 j, number);
++			 j, compatr->num_entries);
+ 		goto out_unlock;
+ 	}
+ 
+-	/* Check hooks all assigned */
+-	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		/* Only hooks which are valid */
+-		if (!(valid_hooks & (1 << i)))
+-			continue;
+-		if (info->hook_entry[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid hook entry %u %u\n",
+-				 i, hook_entries[i]);
+-			goto out_unlock;
+-		}
+-		if (info->underflow[i] == 0xFFFFFFFF) {
+-			duprintf("Invalid underflow %u %u\n",
+-				 i, underflows[i]);
+-			goto out_unlock;
+-		}
+-	}
+-
+ 	ret = -ENOMEM;
+ 	newinfo = xt_alloc_table_info(size);
+ 	if (!newinfo)
+ 		goto out_unlock;
+ 
+-	newinfo->number = number;
++	newinfo->number = compatr->num_entries;
+ 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
+-		newinfo->hook_entry[i] = info->hook_entry[i];
+-		newinfo->underflow[i] = info->underflow[i];
++		newinfo->hook_entry[i] = compatr->hook_entry[i];
++		newinfo->underflow[i] = compatr->underflow[i];
+ 	}
+ 	entry1 = newinfo->entries[raw_smp_processor_id()];
+ 	pos = entry1;
+-	size = total_size;
+-	xt_entry_foreach(iter0, entry0, total_size) {
+-		ret = compat_copy_entry_from_user(iter0, &pos, &size,
+-						  name, newinfo, entry1);
+-		if (ret != 0)
+-			break;
+-	}
++	size = compatr->size;
++	xt_entry_foreach(iter0, entry0, compatr->size)
++		compat_copy_entry_from_user(iter0, &pos, &size,
++					    newinfo, entry1);
++
++	/* all module references in entry0 are now gone. */
+ 	xt_compat_flush_offsets(AF_INET6);
+ 	xt_compat_unlock(AF_INET6);
+-	if (ret)
+-		goto free_newinfo;
+ 
+-	ret = -ELOOP;
+-	if (!mark_source_chains(newinfo, valid_hooks, entry1))
+-		goto free_newinfo;
++	memcpy(&repl, compatr, sizeof(*compatr));
+ 
+-	i = 0;
+-	xt_entry_foreach(iter1, entry1, newinfo->size) {
+-		ret = compat_check_entry(iter1, net, name);
+-		if (ret != 0)
+-			break;
+-		++i;
+-		if (strcmp(ip6t_get_target(iter1)->u.user.name,
+-		    XT_ERROR_TARGET) == 0)
+-			++newinfo->stacksize;
+-	}
+-	if (ret) {
+-		/*
+-		 * The first i matches need cleanup_entry (calls ->destroy)
+-		 * because they had called ->check already. The other j-i
+-		 * entries need only release.
+-		 */
+-		int skip = i;
+-		j -= i;
+-		xt_entry_foreach(iter0, entry0, newinfo->size) {
+-			if (skip-- > 0)
+-				continue;
+-			if (j-- == 0)
+-				break;
+-			compat_release_entry(iter0);
+-		}
+-		xt_entry_foreach(iter1, entry1, newinfo->size) {
+-			if (i-- == 0)
+-				break;
+-			cleanup_entry(iter1, net);
+-		}
+-		xt_free_table_info(newinfo);
+-		return ret;
++	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
++		repl.hook_entry[i] = newinfo->hook_entry[i];
++		repl.underflow[i] = newinfo->underflow[i];
+ 	}
+ 
+-	/* And one copy for every other CPU */
+-	for_each_possible_cpu(i)
+-		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
+-			memcpy(newinfo->entries[i], entry1, newinfo->size);
++	repl.num_counters = 0;
++	repl.counters = NULL;
++	repl.size = newinfo->size;
++	ret = translate_table(net, newinfo, entry1, &repl);
++	if (ret)
++		goto free_newinfo;
+ 
+ 	*pinfo = newinfo;
+ 	*pentry0 = entry1;
+@@ -1795,17 +1647,16 @@ translate_compat_table(struct net *net,
+ 
+ free_newinfo:
+ 	xt_free_table_info(newinfo);
+-out:
+-	xt_entry_foreach(iter0, entry0, total_size) {
++	return ret;
++out_unlock:
++	xt_compat_flush_offsets(AF_INET6);
++	xt_compat_unlock(AF_INET6);
++	xt_entry_foreach(iter0, entry0, compatr->size) {
+ 		if (j-- == 0)
+ 			break;
+ 		compat_release_entry(iter0);
+ 	}
+ 	return ret;
+-out_unlock:
+-	xt_compat_flush_offsets(AF_INET6);
+-	xt_compat_unlock(AF_INET6);
+-	goto out;
+ }
+ 
+ static int
+@@ -1842,10 +1693,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
+ 		goto free_newinfo;
+ 	}
+ 
+-	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
+-				     &newinfo, &loc_cpu_entry, tmp.size,
+-				     tmp.num_entries, tmp.hook_entry,
+-				     tmp.underflow);
++	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
+ 	if (ret != 0)
+ 		goto free_newinfo;
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c1147acbc8c4..ac6c40d08ac5 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1691,7 +1691,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
+ 	destp = ntohs(inet->inet_dport);
+ 	srcp  = ntohs(inet->inet_sport);
+ 
+-	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
++	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
++	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ 		timer_active	= 1;
+ 		timer_expires	= icsk->icsk_timeout;
+ 	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 7333f3575fc5..1173557ea551 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -834,8 +834,8 @@ start_lookup:
+ 		flush_stack(stack, count, skb, count - 1);
+ 	} else {
+ 		if (!inner_flushed)
+-			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+-					 proto == IPPROTO_UDPLITE);
++			UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
++					  proto == IPPROTO_UDPLITE);
+ 		consume_skb(skb);
+ 	}
+ 	return 0;
+@@ -913,11 +913,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 		ret = udpv6_queue_rcv_skb(sk, skb);
+ 		sock_put(sk);
+ 
+-		/* a return value > 0 means to resubmit the input, but
+-		 * it wants the return to be -protocol, or 0
+-		 */
++		/* a return value > 0 means to resubmit the input */
+ 		if (ret > 0)
+-			return -ret;
++			return ret;
+ 
+ 		return 0;
+ 	}
+diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
+index 79649937ec71..44ee0683b14b 100644
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
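The l2tp_ip.c reordering is the entire fix: pskb_may_pull() may reallocate the skb head (for instance when the requested bytes live in fragments), which invalidates any pointer previously taken from skb->data. A minimal sketch of the safe pattern, with a hypothetical helper name:

#include <linux/skbuff.h>
#include <linux/types.h>

/* Derive packet pointers only *after* each pskb_may_pull(); the pull
 * can move skb->data, so earlier pointers may dangle. */
static int l2tp_peek_session_id(struct sk_buff *skb, u32 *session_id)
{
	unsigned char *ptr;

	if (!pskb_may_pull(skb, 4))
		return -EINVAL;			/* runt packet */

	ptr = skb->data;			/* valid only after the pull */
	*session_id = ntohl(*(__be32 *)ptr);
	return 0;
}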
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index 0ce9da948ad7..36f8fa223a78 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -135,12 +135,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 	struct l2tp_tunnel *tunnel = NULL;
+ 	int length;
+ 
+-	/* Point to L2TP header */
+-	optr = ptr = skb->data;
+-
+ 	if (!pskb_may_pull(skb, 4))
+ 		goto discard;
+ 
++	/* Point to L2TP header */
++	optr = ptr = skb->data;
+ 	session_id = ntohl(*((__be32 *) ptr));
+ 	ptr += 4;
+ 
+@@ -168,6 +167,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
+ 		if (!pskb_may_pull(skb, length))
+ 			goto discard;
+ 
++		/* Point to L2TP header */
++		optr = ptr = skb->data;
++		ptr += 4;
+ 		pr_debug("%s: ip recv\n", tunnel->name);
+ 		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
+ 	}
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 17a8dff06090..c58f242c00f1 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+ 	if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ 		struct llc_pktinfo info;
+ 
++		memset(&info, 0, sizeof(info));
+ 		info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ 		llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ 		llc_pdu_decode_da(skb, info.lpi_mac);
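The one-line af_llc.c memset() closes a kernel-to-userspace infoleak: struct llc_pktinfo is built on the stack and handed to userspace as ancillary data, so structure padding and any field the code never assigns would otherwise carry stale stack bytes out. Condensed from the surrounding function (the final put_cmsg() call is recalled from the full source, so treat it as approximate), the general pattern is:

struct llc_pktinfo info;

memset(&info, 0, sizeof(info));	/* zero padding and unset fields first */
info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
llc_pdu_decode_dsap(skb, &info.lpi_sap);
llc_pdu_decode_da(skb, info.lpi_mac);
put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);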
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index 41adfc898a18..6f1f3bdddea2 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -7,6 +7,7 @@
+  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+  * Copyright 2013-2014  Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -1479,14 +1480,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
+ 
+ 		sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ 
+-		num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+-							 &ifibss->chandef,
+-							 channels,
+-							 ARRAY_SIZE(channels));
+ 		scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
+-		ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+-					    ifibss->ssid_len, channels, num,
+-					    scan_width);
++
++		if (ifibss->fixed_channel) {
++			num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
++								 &ifibss->chandef,
++								 channels,
++								 ARRAY_SIZE(channels));
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, channels,
++						    num, scan_width);
++		} else {
++			ieee80211_request_ibss_scan(sdata, ifibss->ssid,
++						    ifibss->ssid_len, NULL,
++						    0, scan_width);
++		}
+ 	} else {
+ 		int interval = IEEE80211_SCAN_INTERVAL;
+ 
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 84cef600c573..6e89ab8eac44 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -980,7 +980,10 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
+ 	if (sdata->vif.txq) {
+ 		struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+ 
++		spin_lock_bh(&txqi->queue.lock);
+ 		ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
++		spin_unlock_bh(&txqi->queue.lock);
++
+ 		atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
+ 	}
+ 
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index d4b08d87537c..3073164a6fcf 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -2227,7 +2227,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	struct ieee80211_sub_if_data *sdata = rx->sdata;
+ 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+-	u16 q, hdrlen;
++	u16 ac, q, hdrlen;
+ 
+ 	hdr = (struct ieee80211_hdr *) skb->data;
+ 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
+@@ -2297,7 +2297,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
+ 	    ether_addr_equal(sdata->vif.addr, hdr->addr3))
+ 		return RX_CONTINUE;
+ 
+-	q = ieee80211_select_queue_80211(sdata, skb, hdr);
++	ac = ieee80211_select_queue_80211(sdata, skb, hdr);
++	q = sdata->vif.hw_queue[ac];
+ 	if (ieee80211_queue_stopped(&local->hw, q)) {
+ 		IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
+ 		return RX_DROP_MONITOR;
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index a7027190f298..bcdbda289d75 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -472,11 +472,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo;
++	struct station_info *sinfo;
+ 	int err = 0;
+ 
+ 	lockdep_assert_held(&local->sta_mtx);
+ 
++	sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
++	if (!sinfo) {
++		err = -ENOMEM;
++		goto out_err;
++	}
++
+ 	/* check if STA exists already */
+ 	if (sta_info_get_bss(sdata, sta->sta.addr)) {
+ 		err = -EEXIST;
+@@ -510,10 +516,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	ieee80211_sta_debugfs_add(sta);
+ 	rate_control_add_sta_debugfs(sta);
+ 
+-	memset(&sinfo, 0, sizeof(sinfo));
+-	sinfo.filled = 0;
+-	sinfo.generation = local->sta_generation;
+-	cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo->generation = local->sta_generation;
++	cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
+ 
+@@ -876,7 +881,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ {
+ 	struct ieee80211_local *local = sta->local;
+ 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+-	struct station_info sinfo = {};
++	struct station_info *sinfo;
+ 	int ret;
+ 
+ 	/*
+@@ -914,8 +919,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
+ 
+ 	sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
+ 
+-	sta_set_sinfo(sta, &sinfo);
+-	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
++	sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
++	if (sinfo)
++		sta_set_sinfo(sta, sinfo);
++	cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
++	kfree(sinfo);
+ 
+ 	rate_control_remove_sta_debugfs(sta);
+ 	ieee80211_sta_debugfs_remove(sta);
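Both sta_info.c hunks apply the same cure, presumably because struct station_info has grown too large to sit safely on the kernel stack: the on-stack instance becomes a kzalloc() allocation. Note how the removal path, as visible in the second hunk, deliberately tolerates allocation failure rather than aborting teardown:

struct station_info *sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);

if (sinfo)				/* best effort: NULL is accepted */
	sta_set_sinfo(sta, sinfo);
cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
kfree(sinfo);				/* kfree(NULL) is a no-op */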
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 38fbc194b9cb..a26bd6532829 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1689,15 +1689,34 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
+ 	cp = pp->conn_in_get(af, skb, &iph, 0);
+ 
+ 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+-	if (conn_reuse_mode && !iph.fragoffs &&
+-	    is_new_conn(skb, &iph) && cp &&
+-	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+-	      unlikely(!atomic_read(&cp->dest->weight))) ||
+-	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+-		if (!atomic_read(&cp->n_control))
+-			ip_vs_conn_expire_now(cp);
+-		__ip_vs_conn_put(cp);
+-		cp = NULL;
++	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++		bool uses_ct = false, resched = false;
++
++		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
++		    unlikely(!atomic_read(&cp->dest->weight))) {
++			resched = true;
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++			if (!atomic_read(&cp->n_control)) {
++				resched = true;
++			} else {
++				/* Do not reschedule controlling connection
++				 * that uses conntrack while it is still
++				 * referenced by controlled connection(s).
++				 */
++				resched = !uses_ct;
++			}
++		}
++
++		if (resched) {
++			if (!atomic_read(&cp->n_control))
++				ip_vs_conn_expire_now(cp);
++			__ip_vs_conn_put(cp);
++			if (uses_ct)
++				return NF_DROP;
++			cp = NULL;
++		}
+ 	}
+ 
+ 	if (unlikely(!cp) && !iph.fragoffs) {
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index bed5f7042529..bb318e4623a3 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	dptr = skb->data + dataoff;
+ 	datalen = skb->len - dataoff;
+ 
+-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ 		return -EINVAL;
+ 
+ 	/* N.B: pe_data is only set on success,
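The ip_vs_pe_sip.c change is an off-by-offset fix: dptr already points dataoff bytes into the packet, so passing dataoff again as the search start made get_callid() skip the first dataoff bytes of the SIP payload and miss any Call-ID near its front. Schematically:

/*
 * skb->data
 *	|<-- dataoff (IP + UDP headers) -->|<------ SIP payload ------>|
 *	0                               dataoff                    skb->len
 *
 *	dptr    = skb->data + dataoff;	dptr[0] is payload byte 0
 *	datalen = skb->len  - dataoff;
 *
 *	get_callid(dptr, dataoff, datalen, ...)	 old: scan started at
 *						 payload byte dataoff, so
 *						 the headers were skipped
 *						 twice
 *	get_callid(dptr, 0, datalen, ...)	 new: scan starts at
 *						 payload byte 0
 */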
+diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
+index 51a459c3c649..4b850c639ac5 100644
+--- a/net/netfilter/x_tables.c
++++ b/net/netfilter/x_tables.c
+@@ -418,6 +418,47 @@ int xt_check_match(struct xt_mtchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_match);
+ 
++/** xt_check_entry_match - check that matches end before start of target
++ *
++ * @match: beginning of xt_entry_match
++ * @target: beginning of this rules target (alleged end of matches)
++ * @alignment: alignment requirement of match structures
++ *
++ * Validates that all matches add up to the beginning of the target,
++ * and that each match covers at least the base structure size.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++static int xt_check_entry_match(const char *match, const char *target,
++				const size_t alignment)
++{
++	const struct xt_entry_match *pos;
++	int length = target - match;
++
++	if (length == 0) /* no matches */
++		return 0;
++
++	pos = (struct xt_entry_match *)match;
++	do {
++		if ((unsigned long)pos % alignment)
++			return -EINVAL;
++
++		if (length < (int)sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size < sizeof(struct xt_entry_match))
++			return -EINVAL;
++
++		if (pos->u.match_size > length)
++			return -EINVAL;
++
++		length -= pos->u.match_size;
++		pos = ((void *)((char *)(pos) + (pos)->u.match_size));
++	} while (length > 0);
++
++	return 0;
++}
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
+ {
+@@ -487,13 +528,14 @@ int xt_compat_match_offset(const struct xt_match *match)
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_offset);
+ 
+-int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+-			      unsigned int *size)
++void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
++			       unsigned int *size)
+ {
+ 	const struct xt_match *match = m->u.kernel.match;
+ 	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
+ 	int pad, off = xt_compat_match_offset(match);
+ 	u_int16_t msize = cm->u.user.match_size;
++	char name[sizeof(m->u.user.name)];
+ 
+ 	m = *dstptr;
+ 	memcpy(m, cm, sizeof(*cm));
+@@ -507,10 +549,12 @@ int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
+ 
+ 	msize += off;
+ 	m->u.user.match_size = msize;
++	strlcpy(name, match->name, sizeof(name));
++	module_put(match->me);
++	strncpy(m->u.user.name, name, sizeof(m->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += msize;
+-	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
+ 
+@@ -541,8 +585,125 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
++
++/* non-compat version may have padding after verdict */
++struct compat_xt_standard_target {
++	struct compat_xt_entry_target t;
++	compat_uint_t verdict;
++};
++
++int xt_compat_check_entry_offsets(const void *base, const char *elems,
++				  unsigned int target_offset,
++				  unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct compat_xt_entry_target *t;
++	const char *e = base;
++
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	/* compat_xt_entry match has less strict alignment requirements,
++	 * otherwise they are identical.  In case of padding differences
++	 * we need to add compat version of xt_check_entry_match.
++	 */
++	BUILD_BUG_ON(sizeof(struct compat_xt_entry_match) != sizeof(struct xt_entry_match));
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct compat_xt_entry_match));
++}
++EXPORT_SYMBOL(xt_compat_check_entry_offsets);
+ #endif /* CONFIG_COMPAT */
+ 
++/**
++ * xt_check_entry_offsets - validate arp/ip/ip6t_entry
++ *
++ * @base: pointer to arp/ip/ip6t_entry
++ * @elems: pointer to first xt_entry_match, i.e. ip(6)t_entry->elems
++ * @target_offset: the arp/ip/ip6_t->target_offset
++ * @next_offset: the arp/ip/ip6_t->next_offset
++ *
++ * validates that target_offset and next_offset are sane and that all
++ * match sizes (if any) align with the target offset.
++ *
++ * This function does not validate the targets or matches themselves, it
++ * only tests that all the offsets and sizes are correct, that all
++ * match structures are aligned, and that the last structure ends where
++ * the target structure begins.
++ *
++ * Also see xt_compat_check_entry_offsets for CONFIG_COMPAT version.
++ *
++ * The arp/ip/ip6t_entry structure @base must have passed following tests:
++ * - it must point to a valid memory location
++ * - base to base + next_offset must be accessible, i.e. not exceed allocated
++ *   length.
++ *
++ * A well-formed entry looks like this:
++ *
++ * ip(6)t_entry   match [mtdata]  match [mtdata] target [tgdata] ip(6)t_entry
++ * e->elems[]-----'                              |               |
++ *                matchsize                      |               |
++ *                                matchsize      |               |
++ *                                               |               |
++ * target_offset---------------------------------'               |
++ * next_offset---------------------------------------------------'
++ *
++ * elems[]: flexible array member at end of ip(6)/arpt_entry struct.
++ *          This is where matches (if any) and the target reside.
++ * target_offset: beginning of target.
++ * next_offset: start of the next rule; also: size of this rule.
++ * Since targets have a minimum size, target_offset + minlen <= next_offset.
++ *
++ * Every match stores its size, sum of sizes must not exceed target_offset.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int xt_check_entry_offsets(const void *base,
++			   const char *elems,
++			   unsigned int target_offset,
++			   unsigned int next_offset)
++{
++	long size_of_base_struct = elems - (const char *)base;
++	const struct xt_entry_target *t;
++	const char *e = base;
++
++	/* target start is within the ip/ip6/arpt_entry struct */
++	if (target_offset < size_of_base_struct)
++		return -EINVAL;
++
++	if (target_offset + sizeof(*t) > next_offset)
++		return -EINVAL;
++
++	t = (void *)(e + target_offset);
++	if (t->u.target_size < sizeof(*t))
++		return -EINVAL;
++
++	if (target_offset + t->u.target_size > next_offset)
++		return -EINVAL;
++
++	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 &&
++	    XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset)
++		return -EINVAL;
++
++	return xt_check_entry_match(elems, base + target_offset,
++				    __alignof__(struct xt_entry_match));
++}
++EXPORT_SYMBOL(xt_check_entry_offsets);
++
+ int xt_check_target(struct xt_tgchk_param *par,
+ 		    unsigned int size, u_int8_t proto, bool inv_proto)
+ {
+@@ -593,6 +754,80 @@ int xt_check_target(struct xt_tgchk_param *par,
+ }
+ EXPORT_SYMBOL_GPL(xt_check_target);
+ 
++/**
++ * xt_copy_counters_from_user - copy counters and metadata from userspace
++ *
++ * @user: src pointer to userspace memory
++ * @len: alleged size of userspace memory
++ * @info: where to store the xt_counters_info metadata
++ * @compat: true if the setsockopt call is done by a 32bit task on a 64bit kernel
++ *
++ * Copies counter meta data from @user and stores it in @info.
++ *
++ * vmallocs memory to hold the counters, then copies the counter data
++ * from @user to the new memory and returns a pointer to it.
++ *
++ * If @compat is true, @info gets converted automatically to the 64bit
++ * representation.
++ *
++ * The metadata associated with the counters is stored in @info.
++ *
++ * Return: returns pointer that caller has to test via IS_ERR().
++ * If IS_ERR is false, caller has to vfree the pointer.
++ */
++void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
++				 struct xt_counters_info *info, bool compat)
++{
++	void *mem;
++	u64 size;
++
++#ifdef CONFIG_COMPAT
++	if (compat) {
++		/* structures only differ in size due to alignment */
++		struct compat_xt_counters_info compat_tmp;
++
++		if (len <= sizeof(compat_tmp))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(compat_tmp);
++		if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		strlcpy(info->name, compat_tmp.name, sizeof(info->name));
++		info->num_counters = compat_tmp.num_counters;
++		user += sizeof(compat_tmp);
++	} else
++#endif
++	{
++		if (len <= sizeof(*info))
++			return ERR_PTR(-EINVAL);
++
++		len -= sizeof(*info);
++		if (copy_from_user(info, user, sizeof(*info)) != 0)
++			return ERR_PTR(-EFAULT);
++
++		info->name[sizeof(info->name) - 1] = '\0';
++		user += sizeof(*info);
++	}
++
++	size = sizeof(struct xt_counters);
++	size *= info->num_counters;
++
++	if (size != (u64)len)
++		return ERR_PTR(-EINVAL);
++
++	mem = vmalloc(len);
++	if (!mem)
++		return ERR_PTR(-ENOMEM);
++
++	if (copy_from_user(mem, user, len) == 0)
++		return mem;
++
++	vfree(mem);
++	return ERR_PTR(-EFAULT);
++}
++EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
++
+ #ifdef CONFIG_COMPAT
+ int xt_compat_target_offset(const struct xt_target *target)
+ {
+@@ -608,6 +843,7 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
+ 	int pad, off = xt_compat_target_offset(target);
+ 	u_int16_t tsize = ct->u.user.target_size;
++	char name[sizeof(t->u.user.name)];
+ 
+ 	t = *dstptr;
+ 	memcpy(t, ct, sizeof(*ct));
+@@ -621,6 +857,9 @@ void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
+ 
+ 	tsize += off;
+ 	t->u.user.target_size = tsize;
++	strlcpy(name, target->name, sizeof(name));
++	module_put(target->me);
++	strncpy(t->u.user.name, name, sizeof(t->u.user.name));
+ 
+ 	*size += off;
+ 	*dstptr += tsize;
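Besides centralizing the copy, xt_copy_counters_from_user() hardens the length validation that the per-family do_add_counters() implementations (such as the ip6_tables one earlier in this patch) used to open-code: the expected payload size is computed as a u64 product, so a huge user-controlled num_counters cannot wrap the multiplication on a 32-bit kernel and slip past the equality check. A worked example with hypothetical values, taking sizeof(struct xt_counters) as 16:

/*
 * num_counters = 0x10000001
 *
 * 32-bit arithmetic: 0x10000001 * 16 == 0x100000010 mod 2^32 == 0x10,
 * so a 16-byte payload would "match" 268 million counters and the
 * kernel would read far past the user buffer.
 *
 * 64-bit arithmetic, as in the helper, cannot wrap here:
 */
u64 size = sizeof(struct xt_counters);
size *= info->num_counters;		/* 0x100000010, no truncation */
if (size != (u64)len)
	return ERR_PTR(-EINVAL);	/* mismatch is rejected */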
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 0c29986ecd87..dbc32b19c574 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2683,6 +2683,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct netlink_callback *cb;
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
++	struct module *module;
+ 	int len, err = -ENOBUFS;
+ 	int alloc_min_size;
+ 	int alloc_size;
+@@ -2762,9 +2763,11 @@ static int netlink_dump(struct sock *sk)
+ 		cb->done(cb);
+ 
+ 	nlk->cb_running = false;
++	module = cb->module;
++	skb = cb->skb;
+ 	mutex_unlock(nlk->cb_mutex);
+-	module_put(cb->module);
+-	consume_skb(cb->skb);
++	module_put(module);
++	consume_skb(skb);
+ 	return 0;
+ 
+ errout_skb:
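The netlink_dump() change is a use-after-free fix: as soon as cb_mutex is released, another thread may start a fresh dump and reuse the callback, so cb->module and cb->skb have to be snapshotted while the mutex is still held. Reduced to its shape (a sketch, not the full function):

mutex_lock(nlk->cb_mutex);
nlk->cb_running = false;
module = cb->module;		/* cb may be reused once the mutex drops */
skb = cb->skb;
mutex_unlock(nlk->cb_mutex);
module_put(module);		/* only the local snapshots are touched */
consume_skb(skb);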
+diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
+index b491c1c296fe..9920f7502f6d 100644
+--- a/net/openvswitch/actions.c
++++ b/net/openvswitch/actions.c
+@@ -441,7 +441,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+ 
+ 		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+-			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, saddr, masked,
+ 				      true);
+ 			memcpy(&flow_key->ipv6.addr.src, masked,
+ 			       sizeof(flow_key->ipv6.addr.src));
+@@ -463,7 +463,7 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ 							     NULL, &flags)
+ 					       != NEXTHDR_ROUTING);
+ 
+-			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
++			set_ipv6_addr(skb, flow_key->ip.proto, daddr, masked,
+ 				      recalc_csum);
+ 			memcpy(&flow_key->ipv6.addr.dst, masked,
+ 			       sizeof(flow_key->ipv6.addr.dst));
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index ebc39e66d704..a3654d929814 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1699,6 +1699,10 @@ retry:
+ 		goto retry;
+ 	}
+ 
++	if (!dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_unlock;
++	}
+ 	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
+ 	    !packet_extra_vlan_len_allowed(dev, skb)) {
+ 		err = -EMSGSIZE;
+@@ -2109,18 +2113,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
+-static bool ll_header_truncated(const struct net_device *dev, int len)
+-{
+-	/* net device doesn't like empty head */
+-	if (unlikely(len <= dev->hard_header_len)) {
+-		net_warn_ratelimited("%s: packet size is too short (%d <= %d)\n",
+-				     current->comm, len, dev->hard_header_len);
+-		return true;
+-	}
+-
+-	return false;
+-}
+-
+ static void tpacket_set_protocol(const struct net_device *dev,
+ 				 struct sk_buff *skb)
+ {
+@@ -2203,19 +2195,19 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		if (unlikely(err < 0))
+ 			return -EINVAL;
+ 	} else if (dev->hard_header_len) {
+-		if (ll_header_truncated(dev, tp_len))
+-			return -EINVAL;
++		int hdrlen = min_t(int, dev->hard_header_len, tp_len);
+ 
+ 		skb_push(skb, dev->hard_header_len);
+-		err = skb_store_bits(skb, 0, data,
+-				dev->hard_header_len);
++		err = skb_store_bits(skb, 0, data, hdrlen);
+ 		if (unlikely(err))
+ 			return err;
++		if (!dev_validate_header(dev, skb->data, hdrlen))
++			return -EINVAL;
+ 		if (!skb->protocol)
+ 			tpacket_set_protocol(dev, skb);
+ 
+-		data += dev->hard_header_len;
+-		to_write -= dev->hard_header_len;
++		data += hdrlen;
++		to_write -= hdrlen;
+ 	}
+ 
+ 	offset = offset_in_page(data);
+@@ -2538,9 +2530,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
+ 		if (unlikely(offset < 0))
+ 			goto out_free;
+-	} else {
+-		if (ll_header_truncated(dev, len))
+-			goto out_free;
+ 	}
+ 
+ 	/* Returns -EFAULT on error */
+@@ -2548,6 +2537,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (err)
+ 		goto out_free;
+ 
++	if (sock->type == SOCK_RAW &&
++	    !dev_validate_header(dev, skb->data, len)) {
++		err = -EINVAL;
++		goto out_free;
++	}
++
+ 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ 
+ 	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+@@ -3212,6 +3207,7 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
+ 	i->ifindex = mreq->mr_ifindex;
+ 	i->alen = mreq->mr_alen;
+ 	memcpy(i->addr, mreq->mr_address, i->alen);
++	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
+ 	i->count = 1;
+ 	i->next = po->mclist;
+ 	po->mclist = i;
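The three af_packet.c transmit paths now validate the user-supplied link-layer header with dev_validate_header() instead of the blanket ll_header_truncated() length test, which rejected legitimately short variable-length headers. The helper comes from the same upstream series (include/linux/netdevice.h); it is paraphrased here from memory, so treat the body as approximate rather than verbatim:

static inline bool dev_validate_header(const struct net_device *dev,
				       char *ll_header, int len)
{
	if (likely(len >= dev->hard_header_len))
		return true;			/* full header supplied */

	if (capable(CAP_SYS_RAWIO)) {
		/* privileged senders may pass a short header;
		 * the tail is zero-padded */
		memset(ll_header + len, 0, dev->hard_header_len - len);
		return true;
	}

	if (dev->header_ops && dev->header_ops->validate)
		return dev->header_ops->validate(ll_header, len);

	return false;
}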
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 68c599a5e1d1..c244a49ae4ac 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -744,14 +744,15 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
++void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
++			       unsigned int len)
+ {
+ 	const struct Qdisc_class_ops *cops;
+ 	unsigned long cl;
+ 	u32 parentid;
+ 	int drops;
+ 
+-	if (n == 0)
++	if (n == 0 && len == 0)
+ 		return;
+ 	drops = max_t(int, n, 0);
+ 	rcu_read_lock();
+@@ -774,11 +775,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ 			cops->put(sch, cl);
+ 		}
+ 		sch->q.qlen -= n;
++		sch->qstats.backlog -= len;
+ 		__qdisc_qstats_drop(sch, drops);
+ 	}
+ 	rcu_read_unlock();
+ }
+-EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
++EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
+ 
+ static void notify_and_destroy(struct net *net, struct sk_buff *skb,
+ 			       struct nlmsghdr *n, u32 clid,
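With the rename, the helper now takes the byte backlog to remove alongside the packet count, so every caller that empties a child qdisc must capture both figures before the reset. The conversions throughout the rest of this patch (cbq, drr, hfsc, and the codel family) all follow this shape:

unsigned int qlen    = child->q.qlen;
unsigned int backlog = child->qstats.backlog;

qdisc_reset(child);				 /* empties the child */
qdisc_tree_reduce_backlog(child, qlen, backlog); /* fix up the ancestors */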
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index beeb75f80fdb..f6e7a60012b1 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1624,13 +1624,8 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new->reshape_fail = cbq_reshape_fail;
+ #endif
+ 	}
+-	sch_tree_lock(sch);
+-	*old = cl->q;
+-	cl->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+ 
++	*old = qdisc_replace(sch, new, &cl->q);
+ 	return 0;
+ }
+ 
+@@ -1914,7 +1909,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct cbq_sched_data *q = qdisc_priv(sch);
+ 	struct cbq_class *cl = (struct cbq_class *)arg;
+-	unsigned int qlen;
++	unsigned int qlen, backlog;
+ 
+ 	if (cl->filters || cl->children || cl == &q->link)
+ 		return -EBUSY;
+@@ -1922,8 +1917,9 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	qlen = cl->q->q.qlen;
++	backlog = cl->q->qstats.backlog;
+ 	qdisc_reset(cl->q);
+-	qdisc_tree_decrease_qlen(cl->q, qlen);
++	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+ 
+ 	if (cl->next_alive)
+ 		cbq_deactivate_class(cl);
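cbq_graft() is the first of several graft handlers in this patch converted to qdisc_replace(), which folds the lock/swap/reset sequence and the backlog fix into one helper added to include/net/sch_generic.h by a hunk earlier in this patch. From memory, its shape is roughly the following sketch:

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);
	old = *pold;
	*pold = new;
	if (old != NULL) {
		/* report both dropped packets and dropped bytes upward */
		qdisc_tree_reduce_backlog(old, old->q.qlen,
					  old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}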
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index c009eb9045ce..3f6437db9b0f 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -128,8 +128,8 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
+ 		choke_zap_tail_holes(q);
+ 
+ 	qdisc_qstats_backlog_dec(sch, skb);
++	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
+ 	qdisc_drop(skb, sch);
+-	qdisc_tree_decrease_qlen(sch, 1);
+ 	--sch->q.qlen;
+ }
+ 
+@@ -449,6 +449,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 		old = q->tab;
+ 		if (old) {
+ 			unsigned int oqlen = sch->q.qlen, tail = 0;
++			unsigned dropped = 0;
+ 
+ 			while (q->head != q->tail) {
+ 				struct sk_buff *skb = q->tab[q->head];
+@@ -460,11 +461,12 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
+ 					ntab[tail++] = skb;
+ 					continue;
+ 				}
++				dropped += qdisc_pkt_len(skb);
+ 				qdisc_qstats_backlog_dec(sch, skb);
+ 				--sch->q.qlen;
+ 				qdisc_drop(skb, sch);
+ 			}
+-			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
++			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
+ 			q->head = 0;
+ 			q->tail = tail;
+ 		}
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index 7a0bdb16ac92..9a9068d00833 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -79,12 +79,13 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
+ 
+ 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+ 
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->stats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
+ 		q->stats.drop_count = 0;
++		q->stats.drop_len = 0;
+ 	}
+ 	if (skb)
+ 		qdisc_bstats_update(sch, skb);
+@@ -115,7 +116,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct codel_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_CODEL_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -149,10 +150,11 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
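codel (and fq_codel below) drop packets from inside dequeue, where, as the comment in the hunk notes, walking the tree while qlen is already 0 can crash HTB; drops are therefore accumulated and flushed on a later dequeue. The new drop_len field simply carries the byte total alongside drop_count so the deferred flush can feed qdisc_tree_reduce_backlog(). Expressed as hypothetical helpers, the pattern is:

/* At drop time: record both packets and bytes. */
static void account_drop(struct codel_stats *stats, struct sk_buff *skb)
{
	stats->drop_count++;
	stats->drop_len += qdisc_pkt_len(skb);
}

/* Later, while the qdisc still holds packets: flush both counters
 * up the tree in a single call, then reset them. */
static void flush_deferred_drops(struct Qdisc *sch, struct codel_stats *stats)
{
	if (stats->drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, stats->drop_count,
					  stats->drop_len);
		stats->drop_count = 0;
		stats->drop_len = 0;
	}
}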
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index 338706092c27..e599803caa1e 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -53,9 +53,10 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
+ static void drr_purge_queue(struct drr_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
+@@ -226,11 +227,7 @@ static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	drr_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index 66700a6116aa..7288dda2a7fb 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -67,13 +67,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	*old = p->q;
+-	p->q = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &p->q);
+ 	return 0;
+ }
+ 
+@@ -262,6 +256,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return err;
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 
+ 	return NET_XMIT_SUCCESS;
+@@ -284,6 +279,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
+ 		return NULL;
+ 
+ 	qdisc_bstats_update(sch, skb);
++	qdisc_qstats_backlog_dec(sch, skb);
+ 	sch->q.qlen--;
+ 
+ 	index = skb->tc_index & (p->indices - 1);
+@@ -399,6 +395,7 @@ static void dsmark_reset(struct Qdisc *sch)
+ 
+ 	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+ 	qdisc_reset(p->q);
++	sch->qstats.backlog = 0;
+ 	sch->q.qlen = 0;
+ }
+ 
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index f377702d4b91..4816778566d3 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -659,6 +659,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct fq_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_FQ_MAX + 1];
+ 	int err, drop_count = 0;
++	unsigned drop_len = 0;
+ 	u32 fq_log;
+ 
+ 	if (!opt)
+@@ -733,10 +734,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 		if (!skb)
+ 			break;
++		drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, drop_count);
++	qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
+ 
+ 	sch_tree_unlock(sch);
+ 	return err;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 9291598b5aad..96971c7ab228 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -173,7 +173,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
+ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+-	unsigned int idx;
++	unsigned int idx, prev_backlog;
+ 	struct fq_codel_flow *flow;
+ 	int uninitialized_var(ret);
+ 
+@@ -201,6 +201,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+@@ -209,7 +210,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -239,6 +240,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
+ 	struct fq_codel_flow *flow;
+ 	struct list_head *head;
+ 	u32 prev_drop_count, prev_ecn_mark;
++	unsigned int prev_backlog;
+ 
+ begin:
+ 	head = &q->new_flows;
+@@ -257,6 +259,7 @@ begin:
+ 
+ 	prev_drop_count = q->cstats.drop_count;
+ 	prev_ecn_mark = q->cstats.ecn_mark;
++	prev_backlog = sch->qstats.backlog;
+ 
+ 	skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
+ 			    dequeue);
+@@ -274,12 +277,14 @@ begin:
+ 	}
+ 	qdisc_bstats_update(sch, skb);
+ 	flow->deficit -= qdisc_pkt_len(skb);
+-	/* We cant call qdisc_tree_decrease_qlen() if our qlen is 0,
++	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
+ 	 * or HTB crashes. Defer it for next round.
+ 	 */
+ 	if (q->cstats.drop_count && sch->q.qlen) {
+-		qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++					  q->cstats.drop_len);
+ 		q->cstats.drop_count = 0;
++		q->cstats.drop_len = 0;
+ 	}
+ 	return skb;
+ }
+@@ -347,11 +352,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = fq_codel_dequeue(sch);
+ 
++		q->cstats.drop_len += qdisc_pkt_len(skb);
+ 		kfree_skb(skb);
+ 		q->cstats.drop_count++;
+ 	}
+-	qdisc_tree_decrease_qlen(sch, q->cstats.drop_count);
++	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
+ 	q->cstats.drop_count = 0;
++	q->cstats.drop_len = 0;
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 3c6f6b774ba6..9821e6d641bb 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -159,12 +159,15 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ 	if (validate)
+ 		skb = validate_xmit_skb_list(skb, dev);
+ 
+-	if (skb) {
++	if (likely(skb)) {
+ 		HARD_TX_LOCK(dev, txq, smp_processor_id());
+ 		if (!netif_xmit_frozen_or_stopped(txq))
+ 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+ 
+ 		HARD_TX_UNLOCK(dev, txq);
++	} else {
++		spin_lock(root_lock);
++		return qdisc_qlen(q);
+ 	}
+ 	spin_lock(root_lock);
+ 
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index e6c7416d0332..d3e21dac8b40 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -895,9 +895,10 @@ static void
+ hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static void
+@@ -1215,11 +1216,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	hfsc_purge_queue(sch, cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 15d3aabfe250..792c6f330f77 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -390,6 +390,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	enum wdrr_bucket_idx idx;
+ 	struct wdrr_bucket *bucket;
++	unsigned int prev_backlog;
+ 
+ 	idx = hhf_classify(skb, sch);
+ 
+@@ -417,6 +418,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	if (++sch->q.qlen <= sch->limit)
+ 		return NET_XMIT_SUCCESS;
+ 
++	prev_backlog = sch->qstats.backlog;
+ 	q->drop_overlimit++;
+ 	/* Return Congestion Notification only if we dropped a packet from this
+ 	 * bucket.
+@@ -425,7 +427,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this. */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -535,7 +537,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct hhf_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_HHF_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, prev_backlog;
+ 	int err;
+ 	u64 non_hh_quantum;
+ 	u32 new_quantum = q->quantum;
+@@ -585,12 +587,14 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt)
+ 	}
+ 
+ 	qlen = sch->q.qlen;
++	prev_backlog = sch->qstats.backlog;
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = hhf_dequeue(sch);
+ 
+ 		kfree_skb(skb);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
++				  prev_backlog - sch->qstats.backlog);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index f1acb0f60dc3..ccff00640713 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -600,6 +600,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		htb_activate(q, cl);
+ 	}
+ 
++	qdisc_qstats_backlog_inc(sch, skb);
+ 	sch->q.qlen++;
+ 	return NET_XMIT_SUCCESS;
+ }
+@@ -889,6 +890,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
+ ok:
+ 		qdisc_bstats_update(sch, skb);
+ 		qdisc_unthrottled(sch);
++		qdisc_qstats_backlog_dec(sch, skb);
+ 		sch->q.qlen--;
+ 		return skb;
+ 	}
+@@ -955,6 +957,7 @@ static unsigned int htb_drop(struct Qdisc *sch)
+ 			unsigned int len;
+ 			if (cl->un.leaf.q->ops->drop &&
+ 			    (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
++				sch->qstats.backlog -= len;
+ 				sch->q.qlen--;
+ 				if (!cl->un.leaf.q->q.qlen)
+ 					htb_deactivate(q, cl);
+@@ -984,12 +987,12 @@ static void htb_reset(struct Qdisc *sch)
+ 			}
+ 			cl->prio_activity = 0;
+ 			cl->cmode = HTB_CAN_SEND;
+-
+ 		}
+ 	}
+ 	qdisc_watchdog_cancel(&q->watchdog);
+ 	__skb_queue_purge(&q->direct_queue);
+ 	sch->q.qlen = 0;
++	sch->qstats.backlog = 0;
+ 	memset(q->hlevel, 0, sizeof(q->hlevel));
+ 	memset(q->row_mask, 0, sizeof(q->row_mask));
+ 	for (i = 0; i < TC_HTB_NUMPRIO; i++)
+@@ -1165,14 +1168,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 				     cl->common.classid)) == NULL)
+ 		return -ENOBUFS;
+ 
+-	sch_tree_lock(sch);
+-	*old = cl->un.leaf.q;
+-	cl->un.leaf.q = new;
+-	if (*old != NULL) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+ 	return 0;
+ }
+ 
+@@ -1274,7 +1270,6 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ {
+ 	struct htb_sched *q = qdisc_priv(sch);
+ 	struct htb_class *cl = (struct htb_class *)arg;
+-	unsigned int qlen;
+ 	struct Qdisc *new_q = NULL;
+ 	int last_child = 0;
+ 
+@@ -1294,9 +1289,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
+ 	sch_tree_lock(sch);
+ 
+ 	if (!cl->level) {
+-		qlen = cl->un.leaf.q->q.qlen;
++		unsigned int qlen = cl->un.leaf.q->q.qlen;
++		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
++
+ 		qdisc_reset(cl->un.leaf.q);
+-		qdisc_tree_decrease_qlen(cl->un.leaf.q, qlen);
++		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
+ 	}
+ 
+ 	/* delete from hash and active; remainder in destroy_class */
+@@ -1430,10 +1427,11 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ 		sch_tree_lock(sch);
+ 		if (parent && !parent->level) {
+ 			unsigned int qlen = parent->un.leaf.q->q.qlen;
++			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
+ 
+ 			/* turn parent into inner node */
+ 			qdisc_reset(parent->un.leaf.q);
+-			qdisc_tree_decrease_qlen(parent->un.leaf.q, qlen);
++			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
+ 			qdisc_destroy(parent->un.leaf.q);
+ 			if (parent->prio_activity)
+ 				htb_deactivate(q, parent);
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index 42dd218871e0..23437d62a8db 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -218,7 +218,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		if (q->queues[i] != &noop_qdisc) {
+ 			struct Qdisc *child = q->queues[i];
+ 			q->queues[i] = &noop_qdisc;
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen,
++						  child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -238,8 +239,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -303,13 +305,7 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 956ead2cab9a..80124c1edbba 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+ 	sch->q.qlen++;
+ }
+ 
++/* netem can't properly corrupt a megapacket (like we get from GSO), so instead
++ * when we statistically choose to corrupt one, we instead segment it, returning
++ * the first packet to be corrupted, and re-enqueue the remaining frames
++ */
++static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
++{
++	struct sk_buff *segs;
++	netdev_features_t features = netif_skb_features(skb);
++
++	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
++
++	if (IS_ERR_OR_NULL(segs)) {
++		qdisc_reshape_fail(skb, sch);
++		return NULL;
++	}
++	consume_skb(skb);
++	return segs;
++}
++
+ /*
+  * Insert one skb into qdisc.
+  * Note: parent depends on return value to account for queue length.
+@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	/* We don't fill cb now as skb_unshare() may invalidate it */
+ 	struct netem_skb_cb *cb;
+ 	struct sk_buff *skb2;
++	struct sk_buff *segs = NULL;
++	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
++	int nb = 0;
+ 	int count = 1;
++	int rc = NET_XMIT_SUCCESS;
+ 
+ 	/* Random duplication */
+ 	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
+@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 	 * do it now in software before we mangle it.
+ 	 */
+ 	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
++		if (skb_is_gso(skb)) {
++			segs = netem_segment(skb, sch);
++			if (!segs)
++				return NET_XMIT_DROP;
++		} else {
++			segs = skb;
++		}
++
++		skb = segs;
++		segs = segs->next;
++
+ 		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ 		    (skb->ip_summed == CHECKSUM_PARTIAL &&
+-		     skb_checksum_help(skb)))
+-			return qdisc_drop(skb, sch);
++		     skb_checksum_help(skb))) {
++			rc = qdisc_drop(skb, sch);
++			goto finish_segs;
++		}
+ 
+ 		skb->data[prandom_u32() % skb_headlen(skb)] ^=
+ 			1<<(prandom_u32() % 8);
+@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ 		sch->qstats.requeues++;
+ 	}
+ 
++finish_segs:
++	if (segs) {
++		while (segs) {
++			skb2 = segs->next;
++			segs->next = NULL;
++			qdisc_skb_cb(segs)->pkt_len = segs->len;
++			last_len = segs->len;
++			rc = qdisc_enqueue(segs, sch);
++			if (rc != NET_XMIT_SUCCESS) {
++				if (net_xmit_drop_count(rc))
++					qdisc_qstats_drop(sch);
++			} else {
++				nb++;
++				len += last_len;
++			}
++			segs = skb2;
++		}
++		sch->q.qlen += nb;
++		if (nb > 1)
++			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
++	}
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -598,7 +655,8 @@ deliver:
+ 				if (unlikely(err != NET_XMIT_SUCCESS)) {
+ 					if (net_xmit_drop_count(err)) {
+ 						qdisc_qstats_drop(sch);
+-						qdisc_tree_decrease_qlen(sch, 1);
++						qdisc_tree_reduce_backlog(sch, 1,
++									  qdisc_pkt_len(skb));
+ 					}
+ 				}
+ 				goto tfifo_dequeue;
+@@ -1037,15 +1095,7 @@ static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ {
+ 	struct netem_sched_data *q = qdisc_priv(sch);
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	if (*old) {
+-		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-		qdisc_reset(*old);
+-	}
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index b783a446d884..71ae3b9629f9 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -183,7 +183,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ {
+ 	struct pie_sched_data *q = qdisc_priv(sch);
+ 	struct nlattr *tb[TCA_PIE_MAX + 1];
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	int err;
+ 
+ 	if (!opt)
+@@ -232,10 +232,11 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
+ 	while (sch->q.qlen > sch->limit) {
+ 		struct sk_buff *skb = __skb_dequeue(&sch->q);
+ 
++		dropped += qdisc_pkt_len(skb);
+ 		qdisc_qstats_backlog_dec(sch, skb);
+ 		qdisc_drop(skb, sch);
+ 	}
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	sch_tree_unlock(sch);
+ 	return 0;
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index 8e5cd34aaa74..e671b1a4e815 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -191,7 +191,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 		struct Qdisc *child = q->queues[i];
+ 		q->queues[i] = &noop_qdisc;
+ 		if (child != &noop_qdisc) {
+-			qdisc_tree_decrease_qlen(child, child->q.qlen);
++			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
+ 			qdisc_destroy(child);
+ 		}
+ 	}
+@@ -210,8 +210,9 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
+ 				q->queues[i] = child;
+ 
+ 				if (old != &noop_qdisc) {
+-					qdisc_tree_decrease_qlen(old,
+-								 old->q.qlen);
++					qdisc_tree_reduce_backlog(old,
++								  old->q.qlen,
++								  old->qstats.backlog);
+ 					qdisc_destroy(old);
+ 				}
+ 				sch_tree_unlock(sch);
+@@ -268,13 +269,7 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->queues[band];
+-	q->queues[band] = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->queues[band]);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index 3ec7e88a43ca..e2b8fd47008b 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -221,9 +221,10 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
+ static void qfq_purge_queue(struct qfq_class *cl)
+ {
+ 	unsigned int len = cl->qdisc->q.qlen;
++	unsigned int backlog = cl->qdisc->qstats.backlog;
+ 
+ 	qdisc_reset(cl->qdisc);
+-	qdisc_tree_decrease_qlen(cl->qdisc, len);
++	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
+ }
+ 
+ static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
+@@ -619,11 +620,7 @@ static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
+ 			new = &noop_qdisc;
+ 	}
+ 
+-	sch_tree_lock(sch);
+-	qfq_purge_queue(cl);
+-	*old = cl->qdisc;
+-	cl->qdisc = new;
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &cl->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 6c0534cc7758..8c0508c0e287 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -210,7 +210,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
+ 	q->flags = ctl->flags;
+ 	q->limit = ctl->limit;
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -313,12 +314,7 @@ static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 5819dd82630d..e1d634e3c255 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -518,7 +518,8 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 
+-	qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++				  q->qdisc->qstats.backlog);
+ 	qdisc_destroy(q->qdisc);
+ 	q->qdisc = child;
+ 
+@@ -614,12 +615,7 @@ static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
+index b877140beda5..4417fb25166f 100644
+--- a/net/sched/sch_sfq.c
++++ b/net/sched/sch_sfq.c
+@@ -369,7 +369,7 @@ static int
+ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+ {
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+-	unsigned int hash;
++	unsigned int hash, dropped;
+ 	sfq_index x, qlen;
+ 	struct sfq_slot *slot;
+ 	int uninitialized_var(ret);
+@@ -484,7 +484,7 @@ enqueue:
+ 		return NET_XMIT_SUCCESS;
+ 
+ 	qlen = slot->qlen;
+-	sfq_drop(sch);
++	dropped = sfq_drop(sch);
+ 	/* Return Congestion Notification only if we dropped a packet
+ 	 * from this flow.
+ 	 */
+@@ -492,7 +492,7 @@ enqueue:
+ 		return NET_XMIT_CN;
+ 
+ 	/* As we dropped a packet, better let upper stack know this */
+-	qdisc_tree_decrease_qlen(sch, 1);
++	qdisc_tree_reduce_backlog(sch, 1, dropped);
+ 	return NET_XMIT_SUCCESS;
+ }
+ 
+@@ -560,6 +560,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 	struct sfq_slot *slot;
+ 	struct sk_buff_head list;
+ 	int dropped = 0;
++	unsigned int drop_len = 0;
+ 
+ 	__skb_queue_head_init(&list);
+ 
+@@ -588,6 +589,7 @@ static void sfq_rehash(struct Qdisc *sch)
+ 			if (x >= SFQ_MAX_FLOWS) {
+ drop:
+ 				qdisc_qstats_backlog_dec(sch, skb);
++				drop_len += qdisc_pkt_len(skb);
+ 				kfree_skb(skb);
+ 				dropped++;
+ 				continue;
+@@ -617,7 +619,7 @@ drop:
+ 		}
+ 	}
+ 	sch->q.qlen -= dropped;
+-	qdisc_tree_decrease_qlen(sch, dropped);
++	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
+ }
+ 
+ static void sfq_perturbation(unsigned long arg)
+@@ -641,7 +643,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 	struct sfq_sched_data *q = qdisc_priv(sch);
+ 	struct tc_sfq_qopt *ctl = nla_data(opt);
+ 	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
+-	unsigned int qlen;
++	unsigned int qlen, dropped = 0;
+ 	struct red_parms *p = NULL;
+ 
+ 	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
+@@ -690,8 +692,8 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	qlen = sch->q.qlen;
+ 	while (sch->q.qlen > q->limit)
+-		sfq_drop(sch);
+-	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
++		dropped += sfq_drop(sch);
++	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
+ 
+ 	del_timer(&q->perturb_timer);
+ 	if (q->perturb_period) {
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index a4afde14e865..c2fbde742f37 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -160,6 +160,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	struct tbf_sched_data *q = qdisc_priv(sch);
+ 	struct sk_buff *segs, *nskb;
+ 	netdev_features_t features = netif_skb_features(skb);
++	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ 	int ret, nb;
+ 
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+@@ -172,6 +173,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 		nskb = segs->next;
+ 		segs->next = NULL;
+ 		qdisc_skb_cb(segs)->pkt_len = segs->len;
++		len += segs->len;
+ 		ret = qdisc_enqueue(segs, q->qdisc);
+ 		if (ret != NET_XMIT_SUCCESS) {
+ 			if (net_xmit_drop_count(ret))
+@@ -183,7 +185,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
+ 	}
+ 	sch->q.qlen += nb;
+ 	if (nb > 1)
+-		qdisc_tree_decrease_qlen(sch, 1 - nb);
++		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+ 	consume_skb(skb);
+ 	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ }
+@@ -399,7 +401,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
+ 
+ 	sch_tree_lock(sch);
+ 	if (child) {
+-		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
++		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
++					  q->qdisc->qstats.backlog);
+ 		qdisc_destroy(q->qdisc);
+ 		q->qdisc = child;
+ 	}
+@@ -502,13 +505,7 @@ static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+ 	if (new == NULL)
+ 		new = &noop_qdisc;
+ 
+-	sch_tree_lock(sch);
+-	*old = q->qdisc;
+-	q->qdisc = new;
+-	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+-	qdisc_reset(*old);
+-	sch_tree_unlock(sch);
+-
++	*old = qdisc_replace(sch, new, &q->qdisc);
+ 	return 0;
+ }
+ 
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 3267a5cbb3e8..18361cbfc882 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -519,6 +519,8 @@ static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ 		}
+ 		return 0;
+ 	}
++	if (addr1->v6.sin6_port != addr2->v6.sin6_port)
++		return 0;
+ 	if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
+ 		return 0;
+ 	/* If this is a linklocal address, compare the scope_id. */
+diff --git a/net/socket.c b/net/socket.c
+index dcbfa868e398..e66e4f357506 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -2247,31 +2247,31 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ 			break;
+ 	}
+ 
+-out_put:
+-	fput_light(sock->file, fput_needed);
+-
+ 	if (err == 0)
+-		return datagrams;
++		goto out_put;
+ 
+-	if (datagrams != 0) {
++	if (datagrams == 0) {
++		datagrams = err;
++		goto out_put;
++	}
++
++	/*
++	 * We may return less entries than requested (vlen) if the
++	 * sock is non block and there aren't enough datagrams...
++	 */
++	if (err != -EAGAIN) {
+ 		/*
+-		 * We may return less entries than requested (vlen) if the
+-		 * sock is non block and there aren't enough datagrams...
++		 * ... or  if recvmsg returns an error after we
++		 * received some datagrams, where we record the
++		 * error to return on the next call or if the
++		 * app asks about it using getsockopt(SO_ERROR).
+ 		 */
+-		if (err != -EAGAIN) {
+-			/*
+-			 * ... or  if recvmsg returns an error after we
+-			 * received some datagrams, where we record the
+-			 * error to return on the next call or if the
+-			 * app asks about it using getsockopt(SO_ERROR).
+-			 */
+-			sock->sk->sk_err = -err;
+-		}
+-
+-		return datagrams;
++		sock->sk->sk_err = -err;
+ 	}
++out_put:
++	fput_light(sock->file, fput_needed);
+ 
+-	return err;
++	return datagrams;
+ }
+ 
+ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 8d79e70bd978..9ec709b9707c 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1175,14 +1175,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
+ 	}
+ 
+ 	crq->q.reader = 0;
+-	crq->item = cache_get(h);
+ 	crq->buf = buf;
+ 	crq->len = 0;
+ 	crq->readers = 0;
+ 	spin_lock(&queue_lock);
+-	if (test_bit(CACHE_PENDING, &h->flags))
++	if (test_bit(CACHE_PENDING, &h->flags)) {
++		crq->item = cache_get(h);
+ 		list_add_tail(&crq->q.list, &detail->queue);
+-	else
++	} else
+ 		/* Lost a race, no longer PENDING, so don't enqueue */
+ 		ret = -EAGAIN;
+ 	spin_unlock(&queue_lock);
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index e6ce1517367f..16e831dcfde0 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -442,7 +442,7 @@ out_no_rpciod:
+ 	return ERR_PTR(err);
+ }
+ 
+-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
++static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+ 					struct rpc_xprt *xprt)
+ {
+ 	struct rpc_clnt *clnt = NULL;
+@@ -474,7 +474,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+ 
+ 	return clnt;
+ }
+-EXPORT_SYMBOL_GPL(rpc_create_xprt);
+ 
+ /**
+  * rpc_create - create an RPC client and transport with one call
+@@ -500,6 +499,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
+ 	};
+ 	char servername[48];
+ 
++	if (args->bc_xprt) {
++		WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
++		xprt = args->bc_xprt->xpt_bc_xprt;
++		if (xprt) {
++			xprt_get(xprt);
++			return rpc_create_xprt(args, xprt);
++		}
++	}
++
+ 	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+ 		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
+ 	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index ce9121e8e990..e5ec86dd8dc1 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -712,7 +712,7 @@ static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
+ 		goto out;
+ 
+ 	tipc_tlv_sprintf(msg->rep, "%-10u %s",
+-			 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
++			 nla_get_u32(publ[TIPC_NLA_PUBL_KEY]),
+ 			 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
+ out:
+ 	tipc_tlv_sprintf(msg->rep, "\n");
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 20cc6df07157..d41d424b9913 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2804,6 +2804,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (err)
+ 			return err;
+ 
++		if (!attrs[TIPC_NLA_SOCK])
++			return -EINVAL;
++
+ 		err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
+ 				       attrs[TIPC_NLA_SOCK],
+ 				       tipc_nl_sock_policy);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 535a642a1688..03da879008d7 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -935,32 +935,20 @@ fail:
+ 	return NULL;
+ }
+ 
+-static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
++static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
++		      struct path *res)
+ {
+-	struct dentry *dentry;
+-	struct path path;
+-	int err = 0;
+-	/*
+-	 * Get the parent directory, calculate the hash for last
+-	 * component.
+-	 */
+-	dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
+-	err = PTR_ERR(dentry);
+-	if (IS_ERR(dentry))
+-		return err;
++	int err;
+ 
+-	/*
+-	 * All right, let's create it.
+-	 */
+-	err = security_path_mknod(&path, dentry, mode, 0);
++	err = security_path_mknod(path, dentry, mode, 0);
+ 	if (!err) {
+-		err = vfs_mknod(d_inode(path.dentry), dentry, mode, 0);
++		err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
+ 		if (!err) {
+-			res->mnt = mntget(path.mnt);
++			res->mnt = mntget(path->mnt);
+ 			res->dentry = dget(dentry);
+ 		}
+ 	}
+-	done_path_create(&path, dentry);
++
+ 	return err;
+ }
+ 
+@@ -971,10 +959,12 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct unix_sock *u = unix_sk(sk);
+ 	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
+ 	char *sun_path = sunaddr->sun_path;
+-	int err;
++	int err, name_err;
+ 	unsigned int hash;
+ 	struct unix_address *addr;
+ 	struct hlist_head *list;
++	struct path path;
++	struct dentry *dentry;
+ 
+ 	err = -EINVAL;
+ 	if (sunaddr->sun_family != AF_UNIX)
+@@ -990,14 +980,34 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 		goto out;
+ 	addr_len = err;
+ 
++	name_err = 0;
++	dentry = NULL;
++	if (sun_path[0]) {
++		/* Get the parent directory, calculate the hash for last
++		 * component.
++		 */
++		dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
++
++		if (IS_ERR(dentry)) {
++			/* delay report until after 'already bound' check */
++			name_err = PTR_ERR(dentry);
++			dentry = NULL;
++		}
++	}
++
+ 	err = mutex_lock_interruptible(&u->readlock);
+ 	if (err)
+-		goto out;
++		goto out_path;
+ 
+ 	err = -EINVAL;
+ 	if (u->addr)
+ 		goto out_up;
+ 
++	if (name_err) {
++		err = name_err == -EEXIST ? -EADDRINUSE : name_err;
++		goto out_up;
++	}
++
+ 	err = -ENOMEM;
+ 	addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
+ 	if (!addr)
+@@ -1008,11 +1018,11 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	addr->hash = hash ^ sk->sk_type;
+ 	atomic_set(&addr->refcnt, 1);
+ 
+-	if (sun_path[0]) {
+-		struct path path;
++	if (dentry) {
++		struct path u_path;
+ 		umode_t mode = S_IFSOCK |
+ 		       (SOCK_INODE(sock)->i_mode & ~current_umask());
+-		err = unix_mknod(sun_path, mode, &path);
++		err = unix_mknod(dentry, &path, mode, &u_path);
+ 		if (err) {
+ 			if (err == -EEXIST)
+ 				err = -EADDRINUSE;
+@@ -1020,9 +1030,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 			goto out_up;
+ 		}
+ 		addr->hash = UNIX_HASH_SIZE;
+-		hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE-1);
++		hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+ 		spin_lock(&unix_table_lock);
+-		u->path = path;
++		u->path = u_path;
+ 		list = &unix_socket_table[hash];
+ 	} else {
+ 		spin_lock(&unix_table_lock);
+@@ -1045,6 +1055,10 @@ out_unlock:
+ 	spin_unlock(&unix_table_lock);
+ out_up:
+ 	mutex_unlock(&u->readlock);
++out_path:
++	if (dentry)
++		done_path_create(&path, dentry);
++
+ out:
+ 	return err;
+ }
+diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
+index 2ec86e652a19..e1c69b216db3 100644
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -1794,27 +1794,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
+ 		err = 0;
+ 
+-	if (copied > 0) {
+-		/* We only do these additional bookkeeping/notification steps
+-		 * if we actually copied something out of the queue pair
+-		 * instead of just peeking ahead.
+-		 */
+-
+-		if (!(flags & MSG_PEEK)) {
+-			/* If the other side has shutdown for sending and there
+-			 * is nothing more to read, then modify the socket
+-			 * state.
+-			 */
+-			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
+-				if (vsock_stream_has_data(vsk) <= 0) {
+-					sk->sk_state = SS_UNCONNECTED;
+-					sock_set_flag(sk, SOCK_DONE);
+-					sk->sk_state_change(sk);
+-				}
+-			}
+-		}
++	if (copied > 0)
+ 		err = copied;
+-	}
+ 
+ out_wait:
+ 	finish_wait(sk_sleep(sk), &wait);
+diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
+index 7ecd04c21360..997ff7b2509b 100644
+--- a/net/x25/x25_facilities.c
++++ b/net/x25/x25_facilities.c
+@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
+ 
+ 	memset(&theirs, 0, sizeof(theirs));
+ 	memcpy(new, ours, sizeof(*new));
++	memset(dte, 0, sizeof(*dte));
+ 
+ 	len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
+ 	if (len < 0)
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index b58286ecd156..cbaf52c837f4 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -292,12 +292,15 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
+ 		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;
+ 
+ 		skb_dst_force(skb);
++		dev_hold(skb->dev);
+ 
+ 		nexthdr = x->type->input(x, skb);
+ 
+ 		if (nexthdr == -EINPROGRESS)
+ 			return 0;
+ resume:
++		dev_put(skb->dev);
++
+ 		spin_lock(&x->lock);
+ 		if (nexthdr <= 0) {
+ 			if (nexthdr == -EBADMSG) {
+diff --git a/security/keys/key.c b/security/keys/key.c
+index aee2ec5a18fc..970b58ee3e20 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
+ 
+ 	mutex_unlock(&key_construction_mutex);
+ 
+-	if (keyring)
++	if (keyring && link_ret == 0)
+ 		__key_link_end(keyring, &key->index_key, edit);
+ 
+ 	/* wake up anyone waiting for a key to be constructed */
+diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
+index 7d45645f10ba..253a2da05cf0 100644
+--- a/sound/core/pcm_lib.c
++++ b/sound/core/pcm_lib.c
+@@ -322,7 +322,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
+ 			char name[16];
+ 			snd_pcm_debug_name(substream, name, sizeof(name));
+ 			pcm_err(substream->pcm,
+-				"BUG: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
++				"invalid position: %s, pos = %ld, buffer size = %ld, period size = %ld\n",
+ 				name, pos, runtime->buffer_size,
+ 				runtime->period_size);
+ 		}
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index bf48e71f73cd..1782555fcaca 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1051,8 +1051,8 @@ static int snd_timer_s_start(struct snd_timer * timer)
+ 		njiff += timer->sticks - priv->correction;
+ 		priv->correction = 0;
+ 	}
+-	priv->last_expires = priv->tlist.expires = njiff;
+-	add_timer(&priv->tlist);
++	priv->last_expires = njiff;
++	mod_timer(&priv->tlist, njiff);
+ 	return 0;
+ }
+ 
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index c5d5217a4180..4df5dc1a3765 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
+ 
+ static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
+ {
++	hrtimer_cancel(&dpcm->timer);
+ 	tasklet_kill(&dpcm->tasklet);
+ }
+ 
+diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
+index 961ca32ee989..37f2dacd6691 100644
+--- a/sound/hda/hdac_device.c
++++ b/sound/hda/hdac_device.c
+@@ -261,13 +261,11 @@ EXPORT_SYMBOL_GPL(_snd_hdac_read_parm);
+ int snd_hdac_read_parm_uncached(struct hdac_device *codec, hda_nid_t nid,
+ 				int parm)
+ {
+-	int val;
++	unsigned int cmd, val;
+ 
+-	if (codec->regmap)
+-		regcache_cache_bypass(codec->regmap, true);
+-	val = snd_hdac_read_parm(codec, nid, parm);
+-	if (codec->regmap)
+-		regcache_cache_bypass(codec->regmap, false);
++	cmd = snd_hdac_regmap_encode_verb(nid, AC_VERB_PARAMETERS) | parm;
++	if (snd_hdac_regmap_read_raw_uncached(codec, cmd, &val) < 0)
++		return -1;
+ 	return val;
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_read_parm_uncached);
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index b0ed870ffb88..f3eb78e47ced 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -411,7 +411,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ 	err = reg_raw_write(codec, reg, val);
+ 	if (err == -EAGAIN) {
+ 		err = snd_hdac_power_up_pm(codec);
+-		if (!err)
++		if (err >= 0)
+ 			err = reg_raw_write(codec, reg, val);
+ 		snd_hdac_power_down_pm(codec);
+ 	}
+@@ -420,14 +420,30 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_write_raw);
+ 
+ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
+-			unsigned int *val)
++			unsigned int *val, bool uncached)
+ {
+-	if (!codec->regmap)
++	if (uncached || !codec->regmap)
+ 		return hda_reg_read(codec, reg, val);
+ 	else
+ 		return regmap_read(codec->regmap, reg, val);
+ }
+ 
++static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
++				      unsigned int reg, unsigned int *val,
++				      bool uncached)
++{
++	int err;
++
++	err = reg_raw_read(codec, reg, val, uncached);
++	if (err == -EAGAIN) {
++		err = snd_hdac_power_up_pm(codec);
++		if (err >= 0)
++			err = reg_raw_read(codec, reg, val, uncached);
++		snd_hdac_power_down_pm(codec);
++	}
++	return err;
++}
++
+ /**
+  * snd_hdac_regmap_read_raw - read a pseudo register with power mgmt
+  * @codec: the codec object
+@@ -439,19 +455,19 @@ static int reg_raw_read(struct hdac_device *codec, unsigned int reg,
+ int snd_hdac_regmap_read_raw(struct hdac_device *codec, unsigned int reg,
+ 			     unsigned int *val)
+ {
+-	int err;
+-
+-	err = reg_raw_read(codec, reg, val);
+-	if (err == -EAGAIN) {
+-		err = snd_hdac_power_up_pm(codec);
+-		if (!err)
+-			err = reg_raw_read(codec, reg, val);
+-		snd_hdac_power_down_pm(codec);
+-	}
+-	return err;
++	return __snd_hdac_regmap_read_raw(codec, reg, val, false);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_read_raw);
+ 
++/* Works like snd_hdac_regmap_read_raw(), but this doesn't read from the
++ * cache but always via hda verbs.
++ */
++int snd_hdac_regmap_read_raw_uncached(struct hdac_device *codec,
++				      unsigned int reg, unsigned int *val)
++{
++	return __snd_hdac_regmap_read_raw(codec, reg, val, true);
++}
++
+ /**
+  * snd_hdac_regmap_update_raw - update a pseudo register with power mgmt
+  * @codec: the codec object
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index a62872f7b41a..abf8d342f1f4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5504,6 +5504,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
++	SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5635,8 +5637,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{0x15, 0x0221401f}, \
+ 	{0x1a, 0x411111f0}, \
+ 	{0x1b, 0x411111f0}, \
+-	{0x1d, 0x40700001}, \
+-	{0x1e, 0x411111f0}
++	{0x1d, 0x40700001}
+ 
+ #define ALC298_STANDARD_PINS \
+ 	{0x18, 0x411111f0}, \
+@@ -5920,35 +5921,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x13, 0x411111f0},
+ 		{0x16, 0x01014020},
+ 		{0x18, 0x411111f0},
+-		{0x19, 0x01a19030}),
++		{0x19, 0x01a19030},
++		{0x1e, 0x411111f0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		ALC292_STANDARD_PINS,
+ 		{0x12, 0x90a60140},
+ 		{0x13, 0x411111f0},
+ 		{0x16, 0x01014020},
+ 		{0x18, 0x02a19031},
+-		{0x19, 0x01a1903e}),
++		{0x19, 0x01a1903e},
++		{0x1e, 0x411111f0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ 		ALC292_STANDARD_PINS,
+ 		{0x12, 0x90a60140},
+ 		{0x13, 0x411111f0},
+ 		{0x16, 0x411111f0},
+ 		{0x18, 0x411111f0},
+-		{0x19, 0x411111f0}),
++		{0x19, 0x411111f0},
++		{0x1e, 0x411111f0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC292_STANDARD_PINS,
+ 		{0x12, 0x40000000},
+ 		{0x13, 0x90a60140},
+ 		{0x16, 0x21014020},
+ 		{0x18, 0x411111f0},
+-		{0x19, 0x21a19030}),
++		{0x19, 0x21a19030},
++		{0x1e, 0x411111f0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC292_STANDARD_PINS,
+ 		{0x12, 0x40000000},
+ 		{0x13, 0x90a60140},
+ 		{0x16, 0x411111f0},
+ 		{0x18, 0x411111f0},
+-		{0x19, 0x411111f0}),
++		{0x19, 0x411111f0},
++		{0x1e, 0x411111f0}),
++	SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC292_STANDARD_PINS,
++		{0x12, 0x40000000},
++		{0x13, 0x90a60140},
++		{0x16, 0x21014020},
++		{0x18, 0x411111f0},
++		{0x19, 0x21a19030},
++		{0x1e, 0x411111ff}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC298_STANDARD_PINS,
+ 		{0x12, 0x90a60130},
+diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
+index f7549cc7ea85..d1f53abb60de 100644
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -338,6 +338,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+ 	regcache_cache_only(ssm4567->regmap, !enable);
+ 
+ 	if (enable) {
++		ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
++			0x00);
++		if (ret)
++			return ret;
++
+ 		ret = regmap_update_bits(ssm4567->regmap,
+ 			SSM4567_REG_POWER_CTRL,
+ 			SSM4567_POWER_SPWDN, 0x00);
+diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
+index e4145509d63c..9c5219392460 100644
+--- a/sound/soc/samsung/ac97.c
++++ b/sound/soc/samsung/ac97.c
+@@ -324,7 +324,7 @@ static const struct snd_soc_component_driver s3c_ac97_component = {
+ 
+ static int s3c_ac97_probe(struct platform_device *pdev)
+ {
+-	struct resource *mem_res, *dmatx_res, *dmarx_res, *dmamic_res, *irq_res;
++	struct resource *mem_res, *irq_res;
+ 	struct s3c_audio_pdata *ac97_pdata;
+ 	int ret;
+ 
+@@ -335,24 +335,6 @@ static int s3c_ac97_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	/* Check for availability of necessary resource */
+-	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+-	if (!dmatx_res) {
+-		dev_err(&pdev->dev, "Unable to get AC97-TX dma resource\n");
+-		return -ENXIO;
+-	}
+-
+-	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+-	if (!dmarx_res) {
+-		dev_err(&pdev->dev, "Unable to get AC97-RX dma resource\n");
+-		return -ENXIO;
+-	}
+-
+-	dmamic_res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+-	if (!dmamic_res) {
+-		dev_err(&pdev->dev, "Unable to get AC97-MIC dma resource\n");
+-		return -ENXIO;
+-	}
+-
+ 	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ 	if (!irq_res) {
+ 		dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
+@@ -364,11 +346,11 @@ static int s3c_ac97_probe(struct platform_device *pdev)
+ 	if (IS_ERR(s3c_ac97.regs))
+ 		return PTR_ERR(s3c_ac97.regs);
+ 
+-	s3c_ac97_pcm_out.channel = dmatx_res->start;
++	s3c_ac97_pcm_out.slave = ac97_pdata->dma_playback;
+ 	s3c_ac97_pcm_out.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
+-	s3c_ac97_pcm_in.channel = dmarx_res->start;
++	s3c_ac97_pcm_in.slave = ac97_pdata->dma_capture;
+ 	s3c_ac97_pcm_in.dma_addr = mem_res->start + S3C_AC97_PCM_DATA;
+-	s3c_ac97_mic_in.channel = dmamic_res->start;
++	s3c_ac97_mic_in.slave = ac97_pdata->dma_capture_mic;
+ 	s3c_ac97_mic_in.dma_addr = mem_res->start + S3C_AC97_MIC_DATA;
+ 
+ 	init_completion(&s3c_ac97.done);
+diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
+index 0e85dcfec023..085ef30f5ca2 100644
+--- a/sound/soc/samsung/dma.h
++++ b/sound/soc/samsung/dma.h
+@@ -15,7 +15,7 @@
+ #include <sound/dmaengine_pcm.h>
+ 
+ struct s3c_dma_params {
+-	int channel;				/* Channel ID */
++	void *slave;				/* Channel ID */
+ 	dma_addr_t dma_addr;
+ 	int dma_size;			/* Size of the DMA transfer */
+ 	char *ch_name;
+diff --git a/sound/soc/samsung/dmaengine.c b/sound/soc/samsung/dmaengine.c
+index 506f5bf6d082..727008d57d14 100644
+--- a/sound/soc/samsung/dmaengine.c
++++ b/sound/soc/samsung/dmaengine.c
+@@ -50,14 +50,14 @@ void samsung_asoc_init_dma_data(struct snd_soc_dai *dai,
+ 
+ 	if (playback) {
+ 		playback_data = &playback->dma_data;
+-		playback_data->filter_data = (void *)playback->channel;
++		playback_data->filter_data = playback->slave;
+ 		playback_data->chan_name = playback->ch_name;
+ 		playback_data->addr = playback->dma_addr;
+ 		playback_data->addr_width = playback->dma_size;
+ 	}
+ 	if (capture) {
+ 		capture_data = &capture->dma_data;
+-		capture_data->filter_data = (void *)capture->channel;
++		capture_data->filter_data = capture->slave;
+ 		capture_data->chan_name = capture->ch_name;
+ 		capture_data->addr = capture->dma_addr;
+ 		capture_data->addr_width = capture->dma_size;
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index 5e8ccb0a7028..9456c78c9051 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -1260,27 +1260,14 @@ static int samsung_i2s_probe(struct platform_device *pdev)
+ 	pri_dai->lock = &pri_dai->spinlock;
+ 
+ 	if (!np) {
+-		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+-		if (!res) {
+-			dev_err(&pdev->dev,
+-				"Unable to get I2S-TX dma resource\n");
+-			return -ENXIO;
+-		}
+-		pri_dai->dma_playback.channel = res->start;
+-
+-		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+-		if (!res) {
+-			dev_err(&pdev->dev,
+-				"Unable to get I2S-RX dma resource\n");
+-			return -ENXIO;
+-		}
+-		pri_dai->dma_capture.channel = res->start;
+-
+ 		if (i2s_pdata == NULL) {
+ 			dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
+ 			return -EINVAL;
+ 		}
+ 
++		pri_dai->dma_playback.slave = i2s_pdata->dma_playback;
++		pri_dai->dma_capture.slave = i2s_pdata->dma_capture;
++
+ 		if (&i2s_pdata->type)
+ 			i2s_cfg = &i2s_pdata->type.i2s;
+ 
+@@ -1341,11 +1328,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
+ 		sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
+ 		sec_dai->dma_playback.ch_name = "tx-sec";
+ 
+-		if (!np) {
+-			res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+-			if (res)
+-				sec_dai->dma_playback.channel = res->start;
+-		}
++		if (!np)
++			sec_dai->dma_playback.slave = i2s_pdata->dma_play_sec;
+ 
+ 		sec_dai->dma_playback.dma_size = 4;
+ 		sec_dai->addr = pri_dai->addr;
+diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
+index b320a9d3fbf8..c77f324e0bb8 100644
+--- a/sound/soc/samsung/pcm.c
++++ b/sound/soc/samsung/pcm.c
+@@ -486,7 +486,7 @@ static const struct snd_soc_component_driver s3c_pcm_component = {
+ static int s3c_pcm_dev_probe(struct platform_device *pdev)
+ {
+ 	struct s3c_pcm_info *pcm;
+-	struct resource *mem_res, *dmatx_res, *dmarx_res;
++	struct resource *mem_res;
+ 	struct s3c_audio_pdata *pcm_pdata;
+ 	int ret;
+ 
+@@ -499,18 +499,6 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
+ 	pcm_pdata = pdev->dev.platform_data;
+ 
+ 	/* Check for availability of necessary resource */
+-	dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+-	if (!dmatx_res) {
+-		dev_err(&pdev->dev, "Unable to get PCM-TX dma resource\n");
+-		return -ENXIO;
+-	}
+-
+-	dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+-	if (!dmarx_res) {
+-		dev_err(&pdev->dev, "Unable to get PCM-RX dma resource\n");
+-		return -ENXIO;
+-	}
+-
+ 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!mem_res) {
+ 		dev_err(&pdev->dev, "Unable to get register resource\n");
+@@ -568,8 +556,10 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
+ 	s3c_pcm_stereo_out[pdev->id].dma_addr = mem_res->start
+ 							+ S3C_PCM_TXFIFO;
+ 
+-	s3c_pcm_stereo_in[pdev->id].channel = dmarx_res->start;
+-	s3c_pcm_stereo_out[pdev->id].channel = dmatx_res->start;
++	if (pcm_pdata) {
++		s3c_pcm_stereo_in[pdev->id].slave = pcm_pdata->dma_capture;
++		s3c_pcm_stereo_out[pdev->id].slave = pcm_pdata->dma_playback;
++	}
+ 
+ 	pcm->dma_capture = &s3c_pcm_stereo_in[pdev->id];
+ 	pcm->dma_playback = &s3c_pcm_stereo_out[pdev->id];
+diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
+index df65c5b494b1..b6ab3fc5789e 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.c
++++ b/sound/soc/samsung/s3c-i2s-v2.c
+@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
+ #endif
+ 
+ int s3c_i2sv2_register_component(struct device *dev, int id,
+-			   struct snd_soc_component_driver *cmp_drv,
++			   const struct snd_soc_component_driver *cmp_drv,
+ 			   struct snd_soc_dai_driver *dai_drv)
+ {
+ 	struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
+diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
+index 90abab364b49..d0684145ed1f 100644
+--- a/sound/soc/samsung/s3c-i2s-v2.h
++++ b/sound/soc/samsung/s3c-i2s-v2.h
+@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
+  * soc core.
+  */
+ extern int s3c_i2sv2_register_component(struct device *dev, int id,
+-					struct snd_soc_component_driver *cmp_drv,
++					const struct snd_soc_component_driver *cmp_drv,
+ 					struct snd_soc_dai_driver *dai_drv);
+ 
+ #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
+diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
+index 2b766d212ce0..77d27c85a32a 100644
+--- a/sound/soc/samsung/s3c2412-i2s.c
++++ b/sound/soc/samsung/s3c2412-i2s.c
+@@ -34,13 +34,13 @@
+ #include "s3c2412-i2s.h"
+ 
+ static struct s3c_dma_params s3c2412_i2s_pcm_stereo_out = {
+-	.channel	= DMACH_I2S_OUT,
++	.slave		= (void *)(uintptr_t)DMACH_I2S_OUT,
+ 	.ch_name	= "tx",
+ 	.dma_size	= 4,
+ };
+ 
+ static struct s3c_dma_params s3c2412_i2s_pcm_stereo_in = {
+-	.channel	= DMACH_I2S_IN,
++	.slave		= (void *)(uintptr_t)DMACH_I2S_IN,
+ 	.ch_name	= "rx",
+ 	.dma_size	= 4,
+ };
+diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
+index 5bf723689692..9da3a77ea2c7 100644
+--- a/sound/soc/samsung/s3c24xx-i2s.c
++++ b/sound/soc/samsung/s3c24xx-i2s.c
+@@ -32,13 +32,13 @@
+ #include "s3c24xx-i2s.h"
+ 
+ static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_out = {
+-	.channel	= DMACH_I2S_OUT,
++	.slave		= (void *)(uintptr_t)DMACH_I2S_OUT,
+ 	.ch_name	= "tx",
+ 	.dma_size	= 2,
+ };
+ 
+ static struct s3c_dma_params s3c24xx_i2s_pcm_stereo_in = {
+-	.channel	= DMACH_I2S_IN,
++	.slave		= (void *)(uintptr_t)DMACH_I2S_IN,
+ 	.ch_name	= "rx",
+ 	.dma_size	= 2,
+ };
+diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
+index 36dbc0e96004..9dd7ee6d03ff 100644
+--- a/sound/soc/samsung/spdif.c
++++ b/sound/soc/samsung/spdif.c
+@@ -359,7 +359,7 @@ static const struct snd_soc_component_driver samsung_spdif_component = {
+ static int spdif_probe(struct platform_device *pdev)
+ {
+ 	struct s3c_audio_pdata *spdif_pdata;
+-	struct resource *mem_res, *dma_res;
++	struct resource *mem_res;
+ 	struct samsung_spdif_info *spdif;
+ 	int ret;
+ 
+@@ -367,12 +367,6 @@ static int spdif_probe(struct platform_device *pdev)
+ 
+ 	dev_dbg(&pdev->dev, "Entered %s\n", __func__);
+ 
+-	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+-	if (!dma_res) {
+-		dev_err(&pdev->dev, "Unable to get dma resource.\n");
+-		return -ENXIO;
+-	}
+-
+ 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!mem_res) {
+ 		dev_err(&pdev->dev, "Unable to get register resource.\n");
+@@ -432,7 +426,7 @@ static int spdif_probe(struct platform_device *pdev)
+ 
+ 	spdif_stereo_out.dma_size = 2;
+ 	spdif_stereo_out.dma_addr = mem_res->start + DATA_OUTBUF;
+-	spdif_stereo_out.channel = dma_res->start;
++	spdif_stereo_out.slave = spdif_pdata ? spdif_pdata->dma_playback : NULL;
+ 
+ 	spdif->dma_playback = &spdif_stereo_out;
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 194fa7f60a38..e27df0d3898b 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -147,6 +147,7 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 		usb_audio_err(chip, "cannot memdup\n");
+ 		return -ENOMEM;
+ 	}
++	INIT_LIST_HEAD(&fp->list);
+ 	if (fp->nr_rates > MAX_NR_RATES) {
+ 		kfree(fp);
+ 		return -EINVAL;
+@@ -164,23 +165,18 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	stream = (fp->endpoint & USB_DIR_IN)
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+-	if (err < 0) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return err;
+-	}
++	if (err < 0)
++		goto error;
+ 	if (fp->iface != get_iface_desc(&iface->altsetting[0])->bInterfaceNumber ||
+ 	    fp->altset_idx >= iface->num_altsetting) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto error;
+ 	}
+ 	alts = &iface->altsetting[fp->altset_idx];
+ 	altsd = get_iface_desc(alts);
+ 	if (altsd->bNumEndpoints < 1) {
+-		kfree(fp);
+-		kfree(rate_table);
+-		return -EINVAL;
++		err = -EINVAL;
++		goto error;
+ 	}
+ 
+ 	fp->protocol = altsd->bInterfaceProtocol;
+@@ -193,6 +189,12 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip,
+ 	snd_usb_init_pitch(chip, fp->iface, alts, fp);
+ 	snd_usb_init_sample_rate(chip, fp->iface, alts, fp, fp->rate_max);
+ 	return 0;
++
++ error:
++	list_del(&fp->list); /* unlink for avoiding double-free */
++	kfree(fp);
++	kfree(rate_table);
++	return err;
+ }
+ 
+ static int create_auto_pcm_quirk(struct snd_usb_audio *chip,
+@@ -465,6 +467,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 	fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
+ 	fp->datainterval = 0;
+ 	fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
++	INIT_LIST_HEAD(&fp->list);
+ 
+ 	switch (fp->maxpacksize) {
+ 	case 0x120:
+@@ -488,6 +491,7 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
+ 		? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ 	err = snd_usb_add_audio_stream(chip, stream, fp);
+ 	if (err < 0) {
++		list_del(&fp->list); /* unlink for avoiding double-free */
+ 		kfree(fp);
+ 		return err;
+ 	}
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 310a3822d2b7..25e8075f9ea3 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -315,7 +315,9 @@ static struct snd_pcm_chmap_elem *convert_chmap(int channels, unsigned int bits,
+ /*
+  * add this endpoint to the chip instance.
+  * if a stream with the same endpoint already exists, append to it.
+- * if not, create a new pcm stream.
++ * if not, create a new pcm stream. note, fp is added to the substream
++ * fmt_list and will be freed on the chip instance release. do not free
++ * fp or do remove it from the substream fmt_list to avoid double-free.
+  */
+ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
+ 			     int stream,
+@@ -668,6 +670,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 					* (fp->maxpacksize & 0x7ff);
+ 		fp->attributes = parse_uac_endpoint_attributes(chip, alts, protocol, iface_no);
+ 		fp->clock = clock;
++		INIT_LIST_HEAD(&fp->list);
+ 
+ 		/* some quirks for attributes here */
+ 
+@@ -716,6 +719,7 @@ int snd_usb_parse_audio_interface(struct snd_usb_audio *chip, int iface_no)
+ 		dev_dbg(&dev->dev, "%u:%d: add audio endpoint %#x\n", iface_no, altno, fp->endpoint);
+ 		err = snd_usb_add_audio_stream(chip, stream, fp);
+ 		if (err < 0) {
++			list_del(&fp->list); /* unlink for avoiding double-free */
+ 			kfree(fp->rate_table);
+ 			kfree(fp->chmap);
+ 			kfree(fp);
+diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
+index 04e150d83e7d..756ed9fdc9ad 100644
+--- a/tools/perf/Documentation/perf-stat.txt
++++ b/tools/perf/Documentation/perf-stat.txt
+@@ -62,6 +62,14 @@ OPTIONS
+ --scale::
+ 	scale/normalize counter values
+ 
++-d::
++--detailed::
++	print more detailed statistics, can be specified up to 3 times
++
++	   -d:          detailed events, L1 and LLC data cache
++        -d -d:     more detailed events, dTLB and iTLB events
++     -d -d -d:     very detailed events, adding prefetch events
++
+ -r::
+ --repeat=<n>::
+ 	repeat command and print average + stddev (max: 100). 0 means forever.
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index ff866c4d2e2f..d18a59ab4ed5 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -251,7 +251,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
+ 		strcpy(execname, "");
+ 
+ 		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
+-		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
++		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
+ 		       &event->mmap2.start, &event->mmap2.len, prot,
+ 		       &event->mmap2.pgoff, &event->mmap2.maj,
+ 		       &event->mmap2.min,
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index c2f87ff0061d..d93deb5ce4f2 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2622,7 +2622,7 @@ static long kvm_vm_ioctl(struct file *filp,
+ 		if (copy_from_user(&routing, argp, sizeof(routing)))
+ 			goto out;
+ 		r = -EINVAL;
+-		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
++		if (routing.nr > KVM_MAX_IRQ_ROUTES)
+ 			goto out;
+ 		if (routing.flags)
+ 			goto out;
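
The recurring pattern in the net/sched hunks above is twofold: qdisc_tree_decrease_qlen(), which only adjusted packet counts, gives way to qdisc_tree_reduce_backlog(), which propagates the dropped byte backlog up the hierarchy as well, and the various *_graft() handlers collapse their open-coded lock/swap/purge sequences into a single qdisc_replace() helper. For orientation, here is a sketch of that helper, reconstructed from the open-coded sequences it replaces (the real definition lands in include/net/sch_generic.h earlier in this patch; treat the exact body as illustrative):

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
					  struct Qdisc **pold)
{
	struct Qdisc *old;

	sch_tree_lock(sch);		/* keep the tree stable across the swap */
	old = *pold;
	*pold = new;
	if (old != NULL) {
		/* purge both the packet count and the byte backlog from
		 * every ancestor before resetting the detached qdisc
		 */
		qdisc_tree_reduce_backlog(old, old->q.qlen,
					  old->qstats.backlog);
		qdisc_reset(old);
	}
	sch_tree_unlock(sch);

	return old;
}

Doing the accounting under the tree lock is the point of the refactor: callers previously reset the old qdisc and adjusted only the packet count, leaving parent qdiscs (HTB, HFSC, and friends) with stale byte counters.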


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-07-02 15:31 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-07-02 15:31 UTC (permalink / raw
  To: gentoo-commits

commit:     69af2da49481a0d114da92c3cb339ce48617c9b7
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jul  2 15:31:19 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jul  2 15:31:19 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69af2da4

Select SYSVIPC when GENTOO_LINUX_PORTAGE is selected, since SYSVIPC is a dependency of IPC_NS. See bug #587736.
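
Background on why the extra select is needed: Kconfig's "select" forces a symbol on without satisfying that symbol's own dependencies, and IPC_NS depends on System V IPC support. A simplified sketch of the relevant 4.1-era init/Kconfig entries (illustrative, not verbatim):

config SYSVIPC
	bool "System V IPC"

config IPC_NS
	bool "IPC namespace"
	depends on (SYSVIPC || POSIX_MQUEUE)

So "select IPC_NS" alone can produce a .config with IPC_NS=y but SYSVIPC unset, which is exactly what the "select SYSVIPC" added below prevents.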

 4567_distro-Gentoo-Kconfig.patch | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index c7af596..499b21f 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,5 +1,5 @@
---- a/Kconfig
-+++ b/Kconfig
+--- a/Kconfig	2016-07-01 19:22:17.117439707 -0400
++++ b/Kconfig	2016-07-01 19:21:54.371440596 -0400
 @@ -8,4 +8,6 @@ config SRCARCH
  	string
  	option env="SRCARCH"
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- /dev/null
-+++ b/distro/Kconfig
-@@ -0,0 +1,131 @@
+--- /dev/null	2016-07-01 11:23:26.087932647 -0400
++++ b/distro/Kconfig	2016-07-01 19:32:35.581415519 -0400
+@@ -0,0 +1,134 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -63,6 +63,7 @@
 +	select NAMESPACES
 +	select IPC_NS
 +	select NET_NS
++	select SYSVIPC
 +
 +	help
 +		This enables options required by various Portage FEATURES.
@@ -71,6 +72,8 @@
 +		CGROUPS     (required for FEATURES=cgroup)
 +		IPC_NS      (required for FEATURES=ipc-sandbox)
 +		NET_NS      (required for FEATURES=network-sandbox)
++		SYSVIPC     (required by IPC_NS)
++   
 +
 +		It is highly recommended that you leave this enabled as these FEATURES
 +		are, or will soon be, enabled by default.
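
Usage note: with a kernel built from this option set, the matching Portage features can then be switched on in make.conf. The feature names come from the help text above; the file excerpt itself is hypothetical:

	# /etc/portage/make.conf (excerpt)
	FEATURES="cgroup ipc-sandbox network-sandbox"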


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-07-01 19:56 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-07-01 19:56 UTC (permalink / raw
  To: gentoo-commits

commit:     da2e8e644edd67e41f173e6eb9d686d63fc532ed
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul  1 19:56:27 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul  1 19:56:27 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=da2e8e64

Update gcc 4.9+ optimization patch. See bug #587578.

 ...-additional-cpu-optimizations-for-gcc-4.9.patch | 90 ++++++++++++++--------
 1 file changed, 57 insertions(+), 33 deletions(-)

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
index 418201d..d9729b2 100644
--- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -21,11 +21,12 @@ bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=7746
 This patch will expand the number of microarchitectures to include newer
 processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
 14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
-Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
-i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
-Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
-Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
-processors (Silvermont). It also offers the compiler the 'native' flag.
+Family 15h (Steamroller), Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7
+(Nehalem), Intel 1.5 Gen Core i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7
+(Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core
+i3/i5/i7 (Haswell), Intel 5th Gen Core i3/i5/i7 (Broadwell), and the low power
+Silvermont series of Atom processors (Silvermont). It also offers the compiler
+the 'native' flag.
 
 Small but real speed increases are measurable using a make endpoint comparing
 a generic kernel to one built with one of the respective microarchs.
@@ -37,9 +38,9 @@ REQUIREMENTS
 linux version >=3.15
 gcc version >=4.9
 
---- a/arch/x86/include/asm/module.h	2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/include/asm/module.h	2015-03-07 03:27:32.556672424 -0500
-@@ -15,6 +15,22 @@
+--- a/arch/x86/include/asm/module.h	2015-08-30 14:34:09.000000000 -0400
++++ b/arch/x86/include/asm/module.h	2015-11-06 14:18:24.234941036 -0500
+@@ -15,6 +15,24 @@
  #define MODULE_PROC_FAMILY "586MMX "
  #elif defined CONFIG_MCORE2
  #define MODULE_PROC_FAMILY "CORE2 "
@@ -59,10 +60,12 @@ gcc version >=4.9
 +#define MODULE_PROC_FAMILY "HASWELL "
 +#elif defined CONFIG_MBROADWELL
 +#define MODULE_PROC_FAMILY "BROADWELL "
++#elif defined CONFIG_MSKYLAKE
++#define MODULE_PROC_FAMILY "SKYLAKE "
  #elif defined CONFIG_MATOM
  #define MODULE_PROC_FAMILY "ATOM "
  #elif defined CONFIG_M686
-@@ -33,6 +49,20 @@
+@@ -33,6 +51,22 @@
  #define MODULE_PROC_FAMILY "K7 "
  #elif defined CONFIG_MK8
  #define MODULE_PROC_FAMILY "K8 "
@@ -77,14 +80,16 @@ gcc version >=4.9
 +#elif defined CONFIG_MBULLDOZER
 +#define MODULE_PROC_FAMILY "BULLDOZER "
 +#elif defined CONFIG_MPILEDRIVER
 +#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
 +#elif defined CONFIG_MJAGUAR
 +#define MODULE_PROC_FAMILY "JAGUAR "
  #elif defined CONFIG_MELAN
  #define MODULE_PROC_FAMILY "ELAN "
  #elif defined CONFIG_MCRUSOE
---- a/arch/x86/Kconfig.cpu	2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Kconfig.cpu	2015-03-07 03:32:14.337713226 -0500
+--- a/arch/x86/Kconfig.cpu	2015-08-30 14:34:09.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2015-11-06 14:20:14.948369244 -0500
 @@ -137,9 +137,8 @@ config MPENTIUM4
  		-Paxville
  		-Dempsey
@@ -105,7 +110,7 @@ gcc version >=4.9
  	depends on X86_32
  	---help---
  	  Select this for an AMD Athlon K7-family processor.  Enables use of
-@@ -155,12 +154,62 @@ config MK7
+@@ -155,12 +154,69 @@ config MK7
  	  flags to GCC.
  
  config MK8
@@ -159,6 +164,13 @@ gcc version >=4.9
 +
 +	  Enables -march=bdver2
 +
++config MSTEAMROLLER
++	bool "AMD Steamroller"
++	---help---
++	  Select this for AMD Steamroller processors.
++
++	  Enables -march=bdver3
++
 +config MJAGUAR
 +	bool "AMD Jaguar"
 +	---help---
@@ -169,7 +181,7 @@ gcc version >=4.9
  config MCRUSOE
  	bool "Crusoe"
  	depends on X86_32
-@@ -251,8 +300,17 @@ config MPSC
+@@ -251,8 +307,17 @@ config MPSC
  	  using the cpu family field
  	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
  
@@ -188,7 +200,7 @@ gcc version >=4.9
  	---help---
  
  	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
-@@ -260,14 +318,63 @@ config MCORE2
+@@ -260,14 +325,71 @@ config MCORE2
  	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
  	  (not a typo)
  
@@ -255,10 +267,18 @@ gcc version >=4.9
 +	  Select this for 5th Gen Core processors in the Broadwell family.
 +
 +	  Enables -march=broadwell
++
++config MSKYLAKE
++	bool "Intel Skylake"
++	---help---
++
++	  Select this for 6th Gen Core processors in the Skylake family.
++
++	  Enables -march=skylake
  
  config GENERIC_CPU
  	bool "Generic-x86-64"
-@@ -276,6 +383,19 @@ config GENERIC_CPU
+@@ -276,6 +398,19 @@ config GENERIC_CPU
  	  Generic x86-64 CPU.
  	  Run equally well on all x86-64 CPUs.
  
@@ -278,54 +298,54 @@ gcc version >=4.9
  endchoice
  
  config X86_GENERIC
-@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
+@@ -300,7 +435,7 @@ config X86_INTERNODE_CACHE_SHIFT
  config X86_L1_CACHE_SHIFT
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  
-@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
+@@ -331,11 +466,11 @@ config X86_ALIGNMENT_16
  
  config X86_INTEL_USERCOPY
  	def_bool y
 -	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
-+	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE
  
  config X86_USE_PPRO_CHECKSUM
  	def_bool y
 -	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-+	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MATOM || MNATIVE
  
  config X86_USE_3DNOW
  	def_bool y
-@@ -359,17 +479,17 @@ config X86_P6_NOP
+@@ -359,17 +494,17 @@ config X86_P6_NOP
  
  config X86_TSC
  	def_bool y
 -	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
-+	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM) || X86_64
  
  config X86_CMPXCHG64
  	def_bool y
 -	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
-+	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
  
  # this should be set for all -march=.. options where the compiler
  # generates cmov.
  config X86_CMOV
  	def_bool y
 -	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
-+	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
  
  config X86_MINIMUM_CPU_FAMILY
  	int
---- a/arch/x86/Makefile	2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Makefile	2015-03-07 03:33:27.650843211 -0500
-@@ -92,13 +92,35 @@ else
- 	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+--- a/arch/x86/Makefile	2015-08-30 14:34:09.000000000 -0400
++++ b/arch/x86/Makefile	2015-11-06 14:21:05.708983344 -0500
+@@ -94,13 +94,38 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
  
          # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
 +        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
@@ -336,6 +356,7 @@ gcc version >=4.9
 +        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
 +        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
 +        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
 +        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
          cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
  
@@ -358,14 +379,16 @@ gcc version >=4.9
 +                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
 +        cflags-$(CONFIG_MBROADWELL) += \
 +                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MSKYLAKE) += \
++                $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
 +        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
 +                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
          cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
          KBUILD_CFLAGS += $(cflags-y)
  
---- a/arch/x86/Makefile_32.cpu	2014-06-16 16:44:27.000000000 -0400
-+++ b/arch/x86/Makefile_32.cpu	2015-03-07 03:34:15.203586024 -0500
-@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+--- a/arch/x86/Makefile_32.cpu	2015-08-30 14:34:09.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2015-11-06 14:21:43.604429077 -0500
+@@ -23,7 +23,16 @@ cflags-$(CONFIG_MK6)		+= -march=k6
  # Please note, that patches that add -march=athlon-xp and friends are pointless.
  # They make zero difference whatsosever to performance at this time.
  cflags-$(CONFIG_MK7)		+= -march=athlon
@@ -377,11 +400,12 @@ gcc version >=4.9
 +cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
 +cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
 +cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MSTEAMROLLER)	+= $(call cc-option,-march=bdver3,-march=athlon)
 +cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
  cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
  cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
  cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
-@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+@@ -32,8 +41,16 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
  cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
  cflags-$(CONFIG_MVIAC7)		+= -march=i686
  cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
@@ -394,9 +418,9 @@ gcc version >=4.9
 +cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
 +cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
 +cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MSKYLAKE)	+= -march=i686 $(call tune,skylake)
 +cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
 +	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
  
  # AMD Elan support
  cflags-$(CONFIG_MELAN)		+= -march=i486
-


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-06-23 11:45 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-06-23 11:45 UTC (permalink / raw
  To: gentoo-commits

commit:     9b1ae75ce3b7546ecbaf6b6a880ea885166190cb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Jun 23 11:45:51 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Jun 23 11:45:51 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=9b1ae75c

Linux patch 4.1.27

 0000_README             |    4 +
 1026_linux-4.1.27.patch | 1129 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1133 insertions(+)

diff --git a/0000_README b/0000_README
index 783092f..b592a97 100644
--- a/0000_README
+++ b/0000_README
@@ -147,6 +147,10 @@ Patch:  1025_linux-4.1.26.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.26
 
+Patch:  1026_linux-4.1.27.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.27
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1026_linux-4.1.27.patch b/1026_linux-4.1.27.patch
new file mode 100644
index 0000000..bd3e683
--- /dev/null
+++ b/1026_linux-4.1.27.patch
@@ -0,0 +1,1129 @@
+diff --git a/Makefile b/Makefile
+index 080a87e290b9..54b3d8ae8624 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 26
++SUBLEVEL = 27
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
+index ef9119f7462e..4d9375814b53 100644
+--- a/arch/arm/kernel/ptrace.c
++++ b/arch/arm/kernel/ptrace.c
+@@ -733,8 +733,8 @@ static int vfp_set(struct task_struct *target,
+ 	if (ret)
+ 		return ret;
+ 
+-	vfp_flush_hwstate(thread);
+ 	thread->vfpstate.hard = new_vfp;
++	vfp_flush_hwstate(thread);
+ 
+ 	return 0;
+ }
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index d7c0acb35ec2..8d49614d600d 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -666,7 +666,7 @@ void handle_unaligned(struct pt_regs *regs)
+ 		break;
+ 	}
+ 
+-	if (modify && R1(regs->iir))
++	if (ret == 0 && modify && R1(regs->iir))
+ 		regs->gr[R1(regs->iir)] = newbase;
+ 
+ 
+@@ -677,6 +677,14 @@ void handle_unaligned(struct pt_regs *regs)
+ 
+ 	if (ret)
+ 	{
++		/*
++		 * The unaligned handler failed.
++		 * If we were called by __get_user() or __put_user(), jump
++		 * to its exception fixup handler instead of crashing.
++		 */
++		if (!user_mode(regs) && fixup_exception(regs))
++			return;
++
+ 		printk(KERN_CRIT "Unaligned handler failed, ret = %d\n", ret);
+ 		die_if_kernel("Unaligned data reference", regs, 28);
+ 
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index f4f99f01b746..a4bf6e0eb813 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -708,7 +708,7 @@
+ #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
+ #define   MMCR0_FCHV	0x00000001UL /* freeze conditions in hypervisor mode */
+ #define SPRN_MMCR1	798
+-#define SPRN_MMCR2	769
++#define SPRN_MMCR2	785
+ #define SPRN_MMCRA	0x312
+ #define   MMCRA_SDSYNC	0x80000000UL /* SDAR synced with SIAR */
+ #define   MMCRA_SDAR_DCACHE_MISS 0x40000000UL
+@@ -745,13 +745,13 @@
+ #define SPRN_PMC6	792
+ #define SPRN_PMC7	793
+ #define SPRN_PMC8	794
+-#define SPRN_SIAR	780
+-#define SPRN_SDAR	781
+ #define SPRN_SIER	784
+ #define   SIER_SIPR		0x2000000	/* Sampled MSR_PR */
+ #define   SIER_SIHV		0x1000000	/* Sampled MSR_HV */
+ #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
+ #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
++#define SPRN_SIAR	796
++#define SPRN_SDAR	797
+ #define SPRN_TACR	888
+ #define SPRN_TCSCR	889
+ #define SPRN_CSIGR	890
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index fd1fe4c37599..ae97ba211d8e 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -647,6 +647,7 @@ unsigned char ibm_architecture_vec[] = {
+ 	W(0xffff0000), W(0x003e0000),	/* POWER6 */
+ 	W(0xffff0000), W(0x003f0000),	/* POWER7 */
+ 	W(0xffff0000), W(0x004b0000),	/* POWER8E */
++	W(0xffff0000), W(0x004c0000),   /* POWER8NVL */
+ 	W(0xffff0000), W(0x004d0000),	/* POWER8 */
+ 	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
+ 	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
+@@ -709,7 +710,7 @@ unsigned char ibm_architecture_vec[] = {
+ 	 * must match by the macro below. Update the definition if
+ 	 * the structure layout changes.
+ 	 */
+-#define IBM_ARCH_VEC_NRCORES_OFFSET	125
++#define IBM_ARCH_VEC_NRCORES_OFFSET	133
+ 	W(NR_CPUS),			/* number of cores supported */
+ 	0,
+ 	0,
+diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
+index 2039397cc75d..d2a44cd476f2 100644
+--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
++++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
+@@ -623,29 +623,50 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
+ {
+ 	int config_addr;
+ 	int ret;
++	/* Waiting 0.2s maximum before skipping configuration */
++	int max_wait = 200;
+ 
+ 	/* Figure out the PE address */
+ 	config_addr = pe->config_addr;
+ 	if (pe->addr)
+ 		config_addr = pe->addr;
+ 
+-	/* Use new configure-pe function, if supported */
+-	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
+-		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
+-				config_addr, BUID_HI(pe->phb->buid),
+-				BUID_LO(pe->phb->buid));
+-	} else {
+-		return -EFAULT;
+-	}
++	while (max_wait > 0) {
++		/* Use new configure-pe function, if supported */
++		if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
++			ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
++					config_addr, BUID_HI(pe->phb->buid),
++					BUID_LO(pe->phb->buid));
++		} else {
++			return -EFAULT;
++		}
+ 
+-	if (ret)
+-		pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
+-			__func__, pe->phb->global_number, pe->addr, ret);
++		if (!ret)
++			return ret;
++
++		/*
++		 * If RTAS returns a delay value that's above 100ms, cut it
++		 * down to 100ms in case firmware made a mistake.  For more
++		 * on how these delay values work see rtas_busy_delay_time
++		 */
++		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
++		    ret <= RTAS_EXTENDED_DELAY_MAX)
++			ret = RTAS_EXTENDED_DELAY_MIN+2;
++
++		max_wait -= rtas_busy_delay_time(ret);
++
++		if (max_wait < 0)
++			break;
++
++		rtas_busy_delay(ret);
++	}
+ 
++	pr_warn("%s: Unable to configure bridge PHB#%d-PE#%x (%d)\n",
++		__func__, pe->phb->global_number, pe->addr, ret);
+ 	return ret;
+ }
+ 
+diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
+index 57bbf2fb21f6..78c366462e70 100644
+--- a/arch/x86/boot/Makefile
++++ b/arch/x86/boot/Makefile
+@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
+ 	for i in lib lib64 share end ; do \
+ 		if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
+ 			cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
++			if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
++				cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
++			fi ; \
+ 			break ; \
+ 		fi ; \
+ 		if [ $$i = end ] ; then exit 1 ; fi ; \
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 75eb9603ed29..bd84d2226ca1 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3174,6 +3174,11 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+ 	if (dbgregs->flags)
+ 		return -EINVAL;
+ 
++	if (dbgregs->dr6 & ~0xffffffffull)
++		return -EINVAL;
++	if (dbgregs->dr7 & ~0xffffffffull)
++		return -EINVAL;
++
+ 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+ 	kvm_update_dr0123(vcpu);
+ 	vcpu->arch.dr6 = dbgregs->dr6;
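
The two new checks reject debug-register values with any of the upper 32
bits set, since dr6/dr7 are architecturally 32-bit. The same reserved-bits
test as a standalone sketch (sample values are illustrative):

#include <stdint.h>
#include <stdio.h>

static int dr_valid(uint64_t val)
{
	/* upper 32 bits are reserved and must be clear */
	return (val & ~0xffffffffULL) == 0;
}

int main(void)
{
	printf("%d\n", dr_valid(0xffff0ff0ULL));	/* 1: fits in 32 bits */
	printf("%d\n", dr_valid(1ULL << 32));		/* 0: reserved bit set */
	return 0;
}
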
+diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
+index 4870f28403f5..05bfe568cd30 100644
+--- a/crypto/asymmetric_keys/Kconfig
++++ b/crypto/asymmetric_keys/Kconfig
+@@ -14,6 +14,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ 	select MPILIB
+ 	select PUBLIC_KEY_ALGO_RSA
+ 	select CRYPTO_HASH_INFO
++	select CRYPTO_AKCIPHER
+ 	help
+ 	  This option provides support for asymmetric public key type handling.
+ 	  If signature generation and/or verification are to be used,
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 1ee2ab58e37d..e6eed20b1401 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1037,8 +1037,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
+ 
+ 	/* cpuinfo and default policy values */
+ 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
+-	policy->cpuinfo.max_freq =
+-		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++	update_turbo_state();
++	policy->cpuinfo.max_freq = limits.turbo_disabled ?
++			cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
++	policy->cpuinfo.max_freq *= cpu->pstate.scaling;
++
+ 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ 	cpumask_set_cpu(policy->cpu, policy->cpus);
+ 
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+index 52c7395cb8d8..0d0d4529ee36 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ 	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
+ 	unsigned int unit;
++	u32 unit_size;
+ 	int ret;
+ 
+ 	if (!ctx->u.aes.key_len)
+@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	if (!req->info)
+ 		return -EINVAL;
+ 
+-	for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
+-		if (!(req->nbytes & (unit_size_map[unit].size - 1)))
+-			break;
++	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
++	if (req->nbytes <= unit_size_map[0].size) {
++		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
++			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
++				unit_size = unit_size_map[unit].value;
++				break;
++			}
++		}
++	}
+ 
+-	if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
++	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ 	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
+ 		/* Use the fallback to process the request for any
+ 		 * unsupported unit sizes or key sizes
+@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ 	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
+ 	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ 					   : CCP_AES_ACTION_DECRYPT;
+-	rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
++	rctx->cmd.u.xts.unit_size = unit_size;
+ 	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
+ 	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
+ 	rctx->cmd.u.xts.iv = &rctx->iv_sg;
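
The reworked lookup defaults to the sentinel and only trusts a table hit
when the request is no larger than the biggest supported unit. Roughly,
with invented sizes and values rather than the driver's:

#include <stdio.h>

#define UNIT_LAST 0xffffffffu	/* stand-in for CCP_XTS_AES_UNIT_SIZE__LAST */

static const struct { unsigned size, value; } unit_size_map[] = {
	{ 4096, 4 }, { 2048, 3 }, { 1024, 2 }, { 512, 1 }, { 16, 0 },
};

static unsigned pick_unit(unsigned nbytes)
{
	unsigned i;

	if (nbytes <= unit_size_map[0].size)
		for (i = 0; i < sizeof(unit_size_map) / sizeof(unit_size_map[0]); i++)
			if (!(nbytes & (unit_size_map[i].size - 1)))
				return unit_size_map[i].value;
	return UNIT_LAST;	/* oversized or unaligned: use the fallback path */
}

int main(void)
{
	printf("%u\n", pick_unit(512));		/* 1: aligned, in range */
	printf("%u\n", pick_unit(8192));	/* sentinel: too large, falls back */
	return 0;
}
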
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index a7e7be0a8ae8..cb46c468b01e 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -218,8 +218,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
+ 	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
+ };
+ 
+-#define RIR_RNK_TGT(reg)		GET_BITFIELD(reg, 16, 19)
+-#define RIR_OFFSET(reg)		GET_BITFIELD(reg,  2, 14)
++#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
++	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
++
++#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
++	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))
+ 
+ /* Device 16, functions 2-7 */
+ 
+@@ -1101,14 +1104,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
+ 				pci_read_config_dword(pvt->pci_tad[i],
+ 						      rir_offset[j][k],
+ 						      &reg);
+-				tmp_mb = RIR_OFFSET(reg) << 6;
++				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
+ 
+ 				gb = div_u64_rem(tmp_mb, 1024, &mb);
+ 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
+ 					 i, j, k,
+ 					 gb, (mb*1000)/1024,
+ 					 ((u64)tmp_mb) << 20L,
+-					 (u32)RIR_RNK_TGT(reg),
++					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
+ 					 reg);
+ 			}
+ 		}
+@@ -1432,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	pci_read_config_dword(pvt->pci_tad[base_ch],
+ 			      rir_offset[n_rir][idx],
+ 			      &reg);
+-	*rank = RIR_RNK_TGT(reg);
++	*rank = RIR_RNK_TGT(pvt->info.type, reg);
+ 
+ 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
+ 		 n_rir,
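
Both macros are built on the driver's GET_BITFIELD helper, now picking the
bit range by CPU type. A rough standalone rendering of that pattern (the
register value is invented for the demo):

#include <stdint.h>
#include <stdio.h>

/* roughly the kernel helper: extract bits lo..hi of v */
#define GET_BITFIELD(v, lo, hi) \
	(((v) & (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))) >> (lo))

int main(void)
{
	uint32_t reg = 0x00f2004c;	/* invented RIR register value */

	/* Broadwell keeps the rank target in bits 20..23, older parts in 16..19 */
	printf("bdw tgt = %llu\n", (unsigned long long)GET_BITFIELD(reg, 20, 23));
	printf("older tgt = %llu\n", (unsigned long long)GET_BITFIELD(reg, 16, 19));
	return 0;
}
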
+diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
+index b164ce837b43..81ddd1d6d84b 100644
+--- a/drivers/gpio/gpio-bcm-kona.c
++++ b/drivers/gpio/gpio-bcm-kona.c
+@@ -549,11 +549,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio)
+ 	/* disable interrupts and clear status */
+ 	for (i = 0; i < kona_gpio->num_bank; i++) {
+ 		/* Unlock the entire bank first */
+-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE);
++		bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE);
+ 		writel(0xffffffff, reg_base + GPIO_INT_MASK(i));
+ 		writel(0xffffffff, reg_base + GPIO_INT_STATUS(i));
+ 		/* Now re-lock the bank */
+-		bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE);
++		bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE);
+ 	}
+ }
+ 
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 6bc612b8a49f..95752d38b7fe 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -375,7 +375,7 @@ struct gpio_chip *gpiochip_find(void *data,
+ 
+ 	spin_lock_irqsave(&gpio_lock, flags);
+ 	list_for_each_entry(chip, &gpio_chips, list)
+-		if (match(chip, data))
++		if (chip && match(chip, data))
+ 			break;
+ 
+ 	/* No match? */
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 567791b27d6d..cf43f77be254 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -572,6 +572,7 @@ nouveau_fbcon_init(struct drm_device *dev)
+ 	if (ret)
+ 		goto fini;
+ 
++	fbcon->helper.fbdev->pixmap.buf_align = 4;
+ 	return 0;
+ 
+ fini:
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+index 495c57644ced..7a92d15d474e 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	uint32_t fg;
+ 	uint32_t bg;
+ 	uint32_t dsize;
+-	uint32_t width;
+ 	uint32_t *data = (uint32_t *)image->data;
+ 	int ret;
+ 
+@@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 8);
+-	dsize = ALIGN(width * image->height, 32) >> 5;
+-
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ 		fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
+@@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 			 ((image->dx + image->width) & 0xffff));
+ 	OUT_RING(chan, bg);
+ 	OUT_RING(chan, fg);
+-	OUT_RING(chan, (image->height << 16) | width);
++	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->height << 16) | image->width);
+ 	OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
+ 
++	dsize = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dsize) {
+ 		int iter_len = dsize > 128 ? 128 : dsize;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+index 394c89abcc97..cb2a71ada99e 100644
+--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
+@@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	struct nouveau_fbdev *nfbdev = info->par;
+ 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ 	struct nouveau_channel *chan = drm->channel;
+-	uint32_t width, dwords, *data = (uint32_t *)image->data;
++	uint32_t dwords, *data = (uint32_t *)image->data;
+ 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ 	uint32_t *palette = info->pseudo_palette;
+ 	int ret;
+@@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 32);
+-	dwords = (width * image->height) >> 5;
+-
+ 	BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+@@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING(chan, 0);
+ 	OUT_RING(chan, image->dy);
+ 
++	dwords = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+index 61246677e8dc..69f760e8c54f 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+@@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	struct nouveau_fbdev *nfbdev = info->par;
+ 	struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
+ 	struct nouveau_channel *chan = drm->channel;
+-	uint32_t width, dwords, *data = (uint32_t *)image->data;
++	uint32_t dwords, *data = (uint32_t *)image->data;
+ 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+ 	uint32_t *palette = info->pseudo_palette;
+ 	int ret;
+@@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	if (ret)
+ 		return ret;
+ 
+-	width = ALIGN(image->width, 32);
+-	dwords = (width * image->height) >> 5;
+-
+ 	BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
+ 	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ 	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+@@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+ 	OUT_RING  (chan, 0);
+ 	OUT_RING  (chan, image->dy);
+ 
++	dwords = ALIGN(image->width * image->height, 32) >> 5;
+ 	while (dwords) {
+ 		int push = dwords > 2047 ? 2047 : dwords;
+ 
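
All three fbcon paths above (nv04, nv50, nvc0) stop padding the width before
sizing the blit data: the payload is width*height bits padded to a 32-bit
boundary as a whole, not per row, so the old math pushed extra garbage
dwords. A small sketch of the arithmetic difference, using the nv04 variant
and an invented glyph size:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned w = 12, h = 22;	/* invented glyph size */
	unsigned old = ALIGN(ALIGN(w, 8) * h, 32) >> 5;	/* width padded first */
	unsigned fixed = ALIGN(w * h, 32) >> 5;		/* whole payload padded */

	printf("old=%u dwords, fixed=%u dwords\n", old, fixed);	/* 11 vs 9 */
	return 0;
}
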
+diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+index 5606c25e5d02..6d9fea664c6a 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
++++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+@@ -796,21 +796,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr_priv *priv, int gpc)
+ }
+ 
+ static const struct nvkm_enum gf100_mp_warp_error[] = {
+-	{ 0x00, "NO_ERROR" },
+-	{ 0x01, "STACK_MISMATCH" },
++	{ 0x01, "STACK_ERROR" },
++	{ 0x02, "API_STACK_ERROR" },
++	{ 0x03, "RET_EMPTY_STACK_ERROR" },
++	{ 0x04, "PC_WRAP" },
+ 	{ 0x05, "MISALIGNED_PC" },
+-	{ 0x08, "MISALIGNED_GPR" },
+-	{ 0x09, "INVALID_OPCODE" },
+-	{ 0x0d, "GPR_OUT_OF_BOUNDS" },
+-	{ 0x0e, "MEM_OUT_OF_BOUNDS" },
+-	{ 0x0f, "UNALIGNED_MEM_ACCESS" },
+-	{ 0x11, "INVALID_PARAM" },
++	{ 0x06, "PC_OVERFLOW" },
++	{ 0x07, "MISALIGNED_IMMC_ADDR" },
++	{ 0x08, "MISALIGNED_REG" },
++	{ 0x09, "ILLEGAL_INSTR_ENCODING" },
++	{ 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
++	{ 0x0b, "ILLEGAL_INSTR_PARAM" },
++	{ 0x0c, "INVALID_CONST_ADDR" },
++	{ 0x0d, "OOR_REG" },
++	{ 0x0e, "OOR_ADDR" },
++	{ 0x0f, "MISALIGNED_ADDR" },
++	{ 0x10, "INVALID_ADDR_SPACE" },
++	{ 0x11, "ILLEGAL_INSTR_PARAM2" },
++	{ 0x12, "INVALID_CONST_ADDR_LDC" },
++	{ 0x13, "GEOMETRY_SM_ERROR" },
++	{ 0x14, "DIVERGENT" },
++	{ 0x15, "WARP_EXIT" },
+ 	{}
+ };
+ 
+ static const struct nvkm_bitfield gf100_mp_global_error[] = {
++	{ 0x00000001, "SM_TO_SM_FAULT" },
++	{ 0x00000002, "L1_ERROR" },
+ 	{ 0x00000004, "MULTIPLE_WARP_ERRORS" },
+-	{ 0x00000008, "OUT_OF_STACK_SPACE" },
++	{ 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
++	{ 0x00000010, "BPT_INT" },
++	{ 0x00000020, "BPT_PAUSE" },
++	{ 0x00000040, "SINGLE_STEP_COMPLETE" },
++	{ 0x20000000, "ECC_SEC_ERROR" },
++	{ 0x40000000, "ECC_DED_ERROR" },
++	{ 0x80000000, "TIMEOUT" },
+ 	{}
+ };
+ 
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index c4b1ac6750d8..3f86e548d795 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -1379,47 +1379,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
+ static long uvc_v4l2_compat_ioctl32(struct file *file,
+ 		     unsigned int cmd, unsigned long arg)
+ {
++	struct uvc_fh *handle = file->private_data;
+ 	union {
+ 		struct uvc_xu_control_mapping xmap;
+ 		struct uvc_xu_control_query xqry;
+ 	} karg;
+ 	void __user *up = compat_ptr(arg);
+-	mm_segment_t old_fs;
+ 	long ret;
+ 
+ 	switch (cmd) {
+ 	case UVCIOC_CTRL_MAP32:
+-		cmd = UVCIOC_CTRL_MAP;
+ 		ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
++		if (ret)
++			return ret;
++		ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
++		if (ret)
++			return ret;
++		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
++		if (ret)
++			return ret;
++
+ 		break;
+ 
+ 	case UVCIOC_CTRL_QUERY32:
+-		cmd = UVCIOC_CTRL_QUERY;
+ 		ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
++		if (ret)
++			return ret;
++		ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
++		if (ret)
++			return ret;
++		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
++		if (ret)
++			return ret;
+ 		break;
+ 
+ 	default:
+ 		return -ENOIOCTLCMD;
+ 	}
+ 
+-	old_fs = get_fs();
+-	set_fs(KERNEL_DS);
+-	ret = video_ioctl2(file, cmd, (unsigned long)&karg);
+-	set_fs(old_fs);
+-
+-	if (ret < 0)
+-		return ret;
+-
+-	switch (cmd) {
+-	case UVCIOC_CTRL_MAP:
+-		ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
+-		break;
+-
+-	case UVCIOC_CTRL_QUERY:
+-		ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
+-		break;
+-	}
+-
+ 	return ret;
+ }
+ #endif
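
The rewritten handler follows the usual compat-ioctl shape: translate the
32-bit layout, call the native handler directly on the kernel-side copy, and
translate the result back, with no set_fs(KERNEL_DS) address-space override.
Schematically, with invented types and names:

#include <stdio.h>

struct native_arg { unsigned long ptr; unsigned int len; };
struct compat_arg { unsigned int ptr; unsigned int len; };	/* 32-bit layout */

static int native_handler(struct native_arg *a) { a->len += 1; return 0; }

static int compat_handler(struct compat_arg *ca)
{
	struct native_arg na = { .ptr = ca->ptr, .len = ca->len };
	int ret = native_handler(&na);	/* direct call on the kernel copy */

	ca->ptr = (unsigned int)na.ptr;
	ca->len = na.len;
	return ret;
}

int main(void)
{
	struct compat_arg ca = { .ptr = 0x1000, .len = 3 };

	printf("ret=%d len=%u\n", compat_handler(&ca), ca.len);
	return 0;
}
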
+diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
+index d5c0a1af08b9..eafaeb01aa3e 100644
+--- a/drivers/net/wireless/mac80211_hwsim.c
++++ b/drivers/net/wireless/mac80211_hwsim.c
+@@ -2724,6 +2724,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
+ 	if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
+ 	    !info->attrs[HWSIM_ATTR_FLAGS] ||
+ 	    !info->attrs[HWSIM_ATTR_COOKIE] ||
++	    !info->attrs[HWSIM_ATTR_SIGNAL] ||
+ 	    !info->attrs[HWSIM_ATTR_TX_INFO])
+ 		goto out;
+ 
+diff --git a/drivers/of/irq.c b/drivers/of/irq.c
+index 1a7980692f25..f5d497989fcd 100644
+--- a/drivers/of/irq.c
++++ b/drivers/of/irq.c
+@@ -385,13 +385,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
+ EXPORT_SYMBOL_GPL(of_irq_to_resource);
+ 
+ /**
+- * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
++ * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
+  * @dev: pointer to device tree node
+- * @index: zero-based index of the irq
+- *
+- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+- * is not yet created.
++ * @index: zero-based index of the IRQ
+  *
++ * Returns the Linux IRQ number on success, 0 if the IRQ mapping fails,
++ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
++ * in case of any other failure.
+  */
+ int of_irq_get(struct device_node *dev, int index)
+ {
+@@ -412,12 +412,13 @@ int of_irq_get(struct device_node *dev, int index)
+ EXPORT_SYMBOL_GPL(of_irq_get);
+ 
+ /**
+- * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
++ * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
+  * @dev: pointer to device tree node
+- * @name: irq name
++ * @name: IRQ name
+  *
+- * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+- * is not yet created, or error code in case of any other failure.
++ * Returns the Linux IRQ number on success, 0 if the IRQ mapping fails,
++ * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
++ * in case of any other failure.
+  */
+ int of_irq_get_byname(struct device_node *dev, const char *name)
+ {
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index de08175aef0a..d32a72e96c72 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -1030,9 +1030,10 @@ static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
+ 	const struct mtk_desc_pin *pin;
+ 
+ 	chained_irq_enter(chip, desc);
+-	for (eint_num = 0; eint_num < pctl->devdata->ap_num; eint_num += 32) {
++	for (eint_num = 0;
++	     eint_num < pctl->devdata->ap_num;
++	     eint_num += 32, reg += 4) {
+ 		status = readl(reg);
+-		reg += 4;
+ 		while (status) {
+ 			offset = __ffs(status);
+ 			index = eint_num + offset;
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index ac418e73536d..42d3f82e75c7 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -227,6 +227,7 @@ static struct {
+ 	{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ 	{"Promise", "", NULL, BLIST_SPARSELUN},
++	{"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ 	{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ 	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 448ebdaa3d69..17fbf1d3eadc 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -909,9 +909,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
+ 	}
+ 
+ 	/*
+-	 * If we finished all bytes in the request we are done now.
++	 * special case: failed zero length commands always need to
++	 * drop down into the retry code. Otherwise, if we finished
++	 * all bytes in the request we are done now.
+ 	 */
+-	if (!scsi_end_request(req, error, good_bytes, 0))
++	if (!(blk_rq_bytes(req) == 0 && error) &&
++	    !scsi_end_request(req, error, good_bytes, 0))
+ 		return;
+ 
+ 	/*
+diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
+index f65f0d109fc8..5db5e91fd12f 100644
+--- a/drivers/thermal/cpu_cooling.c
++++ b/drivers/thermal/cpu_cooling.c
+@@ -52,7 +52,7 @@
+  *	registered cooling device.
+  * @cpufreq_state: integer value representing the current state of cpufreq
+  *	cooling	devices.
+- * @cpufreq_val: integer value representing the absolute value of the clipped
++ * @clipped_freq: integer value representing the absolute value of the clipped
+  *	frequency.
+  * @max_level: maximum cooling level. One less than total number of valid
+  *	cpufreq frequencies.
+@@ -66,7 +66,7 @@ struct cpufreq_cooling_device {
+ 	int id;
+ 	struct thermal_cooling_device *cool_dev;
+ 	unsigned int cpufreq_state;
+-	unsigned int cpufreq_val;
++	unsigned int clipped_freq;
+ 	unsigned int max_level;
+ 	unsigned int *freq_table;	/* In descending order */
+ 	struct cpumask allowed_cpus;
+@@ -195,7 +195,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
+ 					&cpufreq_dev->allowed_cpus))
+ 			continue;
+ 
+-		max_freq = cpufreq_dev->cpufreq_val;
++		max_freq = cpufreq_dev->clipped_freq;
+ 
+ 		if (policy->max != max_freq)
+ 			cpufreq_verify_within_limits(policy, 0, max_freq);
+@@ -273,7 +273,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
+ 
+ 	clip_freq = cpufreq_device->freq_table[state];
+ 	cpufreq_device->cpufreq_state = state;
+-	cpufreq_device->cpufreq_val = clip_freq;
++	cpufreq_device->clipped_freq = clip_freq;
+ 
+ 	cpufreq_update_policy(cpu);
+ 
+@@ -363,14 +363,6 @@ __cpufreq_cooling_register(struct device_node *np,
+ 		goto free_table;
+ 	}
+ 
+-	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
+-		 cpufreq_dev->id);
+-
+-	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
+-						      &cpufreq_cooling_ops);
+-	if (IS_ERR(cool_dev))
+-		goto remove_idr;
+-
+ 	/* Fill freq-table in descending order of frequencies */
+ 	for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ 		freq = find_next_max(table, freq);
+@@ -383,7 +375,15 @@ __cpufreq_cooling_register(struct device_node *np,
+ 			pr_debug("%s: freq:%u KHz\n", __func__, freq);
+ 	}
+ 
+-	cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
++	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
++		 cpufreq_dev->id);
++
++	cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
++						      &cpufreq_cooling_ops);
++	if (IS_ERR(cool_dev))
++		goto remove_idr;
++
++	cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
+ 	cpufreq_dev->cool_dev = cool_dev;
+ 
+ 	mutex_lock(&cooling_cpufreq_lock);
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 10bce74c427f..2c75b393d31a 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1618,7 +1618,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
+ 	struct dentry *dentry = __d_alloc(parent->d_sb, name);
+ 	if (!dentry)
+ 		return NULL;
+-
++	dentry->d_flags |= DCACHE_RCUACCESS;
+ 	spin_lock(&parent->d_lock);
+ 	/*
+ 	 * don't need child lock because it is not subject
+@@ -2410,7 +2410,6 @@ static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
+ {
+ 	BUG_ON(!d_unhashed(entry));
+ 	hlist_bl_lock(b);
+-	entry->d_flags |= DCACHE_RCUACCESS;
+ 	hlist_bl_add_head_rcu(&entry->d_hash, b);
+ 	hlist_bl_unlock(b);
+ }
+@@ -2629,6 +2628,7 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
+ 	/* ... and switch them in the tree */
+ 	if (IS_ROOT(dentry)) {
+ 		/* splicing a tree */
++		dentry->d_flags |= DCACHE_RCUACCESS;
+ 		dentry->d_parent = target->d_parent;
+ 		target->d_parent = target;
+ 		list_del_init(&target->d_child);
+diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
+index 866bb18efefe..e818f5ac7a26 100644
+--- a/fs/ecryptfs/kthread.c
++++ b/fs/ecryptfs/kthread.c
+@@ -25,6 +25,7 @@
+ #include <linux/slab.h>
+ #include <linux/wait.h>
+ #include <linux/mount.h>
++#include <linux/file.h>
+ #include "ecryptfs_kernel.h"
+ 
+ struct ecryptfs_open_req {
+@@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
+ 	(*lower_file) = dentry_open(&req.path, flags, cred);
+ 	if (!IS_ERR(*lower_file))
+-		goto out;
++		goto have_file;
+ 	if ((flags & O_ACCMODE) == O_RDONLY) {
+ 		rc = PTR_ERR((*lower_file));
+ 		goto out;
+@@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file,
+ 	mutex_unlock(&ecryptfs_kthread_ctl.mux);
+ 	wake_up(&ecryptfs_kthread_ctl.wait);
+ 	wait_for_completion(&req.done);
+-	if (IS_ERR(*lower_file))
++	if (IS_ERR(*lower_file)) {
+ 		rc = PTR_ERR(*lower_file);
++		goto out;
++	}
++have_file:
++	if ((*lower_file)->f_op->mmap == NULL) {
++		fput(*lower_file);
++		*lower_file = NULL;
++		rc = -EMEDIUMTYPE;
++	}
+ out:
+ 	return rc;
+ }
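
The have_file: path turns a lower file without ->mmap away at open time
rather than at first use. The same function-pointer probe in miniature
(the structures are stand-ins for the kernel's):

#include <errno.h>
#include <stdio.h>

struct file_ops { int (*mmap)(void); };
struct file { const struct file_ops *f_op; };

static int check_lower(const struct file *f)
{
	if (f->f_op->mmap == NULL)
		return -EMEDIUMTYPE;	/* Linux errno; fail early, not at mmap time */
	return 0;
}

int main(void)
{
	static const struct file_ops no_mmap = { .mmap = NULL };
	struct file f = { .f_op = &no_mmap };

	printf("ret=%d\n", check_lower(&f));	/* negative: rejected */
	return 0;
}
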
+diff --git a/fs/namespace.c b/fs/namespace.c
+index fce3cc1a3fa7..6257268147ee 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2390,8 +2390,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
+ 			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
+ 		}
+ 		if (type->fs_flags & FS_USERNS_VISIBLE) {
+-			if (!fs_fully_visible(type, &mnt_flags))
++			if (!fs_fully_visible(type, &mnt_flags)) {
++				put_filesystem(type);
+ 				return -EPERM;
++			}
+ 		}
+ 	}
+ 
+@@ -3236,7 +3238,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
+ 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ 			struct inode *inode = child->mnt_mountpoint->d_inode;
+ 			/* Only worry about locked mounts */
+-			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
++			if (!(child->mnt.mnt_flags & MNT_LOCKED))
+ 				continue;
+ 			/* Is the directory permanetly empty? */
+ 			if (!is_empty_dir_inode(inode))
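
The first hunk plugs a reference leak: the reference taken on the
filesystem type must be dropped on the new -EPERM early return, just like
on every other error path. A toy counter makes the leak visible (the names
are invented, not the VFS API):

#include <stdio.h>

static int refs;
static void get_type(void) { refs++; }	/* stands in for get_fs_type() */
static void put_type(void) { refs--; }	/* stands in for put_filesystem() */

static int do_mount_sketch(int visible)
{
	get_type();
	if (!visible) {
		put_type();	/* the fix: release before returning -EPERM */
		return -1;
	}
	return 0;	/* on success, the mount keeps the reference */
}

int main(void)
{
	do_mount_sketch(0);				/* take the failing path */
	printf("refs after failure = %d\n", refs);	/* 0 with the fix */
	return 0;
}
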
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index 68feb0f70e63..c3e1bc595e6d 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
+ 	if (IS_ERR(sb))
+ 		return ERR_CAST(sb);
+ 
++	/*
++	 * procfs isn't actually a stacking filesystem; however, there is
++	 * too much magic going on inside it to permit stacking things on
++	 * top of it
++	 */
++	sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
++
+ 	if (!proc_parse_options(options, ns)) {
+ 		deactivate_locked_super(sb);
+ 		return ERR_PTR(-EINVAL);
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index ffbc034c8810..cbf1ce800fd1 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -307,7 +307,7 @@
+ #define ICC_SGI1R_AFFINITY_1_SHIFT	16
+ #define ICC_SGI1R_AFFINITY_1_MASK	(0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
+ #define ICC_SGI1R_SGI_ID_SHIFT		24
+-#define ICC_SGI1R_SGI_ID_MASK		(0xff << ICC_SGI1R_SGI_ID_SHIFT)
++#define ICC_SGI1R_SGI_ID_MASK		(0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
+ #define ICC_SGI1R_AFFINITY_2_SHIFT	32
+ #define ICC_SGI1R_AFFINITY_2_MASK	(0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
+ #define ICC_SGI1R_IRQ_ROUTING_MODE_BIT	40
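
The SGI ID field of ICC_SGI1R_EL1 is only four bits wide, so the old 0xff
mask let neighbouring bits leak through (and built the mask in plain int
rather than a 64-bit type). A quick demonstration with an invented register
value:

#include <stdint.h>
#include <stdio.h>

#define SGI_ID_SHIFT	24
#define OLD_MASK	(0xffULL << SGI_ID_SHIFT)	/* 8 bits: too wide */
#define NEW_MASK	(0xfULL << SGI_ID_SHIFT)	/* 4-bit SGI INTID */

int main(void)
{
	uint64_t reg = 0x3fULL << SGI_ID_SHIFT;	/* stray bits above bit 27 */

	printf("old keeps 0x%llx\n", (unsigned long long)(reg & OLD_MASK));
	printf("new keeps 0x%llx\n", (unsigned long long)(reg & NEW_MASK));
	return 0;
}
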
+diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
+index 4cccea6b8934..6f1c3e41b2a6 100644
+--- a/kernel/locking/mutex.c
++++ b/kernel/locking/mutex.c
+@@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+ 	if (!hold_ctx)
+ 		return 0;
+ 
+-	if (unlikely(ctx == hold_ctx))
+-		return -EALREADY;
+-
+ 	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+ 	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+ #ifdef CONFIG_DEBUG_MUTEXES
+@@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (use_ww_ctx) {
++		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
++		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
++			return -EALREADY;
++	}
++
+ 	preempt_disable();
+ 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
+ 
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index afcc67a157fd..c5af4e3d4497 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta)
+ 		del_timer_sync(&sta->plink_timer);
+ 	}
+ 
++	/* make sure no readers can access nexthop sta from here on */
++	mesh_path_flush_by_nexthop(sta);
++	synchronize_net();
++
+ 	if (changed)
+ 		ieee80211_mbss_info_change_notify(sdata, changed);
+ }
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index b50ee5d622e1..c753211cb83f 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -955,8 +955,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr,
+ 			return private(dev, iwr, cmd, info, handler);
+ 	}
+ 	/* Old driver API : call driver ioctl handler */
+-	if (dev->netdev_ops->ndo_do_ioctl)
+-		return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	if (dev->netdev_ops->ndo_do_ioctl) {
++#ifdef CONFIG_COMPAT
++		if (info->flags & IW_REQUEST_FLAG_COMPAT) {
++			int ret = 0;
++			struct iwreq iwr_lcl;
++			struct compat_iw_point *iwp_compat = (void *) &iwr->u.data;
++
++			memcpy(&iwr_lcl, iwr, sizeof(struct iwreq));
++			iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer);
++			iwr_lcl.u.data.length = iwp_compat->length;
++			iwr_lcl.u.data.flags = iwp_compat->flags;
++
++			ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd);
++
++			iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer);
++			iwp_compat->length = iwr_lcl.u.data.length;
++			iwp_compat->flags = iwr_lcl.u.data.flags;
++
++			return ret;
++		} else
++#endif
++			return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
++	}
+ 	return -EOPNOTSUPP;
+ }
+ 
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index 78691d51a479..0ef6956dcfa6 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -653,7 +653,7 @@ static int do_of_entry (const char *filename, void *symval, char *alias)
+ 	len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+ 		      (*type)[0] ? *type : "*");
+ 
+-	if (compatible[0])
++	if ((*compatible)[0])
+ 		sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+ 			*compatible);
+ 
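
In file2alias the OF fields are pointers to fixed-size char arrays, so the
old compatible[0] tested an array, which decays to an always-non-null
pointer, instead of testing the first character. In miniature:

#include <stdio.h>

int main(void)
{
	char buf[32] = "";			/* empty "compatible" string */
	char (*compatible)[32] = &buf;		/* pointer to the array, as in symval */

	printf("(*compatible)[0] = %d\n", (*compatible)[0]);	/* 0: empty */
	/* compatible[0] is the array itself and decays to a non-null
	 * pointer, which is why the old test never skipped the C suffix. */
	return 0;
}
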
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index bee74795c9b9..a62872f7b41a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3608,13 +3608,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ {
+ 	static struct coef_fw coef0255[] = {
+-		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
+ 		WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
+ 		UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
+ 		WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
+ 		WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
+ 		{}
+ 	};
++	static struct coef_fw coef0255_1[] = {
++		WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
++		{}
++	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x1b, 0x0c0b),
+ 		WRITE_COEF(0x45, 0xc429),
+@@ -3657,7 +3664,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
++		alc_process_coef_fw(codec, coef0255_1);
++		alc_process_coef_fw(codec, coef0255);
++		break;
+ 	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
+ 	case 0x10ec0233:
+@@ -3854,6 +3865,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ 		{}
+ 	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
++		WRITE_COEF(0x1b, 0x0c6b),
++		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x45, 0xd429),
+ 		WRITE_COEF(0x1b, 0x0c2b),
+@@ -3887,9 +3904,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+-	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
++		break;
+ 	case 0x10ec0233:
+ 	case 0x10ec0283:
+ 		alc_process_coef_fw(codec, coef0233);
+@@ -3922,6 +3941,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
+ 		{}
+ 	};
++	static struct coef_fw coef0256[] = {
++		WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
++		WRITE_COEF(0x1b, 0x0c6b),
++		WRITE_COEFEX(0x57, 0x03, 0x8ea6),
++		{}
++	};
+ 	static struct coef_fw coef0233[] = {
+ 		WRITE_COEF(0x45, 0xe429),
+ 		WRITE_COEF(0x1b, 0x0c2b),
+@@ -3955,9 +3980,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
+ 
+ 	switch (codec->core.vendor_id) {
+ 	case 0x10ec0255:
+-	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+ 		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, coef0256);
++		break;
+ 	case 0x10ec0233:
+ 	case 0x10ec0283:
+ 		alc_process_coef_fw(codec, coef0233);
+@@ -4181,7 +4208,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ {
+ 	/* Set to iphone type */
+-	static struct coef_fw fw[] = {
++	static struct coef_fw alc255fw[] = {
+ 		WRITE_COEF(0x1b, 0x880b),
+ 		WRITE_COEF(0x45, 0xd089),
+ 		WRITE_COEF(0x1b, 0x080b),
+@@ -4189,7 +4216,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
+ 		WRITE_COEF(0x1b, 0x0c0b),
+ 		{}
+ 	};
+-	alc_process_coef_fw(codec, fw);
++	static struct coef_fw alc256fw[] = {
++		WRITE_COEF(0x1b, 0x884b),
++		WRITE_COEF(0x45, 0xd089),
++		WRITE_COEF(0x1b, 0x084b),
++		WRITE_COEF(0x46, 0x0004),
++		WRITE_COEF(0x1b, 0x0c4b),
++		{}
++	};
++	switch (codec->core.vendor_id) {
++	case 0x10ec0255:
++		alc_process_coef_fw(codec, alc255fw);
++		break;
++	case 0x10ec0256:
++		alc_process_coef_fw(codec, alc256fw);
++		break;
++	}
+ 	msleep(30);
+ }
+ 
+@@ -5446,6 +5488,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+@@ -5698,6 +5741,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x17, 0x40000000},
+ 		{0x1d, 0x40700001},
+ 		{0x21, 0x02211040}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60180},
++		{0x14, 0x90170120},
++		{0x21, 0x02211030}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x90a60160},
+diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
+index 1d56a901e791..1470f2aab091 100644
+--- a/virt/kvm/irqchip.c
++++ b/virt/kvm/irqchip.c
+@@ -51,7 +51,7 @@ int kvm_irq_map_gsi(struct kvm *kvm,
+ 
+ 	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
+ 					lockdep_is_held(&kvm->irq_lock));
+-	if (gsi < irq_rt->nr_rt_entries) {
++	if (irq_rt && gsi < irq_rt->nr_rt_entries) {
+ 		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
+ 			entries[n] = *e;
+ 			++n;


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-06-08 11:17 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-06-08 11:17 UTC (permalink / raw
  To: gentoo-commits

commit:     d8670f4c5aadaf0577ffb7e424f6bfd145d5ed2f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun  8 11:17:42 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun  8 11:17:42 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d8670f4c

Linux patch 4.1.26

 0000_README             |    4 +
 1025_linux-4.1.26.patch | 6090 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6094 insertions(+)

diff --git a/0000_README b/0000_README
index fcfa288..783092f 100644
--- a/0000_README
+++ b/0000_README
@@ -143,6 +143,10 @@ Patch:  1024_linux-4.1.25.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.25
 
+Patch:  1025_linux-4.1.26.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.26
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1025_linux-4.1.26.patch b/1025_linux-4.1.26.patch
new file mode 100644
index 0000000..573cabf
--- /dev/null
+++ b/1025_linux-4.1.26.patch
@@ -0,0 +1,6090 @@
+diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.txt b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
+index a6dafa83c6df..7a5ca56683cc 100644
+--- a/Documentation/devicetree/bindings/crypto/samsung-sss.txt
++++ b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
+@@ -23,10 +23,8 @@ Required properties:
+   - "samsung,exynos4210-secss" for Exynos4210, Exynos4212, Exynos4412, Exynos5250,
+ 		Exynos5260 and Exynos5420 SoCs.
+ - reg : Offset and length of the register set for the module
+-- interrupts : interrupt specifiers of SSS module interrupts, should contain
+-		following entries:
+-		- first : feed control interrupt (required for all variants),
+-		- second : hash interrupt (required only for samsung,s5pv210-secss).
++- interrupts : interrupt specifiers of SSS module interrupts (one feed
++		control interrupt).
+ 
+ - clocks : list of clock phandle and specifier pairs for all clocks  listed in
+ 		clock-names property.
+diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
+index dbe6623fed1c..1e52d67d0abf 100644
+--- a/Documentation/serial/tty.txt
++++ b/Documentation/serial/tty.txt
+@@ -198,9 +198,6 @@ TTY_IO_ERROR		If set, causes all subsequent userspace read/write
+ 
+ TTY_OTHER_CLOSED	Device is a pty and the other side has closed.
+ 
+-TTY_OTHER_DONE		Device is a pty and the other side has closed and
+-			all pending input processing has been completed.
+-
+ TTY_NO_WRITE_SPLIT	Prevent driver from splitting up writes into
+ 			smaller chunks.
+ 
+diff --git a/Makefile b/Makefile
+index c2f929d78726..080a87e290b9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 25
++SUBLEVEL = 26
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -374,7 +374,7 @@ AFLAGS_MODULE   =
+ LDFLAGS_MODULE  =
+ CFLAGS_KERNEL	=
+ AFLAGS_KERNEL	=
+-CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage
++CFLAGS_GCOV	= -fprofile-arcs -ftest-coverage -fno-tree-loop-im
+ 
+ 
+ # Use USERINCLUDE when you must reference the UAPI directories only.
+@@ -686,9 +686,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+ KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+ else
+ 
+-# This warning generated too much noise in a regular build.
+-# Use make W=1 to enable this warning (see scripts/Makefile.build)
++# These warnings generated too much noise in a regular build.
++# Use make W=1 to enable them (see scripts/Makefile.build)
+ KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
++KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+ endif
+ 
+ ifdef CONFIG_FRAME_POINTER
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index da09ddcfcc00..691ea94897fd 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -886,11 +886,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ 	VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+ 
+ 	old_pmd = *pmd;
+-	kvm_set_pmd(pmd, *new_pmd);
+-	if (pmd_present(old_pmd))
++	if (pmd_present(old_pmd)) {
++		pmd_clear(pmd);
+ 		kvm_tlb_flush_vmid_ipa(kvm, addr);
+-	else
++	} else {
+ 		get_page(virt_to_page(pmd));
++	}
++
++	kvm_set_pmd(pmd, *new_pmd);
+ 	return 0;
+ }
+ 
+@@ -939,12 +942,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ 
+ 	/* Create 2nd stage page table mapping - Level 3 */
+ 	old_pte = *pte;
+-	kvm_set_pte(pte, *new_pte);
+-	if (pte_present(old_pte))
++	if (pte_present(old_pte)) {
++		kvm_set_pte(pte, __pte(0));
+ 		kvm_tlb_flush_vmid_ipa(kvm, addr);
+-	else
++	} else {
+ 		get_page(virt_to_page(pte));
++	}
+ 
++	kvm_set_pte(pte, *new_pte);
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index 59bfae75dc98..d007a7b5015a 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -77,7 +77,6 @@
+  * Section
+  */
+ #define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
+-#define PMD_SECT_PROT_NONE	(_AT(pmdval_t, 1) << 58)
+ #define PMD_SECT_USER		(_AT(pmdval_t, 1) << 6)		/* AP[1] */
+ #define PMD_SECT_RDONLY		(_AT(pmdval_t, 1) << 7)		/* AP[2] */
+ #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 526a9cb218d3..f1fc3140dedb 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -285,6 +285,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ #endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+ 
++#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
+ #define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
+ #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
+ #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+@@ -293,7 +294,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+ #define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+ #define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+ #define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+-#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
++#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
+ 
+ #define __HAVE_ARCH_PMD_WRITE
+ #define pmd_write(pmd)		pte_write(pmd_pte(pmd))
+@@ -332,7 +333,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ 				     unsigned long size, pgprot_t vma_prot);
+ 
+ #define pmd_none(pmd)		(!pmd_val(pmd))
+-#define pmd_present(pmd)	(pmd_val(pmd))
+ 
+ #define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))
+ 
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 648112e90ed5..3972e65fbd5a 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -130,7 +130,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
+ 		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
+ 
+ 	if (!is_iabt)
+-		esr |= ESR_ELx_EC_DABT_LOW;
++		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
+ 
+ 	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
+ }
+diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
+index b955fafc58ba..d1adc59af5bf 100644
+--- a/arch/mips/ath79/early_printk.c
++++ b/arch/mips/ath79/early_printk.c
+@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
+ 	} while (1);
+ }
+ 
++#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
++
+ static void prom_putchar_ar71xx(unsigned char ch)
+ {
+ 	void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
+ 
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ 	__raw_writel(ch, base + UART_TX * 4);
+-	prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
++	prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
+ }
+ 
+ static void prom_putchar_ar933x(unsigned char ch)
+diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
+index 723229f4cf27..176de586a71a 100644
+--- a/arch/mips/include/asm/cacheflush.h
++++ b/arch/mips/include/asm/cacheflush.h
+@@ -51,7 +51,6 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
+ 	unsigned long start, unsigned long end);
+ extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
+ extern void __flush_dcache_page(struct page *page);
+-extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
+ 
+ #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+ static inline void flush_dcache_page(struct page *page)
+@@ -77,11 +76,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
+ static inline void flush_icache_page(struct vm_area_struct *vma,
+ 	struct page *page)
+ {
+-	if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
+-	    Page_dcache_dirty(page)) {
+-		__flush_icache_page(vma, page);
+-		ClearPageDcacheDirty(page);
+-	}
+ }
+ 
+ extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
+index 4c25823563fe..3585af093576 100644
+--- a/arch/mips/include/asm/kvm_host.h
++++ b/arch/mips/include/asm/kvm_host.h
+@@ -782,7 +782,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+ 
+ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
+ void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
+ void kvm_mips_init_count(struct kvm_vcpu *vcpu);
+ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
+ int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
+diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
+index af5638b12c75..38bbeda8644c 100644
+--- a/arch/mips/include/asm/msa.h
++++ b/arch/mips/include/asm/msa.h
+@@ -67,6 +67,19 @@ static inline void restore_msa(struct task_struct *t)
+ 		_restore_msa(t);
+ }
+ 
++static inline void init_msa_upper(void)
++{
++	/*
++	 * Check cpu_has_msa only if it's a constant. This will allow the
++	 * compiler to optimise out code for CPUs without MSA without adding
++	 * an extra redundant check for CPUs with MSA.
++	 */
++	if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
++		return;
++
++	_init_msa_upper();
++}
++
+ #ifdef TOOLCHAIN_SUPPORTS_MSA
+ 
+ #define __BUILD_MSA_CTL_REG(name, cs)				\
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 7fe24aef7fdc..f33206e27d8d 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -127,10 +127,14 @@ do {									\
+ 	}								\
+ } while(0)
+ 
++static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
++			      pte_t *ptep, pte_t pteval);
++
+ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ 
+ #define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
+ #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
++#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)
+ 
+ static inline void set_pte(pte_t *ptep, pte_t pte)
+ {
+@@ -148,7 +152,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
+ 			buddy->pte_high |= _PAGE_GLOBAL;
+ 	}
+ }
+-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+ 
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -166,6 +169,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ 
+ #define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
+ #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
++#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
+ 
+ /*
+  * Certain architectures need to do special things when pte's
+@@ -218,7 +222,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 	}
+ #endif
+ }
+-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+ 
+ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+@@ -234,6 +237,22 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
+ }
+ #endif
+ 
++static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
++			      pte_t *ptep, pte_t pteval)
++{
++	extern void __update_cache(unsigned long address, pte_t pte);
++
++	if (!pte_present(pteval))
++		goto cache_sync_done;
++
++	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
++		goto cache_sync_done;
++
++	__update_cache(addr, pteval);
++cache_sync_done:
++	set_pte(ptep, pteval);
++}
++
+ /*
+  * (pmds are folded into puds so this doesn't get actually called,
+  * but the define is needed for a generic inline function.)
+@@ -428,15 +447,12 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ 
+ extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
+ 	pte_t pte);
+-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
+-	pte_t pte);
+ 
+ static inline void update_mmu_cache(struct vm_area_struct *vma,
+ 	unsigned long address, pte_t *ptep)
+ {
+ 	pte_t pte = *ptep;
+ 	__update_tlb(vma, address, pte);
+-	__update_cache(vma, address, pte);
+ }
+ 
+ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
+index 2cb7fdead570..e2b5337e840f 100644
+--- a/arch/mips/include/uapi/asm/siginfo.h
++++ b/arch/mips/include/uapi/asm/siginfo.h
+@@ -28,7 +28,7 @@
+ 
+ #define __ARCH_SIGSYS
+ 
+-#include <uapi/asm-generic/siginfo.h>
++#include <asm-generic/siginfo.h>
+ 
+ /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
+ typedef struct siginfo {
+@@ -42,13 +42,13 @@ typedef struct siginfo {
+ 
+ 		/* kill() */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 		} _kill;
+ 
+ 		/* POSIX.1b timers */
+ 		struct {
+-			timer_t _tid;		/* timer id */
++			__kernel_timer_t _tid;	/* timer id */
+ 			int _overrun;		/* overrun count */
+ 			char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ 			sigval_t _sigval;	/* same as below */
+@@ -57,26 +57,26 @@ typedef struct siginfo {
+ 
+ 		/* POSIX.1b signals */
+ 		struct {
+-			pid_t _pid;		/* sender's pid */
++			__kernel_pid_t _pid;	/* sender's pid */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			sigval_t _sigval;
+ 		} _rt;
+ 
+ 		/* SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
++			__kernel_pid_t _pid;	/* which child */
+ 			__ARCH_SI_UID_T _uid;	/* sender's uid */
+ 			int _status;		/* exit code */
+-			clock_t _utime;
+-			clock_t _stime;
++			__kernel_clock_t _utime;
++			__kernel_clock_t _stime;
+ 		} _sigchld;
+ 
+ 		/* IRIX SIGCHLD */
+ 		struct {
+-			pid_t _pid;		/* which child */
+-			clock_t _utime;
++			__kernel_pid_t _pid;	/* which child */
++			__kernel_clock_t _utime;
+ 			int _status;		/* exit code */
+-			clock_t _stime;
++			__kernel_clock_t _stime;
+ 		} _irix_sigchld;
+ 
+ 		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+@@ -118,6 +118,4 @@ typedef struct siginfo {
+ #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
+ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
+ 
+-#include <asm-generic/siginfo.h>
+-
+ #endif /* _UAPI_ASM_SIGINFO_H */
+diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
+index f2977f00911b..e19fa363c8fe 100644
+--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
++++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
+@@ -27,6 +27,7 @@
+ #include <asm/inst.h>
+ #include <asm/mips-r2-to-r6-emul.h>
+ #include <asm/local.h>
++#include <asm/mipsregs.h>
+ #include <asm/ptrace.h>
+ #include <asm/uaccess.h>
+ 
+@@ -1250,10 +1251,10 @@ fpu_emul:
+ 			"	j	10b\n"
+ 			"	.previous\n"
+ 			"	.section	__ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1325,10 +1326,10 @@ fpu_emul:
+ 			"	j	10b\n"
+ 			"       .previous\n"
+ 			"	.section	__ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1396,10 +1397,10 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1466,10 +1467,10 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1581,14 +1582,14 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
+-			"	.word	5b,8b\n"
+-			"	.word	6b,8b\n"
+-			"	.word	7b,8b\n"
+-			"	.word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1700,14 +1701,14 @@ fpu_emul:
+ 			"	j      9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word  1b,8b\n"
+-			"	.word  2b,8b\n"
+-			"	.word  3b,8b\n"
+-			"	.word  4b,8b\n"
+-			"	.word  5b,8b\n"
+-			"	.word  6b,8b\n"
+-			"	.word  7b,8b\n"
+-			"	.word  0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set    pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1819,14 +1820,14 @@ fpu_emul:
+ 			"	j	9b\n"
+ 			"	.previous\n"
+ 			"	.section        __ex_table,\"a\"\n"
+-			"	.word	1b,8b\n"
+-			"	.word	2b,8b\n"
+-			"	.word	3b,8b\n"
+-			"	.word	4b,8b\n"
+-			"	.word	5b,8b\n"
+-			"	.word	6b,8b\n"
+-			"	.word	7b,8b\n"
+-			"	.word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"	.previous\n"
+ 			"	.set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1937,14 +1938,14 @@ fpu_emul:
+ 			"       j	9b\n"
+ 			"       .previous\n"
+ 			"       .section        __ex_table,\"a\"\n"
+-			"       .word	1b,8b\n"
+-			"       .word	2b,8b\n"
+-			"       .word	3b,8b\n"
+-			"       .word	4b,8b\n"
+-			"       .word	5b,8b\n"
+-			"       .word	6b,8b\n"
+-			"       .word	7b,8b\n"
+-			"       .word	0b,8b\n"
++			STR(PTR) " 1b,8b\n"
++			STR(PTR) " 2b,8b\n"
++			STR(PTR) " 3b,8b\n"
++			STR(PTR) " 4b,8b\n"
++			STR(PTR) " 5b,8b\n"
++			STR(PTR) " 6b,8b\n"
++			STR(PTR) " 7b,8b\n"
++			STR(PTR) " 0b,8b\n"
+ 			"       .previous\n"
+ 			"       .set	pop\n"
+ 			: "+&r"(rt), "=&r"(rs),
+@@ -1999,7 +2000,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word  1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "=&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV)
+@@ -2057,7 +2058,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word	1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "+&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV));
+@@ -2118,7 +2119,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word  1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "=&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV)
+@@ -2181,7 +2182,7 @@ fpu_emul:
+ 			"j	2b\n"
+ 			".previous\n"
+ 			".section        __ex_table,\"a\"\n"
+-			".word	1b, 3b\n"
++			STR(PTR) " 1b,3b\n"
+ 			".previous\n"
+ 			: "+&r"(res), "+&r"(err)
+ 			: "r"(vaddr), "i"(SIGSEGV));
+diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
+index f2975d4d1e44..89847bee2b53 100644
+--- a/arch/mips/kernel/process.c
++++ b/arch/mips/kernel/process.c
+@@ -457,7 +457,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ 		    *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
+ 			regs = (struct pt_regs *)*sp;
+ 			pc = regs->cp0_epc;
+-			if (__kernel_text_address(pc)) {
++			if (!user_mode(regs) && __kernel_text_address(pc)) {
+ 				*sp = regs->regs[29];
+ 				*ra = regs->regs[31];
+ 				return pc;
+@@ -603,6 +603,9 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 	if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+ 		return -EOPNOTSUPP;
+ 
++	/* Proceed with the mode switch */
++	preempt_disable();
++
+ 	/* Save FP & vector context, then disable FPU & MSA */
+ 	if (task->signal == current->signal)
+ 		lose_fpu(1);
+@@ -661,6 +664,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+ 
+ 	/* Allow threads to use FP again */
+ 	atomic_set(&task->mm->context.fp_mode_switching, 0);
++	preempt_enable();
+ 
+ 	return 0;
+ }
+diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
+index e933a309f2ea..f7968b5149b0 100644
+--- a/arch/mips/kernel/ptrace.c
++++ b/arch/mips/kernel/ptrace.c
+@@ -56,8 +56,7 @@ static void init_fp_ctx(struct task_struct *target)
+ 	/* Begin with data registers set to all 1s... */
+ 	memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+ 
+-	/* ...and FCSR zeroed */
+-	target->thread.fpu.fcr31 = 0;
++	/* FCSR has been preset by `mips_set_personality_nan'.  */
+ 
+ 	/*
+ 	 * Record that the target has "used" math, such that the context
+@@ -79,6 +78,22 @@ void ptrace_disable(struct task_struct *child)
+ }
+ 
+ /*
++ * Poke at FCSR according to its mask.  Don't set the cause bits as
++ * this is currently not handled correctly in FP context restoration
++ * and will cause an oops if a corresponding enable bit is set.
++ */
++static void ptrace_setfcr31(struct task_struct *child, u32 value)
++{
++	u32 fcr31;
++	u32 mask;
++
++	value &= ~FPU_CSR_ALL_X;
++	fcr31 = child->thread.fpu.fcr31;
++	mask = boot_cpu_data.fpu_msk31;
++	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
++}
++
++/*
+  * Read a general register set.	 We always use the 64-bit format, even
+  * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
+  * Registers are sign extended to fill the available space.
+@@ -158,9 +173,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ {
+ 	union fpureg *fregs;
+ 	u64 fpr_val;
+-	u32 fcr31;
+ 	u32 value;
+-	u32 mask;
+ 	int i;
+ 
+ 	if (!access_ok(VERIFY_READ, data, 33 * 8))
+@@ -175,9 +188,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+ 	}
+ 
+ 	__get_user(value, data + 64);
+-	fcr31 = child->thread.fpu.fcr31;
+-	mask = boot_cpu_data.fpu_msk31;
+-	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
++	ptrace_setfcr31(child, value);
+ 
+ 	/* FIR may not be written.  */
+ 
+@@ -720,7 +731,7 @@ long arch_ptrace(struct task_struct *child, long request,
+ 			break;
+ #endif
+ 		case FPC_CSR:
+-			child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
++			ptrace_setfcr31(child, data);
+ 			break;
+ 		case DSP_BASE ... DSP_BASE + 5: {
+ 			dspreg_t *dregs;
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 54923d6b7e16..74403953e407 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -143,7 +143,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+ 	if (!task)
+ 		task = current;
+ 
+-	if (raw_show_trace || !__kernel_text_address(pc)) {
++	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
+ 		show_raw_backtrace(sp);
+ 		return;
+ 	}
+@@ -1228,7 +1228,7 @@ static int enable_restore_fp_context(int msa)
+ 		err = init_fpu();
+ 		if (msa && !err) {
+ 			enable_msa();
+-			_init_msa_upper();
++			init_msa_upper();
+ 			set_thread_flag(TIF_USEDMSA);
+ 			set_thread_flag(TIF_MSA_CTX_LIVE);
+ 		}
+@@ -1291,7 +1291,7 @@ static int enable_restore_fp_context(int msa)
+ 	 */
+ 	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ 	if (!prior_msa && was_fpu_owner) {
+-		_init_msa_upper();
++		init_msa_upper();
+ 
+ 		goto out;
+ 	}
+@@ -1308,7 +1308,7 @@ static int enable_restore_fp_context(int msa)
+ 		 * of each vector register such that it cannot see data left
+ 		 * behind by another task.
+ 		 */
+-		_init_msa_upper();
++		init_msa_upper();
+ 	} else {
+ 		/* We need to restore the vector context. */
+ 		restore_msa(current);
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index 41b1b090f56f..dc10c77b7500 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+  */
+ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ {
+-	ktime_t expires;
++	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	ktime_t expires, threshold;
++	uint32_t count, compare;
+ 	int running;
+ 
+-	/* Is the hrtimer pending? */
++	/* Calculate the biased and scaled guest CP0_Count */
++	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++	compare = kvm_read_c0_guest_compare(cop0);
++
++	/*
++	 * Find whether CP0_Count has reached the closest timer interrupt. If
++	 * not, we shouldn't inject it.
++	 */
++	if ((int32_t)(count - compare) < 0)
++		return count;
++
++	/*
++	 * The CP0_Count we're going to return has already reached the closest
++	 * timer interrupt. Quickly check if it really is a new interrupt by
++	 * looking at whether the interval until the hrtimer expiry time is
++	 * less than 1/4 of the timer period.
++	 */
+ 	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+-	if (ktime_compare(now, expires) >= 0) {
++	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
++	if (ktime_before(expires, threshold)) {
+ 		/*
+ 		 * Cancel it while we handle it so there's no chance of
+ 		 * interference with the timeout handler.
+@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+ 		}
+ 	}
+ 
+-	/* Return the biased and scaled guest CP0_Count */
+-	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
++	return count;
+ }
+ 
+ /**
+@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+ }
+ 
+ /**
+- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
+- * @vcpu:	Virtual CPU.
+- *
+- * Recalculates and updates the expiry time of the hrtimer. This can be used
+- * after timer parameters have been altered which do not depend on the time that
+- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
+- * kvm_mips_resume_hrtimer() are used directly).
+- *
+- * It is guaranteed that no timer interrupts will be lost in the process.
+- *
+- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+- */
+-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
+-{
+-	ktime_t now;
+-	uint32_t count;
+-
+-	/*
+-	 * freeze_hrtimer takes care of a timer interrupts <= count, and
+-	 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
+-	 */
+-	now = kvm_mips_freeze_hrtimer(vcpu, &count);
+-	kvm_mips_resume_hrtimer(vcpu, now, count);
+-}
+-
+-/**
+  * kvm_mips_write_count() - Modify the count and update timer.
+  * @vcpu:	Virtual CPU.
+  * @count:	Guest CP0_Count value to set.
+@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+  * kvm_mips_write_compare() - Modify compare and update timer.
+  * @vcpu:	Virtual CPU.
+  * @compare:	New CP0_Compare value.
++ * @ack:	Whether to acknowledge timer interrupt.
+  *
+  * Update CP0_Compare to a new value and update the timeout.
++ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
++ * any pending timer interrupt is preserved.
+  */
+-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
++void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
+ {
+ 	struct mips_coproc *cop0 = vcpu->arch.cop0;
++	int dc;
++	u32 old_compare = kvm_read_c0_guest_compare(cop0);
++	ktime_t now;
++	uint32_t count;
+ 
+ 	/* if unchanged, must just be an ack */
+-	if (kvm_read_c0_guest_compare(cop0) == compare)
++	if (old_compare == compare) {
++		if (!ack)
++			return;
++		kvm_mips_callbacks->dequeue_timer_int(vcpu);
++		kvm_write_c0_guest_compare(cop0, compare);
+ 		return;
++	}
++
++	/* freeze_hrtimer() takes care of timer interrupts <= count */
++	dc = kvm_mips_count_disabled(vcpu);
++	if (!dc)
++		now = kvm_mips_freeze_hrtimer(vcpu, &count);
++
++	if (ack)
++		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ 
+-	/* Update compare */
+ 	kvm_write_c0_guest_compare(cop0, compare);
+ 
+-	/* Update timeout if count enabled */
+-	if (!kvm_mips_count_disabled(vcpu))
+-		kvm_mips_update_hrtimer(vcpu);
++	/* resume_hrtimer() takes care of timer interrupts > count */
++	if (!dc)
++		kvm_mips_resume_hrtimer(vcpu, now, count);
+ }
+ 
+ /**
+@@ -1095,9 +1106,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+ 
+ 				/* If we are writing to COMPARE */
+ 				/* Clear pending timer interrupt, if any */
+-				kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ 				kvm_mips_write_compare(vcpu,
+-						       vcpu->arch.gprs[rt]);
++						       vcpu->arch.gprs[rt],
++						       true);
+ 			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+ 				unsigned int old_val, val, change;
+ 
+diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
+index d836ed5b0bc7..307cc4c98bdd 100644
+--- a/arch/mips/kvm/trap_emul.c
++++ b/arch/mips/kvm/trap_emul.c
+@@ -547,7 +547,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+ 		kvm_mips_write_count(vcpu, v);
+ 		break;
+ 	case KVM_REG_MIPS_CP0_COMPARE:
+-		kvm_mips_write_compare(vcpu, v);
++		kvm_mips_write_compare(vcpu, v, false);
+ 		break;
+ 	case KVM_REG_MIPS_CP0_CAUSE:
+ 		/*
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 2b95e34fa9e8..81f645973eb3 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -445,9 +445,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ 	case spec_op:
+ 		switch (insn.r_format.func) {
+ 		case jalr_op:
+-			regs->regs[insn.r_format.rd] =
+-				regs->cp0_epc + dec_insn.pc_inc +
+-				dec_insn.next_pc_inc;
++			if (insn.r_format.rd != 0) {
++				regs->regs[insn.r_format.rd] =
++					regs->cp0_epc + dec_insn.pc_inc +
++					dec_insn.next_pc_inc;
++			}
+ 			/* Fall through */
+ 		case jr_op:
+ 			/* For R6, JR already emulated in jalr_op */
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 77d96db8253c..8a648e20b521 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -16,6 +16,7 @@
+ #include <linux/mm.h>
+ 
+ #include <asm/cacheflush.h>
++#include <asm/highmem.h>
+ #include <asm/processor.h>
+ #include <asm/cpu.h>
+ #include <asm/cpu-features.h>
+@@ -83,8 +84,6 @@ void __flush_dcache_page(struct page *page)
+ 	struct address_space *mapping = page_mapping(page);
+ 	unsigned long addr;
+ 
+-	if (PageHighMem(page))
+-		return;
+ 	if (mapping && !mapping_mapped(mapping)) {
+ 		SetPageDcacheDirty(page);
+ 		return;
+@@ -95,8 +94,15 @@ void __flush_dcache_page(struct page *page)
+ 	 * case is for exec env/arg pages and those are %99 certainly going to
+ 	 * get faulted into the tlb (and thus flushed) anyways.
+ 	 */
+-	addr = (unsigned long) page_address(page);
++	if (PageHighMem(page))
++		addr = (unsigned long)kmap_atomic(page);
++	else
++		addr = (unsigned long)page_address(page);
++
+ 	flush_data_cache_page(addr);
++
++	if (PageHighMem(page))
++		__kunmap_atomic((void *)addr);
+ }
+ 
+ EXPORT_SYMBOL(__flush_dcache_page);
+@@ -119,33 +125,28 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
+ 
+ EXPORT_SYMBOL(__flush_anon_page);
+ 
+-void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
+-{
+-	unsigned long addr;
+-
+-	if (PageHighMem(page))
+-		return;
+-
+-	addr = (unsigned long) page_address(page);
+-	flush_data_cache_page(addr);
+-}
+-EXPORT_SYMBOL_GPL(__flush_icache_page);
+-
+-void __update_cache(struct vm_area_struct *vma, unsigned long address,
+-	pte_t pte)
++void __update_cache(unsigned long address, pte_t pte)
+ {
+ 	struct page *page;
+ 	unsigned long pfn, addr;
+-	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
++	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
+ 
+ 	pfn = pte_pfn(pte);
+ 	if (unlikely(!pfn_valid(pfn)))
+ 		return;
+ 	page = pfn_to_page(pfn);
+-	if (page_mapping(page) && Page_dcache_dirty(page)) {
+-		addr = (unsigned long) page_address(page);
++	if (Page_dcache_dirty(page)) {
++		if (PageHighMem(page))
++			addr = (unsigned long)kmap_atomic(page);
++		else
++			addr = (unsigned long)page_address(page);
++
+ 		if (exec || pages_do_alias(addr, address & PAGE_MASK))
+ 			flush_data_cache_page(addr);
++
++		if (PageHighMem(page))
++			__kunmap_atomic((void *)addr);
++
+ 		ClearPageDcacheDirty(page);
+ 	}
+ }
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index cb565ad0a5b6..9a4e71261fca 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -1073,7 +1073,7 @@ void eeh_add_device_early(struct pci_dn *pdn)
+ 	struct pci_controller *phb;
+ 	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ 
+-	if (!edev || !eeh_enabled())
++	if (!edev)
+ 		return;
+ 
+ 	if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 90cc67904dc6..6d04c9efb496 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -166,6 +166,16 @@ static void *eeh_dev_save_state(void *data, void *userdata)
+ 	if (!edev)
+ 		return NULL;
+ 
++	/*
++	 * We cannot access the config space on some adapters.
++	 * Otherwise, it will cause fenced PHB. We don't save
++	 * the content in their config space and will restore
++	 * from the initial config space saved when the EEH
++	 * device is created.
++	 */
++	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
++		return NULL;
++
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (!pdev)
+ 		return NULL;
+@@ -305,6 +315,19 @@ static void *eeh_dev_restore_state(void *data, void *userdata)
+ 	if (!edev)
+ 		return NULL;
+ 
++	/*
++	 * The content in the config space isn't saved because
++	 * the blocked config space on some adapters. We have
++	 * to restore the initial saved config space when the
++	 * EEH device is created.
++	 */
++	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
++		if (list_is_last(&edev->list, &edev->pe->edevs))
++			eeh_pe_restore_bars(edev->pe);
++
++		return NULL;
++	}
++
+ 	pdev = eeh_dev_to_pci_dev(edev);
+ 	if (!pdev)
+ 		return NULL;
+@@ -502,9 +525,6 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
+ 	/* Save states */
+ 	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
+ 
+-	/* Report error */
+-	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
+-
+ 	/* Issue reset */
+ 	ret = eeh_reset_pe(pe);
+ 	if (ret) {
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index 9519e6bdc6d7..7662bfae0493 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -963,11 +963,6 @@ hv_facility_unavailable_relon_trampoline:
+ #endif
+ 	STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
+ 
+-	/* Other future vectors */
+-	.align	7
+-	.globl	__end_interrupts
+-__end_interrupts:
+-
+ 	.align	7
+ system_call_entry_direct:
+ #if defined(CONFIG_RELOCATABLE)
+@@ -1261,6 +1256,17 @@ __end_handlers:
+ 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+ 	STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
+ 
++	/*
++	 * The __end_interrupts marker must be past the out-of-line (OOL)
++	 * handlers, so that they are copied to real address 0x100 when running
++	 * a relocatable kernel. This ensures they can be reached from the short
++	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
++	 * directly, without using LOAD_HANDLER().
++	 */
++	.align	7
++	.globl	__end_interrupts
++__end_interrupts:
++
+ #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+ /*
+  * Data area reserved for FWNMI option.
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+index 123ff1bb2f60..cb6baa2252b7 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+@@ -633,6 +633,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 
+ 	/* clear STOP and INT from current entry */
+ 	buf->topa_index[buf->stop_pos]->stop = 0;
++	buf->topa_index[buf->stop_pos]->intr = 0;
+ 	buf->topa_index[buf->intr_pos]->intr = 0;
+ 
+ 	/* how many pages till the STOP marker */
+@@ -657,6 +658,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+ 	buf->intr_pos = idx;
+ 
+ 	buf->topa_index[buf->stop_pos]->stop = 1;
++	buf->topa_index[buf->stop_pos]->intr = 1;
+ 	buf->topa_index[buf->intr_pos]->intr = 1;
+ 
+ 	return 0;
+diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
+index d22f4b5bbc04..6b8b1073d9ec 100644
+--- a/arch/x86/pci/xen.c
++++ b/arch/x86/pci/xen.c
+@@ -488,8 +488,11 @@ int __init pci_xen_initial_domain(void)
+ #endif
+ 	__acpi_register_gsi = acpi_register_gsi_xen;
+ 	__acpi_unregister_gsi = NULL;
+-	/* Pre-allocate legacy irqs */
+-	for (irq = 0; irq < nr_legacy_irqs(); irq++) {
++	/*
++	 * Pre-allocate the legacy IRQs.  Use NR_LEGACY_IRQS here
++	 * because we don't have a PIC and thus nr_legacy_irqs() is zero.
++	 */
++	for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
+ 		int trigger, polarity;
+ 
+ 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 98f5316aad72..de039384ae7e 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -138,7 +138,7 @@ static struct osi_linux {
+ 	unsigned int	enable:1;
+ 	unsigned int	dmi:1;
+ 	unsigned int	cmdline:1;
+-	unsigned int	default_disabling:1;
++	u8		default_disabling;
+ } osi_linux = {0, 0, 0, 0};
+ 
+ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
+@@ -1452,10 +1452,13 @@ void __init acpi_osi_setup(char *str)
+ 	if (*str == '!') {
+ 		str++;
+ 		if (*str == '\0') {
+-			osi_linux.default_disabling = 1;
++			/* Do not override acpi_osi=!* */
++			if (!osi_linux.default_disabling)
++				osi_linux.default_disabling =
++					ACPI_DISABLE_ALL_VENDOR_STRINGS;
+ 			return;
+ 		} else if (*str == '*') {
+-			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
++			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
+ 			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ 				osi = &osi_setup_entries[i];
+ 				osi->enable = false;
+@@ -1528,10 +1531,13 @@ static void __init acpi_osi_setup_late(void)
+ 	acpi_status status;
+ 
+ 	if (osi_linux.default_disabling) {
+-		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
++		status = acpi_update_interfaces(osi_linux.default_disabling);
+ 
+ 		if (ACPI_SUCCESS(status))
+-			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
++			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
++				osi_linux.default_disabling ==
++				ACPI_DISABLE_ALL_STRINGS ?
++				" and feature groups" : "");
+ 	}
+ 
+ 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 3d874eca7104..9f198da6b19f 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -1259,14 +1259,15 @@ int dpm_suspend_late(pm_message_t state)
+ 		error = device_suspend_late(dev);
+ 
+ 		mutex_lock(&dpm_list_mtx);
++		if (!list_empty(&dev->power.entry))
++			list_move(&dev->power.entry, &dpm_late_early_list);
++
+ 		if (error) {
+ 			pm_dev_err(dev, state, " late", error);
+ 			dpm_save_failed_dev(dev_name(dev));
+ 			put_device(dev);
+ 			break;
+ 		}
+-		if (!list_empty(&dev->power.entry))
+-			list_move(&dev->power.entry, &dpm_late_early_list);
+ 		put_device(dev);
+ 
+ 		if (async_error)
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index 5070c4fe8542..5b06452e2af2 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -1462,11 +1462,16 @@ int pm_runtime_force_resume(struct device *dev)
+ 		goto out;
+ 	}
+ 
+-	ret = callback(dev);
++	ret = pm_runtime_set_active(dev);
+ 	if (ret)
+ 		goto out;
+ 
+-	pm_runtime_set_active(dev);
++	ret = callback(dev);
++	if (ret) {
++		pm_runtime_set_suspended(dev);
++		goto out;
++	}
++
+ 	pm_runtime_mark_last_busy(dev);
+ out:
+ 	pm_runtime_enable(dev);
+diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
+index 6653473f2757..eaa646dfa783 100644
+--- a/drivers/bluetooth/hci_vhci.c
++++ b/drivers/bluetooth/hci_vhci.c
+@@ -50,6 +50,7 @@ struct vhci_data {
+ 	wait_queue_head_t read_wait;
+ 	struct sk_buff_head readq;
+ 
++	struct mutex open_mutex;
+ 	struct delayed_work open_timeout;
+ };
+ 
+@@ -95,12 +96,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ 	return 0;
+ }
+ 
+-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
++static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
+ {
+ 	struct hci_dev *hdev;
+ 	struct sk_buff *skb;
+ 	__u8 dev_type;
+ 
++	if (data->hdev)
++		return -EBADFD;
++
+ 	/* bits 0-1 are dev_type (BR/EDR or AMP) */
+ 	dev_type = opcode & 0x03;
+ 
+@@ -159,6 +163,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+ 	return 0;
+ }
+ 
++static int vhci_create_device(struct vhci_data *data, __u8 opcode)
++{
++	int err;
++
++	mutex_lock(&data->open_mutex);
++	err = __vhci_create_device(data, opcode);
++	mutex_unlock(&data->open_mutex);
++
++	return err;
++}
++
+ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 				    struct iov_iter *from)
+ {
+@@ -197,11 +212,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
+ 		break;
+ 
+ 	case HCI_VENDOR_PKT:
+-		if (data->hdev) {
+-			kfree_skb(skb);
+-			return -EBADFD;
+-		}
+-
+ 		cancel_delayed_work_sync(&data->open_timeout);
+ 
+ 		opcode = *((__u8 *) skb->data);
+@@ -328,6 +338,7 @@ static int vhci_open(struct inode *inode, struct file *file)
+ 	skb_queue_head_init(&data->readq);
+ 	init_waitqueue_head(&data->read_wait);
+ 
++	mutex_init(&data->open_mutex);
+ 	INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
+ 
+ 	file->private_data = data;
+@@ -341,15 +352,18 @@ static int vhci_open(struct inode *inode, struct file *file)
+ static int vhci_release(struct inode *inode, struct file *file)
+ {
+ 	struct vhci_data *data = file->private_data;
+-	struct hci_dev *hdev = data->hdev;
++	struct hci_dev *hdev;
+ 
+ 	cancel_delayed_work_sync(&data->open_timeout);
+ 
++	hdev = data->hdev;
++
+ 	if (hdev) {
+ 		hci_unregister_dev(hdev);
+ 		hci_free_dev(hdev);
+ 	}
+ 
++	skb_queue_purge(&data->readq);
+ 	file->private_data = NULL;
+ 	kfree(data);
+ 
+diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
+index 7936dce4b878..8a31e17d0ee9 100644
+--- a/drivers/cpuidle/coupled.c
++++ b/drivers/cpuidle/coupled.c
+@@ -176,14 +176,12 @@ void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a)
+ 
+ /**
+  * cpuidle_state_is_coupled - check if a state is part of a coupled set
+- * @dev: struct cpuidle_device for the current cpu
+  * @drv: struct cpuidle_driver for the platform
+  * @state: index of the target state in drv->states
+  *
+  * Returns true if the target state is coupled with cpus besides this one
+  */
+-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+-	struct cpuidle_driver *drv, int state)
++bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
+ {
+ 	return drv->states[state].flags & CPUIDLE_FLAG_COUPLED;
+ }
+diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
+index 61c417b9e53f..81f60351aaf3 100644
+--- a/drivers/cpuidle/cpuidle.c
++++ b/drivers/cpuidle/cpuidle.c
+@@ -185,7 +185,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
+ 		tick_broadcast_exit();
+ 	}
+ 
+-	if (!cpuidle_state_is_coupled(dev, drv, entered_state))
++	if (!cpuidle_state_is_coupled(drv, index))
+ 		local_irq_enable();
+ 
+ 	diff = ktime_to_us(ktime_sub(time_end, time_start));
+@@ -234,7 +234,7 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ 		  int index)
+ {
+-	if (cpuidle_state_is_coupled(dev, drv, index))
++	if (cpuidle_state_is_coupled(drv, index))
+ 		return cpuidle_enter_state_coupled(dev, drv, index);
+ 	return cpuidle_enter_state(dev, drv, index);
+ }
+@@ -404,6 +404,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+ 	list_del(&dev->device_list);
+ 	per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ 	module_put(drv->owner);
++
++	dev->registered = 0;
+ }
+ 
+ static void __cpuidle_device_init(struct cpuidle_device *dev)
+diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
+index ee97e9672ecf..178c5ad3d568 100644
+--- a/drivers/cpuidle/cpuidle.h
++++ b/drivers/cpuidle/cpuidle.h
+@@ -34,15 +34,14 @@ extern int cpuidle_add_sysfs(struct cpuidle_device *dev);
+ extern void cpuidle_remove_sysfs(struct cpuidle_device *dev);
+ 
+ #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
+-bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+-		struct cpuidle_driver *drv, int state);
++bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state);
+ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
+ 		struct cpuidle_driver *drv, int next_state);
+ int cpuidle_coupled_register_device(struct cpuidle_device *dev);
+ void cpuidle_coupled_unregister_device(struct cpuidle_device *dev);
+ #else
+-static inline bool cpuidle_state_is_coupled(struct cpuidle_device *dev,
+-		struct cpuidle_driver *drv, int state)
++static inline
++bool cpuidle_state_is_coupled(struct cpuidle_driver *drv, int state)
+ {
+ 	return false;
+ }
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index b8b5d47acd7a..9bfd4100baa8 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -241,7 +241,7 @@ static void caam_jr_dequeue(unsigned long devarg)
+ struct device *caam_jr_alloc(void)
+ {
+ 	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
+-	struct device *dev = NULL;
++	struct device *dev = ERR_PTR(-ENODEV);
+ 	int min_tfm_cnt	= INT_MAX;
+ 	int tfm_cnt;
+ 
+diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
+index f214a8755827..36d936fb259e 100644
+--- a/drivers/crypto/s5p-sss.c
++++ b/drivers/crypto/s5p-sss.c
+@@ -149,7 +149,6 @@
+ 
+ /**
+  * struct samsung_aes_variant - platform specific SSS driver data
+- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
+  * @aes_offset: AES register offset from SSS module's base.
+  *
+  * Specifies platform specific configuration of SSS module.
+@@ -157,7 +156,6 @@
+  * expansion of its usage.
+  */
+ struct samsung_aes_variant {
+-	bool			    has_hash_irq;
+ 	unsigned int		    aes_offset;
+ };
+ 
+@@ -178,7 +176,6 @@ struct s5p_aes_dev {
+ 	struct clk                 *clk;
+ 	void __iomem               *ioaddr;
+ 	void __iomem               *aes_ioaddr;
+-	int                         irq_hash;
+ 	int                         irq_fc;
+ 
+ 	struct ablkcipher_request  *req;
+@@ -197,12 +194,10 @@ struct s5p_aes_dev {
+ static struct s5p_aes_dev *s5p_dev;
+ 
+ static const struct samsung_aes_variant s5p_aes_data = {
+-	.has_hash_irq	= true,
+ 	.aes_offset	= 0x4000,
+ };
+ 
+ static const struct samsung_aes_variant exynos_aes_data = {
+-	.has_hash_irq	= false,
+ 	.aes_offset	= 0x200,
+ };
+ 
+@@ -313,43 +308,55 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
+ 	return err;
+ }
+ 
+-static void s5p_aes_tx(struct s5p_aes_dev *dev)
++/*
++ * Returns true if new transmitting (output) data is ready and its
++ * address+length have to be written to device (by calling
++ * s5p_set_dma_outdata()). False otherwise.
++ */
++static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+ {
+ 	int err = 0;
++	bool ret = false;
+ 
+ 	s5p_unset_outdata(dev);
+ 
+ 	if (!sg_is_last(dev->sg_dst)) {
+ 		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+-		if (err) {
++		if (err)
+ 			s5p_aes_complete(dev, err);
+-			return;
+-		}
+-
+-		s5p_set_dma_outdata(dev, dev->sg_dst);
++		else
++			ret = true;
+ 	} else {
+ 		s5p_aes_complete(dev, err);
+ 
+ 		dev->busy = true;
+ 		tasklet_schedule(&dev->tasklet);
+ 	}
++
++	return ret;
+ }
+ 
+-static void s5p_aes_rx(struct s5p_aes_dev *dev)
++/*
++ * Returns true if new receiving (input) data is ready and its
++ * address+length have to be written to device (by calling
++ * s5p_set_dma_indata()). False otherwise.
++ */
++static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+ {
+ 	int err;
++	bool ret = false;
+ 
+ 	s5p_unset_indata(dev);
+ 
+ 	if (!sg_is_last(dev->sg_src)) {
+ 		err = s5p_set_indata(dev, sg_next(dev->sg_src));
+-		if (err) {
++		if (err)
+ 			s5p_aes_complete(dev, err);
+-			return;
+-		}
+-
+-		s5p_set_dma_indata(dev, dev->sg_src);
++		else
++			ret = true;
+ 	}
++
++	return ret;
+ }
+ 
+ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+@@ -358,18 +365,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
+ 	struct s5p_aes_dev     *dev  = platform_get_drvdata(pdev);
+ 	uint32_t                status;
+ 	unsigned long           flags;
++	bool			set_dma_tx = false;
++	bool			set_dma_rx = false;
+ 
+ 	spin_lock_irqsave(&dev->lock, flags);
+ 
+-	if (irq == dev->irq_fc) {
+-		status = SSS_READ(dev, FCINTSTAT);
+-		if (status & SSS_FCINTSTAT_BRDMAINT)
+-			s5p_aes_rx(dev);
+-		if (status & SSS_FCINTSTAT_BTDMAINT)
+-			s5p_aes_tx(dev);
+-
+-		SSS_WRITE(dev, FCINTPEND, status);
+-	}
++	status = SSS_READ(dev, FCINTSTAT);
++	if (status & SSS_FCINTSTAT_BRDMAINT)
++		set_dma_rx = s5p_aes_rx(dev);
++	if (status & SSS_FCINTSTAT_BTDMAINT)
++		set_dma_tx = s5p_aes_tx(dev);
++
++	SSS_WRITE(dev, FCINTPEND, status);
++
++	/*
++	 * Writing length of DMA block (either receiving or transmitting)
++	 * will start the operation immediately, so this should be done
++	 * at the end (even after clearing pending interrupts to not miss the
++	 * interrupt).
++	 */
++	if (set_dma_tx)
++		s5p_set_dma_outdata(dev, dev->sg_dst);
++	if (set_dma_rx)
++		s5p_set_dma_indata(dev, dev->sg_src);
+ 
+ 	spin_unlock_irqrestore(&dev->lock, flags);
+ 
+@@ -671,21 +689,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
+ 		goto err_irq;
+ 	}
+ 
+-	if (variant->has_hash_irq) {
+-		pdata->irq_hash = platform_get_irq(pdev, 1);
+-		if (pdata->irq_hash < 0) {
+-			err = pdata->irq_hash;
+-			dev_warn(dev, "hash interrupt is not available.\n");
+-			goto err_irq;
+-		}
+-		err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
+-				       IRQF_SHARED, pdev->name, pdev);
+-		if (err < 0) {
+-			dev_warn(dev, "hash interrupt is not available.\n");
+-			goto err_irq;
+-		}
+-	}
+-
+ 	pdata->busy = false;
+ 	pdata->variant = variant;
+ 	pdata->dev = dev;
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index cac422916c7a..c8b90b3be349 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -1549,7 +1549,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 			  int n, int width, int height)
+ {
+ 	int c, o;
+-	struct drm_device *dev = fb_helper->dev;
+ 	struct drm_connector *connector;
+ 	const struct drm_connector_helper_funcs *connector_funcs;
+ 	struct drm_encoder *encoder;
+@@ -1568,7 +1567,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 	if (modes[n] == NULL)
+ 		return best_score;
+ 
+-	crtcs = kzalloc(dev->mode_config.num_connector *
++	crtcs = kzalloc(fb_helper->connector_count *
+ 			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
+ 	if (!crtcs)
+ 		return best_score;
+@@ -1614,7 +1613,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
+ 		if (score > best_score) {
+ 			best_score = score;
+ 			memcpy(best_crtcs, crtcs,
+-			       dev->mode_config.num_connector *
++			       fb_helper->connector_count *
+ 			       sizeof(struct drm_fb_helper_crtc *));
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+index 6b43ae3ffd73..1616af209bfc 100644
+--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
++++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+@@ -72,7 +72,7 @@ static const char *const dsi_errors[] = {
+ 	"RX Prot Violation",
+ 	"HS Generic Write FIFO Full",
+ 	"LP Generic Write FIFO Full",
+-	"Generic Read Data Avail"
++	"Generic Read Data Avail",
+ 	"Special Packet Sent",
+ 	"Tearing Effect",
+ };
+diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
+index 4e7e7da2e03b..64783985e392 100644
+--- a/drivers/gpu/drm/i915/intel_fbdev.c
++++ b/drivers/gpu/drm/i915/intel_fbdev.c
+@@ -361,12 +361,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
+ 	uint64_t conn_configured = 0, mask;
+ 	int pass = 0;
+ 
+-	save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
++	save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool),
+ 			       GFP_KERNEL);
+ 	if (!save_enabled)
+ 		return false;
+ 
+-	memcpy(save_enabled, enabled, dev->mode_config.num_connector);
++	memcpy(save_enabled, enabled, fb_helper->connector_count);
+ 	mask = (1 << fb_helper->connector_count) - 1;
+ retry:
+ 	for (i = 0; i < fb_helper->connector_count; i++) {
+@@ -505,7 +505,7 @@ retry:
+ 	if (fallback) {
+ bail:
+ 		DRM_DEBUG_KMS("Not using firmware configuration\n");
+-		memcpy(enabled, save_enabled, dev->mode_config.num_connector);
++		memcpy(enabled, save_enabled, fb_helper->connector_count);
+ 		kfree(save_enabled);
+ 		return false;
+ 	}
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index 555b896d2bda..00bc49835e09 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -3574,6 +3574,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+ 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ 		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ 
++	memset(active, 0, sizeof(*active));
++
+ 	active->pipe_enabled = intel_crtc->active;
+ 
+ 	if (active->pipe_enabled) {
+diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
+index 6c99ee7bafa3..ee396ff167d9 100644
+--- a/drivers/hwmon/ads7828.c
++++ b/drivers/hwmon/ads7828.c
+@@ -120,6 +120,7 @@ static int ads7828_probe(struct i2c_client *client,
+ 	unsigned int vref_mv = ADS7828_INT_VREF_MV;
+ 	bool diff_input = false;
+ 	bool ext_vref = false;
++	unsigned int regval;
+ 
+ 	data = devm_kzalloc(dev, sizeof(struct ads7828_data), GFP_KERNEL);
+ 	if (!data)
+@@ -154,6 +155,15 @@ static int ads7828_probe(struct i2c_client *client,
+ 	if (!diff_input)
+ 		data->cmd_byte |= ADS7828_CMD_SD_SE;
+ 
++	/*
++	 * Datasheet specifies internal reference voltage is disabled by
++	 * default. The internal reference voltage needs to be enabled and
++	 * voltage needs to settle before getting valid ADC data. So perform a
++	 * dummy read to enable the internal reference voltage.
++	 */
++	if (!ext_vref)
++		regmap_read(data->regmap, data->cmd_byte, &regval);
++
+ 	hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+ 							   data,
+ 							   ads7828_groups);
+diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
+index e82edf810d1f..9021725e25e4 100644
+--- a/drivers/input/misc/pwm-beeper.c
++++ b/drivers/input/misc/pwm-beeper.c
+@@ -20,21 +20,40 @@
+ #include <linux/platform_device.h>
+ #include <linux/pwm.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
+ 
+ struct pwm_beeper {
+ 	struct input_dev *input;
+ 	struct pwm_device *pwm;
++	struct work_struct work;
+ 	unsigned long period;
+ };
+ 
+ #define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
+ 
++static void __pwm_beeper_set(struct pwm_beeper *beeper)
++{
++	unsigned long period = beeper->period;
++
++	if (period) {
++		pwm_config(beeper->pwm, period / 2, period);
++		pwm_enable(beeper->pwm);
++	} else
++		pwm_disable(beeper->pwm);
++}
++
++static void pwm_beeper_work(struct work_struct *work)
++{
++	struct pwm_beeper *beeper =
++		container_of(work, struct pwm_beeper, work);
++
++	__pwm_beeper_set(beeper);
++}
++
+ static int pwm_beeper_event(struct input_dev *input,
+ 			    unsigned int type, unsigned int code, int value)
+ {
+-	int ret = 0;
+ 	struct pwm_beeper *beeper = input_get_drvdata(input);
+-	unsigned long period;
+ 
+ 	if (type != EV_SND || value < 0)
+ 		return -EINVAL;
+@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
+ 		return -EINVAL;
+ 	}
+ 
+-	if (value == 0) {
+-		pwm_disable(beeper->pwm);
+-	} else {
+-		period = HZ_TO_NANOSECONDS(value);
+-		ret = pwm_config(beeper->pwm, period / 2, period);
+-		if (ret)
+-			return ret;
+-		ret = pwm_enable(beeper->pwm);
+-		if (ret)
+-			return ret;
+-		beeper->period = period;
+-	}
++	if (value == 0)
++		beeper->period = 0;
++	else
++		beeper->period = HZ_TO_NANOSECONDS(value);
++
++	schedule_work(&beeper->work);
+ 
+ 	return 0;
+ }
+ 
++static void pwm_beeper_stop(struct pwm_beeper *beeper)
++{
++	cancel_work_sync(&beeper->work);
++
++	if (beeper->period)
++		pwm_disable(beeper->pwm);
++}
++
++static void pwm_beeper_close(struct input_dev *input)
++{
++	struct pwm_beeper *beeper = input_get_drvdata(input);
++
++	pwm_beeper_stop(beeper);
++}
++
+ static int pwm_beeper_probe(struct platform_device *pdev)
+ {
+ 	unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
+@@ -87,6 +115,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 		goto err_free;
+ 	}
+ 
++	INIT_WORK(&beeper->work, pwm_beeper_work);
++
+ 	beeper->input = input_allocate_device();
+ 	if (!beeper->input) {
+ 		dev_err(&pdev->dev, "Failed to allocate input device\n");
+@@ -106,6 +136,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
+ 	beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
+ 
+ 	beeper->input->event = pwm_beeper_event;
++	beeper->input->close = pwm_beeper_close;
+ 
+ 	input_set_drvdata(beeper->input, beeper);
+ 
+@@ -135,7 +166,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
+ 
+ 	input_unregister_device(beeper->input);
+ 
+-	pwm_disable(beeper->pwm);
+ 	pwm_free(beeper->pwm);
+ 
+ 	kfree(beeper);
+@@ -147,8 +177,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
+ {
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
+ 
+-	if (beeper->period)
+-		pwm_disable(beeper->pwm);
++	pwm_beeper_stop(beeper);
+ 
+ 	return 0;
+ }
+@@ -157,10 +186,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
+ {
+ 	struct pwm_beeper *beeper = dev_get_drvdata(dev);
+ 
+-	if (beeper->period) {
+-		pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
+-		pwm_enable(beeper->pwm);
+-	}
++	if (beeper->period)
++		__pwm_beeper_set(beeper);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
+index 421e29e4cd81..5221450f9b57 100644
+--- a/drivers/input/misc/uinput.c
++++ b/drivers/input/misc/uinput.c
+@@ -895,9 +895,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ }
+ 
+ #ifdef CONFIG_COMPAT
++
++#define UI_SET_PHYS_COMPAT	_IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
++
+ static long uinput_compat_ioctl(struct file *file,
+ 				unsigned int cmd, unsigned long arg)
+ {
++	if (cmd == UI_SET_PHYS_COMPAT)
++		cmd = UI_SET_PHYS;
++
+ 	return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
+ }
+ #endif
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 49875adb6b44..1dbae580e8ca 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -353,6 +353,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
+ 		if (irqnr < 16) {
+ 			gic_write_eoir(irqnr);
+ #ifdef CONFIG_SMP
++			/*
++			 * Unlike GICv2, we don't need an smp_rmb() here.
++			 * The control dependency from gic_read_iar to
++			 * the ISB in gic_write_eoir is enough to ensure
++			 * that any shared data read by handle_IPI will
++			 * be read after the ACK.
++			 */
+ 			handle_IPI(irqnr, regs);
+ #else
+ 			WARN_ONCE(true, "Unexpected SGI received!\n");
+@@ -372,6 +379,15 @@ static void __init gic_dist_init(void)
+ 	writel_relaxed(0, base + GICD_CTLR);
+ 	gic_dist_wait_for_rwp();
+ 
++	/*
++	 * Configure SPIs as non-secure Group-1. This will only matter
++	 * if the GIC only has a single security state. This will not
++	 * do the right thing if the kernel is running in secure mode,
++	 * but that's not the intended use case anyway.
++	 */
++	for (i = 32; i < gic_data.irq_nr; i += 32)
++		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
++
+ 	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+ 
+ 	/* Enable distributor with ARE, Group1 */
+@@ -475,6 +491,9 @@ static void gic_cpu_init(void)
+ 
+ 	rbase = gic_data_rdist_sgi_base();
+ 
++	/* Configure SGIs/PPIs as non-secure Group-1 */
++	writel_relaxed(~0, rbase + GICR_IGROUPR0);
++
+ 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+ 
+ 	/* Give LPIs a spin */
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 01999d74bd3a..eb9fb9299ec5 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -278,6 +278,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ 		if (irqnr < 16) {
+ 			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+ #ifdef CONFIG_SMP
++			/*
++			 * Ensure any shared data written by the CPU sending
++			 * the IPI is read after we've read the ACK register
++			 * on the GIC.
++			 *
++			 * Pairs with the write barrier in gic_raise_softirq
++			 */
++			smp_rmb();
+ 			handle_IPI(irqnr, regs);
+ #endif
+ 			continue;
+diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
+index 004926955263..b0155b05cddb 100644
+--- a/drivers/mcb/mcb-parse.c
++++ b/drivers/mcb/mcb-parse.c
+@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
+ 	mdev->id = GDD_DEV(reg1);
+ 	mdev->rev = GDD_REV(reg1);
+ 	mdev->var = GDD_VAR(reg1);
+-	mdev->bar = GDD_BAR(reg1);
++	mdev->bar = GDD_BAR(reg2);
+ 	mdev->group = GDD_GRP(reg2);
+ 	mdev->inst = GDD_INS(reg2);
+ 
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 8085059ce925..4b777be714a4 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -259,7 +259,8 @@ static int put_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user
+ static int put_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up)
+ {
+ 	if (!access_ok(VERIFY_WRITE, up, sizeof(struct v4l2_create_buffers32)) ||
+-	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)))
++	    copy_to_user(up, kp, offsetof(struct v4l2_create_buffers32, format)) ||
++	    copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
+ 		return -EFAULT;
+ 	return __put_v4l2_format32(&kp->format, &up->format);
+ }
+diff --git a/drivers/mfd/intel_quark_i2c_gpio.c b/drivers/mfd/intel_quark_i2c_gpio.c
+index 1ce16037d043..958c13473e8c 100644
+--- a/drivers/mfd/intel_quark_i2c_gpio.c
++++ b/drivers/mfd/intel_quark_i2c_gpio.c
+@@ -90,19 +90,19 @@ static struct resource intel_quark_gpio_res[] = {
+ 
+ static struct mfd_cell intel_quark_mfd_cells[] = {
+ 	{
+-		.id = MFD_I2C_BAR,
+-		.name = "i2c_designware",
+-		.num_resources = ARRAY_SIZE(intel_quark_i2c_res),
+-		.resources = intel_quark_i2c_res,
+-		.ignore_resource_conflicts = true,
+-	},
+-	{
+ 		.id = MFD_GPIO_BAR,
+ 		.name = "gpio-dwapb",
+ 		.num_resources = ARRAY_SIZE(intel_quark_gpio_res),
+ 		.resources = intel_quark_gpio_res,
+ 		.ignore_resource_conflicts = true,
+ 	},
++	{
++		.id = MFD_I2C_BAR,
++		.name = "i2c_designware",
++		.num_resources = ARRAY_SIZE(intel_quark_i2c_res),
++		.resources = intel_quark_i2c_res,
++		.ignore_resource_conflicts = true,
++	},
+ };
+ 
+ static const struct pci_device_id intel_quark_mfd_ids[] = {
+@@ -248,12 +248,11 @@ static int intel_quark_mfd_probe(struct pci_dev *pdev,
+ 
+ 	dev_set_drvdata(&pdev->dev, quark_mfd);
+ 
+-	ret = intel_quark_i2c_setup(pdev, &intel_quark_mfd_cells[MFD_I2C_BAR]);
++	ret = intel_quark_i2c_setup(pdev, &intel_quark_mfd_cells[1]);
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = intel_quark_gpio_setup(pdev,
+-				     &intel_quark_mfd_cells[MFD_GPIO_BAR]);
++	ret = intel_quark_gpio_setup(pdev, &intel_quark_mfd_cells[0]);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
+index b7b3e8ee64f2..c30290f33430 100644
+--- a/drivers/mfd/omap-usb-tll.c
++++ b/drivers/mfd/omap-usb-tll.c
+@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
+ 
+ 		if (IS_ERR(tll->ch_clk[i]))
+ 			dev_dbg(dev, "can't get clock : %s\n", clkname);
++		else
++			clk_prepare(tll->ch_clk[i]);
+ 	}
+ 
+ 	pm_runtime_put_sync(dev);
+@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
+ 	tll_dev = NULL;
+ 	spin_unlock(&tll_lock);
+ 
+-	for (i = 0; i < tll->nch; i++)
+-		if (!IS_ERR(tll->ch_clk[i]))
++	for (i = 0; i < tll->nch; i++) {
++		if (!IS_ERR(tll->ch_clk[i])) {
++			clk_unprepare(tll->ch_clk[i]);
+ 			clk_put(tll->ch_clk[i]);
++		}
++	}
+ 
+ 	pm_runtime_disable(&pdev->dev);
+ 	return 0;
+@@ -420,7 +425,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
+ 			if (IS_ERR(tll->ch_clk[i]))
+ 				continue;
+ 
+-			r = clk_prepare_enable(tll->ch_clk[i]);
++			r = clk_enable(tll->ch_clk[i]);
+ 			if (r) {
+ 				dev_err(tll_dev,
+ 				 "Error enabling ch %d clock: %d\n", i, r);
+@@ -448,7 +453,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
+ 	for (i = 0; i < tll->nch; i++) {
+ 		if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
+ 			if (!IS_ERR(tll->ch_clk[i]))
+-				clk_disable_unprepare(tll->ch_clk[i]);
++				clk_disable(tll->ch_clk[i]);
+ 		}
+ 	}
+ 
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 31d2627d9d4d..3705c7e63521 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -2403,11 +2403,12 @@ static const struct mmc_fixup blk_fixups[] =
+ 		  MMC_QUIRK_BLK_NO_CMD23),
+ 
+ 	/*
+-	 * Some Micron MMC cards needs longer data read timeout than
+-	 * indicated in CSD.
++	 * Some MMC cards need longer data read timeout than indicated in CSD.
+ 	 */
+ 	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ 		  MMC_QUIRK_LONG_READ_TIME),
++	MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
++		  MMC_QUIRK_LONG_READ_TIME),
+ 
+ 	/*
+ 	 * On these Samsung MoviNAND parts, performing secure erase or
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 588fb7908642..b5d8906ac34f 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -831,11 +831,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
+ 	/*
+ 	 * Some cards require longer data read timeout than indicated in CSD.
+ 	 * Address this by setting the read timeout to a "reasonably high"
+-	 * value. For the cards tested, 300ms has proven enough. If necessary,
++	 * value. For the cards tested, 600ms has proven enough. If necessary,
+ 	 * this value can be increased if other problematic cards require this.
+ 	 */
+ 	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+-		data->timeout_ns = 300000000;
++		data->timeout_ns = 600000000;
+ 		data->timeout_clks = 0;
+ 	}
+ 
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index f36c76f8b232..3ccc89d4c473 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -330,6 +330,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ 	}
+ }
+ 
++/* Minimum partition switch timeout in milliseconds */
++#define MMC_MIN_PART_SWITCH_TIME	300
++
+ /*
+  * Decode extended CSD.
+  */
+@@ -394,6 +397,10 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ 
+ 		/* EXT_CSD value is in units of 10ms, but we store in ms */
+ 		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
++		/* Some eMMC set the value too low so set a minimum */
++		if (card->ext_csd.part_time &&
++		    card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
++			card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
+ 
+ 		/* Sleep / awake timeout in 100ns units */
+ 		if (sa_shift > 0 && sa_shift <= 0x17)
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 22d929fa3371..5d4b8a623a12 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -203,7 +203,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
+ 	.chip    = &sdhci_acpi_chip_int,
+ 	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
+ 		   MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.caps2   = MMC_CAP2_HC_ERASE_SZ,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+@@ -216,7 +216,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
+ 		   SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
+ 	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
+-		   MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++		   MMC_CAP_WAIT_WHILE_BUSY,
+ 	.flags   = SDHCI_ACPI_RUNTIME_PM,
+ 	.pm_caps = MMC_PM_KEEP_POWER,
+ 	.probe_slot	= sdhci_acpi_sdio_probe_slot,
+@@ -228,7 +228,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
+ 	.quirks  = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ 	.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
+ 		   SDHCI_QUIRK2_STOP_WITH_TC,
+-	.caps    = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
++	.caps    = MMC_CAP_WAIT_WHILE_BUSY,
+ 	.probe_slot	= sdhci_acpi_sd_probe_slot,
+ };
+ 
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 51bca035cd83..1c73ba6efdbd 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -426,8 +426,25 @@ retry:
+ 						 pnum, vol_id, lnum);
+ 					err = -EBADMSG;
+ 				} else {
+-					err = -EINVAL;
+-					ubi_ro_mode(ubi);
++					/*
++					 * Ending up here in the non-Fastmap case
++					 * is a clear bug as the VID header had to
++					 * be present at scan time to have it referenced.
++					 * With fastmap the story is more complicated.
++					 * Fastmap has the mapping info without the need
++					 * of a full scan. So the LEB could have been
++					 * unmapped, Fastmap cannot know this and keeps
++					 * the LEB referenced.
++					 * This is valid and works as the layer above UBI
++					 * has to do bookkeeping about used/referenced
++					 * LEBs in any case.
++					 */
++					if (ubi->fast_attach) {
++						err = -EBADMSG;
++					} else {
++						err = -EINVAL;
++						ubi_ro_mode(ubi);
++					}
+ 				}
+ 			}
+ 			goto out_free;
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 02a6de2f53ee..9f505b0dd292 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1051,6 +1051,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
+ 	ubi_msg(ubi, "fastmap WL pool size: %d",
+ 		ubi->fm_wl_pool.max_size);
+ 	ubi->fm_disabled = 0;
++	ubi->fast_attach = 1;
+ 
+ 	ubi_free_vid_hdr(ubi, vh);
+ 	kfree(ech);
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index c998212fc680..292a286ba489 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -462,6 +462,7 @@ struct ubi_debug_info {
+  * @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
+  * @fm_work: fastmap work queue
+  * @fm_work_scheduled: non-zero if fastmap work was scheduled
++ * @fast_attach: non-zero if UBI was attached by fastmap
+  *
+  * @used: RB-tree of used physical eraseblocks
+  * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -570,6 +571,7 @@ struct ubi_device {
+ 	size_t fm_size;
+ 	struct work_struct fm_work;
+ 	int fm_work_scheduled;
++	int fast_attach;
+ 
+ 	/* Wear-leveling sub-system's stuff */
+ 	struct rb_root used;
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index 141c2a42d7ed..910c12e2638e 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -696,11 +696,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
+ 	/* allow change of MTU according to the CANFD ability of the device */
+ 	switch (new_mtu) {
+ 	case CAN_MTU:
++		/* 'CANFD-only' controllers can not switch to CAN_MTU */
++		if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
++			return -EINVAL;
++
+ 		priv->ctrlmode &= ~CAN_CTRLMODE_FD;
+ 		break;
+ 
+ 	case CANFD_MTU:
+-		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
++		/* check for potential CANFD ability */
++		if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
++		    !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
+ 			return -EINVAL;
+ 
+ 		priv->ctrlmode |= CAN_CTRLMODE_FD;
+@@ -782,6 +788,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
+ 				= { .len = sizeof(struct can_bittiming_const) },
+ };
+ 
++static int can_validate(struct nlattr *tb[], struct nlattr *data[])
++{
++	bool is_can_fd = false;
++
++	/* Make sure that valid CAN FD configurations always consist of
++	 * - nominal/arbitration bittiming
++	 * - data bittiming
++	 * - control mode with CAN_CTRLMODE_FD set
++	 */
++
++	if (data[IFLA_CAN_CTRLMODE]) {
++		struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++
++		is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
++	}
++
++	if (is_can_fd) {
++		if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	if (data[IFLA_CAN_DATA_BITTIMING]) {
++		if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
++			return -EOPNOTSUPP;
++	}
++
++	return 0;
++}
++
+ static int can_changelink(struct net_device *dev,
+ 			  struct nlattr *tb[], struct nlattr *data[])
+ {
+@@ -813,19 +848,31 @@ static int can_changelink(struct net_device *dev,
+ 
+ 	if (data[IFLA_CAN_CTRLMODE]) {
+ 		struct can_ctrlmode *cm;
++		u32 ctrlstatic;
++		u32 maskedflags;
+ 
+ 		/* Do not allow changing controller mode while running */
+ 		if (dev->flags & IFF_UP)
+ 			return -EBUSY;
+ 		cm = nla_data(data[IFLA_CAN_CTRLMODE]);
++		ctrlstatic = priv->ctrlmode_static;
++		maskedflags = cm->flags & cm->mask;
++
++		/* check whether provided bits are allowed to be passed */
++		if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
++			return -EOPNOTSUPP;
++
++		/* do not check for static fd-non-iso if 'fd' is disabled */
++		if (!(maskedflags & CAN_CTRLMODE_FD))
++			ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
+ 
+-		/* check whether changed bits are allowed to be modified */
+-		if (cm->mask & ~priv->ctrlmode_supported)
++		/* make sure static options are provided by configuration */
++		if ((maskedflags & ctrlstatic) != ctrlstatic)
+ 			return -EOPNOTSUPP;
+ 
+ 		/* clear bits to be modified and copy the flag values */
+ 		priv->ctrlmode &= ~cm->mask;
+-		priv->ctrlmode |= (cm->flags & cm->mask);
++		priv->ctrlmode |= maskedflags;
+ 
+ 		/* CAN_CTRLMODE_FD can only be set when driver supports FD */
+ 		if (priv->ctrlmode & CAN_CTRLMODE_FD)
+@@ -966,6 +1013,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
+ 	.maxtype	= IFLA_CAN_MAX,
+ 	.policy		= can_policy,
+ 	.setup		= can_setup,
++	.validate	= can_validate,
+ 	.newlink	= can_newlink,
+ 	.changelink	= can_changelink,
+ 	.get_size	= can_get_size,
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index ef655177bb5e..37f15eb4260a 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -958,7 +958,7 @@ static struct net_device *alloc_m_can_dev(void)
+ 	priv->can.do_get_berr_counter = m_can_get_berr_counter;
+ 
+ 	/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+-	priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
++	can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
+ 
+ 	/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
+ 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 301081db1ef6..1b69427fbb29 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1947,7 +1947,12 @@ static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ 		goto out;
+ 	}
+ 
+-	if (filter && (filter != ar->debug.pktlog_filter)) {
++	if (filter == ar->debug.pktlog_filter) {
++		ret = count;
++		goto out;
++	}
++
++	if (filter) {
+ 		ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ 		if (ret) {
+ 			ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 1734cc50ded8..56e0a1de0c37 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3329,7 +3329,10 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
+ 		goto err_vdev_delete;
+ 	}
+ 
+-	if (ar->cfg_tx_chainmask) {
++	/* Configuring the number of spatial streams for a monitor interface
++	 * causes a target assert in qca9888 and qca6174.
++	 */
++	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
+ 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+ 
+ 		vdev_param = ar->wmi.vdev_param->nss;
+diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
+index ca4b7ccd697f..afa0f764c617 100644
+--- a/drivers/net/wireless/ath/ath5k/led.c
++++ b/drivers/net/wireless/ath/ath5k/led.c
+@@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = {
+ 	/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
+ 	/* HP Compaq C700 (nitrousnrg@gmail.com) */
+-	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
++	{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
+ 	/* LiteOn AR5BXB63 (magooz@salug.it) */
+ 	{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
+ 	/* IBM-specific AR5212 (all others) */
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index 46a389c20bfc..b7b02549ef54 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -49,6 +49,10 @@ int ath9k_led_blink;
+ module_param_named(blink, ath9k_led_blink, int, 0444);
+ MODULE_PARM_DESC(blink, "Enable LED blink on activity");
+ 
++static int ath9k_led_active_high = -1;
++module_param_named(led_active_high, ath9k_led_active_high, int, 0444);
++MODULE_PARM_DESC(led_active_high, "Invert LED polarity");
++
+ static int ath9k_btcoex_enable;
+ module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
+ MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+@@ -600,6 +604,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
+ 	if (ret)
+ 		return ret;
+ 
++	if (ath9k_led_active_high != -1)
++		ah->config.led_active_high = ath9k_led_active_high == 1;
++
+ 	/*
+ 	 * Enable WLAN/BT RX Antenna diversity only when:
+ 	 *
+diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
+index e6fef1be9977..7cdaf40c3057 100644
+--- a/drivers/net/wireless/ath/ath9k/pci.c
++++ b/drivers/net/wireless/ath/ath9k/pci.c
+@@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
+ 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
+ 	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
++
++#ifdef CONFIG_ATH9K_PCOEM
++	/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
++	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
++			 0x0029,
++			 PCI_VENDOR_ID_ATHEROS,
++			 0x2096),
++	  .driver_data = ATH9K_PCI_LED_ACT_HI },
++#endif
++
+ 	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+ 
+ #ifdef CONFIG_ATH9K_PCOEM
+diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
+index 01f56c7df8b5..6b4c10676533 100644
+--- a/drivers/net/wireless/rtlwifi/base.c
++++ b/drivers/net/wireless/rtlwifi/base.c
+@@ -1662,9 +1662,9 @@ void rtl_watchdog_wq_callback(void *data)
+ 		if (((rtlpriv->link_info.num_rx_inperiod +
+ 		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
+ 		    (rtlpriv->link_info.num_rx_inperiod > 2))
+-			rtl_lps_enter(hw);
+-		else
+ 			rtl_lps_leave(hw);
++		else
++			rtl_lps_enter(hw);
+ 	}
+ 
+ 	rtlpriv->link_info.num_rx_inperiod = 0;
+diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
+index cefe26991421..5c2c0a1b0353 100644
+--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
++++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtc8723b2ant.c
+@@ -1203,7 +1203,6 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 
+ 		/* Force GNT_BT to low */
+ 		btcoexist->btc_write_1byte_bitmask(btcoexist, 0x765, 0x18, 0x0);
+-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ 
+ 		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT) {
+ 			/* tell firmware "no antenna inverse" */
+@@ -1211,19 +1210,25 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
+ 			h2c_parameter[1] = 1;  /* ext switch type */
+ 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ 						h2c_parameter);
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
+ 		} else {
+ 			/* tell firmware "antenna inverse" */
+ 			h2c_parameter[0] = 1;
+ 			h2c_parameter[1] = 1;  /* ext switch type */
+ 			btcoexist->btc_fill_h2c(btcoexist, 0x65, 2,
+ 						h2c_parameter);
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
+ 		}
+ 	}
+ 
+ 	/* ext switch setting */
+ 	if (use_ext_switch) {
+ 		/* fixed internal switch S1->WiFi, S0->BT */
+-		btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		if (board_info->btdm_ant_pos == BTC_ANTENNA_AT_MAIN_PORT)
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x0);
++		else
++			btcoexist->btc_write_2byte(btcoexist, 0x948, 0x280);
++
+ 		switch (antpos_type) {
+ 		case BTC_ANT_WIFI_AT_MAIN:
+ 			/* ext switch main at wifi */
+diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+index b2791c893417..babd1490f20c 100644
+--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
++++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+@@ -965,13 +965,38 @@ void exhalbtc_set_chip_type(u8 chip_type)
+ 	}
+ }
+ 
+-void exhalbtc_set_ant_num(u8 type, u8 ant_num)
++void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num)
+ {
+ 	if (BT_COEX_ANT_TYPE_PG == type) {
+ 		gl_bt_coexist.board_info.pg_ant_num = ant_num;
+ 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		/* The antenna position:
++		 * Main (default) or Aux for pgAntNum=2 && btdmAntNum=1.
++		 * The antenna position should be determined by
++		 * an auto-detect mechanism.
++		 * The following is assumed to be Main,
++		 * and must be modified
++		 * once the auto-detect mechanism is ready.
++		 */
++		if ((gl_bt_coexist.board_info.pg_ant_num == 2) &&
++		    (gl_bt_coexist.board_info.btdm_ant_num == 1))
++			gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
++		else
++			gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
+ 	} else if (BT_COEX_ANT_TYPE_ANTDIV == type) {
+ 		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		gl_bt_coexist.board_info.btdm_ant_pos =
++						       BTC_ANTENNA_AT_MAIN_PORT;
++	} else if (type == BT_COEX_ANT_TYPE_DETECTED) {
++		gl_bt_coexist.board_info.btdm_ant_num = ant_num;
++		if (rtlpriv->cfg->mod_params->ant_sel == 1)
++			gl_bt_coexist.board_info.btdm_ant_pos =
++				BTC_ANTENNA_AT_AUX_PORT;
++		else
++			gl_bt_coexist.board_info.btdm_ant_pos =
++				BTC_ANTENNA_AT_MAIN_PORT;
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
+index 0a903ea179ef..f41ca57dd8a7 100644
+--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
++++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.h
+@@ -535,7 +535,7 @@ void exhalbtc_set_bt_patch_version(u16 bt_hci_version, u16 bt_patch_version);
+ void exhalbtc_update_min_bt_rssi(char bt_rssi);
+ void exhalbtc_set_bt_exist(bool bt_exist);
+ void exhalbtc_set_chip_type(u8 chip_type);
+-void exhalbtc_set_ant_num(u8 type, u8 ant_num);
++void exhalbtc_set_ant_num(struct rtl_priv *rtlpriv, u8 type, u8 ant_num);
+ void exhalbtc_display_bt_coex_info(struct btc_coexist *btcoexist);
+ void exhalbtc_signal_compensation(struct btc_coexist *btcoexist,
+ 				  u8 *rssi_wifi, u8 *rssi_bt);
+diff --git a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
+index b9b0cb7af8ea..d3fd9211b3a4 100644
+--- a/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
++++ b/drivers/net/wireless/rtlwifi/btcoexist/rtl_btc.c
+@@ -72,7 +72,10 @@ void rtl_btc_init_hal_vars(struct rtl_priv *rtlpriv)
+ 		 __func__, bt_type);
+ 	exhalbtc_set_chip_type(bt_type);
+ 
+-	exhalbtc_set_ant_num(BT_COEX_ANT_TYPE_PG, ant_num);
++	if (rtlpriv->cfg->mod_params->ant_sel == 1)
++		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_DETECTED, 1);
++	else
++		exhalbtc_set_ant_num(rtlpriv, BT_COEX_ANT_TYPE_PG, ant_num);
+ }
+ 
+ void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index 7f471bff435c..5b4048041147 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -1573,7 +1573,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
+ 							 true,
+ 							 HW_DESC_TXBUFF_ADDR),
+ 						 skb->len, PCI_DMA_TODEVICE);
+-				kfree_skb(skb);
++				dev_kfree_skb_irq(skb);
+ 				ring->idx = (ring->idx + 1) % ring->entries;
+ 			}
+ 			ring->idx = 0;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+index b9417268427e..bff9152c1ef0 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+@@ -2684,6 +2684,7 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ 					      bool auto_load_fail, u8 *hwinfo)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
++	struct rtl_mod_params *mod_params = rtlpriv->cfg->mod_params;
+ 	u8 value;
+ 	u32 tmpu_32;
+ 
+@@ -2702,6 +2703,10 @@ void rtl8723be_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
+ 		rtlpriv->btcoexist.btc_info.ant_num = ANT_X2;
+ 	}
+ 
++	/* override ant_num / ant_path */
++	if (mod_params->ant_sel)
++		rtlpriv->btcoexist.btc_info.ant_num =
++			(mod_params->ant_sel == 1 ? ANT_X2 : ANT_X1);
+ }
+ 
+ void rtl8723be_bt_reg_init(struct ieee80211_hw *hw)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+index 7bf88d9dcdc3..0adee281ded8 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ 
+ 	rtl8723be_bt_reg_init(hw);
+-	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ 	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+ 
+ 	rtlpriv->dm.dm_initialgain_enable = 1;
+@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
++	rtlpriv->cfg->mod_params->disable_watchdog =
++		rtlpriv->cfg->mod_params->disable_watchdog;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -267,6 +270,10 @@ static struct rtl_mod_params rtl8723be_mod_params = {
+ 	.inactiveps = true,
+ 	.swctrl_lps = false,
+ 	.fwctrl_lps = true,
++	.msi_support = false,
++	.disable_watchdog = false,
++	.debug = DBG_EMERG,
++	.ant_sel = 0,
+ };
+ 
+ static struct rtl_hal_cfg rtl8723be_hal_cfg = {
+@@ -388,6 +395,7 @@ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
+ module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
+ 		   bool, 0444);
++module_param_named(ant_sel, rtl8723be_mod_params.ant_sel, int, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+@@ -396,6 +404,7 @@ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ MODULE_PARM_DESC(disable_watchdog,
+ 		 "Set to 1 to disable the watchdog (default 0)\n");
++MODULE_PARM_DESC(ant_sel, "Set to 1 or 2 to force antenna number (default 0)\n");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+ 
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index f1fa8100f288..2fb502418fcb 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -2236,6 +2236,9 @@ struct rtl_mod_params {
+ 
+ 	/* default 0: 1 means do not disable interrupts */
+ 	bool int_clear;
++
++	/* select antenna */
++	int ant_sel;
+ };
+ 
+ struct rtl_hal_usbint_cfg {
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 25ad1b27ffae..993ff22df7ec 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -176,9 +176,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	u16 orig_cmd;
+ 	struct pci_bus_region region, inverted_region;
+ 
+-	if (dev->non_compliant_bars)
+-		return 0;
+-
+ 	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
+ 
+ 	/* No printks while decoding is disabled! */
+@@ -319,6 +316,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+ {
+ 	unsigned int pos, reg;
+ 
++	if (dev->non_compliant_bars)
++		return;
++
+ 	for (pos = 0; pos < howmany; pos++) {
+ 		struct resource *res = &dev->resource[pos];
+ 		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+index 86192be3b679..cd047556d9ca 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+@@ -109,6 +109,7 @@ struct exynos5440_pmx_func {
+  * @nr_groups: number of pin groups available.
+  * @pmx_functions: list of pin functions parsed from device tree.
+  * @nr_functions: number of pin functions available.
++ * @range: gpio range to register with pinctrl
+  */
+ struct exynos5440_pinctrl_priv_data {
+ 	void __iomem			*reg_base;
+@@ -119,6 +120,7 @@ struct exynos5440_pinctrl_priv_data {
+ 	unsigned int			nr_groups;
+ 	const struct exynos5440_pmx_func	*pmx_functions;
+ 	unsigned int			nr_functions;
++	struct pinctrl_gpio_range	range;
+ };
+ 
+ /**
+@@ -769,7 +771,6 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
+ 	struct pinctrl_desc *ctrldesc;
+ 	struct pinctrl_dev *pctl_dev;
+ 	struct pinctrl_pin_desc *pindesc, *pdesc;
+-	struct pinctrl_gpio_range grange;
+ 	char *pin_names;
+ 	int pin, ret;
+ 
+@@ -827,12 +828,12 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
+ 		return -EINVAL;
+ 	}
+ 
+-	grange.name = "exynos5440-pctrl-gpio-range";
+-	grange.id = 0;
+-	grange.base = 0;
+-	grange.npins = EXYNOS5440_MAX_PINS;
+-	grange.gc = priv->gc;
+-	pinctrl_add_gpio_range(pctl_dev, &grange);
++	priv->range.name = "exynos5440-pctrl-gpio-range";
++	priv->range.id = 0;
++	priv->range.base = 0;
++	priv->range.npins = EXYNOS5440_MAX_PINS;
++	priv->range.gc = priv->gc;
++	pinctrl_add_gpio_range(pctl_dev, &priv->range);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 8a28116b5805..b352a1fa2f2a 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -110,6 +110,11 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ 					  struct device *dev,
+ 					  const char *supply_name);
+ 
++static struct regulator_dev *dev_to_rdev(struct device *dev)
++{
++	return container_of(dev, struct regulator_dev, dev);
++}
++
+ static const char *rdev_get_name(struct regulator_dev *rdev)
+ {
+ 	if (rdev->constraints && rdev->constraints->name)
+@@ -3587,6 +3592,11 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
+ 			   &rdev->bypass_count);
+ }
+ 
++static int regulator_register_resolve_supply(struct device *dev, void *data)
++{
++	return regulator_resolve_supply(dev_to_rdev(dev));
++}
++
+ /**
+  * regulator_register - register regulator
+  * @regulator_desc: regulator to register
+@@ -3735,6 +3745,10 @@ regulator_register(const struct regulator_desc *regulator_desc,
+ 	list_add(&rdev->list, &regulator_list);
+ 
+ 	rdev_init_debugfs(rdev);
++
++	/* try to resolve regulators supply since a new one was registered */
++	class_for_each_device(&regulator_class, NULL, NULL,
++			      regulator_register_resolve_supply);
+ out:
+ 	mutex_unlock(&regulator_list_mutex);
+ 	kfree(config);
+@@ -4104,13 +4118,57 @@ static int __init regulator_init(void)
+ /* init early to allow our consumers to complete system booting */
+ core_initcall(regulator_init);
+ 
+-static int __init regulator_init_complete(void)
++static int __init regulator_late_cleanup(struct device *dev, void *data)
+ {
+-	struct regulator_dev *rdev;
+-	const struct regulator_ops *ops;
+-	struct regulation_constraints *c;
++	struct regulator_dev *rdev = dev_to_rdev(dev);
++	const struct regulator_ops *ops = rdev->desc->ops;
++	struct regulation_constraints *c = rdev->constraints;
+ 	int enabled, ret;
+ 
++	if (c && c->always_on)
++		return 0;
++
++	if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
++		return 0;
++
++	mutex_lock(&rdev->mutex);
++
++	if (rdev->use_count)
++		goto unlock;
++
++	/* If we can't read the status assume it's on. */
++	if (ops->is_enabled)
++		enabled = ops->is_enabled(rdev);
++	else
++		enabled = 1;
++
++	if (!enabled)
++		goto unlock;
++
++	if (have_full_constraints()) {
++		/* We log since this may kill the system if it goes
++		 * wrong. */
++		rdev_info(rdev, "disabling\n");
++		ret = _regulator_do_disable(rdev);
++		if (ret != 0)
++			rdev_err(rdev, "couldn't disable: %d\n", ret);
++	} else {
++		/* The intention is that in future we will
++		 * assume that full constraints are provided
++		 * so warn even if we aren't going to do
++		 * anything here.
++		 */
++		rdev_warn(rdev, "incomplete constraints, leaving on\n");
++	}
++
++unlock:
++	mutex_unlock(&rdev->mutex);
++
++	return 0;
++}
++
++static int __init regulator_init_complete(void)
++{
+ 	/*
+ 	 * Since DT doesn't provide an idiomatic mechanism for
+ 	 * enabling full constraints and since it's much more natural
+@@ -4120,58 +4178,13 @@ static int __init regulator_init_complete(void)
+ 	if (of_have_populated_dt())
+ 		has_full_constraints = true;
+ 
+-	mutex_lock(&regulator_list_mutex);
+-
+ 	/* If we have a full configuration then disable any regulators
+ 	 * we have permission to change the status for and which are
+ 	 * not in use or always_on.  This is effectively the default
+ 	 * for DT and ACPI as they have full constraints.
+ 	 */
+-	list_for_each_entry(rdev, &regulator_list, list) {
+-		ops = rdev->desc->ops;
+-		c = rdev->constraints;
+-
+-		if (c && c->always_on)
+-			continue;
+-
+-		if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+-			continue;
+-
+-		mutex_lock(&rdev->mutex);
+-
+-		if (rdev->use_count)
+-			goto unlock;
+-
+-		/* If we can't read the status assume it's on. */
+-		if (ops->is_enabled)
+-			enabled = ops->is_enabled(rdev);
+-		else
+-			enabled = 1;
+-
+-		if (!enabled)
+-			goto unlock;
+-
+-		if (have_full_constraints()) {
+-			/* We log since this may kill the system if it
+-			 * goes wrong. */
+-			rdev_info(rdev, "disabling\n");
+-			ret = _regulator_do_disable(rdev);
+-			if (ret != 0)
+-				rdev_err(rdev, "couldn't disable: %d\n", ret);
+-		} else {
+-			/* The intention is that in future we will
+-			 * assume that full constraints are provided
+-			 * so warn even if we aren't going to do
+-			 * anything here.
+-			 */
+-			rdev_warn(rdev, "incomplete constraints, leaving on\n");
+-		}
+-
+-unlock:
+-		mutex_unlock(&rdev->mutex);
+-	}
+-
+-	mutex_unlock(&regulator_list_mutex);
++	class_for_each_device(&regulator_class, NULL, NULL,
++			      regulator_late_cleanup);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
+index 18c9c0648bd0..3e8e92b1535d 100644
+--- a/drivers/scsi/aacraid/aacraid.h
++++ b/drivers/scsi/aacraid/aacraid.h
+@@ -29,6 +29,7 @@ enum {
+ #define AAC_INT_MODE_MSI		(1<<1)
+ #define AAC_INT_MODE_AIF		(1<<2)
+ #define AAC_INT_MODE_SYNC		(1<<3)
++#define AAC_INT_MODE_MSIX		(1<<16)
+ 
+ #define AAC_INT_ENABLE_TYPE1_INTX	0xfffffffb
+ #define AAC_INT_ENABLE_TYPE1_MSIX	0xfffffffa
+diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
+index 45db84ad322f..e736ecb3b8a4 100644
+--- a/drivers/scsi/aacraid/comminit.c
++++ b/drivers/scsi/aacraid/comminit.c
+@@ -37,6 +37,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/slab.h>
+ #include <linux/blkdev.h>
++#include <linux/delay.h>
+ #include <linux/completion.h>
+ #include <linux/mm.h>
+ #include <scsi/scsi_host.h>
+@@ -49,6 +50,20 @@ struct aac_common aac_config = {
+ 	.irq_mod = 1
+ };
+ 
++static inline int aac_is_msix_mode(struct aac_dev *dev)
++{
++	u32 status;
++
++	status = src_readl(dev, MUnit.OMR);
++	return (status & AAC_INT_MODE_MSIX);
++}
++
++static inline void aac_change_to_intx(struct aac_dev *dev)
++{
++	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
++	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
++}
++
+ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+ {
+ 	unsigned char *base;
+@@ -358,6 +373,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+ 	dev->comm_interface = AAC_COMM_PRODUCER;
+ 	dev->raw_io_interface = dev->raw_io_64 = 0;
+ 
++
++	/*
++	 * Enable INTX mode, if not already enabled
++	 */
++	if (aac_is_msix_mode(dev)) {
++		aac_change_to_intx(dev);
++		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
++	}
++
+ 	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+ 		0, 0, 0, 0, 0, 0,
+ 		status+0, status+1, status+2, status+3, NULL)) &&
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index 4b79d9511778..9410ffe68d0a 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -611,10 +611,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+ 					}
+ 					return -EFAULT;
+ 				}
+-				/* We used to udelay() here but that absorbed
+-				 * a CPU when a timeout occured. Not very
+-				 * useful. */
+-				cpu_relax();
++				/*
++				 * Allow other processes / CPUs to use the core
++				 */
++				schedule();
+ 			}
+ 		} else if (down_interruptible(&fibptr->event_wait)) {
+ 			/* Do nothing ... satisfy
+@@ -1999,6 +1999,10 @@ int aac_command_thread(void *data)
+ 		if (difference <= 0)
+ 			difference = 1;
+ 		set_current_state(TASK_INTERRUPTIBLE);
++
++		if (kthread_should_stop())
++			break;
++
+ 		schedule_timeout(difference);
+ 
+ 		if (kthread_should_stop())
+diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
+index 53baf37cd21a..2e78d1fe1f09 100644
+--- a/drivers/staging/comedi/drivers/das1800.c
++++ b/drivers/staging/comedi/drivers/das1800.c
+@@ -567,14 +567,17 @@ static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+ 	struct comedi_isadma_desc *desc;
+ 	int i;
+ 
+-	outb(0x0, dev->iobase + DAS1800_STATUS);	/* disable conversions */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_B);	/* disable interrupts and dma */
+-	outb(0x0, dev->iobase + DAS1800_CONTROL_A);	/* disable and clear fifo and stop triggering */
+-
+-	for (i = 0; i < 2; i++) {
+-		desc = &dma->desc[i];
+-		if (desc->chan)
+-			comedi_isadma_disable(desc->chan);
++	/* disable and stop conversions */
++	outb(0x0, dev->iobase + DAS1800_STATUS);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_B);
++	outb(0x0, dev->iobase + DAS1800_CONTROL_A);
++
++	if (dma) {
++		for (i = 0; i < 2; i++) {
++			desc = &dma->desc[i];
++			if (desc->chan)
++				comedi_isadma_disable(desc->chan);
++		}
+ 	}
+ 
+ 	return 0;
+@@ -934,13 +937,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
+ {
+ 	struct das1800_private *devpriv = dev->private;
+ 	struct comedi_isadma *dma = devpriv->dma;
+-	struct comedi_isadma_desc *desc = &dma->desc[0];
++	struct comedi_isadma_desc *desc;
+ 	unsigned int bytes;
+ 
+ 	if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
+ 		return;
+ 
+ 	dma->cur_dma = 0;
++	desc = &dma->desc[0];
+ 
+ 	/* determine a dma transfer size to fill buffer in 0.3 sec */
+ 	bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
+diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
+index 0dde34e3a7c5..545c60c826a1 100644
+--- a/drivers/thunderbolt/eeprom.c
++++ b/drivers/thunderbolt/eeprom.c
+@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
+ 	return tb_drom_parse_entries(sw);
+ err:
+ 	kfree(sw->drom);
++	sw->drom = NULL;
+ 	return -EIO;
+ 
+ }
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 2c34c3249972..2ec337612a79 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
+ 		}
+ 	}
+ 	spin_unlock(&gsm_mux_lock);
+-	WARN_ON(i == MAX_MUX);
++	/* open failed before registering => nothing to do */
++	if (i == MAX_MUX)
++		return;
+ 
+ 	/* In theory disconnecting DLCI 0 is sufficient but for some
+ 	   modems this is apparently not the case. */
+diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
+index bbc4ce66c2c1..644ddb841d9f 100644
+--- a/drivers/tty/n_hdlc.c
++++ b/drivers/tty/n_hdlc.c
+@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
+ 	add_wait_queue(&tty->read_wait, &wait);
+ 
+ 	for (;;) {
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+ 			ret = -EIO;
+ 			break;
+ 		}
+@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
+ 		/* set bits for operations that won't block */
+ 		if (n_hdlc->rx_buf_list.head)
+ 			mask |= POLLIN | POLLRDNORM;	/* readable */
+-		if (test_bit(TTY_OTHER_DONE, &tty->flags))
++		if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ 			mask |= POLLHUP;
+ 		if (tty_hung_up_p(filp))
+ 			mask |= POLLHUP;
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 33088c70ef3b..aba20f66bdd9 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1962,18 +1962,6 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
+ 		return ldata->commit_head - ldata->read_tail >= amt;
+ }
+ 
+-static inline int check_other_done(struct tty_struct *tty)
+-{
+-	int done = test_bit(TTY_OTHER_DONE, &tty->flags);
+-	if (done) {
+-		/* paired with cmpxchg() in check_other_closed(); ensures
+-		 * read buffer head index is not stale
+-		 */
+-		smp_mb__after_atomic();
+-	}
+-	return done;
+-}
+-
+ /**
+  *	copy_from_read_buf	-	copy read data directly
+  *	@tty: terminal device
+@@ -2192,7 +2180,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 	unsigned char __user *b = buf;
+ 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+-	int c, done;
++	int c;
+ 	int minimum, time;
+ 	ssize_t retval = 0;
+ 	long timeout;
+@@ -2260,32 +2248,35 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
+ 		    ((minimum - (b - buf)) >= 1))
+ 			ldata->minimum_to_wake = (minimum - (b - buf));
+ 
+-		done = check_other_done(tty);
+-
+ 		if (!input_available_p(tty, 0)) {
+-			if (done) {
+-				retval = -EIO;
+-				break;
+-			}
+-			if (tty_hung_up_p(file))
+-				break;
+-			if (!timeout)
+-				break;
+-			if (file->f_flags & O_NONBLOCK) {
+-				retval = -EAGAIN;
+-				break;
+-			}
+-			if (signal_pending(current)) {
+-				retval = -ERESTARTSYS;
+-				break;
+-			}
+ 			up_read(&tty->termios_rwsem);
++			tty_buffer_flush_work(tty->port);
++			down_read(&tty->termios_rwsem);
++			if (!input_available_p(tty, 0)) {
++				if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
++					retval = -EIO;
++					break;
++				}
++				if (tty_hung_up_p(file))
++					break;
++				if (!timeout)
++					break;
++				if (file->f_flags & O_NONBLOCK) {
++					retval = -EAGAIN;
++					break;
++				}
++				if (signal_pending(current)) {
++					retval = -ERESTARTSYS;
++					break;
++				}
++				up_read(&tty->termios_rwsem);
+ 
+-			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
+-					     timeout);
++				timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
++						timeout);
+ 
+-			down_read(&tty->termios_rwsem);
+-			continue;
++				down_read(&tty->termios_rwsem);
++				continue;
++			}
+ 		}
+ 
+ 		if (ldata->icanon && !L_EXTPROC(tty)) {
+@@ -2470,12 +2461,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
+ 
+ 	poll_wait(file, &tty->read_wait, wait);
+ 	poll_wait(file, &tty->write_wait, wait);
+-	if (check_other_done(tty))
+-		mask |= POLLHUP;
+ 	if (input_available_p(tty, 1))
+ 		mask |= POLLIN | POLLRDNORM;
++	else {
++		tty_buffer_flush_work(tty->port);
++		if (input_available_p(tty, 1))
++			mask |= POLLIN | POLLRDNORM;
++	}
+ 	if (tty->packet && tty->link->ctrl_status)
+ 		mask |= POLLPRI | POLLIN | POLLRDNORM;
++	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
++		mask |= POLLHUP;
+ 	if (tty_hung_up_p(file))
+ 		mask |= POLLHUP;
+ 	if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 254c183a5efe..b1f78aafb2d0 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -54,7 +54,7 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
+ 	if (!tty->link)
+ 		return;
+ 	set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	tty_flip_buffer_push(tty->link->port);
++	wake_up_interruptible(&tty->link->read_wait);
+ 	wake_up_interruptible(&tty->link->write_wait);
+ 	if (tty->driver->subtype == PTY_TYPE_MASTER) {
+ 		set_bit(TTY_OTHER_CLOSED, &tty->flags);
+@@ -242,9 +242,7 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
+ 		goto out;
+ 
+ 	clear_bit(TTY_IO_ERROR, &tty->flags);
+-	/* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
+ 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+-	clear_bit(TTY_OTHER_DONE, &tty->link->flags);
+ 	set_bit(TTY_THROTTLED, &tty->flags);
+ 	return 0;
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index b82b2a0f82a3..a78a62bf0c96 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1402,6 +1402,9 @@ byt_set_termios(struct uart_port *p, struct ktermios *termios,
+ 	unsigned long m, n;
+ 	u32 reg;
+ 
++	/* Gracefully handle the B0 case: fall back to B9600 */
++	fuart = fuart ? fuart : 9600 * 16;
++
+ 	/* Get Fuart closer to Fref */
+ 	fuart *= rounddown_pow_of_two(fref / fuart);
+ 
+diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
+index 7d2532b23969..100c2413c22f 100644
+--- a/drivers/tty/serial/ucc_uart.c
++++ b/drivers/tty/serial/ucc_uart.c
+@@ -1478,6 +1478,9 @@ static const struct of_device_id ucc_uart_match[] = {
+ 		.type = "serial",
+ 		.compatible = "ucc_uart",
+ 	},
++	{
++		.compatible = "fsl,t1040-ucc-uart",
++	},
+ 	{},
+ };
+ MODULE_DEVICE_TABLE(of, ucc_uart_match);
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index 2f78b77f0f81..6b9de83d297e 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -37,29 +37,6 @@
+ 
+ #define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
+ 
+-/*
+- * If all tty flip buffers have been processed by flush_to_ldisc() or
+- * dropped by tty_buffer_flush(), check if the linked pty has been closed.
+- * If so, wake the reader/poll to process
+- */
+-static inline void check_other_closed(struct tty_struct *tty)
+-{
+-	unsigned long flags, old;
+-
+-	/* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
+-	for (flags = ACCESS_ONCE(tty->flags);
+-	     test_bit(TTY_OTHER_CLOSED, &flags);
+-	     ) {
+-		old = flags;
+-		__set_bit(TTY_OTHER_DONE, &flags);
+-		flags = cmpxchg(&tty->flags, old, flags);
+-		if (old == flags) {
+-			wake_up_interruptible(&tty->read_wait);
+-			break;
+-		}
+-	}
+-}
+-
+ /**
+  *	tty_buffer_lock_exclusive	-	gain exclusive access to buffer
+  *	tty_buffer_unlock_exclusive	-	release exclusive access
+@@ -251,8 +228,6 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
+ 	if (ld && ld->ops->flush_buffer)
+ 		ld->ops->flush_buffer(tty);
+ 
+-	check_other_closed(tty);
+-
+ 	atomic_dec(&buf->priority);
+ 	mutex_unlock(&buf->lock);
+ }
+@@ -495,10 +470,8 @@ static void flush_to_ldisc(struct work_struct *work)
+ 		smp_rmb();
+ 		count = head->commit - head->read;
+ 		if (!count) {
+-			if (next == NULL) {
+-				check_other_closed(tty);
++			if (next == NULL)
+ 				break;
+-			}
+ 			buf->head = next;
+ 			tty_buffer_free(port, head);
+ 			continue;
+@@ -576,3 +549,8 @@ void tty_buffer_set_lock_subclass(struct tty_port *port)
+ {
+ 	lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
+ }
++
++void tty_buffer_flush_work(struct tty_port *port)
++{
++	flush_work(&port->buf.work);
++}
+diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
+index 7792c0e2d3b6..68323c267672 100644
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -283,7 +283,7 @@ static int usb_probe_interface(struct device *dev)
+ 	struct usb_device *udev = interface_to_usbdev(intf);
+ 	const struct usb_device_id *id;
+ 	int error = -ENODEV;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
+ 
+ 	dev_dbg(dev, "%s\n", __func__);
+ 
+@@ -331,12 +331,14 @@ static int usb_probe_interface(struct device *dev)
+ 	 * setting during probe, that should also be fine.  usb_set_interface()
+ 	 * will attempt to disable LPM, and fail if it can't disable it.
+ 	 */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		error = lpm_disable_error;
+-		goto err;
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			error = lpm_disable_error;
++			goto err;
++		}
+ 	}
+ 
+ 	/* Carry out a deferred switch to altsetting 0 */
+@@ -386,7 +388,8 @@ static int usb_unbind_interface(struct device *dev)
+ 	struct usb_interface *intf = to_usb_interface(dev);
+ 	struct usb_host_endpoint *ep, **eps = NULL;
+ 	struct usb_device *udev;
+-	int i, j, error, r, lpm_disable_error;
++	int i, j, error, r;
++	int lpm_disable_error = -ENODEV;
+ 
+ 	intf->condition = USB_INTERFACE_UNBINDING;
+ 
+@@ -394,12 +397,13 @@ static int usb_unbind_interface(struct device *dev)
+ 	udev = interface_to_usbdev(intf);
+ 	error = usb_autoresume_device(udev);
+ 
+-	/* Hub-initiated LPM policy may change, so attempt to disable LPM until
++	/* If hub-initiated LPM policy may change, attempt to disable LPM until
+ 	 * the driver is unbound.  If LPM isn't disabled, that's fine because it
+ 	 * wouldn't be enabled unless all the bound interfaces supported
+ 	 * hub-initiated LPM.
+ 	 */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
++	if (driver->disable_hub_initiated_lpm)
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
+ 
+ 	/*
+ 	 * Terminate all URBs for this interface unless the driver
+@@ -502,7 +506,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
+ 	struct device *dev;
+ 	struct usb_device *udev;
+ 	int retval = 0;
+-	int lpm_disable_error;
++	int lpm_disable_error = -ENODEV;
+ 
+ 	if (!iface)
+ 		return -ENODEV;
+@@ -519,12 +523,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
+ 
+ 	iface->condition = USB_INTERFACE_BOUND;
+ 
+-	/* Disable LPM until this driver is bound. */
+-	lpm_disable_error = usb_unlocked_disable_lpm(udev);
+-	if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
+-		dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
+-				__func__, driver->name);
+-		return -ENOMEM;
++	/* See the comment about disabling LPM in usb_probe_interface(). */
++	if (driver->disable_hub_initiated_lpm) {
++		lpm_disable_error = usb_unlocked_disable_lpm(udev);
++		if (lpm_disable_error) {
++			dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
++					__func__, driver->name);
++			return -ENOMEM;
++		}
+ 	}
+ 
+ 	/* Claimed interfaces are initially inactive (suspended) and
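Taken together, the three hunks in this file establish one pattern: usb_unlocked_disable_lpm() is attempted only when the driver actually set disable_hub_initiated_lpm, and lpm_disable_error is pre-seeded with -ENODEV so the later conditional re-enable stays balanced. A minimal sketch of that pairing, assuming a hypothetical probe function (the usb_unlocked_*_lpm() calls and the driver flag are the real in-kernel API):

#include <linux/usb.h>

static int example_probe(struct usb_interface *intf, struct usb_driver *drv)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	int lpm_disable_error = -ENODEV;	/* "never attempted" sentinel */

	if (drv->disable_hub_initiated_lpm) {
		lpm_disable_error = usb_unlocked_disable_lpm(udev);
		if (lpm_disable_error)
			return lpm_disable_error; /* driver cannot run with LPM on */
	}

	/* ... hypothetical device setup ... */

	/* Re-enable only if the disable above actually succeeded. */
	if (!lpm_disable_error)
		usb_unlocked_enable_lpm(udev);

	return 0;
}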
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 1c1385e3a824..e47cfcd5640c 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -915,7 +915,7 @@ static void usb_bus_init (struct usb_bus *bus)
+ 	bus->bandwidth_allocated = 0;
+ 	bus->bandwidth_int_reqs  = 0;
+ 	bus->bandwidth_isoc_reqs = 0;
+-	mutex_init(&bus->usb_address0_mutex);
++	mutex_init(&bus->devnum_next_mutex);
+ 
+ 	INIT_LIST_HEAD (&bus->bus_list);
+ }
+@@ -2446,6 +2446,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ 		return NULL;
+ 	}
+ 	if (primary_hcd == NULL) {
++		hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
++				GFP_KERNEL);
++		if (!hcd->address0_mutex) {
++			kfree(hcd);
++			dev_dbg(dev, "hcd address0 mutex alloc failed\n");
++			return NULL;
++		}
++		mutex_init(hcd->address0_mutex);
+ 		hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
+ 				GFP_KERNEL);
+ 		if (!hcd->bandwidth_mutex) {
+@@ -2457,6 +2465,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
+ 		dev_set_drvdata(dev, hcd);
+ 	} else {
+ 		mutex_lock(&usb_port_peer_mutex);
++		hcd->address0_mutex = primary_hcd->address0_mutex;
+ 		hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
+ 		hcd->primary_hcd = primary_hcd;
+ 		primary_hcd->primary_hcd = primary_hcd;
+@@ -2523,8 +2532,10 @@ static void hcd_release(struct kref *kref)
+ 	struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
+ 
+ 	mutex_lock(&usb_port_peer_mutex);
+-	if (usb_hcd_is_primary_hcd(hcd))
++	if (usb_hcd_is_primary_hcd(hcd)) {
++		kfree(hcd->address0_mutex);
+ 		kfree(hcd->bandwidth_mutex);
++	}
+ 	if (hcd->shared_hcd) {
+ 		struct usb_hcd *peer = hcd->shared_hcd;
+ 
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index db6985f04054..563d84eb484d 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2066,7 +2066,7 @@ static void choose_devnum(struct usb_device *udev)
+ 	struct usb_bus	*bus = udev->bus;
+ 
+ 	/* be safe when more hub events are proceed in parallel */
+-	mutex_lock(&bus->usb_address0_mutex);
++	mutex_lock(&bus->devnum_next_mutex);
+ 	if (udev->wusb) {
+ 		devnum = udev->portnum + 1;
+ 		BUG_ON(test_bit(devnum, bus->devmap.devicemap));
+@@ -2084,7 +2084,7 @@ static void choose_devnum(struct usb_device *udev)
+ 		set_bit(devnum, bus->devmap.devicemap);
+ 		udev->devnum = devnum;
+ 	}
+-	mutex_unlock(&bus->usb_address0_mutex);
++	mutex_unlock(&bus->devnum_next_mutex);
+ }
+ 
+ static void release_devnum(struct usb_device *udev)
+@@ -4257,7 +4257,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 	if (oldspeed == USB_SPEED_LOW)
+ 		delay = HUB_LONG_RESET_TIME;
+ 
+-	mutex_lock(&hdev->bus->usb_address0_mutex);
++	mutex_lock(hcd->address0_mutex);
+ 
+ 	/* Reset the device; full speed may morph to high speed */
+ 	/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
+@@ -4539,7 +4539,7 @@ fail:
+ 		hub_port_disable(hub, port1, 0);
+ 		update_devnum(udev, devnum);	/* for disconnect processing */
+ 	}
+-	mutex_unlock(&hdev->bus->usb_address0_mutex);
++	mutex_unlock(hcd->address0_mutex);
+ 	return retval;
+ }
+ 
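The hcd.c and hub.c hunks split the old per-bus usb_address0_mutex in two: devnum_next_mutex stays in struct usb_bus and only guards devnum allocation, while the SET_ADDRESS(0) serialization moves to an address0_mutex that the primary HCD allocates and its shared (peer) HCD aliases, so enumeration on both buses of one controller takes the same lock. A minimal sketch of the sharing scheme, with stand-in types:

#include <linux/mutex.h>
#include <linux/slab.h>

struct hcd_like {			/* stand-in for struct usb_hcd */
	struct mutex *address0_mutex;
};

static int share_address0_lock(struct hcd_like *primary,
			       struct hcd_like *peer)
{
	primary->address0_mutex = kmalloc(sizeof(*primary->address0_mutex),
					  GFP_KERNEL);
	if (!primary->address0_mutex)
		return -ENOMEM;
	mutex_init(primary->address0_mutex);

	/* The peer points at the very same mutex, so hub_port_init() on
	 * either bus serializes against the other; only the primary
	 * frees it, as in hcd_release() above.
	 */
	peer->address0_mutex = primary->address0_mutex;
	return 0;
}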
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 15c307155037..917d99ccf7e4 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -279,9 +279,8 @@ struct fsg_common {
+ 	int			cmnd_size;
+ 	u8			cmnd[MAX_COMMAND_SIZE];
+ 
+-	unsigned int		nluns;
+ 	unsigned int		lun;
+-	struct fsg_lun		**luns;
++	struct fsg_lun		*luns[FSG_MAX_LUNS];
+ 	struct fsg_lun		*curlun;
+ 
+ 	unsigned int		bulk_out_maxpacket;
+@@ -490,6 +489,16 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+ 	spin_unlock(&common->lock);
+ }
+ 
++static int _fsg_common_get_max_lun(struct fsg_common *common)
++{
++	int i = ARRAY_SIZE(common->luns) - 1;
++
++	while (i >= 0 && !common->luns[i])
++		--i;
++
++	return i;
++}
++
+ static int fsg_setup(struct usb_function *f,
+ 		     const struct usb_ctrlrequest *ctrl)
+ {
+@@ -533,7 +542,7 @@ static int fsg_setup(struct usb_function *f,
+ 				w_length != 1)
+ 			return -EDOM;
+ 		VDBG(fsg, "get max LUN\n");
+-		*(u8 *)req->buf = fsg->common->nluns - 1;
++		*(u8 *)req->buf = _fsg_common_get_max_lun(fsg->common);
+ 
+ 		/* Respond with data/status */
+ 		req->length = min((u16)1, w_length);
+@@ -2131,8 +2140,9 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+ 	}
+ 
+ 	/* Is the CBW meaningful? */
+-	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
+-			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
++	if (cbw->Lun >= ARRAY_SIZE(common->luns) ||
++	    cbw->Flags & ~US_BULK_FLAG_IN || cbw->Length <= 0 ||
++	    cbw->Length > MAX_COMMAND_SIZE) {
+ 		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+ 				"cmdlen %u\n",
+ 				cbw->Lun, cbw->Flags, cbw->Length);
+@@ -2159,7 +2169,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+ 	if (common->data_size == 0)
+ 		common->data_dir = DATA_DIR_NONE;
+ 	common->lun = cbw->Lun;
+-	if (common->lun < common->nluns)
++	if (common->lun < ARRAY_SIZE(common->luns))
+ 		common->curlun = common->luns[common->lun];
+ 	else
+ 		common->curlun = NULL;
+@@ -2307,7 +2317,7 @@ reset:
+ 	}
+ 
+ 	common->running = 1;
+-	for (i = 0; i < common->nluns; ++i)
++	for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
+ 		if (common->luns[i])
+ 			common->luns[i]->unit_attention_data =
+ 				SS_RESET_OCCURRED;
+@@ -2409,7 +2419,7 @@ static void handle_exception(struct fsg_common *common)
+ 	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+ 		common->state = FSG_STATE_STATUS_PHASE;
+ 	else {
+-		for (i = 0; i < common->nluns; ++i) {
++		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
+ 			curlun = common->luns[i];
+ 			if (!curlun)
+ 				continue;
+@@ -2453,7 +2463,7 @@ static void handle_exception(struct fsg_common *common)
+ 		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+ 		 * CONFIG_CHANGE cases.
+ 		 */
+-		/* for (i = 0; i < common->nluns; ++i) */
++		/* for (i = 0; i < ARRAY_SIZE(common->luns); ++i) */
+ 		/*	if (common->luns[i]) */
+ 		/*		common->luns[i]->unit_attention_data = */
+ 		/*			SS_RESET_OCCURRED;  */
+@@ -2552,12 +2562,11 @@ static int fsg_main_thread(void *common_)
+ 
+ 	if (!common->ops || !common->ops->thread_exits
+ 	 || common->ops->thread_exits(common) < 0) {
+-		struct fsg_lun **curlun_it = common->luns;
+-		unsigned i = common->nluns;
++		int i;
+ 
+ 		down_write(&common->filesem);
+-		for (; i--; ++curlun_it) {
+-			struct fsg_lun *curlun = *curlun_it;
++		for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
++			struct fsg_lun *curlun = common->luns[i];
+ 			if (!curlun || !fsg_lun_is_open(curlun))
+ 				continue;
+ 
+@@ -2676,6 +2685,7 @@ static struct fsg_common *fsg_common_setup(struct fsg_common *common)
+ 	init_completion(&common->thread_notifier);
+ 	init_waitqueue_head(&common->fsg_wait);
+ 	common->state = FSG_STATE_TERMINATED;
++	memset(common->luns, 0, sizeof(common->luns));
+ 
+ 	return common;
+ }
+@@ -2742,9 +2752,9 @@ error_release:
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers);
+ 
+-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs)
++void fsg_common_remove_lun(struct fsg_lun *lun)
+ {
+-	if (sysfs)
++	if (device_is_registered(&lun->dev))
+ 		device_unregister(&lun->dev);
+ 	fsg_lun_close(lun);
+ 	kfree(lun);
+@@ -2757,7 +2767,7 @@ static void _fsg_common_remove_luns(struct fsg_common *common, int n)
+ 
+ 	for (i = 0; i < n; ++i)
+ 		if (common->luns[i]) {
+-			fsg_common_remove_lun(common->luns[i], common->sysfs);
++			fsg_common_remove_lun(common->luns[i]);
+ 			common->luns[i] = NULL;
+ 		}
+ }
+@@ -2765,40 +2775,8 @@ EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
+ 
+ void fsg_common_remove_luns(struct fsg_common *common)
+ {
+-	_fsg_common_remove_luns(common, common->nluns);
+-}
+-
+-void fsg_common_free_luns(struct fsg_common *common)
+-{
+-	fsg_common_remove_luns(common);
+-	kfree(common->luns);
+-	common->luns = NULL;
++	_fsg_common_remove_luns(common, ARRAY_SIZE(common->luns));
+ }
+-EXPORT_SYMBOL_GPL(fsg_common_free_luns);
+-
+-int fsg_common_set_nluns(struct fsg_common *common, int nluns)
+-{
+-	struct fsg_lun **curlun;
+-
+-	/* Find out how many LUNs there should be */
+-	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+-		pr_err("invalid number of LUNs: %u\n", nluns);
+-		return -EINVAL;
+-	}
+-
+-	curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
+-	if (unlikely(!curlun))
+-		return -ENOMEM;
+-
+-	if (common->luns)
+-		fsg_common_free_luns(common);
+-
+-	common->luns = curlun;
+-	common->nluns = nluns;
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
+ 
+ void fsg_common_set_ops(struct fsg_common *common,
+ 			const struct fsg_operations *ops)
+@@ -2880,7 +2858,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ 	char *pathbuf, *p;
+ 	int rc = -ENOMEM;
+ 
+-	if (!common->nluns || !common->luns)
++	if (id >= ARRAY_SIZE(common->luns))
+ 		return -ENODEV;
+ 
+ 	if (common->luns[id])
+@@ -2949,7 +2927,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
+ 	return 0;
+ 
+ error_lun:
+-	if (common->sysfs)
++	if (device_is_registered(&lun->dev))
+ 		device_unregister(&lun->dev);
+ 	fsg_lun_close(lun);
+ 	common->luns[id] = NULL;
+@@ -2964,14 +2942,16 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
+ 	char buf[8]; /* enough for 100000000 different numbers, decimal */
+ 	int i, rc;
+ 
+-	for (i = 0; i < common->nluns; ++i) {
++	fsg_common_remove_luns(common);
++
++	for (i = 0; i < cfg->nluns; ++i) {
+ 		snprintf(buf, sizeof(buf), "lun%d", i);
+ 		rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL);
+ 		if (rc)
+ 			goto fail;
+ 	}
+ 
+-	pr_info("Number of LUNs=%d\n", common->nluns);
++	pr_info("Number of LUNs=%d\n", cfg->nluns);
+ 
+ 	return 0;
+ 
+@@ -2998,51 +2978,26 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
+ 
+-int fsg_common_run_thread(struct fsg_common *common)
+-{
+-	common->state = FSG_STATE_IDLE;
+-	/* Tell the thread to start working */
+-	common->thread_task =
+-		kthread_create(fsg_main_thread, common, "file-storage");
+-	if (IS_ERR(common->thread_task)) {
+-		common->state = FSG_STATE_TERMINATED;
+-		return PTR_ERR(common->thread_task);
+-	}
+-
+-	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+-
+-	wake_up_process(common->thread_task);
+-
+-	return 0;
+-}
+-EXPORT_SYMBOL_GPL(fsg_common_run_thread);
+-
+ static void fsg_common_release(struct kref *ref)
+ {
+ 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
++	int i;
+ 
+ 	/* If the thread isn't already dead, tell it to exit now */
+ 	if (common->state != FSG_STATE_TERMINATED) {
+ 		raise_exception(common, FSG_STATE_EXIT);
+ 		wait_for_completion(&common->thread_notifier);
++		common->thread_task = NULL;
+ 	}
+ 
+-	if (likely(common->luns)) {
+-		struct fsg_lun **lun_it = common->luns;
+-		unsigned i = common->nluns;
+-
+-		/* In error recovery common->nluns may be zero. */
+-		for (; i; --i, ++lun_it) {
+-			struct fsg_lun *lun = *lun_it;
+-			if (!lun)
+-				continue;
+-			fsg_lun_close(lun);
+-			if (common->sysfs)
+-				device_unregister(&lun->dev);
+-			kfree(lun);
+-		}
+-
+-		kfree(common->luns);
++	for (i = 0; i < ARRAY_SIZE(common->luns); ++i) {
++		struct fsg_lun *lun = common->luns[i];
++		if (!lun)
++			continue;
++		fsg_lun_close(lun);
++		if (device_is_registered(&lun->dev))
++			device_unregister(&lun->dev);
++		kfree(lun);
+ 	}
+ 
+ 	_fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
+@@ -3056,6 +3011,7 @@ static void fsg_common_release(struct kref *ref)
+ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ {
+ 	struct fsg_dev		*fsg = fsg_from_func(f);
++	struct fsg_common	*common = fsg->common;
+ 	struct usb_gadget	*gadget = c->cdev->gadget;
+ 	int			i;
+ 	struct usb_ep		*ep;
+@@ -3063,6 +3019,13 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ 	int			ret;
+ 	struct fsg_opts		*opts;
+ 
++	/* Don't allow to bind if we don't have at least one LUN */
++	ret = _fsg_common_get_max_lun(common);
++	if (ret < 0) {
++		pr_err("There should be at least one LUN.\n");
++		return -EINVAL;
++	}
++
+ 	opts = fsg_opts_from_func_inst(f->fi);
+ 	if (!opts->no_configfs) {
+ 		ret = fsg_common_set_cdev(fsg->common, c->cdev,
+@@ -3070,9 +3033,21 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+ 		if (ret)
+ 			return ret;
+ 		fsg_common_set_inquiry_string(fsg->common, NULL, NULL);
+-		ret = fsg_common_run_thread(fsg->common);
+-		if (ret)
++	}
++
++	if (!common->thread_task) {
++		common->state = FSG_STATE_IDLE;
++		common->thread_task =
++			kthread_create(fsg_main_thread, common, "file-storage");
++		if (IS_ERR(common->thread_task)) {
++			int ret = PTR_ERR(common->thread_task);
++			common->thread_task = NULL;
++			common->state = FSG_STATE_TERMINATED;
+ 			return ret;
++		}
++		DBG(common, "I/O thread pid: %d\n",
++		    task_pid_nr(common->thread_task));
++		wake_up_process(common->thread_task);
+ 	}
+ 
+ 	fsg->gadget = gadget;
+@@ -3355,7 +3330,7 @@ static void fsg_lun_drop(struct config_group *group, struct config_item *item)
+ 		unregister_gadget_item(gadget);
+ 	}
+ 
+-	fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs);
++	fsg_common_remove_lun(lun_opts->lun);
+ 	fsg_opts->common->luns[lun_opts->lun_id] = NULL;
+ 	lun_opts->lun_id = 0;
+ 	mutex_unlock(&fsg_opts->lock);
+@@ -3509,14 +3484,11 @@ static struct usb_function_instance *fsg_alloc_inst(void)
+ 		rc = PTR_ERR(opts->common);
+ 		goto release_opts;
+ 	}
+-	rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS);
+-	if (rc)
+-		goto release_opts;
+ 
+ 	rc = fsg_common_set_num_buffers(opts->common,
+ 					CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS);
+ 	if (rc)
+-		goto release_luns;
++		goto release_opts;
+ 
+ 	pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+ 
+@@ -3524,6 +3496,9 @@ static struct usb_function_instance *fsg_alloc_inst(void)
+ 	config.removable = true;
+ 	rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
+ 			(const char **)&opts->func_inst.group.cg_item.ci_name);
++	if (rc)
++		goto release_buffers;
++
+ 	opts->lun0.lun = opts->common->luns[0];
+ 	opts->lun0.lun_id = 0;
+ 	config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type);
+@@ -3534,8 +3509,8 @@ static struct usb_function_instance *fsg_alloc_inst(void)
+ 
+ 	return &opts->func_inst;
+ 
+-release_luns:
+-	kfree(opts->common->luns);
++release_buffers:
++	fsg_common_free_buffers(opts->common);
+ release_opts:
+ 	kfree(opts);
+ 	return ERR_PTR(rc);
+@@ -3561,23 +3536,12 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
+ 	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
+ 	struct fsg_common *common = opts->common;
+ 	struct fsg_dev *fsg;
+-	unsigned nluns, i;
+ 
+ 	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
+ 	if (unlikely(!fsg))
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	mutex_lock(&opts->lock);
+-	if (!opts->refcnt) {
+-		for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
+-			if (common->luns[i])
+-				nluns = i + 1;
+-		if (!nluns)
+-			pr_warn("No LUNS defined, continuing anyway\n");
+-		else
+-			common->nluns = nluns;
+-		pr_info("Number of LUNs=%u\n", common->nluns);
+-	}
+ 	opts->refcnt++;
+ 	mutex_unlock(&opts->lock);
+ 
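The driving change in this file is structural: fsg_common now holds a fixed luns[FSG_MAX_LUNS] array instead of a counted, separately allocated one, so the LUN count is derived on demand by scanning for the highest populated slot, as _fsg_common_get_max_lun() does above. A userspace-testable sketch of that backward scan (the array size, element types, and contents are examples only):

#include <stdio.h>

#define FSG_MAX_LUNS 8

static int get_max_lun(void *luns[FSG_MAX_LUNS])
{
	int i = FSG_MAX_LUNS - 1;

	/* Walk down from the top; the first non-NULL slot is the highest
	 * configured LUN.  Returns -1 when none exist, which the bind
	 * path above treats as "refuse to bind".
	 */
	while (i >= 0 && !luns[i])
		--i;
	return i;
}

int main(void)
{
	int a = 1, b = 2;
	void *luns[FSG_MAX_LUNS] = { &a, NULL, &b };	/* LUNs 0 and 2 */

	printf("max lun = %d\n", get_max_lun(luns));	/* prints 2 */
	return 0;
}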
+diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
+index b4866fcef30b..b6a9918eaefb 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.h
++++ b/drivers/usb/gadget/function/f_mass_storage.h
+@@ -137,14 +137,10 @@ void fsg_common_free_buffers(struct fsg_common *common);
+ int fsg_common_set_cdev(struct fsg_common *common,
+ 			struct usb_composite_dev *cdev, bool can_stall);
+ 
+-void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs);
++void fsg_common_remove_lun(struct fsg_lun *lun);
+ 
+ void fsg_common_remove_luns(struct fsg_common *common);
+ 
+-void fsg_common_free_luns(struct fsg_common *common);
+-
+-int fsg_common_set_nluns(struct fsg_common *common, int nluns);
+-
+ void fsg_common_set_ops(struct fsg_common *common,
+ 			const struct fsg_operations *ops);
+ 
+@@ -157,8 +153,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg);
+ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
+ 				   const char *pn);
+ 
+-int fsg_common_run_thread(struct fsg_common *common);
+-
+ void fsg_config_from_params(struct fsg_config *cfg,
+ 			    const struct fsg_module_parameters *params,
+ 			    unsigned int fsg_num_buffers);
+diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
+index 1194b09ae746..bc97ec645e68 100644
+--- a/drivers/usb/gadget/legacy/acm_ms.c
++++ b/drivers/usb/gadget/legacy/acm_ms.c
+@@ -147,10 +147,6 @@ static int acm_ms_do_config(struct usb_configuration *c)
+ 	if (status < 0)
+ 		goto put_msg;
+ 
+-	status = fsg_common_run_thread(opts->common);
+-	if (status)
+-		goto remove_acm;
+-
+ 	status = usb_add_function(c, f_msg);
+ 	if (status)
+ 		goto remove_acm;
+@@ -200,10 +196,6 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
+ 	if (status)
+ 		goto fail;
+ 
+-	status = fsg_common_set_nluns(opts->common, config.nluns);
+-	if (status)
+-		goto fail_set_nluns;
+-
+ 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+ 	if (status)
+ 		goto fail_set_cdev;
+@@ -239,8 +231,6 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
+ fail_string_ids:
+ 	fsg_common_remove_luns(opts->common);
+ fail_set_cdev:
+-	fsg_common_free_luns(opts->common);
+-fail_set_nluns:
+ 	fsg_common_free_buffers(opts->common);
+ fail:
+ 	usb_put_function_instance(fi_msg);
+diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
+index e7bfb081f111..3eb1b9fad5a0 100644
+--- a/drivers/usb/gadget/legacy/mass_storage.c
++++ b/drivers/usb/gadget/legacy/mass_storage.c
+@@ -146,10 +146,6 @@ static int msg_do_config(struct usb_configuration *c)
+ 	if (IS_ERR(f_msg))
+ 		return PTR_ERR(f_msg);
+ 
+-	ret = fsg_common_run_thread(opts->common);
+-	if (ret)
+-		goto put_func;
+-
+ 	ret = usb_add_function(c, f_msg);
+ 	if (ret)
+ 		goto put_func;
+@@ -191,10 +187,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
+ 	if (status)
+ 		goto fail;
+ 
+-	status = fsg_common_set_nluns(opts->common, config.nluns);
+-	if (status)
+-		goto fail_set_nluns;
+-
+ 	fsg_common_set_ops(opts->common, &ops);
+ 
+ 	status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
+@@ -227,8 +219,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
+ fail_string_ids:
+ 	fsg_common_remove_luns(opts->common);
+ fail_set_cdev:
+-	fsg_common_free_luns(opts->common);
+-fail_set_nluns:
+ 	fsg_common_free_buffers(opts->common);
+ fail:
+ 	usb_put_function_instance(fi_msg);
+diff --git a/drivers/usb/gadget/legacy/multi.c b/drivers/usb/gadget/legacy/multi.c
+index b21b51f0c9fa..5b2cfdda24a8 100644
+--- a/drivers/usb/gadget/legacy/multi.c
++++ b/drivers/usb/gadget/legacy/multi.c
+@@ -151,7 +151,6 @@ static struct usb_function *f_msg_rndis;
+ 
+ static int rndis_do_config(struct usb_configuration *c)
+ {
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
+ 
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -183,11 +182,6 @@ static int rndis_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
+ 	}
+ 
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
+-
+ 	ret = usb_add_function(c, f_msg_rndis);
+ 	if (ret)
+ 		goto err_run;
+@@ -239,7 +233,6 @@ static struct usb_function *f_msg_multi;
+ 
+ static int cdc_do_config(struct usb_configuration *c)
+ {
+-	struct fsg_opts *fsg_opts;
+ 	int ret;
+ 
+ 	if (gadget_is_otg(c->cdev->gadget)) {
+@@ -272,11 +265,6 @@ static int cdc_do_config(struct usb_configuration *c)
+ 		goto err_fsg;
+ 	}
+ 
+-	fsg_opts = fsg_opts_from_func_inst(fi_msg);
+-	ret = fsg_common_run_thread(fsg_opts->common);
+-	if (ret)
+-		goto err_run;
+-
+ 	ret = usb_add_function(c, f_msg_multi);
+ 	if (ret)
+ 		goto err_run;
+@@ -407,10 +395,6 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
+ 	if (status)
+ 		goto fail2;
+ 
+-	status = fsg_common_set_nluns(fsg_opts->common, config.nluns);
+-	if (status)
+-		goto fail_set_nluns;
+-
+ 	status = fsg_common_set_cdev(fsg_opts->common, cdev, config.can_stall);
+ 	if (status)
+ 		goto fail_set_cdev;
+@@ -448,8 +432,6 @@ static int __ref multi_bind(struct usb_composite_dev *cdev)
+ fail_string_ids:
+ 	fsg_common_remove_luns(fsg_opts->common);
+ fail_set_cdev:
+-	fsg_common_free_luns(fsg_opts->common);
+-fail_set_nluns:
+ 	fsg_common_free_buffers(fsg_opts->common);
+ fail2:
+ 	usb_put_function_instance(fi_msg);
+diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
+index 0bbafe795a72..bbddc44ce8bc 100644
+--- a/drivers/usb/misc/usbtest.c
++++ b/drivers/usb/misc/usbtest.c
+@@ -303,11 +303,20 @@ static unsigned mod_pattern;
+ module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
+ 
+-static inline void simple_fill_buf(struct urb *urb)
++static unsigned get_maxpacket(struct usb_device *udev, int pipe)
++{
++	struct usb_host_endpoint	*ep;
++
++	ep = usb_pipe_endpoint(udev, pipe);
++	return le16_to_cpup(&ep->desc.wMaxPacketSize);
++}
++
++static void simple_fill_buf(struct urb *urb)
+ {
+ 	unsigned	i;
+ 	u8		*buf = urb->transfer_buffer;
+ 	unsigned	len = urb->transfer_buffer_length;
++	unsigned	maxpacket;
+ 
+ 	switch (pattern) {
+ 	default:
+@@ -316,8 +325,9 @@ static inline void simple_fill_buf(struct urb *urb)
+ 		memset(buf, 0, len);
+ 		break;
+ 	case 1:			/* mod63 */
++		maxpacket = get_maxpacket(urb->dev, urb->pipe);
+ 		for (i = 0; i < len; i++)
+-			*buf++ = (u8) (i % 63);
++			*buf++ = (u8) ((i % maxpacket) % 63);
+ 		break;
+ 	}
+ }
+@@ -349,6 +359,7 @@ static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
+ 	u8		expected;
+ 	u8		*buf = urb->transfer_buffer;
+ 	unsigned	len = urb->actual_length;
++	unsigned	maxpacket = get_maxpacket(urb->dev, urb->pipe);
+ 
+ 	int ret = check_guard_bytes(tdev, urb);
+ 	if (ret)
+@@ -366,7 +377,7 @@ static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
+ 		 * with set_interface or set_config.
+ 		 */
+ 		case 1:			/* mod63 */
+-			expected = i % 63;
++			expected = (i % maxpacket) % 63;
+ 			break;
+ 		/* always fail unsupported patterns */
+ 		default:
+@@ -478,11 +489,14 @@ static void free_sglist(struct scatterlist *sg, int nents)
+ }
+ 
+ static struct scatterlist *
+-alloc_sglist(int nents, int max, int vary)
++alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
+ {
+ 	struct scatterlist	*sg;
++	unsigned int		n_size = 0;
+ 	unsigned		i;
+ 	unsigned		size = max;
++	unsigned		maxpacket =
++		get_maxpacket(interface_to_usbdev(dev->intf), pipe);
+ 
+ 	if (max == 0)
+ 		return NULL;
+@@ -511,7 +525,8 @@ alloc_sglist(int nents, int max, int vary)
+ 			break;
+ 		case 1:
+ 			for (j = 0; j < size; j++)
+-				*buf++ = (u8) (j % 63);
++				*buf++ = (u8) (((j + n_size) % maxpacket) % 63);
++			n_size += size;
+ 			break;
+ 		}
+ 
+@@ -2175,7 +2190,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
+ 			"TEST 5:  write %d sglists %d entries of %d bytes\n",
+ 				param->iterations,
+ 				param->sglen, param->length);
+-		sg = alloc_sglist(param->sglen, param->length, 0);
++		sg = alloc_sglist(param->sglen, param->length,
++				0, dev, dev->out_pipe);
+ 		if (!sg) {
+ 			retval = -ENOMEM;
+ 			break;
+@@ -2193,7 +2209,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
+ 			"TEST 6:  read %d sglists %d entries of %d bytes\n",
+ 				param->iterations,
+ 				param->sglen, param->length);
+-		sg = alloc_sglist(param->sglen, param->length, 0);
++		sg = alloc_sglist(param->sglen, param->length,
++				0, dev, dev->in_pipe);
+ 		if (!sg) {
+ 			retval = -ENOMEM;
+ 			break;
+@@ -2210,7 +2227,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
+ 			"TEST 7:  write/%d %d sglists %d entries 0..%d bytes\n",
+ 				param->vary, param->iterations,
+ 				param->sglen, param->length);
+-		sg = alloc_sglist(param->sglen, param->length, param->vary);
++		sg = alloc_sglist(param->sglen, param->length,
++				param->vary, dev, dev->out_pipe);
+ 		if (!sg) {
+ 			retval = -ENOMEM;
+ 			break;
+@@ -2227,7 +2245,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
+ 			"TEST 8:  read/%d %d sglists %d entries 0..%d bytes\n",
+ 				param->vary, param->iterations,
+ 				param->sglen, param->length);
+-		sg = alloc_sglist(param->sglen, param->length, param->vary);
++		sg = alloc_sglist(param->sglen, param->length,
++				param->vary, dev, dev->in_pipe);
+ 		if (!sg) {
+ 			retval = -ENOMEM;
+ 			break;
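The pattern change is the heart of these hunks: the mod63 fill and check now restart at every wMaxPacketSize boundary, (i % maxpacket) % 63, so a transfer split into packets keeps the expected byte stream aligned, and alloc_sglist() carries n_size forward so the ramp also stays continuous across scatterlist entries. A small userspace program showing the ramp wrapping at an assumed 512-byte high-speed bulk packet size:

#include <stdio.h>

static unsigned char pattern_byte(unsigned i, unsigned maxpacket)
{
	/* Restart the 0..62 ramp at the start of every packet. */
	return (unsigned char)((i % maxpacket) % 63);
}

int main(void)
{
	unsigned maxpacket = 512;	/* example value only */
	unsigned i;

	/* Bytes 510..514 show the ramp wrapping at the packet edge:
	 * 6, 7, 0, 1, 2.
	 */
	for (i = 510; i < 515; i++)
		printf("buf[%u] = %u\n", i, pattern_byte(i, maxpacket));
	return 0;
}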
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index a2b43a6e7fa7..f49e859ac5ce 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -43,8 +43,8 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
+ static int cp210x_tiocmset_port(struct usb_serial_port *port,
+ 		unsigned int, unsigned int);
+ static void cp210x_break_ctl(struct tty_struct *, int);
+-static int cp210x_startup(struct usb_serial *);
+-static void cp210x_release(struct usb_serial *);
++static int cp210x_port_probe(struct usb_serial_port *);
++static int cp210x_port_remove(struct usb_serial_port *);
+ static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
+ 
+ static const struct usb_device_id id_table[] = {
+@@ -206,7 +206,7 @@ static const struct usb_device_id id_table[] = {
+ 
+ MODULE_DEVICE_TABLE(usb, id_table);
+ 
+-struct cp210x_serial_private {
++struct cp210x_port_private {
+ 	__u8			bInterfaceNumber;
+ };
+ 
+@@ -225,8 +225,8 @@ static struct usb_serial_driver cp210x_device = {
+ 	.set_termios		= cp210x_set_termios,
+ 	.tiocmget		= cp210x_tiocmget,
+ 	.tiocmset		= cp210x_tiocmset,
+-	.attach			= cp210x_startup,
+-	.release		= cp210x_release,
++	.port_probe		= cp210x_port_probe,
++	.port_remove		= cp210x_port_remove,
+ 	.dtr_rts		= cp210x_dtr_rts
+ };
+ 
+@@ -320,7 +320,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -334,7 +334,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
+ 	/* Issue the request, attempting to read 'size' bytes */
+ 	result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
+-				spriv->bInterfaceNumber, buf, size,
++				port_priv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_GET_TIMEOUT);
+ 
+ 	/* Convert data into an array of integers */
+@@ -365,7 +365,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		unsigned int *data, int size)
+ {
+ 	struct usb_serial *serial = port->serial;
+-	struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
++	struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
+ 	__le32 *buf;
+ 	int result, i, length;
+ 
+@@ -384,13 +384,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
+-				spriv->bInterfaceNumber, buf, size,
++				port_priv->bInterfaceNumber, buf, size,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	} else {
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0),
+ 				request, REQTYPE_HOST_TO_INTERFACE, data[0],
+-				spriv->bInterfaceNumber, NULL, 0,
++				port_priv->bInterfaceNumber, NULL, 0,
+ 				USB_CTRL_SET_TIMEOUT);
+ 	}
+ 
+@@ -784,7 +784,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
+ 		} else {
+ 			modem_ctl[0] &= ~0x7B;
+ 			modem_ctl[0] |= 0x01;
+-			modem_ctl[1] |= 0x40;
++			modem_ctl[1] = 0x40;
+ 			dev_dbg(dev, "%s - flow control = NONE\n", __func__);
+ 		}
+ 
+@@ -872,29 +872,32 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
+ 	cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
+ }
+ 
+-static int cp210x_startup(struct usb_serial *serial)
++static int cp210x_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct usb_host_interface *cur_altsetting;
+-	struct cp210x_serial_private *spriv;
++	struct cp210x_port_private *port_priv;
+ 
+-	spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
+-	if (!spriv)
++	port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
++	if (!port_priv)
+ 		return -ENOMEM;
+ 
+ 	cur_altsetting = serial->interface->cur_altsetting;
+-	spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
++	port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+ 
+-	usb_set_serial_data(serial, spriv);
++	usb_set_serial_port_data(port, port_priv);
+ 
+ 	return 0;
+ }
+ 
+-static void cp210x_release(struct usb_serial *serial)
++static int cp210x_port_remove(struct usb_serial_port *port)
+ {
+-	struct cp210x_serial_private *spriv;
++	struct cp210x_port_private *port_priv;
++
++	port_priv = usb_get_serial_port_data(port);
++	kfree(port_priv);
+ 
+-	spriv = usb_get_serial_data(serial);
+-	kfree(spriv);
++	return 0;
+ }
+ 
+ module_usb_serial_driver(serial_drivers, id_table);
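The conversion from attach/release to port_probe/port_remove moves bInterfaceNumber into per-port data, so on multi-interface CP210x devices each port's control requests carry its own interface number rather than whichever one attach happened to record. The generic shape of that lifecycle, sketched with a stand-in private struct and hypothetical callback names:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb/serial.h>

struct example_port_private {
	u8 bInterfaceNumber;
};

static int example_port_probe(struct usb_serial_port *port)
{
	struct example_port_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Record which interface this port talks to, so later control
	 * requests pass the right wIndex.
	 */
	priv->bInterfaceNumber =
		port->serial->interface->cur_altsetting->desc.bInterfaceNumber;
	usb_set_serial_port_data(port, priv);
	return 0;
}

static int example_port_remove(struct usb_serial_port *port)
{
	kfree(usb_get_serial_port_data(port));
	return 0;
}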
+diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
+index c0866971db2b..1947ea0e0988 100644
+--- a/drivers/usb/serial/io_edgeport.c
++++ b/drivers/usb/serial/io_edgeport.c
+@@ -2856,14 +2856,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->interrupt_read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->interrupt_read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->interrupt_read_urb) {
++					response = -ENOMEM;
++					break;
++				}
+ 
+ 				edge_serial->interrupt_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->interrupt_in_buffer) {
+-					usb_free_urb(edge_serial->interrupt_read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->interrupt_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2891,14 +2893,16 @@ static int edge_startup(struct usb_serial *serial)
+ 				/* not set up yet, so do it now */
+ 				edge_serial->read_urb =
+ 						usb_alloc_urb(0, GFP_KERNEL);
+-				if (!edge_serial->read_urb)
+-					return -ENOMEM;
++				if (!edge_serial->read_urb) {
++					response = -ENOMEM;
++					break;
++				}
+ 
+ 				edge_serial->bulk_in_buffer =
+ 					kmalloc(buffer_size, GFP_KERNEL);
+ 				if (!edge_serial->bulk_in_buffer) {
+-					usb_free_urb(edge_serial->read_urb);
+-					return -ENOMEM;
++					response = -ENOMEM;
++					break;
+ 				}
+ 				edge_serial->bulk_in_endpoint =
+ 						endpoint->bEndpointAddress;
+@@ -2924,9 +2928,22 @@ static int edge_startup(struct usb_serial *serial)
+ 			}
+ 		}
+ 
+-		if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
+-			dev_err(ddev, "Error - the proper endpoints were not found!\n");
+-			return -ENODEV;
++		if (response || !interrupt_in_found || !bulk_in_found ||
++							!bulk_out_found) {
++			if (!response) {
++				dev_err(ddev, "expected endpoints not found\n");
++				response = -ENODEV;
++			}
++
++			usb_free_urb(edge_serial->interrupt_read_urb);
++			kfree(edge_serial->interrupt_in_buffer);
++
++			usb_free_urb(edge_serial->read_urb);
++			kfree(edge_serial->bulk_in_buffer);
++
++			kfree(edge_serial);
++
++			return response;
+ 		}
+ 
+ 		/* start interrupt read for this edgeport this interrupt will
+@@ -2949,16 +2966,9 @@ static void edge_disconnect(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
+-	/* stop reads and writes on all ports */
+-	/* free up our endpoint stuff */
+ 	if (edge_serial->is_epic) {
+ 		usb_kill_urb(edge_serial->interrupt_read_urb);
+-		usb_free_urb(edge_serial->interrupt_read_urb);
+-		kfree(edge_serial->interrupt_in_buffer);
+-
+ 		usb_kill_urb(edge_serial->read_urb);
+-		usb_free_urb(edge_serial->read_urb);
+-		kfree(edge_serial->bulk_in_buffer);
+ 	}
+ }
+ 
+@@ -2971,6 +2981,16 @@ static void edge_release(struct usb_serial *serial)
+ {
+ 	struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ 
++	if (edge_serial->is_epic) {
++		usb_kill_urb(edge_serial->interrupt_read_urb);
++		usb_free_urb(edge_serial->interrupt_read_urb);
++		kfree(edge_serial->interrupt_in_buffer);
++
++		usb_kill_urb(edge_serial->read_urb);
++		usb_free_urb(edge_serial->read_urb);
++		kfree(edge_serial->bulk_in_buffer);
++	}
++
+ 	kfree(edge_serial);
+ }
+ 
+diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
+index e07b15ed5814..7faa901ee47f 100644
+--- a/drivers/usb/serial/keyspan.c
++++ b/drivers/usb/serial/keyspan.c
+@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
+ 
+ 	s_priv = usb_get_serial_data(serial);
+ 
++	/* Make sure to unlink the URBs submitted in attach. */
++	usb_kill_urb(s_priv->instat_urb);
++	usb_kill_urb(s_priv->indat_urb);
++
+ 	usb_free_urb(s_priv->instat_urb);
+ 	usb_free_urb(s_priv->indat_urb);
+ 	usb_free_urb(s_priv->glocont_urb);
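The fix here is ordering: the URBs submitted at attach are unlinked with usb_kill_urb() before usb_free_urb(), since freeing an URB that may still be in flight lets its completion handler touch freed memory (the quatech2 hunk further down applies the same rule to its read URB). The idiom in isolation, for a hypothetical driver-owned URB:

#include <linux/usb.h>

static void example_teardown_urb(struct urb *urb)
{
	/* usb_kill_urb() blocks until the URB is no longer in flight,
	 * so the completion handler cannot fire after the free below.
	 */
	usb_kill_urb(urb);
	usb_free_urb(urb);
}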
+diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
+index 460a40669967..d029b2fc0f75 100644
+--- a/drivers/usb/serial/mxuport.c
++++ b/drivers/usb/serial/mxuport.c
+@@ -1263,6 +1263,15 @@ static int mxuport_attach(struct usb_serial *serial)
+ 	return 0;
+ }
+ 
++static void mxuport_release(struct usb_serial *serial)
++{
++	struct usb_serial_port *port0 = serial->port[0];
++	struct usb_serial_port *port1 = serial->port[1];
++
++	usb_serial_generic_close(port1);
++	usb_serial_generic_close(port0);
++}
++
+ static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+ 	struct mxuport_port *mxport = usb_get_serial_port_data(port);
+@@ -1365,6 +1374,7 @@ static struct usb_serial_driver mxuport_device = {
+ 	.probe			= mxuport_probe,
+ 	.port_probe		= mxuport_port_probe,
+ 	.attach			= mxuport_attach,
++	.release		= mxuport_release,
+ 	.calc_num_ports		= mxuport_calc_num_ports,
+ 	.open			= mxuport_open,
+ 	.close			= mxuport_close,
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 94e520de6404..f00919d579e0 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -378,18 +378,22 @@ static void option_instat_callback(struct urb *urb);
+ #define HAIER_PRODUCT_CE81B			0x10f8
+ #define HAIER_PRODUCT_CE100			0x2009
+ 
+-/* Cinterion (formerly Siemens) products */
+-#define SIEMENS_VENDOR_ID				0x0681
+-#define CINTERION_VENDOR_ID				0x1e2d
++/* Gemalto's Cinterion products (formerly Siemens) */
++#define SIEMENS_VENDOR_ID			0x0681
++#define CINTERION_VENDOR_ID			0x1e2d
++#define CINTERION_PRODUCT_HC25_MDMNET		0x0040
+ #define CINTERION_PRODUCT_HC25_MDM		0x0047
+-#define CINTERION_PRODUCT_HC25_MDMNET	0x0040
++#define CINTERION_PRODUCT_HC28_MDMNET		0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_HC28_MDM		0x004C
+-#define CINTERION_PRODUCT_HC28_MDMNET	0x004A /* same for HC28J */
+ #define CINTERION_PRODUCT_EU3_E			0x0051
+ #define CINTERION_PRODUCT_EU3_P			0x0052
+ #define CINTERION_PRODUCT_PH8			0x0053
+ #define CINTERION_PRODUCT_AHXX			0x0055
+ #define CINTERION_PRODUCT_PLXX			0x0060
++#define CINTERION_PRODUCT_PH8_2RMNET		0x0082
++#define CINTERION_PRODUCT_PH8_AUDIO		0x0083
++#define CINTERION_PRODUCT_AHXX_2RMNET		0x0084
++#define CINTERION_PRODUCT_AHXX_AUDIO		0x0085
+ 
+ /* Olivetti products */
+ #define OLIVETTI_VENDOR_ID			0x0b3c
+@@ -641,6 +645,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+ 	.reserved = BIT(1) | BIT(2) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
++	.reserved = BIT(4) | BIT(5),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1614,7 +1622,79 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
+ 		.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+-	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+@@ -1625,6 +1705,61 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
+@@ -1724,7 +1859,13 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
++		.driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
++		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
++	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
+ 	{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
+diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
+index 504f5bff79c0..b18974cbd995 100644
+--- a/drivers/usb/serial/quatech2.c
++++ b/drivers/usb/serial/quatech2.c
+@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
+ 
+ 	serial_priv = usb_get_serial_data(serial);
+ 
++	usb_kill_urb(serial_priv->read_urb);
+ 	usb_free_urb(serial_priv->read_urb);
+ 	kfree(serial_priv->read_buffer);
+ 	kfree(serial_priv);
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 511aab3b9206..4bf7a34f6a4c 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -486,7 +486,8 @@ static void eoi_pirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
+@@ -1373,7 +1374,8 @@ static void ack_dynirq(struct irq_data *data)
+ 	if (!VALID_EVTCHN(evtchn))
+ 		return;
+ 
+-	if (unlikely(irqd_is_setaffinity_pending(data))) {
++	if (unlikely(irqd_is_setaffinity_pending(data)) &&
++	    likely(!irqd_irq_disabled(data))) {
+ 		int masked = test_and_set_mask(evtchn);
+ 
+ 		clear_evtchn(evtchn);
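Both hunks add the same guard: the deferred affinity move is skipped while the IRQ is disabled, because the move path masks and then unmasks the event channel, and unmasking a disabled channel can inject a spurious event. A sketch of the guard only; the real eoi_pirq()/ack_dynirq() mask the channel, clear it, move the IRQ, then conditionally unmask, which irq_move_masked_irq() stands in for here:

#include <linux/irq.h>

static void example_ack(struct irq_data *data)
{
	if (unlikely(irqd_is_setaffinity_pending(data)) &&
	    likely(!irqd_irq_disabled(data)))
		irq_move_masked_irq(data);	/* deferred affinity move */
}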
+diff --git a/fs/affs/super.c b/fs/affs/super.c
+index 3f89c9e05b40..cc7571201b4f 100644
+--- a/fs/affs/super.c
++++ b/fs/affs/super.c
+@@ -526,7 +526,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ 	char			*prefix = NULL;
+ 
+ 	new_opts = kstrdup(data, GFP_KERNEL);
+-	if (!new_opts)
++	if (data && !new_opts)
+ 		return -ENOMEM;
+ 
+ 	pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
+@@ -544,7 +544,8 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ 	}
+ 
+ 	flush_delayed_work(&sbi->sb_work);
+-	replace_mount_options(sb, new_opts);
++	if (new_opts)
++		replace_mount_options(sb, new_opts);
+ 
+ 	sbi->s_flags = mount_flags;
+ 	sbi->s_mode  = mode;
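Remount can legitimately be called with data == NULL, and kstrdup(NULL, ...) returns NULL by design; the old code read that as an allocation failure and would also hand the NULL to replace_mount_options(). The corrected distinction in isolation, sketched for a hypothetical caller that keeps the old options when none are passed:

#include <linux/slab.h>
#include <linux/string.h>

static int example_copy_opts(char *data, char **out)
{
	char *new_opts = kstrdup(data, GFP_KERNEL);

	if (data && !new_opts)	/* only a real OOM when data was non-NULL */
		return -ENOMEM;
	*out = new_opts;	/* may be NULL: caller keeps the old options */
	return 0;
}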
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 699944a07491..53cbbedf21d1 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -3956,6 +3956,7 @@ void btrfs_test_inode_set_ops(struct inode *inode);
+ 
+ /* ioctl.c */
+ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ void btrfs_update_iflags(struct inode *inode);
+ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
+ int btrfs_is_empty_uuid(u8 *uuid);
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 2b0d84d32db4..2b230e9b3bad 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -2819,7 +2819,7 @@ const struct file_operations btrfs_file_operations = {
+ 	.fallocate	= btrfs_fallocate,
+ 	.unlocked_ioctl	= btrfs_ioctl,
+ #ifdef CONFIG_COMPAT
+-	.compat_ioctl	= btrfs_ioctl,
++	.compat_ioctl	= btrfs_compat_ioctl,
+ #endif
+ };
+ 
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f751ab47e9a5..c4771af7fd6f 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9894,7 +9894,7 @@ static const struct file_operations btrfs_dir_file_operations = {
+ 	.iterate	= btrfs_real_readdir,
+ 	.unlocked_ioctl	= btrfs_ioctl,
+ #ifdef CONFIG_COMPAT
+-	.compat_ioctl	= btrfs_ioctl,
++	.compat_ioctl	= btrfs_compat_ioctl,
+ #endif
+ 	.release        = btrfs_release_file,
+ 	.fsync		= btrfs_sync_file,
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 5189d54417ab..43502247e176 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -5492,3 +5492,24 @@ long btrfs_ioctl(struct file *file, unsigned int
+ 
+ 	return -ENOTTY;
+ }
++
++#ifdef CONFIG_COMPAT
++long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++	switch (cmd) {
++	case FS_IOC32_GETFLAGS:
++		cmd = FS_IOC_GETFLAGS;
++		break;
++	case FS_IOC32_SETFLAGS:
++		cmd = FS_IOC_SETFLAGS;
++		break;
++	case FS_IOC32_GETVERSION:
++		cmd = FS_IOC_GETVERSION;
++		break;
++	default:
++		return -ENOIOCTLCMD;
++	}
++
++	return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
++}
++#endif
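The new handler exists because these ioctls encode an argument size in the command word, so a 32-bit process emits FS_IOC32_GETFLAGS and friends, which the 64-bit btrfs_ioctl() previously rejected. With the translation above in place an ordinary 32-bit program works unchanged; a userspace illustration (the path is an example only, and in a 32-bit build FS_IOC_GETFLAGS expands to what the kernel sees as FS_IOC32_GETFLAGS):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	int flags;
	int fd = open("/mnt/btrfs/somefile", O_RDONLY);	/* example path */

	if (fd < 0)
		return 1;
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
		printf("inode flags: 0x%x\n", flags);
	close(fd);
	return 0;
}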
+diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
+index f4cf200b3c76..79450fa66d16 100644
+--- a/fs/cifs/cifs_spnego.c
++++ b/fs/cifs/cifs_spnego.c
+@@ -24,10 +24,13 @@
+ #include <linux/string.h>
+ #include <keys/user-type.h>
+ #include <linux/key-type.h>
++#include <linux/keyctl.h>
+ #include <linux/inet.h>
+ #include "cifsglob.h"
+ #include "cifs_spnego.h"
+ #include "cifs_debug.h"
++#include "cifsproto.h"
++static const struct cred *spnego_cred;
+ 
+ /* create a new cifs key */
+ static int
+@@ -102,6 +105,7 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
+ 	size_t desc_len;
+ 	struct key *spnego_key;
+ 	const char *hostname = server->hostname;
++	const struct cred *saved_cred;
+ 
+ 	/* length of fields (with semicolons): ver=0xyz ip4=ipaddress
+ 	   host=hostname sec=mechanism uid=0xFF user=username */
+@@ -163,7 +167,9 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
+ 	sprintf(dp, ";pid=0x%x", current->pid);
+ 
+ 	cifs_dbg(FYI, "key description = %s\n", description);
++	saved_cred = override_creds(spnego_cred);
+ 	spnego_key = request_key(&cifs_spnego_key_type, description, "");
++	revert_creds(saved_cred);
+ 
+ #ifdef CONFIG_CIFS_DEBUG2
+ 	if (cifsFYI && !IS_ERR(spnego_key)) {
+@@ -177,3 +183,64 @@ out:
+ 	kfree(description);
+ 	return spnego_key;
+ }
++
++int
++init_cifs_spnego(void)
++{
++	struct cred *cred;
++	struct key *keyring;
++	int ret;
++
++	cifs_dbg(FYI, "Registering the %s key type\n",
++		 cifs_spnego_key_type.name);
++
++	/*
++	 * Create an override credential set with special thread keyring for
++	 * spnego upcalls.
++	 */
++
++	cred = prepare_kernel_cred(NULL);
++	if (!cred)
++		return -ENOMEM;
++
++	keyring = keyring_alloc(".cifs_spnego",
++				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
++				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
++				KEY_USR_VIEW | KEY_USR_READ,
++				KEY_ALLOC_NOT_IN_QUOTA, NULL);
++	if (IS_ERR(keyring)) {
++		ret = PTR_ERR(keyring);
++		goto failed_put_cred;
++	}
++
++	ret = register_key_type(&cifs_spnego_key_type);
++	if (ret < 0)
++		goto failed_put_key;
++
++	/*
++	 * instruct request_key() to use this special keyring as a cache for
++	 * the results it looks up
++	 */
++	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
++	cred->thread_keyring = keyring;
++	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
++	spnego_cred = cred;
++
++	cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring));
++	return 0;
++
++failed_put_key:
++	key_put(keyring);
++failed_put_cred:
++	put_cred(cred);
++	return ret;
++}
++
++void
++exit_cifs_spnego(void)
++{
++	key_revoke(spnego_cred->thread_keyring);
++	unregister_key_type(&cifs_spnego_key_type);
++	put_cred(spnego_cred);
++	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name);
++}
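The point of the new init path is the override bracket around request_key(): the upcall runs under a dedicated kernel credential whose thread keyring both scopes and caches the lookups, the same arrangement cifs already uses for idmap. The bracket in isolation, with the key type and credential left as parameters and a hypothetical function name:

#include <linux/cred.h>
#include <linux/key.h>

static struct key *example_cached_request_key(const struct cred *cache_cred,
					      struct key_type *type,
					      const char *desc)
{
	const struct cred *saved = override_creds(cache_cred);
	struct key *keyp = request_key(type, desc, "");

	/* Always restore the caller's credentials; the special cred's
	 * thread keyring retains whatever request_key() instantiated.
	 */
	revert_creds(saved);
	return keyp;
}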
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 6a1119e87fbb..fe24e22fc154 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1238,7 +1238,7 @@ init_cifs(void)
+ 		goto out_destroy_mids;
+ 
+ #ifdef CONFIG_CIFS_UPCALL
+-	rc = register_key_type(&cifs_spnego_key_type);
++	rc = init_cifs_spnego();
+ 	if (rc)
+ 		goto out_destroy_request_bufs;
+ #endif /* CONFIG_CIFS_UPCALL */
+@@ -1261,7 +1261,7 @@ out_init_cifs_idmap:
+ out_register_key_type:
+ #endif
+ #ifdef CONFIG_CIFS_UPCALL
+-	unregister_key_type(&cifs_spnego_key_type);
++	exit_cifs_spnego();
+ out_destroy_request_bufs:
+ #endif
+ 	cifs_destroy_request_bufs();
+diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
+index c63fd1dde25b..f730c065df34 100644
+--- a/fs/cifs/cifsproto.h
++++ b/fs/cifs/cifsproto.h
+@@ -60,6 +60,8 @@ do {								\
+ } while (0)
+ extern int init_cifs_idmap(void);
+ extern void exit_cifs_idmap(void);
++extern int init_cifs_spnego(void);
++extern void exit_cifs_spnego(void);
+ extern char *build_path_from_dentry(struct dentry *);
+ extern char *cifs_build_path_to_root(struct smb_vol *vol,
+ 				     struct cifs_sb_info *cifs_sb,
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index bce6fdcd5d48..8ffda5084dbf 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
+ 	sec_blob->LmChallengeResponse.MaximumLength = 0;
+ 
+ 	sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
+-	rc = setup_ntlmv2_rsp(ses, nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+-		goto setup_ntlmv2_ret;
++	if (ses->user_name != NULL) {
++		rc = setup_ntlmv2_rsp(ses, nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
++			goto setup_ntlmv2_ret;
++		}
++		memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++
++		sec_blob->NtChallengeResponse.Length =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		sec_blob->NtChallengeResponse.MaximumLength =
++				cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		/*
++		 * don't send an NT Response for anonymous access
++		 */
++		sec_blob->NtChallengeResponse.Length = 0;
++		sec_blob->NtChallengeResponse.MaximumLength = 0;
+ 	}
+-	memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+-
+-	sec_blob->NtChallengeResponse.Length =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	sec_blob->NtChallengeResponse.MaximumLength =
+-			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ 
+ 	if (ses->domainName == NULL) {
+ 		sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
+@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
+ 
+ 	pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
+ 
+-	/* no capabilities flags in old lanman negotiation */
+-	pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-
+-	/* Calculate hash with password and copy into bcc_ptr.
+-	 * Encryption Key (stored as in cryptkey) gets used if the
+-	 * security mode bit in Negottiate Protocol response states
+-	 * to use challenge/response method (i.e. Password bit is 1).
+-	 */
+-	rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+-			      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+-			      true : false, lnm_session_key);
+-
+-	memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	if (ses->user_name != NULL) {
++		/* no capabilities flags in old lanman negotiation */
++		pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++
++		/* Calculate hash with password and copy into bcc_ptr.
++	 * Encryption Key (stored in cryptkey) gets used if the
++	 * security mode bit in Negotiate Protocol response states
++	 * to use challenge/response method (i.e. Password bit is 1).
++		 */
++		rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
++				      ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
++				      true : false, lnm_session_key);
++
++		memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->old_req.PasswordLength = 0;
++	}
+ 
+ 	/*
+ 	 * can not sign if LANMAN negotiated so no need
+@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
+ 	capabilities = cifs_ssetup_hdr(ses, pSMB);
+ 
+ 	pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+-	pSMB->req_no_secext.CaseInsensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-			cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+-
+-	/* calculate ntlm response and session key */
+-	rc = setup_ntlm_response(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+-				 rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++				cpu_to_le16(CIFS_AUTH_RESP_SIZE);
++
++		/* calculate ntlm response and session key */
++		rc = setup_ntlm_response(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLM authentication\n",
++					 rc);
++			goto out;
++		}
+ 
+-	/* copy ntlm response */
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			CIFS_AUTH_RESP_SIZE);
+-	bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		/* copy ntlm response */
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				CIFS_AUTH_RESP_SIZE);
++		bcc_ptr += CIFS_AUTH_RESP_SIZE;
++	} else {
++		pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
+ 
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		/* unicode strings must be word aligned */
+@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
+ 	/* LM2 password would be here if we supported it */
+ 	pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+ 
+-	/* calculate nlmv2 response and session key */
+-	rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+-	if (rc) {
+-		cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+-		goto out;
+-	}
++	if (ses->user_name != NULL) {
++		/* calculate nlmv2 response and session key */
++		rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
++		if (rc) {
++			cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
++			goto out;
++		}
+ 
+-	memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+-			ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+-	bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
++		memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
++				ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+ 
+-	/* set case sensitive password length after tilen may get
+-	 * assigned, tilen is 0 otherwise.
+-	 */
+-	pSMB->req_no_secext.CaseSensitivePasswordLength =
+-		cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++		/* set case sensitive password length after tilen may have
++		 * been assigned; tilen is 0 otherwise.
++		 */
++		pSMB->req_no_secext.CaseSensitivePasswordLength =
++			cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
++	} else {
++		pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
++	}
+ 
+ 	if (ses->capabilities & CAP_UNICODE) {
+ 		if (sess_data->iov[0].iov_len % 2) {
+diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
+index bc0bb9c34f72..0ffa18094335 100644
+--- a/fs/cifs/smb2glob.h
++++ b/fs/cifs/smb2glob.h
+@@ -44,6 +44,7 @@
+ #define SMB2_OP_DELETE 7
+ #define SMB2_OP_HARDLINK 8
+ #define SMB2_OP_SET_EOF 9
++#define SMB2_OP_RMDIR 10
+ 
+ /* Used when constructing chained read requests. */
+ #define CHAINED_REQUEST 1
+diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
+index 899bbc86f73e..4f0231e685a9 100644
+--- a/fs/cifs/smb2inode.c
++++ b/fs/cifs/smb2inode.c
+@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
+ 		 * SMB2_open() call.
+ 		 */
+ 		break;
++	case SMB2_OP_RMDIR:
++		tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
++				   fid.volatile_fid);
++		break;
+ 	case SMB2_OP_RENAME:
+ 		tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
+ 				    fid.volatile_fid, (__le16 *)data);
+@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
+ 	   struct cifs_sb_info *cifs_sb)
+ {
+ 	return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+-				  CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
+-				  NULL, SMB2_OP_DELETE);
++				  CREATE_NOT_FILE,
++				  NULL, SMB2_OP_RMDIR);
+ }
+ 
+ int
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 657a9c5c4fff..14e845e8996f 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -2384,6 +2384,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ }
+ 
+ int
++SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		  u64 persistent_fid, u64 volatile_fid)
++{
++	__u8 delete_pending = 1;
++	void *data;
++	unsigned int size;
++
++	data = &delete_pending;
++	size = 1; /* sizeof __u8 */
++
++	return send_set_info(xid, tcon, persistent_fid, volatile_fid,
++			current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
++			&size);
++}
++
++int
+ SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 		  u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
+ {
+diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
+index 79dc650c18b2..9bc59f9c12fb 100644
+--- a/fs/cifs/smb2proto.h
++++ b/fs/cifs/smb2proto.h
+@@ -140,6 +140,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
+ extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
+ 		       u64 persistent_fid, u64 volatile_fid,
+ 		       __le16 *target_file);
++extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
++		      u64 persistent_fid, u64 volatile_fid);
+ extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
+ 			     u64 persistent_fid, u64 volatile_fid,
+ 			     __le16 *target_file);
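
Taken together, these hunks stop riding on CREATE_DELETE_ON_CLOSE at open time and instead mark the directory for deletion with an explicit SET_INFO. A sketch of the resulting round trip (helper names taken from the hunks above, flow only):

	/*
	 * smb2_rmdir()
	 *   -> smb2_open_op_close(..., DELETE, FILE_OPEN, CREATE_NOT_FILE,
	 *                         NULL, SMB2_OP_RMDIR)
	 *        SMB2_open()    open the directory with DELETE access
	 *        SMB2_rmdir()   SET_INFO FileDispositionInformation,
	 *                       DeletePending = 1
	 *        SMB2_close()   server removes the directory on close
	 */
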
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index 1eaa6cb96cd0..3fcdb2e9d228 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1112,22 +1112,20 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
+ 	ext4_group_t block_group;
+ 	int bit;
+-	struct buffer_head *bitmap_bh;
++	struct buffer_head *bitmap_bh = NULL;
+ 	struct inode *inode = NULL;
+-	long err = -EIO;
++	int err = -EIO;
+ 
+-	/* Error cases - e2fsck has already cleaned up for us */
+-	if (ino > max_ino) {
+-		ext4_warning(sb, "bad orphan ino %lu!  e2fsck was run?", ino);
+-		goto error;
+-	}
++	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
++		goto bad_orphan;
+ 
+ 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
+ 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
+ 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
+ 	if (!bitmap_bh) {
+-		ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
+-		goto error;
++		ext4_error(sb, "inode bitmap error %ld for orphan %lu",
++			   ino, PTR_ERR(bitmap_bh));
++		return (struct inode *) bitmap_bh;
+ 	}
+ 
+ 	/* Having the inode bit set should be a 100% indicator that this
+@@ -1138,15 +1136,21 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 		goto bad_orphan;
+ 
+ 	inode = ext4_iget(sb, ino);
+-	if (IS_ERR(inode))
+-		goto iget_failed;
++	if (IS_ERR(inode)) {
++		err = PTR_ERR(inode);
++		ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
++			   ino, err);
++		return inode;
++	}
+ 
+ 	/*
+-	 * If the orphans has i_nlinks > 0 then it should be able to be
+-	 * truncated, otherwise it won't be removed from the orphan list
+-	 * during processing and an infinite loop will result.
++	 * If the orphan has i_nlink > 0 then it should be able to
++	 * be truncated, otherwise it won't be removed from the orphan
++	 * list during processing and an infinite loop will result.
++	 * Similarly, it must not be a bad inode.
+ 	 */
+-	if (inode->i_nlink && !ext4_can_truncate(inode))
++	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
++	    is_bad_inode(inode))
+ 		goto bad_orphan;
+ 
+ 	if (NEXT_ORPHAN(inode) > max_ino)
+@@ -1154,29 +1158,25 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
+ 	brelse(bitmap_bh);
+ 	return inode;
+ 
+-iget_failed:
+-	err = PTR_ERR(inode);
+-	inode = NULL;
+ bad_orphan:
+-	ext4_warning(sb, "bad orphan inode %lu!  e2fsck was run?", ino);
+-	printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+-	       bit, (unsigned long long)bitmap_bh->b_blocknr,
+-	       ext4_test_bit(bit, bitmap_bh->b_data));
+-	printk(KERN_WARNING "inode=%p\n", inode);
++	ext4_error(sb, "bad orphan inode %lu", ino);
++	if (bitmap_bh)
++		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
++		       bit, (unsigned long long)bitmap_bh->b_blocknr,
++		       ext4_test_bit(bit, bitmap_bh->b_data));
+ 	if (inode) {
+-		printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
++		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
+ 		       is_bad_inode(inode));
+-		printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
++		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
+ 		       NEXT_ORPHAN(inode));
+-		printk(KERN_WARNING "max_ino=%lu\n", max_ino);
+-		printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
++		printk(KERN_ERR "max_ino=%lu\n", max_ino);
++		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
+ 		/* Avoid freeing blocks if we got a bad deleted inode */
+ 		if (inode->i_nlink == 0)
+ 			inode->i_blocks = 0;
+ 		iput(inode);
+ 	}
+ 	brelse(bitmap_bh);
+-error:
+ 	return ERR_PTR(err);
+ }
+ 
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 41260489d3bc..5b58e266892b 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1248,6 +1248,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
+ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ {
+ 	int order = 1;
++	int bb_incr = 1 << (e4b->bd_blkbits - 1);
+ 	void *bb;
+ 
+ 	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
+@@ -1260,7 +1261,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
+ 			/* this block is part of buddy of order 'order' */
+ 			return order;
+ 		}
+-		bb += 1 << (e4b->bd_blkbits - order);
++		bb += bb_incr;
++		bb_incr >>= 1;
+ 		order++;
+ 	}
+ 	return 0;
+@@ -2553,7 +2555,7 @@ int ext4_mb_init(struct super_block *sb)
+ {
+ 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+ 	unsigned i, j;
+-	unsigned offset;
++	unsigned offset, offset_incr;
+ 	unsigned max;
+ 	int ret;
+ 
+@@ -2582,11 +2584,13 @@ int ext4_mb_init(struct super_block *sb)
+ 
+ 	i = 1;
+ 	offset = 0;
++	offset_incr = 1 << (sb->s_blocksize_bits - 1);
+ 	max = sb->s_blocksize << 2;
+ 	do {
+ 		sbi->s_mb_offsets[i] = offset;
+ 		sbi->s_mb_maxs[i] = max;
+-		offset += 1 << (sb->s_blocksize_bits - i);
++		offset += offset_incr;
++		offset_incr = offset_incr >> 1;
+ 		max = max >> 1;
+ 		i++;
+ 	} while (i <= sb->s_blocksize_bits + 1);
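
Both mballoc hunks replace a per-iteration `1 << (bits - i)` with an increment that starts at `1 << (bits - 1)` and is halved each round: the two are equal while the shift count stays non-negative, but the old form shifts by a negative count on the final loop pass, which is undefined behavior. A standalone illustration (the blocksize_bits value is picked arbitrarily for the demo):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bits = 12;                 /* e.g. 4 KiB blocks */
		unsigned int offset = 0;
		unsigned int offset_incr = 1u << (bits - 1);
		unsigned int i;

		for (i = 1; i <= bits + 1; i++) {
			printf("i=%2u offset=%u\n", i, offset);
			offset += offset_incr;  /* was: 1 << (bits - i), which
						 * is UB once i exceeds bits */
			offset_incr >>= 1;      /* halve instead of re-shifting */
		}
		return 0;
	}
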
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 814f3beb4369..03482c01fb3e 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2897,7 +2897,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
+ 			 * list entries can cause panics at unmount time.
+ 			 */
+ 			mutex_lock(&sbi->s_orphan_lock);
+-			list_del(&EXT4_I(inode)->i_orphan);
++			list_del_init(&EXT4_I(inode)->i_orphan);
+ 			mutex_unlock(&sbi->s_orphan_lock);
+ 		}
+ 	}
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 8685c655737f..bc6e5c5da88c 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -15,6 +15,7 @@
+ #include <linux/sched.h>
+ #include <linux/bitmap.h>
+ #include <linux/slab.h>
++#include <linux/seq_file.h>
+ 
+ /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
+ 
+@@ -426,10 +427,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 	int lowercase, eas, chk, errs, chkdsk, timeshift;
+ 	int o;
+ 	struct hpfs_sb_info *sbi = hpfs_sb(s);
+-	char *new_opts = kstrdup(data, GFP_KERNEL);
+-
+-	if (!new_opts)
+-		return -ENOMEM;
+ 
+ 	sync_filesystem(s);
+ 
+@@ -466,17 +463,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 
+ 	if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+ 
+-	replace_mount_options(s, new_opts);
+-
+ 	hpfs_unlock(s);
+ 	return 0;
+ 
+ out_err:
+ 	hpfs_unlock(s);
+-	kfree(new_opts);
+ 	return -EINVAL;
+ }
+ 
++static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
++{
++	struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
++
++	seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
++	seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
++	seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
++	if (sbi->sb_lowercase)
++		seq_printf(seq, ",case=lower");
++	if (!sbi->sb_chk)
++		seq_printf(seq, ",check=none");
++	if (sbi->sb_chk == 2)
++		seq_printf(seq, ",check=strict");
++	if (!sbi->sb_err)
++		seq_printf(seq, ",errors=continue");
++	if (sbi->sb_err == 2)
++		seq_printf(seq, ",errors=panic");
++	if (!sbi->sb_chkdsk)
++		seq_printf(seq, ",chkdsk=no");
++	if (sbi->sb_chkdsk == 2)
++		seq_printf(seq, ",chkdsk=always");
++	if (!sbi->sb_eas)
++		seq_printf(seq, ",eas=no");
++	if (sbi->sb_eas == 1)
++		seq_printf(seq, ",eas=ro");
++	if (sbi->sb_timeshift)
++		seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
++	return 0;
++}
++
+ /* Super operations */
+ 
+ static const struct super_operations hpfs_sops =
+@@ -487,7 +511,7 @@ static const struct super_operations hpfs_sops =
+ 	.put_super	= hpfs_put_super,
+ 	.statfs		= hpfs_statfs,
+ 	.remount_fs	= hpfs_remount_fs,
+-	.show_options	= generic_show_options,
++	.show_options	= hpfs_show_options,
+ };
+ 
+ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+@@ -510,8 +534,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
+ 
+ 	int o;
+ 
+-	save_mount_options(s, options);
+-
+ 	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ 	if (!sbi) {
+ 		return -ENOMEM;
+diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
+index 4daaa662337b..30c42ed58fc9 100644
+--- a/fs/xfs/libxfs/xfs_format.h
++++ b/fs/xfs/libxfs/xfs_format.h
+@@ -754,7 +754,7 @@ typedef struct xfs_agfl {
+ 	__be64		agfl_lsn;
+ 	__be32		agfl_crc;
+ 	__be32		agfl_bno[];	/* actually XFS_AGFL_SIZE(mp) */
+-} xfs_agfl_t;
++} __attribute__((packed)) xfs_agfl_t;
+ 
+ #define XFS_AGFL_CRC_OFF	offsetof(struct xfs_agfl, agfl_crc)
+ 
+diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
+index cb7e8a29dfb6..1a89b3417989 100644
+--- a/fs/xfs/xfs_fsops.c
++++ b/fs/xfs/xfs_fsops.c
+@@ -241,8 +241,8 @@ xfs_growfs_data_private(
+ 		agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
+ 		agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
+ 		agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
+-		agf->agf_flfirst = 0;
+-		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
++		agf->agf_flfirst = cpu_to_be32(1);
++		agf->agf_fllast = 0;
+ 		agf->agf_flcount = 0;
+ 		tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
+ 		agf->agf_freeblks = cpu_to_be32(tmpsize);
+diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
+index 539a85fddbc2..c29f34253e2b 100644
+--- a/fs/xfs/xfs_inode.c
++++ b/fs/xfs/xfs_inode.c
+@@ -3236,13 +3236,14 @@ xfs_iflush_cluster(
+ 		 * We need to check under the i_flags_lock for a valid inode
+ 		 * here. Skip it if it is not valid or the wrong inode.
+ 		 */
+-		spin_lock(&ip->i_flags_lock);
+-		if (!ip->i_ino ||
++		spin_lock(&iq->i_flags_lock);
++		if (!iq->i_ino ||
++		    __xfs_iflags_test(iq, XFS_ISTALE) ||
+ 		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+-			spin_unlock(&ip->i_flags_lock);
++			spin_unlock(&iq->i_flags_lock);
+ 			continue;
+ 		}
+-		spin_unlock(&ip->i_flags_lock);
++		spin_unlock(&iq->i_flags_lock);
+ 
+ 		/*
+ 		 * Do an un-protected check to see if the inode is dirty and
+@@ -3358,7 +3359,7 @@ xfs_iflush(
+ 	struct xfs_buf		**bpp)
+ {
+ 	struct xfs_mount	*mp = ip->i_mount;
+-	struct xfs_buf		*bp;
++	struct xfs_buf		*bp = NULL;
+ 	struct xfs_dinode	*dip;
+ 	int			error;
+ 
+@@ -3400,14 +3401,22 @@ xfs_iflush(
+ 	}
+ 
+ 	/*
+-	 * Get the buffer containing the on-disk inode.
++	 * Get the buffer containing the on-disk inode. We are doing a try-lock
++	 * operation here, so we may get an EAGAIN error. In that case, we
++	 * simply want to return with the inode still dirty.
++	 *
++	 * If we get any other error, we effectively have a corruption situation
++	 * and we cannot flush the inode, so we treat it the same as failing
++	 * xfs_iflush_int().
+ 	 */
+ 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
+ 			       0);
+-	if (error || !bp) {
++	if (error == -EAGAIN) {
+ 		xfs_ifunlock(ip);
+ 		return error;
+ 	}
++	if (error)
++		goto corrupt_out;
+ 
+ 	/*
+ 	 * First flush out the inode that xfs_iflush was called with.
+@@ -3435,7 +3444,8 @@ xfs_iflush(
+ 	return 0;
+ 
+ corrupt_out:
+-	xfs_buf_relse(bp);
++	if (bp)
++		xfs_buf_relse(bp);
+ 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ cluster_corrupt_out:
+ 	error = -EFSCORRUPTED;
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 65a45372fb1f..2a517576f2bc 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -1233,6 +1233,16 @@ xfs_fs_remount(
+ 			return -EINVAL;
+ 		}
+ 
++		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
++		    xfs_sb_has_ro_compat_feature(sbp,
++					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
++			xfs_warn(mp,
++"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
++				(sbp->sb_features_ro_compat &
++					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
++			return -EINVAL;
++		}
++
+ 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
+ 
+ 		/*
+diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h
+index 3d1a3af5cf59..a2508a8f9a9c 100644
+--- a/include/asm-generic/siginfo.h
++++ b/include/asm-generic/siginfo.h
+@@ -17,21 +17,6 @@
+ struct siginfo;
+ void do_schedule_next_timer(struct siginfo *info);
+ 
+-#ifndef HAVE_ARCH_COPY_SIGINFO
+-
+-#include <linux/string.h>
+-
+-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+-{
+-	if (from->si_code < 0)
+-		memcpy(to, from, sizeof(*to));
+-	else
+-		/* _sigchld is currently the largest know union member */
+-		memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+-}
+-
+-#endif
+-
+ extern int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from);
+ 
+ #endif
+diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
+index c3a9c8fc60fa..5e13b987d9e2 100644
+--- a/include/linux/can/dev.h
++++ b/include/linux/can/dev.h
+@@ -39,8 +39,11 @@ struct can_priv {
+ 	struct can_clock clock;
+ 
+ 	enum can_state state;
+-	u32 ctrlmode;
+-	u32 ctrlmode_supported;
++
++	/* CAN controller features - see include/uapi/linux/can/netlink.h */
++	u32 ctrlmode;		/* current options setting */
++	u32 ctrlmode_supported;	/* options that can be modified by netlink */
++	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
+ 
+ 	int restart_ms;
+ 	struct timer_list restart_timer;
+@@ -107,6 +110,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
+ 	return skb->len == CANFD_MTU;
+ }
+ 
++/* helper to define static CAN controller features at device creation time */
++static inline void can_set_static_ctrlmode(struct net_device *dev,
++					   u32 static_mode)
++{
++	struct can_priv *priv = netdev_priv(dev);
++
++	/* alloc_candev() succeeded => netdev_priv() is valid at this point */
++	priv->ctrlmode = static_mode;
++	priv->ctrlmode_static = static_mode;
++
++	/* override MTU which was set by default in can_setup()? */
++	if (static_mode & CAN_CTRLMODE_FD)
++		dev->mtu = CANFD_MTU;
++}
++
+ /* get data length from can_dlc with sanitized can_dlc */
+ u8 can_dlc2len(u8 can_dlc);
+ 
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index ab1e0392b5ac..883ceb1439fa 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -28,6 +28,21 @@ struct sigpending {
+ 	sigset_t signal;
+ };
+ 
++#ifndef HAVE_ARCH_COPY_SIGINFO
++
++#include <linux/string.h>
++
++static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
++{
++	if (from->si_code < 0)
++		memcpy(to, from, sizeof(*to));
++	else
++		/* _sigchld is currently the largest know union member */
++		/* _sigchld is currently the largest known union member */
++}
++
++#endif
++
+ /*
+  * Define some primitives to manipulate sigset_t.
+  */
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index 5e704e26f9a2..52baf4089bd2 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -339,7 +339,6 @@ struct tty_file_private {
+ #define TTY_EXCLUSIVE 		3	/* Exclusive open mode */
+ #define TTY_DEBUG 		4	/* Debugging */
+ #define TTY_DO_WRITE_WAKEUP 	5	/* Call write_wakeup after queuing new */
+-#define TTY_OTHER_DONE		6	/* Closed pty has completed input processing */
+ #define TTY_LDISC_OPEN	 	11	/* Line discipline is open */
+ #define TTY_PTY_LOCK 		16	/* pty private */
+ #define TTY_NO_WRITE_SPLIT 	17	/* Preserve write boundaries to driver */
+@@ -467,6 +466,7 @@ extern void tty_buffer_free_all(struct tty_port *port);
+ extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
+ extern void tty_buffer_init(struct tty_port *port);
+ extern void tty_buffer_set_lock_subclass(struct tty_port *port);
++extern void tty_buffer_flush_work(struct tty_port *port);
+ extern speed_t tty_termios_baud_rate(struct ktermios *termios);
+ extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
+ extern void tty_termios_encode_baud_rate(struct ktermios *termios,
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index 447fe29b55b4..cb793594aa1e 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -367,14 +367,13 @@ struct usb_bus {
+ 
+ 	int devnum_next;		/* Next open device number in
+ 					 * round-robin allocation */
++	struct mutex devnum_next_mutex; /* devnum_next mutex */
+ 
+ 	struct usb_devmap devmap;	/* device address allocation map */
+ 	struct usb_device *root_hub;	/* Root hub */
+ 	struct usb_bus *hs_companion;	/* Companion EHCI bus, if any */
+ 	struct list_head bus_list;	/* list of busses */
+ 
+-	struct mutex usb_address0_mutex; /* unaddressed device mutex */
+-
+ 	int bandwidth_allocated;	/* on this bus: how much of the time
+ 					 * reserved for periodic (intr/iso)
+ 					 * requests is used, on average?
+@@ -1060,7 +1059,7 @@ struct usbdrv_wrap {
+  *	for interfaces bound to this driver.
+  * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
+  *	endpoints before calling the driver's disconnect method.
+- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
++ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
+  *	to initiate lower power link state transitions when an idle timeout
+  *	occurs.  Device-initiated USB 3.0 link PM will still be allowed.
+  *
+diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
+index 68b1e836dff1..c00c7393ce8c 100644
+--- a/include/linux/usb/hcd.h
++++ b/include/linux/usb/hcd.h
+@@ -169,6 +169,7 @@ struct usb_hcd {
+ 	 * bandwidth_mutex should be dropped after a successful control message
+ 	 * to the device, or resetting the bandwidth after a failed attempt.
+ 	 */
++	struct mutex		*address0_mutex;
+ 	struct mutex		*bandwidth_mutex;
+ 	struct usb_hcd		*shared_hcd;
+ 	struct usb_hcd		*primary_hcd;
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 22fcc05dec40..819f51ec4f55 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -914,17 +914,28 @@ static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
+ 		task_pid_type(p, wo->wo_type) == wo->wo_pid;
+ }
+ 
+-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
++static int
++eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
+ {
+ 	if (!eligible_pid(wo, p))
+ 		return 0;
+-	/* Wait for all children (clone and not) if __WALL is set;
+-	 * otherwise, wait for clone children *only* if __WCLONE is
+-	 * set; otherwise, wait for non-clone children *only*.  (Note:
+-	 * A "clone" child here is one that reports to its parent
+-	 * using a signal other than SIGCHLD.) */
+-	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+-	    && !(wo->wo_flags & __WALL))
++
++	/*
++	 * Wait for all children (clone and not) if __WALL is set or
++	 * if it is traced by us.
++	 */
++	if (ptrace || (wo->wo_flags & __WALL))
++		return 1;
++
++	/*
++	 * Otherwise, wait for clone children *only* if __WCLONE is set;
++	 * otherwise, wait for non-clone children *only*.
++	 *
++	 * Note: a "clone" child here is one that reports to its parent
++	 * using a signal other than SIGCHLD, or a non-leader thread which
++	 * we can only see if it is traced by us.
++	 */
++	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
+ 		return 0;
+ 
+ 	return 1;
+@@ -1297,7 +1308,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
+ 	if (unlikely(exit_state == EXIT_DEAD))
+ 		return 0;
+ 
+-	ret = eligible_child(wo, p);
++	ret = eligible_child(wo, ptrace, p);
+ 	if (!ret)
+ 		return ret;
+ 
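
The user-visible effect: a tracer can now reap a traced "clone" child (one whose exit signal is not SIGCHLD) with a plain waitpid(), without passing __WCLONE or __WALL. A minimal Linux-only demo (a sketch: error handling trimmed, downward-growing stack assumed):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static char stack[64 * 1024];

	static int child_fn(void *arg)
	{
		(void)arg;
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		_exit(0);
	}

	int main(void)
	{
		int status;
		/* flags low byte 0 => no exit signal => a "clone" child */
		pid_t pid = clone(child_fn, stack + sizeof(stack), 0, NULL);

		/* no __WCLONE/__WALL: fails with ECHILD before this fix,
		 * succeeds after it because the child is traced by us */
		if (waitpid(pid, &status, 0) == pid)
			printf("reaped traced clone child %d\n", pid);
		else
			perror("waitpid");
		return 0;
	}
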
+diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
+index 8ecd552fe4f2..09dfe51f89b5 100644
+--- a/kernel/sched/proc.c
++++ b/kernel/sched/proc.c
+@@ -97,10 +97,13 @@ long calc_load_fold_active(struct rq *this_rq)
+ static unsigned long
+ calc_load(unsigned long load, unsigned long exp, unsigned long active)
+ {
+-	load *= exp;
+-	load += active * (FIXED_1 - exp);
+-	load += 1UL << (FSHIFT - 1);
+-	return load >> FSHIFT;
++	unsigned long newload;
++
++	newload = load * exp + active * (FIXED_1 - exp);
++	if (active >= load)
++		newload += FIXED_1-1;
++
++	return newload / FIXED_1;
+ }
+ 
+ #ifdef CONFIG_NO_HZ_COMMON
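
In the old form the unconditional `+= 1UL << (FSHIFT - 1)` rounded to nearest, so a small residual load could never decay to zero; the new form rounds up only while the active count is rising. A userspace re-run of the arithmetic (FSHIFT/FIXED_1/EXP_1 copied from the kernel's loadavg constants):

	#include <stdio.h>

	#define FSHIFT  11
	#define FIXED_1 (1UL << FSHIFT)
	#define EXP_1   1884            /* 1-min decay factor, 5 s samples */

	static unsigned long calc_load(unsigned long load, unsigned long exp,
				       unsigned long active)
	{
		unsigned long newload = load * exp + active * (FIXED_1 - exp);

		if (active >= load)
			newload += FIXED_1 - 1;  /* round up while rising */

		return newload / FIXED_1;        /* round down while falling */
	}

	int main(void)
	{
		unsigned long load = 2 * FIXED_1;  /* loadavg 2.00 */
		int i;

		for (i = 0; i < 100; i++)
			load = calc_load(load, EXP_1, 0);  /* fully idle */

		/* the old round-to-nearest arithmetic bottomed out at 1
		 * (a nonzero loadavg forever); this version reaches 0 */
		printf("load after 100 idle samples: %lu\n", load);
		return 0;
	}
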
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 0315d43176d8..fb147c7811d2 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -461,7 +461,8 @@ struct ring_buffer_per_cpu {
+ 	raw_spinlock_t			reader_lock;	/* serialize readers */
+ 	arch_spinlock_t			lock;
+ 	struct lock_class_key		lock_key;
+-	unsigned int			nr_pages;
++	unsigned long			nr_pages;
++	unsigned int			current_context;
+ 	struct list_head		*pages;
+ 	struct buffer_page		*head_page;	/* read from head */
+ 	struct buffer_page		*tail_page;	/* write to tail */
+@@ -481,7 +482,7 @@ struct ring_buffer_per_cpu {
+ 	u64				write_stamp;
+ 	u64				read_stamp;
+ 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
+-	int				nr_pages_to_update;
++	long				nr_pages_to_update;
+ 	struct list_head		new_pages; /* new pages to add */
+ 	struct work_struct		update_pages_work;
+ 	struct completion		update_done;
+@@ -1160,10 +1161,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+ 	return 0;
+ }
+ 
+-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
++static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
+ {
+-	int i;
+ 	struct buffer_page *bpage, *tmp;
++	long i;
+ 
+ 	for (i = 0; i < nr_pages; i++) {
+ 		struct page *page;
+@@ -1200,7 +1201,7 @@ free_pages:
+ }
+ 
+ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+-			     unsigned nr_pages)
++			     unsigned long nr_pages)
+ {
+ 	LIST_HEAD(pages);
+ 
+@@ -1225,7 +1226,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
+ }
+ 
+ static struct ring_buffer_per_cpu *
+-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
++rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+ 	struct buffer_page *bpage;
+@@ -1325,8 +1326,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+ 					struct lock_class_key *key)
+ {
+ 	struct ring_buffer *buffer;
++	long nr_pages;
+ 	int bsize;
+-	int cpu, nr_pages;
++	int cpu;
+ 
+ 	/* keep it in its own cache line */
+ 	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
+@@ -1452,12 +1454,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
+ }
+ 
+ static int
+-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
++rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+ {
+ 	struct list_head *tail_page, *to_remove, *next_page;
+ 	struct buffer_page *to_remove_page, *tmp_iter_page;
+ 	struct buffer_page *last_page, *first_page;
+-	unsigned int nr_removed;
++	unsigned long nr_removed;
+ 	unsigned long head_bit;
+ 	int page_entries;
+ 
+@@ -1674,7 +1676,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 			int cpu_id)
+ {
+ 	struct ring_buffer_per_cpu *cpu_buffer;
+-	unsigned nr_pages;
++	unsigned long nr_pages;
+ 	int cpu, err = 0;
+ 
+ 	/*
+@@ -1688,14 +1690,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+ 	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
+ 		return size;
+ 
+-	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+-	size *= BUF_PAGE_SIZE;
++	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ 
+ 	/* we need a minimum of two pages */
+-	if (size < BUF_PAGE_SIZE * 2)
+-		size = BUF_PAGE_SIZE * 2;
++	if (nr_pages < 2)
++		nr_pages = 2;
+ 
+-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
++	size = nr_pages * BUF_PAGE_SIZE;
+ 
+ 	/*
+ 	 * Don't succeed if resizing is disabled, as a reader might be
+@@ -2675,11 +2676,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
+  * just so happens that it is the same bit corresponding to
+  * the current context.
+  */
+-static DEFINE_PER_CPU(unsigned int, current_context);
+ 
+-static __always_inline int trace_recursive_lock(void)
++static __always_inline int
++trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+-	unsigned int val = __this_cpu_read(current_context);
++	unsigned int val = cpu_buffer->current_context;
+ 	int bit;
+ 
+ 	if (in_interrupt()) {
+@@ -2696,20 +2697,21 @@ static __always_inline int trace_recursive_lock(void)
+ 		return 1;
+ 
+ 	val |= (1 << bit);
+-	__this_cpu_write(current_context, val);
++	cpu_buffer->current_context = val;
+ 
+ 	return 0;
+ }
+ 
+-static __always_inline void trace_recursive_unlock(void)
++static __always_inline void
++trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
+ {
+-	__this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
++	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+ }
+ 
+ #else
+ 
+-#define trace_recursive_lock()		(0)
+-#define trace_recursive_unlock()	do { } while (0)
++#define trace_recursive_lock(cpu_buffer)	(0)
++#define trace_recursive_unlock(cpu_buffer)	do { } while (0)
+ 
+ #endif
+ 
+@@ -2741,35 +2743,34 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
+ 	/* If we are tracing schedule, we don't want to recurse */
+ 	preempt_disable_notrace();
+ 
+-	if (atomic_read(&buffer->record_disabled))
+-		goto out_nocheck;
+-
+-	if (trace_recursive_lock())
+-		goto out_nocheck;
++	if (unlikely(atomic_read(&buffer->record_disabled)))
++		goto out;
+ 
+ 	cpu = raw_smp_processor_id();
+ 
+-	if (!cpumask_test_cpu(cpu, buffer->cpumask))
++	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
+ 		goto out;
+ 
+ 	cpu_buffer = buffer->buffers[cpu];
+ 
+-	if (atomic_read(&cpu_buffer->record_disabled))
++	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
+ 		goto out;
+ 
+-	if (length > BUF_MAX_DATA_SIZE)
++	if (unlikely(length > BUF_MAX_DATA_SIZE))
++		goto out;
++
++	if (unlikely(trace_recursive_lock(cpu_buffer)))
+ 		goto out;
+ 
+ 	event = rb_reserve_next_event(buffer, cpu_buffer, length);
+ 	if (!event)
+-		goto out;
++		goto out_unlock;
+ 
+ 	return event;
+ 
++ out_unlock:
++	trace_recursive_unlock(cpu_buffer);
+  out:
+-	trace_recursive_unlock();
+-
+- out_nocheck:
+ 	preempt_enable_notrace();
+ 	return NULL;
+ }
+@@ -2859,7 +2860,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+ 
+ 	rb_wakeups(buffer, cpu_buffer);
+ 
+-	trace_recursive_unlock();
++	trace_recursive_unlock(cpu_buffer);
+ 
+ 	preempt_enable_notrace();
+ 
+@@ -2970,7 +2971,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
+  out:
+ 	rb_end_commit(cpu_buffer);
+ 
+-	trace_recursive_unlock();
++	trace_recursive_unlock(cpu_buffer);
+ 
+ 	preempt_enable_notrace();
+ 
+@@ -4647,8 +4648,9 @@ static int rb_cpu_notify(struct notifier_block *self,
+ 	struct ring_buffer *buffer =
+ 		container_of(self, struct ring_buffer, cpu_notify);
+ 	long cpu = (long)hcpu;
+-	int cpu_i, nr_pages_same;
+-	unsigned int nr_pages;
++	long nr_pages_same;
++	int cpu_i;
++	unsigned long nr_pages;
+ 
+ 	switch (action) {
+ 	case CPU_UP_PREPARE:
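
The recursion guard moved above relies on a small bit trick: each nesting level sets one bit, and `val & (val - 1)` clears the lowest set bit on unlock — the level being left — without tracking which bit that was. Standalone (the exact bit assignments in the kernel differ; the trick is the same):

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0;

		val |= 1u << 3;              /* enter normal context     */
		val |= 1u << 1;              /* nested IRQ context       */
		printf("ctx = %#x\n", val);  /* 0xa                      */

		val &= val - 1;              /* leave IRQ (lowest bit)   */
		printf("ctx = %#x\n", val);  /* 0x8                      */

		val &= val - 1;              /* leave normal context     */
		printf("ctx = %#x\n", val);  /* 0                        */
		return 0;
	}
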
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index 517a568f038d..a30bd1018f7e 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
+ 	spin_lock_irqsave(&free_entries_lock, flags);
+ 
+ 	if (list_empty(&free_entries)) {
+-		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		global_disable = true;
+ 		spin_unlock_irqrestore(&free_entries_lock, flags);
++		pr_err("DMA-API: debugging out of memory - disabling\n");
+ 		return NULL;
+ 	}
+ 
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 1095be9c80ab..4605dc73def6 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -857,8 +857,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
+ 		goto out;
+ 	if (svc_getnl(&buf->head[0]) != seq)
+ 		goto out;
+-	/* trim off the mic at the end before returning */
+-	xdr_buf_trim(buf, mic.len + 4);
++	/* trim off the mic and padding at the end before returning */
++	xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
+ 	stat = 0;
+ out:
+ 	kfree(mic.data);
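
XDR aligns everything to 4-byte "quads", so the checksum blob at the tail occupies round_up_to_quad(mic.len) bytes on the wire; trimming only mic.len left the padding behind. The helper is just next-multiple-of-four rounding (re-stated locally for the demo):

	#include <stdio.h>

	static unsigned int round_up_to_quad(unsigned int n)
	{
		return (n + 3) & ~3u;   /* next multiple of 4 */
	}

	int main(void)
	{
		unsigned int len;

		for (len = 9; len <= 12; len++)
			printf("mic.len %2u -> trim %u + 4 bytes\n",
			       len, round_up_to_quad(len));
		return 0;
	}
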
+diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
+index f734033af219..5c17a5c1f306 100644
+--- a/scripts/Makefile.extrawarn
++++ b/scripts/Makefile.extrawarn
+@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
+ warning-1 += -Wold-style-definition
+ warning-1 += $(call cc-option, -Wmissing-include-dirs)
+ warning-1 += $(call cc-option, -Wunused-but-set-variable)
++warning-1 += $(call cc-option, -Wunused-const-variable)
+ warning-1 += $(call cc-disable-warning, missing-field-initializers)
+ 
+ warning-2 := -Waggregate-return
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 29595e0c3fb4..bee74795c9b9 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4630,6 +4630,7 @@ enum {
+ 	ALC288_FIXUP_DISABLE_AAMIX,
+ 	ALC292_FIXUP_DELL_E7X,
+ 	ALC292_FIXUP_DISABLE_AAMIX,
++	ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
+ 	ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC275_FIXUP_DELL_XPS,
+ 	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+@@ -5201,6 +5202,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE
+ 	},
++	[ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
+ 	[ALC292_FIXUP_DELL_E7X] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_dell_xps13,
+@@ -5323,13 +5330,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+-	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+-	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+-	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+-	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+-	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
++	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
++	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
++	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
++	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
++	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
+index 13585e88f597..8e9b76d03f81 100644
+--- a/sound/soc/codecs/ak4642.c
++++ b/sound/soc/codecs/ak4642.c
+@@ -64,12 +64,15 @@
+ #define FIL1_0		0x1c
+ #define FIL1_1		0x1d
+ #define FIL1_2		0x1e
+-#define FIL1_3		0x1f
++#define FIL1_3		0x1f	/* The maximum valid register for ak4642 */
+ #define PW_MGMT4	0x20
+ #define MD_CTL5		0x21
+ #define LO_MS		0x22
+ #define HP_MS		0x23
+-#define SPK_MS		0x24
++#define SPK_MS		0x24	/* The maximum valid register for ak4643 */
++#define EQ_FBEQAB	0x25
++#define EQ_FBEQCD	0x26
++#define EQ_FBEQE	0x27	/* The maximum valid register for ak4648 */
+ 
+ /* PW_MGMT1*/
+ #define PMVCM		(1 << 6) /* VCOM Power Management */
+@@ -241,7 +244,7 @@ static const struct snd_soc_dapm_route ak4642_intercon[] = {
+ /*
+  * ak4642 register cache
+  */
+-static const struct reg_default ak4642_reg[] = {
++static const struct reg_default ak4643_reg[] = {
+ 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
+ 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
+ 	{  8, 0xe1 }, {  9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
+@@ -254,6 +257,14 @@ static const struct reg_default ak4642_reg[] = {
+ 	{ 36, 0x00 },
+ };
+ 
++/* The default settings for 0x0 ~ 0x1f registers are the same for ak4642
++   and ak4643. So we reuse the ak4643 reg_default for ak4642.
++   The valid registers for ak4642 are 0x0 ~ 0x1f which is a subset of ak4643,
++   so define NUM_AK4642_REG_DEFAULTS for ak4642.
++*/
++#define ak4642_reg ak4643_reg
++#define NUM_AK4642_REG_DEFAULTS	(FIL1_3 + 1)
++
+ static const struct reg_default ak4648_reg[] = {
+ 	{  0, 0x00 }, {  1, 0x00 }, {  2, 0x01 }, {  3, 0x00 },
+ 	{  4, 0x02 }, {  5, 0x00 }, {  6, 0x00 }, {  7, 0x00 },
+@@ -536,17 +547,28 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
+ static const struct regmap_config ak4642_regmap = {
+ 	.reg_bits		= 8,
+ 	.val_bits		= 8,
+-	.max_register		= ARRAY_SIZE(ak4642_reg) + 1,
++	.max_register		= FIL1_3,
+ 	.reg_defaults		= ak4642_reg,
+-	.num_reg_defaults	= ARRAY_SIZE(ak4642_reg),
++	.num_reg_defaults	= NUM_AK4642_REG_DEFAULTS,
++	.cache_type		= REGCACHE_RBTREE,
++};
++
++static const struct regmap_config ak4643_regmap = {
++	.reg_bits		= 8,
++	.val_bits		= 8,
++	.max_register		= SPK_MS,
++	.reg_defaults		= ak4643_reg,
++	.num_reg_defaults	= ARRAY_SIZE(ak4643_reg),
++	.cache_type		= REGCACHE_RBTREE,
+ };
+ 
+ static const struct regmap_config ak4648_regmap = {
+ 	.reg_bits		= 8,
+ 	.val_bits		= 8,
+-	.max_register		= ARRAY_SIZE(ak4648_reg) + 1,
++	.max_register		= EQ_FBEQE,
+ 	.reg_defaults		= ak4648_reg,
+ 	.num_reg_defaults	= ARRAY_SIZE(ak4648_reg),
++	.cache_type		= REGCACHE_RBTREE,
+ };
+ 
+ static const struct ak4642_drvdata ak4642_drvdata = {
+@@ -554,7 +576,7 @@ static const struct ak4642_drvdata ak4642_drvdata = {
+ };
+ 
+ static const struct ak4642_drvdata ak4643_drvdata = {
+-	.regmap_config = &ak4642_regmap,
++	.regmap_config = &ak4643_regmap,
+ };
+ 
+ static const struct ak4642_drvdata ak4648_drvdata = {


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-05-24 12:39 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-05-24 12:39 UTC (permalink / raw
  To: gentoo-commits

commit:     52f7ac601320fc8777c2da9f66f3485952b0d0f1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue May 24 12:38:50 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue May 24 12:38:50 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=52f7ac60

Linux patch 4.1.25

 0000_README             |    4 +
 1024_linux-4.1.25.patch | 5192 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5196 insertions(+)

diff --git a/0000_README b/0000_README
index 6700cd7..fcfa288 100644
--- a/0000_README
+++ b/0000_README
@@ -139,6 +139,10 @@ Patch:  1023_linux-4.1.24.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.24
 
+Patch:  1024_linux-4.1.25.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.25
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1024_linux-4.1.25.patch b/1024_linux-4.1.25.patch
new file mode 100644
index 0000000..0be2af8
--- /dev/null
+++ b/1024_linux-4.1.25.patch
@@ -0,0 +1,5192 @@
+diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+index c2340eeeb97f..c000832a7fb9 100644
+--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
+@@ -30,6 +30,10 @@ Optional properties:
+ - target-supply     : regulator for SATA target power
+ - phys              : reference to the SATA PHY node
+ - phy-names         : must be "sata-phy"
++- ports-implemented : Mask that indicates which ports that the HBA supports
++		      are available for software to use. Useful if PORTS_IMPL
++		      is not programmed by the BIOS, which is true with
++		      some embedded SOC's.
+ 
+ Required properties when using sub-nodes:
+ - #address-cells    : number of cells to encode an address
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 0b51c8a3c627..a1d127a83a48 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -3772,8 +3772,8 @@ F:	Documentation/efi-stub.txt
+ F:	arch/ia64/kernel/efi.c
+ F:	arch/x86/boot/compressed/eboot.[ch]
+ F:	arch/x86/include/asm/efi.h
+-F:	arch/x86/platform/efi/*
+-F:	drivers/firmware/efi/*
++F:	arch/x86/platform/efi/
++F:	drivers/firmware/efi/
+ F:	include/linux/efi*.h
+ 
+ EFI VARIABLE FILESYSTEM
+diff --git a/Makefile b/Makefile
+index df1d8b1448ae..c2f929d78726 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 24
++SUBLEVEL = 25
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index 5bb016427107..e01316bbef61 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -12,6 +12,7 @@
+ #include <asm/memory.h>
+ 
+ 	.arch	armv7-a
++	.arm
+ 
+ ENTRY(secondary_trampoline)
+ 	/* CPU1 will always fetch from 0x0 when it is brought out of reset.
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 02cf40c96fe3..099c23616901 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -342,7 +342,7 @@ tracesys_next:
+ 	stw     %r21, -56(%r30)                 /* 6th argument */
+ #endif
+ 
+-	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
++	comiclr,>>	__NR_Linux_syscalls, %r20, %r0
+ 	b,n	.Ltracesys_nosys
+ 
+ 	LDREGX  %r20(%r19), %r19
+diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
+index 5b3a903adae6..7043539e0248 100644
+--- a/arch/powerpc/include/asm/word-at-a-time.h
++++ b/arch/powerpc/include/asm/word-at-a-time.h
+@@ -77,7 +77,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
+ 	    "andc	%1,%1,%2\n\t"
+ 	    "popcntd	%0,%1"
+ 		: "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+-		: "r" (bits));
++		: "b" (bits));
+ 
+ 	return leading_zero_bits;
+ }
+diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
+index b285d4e8c68e..5da924bbf0a0 100644
+--- a/arch/x86/kernel/sysfb_efi.c
++++ b/arch/x86/kernel/sysfb_efi.c
+@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
+ 					continue;
+ 				for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ 					resource_size_t start, end;
++					unsigned long flags;
++
++					flags = pci_resource_flags(dev, i);
++					if (!(flags & IORESOURCE_MEM))
++						continue;
++
++					if (flags & IORESOURCE_UNSET)
++						continue;
++
++					if (pci_resource_len(dev, i) == 0)
++						continue;
+ 
+ 					start = pci_resource_start(dev, i);
+-					if (start == 0)
+-						break;
+ 					end = pci_resource_end(dev, i);
+ 					if (screen_info.lfb_base >= start &&
+ 					    screen_info.lfb_base < end) {
+ 						found_bar = 1;
++						break;
+ 					}
+ 				}
+ 			}
+diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
+index 92ae6acac8a7..6aa0f4d9eea6 100644
+--- a/arch/x86/kernel/tsc_msr.c
++++ b/arch/x86/kernel/tsc_msr.c
+@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
+ 
+ 	if (freq_desc_tables[cpu_index].msr_plat) {
+ 		rdmsr(MSR_PLATFORM_INFO, lo, hi);
+-		ratio = (lo >> 8) & 0x1f;
++		ratio = (lo >> 8) & 0xff;
+ 	} else {
+ 		rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ 		ratio = (hi >> 8) & 0x1f;
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index d19b52324cf5..dac1c24e9c3e 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
+ 	struct scatterlist *sg;
+ 
+ 	sg = walk->sg;
+-	walk->pg = sg_page(sg);
+ 	walk->offset = sg->offset;
++	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
++	walk->offset = offset_in_page(walk->offset);
+ 	walk->entrylen = sg->length;
+ 
+ 	if (walk->entrylen > walk->total)
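
A scatterlist entry's offset is allowed to exceed PAGE_SIZE (building one from a large linear buffer can produce that), so the page pointer has to be advanced and only the in-page remainder kept, or the walk maps the wrong page. The split is plain shift-and-mask (4 KiB pages assumed for the demo):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE  (1u << PAGE_SHIFT)

	int main(void)
	{
		unsigned int offset = 9000;  /* > PAGE_SIZE */

		/* old: page + 0, offset 9000 -> reads past the mapping */
		printf("page +%u, offset %u\n",
		       offset >> PAGE_SHIFT,        /* whole pages to skip */
		       offset & (PAGE_SIZE - 1));   /* offset_in_page()    */
		return 0;
	}
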
+diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
+index d72565a3c646..adff30d5ba33 100644
+--- a/drivers/acpi/acpica/dsmethod.c
++++ b/drivers/acpi/acpica/dsmethod.c
+@@ -412,6 +412,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
+ 				obj_desc->method.mutex->mutex.
+ 				    original_sync_level =
+ 				    obj_desc->method.mutex->mutex.sync_level;
++
++				obj_desc->method.mutex->mutex.thread_id =
++				    acpi_os_get_thread_id();
+ 			}
+ 		}
+ 
+diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
+index 78d6ae0b90c4..706af86bde6b 100644
+--- a/drivers/ata/ahci_platform.c
++++ b/drivers/ata/ahci_platform.c
+@@ -49,6 +49,9 @@ static int ahci_probe(struct platform_device *pdev)
+ 	if (rc)
+ 		return rc;
+ 
++	of_property_read_u32(dev->of_node,
++			     "ports-implemented", &hpriv->force_port_map);
++
+ 	if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
+ 		hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
+ 
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 49840264dd57..de56b91238c9 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -467,6 +467,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
+ 			 port_map, hpriv->force_port_map);
+ 		port_map = hpriv->force_port_map;
++		hpriv->saved_port_map = port_map;
+ 	}
+ 
+ 	if (hpriv->mask_port_map) {
+diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c
+index d7026dc33388..b394aaef3867 100644
+--- a/drivers/base/regmap/regmap-spmi.c
++++ b/drivers/base/regmap/regmap-spmi.c
+@@ -153,7 +153,7 @@ static int regmap_spmi_ext_read(void *context,
+ 	while (val_size) {
+ 		len = min_t(size_t, val_size, 8);
+ 
+-		err = spmi_ext_register_readl(context, addr, val, val_size);
++		err = spmi_ext_register_readl(context, addr, val, len);
+ 		if (err)
+ 			goto err_out;
+ 
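
The loop already computed an 8-byte chunk length but then passed the full remaining size to every transfer, overrunning each chunk after the first. The corrected shape, reduced to its essentials:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val_size = 20, addr = 0x100;

		while (val_size) {
			unsigned int len = val_size < 8 ? val_size : 8;

			/* was: transfer(addr, buf, val_size) */
			printf("read %u bytes at %#x\n", len, addr);
			addr += len;
			val_size -= len;
		}
		return 0;
	}
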
+diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
+index 545069d5fdfb..e342565e8715 100644
+--- a/drivers/cpuidle/cpuidle-arm.c
++++ b/drivers/cpuidle/cpuidle-arm.c
+@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
+ 		 * call the CPU ops suspend protocol with idle index as a
+ 		 * parameter.
+ 		 */
+-		arm_cpuidle_suspend(idx);
++		ret = arm_cpuidle_suspend(idx);
+ 
+ 		cpu_pm_exit();
+ 	}
+diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
+index 01087a38da22..792bdae2b91d 100644
+--- a/drivers/edac/i7core_edac.c
++++ b/drivers/edac/i7core_edac.c
+@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	i7_dev = get_i7core_dev(mce->socketid);
+ 	if (!i7_dev)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 
+ 	mci = i7_dev->mci;
+ 	pvt = mci->pvt_info;
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 5d87111fdc87..a7e7be0a8ae8 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2175,7 +2175,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
+ 
+ 	mci = get_mci_for_node_id(mce->socketid);
+ 	if (!mci)
+-		return NOTIFY_BAD;
++		return NOTIFY_DONE;
+ 	pvt = mci->pvt_info;
+ 
+ 	/*
+diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
+index 7f2ea21c730d..6f182fd91a6d 100644
+--- a/drivers/firmware/efi/vars.c
++++ b/drivers/firmware/efi/vars.c
+@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
+ 	{ NULL_GUID, "", NULL },
+ };
+ 
++/*
++ * Check if @var_name matches the pattern given in @match_name.
++ *
++ * @var_name: an array of @len non-NUL characters.
++ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
++ *              final "*" character matches any trailing characters in @var_name,
++ *              including the case when there are none left in @var_name.
++ * @match: on output, the number of non-wildcard characters in @match_name
++ *         that @var_name matches, regardless of the return value.
++ * @return: whether @var_name fully matches @match_name.
++ */
+ static bool
+ variable_matches(const char *var_name, size_t len, const char *match_name,
+ 		 int *match)
+ {
+ 	for (*match = 0; ; (*match)++) {
+ 		char c = match_name[*match];
+-		char u = var_name[*match];
+ 
+-		/* Wildcard in the matching name means we've matched */
+-		if (c == '*')
++		switch (c) {
++		case '*':
++			/* Wildcard in @match_name means we've matched. */
+ 			return true;
+ 
+-		/* Case sensitive match */
+-		if (!c && *match == len)
+-			return true;
++		case '\0':
++			/* @match_name has ended. Has @var_name too? */
++			return (*match == len);
+ 
+-		if (c != u)
++		default:
++			/*
++			 * We've reached a non-wildcard char in @match_name.
++			 * Continue only if there's an identical character in
++			 * @var_name.
++			 */
++			if (*match < len && c == var_name[*match])
++				continue;
+ 			return false;
+-
+-		if (!c)
+-			return true;
++		}
+ 	}
+-	return true;
+ }
+ 
+ bool
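
The rewritten matcher is self-contained enough to exercise standalone; compiled as below (headers added, kernel types swapped for their C equivalents), it confirms the documented semantics — a trailing "*" matches any remainder, including an empty one:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	static bool variable_matches(const char *var_name, size_t len,
				     const char *match_name, int *match)
	{
		for (*match = 0; ; (*match)++) {
			char c = match_name[*match];

			switch (c) {
			case '*':
				return true;                 /* wildcard hit */
			case '\0':
				return *match == (int)len;   /* both ended?  */
			default:
				if ((size_t)*match < len &&
				    c == var_name[*match])
					continue;
				return false;
			}
		}
	}

	int main(void)
	{
		const char *names[] = { "Boot0000", "BootOrder",
					"Boot", "Blah0000" };
		size_t i;
		int m;

		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
			printf("%-9s vs \"Boot*\" -> %d\n", names[i],
			       (int)variable_matches(names[i],
						     strlen(names[i]),
						     "Boot*", &m));
		return 0;     /* prints 1, 1, 1, 0 */
	}
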
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 2c04c59022f3..10b8839cbd0c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1784,6 +1784,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 		req_payload.start_slot = cur_slots;
+ 		if (mgr->proposed_vcpis[i]) {
+ 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
++			port = drm_dp_get_validated_port_ref(mgr, port);
++			if (!port) {
++				mutex_unlock(&mgr->payload_lock);
++				return -EINVAL;
++			}
+ 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
+ 		} else {
+ 			port = NULL;
+@@ -1809,6 +1814,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+ 			mgr->payloads[i].payload_state = req_payload.payload_state;
+ 		}
+ 		cur_slots += req_payload.num_slots;
++
++		if (port)
++			drm_dp_put_port(port);
+ 	}
+ 
+ 	for (i = 0; i < mgr->max_payloads; i++) {
+@@ -2114,6 +2122,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->mst_primary) {
+ 		int sret;
++		u8 guid[16];
++
+ 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
+ 		if (sret != DP_RECEIVER_CAP_SIZE) {
+ 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+@@ -2128,6 +2138,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
+ 			ret = -1;
+ 			goto out_unlock;
+ 		}
++
++		/* Some hubs forget their guids after they resume */
++		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
++		if (sret != 16) {
++			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
++			ret = -1;
++			goto out_unlock;
++		}
++		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
++
+ 		ret = 0;
+ 	} else
+ 		ret = -1;
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index a30db4b4050e..d20d818620c6 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -2452,7 +2452,14 @@ enum skl_disp_power_wells {
+ #define GEN6_RP_STATE_LIMITS	(MCHBAR_MIRROR_BASE_SNB + 0x5994)
+ #define GEN6_RP_STATE_CAP	(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+ 
+-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
++/*
++ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
++ * 8300) freezing up around GPU hangs. Looks as if even
++ * scheduling/timer interrupts start misbehaving if the RPS
++ * EI/thresholds are "bad", leading to a very sluggish or even
++ * frozen machine.
++ */
++#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
+ #define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
+ #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
+ 				INTERVAL_1_33_US(us) : \
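
The roundup() keeps the 1.28us-unit conversion but snaps the result up to a
multiple of 25. A worked example, with roundup() expanded to its kernel
semantics (a sketch only, for illustration):

	/* kernel's roundup(): round x up to the next multiple of y */
	#define roundup(x, y)		((((x) + (y) - 1) / (y)) * (y))
	#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)

	/* us = 1000:  (1000 * 100) >> 7 = 781 raw 1.28us units;
	 * roundup(781, 25) = 800 -- the nearest "safe" multiple. */
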
+diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
+index 515d7123785d..0542c252dde5 100644
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -312,8 +312,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
+ 		pipe_config->has_pch_encoder = true;
+ 
+ 	/* LPT FDI RX only supports 8bpc. */
+-	if (HAS_PCH_LPT(dev))
++	if (HAS_PCH_LPT(dev)) {
++		if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
++			DRM_DEBUG_KMS("LPT only supports 24bpp\n");
++			return false;
++		}
++
+ 		pipe_config->pipe_bpp = 24;
++	}
+ 
+ 	/* FDI must always be 2.7 GHz */
+ 	if (HAS_DDI(dev)) {
+diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
+index 3eb0efc2dd0d..4d554ec867be 100644
+--- a/drivers/gpu/drm/i915/intel_ddi.c
++++ b/drivers/gpu/drm/i915/intel_ddi.c
+@@ -2167,12 +2167,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
+ 	intel_ddi_clock_get(encoder, pipe_config);
+ }
+ 
+-static void intel_ddi_destroy(struct drm_encoder *encoder)
+-{
+-	/* HDMI has nothing special to destroy, so we can go with this. */
+-	intel_dp_encoder_destroy(encoder);
+-}
+-
+ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ 				     struct intel_crtc_state *pipe_config)
+ {
+@@ -2191,7 +2185,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
+ }
+ 
+ static const struct drm_encoder_funcs intel_ddi_funcs = {
+-	.destroy = intel_ddi_destroy,
++	.reset = intel_dp_encoder_reset,
++	.destroy = intel_dp_encoder_destroy,
+ };
+ 
+ static struct intel_connector *
+@@ -2264,6 +2259,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
+ 	intel_encoder->post_disable = intel_ddi_post_disable;
+ 	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
+ 	intel_encoder->get_config = intel_ddi_get_config;
++	intel_encoder->suspend = intel_dp_encoder_suspend;
+ 
+ 	intel_dig_port->port = port;
+ 	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index fb2983f77141..3f8cb8017a71 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4549,7 +4549,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+ 	kfree(intel_dig_port);
+ }
+ 
+-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+ {
+ 	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ 
+@@ -4591,7 +4591,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
+ 	edp_panel_vdd_schedule_off(intel_dp);
+ }
+ 
+-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
++void intel_dp_encoder_reset(struct drm_encoder *encoder)
+ {
+ 	struct intel_dp *intel_dp;
+ 
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 68d1f74a7403..34291dce48c4 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1066,6 +1066,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp);
+ void intel_dp_complete_link_train(struct intel_dp *intel_dp);
+ void intel_dp_stop_link_train(struct intel_dp *intel_dp);
+ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
++void intel_dp_encoder_reset(struct drm_encoder *encoder);
++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+ void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
+ bool intel_dp_compute_config(struct intel_encoder *encoder,
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 7ac42d063574..c868acb47e03 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -325,8 +325,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
+ 			}
+ 		}
+ 	} else {
+-		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+-			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++			for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+ 				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+ 				if (max_pix_clock >= pix_clock) {
+ 					*dp_lanes = lane_num;
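
Swapping the loop nesting changes which configuration wins: the outer loop
now walks link rates in ascending order, so the first match is the lowest
link rate (with however many lanes it takes) that can still carry the pixel
clock, rather than the fewest lanes at a possibly higher rate. A condensed,
self-contained sketch of the resulting search (types and names illustrative;
link_rates[] assumed ascending):

	static bool pick_dp_config(const int *link_rates, int n_rates,
				   int max_rate, int max_lanes, int bpp,
				   int pix_clock, int *lanes, int *rate)
	{
		int i, l;

		for (i = 0; i < n_rates && link_rates[i] <= max_rate; i++) {
			for (l = 1; l <= max_lanes; l <<= 1) {
				/* each lane carries link_rate * 8 / bpp pixels */
				if ((l * link_rates[i] * 8) / bpp >= pix_clock) {
					*lanes = l;
					*rate = link_rates[i];
					return true;
				}
			}
		}
		return false;
	}
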
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index adf74f4366bb..0b04b9282f56 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+ 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+ 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+ 
++	/* vertical FP must be at least 1 */
++	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
++		adjusted_mode->crtc_vsync_start++;
++
+ 	/* get the native mode for scaling */
+ 	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+ 		radeon_panel_mode_fixup(encoder, adjusted_mode);
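
The added check guarantees at least one line of vertical front porch
(crtc_vsync_start - crtc_vdisplay). For instance, a 1920x1080 mode that
advertises crtc_vsync_start == crtc_vdisplay == 1080 has a zero-line front
porch; bumping vsync_start by one restores the minimum the hardware needs:

	/* vertical FP = crtc_vsync_start - crtc_vdisplay, must be >= 1 */
	if (mode->crtc_vsync_start == mode->crtc_vdisplay)
		adjusted_mode->crtc_vsync_start++;	/* e.g. 1080 -> 1081 */
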
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index feef136cdb55..3bb4fdea8002 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -2560,10 +2560,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
+ 	WREG32(VM_CONTEXT1_CNTL, 0);
+ }
+ 
++static const unsigned ni_dig_offsets[] =
++{
++	NI_DIG0_REGISTER_OFFSET,
++	NI_DIG1_REGISTER_OFFSET,
++	NI_DIG2_REGISTER_OFFSET,
++	NI_DIG3_REGISTER_OFFSET,
++	NI_DIG4_REGISTER_OFFSET,
++	NI_DIG5_REGISTER_OFFSET
++};
++
++static const unsigned ni_tx_offsets[] =
++{
++	NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
++	NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
++};
++
++static const unsigned evergreen_dp_offsets[] =
++{
++	EVERGREEN_DP0_REGISTER_OFFSET,
++	EVERGREEN_DP1_REGISTER_OFFSET,
++	EVERGREEN_DP2_REGISTER_OFFSET,
++	EVERGREEN_DP3_REGISTER_OFFSET,
++	EVERGREEN_DP4_REGISTER_OFFSET,
++	EVERGREEN_DP5_REGISTER_OFFSET
++};
++
++
++/*
++ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
++ * We go from crtc to connector, which is not reliable since the lookup
++ * really ought to run in the opposite direction. If the crtc is enabled,
++ * find the dig_fe which selects this crtc and ensure that it is enabled.
++ * If such a dig_fe is found, find the dig_be which selects that dig_fe
++ * and ensure that it is enabled and in DP_SST mode.
++ * If UNIPHY_PLL_CONTROL1.enable is set, we should disconnect the timing
++ * from the dp symbol clocks.
++ */
++static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
++					       unsigned crtc_id, unsigned *ret_dig_fe)
++{
++	unsigned i;
++	unsigned dig_fe;
++	unsigned dig_be;
++	unsigned dig_en_be;
++	unsigned uniphy_pll;
++	unsigned digs_fe_selected;
++	unsigned dig_be_mode;
++	unsigned dig_fe_mask;
++	bool is_enabled = false;
++	bool found_crtc = false;
++
++	/* loop through all running dig_fe to find selected crtc */
++	for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++		dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
++		if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
++		    crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
++			/* found running pipe */
++			found_crtc = true;
++			dig_fe_mask = 1 << i;
++			dig_fe = i;
++			break;
++		}
++	}
++
++	if (found_crtc) {
++		/* loop through all running dig_be to find selected dig_fe */
++		for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
++			dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
++			/* is dig_fe selected by this dig_be? */
++			digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
++			dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
++			if (dig_fe_mask & digs_fe_selected &&
++			    /* is dig_be in SST mode? */
++			    dig_be_mode == NI_DIG_BE_DPSST) {
++				dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
++						   ni_dig_offsets[i]);
++				uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
++						    ni_tx_offsets[i]);
++				/* dig_be enable and tx is running */
++				if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
++				    dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
++				    uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
++					is_enabled = true;
++					*ret_dig_fe = dig_fe;
++					break;
++				}
++			}
++		}
++	}
++
++	return is_enabled;
++}
++
++/*
++ * Blank the dig when it is in DP SST mode;
++ * the dig ignores the crtc timing.
++ */
++static void evergreen_blank_dp_output(struct radeon_device *rdev,
++				      unsigned dig_fe)
++{
++	unsigned stream_ctrl;
++	unsigned fifo_ctrl;
++	unsigned counter = 0;
++
++	if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
++		DRM_ERROR("invalid dig_fe %d\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
++		DRM_ERROR("dig %d should be enabled\n", dig_fe);
++		return;
++	}
++
++	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
++	WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++	       evergreen_dp_offsets[dig_fe], stream_ctrl);
++
++	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++			     evergreen_dp_offsets[dig_fe]);
++	while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
++		msleep(1);
++		counter++;
++		stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
++				     evergreen_dp_offsets[dig_fe]);
++	}
++	if (counter >= 32)
++		DRM_ERROR("DP stream disable timed out after %d iterations\n", counter);
++
++	fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
++	fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
++	WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
++
++}
++
+ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+ {
+ 	u32 crtc_enabled, tmp, frame_count, blackout;
+ 	int i, j;
++	unsigned dig_fe;
+ 
+ 	if (!ASIC_IS_NODCE(rdev)) {
+ 		save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+@@ -2603,7 +2745,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
+ 					break;
+ 				udelay(1);
+ 			}
+-
++			/*
++			 * We should disable the dig if it drives a DP SST
++			 * stream, but we are in radeon_device_init and the
++			 * display topology is unknown; it only becomes
++			 * available after radeon_modeset_init. The method
++			 * radeon_atom_encoder_dpms_dig would do the job if
++			 * we initialized it properly, so for now do it
++			 * manually here.
++			 */
++			if (ASIC_IS_DCE5(rdev) &&
++			    evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
++				evergreen_blank_dp_output(rdev, dig_fe);
++			/* the six lines below could likely be removed now */
+ 			/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ 			WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ 			tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
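
evergreen_blank_dp_output() above uses the common bounded-poll idiom: clear
the enable bit, then re-sample the status a fixed number of times with a
short sleep between reads. In generic, hedged form (REG_READ/REG_WRITE and
the bound of 32 are illustrative stand-ins, not driver API):

	unsigned tries = 0;
	u32 ctrl = REG_READ(STREAM_CNTL);

	REG_WRITE(STREAM_CNTL, ctrl & ~STREAM_ENABLE);	/* request stop */
	while (tries < 32 && (REG_READ(STREAM_CNTL) & STREAM_STATUS)) {
		msleep(1);				/* let hw drain */
		tries++;
	}
	if (tries >= 32)
		DRM_ERROR("stream did not stop\n");	/* give up, log */
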
+diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
+index aa939dfed3a3..b436badf9efa 100644
+--- a/drivers/gpu/drm/radeon/evergreen_reg.h
++++ b/drivers/gpu/drm/radeon/evergreen_reg.h
+@@ -250,8 +250,43 @@
+ 
+ /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+ #define EVERGREEN_HDMI_BASE				0x7030
++/* DIG block */
++#define NI_DIG0_REGISTER_OFFSET                 (0x7000  - 0x7000)
++#define NI_DIG1_REGISTER_OFFSET                 (0x7C00  - 0x7000)
++#define NI_DIG2_REGISTER_OFFSET                 (0x10800 - 0x7000)
++#define NI_DIG3_REGISTER_OFFSET                 (0x11400 - 0x7000)
++#define NI_DIG4_REGISTER_OFFSET                 (0x12000 - 0x7000)
++#define NI_DIG5_REGISTER_OFFSET                 (0x12C00 - 0x7000)
++
++
++#define NI_DIG_FE_CNTL                               0x7000
++#       define NI_DIG_FE_CNTL_SOURCE_SELECT(x)        ((x) & 0x3)
++#       define NI_DIG_FE_CNTL_SYMCLK_FE_ON            (1<<24)
++
++
++#define NI_DIG_BE_CNTL                    0x7140
++#       define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)     (((x) >> 8) & 0x3F)
++#       define NI_DIG_FE_CNTL_MODE(x)                 (((x) >> 16) & 0x7)
++
++#define NI_DIG_BE_EN_CNTL                              0x7144
++#       define NI_DIG_BE_EN_CNTL_ENABLE               (1 << 0)
++#       define NI_DIG_BE_EN_CNTL_SYMBCLK_ON           (1 << 8)
++#       define NI_DIG_BE_DPSST 0
+ 
+ /* Display Port block */
++#define EVERGREEN_DP0_REGISTER_OFFSET                 (0x730C  - 0x730C)
++#define EVERGREEN_DP1_REGISTER_OFFSET                 (0x7F0C  - 0x730C)
++#define EVERGREEN_DP2_REGISTER_OFFSET                 (0x10B0C - 0x730C)
++#define EVERGREEN_DP3_REGISTER_OFFSET                 (0x1170C - 0x730C)
++#define EVERGREEN_DP4_REGISTER_OFFSET                 (0x1230C - 0x730C)
++#define EVERGREEN_DP5_REGISTER_OFFSET                 (0x12F0C - 0x730C)
++
++
++#define EVERGREEN_DP_VID_STREAM_CNTL                    0x730C
++#       define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE     (1 << 0)
++#       define EVERGREEN_DP_VID_STREAM_STATUS          (1 << 16)
++#define EVERGREEN_DP_STEER_FIFO                         0x7310
++#       define EVERGREEN_DP_STEER_FIFO_RESET           (1 << 0)
+ #define EVERGREEN_DP_SEC_CNTL                           0x7280
+ #       define EVERGREEN_DP_SEC_STREAM_ENABLE           (1 << 0)
+ #       define EVERGREEN_DP_SEC_ASP_ENABLE              (1 << 4)
+@@ -266,4 +301,15 @@
+ #       define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x)      (((x) & 0xf) << 24)
+ #       define EVERGREEN_DP_SEC_SS_EN                   (1 << 28)
+ 
++/* DCIO_UNIPHY block */
++#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1            (0x6600 - 0x6600)
++#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1            (0x6640 - 0x6600)
++#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1            (0x6680 - 0x6600)
++#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1            (0x66C0 - 0x6600)
++#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1            (0x6700 - 0x6600)
++#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1            (0x6740 - 0x6600)
++
++#define NI_DCIO_UNIPHY0_PLL_CONTROL1                   0x6618
++#       define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE     (1 << 0)
++
+ #endif
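
All of the offset tables added above follow one convention: each
per-instance value is that instance's base address minus the block's base
register, so instance i of a register is addressed as REG + OFFSET[i]. Two
worked examples using the defines above (a sketch, not new driver code):

	/* NI_DIG_FE_CNTL for DIG instance 2:
	 *   0x7000 + (0x10800 - 0x7000) = 0x10800 */
	dig_fe = RREG32(NI_DIG_FE_CNTL + NI_DIG2_REGISTER_OFFSET);

	/* EVERGREEN_DP_VID_STREAM_CNTL for DP instance 3:
	 *   0x730C + (0x1170C - 0x730C) = 0x1170C */
	stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
			     EVERGREEN_DP3_REGISTER_OFFSET);
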
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index 3b0c229d7dcd..db64e0062689 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+ 
+ 	tmp &= AUX_HPD_SEL(0x7);
+ 	tmp |= AUX_HPD_SEL(chan->rec.hpd);
+-	tmp |= AUX_EN | AUX_LS_READ_EN;
++	tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+ 
+ 	WREG32(AUX_CONTROL + aux_offset[instance], tmp);
+ 
+diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
+index 6361d124f67d..14d45c70056e 100644
+--- a/drivers/hv/ring_buffer.c
++++ b/drivers/hv/ring_buffer.c
+@@ -103,19 +103,30 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+  *    there is room for the producer to send the pending packet.
+  */
+ 
+-static bool hv_need_to_signal_on_read(u32 old_rd,
+-					 struct hv_ring_buffer_info *rbi)
++static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
+ {
+-	u32 prev_write_sz;
+ 	u32 cur_write_sz;
+ 	u32 r_size;
+-	u32 write_loc = rbi->ring_buffer->write_index;
++	u32 write_loc;
+ 	u32 read_loc = rbi->ring_buffer->read_index;
+-	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
++	u32 pending_sz;
+ 
+ 	/*
+-	 * If the other end is not blocked on write don't bother.
++	 * Issue a full memory barrier before making the signaling decision.
++	 * Here is the reason for having this barrier: if the read of
++	 * pending_send_sz (in this function) were reordered before we commit
++	 * the new read index (in the calling function), we could have a
++	 * problem. If the host were to set pending_send_sz after we have
++	 * sampled it, and then go to sleep before we commit the read index,
++	 * we could miss sending the interrupt.
+ 	 */
++	mb();
++
++	pending_sz = rbi->ring_buffer->pending_send_sz;
++	write_loc = rbi->ring_buffer->write_index;
++	/* If the other end is not blocked on write, don't bother. */
+ 	if (pending_sz == 0)
+ 		return false;
+ 
+@@ -123,22 +134,13 @@ static bool hv_need_to_signal_on_read(u32 old_rd,
+ 	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+ 			read_loc - write_loc;
+ 
+-	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+-			old_rd - write_loc;
+-
+-
+-	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
++	if (cur_write_sz >= pending_sz)
+ 		return true;
+ 
+ 	return false;
+ }
+ 
+-/*
+- * hv_get_next_write_location()
+- *
+- * Get the next write location for the specified ring buffer
+- *
+- */
++/* Get the next write location for the specified ring buffer. */
+ static inline u32
+ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -147,12 +149,7 @@ hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
+ 	return next;
+ }
+ 
+-/*
+- * hv_set_next_write_location()
+- *
+- * Set the next write location for the specified ring buffer
+- *
+- */
++/* Set the next write location for the specified ring buffer. */
+ static inline void
+ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ 		     u32 next_write_location)
+@@ -160,11 +157,7 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
+ 	ring_info->ring_buffer->write_index = next_write_location;
+ }
+ 
+-/*
+- * hv_get_next_read_location()
+- *
+- * Get the next read location for the specified ring buffer
+- */
++/* Get the next read location for the specified ring buffer. */
+ static inline u32
+ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -174,10 +167,8 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ /*
+- * hv_get_next_readlocation_withoffset()
+- *
+  * Get the next read location + offset for the specified ring buffer.
+- * This allows the caller to skip
++ * This allows the caller to skip ahead by the given offset.
+  */
+ static inline u32
+ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+@@ -191,13 +182,7 @@ hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
+ 	return next;
+ }
+ 
+-/*
+- *
+- * hv_set_next_read_location()
+- *
+- * Set the next read location for the specified ring buffer
+- *
+- */
++/* Set the next read location for the specified ring buffer. */
+ static inline void
+ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
+ 		    u32 next_read_location)
+@@ -206,12 +191,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_get_ring_buffer()
+- *
+- * Get the start of the ring buffer
+- */
++/* Get the start of the ring buffer. */
+ static inline void *
+ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -219,25 +199,14 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ 
+-/*
+- *
+- * hv_get_ring_buffersize()
+- *
+- * Get the size of the ring buffer
+- */
++/* Get the size of the ring buffer. */
+ static inline u32
+ hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
+ {
+ 	return ring_info->ring_datasize;
+ }
+ 
+-/*
+- *
+- * hv_get_ring_bufferindices()
+- *
+- * Get the read and write indices as u64 of the specified ring buffer
+- *
+- */
++/* Get the read and write indices as u64 of the specified ring buffer. */
+ static inline u64
+ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+ {
+@@ -245,12 +214,8 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
+ }
+ 
+ /*
+- *
+- * hv_copyfrom_ringbuffer()
+- *
+  * Helper routine to copy to source from ring buffer.
+  * Assume there is enough room. Handles wrap-around in src case only!!
+- *
+  */
+ static u32 hv_copyfrom_ringbuffer(
+ 	struct hv_ring_buffer_info	*ring_info,
+@@ -282,12 +247,8 @@ static u32 hv_copyfrom_ringbuffer(
+ 
+ 
+ /*
+- *
+- * hv_copyto_ringbuffer()
+- *
+  * Helper routine to copy from source to ring buffer.
+  * Assume there is enough room. Handles wrap-around in dest case only!!
+- *
+  */
+ static u32 hv_copyto_ringbuffer(
+ 	struct hv_ring_buffer_info	*ring_info,
+@@ -313,13 +274,7 @@ static u32 hv_copyto_ringbuffer(
+ 	return start_write_offset;
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_get_debuginfo()
+- *
+- * Get various debug metrics for the specified ring buffer
+- *
+- */
++/* Get various debug metrics for the specified ring buffer. */
+ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ 			    struct hv_ring_buffer_debug_info *debug_info)
+ {
+@@ -342,13 +297,7 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
+ 	}
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_init()
+- *
+- *Initialize the ring buffer
+- *
+- */
++/* Initialize the ring buffer. */
+ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 		   void *buffer, u32 buflen)
+ {
+@@ -361,9 +310,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 	ring_info->ring_buffer->read_index =
+ 		ring_info->ring_buffer->write_index = 0;
+ 
+-	/*
+-	 * Set the feature bit for enabling flow control.
+-	 */
++	/* Set the feature bit for enabling flow control. */
+ 	ring_info->ring_buffer->feature_bits.value = 1;
+ 
+ 	ring_info->ring_size = buflen;
+@@ -374,24 +321,12 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+ 	return 0;
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_cleanup()
+- *
+- * Cleanup the ring buffer
+- *
+- */
++/* Cleanup the ring buffer. */
+ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
+ {
+ }
+ 
+-/*
+- *
+- * hv_ringbuffer_write()
+- *
+- * Write to the ring buffer
+- *
+- */
++/* Write to the ring buffer. */
+ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ 		    struct kvec *kv_list, u32 kv_count, bool *signal)
+ {
+@@ -416,10 +351,11 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ 				&bytes_avail_toread,
+ 				&bytes_avail_towrite);
+ 
+-
+-	/* If there is only room for the packet, assume it is full. */
+-	/* Otherwise, the next time around, we think the ring buffer */
+-	/* is empty since the read index == write index */
++	/*
++	 * If there is only room for the packet, assume it is full.
++	 * Otherwise, the next time around, we think the ring buffer
++	 * is empty since the read index == write index.
++	 */
+ 	if (bytes_avail_towrite <= totalbytes_towrite) {
+ 		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ 		return -EAGAIN;
+@@ -459,13 +395,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_ringbuffer_peek()
+- *
+- * Read without advancing the read index
+- *
+- */
++/* Read without advancing the read index. */
+ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+ 		   void *Buffer, u32 buflen)
+ {
+@@ -502,13 +432,7 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
+ }
+ 
+ 
+-/*
+- *
+- * hv_ringbuffer_read()
+- *
+- * Read and advance the read index
+- *
+- */
++/* Read and advance the read index. */
+ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 		   u32 buflen, u32 offset, bool *signal)
+ {
+@@ -517,7 +441,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 	u32 next_read_location = 0;
+ 	u64 prev_indices = 0;
+ 	unsigned long flags;
+-	u32 old_read;
+ 
+ 	if (buflen <= 0)
+ 		return -EINVAL;
+@@ -528,8 +451,6 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 				&bytes_avail_toread,
+ 				&bytes_avail_towrite);
+ 
+-	old_read = bytes_avail_toread;
+-
+ 	/* Make sure there is something to read */
+ 	if (bytes_avail_toread < buflen) {
+ 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+@@ -550,9 +471,11 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 						sizeof(u64),
+ 						next_read_location);
+ 
+-	/* Make sure all reads are done before we update the read index since */
+-	/* the writer may start writing to the read area once the read index */
+-	/*is updated */
++	/*
++	 * Make sure all reads are done before we update the read index since
++	 * the writer may start writing to the read area once the read index
++	 * is updated.
++	 */
+ 	mb();
+ 
+ 	/* Update the read index */
+@@ -560,7 +483,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
+ 
+ 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
+ 
+-	*signal = hv_need_to_signal_on_read(old_read, inring_info);
++	*signal = hv_need_to_signal_on_read(inring_info);
+ 
+ 	return 0;
+ }
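
The barrier change above is the standard two-sided ring-buffer wakeup
protocol: the reader must publish its new read index before sampling
pending_send_sz, and the writer must publish pending_send_sz before
re-checking the read index. Sketched side by side (the host side is
paraphrased; only the guest side lives in this file):

	/* Guest, as in hv_ringbuffer_read() + hv_need_to_signal_on_read(): */
	ring->read_index = next_read_location;	/* publish our progress    */
	mb();					/* index before pending_sz */
	pending_sz = ring->pending_send_sz;	/* now decide to signal    */

	/* Host, conceptually the mirror image:
	 *	ring->pending_send_sz = needed;
	 *	mb();
	 *	if (free_space(ring->read_index) < needed)
	 *		sleep_until_signaled();
	 */
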
+diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
+index b13936dacc78..fd780bbcd07e 100644
+--- a/drivers/iio/magnetometer/ak8975.c
++++ b/drivers/iio/magnetometer/ak8975.c
+@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 	int rc;
+ 	int irq;
+ 
++	init_waitqueue_head(&data->data_ready_queue);
++	clear_bit(0, &data->flags);
+ 	if (client->irq)
+ 		irq = client->irq;
+ 	else
+@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
+ 		return rc;
+ 	}
+ 
+-	init_waitqueue_head(&data->data_ready_queue);
+-	clear_bit(0, &data->flags);
+ 	data->eoc_irq = irq;
+ 
+ 	return rc;
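
The ak8975 reorder is an instance of a general request_irq() rule: every
field the handler touches must be valid before the line is registered,
because the handler can fire immediately (shared IRQs, already-pending
status). Generic shape of the fix (the request call here is a paraphrase,
not necessarily the driver's exact invocation):

	/* 1. Initialize handler-visible state first. */
	init_waitqueue_head(&data->data_ready_queue);
	clear_bit(0, &data->flags);

	/* 2. Only then register; the handler may run before
	 *    devm_request_irq() even returns. */
	rc = devm_request_irq(&client->dev, irq, ak8975_irq_handler,
			      IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			      dev_name(&client->dev), data);
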
+diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
+index f2f63933e8a9..5befec118a18 100644
+--- a/drivers/infiniband/core/ucm.c
++++ b/drivers/infiniband/core/ucm.c
+@@ -48,6 +48,7 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
+ #include <rdma/ib_cm.h>
+ #include <rdma/ib_user_cm.h>
+ #include <rdma/ib_marshall.h>
+@@ -1104,6 +1105,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
+ 	struct ib_ucm_cmd_hdr hdr;
+ 	ssize_t result;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 45d67e9228d7..81dd84d0b68b 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -1487,6 +1487,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
+ 	struct rdma_ucm_cmd_hdr hdr;
+ 	ssize_t ret;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (len < sizeof(hdr))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 09686d49d4c1..e063b07de170 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -48,6 +48,8 @@
+ 
+ #include <asm/uaccess.h>
+ 
++#include <rdma/ib.h>
++
+ #include "uverbs.h"
+ 
+ MODULE_AUTHOR("Roland Dreier");
+@@ -613,6 +615,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ 	struct ib_uverbs_cmd_hdr hdr;
+ 	__u32 flags;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
++		return -EACCES;
++
+ 	if (count < sizeof hdr)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index 725881890c4a..619154ec8249 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -45,6 +45,8 @@
+ #include <linux/export.h>
+ #include <linux/uio.h>
+ 
++#include <rdma/ib.h>
++
+ #include "qib.h"
+ #include "qib_common.h"
+ #include "qib_user_sdma.h"
+@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
+ 	ssize_t ret = 0;
+ 	void *dest;
+ 
++	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
++		return -EACCES;
++
+ 	if (count < sizeof(cmd.type)) {
+ 		ret = -EINVAL;
+ 		goto bail;
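
The same three-line guard now opens four different write() handlers.
ib_safe_file_access(), declared in the newly included rdma/ib.h, rejects
callers whose credentials or address-space limit differ from those of the
process that opened the file, which shuts off kernel-initiated paths
(splice/sendfile and similar) into these command parsers. The shape of the
pattern, with a hypothetical handler name:

	static ssize_t example_write(struct file *filp, const char __user *buf,
				     size_t len, loff_t *pos)
	{
		/* Refuse writes that did not come directly from the
		 * opener's own user address space. */
		if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
			return -EACCES;

		/* ... parse the command in buf/len as before ... */
		return len;
	}
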
+diff --git a/drivers/input/misc/max8997_haptic.c b/drivers/input/misc/max8997_haptic.c
+index d0f687281339..2709fc147da6 100644
+--- a/drivers/input/misc/max8997_haptic.c
++++ b/drivers/input/misc/max8997_haptic.c
+@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
+ 	struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ 	const struct max8997_platform_data *pdata =
+ 					dev_get_platdata(iodev->dev);
+-	const struct max8997_haptic_platform_data *haptic_pdata =
+-					pdata->haptic_pdata;
++	const struct max8997_haptic_platform_data *haptic_pdata = NULL;
+ 	struct max8997_haptic *chip;
+ 	struct input_dev *input_dev;
+ 	int error;
+ 
++	if (pdata)
++		haptic_pdata = pdata->haptic_pdata;
++
+ 	if (!haptic_pdata) {
+ 		dev_err(&pdev->dev, "no haptic platform data\n");
+ 		return -EINVAL;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 78c1f77e7903..72dc91de80f8 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -289,6 +289,8 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
+ 	 * go away inside make_request
+ 	 */
+ 	sectors = bio_sectors(bio);
++	/* bio could be mergeable after passing to underlayer */
++	bio->bi_rw &= ~REQ_NOMERGE;
+ 	mddev->pers->make_request(mddev, bio);
+ 
+ 	cpu = part_stat_lock();
+diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
+index c8929c526691..7cafada79075 100644
+--- a/drivers/misc/cxl/irq.c
++++ b/drivers/misc/cxl/irq.c
+@@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
+ void cxl_unmap_irq(unsigned int virq, void *cookie)
+ {
+ 	free_irq(virq, cookie);
+-	irq_dispose_mapping(virq);
+ }
+ 
+ static int cxl_register_one_irq(struct cxl *adapter,
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+deleted file mode 100644
+index 80cab4ec0522..000000000000
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ /dev/null
+@@ -1,2717 +0,0 @@
+-/******************************************************************************
+- *
+- * This file is provided under a dual BSD/GPLv2 license.  When using or
+- * redistributing this file, you may do so under either license.
+- *
+- * GPL LICENSE SUMMARY
+- *
+- * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+- * Copyright(c) 2016 Intel Deutschland GmbH
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of version 2 of the GNU General Public License as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+- * USA
+- *
+- * The full GNU General Public License is included in this distribution
+- * in the file called COPYING.
+- *
+- * Contact Information:
+- *  Intel Linux Wireless <linuxwifi@intel.com>
+- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+- *
+- * BSD LICENSE
+- *
+- * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+- * Copyright(c) 2016 Intel Deutschland GmbH
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- *  * Redistributions of source code must retain the above copyright
+- *    notice, this list of conditions and the following disclaimer.
+- *  * Redistributions in binary form must reproduce the above copyright
+- *    notice, this list of conditions and the following disclaimer in
+- *    the documentation and/or other materials provided with the
+- *    distribution.
+- *  * Neither the name Intel Corporation nor the names of its
+- *    contributors may be used to endorse or promote products derived
+- *    from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- *****************************************************************************/
+-#include <linux/pci.h>
+-#include <linux/pci-aspm.h>
+-#include <linux/interrupt.h>
+-#include <linux/debugfs.h>
+-#include <linux/sched.h>
+-#include <linux/bitops.h>
+-#include <linux/gfp.h>
+-#include <linux/vmalloc.h>
+-
+-#include "iwl-drv.h"
+-#include "iwl-trans.h"
+-#include "iwl-csr.h"
+-#include "iwl-prph.h"
+-#include "iwl-scd.h"
+-#include "iwl-agn-hw.h"
+-#include "iwl-fw-error-dump.h"
+-#include "internal.h"
+-#include "iwl-fh.h"
+-
+-/* extended range in FW SRAM */
+-#define IWL_FW_MEM_EXTENDED_START	0x40000
+-#define IWL_FW_MEM_EXTENDED_END		0x57FFF
+-
+-static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	if (!trans_pcie->fw_mon_page)
+-		return;
+-
+-	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
+-		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
+-	__free_pages(trans_pcie->fw_mon_page,
+-		     get_order(trans_pcie->fw_mon_size));
+-	trans_pcie->fw_mon_page = NULL;
+-	trans_pcie->fw_mon_phys = 0;
+-	trans_pcie->fw_mon_size = 0;
+-}
+-
+-static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct page *page = NULL;
+-	dma_addr_t phys;
+-	u32 size = 0;
+-	u8 power;
+-
+-	if (!max_power) {
+-		/* default max_power is maximum */
+-		max_power = 26;
+-	} else {
+-		max_power += 11;
+-	}
+-
+-	if (WARN(max_power > 26,
+-		 "External buffer size for monitor is too big %d, check the FW TLV\n",
+-		 max_power))
+-		return;
+-
+-	if (trans_pcie->fw_mon_page) {
+-		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
+-					   trans_pcie->fw_mon_size,
+-					   DMA_FROM_DEVICE);
+-		return;
+-	}
+-
+-	phys = 0;
+-	for (power = max_power; power >= 11; power--) {
+-		int order;
+-
+-		size = BIT(power);
+-		order = get_order(size);
+-		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
+-				   order);
+-		if (!page)
+-			continue;
+-
+-		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
+-				    DMA_FROM_DEVICE);
+-		if (dma_mapping_error(trans->dev, phys)) {
+-			__free_pages(page, order);
+-			page = NULL;
+-			continue;
+-		}
+-		IWL_INFO(trans,
+-			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
+-			 size, order);
+-		break;
+-	}
+-
+-	if (WARN_ON_ONCE(!page))
+-		return;
+-
+-	if (power != max_power)
+-		IWL_ERR(trans,
+-			"Sorry - debug buffer is only %luK while you requested %luK\n",
+-			(unsigned long)BIT(power - 10),
+-			(unsigned long)BIT(max_power - 10));
+-
+-	trans_pcie->fw_mon_page = page;
+-	trans_pcie->fw_mon_phys = phys;
+-	trans_pcie->fw_mon_size = size;
+-}
+-
+-static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+-{
+-	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+-		    ((reg & 0x0000ffff) | (2 << 28)));
+-	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+-}
+-
+-static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+-{
+-	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+-	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+-		    ((reg & 0x0000ffff) | (3 << 28)));
+-}
+-
+-static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+-{
+-	if (trans->cfg->apmg_not_supported)
+-		return;
+-
+-	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+-		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+-				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+-				       ~APMG_PS_CTRL_MSK_PWR_SRC);
+-	else
+-		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+-				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+-				       ~APMG_PS_CTRL_MSK_PWR_SRC);
+-}
+-
+-/* PCI registers */
+-#define PCI_CFG_RETRY_TIMEOUT	0x041
+-
+-static void iwl_pcie_apm_config(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	u16 lctl;
+-	u16 cap;
+-
+-	/*
+-	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+-	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+-	 * If so (likely), disable L0S, so device moves directly L0->L1;
+-	 *    costs negligible amount of power savings.
+-	 * If not (unlikely), enable L0S, so there is at least some
+-	 *    power savings, even without L1.
+-	 */
+-	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+-	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
+-		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+-	else
+-		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+-	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+-
+-	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+-	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+-	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+-		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+-		 trans->ltr_enabled ? "En" : "Dis");
+-}
+-
+-/*
+- * Start up NIC's basic functionality after it has been reset
+- * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+- * NOTE:  This does not load uCode nor start the embedded processor
+- */
+-static int iwl_pcie_apm_init(struct iwl_trans *trans)
+-{
+-	int ret = 0;
+-	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+-
+-	/*
+-	 * Use "set_bit" below rather than "write", to preserve any hardware
+-	 * bits already set by default after reset.
+-	 */
+-
+-	/* Disable L0S exit timer (platform NMI Work/Around) */
+-	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+-		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+-			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+-
+-	/*
+-	 * Disable L0s without affecting L1;
+-	 *  don't wait for ICH L0s (ICH bug W/A)
+-	 */
+-	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+-		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+-
+-	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+-	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+-
+-	/*
+-	 * Enable HAP INTA (interrupt from management bus) to
+-	 * wake device's PCI Express link L1a -> L0s
+-	 */
+-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+-
+-	iwl_pcie_apm_config(trans);
+-
+-	/* Configure analog phase-lock-loop before activating to D0A */
+-	if (trans->cfg->base_params->pll_cfg_val)
+-		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
+-			    trans->cfg->base_params->pll_cfg_val);
+-
+-	/*
+-	 * Set "initialization complete" bit to move adapter from
+-	 * D0U* --> D0A* (powered-up active) state.
+-	 */
+-	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-	/*
+-	 * Wait for clock stabilization; once stabilized, access to
+-	 * device-internal resources is supported, e.g. iwl_write_prph()
+-	 * and accesses to uCode SRAM.
+-	 */
+-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+-	if (ret < 0) {
+-		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+-		goto out;
+-	}
+-
+-	if (trans->cfg->host_interrupt_operation_mode) {
+-		/*
+-		 * This is a bit of an abuse - This is needed for 7260 / 3160
+-		 * only check host_interrupt_operation_mode even if this is
+-		 * not related to host_interrupt_operation_mode.
+-		 *
+-		 * Enable the oscillator to count wake up time for L1 exit. This
+-		 * consumes slightly more power (100uA) - but allows to be sure
+-		 * that we wake up from L1 on time.
+-		 *
+-		 * This looks weird: read twice the same register, discard the
+-		 * value, set a bit, and yet again, read that same register
+-		 * just to discard the value. But that's the way the hardware
+-		 * seems to like it.
+-		 */
+-		iwl_read_prph(trans, OSC_CLK);
+-		iwl_read_prph(trans, OSC_CLK);
+-		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+-		iwl_read_prph(trans, OSC_CLK);
+-		iwl_read_prph(trans, OSC_CLK);
+-	}
+-
+-	/*
+-	 * Enable DMA clock and wait for it to stabilize.
+-	 *
+-	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+-	 * bits do not disable clocks.  This preserves any hardware
+-	 * bits already set by default in "CLK_CTRL_REG" after reset.
+-	 */
+-	if (!trans->cfg->apmg_not_supported) {
+-		iwl_write_prph(trans, APMG_CLK_EN_REG,
+-			       APMG_CLK_VAL_DMA_CLK_RQT);
+-		udelay(20);
+-
+-		/* Disable L1-Active */
+-		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+-				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+-
+-		/* Clear the interrupt in APMG if the NIC is in RFKILL */
+-		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+-			       APMG_RTC_INT_STT_RFKILL);
+-	}
+-
+-	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+-
+-out:
+-	return ret;
+-}
+-
+-/*
+- * Enable LP XTAL to avoid HW bug where device may consume much power if
+- * FW is not loaded after device reset. LP XTAL is disabled by default
+- * after device HW reset. Do it only if XTAL is fed by internal source.
+- * Configure device's "persistence" mode to avoid resetting XTAL again when
+- * SHRD_HW_RST occurs in S3.
+- */
+-static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+-{
+-	int ret;
+-	u32 apmg_gp1_reg;
+-	u32 apmg_xtal_cfg_reg;
+-	u32 dl_cfg_reg;
+-
+-	/* Force XTAL ON */
+-	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+-				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+-
+-	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+-
+-	udelay(10);
+-
+-	/*
+-	 * Set "initialization complete" bit to move adapter from
+-	 * D0U* --> D0A* (powered-up active) state.
+-	 */
+-	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-	/*
+-	 * Wait for clock stabilization; once stabilized, access to
+-	 * device-internal resources is possible.
+-	 */
+-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-			   25000);
+-	if (WARN_ON(ret < 0)) {
+-		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+-		/* Release XTAL ON request */
+-		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+-					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+-		return;
+-	}
+-
+-	/*
+-	 * Clear "disable persistence" to avoid LP XTAL resetting when
+-	 * SHRD_HW_RST is applied in S3.
+-	 */
+-	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+-				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+-
+-	/*
+-	 * Force APMG XTAL to be active to prevent its disabling by HW
+-	 * caused by APMG idle state.
+-	 */
+-	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+-						    SHR_APMG_XTAL_CFG_REG);
+-	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+-				 apmg_xtal_cfg_reg |
+-				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+-
+-	/*
+-	 * Reset entire device again - do controller reset (results in
+-	 * SHRD_HW_RST). Turn MAC off before proceeding.
+-	 */
+-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+-
+-	udelay(10);
+-
+-	/* Enable LP XTAL by indirect access through CSR */
+-	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+-	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+-				 SHR_APMG_GP1_WF_XTAL_LP_EN |
+-				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+-
+-	/* Clear delay line clock power up */
+-	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+-	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+-				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+-
+-	/*
+-	 * Enable persistence mode to avoid LP XTAL resetting when
+-	 * SHRD_HW_RST is applied in S3.
+-	 */
+-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+-
+-	/*
+-	 * Clear "initialization complete" bit to move adapter from
+-	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+-	 */
+-	iwl_clear_bit(trans, CSR_GP_CNTRL,
+-		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-	/* Activates XTAL resources monitor */
+-	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+-				 CSR_MONITOR_XTAL_RESOURCES);
+-
+-	/* Release XTAL ON request */
+-	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+-				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+-	udelay(10);
+-
+-	/* Release APMG XTAL */
+-	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+-				 apmg_xtal_cfg_reg &
+-				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+-}
+-
+-static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+-{
+-	int ret = 0;
+-
+-	/* stop device's busmaster DMA activity */
+-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+-
+-	ret = iwl_poll_bit(trans, CSR_RESET,
+-			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
+-			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+-	if (ret < 0)
+-		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+-
+-	IWL_DEBUG_INFO(trans, "stop master\n");
+-
+-	return ret;
+-}
+-
+-static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+-{
+-	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+-
+-	if (op_mode_leave) {
+-		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+-			iwl_pcie_apm_init(trans);
+-
+-		/* inform ME that we are leaving */
+-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+-			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+-					  APMG_PCIDEV_STT_VAL_WAKE_ME);
+-		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+-			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+-				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+-			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-				    CSR_HW_IF_CONFIG_REG_PREPARE |
+-				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+-			mdelay(1);
+-			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+-				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+-		}
+-		mdelay(5);
+-	}
+-
+-	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+-
+-	/* Stop device's DMA activity */
+-	iwl_pcie_apm_stop_master(trans);
+-
+-	if (trans->cfg->lp_xtal_workaround) {
+-		iwl_pcie_apm_lp_xtal_enable(trans);
+-		return;
+-	}
+-
+-	/* Reset the entire device */
+-	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+-
+-	udelay(10);
+-
+-	/*
+-	 * Clear "initialization complete" bit to move adapter from
+-	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+-	 */
+-	iwl_clear_bit(trans, CSR_GP_CNTRL,
+-		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-}
+-
+-static int iwl_pcie_nic_init(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	/* nic_init */
+-	spin_lock(&trans_pcie->irq_lock);
+-	iwl_pcie_apm_init(trans);
+-
+-	spin_unlock(&trans_pcie->irq_lock);
+-
+-	iwl_pcie_set_pwr(trans, false);
+-
+-	iwl_op_mode_nic_config(trans->op_mode);
+-
+-	/* Allocate the RX queue, or reset if it is already allocated */
+-	iwl_pcie_rx_init(trans);
+-
+-	/* Allocate or reset and init all Tx and Command queues */
+-	if (iwl_pcie_tx_init(trans))
+-		return -ENOMEM;
+-
+-	if (trans->cfg->base_params->shadow_reg_enable) {
+-		/* enable shadow regs in HW */
+-		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+-		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+-	}
+-
+-	return 0;
+-}
+-
+-#define HW_READY_TIMEOUT (50)
+-
+-/* Note: returns poll_bit return value, which is >= 0 if success */
+-static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+-{
+-	int ret;
+-
+-	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+-
+-	/* See if we got it */
+-	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+-			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+-			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+-			   HW_READY_TIMEOUT);
+-
+-	if (ret >= 0)
+-		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+-
+-	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+-	return ret;
+-}
+-
+-/* Note: returns standard 0/-ERROR code */
+-static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+-{
+-	int ret;
+-	int t = 0;
+-	int iter;
+-
+-	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+-
+-	ret = iwl_pcie_set_hw_ready(trans);
+-	/* If the card is ready, exit 0 */
+-	if (ret >= 0)
+-		return 0;
+-
+-	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+-		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+-	msleep(1);
+-
+-	for (iter = 0; iter < 10; iter++) {
+-		/* If HW is not ready, prepare the conditions to check again */
+-		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-			    CSR_HW_IF_CONFIG_REG_PREPARE);
+-
+-		do {
+-			ret = iwl_pcie_set_hw_ready(trans);
+-			if (ret >= 0)
+-				return 0;
+-
+-			usleep_range(200, 1000);
+-			t += 200;
+-		} while (t < 150000);
+-		msleep(25);
+-	}
+-
+-	IWL_ERR(trans, "Couldn't prepare the card\n");
+-
+-	return ret;
+-}
+-
+-/*
+- * ucode
+- */
+-static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
+-				   dma_addr_t phy_addr, u32 byte_cnt)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int ret;
+-
+-	trans_pcie->ucode_write_complete = false;
+-
+-	iwl_write_direct32(trans,
+-			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+-
+-	iwl_write_direct32(trans,
+-			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+-			   dst_addr);
+-
+-	iwl_write_direct32(trans,
+-			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+-			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+-
+-	iwl_write_direct32(trans,
+-			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+-			   (iwl_get_dma_hi_addr(phy_addr)
+-				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+-
+-	iwl_write_direct32(trans,
+-			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+-			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+-			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+-			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+-
+-	iwl_write_direct32(trans,
+-			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
+-			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
+-			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+-
+-	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+-				 trans_pcie->ucode_write_complete, 5 * HZ);
+-	if (!ret) {
+-		IWL_ERR(trans, "Failed to load firmware chunk!\n");
+-		return -ETIMEDOUT;
+-	}
+-
+-	return 0;
+-}
+-
+-static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
+-			    const struct fw_desc *section)
+-{
+-	u8 *v_addr;
+-	dma_addr_t p_addr;
+-	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
+-	int ret = 0;
+-
+-	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+-		     section_num);
+-
+-	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+-				    GFP_KERNEL | __GFP_NOWARN);
+-	if (!v_addr) {
+-		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+-		chunk_sz = PAGE_SIZE;
+-		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+-					    &p_addr, GFP_KERNEL);
+-		if (!v_addr)
+-			return -ENOMEM;
+-	}
+-
+-	for (offset = 0; offset < section->len; offset += chunk_sz) {
+-		u32 copy_size, dst_addr;
+-		bool extended_addr = false;
+-
+-		copy_size = min_t(u32, chunk_sz, section->len - offset);
+-		dst_addr = section->offset + offset;
+-
+-		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+-		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
+-			extended_addr = true;
+-
+-		if (extended_addr)
+-			iwl_set_bits_prph(trans, LMPM_CHICK,
+-					  LMPM_CHICK_EXTENDED_ADDR_SPACE);
+-
+-		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+-		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+-						   copy_size);
+-
+-		if (extended_addr)
+-			iwl_clear_bits_prph(trans, LMPM_CHICK,
+-					    LMPM_CHICK_EXTENDED_ADDR_SPACE);
+-
+-		if (ret) {
+-			IWL_ERR(trans,
+-				"Could not load the [%d] uCode section\n",
+-				section_num);
+-			break;
+-		}
+-	}
+-
+-	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
+-	return ret;
+-}
+-
+-/*
+- * Driver Takes the ownership on secure machine before FW load
+- * and prevent race with the BT load.
+- * W/A for ROM bug. (should be remove in the next Si step)
+- */
+-static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+-{
+-	u32 val, loop = 1000;
+-
+-	/*
+-	 * Check the RSA semaphore is accessible.
+-	 * If the HW isn't locked and the rsa semaphore isn't accessible,
+-	 * we are in trouble.
+-	 */
+-	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+-	if (val & (BIT(1) | BIT(17))) {
+-		IWL_DEBUG_INFO(trans,
+-			       "can't access the RSA semaphore it is write protected\n");
+-		return 0;
+-	}
+-
+-	/* take ownership on the AUX IF */
+-	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
+-	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
+-
+-	do {
+-		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
+-		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
+-		if (val == 0x1) {
+-			iwl_write_prph(trans, RSA_ENABLE, 0);
+-			return 0;
+-		}
+-
+-		udelay(10);
+-		loop--;
+-	} while (loop > 0);
+-
+-	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
+-	return -EIO;
+-}
+-
+-static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+-					   const struct fw_img *image,
+-					   int cpu,
+-					   int *first_ucode_section)
+-{
+-	int shift_param;
+-	int i, ret = 0, sec_num = 0x1;
+-	u32 val, last_read_idx = 0;
+-
+-	if (cpu == 1) {
+-		shift_param = 0;
+-		*first_ucode_section = 0;
+-	} else {
+-		shift_param = 16;
+-		(*first_ucode_section)++;
+-	}
+-
+-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+-		last_read_idx = i;
+-
+-		/*
+-		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+-		 * CPU1 to CPU2.
+-		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+-		 * CPU2 non paged to CPU2 paging sec.
+-		 */
+-		if (!image->sec[i].data ||
+-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+-		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+-			IWL_DEBUG_FW(trans,
+-				     "Break since Data not valid or Empty section, sec = %d\n",
+-				     i);
+-			break;
+-		}
+-
+-		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+-		if (ret)
+-			return ret;
+-
+-		/* Notify the ucode of the loaded section number and status */
+-		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+-		val = val | (sec_num << shift_param);
+-		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+-		sec_num = (sec_num << 1) | 0x1;
+-	}
+-
+-	*first_ucode_section = last_read_idx;
+-
+-	if (cpu == 1)
+-		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
+-	else
+-		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+-
+-	return 0;
+-}
+-
+-static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+-				      const struct fw_img *image,
+-				      int cpu,
+-				      int *first_ucode_section)
+-{
+-	int shift_param;
+-	int i, ret = 0;
+-	u32 last_read_idx = 0;
+-
+-	if (cpu == 1) {
+-		shift_param = 0;
+-		*first_ucode_section = 0;
+-	} else {
+-		shift_param = 16;
+-		(*first_ucode_section)++;
+-	}
+-
+-	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+-		last_read_idx = i;
+-
+-		/*
+-		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+-		 * CPU1 to CPU2.
+-		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+-		 * CPU2 non paged to CPU2 paging sec.
+-		 */
+-		if (!image->sec[i].data ||
+-		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+-		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+-			IWL_DEBUG_FW(trans,
+-				     "Break since Data not valid or Empty section, sec = %d\n",
+-				     i);
+-			break;
+-		}
+-
+-		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+-		iwl_set_bits_prph(trans,
+-				  CSR_UCODE_LOAD_STATUS_ADDR,
+-				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
+-				   LMPM_CPU_HDRS_LOADING_COMPLETED |
+-				   LMPM_CPU_UCODE_LOADING_STARTED) <<
+-					shift_param);
+-
+-	*first_ucode_section = last_read_idx;
+-
+-	return 0;
+-}
+-
+-static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
+-	int i;
+-
+-	if (dest->version)
+-		IWL_ERR(trans,
+-			"DBG DEST version is %d - expect issues\n",
+-			dest->version);
+-
+-	IWL_INFO(trans, "Applying debug destination %s\n",
+-		 get_fw_dbg_mode_string(dest->monitor_mode));
+-
+-	if (dest->monitor_mode == EXTERNAL_MODE)
+-		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
+-	else
+-		IWL_WARN(trans, "PCI should have external buffer debug\n");
+-
+-	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+-		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+-		u32 val = le32_to_cpu(dest->reg_ops[i].val);
+-
+-		switch (dest->reg_ops[i].op) {
+-		case CSR_ASSIGN:
+-			iwl_write32(trans, addr, val);
+-			break;
+-		case CSR_SETBIT:
+-			iwl_set_bit(trans, addr, BIT(val));
+-			break;
+-		case CSR_CLEARBIT:
+-			iwl_clear_bit(trans, addr, BIT(val));
+-			break;
+-		case PRPH_ASSIGN:
+-			iwl_write_prph(trans, addr, val);
+-			break;
+-		case PRPH_SETBIT:
+-			iwl_set_bits_prph(trans, addr, BIT(val));
+-			break;
+-		case PRPH_CLEARBIT:
+-			iwl_clear_bits_prph(trans, addr, BIT(val));
+-			break;
+-		case PRPH_BLOCKBIT:
+-			if (iwl_read_prph(trans, addr) & BIT(val)) {
+-				IWL_ERR(trans,
+-					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+-					val, addr);
+-				goto monitor;
+-			}
+-			break;
+-		default:
+-			IWL_ERR(trans, "FW debug - unknown OP %d\n",
+-				dest->reg_ops[i].op);
+-			break;
+-		}
+-	}
+-
+-monitor:
+-	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+-		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+-			       trans_pcie->fw_mon_phys >> dest->base_shift);
+-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+-			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+-				       (trans_pcie->fw_mon_phys +
+-					trans_pcie->fw_mon_size - 256) >>
+-						dest->end_shift);
+-		else
+-			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+-				       (trans_pcie->fw_mon_phys +
+-					trans_pcie->fw_mon_size) >>
+-						dest->end_shift);
+-	}
+-}
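
iwl_pcie_apply_destination() is effectively a tiny interpreter: the
firmware's debug-destination TLV supplies {op, addr, val} tuples and the
driver replays them as assignments, set-bits and clear-bits. A
self-contained sketch of that replay loop over a fake register file — the
REG_* names and the array are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    enum op { REG_ASSIGN, REG_SETBIT, REG_CLEARBIT };

    struct reg_op { enum op op; uint32_t addr; uint32_t val; };

    static uint32_t regs[16];    /* stand-in register file */

    int main(void)
    {
            static const struct reg_op ops[] = {
                    { REG_ASSIGN,   2, 0xdead0000 },
                    { REG_SETBIT,   2, 3 },   /* val is a bit number */
                    { REG_CLEARBIT, 2, 16 },
            };

            for (unsigned i = 0; i < sizeof(ops) / sizeof(ops[0]); i++) {
                    switch (ops[i].op) {
                    case REG_ASSIGN:
                            regs[ops[i].addr] = ops[i].val;
                            break;
                    case REG_SETBIT:
                            regs[ops[i].addr] |= 1u << ops[i].val;
                            break;
                    case REG_CLEARBIT:
                            regs[ops[i].addr] &= ~(1u << ops[i].val);
                            break;
                    }
            }
            printf("reg[2] = 0x%08x\n", (unsigned)regs[2]); /* 0xdeac0008 */
            return 0;
    }
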
+-
+-static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+-				const struct fw_img *image)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int ret = 0;
+-	int first_ucode_section;
+-
+-	IWL_DEBUG_FW(trans, "working with %s CPU\n",
+-		     image->is_dual_cpus ? "Dual" : "Single");
+-
+-	/* load to FW the binary non secured sections of CPU1 */
+-	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+-	if (ret)
+-		return ret;
+-
+-	if (image->is_dual_cpus) {
+-		/* set CPU2 header address */
+-		iwl_write_prph(trans,
+-			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+-			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+-
+-		/* load to FW the binary sections of CPU2 */
+-		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+-						 &first_ucode_section);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	/* supported for 7000 only for the moment */
+-	if (iwlwifi_mod_params.fw_monitor &&
+-	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+-		iwl_pcie_alloc_fw_monitor(trans, 0);
+-
+-		if (trans_pcie->fw_mon_size) {
+-			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
+-				       trans_pcie->fw_mon_phys >> 4);
+-			iwl_write_prph(trans, MON_BUFF_END_ADDR,
+-				       (trans_pcie->fw_mon_phys +
+-					trans_pcie->fw_mon_size) >> 4);
+-		}
+-	} else if (trans->dbg_dest_tlv) {
+-		iwl_pcie_apply_destination(trans);
+-	}
+-
+-	/* release CPU reset */
+-	iwl_write32(trans, CSR_RESET, 0);
+-
+-	return 0;
+-}
+-
+-static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+-					  const struct fw_img *image)
+-{
+-	int ret = 0;
+-	int first_ucode_section;
+-
+-	IWL_DEBUG_FW(trans, "working with %s CPU\n",
+-		     image->is_dual_cpus ? "Dual" : "Single");
+-
+-	if (trans->dbg_dest_tlv)
+-		iwl_pcie_apply_destination(trans);
+-
+-	/* TODO: remove in the next Si step */
+-	ret = iwl_pcie_rsa_race_bug_wa(trans);
+-	if (ret)
+-		return ret;
+-
+-	/* configure the ucode to be ready to get the secured image */
+-	/* release CPU reset */
+-	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+-
+-	/* load to FW the binary Secured sections of CPU1 */
+-	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+-					      &first_ucode_section);
+-	if (ret)
+-		return ret;
+-
+-	/* load to FW the binary sections of CPU2 */
+-	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+-					       &first_ucode_section);
+-}
+-
+-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+-				   const struct fw_img *fw, bool run_in_rfkill)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	bool hw_rfkill;
+-	int ret;
+-
+-	mutex_lock(&trans_pcie->mutex);
+-
+-	/* Someone called stop_device, don't try to start_fw */
+-	if (trans_pcie->is_down) {
+-		IWL_WARN(trans,
+-			 "Can't start_fw since the HW hasn't been started\n");
+-		ret = -EIO;
+-		goto out;
+-	}
+-
+-	/* This may fail if AMT took ownership of the device */
+-	if (iwl_pcie_prepare_card_hw(trans)) {
+-		IWL_WARN(trans, "Exit HW not ready\n");
+-		ret = -EIO;
+-		goto out;
+-	}
+-
+-	iwl_enable_rfkill_int(trans);
+-
+-	/* If platform's RF_KILL switch is NOT set to KILL */
+-	hw_rfkill = iwl_is_rfkill_set(trans);
+-	if (hw_rfkill)
+-		set_bit(STATUS_RFKILL, &trans->status);
+-	else
+-		clear_bit(STATUS_RFKILL, &trans->status);
+-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+-	if (hw_rfkill && !run_in_rfkill) {
+-		ret = -ERFKILL;
+-		goto out;
+-	}
+-
+-	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+-
+-	ret = iwl_pcie_nic_init(trans);
+-	if (ret) {
+-		IWL_ERR(trans, "Unable to init nic\n");
+-		goto out;
+-	}
+-
+-	/* make sure rfkill handshake bits are cleared */
+-	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+-	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+-		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+-
+-	/* clear (again), then enable host interrupts */
+-	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+-	iwl_enable_interrupts(trans);
+-
+-	/* really make sure rfkill handshake bits are cleared */
+-	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+-	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+-
+-	/* Load the given image to the HW */
+-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+-		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+-	else
+-		ret = iwl_pcie_load_given_ucode(trans, fw);
+-
+-out:
+-	mutex_unlock(&trans_pcie->mutex);
+-	return ret;
+-}
+-
+-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+-{
+-	iwl_pcie_reset_ict(trans);
+-	iwl_pcie_tx_start(trans, scd_addr);
+-}
+-
+-static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	bool hw_rfkill, was_hw_rfkill;
+-
+-	lockdep_assert_held(&trans_pcie->mutex);
+-
+-	if (trans_pcie->is_down)
+-		return;
+-
+-	trans_pcie->is_down = true;
+-
+-	was_hw_rfkill = iwl_is_rfkill_set(trans);
+-
+-	/* tell the device to stop sending interrupts */
+-	spin_lock(&trans_pcie->irq_lock);
+-	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+-
+-	/* device going down, Stop using ICT table */
+-	iwl_pcie_disable_ict(trans);
+-
+-	/*
+-	 * If a HW restart happens during firmware loading,
+-	 * then the firmware loading might call this function
+-	 * and later it might be called again due to the
+-	 * restart. So don't process again if the device is
+-	 * already dead.
+-	 */
+-	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+-		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
+-		iwl_pcie_tx_stop(trans);
+-		iwl_pcie_rx_stop(trans);
+-
+-		/* Power-down device's busmaster DMA clocks */
+-		if (!trans->cfg->apmg_not_supported) {
+-			iwl_write_prph(trans, APMG_CLK_DIS_REG,
+-				       APMG_CLK_VAL_DMA_CLK_RQT);
+-			udelay(5);
+-		}
+-	}
+-
+-	/* Make sure (redundant) we've released our request to stay awake */
+-	iwl_clear_bit(trans, CSR_GP_CNTRL,
+-		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-
+-	/* Stop the device, and put it in low power state */
+-	iwl_pcie_apm_stop(trans, false);
+-
+-	/* stop and reset the on-board processor */
+-	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+-	udelay(20);
+-
+-	/*
+-	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+-	 * This is a bug in certain versions of the hardware.
+-	 * Certain devices also keep sending the HW RF kill interrupt all
+-	 * the time, even when it should be masked, unless the interrupt
+-	 * is ACKed. Re-ACK all the interrupts here.
+-	 */
+-	spin_lock(&trans_pcie->irq_lock);
+-	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+-
+-
+-	/* clear all status bits */
+-	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+-	clear_bit(STATUS_INT_ENABLED, &trans->status);
+-	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+-	clear_bit(STATUS_RFKILL, &trans->status);
+-
+-	/*
+-	 * Even if we stop the HW, we still want the RF kill
+-	 * interrupt
+-	 */
+-	iwl_enable_rfkill_int(trans);
+-
+-	/*
+-	 * Check again since the RF kill state may have changed while
+-	 * all the interrupts were disabled, in this case we couldn't
+-	 * receive the RF kill interrupt and update the state in the
+-	 * op_mode.
+-	 * Don't call the op_mode if the rfkill state hasn't changed.
+-	 * This allows the op_mode to call stop_device from the rfkill
+-	 * notification without endless recursion. Under very rare
+-	 * circumstances, we might have a small recursion if the rfkill
+-	 * state changed exactly now while we were called from stop_device.
+-	 * This is very unlikely but can happen and is supported.
+-	 */
+-	hw_rfkill = iwl_is_rfkill_set(trans);
+-	if (hw_rfkill)
+-		set_bit(STATUS_RFKILL, &trans->status);
+-	else
+-		clear_bit(STATUS_RFKILL, &trans->status);
+-	if (hw_rfkill != was_hw_rfkill)
+-		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+-
+-	/* re-take ownership to prevent other users from stealing the device */
+-	iwl_pcie_prepare_card_hw(trans);
+-}
+-
+-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	mutex_lock(&trans_pcie->mutex);
+-	_iwl_trans_pcie_stop_device(trans, low_power);
+-	mutex_unlock(&trans_pcie->mutex);
+-}
+-
+-void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+-{
+-	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+-		IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	lockdep_assert_held(&trans_pcie->mutex);
+-
+-	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
+-		_iwl_trans_pcie_stop_device(trans, true);
+-}
+-
+-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+-		/* Enable persistence mode to avoid reset */
+-		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+-			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+-	}
+-
+-	iwl_disable_interrupts(trans);
+-
+-	/*
+-	 * in testing mode, the host stays awake and the
+-	 * hardware won't be reset (not even partially)
+-	 */
+-	if (test)
+-		return;
+-
+-	iwl_pcie_disable_ict(trans);
+-
+-	synchronize_irq(trans_pcie->pci_dev->irq);
+-
+-	iwl_clear_bit(trans, CSR_GP_CNTRL,
+-		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-	iwl_clear_bit(trans, CSR_GP_CNTRL,
+-		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
+-		/*
+-		 * reset TX queues -- some of their registers reset during S3
+-		 * so if we don't reset everything here the D3 image would try
+-		 * to execute some invalid memory upon resume
+-		 */
+-		iwl_trans_pcie_tx_reset(trans);
+-	}
+-
+-	iwl_pcie_set_pwr(trans, true);
+-}
+-
+-static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+-				    enum iwl_d3_status *status,
+-				    bool test)
+-{
+-	u32 val;
+-	int ret;
+-
+-	if (test) {
+-		iwl_enable_interrupts(trans);
+-		*status = IWL_D3_STATUS_ALIVE;
+-		return 0;
+-	}
+-
+-	/*
+-	 * Also enables interrupts - none will happen as the device doesn't
+-	 * know we're waking it up, only when the opmode actually tells it
+-	 * after this call.
+-	 */
+-	iwl_pcie_reset_ict(trans);
+-
+-	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-
+-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+-		udelay(2);
+-
+-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-			   25000);
+-	if (ret < 0) {
+-		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+-		return ret;
+-	}
+-
+-	iwl_pcie_set_pwr(trans, false);
+-
+-	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+-		iwl_clear_bit(trans, CSR_GP_CNTRL,
+-			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-	} else {
+-		iwl_trans_pcie_tx_reset(trans);
+-
+-		ret = iwl_pcie_rx_init(trans);
+-		if (ret) {
+-			IWL_ERR(trans,
+-				"Failed to resume the device (RX reset)\n");
+-			return ret;
+-		}
+-	}
+-
+-	val = iwl_read32(trans, CSR_RESET);
+-	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+-		*status = IWL_D3_STATUS_RESET;
+-	else
+-		*status = IWL_D3_STATUS_ALIVE;
+-
+-	return 0;
+-}
+-
+-static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	bool hw_rfkill;
+-	int err;
+-
+-	lockdep_assert_held(&trans_pcie->mutex);
+-
+-	err = iwl_pcie_prepare_card_hw(trans);
+-	if (err) {
+-		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+-		return err;
+-	}
+-
+-	/* Reset the entire device */
+-	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+-
+-	usleep_range(10, 15);
+-
+-	iwl_pcie_apm_init(trans);
+-
+-	/* From now on, the op_mode will be kept updated about RF kill state */
+-	iwl_enable_rfkill_int(trans);
+-
+-	/* Set is_down to false here so that...*/
+-	trans_pcie->is_down = false;
+-
+-	hw_rfkill = iwl_is_rfkill_set(trans);
+-	if (hw_rfkill)
+-		set_bit(STATUS_RFKILL, &trans->status);
+-	else
+-		clear_bit(STATUS_RFKILL, &trans->status);
+-	/* ... rfkill can call stop_device and set it false if needed */
+-	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+-
+-	return 0;
+-}
+-
+-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int ret;
+-
+-	mutex_lock(&trans_pcie->mutex);
+-	ret = _iwl_trans_pcie_start_hw(trans, low_power);
+-	mutex_unlock(&trans_pcie->mutex);
+-
+-	return ret;
+-}
+-
+-static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	mutex_lock(&trans_pcie->mutex);
+-
+-	/* disable interrupts - don't enable HW RF kill interrupt */
+-	spin_lock(&trans_pcie->irq_lock);
+-	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+-
+-	iwl_pcie_apm_stop(trans, true);
+-
+-	spin_lock(&trans_pcie->irq_lock);
+-	iwl_disable_interrupts(trans);
+-	spin_unlock(&trans_pcie->irq_lock);
+-
+-	iwl_pcie_disable_ict(trans);
+-
+-	mutex_unlock(&trans_pcie->mutex);
+-
+-	synchronize_irq(trans_pcie->pci_dev->irq);
+-}
+-
+-static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+-{
+-	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+-}
+-
+-static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+-{
+-	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+-}
+-
+-static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+-{
+-	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+-}
+-
+-static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+-{
+-	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+-			       ((reg & 0x000FFFFF) | (3 << 24)));
+-	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+-}
+-
+-static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
+-				      u32 val)
+-{
+-	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
+-			       ((addr & 0x000FFFFF) | (3 << 24)));
+-	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+-}
+-
+-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+-{
+-	WARN_ON(1);
+-	return 0;
+-}
+-
+-static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+-				     const struct iwl_trans_config *trans_cfg)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+-	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+-	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+-	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+-		trans_pcie->n_no_reclaim_cmds = 0;
+-	else
+-		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+-	if (trans_pcie->n_no_reclaim_cmds)
+-		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+-		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+-
+-	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
+-	trans_pcie->rx_page_order =
+-		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+-
+-	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
+-	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+-	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+-	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
+-
+-	trans->command_groups = trans_cfg->command_groups;
+-	trans->command_groups_size = trans_cfg->command_groups_size;
+-
+-	/* init ref_count to 1 (should be cleared when ucode is loaded) */
+-	trans_pcie->ref_count = 1;
+-
+-	/* Initialize NAPI here - it should be before registering to mac80211
+-	 * in the opmode but after the HW struct is allocated.
+-	 * As this function may be called again in some corner cases, don't
+-	 * do anything if NAPI was already initialized.
+-	 */
+-	if (!trans_pcie->napi.poll) {
+-		init_dummy_netdev(&trans_pcie->napi_dev);
+-		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
+-			       iwl_pcie_dummy_napi_poll, 64);
+-	}
+-}
+-
+-void iwl_trans_pcie_free(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int i;
+-
+-	synchronize_irq(trans_pcie->pci_dev->irq);
+-
+-	iwl_pcie_tx_free(trans);
+-	iwl_pcie_rx_free(trans);
+-
+-	free_irq(trans_pcie->pci_dev->irq, trans);
+-	iwl_pcie_free_ict(trans);
+-
+-	pci_disable_msi(trans_pcie->pci_dev);
+-	iounmap(trans_pcie->hw_base);
+-	pci_release_regions(trans_pcie->pci_dev);
+-	pci_disable_device(trans_pcie->pci_dev);
+-
+-	if (trans_pcie->napi.poll)
+-		netif_napi_del(&trans_pcie->napi);
+-
+-	iwl_pcie_free_fw_monitor(trans);
+-
+-	for_each_possible_cpu(i) {
+-		struct iwl_tso_hdr_page *p =
+-			per_cpu_ptr(trans_pcie->tso_hdr_page, i);
+-
+-		if (p->page)
+-			__free_page(p->page);
+-	}
+-
+-	free_percpu(trans_pcie->tso_hdr_page);
+-	iwl_trans_free(trans);
+-}
+-
+-static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+-{
+-	if (state)
+-		set_bit(STATUS_TPOWER_PMI, &trans->status);
+-	else
+-		clear_bit(STATUS_TPOWER_PMI, &trans->status);
+-}
+-
+-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+-					   unsigned long *flags)
+-{
+-	int ret;
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+-
+-	if (trans_pcie->cmd_hold_nic_awake)
+-		goto out;
+-
+-	/* this bit wakes up the NIC */
+-	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+-				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+-		udelay(2);
+-
+-	/*
+-	 * These bits say the device is running, and should keep running for
+-	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+-	 * but they do not indicate that embedded SRAM is restored yet;
+-	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
+-	 * to/from host DRAM when sleeping/waking for power-saving.
+-	 * Each direction takes approximately 1/4 millisecond; with this
+-	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+-	 * series of register accesses are expected (e.g. reading Event Log),
+-	 * to keep device from sleeping.
+-	 *
+-	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+-	 * SRAM is okay/restored.  We don't check that here because this call
+-	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
+-	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+-	 *
+-	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
+-	 * and do not save/restore SRAM when power cycling.
+-	 */
+-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+-			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+-			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+-			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+-	if (unlikely(ret < 0)) {
+-		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+-		WARN_ONCE(1,
+-			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+-			  iwl_read32(trans, CSR_GP_CNTRL));
+-		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+-		return false;
+-	}
+-
+-out:
+-	/*
+-	 * Fool sparse by faking we release the lock - sparse will
+-	 * track nic_access anyway.
+-	 */
+-	__release(&trans_pcie->reg_lock);
+-	return true;
+-}
+-
+-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+-					      unsigned long *flags)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	lockdep_assert_held(&trans_pcie->reg_lock);
+-
+-	/*
+-	 * Fool sparse by faking we acquire the lock - sparse will
+-	 * track nic_access anyway.
+-	 */
+-	__acquire(&trans_pcie->reg_lock);
+-
+-	if (trans_pcie->cmd_hold_nic_awake)
+-		goto out;
+-
+-	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+-				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+-	/*
+-	 * Above we read the CSR_GP_CNTRL register, which will flush
+-	 * any previous writes, but we need the write that clears the
+-	 * MAC_ACCESS_REQ bit to be performed before any other writes
+-	 * scheduled on different CPUs (after we drop reg_lock).
+-	 */
+-	mmiowb();
+-out:
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+-}
+-
+-static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+-				   void *buf, int dwords)
+-{
+-	unsigned long flags;
+-	int offs, ret = 0;
+-	u32 *vals = buf;
+-
+-	if (iwl_trans_grab_nic_access(trans, &flags)) {
+-		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+-		for (offs = 0; offs < dwords; offs++)
+-			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+-		iwl_trans_release_nic_access(trans, &flags);
+-	} else {
+-		ret = -EBUSY;
+-	}
+-	return ret;
+-}
+-
+-static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+-				    const void *buf, int dwords)
+-{
+-	unsigned long flags;
+-	int offs, ret = 0;
+-	const u32 *vals = buf;
+-
+-	if (iwl_trans_grab_nic_access(trans, &flags)) {
+-		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+-		for (offs = 0; offs < dwords; offs++)
+-			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+-				    vals ? vals[offs] : 0);
+-		iwl_trans_release_nic_access(trans, &flags);
+-	} else {
+-		ret = -EBUSY;
+-	}
+-	return ret;
+-}
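
Both memory helpers share one shape: try to grab NIC access (which can
fail), do the burst of register I/O, release, and report -EBUSY when access
was never granted. A user-space sketch of that shape, with
pthread_mutex_trylock() standing in for iwl_trans_grab_nic_access():

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t nic_access = PTHREAD_MUTEX_INITIALIZER;
    static unsigned mem[4];

    static int write_mem(unsigned addr, const unsigned *vals, int dwords)
    {
            if (pthread_mutex_trylock(&nic_access) != 0)
                    return -EBUSY;             /* couldn't wake the NIC */

            for (int offs = 0; offs < dwords; offs++)
                    mem[addr + offs] = vals ? vals[offs] : 0; /* NULL zero-fills */

            pthread_mutex_unlock(&nic_access);
            return 0;
    }

    int main(void)
    {
            unsigned v[2] = { 0x11, 0x22 };
            int ret = write_mem(1, v, 2);

            printf("ret = %d, mem[1] = 0x%x\n", ret, mem[1]);
            return 0;
    }
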
+-
+-static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+-					    unsigned long txqs,
+-					    bool freeze)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int queue;
+-
+-	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+-		struct iwl_txq *txq = &trans_pcie->txq[queue];
+-		unsigned long now;
+-
+-		spin_lock_bh(&txq->lock);
+-
+-		now = jiffies;
+-
+-		if (txq->frozen == freeze)
+-			goto next_queue;
+-
+-		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+-				    freeze ? "Freezing" : "Waking", queue);
+-
+-		txq->frozen = freeze;
+-
+-		if (txq->q.read_ptr == txq->q.write_ptr)
+-			goto next_queue;
+-
+-		if (freeze) {
+-			if (unlikely(time_after(now,
+-						txq->stuck_timer.expires))) {
+-				/*
+-				 * The timer should have fired, maybe it is
+-				 * spinning right now on the lock.
+-				 */
+-				goto next_queue;
+-			}
+-			/* remember how long until the timer fires */
+-			txq->frozen_expiry_remainder =
+-				txq->stuck_timer.expires - now;
+-			del_timer(&txq->stuck_timer);
+-			goto next_queue;
+-		}
+-
+-		/*
+-		 * Wake a non-empty queue -> arm timer with the
+-		 * remainder before it froze
+-		 */
+-		mod_timer(&txq->stuck_timer,
+-			  now + txq->frozen_expiry_remainder);
+-
+-next_queue:
+-		spin_unlock_bh(&txq->lock);
+-	}
+-}
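
The freeze path records how long the stuck-timer had left (expires - now),
deletes it, and the wake path re-arms it with that remainder, so a frozen
queue cannot fire the watchdog early. A sketch of the save/restore
arithmetic with plain integers for jiffies; freeze_timer() and wake_timer()
are made-up helper names:

    #include <stdio.h>

    struct txq {
            long expires;             /* absolute deadline, in "jiffies" */
            long frozen_remainder;    /* time left when the queue froze  */
    };

    static void freeze_timer(struct txq *q, long now)
    {
            q->frozen_remainder = q->expires - now;  /* remember time left */
            q->expires = -1;                         /* "del_timer"        */
    }

    static void wake_timer(struct txq *q, long now)
    {
            q->expires = now + q->frozen_remainder;  /* "mod_timer"        */
    }

    int main(void)
    {
            struct txq q = { .expires = 1000, .frozen_remainder = 0 };

            freeze_timer(&q, 400);    /* 600 ticks were left       */
            wake_timer(&q, 5000);     /* re-arm 600 ticks from now */
            printf("new deadline: %ld\n", q.expires);  /* prints 5600 */
            return 0;
    }
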
+-
+-static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int i;
+-
+-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+-		struct iwl_txq *txq = &trans_pcie->txq[i];
+-
+-		if (i == trans_pcie->cmd_queue)
+-			continue;
+-
+-		spin_lock_bh(&txq->lock);
+-
+-		if (!block && !(WARN_ON_ONCE(!txq->block))) {
+-			txq->block--;
+-			if (!txq->block) {
+-				iwl_write32(trans, HBUS_TARG_WRPTR,
+-					    txq->q.write_ptr | (i << 8));
+-			}
+-		} else if (block) {
+-			txq->block++;
+-		}
+-
+-		spin_unlock_bh(&txq->lock);
+-	}
+-}
+-
+-#define IWL_FLUSH_WAIT_MS	2000
+-
+-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct iwl_txq *txq;
+-	struct iwl_queue *q;
+-	int cnt;
+-	unsigned long now = jiffies;
+-	u32 scd_sram_addr;
+-	u8 buf[16];
+-	int ret = 0;
+-
+-	/* waiting for all the tx frames complete might take a while */
+-	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+-		u8 wr_ptr;
+-
+-		if (cnt == trans_pcie->cmd_queue)
+-			continue;
+-		if (!test_bit(cnt, trans_pcie->queue_used))
+-			continue;
+-		if (!(BIT(cnt) & txq_bm))
+-			continue;
+-
+-		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
+-		txq = &trans_pcie->txq[cnt];
+-		q = &txq->q;
+-		wr_ptr = ACCESS_ONCE(q->write_ptr);
+-
+-		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
+-		       !time_after(jiffies,
+-				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+-			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
+-
+-			if (WARN_ONCE(wr_ptr != write_ptr,
+-				      "WR pointer moved while flushing %d -> %d\n",
+-				      wr_ptr, write_ptr))
+-				return -ETIMEDOUT;
+-			msleep(1);
+-		}
+-
+-		if (q->read_ptr != q->write_ptr) {
+-			IWL_ERR(trans,
+-				"fail to flush all tx fifo queues Q %d\n", cnt);
+-			ret = -ETIMEDOUT;
+-			break;
+-		}
+-		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
+-	}
+-
+-	if (!ret)
+-		return 0;
+-
+-	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
+-		txq->q.read_ptr, txq->q.write_ptr);
+-
+-	scd_sram_addr = trans_pcie->scd_base_addr +
+-			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
+-	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+-
+-	iwl_print_hex_error(trans, buf, sizeof(buf));
+-
+-	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
+-		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
+-			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
+-
+-	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+-		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
+-		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+-		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+-		u32 tbl_dw =
+-			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
+-					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
+-
+-		if (cnt & 0x1)
+-			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+-		else
+-			tbl_dw = tbl_dw & 0x0000FFFF;
+-
+-		IWL_ERR(trans,
+-			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+-			cnt, active ? "" : "in", fifo, tbl_dw,
+-			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
+-				(TFD_QUEUE_SIZE_MAX - 1),
+-			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
+-	}
+-
+-	return ret;
+-}
+-
+-static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+-					 u32 mask, u32 value)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+-	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+-	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+-}
+-
+-void iwl_trans_pcie_ref(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	unsigned long flags;
+-
+-	if (iwlwifi_mod_params.d0i3_disable)
+-		return;
+-
+-	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+-	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+-	trans_pcie->ref_count++;
+-	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+-}
+-
+-void iwl_trans_pcie_unref(struct iwl_trans *trans)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	unsigned long flags;
+-
+-	if (iwlwifi_mod_params.d0i3_disable)
+-		return;
+-
+-	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
+-	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
+-	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
+-		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+-		return;
+-	}
+-	trans_pcie->ref_count--;
+-	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+-}
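
The ref/unref pair is a plain counter under a spinlock, with underflow
reported via WARN_ON_ONCE rather than assumed impossible. A pthreads sketch
of the same discipline, the mutex standing in for ref_lock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned ref_count = 1;   /* starts at 1, like trans_pcie */

    static void trans_ref(void)
    {
            pthread_mutex_lock(&ref_lock);
            ref_count++;
            pthread_mutex_unlock(&ref_lock);
    }

    static void trans_unref(void)
    {
            pthread_mutex_lock(&ref_lock);
            if (ref_count == 0)              /* WARN_ON_ONCE in the driver */
                    fprintf(stderr, "refcount underflow\n");
            else
                    ref_count--;
            pthread_mutex_unlock(&ref_lock);
    }

    int main(void)
    {
            trans_ref();
            trans_unref();
            trans_unref();                   /* back down to 0 */
            printf("ref_count = %u\n", ref_count);
            return 0;
    }
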
+-
+-static const char *get_csr_string(int cmd)
+-{
+-#define IWL_CMD(x) case x: return #x
+-	switch (cmd) {
+-	IWL_CMD(CSR_HW_IF_CONFIG_REG);
+-	IWL_CMD(CSR_INT_COALESCING);
+-	IWL_CMD(CSR_INT);
+-	IWL_CMD(CSR_INT_MASK);
+-	IWL_CMD(CSR_FH_INT_STATUS);
+-	IWL_CMD(CSR_GPIO_IN);
+-	IWL_CMD(CSR_RESET);
+-	IWL_CMD(CSR_GP_CNTRL);
+-	IWL_CMD(CSR_HW_REV);
+-	IWL_CMD(CSR_EEPROM_REG);
+-	IWL_CMD(CSR_EEPROM_GP);
+-	IWL_CMD(CSR_OTP_GP_REG);
+-	IWL_CMD(CSR_GIO_REG);
+-	IWL_CMD(CSR_GP_UCODE_REG);
+-	IWL_CMD(CSR_GP_DRIVER_REG);
+-	IWL_CMD(CSR_UCODE_DRV_GP1);
+-	IWL_CMD(CSR_UCODE_DRV_GP2);
+-	IWL_CMD(CSR_LED_REG);
+-	IWL_CMD(CSR_DRAM_INT_TBL_REG);
+-	IWL_CMD(CSR_GIO_CHICKEN_BITS);
+-	IWL_CMD(CSR_ANA_PLL_CFG);
+-	IWL_CMD(CSR_HW_REV_WA_REG);
+-	IWL_CMD(CSR_MONITOR_STATUS_REG);
+-	IWL_CMD(CSR_DBG_HPET_MEM_REG);
+-	default:
+-		return "UNKNOWN";
+-	}
+-#undef IWL_CMD
+-}
+-
+-void iwl_pcie_dump_csr(struct iwl_trans *trans)
+-{
+-	int i;
+-	static const u32 csr_tbl[] = {
+-		CSR_HW_IF_CONFIG_REG,
+-		CSR_INT_COALESCING,
+-		CSR_INT,
+-		CSR_INT_MASK,
+-		CSR_FH_INT_STATUS,
+-		CSR_GPIO_IN,
+-		CSR_RESET,
+-		CSR_GP_CNTRL,
+-		CSR_HW_REV,
+-		CSR_EEPROM_REG,
+-		CSR_EEPROM_GP,
+-		CSR_OTP_GP_REG,
+-		CSR_GIO_REG,
+-		CSR_GP_UCODE_REG,
+-		CSR_GP_DRIVER_REG,
+-		CSR_UCODE_DRV_GP1,
+-		CSR_UCODE_DRV_GP2,
+-		CSR_LED_REG,
+-		CSR_DRAM_INT_TBL_REG,
+-		CSR_GIO_CHICKEN_BITS,
+-		CSR_ANA_PLL_CFG,
+-		CSR_MONITOR_STATUS_REG,
+-		CSR_HW_REV_WA_REG,
+-		CSR_DBG_HPET_MEM_REG
+-	};
+-	IWL_ERR(trans, "CSR values:\n");
+-	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
+-		"CSR_INT_PERIODIC_REG)\n");
+-	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
+-		IWL_ERR(trans, "  %25s: 0X%08x\n",
+-			get_csr_string(csr_tbl[i]),
+-			iwl_read32(trans, csr_tbl[i]));
+-	}
+-}
+-
+-#ifdef CONFIG_IWLWIFI_DEBUGFS
+-/* create and remove of files */
+-#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+-	if (!debugfs_create_file(#name, mode, parent, trans,		\
+-				 &iwl_dbgfs_##name##_ops))		\
+-		goto err;						\
+-} while (0)
+-
+-/* file operation */
+-#define DEBUGFS_READ_FILE_OPS(name)					\
+-static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+-	.read = iwl_dbgfs_##name##_read,				\
+-	.open = simple_open,						\
+-	.llseek = generic_file_llseek,					\
+-};
+-
+-#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
+-static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+-	.write = iwl_dbgfs_##name##_write,                              \
+-	.open = simple_open,						\
+-	.llseek = generic_file_llseek,					\
+-};
+-
+-#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
+-static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+-	.write = iwl_dbgfs_##name##_write,				\
+-	.read = iwl_dbgfs_##name##_read,				\
+-	.open = simple_open,						\
+-	.llseek = generic_file_llseek,					\
+-};
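
The DEBUGFS_*_FILE_OPS macros stamp out one file_operations per attribute
from a single name token, pasting the name into both the handler and the
ops identifier. A compilable sketch of the token-pasting idiom outside the
kernel — struct file_ops here is a stand-in, not the kernel's struct
file_operations:

    #include <stdio.h>

    struct file_ops {
            const char *name;
            int (*read)(void);
    };

    #define DEFINE_READ_OPS(n)                                  \
    static int dbgfs_##n##_read(void) { return 0; }             \
    static const struct file_ops dbgfs_##n##_ops = {            \
            .name = #n,                                         \
            .read = dbgfs_##n##_read,                           \
    };

    DEFINE_READ_OPS(rx_queue)
    DEFINE_READ_OPS(tx_queue)

    int main(void)
    {
            printf("%s -> %d\n", dbgfs_rx_queue_ops.name,
                   dbgfs_rx_queue_ops.read());
            printf("%s -> %d\n", dbgfs_tx_queue_ops.name,
                   dbgfs_tx_queue_ops.read());
            return 0;
    }
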
+-
+-static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+-				       char __user *user_buf,
+-				       size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct iwl_txq *txq;
+-	struct iwl_queue *q;
+-	char *buf;
+-	int pos = 0;
+-	int cnt;
+-	int ret;
+-	size_t bufsz;
+-
+-	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
+-
+-	if (!trans_pcie->txq)
+-		return -EAGAIN;
+-
+-	buf = kzalloc(bufsz, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+-		txq = &trans_pcie->txq[cnt];
+-		q = &txq->q;
+-		pos += scnprintf(buf + pos, bufsz - pos,
+-				"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
+-				cnt, q->read_ptr, q->write_ptr,
+-				!!test_bit(cnt, trans_pcie->queue_used),
+-				 !!test_bit(cnt, trans_pcie->queue_stopped),
+-				 txq->need_update, txq->frozen,
+-				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
+-	}
+-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+-	kfree(buf);
+-	return ret;
+-}
+-
+-static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+-				       char __user *user_buf,
+-				       size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct iwl_rxq *rxq = &trans_pcie->rxq;
+-	char buf[256];
+-	int pos = 0;
+-	const size_t bufsz = sizeof(buf);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
+-						rxq->read);
+-	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
+-						rxq->write);
+-	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
+-						rxq->write_actual);
+-	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
+-						rxq->need_update);
+-	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+-						rxq->free_count);
+-	if (rxq->rb_stts) {
+-		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+-			 le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
+-	} else {
+-		pos += scnprintf(buf + pos, bufsz - pos,
+-					"closed_rb_num: Not Allocated\n");
+-	}
+-	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+-}
+-
+-static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+-					char __user *user_buf,
+-					size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+-
+-	int pos = 0;
+-	char *buf;
+-	int bufsz = 24 * 64; /* 24 items * 64 char per item */
+-	ssize_t ret;
+-
+-	buf = kzalloc(bufsz, GFP_KERNEL);
+-	if (!buf)
+-		return -ENOMEM;
+-
+-	pos += scnprintf(buf + pos, bufsz - pos,
+-			"Interrupt Statistics Report:\n");
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+-		isr_stats->hw);
+-	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+-		isr_stats->sw);
+-	if (isr_stats->sw || isr_stats->hw) {
+-		pos += scnprintf(buf + pos, bufsz - pos,
+-			"\tLast Restarting Code:  0x%X\n",
+-			isr_stats->err_code);
+-	}
+-#ifdef CONFIG_IWLWIFI_DEBUG
+-	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+-		isr_stats->sch);
+-	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+-		isr_stats->alive);
+-#endif
+-	pos += scnprintf(buf + pos, bufsz - pos,
+-		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+-		isr_stats->ctkill);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+-		isr_stats->wakeup);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos,
+-		"Rx command responses:\t\t %u\n", isr_stats->rx);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+-		isr_stats->tx);
+-
+-	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+-		isr_stats->unhandled);
+-
+-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+-	kfree(buf);
+-	return ret;
+-}
+-
+-static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+-					 const char __user *user_buf,
+-					 size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+-
+-	char buf[8];
+-	int buf_size;
+-	u32 reset_flag;
+-
+-	memset(buf, 0, sizeof(buf));
+-	buf_size = min(count, sizeof(buf) -  1);
+-	if (copy_from_user(buf, user_buf, buf_size))
+-		return -EFAULT;
+-	if (sscanf(buf, "%x", &reset_flag) != 1)
+-		return -EFAULT;
+-	if (reset_flag == 0)
+-		memset(isr_stats, 0, sizeof(*isr_stats));
+-
+-	return count;
+-}
+-
+-static ssize_t iwl_dbgfs_csr_write(struct file *file,
+-				   const char __user *user_buf,
+-				   size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	char buf[8];
+-	int buf_size;
+-	int csr;
+-
+-	memset(buf, 0, sizeof(buf));
+-	buf_size = min(count, sizeof(buf) -  1);
+-	if (copy_from_user(buf, user_buf, buf_size))
+-		return -EFAULT;
+-	if (sscanf(buf, "%d", &csr) != 1)
+-		return -EFAULT;
+-
+-	iwl_pcie_dump_csr(trans);
+-
+-	return count;
+-}
+-
+-static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+-				     char __user *user_buf,
+-				     size_t count, loff_t *ppos)
+-{
+-	struct iwl_trans *trans = file->private_data;
+-	char *buf = NULL;
+-	ssize_t ret;
+-
+-	ret = iwl_dump_fh(trans, &buf);
+-	if (ret < 0)
+-		return ret;
+-	if (!buf)
+-		return -EINVAL;
+-	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+-	kfree(buf);
+-	return ret;
+-}
+-
+-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+-DEBUGFS_READ_FILE_OPS(fh_reg);
+-DEBUGFS_READ_FILE_OPS(rx_queue);
+-DEBUGFS_READ_FILE_OPS(tx_queue);
+-DEBUGFS_WRITE_FILE_OPS(csr);
+-
+-/* Create the debugfs files and directories */
+-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+-{
+-	struct dentry *dir = trans->dbgfs_dir;
+-
+-	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
+-	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
+-	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
+-	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
+-	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
+-	return 0;
+-
+-err:
+-	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+-	return -ENOMEM;
+-}
+-#endif /*CONFIG_IWLWIFI_DEBUGFS */
+-
+-static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
+-{
+-	u32 cmdlen = 0;
+-	int i;
+-
+-	for (i = 0; i < IWL_NUM_OF_TBS; i++)
+-		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
+-
+-	return cmdlen;
+-}
+-
+-static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+-				   struct iwl_fw_error_dump_data **data,
+-				   int allocated_rb_nums)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+-	struct iwl_rxq *rxq = &trans_pcie->rxq;
+-	u32 i, r, j, rb_len = 0;
+-
+-	spin_lock(&rxq->lock);
+-
+-	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+-
+-	for (i = rxq->read, j = 0;
+-	     i != r && j < allocated_rb_nums;
+-	     i = (i + 1) & RX_QUEUE_MASK, j++) {
+-		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+-		struct iwl_fw_error_dump_rb *rb;
+-
+-		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+-			       DMA_FROM_DEVICE);
+-
+-		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+-
+-		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+-		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+-		rb = (void *)(*data)->data;
+-		rb->index = cpu_to_le32(i);
+-		memcpy(rb->data, page_address(rxb->page), max_len);
+-		/* remap the page so the normal free path can unmap it again */
+-		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+-						     max_len,
+-						     DMA_FROM_DEVICE);
+-
+-		*data = iwl_fw_error_next_data(*data);
+-	}
+-
+-	spin_unlock(&rxq->lock);
+-
+-	return rb_len;
+-}
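
The RB walk above steps a power-of-two ring with i = (i + 1) &
RX_QUEUE_MASK, so the index wraps with a mask instead of a modulo. A
standalone sketch of the wrap; QUEUE_SIZE is illustrative, the driver's
ring has RX_QUEUE_SIZE entries:

    #include <stdio.h>

    #define QUEUE_SIZE 8                 /* must be a power of two */
    #define QUEUE_MASK (QUEUE_SIZE - 1)

    int main(void)
    {
            unsigned read = 6, closed = 3;   /* "closed_rb_num" stand-in */

            /* visit every filled slot from read up to, not including, closed */
            for (unsigned i = read; i != closed; i = (i + 1) & QUEUE_MASK)
                    printf("dump slot %u\n", i);   /* 6, 7, 0, 1, 2 */

            return 0;
    }
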
+-#define IWL_CSR_TO_DUMP (0x250)
+-
+-static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
+-				   struct iwl_fw_error_dump_data **data)
+-{
+-	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
+-	__le32 *val;
+-	int i;
+-
+-	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+-	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
+-	val = (void *)(*data)->data;
+-
+-	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
+-		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+-
+-	*data = iwl_fw_error_next_data(*data);
+-
+-	return csr_len;
+-}
+-
+-static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+-				       struct iwl_fw_error_dump_data **data)
+-{
+-	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+-	unsigned long flags;
+-	__le32 *val;
+-	int i;
+-
+-	if (!iwl_trans_grab_nic_access(trans, &flags))
+-		return 0;
+-
+-	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+-	(*data)->len = cpu_to_le32(fh_regs_len);
+-	val = (void *)(*data)->data;
+-
+-	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
+-		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+-
+-	iwl_trans_release_nic_access(trans, &flags);
+-
+-	*data = iwl_fw_error_next_data(*data);
+-
+-	return sizeof(**data) + fh_regs_len;
+-}
+-
+-static u32
+-iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+-				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+-				 u32 monitor_len)
+-{
+-	u32 buf_size_in_dwords = (monitor_len >> 2);
+-	u32 *buffer = (u32 *)fw_mon_data->data;
+-	unsigned long flags;
+-	u32 i;
+-
+-	if (!iwl_trans_grab_nic_access(trans, &flags))
+-		return 0;
+-
+-	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+-	for (i = 0; i < buf_size_in_dwords; i++)
+-		buffer[i] = iwl_read_prph_no_grab(trans,
+-				MON_DMARB_RD_DATA_ADDR);
+-	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+-
+-	iwl_trans_release_nic_access(trans, &flags);
+-
+-	return monitor_len;
+-}
+-
+-static u32
+-iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+-			    struct iwl_fw_error_dump_data **data,
+-			    u32 monitor_len)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	u32 len = 0;
+-
+-	if ((trans_pcie->fw_mon_page &&
+-	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+-	    trans->dbg_dest_tlv) {
+-		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+-		u32 base, write_ptr, wrap_cnt;
+-
+-		/* If there was a dest TLV - use the values from there */
+-		if (trans->dbg_dest_tlv) {
+-			write_ptr =
+-				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+-			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+-			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+-		} else {
+-			base = MON_BUFF_BASE_ADDR;
+-			write_ptr = MON_BUFF_WRPTR;
+-			wrap_cnt = MON_BUFF_CYCLE_CNT;
+-		}
+-
+-		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+-		fw_mon_data = (void *)(*data)->data;
+-		fw_mon_data->fw_mon_wr_ptr =
+-			cpu_to_le32(iwl_read_prph(trans, write_ptr));
+-		fw_mon_data->fw_mon_cycle_cnt =
+-			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+-		fw_mon_data->fw_mon_base_ptr =
+-			cpu_to_le32(iwl_read_prph(trans, base));
+-
+-		len += sizeof(**data) + sizeof(*fw_mon_data);
+-		if (trans_pcie->fw_mon_page) {
+-			/*
+-			 * The firmware is now asserted, it won't write anything
+-			 * to the buffer. CPU can take ownership to fetch the
+-			 * data. The buffer will be handed back to the device
+-			 * before the firmware will be restarted.
+-			 */
+-			dma_sync_single_for_cpu(trans->dev,
+-						trans_pcie->fw_mon_phys,
+-						trans_pcie->fw_mon_size,
+-						DMA_FROM_DEVICE);
+-			memcpy(fw_mon_data->data,
+-			       page_address(trans_pcie->fw_mon_page),
+-			       trans_pcie->fw_mon_size);
+-
+-			monitor_len = trans_pcie->fw_mon_size;
+-		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+-			/*
+-			 * Update pointers to reflect actual values after
+-			 * shifting
+-			 */
+-			base = iwl_read_prph(trans, base) <<
+-			       trans->dbg_dest_tlv->base_shift;
+-			iwl_trans_read_mem(trans, base, fw_mon_data->data,
+-					   monitor_len / sizeof(u32));
+-		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+-			monitor_len =
+-				iwl_trans_pci_dump_marbh_monitor(trans,
+-								 fw_mon_data,
+-								 monitor_len);
+-		} else {
+-			/* Didn't match anything - output no monitor data */
+-			monitor_len = 0;
+-		}
+-
+-		len += monitor_len;
+-		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+-	}
+-
+-	return len;
+-}
+-
+-static struct iwl_trans_dump_data
+-*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+-			  const struct iwl_fw_dbg_trigger_tlv *trigger)
+-{
+-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-	struct iwl_fw_error_dump_data *data;
+-	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
+-	struct iwl_fw_error_dump_txcmd *txcmd;
+-	struct iwl_trans_dump_data *dump_data;
+-	u32 len, num_rbs;
+-	u32 monitor_len;
+-	int i, ptr;
+-	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
+-
+-	/* transport dump header */
+-	len = sizeof(*dump_data);
+-
+-	/* host commands */
+-	len += sizeof(*data) +
+-		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+-
+-	/* FW monitor */
+-	if (trans_pcie->fw_mon_page) {
+-		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+-		       trans_pcie->fw_mon_size;
+-		monitor_len = trans_pcie->fw_mon_size;
+-	} else if (trans->dbg_dest_tlv) {
+-		u32 base, end;
+-
+-		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+-		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+-
+-		base = iwl_read_prph(trans, base) <<
+-		       trans->dbg_dest_tlv->base_shift;
+-		end = iwl_read_prph(trans, end) <<
+-		      trans->dbg_dest_tlv->end_shift;
+-
+-		/* Make "end" point to the actual end */
+-		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
+-		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
+-			end += (1 << trans->dbg_dest_tlv->end_shift);
+-		monitor_len = end - base;
+-		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+-		       monitor_len;
+-	} else {
+-		monitor_len = 0;
+-	}
+-
+-	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+-		dump_data = vzalloc(len);
+-		if (!dump_data)
+-			return NULL;
+-
+-		data = (void *)dump_data->data;
+-		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+-		dump_data->len = len;
+-
+-		return dump_data;
+-	}
+-
+-	/* CSR registers */
+-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+-
+-	/* FH registers */
+-	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+-
+-	if (dump_rbs) {
+-		/* RBs */
+-		num_rbs = le16_to_cpu(ACCESS_ONCE(
+-				      trans_pcie->rxq.rb_stts->closed_rb_num))
+-				      & 0x0FFF;
+-		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+-		len += num_rbs * (sizeof(*data) +
+-				  sizeof(struct iwl_fw_error_dump_rb) +
+-				  (PAGE_SIZE << trans_pcie->rx_page_order));
+-	}
+-
+-	dump_data = vzalloc(len);
+-	if (!dump_data)
+-		return NULL;
+-
+-	len = 0;
+-	data = (void *)dump_data->data;
+-	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+-	txcmd = (void *)data->data;
+-	spin_lock_bh(&cmdq->lock);
+-	ptr = cmdq->q.write_ptr;
+-	for (i = 0; i < cmdq->q.n_window; i++) {
+-		u8 idx = get_cmd_index(&cmdq->q, ptr);
+-		u32 caplen, cmdlen;
+-
+-		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
+-		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+-
+-		if (cmdlen) {
+-			len += sizeof(*txcmd) + caplen;
+-			txcmd->cmdlen = cpu_to_le32(cmdlen);
+-			txcmd->caplen = cpu_to_le32(caplen);
+-			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
+-			txcmd = (void *)((u8 *)txcmd->data + caplen);
+-		}
+-
+-		ptr = iwl_queue_dec_wrap(ptr);
+-	}
+-	spin_unlock_bh(&cmdq->lock);
+-
+-	data->len = cpu_to_le32(len);
+-	len += sizeof(*data);
+-	data = iwl_fw_error_next_data(data);
+-
+-	len += iwl_trans_pcie_dump_csr(trans, &data);
+-	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+-	if (dump_rbs)
+-		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
+-
+-	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+-
+-	dump_data->len = len;
+-
+-	return dump_data;
+-}
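
The dump routine is a two-pass serializer: pass one sums the worst-case
size of every chunk so a single vzalloc() can hold the whole dump, pass two
fills the chunks while advancing a cursor. A small sketch of the pattern
with made-up chunk sizes:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            const size_t chunk_len[] = { 16, 8, 32 }; /* txcmd, CSR, FH... */
            size_t len = 0;

            /* pass 1: compute the total size */
            for (size_t i = 0; i < 3; i++)
                    len += chunk_len[i];

            unsigned char *dump = calloc(1, len);     /* vzalloc() here */
            if (!dump)
                    return 1;

            /* pass 2: fill, advancing a cursor like the driver's *data */
            unsigned char *pos = dump;
            for (size_t i = 0; i < 3; i++) {
                    memset(pos, (int)i + 1, chunk_len[i]);
                    pos += chunk_len[i];
            }

            printf("dump of %zu bytes written\n", len);
            free(dump);
            return 0;
    }
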
+-
+-static const struct iwl_trans_ops trans_ops_pcie = {
+-	.start_hw = iwl_trans_pcie_start_hw,
+-	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
+-	.fw_alive = iwl_trans_pcie_fw_alive,
+-	.start_fw = iwl_trans_pcie_start_fw,
+-	.stop_device = iwl_trans_pcie_stop_device,
+-
+-	.d3_suspend = iwl_trans_pcie_d3_suspend,
+-	.d3_resume = iwl_trans_pcie_d3_resume,
+-
+-	.send_cmd = iwl_trans_pcie_send_hcmd,
+-
+-	.tx = iwl_trans_pcie_tx,
+-	.reclaim = iwl_trans_pcie_reclaim,
+-
+-	.txq_disable = iwl_trans_pcie_txq_disable,
+-	.txq_enable = iwl_trans_pcie_txq_enable,
+-
+-	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
+-	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
+-	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+-
+-	.write8 = iwl_trans_pcie_write8,
+-	.write32 = iwl_trans_pcie_write32,
+-	.read32 = iwl_trans_pcie_read32,
+-	.read_prph = iwl_trans_pcie_read_prph,
+-	.write_prph = iwl_trans_pcie_write_prph,
+-	.read_mem = iwl_trans_pcie_read_mem,
+-	.write_mem = iwl_trans_pcie_write_mem,
+-	.configure = iwl_trans_pcie_configure,
+-	.set_pmi = iwl_trans_pcie_set_pmi,
+-	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
+-	.release_nic_access = iwl_trans_pcie_release_nic_access,
+-	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
+-
+-	.ref = iwl_trans_pcie_ref,
+-	.unref = iwl_trans_pcie_unref,
+-
+-	.dump_data = iwl_trans_pcie_dump_data,
+-};
+-
+-struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+-				       const struct pci_device_id *ent,
+-				       const struct iwl_cfg *cfg)
+-{
+-	struct iwl_trans_pcie *trans_pcie;
+-	struct iwl_trans *trans;
+-	u16 pci_cmd;
+-	int ret;
+-
+-	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+-				&pdev->dev, cfg, &trans_ops_pcie, 0);
+-	if (!trans)
+-		return ERR_PTR(-ENOMEM);
+-
+-	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
+-
+-	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+-
+-	trans_pcie->trans = trans;
+-	spin_lock_init(&trans_pcie->irq_lock);
+-	spin_lock_init(&trans_pcie->reg_lock);
+-	spin_lock_init(&trans_pcie->ref_lock);
+-	mutex_init(&trans_pcie->mutex);
+-	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+-	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+-	if (!trans_pcie->tso_hdr_page) {
+-		ret = -ENOMEM;
+-		goto out_no_pci;
+-	}
+-
+-	ret = pci_enable_device(pdev);
+-	if (ret)
+-		goto out_no_pci;
+-
+-	if (!cfg->base_params->pcie_l1_allowed) {
+-		/*
+-		 * W/A - seems to solve weird behavior. We need to remove this
+-		 * if we don't want to stay in L1 all the time. This wastes a
+-		 * lot of power.
+-		 */
+-		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+-				       PCIE_LINK_STATE_L1 |
+-				       PCIE_LINK_STATE_CLKPM);
+-	}
+-
+-	pci_set_master(pdev);
+-
+-	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+-	if (!ret)
+-		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+-	if (ret) {
+-		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+-		if (!ret)
+-			ret = pci_set_consistent_dma_mask(pdev,
+-							  DMA_BIT_MASK(32));
+-		/* both attempts failed: */
+-		if (ret) {
+-			dev_err(&pdev->dev, "No suitable DMA available\n");
+-			goto out_pci_disable_device;
+-		}
+-	}
+-
+-	ret = pci_request_regions(pdev, DRV_NAME);
+-	if (ret) {
+-		dev_err(&pdev->dev, "pci_request_regions failed\n");
+-		goto out_pci_disable_device;
+-	}
+-
+-	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
+-	if (!trans_pcie->hw_base) {
+-		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
+-		ret = -ENODEV;
+-		goto out_pci_release_regions;
+-	}
+-
+-	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+-	 * PCI Tx retries from interfering with C3 CPU state */
+-	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+-
+-	trans->dev = &pdev->dev;
+-	trans_pcie->pci_dev = pdev;
+-	iwl_disable_interrupts(trans);
+-
+-	ret = pci_enable_msi(pdev);
+-	if (ret) {
+-		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+-		/* enable rfkill interrupt: hw bug w/a */
+-		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+-		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+-			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+-			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+-		}
+-	}
+-
+-	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+-	/*
+-	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+-	 * changed, and now the revision step also includes bit 0-1 (no more
+-	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
+-	 * in the old format.
+-	 */
+-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
+-		unsigned long flags;
+-
+-		trans->hw_rev = (trans->hw_rev & 0xfff0) |
+-				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+-
+-		ret = iwl_pcie_prepare_card_hw(trans);
+-		if (ret) {
+-			IWL_WARN(trans, "Exit HW not ready\n");
+-			goto out_pci_disable_msi;
+-		}
+-
+-		/*
+-		 * In order to recognize the C step, the driver should read the
+-		 * chip version id located in the AUX bus MISC address space.
+-		 */
+-		iwl_set_bit(trans, CSR_GP_CNTRL,
+-			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+-		udelay(2);
+-
+-		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+-				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+-				   25000);
+-		if (ret < 0) {
+-			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+-			goto out_pci_disable_msi;
+-		}
+-
+-		if (iwl_trans_grab_nic_access(trans, &flags)) {
+-			u32 hw_step;
+-
+-			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
+-			hw_step |= ENABLE_WFPM;
+-			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
+-			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
+-			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+-			if (hw_step == 0x3)
+-				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+-						(SILICON_C_STEP << 2);
+-			iwl_trans_release_nic_access(trans, &flags);
+-		}
+-	}
+-
+-	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+-	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+-		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+-
+-	/* Initialize the wait queue for commands */
+-	init_waitqueue_head(&trans_pcie->wait_command_queue);
+-
+-	ret = iwl_pcie_alloc_ict(trans);
+-	if (ret)
+-		goto out_pci_disable_msi;
+-
+-	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
+-				   iwl_pcie_irq_handler,
+-				   IRQF_SHARED, DRV_NAME, trans);
+-	if (ret) {
+-		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+-		goto out_free_ict;
+-	}
+-
+-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+-
+-	return trans;
+-
+-out_free_ict:
+-	iwl_pcie_free_ict(trans);
+-out_pci_disable_msi:
+-	pci_disable_msi(pdev);
+-out_pci_release_regions:
+-	pci_release_regions(pdev);
+-out_pci_disable_device:
+-	pci_disable_device(pdev);
+-out_no_pci:
+-	free_percpu(trans_pcie->tso_hdr_page);
+-	iwl_trans_free(trans);
+-	return ERR_PTR(ret);
+-}
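
The allocation path above unwinds with the kernel's usual goto ladder: each
step that can fail jumps to the label that releases exactly what was
acquired before it, in reverse order. A minimal sketch of the idiom;
acquire_a()/acquire_b() are illustrative stand-ins for pci_enable_device()
and friends:

    #include <stdio.h>
    #include <stdlib.h>

    static int acquire_a(void) { return 0; }            /* pretend success */
    static int acquire_b(void) { return -1; }           /* pretend failure */
    static void release_a(void) { puts("release_a"); }

    static int setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto out;               /* nothing to undo yet */

            ret = acquire_b();
            if (ret)
                    goto out_release_a;     /* undo only what succeeded */

            return 0;

    out_release_a:
            release_a();
    out:
            return ret;
    }

    int main(void)
    {
            return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
    }
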
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 840c47d8e2ce..de69a9cc900d 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -711,8 +711,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+ 	 */
+ 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ 	if (val & (BIT(1) | BIT(17))) {
+-		IWL_INFO(trans,
+-			 "can't access the RSA semaphore it is write protected\n");
++		IWL_DEBUG_INFO(trans,
++				"can't access the RSA semaphore it is write protected\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index 8de135174e82..8b978ad87cb3 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -305,7 +305,7 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
+-#define regulator_desc_s2mps11_buck6_10(num, min, step) {	\
++#define regulator_desc_s2mps11_buck67810(num, min, step) {	\
+ 	.name		= "BUCK"#num,				\
+ 	.id		= S2MPS11_BUCK##num,			\
+ 	.ops		= &s2mps11_buck_ops,			\
+@@ -321,6 +321,22 @@ static struct regulator_ops s2mps11_buck_ops = {
+ 	.enable_mask	= S2MPS11_ENABLE_MASK			\
+ }
+ 
++#define regulator_desc_s2mps11_buck9 {				\
++	.name		= "BUCK9",				\
++	.id		= S2MPS11_BUCK9,			\
++	.ops		= &s2mps11_buck_ops,			\
++	.type		= REGULATOR_VOLTAGE,			\
++	.owner		= THIS_MODULE,				\
++	.min_uV		= MIN_3000_MV,				\
++	.uV_step	= STEP_25_MV,				\
++	.n_voltages	= S2MPS11_BUCK9_N_VOLTAGES,		\
++	.ramp_delay	= S2MPS11_RAMP_DELAY,			\
++	.vsel_reg	= S2MPS11_REG_B9CTRL2,			\
++	.vsel_mask	= S2MPS11_BUCK9_VSEL_MASK,		\
++	.enable_reg	= S2MPS11_REG_B9CTRL1,			\
++	.enable_mask	= S2MPS11_ENABLE_MASK			\
++}
++
+ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_ldo(1, STEP_25_MV),
+ 	regulator_desc_s2mps11_ldo(2, STEP_50_MV),
+@@ -365,11 +381,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
+ 	regulator_desc_s2mps11_buck1_4(3),
+ 	regulator_desc_s2mps11_buck1_4(4),
+ 	regulator_desc_s2mps11_buck5,
+-	regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
+-	regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
+-	regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
++	regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
++	regulator_desc_s2mps11_buck9,
++	regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
+ };
+ 
+ static struct regulator_ops s2mps14_reg_ops;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index ae9eb716c02f..db6985f04054 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5352,6 +5352,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	}
+ 
+ 	bos = udev->bos;
++	udev->bos = NULL;
+ 
+ 	for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ 
+@@ -5444,11 +5445,8 @@ done:
+ 	usb_set_usb2_hardware_lpm(udev, 1);
+ 	usb_unlocked_enable_lpm(udev);
+ 	usb_enable_ltm(udev);
+-	/* release the new BOS descriptor allocated  by hub_port_init() */
+-	if (udev->bos != bos) {
+-		usb_release_bos_descriptor(udev);
+-		udev->bos = bos;
+-	}
++	usb_release_bos_descriptor(udev);
++	udev->bos = bos;
+ 	return 0;
+ 
+ re_enumerate:
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index bdc0f2f24f19..a2b43a6e7fa7 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -108,6 +108,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
+ 	{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
+ 	{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
++	{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
+ 	{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
+ 	{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
+@@ -117,6 +118,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
+ 	{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
+ 	{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
++	{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ 	{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
+ 	{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
+ 	{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+@@ -140,6 +142,8 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
+ 	{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
+ 	{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
++	{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
++	{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
+ 	{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
+ 	{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ 	{ USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
+index 735d7522a3a9..204659a5f6db 100644
+--- a/fs/isofs/rock.c
++++ b/fs/isofs/rock.c
+@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
+ 	int retnamlen = 0;
+ 	int truncate = 0;
+ 	int ret = 0;
++	char *p;
++	int len;
+ 
+ 	if (!ISOFS_SB(inode->i_sb)->s_rock)
+ 		return 0;
+@@ -267,12 +269,17 @@ repeat:
+ 					rr->u.NM.flags);
+ 				break;
+ 			}
+-			if ((strlen(retname) + rr->len - 5) >= 254) {
++			len = rr->len - 5;
++			if (retnamlen + len >= 254) {
+ 				truncate = 1;
+ 				break;
+ 			}
+-			strncat(retname, rr->u.NM.name, rr->len - 5);
+-			retnamlen += rr->len - 5;
++			p = memchr(rr->u.NM.name, '\0', len);
++			if (unlikely(p))
++				len = p - rr->u.NM.name;
++			memcpy(retname + retnamlen, rr->u.NM.name, len);
++			retnamlen += len;
++			retname[retnamlen] = '\0';
+ 			break;
+ 		case SIG('R', 'E'):
+ 			kfree(rs.buffer);
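
The rock.c change above drops strlen()/strncat() in favour of explicit length bookkeeping, so an NM entry carrying an embedded NUL byte can no longer desynchronize the accumulated name length, which was the root cause of the overflow being fixed. A user-space sketch of the same defensive append, with illustrative names:

    #include <string.h>

    /* Append up to len bytes of src to dst (dst already holds dstlen
     * bytes, 254 max), stopping at any embedded NUL.  Returns the new
     * length, or -1 to signal truncation. */
    static int rr_append(char *dst, int dstlen, const char *src, int len)
    {
            const char *nul = memchr(src, '\0', len);

            if (nul)
                    len = nul - src;
            if (dstlen + len >= 254)
                    return -1;
            memcpy(dst + dstlen, src, len);
            dstlen += len;
            dst[dstlen] = '\0';
            return dstlen;
    }
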
+diff --git a/fs/namei.c b/fs/namei.c
+index f3cc848da8bc..c7a6eabc02a5 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2839,22 +2839,10 @@ no_open:
+ 		dentry = lookup_real(dir, dentry, nd->flags);
+ 		if (IS_ERR(dentry))
+ 			return PTR_ERR(dentry);
+-
+-		if (create_error) {
+-			int open_flag = op->open_flag;
+-
+-			error = create_error;
+-			if ((open_flag & O_EXCL)) {
+-				if (!dentry->d_inode)
+-					goto out;
+-			} else if (!dentry->d_inode) {
+-				goto out;
+-			} else if ((open_flag & O_TRUNC) &&
+-				   d_is_reg(dentry)) {
+-				goto out;
+-			}
+-			/* will fail later, go on to get the right error */
+-		}
++	}
++	if (create_error && !dentry->d_inode) {
++		error = create_error;
++		goto out;
+ 	}
+ looked_up:
+ 	path->dentry = dentry;
+diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
+index c58a1bcfda0f..762e5a3aecd3 100644
+--- a/fs/ocfs2/acl.c
++++ b/fs/ocfs2/acl.c
+@@ -308,3 +308,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
+ 
+ 	return acl;
+ }
++
++int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl;
++	int ret;
++
++	if (S_ISLNK(inode->i_mode))
++		return -EOPNOTSUPP;
++
++	if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
++		return 0;
++
++	acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
++	if (IS_ERR(acl) || !acl)
++		return PTR_ERR(acl);
++	ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
++	if (ret)
++		return ret;
++	ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
++			    acl, NULL, NULL);
++	posix_acl_release(acl);
++	return ret;
++}
++
++/*
++ * Initialize the ACLs of a new inode. If parent directory has default ACL,
++ * then clone to new inode. Called from ocfs2_mknod.
++ */
++int ocfs2_init_acl(handle_t *handle,
++		   struct inode *inode,
++		   struct inode *dir,
++		   struct buffer_head *di_bh,
++		   struct buffer_head *dir_bh,
++		   struct ocfs2_alloc_context *meta_ac,
++		   struct ocfs2_alloc_context *data_ac)
++{
++	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
++	struct posix_acl *acl = NULL;
++	int ret = 0, ret2;
++	umode_t mode;
++
++	if (!S_ISLNK(inode->i_mode)) {
++		if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
++			acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
++						   dir_bh);
++			if (IS_ERR(acl))
++				return PTR_ERR(acl);
++		}
++		if (!acl) {
++			mode = inode->i_mode & ~current_umask();
++			ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++			if (ret) {
++				mlog_errno(ret);
++				goto cleanup;
++			}
++		}
++	}
++	if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
++		if (S_ISDIR(inode->i_mode)) {
++			ret = ocfs2_set_acl(handle, inode, di_bh,
++					    ACL_TYPE_DEFAULT, acl,
++					    meta_ac, data_ac);
++			if (ret)
++				goto cleanup;
++		}
++		mode = inode->i_mode;
++		ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
++		if (ret < 0)
++			return ret;
++
++		ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
++		if (ret2) {
++			mlog_errno(ret2);
++			ret = ret2;
++			goto cleanup;
++		}
++		if (ret > 0) {
++			ret = ocfs2_set_acl(handle, inode,
++					    di_bh, ACL_TYPE_ACCESS,
++					    acl, meta_ac, data_ac);
++		}
++	}
++cleanup:
++	posix_acl_release(acl);
++	return ret;
++}
+diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h
+index 3fce68d08625..2783a75b3999 100644
+--- a/fs/ocfs2/acl.h
++++ b/fs/ocfs2/acl.h
+@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
+ 			 struct posix_acl *acl,
+ 			 struct ocfs2_alloc_context *meta_ac,
+ 			 struct ocfs2_alloc_context *data_ac);
++extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
++extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
++			  struct buffer_head *, struct buffer_head *,
++			  struct ocfs2_alloc_context *,
++			  struct ocfs2_alloc_context *);
+ 
+ #endif /* OCFS2_ACL_H */
+diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
+index d8b670cbd909..3f1ee404f40f 100644
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1256,18 +1256,18 @@ bail_unlock_rw:
+ 	if (size_change)
+ 		ocfs2_rw_unlock(inode, 1);
+ bail:
+-	brelse(bh);
+ 
+ 	/* Release quota pointers in case we acquired them */
+ 	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
+ 		dqput(transfer_to[qtype]);
+ 
+ 	if (!status && attr->ia_valid & ATTR_MODE) {
+-		status = posix_acl_chmod(inode, inode->i_mode);
++		status = ocfs2_acl_chmod(inode, bh);
+ 		if (status < 0)
+ 			mlog_errno(status);
+ 	}
+ 
++	brelse(bh);
+ 	return status;
+ }
+ 
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 4d5e0a573f4f..2077dbdd4883 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -257,7 +257,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	struct ocfs2_dir_lookup_result lookup = { NULL, };
+ 	sigset_t oldset;
+ 	int did_block_signals = 0;
+-	struct posix_acl *default_acl = NULL, *acl = NULL;
+ 	struct ocfs2_dentry_lock *dl = NULL;
+ 
+ 	trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
+@@ -360,14 +359,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 		goto leave;
+ 	}
+ 
+-	status = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (status) {
+-		mlog_errno(status);
+-		goto leave;
+-	}
+-	/* update inode->i_mode after mask with "umask". */
+-	inode->i_mode = mode;
+-
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+ 							    xattr_credits));
+@@ -416,16 +407,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 		inc_nlink(dir);
+ 	}
+ 
+-	if (default_acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_DEFAULT, default_acl,
+-				       meta_ac, data_ac);
+-	}
+-	if (!status && acl) {
+-		status = ocfs2_set_acl(handle, inode, new_fe_bh,
+-				       ACL_TYPE_ACCESS, acl,
+-				       meta_ac, data_ac);
+-	}
++	status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
++			 meta_ac, data_ac);
+ 
+ 	if (status < 0) {
+ 		mlog_errno(status);
+@@ -467,10 +450,6 @@ static int ocfs2_mknod(struct inode *dir,
+ 	d_instantiate(dentry, inode);
+ 	status = 0;
+ leave:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (status < 0 && did_quota_inode)
+ 		dquot_free_inode(inode);
+ 	if (handle)
+diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
+index d8c6af101f3f..57b3aafe50c4 100644
+--- a/fs/ocfs2/refcounttree.c
++++ b/fs/ocfs2/refcounttree.c
+@@ -4266,20 +4266,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	struct inode *inode = d_inode(old_dentry);
+ 	struct buffer_head *old_bh = NULL;
+ 	struct inode *new_orphan_inode = NULL;
+-	struct posix_acl *default_acl, *acl;
+-	umode_t mode;
+ 
+ 	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
+ 		return -EOPNOTSUPP;
+ 
+-	mode = inode->i_mode;
+-	error = posix_acl_create(dir, &mode, &default_acl, &acl);
+-	if (error) {
+-		mlog_errno(error);
+-		return error;
+-	}
+ 
+-	error = ocfs2_create_inode_in_orphan(dir, mode,
++	error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
+ 					     &new_orphan_inode);
+ 	if (error) {
+ 		mlog_errno(error);
+@@ -4318,16 +4310,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
+ 	/* If the security isn't preserved, we need to re-initialize them. */
+ 	if (!preserve) {
+ 		error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
+-						    &new_dentry->d_name,
+-						    default_acl, acl);
++						    &new_dentry->d_name);
+ 		if (error)
+ 			mlog_errno(error);
+ 	}
+ out:
+-	if (default_acl)
+-		posix_acl_release(default_acl);
+-	if (acl)
+-		posix_acl_release(acl);
+ 	if (!error) {
+ 		error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
+ 						       new_dentry);
+diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
+index d03bfbf3d27d..fdddc7a85810 100644
+--- a/fs/ocfs2/xattr.c
++++ b/fs/ocfs2/xattr.c
+@@ -7205,12 +7205,10 @@ out:
+  */
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl)
++				const struct qstr *qstr)
+ {
+-	struct buffer_head *dir_bh = NULL;
+ 	int ret = 0;
++	struct buffer_head *dir_bh = NULL;
+ 
+ 	ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
+ 	if (ret) {
+@@ -7223,11 +7221,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
+ 		mlog_errno(ret);
+ 		goto leave;
+ 	}
+-
+-	if (!ret && default_acl)
+-		ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+-	if (!ret && acl)
+-		ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
++	ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
++	if (ret)
++		mlog_errno(ret);
+ 
+ 	ocfs2_inode_unlock(dir, 0);
+ 	brelse(dir_bh);
+diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h
+index f10d5b93c366..1633cc15ea1f 100644
+--- a/fs/ocfs2/xattr.h
++++ b/fs/ocfs2/xattr.h
+@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
+ 			 bool preserve_security);
+ int ocfs2_init_security_and_acl(struct inode *dir,
+ 				struct inode *inode,
+-				const struct qstr *qstr,
+-				struct posix_acl *default_acl,
+-				struct posix_acl *acl);
++				const struct qstr *qstr);
+ #endif /* OCFS2_XATTR_H */
+diff --git a/fs/pnode.c b/fs/pnode.c
+index 6367e1e435c6..99899705b105 100644
+--- a/fs/pnode.c
++++ b/fs/pnode.c
+@@ -198,10 +198,15 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
+ 
+ /* all accesses are serialized by namespace_sem */
+ static struct user_namespace *user_ns;
+-static struct mount *last_dest, *last_source, *dest_master;
++static struct mount *last_dest, *first_source, *last_source, *dest_master;
+ static struct mountpoint *mp;
+ static struct hlist_head *list;
+ 
++static inline bool peers(struct mount *m1, struct mount *m2)
++{
++	return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
++}
++
+ static int propagate_one(struct mount *m)
+ {
+ 	struct mount *child;
+@@ -212,24 +217,26 @@ static int propagate_one(struct mount *m)
+ 	/* skip if mountpoint isn't covered by it */
+ 	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+ 		return 0;
+-	if (m->mnt_group_id == last_dest->mnt_group_id) {
++	if (peers(m, last_dest)) {
+ 		type = CL_MAKE_SHARED;
+ 	} else {
+ 		struct mount *n, *p;
++		bool done;
+ 		for (n = m; ; n = p) {
+ 			p = n->mnt_master;
+-			if (p == dest_master || IS_MNT_MARKED(p)) {
+-				while (last_dest->mnt_master != p) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
+-				if (n->mnt_group_id != last_dest->mnt_group_id) {
+-					last_source = last_source->mnt_master;
+-					last_dest = last_source->mnt_parent;
+-				}
++			if (p == dest_master || IS_MNT_MARKED(p))
+ 				break;
+-			}
+ 		}
++		do {
++			struct mount *parent = last_source->mnt_parent;
++			if (last_source == first_source)
++				break;
++			done = parent->mnt_master == p;
++			if (done && peers(n, parent))
++				break;
++			last_source = last_source->mnt_master;
++		} while (!done);
++
+ 		type = CL_SLAVE;
+ 		/* beginning of peer group among the slaves? */
+ 		if (IS_MNT_SHARED(m))
+@@ -281,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
+ 	 */
+ 	user_ns = current->nsproxy->mnt_ns->user_ns;
+ 	last_dest = dest_mnt;
++	first_source = source_mnt;
+ 	last_source = source_mnt;
+ 	mp = dest_mp;
+ 	list = tree_list;
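
The new peers() helper above exists because the old test, m->mnt_group_id == last_dest->mnt_group_id, also matched when both ids were 0, and a group id of 0 means "not in any peer group", so two such mounts must never be treated as peers. A toy illustration with a stand-in struct:

    struct mnt { int mnt_group_id; };

    static inline int peers(struct mnt *m1, struct mnt *m2)
    {
            /* equal AND non-zero: id 0 marks "no peer group" */
            return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
    }
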
+diff --git a/include/linux/hash.h b/include/linux/hash.h
+index 1afde47e1528..79c52fa81cac 100644
+--- a/include/linux/hash.h
++++ b/include/linux/hash.h
+@@ -32,12 +32,28 @@
+ #error Wordsize not 32 or 64
+ #endif
+ 
++/*
++ * The above primes are actively bad for hashing, since they are
++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
++ * real problems. Besides, the "prime" part is pointless for the
++ * multiplicative hash.
++ *
++ * Although a random odd number will do, it turns out that the golden
++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
++ * properties.
++ *
++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
++ * (See Knuth vol 3, section 6.4, exercise 9.)
++ */
++#define GOLDEN_RATIO_32 0x61C88647
++#define GOLDEN_RATIO_64 0x61C8864680B583EBull
++
+ static __always_inline u64 hash_64(u64 val, unsigned int bits)
+ {
+ 	u64 hash = val;
+ 
+-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
+-	hash = hash * GOLDEN_RATIO_PRIME_64;
++#if BITS_PER_LONG == 64
++	hash = hash * GOLDEN_RATIO_64;
+ #else
+ 	/*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
+ 	u64 n = hash;
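
To make the comment above concrete: after this change the 64-bit path is a plain multiplicative hash, i.e. multiply by an odd constant with well-spread bits, then keep the top bits, which depend on every input bit. A stand-alone sketch (the final shift mirrors how hash_64() reduces its result to "bits" bits):

    #include <stdint.h>

    #define GOLDEN_RATIO_64 0x61C8864680B583EBull

    static inline uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
    {
            /* keep the high bits of the product; they mix all of val */
            return (val * GOLDEN_RATIO_64) >> (64 - bits);
    }
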
+diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
+index 7981a9d77d3f..ad81a1a7193f 100644
+--- a/include/linux/mfd/samsung/s2mps11.h
++++ b/include/linux/mfd/samsung/s2mps11.h
+@@ -173,10 +173,12 @@ enum s2mps11_regulators {
+ 
+ #define S2MPS11_LDO_VSEL_MASK	0x3F
+ #define S2MPS11_BUCK_VSEL_MASK	0xFF
++#define S2MPS11_BUCK9_VSEL_MASK	0x1F
+ #define S2MPS11_ENABLE_MASK	(0x03 << S2MPS11_ENABLE_SHIFT)
+ #define S2MPS11_ENABLE_SHIFT	0x06
+ #define S2MPS11_LDO_N_VOLTAGES	(S2MPS11_LDO_VSEL_MASK + 1)
+ #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
++#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
+ #define S2MPS11_RAMP_DELAY	25000		/* uV/us */
+ 
+ 
+diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
+index f34e040b34e9..41c93844fb1d 100644
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
+ 	 1 << PG_private | 1 << PG_private_2 | \
+ 	 1 << PG_writeback | 1 << PG_reserved | \
+ 	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
+-	 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
++	 1 << PG_unevictable | __PG_MLOCKED | \
+ 	 __PG_COMPOUND_LOCK)
+ 
+ /*
+  * Flags checked when a page is prepped for return by the page allocator.
+- * Pages being prepped should not have any flags set.  It they are set,
++ * Pages being prepped should not have these flags set.  If they are set,
+  * there has been a kernel bug or struct page corruption.
++ *
++ * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
++ * alloc-free cycle to prevent from reusing the page.
+  */
+-#define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
++#define PAGE_FLAGS_CHECK_AT_PREP	\
++	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
+ 
+ #define PAGE_FLAGS_PRIVATE				\
+ 	(1 << PG_private | 1 << PG_private_2)
+diff --git a/include/rdma/ib.h b/include/rdma/ib.h
+index cf8f9e700e48..a6b93706b0fc 100644
+--- a/include/rdma/ib.h
++++ b/include/rdma/ib.h
+@@ -34,6 +34,7 @@
+ #define _RDMA_IB_H
+ 
+ #include <linux/types.h>
++#include <linux/sched.h>
+ 
+ struct ib_addr {
+ 	union {
+@@ -86,4 +87,19 @@ struct sockaddr_ib {
+ 	__u64			sib_scope_id;
+ };
+ 
++/*
++ * The IB interfaces that use write() as bi-directional ioctl() are
++ * fundamentally unsafe, since there are lots of ways to trigger "write()"
++ * calls from various contexts with elevated privileges. That includes the
++ * traditional suid executable error message writes, but also various kernel
++ * interfaces that can write to file descriptors.
++ *
++ * This function provides protection for the legacy API by restricting the
++ * calling context.
++ */
++static inline bool ib_safe_file_access(struct file *filp)
++{
++	return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
++}
++
+ #endif /* _RDMA_IB_H */
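
A hedged sketch of how a legacy write()-as-ioctl handler would use the new helper, as an early bail-out before any command parsing (the function name and surrounding code are illustrative, not taken from a specific driver):

    static ssize_t example_write(struct file *filp, const char __user *buf,
                                 size_t len, loff_t *pos)
    {
            /* reject writes reaching us via splice(), sendfile(), or a
             * suid program's redirected stderr, per the comment above */
            if (!ib_safe_file_access(filp))
                    return -EACCES;

            /* ... parse and execute the command from buf as before ... */
            return len;
    }
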
+diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
+index c039f1d68a09..086168e18ca8 100644
+--- a/include/uapi/linux/v4l2-dv-timings.h
++++ b/include/uapi/linux/v4l2-dv-timings.h
+@@ -183,7 +183,8 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -191,14 +192,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -206,14 +209,16 @@
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_3840X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -221,7 +226,8 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P24 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -229,14 +235,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P25 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P30 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+@@ -244,14 +252,16 @@
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P50 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
+ }
+ 
+ #define V4L2_DV_BT_CEA_4096X2160P60 { \
+ 	.type = V4L2_DV_BT_656_1120, \
+-	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
++	V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
++		V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
+ 		594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
+ 		V4L2_DV_BT_STD_CEA861, \
+ 		V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 7f63ad978cb8..dba8894d25cc 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 			 bool truncated)
+ {
+ 	struct ring_buffer *rb = handle->rb;
++	bool wakeup = truncated;
+ 	unsigned long aux_head;
+ 	u64 flags = 0;
+ 
+@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
+ 
+ 	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
+-		perf_output_wakeup(handle);
++		wakeup = true;
+ 		local_add(rb->aux_watermark, &rb->aux_wakeup);
+ 	}
++
++	if (wakeup) {
++		if (truncated)
++			handle->event->pending_disable = 1;
++		perf_output_wakeup(handle);
++	}
++
+ 	handle->event = NULL;
+ 
+ 	local_set(&rb->aux_nest, 0);
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index f69ec1295b0b..6459f77e2c72 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1626,8 +1626,13 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
+ 	trace_create_file("filter", 0644, file->dir, file,
+ 			  &ftrace_event_filter_fops);
+ 
+-	trace_create_file("trigger", 0644, file->dir, file,
+-			  &event_trigger_fops);
++	/*
++	 * Only event directories that can be enabled should have
++	 * triggers.
++	 */
++	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
++		trace_create_file("trigger", 0644, file->dir, file,
++				  &event_trigger_fops);
+ 
+ 	trace_create_file("format", 0444, file->dir, call,
+ 			  &ftrace_event_format_fops);
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 6d631161705c..d0efe9295a0e 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -654,6 +654,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
+ 	 */
+ 	smp_wmb();
+ 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
++	/*
++	 * The following mb guarantees that previous clear of a PENDING bit
++	 * will not be reordered with any speculative LOADS or STORES from
++	 * work->current_func, which is executed afterwards.  This possible
++	 * reordering can lead to a missed execution on attempt to queue
++	 * the same @work.  E.g. consider this case:
++	 *
++	 *   CPU#0                         CPU#1
++	 *   ----------------------------  --------------------------------
++	 *
++	 * 1  STORE event_indicated
++	 * 2  queue_work_on() {
++	 * 3    test_and_set_bit(PENDING)
++	 * 4 }                             set_..._and_clear_pending() {
++	 * 5                                 set_work_data() # clear bit
++	 * 6                                 smp_mb()
++	 * 7                               work->current_func() {
++	 * 8				      LOAD event_indicated
++	 *				   }
++	 *
++	 * Without an explicit full barrier speculative LOAD on line 8 can
++	 * be executed before CPU#0 does STORE on line 1.  If that happens,
++	 * CPU#0 observes the PENDING bit is still set and new execution of
++	 * a @work is not queued, in the hope that CPU#1 will eventually
++	 * finish the queued @work.  Meanwhile CPU#1 does not see
++	 * event_indicated is set, because speculative LOAD was executed
++	 * before actual STORE.
++	 */
++	smp_mb();
+ }
+ 
+ static void clear_work_data(struct work_struct *work)
+@@ -4448,6 +4477,17 @@ static void rebind_workers(struct worker_pool *pool)
+ 						  pool->attrs->cpumask) < 0);
+ 
+ 	spin_lock_irq(&pool->lock);
++
++	/*
++	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
++	 * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
++	 * being reworked and this can go away in time.
++	 */
++	if (!(pool->flags & POOL_DISASSOCIATED)) {
++		spin_unlock_irq(&pool->lock);
++		return;
++	}
++
+ 	pool->flags &= ~POOL_DISASSOCIATED;
+ 
+ 	for_each_pool_worker(worker, pool) {
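
To pin down the race in the barrier comment above: the producer publishes data and then queues work, and correctness depends on the worker's loads not being hoisted above the PENDING clear. A sketch of the two sides (event_indicated, my_work and handle_event are illustrative names, not kernel symbols):

    static int event_indicated;
    static void handle_event(void);                 /* illustrative */
    static void worker_fn(struct work_struct *w);
    static DECLARE_WORK(my_work, worker_fn);

    static void producer(void)                      /* the comment's CPU#0 */
    {
            WRITE_ONCE(event_indicated, 1);         /* line 1: STORE */
            schedule_work(&my_work);                /* line 3: sets PENDING */
    }

    static void worker_fn(struct work_struct *w)    /* CPU#1 */
    {
            /* line 8: without the new smp_mb() after the PENDING clear,
             * this load could be speculated early and miss the store */
            if (READ_ONCE(event_indicated))
                    handle_event();
    }
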
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 018f08da99a2..3dcf93cd622b 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -853,16 +853,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ 							ISOLATE_UNEVICTABLE);
+ 
+-		/*
+-		 * In case of fatal failure, release everything that might
+-		 * have been isolated in the previous iteration, and signal
+-		 * the failure back to caller.
+-		 */
+-		if (!pfn) {
+-			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
++		if (!pfn)
+ 			break;
+-		}
+ 
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ 			break;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 8e792ec5e84c..52975ebcfaa4 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
+ 		/* after clearing PageTail the gup refcount can be released */
+ 		smp_mb__after_atomic();
+ 
+-		/*
+-		 * retain hwpoison flag of the poisoned tail page:
+-		 *   fix for the unsuitable process killed on Guest Machine(KVM)
+-		 *   by the memory-failure.
+-		 */
+-		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
++		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+ 		page_tail->flags |= (page->flags &
+ 				     ((1L << PG_referenced) |
+ 				      (1L << PG_swapbacked) |
+@@ -2066,10 +2061,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
+ 		 * page fault if needed.
+ 		 */
+ 		return 0;
+-	if (vma->vm_ops)
++	if (vma->vm_ops || (vm_flags & VM_NO_THP))
+ 		/* khugepaged not yet working on file or special mappings */
+ 		return 0;
+-	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
+ 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
+ 	hend = vma->vm_end & HPAGE_PMD_MASK;
+ 	if (hstart < hend)
+@@ -2426,8 +2420,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
+ 		return false;
+ 	if (is_vma_temporary_stack(vma))
+ 		return false;
+-	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+-	return true;
++	return !(vma->vm_flags & VM_NO_THP);
+ }
+ 
+ static void collapse_huge_page(struct mm_struct *mm,
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index e26bc59d7dff..7207c16f39c9 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1695,20 +1695,7 @@ static int __soft_offline_page(struct page *page, int flags)
+ 			if (ret > 0)
+ 				ret = -EIO;
+ 		} else {
+-			/*
+-			 * After page migration succeeds, the source page can
+-			 * be trapped in pagevec and actual freeing is delayed.
+-			 * Freeing code works differently based on PG_hwpoison,
+-			 * so there's a race. We need to make sure that the
+-			 * source page should be freed back to buddy before
+-			 * setting PG_hwpoison.
+-			 */
+-			if (!is_free_buddy_page(page))
+-				drain_all_pages(page_zone(page));
+ 			SetPageHWPoison(page);
+-			if (!is_free_buddy_page(page))
+-				pr_info("soft offline: %#lx: page leaked\n",
+-					pfn);
+ 			atomic_long_inc(&num_poisoned_pages);
+ 		}
+ 	} else {
+@@ -1760,14 +1747,6 @@ int soft_offline_page(struct page *page, int flags)
+ 
+ 	get_online_mems();
+ 
+-	/*
+-	 * Isolate the page, so that it doesn't get reallocated if it
+-	 * was free. This flag should be kept set until the source page
+-	 * is freed and PG_hwpoison on it is set.
+-	 */
+-	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+-		set_migratetype_isolate(page, true);
+-
+ 	ret = get_any_page(page, pfn, flags);
+ 	put_online_mems();
+ 	if (ret > 0) { /* for in-use pages */
+@@ -1786,6 +1765,5 @@ int soft_offline_page(struct page *page, int flags)
+ 				atomic_long_inc(&num_poisoned_pages);
+ 		}
+ 	}
+-	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
+ 	return ret;
+ }
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 8c4841a6dc4c..fe71f91c7b27 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -918,7 +918,8 @@ out:
+ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+ 				   free_page_t put_new_page,
+ 				   unsigned long private, struct page *page,
+-				   int force, enum migrate_mode mode)
++				   int force, enum migrate_mode mode,
++				   enum migrate_reason reason)
+ {
+ 	int rc = 0;
+ 	int *result = NULL;
+@@ -949,7 +950,11 @@ out:
+ 		list_del(&page->lru);
+ 		dec_zone_page_state(page, NR_ISOLATED_ANON +
+ 				page_is_file_cache(page));
+-		putback_lru_page(page);
++		/* Soft-offlined page shouldn't go through lru cache list */
++		if (reason == MR_MEMORY_FAILURE)
++			put_page(page);
++		else
++			putback_lru_page(page);
+ 	}
+ 
+ 	/*
+@@ -1122,7 +1127,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
+ 						pass > 2, mode);
+ 			else
+ 				rc = unmap_and_move(get_new_page, put_new_page,
+-						private, page, pass > 2, mode);
++						private, page, pass > 2, mode,
++						reason);
+ 
+ 			switch(rc) {
+ 			case -ENOMEM:
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 872b2ac95dec..551923097bbc 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -962,6 +962,10 @@ static inline int check_new_page(struct page *page)
+ 		bad_reason = "non-NULL mapping";
+ 	if (unlikely(atomic_read(&page->_count) != 0))
+ 		bad_reason = "nonzero _count";
++	if (unlikely(page->flags & __PG_HWPOISON)) {
++		bad_reason = "HWPoisoned (hardware-corrupted)";
++		bad_flags = __PG_HWPOISON;
++	}
+ 	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
+ 		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
+ 		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 1a17bd7c0ce5..e1a95dbcd5f8 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2470,7 +2470,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
+ 		sc->gfp_mask |= __GFP_HIGHMEM;
+ 
+ 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+-					requested_highidx, sc->nodemask) {
++					gfp_zone(sc->gfp_mask), sc->nodemask) {
+ 		enum zone_type classzone_idx;
+ 
+ 		if (!populated_zone(zone))
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 13fad8668f83..bc3f791845aa 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1735,6 +1735,7 @@ void nf_conntrack_init_end(void)
+ 
+ int nf_conntrack_init_net(struct net *net)
+ {
++	static atomic64_t unique_id;
+ 	int ret = -ENOMEM;
+ 	int cpu;
+ 
+@@ -1758,7 +1759,8 @@ int nf_conntrack_init_net(struct net *net)
+ 	if (!net->ct.stat)
+ 		goto err_pcpu_lists;
+ 
+-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
++				(u64)atomic64_inc_return(&unique_id));
+ 	if (!net->ct.slabname)
+ 		goto err_slabname;
+ 
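
The slab-name hunk above is an information-leak fix as much as a uniqueness one: kasprintf() with "%p" embedded a live kernel pointer in the cache name, where it is visible in /proc/slabinfo, while a monotonically increasing counter keeps the names unique without exposing an address. The pattern in isolation:

    static atomic64_t unique_id;
    char *name;

    /* one name per call, no kernel address in sight */
    name = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
                     (u64)atomic64_inc_return(&unique_id));
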
+diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
+index a6e3d9b511ab..aa6121c2dd84 100644
+--- a/sound/pci/hda/hda_sysfs.c
++++ b/sound/pci/hda/hda_sysfs.c
+@@ -141,14 +141,6 @@ static int reconfig_codec(struct hda_codec *codec)
+ 	err = snd_hda_codec_configure(codec);
+ 	if (err < 0)
+ 		goto error;
+-	/* rebuild PCMs */
+-	err = snd_hda_codec_build_pcms(codec);
+-	if (err < 0)
+-		goto error;
+-	/* rebuild mixers */
+-	err = snd_hda_codec_build_controls(codec);
+-	if (err < 0)
+-		goto error;
+ 	err = snd_card_register(codec->card);
+  error:
+ 	snd_hda_power_down(codec);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 86b83f521613..29595e0c3fb4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5451,6 +5451,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+@@ -6449,6 +6450,8 @@ enum {
+ 	ALC668_FIXUP_AUTO_MUTE,
+ 	ALC668_FIXUP_DELL_DISABLE_AAMIX,
+ 	ALC668_FIXUP_DELL_XPS13,
++	ALC662_FIXUP_ASUS_Nx50,
++	ALC668_FIXUP_ASUS_Nx51,
+ };
+ 
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -6689,6 +6692,21 @@ static const struct hda_fixup alc662_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
++	[ALC662_FIXUP_ASUS_Nx50] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_auto_mute_via_amp,
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_1A
++	},
++	[ALC668_FIXUP_ASUS_Nx51] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{0x1a, 0x90170151}, /* bass speaker */
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC662_FIXUP_BASS_CHMAP,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -6711,10 +6729,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+-	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
++	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
++	SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
++	SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 06317f7d945f..3630d7d090e8 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -361,7 +361,7 @@ static unsigned int bst_tlv[] = {
+ 
+ /* Interface data select */
+ static const char * const rt5640_data_select[] = {
+-	"Normal", "left copy to right", "right copy to left", "Swap"};
++	"Normal", "Swap", "left copy to right", "right copy to left"};
+ 
+ static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
+ 			    RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
+diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
+index 3deb8babeabb..243f42633989 100644
+--- a/sound/soc/codecs/rt5640.h
++++ b/sound/soc/codecs/rt5640.h
+@@ -442,39 +442,39 @@
+ #define RT5640_IF1_DAC_SEL_MASK			(0x3 << 14)
+ #define RT5640_IF1_DAC_SEL_SFT			14
+ #define RT5640_IF1_DAC_SEL_NOR			(0x0 << 14)
+-#define RT5640_IF1_DAC_SEL_L2R			(0x1 << 14)
+-#define RT5640_IF1_DAC_SEL_R2L			(0x2 << 14)
+-#define RT5640_IF1_DAC_SEL_SWAP			(0x3 << 14)
++#define RT5640_IF1_DAC_SEL_SWAP			(0x1 << 14)
++#define RT5640_IF1_DAC_SEL_L2R			(0x2 << 14)
++#define RT5640_IF1_DAC_SEL_R2L			(0x3 << 14)
+ #define RT5640_IF1_ADC_SEL_MASK			(0x3 << 12)
+ #define RT5640_IF1_ADC_SEL_SFT			12
+ #define RT5640_IF1_ADC_SEL_NOR			(0x0 << 12)
+-#define RT5640_IF1_ADC_SEL_L2R			(0x1 << 12)
+-#define RT5640_IF1_ADC_SEL_R2L			(0x2 << 12)
+-#define RT5640_IF1_ADC_SEL_SWAP			(0x3 << 12)
++#define RT5640_IF1_ADC_SEL_SWAP			(0x1 << 12)
++#define RT5640_IF1_ADC_SEL_L2R			(0x2 << 12)
++#define RT5640_IF1_ADC_SEL_R2L			(0x3 << 12)
+ #define RT5640_IF2_DAC_SEL_MASK			(0x3 << 10)
+ #define RT5640_IF2_DAC_SEL_SFT			10
+ #define RT5640_IF2_DAC_SEL_NOR			(0x0 << 10)
+-#define RT5640_IF2_DAC_SEL_L2R			(0x1 << 10)
+-#define RT5640_IF2_DAC_SEL_R2L			(0x2 << 10)
+-#define RT5640_IF2_DAC_SEL_SWAP			(0x3 << 10)
++#define RT5640_IF2_DAC_SEL_SWAP			(0x1 << 10)
++#define RT5640_IF2_DAC_SEL_L2R			(0x2 << 10)
++#define RT5640_IF2_DAC_SEL_R2L			(0x3 << 10)
+ #define RT5640_IF2_ADC_SEL_MASK			(0x3 << 8)
+ #define RT5640_IF2_ADC_SEL_SFT			8
+ #define RT5640_IF2_ADC_SEL_NOR			(0x0 << 8)
+-#define RT5640_IF2_ADC_SEL_L2R			(0x1 << 8)
+-#define RT5640_IF2_ADC_SEL_R2L			(0x2 << 8)
+-#define RT5640_IF2_ADC_SEL_SWAP			(0x3 << 8)
++#define RT5640_IF2_ADC_SEL_SWAP			(0x1 << 8)
++#define RT5640_IF2_ADC_SEL_L2R			(0x2 << 8)
++#define RT5640_IF2_ADC_SEL_R2L			(0x3 << 8)
+ #define RT5640_IF3_DAC_SEL_MASK			(0x3 << 6)
+ #define RT5640_IF3_DAC_SEL_SFT			6
+ #define RT5640_IF3_DAC_SEL_NOR			(0x0 << 6)
+-#define RT5640_IF3_DAC_SEL_L2R			(0x1 << 6)
+-#define RT5640_IF3_DAC_SEL_R2L			(0x2 << 6)
+-#define RT5640_IF3_DAC_SEL_SWAP			(0x3 << 6)
++#define RT5640_IF3_DAC_SEL_SWAP			(0x1 << 6)
++#define RT5640_IF3_DAC_SEL_L2R			(0x2 << 6)
++#define RT5640_IF3_DAC_SEL_R2L			(0x3 << 6)
+ #define RT5640_IF3_ADC_SEL_MASK			(0x3 << 4)
+ #define RT5640_IF3_ADC_SEL_SFT			4
+ #define RT5640_IF3_ADC_SEL_NOR			(0x0 << 4)
+-#define RT5640_IF3_ADC_SEL_L2R			(0x1 << 4)
+-#define RT5640_IF3_ADC_SEL_R2L			(0x2 << 4)
+-#define RT5640_IF3_ADC_SEL_SWAP			(0x3 << 4)
++#define RT5640_IF3_ADC_SEL_SWAP			(0x1 << 4)
++#define RT5640_IF3_ADC_SEL_L2R			(0x2 << 4)
++#define RT5640_IF3_ADC_SEL_R2L			(0x3 << 4)
+ 
+ /* REC Left Mixer Control 1 (0x3b) */
+ #define RT5640_G_HP_L_RM_L_MASK			(0x7 << 13)
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 5ad43cba860c..194fa7f60a38 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1131,8 +1131,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
++	case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
+ 	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
++	case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}
+diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
+index 0144b3d1bb77..88cccea3ca99 100644
+--- a/tools/lib/traceevent/parse-filter.c
++++ b/tools/lib/traceevent/parse-filter.c
+@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
+ 		current_op = current_exp;
+ 
+ 	ret = collapse_tree(current_op, parg, error_str);
++	/* collapse_tree() may free current_op, and updates parg accordingly */
++	current_op = NULL;
+ 	if (ret < 0)
+ 		goto fail;
+ 
+-	*parg = current_op;
+-
+ 	free(token);
+ 	return 0;
+ 


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-05-12  0:12 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-05-12  0:12 UTC (permalink / raw
  To: gentoo-commits

commit:     85e809cf8d53f398f9f5cc0882736d5fb9b5a2d9
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu May 12 00:12:12 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu May 12 00:12:12 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=85e809cf

Linux patch 4.1.24

 0000_README             |   4 +
 1023_linux-4.1.24.patch | 646 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 650 insertions(+)

diff --git a/0000_README b/0000_README
index a797069..6700cd7 100644
--- a/0000_README
+++ b/0000_README
@@ -135,6 +135,10 @@ Patch:  1022_linux-4.1.23.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.23
 
+Patch:  1023_linux-4.1.24.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.24
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1023_linux-4.1.24.patch b/1023_linux-4.1.24.patch
new file mode 100644
index 0000000..0ac83c9
--- /dev/null
+++ b/1023_linux-4.1.24.patch
@@ -0,0 +1,646 @@
+diff --git a/Makefile b/Makefile
+index 9956129bb106..df1d8b1448ae 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 23
++SUBLEVEL = 24
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
+index de2c0e4ee1aa..67de80a8e178 100644
+--- a/arch/powerpc/include/uapi/asm/cputable.h
++++ b/arch/powerpc/include/uapi/asm/cputable.h
+@@ -31,6 +31,7 @@
+ #define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
+ 					0x00000040
+ 
++/* Reserved - do not use		0x00000004 */
+ #define PPC_FEATURE_TRUE_LE		0x00000002
+ #define PPC_FEATURE_PPC_LE		0x00000001
+ 
+diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
+index 308c5e15676b..abe9cdc390a5 100644
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -149,23 +149,24 @@ static struct ibm_pa_feature {
+ 	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
+ 	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
+ 	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
++	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
+ 	unsigned char	pabyte;		/* byte number in ibm,pa-features */
+ 	unsigned char	pabit;		/* bit number (big-endian) */
+ 	unsigned char	invert;		/* if 1, pa bit set => clear feature */
+ } ibm_pa_features[] __initdata = {
+-	{0, 0, PPC_FEATURE_HAS_MMU,	0, 0, 0},
+-	{0, 0, PPC_FEATURE_HAS_FPU,	0, 1, 0},
+-	{CPU_FTR_CTRL, 0, 0,		0, 3, 0},
+-	{CPU_FTR_NOEXECUTE, 0, 0,	0, 6, 0},
+-	{CPU_FTR_NODSISRALIGN, 0, 0,	1, 1, 1},
+-	{0, MMU_FTR_CI_LARGE_PAGE, 0,	1, 2, 0},
+-	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_MMU, 0,		0, 0, 0},
++	{0, 0, PPC_FEATURE_HAS_FPU, 0,		0, 1, 0},
++	{CPU_FTR_CTRL, 0, 0, 0,			0, 3, 0},
++	{CPU_FTR_NOEXECUTE, 0, 0, 0,		0, 6, 0},
++	{CPU_FTR_NODSISRALIGN, 0, 0, 0,		1, 1, 1},
++	{0, MMU_FTR_CI_LARGE_PAGE, 0, 0,		1, 2, 0},
++	{CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
+ 	/*
+ 	 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ 	 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
+ 	 * which is 0 if the kernel doesn't support TM.
+ 	 */
+-	{CPU_FTR_TM_COMP, 0, 0,		22, 0, 0},
++	{CPU_FTR_TM_COMP, 0, 0, 0,		22, 0, 0},
+ };
+ 
+ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+@@ -196,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
+ 		if (bit ^ fp->invert) {
+ 			cur_cpu_spec->cpu_features |= fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features |= fp->mmu_features;
+ 		} else {
+ 			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
+ 			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
++			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
+ 			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
+ 		}
+ 	}
+diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
+index 11eae5f55b70..9787b61e0758 100644
+--- a/arch/s390/include/asm/hugetlb.h
++++ b/arch/s390/include/asm/hugetlb.h
+@@ -14,6 +14,7 @@
+ 
+ #define is_hugepage_only_range(mm, addr, len)	0
+ #define hugetlb_free_pgd_range			free_pgd_range
++#define hugepages_supported()			(MACHINE_HAS_HPAGE)
+ 
+ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ 		     pte_t *ptep, pte_t pte);
+diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
+index 4e33fe339b3d..5b7153b34361 100644
+--- a/arch/s390/include/asm/pci.h
++++ b/arch/s390/include/asm/pci.h
+@@ -45,7 +45,8 @@ struct zpci_fmb {
+ 	u64 rpcit_ops;
+ 	u64 dma_rbytes;
+ 	u64 dma_wbytes;
+-} __packed __aligned(64);
++	u64 pad[2];
++} __packed __aligned(128);
+ 
+ enum zpci_state {
+ 	ZPCI_FN_STATE_RESERVED,
+diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
+index e510b1c5d690..e5b79c1bb191 100644
+--- a/arch/x86/crypto/sha-mb/sha1_mb.c
++++ b/arch/x86/crypto/sha-mb/sha1_mb.c
+@@ -456,10 +456,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
+ 
+ 			req = cast_mcryptd_ctx_to_req(req_ctx);
+ 			if (irqs_disabled())
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 			else {
+ 				local_bh_disable();
+-				rctx->complete(&req->base, ret);
++				req_ctx->complete(&req->base, ret);
+ 				local_bh_enable();
+ 			}
+ 		}
+diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
+index 68c05398bba9..7aadd3cea843 100644
+--- a/arch/x86/include/asm/hugetlb.h
++++ b/arch/x86/include/asm/hugetlb.h
+@@ -4,6 +4,7 @@
+ #include <asm/page.h>
+ #include <asm-generic/hugetlb.h>
+ 
++#define hugepages_supported() cpu_has_pse
+ 
+ static inline int is_hugepage_only_range(struct mm_struct *mm,
+ 					 unsigned long addr,
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 0a4973b47c99..a2ca3c757712 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -207,6 +207,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+ 	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+ 	struct ccp_aes_cmac_exp_ctx state;
+ 
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
+ 	state.null_msg = rctx->null_msg;
+ 	memcpy(state.iv, rctx->iv, sizeof(state.iv));
+ 	state.buf_count = rctx->buf_count;
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 9711b6d29162..895c0d1316e5 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -199,6 +199,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
+ 	struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ 	struct ccp_sha_exp_ctx state;
+ 
++	/* Don't let anything leak to 'out' */
++	memset(&state, 0, sizeof(state));
++
+ 	state.type = rctx->type;
+ 	state.msg_bits = rctx->msg_bits;
+ 	state.first = rctx->first;
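
Both memset() additions above follow the standard fix for a kernel-stack infoleak: an on-stack struct that is later copied out wholesale can carry compiler padding and any fields the code forgot to set, so it must be zeroed before the selective assignments. In general form (struct and field names are illustrative):

    struct export_state state;

    memset(&state, 0, sizeof(state));   /* padding + unset fields = 0 */
    state.count = rctx->count;          /* then copy only what's meant */
    memcpy(out, &state, sizeof(state)); /* nothing uninitialized escapes */
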
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index adcc628b1f93..5d87111fdc87 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1316,7 +1316,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 	}
+ 
+ 	ch_way = TAD_CH(reg) + 1;
+-	sck_way = 1 << TAD_SOCK(reg);
++	sck_way = TAD_SOCK(reg);
+ 
+ 	if (ch_way == 3)
+ 		idx = addr >> 6;
+@@ -1355,7 +1355,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 		switch(ch_way) {
+ 		case 2:
+ 		case 4:
+-			sck_xch = 1 << sck_way * (ch_way >> 1);
++			sck_xch = (1 << sck_way) * (ch_way >> 1);
+ 			break;
+ 		default:
+ 			sprintf(msg, "Invalid mirror set. Can't decode addr");
+@@ -1391,7 +1391,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
+ 
+ 	ch_addr = addr - offset;
+ 	ch_addr >>= (6 + shiftup);
+-	ch_addr /= ch_way * sck_way;
++	ch_addr /= sck_xch;
+ 	ch_addr <<= (6 + shiftup);
+ 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
+ 
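
A quick worked example of the sb_edac arithmetic being corrected, assuming TAD_SOCK(reg) decodes to 1 and ch_way is 2 (hypothetical values for illustration):

    static void divisor_example(void)
    {
            int tad_sock = 1, ch_way = 2;

            /* old code: pre-shifted sck_way, divisor ch_way * sck_way */
            int old_sck_way = 1 << tad_sock;                /* = 2 */
            int old_divisor = ch_way * old_sck_way;         /* = 4: wrong */

            /* new code: keep the raw field, build the factor once */
            int sck_way = tad_sock;                         /* = 1 */
            int sck_xch = (1 << sck_way) * (ch_way >> 1);   /* = 2: divisor */
    }

With the old divisor the decoded channel address came out a factor of two too small in this case, steering error reports at the wrong DIMM.
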
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 9e33705d4d0e..2c04c59022f3 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1663,13 +1663,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	struct drm_dp_mst_branch *mstb;
+ 	int len, ret, port_num;
+ 
++	port = drm_dp_get_validated_port_ref(mgr, port);
++	if (!port)
++		return -EINVAL;
++
+ 	port_num = port->port_num;
+ 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ 	if (!mstb) {
+ 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+ 
+-		if (!mstb)
++		if (!mstb) {
++			drm_dp_put_port(port);
+ 			return -EINVAL;
++		}
+ 	}
+ 
+ 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+@@ -1695,6 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
+ 	kfree(txmsg);
+ fail_put:
+ 	drm_dp_put_mst_branch_device(mstb);
++	drm_dp_put_port(port);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index a18807ec8371..71a0ebae7bfb 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1074,7 +1074,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
+ 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+ 		dev_priv->uncore.funcs.force_wake_get =
+ 			fw_domains_get_with_thread_status;
+-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
++		if (IS_HASWELL(dev))
++			dev_priv->uncore.funcs.force_wake_put =
++				fw_domains_put_with_fifo;
++		else
++			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
+ 	} else if (IS_IVYBRIDGE(dev)) {
+diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+index 1523cf94bcdc..8bc7d0bbd3c8 100644
+--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
++++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
+ 	return radeon_atpx_priv.atpx_detected;
+ }
+ 
+-bool radeon_has_atpx_dgpu_power_cntl(void) {
+-	return radeon_atpx_priv.atpx.functions.power_cntl;
+-}
+-
+ /**
+  * radeon_atpx_call - call an ATPX method
+  *
+@@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
+  */
+ static int radeon_atpx_validate(struct radeon_atpx *atpx)
+ {
++	/* make sure required functions are enabled */
++	/* dGPU power control is required */
++	atpx->functions.power_cntl = true;
++
+ 	if (atpx->functions.px_params) {
+ 		union acpi_object *info;
+ 		struct atpx_px_params output;
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 5a2cafb4f1bc..f5c96fb7e8d0 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1977,10 +1977,12 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 						   rdev->mode_info.dither_property,
+ 						   RADEON_FMT_DITHER_DISABLE);
+ 
+-			if (radeon_audio != 0)
++			if (radeon_audio != 0) {
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
++			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.output_csc_property,
+@@ -2105,6 +2107,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+ 				radeon_connector->dac_load_detect = true;
+@@ -2160,6 +2163,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+@@ -2212,6 +2216,7 @@ radeon_add_atom_connector(struct drm_device *dev,
+ 				drm_object_attach_property(&radeon_connector->base.base,
+ 							   rdev->mode_info.audio_property,
+ 							   RADEON_AUDIO_AUTO);
++				radeon_connector->audio = RADEON_AUDIO_AUTO;
+ 			}
+ 			if (ASIC_IS_DCE5(rdev))
+ 				drm_object_attach_property(&radeon_connector->base.base,
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 9cbdd8aac28f..604c44d88e7a 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
+ 	"LAST",
+ };
+ 
+-#if defined(CONFIG_VGA_SWITCHEROO)
+-bool radeon_has_atpx_dgpu_power_cntl(void);
+-#else
+-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+-#endif
+-
+ #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
+ #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
+ 
+@@ -1433,7 +1427,7 @@ int radeon_device_init(struct radeon_device *rdev,
+ 	 * ignore it */
+ 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ 
+-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
++	if (rdev->flags & RADEON_IS_PX)
+ 		runtime = true;
+ 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
+ 	if (runtime)
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index f5c0590bbf73..50ce26a3b314 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+ {
+ 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+ 
++	if (radeon_ttm_tt_has_userptr(bo->ttm))
++		return -EPERM;
+ 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index 9dfcedec05a6..c4e0e69b688d 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2926,6 +2926,10 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
+index 3a7f3a4a4396..7c18249d6c8e 100644
+--- a/drivers/input/tablet/gtco.c
++++ b/drivers/input/tablet/gtco.c
+@@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 		goto err_free_buf;
+ 	}
+ 
++	/* Sanity check that a device has an endpoint */
++	if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
++		dev_err(&usbinterface->dev,
++			"Invalid number of endpoints\n");
++		error = -EINVAL;
++		goto err_free_urb;
++	}
++
+ 	/*
+ 	 * The endpoint is always altsetting 0, we know this since we know
+ 	 * this device only has one interrupt endpoint
+@@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
+ 	 * HID report descriptor
+ 	 */
+ 	if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
+-				     HID_DEVICE_TYPE, &hid_desc) != 0){
++				     HID_DEVICE_TYPE, &hid_desc) != 0) {
+ 		dev_err(&usbinterface->dev,
+ 			"Can't retrieve exta USB descriptor to get hid report descriptor length\n");
+ 		error = -EIO;
+diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
+index 883ba74fbc1e..4cf38c39878a 100644
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -238,11 +238,6 @@ static ssize_t ___mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
+ 	dev = cl->dev;
+ 
+ 	mutex_lock(&dev->device_lock);
+-	if (dev->dev_state != MEI_DEV_ENABLED) {
+-		rets = -ENODEV;
+-		goto out;
+-	}
+-
+ 	if (!mei_cl_is_connected(cl)) {
+ 		rets = -ENODEV;
+ 		goto out;
+@@ -292,10 +287,6 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length)
+ 	dev = cl->dev;
+ 
+ 	mutex_lock(&dev->device_lock);
+-	if (dev->dev_state != MEI_DEV_ENABLED) {
+-		rets = -ENODEV;
+-		goto out;
+-	}
+ 
+ 	cb = mei_cl_read_cb(cl, NULL);
+ 	if (cb)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+index d60a467a983c..80cab4ec0522 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
+ 	 */
+ 	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
+ 	if (val & (BIT(1) | BIT(17))) {
+-		IWL_INFO(trans,
+-			 "can't access the RSA semaphore it is write protected\n");
++		IWL_DEBUG_INFO(trans,
++			       "can't access the RSA semaphore it is write protected\n");
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+index 474812e2b0cb..de08175aef0a 100644
+--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
++++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+@@ -852,7 +852,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
+ 	int eint_num, virq, eint_offset;
+ 	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
+-	static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
++	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
++						128000, 256000};
+ 	const struct mtk_desc_pin *pin;
+ 	struct irq_data *d;
+ 
+@@ -870,9 +871,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
+ 	if (!mtk_eint_can_en_debounce(pctl, eint_num))
+ 		return -ENOSYS;
+ 
+-	dbnc = ARRAY_SIZE(dbnc_arr);
+-	for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
+-		if (debounce <= dbnc_arr[i]) {
++	dbnc = ARRAY_SIZE(debounce_time);
++	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
++		if (debounce <= debounce_time[i]) {
+ 			dbnc = i;
+ 			break;
+ 		}
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 6e7be91e6097..82240dbdf6dd 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 						   work);
+ 	int ret = io_data->req->status ? io_data->req->status :
+ 					 io_data->req->actual;
++	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ 
+ 	if (io_data->read && ret > 0) {
+ 		use_mm(io_data->mm);
+@@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
+ 
+ 	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+ 
+-	if (io_data->ffs->ffs_eventfd &&
+-	    !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
++	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
+ 		eventfd_signal(io_data->ffs->ffs_eventfd, 1);
+ 
+ 	usb_ep_free_request(io_data->ep, io_data->req);
+ 
+-	io_data->kiocb->private = NULL;
+ 	if (io_data->read)
+ 		kfree(io_data->to_free);
+ 	kfree(io_data->buf);
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 461a0558bca4..cebecff536a3 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
+ {
+ #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+ 	return false;
++#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
++	return false;
+ #else
+ 	return true;
+ #endif
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 205026175c42..d891f949466a 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -460,15 +460,14 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
+ 	return &mm->page_table_lock;
+ }
+ 
+-static inline bool hugepages_supported(void)
+-{
+-	/*
+-	 * Some platform decide whether they support huge pages at boot
+-	 * time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
+-	 * there is no such support
+-	 */
+-	return HPAGE_SHIFT != 0;
+-}
++#ifndef hugepages_supported
++/*
++ * Some platform decide whether they support huge pages at boot
++ * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
++ * when there is no such support
++ */
++#define hugepages_supported() (HPAGE_SHIFT != 0)
++#endif
+ 
+ #else	/* CONFIG_HUGETLB_PAGE */
+ struct hstate {};
+diff --git a/kernel/futex.c b/kernel/futex.c
+index b75fbddacf0e..46b168e19c98 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1157,10 +1157,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+ 	 */
+ 	newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
+ 
+-	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
++	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
+ 		ret = -EFAULT;
+-	else if (curval != uval)
+-		ret = -EINVAL;
++	} else if (curval != uval) {
++		/*
++		 * If a unconditional UNLOCK_PI operation (user space did not
++		 * try the TID->0 transition) raced with a waiter setting the
++		 * FUTEX_WAITERS flag between get_user() and locking the hash
++		 * bucket lock, retry the operation.
++		 */
++		if ((FUTEX_TID_MASK & curval) == uval)
++			ret = -EAGAIN;
++		else
++			ret = -EINVAL;
++	}
+ 	if (ret) {
+ 		raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ 		return ret;
+@@ -2419,6 +2429,15 @@ retry:
+ 		 */
+ 		if (ret == -EFAULT)
+ 			goto pi_faulted;
++		/*
++		 * A unconditional UNLOCK_PI op raced against a waiter
++		 * setting the FUTEX_WAITERS bit. Try again.
++		 */
++		if (ret == -EAGAIN) {
++			spin_unlock(&hb->lock);
++			put_futex_key(&key);
++			goto retry;
++		}
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 0d6038c87bef..0c29986ecd87 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1290,7 +1290,7 @@ static int netlink_release(struct socket *sock)
+ 
+ 	skb_queue_purge(&sk->sk_write_queue);
+ 
+-	if (nlk->portid) {
++	if (nlk->portid && nlk->bound) {
+ 		struct netlink_notify n = {
+ 						.net = sock_net(sk),
+ 						.protocol = sk->sk_protocol,
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 04b6f3f6ee0b..f24138681b80 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -12777,7 +12777,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
+ 	struct wireless_dev *wdev;
+ 	struct cfg80211_beacon_registration *reg, *tmp;
+ 
+-	if (state != NETLINK_URELEASE)
++	if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
+ 		return NOTIFY_DONE;
+ 
+ 	rcu_read_lock();
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 69093ce34231..9e113bc3b02d 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -307,6 +307,10 @@ enum {
+ 	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
+ 	 AZX_DCAPS_I915_POWERWELL)
+ 
++#define AZX_DCAPS_INTEL_BROXTON \
++	(AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG |\
++	 AZX_DCAPS_I915_POWERWELL)
++
+ /* quirks for ATI SB / AMD Hudson */
+ #define AZX_DCAPS_PRESET_ATI_SB \
+ 	(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
+@@ -2039,6 +2043,12 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* Sunrise Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9d70),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
++	/* Broxton-P(Apollolake) */
++	{ PCI_DEVICE(0x8086, 0x5a98),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
++	/* Broxton-T */
++	{ PCI_DEVICE(0x8086, 0x1a98),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
+ 	/* Haswell */
+ 	{ PCI_DEVICE(0x8086, 0x0a0c),
+ 	  .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cc1a7a4a7cbd..86b83f521613 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5318,6 +5318,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
++	SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
+index c5194f5b150a..d7e71f309299 100644
+--- a/sound/pci/pcxhr/pcxhr_core.c
++++ b/sound/pci/pcxhr/pcxhr_core.c
+@@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
+ 	}
+ 
+ 	pcxhr_msg_thread(mgr);
++	mutex_unlock(&mgr->lock);
+ 	return IRQ_HANDLED;
+ }
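
A side note on the pinctrl-mtk-common hunk above: the old dbnc_arr table held
raw register values, while the replacement debounce_time table lists the
hardware's supported debounce periods in microseconds, so callers can request
real times. The selection loop is the usual "first table entry that covers the
request" pattern seen in many GPIO drivers. Below is a minimal standalone
sketch of that pattern, reusing the period values from the hunk; the function
name and the main() harness are illustrative only, not the driver's actual
code.

	#include <stdio.h>

	/* Supported debounce periods in microseconds, ascending. */
	static const unsigned int debounce_time[] = {
		500, 1000, 16000, 32000, 64000, 128000, 256000
	};

	/*
	 * Return the index of the smallest supported period that is at
	 * least as long as the requested one, or -1 if the request is
	 * longer than anything the table offers.
	 */
	static int pick_debounce(unsigned int requested_us)
	{
		unsigned int i;

		for (i = 0; i < sizeof(debounce_time) / sizeof(debounce_time[0]); i++)
			if (requested_us <= debounce_time[i])
				return (int)i;
		return -1;
	}

	int main(void)
	{
		/* 20000 us is not supported exactly; index 3 (32000 us) covers it. */
		printf("pick_debounce(20000) = %d\n", pick_debounce(20000));
		return 0;
	}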


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-04-28 18:56 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-04-28 18:56 UTC (permalink / raw
  To: gentoo-commits

commit:     26c5b0bcb48671f84a02d4f62590131aa4566f9b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Apr 28 18:56:12 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Apr 28 18:56:12 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=26c5b0bc

Linux patch 4.1.23

 0000_README             |    4 +
 1022_linux-4.1.23.patch | 2721 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2725 insertions(+)

diff --git a/0000_README b/0000_README
index 3075177..a797069 100644
--- a/0000_README
+++ b/0000_README
@@ -131,6 +131,10 @@ Patch:  1021_linux-4.1.22.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.22
 
+Patch:  1022_linux-4.1.23.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.23
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1022_linux-4.1.23.patch b/1022_linux-4.1.23.patch
new file mode 100644
index 0000000..68a28a4
--- /dev/null
+++ b/1022_linux-4.1.23.patch
@@ -0,0 +1,2721 @@
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index cd03a0faca8f..7bcd7a26f885 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -3808,6 +3808,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 					sector if the number is odd);
+ 				i = IGNORE_DEVICE (don't bind to this
+ 					device);
++				j = NO_REPORT_LUNS (don't use report luns
++					command, uas only);
+ 				l = NOT_LOCKABLE (don't try to lock and
+ 					unlock ejectable media);
+ 				m = MAX_SECTORS_64 (don't transfer more
+diff --git a/Makefile b/Makefile
+index 7f4a4039fdd9..9956129bb106 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 22
++SUBLEVEL = 23
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 9185bb958503..dee6831c0434 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -1402,9 +1402,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
+ 	    (sf & SYSC_HAS_CLOCKACTIVITY))
+ 		_set_clockactivity(oh, oh->class->sysc->clockact, &v);
+ 
+-	/* If the cached value is the same as the new value, skip the write */
+-	if (oh->_sysc_cache != v)
+-		_write_sysconfig(v, oh);
++	_write_sysconfig(v, oh);
+ 
+ 	/*
+ 	 * Set the autoidle bit only after setting the smartidle bit
+@@ -1467,7 +1465,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
+ 		_set_master_standbymode(oh, idlemode, &v);
+ 	}
+ 
+-	_write_sysconfig(v, oh);
++	/* If the cached value is the same as the new value, skip the write */
++	if (oh->_sysc_cache != v)
++		_write_sysconfig(v, oh);
+ }
+ 
+ /**
+diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
+index 0abdd4c607ed..1960b87c1c8b 100644
+--- a/arch/parisc/include/asm/uaccess.h
++++ b/arch/parisc/include/asm/uaccess.h
+@@ -76,6 +76,7 @@ struct exception_table_entry {
+  */
+ struct exception_data {
+ 	unsigned long fault_ip;
++	unsigned long fault_gp;
+ 	unsigned long fault_space;
+ 	unsigned long fault_addr;
+ };
+diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
+index 59001cea13f9..c972e6550f56 100644
+--- a/arch/parisc/kernel/asm-offsets.c
++++ b/arch/parisc/kernel/asm-offsets.c
+@@ -291,6 +291,7 @@ int main(void)
+ 	DEFINE(ASM_PT_INITIAL, PT_INITIAL);
+ 	BLANK();
+ 	DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
++	DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp));
+ 	DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
+ 	DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
+ 	BLANK();
+diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
+index 568b2c61ea02..3cad8aadc69e 100644
+--- a/arch/parisc/kernel/parisc_ksyms.c
++++ b/arch/parisc/kernel/parisc_ksyms.c
+@@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64);
+ EXPORT_SYMBOL(lclear_user);
+ EXPORT_SYMBOL(lstrnlen_user);
+ 
+-/* Global fixups */
+-extern void fixup_get_user_skip_1(void);
+-extern void fixup_get_user_skip_2(void);
+-extern void fixup_put_user_skip_1(void);
+-extern void fixup_put_user_skip_2(void);
++/* Global fixups - defined as int to avoid creation of function pointers */
++extern int fixup_get_user_skip_1;
++extern int fixup_get_user_skip_2;
++extern int fixup_put_user_skip_1;
++extern int fixup_put_user_skip_2;
+ EXPORT_SYMBOL(fixup_get_user_skip_1);
+ EXPORT_SYMBOL(fixup_get_user_skip_2);
+ EXPORT_SYMBOL(fixup_put_user_skip_1);
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 7f67c4c96a7a..bbf22658d1a3 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -798,6 +798,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
+ 
+ 	    if (fault_space == 0 && !in_atomic())
+ 	    {
++		/* Clean up and return if in exception table. */
++		if (fixup_exception(regs))
++			return;
+ 		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
+ 		parisc_terminate("Kernel Fault", regs, code, fault_address);
+ 	    }
+diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
+index 536ef66bb94b..1052b747e011 100644
+--- a/arch/parisc/lib/fixup.S
++++ b/arch/parisc/lib/fixup.S
+@@ -26,6 +26,7 @@
+ 
+ #ifdef CONFIG_SMP
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	addil LT%__per_cpu_offset,%r27
+ 	LDREG RT%__per_cpu_offset(%r1),\t1
+ 	/* t2 = smp_processor_id() */
+@@ -40,14 +41,19 @@
+ 	LDREG RT%exception_data(%r1),\t1
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	add,l \t1,\t2,\t1
++	/* %r27 = t1->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t1), %r27
+ 	/* t1 = t1->fault_ip */
+ 	LDREG EXCDATA_IP(\t1), \t1
+ 	.endm
+ #else
+ 	.macro  get_fault_ip t1 t2
++	loadgp
+ 	/* t1 = this_cpu_ptr(&exception_data) */
+ 	addil LT%exception_data,%r27
+ 	LDREG RT%exception_data(%r1),\t2
++	/* %r27 = t2->fault_gp - restore gp */
++	LDREG EXCDATA_GP(\t2), %r27
+ 	/* t1 = t2->fault_ip */
+ 	LDREG EXCDATA_IP(\t2), \t1
+ 	.endm
+diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
+index e5120e653240..50d64a7fc672 100644
+--- a/arch/parisc/mm/fault.c
++++ b/arch/parisc/mm/fault.c
+@@ -151,6 +151,7 @@ int fixup_exception(struct pt_regs *regs)
+ 		struct exception_data *d;
+ 		d = this_cpu_ptr(&exception_data);
+ 		d->fault_ip = regs->iaoq[0];
++		d->fault_gp = regs->gr[27];
+ 		d->fault_space = regs->isr;
+ 		d->fault_addr = regs->ior;
+ 
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index c228d8da1f8c..75eb9603ed29 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -680,7 +680,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+ 		if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+ 			return 1;
+ 	}
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->arch.xcr0 = xcr0;
+ 
+ 	if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
+@@ -6115,12 +6114,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
+ 	}
+ 
+ 	/* try to inject new event if pending */
+-	if (vcpu->arch.nmi_pending) {
+-		if (kvm_x86_ops->nmi_allowed(vcpu)) {
+-			--vcpu->arch.nmi_pending;
+-			vcpu->arch.nmi_injected = true;
+-			kvm_x86_ops->set_nmi(vcpu);
+-		}
++	if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
++		--vcpu->arch.nmi_pending;
++		vcpu->arch.nmi_injected = true;
++		kvm_x86_ops->set_nmi(vcpu);
+ 	} else if (kvm_cpu_has_injectable_intr(vcpu)) {
+ 		/*
+ 		 * Because interrupts can be injected asynchronously, we are
+@@ -6290,10 +6287,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		if (inject_pending_event(vcpu, req_int_win) != 0)
+ 			req_immediate_exit = true;
+ 		/* enable NMI/IRQ window open exits if needed */
+-		else if (vcpu->arch.nmi_pending)
+-			kvm_x86_ops->enable_nmi_window(vcpu);
+-		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
+-			kvm_x86_ops->enable_irq_window(vcpu);
++		else {
++			if (vcpu->arch.nmi_pending)
++				kvm_x86_ops->enable_nmi_window(vcpu);
++			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
++				kvm_x86_ops->enable_irq_window(vcpu);
++		}
+ 
+ 		if (kvm_lapic_enabled(vcpu)) {
+ 			/*
+@@ -6318,8 +6317,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	kvm_x86_ops->prepare_guest_switch(vcpu);
+ 	if (vcpu->fpu_active)
+ 		kvm_load_guest_fpu(vcpu);
+-	kvm_load_guest_xcr0(vcpu);
+-
+ 	vcpu->mode = IN_GUEST_MODE;
+ 
+ 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+@@ -6342,6 +6339,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 		goto cancel_injection;
+ 	}
+ 
++	kvm_load_guest_xcr0(vcpu);
++
+ 	if (req_immediate_exit)
+ 		smp_send_reschedule(vcpu->cpu);
+ 
+@@ -6392,6 +6391,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	vcpu->mode = OUTSIDE_GUEST_MODE;
+ 	smp_wmb();
+ 
++	kvm_put_guest_xcr0(vcpu);
++
+ 	/* Interrupt is enabled by handle_external_intr() */
+ 	kvm_x86_ops->handle_external_intr(vcpu);
+ 
+@@ -7040,7 +7041,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 	 * and assume host would use all available bits.
+ 	 * Guest xcr0 would be loaded later.
+ 	 */
+-	kvm_put_guest_xcr0(vcpu);
+ 	vcpu->guest_fpu_loaded = 1;
+ 	__kernel_fpu_begin();
+ 	fpu_restore_checking(&vcpu->arch.guest_fpu);
+@@ -7049,8 +7049,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+ 
+ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+ {
+-	kvm_put_guest_xcr0(vcpu);
+-
+ 	if (!vcpu->guest_fpu_loaded)
+ 		return;
+ 
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 09138ceba046..89ecec13c567 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -1934,7 +1934,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
+ 
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
+-					  GFP_ATOMIC);
++					  GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -1983,7 +1983,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
+ 	rbd_dev = img_request->rbd_dev;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
+-						false, GFP_ATOMIC);
++						false, GFP_NOIO);
+ 	if (!osd_req)
+ 		return NULL;	/* ENOMEM */
+ 
+@@ -2482,7 +2482,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
+ 					bio_chain_clone_range(&bio_list,
+ 								&bio_offset,
+ 								clone_size,
+-								GFP_ATOMIC);
++								GFP_NOIO);
+ 			if (!obj_request->bio_list)
+ 				goto out_unwind;
+ 		} else if (type == OBJ_REQUEST_PAGES) {
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index ebffc744cb1b..5ec92ce0e5ac 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
+ static void dwc_initialize(struct dw_dma_chan *dwc)
+ {
+ 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+-	struct dw_dma_slave *dws = dwc->chan.private;
+ 	u32 cfghi = DWC_CFGH_FIFO_MODE;
+ 	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+ 
+ 	if (dwc->initialized == true)
+ 		return;
+ 
+-	if (dws) {
+-		/*
+-		 * We need controller-specific data to set up slave
+-		 * transfers.
+-		 */
+-		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+-
+-		cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
+-	} else {
+-		cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+-		cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+-	}
++	cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
++	cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
+ 
+ 	channel_writel(dwc, CFG_LO, cfglo);
+ 	channel_writel(dwc, CFG_HI, cfghi);
+@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
+ 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ 	struct dw_dma_slave *dws = param;
+ 
+-	if (!dws || dws->dma_dev != chan->device->dev)
++	if (dws->dma_dev != chan->device->dev)
+ 		return false;
+ 
+ 	/* We have to copy data since dws can be temporary storage */
+@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
+ 	 * doesn't mean what you think it means), and status writeback.
+ 	 */
+ 
++	/*
++	 * We need controller-specific data to set up slave transfers.
++	 */
++	if (chan->private && !dw_dma_filter(chan, chan->private)) {
++		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
++		return -EINVAL;
++	}
++
+ 	/* Enable controller here if needed */
+ 	if (!dw->in_use)
+ 		dw_dma_on(dw);
+@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ 	spin_lock_irqsave(&dwc->lock, flags);
+ 	list_splice_init(&dwc->free_list, &list);
+ 	dwc->descs_allocated = 0;
++
++	/* Clear custom channel configuration */
++	dwc->src_id = 0;
++	dwc->dst_id = 0;
++
++	dwc->src_master = 0;
++	dwc->dst_master = 0;
++
+ 	dwc->initialized = false;
+ 
+ 	/* Disable interrupts */
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index f42f71e37e73..b863b685d2d5 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -143,7 +143,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
+ 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+ 	spin_unlock_irqrestore(&hsuc->lock, flags);
+ 
+-	return sr;
++	return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
+ }
+ 
+ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
+index 0275233cf550..6a54f351df90 100644
+--- a/drivers/dma/hsu/hsu.h
++++ b/drivers/dma/hsu/hsu.h
+@@ -41,6 +41,9 @@
+ #define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
+ #define HSU_CH_SR_DESCTO_ANY	(BIT(11) | BIT(10) | BIT(9) | BIT(8))
+ #define HSU_CH_SR_CHE		BIT(15)
++#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
++#define HSU_CH_SR_DESCE_ANY	(BIT(19) | BIT(18) | BIT(17) | BIT(16))
++#define HSU_CH_SR_CDESC_ANY	(BIT(31) | BIT(30))
+ 
+ /* Bits in HSU_CH_CR */
+ #define HSU_CH_CR_CHA		BIT(0)
+diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
+index e2da64abbccd..16f7c4f2d8c8 100644
+--- a/drivers/gpio/gpio-pca953x.c
++++ b/drivers/gpio/gpio-pca953x.c
+@@ -21,6 +21,7 @@
+ #ifdef CONFIG_OF_GPIO
+ #include <linux/of_platform.h>
+ #endif
++#include <asm/unaligned.h>
+ 
+ #define PCA953X_INPUT		0
+ #define PCA953X_OUTPUT		1
+@@ -154,7 +155,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val)
+ 		switch (chip->chip_type) {
+ 		case PCA953X_TYPE:
+ 			ret = i2c_smbus_write_word_data(chip->client,
+-							reg << 1, (u16) *val);
++			    reg << 1, cpu_to_le16(get_unaligned((u16 *)val)));
+ 			break;
+ 		case PCA957X_TYPE:
+ 			ret = i2c_smbus_write_byte_data(chip->client, reg << 1,
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index b103773df2a3..56323732c748 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -5609,10 +5609,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
+ 			      connector->base.base.id,
+ 			      connector->base.name);
+ 
+-		/* there is no real hw state for MST connectors */
+-		if (connector->mst_port)
+-			return;
+-
+ 		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
+ 		     "wrong connector dpms state\n");
+ 		I915_STATE_WARN(connector->base.encoder != &encoder->base,
+@@ -11225,13 +11221,6 @@ check_encoder_state(struct drm_device *dev)
+ 			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
+ 				active = true;
+ 		}
+-		/*
+-		 * for MST connectors if we unplug the connector is gone
+-		 * away but the encoder is still connected to a crtc
+-		 * until a modeset happens in response to the hotplug.
+-		 */
+-		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
+-			continue;
+ 
+ 		I915_STATE_WARN(!!encoder->base.crtc != enabled,
+ 		     "encoder's enabled state mismatch "
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 88c557551b89..8266a8d8b9df 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -451,14 +451,23 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ {
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
+ 	struct drm_device *dev = connector->dev;
+-	/* need to nuke the connector */
+-	drm_modeset_lock_all(dev);
+-	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+-	drm_modeset_unlock_all(dev);
+ 
+ 	intel_connector->unregister(intel_connector);
+ 
++	/* need to nuke the connector */
+ 	drm_modeset_lock_all(dev);
++	if (connector->state->crtc) {
++		struct drm_mode_set set;
++		int ret;
++
++		memset(&set, 0, sizeof(set));
++		set.crtc = connector->state->crtc,
++
++		ret = drm_atomic_helper_set_config(&set);
++
++		WARN(ret, "Disabling mst crtc failed with %i\n", ret);
++	}
++
+ 	intel_connector_remove_from_fbdev(intel_connector);
+ 	drm_connector_cleanup(connector);
+ 	drm_modeset_unlock_all(dev);
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 52921a871230..18484301fc3e 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -367,10 +367,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
+ 
+ 	qxl_bo_kunmap(user_bo);
+ 
++	qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
++	qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
++	qcrtc->hot_spot_x = hot_x;
++	qcrtc->hot_spot_y = hot_y;
++
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_SET;
+-	cmd->u.set.position.x = qcrtc->cur_x;
+-	cmd->u.set.position.y = qcrtc->cur_y;
++	cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 
+ 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
+ 
+@@ -433,8 +438,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
+ 
+ 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_CURSOR_MOVE;
+-	cmd->u.position.x = qcrtc->cur_x;
+-	cmd->u.position.y = qcrtc->cur_y;
++	cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
++	cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
+ 	qxl_release_unmap(qdev, release, &cmd->release_info);
+ 
+ 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index e66143cc1a7a..eef66769245f 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -135,6 +135,8 @@ struct qxl_crtc {
+ 	int index;
+ 	int cur_x;
+ 	int cur_y;
++	int hot_spot_x;
++	int hot_spot_y;
+ };
+ 
+ struct qxl_output {
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index 1764a168888c..49a259fc610e 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -950,14 +950,6 @@ static int usbhid_output_report(struct hid_device *hid, __u8 *buf, size_t count)
+ 	return ret;
+ }
+ 
+-static void usbhid_restart_queues(struct usbhid_device *usbhid)
+-{
+-	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+-		usbhid_restart_out_queue(usbhid);
+-	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+-		usbhid_restart_ctrl_queue(usbhid);
+-}
+-
+ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
+ {
+ 	struct usbhid_device *usbhid = hid->driver_data;
+@@ -1403,6 +1395,37 @@ static void hid_cease_io(struct usbhid_device *usbhid)
+ 	usb_kill_urb(usbhid->urbout);
+ }
+ 
++static void hid_restart_io(struct hid_device *hid)
++{
++	struct usbhid_device *usbhid = hid->driver_data;
++	int clear_halt = test_bit(HID_CLEAR_HALT, &usbhid->iofl);
++	int reset_pending = test_bit(HID_RESET_PENDING, &usbhid->iofl);
++
++	spin_lock_irq(&usbhid->lock);
++	clear_bit(HID_SUSPENDED, &usbhid->iofl);
++	usbhid_mark_busy(usbhid);
++
++	if (clear_halt || reset_pending)
++		schedule_work(&usbhid->reset_work);
++	usbhid->retry_delay = 0;
++	spin_unlock_irq(&usbhid->lock);
++
++	if (reset_pending || !test_bit(HID_STARTED, &usbhid->iofl))
++		return;
++
++	if (!clear_halt) {
++		if (hid_start_in(hid) < 0)
++			hid_io_error(hid);
++	}
++
++	spin_lock_irq(&usbhid->lock);
++	if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
++		usbhid_restart_out_queue(usbhid);
++	if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
++		usbhid_restart_ctrl_queue(usbhid);
++	spin_unlock_irq(&usbhid->lock);
++}
++
+ /* Treat USB reset pretty much the same as suspend/resume */
+ static int hid_pre_reset(struct usb_interface *intf)
+ {
+@@ -1452,14 +1475,14 @@ static int hid_post_reset(struct usb_interface *intf)
+ 		return 1;
+ 	}
+ 
++	/* No need to do another reset or clear a halted endpoint */
+ 	spin_lock_irq(&usbhid->lock);
+ 	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
++	clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
+ 	spin_unlock_irq(&usbhid->lock);
+ 	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
+-	usbhid_restart_queues(usbhid);
++
++	hid_restart_io(hid);
+ 
+ 	return 0;
+ }
+@@ -1482,25 +1505,9 @@ void usbhid_put_power(struct hid_device *hid)
+ #ifdef CONFIG_PM
+ static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+ {
+-	struct usbhid_device *usbhid = hid->driver_data;
+-	int status;
+-
+-	spin_lock_irq(&usbhid->lock);
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+-	usbhid_mark_busy(usbhid);
+-
+-	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+-			test_bit(HID_RESET_PENDING, &usbhid->iofl))
+-		schedule_work(&usbhid->reset_work);
+-	usbhid->retry_delay = 0;
+-
+-	usbhid_restart_queues(usbhid);
+-	spin_unlock_irq(&usbhid->lock);
+-
+-	status = hid_start_in(hid);
+-	if (status < 0)
+-		hid_io_error(hid);
++	int status = 0;
+ 
++	hid_restart_io(hid);
+ 	if (driver_suspended && hid->driver && hid->driver->resume)
+ 		status = hid->driver->resume(hid);
+ 	return status;
+@@ -1569,12 +1576,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
+ static int hid_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata (intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	if (!test_bit(HID_STARTED, &usbhid->iofl))
+-		return 0;
+-
+ 	status = hid_resume_common(hid, true);
+ 	dev_dbg(&intf->dev, "resume status %d\n", status);
+ 	return 0;
+@@ -1583,10 +1586,8 @@ static int hid_resume(struct usb_interface *intf)
+ static int hid_reset_resume(struct usb_interface *intf)
+ {
+ 	struct hid_device *hid = usb_get_intfdata(intf);
+-	struct usbhid_device *usbhid = hid->driver_data;
+ 	int status;
+ 
+-	clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ 	status = hid_post_reset(intf);
+ 	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
+ 		int ret = hid->driver->reset_resume(hid);
+diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
+index 082ae6ba492f..1741c33674c3 100644
+--- a/drivers/media/usb/au0828/au0828-core.c
++++ b/drivers/media/usb/au0828/au0828-core.c
+@@ -159,7 +159,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface)
+ 	   Set the status so poll routines can check and avoid
+ 	   access after disconnect.
+ 	*/
+-	dev->dev_state = DEV_DISCONNECTED;
++	set_bit(DEV_DISCONNECTED, &dev->dev_state);
+ 
+ 	au0828_rc_unregister(dev);
+ 	/* Digital TV */
+diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c
+index b0f067971979..3d6687f0407d 100644
+--- a/drivers/media/usb/au0828/au0828-input.c
++++ b/drivers/media/usb/au0828/au0828-input.c
+@@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir)
+ 	bool first = true;
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state))
+ 		return 0;
+ 
+ 	/* Check IR int */
+@@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc)
+ 	cancel_delayed_work_sync(&ir->work);
+ 
+ 	/* do nothing if device is disconnected */
+-	if (ir->dev->dev_state != DEV_DISCONNECTED) {
++	if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) {
+ 		/* Disable IR */
+ 		au8522_rc_clear(ir, 0xe0, 1 << 4);
+ 	}
+diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
+index 1a362a041ab3..0605c0f35059 100644
+--- a/drivers/media/usb/au0828/au0828-video.c
++++ b/drivers/media/usb/au0828/au0828-video.c
+@@ -104,14 +104,13 @@ static inline void print_err_status(struct au0828_dev *dev,
+ 
+ static int check_dev(struct au0828_dev *dev)
+ {
+-	if (dev->dev_state & DEV_DISCONNECTED) {
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) {
+ 		pr_info("v4l2 ioctl: device not present\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	if (dev->dev_state & DEV_MISCONFIGURED) {
+-		pr_info("v4l2 ioctl: device is misconfigured; "
+-		       "close and open it again\n");
++	if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) {
++		pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n");
+ 		return -EIO;
+ 	}
+ 	return 0;
+@@ -519,8 +518,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb)
+ 	if (!dev)
+ 		return 0;
+ 
+-	if ((dev->dev_state & DEV_DISCONNECTED) ||
+-	    (dev->dev_state & DEV_MISCONFIGURED))
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state) ||
++	    test_bit(DEV_MISCONFIGURED, &dev->dev_state))
+ 		return 0;
+ 
+ 	if (urb->status < 0) {
+@@ -761,10 +760,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev)
+ 	int ret = 0;
+ 
+ 	dev->stream_state = STREAM_INTERRUPT;
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		return -ENODEV;
+ 	else if (ret) {
+-		dev->dev_state = DEV_MISCONFIGURED;
++		set_bit(DEV_MISCONFIGURED, &dev->dev_state);
+ 		dprintk(1, "%s device is misconfigured!\n", __func__);
+ 		return ret;
+ 	}
+@@ -952,7 +951,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 	int ret;
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -971,7 +970,7 @@ static int au0828_v4l2_open(struct file *filp)
+ 		au0828_analog_stream_enable(dev);
+ 		au0828_analog_stream_reset(dev);
+ 		dev->stream_state = STREAM_OFF;
+-		dev->dev_state |= DEV_INITIALIZED;
++		set_bit(DEV_INITIALIZED, &dev->dev_state);
+ 	}
+ 	dev->users++;
+ 	mutex_unlock(&dev->lock);
+@@ -985,7 +984,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 	struct video_device *vdev = video_devdata(filp);
+ 
+ 	dprintk(1,
+-		"%s called std_set %d dev_state %d stream users %d users %d\n",
++		"%s called std_set %d dev_state %ld stream users %d users %d\n",
+ 		__func__, dev->std_set_in_tuner_core, dev->dev_state,
+ 		dev->streaming_users, dev->users);
+ 
+@@ -1001,7 +1000,7 @@ static int au0828_v4l2_close(struct file *filp)
+ 		del_timer_sync(&dev->vbi_timeout);
+ 	}
+ 
+-	if (dev->dev_state == DEV_DISCONNECTED)
++	if (test_bit(DEV_DISCONNECTED, &dev->dev_state))
+ 		goto end;
+ 
+ 	if (dev->users == 1) {
+@@ -1030,7 +1029,7 @@ static void au0828_init_tuner(struct au0828_dev *dev)
+ 		.type = V4L2_TUNER_ANALOG_TV,
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (dev->std_set_in_tuner_core)
+@@ -1102,7 +1101,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 	struct video_device *vdev = video_devdata(file);
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strlcpy(cap->driver, "au0828", sizeof(cap->driver));
+@@ -1145,7 +1144,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	f->fmt.pix.width = dev->width;
+@@ -1164,7 +1163,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_set_format(dev, VIDIOC_TRY_FMT, f);
+@@ -1176,7 +1175,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 	int rc;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	rc = check_dev(dev);
+@@ -1198,7 +1197,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	if (norm == dev->std)
+@@ -1230,7 +1229,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*norm = dev->std;
+@@ -1253,7 +1252,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
+ 		[AU0828_VMUX_DEBUG] = "tv debug"
+ 	};
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	tmp = input->index;
+@@ -1283,7 +1282,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	*i = dev->ctrl_input;
+@@ -1294,7 +1293,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index)
+ {
+ 	int i;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	switch (AUVI_INPUT(index).type) {
+@@ -1379,7 +1378,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a)
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	a->index = dev->ctrl_ainput;
+@@ -1399,7 +1398,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio
+ 	if (a->index != dev->ctrl_ainput)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	return 0;
+ }
+@@ -1411,7 +1410,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	strcpy(t->name, "Auvitek tuner");
+@@ -1431,7 +1430,7 @@ static int vidioc_s_tuner(struct file *file, void *priv,
+ 	if (t->index != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1453,7 +1452,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
+ 
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 	freq->frequency = dev->ctrl_freq;
+ 	return 0;
+@@ -1468,7 +1467,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
+ 	if (freq->tuner != 0)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	au0828_init_tuner(dev);
+@@ -1494,7 +1493,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	format->fmt.vbi.samples_per_line = dev->vbi_width;
+@@ -1520,7 +1519,7 @@ static int vidioc_cropcap(struct file *file, void *priv,
+ 	if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ 		return -EINVAL;
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	cc->bounds.left = 0;
+@@ -1542,7 +1541,7 @@ static int vidioc_g_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	reg->val = au0828_read(dev, reg->reg);
+@@ -1555,7 +1554,7 @@ static int vidioc_s_register(struct file *file, void *priv,
+ {
+ 	struct au0828_dev *dev = video_drvdata(file);
+ 
+-	dprintk(1, "%s called std_set %d dev_state %d\n", __func__,
++	dprintk(1, "%s called std_set %d dev_state %ld\n", __func__,
+ 		dev->std_set_in_tuner_core, dev->dev_state);
+ 
+ 	return au0828_writereg(dev, reg->reg, reg->val);
+diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h
+index 3b480005ce3b..42a5379c6aa3 100644
+--- a/drivers/media/usb/au0828/au0828.h
++++ b/drivers/media/usb/au0828/au0828.h
+@@ -21,6 +21,7 @@
+ 
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ 
++#include <linux/bitops.h>
+ #include <linux/usb.h>
+ #include <linux/i2c.h>
+ #include <linux/i2c-algo-bit.h>
+@@ -121,9 +122,9 @@ enum au0828_stream_state {
+ 
+ /* device state */
+ enum au0828_dev_state {
+-	DEV_INITIALIZED = 0x01,
+-	DEV_DISCONNECTED = 0x02,
+-	DEV_MISCONFIGURED = 0x04
++	DEV_INITIALIZED = 0,
++	DEV_DISCONNECTED = 1,
++	DEV_MISCONFIGURED = 2
+ };
+ 
+ struct au0828_dev;
+@@ -247,7 +248,7 @@ struct au0828_dev {
+ 	int input_type;
+ 	int std_set_in_tuner_core;
+ 	unsigned int ctrl_input;
+-	enum au0828_dev_state dev_state;
++	long unsigned int dev_state; /* defined at enum au0828_dev_state */;
+ 	enum au0828_stream_state stream_state;
+ 	wait_queue_head_t open;
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index c66fd23b3c13..1a82a8552182 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1260,18 +1260,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+ 	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ 	struct scsi_device *sdp = sdkp->device;
+ 	struct Scsi_Host *host = sdp->host;
++	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
+ 	int diskinfo[4];
+ 
+ 	/* default to most commonly used values */
+-        diskinfo[0] = 0x40;	/* 1 << 6 */
+-       	diskinfo[1] = 0x20;	/* 1 << 5 */
+-       	diskinfo[2] = sdkp->capacity >> 11;
+-	
++	diskinfo[0] = 0x40;	/* 1 << 6 */
++	diskinfo[1] = 0x20;	/* 1 << 5 */
++	diskinfo[2] = capacity >> 11;
++
+ 	/* override with calculated, extended default, or driver values */
+ 	if (host->hostt->bios_param)
+-		host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo);
++		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
+ 	else
+-		scsicam_bios_param(bdev, sdkp->capacity, diskinfo);
++		scsicam_bios_param(bdev, capacity, diskinfo);
+ 
+ 	geo->heads = diskinfo[0];
+ 	geo->sectors = diskinfo[1];
+@@ -2230,14 +2231,6 @@ got_data:
+ 	} else
+ 		sdkp->max_xfer_blocks = SD_DEF_XFER_BLOCKS;
+ 
+-	/* Rescale capacity to 512-byte units */
+-	if (sector_size == 4096)
+-		sdkp->capacity <<= 3;
+-	else if (sector_size == 2048)
+-		sdkp->capacity <<= 2;
+-	else if (sector_size == 1024)
+-		sdkp->capacity <<= 1;
+-
+ 	blk_queue_physical_block_size(sdp->request_queue,
+ 				      sdkp->physical_block_size);
+ 	sdkp->device->sector_size = sector_size;
+@@ -2773,7 +2766,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	sdkp->disk->queue->limits.max_sectors =
+ 		min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
+ 
+-	set_capacity(disk, sdkp->capacity);
++	set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
+ 	sd_config_write_same(sdkp);
+ 	kfree(buffer);
+ 
+diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
+index 63ba5ca7f9a1..0264936a855a 100644
+--- a/drivers/scsi/sd.h
++++ b/drivers/scsi/sd.h
+@@ -65,7 +65,7 @@ struct scsi_disk {
+ 	struct device	dev;
+ 	struct gendisk	*disk;
+ 	atomic_t	openers;
+-	sector_t	capacity;	/* size in 512-byte sectors */
++	sector_t	capacity;	/* size in logical blocks */
+ 	u32		max_xfer_blocks;
+ 	u32		max_ws_blocks;
+ 	u32		max_unmap_blocks;
+@@ -145,6 +145,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+ 	return 0;
+ }
+ 
++static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks)
++{
++	return blocks << (ilog2(sdev->sector_size) - 9);
++}
++
+ /*
+  * A DIF-capable target device can be formatted with different
+  * protection schemes.  Currently 0 through 3 are defined:
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 09f1e5f2f013..a61386ce41ee 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -1491,7 +1491,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ {
+ 	struct thermal_zone_device *tz;
+ 	enum thermal_trip_type trip_type;
+-	int trip_temp;
++	unsigned long trip_temp;
+ 	int result;
+ 	int count;
+ 	int passive = 0;
+diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
+index 9eb1cff28bd4..b8b580e5ae6e 100644
+--- a/drivers/usb/core/hcd-pci.c
++++ b/drivers/usb/core/hcd-pci.c
+@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
+ 		if (companion->bus != pdev->bus ||
+ 				PCI_SLOT(companion->devfn) != slot)
+ 			continue;
++
++		/*
++		 * Companion device should be either UHCI,OHCI or EHCI host
++		 * controller, otherwise skip.
++		 */
++		if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
++				companion->class != CL_EHCI)
++			continue;
++
+ 		companion_hcd = pci_get_drvdata(companion);
+ 		if (!companion_hcd || !companion_hcd->self.root_hub)
+ 			continue;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 41f841fa6c4d..5ab70afd5624 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1879,6 +1879,12 @@ no_bw:
+ 	kfree(xhci->rh_bw);
+ 	kfree(xhci->ext_caps);
+ 
++	xhci->usb2_ports = NULL;
++	xhci->usb3_ports = NULL;
++	xhci->port_array = NULL;
++	xhci->rh_bw = NULL;
++	xhci->ext_caps = NULL;
++
+ 	xhci->page_size = 0;
+ 	xhci->page_shift = 0;
+ 	xhci->bus_state[0].bus_suspended = 0;
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 3ff5fcc7c94b..c6027acb6263 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -48,6 +48,7 @@
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
+ #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI		0x1aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -149,7 +150,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+@@ -296,6 +298,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ 	struct xhci_hcd *xhci;
+ 
+ 	xhci = hcd_to_xhci(pci_get_drvdata(dev));
++	xhci->xhc_state |= XHCI_STATE_REMOVING;
+ 	if (xhci->shared_hcd) {
+ 		usb_remove_hcd(xhci->shared_hcd);
+ 		usb_put_hcd(xhci->shared_hcd);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index e6d858a49d04..6fe0377ec5cf 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3823,8 +3823,12 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ {
+ 	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
+ 	int ret;
+-	if (xhci->xhc_state & XHCI_STATE_DYING)
++
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_HALTED)) {
++		xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
+ 		return -ESHUTDOWN;
++	}
+ 
+ 	if (!command_must_succeed)
+ 		reserved_trbs++;
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 910f7fac031f..896b928f7412 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++		/* clear state flags. Including dying, halted or removing */
++		xhci->xhc_state = 0;
+ 
+ 	return ret;
+ }
+@@ -1113,8 +1114,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 		/* Resume root hubs only when have pending events. */
+ 		status = readl(&xhci->op_regs->status);
+ 		if (status & STS_EINT) {
+-			usb_hcd_resume_root_hub(hcd);
+ 			usb_hcd_resume_root_hub(xhci->shared_hcd);
++			usb_hcd_resume_root_hub(hcd);
+ 		}
+ 	}
+ 
+@@ -1129,10 +1130,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ 
+ 	/* Re-enable port polling. */
+ 	xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+-	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+-	usb_hcd_poll_rh_status(hcd);
+ 	set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+ 	usb_hcd_poll_rh_status(xhci->shared_hcd);
++	set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
++	usb_hcd_poll_rh_status(hcd);
+ 
+ 	return retval;
+ }
+@@ -2763,7 +2764,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+ 	if (ret <= 0)
+ 		return ret;
+ 	xhci = hcd_to_xhci(hcd);
+-	if (xhci->xhc_state & XHCI_STATE_DYING)
++	if ((xhci->xhc_state & XHCI_STATE_DYING) ||
++		(xhci->xhc_state & XHCI_STATE_REMOVING))
+ 		return -ENODEV;
+ 
+ 	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
+@@ -3812,7 +3814,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 
+ 	mutex_lock(&xhci->mutex);
+ 
+-	if (xhci->xhc_state)	/* dying or halted */
++	if (xhci->xhc_state)	/* dying, removing or halted */
+ 		goto out;
+ 
+ 	if (!udev->slot_id) {
+@@ -4939,6 +4941,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
+ 		goto error;
+ 	xhci_dbg(xhci, "Reset complete\n");
+ 
++	/*
++	 * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0)
++	 * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit
++	 * address memory pointers actually. So, this driver clears the AC64
++	 * bit of xhci->hcc_params to call dma_set_coherent_mask(dev,
++	 * DMA_BIT_MASK(32)) in this xhci_gen_setup().
++	 */
++	if (xhci->quirks & XHCI_NO_64BIT_SUPPORT)
++		xhci->hcc_params &= ~BIT(0);
++
+ 	/* Set dma_mask and coherent_dma_mask to 64-bits,
+ 	 * if xHC supports 64-bit addressing */
+ 	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index f18cdf0ec795..c5d6963e9cbe 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1536,6 +1536,7 @@ struct xhci_hcd {
+  */
+ #define XHCI_STATE_DYING	(1 << 0)
+ #define XHCI_STATE_HALTED	(1 << 1)
++#define XHCI_STATE_REMOVING	(1 << 2)
+ 	/* Statistics */
+ 	int			error_bitmask;
+ 	unsigned int		quirks;
+@@ -1571,6 +1572,7 @@ struct xhci_hcd {
+ #define XHCI_BROKEN_STREAMS	(1 << 19)
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
+ #define XHCI_SSIC_PORT_UNUSED	(1 << 22)
++#define XHCI_NO_64BIT_SUPPORT	(1 << 23)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
+diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
+index c0f5c652d272..f1893e08e51a 100644
+--- a/drivers/usb/renesas_usbhs/fifo.c
++++ b/drivers/usb/renesas_usbhs/fifo.c
+@@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
+ 		goto __usbhs_pkt_handler_end;
+ 	}
+ 
+-	ret = func(pkt, &is_done);
++	if (likely(func))
++		ret = func(pkt, &is_done);
+ 
+ 	if (is_done)
+ 		__usbhsf_pkt_del(pkt);
+@@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
+ 
+ 	pkt->trans = len;
+ 
++	usbhsf_tx_irq_ctrl(pipe, 0);
+ 	INIT_WORK(&pkt->work, xfer_work);
+ 	schedule_work(&pkt->work);
+ 
+diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
+index dc2aa3261202..046529656465 100644
+--- a/drivers/usb/renesas_usbhs/mod_gadget.c
++++ b/drivers/usb/renesas_usbhs/mod_gadget.c
+@@ -154,10 +154,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
+ 	struct usbhs_pipe *pipe = pkt->pipe;
+ 	struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe);
+ 	struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt);
++	unsigned long flags;
+ 
+ 	ureq->req.actual = pkt->actual;
+ 
+-	usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_lock(priv, flags);
++	if (uep)
++		__usbhsg_queue_pop(uep, ureq, 0);
++	usbhs_unlock(priv, flags);
+ }
+ 
+ static void usbhsg_queue_push(struct usbhsg_uep *uep,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 7a76fe4c2f9e..bdc0f2f24f19 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -164,6 +164,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ 	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x1901, 0x0194) },	/* GE Healthcare Remote Alarm Box */
+ 	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
+index 01bf53392819..244acb1299a9 100644
+--- a/drivers/usb/serial/cypress_m8.c
++++ b/drivers/usb/serial/cypress_m8.c
+@@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
+ 	struct usb_serial *serial = port->serial;
+ 	struct cypress_private *priv;
+ 
++	if (!port->interrupt_out_urb || !port->interrupt_in_urb) {
++		dev_err(&port->dev, "required endpoint is missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+@@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
+ 		cypress_set_termios(tty, port, &priv->tmp_termios);
+ 
+ 	/* setup the port and start reading from the device */
+-	if (!port->interrupt_in_urb) {
+-		dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n",
+-			__func__);
+-		return -1;
+-	}
+-
+ 	usb_fill_int_urb(port->interrupt_in_urb, serial->dev,
+ 		usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress),
+ 		port->interrupt_in_urb->transfer_buffer,
+diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
+index 12b0e67473ba..3df7b7ec178e 100644
+--- a/drivers/usb/serial/digi_acceleport.c
++++ b/drivers/usb/serial/digi_acceleport.c
+@@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
+ 
+ static int digi_startup(struct usb_serial *serial)
+ {
++	struct device *dev = &serial->interface->dev;
+ 	struct digi_serial *serial_priv;
+ 	int ret;
++	int i;
++
++	/* check whether the device has the expected number of endpoints */
++	if (serial->num_port_pointers < serial->type->num_ports + 1) {
++		dev_err(dev, "OOB endpoints missing\n");
++		return -ENODEV;
++	}
++
++	for (i = 0; i < serial->type->num_ports + 1 ; i++) {
++		if (!serial->port[i]->read_urb) {
++			dev_err(dev, "bulk-in endpoint missing\n");
++			return -ENODEV;
++		}
++		if (!serial->port[i]->write_urb) {
++			dev_err(dev, "bulk-out endpoint missing\n");
++			return -ENODEV;
++		}
++	}
+ 
+ 	serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL);
+ 	if (!serial_priv)
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 8c660ae401d8..b61f12160d37 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) },
+ 	{ USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) },
++	/* ICP DAS I-756xU devices */
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
++	{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
+ 	{ }					/* Terminating entry */
+ };
+ 
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 7850071c0ae1..334bc600282d 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -872,6 +872,14 @@
+ #define NOVITUS_BONO_E_PID		0x6010
+ 
+ /*
++ * ICPDAS I-756*U devices
++ */
++#define ICPDAS_VID			0x1b5c
++#define ICPDAS_I7560U_PID		0x0103
++#define ICPDAS_I7561U_PID		0x0104
++#define ICPDAS_I7563U_PID		0x0105
++
++/*
+  * RT Systems programming cables for various ham radios
+  */
+ #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
+diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
+index fd707d6a10e2..89726f702202 100644
+--- a/drivers/usb/serial/mct_u232.c
++++ b/drivers/usb/serial/mct_u232.c
+@@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port,
+ 
+ static int mct_u232_port_probe(struct usb_serial_port *port)
+ {
++	struct usb_serial *serial = port->serial;
+ 	struct mct_u232_private *priv;
+ 
++	/* check first to simplify error handling */
++	if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) {
++		dev_err(&port->dev, "expected endpoint missing\n");
++		return -ENODEV;
++	}
++
+ 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ 	if (!priv)
+ 		return -ENOMEM;
+ 
+ 	/* Use second interrupt-in endpoint for reading. */
+-	priv->read_urb = port->serial->port[1]->interrupt_in_urb;
++	priv->read_urb = serial->port[1]->interrupt_in_urb;
+ 	priv->read_urb->context = port;
+ 
+ 	spin_lock_init(&priv->lock);
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index ce3d6af977b7..94e520de6404 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1830,6 +1830,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
++	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),			/* D-Link DWM-221 B1 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 637ee7754ad5..546bb2b1ffc2 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -2,7 +2,7 @@
+  * USB Attached SCSI
+  * Note that this is not the same as the USB Mass Storage driver
+  *
+- * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2014
++ * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
+  * Copyright Matthew Wilcox for Intel Corp, 2010
+  * Copyright Sarah Sharp for Intel Corp, 2010
+  *
+@@ -757,6 +757,17 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
+ 	return SUCCESS;
+ }
+ 
++static int uas_target_alloc(struct scsi_target *starget)
++{
++	struct uas_dev_info *devinfo = (struct uas_dev_info *)
++			dev_to_shost(starget->dev.parent)->hostdata;
++
++	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
++		starget->no_report_luns = 1;
++
++	return 0;
++}
++
+ static int uas_slave_alloc(struct scsi_device *sdev)
+ {
+ 	struct uas_dev_info *devinfo =
+@@ -808,6 +819,7 @@ static struct scsi_host_template uas_host_template = {
+ 	.module = THIS_MODULE,
+ 	.name = "uas",
+ 	.queuecommand = uas_queuecommand,
++	.target_alloc = uas_target_alloc,
+ 	.slave_alloc = uas_slave_alloc,
+ 	.slave_configure = uas_slave_configure,
+ 	.eh_abort_handler = uas_eh_abort_handler,
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index ccc113e83d88..53341a77d89f 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -64,6 +64,13 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_ATA_1X),
+ 
++/* Reported-by: David Webb <djw@noc.ac.uk> */
++UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
++		"Seagate",
++		"Expansion Desk",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_NO_REPORT_LUNS),
++
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
+ 		"Seagate",
+diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
+index 6c10c888f35f..ba8f759e723f 100644
+--- a/drivers/usb/storage/usb.c
++++ b/drivers/usb/storage/usb.c
+@@ -480,7 +480,7 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 			US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
+ 			US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
+ 			US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+-			US_FL_MAX_SECTORS_240);
++			US_FL_MAX_SECTORS_240 | US_FL_NO_REPORT_LUNS);
+ 
+ 	p = quirks;
+ 	while (*p) {
+@@ -530,6 +530,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
+ 		case 'i':
+ 			f |= US_FL_IGNORE_DEVICE;
+ 			break;
++		case 'j':
++			f |= US_FL_NO_REPORT_LUNS;
++			break;
+ 		case 'l':
+ 			f |= US_FL_NOT_LOCKABLE;
+ 			break;
+diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
+index e88e0997a889..6652258048df 100644
+--- a/drivers/virtio/virtio_pci_modern.c
++++ b/drivers/virtio/virtio_pci_modern.c
+@@ -17,6 +17,7 @@
+  *
+  */
+ 
++#include <linux/delay.h>
+ #define VIRTIO_PCI_NO_LEGACY
+ #include "virtio_pci_common.h"
+ 
+@@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev)
+ 	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ 	/* 0 status means a reset. */
+ 	vp_iowrite8(0, &vp_dev->common->device_status);
+-	/* Flush out the status write, and flush in device writes,
+-	 * including MSI-X interrupts, if any. */
+-	vp_ioread8(&vp_dev->common->device_status);
++	/* After writing 0 to device_status, the driver MUST wait for a read of
++	 * device_status to return 0 before reinitializing the device.
++	 * This will flush out the status write, and flush in device writes,
++	 * including MSI-X interrupts, if any.
++	 */
++	while (vp_ioread8(&vp_dev->common->device_status))
++		msleep(1);
+ 	/* Flush pending VQ/configuration callbacks. */
+ 	vp_synchronize_vectors(vdev);
+ }
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 38387950490e..511aab3b9206 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -483,9 +483,19 @@ static void eoi_pirq(struct irq_data *data)
+ 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ 	int rc = 0;
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ 
+ 	if (pirq_needs_eoi(data->irq)) {
+@@ -1360,9 +1370,19 @@ static void ack_dynirq(struct irq_data *data)
+ {
+ 	int evtchn = evtchn_from_irq(data->irq);
+ 
+-	irq_move_irq(data);
++	if (!VALID_EVTCHN(evtchn))
++		return;
+ 
+-	if (VALID_EVTCHN(evtchn))
++	if (unlikely(irqd_is_setaffinity_pending(data))) {
++		int masked = test_and_set_mask(evtchn);
++
++		clear_evtchn(evtchn);
++
++		irq_move_masked_irq(data);
++
++		if (!masked)
++			unmask_evtchn(evtchn);
++	} else
+ 		clear_evtchn(evtchn);
+ }
+ 
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 4920fceffacb..5c222f3c4841 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4123,6 +4123,308 @@ static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
+ 	return 0;
+ }
+ 
++/*
++ * At the moment we always log all xattrs. This is to figure out at log replay
++ * time which xattrs must have their deletion replayed. If a xattr is missing
++ * in the log tree and exists in the fs/subvol tree, we delete it. This is
++ * because if a xattr is deleted, the inode is fsynced and a power failure
++ * happens, causing the log to be replayed the next time the fs is mounted,
++ * we want the xattr to not exist anymore (same behaviour as other filesystems
++ * with a journal, ext3/4, xfs, f2fs, etc).
++ */
++static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
++				struct btrfs_root *root,
++				struct inode *inode,
++				struct btrfs_path *path,
++				struct btrfs_path *dst_path)
++{
++	int ret;
++	struct btrfs_key key;
++	const u64 ino = btrfs_ino(inode);
++	int ins_nr = 0;
++	int start_slot = 0;
++
++	key.objectid = ino;
++	key.type = BTRFS_XATTR_ITEM_KEY;
++	key.offset = 0;
++
++	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++	if (ret < 0)
++		return ret;
++
++	while (true) {
++		int slot = path->slots[0];
++		struct extent_buffer *leaf = path->nodes[0];
++		int nritems = btrfs_header_nritems(leaf);
++
++		if (slot >= nritems) {
++			if (ins_nr > 0) {
++				u64 last_extent = 0;
++
++				ret = copy_items(trans, inode, dst_path, path,
++						 &last_extent, start_slot,
++						 ins_nr, 1, 0);
++				/* can't be 1, extent items aren't processed */
++				ASSERT(ret <= 0);
++				if (ret < 0)
++					return ret;
++				ins_nr = 0;
++			}
++			ret = btrfs_next_leaf(root, path);
++			if (ret < 0)
++				return ret;
++			else if (ret > 0)
++				break;
++			continue;
++		}
++
++		btrfs_item_key_to_cpu(leaf, &key, slot);
++		if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
++			break;
++
++		if (ins_nr == 0)
++			start_slot = slot;
++		ins_nr++;
++		path->slots[0]++;
++		cond_resched();
++	}
++	if (ins_nr > 0) {
++		u64 last_extent = 0;
++
++		ret = copy_items(trans, inode, dst_path, path,
++				 &last_extent, start_slot,
++				 ins_nr, 1, 0);
++		/* can't be 1, extent items aren't processed */
++		ASSERT(ret <= 0);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++/*
++ * If the no holes feature is enabled we need to make sure any hole between the
++ * last extent and the i_size of our inode is explicitly marked in the log. This
++ * is to make sure that doing something like:
++ *
++ *      1) create file with 128Kb of data
++ *      2) truncate file to 64Kb
++ *      3) truncate file to 256Kb
++ *      4) fsync file
++ *      5) <crash/power failure>
++ *      6) mount fs and trigger log replay
++ *
++ * Will give us a file with a size of 256Kb, the first 64Kb of data match what
++ * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
++ * file correspond to a hole. The presence of explicit holes in a log tree is
++ * what guarantees that log replay will remove/adjust file extent items in the
++ * fs/subvol tree.
++ *
++ * Here we do not need to care about holes between extents, that is already done
++ * by copy_items(). We also only need to do this in the full sync path, where we
++ * lookup for extents from the fs/subvol tree only. In the fast path case, we
++ * lookup the list of modified extent maps and if any represents a hole, we
++ * insert a corresponding extent representing a hole in the log tree.
++ */
++static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
++				   struct btrfs_root *root,
++				   struct inode *inode,
++				   struct btrfs_path *path)
++{
++	int ret;
++	struct btrfs_key key;
++	u64 hole_start;
++	u64 hole_size;
++	struct extent_buffer *leaf;
++	struct btrfs_root *log = root->log_root;
++	const u64 ino = btrfs_ino(inode);
++	const u64 i_size = i_size_read(inode);
++
++	if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
++		return 0;
++
++	key.objectid = ino;
++	key.type = BTRFS_EXTENT_DATA_KEY;
++	key.offset = (u64)-1;
++
++	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++	ASSERT(ret != 0);
++	if (ret < 0)
++		return ret;
++
++	ASSERT(path->slots[0] > 0);
++	path->slots[0]--;
++	leaf = path->nodes[0];
++	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
++
++	if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
++		/* inode does not have any extents */
++		hole_start = 0;
++		hole_size = i_size;
++	} else {
++		struct btrfs_file_extent_item *extent;
++		u64 len;
++
++		/*
++		 * If there's an extent beyond i_size, an explicit hole was
++		 * already inserted by copy_items().
++		 */
++		if (key.offset >= i_size)
++			return 0;
++
++		extent = btrfs_item_ptr(leaf, path->slots[0],
++					struct btrfs_file_extent_item);
++
++		if (btrfs_file_extent_type(leaf, extent) ==
++		    BTRFS_FILE_EXTENT_INLINE) {
++			len = btrfs_file_extent_inline_len(leaf,
++							   path->slots[0],
++							   extent);
++			ASSERT(len == i_size);
++			return 0;
++		}
++
++		len = btrfs_file_extent_num_bytes(leaf, extent);
++		/* Last extent goes beyond i_size, no need to log a hole. */
++		if (key.offset + len > i_size)
++			return 0;
++		hole_start = key.offset + len;
++		hole_size = i_size - hole_start;
++	}
++	btrfs_release_path(path);
++
++	/* Last extent ends at i_size. */
++	if (hole_size == 0)
++		return 0;
++
++	hole_size = ALIGN(hole_size, root->sectorsize);
++	ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
++				       hole_size, 0, hole_size, 0, 0, 0);
++	return ret;
++}
++
++/*
++ * When we are logging a new inode X, check if it doesn't have a reference that
++ * matches the reference from some other inode Y created in a past transaction
++ * and that was renamed in the current transaction. If we don't do this, then at
++ * log replay time we can lose inode Y (and all its files if it's a directory):
++ *
++ * mkdir /mnt/x
++ * echo "hello world" > /mnt/x/foobar
++ * sync
++ * mv /mnt/x /mnt/y
++ * mkdir /mnt/x                 # or touch /mnt/x
++ * xfs_io -c fsync /mnt/x
++ * <power fail>
++ * mount fs, trigger log replay
++ *
++ * After the log replay procedure, we would lose the first directory and all its
++ * files (file foobar).
++ * For the case where inode Y is not a directory we simply end up losing it:
++ *
++ * echo "123" > /mnt/foo
++ * sync
++ * mv /mnt/foo /mnt/bar
++ * echo "abc" > /mnt/foo
++ * xfs_io -c fsync /mnt/foo
++ * <power fail>
++ *
++ * We also need this for cases where a snapshot entry is replaced by some other
++ * entry (file or directory) otherwise we end up with an unreplayable log due to
++ * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
++ * if it were a regular entry:
++ *
++ * mkdir /mnt/x
++ * btrfs subvolume snapshot /mnt /mnt/x/snap
++ * btrfs subvolume delete /mnt/x/snap
++ * rmdir /mnt/x
++ * mkdir /mnt/x
++ * fsync /mnt/x or fsync some new file inside it
++ * <power fail>
++ *
++ * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
++ * the same transaction.
++ */
++static int btrfs_check_ref_name_override(struct extent_buffer *eb,
++					 const int slot,
++					 const struct btrfs_key *key,
++					 struct inode *inode)
++{
++	int ret;
++	struct btrfs_path *search_path;
++	char *name = NULL;
++	u32 name_len = 0;
++	u32 item_size = btrfs_item_size_nr(eb, slot);
++	u32 cur_offset = 0;
++	unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
++
++	search_path = btrfs_alloc_path();
++	if (!search_path)
++		return -ENOMEM;
++	search_path->search_commit_root = 1;
++	search_path->skip_locking = 1;
++
++	while (cur_offset < item_size) {
++		u64 parent;
++		u32 this_name_len;
++		u32 this_len;
++		unsigned long name_ptr;
++		struct btrfs_dir_item *di;
++
++		if (key->type == BTRFS_INODE_REF_KEY) {
++			struct btrfs_inode_ref *iref;
++
++			iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
++			parent = key->offset;
++			this_name_len = btrfs_inode_ref_name_len(eb, iref);
++			name_ptr = (unsigned long)(iref + 1);
++			this_len = sizeof(*iref) + this_name_len;
++		} else {
++			struct btrfs_inode_extref *extref;
++
++			extref = (struct btrfs_inode_extref *)(ptr +
++							       cur_offset);
++			parent = btrfs_inode_extref_parent(eb, extref);
++			this_name_len = btrfs_inode_extref_name_len(eb, extref);
++			name_ptr = (unsigned long)&extref->name;
++			this_len = sizeof(*extref) + this_name_len;
++		}
++
++		if (this_name_len > name_len) {
++			char *new_name;
++
++			new_name = krealloc(name, this_name_len, GFP_NOFS);
++			if (!new_name) {
++				ret = -ENOMEM;
++				goto out;
++			}
++			name_len = this_name_len;
++			name = new_name;
++		}
++
++		read_extent_buffer(eb, name, name_ptr, this_name_len);
++		di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root,
++					   search_path, parent,
++					   name, this_name_len, 0);
++		if (di && !IS_ERR(di)) {
++			ret = 1;
++			goto out;
++		} else if (IS_ERR(di)) {
++			ret = PTR_ERR(di);
++			goto out;
++		}
++		btrfs_release_path(search_path);
++
++		cur_offset += this_len;
++	}
++	ret = 0;
++out:
++	btrfs_free_path(search_path);
++	kfree(name);
++	return ret;
++}
++
+ /* log a single inode in the tree log.
+  * At least one parent directory for this inode must exist in the tree
+  * or be logged already.
+@@ -4295,6 +4597,41 @@ again:
+ 		if (min_key.type == BTRFS_INODE_ITEM_KEY)
+ 			need_log_inode_item = false;
+ 
++		if ((min_key.type == BTRFS_INODE_REF_KEY ||
++		     min_key.type == BTRFS_INODE_EXTREF_KEY) &&
++		    BTRFS_I(inode)->generation == trans->transid) {
++			ret = btrfs_check_ref_name_override(path->nodes[0],
++							    path->slots[0],
++							    &min_key, inode);
++			if (ret < 0) {
++				err = ret;
++				goto out_unlock;
++			} else if (ret > 0) {
++				err = 1;
++				btrfs_set_log_full_commit(root->fs_info, trans);
++				goto out_unlock;
++			}
++		}
++
++		/* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
++		if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
++			if (ins_nr == 0)
++				goto next_slot;
++			ret = copy_items(trans, inode, dst_path, path,
++					 &last_extent, ins_start_slot,
++					 ins_nr, inode_only, logged_isize);
++			if (ret < 0) {
++				err = ret;
++				goto out_unlock;
++			}
++			ins_nr = 0;
++			if (ret) {
++				btrfs_release_path(path);
++				continue;
++			}
++			goto next_slot;
++		}
++
+ 		src = path->nodes[0];
+ 		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
+ 			ins_nr++;
+@@ -4362,6 +4699,18 @@ next_slot:
+ 		ins_nr = 0;
+ 	}
+ 
++	btrfs_release_path(path);
++	btrfs_release_path(dst_path);
++	err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
++	if (err)
++		goto out_unlock;
++	if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
++		btrfs_release_path(path);
++		btrfs_release_path(dst_path);
++		err = btrfs_log_trailing_hole(trans, root, inode, path);
++		if (err)
++			goto out_unlock;
++	}
+ log_extents:
+ 	btrfs_release_path(path);
+ 	btrfs_release_path(dst_path);
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 8bec8f1e4b31..153c9a0db303 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -462,7 +462,7 @@ struct dentry *debugfs_create_automount(const char *name,
+ 	if (unlikely(!inode))
+ 		return failed_creating(dentry);
+ 
+-	inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
++	make_empty_dir_inode(inode);
+ 	inode->i_flags |= S_AUTOMOUNT;
+ 	inode->i_private = data;
+ 	dentry->d_fsdata = (void *)f;
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 95dfff88de11..15213a567301 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -813,6 +813,29 @@ do {									       \
+ #include "extents_status.h"
+ 
+ /*
++ * Lock subclasses for i_data_sem in the ext4_inode_info structure.
++ *
++ * These are needed to avoid lockdep false positives when we need to
++ * allocate blocks to the quota inode during ext4_map_blocks(), while
++ * holding i_data_sem for a normal (non-quota) inode.  Since we don't
++ * do quota tracking for the quota inode, this avoids deadlock (as
++ * well as infinite recursion, since it isn't turtles all the way
++ * down...)
++ *
++ *  I_DATA_SEM_NORMAL - Used for most inodes
++ *  I_DATA_SEM_OTHER  - Used by move_inode.c for the second normal inode
++ *			  where the second inode has larger inode number
++ *			  than the first
++ *  I_DATA_SEM_QUOTA  - Used for quota inodes only
++ */
++enum {
++	I_DATA_SEM_NORMAL = 0,
++	I_DATA_SEM_OTHER,
++	I_DATA_SEM_QUOTA,
++};
++
++
++/*
+  * fourth extended file system inode data in memory
+  */
+ struct ext4_inode_info {
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 3fb92abe5707..bd059e985485 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second)
+ {
+ 	if (first < second) {
+ 		down_write(&EXT4_I(first)->i_data_sem);
+-		down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER);
+ 	} else {
+ 		down_write(&EXT4_I(second)->i_data_sem);
+-		down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);
++		down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER);
+ 
+ 	}
+ }
+@@ -487,6 +487,13 @@ mext_check_arguments(struct inode *orig_inode,
+ 		return -EBUSY;
+ 	}
+ 
++	if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
++		ext4_debug("ext4 move extent: The argument files should "
++			"not be quota files [ino:orig %lu, donor %lu]\n",
++			orig_inode->i_ino, donor_inode->i_ino);
++		return -EBUSY;
++	}
++
+ 	/* Ext4 move extent supports only extent based file */
+ 	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
+ 		ext4_debug("ext4 move extent: orig file is not extents "
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6f5ca3e92246..aedff7963468 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1276,9 +1276,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
+ 		return -1;
+ 	}
+ 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+-			 "when QUOTA feature is enabled");
+-		return -1;
++		ext4_msg(sb, KERN_INFO, "Journaled quota options "
++			 "ignored when QUOTA feature is enabled");
++		return 1;
+ 	}
+ 	qname = match_strdup(args);
+ 	if (!qname) {
+@@ -1636,10 +1636,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
+ 		}
+ 		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ 					       EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+-			ext4_msg(sb, KERN_ERR,
+-				 "Cannot set journaled quota options "
++			ext4_msg(sb, KERN_INFO,
++				 "Quota format mount options ignored "
+ 				 "when QUOTA feature is enabled");
+-			return -1;
++			return 1;
+ 		}
+ 		sbi->s_jquota_fmt = m->mount_opt;
+ #endif
+@@ -1696,11 +1696,11 @@ static int parse_options(char *options, struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ 	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ 	    (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+-		ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+-			 "feature is enabled");
+-		return 0;
+-	}
+-	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
++		ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota "
++			 "mount options ignored.");
++		clear_opt(sb, USRQUOTA);
++		clear_opt(sb, GRPQUOTA);
++	} else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
+ 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
+ 			clear_opt(sb, USRQUOTA);
+ 
+@@ -5272,6 +5272,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
+ 					EXT4_SB(sb)->s_jquota_fmt, type);
+ }
+ 
++static void lockdep_set_quota_inode(struct inode *inode, int subclass)
++{
++	struct ext4_inode_info *ei = EXT4_I(inode);
++
++	/* The first argument of lockdep_set_subclass has to be
++	 * *exactly* the same as the argument to init_rwsem() --- in
++	 * this case, in init_once() --- or lockdep gets unhappy
++	 * because the name of the lock is set using the
++	 * stringification of the argument to init_rwsem().
++	 */
++	(void) ei;	/* shut up clang warning if !CONFIG_LOCKDEP */
++	lockdep_set_subclass(&ei->i_data_sem, subclass);
++}
++
+ /*
+  * Standard function to be called on quota_on
+  */
+@@ -5311,8 +5325,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+ 		if (err)
+ 			return err;
+ 	}
+-
+-	return dquot_quota_on(sb, type, format_id, path);
++	lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA);
++	err = dquot_quota_on(sb, type, format_id, path);
++	if (err)
++		lockdep_set_quota_inode(path->dentry->d_inode,
++					     I_DATA_SEM_NORMAL);
++	return err;
+ }
+ 
+ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+@@ -5338,8 +5356,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
+ 
+ 	/* Don't account quota for quota files to avoid recursion */
+ 	qf_inode->i_flags |= S_NOQUOTA;
++	lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
+ 	err = dquot_enable(qf_inode, type, format_id, flags);
+ 	iput(qf_inode);
++	if (err)
++		lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ 
+ 	return err;
+ }
+diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
+index 371e560d13cf..27fa57322550 100644
+--- a/include/linux/compiler-gcc.h
++++ b/include/linux/compiler-gcc.h
+@@ -115,10 +115,122 @@
+ #define __maybe_unused			__attribute__((unused))
+ #define __always_unused			__attribute__((unused))
+ 
+-#define __gcc_header(x) #x
+-#define _gcc_header(x) __gcc_header(linux/compiler-gcc##x.h)
+-#define gcc_header(x) _gcc_header(x)
+-#include gcc_header(__GNUC__)
++/* gcc version specific checks */
++
++#if GCC_VERSION < 30200
++# error Sorry, your compiler is too old - please upgrade it.
++#endif
++
++#if GCC_VERSION < 30300
++# define __used			__attribute__((__unused__))
++#else
++# define __used			__attribute__((__used__))
++#endif
++
++#ifdef CONFIG_GCOV_KERNEL
++# if GCC_VERSION < 30400
++#   error "GCOV profiling support for gcc versions below 3.4 not included"
++# endif /* __GNUC_MINOR__ */
++#endif /* CONFIG_GCOV_KERNEL */
++
++#if GCC_VERSION >= 30400
++#define __must_check		__attribute__((warn_unused_result))
++#endif
++
++#if GCC_VERSION >= 40000
++
++/* GCC 4.1.[01] miscompiles __weak */
++#ifdef __KERNEL__
++# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
++#  error Your version of gcc miscompiles the __weak directive
++# endif
++#endif
++
++#define __used			__attribute__((__used__))
++#define __compiler_offsetof(a, b)					\
++	__builtin_offsetof(a, b)
++
++#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
++# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
++#endif
++
++#if GCC_VERSION >= 40300
++/* Mark functions as cold. gcc will assume any path leading to a call
++ * to them will be unlikely.  This means a lot of manual unlikely()s
++ * are unnecessary now for any paths leading to the usual suspects
++ * like BUG(), printk(), panic() etc. [but let's keep them for now for
++ * older compilers]
++ *
++ * Early snapshots of gcc 4.3 don't support this and we can't detect this
++ * in the preprocessor, but we can live with this because they're unreleased.
++ * Maketime probing would be overkill here.
++ *
++ * gcc also has a __attribute__((__hot__)) to move hot functions into
++ * a special section, but I don't see any sense in this right now in
++ * the kernel context
++ */
++#define __cold			__attribute__((__cold__))
++
++#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
++
++#ifndef __CHECKER__
++# define __compiletime_warning(message) __attribute__((warning(message)))
++# define __compiletime_error(message) __attribute__((error(message)))
++#endif /* __CHECKER__ */
++#endif /* GCC_VERSION >= 40300 */
++
++#if GCC_VERSION >= 40500
++/*
++ * Mark a position in code as unreachable.  This can be used to
++ * suppress control flow warnings after asm blocks that transfer
++ * control elsewhere.
++ *
++ * Early snapshots of gcc 4.5 don't support this and we can't detect
++ * this in the preprocessor, but we can live with this because they're
++ * unreleased.  Really, we need to have autoconf for the kernel.
++ */
++#define unreachable() __builtin_unreachable()
++
++/* Mark a function definition as prohibited from being cloned. */
++#define __noclone	__attribute__((__noclone__, __optimize__("no-tracer")))
++
++#endif /* GCC_VERSION >= 40500 */
++
++#if GCC_VERSION >= 40600
++/*
++ * Tell the optimizer that something else uses this function or variable.
++ */
++#define __visible	__attribute__((externally_visible))
++#endif
++
++/*
++ * GCC 'asm goto' miscompiles certain code sequences:
++ *
++ *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
++ *
++ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
++ *
++ * (asm goto is automatically volatile - the naming reflects this.)
++ */
++#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
++
++#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
++#if GCC_VERSION >= 40400
++#define __HAVE_BUILTIN_BSWAP32__
++#define __HAVE_BUILTIN_BSWAP64__
++#endif
++#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
++#define __HAVE_BUILTIN_BSWAP16__
++#endif
++#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
++
++#if GCC_VERSION >= 50000
++#define KASAN_ABI_VERSION 4
++#elif GCC_VERSION >= 40902
++#define KASAN_ABI_VERSION 3
++#endif
++
++#endif	/* gcc version >= 40000 specific checks */
+ 
+ #if !defined(__noclone)
+ #define __noclone	/* not needed */
+diff --git a/include/linux/compiler-gcc3.h b/include/linux/compiler-gcc3.h
+deleted file mode 100644
+index 7d89febe4d79..000000000000
+--- a/include/linux/compiler-gcc3.h
++++ /dev/null
+@@ -1,23 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#if GCC_VERSION < 30200
+-# error Sorry, your compiler is too old - please upgrade it.
+-#endif
+-
+-#if GCC_VERSION >= 30300
+-# define __used			__attribute__((__used__))
+-#else
+-# define __used			__attribute__((__unused__))
+-#endif
+-
+-#if GCC_VERSION >= 30400
+-#define __must_check		__attribute__((warn_unused_result))
+-#endif
+-
+-#ifdef CONFIG_GCOV_KERNEL
+-# if GCC_VERSION < 30400
+-#   error "GCOV profiling support for gcc versions below 3.4 not included"
+-# endif /* __GNUC_MINOR__ */
+-#endif /* CONFIG_GCOV_KERNEL */
+diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
+deleted file mode 100644
+index 769e19864632..000000000000
+--- a/include/linux/compiler-gcc4.h
++++ /dev/null
+@@ -1,91 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc4.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-/* GCC 4.1.[01] miscompiles __weak */
+-#ifdef __KERNEL__
+-# if GCC_VERSION >= 40100 &&  GCC_VERSION <= 40101
+-#  error Your version of gcc miscompiles the __weak directive
+-# endif
+-#endif
+-
+-#define __used			__attribute__((__used__))
+-#define __must_check 		__attribute__((warn_unused_result))
+-#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+-
+-#if GCC_VERSION >= 40100 && GCC_VERSION < 40600
+-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
+-#endif
+-
+-#if GCC_VERSION >= 40300
+-/* Mark functions as cold. gcc will assume any path leading to a call
+-   to them will be unlikely.  This means a lot of manual unlikely()s
+-   are unnecessary now for any paths leading to the usual suspects
+-   like BUG(), printk(), panic() etc. [but let's keep them for now for
+-   older compilers]
+-
+-   Early snapshots of gcc 4.3 don't support this and we can't detect this
+-   in the preprocessor, but we can live with this because they're unreleased.
+-   Maketime probing would be overkill here.
+-
+-   gcc also has a __attribute__((__hot__)) to move hot functions into
+-   a special section, but I don't see any sense in this right now in
+-   the kernel context */
+-#define __cold			__attribute__((__cold__))
+-
+-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+-
+-#ifndef __CHECKER__
+-# define __compiletime_warning(message) __attribute__((warning(message)))
+-# define __compiletime_error(message) __attribute__((error(message)))
+-#endif /* __CHECKER__ */
+-#endif /* GCC_VERSION >= 40300 */
+-
+-#if GCC_VERSION >= 40500
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- *
+- * Early snapshots of gcc 4.5 don't support this and we can't detect
+- * this in the preprocessor, but we can live with this because they're
+- * unreleased.  Really, we need to have autoconf for the kernel.
+- */
+-#define unreachable() __builtin_unreachable()
+-
+-/* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
+-
+-#endif /* GCC_VERSION >= 40500 */
+-
+-#if GCC_VERSION >= 40600
+-/*
+- * Tell the optimizer that something else uses this function or variable.
+- */
+-#define __visible __attribute__((externally_visible))
+-#endif
+-
+-/*
+- * GCC 'asm goto' miscompiles certain code sequences:
+- *
+- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+- *
+- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+- *
+- * (asm goto is automatically volatile - the naming reflects this.)
+- */
+-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+-
+-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+-#if GCC_VERSION >= 40400
+-#define __HAVE_BUILTIN_BSWAP32__
+-#define __HAVE_BUILTIN_BSWAP64__
+-#endif
+-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+-#define __HAVE_BUILTIN_BSWAP16__
+-#endif
+-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+-
+-#if GCC_VERSION >= 40902
+-#define KASAN_ABI_VERSION 3
+-#endif
+diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
+deleted file mode 100644
+index efee493714eb..000000000000
+--- a/include/linux/compiler-gcc5.h
++++ /dev/null
+@@ -1,67 +0,0 @@
+-#ifndef __LINUX_COMPILER_H
+-#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+-#endif
+-
+-#define __used				__attribute__((__used__))
+-#define __must_check			__attribute__((warn_unused_result))
+-#define __compiler_offsetof(a, b)	__builtin_offsetof(a, b)
+-
+-/* Mark functions as cold. gcc will assume any path leading to a call
+-   to them will be unlikely.  This means a lot of manual unlikely()s
+-   are unnecessary now for any paths leading to the usual suspects
+-   like BUG(), printk(), panic() etc. [but let's keep them for now for
+-   older compilers]
+-
+-   Early snapshots of gcc 4.3 don't support this and we can't detect this
+-   in the preprocessor, but we can live with this because they're unreleased.
+-   Maketime probing would be overkill here.
+-
+-   gcc also has a __attribute__((__hot__)) to move hot functions into
+-   a special section, but I don't see any sense in this right now in
+-   the kernel context */
+-#define __cold			__attribute__((__cold__))
+-
+-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+-
+-#ifndef __CHECKER__
+-# define __compiletime_warning(message) __attribute__((warning(message)))
+-# define __compiletime_error(message) __attribute__((error(message)))
+-#endif /* __CHECKER__ */
+-
+-/*
+- * Mark a position in code as unreachable.  This can be used to
+- * suppress control flow warnings after asm blocks that transfer
+- * control elsewhere.
+- *
+- * Early snapshots of gcc 4.5 don't support this and we can't detect
+- * this in the preprocessor, but we can live with this because they're
+- * unreleased.  Really, we need to have autoconf for the kernel.
+- */
+-#define unreachable() __builtin_unreachable()
+-
+-/* Mark a function definition as prohibited from being cloned. */
+-#define __noclone	__attribute__((__noclone__))
+-
+-/*
+- * Tell the optimizer that something else uses this function or variable.
+- */
+-#define __visible __attribute__((externally_visible))
+-
+-/*
+- * GCC 'asm goto' miscompiles certain code sequences:
+- *
+- *   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+- *
+- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+- *
+- * (asm goto is automatically volatile - the naming reflects this.)
+- */
+-#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
+-
+-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+-#define __HAVE_BUILTIN_BSWAP32__
+-#define __HAVE_BUILTIN_BSWAP64__
+-#define __HAVE_BUILTIN_BSWAP16__
+-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+-
+-#define KASAN_ABI_VERSION 4
+diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
+index 7f5f78bd15ad..245f57dbbb61 100644
+--- a/include/linux/usb_usual.h
++++ b/include/linux/usb_usual.h
+@@ -79,6 +79,8 @@
+ 		/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */	\
+ 	US_FLAG(MAX_SECTORS_240,	0x08000000)		\
+ 		/* Sets max_sectors to 240 */			\
++	US_FLAG(NO_REPORT_LUNS,	0x10000000)			\
++		/* Cannot handle REPORT_LUNS */			\
+ 
+ #define US_FLAG(name, value)	US_FL_##name = value ,
+ enum { US_DO_ALL_FLAGS };
+diff --git a/lib/assoc_array.c b/lib/assoc_array.c
+index 03dd576e6773..59fd7c0b119c 100644
+--- a/lib/assoc_array.c
++++ b/lib/assoc_array.c
+@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
+ 			free_slot = i;
+ 			continue;
+ 		}
+-		if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
++		if (assoc_array_ptr_is_leaf(ptr) &&
++		    ops->compare_object(assoc_array_ptr_to_leaf(ptr),
++					index_key)) {
+ 			pr_devel("replace in slot %d\n", i);
+ 			edit->leaf_p = &node->slots[i];
+ 			edit->dead_leaf = node->slots[i];
+diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
+index abcecdc2d0f2..0710a62ad2f6 100644
+--- a/lib/lz4/lz4defs.h
++++ b/lib/lz4/lz4defs.h
+@@ -11,8 +11,7 @@
+ /*
+  * Detects 64 bits mode
+  */
+-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
+-	|| defined(__ppc64__) || defined(__LP64__))
++#if defined(CONFIG_64BIT)
+ #define LZ4_ARCH64 1
+ #else
+ #define LZ4_ARCH64 0
+@@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #define PUT4(s, d) (A32(d) = A32(s))
+ #define PUT8(s, d) (A64(d) = A64(s))
++
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - A16(p))
++
+ #define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+ 	do {	\
+ 		A16(p) = v; \
+@@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
+ #define PUT8(s, d) \
+ 	put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
+ 
+-#define LZ4_WRITE_LITTLEENDIAN_16(p, v)	\
+-	do {	\
+-		put_unaligned(v, (u16 *)(p)); \
+-		p += 2; \
++#define LZ4_READ_LITTLEENDIAN_16(d, s, p)	\
++	(d = s - get_unaligned_le16(p))
++
++#define LZ4_WRITE_LITTLEENDIAN_16(p, v)			\
++	do {						\
++		put_unaligned_le16(v, (u16 *)(p));	\
++		p += 2;					\
+ 	} while (0)
+ #endif
+ 
+@@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
+ 
+ #endif
+ 
+-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+-	(d = s - get_unaligned_le16(p))
+-
+ #define LZ4_WILDCOPY(s, d, e)		\
+ 	do {				\
+ 		LZ4_COPYPACKET(s, d);	\
+diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
+index 06d3d665a9fd..3ce0b861cc0e 100644
+--- a/net/ipv4/tcp_cubic.c
++++ b/net/ipv4/tcp_cubic.c
+@@ -151,6 +151,27 @@ static void bictcp_init(struct sock *sk)
+ 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
+ }
+ 
++static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
++{
++	if (event == CA_EVENT_TX_START) {
++		struct bictcp *ca = inet_csk_ca(sk);
++		u32 now = tcp_time_stamp;
++		s32 delta;
++
++		delta = now - tcp_sk(sk)->lsndtime;
++
++		/* We were application limited (idle) for a while.
++		 * Shift epoch_start to keep cwnd growth to cubic curve.
++		 */
++		if (ca->epoch_start && delta > 0) {
++			ca->epoch_start += delta;
++			if (after(ca->epoch_start, now))
++				ca->epoch_start = now;
++		}
++		return;
++	}
++}
++
+ /* calculate the cubic root of x using a table lookup followed by one
+  * Newton-Raphson iteration.
+  * Avg err ~= 0.195%
+@@ -450,6 +471,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
+ 	.cong_avoid	= bictcp_cong_avoid,
+ 	.set_state	= bictcp_state,
+ 	.undo_cwnd	= bictcp_undo_cwnd,
++	.cwnd_event	= bictcp_cwnd_event,
+ 	.pkts_acked     = bictcp_acked,
+ 	.owner		= THIS_MODULE,
+ 	.name		= "cubic",
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 2880f2ae99ab..a7027190f298 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -252,11 +252,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ }
+ 
+ /* Caller must hold local->sta_mtx */
+-static void sta_info_hash_add(struct ieee80211_local *local,
+-			      struct sta_info *sta)
++static int sta_info_hash_add(struct ieee80211_local *local,
++			     struct sta_info *sta)
+ {
+-	rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
+-			       sta_rht_params);
++	return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node,
++				      sta_rht_params);
+ }
+ 
+ static void sta_deliver_ps_frames(struct work_struct *wk)
+@@ -491,7 +491,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+ 	set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ 
+ 	/* make the station visible */
+-	sta_info_hash_add(local, sta);
++	err = sta_info_hash_add(local, sta);
++	if (err)
++		goto out_drop_sta;
+ 
+ 	list_add_tail_rcu(&sta->list, &local->sta_list);
+ 
+@@ -526,6 +528,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
+  out_remove:
+ 	sta_info_hash_del(local, sta);
+ 	list_del_rcu(&sta->list);
++ out_drop_sta:
+ 	local->num_sta--;
+ 	synchronize_net();
+ 	__cleanup_single_sta(sta);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 90cf6168267e..cc1a7a4a7cbd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4638,6 +4638,8 @@ enum {
+ 	ALC255_FIXUP_DELL_SPK_NOISE,
+ 	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC280_FIXUP_HP_HEADSET_MIC,
++	ALC221_FIXUP_HP_FRONT_MIC,
++	ALC292_FIXUP_TPT460,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5270,6 +5272,19 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_HEADSET_MIC,
+ 	},
++	[ALC221_FIXUP_HP_FRONT_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x02a19020 }, /* Front Mic */
++			{ }
++		},
++	},
++	[ALC292_FIXUP_TPT460] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_tpt440_dock,
++		.chained = true,
++		.chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5374,6 +5389,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
++	SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5421,7 +5437,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460),
+ 	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+@@ -5514,6 +5530,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
+ 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
+ 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
++	{.id = ALC292_FIXUP_TPT460, .name = "tpt460"},
+ 	{}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index ddca6547399b..1f8fb0d904e0 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -349,6 +349,16 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ };
+ 
+ /*
++ * Dell usb dock with ALC4020 codec had a firmware problem where it got
++ * screwed up when zero volume is passed; just skip it as a workaround
++ */
++static const struct usbmix_name_map dell_alc4020_map[] = {
++	{ 16, NULL },
++	{ 19, NULL },
++	{ 0 }
++};
++
++/*
+  * Control map entries
+  */
+ 
+@@ -431,6 +441,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.map = aureon_51_2_map,
+ 	},
+ 	{
++		.id = USB_ID(0x0bda, 0x4014),
++		.map = dell_alc4020_map,
++	},
++	{
+ 		.id = USB_ID(0x0dba, 0x1000),
+ 		.map = mbox1_map,
+ 	},
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 5fb308d39e2a..5ad43cba860c 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1128,9 +1128,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++	case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
+ 	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}


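A note on the xhci hunks above, since one pattern recurs across them: the driver keeps a bitmask of lifecycle states (XHCI_STATE_DYING, XHCI_STATE_HALTED, and the new XHCI_STATE_REMOVING, which xhci_pci_remove() now sets before anything else), and paths that would start new work consult that mask first. Below is a minimal userspace sketch of that gating logic. It is illustrative only, not kernel code; the fake_* names and the standalone structure are invented for this sketch.

#include <stdio.h>
#include <errno.h>

#define STATE_DYING	(1 << 0)
#define STATE_HALTED	(1 << 1)
#define STATE_REMOVING	(1 << 2)

struct fake_hcd {
	unsigned int xhc_state;		/* bitmask of the flags above */
};

/* Mirrors the check the patch adds to queue_command(): no new commands
 * once the controller is dying or halted. */
static int fake_queue_command(struct fake_hcd *hcd)
{
	if (hcd->xhc_state & (STATE_DYING | STATE_HALTED))
		return -ESHUTDOWN;
	return 0;
}

/* Mirrors the check the patch adds to xhci_check_bandwidth(): a controller
 * being removed is treated like a dying one. */
static int fake_check_bandwidth(struct fake_hcd *hcd)
{
	if (hcd->xhc_state & (STATE_DYING | STATE_REMOVING))
		return -ENODEV;
	return 0;
}

int main(void)
{
	struct fake_hcd hcd = { .xhc_state = 0 };

	printf("running:  queue=%d bandwidth=%d\n",
	       fake_queue_command(&hcd), fake_check_bandwidth(&hcd));

	/* As in the patched xhci_pci_remove(): mark removal up front;
	 * the controller is only halted later in the teardown path. */
	hcd.xhc_state |= STATE_REMOVING;
	printf("removing: queue=%d bandwidth=%d\n",
	       fake_queue_command(&hcd), fake_check_bandwidth(&hcd));

	hcd.xhc_state |= STATE_HALTED;
	printf("halted:   queue=%d bandwidth=%d\n",
	       fake_queue_command(&hcd), fake_check_bandwidth(&hcd));
	return 0;
}

Building and running this (cc sketch.c && ./a.out) shows why REMOVING is a distinct flag: bandwidth checks start refusing (-ENODEV) as soon as removal begins, while command queuing is only cut off (-ESHUTDOWN) once the controller is actually halted or dying.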
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-04-22 18:06 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-04-22 18:06 UTC (permalink / raw
  To: gentoo-commits

commit:     53f63e4e0ebb503d06ae47030b9d66e1208de0e2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Apr 22 18:05:58 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Apr 22 18:05:58 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=53f63e4e

Update for the Gentoo-specific distro patch. Select DEVPTS_MULTIPLE_INSTANCES when GENTOO_LINUX_INIT_SYSTEMD is selected. Bug #579424.

 4567_distro-Gentoo-Kconfig.patch | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/4567_distro-Gentoo-Kconfig.patch b/4567_distro-Gentoo-Kconfig.patch
index 652e2a7..c7af596 100644
--- a/4567_distro-Gentoo-Kconfig.patch
+++ b/4567_distro-Gentoo-Kconfig.patch
@@ -1,5 +1,5 @@
---- a/Kconfig	2014-04-02 09:45:05.389224541 -0400
-+++ b/Kconfig	2014-04-02 09:45:39.269224273 -0400
+--- a/Kconfig
++++ b/Kconfig
 @@ -8,4 +8,6 @@ config SRCARCH
  	string
  	option env="SRCARCH"
@@ -7,9 +7,9 @@
 +source "distro/Kconfig"
 +
  source "arch/$SRCARCH/Kconfig"
---- 	1969-12-31 19:00:00.000000000 -0500
-+++ b/distro/Kconfig	2014-04-02 09:57:03.539218861 -0400
-@@ -0,0 +1,108 @@
+--- /dev/null
++++ b/distro/Kconfig
+@@ -0,0 +1,131 @@
 +menu "Gentoo Linux"
 +
 +config GENTOO_LINUX
@@ -30,7 +30,7 @@
 +
 +	depends on GENTOO_LINUX
 +	default y if GENTOO_LINUX
-+	
++
 +	select DEVTMPFS
 +	select TMPFS
 +
@@ -51,7 +51,29 @@
 +		boot process; if not available, it causes sysfs and udev to malfunction.
 +
 +		To ensure Gentoo Linux boots, it is best to leave this setting enabled;
-+		if you run a custom setup, you could consider whether to disable this. 
++		if you run a custom setup, you could consider whether to disable this.
++
++config GENTOO_LINUX_PORTAGE
++	bool "Select options required by Portage features"
++
++	depends on GENTOO_LINUX
++	default y if GENTOO_LINUX
++
++	select CGROUPS
++	select NAMESPACES
++	select IPC_NS
++	select NET_NS
++
++	help
++		This enables options required by various Portage FEATURES.
++		Currently this selects:
++
++		CGROUPS     (required for FEATURES=cgroup)
++		IPC_NS      (required for FEATURES=ipc-sandbox)
++		NET_NS      (required for FEATURES=network-sandbox)
++
++		It is highly recommended that you leave this enabled as these FEATURES
++		are, or will soon be, enabled by default.
 +
 +menu "Support for init systems, system and service managers"
 +	visible if GENTOO_LINUX
@@ -87,12 +109,13 @@
 +	select AUTOFS4_FS
 +	select BLK_DEV_BSG
 +	select CGROUPS
++	select DEVPTS_MULTIPLE_INSTANCES
 +	select EPOLL
 +	select FANOTIFY
 +	select FHANDLE
 +	select INOTIFY_USER
 +	select NET
-+	select NET_NS 
++	select NET_NS
 +	select PROC_FS
 +	select SIGNALFD
 +	select SYSFS


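The IPC_NS and NET_NS selects added above exist because Portage's ipc-sandbox and network-sandbox FEATURES detach each build into private namespaces. As a rough illustration of the underlying mechanism (this is not Portage's actual code, just a sketch of the syscall those features depend on), the following fails on a kernel built without these options:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>

int main(void)
{
	/* Needs root (or CAP_SYS_ADMIN); fails with EINVAL on kernels
	 * built without CONFIG_NET_NS. */
	if (unshare(CLONE_NEWNET) != 0) {
		fprintf(stderr, "unshare(CLONE_NEWNET): %s\n",
			strerror(errno));
		return 1;
	}
	printf("now in a private network namespace\n");
	return 0;
}

Run as root on a kernel with CONFIG_NET_NS=y and it succeeds; without the option it fails with EINVAL, which is the situation these selects are meant to prevent.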
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-04-06 11:23 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-04-06 11:23 UTC (permalink / raw
  To: gentoo-commits

commit:     1ebec2302e4819c5bb8ef97bafc6f513f794a23c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Apr  6 11:23:42 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Apr  6 11:23:42 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1ebec230

Linux patch 4.1.21

 0000_README             |    4 +
 1020_linux-4.1.21.patch | 2005 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2009 insertions(+)

diff --git a/0000_README b/0000_README
index 3e19785..cb424ff 100644
--- a/0000_README
+++ b/0000_README
@@ -123,6 +123,10 @@ Patch:  1019_linux-4.1.20.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.20
 
+Patch:  1020_linux-4.1.21.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.21
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1020_linux-4.1.21.patch b/1020_linux-4.1.21.patch
new file mode 100644
index 0000000..54f0496
--- /dev/null
+++ b/1020_linux-4.1.21.patch
@@ -0,0 +1,2005 @@
+diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
+index 4f6a82cef1d1..cbe35b3de9e9 100644
+--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
++++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
+@@ -23,6 +23,7 @@ Optional properties:
+   during suspend.
+ - ti,no-reset-on-init: When present, the module should not be reset at init
+ - ti,no-idle-on-init: When present, the module should not be idled at init
++- ti,no-idle: When present, the module is never allowed to idle.
+ 
+ Example:
+ 
+diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
+index c59bd9bc41ef..4176ab076f1c 100644
+--- a/Documentation/virtual/kvm/mmu.txt
++++ b/Documentation/virtual/kvm/mmu.txt
+@@ -352,7 +352,8 @@ In the first case there are two additional complications:
+ - if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+   the kernel may now execute it.  We handle this by also setting spte.nx.
+   If we get a user fetch or read fault, we'll change spte.u=1 and
+-  spte.nx=gpte.nx back.
++  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
++  shadow paging is in use.
+ - if CR4.SMAP is disabled: since the page has been changed to a kernel
+   page, it can not be reused when CR4.SMAP is enabled. We set
+   CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
+diff --git a/Makefile b/Makefile
+index 39be1bbd373a..79fab0d55218 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 20
++SUBLEVEL = 21
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index dfcc0dd637e5..bc04b754fe36 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -1411,6 +1411,16 @@
+ 			       0x48485200 0x2E00>;
+ 			#address-cells = <1>;
+ 			#size-cells = <1>;
++
++			/*
++			 * Do not allow gating of the cpsw clock as a workaround
++			 * for erratum i877. Keeping the internal clock disabled
++			 * causes the device switching characteristics
++			 * to degrade over time and eventually fail to meet
++			 * the data manual delay time/skew specs.
++			 */
++			ti,no-idle;
++
+ 			/*
+ 			 * rx_thresh_pend
+ 			 * rx_pend
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 5286e7773ed4..9185bb958503 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -876,6 +876,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
+ 	return ret;
+ }
+ 
++static void _enable_optional_clocks(struct omap_hwmod *oh)
++{
++	struct omap_hwmod_opt_clk *oc;
++	int i;
++
++	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
++
++	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
++		if (oc->_clk) {
++			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
++				 __clk_get_name(oc->_clk));
++			clk_enable(oc->_clk);
++		}
++}
++
++static void _disable_optional_clocks(struct omap_hwmod *oh)
++{
++	struct omap_hwmod_opt_clk *oc;
++	int i;
++
++	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
++
++	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
++		if (oc->_clk) {
++			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
++				 __clk_get_name(oc->_clk));
++			clk_disable(oc->_clk);
++		}
++}
++
+ /**
+  * _enable_clocks - enable hwmod main clock and interface clocks
+  * @oh: struct omap_hwmod *
+@@ -903,6 +933,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
+ 			clk_enable(os->_clk);
+ 	}
+ 
++	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
++		_enable_optional_clocks(oh);
++
+ 	/* The opt clocks are controlled by the device driver. */
+ 
+ 	return 0;
+@@ -934,41 +967,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
+ 			clk_disable(os->_clk);
+ 	}
+ 
++	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
++		_disable_optional_clocks(oh);
++
+ 	/* The opt clocks are controlled by the device driver. */
+ 
+ 	return 0;
+ }
+ 
+-static void _enable_optional_clocks(struct omap_hwmod *oh)
+-{
+-	struct omap_hwmod_opt_clk *oc;
+-	int i;
+-
+-	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+-
+-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+-		if (oc->_clk) {
+-			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+-				 __clk_get_name(oc->_clk));
+-			clk_enable(oc->_clk);
+-		}
+-}
+-
+-static void _disable_optional_clocks(struct omap_hwmod *oh)
+-{
+-	struct omap_hwmod_opt_clk *oc;
+-	int i;
+-
+-	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+-
+-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+-		if (oc->_clk) {
+-			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+-				 __clk_get_name(oc->_clk));
+-			clk_disable(oc->_clk);
+-		}
+-}
+-
+ /**
+  * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
+  * @oh: struct omap_hwmod *
+@@ -2180,6 +2186,11 @@ static int _enable(struct omap_hwmod *oh)
+  */
+ static int _idle(struct omap_hwmod *oh)
+ {
++	if (oh->flags & HWMOD_NO_IDLE) {
++		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
++		return 0;
++	}
++
+ 	pr_debug("omap_hwmod: %s: idling\n", oh->name);
+ 
+ 	if (oh->_state != _HWMOD_STATE_ENABLED) {
+@@ -2484,6 +2495,8 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ 			oh->flags |= HWMOD_INIT_NO_RESET;
+ 		if (of_find_property(np, "ti,no-idle-on-init", NULL))
+ 			oh->flags |= HWMOD_INIT_NO_IDLE;
++		if (of_find_property(np, "ti,no-idle", NULL))
++			oh->flags |= HWMOD_NO_IDLE;
+ 	}
+ 
+ 	oh->_state = _HWMOD_STATE_INITIALIZED;
+@@ -2610,7 +2623,7 @@ static void __init _setup_postsetup(struct omap_hwmod *oh)
+ 	 * XXX HWMOD_INIT_NO_IDLE does not belong in hwmod data -
+ 	 * it should be set by the core code as a runtime flag during startup
+ 	 */
+-	if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
++	if ((oh->flags & (HWMOD_INIT_NO_IDLE | HWMOD_NO_IDLE)) &&
+ 	    (postsetup_state == _HWMOD_STATE_IDLE)) {
+ 		oh->_int_flags |= _HWMOD_SKIP_ENABLE;
+ 		postsetup_state = _HWMOD_STATE_ENABLED;
+diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
+index 9611c91d9b82..ec289c5f099a 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.h
++++ b/arch/arm/mach-omap2/omap_hwmod.h
+@@ -517,6 +517,10 @@ struct omap_hwmod_omap4_prcm {
+  * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up 
+  *     events by calling _reconfigure_io_chain() when a device is enabled
+  *     or idled.
++ * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
++ *     operate and they need to be handled at the same time as the main_clk.
++ * HWMOD_NO_IDLE: Do not idle the hwmod at all. Useful to handle certain
++ *     IPs like CPSW on DRA7, where clocks to this module cannot be disabled.
+  */
+ #define HWMOD_SWSUP_SIDLE			(1 << 0)
+ #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
+@@ -532,6 +536,8 @@ struct omap_hwmod_omap4_prcm {
+ #define HWMOD_FORCE_MSTANDBY			(1 << 11)
+ #define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
+ #define HWMOD_RECONFIG_IO_CHAIN			(1 << 13)
++#define HWMOD_OPT_CLKS_NEEDED			(1 << 14)
++#define HWMOD_NO_IDLE				(1 << 15)
+ 
+ /*
+  * omap_hwmod._int_flags definitions
+diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
+index b056369fd47d..0d1d675f2cce 100644
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -184,20 +184,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
+ 
+ /* EL1 Single Step Handler hooks */
+ static LIST_HEAD(step_hook);
+-static DEFINE_RWLOCK(step_hook_lock);
++static DEFINE_SPINLOCK(step_hook_lock);
+ 
+ void register_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_add(&hook->node, &step_hook);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_add_rcu(&hook->node, &step_hook);
++	spin_unlock(&step_hook_lock);
+ }
+ 
+ void unregister_step_hook(struct step_hook *hook)
+ {
+-	write_lock(&step_hook_lock);
+-	list_del(&hook->node);
+-	write_unlock(&step_hook_lock);
++	spin_lock(&step_hook_lock);
++	list_del_rcu(&hook->node);
++	spin_unlock(&step_hook_lock);
++	synchronize_rcu();
+ }
+ 
+ /*
+@@ -211,15 +212,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+ 	struct step_hook *hook;
+ 	int retval = DBG_HOOK_ERROR;
+ 
+-	read_lock(&step_hook_lock);
++	rcu_read_lock();
+ 
+-	list_for_each_entry(hook, &step_hook, node)	{
++	list_for_each_entry_rcu(hook, &step_hook, node)	{
+ 		retval = hook->fn(regs, esr);
+ 		if (retval == DBG_HOOK_HANDLED)
+ 			break;
+ 	}
+ 
+-	read_unlock(&step_hook_lock);
++	rcu_read_unlock();
+ 
+ 	return retval;
+ }
+@@ -271,20 +272,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
+  * Use reader/writer locks instead of plain spinlock.
+  */
+ static LIST_HEAD(break_hook);
+-static DEFINE_RWLOCK(break_hook_lock);
++static DEFINE_SPINLOCK(break_hook_lock);
+ 
+ void register_break_hook(struct break_hook *hook)
+ {
+-	write_lock(&break_hook_lock);
+-	list_add(&hook->node, &break_hook);
+-	write_unlock(&break_hook_lock);
++	spin_lock(&break_hook_lock);
++	list_add_rcu(&hook->node, &break_hook);
++	spin_unlock(&break_hook_lock);
+ }
+ 
+ void unregister_break_hook(struct break_hook *hook)
+ {
+-	write_lock(&break_hook_lock);
+-	list_del(&hook->node);
+-	write_unlock(&break_hook_lock);
++	spin_lock(&break_hook_lock);
++	list_del_rcu(&hook->node);
++	spin_unlock(&break_hook_lock);
++	synchronize_rcu();
+ }
+ 
+ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+@@ -292,11 +294,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+ 	struct break_hook *hook;
+ 	int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+ 
+-	read_lock(&break_hook_lock);
+-	list_for_each_entry(hook, &break_hook, node)
++	rcu_read_lock();
++	list_for_each_entry_rcu(hook, &break_hook, node)
+ 		if ((esr & hook->esr_mask) == hook->esr_val)
+ 			fn = hook->fn;
+-	read_unlock(&break_hook_lock);
++	rcu_read_unlock();
+ 
+ 	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+ }
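
The rwlock-to-RCU conversion above is the standard fix for callback lists
walked from atomic or exception context: readers become lock-free, a plain
spinlock keeps writers serialized, and synchronize_rcu() delays teardown
until every reader is done. A sketch of the pattern with hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_hook {
	struct list_head node;
	int (*fn)(void *ctx);
};

static LIST_HEAD(my_hooks);
static DEFINE_SPINLOCK(my_hooks_lock);	/* serializes writers only */

void my_hook_register(struct my_hook *h)
{
	spin_lock(&my_hooks_lock);
	list_add_rcu(&h->node, &my_hooks);
	spin_unlock(&my_hooks_lock);
}

void my_hook_unregister(struct my_hook *h)
{
	spin_lock(&my_hooks_lock);
	list_del_rcu(&h->node);
	spin_unlock(&my_hooks_lock);
	/* Wait until no reader can still hold a reference to 'h'. */
	synchronize_rcu();
}

int my_hook_call(void *ctx)
{
	struct my_hook *h;
	int ret = -ENOENT;

	rcu_read_lock();	/* never blocks: safe in the exception path */
	list_for_each_entry_rcu(h, &my_hooks, node) {
		ret = h->fn(ctx);
		if (ret == 0)
			break;
	}
	rcu_read_unlock();
	return ret;
}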
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index a3b1ffe50aa0..c99e8a32bea4 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2103,11 +2103,11 @@ config CPU_R4K_CACHE_TLB
+ 
+ config MIPS_MT_SMP
+ 	bool "MIPS MT SMP support (1 TC on each available VPE)"
+-	depends on SYS_SUPPORTS_MULTITHREADING
++	depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6
+ 	select CPU_MIPSR2_IRQ_VI
+ 	select CPU_MIPSR2_IRQ_EI
+ 	select SYNC_R4K
+-	select MIPS_GIC_IPI
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select MIPS_MT
+ 	select SMP
+ 	select SMP_UP
+@@ -2204,8 +2204,8 @@ config MIPS_VPE_APSP_API_MT
+ 
+ config MIPS_CMP
+ 	bool "MIPS CMP framework support (DEPRECATED)"
+-	depends on SYS_SUPPORTS_MIPS_CMP
+-	select MIPS_GIC_IPI
++	depends on SYS_SUPPORTS_MIPS_CMP && !CPU_MIPSR6
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select SMP
+ 	select SYNC_R4K
+ 	select SYS_SUPPORTS_SMP
+@@ -2221,11 +2221,11 @@ config MIPS_CMP
+ 
+ config MIPS_CPS
+ 	bool "MIPS Coherent Processing System support"
+-	depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
++	depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
+ 	select MIPS_CM
+ 	select MIPS_CPC
+ 	select MIPS_CPS_PM if HOTPLUG_CPU
+-	select MIPS_GIC_IPI
++	select MIPS_GIC_IPI if MIPS_GIC
+ 	select SMP
+ 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+ 	select SYS_SUPPORTS_HOTPLUG_CPU
+@@ -2244,6 +2244,7 @@ config MIPS_CPS_PM
+ 	bool
+ 
+ config MIPS_GIC_IPI
++	depends on MIPS_GIC
+ 	bool
+ 
+ config MIPS_CM
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index f8338e6d3dd7..a34e43eec658 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1273,6 +1273,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
+ 	std	r6, VCPU_ACOP(r9)
+ 	stw	r7, VCPU_GUEST_PID(r9)
+ 	std	r8, VCPU_WORT(r9)
++	/*
++	 * Restore various registers to 0, where non-zero values
++	 * set by the guest could disrupt the host.
++	 */
++	li	r0, 0
++	mtspr	SPRN_IAMR, r0
++	mtspr	SPRN_CIABR, r0
++	mtspr	SPRN_DAWRX, r0
++	mtspr	SPRN_TCSCR, r0
++	mtspr	SPRN_WORT, r0
++	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
++	li	r0, 1
++	sldi	r0, r0, 31
++	mtspr	SPRN_MMCRS, r0
+ 8:
+ 
+ 	/* Save and reset AMR and UAMOR before turning on the MMU */
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index fb1b93ea3e3f..e485817f7b1a 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -15,17 +15,25 @@
+ static inline int init_new_context(struct task_struct *tsk,
+ 				   struct mm_struct *mm)
+ {
++	spin_lock_init(&mm->context.list_lock);
++	INIT_LIST_HEAD(&mm->context.pgtable_list);
++	INIT_LIST_HEAD(&mm->context.gmap_list);
+ 	cpumask_clear(&mm->context.cpu_attach_mask);
+ 	atomic_set(&mm->context.attach_count, 0);
+ 	mm->context.flush_mm = 0;
+-	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+-	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+ #ifdef CONFIG_PGSTE
+ 	mm->context.alloc_pgste = page_table_allocate_pgste;
+ 	mm->context.has_pgste = 0;
+ 	mm->context.use_skey = 0;
+ #endif
+-	mm->context.asce_limit = STACK_TOP_MAX;
++	if (mm->context.asce_limit == 0) {
++		/* context created by exec, set asce limit to 4TB */
++		mm->context.asce_bits = _ASCE_TABLE_LENGTH |
++			_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
++		mm->context.asce_limit = STACK_TOP_MAX;
++	} else if (mm->context.asce_limit == (1UL << 31)) {
++		mm_inc_nr_pmds(mm);
++	}
+ 	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
+ 	return 0;
+ }
+@@ -111,8 +119,6 @@ static inline void activate_mm(struct mm_struct *prev,
+ static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ 				 struct mm_struct *mm)
+ {
+-	if (oldmm->context.asce_limit < mm->context.asce_limit)
+-		crst_table_downgrade(mm, oldmm->context.asce_limit);
+ }
+ 
+ static inline void arch_exit_mmap(struct mm_struct *mm)
+diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
+index 7b7858f158b4..d7cc79fb6191 100644
+--- a/arch/s390/include/asm/pgalloc.h
++++ b/arch/s390/include/asm/pgalloc.h
+@@ -100,12 +100,26 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+ 
+ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+ {
+-	spin_lock_init(&mm->context.list_lock);
+-	INIT_LIST_HEAD(&mm->context.pgtable_list);
+-	INIT_LIST_HEAD(&mm->context.gmap_list);
+-	return (pgd_t *) crst_table_alloc(mm);
++	unsigned long *table = crst_table_alloc(mm);
++
++	if (!table)
++		return NULL;
++	if (mm->context.asce_limit == (1UL << 31)) {
++		/* Forking a compat process with 2 page table levels */
++		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
++			crst_table_free(mm, table);
++			return NULL;
++		}
++	}
++	return (pgd_t *) table;
++}
++
++static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
++{
++	if (mm->context.asce_limit == (1UL << 31))
++		pgtable_pmd_page_dtor(virt_to_page(pgd));
++	crst_table_free(mm, (unsigned long *) pgd);
+ }
+-#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
+ 
+ static inline void pmd_populate(struct mm_struct *mm,
+ 				pmd_t *pmd, pgtable_t pte)
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 945f9e13f1aa..917148620f49 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1674,6 +1674,13 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ 			return;
+ 		}
+ 		break;
++	case MSR_IA32_PEBS_ENABLE:
++		/* PEBS needs a quiescent period after being disabled (to write
++		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
++		 * provide that period, so a CPU could write host's record into
++		 * guest's memory.
++		 */
++		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ 	}
+ 
+ 	for (i = 0; i < m->nr; ++i)
+@@ -1711,26 +1718,31 @@ static void reload_tss(void)
+ 
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+ {
+-	u64 guest_efer;
+-	u64 ignore_bits;
++	u64 guest_efer = vmx->vcpu.arch.efer;
++	u64 ignore_bits = 0;
+ 
+-	guest_efer = vmx->vcpu.arch.efer;
++	if (!enable_ept) {
++		/*
++		 * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
++		 * host CPUID is more efficient than testing guest CPUID
++		 * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
++		 */
++		if (boot_cpu_has(X86_FEATURE_SMEP))
++			guest_efer |= EFER_NX;
++		else if (!(guest_efer & EFER_NX))
++			ignore_bits |= EFER_NX;
++	}
+ 
+ 	/*
+-	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
+-	 * outside long mode
++	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
+ 	 */
+-	ignore_bits = EFER_NX | EFER_SCE;
++	ignore_bits |= EFER_SCE;
+ #ifdef CONFIG_X86_64
+ 	ignore_bits |= EFER_LMA | EFER_LME;
+ 	/* SCE is meaningful only in long mode on Intel */
+ 	if (guest_efer & EFER_LMA)
+ 		ignore_bits &= ~(u64)EFER_SCE;
+ #endif
+-	guest_efer &= ~ignore_bits;
+-	guest_efer |= host_efer & ignore_bits;
+-	vmx->guest_msrs[efer_offset].data = guest_efer;
+-	vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+ 
+ 	clear_atomic_switch_msr(vmx, MSR_EFER);
+ 
+@@ -1741,16 +1753,21 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+ 	 */
+ 	if (cpu_has_load_ia32_efer ||
+ 	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
+-		guest_efer = vmx->vcpu.arch.efer;
+ 		if (!(guest_efer & EFER_LMA))
+ 			guest_efer &= ~EFER_LME;
+ 		if (guest_efer != host_efer)
+ 			add_atomic_switch_msr(vmx, MSR_EFER,
+ 					      guest_efer, host_efer);
+ 		return false;
+-	}
++	} else {
++		guest_efer &= ~ignore_bits;
++		guest_efer |= host_efer & ignore_bits;
+ 
+-	return true;
++		vmx->guest_msrs[efer_offset].data = guest_efer;
++		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
++
++		return true;
++	}
+ }
+ 
+ static unsigned long segment_base(u16 selector)
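
The rewritten update_transition_efer() above reduces to one masking
identity: bits named in ignore_bits are taken from the host EFER, every
other bit from the guest, and the shared-MSR mask becomes ~ignore_bits so
only guest-owned bits are tracked. A standalone sketch of the merge
(illustrative, not KVM's code):

#include <stdint.h>

/* Take the bits set in 'ignore' from 'host', all other bits from 'guest'. */
static uint64_t merge_efer(uint64_t guest, uint64_t host, uint64_t ignore)
{
	return (guest & ~ignore) | (host & ignore);
}

With EFER_SCE in ignore_bits outside long mode, for instance, the value
loaded on entry keeps the host's SCE setting while preserving the guest's
remaining bits.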
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 8d8c35623f2a..ffa809f30b19 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -176,6 +176,7 @@
+ #define AT_XDMAC_MAX_CHAN	0x20
+ #define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
+ #define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
++#define AT_XDMAC_RESIDUE_MAX_RETRIES	5
+ 
+ #define AT_XDMAC_DMA_BUSWIDTHS\
+ 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
+@@ -925,8 +926,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 	struct at_xdmac_desc	*desc, *_desc;
+ 	struct list_head	*descs_list;
+ 	enum dma_status		ret;
+-	int			residue;
+-	u32			cur_nda, mask, value;
++	int			residue, retry;
++	u32			cur_nda, check_nda, cur_ubc, mask, value;
+ 	u8			dwidth = 0;
+ 	unsigned long		flags;
+ 
+@@ -963,7 +964,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 			cpu_relax();
+ 	}
+ 
++	/*
++	 * When processing the residue, we need to read two registers but we
++	 * can't do it atomically. AT_XDMAC_CNDA is used to find where
++	 * we stand in the descriptor list and AT_XDMAC_CUBC is used
++	 * to know how much data remains for the current descriptor.
++	 * Since the DMA channel is not paused (so as not to lose data), the
++	 * current descriptor may change between the AT_XDMAC_CNDA and
++	 * AT_XDMAC_CUBC reads.
++	 * For that reason, after reading AT_XDMAC_CUBC, we check whether we
++	 * are still using the same descriptor by reading AT_XDMAC_CNDA a
++	 * second time. If AT_XDMAC_CNDA has changed, we have to read
++	 * AT_XDMAC_CUBC again.
++	 * Memory barriers are used to ensure the read order of the registers.
++	 * A maximum number of retries is set because, when transferring a lot
++	 * of data with small buffers, this loop could otherwise never end.
++	 */
+ 	cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++	rmb();
++	cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
++		rmb();
++		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
++
++		if (likely(cur_nda == check_nda))
++			break;
++
++		cur_nda = check_nda;
++		rmb();
++		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
++	}
++
++	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
++		ret = DMA_ERROR;
++		goto spin_unlock;
++	}
++
+ 	/*
+ 	 * Remove size of all microblocks already transferred and the current
+ 	 * one. Then add the remaining size to transfer of the current
+@@ -976,7 +1012,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+ 			break;
+ 	}
+-	residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
++	residue += cur_ubc << dwidth;
+ 
+ 	dma_set_residue(txstate, residue);
+ 
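
The CNDA/CUBC dance above is an instance of a general technique for
sampling two registers that cannot be read atomically while the hardware
keeps running: read A, read B, then re-read A; if A moved, retry, and bound
the retries so the loop terminates under heavy traffic. A sketch with
hypothetical reg_read()/register names standing in for the driver's
accessors:

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/barrier.h>

enum { REG_NDA, REG_UBC };	/* hypothetical register identifiers */
extern u32 reg_read(int reg);	/* stand-in for the real MMIO accessor */

#define MAX_RETRIES	5

static int read_pair_consistently(u32 *nda, u32 *ubc)
{
	u32 check;
	int retry;

	*nda = reg_read(REG_NDA);
	rmb();			/* order the NDA read before the UBC read */
	*ubc = reg_read(REG_UBC);

	for (retry = 0; retry < MAX_RETRIES; retry++) {
		rmb();		/* order the UBC read before the re-check */
		check = reg_read(REG_NDA);
		if (check == *nda)
			return 0;	/* both values belong together */
		*nda = check;		/* descriptor changed: resample UBC */
		rmb();
		*ubc = reg_read(REG_UBC);
	}
	return -EIO;	/* hardware kept advancing; report an error */
}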
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 447dbfa6c793..7ac42d063574 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -254,7 +254,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
+ #define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_LEVEL_3
+ #define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPH_LEVEL_3
+ 
+-static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
++static void dp_get_adjust_train(const u8 link_status[DP_LINK_STATUS_SIZE],
+ 				int lane_count,
+ 				u8 train_set[4])
+ {
+@@ -301,77 +301,43 @@ static int convert_bpc_to_bpp(int bpc)
+ 		return bpc * 3;
+ }
+ 
+-/* get the max pix clock supported by the link rate and lane num */
+-static int dp_get_max_dp_pix_clock(int link_rate,
+-				   int lane_num,
+-				   int bpp)
+-{
+-	return (link_rate * lane_num * 8) / bpp;
+-}
+-
+ /***** radeon specific DP functions *****/
+ 
+-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+-				u8 dpcd[DP_DPCD_SIZE])
+-{
+-	int max_link_rate;
+-
+-	if (radeon_connector_is_dp12_capable(connector))
+-		max_link_rate = min(drm_dp_max_link_rate(dpcd), 540000);
+-	else
+-		max_link_rate = min(drm_dp_max_link_rate(dpcd), 270000);
+-
+-	return max_link_rate;
+-}
+-
+-/* First get the min lane# when low rate is used according to pixel clock
+- * (prefer low rate), second check max lane# supported by DP panel,
+- * if the max lane# < low rate lane# then use max lane# instead.
+- */
+-static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
+-					u8 dpcd[DP_DPCD_SIZE],
+-					int pix_clock)
++int radeon_dp_get_dp_link_config(struct drm_connector *connector,
++				 const u8 dpcd[DP_DPCD_SIZE],
++				 unsigned pix_clock,
++				 unsigned *dp_lanes, unsigned *dp_rate)
+ {
+ 	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+-	int max_link_rate = radeon_dp_get_max_link_rate(connector, dpcd);
+-	int max_lane_num = drm_dp_max_lane_count(dpcd);
+-	int lane_num;
+-	int max_dp_pix_clock;
+-
+-	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
+-		max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
+-		if (pix_clock <= max_dp_pix_clock)
+-			break;
+-	}
+-
+-	return lane_num;
+-}
+-
+-static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
+-				       u8 dpcd[DP_DPCD_SIZE],
+-				       int pix_clock)
+-{
+-	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
+-	int lane_num, max_pix_clock;
++	static const unsigned link_rates[3] = { 162000, 270000, 540000 };
++	unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
++	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
++	unsigned lane_num, i, max_pix_clock;
+ 
+ 	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+-	    ENCODER_OBJECT_ID_NUTMEG)
+-		return 270000;
+-
+-	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
+-	max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
+-	if (pix_clock <= max_pix_clock)
+-		return 162000;
+-	max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
+-	if (pix_clock <= max_pix_clock)
+-		return 270000;
+-	if (radeon_connector_is_dp12_capable(connector)) {
+-		max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
+-		if (pix_clock <= max_pix_clock)
+-			return 540000;
++	    ENCODER_OBJECT_ID_NUTMEG) {
++		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++			max_pix_clock = (lane_num * 270000 * 8) / bpp;
++			if (max_pix_clock >= pix_clock) {
++				*dp_lanes = lane_num;
++				*dp_rate = 270000;
++				return 0;
++			}
++		}
++	} else {
++		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
++			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
++				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
++				if (max_pix_clock >= pix_clock) {
++					*dp_lanes = lane_num;
++					*dp_rate = link_rates[i];
++					return 0;
++				}
++			}
++		}
+ 	}
+ 
+-	return radeon_dp_get_max_link_rate(connector, dpcd);
++	return -EINVAL;
+ }
+ 
+ static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
+@@ -490,6 +456,7 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
+ {
+ 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 	struct radeon_connector_atom_dig *dig_connector;
++	int ret;
+ 
+ 	if (!radeon_connector->con_priv)
+ 		return;
+@@ -497,10 +464,14 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
+ 
+ 	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ 	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
+-		dig_connector->dp_clock =
+-			radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
+-		dig_connector->dp_lane_count =
+-			radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
++		ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
++						   mode->clock,
++						   &dig_connector->dp_lane_count,
++						   &dig_connector->dp_clock);
++		if (ret) {
++			dig_connector->dp_clock = 0;
++			dig_connector->dp_lane_count = 0;
++		}
+ 	}
+ }
+ 
+@@ -509,7 +480,8 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+ {
+ 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 	struct radeon_connector_atom_dig *dig_connector;
+-	int dp_clock;
++	unsigned dp_clock, dp_lanes;
++	int ret;
+ 
+ 	if ((mode->clock > 340000) &&
+ 	    (!radeon_connector_is_dp12_capable(connector)))
+@@ -519,8 +491,12 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
+ 		return MODE_CLOCK_HIGH;
+ 	dig_connector = radeon_connector->con_priv;
+ 
+-	dp_clock =
+-		radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
++	ret = radeon_dp_get_dp_link_config(connector, dig_connector->dpcd,
++					   mode->clock,
++					   &dp_lanes,
++					   &dp_clock);
++	if (ret)
++		return MODE_CLOCK_HIGH;
+ 
+ 	if ((dp_clock == 540000) &&
+ 	    (!radeon_connector_is_dp12_capable(connector)))
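
The search above picks the cheapest lane count/link rate pair whose
bandwidth, (lane_num * link_rate * 8) / bpp, covers the requested pixel
clock; link_rate is the symbol clock in kHz (8 data bits per symbol after
8b/10b) and mode->clock is the pixel clock in kHz. For a 148.5 MHz mode at
24 bpp, one lane at 162000 or 270000 (54000 and 90000 kHz of pixel
bandwidth) falls short, but one lane at 540000 yields 180000 kHz and wins
before two lanes are even tried. A standalone sketch of the same search
(illustrative, not the driver's code):

#include <stdio.h>

static int pick_link_config(unsigned pix_clock, unsigned bpp,
			    unsigned max_rate, unsigned max_lanes,
			    unsigned *lanes, unsigned *rate)
{
	static const unsigned rates[] = { 162000, 270000, 540000 };
	unsigned l, i;

	for (l = 1; l <= max_lanes; l <<= 1)
		for (i = 0; i < sizeof(rates) / sizeof(rates[0]) &&
			    rates[i] <= max_rate; i++)
			if ((l * rates[i] * 8) / bpp >= pix_clock) {
				*lanes = l;
				*rate = rates[i];
				return 0;
			}
	return -1;	/* mode exceeds what the sink can carry */
}

int main(void)
{
	unsigned lanes, rate;

	/* 1920x1080@60 (148.5 MHz) at 24 bpp on a 4-lane, HBR2 sink */
	if (pick_link_config(148500, 24, 540000, 4, &lanes, &rate) == 0)
		printf("%u lane(s) @ %u kHz\n", lanes, rate);
	return 0;	/* prints: 1 lane(s) @ 540000 kHz */
}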
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index c9ff4cf4c4e7..c4b4c0233937 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -520,11 +520,17 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
+ 	drm_mode_set_crtcinfo(adjusted_mode, 0);
+ 	{
+ 	  struct radeon_connector_atom_dig *dig_connector;
++	  int ret;
+ 
+ 	  dig_connector = mst_enc->connector->con_priv;
+-	  dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+-	  dig_connector->dp_clock = radeon_dp_get_max_link_rate(&mst_enc->connector->base,
+-								dig_connector->dpcd);
++	  ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
++					     dig_connector->dpcd, adjusted_mode->clock,
++					     &dig_connector->dp_lane_count,
++					     &dig_connector->dp_clock);
++	  if (ret) {
++		  dig_connector->dp_lane_count = 0;
++		  dig_connector->dp_clock = 0;
++	  }
+ 	  DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+ 			dig_connector->dp_lane_count, dig_connector->dp_clock);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index 9af2d8398e90..43ba333949c7 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -752,8 +752,10 @@ extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+ extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+ extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ 				    struct drm_connector *connector);
+-int radeon_dp_get_max_link_rate(struct drm_connector *connector,
+-				u8 *dpcd);
++extern int radeon_dp_get_dp_link_config(struct drm_connector *connector,
++					const u8 *dpcd,
++					unsigned pix_clock,
++					unsigned *dp_lanes, unsigned *dp_rate);
+ extern void radeon_dp_set_rx_power_state(struct drm_connector *connector,
+ 					 u8 power_state);
+ extern void radeon_dp_aux_init(struct radeon_connector *radeon_connector);
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index a56eab7f0ab1..8319eed613b0 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1079,6 +1079,8 @@ force:
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
+ 
+ 	/* wait for the rings to drain */
+ 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+@@ -1095,9 +1097,6 @@ force:
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+-
+ 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+ 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+ 	rdev->pm.dpm.single_display = single_display;
+diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
+index 8b4d3e6875eb..21924f52863f 100644
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
+ static void gs_destroy_candev(struct gs_can *dev)
+ {
+ 	unregister_candev(dev->netdev);
+-	free_candev(dev->netdev);
+ 	usb_kill_anchored_urbs(&dev->tx_submitted);
+-	kfree(dev);
++	free_candev(dev->netdev);
+ }
+ 
+ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
+ 	for (i = 0; i < icount; i++) {
+ 		dev->canch[i] = gs_make_candev(i, intf);
+ 		if (IS_ERR_OR_NULL(dev->canch[i])) {
++			/* save error code to return later */
++			rc = PTR_ERR(dev->canch[i]);
++
+ 			/* on failure destroy previously created candevs */
+ 			icount = i;
+-			for (i = 0; i < icount; i++) {
++			for (i = 0; i < icount; i++)
+ 				gs_destroy_candev(dev->canch[i]);
+-				dev->canch[i] = NULL;
+-			}
++
++			usb_kill_anchored_urbs(&dev->rx_submitted);
+ 			kfree(dev);
+ 			return rc;
+ 		}
+@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
+ 		return;
+ 	}
+ 
+-	for (i = 0; i < GS_MAX_INTF; i++) {
+-		struct gs_can *can = dev->canch[i];
+-
+-		if (!can)
+-			continue;
+-
+-		gs_destroy_candev(can);
+-	}
++	for (i = 0; i < GS_MAX_INTF; i++)
++		if (dev->canch[i])
++			gs_destroy_candev(dev->canch[i]);
+ 
+ 	usb_kill_anchored_urbs(&dev->rx_submitted);
++	kfree(dev);
+ }
+ 
+ static const struct usb_device_id gs_usb_table[] = {
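
Two rules are at work in the gs_usb fix: capture the error code from the
failed gs_make_candev() before unwinding, and, in gs_destroy_candev(), kill
outstanding URBs before freeing the structure their completion handlers
dereference. The unwind half, as a generic sketch (make_one() and
destroy_one() are hypothetical):

#include <linux/err.h>
#include <linux/errno.h>

struct thing;
extern struct thing *make_one(int i);	/* hypothetical constructor */
extern void destroy_one(struct thing *t);

static int create_all(struct thing *t[], int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		t[i] = make_one(i);
		if (IS_ERR_OR_NULL(t[i])) {
			/* Save the error before the unwind clobbers 'i'. */
			rc = IS_ERR(t[i]) ? (int)PTR_ERR(t[i]) : -ENOMEM;
			while (i-- > 0)	/* undo only what was created */
				destroy_one(t[i]);
			return rc;
		}
	}
	return 0;
}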
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index 281451c274ca..771097f2162d 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -370,6 +370,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+ 		return -1;
+ 	}
+ 
++	/*
++	 * Increase the pending frames counter, so that later when a reply comes
++	 * in and the counter is decreased - we don't start getting negative
++	 * values.
++	 * Note that we don't need to make sure it isn't agg'd, since we're
++	 * TXing non-sta
++	 */
++	atomic_inc(&mvm->pending_frames[sta_id]);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index ad48837ead42..eed7c5a31b15 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -181,7 +181,6 @@ void core_tmr_abort_task(
+ 
+ 		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+-			target_put_sess_cmd(se_cmd);
+ 			goto out;
+ 		}
+ 
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 9a83f149ac85..95dfff88de11 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -873,6 +873,15 @@ struct ext4_inode_info {
+ 	 * by other means, so we have i_data_sem.
+ 	 */
+ 	struct rw_semaphore i_data_sem;
++	/*
++	 * i_mmap_sem is for serializing page faults with truncate / punch hole
++	 * operations. We have to make sure that a new page cannot be faulted
++	 * into a section of the inode that is being punched. We cannot easily use
++	 * i_data_sem for this since we need protection for the whole punch
++	 * operation and i_data_sem ranks below transaction start so we have
++	 * to occasionally drop it.
++	 */
++	struct rw_semaphore i_mmap_sem;
+ 	struct inode vfs_inode;
+ 	struct jbd2_inode *jinode;
+ 
+@@ -2287,6 +2296,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
+ extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
+ 			     loff_t lstart, loff_t lend);
+ extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
++extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ extern qsize_t *ext4_get_reserved_space(struct inode *inode);
+ extern void ext4_da_update_reserve_space(struct inode *inode,
+ 					int used, int quota_claim);
+@@ -2632,6 +2642,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
+ 	return changed;
+ }
+ 
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++				      loff_t len);
++
+ struct ext4_group_info {
+ 	unsigned long   bb_state;
+ 	struct rb_root  bb_free_root;
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 87ba10d1d3bc..ea12f565be24 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4741,7 +4741,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	int partial_begin, partial_end;
+ 	loff_t start, end;
+ 	ext4_lblk_t lblk;
+-	struct address_space *mapping = inode->i_mapping;
+ 	unsigned int blkbits = inode->i_blkbits;
+ 
+ 	trace_ext4_zero_range(inode, offset, len, mode);
+@@ -4757,17 +4756,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	}
+ 
+ 	/*
+-	 * Write out all dirty pages to avoid race conditions
+-	 * Then release them.
+-	 */
+-	if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+-		ret = filemap_write_and_wait_range(mapping, offset,
+-						   offset + len - 1);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	/*
+ 	 * Round up offset. This is not fallocate, we need to zero out
+ 	 * blocks, so convert interior block aligned part of the range to
+ 	 * unwritten and possibly manually zero out unaligned parts of the
+@@ -4810,6 +4798,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 	if (mode & FALLOC_FL_KEEP_SIZE)
+ 		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+ 
++	/* Wait all existing dio workers, newcomers will block on i_mutex */
++	ext4_inode_block_unlocked_dio(inode);
++	inode_dio_wait(inode);
++
+ 	/* Preallocate the range including the unaligned edges */
+ 	if (partial_begin || partial_end) {
+ 		ret = ext4_alloc_file_blocks(file,
+@@ -4818,7 +4810,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 				 round_down(offset, 1 << blkbits)) >> blkbits,
+ 				new_size, flags, mode);
+ 		if (ret)
+-			goto out_mutex;
++			goto out_dio;
+ 
+ 	}
+ 
+@@ -4827,16 +4819,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ 		flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
+ 			  EXT4_EX_NOCACHE);
+ 
+-		/* Now release the pages and zero block aligned part of pages*/
++		/*
++		 * Prevent page faults from reinstantiating pages we have
++		 * released from page cache.
++		 */
++		down_write(&EXT4_I(inode)->i_mmap_sem);
++		ret = ext4_update_disksize_before_punch(inode, offset, len);
++		if (ret) {
++			up_write(&EXT4_I(inode)->i_mmap_sem);
++			goto out_dio;
++		}
++		/* Now release the pages and zero block aligned part of pages */
+ 		truncate_pagecache_range(inode, start, end - 1);
+ 		inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ 
+-		/* Wait all existing dio workers, newcomers will block on i_mutex */
+-		ext4_inode_block_unlocked_dio(inode);
+-		inode_dio_wait(inode);
+-
+ 		ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ 					     flags, mode);
++		up_write(&EXT4_I(inode)->i_mmap_sem);
+ 		if (ret)
+ 			goto out_dio;
+ 	}
+@@ -4964,8 +4963,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
+ 			goto out;
+ 	}
+ 
++	/* Wait all existing dio workers, newcomers will block on i_mutex */
++	ext4_inode_block_unlocked_dio(inode);
++	inode_dio_wait(inode);
++
+ 	ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ 				     flags, mode);
++	ext4_inode_resume_unlocked_dio(inode);
+ 	if (ret)
+ 		goto out;
+ 
+@@ -5424,21 +5428,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 			return ret;
+ 	}
+ 
+-	/*
+-	 * Need to round down offset to be aligned with page size boundary
+-	 * for page size > block size.
+-	 */
+-	ioffset = round_down(offset, PAGE_SIZE);
+-
+-	/* Write out all dirty pages */
+-	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
+-					   LLONG_MAX);
+-	if (ret)
+-		return ret;
+-
+-	/* Take mutex lock */
+ 	mutex_lock(&inode->i_mutex);
+-
+ 	/*
+ 	 * There is no need to overlap collapse range with EOF, in which case
+ 	 * it is effectively a truncate operation
+@@ -5454,17 +5444,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 		goto out_mutex;
+ 	}
+ 
+-	truncate_pagecache(inode, ioffset);
+-
+ 	/* Wait for existing dio to complete */
+ 	ext4_inode_block_unlocked_dio(inode);
+ 	inode_dio_wait(inode);
+ 
++	/*
++	 * Prevent page faults from reinstantiating pages we have released from
++	 * page cache.
++	 */
++	down_write(&EXT4_I(inode)->i_mmap_sem);
++	/*
++	 * Need to round down offset to be aligned with page size boundary
++	 * for page size > block size.
++	 */
++	ioffset = round_down(offset, PAGE_SIZE);
++	/*
++	 * Write tail of the last page before removed range since it will get
++	 * removed from the page cache below.
++	 */
++	ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
++	if (ret)
++		goto out_mmap;
++	/*
++	 * Write out data that will be shifted, to preserve it when discarding
++	 * the page cache below. We are also protected from pages becoming dirty
++	 * by i_mmap_sem.
++	 */
++	ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
++					   LLONG_MAX);
++	if (ret)
++		goto out_mmap;
++	truncate_pagecache(inode, ioffset);
++
+ 	credits = ext4_writepage_trans_blocks(inode);
+ 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+-		goto out_dio;
++		goto out_mmap;
+ 	}
+ 
+ 	down_write(&EXT4_I(inode)->i_data_sem);
+@@ -5503,7 +5519,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+ 
+ out_stop:
+ 	ext4_journal_stop(handle);
+-out_dio:
++out_mmap:
++	up_write(&EXT4_I(inode)->i_mmap_sem);
+ 	ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+ 	mutex_unlock(&inode->i_mutex);
+diff --git a/fs/ext4/file.c b/fs/ext4/file.c
+index 0613c256c344..dd65fac5ff2f 100644
+--- a/fs/ext4/file.c
++++ b/fs/ext4/file.c
+@@ -213,7 +213,7 @@ static const struct vm_operations_struct ext4_dax_vm_ops = {
+ #endif
+ 
+ static const struct vm_operations_struct ext4_file_vm_ops = {
+-	.fault		= filemap_fault,
++	.fault		= ext4_filemap_fault,
+ 	.map_pages	= filemap_map_pages,
+ 	.page_mkwrite   = ext4_page_mkwrite,
+ };
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 2b3a53a51582..3291e1af0e24 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3524,6 +3524,35 @@ int ext4_can_truncate(struct inode *inode)
+ }
+ 
+ /*
++ * We have to make sure i_disksize gets properly updated before we truncate
++ * the page cache due to hole punching or zero range. Otherwise the i_disksize
++ * update can get lost, as it may have been postponed until writeback is
++ * submitted - and that will never happen once we truncate the page cache.
++ */
++int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
++				      loff_t len)
++{
++	handle_t *handle;
++	loff_t size = i_size_read(inode);
++
++	WARN_ON(!mutex_is_locked(&inode->i_mutex));
++	if (offset > size || offset + len < size)
++		return 0;
++
++	if (EXT4_I(inode)->i_disksize >= size)
++		return 0;
++
++	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
++	if (IS_ERR(handle))
++		return PTR_ERR(handle);
++	ext4_update_i_disksize(inode, size);
++	ext4_mark_inode_dirty(handle, inode);
++	ext4_journal_stop(handle);
++
++	return 0;
++}
++
++/*
+  * ext4_punch_hole: punches a hole in a file by releasing the blocks
+  * associated with the given offset and length
+  *
+@@ -3588,17 +3617,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 
+ 	}
+ 
++	/* Wait all existing dio workers, newcomers will block on i_mutex */
++	ext4_inode_block_unlocked_dio(inode);
++	inode_dio_wait(inode);
++
++	/*
++	 * Prevent page faults from reinstantiating pages we have released from
++	 * page cache.
++	 */
++	down_write(&EXT4_I(inode)->i_mmap_sem);
+ 	first_block_offset = round_up(offset, sb->s_blocksize);
+ 	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
+ 
+ 	/* Now release the pages and zero block aligned part of pages*/
+-	if (last_block_offset > first_block_offset)
++	if (last_block_offset > first_block_offset) {
++		ret = ext4_update_disksize_before_punch(inode, offset, length);
++		if (ret)
++			goto out_dio;
+ 		truncate_pagecache_range(inode, first_block_offset,
+ 					 last_block_offset);
+-
+-	/* Wait all existing dio workers, newcomers will block on i_mutex */
+-	ext4_inode_block_unlocked_dio(inode);
+-	inode_dio_wait(inode);
++	}
+ 
+ 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ 		credits = ext4_writepage_trans_blocks(inode);
+@@ -3645,16 +3683,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+ 	if (IS_SYNC(inode))
+ 		ext4_handle_sync(handle);
+ 
+-	/* Now release the pages again to reduce race window */
+-	if (last_block_offset > first_block_offset)
+-		truncate_pagecache_range(inode, first_block_offset,
+-					 last_block_offset);
+-
+ 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+ 	ext4_mark_inode_dirty(handle, inode);
+ out_stop:
+ 	ext4_journal_stop(handle);
+ out_dio:
++	up_write(&EXT4_I(inode)->i_mmap_sem);
+ 	ext4_inode_resume_unlocked_dio(inode);
+ out_mutex:
+ 	mutex_unlock(&inode->i_mutex);
+@@ -4775,11 +4809,13 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
+ 			} else
+ 				ext4_wait_for_tail_page_commit(inode);
+ 		}
++		down_write(&EXT4_I(inode)->i_mmap_sem);
+ 		/*
+ 		 * Truncate pagecache after we've waited for commit
+ 		 * in data=journal mode to make pages freeable.
+ 		 */
+ 		truncate_pagecache(inode, inode->i_size);
++		up_write(&EXT4_I(inode)->i_mmap_sem);
+ 	}
+ 	/*
+ 	 * We want to call ext4_truncate() even if attr->ia_size ==
+@@ -5234,6 +5270,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 
+ 	sb_start_pagefault(inode->i_sb);
+ 	file_update_time(vma->vm_file);
++
++	down_read(&EXT4_I(inode)->i_mmap_sem);
+ 	/* Delalloc case is easy... */
+ 	if (test_opt(inode->i_sb, DELALLOC) &&
+ 	    !ext4_should_journal_data(inode) &&
+@@ -5303,6 +5341,19 @@ retry_alloc:
+ out_ret:
+ 	ret = block_page_mkwrite_return(ret);
+ out:
++	up_read(&EXT4_I(inode)->i_mmap_sem);
+ 	sb_end_pagefault(inode->i_sb);
+ 	return ret;
+ }
++
++int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++	struct inode *inode = file_inode(vma->vm_file);
++	int err;
++
++	down_read(&EXT4_I(inode)->i_mmap_sem);
++	err = filemap_fault(vma, vmf);
++	up_read(&EXT4_I(inode)->i_mmap_sem);
++
++	return err;
++}
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 8a3b9f14d198..6f5ca3e92246 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -945,6 +945,7 @@ static void init_once(void *foo)
+ 	INIT_LIST_HEAD(&ei->i_orphan);
+ 	init_rwsem(&ei->xattr_sem);
+ 	init_rwsem(&ei->i_data_sem);
++	init_rwsem(&ei->i_mmap_sem);
+ 	inode_init_once(&ei->vfs_inode);
+ }
+ 
+diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
+index 011ba6670d99..c70d06a383e2 100644
+--- a/fs/ext4/truncate.h
++++ b/fs/ext4/truncate.h
+@@ -10,8 +10,10 @@
+  */
+ static inline void ext4_truncate_failed_write(struct inode *inode)
+ {
++	down_write(&EXT4_I(inode)->i_mmap_sem);
+ 	truncate_inode_pages(inode->i_mapping, inode->i_size);
+ 	ext4_truncate(inode);
++	up_write(&EXT4_I(inode)->i_mmap_sem);
+ }
+ 
+ /*
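
All of the ext4 hunks above follow one locking discipline: page faults take
the new i_mmap_sem shared, while hole punching, zero range, collapse range,
and truncate take it exclusive around the page-cache purge, so a racing
fault cannot reinstantiate a page whose blocks are about to be freed. A
minimal sketch of that discipline (hypothetical helpers):

#include <linux/rwsem.h>

static DECLARE_RWSEM(mmap_rwsem);	/* per-inode in the real series */

extern int do_fault(void);		/* hypothetical, e.g. filemap_fault() */
extern void purge_page_cache(void);	/* hypothetical truncation helpers */
extern void free_blocks(void);

int fault_path(void)
{
	int err;

	down_read(&mmap_rwsem);		/* faults can run concurrently... */
	err = do_fault();
	up_read(&mmap_rwsem);
	return err;
}

void punch_hole_path(void)
{
	down_write(&mmap_rwsem);	/* ...but never against a punch: */
	purge_page_cache();		/* no page can be faulted back in */
	free_blocks();			/* while its blocks are being freed */
	up_write(&mmap_rwsem);
}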
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index 1ba5c97943b8..cfbceb116356 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -845,9 +845,14 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
+ 
+ 		pr_notice("%s(): Link succeeded, unlink failed (err %d). You now have a hard link\n",
+ 			  __func__, ret);
+-		/* Might as well let the VFS know */
+-		d_instantiate(new_dentry, d_inode(old_dentry));
+-		ihold(d_inode(old_dentry));
++		/*
++		 * We can't keep the target in dcache after that.
++		 * For one thing, we can't afford dentry aliases for directories.
++		 * For another, if there was a victim, we _can't_ set new inode
++		 * for that sucker and we have to trigger mount eviction - the
++		 * caller won't do it on its own since we are returning an error.
++		 */
++		d_invalidate(new_dentry);
+ 		new_dir_i->i_mtime = new_dir_i->i_ctime = ITIME(now);
+ 		return ret;
+ 	}
+diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
+index 80021c709af9..0c2632386f35 100644
+--- a/fs/ncpfs/dir.c
++++ b/fs/ncpfs/dir.c
+@@ -633,7 +633,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx,
+ 				d_rehash(newdent);
+ 		} else {
+ 			spin_lock(&dentry->d_lock);
+-			NCP_FINFO(inode)->flags &= ~NCPI_DIR_CACHE;
++			NCP_FINFO(dir)->flags &= ~NCPI_DIR_CACHE;
+ 			spin_unlock(&dentry->d_lock);
+ 		}
+ 	} else {
+diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
+index 692ceda3bc21..a2b1d7ce3e1a 100644
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -618,7 +618,8 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
+ 	 * sole user of this dentry.  Too tricky...  Just unhash for
+ 	 * now.
+ 	 */
+-	d_drop(dentry);
++	if (!err)
++		d_drop(dentry);
+ 	mutex_unlock(&dir->i_mutex);
+ 
+ 	return err;
+@@ -903,6 +904,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
+ 	if (!overwrite && new_is_dir && !old_opaque && new_opaque)
+ 		ovl_remove_opaque(newdentry);
+ 
++	/*
++	 * The old dentry now lives in a different location. Dentries in
++	 * its lowerstack are stale. We cannot drop them here because
++	 * access to them is lockless. This can only be a pure upper or an
++	 * opaque directory - numlower is zero. Or an upper non-dir
++	 * entry - its pureness is tracked by the opaque flag.
++	 */
+ 	if (old_opaque != new_opaque) {
+ 		ovl_dentry_set_opaque(old, new_opaque);
+ 		if (!overwrite)
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index a1b069e5e363..e505b44a9184 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -66,6 +66,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 	if (upperdentry) {
+ 		mutex_lock(&upperdentry->d_inode->i_mutex);
+ 		err = notify_change(upperdentry, attr, NULL);
++		if (!err)
++			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
+ 		mutex_unlock(&upperdentry->d_inode->i_mutex);
+ 	} else {
+ 		err = ovl_copy_up_last(dentry, attr, false);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index bd6d5c1e667d..39266655d2bd 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -76,12 +76,14 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry)
+ 	if (oe->__upperdentry) {
+ 		type = __OVL_PATH_UPPER;
+ 
+-		if (oe->numlower) {
+-			if (S_ISDIR(dentry->d_inode->i_mode))
+-				type |= __OVL_PATH_MERGE;
+-		} else if (!oe->opaque) {
++		/*
++		 * Non-dir dentry can hold lower dentry from previous
++		 * location. Its purity depends only on opaque flag.
++		 */
++		if (oe->numlower && S_ISDIR(dentry->d_inode->i_mode))
++			type |= __OVL_PATH_MERGE;
++		else if (!oe->opaque)
+ 			type |= __OVL_PATH_PURE;
+-		}
+ 	} else {
+ 		if (oe->numlower > 1)
+ 			type |= __OVL_PATH_MERGE;
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index a6e1bca88cc6..8454fb35fcbe 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -131,9 +131,6 @@ extern void syscall_unregfunc(void);
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
+-		if (!cpu_online(raw_smp_processor_id()))		\
+-			return;						\
+-									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+@@ -332,15 +329,19 @@ extern void syscall_unregfunc(void);
+  * "void *__data, proto" as the callback prototype.
+  */
+ #define DECLARE_TRACE_NOARGS(name)					\
+-		__DECLARE_TRACE(name, void, , 1, void *__data, __data)
++	__DECLARE_TRACE(name, void, ,					\
++			cpu_online(raw_smp_processor_id()),		\
++			void *__data, __data)
+ 
+ #define DECLARE_TRACE(name, proto, args)				\
+-		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), 1,	\
+-				PARAMS(void *__data, proto),		\
+-				PARAMS(__data, args))
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()),		\
++			PARAMS(void *__data, proto),			\
++			PARAMS(__data, args))
+ 
+ #define DECLARE_TRACE_CONDITION(name, proto, args, cond)		\
+-	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
++	__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),		\
++			cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \
+ 			PARAMS(void *__data, proto),			\
+ 			PARAMS(__data, args))
+ 
+diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
+index 8f81bbbc38fc..e0f4109e64c6 100644
+--- a/include/net/iw_handler.h
++++ b/include/net/iw_handler.h
+@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
+ /* Send a single event to user space */
+ void wireless_send_event(struct net_device *dev, unsigned int cmd,
+ 			 union iwreq_data *wrqu, const char *extra);
++#ifdef CONFIG_WEXT_CORE
++/* flush all previous wext events - if work is done from netdev notifiers */
++void wireless_nlevent_flush(void);
++#else
++static inline void wireless_nlevent_flush(void) {}
++#endif
+ 
+ /* We may need a function to send a stream of events to user space.
+  * More on that later... */
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index e1af58e23bee..66e6568a4736 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1562,14 +1562,14 @@ event_sched_out(struct perf_event *event,
+ 
+ 	perf_pmu_disable(event->pmu);
+ 
++	event->tstamp_stopped = tstamp;
++	event->pmu->del(event, 0);
++	event->oncpu = -1;
+ 	event->state = PERF_EVENT_STATE_INACTIVE;
+ 	if (event->pending_disable) {
+ 		event->pending_disable = 0;
+ 		event->state = PERF_EVENT_STATE_OFF;
+ 	}
+-	event->tstamp_stopped = tstamp;
+-	event->pmu->del(event, 0);
+-	event->oncpu = -1;
+ 
+ 	if (!is_software_event(event))
+ 		cpuctx->active_oncpu--;
+@@ -7641,6 +7641,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ 		}
+ 	}
+ 
++	/* symmetric to unaccount_event() in _free_event() */
++	account_event(event);
++
+ 	return event;
+ 
+ err_per_task:
+@@ -8004,8 +8007,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ 		}
+ 	}
+ 
+-	account_event(event);
+-
+ 	/*
+ 	 * Special case software events and allow them to be part of
+ 	 * any hardware group.
+@@ -8221,7 +8222,12 @@ err_context:
+ 	perf_unpin_context(ctx);
+ 	put_ctx(ctx);
+ err_alloc:
+-	free_event(event);
++	/*
++	 * If event_file is set, the fput() above will have called ->release()
++	 * and that will take care of freeing the event.
++	 */
++	if (!event_file)
++		free_event(event);
+ err_cpus:
+ 	put_online_cpus();
+ err_task:
+@@ -8265,8 +8271,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ 	/* Mark owner so we could distinguish it from user events. */
+ 	event->owner = EVENT_OWNER_KERNEL;
+ 
+-	account_event(event);
+-
+ 	ctx = find_get_context(event->pmu, task, event);
+ 	if (IS_ERR(ctx)) {
+ 		err = PTR_ERR(ctx);
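
The perf hunks apply two lifetime rules: accounting moves into
perf_event_alloc() so that it is symmetric with the unaccounting done in
_free_event(), and the error path skips free_event() once the event is
reachable through a file, because the fput() will invoke ->release() and
free it a second time. The symmetry rule, as a generic sketch (hypothetical
names):

#include <linux/slab.h>

struct obj { int dummy; };

extern void account_obj(struct obj *o);		/* hypothetical bookkeeping */
extern void unaccount_obj(struct obj *o);

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	/* Symmetric to unaccount_obj() in obj_free(): every path that
	 * frees the object also unaccounts it exactly once. */
	account_obj(o);
	return o;
}

static void obj_free(struct obj *o)
{
	unaccount_obj(o);
	kfree(o);
}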
+diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
+index 5c564a68fb50..d71edcbd0c58 100644
+--- a/net/mac80211/agg-rx.c
++++ b/net/mac80211/agg-rx.c
+@@ -289,7 +289,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
+ 	}
+ 
+ 	/* prepare A-MPDU MLME for Rx aggregation */
+-	tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
++	tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
+ 	if (!tid_agg_rx)
+ 		goto end;
+ 
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index c0a9187bc3a9..cdf8609a6240 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -90,7 +90,7 @@ struct ieee80211_fragment_entry {
+ 	unsigned int last_frag;
+ 	unsigned int extra_len;
+ 	struct sk_buff_head skb_list;
+-	int ccmp; /* Whether fragments were encrypted with CCMP */
++	bool check_sequential_pn; /* needed for CCMP/GCMP */
+ 	u8 last_pn[6]; /* PN of the last fragment if CCMP was used */
+ };
+ 
+diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
+index 3ece7d1034c8..b54f398cda5d 100644
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
+ 	 * computing cur_tp
+ 	 */
+ 	tmp_mrs = &mi->r[idx].stats;
+-	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
++	tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
+ 	tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
+ 
+ 	return tmp_cur_tp;
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 7430a1df2ab1..1ec889dc2e46 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -691,7 +691,7 @@ minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
+ 	if (likely(sta->ampdu_mlme.tid_tx[tid]))
+ 		return;
+ 
+-	ieee80211_start_tx_ba_session(pubsta, tid, 5000);
++	ieee80211_start_tx_ba_session(pubsta, tid, 0);
+ }
+ 
+ static void
+@@ -1328,7 +1328,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
+ 	prob = mi->groups[i].rates[j].prob_ewma;
+ 
+ 	/* convert tp_avg from packets per second to kbps */
+-	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
++	tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
++	tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
+ 
+ 	return tp_avg;
+ }
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index 5793f75c5ffd..d4b08d87537c 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -1725,7 +1725,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
+ 	entry->seq = seq;
+ 	entry->rx_queue = rx_queue;
+ 	entry->last_frag = frag;
+-	entry->ccmp = 0;
++	entry->check_sequential_pn = false;
+ 	entry->extra_len = 0;
+ 
+ 	return entry;
+@@ -1821,15 +1821,27 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 						 rx->seqno_idx, &(rx->skb));
+ 		if (rx->key &&
+ 		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
+-		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256) &&
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
++		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
+ 		    ieee80211_has_protected(fc)) {
+ 			int queue = rx->security_idx;
+-			/* Store CCMP PN so that we can verify that the next
+-			 * fragment has a sequential PN value. */
+-			entry->ccmp = 1;
++
++			/* Store CCMP/GCMP PN so that we can verify that the
++			 * next fragment has a sequential PN value.
++			 */
++			entry->check_sequential_pn = true;
+ 			memcpy(entry->last_pn,
+ 			       rx->key->u.ccmp.rx_pn[queue],
+ 			       IEEE80211_CCMP_PN_LEN);
++			BUILD_BUG_ON(offsetof(struct ieee80211_key,
++					      u.ccmp.rx_pn) !=
++				     offsetof(struct ieee80211_key,
++					      u.gcmp.rx_pn));
++			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
++				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
++			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
++				     IEEE80211_GCMP_PN_LEN);
+ 		}
+ 		return RX_QUEUED;
+ 	}
+@@ -1844,15 +1856,21 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
+ 		return RX_DROP_MONITOR;
+ 	}
+ 
+-	/* Verify that MPDUs within one MSDU have sequential PN values.
+-	 * (IEEE 802.11i, 8.3.3.4.5) */
+-	if (entry->ccmp) {
++	/* "The receiver shall discard MSDUs and MMPDUs whose constituent
++	 *  MPDU PN values are not incrementing in steps of 1."
++	 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
++	 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
++	 */
++	if (entry->check_sequential_pn) {
+ 		int i;
+ 		u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
+ 		int queue;
++
+ 		if (!rx->key ||
+ 		    (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP &&
+-		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256))
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 &&
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP &&
++		     rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256))
+ 			return RX_DROP_UNUSABLE;
+ 		memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
+ 		for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
+@@ -3359,6 +3377,7 @@ static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
+ 				return false;
+ 			/* ignore action frames to TDLS-peers */
+ 			if (ieee80211_is_action(hdr->frame_control) &&
++			    !is_broadcast_ether_addr(bssid) &&
+ 			    !ether_addr_equal(bssid, hdr->addr1))
+ 				return false;
+ 		}
+diff --git a/net/wireless/core.c b/net/wireless/core.c
+index 2a0bbd22854b..71e9b84847f3 100644
+--- a/net/wireless/core.c
++++ b/net/wireless/core.c
+@@ -1138,6 +1138,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
+ 		return NOTIFY_DONE;
+ 	}
+ 
++	wireless_nlevent_flush();
++
+ 	return NOTIFY_OK;
+ }
+ 
+diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
+index c8717c1d082e..b50ee5d622e1 100644
+--- a/net/wireless/wext-core.c
++++ b/net/wireless/wext-core.c
+@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
+ 
+ /* IW event code */
+ 
++void wireless_nlevent_flush(void)
++{
++	struct sk_buff *skb;
++	struct net *net;
++
++	ASSERT_RTNL();
++
++	for_each_net(net) {
++		while ((skb = skb_dequeue(&net->wext_nlevents)))
++			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
++				    GFP_KERNEL);
++	}
++}
++EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
++
++static int wext_netdev_notifier_call(struct notifier_block *nb,
++				     unsigned long state, void *ptr)
++{
++	/*
++	 * When a netdev changes state in any way, flush all pending messages
++	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
++	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
++	 * or similar - all of which could otherwise happen due to delays from
++	 * schedule_work().
++	 */
++	wireless_nlevent_flush();
++
++	return NOTIFY_OK;
++}
++
++static struct notifier_block wext_netdev_notifier = {
++	.notifier_call = wext_netdev_notifier_call,
++};
++
+ static int __net_init wext_pernet_init(struct net *net)
+ {
+ 	skb_queue_head_init(&net->wext_nlevents);
+@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
+ 
+ static int __init wireless_nlevent_init(void)
+ {
+-	return register_pernet_subsys(&wext_pernet_ops);
++	int err = register_pernet_subsys(&wext_pernet_ops);
++
++	if (err)
++		return err;
++
++	return register_netdevice_notifier(&wext_netdev_notifier);
+ }
+ 
+ subsys_initcall(wireless_nlevent_init);
+@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
+ /* Process events generated by the wireless layer or the driver. */
+ static void wireless_nlevent_process(struct work_struct *work)
+ {
+-	struct sk_buff *skb;
+-	struct net *net;
+-
+ 	rtnl_lock();
+-
+-	for_each_net(net) {
+-		while ((skb = skb_dequeue(&net->wext_nlevents)))
+-			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+-				    GFP_KERNEL);
+-	}
+-
++	wireless_nlevent_flush();
+ 	rtnl_unlock();
+ }
+ 
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index 198580d245e0..1659b409ef10 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -1,7 +1,7 @@
+ #!/usr/bin/awk -f
+ # extract linker version number from stdin and turn into single number
+ 	{
+-	gsub(".*)", "");
++	gsub(".*\\)", "");
+ 	split($1,a, ".");
+ 	print a[1]*10000000 + a[2]*100000 + a[3]*10000 + a[4]*100 + a[5];
+ 	exit
+diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
+index c799cca5abeb..6b864c0fc2b6 100644
+--- a/sound/soc/codecs/wm8958-dsp2.c
++++ b/sound/soc/codecs/wm8958-dsp2.c
+@@ -459,7 +459,7 @@ static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -549,7 +549,7 @@ static int wm8958_put_vss_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -582,7 +582,7 @@ static int wm8958_put_vss_hpf_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+@@ -749,7 +749,7 @@ static int wm8958_put_enh_eq_enum(struct snd_kcontrol *kcontrol,
+ 	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+ 	struct wm8994 *control = wm8994->wm8994;
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 	int reg;
+ 
+ 	/* Don't allow on the fly reconfiguration */
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index a1c04dab6684..a484ca8421af 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -361,7 +361,7 @@ static int wm8994_put_drc_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int drc = wm8994_get_drc(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (drc < 0)
+ 		return drc;
+@@ -468,7 +468,7 @@ static int wm8994_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
+ 	struct wm8994 *control = wm8994->wm8994;
+ 	struct wm8994_pdata *pdata = &control->pdata;
+ 	int block = wm8994_get_retune_mobile_block(kcontrol->id.name);
+-	int value = ucontrol->value.integer.value[0];
++	int value = ucontrol->value.enumerated.item[0];
+ 
+ 	if (block < 0)
+ 		return block;
+diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
+index d01c2095452f..431d94397219 100644
+--- a/sound/soc/codecs/wm_adsp.c
++++ b/sound/soc/codecs/wm_adsp.c
+@@ -248,7 +248,7 @@ static int wm_adsp_fw_get(struct snd_kcontrol *kcontrol,
+ 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ 	struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
+ 
+-	ucontrol->value.integer.value[0] = adsp[e->shift_l].fw;
++	ucontrol->value.enumerated.item[0] = adsp[e->shift_l].fw;
+ 
+ 	return 0;
+ }
+@@ -260,16 +260,16 @@ static int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
+ 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
+ 	struct wm_adsp *adsp = snd_soc_codec_get_drvdata(codec);
+ 
+-	if (ucontrol->value.integer.value[0] == adsp[e->shift_l].fw)
++	if (ucontrol->value.enumerated.item[0] == adsp[e->shift_l].fw)
+ 		return 0;
+ 
+-	if (ucontrol->value.integer.value[0] >= WM_ADSP_NUM_FW)
++	if (ucontrol->value.enumerated.item[0] >= WM_ADSP_NUM_FW)
+ 		return -EINVAL;
+ 
+ 	if (adsp[e->shift_l].running)
+ 		return -EBUSY;
+ 
+-	adsp[e->shift_l].fw = ucontrol->value.integer.value[0];
++	adsp[e->shift_l].fw = ucontrol->value.enumerated.item[0];
+ 
+ 	return 0;
+ }
+diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
+index b92ab40d2be6..5e8ccb0a7028 100644
+--- a/sound/soc/samsung/i2s.c
++++ b/sound/soc/samsung/i2s.c
+@@ -480,10 +480,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
+ 	unsigned int cdcon_mask = 1 << i2s_regs->cdclkcon_off;
+ 	unsigned int rsrc_mask = 1 << i2s_regs->rclksrc_off;
+ 	u32 mod, mask, val = 0;
++	unsigned long flags;
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	switch (clk_id) {
+ 	case SAMSUNG_I2S_OPCLK:
+@@ -574,11 +575,11 @@ static int i2s_set_sysclk(struct snd_soc_dai *dai,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	mod = (mod & ~mask) | val;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -589,6 +590,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	struct i2s_dai *i2s = to_info(dai);
+ 	int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave;
+ 	u32 mod, tmp = 0;
++	unsigned long flags;
+ 
+ 	lrp_shift = i2s->variant_regs->lrp_off;
+ 	sdf_shift = i2s->variant_regs->sdf_off;
+@@ -648,7 +650,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	/*
+ 	 * Don't change the I2S mode if any controller is active on this
+@@ -656,7 +658,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	 */
+ 	if (any_active(i2s) &&
+ 		((mod & (sdf_mask | lrp_rlow | mod_slave)) != tmp)) {
+-		spin_unlock(i2s->lock);
++		spin_unlock_irqrestore(i2s->lock, flags);
+ 		dev_err(&i2s->pdev->dev,
+ 				"%s:%d Other DAI busy\n", __func__, __LINE__);
+ 		return -EAGAIN;
+@@ -665,7 +667,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
+ 	mod &= ~(sdf_mask | lrp_rlow | mod_slave);
+ 	mod |= tmp;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -675,6 +677,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
+ {
+ 	struct i2s_dai *i2s = to_info(dai);
+ 	u32 mod, mask = 0, val = 0;
++	unsigned long flags;
+ 
+ 	if (!is_secondary(i2s))
+ 		mask |= (MOD_DC2_EN | MOD_DC1_EN);
+@@ -743,11 +746,11 @@ static int i2s_hw_params(struct snd_pcm_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 
+-	spin_lock(i2s->lock);
++	spin_lock_irqsave(i2s->lock, flags);
+ 	mod = readl(i2s->addr + I2SMOD);
+ 	mod = (mod & ~mask) | val;
+ 	writel(mod, i2s->addr + I2SMOD);
+-	spin_unlock(i2s->lock);
++	spin_unlock_irqrestore(i2s->lock, flags);
+ 
+ 	samsung_asoc_init_dma_data(dai, &i2s->dma_playback, &i2s->dma_capture);
+ 
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index b6c12dccb259..28df6adf362b 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -3324,7 +3324,7 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
+ {
+ 	struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.integer.value[0] = w->params_select;
++	ucontrol->value.enumerated.item[0] = w->params_select;
+ 
+ 	return 0;
+ }
+@@ -3338,13 +3338,13 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
+ 	if (w->power)
+ 		return -EBUSY;
+ 
+-	if (ucontrol->value.integer.value[0] == w->params_select)
++	if (ucontrol->value.enumerated.item[0] == w->params_select)
+ 		return 0;
+ 
+-	if (ucontrol->value.integer.value[0] >= w->num_params)
++	if (ucontrol->value.enumerated.item[0] >= w->num_params)
+ 		return -EINVAL;
+ 
+-	w->params_select = ucontrol->value.integer.value[0];
++	w->params_select = ucontrol->value.enumerated.item[0];
+ 
+ 	return 0;
+ }

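A note on the recurring ALSA hunks above (wm8958-dsp2, wm8994, wm_adsp, soc-dapm): struct snd_ctl_elem_value keeps integer and enumerated payloads in different union members, and enumerated kcontrols have to be accessed through value.enumerated.item[] (an unsigned int array) rather than value.integer.value[] (a long array). The two members only happen to line up on little-endian machines; on 64-bit big-endian systems reading the wrong member picks up the wrong word, which is what these one-line conversions fix. A minimal sketch of a correct get/put pair for an enum control follows — the foo_* names and the private struct are illustrative only, not taken from the patch:

#include <sound/soc.h>

/* Hypothetical driver state, for illustration only. */
struct foo_priv {
	unsigned int mode;
};

static int foo_mode_get(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct foo_priv *foo = snd_soc_codec_get_drvdata(codec);

	/* Enum controls use the .enumerated union member. */
	ucontrol->value.enumerated.item[0] = foo->mode;
	return 0;
}

static int foo_mode_put(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
	struct foo_priv *foo = snd_soc_codec_get_drvdata(codec);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;

	/* Bounds-check against the enum's item count before storing. */
	if (ucontrol->value.enumerated.item[0] >= e->items)
		return -EINVAL;

	foo->mode = ucontrol->value.enumerated.item[0];
	return 0;
}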

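The sound/soc/samsung/i2s.c hunks, similarly, convert plain spin_lock() calls to spin_lock_irqsave(): the usual reason for this conversion, and the likely one here, is that the same lock is also taken from atomic context with interrupts disabled (e.g. the trigger path), so every acquisition must be IRQ-safe or an interrupt arriving while the lock is held can deadlock spinning on the owning CPU. The pattern, sketched with hypothetical names (dev_lock, dev_mod and my_irq are illustrative, not the driver's symbols):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);
static u32 dev_mod;			/* register shadow, illustrative */

void update_mode(u32 mask, u32 val)	/* process context */
{
	unsigned long flags;

	/* Save the current IRQ state and disable local interrupts so
	 * the handler below cannot preempt us while we hold dev_lock. */
	spin_lock_irqsave(&dev_lock, flags);
	dev_mod = (dev_mod & ~mask) | val;
	spin_unlock_irqrestore(&dev_lock, flags);
}

irqreturn_t my_irq(int irq, void *data)	/* hardirq context */
{
	/* Interrupts are already off here; plain spin_lock() suffices. */
	spin_lock(&dev_lock);
	/* ... consume dev_mod ... */
	spin_unlock(&dev_lock);
	return IRQ_HANDLED;
}
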
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-03-22 22:47 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-03-22 22:47 UTC
  To: gentoo-commits

commit:     f8fc60510d50d9fd52e4e90cc7516c393a341555
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Mar 22 22:47:43 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Mar 22 22:47:43 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=f8fc6051

Update for gcc 4.9 CPU optimization patch. See bug #572108

 5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
index c4efd06..418201d 100644
--- a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -283,7 +283,7 @@ gcc version >=4.9
  	int
  	default "7" if MPENTIUM4 || MPSC
 -	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
-+	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || BROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
  	default "4" if MELAN || M486 || MGEODEGX1
  	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
  


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-03-17 22:52 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-03-17 22:52 UTC
  To: gentoo-commits

commit:     57f73735ff7386b833f9793004b2a35de5cb2703
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Mar 17 22:52:12 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Mar 17 22:52:12 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=57f73735

Linux patch 4.1.20

 0000_README             |    4 +
 1019_linux-4.1.20.patch | 3955 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3959 insertions(+)

diff --git a/0000_README b/0000_README
index ad1d372..3e19785 100644
--- a/0000_README
+++ b/0000_README
@@ -119,6 +119,10 @@ Patch:  1018_linux-4.1.19.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.19
 
+Patch:  1019_linux-4.1.20.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.20
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1019_linux-4.1.20.patch b/1019_linux-4.1.20.patch
new file mode 100644
index 0000000..75d3dea
--- /dev/null
+++ b/1019_linux-4.1.20.patch
@@ -0,0 +1,3955 @@
+diff --git a/Makefile b/Makefile
+index 06107f683bbe..39be1bbd373a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 19
++SUBLEVEL = 20
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
+index d503fbb787d3..88993cc95e8e 100644
+--- a/arch/arm/kvm/guest.c
++++ b/arch/arm/kvm/guest.c
+@@ -155,7 +155,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	u64 val;
+ 
+ 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
+-	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
++	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
+ }
+ 
+ static unsigned long num_core_regs(void)
+diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
+index 9535bd555d1d..d4e04d2237c4 100644
+--- a/arch/arm64/kvm/guest.c
++++ b/arch/arm64/kvm/guest.c
+@@ -184,7 +184,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
+ 	u64 val;
+ 
+ 	val = kvm_arm_timer_get_reg(vcpu, reg->id);
+-	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
++	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
+ }
+ 
+ /**
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index 5f5f44edc77d..54923d6b7e16 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -693,15 +693,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+ asmlinkage void do_ov(struct pt_regs *regs)
+ {
+ 	enum ctx_state prev_state;
+-	siginfo_t info;
++	siginfo_t info = {
++		.si_signo = SIGFPE,
++		.si_code = FPE_INTOVF,
++		.si_addr = (void __user *)regs->cp0_epc,
++	};
+ 
+ 	prev_state = exception_enter();
+ 	die_if_kernel("Integer overflow", regs);
+ 
+-	info.si_code = FPE_INTOVF;
+-	info.si_signo = SIGFPE;
+-	info.si_errno = 0;
+-	info.si_addr = (void __user *) regs->cp0_epc;
+ 	force_sig_info(SIGFPE, &info, current);
+ 	exception_exit(prev_state);
+ }
+@@ -877,7 +877,7 @@ out:
+ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ 	const char *str)
+ {
+-	siginfo_t info;
++	siginfo_t info = { 0 };
+ 	char b[40];
+ 
+ #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+@@ -905,7 +905,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ 		else
+ 			info.si_code = FPE_INTOVF;
+ 		info.si_signo = SIGFPE;
+-		info.si_errno = 0;
+ 		info.si_addr = (void __user *) regs->cp0_epc;
+ 		force_sig_info(SIGFPE, &info, current);
+ 		break;
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 22ee0afc7d5d..ace4ed7d41c6 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -700,7 +700,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+ 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ 		void __user *uaddr = (void __user *)(long)reg->addr;
+ 
+-		return copy_to_user(uaddr, vs, 16);
++		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
+ 	} else {
+ 		return -EINVAL;
+ 	}
+@@ -730,7 +730,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+ 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
+ 		void __user *uaddr = (void __user *)(long)reg->addr;
+ 
+-		return copy_from_user(vs, uaddr, 16);
++		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
+ 	} else {
+ 		return -EINVAL;
+ 	}
+diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
+index 9585c81f755f..ce0b2b4075c7 100644
+--- a/arch/parisc/kernel/ptrace.c
++++ b/arch/parisc/kernel/ptrace.c
+@@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ 
+ long do_syscall_trace_enter(struct pt_regs *regs)
+ {
+-	long ret = 0;
+-
+ 	/* Do the secure computing check first. */
+ 	secure_computing_strict(regs->gr[20]);
+ 
+ 	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+-	    tracehook_report_syscall_entry(regs))
+-		ret = -1L;
++	    tracehook_report_syscall_entry(regs)) {
++		/*
++		 * Tracing decided this syscall should not happen or the
++		 * debugger stored an invalid system call number. Skip
++		 * the system call and the system call restart handling.
++		 */
++		regs->gr[20] = -1UL;
++		goto out;
++	}
+ 
+ #ifdef CONFIG_64BIT
+ 	if (!is_compat_task())
+@@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
+ 			regs->gr[24] & 0xffffffff,
+ 			regs->gr[23] & 0xffffffff);
+ 
+-	return ret ? : regs->gr[20];
++out:
++	return regs->gr[20];
+ }
+ 
+ void do_syscall_trace_exit(struct pt_regs *regs)
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 0b8d26d3ba43..02cf40c96fe3 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -343,7 +343,7 @@ tracesys_next:
+ #endif
+ 
+ 	comiclr,>>=	__NR_Linux_syscalls, %r20, %r0
+-	b,n	.Lsyscall_nosys
++	b,n	.Ltracesys_nosys
+ 
+ 	LDREGX  %r20(%r19), %r19
+ 
+@@ -359,6 +359,9 @@ tracesys_next:
+ 	be      0(%sr7,%r19)
+ 	ldo	R%tracesys_exit(%r2),%r2
+ 
++.Ltracesys_nosys:
++	ldo	-ENOSYS(%r0),%r28		/* set errno */
++
+ 	/* Do *not* call this function on the gateway page, because it
+ 	makes a direct call to syscall_trace. */
+ 	
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index c78ba51ae285..24b7e554db27 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -293,7 +293,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
+ 
+ 	/* Restore high gprs from signal stack */
+ 	if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
+-			     sizeof(&sregs_ext->gprs_high)))
++			     sizeof(sregs_ext->gprs_high)))
+ 		return -EFAULT;
+ 	for (i = 0; i < NUM_GPRS; i++)
+ 		*(__u32 *)&regs->gprs[i] = gprs_high[i];
+diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
+index d1daead5fcdd..adb3eaf8fe2a 100644
+--- a/arch/x86/kernel/acpi/sleep.c
++++ b/arch/x86/kernel/acpi/sleep.c
+@@ -16,6 +16,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/realmode.h>
+ 
++#include <linux/ftrace.h>
+ #include "../../realmode/rm/wakeup.h"
+ #include "sleep.h"
+ 
+@@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
+        saved_magic = 0x123456789abcdef0L;
+ #endif /* CONFIG_64BIT */
+ 
++	/*
++	 * Pause/unpause graph tracing around do_suspend_lowlevel as it has
++	 * inconsistent call/return info after it jumps to the wakeup vector.
++	 */
++	pause_graph_tracing();
+ 	do_suspend_lowlevel();
++	unpause_graph_tracing();
+ 	return 0;
+ }
+ 
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
+index 6e6d115fe9b5..d537c9badeb6 100644
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -257,7 +257,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
+ 			return ret;
+ 
+ 		mark_page_dirty(vcpu->kvm, table_gfn);
+-		walker->ptes[level] = pte;
++		walker->ptes[level - 1] = pte;
+ 	}
+ 	return 0;
+ }
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index fed4c84eac44..41a3fb4ed346 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2117,6 +2117,8 @@ static void accumulate_steal_time(struct kvm_vcpu *vcpu)
+ 
+ static void record_steal_time(struct kvm_vcpu *vcpu)
+ {
++	accumulate_steal_time(vcpu);
++
+ 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ 		return;
+ 
+@@ -2262,12 +2264,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (!(data & KVM_MSR_ENABLED))
+ 			break;
+ 
+-		vcpu->arch.st.last_steal = current->sched_info.run_delay;
+-
+-		preempt_disable();
+-		accumulate_steal_time(vcpu);
+-		preempt_enable();
+-
+ 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ 
+ 		break;
+@@ -2966,7 +2962,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ 		vcpu->cpu = cpu;
+ 	}
+ 
+-	accumulate_steal_time(vcpu);
+ 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ }
+ 
+@@ -6371,12 +6366,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
+ 	 * KVM_DEBUGREG_WONT_EXIT again.
+ 	 */
+ 	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
+-		int i;
+-
+ 		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
+ 		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
+-		for (i = 0; i < KVM_NR_DB_REGS; i++)
+-			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
++		kvm_update_dr0123(vcpu);
++		kvm_update_dr6(vcpu);
++		kvm_update_dr7(vcpu);
++		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
+ 	}
+ 
+ 	/*
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index f738c61bc891..6a3c774eaff6 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -142,7 +142,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ 		break;
+ 	}
+ 
+-	if (regno > nr_registers) {
++	if (regno >= nr_registers) {
+ 		WARN_ONCE(1, "decoded an instruction with an invalid register");
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/android/binder.c b/drivers/android/binder.c
+index 6607f3c6ace1..f1a26d937d98 100644
+--- a/drivers/android/binder.c
++++ b/drivers/android/binder.c
+@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
+ 			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+ 				return -EFAULT;
+ 
+-			ptr += sizeof(void *);
++			ptr += sizeof(cookie);
+ 			list_for_each_entry(w, &proc->delivered_death, entry) {
+ 				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ 
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 666fd8a1500a..34825d63d483 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -332,6 +332,16 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
++	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
+ 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
+@@ -362,6 +372,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+ 	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
++	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
++	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
++	{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
+ 
+ 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
+ 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 0d7f0da3a269..ae7cfcb562dc 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
+ int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
+ 		     int cmd, void __user *arg)
+ {
+-	int val = -EINVAL, rc = -EINVAL;
++	unsigned long val;
++	int rc = -EINVAL;
+ 	unsigned long flags;
+ 
+ 	switch (cmd) {
+-	case ATA_IOC_GET_IO32:
++	case HDIO_GET_32BIT:
+ 		spin_lock_irqsave(ap->lock, flags);
+ 		val = ata_ioc32(ap);
+ 		spin_unlock_irqrestore(ap->lock, flags);
+-		if (copy_to_user(arg, &val, 1))
+-			return -EFAULT;
+-		return 0;
++		return put_user(val, (unsigned long __user *)arg);
+ 
+-	case ATA_IOC_SET_IO32:
++	case HDIO_SET_32BIT:
+ 		val = (unsigned long) arg;
+ 		rc = 0;
+ 		spin_lock_irqsave(ap->lock, flags);
+diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
+index 035dacc93382..fd5c5f3370f6 100644
+--- a/drivers/gpu/drm/ast/ast_main.c
++++ b/drivers/gpu/drm/ast/ast_main.c
+@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
+ 	} while (ast_read32(ast, 0x10000) != 0x01);
+ 	data = ast_read32(ast, 0x10004);
+ 
+-	if (data & 0x400)
++	if (data & 0x40)
+ 		ast->dram_bus_width = 16;
+ 	else
+ 		ast->dram_bus_width = 32;
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 91764320c56f..a56eab7f0ab1 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1079,12 +1079,6 @@ force:
+ 
+ 	/* update display watermarks based on new power state */
+ 	radeon_bandwidth_update(rdev);
+-	/* update displays */
+-	radeon_dpm_display_configuration_changed(rdev);
+-
+-	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+-	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+-	rdev->pm.dpm.single_display = single_display;
+ 
+ 	/* wait for the rings to drain */
+ 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
+@@ -1101,6 +1095,13 @@ force:
+ 
+ 	radeon_dpm_post_set_power_state(rdev);
+ 
++	/* update displays */
++	radeon_dpm_display_configuration_changed(rdev);
++
++	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
++	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
++	rdev->pm.dpm.single_display = single_display;
++
+ 	if (rdev->asic->dpm.force_performance_level) {
+ 		if (rdev->pm.dpm.thermal_active) {
+ 			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
+diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
+index f155b8380481..2b3105c8aed3 100644
+--- a/drivers/hwmon/ads1015.c
++++ b/drivers/hwmon/ads1015.c
+@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
+ 	struct ads1015_data *data = i2c_get_clientdata(client);
+ 	unsigned int pga = data->channel_data[channel].pga;
+ 	int fullscale = fullscale_table[pga];
+-	const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
++	const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ 
+ 	return DIV_ROUND_CLOSEST(reg * fullscale, mask);
+ }
+diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
+index a3dae6d0082a..83ea8c8039fa 100644
+--- a/drivers/hwmon/gpio-fan.c
++++ b/drivers/hwmon/gpio-fan.c
+@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
+ 				  unsigned long *state)
+ {
+ 	struct gpio_fan_data *fan_data = cdev->devdata;
+-	int r;
+ 
+ 	if (!fan_data)
+ 		return -EINVAL;
+ 
+-	r = get_fan_speed_index(fan_data);
+-	if (r < 0)
+-		return r;
+-
+-	*state = r;
++	*state = fan_data->speed_index;
+ 	return 0;
+ }
+ 
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 450ef5001a65..1750db0ef61c 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -227,6 +227,10 @@ static enum iommu_init_state init_state = IOMMU_START_STATE;
+ static int amd_iommu_enable_interrupts(void);
+ static int __init iommu_go_to_state(enum iommu_init_state state);
+ 
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++				    u8 bank, u8 cntr, u8 fxn,
++				    u64 *value, bool is_write);
++
+ static inline void update_last_devid(u16 devid)
+ {
+ 	if (devid > amd_iommu_last_bdf)
+@@ -1066,6 +1070,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
+ }
+ 
+ /*
++ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
++ * Workaround:
++ *     BIOS should enable ATS write permission check by setting
++ *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
++ */
++static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
++{
++	u32 value;
++
++	if ((boot_cpu_data.x86 != 0x15) ||
++	    (boot_cpu_data.x86_model < 0x30) ||
++	    (boot_cpu_data.x86_model > 0x3f))
++		return;
++
++	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
++	value = iommu_read_l2(iommu, 0x47);
++
++	if (value & BIT(0))
++		return;
++
++	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
++	iommu_write_l2(iommu, 0x47, value | BIT(0));
++
++	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
++		dev_name(&iommu->dev->dev));
++}
++
++/*
+  * This function clues the initialization function for one IOMMU
+  * together and also allocates the command buffer and programs the
+  * hardware. It does NOT enable the IOMMU. This is done afterwards.
+@@ -1192,8 +1224,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
+ 	amd_iommu_pc_present = true;
+ 
+ 	/* Check if the performance counters can be written to */
+-	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
+-	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
++	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
++	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
+ 	    (val != val2)) {
+ 		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
+ 		amd_iommu_pc_present = false;
+@@ -1339,6 +1371,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
+ 	}
+ 
+ 	amd_iommu_erratum_746_workaround(iommu);
++	amd_iommu_ats_write_check_workaround(iommu);
+ 
+ 	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
+ 					       amd_iommu_groups, "ivhd%d",
+@@ -2362,22 +2395,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
+ 
+-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
++				    u8 bank, u8 cntr, u8 fxn,
+ 				    u64 *value, bool is_write)
+ {
+-	struct amd_iommu *iommu;
+ 	u32 offset;
+ 	u32 max_offset_lim;
+ 
+-	/* Make sure the IOMMU PC resource is available */
+-	if (!amd_iommu_pc_present)
+-		return -ENODEV;
+-
+-	/* Locate the iommu associated with the device ID */
+-	iommu = amd_iommu_rlookup_table[devid];
+-
+ 	/* Check for valid iommu and pc register indexing */
+-	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
++	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
+ 		return -ENODEV;
+ 
+ 	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
+@@ -2401,3 +2427,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+ 	return 0;
+ }
+ EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
++
++int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
++				    u64 *value, bool is_write)
++{
++	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
++
++	/* Make sure the IOMMU PC resource is available */
++	if (!amd_iommu_pc_present || iommu == NULL)
++		return -ENODEV;
++
++	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
++					value, is_write);
++}
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 8b72ceee0f61..62610aafaac7 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1146,6 +1146,8 @@ static void dm_unprep_request(struct request *rq)
+ 
+ 	if (clone)
+ 		free_rq_clone(clone);
++	else if (!tio->md->queue->mq_ops)
++		free_rq_tio(tio);
+ }
+ 
+ /*
+diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
+index 60ffcf098bef..5f92ec23bb07 100644
+--- a/drivers/media/i2c/adv7604.c
++++ b/drivers/media/i2c/adv7604.c
+@@ -1911,10 +1911,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+ 	}
+ 
+ 	/* tx 5v detect */
+-	tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
++	tx_5v = irq_reg_0x70 & info->cable_det_mask;
+ 	if (tx_5v) {
+ 		v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
+-		io_write(sd, 0x71, tx_5v);
+ 		adv76xx_s_detect_tx_5v_ctrl(sd);
+ 		if (handled)
+ 			*handled = true;
+diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
+index 2a1b6e037e1a..0134ba32a057 100644
+--- a/drivers/mtd/ubi/upd.c
++++ b/drivers/mtd/ubi/upd.c
+@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ 	vol->changing_leb = 1;
+ 	vol->ch_lnum = req->lnum;
+ 
+-	vol->upd_buf = vmalloc(req->bytes);
++	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
+ 	if (!vol->upd_buf)
+ 		return -ENOMEM;
+ 
+diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
+index 866bac0ae7e9..339b0c5ce60c 100644
+--- a/drivers/net/can/usb/ems_usb.c
++++ b/drivers/net/can/usb/ems_usb.c
+@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
+  */
+ #define EMS_USB_ARM7_CLOCK 8000000
+ 
++#define CPC_TX_QUEUE_TRIGGER_LOW	25
++#define CPC_TX_QUEUE_TRIGGER_HIGH	35
++
+ /*
+  * CAN-Message representation in a CPC_MSG. Message object type is
+  * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
+@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
+ 	switch (urb->status) {
+ 	case 0:
+ 		dev->free_slots = dev->intr_in_buffer[1];
++		if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
++			if (netif_queue_stopped(netdev)){
++				netif_wake_queue(netdev);
++			}
++		}
+ 		break;
+ 
+ 	case -ECONNRESET: /* unlink */
+@@ -529,8 +537,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
+ 	/* Release context */
+ 	context->echo_index = MAX_TX_URBS;
+ 
+-	if (netif_queue_stopped(netdev))
+-		netif_wake_queue(netdev);
+ }
+ 
+ /*
+@@ -590,7 +596,7 @@ static int ems_usb_start(struct ems_usb *dev)
+ 	int err, i;
+ 
+ 	dev->intr_in_buffer[0] = 0;
+-	dev->free_slots = 15; /* initial size */
++	dev->free_slots = 50; /* initial size */
+ 
+ 	for (i = 0; i < MAX_RX_URBS; i++) {
+ 		struct urb *urb = NULL;
+@@ -838,7 +844,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
+ 
+ 		/* Slow down tx path */
+ 		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
+-		    dev->free_slots < 5) {
++		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
+ 			netif_stop_queue(netdev);
+ 		}
+ 	}
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 9779c1e5688c..90e8b662e44d 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -2797,6 +2797,10 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+ 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ 	int ret;
+ 
++	/* we don't support "match all" in the firmware */
++	if (!req->n_match_sets)
++		return -EOPNOTSUPP;
++
+ 	if (!(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ 		ret = iwl_mvm_cancel_scan_wait_notif(mvm, IWL_MVM_SCAN_OS);
+ 		if (ret)
+diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
+index 7cfd2db02deb..914655e89677 100644
+--- a/drivers/pci/xen-pcifront.c
++++ b/drivers/pci/xen-pcifront.c
+@@ -52,7 +52,7 @@ struct pcifront_device {
+ };
+ 
+ struct pcifront_sd {
+-	int domain;
++	struct pci_sysdata sd;
+ 	struct pcifront_device *pdev;
+ };
+ 
+@@ -66,7 +66,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
+ 				    unsigned int domain, unsigned int bus,
+ 				    struct pcifront_device *pdev)
+ {
+-	sd->domain = domain;
++	/* Because we do not expose that information via XenBus. */
++	sd->sd.node = first_online_node;
++	sd->sd.domain = domain;
+ 	sd->pdev = pdev;
+ }
+ 
+@@ -464,8 +466,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
+ 	dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
+ 		 domain, bus);
+ 
+-	bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
+-	sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++	bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
++	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+ 	if (!bus_entry || !sd) {
+ 		err = -ENOMEM;
+ 		goto err_out;
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 2e58279fab60..6f50e9d958de 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4095,6 +4095,17 @@ reject:
+ 	return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
+ }
+ 
++static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
++{
++	bool ret;
++
++	spin_lock_bh(&conn->state_lock);
++	ret = (conn->conn_state != TARG_CONN_STATE_LOGGED_IN);
++	spin_unlock_bh(&conn->state_lock);
++
++	return ret;
++}
++
+ int iscsi_target_rx_thread(void *arg)
+ {
+ 	int ret, rc;
+@@ -4112,7 +4123,7 @@ int iscsi_target_rx_thread(void *arg)
+ 	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ 	 */
+ 	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+-	if (rc < 0)
++	if (rc < 0 || iscsi_target_check_conn_state(conn))
+ 		return 0;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index f9cde9141836..9a96f1712b7a 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -393,6 +393,7 @@ err:
+ 	if (login->login_complete) {
+ 		if (conn->rx_thread && conn->rx_thread_active) {
+ 			send_sig(SIGINT, conn->rx_thread, 1);
++			complete(&conn->rx_login_comp);
+ 			kthread_stop(conn->rx_thread);
+ 		}
+ 		if (conn->tx_thread && conn->tx_thread_active) {
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index 08aa7cc58694..57fd4e14d4eb 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -329,7 +329,7 @@ static int core_scsi3_pr_seq_non_holder(
+ 			 * RESERVATION CONFLICT on some CDBs */
+ 
+ 	if (!se_sess->se_node_acl->device_list)
+-		return;
++		return 0;
+ 
+ 	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ 	/*
+diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
+index 733824e3825f..46b966d09af2 100644
+--- a/drivers/target/target_core_sbc.c
++++ b/drivers/target/target_core_sbc.c
+@@ -321,7 +321,8 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
+ 	return 0;
+ }
+ 
+-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
++					   int *post_ret)
+ {
+ 	unsigned char *buf, *addr;
+ 	struct scatterlist *sg;
+@@ -385,7 +386,8 @@ sbc_execute_rw(struct se_cmd *cmd)
+ 			       cmd->data_direction);
+ }
+ 
+-static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
++					     int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 
+@@ -395,8 +397,10 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	 * sent to the backend driver.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
+-	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status)
++	if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
+ 		cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
++		*post_ret = 1;
++	}
+ 	spin_unlock_irq(&cmd->t_state_lock);
+ 
+ 	/*
+@@ -408,7 +412,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
+ 	return TCM_NO_SENSE;
+ }
+ 
+-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
++static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
++						 int *post_ret)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct scatterlist *write_sg = NULL, *sg;
+@@ -504,11 +509,11 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
+ 
+ 		if (block_size < PAGE_SIZE) {
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    block_size);
++				    m.piter.sg->offset + block_size);
+ 		} else {
+ 			sg_miter_next(&m);
+ 			sg_set_page(&write_sg[i], m.page, block_size,
+-				    0);
++				    m.piter.sg->offset);
+ 		}
+ 		len -= block_size;
+ 		i++;
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index adb8016955c4..ad48837ead42 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -78,16 +78,18 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+ 	kfree(tmr);
+ }
+ 
+-static void core_tmr_handle_tas_abort(
+-	struct se_node_acl *tmr_nacl,
+-	struct se_cmd *cmd,
+-	int tas)
++static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+ {
+-	bool remove = true;
++	unsigned long flags;
++	bool remove = true, send_tas;
+ 	/*
+ 	 * TASK ABORTED status (TAS) bit support
+ 	 */
+-	if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	send_tas = (cmd->transport_state & CMD_T_TAS);
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
++	if (send_tas) {
+ 		remove = false;
+ 		transport_send_task_abort(cmd);
+ 	}
+@@ -110,6 +112,47 @@ static int target_check_cdb_and_preempt(struct list_head *list,
+ 	return 1;
+ }
+ 
++static bool __target_check_io_state(struct se_cmd *se_cmd,
++				    struct se_session *tmr_sess, int tas)
++{
++	struct se_session *sess = se_cmd->se_sess;
++
++	assert_spin_locked(&sess->sess_cmd_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++	/*
++	 * If command already reached CMD_T_COMPLETE state within
++	 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
++	 * this se_cmd has been passed to fabric driver and will
++	 * not be aborted.
++	 *
++	 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
++	 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
++	 * long as se_cmd->cmd_kref is still active unless zero.
++	 */
++	spin_lock(&se_cmd->t_state_lock);
++	if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
++		pr_debug("Attempted to abort io tag: %u already complete or"
++			" fabric stop, skipping\n",
++			se_cmd->se_tfo->get_task_tag(se_cmd));
++		spin_unlock(&se_cmd->t_state_lock);
++		return false;
++	}
++	if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
++		pr_debug("Attempted to abort io tag: %u already shutdown,"
++			" skipping\n", se_cmd->se_tfo->get_task_tag(se_cmd));
++		spin_unlock(&se_cmd->t_state_lock);
++		return false;
++	}
++	se_cmd->transport_state |= CMD_T_ABORTED;
++
++	if ((tmr_sess != se_cmd->se_sess) && tas)
++		se_cmd->transport_state |= CMD_T_TAS;
++
++	spin_unlock(&se_cmd->t_state_lock);
++
++	return kref_get_unless_zero(&se_cmd->cmd_kref);
++}
++
+ void core_tmr_abort_task(
+ 	struct se_device *dev,
+ 	struct se_tmr_req *tmr,
+@@ -136,25 +179,20 @@ void core_tmr_abort_task(
+ 		printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
+ 			se_cmd->se_tfo->get_fabric_name(), ref_tag);
+ 
+-		spin_lock(&se_cmd->t_state_lock);
+-		if (se_cmd->transport_state & CMD_T_COMPLETE) {
+-			printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
+-			spin_unlock(&se_cmd->t_state_lock);
++		if (!__target_check_io_state(se_cmd, se_sess, 0)) {
+ 			spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
++			target_put_sess_cmd(se_cmd);
+ 			goto out;
+ 		}
+-		se_cmd->transport_state |= CMD_T_ABORTED;
+-		spin_unlock(&se_cmd->t_state_lock);
+ 
+ 		list_del_init(&se_cmd->se_cmd_list);
+-		kref_get(&se_cmd->cmd_kref);
+ 		spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ 
+ 		cancel_work_sync(&se_cmd->work);
+ 		transport_wait_for_tasks(se_cmd);
+ 
+-		target_put_sess_cmd(se_cmd);
+ 		transport_cmd_finish_abort(se_cmd, true);
++		target_put_sess_cmd(se_cmd);
+ 
+ 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+ 				" ref_tag: %d\n", ref_tag);
+@@ -211,7 +249,8 @@ static void core_tmr_drain_tmr_list(
+ 
+ 		spin_lock(&sess->sess_cmd_lock);
+ 		spin_lock(&cmd->t_state_lock);
+-		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
++		if (!(cmd->transport_state & CMD_T_ACTIVE) ||
++		     (cmd->transport_state & CMD_T_FABRIC_STOP)) {
+ 			spin_unlock(&cmd->t_state_lock);
+ 			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+@@ -221,15 +260,22 @@ static void core_tmr_drain_tmr_list(
+ 			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
++		if (sess->sess_tearing_down || cmd->cmd_wait_set) {
++			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
++			continue;
++		}
+ 		cmd->transport_state |= CMD_T_ABORTED;
+ 		spin_unlock(&cmd->t_state_lock);
+ 
+ 		rc = kref_get_unless_zero(&cmd->cmd_kref);
+-		spin_unlock(&sess->sess_cmd_lock);
+ 		if (!rc) {
+ 			printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
++		spin_unlock(&sess->sess_cmd_lock);
++
+ 		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ 	}
+ 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -254,13 +300,15 @@ static void core_tmr_drain_tmr_list(
+ static void core_tmr_drain_state_list(
+ 	struct se_device *dev,
+ 	struct se_cmd *prout_cmd,
+-	struct se_node_acl *tmr_nacl,
++	struct se_session *tmr_sess,
+ 	int tas,
+ 	struct list_head *preempt_and_abort_list)
+ {
+ 	LIST_HEAD(drain_task_list);
++	struct se_session *sess;
+ 	struct se_cmd *cmd, *next;
+ 	unsigned long flags;
++	int rc;
+ 
+ 	/*
+ 	 * Complete outstanding commands with TASK_ABORTED SAM status.
+@@ -299,6 +347,16 @@ static void core_tmr_drain_state_list(
+ 		if (prout_cmd == cmd)
+ 			continue;
+ 
++		sess = cmd->se_sess;
++		if (WARN_ON_ONCE(!sess))
++			continue;
++
++		spin_lock(&sess->sess_cmd_lock);
++		rc = __target_check_io_state(cmd, tmr_sess, tas);
++		spin_unlock(&sess->sess_cmd_lock);
++		if (!rc)
++			continue;
++
+ 		list_move_tail(&cmd->state_list, &drain_task_list);
+ 		cmd->state_active = false;
+ 	}
+@@ -306,7 +364,7 @@ static void core_tmr_drain_state_list(
+ 
+ 	while (!list_empty(&drain_task_list)) {
+ 		cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
+-		list_del(&cmd->state_list);
++		list_del_init(&cmd->state_list);
+ 
+ 		pr_debug("LUN_RESET: %s cmd: %p"
+ 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d"
+@@ -330,16 +388,11 @@ static void core_tmr_drain_state_list(
+ 		 * loop above, but we do it down here given that
+ 		 * cancel_work_sync may block.
+ 		 */
+-		if (cmd->t_state == TRANSPORT_COMPLETE)
+-			cancel_work_sync(&cmd->work);
+-
+-		spin_lock_irqsave(&cmd->t_state_lock, flags);
+-		target_stop_cmd(cmd, &flags);
+-
+-		cmd->transport_state |= CMD_T_ABORTED;
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		cancel_work_sync(&cmd->work);
++		transport_wait_for_tasks(cmd);
+ 
+-		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
++		core_tmr_handle_tas_abort(cmd, tas);
++		target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
+@@ -351,6 +404,7 @@ int core_tmr_lun_reset(
+ {
+ 	struct se_node_acl *tmr_nacl = NULL;
+ 	struct se_portal_group *tmr_tpg = NULL;
++	struct se_session *tmr_sess = NULL;
+ 	int tas;
+         /*
+ 	 * TASK_ABORTED status bit, this is configurable via ConfigFS
+@@ -369,8 +423,9 @@ int core_tmr_lun_reset(
+ 	 * or struct se_device passthrough..
+ 	 */
+ 	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
+-		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
+-		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
++		tmr_sess = tmr->task_cmd->se_sess;
++		tmr_nacl = tmr_sess->se_node_acl;
++		tmr_tpg = tmr_sess->se_tpg;
+ 		if (tmr_nacl && tmr_tpg) {
+ 			pr_debug("LUN_RESET: TMR caller fabric: %s"
+ 				" initiator port %s\n",
+@@ -383,7 +438,7 @@ int core_tmr_lun_reset(
+ 		dev->transport->name, tas);
+ 
+ 	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
+-	core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
++	core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
+ 				preempt_and_abort_list);
+ 
+ 	/*
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 3881504b40d8..be12b9d84052 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -555,9 +555,6 @@ void transport_deregister_session(struct se_session *se_sess)
+ }
+ EXPORT_SYMBOL(transport_deregister_session);
+ 
+-/*
+- * Called with cmd->t_state_lock held.
+- */
+ static void target_remove_from_state_list(struct se_cmd *cmd)
+ {
+ 	struct se_device *dev = cmd->se_dev;
+@@ -582,10 +579,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ {
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
+-	if (write_pending)
+-		cmd->t_state = TRANSPORT_WRITE_PENDING;
+-
+ 	if (remove_from_lists) {
+ 		target_remove_from_state_list(cmd);
+ 
+@@ -595,6 +588,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
+ 		cmd->se_lun = NULL;
+ 	}
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (write_pending)
++		cmd->t_state = TRANSPORT_WRITE_PENDING;
++
+ 	/*
+ 	 * Determine if frontend context caller is requesting the stopping of
+ 	 * this command for frontend exceptions.
+@@ -649,6 +646,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
+ 
+ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ {
++	bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
++
+ 	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+ 		transport_lun_remove_cmd(cmd);
+ 	/*
+@@ -660,7 +659,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+ 
+ 	if (transport_cmd_check_stop_to_fabric(cmd))
+ 		return;
+-	if (remove)
++	if (remove && ack_kref)
+ 		transport_put_cmd(cmd);
+ }
+ 
+@@ -728,7 +727,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
+ 	 * Check for case where an explicit ABORT_TASK has been received
+ 	 * and transport_wait_for_tasks() will be waiting for completion..
+ 	 */
+-	if (cmd->transport_state & CMD_T_ABORTED &&
++	if (cmd->transport_state & CMD_T_ABORTED ||
+ 	    cmd->transport_state & CMD_T_STOP) {
+ 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 		complete_all(&cmd->t_transport_stop_comp);
+@@ -1638,7 +1637,7 @@ bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
+ void transport_generic_request_failure(struct se_cmd *cmd,
+ 		sense_reason_t sense_reason)
+ {
+-	int ret = 0;
++	int ret = 0, post_ret = 0;
+ 
+ 	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+@@ -1661,7 +1660,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
+ 	 */
+ 	if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ 	     cmd->transport_complete_callback)
+-		cmd->transport_complete_callback(cmd, false);
++		cmd->transport_complete_callback(cmd, false, &post_ret);
+ 
+ 	switch (sense_reason) {
+ 	case TCM_NON_EXISTENT_LUN:
+@@ -1836,19 +1835,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
+ 	return true;
+ }
+ 
++static int __transport_check_aborted_status(struct se_cmd *, int);
++
+ void target_execute_cmd(struct se_cmd *cmd)
+ {
+ 	/*
+-	 * If the received CDB has aleady been aborted stop processing it here.
+-	 */
+-	if (transport_check_aborted_status(cmd, 1))
+-		return;
+-
+-	/*
+ 	 * Determine if frontend context caller is requesting the stopping of
+ 	 * this command for frontend exceptions.
++	 *
++	 * If the received CDB has aleady been aborted stop processing it here.
+ 	 */
+ 	spin_lock_irq(&cmd->t_state_lock);
++	if (__transport_check_aborted_status(cmd, 1)) {
++		spin_unlock_irq(&cmd->t_state_lock);
++		return;
++	}
+ 	if (cmd->transport_state & CMD_T_STOP) {
+ 		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ 			__func__, __LINE__,
+@@ -2056,11 +2057,13 @@ static void target_complete_ok_work(struct work_struct *work)
+ 	 */
+ 	if (cmd->transport_complete_callback) {
+ 		sense_reason_t rc;
++		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
++		bool zero_dl = !(cmd->data_length);
++		int post_ret = 0;
+ 
+-		rc = cmd->transport_complete_callback(cmd, true);
+-		if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+-			if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+-			    !cmd->data_length)
++		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
++		if (!rc && !post_ret) {
++			if (caw && zero_dl)
+ 				goto queue_rsp;
+ 
+ 			return;
+@@ -2209,20 +2212,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
+ }
+ 
+ /**
+- * transport_release_cmd - free a command
+- * @cmd:       command to free
++ * transport_put_cmd - release a reference to a command
++ * @cmd:       command to release
+  *
+- * This routine unconditionally frees a command, and reference counting
+- * or list removal must be done in the caller.
++ * This routine releases our reference to the command and frees it if possible.
+  */
+-static int transport_release_cmd(struct se_cmd *cmd)
++static int transport_put_cmd(struct se_cmd *cmd)
+ {
+ 	BUG_ON(!cmd->se_tfo);
+-
+-	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+-		core_tmr_release_req(cmd->se_tmr_req);
+-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
+-		kfree(cmd->t_task_cdb);
+ 	/*
+ 	 * If this cmd has been setup with target_get_sess_cmd(), drop
+ 	 * the kref and call ->release_cmd() in kref callback.
+@@ -2230,18 +2227,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ 	return target_put_sess_cmd(cmd);
+ }
+ 
+-/**
+- * transport_put_cmd - release a reference to a command
+- * @cmd:       command to release
+- *
+- * This routine releases our reference to the command and frees it if possible.
+- */
+-static int transport_put_cmd(struct se_cmd *cmd)
+-{
+-	transport_free_pages(cmd);
+-	return transport_release_cmd(cmd);
+-}
+-
+ void *transport_kmap_data_sg(struct se_cmd *cmd)
+ {
+ 	struct scatterlist *sg = cmd->t_data_sg;
+@@ -2437,34 +2422,59 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
+ 	}
+ }
+ 
+-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++static bool
++__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
++			   unsigned long *flags);
++
++static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
+ {
+ 	unsigned long flags;
++
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++}
++
++int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
++{
+ 	int ret = 0;
++	bool aborted = false, tas = false;
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
+ 		if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+-			 transport_wait_for_tasks(cmd);
++			target_wait_free_cmd(cmd, &aborted, &tas);
+ 
+-		ret = transport_release_cmd(cmd);
++		if (!aborted || tas)
++			ret = transport_put_cmd(cmd);
+ 	} else {
+ 		if (wait_for_tasks)
+-			transport_wait_for_tasks(cmd);
++			target_wait_free_cmd(cmd, &aborted, &tas);
+ 		/*
+ 		 * Handle WRITE failure case where transport_generic_new_cmd()
+ 		 * has already added se_cmd to state_list, but fabric has
+ 		 * failed command before I/O submission.
+ 		 */
+-		if (cmd->state_active) {
+-			spin_lock_irqsave(&cmd->t_state_lock, flags);
++		if (cmd->state_active)
+ 			target_remove_from_state_list(cmd);
+-			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+-		}
+ 
+ 		if (cmd->se_lun)
+ 			transport_lun_remove_cmd(cmd);
+ 
+-		ret = transport_put_cmd(cmd);
++		if (!aborted || tas)
++			ret = transport_put_cmd(cmd);
++	}
++	/*
++	 * If the task has been internally aborted due to TMR ABORT_TASK
++	 * or LUN_RESET, target_core_tmr.c is responsible for performing
++	 * the remaining calls to target_put_sess_cmd(), and not the
++	 * callers of this function.
++	 */
++	if (aborted) {
++		pr_debug("Detected CMD_T_ABORTED for ITT: %u\n",
++			cmd->se_tfo->get_task_tag(cmd));
++		wait_for_completion(&cmd->cmd_wait_comp);
++		cmd->se_tfo->release_cmd(cmd);
++		ret = 1;
+ 	}
+ 	return ret;
+ }
+@@ -2504,25 +2514,45 @@ out:
+ }
+ EXPORT_SYMBOL(target_get_sess_cmd);
+ 
++static void target_free_cmd_mem(struct se_cmd *cmd)
++{
++	transport_free_pages(cmd);
++
++	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
++		core_tmr_release_req(cmd->se_tmr_req);
++	if (cmd->t_task_cdb != cmd->__t_task_cdb)
++		kfree(cmd->t_task_cdb);
++}
++
+ static void target_release_cmd_kref(struct kref *kref)
+ 		__releases(&se_cmd->se_sess->sess_cmd_lock)
+ {
+ 	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ 	struct se_session *se_sess = se_cmd->se_sess;
++	bool fabric_stop;
+ 
+ 	if (list_empty(&se_cmd->se_cmd_list)) {
+ 		spin_unlock(&se_sess->sess_cmd_lock);
++		target_free_cmd_mem(se_cmd);
+ 		se_cmd->se_tfo->release_cmd(se_cmd);
+ 		return;
+ 	}
+-	if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
++
++	spin_lock(&se_cmd->t_state_lock);
++	fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
++	spin_unlock(&se_cmd->t_state_lock);
++
++	if (se_cmd->cmd_wait_set || fabric_stop) {
++		list_del_init(&se_cmd->se_cmd_list);
+ 		spin_unlock(&se_sess->sess_cmd_lock);
++		target_free_cmd_mem(se_cmd);
+ 		complete(&se_cmd->cmd_wait_comp);
+ 		return;
+ 	}
+-	list_del(&se_cmd->se_cmd_list);
++	list_del_init(&se_cmd->se_cmd_list);
+ 	spin_unlock(&se_sess->sess_cmd_lock);
+ 
++	target_free_cmd_mem(se_cmd);
+ 	se_cmd->se_tfo->release_cmd(se_cmd);
+ }
+ 
+@@ -2534,6 +2564,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
+ 	struct se_session *se_sess = se_cmd->se_sess;
+ 
+ 	if (!se_sess) {
++		target_free_cmd_mem(se_cmd);
+ 		se_cmd->se_tfo->release_cmd(se_cmd);
+ 		return 1;
+ 	}
+@@ -2551,6 +2582,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ {
+ 	struct se_cmd *se_cmd;
+ 	unsigned long flags;
++	int rc;
+ 
+ 	spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ 	if (se_sess->sess_tearing_down) {
+@@ -2560,8 +2592,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
+ 	se_sess->sess_tearing_down = 1;
+ 	list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+ 
+-	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+-		se_cmd->cmd_wait_set = 1;
++	list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
++		rc = kref_get_unless_zero(&se_cmd->cmd_kref);
++		if (rc) {
++			se_cmd->cmd_wait_set = 1;
++			spin_lock(&se_cmd->t_state_lock);
++			se_cmd->transport_state |= CMD_T_FABRIC_STOP;
++			spin_unlock(&se_cmd->t_state_lock);
++		}
++	}
+ 
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ }
+@@ -2574,15 +2613,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
+ {
+ 	struct se_cmd *se_cmd, *tmp_cmd;
+ 	unsigned long flags;
++	bool tas;
+ 
+ 	list_for_each_entry_safe(se_cmd, tmp_cmd,
+ 				&se_sess->sess_wait_list, se_cmd_list) {
+-		list_del(&se_cmd->se_cmd_list);
++		list_del_init(&se_cmd->se_cmd_list);
+ 
+ 		pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
+ 			" %d\n", se_cmd, se_cmd->t_state,
+ 			se_cmd->se_tfo->get_cmd_state(se_cmd));
+ 
++		spin_lock_irqsave(&se_cmd->t_state_lock, flags);
++		tas = (se_cmd->transport_state & CMD_T_TAS);
++		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
++
++		if (!target_put_sess_cmd(se_cmd)) {
++			if (tas)
++				target_put_sess_cmd(se_cmd);
++		}
++
+ 		wait_for_completion(&se_cmd->cmd_wait_comp);
+ 		pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
+ 			" fabric state: %d\n", se_cmd, se_cmd->t_state,
+@@ -2625,34 +2674,38 @@ int transport_clear_lun_ref(struct se_lun *lun)
+ 	return 0;
+ }
+ 
+-/**
+- * transport_wait_for_tasks - wait for completion to occur
+- * @cmd:	command to wait
+- *
+- * Called from frontend fabric context to wait for storage engine
+- * to pause and/or release frontend generated struct se_cmd.
+- */
+-bool transport_wait_for_tasks(struct se_cmd *cmd)
++static bool
++__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
++			   bool *aborted, bool *tas, unsigned long *flags)
++	__releases(&cmd->t_state_lock)
++	__acquires(&cmd->t_state_lock)
+ {
+-	unsigned long flags;
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	assert_spin_locked(&cmd->t_state_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++
++	if (fabric_stop)
++		cmd->transport_state |= CMD_T_FABRIC_STOP;
++
++	if (cmd->transport_state & CMD_T_ABORTED)
++		*aborted = true;
++
++	if (cmd->transport_state & CMD_T_TAS)
++		*tas = true;
++
+ 	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ 		return false;
+-	}
+ 
+ 	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+-	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ 		return false;
+-	}
+ 
+-	if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+-		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	if (!(cmd->transport_state & CMD_T_ACTIVE))
++		return false;
++
++	if (fabric_stop && *aborted)
+ 		return false;
+-	}
+ 
+ 	cmd->transport_state |= CMD_T_STOP;
+ 
+@@ -2661,20 +2714,37 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
+ 		cmd, cmd->se_tfo->get_task_tag(cmd),
+ 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+ 
+-	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+ 
+ 	wait_for_completion(&cmd->t_transport_stop_comp);
+ 
+-	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ 	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+ 
+ 	pr_debug("wait_for_tasks: Stopped wait_for_completion("
+ 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+ 		cmd->se_tfo->get_task_tag(cmd));
+ 
++	return true;
++}
++
++/**
++ * transport_wait_for_tasks - wait for completion to occur
++ * @cmd:	command to wait
++ *
++ * Called from frontend fabric context to wait for storage engine
++ * to pause and/or release frontend generated struct se_cmd.
++ */
++bool transport_wait_for_tasks(struct se_cmd *cmd)
++{
++	unsigned long flags;
++	bool ret, aborted = false, tas = false;
++
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
+ 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 
+-	return true;
++	return ret;
+ }
+ EXPORT_SYMBOL(transport_wait_for_tasks);
+ 
+@@ -2960,8 +3030,13 @@ after_reason:
+ }
+ EXPORT_SYMBOL(transport_send_check_condition_and_sense);
+ 
+-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++	__releases(&cmd->t_state_lock)
++	__acquires(&cmd->t_state_lock)
+ {
++	assert_spin_locked(&cmd->t_state_lock);
++	WARN_ON_ONCE(!irqs_disabled());
++
+ 	if (!(cmd->transport_state & CMD_T_ABORTED))
+ 		return 0;
+ 
+@@ -2969,19 +3044,37 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ 	 * If cmd has been aborted but either no status is to be sent or it has
+ 	 * already been sent, just return
+ 	 */
+-	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
++	if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
++		if (send_status)
++			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ 		return 1;
++	}
+ 
+-	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
+-		 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
++	pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
++		" 0x%02x ITT: 0x%08x\n", cmd->t_task_cdb[0],
++		cmd->se_tfo->get_task_tag(cmd));
+ 
+ 	cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
+ 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ 	trace_target_cmd_complete(cmd);
++
++	spin_unlock_irq(&cmd->t_state_lock);
+ 	cmd->se_tfo->queue_status(cmd);
++	spin_lock_irq(&cmd->t_state_lock);
+ 
+ 	return 1;
+ }
++
++int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
++{
++	int ret;
++
++	spin_lock_irq(&cmd->t_state_lock);
++	ret = __transport_check_aborted_status(cmd, send_status);
++	spin_unlock_irq(&cmd->t_state_lock);
++
++	return ret;
++}
+ EXPORT_SYMBOL(transport_check_aborted_status);
+ 
+ void transport_send_task_abort(struct se_cmd *cmd)
+@@ -3003,11 +3096,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
+ 	 */
+ 	if (cmd->data_direction == DMA_TO_DEVICE) {
+ 		if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+-			cmd->transport_state |= CMD_T_ABORTED;
++			spin_lock_irqsave(&cmd->t_state_lock, flags);
++			if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
++				spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++				goto send_abort;
++			}
+ 			cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
++			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ 			return;
+ 		}
+ 	}
++send_abort:
+ 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
+ 
+ 	transport_lun_remove_cmd(cmd);
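
The session-shutdown hunk above now pins each outstanding command with
kref_get_unless_zero() before flagging it, so a command whose refcount
has already hit zero is skipped instead of being resurrected. A minimal
sketch of that take-a-reference-only-if-still-alive pattern, with
hypothetical names (my_obj, my_obj_release, OBJ_SHUTDOWN) rather than
code from this patch:

	#define OBJ_SHUTDOWN	0x1	/* hypothetical flag bit */

	struct my_obj {
		struct kref		kref;
		struct list_head	list;
		unsigned long		flags;
	};

	static void my_obj_release(struct kref *kref)
	{
		kfree(container_of(kref, struct my_obj, kref));
	}

	/* Called with the lock protecting obj_list held. */
	static void shutdown_all(struct list_head *obj_list)
	{
		struct my_obj *obj;

		list_for_each_entry(obj, obj_list, list) {
			if (!kref_get_unless_zero(&obj->kref))
				continue;	/* already being freed */
			obj->flags |= OBJ_SHUTDOWN;
		}
		/* every successful get is balanced by a later kref_put() */
	}
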
+diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
+index ad6c87a4653c..fbc6285905a6 100644
+--- a/drivers/usb/chipidea/otg.c
++++ b/drivers/usb/chipidea/otg.c
+@@ -118,7 +118,7 @@ static void ci_otg_work(struct work_struct *work)
+ int ci_hdrc_otg_init(struct ci_hdrc *ci)
+ {
+ 	INIT_WORK(&ci->work, ci_otg_work);
+-	ci->wq = create_singlethread_workqueue("ci_otg");
++	ci->wq = create_freezable_workqueue("ci_otg");
+ 	if (!ci->wq) {
+ 		dev_err(ci->dev, "can't create workqueue\n");
+ 		return -ENODEV;
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 173edd4ca20e..be245d073f15 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -829,7 +829,6 @@ struct dwc3 {
+ 	unsigned		pullups_connected:1;
+ 	unsigned		resize_fifos:1;
+ 	unsigned		setup_packet_pending:1;
+-	unsigned		start_config_issued:1;
+ 	unsigned		three_stage_setup:1;
+ 	unsigned		usb3_lpm_capable:1;
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 06ecd1e6871c..00f2c456f94b 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -545,7 +545,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 	int ret;
+ 	u32 reg;
+ 
+-	dwc->start_config_issued = false;
+ 	cfg = le16_to_cpu(ctrl->wValue);
+ 
+ 	switch (state) {
+@@ -727,10 +726,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
+ 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ 		break;
+-	case USB_REQ_SET_INTERFACE:
+-		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
+-		dwc->start_config_issued = false;
+-		/* Fall through */
+ 	default:
+ 		dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
+ 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 6fbf461d523c..b886226be241 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
+ 	dep->trb_pool_dma = 0;
+ }
+ 
++static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
++
++/**
++ * dwc3_gadget_start_config - Configure EP resources
++ * @dwc: pointer to our controller context structure
++ * @dep: endpoint that is being enabled
++ *
++ * The assignment of transfer resources cannot perfectly follow the
++ * data book due to the fact that the controller driver does not have
++ * all knowledge of the configuration in advance. It is given this
++ * information piecemeal by the composite gadget framework after every
++ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
++ * programming model in this scenario can cause errors. For two
++ * reasons:
++ *
++ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
++ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
++ * multiple interfaces.
++ *
++ * 2) The databook does not mention doing more DEPXFERCFG for new
++ * endpoint on alt setting (8.1.6).
++ *
++ * The following simplified method is used instead:
++ *
++ * All hardware endpoints can be assigned a transfer resource and this
++ * setting will stay persistent until either a core reset or
++ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
++ * do DEPXFERCFG for every hardware endpoint as well. We are
++ * guaranteed that there are as many transfer resources as endpoints.
++ *
++ * This function is called for each endpoint when it is being enabled
++ * but is triggered only when called for EP0-out, which always happens
++ * first, and which should only happen in one of the above conditions.
++ */
+ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
+ {
+ 	struct dwc3_gadget_ep_cmd_params params;
+ 	u32			cmd;
++	int			i;
++	int			ret;
++
++	if (dep->number)
++		return 0;
+ 
+ 	memset(&params, 0x00, sizeof(params));
++	cmd = DWC3_DEPCMD_DEPSTARTCFG;
+ 
+-	if (dep->number != 1) {
+-		cmd = DWC3_DEPCMD_DEPSTARTCFG;
+-		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
+-		if (dep->number > 1) {
+-			if (dwc->start_config_issued)
+-				return 0;
+-			dwc->start_config_issued = true;
+-			cmd |= DWC3_DEPCMD_PARAM(2);
+-		}
++	ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++	if (ret)
++		return ret;
+ 
+-		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
++	for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
++		struct dwc3_ep *dep = dwc->eps[i];
++
++		if (!dep)
++			continue;
++
++		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
++		if (ret)
++			return ret;
+ 	}
+ 
+ 	return 0;
+@@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
+ 		struct dwc3_trb	*trb_st_hw;
+ 		struct dwc3_trb	*trb_link;
+ 
+-		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+-		if (ret)
+-			return ret;
+-
+ 		dep->endpoint.desc = desc;
+ 		dep->comp_desc = comp_desc;
+ 		dep->type = usb_endpoint_type(desc);
+@@ -1589,8 +1627,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
+ 	}
+ 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ 
+-	dwc->start_config_issued = false;
+-
+ 	/* Start with SuperSpeed Default */
+ 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ 
+@@ -2167,7 +2203,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
+ 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ 
+ 	dwc3_disconnect_gadget(dwc);
+-	dwc->start_config_issued = false;
+ 
+ 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ 	dwc->setup_packet_pending = false;
+@@ -2218,7 +2253,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
+ 
+ 	dwc3_stop_active_transfers(dwc);
+ 	dwc3_clear_stall_all_ep(dwc);
+-	dwc->start_config_issued = false;
+ 
+ 	/* Reset device address to zero */
+ 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 1dd9919081f8..7a76fe4c2f9e 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -162,6 +162,9 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ 	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
++	{ USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
++	{ USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
++	{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 88540596973f..ce3d6af977b7 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_UE910_V2			0x1012
+ #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
+ #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
++#define TELIT_PRODUCT_LE922_USBCFG5		0x1045
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ 
+@@ -318,6 +319,7 @@ static void option_instat_callback(struct urb *urb);
+ #define TOSHIBA_PRODUCT_G450			0x0d45
+ 
+ #define ALINK_VENDOR_ID				0x1e0e
++#define SIMCOM_PRODUCT_SIM7100E			0x9001 /* Yes, ALINK_VENDOR_ID */
+ #define ALINK_PRODUCT_PH300			0x9100
+ #define ALINK_PRODUCT_3GU			0x9200
+ 
+@@ -610,6 +612,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
+ 	.reserved = BIT(3) | BIT(4),
+ };
+ 
++static const struct option_blacklist_info simcom_sim7100e_blacklist = {
++	.reserved = BIT(5) | BIT(6),
++};
++
+ static const struct option_blacklist_info telit_le910_blacklist = {
+ 	.sendsetup = BIT(0),
+ 	.reserved = BIT(1) | BIT(2),
+@@ -1130,6 +1136,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
+ 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
++	{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
++	  .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+@@ -1137,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
+ 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
+ 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
++	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
++	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+@@ -1188,6 +1198,8 @@ static const struct usb_device_id option_ids[] = {
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+ 		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
++	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1657,6 +1669,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
+ 	{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
++	  .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
+ 	  .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
+ 	},
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 514fa91cf74e..f0a2ad15a992 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -155,14 +155,17 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
+-	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx/EM74xx */
+-	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx/EM74xx */
++	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx */
++	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx */
++	{DEVICE_SWI(0x1199, 0x9078)},	/* Sierra Wireless EM74xx */
++	{DEVICE_SWI(0x1199, 0x9079)},	/* Sierra Wireless EM74xx */
+ 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81b3)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ 
+ 	/* Huawei devices */
+ 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index e9851add6f4e..c0f4ab83aaa8 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
+ 		info.num_regions = VFIO_PCI_NUM_REGIONS;
+ 		info.num_irqs = VFIO_PCI_NUM_IRQS;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ 		struct pci_dev *pdev = vdev->pdev;
+@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
+ 			return -EINVAL;
+ 		}
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ 		struct vfio_irq_info info;
+@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
+ 		else
+ 			info.flags |= VFIO_IRQ_INFO_NORESIZE;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ 		struct vfio_irq_set hdr;
+diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
+index abcff7a1aa66..973b24ffe332 100644
+--- a/drivers/vfio/platform/vfio_platform_common.c
++++ b/drivers/vfio/platform/vfio_platform_common.c
+@@ -163,7 +163,8 @@ static long vfio_platform_ioctl(void *device_data,
+ 		info.num_regions = vdev->num_regions;
+ 		info.num_irqs = vdev->num_irqs;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ 		struct vfio_region_info info;
+@@ -184,7 +185,8 @@ static long vfio_platform_ioctl(void *device_data,
+ 		info.size = vdev->regions[info.index].size;
+ 		info.flags = vdev->regions[info.index].flags;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
+ 		struct vfio_irq_info info;
+@@ -203,7 +205,8 @@ static long vfio_platform_ioctl(void *device_data,
+ 		info.flags = vdev->irqs[info.index].flags;
+ 		info.count = vdev->irqs[info.index].count;
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
+ 		struct vfio_irq_set hdr;
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index 57d8c37a002b..092216540756 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -986,7 +986,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+ 
+ 		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+ 
+-		return copy_to_user((void __user *)arg, &info, minsz);
++		return copy_to_user((void __user *)arg, &info, minsz) ?
++			-EFAULT : 0;
+ 
+ 	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
+ 		struct vfio_iommu_type1_dma_map map;
+@@ -1019,7 +1020,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
+ 		if (ret)
+ 			return ret;
+ 
+-		return copy_to_user((void __user *)arg, &unmap, minsz);
++		return copy_to_user((void __user *)arg, &unmap, minsz) ?
++			-EFAULT : 0;
+ 	}
+ 
+ 	return -ENOTTY;
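
Every VFIO hunk above is the same one-line fix: copy_to_user() returns
the number of bytes it failed to copy, not an errno, so handing its
result straight back leaks a positive byte count to userspace where
callers expect 0 or a negative error. The corrected idiom in isolation:

	if (copy_to_user((void __user *)arg, &info, minsz))
		return -EFAULT;	/* some bytes were left uncopied */
	return 0;
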
+diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
+index 58e38d586f52..4d529f3e40df 100644
+--- a/drivers/xen/xen-pciback/pciback.h
++++ b/drivers/xen/xen-pciback/pciback.h
+@@ -37,6 +37,7 @@ struct xen_pcibk_device {
+ 	struct xen_pci_sharedinfo *sh_info;
+ 	unsigned long flags;
+ 	struct work_struct op_work;
++	struct xen_pci_op op;
+ };
+ 
+ struct xen_pcibk_dev_data {
+diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
+index c4a0666de6f5..9cf4653b6bd7 100644
+--- a/drivers/xen/xen-pciback/pciback_ops.c
++++ b/drivers/xen/xen-pciback/pciback_ops.c
+@@ -197,13 +197,27 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ 	struct xen_pcibk_dev_data *dev_data;
+ 	int i, result;
+ 	struct msix_entry *entries;
++	u16 cmd;
+ 
+ 	if (unlikely(verbose_request))
+ 		printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ 		       pci_name(dev));
++
+ 	if (op->value > SH_INFO_MAX_VEC)
+ 		return -EINVAL;
+ 
++	if (dev->msix_enabled)
++		return -EALREADY;
++
++	/*
++	 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
++	 * to access the BARs where the MSI-X entries reside.
++	 * But VF devices are unique in that the PF needs to be checked.
++	 */
++	pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
++	if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
++		return -ENXIO;
++
+ 	entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ 	if (entries == NULL)
+ 		return -ENOMEM;
+@@ -298,9 +312,14 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 		container_of(data, struct xen_pcibk_device, op_work);
+ 	struct pci_dev *dev;
+ 	struct xen_pcibk_dev_data *dev_data = NULL;
+-	struct xen_pci_op *op = &pdev->sh_info->op;
++	struct xen_pci_op *op = &pdev->op;
+ 	int test_intx = 0;
++#ifdef CONFIG_PCI_MSI
++	unsigned int nr = 0;
++#endif
+ 
++	*op = pdev->sh_info->op;
++	barrier();
+ 	dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+ 
+ 	if (dev == NULL)
+@@ -326,6 +345,7 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 			op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_enable_msix:
++			nr = op->value;
+ 			op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ 			break;
+ 		case XEN_PCI_OP_disable_msix:
+@@ -342,6 +362,17 @@ void xen_pcibk_do_op(struct work_struct *data)
+ 		if ((dev_data->enable_intx != test_intx))
+ 			xen_pcibk_control_isr(dev, 0 /* no reset */);
+ 	}
++	pdev->sh_info->op.err = op->err;
++	pdev->sh_info->op.value = op->value;
++#ifdef CONFIG_PCI_MSI
++	if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
++		unsigned int i;
++
++		for (i = 0; i < nr; i++)
++			pdev->sh_info->op.msix_entries[i].vector =
++				op->msix_entries[i].vector;
++	}
++#endif
+ 	/* Tell the driver domain that we're done. */
+ 	wmb();
+ 	clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
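
The xen-pciback hunks snapshot the guest-visible request into the new
driver-private pdev->op before validating or acting on it, so a
malicious frontend cannot flip fields such as op->value between the
check and the use, and only the intended result fields are copied back
to the shared page afterwards. The snapshot-then-validate shape,
reduced to a sketch (struct req, MAX_VAL and do_work() are
placeholders, not names from this patch):

	static int handle_request(struct req *shared)
	{
		struct req local = *shared;	/* one snapshot */

		barrier();	/* no compiler re-reads of *shared below */

		if (local.value > MAX_VAL)	/* validate the copy... */
			return -EINVAL;
		return do_work(&local);		/* ...and act on it too */
	}
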
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index b7f51504f85a..c561d530be2e 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -941,12 +941,12 @@ out:
+ 	spin_unlock_irqrestore(&info->v2p_lock, flags);
+ 
+ out_free:
+-	mutex_lock(&tpg->tv_tpg_mutex);
+-	tpg->tv_tpg_fe_count--;
+-	mutex_unlock(&tpg->tv_tpg_mutex);
+-
+-	if (err)
++	if (err) {
++		mutex_lock(&tpg->tv_tpg_mutex);
++		tpg->tv_tpg_fe_count--;
++		mutex_unlock(&tpg->tv_tpg_mutex);
+ 		kfree(new);
++	}
+ 
+ 	return err;
+ }
+diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
+index 252f5c15806b..78a7b1d73354 100644
+--- a/fs/cifs/cifsfs.h
++++ b/fs/cifs/cifsfs.h
+@@ -31,19 +31,15 @@
+  * so that it will fit. We use hash_64 to convert the value to 31 bits, and
+  * then add 1, to ensure that we don't end up with a 0 as the value.
+  */
+-#if BITS_PER_LONG == 64
+ static inline ino_t
+ cifs_uniqueid_to_ino_t(u64 fileid)
+ {
++	if ((sizeof(ino_t)) < (sizeof(u64)))
++		return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
++
+ 	return (ino_t)fileid;
++
+ }
+-#else
+-static inline ino_t
+-cifs_uniqueid_to_ino_t(u64 fileid)
+-{
+-	return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1;
+-}
+-#endif
+ 
+ extern struct file_system_type cifs_fs_type;
+ extern const struct address_space_operations cifs_addr_ops;
+diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
+index f26ffbfc64d8..f1a5067d5494 100644
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -1395,11 +1395,10 @@ openRetry:
+  * current bigbuf.
+  */
+ static int
+-cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++discard_remaining_data(struct TCP_Server_Info *server)
+ {
+ 	unsigned int rfclen = get_rfc1002_length(server->smallbuf);
+ 	int remaining = rfclen + 4 - server->total_read;
+-	struct cifs_readdata *rdata = mid->callback_data;
+ 
+ 	while (remaining > 0) {
+ 		int length;
+@@ -1413,10 +1412,20 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 		remaining -= length;
+ 	}
+ 
+-	dequeue_mid(mid, rdata->result);
+ 	return 0;
+ }
+ 
++static int
++cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
++{
++	int length;
++	struct cifs_readdata *rdata = mid->callback_data;
++
++	length = discard_remaining_data(server);
++	dequeue_mid(mid, rdata->result);
++	return length;
++}
++
+ int
+ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ {
+@@ -1445,6 +1454,12 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+ 		return length;
+ 	server->total_read += length;
+ 
++	if (server->ops->is_status_pending &&
++	    server->ops->is_status_pending(buf, server, 0)) {
++		discard_remaining_data(server);
++		return -1;
++	}
++
+ 	/* Was the SMB read successful? */
+ 	rdata->result = server->ops->map_error(buf, false);
+ 	if (rdata->result != 0) {
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 894f259d3989..657a9c5c4fff 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1042,21 +1042,25 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
+ {
+ 	char *data_offset;
+ 	struct create_context *cc;
+-	unsigned int next = 0;
++	unsigned int next;
++	unsigned int remaining;
+ 	char *name;
+ 
+ 	data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
++	remaining = le32_to_cpu(rsp->CreateContextsLength);
+ 	cc = (struct create_context *)data_offset;
+-	do {
+-		cc = (struct create_context *)((char *)cc + next);
++	while (remaining >= sizeof(struct create_context)) {
+ 		name = le16_to_cpu(cc->NameOffset) + (char *)cc;
+-		if (le16_to_cpu(cc->NameLength) != 4 ||
+-		    strncmp(name, "RqLs", 4)) {
+-			next = le32_to_cpu(cc->Next);
+-			continue;
+-		}
+-		return server->ops->parse_lease_buf(cc, epoch);
+-	} while (next != 0);
++		if (le16_to_cpu(cc->NameLength) == 4 &&
++		    strncmp(name, "RqLs", 4) == 0)
++			return server->ops->parse_lease_buf(cc, epoch);
++
++		next = le32_to_cpu(cc->Next);
++		if (!next)
++			break;
++		remaining -= next;
++		cc = (struct create_context *)((char *)cc + next);
++	}
+ 
+ 	return 0;
+ }
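
parse_lease_state() previously trusted each create context's Next
offset and looped until it found a zero, so a malformed reply could
walk the cursor past the end of the buffer. The rewrite bounds the walk
with CreateContextsLength instead. The general shape for iterating
self-describing, length-prefixed records safely looks like this
(struct rec, matches() and handle() are placeholders, and the
next > remaining check is a belt-and-braces extra, not part of this
patch):

	char *p = buf;			/* start of the record array */
	u32 remaining = total_len;	/* from the length field */
	u32 next;

	while (remaining >= sizeof(struct rec)) {
		struct rec *r = (struct rec *)p;

		if (matches(r))
			return handle(r);

		next = le32_to_cpu(r->next);	/* 0 ends the chain */
		if (!next || next > remaining)
			break;
		remaining -= next;
		p += next;
	}
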
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 0046ab7d4f3d..10bce74c427f 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -269,9 +269,6 @@ static inline int dname_external(const struct dentry *dentry)
+ 	return dentry->d_name.name != dentry->d_iname;
+ }
+ 
+-/*
+- * Make sure other CPUs see the inode attached before the type is set.
+- */
+ static inline void __d_set_inode_and_type(struct dentry *dentry,
+ 					  struct inode *inode,
+ 					  unsigned type_flags)
+@@ -279,28 +276,18 @@ static inline void __d_set_inode_and_type(struct dentry *dentry,
+ 	unsigned flags;
+ 
+ 	dentry->d_inode = inode;
+-	smp_wmb();
+ 	flags = READ_ONCE(dentry->d_flags);
+ 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ 	flags |= type_flags;
+ 	WRITE_ONCE(dentry->d_flags, flags);
+ }
+ 
+-/*
+- * Ideally, we want to make sure that other CPUs see the flags cleared before
+- * the inode is detached, but this is really a violation of RCU principles
+- * since the ordering suggests we should always set inode before flags.
+- *
+- * We should instead replace or discard the entire dentry - but that sucks
+- * performancewise on mass deletion/rename.
+- */
+ static inline void __d_clear_type_and_inode(struct dentry *dentry)
+ {
+ 	unsigned flags = READ_ONCE(dentry->d_flags);
+ 
+ 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+ 	WRITE_ONCE(dentry->d_flags, flags);
+-	smp_wmb();
+ 	dentry->d_inode = NULL;
+ }
+ 
+@@ -322,17 +309,17 @@ static void dentry_free(struct dentry *dentry)
+ }
+ 
+ /**
+- * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
++ * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
+  * @dentry: the target dentry
+  * After this call, in-progress rcu-walk path lookup will fail. This
+  * should be called after unhashing, and after changing d_inode (if
+  * the dentry has not already been unhashed).
+  */
+-static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
++static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
+ {
+-	assert_spin_locked(&dentry->d_lock);
+-	/* Go through a barrier */
+-	write_seqcount_barrier(&dentry->d_seq);
++	lockdep_assert_held(&dentry->d_lock);
++	/* Go through an invalidation barrier */
++	write_seqcount_invalidate(&dentry->d_seq);
+ }
+ 
+ /*
+@@ -370,9 +357,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
+ 	__releases(dentry->d_inode->i_lock)
+ {
+ 	struct inode *inode = dentry->d_inode;
++
++	raw_write_seqcount_begin(&dentry->d_seq);
+ 	__d_clear_type_and_inode(dentry);
+ 	hlist_del_init(&dentry->d_u.d_alias);
+-	dentry_rcuwalk_barrier(dentry);
++	raw_write_seqcount_end(&dentry->d_seq);
+ 	spin_unlock(&dentry->d_lock);
+ 	spin_unlock(&inode->i_lock);
+ 	if (!inode->i_nlink)
+@@ -494,7 +483,7 @@ void __d_drop(struct dentry *dentry)
+ 		__hlist_bl_del(&dentry->d_hash);
+ 		dentry->d_hash.pprev = NULL;
+ 		hlist_bl_unlock(b);
+-		dentry_rcuwalk_barrier(dentry);
++		dentry_rcuwalk_invalidate(dentry);
+ 	}
+ }
+ EXPORT_SYMBOL(__d_drop);
+@@ -1757,8 +1746,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
+ 	spin_lock(&dentry->d_lock);
+ 	if (inode)
+ 		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
++	raw_write_seqcount_begin(&dentry->d_seq);
+ 	__d_set_inode_and_type(dentry, inode, add_flags);
+-	dentry_rcuwalk_barrier(dentry);
++	raw_write_seqcount_end(&dentry->d_seq);
+ 	spin_unlock(&dentry->d_lock);
+ 	fsnotify_d_instantiate(dentry, inode);
+ }
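
The dcache hunks replace the old one-sided smp_wmb() ordering with a
real seqcount write section around the paired d_inode/d_flags updates,
so a lockless RCU-walk reader sampling d_seq is guaranteed to retry if
it raced with the store. The standard writer/reader pairing, shown
schematically (obj, seq and data are placeholders):

	/* writer, serialized by the object's lock */
	raw_write_seqcount_begin(&o->seq);
	o->data = new_val;		/* update the protected fields */
	raw_write_seqcount_end(&o->seq);

	/* lockless reader */
	static int read_data(struct obj *o)
	{
		unsigned int seq;
		int val;

		do {
			seq = read_seqcount_begin(&o->seq);
			val = o->data;
		} while (read_seqcount_retry(&o->seq, seq));

		return val;
	}
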
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index 9e92c9c2d319..b5f3cc7274f6 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -377,12 +377,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
+ 	struct inode *inode = d_inode(dentry);
+ 	dnode_secno dno;
+ 	int r;
+-	int rep = 0;
+ 	int err;
+ 
+ 	hpfs_lock(dir->i_sb);
+ 	hpfs_adjust_length(name, &len);
+-again:
++
+ 	err = -ENOENT;
+ 	de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
+ 	if (!de)
+@@ -402,33 +401,9 @@ again:
+ 		hpfs_error(dir->i_sb, "there was error when removing dirent");
+ 		err = -EFSERROR;
+ 		break;
+-	case 2:		/* no space for deleting, try to truncate file */
+-
++	case 2:		/* no space for deleting */
+ 		err = -ENOSPC;
+-		if (rep++)
+-			break;
+-
+-		dentry_unhash(dentry);
+-		if (!d_unhashed(dentry)) {
+-			hpfs_unlock(dir->i_sb);
+-			return -ENOSPC;
+-		}
+-		if (generic_permission(inode, MAY_WRITE) ||
+-		    !S_ISREG(inode->i_mode) ||
+-		    get_write_access(inode)) {
+-			d_rehash(dentry);
+-		} else {
+-			struct iattr newattrs;
+-			/*pr_info("truncating file before delete.\n");*/
+-			newattrs.ia_size = 0;
+-			newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+-			err = notify_change(dentry, &newattrs, NULL);
+-			put_write_access(inode);
+-			if (!err)
+-				goto again;
+-		}
+-		hpfs_unlock(dir->i_sb);
+-		return -ENOSPC;
++		break;
+ 	default:
+ 		drop_nlink(inode);
+ 		err = 0;
+diff --git a/fs/jffs2/README.Locking b/fs/jffs2/README.Locking
+index 3ea36554107f..8918ac905a3b 100644
+--- a/fs/jffs2/README.Locking
++++ b/fs/jffs2/README.Locking
+@@ -2,10 +2,6 @@
+ 	JFFS2 LOCKING DOCUMENTATION
+ 	---------------------------
+ 
+-At least theoretically, JFFS2 does not require the Big Kernel Lock
+-(BKL), which was always helpfully obtained for it by Linux 2.4 VFS
+-code. It has its own locking, as described below.
+-
+ This document attempts to describe the existing locking rules for
+ JFFS2. It is not expected to remain perfectly up to date, but ought to
+ be fairly close.
+@@ -69,6 +65,7 @@ Ordering constraints:
+ 	   any f->sem held.
+ 	2. Never attempt to lock two file mutexes in one thread.
+ 	   No ordering rules have been made for doing so.
++	3. Never lock a page cache page with f->sem held.
+ 
+ 
+ 	erase_completion_lock spinlock
+diff --git a/fs/jffs2/build.c b/fs/jffs2/build.c
+index a3750f902adc..c1f04947d7dc 100644
+--- a/fs/jffs2/build.c
++++ b/fs/jffs2/build.c
+@@ -49,7 +49,8 @@ next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
+ 
+ 
+ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+-				    struct jffs2_inode_cache *ic)
++				    struct jffs2_inode_cache *ic,
++				    int *dir_hardlinks)
+ {
+ 	struct jffs2_full_dirent *fd;
+ 
+@@ -68,19 +69,21 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ 			dbg_fsbuild("child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
+ 				  fd->name, fd->ino, ic->ino);
+ 			jffs2_mark_node_obsolete(c, fd->raw);
++			/* Clear the ic/raw union so it doesn't cause problems later. */
++			fd->ic = NULL;
+ 			continue;
+ 		}
+ 
++		/* From this point, fd->raw is no longer used so we can set fd->ic */
++		fd->ic = child_ic;
++		child_ic->pino_nlink++;
++		/* If we appear (at this stage) to have hard-linked directories,
++		 * set a flag to trigger a scan later */
+ 		if (fd->type == DT_DIR) {
+-			if (child_ic->pino_nlink) {
+-				JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n",
+-					    fd->name, fd->ino, ic->ino);
+-				/* TODO: What do we do about it? */
+-			} else {
+-				child_ic->pino_nlink = ic->ino;
+-			}
+-		} else
+-			child_ic->pino_nlink++;
++			child_ic->flags |= INO_FLAGS_IS_DIR;
++			if (child_ic->pino_nlink > 1)
++				*dir_hardlinks = 1;
++		}
+ 
+ 		dbg_fsbuild("increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino);
+ 		/* Can't free scan_dents so far. We might need them in pass 2 */
+@@ -94,8 +97,7 @@ static void jffs2_build_inode_pass1(struct jffs2_sb_info *c,
+ */
+ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ {
+-	int ret;
+-	int i;
++	int ret, i, dir_hardlinks = 0;
+ 	struct jffs2_inode_cache *ic;
+ 	struct jffs2_full_dirent *fd;
+ 	struct jffs2_full_dirent *dead_fds = NULL;
+@@ -119,7 +121,7 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 	/* Now scan the directory tree, increasing nlink according to every dirent found. */
+ 	for_each_inode(i, c, ic) {
+ 		if (ic->scan_dents) {
+-			jffs2_build_inode_pass1(c, ic);
++			jffs2_build_inode_pass1(c, ic, &dir_hardlinks);
+ 			cond_resched();
+ 		}
+ 	}
+@@ -155,6 +157,20 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 	}
+ 
+ 	dbg_fsbuild("pass 2a complete\n");
++
++	if (dir_hardlinks) {
++		/* If we detected directory hardlinks earlier, *hopefully*
++		 * they are gone now because some of the links were from
++		 * dead directories which still had some old dirents lying
++		 * around and not yet garbage-collected, but which have
++		 * been discarded above. So clear the pino_nlink field
++		 * in each directory, so that the final scan below can
++		 * print appropriate warnings. */
++		for_each_inode(i, c, ic) {
++			if (ic->flags & INO_FLAGS_IS_DIR)
++				ic->pino_nlink = 0;
++		}
++	}
+ 	dbg_fsbuild("freeing temporary data structures\n");
+ 
+ 	/* Finally, we can scan again and free the dirent structs */
+@@ -162,6 +178,33 @@ static int jffs2_build_filesystem(struct jffs2_sb_info *c)
+ 		while(ic->scan_dents) {
+ 			fd = ic->scan_dents;
+ 			ic->scan_dents = fd->next;
++			/* We do use the pino_nlink field to count nlink of
++			 * directories during fs build, so set it to the
++			 * parent ino# now. Now that there's hopefully only
++			 * one. */
++			if (fd->type == DT_DIR) {
++				if (!fd->ic) {
++					/* We'll have complained about it and marked the corresponding
++					   raw node obsolete already. Just skip it. */
++					continue;
++				}
++
++				/* We *have* to have set this in jffs2_build_inode_pass1() */
++				BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR));
++
++				/* We clear ic->pino_nlink ∀ directories' ic *only* if dir_hardlinks
++				 * is set. Otherwise, we know this should never trigger anyway, so
++				 * we don't do the check. And ic->pino_nlink still contains the nlink
++				 * value (which is 1). */
++				if (dir_hardlinks && fd->ic->pino_nlink) {
++					JFFS2_ERROR("child dir \"%s\" (ino #%u) of dir ino #%u is also hard linked from dir ino #%u\n",
++						    fd->name, fd->ino, ic->ino, fd->ic->pino_nlink);
++					/* Should we unlink it from its previous parent? */
++				}
++
++				/* For directories, ic->pino_nlink holds that parent inode # */
++				fd->ic->pino_nlink = ic->ino;
++			}
+ 			jffs2_free_full_dirent(fd);
+ 		}
+ 		ic->scan_dents = NULL;
+@@ -240,11 +283,7 @@ static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c,
+ 
+ 			/* Reduce nlink of the child. If it's now zero, stick it on the
+ 			   dead_fds list to be cleaned up later. Else just free the fd */
+-
+-			if (fd->type == DT_DIR)
+-				child_ic->pino_nlink = 0;
+-			else
+-				child_ic->pino_nlink--;
++			child_ic->pino_nlink--;
+ 
+ 			if (!child_ic->pino_nlink) {
+ 				dbg_fsbuild("inode #%u (\"%s\") now has no links; adding to dead_fds list.\n",
+diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c
+index f509f62e12f6..3361979d728c 100644
+--- a/fs/jffs2/file.c
++++ b/fs/jffs2/file.c
+@@ -137,39 +137,33 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 	struct page *pg;
+ 	struct inode *inode = mapping->host;
+ 	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+-	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+-	struct jffs2_raw_inode ri;
+-	uint32_t alloc_len = 0;
+ 	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ 	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
+ 	int ret = 0;
+ 
+-	jffs2_dbg(1, "%s()\n", __func__);
+-
+-	if (pageofs > inode->i_size) {
+-		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+-					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+-		if (ret)
+-			return ret;
+-	}
+-
+-	mutex_lock(&f->sem);
+ 	pg = grab_cache_page_write_begin(mapping, index, flags);
+-	if (!pg) {
+-		if (alloc_len)
+-			jffs2_complete_reservation(c);
+-		mutex_unlock(&f->sem);
++	if (!pg)
+ 		return -ENOMEM;
+-	}
+ 	*pagep = pg;
+ 
+-	if (alloc_len) {
++	jffs2_dbg(1, "%s()\n", __func__);
++
++	if (pageofs > inode->i_size) {
+ 		/* Make new hole frag from old EOF to new page */
++		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
++		struct jffs2_raw_inode ri;
+ 		struct jffs2_full_dnode *fn;
++		uint32_t alloc_len;
+ 
+ 		jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
+ 			  (unsigned int)inode->i_size, pageofs);
+ 
++		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
++					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
++		if (ret)
++			goto out_page;
++
++		mutex_lock(&f->sem);
+ 		memset(&ri, 0, sizeof(ri));
+ 
+ 		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
+@@ -196,6 +190,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 		if (IS_ERR(fn)) {
+ 			ret = PTR_ERR(fn);
+ 			jffs2_complete_reservation(c);
++			mutex_unlock(&f->sem);
+ 			goto out_page;
+ 		}
+ 		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
+@@ -210,10 +205,12 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 			jffs2_mark_node_obsolete(c, fn->raw);
+ 			jffs2_free_full_dnode(fn);
+ 			jffs2_complete_reservation(c);
++			mutex_unlock(&f->sem);
+ 			goto out_page;
+ 		}
+ 		jffs2_complete_reservation(c);
+ 		inode->i_size = pageofs;
++		mutex_unlock(&f->sem);
+ 	}
+ 
+ 	/*
+@@ -222,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
+ 	 * case of a short-copy.
+ 	 */
+ 	if (!PageUptodate(pg)) {
++		mutex_lock(&f->sem);
+ 		ret = jffs2_do_readpage_nolock(inode, pg);
++		mutex_unlock(&f->sem);
+ 		if (ret)
+ 			goto out_page;
+ 	}
+-	mutex_unlock(&f->sem);
+ 	jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
+ 	return ret;
+ 
+ out_page:
+ 	unlock_page(pg);
+ 	page_cache_release(pg);
+-	mutex_unlock(&f->sem);
+ 	return ret;
+ }
+ 
+diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
+index 5a2dec2b064c..95d5880a63ee 100644
+--- a/fs/jffs2/gc.c
++++ b/fs/jffs2/gc.c
+@@ -1296,14 +1296,17 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era
+ 		BUG_ON(start > orig_start);
+ 	}
+ 
+-	/* First, use readpage() to read the appropriate page into the page cache */
+-	/* Q: What happens if we actually try to GC the _same_ page for which commit_write()
+-	 *    triggered garbage collection in the first place?
+-	 * A: I _think_ it's OK. read_cache_page shouldn't deadlock, we'll write out the
+-	 *    page OK. We'll actually write it out again in commit_write, which is a little
+-	 *    suboptimal, but at least we're correct.
+-	 */
++	/* The rules state that we must obtain the page lock *before* f->sem, so
++	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
++	 * actually going to *change* so we're safe; we only allow reading.
++	 *
++	 * It is important to note that jffs2_write_begin() will ensure that its
++	 * page is marked Uptodate before allocating space. That means that if we
++	 * end up here trying to GC the *same* page that jffs2_write_begin() is
++	 * trying to write out, read_cache_page() will not deadlock. */
++	mutex_unlock(&f->sem);
+ 	pg_ptr = jffs2_gc_fetch_page(c, f, start, &pg);
++	mutex_lock(&f->sem);
+ 
+ 	if (IS_ERR(pg_ptr)) {
+ 		pr_warn("read_cache_page() returned error: %ld\n",
+diff --git a/fs/jffs2/nodelist.h b/fs/jffs2/nodelist.h
+index fa35ff79ab35..0637271f3770 100644
+--- a/fs/jffs2/nodelist.h
++++ b/fs/jffs2/nodelist.h
+@@ -194,6 +194,7 @@ struct jffs2_inode_cache {
+ #define INO_STATE_CLEARING	6	/* In clear_inode() */
+ 
+ #define INO_FLAGS_XATTR_CHECKED	0x01	/* has no duplicate xattr_ref */
++#define INO_FLAGS_IS_DIR	0x02	/* is a directory */
+ 
+ #define RAWNODE_CLASS_INODE_CACHE	0
+ #define RAWNODE_CLASS_XATTR_DATUM	1
+@@ -249,7 +250,10 @@ struct jffs2_readinode_info
+ 
+ struct jffs2_full_dirent
+ {
+-	struct jffs2_raw_node_ref *raw;
++	union {
++		struct jffs2_raw_node_ref *raw;
++		struct jffs2_inode_cache *ic; /* Just during part of build */
++	};
+ 	struct jffs2_full_dirent *next;
+ 	uint32_t version;
+ 	uint32_t ino; /* == zero for unlink */
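
Taken together, the jffs2 hunks enforce the new locking rule recorded
in README.Locking: never lock a page cache page while holding f->sem.
jffs2_write_begin() now grabs the page before taking f->sem, and the GC
path drops f->sem around read_cache_page(), because ->readpage is
always entered with the page already locked and then takes f->sem,
giving the classic ABBA deadlock when GC did the opposite:

	/* readpage path (the VFS locks the page first): */
	lock_page(pg);
	mutex_lock(&f->sem);

	/* old GC path, the other way around -- ABBA: */
	mutex_lock(&f->sem);
	lock_page(pg);		/* can block forever */
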
+diff --git a/fs/namei.c b/fs/namei.c
+index ccd7f98d85b9..f3cc848da8bc 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1619,10 +1619,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
+ 		if (err < 0)
+ 			goto out_err;
+ 
+-		inode = path->dentry->d_inode;
+ 		err = -ENOENT;
+ 		if (d_is_negative(path->dentry))
+ 			goto out_path_put;
++		inode = path->dentry->d_inode;
+ 	}
+ 
+ 	if (should_follow_link(path->dentry, follow)) {
+@@ -3078,6 +3078,7 @@ retry_lookup:
+ 		path_to_nameidata(path, nd);
+ 		goto out;
+ 	}
++	inode = path->dentry->d_inode;
+ finish_lookup:
+ 	/* we _can_ be in RCU mode here */
+ 	if (should_follow_link(path->dentry, !symlink_ok)) {
+@@ -3152,6 +3153,10 @@ opened:
+ 			goto exit_fput;
+ 	}
+ out:
++	if (unlikely(error > 0)) {
++		WARN_ON(1);
++		error = -EINVAL;
++	}
+ 	if (got_write)
+ 		mnt_drop_write(nd->path.mnt);
+ 	path_put(&save_parent);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 2c4f41c34366..84706204cc33 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -2331,9 +2331,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
+ 		dentry = d_add_unique(dentry, igrab(state->inode));
+ 		if (dentry == NULL) {
+ 			dentry = opendata->dentry;
+-		} else if (dentry != ctx->dentry) {
++		} else {
+ 			dput(ctx->dentry);
+-			ctx->dentry = dget(dentry);
++			ctx->dentry = dentry;
+ 		}
+ 		nfs_set_verifier(dentry,
+ 				nfs_save_change_attribute(d_inode(opendata->dir)));
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 5dfbcd8887bb..2e5fb1c31251 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -487,8 +487,8 @@ enum ata_tf_protocols {
+ };
+ 
+ enum ata_ioctls {
+-	ATA_IOC_GET_IO32	= 0x309,
+-	ATA_IOC_SET_IO32	= 0x324,
++	ATA_IOC_GET_IO32	= 0x309, /* HDIO_GET_32BIT */
++	ATA_IOC_SET_IO32	= 0x324, /* HDIO_SET_32BIT */
+ };
+ 
+ /* core structures */
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index 167ec0934049..ca9df4521734 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -408,9 +408,7 @@ static inline bool d_mountpoint(const struct dentry *dentry)
+  */
+ static inline unsigned __d_entry_type(const struct dentry *dentry)
+ {
+-	unsigned type = READ_ONCE(dentry->d_flags);
+-	smp_rmb();
+-	return type & DCACHE_ENTRY_TYPE;
++	return dentry->d_flags & DCACHE_ENTRY_TYPE;
+ }
+ 
+ static inline bool d_is_miss(const struct dentry *dentry)
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index e0e33787c485..11c2dd114732 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -717,7 +717,7 @@ struct ata_device {
+ 	union {
+ 		u16		id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+ 		u32		gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+-	};
++	} ____cacheline_aligned;
+ 
+ 	/* DEVSLP Timing Variables from Identify Device Data Log */
+ 	u8			devslp_timing[ATA_LOG_DEVSLP_SIZE];
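
Annotating the id/gscr union ____cacheline_aligned matters because the
IDENTIFY data is a DMA target: on architectures without cache-coherent
DMA, a buffer sharing a cache line with neighbouring struct members can
clobber those members when the line is invalidated around the transfer.
Since the union is 512 bytes, a whole number of cache lines, aligning
its start isolates it completely. The same fix on a made-up structure:

	struct foo {
		/* 512-byte DMA target, starts on its own cache line */
		u32	dma_buf[128] ____cacheline_aligned;
		u8	cpu_state;	/* no longer shares that line */
	};
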
+diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
+index b95f914ce083..150f43a9149c 100644
+--- a/include/linux/nfs_fs.h
++++ b/include/linux/nfs_fs.h
+@@ -540,9 +540,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
+ 
+ static inline loff_t nfs_size_to_loff_t(__u64 size)
+ {
+-	if (size > (__u64) OFFSET_MAX - 1)
+-		return OFFSET_MAX - 1;
+-	return (loff_t) size;
++	return min_t(u64, size, OFFSET_MAX);
+ }
+ 
+ static inline ino_t
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index 5f68d0a391ce..c07e3a536099 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -266,13 +266,13 @@ static inline void write_seqcount_end(seqcount_t *s)
+ }
+ 
+ /**
+- * write_seqcount_barrier - invalidate in-progress read-side seq operations
++ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
+  * @s: pointer to seqcount_t
+  *
+- * After write_seqcount_barrier, no read-side seq operations will complete
++ * After write_seqcount_invalidate, no read-side seq operations will complete
+  * successfully and see data older than this.
+  */
+-static inline void write_seqcount_barrier(seqcount_t *s)
++static inline void write_seqcount_invalidate(seqcount_t *s)
+ {
+ 	smp_wmb();
+ 	s->sequence+=2;
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index 480e9f82dfea..2b40a1fab293 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -167,6 +167,7 @@ enum se_cmd_flags_table {
+ 	SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
+ 	SCF_COMPARE_AND_WRITE		= 0x00080000,
+ 	SCF_COMPARE_AND_WRITE_POST	= 0x00100000,
++	SCF_ACK_KREF			= 0x00400000,
+ };
+ 
+ /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
+@@ -522,7 +523,7 @@ struct se_cmd {
+ 	sense_reason_t		(*execute_cmd)(struct se_cmd *);
+ 	sense_reason_t		(*execute_rw)(struct se_cmd *, struct scatterlist *,
+ 					      u32, enum dma_data_direction);
+-	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
++	sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
+ 
+ 	unsigned char		*t_task_cdb;
+ 	unsigned char		__t_task_cdb[TCM_MAX_COMMAND_SIZE];
+@@ -537,6 +538,8 @@ struct se_cmd {
+ #define CMD_T_DEV_ACTIVE	(1 << 7)
+ #define CMD_T_REQUEST_STOP	(1 << 8)
+ #define CMD_T_BUSY		(1 << 9)
++#define CMD_T_TAS		(1 << 10)
++#define CMD_T_FABRIC_STOP	(1 << 11)
+ 	spinlock_t		t_state_lock;
+ 	struct completion	t_transport_stop_comp;
+ 
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index c4de47fc5cca..f69ec1295b0b 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -683,7 +683,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
+ 		 * The ftrace subsystem is for showing formats only.
+ 		 * They can not be enabled or disabled via the event files.
+ 		 */
+-		if (call->class && call->class->reg)
++		if (call->class && call->class->reg &&
++		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+ 			return file;
+ 	}
+ 
+diff --git a/mm/memory.c b/mm/memory.c
+index 2a9e09870c20..701d9ad45c46 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3363,8 +3363,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 	if (unlikely(pmd_none(*pmd)) &&
+ 	    unlikely(__pte_alloc(mm, vma, pmd, address)))
+ 		return VM_FAULT_OOM;
+-	/* if an huge pmd materialized from under us just retry later */
+-	if (unlikely(pmd_trans_huge(*pmd)))
++	/*
++	 * If a huge pmd materialized under us just retry later.  Use
++	 * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
++	 * didn't become pmd_trans_huge under us and then back to pmd_none, as
++	 * a result of MADV_DONTNEED running immediately after a huge pmd fault
++	 * in a different thread of this mm, in turn leading to a misleading
++	 * pmd_trans_huge() retval.  All we have to ensure is that it is a
++	 * regular pmd that we can walk with pte_offset_map() and we can do that
++	 * through an atomic read in C, which is what pmd_trans_unstable()
++	 * provides.
++	 */
++	if (unlikely(pmd_trans_unstable(pmd)))
+ 		return 0;
+ 	/*
+ 	 * A regular pmd is established and it can't morph into a huge pmd
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 2c37b1a44a8c..8c4841a6dc4c 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1557,7 +1557,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
+ 					 (GFP_HIGHUSER_MOVABLE |
+ 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
+ 					  __GFP_NORETRY | __GFP_NOWARN) &
+-					 ~GFP_IOFS, 0);
++					 ~__GFP_WAIT, 0);
+ 
+ 	return newpage;
+ }
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index e51af69c61bf..84201c21705e 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -1203,6 +1203,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
+ 	return new_piece;
+ }
+ 
++static size_t sizeof_footer(struct ceph_connection *con)
++{
++	return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
++	    sizeof(struct ceph_msg_footer) :
++	    sizeof(struct ceph_msg_footer_old);
++}
++
+ static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
+ {
+ 	BUG_ON(!msg);
+@@ -2326,9 +2333,9 @@ static int read_partial_message(struct ceph_connection *con)
+ 			ceph_pr_addr(&con->peer_addr.in_addr),
+ 			seq, con->in_seq + 1);
+ 		con->in_base_pos = -front_len - middle_len - data_len -
+-			sizeof(m->footer);
++			sizeof_footer(con);
+ 		con->in_tag = CEPH_MSGR_TAG_READY;
+-		return 0;
++		return 1;
+ 	} else if ((s64)seq - (s64)con->in_seq > 1) {
+ 		pr_err("read_partial_message bad seq %lld expected %lld\n",
+ 		       seq, con->in_seq + 1);
+@@ -2358,10 +2365,10 @@ static int read_partial_message(struct ceph_connection *con)
+ 			/* skip this message */
+ 			dout("alloc_msg said skip message\n");
+ 			con->in_base_pos = -front_len - middle_len - data_len -
+-				sizeof(m->footer);
++				sizeof_footer(con);
+ 			con->in_tag = CEPH_MSGR_TAG_READY;
+ 			con->in_seq++;
+-			return 0;
++			return 1;
+ 		}
+ 
+ 		BUG_ON(!con->in_msg);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index 36b9ac48b8fb..06bf4010d3ed 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -376,6 +376,9 @@ int ip6_forward(struct sk_buff *skb)
+ 	if (skb->pkt_type != PACKET_HOST)
+ 		goto drop;
+ 
++	if (unlikely(skb->sk))
++		goto drop;
++
+ 	if (skb_warn_if_lro(skb))
+ 		goto drop;
+ 
+diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
+index 2928afffbb81..8d79e70bd978 100644
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1218,7 +1218,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
+ 	if (bp[0] == '\\' && bp[1] == 'x') {
+ 		/* HEX STRING */
+ 		bp += 2;
+-		while (len < bufsize) {
++		while (len < bufsize - 1) {
+ 			int h, l;
+ 
+ 			h = hex_to_bin(bp[0]);
+diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
+index b9c0910fb8c4..0608f216f359 100644
+--- a/sound/core/control_compat.c
++++ b/sound/core/control_compat.c
+@@ -170,6 +170,19 @@ struct snd_ctl_elem_value32 {
+         unsigned char reserved[128];
+ };
+ 
++#ifdef CONFIG_X86_X32
++/* x32 has a different alignment for 64bit values from ia32 */
++struct snd_ctl_elem_value_x32 {
++	struct snd_ctl_elem_id id;
++	unsigned int indirect;	/* bit-field causes misalignment */
++	union {
++		s32 integer[128];
++		unsigned char data[512];
++		s64 integer64[64];
++	} value;
++	unsigned char reserved[128];
++};
++#endif /* CONFIG_X86_X32 */
+ 
+ /* get the value type and count of the control */
+ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id,
+@@ -219,9 +232,11 @@ static int get_elem_size(int type, int count)
+ 
+ static int copy_ctl_value_from_user(struct snd_card *card,
+ 				    struct snd_ctl_elem_value *data,
+-				    struct snd_ctl_elem_value32 __user *data32,
++				    void __user *userdata,
++				    void __user *valuep,
+ 				    int *typep, int *countp)
+ {
++	struct snd_ctl_elem_value32 __user *data32 = userdata;
+ 	int i, type, size;
+ 	int uninitialized_var(count);
+ 	unsigned int indirect;
+@@ -239,8 +254,9 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ 		for (i = 0; i < count; i++) {
++			s32 __user *intp = valuep;
+ 			int val;
+-			if (get_user(val, &data32->value.integer[i]))
++			if (get_user(val, &intp[i]))
+ 				return -EFAULT;
+ 			data->value.integer.value[i] = val;
+ 		}
+@@ -250,8 +266,7 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ 			dev_err(card->dev, "snd_ioctl32_ctl_elem_value: unknown type %d\n", type);
+ 			return -EINVAL;
+ 		}
+-		if (copy_from_user(data->value.bytes.data,
+-				   data32->value.data, size))
++		if (copy_from_user(data->value.bytes.data, valuep, size))
+ 			return -EFAULT;
+ 	}
+ 
+@@ -261,7 +276,8 @@ static int copy_ctl_value_from_user(struct snd_card *card,
+ }
+ 
+ /* restore the value to 32bit */
+-static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
++static int copy_ctl_value_to_user(void __user *userdata,
++				  void __user *valuep,
+ 				  struct snd_ctl_elem_value *data,
+ 				  int type, int count)
+ {
+@@ -270,22 +286,22 @@ static int copy_ctl_value_to_user(struct snd_ctl_elem_value32 __user *data32,
+ 	if (type == SNDRV_CTL_ELEM_TYPE_BOOLEAN ||
+ 	    type == SNDRV_CTL_ELEM_TYPE_INTEGER) {
+ 		for (i = 0; i < count; i++) {
++			s32 __user *intp = valuep;
+ 			int val;
+ 			val = data->value.integer.value[i];
+-			if (put_user(val, &data32->value.integer[i]))
++			if (put_user(val, &intp[i]))
+ 				return -EFAULT;
+ 		}
+ 	} else {
+ 		size = get_elem_size(type, count);
+-		if (copy_to_user(data32->value.data,
+-				 data->value.bytes.data, size))
++		if (copy_to_user(valuep, data->value.bytes.data, size))
+ 			return -EFAULT;
+ 	}
+ 	return 0;
+ }
+ 
+-static int snd_ctl_elem_read_user_compat(struct snd_card *card, 
+-					 struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_read_user(struct snd_card *card,
++			      void __user *userdata, void __user *valuep)
+ {
+ 	struct snd_ctl_elem_value *data;
+ 	int err, type, count;
+@@ -294,7 +310,9 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ 	if (data == NULL)
+ 		return -ENOMEM;
+ 
+-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++	err = copy_ctl_value_from_user(card, data, userdata, valuep,
++				       &type, &count);
++	if (err < 0)
+ 		goto error;
+ 
+ 	snd_power_lock(card);
+@@ -303,14 +321,15 @@ static int snd_ctl_elem_read_user_compat(struct snd_card *card,
+ 		err = snd_ctl_elem_read(card, data);
+ 	snd_power_unlock(card);
+ 	if (err >= 0)
+-		err = copy_ctl_value_to_user(data32, data, type, count);
++		err = copy_ctl_value_to_user(userdata, valuep, data,
++					     type, count);
+  error:
+ 	kfree(data);
+ 	return err;
+ }
+ 
+-static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+-					  struct snd_ctl_elem_value32 __user *data32)
++static int ctl_elem_write_user(struct snd_ctl_file *file,
++			       void __user *userdata, void __user *valuep)
+ {
+ 	struct snd_ctl_elem_value *data;
+ 	struct snd_card *card = file->card;
+@@ -320,7 +339,9 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ 	if (data == NULL)
+ 		return -ENOMEM;
+ 
+-	if ((err = copy_ctl_value_from_user(card, data, data32, &type, &count)) < 0)
++	err = copy_ctl_value_from_user(card, data, userdata, valuep,
++				       &type, &count);
++	if (err < 0)
+ 		goto error;
+ 
+ 	snd_power_lock(card);
+@@ -329,12 +350,39 @@ static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
+ 		err = snd_ctl_elem_write(card, file, data);
+ 	snd_power_unlock(card);
+ 	if (err >= 0)
+-		err = copy_ctl_value_to_user(data32, data, type, count);
++		err = copy_ctl_value_to_user(userdata, valuep, data,
++					     type, count);
+  error:
+ 	kfree(data);
+ 	return err;
+ }
+ 
++static int snd_ctl_elem_read_user_compat(struct snd_card *card,
++					 struct snd_ctl_elem_value32 __user *data32)
++{
++	return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_compat(struct snd_ctl_file *file,
++					  struct snd_ctl_elem_value32 __user *data32)
++{
++	return ctl_elem_write_user(file, data32, &data32->value);
++}
++
++#ifdef CONFIG_X86_X32
++static int snd_ctl_elem_read_user_x32(struct snd_card *card,
++				      struct snd_ctl_elem_value_x32 __user *data32)
++{
++	return ctl_elem_read_user(card, data32, &data32->value);
++}
++
++static int snd_ctl_elem_write_user_x32(struct snd_ctl_file *file,
++				       struct snd_ctl_elem_value_x32 __user *data32)
++{
++	return ctl_elem_write_user(file, data32, &data32->value);
++}
++#endif /* CONFIG_X86_X32 */
++
+ /* add or replace a user control */
+ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
+ 				   struct snd_ctl_elem_info32 __user *data32,
+@@ -393,6 +441,10 @@ enum {
+ 	SNDRV_CTL_IOCTL_ELEM_WRITE32 = _IOWR('U', 0x13, struct snd_ctl_elem_value32),
+ 	SNDRV_CTL_IOCTL_ELEM_ADD32 = _IOWR('U', 0x17, struct snd_ctl_elem_info32),
+ 	SNDRV_CTL_IOCTL_ELEM_REPLACE32 = _IOWR('U', 0x18, struct snd_ctl_elem_info32),
++#ifdef CONFIG_X86_X32
++	SNDRV_CTL_IOCTL_ELEM_READ_X32 = _IOWR('U', 0x12, struct snd_ctl_elem_value_x32),
++	SNDRV_CTL_IOCTL_ELEM_WRITE_X32 = _IOWR('U', 0x13, struct snd_ctl_elem_value_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -431,6 +483,12 @@ static inline long snd_ctl_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 		return snd_ctl_elem_add_compat(ctl, argp, 0);
+ 	case SNDRV_CTL_IOCTL_ELEM_REPLACE32:
+ 		return snd_ctl_elem_add_compat(ctl, argp, 1);
++#ifdef CONFIG_X86_X32
++	case SNDRV_CTL_IOCTL_ELEM_READ_X32:
++		return snd_ctl_elem_read_user_x32(ctl->card, argp);
++	case SNDRV_CTL_IOCTL_ELEM_WRITE_X32:
++		return snd_ctl_elem_write_user_x32(ctl, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 
+ 	down_read(&snd_ioctl_rwsem);
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index 9630e9f72b7b..1f64ab0c2a95 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -183,6 +183,14 @@ static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream
+ 	return err;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has the same struct as x86-64 for snd_pcm_channel_info */
++static int snd_pcm_channel_info_user(struct snd_pcm_substream *substream,
++				     struct snd_pcm_channel_info __user *src);
++#define snd_pcm_ioctl_channel_info_x32(s, p)	\
++	snd_pcm_channel_info_user(s, p)
++#endif /* CONFIG_X86_X32 */
++
+ struct snd_pcm_status32 {
+ 	s32 state;
+ 	struct compat_timespec trigger_tstamp;
+@@ -243,6 +251,71 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
+ 	return err;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has 64bit timespec and 64bit alignment */
++struct snd_pcm_status_x32 {
++	s32 state;
++	u32 rsvd; /* alignment */
++	struct timespec trigger_tstamp;
++	struct timespec tstamp;
++	u32 appl_ptr;
++	u32 hw_ptr;
++	s32 delay;
++	u32 avail;
++	u32 avail_max;
++	u32 overrange;
++	s32 suspended_state;
++	u32 audio_tstamp_data;
++	struct timespec audio_tstamp;
++	struct timespec driver_tstamp;
++	u32 audio_tstamp_accuracy;
++	unsigned char reserved[52-2*sizeof(struct timespec)];
++} __packed;
++
++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
++
++static int snd_pcm_status_user_x32(struct snd_pcm_substream *substream,
++				   struct snd_pcm_status_x32 __user *src,
++				   bool ext)
++{
++	struct snd_pcm_status status;
++	int err;
++
++	memset(&status, 0, sizeof(status));
++	/*
++	 * with extension, parameters are read/write,
++	 * get audio_tstamp_data from user,
++	 * ignore rest of status structure
++	 */
++	if (ext && get_user(status.audio_tstamp_data,
++				(u32 __user *)(&src->audio_tstamp_data)))
++		return -EFAULT;
++	err = snd_pcm_status(substream, &status);
++	if (err < 0)
++		return err;
++
++	if (clear_user(src, sizeof(*src)))
++		return -EFAULT;
++	if (put_user(status.state, &src->state) ||
++	    put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
++	    put_timespec(&status.tstamp, &src->tstamp) ||
++	    put_user(status.appl_ptr, &src->appl_ptr) ||
++	    put_user(status.hw_ptr, &src->hw_ptr) ||
++	    put_user(status.delay, &src->delay) ||
++	    put_user(status.avail, &src->avail) ||
++	    put_user(status.avail_max, &src->avail_max) ||
++	    put_user(status.overrange, &src->overrange) ||
++	    put_user(status.suspended_state, &src->suspended_state) ||
++	    put_user(status.audio_tstamp_data, &src->audio_tstamp_data) ||
++	    put_timespec(&status.audio_tstamp, &src->audio_tstamp) ||
++	    put_timespec(&status.driver_tstamp, &src->driver_tstamp) ||
++	    put_user(status.audio_tstamp_accuracy, &src->audio_tstamp_accuracy))
++		return -EFAULT;
++
++	return err;
++}
++#endif /* CONFIG_X86_X32 */
++
+ /* both for HW_PARAMS and HW_REFINE */
+ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
+ 					  int refine, 
+@@ -469,6 +542,93 @@ static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has 64bit timespec and 64bit alignment */
++struct snd_pcm_mmap_status_x32 {
++	s32 state;
++	s32 pad1;
++	u32 hw_ptr;
++	u32 pad2; /* alignment */
++	struct timespec tstamp;
++	s32 suspended_state;
++	struct timespec audio_tstamp;
++} __packed;
++
++struct snd_pcm_mmap_control_x32 {
++	u32 appl_ptr;
++	u32 avail_min;
++};
++
++struct snd_pcm_sync_ptr_x32 {
++	u32 flags;
++	u32 rsvd; /* alignment */
++	union {
++		struct snd_pcm_mmap_status_x32 status;
++		unsigned char reserved[64];
++	} s;
++	union {
++		struct snd_pcm_mmap_control_x32 control;
++		unsigned char reserved[64];
++	} c;
++} __packed;
++
++static int snd_pcm_ioctl_sync_ptr_x32(struct snd_pcm_substream *substream,
++				      struct snd_pcm_sync_ptr_x32 __user *src)
++{
++	struct snd_pcm_runtime *runtime = substream->runtime;
++	volatile struct snd_pcm_mmap_status *status;
++	volatile struct snd_pcm_mmap_control *control;
++	u32 sflags;
++	struct snd_pcm_mmap_control scontrol;
++	struct snd_pcm_mmap_status sstatus;
++	snd_pcm_uframes_t boundary;
++	int err;
++
++	if (snd_BUG_ON(!runtime))
++		return -EINVAL;
++
++	if (get_user(sflags, &src->flags) ||
++	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
++	    get_user(scontrol.avail_min, &src->c.control.avail_min))
++		return -EFAULT;
++	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
++		err = snd_pcm_hwsync(substream);
++		if (err < 0)
++			return err;
++	}
++	status = runtime->status;
++	control = runtime->control;
++	boundary = recalculate_boundary(runtime);
++	if (!boundary)
++		boundary = 0x7fffffff;
++	snd_pcm_stream_lock_irq(substream);
++	/* FIXME: we should consider the boundary for the sync from app */
++	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
++		control->appl_ptr = scontrol.appl_ptr;
++	else
++		scontrol.appl_ptr = control->appl_ptr % boundary;
++	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
++		control->avail_min = scontrol.avail_min;
++	else
++		scontrol.avail_min = control->avail_min;
++	sstatus.state = status->state;
++	sstatus.hw_ptr = status->hw_ptr % boundary;
++	sstatus.tstamp = status->tstamp;
++	sstatus.suspended_state = status->suspended_state;
++	sstatus.audio_tstamp = status->audio_tstamp;
++	snd_pcm_stream_unlock_irq(substream);
++	if (put_user(sstatus.state, &src->s.status.state) ||
++	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
++	    put_timespec(&sstatus.tstamp, &src->s.status.tstamp) ||
++	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
++	    put_timespec(&sstatus.audio_tstamp, &src->s.status.audio_tstamp) ||
++	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
++	    put_user(scontrol.avail_min, &src->c.control.avail_min))
++		return -EFAULT;
++
++	return 0;
++}
++#endif /* CONFIG_X86_X32 */
+ 
+ /*
+  */
+@@ -487,7 +647,12 @@ enum {
+ 	SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
+ 	SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
+ 	SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
+-
++#ifdef CONFIG_X86_X32
++	SNDRV_PCM_IOCTL_CHANNEL_INFO_X32 = _IOR('A', 0x32, struct snd_pcm_channel_info),
++	SNDRV_PCM_IOCTL_STATUS_X32 = _IOR('A', 0x20, struct snd_pcm_status_x32),
++	SNDRV_PCM_IOCTL_STATUS_EXT_X32 = _IOWR('A', 0x24, struct snd_pcm_status_x32),
++	SNDRV_PCM_IOCTL_SYNC_PTR_X32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -559,6 +724,16 @@ static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned l
+ 		return snd_pcm_ioctl_rewind_compat(substream, argp);
+ 	case SNDRV_PCM_IOCTL_FORWARD32:
+ 		return snd_pcm_ioctl_forward_compat(substream, argp);
++#ifdef CONFIG_X86_X32
++	case SNDRV_PCM_IOCTL_STATUS_X32:
++		return snd_pcm_status_user_x32(substream, argp, false);
++	case SNDRV_PCM_IOCTL_STATUS_EXT_X32:
++		return snd_pcm_status_user_x32(substream, argp, true);
++	case SNDRV_PCM_IOCTL_SYNC_PTR_X32:
++		return snd_pcm_ioctl_sync_ptr_x32(substream, argp);
++	case SNDRV_PCM_IOCTL_CHANNEL_INFO_X32:
++		return snd_pcm_ioctl_channel_info_x32(substream, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 
+ 	return -ENOIOCTLCMD;
+diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
+index 5268c1f58c25..09a89094dcf7 100644
+--- a/sound/core/rawmidi_compat.c
++++ b/sound/core/rawmidi_compat.c
+@@ -94,9 +94,58 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has 64bit timespec and 64bit alignment */
++struct snd_rawmidi_status_x32 {
++	s32 stream;
++	u32 rsvd; /* alignment */
++	struct timespec tstamp;
++	u32 avail;
++	u32 xruns;
++	unsigned char reserved[16];
++} __attribute__((packed));
++
++#define put_timespec(src, dst) copy_to_user(dst, src, sizeof(*dst))
++
++static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
++					struct snd_rawmidi_status_x32 __user *src)
++{
++	int err;
++	struct snd_rawmidi_status status;
++
++	if (rfile->output == NULL)
++		return -EINVAL;
++	if (get_user(status.stream, &src->stream))
++		return -EFAULT;
++
++	switch (status.stream) {
++	case SNDRV_RAWMIDI_STREAM_OUTPUT:
++		err = snd_rawmidi_output_status(rfile->output, &status);
++		break;
++	case SNDRV_RAWMIDI_STREAM_INPUT:
++		err = snd_rawmidi_input_status(rfile->input, &status);
++		break;
++	default:
++		return -EINVAL;
++	}
++	if (err < 0)
++		return err;
++
++	if (put_timespec(&status.tstamp, &src->tstamp) ||
++	    put_user(status.avail, &src->avail) ||
++	    put_user(status.xruns, &src->xruns))
++		return -EFAULT;
++
++	return 0;
++}
++#endif /* CONFIG_X86_X32 */
++
+ enum {
+ 	SNDRV_RAWMIDI_IOCTL_PARAMS32 = _IOWR('W', 0x10, struct snd_rawmidi_params32),
+ 	SNDRV_RAWMIDI_IOCTL_STATUS32 = _IOWR('W', 0x20, struct snd_rawmidi_status32),
++#ifdef CONFIG_X86_X32
++	SNDRV_RAWMIDI_IOCTL_STATUS_X32 = _IOWR('W', 0x20, struct snd_rawmidi_status_x32),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -115,6 +164,10 @@ static long snd_rawmidi_ioctl_compat(struct file *file, unsigned int cmd, unsign
+ 		return snd_rawmidi_ioctl_params_compat(rfile, argp);
+ 	case SNDRV_RAWMIDI_IOCTL_STATUS32:
+ 		return snd_rawmidi_ioctl_status_compat(rfile, argp);
++#ifdef CONFIG_X86_X32
++	case SNDRV_RAWMIDI_IOCTL_STATUS_X32:
++		return snd_rawmidi_ioctl_status_x32(rfile, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/sound/core/seq/oss/seq_oss.c b/sound/core/seq/oss/seq_oss.c
+index 72873a46afeb..4b53b8f2330f 100644
+--- a/sound/core/seq/oss/seq_oss.c
++++ b/sound/core/seq/oss/seq_oss.c
+@@ -148,8 +148,6 @@ odev_release(struct inode *inode, struct file *file)
+ 	if ((dp = file->private_data) == NULL)
+ 		return 0;
+ 
+-	snd_seq_oss_drain_write(dp);
+-
+ 	mutex_lock(&register_mutex);
+ 	snd_seq_oss_release(dp);
+ 	mutex_unlock(&register_mutex);
+diff --git a/sound/core/seq/oss/seq_oss_device.h b/sound/core/seq/oss/seq_oss_device.h
+index b43924325249..d7b4d016b547 100644
+--- a/sound/core/seq/oss/seq_oss_device.h
++++ b/sound/core/seq/oss/seq_oss_device.h
+@@ -127,7 +127,6 @@ int snd_seq_oss_write(struct seq_oss_devinfo *dp, const char __user *buf, int co
+ unsigned int snd_seq_oss_poll(struct seq_oss_devinfo *dp, struct file *file, poll_table * wait);
+ 
+ void snd_seq_oss_reset(struct seq_oss_devinfo *dp);
+-void snd_seq_oss_drain_write(struct seq_oss_devinfo *dp);
+ 
+ /* */
+ void snd_seq_oss_process_queue(struct seq_oss_devinfo *dp, abstime_t time);
+diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
+index dad5b1123e46..0b9c18b2e45f 100644
+--- a/sound/core/seq/oss/seq_oss_init.c
++++ b/sound/core/seq/oss/seq_oss_init.c
+@@ -436,22 +436,6 @@ snd_seq_oss_release(struct seq_oss_devinfo *dp)
+ 
+ 
+ /*
+- * Wait until the queue is empty (if we don't have nonblock)
+- */
+-void
+-snd_seq_oss_drain_write(struct seq_oss_devinfo *dp)
+-{
+-	if (! dp->timer->running)
+-		return;
+-	if (is_write_mode(dp->file_mode) && !is_nonblock_mode(dp->file_mode) &&
+-	    dp->writeq) {
+-		while (snd_seq_oss_writeq_sync(dp->writeq))
+-			;
+-	}
+-}
+-
+-
+-/*
+  * reset sequencer devices
+  */
+ void
+diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
+index e05802ae6e1b..2e908225d754 100644
+--- a/sound/core/timer_compat.c
++++ b/sound/core/timer_compat.c
+@@ -70,13 +70,14 @@ static int snd_timer_user_status_compat(struct file *file,
+ 					struct snd_timer_status32 __user *_status)
+ {
+ 	struct snd_timer_user *tu;
+-	struct snd_timer_status status;
++	struct snd_timer_status32 status;
+ 	
+ 	tu = file->private_data;
+ 	if (snd_BUG_ON(!tu->timeri))
+ 		return -ENXIO;
+ 	memset(&status, 0, sizeof(status));
+-	status.tstamp = tu->tstamp;
++	status.tstamp.tv_sec = tu->tstamp.tv_sec;
++	status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
+ 	status.resolution = snd_timer_resolution(tu->timeri);
+ 	status.lost = tu->timeri->lost;
+ 	status.overrun = tu->overrun;
+@@ -88,12 +89,21 @@ static int snd_timer_user_status_compat(struct file *file,
+ 	return 0;
+ }
+ 
++#ifdef CONFIG_X86_X32
++/* X32 ABI has the same struct as x86-64 */
++#define snd_timer_user_status_x32(file, s) \
++	snd_timer_user_status(file, s)
++#endif /* CONFIG_X86_X32 */
++
+ /*
+  */
+ 
+ enum {
+ 	SNDRV_TIMER_IOCTL_INFO32 = _IOR('T', 0x11, struct snd_timer_info32),
+ 	SNDRV_TIMER_IOCTL_STATUS32 = _IOW('T', 0x14, struct snd_timer_status32),
++#ifdef CONFIG_X86_X32
++	SNDRV_TIMER_IOCTL_STATUS_X32 = _IOW('T', 0x14, struct snd_timer_status),
++#endif /* CONFIG_X86_X32 */
+ };
+ 
+ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
+@@ -122,6 +132,10 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
+ 		return snd_timer_user_info_compat(file, argp);
+ 	case SNDRV_TIMER_IOCTL_STATUS32:
+ 		return snd_timer_user_status_compat(file, argp);
++#ifdef CONFIG_X86_X32
++	case SNDRV_TIMER_IOCTL_STATUS_X32:
++		return snd_timer_user_status_x32(file, argp);
++#endif /* CONFIG_X86_X32 */
+ 	}
+ 	return -ENOIOCTLCMD;
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index df34c78a6ced..91cc6897d595 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3477,6 +3477,29 @@ static void gpio2_mic_hotkey_event(struct hda_codec *codec,
+ 	input_sync(spec->kb_dev);
+ }
+ 
++static int alc_register_micmute_input_device(struct hda_codec *codec)
++{
++	struct alc_spec *spec = codec->spec;
++
++	spec->kb_dev = input_allocate_device();
++	if (!spec->kb_dev) {
++		codec_err(codec, "Out of memory (input_allocate_device)\n");
++		return -ENOMEM;
++	}
++	spec->kb_dev->name = "Microphone Mute Button";
++	spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
++	spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
++
++	if (input_register_device(spec->kb_dev)) {
++		codec_err(codec, "input_register_device failed\n");
++		input_free_device(spec->kb_dev);
++		spec->kb_dev = NULL;
++		return -ENOMEM;
++	}
++
++	return 0;
++}
++
+ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
+ 					     const struct hda_fixup *fix, int action)
+ {
+@@ -3494,20 +3517,8 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
+ 	struct alc_spec *spec = codec->spec;
+ 
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+-		spec->kb_dev = input_allocate_device();
+-		if (!spec->kb_dev) {
+-			codec_err(codec, "Out of memory (input_allocate_device)\n");
++		if (alc_register_micmute_input_device(codec) != 0)
+ 			return;
+-		}
+-		spec->kb_dev->name = "Microphone Mute Button";
+-		spec->kb_dev->evbit[0] = BIT_MASK(EV_KEY);
+-		spec->kb_dev->keybit[BIT_WORD(KEY_MICMUTE)] = BIT_MASK(KEY_MICMUTE);
+-		if (input_register_device(spec->kb_dev)) {
+-			codec_err(codec, "input_register_device failed\n");
+-			input_free_device(spec->kb_dev);
+-			spec->kb_dev = NULL;
+-			return;
+-		}
+ 
+ 		snd_hda_add_verbs(codec, gpio_init);
+ 		snd_hda_codec_write_cache(codec, codec->core.afg, 0,
+@@ -3537,6 +3548,47 @@ static void alc280_fixup_hp_gpio2_mic_hotkey(struct hda_codec *codec,
+ 	}
+ }
+ 
++static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
++					     const struct hda_fixup *fix, int action)
++{
++	/* Line2 = mic mute hotkey
++	   GPIO2 = mic mute LED */
++	static const struct hda_verb gpio_init[] = {
++		{ 0x01, AC_VERB_SET_GPIO_MASK, 0x04 },
++		{ 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04 },
++		{}
++	};
++
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		if (alc_register_micmute_input_device(codec) != 0)
++			return;
++
++		snd_hda_add_verbs(codec, gpio_init);
++		snd_hda_jack_detect_enable_callback(codec, 0x1b,
++						    gpio2_mic_hotkey_event);
++
++		spec->gen.cap_sync_hook = alc_fixup_gpio_mic_mute_hook;
++		spec->gpio_led = 0;
++		spec->mute_led_polarity = 0;
++		spec->gpio_mic_led_mask = 0x04;
++		return;
++	}
++
++	if (!spec->kb_dev)
++		return;
++
++	switch (action) {
++	case HDA_FIXUP_ACT_PROBE:
++		spec->init_amp = ALC_INIT_DEFAULT;
++		break;
++	case HDA_FIXUP_ACT_FREE:
++		input_unregister_device(spec->kb_dev);
++		spec->kb_dev = NULL;
++	}
++}
++
+ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
+ 				const struct hda_fixup *fix, int action)
+ {
+@@ -3720,6 +3772,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
+ 
+ static void alc_headset_mode_default(struct hda_codec *codec)
+ {
++	static struct coef_fw coef0225[] = {
++		UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
++		{}
++	};
+ 	static struct coef_fw coef0255[] = {
+ 		WRITE_COEF(0x45, 0xc089),
+ 		WRITE_COEF(0x45, 0xc489),
+@@ -3761,6 +3817,9 @@ static void alc_headset_mode_default(struct hda_codec *codec)
+ 	};
+ 
+ 	switch (codec->core.vendor_id) {
++	case 0x10ec0225:
++		alc_process_coef_fw(codec, coef0225);
++		break;
+ 	case 0x10ec0255:
+ 	case 0x10ec0256:
+ 		alc_process_coef_fw(codec, coef0255);
+@@ -4570,6 +4629,14 @@ enum {
+ 	ALC288_FIXUP_DISABLE_AAMIX,
+ 	ALC292_FIXUP_DELL_E7X,
+ 	ALC292_FIXUP_DISABLE_AAMIX,
++	ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++	ALC275_FIXUP_DELL_XPS,
++	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
++	ALC293_FIXUP_LENOVO_SPK_NOISE,
++	ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
++	ALC255_FIXUP_DELL_SPK_NOISE,
++	ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++	ALC280_FIXUP_HP_HEADSET_MIC,
+ };
+ 
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -5131,6 +5198,71 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC292_FIXUP_DISABLE_AAMIX
+ 	},
++	[ALC298_FIXUP_DELL1_MIC_NO_PRESENCE] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
++			{ 0x1a, 0x01a1913d }, /* use as headphone mic, without its own jack detect */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE
++	},
++	[ALC275_FIXUP_DELL_XPS] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* Enables internal speaker */
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x1f},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x00c0},
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x30},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x00b1},
++			{}
++		}
++	},
++	[ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* Disable pass-through path for FRONT 14h */
++			{0x20, AC_VERB_SET_COEF_INDEX, 0x36},
++			{0x20, AC_VERB_SET_PROC_COEF, 0x1737},
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
++	[ALC293_FIXUP_LENOVO_SPK_NOISE] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_THINKPAD_ACPI
++	},
++	[ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
++	},
++	[ALC255_FIXUP_DELL_SPK_NOISE] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
++	[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
++		.type = HDA_FIXUP_VERBS,
++		.v.verbs = (const struct hda_verb[]) {
++			/* Disable pass-through path for FRONT 14h */
++			{ 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
++			{ 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
++			{}
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
++	},
++	[ALC280_FIXUP_HP_HEADSET_MIC] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MIC,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5142,10 +5274,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
++	SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+ 	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
++	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
+ 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
+@@ -5169,6 +5303,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
++	SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -5228,6 +5364,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ 	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
++	SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5275,6 +5412,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
++	SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+ 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -5284,6 +5423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+ 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -5365,6 +5505,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+ 	{}
+ };
++#define ALC225_STANDARD_PINS \
++	{0x12, 0xb7a60130}, \
++	{0x21, 0x04211020}
+ 
+ #define ALC255_STANDARD_PINS \
+ 	{0x18, 0x411111f0}, \
+@@ -5414,7 +5557,20 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{0x1d, 0x40700001}, \
+ 	{0x1e, 0x411111f0}
+ 
++#define ALC298_STANDARD_PINS \
++	{0x18, 0x411111f0}, \
++	{0x19, 0x411111f0}, \
++	{0x1a, 0x411111f0}, \
++	{0x1e, 0x411111f0}, \
++	{0x1f, 0x411111f0}
++
+ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x14, 0x901701a0}),
++	SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC225_STANDARD_PINS,
++		{0x14, 0x901701b0}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x40300000},
+@@ -5708,6 +5864,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x16, 0x411111f0},
+ 		{0x18, 0x411111f0},
+ 		{0x19, 0x411111f0}),
++	SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
++		ALC298_STANDARD_PINS,
++		{0x12, 0x90a60130},
++		{0x13, 0x40000000},
++		{0x14, 0x411111f0},
++		{0x17, 0x90170140},
++		{0x1d, 0x4068a36d},
++		{0x21, 0x03211020}),
+ 	{}
+ };
+ 
+diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
+index c19e021ccf66..11246280945d 100644
+--- a/sound/pci/rme9652/hdsp.c
++++ b/sound/pci/rme9652/hdsp.c
+@@ -2878,7 +2878,7 @@ static int snd_hdsp_get_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+ {
+ 	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.enumerated.item[0] = hdsp_dds_offset(hdsp);
++	ucontrol->value.integer.value[0] = hdsp_dds_offset(hdsp);
+ 	return 0;
+ }
+ 
+@@ -2890,7 +2890,7 @@ static int snd_hdsp_put_dds_offset(struct snd_kcontrol *kcontrol, struct snd_ctl
+ 
+ 	if (!snd_hdsp_use_is_exclusive(hdsp))
+ 		return -EBUSY;
+-	val = ucontrol->value.enumerated.item[0];
++	val = ucontrol->value.integer.value[0];
+ 	spin_lock_irq(&hdsp->lock);
+ 	if (val != hdsp_dds_offset(hdsp))
+ 		change = (hdsp_set_dds_offset(hdsp, val) == 0) ? 1 : 0;
+diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
+index cb666c73712d..7f6190606f5e 100644
+--- a/sound/pci/rme9652/hdspm.c
++++ b/sound/pci/rme9652/hdspm.c
+@@ -1601,6 +1601,9 @@ static void hdspm_set_dds_value(struct hdspm *hdspm, int rate)
+ {
+ 	u64 n;
+ 
++	if (snd_BUG_ON(rate <= 0))
++		return;
++
+ 	if (rate >= 112000)
+ 		rate /= 4;
+ 	else if (rate >= 56000)
+@@ -2215,6 +2218,8 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
+ 		} else {
+ 			/* slave mode, return external sample rate */
+ 			rate = hdspm_external_sample_rate(hdspm);
++			if (!rate)
++				rate = hdspm->system_sample_rate;
+ 		}
+ 	}
+ 
+@@ -2260,8 +2265,11 @@ static int snd_hdspm_put_system_sample_rate(struct snd_kcontrol *kcontrol,
+ 					    ucontrol)
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
++	int rate = ucontrol->value.integer.value[0];
+ 
+-	hdspm_set_dds_value(hdspm, ucontrol->value.enumerated.item[0]);
++	if (rate < 27000 || rate > 207000)
++		return -EINVAL;
++	hdspm_set_dds_value(hdspm, ucontrol->value.integer.value[0]);
+ 	return 0;
+ }
+ 
+@@ -4449,7 +4457,7 @@ static int snd_hdspm_get_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ 
+-	ucontrol->value.enumerated.item[0] = hdspm->tco->term;
++	ucontrol->value.integer.value[0] = hdspm->tco->term;
+ 
+ 	return 0;
+ }
+@@ -4460,8 +4468,8 @@ static int snd_hdspm_put_tco_word_term(struct snd_kcontrol *kcontrol,
+ {
+ 	struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ 
+-	if (hdspm->tco->term != ucontrol->value.enumerated.item[0]) {
+-		hdspm->tco->term = ucontrol->value.enumerated.item[0];
++	if (hdspm->tco->term != ucontrol->value.integer.value[0]) {
++		hdspm->tco->term = ucontrol->value.integer.value[0];
+ 
+ 		hdspm_tco_write(hdspm);
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 37d8ababfc04..a4d03e5da3e0 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
++	case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ 	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
+index 11ccbb22ea2b..13d0458afc71 100644
+--- a/tools/perf/MANIFEST
++++ b/tools/perf/MANIFEST
+@@ -28,24 +28,20 @@ include/asm-generic/bitops/const_hweight.h
+ include/asm-generic/bitops/fls64.h
+ include/asm-generic/bitops/__fls.h
+ include/asm-generic/bitops/fls.h
+-include/linux/const.h
+ include/linux/perf_event.h
+ include/linux/rbtree.h
+ include/linux/list.h
+ include/linux/hash.h
+ include/linux/stringify.h
+-lib/find_next_bit.c
+ lib/hweight.c
+ lib/rbtree.c
+ include/linux/swab.h
+ arch/*/include/asm/unistd*.h
+-arch/*/include/asm/perf_regs.h
+ arch/*/include/uapi/asm/unistd*.h
+ arch/*/include/uapi/asm/perf_regs.h
+ arch/*/lib/memcpy*.S
+ arch/*/lib/memset*.S
+ include/linux/poison.h
+-include/linux/magic.h
+ include/linux/hw_breakpoint.h
+ include/linux/rbtree_augmented.h
+ include/uapi/linux/perf_event.h
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 950064a0942d..934d56f6803c 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1602,8 +1602,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
+ {
+ 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+-
+-	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
++	int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
++	int sz = nr_longs * sizeof(unsigned long);
+ 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
+ 	vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
+ 	vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
+diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
+index 44660aee335f..f84f5856520a 100644
+--- a/virt/kvm/async_pf.c
++++ b/virt/kvm/async_pf.c
+@@ -169,7 +169,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
+ 	 * do alloc nowait since if we are going to sleep anyway we
+ 	 * may as well sleep faulting in page
+ 	 */
+-	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
++	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
+ 	if (!work)
+ 		return 0;
+ 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-03-05 23:38 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-03-05 23:38 UTC (permalink / raw)
  To: gentoo-commits

commit:     4a919eff2d079c34d6f84bf9073c32ef11af9864
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Mar  5 23:38:27 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Mar  5 23:38:27 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=4a919eff

Linux patch 4.1.19

 0000_README             |    4 +
 1018_linux-4.1.19.patch | 7602 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7606 insertions(+)

diff --git a/0000_README b/0000_README
index ed66531..ad1d372 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,10 @@ Patch:  1017_linux-4.1.18.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.18
 
+Patch:  1018_linux-4.1.19.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.19
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1018_linux-4.1.19.patch b/1018_linux-4.1.19.patch
new file mode 100644
index 0000000..449c63a
--- /dev/null
+++ b/1018_linux-4.1.19.patch
@@ -0,0 +1,7602 @@
+diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
+index 071fb18dc57c..07fad3d2fc56 100644
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -1321,6 +1321,14 @@ accept_ra_from_local - BOOLEAN
+ 	   disabled if accept_ra_from_local is disabled
+                on a specific interface.
+ 
++accept_ra_min_hop_limit - INTEGER
++	Minimum hop limit Information in Router Advertisement.
++
++	Hop limit Information in Router Advertisement less than this
++	variable shall be ignored.
++
++	Default: 1
++
+ accept_ra_pinfo - BOOLEAN
+ 	Learn Prefix Information in Router Advertisement.
+ 
+diff --git a/Makefile b/Makefile
+index 001375cfd815..06107f683bbe 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 18
++SUBLEVEL = 19
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
+index 2dc6da70ae59..d7ed252708c5 100644
+--- a/arch/arm/common/icst.c
++++ b/arch/arm/common/icst.c
+@@ -16,7 +16,7 @@
+  */
+ #include <linux/module.h>
+ #include <linux/kernel.h>
+-
++#include <asm/div64.h>
+ #include <asm/hardware/icst.h>
+ 
+ /*
+@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
+ 
+ unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
+ {
+-	return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
++	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
++	u32 divisor = (vco.r + 2) * p->s2div[vco.s];
++
++	do_div(dividend, divisor);
++	return (unsigned long)dividend;
+ }
+ 
+ EXPORT_SYMBOL(icst_hz);
+@@ -58,6 +62,7 @@ icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
+ 
+ 		if (f > p->vco_min && f <= p->vco_max)
+ 			break;
++		i++;
+ 	} while (i < 8);
+ 
+ 	if (i >= 8)
+diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
+index 6499d93ae68d..47bc45a67e9b 100644
+--- a/arch/mips/include/asm/syscall.h
++++ b/arch/mips/include/asm/syscall.h
+@@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
+ 	/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
+ 	if ((config_enabled(CONFIG_32BIT) ||
+ 	    test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
+-	    (regs->regs[2] == __NR_syscall)) {
++	    (regs->regs[2] == __NR_syscall))
+ 		i++;
+-		n++;
+-	}
+ 
+ 	while (n--)
+ 		ret |= mips_get_syscall_arg(args++, task, regs, i++);
+diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
+index a52db28ecc1e..4457cb605356 100644
+--- a/arch/powerpc/include/asm/eeh.h
++++ b/arch/powerpc/include/asm/eeh.h
+@@ -79,6 +79,7 @@ struct pci_dn;
+ #define EEH_PE_KEEP		(1 << 8)	/* Keep PE on hotplug	*/
+ #define EEH_PE_CFG_RESTRICTED	(1 << 9)	/* Block config on error */
+ #define EEH_PE_REMOVED		(1 << 10)	/* Removed permanently	*/
++#define EEH_PE_PRI_BUS		(1 << 11)	/* Cached primary bus   */
+ 
+ struct eeh_pe {
+ 	int type;			/* PE type: PHB/Bus/Device	*/
+@@ -336,19 +337,13 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
+ 
+ #define eeh_dev_check_failure(x) (0)
+ 
+-static inline void eeh_addr_cache_build(void) { }
+-
+-static inline void eeh_add_device_early(struct pci_dn *pdn) { }
+-
+-static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
+-
+-static inline void eeh_add_device_late(struct pci_dev *dev) { }
+-
+-static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
+-
+-static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+-
+-static inline void eeh_remove_device(struct pci_dev *dev) { }
++#define eeh_addr_cache_build()
++#define eeh_add_device_early(pdn)
++#define eeh_add_device_tree_early(pdn)
++#define eeh_add_device_late(pdev)
++#define eeh_add_device_tree_late(pbus)
++#define eeh_add_sysfs_files(pbus)
++#define eeh_remove_device(pdev)
+ 
+ #define EEH_POSSIBLE_ERROR(val, type) (0)
+ #define EEH_IO_ERROR_VALUE(size) (-1UL)
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 24768ff3cb73..90cc67904dc6 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -561,6 +561,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
+ 	 */
+ 	eeh_pe_state_mark(pe, EEH_PE_KEEP);
+ 	if (bus) {
++		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
+ 		pci_lock_rescan_remove();
+ 		pcibios_remove_pci_devices(bus);
+ 		pci_unlock_rescan_remove();
+@@ -792,6 +793,7 @@ perm_error:
+ 	 * the their PCI config any more.
+ 	 */
+ 	if (frozen_bus) {
++		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
+ 		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ 
+ 		pci_lock_rescan_remove();
+@@ -875,6 +877,7 @@ static void eeh_handle_special_event(void)
+ 					continue;
+ 
+ 				/* Notify all devices to be down */
++				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
+ 				bus = eeh_pe_bus_get(phb_pe);
+ 				eeh_pe_dev_traverse(pe,
+ 					eeh_report_failure, NULL);
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 22f6d954ef89..c3e0420b8a42 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -906,7 +906,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
+ 		bus = pe->phb->bus;
+ 	} else if (pe->type & EEH_PE_BUS ||
+ 		   pe->type & EEH_PE_DEVICE) {
+-		if (pe->bus) {
++		if (pe->state & EEH_PE_PRI_BUS) {
+ 			bus = pe->bus;
+ 			goto out;
+ 		}
+diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
+index ce738ab3d5a9..abb396876b9a 100644
+--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
++++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
+@@ -455,9 +455,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
+ 	 * PCI devices of the PE are expected to be removed prior
+ 	 * to PE reset.
+ 	 */
+-	if (!edev->pe->bus)
++	if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
+ 		edev->pe->bus = pci_find_bus(hose->global_number,
+ 					     pdn->busno);
++		if (edev->pe->bus)
++			edev->pe->state |= EEH_PE_PRI_BUS;
++	}
+ 
+ 	/*
+ 	 * Enable EEH explicitly so that we will do EEH check
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 181c53bac3a7..62855ac37ab7 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -285,6 +285,9 @@ static noinline int vmalloc_fault(unsigned long address)
+ 	if (!pmd_k)
+ 		return -1;
+ 
++	if (pmd_huge(*pmd_k))
++		return 0;
++
+ 	pte_k = pte_offset_kernel(pmd_k, address);
+ 	if (!pte_present(*pte_k))
+ 		return -1;
+@@ -356,8 +359,6 @@ void vmalloc_sync_all(void)
+  * 64-bit:
+  *
+  *   Handle a fault on the vmalloc area
+- *
+- * This assumes no large pages in there.
+  */
+ static noinline int vmalloc_fault(unsigned long address)
+ {
+@@ -399,17 +400,23 @@ static noinline int vmalloc_fault(unsigned long address)
+ 	if (pud_none(*pud_ref))
+ 		return -1;
+ 
+-	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
++	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
+ 		BUG();
+ 
++	if (pud_huge(*pud))
++		return 0;
++
+ 	pmd = pmd_offset(pud, address);
+ 	pmd_ref = pmd_offset(pud_ref, address);
+ 	if (pmd_none(*pmd_ref))
+ 		return -1;
+ 
+-	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
+ 		BUG();
+ 
++	if (pmd_huge(*pmd))
++		return 0;
++
+ 	pte_ref = pte_offset_kernel(pmd_ref, address);
+ 	if (!pte_present(*pte_ref))
+ 		return -1;
+diff --git a/block/bio.c b/block/bio.c
+index 4441522ca339..cbce3e2208f4 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1122,9 +1122,12 @@ int bio_uncopy_user(struct bio *bio)
+ 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ 		/*
+ 		 * if we're in a workqueue, the request is orphaned, so
+-		 * don't copy into a random user address space, just free.
++		 * don't copy into a random user address space, just free
++		 * and return -EINTR so user space doesn't expect any data.
+ 		 */
+-		if (current->mm && bio_data_dir(bio) == READ)
++		if (!current->mm)
++			ret = -EINTR;
++		else if (bio_data_dir(bio) == READ)
+ 			ret = bio_copy_to_iter(bio, bmd->iter);
+ 		if (bmd->is_our_pages)
+ 			bio_free_pages(bio);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 5bc42f9b23f0..c0f03562a145 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -31,6 +31,11 @@ struct skcipher_sg_list {
+ 	struct scatterlist sg[0];
+ };
+ 
++struct skcipher_tfm {
++	struct crypto_ablkcipher *skcipher;
++	bool has_key;
++};
++
+ struct skcipher_ctx {
+ 	struct list_head tsgl;
+ 	struct af_alg_sgl rsgl;
+@@ -750,19 +755,139 @@ static struct proto_ops algif_skcipher_ops = {
+ 	.poll		=	skcipher_poll,
+ };
+ 
++static int skcipher_check_key(struct socket *sock)
++{
++	int err = 0;
++	struct sock *psk;
++	struct alg_sock *pask;
++	struct skcipher_tfm *tfm;
++	struct sock *sk = sock->sk;
++	struct alg_sock *ask = alg_sk(sk);
++
++	lock_sock(sk);
++	if (ask->refcnt)
++		goto unlock_child;
++
++	psk = ask->parent;
++	pask = alg_sk(ask->parent);
++	tfm = pask->private;
++
++	err = -ENOKEY;
++	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++	if (!tfm->has_key)
++		goto unlock;
++
++	if (!pask->refcnt++)
++		sock_hold(psk);
++
++	ask->refcnt = 1;
++	sock_put(psk);
++
++	err = 0;
++
++unlock:
++	release_sock(psk);
++unlock_child:
++	release_sock(sk);
++
++	return err;
++}
++
++static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++				  size_t size)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_sendmsg(sock, msg, size);
++}
++
++static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page,
++				       int offset, size_t size, int flags)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_sendpage(sock, page, offset, size, flags);
++}
++
++static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++				  size_t ignored, int flags)
++{
++	int err;
++
++	err = skcipher_check_key(sock);
++	if (err)
++		return err;
++
++	return skcipher_recvmsg(sock, msg, ignored, flags);
++}
++
++static struct proto_ops algif_skcipher_ops_nokey = {
++	.family		=	PF_ALG,
++
++	.connect	=	sock_no_connect,
++	.socketpair	=	sock_no_socketpair,
++	.getname	=	sock_no_getname,
++	.ioctl		=	sock_no_ioctl,
++	.listen		=	sock_no_listen,
++	.shutdown	=	sock_no_shutdown,
++	.getsockopt	=	sock_no_getsockopt,
++	.mmap		=	sock_no_mmap,
++	.bind		=	sock_no_bind,
++	.accept		=	sock_no_accept,
++	.setsockopt	=	sock_no_setsockopt,
++
++	.release	=	af_alg_release,
++	.sendmsg	=	skcipher_sendmsg_nokey,
++	.sendpage	=	skcipher_sendpage_nokey,
++	.recvmsg	=	skcipher_recvmsg_nokey,
++	.poll		=	skcipher_poll,
++};
++
+ static void *skcipher_bind(const char *name, u32 type, u32 mask)
+ {
+-	return crypto_alloc_ablkcipher(name, type, mask);
++	struct skcipher_tfm *tfm;
++	struct crypto_ablkcipher *skcipher;
++
++	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++	if (!tfm)
++		return ERR_PTR(-ENOMEM);
++
++	skcipher = crypto_alloc_ablkcipher(name, type, mask);
++	if (IS_ERR(skcipher)) {
++		kfree(tfm);
++		return ERR_CAST(skcipher);
++	}
++
++	tfm->skcipher = skcipher;
++
++	return tfm;
+ }
+ 
+ static void skcipher_release(void *private)
+ {
+-	crypto_free_ablkcipher(private);
++	struct skcipher_tfm *tfm = private;
++
++	crypto_free_ablkcipher(tfm->skcipher);
++	kfree(tfm);
+ }
+ 
+ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-	return crypto_ablkcipher_setkey(private, key, keylen);
++	struct skcipher_tfm *tfm = private;
++	int err;
++
++	err = crypto_ablkcipher_setkey(tfm->skcipher, key, keylen);
++	tfm->has_key = !err;
++
++	return err;
+ }
+ 
+ static void skcipher_wait(struct sock *sk)
+@@ -790,24 +915,26 @@ static void skcipher_sock_destruct(struct sock *sk)
+ 	af_alg_release_parent(sk);
+ }
+ 
+-static int skcipher_accept_parent(void *private, struct sock *sk)
++static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
+ {
+ 	struct skcipher_ctx *ctx;
+ 	struct alg_sock *ask = alg_sk(sk);
+-	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
++	struct skcipher_tfm *tfm = private;
++	struct crypto_ablkcipher *skcipher = tfm->skcipher;
++	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher);
+ 
+ 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ 	if (!ctx)
+ 		return -ENOMEM;
+ 
+-	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
++	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher),
+ 			       GFP_KERNEL);
+ 	if (!ctx->iv) {
+ 		sock_kfree_s(sk, ctx, len);
+ 		return -ENOMEM;
+ 	}
+ 
+-	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
++	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher));
+ 
+ 	INIT_LIST_HEAD(&ctx->tsgl);
+ 	ctx->len = len;
+@@ -820,7 +947,7 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
+ 
+ 	ask->private = ctx;
+ 
+-	ablkcipher_request_set_tfm(&ctx->req, private);
++	ablkcipher_request_set_tfm(&ctx->req, skcipher);
+ 	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 					af_alg_complete, &ctx->completion);
+ 
+@@ -829,12 +956,24 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
+ 	return 0;
+ }
+ 
++static int skcipher_accept_parent(void *private, struct sock *sk)
++{
++	struct skcipher_tfm *tfm = private;
++
++	if (!tfm->has_key)
++		return -ENOKEY;
++
++	return skcipher_accept_parent_nokey(private, sk);
++}
++
+ static const struct af_alg_type algif_type_skcipher = {
+ 	.bind		=	skcipher_bind,
+ 	.release	=	skcipher_release,
+ 	.setkey		=	skcipher_setkey,
+ 	.accept		=	skcipher_accept_parent,
++	.accept_nokey	=	skcipher_accept_parent_nokey,
+ 	.ops		=	&algif_skcipher_ops,
++	.ops_nokey	=	&algif_skcipher_ops_nokey,
+ 	.name		=	"skcipher",
+ 	.owner		=	THIS_MODULE
+ };
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index edf2e3ea1740..6a050e12fcdf 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		if (link->dump == NULL)
+ 			return -EINVAL;
+ 
++		down_read(&crypto_alg_sem);
+ 		list_for_each_entry(alg, &crypto_alg_list, cra_list)
+ 			dump_alloc += CRYPTO_REPORT_MAXSIZE;
+ 
+@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 				.done = link->done,
+ 				.min_dump_alloc = dump_alloc,
+ 			};
+-			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
++			err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ 		}
++		up_read(&crypto_alg_sem);
++
++		return err;
+ 	}
+ 
+ 	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index e6ea912aee31..666fd8a1500a 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -262,6 +262,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
+ 	{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
++	{ PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
+ 	{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 287c4ba0219f..49840264dd57 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -495,8 +495,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
+ 		}
+ 	}
+ 
+-	/* fabricate port_map from cap.nr_ports */
+-	if (!port_map) {
++	/* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
++	if (!port_map && vers < 0x10300) {
+ 		port_map = (1 << ahci_nr_ports(cap)) - 1;
+ 		dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
+ 
+diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
+index cdf6215a9a22..7dbba387d12a 100644
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
+ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ {
+ 	struct ata_port *ap = qc->ap;
+-	unsigned long flags;
+ 
+ 	if (ap->ops->error_handler) {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 			/* EH might have kicked in while host lock is
+ 			 * released.
+ 			 */
+@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 				} else
+ 					ata_port_freeze(ap);
+ 			}
+-
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else {
+ 			if (likely(!(qc->err_mask & AC_ERR_HSM)))
+ 				ata_qc_complete(qc);
+@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
+ 		}
+ 	} else {
+ 		if (in_wq) {
+-			spin_lock_irqsave(ap->lock, flags);
+ 			ata_sff_irq_on(ap);
+ 			ata_qc_complete(qc);
+-			spin_unlock_irqrestore(ap->lock, flags);
+ 		} else
+ 			ata_qc_complete(qc);
+ 	}
+@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
+ {
+ 	struct ata_link *link = qc->dev->link;
+ 	struct ata_eh_info *ehi = &link->eh_info;
+-	unsigned long flags = 0;
+ 	int poll_next;
+ 
++	lockdep_assert_held(ap->lock);
++
+ 	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
+ 
+ 	/* Make sure ata_sff_qc_issue() does not throw things
+@@ -1112,14 +1106,6 @@ fsm_start:
+ 			}
+ 		}
+ 
+-		/* Send the CDB (atapi) or the first data block (ata pio out).
+-		 * During the state transition, interrupt handler shouldn't
+-		 * be invoked before the data transfer is complete and
+-		 * hsm_task_state is changed. Hence, the following locking.
+-		 */
+-		if (in_wq)
+-			spin_lock_irqsave(ap->lock, flags);
+-
+ 		if (qc->tf.protocol == ATA_PROT_PIO) {
+ 			/* PIO data out protocol.
+ 			 * send first data block.
+@@ -1135,9 +1121,6 @@ fsm_start:
+ 			/* send CDB */
+ 			atapi_send_cdb(ap, qc);
+ 
+-		if (in_wq)
+-			spin_unlock_irqrestore(ap->lock, flags);
+-
+ 		/* if polling, ata_sff_pio_task() handles the rest.
+ 		 * otherwise, interrupt handler takes over from here.
+ 		 */
+@@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
+ 	u8 status;
+ 	int poll_next;
+ 
++	spin_lock_irq(ap->lock);
++
+ 	BUG_ON(ap->sff_pio_task_link == NULL);
+ 	/* qc can be NULL if timeout occurred */
+ 	qc = ata_qc_from_tag(ap, link->active_tag);
+ 	if (!qc) {
+ 		ap->sff_pio_task_link = NULL;
+-		return;
++		goto out_unlock;
+ 	}
+ 
+ fsm_start:
+@@ -1381,11 +1366,14 @@ fsm_start:
+ 	 */
+ 	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
+ 	if (status & ATA_BUSY) {
++		spin_unlock_irq(ap->lock);
+ 		ata_msleep(ap, 2);
++		spin_lock_irq(ap->lock);
++
+ 		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
+ 		if (status & ATA_BUSY) {
+ 			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
+-			return;
++			goto out_unlock;
+ 		}
+ 	}
+ 
+@@ -1402,6 +1390,8 @@ fsm_start:
+ 	 */
+ 	if (poll_next)
+ 		goto fsm_start;
++out_unlock:
++	spin_unlock_irq(ap->lock);
+ }
+ 
+ /**
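
The libata-sff rework inverts the old locking scheme: instead of the state machine taking ap->lock piecemeal when called from the workqueue, ata_sff_pio_task() now holds it for the whole polling step, ata_sff_hsm_move() documents that requirement with lockdep_assert_held(), and the lock is dropped only around the sleeping ata_msleep() call. A hedged sketch of that pattern with a hypothetical my_lock and helpers:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>
    #include <linux/delay.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(my_lock);

    static bool device_busy(void)
    {
            return false;                   /* placeholder for a status read */
    }

    static void my_fsm_move(void)
    {
            lockdep_assert_held(&my_lock);  /* callers must hold the lock */
            /* ... advance the hardware state machine ... */
    }

    static void my_poll_task(void)
    {
            spin_lock_irq(&my_lock);

            if (device_busy()) {
                    spin_unlock_irq(&my_lock);
                    msleep(2);              /* sleeping call: lock must be dropped */
                    spin_lock_irq(&my_lock);
            }

            my_fsm_move();

            spin_unlock_irq(&my_lock);
    }
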
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 0f9a9dc06a83..fb16d812c8f5 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -260,7 +260,11 @@ static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_ctx *ctx)
+ 
+ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
+ {
+-	clk_prepare_enable(dd->iclk);
++	int err;
++
++	err = clk_prepare_enable(dd->iclk);
++	if (err)
++		return err;
+ 
+ 	if (!(dd->flags & AES_FLAGS_INIT)) {
+ 		atmel_aes_write(dd, AES_CR, AES_CR_SWRST);
+@@ -1320,7 +1324,6 @@ static int atmel_aes_probe(struct platform_device *pdev)
+ 	struct crypto_platform_data *pdata;
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *aes_res;
+-	unsigned long aes_phys_size;
+ 	int err;
+ 
+ 	pdata = pdev->dev.platform_data;
+@@ -1337,7 +1340,7 @@ static int atmel_aes_probe(struct platform_device *pdev)
+ 		goto aes_dd_err;
+ 	}
+ 
+-	aes_dd = kzalloc(sizeof(struct atmel_aes_dev), GFP_KERNEL);
++	aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL);
+ 	if (aes_dd == NULL) {
+ 		dev_err(dev, "unable to alloc data struct.\n");
+ 		err = -ENOMEM;
+@@ -1368,36 +1371,35 @@ static int atmel_aes_probe(struct platform_device *pdev)
+ 		goto res_err;
+ 	}
+ 	aes_dd->phys_base = aes_res->start;
+-	aes_phys_size = resource_size(aes_res);
+ 
+ 	/* Get the IRQ */
+ 	aes_dd->irq = platform_get_irq(pdev,  0);
+ 	if (aes_dd->irq < 0) {
+ 		dev_err(dev, "no IRQ resource info\n");
+ 		err = aes_dd->irq;
+-		goto aes_irq_err;
++		goto res_err;
+ 	}
+ 
+-	err = request_irq(aes_dd->irq, atmel_aes_irq, IRQF_SHARED, "atmel-aes",
+-						aes_dd);
++	err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq,
++			       IRQF_SHARED, "atmel-aes", aes_dd);
+ 	if (err) {
+ 		dev_err(dev, "unable to request aes irq.\n");
+-		goto aes_irq_err;
++		goto res_err;
+ 	}
+ 
+ 	/* Initializing the clock */
+-	aes_dd->iclk = clk_get(&pdev->dev, "aes_clk");
++	aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk");
+ 	if (IS_ERR(aes_dd->iclk)) {
+ 		dev_err(dev, "clock initialization failed.\n");
+ 		err = PTR_ERR(aes_dd->iclk);
+-		goto clk_err;
++		goto res_err;
+ 	}
+ 
+-	aes_dd->io_base = ioremap(aes_dd->phys_base, aes_phys_size);
++	aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res);
+ 	if (!aes_dd->io_base) {
+ 		dev_err(dev, "can't ioremap\n");
+ 		err = -ENOMEM;
+-		goto aes_io_err;
++		goto res_err;
+ 	}
+ 
+ 	atmel_aes_hw_version_init(aes_dd);
+@@ -1434,17 +1436,9 @@ err_algs:
+ err_aes_dma:
+ 	atmel_aes_buff_cleanup(aes_dd);
+ err_aes_buff:
+-	iounmap(aes_dd->io_base);
+-aes_io_err:
+-	clk_put(aes_dd->iclk);
+-clk_err:
+-	free_irq(aes_dd->irq, aes_dd);
+-aes_irq_err:
+ res_err:
+ 	tasklet_kill(&aes_dd->done_task);
+ 	tasklet_kill(&aes_dd->queue_task);
+-	kfree(aes_dd);
+-	aes_dd = NULL;
+ aes_dd_err:
+ 	dev_err(dev, "initialization failed.\n");
+ 
+@@ -1469,16 +1463,6 @@ static int atmel_aes_remove(struct platform_device *pdev)
+ 
+ 	atmel_aes_dma_cleanup(aes_dd);
+ 
+-	iounmap(aes_dd->io_base);
+-
+-	clk_put(aes_dd->iclk);
+-
+-	if (aes_dd->irq > 0)
+-		free_irq(aes_dd->irq, aes_dd);
+-
+-	kfree(aes_dd);
+-	aes_dd = NULL;
+-
+ 	return 0;
+ }
+ 
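
The atmel-aes conversion swaps manual resource management (kzalloc/request_irq/clk_get/ioremap plus a ladder of error labels) for device-managed devm_* variants, which the driver core releases automatically on probe failure or unbind. A condensed sketch of the resulting probe shape; my_irq_handler and the "my_clk" id are assumptions, and note that devm_ioremap_resource() reports failure via ERR_PTR() rather than NULL:

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>
    #include <linux/clk.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static irqreturn_t my_irq_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;             /* placeholder handler */
    }

    static int my_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;
            struct clk *clk;
            int irq, err;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);   /* ERR_PTR convention, not NULL */

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            err = devm_request_irq(&pdev->dev, irq, my_irq_handler,
                                   IRQF_SHARED, "my-device", pdev);
            if (err)
                    return err;

            clk = devm_clk_get(&pdev->dev, "my_clk");
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return 0;                       /* nothing to unwind by hand */
    }
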
+diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
+index 5b35433c5399..a71c97c03c39 100644
+--- a/drivers/crypto/atmel-sha.c
++++ b/drivers/crypto/atmel-sha.c
+@@ -783,7 +783,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
+ 	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
+ 			SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
+ 
+-	clk_disable_unprepare(dd->iclk);
++	clk_disable(dd->iclk);
+ 
+ 	if (req->base.complete)
+ 		req->base.complete(&req->base, err);
+@@ -794,7 +794,11 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
+ 
+ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
+ {
+-	clk_prepare_enable(dd->iclk);
++	int err;
++
++	err = clk_enable(dd->iclk);
++	if (err)
++		return err;
+ 
+ 	if (!(SHA_FLAGS_INIT & dd->flags)) {
+ 		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
+@@ -819,7 +823,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
+ 	dev_info(dd->dev,
+ 			"version: 0x%x\n", dd->hw_version);
+ 
+-	clk_disable_unprepare(dd->iclk);
++	clk_disable(dd->iclk);
+ }
+ 
+ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
+@@ -1345,11 +1349,9 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 	struct crypto_platform_data	*pdata;
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *sha_res;
+-	unsigned long sha_phys_size;
+ 	int err;
+ 
+-	sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+-				GFP_KERNEL);
++	sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL);
+ 	if (sha_dd == NULL) {
+ 		dev_err(dev, "unable to alloc data struct.\n");
+ 		err = -ENOMEM;
+@@ -1378,7 +1380,6 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 		goto res_err;
+ 	}
+ 	sha_dd->phys_base = sha_res->start;
+-	sha_phys_size = resource_size(sha_res);
+ 
+ 	/* Get the IRQ */
+ 	sha_dd->irq = platform_get_irq(pdev,  0);
+@@ -1388,28 +1389,32 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 		goto res_err;
+ 	}
+ 
+-	err = request_irq(sha_dd->irq, atmel_sha_irq, IRQF_SHARED, "atmel-sha",
+-						sha_dd);
++	err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq,
++			       IRQF_SHARED, "atmel-sha", sha_dd);
+ 	if (err) {
+ 		dev_err(dev, "unable to request sha irq.\n");
+ 		goto res_err;
+ 	}
+ 
+ 	/* Initializing the clock */
+-	sha_dd->iclk = clk_get(&pdev->dev, "sha_clk");
++	sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk");
+ 	if (IS_ERR(sha_dd->iclk)) {
+ 		dev_err(dev, "clock initialization failed.\n");
+ 		err = PTR_ERR(sha_dd->iclk);
+-		goto clk_err;
++		goto res_err;
+ 	}
+ 
+-	sha_dd->io_base = ioremap(sha_dd->phys_base, sha_phys_size);
++	sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res);
+ 	if (!sha_dd->io_base) {
+ 		dev_err(dev, "can't ioremap\n");
+ 		err = -ENOMEM;
+-		goto sha_io_err;
++		goto res_err;
+ 	}
+ 
++	err = clk_prepare(sha_dd->iclk);
++	if (err)
++		goto res_err;
++
+ 	atmel_sha_hw_version_init(sha_dd);
+ 
+ 	atmel_sha_get_cap(sha_dd);
+@@ -1421,12 +1426,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
+ 			if (IS_ERR(pdata)) {
+ 				dev_err(&pdev->dev, "platform data not available\n");
+ 				err = PTR_ERR(pdata);
+-				goto err_pdata;
++				goto iclk_unprepare;
+ 			}
+ 		}
+ 		if (!pdata->dma_slave) {
+ 			err = -ENXIO;
+-			goto err_pdata;
++			goto iclk_unprepare;
+ 		}
+ 		err = atmel_sha_dma_init(sha_dd, pdata);
+ 		if (err)
+@@ -1457,12 +1462,8 @@ err_algs:
+ 	if (sha_dd->caps.has_dma)
+ 		atmel_sha_dma_cleanup(sha_dd);
+ err_sha_dma:
+-err_pdata:
+-	iounmap(sha_dd->io_base);
+-sha_io_err:
+-	clk_put(sha_dd->iclk);
+-clk_err:
+-	free_irq(sha_dd->irq, sha_dd);
++iclk_unprepare:
++	clk_unprepare(sha_dd->iclk);
+ res_err:
+ 	tasklet_kill(&sha_dd->done_task);
+ sha_dd_err:
+@@ -1489,6 +1490,8 @@ static int atmel_sha_remove(struct platform_device *pdev)
+ 	if (sha_dd->caps.has_dma)
+ 		atmel_sha_dma_cleanup(sha_dd);
+ 
++	clk_unprepare(sha_dd->iclk);
++
+ 	iounmap(sha_dd->io_base);
+ 
+ 	clk_put(sha_dd->iclk);
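
The sha driver additionally splits clk_prepare_enable() apart: the possibly-sleeping clk_prepare() happens once at probe, the per-request paths use the cheap, atomic-safe clk_enable()/clk_disable() pair, and clk_unprepare() is deferred to remove and the probe error path. The split in isolation, assuming clk was obtained earlier with devm_clk_get():

    #include <linux/clk.h>

    static int my_clk_probe(struct clk *clk)
    {
            return clk_prepare(clk);        /* may sleep: once, at probe time */
    }

    static int my_request_start(struct clk *clk)
    {
            return clk_enable(clk);         /* fast path, atomic-safe */
    }

    static void my_request_finish(struct clk *clk)
    {
            clk_disable(clk);
    }

    static void my_clk_remove(struct clk *clk)
    {
            clk_unprepare(clk);             /* balances the probe-time prepare */
    }
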
+diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
+index ca2999709eb4..2c7a628d0375 100644
+--- a/drivers/crypto/atmel-tdes.c
++++ b/drivers/crypto/atmel-tdes.c
+@@ -218,7 +218,11 @@ static struct atmel_tdes_dev *atmel_tdes_find_dev(struct atmel_tdes_ctx *ctx)
+ 
+ static int atmel_tdes_hw_init(struct atmel_tdes_dev *dd)
+ {
+-	clk_prepare_enable(dd->iclk);
++	int err;
++
++	err = clk_prepare_enable(dd->iclk);
++	if (err)
++		return err;
+ 
+ 	if (!(dd->flags & TDES_FLAGS_INIT)) {
+ 		atmel_tdes_write(dd, TDES_CR, TDES_CR_SWRST);
+@@ -1355,7 +1359,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
+ 	struct crypto_platform_data	*pdata;
+ 	struct device *dev = &pdev->dev;
+ 	struct resource *tdes_res;
+-	unsigned long tdes_phys_size;
+ 	int err;
+ 
+ 	tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
+@@ -1389,7 +1392,6 @@ static int atmel_tdes_probe(struct platform_device *pdev)
+ 		goto res_err;
+ 	}
+ 	tdes_dd->phys_base = tdes_res->start;
+-	tdes_phys_size = resource_size(tdes_res);
+ 
+ 	/* Get the IRQ */
+ 	tdes_dd->irq = platform_get_irq(pdev,  0);
+@@ -1399,26 +1401,26 @@ static int atmel_tdes_probe(struct platform_device *pdev)
+ 		goto res_err;
+ 	}
+ 
+-	err = request_irq(tdes_dd->irq, atmel_tdes_irq, IRQF_SHARED,
+-			"atmel-tdes", tdes_dd);
++	err = devm_request_irq(&pdev->dev, tdes_dd->irq, atmel_tdes_irq,
++			       IRQF_SHARED, "atmel-tdes", tdes_dd);
+ 	if (err) {
+ 		dev_err(dev, "unable to request tdes irq.\n");
+-		goto tdes_irq_err;
++		goto res_err;
+ 	}
+ 
+ 	/* Initializing the clock */
+-	tdes_dd->iclk = clk_get(&pdev->dev, "tdes_clk");
++	tdes_dd->iclk = devm_clk_get(&pdev->dev, "tdes_clk");
+ 	if (IS_ERR(tdes_dd->iclk)) {
+ 		dev_err(dev, "clock initialization failed.\n");
+ 		err = PTR_ERR(tdes_dd->iclk);
+-		goto clk_err;
++		goto res_err;
+ 	}
+ 
+-	tdes_dd->io_base = ioremap(tdes_dd->phys_base, tdes_phys_size);
++	tdes_dd->io_base = devm_ioremap_resource(&pdev->dev, tdes_res);
+ 	if (!tdes_dd->io_base) {
+ 		dev_err(dev, "can't ioremap\n");
+ 		err = -ENOMEM;
+-		goto tdes_io_err;
++		goto res_err;
+ 	}
+ 
+ 	atmel_tdes_hw_version_init(tdes_dd);
+@@ -1474,12 +1476,6 @@ err_tdes_dma:
+ err_pdata:
+ 	atmel_tdes_buff_cleanup(tdes_dd);
+ err_tdes_buff:
+-	iounmap(tdes_dd->io_base);
+-tdes_io_err:
+-	clk_put(tdes_dd->iclk);
+-clk_err:
+-	free_irq(tdes_dd->irq, tdes_dd);
+-tdes_irq_err:
+ res_err:
+ 	tasklet_kill(&tdes_dd->done_task);
+ 	tasklet_kill(&tdes_dd->queue_task);
+@@ -1510,13 +1506,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
+ 
+ 	atmel_tdes_buff_cleanup(tdes_dd);
+ 
+-	iounmap(tdes_dd->io_base);
+-
+-	clk_put(tdes_dd->iclk);
+-
+-	if (tdes_dd->irq >= 0)
+-		free_irq(tdes_dd->irq, tdes_dd);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 303d937d63c7..ebffc744cb1b 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+ 
+ 	/* Enable interrupts */
+ 	channel_set_bit(dw, MASK.XFER, dwc->mask);
+-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+ 
+ 	dwc->initialized = true;
+@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ 
+ 		spin_unlock_irqrestore(&dwc->lock, flags);
+ 	}
++
++	/* Re-enable interrupts */
++	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ }
+ 
+ /* ------------------------------------------------------------------------- */
+@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
+ 			dwc_scan_descriptors(dw, dwc);
+ 	}
+ 
+-	/*
+-	 * Re-enable interrupts.
+-	 */
++	/* Re-enable interrupts */
+ 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+-	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+ 
+@@ -1256,6 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
++	struct dw_dma		*dw = to_dw_dma(chan->device);
+ 	unsigned long		flags;
+ 
+ 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1264,7 +1264,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+ 	}
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
++
++	/* Enable interrupts to perform cyclic transfer */
++	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
++
+ 	dwc_dostart(dwc, dwc->cdesc->desc[0]);
++
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+index d2cd8d5b27a1..82f8e20cca74 100644
+--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
++++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+@@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	gpio = *data++;
+ 
+ 	/* pull up/down */
+-	action = *data++;
++	action = *data++ & 1;
++
++	if (gpio >= ARRAY_SIZE(gtable)) {
++		DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
++		goto out;
++	}
+ 
+ 	function = gtable[gpio].function_reg;
+ 	pad = gtable[gpio].pad_reg;
+@@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
+ 	vlv_gpio_nc_write(dev_priv, pad, val);
+ 	mutex_unlock(&dev_priv->dpio_lock);
+ 
++out:
+ 	return data;
+ }
+ 
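
The i915 change is a plain bounds check: gpio is parsed from VBT sequence bytes, i.e. untrusted firmware data, and must be validated against the table size before indexing (the action byte is also masked down to its single meaningful bit). In miniature, with an illustrative table:

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct gpio_entry {
            u16 function_reg;
            u16 pad_reg;
    };

    static const struct gpio_entry example_table[8];    /* illustrative size */

    static int example_lookup(unsigned int gpio)
    {
            if (gpio >= ARRAY_SIZE(example_table))
                    return -EINVAL;         /* reject out-of-range index */

            return example_table[gpio].function_reg;
    }
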
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 7354a4cda59d..3aefaa058f0c 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
+ 		       cmd->command_size))
+ 		return -EFAULT;
+ 
+-	reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
++	reloc_info = kmalloc_array(cmd->relocs_num,
++				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
+ 	if (!reloc_info)
+ 		return -ENOMEM;
+ 
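
The qxl change replaces an open-coded sizeof(x) * n allocation, where a large userspace-supplied relocs_num could overflow the multiplication and yield an undersized buffer, with kmalloc_array(), which fails cleanly on overflow:

    #include <linux/slab.h>

    /* Returns NULL both on allocation failure and when n * size would
     * overflow, instead of silently allocating a short buffer.
     */
    static void *alloc_array_checked(size_t n, size_t size)
    {
            return kmalloc_array(n, size, GFP_KERNEL);
    }
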
+diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
+index c507896aca45..197b157b73d0 100644
+--- a/drivers/gpu/drm/radeon/radeon_sa.c
++++ b/drivers/gpu/drm/radeon/radeon_sa.c
+@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
+ 			/* see if we can skip over some allocations */
+ 		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ 
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_ref(fences[i]);
++
+ 		spin_unlock(&sa_manager->wq.lock);
+ 		r = radeon_fence_wait_any(rdev, fences, false);
++		for (i = 0; i < RADEON_NUM_RINGS; ++i)
++			radeon_fence_unref(&fences[i]);
+ 		spin_lock(&sa_manager->wq.lock);
+ 		/* if we have nothing to wait for block */
+ 		if (r == -ENOENT) {
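
The radeon_sa fix pins the fences before the lock protecting them is released: radeon_fence_wait_any() sleeps, and without a reference another thread could free a fence mid-wait. The general shape, with get_ref()/put_ref()/wait_on_all() as hypothetical stand-ins for the radeon fence helpers:

    #include <linux/spinlock.h>

    struct obj;
    static void get_ref(struct obj *o);
    static void put_ref(struct obj *o);
    static void wait_on_all(struct obj **objs, int n);

    static void wait_safely(struct obj **objs, int n, spinlock_t *lock)
    {
            int i;

            spin_lock(lock);
            /* ... objs[] collected under the lock ... */
            for (i = 0; i < n; i++)
                    get_ref(objs[i]);       /* pin before dropping the lock */
            spin_unlock(lock);

            wait_on_all(objs, n);           /* may sleep; refs keep objs alive */

            for (i = 0; i < n; i++)
                    put_ref(objs[i]);
    }
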
+diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
+index edafd3c2b170..f5c0590bbf73 100644
+--- a/drivers/gpu/drm/radeon/radeon_ttm.c
++++ b/drivers/gpu/drm/radeon/radeon_ttm.c
+@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+ 						       0, PAGE_SIZE,
+ 						       PCI_DMA_BIDIRECTIONAL);
+ 		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+-			while (--i) {
++			while (i--) {
+ 				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ 					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ 				gtt->ttm.dma_address[i] = 0;
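
The one-character radeon_ttm fix is the classic unwind off-by-one: when mapping page i fails, pages 0..i-1 must be unmapped. "while (--i)" never visits index 0 and misbehaves when i is already 0, whereas "while (i--)" walks i-1 down to 0 and does nothing when i is 0. Isolated, with hypothetical helpers:

    static int map_one(int i);
    static void unmap_one(int i);

    static int map_all(int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (map_one(i) < 0) {
                            while (i--)     /* undoes i-1 .. 0, then stops */
                                    unmap_one(i);
                            return -1;
                    }
            }
            return 0;
    }
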
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index c32a934f7693..353e2ab090ee 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1349,7 +1349,7 @@ sequence_cmd:
+ 	if (!rc && dump_payload == false && unsol_data)
+ 		iscsit_set_unsoliticed_dataout(cmd);
+ 	else if (dump_payload && imm_data)
+-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++		target_put_sess_cmd(&cmd->se_cmd);
+ 
+ 	return 0;
+ }
+@@ -1774,7 +1774,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
+ 			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+ 				struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+-				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++				target_put_sess_cmd(se_cmd);
+ 			}
+ 		}
+ 
+@@ -1947,7 +1947,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
+ 	spin_unlock_bh(&cmd->istate_lock);
+ 
+ 	if (ret) {
+-		target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++		target_put_sess_cmd(se_cmd);
+ 		transport_send_check_condition_and_sense(se_cmd,
+ 							 se_cmd->pi_err, 0);
+ 	} else {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 9b84b4c0a000..6fbc7bc824d2 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1334,7 +1334,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+ 
+ 		BUG_ON(ch->sess == NULL);
+ 
+-		target_put_sess_cmd(ch->sess, &ioctx->cmd);
++		target_put_sess_cmd(&ioctx->cmd);
+ 		goto out;
+ 	}
+ 
+@@ -1365,11 +1365,11 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+ 		 * not been received in time.
+ 		 */
+ 		srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+-		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++		target_put_sess_cmd(&ioctx->cmd);
+ 		break;
+ 	case SRPT_STATE_MGMT_RSP_SENT:
+ 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+-		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++		target_put_sess_cmd(&ioctx->cmd);
+ 		break;
+ 	default:
+ 		WARN(1, "Unexpected command state (%d)", state);
+@@ -1679,7 +1679,7 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
+ 	struct srpt_send_ioctx *ioctx = container_of(cmd,
+ 				struct srpt_send_ioctx, cmd);
+ 
+-	return target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++	return target_put_sess_cmd(&ioctx->cmd);
+ }
+ 
+ /**
+@@ -3074,7 +3074,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
+ 		       ioctx->tag);
+ 		srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ 		srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+-		target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd);
++		target_put_sess_cmd(&ioctx->cmd);
+ 	}
+ }
+ 
+diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
+index e272f06258ce..a3f0f5a47490 100644
+--- a/drivers/input/mouse/vmmouse.c
++++ b/drivers/input/mouse/vmmouse.c
+@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
+ 	priv->abs_dev = abs_dev;
+ 	psmouse->private = priv;
+ 
+-	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+-
+ 	/* Set up and register absolute device */
+ 	snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
+ 		 psmouse->ps2dev.serio->phys);
+@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
+ 	abs_dev->id.version = psmouse->model;
+ 	abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
+ 
+-	error = input_register_device(priv->abs_dev);
+-	if (error)
+-		goto init_fail;
+-
+ 	/* Set absolute device capabilities */
+ 	input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
+ 	input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
+@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
+ 	input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
+ 	input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
+ 
++	error = input_register_device(priv->abs_dev);
++	if (error)
++		goto init_fail;
++
++	/* Add wheel capability to the relative device */
++	input_set_capability(rel_dev, EV_REL, REL_WHEEL);
++
+ 	psmouse->protocol_handler = vmmouse_process_byte;
+ 	psmouse->disconnect = vmmouse_disconnect;
+ 	psmouse->reconnect = vmmouse_reconnect;
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index 9847613085e1..5a2ec39e1fd9 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1342,7 +1342,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
+ 
+ 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ 
+-	sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
++	sts =  readl(iommu->reg + DMAR_GSTS_REG);
+ 	if (!(sts & DMA_GSTS_QIES))
+ 		goto end;
+ 
+diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
+index 5709ae9c3e77..04b39be8f1f3 100644
+--- a/drivers/iommu/intel_irq_remapping.c
++++ b/drivers/iommu/intel_irq_remapping.c
+@@ -544,7 +544,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
+ 
+ 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
+ 
+-	sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
++	sts = readl(iommu->reg + DMAR_GSTS_REG);
+ 	if (!(sts & DMA_GSTS_IRES))
+ 		goto end;
+ 
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 72ba774df7a7..bd744e31c434 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
+ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
+ 						struct rtnl_link_stats64 *stats);
+ static void bond_slave_arr_handler(struct work_struct *work);
++static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
++				  int mod);
+ 
+ /*---------------------------- General routines -----------------------------*/
+ 
+@@ -2397,7 +2399,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		 struct slave *slave)
+ {
+ 	struct arphdr *arp = (struct arphdr *)skb->data;
+-	struct slave *curr_active_slave;
++	struct slave *curr_active_slave, *curr_arp_slave;
+ 	unsigned char *arp_ptr;
+ 	__be32 sip, tip;
+ 	int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
+@@ -2444,26 +2446,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ 		     &sip, &tip);
+ 
+ 	curr_active_slave = rcu_dereference(bond->curr_active_slave);
++	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
+ 
+-	/* Backup slaves won't see the ARP reply, but do come through
+-	 * here for each ARP probe (so we swap the sip/tip to validate
+-	 * the probe).  In a "redundant switch, common router" type of
+-	 * configuration, the ARP probe will (hopefully) travel from
+-	 * the active, through one switch, the router, then the other
+-	 * switch before reaching the backup.
++	/* We 'trust' the received ARP enough to validate it if:
++	 *
++	 * (a) the slave receiving the ARP is active (which includes the
++	 * current ARP slave, if any), or
++	 *
++	 * (b) the receiving slave isn't active, but there is a currently
++	 * active slave and it received valid arp reply(s) after it became
++	 * the currently active slave, or
++	 *
++	 * (c) there is an ARP slave that sent an ARP during the prior ARP
++	 * interval, and we receive an ARP reply on any slave.  We accept
++	 * these because switch FDB update delays may deliver the ARP
++	 * reply to a slave other than the sender of the ARP request.
+ 	 *
+-	 * We 'trust' the arp requests if there is an active slave and
+-	 * it received valid arp reply(s) after it became active. This
+-	 * is done to avoid endless looping when we can't reach the
++	 * Note: for (b), backup slaves are receiving the broadcast ARP
++	 * request, not a reply.  This request passes from the sending
++	 * slave through the L2 switch(es) to the receiving slave.  Since
++	 * this is checking the request, sip/tip are swapped for
++	 * validation.
++	 *
++	 * This is done to avoid endless looping when we can't reach the
+ 	 * arp_ip_target and fool ourselves with our own arp requests.
+ 	 */
+-
+ 	if (bond_is_active_slave(slave))
+ 		bond_validate_arp(bond, slave, sip, tip);
+ 	else if (curr_active_slave &&
+ 		 time_after(slave_last_rx(bond, curr_active_slave),
+ 			    curr_active_slave->last_link_up))
+ 		bond_validate_arp(bond, slave, tip, sip);
++	else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
++		 bond_time_in_interval(bond,
++				       dev_trans_start(curr_arp_slave->dev), 1))
++		bond_validate_arp(bond, slave, sip, tip);
+ 
+ out_unlock:
+ 	if (arp != (struct arphdr *)skb->data)
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 0d8af5bb5907..d5415205779f 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+ 	return ret;
+ }
+ 
++static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
++{
++	/* Check if we will never have enough descriptors,
++	 * as gso_segs can be more than current ring size
++	 */
++	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
++}
++
+ static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+ 
+ /* Use GSO to workaround all TSO packets that meet HW bug conditions
+@@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		 * vlan encapsulated.
+ 		 */
+ 		if (skb->protocol == htons(ETH_P_8021Q) ||
+-		    skb->protocol == htons(ETH_P_8021AD))
+-			return tg3_tso_bug(tp, tnapi, txq, skb);
++		    skb->protocol == htons(ETH_P_8021AD)) {
++			if (tg3_tso_bug_gso_check(tnapi, skb))
++				return tg3_tso_bug(tp, tnapi, txq, skb);
++			goto drop;
++		}
+ 
+ 		if (!skb_is_gso_v6(skb)) {
+ 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+-			    tg3_flag(tp, TSO_BUG))
+-				return tg3_tso_bug(tp, tnapi, txq, skb);
+-
++			    tg3_flag(tp, TSO_BUG)) {
++				if (tg3_tso_bug_gso_check(tnapi, skb))
++					return tg3_tso_bug(tp, tnapi, txq, skb);
++				goto drop;
++			}
+ 			ip_csum = iph->check;
+ 			ip_tot_len = iph->tot_len;
+ 			iph->check = 0;
+@@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	if (would_hit_hwbug) {
+ 		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ 
+-		if (mss) {
++		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
+ 			/* If it's a TSO packet, do GSO instead of
+ 			 * allocating and copying to a large linear SKB
+ 			 */
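
The tg3 workaround path linearizes via GSO and requeues, which only helps if the segmented packet can ever fit the ring; the new tg3_tso_bug_gso_check() drops packets whose gso_segs already exceed a third of tx_pending instead of retrying forever. As a standalone predicate:

    #include <linux/skbuff.h>

    /* True if GSO segmentation could ever fit this ring; tx_pending is
     * the ring size, and the /3 margin mirrors the driver's own
     * descriptor-headroom heuristic above.
     */
    static bool gso_can_fit(struct sk_buff *skb, u32 tx_pending)
    {
            return skb_shinfo(skb)->gso_segs < tx_pending / 3;
    }
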
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+index 8a083d73efdb..dae2ebb53af7 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
+ 	.enable		= mlx4_en_phc_enable,
+ };
+ 
++#define MLX4_EN_WRAP_AROUND_SEC	10ULL
++
++/* This function calculates the max shift that enables the user range
++ * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
++ */
++static u32 freq_to_shift(u16 freq)
++{
++	u32 freq_khz = freq * 1000;
++	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
++	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
++		max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
++	/* calculate max possible multiplier in order to fit in 64bit */
++	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
++
++	/* This comes from the reverse of clocksource_khz2mult */
++	return ilog2(div_u64(max_mul * freq_khz, 1000000));
++}
++
+ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ {
+ 	struct mlx4_dev *dev = mdev->dev;
+@@ -247,12 +265,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
+ 	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
+ 	mdev->cycles.read = mlx4_en_read_clock;
+ 	mdev->cycles.mask = CLOCKSOURCE_MASK(48);
+-	/* Using shift to make calculation more accurate. Since current HW
+-	 * clock frequency is 427 MHz, and cycles are given using a 48 bits
+-	 * register, the biggest shift when calculating using u64, is 14
+-	 * (max_cycles * multiplier < 2^64)
+-	 */
+-	mdev->cycles.shift = 14;
++	mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
+ 	mdev->cycles.mult =
+ 		clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
+ 	mdev->nominal_c_mult = mdev->cycles.mult;
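
A rough worked example of freq_to_shift(), using the 427 MHz clock cited in the removed comment: freq_khz = 427000, so max_val_cycles = 427000 * 1000 * 10 ≈ 4.27e12, which rounds up to 2^42 - 1; max_mul is then about 2^64 / 2^42 = 2^22 = 4194304, and ilog2(4194304 * 427000 / 1000000) = ilog2(1790967) = 20. The old fixed shift of 14 was sized so the full 48-bit counter range could never overflow 64 bits; bounding the wrap-around window to 10 seconds instead permits the larger shift and thus a more precise mult.
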
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index a5a0b8420d26..e9189597000d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -2330,8 +2330,6 @@ out:
+ 	/* set offloads */
+ 	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+ 
+ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+@@ -2342,8 +2340,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
+ 	/* unset offloads */
+ 	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ 				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+-	priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
+-	priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;
+ 
+ 	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+ 				  VXLAN_STEER_BY_OUTER_MAC, 0);
+@@ -2940,6 +2936,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ 		priv->rss_hash_fn = ETH_RSS_HASH_TOP;
+ 	}
+ 
++	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
++		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
++		dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
++	}
++
+ 	mdev->pndev[port] = dev;
+ 	mdev->upper[port] = NULL;
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+index 0a56f010c846..760a8b387912 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
+@@ -223,11 +223,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+ 	stats->collisions = 0;
+ 	stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
+ 	stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+-	stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_over_errors = 0;
+ 	stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ 	stats->rx_frame_errors = 0;
+ 	stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+-	stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
++	stats->rx_missed_errors = 0;
+ 	stats->tx_aborted_errors = 0;
+ 	stats->tx_carrier_errors = 0;
+ 	stats->tx_fifo_errors = 0;
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index 73b6fc21ea00..4fedf7fa72c4 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -3384,12 +3384,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
+ 	info.addr = lw->addr;
+ 	info.vid = lw->vid;
+ 
++	rtnl_lock();
+ 	if (learned && removing)
+ 		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
+ 					     lw->dev, &info.info);
+ 	else if (learned && !removing)
+ 		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
+ 					     lw->dev, &info.info);
++	rtnl_unlock();
+ 
+ 	kfree(work);
+ }
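
The rocker change wraps the FDB notifier calls in rtnl_lock()/rtnl_unlock() because the notifier callbacks expect RTNL to be held, and this path is a deferred work item that otherwise holds no locks. The idiom in outline:

    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    static void my_fdb_work(struct work_struct *work)
    {
            rtnl_lock();                    /* notifier callbacks assume RTNL */
            /* ... call the notifier chain ... */
            rtnl_unlock();
    }
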
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 00cb41e71312..c56cf0b86f2c 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -833,6 +833,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ 	struct sk_buff *skb;
+ 	unsigned long flags;
++	u8 overflow;
++
++	overflow = (phy_rxts->ns_hi >> 14) & 0x3;
++	if (overflow)
++		pr_debug("rx timestamp queue overflow, count %d\n", overflow);
+ 
+ 	spin_lock_irqsave(&dp83640->rx_lock, flags);
+ 
+@@ -875,6 +880,7 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 	struct skb_shared_hwtstamps shhwtstamps;
+ 	struct sk_buff *skb;
+ 	u64 ns;
++	u8 overflow;
+ 
+ 	/* We must already have the skb that triggered this. */
+ 
+@@ -884,6 +890,17 @@ static void decode_txts(struct dp83640_private *dp83640,
+ 		pr_debug("have timestamp but tx_queue empty\n");
+ 		return;
+ 	}
++
++	overflow = (phy_txts->ns_hi >> 14) & 0x3;
++	if (overflow) {
++		pr_debug("tx timestamp queue overflow, count %d\n", overflow);
++		while (skb) {
++			skb_complete_tx_timestamp(skb, NULL);
++			skb = skb_dequeue(&dp83640->tx_queue);
++		}
++		return;
++	}
++
+ 	ns = phy2txts(phy_txts);
+ 	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ 	shhwtstamps.hwtstamp = ns_to_ktime(ns);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index 9c8fabed4444..d1c4bc1c4df0 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
+ 
+ 		if (!__pppoe_xmit(sk_pppox(relay_po), skb))
+ 			goto abort_put;
++
++		sock_put(sk_pppox(relay_po));
+ 	} else {
+ 		if (sock_queue_rcv_skb(sk, skb))
+ 			goto abort_kfree;
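
The pppoe fix plugs a reference leak: the relay socket lookup returns relay_po with its refcount raised, and the success path (packet handed off to __pppoe_xmit()) never dropped it, while the error path already did. The invariant generalized with a kref; struct thing and consume_thing() are illustrative:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct thing {
            struct kref ref;
    };

    static void thing_release(struct kref *ref)
    {
            kfree(container_of(ref, struct thing, ref));
    }

    /* A function handed a counted reference must drop it on *every*
     * path out -- success included, which is the path the fix above
     * was missing.
     */
    static void consume_thing(struct thing *t)
    {
            /* ... forward or queue the payload ... */
            kref_put(&t->ref, thing_release);
    }
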
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 0bacabfa486e..b35199cc8f34 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -131,24 +131,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
+ 	return i < MAX_CALLID;
+ }
+ 
+-static int add_chan(struct pppox_sock *sock)
++static int add_chan(struct pppox_sock *sock,
++		    struct pptp_addr *sa)
+ {
+ 	static int call_id;
+ 
+ 	spin_lock(&chan_lock);
+-	if (!sock->proto.pptp.src_addr.call_id)	{
++	if (!sa->call_id)	{
+ 		call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
+ 		if (call_id == MAX_CALLID) {
+ 			call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
+ 			if (call_id == MAX_CALLID)
+ 				goto out_err;
+ 		}
+-		sock->proto.pptp.src_addr.call_id = call_id;
+-	} else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
++		sa->call_id = call_id;
++	} else if (test_bit(sa->call_id, callid_bitmap)) {
+ 		goto out_err;
++	}
+ 
+-	set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
+-	rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
++	sock->proto.pptp.src_addr = *sa;
++	set_bit(sa->call_id, callid_bitmap);
++	rcu_assign_pointer(callid_sock[sa->call_id], sock);
+ 	spin_unlock(&chan_lock);
+ 
+ 	return 0;
+@@ -417,7 +420,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct sock *sk = sock->sk;
+ 	struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
+ 	struct pppox_sock *po = pppox_sk(sk);
+-	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
+ 	if (sockaddr_len < sizeof(struct sockaddr_pppox))
+@@ -425,10 +427,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ 	lock_sock(sk);
+ 
+-	opt->src_addr = sp->sa_addr.pptp;
+-	if (add_chan(po))
++	if (sk->sk_state & PPPOX_DEAD) {
++		error = -EALREADY;
++		goto out;
++	}
++
++	if (sk->sk_state & PPPOX_BOUND) {
+ 		error = -EBUSY;
++		goto out;
++	}
++
++	if (add_chan(po, &sp->sa_addr.pptp))
++		error = -EBUSY;
++	else
++		sk->sk_state |= PPPOX_BOUND;
+ 
++out:
+ 	release_sock(sk);
+ 	return error;
+ }
+@@ -499,7 +513,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	}
+ 
+ 	opt->dst_addr = sp->sa_addr.pptp;
+-	sk->sk_state = PPPOX_CONNECTED;
++	sk->sk_state |= PPPOX_CONNECTED;
+ 
+  end:
+ 	release_sock(sk);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 71190dc1eacf..cffb25280a3b 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -542,6 +542,7 @@ static const struct usb_device_id products[] = {
+ 
+ 	/* 3. Combined interface devices matching on interface number */
+ 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
++	{QMI_FIXED_INTF(0x05c6, 0x6001, 3)},	/* 4G LTE usb-modem U901 */
+ 	{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
+ 	{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
+diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
+index 0bf82a20a0fb..48d21e0edd56 100644
+--- a/drivers/pci/pcie/aer/aerdrv.c
++++ b/drivers/pci/pcie/aer/aerdrv.c
+@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
+ 	rpc->rpd = dev;
+ 	INIT_WORK(&rpc->dpc_handler, aer_isr);
+ 	mutex_init(&rpc->rpc_mutex);
+-	init_waitqueue_head(&rpc->wait_release);
+ 
+ 	/* Use PCIe bus function to store rpc into PCIe device */
+ 	set_service_data(dev, rpc);
+@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
+ 		if (rpc->isr)
+ 			free_irq(dev->irq, dev);
+ 
+-		wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
+-
++		flush_work(&rpc->dpc_handler);
+ 		aer_disable_rootport(rpc);
+ 		kfree(rpc);
+ 		set_service_data(dev, NULL);
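
The AER change retires a hand-rolled producer/consumer waitqueue in favour of flush_work(), which already provides the needed guarantee: it returns only once any queued execution of the work item has finished, making it safe to tear down the structure the handler dereferences. The idiom:

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct my_service {
            struct work_struct handler;
            /* ... state the handler dereferences ... */
    };

    static void my_service_remove(struct my_service *svc)
    {
            flush_work(&svc->handler);      /* wait for the handler to finish */
            kfree(svc);                     /* now nothing can touch svc */
    }
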
+diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
+index 84420b7c9456..945c939a86c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv.h
++++ b/drivers/pci/pcie/aer/aerdrv.h
+@@ -72,7 +72,6 @@ struct aer_rpc {
+ 					 * recovery on the same
+ 					 * root port hierarchy
+ 					 */
+-	wait_queue_head_t wait_release;
+ };
+ 
+ struct aer_broadcast_data {
+diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
+index 5653ea94547f..b60a325234c5 100644
+--- a/drivers/pci/pcie/aer/aerdrv_core.c
++++ b/drivers/pci/pcie/aer/aerdrv_core.c
+@@ -784,8 +784,6 @@ void aer_isr(struct work_struct *work)
+ 	while (get_e_source(rpc, &e_src))
+ 		aer_isr_one_error(p_device, &e_src);
+ 	mutex_unlock(&rpc->rpc_mutex);
+-
+-	wake_up(&rpc->wait_release);
+ }
+ 
+ /**
+diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
+index 63bc12d7a73e..153e0a27c7ee 100644
+--- a/drivers/phy/phy-core.c
++++ b/drivers/phy/phy-core.c
+@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
+ 
+ int phy_power_on(struct phy *phy)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!phy)
+-		return 0;
++		goto out;
+ 
+ 	if (phy->pwr) {
+ 		ret = regulator_enable(phy->pwr);
+ 		if (ret)
+-			return ret;
++			goto out;
+ 	}
+ 
+ 	ret = phy_pm_runtime_get_sync(phy);
+ 	if (ret < 0 && ret != -ENOTSUPP)
+-		return ret;
++		goto err_pm_sync;
++
+ 	ret = 0; /* Override possible ret == -ENOTSUPP */
+ 
+ 	mutex_lock(&phy->mutex);
+@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
+ 		ret = phy->ops->power_on(phy);
+ 		if (ret < 0) {
+ 			dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
+-			goto out;
++			goto err_pwr_on;
+ 		}
+ 	}
+ 	++phy->power_count;
+ 	mutex_unlock(&phy->mutex);
+ 	return 0;
+ 
+-out:
++err_pwr_on:
+ 	mutex_unlock(&phy->mutex);
+ 	phy_pm_runtime_put_sync(phy);
++err_pm_sync:
+ 	if (phy->pwr)
+ 		regulator_disable(phy->pwr);
+-
++out:
+ 	return ret;
+ }
+ EXPORT_SYMBOL_GPL(phy_power_on);
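
The phy-core rework restores the usual kernel error-unwind discipline: every acquisition gets a matching label, failures jump to the label that releases exactly what has succeeded so far, and the labels run in reverse order of acquisition (previously a pm-runtime failure returned without disabling the regulator). Skeleton form, with step/undo helpers as placeholders:

    static int step_a(void);                /* e.g. regulator_enable() */
    static int step_b(void);                /* e.g. pm runtime get     */
    static int step_c(void);                /* e.g. ops->power_on()    */
    static void undo_a(void);
    static void undo_b(void);

    static int power_on_sketch(void)
    {
            int ret;

            ret = step_a();
            if (ret)
                    goto out;

            ret = step_b();
            if (ret)
                    goto err_undo_a;

            ret = step_c();
            if (ret)
                    goto err_undo_b;

            return 0;

    err_undo_b:
            undo_b();
    err_undo_a:
            undo_a();
    out:
            return ret;
    }
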
+diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
+index 6285f46f3ddb..fb9e30ed8018 100644
+--- a/drivers/phy/phy-twl4030-usb.c
++++ b/drivers/phy/phy-twl4030-usb.c
+@@ -719,6 +719,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ 	pm_runtime_enable(&pdev->dev);
++	pm_runtime_get_sync(&pdev->dev);
+ 
+ 	/* Our job is to use irqs and status from the power module
+ 	 * to keep the transceiver disabled when nothing's connected.
+@@ -754,6 +755,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 	struct twl4030_usb *twl = platform_get_drvdata(pdev);
+ 	int val;
+ 
++	usb_remove_phy(&twl->phy);
+ 	pm_runtime_get_sync(twl->dev);
+ 	cancel_delayed_work(&twl->id_workaround_work);
+ 	device_remove_file(twl->dev, &dev_attr_vbus);
+@@ -761,6 +763,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 	/* set transceiver mode to power on defaults */
+ 	twl4030_usb_set_mode(twl, -1);
+ 
++	/* idle ulpi before powering off */
++	if (cable_present(twl->linkstat))
++		pm_runtime_put_noidle(twl->dev);
++	pm_runtime_mark_last_busy(twl->dev);
++	pm_runtime_put_sync_suspend(twl->dev);
++	pm_runtime_disable(twl->dev);
++
+ 	/* autogate 60MHz ULPI clock,
+ 	 * clear dpll clock request for i2c access,
+ 	 * disable 32KHz
+@@ -775,11 +784,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 	/* disable complete OTG block */
+ 	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
+ 
+-	if (cable_present(twl->linkstat))
+-		pm_runtime_put_noidle(twl->dev);
+-	pm_runtime_mark_last_busy(twl->dev);
+-	pm_runtime_put(twl->dev);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
+index 02bc5a6343c3..aa454241489c 100644
+--- a/drivers/platform/x86/intel_scu_ipcutil.c
++++ b/drivers/platform/x86/intel_scu_ipcutil.c
+@@ -49,7 +49,7 @@ struct scu_ipc_data {
+ 
+ static int scu_reg_access(u32 cmd, struct scu_ipc_data  *data)
+ {
+-	int count = data->count;
++	unsigned int count = data->count;
+ 
+ 	if (count == 0 || count == 3 || count > 4)
+ 		return -EINVAL;
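
The one-word intel_scu_ipcutil change closes a sign hole: data->count comes from userspace, and with a signed int a negative value sails past the "count > 4" test and reaches the register loops. Read as unsigned, the same bit pattern is enormous and is rejected. The check in isolation:

    #include <linux/errno.h>

    static int validate_count(unsigned int count)
    {
            if (count == 0 || count == 3 || count > 4)
                    return -EINVAL;         /* (unsigned)-1 is caught here */
            return 0;                       /* only 1, 2 or 4 get through */
    }
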
+diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
+index a2597e683e79..6a64e86e8ccd 100644
+--- a/drivers/s390/block/dasd_alias.c
++++ b/drivers/s390/block/dasd_alias.c
+@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		spin_unlock_irqrestore(&lcu->lock, flags);
+ 		cancel_work_sync(&lcu->suc_data.worker);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->suc_data.device)
++		if (device == lcu->suc_data.device) {
++			dasd_put_device(device);
+ 			lcu->suc_data.device = NULL;
++		}
+ 	}
+ 	was_pending = 0;
+ 	if (device == lcu->ruac_data.device) {
+@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
+ 		was_pending = 1;
+ 		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
+ 		spin_lock_irqsave(&lcu->lock, flags);
+-		if (device == lcu->ruac_data.device)
++		if (device == lcu->ruac_data.device) {
++			dasd_put_device(device);
+ 			lcu->ruac_data.device = NULL;
++		}
+ 	}
+ 	private->lcu = NULL;
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
+ 	if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
+ 		DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
+ 			    " alias data in lcu (rc = %d), retry later", rc);
+-		schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
++		if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
++			dasd_put_device(device);
+ 	} else {
++		dasd_put_device(device);
+ 		lcu->ruac_data.device = NULL;
+ 		lcu->flags &= ~UPDATE_PENDING;
+ 	}
+@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
+ 	 */
+ 	if (!usedev)
+ 		return -EINVAL;
++	dasd_get_device(usedev);
+ 	lcu->ruac_data.device = usedev;
+-	schedule_delayed_work(&lcu->ruac_data.dwork, 0);
++	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
++		dasd_put_device(usedev);
+ 	return 0;
+ }
+ 
+@@ -722,7 +730,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
+ 	ASCEBC((char *) &cqr->magic, 4);
+ 	ccw = cqr->cpaddr;
+ 	ccw->cmd_code = DASD_ECKD_CCW_RSCK;
+-	ccw->flags = 0 ;
++	ccw->flags = CCW_FLAG_SLI;
+ 	ccw->count = 16;
+ 	ccw->cda = (__u32)(addr_t) cqr->data;
+ 	((char *)cqr->data)[0] = reason;
+@@ -926,6 +934,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
+ 	/* 3. read new alias configuration */
+ 	_schedule_lcu_update(lcu, device);
+ 	lcu->suc_data.device = NULL;
++	dasd_put_device(device);
+ 	spin_unlock_irqrestore(&lcu->lock, flags);
+ }
+ 
+@@ -985,6 +994,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
+ 	}
+ 	lcu->suc_data.reason = reason;
+ 	lcu->suc_data.device = device;
++	dasd_get_device(device);
+ 	spin_unlock(&lcu->lock);
+-	schedule_work(&lcu->suc_data.worker);
++	if (!schedule_work(&lcu->suc_data.worker))
++		dasd_put_device(device);
+ };
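
The dasd_alias changes all enforce one rule: a device pointer stashed for a work item must hold a reference for as long as the work can run. schedule_work() and schedule_delayed_work() return false when the item was already pending, in which case the earlier instance keeps its reference and the new one must be dropped. The pairing, with my_dev_get()/my_dev_put() as hypothetical stand-ins for dasd_get_device()/dasd_put_device():

    #include <linux/workqueue.h>

    struct my_dev {
            struct work_struct worker;
            /* ... refcounted device state ... */
    };

    static void my_dev_get(struct my_dev *d);   /* hypothetical refcounting */
    static void my_dev_put(struct my_dev *d);

    static void kick_worker(struct my_dev *d)
    {
            my_dev_get(d);                  /* ref travels with the work item */
            if (!schedule_work(&d->worker))
                    my_dev_put(d);          /* already queued: drop our ref */
    }

    static void my_worker(struct work_struct *work)
    {
            struct my_dev *d = container_of(work, struct my_dev, worker);

            /* ... use d safely ... */
            my_dev_put(d);                  /* balance the queueing ref */
    }
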
+diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
+index b46ace3d4bf0..dd0c133aa312 100644
+--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
++++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
+@@ -568,7 +568,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
+ 			/*
+ 			 * Command Lock contention
+ 			 */
+-			err = SCSI_DH_RETRY;
++			err = SCSI_DH_IMM_RETRY;
+ 		break;
+ 	default:
+ 		break;
+@@ -618,6 +618,8 @@ retry:
+ 		err = mode_select_handle_sense(sdev, h->sense);
+ 		if (err == SCSI_DH_RETRY && retry_cnt--)
+ 			goto retry;
++		if (err == SCSI_DH_IMM_RETRY)
++			goto retry;
+ 	}
+ 	if (err == SCSI_DH_OK) {
+ 		h->state = RDAC_STATE_ACTIVE;
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index e9ae6b924c70..8b011aef12bd 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -67,10 +67,10 @@
+  * |                              |                    | 0xd031-0xd0ff	|
+  * |                              |                    | 0xd101-0xd1fe	|
+  * |                              |                    | 0xd214-0xd2fe	|
+- * | Target Mode		  |	  0xe079       |		|
+- * | Target Mode Management	  |	  0xf080       | 0xf002		|
++ * | Target Mode		  |	  0xe080       |		|
++ * | Target Mode Management	  |	  0xf096       | 0xf002		|
+  * |                              |                    | 0xf046-0xf049  |
+- * | Target Mode Task Management  |	  0x1000b      |		|
++ * | Target Mode Task Management  |	  0x1000d      |		|
+  * ----------------------------------------------------------------------
+  */
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index e86201d3b8c6..90d926ca1200 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -274,6 +274,7 @@
+ #define RESPONSE_ENTRY_CNT_FX00		256     /* Number of response entries.*/
+ 
+ struct req_que;
++struct qla_tgt_sess;
+ 
+ /*
+  * (sd.h is not exported, hence local inclusion)
+@@ -2026,6 +2027,7 @@ typedef struct fc_port {
+ 	uint16_t port_id;
+ 
+ 	unsigned long retry_delay_timestamp;
++	struct qla_tgt_sess *tgt_session;
+ } fc_port_t;
+ 
+ #include "qla_mr.h"
+@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
+ 	uint16_t	fcoe_fcf_idx;
+ 	uint8_t		fcoe_vn_port_mac[6];
+ 
++	/* list of commands waiting on workqueue */
++	struct list_head	qla_cmd_list;
++	struct list_head	qla_sess_op_cmd_list;
++	spinlock_t		cmd_list_lock;
++
++	/* Counter to detect races between ELS and RSCN events */
++	atomic_t		generation_tick;
++	/* Time when global fcport update has been scheduled */
++	int			total_fcport_update_gen;
++
+ 	uint32_t	vp_abort_cnt;
+ 
+ 	struct fc_vport	*fc_vport;	/* holds fc_vport * for each vport */
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 998498e2341b..60f9651f2643 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
+ 			QLA_LOGIO_LOGIN_RETRIED : 0;
+ 		qla2x00_post_async_login_done_work(fcport->vha, fcport,
+ 			lio->u.logio.data);
++	} else if (sp->type == SRB_LOGOUT_CMD) {
++		qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
+ 	}
+ }
+ 
+@@ -497,7 +499,10 @@ void
+ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+     uint16_t *data)
+ {
+-	qla2x00_mark_device_lost(vha, fcport, 1, 0);
++	/* Don't re-login in target mode */
++	if (!fcport->tgt_session)
++		qla2x00_mark_device_lost(vha, fcport, 1, 0);
++	qlt_logo_completion_handler(fcport, data[0]);
+ 	return;
+ }
+ 
+@@ -2189,7 +2194,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 	/* Clear outstanding commands array. */
+ 	for (que = 0; que < ha->max_req_queues; que++) {
+ 		req = ha->req_q_map[que];
+-		if (!req)
++		if (!req || !test_bit(que, ha->req_qid_map))
+ 			continue;
+ 		req->out_ptr = (void *)(req->ring + req->length);
+ 		*req->out_ptr = 0;
+@@ -2206,7 +2211,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
+ 
+ 	for (que = 0; que < ha->max_rsp_queues; que++) {
+ 		rsp = ha->rsp_q_map[que];
+-		if (!rsp)
++		if (!rsp || !test_bit(que, ha->rsp_qid_map))
+ 			continue;
+ 		rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+ 		*rsp->in_ptr = 0;
+@@ -2922,24 +2927,14 @@ qla2x00_rport_del(void *data)
+ {
+ 	fc_port_t *fcport = data;
+ 	struct fc_rport *rport;
+-	scsi_qla_host_t *vha = fcport->vha;
+ 	unsigned long flags;
+-	unsigned long vha_flags;
+ 
+ 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ 	rport = fcport->drport ? fcport->drport: fcport->rport;
+ 	fcport->drport = NULL;
+ 	spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+-	if (rport) {
++	if (rport)
+ 		fc_remote_port_delete(rport);
+-		/*
+-		 * Release the target mode FC NEXUS in qla_target.c code
+-		 * if target mod is enabled.
+-		 */
+-		spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
+-		qlt_fc_port_deleted(vha, fcport);
+-		spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
+-	}
+ }
+ 
+ /**
+@@ -3379,6 +3374,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 	LIST_HEAD(new_fcports);
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
++	int		discovery_gen;
+ 
+ 	/* If FL port exists, then SNS is present */
+ 	if (IS_FWI2_CAPABLE(ha))
+@@ -3449,6 +3445,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 			fcport->scan_state = QLA_FCPORT_SCAN;
+ 		}
+ 
++		/* Mark the time right before querying FW for connected ports.
++		 * This process is long, asynchronous and by the time it's done,
++		 * collected information might not be accurate anymore. E.g.
++		 * disconnected port might have re-connected and a brand new
++		 * session has been created. In this case session's generation
++		 * will be newer than discovery_gen. */
++		qlt_do_generation_tick(vha, &discovery_gen);
++
+ 		rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
+ 		if (rval != QLA_SUCCESS)
+ 			break;
+@@ -3500,7 +3504,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 					    atomic_read(&fcport->state),
+ 					    fcport->flags, fcport->fc4_type,
+ 					    fcport->scan_state);
+-					qlt_fc_port_deleted(vha, fcport);
++					qlt_fc_port_deleted(vha, fcport,
++					    discovery_gen);
+ 				}
+ 			}
+ 		}
+@@ -4277,6 +4282,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
+ 			    atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
+ 				spin_unlock_irqrestore(&ha->vport_slock, flags);
+ 				qla2x00_rport_del(fcport);
++
++				/*
++				 * Release the target mode FC NEXUS in
++				 * qla_target.c, if target mod is enabled.
++				 */
++				qlt_fc_port_deleted(vha, fcport,
++				    base_vha->total_fcport_update_gen);
++
+ 				spin_lock_irqsave(&ha->vport_slock, flags);
+ 			}
+ 		}
+@@ -4944,7 +4957,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ 
+ 	for (i = 1; i < ha->max_rsp_queues; i++) {
+ 		rsp = ha->rsp_q_map[i];
+-		if (rsp) {
++		if (rsp && test_bit(i, ha->rsp_qid_map)) {
+ 			rsp->options &= ~BIT_0;
+ 			ret = qla25xx_init_rsp_que(base_vha, rsp);
+ 			if (ret != QLA_SUCCESS)
+@@ -4959,8 +4972,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
+ 	}
+ 	for (i = 1; i < ha->max_req_queues; i++) {
+ 		req = ha->req_q_map[i];
+-		if (req) {
+-		/* Clear outstanding commands array. */
++		if (req && test_bit(i, ha->req_qid_map)) {
++			/* Clear outstanding commands array. */
+ 			req->options &= ~BIT_0;
+ 			ret = qla25xx_init_req_que(base_vha, req);
+ 			if (ret != QLA_SUCCESS)
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index a1ab25fca874..dc96f31a8831 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+ 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+ 	logio->control_flags =
+ 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
++	if (!sp->fcport->tgt_session ||
++	    !sp->fcport->tgt_session->keep_nport_handle)
++		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+ 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+ 	logio->port_id[1] = sp->fcport->d_id.b.area;
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 6dc14cd782b2..1f3991ba7580 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2992,9 +2992,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ 		    "MSI-X: Failed to enable support "
+ 		    "-- %d/%d\n Retry with %d vectors.\n",
+ 		    ha->msix_count, ret, ret);
++		ha->msix_count = ret;
++		ha->max_rsp_queues = ha->msix_count - 1;
+ 	}
+-	ha->msix_count = ret;
+-	ha->max_rsp_queues = ha->msix_count - 1;
+ 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
+ 				ha->msix_count, GFP_KERNEL);
+ 	if (!ha->msix_entries) {
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index cc94192511cf..63abed122adf 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -601,7 +601,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ 	/* Delete request queues */
+ 	for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+ 		req = ha->req_q_map[cnt];
+-		if (req) {
++		if (req && test_bit(cnt, ha->req_qid_map)) {
+ 			ret = qla25xx_delete_req_que(vha, req);
+ 			if (ret != QLA_SUCCESS) {
+ 				ql_log(ql_log_warn, vha, 0x00ea,
+@@ -615,7 +615,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
+ 	/* Delete response queues */
+ 	for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+ 		rsp = ha->rsp_q_map[cnt];
+-		if (rsp) {
++		if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
+ 			ret = qla25xx_delete_rsp_que(vha, rsp);
+ 			if (ret != QLA_SUCCESS) {
+ 				ql_log(ql_log_warn, vha, 0x00eb,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 7462dd70b150..d00725574577 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -398,6 +398,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ 	int cnt;
+ 
+ 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
++		if (!test_bit(cnt, ha->req_qid_map))
++			continue;
++
+ 		req = ha->req_q_map[cnt];
+ 		qla2x00_free_req_que(ha, req);
+ 	}
+@@ -405,6 +408,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
+ 	ha->req_q_map = NULL;
+ 
+ 	for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
++		if (!test_bit(cnt, ha->rsp_qid_map))
++			continue;
++
+ 		rsp = ha->rsp_q_map[cnt];
+ 		qla2x00_free_rsp_que(ha, rsp);
+ 	}
+@@ -3229,11 +3235,14 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
+ 		spin_lock_irqsave(vha->host->host_lock, flags);
+ 		fcport->drport = rport;
+ 		spin_unlock_irqrestore(vha->host->host_lock, flags);
++		qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
+ 		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+ 		qla2xxx_wake_dpc(base_vha);
+ 	} else {
++		int now;
+ 		fc_remote_port_delete(rport);
+-		qlt_fc_port_deleted(vha, fcport);
++		qlt_do_generation_tick(vha, &now);
++		qlt_fc_port_deleted(vha, fcport, now);
+ 	}
+ }
+ 
+@@ -3763,8 +3772,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
+ 	INIT_LIST_HEAD(&vha->vp_fcports);
+ 	INIT_LIST_HEAD(&vha->work_list);
+ 	INIT_LIST_HEAD(&vha->list);
++	INIT_LIST_HEAD(&vha->qla_cmd_list);
++	INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
+ 
+ 	spin_lock_init(&vha->work_lock);
++	spin_lock_init(&vha->cmd_list_lock);
+ 
+ 	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ 	ql_dbg(ql_dbg_init, vha, 0x0041,
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 496a733d0ca3..df6193b48177 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -114,6 +114,10 @@ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ 	struct atio_from_isp *atio, uint16_t status, int qfull);
+ static void qlt_disable_vha(struct scsi_qla_host *vha);
+ static void qlt_clear_tgt_db(struct qla_tgt *tgt);
++static void qlt_send_notify_ack(struct scsi_qla_host *vha,
++	struct imm_ntfy_from_isp *ntfy,
++	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
++	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
+ /*
+  * Global Variables
+  */
+@@ -123,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
+ static DEFINE_MUTEX(qla_tgt_mutex);
+ static LIST_HEAD(qla_tgt_glist);
+ 
++/* This API intentionally takes dest as a parameter, rather than returning an
++ * int value, to avoid the caller forgetting to issue wmb() after the store */
++void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
++{
++	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
++	*dest = atomic_inc_return(&base_vha->generation_tick);
++	/* memory barrier */
++	wmb();
++}
++
+ /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+ static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ 	struct qla_tgt *tgt,
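
qlt_do_generation_tick() above is the heart of the stale-request filtering
this patch introduces: discovery passes and session (re)creations each draw a
ticket from a shared counter, and a deletion request is ignored when the
session's ticket is newer. A userspace sketch of the scheme, with C11
stdatomic standing in for the kernel's atomic_t and wmb():

#include <stdatomic.h>
#include <stdio.h>

static atomic_int generation_tick;

/* takes dest as a parameter for the same reason as the driver:
 * the caller cannot forget the barrier after the store */
static void do_generation_tick(int *dest)
{
	*dest = atomic_fetch_add(&generation_tick, 1) + 1;
	atomic_thread_fence(memory_order_release);	/* ~wmb() */
}

int main(void)
{
	int discovery_gen, sess_gen;

	do_generation_tick(&discovery_gen);	/* discovery pass starts */
	do_generation_tick(&sess_gen);	/* port re-logged in meanwhile */

	/* signed subtraction keeps the test correct across counter
	 * wraparound, exactly as in qlt_fc_port_deleted() below */
	if (discovery_gen - sess_gen < 0)
		printf("stale deletion request ignored\n");
	return 0;
}
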
+@@ -382,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
+ 	struct qla_tgt *tgt = sess->tgt;
+ 	struct scsi_qla_host *vha = sess->vha;
+ 	struct qla_hw_data *ha = vha->hw;
++	unsigned long flags;
++	bool logout_started = false;
++	fc_port_t fcport;
++
++	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
++		"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
++		" s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
++		__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
++		sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
++		sess->logout_on_delete, sess->keep_nport_handle,
++		sess->plogi_ack_needed);
+ 
+ 	BUG_ON(!tgt);
++
++	if (sess->logout_on_delete) {
++		int rc;
++
++		memset(&fcport, 0, sizeof(fcport));
++		fcport.loop_id = sess->loop_id;
++		fcport.d_id = sess->s_id;
++		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
++		fcport.vha = vha;
++		fcport.tgt_session = sess;
++
++		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
++		if (rc != QLA_SUCCESS)
++			ql_log(ql_log_warn, vha, 0xf085,
++			       "Schedule logo failed sess %p rc %d\n",
++			       sess, rc);
++		else
++			logout_started = true;
++	}
++
+ 	/*
+ 	 * Release the target session for FC Nexus from fabric module code.
+ 	 */
+ 	if (sess->se_sess != NULL)
+ 		ha->tgt.tgt_ops->free_session(sess);
+ 
++	if (logout_started) {
++		bool traced = false;
++
++		while (!ACCESS_ONCE(sess->logout_completed)) {
++			if (!traced) {
++				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
++					"%s: waiting for sess %p logout\n",
++					__func__, sess);
++				traced = true;
++			}
++			msleep(100);
++		}
++
++		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
++			"%s: sess %p logout completed\n",
++			__func__, sess);
++	}
++
++	spin_lock_irqsave(&ha->hardware_lock, flags);
++
++	if (sess->plogi_ack_needed)
++		qlt_send_notify_ack(vha, &sess->tm_iocb,
++				    0, 0, 0, 0, 0, 0);
++
++	list_del(&sess->sess_list_entry);
++
++	spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ 	    "Unregistration of sess %p finished\n", sess);
+ 
+@@ -410,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
+ 
+ 	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+ 
+-	list_del(&sess->sess_list_entry);
+-	if (sess->deleted)
+-		list_del(&sess->del_list_entry);
++	if (!list_empty(&sess->del_list_entry))
++		list_del_init(&sess->del_list_entry);
++	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 
+ 	INIT_WORK(&sess->free_work, qlt_free_session_done);
+ 	schedule_work(&sess->free_work);
+@@ -490,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ 	struct qla_tgt *tgt = sess->tgt;
+ 	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+ 
+-	if (sess->deleted)
+-		return;
++	if (sess->deleted) {
++		/* Upgrade to unconditional deletion in case it was temporary */
++		if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
++			list_del(&sess->del_list_entry);
++		else
++			return;
++	}
+ 
+ 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ 	    "Scheduling sess %p for deletion\n", sess);
+-	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+-	sess->deleted = 1;
+ 
+-	if (immediate)
++	if (immediate) {
+ 		dev_loss_tmo = 0;
++		sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
++		list_add(&sess->del_list_entry, &tgt->del_sess_list);
++	} else {
++		sess->deleted = QLA_SESS_DELETION_PENDING;
++		list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
++	}
+ 
+ 	sess->expires = jiffies + dev_loss_tmo * HZ;
+ 
+ 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+-	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
+-	    "deletion in %u secs (expires: %lu) immed: %d\n",
+-	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
+-	    sess->expires, immediate);
++	    "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
++	    " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
++	    sess->vha->vp_idx, sess->port_name, sess->loop_id,
++	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
++	    dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
++	    sess->generation);
+ 
+ 	if (immediate)
+-		schedule_delayed_work(&tgt->sess_del_work, 0);
++		mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
+ 	else
+ 		schedule_delayed_work(&tgt->sess_del_work,
+ 		    sess->expires - jiffies);
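
The scheduling logic above, together with the qla_sess_deletion enum added to
qla_target.h further down, turns the old single "deleted" bit into a small
state machine: PENDING deletions can still be cancelled or upgraded to
immediate, while IN_PROGRESS is a point of no return. A compact illustration
of those transitions (simplified; no lists, timers or locking):

#include <stdio.h>

enum sess_deletion {
	SESS_DELETION_NONE,
	SESS_DELETION_PENDING,
	SESS_DELETION_IN_PROGRESS,
};

struct sess { enum sess_deletion deleted; };

static const char *schedule_deletion(struct sess *s, int immediate)
{
	if (s->deleted == SESS_DELETION_IN_PROGRESS)
		return "ignored: past the point of no return";
	if (s->deleted == SESS_DELETION_PENDING && !immediate)
		return "ignored: already pending";
	/* NONE, or PENDING being upgraded to unconditional deletion */
	s->deleted = immediate ? SESS_DELETION_IN_PROGRESS
			       : SESS_DELETION_PENDING;
	return immediate ? "deleting now" : "deletion scheduled";
}

int main(void)
{
	struct sess s = { SESS_DELETION_NONE };

	printf("%s\n", schedule_deletion(&s, 0));	/* scheduled */
	printf("%s\n", schedule_deletion(&s, 1));	/* upgraded */
	printf("%s\n", schedule_deletion(&s, 0));	/* refused */
	return 0;
}
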
+@@ -579,9 +663,9 @@ out_free_id_list:
+ /* ha->hardware_lock supposed to be held on entry */
+ static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+ {
+-	BUG_ON(!sess->deleted);
++	BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
+ 
+-	list_del(&sess->del_list_entry);
++	list_del_init(&sess->del_list_entry);
+ 	sess->deleted = 0;
+ }
+ 
+@@ -600,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
+ 		    del_list_entry);
+ 		elapsed = jiffies;
+ 		if (time_after_eq(elapsed, sess->expires)) {
+-			qlt_undelete_sess(sess);
++			/* No turning back */
++			list_del_init(&sess->del_list_entry);
++			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+ 
+ 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ 			    "Timeout: sess %p about to be deleted\n",
+@@ -644,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
+ 			    fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ 			    fcport->loop_id);
+ 
++			/* Cannot undelete at this point */
++			if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++				spin_unlock_irqrestore(&ha->hardware_lock,
++				    flags);
++				return NULL;
++			}
++
+ 			if (sess->deleted)
+ 				qlt_undelete_sess(sess);
+ 
+@@ -653,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
+ 
+ 			if (sess->local && !local)
+ 				sess->local = 0;
++
++			qlt_do_generation_tick(vha, &sess->generation);
++
+ 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 			return sess;
+@@ -674,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
+ 	sess->s_id = fcport->d_id;
+ 	sess->loop_id = fcport->loop_id;
+ 	sess->local = local;
++	INIT_LIST_HEAD(&sess->del_list_entry);
++
++	/* Under normal circumstances we want to log out from firmware when
++	 * the session eventually ends, and release the corresponding nport
++	 * handle. In exceptional cases (e.g. when a new PLOGI is waiting) the
++	 * corresponding code will adjust these flags as necessary. */
++	sess->logout_on_delete = 1;
++	sess->keep_nport_handle = 0;
+ 
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+@@ -706,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
+ 	spin_lock_irqsave(&ha->hardware_lock, flags);
+ 	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+ 	vha->vha_tgt.qla_tgt->sess_count++;
++	qlt_do_generation_tick(vha, &sess->generation);
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+@@ -719,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
+ }
+ 
+ /*
+- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
++ * Called from qla2x00_reg_remote_port()
+  */
+ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ {
+@@ -751,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ 
+ 		spin_lock_irqsave(&ha->hardware_lock, flags);
++	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++		/* Point of no return */
++		spin_unlock_irqrestore(&ha->hardware_lock, flags);
++		return;
+ 	} else {
+ 		kref_get(&sess->se_sess->sess_kref);
+ 
+@@ -781,7 +890,12 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ 
+-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
++/*
++ * max_gen - specifies the maximum session generation
++ * at which this deletion request is still valid
++ */
++void
++qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
+ {
+ 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ 	struct qla_tgt_sess *sess;
+@@ -800,6 +914,15 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 		return;
+ 	}
+ 
++	if (max_gen - sess->generation < 0) {
++		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
++		    "Ignoring stale deletion request for se_sess %p / sess %p"
++		    " for port %8phC, req_gen %d, sess_gen %d\n",
++		    sess->se_sess, sess, sess->port_name, max_gen,
++		    sess->generation);
++		return;
++	}
++
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+ 
+ 	sess->local = 1;
+@@ -1170,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ 	    FCP_TMF_CMPL, true);
+ }
+ 
++static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
++{
++	struct qla_tgt_sess_op *op;
++	struct qla_tgt_cmd *cmd;
++
++	spin_lock(&vha->cmd_list_lock);
++
++	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++		if (tag == op->atio.u.isp24.exchange_addr) {
++			op->aborted = true;
++			spin_unlock(&vha->cmd_list_lock);
++			return 1;
++		}
++	}
++
++	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++		if (tag == cmd->atio.u.isp24.exchange_addr) {
++			cmd->state = QLA_TGT_STATE_ABORTED;
++			spin_unlock(&vha->cmd_list_lock);
++			return 1;
++		}
++	}
++
++	spin_unlock(&vha->cmd_list_lock);
++	return 0;
++}
++
++/* drop cmds for the given lun
++ * XXX only looks for cmds on the port through which lun reset was received
++ * XXX does not go through the lists of other ports (which may have cmds
++ *     for the same lun)
++ */
++static void abort_cmds_for_lun(struct scsi_qla_host *vha,
++				uint32_t lun, uint8_t *s_id)
++{
++	struct qla_tgt_sess_op *op;
++	struct qla_tgt_cmd *cmd;
++	uint32_t key;
++
++	key = sid_to_key(s_id);
++	spin_lock(&vha->cmd_list_lock);
++	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++		uint32_t op_key;
++		uint32_t op_lun;
++
++		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
++		op_lun = scsilun_to_int(
++			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
++		if (op_key == key && op_lun == lun)
++			op->aborted = true;
++	}
++	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++		uint32_t cmd_key;
++		uint32_t cmd_lun;
++
++		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
++		cmd_lun = scsilun_to_int(
++			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
++		if (cmd_key == key && cmd_lun == lun)
++			cmd->state = QLA_TGT_STATE_ABORTED;
++	}
++	spin_unlock(&vha->cmd_list_lock);
++}
++
+ /* ha->hardware_lock supposed to be held on entry */
+ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ 	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
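
abort_cmd_for_tag() and abort_cmds_for_lun() above both walk the two per-host
command lists initialized in qla_os.c earlier in this patch, matching either
on exchange tag or on (s_id, lun). The essence of the tag variant, reduced to
an array so it compiles stand-alone (the driver holds vha->cmd_list_lock
around the walk):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct cmd {
	uint32_t tag;	/* exchange address in the driver */
	bool aborted;
};

static int abort_cmd_for_tag(struct cmd *cmds, int n, uint32_t tag)
{
	for (int i = 0; i < n; i++) {
		if (cmds[i].tag == tag) {
			cmds[i].aborted = true;
			return 1;	/* found: caller sends the ABTS
					 * response immediately */
		}
	}
	return 0;	/* not found: caller falls back to -ENOENT */
}

int main(void)
{
	struct cmd cmds[] = { { 0x10, false }, { 0x20, false } };

	if (abort_cmd_for_tag(cmds, 2, 0x20))
		printf("tag 0x20 marked aborted\n");
	return 0;
}
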
+@@ -1194,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ 	}
+ 	spin_unlock(&se_sess->sess_cmd_lock);
+ 
+-	if (!found_lun)
+-		return -ENOENT;
++	/* cmd not in LIO lists, look in qla list */
++	if (!found_lun) {
++		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
++			/* send TASK_ABORT response immediately */
++			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
++			return 0;
++		} else {
++			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
++			    "unable to find cmd in driver or LIO for tag 0x%x\n",
++			    abts->exchange_addr_to_abort);
++			return -ENOENT;
++		}
++	}
+ 
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ 	    "qla_target(%d): task abort (tag=%d)\n",
+@@ -1279,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ 		return;
+ 	}
+ 
++	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
++		return;
++	}
++
+ 	rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ 	if (rc != 0) {
+ 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+@@ -1721,21 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct se_cmd *se_cmd = &cmd->se_cmd;
+ 
+-	if (unlikely(cmd->aborted)) {
+-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+-		    "qla_target(%d): terminating exchange "
+-		    "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+-		    se_cmd, cmd->tag);
+-
+-		cmd->state = QLA_TGT_STATE_ABORTED;
+-		cmd->cmd_flags |= BIT_6;
+-
+-		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+-
+-		/* !! At this point cmd could be already freed !! */
+-		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+-	}
+-
+ 	prm->cmd = cmd;
+ 	prm->tgt = tgt;
+ 	prm->rq_result = scsi_status;
+@@ -2298,6 +2486,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 	unsigned long flags = 0;
+ 	int res;
+ 
++	spin_lock_irqsave(&ha->hardware_lock, flags);
++	if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++		cmd->state = QLA_TGT_STATE_PROCESSED;
++		if (cmd->sess->logout_completed)
++			/* no need to terminate. FW already freed exchange. */
++			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
++		else
++			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
++		spin_unlock_irqrestore(&ha->hardware_lock, flags);
++		return 0;
++	}
++	spin_unlock_irqrestore(&ha->hardware_lock, flags);
++
+ 	memset(&prm, 0, sizeof(prm));
+ 	qlt_check_srr_debug(cmd, &xmit_type);
+ 
+@@ -2310,9 +2511,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ 	    &full_req_cnt);
+ 	if (unlikely(res != 0)) {
+-		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+-			return 0;
+-
+ 		return res;
+ 	}
+ 
+@@ -2459,7 +2657,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+ 
+ 	spin_lock_irqsave(&ha->hardware_lock, flags);
+ 
+-	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
++	if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
++	    (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
+ 		/*
+ 		 * Either a chip reset is active or this request was from
+ 		 * previous life, just abort the processing.
+@@ -2652,6 +2851,89 @@ out:
+ 
+ /* If hardware_lock held on entry, might drop it, then reacquire */
+ /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
++static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
++	struct imm_ntfy_from_isp *ntfy)
++{
++	struct nack_to_isp *nack;
++	struct qla_hw_data *ha = vha->hw;
++	request_t *pkt;
++	int ret = 0;
++
++	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
++	    "Sending TERM ELS CTIO (ha=%p)\n", ha);
++
++	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
++	if (pkt == NULL) {
++		ql_dbg(ql_dbg_tgt, vha, 0xe080,
++		    "qla_target(%d): %s failed: unable to allocate "
++		    "request packet\n", vha->vp_idx, __func__);
++		return -ENOMEM;
++	}
++
++	pkt->entry_type = NOTIFY_ACK_TYPE;
++	pkt->entry_count = 1;
++	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
++
++	nack = (struct nack_to_isp *)pkt;
++	nack->ox_id = ntfy->ox_id;
++
++	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
++	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
++		nack->u.isp24.flags = ntfy->u.isp24.flags &
++			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
++	}
++
++	/* terminate */
++	nack->u.isp24.flags |=
++		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
++
++	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
++	nack->u.isp24.status = ntfy->u.isp24.status;
++	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
++	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
++	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
++	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
++	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
++	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
++
++	qla2x00_start_iocbs(vha, vha->req);
++	return ret;
++}
++
++static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
++	struct imm_ntfy_from_isp *imm, int ha_locked)
++{
++	unsigned long flags = 0;
++	int rc;
++
++	if (qlt_issue_marker(vha, ha_locked) < 0)
++		return;
++
++	if (ha_locked) {
++		rc = __qlt_send_term_imm_notif(vha, imm);
++
++#if 0	/* Todo  */
++		if (rc == -ENOMEM)
++			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
++#endif
++		goto done;
++	}
++
++	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
++	rc = __qlt_send_term_imm_notif(vha, imm);
++
++#if 0	/* Todo */
++	if (rc == -ENOMEM)
++		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
++#endif
++
++done:
++	if (!ha_locked)
++		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
++}
++
++/* If hardware_lock held on entry, might drop it, then reacquire */
++/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ 	struct qla_tgt_cmd *cmd,
+ 	struct atio_from_isp *atio)
+@@ -2794,6 +3076,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+ 
+ }
+ 
++void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
++{
++	struct qla_tgt *tgt = cmd->tgt;
++	struct scsi_qla_host *vha = tgt->vha;
++	struct se_cmd *se_cmd = &cmd->se_cmd;
++
++	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
++	    "qla_target(%d): terminating exchange for aborted cmd=%p "
++	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
++	    cmd->tag);
++
++	cmd->state = QLA_TGT_STATE_ABORTED;
++	cmd->cmd_flags |= BIT_6;
++
++	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
++}
++EXPORT_SYMBOL(qlt_abort_cmd);
++
+ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+ {
+ 	struct qla_tgt_sess *sess = cmd->sess;
+@@ -3265,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
+ 	if (tgt->tgt_stop)
+ 		goto out_term;
+ 
++	if (cmd->state == QLA_TGT_STATE_ABORTED) {
++		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
++		    "cmd with tag %u is aborted\n",
++		    cmd->atio.u.isp24.exchange_addr);
++		goto out_term;
++	}
++
+ 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ 	cmd->tag = atio->u.isp24.exchange_addr;
+ 	cmd->unpacked_lun = scsilun_to_int(
+@@ -3318,6 +3625,12 @@ out_term:
+ static void qlt_do_work(struct work_struct *work)
+ {
+ 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
++	scsi_qla_host_t *vha = cmd->vha;
++	unsigned long flags;
++
++	spin_lock_irqsave(&vha->cmd_list_lock, flags);
++	list_del(&cmd->cmd_list);
++	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ 
+ 	__qlt_do_work(cmd);
+ }
+@@ -3369,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
+ 	unsigned long flags;
+ 	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+ 
++	spin_lock_irqsave(&vha->cmd_list_lock, flags);
++	list_del(&op->cmd_list);
++	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
++
++	if (op->aborted) {
++		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
++		    "sess_op with tag %u is aborted\n",
++		    op->atio.u.isp24.exchange_addr);
++		goto out_term;
++	}
++
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+-		"qla_target(%d): Unable to find wwn login"
+-		" (s_id %x:%x:%x), trying to create it manually\n",
+-		vha->vp_idx, s_id[0], s_id[1], s_id[2]);
++	    "qla_target(%d): Unable to find wwn login"
++	    " (s_id %x:%x:%x), trying to create it manually\n",
++	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+ 
+ 	if (op->atio.u.raw.entry_count > 1) {
+ 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+-		        "Dropping multy entry atio %p\n", &op->atio);
++		    "Dropping multi entry atio %p\n", &op->atio);
+ 		goto out_term;
+ 	}
+ 
+@@ -3441,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 
+ 		memcpy(&op->atio, atio, sizeof(*atio));
+ 		op->vha = vha;
++
++		spin_lock(&vha->cmd_list_lock);
++		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
++		spin_unlock(&vha->cmd_list_lock);
++
+ 		INIT_WORK(&op->work, qlt_create_sess_from_atio);
+ 		queue_work(qla_tgt_wq, &op->work);
+ 		return 0;
+ 	}
++
++	/* Another WWN used to have our s_id. Our PLOGI scheduled its
++	 * session deletion, but it's still in sess_del_work wq */
++	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++		ql_dbg(ql_dbg_io, vha, 0x3061,
++		    "New command while old session %p is being deleted\n",
++		    sess);
++		return -EFAULT;
++	}
++
+ 	/*
+ 	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
+ 	 */
+@@ -3460,6 +3799,11 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 
+ 	cmd->cmd_in_wq = 1;
+ 	cmd->cmd_flags |= BIT_0;
++
++	spin_lock(&vha->cmd_list_lock);
++	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
++	spin_unlock(&vha->cmd_list_lock);
++
+ 	INIT_WORK(&cmd->work, qlt_do_work);
+ 	queue_work(qla_tgt_wq, &cmd->work);
+ 	return 0;
+@@ -3473,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ 	struct scsi_qla_host *vha = sess->vha;
+ 	struct qla_hw_data *ha = vha->hw;
+ 	struct qla_tgt_mgmt_cmd *mcmd;
++	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ 	int res;
+ 	uint8_t tmr_func;
+ 
+@@ -3513,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ 		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ 		tmr_func = TMR_LUN_RESET;
++		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
+ 		break;
+ 
+ 	case QLA_TGT_CLEAR_TS:
+@@ -3601,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+ 		    sizeof(struct atio_from_isp));
+ 	}
+ 
++	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
++		return -EFAULT;
++
+ 	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ }
+ 
+@@ -3666,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
+ 	return __qlt_abort_task(vha, iocb, sess);
+ }
+ 
++void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
++{
++	if (fcport->tgt_session) {
++		if (rc != MBS_COMMAND_COMPLETE) {
++			ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
++				"%s: se_sess %p / sess %p from"
++				" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
++				" LOGO failed: %#x\n",
++				__func__,
++				fcport->tgt_session->se_sess,
++				fcport->tgt_session,
++				fcport->port_name, fcport->loop_id,
++				fcport->d_id.b.domain, fcport->d_id.b.area,
++				fcport->d_id.b.al_pa, rc);
++		}
++
++		fcport->tgt_session->logout_completed = 1;
++	}
++}
++
++static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
++    struct imm_ntfy_from_isp *b)
++{
++	struct imm_ntfy_from_isp tmp;
++	memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
++	memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
++	memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
++}
++
++/*
++ * ha->hardware_lock is supposed to be held on entry (protects tgt->sess_list)
++ *
++ * Schedules sessions with matching port_id/loop_id but a different wwn
++ * for deletion. Returns the existing session with a matching wwn if
++ * present, NULL otherwise.
++ */
++static struct qla_tgt_sess *
++qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
++    port_id_t port_id, uint16_t loop_id)
++{
++	struct qla_tgt_sess *sess = NULL, *other_sess;
++	uint64_t other_wwn;
++
++	list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
++
++		other_wwn = wwn_to_u64(other_sess->port_name);
++
++		if (wwn == other_wwn) {
++			WARN_ON(sess);
++			sess = other_sess;
++			continue;
++		}
++
++		/* find other sess with nport_id collision */
++		if (port_id.b24 == other_sess->s_id.b24) {
++			if (loop_id != other_sess->loop_id) {
++				ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
++				    "Invalidating sess %p loop_id %d wwn %llx.\n",
++				    other_sess, other_sess->loop_id, other_wwn);
++
++				/*
++				 * logout_on_delete is set by default, but another
++				 * session that has the same s_id/loop_id combo
++				 * might have cleared it when it requested this
++				 * session's deletion, so don't touch it
++				 */
++				qlt_schedule_sess_for_deletion(other_sess, true);
++			} else {
++				/*
++				 * Another wwn used to have our s_id/loop_id
++				 * combo - kill the session, but don't log out
++				 */
++				sess->logout_on_delete = 0;
++				qlt_schedule_sess_for_deletion(other_sess,
++				    true);
++			}
++			continue;
++		}
++
++		/* find other sess with nport handle collision */
++		if (loop_id == other_sess->loop_id) {
++			ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
++			       "Invalidating sess %p loop_id %d wwn %llx.\n",
++			       other_sess, other_sess->loop_id, other_wwn);
++
++			/* Same loop_id but different s_id:
++			 * OK to kill and log out */
++			qlt_schedule_sess_for_deletion(other_sess, true);
++		}
++	}
++
++	return sess;
++}
++
++/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
++static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
++{
++	struct qla_tgt_sess_op *op;
++	struct qla_tgt_cmd *cmd;
++	uint32_t key;
++	int count = 0;
++
++	key = (((u32)s_id->b.domain << 16) |
++	       ((u32)s_id->b.area   <<  8) |
++	       ((u32)s_id->b.al_pa));
++
++	spin_lock(&vha->cmd_list_lock);
++	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
++		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
++		if (op_key == key) {
++			op->aborted = true;
++			count++;
++		}
++	}
++	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
++		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
++		if (cmd_key == key) {
++			cmd->state = QLA_TGT_STATE_ABORTED;
++			count++;
++		}
++	}
++	spin_unlock(&vha->cmd_list_lock);
++
++	return count;
++}
++
+ /*
+  * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+  */
+ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ 	struct imm_ntfy_from_isp *iocb)
+ {
++	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
++	struct qla_hw_data *ha = vha->hw;
++	struct qla_tgt_sess *sess = NULL;
++	uint64_t wwn;
++	port_id_t port_id;
++	uint16_t loop_id;
++	uint16_t wd3_lo;
+ 	int res = 0;
+ 
++	wwn = wwn_to_u64(iocb->u.isp24.port_name);
++
++	port_id.b.domain = iocb->u.isp24.port_id[2];
++	port_id.b.area   = iocb->u.isp24.port_id[1];
++	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
++	port_id.b.rsvd_1 = 0;
++
++	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
++
+ 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ 	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
+ 	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
+ 
++	/* res = 1 means ack at the end of the thread.
++	 * res = 0 means ack async/later.
++	 */
+ 	switch (iocb->u.isp24.status_subcode) {
+ 	case ELS_PLOGI:
+-	case ELS_FLOGI:
++
++		/* Mark all stale commands in qla_tgt_wq for deletion */
++		abort_cmds_for_s_id(vha, &port_id);
++
++		if (wwn)
++			sess = qlt_find_sess_invalidate_other(tgt, wwn,
++			    port_id, loop_id);
++
++		if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
++			res = 1;
++			break;
++		}
++
++		if (sess->plogi_ack_needed) {
++			/*
++			 * The initiator sent another PLOGI before the last
++			 * PLOGI could finish. Swap the plogi iocbs and
++			 * terminate the old one without acking; the new one
++			 * will get acked when session deletion completes.
++			 */
++			ql_log(ql_log_warn, sess->vha, 0xf094,
++			    "sess %p received double plogi.\n", sess);
++
++			qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
++
++			qlt_send_term_imm_notif(vha, iocb, 1);
++
++			res = 0;
++			break;
++		}
++
++		res = 0;
++
++		/*
++		 * Save immediate Notif IOCB for Ack when sess is done
++		 * and being deleted.
++		 */
++		memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
++		sess->plogi_ack_needed  = 1;
++
++		 /*
++		  * Under normal circumstances we want to release the nport
++		  * handle during the LOGO process to avoid nport handle leaks
++		  * inside the FW. The exception is when LOGO is done while
++		  * another PLOGI with the same nport handle is waiting, as
++		  * might be the case here. Note: there is always a possibility
++		  * of a race where session deletion has already started for
++		  * other reasons (e.g. ACL removal) and a PLOGI now arrives:
++		  * 1. if PLOGI arrived in FW after nport handle has been freed,
++		  *    FW must have assigned this PLOGI a new/same handle and we
++		  *    can proceed ACK'ing it as usual when session deletion
++		  *    completes.
++		  * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
++		  *    bit reached it, the handle has now been released. We'll
++		  *    back to the initiator. The initiator should eventually
++		  *    retry the PLOGI and the situation will correct itself.
++		  *    PLOGI and situation will correct itself.
++		  */
++		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
++					   (sess->s_id.b24 == port_id.b24));
++		qlt_schedule_sess_for_deletion(sess, true);
++		break;
++
+ 	case ELS_PRLI:
++		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
++
++		if (wwn)
++			sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
++			    loop_id);
++
++		if (sess != NULL) {
++			if (sess->deleted) {
++				/*
++				 * An impatient initiator sent PRLI before the
++				 * last PLOGI could finish. Force it to retry
++				 * while the last one finishes.
++				 */
++				ql_log(ql_log_warn, sess->vha, 0xf095,
++				    "sess %p PRLI received, before plogi ack.\n",
++				    sess);
++				qlt_send_term_imm_notif(vha, iocb, 1);
++				res = 0;
++				break;
++			}
++
++			/*
++			 * This shouldn't happen under normal circumstances,
++			 * since we have deleted the old session during PLOGI
++			 */
++			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
++			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
++			    sess->loop_id, sess, iocb->u.isp24.nport_handle);
++
++			sess->local = 0;
++			sess->loop_id = loop_id;
++			sess->s_id = port_id;
++
++			if (wd3_lo & BIT_7)
++				sess->conf_compl_supported = 1;
++
++		}
++		res = 1; /* send notify ack */
++
++		/* Make session global (not used in fabric mode) */
++		if (ha->current_topology != ISP_CFG_F) {
++			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
++			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
++			qla2xxx_wake_dpc(vha);
++		} else {
++			/* todo: else - create sess here. */
++			res = 1; /* send notify ack */
++		}
++
++		break;
++
+ 	case ELS_LOGO:
+ 	case ELS_PRLO:
+ 		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+@@ -3699,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ 		break;
+ 	}
+ 
++	case ELS_FLOGI:	/* should never happen */
+ 	default:
+ 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ 		    "qla_target(%d): Unsupported ELS command %x "
+@@ -5016,6 +5624,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
+ 		if (!sess)
+ 			goto out_term;
+ 	} else {
++		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++			sess = NULL;
++			goto out_term;
++		}
++
+ 		kref_get(&sess->se_sess->sess_kref);
+ 	}
+ 
+@@ -5070,6 +5683,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
+ 		if (!sess)
+ 			goto out_term;
+ 	} else {
++		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
++			sess = NULL;
++			goto out_term;
++		}
++
+ 		kref_get(&sess->se_sess->sess_kref);
+ 	}
+ 
+diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
+index 332086776dfe..d30c60a1d522 100644
+--- a/drivers/scsi/qla2xxx/qla_target.h
++++ b/drivers/scsi/qla2xxx/qla_target.h
+@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
+ 			uint32_t srr_rel_offs;
+ 			uint16_t srr_ui;
+ 			uint16_t srr_ox_id;
+-			uint8_t  reserved_4[19];
++			union {
++				struct {
++					uint8_t node_name[8];
++				} plogi; /* PLOGI/ADISC/PDISC */
++				struct {
++					/* PRLI word 3 bit 0-15 */
++					uint16_t wd3_lo;
++					uint8_t resv0[6];
++				} prli;
++				struct {
++					uint8_t port_id[3];
++					uint8_t resv1;
++					uint16_t nport_handle;
++					uint16_t resv2;
++				} req_els;
++			} u;
++			uint8_t port_name[8];
++			uint8_t resv3[3];
+ 			uint8_t  vp_index;
+ 			uint32_t reserved_5;
+ 			uint8_t  port_id[3];
+@@ -234,6 +251,7 @@ struct nack_to_isp {
+ 	uint8_t  reserved[2];
+ 	uint16_t ox_id;
+ } __packed;
++#define NOTIFY_ACK_FLAGS_TERMINATE	BIT_3
+ #define NOTIFY_ACK_SRR_FLAGS_ACCEPT	0
+ #define NOTIFY_ACK_SRR_FLAGS_REJECT	1
+ 
+@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+ #define	FC_TM_REJECT                4
+ #define FC_TM_FAILED                5
+ 
+-/*
+- * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
+- * terminated, so no more actions is needed and success should be returned
+- * to target.
+- */
+-#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED	0x1717
+-
+ #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+ #define pci_dma_lo32(a) (a & 0xffffffff)
+ #define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
+@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
+ 	struct scsi_qla_host *vha;
+ 	struct atio_from_isp atio;
+ 	struct work_struct work;
++	struct list_head cmd_list;
++	bool aborted;
++};
++
++enum qla_sess_deletion {
++	QLA_SESS_DELETION_NONE		= 0,
++	QLA_SESS_DELETION_PENDING	= 1, /* hopefully we can get rid of
++					      * this one */
++	QLA_SESS_DELETION_IN_PROGRESS	= 2,
+ };
+ 
+ /*
+@@ -884,8 +904,15 @@ struct qla_tgt_sess {
+ 	port_id_t s_id;
+ 
+ 	unsigned int conf_compl_supported:1;
+-	unsigned int deleted:1;
++	unsigned int deleted:2;
+ 	unsigned int local:1;
++	unsigned int logout_on_delete:1;
++	unsigned int plogi_ack_needed:1;
++	unsigned int keep_nport_handle:1;
++
++	unsigned char logout_completed;
++
++	int generation;
+ 
+ 	struct se_session *se_sess;
+ 	struct scsi_qla_host *vha;
+@@ -897,6 +924,10 @@ struct qla_tgt_sess {
+ 
+ 	uint8_t port_name[WWN_SIZE];
+ 	struct work_struct free_work;
++
++	union {
++		struct imm_ntfy_from_isp tm_iocb;
++	};
+ };
+ 
+ struct qla_tgt_cmd {
+@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
+ 	unsigned int conf_compl_supported:1;
+ 	unsigned int sg_mapped:1;
+ 	unsigned int free_sg:1;
+-	unsigned int aborted:1; /* Needed in case of SRR */
+ 	unsigned int write_data_transferred:1;
+ 	unsigned int ctx_dsd_alloced:1;
+ 	unsigned int q_full:1;
+@@ -1027,6 +1057,10 @@ struct qla_tgt_srr_ctio {
+ 	struct qla_tgt_cmd *cmd;
+ };
+ 
++/* Check for Switch reserved address */
++#define IS_SW_RESV_ADDR(_s_id) \
++	((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
++
+ #define QLA_TGT_XMIT_DATA		1
+ #define QLA_TGT_XMIT_STATUS		2
+ #define QLA_TGT_XMIT_ALL		(QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+@@ -1044,7 +1078,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
+ extern void qlt_lport_deregister(struct scsi_qla_host *);
+ extern void qlt_unreg_sess(struct qla_tgt_sess *);
+ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+-extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
++extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
+ extern int __init qlt_init(void);
+ extern void qlt_exit(void);
+ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+@@ -1074,12 +1108,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+ 		ha->host->active_mode |= MODE_INITIATOR;
+ }
+ 
++static inline uint32_t sid_to_key(const uint8_t *s_id)
++{
++	uint32_t key;
++
++	key = (((unsigned long)s_id[0] << 16) |
++	       ((unsigned long)s_id[1] << 8) |
++	       (unsigned long)s_id[2]);
++	return key;
++}
++
+ /*
+  * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
+  */
+ extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+ extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+ extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
++extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+ extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+ extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+ extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
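
The sid_to_key() helper added above replaces two open-coded copies in
tcm_qla2xxx.c (see the hunks below) and packs the three S_ID bytes,
domain/area/al_pa, into one comparable 24-bit key. Stand-alone version:

#include <stdio.h>
#include <stdint.h>

static uint32_t sid_to_key(const uint8_t *s_id)
{
	return ((uint32_t)s_id[0] << 16) |
	       ((uint32_t)s_id[1] << 8) |
	        (uint32_t)s_id[2];
}

int main(void)
{
	uint8_t s_id[3] = { 0x01, 0x02, 0x03 };	/* domain, area, al_pa */

	printf("key = 0x%06x\n", sid_to_key(s_id));	/* 0x010203 */
	return 0;
}
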
+@@ -1110,5 +1155,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
+ extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+ extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
++extern void qlt_logo_completion_handler(fc_port_t *, int);
++extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+ 
+ #endif /* __QLA_TARGET_H */
+diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
+index 962cb89fe0ae..af806fdb0dbc 100644
+--- a/drivers/scsi/qla2xxx/qla_tmpl.c
++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
+@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ 	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
+ 		for (i = 0; i < vha->hw->max_req_queues; i++) {
+ 			struct req_que *req = vha->hw->req_q_map[i];
++
++			if (!test_bit(i, vha->hw->req_qid_map))
++				continue;
++
+ 			if (req || !buf) {
+ 				length = req ?
+ 				    req->length : REQUEST_ENTRY_CNT_24XX;
+@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
+ 	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
+ 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++			if (!test_bit(i, vha->hw->rsp_qid_map))
++				continue;
++
+ 			if (rsp || !buf) {
+ 				length = rsp ?
+ 				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
+@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ 	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+ 		for (i = 0; i < vha->hw->max_req_queues; i++) {
+ 			struct req_que *req = vha->hw->req_q_map[i];
++
++			if (!test_bit(i, vha->hw->req_qid_map))
++				continue;
++
+ 			if (req || !buf) {
+ 				qla27xx_insert16(i, buf, len);
+ 				qla27xx_insert16(1, buf, len);
+@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+ 	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+ 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+ 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
++
++			if (!test_bit(i, vha->hw->rsp_qid_map))
++				continue;
++
+ 			if (rsp || !buf) {
+ 				qla27xx_insert16(i, buf, len);
+ 				qla27xx_insert16(1, buf, len);
+diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+index 5c9e680aa375..fdad875ca777 100644
+--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+@@ -429,7 +429,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+ 		cmd->cmd_flags |= BIT_14;
+ 	}
+ 
+-	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++	return target_put_sess_cmd(se_cmd);
+ }
+ 
+ /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
+@@ -669,7 +669,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+ 	cmd->cmd_flags |= BIT_4;
+ 	cmd->bufflen = se_cmd->data_length;
+ 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+-	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ 
+ 	cmd->sg_cnt = se_cmd->t_data_nents;
+ 	cmd->sg = se_cmd->t_data_sg;
+@@ -699,7 +698,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+ 	cmd->sg_cnt = 0;
+ 	cmd->offset = 0;
+ 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
+-	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+ 	if (cmd->cmd_flags &  BIT_5) {
+ 		pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+ 		dump_stack();
+@@ -764,14 +762,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+ {
+ 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ 				struct qla_tgt_cmd, se_cmd);
+-	struct scsi_qla_host *vha = cmd->vha;
+-	struct qla_hw_data *ha = vha->hw;
+-
+-	if (!cmd->sg_mapped)
+-		return;
+-
+-	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+-	cmd->sg_mapped = 0;
++	qlt_abort_cmd(cmd);
+ }
+ 
+ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+@@ -1323,9 +1314,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ 		return NULL;
+ 	}
+ 
+-	key = (((unsigned long)s_id[0] << 16) |
+-	       ((unsigned long)s_id[1] << 8) |
+-	       (unsigned long)s_id[2]);
++	key = sid_to_key(s_id);
+ 	pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+ 
+ 	se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+@@ -1360,9 +1349,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
+ 	void *slot;
+ 	int rc;
+ 
+-	key = (((unsigned long)s_id[0] << 16) |
+-	       ((unsigned long)s_id[1] << 8) |
+-	       (unsigned long)s_id[2]);
++	key = sid_to_key(s_id);
+ 	pr_debug("set_sess_by_s_id: %06x\n", key);
+ 
+ 	slot = btree_lookup32(&lport->lport_fcport_map, key);
+@@ -1718,6 +1705,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+ 	}
+ 
+ 	sess->conf_compl_supported = conf_compl_supported;
++
++	/* Reset logout parameters to default */
++	sess->logout_on_delete = 1;
++	sess->keep_nport_handle = 0;
+ }
+ 
+ /*
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 64ed88a67e6e..ac418e73536d 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -205,6 +205,7 @@ static struct {
+ 	{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
+ 	{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ 	{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
++	{"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
+ 	{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
+ 	{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+ 	{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 330bbe831066..2e58279fab60 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -712,7 +712,7 @@ static int iscsit_add_reject_from_cmd(
+ 	 */
+ 	if (cmd->se_cmd.se_tfo != NULL) {
+ 		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
+-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++		target_put_sess_cmd(&cmd->se_cmd);
+ 	}
+ 	return -1;
+ }
+@@ -998,7 +998,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
+ 		conn->cid);
+ 
+-	target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
++	target_get_sess_cmd(&cmd->se_cmd, true);
+ 
+ 	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
+ 						     scsilun_to_int(&hdr->lun));
+@@ -1064,7 +1064,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ 			return -1;
+ 		else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+-			target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++			target_put_sess_cmd(&cmd->se_cmd);
+ 			return 0;
+ 		}
+ 	}
+@@ -1080,7 +1080,7 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		if (!cmd->sense_reason)
+ 			return 0;
+ 
+-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++		target_put_sess_cmd(&cmd->se_cmd);
+ 		return 0;
+ 	}
+ 
+@@ -1111,7 +1111,6 @@ static int
+ iscsit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ 			  bool dump_payload)
+ {
+-	struct iscsi_conn *conn = cmd->conn;
+ 	int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ 	/*
+ 	 * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+@@ -1138,7 +1137,7 @@ after_immediate_data:
+ 
+ 			rc = iscsit_dump_data_payload(cmd->conn,
+ 						      cmd->first_burst_len, 1);
+-			target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++			target_put_sess_cmd(&cmd->se_cmd);
+ 			return rc;
+ 		} else if (cmd->unsolicited_data)
+ 			iscsit_set_unsoliticed_dataout(cmd);
+@@ -1807,7 +1806,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 				      conn->sess->se_sess, 0, DMA_NONE,
+ 				      TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+ 
+-		target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
++		target_get_sess_cmd(&cmd->se_cmd, true);
+ 		sess_ref = true;
+ 
+ 		switch (function) {
+@@ -1949,7 +1948,7 @@ attach:
+ 	 */
+ 	if (sess_ref) {
+ 		pr_debug("Handle TMR, using sess_ref=true check\n");
+-		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
++		target_put_sess_cmd(&cmd->se_cmd);
+ 	}
+ 
+ 	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 5a8add721741..83bb55b94434 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1981,7 +1981,7 @@ static void lio_set_default_node_attributes(struct se_node_acl *se_acl)
+ 
+ static int lio_check_stop_free(struct se_cmd *se_cmd)
+ {
+-	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++	return target_put_sess_cmd(se_cmd);
+ }
+ 
+ static void lio_release_cmd(struct se_cmd *se_cmd)
+diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
+index b18edda3e8af..231e2e0e5894 100644
+--- a/drivers/target/iscsi/iscsi_target_util.c
++++ b/drivers/target/iscsi/iscsi_target_util.c
+@@ -746,7 +746,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ 		rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ 		if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
+ 			__iscsit_free_cmd(cmd, true, shutdown);
+-			target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++			target_put_sess_cmd(se_cmd);
+ 		}
+ 		break;
+ 	case ISCSI_OP_REJECT:
+@@ -762,7 +762,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
+ 			rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
+ 			if (!rc && shutdown && se_cmd->se_sess) {
+ 				__iscsit_free_cmd(cmd, true, shutdown);
+-				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++				target_put_sess_cmd(se_cmd);
+ 			}
+ 			break;
+ 		}
+diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
+index 315ec3458eeb..adb8016955c4 100644
+--- a/drivers/target/target_core_tmr.c
++++ b/drivers/target/target_core_tmr.c
+@@ -71,7 +71,7 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
+ 
+ 	if (dev) {
+ 		spin_lock_irqsave(&dev->se_tmr_lock, flags);
+-		list_del(&tmr->tmr_list);
++		list_del_init(&tmr->tmr_list);
+ 		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+ 	}
+ 
+@@ -153,7 +153,7 @@ void core_tmr_abort_task(
+ 		cancel_work_sync(&se_cmd->work);
+ 		transport_wait_for_tasks(se_cmd);
+ 
+-		target_put_sess_cmd(se_sess, se_cmd);
++		target_put_sess_cmd(se_cmd);
+ 		transport_cmd_finish_abort(se_cmd, true);
+ 
+ 		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
+@@ -175,9 +175,11 @@ static void core_tmr_drain_tmr_list(
+ 	struct list_head *preempt_and_abort_list)
+ {
+ 	LIST_HEAD(drain_tmr_list);
++	struct se_session *sess;
+ 	struct se_tmr_req *tmr_p, *tmr_pp;
+ 	struct se_cmd *cmd;
+ 	unsigned long flags;
++	bool rc;
+ 	/*
+ 	 * Release all pending and outgoing TMRs aside from the received
+ 	 * LUN_RESET tmr..
+@@ -203,17 +205,31 @@ static void core_tmr_drain_tmr_list(
+ 		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
+ 			continue;
+ 
++		sess = cmd->se_sess;
++		if (WARN_ON_ONCE(!sess))
++			continue;
++
++		spin_lock(&sess->sess_cmd_lock);
+ 		spin_lock(&cmd->t_state_lock);
+ 		if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+ 			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
+ 		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
+ 			spin_unlock(&cmd->t_state_lock);
++			spin_unlock(&sess->sess_cmd_lock);
+ 			continue;
+ 		}
++		cmd->transport_state |= CMD_T_ABORTED;
+ 		spin_unlock(&cmd->t_state_lock);
+ 
++		rc = kref_get_unless_zero(&cmd->cmd_kref);
++		spin_unlock(&sess->sess_cmd_lock);
++		if (!rc) {
++			printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
++			continue;
++		}
+ 		list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
+ 	}
+ 	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
+@@ -227,7 +243,11 @@ static void core_tmr_drain_tmr_list(
+ 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
+ 			tmr_p->function, tmr_p->response, cmd->t_state);
+ 
++		cancel_work_sync(&cmd->work);
++		transport_wait_for_tasks(cmd);
++
+ 		transport_cmd_finish_abort(cmd, 1);
++		target_put_sess_cmd(cmd);
+ 	}
+ }
+ 
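
The core_tmr_drain_tmr_list() changes above pin each command with
kref_get_unless_zero() under sess_cmd_lock before moving it to the drain
list, so a TMR whose final reference is already being dropped is skipped
rather than used after free. A userspace analogue of that primitive built on
C11 atomics (the kernel version lives in include/linux/kref.h):

#include <stdatomic.h>
#include <stdio.h>

/* succeed only while the refcount is still non-zero */
static int get_unless_zero(atomic_int *ref)
{
	int v = atomic_load(ref);

	while (v != 0)
		if (atomic_compare_exchange_weak(ref, &v, v + 1))
			return 1;
	return 0;
}

int main(void)
{
	atomic_int cmd_kref;

	atomic_store(&cmd_kref, 1);
	if (get_unless_zero(&cmd_kref))
		printf("command pinned, ref now %d\n",
		       atomic_load(&cmd_kref));

	atomic_store(&cmd_kref, 0);	/* final put already ran */
	if (!get_unless_zero(&cmd_kref))
		printf("command already dying, skipped\n");
	return 0;
}
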
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 675f2d9d1f14..3881504b40d8 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1419,7 +1419,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
+ 	 * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ 	 * kref_put() to happen during fabric packet acknowledgement.
+ 	 */
+-	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
++	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+ 	if (ret)
+ 		return ret;
+ 	/*
+@@ -1433,7 +1433,7 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
+ 	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
+ 	if (rc) {
+ 		transport_send_check_condition_and_sense(se_cmd, rc, 0);
+-		target_put_sess_cmd(se_sess, se_cmd);
++		target_put_sess_cmd(se_cmd);
+ 		return 0;
+ 	}
+ 
+@@ -1584,7 +1584,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
+ 		se_cmd->se_tmr_req->ref_task_tag = tag;
+ 
+ 	/* See target_submit_cmd for commentary */
+-	ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
++	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+ 	if (ret) {
+ 		core_tmr_release_req(se_cmd->se_tmr_req);
+ 		return ret;
+@@ -2227,7 +2227,7 @@ static int transport_release_cmd(struct se_cmd *cmd)
+ 	 * If this cmd has been setup with target_get_sess_cmd(), drop
+ 	 * the kref and call ->release_cmd() in kref callback.
+ 	 */
+-	return target_put_sess_cmd(cmd->se_sess, cmd);
++	return target_put_sess_cmd(cmd);
+ }
+ 
+ /**
+@@ -2471,13 +2471,12 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+ EXPORT_SYMBOL(transport_generic_free_cmd);
+ 
+ /* target_get_sess_cmd - Add command to active ->sess_cmd_list
+- * @se_sess:	session to reference
+  * @se_cmd:	command descriptor to add
+  * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
+  */
+-int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+-			       bool ack_kref)
++int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
+ {
++	struct se_session *se_sess = se_cmd->se_sess;
+ 	unsigned long flags;
+ 	int ret = 0;
+ 
+@@ -2499,7 +2498,7 @@ out:
+ 	spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ 
+ 	if (ret && ack_kref)
+-		target_put_sess_cmd(se_sess, se_cmd);
++		target_put_sess_cmd(se_cmd);
+ 
+ 	return ret;
+ }
+@@ -2528,11 +2527,12 @@ static void target_release_cmd_kref(struct kref *kref)
+ }
+ 
+ /* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+- * @se_sess:	session to reference
+  * @se_cmd:	command descriptor to drop
+  */
+-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
++int target_put_sess_cmd(struct se_cmd *se_cmd)
+ {
++	struct se_session *se_sess = se_cmd->se_sess;
++
+ 	if (!se_sess) {
+ 		se_cmd->se_tfo->release_cmd(se_cmd);
+ 		return 1;
+@@ -3025,8 +3025,17 @@ static void target_tmr_work(struct work_struct *work)
+ 	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
+ 	struct se_device *dev = cmd->se_dev;
+ 	struct se_tmr_req *tmr = cmd->se_tmr_req;
++	unsigned long flags;
+ 	int ret;
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (cmd->transport_state & CMD_T_ABORTED) {
++		tmr->response = TMR_FUNCTION_REJECTED;
++		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		goto check_stop;
++	}
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ 	switch (tmr->function) {
+ 	case TMR_ABORT_TASK:
+ 		core_tmr_abort_task(dev, tmr, cmd->se_sess);
+@@ -3054,9 +3063,17 @@ static void target_tmr_work(struct work_struct *work)
+ 		break;
+ 	}
+ 
++	spin_lock_irqsave(&cmd->t_state_lock, flags);
++	if (cmd->transport_state & CMD_T_ABORTED) {
++		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++		goto check_stop;
++	}
+ 	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
++	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
++
+ 	cmd->se_tfo->queue_tm_rsp(cmd);
+ 
++check_stop:
+ 	transport_cmd_check_stop_to_fabric(cmd);
+ }
+ 
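
The target_core_transport.c hunks above (and the matching caller updates in
the iSCSI and qla2xxx fabric code earlier in this patch) drop the redundant
se_sess argument from target_get_sess_cmd()/target_put_sess_cmd(): the
session is already reachable as se_cmd->se_sess, and passing it separately
let callers hand in a mismatched pair. The shape of the refactor in
miniature, with invented toy types:

#include <stdio.h>

struct sess { const char *name; };
struct cmd  { struct sess *sess; };

/* after: one argument, the session is derived from the command */
static void put_sess_cmd(struct cmd *c)
{
	struct sess *s = c->sess;	/* was a second parameter before */

	printf("dropping ref on cmd owned by %s\n",
	       s ? s->name : "(no session)");
}

int main(void)
{
	struct sess s = { "initiator-a" };
	struct cmd c = { &s };

	put_sess_cmd(&c);	/* old call: put_sess_cmd(&s, &c) */
	return 0;
}
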
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 4d5e8409769c..254c183a5efe 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -672,7 +672,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+ /* this is called once with whichever end is closed last */
+ static void pty_unix98_shutdown(struct tty_struct *tty)
+ {
+-	devpts_kill_index(tty->driver_data, tty->index);
++	struct inode *ptmx_inode;
++
++	if (tty->driver->subtype == PTY_TYPE_MASTER)
++		ptmx_inode = tty->driver_data;
++	else
++		ptmx_inode = tty->link->driver_data;
++	devpts_kill_index(ptmx_inode, tty->index);
++	devpts_del_ref(ptmx_inode);
+ }
+ 
+ static const struct tty_operations ptm_unix98_ops = {
+@@ -764,6 +771,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
+ 	set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
+ 	tty->driver_data = inode;
+ 
++	/*
++	 * In the case where all references to the ptmx inode are dropped and
++	 * we still have /dev/tty opened pointing to the master/slave pair
++	 * (ptmx is closed/released before /dev/tty), we must make sure the
++	 * inode is still valid when we call the final pty_unix98_shutdown, so
++	 * we hold an additional reference to the ptmx inode. For the same
++	 * /dev/tty last-close case, we also need to make sure the super_block
++	 * isn't destroyed (devpts instance unmounted) before /dev/tty is
++	 * closed, since devpts_kill_index is called on its release.
++	 */
++	devpts_add_ref(inode);
++
+ 	tty_add_file(tty, filp);
+ 
+ 	slave_inode = devpts_pty_new(inode,
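
The comment added to ptmx_open() above is the heart of this fix: the final pty_unix98_shutdown() may run after every other reference to the ptmx inode and the devpts superblock is gone, so an extra reference to each is taken at open and dropped in shutdown. A rough userspace analogue of that paired get/put discipline (invented names, not kernel code):

    /*
     * Hedged sketch of the paired-reference pattern behind
     * devpts_add_ref()/devpts_del_ref().  Illustrative names only.
     */
    #include <assert.h>
    #include <stdio.h>

    struct obj {
        int refcount;           /* stands in for inode/super_block refs */
    };

    static void get_ref(struct obj *o)
    {
        o->refcount++;
    }

    static void put_ref(struct obj *o)
    {
        assert(o->refcount > 0);
        if (--o->refcount == 0)
            printf("object freed\n");
    }

    int main(void)
    {
        struct obj inode = { .refcount = 1 };

        get_ref(&inode);        /* open path: devpts_add_ref() analogue */
        put_ref(&inode);        /* all other references drop early ... */
        /* ... yet the late shutdown path can still use 'inode' here */
        put_ref(&inode);        /* teardown: devpts_del_ref() analogue */
        return 0;
    }
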
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 2fd163b75665..b82b2a0f82a3 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2002,6 +2002,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
+ #define PCIE_VENDOR_ID_WCH		0x1c00
+ #define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
+ #define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
++#define PCIE_DEVICE_ID_WCH_CH382_2S	0x3253
+ 
+ #define PCI_DEVICE_ID_EXAR_XR17V4358	0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
+@@ -2729,6 +2730,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= pci_wch_ch353_setup,
+ 	},
++	/* WCH CH382 2S card (16850 clone) */
++	{
++		.vendor         = PCIE_VENDOR_ID_WCH,
++		.device         = PCIE_DEVICE_ID_WCH_CH382_2S,
++		.subvendor      = PCI_ANY_ID,
++		.subdevice      = PCI_ANY_ID,
++		.setup          = pci_wch_ch38x_setup,
++	},
+ 	/* WCH CH382 2S1P card (16850 clone) */
+ 	{
+ 		.vendor         = PCIE_VENDOR_ID_WCH,
+@@ -3049,6 +3058,7 @@ enum pci_board_num_t {
+ 	pbn_fintek_4,
+ 	pbn_fintek_8,
+ 	pbn_fintek_12,
++	pbn_wch382_2,
+ 	pbn_wch384_4,
+ 	pbn_pericom_PI7C9X7951,
+ 	pbn_pericom_PI7C9X7952,
+@@ -3879,6 +3889,13 @@ static struct pciserial_board pci_boards[] = {
+ 		.base_baud	= 115200,
+ 		.first_offset	= 0x40,
+ 	},
++	[pbn_wch382_2] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 2,
++		.base_baud	= 115200,
++		.uart_offset	= 8,
++		.first_offset	= 0xC0,
++	},
+ 	[pbn_wch384_4] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 4,
+@@ -5691,6 +5708,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_b0_bt_2_115200 },
+ 
++	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0, 0, pbn_wch382_2 },
++
+ 	{	PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0, 0, pbn_wch384_4 },
+diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
+index 7f49172ccd86..0a88693cd8ca 100644
+--- a/drivers/tty/serial/omap-serial.c
++++ b/drivers/tty/serial/omap-serial.c
+@@ -1368,7 +1368,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
+ 
+ /* Enable or disable the rs485 support */
+ static int
+-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
++serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
+ {
+ 	struct uart_omap_port *up = to_uart_omap_port(port);
+ 	unsigned int mode;
+@@ -1381,8 +1381,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+ 	up->ier = 0;
+ 	serial_out(up, UART_IER, 0);
+ 
++	/* Clamp the delays to [0, 100ms] */
++	rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
++	rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
++
+ 	/* store new config */
+-	port->rs485 = *rs485conf;
++	port->rs485 = *rs485;
+ 
+ 	/*
+ 	 * Just as a precaution, only allow rs485
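
The omap hunk above sanitizes the two userspace-supplied RS-485 turnaround delays before they are stored, bounding each to 100 ms. Clamping untrusted ioctl input at the first opportunity is the general pattern; a trivial sketch (illustrative struct, not the kernel's serial_rs485):

    /*
     * Hedged sketch: clamp userspace-supplied delays before storing
     * them, as the hunk above does.  Illustrative types only.
     */
    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    struct rs485_conf {
        unsigned int delay_rts_before_send;     /* milliseconds */
        unsigned int delay_rts_after_send;      /* milliseconds */
    };

    int main(void)
    {
        struct rs485_conf conf = { 4000000000u, 50u };  /* hostile input */

        /* Clamp both delays to [0, 100] ms before accepting the config. */
        conf.delay_rts_before_send = min(conf.delay_rts_before_send, 100u);
        conf.delay_rts_after_send  = min(conf.delay_rts_after_send, 100u);

        printf("before=%u after=%u\n",
               conf.delay_rts_before_send, conf.delay_rts_after_send);
        return 0;
    }
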
+diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
+index ea32b386797f..636435b41293 100644
+--- a/drivers/vhost/scsi.c
++++ b/drivers/vhost/scsi.c
+@@ -607,7 +607,7 @@ static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
+ 
+ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
+ {
+-	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
++	return target_put_sess_cmd(se_cmd);
+ }
+ 
+ static void
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 723470850b94..30bc9fa763bd 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1369,7 +1369,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ 			read_extent_buffer(eb, dest + bytes_left,
+ 					   name_off, name_len);
+ 		if (eb != eb_in) {
+-			btrfs_tree_read_unlock_blocking(eb);
++			if (!path->skip_locking)
++				btrfs_tree_read_unlock_blocking(eb);
+ 			free_extent_buffer(eb);
+ 		}
+ 		ret = btrfs_find_item(fs_root, path, parent, 0,
+@@ -1389,9 +1390,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+ 		eb = path->nodes[0];
+ 		/* make sure we can use eb after releasing the path */
+ 		if (eb != eb_in) {
+-			atomic_inc(&eb->refs);
+-			btrfs_tree_read_lock(eb);
+-			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++			if (!path->skip_locking)
++				btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
++			path->nodes[0] = NULL;
++			path->locks[0] = 0;
+ 		}
+ 		btrfs_release_path(path);
+ 		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index a2ae42720a6a..bc2d048a9eb9 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1690,7 +1690,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
+  *
+  */
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+-				    struct list_head *ins_list)
++				    struct list_head *ins_list, bool *emitted)
+ {
+ 	struct btrfs_dir_item *di;
+ 	struct btrfs_delayed_item *curr, *next;
+@@ -1734,6 +1734,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+ 
+ 		if (over)
+ 			return 1;
++		*emitted = true;
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index f70119f25421..0167853c84ae 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
+ int btrfs_should_delete_dir_index(struct list_head *del_list,
+ 				  u64 index);
+ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
+-				    struct list_head *ins_list);
++				    struct list_head *ins_list, bool *emitted);
+ 
+ /* for init */
+ int __init btrfs_delayed_inode_init(void);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index df4e0462976e..b114a0539d3d 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5666,6 +5666,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 	char *name_ptr;
+ 	int name_len;
+ 	int is_curr = 0;	/* ctx->pos points to the current index? */
++	bool emitted;
+ 
+ 	/* FIXME, use a real flag for deciding about the key type */
+ 	if (root->fs_info->tree_root == root)
+@@ -5694,6 +5695,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+ 	if (ret < 0)
+ 		goto err;
+ 
++	emitted = false;
+ 	while (1) {
+ 		leaf = path->nodes[0];
+ 		slot = path->slots[0];
+@@ -5773,6 +5775,7 @@ skip:
+ 
+ 			if (over)
+ 				goto nopos;
++			emitted = true;
+ 			di_len = btrfs_dir_name_len(leaf, di) +
+ 				 btrfs_dir_data_len(leaf, di) + sizeof(*di);
+ 			di_cur += di_len;
+@@ -5785,11 +5788,20 @@ next:
+ 	if (key_type == BTRFS_DIR_INDEX_KEY) {
+ 		if (is_curr)
+ 			ctx->pos++;
+-		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
++		ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
+ 		if (ret)
+ 			goto nopos;
+ 	}
+ 
++	/*
++	 * If we haven't emitted any dir entry, we must not touch ctx->pos as
++	 * it was set to the termination value in the previous call. We assume
++	 * that "." and ".." were emitted if we reach this point and set the
++	 * termination value as well for an empty directory.
++	 */
++	if (ctx->pos > 2 && !emitted)
++		goto nopos;
++
+ 	/* Reached end of directory/root. Bump pos past the last item. */
+ 	ctx->pos++;
+ 
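
The invariant stated in the new comment above: once ctx->pos has been bumped past the last entry, a later readdir call that emits nothing must leave it alone, otherwise the offset userspace sees as end-of-directory keeps moving. A standalone sketch of the emitted guard (plain C, illustrative names):

    /*
     * Hedged sketch of the "emitted" guard in btrfs_real_readdir().
     * Illustrative names; positions 0 and 1 stand for "." and "..".
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct dir_context {
        long pos;
    };

    /* One readdir pass over a directory with 'nents' real entries. */
    static void readdir_pass(struct dir_context *ctx, int nents)
    {
        bool emitted = false;

        while (ctx->pos - 2 < nents) {
            printf("emit entry at pos %ld\n", ctx->pos);
            ctx->pos++;
            emitted = true;
        }

        /*
         * Nothing emitted: ctx->pos already holds the termination
         * value from a previous call, so don't advance it again.
         */
        if (ctx->pos > 2 && !emitted)
            return;

        ctx->pos++;             /* bump past the last item exactly once */
    }

    int main(void)
    {
        struct dir_context ctx = { .pos = 2 };

        readdir_pass(&ctx, 3);  /* emits three entries, pos becomes 6 */
        readdir_pass(&ctx, 3);  /* emits nothing, pos must stay 6 */
        printf("final pos %ld\n", ctx.pos);
        return 0;
    }
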
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index afa09fce8151..e682b36a210f 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -714,7 +714,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 
+ 	ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
+ 	if (!ses->auth_key.response) {
+-		rc = ENOMEM;
++		rc = -ENOMEM;
+ 		ses->auth_key.len = 0;
+ 		goto setup_ntlmv2_rsp_ret;
+ 	}
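
The cifs change above is a single-character fix with outsized consequences: kernel convention is 0 for success and a negative errno for failure, so the positive `rc = ENOMEM` sailed past every `rc < 0` check. A small demonstration of the sign convention (illustrative code, not cifs):

    /*
     * Hedged sketch: positive vs. negative errno returns.  The kernel
     * convention is 0 on success and -Exxx on failure.
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int alloc_thing(int fail, int buggy)
    {
        void *p = fail ? NULL : malloc(16);

        if (!p)
            return buggy ? ENOMEM : -ENOMEM;   /* fixed code negates */
        free(p);
        return 0;
    }

    int main(void)
    {
        int rc = alloc_thing(1, 1);

        if (rc < 0)
            printf("buggy variant: failure detected\n");
        else
            printf("buggy variant: rc=%d mistaken for success\n", rc);

        rc = alloc_thing(1, 0);
        if (rc < 0)
            printf("fixed variant: failure detected, rc=%d\n", rc);
        return 0;
    }
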
+diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
+index add566303c68..91360444adf5 100644
+--- a/fs/devpts/inode.c
++++ b/fs/devpts/inode.c
+@@ -569,6 +569,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
+ 	mutex_unlock(&allocated_ptys_lock);
+ }
+ 
++/*
++ * pty code needs to hold extra references in case of last /dev/tty close
++ */
++
++void devpts_add_ref(struct inode *ptmx_inode)
++{
++	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++	atomic_inc(&sb->s_active);
++	ihold(ptmx_inode);
++}
++
++void devpts_del_ref(struct inode *ptmx_inode)
++{
++	struct super_block *sb = pts_sb_from_inode(ptmx_inode);
++
++	iput(ptmx_inode);
++	deactivate_super(sb);
++}
++
+ /**
+  * devpts_pty_new -- create a new inode in /dev/pts/
+  * @ptmx_inode: inode of the master
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 966c614822cc..2b3a53a51582 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -3133,29 +3133,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
+ 	 * case, we allocate an io_end structure to hook to the iocb.
+ 	 */
+ 	iocb->private = NULL;
+-	ext4_inode_aio_set(inode, NULL);
+-	if (!is_sync_kiocb(iocb)) {
+-		io_end = ext4_init_io_end(inode, GFP_NOFS);
+-		if (!io_end) {
+-			ret = -ENOMEM;
+-			goto retake_lock;
+-		}
+-		/*
+-		 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+-		 */
+-		iocb->private = ext4_get_io_end(io_end);
+-		/*
+-		 * we save the io structure for current async direct
+-		 * IO, so that later ext4_map_blocks() could flag the
+-		 * io structure whether there is a unwritten extents
+-		 * needs to be converted when IO is completed.
+-		 */
+-		ext4_inode_aio_set(inode, io_end);
+-	}
+-
+ 	if (overwrite) {
+ 		get_block_func = ext4_get_block_write_nolock;
+ 	} else {
++		ext4_inode_aio_set(inode, NULL);
++		if (!is_sync_kiocb(iocb)) {
++			io_end = ext4_init_io_end(inode, GFP_NOFS);
++			if (!io_end) {
++				ret = -ENOMEM;
++				goto retake_lock;
++			}
++			/*
++			 * Grab reference for DIO. Will be dropped in
++			 * ext4_end_io_dio()
++			 */
++			iocb->private = ext4_get_io_end(io_end);
++			/*
++			 * we save the io structure for current async direct
++			 * IO, so that later ext4_map_blocks() could flag the
++			 * io structure whether there are unwritten extents
++			 * that need to be converted when IO is completed.
++			 */
++			ext4_inode_aio_set(inode, io_end);
++		}
+ 		get_block_func = ext4_get_block_write;
+ 		dio_flags = DIO_LOCKING;
+ 	}
+diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
+index 370420bfae8d..7da8ac1047f8 100644
+--- a/fs/ext4/move_extent.c
++++ b/fs/ext4/move_extent.c
+@@ -268,11 +268,12 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
+ 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
+ 	unsigned long blocksize = orig_inode->i_sb->s_blocksize;
+ 	unsigned int tmp_data_size, data_size, replaced_size;
+-	int err2, jblocks, retries = 0;
++	int i, err2, jblocks, retries = 0;
+ 	int replaced_count = 0;
+ 	int from = data_offset_in_page << orig_inode->i_blkbits;
+ 	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
+ 	struct super_block *sb = orig_inode->i_sb;
++	struct buffer_head *bh = NULL;
+ 
+ 	/*
+ 	 * It needs twice the amount of ordinary journal buffers because
+@@ -383,8 +384,16 @@ data_copy:
+ 	}
+ 	/* Perform all necessary steps similar write_begin()/write_end()
+ 	 * but keeping in mind that i_size will not change */
+-	*err = __block_write_begin(pagep[0], from, replaced_size,
+-				   ext4_get_block);
++	if (!page_has_buffers(pagep[0]))
++		create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
++	bh = page_buffers(pagep[0]);
++	for (i = 0; i < data_offset_in_page; i++)
++		bh = bh->b_this_page;
++	for (i = 0; i < block_len_in_page; i++) {
++		*err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
++		if (*err < 0)
++			break;
++	}
+ 	if (!*err)
+ 		*err = block_commit_write(pagep[0], from, from + replaced_size);
+ 
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index cf0c472047e3..0e783b9f7007 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -198,7 +198,7 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
+ 	if (flex_gd == NULL)
+ 		goto out3;
+ 
+-	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
++	if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
+ 		goto out2;
+ 	flex_gd->count = flexbg_size;
+ 
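
The resize fix above corrects an overflow guard that divided UINT_MAX by the size of the wrong structure: the array being sized holds ext4_new_group_data elements, so the bound has to use that element's size. The shape of a correct guard, sketched with invented types:

    /*
     * Hedged sketch: an allocation-overflow guard must divide by the
     * size of the element actually allocated.  Invented types.
     */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct other { int a; };                  /* the wrong type to check */
    struct elem  { int a; long b[8]; };       /* the type being allocated */

    static void *alloc_array(unsigned long n)
    {
        /* Reject counts for which n * sizeof(struct elem) could wrap. */
        if (n >= UINT_MAX / sizeof(struct elem))
            return NULL;
        return malloc(n * sizeof(struct elem));
    }

    int main(void)
    {
        void *p = alloc_array(10);

        printf("sizeof(other)=%zu sizeof(elem)=%zu\n",
               sizeof(struct other), sizeof(struct elem));
        printf("alloc(10): %s\n", p ? "ok" : "rejected");
        printf("alloc(UINT_MAX): %s\n",
               alloc_array(UINT_MAX) ? "ok" : "rejected");
        free(p);
        return 0;
    }
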
+diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
+index 0419485891f2..0f1c6f315cdc 100644
+--- a/include/asm-generic/cputime_nsecs.h
++++ b/include/asm-generic/cputime_nsecs.h
+@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
+  */
+ static inline cputime_t timespec_to_cputime(const struct timespec *val)
+ {
+-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
++	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ 	return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
+  */
+ static inline cputime_t timeval_to_cputime(const struct timeval *val)
+ {
+-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
++	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
++			val->tv_usec * NSEC_PER_USEC;
+ 	return (__force cputime_t) ret;
+ }
+ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
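
The cputime fix above bites on 32-bit architectures, where tv_sec is a 32-bit long: the multiplication by NSEC_PER_SEC is then performed in 32 bits and wraps after only a few seconds, while the added cast widens it to 64 bits first. A userspace illustration that forces 32-bit arithmetic with uint32_t so the wraparound is well defined:

    /*
     * Hedged sketch: why the (u64) cast matters.  A 32-bit tv_sec is
     * modelled with uint32_t so the wraparound is well defined.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000u

    int main(void)
    {
        uint32_t tv_sec = 5;    /* five seconds */

        /* Buggy: the multiply wraps in 32 bits before widening. */
        uint64_t bad = tv_sec * NSEC_PER_SEC;

        /* Fixed: widen first, multiply in 64 bits. */
        uint64_t good = (uint64_t)tv_sec * NSEC_PER_SEC;

        printf("bad  = %" PRIu64 "\n", bad);    /* 705032704 */
        printf("good = %" PRIu64 "\n", good);   /* 5000000000 */
        return 0;
    }
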
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+new file mode 100644
+index 000000000000..8d9c7e7a6432
+--- /dev/null
++++ b/include/linux/cgroup-defs.h
+@@ -0,0 +1,470 @@
++/*
++ * linux/cgroup-defs.h - basic definitions for cgroup
++ *
++ * This file provides basic types and interfaces.  Include this file directly
++ * only if necessary to avoid cyclic dependencies.
++ */
++#ifndef _LINUX_CGROUP_DEFS_H
++#define _LINUX_CGROUP_DEFS_H
++
++#include <linux/limits.h>
++#include <linux/list.h>
++#include <linux/idr.h>
++#include <linux/wait.h>
++#include <linux/mutex.h>
++#include <linux/rcupdate.h>
++#include <linux/percpu-refcount.h>
++#include <linux/workqueue.h>
++
++#ifdef CONFIG_CGROUPS
++
++struct cgroup;
++struct cgroup_root;
++struct cgroup_subsys;
++struct cgroup_taskset;
++struct kernfs_node;
++struct kernfs_ops;
++struct kernfs_open_file;
++
++#define MAX_CGROUP_TYPE_NAMELEN 32
++#define MAX_CGROUP_ROOT_NAMELEN 64
++#define MAX_CFTYPE_NAME		64
++
++/* define the enumeration of all cgroup subsystems */
++#define SUBSYS(_x) _x ## _cgrp_id,
++enum cgroup_subsys_id {
++#include <linux/cgroup_subsys.h>
++	CGROUP_SUBSYS_COUNT,
++};
++#undef SUBSYS
++
++/* bits in struct cgroup_subsys_state flags field */
++enum {
++	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
++	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
++	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
++};
++
++/* bits in struct cgroup flags field */
++enum {
++	/* Control Group requires release notifications to userspace */
++	CGRP_NOTIFY_ON_RELEASE,
++	/*
++	 * Clone the parent's configuration when creating a new child
++	 * cpuset cgroup.  For historical reasons, this option can be
++	 * specified at mount time and thus is implemented here.
++	 */
++	CGRP_CPUSET_CLONE_CHILDREN,
++};
++
++/* cgroup_root->flags */
++enum {
++	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0), /* __DEVEL__sane_behavior specified */
++	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
++	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
++};
++
++/* cftype->flags */
++enum {
++	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
++	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
++	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
++
++	/* internal flags, do not use outside cgroup core proper */
++	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
++	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
++};
++
++/*
++ * Per-subsystem/per-cgroup state maintained by the system.  This is the
++ * fundamental structural building block that controllers deal with.
++ *
++ * Fields marked with "PI:" are public and immutable and may be accessed
++ * directly without synchronization.
++ */
++struct cgroup_subsys_state {
++	/* PI: the cgroup that this css is attached to */
++	struct cgroup *cgroup;
++
++	/* PI: the cgroup subsystem that this css is attached to */
++	struct cgroup_subsys *ss;
++
++	/* reference count - access via css_[try]get() and css_put() */
++	struct percpu_ref refcnt;
++
++	/* PI: the parent css */
++	struct cgroup_subsys_state *parent;
++
++	/* siblings list anchored at the parent's ->children */
++	struct list_head sibling;
++	struct list_head children;
++
++	/*
++	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
++	 * matching css can be looked up using css_from_id().
++	 */
++	int id;
++
++	unsigned int flags;
++
++	/*
++	 * Monotonically increasing unique serial number which defines a
++	 * uniform order among all csses.  It's guaranteed that all
++	 * ->children lists are in the ascending order of ->serial_nr and
++	 * used to allow interrupting and resuming iterations.
++	 */
++	u64 serial_nr;
++
++	/*
++	 * Incremented by online self and children.  Used to guarantee that
++	 * parents are not offlined before their children.
++	 */
++	atomic_t online_cnt;
++
++	/* percpu_ref killing and RCU release */
++	struct rcu_head rcu_head;
++	struct work_struct destroy_work;
++};
++
++/*
++ * A css_set is a structure holding pointers to a set of
++ * cgroup_subsys_state objects. This saves space in the task struct
++ * object and speeds up fork()/exit(), since a single inc/dec and a
++ * list_add()/del() can bump the reference count on the entire cgroup
++ * set for a task.
++ */
++struct css_set {
++	/* Reference count */
++	atomic_t refcount;
++
++	/*
++	 * List running through all cgroup groups in the same hash
++	 * slot. Protected by css_set_lock
++	 */
++	struct hlist_node hlist;
++
++	/*
++	 * Lists running through all tasks using this cgroup group.
++	 * mg_tasks lists tasks which belong to this cset but are in the
++	 * process of being migrated out or in.  Protected by
++	 * css_set_rwsem, but, during migration, once tasks are moved to
++	 * mg_tasks, it can be read safely while holding cgroup_mutex.
++	 */
++	struct list_head tasks;
++	struct list_head mg_tasks;
++
++	/*
++	 * List of cgrp_cset_links pointing at cgroups referenced from this
++	 * css_set.  Protected by css_set_lock.
++	 */
++	struct list_head cgrp_links;
++
++	/* the default cgroup associated with this css_set */
++	struct cgroup *dfl_cgrp;
++
++	/*
++	 * Set of subsystem states, one for each subsystem. This array is
++	 * immutable after creation apart from the init_css_set during
++	 * subsystem registration (at boot time).
++	 */
++	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
++
++	/*
++	 * List of csets participating in the on-going migration either as
++	 * source or destination.  Protected by cgroup_mutex.
++	 */
++	struct list_head mg_preload_node;
++	struct list_head mg_node;
++
++	/*
++	 * If this cset is acting as the source of migration the following
++	 * two fields are set.  mg_src_cgrp is the source cgroup of the
++	 * on-going migration and mg_dst_cset is the destination cset the
++	 * target tasks on this cset should be migrated to.  Protected by
++	 * cgroup_mutex.
++	 */
++	struct cgroup *mg_src_cgrp;
++	struct css_set *mg_dst_cset;
++
++	/*
++	 * On the default hierarchy, ->subsys[ssid] may point to a css
++	 * attached to an ancestor instead of the cgroup this css_set is
++	 * associated with.  The following node is anchored at
++	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
++	 * iterate through all css's attached to a given cgroup.
++	 */
++	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
++
++	/* For RCU-protected deletion */
++	struct rcu_head rcu_head;
++};
++
++struct cgroup {
++	/* self css with NULL ->ss, points back to this cgroup */
++	struct cgroup_subsys_state self;
++
++	unsigned long flags;		/* "unsigned long" so bitops work */
++
++	/*
++	 * idr allocated in-hierarchy ID.
++	 *
++	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
++	 * new cgroup will be assigned the smallest available ID.
++	 *
++	 * Allocating/Removing ID must be protected by cgroup_mutex.
++	 */
++	int id;
++
++	/*
++	 * If this cgroup contains any tasks, it contributes one to
++	 * populated_cnt.  All children with non-zero populated_cnt of
++	 * their own contribute one.  The count is zero iff there's no task
++	 * in this cgroup or its subtree.
++	 */
++	int populated_cnt;
++
++	struct kernfs_node *kn;		/* cgroup kernfs entry */
++	struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
++
++	/*
++	 * The bitmask of subsystems enabled on the child cgroups.
++	 * ->subtree_control is the one configured through
++	 * "cgroup.subtree_control" while ->child_subsys_mask is the
++	 * effective one which may have more subsystems enabled.
++	 * Controller knobs are made available iff it's enabled in
++	 * ->subtree_control.
++	 */
++	unsigned int subtree_control;
++	unsigned int child_subsys_mask;
++
++	/* Private pointers for each registered subsystem */
++	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
++
++	struct cgroup_root *root;
++
++	/*
++	 * List of cgrp_cset_links pointing at css_sets with tasks in this
++	 * cgroup.  Protected by css_set_lock.
++	 */
++	struct list_head cset_links;
++
++	/*
++	 * On the default hierarchy, a css_set for a cgroup with some
++	 * subsys disabled will point to css's which are associated with
++	 * the closest ancestor which has the subsys enabled.  The
++	 * following lists all css_sets which point to this cgroup's css
++	 * for the given subsystem.
++	 */
++	struct list_head e_csets[CGROUP_SUBSYS_COUNT];
++
++	/*
++	 * list of pidlists, up to two for each namespace (one for procs, one
++	 * for tasks); created on demand.
++	 */
++	struct list_head pidlists;
++	struct mutex pidlist_mutex;
++
++	/* used to wait for offlining of csses */
++	wait_queue_head_t offline_waitq;
++
++	/* used to schedule release agent */
++	struct work_struct release_agent_work;
++};
++
++/*
++ * A cgroup_root represents the root of a cgroup hierarchy, and may be
++ * associated with a kernfs_root to form an active hierarchy.  This is
++ * internal to cgroup core.  Don't access directly from controllers.
++ */
++struct cgroup_root {
++	struct kernfs_root *kf_root;
++
++	/* The bitmask of subsystems attached to this hierarchy */
++	unsigned int subsys_mask;
++
++	/* Unique id for this hierarchy. */
++	int hierarchy_id;
++
++	/* The root cgroup.  Root is destroyed on its release. */
++	struct cgroup cgrp;
++
++	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
++	atomic_t nr_cgrps;
++
++	/* A list running through the active hierarchies */
++	struct list_head root_list;
++
++	/* Hierarchy-specific flags */
++	unsigned int flags;
++
++	/* IDs for cgroups in this hierarchy */
++	struct idr cgroup_idr;
++
++	/* The path to use for release notifications. */
++	char release_agent_path[PATH_MAX];
++
++	/* The name for this hierarchy - may be empty */
++	char name[MAX_CGROUP_ROOT_NAMELEN];
++};
++
++/*
++ * struct cftype: handler definitions for cgroup control files
++ *
++ * When reading/writing to a file:
++ *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
++ *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
++ */
++struct cftype {
++	/*
++	 * By convention, the name should begin with the name of the
++	 * subsystem, followed by a period.  Zero length string indicates
++	 * end of cftype array.
++	 */
++	char name[MAX_CFTYPE_NAME];
++	int private;
++	/*
++	 * If not 0, file mode is set to this value, otherwise it will
++	 * be figured out automatically
++	 */
++	umode_t mode;
++
++	/*
++	 * The maximum length of string, excluding trailing nul, that can
++	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
++	 */
++	size_t max_write_len;
++
++	/* CFTYPE_* flags */
++	unsigned int flags;
++
++	/*
++	 * Fields used for internal bookkeeping.  Initialized automatically
++	 * during registration.
++	 */
++	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
++	struct list_head node;		/* anchored at ss->cfts */
++	struct kernfs_ops *kf_ops;
++
++	/*
++	 * read_u64() is a shortcut for the common case of returning a
++	 * single integer. Use it in place of read()
++	 */
++	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
++	/*
++	 * read_s64() is a signed version of read_u64()
++	 */
++	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
++
++	/* generic seq_file read interface */
++	int (*seq_show)(struct seq_file *sf, void *v);
++
++	/* optional ops, implement all or none */
++	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
++	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
++	void (*seq_stop)(struct seq_file *sf, void *v);
++
++	/*
++	 * write_u64() is a shortcut for the common case of accepting
++	 * a single integer (as parsed by simple_strtoull) from
++	 * userspace. Use in place of write(); return 0 or error.
++	 */
++	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
++			 u64 val);
++	/*
++	 * write_s64() is a signed version of write_u64()
++	 */
++	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
++			 s64 val);
++
++	/*
++	 * write() is the generic write callback which maps directly to
++	 * kernfs write operation and overrides all other operations.
++	 * Maximum write size is determined by ->max_write_len.  Use
++	 * of_css/cft() to access the associated css and cft.
++	 */
++	ssize_t (*write)(struct kernfs_open_file *of,
++			 char *buf, size_t nbytes, loff_t off);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lock_class_key	lockdep_key;
++#endif
++};
++
++/*
++ * Control Group subsystem type.
++ * See Documentation/cgroups/cgroups.txt for details
++ */
++struct cgroup_subsys {
++	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
++	int (*css_online)(struct cgroup_subsys_state *css);
++	void (*css_offline)(struct cgroup_subsys_state *css);
++	void (*css_released)(struct cgroup_subsys_state *css);
++	void (*css_free)(struct cgroup_subsys_state *css);
++	void (*css_reset)(struct cgroup_subsys_state *css);
++	void (*css_e_css_changed)(struct cgroup_subsys_state *css);
++
++	int (*can_attach)(struct cgroup_subsys_state *css,
++			  struct cgroup_taskset *tset);
++	void (*cancel_attach)(struct cgroup_subsys_state *css,
++			      struct cgroup_taskset *tset);
++	void (*attach)(struct cgroup_subsys_state *css,
++		       struct cgroup_taskset *tset);
++	void (*fork)(struct task_struct *task);
++	void (*exit)(struct cgroup_subsys_state *css,
++		     struct cgroup_subsys_state *old_css,
++		     struct task_struct *task);
++	void (*bind)(struct cgroup_subsys_state *root_css);
++
++	int disabled;
++	int early_init;
++
++	/*
++	 * If %false, this subsystem is properly hierarchical -
++	 * configuration, resource accounting and restriction on a parent
++	 * cgroup cover those of its children.  If %true, hierarchy support
++	 * is broken in some ways - some subsystems ignore hierarchy
++	 * completely while others are only implemented half-way.
++	 *
++	 * It's now disallowed to create nested cgroups if the subsystem is
++	 * broken and cgroup core will emit a warning message on such
++	 * cases.  Eventually, all subsystems will be made properly
++	 * hierarchical and this will go away.
++	 */
++	bool broken_hierarchy;
++	bool warned_broken_hierarchy;
++
++	/* the following two fields are initialized automatically during boot */
++	int id;
++	const char *name;
++
++	/* link to parent, protected by cgroup_lock() */
++	struct cgroup_root *root;
++
++	/* idr for css->id */
++	struct idr css_idr;
++
++	/*
++	 * List of cftypes.  Each entry is the first entry of an array
++	 * terminated by zero length name.
++	 */
++	struct list_head cfts;
++
++	/*
++	 * Base cftypes which are automatically registered.  The two can
++	 * point to the same array.
++	 */
++	struct cftype *dfl_cftypes;	/* for the default hierarchy */
++	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */
++
++	/*
++	 * A subsystem may depend on other subsystems.  When such subsystem
++	 * is enabled on a cgroup, the depended-upon subsystems are enabled
++	 * together if available.  Subsystems enabled due to dependency are
++	 * not visible to userland until explicitly enabled.  The following
++	 * specifies the mask of subsystems that this one depends on.
++	 */
++	unsigned int depends_on;
++};
++
++#endif	/* CONFIG_CGROUPS */
++#endif	/* _LINUX_CGROUP_DEFS_H */
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index b9cb94c3102a..96a2ecd5aa69 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -11,23 +11,16 @@
+ #include <linux/sched.h>
+ #include <linux/cpumask.h>
+ #include <linux/nodemask.h>
+-#include <linux/rcupdate.h>
+ #include <linux/rculist.h>
+ #include <linux/cgroupstats.h>
+ #include <linux/rwsem.h>
+-#include <linux/idr.h>
+-#include <linux/workqueue.h>
+ #include <linux/fs.h>
+-#include <linux/percpu-refcount.h>
+ #include <linux/seq_file.h>
+ #include <linux/kernfs.h>
+-#include <linux/wait.h>
+ 
+-#ifdef CONFIG_CGROUPS
++#include <linux/cgroup-defs.h>
+ 
+-struct cgroup_root;
+-struct cgroup_subsys;
+-struct cgroup;
++#ifdef CONFIG_CGROUPS
+ 
+ extern int cgroup_init_early(void);
+ extern int cgroup_init(void);
+@@ -40,66 +33,6 @@ extern int cgroupstats_build(struct cgroupstats *stats,
+ extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
+ 			    struct pid *pid, struct task_struct *tsk);
+ 
+-/* define the enumeration of all cgroup subsystems */
+-#define SUBSYS(_x) _x ## _cgrp_id,
+-enum cgroup_subsys_id {
+-#include <linux/cgroup_subsys.h>
+-	CGROUP_SUBSYS_COUNT,
+-};
+-#undef SUBSYS
+-
+-/*
+- * Per-subsystem/per-cgroup state maintained by the system.  This is the
+- * fundamental structural building block that controllers deal with.
+- *
+- * Fields marked with "PI:" are public and immutable and may be accessed
+- * directly without synchronization.
+- */
+-struct cgroup_subsys_state {
+-	/* PI: the cgroup that this css is attached to */
+-	struct cgroup *cgroup;
+-
+-	/* PI: the cgroup subsystem that this css is attached to */
+-	struct cgroup_subsys *ss;
+-
+-	/* reference count - access via css_[try]get() and css_put() */
+-	struct percpu_ref refcnt;
+-
+-	/* PI: the parent css */
+-	struct cgroup_subsys_state *parent;
+-
+-	/* siblings list anchored at the parent's ->children */
+-	struct list_head sibling;
+-	struct list_head children;
+-
+-	/*
+-	 * PI: Subsys-unique ID.  0 is unused and root is always 1.  The
+-	 * matching css can be looked up using css_from_id().
+-	 */
+-	int id;
+-
+-	unsigned int flags;
+-
+-	/*
+-	 * Monotonically increasing unique serial number which defines a
+-	 * uniform order among all csses.  It's guaranteed that all
+-	 * ->children lists are in the ascending order of ->serial_nr and
+-	 * used to allow interrupting and resuming iterations.
+-	 */
+-	u64 serial_nr;
+-
+-	/* percpu_ref killing and RCU release */
+-	struct rcu_head rcu_head;
+-	struct work_struct destroy_work;
+-};
+-
+-/* bits in struct cgroup_subsys_state flags field */
+-enum {
+-	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
+-	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
+-	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
+-};
+-
+ /**
+  * css_get - obtain a reference on the specified css
+  * @css: target css
+@@ -185,307 +118,6 @@ static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
+ 		percpu_ref_put_many(&css->refcnt, n);
+ }
+ 
+-/* bits in struct cgroup flags field */
+-enum {
+-	/* Control Group requires release notifications to userspace */
+-	CGRP_NOTIFY_ON_RELEASE,
+-	/*
+-	 * Clone the parent's configuration when creating a new child
+-	 * cpuset cgroup.  For historical reasons, this option can be
+-	 * specified at mount time and thus is implemented here.
+-	 */
+-	CGRP_CPUSET_CLONE_CHILDREN,
+-};
+-
+-struct cgroup {
+-	/* self css with NULL ->ss, points back to this cgroup */
+-	struct cgroup_subsys_state self;
+-
+-	unsigned long flags;		/* "unsigned long" so bitops work */
+-
+-	/*
+-	 * idr allocated in-hierarchy ID.
+-	 *
+-	 * ID 0 is not used, the ID of the root cgroup is always 1, and a
+-	 * new cgroup will be assigned with a smallest available ID.
+-	 *
+-	 * Allocating/Removing ID must be protected by cgroup_mutex.
+-	 */
+-	int id;
+-
+-	/*
+-	 * If this cgroup contains any tasks, it contributes one to
+-	 * populated_cnt.  All children with non-zero popuplated_cnt of
+-	 * their own contribute one.  The count is zero iff there's no task
+-	 * in this cgroup or its subtree.
+-	 */
+-	int populated_cnt;
+-
+-	struct kernfs_node *kn;		/* cgroup kernfs entry */
+-	struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
+-
+-	/*
+-	 * The bitmask of subsystems enabled on the child cgroups.
+-	 * ->subtree_control is the one configured through
+-	 * "cgroup.subtree_control" while ->child_subsys_mask is the
+-	 * effective one which may have more subsystems enabled.
+-	 * Controller knobs are made available iff it's enabled in
+-	 * ->subtree_control.
+-	 */
+-	unsigned int subtree_control;
+-	unsigned int child_subsys_mask;
+-
+-	/* Private pointers for each registered subsystem */
+-	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
+-
+-	struct cgroup_root *root;
+-
+-	/*
+-	 * List of cgrp_cset_links pointing at css_sets with tasks in this
+-	 * cgroup.  Protected by css_set_lock.
+-	 */
+-	struct list_head cset_links;
+-
+-	/*
+-	 * On the default hierarchy, a css_set for a cgroup with some
+-	 * susbsys disabled will point to css's which are associated with
+-	 * the closest ancestor which has the subsys enabled.  The
+-	 * following lists all css_sets which point to this cgroup's css
+-	 * for the given subsystem.
+-	 */
+-	struct list_head e_csets[CGROUP_SUBSYS_COUNT];
+-
+-	/*
+-	 * list of pidlists, up to two for each namespace (one for procs, one
+-	 * for tasks); created on demand.
+-	 */
+-	struct list_head pidlists;
+-	struct mutex pidlist_mutex;
+-
+-	/* used to wait for offlining of csses */
+-	wait_queue_head_t offline_waitq;
+-
+-	/* used to schedule release agent */
+-	struct work_struct release_agent_work;
+-};
+-
+-#define MAX_CGROUP_ROOT_NAMELEN 64
+-
+-/* cgroup_root->flags */
+-enum {
+-	CGRP_ROOT_SANE_BEHAVIOR	= (1 << 0), /* __DEVEL__sane_behavior specified */
+-	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
+-	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */
+-};
+-
+-/*
+- * A cgroup_root represents the root of a cgroup hierarchy, and may be
+- * associated with a kernfs_root to form an active hierarchy.  This is
+- * internal to cgroup core.  Don't access directly from controllers.
+- */
+-struct cgroup_root {
+-	struct kernfs_root *kf_root;
+-
+-	/* The bitmask of subsystems attached to this hierarchy */
+-	unsigned int subsys_mask;
+-
+-	/* Unique id for this hierarchy. */
+-	int hierarchy_id;
+-
+-	/* The root cgroup.  Root is destroyed on its release. */
+-	struct cgroup cgrp;
+-
+-	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+-	atomic_t nr_cgrps;
+-
+-	/* A list running through the active hierarchies */
+-	struct list_head root_list;
+-
+-	/* Hierarchy-specific flags */
+-	unsigned int flags;
+-
+-	/* IDs for cgroups in this hierarchy */
+-	struct idr cgroup_idr;
+-
+-	/* The path to use for release notifications. */
+-	char release_agent_path[PATH_MAX];
+-
+-	/* The name for this hierarchy - may be empty */
+-	char name[MAX_CGROUP_ROOT_NAMELEN];
+-};
+-
+-/*
+- * A css_set is a structure holding pointers to a set of
+- * cgroup_subsys_state objects. This saves space in the task struct
+- * object and speeds up fork()/exit(), since a single inc/dec and a
+- * list_add()/del() can bump the reference count on the entire cgroup
+- * set for a task.
+- */
+-
+-struct css_set {
+-
+-	/* Reference count */
+-	atomic_t refcount;
+-
+-	/*
+-	 * List running through all cgroup groups in the same hash
+-	 * slot. Protected by css_set_lock
+-	 */
+-	struct hlist_node hlist;
+-
+-	/*
+-	 * Lists running through all tasks using this cgroup group.
+-	 * mg_tasks lists tasks which belong to this cset but are in the
+-	 * process of being migrated out or in.  Protected by
+-	 * css_set_rwsem, but, during migration, once tasks are moved to
+-	 * mg_tasks, it can be read safely while holding cgroup_mutex.
+-	 */
+-	struct list_head tasks;
+-	struct list_head mg_tasks;
+-
+-	/*
+-	 * List of cgrp_cset_links pointing at cgroups referenced from this
+-	 * css_set.  Protected by css_set_lock.
+-	 */
+-	struct list_head cgrp_links;
+-
+-	/* the default cgroup associated with this css_set */
+-	struct cgroup *dfl_cgrp;
+-
+-	/*
+-	 * Set of subsystem states, one for each subsystem. This array is
+-	 * immutable after creation apart from the init_css_set during
+-	 * subsystem registration (at boot time).
+-	 */
+-	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+-
+-	/*
+-	 * List of csets participating in the on-going migration either as
+-	 * source or destination.  Protected by cgroup_mutex.
+-	 */
+-	struct list_head mg_preload_node;
+-	struct list_head mg_node;
+-
+-	/*
+-	 * If this cset is acting as the source of migration the following
+-	 * two fields are set.  mg_src_cgrp is the source cgroup of the
+-	 * on-going migration and mg_dst_cset is the destination cset the
+-	 * target tasks on this cset should be migrated to.  Protected by
+-	 * cgroup_mutex.
+-	 */
+-	struct cgroup *mg_src_cgrp;
+-	struct css_set *mg_dst_cset;
+-
+-	/*
+-	 * On the default hierarhcy, ->subsys[ssid] may point to a css
+-	 * attached to an ancestor instead of the cgroup this css_set is
+-	 * associated with.  The following node is anchored at
+-	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
+-	 * iterate through all css's attached to a given cgroup.
+-	 */
+-	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];
+-
+-	/* For RCU-protected deletion */
+-	struct rcu_head rcu_head;
+-};
+-
+-/*
+- * struct cftype: handler definitions for cgroup control files
+- *
+- * When reading/writing to a file:
+- *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
+- *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
+- */
+-
+-/* cftype->flags */
+-enum {
+-	CFTYPE_ONLY_ON_ROOT	= (1 << 0),	/* only create on root cgrp */
+-	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
+-	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
+-
+-	/* internal flags, do not use outside cgroup core proper */
+-	__CFTYPE_ONLY_ON_DFL	= (1 << 16),	/* only on default hierarchy */
+-	__CFTYPE_NOT_ON_DFL	= (1 << 17),	/* not on default hierarchy */
+-};
+-
+-#define MAX_CFTYPE_NAME		64
+-
+-struct cftype {
+-	/*
+-	 * By convention, the name should begin with the name of the
+-	 * subsystem, followed by a period.  Zero length string indicates
+-	 * end of cftype array.
+-	 */
+-	char name[MAX_CFTYPE_NAME];
+-	int private;
+-	/*
+-	 * If not 0, file mode is set to this value, otherwise it will
+-	 * be figured out automatically
+-	 */
+-	umode_t mode;
+-
+-	/*
+-	 * The maximum length of string, excluding trailing nul, that can
+-	 * be passed to write.  If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
+-	 */
+-	size_t max_write_len;
+-
+-	/* CFTYPE_* flags */
+-	unsigned int flags;
+-
+-	/*
+-	 * Fields used for internal bookkeeping.  Initialized automatically
+-	 * during registration.
+-	 */
+-	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
+-	struct list_head node;		/* anchored at ss->cfts */
+-	struct kernfs_ops *kf_ops;
+-
+-	/*
+-	 * read_u64() is a shortcut for the common case of returning a
+-	 * single integer. Use it in place of read()
+-	 */
+-	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
+-	/*
+-	 * read_s64() is a signed version of read_u64()
+-	 */
+-	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
+-
+-	/* generic seq_file read interface */
+-	int (*seq_show)(struct seq_file *sf, void *v);
+-
+-	/* optional ops, implement all or none */
+-	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
+-	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
+-	void (*seq_stop)(struct seq_file *sf, void *v);
+-
+-	/*
+-	 * write_u64() is a shortcut for the common case of accepting
+-	 * a single integer (as parsed by simple_strtoull) from
+-	 * userspace. Use in place of write(); return 0 or error.
+-	 */
+-	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+-			 u64 val);
+-	/*
+-	 * write_s64() is a signed version of write_u64()
+-	 */
+-	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+-			 s64 val);
+-
+-	/*
+-	 * write() is the generic write callback which maps directly to
+-	 * kernfs write operation and overrides all other operations.
+-	 * Maximum write size is determined by ->max_write_len.  Use
+-	 * of_css/cft() to access the associated css and cft.
+-	 */
+-	ssize_t (*write)(struct kernfs_open_file *of,
+-			 char *buf, size_t nbytes, loff_t off);
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-	struct lock_class_key	lockdep_key;
+-#endif
+-};
+-
+ extern struct cgroup_root cgrp_dfl_root;
+ extern struct css_set init_css_set;
+ 
+@@ -612,11 +244,6 @@ int cgroup_rm_cftypes(struct cftype *cfts);
+ 
+ bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
+ 
+-/*
+- * Control Group taskset, used to pass around set of tasks to cgroup_subsys
+- * methods.
+- */
+-struct cgroup_taskset;
+ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
+ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+ 
+@@ -629,84 +256,6 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
+ 	for ((task) = cgroup_taskset_first((tset)); (task);		\
+ 	     (task) = cgroup_taskset_next((tset)))
+ 
+-/*
+- * Control Group subsystem type.
+- * See Documentation/cgroups/cgroups.txt for details
+- */
+-
+-struct cgroup_subsys {
+-	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+-	int (*css_online)(struct cgroup_subsys_state *css);
+-	void (*css_offline)(struct cgroup_subsys_state *css);
+-	void (*css_released)(struct cgroup_subsys_state *css);
+-	void (*css_free)(struct cgroup_subsys_state *css);
+-	void (*css_reset)(struct cgroup_subsys_state *css);
+-	void (*css_e_css_changed)(struct cgroup_subsys_state *css);
+-
+-	int (*can_attach)(struct cgroup_subsys_state *css,
+-			  struct cgroup_taskset *tset);
+-	void (*cancel_attach)(struct cgroup_subsys_state *css,
+-			      struct cgroup_taskset *tset);
+-	void (*attach)(struct cgroup_subsys_state *css,
+-		       struct cgroup_taskset *tset);
+-	void (*fork)(struct task_struct *task);
+-	void (*exit)(struct cgroup_subsys_state *css,
+-		     struct cgroup_subsys_state *old_css,
+-		     struct task_struct *task);
+-	void (*bind)(struct cgroup_subsys_state *root_css);
+-
+-	int disabled;
+-	int early_init;
+-
+-	/*
+-	 * If %false, this subsystem is properly hierarchical -
+-	 * configuration, resource accounting and restriction on a parent
+-	 * cgroup cover those of its children.  If %true, hierarchy support
+-	 * is broken in some ways - some subsystems ignore hierarchy
+-	 * completely while others are only implemented half-way.
+-	 *
+-	 * It's now disallowed to create nested cgroups if the subsystem is
+-	 * broken and cgroup core will emit a warning message on such
+-	 * cases.  Eventually, all subsystems will be made properly
+-	 * hierarchical and this will go away.
+-	 */
+-	bool broken_hierarchy;
+-	bool warned_broken_hierarchy;
+-
+-	/* the following two fields are initialized automtically during boot */
+-	int id;
+-#define MAX_CGROUP_TYPE_NAMELEN 32
+-	const char *name;
+-
+-	/* link to parent, protected by cgroup_lock() */
+-	struct cgroup_root *root;
+-
+-	/* idr for css->id */
+-	struct idr css_idr;
+-
+-	/*
+-	 * List of cftypes.  Each entry is the first entry of an array
+-	 * terminated by zero length name.
+-	 */
+-	struct list_head cfts;
+-
+-	/*
+-	 * Base cftypes which are automatically registered.  The two can
+-	 * point to the same array.
+-	 */
+-	struct cftype *dfl_cftypes;	/* for the default hierarchy */
+-	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */
+-
+-	/*
+-	 * A subsystem may depend on other subsystems.  When such subsystem
+-	 * is enabled on a cgroup, the depended-upon subsystems are enabled
+-	 * together if available.  Subsystems enabled due to dependency are
+-	 * not visible to userland until explicitly enabled.  The following
+-	 * specifies the mask of subsystems that this one depends on.
+-	 */
+-	unsigned int depends_on;
+-};
+-
+ #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
+ #include <linux/cgroup_subsys.h>
+ #undef SUBSYS
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index 867722591be2..99728072e536 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -142,7 +142,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+  */
+ #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+ #define __trace_if(cond) \
+-	if (__builtin_constant_p((cond)) ? !!(cond) :			\
++	if (__builtin_constant_p(!!(cond)) ? !!(cond) :			\
+ 	({								\
+ 		int ______r;						\
+ 		static struct ftrace_branch_data			\
+diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
+index 251a2090a554..e0ee0b3000b2 100644
+--- a/include/linux/devpts_fs.h
++++ b/include/linux/devpts_fs.h
+@@ -19,6 +19,8 @@
+ 
+ int devpts_new_index(struct inode *ptmx_inode);
+ void devpts_kill_index(struct inode *ptmx_inode, int idx);
++void devpts_add_ref(struct inode *ptmx_inode);
++void devpts_del_ref(struct inode *ptmx_inode);
+ /* mknod in devpts */
+ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
+ 		void *priv);
+@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
+ /* Dummy stubs in the no-pty case */
+ static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
+ static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
++static inline void devpts_add_ref(struct inode *ptmx_inode) { }
++static inline void devpts_del_ref(struct inode *ptmx_inode) { }
+ static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
+ 		dev_t device, int index, void *priv)
+ {
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index e4b464983322..01c25923675b 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -29,6 +29,7 @@ struct ipv6_devconf {
+ 	__s32		max_desync_factor;
+ 	__s32		max_addresses;
+ 	__s32		accept_ra_defrtr;
++	__s32		accept_ra_min_hop_limit;
+ 	__s32		accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	__s32		accept_ra_rtr_pref;
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 1f17abe23725..6633b0cd3fb9 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -203,6 +203,7 @@ struct sk_buff;
+ #else
+ #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
+ #endif
++extern int sysctl_max_skb_frags;
+ 
+ typedef struct skb_frag_struct skb_frag_t;
+ 
+diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
+index a5f7f3ecafa3..a6e1bca88cc6 100644
+--- a/include/linux/tracepoint.h
++++ b/include/linux/tracepoint.h
+@@ -14,8 +14,10 @@
+  * See the file COPYING for more details.
+  */
+ 
++#include <linux/smp.h>
+ #include <linux/errno.h>
+ #include <linux/types.h>
++#include <linux/cpumask.h>
+ #include <linux/rcupdate.h>
+ #include <linux/static_key.h>
+ 
+@@ -129,6 +131,9 @@ extern void syscall_unregfunc(void);
+ 		void *it_func;						\
+ 		void *__data;						\
+ 									\
++		if (!cpu_online(raw_smp_processor_id()))		\
++			return;						\
++									\
+ 		if (!(cond))						\
+ 			return;						\
+ 		prercu;							\
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index e830c3dff61a..7bb69c9c3c43 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -6,8 +6,8 @@
+ #include <linux/mutex.h>
+ #include <net/sock.h>
+ 
+-void unix_inflight(struct file *fp);
+-void unix_notinflight(struct file *fp);
++void unix_inflight(struct user_struct *user, struct file *fp);
++void unix_notinflight(struct user_struct *user, struct file *fp);
+ void unix_gc(void);
+ void wait_for_unix_gc(void);
+ struct sock *unix_get_socket(struct file *filp);
+diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
+index 5e192068e6cb..388dea4da083 100644
+--- a/include/net/ip6_route.h
++++ b/include/net/ip6_route.h
+@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
+ 
+ void ip6_route_input(struct sk_buff *skb);
+ 
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+-				   struct flowi6 *fl6);
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++					 struct flowi6 *fl6, int flags);
++
++static inline struct dst_entry *ip6_route_output(struct net *net,
++						 const struct sock *sk,
++						 struct flowi6 *fl6)
++{
++	return ip6_route_output_flags(net, sk, fl6, 0);
++}
++
+ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
+ 				   int flags);
+ 
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 54271ed0ed45..13f1a97f6b2b 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -59,6 +59,7 @@ struct fib_nh_exception {
+ 	struct rtable __rcu		*fnhe_rth_input;
+ 	struct rtable __rcu		*fnhe_rth_output;
+ 	unsigned long			fnhe_stamp;
++	struct rcu_head			rcu;
+ };
+ 
+ struct fnhe_hash_bucket {
+diff --git a/include/net/scm.h b/include/net/scm.h
+index 262532d111f5..59fa93c01d2a 100644
+--- a/include/net/scm.h
++++ b/include/net/scm.h
+@@ -21,6 +21,7 @@ struct scm_creds {
+ struct scm_fp_list {
+ 	short			count;
+ 	short			max;
++	struct user_struct	*user;
+ 	struct file		*fp[SCM_MAX_FD];
+ };
+ 
+diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
+index 0f4dc3768587..24c8d9d0d946 100644
+--- a/include/target/target_core_fabric.h
++++ b/include/target/target_core_fabric.h
+@@ -155,8 +155,8 @@ bool	transport_wait_for_tasks(struct se_cmd *);
+ int	transport_check_aborted_status(struct se_cmd *, int);
+ int	transport_send_check_condition_and_sense(struct se_cmd *,
+ 		sense_reason_t, int);
+-int	target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
+-int	target_put_sess_cmd(struct se_session *, struct se_cmd *);
++int	target_get_sess_cmd(struct se_cmd *, bool);
++int	target_put_sess_cmd(struct se_cmd *);
+ void	target_sess_cmd_list_set_waiting(struct se_session *);
+ void	target_wait_for_sess_cmds(struct se_session *);
+ 
+diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
+index 5efa54ae567c..80f3b74446a1 100644
+--- a/include/uapi/linux/ipv6.h
++++ b/include/uapi/linux/ipv6.h
+@@ -171,6 +171,8 @@ enum {
+ 	DEVCONF_USE_OPTIMISTIC,
+ 	DEVCONF_ACCEPT_RA_MTU,
+ 	DEVCONF_STABLE_SECRET,
++	DEVCONF_USE_OIF_ADDRS_ONLY,
++	DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT,
+ 	DEVCONF_MAX
+ };
+ 
+diff --git a/ipc/msgutil.c b/ipc/msgutil.c
+index 2b491590ebab..71f448e5e927 100644
+--- a/ipc/msgutil.c
++++ b/ipc/msgutil.c
+@@ -123,7 +123,7 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
+ 	size_t len = src->m_ts;
+ 	size_t alen;
+ 
+-	BUG_ON(dst == NULL);
++	WARN_ON(dst == NULL);
+ 	if (src->m_ts > dst->m_ts)
+ 		return ERR_PTR(-EINVAL);
+ 
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 499a8bd22fad..bbe5f62f2b12 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -155,9 +155,13 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
+ {
+ 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
+ 
++	/*
++	 * Callers of shm_lock() must validate the status of the returned ipc
++	 * object pointer (as returned by ipc_lock()), and error out as
++	 * appropriate.
++	 */
+ 	if (IS_ERR(ipcp))
+-		return (struct shmid_kernel *)ipcp;
+-
++		return (void *)ipcp;
+ 	return container_of(ipcp, struct shmid_kernel, shm_perm);
+ }
+ 
+@@ -183,19 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
+ }
+ 
+ 
+-/* This is called by fork, once for every shm attach. */
+-static void shm_open(struct vm_area_struct *vma)
++static int __shm_open(struct vm_area_struct *vma)
+ {
+ 	struct file *file = vma->vm_file;
+ 	struct shm_file_data *sfd = shm_file_data(file);
+ 	struct shmid_kernel *shp;
+ 
+ 	shp = shm_lock(sfd->ns, sfd->id);
+-	BUG_ON(IS_ERR(shp));
++
++	if (IS_ERR(shp))
++		return PTR_ERR(shp);
++
+ 	shp->shm_atim = get_seconds();
+ 	shp->shm_lprid = task_tgid_vnr(current);
+ 	shp->shm_nattch++;
+ 	shm_unlock(shp);
++	return 0;
++}
++
++/* This is called by fork, once for every shm attach. */
++static void shm_open(struct vm_area_struct *vma)
++{
++	int err = __shm_open(vma);
++	/*
++	 * We raced in the idr lookup or with shm_destroy().
++	 * Either way, the ID is busted.
++	 */
++	WARN_ON_ONCE(err);
+ }
+ 
+ /*
+@@ -258,7 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
+ 	down_write(&shm_ids(ns).rwsem);
+ 	/* remove from the list of attaches of the shm segment */
+ 	shp = shm_lock(ns, sfd->id);
+-	BUG_ON(IS_ERR(shp));
++
++	/*
++	 * We raced in the idr lookup or with shm_destroy().
++	 * Either way, the ID is busted.
++	 */
++	if (WARN_ON_ONCE(IS_ERR(shp)))
++		goto done; /* no-op */
++
+ 	shp->shm_lprid = task_tgid_vnr(current);
+ 	shp->shm_dtim = get_seconds();
+ 	shp->shm_nattch--;
+@@ -266,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
+ 		shm_destroy(ns, shp);
+ 	else
+ 		shm_unlock(shp);
++done:
+ 	up_write(&shm_ids(ns).rwsem);
+ }
+ 
+@@ -387,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
+ 	struct shm_file_data *sfd = shm_file_data(file);
+ 	int ret;
+ 
++	/*
++	 * In case of remap_file_pages() emulation, the file can represent
++	 * removed IPC ID: propagate shm_lock() error to caller.
++	 */
++	ret = __shm_open(vma);
++	if (ret)
++		return ret;
++
+ 	ret = sfd->file->f_op->mmap(sfd->file, vma);
+-	if (ret != 0)
++	if (ret) {
++		shm_close(vma);
+ 		return ret;
++	}
+ 	sfd->vm_ops = vma->vm_ops;
+ #ifdef CONFIG_MMU
+-	BUG_ON(!sfd->vm_ops->fault);
++	WARN_ON(!sfd->vm_ops->fault);
+ #endif
+ 	vma->vm_ops = &shm_vm_ops;
+-	shm_open(vma);
+-
+-	return ret;
++	return 0;
+ }
+ 
+ static int shm_release(struct inode *ino, struct file *file)
+@@ -1192,7 +1226,6 @@ out_fput:
+ out_nattch:
+ 	down_write(&shm_ids(ns).rwsem);
+ 	shp = shm_lock(ns, shmid);
+-	BUG_ON(IS_ERR(shp));
+ 	shp->shm_nattch--;
+ 	if (shm_may_destroy(ns, shp))
+ 		shm_destroy(ns, shp);
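
With BUG_ON gone, the shm_lock() callers above have to understand the ERR_PTR convention: an error is encoded into the pointer value itself, in the top page of the address space, and decoded with PTR_ERR(). A self-contained userspace re-creation of the helpers, for illustration only:

    /*
     * Hedged sketch: the ERR_PTR/PTR_ERR/IS_ERR convention used by
     * shm_lock().  Userspace reimplementation for illustration only.
     */
    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int segment_gone = 1;    /* pretend the ID was removed */
    static int dummy_segment;

    static void *shm_lock_demo(void)
    {
        if (segment_gone)
            return ERR_PTR(-EIDRM);     /* "identifier removed" */
        return &dummy_segment;
    }

    int main(void)
    {
        void *shp = shm_lock_demo();

        /* Propagate the error instead of BUG()ing on it. */
        if (IS_ERR(shp)) {
            printf("lookup failed: %ld (EIDRM is %d)\n",
                   PTR_ERR(shp), EIDRM);
            return 1;
        }
        return 0;
    }
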
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 141d562064a7..6582410a71c7 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1944,7 +1944,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
+ 		/* adjust offset of jmps if necessary */
+ 		if (i < pos && i + insn->off + 1 > pos)
+ 			insn->off += delta;
+-		else if (i > pos && i + insn->off + 1 < pos)
++		else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
+ 			insn->off -= delta;
+ 	}
+ }
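
The verifier fix above changes which jumps get rewritten when the instruction at `pos` is expanded into `1 + delta` instructions: indices past the patched region now sit `delta` slots later, so a backward jump needs `off -= delta` exactly when it starts after `pos + delta` and its target resolves to `pos + delta` or earlier. A toy model of the corrected fixup (not the real verifier; an instruction here is just a jump flag and an offset):

    /*
     * Hedged sketch: branch-offset fixup after the insn at 'pos' was
     * expanded into (1 + delta) insns, mirroring the corrected kernel
     * condition.  Toy model only.
     */
    #include <stdio.h>

    struct insn { int jmp; int off; };

    static void adjust_branches(struct insn *prog, int len, int pos, int delta)
    {
        for (int i = 0; i < len; i++) {
            if (!prog[i].jmp)
                continue;
            if (i < pos && i + prog[i].off + 1 > pos)
                prog[i].off += delta;       /* forward jump over the patch */
            else if (i > pos + delta &&
                     i + prog[i].off + 1 <= pos + delta)
                prog[i].off -= delta;       /* backjump to/before the patch */
        }
    }

    int main(void)
    {
        /* Post-expansion program: insn 2 became insns 2 and 3 (delta = 1).
         * insn 0 jumped to old insn 3 (off 2); insn 5 jumps back to old
         * insn 2 (off was -3 when it lived at index 4). */
        struct insn prog[6] = {
            { 1, 2 },       /* 0: jump, old target 3 */
            { 0, 0 },       /* 1 */
            { 0, 0 },       /* 2: expansion, part 1 */
            { 0, 0 },       /* 3: expansion, part 2 */
            { 0, 0 },       /* 4: old insn 3 */
            { 1, -3 },      /* 5: old insn 4, old target 2 */
        };

        adjust_branches(prog, 6, 2, 1);

        printf("insn0 off=%d, target=%d (expect 4)\n",
               prog[0].off, 0 + prog[0].off + 1);
        printf("insn5 off=%d, target=%d (expect 2)\n",
               prog[5].off, 5 + prog[5].off + 1);
        return 0;
    }
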
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 4d65b66ae60d..359da3abb004 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -4481,6 +4481,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
+ 	INIT_LIST_HEAD(&css->sibling);
+ 	INIT_LIST_HEAD(&css->children);
+ 	css->serial_nr = css_serial_nr_next++;
++	atomic_set(&css->online_cnt, 0);
+ 
+ 	if (cgroup_parent(cgrp)) {
+ 		css->parent = cgroup_css(cgroup_parent(cgrp), ss);
+@@ -4503,6 +4504,10 @@ static int online_css(struct cgroup_subsys_state *css)
+ 	if (!ret) {
+ 		css->flags |= CSS_ONLINE;
+ 		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
++
++		atomic_inc(&css->online_cnt);
++		if (css->parent)
++			atomic_inc(&css->parent->online_cnt);
+ 	}
+ 	return ret;
+ }
+@@ -4740,10 +4745,15 @@ static void css_killed_work_fn(struct work_struct *work)
+ 		container_of(work, struct cgroup_subsys_state, destroy_work);
+ 
+ 	mutex_lock(&cgroup_mutex);
+-	offline_css(css);
+-	mutex_unlock(&cgroup_mutex);
+ 
+-	css_put(css);
++	do {
++		offline_css(css);
++		css_put(css);
++		/* @css can't go away while we're holding cgroup_mutex */
++		css = css->parent;
++	} while (css && atomic_dec_and_test(&css->online_cnt));
++
++	mutex_unlock(&cgroup_mutex);
+ }
+ 
+ /* css kill confirmation processing requires process context, bounce */
+@@ -4752,8 +4762,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+ 	struct cgroup_subsys_state *css =
+ 		container_of(ref, struct cgroup_subsys_state, refcnt);
+ 
+-	INIT_WORK(&css->destroy_work, css_killed_work_fn);
+-	queue_work(cgroup_destroy_wq, &css->destroy_work);
++	if (atomic_dec_and_test(&css->online_cnt)) {
++		INIT_WORK(&css->destroy_work, css_killed_work_fn);
++		queue_work(cgroup_destroy_wq, &css->destroy_work);
++	}
+ }
+ 
+ /**
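
The css->online_cnt scheme above counts a css itself plus each of its online children, so an offline can only cascade to the parent once the last child has gone down. A simplified sketch of the pattern (hypothetical node/take_offline/put_ref names, not the kernel API):

	static void node_killed(struct node *n)
	{
		/* n's own count just hit zero: take it offline, then
		 * release the pin it held on each ancestor in turn */
		do {
			take_offline(n);
			put_ref(n);
			n = n->parent;
		} while (n && atomic_dec_and_test(&n->online_cnt));
	}
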
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 5c01664c26e2..6d631161705c 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -127,6 +127,11 @@ enum {
+  *
+  * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
+  *
++ * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
++ *
++ * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either of
++ *      the two, or sched-RCU, for reads.
++ *
+  * WQ: wq->mutex protected.
+  *
+  * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
+@@ -247,8 +252,8 @@ struct workqueue_struct {
+ 	int			nr_drainers;	/* WQ: drain in progress */
+ 	int			saved_max_active; /* WQ: saved pwq max_active */
+ 
+-	struct workqueue_attrs	*unbound_attrs;	/* WQ: only for unbound wqs */
+-	struct pool_workqueue	*dfl_pwq;	/* WQ: only for unbound wqs */
++	struct workqueue_attrs	*unbound_attrs;	/* PW: only for unbound wqs */
++	struct pool_workqueue	*dfl_pwq;	/* PW: only for unbound wqs */
+ 
+ #ifdef CONFIG_SYSFS
+ 	struct wq_device	*wq_dev;	/* I: for sysfs interface */
+@@ -268,7 +273,7 @@ struct workqueue_struct {
+ 	/* hot fields used during command issue, aligned to cacheline */
+ 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
+ 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
+-	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
++	struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
+ };
+ 
+ static struct kmem_cache *pwq_cache;
+@@ -347,6 +352,12 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+ 			   lockdep_is_held(&wq->mutex),			\
+ 			   "sched RCU or wq->mutex should be held")
+ 
++#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)			\
++	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
++			   lockdep_is_held(&wq->mutex) ||		\
++			   lockdep_is_held(&wq_pool_mutex),		\
++			   "sched RCU, wq->mutex or wq_pool_mutex should be held")
++
+ #define for_each_cpu_worker_pool(pool, cpu)				\
+ 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
+ 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+@@ -551,7 +562,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
+  * @wq: the target workqueue
+  * @node: the node ID
+  *
+- * This must be called either with pwq_lock held or sched RCU read locked.
++ * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
++ * read locked.
+  * If the pwq needs to be used beyond the locking in effect, the caller is
+  * responsible for guaranteeing that the pwq stays online.
+  *
+@@ -560,7 +572,17 @@ static int worker_pool_assign_id(struct worker_pool *pool)
+ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
+ 						  int node)
+ {
+-	assert_rcu_or_wq_mutex(wq);
++	assert_rcu_or_wq_mutex_or_pool_mutex(wq);
++
++	/*
++	 * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
++	 * delayed item is pending.  The plan is to keep CPU -> NODE
++	 * mapping valid and stable across CPU on/offlines.  Once that
++	 * happens, this workaround can be removed.
++	 */
++	if (unlikely(node == NUMA_NO_NODE))
++		return wq->dfl_pwq;
++
+ 	return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
+ }
+ 
+@@ -1451,13 +1473,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ 	dwork->wq = wq;
+-	/* timer isn't guaranteed to run in this cpu, record earlier */
+-	if (cpu == WORK_CPU_UNBOUND)
+-		cpu = raw_smp_processor_id();
+ 	dwork->cpu = cpu;
+ 	timer->expires = jiffies + delay;
+ 
+-	add_timer_on(timer, cpu);
++	if (unlikely(cpu != WORK_CPU_UNBOUND))
++		add_timer_on(timer, cpu);
++	else
++		add_timer(timer);
+ }
+ 
+ /**
+@@ -3425,17 +3447,6 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
+ 	return pwq;
+ }
+ 
+-/* undo alloc_unbound_pwq(), used only in the error path */
+-static void free_unbound_pwq(struct pool_workqueue *pwq)
+-{
+-	lockdep_assert_held(&wq_pool_mutex);
+-
+-	if (pwq) {
+-		put_unbound_pool(pwq->pool);
+-		kmem_cache_free(pwq_cache, pwq);
+-	}
+-}
+-
+ /**
+  * wq_calc_node_mask - calculate a wq_attrs' cpumask for the specified node
+  * @attrs: the wq_attrs of interest
+@@ -3488,6 +3499,7 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
+ {
+ 	struct pool_workqueue *old_pwq;
+ 
++	lockdep_assert_held(&wq_pool_mutex);
+ 	lockdep_assert_held(&wq->mutex);
+ 
+ 	/* link_pwq() can handle duplicate calls */
+@@ -3498,42 +3510,48 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
+ 	return old_pwq;
+ }
+ 
+-/**
+- * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+- * @wq: the target workqueue
+- * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+- *
+- * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
+- * machines, this function maps a separate pwq to each NUMA node with
+- * possibles CPUs in @attrs->cpumask so that work items are affine to the
+- * NUMA node it was issued on.  Older pwqs are released as in-flight work
+- * items finish.  Note that a work item which repeatedly requeues itself
+- * back-to-back will stay on its current pwq.
+- *
+- * Performs GFP_KERNEL allocations.
+- *
+- * Return: 0 on success and -errno on failure.
+- */
+-int apply_workqueue_attrs(struct workqueue_struct *wq,
+-			  const struct workqueue_attrs *attrs)
++/* context to store the prepared attrs & pwqs before applying */
++struct apply_wqattrs_ctx {
++	struct workqueue_struct	*wq;		/* target workqueue */
++	struct workqueue_attrs	*attrs;		/* attrs to apply */
++	struct pool_workqueue	*dfl_pwq;
++	struct pool_workqueue	*pwq_tbl[];
++};
++
++/* free the resources after success or abort */
++static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
+ {
++	if (ctx) {
++		int node;
++
++		for_each_node(node)
++			put_pwq_unlocked(ctx->pwq_tbl[node]);
++		put_pwq_unlocked(ctx->dfl_pwq);
++
++		free_workqueue_attrs(ctx->attrs);
++
++		kfree(ctx);
++	}
++}
++
++/* allocate the attrs and pwqs for later installation */
++static struct apply_wqattrs_ctx *
++apply_wqattrs_prepare(struct workqueue_struct *wq,
++		      const struct workqueue_attrs *attrs)
++{
++	struct apply_wqattrs_ctx *ctx;
+ 	struct workqueue_attrs *new_attrs, *tmp_attrs;
+-	struct pool_workqueue **pwq_tbl, *dfl_pwq;
+-	int node, ret;
++	int node;
+ 
+-	/* only unbound workqueues can change attributes */
+-	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
+-		return -EINVAL;
++	lockdep_assert_held(&wq_pool_mutex);
+ 
+-	/* creating multiple pwqs breaks ordering guarantee */
+-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
+-		return -EINVAL;
++	ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
++		      GFP_KERNEL);
+ 
+-	pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
+ 	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+ 	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
+-	if (!pwq_tbl || !new_attrs || !tmp_attrs)
+-		goto enomem;
++	if (!ctx || !new_attrs || !tmp_attrs)
++		goto out_free;
+ 
+ 	/* make a copy of @attrs and sanitize it */
+ 	copy_workqueue_attrs(new_attrs, attrs);
+@@ -3547,75 +3565,111 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
+ 	copy_workqueue_attrs(tmp_attrs, new_attrs);
+ 
+ 	/*
+-	 * CPUs should stay stable across pwq creations and installations.
+-	 * Pin CPUs, determine the target cpumask for each node and create
+-	 * pwqs accordingly.
+-	 */
+-	get_online_cpus();
+-
+-	mutex_lock(&wq_pool_mutex);
+-
+-	/*
+ 	 * If something goes wrong during CPU up/down, we'll fall back to
+ 	 * the default pwq covering whole @attrs->cpumask.  Always create
+ 	 * it even if we don't use it immediately.
+ 	 */
+-	dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
+-	if (!dfl_pwq)
+-		goto enomem_pwq;
++	ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
++	if (!ctx->dfl_pwq)
++		goto out_free;
+ 
+ 	for_each_node(node) {
+ 		if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
+-			pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
+-			if (!pwq_tbl[node])
+-				goto enomem_pwq;
++			ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
++			if (!ctx->pwq_tbl[node])
++				goto out_free;
+ 		} else {
+-			dfl_pwq->refcnt++;
+-			pwq_tbl[node] = dfl_pwq;
++			ctx->dfl_pwq->refcnt++;
++			ctx->pwq_tbl[node] = ctx->dfl_pwq;
+ 		}
+ 	}
+ 
+-	mutex_unlock(&wq_pool_mutex);
++	ctx->attrs = new_attrs;
++	ctx->wq = wq;
++	free_workqueue_attrs(tmp_attrs);
++	return ctx;
++
++out_free:
++	free_workqueue_attrs(tmp_attrs);
++	free_workqueue_attrs(new_attrs);
++	apply_wqattrs_cleanup(ctx);
++	return NULL;
++}
++
++/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
++static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
++{
++	int node;
+ 
+ 	/* all pwqs have been created successfully, let's install'em */
+-	mutex_lock(&wq->mutex);
++	mutex_lock(&ctx->wq->mutex);
+ 
+-	copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
++	copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
+ 
+ 	/* save the previous pwq and install the new one */
+ 	for_each_node(node)
+-		pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
++		ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
++							  ctx->pwq_tbl[node]);
+ 
+ 	/* @dfl_pwq might not have been used, ensure it's linked */
+-	link_pwq(dfl_pwq);
+-	swap(wq->dfl_pwq, dfl_pwq);
++	link_pwq(ctx->dfl_pwq);
++	swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
+ 
+-	mutex_unlock(&wq->mutex);
++	mutex_unlock(&ctx->wq->mutex);
++}
+ 
+-	/* put the old pwqs */
+-	for_each_node(node)
+-		put_pwq_unlocked(pwq_tbl[node]);
+-	put_pwq_unlocked(dfl_pwq);
++/**
++ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
++ * @wq: the target workqueue
++ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
++ *
++ * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
++ * machines, this function maps a separate pwq to each NUMA node with
++ * possible CPUs in @attrs->cpumask so that work items are affine to the
++ * NUMA node they were issued on.  Older pwqs are released as in-flight work
++ * items finish.  Note that a work item which repeatedly requeues itself
++ * back-to-back will stay on its current pwq.
++ *
++ * Performs GFP_KERNEL allocations.
++ *
++ * Return: 0 on success and -errno on failure.
++ */
++int apply_workqueue_attrs(struct workqueue_struct *wq,
++			  const struct workqueue_attrs *attrs)
++{
++	struct apply_wqattrs_ctx *ctx;
++	int ret = -ENOMEM;
+ 
+-	put_online_cpus();
+-	ret = 0;
+-	/* fall through */
+-out_free:
+-	free_workqueue_attrs(tmp_attrs);
+-	free_workqueue_attrs(new_attrs);
+-	kfree(pwq_tbl);
+-	return ret;
++	/* only unbound workqueues can change attributes */
++	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
++		return -EINVAL;
++
++	/* creating multiple pwqs breaks ordering guarantee */
++	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
++		return -EINVAL;
++
++	/*
++	 * CPUs should stay stable across pwq creations and installations.
++	 * Pin CPUs, determine the target cpumask for each node and create
++	 * pwqs accordingly.
++	 */
++	get_online_cpus();
++	mutex_lock(&wq_pool_mutex);
++
++	ctx = apply_wqattrs_prepare(wq, attrs);
++
++	/* the ctx has been prepared successfully, let's commit it */
++	if (ctx) {
++		apply_wqattrs_commit(ctx);
++		ret = 0;
++	}
+ 
+-enomem_pwq:
+-	free_unbound_pwq(dfl_pwq);
+-	for_each_node(node)
+-		if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
+-			free_unbound_pwq(pwq_tbl[node]);
+ 	mutex_unlock(&wq_pool_mutex);
+ 	put_online_cpus();
+-enomem:
+-	ret = -ENOMEM;
+-	goto out_free;
++
++	apply_wqattrs_cleanup(ctx);
++
++	return ret;
+ }
+ 
+ /**
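
The apply_workqueue_attrs() rework above is a prepare/commit/cleanup split: every allocation that can fail happens in apply_wqattrs_prepare() before any state is published, apply_wqattrs_commit() is then pure pointer installation that cannot fail, and apply_wqattrs_cleanup() drops whatever ended up unused -- either the old pwqs that commit swapped out, or the half-built context on the error path. In outline (a sketch with simplified names, not the exact kernel signatures):

	int apply(struct target *t, const struct attrs *a)
	{
		struct ctx *ctx;
		int ret = -ENOMEM;

		lock();
		ctx = prepare(t, a);	/* all allocations; NULL on failure */
		if (ctx) {
			commit(ctx);	/* installs new state, parks the
					 * old state in ctx for release */
			ret = 0;
		}
		unlock();
		cleanup(ctx);		/* cleanup(NULL) is a no-op */
		return ret;
	}

One pleasant side effect is that free_unbound_pwq() and its special-cased error unwinding disappear entirely.
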
+diff --git a/lib/klist.c b/lib/klist.c
+index 89b485a2a58d..2a072bfaeace 100644
+--- a/lib/klist.c
++++ b/lib/klist.c
+@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ 			  struct klist_node *n)
+ {
+ 	i->i_klist = k;
+-	i->i_cur = n;
+-	if (n)
+-		kref_get(&n->n_ref);
++	i->i_cur = NULL;
++	if (n && kref_get_unless_zero(&n->n_ref))
++		i->i_cur = n;
+ }
+ EXPORT_SYMBOL_GPL(klist_iter_init_node);
+ 
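
The klist change adopts the standard get-unless-zero idiom: kref_get() on an object whose refcount has already dropped to zero would revive an object that is on its way to its release function, whereas kref_get_unless_zero() fails instead and the iterator simply starts out empty. The general shape of the idiom (sketch):

	obj = find_in_list(key);	/* found under the list lock */
	if (obj && !kref_get_unless_zero(&obj->ref))
		obj = NULL;	/* lost the race against the final put */
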
+diff --git a/mm/mmap.c b/mm/mmap.c
+index b639fa2721d8..d30b8f8f02b1 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2654,12 +2654,29 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ 	if (!vma || !(vma->vm_flags & VM_SHARED))
+ 		goto out;
+ 
+-	if (start < vma->vm_start || start + size > vma->vm_end)
++	if (start < vma->vm_start)
+ 		goto out;
+ 
+-	if (pgoff == linear_page_index(vma, start)) {
+-		ret = 0;
+-		goto out;
++	if (start + size > vma->vm_end) {
++		struct vm_area_struct *next;
++
++		for (next = vma->vm_next; next; next = next->vm_next) {
++			/* hole between vmas ? */
++			if (next->vm_start != next->vm_prev->vm_end)
++				goto out;
++
++			if (next->vm_file != vma->vm_file)
++				goto out;
++
++			if (next->vm_flags != vma->vm_flags)
++				goto out;
++
++			if (start + size <= next->vm_end)
++				break;
++		}
++
++		if (!next)
++			goto out;
+ 	}
+ 
+ 	prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
+@@ -2669,9 +2686,16 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ 	flags &= MAP_NONBLOCK;
+ 	flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
+ 	if (vma->vm_flags & VM_LOCKED) {
++		struct vm_area_struct *tmp;
+ 		flags |= MAP_LOCKED;
++
+ 		/* drop PG_Mlocked flag for over-mapped range */
+-		munlock_vma_pages_range(vma, start, start + size);
++		for (tmp = vma; tmp && tmp->vm_start < start + size;
++				tmp = tmp->vm_next) {
++			munlock_vma_pages_range(tmp,
++					max(tmp->vm_start, start),
++					min(tmp->vm_end, start + size));
++		}
+ 	}
+ 
+ 	file = get_file(vma->vm_file);
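
Note the clamping in the new munlock pass: each vma in the walk is munlocked only over its overlap with [start, start + size), i.e. [max(tmp->vm_start, start), min(tmp->vm_end, start + size)). For example, a vma spanning [0x1000, 0x5000) over-mapped by a range [0x3000, 0x8000) has exactly [0x3000, 0x5000) munlocked, and the loop then moves on to the next vma; the validation pass added earlier in the hunk has already guaranteed that the vmas covering the range are contiguous and share the same file and flags.
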
+diff --git a/net/bridge/br.c b/net/bridge/br.c
+index 02c24cf63c34..c72e01cf09d0 100644
+--- a/net/bridge/br.c
++++ b/net/bridge/br.c
+@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
+ 	.notifier_call = br_device_event
+ };
+ 
++/* called with RTNL */
+ static int br_netdev_switch_event(struct notifier_block *unused,
+ 				  unsigned long event, void *ptr)
+ {
+@@ -130,7 +131,6 @@ static int br_netdev_switch_event(struct notifier_block *unused,
+ 	struct netdev_switch_notifier_fdb_info *fdb_info;
+ 	int err = NOTIFY_DONE;
+ 
+-	rtnl_lock();
+ 	p = br_port_get_rtnl(dev);
+ 	if (!p)
+ 		goto out;
+@@ -155,7 +155,6 @@ static int br_netdev_switch_event(struct notifier_block *unused,
+ 	}
+ 
+ out:
+-	rtnl_unlock();
+ 	return err;
+ }
+ 
+diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
+index 2c35c02a931e..3556791fdc6e 100644
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -113,7 +113,6 @@ ip:
+ 	case htons(ETH_P_IPV6): {
+ 		const struct ipv6hdr *iph;
+ 		struct ipv6hdr _iph;
+-		__be32 flow_label;
+ 
+ ipv6:
+ 		iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
+@@ -130,8 +129,9 @@ ipv6:
+ 		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+ 		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
+ 
+-		flow_label = ip6_flowlabel(iph);
+-		if (flow_label) {
++		if (skb && ip6_flowlabel(iph)) {
++			__be32 flow_label = ip6_flowlabel(iph);
++
+ 			/* Awesome, IPv6 packet has a flow label so we can
+ 			 * use that to represent the ports without any
+ 			 * further dissection.
+@@ -233,6 +233,13 @@ ipv6:
+ 					return false;
+ 				proto = eth->h_proto;
+ 				nhoff += sizeof(*eth);
++
++				/* Cap headers that we access via pointers at the
++				 * end of the Ethernet header as our maximum alignment
++				 * at that point is only 2 bytes.
++				 */
++				if (NET_IP_ALIGN)
++					hlen = nhoff;
+ 			}
+ 			goto again;
+ 		}
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 8a1741b14302..dce0acb929f1 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fplp = fpl;
+ 		fpl->count = 0;
+ 		fpl->max = SCM_MAX_FD;
++		fpl->user = NULL;
+ 	}
+ 	fpp = &fpl->fp[fpl->count];
+ 
+@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
+ 		*fpp++ = file;
+ 		fpl->count++;
+ 	}
++
++	if (!fpl->user)
++		fpl->user = get_uid(current_user());
++
+ 	return num;
+ }
+ 
+@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
+ 		scm->fp = NULL;
+ 		for (i=fpl->count-1; i>=0; i--)
+ 			fput(fpl->fp[i]);
++		free_uid(fpl->user);
+ 		kfree(fpl);
+ 	}
+ }
+@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
+ 		for (i = 0; i < fpl->count; i++)
+ 			get_file(fpl->fp[i]);
+ 		new_fpl->max = new_fpl->count;
++		new_fpl->user = get_uid(fpl->user);
+ 	}
+ 	return new_fpl;
+ }
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 2e5fcda16570..c9793c6c5005 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -79,6 +79,8 @@
+ 
+ struct kmem_cache *skbuff_head_cache __read_mostly;
+ static struct kmem_cache *skbuff_fclone_cache __read_mostly;
++int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
++EXPORT_SYMBOL(sysctl_max_skb_frags);
+ 
+ /**
+  *	skb_panic - private function for out-of-line support
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index 95b6139d710c..a6beb7b6ae55 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -26,6 +26,7 @@ static int zero = 0;
+ static int one = 1;
+ static int min_sndbuf = SOCK_MIN_SNDBUF;
+ static int min_rcvbuf = SOCK_MIN_RCVBUF;
++static int max_skb_frags = MAX_SKB_FRAGS;
+ 
+ static int net_msg_warn;	/* Unused, but still a sysctl */
+ 
+@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
+ 		.mode		= 0644,
+ 		.proc_handler	= proc_dointvec
+ 	},
++	{
++		.procname	= "max_skb_frags",
++		.data		= &sysctl_max_skb_frags,
++		.maxlen		= sizeof(int),
++		.mode		= 0644,
++		.proc_handler	= proc_dointvec_minmax,
++		.extra1		= &one,
++		.extra2		= &max_skb_frags,
++	},
+ 	{ }
+ };
+ 
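
The new knob is clamped by proc_dointvec_minmax to the range [1, MAX_SKB_FRAGS] (17 with 4 KiB pages), and the TCP hunks below honour it when deciding whether another page can still be coalesced into the current skb. It can be lowered at run time with, e.g., `sysctl -w net.core.max_skb_frags=8`, the intended workaround for devices that cannot handle the full 17-fragment case.
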
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index 419d23c53ec7..280d46f947ea 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -1839,7 +1839,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 6ddde89996f4..b6c7bdea4853 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ 		switch (cmsg->cmsg_type) {
+ 		case IP_RETOPTS:
+ 			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
++
++			/* Our caller is responsible for freeing ipc->opt */
+ 			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+ 					     err < 40 ? err : 40);
+ 			if (err)
+diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
+index 05ff44b758df..f6ee0d561aab 100644
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -745,8 +745,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
+diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
+index 561cd4b8fc6e..c77aac75759d 100644
+--- a/net/ipv4/raw.c
++++ b/net/ipv4/raw.c
+@@ -543,8 +543,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			goto out;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 	}
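
The kfree(ipc.opt) additions here (and in the udp.c hunk below) enforce the ownership rule spelled out in the ip_sockglue.c hunk: ip_cmsg_send() walks the cmsg list in order, so IP_RETOPTS may already have allocated ipc->opt via ip_options_get() by the time a later cmsg fails. The caller therefore frees ipc.opt on any error; kfree(NULL) is a no-op, so the unconditional call is safe when nothing was allocated.
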
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index f45f2a12f37b..1d3cdb4d4ebc 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -125,6 +125,7 @@ static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
+ static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
+ static int ip_rt_min_advmss __read_mostly	= 256;
+ 
++static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ /*
+  *	Interface to generic destination cache.
+  */
+@@ -753,7 +754,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
+ 				struct fib_nh *nh = &FIB_RES_NH(res);
+ 
+ 				update_or_create_fnhe(nh, fl4->daddr, new_gw,
+-						      0, 0);
++						0, jiffies + ip_rt_gc_timeout);
+ 			}
+ 			if (kill_route)
+ 				rt->dst.obsolete = DST_OBSOLETE_KILL;
+@@ -1538,6 +1539,36 @@ static void ip_handle_martian_source(struct net_device *dev,
+ #endif
+ }
+ 
++static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
++{
++	struct fnhe_hash_bucket *hash;
++	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
++	u32 hval = fnhe_hashfun(daddr);
++
++	spin_lock_bh(&fnhe_lock);
++
++	hash = rcu_dereference_protected(nh->nh_exceptions,
++					 lockdep_is_held(&fnhe_lock));
++	hash += hval;
++
++	fnhe_p = &hash->chain;
++	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
++	while (fnhe) {
++		if (fnhe->fnhe_daddr == daddr) {
++			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
++				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
++			fnhe_flush_routes(fnhe);
++			kfree_rcu(fnhe, rcu);
++			break;
++		}
++		fnhe_p = &fnhe->fnhe_next;
++		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
++						 lockdep_is_held(&fnhe_lock));
++	}
++
++	spin_unlock_bh(&fnhe_lock);
++}
++
+ /* called in rcu_read_lock() section */
+ static int __mkroute_input(struct sk_buff *skb,
+ 			   const struct fib_result *res,
+@@ -1592,11 +1623,20 @@ static int __mkroute_input(struct sk_buff *skb,
+ 
+ 	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
+ 	if (do_cache) {
+-		if (fnhe)
++		if (fnhe) {
+ 			rth = rcu_dereference(fnhe->fnhe_rth_input);
+-		else
+-			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
++			}
++		}
++
++		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ 
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			skb_dst_set_noref(skb, &rth->dst);
+ 			goto out;
+@@ -1945,19 +1985,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
+ 		struct fib_nh *nh = &FIB_RES_NH(*res);
+ 
+ 		fnhe = find_exception(nh, fl4->daddr);
+-		if (fnhe)
++		if (fnhe) {
+ 			prth = &fnhe->fnhe_rth_output;
+-		else {
+-			if (unlikely(fl4->flowi4_flags &
+-				     FLOWI_FLAG_KNOWN_NH &&
+-				     !(nh->nh_gw &&
+-				       nh->nh_scope == RT_SCOPE_LINK))) {
+-				do_cache = false;
+-				goto add;
++			rth = rcu_dereference(*prth);
++			if (rth && rth->dst.expires &&
++			    time_after(jiffies, rth->dst.expires)) {
++				ip_del_fnhe(nh, fl4->daddr);
++				fnhe = NULL;
++			} else {
++				goto rt_cache;
+ 			}
+-			prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		}
++
++		if (unlikely(fl4->flowi4_flags &
++			     FLOWI_FLAG_KNOWN_NH &&
++			     !(nh->nh_gw &&
++			       nh->nh_scope == RT_SCOPE_LINK))) {
++			do_cache = false;
++			goto add;
++		}
++		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
+ 		rth = rcu_dereference(*prth);
++
++rt_cache:
+ 		if (rt_cache_valid(rth)) {
+ 			dst_hold(&rth->dst);
+ 			return rth;
+@@ -2504,7 +2554,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
+ }
+ 
+ #ifdef CONFIG_SYSCTL
+-static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+ static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
+ static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
+ static int ip_rt_gc_elasticity __read_mostly	= 8;
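
Two related changes in this route.c section: redirects now create next-hop exceptions with a finite lifetime (jiffies + ip_rt_gc_timeout, where RT_GC_TIMEOUT defaults to 300 seconds) instead of never-expiring ones, and both the input and output lookup paths check dst.expires with the wraparound-safe time_after() test, deleting a stale exception via the new ip_del_fnhe() and falling back to the per-nexthop cache rather than serving the stale route forever. Moving the ip_rt_gc_timeout definition out of the CONFIG_SYSCTL block is what makes it reachable from __ip_do_redirect().
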
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index bb2ce74f6004..19d385a0f02d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -279,6 +279,7 @@
+ 
+ #include <asm/uaccess.h>
+ #include <asm/ioctls.h>
++#include <asm/unaligned.h>
+ #include <net/busy_poll.h>
+ 
+ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
+@@ -921,7 +922,7 @@ new_segment:
+ 
+ 		i = skb_shinfo(skb)->nr_frags;
+ 		can_coalesce = skb_can_coalesce(skb, i, page, offset);
+-		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
++		if (!can_coalesce && i >= sysctl_max_skb_frags) {
+ 			tcp_mark_push(tp, skb);
+ 			goto new_segment;
+ 		}
+@@ -1187,7 +1188,7 @@ new_segment:
+ 
+ 			if (!skb_can_coalesce(skb, i, pfrag->page,
+ 					      pfrag->offset)) {
+-				if (i == MAX_SKB_FRAGS || !sg) {
++				if (i == sysctl_max_skb_frags || !sg) {
+ 					tcp_mark_push(tp, skb);
+ 					goto new_segment;
+ 				}
+@@ -2603,6 +2604,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ 	const struct inet_connection_sock *icsk = inet_csk(sk);
+ 	u32 now = tcp_time_stamp;
+ 	unsigned int start;
++	u64 rate64;
+ 	u32 rate;
+ 
+ 	memset(info, 0, sizeof(*info));
+@@ -2665,15 +2667,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
+ 	info->tcpi_total_retrans = tp->total_retrans;
+ 
+ 	rate = READ_ONCE(sk->sk_pacing_rate);
+-	info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
++	rate64 = rate != ~0U ? rate : ~0ULL;
++	put_unaligned(rate64, &info->tcpi_pacing_rate);
+ 
+ 	rate = READ_ONCE(sk->sk_max_pacing_rate);
+-	info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
++	rate64 = rate != ~0U ? rate : ~0ULL;
++	put_unaligned(rate64, &info->tcpi_max_pacing_rate);
+ 
+ 	do {
+ 		start = u64_stats_fetch_begin_irq(&tp->syncp);
+-		info->tcpi_bytes_acked = tp->bytes_acked;
+-		info->tcpi_bytes_received = tp->bytes_received;
++		put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
++		put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
+ 	} while (u64_stats_fetch_retry_irq(&tp->syncp, start));
+ }
+ EXPORT_SYMBOL_GPL(tcp_get_info);
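
The put_unaligned() conversions matter because tcp_get_info() does not only fill a neatly aligned stack buffer: via inet_diag it writes straight into a netlink attribute payload, which is only guaranteed 4-byte alignment, so a plain 64-bit store can trap on strict-alignment architectures. put_unaligned() degrades to a normal store where unaligned access is cheap and to byte-safe stores where it is not. (That is my reading of the change; the backport carries no commit text here.)
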
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index cd18c3d3251e..13b92d595138 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -705,7 +705,8 @@ release_sk1:
+    outside socket context is ugly, certainly. What can I do?
+  */
+ 
+-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
++static void tcp_v4_send_ack(struct net *net,
++			    struct sk_buff *skb, u32 seq, u32 ack,
+ 			    u32 win, u32 tsval, u32 tsecr, int oif,
+ 			    struct tcp_md5sig_key *key,
+ 			    int reply_flags, u8 tos)
+@@ -720,7 +721,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+ 			];
+ 	} rep;
+ 	struct ip_reply_arg arg;
+-	struct net *net = dev_net(skb_dst(skb)->dev);
+ 
+ 	memset(&rep.th, 0, sizeof(struct tcphdr));
+ 	memset(&arg, 0, sizeof(arg));
+@@ -782,7 +782,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ 	struct inet_timewait_sock *tw = inet_twsk(sk);
+ 	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ 
+-	tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
++	tcp_v4_send_ack(sock_net(sk), skb,
++			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ 			tcp_time_stamp + tcptw->tw_ts_offset,
+ 			tcptw->tw_ts_recent,
+@@ -801,8 +802,10 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
+ 	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ 	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
+ 	 */
+-	tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
+-			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
++	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
++					     tcp_sk(sk)->snd_nxt;
++
++	tcp_v4_send_ack(sock_net(sk), skb, seq,
+ 			tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
+ 			tcp_time_stamp,
+ 			req->ts_recent,
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 1b8c5ba7d5f7..a390174b96de 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -963,8 +963,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	if (msg->msg_controllen) {
+ 		err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+ 				   sk->sk_family == AF_INET6);
+-		if (err)
++		if (unlikely(err)) {
++			kfree(ipc.opt);
+ 			return err;
++		}
+ 		if (ipc.opt)
+ 			free = 1;
+ 		connected = 0;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f4795b0d6e6e..f555f4fc1d62 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -195,6 +195,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
+ 	.accept_ra_from_local	= 0,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -236,6 +237,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
+ 	.max_addresses		= IPV6_MAX_ADDRESSES,
+ 	.accept_ra_defrtr	= 1,
+ 	.accept_ra_from_local	= 0,
++	.accept_ra_min_hop_limit= 1,
+ 	.accept_ra_pinfo	= 1,
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	.accept_ra_rtr_pref	= 1,
+@@ -567,7 +569,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
+ 	if (err < 0)
+ 		goto errout;
+ 
+-	err = EINVAL;
++	err = -EINVAL;
+ 	if (!tb[NETCONFA_IFINDEX])
+ 		goto errout;
+ 
+@@ -3421,6 +3423,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ {
+ 	struct inet6_dev *idev = ifp->idev;
+ 	struct net_device *dev = idev->dev;
++	bool notify = false;
+ 
+ 	addrconf_join_solict(dev, &ifp->addr);
+ 
+@@ -3466,7 +3469,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ 			/* Because optimistic nodes can use this address,
+ 			 * notify listeners. If DAD fails, RTM_DELADDR is sent.
+ 			 */
+-			ipv6_ifa_notify(RTM_NEWADDR, ifp);
++			notify = true;
+ 		}
+ 	}
+ 
+@@ -3474,6 +3477,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
+ out:
+ 	spin_unlock(&ifp->lock);
+ 	read_unlock_bh(&idev->lock);
++	if (notify)
++		ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ }
+ 
+ static void addrconf_dad_start(struct inet6_ifaddr *ifp)
+@@ -4565,6 +4570,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
+ 	array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
+ 	array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
+ 	array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
++	array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
+ 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
+ #ifdef CONFIG_IPV6_ROUTER_PREF
+ 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
+@@ -5458,6 +5464,13 @@ static struct addrconf_sysctl_table
+ 			.proc_handler	= proc_dointvec,
+ 		},
+ 		{
++			.procname	= "accept_ra_min_hop_limit",
++			.data		= &ipv6_devconf.accept_ra_min_hop_limit,
++			.maxlen		= sizeof(int),
++			.mode		= 0644,
++			.proc_handler	= proc_dointvec,
++		},
++		{
+ 			.procname	= "accept_ra_pinfo",
+ 			.data		= &ipv6_devconf.accept_ra_pinfo,
+ 			.maxlen		= sizeof(int),
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 13ca4cf5616f..8e6cb3f14326 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -162,6 +162,9 @@ ipv4_connected:
+ 	fl6.fl6_dport = inet->inet_dport;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
++	if (!fl6.flowi6_oif)
++		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
++
+ 	if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
+ 		fl6.flowi6_oif = np->mcast_oif;
+ 
+diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
+index d491125011c4..db939e4ac68a 100644
+--- a/net/ipv6/ip6_flowlabel.c
++++ b/net/ipv6/ip6_flowlabel.c
+@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+ 		}
+ 		spin_lock_bh(&ip6_sk_fl_lock);
+ 		for (sflp = &np->ipv6_fl_list;
+-		     (sfl = rcu_dereference(*sflp)) != NULL;
++		     (sfl = rcu_dereference_protected(*sflp,
++						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
+ 		     sflp = &sfl->next) {
+ 			if (sfl->fl->label == freq.flr_label) {
+ 				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
+ 					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+-				*sflp = rcu_dereference(sfl->next);
++				*sflp = sfl->next;
+ 				spin_unlock_bh(&ip6_sk_fl_lock);
+ 				fl_release(sfl->fl);
+ 				kfree_rcu(sfl, rcu);
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index f50228b0abe5..36b9ac48b8fb 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -885,6 +885,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
+ 	struct rt6_info *rt;
+ #endif
+ 	int err;
++	int flags = 0;
+ 
+ 	/* The correct way to handle this would be to do
+ 	 * ip6_route_get_saddr, and then ip6_route_output; however,
+@@ -916,10 +917,13 @@ static int ip6_dst_lookup_tail(struct sock *sk,
+ 			dst_release(*dst);
+ 			*dst = NULL;
+ 		}
++
++		if (fl6->flowi6_oif)
++			flags |= RT6_LOOKUP_F_IFACE;
+ 	}
+ 
+ 	if (!*dst)
+-		*dst = ip6_route_output(net, sk, fl6);
++		*dst = ip6_route_output_flags(net, sk, fl6, flags);
+ 
+ 	err = (*dst)->error;
+ 	if (err)
+diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
+index 96f153c0846b..abb0bdda759a 100644
+--- a/net/ipv6/ndisc.c
++++ b/net/ipv6/ndisc.c
+@@ -1225,18 +1225,16 @@ static void ndisc_router_discovery(struct sk_buff *skb)
+ 
+ 	if (rt)
+ 		rt6_set_expires(rt, jiffies + (HZ * lifetime));
+-	if (ra_msg->icmph.icmp6_hop_limit) {
+-		/* Only set hop_limit on the interface if it is higher than
+-		 * the current hop_limit.
+-		 */
+-		if (in6_dev->cnf.hop_limit < ra_msg->icmph.icmp6_hop_limit) {
++	if (in6_dev->cnf.accept_ra_min_hop_limit < 256 &&
++	    ra_msg->icmph.icmp6_hop_limit) {
++		if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) {
+ 			in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
++			if (rt)
++				dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
++					       ra_msg->icmph.icmp6_hop_limit);
+ 		} else {
+-			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than current\n");
++			ND_PRINTK(2, warn, "RA: Got route advertisement with lower hop_limit than minimum\n");
+ 		}
+-		if (rt)
+-			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+-				       ra_msg->icmph.icmp6_hop_limit);
+ 	}
+ 
+ skip_defrtr:
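
Semantics of the new tunable as implemented above: an RA's Cur Hop Limit is honoured only when it is at least net.ipv6.conf.<iface>.accept_ra_min_hop_limit. The default of 1 accepts any non-zero advertised value, which deliberately also drops the old "only ever raise, never lower" behaviour; a value of 256 or more disables RA hop-limit processing outright, since the field is 8 bits wide. Setting, e.g., `accept_ra_min_hop_limit=64` hardens a host against rogue RAs that advertise a tiny hop limit to break its connectivity.
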
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f371fefa7fdc..fe70bd6a7516 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1030,11 +1030,9 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
+ 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
+ }
+ 
+-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+-				    struct flowi6 *fl6)
++struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
++					 struct flowi6 *fl6, int flags)
+ {
+-	int flags = 0;
+-
+ 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
+ 
+ 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
+@@ -1047,7 +1045,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
+ 
+ 	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
+ }
+-EXPORT_SYMBOL(ip6_route_output);
++EXPORT_SYMBOL_GPL(ip6_route_output_flags);
+ 
+ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
+ {
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index 6daa52a18d40..123f6f9f854c 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -709,6 +709,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 	if (!addr || addr->sa_family != AF_IUCV)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_iucv))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 	if (sk->sk_state != IUCV_OPEN) {
+ 		err = -EBADFD;
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index 9e13c2ff8789..fe92a08b3cd5 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
+ 	ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
+ 				  NLM_F_ACK, tunnel, cmd);
+ 
+-	if (ret >= 0)
+-		return genlmsg_multicast_allns(family, msg, 0,	0, GFP_ATOMIC);
++	if (ret >= 0) {
++		ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++		/* We don't care if no one is listening */
++		if (ret == -ESRCH)
++			ret = 0;
++		return ret;
++	}
+ 
+ 	nlmsg_free(msg);
+ 
+@@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
+ 	ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
+ 				   NLM_F_ACK, session, cmd);
+ 
+-	if (ret >= 0)
+-		return genlmsg_multicast_allns(family, msg, 0,	0, GFP_ATOMIC);
++	if (ret >= 0) {
++		ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++		/* We don't care if no one is listening */
++		if (ret == -ESRCH)
++			ret = 0;
++		return ret;
++	}
+ 
+ 	nlmsg_free(msg);
+ 
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index e13c3c3ea4ac..9d134ab3351f 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -60,6 +60,8 @@
+ #include <net/inet_common.h>
+ #include <net/inet_ecn.h>
+ 
++#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
++
+ /* Global data structures. */
+ struct sctp_globals sctp_globals __read_mostly;
+ 
+@@ -1332,6 +1334,8 @@ static __init int sctp_init(void)
+ 	unsigned long limit;
+ 	int max_share;
+ 	int order;
++	int num_entries;
++	int max_entry_order;
+ 
+ 	sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
+ 
+@@ -1384,14 +1388,24 @@ static __init int sctp_init(void)
+ 
+ 	/* Size and allocate the association hash table.
+ 	 * The methodology is similar to that of the tcp hash tables.
++	 * Though not identical.  Start by getting a goal size
+ 	 */
+ 	if (totalram_pages >= (128 * 1024))
+ 		goal = totalram_pages >> (22 - PAGE_SHIFT);
+ 	else
+ 		goal = totalram_pages >> (24 - PAGE_SHIFT);
+ 
+-	for (order = 0; (1UL << order) < goal; order++)
+-		;
++	/* Then compute the page order for said goal */
++	order = get_order(goal);
++
++	/* Now compute the required page order for the maximum sized table we
++	 * want to create
++	 */
++	max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
++				    sizeof(struct sctp_bind_hashbucket));
++
++	/* Limit the page order by that maximum hash table size */
++	order = min(order, max_entry_order);
+ 
+ 	do {
+ 		sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
+@@ -1425,27 +1439,42 @@ static __init int sctp_init(void)
+ 		INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
+ 	}
+ 
+-	/* Allocate and initialize the SCTP port hash table.  */
++	/* Allocate and initialize the SCTP port hash table.
++	 * Note that order is initialized to start at the maximum sized
++	 * table we want to support.  If we can't get that many pages,
++	 * reduce the order and try again.
++	 */
+ 	do {
+-		sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
+-					sizeof(struct sctp_bind_hashbucket);
+-		if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
+-			continue;
+ 		sctp_port_hashtable = (struct sctp_bind_hashbucket *)
+ 			__get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
+ 	} while (!sctp_port_hashtable && --order > 0);
++
+ 	if (!sctp_port_hashtable) {
+ 		pr_err("Failed bind hash alloc\n");
+ 		status = -ENOMEM;
+ 		goto err_bhash_alloc;
+ 	}
++
++	/* Now compute the number of entries that will fit in the
++	 * port hash space we allocated
++	 */
++	num_entries = (1UL << order) * PAGE_SIZE /
++		      sizeof(struct sctp_bind_hashbucket);
++
++	/* And finish by rounding it down to the nearest power of two.
++	 * This wastes some memory, of course, but it's needed because
++	 * the hash function operates on the assumption that the number
++	 * of entries is a power of two.
++	 */
++	sctp_port_hashsize = rounddown_pow_of_two(num_entries);
++
+ 	for (i = 0; i < sctp_port_hashsize; i++) {
+ 		spin_lock_init(&sctp_port_hashtable[i].lock);
+ 		INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
+ 	}
+ 
+-	pr_info("Hash tables configured (established %d bind %d)\n",
+-		sctp_assoc_hashsize, sctp_port_hashsize);
++	pr_info("Hash tables configured (established %d bind %d/%d)\n",
++		sctp_assoc_hashsize, sctp_port_hashsize, num_entries);
+ 
+ 	sctp_sysctl_register();
+ 
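
The arithmetic, worked through under the assumption of 4 KiB pages and a 16-byte struct sctp_bind_hashbucket (a spinlock_t plus an hlist_head on 64-bit, without lock debugging): MAX_SCTP_PORT_HASH_ENTRIES * 16 = 1 MiB, so max_entry_order = get_order(1 MiB) = 8, and the allocation loop starts there. If memory is tight and only an order-5 allocation succeeds, num_entries = (1 << 5) * 4096 / 16 = 8192, and rounddown_pow_of_two() leaves that as is. The rounding only bites when the bucket size is not a power of two (lock debugging inflates spinlock_t, for instance); then num_entries lands between powers of two and the surplus entries are wasted -- hence the amended "bind %d/%d" report of both numbers.
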
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 76e6ec62cf92..3c5833058b03 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -5555,6 +5555,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 	struct sctp_hmac_algo_param *hmacs;
+ 	__u16 data_len = 0;
+ 	u32 num_idents;
++	int i;
+ 
+ 	if (!ep->auth_enable)
+ 		return -EACCES;
+@@ -5572,8 +5573,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+ 		return -EFAULT;
+ 	if (put_user(num_idents, &p->shmac_num_idents))
+ 		return -EFAULT;
+-	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+-		return -EFAULT;
++	for (i = 0; i < num_idents; i++) {
++		__u16 hmacid = ntohs(hmacs->hmac_ids[i]);
++
++		if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
++			return -EFAULT;
++	}
+ 	return 0;
+ }
+ 
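
The loop fixes an endianness leak: hmac_ids[] holds the identifiers in network byte order, since the same array goes out on the wire in the INIT chunk, but the socket ABI expects host order. On a little-endian machine the old bulk copy_to_user() would report HMAC-SHA1 (id 1, stored as 0x0100) to the application as 256; converting each __u16 with ntohs() on the way out fixes that without touching the wire format.
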
+@@ -6653,6 +6658,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 
+ 			if (cmsgs->srinfo->sinfo_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
+@@ -6676,6 +6682,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+ 
+ 			if (cmsgs->sinfo->snd_flags &
+ 			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
++			      SCTP_SACK_IMMEDIATELY |
+ 			      SCTP_ABORT | SCTP_EOF))
+ 				return -EINVAL;
+ 			break;
+diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
+index 055453d48668..a8dbe8001e46 100644
+--- a/net/switchdev/switchdev.c
++++ b/net/switchdev/switchdev.c
+@@ -15,6 +15,7 @@
+ #include <linux/mutex.h>
+ #include <linux/notifier.h>
+ #include <linux/netdevice.h>
++#include <linux/rtnetlink.h>
+ #include <net/ip_fib.h>
+ #include <net/switchdev.h>
+ 
+@@ -64,7 +65,6 @@ int netdev_switch_port_stp_update(struct net_device *dev, u8 state)
+ }
+ EXPORT_SYMBOL_GPL(netdev_switch_port_stp_update);
+ 
+-static DEFINE_MUTEX(netdev_switch_mutex);
+ static RAW_NOTIFIER_HEAD(netdev_switch_notif_chain);
+ 
+ /**
+@@ -79,9 +79,9 @@ int register_netdev_switch_notifier(struct notifier_block *nb)
+ {
+ 	int err;
+ 
+-	mutex_lock(&netdev_switch_mutex);
++	rtnl_lock();
+ 	err = raw_notifier_chain_register(&netdev_switch_notif_chain, nb);
+-	mutex_unlock(&netdev_switch_mutex);
++	rtnl_unlock();
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(register_netdev_switch_notifier);
+@@ -97,9 +97,9 @@ int unregister_netdev_switch_notifier(struct notifier_block *nb)
+ {
+ 	int err;
+ 
+-	mutex_lock(&netdev_switch_mutex);
++	rtnl_lock();
+ 	err = raw_notifier_chain_unregister(&netdev_switch_notif_chain, nb);
+-	mutex_unlock(&netdev_switch_mutex);
++	rtnl_unlock();
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
+@@ -113,16 +113,17 @@ EXPORT_SYMBOL_GPL(unregister_netdev_switch_notifier);
+  *	Call all network notifier blocks. This should be called by driver
+  *	when it needs to propagate hardware event.
+  *	Return values are same as for atomic_notifier_call_chain().
++ *	rtnl_lock must be held.
+  */
+ int call_netdev_switch_notifiers(unsigned long val, struct net_device *dev,
+ 				 struct netdev_switch_notifier_info *info)
+ {
+ 	int err;
+ 
++	ASSERT_RTNL();
++
+ 	info->dev = dev;
+-	mutex_lock(&netdev_switch_mutex);
+ 	err = raw_notifier_call_chain(&netdev_switch_notif_chain, val, info);
+-	mutex_unlock(&netdev_switch_mutex);
+ 	return err;
+ }
+ EXPORT_SYMBOL_GPL(call_netdev_switch_notifiers);
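
The conversion here replaces the chain's private mutex with the RTNL: register/unregister and call_netdev_switch_notifiers() now all run under rtnl_lock, with ASSERT_RTNL() documenting the contract. That is what lets the bridge hunk above drop its own rtnl_lock()/rtnl_unlock() pair from br_netdev_switch_event() -- taking RTNL inside a notifier callback, while the chain itself ran under a separate mutex, appears to be the lock inversion being fixed.
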
+diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
+index 1c147c869c2e..948f316019d7 100644
+--- a/net/tipc/subscr.c
++++ b/net/tipc/subscr.c
+@@ -302,11 +302,10 @@ static void subscr_conn_msg_event(struct net *net, int conid,
+ 	struct tipc_net *tn = net_generic(net, tipc_net_id);
+ 
+ 	spin_lock_bh(&subscriber->lock);
+-	subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub);
+-	if (sub)
+-		tipc_nametbl_subscribe(sub);
+-	else
++	if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber, &sub))
+ 		tipc_conn_terminate(tn->topsrv, subscriber->conid);
++	else
++		tipc_nametbl_subscribe(sub);
+ 	spin_unlock_bh(&subscriber->lock);
+ }
+ 
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index cb3a01a9ed38..535a642a1688 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1464,7 +1464,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	UNIXCB(skb).fp = NULL;
+ 
+ 	for (i = scm->fp->count-1; i >= 0; i--)
+-		unix_notinflight(scm->fp->fp[i]);
++		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+ }
+ 
+ static void unix_destruct_scm(struct sk_buff *skb)
+@@ -1529,7 +1529,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 		return -ENOMEM;
+ 
+ 	for (i = scm->fp->count - 1; i >= 0; i--)
+-		unix_inflight(scm->fp->fp[i]);
++		unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ 	return max_level;
+ }
+ 
+@@ -1714,7 +1714,12 @@ restart_locked:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++	/* other == sk && unix_peer(other) != sk if
++	 * - unix_peer(sk) == NULL, destination address bound to sk
++	 * - unix_peer(sk) == sk by time of get but disconnected before lock
++	 */
++	if (other != sk &&
++	    unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ 		if (timeo) {
+ 			timeo = unix_wait_for_peer(other, timeo);
+ 
+@@ -2131,6 +2136,7 @@ again:
+ 
+ 			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
++				scm_destroy(&scm);
+ 				goto out;
+ 			}
+ 
+diff --git a/net/unix/diag.c b/net/unix/diag.c
+index c512f64d5287..4d9679701a6d 100644
+--- a/net/unix/diag.c
++++ b/net/unix/diag.c
+@@ -220,7 +220,7 @@ done:
+ 	return skb->len;
+ }
+ 
+-static struct sock *unix_lookup_by_ino(int ino)
++static struct sock *unix_lookup_by_ino(unsigned int ino)
+ {
+ 	int i;
+ 	struct sock *sk;
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index 8fcdc2283af5..6a0d48525fcf 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
+  * descriptor if it is for an AF_UNIX socket.
+  */
+ 
+-void unix_inflight(struct file *fp)
++void unix_inflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
+@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
+ 		}
+ 		unix_tot_inflight++;
+ 	}
+-	fp->f_cred->user->unix_inflight++;
++	user->unix_inflight++;
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
+-void unix_notinflight(struct file *fp)
++void unix_notinflight(struct user_struct *user, struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
+@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
+ 			list_del_init(&u->link);
+ 		unix_tot_inflight--;
+ 	}
+-	fp->f_cred->user->unix_inflight--;
++	user->unix_inflight--;
+ 	spin_unlock(&unix_gc_lock);
+ }
+ 
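
Together with the net/core/scm.c hunks earlier, this changes which user gets charged for in-flight SCM_RIGHTS descriptors: scm_fp_copy() now records the sending task's user_struct once per scm_fp_list, and unix_inflight()/unix_notinflight() increment and decrement that same counter, instead of consulting fp->f_cred->user (the opener of each file) at both ends. That keeps the per-user accounting balanced even when the files being passed were opened by a different user than the one doing the passing.
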
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 582091498819..d6bc2b3af9ef 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -23,6 +23,7 @@
+ #include <linux/integrity.h>
+ #include <linux/evm.h>
+ #include <crypto/hash.h>
++#include <crypto/algapi.h>
+ #include "evm.h"
+ 
+ int evm_initialized;
+@@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ 				   xattr_value_len, calc.digest);
+ 		if (rc)
+ 			break;
+-		rc = memcmp(xattr_data->digest, calc.digest,
++		rc = crypto_memneq(xattr_data->digest, calc.digest,
+ 			    sizeof(calc.digest));
+ 		if (rc)
+ 			rc = -EINVAL;
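
crypto_memneq() exists because memcmp() returns at the first mismatching byte, so verification time leaks how long a forged digest's correct prefix is -- a classic timing side channel against MACs. A sketch of the idea (illustrative only, not the kernel's actual crypto_memneq() implementation, which also has to defeat compiler optimisations):

	static int neq_const_time(const unsigned char *a,
				  const unsigned char *b, size_t n)
	{
		unsigned char diff = 0;

		while (n--)			/* touch every byte, always */
			diff |= *a++ ^ *b++;
		return diff;			/* 0 only on a full match */
	}
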
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index 75888dd38a7f..aa999e747c94 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -74,6 +74,18 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
+ static DEFINE_RWLOCK(snd_pcm_link_rwlock);
+ static DECLARE_RWSEM(snd_pcm_link_rwsem);
+ 
++/* A writer in an rwsem may block readers even while it waits in the
++ * queue, and this may lead to a deadlock when a code path takes the
++ * read semaphore twice (e.g. once in snd_pcm_action_nonatomic() and
++ * again in snd_pcm_stream_lock()).  As a (suboptimal) workaround, let
++ * the writer spin until it gets the lock.
++ */
++static inline void down_write_nonblock(struct rw_semaphore *lock)
++{
++	while (!down_write_trylock(lock))
++		cond_resched();
++}
++
+ /**
+  * snd_pcm_stream_lock - Lock the PCM stream
+  * @substream: PCM substream
+@@ -1816,7 +1828,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
+ 		res = -ENOMEM;
+ 		goto _nolock;
+ 	}
+-	down_write(&snd_pcm_link_rwsem);
++	down_write_nonblock(&snd_pcm_link_rwsem);
+ 	write_lock_irq(&snd_pcm_link_rwlock);
+ 	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
+ 	    substream->runtime->status->state != substream1->runtime->status->state ||
+@@ -1863,7 +1875,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
+ 	struct snd_pcm_substream *s;
+ 	int res = 0;
+ 
+-	down_write(&snd_pcm_link_rwsem);
++	down_write_nonblock(&snd_pcm_link_rwsem);
+ 	write_lock_irq(&snd_pcm_link_rwlock);
+ 	if (!snd_pcm_stream_linked(substream)) {
+ 		res = -EALREADY;
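
down_write_trylock() neither blocks nor joins the rwsem's wait queue, so the spinning writer introduced above can no longer wedge incoming readers behind it, and cond_resched() keeps the spin from starving the very readers it is waiting on. It trades fairness for deadlock-freedom, which the comment is upfront about calling suboptimal.
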
+diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
+index 801076687bb1..c850345c43b5 100644
+--- a/sound/core/seq/seq_memory.c
++++ b/sound/core/seq/seq_memory.c
+@@ -383,15 +383,20 @@ int snd_seq_pool_init(struct snd_seq_pool *pool)
+ 
+ 	if (snd_BUG_ON(!pool))
+ 		return -EINVAL;
+-	if (pool->ptr)			/* should be atomic? */
+-		return 0;
+ 
+-	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+-	if (!pool->ptr)
++	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
++	if (!cellptr)
+ 		return -ENOMEM;
+ 
+ 	/* add new cells to the free cell list */
+ 	spin_lock_irqsave(&pool->lock, flags);
++	if (pool->ptr) {
++		spin_unlock_irqrestore(&pool->lock, flags);
++		vfree(cellptr);
++		return 0;
++	}
++
++	pool->ptr = cellptr;
+ 	pool->free = NULL;
+ 
+ 	for (cell = 0; cell < pool->size; cell++) {
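
This is the usual optimistic-allocation pattern for sleeping allocators: vmalloc() can sleep and so must not run under pool->lock, hence the buffer is allocated first, the lock taken, and the racing loser's buffer handed back with vfree(). It also settles the deleted "/* should be atomic? */" doubt -- the unlocked pool->ptr check really was racy when two clients initialised the pool concurrently.
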
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 921fb2bd8fad..fe686ee41c6d 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -535,19 +535,22 @@ static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+ 					bool is_src, bool ack)
+ {
+ 	struct snd_seq_port_subs_info *grp;
++	struct list_head *list;
++	bool empty;
+ 
+ 	grp = is_src ? &port->c_src : &port->c_dest;
++	list = is_src ? &subs->src_list : &subs->dest_list;
+ 	down_write(&grp->list_mutex);
+ 	write_lock_irq(&grp->list_lock);
+-	if (is_src)
+-		list_del(&subs->src_list);
+-	else
+-		list_del(&subs->dest_list);
++	empty = list_empty(list);
++	if (!empty)
++		list_del_init(list);
+ 	grp->exclusive = 0;
+ 	write_unlock_irq(&grp->list_lock);
+ 	up_write(&grp->list_mutex);
+ 
+-	unsubscribe_port(client, port, grp, &subs->info, ack);
++	if (!empty)
++		unsubscribe_port(client, port, grp, &subs->info, ack);
+ }
+ 
+ /* connect two ports */
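
list_del_init() leaves the node pointing at itself, so a second deleter racing in sees list_empty() and skips both the removal and the duplicate unsubscribe_port() call; the empty flag computed under list_lock makes the teardown idempotent. This is the standard idiom for making list removal safe to attempt twice.
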
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 00e8c5f4de17..bf48e71f73cd 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
+ 		if (ts->ccallback)
+-			ts->ccallback(ti, event + 100, &tstamp, resolution);
++			ts->ccallback(ts, event + 100, &tstamp, resolution);
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+ }
+ 
+@@ -518,9 +518,13 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ 			spin_unlock_irqrestore(&slave_active_lock, flags);
+ 			return -EBUSY;
+ 		}
++		if (timeri->timer)
++			spin_lock(&timeri->timer->lock);
+ 		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ 		list_del_init(&timeri->ack_list);
+ 		list_del_init(&timeri->active_list);
++		if (timeri->timer)
++			spin_unlock(&timeri->timer->lock);
+ 		spin_unlock_irqrestore(&slave_active_lock, flags);
+ 		goto __end;
+ 	}
+@@ -1920,6 +1924,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ {
+ 	struct snd_timer_user *tu;
+ 	long result = 0, unit;
++	int qhead;
+ 	int err = 0;
+ 
+ 	tu = file->private_data;
+@@ -1931,7 +1936,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 
+ 			if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
+ 				err = -EAGAIN;
+-				break;
++				goto _error;
+ 			}
+ 
+ 			set_current_state(TASK_INTERRUPTIBLE);
+@@ -1946,42 +1951,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 
+ 			if (tu->disconnected) {
+ 				err = -ENODEV;
+-				break;
++				goto _error;
+ 			}
+ 			if (signal_pending(current)) {
+ 				err = -ERESTARTSYS;
+-				break;
++				goto _error;
+ 			}
+ 		}
+ 
++		qhead = tu->qhead++;
++		tu->qhead %= tu->queue_size;
+ 		spin_unlock_irq(&tu->qlock);
+-		if (err < 0)
+-			goto _error;
+ 
+ 		if (tu->tread) {
+-			if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
+-					 sizeof(struct snd_timer_tread))) {
++			if (copy_to_user(buffer, &tu->tqueue[qhead],
++					 sizeof(struct snd_timer_tread)))
+ 				err = -EFAULT;
+-				goto _error;
+-			}
+ 		} else {
+-			if (copy_to_user(buffer, &tu->queue[tu->qhead++],
+-					 sizeof(struct snd_timer_read))) {
++			if (copy_to_user(buffer, &tu->queue[qhead],
++					 sizeof(struct snd_timer_read)))
+ 				err = -EFAULT;
+-				goto _error;
+-			}
+ 		}
+ 
+-		tu->qhead %= tu->queue_size;
+-
+-		result += unit;
+-		buffer += unit;
+-
+ 		spin_lock_irq(&tu->qlock);
+ 		tu->qused--;
++		if (err < 0)
++			goto _error;
++		result += unit;
++		buffer += unit;
+ 	}
+-	spin_unlock_irq(&tu->qlock);
+  _error:
++	spin_unlock_irq(&tu->qlock);
+ 	return result > 0 ? result : err;
+ }
+ 
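
Two fixes travel together in this timer.c hunk set: snd_timer_notify1() was invoking each slave's ccallback with the master instance ti instead of the slave ts, and snd_timer_user_read() used to advance qhead and call copy_to_user() after dropping tu->qlock, so concurrent readers could copy the same slot or skip one. The rework snapshots and advances qhead under the lock first, then performs copy_to_user() -- which may fault and sleep, and so cannot run under a spinlock -- and only decrements qused once back under the lock.
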
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index 96592d5ba7bf..c5d5217a4180 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
+ module_param(fake_buffer, bool, 0444);
+ MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
+ #ifdef CONFIG_HIGH_RES_TIMERS
+-module_param(hrtimer, bool, 0444);
++module_param(hrtimer, bool, 0644);
+ MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
+ #endif
+ 
+@@ -109,6 +109,9 @@ struct dummy_timer_ops {
+ 	snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
+ };
+ 
++#define get_dummy_ops(substream) \
++	(*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
++
+ struct dummy_model {
+ 	const char *name;
+ 	int (*playback_constraints)(struct snd_pcm_runtime *runtime);
+@@ -137,7 +140,6 @@ struct snd_dummy {
+ 	int iobox;
+ 	struct snd_kcontrol *cd_volume_ctl;
+ 	struct snd_kcontrol *cd_switch_ctl;
+-	const struct dummy_timer_ops *timer_ops;
+ };
+ 
+ /*
+@@ -231,6 +233,8 @@ struct dummy_model *dummy_models[] = {
+  */
+ 
+ struct dummy_systimer_pcm {
++	/* ops must be the first item */
++	const struct dummy_timer_ops *timer_ops;
+ 	spinlock_t lock;
+ 	struct timer_list timer;
+ 	unsigned long base_time;
+@@ -366,6 +370,8 @@ static struct dummy_timer_ops dummy_systimer_ops = {
+  */
+ 
+ struct dummy_hrtimer_pcm {
++	/* ops must be the first item */
++	const struct dummy_timer_ops *timer_ops;
+ 	ktime_t base_time;
+ 	ktime_t period_time;
+ 	atomic_t running;
+@@ -492,31 +498,25 @@ static struct dummy_timer_ops dummy_hrtimer_ops = {
+ 
+ static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+ 	switch (cmd) {
+ 	case SNDRV_PCM_TRIGGER_START:
+ 	case SNDRV_PCM_TRIGGER_RESUME:
+-		return dummy->timer_ops->start(substream);
++		return get_dummy_ops(substream)->start(substream);
+ 	case SNDRV_PCM_TRIGGER_STOP:
+ 	case SNDRV_PCM_TRIGGER_SUSPEND:
+-		return dummy->timer_ops->stop(substream);
++		return get_dummy_ops(substream)->stop(substream);
+ 	}
+ 	return -EINVAL;
+ }
+ 
+ static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+-	return dummy->timer_ops->prepare(substream);
++	return get_dummy_ops(substream)->prepare(substream);
+ }
+ 
+ static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-
+-	return dummy->timer_ops->pointer(substream);
++	return get_dummy_ops(substream)->pointer(substream);
+ }
+ 
+ static struct snd_pcm_hardware dummy_pcm_hardware = {
+@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+ 	struct dummy_model *model = dummy->model;
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
++	const struct dummy_timer_ops *ops;
+ 	int err;
+ 
+-	dummy->timer_ops = &dummy_systimer_ops;
++	ops = &dummy_systimer_ops;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ 	if (hrtimer)
+-		dummy->timer_ops = &dummy_hrtimer_ops;
++		ops = &dummy_hrtimer_ops;
+ #endif
+ 
+-	err = dummy->timer_ops->create(substream);
++	err = ops->create(substream);
+ 	if (err < 0)
+ 		return err;
++	get_dummy_ops(substream) = ops;
+ 
+ 	runtime->hw = dummy->pcm_hw;
+ 	if (substream->pcm->device & 1) {
+@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 			err = model->capture_constraints(substream->runtime);
+ 	}
+ 	if (err < 0) {
+-		dummy->timer_ops->free(substream);
++		get_dummy_ops(substream)->free(substream);
+ 		return err;
+ 	}
+ 	return 0;
+@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
+ 
+ static int dummy_pcm_close(struct snd_pcm_substream *substream)
+ {
+-	struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
+-	dummy->timer_ops->free(substream);
++	get_dummy_ops(substream)->free(substream);
+ 	return 0;
+ }
+ 
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index 5bc7f2e2715c..194627c6c42b 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -3998,9 +3998,9 @@ static void pin_power_callback(struct hda_codec *codec,
+ 			       struct hda_jack_callback *jack,
+ 			       bool on)
+ {
+-	if (jack && jack->tbl->nid)
++	if (jack && jack->nid)
+ 		sync_power_state_change(codec,
+-					set_pin_power_jack(codec, jack->tbl->nid, on));
++					set_pin_power_jack(codec, jack->nid, on));
+ }
+ 
+ /* callback only doing power up -- called at first */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 09920ba55ba1..69093ce34231 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1976,10 +1976,10 @@ static void azx_remove(struct pci_dev *pci)
+ 	struct hda_intel *hda;
+ 
+ 	if (card) {
+-		/* flush the pending probing work */
++		/* cancel the pending probing work */
+ 		chip = card->private_data;
+ 		hda = container_of(chip, struct hda_intel, chip);
+-		flush_work(&hda->probe_work);
++		cancel_work_sync(&hda->probe_work);
+ 
+ 		snd_card_free(card);
+ 	}
+diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
+index d7cfe7b8c32b..52cc36758dd4 100644
+--- a/sound/pci/hda/hda_jack.c
++++ b/sound/pci/hda/hda_jack.c
+@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
+ 		if (!callback)
+ 			return ERR_PTR(-ENOMEM);
+ 		callback->func = func;
+-		callback->tbl = jack;
++		callback->nid = jack->nid;
+ 		callback->next = jack->callback;
+ 		jack->callback = callback;
+ 	}
+diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
+index b279e327a23b..a13c11c3ddbb 100644
+--- a/sound/pci/hda/hda_jack.h
++++ b/sound/pci/hda/hda_jack.h
+@@ -21,7 +21,7 @@ struct hda_jack_callback;
+ typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
+ 
+ struct hda_jack_callback {
+-	struct hda_jack_tbl *tbl;
++	hda_nid_t nid;
+ 	hda_jack_callback_fn func;
+ 	unsigned int private_data;	/* arbitrary data */
+ 	struct hda_jack_callback *next;
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 4a4e7b282e4f..0374bd5b61c8 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4401,13 +4401,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
+ static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+ {
+ 	struct ca0132_spec *spec = codec->spec;
++	struct hda_jack_tbl *tbl;
+ 
+ 	/* Delay enabling the HP amp, to let the mic-detection
+ 	 * state machine run.
+ 	 */
+ 	cancel_delayed_work_sync(&spec->unsol_hp_work);
+ 	schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
+-	cb->tbl->block_report = 1;
++	tbl = snd_hda_jack_tbl_get(codec, cb->nid);
++	if (tbl)
++		tbl->block_report = 1;
+ }
+ 
+ static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d02eccd51f6e..51d519554744 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -433,7 +433,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
+ 	eld = &per_pin->sink_eld;
+ 
+ 	mutex_lock(&per_pin->lock);
+-	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
++	if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
++	    eld->eld_size > ELD_MAX_SIZE) {
+ 		mutex_unlock(&per_pin->lock);
+ 		snd_BUG();
+ 		return -EINVAL;
+@@ -1178,7 +1179,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
+ static void jack_callback(struct hda_codec *codec,
+ 			  struct hda_jack_callback *jack)
+ {
+-	check_presence_and_report(codec, jack->tbl->nid);
++	check_presence_and_report(codec, jack->nid);
+ }
+ 
+ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 8189f02f8446..df34c78a6ced 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -277,7 +277,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
+ 	uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
+ 	if (!uctl)
+ 		return;
+-	val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
++	val = snd_hda_codec_read(codec, jack->nid, 0,
+ 				 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
+ 	val &= HDA_AMP_VOLMASK;
+ 	uctl->value.integer.value[0] = val;
+@@ -1792,7 +1792,6 @@ enum {
+ 	ALC882_FIXUP_NO_PRIMARY_HP,
+ 	ALC887_FIXUP_ASUS_BASS,
+ 	ALC887_FIXUP_BASS_CHMAP,
+-	ALC882_FIXUP_DISABLE_AAMIX,
+ };
+ 
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -1954,8 +1953,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
+ 
+ static void alc_fixup_bass_chmap(struct hda_codec *codec,
+ 				 const struct hda_fixup *fix, int action);
+-static void alc_fixup_disable_aamix(struct hda_codec *codec,
+-				    const struct hda_fixup *fix, int action);
+ 
+ static const struct hda_fixup alc882_fixups[] = {
+ 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
+@@ -2193,10 +2190,6 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
+-	[ALC882_FIXUP_DISABLE_AAMIX] = {
+-		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc_fixup_disable_aamix,
+-	},
+ };
+ 
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2235,6 +2228,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ 	SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ 	SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
++	SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
+ 
+ 	/* All Apple entries are in codec SSIDs */
+ 	SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
+@@ -2264,7 +2258,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 8e7d4c087a7a..840178a26a6b 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
+ 	if (!spec->num_pwrs)
+ 		return;
+ 
+-	if (jack && jack->tbl->nid) {
+-		stac_toggle_power_map(codec, jack->tbl->nid,
+-				      snd_hda_jack_detect(codec, jack->tbl->nid),
++	if (jack && jack->nid) {
++		stac_toggle_power_map(codec, jack->nid,
++				      snd_hda_jack_detect(codec, jack->nid),
+ 				      true);
+ 		return;
+ 	}
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index 2ee44abd56a6..6cbd03a5e53d 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -487,7 +487,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
+ 
+ 	/* IN1/IN2 Control */
+ 	SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
+-		RT5645_BST_SFT1, 8, 0, bst_tlv),
++		RT5645_BST_SFT1, 12, 0, bst_tlv),
+ 	SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
+ 		RT5645_BST_SFT2, 8, 0, bst_tlv),
+ 
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 35fe58f4fa86..52fe7eb2dea1 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1661,7 +1661,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
+ 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
+-		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
++		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
+ 			continue;
+ 
+ 		dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index bec63e0d2605..f059326a4914 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -2451,7 +2451,6 @@ int snd_usbmidi_create(struct snd_card *card,
+ 	else
+ 		err = snd_usbmidi_create_endpoints(umidi, endpoints);
+ 	if (err < 0) {
+-		snd_usbmidi_free(umidi);
+ 		return err;
+ 	}
+ 
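
A note on the snd_timer_user_read() hunk at the top of this patch, since it
illustrates a common lock/copy ordering fix: the queue head index is advanced
while tu->qlock is still held and a private snapshot of the old value is kept,
so that copy_to_user() -- which may fault and sleep -- runs only after the lock
is dropped, and concurrent producers never observe a half-updated index. A
rough stand-alone sketch of the same pattern, with hypothetical names
(struct ring, ring_read_one, QUEUE_SIZE) and a pthread mutex standing in for
the kernel spinlock, might look like this; it is an illustration of the
technique, not the kernel code itself:

/*
 * Hypothetical sketch of the snapshot-then-copy consumer pattern:
 * advance the head under the lock, keep a private copy of the old
 * index, and do the slow copy with the lock dropped.
 */
#include <pthread.h>
#include <string.h>

#define QUEUE_SIZE 64

struct ring {
	pthread_mutex_t lock;
	int head;               /* next slot to read */
	int used;               /* filled slots      */
	int queue[QUEUE_SIZE];  /* payload           */
};

/* Copy one element out; returns 0 on success, -1 if the ring is empty. */
static int ring_read_one(struct ring *r, int *out)
{
	int snapshot;

	pthread_mutex_lock(&r->lock);
	if (r->used == 0) {
		pthread_mutex_unlock(&r->lock);
		return -1;
	}
	/* Advance the head under the lock, keeping a private snapshot. */
	snapshot = r->head;
	r->head = (r->head + 1) % QUEUE_SIZE;
	pthread_mutex_unlock(&r->lock);

	/*
	 * The slow copy (copy_to_user() in the kernel) runs with the
	 * lock dropped; only the private snapshot index is used here,
	 * so producers can append concurrently.
	 */
	memcpy(out, &r->queue[snapshot], sizeof(*out));

	pthread_mutex_lock(&r->lock);
	r->used--;
	pthread_mutex_unlock(&r->lock);
	return 0;
}

int main(void)
{
	struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int v;

	r.queue[0] = 42;
	r.used = 1;
	return ring_read_one(&r, &v) == 0 && v == 42 ? 0 : 1;
}

The key point is that only the private snapshot is dereferenced outside the
lock; the shared head and used counters are touched exclusively under it,
which is exactly what the reordered hunk restores.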


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-02-16 15:28 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-02-16 15:28 UTC (permalink / raw
  To: gentoo-commits

commit:     85527ab7ede917abc023e0e77dabb8500b8b3cf1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Feb 16 15:28:51 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Feb 16 15:28:51 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=85527ab7

Linux patch 4.1.18

 0000_README             |     4 +
 1017_linux-4.1.18.patch | 11807 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11811 insertions(+)

diff --git a/0000_README b/0000_README
index 8b9fa0f..ed66531 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1016_linux-4.1.17.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.17
 
+Patch:  1017_linux-4.1.18.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.18
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1017_linux-4.1.18.patch b/1017_linux-4.1.18.patch
new file mode 100644
index 0000000..ef682b7
--- /dev/null
+++ b/1017_linux-4.1.18.patch
@@ -0,0 +1,11807 @@
+diff --git a/Makefile b/Makefile
+index d398dd440bc9..001375cfd815 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 17
++SUBLEVEL = 18
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
+index 0c12ffb155a2..f775d7161ffb 100644
+--- a/arch/arm/Kconfig.debug
++++ b/arch/arm/Kconfig.debug
+@@ -161,10 +161,9 @@ choice
+ 		  mobile SoCs in the Kona family of chips (e.g. bcm28155,
+ 		  bcm11351, etc...)
+ 
+-	config DEBUG_BCM63XX
++	config DEBUG_BCM63XX_UART
+ 		bool "Kernel low-level debugging on BCM63XX UART"
+ 		depends on ARCH_BCM_63XX
+-		select DEBUG_UART_BCM63XX
+ 
+ 	config DEBUG_BERLIN_UART
+ 		bool "Marvell Berlin SoC Debug UART"
+@@ -1304,7 +1303,7 @@ config DEBUG_LL_INCLUDE
+ 	default "debug/vf.S" if DEBUG_VF_UART
+ 	default "debug/vt8500.S" if DEBUG_VT8500_UART0
+ 	default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
+-	default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
++	default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
+ 	default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
+ 	default "mach/debug-macro.S"
+ 
+@@ -1320,10 +1319,6 @@ config DEBUG_UART_8250
+ 		ARCH_IOP33X || ARCH_IXP4XX || \
+ 		ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+ 
+-# Compatibility options for BCM63xx
+-config DEBUG_UART_BCM63XX
+-	def_bool ARCH_BCM_63XX
+-
+ config DEBUG_UART_PHYS
+ 	hex "Physical base address of debug UART"
+ 	default 0x00100a00 if DEBUG_NETX_UART
+@@ -1415,7 +1410,7 @@ config DEBUG_UART_PHYS
+ 	default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
+ 	default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
+ 	default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
+-	default 0xfffe8600 if DEBUG_UART_BCM63XX
++	default 0xfffe8600 if DEBUG_BCM63XX_UART
+ 	default 0xfffff700 if ARCH_IOP33X
+ 	depends on ARCH_EP93XX || \
+ 	        DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+@@ -1427,7 +1422,7 @@ config DEBUG_UART_PHYS
+ 		DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
+ 		DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
+ 		DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
+-		DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++		DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ 		DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+ 
+ config DEBUG_UART_VIRT
+@@ -1466,7 +1461,7 @@ config DEBUG_UART_VIRT
+ 	default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
+ 	default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ 	default 0xfc40ab00 if DEBUG_BRCMSTB_UART
+-	default 0xfcfe8600 if DEBUG_UART_BCM63XX
++	default 0xfcfe8600 if DEBUG_BCM63XX_UART
+ 	default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ 	default 0xfd000000 if ARCH_SPEAR13XX
+ 	default 0xfd012000 if ARCH_MV78XX0
+@@ -1516,7 +1511,7 @@ config DEBUG_UART_VIRT
+ 		DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
+ 		DEBUG_NETX_UART || \
+ 		DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
+-		DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
++		DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
+ 		DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
+ 
+ config DEBUG_UART_8250_SHIFT
+diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
+index 78514ab0b47a..757ac079e7f2 100644
+--- a/arch/arm/boot/dts/armada-388-gp.dts
++++ b/arch/arm/boot/dts/armada-388-gp.dts
+@@ -288,16 +288,6 @@
+ 		gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
+ 	};
+ 
+-	reg_usb2_1_vbus: v5-vbus1 {
+-		compatible = "regulator-fixed";
+-		regulator-name = "v5.0-vbus1";
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
+-		enable-active-high;
+-		regulator-always-on;
+-		gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
+-	};
+-
+ 	reg_sata0: pwr-sata0 {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "pwr_en_sata0";
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index c740e1a2a3a5..4f29968076ce 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -98,7 +98,7 @@
+ 
+ 				phy0: ethernet-phy@1 {
+ 					interrupt-parent = <&pioE>;
+-					interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
++					interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ 					reg = <1>;
+ 				};
+ 			};
+diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
+index 45e7761b7a29..d4d24a081404 100644
+--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
+@@ -141,8 +141,15 @@
+ 			};
+ 
+ 			macb0: ethernet@f8020000 {
++				pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
+ 				phy-mode = "rmii";
+ 				status = "okay";
++
++				ethernet-phy@1 {
++					reg = <0x1>;
++					interrupt-parent = <&pioE>;
++					interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
++				};
+ 			};
+ 
+ 			mmc1: mmc@fc000000 {
+@@ -174,6 +181,10 @@
+ 
+ 			pinctrl@fc06a000 {
+ 				board {
++					pinctrl_macb0_phy_irq: macb0_phy_irq {
++						atmel,pins =
++							<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++					};
+ 					pinctrl_mmc0_cd: mmc0_cd {
+ 						atmel,pins =
+ 							<AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 9cf0ab62db7d..cf11660f35a1 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1219,7 +1219,7 @@
+ 			dbgu: serial@fc069000 {
+ 				compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
+ 				reg = <0xfc069000 0x200>;
+-				interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
++				interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&pinctrl_dbgu>;
+ 				clocks = <&dbgu_clk>;
+diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+index f182f6538e90..89ed9b45d533 100644
+--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
++++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+@@ -122,22 +122,14 @@
+ 			};
+ 			mmcsd_default_mode: mmcsd_default {
+ 				mmcsd_default_cfg1 {
+-					/* MCCLK */
+-					pins = "GPIO8_B10";
+-					ste,output = <0>;
+-				};
+-				mmcsd_default_cfg2 {
+-					/* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
+-					pins = "GPIO10_C11", "GPIO15_A12",
+-					"GPIO16_C13", "GPIO23_D15";
+-					ste,output = <1>;
+-				};
+-				mmcsd_default_cfg3 {
+-					/* MCCMD, MCDAT3-0, MCMSFBCLK */
+-					pins = "GPIO9_A10", "GPIO11_B11",
+-					"GPIO12_A11", "GPIO13_C12",
+-					"GPIO14_B12", "GPIO24_C15";
+-					ste,input = <1>;
++					/*
++					 * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
++					 * MCCMD, MCDAT3-0, MCMSFBCLK
++					 */
++					pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
++					       "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
++					       "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
++					ste,output = <2>;
+ 				};
+ 			};
+ 		};
+@@ -802,10 +794,21 @@
+ 			clock-names = "mclk", "apb_pclk";
+ 			interrupt-parent = <&vica>;
+ 			interrupts = <22>;
+-			max-frequency = <48000000>;
++			max-frequency = <400000>;
+ 			bus-width = <4>;
+ 			cap-mmc-highspeed;
+ 			cap-sd-highspeed;
++			full-pwr-cycle;
++			/*
++			 * The STw4811 circuit used with the Nomadik strictly
++			 * requires that all of these signal direction pins be
++			 * routed and used for its 4-bit levelshifter.
++			 */
++			st,sig-dir-dat0;
++			st,sig-dir-dat2;
++			st,sig-dir-dat31;
++			st,sig-dir-cmd;
++			st,sig-pin-fbclk;
+ 			pinctrl-names = "default";
+ 			pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
+ 			vmmc-supply = <&vmmc_regulator>;
+diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
+index eafd120b53f1..8e2a7acb823b 100644
+--- a/arch/arm/mach-omap2/sleep34xx.S
++++ b/arch/arm/mach-omap2/sleep34xx.S
+@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
+ 	stmfd	sp!, {lr}	@ save registers on stack
+ 	/* Setup so that we will disable and enable l2 */
+ 	mov	r1, #0x1
+-	adrl	r2, l2dis_3630	@ may be too distant for plain adr
+-	str	r1, [r2]
++	adrl	r3, l2dis_3630_offset	@ may be too distant for plain adr
++	ldr	r2, [r3]		@ value for offset
++	str	r1, [r2, r3]		@ write to l2dis_3630
+ 	ldmfd	sp!, {pc}	@ restore regs and return
+ ENDPROC(enable_omap3630_toggle_l2_on_restore)
+ 
+-	.text
+-/* Function to call rom code to save secure ram context */
++/*
++ * Function to call rom code to save secure ram context. This gets
++ * relocated to SRAM, so it can be all in .data section. Otherwise
++ * we need to initialize api_params separately.
++ */
++	.data
+ 	.align	3
+ ENTRY(save_secure_ram_context)
+ 	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
+@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
+ ENTRY(save_secure_ram_context_sz)
+ 	.word	. - save_secure_ram_context
+ 
++	.text
++
+ /*
+  * ======================
+  * == Idle entry point ==
+@@ -289,12 +296,6 @@ wait_sdrc_ready:
+ 	bic	r5, r5, #0x40
+ 	str	r5, [r4]
+ 
+-/*
+- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
+- * base instead.
+- * Be careful not to clobber r7 when maintaing this code.
+- */
+-
+ is_dll_in_lock_mode:
+ 	/* Is dll in lock mode? */
+ 	ldr	r4, sdrc_dlla_ctrl
+@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
+ 	tst	r5, #0x4
+ 	bne	exit_nonoff_modes	@ Return if locked
+ 	/* wait till dll locks */
+-	adr	r7, kick_counter
+ wait_dll_lock_timed:
+-	ldr	r4, wait_dll_lock_counter
+-	add	r4, r4, #1
+-	str	r4, [r7, #wait_dll_lock_counter - kick_counter]
+ 	ldr	r4, sdrc_dlla_status
+ 	/* Wait 20uS for lock */
+ 	mov	r6, #8
+@@ -330,9 +327,6 @@ kick_dll:
+ 	orr	r6, r6, #(1<<3)		@ enable dll
+ 	str	r6, [r4]
+ 	dsb
+-	ldr	r4, kick_counter
+-	add	r4, r4, #1
+-	str	r4, [r7]		@ kick_counter
+ 	b	wait_dll_lock_timed
+ 
+ exit_nonoff_modes:
+@@ -360,15 +354,6 @@ sdrc_dlla_status:
+ 	.word	SDRC_DLLA_STATUS_V
+ sdrc_dlla_ctrl:
+ 	.word	SDRC_DLLA_CTRL_V
+-	/*
+-	 * When exporting to userspace while the counters are in SRAM,
+-	 * these 2 words need to be at the end to facilitate retrival!
+-	 */
+-kick_counter:
+-	.word	0
+-wait_dll_lock_counter:
+-	.word	0
+-
+ ENTRY(omap3_do_wfi_sz)
+ 	.word	. - omap3_do_wfi
+ 
+@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
+ 	cmp	r2, #0x0	@ Check if target power state was OFF or RET
+ 	bne	logic_l1_restore
+ 
+-	ldr	r0, l2dis_3630
++	adr	r1, l2dis_3630_offset	@ address for offset
++	ldr	r0, [r1]		@ value for offset
++	ldr	r0, [r1, r0]		@ value at l2dis_3630
+ 	cmp	r0, #0x1	@ should we disable L2 on 3630?
+ 	bne	skipl2dis
+ 	mrc	p15, 0, r0, c1, c0, 1
+@@ -506,7 +493,9 @@ l2_inv_gp:
+ 	mov	r12, #0x2
+ 	smc	#0			@ Call SMI monitor (smieq)
+ logic_l1_restore:
+-	ldr	r1, l2dis_3630
++	adr	r0, l2dis_3630_offset	@ adress for offset
++	ldr	r1, [r0]		@ value for offset
++	ldr	r1, [r0, r1]		@ value at l2dis_3630
+ 	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
+ 	bne	skipl2reen
+ 	mrc	p15, 0, r1, c1, c0, 1
+@@ -535,6 +524,10 @@ control_stat:
+ 	.word	CONTROL_STAT
+ control_mem_rta:
+ 	.word	CONTROL_MEM_RTA_CTRL
++l2dis_3630_offset:
++	.long	l2dis_3630 - .
++
++	.data
+ l2dis_3630:
+ 	.word	0
+ 
+diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
+index ad1bb9431e94..5373a3281779 100644
+--- a/arch/arm/mach-omap2/sleep44xx.S
++++ b/arch/arm/mach-omap2/sleep44xx.S
+@@ -29,12 +29,6 @@
+ 	dsb
+ .endm
+ 
+-ppa_zero_params:
+-	.word		0x0
+-
+-ppa_por_params:
+-	.word		1, 0
+-
+ #ifdef CONFIG_ARCH_OMAP4
+ 
+ /*
+@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
+ 	beq	skip_ns_smp_enable
+ ppa_actrl_retry:
+ 	mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
+-	adr	r3, ppa_zero_params		@ Pointer to parameters
++	adr	r1, ppa_zero_params_offset
++	ldr	r3, [r1]
++	add	r3, r3, r1			@ Pointer to ppa_zero_params
+ 	mov	r1, #0x0			@ Process ID
+ 	mov	r2, #0x4			@ Flag
+ 	mov	r6, #0xff
+@@ -303,7 +299,9 @@ skip_ns_smp_enable:
+ 	ldr     r0, =OMAP4_PPA_L2_POR_INDEX
+ 	ldr     r1, =OMAP44XX_SAR_RAM_BASE
+ 	ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
+-	adr     r3, ppa_por_params
++	adr     r1, ppa_por_params_offset
++	ldr	r3, [r1]
++	add	r3, r3, r1			@ Pointer to ppa_por_params
+ 	str     r4, [r3, #0x04]
+ 	mov	r1, #0x0			@ Process ID
+ 	mov	r2, #0x4			@ Flag
+@@ -328,6 +326,8 @@ skip_l2en:
+ #endif
+ 
+ 	b	cpu_resume			@ Jump to generic resume
++ppa_por_params_offset:
++	.long	ppa_por_params - .
+ ENDPROC(omap4_cpu_resume)
+ #endif	/* CONFIG_ARCH_OMAP4 */
+ 
+@@ -382,4 +382,13 @@ ENTRY(omap_do_wfi)
+ 	nop
+ 
+ 	ldmfd	sp!, {pc}
++ppa_zero_params_offset:
++	.long	ppa_zero_params - .
+ ENDPROC(omap_do_wfi)
++
++	.data
++ppa_zero_params:
++	.word		0
++
++ppa_por_params:
++	.word		1, 0
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 36aa31ff2c06..cc7435c9676e 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -566,9 +566,14 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
+ #endif
+ 
+ 	/* EL2 debug */
++	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
++	sbfx	x0, x0, #8, #4
++	cmp	x0, #1
++	b.lt	4f				// Skip if no PMU present
+ 	mrs	x0, pmcr_el0			// Disable debug access traps
+ 	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
+ 	msr	mdcr_el2, x0			// all PMU counters from EL1
++4:
+ 
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index 7778453762d8..b67b01cb5109 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -1242,9 +1242,6 @@ static void armv8pmu_reset(void *info)
+ 
+ 	/* Initialize & Reset PMNC: C and P bits. */
+ 	armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
+-
+-	/* Disable access from userspace. */
+-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+ }
+ 
+ static int armv8_pmuv3_map_event(struct perf_event *event)
+diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
+index e47ed1c5dce1..545710f854f8 100644
+--- a/arch/arm64/mm/pageattr.c
++++ b/arch/arm64/mm/pageattr.c
+@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
+ 	if (end < MODULES_VADDR || end >= MODULES_END)
+ 		return -EINVAL;
+ 
++	if (!numpages)
++		return 0;
++
+ 	data.set_mask = set_mask;
+ 	data.clear_mask = clear_mask;
+ 
+diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
+index 4c4d93c4bf65..d69dffffaa89 100644
+--- a/arch/arm64/mm/proc-macros.S
++++ b/arch/arm64/mm/proc-macros.S
+@@ -62,3 +62,15 @@
+ 	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ #endif
+ 	.endm
++
++/*
++ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
++ */
++	.macro	reset_pmuserenr_el0, tmpreg
++	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
++	sbfx	\tmpreg, \tmpreg, #8, #4
++	cmp	\tmpreg, #1			// Skip if no PMU present
++	b.lt	9000f
++	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
++9000:
++	.endm
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index cdd754e19b9b..d253908a988d 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -165,6 +165,7 @@ ENTRY(cpu_do_resume)
+ 	 */
+ 	ubfx	x11, x11, #1, #1
+ 	msr	oslar_el1, x11
++	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	mov	x0, x12
+ 	dsb	nsh		// Make sure local tlb invalidation completed
+ 	isb
+@@ -202,7 +203,9 @@ ENTRY(__cpu_setup)
+ 
+ 	mov	x0, #3 << 20
+ 	msr	cpacr_el1, x0			// Enable FP/ASIMD
+-	msr	mdscr_el1, xzr			// Reset mdscr_el1
++	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
++	msr	mdscr_el1, x0			// access to the DCC from EL0
++	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+ 	/*
+ 	 * Memory region attributes for LPAE:
+ 	 *
+diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
+index 0392112a5d70..a5ecef7188ba 100644
+--- a/arch/m32r/kernel/setup.c
++++ b/arch/m32r/kernel/setup.c
+@@ -81,7 +81,10 @@ static struct resource code_resource = {
+ };
+ 
+ unsigned long memory_start;
++EXPORT_SYMBOL(memory_start);
++
+ unsigned long memory_end;
++EXPORT_SYMBOL(memory_end);
+ 
+ void __init setup_arch(char **);
+ int get_cpuinfo(char *);
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 70f6e7f073b0..7fe24aef7fdc 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
+ static inline pte_t pte_mkyoung(pte_t pte)
+ {
+ 	pte_val(pte) |= _PAGE_ACCESSED;
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (!(pte_val(pte) & _PAGE_NO_READ))
+ 		pte_val(pte) |= _PAGE_SILENT_READ;
+ 	else
+@@ -558,7 +558,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
+ {
+ 	pmd_val(pmd) |= _PAGE_ACCESSED;
+ 
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (!(pmd_val(pmd) & _PAGE_NO_READ))
+ 		pmd_val(pmd) |= _PAGE_SILENT_READ;
+ 	else
+diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
+index 97c87027c17f..90b0e8316790 100644
+--- a/arch/mips/mm/tlbex.c
++++ b/arch/mips/mm/tlbex.c
+@@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
+ 	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ 	pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
+ #endif
+-#ifdef CONFIG_CPU_MIPSR2
++#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ 	if (cpu_has_rixi) {
+ #ifdef _PAGE_NO_EXEC_SHIFT
+ 		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+diff --git a/arch/parisc/include/uapi/asm/siginfo.h b/arch/parisc/include/uapi/asm/siginfo.h
+index d7034728f377..1c75565d984b 100644
+--- a/arch/parisc/include/uapi/asm/siginfo.h
++++ b/arch/parisc/include/uapi/asm/siginfo.h
+@@ -1,6 +1,10 @@
+ #ifndef _PARISC_SIGINFO_H
+ #define _PARISC_SIGINFO_H
+ 
++#if defined(__LP64__)
++#define __ARCH_SI_PREAMBLE_SIZE   (4 * sizeof(int))
++#endif
++
+ #include <asm-generic/siginfo.h>
+ 
+ #undef NSIGTRAP
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 35f0b62259bb..22f6d954ef89 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -861,32 +861,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
+ const char *eeh_pe_loc_get(struct eeh_pe *pe)
+ {
+ 	struct pci_bus *bus = eeh_pe_bus_get(pe);
+-	struct device_node *dn = pci_bus_to_OF_node(bus);
++	struct device_node *dn;
+ 	const char *loc = NULL;
+ 
+-	if (!dn)
+-		goto out;
++	while (bus) {
++		dn = pci_bus_to_OF_node(bus);
++		if (!dn) {
++			bus = bus->parent;
++			continue;
++		}
+ 
+-	/* PHB PE or root PE ? */
+-	if (pci_is_root_bus(bus)) {
+-		loc = of_get_property(dn, "ibm,loc-code", NULL);
+-		if (!loc)
++		if (pci_is_root_bus(bus))
+ 			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
++		else
++			loc = of_get_property(dn, "ibm,slot-location-code",
++					      NULL);
++
+ 		if (loc)
+-			goto out;
++			return loc;
+ 
+-		/* Check the root port */
+-		dn = dn->child;
+-		if (!dn)
+-			goto out;
++		bus = bus->parent;
+ 	}
+ 
+-	loc = of_get_property(dn, "ibm,loc-code", NULL);
+-	if (!loc)
+-		loc = of_get_property(dn, "ibm,slot-location-code", NULL);
+-
+-out:
+-	return loc ? loc : "N/A";
++	return "N/A";
+ }
+ 
+ /**
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index ffd98b2bfa16..f8338e6d3dd7 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2047,7 +2047,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 
+ 	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
+ 2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
+-	rlwimi	r5, r4, 1, DAWRX_WT
++	rlwimi	r5, r4, 2, DAWRX_WT
+ 	clrrdi	r4, r4, 3
+ 	std	r4, VCPU_DAWR(r3)
+ 	std	r5, VCPU_DAWRX(r3)
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index ac3ddf115f3d..c8fe9ab10792 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -915,21 +915,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ 				r = -ENXIO;
+ 				break;
+ 			}
+-			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
++			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ 			break;
+ 		case KVM_REG_PPC_VSCR:
+ 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ 				r = -ENXIO;
+ 				break;
+ 			}
+-			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
++			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ 			break;
+ 		case KVM_REG_PPC_VRSAVE:
+-			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+-				r = -ENXIO;
+-				break;
+-			}
+-			vcpu->arch.vrsave = set_reg_val(reg->id, val);
++			val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ 			break;
+ #endif /* CONFIG_ALTIVEC */
+ 		default:
+@@ -970,17 +966,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+ 				r = -ENXIO;
+ 				break;
+ 			}
+-			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
++			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ 			break;
+ 		case KVM_REG_PPC_VSCR:
+ 			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ 				r = -ENXIO;
+ 				break;
+ 			}
+-			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
++			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ 			break;
+ 		case KVM_REG_PPC_VRSAVE:
+-			val = get_reg_val(reg->id, vcpu->arch.vrsave);
++			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
++				r = -ENXIO;
++				break;
++			}
++			vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ 			break;
+ #endif /* CONFIG_ALTIVEC */
+ 		default:
+diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
+index 4d1ee88864e8..18c8b819b0aa 100644
+--- a/arch/s390/mm/extable.c
++++ b/arch/s390/mm/extable.c
+@@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
+ 	int i;
+ 
+ 	/* Normalize entries to being relative to the start of the section */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn += i;
++		p->fixup += i + 4;
++	}
+ 	sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
+ 	/* Denormalize all entries */
+-	for (p = start, i = 0; p < finish; p++, i += 8)
++	for (p = start, i = 0; p < finish; p++, i += 8) {
+ 		p->insn -= i;
++		p->fixup -= i + 4;
++	}
+ }
+ 
+ #ifdef CONFIG_MODULES
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 30e7ddb27a3a..c690c8e16a96 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -413,7 +413,7 @@ out:
+ 
+ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
+ {
+-	int ret;
++	long ret;
+ 
+ 	if (personality(current->personality) == PER_LINUX32 &&
+ 	    personality(personality) == PER_LINUX)
+diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
+index 47f1ff056a54..22a358ef1b0c 100644
+--- a/arch/um/os-Linux/start_up.c
++++ b/arch/um/os-Linux/start_up.c
+@@ -94,6 +94,8 @@ static int start_ptraced_child(void)
+ {
+ 	int pid, n, status;
+ 
++	fflush(stdout);
++
+ 	pid = fork();
+ 	if (pid == 0)
+ 		ptrace_child();
+diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
+index 78f0c8cbe316..74fcdf3f1534 100644
+--- a/arch/x86/include/asm/pgtable_types.h
++++ b/arch/x86/include/asm/pgtable_types.h
+@@ -337,20 +337,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
+ }
+ static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
+ {
++	pgprotval_t val = pgprot_val(pgprot);
+ 	pgprot_t new;
+-	unsigned long val;
+ 
+-	val = pgprot_val(pgprot);
+ 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ 		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+ 	return new;
+ }
+ static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
+ {
++	pgprotval_t val = pgprot_val(pgprot);
+ 	pgprot_t new;
+-	unsigned long val;
+ 
+-	val = pgprot_val(pgprot);
+ 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
+ 			  ((val & _PAGE_PAT_LARGE) >>
+ 			   (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 89af288ec674..2dd9b3ad3bb5 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -33,7 +33,7 @@ struct cpa_data {
+ 	pgd_t		*pgd;
+ 	pgprot_t	mask_set;
+ 	pgprot_t	mask_clr;
+-	int		numpages;
++	unsigned long	numpages;
+ 	int		flags;
+ 	unsigned long	pfn;
+ 	unsigned	force_split : 1;
+@@ -1324,7 +1324,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
+ 		 * CPA operation. Either a large page has been
+ 		 * preserved or a single page update happened.
+ 		 */
+-		BUG_ON(cpa->numpages > numpages);
++		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
+ 		numpages -= cpa->numpages;
+ 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+ 			cpa->curpage++;
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index f22cc56fd1b3..9641b74b53ef 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -76,6 +76,8 @@ int af_alg_register_type(const struct af_alg_type *type)
+ 		goto unlock;
+ 
+ 	type->ops->owner = THIS_MODULE;
++	if (type->ops_nokey)
++		type->ops_nokey->owner = THIS_MODULE;
+ 	node->type = type;
+ 	list_add(&node->list, &alg_types);
+ 	err = 0;
+@@ -125,6 +127,26 @@ int af_alg_release(struct socket *sock)
+ }
+ EXPORT_SYMBOL_GPL(af_alg_release);
+ 
++void af_alg_release_parent(struct sock *sk)
++{
++	struct alg_sock *ask = alg_sk(sk);
++	unsigned int nokey = ask->nokey_refcnt;
++	bool last = nokey && !ask->refcnt;
++
++	sk = ask->parent;
++	ask = alg_sk(sk);
++
++	lock_sock(sk);
++	ask->nokey_refcnt -= nokey;
++	if (!last)
++		last = !--ask->refcnt;
++	release_sock(sk);
++
++	if (last)
++		sock_put(sk);
++}
++EXPORT_SYMBOL_GPL(af_alg_release_parent);
++
+ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sock *sk = sock->sk;
+@@ -132,6 +154,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 	struct sockaddr_alg *sa = (void *)uaddr;
+ 	const struct af_alg_type *type;
+ 	void *private;
++	int err;
+ 
+ 	if (sock->state == SS_CONNECTED)
+ 		return -EINVAL;
+@@ -157,16 +180,22 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ 		return PTR_ERR(private);
+ 	}
+ 
++	err = -EBUSY;
+ 	lock_sock(sk);
++	if (ask->refcnt | ask->nokey_refcnt)
++		goto unlock;
+ 
+ 	swap(ask->type, type);
+ 	swap(ask->private, private);
+ 
++	err = 0;
++
++unlock:
+ 	release_sock(sk);
+ 
+ 	alg_do_release(type, private);
+ 
+-	return 0;
++	return err;
+ }
+ 
+ static int alg_setkey(struct sock *sk, char __user *ukey,
+@@ -199,11 +228,15 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
+ 	struct sock *sk = sock->sk;
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	const struct af_alg_type *type;
+-	int err = -ENOPROTOOPT;
++	int err = -EBUSY;
+ 
+ 	lock_sock(sk);
++	if (ask->refcnt)
++		goto unlock;
++
+ 	type = ask->type;
+ 
++	err = -ENOPROTOOPT;
+ 	if (level != SOL_ALG || !type)
+ 		goto unlock;
+ 
+@@ -235,6 +268,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ 	struct alg_sock *ask = alg_sk(sk);
+ 	const struct af_alg_type *type;
+ 	struct sock *sk2;
++	unsigned int nokey;
+ 	int err;
+ 
+ 	lock_sock(sk);
+@@ -254,20 +288,29 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
+ 	security_sk_clone(sk, sk2);
+ 
+ 	err = type->accept(ask->private, sk2);
+-	if (err) {
+-		sk_free(sk2);
++
++	nokey = err == -ENOKEY;
++	if (nokey && type->accept_nokey)
++		err = type->accept_nokey(ask->private, sk2);
++
++	if (err)
+ 		goto unlock;
+-	}
+ 
+ 	sk2->sk_family = PF_ALG;
+ 
+-	sock_hold(sk);
++	if (nokey || !ask->refcnt++)
++		sock_hold(sk);
++	ask->nokey_refcnt += nokey;
+ 	alg_sk(sk2)->parent = sk;
+ 	alg_sk(sk2)->type = type;
++	alg_sk(sk2)->nokey_refcnt = nokey;
+ 
+ 	newsock->ops = type->ops;
+ 	newsock->state = SS_CONNECTED;
+ 
++	if (nokey)
++		newsock->ops = type->ops_nokey;
++
+ 	err = 0;
+ 
+ unlock:
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 9c1dc8d6106a..d19b52324cf5 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -451,6 +451,7 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ 	struct ahash_alg *alg = crypto_ahash_alg(hash);
+ 
+ 	hash->setkey = ahash_nosetkey;
++	hash->has_setkey = false;
+ 	hash->export = ahash_no_export;
+ 	hash->import = ahash_no_import;
+ 
+@@ -463,8 +464,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
+ 	hash->finup = alg->finup ?: ahash_def_finup;
+ 	hash->digest = alg->digest;
+ 
+-	if (alg->setkey)
++	if (alg->setkey) {
+ 		hash->setkey = alg->setkey;
++		hash->has_setkey = true;
++	}
+ 	if (alg->export)
+ 		hash->export = alg->export;
+ 	if (alg->import)
+diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
+index 1396ad0787fc..d7a3435280d8 100644
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -34,6 +34,11 @@ struct hash_ctx {
+ 	struct ahash_request req;
+ };
+ 
++struct algif_hash_tfm {
++	struct crypto_ahash *hash;
++	bool has_key;
++};
++
+ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+ 			size_t ignored)
+ {
+@@ -49,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
+ 
+ 	lock_sock(sk);
+ 	if (!ctx->more) {
+-		err = crypto_ahash_init(&ctx->req);
++		err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
++						&ctx->completion);
+ 		if (err)
+ 			goto unlock;
+ 	}
+@@ -120,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+ 	} else {
+ 		if (!ctx->more) {
+ 			err = crypto_ahash_init(&ctx->req);
++			err = af_alg_wait_for_completion(err, &ctx->completion);
+ 			if (err)
+ 				goto unlock;
+ 		}
+@@ -227,19 +234,151 @@ static struct proto_ops algif_hash_ops = {
+ 	.accept		=	hash_accept,
+ };
+ 
++static int hash_check_key(struct socket *sock)
++{
++	int err = 0;
++	struct sock *psk;
++	struct alg_sock *pask;
++	struct algif_hash_tfm *tfm;
++	struct sock *sk = sock->sk;
++	struct alg_sock *ask = alg_sk(sk);
++
++	lock_sock(sk);
++	if (ask->refcnt)
++		goto unlock_child;
++
++	psk = ask->parent;
++	pask = alg_sk(ask->parent);
++	tfm = pask->private;
++
++	err = -ENOKEY;
++	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++	if (!tfm->has_key)
++		goto unlock;
++
++	if (!pask->refcnt++)
++		sock_hold(psk);
++
++	ask->refcnt = 1;
++	sock_put(psk);
++
++	err = 0;
++
++unlock:
++	release_sock(psk);
++unlock_child:
++	release_sock(sk);
++
++	return err;
++}
++
++static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++			      size_t size)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_sendmsg(sock, msg, size);
++}
++
++static ssize_t hash_sendpage_nokey(struct socket *sock, struct page *page,
++				   int offset, size_t size, int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_sendpage(sock, page, offset, size, flags);
++}
++
++static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++			      size_t ignored, int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_recvmsg(sock, msg, ignored, flags);
++}
++
++static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
++			     int flags)
++{
++	int err;
++
++	err = hash_check_key(sock);
++	if (err)
++		return err;
++
++	return hash_accept(sock, newsock, flags);
++}
++
++static struct proto_ops algif_hash_ops_nokey = {
++	.family		=	PF_ALG,
++
++	.connect	=	sock_no_connect,
++	.socketpair	=	sock_no_socketpair,
++	.getname	=	sock_no_getname,
++	.ioctl		=	sock_no_ioctl,
++	.listen		=	sock_no_listen,
++	.shutdown	=	sock_no_shutdown,
++	.getsockopt	=	sock_no_getsockopt,
++	.mmap		=	sock_no_mmap,
++	.bind		=	sock_no_bind,
++	.setsockopt	=	sock_no_setsockopt,
++	.poll		=	sock_no_poll,
++
++	.release	=	af_alg_release,
++	.sendmsg	=	hash_sendmsg_nokey,
++	.sendpage	=	hash_sendpage_nokey,
++	.recvmsg	=	hash_recvmsg_nokey,
++	.accept		=	hash_accept_nokey,
++};
++
+ static void *hash_bind(const char *name, u32 type, u32 mask)
+ {
+-	return crypto_alloc_ahash(name, type, mask);
++	struct algif_hash_tfm *tfm;
++	struct crypto_ahash *hash;
++
++	tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++	if (!tfm)
++		return ERR_PTR(-ENOMEM);
++
++	hash = crypto_alloc_ahash(name, type, mask);
++	if (IS_ERR(hash)) {
++		kfree(tfm);
++		return ERR_CAST(hash);
++	}
++
++	tfm->hash = hash;
++
++	return tfm;
+ }
+ 
+ static void hash_release(void *private)
+ {
+-	crypto_free_ahash(private);
++	struct algif_hash_tfm *tfm = private;
++
++	crypto_free_ahash(tfm->hash);
++	kfree(tfm);
+ }
+ 
+ static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-	return crypto_ahash_setkey(private, key, keylen);
++	struct algif_hash_tfm *tfm = private;
++	int err;
++
++	err = crypto_ahash_setkey(tfm->hash, key, keylen);
++	tfm->has_key = !err;
++
++	return err;
+ }
+ 
+ static void hash_sock_destruct(struct sock *sk)
+@@ -253,12 +392,14 @@ static void hash_sock_destruct(struct sock *sk)
+ 	af_alg_release_parent(sk);
+ }
+ 
+-static int hash_accept_parent(void *private, struct sock *sk)
++static int hash_accept_parent_nokey(void *private, struct sock *sk)
+ {
+ 	struct hash_ctx *ctx;
+ 	struct alg_sock *ask = alg_sk(sk);
+-	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+-	unsigned ds = crypto_ahash_digestsize(private);
++	struct algif_hash_tfm *tfm = private;
++	struct crypto_ahash *hash = tfm->hash;
++	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash);
++	unsigned ds = crypto_ahash_digestsize(hash);
+ 
+ 	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ 	if (!ctx)
+@@ -278,7 +419,7 @@ static int hash_accept_parent(void *private, struct sock *sk)
+ 
+ 	ask->private = ctx;
+ 
+-	ahash_request_set_tfm(&ctx->req, private);
++	ahash_request_set_tfm(&ctx->req, hash);
+ 	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ 				   af_alg_complete, &ctx->completion);
+ 
+@@ -287,12 +428,24 @@ static int hash_accept_parent(void *private, struct sock *sk)
+ 	return 0;
+ }
+ 
++static int hash_accept_parent(void *private, struct sock *sk)
++{
++	struct algif_hash_tfm *tfm = private;
++
++	if (!tfm->has_key && crypto_ahash_has_setkey(tfm->hash))
++		return -ENOKEY;
++
++	return hash_accept_parent_nokey(private, sk);
++}
++
+ static const struct af_alg_type algif_type_hash = {
+ 	.bind		=	hash_bind,
+ 	.release	=	hash_release,
+ 	.setkey		=	hash_setkey,
+ 	.accept		=	hash_accept_parent,
++	.accept_nokey	=	hash_accept_parent_nokey,
+ 	.ops		=	&algif_hash_ops,
++	.ops_nokey	=	&algif_hash_ops_nokey,
+ 	.name		=	"hash",
+ 	.owner		=	THIS_MODULE
+ };
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 945075292bc9..5bc42f9b23f0 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -387,7 +387,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
+ 
+ 		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+ 		sg = sgl->sg;
+-		sg_unmark_end(sg + sgl->cur);
++		if (sgl->cur)
++			sg_unmark_end(sg + sgl->cur - 1);
+ 		do {
+ 			i = sgl->cur;
+ 			plen = min_t(int, len, PAGE_SIZE);
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index 24f17e6c5904..4c850ac474e2 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -307,10 +307,6 @@ static int x509_key_preparse(struct key_preparsed_payload *prep)
+ 		srlen = cert->raw_serial_size;
+ 		q = cert->raw_serial;
+ 	}
+-	if (srlen > 1 && *q == 0) {
+-		srlen--;
+-		q++;
+-	}
+ 
+ 	ret = -ENOMEM;
+ 	desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL);
+diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
+index 06f1b60f02b2..4c0a0e271876 100644
+--- a/crypto/crc32c_generic.c
++++ b/crypto/crc32c_generic.c
+@@ -172,4 +172,3 @@ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
+ MODULE_LICENSE("GPL");
+ MODULE_ALIAS_CRYPTO("crc32c");
+ MODULE_ALIAS_CRYPTO("crc32c-generic");
+-MODULE_SOFTDEP("pre: crc32c");
+diff --git a/crypto/shash.c b/crypto/shash.c
+index 47c713954bf3..03fbcd4a82c4 100644
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -354,9 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
+ 	crt->final = shash_async_final;
+ 	crt->finup = shash_async_finup;
+ 	crt->digest = shash_async_digest;
++	crt->setkey = shash_async_setkey;
++
++	crt->has_setkey = alg->setkey != shash_no_setkey;
+ 
+-	if (alg->setkey)
+-		crt->setkey = shash_async_setkey;
+ 	if (alg->export)
+ 		crt->export = shash_async_export;
+ 	if (alg->import)
+diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
+index 54d946a9eee6..6fbb10ca73b1 100644
+--- a/drivers/block/zram/zcomp.c
++++ b/drivers/block/zram/zcomp.c
+@@ -76,7 +76,7 @@ static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
+  */
+ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+ {
+-	struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
++	struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_NOIO);
+ 	if (!zstrm)
+ 		return NULL;
+ 
+@@ -85,7 +85,7 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
+ 	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
+ 	 * case when compressed size is larger than the original one
+ 	 */
+-	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
++	zstrm->buffer = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, 1);
+ 	if (!zstrm->private || !zstrm->buffer) {
+ 		zcomp_strm_free(comp, zstrm);
+ 		zstrm = NULL;
+diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
+index f2afb7e988c3..dd6083124276 100644
+--- a/drivers/block/zram/zcomp_lz4.c
++++ b/drivers/block/zram/zcomp_lz4.c
+@@ -10,17 +10,36 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/lz4.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+ 
+ #include "zcomp_lz4.h"
+ 
+ static void *zcomp_lz4_create(void)
+ {
+-	return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
++	void *ret;
++
++	/*
++	 * This function can be called in swapout/fs write path
++	 * so we can't use GFP_FS|IO. And it assumes we already
++	 * have at least one stream in zram initialization so we
++	 * don't do best effort to allocate more stream in here.
++	 * A default stream will work well without further multiple
++	 * streams. That's why we use NORETRY | NOWARN.
++	 */
++	ret = kzalloc(LZ4_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
++					__GFP_NOWARN);
++	if (!ret)
++		ret = __vmalloc(LZ4_MEM_COMPRESS,
++				GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
++				__GFP_ZERO | __GFP_HIGHMEM,
++				PAGE_KERNEL);
++	return ret;
+ }
+ 
+ static void zcomp_lz4_destroy(void *private)
+ {
+-	kfree(private);
++	kvfree(private);
+ }
+ 
+ static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
+diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
+index da1bc47d588e..edc549920fa0 100644
+--- a/drivers/block/zram/zcomp_lzo.c
++++ b/drivers/block/zram/zcomp_lzo.c
+@@ -10,17 +10,36 @@
+ #include <linux/kernel.h>
+ #include <linux/slab.h>
+ #include <linux/lzo.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
+ 
+ #include "zcomp_lzo.h"
+ 
+ static void *lzo_create(void)
+ {
+-	return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
++	void *ret;
++
++	/*
++	 * This function can be called in swapout/fs write path
++	 * so we can't use GFP_FS|IO. And it assumes we already
++	 * have at least one stream in zram initialization so we
++	 * don't do best effort to allocate more stream in here.
++	 * A default stream will work well without further multiple
++	 * streams. That's why we use NORETRY | NOWARN.
++	 */
++	ret = kzalloc(LZO1X_MEM_COMPRESS, GFP_NOIO | __GFP_NORETRY |
++					__GFP_NOWARN);
++	if (!ret)
++		ret = __vmalloc(LZO1X_MEM_COMPRESS,
++				GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN |
++				__GFP_ZERO | __GFP_HIGHMEM,
++				PAGE_KERNEL);
++	return ret;
+ }
+ 
+ static void lzo_destroy(void *private)
+ {
+-	kfree(private);
++	kvfree(private);
+ }
+ 
+ static int lzo_compress(const unsigned char *src, unsigned char *dst,
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 7bf87d9bfd7d..fdba79c3877c 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -144,6 +144,10 @@ static const struct usb_device_id btusb_table[] = {
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
+ 	  .driver_info = BTUSB_BCM_PATCHRAM },
+ 
++	/* Toshiba Corp - Broadcom based */
++	{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
++	  .driver_info = BTUSB_BCM_PATCHRAM },
++
+ 	/* Intel Bluetooth USB Bootloader (RAM module) */
+ 	{ USB_DEVICE(0x8087, 0x0a5a),
+ 	  .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
+diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
+index 1098ed3b9b89..dc45ddb36117 100644
+--- a/drivers/clocksource/vt8500_timer.c
++++ b/drivers/clocksource/vt8500_timer.c
+@@ -50,6 +50,8 @@
+ 
+ #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+ 
++#define MIN_OSCR_DELTA		16
++
+ static void __iomem *regbase;
+ 
+ static cycle_t vt8500_timer_read(struct clocksource *cs)
+@@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
+ 		cpu_relax();
+ 	writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
+ 
+-	if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
++	if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
+ 		return -ETIME;
+ 
+ 	writel(1, regbase + TIMER_IER_VAL);
+@@ -160,7 +162,7 @@ static void __init vt8500_timer_init(struct device_node *np)
+ 		pr_err("%s: setup_irq failed for %s\n", __func__,
+ 							clockevent.name);
+ 	clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
+-					4, 0xf0000000);
++					MIN_OSCR_DELTA * 2, 0xf0000000);
+ }
+ 
+ CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index c89a7abb523f..8d8c35623f2a 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -1230,6 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
+ 	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
+ 		at_xdmac_remove_xfer(atchan, desc);
+ 
++	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+ 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
+ 	spin_unlock_irqrestore(&atchan->lock, flags);
+ 
+@@ -1362,6 +1363,8 @@ static int atmel_xdmac_resume(struct device *dev)
+ 		atchan = to_at_xdmac_chan(chan);
+ 		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
+ 		if (at_xdmac_chan_is_cyclic(atchan)) {
++			if (at_xdmac_chan_is_paused(atchan))
++				at_xdmac_device_resume(chan);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
+ 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 9e504d3b0d4f..303d937d63c7 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
+ 
+ 	/* Enable interrupts */
+ 	channel_set_bit(dw, MASK.XFER, dwc->mask);
++	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
+ 
+ 	dwc->initialized = true;
+@@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
+ 
+ /* Called with dwc->lock held and all DMAC interrupts disabled */
+ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+-		u32 status_err, u32 status_xfer)
++		u32 status_block, u32 status_err, u32 status_xfer)
+ {
+ 	unsigned long flags;
+ 
+-	if (dwc->mask) {
++	if (status_block & dwc->mask) {
+ 		void (*callback)(void *param);
+ 		void *callback_param;
+ 
+ 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
+ 				channel_readl(dwc, LLP));
++		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 
+ 		callback = dwc->cdesc->period_callback;
+ 		callback_param = dwc->cdesc->period_callback_param;
+@@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
+ 		channel_writel(dwc, CTL_LO, 0);
+ 		channel_writel(dwc, CTL_HI, 0);
+ 
++		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+ 
+@@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
+ {
+ 	struct dw_dma *dw = (struct dw_dma *)data;
+ 	struct dw_dma_chan *dwc;
++	u32 status_block;
+ 	u32 status_xfer;
+ 	u32 status_err;
+ 	int i;
+ 
++	status_block = dma_readl(dw, RAW.BLOCK);
+ 	status_xfer = dma_readl(dw, RAW.XFER);
+ 	status_err = dma_readl(dw, RAW.ERROR);
+ 
+@@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
+ 	for (i = 0; i < dw->dma.chancnt; i++) {
+ 		dwc = &dw->chan[i];
+ 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
+-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
++			dwc_handle_cyclic(dw, dwc, status_block, status_err,
++					status_xfer);
+ 		else if (status_err & (1 << i))
+ 			dwc_handle_error(dw, dwc);
+ 		else if (status_xfer & (1 << i))
+@@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
+ 	 * Re-enable interrupts.
+ 	 */
+ 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
++	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ }
+ 
+@@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ 	 * softirq handler.
+ 	 */
+ 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+ 
+ 	status = dma_readl(dw, STATUS_INT);
+@@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
+ 
+ 		/* Try to recover */
+ 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
++		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
+ 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
+@@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
+ 	dma_writel(dw, CFG, 0);
+ 
+ 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
++	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
+@@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ 
+ 	/* Disable interrupts */
+ 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
++	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
+ 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
+ 
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+@@ -1245,7 +1256,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
+ int dw_dma_cyclic_start(struct dma_chan *chan)
+ {
+ 	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
+-	struct dw_dma		*dw = to_dw_dma(dwc->chan.device);
+ 	unsigned long		flags;
+ 
+ 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
+@@ -1254,27 +1264,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
+ 	}
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+-
+-	/* Assert channel is idle */
+-	if (dma_readl(dw, CH_EN) & dwc->mask) {
+-		dev_err(chan2dev(&dwc->chan),
+-			"%s: BUG: Attempted to start non-idle channel\n",
+-			__func__);
+-		dwc_dump_chan_regs(dwc);
+-		spin_unlock_irqrestore(&dwc->lock, flags);
+-		return -EBUSY;
+-	}
+-
+-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+-	dma_writel(dw, CLEAR.XFER, dwc->mask);
+-
+-	/* Setup DMAC channel registers */
+-	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
+-	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
+-	channel_writel(dwc, CTL_HI, 0);
+-
+-	channel_set_bit(dw, CH_EN, dwc->mask);
+-
++	dwc_dostart(dwc, dwc->cdesc->desc[0]);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	return 0;
+@@ -1479,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
+ 
+ 	dwc_chan_disable(dw, dwc);
+ 
++	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
+ 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
+ 	dma_writel(dw, CLEAR.XFER, dwc->mask);
+ 
+@@ -1569,9 +1560,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 	/* Force dma off, just in case */
+ 	dw_dma_off(dw);
+ 
+-	/* Disable BLOCK interrupts as well */
+-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+-
+ 	/* Create a pool of consistent memory blocks for hardware descriptors */
+ 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
+ 					 sizeof(struct dw_desc), 4, 0);
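All of the channel_set_bit()/channel_clear_bit() calls sprinkled through this fix drive one hardware idiom: the DesignWare interrupt-mask registers take a write-enable byte in bits 15:8 and the new bit values in bits 7:0, so a single write sets or clears one channel's mask bit with no read-modify-write. A minimal sketch of that encoding, written out for illustration (a reading of the driver's helpers, not a new API):

	#include <stdint.h>

	/* high byte: which channel bits this write may touch;
	 * low byte: their new state
	 */
	static inline uint32_t dw_mask_set(uint32_t chan_bit)
	{
		return (chan_bit << 8) | chan_bit;	/* unmask the channel */
	}

	static inline uint32_t dw_mask_clear(uint32_t chan_bit)
	{
		return (chan_bit << 8);			/* mask the channel */
	}

The BLOCK interrupt being re-enabled here fires once per list item, which for a cyclic (circular) transfer means once per period; with it masked, cyclic users never saw their period callbacks.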
+diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
+index 592af5f0cf39..53587377e672 100644
+--- a/drivers/edac/edac_device.c
++++ b/drivers/edac/edac_device.c
+@@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
+  */
+ void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
+ {
+-	int status;
+-
+ 	if (!edac_dev->edac_check)
+ 		return;
+ 
+-	status = cancel_delayed_work(&edac_dev->work);
+-	if (status == 0) {
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	edac_dev->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&edac_dev->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index af3be1914dbb..63ceb2d98565 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -581,18 +581,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+  */
+ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
+ {
+-	int status;
+-
+-	if (mci->op_state != OP_RUNNING_POLL)
+-		return;
+-
+-	status = cancel_delayed_work(&mci->work);
+-	if (status == 0) {
+-		edac_dbg(0, "not canceled, flush the queue\n");
++	mci->op_state = OP_OFFLINE;
+ 
+-		/* workq instance might be running, wait for it */
+-		flush_workqueue(edac_workqueue);
+-	}
++	cancel_delayed_work_sync(&mci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
+diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
+index 112d63ad1154..67dc90365389 100644
+--- a/drivers/edac/edac_mc_sysfs.c
++++ b/drivers/edac/edac_mc_sysfs.c
+@@ -977,21 +977,26 @@ nomem:
+ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
+ 				 const struct attribute_group **groups)
+ {
++	char *name;
+ 	int i, err;
+ 
+ 	/*
+ 	 * The memory controller needs its own bus, in order to avoid
+ 	 * namespace conflicts at /sys/bus/edac.
+ 	 */
+-	mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
+-	if (!mci->bus->name)
++	name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
++	if (!name)
+ 		return -ENOMEM;
+ 
++	mci->bus->name = name;
++
+ 	edac_dbg(0, "creating bus %s\n", mci->bus->name);
+ 
+ 	err = bus_register(mci->bus);
+-	if (err < 0)
+-		goto fail_free_name;
++	if (err < 0) {
++		kfree(name);
++		return err;
++	}
+ 
+ 	/* get the /sys/devices/system/edac subsys reference */
+ 	mci->dev.type = &mci_attr_type;
+@@ -1060,8 +1065,8 @@ fail_unregister_dimm:
+ 	device_unregister(&mci->dev);
+ fail_unregister_bus:
+ 	bus_unregister(mci->bus);
+-fail_free_name:
+-	kfree(mci->bus->name);
++	kfree(name);
++
+ 	return err;
+ }
+ 
+@@ -1092,10 +1097,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
+ 
+ void edac_unregister_sysfs(struct mem_ctl_info *mci)
+ {
++	const char *name = mci->bus->name;
++
+ 	edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
+ 	device_unregister(&mci->dev);
+ 	bus_unregister(mci->bus);
+-	kfree(mci->bus->name);
++	kfree(name);
+ }
+ 
+ static void mc_attr_release(struct device *dev)
+diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
+index 2cf44b4db80c..b4b38603b804 100644
+--- a/drivers/edac/edac_pci.c
++++ b/drivers/edac/edac_pci.c
+@@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
+  */
+ static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
+ {
+-	int status;
+-
+ 	edac_dbg(0, "\n");
+ 
+-	status = cancel_delayed_work(&pci->work);
+-	if (status == 0)
+-		flush_workqueue(edac_workqueue);
++	pci->op_state = OP_OFFLINE;
++
++	cancel_delayed_work_sync(&pci->work);
++	flush_workqueue(edac_workqueue);
+ }
+ 
+ /*
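The three EDAC teardown hunks above are the same fix applied to the device, memory-controller and PCI instances: cancel_delayed_work() does not wait for a handler that is already executing, and an EDAC poll handler re-queues itself, so the old code could tear down around a work item that immediately re-armed. Marking the instance offline first and then using the _sync cancel closes that race. A self-contained sketch of the pattern (hypothetical struct and names; only the workqueue calls are the real API):

	#include <linux/workqueue.h>

	enum { OP_RUNNING_POLL, OP_OFFLINE };

	struct poller {
		int op_state;
		struct delayed_work work;
	};

	static void poller_fn(struct work_struct *ws)
	{
		struct poller *p = container_of(to_delayed_work(ws),
						struct poller, work);

		if (p->op_state == OP_OFFLINE)
			return;				/* tearing down: don't re-arm */
		/* ... run the periodic check ... */
		schedule_delayed_work(&p->work, HZ);	/* self re-arming */
	}

	static void poller_teardown(struct poller *p)
	{
		p->op_state = OP_OFFLINE;		/* stop future re-arms */
		cancel_delayed_work_sync(&p->work);	/* wait out a running instance */
	}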
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 109e776345d3..0ec9ad50ba7c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -861,28 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
+ {
+ 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
++
+ 	if (!port->input) {
+ 		port->vcpi.num_slots = 0;
+ 
+ 		kfree(port->cached_edid);
+ 
+-		/* we can't destroy the connector here, as
+-		   we might be holding the mode_config.mutex
+-		   from an EDID retrieval */
++		/*
++		 * The only time we don't have a connector
++		 * on an output port is if the connector init
++		 * fails.
++		 */
+ 		if (port->connector) {
++			/* we can't destroy the connector here, as
++			 * we might be holding the mode_config.mutex
++			 * from an EDID retrieval */
++
+ 			mutex_lock(&mgr->destroy_connector_lock);
+-			list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
++			list_add(&port->next, &mgr->destroy_connector_list);
+ 			mutex_unlock(&mgr->destroy_connector_lock);
+ 			schedule_work(&mgr->destroy_connector_work);
++			return;
+ 		}
++		/* no need to clean up the vcpi,
++		 * as without a connector we never set one up */
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+-
+-		if (!port->input && port->vcpi.vcpi > 0)
+-			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+ 	}
+ 	kfree(port);
+-
+-	(*mgr->cbs->hotplug)(mgr);
+ }
+ 
+ static void drm_dp_put_port(struct drm_dp_mst_port *port)
+@@ -968,17 +973,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
+ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
+ 				 u8 *rad)
+ {
+-	int lct = port->parent->lct;
++	int parent_lct = port->parent->lct;
+ 	int shift = 4;
+-	int idx = lct / 2;
+-	if (lct > 1) {
+-		memcpy(rad, port->parent->rad, idx);
+-		shift = (lct % 2) ? 4 : 0;
++	int idx = (parent_lct - 1) / 2;
++	if (parent_lct > 1) {
++		memcpy(rad, port->parent->rad, idx + 1);
++		shift = (parent_lct % 2) ? 4 : 0;
+ 	} else
+ 		rad[0] = 0;
+ 
+ 	rad[idx] |= port->port_num << shift;
+-	return lct + 1;
++	return parent_lct + 1;
+ }
+ 
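The indexing is easier to check with the packing written out: a branch at link count total (lct) N sits N - 1 hops from the source, and hop K's port number is stored as one nibble, the high nibble for odd K and the low nibble for even K, in rad[(K - 1) / 2]. The old code derived the byte index from the lct rather than the hop number, which put the new hop's nibble in the wrong byte for even parent link counts. A standalone sketch of the layout, mirroring the driver's indexing:

	#include <stdint.h>

	static void rad_set_hop(uint8_t *rad, int hop, uint8_t port)	/* hop >= 1 */
	{
		int idx = (hop - 1) / 2;
		int shift = (hop & 1) ? 4 : 0;	/* odd hop: high nibble */

		rad[idx] |= (port & 0xf) << shift;
	}

	static uint8_t rad_get_hop(const uint8_t *rad, int hop)
	{
		int idx = (hop - 1) / 2;
		int shift = (hop & 1) ? 4 : 0;

		return (rad[idx] >> shift) & 0xf;	/* the & 0xf the later hunks add */
	}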
+ /*
+@@ -1034,7 +1039,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
+ 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
+ 	for (i = 0; i < (mstb->lct - 1); i++) {
+ 		int shift = (i % 2) ? 0 : 4;
+-		int port_num = mstb->rad[i / 2] >> shift;
++		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
+ 		snprintf(temp, sizeof(temp), "-%d", port_num);
+ 		strlcat(proppath, temp, proppath_size);
+ 	}
+@@ -1112,12 +1117,21 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
+ 		char proppath[255];
+ 		build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
+ 		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+-
++		if (!port->connector) {
++			/* remove it from the port list */
++			mutex_lock(&mstb->mgr->lock);
++			list_del(&port->next);
++			mutex_unlock(&mstb->mgr->lock);
++			/* drop port list reference */
++			drm_dp_put_port(port);
++			goto out;
++		}
+ 		if (port->port_num >= 8) {
+ 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ 		}
+ 	}
+ 
++out:
+ 	/* put reference to this port */
+ 	drm_dp_put_port(port);
+ }
+@@ -1175,7 +1189,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 
+ 	for (i = 0; i < lct - 1; i++) {
+ 		int shift = (i % 2) ? 0 : 4;
+-		int port_num = rad[i / 2] >> shift;
++		int port_num = (rad[i / 2] >> shift) & 0xf;
+ 
+ 		list_for_each_entry(port, &mstb->ports, next) {
+ 			if (port->port_num == port_num) {
+@@ -1195,6 +1209,50 @@ out:
+ 	return mstb;
+ }
+ 
++static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
++	struct drm_dp_mst_branch *mstb,
++	uint8_t *guid)
++{
++	struct drm_dp_mst_branch *found_mstb;
++	struct drm_dp_mst_port *port;
++
++	list_for_each_entry(port, &mstb->ports, next) {
++		if (!port->mstb)
++			continue;
++
++		if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
++			return port->mstb;
++
++		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
++
++		if (found_mstb)
++			return found_mstb;
++	}
++
++	return NULL;
++}
++
++static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
++	struct drm_dp_mst_topology_mgr *mgr,
++	uint8_t *guid)
++{
++	struct drm_dp_mst_branch *mstb;
++
++	/* find the port by iterating down */
++	mutex_lock(&mgr->lock);
++
++	if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
++		mstb = mgr->mst_primary;
++	else
++		mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
++
++	if (mstb)
++		kref_get(&mstb->kref);
++
++	mutex_unlock(&mgr->lock);
++	return mstb;
++}
++
+ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ 					       struct drm_dp_mst_branch *mstb)
+ {
+@@ -1306,6 +1364,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 				  struct drm_dp_sideband_msg_tx *txmsg)
+ {
+ 	struct drm_dp_mst_branch *mstb = txmsg->dst;
++	u8 req_type;
+ 
+ 	/* both msg slots are full */
+ 	if (txmsg->seqno == -1) {
+@@ -1322,7 +1381,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
+ 			txmsg->seqno = 1;
+ 		mstb->tx_slots[txmsg->seqno] = txmsg;
+ 	}
+-	hdr->broadcast = 0;
++
++	req_type = txmsg->msg[0] & 0x7f;
++	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
++		req_type == DP_RESOURCE_STATUS_NOTIFY)
++		hdr->broadcast = 1;
++	else
++		hdr->broadcast = 0;
+ 	hdr->path_msg = txmsg->path_msg;
+ 	hdr->lct = mstb->lct;
+ 	hdr->lcr = mstb->lct - 1;
+@@ -1424,26 +1489,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
+ }
+ 
+ /* called holding qlock */
+-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
++static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
++				       struct drm_dp_sideband_msg_tx *txmsg)
+ {
+-	struct drm_dp_sideband_msg_tx *txmsg;
+ 	int ret;
+ 
+ 	/* construct a chunk from the first msg in the tx_msg queue */
+-	if (list_empty(&mgr->tx_msg_upq)) {
+-		mgr->tx_up_in_progress = false;
+-		return;
+-	}
+-
+-	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
+ 	ret = process_single_tx_qlock(mgr, txmsg, true);
+-	if (ret == 1) {
+-		/* up txmsgs aren't put in slots - so free after we send it */
+-		list_del(&txmsg->next);
+-		kfree(txmsg);
+-	} else if (ret)
++
++	if (ret != 1)
+ 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+-	mgr->tx_up_in_progress = true;
++
++	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
+ }
+ 
+ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
+@@ -1828,11 +1885,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
+ 	drm_dp_encode_up_ack_reply(txmsg, req_type);
+ 
+ 	mutex_lock(&mgr->qlock);
+-	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
+-	if (!mgr->tx_up_in_progress) {
+-		process_single_up_tx_qlock(mgr);
+-	}
++
++	process_single_up_tx_qlock(mgr, txmsg);
++
+ 	mutex_unlock(&mgr->qlock);
++
++	kfree(txmsg);
+ 	return 0;
+ }
+ 
+@@ -2129,28 +2187,50 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
+ 
+ 	if (mgr->up_req_recv.have_eomt) {
+ 		struct drm_dp_sideband_msg_req_body msg;
+-		struct drm_dp_mst_branch *mstb;
++		struct drm_dp_mst_branch *mstb = NULL;
+ 		bool seqno;
+-		mstb = drm_dp_get_mst_branch_device(mgr,
+-						    mgr->up_req_recv.initial_hdr.lct,
+-						    mgr->up_req_recv.initial_hdr.rad);
+-		if (!mstb) {
+-			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
+-			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+-			return 0;
++
++		if (!mgr->up_req_recv.initial_hdr.broadcast) {
++			mstb = drm_dp_get_mst_branch_device(mgr,
++							    mgr->up_req_recv.initial_hdr.lct,
++							    mgr->up_req_recv.initial_hdr.rad);
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
+ 		}
+ 
+ 		seqno = mgr->up_req_recv.initial_hdr.seqno;
+ 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
+ 
+ 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
+-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++
++			if (!mstb)
++				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
++
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
++
+ 			drm_dp_update_port(mstb, &msg.u.conn_stat);
+ 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ 			(*mgr->cbs->hotplug)(mgr);
+ 
+ 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
+-			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
++			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
++			if (!mstb)
++				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
++
++			if (!mstb) {
++				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
++				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
++				return 0;
++			}
++
+ 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
+ 		}
+ 
+@@ -2330,6 +2410,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
+ 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ 		if (pbn == port->vcpi.pbn) {
+ 			*slots = port->vcpi.num_slots;
++			drm_dp_put_port(port);
+ 			return true;
+ 		}
+ 	}
+@@ -2489,32 +2570,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
+  */
+ int drm_dp_calc_pbn_mode(int clock, int bpp)
+ {
+-	fixed20_12 pix_bw;
+-	fixed20_12 fbpp;
+-	fixed20_12 result;
+-	fixed20_12 margin, tmp;
+-	u32 res;
+-
+-	pix_bw.full = dfixed_const(clock);
+-	fbpp.full = dfixed_const(bpp);
+-	tmp.full = dfixed_const(8);
+-	fbpp.full = dfixed_div(fbpp, tmp);
+-
+-	result.full = dfixed_mul(pix_bw, fbpp);
+-	margin.full = dfixed_const(54);
+-	tmp.full = dfixed_const(64);
+-	margin.full = dfixed_div(margin, tmp);
+-	result.full = dfixed_div(result, margin);
+-
+-	margin.full = dfixed_const(1006);
+-	tmp.full = dfixed_const(1000);
+-	margin.full = dfixed_div(margin, tmp);
+-	result.full = dfixed_mul(result, margin);
+-
+-	result.full = dfixed_div(result, tmp);
+-	result.full = dfixed_ceil(result);
+-	res = dfixed_trunc(result);
+-	return res;
++	u64 kbps;
++	s64 peak_kbps;
++	u32 numerator;
++	u32 denominator;
++
++	kbps = clock * bpp;
++
++	/*
++	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
++	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen as a
++	 * common multiplier that renders an integer PBN for all link
++	 * rate/lane count combinations
++	 * calculate
++	 * peak_kbps *= (1006/1000)
++	 * peak_kbps *= (64/54)
++	 * peak_kbps /= 8    convert bits to bytes
++	 */
++
++	numerator = 64 * 1006;
++	denominator = 54 * 8 * 1000 * 1000;
++
++	kbps *= numerator;
++	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
++
++	return drm_fixp2int_ceil(peak_kbps);
+ }
+ EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
+ 
+@@ -2522,11 +2602,23 @@ static int test_calc_pbn_mode(void)
+ {
+ 	int ret;
+ 	ret = drm_dp_calc_pbn_mode(154000, 30);
+-	if (ret != 689)
++	if (ret != 689) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				154000, 30, 689, ret);
+ 		return -EINVAL;
++	}
+ 	ret = drm_dp_calc_pbn_mode(234000, 30);
+-	if (ret != 1047)
++	if (ret != 1047) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				234000, 30, 1047, ret);
+ 		return -EINVAL;
++	}
++	ret = drm_dp_calc_pbn_mode(297000, 24);
++	if (ret != 1063) {
++		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
++				297000, 24, 1063, ret);
++		return -EINVAL;
++	}
+ 	return 0;
+ }
+ 
+@@ -2660,8 +2752,8 @@ static void drm_dp_tx_work(struct work_struct *work)
+ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ {
+ 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
+-	struct drm_connector *connector;
+-
++	struct drm_dp_mst_port *port;
++	bool send_hotplug = false;
+ 	/*
+ 	 * Not a regular list traverse as we have to drop the destroy
+ 	 * connector lock before destroying the connector, to avoid AB->BA
+@@ -2669,16 +2761,25 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
+ 	 */
+ 	for (;;) {
+ 		mutex_lock(&mgr->destroy_connector_lock);
+-		connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
+-		if (!connector) {
++		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
++		if (!port) {
+ 			mutex_unlock(&mgr->destroy_connector_lock);
+ 			break;
+ 		}
+-		list_del(&connector->destroy_list);
++		list_del(&port->next);
+ 		mutex_unlock(&mgr->destroy_connector_lock);
+ 
+-		mgr->cbs->destroy_connector(mgr, connector);
++		mgr->cbs->destroy_connector(mgr, port->connector);
++
++		drm_dp_port_teardown_pdt(port, port->pdt);
++
++		if (!port->input && port->vcpi.vcpi > 0)
++			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
++		kfree(port);
++		send_hotplug = true;
+ 	}
++	if (send_hotplug)
++		(*mgr->cbs->hotplug)(mgr);
+ }
+ 
+ /**
+@@ -2701,7 +2802,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ 	mutex_init(&mgr->qlock);
+ 	mutex_init(&mgr->payload_lock);
+ 	mutex_init(&mgr->destroy_connector_lock);
+-	INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
+ 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
+index 63503879a676..0d75e75b1da3 100644
+--- a/drivers/gpu/drm/drm_probe_helper.c
++++ b/drivers/gpu/drm/drm_probe_helper.c
+@@ -195,7 +195,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
+ 		mode_flags |= DRM_MODE_FLAG_3D_MASK;
+ 
+ 	list_for_each_entry(mode, &connector->modes, head) {
+-		mode->status = drm_mode_validate_basic(mode);
++		if (mode->status == MODE_OK)
++			mode->status = drm_mode_validate_basic(mode);
+ 
+ 		if (mode->status == MODE_OK)
+ 			mode->status = drm_mode_validate_size(mode, maxX, maxY);
+diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
+index f3e84c44d009..4decf518d106 100644
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -317,6 +317,10 @@ void i915_gem_context_reset(struct drm_device *dev)
+ 			i915_gem_context_unreference(lctx);
+ 			ring->last_context = NULL;
+ 		}
++
++		/* Force the GPU state to be reinitialised on enabling */
++		if (ring->default_context)
++			ring->default_context->legacy_hw_ctx.initialized = false;
+ 	}
+ }
+ 
+@@ -704,7 +708,7 @@ static int do_switch(struct intel_engine_cs *ring,
+ 			goto unpin_out;
+ 	}
+ 
+-	if (!to->legacy_hw_ctx.initialized) {
++	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
+ 		hw_flags |= MI_RESTORE_INHIBIT;
+ 		/* NB: If we inhibit the restore, the context is not allowed to
+ 		 * die because future work may end up depending on valid address
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 7b27a114b030..b103773df2a3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10391,11 +10391,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
+ 		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
+ 	}
+ 
+-	/* Clamp bpp to 8 on screens without EDID 1.4 */
+-	if (connector->base.display_info.bpc == 0 && bpp > 24) {
+-		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
+-			      bpp);
+-		pipe_config->pipe_bpp = 24;
++	/* Clamp bpp to default limit on screens without EDID 1.4 */
++	if (connector->base.display_info.bpc == 0) {
++		int type = connector->base.connector_type;
++		int clamp_bpp = 24;
++
++		/* Fall back to 18 bpp when DP sink capability is unknown. */
++		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
++		    type == DRM_MODE_CONNECTOR_eDP)
++			clamp_bpp = 18;
++
++		if (bpp > clamp_bpp) {
++			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
++				      bpp, clamp_bpp);
++			pipe_config->pipe_bpp = clamp_bpp;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
+index 5cb47482d29f..88c557551b89 100644
+--- a/drivers/gpu/drm/i915/intel_dp_mst.c
++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
+@@ -439,9 +439,9 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
+ 
+ 	drm_mode_connector_set_path_property(connector, pathprop);
+ 	drm_reinit_primary_mode_group(dev);
+-	mutex_lock(&dev->mode_config.mutex);
++	drm_modeset_lock_all(dev);
+ 	intel_connector_add_to_fbdev(intel_connector);
+-	mutex_unlock(&dev->mode_config.mutex);
++	drm_modeset_unlock_all(dev);
+ 	drm_connector_register(&intel_connector->base);
+ 	return connector;
+ }
+@@ -452,16 +452,16 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 	struct intel_connector *intel_connector = to_intel_connector(connector);
+ 	struct drm_device *dev = connector->dev;
+ 	/* need to nuke the connector */
+-	mutex_lock(&dev->mode_config.mutex);
++	drm_modeset_lock_all(dev);
+ 	intel_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+-	mutex_unlock(&dev->mode_config.mutex);
++	drm_modeset_unlock_all(dev);
+ 
+ 	intel_connector->unregister(intel_connector);
+ 
+-	mutex_lock(&dev->mode_config.mutex);
++	drm_modeset_lock_all(dev);
+ 	intel_connector_remove_from_fbdev(intel_connector);
+ 	drm_connector_cleanup(connector);
+-	mutex_unlock(&dev->mode_config.mutex);
++	drm_modeset_unlock_all(dev);
+ 
+ 	drm_reinit_primary_mode_group(dev);
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 3162040bc314..05490ef5a2aa 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
+ 
+ 		NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
+ 
++		mutex_lock(&drm->dev->mode_config.mutex);
+ 		if (plugged)
+ 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ 		else
+ 			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++		mutex_unlock(&drm->dev->mode_config.mutex);
++
+ 		drm_helper_hpd_irq_event(connector->dev);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 5be50ef2b30e..bb292143997e 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -2310,8 +2310,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+ 	encoder_mode = atombios_get_encoder_mode(encoder);
+ 	if (connector && (radeon_audio != 0) &&
+ 	    ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
+-	     (ENCODER_MODE_IS_DP(encoder_mode) &&
+-	      drm_detect_monitor_audio(radeon_connector_edid(connector)))))
++	     ENCODER_MODE_IS_DP(encoder_mode)))
+ 		radeon_audio_mode_set(encoder, adjusted_mode);
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 44480c1b9738..848b1ffd5cc4 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -282,6 +282,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+ 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ 	 */
+ 	if (ASIC_IS_DCE8(rdev)) {
++		unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
++			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++		div = radeon_audio_decode_dfs_div(div);
++
++		if (div)
++			clock = clock * 100 / div;
++
+ 		WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
+ 		WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
+ 	} else {
+diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+index 9953356fe263..3cf04a2f44bb 100644
+--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
++++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
+@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
+ 	 * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
+ 	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
+ 	 */
++	if (ASIC_IS_DCE41(rdev)) {
++		unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
++			DENTIST_DPREFCLK_WDIVIDER_MASK) >>
++			DENTIST_DPREFCLK_WDIVIDER_SHIFT;
++		div = radeon_audio_decode_dfs_div(div);
++
++		if (div)
++			clock = 100 * clock / div;
++	}
++
+ 	WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
+ 	WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
+ }
+diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
+index 4aa5f755572b..13b6029d65cc 100644
+--- a/drivers/gpu/drm/radeon/evergreend.h
++++ b/drivers/gpu/drm/radeon/evergreend.h
+@@ -511,6 +511,11 @@
+ #define DCCG_AUDIO_DTO1_CNTL              0x05cc
+ #       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
+ 
++#define DCE41_DENTIST_DISPCLK_CNTL			0x049c
++#       define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
++#       define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
++#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
++
+ /* DCE 4.0 AFMT */
+ #define HDMI_CONTROL                         0x7030
+ #       define HDMI_KEEPOUT_MODE             (1 << 0)
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 91c3f60f8bac..4bca29c5abfa 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -268,6 +268,7 @@ struct radeon_clock {
+ 	uint32_t current_dispclk;
+ 	uint32_t dp_extclk;
+ 	uint32_t max_pixel_clock;
++	uint32_t vco_freq;
+ };
+ 
+ /*
+diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
+index 8f285244c839..de9a2ffcf5f7 100644
+--- a/drivers/gpu/drm/radeon/radeon_atombios.c
++++ b/drivers/gpu/drm/radeon/radeon_atombios.c
+@@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 	}
+ 
+ 	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
+-	if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
++	if (((dev->pdev->device == 0x9802) ||
++	     (dev->pdev->device == 0x9805) ||
++	     (dev->pdev->device == 0x9806)) &&
+ 	    (dev->pdev->subsystem_vendor == 0x1734) &&
+ 	    (dev->pdev->subsystem_device == 0x11bd)) {
+ 		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
+@@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
+ 		}
+ 	}
+ 
+-	/* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
+-	if ((dev->pdev->device == 0x9805) &&
+-	    (dev->pdev->subsystem_vendor == 0x1734) &&
+-	    (dev->pdev->subsystem_device == 0x11bd)) {
+-		if (*connector_type == DRM_MODE_CONNECTOR_VGA)
+-			return false;
+-	}
+-
+ 	return true;
+ }
+ 
+@@ -1112,6 +1106,31 @@ union firmware_info {
+ 	ATOM_FIRMWARE_INFO_V2_2 info_22;
+ };
+ 
++union igp_info {
++	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
++	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
++};
++
++static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
++{
++	struct radeon_mode_info *mode_info = &rdev->mode_info;
++	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
++	union igp_info *igp_info;
++	u8 frev, crev;
++	u16 data_offset;
++
++	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
++			&frev, &crev, &data_offset)) {
++		igp_info = (union igp_info *)(mode_info->atom_context->bios +
++			data_offset);
++		rdev->clock.vco_freq =
++			le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
++	}
++}
++
+ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ {
+ 	struct radeon_device *rdev = dev->dev_private;
+@@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
+ 		rdev->mode_info.firmware_flags =
+ 			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
+ 
++		if (ASIC_IS_DCE8(rdev))
++			rdev->clock.vco_freq =
++				le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
++		else if (ASIC_IS_DCE5(rdev))
++			rdev->clock.vco_freq = rdev->clock.current_dispclk;
++		else if (ASIC_IS_DCE41(rdev))
++			radeon_atombios_get_dentist_vco_freq(rdev);
++		else
++			rdev->clock.vco_freq = rdev->clock.current_dispclk;
++
++		if (rdev->clock.vco_freq == 0)
++			rdev->clock.vco_freq = 360000;	/* 3.6 GHz */
++
+ 		return true;
+ 	}
+ 
+ 	return false;
+ }
+ 
+-union igp_info {
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+-	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+-};
+-
+ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+ {
+ 	struct radeon_mode_info *mode_info = &rdev->mode_info;
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index d77dd1430d58..b214663b370d 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -698,26 +698,37 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ 
+ 	if (!dig || !dig->afmt)
+ 		return;
+ 
+-	radeon_audio_set_mute(encoder, true);
++	if (!connector)
++		return;
++
++	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++		radeon_audio_set_mute(encoder, true);
+ 
+-	radeon_audio_write_speaker_allocation(encoder);
+-	radeon_audio_write_sad_regs(encoder);
+-	radeon_audio_write_latency_fields(encoder, mode);
+-	radeon_audio_set_dto(encoder, mode->clock);
+-	radeon_audio_set_vbi_packet(encoder);
+-	radeon_hdmi_set_color_depth(encoder);
+-	radeon_audio_update_acr(encoder, mode->clock);
+-	radeon_audio_set_audio_packet(encoder);
+-	radeon_audio_select_pin(encoder);
++		radeon_audio_write_speaker_allocation(encoder);
++		radeon_audio_write_sad_regs(encoder);
++		radeon_audio_write_latency_fields(encoder, mode);
++		radeon_audio_set_dto(encoder, mode->clock);
++		radeon_audio_set_vbi_packet(encoder);
++		radeon_hdmi_set_color_depth(encoder);
++		radeon_audio_update_acr(encoder, mode->clock);
++		radeon_audio_set_audio_packet(encoder);
++		radeon_audio_select_pin(encoder);
+ 
+-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+-		return;
++		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++			return;
+ 
+-	radeon_audio_set_mute(encoder, false);
++		radeon_audio_set_mute(encoder, false);
++	} else {
++		radeon_hdmi_set_color_depth(encoder);
++
++		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++			return;
++	}
+ }
+ 
+ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+@@ -728,28 +739,24 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+-	struct radeon_connector_atom_dig *dig_connector =
+-		radeon_connector->con_priv;
+ 
+-	if (!connector)
++	if (!dig || !dig->afmt)
+ 		return;
+ 
+-	if (!dig || !dig->afmt)
++	if (!connector)
+ 		return;
+ 
+-	radeon_audio_write_speaker_allocation(encoder);
+-	radeon_audio_write_sad_regs(encoder);
+-	radeon_audio_write_latency_fields(encoder, mode);
+-	if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
+-		radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
+-	else
+-		radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+-	radeon_audio_set_audio_packet(encoder);
+-	radeon_audio_select_pin(encoder);
++	if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++		radeon_audio_write_speaker_allocation(encoder);
++		radeon_audio_write_sad_regs(encoder);
++		radeon_audio_write_latency_fields(encoder, mode);
++		radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
++		radeon_audio_set_audio_packet(encoder);
++		radeon_audio_select_pin(encoder);
+ 
+-	if (radeon_audio_set_avi_packet(encoder, mode) < 0)
+-		return;
++		if (radeon_audio_set_avi_packet(encoder, mode) < 0)
++			return;
++	}
+ }
+ 
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+@@ -768,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
+ 	if (radeon_encoder->audio && radeon_encoder->audio->dpms)
+ 		radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
+ }
++
++unsigned int radeon_audio_decode_dfs_div(unsigned int div)
++{
++	if (div >= 8 && div < 64)
++		return (div - 8) * 25 + 200;
++	else if (div >= 64 && div < 96)
++		return (div - 64) * 50 + 1600;
++	else if (div >= 96 && div < 128)
++		return (div - 96) * 100 + 3200;
++	else
++		return 0;
++}
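As a worked check of the decode just added: a raw DENTIST divider field of 40 falls in the first range and decodes to (40 - 8) * 25 + 200 = 1000, a DFS divider of 10.00 in the times-100 fixed point the callers expect, so the DTO hunks earlier rescale with clock * 100 / 1000 = clock / 10. The three ranges cover dividers 2.00 to 15.75 in steps of 0.25 (encodings 8-63), 16.0 to 31.5 in steps of 0.5 (64-95), and 32 to 63 in whole steps (96-127); anything else decodes to 0 and the callers skip the rescale.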
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index 059cc3012062..5c70cceaa4a6 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ 	struct drm_display_mode *mode);
+ void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
++unsigned int radeon_audio_decode_dfs_div(unsigned int div);
+ 
+ #endif
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index 604c44d88e7a..ccab94ed9d94 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1734,6 +1734,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 	}
+ 
+ 	drm_kms_helper_poll_enable(dev);
++	drm_helper_hpd_irq_event(dev);
+ 
+ 	/* set the power state here in case we are a PX system or headless */
+ 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 42986130cc63..c9ff4cf4c4e7 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -287,9 +287,9 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
+ 	drm_mode_connector_set_path_property(connector, pathprop);
+ 	drm_reinit_primary_mode_group(dev);
+ 
+-	mutex_lock(&dev->mode_config.mutex);
++	drm_modeset_lock_all(dev);
+ 	radeon_fb_add_connector(rdev, connector);
+-	mutex_unlock(&dev->mode_config.mutex);
++	drm_modeset_unlock_all(dev);
+ 
+ 	drm_connector_register(connector);
+ 	return connector;
+@@ -304,12 +304,12 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+ 
+ 	drm_connector_unregister(connector);
+ 	/* need to nuke the connector */
+-	mutex_lock(&dev->mode_config.mutex);
++	drm_modeset_lock_all(dev);
+ 	/* dpms off */
+ 	radeon_fb_remove_connector(rdev, connector);
+ 
+ 	drm_connector_cleanup(connector);
+-	mutex_unlock(&dev->mode_config.mutex);
++	drm_modeset_unlock_all(dev);
+ 	drm_reinit_primary_mode_group(dev);
+ 
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 676362769b8d..741065bd14b3 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -33,6 +33,7 @@
+ #include <linux/slab.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
++#include <drm/drm_cache.h>
+ #include "radeon.h"
+ #include "radeon_trace.h"
+ 
+@@ -225,7 +226,7 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ 	 */
+-	bo->flags &= ~RADEON_GEM_GTT_WC;
++	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+ #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
+ 	/* Don't try to enable write-combining when it can't work, or things
+ 	 * may be slow
+@@ -237,7 +238,13 @@ int radeon_bo_create(struct radeon_device *rdev,
+ 
+ 	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+ 		      "better performance thanks to write-combining\n");
+-	bo->flags &= ~RADEON_GEM_GTT_WC;
++	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
++#else
++	/* For architectures that don't support WC memory,
++	 * mask out the WC flag from the BO
++	 */
++	if (!drm_arch_can_wc_memory())
++		bo->flags &= ~RADEON_GEM_GTT_WC;
+ #endif
+ 
+ 	radeon_ttm_placement_from_domain(bo, domain);
+diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
+index 9c3377ca17b7..8ec4e4591756 100644
+--- a/drivers/gpu/drm/radeon/radeon_vm.c
++++ b/drivers/gpu/drm/radeon/radeon_vm.c
+@@ -456,15 +456,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 
+ 	if (soffset) {
+ 		/* make sure object fit at this offset */
+-		eoffset = soffset + size;
++		eoffset = soffset + size - 1;
+ 		if (soffset >= eoffset) {
+ 			r = -EINVAL;
+ 			goto error_unreserve;
+ 		}
+ 
+ 		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
+-		if (last_pfn > rdev->vm_manager.max_pfn) {
+-			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
++		if (last_pfn >= rdev->vm_manager.max_pfn) {
++			dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
+ 				last_pfn, rdev->vm_manager.max_pfn);
+ 			r = -EINVAL;
+ 			goto error_unreserve;
+@@ -479,7 +479,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 	eoffset /= RADEON_GPU_PAGE_SIZE;
+ 	if (soffset || eoffset) {
+ 		struct interval_tree_node *it;
+-		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
++		it = interval_tree_iter_first(&vm->va, soffset, eoffset);
+ 		if (it && it != &bo_va->it) {
+ 			struct radeon_bo_va *tmp;
+ 			tmp = container_of(it, struct radeon_bo_va, it);
+@@ -522,7 +522,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
+ 
+ 	if (soffset || eoffset) {
+ 		bo_va->it.start = soffset;
+-		bo_va->it.last = eoffset - 1;
++		bo_va->it.last = eoffset;
+ 		interval_tree_insert(&bo_va->it, &vm->va);
+ 	}
+ 
+@@ -891,7 +891,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
+ 	unsigned i;
+ 
+ 	start >>= radeon_vm_block_size;
+-	end >>= radeon_vm_block_size;
++	end = (end - 1) >> radeon_vm_block_size;
+ 
+ 	for (i = start; i <= end; ++i)
+ 		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
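The common thread in this hunk is moving the interval tree to inclusive end addresses. For a mapping at soffset 0x200000 with size 0x100000 the last valid byte is 0x2fffff; storing eoffset = soffset + size - 1 stops the tree from reporting a spurious overlap with a neighbour that starts exactly at 0x300000, and the limit test correspondingly tightens to last_pfn >= max_pfn. The fencing helper gets the matching off-by-one treatment, (end - 1) >> block_size, so an end landing exactly on a page-table boundary no longer fences one table too many.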
+diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
+index 3afac3013983..c126f6bfbed1 100644
+--- a/drivers/gpu/drm/radeon/sid.h
++++ b/drivers/gpu/drm/radeon/sid.h
+@@ -915,6 +915,11 @@
+ #define DCCG_AUDIO_DTO1_PHASE                           0x05c0
+ #define DCCG_AUDIO_DTO1_MODULE                          0x05c4
+ 
++#define DENTIST_DISPCLK_CNTL				0x0490
++#	define DENTIST_DPREFCLK_WDIVIDER(x)		(((x) & 0x7f) << 24)
++#	define DENTIST_DPREFCLK_WDIVIDER_MASK		(0x7f << 24)
++#	define DENTIST_DPREFCLK_WDIVIDER_SHIFT		24
++
+ #define AFMT_AUDIO_SRC_CONTROL                          0x713c
+ #define		AFMT_AUDIO_SRC_SELECT(x)		(((x) & 7) << 0)
+ /* AFMT_AUDIO_SRC_SELECT
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 15a8d7746fd2..2aa0e927d490 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -25,6 +25,7 @@
+  *
+  **************************************************************************/
+ #include <linux/module.h>
++#include <linux/console.h>
+ 
+ #include <drm/drmP.h>
+ #include "vmwgfx_drv.h"
+@@ -1447,6 +1448,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ static int __init vmwgfx_init(void)
+ {
+ 	int ret;
++
++#ifdef CONFIG_VGA_CONSOLE
++	if (vgacon_text_force())
++		return -EINVAL;
++#endif
++
+ 	ret = drm_pci_init(&driver, &vmw_pci_driver);
+ 	if (ret)
+ 		DRM_ERROR("Failed initializing DRM.\n");
+diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
+index 894531d315b8..046144fc5aff 100644
+--- a/drivers/hwtracing/coresight/coresight.c
++++ b/drivers/hwtracing/coresight/coresight.c
+@@ -543,7 +543,7 @@ static int coresight_name_match(struct device *dev, void *data)
+ 	to_match = data;
+ 	i_csdev = to_coresight_device(dev);
+ 
+-	if (!strcmp(to_match, dev_name(&i_csdev->dev)))
++	if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
+ 		return 1;
+ 
+ 	return 0;
+diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
+index 4fa88ba2963e..131994382b22 100644
+--- a/drivers/infiniband/hw/qib/qib_qp.c
++++ b/drivers/infiniband/hw/qib/qib_qp.c
+@@ -100,9 +100,10 @@ static u32 credit_table[31] = {
+ 	32768                   /* 1E */
+ };
+ 
+-static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
++static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
++			 gfp_t gfp)
+ {
+-	unsigned long page = get_zeroed_page(GFP_KERNEL);
++	unsigned long page = get_zeroed_page(gfp);
+ 
+ 	/*
+ 	 * Free the page if someone raced with us installing it.
+@@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
+  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
+  */
+ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+-		     enum ib_qp_type type, u8 port)
++		     enum ib_qp_type type, u8 port, gfp_t gfp)
+ {
+ 	u32 i, offset, max_scan, qpn;
+ 	struct qpn_map *map;
+@@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
+ 	max_scan = qpt->nmaps - !offset;
+ 	for (i = 0;;) {
+ 		if (unlikely(!map->page)) {
+-			get_map_page(qpt, map);
++			get_map_page(qpt, map, gfp);
+ 			if (unlikely(!map->page))
+ 				break;
+ 		}
+@@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 	size_t sz;
+ 	size_t sg_list_sz;
+ 	struct ib_qp *ret;
++	gfp_t gfp;
++
+ 
+ 	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
+ 	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
+-	    init_attr->create_flags) {
+-		ret = ERR_PTR(-EINVAL);
+-		goto bail;
+-	}
++	    init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
++		return ERR_PTR(-EINVAL);
++
++	/* GFP_NOIO is applicable in RC QPs only */
++	if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
++	    init_attr->qp_type != IB_QPT_RC)
++		return ERR_PTR(-EINVAL);
++
++	gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
++			GFP_NOIO : GFP_KERNEL;
+ 
+ 	/* Check receive queue parameters if no SRQ is specified. */
+ 	if (!init_attr->srq) {
+@@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		sz = sizeof(struct qib_sge) *
+ 			init_attr->cap.max_send_sge +
+ 			sizeof(struct qib_swqe);
+-		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
++		swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
++				gfp, PAGE_KERNEL);
+ 		if (swq == NULL) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail;
+@@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		} else if (init_attr->cap.max_recv_sge > 1)
+ 			sg_list_sz = sizeof(*qp->r_sg_list) *
+ 				(init_attr->cap.max_recv_sge - 1);
+-		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
++		qp = kzalloc(sz + sg_list_sz, gfp);
+ 		if (!qp) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail_swq;
+ 		}
+ 		RCU_INIT_POINTER(qp->next, NULL);
+-		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
++		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
+ 		if (!qp->s_hdr) {
+ 			ret = ERR_PTR(-ENOMEM);
+ 			goto bail_qp;
+@@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
+ 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
+ 				sizeof(struct qib_rwqe);
+-			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
+-						   qp->r_rq.size * sz);
++			if (gfp != GFP_NOIO)
++				qp->r_rq.wq = vmalloc_user(
++						sizeof(struct qib_rwq) +
++						qp->r_rq.size * sz);
++			else
++				qp->r_rq.wq = __vmalloc(
++						sizeof(struct qib_rwq) +
++						qp->r_rq.size * sz,
++						gfp, PAGE_KERNEL);
++
+ 			if (!qp->r_rq.wq) {
+ 				ret = ERR_PTR(-ENOMEM);
+ 				goto bail_qp;
+@@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
+ 		dev = to_idev(ibpd->device);
+ 		dd = dd_from_dev(dev);
+ 		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
+-				init_attr->port_num);
++				init_attr->port_num, gfp);
+ 		if (err < 0) {
+ 			ret = ERR_PTR(err);
+ 			vfree(qp->r_rq.wq);
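The new create flag threads an allocation context through every buffer the QP needs: a consumer sitting on the block-I/O path (an RDMA-backed storage mount, say) must not let QP creation recurse into page reclaim, since reclaim may be waiting on that very I/O path. GFP_NOIO is honoured directly by kzalloc() and get_zeroed_page(), but plain vmalloc() and vmalloc_user() hardcode GFP_KERNEL, hence the switch to __vmalloc() above. A sketch of the shape (hypothetical helper; the allocator signatures are the real ones for this kernel):

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* hypothetical helper mirroring the pattern in qib_create_qp() */
	static void *qp_buf_alloc(size_t size, bool noio)
	{
		gfp_t gfp = noio ? GFP_NOIO : GFP_KERNEL;

		if (size <= PAGE_SIZE)
			return kzalloc(size, gfp);
		/* vmalloc() would silently use GFP_KERNEL */
		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
	}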
+diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+index f8ea069a3eaf..b2fb5286dbd9 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
++++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
+@@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	struct qib_ibdev *dev = to_idev(ibqp->device);
+ 	struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
+ 	struct qib_mcast *mcast = NULL;
+-	struct qib_mcast_qp *p, *tmp;
++	struct qib_mcast_qp *p, *tmp, *delp = NULL;
+ 	struct rb_node *n;
+ 	int last = 0;
+ 	int ret;
+ 
+-	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
+-		ret = -EINVAL;
+-		goto bail;
+-	}
++	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
++		return -EINVAL;
+ 
+ 	spin_lock_irq(&ibp->lock);
+ 
+@@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	while (1) {
+ 		if (n == NULL) {
+ 			spin_unlock_irq(&ibp->lock);
+-			ret = -EINVAL;
+-			goto bail;
++			return -EINVAL;
+ 		}
+ 
+ 		mcast = rb_entry(n, struct qib_mcast, rb_node);
+@@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		 */
+ 		list_del_rcu(&p->list);
+ 		mcast->n_attached--;
++		delp = p;
+ 
+ 		/* If this was the last attached QP, remove the GID too. */
+ 		if (list_empty(&mcast->qp_list)) {
+@@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 	}
+ 
+ 	spin_unlock_irq(&ibp->lock);
++	/* QP not attached */
++	if (!delp)
++		return -EINVAL;
++	/*
++	 * Wait for any list walkers to finish before freeing the
++	 * list element.
++	 */
++	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
++	qib_mcast_qp_free(delp);
+ 
+-	if (p) {
+-		/*
+-		 * Wait for any list walkers to finish before freeing the
+-		 * list element.
+-		 */
+-		wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
+-		qib_mcast_qp_free(p);
+-	}
+ 	if (last) {
+ 		atomic_dec(&mcast->refcount);
+ 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
+@@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+ 		dev->n_mcast_grps_allocated--;
+ 		spin_unlock_irq(&dev->n_mcast_grps_lock);
+ 	}
+-
+-	ret = 0;
+-
+-bail:
+-	return ret;
++	return 0;
+ }
+ 
+ int qib_mcast_tree_empty(struct qib_ibport *ibp)
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index ce3d40004458..0f5b400706d7 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1214,7 +1214,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ 			input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2,
+ 					     ETP_WMAX_V2, 0, 0);
+ 		}
+-		input_mt_init_slots(dev, 2, 0);
++		input_mt_init_slots(dev, 2, INPUT_MT_SEMI_MT);
+ 		input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
+ 		input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ 		break;
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index c11556563ef0..68f5f4a0f1e7 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -258,6 +258,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
+ 		},
+ 	},
+ 	{
++		/* Fujitsu Lifebook U745 */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
++			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U745"),
++		},
++	},
++	{
+ 		/* Fujitsu T70H */
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index e29d5d7fe220..937832cfa48e 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -341,17 +341,18 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
+ 	arm_lpae_iopte *start, *end;
+ 	unsigned long table_size;
+ 
+-	/* Only leaf entries at the last level */
+-	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
+-		return;
+-
+ 	if (lvl == ARM_LPAE_START_LVL(data))
+ 		table_size = data->pgd_size;
+ 	else
+ 		table_size = 1UL << data->pg_shift;
+ 
+ 	start = ptep;
+-	end = (void *)ptep + table_size;
++
++	/* Only leaf entries at the last level */
++	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
++		end = ptep;
++	else
++		end = (void *)ptep + table_size;
+ 
+ 	while (ptep != end) {
+ 		arm_lpae_iopte pte = *ptep++;
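The subtle part of this fix: the early return skipped not only the child walk, which is indeed pointless at the last level, but also the tail of the function (below the quoted context) that frees this table's own page, leaking one page per last-level table at domain teardown. Setting end = ptep instead turns the child-recursion loop into a no-op while still falling through to the free.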
+diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
+index 63cd031b2c28..869d01dd4063 100644
+--- a/drivers/irqchip/irq-atmel-aic-common.c
++++ b/drivers/irqchip/irq-atmel-aic-common.c
+@@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
+ 	    priority > AT91_AIC_IRQ_MAX_PRIORITY)
+ 		return -EINVAL;
+ 
+-	*val &= AT91_AIC_PRIOR;
++	*val &= ~AT91_AIC_PRIOR;
+ 	*val |= priority;
+ 
+ 	return 0;
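A one-character fix, but the broken form is a classic: and-ing with the mask keeps only the old priority bits, when the point of a field update is to clear them first. The idiom in isolation:

	#include <stdint.h>

	#define PRIO_MASK	0x7u	/* stand-in for AT91_AIC_PRIOR */

	static uint32_t prio_update(uint32_t val, uint32_t prio)
	{
		val &= ~PRIO_MASK;		/* clear the field (the missing '~') */
		val |= prio & PRIO_MASK;	/* insert the new value */
		return val;
	}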
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 00cde40db572..43829d9493f7 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
+ 	do {
+ 		ret = btree_root(gc_root, c, &op, &writes, &stats);
+ 		closure_sync(&writes);
++		cond_resched();
+ 
+ 		if (ret && ret != -EAGAIN)
+ 			pr_warn("gc failed!");
+@@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
+ 		rw_lock(true, b, b->level);
+ 
+ 		if (b->key.ptr[0] != btree_ptr ||
+-		    b->seq != seq + 1)
++                   b->seq != seq + 1) {
++		    b->seq != seq + 1) {
++			op->lock = b->level;
+ 			goto out;
++		}
+ 
+ 	SET_KEY_PTRS(check_key, 1);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 4dd2bb7167f0..42522c8f13c6 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -708,6 +708,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
+ 	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
+ 	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
+ 	     "Couldn't create device <-> cache set symlinks");
++
++	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
+ }
+ 
+ static void bcache_device_detach(struct bcache_device *d)
+@@ -878,8 +880,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
+ 	buf[SB_LABEL_SIZE] = '\0';
+ 	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+ 
+-	if (atomic_xchg(&dc->running, 1))
++	if (atomic_xchg(&dc->running, 1)) {
++		kfree(env[1]);
++		kfree(env[2]);
+ 		return;
++	}
+ 
+ 	if (!d->c &&
+ 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
+@@ -1967,6 +1972,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ 			else
+ 				err = "device busy";
+ 			mutex_unlock(&bch_register_lock);
++			if (attr == &ksysfs_register_quiet)
++				goto out;
+ 		}
+ 		goto err;
+ 	}
+@@ -2005,8 +2012,7 @@ out:
+ err_close:
+ 	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ err:
+-	if (attr != &ksysfs_register_quiet)
+-		pr_info("error opening %s: %s", path, err);
++	pr_info("error opening %s: %s", path, err);
+ 	ret = -EINVAL;
+ 	goto out;
+ }
+@@ -2100,8 +2106,10 @@ static int __init bcache_init(void)
+ 	closure_debug_init();
+ 
+ 	bcache_major = register_blkdev(0, "bcache");
+-	if (bcache_major < 0)
++	if (bcache_major < 0) {
++		unregister_reboot_notifier(&reboot);
+ 		return bcache_major;
++	}
+ 
+ 	if (!(bcache_wq = create_workqueue("bcache")) ||
+ 	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index f1986bcd1bf0..540256a0df4f 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
+ 
+ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
+ {
++	struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
++
++	BUG_ON(KEY_INODE(k) != dc->disk.id);
++
+ 	return KEY_DIRTY(k);
+ }
+ 
+@@ -372,11 +376,24 @@ next:
+ 	}
+ }
+ 
++/*
++ * Returns true if we scanned the entire disk
++ */
+ static bool refill_dirty(struct cached_dev *dc)
+ {
+ 	struct keybuf *buf = &dc->writeback_keys;
++	struct bkey start = KEY(dc->disk.id, 0, 0);
+ 	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
+-	bool searched_from_start = false;
++	struct bkey start_pos;
++
++	/*
++	 * make sure keybuf pos is inside the range for this disk - at bringup
++	 * we might not be attached yet so this disk's inode nr isn't
++	 * initialized then
++	 */
++	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
++	    bkey_cmp(&buf->last_scanned, &end) > 0)
++		buf->last_scanned = start;
+ 
+ 	if (dc->partial_stripes_expensive) {
+ 		refill_full_stripes(dc);
+@@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
+ 			return false;
+ 	}
+ 
+-	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
+-		buf->last_scanned = KEY(dc->disk.id, 0, 0);
+-		searched_from_start = true;
+-	}
+-
++	start_pos = buf->last_scanned;
+ 	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
+ 
+-	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
++	if (bkey_cmp(&buf->last_scanned, &end) < 0)
++		return false;
++
++	/*
++	 * If we get to the end start scanning again from the beginning, and
++	 * only scan up to where we initially started scanning from:
++	 */
++	buf->last_scanned = start;
++	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
++
++	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
+ }
+ 
+ static int bch_writeback_thread(void *arg)
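[ Sketch, not part of the patch: the refill_dirty() rework above replaces
  a single linear pass with a wrap-around scan - search from the last
  position to the end of the disk, then wrap and scan from the start back
  up to where this pass began. Types and helpers below are hypothetical. ]

	/* returns true only if the whole keyspace was covered */
	static bool scan_all(struct scanner *s, unsigned start, unsigned end)
	{
		unsigned pos = s->last_scanned;

		scan_range(s, end);		/* last_scanned .. end */
		if (s->last_scanned < end)
			return false;		/* buffer filled up early */

		s->last_scanned = start;	/* wrap to the beginning */
		scan_range(s, pos);		/* start .. original position */
		return s->last_scanned >= pos;
	}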
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 0a9dab187b79..073a042aed24 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
+ 
+ static inline void bch_writeback_queue(struct cached_dev *dc)
+ {
+-	wake_up_process(dc->writeback_thread);
++	if (!IS_ERR_OR_NULL(dc->writeback_thread))
++		wake_up_process(dc->writeback_thread);
+ }
+ 
+ static inline void bch_writeback_add(struct cached_dev *dc)
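[ Sketch, not part of the patch: guarding wake_up_process() with
  IS_ERR_OR_NULL(), as the writeback.h hunk does, matters because
  kthread_create() returns an ERR_PTR on failure and the field may also
  still be NULL before the thread is started. ]

	#include <linux/err.h>
	#include <linux/sched.h>

	static void maybe_wake(struct task_struct *worker)
	{
		/* worker may be NULL (never started) or an ERR_PTR */
		if (!IS_ERR_OR_NULL(worker))
			wake_up_process(worker);
	}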
+diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
+index 0b2536247cf5..84e27708ad97 100644
+--- a/drivers/md/dm-exception-store.h
++++ b/drivers/md/dm-exception-store.h
+@@ -70,7 +70,7 @@ struct dm_exception_store_type {
+ 	 * Update the metadata with this exception.
+ 	 */
+ 	void (*commit_exception) (struct dm_exception_store *store,
+-				  struct dm_exception *e,
++				  struct dm_exception *e, int valid,
+ 				  void (*callback) (void *, int success),
+ 				  void *callback_context);
+ 
+diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
+index 808b8419bc48..9feb894e5565 100644
+--- a/drivers/md/dm-snap-persistent.c
++++ b/drivers/md/dm-snap-persistent.c
+@@ -694,7 +694,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void persistent_commit_exception(struct dm_exception_store *store,
+-					struct dm_exception *e,
++					struct dm_exception *e, int valid,
+ 					void (*callback) (void *, int success),
+ 					void *callback_context)
+ {
+@@ -703,6 +703,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
+ 	struct core_exception ce;
+ 	struct commit_callback *cb;
+ 
++	if (!valid)
++		ps->valid = 0;
++
+ 	ce.old_chunk = e->old_chunk;
+ 	ce.new_chunk = e->new_chunk;
+ 	write_exception(ps, ps->current_committed++, &ce);
+diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
+index 1ce9a2586e41..31439d53cf7e 100644
+--- a/drivers/md/dm-snap-transient.c
++++ b/drivers/md/dm-snap-transient.c
+@@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
+ }
+ 
+ static void transient_commit_exception(struct dm_exception_store *store,
+-				       struct dm_exception *e,
++				       struct dm_exception *e, int valid,
+ 				       void (*callback) (void *, int success),
+ 				       void *callback_context)
+ {
+ 	/* Just succeed */
+-	callback(callback_context, 1);
++	callback(callback_context, valid);
+ }
+ 
+ static void transient_usage(struct dm_exception_store *store,
+diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
+index f83a0f3fc365..11ec9d2a27df 100644
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1428,8 +1428,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
+ 	dm_table_event(s->ti->table);
+ }
+ 
+-static void pending_complete(struct dm_snap_pending_exception *pe, int success)
++static void pending_complete(void *context, int success)
+ {
++	struct dm_snap_pending_exception *pe = context;
+ 	struct dm_exception *e;
+ 	struct dm_snapshot *s = pe->snap;
+ 	struct bio *origin_bios = NULL;
+@@ -1500,24 +1501,13 @@ out:
+ 	free_pending_exception(pe);
+ }
+ 
+-static void commit_callback(void *context, int success)
+-{
+-	struct dm_snap_pending_exception *pe = context;
+-
+-	pending_complete(pe, success);
+-}
+-
+ static void complete_exception(struct dm_snap_pending_exception *pe)
+ {
+ 	struct dm_snapshot *s = pe->snap;
+ 
+-	if (unlikely(pe->copy_error))
+-		pending_complete(pe, 0);
+-
+-	else
+-		/* Update the metadata if we are persistent */
+-		s->store->type->commit_exception(s->store, &pe->e,
+-						 commit_callback, pe);
++	/* Update the metadata if we are persistent */
++	s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
++					 pending_complete, pe);
+ }
+ 
+ /*
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 7073b22d4cb4..cb58bb318782 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -3210,8 +3210,8 @@ static void pool_postsuspend(struct dm_target *ti)
+ 	struct pool_c *pt = ti->private;
+ 	struct pool *pool = pt->pool;
+ 
+-	cancel_delayed_work(&pool->waker);
+-	cancel_delayed_work(&pool->no_space_timeout);
++	cancel_delayed_work_sync(&pool->waker);
++	cancel_delayed_work_sync(&pool->no_space_timeout);
+ 	flush_workqueue(pool->wq);
+ 	(void) commit(pool);
+ }
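[ Sketch, not part of the patch: plain cancel_delayed_work() can return
  while the callback is still running on another CPU, so a handler could
  re-arm itself after suspend; cancel_delayed_work_sync() waits for any
  in-flight callback. Structure and field names below are hypothetical. ]

	#include <linux/workqueue.h>

	static void my_teardown(struct my_dev *dev)
	{
		/* wait for a running callback and prevent re-arming */
		cancel_delayed_work_sync(&dev->timeout_work);
		flush_workqueue(dev->wq);	/* drain queued work */
	}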
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index 882ca417f328..3ab874703d11 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -2333,9 +2333,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
+ 		dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
+ 				 __func__, c->delivery_system, fe->ops.info.type);
+ 
+-		/* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
+-		 * do it, it is done for it. */
+-		info->caps |= FE_CAN_INVERSION_AUTO;
++		/* Set CAN_INVERSION_AUTO bit on in other than oneshot mode */
++		if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
++			info->caps |= FE_CAN_INVERSION_AUTO;
+ 		err = 0;
+ 		break;
+ 	}
+diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
+index a2631be7ffac..08e0f0dd8728 100644
+--- a/drivers/media/dvb-frontends/tda1004x.c
++++ b/drivers/media/dvb-frontends/tda1004x.c
+@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
+ {
+ 	struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
+ 	struct tda1004x_state* state = fe->demodulator_priv;
++	int status;
+ 
+ 	dprintk("%s\n", __func__);
+ 
++	status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
++	if (status == -1)
++		return -EIO;
++
++	/* Only update the properties cache if device is locked */
++	if (!(status & 8))
++		return 0;
++
+ 	// inversion status
+ 	fe_params->inversion = INVERSION_OFF;
+ 	if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
+diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c
+index ac3cd74e824e..067db727e685 100644
+--- a/drivers/media/pci/saa7134/saa7134-alsa.c
++++ b/drivers/media/pci/saa7134/saa7134-alsa.c
+@@ -1218,6 +1218,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
+ 
+ static int alsa_device_exit(struct saa7134_dev *dev)
+ {
++	if (!snd_saa7134_cards[dev->nr])
++		return 1;
+ 
+ 	snd_card_free(snd_saa7134_cards[dev->nr]);
+ 	snd_saa7134_cards[dev->nr] = NULL;
+@@ -1267,7 +1269,8 @@ static void saa7134_alsa_exit(void)
+ 	int idx;
+ 
+ 	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+-		snd_card_free(snd_saa7134_cards[idx]);
++		if (snd_saa7134_cards[idx])
++			snd_card_free(snd_saa7134_cards[idx]);
+ 	}
+ 
+ 	saa7134_dmasound_init = NULL;
+diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
+index 7830aef3db45..40f77685cc4a 100644
+--- a/drivers/media/rc/sunxi-cir.c
++++ b/drivers/media/rc/sunxi-cir.c
+@@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
+ 	if (!ir)
+ 		return -ENOMEM;
+ 
++	spin_lock_init(&ir->ir_lock);
++
+ 	if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
+ 		ir->fifo_size = 64;
+ 	else
+diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
+index 146071b8e116..bfff1d1c70ab 100644
+--- a/drivers/media/usb/gspca/ov534.c
++++ b/drivers/media/usb/gspca/ov534.c
+@@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	struct sd *sd = (struct sd *) gspca_dev;
+ 
+-	/* Set requested framerate */
+-	sd->frame_rate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		/* Set default framerate */
++		sd->frame_rate = 30;
++	else
++		/* Set requested framerate */
++		sd->frame_rate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		set_frame_rate(gspca_dev);
+ 
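[ Sketch, not part of the patch: both gspca hunks guard a
  userspace-supplied v4l2_fract against division by zero - a zero
  timeperframe from VIDIOC_S_PARM means "use the default rate". ]

	#include <linux/videodev2.h>

	static int frame_rate_from_tpf(const struct v4l2_fract *tpf)
	{
		if (tpf->numerator == 0 || tpf->denominator == 0)
			return 30;	/* fall back to a sane default */
		return tpf->denominator / tpf->numerator;
	}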
+diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
+index c70ff406b07a..c028a5c2438e 100644
+--- a/drivers/media/usb/gspca/topro.c
++++ b/drivers/media/usb/gspca/topro.c
+@@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
+ 	struct v4l2_fract *tpf = &cp->timeperframe;
+ 	int fr, i;
+ 
+-	sd->framerate = tpf->denominator / tpf->numerator;
++	if (tpf->numerator == 0 || tpf->denominator == 0)
++		sd->framerate = 30;
++	else
++		sd->framerate = tpf->denominator / tpf->numerator;
++
+ 	if (gspca_dev->streaming)
+ 		setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
+ 
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index cf9d644a8aff..472eaad6fb78 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -2662,10 +2662,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+ 		return res | POLLERR;
+ 
+ 	/*
+-	 * For output streams you can write as long as there are fewer buffers
+-	 * queued than there are buffers available.
++	 * For output streams you can call write() as long as there are fewer
++	 * buffers queued than there are buffers available.
+ 	 */
+-	if (V4L2_TYPE_IS_OUTPUT(q->type) && q->queued_count < q->num_buffers)
++	if (V4L2_TYPE_IS_OUTPUT(q->type) && q->fileio && q->queued_count < q->num_buffers)
+ 		return res | POLLOUT | POLLWRNORM;
+ 
+ 	if (list_empty(&q->done_list))
+diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
+index 31a9ef256d06..ce3044883a42 100644
+--- a/drivers/mmc/core/sd.c
++++ b/drivers/mmc/core/sd.c
+@@ -661,9 +661,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
+ 	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ 	 */
+ 	if (!mmc_host_is_spi(card->host) &&
+-	    (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
+-	     card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
++		(card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
++		 card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
++		 card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
+ 		err = mmc_execute_tuning(card);
++
++		/*
++		 * As SD Specifications Part1 Physical Layer Specification
++		 * Version 3.01 says, CMD19 tuning is available for unlocked
++		 * cards in transfer state of 1.8V signaling mode. The small
++		 * difference between v3.00 and 3.01 spec means that CMD19
++		 * tuning is also available for DDR50 mode.
++		 */
++		if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
++			pr_warn("%s: ddr50 tuning failed\n",
++				mmc_hostname(card->host));
++			err = 0;
++		}
++	}
++
+ out:
+ 	kfree(status);
+ 
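[ Sketch, not part of the patch: the sd.c hunk keys tuning off the timing
  actually selected (host->ios.timing) rather than the card's advertised
  bus speed, and downgrades a DDR50 tuning failure to a warning since
  CMD19 tuning for DDR50 only appeared in spec v3.01. The helper below is
  hypothetical. ]

	static int maybe_tune(struct mmc_card *card)
	{
		int err = 0;

		if (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
		    card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
		    card->host->ios.timing == MMC_TIMING_UHS_SDR104) {
			err = mmc_execute_tuning(card);
			/* pre-3.01 cards may legitimately fail in DDR50 */
			if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50)
				err = 0;
		}
		return err;
	}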
+diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
+index 5bc6c7dbbd60..941beb3b5fa2 100644
+--- a/drivers/mmc/core/sdio.c
++++ b/drivers/mmc/core/sdio.c
+@@ -566,8 +566,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+ 	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
+ 	 */
+ 	if (!mmc_host_is_spi(card->host) &&
+-	    ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
+-	     (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
++	    ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
++	      (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
+ 		err = mmc_execute_tuning(card);
+ out:
+ 	return err;
+@@ -661,7 +661,7 @@ try_again:
+ 	 */
+ 	if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
+ 		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+-					ocr);
++					ocr_card);
+ 		if (err == -EAGAIN) {
+ 			sdio_reset(host);
+ 			mmc_go_idle(host);
+diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
+index fb266745f824..acece3299756 100644
+--- a/drivers/mmc/host/mmci.c
++++ b/drivers/mmc/host/mmci.c
+@@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
+ 	{
+ 		.id     = 0x00280180,
+ 		.mask   = 0x00ffffff,
+-		.data	= &variant_u300,
++		.data	= &variant_nomadik,
+ 	},
+ 	{
+ 		.id     = 0x00480180,
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index cbaf3df3ebd9..f47c4a8370be 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -555,9 +555,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 
+ 		BUG_ON(len > 65536);
+ 
+-		/* tran, valid */
+-		sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
+-		desc += host->desc_sz;
++		if (len) {
++			/* tran, valid */
++			sdhci_adma_write_desc(host, desc, addr, len,
++					      ADMA2_TRAN_VALID);
++			desc += host->desc_sz;
++		}
+ 
+ 		/*
+ 		 * If this triggers then we have a calculation bug
+@@ -2790,7 +2793,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || host->bus_on)
++	if (host->bus_on)
+ 		return;
+ 	host->bus_on = true;
+ 	pm_runtime_get_noresume(host->mmc->parent);
+@@ -2798,7 +2801,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
+ 
+ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
+ {
+-	if (host->runtime_suspended || !host->bus_on)
++	if (!host->bus_on)
+ 		return;
+ 	host->bus_on = false;
+ 	pm_runtime_put_noidle(host->mmc->parent);
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+new file mode 100644
+index 000000000000..d60a467a983c
+--- /dev/null
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+@@ -0,0 +1,2717 @@
++/******************************************************************************
++ *
++ * This file is provided under a dual BSD/GPLv2 license.  When using or
++ * redistributing this file, you may do so under either license.
++ *
++ * GPL LICENSE SUMMARY
++ *
++ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of version 2 of the GNU General Public License as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
++ * USA
++ *
++ * The full GNU General Public License is included in this distribution
++ * in the file called COPYING.
++ *
++ * Contact Information:
++ *  Intel Linux Wireless <linuxwifi@intel.com>
++ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
++ *
++ * BSD LICENSE
++ *
++ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ *  * Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ *  * Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in
++ *    the documentation and/or other materials provided with the
++ *    distribution.
++ *  * Neither the name Intel Corporation nor the names of its
++ *    contributors may be used to endorse or promote products derived
++ *    from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *****************************************************************************/
++#include <linux/pci.h>
++#include <linux/pci-aspm.h>
++#include <linux/interrupt.h>
++#include <linux/debugfs.h>
++#include <linux/sched.h>
++#include <linux/bitops.h>
++#include <linux/gfp.h>
++#include <linux/vmalloc.h>
++
++#include "iwl-drv.h"
++#include "iwl-trans.h"
++#include "iwl-csr.h"
++#include "iwl-prph.h"
++#include "iwl-scd.h"
++#include "iwl-agn-hw.h"
++#include "iwl-fw-error-dump.h"
++#include "internal.h"
++#include "iwl-fh.h"
++
++/* extended range in FW SRAM */
++#define IWL_FW_MEM_EXTENDED_START	0x40000
++#define IWL_FW_MEM_EXTENDED_END		0x57FFF
++
++static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	if (!trans_pcie->fw_mon_page)
++		return;
++
++	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
++		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
++	__free_pages(trans_pcie->fw_mon_page,
++		     get_order(trans_pcie->fw_mon_size));
++	trans_pcie->fw_mon_page = NULL;
++	trans_pcie->fw_mon_phys = 0;
++	trans_pcie->fw_mon_size = 0;
++}
++
++static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct page *page = NULL;
++	dma_addr_t phys;
++	u32 size = 0;
++	u8 power;
++
++	if (!max_power) {
++		/* default max_power is maximum */
++		max_power = 26;
++	} else {
++		max_power += 11;
++	}
++
++	if (WARN(max_power > 26,
++		 "External buffer size for monitor is too big %d, check the FW TLV\n",
++		 max_power))
++		return;
++
++	if (trans_pcie->fw_mon_page) {
++		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
++					   trans_pcie->fw_mon_size,
++					   DMA_FROM_DEVICE);
++		return;
++	}
++
++	phys = 0;
++	for (power = max_power; power >= 11; power--) {
++		int order;
++
++		size = BIT(power);
++		order = get_order(size);
++		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
++				   order);
++		if (!page)
++			continue;
++
++		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
++				    DMA_FROM_DEVICE);
++		if (dma_mapping_error(trans->dev, phys)) {
++			__free_pages(page, order);
++			page = NULL;
++			continue;
++		}
++		IWL_INFO(trans,
++			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
++			 size, order);
++		break;
++	}
++
++	if (WARN_ON_ONCE(!page))
++		return;
++
++	if (power != max_power)
++		IWL_ERR(trans,
++			"Sorry - debug buffer is only %luK while you requested %luK\n",
++			(unsigned long)BIT(power - 10),
++			(unsigned long)BIT(max_power - 10));
++
++	trans_pcie->fw_mon_page = page;
++	trans_pcie->fw_mon_phys = phys;
++	trans_pcie->fw_mon_size = size;
++}
++
++static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
++{
++	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
++		    ((reg & 0x0000ffff) | (2 << 28)));
++	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
++}
++
++static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
++{
++	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
++	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
++		    ((reg & 0x0000ffff) | (3 << 28)));
++}
++
++static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
++{
++	if (trans->cfg->apmg_not_supported)
++		return;
++
++	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
++		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
++				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
++				       ~APMG_PS_CTRL_MSK_PWR_SRC);
++	else
++		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
++				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
++				       ~APMG_PS_CTRL_MSK_PWR_SRC);
++}
++
++/* PCI registers */
++#define PCI_CFG_RETRY_TIMEOUT	0x041
++
++static void iwl_pcie_apm_config(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	u16 lctl;
++	u16 cap;
++
++	/*
++	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
++	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
++	 * If so (likely), disable L0S, so device moves directly L0->L1;
++	 *    costs negligible amount of power savings.
++	 * If not (unlikely), enable L0S, so there is at least some
++	 *    power savings, even without L1.
++	 */
++	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
++	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
++		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
++	else
++		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
++	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
++
++	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
++	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
++	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
++		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
++		 trans->ltr_enabled ? "En" : "Dis");
++}
++
++/*
++ * Start up NIC's basic functionality after it has been reset
++ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
++ * NOTE:  This does not load uCode nor start the embedded processor
++ */
++static int iwl_pcie_apm_init(struct iwl_trans *trans)
++{
++	int ret = 0;
++	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
++
++	/*
++	 * Use "set_bit" below rather than "write", to preserve any hardware
++	 * bits already set by default after reset.
++	 */
++
++	/* Disable L0S exit timer (platform NMI Work/Around) */
++	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
++		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
++			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
++
++	/*
++	 * Disable L0s without affecting L1;
++	 *  don't wait for ICH L0s (ICH bug W/A)
++	 */
++	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
++		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
++
++	/* Set FH wait threshold to maximum (HW error during stress W/A) */
++	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
++
++	/*
++	 * Enable HAP INTA (interrupt from management bus) to
++	 * wake device's PCI Express link L1a -> L0s
++	 */
++	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
++
++	iwl_pcie_apm_config(trans);
++
++	/* Configure analog phase-lock-loop before activating to D0A */
++	if (trans->cfg->base_params->pll_cfg_val)
++		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
++			    trans->cfg->base_params->pll_cfg_val);
++
++	/*
++	 * Set "initialization complete" bit to move adapter from
++	 * D0U* --> D0A* (powered-up active) state.
++	 */
++	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++	/*
++	 * Wait for clock stabilization; once stabilized, access to
++	 * device-internal resources is supported, e.g. iwl_write_prph()
++	 * and accesses to uCode SRAM.
++	 */
++	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
++	if (ret < 0) {
++		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
++		goto out;
++	}
++
++	if (trans->cfg->host_interrupt_operation_mode) {
++		/*
++		 * This is a bit of an abuse - it is needed for 7260 / 3160
++		 * only, so we check host_interrupt_operation_mode even though
++		 * what follows is not strictly related to it.
++		 *
++		 * Enable the oscillator to count wake up time for L1 exit. This
++		 * consumes slightly more power (100uA) - but allows us to be
++		 * sure that we wake up from L1 on time.
++		 *
++		 * This looks weird: read twice the same register, discard the
++		 * value, set a bit, and yet again, read that same register
++		 * just to discard the value. But that's the way the hardware
++		 * seems to like it.
++		 */
++		iwl_read_prph(trans, OSC_CLK);
++		iwl_read_prph(trans, OSC_CLK);
++		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
++		iwl_read_prph(trans, OSC_CLK);
++		iwl_read_prph(trans, OSC_CLK);
++	}
++
++	/*
++	 * Enable DMA clock and wait for it to stabilize.
++	 *
++	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
++	 * bits do not disable clocks.  This preserves any hardware
++	 * bits already set by default in "CLK_CTRL_REG" after reset.
++	 */
++	if (!trans->cfg->apmg_not_supported) {
++		iwl_write_prph(trans, APMG_CLK_EN_REG,
++			       APMG_CLK_VAL_DMA_CLK_RQT);
++		udelay(20);
++
++		/* Disable L1-Active */
++		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
++				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
++
++		/* Clear the interrupt in APMG if the NIC is in RFKILL */
++		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
++			       APMG_RTC_INT_STT_RFKILL);
++	}
++
++	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
++
++out:
++	return ret;
++}
++
++/*
++ * Enable LP XTAL to avoid HW bug where device may consume much power if
++ * FW is not loaded after device reset. LP XTAL is disabled by default
++ * after device HW reset. Do it only if XTAL is fed by internal source.
++ * Configure device's "persistence" mode to avoid resetting XTAL again when
++ * SHRD_HW_RST occurs in S3.
++ */
++static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
++{
++	int ret;
++	u32 apmg_gp1_reg;
++	u32 apmg_xtal_cfg_reg;
++	u32 dl_cfg_reg;
++
++	/* Force XTAL ON */
++	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
++				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++
++	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
++	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++	udelay(10);
++
++	/*
++	 * Set "initialization complete" bit to move adapter from
++	 * D0U* --> D0A* (powered-up active) state.
++	 */
++	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++	/*
++	 * Wait for clock stabilization; once stabilized, access to
++	 * device-internal resources is possible.
++	 */
++	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++			   25000);
++	if (WARN_ON(ret < 0)) {
++		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
++		/* Release XTAL ON request */
++		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++		return;
++	}
++
++	/*
++	 * Clear "disable persistence" to avoid LP XTAL resetting when
++	 * SHRD_HW_RST is applied in S3.
++	 */
++	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
++				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);
++
++	/*
++	 * Force APMG XTAL to be active to prevent its disabling by HW
++	 * caused by APMG idle state.
++	 */
++	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
++						    SHR_APMG_XTAL_CFG_REG);
++	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
++				 apmg_xtal_cfg_reg |
++				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
++
++	/*
++	 * Reset entire device again - do controller reset (results in
++	 * SHRD_HW_RST). Turn MAC off before proceeding.
++	 */
++	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++	udelay(10);
++
++	/* Enable LP XTAL by indirect access through CSR */
++	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
++	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
++				 SHR_APMG_GP1_WF_XTAL_LP_EN |
++				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
++
++	/* Clear delay line clock power up */
++	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
++	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
++				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
++
++	/*
++	 * Enable persistence mode to avoid LP XTAL resetting when
++	 * SHRD_HW_RST is applied in S3.
++	 */
++	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
++
++	/*
++	 * Clear "initialization complete" bit to move adapter from
++	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
++	 */
++	iwl_clear_bit(trans, CSR_GP_CNTRL,
++		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++	/* Activates XTAL resources monitor */
++	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
++				 CSR_MONITOR_XTAL_RESOURCES);
++
++	/* Release XTAL ON request */
++	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
++	udelay(10);
++
++	/* Release APMG XTAL */
++	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
++				 apmg_xtal_cfg_reg &
++				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
++}
++
++static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
++{
++	int ret = 0;
++
++	/* stop device's busmaster DMA activity */
++	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
++
++	ret = iwl_poll_bit(trans, CSR_RESET,
++			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
++			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
++	if (ret < 0)
++		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
++
++	IWL_DEBUG_INFO(trans, "stop master\n");
++
++	return ret;
++}
++
++static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
++{
++	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
++
++	if (op_mode_leave) {
++		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
++			iwl_pcie_apm_init(trans);
++
++		/* inform ME that we are leaving */
++		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
++			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
++					  APMG_PCIDEV_STT_VAL_WAKE_ME);
++		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
++			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
++			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++				    CSR_HW_IF_CONFIG_REG_PREPARE |
++				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
++			mdelay(1);
++			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
++		}
++		mdelay(5);
++	}
++
++	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
++
++	/* Stop device's DMA activity */
++	iwl_pcie_apm_stop_master(trans);
++
++	if (trans->cfg->lp_xtal_workaround) {
++		iwl_pcie_apm_lp_xtal_enable(trans);
++		return;
++	}
++
++	/* Reset the entire device */
++	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++	udelay(10);
++
++	/*
++	 * Clear "initialization complete" bit to move adapter from
++	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
++	 */
++	iwl_clear_bit(trans, CSR_GP_CNTRL,
++		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++}
++
++static int iwl_pcie_nic_init(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	/* nic_init */
++	spin_lock(&trans_pcie->irq_lock);
++	iwl_pcie_apm_init(trans);
++
++	spin_unlock(&trans_pcie->irq_lock);
++
++	iwl_pcie_set_pwr(trans, false);
++
++	iwl_op_mode_nic_config(trans->op_mode);
++
++	/* Allocate the RX queue, or reset if it is already allocated */
++	iwl_pcie_rx_init(trans);
++
++	/* Allocate or reset and init all Tx and Command queues */
++	if (iwl_pcie_tx_init(trans))
++		return -ENOMEM;
++
++	if (trans->cfg->base_params->shadow_reg_enable) {
++		/* enable shadow regs in HW */
++		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
++		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
++	}
++
++	return 0;
++}
++
++#define HW_READY_TIMEOUT (50)
++
++/* Note: returns poll_bit return value, which is >= 0 if success */
++static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
++{
++	int ret;
++
++	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
++
++	/* See if we got it */
++	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
++			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
++			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
++			   HW_READY_TIMEOUT);
++
++	if (ret >= 0)
++		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
++
++	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
++	return ret;
++}
++
++/* Note: returns standard 0/-ERROR code */
++static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
++{
++	int ret;
++	int t = 0;
++	int iter;
++
++	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
++
++	ret = iwl_pcie_set_hw_ready(trans);
++	/* If the card is ready, exit 0 */
++	if (ret >= 0)
++		return 0;
++
++	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
++	msleep(1);
++
++	for (iter = 0; iter < 10; iter++) {
++		/* If HW is not ready, prepare the conditions to check again */
++		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++			    CSR_HW_IF_CONFIG_REG_PREPARE);
++
++		do {
++			ret = iwl_pcie_set_hw_ready(trans);
++			if (ret >= 0)
++				return 0;
++
++			usleep_range(200, 1000);
++			t += 200;
++		} while (t < 150000);
++		msleep(25);
++	}
++
++	IWL_ERR(trans, "Couldn't prepare the card\n");
++
++	return ret;
++}
++
++/*
++ * ucode
++ */
++static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
++				   dma_addr_t phy_addr, u32 byte_cnt)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int ret;
++
++	trans_pcie->ucode_write_complete = false;
++
++	iwl_write_direct32(trans,
++			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
++			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
++
++	iwl_write_direct32(trans,
++			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
++			   dst_addr);
++
++	iwl_write_direct32(trans,
++			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
++			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
++
++	iwl_write_direct32(trans,
++			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
++			   (iwl_get_dma_hi_addr(phy_addr)
++				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
++
++	iwl_write_direct32(trans,
++			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
++			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
++			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
++			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
++
++	iwl_write_direct32(trans,
++			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
++			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
++			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
++			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
++
++	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
++				 trans_pcie->ucode_write_complete, 5 * HZ);
++	if (!ret) {
++		IWL_ERR(trans, "Failed to load firmware chunk!\n");
++		return -ETIMEDOUT;
++	}
++
++	return 0;
++}
++
++static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
++			    const struct fw_desc *section)
++{
++	u8 *v_addr;
++	dma_addr_t p_addr;
++	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
++	int ret = 0;
++
++	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
++		     section_num);
++
++	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
++				    GFP_KERNEL | __GFP_NOWARN);
++	if (!v_addr) {
++		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
++		chunk_sz = PAGE_SIZE;
++		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
++					    &p_addr, GFP_KERNEL);
++		if (!v_addr)
++			return -ENOMEM;
++	}
++
++	for (offset = 0; offset < section->len; offset += chunk_sz) {
++		u32 copy_size, dst_addr;
++		bool extended_addr = false;
++
++		copy_size = min_t(u32, chunk_sz, section->len - offset);
++		dst_addr = section->offset + offset;
++
++		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
++		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
++			extended_addr = true;
++
++		if (extended_addr)
++			iwl_set_bits_prph(trans, LMPM_CHICK,
++					  LMPM_CHICK_EXTENDED_ADDR_SPACE);
++
++		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
++		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
++						   copy_size);
++
++		if (extended_addr)
++			iwl_clear_bits_prph(trans, LMPM_CHICK,
++					    LMPM_CHICK_EXTENDED_ADDR_SPACE);
++
++		if (ret) {
++			IWL_ERR(trans,
++				"Could not load the [%d] uCode section\n",
++				section_num);
++			break;
++		}
++	}
++
++	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
++	return ret;
++}
++
++/*
++ * The driver takes ownership of the secure machine before FW load
++ * and prevents a race with the BT load.
++ * W/A for ROM bug (should be removed in the next Si step).
++ */
++static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
++{
++	u32 val, loop = 1000;
++
++	/*
++	 * Check the RSA semaphore is accessible.
++	 * If the HW isn't locked and the rsa semaphore isn't accessible,
++	 * we are in trouble.
++	 */
++	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
++	if (val & (BIT(1) | BIT(17))) {
++		IWL_INFO(trans,
++			 "can't access the RSA semaphore, it is write protected\n");
++		return 0;
++	}
++
++	/* take ownership on the AUX IF */
++	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
++	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);
++
++	do {
++		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
++		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
++		if (val == 0x1) {
++			iwl_write_prph(trans, RSA_ENABLE, 0);
++			return 0;
++		}
++
++		udelay(10);
++		loop--;
++	} while (loop > 0);
++
++	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
++	return -EIO;
++}
++
++static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
++					   const struct fw_img *image,
++					   int cpu,
++					   int *first_ucode_section)
++{
++	int shift_param;
++	int i, ret = 0, sec_num = 0x1;
++	u32 val, last_read_idx = 0;
++
++	if (cpu == 1) {
++		shift_param = 0;
++		*first_ucode_section = 0;
++	} else {
++		shift_param = 16;
++		(*first_ucode_section)++;
++	}
++
++	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
++		last_read_idx = i;
++
++		/*
++		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
++		 * CPU1 to CPU2.
++		 * PAGING_SEPARATOR_SECTION delimiter - separate between
++		 * CPU2 non paged to CPU2 paging sec.
++		 */
++		if (!image->sec[i].data ||
++		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
++		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
++			IWL_DEBUG_FW(trans,
++				     "Break since Data not valid or Empty section, sec = %d\n",
++				     i);
++			break;
++		}
++
++		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
++		if (ret)
++			return ret;
++
++		/* Notify the ucode of the loaded section number and status */
++		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
++		val = val | (sec_num << shift_param);
++		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
++		sec_num = (sec_num << 1) | 0x1;
++	}
++
++	*first_ucode_section = last_read_idx;
++
++	if (cpu == 1)
++		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
++	else
++		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
++
++	return 0;
++}
++
++static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
++				      const struct fw_img *image,
++				      int cpu,
++				      int *first_ucode_section)
++{
++	int shift_param;
++	int i, ret = 0;
++	u32 last_read_idx = 0;
++
++	if (cpu == 1) {
++		shift_param = 0;
++		*first_ucode_section = 0;
++	} else {
++		shift_param = 16;
++		(*first_ucode_section)++;
++	}
++
++	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
++		last_read_idx = i;
++
++		/*
++		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
++		 * CPU1 to CPU2.
++		 * PAGING_SEPARATOR_SECTION delimiter - separate between
++		 * CPU2 non paged to CPU2 paging sec.
++		 */
++		if (!image->sec[i].data ||
++		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
++		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
++			IWL_DEBUG_FW(trans,
++				     "Break since Data not valid or Empty section, sec = %d\n",
++				     i);
++			break;
++		}
++
++		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
++		if (ret)
++			return ret;
++	}
++
++	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++		iwl_set_bits_prph(trans,
++				  CSR_UCODE_LOAD_STATUS_ADDR,
++				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
++				   LMPM_CPU_HDRS_LOADING_COMPLETED |
++				   LMPM_CPU_UCODE_LOADING_STARTED) <<
++					shift_param);
++
++	*first_ucode_section = last_read_idx;
++
++	return 0;
++}
++
++static void iwl_pcie_apply_destination(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
++	int i;
++
++	if (dest->version)
++		IWL_ERR(trans,
++			"DBG DEST version is %d - expect issues\n",
++			dest->version);
++
++	IWL_INFO(trans, "Applying debug destination %s\n",
++		 get_fw_dbg_mode_string(dest->monitor_mode));
++
++	if (dest->monitor_mode == EXTERNAL_MODE)
++		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
++	else
++		IWL_WARN(trans, "PCI should have external buffer debug\n");
++
++	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
++		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
++		u32 val = le32_to_cpu(dest->reg_ops[i].val);
++
++		switch (dest->reg_ops[i].op) {
++		case CSR_ASSIGN:
++			iwl_write32(trans, addr, val);
++			break;
++		case CSR_SETBIT:
++			iwl_set_bit(trans, addr, BIT(val));
++			break;
++		case CSR_CLEARBIT:
++			iwl_clear_bit(trans, addr, BIT(val));
++			break;
++		case PRPH_ASSIGN:
++			iwl_write_prph(trans, addr, val);
++			break;
++		case PRPH_SETBIT:
++			iwl_set_bits_prph(trans, addr, BIT(val));
++			break;
++		case PRPH_CLEARBIT:
++			iwl_clear_bits_prph(trans, addr, BIT(val));
++			break;
++		case PRPH_BLOCKBIT:
++			if (iwl_read_prph(trans, addr) & BIT(val)) {
++				IWL_ERR(trans,
++					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
++					val, addr);
++				goto monitor;
++			}
++			break;
++		default:
++			IWL_ERR(trans, "FW debug - unknown OP %d\n",
++				dest->reg_ops[i].op);
++			break;
++		}
++	}
++
++monitor:
++	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
++		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
++			       trans_pcie->fw_mon_phys >> dest->base_shift);
++		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++				       (trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size - 256) >>
++						dest->end_shift);
++		else
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++				       (trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size) >>
++						dest->end_shift);
++	}
++}
++
++static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
++				const struct fw_img *image)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int ret = 0;
++	int first_ucode_section;
++
++	IWL_DEBUG_FW(trans, "working with %s CPU\n",
++		     image->is_dual_cpus ? "Dual" : "Single");
++
++	/* load to FW the binary non secured sections of CPU1 */
++	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
++	if (ret)
++		return ret;
++
++	if (image->is_dual_cpus) {
++		/* set CPU2 header address */
++		iwl_write_prph(trans,
++			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
++			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);
++
++		/* load to FW the binary sections of CPU2 */
++		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
++						 &first_ucode_section);
++		if (ret)
++			return ret;
++	}
++
++	/* supported for 7000 only for the moment */
++	if (iwlwifi_mod_params.fw_monitor &&
++	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
++		iwl_pcie_alloc_fw_monitor(trans, 0);
++
++		if (trans_pcie->fw_mon_size) {
++			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
++				       trans_pcie->fw_mon_phys >> 4);
++			iwl_write_prph(trans, MON_BUFF_END_ADDR,
++				       (trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size) >> 4);
++		}
++	} else if (trans->dbg_dest_tlv) {
++		iwl_pcie_apply_destination(trans);
++	}
++
++	/* release CPU reset */
++	iwl_write32(trans, CSR_RESET, 0);
++
++	return 0;
++}
++
++static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
++					  const struct fw_img *image)
++{
++	int ret = 0;
++	int first_ucode_section;
++
++	IWL_DEBUG_FW(trans, "working with %s CPU\n",
++		     image->is_dual_cpus ? "Dual" : "Single");
++
++	if (trans->dbg_dest_tlv)
++		iwl_pcie_apply_destination(trans);
++
++	/* TODO: remove in the next Si step */
++	ret = iwl_pcie_rsa_race_bug_wa(trans);
++	if (ret)
++		return ret;
++
++	/* configure the ucode to be ready to get the secured image */
++	/* release CPU reset */
++	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
++
++	/* load to FW the binary Secured sections of CPU1 */
++	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
++					      &first_ucode_section);
++	if (ret)
++		return ret;
++
++	/* load to FW the binary sections of CPU2 */
++	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
++					       &first_ucode_section);
++}
++
++static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
++				   const struct fw_img *fw, bool run_in_rfkill)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	bool hw_rfkill;
++	int ret;
++
++	mutex_lock(&trans_pcie->mutex);
++
++	/* Someone called stop_device, don't try to start_fw */
++	if (trans_pcie->is_down) {
++		IWL_WARN(trans,
++			 "Can't start_fw since the HW hasn't been started\n");
++		ret = -EIO;
++		goto out;
++	}
++
++	/* This may fail if AMT took ownership of the device */
++	if (iwl_pcie_prepare_card_hw(trans)) {
++		IWL_WARN(trans, "Exit HW not ready\n");
++		ret = -EIO;
++		goto out;
++	}
++
++	iwl_enable_rfkill_int(trans);
++
++	/* If platform's RF_KILL switch is NOT set to KILL */
++	hw_rfkill = iwl_is_rfkill_set(trans);
++	if (hw_rfkill)
++		set_bit(STATUS_RFKILL, &trans->status);
++	else
++		clear_bit(STATUS_RFKILL, &trans->status);
++	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++	if (hw_rfkill && !run_in_rfkill) {
++		ret = -ERFKILL;
++		goto out;
++	}
++
++	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
++
++	ret = iwl_pcie_nic_init(trans);
++	if (ret) {
++		IWL_ERR(trans, "Unable to init nic\n");
++		goto out;
++	}
++
++	/* make sure rfkill handshake bits are cleared */
++	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
++		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
++
++	/* clear (again), then enable host interrupts */
++	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
++	iwl_enable_interrupts(trans);
++
++	/* really make sure rfkill handshake bits are cleared */
++	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
++
++	/* Load the given image to the HW */
++	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
++	else
++		ret = iwl_pcie_load_given_ucode(trans, fw);
++
++out:
++	mutex_unlock(&trans_pcie->mutex);
++	return ret;
++}
++
++static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
++{
++	iwl_pcie_reset_ict(trans);
++	iwl_pcie_tx_start(trans, scd_addr);
++}
++
++static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	bool hw_rfkill, was_hw_rfkill;
++
++	lockdep_assert_held(&trans_pcie->mutex);
++
++	if (trans_pcie->is_down)
++		return;
++
++	trans_pcie->is_down = true;
++
++	was_hw_rfkill = iwl_is_rfkill_set(trans);
++
++	/* tell the device to stop sending interrupts */
++	spin_lock(&trans_pcie->irq_lock);
++	iwl_disable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++
++	/* device going down, Stop using ICT table */
++	iwl_pcie_disable_ict(trans);
++
++	/*
++	 * If a HW restart happens during firmware loading,
++	 * then the firmware loading might call this function
++	 * and later it might be called again due to the
++	 * restart. So don't process again if the device is
++	 * already dead.
++	 */
++	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
++		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
++		iwl_pcie_tx_stop(trans);
++		iwl_pcie_rx_stop(trans);
++
++		/* Power-down device's busmaster DMA clocks */
++		if (!trans->cfg->apmg_not_supported) {
++			iwl_write_prph(trans, APMG_CLK_DIS_REG,
++				       APMG_CLK_VAL_DMA_CLK_RQT);
++			udelay(5);
++		}
++	}
++
++	/* Make sure (redundant) we've released our request to stay awake */
++	iwl_clear_bit(trans, CSR_GP_CNTRL,
++		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++
++	/* Stop the device, and put it in low power state */
++	iwl_pcie_apm_stop(trans, false);
++
++	/* stop and reset the on-board processor */
++	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++	udelay(20);
++
++	/*
++	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
++	 * This is a bug in certain versions of the hardware.
++	 * Certain devices also keep sending HW RF kill interrupt all
++	 * the time, unless the interrupt is ACKed even if the interrupt
++	 * should be masked. Re-ACK all the interrupts here.
++	 */
++	spin_lock(&trans_pcie->irq_lock);
++	iwl_disable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++
++	/* clear all status bits */
++	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
++	clear_bit(STATUS_INT_ENABLED, &trans->status);
++	clear_bit(STATUS_TPOWER_PMI, &trans->status);
++	clear_bit(STATUS_RFKILL, &trans->status);
++
++	/*
++	 * Even if we stop the HW, we still want the RF kill
++	 * interrupt
++	 */
++	iwl_enable_rfkill_int(trans);
++
++	/*
++	 * Check again since the RF kill state may have changed while
++	 * all the interrupts were disabled, in this case we couldn't
++	 * receive the RF kill interrupt and update the state in the
++	 * op_mode.
++	 * Don't call the op_mode if the rfkill state hasn't changed.
++	 * This allows the op_mode to call stop_device from the rfkill
++	 * notification without endless recursion. Under very rare
++	 * circumstances, we might have a small recursion if the rfkill
++	 * state changed exactly now while we were called from stop_device.
++	 * This is very unlikely but can happen and is supported.
++	 */
++	hw_rfkill = iwl_is_rfkill_set(trans);
++	if (hw_rfkill)
++		set_bit(STATUS_RFKILL, &trans->status);
++	else
++		clear_bit(STATUS_RFKILL, &trans->status);
++	if (hw_rfkill != was_hw_rfkill)
++		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++
++	/* re-take ownership to prevent other users from stealing the device */
++	iwl_pcie_prepare_card_hw(trans);
++}
++
++static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	mutex_lock(&trans_pcie->mutex);
++	_iwl_trans_pcie_stop_device(trans, low_power);
++	mutex_unlock(&trans_pcie->mutex);
++}
++
++void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
++{
++	struct iwl_trans_pcie __maybe_unused *trans_pcie =
++		IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	lockdep_assert_held(&trans_pcie->mutex);
++
++	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
++		_iwl_trans_pcie_stop_device(trans, true);
++}
++
++static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
++		/* Enable persistence mode to avoid reset */
++		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
++			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
++	}
++
++	iwl_disable_interrupts(trans);
++
++	/*
++	 * in testing mode, the host stays awake and the
++	 * hardware won't be reset (not even partially)
++	 */
++	if (test)
++		return;
++
++	iwl_pcie_disable_ict(trans);
++
++	synchronize_irq(trans_pcie->pci_dev->irq);
++
++	iwl_clear_bit(trans, CSR_GP_CNTRL,
++		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++	iwl_clear_bit(trans, CSR_GP_CNTRL,
++		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
++		/*
++		 * reset TX queues -- some of their registers reset during S3
++		 * so if we don't reset everything here the D3 image would try
++		 * to execute some invalid memory upon resume
++		 */
++		iwl_trans_pcie_tx_reset(trans);
++	}
++
++	iwl_pcie_set_pwr(trans, true);
++}
++
++static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
++				    enum iwl_d3_status *status,
++				    bool test)
++{
++	u32 val;
++	int ret;
++
++	if (test) {
++		iwl_enable_interrupts(trans);
++		*status = IWL_D3_STATUS_ALIVE;
++		return 0;
++	}
++
++	/*
++	 * Also enables interrupts - none will happen as the device doesn't
++	 * know we're waking it up until the opmode actually tells it
++	 * after this call.
++	 */
++	iwl_pcie_reset_ict(trans);
++
++	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++
++	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++		udelay(2);
++
++	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++			   25000);
++	if (ret < 0) {
++		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
++		return ret;
++	}
++
++	iwl_pcie_set_pwr(trans, false);
++
++	if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
++		iwl_clear_bit(trans, CSR_GP_CNTRL,
++			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++	} else {
++		iwl_trans_pcie_tx_reset(trans);
++
++		ret = iwl_pcie_rx_init(trans);
++		if (ret) {
++			IWL_ERR(trans,
++				"Failed to resume the device (RX reset)\n");
++			return ret;
++		}
++	}
++
++	val = iwl_read32(trans, CSR_RESET);
++	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
++		*status = IWL_D3_STATUS_RESET;
++	else
++		*status = IWL_D3_STATUS_ALIVE;
++
++	return 0;
++}
++
++static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	bool hw_rfkill;
++	int err;
++
++	lockdep_assert_held(&trans_pcie->mutex);
++
++	err = iwl_pcie_prepare_card_hw(trans);
++	if (err) {
++		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
++		return err;
++	}
++
++	/* Reset the entire device */
++	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
++
++	usleep_range(10, 15);
++
++	iwl_pcie_apm_init(trans);
++
++	/* From now on, the op_mode will be kept updated about RF kill state */
++	iwl_enable_rfkill_int(trans);
++
++	/* Set is_down to false here so that... */
++	trans_pcie->is_down = false;
++
++	hw_rfkill = iwl_is_rfkill_set(trans);
++	if (hw_rfkill)
++		set_bit(STATUS_RFKILL, &trans->status);
++	else
++		clear_bit(STATUS_RFKILL, &trans->status);
++	/* ... rfkill can call stop_device and set it false if needed */
++	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
++
++	return 0;
++}
++
++static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int ret;
++
++	mutex_lock(&trans_pcie->mutex);
++	ret = _iwl_trans_pcie_start_hw(trans, low_power);
++	mutex_unlock(&trans_pcie->mutex);
++
++	return ret;
++}
++
++static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	mutex_lock(&trans_pcie->mutex);
++
++	/* disable interrupts - don't enable HW RF kill interrupt */
++	spin_lock(&trans_pcie->irq_lock);
++	iwl_disable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++
++	iwl_pcie_apm_stop(trans, true);
++
++	spin_lock(&trans_pcie->irq_lock);
++	iwl_disable_interrupts(trans);
++	spin_unlock(&trans_pcie->irq_lock);
++
++	iwl_pcie_disable_ict(trans);
++
++	mutex_unlock(&trans_pcie->mutex);
++
++	synchronize_irq(trans_pcie->pci_dev->irq);
++}
++
++static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
++{
++	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
++static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
++{
++	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
++static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
++{
++	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
++}
++
++static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
++{
++	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
++			       ((reg & 0x000FFFFF) | (3 << 24)));
++	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
++}
++
++static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
++				      u32 val)
++{
++	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
++			       ((addr & 0x000FFFFF) | (3 << 24)));
++	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
++}
++
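++/*
++ * Dummy NAPI poll handler: the NAPI context exists only so the RX path
++ * can feed packets into GRO; this callback is never expected to run.
++ */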
++static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
++{
++	WARN_ON(1);
++	return 0;
++}
++
++static void iwl_trans_pcie_configure(struct iwl_trans *trans,
++				     const struct iwl_trans_config *trans_cfg)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
++	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
++	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
++	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
++		trans_pcie->n_no_reclaim_cmds = 0;
++	else
++		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
++	if (trans_pcie->n_no_reclaim_cmds)
++		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
++		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
++
++	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
++	trans_pcie->rx_page_order =
++		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
++
++	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
++	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
++	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
++	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
++
++	trans->command_groups = trans_cfg->command_groups;
++	trans->command_groups_size = trans_cfg->command_groups_size;
++
++	/* init ref_count to 1 (should be cleared when ucode is loaded) */
++	trans_pcie->ref_count = 1;
++
++	/* Initialize NAPI here - it should be before registering to mac80211
++	 * in the opmode but after the HW struct is allocated.
++	 * As this function may be called again in some corner cases, don't
++	 * do anything if NAPI was already initialized.
++	 */
++	if (!trans_pcie->napi.poll) {
++		init_dummy_netdev(&trans_pcie->napi_dev);
++		netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
++			       iwl_pcie_dummy_napi_poll, 64);
++	}
++}
++
++void iwl_trans_pcie_free(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int i;
++
++	synchronize_irq(trans_pcie->pci_dev->irq);
++
++	iwl_pcie_tx_free(trans);
++	iwl_pcie_rx_free(trans);
++
++	free_irq(trans_pcie->pci_dev->irq, trans);
++	iwl_pcie_free_ict(trans);
++
++	pci_disable_msi(trans_pcie->pci_dev);
++	iounmap(trans_pcie->hw_base);
++	pci_release_regions(trans_pcie->pci_dev);
++	pci_disable_device(trans_pcie->pci_dev);
++
++	if (trans_pcie->napi.poll)
++		netif_napi_del(&trans_pcie->napi);
++
++	iwl_pcie_free_fw_monitor(trans);
++
++	for_each_possible_cpu(i) {
++		struct iwl_tso_hdr_page *p =
++			per_cpu_ptr(trans_pcie->tso_hdr_page, i);
++
++		if (p->page)
++			__free_page(p->page);
++	}
++
++	free_percpu(trans_pcie->tso_hdr_page);
++	iwl_trans_free(trans);
++}
++
++static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
++{
++	if (state)
++		set_bit(STATUS_TPOWER_PMI, &trans->status);
++	else
++		clear_bit(STATUS_TPOWER_PMI, &trans->status);
++}
++
++static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
++					   unsigned long *flags)
++{
++	int ret;
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
++
++	if (trans_pcie->cmd_hold_nic_awake)
++		goto out;
++
++	/* this bit wakes up the NIC */
++	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
++				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++		udelay(2);
++
++	/*
++	 * These bits say the device is running, and should keep running for
++	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
++	 * but they do not indicate that embedded SRAM is restored yet;
++	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
++	 * to/from host DRAM when sleeping/waking for power-saving.
++	 * Each direction takes approximately 1/4 millisecond; with this
++	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
++	 * series of register accesses is expected (e.g. reading Event Log),
++	 * to keep device from sleeping.
++	 *
++	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
++	 * SRAM is okay/restored.  We don't check that here because this call
++	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
++	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
++	 *
++	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
++	 * and do not save/restore SRAM when power cycling.
++	 */
++	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
++			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
++			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
++	if (unlikely(ret < 0)) {
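++		/* force an NMI in the device so the firmware dumps its state */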
++		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
++		WARN_ONCE(1,
++			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
++			  iwl_read32(trans, CSR_GP_CNTRL));
++		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++		return false;
++	}
++
++out:
++	/*
++	 * Fool sparse by faking that we release the lock - sparse will
++	 * track nic_access anyway.
++	 */
++	__release(&trans_pcie->reg_lock);
++	return true;
++}
++
++static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
++					      unsigned long *flags)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	lockdep_assert_held(&trans_pcie->reg_lock);
++
++	/*
++	 * Fool sparse by faking that we acquire the lock - sparse will
++	 * track nic_access anyway.
++	 */
++	__acquire(&trans_pcie->reg_lock);
++
++	if (trans_pcie->cmd_hold_nic_awake)
++		goto out;
++
++	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
++				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
++	/*
++	 * Above we read the CSR_GP_CNTRL register, which will flush
++	 * any previous writes, but we need the write that clears the
++	 * MAC_ACCESS_REQ bit to be performed before any other writes
++	 * scheduled on different CPUs (after we drop reg_lock).
++	 */
++	mmiowb();
++out:
++	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
++}
++
++static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
++				   void *buf, int dwords)
++{
++	unsigned long flags;
++	int offs, ret = 0;
++	u32 *vals = buf;
++
++	if (iwl_trans_grab_nic_access(trans, &flags)) {
++		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
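++		/* the target address auto-increments on each RDAT read */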
++		for (offs = 0; offs < dwords; offs++)
++			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
++		iwl_trans_release_nic_access(trans, &flags);
++	} else {
++		ret = -EBUSY;
++	}
++	return ret;
++}
++
++static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
++				    const void *buf, int dwords)
++{
++	unsigned long flags;
++	int offs, ret = 0;
++	const u32 *vals = buf;
++
++	if (iwl_trans_grab_nic_access(trans, &flags)) {
++		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
++		for (offs = 0; offs < dwords; offs++)
++			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
++				    vals ? vals[offs] : 0);
++		iwl_trans_release_nic_access(trans, &flags);
++	} else {
++		ret = -EBUSY;
++	}
++	return ret;
++}
++
++static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
++					    unsigned long txqs,
++					    bool freeze)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int queue;
++
++	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
++		struct iwl_txq *txq = &trans_pcie->txq[queue];
++		unsigned long now;
++
++		spin_lock_bh(&txq->lock);
++
++		now = jiffies;
++
++		if (txq->frozen == freeze)
++			goto next_queue;
++
++		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
++				    freeze ? "Freezing" : "Waking", queue);
++
++		txq->frozen = freeze;
++
++		if (txq->q.read_ptr == txq->q.write_ptr)
++			goto next_queue;
++
++		if (freeze) {
++			if (unlikely(time_after(now,
++						txq->stuck_timer.expires))) {
++				/*
++				 * The timer should have fired, maybe it is
++				 * spinning right now on the lock.
++				 */
++				goto next_queue;
++			}
++			/* remember how long until the timer fires */
++			txq->frozen_expiry_remainder =
++				txq->stuck_timer.expires - now;
++			del_timer(&txq->stuck_timer);
++			goto next_queue;
++		}
++
++		/*
++		 * Wake a non-empty queue -> arm timer with the
++		 * remainder before it froze
++		 */
++		mod_timer(&txq->stuck_timer,
++			  now + txq->frozen_expiry_remainder);
++
++next_queue:
++		spin_unlock_bh(&txq->lock);
++	}
++}
++
++static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int i;
++
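++	/*
++	 * txq->block nests: the HW write pointer is only updated again
++	 * once the last blocker has released the queue.
++	 */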
++	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
++		struct iwl_txq *txq = &trans_pcie->txq[i];
++
++		if (i == trans_pcie->cmd_queue)
++			continue;
++
++		spin_lock_bh(&txq->lock);
++
++		if (!block && !(WARN_ON_ONCE(!txq->block))) {
++			txq->block--;
++			if (!txq->block) {
++				iwl_write32(trans, HBUS_TARG_WRPTR,
++					    txq->q.write_ptr | (i << 8));
++			}
++		} else if (block) {
++			txq->block++;
++		}
++
++		spin_unlock_bh(&txq->lock);
++	}
++}
++
++#define IWL_FLUSH_WAIT_MS	2000
++
++static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct iwl_txq *txq;
++	struct iwl_queue *q;
++	int cnt;
++	unsigned long now = jiffies;
++	u32 scd_sram_addr;
++	u8 buf[16];
++	int ret = 0;
++
++	/* waiting for all the tx frames to complete might take a while */
++	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++		u8 wr_ptr;
++
++		if (cnt == trans_pcie->cmd_queue)
++			continue;
++		if (!test_bit(cnt, trans_pcie->queue_used))
++			continue;
++		if (!(BIT(cnt) & txq_bm))
++			continue;
++
++		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
++		txq = &trans_pcie->txq[cnt];
++		q = &txq->q;
++		wr_ptr = ACCESS_ONCE(q->write_ptr);
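++		/* snapshot the write pointer; it must not move while we flush */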
++
++		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
++		       !time_after(jiffies,
++				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
++			u8 write_ptr = ACCESS_ONCE(q->write_ptr);
++
++			if (WARN_ONCE(wr_ptr != write_ptr,
++				      "WR pointer moved while flushing %d -> %d\n",
++				      wr_ptr, write_ptr))
++				return -ETIMEDOUT;
++			msleep(1);
++		}
++
++		if (q->read_ptr != q->write_ptr) {
++			IWL_ERR(trans,
++				"failed to flush all tx fifo queues Q %d\n", cnt);
++			ret = -ETIMEDOUT;
++			break;
++		}
++		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
++	}
++
++	if (!ret)
++		return 0;
++
++	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
++		txq->q.read_ptr, txq->q.write_ptr);
++
++	scd_sram_addr = trans_pcie->scd_base_addr +
++			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
++	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
++
++	iwl_print_hex_error(trans, buf, sizeof(buf));
++
++	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
++		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
++			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));
++
++	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
++		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
++		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
++		u32 tbl_dw =
++			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
++					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));
++
++		if (cnt & 0x1)
++			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
++		else
++			tbl_dw = tbl_dw & 0x0000FFFF;
++
++		IWL_ERR(trans,
++			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
++			cnt, active ? "" : "in", fifo, tbl_dw,
++			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
++				(TFD_QUEUE_SIZE_MAX - 1),
++			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
++	}
++
++	return ret;
++}
++
++static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
++					 u32 mask, u32 value)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	unsigned long flags;
++
++	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
++	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
++	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
++}
++
++void iwl_trans_pcie_ref(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	unsigned long flags;
++
++	if (iwlwifi_mod_params.d0i3_disable)
++		return;
++
++	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
++	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
++	trans_pcie->ref_count++;
++	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++}
++
++void iwl_trans_pcie_unref(struct iwl_trans *trans)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	unsigned long flags;
++
++	if (iwlwifi_mod_params.d0i3_disable)
++		return;
++
++	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
++	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
++	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
++		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++		return;
++	}
++	trans_pcie->ref_count--;
++	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
++}
++
++static const char *get_csr_string(int cmd)
++{
++#define IWL_CMD(x) case x: return #x
++	switch (cmd) {
++	IWL_CMD(CSR_HW_IF_CONFIG_REG);
++	IWL_CMD(CSR_INT_COALESCING);
++	IWL_CMD(CSR_INT);
++	IWL_CMD(CSR_INT_MASK);
++	IWL_CMD(CSR_FH_INT_STATUS);
++	IWL_CMD(CSR_GPIO_IN);
++	IWL_CMD(CSR_RESET);
++	IWL_CMD(CSR_GP_CNTRL);
++	IWL_CMD(CSR_HW_REV);
++	IWL_CMD(CSR_EEPROM_REG);
++	IWL_CMD(CSR_EEPROM_GP);
++	IWL_CMD(CSR_OTP_GP_REG);
++	IWL_CMD(CSR_GIO_REG);
++	IWL_CMD(CSR_GP_UCODE_REG);
++	IWL_CMD(CSR_GP_DRIVER_REG);
++	IWL_CMD(CSR_UCODE_DRV_GP1);
++	IWL_CMD(CSR_UCODE_DRV_GP2);
++	IWL_CMD(CSR_LED_REG);
++	IWL_CMD(CSR_DRAM_INT_TBL_REG);
++	IWL_CMD(CSR_GIO_CHICKEN_BITS);
++	IWL_CMD(CSR_ANA_PLL_CFG);
++	IWL_CMD(CSR_HW_REV_WA_REG);
++	IWL_CMD(CSR_MONITOR_STATUS_REG);
++	IWL_CMD(CSR_DBG_HPET_MEM_REG);
++	default:
++		return "UNKNOWN";
++	}
++#undef IWL_CMD
++}
++
++void iwl_pcie_dump_csr(struct iwl_trans *trans)
++{
++	int i;
++	static const u32 csr_tbl[] = {
++		CSR_HW_IF_CONFIG_REG,
++		CSR_INT_COALESCING,
++		CSR_INT,
++		CSR_INT_MASK,
++		CSR_FH_INT_STATUS,
++		CSR_GPIO_IN,
++		CSR_RESET,
++		CSR_GP_CNTRL,
++		CSR_HW_REV,
++		CSR_EEPROM_REG,
++		CSR_EEPROM_GP,
++		CSR_OTP_GP_REG,
++		CSR_GIO_REG,
++		CSR_GP_UCODE_REG,
++		CSR_GP_DRIVER_REG,
++		CSR_UCODE_DRV_GP1,
++		CSR_UCODE_DRV_GP2,
++		CSR_LED_REG,
++		CSR_DRAM_INT_TBL_REG,
++		CSR_GIO_CHICKEN_BITS,
++		CSR_ANA_PLL_CFG,
++		CSR_MONITOR_STATUS_REG,
++		CSR_HW_REV_WA_REG,
++		CSR_DBG_HPET_MEM_REG
++	};
++	IWL_ERR(trans, "CSR values:\n");
++	IWL_ERR(trans,
++		"(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
++	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
++		IWL_ERR(trans, "  %25s: 0X%08x\n",
++			get_csr_string(csr_tbl[i]),
++			iwl_read32(trans, csr_tbl[i]));
++	}
++}
++
++#ifdef CONFIG_IWLWIFI_DEBUGFS
++/* create and remove of files */
++#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
++	if (!debugfs_create_file(#name, mode, parent, trans,		\
++				 &iwl_dbgfs_##name##_ops))		\
++		goto err;						\
++} while (0)
++
++/* file operation */
++#define DEBUGFS_READ_FILE_OPS(name)					\
++static const struct file_operations iwl_dbgfs_##name##_ops = {		\
++	.read = iwl_dbgfs_##name##_read,				\
++	.open = simple_open,						\
++	.llseek = generic_file_llseek,					\
++};
++
++#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
++static const struct file_operations iwl_dbgfs_##name##_ops = {          \
++	.write = iwl_dbgfs_##name##_write,                              \
++	.open = simple_open,						\
++	.llseek = generic_file_llseek,					\
++};
++
++#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
++static const struct file_operations iwl_dbgfs_##name##_ops = {		\
++	.write = iwl_dbgfs_##name##_write,				\
++	.read = iwl_dbgfs_##name##_read,				\
++	.open = simple_open,						\
++	.llseek = generic_file_llseek,					\
++};
++
++static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
++				       char __user *user_buf,
++				       size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct iwl_txq *txq;
++	struct iwl_queue *q;
++	char *buf;
++	int pos = 0;
++	int cnt;
++	int ret;
++	size_t bufsz;
++
++	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
++
++	if (!trans_pcie->txq)
++		return -EAGAIN;
++
++	buf = kzalloc(bufsz, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
++		txq = &trans_pcie->txq[cnt];
++		q = &txq->q;
++		pos += scnprintf(buf + pos, bufsz - pos,
++				"hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
++				cnt, q->read_ptr, q->write_ptr,
++				!!test_bit(cnt, trans_pcie->queue_used),
++				!!test_bit(cnt, trans_pcie->queue_stopped),
++				txq->need_update, txq->frozen,
++				(cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
++	}
++	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++	kfree(buf);
++	return ret;
++}
++
++static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
++				       char __user *user_buf,
++				       size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct iwl_rxq *rxq = &trans_pcie->rxq;
++	char buf[256];
++	int pos = 0;
++	const size_t bufsz = sizeof(buf);
++
++	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
++						rxq->read);
++	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
++						rxq->write);
++	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
++						rxq->write_actual);
++	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
++						rxq->need_update);
++	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
++						rxq->free_count);
++	if (rxq->rb_stts) {
++		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
++			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
++	} else {
++		pos += scnprintf(buf + pos, bufsz - pos,
++					"closed_rb_num: Not Allocated\n");
++	}
++	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++}
++
++static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
++					char __user *user_buf,
++					size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
++
++	int pos = 0;
++	char *buf;
++	int bufsz = 24 * 64; /* 24 items * 64 char per item */
++	ssize_t ret;
++
++	buf = kzalloc(bufsz, GFP_KERNEL);
++	if (!buf)
++		return -ENOMEM;
++
++	pos += scnprintf(buf + pos, bufsz - pos,
++			"Interrupt Statistics Report:\n");
++
++	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
++		isr_stats->hw);
++	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
++		isr_stats->sw);
++	if (isr_stats->sw || isr_stats->hw) {
++		pos += scnprintf(buf + pos, bufsz - pos,
++			"\tLast Restarting Code:  0x%X\n",
++			isr_stats->err_code);
++	}
++#ifdef CONFIG_IWLWIFI_DEBUG
++	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
++		isr_stats->sch);
++	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
++		isr_stats->alive);
++#endif
++	pos += scnprintf(buf + pos, bufsz - pos,
++		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
++
++	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
++		isr_stats->ctkill);
++
++	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
++		isr_stats->wakeup);
++
++	pos += scnprintf(buf + pos, bufsz - pos,
++		"Rx command responses:\t\t %u\n", isr_stats->rx);
++
++	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
++		isr_stats->tx);
++
++	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
++		isr_stats->unhandled);
++
++	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
++	kfree(buf);
++	return ret;
++}
++
++static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
++					 const char __user *user_buf,
++					 size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
++
++	char buf[8];
++	int buf_size;
++	u32 reset_flag;
++
++	memset(buf, 0, sizeof(buf));
++	buf_size = min(count, sizeof(buf) - 1);
++	if (copy_from_user(buf, user_buf, buf_size))
++		return -EFAULT;
++	if (sscanf(buf, "%x", &reset_flag) != 1)
++		return -EFAULT;
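++	/* only a value of 0 clears the accumulated statistics */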
++	if (reset_flag == 0)
++		memset(isr_stats, 0, sizeof(*isr_stats));
++
++	return count;
++}
++
++static ssize_t iwl_dbgfs_csr_write(struct file *file,
++				   const char __user *user_buf,
++				   size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	char buf[8];
++	int buf_size;
++	int csr;
++
++	memset(buf, 0, sizeof(buf));
++	buf_size = min(count, sizeof(buf) - 1);
++	if (copy_from_user(buf, user_buf, buf_size))
++		return -EFAULT;
++	if (sscanf(buf, "%d", &csr) != 1)
++		return -EFAULT;
++
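++	/* the parsed CSR index is currently unused; any write dumps all CSRs */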
++	iwl_pcie_dump_csr(trans);
++
++	return count;
++}
++
++static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
++				     char __user *user_buf,
++				     size_t count, loff_t *ppos)
++{
++	struct iwl_trans *trans = file->private_data;
++	char *buf = NULL;
++	ssize_t ret;
++
++	ret = iwl_dump_fh(trans, &buf);
++	if (ret < 0)
++		return ret;
++	if (!buf)
++		return -EINVAL;
++	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
++	kfree(buf);
++	return ret;
++}
++
++DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
++DEBUGFS_READ_FILE_OPS(fh_reg);
++DEBUGFS_READ_FILE_OPS(rx_queue);
++DEBUGFS_READ_FILE_OPS(tx_queue);
++DEBUGFS_WRITE_FILE_OPS(csr);
++
++/* Create the debugfs files and directories */
++int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
++{
++	struct dentry *dir = trans->dbgfs_dir;
++
++	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
++	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
++	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
++	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
++	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
++	return 0;
++
++err:
++	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
++	return -ENOMEM;
++}
++#endif /* CONFIG_IWLWIFI_DEBUGFS */
++
++static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
++{
++	u32 cmdlen = 0;
++	int i;
++
++	for (i = 0; i < IWL_NUM_OF_TBS; i++)
++		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);
++
++	return cmdlen;
++}
++
++static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
++				   struct iwl_fw_error_dump_data **data,
++				   int allocated_rb_nums)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
++	struct iwl_rxq *rxq = &trans_pcie->rxq;
++	u32 i, r, j, rb_len = 0;
++
++	spin_lock(&rxq->lock);
++
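++	/* closed_rb_num is the HW's most recently closed RB index (12 bits) */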
++	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
++
++	for (i = rxq->read, j = 0;
++	     i != r && j < allocated_rb_nums;
++	     i = (i + 1) & RX_QUEUE_MASK, j++) {
++		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
++		struct iwl_fw_error_dump_rb *rb;
++
++		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
++			       DMA_FROM_DEVICE);
++
++		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
++
++		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
++		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
++		rb = (void *)(*data)->data;
++		rb->index = cpu_to_le32(i);
++		memcpy(rb->data, page_address(rxb->page), max_len);
++		/* remap the page so the RX path can keep using and freeing it */
++		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
++					     max_len, DMA_FROM_DEVICE);
++
++		*data = iwl_fw_error_next_data(*data);
++	}
++
++	spin_unlock(&rxq->lock);
++
++	return rb_len;
++}
++
++#define IWL_CSR_TO_DUMP (0x250)
++
++static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
++				   struct iwl_fw_error_dump_data **data)
++{
++	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
++	__le32 *val;
++	int i;
++
++	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
++	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
++	val = (void *)(*data)->data;
++
++	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
++		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
++
++	*data = iwl_fw_error_next_data(*data);
++
++	return csr_len;
++}
++
++static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
++				       struct iwl_fw_error_dump_data **data)
++{
++	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
++	unsigned long flags;
++	__le32 *val;
++	int i;
++
++	if (!iwl_trans_grab_nic_access(trans, &flags))
++		return 0;
++
++	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
++	(*data)->len = cpu_to_le32(fh_regs_len);
++	val = (void *)(*data)->data;
++
++	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
++		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
++
++	iwl_trans_release_nic_access(trans, &flags);
++
++	*data = iwl_fw_error_next_data(*data);
++
++	return sizeof(**data) + fh_regs_len;
++}
++
++static u32
++iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
++				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
++				 u32 monitor_len)
++{
++	u32 buf_size_in_dwords = (monitor_len >> 2);
++	u32 *buffer = (u32 *)fw_mon_data->data;
++	unsigned long flags;
++	u32 i;
++
++	if (!iwl_trans_grab_nic_access(trans, &flags))
++		return 0;
++
++	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
++	for (i = 0; i < buf_size_in_dwords; i++)
++		buffer[i] = iwl_read_prph_no_grab(trans,
++				MON_DMARB_RD_DATA_ADDR);
++	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
++
++	iwl_trans_release_nic_access(trans, &flags);
++
++	return monitor_len;
++}
++
++static u32
++iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
++			    struct iwl_fw_error_dump_data **data,
++			    u32 monitor_len)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	u32 len = 0;
++
++	if ((trans_pcie->fw_mon_page &&
++	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
++	    trans->dbg_dest_tlv) {
++		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
++		u32 base, write_ptr, wrap_cnt;
++
++		/* If there was a dest TLV - use the values from there */
++		if (trans->dbg_dest_tlv) {
++			write_ptr =
++				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
++			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
++			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
++		} else {
++			base = MON_BUFF_BASE_ADDR;
++			write_ptr = MON_BUFF_WRPTR;
++			wrap_cnt = MON_BUFF_CYCLE_CNT;
++		}
++
++		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
++		fw_mon_data = (void *)(*data)->data;
++		fw_mon_data->fw_mon_wr_ptr =
++			cpu_to_le32(iwl_read_prph(trans, write_ptr));
++		fw_mon_data->fw_mon_cycle_cnt =
++			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
++		fw_mon_data->fw_mon_base_ptr =
++			cpu_to_le32(iwl_read_prph(trans, base));
++
++		len += sizeof(**data) + sizeof(*fw_mon_data);
++		if (trans_pcie->fw_mon_page) {
++			/*
++			 * The firmware is now asserted, it won't write anything
++			 * to the buffer. CPU can take ownership to fetch the
++			 * data. The buffer will be handed back to the device
++			 * before the firmware will be restarted.
++			 */
++			dma_sync_single_for_cpu(trans->dev,
++						trans_pcie->fw_mon_phys,
++						trans_pcie->fw_mon_size,
++						DMA_FROM_DEVICE);
++			memcpy(fw_mon_data->data,
++			       page_address(trans_pcie->fw_mon_page),
++			       trans_pcie->fw_mon_size);
++
++			monitor_len = trans_pcie->fw_mon_size;
++		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
++			/*
++			 * Update pointers to reflect actual values after
++			 * shifting
++			 */
++			base = iwl_read_prph(trans, base) <<
++			       trans->dbg_dest_tlv->base_shift;
++			iwl_trans_read_mem(trans, base, fw_mon_data->data,
++					   monitor_len / sizeof(u32));
++		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
++			monitor_len =
++				iwl_trans_pci_dump_marbh_monitor(trans,
++								 fw_mon_data,
++								 monitor_len);
++		} else {
++			/* Didn't match anything - output no monitor data */
++			monitor_len = 0;
++		}
++
++		len += monitor_len;
++		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
++	}
++
++	return len;
++}
++
++static struct iwl_trans_dump_data
++*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
++			  const struct iwl_fw_dbg_trigger_tlv *trigger)
++{
++	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++	struct iwl_fw_error_dump_data *data;
++	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
++	struct iwl_fw_error_dump_txcmd *txcmd;
++	struct iwl_trans_dump_data *dump_data;
++	u32 len, num_rbs;
++	u32 monitor_len;
++	int i, ptr;
++	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
++
++	/* transport dump header */
++	len = sizeof(*dump_data);
++
++	/* host commands */
++	len += sizeof(*data) +
++		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
++
++	/* FW monitor */
++	if (trans_pcie->fw_mon_page) {
++		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
++		       trans_pcie->fw_mon_size;
++		monitor_len = trans_pcie->fw_mon_size;
++	} else if (trans->dbg_dest_tlv) {
++		u32 base, end;
++
++		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
++		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
++
++		base = iwl_read_prph(trans, base) <<
++		       trans->dbg_dest_tlv->base_shift;
++		end = iwl_read_prph(trans, end) <<
++		      trans->dbg_dest_tlv->end_shift;
++
++		/* Make "end" point to the actual end */
++		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
++		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
++			end += (1 << trans->dbg_dest_tlv->end_shift);
++		monitor_len = end - base;
++		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
++		       monitor_len;
++	} else {
++		monitor_len = 0;
++	}
++
++	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
++		dump_data = vzalloc(len);
++		if (!dump_data)
++			return NULL;
++
++		data = (void *)dump_data->data;
++		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
++		dump_data->len = len;
++
++		return dump_data;
++	}
++
++	/* CSR registers */
++	len += sizeof(*data) + IWL_CSR_TO_DUMP;
++
++	/* FH registers */
++	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
++
++	if (dump_rbs) {
++		/* RBs */
++		num_rbs = le16_to_cpu(ACCESS_ONCE(
++				      trans_pcie->rxq.rb_stts->closed_rb_num))
++				      & 0x0FFF;
++		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
++		len += num_rbs * (sizeof(*data) +
++				  sizeof(struct iwl_fw_error_dump_rb) +
++				  (PAGE_SIZE << trans_pcie->rx_page_order));
++	}
++
++	dump_data = vzalloc(len);
++	if (!dump_data)
++		return NULL;
++
++	len = 0;
++	data = (void *)dump_data->data;
++	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
++	txcmd = (void *)data->data;
++	spin_lock_bh(&cmdq->lock);
++	ptr = cmdq->q.write_ptr;
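++	/*
++	 * Walk the command queue backwards from the write pointer, capturing
++	 * at most TFD_MAX_PAYLOAD_SIZE bytes of each host command.
++	 */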
++	for (i = 0; i < cmdq->q.n_window; i++) {
++		u8 idx = get_cmd_index(&cmdq->q, ptr);
++		u32 caplen, cmdlen;
++
++		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
++		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
++
++		if (cmdlen) {
++			len += sizeof(*txcmd) + caplen;
++			txcmd->cmdlen = cpu_to_le32(cmdlen);
++			txcmd->caplen = cpu_to_le32(caplen);
++			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
++			txcmd = (void *)((u8 *)txcmd->data + caplen);
++		}
++
++		ptr = iwl_queue_dec_wrap(ptr);
++	}
++	spin_unlock_bh(&cmdq->lock);
++
++	data->len = cpu_to_le32(len);
++	len += sizeof(*data);
++	data = iwl_fw_error_next_data(data);
++
++	len += iwl_trans_pcie_dump_csr(trans, &data);
++	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
++	if (dump_rbs)
++		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
++
++	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
++
++	dump_data->len = len;
++
++	return dump_data;
++}
++
++static const struct iwl_trans_ops trans_ops_pcie = {
++	.start_hw = iwl_trans_pcie_start_hw,
++	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
++	.fw_alive = iwl_trans_pcie_fw_alive,
++	.start_fw = iwl_trans_pcie_start_fw,
++	.stop_device = iwl_trans_pcie_stop_device,
++
++	.d3_suspend = iwl_trans_pcie_d3_suspend,
++	.d3_resume = iwl_trans_pcie_d3_resume,
++
++	.send_cmd = iwl_trans_pcie_send_hcmd,
++
++	.tx = iwl_trans_pcie_tx,
++	.reclaim = iwl_trans_pcie_reclaim,
++
++	.txq_disable = iwl_trans_pcie_txq_disable,
++	.txq_enable = iwl_trans_pcie_txq_enable,
++
++	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
++	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
++	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
++
++	.write8 = iwl_trans_pcie_write8,
++	.write32 = iwl_trans_pcie_write32,
++	.read32 = iwl_trans_pcie_read32,
++	.read_prph = iwl_trans_pcie_read_prph,
++	.write_prph = iwl_trans_pcie_write_prph,
++	.read_mem = iwl_trans_pcie_read_mem,
++	.write_mem = iwl_trans_pcie_write_mem,
++	.configure = iwl_trans_pcie_configure,
++	.set_pmi = iwl_trans_pcie_set_pmi,
++	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
++	.release_nic_access = iwl_trans_pcie_release_nic_access,
++	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
++
++	.ref = iwl_trans_pcie_ref,
++	.unref = iwl_trans_pcie_unref,
++
++	.dump_data = iwl_trans_pcie_dump_data,
++};
++
++struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
++				       const struct pci_device_id *ent,
++				       const struct iwl_cfg *cfg)
++{
++	struct iwl_trans_pcie *trans_pcie;
++	struct iwl_trans *trans;
++	u16 pci_cmd;
++	int ret;
++
++	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
++				&pdev->dev, cfg, &trans_ops_pcie, 0);
++	if (!trans)
++		return ERR_PTR(-ENOMEM);
++
++	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS;
++
++	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
++
++	trans_pcie->trans = trans;
++	spin_lock_init(&trans_pcie->irq_lock);
++	spin_lock_init(&trans_pcie->reg_lock);
++	spin_lock_init(&trans_pcie->ref_lock);
++	mutex_init(&trans_pcie->mutex);
++	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
++	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
++	if (!trans_pcie->tso_hdr_page) {
++		ret = -ENOMEM;
++		goto out_no_pci;
++	}
++
++	ret = pci_enable_device(pdev);
++	if (ret)
++		goto out_no_pci;
++
++	if (!cfg->base_params->pcie_l1_allowed) {
++		/*
++		 * W/A - seems to solve weird behavior. We need to remove this
++		 * if we don't want to stay in L1 all the time. This wastes a
++		 * lot of power.
++		 */
++		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
++				       PCIE_LINK_STATE_L1 |
++				       PCIE_LINK_STATE_CLKPM);
++	}
++
++	pci_set_master(pdev);
++
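++	/* try 36-bit DMA addressing first; fall back to 32-bit if unsupported */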
++	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
++	if (!ret)
++		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
++	if (ret) {
++		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
++		if (!ret)
++			ret = pci_set_consistent_dma_mask(pdev,
++							  DMA_BIT_MASK(32));
++		/* both attempts failed: */
++		if (ret) {
++			dev_err(&pdev->dev, "No suitable DMA available\n");
++			goto out_pci_disable_device;
++		}
++	}
++
++	ret = pci_request_regions(pdev, DRV_NAME);
++	if (ret) {
++		dev_err(&pdev->dev, "pci_request_regions failed\n");
++		goto out_pci_disable_device;
++	}
++
++	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
++	if (!trans_pcie->hw_base) {
++		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
++		ret = -ENODEV;
++		goto out_pci_release_regions;
++	}
++
++	/* We disable the RETRY_TIMEOUT register (0x41) to keep
++	 * PCI Tx retries from interfering with C3 CPU state */
++	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
++
++	trans->dev = &pdev->dev;
++	trans_pcie->pci_dev = pdev;
++	iwl_disable_interrupts(trans);
++
++	ret = pci_enable_msi(pdev);
++	if (ret) {
++		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
++		/* enable rfkill interrupt: hw bug w/a */
++		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
++		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
++			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
++			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
++		}
++	}
++
++	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
++	/*
++	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
++	 * changed, and now the revision step also includes bit 0-1 (no more
++	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
++	 * in the old format.
++	 */
++	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
++		unsigned long flags;
++
++		trans->hw_rev = (trans->hw_rev & 0xfff0) |
++				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
++
++		ret = iwl_pcie_prepare_card_hw(trans);
++		if (ret) {
++			IWL_WARN(trans, "Exit HW not ready\n");
++			goto out_pci_disable_msi;
++		}
++
++		/*
++		 * In order to recognize the C step, the driver should read the
++		 * chip version ID located in the AUX bus MISC address space.
++		 */
++		iwl_set_bit(trans, CSR_GP_CNTRL,
++			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
++		udelay(2);
++
++		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
++				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
++				   25000);
++		if (ret < 0) {
++			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
++			goto out_pci_disable_msi;
++		}
++
++		if (iwl_trans_grab_nic_access(trans, &flags)) {
++			u32 hw_step;
++
++			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
++			hw_step |= ENABLE_WFPM;
++			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
++			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
++			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
++			if (hw_step == 0x3)
++				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
++						(SILICON_C_STEP << 2);
++			iwl_trans_release_nic_access(trans, &flags);
++		}
++	}
++
++	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
++	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
++		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
++
++	/* Initialize the wait queue for commands */
++	init_waitqueue_head(&trans_pcie->wait_command_queue);
++
++	ret = iwl_pcie_alloc_ict(trans);
++	if (ret)
++		goto out_pci_disable_msi;
++
++	ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
++				   iwl_pcie_irq_handler,
++				   IRQF_SHARED, DRV_NAME, trans);
++	if (ret) {
++		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
++		goto out_free_ict;
++	}
++
++	trans_pcie->inta_mask = CSR_INI_SET_MASK;
++
++	return trans;
++
++out_free_ict:
++	iwl_pcie_free_ict(trans);
++out_pci_disable_msi:
++	pci_disable_msi(pdev);
++out_pci_release_regions:
++	pci_release_regions(pdev);
++out_pci_disable_device:
++	pci_disable_device(pdev);
++out_no_pci:
++	free_percpu(trans_pcie->tso_hdr_page);
++	iwl_trans_free(trans);
++	return ERR_PTR(ret);
++}
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 88bf80a942b4..9faf69875fab 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -382,6 +382,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+@@ -399,10 +400,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 1de80a8e357a..840c47d8e2ce 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -7,6 +7,7 @@
+  *
+  * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -33,6 +34,7 @@
+  *
+  * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
++ * Copyright(c) 2016 Intel Deutschland GmbH
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -881,9 +883,16 @@ static void iwl_pcie_apply_destination(struct iwl_trans *trans)
+ 	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+ 		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+ 			       trans_pcie->fw_mon_phys >> dest->base_shift);
+-		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+-			       (trans_pcie->fw_mon_phys +
+-				trans_pcie->fw_mon_size) >> dest->end_shift);
++		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++					(trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size - 256) >>
++						dest->end_shift);
++		else
++			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
++					(trans_pcie->fw_mon_phys +
++					trans_pcie->fw_mon_size) >>
++						dest->end_shift);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
+index f46c9d7f6528..7f471bff435c 100644
+--- a/drivers/net/wireless/rtlwifi/pci.c
++++ b/drivers/net/wireless/rtlwifi/pci.c
+@@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 								      hw_queue);
+ 			if (rx_remained_cnt == 0)
+ 				return;
+-
++			buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
++				rtlpci->rx_ring[rxring_idx].idx];
++			pdesc = (struct rtl_rx_desc *)skb->data;
+ 		} else {	/* rx descriptor */
+ 			pdesc = &rtlpci->rx_ring[rxring_idx].desc[
+ 				rtlpci->rx_ring[rxring_idx].idx];
+@@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
+ 		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ 		if (unlikely(!new_skb))
+ 			goto no_new;
+-		if (rtlpriv->use_new_trx_flow) {
+-			buffer_desc =
+-			  &rtlpci->rx_ring[rxring_idx].buffer_desc
+-				[rtlpci->rx_ring[rxring_idx].idx];
+-			/*means rx wifi info*/
+-			pdesc = (struct rtl_rx_desc *)skb->data;
+-		}
+ 		memset(&rx_status , 0 , sizeof(rx_status));
+ 		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
+ 						 &rx_status, (u8 *)pdesc, skb);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+index 11344121c55e..47e32cb0ec1a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/sw.c
+@@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ 	u8 tid;
+ 
+ 	rtl8188ee_bt_reg_init(hw);
+-	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+-
+ 	rtlpriv->dm.dm_initialgain_enable = 1;
+ 	rtlpriv->dm.dm_flag = 0;
+ 	rtlpriv->dm.disable_framebursting = 0;
+@@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
++	rtlpriv->cfg->mod_params->disable_watchdog =
++		rtlpriv->cfg->mod_params->disable_watchdog;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	if (!rtlpriv->psc.inactiveps)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+index de6cb6c3a48c..4780bdc63b2b 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+@@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 	if (!rtlpriv->psc.inactiveps)
+ 		pr_info("rtl8192ce: Power Save off (module option)\n");
+ 	if (!rtlpriv->psc.fwctrl_lps)
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index fd4a5353d216..7c6f7f0d18c6 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->dm.disable_framebursting = false;
+ 	rtlpriv->dm.thermalvalue = 0;
+ 	rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 
+ 	/* for firmware buf */
+ 	rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+index b19d0398215f..c6e09a19de1a 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+@@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+index e1fd27c888bf..31baca41ac2f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+@@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
++	rtlpriv->cfg->mod_params->sw_crypto =
++		rtlpriv->cfg->mod_params->sw_crypto;
+ 	if (!rtlpriv->psc.inactiveps)
+ 		pr_info("Power Save off (module option)\n");
+ 	if (!rtlpriv->psc.fwctrl_lps)
+@@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+-MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+-MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
++MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
++MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index 2721cf89fb16..aac1ed3f7bb4 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -531,6 +531,8 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw,
+ 			ieee80211_rx(hw, skb);
+ 		else
+ 			dev_kfree_skb_any(skb);
++	} else {
++		dev_kfree_skb_any(skb);
+ 	}
+ }
+ 
+diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
+index 0305729d0986..10cf3747694d 100644
+--- a/drivers/net/wireless/ti/wlcore/io.h
++++ b/drivers/net/wireless/ti/wlcore/io.h
+@@ -207,19 +207,23 @@ static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
+ 
+ static inline void wl1271_power_off(struct wl1271 *wl)
+ {
+-	int ret;
++	int ret = 0;
+ 
+ 	if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
+ 		return;
+ 
+-	ret = wl->if_ops->power(wl->dev, false);
++	if (wl->if_ops->power)
++		ret = wl->if_ops->power(wl->dev, false);
+ 	if (!ret)
+ 		clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+ 
+ static inline int wl1271_power_on(struct wl1271 *wl)
+ {
+-	int ret = wl->if_ops->power(wl->dev, true);
++	int ret = 0;
++
++	if (wl->if_ops->power)
++		ret = wl->if_ops->power(wl->dev, true);
+ 	if (ret == 0)
+ 		set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ 
+diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
+index f1ac2839d97c..720e4e4b5a3c 100644
+--- a/drivers/net/wireless/ti/wlcore/spi.c
++++ b/drivers/net/wireless/ti/wlcore/spi.c
+@@ -73,7 +73,10 @@
+  */
+ #define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+ 
+-#define WSPI_MAX_NUM_OF_CHUNKS (SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
++/* Maximum number of SPI write chunks */
++#define WSPI_MAX_NUM_OF_CHUNKS \
++	((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
++
+ 
+ struct wl12xx_spi_glue {
+ 	struct device *dev;
+@@ -268,9 +271,10 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ 					     void *buf, size_t len, bool fixed)
+ {
+ 	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+-	struct spi_transfer t[2 * (WSPI_MAX_NUM_OF_CHUNKS + 1)];
++	/* SPI write buffers - 2 for each chunk */
++	struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
+ 	struct spi_message m;
+-	u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
++	u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
+ 	u32 *cmd;
+ 	u32 chunk_len;
+ 	int i;
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index d3346d23963b..89b3befc7155 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -140,6 +140,8 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ 	type_mask |= IORESOURCE_TYPE_BITS;
+ 
+ 	pci_bus_for_each_resource(bus, r, i) {
++		resource_size_t min_used = min;
++
+ 		if (!r)
+ 			continue;
+ 
+@@ -163,12 +165,12 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
+ 		 * overrides "min".
+ 		 */
+ 		if (avail.start)
+-			min = avail.start;
++			min_used = avail.start;
+ 
+ 		max = avail.end;
+ 
+ 		/* Ok, try it out.. */
+-		ret = allocate_resource(r, res, size, min, max,
++		ret = allocate_resource(r, res, size, min_used, max,
+ 					align, alignf, alignf_data);
+ 		if (ret == 0)
+ 			return 0;
+diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c
+index 2d57e19a2cd4..b5ae685aec61 100644
+--- a/drivers/pci/host/pci-dra7xx.c
++++ b/drivers/pci/host/pci-dra7xx.c
+@@ -289,7 +289,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
+ 	}
+ 
+ 	ret = devm_request_irq(&pdev->dev, pp->irq,
+-			       dra7xx_pcie_msi_irq_handler, IRQF_SHARED,
++			       dra7xx_pcie_msi_irq_handler,
++			       IRQF_SHARED | IRQF_NO_THREAD,
+ 			       "dra7-pcie-msi",	pp);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "failed to request irq\n");
+diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
+index c139237e0e52..5b2b83cb67ad 100644
+--- a/drivers/pci/host/pci-exynos.c
++++ b/drivers/pci/host/pci-exynos.c
+@@ -527,7 +527,8 @@ static int __init exynos_add_pcie_port(struct pcie_port *pp,
+ 
+ 		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ 					exynos_pcie_msi_irq_handler,
+-					IRQF_SHARED, "exynos-pcie", pp);
++					IRQF_SHARED | IRQF_NO_THREAD,
++					"exynos-pcie", pp);
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request msi irq\n");
+ 			return ret;
+diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
+index fdb95367721e..ebcb0ac8512b 100644
+--- a/drivers/pci/host/pci-imx6.c
++++ b/drivers/pci/host/pci-imx6.c
+@@ -534,7 +534,8 @@ static int __init imx6_add_pcie_port(struct pcie_port *pp,
+ 
+ 		ret = devm_request_irq(&pdev->dev, pp->msi_irq,
+ 				       imx6_pcie_msi_handler,
+-				       IRQF_SHARED, "mx6-pcie-msi", pp);
++				       IRQF_SHARED | IRQF_NO_THREAD,
++				       "mx6-pcie-msi", pp);
+ 		if (ret) {
+ 			dev_err(&pdev->dev, "failed to request MSI irq\n");
+ 			return -ENODEV;
+diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
+index 00e92720d7f7..d9789d6ba47d 100644
+--- a/drivers/pci/host/pci-tegra.c
++++ b/drivers/pci/host/pci-tegra.c
+@@ -1304,7 +1304,7 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
+ 
+ 	msi->irq = err;
+ 
+-	err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
++	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
+ 			  tegra_msi_irq_chip.name, pcie);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
+index c086210f2ffd..56ce5640d91a 100644
+--- a/drivers/pci/host/pcie-rcar.c
++++ b/drivers/pci/host/pcie-rcar.c
+@@ -695,14 +695,16 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
+ 
+ 	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
+ 	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
+-			       IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
++			       IRQF_SHARED | IRQF_NO_THREAD,
++			       rcar_msi_irq_chip.name, pcie);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ 		goto err;
+ 	}
+ 
+ 	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
+-			       IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
++			       IRQF_SHARED | IRQF_NO_THREAD,
++			       rcar_msi_irq_chip.name, pcie);
+ 	if (err < 0) {
+ 		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
+ 		goto err;
+diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
+index 020d78890719..4ea793eaa2bd 100644
+--- a/drivers/pci/host/pcie-spear13xx.c
++++ b/drivers/pci/host/pcie-spear13xx.c
+@@ -281,7 +281,8 @@ static int spear13xx_add_pcie_port(struct pcie_port *pp,
+ 		return -ENODEV;
+ 	}
+ 	ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
+-			       IRQF_SHARED, "spear1340-pcie", pp);
++			       IRQF_SHARED | IRQF_NO_THREAD,
++			       "spear1340-pcie", pp);
+ 	if (ret) {
+ 		dev_err(dev, "failed to request irq %d\n", pp->irq);
+ 		return ret;
+diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
+index f1a06a091ccb..577fe5b2f617 100644
+--- a/drivers/pci/host/pcie-xilinx.c
++++ b/drivers/pci/host/pcie-xilinx.c
+@@ -776,7 +776,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+ 
+ 	port->irq = irq_of_parse_and_map(node, 0);
+ 	err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+-			       IRQF_SHARED, "xilinx-pcie", port);
++			       IRQF_SHARED | IRQF_NO_THREAD,
++			       "xilinx-pcie", port);
+ 	if (err) {
+ 		dev_err(dev, "unable to request irq %d\n", port->irq);
+ 		return err;
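
The seven host-driver hunks above are all instances of the same fix: the MSI cascade interrupt is requested with IRQF_NO_THREAD so that forced interrupt threading (CONFIG_PREEMPT_RT, or the "threadirqs" boot option) cannot move the demultiplexing handler into a thread it must not run in. A minimal sketch of the resulting request pattern; the demo_* names are illustrative, not taken from any of these drivers:

#include <linux/device.h>
#include <linux/interrupt.h>

/*
 * Cascade handler: it demultiplexes a summary MSI interrupt into
 * per-source handling, so it has to run in hard-IRQ context and is
 * marked IRQF_NO_THREAD to exempt it from forced irq threading.
 */
static irqreturn_t demo_msi_cascade_handler(int irq, void *dev_id)
{
	/* read the MSI status register and dispatch per bit here */
	return IRQ_HANDLED;
}

static int demo_request_msi_irq(struct device *dev, unsigned int irq,
				void *priv)
{
	return devm_request_irq(dev, irq, demo_msi_cascade_handler,
				IRQF_SHARED | IRQF_NO_THREAD,
				"demo-pcie-msi", priv);
}
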
+diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
+index bcb90e4888dd..b60309ee80ed 100644
+--- a/drivers/pci/hotplug/acpiphp_glue.c
++++ b/drivers/pci/hotplug/acpiphp_glue.c
+@@ -954,8 +954,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
+ {
+ 	pci_lock_rescan_remove();
+ 
+-	if (slot->flags & SLOT_IS_GOING_AWAY)
++	if (slot->flags & SLOT_IS_GOING_AWAY) {
++		pci_unlock_rescan_remove();
+ 		return -ENODEV;
++	}
+ 
+ 	/* configure all functions */
+ 	if (!(slot->flags & SLOT_ENABLED))
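
The acpiphp hunk adds the pci_unlock_rescan_remove() that was missing on the early-return path, which previously left the rescan/remove lock held forever. One way to keep such lock/unlock pairs balanced by construction is a single exit label; a sketch under hypothetical names:

#include <linux/pci.h>

#define DEMO_SLOT_GOING_AWAY	0x01	/* illustrative flag */

struct demo_slot {
	unsigned int flags;
};

static int demo_enable_slot(struct demo_slot *slot)
{
	int ret = 0;

	pci_lock_rescan_remove();
	if (slot->flags & DEMO_SLOT_GOING_AWAY) {
		ret = -ENODEV;
		goto out;	/* single exit keeps lock/unlock paired */
	}
	/* ... configure and enumerate the slot's functions ... */
out:
	pci_unlock_rescan_remove();
	return ret;
}
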
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index cd78f1166b33..9a92d13e3917 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -845,6 +845,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo ideapad Y700-17ISK",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+@@ -865,6 +872,20 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"),
+ 		},
+ 	},
++	{
++		.ident = "Lenovo Yoga 700",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
++		},
++	},
++	{
++		.ident = "Lenovo Yoga 900",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 900"),
++		},
++	},
+ 	{}
+ };
+ 
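
The three new entries extend the driver's DMI-based quirk table; an entry matches when every one of its DMI_MATCH() substrings is found in the corresponding DMI string of the running machine. A self-contained sketch of the mechanism, using an invented vendor/product pair:

#include <linux/dmi.h>

static const struct dmi_system_id demo_no_hw_rfkill_list[] = {
	{
		.ident = "Example Vendor Example Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Example Laptop"),
		},
	},
	{}	/* terminating empty entry is required */
};

static bool demo_has_no_hw_rfkill(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(demo_no_hw_rfkill_list) != 0;
}
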
+diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
+index 9f77d23239a2..64ed88a67e6e 100644
+--- a/drivers/scsi/scsi_devinfo.c
++++ b/drivers/scsi/scsi_devinfo.c
+@@ -227,6 +227,7 @@ static struct {
+ 	{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
+ 	{"Promise", "", NULL, BLIST_SPARSELUN},
+ 	{"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
++	{"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024},
+ 	{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
+ 	{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
+ 	{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 11ea52b2c36b..c66fd23b3c13 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -3141,8 +3141,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 	int ret = 0;
+ 
+-	if (!sdkp)
+-		return 0;	/* this can happen */
++	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
++		return 0;
+ 
+ 	if (sdkp->WCE && sdkp->media_present) {
+ 		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
+@@ -3181,6 +3181,9 @@ static int sd_resume(struct device *dev)
+ {
+ 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ 
++	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
++		return 0;
++
+ 	if (!sdkp->device->manage_start_stop)
+ 		return 0;
+ 
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 9d7b7db75e4b..3bbf4853733c 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1255,7 +1255,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+ 	}
+ 
+ 	sfp->mmap_called = 1;
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ 	vma->vm_private_data = sfp;
+ 	vma->vm_ops = &sg_mmap_vm_ops;
+ 	return 0;
+diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
+index 8bd54a64efd6..64c867405ad4 100644
+--- a/drivers/scsi/sr.c
++++ b/drivers/scsi/sr.c
+@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
+ {
+ 	struct scsi_cd *cd = dev_get_drvdata(dev);
+ 
++	if (!cd)	/* E.g.: runtime suspend following sr_remove() */
++		return 0;
++
+ 	if (cd->media_present)
+ 		return -EBUSY;
+ 	else
+@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
+ 	scsi_autopm_get_device(cd->device);
+ 
+ 	del_gendisk(cd->disk);
++	dev_set_drvdata(dev, NULL);
+ 
+ 	mutex_lock(&sr_ref_mutex);
+ 	kref_put(&cd->kref, sr_kref_release);
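
The sd and sr hunks apply one defensive pattern: .remove() clears the driver data pointer, and the PM callbacks, which can race with removal (or run before probe has attached data), tolerate a NULL dev_get_drvdata() result. A sketch with a hypothetical demo_device:

#include <linux/device.h>

struct demo_device {
	int powered;	/* illustrative per-device state */
};

static int demo_runtime_suspend(struct device *dev)
{
	struct demo_device *ddev = dev_get_drvdata(dev);

	if (!ddev)	/* e.g. runtime suspend following demo_remove() */
		return 0;
	ddev->powered = 0;
	return 0;
}

static int demo_remove(struct device *dev)
{
	/* tear down, then make any stale PM callback see NULL */
	dev_set_drvdata(dev, NULL);
	return 0;
}
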
+diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
+index a0315701c7d9..ed68b2cfe031 100644
+--- a/drivers/staging/speakup/selection.c
++++ b/drivers/staging/speakup/selection.c
+@@ -141,7 +141,9 @@ static void __speakup_paste_selection(struct work_struct *work)
+ 	struct tty_ldisc *ld;
+ 	DECLARE_WAITQUEUE(wait, current);
+ 
+-	ld = tty_ldisc_ref_wait(tty);
++	ld = tty_ldisc_ref(tty);
++	if (!ld)
++		goto tty_unref;
+ 	tty_buffer_lock_exclusive(&vc->port);
+ 
+ 	add_wait_queue(&vc->paste_wait, &wait);
+@@ -161,6 +163,7 @@ static void __speakup_paste_selection(struct work_struct *work)
+ 
+ 	tty_buffer_unlock_exclusive(&vc->port);
+ 	tty_ldisc_deref(ld);
++tty_unref:
+ 	tty_kref_put(tty);
+ }
+ 
+diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
+index 1d9d51bdf517..f41a7da1949d 100644
+--- a/drivers/staging/speakup/serialio.c
++++ b/drivers/staging/speakup/serialio.c
+@@ -6,6 +6,11 @@
+ #include "spk_priv.h"
+ #include "serialio.h"
+ 
++#include <linux/serial_core.h>
++/* WARNING:  Do not change this to <linux/serial.h> without testing that
++ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
++#include <asm/serial.h>
++
+ #ifndef SERIAL_PORT_DFNS
+ #define SERIAL_PORT_DFNS
+ #endif
+@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
+ 	int baud = 9600, quot = 0;
+ 	unsigned int cval = 0;
+ 	int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
+-	const struct old_serial_port *ser = rs_table + index;
++	const struct old_serial_port *ser;
+ 	int err;
+ 
++	if (index >= ARRAY_SIZE(rs_table)) {
++		pr_info("no port info for ttyS%d\n", index);
++		return NULL;
++	}
++	ser = rs_table + index;
++
+ 	/*	Divisor, bytesize and parity */
+ 	quot = ser->baud_base / baud;
+ 	cval = cflag & (CSIZE | CSTOPB);
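
The serialio fix is a bounds check before indexing the static rs_table, which previously let an out-of-range ttyS index read past the end of the array. The general pattern, validating against ARRAY_SIZE() before dereferencing, sketched with an illustrative table:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/printk.h>

static const int demo_table[] = { 10, 20, 30 };	/* illustrative data */

static int demo_lookup(unsigned int index)
{
	if (index >= ARRAY_SIZE(demo_table)) {
		pr_info("no entry for index %u\n", index);
		return -ENODEV;
	}
	return demo_table[index];
}
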
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 6f2fb546477e..5a8add721741 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -1907,7 +1907,8 @@ static void lio_tpg_release_fabric_acl(
+ }
+ 
+ /*
+- * Called with spin_lock_bh(struct se_portal_group->session_lock) held..
++ * Called with spin_lock_irq(struct se_portal_group->session_lock) held
++ * or not held.
+  *
+  * Also, this function calls iscsit_inc_session_usage_count() on the
+  * struct iscsi_session in question.
+@@ -1915,19 +1916,32 @@ static void lio_tpg_release_fabric_acl(
+ static int lio_tpg_shutdown_session(struct se_session *se_sess)
+ {
+ 	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
++	struct se_portal_group *se_tpg = se_sess->se_tpg;
++	bool local_lock = false;
++
++	if (!spin_is_locked(&se_tpg->session_lock)) {
++		spin_lock_irq(&se_tpg->session_lock);
++		local_lock = true;
++	}
+ 
+ 	spin_lock(&sess->conn_lock);
+ 	if (atomic_read(&sess->session_fall_back_to_erl0) ||
+ 	    atomic_read(&sess->session_logout) ||
+ 	    (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
+ 		spin_unlock(&sess->conn_lock);
++		if (local_lock)
++			spin_unlock_irq(&sess->conn_lock);
+ 		return 0;
+ 	}
+ 	atomic_set(&sess->session_reinstatement, 1);
+ 	spin_unlock(&sess->conn_lock);
+ 
+ 	iscsit_stop_time2retain_timer(sess);
++	spin_unlock_irq(&se_tpg->session_lock);
++
+ 	iscsit_stop_session(sess, 1, 1);
++	if (!local_lock)
++		spin_lock_irq(&se_tpg->session_lock);
+ 
+ 	return 1;
+ }
+diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
+index 5a0f12d08e8b..ec4ea5940bf7 100644
+--- a/drivers/thermal/step_wise.c
++++ b/drivers/thermal/step_wise.c
+@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
+ 	next_target = instance->target;
+ 	dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
+ 
++	if (!instance->initialized) {
++		if (throttle) {
++			next_target = (cur_state + 1) >= instance->upper ?
++					instance->upper :
++					((cur_state + 1) < instance->lower ?
++					instance->lower : (cur_state + 1));
++		} else {
++			next_target = THERMAL_NO_TARGET;
++		}
++
++		return next_target;
++	}
++
+ 	switch (trend) {
+ 	case THERMAL_TREND_RAISING:
+ 		if (throttle) {
+@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ 		dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
+ 					old_target, (int)instance->target);
+ 
+-		if (old_target == instance->target)
++		if (instance->initialized && old_target == instance->target)
+ 			continue;
+ 
+ 		/* Activate a passive thermal instance */
+@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+ 			instance->target == THERMAL_NO_TARGET)
+ 			update_passive_instance(tz, trip_type, -1);
+ 
+-
++		instance->initialized = true;
+ 		instance->cdev->updated = false; /* cdev needs update */
+ 	}
+ 
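
The nested conditional in the new !instance->initialized branch is just a clamp of cur_state + 1 into the [lower, upper] interval. Assuming the kernel's clamp() helper, the same computation reads:

#include <linux/kernel.h>

static unsigned long demo_first_target(unsigned long cur_state,
				       unsigned long lower,
				       unsigned long upper)
{
	/* equivalent to the ternary chain above when throttling */
	return clamp(cur_state + 1, lower, upper);
}
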
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index 4108db7e10c1..a3282bfb343d 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -37,6 +37,7 @@
+ #include <linux/of.h>
+ #include <net/netlink.h>
+ #include <net/genetlink.h>
++#include <linux/suspend.h>
+ 
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/thermal.h>
+@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
+ static DEFINE_MUTEX(thermal_list_lock);
+ static DEFINE_MUTEX(thermal_governor_lock);
+ 
++static atomic_t in_suspend;
++
+ static struct thermal_governor *def_governor;
+ 
+ static struct thermal_governor *__find_governor(const char *name)
+@@ -471,14 +474,31 @@ static void update_temperature(struct thermal_zone_device *tz)
+ 	mutex_unlock(&tz->lock);
+ 
+ 	trace_thermal_temperature(tz);
+-	dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+-				tz->last_temperature, tz->temperature);
++	if (tz->last_temperature == THERMAL_TEMP_INVALID)
++		dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
++			tz->temperature);
++	else
++		dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
++			tz->last_temperature, tz->temperature);
++}
++
++static void thermal_zone_device_reset(struct thermal_zone_device *tz)
++{
++	struct thermal_instance *pos;
++
++	tz->temperature = THERMAL_TEMP_INVALID;
++	tz->passive = 0;
++	list_for_each_entry(pos, &tz->thermal_instances, tz_node)
++		pos->initialized = false;
+ }
+ 
+ void thermal_zone_device_update(struct thermal_zone_device *tz)
+ {
+ 	int count;
+ 
++	if (atomic_read(&in_suspend))
++		return;
++
+ 	if (!tz->ops->get_temp)
+ 		return;
+ 
+@@ -1016,6 +1036,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
+ 	if (!result) {
+ 		list_add_tail(&dev->tz_node, &tz->thermal_instances);
+ 		list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
++		atomic_set(&tz->need_update, 1);
+ 	}
+ 	mutex_unlock(&cdev->lock);
+ 	mutex_unlock(&tz->lock);
+@@ -1122,6 +1143,7 @@ __thermal_cooling_device_register(struct device_node *np,
+ 				  const struct thermal_cooling_device_ops *ops)
+ {
+ 	struct thermal_cooling_device *cdev;
++	struct thermal_zone_device *pos = NULL;
+ 	int result;
+ 
+ 	if (type && strlen(type) >= THERMAL_NAME_LENGTH)
+@@ -1166,6 +1188,12 @@ __thermal_cooling_device_register(struct device_node *np,
+ 	/* Update binding information for 'this' new cdev */
+ 	bind_cdev(cdev);
+ 
++	mutex_lock(&thermal_list_lock);
++	list_for_each_entry(pos, &thermal_tz_list, node)
++		if (atomic_cmpxchg(&pos->need_update, 1, 0))
++			thermal_zone_device_update(pos);
++	mutex_unlock(&thermal_list_lock);
++
+ 	return cdev;
+ }
+ 
+@@ -1496,6 +1524,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 	tz->trips = trips;
+ 	tz->passive_delay = passive_delay;
+ 	tz->polling_delay = polling_delay;
++	/* A new thermal zone needs to be updated anyway. */
++	atomic_set(&tz->need_update, 1);
+ 
+ 	dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ 	result = device_register(&tz->device);
+@@ -1576,7 +1606,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
+ 	if (!tz->ops->get_temp)
+ 		thermal_zone_device_set_polling(tz, 0);
+ 
+-	thermal_zone_device_update(tz);
++	thermal_zone_device_reset(tz);
++	/* Update the new thermal zone and mark it as already updated. */
++	if (atomic_cmpxchg(&tz->need_update, 1, 0))
++		thermal_zone_device_update(tz);
+ 
+ 	return tz;
+ 
+@@ -1810,6 +1843,36 @@ static void thermal_unregister_governors(void)
+ 	thermal_gov_user_space_unregister();
+ }
+ 
++static int thermal_pm_notify(struct notifier_block *nb,
++				unsigned long mode, void *_unused)
++{
++	struct thermal_zone_device *tz;
++
++	switch (mode) {
++	case PM_HIBERNATION_PREPARE:
++	case PM_RESTORE_PREPARE:
++	case PM_SUSPEND_PREPARE:
++		atomic_set(&in_suspend, 1);
++		break;
++	case PM_POST_HIBERNATION:
++	case PM_POST_RESTORE:
++	case PM_POST_SUSPEND:
++		atomic_set(&in_suspend, 0);
++		list_for_each_entry(tz, &thermal_tz_list, node) {
++			thermal_zone_device_reset(tz);
++			thermal_zone_device_update(tz);
++		}
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static struct notifier_block thermal_pm_nb = {
++	.notifier_call = thermal_pm_notify,
++};
++
+ static int __init thermal_init(void)
+ {
+ 	int result;
+@@ -1830,6 +1893,11 @@ static int __init thermal_init(void)
+ 	if (result)
+ 		goto exit_netlink;
+ 
++	result = register_pm_notifier(&thermal_pm_nb);
++	if (result)
++		pr_warn("Thermal: Can not register suspend notifier, return %d\n",
++			result);
++
+ 	return 0;
+ 
+ exit_netlink:
+@@ -1849,6 +1917,7 @@ error:
+ 
+ static void __exit thermal_exit(void)
+ {
++	unregister_pm_notifier(&thermal_pm_nb);
+ 	of_thermal_destroy_zones();
+ 	genetlink_exit();
+ 	class_unregister(&thermal_class);
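
The thermal core now registers a PM notifier so it can ignore zone updates while a system sleep transition is in flight and force a full re-evaluation on resume. A minimal, self-contained sketch of that notifier pattern (the demo_* names are invented):

#include <linux/atomic.h>
#include <linux/suspend.h>
#include <linux/types.h>

static atomic_t demo_in_suspend;

static int demo_pm_notify(struct notifier_block *nb,
			  unsigned long mode, void *unused)
{
	switch (mode) {
	case PM_SUSPEND_PREPARE:
		atomic_set(&demo_in_suspend, 1);
		break;
	case PM_POST_SUSPEND:
		atomic_set(&demo_in_suspend, 0);
		/* re-evaluate device state here */
		break;
	}
	return 0;
}

static bool demo_updates_blocked(void)
{
	return atomic_read(&demo_in_suspend) != 0;
}

static struct notifier_block demo_pm_nb = {
	.notifier_call = demo_pm_notify,
};

static int __init demo_init(void)
{
	return register_pm_notifier(&demo_pm_nb);
}
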
+diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
+index 8e391812e503..dce86ee8e9d7 100644
+--- a/drivers/thermal/thermal_core.h
++++ b/drivers/thermal/thermal_core.h
+@@ -41,6 +41,7 @@ struct thermal_instance {
+ 	struct thermal_zone_device *tz;
+ 	struct thermal_cooling_device *cdev;
+ 	int trip;
++	bool initialized;
+ 	unsigned long upper;	/* Highest cooling state for this trip point */
+ 	unsigned long lower;	/* Lowest cooling state for this trip point */
+ 	unsigned long target;	/* expected cooling state */
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index e5edf45e9d4c..33088c70ef3b 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -258,16 +258,13 @@ static void n_tty_check_throttle(struct tty_struct *tty)
+ 
+ static void n_tty_check_unthrottle(struct tty_struct *tty)
+ {
+-	if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+-	    tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
++	if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
+ 		if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
+ 			return;
+ 		if (!tty->count)
+ 			return;
+ 		n_tty_kick_worker(tty);
+-		n_tty_write_wakeup(tty->link);
+-		if (waitqueue_active(&tty->link->write_wait))
+-			wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
++		tty_wakeup(tty->link);
+ 		return;
+ 	}
+ 
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index eb8adc2e68c1..2fd163b75665 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1380,6 +1380,9 @@ ce4100_serial_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_INTEL_BSW_UART1	0x228a
+ #define PCI_DEVICE_ID_INTEL_BSW_UART2	0x228c
+ 
++#define PCI_DEVICE_ID_INTEL_BDW_UART1	0x9ce3
++#define PCI_DEVICE_ID_INTEL_BDW_UART2	0x9ce4
++
+ #define BYT_PRV_CLK			0x800
+ #define BYT_PRV_CLK_EN			(1 << 0)
+ #define BYT_PRV_CLK_M_VAL_SHIFT		1
+@@ -1458,11 +1461,13 @@ byt_serial_setup(struct serial_private *priv,
+ 	switch (pdev->device) {
+ 	case PCI_DEVICE_ID_INTEL_BYT_UART1:
+ 	case PCI_DEVICE_ID_INTEL_BSW_UART1:
++	case PCI_DEVICE_ID_INTEL_BDW_UART1:
+ 		rx_param->src_id = 3;
+ 		tx_param->dst_id = 2;
+ 		break;
+ 	case PCI_DEVICE_ID_INTEL_BYT_UART2:
+ 	case PCI_DEVICE_ID_INTEL_BSW_UART2:
++	case PCI_DEVICE_ID_INTEL_BDW_UART2:
+ 		rx_param->src_id = 5;
+ 		tx_param->dst_id = 4;
+ 		break;
+@@ -2154,6 +2159,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 		.subdevice	= PCI_ANY_ID,
+ 		.setup		= byt_serial_setup,
+ 	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BDW_UART1,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= byt_serial_setup,
++	},
++	{
++		.vendor		= PCI_VENDOR_ID_INTEL,
++		.device		= PCI_DEVICE_ID_INTEL_BDW_UART2,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= byt_serial_setup,
++	},
+ 	/*
+ 	 * ITE
+ 	 */
+@@ -5603,6 +5622,16 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+ 		pbn_byt },
+ 
++	/* Intel Broadwell */
++	{	PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
++		PCI_ANY_ID,  PCI_ANY_ID,
++		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
++		pbn_byt },
++	{	PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
++		PCI_ANY_ID,  PCI_ANY_ID,
++		PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
++		pbn_byt },
++
+ 	/*
+ 	 * Intel Penwell
+ 	 */
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 5a5c1ab5a375..be96970646a9 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2670,6 +2670,28 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
+ }
+ 
+ /**
++ *	tiocgetd	-	get line discipline
++ *	@tty: tty device
++ *	@p: pointer to user data
++ *
++ *	Retrieves the line discipline id directly from the ldisc.
++ *
++ *	Locking: waits for ldisc reference (in case the line discipline
++ *		is changing or the tty is being hungup)
++ */
++
++static int tiocgetd(struct tty_struct *tty, int __user *p)
++{
++	struct tty_ldisc *ld;
++	int ret;
++
++	ld = tty_ldisc_ref_wait(tty);
++	ret = put_user(ld->ops->num, p);
++	tty_ldisc_deref(ld);
++	return ret;
++}
++
++/**
+  *	send_break	-	performed time break
+  *	@tty: device to break on
+  *	@duration: timeout in mS
+@@ -2895,7 +2917,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 	case TIOCGSID:
+ 		return tiocgsid(tty, real_tty, p);
+ 	case TIOCGETD:
+-		return put_user(tty->ldisc->ops->num, (int __user *)p);
++		return tiocgetd(tty, p);
+ 	case TIOCSETD:
+ 		return tiocsetd(tty, p);
+ 	case TIOCVHANGUP:
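
The new tiocgetd() replaces a bare tty->ldisc->ops->num dereference with reference-counted access, closing a race against a concurrent TIOCSETD or hangup freeing the ldisc under the reader. The pattern in isolation:

#include <linux/tty.h>

static int demo_get_ldisc_num(struct tty_struct *tty)
{
	struct tty_ldisc *ld;
	int num;

	ld = tty_ldisc_ref_wait(tty);	/* may sleep until available */
	num = ld->ops->num;
	tty_ldisc_deref(ld);
	return num;
}
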
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 0fe15aec7ed0..df3deb000a80 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -432,7 +432,8 @@ static void acm_read_bulk_callback(struct urb *urb)
+ 		set_bit(rb->index, &acm->read_urbs_free);
+ 		dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
+ 							__func__, status);
+-		return;
++		if ((status != -ENOENT) || (urb->actual_length == 0))
++			return;
+ 	}
+ 
+ 	usb_mark_last_busy(acm->dev);
+@@ -1414,6 +1415,8 @@ made_compressed_probe:
+ 				usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
+ 				NULL, acm->writesize, acm_write_bulk, snd);
+ 		snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
++		if (quirks & SEND_ZERO_PACKET)
++			snd->urb->transfer_flags |= URB_ZERO_PACKET;
+ 		snd->instance = acm;
+ 	}
+ 
+@@ -1848,6 +1851,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Samsung phone in firmware update mode */
++	{ USB_DEVICE(0x04e8, 0x685d),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* Exclude Infineon Flash Loader utility */
+ 	{ USB_DEVICE(0x058b, 0x0041),
+ 	.driver_info = IGNORE_DEVICE,
+@@ -1871,6 +1879,10 @@ static const struct usb_device_id acm_ids[] = {
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ 		USB_CDC_ACM_PROTO_AT_CDMA) },
+ 
++	{ USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
++	.driver_info = SEND_ZERO_PACKET,
++	},
++
+ 	{ }
+ };
+ 
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index b3b6c9db6fe5..ac830e0ae38b 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -134,3 +134,4 @@ struct acm {
+ #define IGNORE_DEVICE			BIT(5)
+ #define QUIRK_CONTROL_LINE_STATE	BIT(6)
+ #define CLEAR_HALT_CONDITIONS		BIT(7)
++#define SEND_ZERO_PACKET		BIT(8)
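
When the new SEND_ZERO_PACKET quirk bit (defined just above) is set, the driver marks its write URBs with URB_ZERO_PACKET, making the host controller append a zero-length packet to any bulk-out transfer that is an exact multiple of the endpoint's wMaxPacketSize, so the device can detect the end of the transfer. A sketch of the flag propagation; the demo_* names mirror, but are not, the driver's code:

#include <linux/bitops.h>
#include <linux/usb.h>

#define DEMO_SEND_ZERO_PACKET	BIT(8)	/* mirrors the cdc-acm.h quirk */

static void demo_mark_zlp(struct urb *urb, unsigned long quirks)
{
	if (quirks & DEMO_SEND_ZERO_PACKET)
		urb->transfer_flags |= URB_ZERO_PACKET;
}
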
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index ee11b301f3da..e56ad83b35a4 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -5346,7 +5346,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	}
+ 
+ 	bos = udev->bos;
+-	udev->bos = NULL;
+ 
+ 	for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ 
+@@ -5439,8 +5438,11 @@ done:
+ 	usb_set_usb2_hardware_lpm(udev, 1);
+ 	usb_unlocked_enable_lpm(udev);
+ 	usb_enable_ltm(udev);
+-	usb_release_bos_descriptor(udev);
+-	udev->bos = bos;
++	/* release the new BOS descriptor allocated  by hub_port_init() */
++	if (udev->bos != bos) {
++		usb_release_bos_descriptor(udev);
++		udev->bos = bos;
++	}
+ 	return 0;
+ 
+ re_enumerate:
+diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
+index 65b0b6a58599..da03d8b258dd 100644
+--- a/drivers/usb/host/Makefile
++++ b/drivers/usb/host/Makefile
+@@ -26,9 +26,6 @@ obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
+ 
+ obj-$(CONFIG_PCI)		+= pci-quirks.o
+ 
+-obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
+-obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
+-
+ obj-$(CONFIG_USB_EHCI_HCD)	+= ehci-hcd.o
+ obj-$(CONFIG_USB_EHCI_PCI)	+= ehci-pci.o
+ obj-$(CONFIG_USB_EHCI_HCD_PLATFORM)	+= ehci-platform.o
+@@ -63,6 +60,8 @@ obj-$(CONFIG_USB_OHCI_HCD_PXA27X)	+= ohci-pxa27x.o
+ obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
+ obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
+ obj-$(CONFIG_USB_XHCI_HCD)	+= xhci-hcd.o
++obj-$(CONFIG_USB_XHCI_PCI)	+= xhci-pci.o
++obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
+ obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
+ obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
+ obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 7e5c90eebb9c..3ff5fcc7c94b 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -23,10 +23,17 @@
+ #include <linux/pci.h>
+ #include <linux/slab.h>
+ #include <linux/module.h>
++#include <linux/acpi.h>
+ 
+ #include "xhci.h"
+ #include "xhci-trace.h"
+ 
++#define SSIC_PORT_NUM		2
++#define SSIC_PORT_CFG2		0x880c
++#define SSIC_PORT_CFG2_OFFSET	0x30
++#define PROG_DONE		(1 << 30)
++#define SSIC_PORT_UNUSED	(1 << 31)
++
+ /* Device for a quirk */
+ #define PCI_VENDOR_ID_FRESCO_LOGIC	0x1b73
+ #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK	0x1000
+@@ -40,6 +47,7 @@
+ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI		0x22b5
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI		0xa12f
+ #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI	0x9d2f
++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI		0x0aa8
+ 
+ static const char hcd_name[] = "xhci_hcd";
+ 
+@@ -140,9 +148,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ 		 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
+-		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
++		 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
+ 		xhci->quirks |= XHCI_PME_STUCK_QUIRK;
+ 	}
++	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
++		 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
++		xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
++	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
+ 			pdev->device == PCI_DEVICE_ID_EJ168) {
+ 		xhci->quirks |= XHCI_RESET_ON_RESUME;
+@@ -169,20 +182,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 				"QUIRK: Resetting on resume");
+ }
+ 
+-/*
+- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+- */
+-static void xhci_pme_quirk(struct xhci_hcd *xhci)
++#ifdef CONFIG_ACPI
++static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
+ {
+-	u32 val;
+-	void __iomem *reg;
+-
+-	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+-	val = readl(reg);
+-	writel(val | BIT(28), reg);
+-	readl(reg);
++	static const u8 intel_dsm_uuid[] = {
++		0xb7, 0x0c, 0x34, 0xac,	0x01, 0xe9, 0xbf, 0x45,
++		0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
++	};
++	acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
+ }
++#else
++	static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
++#endif /* CONFIG_ACPI */
+ 
+ /* called during probe() after chip reset completes */
+ static int xhci_pci_setup(struct usb_hcd *hcd)
+@@ -263,6 +274,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 			HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ 		xhci->shared_hcd->can_do_streams = 1;
+ 
++	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
++		xhci_pme_acpi_rtd3_enable(dev);
++
+ 	/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
+ 	pm_runtime_put_noidle(&dev->dev);
+ 
+@@ -296,10 +310,65 @@ static void xhci_pci_remove(struct pci_dev *dev)
+ }
+ 
+ #ifdef CONFIG_PM
++/*
++ * In some Intel xHCI controllers, in order to get D3 working, the SSIC
++ * ports need to be marked as "unused" through a vendor-specific SSIC
++ * CONFIG register at offset 0x883c before putting xHCI into D3. After
++ * D3 exit, the SSIC ports need to be marked as "used" again. Without
++ * this change, xHCI might not enter the D3 state.
++ */
++static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	u32 val;
++	void __iomem *reg;
++	int i;
++
++	for (i = 0; i < SSIC_PORT_NUM; i++) {
++		reg = (void __iomem *) xhci->cap_regs +
++				SSIC_PORT_CFG2 +
++				i * SSIC_PORT_CFG2_OFFSET;
++
++		/* Notify SSIC that SSIC profile programming is not done. */
++		val = readl(reg) & ~PROG_DONE;
++		writel(val, reg);
++
++		/* Mark SSIC port as unused (suspend) or used (resume) */
++		val = readl(reg);
++		if (suspend)
++			val |= SSIC_PORT_UNUSED;
++		else
++			val &= ~SSIC_PORT_UNUSED;
++		writel(val, reg);
++
++		/* Notify SSIC that SSIC profile programming is done */
++		val = readl(reg) | PROG_DONE;
++		writel(val, reg);
++		readl(reg);
++	}
++}
++
++/*
++ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
++ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
++ */
++static void xhci_pme_quirk(struct usb_hcd *hcd)
++{
++	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
++	void __iomem *reg;
++	u32 val;
++
++	reg = (void __iomem *) xhci->cap_regs + 0x80a4;
++	val = readl(reg);
++	writel(val | BIT(28), reg);
++	readl(reg);
++}
++
+ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ {
+ 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+ 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
++	int			ret;
+ 
+ 	/*
+ 	 * Systems with the TI redriver that loses port status change events
+@@ -309,9 +378,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
+ 		pdev->no_d3cold = true;
+ 
+ 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+-		xhci_pme_quirk(xhci);
++		xhci_pme_quirk(hcd);
++
++	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
++		xhci_ssic_port_unused_quirk(hcd, true);
+ 
+-	return xhci_suspend(xhci, do_wakeup);
++	ret = xhci_suspend(xhci, do_wakeup);
++	if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
++		xhci_ssic_port_unused_quirk(hcd, false);
++
++	return ret;
+ }
+ 
+ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+@@ -341,8 +417,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ 		usb_enable_intel_xhci_ports(pdev);
+ 
++	if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
++		xhci_ssic_port_unused_quirk(hcd, false);
++
+ 	if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+-		xhci_pme_quirk(xhci);
++		xhci_pme_quirk(hcd);
+ 
+ 	retval = xhci_resume(xhci, hibernated);
+ 	return retval;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 41d7a05f8af4..e6d858a49d04 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -3001,21 +3001,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ }
+ 
+ /*
+- * The TD size is the number of bytes remaining in the TD (including this TRB),
+- * right shifted by 10.
+- * It must fit in bits 21:17, so it can't be bigger than 31.
+- */
+-static u32 xhci_td_remainder(unsigned int remainder)
+-{
+-	u32 max = (1 << (21 - 17 + 1)) - 1;
+-
+-	if ((remainder >> 10) >= max)
+-		return max << 17;
+-	else
+-		return (remainder >> 10) << 17;
+-}
+-
+-/*
+  * For xHCI 1.0 host controllers, TD size is the number of max packet sized
+  * packets remaining in the TD (*not* including this TRB).
+  *
+@@ -3027,30 +3012,36 @@ static u32 xhci_td_remainder(unsigned int remainder)
+  *
+  * TD size = total_packet_count - packets_transferred
+  *
+- * It must fit in bits 21:17, so it can't be bigger than 31.
++ * For xHCI 0.96 and older, the TD size field should be the remaining
++ * bytes including this TRB, right shifted by 10.
++ *
++ * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
++ * This is taken care of in the TRB_TD_SIZE() macro.
++ *
+  * The last TRB in a TD must have the TD size set to zero.
+  */
+-static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
+-		unsigned int total_packet_count, struct urb *urb,
+-		unsigned int num_trbs_left)
++static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
++			      int trb_buff_len, unsigned int td_total_len,
++			      struct urb *urb, unsigned int num_trbs_left)
+ {
+-	int packets_transferred;
++	u32 maxp, total_packet_count;
++
++	if (xhci->hci_version < 0x100)
++		return ((td_total_len - transferred) >> 10);
++
++	maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
++	total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+ 
+ 	/* One TRB with a zero-length data packet. */
+-	if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
++	if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
++	    trb_buff_len == td_total_len)
+ 		return 0;
+ 
+-	/* All the TRB queueing functions don't count the current TRB in
+-	 * running_total.
+-	 */
+-	packets_transferred = (running_total + trb_buff_len) /
+-		GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+-
+-	if ((total_packet_count - packets_transferred) > 31)
+-		return 31 << 17;
+-	return (total_packet_count - packets_transferred) << 17;
++	/* Queueing functions don't count the current TRB into transferred */
++	return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+ }
+ 
++
+ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		struct urb *urb, int slot_id, unsigned int ep_index)
+ {
+@@ -3172,17 +3163,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		}
+ 
+ 		/* Set the TRB length, TD size, and interrupter fields. */
+-		if (xhci->hci_version < 0x100) {
+-			remainder = xhci_td_remainder(
+-					urb->transfer_buffer_length -
+-					running_total);
+-		} else {
+-			remainder = xhci_v1_0_td_remainder(running_total,
+-					trb_buff_len, total_packet_count, urb,
+-					num_trbs - 1);
+-		}
++		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
++					   urb->transfer_buffer_length,
++					   urb, num_trbs - 1);
++
+ 		length_field = TRB_LEN(trb_buff_len) |
+-			remainder |
++			TRB_TD_SIZE(remainder) |
+ 			TRB_INTR_TARGET(0);
+ 
+ 		if (num_trbs > 1)
+@@ -3345,17 +3331,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 			field |= TRB_ISP;
+ 
+ 		/* Set the TRB length, TD size, and interrupter fields. */
+-		if (xhci->hci_version < 0x100) {
+-			remainder = xhci_td_remainder(
+-					urb->transfer_buffer_length -
+-					running_total);
+-		} else {
+-			remainder = xhci_v1_0_td_remainder(running_total,
+-					trb_buff_len, total_packet_count, urb,
+-					num_trbs - 1);
+-		}
++		remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
++					   urb->transfer_buffer_length,
++					   urb, num_trbs - 1);
++
+ 		length_field = TRB_LEN(trb_buff_len) |
+-			remainder |
++			TRB_TD_SIZE(remainder) |
+ 			TRB_INTR_TARGET(0);
+ 
+ 		if (num_trbs > 1)
+@@ -3393,7 +3374,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	struct usb_ctrlrequest *setup;
+ 	struct xhci_generic_trb *start_trb;
+ 	int start_cycle;
+-	u32 field, length_field;
++	u32 field, length_field, remainder;
+ 	struct urb_priv *urb_priv;
+ 	struct xhci_td *td;
+ 
+@@ -3466,9 +3447,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	else
+ 		field = TRB_TYPE(TRB_DATA);
+ 
++	remainder = xhci_td_remainder(xhci, 0,
++				   urb->transfer_buffer_length,
++				   urb->transfer_buffer_length,
++				   urb, 1);
++
+ 	length_field = TRB_LEN(urb->transfer_buffer_length) |
+-		xhci_td_remainder(urb->transfer_buffer_length) |
++		TRB_TD_SIZE(remainder) |
+ 		TRB_INTR_TARGET(0);
++
+ 	if (urb->transfer_buffer_length > 0) {
+ 		if (setup->bRequestType & USB_DIR_IN)
+ 			field |= TRB_DIR_IN;
+@@ -3691,17 +3678,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 				trb_buff_len = td_remain_len;
+ 
+ 			/* Set the TRB length, TD size, & interrupter fields. */
+-			if (xhci->hci_version < 0x100) {
+-				remainder = xhci_td_remainder(
+-						td_len - running_total);
+-			} else {
+-				remainder = xhci_v1_0_td_remainder(
+-						running_total, trb_buff_len,
+-						total_packet_count, urb,
+-						(trbs_per_td - j - 1));
+-			}
++			remainder = xhci_td_remainder(xhci, running_total,
++						   trb_buff_len, td_len,
++						   urb, trbs_per_td - j - 1);
++
+ 			length_field = TRB_LEN(trb_buff_len) |
+-				remainder |
++				TRB_TD_SIZE(remainder) |
+ 				TRB_INTR_TARGET(0);
+ 
+ 			queue_trb(xhci, ep_ring, more_trbs_coming,
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f6bb118e4501..910f7fac031f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1559,7 +1559,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ 				"HW died, freeing TD.");
+ 		urb_priv = urb->hcpriv;
+-		for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
++		for (i = urb_priv->td_cnt;
++		     i < urb_priv->length && xhci->devs[urb->dev->slot_id];
++		     i++) {
+ 			td = urb_priv->td[i];
+ 			if (!list_empty(&td->td_list))
+ 				list_del_init(&td->td_list);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 0f26dd2697b6..f18cdf0ec795 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1130,6 +1130,8 @@ enum xhci_setup_dev {
+ /* Normal TRB fields */
+ /* transfer_len bitmasks - bits 0:16 */
+ #define	TRB_LEN(p)		((p) & 0x1ffff)
++/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31) */
++#define TRB_TD_SIZE(p)          (min((p), (u32)31) << 17)
+ /* Interrupter Target - which MSI-X vector to target the completion event at */
+ #define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
+ #define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
+@@ -1568,6 +1570,7 @@ struct xhci_hcd {
+ /* For controllers with a broken beyond repair streams implementation */
+ #define XHCI_BROKEN_STREAMS	(1 << 19)
+ #define XHCI_PME_STUCK_QUIRK	(1 << 20)
++#define XHCI_SSIC_PORT_UNUSED	(1 << 22)
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+ 	/* There are two roothubs to keep track of bus suspend info for */
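
The ring changes fold the pre-1.0 and 1.0 TD-size rules into a single xhci_td_remainder(), with the new TRB_TD_SIZE() macro above capping the written value at 31. A standalone, simplified sketch of the combined rule; it drops the num_trbs_left and zero-length special cases, so treat it as an approximation rather than the driver's exact logic:

#include <linux/kernel.h>
#include <linux/types.h>

static u32 demo_td_remainder(u16 hci_version, u32 transferred,
			     u32 trb_len, u32 td_total_len, u32 maxp)
{
	/* pre-1.0 hosts: remaining bytes, including this TRB, >> 10 */
	if (hci_version < 0x100)
		return (td_total_len - transferred) >> 10;

	/* 1.0 hosts: packets left after this TRB; 0 on the last TRB */
	if (transferred + trb_len == td_total_len)
		return 0;
	return DIV_ROUND_UP(td_total_len, maxp) -
	       (transferred + trb_len) / maxp;
}
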
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 59b2126b21a3..1dd9919081f8 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -98,6 +98,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
+ 	{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
+ 	{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
++	{ USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
+ 	{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
+ 	{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ 	{ USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index a5a0376bbd48..8c660ae401d8 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
++	{ USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
+ 
+ 	/* Papouch devices based on FTDI chip */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 2943b97b2a83..7850071c0ae1 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -615,6 +615,7 @@
+  */
+ #define RATOC_VENDOR_ID		0x0584
+ #define RATOC_PRODUCT_ID_USB60F	0xb020
++#define RATOC_PRODUCT_ID_SCU18	0xb03a
+ 
+ /*
+  * Infineon Technologies
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 4021846139c9..88540596973f 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -271,6 +271,8 @@ static void option_instat_callback(struct urb *urb);
+ #define TELIT_PRODUCT_CC864_SINGLE		0x1006
+ #define TELIT_PRODUCT_DE910_DUAL		0x1010
+ #define TELIT_PRODUCT_UE910_V2			0x1012
++#define TELIT_PRODUCT_LE922_USBCFG0		0x1042
++#define TELIT_PRODUCT_LE922_USBCFG3		0x1043
+ #define TELIT_PRODUCT_LE920			0x1200
+ #define TELIT_PRODUCT_LE910			0x1201
+ 
+@@ -623,6 +625,16 @@ static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ 	.reserved = BIT(8) | BIT(10) | BIT(11),
+ };
+ 
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
++	.sendsetup = BIT(2),
++	.reserved = BIT(0) | BIT(1) | BIT(3),
++};
++
++static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
++	.sendsetup = BIT(0),
++	.reserved = BIT(1) | BIT(2) | BIT(3),
++};
++
+ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
+ 	{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
+@@ -1172,6 +1184,10 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
++	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
++		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
+ 		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
+@@ -1691,7 +1707,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+-	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
++	{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
+ 		.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ 	{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
+diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
+index 60afb39eb73c..337a0be89fcf 100644
+--- a/drivers/usb/serial/visor.c
++++ b/drivers/usb/serial/visor.c
+@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
+ 		(serial->num_interrupt_in == 0))
+ 		return 0;
+ 
++	if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
++		dev_err(&serial->interface->dev, "missing endpoints\n");
++		return -ENODEV;
++	}
++
+ 	/*
+ 	* It appears that Treos and Kyoceras want to use the
+ 	* 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
+@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
+ 	 */
+ 
+ 	/* some sanity check */
+-	if (serial->num_ports < 2)
+-		return -1;
++	if (serial->num_bulk_out < 2) {
++		dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
++		return -ENODEV;
++	}
+ 
+ 	/* port 0 now uses the modified endpoint Address */
+ 	port = serial->port[0];
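
Both visor fixes validate the endpoint counts the driver is about to index into, failing the bind with -ENODEV (rather than the meaningless -1 that clie_5_attach used to return) when a malformed or malicious device omits them. The check in isolation:

#include <linux/usb/serial.h>

static int demo_attach(struct usb_serial *serial)
{
	/* refuse to bind rather than index missing endpoints later */
	if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
		dev_err(&serial->interface->dev, "missing endpoints\n");
		return -ENODEV;
	}
	return 0;
}
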
+diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
+index 82e80e034f25..89bac470f04e 100644
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -166,13 +166,13 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
+ 	mutex_unlock(&vb->balloon_lock);
+ }
+ 
+-static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
++static void release_pages_balloon(struct virtio_balloon *vb)
+ {
+ 	unsigned int i;
+ 
+ 	/* Find pfns pointing at start of each page, get pages and free them. */
+-	for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+-		struct page *page = balloon_pfn_to_page(pfns[i]);
++	for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
++		struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+ 		adjust_managed_page_count(page, 1);
+ 		put_page(page); /* balloon reference */
+ 	}
+@@ -205,8 +205,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
+ 	 */
+ 	if (vb->num_pfns != 0)
+ 		tell_host(vb, vb->deflate_vq);
++	release_pages_balloon(vb);
+ 	mutex_unlock(&vb->balloon_lock);
+-	release_pages_by_pfn(vb->pfns, vb->num_pfns);
+ 	return num_freed_pages;
+ }
+ 
+diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
+index eba1b7ac7294..14f767e8e5c5 100644
+--- a/drivers/virtio/virtio_pci_common.c
++++ b/drivers/virtio/virtio_pci_common.c
+@@ -554,6 +554,7 @@ err_enable_device:
+ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ {
+ 	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
++	struct device *dev = get_device(&vp_dev->vdev.dev);
+ 
+ 	unregister_virtio_device(&vp_dev->vdev);
+ 
+@@ -564,6 +565,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
+ 
+ 	pci_release_regions(pci_dev);
+ 	pci_disable_device(pci_dev);
++	put_device(dev);
+ }
+ 
+ static struct pci_driver virtio_pci_driver = {
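
virtio_pci_remove() now pins the child virtio device with get_device() before unregistering it, because unregistering may drop the last reference while the remaining teardown still dereferences memory whose lifetime is tied to that device; put_device() then releases it as the final step. The general shape, with invented names:

#include <linux/device.h>

struct demo_parent {
	struct device child_dev;
	/* state freed when child_dev's release callback runs */
};

static void demo_remove(struct demo_parent *parent)
{
	struct device *dev = get_device(&parent->child_dev);

	device_unregister(&parent->child_dev); /* may drop the last ref */
	/* teardown that still touches *parent is safe here */
	put_device(dev);	/* now the embedding struct may be freed */
}
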
+diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
+index 0ef5cc13fae2..61205e3bbefa 100644
+--- a/fs/btrfs/btrfs_inode.h
++++ b/fs/btrfs/btrfs_inode.h
+@@ -192,6 +192,10 @@ struct btrfs_inode {
+ 	/* File creation time. */
+ 	struct timespec i_otime;
+ 
++	/* Hook into fs_info->delayed_iputs */
++	struct list_head delayed_iput;
++	long delayed_iput_count;
++
+ 	struct inode vfs_inode;
+ };
+ 
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 6f364e1d8d3d..699944a07491 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -1544,7 +1544,7 @@ struct btrfs_fs_info {
+ 
+ 	spinlock_t delayed_iput_lock;
+ 	struct list_head delayed_iputs;
+-	struct rw_semaphore delayed_iput_sem;
++	struct mutex cleaner_delayed_iput_mutex;
+ 
+ 	/* this protects tree_mod_seq_list */
+ 	spinlock_t tree_mod_seq_lock;
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 2ef9a4b72d06..99e8f60c7962 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -1772,8 +1772,11 @@ static int cleaner_kthread(void *arg)
+ 			goto sleep;
+ 		}
+ 
++		mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
+ 		btrfs_run_delayed_iputs(root);
+ 		btrfs_delete_unused_bgs(root->fs_info);
++		mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
++
+ 		again = btrfs_clean_one_deleted_snapshot(root);
+ 		mutex_unlock(&root->fs_info->cleaner_mutex);
+ 
+@@ -2491,8 +2494,8 @@ int open_ctree(struct super_block *sb,
+ 	mutex_init(&fs_info->unused_bg_unpin_mutex);
+ 	mutex_init(&fs_info->reloc_mutex);
+ 	mutex_init(&fs_info->delalloc_root_mutex);
++	mutex_init(&fs_info->cleaner_delayed_iput_mutex);
+ 	seqlock_init(&fs_info->profiles_lock);
+-	init_rwsem(&fs_info->delayed_iput_sem);
+ 
+ 	init_completion(&fs_info->kobj_unregister);
+ 	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 0ec3acd14cbf..3c1938000a5d 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3985,11 +3985,12 @@ commit_trans:
+ 				if (ret)
+ 					return ret;
+ 				/*
+-				 * make sure that all running delayed iput are
+-				 * done
++				 * The cleaner kthread might still be doing iput
++				 * operations. Wait for it to finish so that
++				 * more space is released.
+ 				 */
+-				down_write(&root->fs_info->delayed_iput_sem);
+-				up_write(&root->fs_info->delayed_iput_sem);
++				mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
++				mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
+ 				goto again;
+ 			} else {
+ 				btrfs_end_transaction(trans, root);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 5136c73b3dce..df4e0462976e 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -3080,56 +3080,46 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
+ 				      start, (size_t)(end - start + 1));
+ }
+ 
+-struct delayed_iput {
+-	struct list_head list;
+-	struct inode *inode;
+-};
+-
+-/* JDM: If this is fs-wide, why can't we add a pointer to
+- * btrfs_inode instead and avoid the allocation? */
+ void btrfs_add_delayed_iput(struct inode *inode)
+ {
+ 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+-	struct delayed_iput *delayed;
++	struct btrfs_inode *binode = BTRFS_I(inode);
+ 
+ 	if (atomic_add_unless(&inode->i_count, -1, 1))
+ 		return;
+ 
+-	delayed = kmalloc(sizeof(*delayed), GFP_NOFS | __GFP_NOFAIL);
+-	delayed->inode = inode;
+-
+ 	spin_lock(&fs_info->delayed_iput_lock);
+-	list_add_tail(&delayed->list, &fs_info->delayed_iputs);
++	if (binode->delayed_iput_count == 0) {
++		ASSERT(list_empty(&binode->delayed_iput));
++		list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
++	} else {
++		binode->delayed_iput_count++;
++	}
+ 	spin_unlock(&fs_info->delayed_iput_lock);
+ }
+ 
+ void btrfs_run_delayed_iputs(struct btrfs_root *root)
+ {
+-	LIST_HEAD(list);
+ 	struct btrfs_fs_info *fs_info = root->fs_info;
+-	struct delayed_iput *delayed;
+-	int empty;
+-
+-	spin_lock(&fs_info->delayed_iput_lock);
+-	empty = list_empty(&fs_info->delayed_iputs);
+-	spin_unlock(&fs_info->delayed_iput_lock);
+-	if (empty)
+-		return;
+-
+-	down_read(&fs_info->delayed_iput_sem);
+ 
+ 	spin_lock(&fs_info->delayed_iput_lock);
+-	list_splice_init(&fs_info->delayed_iputs, &list);
+-	spin_unlock(&fs_info->delayed_iput_lock);
+-
+-	while (!list_empty(&list)) {
+-		delayed = list_entry(list.next, struct delayed_iput, list);
+-		list_del(&delayed->list);
+-		iput(delayed->inode);
+-		kfree(delayed);
++	while (!list_empty(&fs_info->delayed_iputs)) {
++		struct btrfs_inode *inode;
++
++		inode = list_first_entry(&fs_info->delayed_iputs,
++				struct btrfs_inode, delayed_iput);
++		if (inode->delayed_iput_count) {
++			inode->delayed_iput_count--;
++			list_move_tail(&inode->delayed_iput,
++					&fs_info->delayed_iputs);
++		} else {
++			list_del_init(&inode->delayed_iput);
++		}
++		spin_unlock(&fs_info->delayed_iput_lock);
++		iput(&inode->vfs_inode);
++		spin_lock(&fs_info->delayed_iput_lock);
+ 	}
+-
+-	up_read(&root->fs_info->delayed_iput_sem);
++	spin_unlock(&fs_info->delayed_iput_lock);
+ }
+ 
+ /*
+@@ -8890,6 +8880,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ 	ei->dir_index = 0;
+ 	ei->last_unlink_trans = 0;
+ 	ei->last_log_commit = 0;
++	ei->delayed_iput_count = 0;
+ 
+ 	spin_lock_init(&ei->lock);
+ 	ei->outstanding_extents = 0;
+@@ -8914,6 +8905,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
+ 	mutex_init(&ei->delalloc_mutex);
+ 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
+ 	INIT_LIST_HEAD(&ei->delalloc_inodes);
++	INIT_LIST_HEAD(&ei->delayed_iput);
+ 	RB_CLEAR_NODE(&ei->rb_node);
+ 
+ 	return inode;
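
Embedding the list_head in btrfs_inode removes the GFP_NOFS | __GFP_NOFAIL allocation per delayed iput, and the rewritten drain loop pops one entry at a time, dropping the spinlock around each iput() so new entries can be queued concurrently. That drain shape in isolation, with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head link;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static void demo_process(struct demo_item *item)
{
	/* stand-in for the expensive, possibly sleeping work (iput) */
}

static void demo_drain(void)
{
	spin_lock(&demo_lock);
	while (!list_empty(&demo_list)) {
		struct demo_item *item = list_first_entry(&demo_list,
						struct demo_item, link);

		list_del_init(&item->link);
		spin_unlock(&demo_lock);	/* allow concurrent queuing */
		demo_process(item);
		spin_lock(&demo_lock);
	}
	spin_unlock(&demo_lock);
}
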
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 174f5e1e00ab..5113b7257b45 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6322,6 +6322,14 @@ int btrfs_read_sys_array(struct btrfs_root *root)
+ 				goto out_short_read;
+ 
+ 			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
++			if (!num_stripes) {
++				printk(KERN_ERR
++	    "BTRFS: invalid number of stripes %u in sys_array at offset %u\n",
++					num_stripes, cur_offset);
++				ret = -EIO;
++				break;
++			}
++
+ 			len = btrfs_chunk_item_size(num_stripes);
+ 			if (cur_offset + len > array_size)
+ 				goto out_short_read;
+diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
+index 7febcf2475c5..50b268483302 100644
+--- a/fs/cifs/cifs_debug.c
++++ b/fs/cifs/cifs_debug.c
+@@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
+ 	vaf.fmt = fmt;
+ 	vaf.va = &args;
+ 
+-	pr_err("CIFS VFS: %pV", &vaf);
++	pr_err_ratelimited("CIFS VFS: %pV", &vaf);
+ 
+ 	va_end(args);
+ }
+diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
+index f40fbaca1b2a..66cf0f9fff89 100644
+--- a/fs/cifs/cifs_debug.h
++++ b/fs/cifs/cifs_debug.h
+@@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
+ /* information message: e.g., configuration, major event */
+ #define cifs_dbg(type, fmt, ...)					\
+ do {									\
+-	if (type == FYI) {						\
+-		if (cifsFYI & CIFS_INFO) {				\
+-			pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__);	\
+-		}							\
++	if (type == FYI && cifsFYI & CIFS_INFO) {			\
++		pr_debug_ratelimited("%s: "				\
++			    fmt, __FILE__, ##__VA_ARGS__);		\
+ 	} else if (type == VFS) {					\
+ 		cifs_vfs_err(fmt, ##__VA_ARGS__);			\
+ 	} else if (type == NOISY && type != 0) {			\
+-		pr_debug(fmt, ##__VA_ARGS__);				\
++		pr_debug_ratelimited(fmt, ##__VA_ARGS__);		\
+ 	}								\
+ } while (0)
+ 
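
Switching to pr_err_ratelimited()/pr_debug_ratelimited() gives each call site its own ratelimit state (by default at most 10 messages per 5 seconds), so a flood of identical CIFS errors can no longer spam the log. Usage is a drop-in replacement:

#include <linux/printk.h>

static void demo_log_error(int rc)
{
	/* silently dropped once the per-callsite ratelimit trips */
	pr_err_ratelimited("demo: operation failed: %d\n", rc);
}
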
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 8383d5ea4202..de626b939811 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -357,7 +357,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ 	server->session_key.response = NULL;
+ 	server->session_key.len = 0;
+ 	server->lstrp = jiffies;
+-	mutex_unlock(&server->srv_mutex);
+ 
+ 	/* mark submitted MIDs for retry and issue callback */
+ 	INIT_LIST_HEAD(&retry_list);
+@@ -370,6 +369,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
+ 		list_move(&mid_entry->qhead, &retry_list);
+ 	}
+ 	spin_unlock(&GlobalMid_Lock);
++	mutex_unlock(&server->srv_mutex);
+ 
+ 	cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
+ 	list_for_each_safe(tmp, tmp2, &retry_list) {
+diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
+index b1eede3678a9..3634c7adf7d2 100644
+--- a/fs/cifs/readdir.c
++++ b/fs/cifs/readdir.c
+@@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
+ 		 * if buggy server returns . and .. late do we want to
+ 		 * check for that here?
+ 		 */
++		*tmp_buf = 0;
+ 		rc = cifs_filldir(current_entry, file, ctx,
+ 				  tmp_buf, max_len);
+ 		if (rc) {
+diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
+index 126f46b887cc..66106f6ed7b4 100644
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ 	cifs_in_send_dec(server);
+ 	cifs_save_when_sent(mid);
+ 
+-	if (rc < 0)
++	if (rc < 0) {
+ 		server->sequence_number -= 2;
++		cifs_delete_mid(mid);
++	}
++
+ 	mutex_unlock(&server->srv_mutex);
+ 
+ 	if (rc == 0)
+ 		return 0;
+ 
+-	cifs_delete_mid(mid);
+ 	add_credits_and_wake_if(server, credits, optype);
+ 	return rc;
+ }
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index de2d6245e9fa..f895a85d9304 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+ 
+ 	init_special_inode(inode, mode, dev);
+ 	err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
+-	if (!err)
++	if (err)
+ 		goto out_free;
+ 
+ 	err = read_name(inode, name);
+ 	__putname(name);
+ 	if (err)
+ 		goto out_put;
+-	if (err)
+-		goto out_put;
+ 
+ 	d_instantiate(dentry, inode);
+ 	return 0;
+diff --git a/fs/locks.c b/fs/locks.c
+index d3d558ba4da7..8501eecb2af0 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2154,7 +2154,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2196,19 +2195,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	/*
+-	 * we need that spin_lock here - it prevents reordering between
+-	 * update of i_flctx->flc_posix and check for it done in close().
+-	 * rcu_read_lock() wouldn't do.
+-	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of i_flctx->flc_posix and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+@@ -2294,7 +2296,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ 		goto out;
+ 	}
+ 
+-again:
+ 	error = flock64_to_posix_lock(filp, file_lock, &flock);
+ 	if (error)
+ 		goto out;
+@@ -2336,14 +2337,22 @@ again:
+ 	 * Attempt to detect a close/fcntl race and recover by
+ 	 * releasing the lock that was just acquired.
+ 	 */
+-	spin_lock(&current->files->file_lock);
+-	f = fcheck(fd);
+-	spin_unlock(&current->files->file_lock);
+-	if (!error && f != filp && flock.l_type != F_UNLCK) {
+-		flock.l_type = F_UNLCK;
+-		goto again;
++	if (!error && file_lock->fl_type != F_UNLCK) {
++		/*
++		 * We need that spin_lock here - it prevents reordering between
++		 * update of i_flctx->flc_posix and check for it done in
++		 * close(). rcu_read_lock() wouldn't do.
++		 */
++		spin_lock(&current->files->file_lock);
++		f = fcheck(fd);
++		spin_unlock(&current->files->file_lock);
++		if (f != filp) {
++			file_lock->fl_type = F_UNLCK;
++			error = do_lock_file_wait(filp, cmd, file_lock);
++			WARN_ON_ONCE(error);
++			error = -EBADF;
++		}
+ 	}
+-
+ out:
+ 	locks_free_lock(file_lock);
+ 	return error;
+diff --git a/fs/nfs/client.c b/fs/nfs/client.c
+index 892aefff3630..fdd234206dff 100644
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
+ 	server->options = data->options;
+ 	server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
+ 		NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
+-		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
++		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
+ 
+ 	if (data->rsize)
+ 		server->rsize = nfs_block_size(data->rsize, NULL);
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index fecd9201dbad..c2abdc7db6c3 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1484,11 +1484,9 @@ ff_layout_encode_layoutreturn(struct pnfs_layout_hdr *lo,
+ 	start = xdr_reserve_space(xdr, 4);
+ 	BUG_ON(!start);
+ 
+-	if (ff_layout_encode_ioerr(flo, xdr, args))
+-		goto out;
+-
++	ff_layout_encode_ioerr(flo, xdr, args);
+ 	ff_layout_encode_iostats(flo, xdr, args);
+-out:
++
+ 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
+ 	dprintk("%s: Return\n", __func__);
+ }
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 7f22b6c6fb50..723b8922d76b 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -442,7 +442,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
+ 			nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ 		if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
+ 			inode->i_version = fattr->change_attr;
+-		else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
++		else
+ 			nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
+ 		if (fattr->valid & NFS_ATTR_FATTR_SIZE)
+ 			inode->i_size = nfs_size_to_loff_t(fattr->size);
+@@ -1627,6 +1627,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 	unsigned long invalid = 0;
+ 	unsigned long now = jiffies;
+ 	unsigned long save_cache_validity;
++	bool cache_revalidated = true;
+ 
+ 	dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
+ 			__func__, inode->i_sb->s_id, inode->i_ino,
+@@ -1688,22 +1689,28 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 				nfs_force_lookup_revalidate(inode);
+ 			inode->i_version = fattr->change_attr;
+ 		}
+-	} else if (server->caps & NFS_CAP_CHANGE_ATTR)
++	} else {
+ 		nfsi->cache_validity |= save_cache_validity;
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
+ 		memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+-	} else if (server->caps & NFS_CAP_MTIME)
++	} else if (server->caps & NFS_CAP_MTIME) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
+ 		memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
+-	} else if (server->caps & NFS_CAP_CTIME)
++	} else if (server->caps & NFS_CAP_CTIME) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	/* Check if our cached file size is stale */
+ 	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+@@ -1723,19 +1730,23 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 					(long long)cur_isize,
+ 					(long long)new_isize);
+ 		}
+-	} else
++	} else {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_PAGECACHE
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_ATIME)
+ 		memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
+-	else if (server->caps & NFS_CAP_ATIME)
++	else if (server->caps & NFS_CAP_ATIME) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATIME
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_MODE) {
+ 		if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+@@ -1744,36 +1755,42 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			inode->i_mode = newmode;
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 		}
+-	} else if (server->caps & NFS_CAP_MODE)
++	} else if (server->caps & NFS_CAP_MODE) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
+ 		if (!uid_eq(inode->i_uid, fattr->uid)) {
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 			inode->i_uid = fattr->uid;
+ 		}
+-	} else if (server->caps & NFS_CAP_OWNER)
++	} else if (server->caps & NFS_CAP_OWNER) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
+ 		if (!gid_eq(inode->i_gid, fattr->gid)) {
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ 			inode->i_gid = fattr->gid;
+ 		}
+-	} else if (server->caps & NFS_CAP_OWNER_GROUP)
++	} else if (server->caps & NFS_CAP_OWNER_GROUP) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_ACCESS
+ 				| NFS_INO_INVALID_ACL
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
+ 		if (inode->i_nlink != fattr->nlink) {
+@@ -1782,19 +1799,22 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 				invalid |= NFS_INO_INVALID_DATA;
+ 			set_nlink(inode, fattr->nlink);
+ 		}
+-	} else if (server->caps & NFS_CAP_NLINK)
++	} else if (server->caps & NFS_CAP_NLINK) {
+ 		nfsi->cache_validity |= save_cache_validity &
+ 				(NFS_INO_INVALID_ATTR
+ 				| NFS_INO_REVAL_FORCED);
++		cache_revalidated = false;
++	}
+ 
+ 	if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
+ 		/*
+ 		 * report the blocks in 512byte units
+ 		 */
+ 		inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
+- 	}
+-	if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
++	} else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
+ 		inode->i_blocks = fattr->du.nfs2.blocks;
++	else
++		cache_revalidated = false;
+ 
+ 	/* Update attrtimeo value if we're out of the unstable period */
+ 	if (invalid & NFS_INO_INVALID_ATTR) {
+@@ -1804,9 +1824,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 		/* Set barrier to be more recent than all outstanding updates */
+ 		nfsi->attr_gencount = nfs_inc_attr_generation_counter();
+ 	} else {
+-		if (!time_in_range_open(now, nfsi->attrtimeo_timestamp, nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
+-			if ((nfsi->attrtimeo <<= 1) > NFS_MAXATTRTIMEO(inode))
+-				nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
++		if (cache_revalidated) {
++			if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
++				nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
++				nfsi->attrtimeo <<= 1;
++				if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
++					nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
++			}
+ 			nfsi->attrtimeo_timestamp = now;
+ 		}
+ 		/* Set the barrier to be more recent than this fattr */
+@@ -1815,7 +1839,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 	}
+ 
+ 	/* Don't declare attrcache up to date if there were no attrs! */
+-	if (fattr->valid != 0)
++	if (cache_revalidated)
+ 		invalid &= ~NFS_INO_INVALID_ATTR;
+ 
+ 	/* Don't invalidate the data if we were to blame */
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 8f393fcc313b..2c4f41c34366 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1284,6 +1284,7 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ 	 * Protect the call to nfs4_state_set_mode_locked and
+ 	 * serialise the stateid update
+ 	 */
++	spin_lock(&state->owner->so_lock);
+ 	write_seqlock(&state->seqlock);
+ 	if (deleg_stateid != NULL) {
+ 		nfs4_stateid_copy(&state->stateid, deleg_stateid);
+@@ -1292,7 +1293,6 @@ static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_s
+ 	if (open_stateid != NULL)
+ 		nfs_set_open_stateid_locked(state, open_stateid, fmode);
+ 	write_sequnlock(&state->seqlock);
+-	spin_lock(&state->owner->so_lock);
+ 	update_open_stateflags(state, fmode);
+ 	spin_unlock(&state->owner->so_lock);
+ }
+@@ -8512,7 +8512,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
+ 	.minor_version = 0,
+ 	.init_caps = NFS_CAP_READDIRPLUS
+ 		| NFS_CAP_ATOMIC_OPEN
+-		| NFS_CAP_CHANGE_ATTR
+ 		| NFS_CAP_POSIX_LOCK,
+ 	.init_client = nfs40_init_client,
+ 	.shutdown_client = nfs40_shutdown_client,
+@@ -8538,7 +8537,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
+ 	.minor_version = 1,
+ 	.init_caps = NFS_CAP_READDIRPLUS
+ 		| NFS_CAP_ATOMIC_OPEN
+-		| NFS_CAP_CHANGE_ATTR
+ 		| NFS_CAP_POSIX_LOCK
+ 		| NFS_CAP_STATEID_NFSV41
+ 		| NFS_CAP_ATOMIC_OPEN_V1,
+@@ -8561,7 +8559,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
+ 	.minor_version = 2,
+ 	.init_caps = NFS_CAP_READDIRPLUS
+ 		| NFS_CAP_ATOMIC_OPEN
+-		| NFS_CAP_CHANGE_ATTR
+ 		| NFS_CAP_POSIX_LOCK
+ 		| NFS_CAP_STATEID_NFSV41
+ 		| NFS_CAP_ATOMIC_OPEN_V1
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index 482cfd34472d..523e485a11b8 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -2518,6 +2518,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
+ 	spin_lock(&dlm->master_lock);
+ 	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
+ 				    namelen, target, dlm->node_num);
++	/* get an extra reference on the mle.
++	 * otherwise the assert_master from the new
++	 * master will destroy this.
++	 */
++	dlm_get_mle_inuse(mle);
+ 	spin_unlock(&dlm->master_lock);
+ 	spin_unlock(&dlm->spinlock);
+ 
+@@ -2553,6 +2558,7 @@ fail:
+ 		if (mle_added) {
+ 			dlm_mle_detach_hb_events(dlm, mle);
+ 			dlm_put_mle(mle);
++			dlm_put_mle_inuse(mle);
+ 		} else if (mle) {
+ 			kmem_cache_free(dlm_mle_cache, mle);
+ 			mle = NULL;
+@@ -2570,17 +2576,6 @@ fail:
+ 	 * ensure that all assert_master work is flushed. */
+ 	flush_workqueue(dlm->dlm_worker);
+ 
+-	/* get an extra reference on the mle.
+-	 * otherwise the assert_master from the new
+-	 * master will destroy this.
+-	 * also, make sure that all callers of dlm_get_mle
+-	 * take both dlm->spinlock and dlm->master_lock */
+-	spin_lock(&dlm->spinlock);
+-	spin_lock(&dlm->master_lock);
+-	dlm_get_mle_inuse(mle);
+-	spin_unlock(&dlm->master_lock);
+-	spin_unlock(&dlm->spinlock);
+-
+ 	/* notify new node and send all lock state */
+ 	/* call send_one_lockres with migration flag.
+ 	 * this serves as notice to the target node that a
+@@ -3309,6 +3304,15 @@ top:
+ 			    mle->new_master != dead_node)
+ 				continue;
+ 
++			if (mle->new_master == dead_node && mle->inuse) {
++				mlog(ML_NOTICE, "%s: target %u died during "
++						"migration from %u, the MLE is "
++						"still in use, ignore it!\n",
++						dlm->name, dead_node,
++						mle->master);
++				continue;
++			}
++
+ 			/* If we have reached this point, this mle needs to be
+ 			 * removed from the list and freed. */
+ 			dlm_clean_migration_mle(dlm, mle);
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index 3d90ad7ff91f..f25ff5d3a2f9 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -2360,6 +2360,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
+ 						break;
+ 					}
+ 				}
++				dlm_lockres_clear_refmap_bit(dlm, res,
++						dead_node);
+ 				spin_unlock(&res->spinlock);
+ 				continue;
+ 			}
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 23157e40dd74..3623ab6fa97f 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
+ 	unsigned int gen;
+ 	int noqueue_attempted = 0;
+ 	int dlm_locked = 0;
++	int kick_dc = 0;
+ 
+ 	if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+ 		mlog_errno(-EINVAL);
+@@ -1524,7 +1525,12 @@ update_holders:
+ unlock:
+ 	lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+ 
++	/* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
++	kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
++
+ 	spin_unlock_irqrestore(&lockres->l_lock, flags);
++	if (kick_dc)
++		ocfs2_wake_downconvert_thread(osb);
+ out:
+ 	/*
+ 	 * This is helping work around a lock inversion between the page lock
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 871fcb67be97..758012bfd5f0 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -22,9 +22,9 @@
+ 
+ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ {
+-	ssize_t list_size, size;
+-	char *buf, *name, *value;
+-	int error;
++	ssize_t list_size, size, value_size = 0;
++	char *buf, *name, *value = NULL;
++	int uninitialized_var(error);
+ 
+ 	if (!old->d_inode->i_op->getxattr ||
+ 	    !new->d_inode->i_op->getxattr)
+@@ -41,29 +41,40 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
+-	error = -ENOMEM;
+-	value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
+-	if (!value)
+-		goto out;
+-
+ 	list_size = vfs_listxattr(old, buf, list_size);
+ 	if (list_size <= 0) {
+ 		error = list_size;
+-		goto out_free_value;
++		goto out;
+ 	}
+ 
+ 	for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
+-		size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
+-		if (size <= 0) {
++retry:
++		size = vfs_getxattr(old, name, value, value_size);
++		if (size == -ERANGE)
++			size = vfs_getxattr(old, name, NULL, 0);
++
++		if (size < 0) {
+ 			error = size;
+-			goto out_free_value;
++			break;
++		}
++
++		if (size > value_size) {
++			void *new;
++
++			new = krealloc(value, size, GFP_KERNEL);
++			if (!new) {
++				error = -ENOMEM;
++				break;
++			}
++			value = new;
++			value_size = size;
++			goto retry;
+ 		}
++
+ 		error = vfs_setxattr(new, name, value, size, 0);
+ 		if (error)
+-			goto out_free_value;
++			break;
+ 	}
+-
+-out_free_value:
+ 	kfree(value);
+ out:
+ 	kfree(buf);
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index ba0db2638946..a1b069e5e363 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -45,6 +45,19 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
+ 	int err;
+ 	struct dentry *upperdentry;
+ 
++	/*
++	 * Check for permissions before trying to copy-up.  This is redundant
++	 * since it will be rechecked later by ->setattr() on upper dentry.  But
++	 * without this, copy-up can be triggered by just about anybody.
++	 *
++	 * We don't initialize inode->size, which just means that
++	 * inode_newsize_ok() will always check against MAX_LFS_FILESIZE and not
++	 * check for a swapfile (which this won't be anyway).
++	 */
++	err = inode_change_ok(dentry->d_inode, attr);
++	if (err)
++		return err;
++
+ 	err = ovl_want_write(dentry);
+ 	if (err)
+ 		goto out;
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 70e9af551600..adcb1398c481 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -571,7 +571,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
+ 			       (int) PTR_ERR(dentry));
+ 			continue;
+ 		}
+-		ovl_cleanup(upper->d_inode, dentry);
++		if (dentry->d_inode)
++			ovl_cleanup(upper->d_inode, dentry);
+ 		dput(dentry);
+ 	}
+ 	mutex_unlock(&upper->d_inode->i_mutex);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index d74af7f78fec..bd6d5c1e667d 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -9,6 +9,7 @@
+ 
+ #include <linux/fs.h>
+ #include <linux/namei.h>
++#include <linux/pagemap.h>
+ #include <linux/xattr.h>
+ #include <linux/security.h>
+ #include <linux/mount.h>
+@@ -847,6 +848,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 	}
+ 
+ 	sb->s_stack_depth = 0;
++	sb->s_maxbytes = MAX_LFS_FILESIZE;
+ 	if (ufs->config.upperdir) {
+ 		if (!ufs->config.workdir) {
+ 			pr_err("overlayfs: missing 'workdir'\n");
+@@ -986,6 +988,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 
+ 	root_dentry->d_fsdata = oe;
+ 
++	ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
++		     root_dentry->d_inode);
++
+ 	sb->s_magic = OVERLAYFS_SUPER_MAGIC;
+ 	sb->s_op = &ovl_super_operations;
+ 	sb->s_root = root_dentry;
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 6afac3d561ac..78a40ef0c463 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -2052,14 +2052,29 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ 		epos->offset += adsize;
+ }
+ 
++/*
++ * Only 1 indirect extent in a row really makes sense but allow up to 16 in case
++ * someone does some weird stuff.
++ */
++#define UDF_MAX_INDIR_EXTS 16
++
+ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
+ 		     struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
+ {
+ 	int8_t etype;
++	unsigned int indirections = 0;
+ 
+ 	while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
+ 	       (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
+ 		int block;
++
++		if (++indirections > UDF_MAX_INDIR_EXTS) {
++			udf_err(inode->i_sb,
++				"too many indirect extents in inode %lu\n",
++				inode->i_ino);
++			return -1;
++		}
++
+ 		epos->block = *eloc;
+ 		epos->offset = sizeof(struct allocExtDesc);
+ 		brelse(epos->bh);
+diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
+index b84fee372734..2eafe2c4d239 100644
+--- a/fs/udf/unicode.c
++++ b/fs/udf/unicode.c
+@@ -133,11 +133,15 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ 		if (c < 0x80U)
+ 			utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
+ 		else if (c < 0x800U) {
++			if (utf_o->u_len > (UDF_NAME_LEN - 4))
++				break;
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0xc0 | (c >> 6));
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0x80 | (c & 0x3f));
+ 		} else {
++			if (utf_o->u_len > (UDF_NAME_LEN - 5))
++				break;
+ 			utf_o->u_name[utf_o->u_len++] =
+ 						(uint8_t)(0xe0 | (c >> 12));
+ 			utf_o->u_name[utf_o->u_len++] =
+@@ -178,17 +182,22 @@ int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+ static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+ {
+ 	unsigned c, i, max_val, utf_char;
+-	int utf_cnt, u_len;
++	int utf_cnt, u_len, u_ch;
+ 
+ 	memset(ocu, 0, sizeof(dstring) * length);
+ 	ocu[0] = 8;
+ 	max_val = 0xffU;
++	u_ch = 1;
+ 
+ try_again:
+ 	u_len = 0U;
+ 	utf_char = 0U;
+ 	utf_cnt = 0U;
+ 	for (i = 0U; i < utf->u_len; i++) {
++		/* Name didn't fit? */
++		if (u_len + 1 + u_ch >= length)
++			return 0;
++
+ 		c = (uint8_t)utf->u_name[i];
+ 
+ 		/* Complete a multi-byte UTF-8 character */
+@@ -230,6 +239,7 @@ try_again:
+ 			if (max_val == 0xffU) {
+ 				max_val = 0xffffU;
+ 				ocu[0] = (uint8_t)0x10U;
++				u_ch = 2;
+ 				goto try_again;
+ 			}
+ 			goto error_out;
+@@ -282,7 +292,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
+ 			c = (c << 8) | ocu[i++];
+ 
+ 		len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+-				    UDF_NAME_LEN - utf_o->u_len);
++				    UDF_NAME_LEN - 2 - utf_o->u_len);
+ 		/* Valid character? */
+ 		if (len >= 0)
+ 			utf_o->u_len += len;
+@@ -300,15 +310,19 @@ static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
+ 	int len;
+ 	unsigned i, max_val;
+ 	uint16_t uni_char;
+-	int u_len;
++	int u_len, u_ch;
+ 
+ 	memset(ocu, 0, sizeof(dstring) * length);
+ 	ocu[0] = 8;
+ 	max_val = 0xffU;
++	u_ch = 1;
+ 
+ try_again:
+ 	u_len = 0U;
+ 	for (i = 0U; i < uni->u_len; i++) {
++		/* Name didn't fit? */
++		if (u_len + 1 + u_ch >= length)
++			return 0;
+ 		len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
+ 		if (!len)
+ 			continue;
+@@ -321,6 +335,7 @@ try_again:
+ 		if (uni_char > max_val) {
+ 			max_val = 0xffffU;
+ 			ocu[0] = (uint8_t)0x10U;
++			u_ch = 2;
+ 			goto try_again;
+ 		}
+ 
+diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
+index 6fbf2d853a54..48aff071591d 100644
+--- a/fs/xfs/libxfs/xfs_dquot_buf.c
++++ b/fs/xfs/libxfs/xfs_dquot_buf.c
+@@ -54,7 +54,7 @@ xfs_dqcheck(
+ 	xfs_dqid_t	 id,
+ 	uint		 type,	  /* used only when IO_dorepair is true */
+ 	uint		 flags,
+-	char		 *str)
++	const char	 *str)
+ {
+ 	xfs_dqblk_t	 *d = (xfs_dqblk_t *)ddq;
+ 	int		errs = 0;
+@@ -207,7 +207,8 @@ xfs_dquot_buf_verify_crc(
+ STATIC bool
+ xfs_dquot_buf_verify(
+ 	struct xfs_mount	*mp,
+-	struct xfs_buf		*bp)
++	struct xfs_buf		*bp,
++	int			warn)
+ {
+ 	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
+ 	xfs_dqid_t		id = 0;
+@@ -240,8 +241,7 @@ xfs_dquot_buf_verify(
+ 		if (i == 0)
+ 			id = be32_to_cpu(ddq->d_id);
+ 
+-		error = xfs_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
+-				       "xfs_dquot_buf_verify");
++		error = xfs_dqcheck(mp, ddq, id + i, 0, warn, __func__);
+ 		if (error)
+ 			return false;
+ 	}
+@@ -256,7 +256,7 @@ xfs_dquot_buf_read_verify(
+ 
+ 	if (!xfs_dquot_buf_verify_crc(mp, bp))
+ 		xfs_buf_ioerror(bp, -EFSBADCRC);
+-	else if (!xfs_dquot_buf_verify(mp, bp))
++	else if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN))
+ 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ 
+ 	if (bp->b_error)
+@@ -264,6 +264,25 @@ xfs_dquot_buf_read_verify(
+ }
+ 
+ /*
++ * readahead errors are silent and simply leave the buffer as !done so a real
++ * read will then be run with the xfs_dquot_buf_ops verifier. See
++ * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
++ * reporting the failure.
++ */
++static void
++xfs_dquot_buf_readahead_verify(
++	struct xfs_buf	*bp)
++{
++	struct xfs_mount	*mp = bp->b_target->bt_mount;
++
++	if (!xfs_dquot_buf_verify_crc(mp, bp) ||
++	    !xfs_dquot_buf_verify(mp, bp, 0)) {
++		xfs_buf_ioerror(bp, -EIO);
++		bp->b_flags &= ~XBF_DONE;
++	}
++}
++
++/*
+  * we don't calculate the CRC here as that is done when the dquot is flushed to
+  * the buffer after the update is done. This ensures that the dquot in the
+  * buffer always has an up-to-date CRC value.
+@@ -274,7 +293,7 @@ xfs_dquot_buf_write_verify(
+ {
+ 	struct xfs_mount	*mp = bp->b_target->bt_mount;
+ 
+-	if (!xfs_dquot_buf_verify(mp, bp)) {
++	if (!xfs_dquot_buf_verify(mp, bp, XFS_QMOPT_DOWARN)) {
+ 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ 		xfs_verifier_error(bp);
+ 		return;
+@@ -286,3 +305,7 @@ const struct xfs_buf_ops xfs_dquot_buf_ops = {
+ 	.verify_write = xfs_dquot_buf_write_verify,
+ };
+ 
++const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
++	.verify_read = xfs_dquot_buf_readahead_verify,
++	.verify_write = xfs_dquot_buf_write_verify,
++};
+diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
+index 002b6b3a1988..7da6d0b2c2ed 100644
+--- a/fs/xfs/libxfs/xfs_inode_buf.c
++++ b/fs/xfs/libxfs/xfs_inode_buf.c
+@@ -63,11 +63,14 @@ xfs_inobp_check(
+  * has not had the inode cores stamped into it. Hence for readahead, the buffer
+  * may be potentially invalid.
+  *
+- * If the readahead buffer is invalid, we don't want to mark it with an error,
+- * but we do want to clear the DONE status of the buffer so that a followup read
+- * will re-read it from disk. This will ensure that we don't get an unnecessary
+- * warnings during log recovery and we don't get unnecssary panics on debug
+- * kernels.
++ * If the readahead buffer is invalid, we need to mark it with an error and
++ * clear the DONE status of the buffer so that a followup read will re-read it
++ * from disk. We don't report the error otherwise to avoid warnings during log
++ * recovery and we don't get unnecessary panics on debug kernels. We use EIO here
++ * because all we want to do is say readahead failed; there is no-one to report
++ * the error to, so this will distinguish it from a non-ra verifier failure.
++ * Changes to this readahead error behaviour also need to be reflected in
++ * xfs_dquot_buf_readahead_verify().
+  */
+ static void
+ xfs_inode_buf_verify(
+@@ -95,6 +98,7 @@ xfs_inode_buf_verify(
+ 						XFS_RANDOM_ITOBP_INOTOBP))) {
+ 			if (readahead) {
+ 				bp->b_flags &= ~XBF_DONE;
++				xfs_buf_ioerror(bp, -EIO);
+ 				return;
+ 			}
+ 
+diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
+index 1b0a08379759..f51078f1e92a 100644
+--- a/fs/xfs/libxfs/xfs_quota_defs.h
++++ b/fs/xfs/libxfs/xfs_quota_defs.h
+@@ -153,7 +153,7 @@ typedef __uint16_t	xfs_qwarncnt_t;
+ #define XFS_QMOPT_RESBLK_MASK	(XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS)
+ 
+ extern int xfs_dqcheck(struct xfs_mount *mp, xfs_disk_dquot_t *ddq,
+-		       xfs_dqid_t id, uint type, uint flags, char *str);
++		       xfs_dqid_t id, uint type, uint flags, const char *str);
+ extern int xfs_calc_dquots_per_chunk(unsigned int nbblks);
+ 
+ #endif	/* __XFS_QUOTA_H__ */
+diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
+index 8dda4b321343..a3472a38efd2 100644
+--- a/fs/xfs/libxfs/xfs_shared.h
++++ b/fs/xfs/libxfs/xfs_shared.h
+@@ -49,6 +49,7 @@ extern const struct xfs_buf_ops xfs_inobt_buf_ops;
+ extern const struct xfs_buf_ops xfs_inode_buf_ops;
+ extern const struct xfs_buf_ops xfs_inode_buf_ra_ops;
+ extern const struct xfs_buf_ops xfs_dquot_buf_ops;
++extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
+ extern const struct xfs_buf_ops xfs_sb_buf_ops;
+ extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
+ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
+index 1790b00bea7a..7dd64bf98c56 100644
+--- a/fs/xfs/xfs_buf.c
++++ b/fs/xfs/xfs_buf.c
+@@ -605,6 +605,13 @@ found:
+ 		}
+ 	}
+ 
++	/*
++	 * Clear b_error if this is a lookup from a caller that doesn't expect
++	 * valid data to be found in the buffer.
++	 */
++	if (!(flags & XBF_READ))
++		xfs_buf_ioerror(bp, 0);
++
+ 	XFS_STATS_INC(xb_get);
+ 	trace_xfs_buf_get(bp, flags, _RET_IP_);
+ 	return bp;
+@@ -1522,6 +1529,16 @@ xfs_wait_buftarg(
+ 	LIST_HEAD(dispose);
+ 	int loop = 0;
+ 
++	/*
++	 * We need to flush the buffer workqueue to ensure that all IO
++	 * completion processing is 100% done. Just waiting on buffer locks is
++	 * not sufficient for async IO as the reference count held over IO is
++	 * not released until after the buffer lock is dropped. Hence we need to
++	 * ensure here that all reference counts have been dropped before we
++	 * start walking the LRU list.
++	 */
++	drain_workqueue(btp->bt_mount->m_buf_workqueue);
++
+ 	/* loop until there is nothing left on the lru list. */
+ 	while (list_lru_count(&btp->bt_lru)) {
+ 		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index a5d03396dda0..1114afdd5a6b 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -3154,6 +3154,7 @@ xlog_recover_dquot_ra_pass2(
+ 	struct xfs_disk_dquot	*recddq;
+ 	struct xfs_dq_logformat	*dq_f;
+ 	uint			type;
++	int			len;
+ 
+ 
+ 	if (mp->m_qflags == 0)
+@@ -3174,8 +3175,12 @@ xlog_recover_dquot_ra_pass2(
+ 	ASSERT(dq_f);
+ 	ASSERT(dq_f->qlf_len == 1);
+ 
+-	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
+-			  XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
++	len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
++	if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
++		return;
++
++	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
++			  &xfs_dquot_buf_ra_ops);
+ }
+ 
+ STATIC void
+diff --git a/include/crypto/hash.h b/include/crypto/hash.h
+index 98abda9ed3aa..bbc59bdd6395 100644
+--- a/include/crypto/hash.h
++++ b/include/crypto/hash.h
+@@ -199,6 +199,7 @@ struct crypto_ahash {
+ 		      unsigned int keylen);
+ 
+ 	unsigned int reqsize;
++	bool has_setkey;
+ 	struct crypto_tfm base;
+ };
+ 
+@@ -356,6 +357,11 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
+ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ 			unsigned int keylen);
+ 
++static inline bool crypto_ahash_has_setkey(struct crypto_ahash *tfm)
++{
++	return tfm->has_setkey;
++}
++
+ /**
+  * crypto_ahash_finup() - update and finalize message digest
+  * @req: reference to the ahash_request handle that holds all information
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 018afb264ac2..a2bfd7843f18 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -30,6 +30,9 @@ struct alg_sock {
+ 
+ 	struct sock *parent;
+ 
++	unsigned int refcnt;
++	unsigned int nokey_refcnt;
++
+ 	const struct af_alg_type *type;
+ 	void *private;
+ };
+@@ -50,9 +53,11 @@ struct af_alg_type {
+ 	void (*release)(void *private);
+ 	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+ 	int (*accept)(void *private, struct sock *sk);
++	int (*accept_nokey)(void *private, struct sock *sk);
+ 	int (*setauthsize)(void *private, unsigned int authsize);
+ 
+ 	struct proto_ops *ops;
++	struct proto_ops *ops_nokey;
+ 	struct module *owner;
+ 	char name[14];
+ };
+@@ -67,6 +72,7 @@ int af_alg_register_type(const struct af_alg_type *type);
+ int af_alg_unregister_type(const struct af_alg_type *type);
+ 
+ int af_alg_release(struct socket *sock);
++void af_alg_release_parent(struct sock *sk);
+ int af_alg_accept(struct sock *sk, struct socket *newsock);
+ 
+ int af_alg_make_sg(struct af_alg_sgl *sgl, struct iov_iter *iter, int len);
+@@ -83,11 +89,6 @@ static inline struct alg_sock *alg_sk(struct sock *sk)
+ 	return (struct alg_sock *)sk;
+ }
+ 
+-static inline void af_alg_release_parent(struct sock *sk)
+-{
+-	sock_put(alg_sk(sk)->parent);
+-}
+-
+ static inline void af_alg_init_completion(struct af_alg_completion *completion)
+ {
+ 	init_completion(&completion->completion);
+diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
+index 7bfb063029d8..461a0558bca4 100644
+--- a/include/drm/drm_cache.h
++++ b/include/drm/drm_cache.h
+@@ -35,4 +35,13 @@
+ 
+ void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
+ 
++static inline bool drm_arch_can_wc_memory(void)
++{
++#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
++	return false;
++#else
++	return true;
++#endif
++}
++
+ #endif
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index 54233583c6cb..ca71c03143d1 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -731,8 +731,6 @@ struct drm_connector {
+ 	uint8_t num_h_tile, num_v_tile;
+ 	uint8_t tile_h_loc, tile_v_loc;
+ 	uint16_t tile_h_size, tile_v_size;
+-
+-	struct list_head destroy_list;
+ };
+ 
+ /**
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index a89f505c856b..c7f01d1aa562 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -449,9 +449,7 @@ struct drm_dp_mst_topology_mgr {
+ 	   the mstb tx_slots and txmsg->state once they are queued */
+ 	struct mutex qlock;
+ 	struct list_head tx_msg_downq;
+-	struct list_head tx_msg_upq;
+ 	bool tx_down_in_progress;
+-	bool tx_up_in_progress;
+ 
+ 	/* payload info + lock for it */
+ 	struct mutex payload_lock;
+diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
+index d639049a613d..553210c02ee0 100644
+--- a/include/drm/drm_fixed.h
++++ b/include/drm/drm_fixed.h
+@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
+ #define DRM_FIXED_ONE		(1ULL << DRM_FIXED_POINT)
+ #define DRM_FIXED_DECIMAL_MASK	(DRM_FIXED_ONE - 1)
+ #define DRM_FIXED_DIGITS_MASK	(~DRM_FIXED_DECIMAL_MASK)
++#define DRM_FIXED_EPSILON	1LL
++#define DRM_FIXED_ALMOST_ONE	(DRM_FIXED_ONE - DRM_FIXED_EPSILON)
+ 
+ static inline s64 drm_int2fixp(int a)
+ {
+ 	return ((s64)a) << DRM_FIXED_POINT;
+ }
+ 
+-static inline int drm_fixp2int(int64_t a)
++static inline int drm_fixp2int(s64 a)
+ {
+ 	return ((s64)a) >> DRM_FIXED_POINT;
+ }
+ 
+-static inline unsigned drm_fixp_msbset(int64_t a)
++static inline int drm_fixp2int_ceil(s64 a)
++{
++	if (a > 0)
++		return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
++	else
++		return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
++}
++
++static inline unsigned drm_fixp_msbset(s64 a)
+ {
+ 	unsigned shift, sign = (a >> 63) & 1;
+ 
+@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
+ 	return result;
+ }
+ 
++static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
++{
++	s64 res;
++	bool a_neg = a < 0;
++	bool b_neg = b < 0;
++	u64 a_abs = a_neg ? -a : a;
++	u64 b_abs = b_neg ? -b : b;
++	u64 rem;
++
++	/* determine integer part */
++	u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
++
++	/* determine fractional part */
++	{
++		u32 i = DRM_FIXED_POINT;
++
++		do {
++			rem <<= 1;
++			res_abs <<= 1;
++			if (rem >= b_abs) {
++				res_abs |= 1;
++				rem -= b_abs;
++			}
++		} while (--i != 0);
++	}
++
++	/* round up LSB */
++	{
++		u64 summand = (rem << 1) >= b_abs;
++
++		res_abs += summand;
++	}
++
++	res = (s64) res_abs;
++	if (a_neg ^ b_neg)
++		res = -res;
++	return res;
++}
++
+ static inline s64 drm_fixp_exp(s64 x)
+ {
+ 	s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
+diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
+index e15499422fdc..e91c6f15f6e8 100644
+--- a/include/linux/ceph/messenger.h
++++ b/include/linux/ceph/messenger.h
+@@ -224,6 +224,7 @@ struct ceph_connection {
+ 	struct ceph_entity_addr actual_peer_addr;
+ 
+ 	/* message out temps */
++	struct ceph_msg_header out_hdr;
+ 	struct ceph_msg *out_msg;        /* sending message (== tail of
+ 					    out_sent) */
+ 	bool out_msg_done;
+@@ -233,7 +234,6 @@ struct ceph_connection {
+ 	int out_kvec_left;   /* kvec's left in out_kvec */
+ 	int out_skip;        /* skip this many bytes */
+ 	int out_kvec_bytes;  /* total bytes left */
+-	bool out_kvec_is_msg; /* kvec refers to out_msg */
+ 	int out_more;        /* there is more data after the kvecs */
+ 	__le64 out_temp_ack; /* for writing an ack */
+ 
+diff --git a/include/linux/console.h b/include/linux/console.h
+index 9f50fb413c11..901555a3886e 100644
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -149,6 +149,7 @@ extern int console_trylock(void);
+ extern void console_unlock(void);
+ extern void console_conditional_schedule(void);
+ extern void console_unblank(void);
++extern void console_flush_on_panic(void);
+ extern struct tty_driver *console_device(int *);
+ extern void console_stop(struct console *);
+ extern void console_start(struct console *);
+diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
+index 5e1273d4de14..eda4a72a9b25 100644
+--- a/include/linux/nfs_fs_sb.h
++++ b/include/linux/nfs_fs_sb.h
+@@ -220,7 +220,7 @@ struct nfs_server {
+ #define NFS_CAP_SYMLINKS	(1U << 2)
+ #define NFS_CAP_ACLS		(1U << 3)
+ #define NFS_CAP_ATOMIC_OPEN	(1U << 4)
+-#define NFS_CAP_CHANGE_ATTR	(1U << 5)
++/* #define NFS_CAP_CHANGE_ATTR	(1U << 5) */
+ #define NFS_CAP_FILEID		(1U << 6)
+ #define NFS_CAP_MODE		(1U << 7)
+ #define NFS_CAP_NLINK		(1U << 8)
+diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
+index 33170dbd9db4..5d5174b59802 100644
+--- a/include/linux/radix-tree.h
++++ b/include/linux/radix-tree.h
+@@ -370,12 +370,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
+ 			     struct radix_tree_iter *iter, unsigned flags);
+ 
+ /**
++ * radix_tree_iter_retry - retry this chunk of the iteration
++ * @iter:	iterator state
++ *
++ * If we iterate over a tree protected only by the RCU lock, a race
++ * against deletion or creation may result in seeing a slot for which
++ * radix_tree_deref_retry() returns true.  If so, call this function
++ * and continue the iteration.
++ */
++static inline __must_check
++void **radix_tree_iter_retry(struct radix_tree_iter *iter)
++{
++	iter->next_index = iter->index;
++	return NULL;
++}
++
++/**
+  * radix_tree_chunk_size - get current chunk size
+  *
+  * @iter:	pointer to radix tree iterator
+  * Returns:	current chunk size
+  */
+-static __always_inline unsigned
++static __always_inline long
+ radix_tree_chunk_size(struct radix_tree_iter *iter)
+ {
+ 	return iter->next_index - iter->index;
+@@ -409,9 +425,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
+ 			return slot + offset + 1;
+ 		}
+ 	} else {
+-		unsigned size = radix_tree_chunk_size(iter) - 1;
++		long size = radix_tree_chunk_size(iter);
+ 
+-		while (size--) {
++		while (--size > 0) {
+ 			slot++;
+ 			iter->index++;
+ 			if (likely(*slot))
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index c89c53a113a8..6f48ddc4b2b5 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -105,20 +105,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
+ 		__put_anon_vma(anon_vma);
+ }
+ 
+-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
+-{
+-	struct anon_vma *anon_vma = vma->anon_vma;
+-	if (anon_vma)
+-		down_write(&anon_vma->root->rwsem);
+-}
+-
+-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
+-{
+-	struct anon_vma *anon_vma = vma->anon_vma;
+-	if (anon_vma)
+-		up_write(&anon_vma->root->rwsem);
+-}
+-
+ static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
+ {
+ 	down_write(&anon_vma->root->rwsem);
+diff --git a/include/linux/thermal.h b/include/linux/thermal.h
+index 5eac316490ea..2e7d0f7a0ecc 100644
+--- a/include/linux/thermal.h
++++ b/include/linux/thermal.h
+@@ -40,6 +40,9 @@
+ /* No upper/lower limit requirement */
+ #define THERMAL_NO_LIMIT	((u32)~0)
+ 
++/* use a value below 0K to indicate an invalid/uninitialized temperature */
++#define THERMAL_TEMP_INVALID	-274000
++
+ /* Unit conversion macros */
+ #define KELVIN_TO_CELSIUS(t)	(long)(((long)t-2732 >= 0) ?	\
+ 				((long)t-2732+5)/10 : ((long)t-2732-5)/10)
+@@ -159,6 +162,7 @@ struct thermal_attr {
+  * @forced_passive:	If > 0, temperature at which to switch on all ACPI
+  *			processor cooling devices.  Currently only used by the
+  *			step-wise governor.
++ * @need_update:	if equal to 1, thermal_zone_device_update needs to be invoked.
+  * @ops:	operations this &thermal_zone_device supports
+  * @tzp:	thermal zone parameters
+  * @governor:	pointer to the governor for this thermal zone
+@@ -185,6 +189,7 @@ struct thermal_zone_device {
+ 	int emul_temperature;
+ 	int passive;
+ 	unsigned int forced_passive;
++	atomic_t need_update;
+ 	struct thermal_zone_device_ops *ops;
+ 	const struct thermal_zone_params *tzp;
+ 	struct thermal_governor *governor;
+diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
+index f6cbef78db62..3b91ad5d5115 100644
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ 			 unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++			      unsigned char *buffer, int count);
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
++			       int count);
+ 
+ /* main midi functions */
+ 
+diff --git a/kernel/futex.c b/kernel/futex.c
+index 2579e407ff67..f3043db6d36f 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2632,6 +2632,11 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ 		if (q.pi_state && (q.pi_state->owner != current)) {
+ 			spin_lock(q.lock_ptr);
+ 			ret = fixup_pi_state_owner(uaddr2, &q, current);
++			/*
++			 * Drop the reference to the pi state which
++			 * the requeue_pi() code acquired for us.
++			 */
++			free_pi_state(q.pi_state);
+ 			spin_unlock(q.lock_ptr);
+ 		}
+ 	} else {
+diff --git a/kernel/panic.c b/kernel/panic.c
+index 8136ad76e5fd..a4f7820f5930 100644
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -23,6 +23,7 @@
+ #include <linux/sysrq.h>
+ #include <linux/init.h>
+ #include <linux/nmi.h>
++#include <linux/console.h>
+ 
+ #define PANIC_TIMER_STEP 100
+ #define PANIC_BLINK_SPD 18
+@@ -146,6 +147,17 @@ void panic(const char *fmt, ...)
+ 
+ 	bust_spinlocks(0);
+ 
++	/*
++	 * We may have ended up stopping the CPU holding the lock (in
++	 * smp_send_stop()) while still having some valuable data in the console
++	 * buffer.  Try to acquire the lock then release it regardless of the
++	 * result.  The release will also print the buffers out.  Lock
++	 * debugging should be disabled to avoid reporting a bad unlock
++	 * balance when panic() is not being called from an OOPS.
++	 */
++	debug_locks_off();
++	console_flush_on_panic();
++
+ 	if (!panic_blink)
+ 		panic_blink = no_blink;
+ 
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index bff0169e1ad8..3c1aca0c3543 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2173,13 +2173,24 @@ void console_unlock(void)
+ 	static u64 seen_seq;
+ 	unsigned long flags;
+ 	bool wake_klogd = false;
+-	bool retry;
++	bool do_cond_resched, retry;
+ 
+ 	if (console_suspended) {
+ 		up_console_sem();
+ 		return;
+ 	}
+ 
++	/*
++	 * Console drivers are called under logbuf_lock, so
++	 * @console_may_schedule should be cleared before; however, we may
++	 * end up dumping a lot of lines, for example, if called from
++	 * console registration path, and should invoke cond_resched()
++	 * between lines if allowable.  Not doing so can cause a very long
++	 * scheduling stall on a slow console leading to RCU stall and
++	 * softlockup warnings which exacerbate the issue with more
++	 * messages practically incapacitating the system.
++	 */
++	do_cond_resched = console_may_schedule;
+ 	console_may_schedule = 0;
+ 
+ 	/* flush buffered message fragment immediately to console */
+@@ -2241,6 +2252,9 @@ skip:
+ 		call_console_drivers(level, text, len);
+ 		start_critical_timings();
+ 		local_irq_restore(flags);
++
++		if (do_cond_resched)
++			cond_resched();
+ 	}
+ 	console_locked = 0;
+ 
+@@ -2308,6 +2322,25 @@ void console_unblank(void)
+ 	console_unlock();
+ }
+ 
++/**
++ * console_flush_on_panic - flush console content on panic
++ *
++ * Immediately output all pending messages no matter what.
++ */
++void console_flush_on_panic(void)
++{
++	/*
++	 * If someone else is holding the console lock, trylock will fail
++	 * and may_schedule may be set.  Ignore and proceed to unlock so
++	 * that messages are flushed out.  As this can be called from any
++	 * context and we don't want to get preempted while flushing,
++	 * ensure may_schedule is cleared.
++	 */
++	console_trylock();
++	console_may_schedule = 0;
++	console_unlock();
++}
++
+ /*
+  * Return the console tty driver structure and its associated index
+  */
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 4f44028943e6..30c682adcdeb 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -317,24 +317,24 @@ static inline void seccomp_sync_threads(void)
+ 		put_seccomp_filter(thread);
+ 		smp_store_release(&thread->seccomp.filter,
+ 				  caller->seccomp.filter);
++
++		/*
++		 * Don't let an unprivileged task work around
++		 * the no_new_privs restriction by creating
++		 * a thread that sets it up, enters seccomp,
++		 * then dies.
++		 */
++		if (task_no_new_privs(caller))
++			task_set_no_new_privs(thread);
++
+ 		/*
+ 		 * Opt the other thread into seccomp if needed.
+ 		 * As threads are considered to be trust-realm
+ 		 * equivalent (see ptrace_may_access), it is safe to
+ 		 * allow one thread to transition the other.
+ 		 */
+-		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
+-			/*
+-			 * Don't let an unprivileged task work around
+-			 * the no_new_privs restriction by creating
+-			 * a thread that sets it up, enters seccomp,
+-			 * then dies.
+-			 */
+-			if (task_no_new_privs(caller))
+-				task_set_no_new_privs(thread);
+-
++		if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
+ 			seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+-		}
+ 	}
+ }
+ 
+diff --git a/kernel/sys.c b/kernel/sys.c
+index a4e372b798a5..25ae8d2e65e2 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1854,11 +1854,13 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
+ 		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
+ 	}
+ 
+-	if (prctl_map.exe_fd != (u32)-1)
++	if (prctl_map.exe_fd != (u32)-1) {
+ 		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
+-	down_read(&mm->mmap_sem);
+-	if (error)
+-		goto out;
++		if (error)
++			return error;
++	}
++
++	down_write(&mm->mmap_sem);
+ 
+ 	/*
+ 	 * We don't validate if these members are pointing to
+@@ -1895,10 +1897,8 @@ static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data
+ 	if (prctl_map.auxv_size)
+ 		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
+ 
+-	error = 0;
+-out:
+-	up_read(&mm->mmap_sem);
+-	return error;
++	up_write(&mm->mmap_sem);
++	return 0;
+ }
+ #endif /* CONFIG_CHECKPOINT_RESTORE */
+ 
+@@ -1930,7 +1930,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
+ 
+ 	error = -EINVAL;
+ 
+-	down_read(&mm->mmap_sem);
++	down_write(&mm->mmap_sem);
+ 	vma = find_vma(mm, addr);
+ 
+ 	switch (opt) {
+@@ -2033,7 +2033,7 @@ static int prctl_set_mm(int opt, unsigned long addr,
+ 
+ 	error = 0;
+ out:
+-	up_read(&mm->mmap_sem);
++	up_write(&mm->mmap_sem);
+ 	return error;
+ }
+ 
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index ce033c7aa2e8..9cff0ab82b63 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
+ static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
+ {
+ 	struct posix_clock *clk = get_posix_clock(fp);
+-	int result = 0;
++	unsigned int result = 0;
+ 
+ 	if (!clk)
+-		return -ENODEV;
++		return POLLERR;
+ 
+ 	if (clk->ops.poll)
+ 		result = clk->ops.poll(clk, fp, wait);
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 414d9df94724..65dbf8aee751 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -316,8 +316,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
+ 
+ 	delta = timekeeping_get_delta(tkr);
+ 
+-	nsec = delta * tkr->mult + tkr->xtime_nsec;
+-	nsec >>= tkr->shift;
++	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
+ 
+ 	/* If arch requires, add in get_arch_timeoffset() */
+ 	return nsec + arch_gettimeoffset();
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index dace71fe41f7..517a568f038d 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -1181,7 +1181,7 @@ static inline bool overlap(void *addr, unsigned long len, void *start, void *end
+ 
+ static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
+ {
+-	if (overlap(addr, len, _text, _etext) ||
++	if (overlap(addr, len, _stext, _etext) ||
+ 	    overlap(addr, len, __start_rodata, __end_rodata))
+ 		err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
+ }
+diff --git a/lib/dump_stack.c b/lib/dump_stack.c
+index 6745c6230db3..c30d07e99dba 100644
+--- a/lib/dump_stack.c
++++ b/lib/dump_stack.c
+@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
+ 
+ asmlinkage __visible void dump_stack(void)
+ {
++	unsigned long flags;
+ 	int was_locked;
+ 	int old;
+ 	int cpu;
+@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
+ 	 * Permit this cpu to perform nested stack dumps while serialising
+ 	 * against other CPUs
+ 	 */
+-	preempt_disable();
+-
+ retry:
++	local_irq_save(flags);
+ 	cpu = smp_processor_id();
+ 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
+ 	if (old == -1) {
+@@ -43,6 +43,7 @@ retry:
+ 	} else if (old == cpu) {
+ 		was_locked = 1;
+ 	} else {
++		local_irq_restore(flags);
+ 		cpu_relax();
+ 		goto retry;
+ 	}
+@@ -52,7 +53,7 @@ retry:
+ 	if (!was_locked)
+ 		atomic_set(&dump_lock, -1);
+ 
+-	preempt_enable();
++	local_irq_restore(flags);
+ }
+ #else
+ asmlinkage __visible void dump_stack(void)
+diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
+index 6a08ce7d6adc..acf9da449f81 100644
+--- a/lib/libcrc32c.c
++++ b/lib/libcrc32c.c
+@@ -74,3 +74,4 @@ module_exit(libcrc32c_mod_fini);
+ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
+ MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations");
+ MODULE_LICENSE("GPL");
++MODULE_SOFTDEP("pre: crc32c");
+diff --git a/lib/radix-tree.c b/lib/radix-tree.c
+index 3d2aa27b845b..8399002aa0f0 100644
+--- a/lib/radix-tree.c
++++ b/lib/radix-tree.c
+@@ -1014,9 +1014,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
+ 		return 0;
+ 
+ 	radix_tree_for_each_slot(slot, root, &iter, first_index) {
+-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++		results[ret] = rcu_dereference_raw(*slot);
+ 		if (!results[ret])
+ 			continue;
++		if (radix_tree_is_indirect_ptr(results[ret])) {
++			slot = radix_tree_iter_retry(&iter);
++			continue;
++		}
+ 		if (++ret == max_items)
+ 			break;
+ 	}
+@@ -1093,9 +1097,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
+ 		return 0;
+ 
+ 	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
+-		results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
++		results[ret] = rcu_dereference_raw(*slot);
+ 		if (!results[ret])
+ 			continue;
++		if (radix_tree_is_indirect_ptr(results[ret])) {
++			slot = radix_tree_iter_retry(&iter);
++			continue;
++		}
+ 		if (++ret == max_items)
+ 			break;
+ 	}
+diff --git a/lib/string_helpers.c b/lib/string_helpers.c
+index c98ae818eb4e..33e79b5eea77 100644
+--- a/lib/string_helpers.c
++++ b/lib/string_helpers.c
+@@ -43,46 +43,73 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units,
+ 		[STRING_UNITS_10] = 1000,
+ 		[STRING_UNITS_2] = 1024,
+ 	};
+-	int i, j;
+-	u32 remainder = 0, sf_cap, exp;
++	static const unsigned int rounding[] = { 500, 50, 5 };
++	int i = 0, j;
++	u32 remainder = 0, sf_cap;
+ 	char tmp[8];
+ 	const char *unit;
+ 
+ 	tmp[0] = '\0';
+-	i = 0;
+-	if (!size)
++
++	if (blk_size == 0)
++		size = 0;
++	if (size == 0)
+ 		goto out;
+ 
+-	while (blk_size >= divisor[units]) {
+-		remainder = do_div(blk_size, divisor[units]);
++	/* This is Napier's algorithm.  Reduce the original block size to
++	 *
++	 * coefficient * divisor[units]^i
++	 *
++ * We do the reduction so that both coefficients are just under 32 bits;
++ * multiplying them together then won't overflow 64 bits, and we keep
++ * as much precision as possible in the numbers.
++	 *
++	 * Note: it's safe to throw away the remainders here because all the
++	 * precision is in the coefficients.
++	 */
++	while (blk_size >> 32) {
++		do_div(blk_size, divisor[units]);
+ 		i++;
+ 	}
+ 
+-	exp = divisor[units] / (u32)blk_size;
+-	if (size >= exp) {
+-		remainder = do_div(size, divisor[units]);
+-		remainder *= blk_size;
++	while (size >> 32) {
++		do_div(size, divisor[units]);
+ 		i++;
+-	} else {
+-		remainder *= size;
+ 	}
+ 
++	/* now perform the actual multiplication keeping i as the sum of the
++	 * two logarithms */
+ 	size *= blk_size;
+-	size += remainder / divisor[units];
+-	remainder %= divisor[units];
+ 
++	/* and logarithmically reduce it until it's just under the divisor */
+ 	while (size >= divisor[units]) {
+ 		remainder = do_div(size, divisor[units]);
+ 		i++;
+ 	}
+ 
++	/* work out in j how many digits of precision we need from the
++	 * remainder */
+ 	sf_cap = size;
+ 	for (j = 0; sf_cap*10 < 1000; j++)
+ 		sf_cap *= 10;
+ 
+-	if (j) {
++	if (units == STRING_UNITS_2) {
++		/* express the remainder as a decimal.  It's currently the
++		 * numerator of a fraction whose denominator is
++		 * divisor[units], which is 1 << 10 for STRING_UNITS_2 */
+ 		remainder *= 1000;
+-		remainder /= divisor[units];
++		remainder >>= 10;
++	}
++
++	/* add a 5 to the digit below what will be printed to ensure
++	 * an arithmetical round up and carry it through to size */
++	remainder += rounding[j];
++	if (remainder >= 1000) {
++		remainder -= 1000;
++		size += 1;
++	}
++
++	if (j) {
+ 		snprintf(tmp, sizeof(tmp), ".%03u", remainder);
+ 		tmp[j+1] = '\0';
+ 	}
+diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
+index fcad8322ef36..b640609bcd17 100644
+--- a/mm/balloon_compaction.c
++++ b/mm/balloon_compaction.c
+@@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ 	bool dequeued_page;
+ 
+ 	dequeued_page = false;
++	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ 	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
+ 		/*
+ 		 * Block others from accessing the 'page' while we get around
+@@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
+ 				continue;
+ 			}
+ #endif
+-			spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ 			balloon_page_delete(page);
+ 			__count_vm_event(BALLOON_DEFLATE);
+-			spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 			unlock_page(page);
+ 			dequeued_page = true;
+ 			break;
+ 		}
+ 	}
++	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ 
+ 	if (!dequeued_page) {
+ 		/*
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 68dea90334cb..aac1c98a9bc7 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3824,16 +3824,17 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
+ swap_buffers:
+ 	/* Swap primary and spare array */
+ 	thresholds->spare = thresholds->primary;
+-	/* If all events are unregistered, free the spare array */
+-	if (!new) {
+-		kfree(thresholds->spare);
+-		thresholds->spare = NULL;
+-	}
+ 
+ 	rcu_assign_pointer(thresholds->primary, new);
+ 
+ 	/* To be sure that nobody uses thresholds */
+ 	synchronize_rcu();
++
++	/* If all events are unregistered, free the spare array */
++	if (!new) {
++		kfree(thresholds->spare);
++		thresholds->spare = NULL;
++	}
+ unlock:
+ 	mutex_unlock(&memcg->thresholds_lock);
+ }
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 9f48145c884f..e26bc59d7dff 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1557,7 +1557,7 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ 		 * Did it turn free?
+ 		 */
+ 		ret = __get_any_page(page, pfn, 0);
+-		if (!PageLRU(page)) {
++		if (ret == 1 && !PageLRU(page)) {
+ 			/* Drop page reference which is from __get_any_page() */
+ 			put_page(page);
+ 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+diff --git a/mm/mlock.c b/mm/mlock.c
+index 6fd2cf15e868..3d3ee6cad776 100644
+--- a/mm/mlock.c
++++ b/mm/mlock.c
+@@ -172,7 +172,7 @@ static void __munlock_isolation_failed(struct page *page)
+  */
+ unsigned int munlock_vma_page(struct page *page)
+ {
+-	unsigned int nr_pages;
++	int nr_pages;
+ 	struct zone *zone = page_zone(page);
+ 
+ 	/* For try_to_munlock() and to serialize with page migration */
+diff --git a/mm/mmap.c b/mm/mmap.c
+index bb50cacc3ea5..b639fa2721d8 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -440,12 +440,16 @@ static void validate_mm(struct mm_struct *mm)
+ 	struct vm_area_struct *vma = mm->mmap;
+ 
+ 	while (vma) {
++		struct anon_vma *anon_vma = vma->anon_vma;
+ 		struct anon_vma_chain *avc;
+ 
+-		vma_lock_anon_vma(vma);
+-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+-			anon_vma_interval_tree_verify(avc);
+-		vma_unlock_anon_vma(vma);
++		if (anon_vma) {
++			anon_vma_lock_read(anon_vma);
++			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
++				anon_vma_interval_tree_verify(avc);
++			anon_vma_unlock_read(anon_vma);
++		}
++
+ 		highest_address = vma->vm_end;
+ 		vma = vma->vm_next;
+ 		i++;
+@@ -2141,32 +2145,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
+  */
+ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ {
+-	int error;
++	int error = 0;
+ 
+ 	if (!(vma->vm_flags & VM_GROWSUP))
+ 		return -EFAULT;
+ 
+-	/*
+-	 * We must make sure the anon_vma is allocated
+-	 * so that the anon_vma locking is not a noop.
+-	 */
++	/* Guard against wrapping around to address 0. */
++	if (address < PAGE_ALIGN(address+4))
++		address = PAGE_ALIGN(address+4);
++	else
++		return -ENOMEM;
++
++	/* We must make sure the anon_vma is allocated. */
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		return -ENOMEM;
+-	vma_lock_anon_vma(vma);
+ 
+ 	/*
+ 	 * vma->vm_start/vm_end cannot change under us because the caller
+ 	 * is required to hold the mmap_sem in read mode.  We need the
+ 	 * anon_vma lock to serialize against concurrent expand_stacks.
+-	 * Also guard against wrapping around to address 0.
+ 	 */
+-	if (address < PAGE_ALIGN(address+4))
+-		address = PAGE_ALIGN(address+4);
+-	else {
+-		vma_unlock_anon_vma(vma);
+-		return -ENOMEM;
+-	}
+-	error = 0;
++	anon_vma_lock_write(vma->anon_vma);
+ 
+ 	/* Somebody else might have raced and expanded it already */
+ 	if (address > vma->vm_end) {
+@@ -2184,7 +2183,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ 				 * updates, but we only hold a shared mmap_sem
+ 				 * lock here, so we need to protect against
+ 				 * concurrent vma expansions.
+-				 * vma_lock_anon_vma() doesn't help here, as
++				 * anon_vma_lock_write() doesn't help here, as
+ 				 * we don't guarantee that all growable vmas
+ 				 * in a mm share the same root anon vma.
+ 				 * So, we reuse mm->page_table_lock to guard
+@@ -2204,7 +2203,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
+ 			}
+ 		}
+ 	}
+-	vma_unlock_anon_vma(vma);
++	anon_vma_unlock_write(vma->anon_vma);
+ 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ 	validate_mm(vma->vm_mm);
+ 	return error;
+@@ -2219,25 +2218,21 @@ int expand_downwards(struct vm_area_struct *vma,
+ {
+ 	int error;
+ 
+-	/*
+-	 * We must make sure the anon_vma is allocated
+-	 * so that the anon_vma locking is not a noop.
+-	 */
+-	if (unlikely(anon_vma_prepare(vma)))
+-		return -ENOMEM;
+-
+ 	address &= PAGE_MASK;
+ 	error = security_mmap_addr(address);
+ 	if (error)
+ 		return error;
+ 
+-	vma_lock_anon_vma(vma);
++	/* We must make sure the anon_vma is allocated. */
++	if (unlikely(anon_vma_prepare(vma)))
++		return -ENOMEM;
+ 
+ 	/*
+ 	 * vma->vm_start/vm_end cannot change under us because the caller
+ 	 * is required to hold the mmap_sem in read mode.  We need the
+ 	 * anon_vma lock to serialize against concurrent expand_stacks.
+ 	 */
++	anon_vma_lock_write(vma->anon_vma);
+ 
+ 	/* Somebody else might have raced and expanded it already */
+ 	if (address < vma->vm_start) {
+@@ -2255,7 +2250,7 @@ int expand_downwards(struct vm_area_struct *vma,
+ 				 * updates, but we only hold a shared mmap_sem
+ 				 * lock here, so we need to protect against
+ 				 * concurrent vma expansions.
+-				 * vma_lock_anon_vma() doesn't help here, as
++				 * anon_vma_lock_write() doesn't help here, as
+ 				 * we don't guarantee that all growable vmas
+ 				 * in a mm share the same root anon vma.
+ 				 * So, we reuse mm->page_table_lock to guard
+@@ -2273,7 +2268,7 @@ int expand_downwards(struct vm_area_struct *vma,
+ 			}
+ 		}
+ 	}
+-	vma_unlock_anon_vma(vma);
++	anon_vma_unlock_write(vma->anon_vma);
+ 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
+ 	validate_mm(vma->vm_mm);
+ 	return error;
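
The reordered guard above now rejects growth that would wrap past the top of the address space before any anon_vma work is done. A minimal userspace sketch of the same check, assuming a hypothetical 4 KiB PAGE_SIZE (an illustration, not the kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* mirrors: if (address < PAGE_ALIGN(address+4)) ... else return -ENOMEM */
static int grows_ok(unsigned long address)
{
	return address < PAGE_ALIGN(address + 4);
}

int main(void)
{
	printf("0x1000        -> %s\n", grows_ok(0x1000UL) ? "ok" : "ENOMEM");
	printf("ULONG_MAX - 3 -> %s\n", grows_ok(-4UL) ? "ok" : "ENOMEM");
	return 0;
}

When address + 4 wraps, PAGE_ALIGN() yields a value no larger than the original address, so the expansion is refused with -ENOMEM before anything is locked.
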
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index a8b5e749e84e..fb1ec10ce449 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -306,7 +306,12 @@ static void free_handle(struct zs_pool *pool, unsigned long handle)
+ 
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
+-	*(unsigned long *)handle = obj;
++	/*
++	 * The lsb of @obj represents the handle lock, while the other
++	 * bits represent the object value the handle points to, so
++	 * this update must not do store tearing.
++	 */
++	WRITE_ONCE(*(unsigned long *)handle, obj);
+ }
+ 
+ /* zpool driver */
+@@ -1641,6 +1646,13 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
+ 		free_obj = obj_malloc(d_page, class, handle);
+ 		zs_object_copy(used_obj, free_obj, class);
+ 		index++;
++		/*
++		 * record_obj updates handle's value to free_obj and it will
++		 * invalidate the lock bit (i.e., HANDLE_PIN_BIT) of the handle,
++		 * which breaks synchronization using pin_tag() (e.g., in
++		 * zs_free), so let's keep the lock bit.
++		 */
++		free_obj |= BIT(HANDLE_PIN_BIT);
+ 		record_obj(handle, free_obj);
+ 		unpin_tag(handle);
+ 		obj_free(pool, class, used_obj);
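
The two zsmalloc hunks work together: migration keeps HANDLE_PIN_BIT set in the value it records, and record_obj() now stores the whole word in one WRITE_ONCE() so a racing pin_tag() never observes a torn value. A sketch of the bit layout being relied on (illustrative names, not the zsmalloc internals):

#include <stdio.h>

#define HANDLE_PIN_BIT 0

/* bit 0 of the handle word is the pin (lock) flag; the remaining
 * bits carry the object value the handle points to */
static unsigned long encode(unsigned long obj, int pinned)
{
	return (obj & ~(1UL << HANDLE_PIN_BIT)) |
	       ((unsigned long)pinned << HANDLE_PIN_BIT);
}

int main(void)
{
	unsigned long h = encode(0x1000UL, 1);	/* migrated object, still pinned */

	printf("obj=0x%lx pinned=%lu\n",
	       h & ~(1UL << HANDLE_PIN_BIT), h & (1UL << HANDLE_PIN_BIT));
	return 0;
}
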
+diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
+index 967080a9f043..e51af69c61bf 100644
+--- a/net/ceph/messenger.c
++++ b/net/ceph/messenger.c
+@@ -675,6 +675,8 @@ static void reset_connection(struct ceph_connection *con)
+ 	}
+ 	con->in_seq = 0;
+ 	con->in_seq_acked = 0;
++
++	con->out_skip = 0;
+ }
+ 
+ /*
+@@ -774,6 +776,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
+ 
+ static void con_out_kvec_reset(struct ceph_connection *con)
+ {
++	BUG_ON(con->out_skip);
++
+ 	con->out_kvec_left = 0;
+ 	con->out_kvec_bytes = 0;
+ 	con->out_kvec_cur = &con->out_kvec[0];
+@@ -782,9 +786,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
+ static void con_out_kvec_add(struct ceph_connection *con,
+ 				size_t size, void *data)
+ {
+-	int index;
++	int index = con->out_kvec_left;
+ 
+-	index = con->out_kvec_left;
++	BUG_ON(con->out_skip);
+ 	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
+ 
+ 	con->out_kvec[index].iov_len = size;
+@@ -793,6 +797,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
+ 	con->out_kvec_bytes += size;
+ }
+ 
++/*
++ * Chop off a kvec from the end.  Return residual number of bytes for
++ * that kvec, i.e. how many bytes would have been written if the kvec
++ * hadn't been nuked.
++ */
++static int con_out_kvec_skip(struct ceph_connection *con)
++{
++	int off = con->out_kvec_cur - con->out_kvec;
++	int skip = 0;
++
++	if (con->out_kvec_bytes > 0) {
++		skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
++		BUG_ON(con->out_kvec_bytes < skip);
++		BUG_ON(!con->out_kvec_left);
++		con->out_kvec_bytes -= skip;
++		con->out_kvec_left--;
++	}
++
++	return skip;
++}
++
+ #ifdef CONFIG_BLOCK
+ 
+ /*
+@@ -1200,7 +1225,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
+ 	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
+ 
+ 	dout("prepare_write_message_footer %p\n", con);
+-	con->out_kvec_is_msg = true;
+ 	con->out_kvec[v].iov_base = &m->footer;
+ 	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
+ 		if (con->ops->sign_message)
+@@ -1228,7 +1252,6 @@ static void prepare_write_message(struct ceph_connection *con)
+ 	u32 crc;
+ 
+ 	con_out_kvec_reset(con);
+-	con->out_kvec_is_msg = true;
+ 	con->out_msg_done = false;
+ 
+ 	/* Sneak an ack in there first?  If we can get it into the same
+@@ -1268,18 +1291,19 @@ static void prepare_write_message(struct ceph_connection *con)
+ 
+ 	/* tag + hdr + front + middle */
+ 	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
+-	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
++	con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
+ 	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
+ 
+ 	if (m->middle)
+ 		con_out_kvec_add(con, m->middle->vec.iov_len,
+ 			m->middle->vec.iov_base);
+ 
+-	/* fill in crc (except data pages), footer */
++	/* fill in hdr crc and finalize hdr */
+ 	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
+ 	con->out_msg->hdr.crc = cpu_to_le32(crc);
+-	con->out_msg->footer.flags = 0;
++	memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
+ 
++	/* fill in front and middle crc, footer */
+ 	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
+ 	con->out_msg->footer.front_crc = cpu_to_le32(crc);
+ 	if (m->middle) {
+@@ -1291,6 +1315,7 @@ static void prepare_write_message(struct ceph_connection *con)
+ 	dout("%s front_crc %u middle_crc %u\n", __func__,
+ 	     le32_to_cpu(con->out_msg->footer.front_crc),
+ 	     le32_to_cpu(con->out_msg->footer.middle_crc));
++	con->out_msg->footer.flags = 0;
+ 
+ 	/* is there a data payload? */
+ 	con->out_msg->footer.data_crc = 0;
+@@ -1485,7 +1510,6 @@ static int write_partial_kvec(struct ceph_connection *con)
+ 		}
+ 	}
+ 	con->out_kvec_left = 0;
+-	con->out_kvec_is_msg = false;
+ 	ret = 1;
+ out:
+ 	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+@@ -1577,6 +1601,7 @@ static int write_partial_skip(struct ceph_connection *con)
+ {
+ 	int ret;
+ 
++	dout("%s %p %d left\n", __func__, con, con->out_skip);
+ 	while (con->out_skip > 0) {
+ 		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
+ 
+@@ -2493,13 +2518,13 @@ more:
+ 
+ more_kvec:
+ 	/* kvec data queued? */
+-	if (con->out_skip) {
+-		ret = write_partial_skip(con);
++	if (con->out_kvec_left) {
++		ret = write_partial_kvec(con);
+ 		if (ret <= 0)
+ 			goto out;
+ 	}
+-	if (con->out_kvec_left) {
+-		ret = write_partial_kvec(con);
++	if (con->out_skip) {
++		ret = write_partial_skip(con);
+ 		if (ret <= 0)
+ 			goto out;
+ 	}
+@@ -3026,16 +3051,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
+ 		ceph_msg_put(msg);
+ 	}
+ 	if (con->out_msg == msg) {
+-		dout("%s %p msg %p - was sending\n", __func__, con, msg);
+-		con->out_msg = NULL;
+-		if (con->out_kvec_is_msg) {
+-			con->out_skip = con->out_kvec_bytes;
+-			con->out_kvec_is_msg = false;
++		BUG_ON(con->out_skip);
++		/* footer */
++		if (con->out_msg_done) {
++			con->out_skip += con_out_kvec_skip(con);
++		} else {
++			BUG_ON(!msg->data_length);
++			if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
++				con->out_skip += sizeof(msg->footer);
++			else
++				con->out_skip += sizeof(msg->old_footer);
+ 		}
++		/* data, middle, front */
++		if (msg->data_length)
++			con->out_skip += msg->cursor.total_resid;
++		if (msg->middle)
++			con->out_skip += con_out_kvec_skip(con);
++		con->out_skip += con_out_kvec_skip(con);
++
++		dout("%s %p msg %p - was sending, will write %d skip %d\n",
++		     __func__, con, msg, con->out_kvec_bytes, con->out_skip);
+ 		msg->hdr.seq = 0;
+-
++		con->out_msg = NULL;
+ 		ceph_msg_put(msg);
+ 	}
++
+ 	mutex_unlock(&con->mutex);
+ }
+ 
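
con_out_kvec_skip() and the ceph_msg_revoke() rework share one idea: rather than flagging that the queued kvecs "belong to a message" (the removed out_kvec_is_msg), the connection counts exactly how many queued bytes must be skipped. A toy model of the skip accounting (field names mirror the patch, but this is a self-contained illustration, not the ceph code):

#include <stddef.h>
#include <stdio.h>

struct kvec { void *iov_base; size_t iov_len; };

struct conn {
	struct kvec out_kvec[8];
	int out_kvec_left;
	int out_kvec_bytes;
};

/* drop the last queued kvec; return how many bytes it would still
 * have written, so the caller can add them to out_skip */
static int kvec_skip(struct conn *con)
{
	int skip = 0;

	if (con->out_kvec_bytes > 0) {
		skip = con->out_kvec[con->out_kvec_left - 1].iov_len;
		con->out_kvec_bytes -= skip;
		con->out_kvec_left--;
	}
	return skip;
}

int main(void)
{
	struct conn c = { .out_kvec_left = 2, .out_kvec_bytes = 10 };
	int skipped;

	c.out_kvec[0].iov_len = 4;
	c.out_kvec[1].iov_len = 6;
	skipped = kvec_skip(&c);
	printf("skipped %d; %d bytes in %d kvecs remain\n",
	       skipped, c.out_kvec_bytes, c.out_kvec_left);
	return 0;
}

ceph_msg_revoke() sums these residuals with the data and footer sizes, and the reordered write path flushes the remaining kvecs before write_partial_skip() consumes the skip bytes.
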
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index a9c9d961f039..41adfc898a18 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -1727,7 +1727,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
+ 		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
+ 			continue;
+ 		sdata->u.ibss.last_scan_completed = jiffies;
+-		ieee80211_queue_work(&local->hw, &sdata->work);
+ 	}
+ 	mutex_unlock(&local->iflist_mtx);
+ }
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 817098add1d6..afcc67a157fd 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1299,17 +1299,6 @@ out:
+ 	sdata_unlock(sdata);
+ }
+ 
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
+-{
+-	struct ieee80211_sub_if_data *sdata;
+-
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(sdata, &local->interfaces, list)
+-		if (ieee80211_vif_is_mesh(&sdata->vif) &&
+-		    ieee80211_sdata_running(sdata))
+-			ieee80211_queue_work(&local->hw, &sdata->work);
+-	rcu_read_unlock();
+-}
+ 
+ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
+ {
+diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
+index 50c8473cf9dc..472bdc73e950 100644
+--- a/net/mac80211/mesh.h
++++ b/net/mac80211/mesh.h
+@@ -358,14 +358,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ 	return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
+ }
+ 
+-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
+-
+ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
+ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+ void ieee80211s_stop(void);
+ #else
+-static inline void
+-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
+ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
+ { return false; }
+ static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index a93906103f8b..844825829992 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -4002,8 +4002,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
+ 		if (!(flags & IEEE80211_HW_CONNECTION_MONITOR))
+ 			ieee80211_queue_work(&sdata->local->hw,
+ 					     &sdata->u.mgd.monitor_work);
+-		/* and do all the other regular work too */
+-		ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ 	}
+ }
+ 
+diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
+index 7bb6a9383f58..ee9351affa5b 100644
+--- a/net/mac80211/scan.c
++++ b/net/mac80211/scan.c
+@@ -310,6 +310,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ 	bool was_scanning = local->scanning;
+ 	struct cfg80211_scan_request *scan_req;
+ 	struct ieee80211_sub_if_data *scan_sdata;
++	struct ieee80211_sub_if_data *sdata;
+ 
+ 	lockdep_assert_held(&local->mtx);
+ 
+@@ -369,7 +370,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
+ 
+ 	ieee80211_mlme_notify_scan_completed(local);
+ 	ieee80211_ibss_notify_scan_completed(local);
+-	ieee80211_mesh_notify_scan_completed(local);
++
++	/* Requeue all the work that might have been ignored while
++	 * the scan was in progress; if there was none this will
++	 * just be a no-op for the particular interface.
++	 */
++	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++		if (ieee80211_sdata_running(sdata))
++			ieee80211_queue_work(&sdata->local->hw, &sdata->work);
++	}
++
+ 	if (was_scanning)
+ 		ieee80211_start_next_roc(local);
+ }
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index fa7cd792791c..a97bb7332607 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1081,17 +1081,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
+ 	return res;
+ }
+ 
+-static bool rfkill_readable(struct rfkill_data *data)
+-{
+-	bool r;
+-
+-	mutex_lock(&data->mtx);
+-	r = !list_empty(&data->events);
+-	mutex_unlock(&data->mtx);
+-
+-	return r;
+-}
+-
+ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			       size_t count, loff_t *pos)
+ {
+@@ -1108,8 +1097,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
+ 			goto out;
+ 		}
+ 		mutex_unlock(&data->mtx);
++		/* since we re-check and it just compares pointers,
++		 * using !list_empty() without locking isn't a problem
++		 */
+ 		ret = wait_event_interruptible(data->read_wait,
+-					       rfkill_readable(data));
++					       !list_empty(&data->events));
+ 		mutex_lock(&data->mtx);
+ 
+ 		if (ret)
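
Dropping rfkill_readable() is safe because the wait condition performs only a single pointer-sized read, and the reader re-validates under data->mtx after waking, exactly as the added comment says. A condensed pthread rendition of the same pattern (hypothetical event queue; the kernel uses wait_event_interruptible() rather than the yield loop shown here):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

struct evqueue {
	pthread_mutex_t mtx;
	atomic_int nr_events;	/* stands in for !list_empty(&data->events) */
};

int evqueue_read(struct evqueue *q)
{
	/* lockless wait, like wait_event_interruptible(..., !list_empty()) */
	while (atomic_load(&q->nr_events) == 0)
		sched_yield();

	pthread_mutex_lock(&q->mtx);	/* re-check under the lock before use */
	if (atomic_load(&q->nr_events) > 0)
		atomic_fetch_sub(&q->nr_events, 1);
	pthread_mutex_unlock(&q->mtx);
	return 0;
}
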
+diff --git a/scripts/bloat-o-meter b/scripts/bloat-o-meter
+index 23e78dcd12bf..38b64f487315 100755
+--- a/scripts/bloat-o-meter
++++ b/scripts/bloat-o-meter
+@@ -58,8 +58,8 @@ for name in common:
+ delta.sort()
+ delta.reverse()
+ 
+-print "add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
+-      (add, remove, grow, shrink, up, -down, up-down)
+-print "%-40s %7s %7s %+7s" % ("function", "old", "new", "delta")
++print("add/remove: %s/%s grow/shrink: %s/%s up/down: %s/%s (%s)" % \
++      (add, remove, grow, shrink, up, -down, up-down))
++print("%-40s %7s %7s %+7s" % ("function", "old", "new", "delta"))
+ for d, n in delta:
+-    if d: print "%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d)
++    if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index b123c42e7dc8..b554d7f9e3be 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -44,6 +44,13 @@
+ #include <sound/compress_offload.h>
+ #include <sound/compress_driver.h>
+ 
++/* struct snd_compr_codec_caps overflows the ioctl bit size for some
++ * architectures, so we need to disable the relevant ioctls.
++ */
++#if _IOC_SIZEBITS < 14
++#define COMPR_CODEC_CAPS_OVERFLOW
++#endif
++
+ /* TODO:
+  * - add substream support for multiple devices in case of
+  *	SND_DYNAMIC_MINORS is not used
+@@ -438,6 +445,7 @@ out:
+ 	return retval;
+ }
+ 
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ static int
+ snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
+ {
+@@ -461,6 +469,7 @@ out:
+ 	kfree(caps);
+ 	return retval;
+ }
++#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
+ 
+ /* revisit this with snd_pcm_preallocate_xxx */
+ static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
+@@ -799,9 +808,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ 	case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+ 		retval = snd_compr_get_caps(stream, arg);
+ 		break;
++#ifndef COMPR_CODEC_CAPS_OVERFLOW
+ 	case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+ 		retval = snd_compr_get_codec_caps(stream, arg);
+ 		break;
++#endif
+ 	case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
+ 		retval = snd_compr_set_params(stream, arg);
+ 		break;
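
The new guard exists because the ioctl command word encodes the argument size in _IOC_SIZEBITS bits, usually 14, so a struct larger than that cannot be expressed in the command number at all. A quick arithmetic check (the struct size below is a stand-in, not the real sizeof):

#include <stdio.h>

#define MY_IOC_SIZEBITS 14	/* _IOC_SIZEBITS in the common asm-generic layout */

int main(void)
{
	unsigned long max = (1UL << MY_IOC_SIZEBITS) - 1;
	unsigned long caps = 16900;	/* stand-in for sizeof(struct snd_compr_codec_caps) */

	printf("max encodable ioctl arg: %lu bytes\n", max);
	printf("codec caps struct:       %lu bytes -> %s\n", caps,
	       caps > max ? "overflows, ioctl disabled" : "fits");
	return 0;
}
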
+diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
+index 58550cc93f28..33e72c809e50 100644
+--- a/sound/core/oss/pcm_oss.c
++++ b/sound/core/oss/pcm_oss.c
+@@ -834,7 +834,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
+ 	return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
+ }
+ 
+-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
++				     bool trylock)
+ {
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	struct snd_pcm_hw_params *params, *sparams;
+@@ -848,7 +849,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+ 	struct snd_mask sformat_mask;
+ 	struct snd_mask mask;
+ 
+-	if (mutex_lock_interruptible(&runtime->oss.params_lock))
++	if (trylock) {
++		if (!(mutex_trylock(&runtime->oss.params_lock)))
++			return -EAGAIN;
++	} else if (mutex_lock_interruptible(&runtime->oss.params_lock))
+ 		return -EINTR;
+ 	sw_params = kmalloc(sizeof(*sw_params), GFP_KERNEL);
+ 	params = kmalloc(sizeof(*params), GFP_KERNEL);
+@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
+ 		if (asubstream == NULL)
+ 			asubstream = substream;
+ 		if (substream->runtime->oss.params) {
+-			err = snd_pcm_oss_change_params(substream);
++			err = snd_pcm_oss_change_params(substream, false);
+ 			if (err < 0)
+ 				return err;
+ 		}
+@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
+ 		return 0;
+ 	runtime = substream->runtime;
+ 	if (runtime->oss.params) {
+-		err = snd_pcm_oss_change_params(substream);
++		err = snd_pcm_oss_change_params(substream, false);
+ 		if (err < 0)
+ 			return err;
+ 	}
+@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
+ 	runtime = substream->runtime;
+ 
+ 	if (runtime->oss.params &&
+-	    (err = snd_pcm_oss_change_params(substream)) < 0)
++	    (err = snd_pcm_oss_change_params(substream, false)) < 0)
+ 		return err;
+ 
+ 	info.fragsize = runtime->oss.period_bytes;
+@@ -2800,7 +2804,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
+ 		return -EIO;
+ 	
+ 	if (runtime->oss.params) {
+-		if ((err = snd_pcm_oss_change_params(substream)) < 0)
++		/* use mutex_trylock() for params_lock to avoid a deadlock
++		 * between mmap_sem and params_lock taken by
++		 * copy_from/to_user() in snd_pcm_oss_write/read()
++		 */
++		err = snd_pcm_oss_change_params(substream, true);
++		if (err < 0)
+ 			return err;
+ 	}
+ #ifdef CONFIG_SND_PCM_OSS_PLUGINS
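
This is a classic ABBA avoidance: the read/write path holds params_lock and then faults pages (taking mmap_sem), while mmap holds mmap_sem and wants params_lock. Taking the second lock with a trylock and bailing out with -EAGAIN breaks the potential cycle. A minimal pthread sketch of the shape (hypothetical locks, not the ALSA ones):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t mmap_sem    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t params_lock = PTHREAD_MUTEX_INITIALIZER;

int mmap_path(void)
{
	int err = 0;

	pthread_mutex_lock(&mmap_sem);
	if (pthread_mutex_trylock(&params_lock)) {
		err = -EAGAIN;		/* someone holds params_lock; retry later */
	} else {
		/* ... change hw params ... */
		pthread_mutex_unlock(&params_lock);
	}
	pthread_mutex_unlock(&mmap_sem);
	return err;
}
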
+diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
+index a7759846fbaa..795437b10082 100644
+--- a/sound/core/rawmidi.c
++++ b/sound/core/rawmidi.c
+@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
+ 	unsigned long flags;
+ 	long result = 0, count1;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	unsigned long appl_ptr;
+ 
++	spin_lock_irqsave(&runtime->lock, flags);
+ 	while (count > 0 && runtime->avail) {
+ 		count1 = runtime->buffer_size - runtime->appl_ptr;
+ 		if (count1 > count)
+ 			count1 = count;
+-		spin_lock_irqsave(&runtime->lock, flags);
+ 		if (count1 > (int)runtime->avail)
+ 			count1 = runtime->avail;
++
++		/* update runtime->appl_ptr before unlocking for userbuf */
++		appl_ptr = runtime->appl_ptr;
++		runtime->appl_ptr += count1;
++		runtime->appl_ptr %= runtime->buffer_size;
++		runtime->avail -= count1;
++
+ 		if (kernelbuf)
+-			memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
++			memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
+ 		if (userbuf) {
+ 			spin_unlock_irqrestore(&runtime->lock, flags);
+ 			if (copy_to_user(userbuf + result,
+-					 runtime->buffer + runtime->appl_ptr, count1)) {
++					 runtime->buffer + appl_ptr, count1)) {
+ 				return result > 0 ? result : -EFAULT;
+ 			}
+ 			spin_lock_irqsave(&runtime->lock, flags);
+ 		}
+-		runtime->appl_ptr += count1;
+-		runtime->appl_ptr %= runtime->buffer_size;
+-		runtime->avail -= count1;
+-		spin_unlock_irqrestore(&runtime->lock, flags);
+ 		result += count1;
+ 		count -= count1;
+ 	}
++	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return result;
+ }
+ 
+@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
+ EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
+ 
+ /**
+- * snd_rawmidi_transmit_peek - copy data from the internal buffer
++ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
+  * @substream: the rawmidi substream
+  * @buffer: the buffer pointer
+  * @count: data size to transfer
+  *
+- * Copies data from the internal output buffer to the given buffer.
+- *
+- * Call this in the interrupt handler when the midi output is ready,
+- * and call snd_rawmidi_transmit_ack() after the transmission is
+- * finished.
+- *
+- * Return: The size of copied data, or a negative error code on failure.
++ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
+  */
+-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 			      unsigned char *buffer, int count)
+ {
+-	unsigned long flags;
+ 	int result, count1;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 
+@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 		return -EINVAL;
+ 	}
+ 	result = 0;
+-	spin_lock_irqsave(&runtime->lock, flags);
+ 	if (runtime->avail >= runtime->buffer_size) {
+ 		/* warning: lowlevel layer MUST trigger down the hardware */
+ 		goto __skip;
+@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+ 		}
+ 	}
+       __skip:
++	return result;
++}
++EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
++
++/**
++ * snd_rawmidi_transmit_peek - copy data from the internal buffer
++ * @substream: the rawmidi substream
++ * @buffer: the buffer pointer
++ * @count: data size to transfer
++ *
++ * Copies data from the internal output buffer to the given buffer.
++ *
++ * Call this in the interrupt handler when the midi output is ready,
++ * and call snd_rawmidi_transmit_ack() after the transmission is
++ * finished.
++ *
++ * Return: The size of copied data, or a negative error code on failure.
++ */
++int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
++			      unsigned char *buffer, int count)
++{
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
++	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
++	result = __snd_rawmidi_transmit_peek(substream, buffer, count);
+ 	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return result;
+ }
+ EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
+ 
+ /**
+- * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * __snd_rawmidi_transmit_ack - acknowledge the transmission
+  * @substream: the rawmidi substream
+  * @count: the transferred count
+  *
+- * Advances the hardware pointer for the internal output buffer with
+- * the given size and updates the condition.
+- * Call after the transmission is finished.
+- *
+- * Return: The advanced size if successful, or a negative error code on failure.
++ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
+  */
+-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ {
+-	unsigned long flags;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
+ 
+ 	if (runtime->buffer == NULL) {
+@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ 			  "snd_rawmidi_transmit_ack: output is not active!!!\n");
+ 		return -EINVAL;
+ 	}
+-	spin_lock_irqsave(&runtime->lock, flags);
+ 	snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
+ 	runtime->hw_ptr += count;
+ 	runtime->hw_ptr %= runtime->buffer_size;
+@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+ 		if (runtime->drain || snd_rawmidi_ready(substream))
+ 			wake_up(&runtime->sleep);
+ 	}
+-	spin_unlock_irqrestore(&runtime->lock, flags);
+ 	return count;
+ }
++EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
++
++/**
++ * snd_rawmidi_transmit_ack - acknowledge the transmission
++ * @substream: the rawmidi substream
++ * @count: the transferred count
++ *
++ * Advances the hardware pointer for the internal output buffer with
++ * the given size and updates the condition.
++ * Call after the transmission is finished.
++ *
++ * Return: The advanced size if successful, or a negative error code on failure.
++ */
++int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
++{
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
++	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
++	result = __snd_rawmidi_transmit_ack(substream, count);
++	spin_unlock_irqrestore(&runtime->lock, flags);
++	return result;
++}
+ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
+ 
+ /**
+@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
+ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
+ 			 unsigned char *buffer, int count)
+ {
++	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	int result;
++	unsigned long flags;
++
++	spin_lock_irqsave(&runtime->lock, flags);
+ 	if (!substream->opened)
+-		return -EBADFD;
+-	count = snd_rawmidi_transmit_peek(substream, buffer, count);
+-	if (count < 0)
+-		return count;
+-	return snd_rawmidi_transmit_ack(substream, count);
++		result = -EBADFD;
++	else {
++		count = __snd_rawmidi_transmit_peek(substream, buffer, count);
++		if (count <= 0)
++			result = count;
++		else
++			result = __snd_rawmidi_transmit_ack(substream, count);
++	}
++	spin_unlock_irqrestore(&runtime->lock, flags);
++	return result;
+ }
+ EXPORT_SYMBOL(snd_rawmidi_transmit);
+ 
+@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 	unsigned long flags;
+ 	long count1, result;
+ 	struct snd_rawmidi_runtime *runtime = substream->runtime;
++	unsigned long appl_ptr;
+ 
+-	if (snd_BUG_ON(!kernelbuf && !userbuf))
++	if (!kernelbuf && !userbuf)
+ 		return -EINVAL;
+ 	if (snd_BUG_ON(!runtime->buffer))
+ 		return -EINVAL;
+@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 			count1 = count;
+ 		if (count1 > (long)runtime->avail)
+ 			count1 = runtime->avail;
++
++		/* update runtime->appl_ptr before unlocking for userbuf */
++		appl_ptr = runtime->appl_ptr;
++		runtime->appl_ptr += count1;
++		runtime->appl_ptr %= runtime->buffer_size;
++		runtime->avail -= count1;
++
+ 		if (kernelbuf)
+-			memcpy(runtime->buffer + runtime->appl_ptr,
++			memcpy(runtime->buffer + appl_ptr,
+ 			       kernelbuf + result, count1);
+ 		else if (userbuf) {
+ 			spin_unlock_irqrestore(&runtime->lock, flags);
+-			if (copy_from_user(runtime->buffer + runtime->appl_ptr,
++			if (copy_from_user(runtime->buffer + appl_ptr,
+ 					   userbuf + result, count1)) {
+ 				spin_lock_irqsave(&runtime->lock, flags);
+ 				result = result > 0 ? result : -EFAULT;
+@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
+ 			}
+ 			spin_lock_irqsave(&runtime->lock, flags);
+ 		}
+-		runtime->appl_ptr += count1;
+-		runtime->appl_ptr %= runtime->buffer_size;
+-		runtime->avail -= count1;
+ 		result += count1;
+ 		count -= count1;
+ 	}
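
Both rawmidi copy loops now follow the same discipline: reserve the buffer range (advance appl_ptr, shrink avail) while the lock is held, keep the old offset in a local, and only then drop the lock for copy_to_user()/copy_from_user(). A racing thread therefore can never claim the same bytes. The reservation step in isolation (illustrative types, not the ALSA structures):

#include <stddef.h>

struct ring {
	unsigned char *buffer;
	size_t buffer_size;
	size_t appl_ptr;
	size_t avail;
};

/* call with the ring lock held; returns the offset the caller may
 * safely copy from after unlocking */
size_t ring_reserve(struct ring *r, size_t count)
{
	size_t off = r->appl_ptr;

	r->appl_ptr = (r->appl_ptr + count) % r->buffer_size;
	r->avail -= count;
	return off;
}
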
+diff --git a/sound/core/seq/oss/seq_oss_init.c b/sound/core/seq/oss/seq_oss_init.c
+index 2de3feff70d0..dad5b1123e46 100644
+--- a/sound/core/seq/oss/seq_oss_init.c
++++ b/sound/core/seq/oss/seq_oss_init.c
+@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
+ 
+ 	dp->index = i;
+ 	if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
+-		pr_err("ALSA: seq_oss: too many applications\n");
++		pr_debug("ALSA: seq_oss: too many applications\n");
+ 		rc = -ENOMEM;
+ 		goto _error;
+ 	}
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index 48e4fe1b68ab..f38cf91b4faf 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
+ 	struct seq_oss_synth *rec;
+ 	struct seq_oss_synthinfo *info;
+ 
+-	if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
++	if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+ 		return;
+ 	for (i = 0; i < dp->max_synthdev; i++) {
+ 		info = &dp->synths[i];
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index bd4741442909..ce6703ecfcef 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
+ 	else
+ 		down_read(&grp->list_mutex);
+ 	list_for_each_entry(subs, &grp->list_head, src_list) {
++		/* both ports ready? */
++		if (atomic_read(&subs->ref_count) != 2)
++			continue;
+ 		event->dest = subs->info.dest;
+ 		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
+ 			/* convert time according to flag with subscription */
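
This test works because, after the seq_ports rework further down, each side of a subscription increments ref_count as it is hooked up, so only the value 2 means both the source and destination ports are live and delivery is safe. The check in isolation (hypothetical struct):

#include <stdatomic.h>
#include <stdbool.h>

struct subscription { atomic_int ref_count; };

bool subs_ready(struct subscription *s)
{
	/* 0 = none attached, 1 = one side attached or being torn down,
	 * 2 = both source and destination ports are hooked up */
	return atomic_load(&s->ref_count) == 2;
}
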
+diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
+index 55170a20ae72..921fb2bd8fad 100644
+--- a/sound/core/seq/seq_ports.c
++++ b/sound/core/seq/seq_ports.c
+@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
+ }
+ 
+ /* */
+-enum group_type {
+-	SRC_LIST, DEST_LIST
+-};
+-
+ static int subscribe_port(struct snd_seq_client *client,
+ 			  struct snd_seq_client_port *port,
+ 			  struct snd_seq_port_subs_info *grp,
+@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ 	return NULL;
+ }
+ 
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++					struct snd_seq_client_port *port,
++					struct snd_seq_subscribers *subs,
++					bool is_src, bool ack);
++
++static inline struct snd_seq_subscribers *
++get_subscriber(struct list_head *p, bool is_src)
++{
++	if (is_src)
++		return list_entry(p, struct snd_seq_subscribers, src_list);
++	else
++		return list_entry(p, struct snd_seq_subscribers, dest_list);
++}
++
+ /*
+  * remove all subscribers on the list
+  * this is called from port_delete, for each src and dest list.
+@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
+ static void clear_subscriber_list(struct snd_seq_client *client,
+ 				  struct snd_seq_client_port *port,
+ 				  struct snd_seq_port_subs_info *grp,
+-				  int grptype)
++				  int is_src)
+ {
+ 	struct list_head *p, *n;
+ 
+@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ 		struct snd_seq_client *c;
+ 		struct snd_seq_client_port *aport;
+ 
+-		if (grptype == SRC_LIST) {
+-			subs = list_entry(p, struct snd_seq_subscribers, src_list);
++		subs = get_subscriber(p, is_src);
++		if (is_src)
+ 			aport = get_client_port(&subs->info.dest, &c);
+-		} else {
+-			subs = list_entry(p, struct snd_seq_subscribers, dest_list);
++		else
+ 			aport = get_client_port(&subs->info.sender, &c);
+-		}
+-		list_del(p);
+-		unsubscribe_port(client, port, grp, &subs->info, 0);
++		delete_and_unsubscribe_port(client, port, subs, is_src, false);
++
+ 		if (!aport) {
+ 			/* looks like the connected port is being deleted.
+ 			 * we decrease the counter, and when both ports are deleted
+@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
+ 			 */
+ 			if (atomic_dec_and_test(&subs->ref_count))
+ 				kfree(subs);
+-		} else {
+-			/* ok we got the connected port */
+-			struct snd_seq_port_subs_info *agrp;
+-			agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
+-			down_write(&agrp->list_mutex);
+-			if (grptype == SRC_LIST)
+-				list_del(&subs->dest_list);
+-			else
+-				list_del(&subs->src_list);
+-			up_write(&agrp->list_mutex);
+-			unsubscribe_port(c, aport, agrp, &subs->info, 1);
+-			kfree(subs);
+-			snd_seq_port_unlock(aport);
+-			snd_seq_client_unlock(c);
++			continue;
+ 		}
++
++		/* ok we got the connected port */
++		delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
++		kfree(subs);
++		snd_seq_port_unlock(aport);
++		snd_seq_client_unlock(c);
+ 	}
+ }
+ 
+@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
+ 	snd_use_lock_sync(&port->use_lock); 
+ 
+ 	/* clear subscribers info */
+-	clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
+-	clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
++	clear_subscriber_list(client, port, &port->c_src, true);
++	clear_subscriber_list(client, port, &port->c_dest, false);
+ 
+ 	if (port->private_free)
+ 		port->private_free(port->private_data);
+@@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
+ 	return 0;
+ }
+ 
+-
+-/* connect two ports */
+-int snd_seq_port_connect(struct snd_seq_client *connector,
+-			 struct snd_seq_client *src_client,
+-			 struct snd_seq_client_port *src_port,
+-			 struct snd_seq_client *dest_client,
+-			 struct snd_seq_client_port *dest_port,
+-			 struct snd_seq_port_subscribe *info)
++static int check_and_subscribe_port(struct snd_seq_client *client,
++				    struct snd_seq_client_port *port,
++				    struct snd_seq_subscribers *subs,
++				    bool is_src, bool exclusive, bool ack)
+ {
+-	struct snd_seq_port_subs_info *src = &src_port->c_src;
+-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+-	struct snd_seq_subscribers *subs, *s;
+-	int err, src_called = 0;
+-	unsigned long flags;
+-	int exclusive;
++	struct snd_seq_port_subs_info *grp;
++	struct list_head *p;
++	struct snd_seq_subscribers *s;
++	int err;
+ 
+-	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+-	if (! subs)
+-		return -ENOMEM;
+-
+-	subs->info = *info;
+-	atomic_set(&subs->ref_count, 2);
+-
+-	down_write(&src->list_mutex);
+-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+-	exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
++	grp = is_src ? &port->c_src : &port->c_dest;
+ 	err = -EBUSY;
++	down_write(&grp->list_mutex);
+ 	if (exclusive) {
+-		if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
++		if (!list_empty(&grp->list_head))
+ 			goto __error;
+ 	} else {
+-		if (src->exclusive || dest->exclusive)
++		if (grp->exclusive)
+ 			goto __error;
+ 		/* check whether already exists */
+-		list_for_each_entry(s, &src->list_head, src_list) {
+-			if (match_subs_info(info, &s->info))
+-				goto __error;
+-		}
+-		list_for_each_entry(s, &dest->list_head, dest_list) {
+-			if (match_subs_info(info, &s->info))
++		list_for_each(p, &grp->list_head) {
++			s = get_subscriber(p, is_src);
++			if (match_subs_info(&subs->info, &s->info))
+ 				goto __error;
+ 		}
+ 	}
+ 
+-	if ((err = subscribe_port(src_client, src_port, src, info,
+-				  connector->number != src_client->number)) < 0)
+-		goto __error;
+-	src_called = 1;
+-
+-	if ((err = subscribe_port(dest_client, dest_port, dest, info,
+-				  connector->number != dest_client->number)) < 0)
++	err = subscribe_port(client, port, grp, &subs->info, ack);
++	if (err < 0) {
++		grp->exclusive = 0;
+ 		goto __error;
++	}
+ 
+ 	/* add to list */
+-	write_lock_irqsave(&src->list_lock, flags);
+-	// write_lock(&dest->list_lock); // no other lock yet
+-	list_add_tail(&subs->src_list, &src->list_head);
+-	list_add_tail(&subs->dest_list, &dest->list_head);
+-	// write_unlock(&dest->list_lock); // no other lock yet
+-	write_unlock_irqrestore(&src->list_lock, flags);
++	write_lock_irq(&grp->list_lock);
++	if (is_src)
++		list_add_tail(&subs->src_list, &grp->list_head);
++	else
++		list_add_tail(&subs->dest_list, &grp->list_head);
++	grp->exclusive = exclusive;
++	atomic_inc(&subs->ref_count);
++	write_unlock_irq(&grp->list_lock);
++	err = 0;
++
++ __error:
++	up_write(&grp->list_mutex);
++	return err;
++}
+ 
+-	src->exclusive = dest->exclusive = exclusive;
++static void delete_and_unsubscribe_port(struct snd_seq_client *client,
++					struct snd_seq_client_port *port,
++					struct snd_seq_subscribers *subs,
++					bool is_src, bool ack)
++{
++	struct snd_seq_port_subs_info *grp;
++
++	grp = is_src ? &port->c_src : &port->c_dest;
++	down_write(&grp->list_mutex);
++	write_lock_irq(&grp->list_lock);
++	if (is_src)
++		list_del(&subs->src_list);
++	else
++		list_del(&subs->dest_list);
++	grp->exclusive = 0;
++	write_unlock_irq(&grp->list_lock);
++	up_write(&grp->list_mutex);
++
++	unsubscribe_port(client, port, grp, &subs->info, ack);
++}
++
++/* connect two ports */
++int snd_seq_port_connect(struct snd_seq_client *connector,
++			 struct snd_seq_client *src_client,
++			 struct snd_seq_client_port *src_port,
++			 struct snd_seq_client *dest_client,
++			 struct snd_seq_client_port *dest_port,
++			 struct snd_seq_port_subscribe *info)
++{
++	struct snd_seq_subscribers *subs;
++	bool exclusive;
++	int err;
++
++	subs = kzalloc(sizeof(*subs), GFP_KERNEL);
++	if (!subs)
++		return -ENOMEM;
++
++	subs->info = *info;
++	atomic_set(&subs->ref_count, 0);
++	INIT_LIST_HEAD(&subs->src_list);
++	INIT_LIST_HEAD(&subs->dest_list);
++
++	exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
++
++	err = check_and_subscribe_port(src_client, src_port, subs, true,
++				       exclusive,
++				       connector->number != src_client->number);
++	if (err < 0)
++		goto error;
++	err = check_and_subscribe_port(dest_client, dest_port, subs, false,
++				       exclusive,
++				       connector->number != dest_client->number);
++	if (err < 0)
++		goto error_dest;
+ 
+-	up_write(&dest->list_mutex);
+-	up_write(&src->list_mutex);
+ 	return 0;
+ 
+- __error:
+-	if (src_called)
+-		unsubscribe_port(src_client, src_port, src, info,
+-				 connector->number != src_client->number);
++ error_dest:
++	delete_and_unsubscribe_port(src_client, src_port, subs, true,
++				    connector->number != src_client->number);
++ error:
+ 	kfree(subs);
+-	up_write(&dest->list_mutex);
+-	up_write(&src->list_mutex);
+ 	return err;
+ }
+ 
+-
+ /* remove the connection */
+ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ 			    struct snd_seq_client *src_client,
+@@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
+ 			    struct snd_seq_port_subscribe *info)
+ {
+ 	struct snd_seq_port_subs_info *src = &src_port->c_src;
+-	struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
+ 	struct snd_seq_subscribers *subs;
+ 	int err = -ENOENT;
+-	unsigned long flags;
+ 
+ 	down_write(&src->list_mutex);
+-	down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
+-
+ 	/* look for the connection */
+ 	list_for_each_entry(subs, &src->list_head, src_list) {
+ 		if (match_subs_info(info, &subs->info)) {
+-			write_lock_irqsave(&src->list_lock, flags);
+-			// write_lock(&dest->list_lock);  // no lock yet
+-			list_del(&subs->src_list);
+-			list_del(&subs->dest_list);
+-			// write_unlock(&dest->list_lock);
+-			write_unlock_irqrestore(&src->list_lock, flags);
+-			src->exclusive = dest->exclusive = 0;
+-			unsubscribe_port(src_client, src_port, src, info,
+-					 connector->number != src_client->number);
+-			unsubscribe_port(dest_client, dest_port, dest, info,
+-					 connector->number != dest_client->number);
+-			kfree(subs);
++			atomic_dec(&subs->ref_count); /* mark as not ready */
+ 			err = 0;
+ 			break;
+ 		}
+ 	}
+-
+-	up_write(&dest->list_mutex);
+ 	up_write(&src->list_mutex);
+-	return err;
++	if (err < 0)
++		return err;
++
++	delete_and_unsubscribe_port(src_client, src_port, subs, true,
++				    connector->number != src_client->number);
++	delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
++				    connector->number != dest_client->number);
++	kfree(subs);
++	return 0;
+ }
+ 
+ 
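
The connect path used to take the source and destination list mutexes nested; the refactor above subscribes one side at a time, each under its own lock, and unwinds the first side if the second fails. A skeleton of that error-unwind shape, with stubs standing in for check_and_subscribe_port()/delete_and_unsubscribe_port():

#include <stdbool.h>

struct port;
struct subs;

static int check_and_subscribe(struct port *p, struct subs *s, bool is_src)
{
	(void)p; (void)s; (void)is_src;
	return 0;	/* stub: lock this side, validate, link the entry in */
}

static void delete_and_unsubscribe(struct port *p, struct subs *s, bool is_src)
{
	(void)p; (void)s; (void)is_src;	/* stub: lock this side, unlink */
}

int connect_ports(struct port *src, struct port *dst, struct subs *s)
{
	int err;

	err = check_and_subscribe(src, s, true);
	if (err < 0)
		return err;
	err = check_and_subscribe(dst, s, false);
	if (err < 0)
		delete_and_unsubscribe(src, s, true);	/* unwind first side */
	return err;
}
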
+diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
+index 186f1611103c..a2468f1101d1 100644
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
+ 
+ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ {
++	unsigned long flags;
++
++	spin_lock_irqsave(&tmr->lock, flags);
+ 	/* setup defaults */
+ 	tmr->ppq = 96;		/* 96 PPQ */
+ 	tmr->tempo = 500000;	/* 120 BPM */
+@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
+ 	tmr->preferred_resolution = seq_default_timer_resolution;
+ 
+ 	tmr->skew = tmr->skew_base = SKEW_BASE;
++	spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+ 
+-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
++static void seq_timer_reset(struct snd_seq_timer *tmr)
+ {
+-	unsigned long flags;
+-
+-	spin_lock_irqsave(&tmr->lock, flags);
+-
+ 	/* reset time & songposition */
+ 	tmr->cur_time.tv_sec = 0;
+ 	tmr->cur_time.tv_nsec = 0;
+ 
+ 	tmr->tick.cur_tick = 0;
+ 	tmr->tick.fraction = 0;
++}
++
++void snd_seq_timer_reset(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&tmr->lock, flags);
++	seq_timer_reset(tmr);
+ 	spin_unlock_irqrestore(&tmr->lock, flags);
+ }
+ 
+@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ 	tmr = q->timer;
+ 	if (tmr == NULL)
+ 		return;
+-	if (!tmr->running)
++	spin_lock_irqsave(&tmr->lock, flags);
++	if (!tmr->running) {
++		spin_unlock_irqrestore(&tmr->lock, flags);
+ 		return;
++	}
+ 
+ 	resolution *= ticks;
+ 	if (tmr->skew != tmr->skew_base) {
+@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
+ 			(((resolution & 0xffff) * tmr->skew) >> 16);
+ 	}
+ 
+-	spin_lock_irqsave(&tmr->lock, flags);
+-
+ 	/* update timer */
+ 	snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
+ 
+@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
+ 	t->callback = snd_seq_timer_interrupt;
+ 	t->callback_data = q;
+ 	t->flags |= SNDRV_TIMER_IFLG_AUTO;
++	spin_lock_irq(&tmr->lock);
+ 	tmr->timeri = t;
++	spin_unlock_irq(&tmr->lock);
+ 	return 0;
+ }
+ 
+ int snd_seq_timer_close(struct snd_seq_queue *q)
+ {
+ 	struct snd_seq_timer *tmr;
++	struct snd_timer_instance *t;
+ 	
+ 	tmr = q->timer;
+ 	if (snd_BUG_ON(!tmr))
+ 		return -EINVAL;
+-	if (tmr->timeri) {
+-		snd_timer_stop(tmr->timeri);
+-		snd_timer_close(tmr->timeri);
+-		tmr->timeri = NULL;
+-	}
++	spin_lock_irq(&tmr->lock);
++	t = tmr->timeri;
++	tmr->timeri = NULL;
++	spin_unlock_irq(&tmr->lock);
++	if (t)
++		snd_timer_close(t);
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
++static int seq_timer_stop(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
++int snd_seq_timer_stop(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_stop(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
+ static int initialize_timer(struct snd_seq_timer *tmr)
+ {
+ 	struct snd_timer *t;
+@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_start(struct snd_seq_timer * tmr)
++static int seq_timer_start(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+ 	if (tmr->running)
+-		snd_seq_timer_stop(tmr);
+-	snd_seq_timer_reset(tmr);
++		seq_timer_stop(tmr);
++	seq_timer_reset(tmr);
+ 	if (initialize_timer(tmr) < 0)
+ 		return -EINVAL;
+ 	snd_timer_start(tmr->timeri, tmr->ticks);
+@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
+-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
++int snd_seq_timer_start(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_start(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
++static int seq_timer_continue(struct snd_seq_timer *tmr)
+ {
+ 	if (! tmr->timeri)
+ 		return -EINVAL;
+ 	if (tmr->running)
+ 		return -EBUSY;
+ 	if (! tmr->initialized) {
+-		snd_seq_timer_reset(tmr);
++		seq_timer_reset(tmr);
+ 		if (initialize_timer(tmr) < 0)
+ 			return -EINVAL;
+ 	}
+@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+ 	return 0;
+ }
+ 
++int snd_seq_timer_continue(struct snd_seq_timer *tmr)
++{
++	unsigned long flags;
++	int err;
++
++	spin_lock_irqsave(&tmr->lock, flags);
++	err = seq_timer_continue(tmr);
++	spin_unlock_irqrestore(&tmr->lock, flags);
++	return err;
++}
++
+ /* return current 'real' time. use timeofday() to get better granularity. */
+ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ {
+ 	snd_seq_real_time_t cur_time;
++	unsigned long flags;
+ 
++	spin_lock_irqsave(&tmr->lock, flags);
+ 	cur_time = tmr->cur_time;
+ 	if (tmr->running) { 
+ 		struct timeval tm;
+@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+ 		}
+ 		snd_seq_sanity_real_time(&cur_time);
+ 	}
+-                
++	spin_unlock_irqrestore(&tmr->lock, flags);
+ 	return cur_time;	
+ }
+ 
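
A pattern repeated throughout these seq_timer hunks: the exported entry point becomes a thin wrapper that takes tmr->lock, while a static helper assumes the lock is already held, so internal callers such as seq_timer_start() can invoke the stop/reset steps without self-deadlocking. The shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>

struct seq_timer {
	pthread_mutex_t lock;
	int running;
};

static int seq_timer_stop_locked(struct seq_timer *t)	/* lock held */
{
	t->running = 0;
	return 0;
}

int seq_timer_stop(struct seq_timer *t)			/* public entry */
{
	int err;

	pthread_mutex_lock(&t->lock);
	err = seq_timer_stop_locked(t);
	pthread_mutex_unlock(&t->lock);
	return err;
}
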
+diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
+index 56e0f4cd3f82..81134e067184 100644
+--- a/sound/core/seq/seq_virmidi.c
++++ b/sound/core/seq/seq_virmidi.c
+@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+ 	int count, res;
+ 	unsigned char buf[32], *pbuf;
++	unsigned long flags;
+ 
+ 	if (up) {
+ 		vmidi->trigger = 1;
+ 		if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
+ 		    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
+-			snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
+-			return;		/* ignored */
++			while (snd_rawmidi_transmit(substream, buf,
++						    sizeof(buf)) > 0) {
++				/* ignored */
++			}
++			return;
+ 		}
+ 		if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 			if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+ 				return;
+ 			vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 		}
++		spin_lock_irqsave(&substream->runtime->lock, flags);
+ 		while (1) {
+-			count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
++			count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+ 			if (count <= 0)
+ 				break;
+ 			pbuf = buf;
+@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
+ 					snd_midi_event_reset_encode(vmidi->parser);
+ 					continue;
+ 				}
+-				snd_rawmidi_transmit_ack(substream, res);
++				__snd_rawmidi_transmit_ack(substream, res);
+ 				pbuf += res;
+ 				count -= res;
+ 				if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
+ 					if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
+-						return;
++						goto out;
+ 					vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
+ 				}
+ 			}
+ 		}
++	out:
++		spin_unlock_irqrestore(&substream->runtime->lock, flags);
+ 	} else {
+ 		vmidi->trigger = 0;
+ 	}
+@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
+  */
+ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
+ {
++	struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
+ 	struct snd_virmidi *vmidi = substream->runtime->private_data;
+-	snd_midi_event_free(vmidi->parser);
++
++	write_lock_irq(&rdev->filelist_lock);
+ 	list_del(&vmidi->list);
++	write_unlock_irq(&rdev->filelist_lock);
++	snd_midi_event_free(vmidi->parser);
+ 	substream->runtime->private_data = NULL;
+ 	kfree(vmidi);
+ 	return 0;
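
With the trigger loop now running under runtime->lock via the new __snd_rawmidi_transmit_*() helpers, the "ignore" path can no longer compute a byte count and ack it through the locked helper; it simply drains through snd_rawmidi_transmit() until the FIFO is empty. The drain idiom, with a stub standing in for the transmit call:

#include <stddef.h>

/* stub for snd_rawmidi_transmit(): returns bytes consumed, <= 0 when done */
static int transmit(unsigned char *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;	/* pretend the FIFO is already empty */
}

void drain_output(void)
{
	unsigned char buf[32];

	while (transmit(buf, sizeof(buf)) > 0)
		;	/* discard everything queued */
}
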
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index a419878901c4..00e8c5f4de17 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -305,8 +305,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 	return 0;
+ }
+ 
+-static int _snd_timer_stop(struct snd_timer_instance *timeri,
+-			   int keep_flag, int event);
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event);
+ 
+ /*
+  * close a timer instance
+@@ -348,7 +347,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		spin_unlock_irq(&timer->lock);
+ 		mutex_lock(&register_mutex);
+ 		list_del(&timeri->open_list);
+-		if (timer && list_empty(&timer->open_list_head) &&
++		if (list_empty(&timer->open_list_head) &&
+ 		    timer->hw.close)
+ 			timer->hw.close(timer);
+ 		/* remove slave links */
+@@ -452,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&slave_active_lock, flags);
++	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++		spin_unlock_irqrestore(&slave_active_lock, flags);
++		return -EBUSY;
++	}
+ 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+ 	if (timeri->master && timeri->timer) {
+ 		spin_lock(&timeri->timer->lock);
+@@ -476,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ 		return -EINVAL;
+ 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+ 		result = snd_timer_start_slave(timeri);
+-		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++		if (result >= 0)
++			snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ 		return result;
+ 	}
+ 	timer = timeri->timer;
+@@ -485,16 +489,22 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ 	if (timer->card && timer->card->shutdown)
+ 		return -ENODEV;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++			     SNDRV_TIMER_IFLG_START)) {
++		result = -EBUSY;
++		goto unlock;
++	}
+ 	timeri->ticks = timeri->cticks = ticks;
+ 	timeri->pticks = 0;
+ 	result = snd_timer_start1(timer, timeri, ticks);
++ unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+-	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
++	if (result >= 0)
++		snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+ 	return result;
+ }
+ 
+-static int _snd_timer_stop(struct snd_timer_instance * timeri,
+-			   int keep_flag, int event)
++static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
+ {
+ 	struct snd_timer *timer;
+ 	unsigned long flags;
+@@ -503,19 +513,26 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 		return -ENXIO;
+ 
+ 	if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
+-		if (!keep_flag) {
+-			spin_lock_irqsave(&slave_active_lock, flags);
+-			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+-			list_del_init(&timeri->ack_list);
+-			list_del_init(&timeri->active_list);
++		spin_lock_irqsave(&slave_active_lock, flags);
++		if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+ 			spin_unlock_irqrestore(&slave_active_lock, flags);
++			return -EBUSY;
+ 		}
++		timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++		list_del_init(&timeri->ack_list);
++		list_del_init(&timeri->active_list);
++		spin_unlock_irqrestore(&slave_active_lock, flags);
+ 		goto __end;
+ 	}
+ 	timer = timeri->timer;
+ 	if (!timer)
+ 		return -EINVAL;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
++			       SNDRV_TIMER_IFLG_START))) {
++		spin_unlock_irqrestore(&timer->lock, flags);
++		return -EBUSY;
++	}
+ 	list_del_init(&timeri->ack_list);
+ 	list_del_init(&timeri->active_list);
+ 	if (timer->card && timer->card->shutdown) {
+@@ -534,9 +551,7 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 			}
+ 		}
+ 	}
+-	if (!keep_flag)
+-		timeri->flags &=
+-			~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
++	timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+       __end:
+ 	if (event != SNDRV_TIMER_EVENT_RESOLUTION)
+@@ -555,7 +570,7 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
+ 	unsigned long flags;
+ 	int err;
+ 
+-	err = _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_STOP);
++	err = _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_STOP);
+ 	if (err < 0)
+ 		return err;
+ 	timer = timeri->timer;
+@@ -587,10 +602,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ 	if (timer->card && timer->card->shutdown)
+ 		return -ENODEV;
+ 	spin_lock_irqsave(&timer->lock, flags);
++	if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
++		result = -EBUSY;
++		goto unlock;
++	}
+ 	if (!timeri->cticks)
+ 		timeri->cticks = 1;
+ 	timeri->pticks = 0;
+ 	result = snd_timer_start1(timer, timeri, timer->sticks);
++ unlock:
+ 	spin_unlock_irqrestore(&timer->lock, flags);
+ 	snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
+ 	return result;
+@@ -601,7 +621,7 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+  */
+ int snd_timer_pause(struct snd_timer_instance * timeri)
+ {
+-	return _snd_timer_stop(timeri, 0, SNDRV_TIMER_EVENT_PAUSE);
++	return _snd_timer_stop(timeri, SNDRV_TIMER_EVENT_PAUSE);
+ }
+ 
+ /*
+@@ -724,8 +744,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ 			ti->cticks = ti->ticks;
+ 		} else {
+ 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+-			if (--timer->running)
+-				list_del_init(&ti->active_list);
++			--timer->running;
++			list_del_init(&ti->active_list);
+ 		}
+ 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
+ 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
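
Several of the timer.c hunks add the same defence: start, continue, and the stop paths now verify the RUNNING/START flags under the lock and return -EBUSY rather than re-arming or double-stopping an instance, and the START notification fires only on success. The flag check in isolation (illustrative flag values):

#include <errno.h>

#define IFLG_RUNNING (1 << 0)
#define IFLG_START   (1 << 1)

int timer_try_start(unsigned int *flags)
{
	if (*flags & (IFLG_RUNNING | IFLG_START))
		return -EBUSY;		/* already armed; don't double-start */
	*flags |= IFLG_START;
	return 0;
}
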
+diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
+index d11baaf0f0b4..96592d5ba7bf 100644
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver.");
+ module_param(fake_buffer, bool, 0444);
+ MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
+ #ifdef CONFIG_HIGH_RES_TIMERS
+-module_param(hrtimer, bool, 0644);
++module_param(hrtimer, bool, 0444);
+ MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
+ #endif
+ 
+diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
+index 98e4fc8121a1..5e547cb199f0 100644
+--- a/sound/firewire/bebob/bebob_stream.c
++++ b/sound/firewire/bebob/bebob_stream.c
+@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
+ 	[6] = 0x07,
+ };
+ 
+-static unsigned int
+-get_formation_index(unsigned int rate)
++static int
++get_formation_index(unsigned int rate, unsigned int *index)
+ {
+ 	unsigned int i;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
+-		if (snd_bebob_rate_table[i] == rate)
+-			return i;
++		if (snd_bebob_rate_table[i] == rate) {
++			*index = i;
++			return 0;
++		}
+ 	}
+ 	return -EINVAL;
+ }
+@@ -367,7 +369,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
+ 		goto end;
+ 
+ 	/* confirm params for both streams */
+-	index = get_formation_index(rate);
++	err = get_formation_index(rate, &index);
++	if (err < 0)
++		goto end;
+ 	pcm_channels = bebob->tx_stream_formations[index].pcm;
+ 	midi_channels = bebob->tx_stream_formations[index].midi;
+ 	amdtp_stream_set_parameters(&bebob->tx_stream,
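
The bebob change is the standard remedy for an unsigned function returning -EINVAL, which the caller would otherwise see as a huge, valid-looking index: report status as an int and hand the result back through an out-parameter. The generic form:

#include <errno.h>
#include <stddef.h>

int find_index(const unsigned int *table, size_t n,
	       unsigned int value, unsigned int *index)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (table[i] == value) {
			*index = i;	/* result via out-parameter */
			return 0;	/* status via return value */
		}
	}
	return -EINVAL;			/* no longer truncated to unsigned */
}
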
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index b791529bf31c..8f50a257a80d 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -614,6 +614,7 @@ enum {
+ 	CS4208_MAC_AUTO,
+ 	CS4208_MBA6,
+ 	CS4208_MBP11,
++	CS4208_MACMINI,
+ 	CS4208_GPIO0,
+ };
+ 
+@@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
+ 	{ .id = CS4208_GPIO0, .name = "gpio0" },
+ 	{ .id = CS4208_MBA6, .name = "mba6" },
+ 	{ .id = CS4208_MBP11, .name = "mbp11" },
++	{ .id = CS4208_MACMINI, .name = "macmini" },
+ 	{}
+ };
+ 
+@@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
+ /* codec SSID matching */
+ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
++	SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
+ 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+@@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
+ 	snd_hda_apply_fixup(codec, action);
+ }
+ 
++/* MacMini 7,1 has the inverted jack detection */
++static void cs4208_fixup_macmini(struct hda_codec *codec,
++				 const struct hda_fixup *fix, int action)
++{
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
++		{ 0x21, 0x004be140 }, /* SPDIF: disable detect */
++		{ }
++	};
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		/* HP pin (0x10) has an inverted detection */
++		codec->inv_jack_detect = 1;
++		/* disable the bogus Mic and SPDIF jack detections */
++		snd_hda_apply_pincfgs(codec, pincfgs);
++	}
++}
++
+ static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
+ 			       struct snd_ctl_elem_value *ucontrol)
+ {
+@@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = CS4208_GPIO0,
+ 	},
++	[CS4208_MACMINI] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = cs4208_fixup_macmini,
++		.chained = true,
++		.chain_id = CS4208_GPIO0,
++	},
+ 	[CS4208_GPIO0] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = cs4208_fixup_gpio0,
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index fb9a8a5787a6..37d8ababfc04 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1118,6 +1118,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	switch (chip->usb_id) {
+ 	case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
+ 	case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
++	case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
+ 	case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+@@ -1202,8 +1203,12 @@ void snd_usb_set_interface_quirk(struct usb_device *dev)
+ 	 * "Playback Design" products need a 50ms delay after setting the
+ 	 * USB interface.
+ 	 */
+-	if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
++	switch (le16_to_cpu(dev->descriptor.idVendor)) {
++	case 0x23ba: /* Playback Design */
++	case 0x0644: /* TEAC Corp. */
+ 		mdelay(50);
++		break;
++	}
+ }
+ 
+ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+@@ -1218,6 +1223,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
+ 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+ 		mdelay(20);
+ 
++	/*
++	 * "TEAC Corp." products need a 20ms delay after each
++	 * class compliant request
++	 */
++	if ((le16_to_cpu(dev->descriptor.idVendor) == 0x0644) &&
++	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
++		mdelay(20);
++
+ 	/* Marantz/Denon devices with USB DAC functionality need a delay
+ 	 * after each class compliant request
+ 	 */
+@@ -1266,7 +1279,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
+ 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
+-	case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
++	case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+@@ -1275,6 +1288,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+ 	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
+ 	case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
++	case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
+ 		if (fp->altsetting == 3)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index ed5461f065bd..f64a2d54d467 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -4841,13 +4841,12 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
+ 				    sizeof(long) != 8) {
+ 					char *p;
+ 
+-					ls = 2;
+ 					/* make %l into %ll */
+-					p = strchr(format, 'l');
+-					if (p)
++					if (ls == 1 && (p = strchr(format, 'l')))
+ 						memmove(p+1, p, strlen(p)+1);
+ 					else if (strcmp(format, "%p") == 0)
+ 						strcpy(format, "0x%llx");
++					ls = 2;
+ 				}
+ 				switch (ls) {
+ 				case -2:
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 0c74012575ac..83054ef6c1a1 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -816,7 +816,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
+ 
+ 		machine = machines__find(machines, pid);
+ 		if (!machine)
+-			machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
++			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
+ 		return machine;
+ 	}
+ 


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-01-31 23:29 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-01-31 23:29 UTC (permalink / raw
  To: gentoo-commits

commit:     8e7f5205bb1197eb27e5adb6cd52da00a9452499
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jan 31 23:29:43 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jan 31 23:29:43 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8e7f5205

Linux patch 4.1.17

 0000_README             |    4 +
 1016_linux-4.1.17.patch | 4830 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4834 insertions(+)

diff --git a/0000_README b/0000_README
index 1ca97cd..8b9fa0f 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch:  1015_linux-4.1.16.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.16
 
+Patch:  1016_linux-4.1.17.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.17
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1016_linux-4.1.17.patch b/1016_linux-4.1.17.patch
new file mode 100644
index 0000000..214c6ac
--- /dev/null
+++ b/1016_linux-4.1.17.patch
@@ -0,0 +1,4830 @@
+diff --git a/Makefile b/Makefile
+index 7609f1dcdcb9..d398dd440bc9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 191dcfab9f60..da09ddcfcc00 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
+ 	__kvm_flush_dcache_pud(pud);
+ }
+ 
++static bool kvm_is_device_pfn(unsigned long pfn)
++{
++	return !pfn_valid(pfn);
++}
++
+ /**
+  * stage2_dissolve_pmd() - clear and flush huge PMD entry
+  * @kvm:	pointer to kvm structure.
+@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+ 			kvm_tlb_flush_vmid_ipa(kvm, addr);
+ 
+ 			/* No need to invalidate the cache for device mappings */
+-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
++			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
+ 				kvm_flush_dcache_pte(old_pte);
+ 
+ 			put_page(virt_to_page(pte));
+@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+ 
+ 	pte = pte_offset_kernel(pmd, addr);
+ 	do {
+-		if (!pte_none(*pte) &&
+-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
++		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
+ 			kvm_flush_dcache_pte(*pte);
+ 	} while (pte++, addr += PAGE_SIZE, addr != end);
+ }
+@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
+ 	return kvm_vcpu_dabt_iswrite(vcpu);
+ }
+ 
+-static bool kvm_is_device_pfn(unsigned long pfn)
+-{
+-	return !pfn_valid(pfn);
+-}
+-
+ /**
+  * stage2_wp_ptes - write protect PMD range
+  * @pmd:	pointer to pmd entry
+diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
+index e0e23582c8b4..5fe949b084ac 100644
+--- a/arch/arm/net/bpf_jit_32.c
++++ b/arch/arm/net/bpf_jit_32.c
+@@ -162,19 +162,6 @@ static inline int mem_words_used(struct jit_ctx *ctx)
+ 	return fls(ctx->seen & SEEN_MEM);
+ }
+ 
+-static inline bool is_load_to_a(u16 inst)
+-{
+-	switch (inst) {
+-	case BPF_LD | BPF_W | BPF_LEN:
+-	case BPF_LD | BPF_W | BPF_ABS:
+-	case BPF_LD | BPF_H | BPF_ABS:
+-	case BPF_LD | BPF_B | BPF_ABS:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+ static void jit_fill_hole(void *area, unsigned int size)
+ {
+ 	u32 *ptr;
+@@ -186,7 +173,6 @@ static void jit_fill_hole(void *area, unsigned int size)
+ static void build_prologue(struct jit_ctx *ctx)
+ {
+ 	u16 reg_set = saved_regs(ctx);
+-	u16 first_inst = ctx->skf->insns[0].code;
+ 	u16 off;
+ 
+ #ifdef CONFIG_FRAME_POINTER
+@@ -216,7 +202,7 @@ static void build_prologue(struct jit_ctx *ctx)
+ 		emit(ARM_MOV_I(r_X, 0), ctx);
+ 
+ 	/* do not leak kernel data to userspace */
+-	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
++	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ 		emit(ARM_MOV_I(r_A, 0), ctx);
+ 
+ 	/* stack space for the BPF_MEM words */
+diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
+index 17e92f05b1fe..3ca894ecf699 100644
+--- a/arch/arm64/include/asm/kvm_emulate.h
++++ b/arch/arm64/include/asm/kvm_emulate.h
+@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
+ 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
+ }
+ 
++/*
++ * vcpu_reg should always be passed a register number coming from a
++ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
++ * with banked registers.
++ */
+ static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+ {
+-	if (vcpu_mode_is_32bit(vcpu))
+-		return vcpu_reg32(vcpu, reg_num);
+-
+ 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+ }
+ 
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index d882b833dbdb..608ac6aa497b 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -58,6 +58,12 @@
+  */
+ void ptrace_disable(struct task_struct *child)
+ {
++	/*
++	 * This would be better off in core code, but PTRACE_DETACH has
++	 * grown its fair share of arch-specific warts and changing it
++	 * is likely to cause regressions on obscure architectures.
++	 */
++	user_disable_single_step(child);
+ }
+ 
+ #ifdef CONFIG_HAVE_HW_BREAKPOINT
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index 74753132c3ac..bbdb53b87e13 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -523,6 +523,10 @@ static int c_show(struct seq_file *m, void *v)
+ 		seq_printf(m, "processor\t: %d\n", i);
+ #endif
+ 
++		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
++			   loops_per_jiffy / (500000UL/HZ),
++			   loops_per_jiffy / (5000UL/HZ) % 100);
++
+ 		/*
+ 		 * Dump out the common processor features in a single line.
+ 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 53f1f8dccf6c..357418137db7 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -1,3 +1,4 @@
++#include <linux/ftrace.h>
+ #include <linux/percpu.h>
+ #include <linux/slab.h>
+ #include <asm/cacheflush.h>
+@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 	local_dbg_save(flags);
+ 
+ 	/*
++	 * Function graph tracer state gets inconsistent when the kernel
++	 * calls functions that never return (aka suspend finishers) hence
++	 * disable graph tracing during their execution.
++	 */
++	pause_graph_tracing();
++
++	/*
+ 	 * mm context saved on the stack, it will be restored when
+ 	 * the cpu comes out of reset through the identity mapped
+ 	 * page tables, so that the thread address space is properly
+@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 			hw_breakpoint_restore(NULL);
+ 	}
+ 
++	unpause_graph_tracing();
++
+ 	/*
+ 	 * Restore pstate flags. OS lock and mdscr have been already
+ 	 * restored, so from this point onwards, debugging is fully
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index 85c57158dcd9..648112e90ed5 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+ 
+ 	/* Note: These now point to the banked copies */
+ 	*vcpu_spsr(vcpu) = new_spsr_value;
+-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
++	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ 
+ 	/* Branch to exception vector */
+ 	if (sctlr & (1 << 13))
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 5b8b664422d3..cb34eb8bbb9d 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -450,6 +450,9 @@ void __init paging_init(void)
+ 
+ 	empty_zero_page = virt_to_page(zero_page);
+ 
++	/* Ensure the zero page is visible to the page table walker */
++	dsb(ishst);
++
+ 	/*
+ 	 * TTBR0 is only used for the identity mapping at this stage. Make it
+ 	 * point to zero page to avoid speculatively fetching new entries.
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index 98a26ce82d26..aee5637ea436 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -1,7 +1,7 @@
+ /*
+  * BPF JIT compiler for ARM64
+  *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -35,6 +35,7 @@
+ 	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
+ 		AARCH64_INSN_BRANCH_COMP_##type)
+ #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
++#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
+ 
+ /* Conditional branch (immediate) */
+ #define A64_COND_BRANCH(cond, offset) \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index c047598b09e0..6217f80702d2 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -1,7 +1,7 @@
+ /*
+  * BPF JIT compiler for ARM64
+  *
+- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
++ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of the GNU General Public License version 2 as
+@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ 	u8 jmp_cond;
+ 	s32 jmp_offset;
+ 
++#define check_imm(bits, imm) do {				\
++	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
++	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
++		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
++			i, imm, imm);				\
++		return -EINVAL;					\
++	}							\
++} while (0)
++#define check_imm19(imm) check_imm(19, imm)
++#define check_imm26(imm) check_imm(26, imm)
++
+ 	switch (code) {
+ 	/* dst = src */
+ 	case BPF_ALU | BPF_MOV | BPF_X:
+@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ 		break;
+ 	case BPF_ALU | BPF_DIV | BPF_X:
+ 	case BPF_ALU64 | BPF_DIV | BPF_X:
+-		emit(A64_UDIV(is64, dst, dst, src), ctx);
+-		break;
+ 	case BPF_ALU | BPF_MOD | BPF_X:
+ 	case BPF_ALU64 | BPF_MOD | BPF_X:
+-		ctx->tmp_used = 1;
+-		emit(A64_UDIV(is64, tmp, dst, src), ctx);
+-		emit(A64_MUL(is64, tmp, tmp, src), ctx);
+-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
++	{
++		const u8 r0 = bpf2a64[BPF_REG_0];
++
++		/* if (src == 0) return 0 */
++		jmp_offset = 3; /* skip ahead to else path */
++		check_imm19(jmp_offset);
++		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
++		emit(A64_MOVZ(1, r0, 0, 0), ctx);
++		jmp_offset = epilogue_offset(ctx);
++		check_imm26(jmp_offset);
++		emit(A64_B(jmp_offset), ctx);
++		/* else */
++		switch (BPF_OP(code)) {
++		case BPF_DIV:
++			emit(A64_UDIV(is64, dst, dst, src), ctx);
++			break;
++		case BPF_MOD:
++			ctx->tmp_used = 1;
++			emit(A64_UDIV(is64, tmp, dst, src), ctx);
++			emit(A64_MUL(is64, tmp, tmp, src), ctx);
++			emit(A64_SUB(is64, dst, dst, tmp), ctx);
++			break;
++		}
+ 		break;
++	}
+ 	case BPF_ALU | BPF_LSH | BPF_X:
+ 	case BPF_ALU64 | BPF_LSH | BPF_X:
+ 		emit(A64_LSLV(is64, dst, dst, src), ctx);
+@@ -393,17 +422,6 @@ emit_bswap_uxt:
+ 		emit(A64_ASR(is64, dst, dst, imm), ctx);
+ 		break;
+ 
+-#define check_imm(bits, imm) do {				\
+-	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+-	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+-		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+-			i, imm, imm);				\
+-		return -EINVAL;					\
+-	}							\
+-} while (0)
+-#define check_imm19(imm) check_imm(19, imm)
+-#define check_imm26(imm) check_imm(26, imm)
+-
+ 	/* JUMP off */
+ 	case BPF_JMP | BPF_JA:
+ 		jmp_offset = bpf2a64_offset(i + off, i, ctx);
+diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
+index e23fdf2a9c80..d6d27d51d131 100644
+--- a/arch/mips/net/bpf_jit.c
++++ b/arch/mips/net/bpf_jit.c
+@@ -556,19 +556,6 @@ static inline u16 align_sp(unsigned int num)
+ 	return num;
+ }
+ 
+-static bool is_load_to_a(u16 inst)
+-{
+-	switch (inst) {
+-	case BPF_LD | BPF_W | BPF_LEN:
+-	case BPF_LD | BPF_W | BPF_ABS:
+-	case BPF_LD | BPF_H | BPF_ABS:
+-	case BPF_LD | BPF_B | BPF_ABS:
+-		return true;
+-	default:
+-		return false;
+-	}
+-}
+-
+ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+ {
+ 	int i = 0, real_off = 0;
+@@ -686,7 +673,6 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
+ 
+ static void build_prologue(struct jit_ctx *ctx)
+ {
+-	u16 first_inst = ctx->skf->insns[0].code;
+ 	int sp_off;
+ 
+ 	/* Calculate the total offset for the stack pointer */
+@@ -700,7 +686,7 @@ static void build_prologue(struct jit_ctx *ctx)
+ 		emit_jit_reg_move(r_X, r_zero, ctx);
+ 
+ 	/* Do not leak kernel data to userspace */
+-	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
++	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
+ 		emit_jit_reg_move(r_A, r_zero, ctx);
+ }
+ 
+diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
+index 4434b54e1d87..78ae5552fdb8 100644
+--- a/arch/mn10300/Kconfig
++++ b/arch/mn10300/Kconfig
+@@ -1,6 +1,7 @@
+ config MN10300
+ 	def_bool y
+ 	select HAVE_OPROFILE
++	select HAVE_UID16
+ 	select GENERIC_IRQ_SHOW
+ 	select ARCH_WANT_IPC_PARSE_VERSION
+ 	select HAVE_ARCH_TRACEHOOK
+@@ -37,9 +38,6 @@ config HIGHMEM
+ config NUMA
+ 	def_bool n
+ 
+-config UID16
+-	def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ 	def_bool y
+ 
+diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
+index d463c68fe7f0..99897f6645c1 100644
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__(
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	lwarx	%0,0,%2 \n"
+ 	PPC405_ERR77(0,%2)
+ "	stwcx.	%3,0,%2 \n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
+ 	: "r" (p), "r" (val)
+ 	: "cc", "memory");
+@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__(
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	ldarx	%0,0,%2 \n"
+ 	PPC405_ERR77(0,%2)
+ "	stdcx.	%3,0,%2 \n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
+ 	: "r" (p), "r" (val)
+ 	: "cc", "memory");
+@@ -152,14 +152,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
+ 	unsigned int prev;
+ 
+ 	__asm__ __volatile__ (
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
+ 	cmpw	0,%0,%3\n\
+ 	bne-	2f\n"
+ 	PPC405_ERR77(0,%2)
+ "	stwcx.	%4,0,%2\n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	"\n\
+ 2:"
+ 	: "=&r" (prev), "+m" (*p)
+@@ -198,13 +198,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
+ 	unsigned long prev;
+ 
+ 	__asm__ __volatile__ (
+-	PPC_RELEASE_BARRIER
++	PPC_ATOMIC_ENTRY_BARRIER
+ "1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
+ 	cmpd	0,%0,%3\n\
+ 	bne-	2f\n\
+ 	stdcx.	%4,0,%2\n\
+ 	bne-	1b"
+-	PPC_ACQUIRE_BARRIER
++	PPC_ATOMIC_EXIT_BARRIER
+ 	"\n\
+ 2:"
+ 	: "=&r" (prev), "+m" (*p)
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index af56b5c6c81a..f4f99f01b746 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -108,6 +108,7 @@
+ #define MSR_TS_T	__MASK(MSR_TS_T_LG)	/*  Transaction Transactional */
+ #define MSR_TS_MASK	(MSR_TS_T | MSR_TS_S)   /* Transaction State bits */
+ #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
++#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
+ #define MSR_TM_TRANSACTIONAL(x)	(((x) & MSR_TS_MASK) == MSR_TS_T)
+ #define MSR_TM_SUSPENDED(x)	(((x) & MSR_TS_MASK) == MSR_TS_S)
+ 
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index e682a7143edb..c50868681f9e 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -44,7 +44,7 @@ static inline void isync(void)
+ 	MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
+ #define PPC_ACQUIRE_BARRIER	 "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
+ #define PPC_RELEASE_BARRIER	 stringify_in_c(LWSYNC) "\n"
+-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
++#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
+ #define PPC_ATOMIC_EXIT_BARRIER	 "\n" stringify_in_c(sync) "\n"
+ #else
+ #define PPC_ACQUIRE_BARRIER
+diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
+index 59dad113897b..c2d21d11c2d2 100644
+--- a/arch/powerpc/include/uapi/asm/elf.h
++++ b/arch/powerpc/include/uapi/asm/elf.h
+@@ -295,6 +295,8 @@ do {									\
+ #define R_PPC64_TLSLD		108
+ #define R_PPC64_TOCSAVE		109
+ 
++#define R_PPC64_ENTRY		118
++
+ #define R_PPC64_REL16		249
+ #define R_PPC64_REL16_LO	250
+ #define R_PPC64_REL16_HI	251
+diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
+index 68384514506b..59663af9315f 100644
+--- a/arch/powerpc/kernel/module_64.c
++++ b/arch/powerpc/kernel/module_64.c
+@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			 */
+ 			break;
+ 
++		case R_PPC64_ENTRY:
++			/*
++			 * Optimize ELFv2 large code model entry point if
++			 * the TOC is within 2GB range of current location.
++			 */
++			value = my_r2(sechdrs, me) - (unsigned long)location;
++			if (value + 0x80008000 > 0xffffffff)
++				break;
++			/*
++			 * Check for the large code model prolog sequence:
++		         *	ld r2, ...(r12)
++			 *	add r2, r2, r12
++			 */
++			if ((((uint32_t *)location)[0] & ~0xfffc)
++			    != 0xe84c0000)
++				break;
++			if (((uint32_t *)location)[1] != 0x7c426214)
++				break;
++			/*
++			 * If found, replace it with:
++			 *	addis r2, r12, (.TOC.-func)@ha
++			 *	addi r2, r12, (.TOC.-func)@l
++			 */
++			((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
++			((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
++			break;
++
+ 		case R_PPC64_REL16_HA:
+ 			/* Subtract location pointer */
+ 			value -= (unsigned long)location;
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index 0596373cd1c3..c8c8275765e7 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
+ 		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
+ 	}
+ 
++	/*
++	 * Use the current MSR TM suspended bit to track if we have
++	 * checkpointed state outstanding.
++	 * On signal delivery, we'd normally reclaim the checkpointed
++	 * state to obtain the stack pointer (see: get_tm_stackpointer()).
++	 * This will then directly return to userspace without going
++	 * through __switch_to(). However, if the stack frame is bad,
++	 * we need to exit this thread which calls __switch_to() which
++	 * will again attempt to reclaim the already saved tm state.
++	 * Hence we need to check that we've not already reclaimed
++	 * this state.
++	 * We do this using the current MSR, rather than tracking it in
++	 * some specific thread_struct bit, as it has the additional
++	 * benefit of checking for a potential TM bad thing exception.
++	 */
++	if (!MSR_TM_SUSPENDED(mfmsr()))
++		return;
++
+ 	tm_reclaim(thr, thr->regs->msr, cause);
+ 
+ 	/* Having done the reclaim, we now have the checkpointed
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index da50e0c9c57e..7356c33dc897 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ 		return 1;
+ #endif /* CONFIG_SPE */
+ 
++	/* Get the top half of the MSR from the user context */
++	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
++		return 1;
++	msr_hi <<= 32;
++	/* If TM bits are set to the reserved value, it's an invalid context */
++	if (MSR_TM_RESV(msr_hi))
++		return 1;
++	/* Pull in the MSR TM bits from the user context */
++	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ 	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
+ 	 * registers, including FP and V[S]Rs.  After recheckpointing, the
+ 	 * transactional versions should be loaded.
+@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
+ 	current->thread.tm_texasr |= TEXASR_FS;
+ 	/* This loads the checkpointed FP/VEC state, if used */
+ 	tm_recheckpoint(&current->thread, msr);
+-	/* Get the top half of the MSR */
+-	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
+-		return 1;
+-	/* Pull in MSR TM from user context */
+-	regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
+ 
+ 	/* This loads the speculative FP/VEC state, if used */
+ 	if (msr & MSR_FP) {
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index c7c24d2e2bdb..164fd6474843 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -427,6 +427,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
+ 
+ 	/* get MSR separately, transfer the LE bit if doing signal return */
+ 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
++	/* Don't allow reserved mode. */
++	if (MSR_TM_RESV(msr))
++		return -EINVAL;
++
+ 	/* pull in MSR TM from user context */
+ 	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index f1e0e5522e3a..f5b3de7f7fa2 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -210,6 +210,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
+ 
+ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+ {
++	/*
++	 * Check for illegal transactional state bit combination
++	 * and if we find it, force the TS field to a safe state.
++	 */
++	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
++		msr &= ~MSR_TS_MASK;
+ 	vcpu->arch.shregs.msr = msr;
+ 	kvmppc_end_cede(vcpu);
+ }
+diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
+index 17cea18a09d3..264c473c1b3c 100644
+--- a/arch/powerpc/net/bpf_jit_comp.c
++++ b/arch/powerpc/net/bpf_jit_comp.c
+@@ -78,18 +78,9 @@ static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
+ 		PPC_LI(r_X, 0);
+ 	}
+ 
+-	switch (filter[0].code) {
+-	case BPF_RET | BPF_K:
+-	case BPF_LD | BPF_W | BPF_LEN:
+-	case BPF_LD | BPF_W | BPF_ABS:
+-	case BPF_LD | BPF_H | BPF_ABS:
+-	case BPF_LD | BPF_B | BPF_ABS:
+-		/* first instruction sets A register (or is RET 'constant') */
+-		break;
+-	default:
+-		/* make sure we dont leak kernel information to user */
++	/* make sure we don't leak kernel information to user */
++	if (bpf_needs_clear_a(&filter[0]))
+ 		PPC_LI(r_A, 0);
+-	}
+ }
+ 
+ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
+index 2241565b0739..b831a2ee32e9 100644
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -358,7 +358,7 @@ static void opal_handle_message(void)
+ 
+ 	/* Sanity check */
+ 	if (type >= OPAL_MSG_TYPE_MAX) {
+-		pr_warning("%s: Unknown message type: %u\n", __func__, type);
++		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
+ 		return;
+ 	}
+ 	opal_message_do_notify(type, (void *)&msg);
+diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
+index 7931eeeb649a..8109e92cd619 100644
+--- a/arch/sparc/net/bpf_jit_comp.c
++++ b/arch/sparc/net/bpf_jit_comp.c
+@@ -420,22 +420,9 @@ void bpf_jit_compile(struct bpf_prog *fp)
+ 		}
+ 		emit_reg_move(O7, r_saved_O7);
+ 
+-		switch (filter[0].code) {
+-		case BPF_RET | BPF_K:
+-		case BPF_LD | BPF_W | BPF_LEN:
+-		case BPF_LD | BPF_W | BPF_ABS:
+-		case BPF_LD | BPF_H | BPF_ABS:
+-		case BPF_LD | BPF_B | BPF_ABS:
+-			/* The first instruction sets the A register (or is
+-			 * a "RET 'constant'")
+-			 */
+-			break;
+-		default:
+-			/* Make sure we dont leak kernel information to the
+-			 * user.
+-			 */
++		/* Make sure we don't leak kernel information to the user. */
++		if (bpf_needs_clear_a(&filter[0]))
+ 			emit_clear(r_A); /* A = 0 */
+-		}
+ 
+ 		for (i = 0; i < flen; i++) {
+ 			unsigned int K = filter[i].k;
+diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
+index 4fa687a47a62..6b8d6e8cd449 100644
+--- a/arch/x86/include/asm/boot.h
++++ b/arch/x86/include/asm/boot.h
+@@ -27,7 +27,7 @@
+ #define BOOT_HEAP_SIZE             0x400000
+ #else /* !CONFIG_KERNEL_BZIP2 */
+ 
+-#define BOOT_HEAP_SIZE	0x8000
++#define BOOT_HEAP_SIZE	0x10000
+ 
+ #endif /* !CONFIG_KERNEL_BZIP2 */
+ 
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 80d67dd80351..73e38f14ddeb 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -104,8 +104,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ #endif
+ 		cpumask_set_cpu(cpu, mm_cpumask(next));
+ 
+-		/* Re-load page tables */
++		/*
++		 * Re-load page tables.
++		 *
++		 * This logic has an ordering constraint:
++		 *
++		 *  CPU 0: Write to a PTE for 'next'
++		 *  CPU 0: load bit 1 in mm_cpumask.  if nonzero, send IPI.
++		 *  CPU 1: set bit 1 in next's mm_cpumask
++		 *  CPU 1: load from the PTE that CPU 0 writes (implicit)
++		 *
++		 * We need to prevent an outcome in which CPU 1 observes
++		 * the new PTE value and CPU 0 observes bit 1 clear in
++		 * mm_cpumask.  (If that occurs, then the IPI will never
++		 * be sent, and CPU 0's TLB will contain a stale entry.)
++		 *
++		 * The bad outcome can occur if either CPU's load is
++		 * reordered before that CPU's store, so both CPUs must
++		 * execute full barriers to prevent this from happening.
++		 *
++		 * Thus, switch_mm needs a full barrier between the
++		 * store to mm_cpumask and any operation that could load
++		 * from next->pgd.  TLB fills are special and can happen
++		 * due to instruction fetches or for no reason at all,
++		 * and neither LOCK nor MFENCE orders them.
++		 * Fortunately, load_cr3() is serializing and gives the
++		 * ordering guarantee we need.
++		 *
++		 */
+ 		load_cr3(next->pgd);
++
+ 		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ 
+ 		/* Stop flush ipis for the previous mm */
+@@ -142,10 +170,14 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 			 * schedule, protecting us from simultaneous changes.
+ 			 */
+ 			cpumask_set_cpu(cpu, mm_cpumask(next));
++
+ 			/*
+ 			 * We were in lazy tlb mode and leave_mm disabled
+ 			 * tlb flush IPI delivery. We must reload CR3
+ 			 * to make sure to use no freed page tables.
++			 *
++			 * As above, load_cr3() is serializing and orders TLB
++			 * fills with respect to the mm_cpumask write.
+ 			 */
+ 			load_cr3(next->pgd);
+ 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 86db4bcd7ce5..0549ae3cb332 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -182,6 +182,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
+ 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+ 		},
+ 	},
++	{	/* Handle problems with rebooting on the iMac10,1. */
++		.callback = set_pci_reboot,
++		.ident = "Apple iMac10,1",
++		.matches = {
++		    DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
++		    DMI_MATCH(DMI_PRODUCT_NAME, "iMac10,1"),
++		},
++	},
+ 
+ 	/* ASRock */
+ 	{	/* Handle problems with rebooting on ASRock Q1900DC-ITX */
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index e0fd5f47fbb9..5d2e2e9af1c4 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -667,12 +667,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+ 	signal_setup_done(failed, ksig, stepping);
+ }
+ 
+-#ifdef CONFIG_X86_32
+-#define NR_restart_syscall	__NR_restart_syscall
+-#else /* !CONFIG_X86_32 */
+-#define NR_restart_syscall	\
+-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
+-#endif /* CONFIG_X86_32 */
++static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
++{
++#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
++	return __NR_restart_syscall;
++#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
++	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
++		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
++#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
++}
+ 
+ /*
+  * Note that 'init' is a special process: it doesn't get signals it doesn't
+@@ -701,7 +704,7 @@ static void do_signal(struct pt_regs *regs)
+ 			break;
+ 
+ 		case -ERESTART_RESTARTBLOCK:
+-			regs->ax = NR_restart_syscall;
++			regs->ax = get_nr_restart_syscall(regs);
+ 			regs->ip -= 2;
+ 			break;
+ 		}
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 454ccb082e18..0d039cd268a8 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1106,6 +1106,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ 	set_exception_intercept(svm, UD_VECTOR);
+ 	set_exception_intercept(svm, MC_VECTOR);
+ 	set_exception_intercept(svm, AC_VECTOR);
++	set_exception_intercept(svm, DB_VECTOR);
+ 
+ 	set_intercept(svm, INTERCEPT_INTR);
+ 	set_intercept(svm, INTERCEPT_NMI);
+@@ -1638,20 +1639,13 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
+ 	mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+ 
+-static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
++static void update_bp_intercept(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	clr_exception_intercept(svm, DB_VECTOR);
+ 	clr_exception_intercept(svm, BP_VECTOR);
+ 
+-	if (svm->nmi_singlestep)
+-		set_exception_intercept(svm, DB_VECTOR);
+-
+ 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+-		if (vcpu->guest_debug &
+-		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+-			set_exception_intercept(svm, DB_VECTOR);
+ 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ 			set_exception_intercept(svm, BP_VECTOR);
+ 	} else
+@@ -1757,7 +1751,6 @@ static int db_interception(struct vcpu_svm *svm)
+ 		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
+ 			svm->vmcb->save.rflags &=
+ 				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+-		update_db_bp_intercept(&svm->vcpu);
+ 	}
+ 
+ 	if (svm->vcpu.guest_debug &
+@@ -3751,7 +3744,6 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
+ 	 */
+ 	svm->nmi_singlestep = true;
+ 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
+-	update_db_bp_intercept(vcpu);
+ }
+ 
+ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
+@@ -4367,7 +4359,7 @@ static struct kvm_x86_ops svm_x86_ops = {
+ 	.vcpu_load = svm_vcpu_load,
+ 	.vcpu_put = svm_vcpu_put,
+ 
+-	.update_db_bp_intercept = update_db_bp_intercept,
++	.update_db_bp_intercept = update_bp_intercept,
+ 	.get_msr = svm_get_msr,
+ 	.set_msr = svm_set_msr,
+ 	.get_segment_base = svm_get_segment_base,
+diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
+index 7c7bc8bef21f..21dda139eb3a 100644
+--- a/arch/x86/kvm/trace.h
++++ b/arch/x86/kvm/trace.h
+@@ -250,7 +250,7 @@ TRACE_EVENT(kvm_inj_virq,
+ #define kvm_trace_sym_exc						\
+ 	EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM),	\
+ 	EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF),		\
+-	EXS(MF), EXS(MC)
++	EXS(MF), EXS(AC), EXS(MC)
+ 
+ /*
+  * Tracepoint for kvm interrupt injection:
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index a243854c35d5..945f9e13f1aa 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -3652,20 +3652,21 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ 		if (!is_paging(vcpu)) {
+ 			hw_cr4 &= ~X86_CR4_PAE;
+ 			hw_cr4 |= X86_CR4_PSE;
+-			/*
+-			 * SMEP/SMAP is disabled if CPU is in non-paging mode
+-			 * in hardware. However KVM always uses paging mode to
+-			 * emulate guest non-paging mode with TDP.
+-			 * To emulate this behavior, SMEP/SMAP needs to be
+-			 * manually disabled when guest switches to non-paging
+-			 * mode.
+-			 */
+-			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
+ 		} else if (!(cr4 & X86_CR4_PAE)) {
+ 			hw_cr4 &= ~X86_CR4_PAE;
+ 		}
+ 	}
+ 
++	if (!enable_unrestricted_guest && !is_paging(vcpu))
++		/*
++		 * SMEP/SMAP is disabled if CPU is in non-paging mode in
++		 * hardware.  However KVM always uses paging mode without
++		 * unrestricted guest.
++		 * To emulate this behavior, SMEP/SMAP needs to be manually
++		 * disabled when guest switches to non-paging mode.
++		 */
++		hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
++
+ 	vmcs_writel(CR4_READ_SHADOW, cr4);
+ 	vmcs_writel(GUEST_CR4, hw_cr4);
+ 	return 0;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 47a32f743a91..fed4c84eac44 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -940,7 +940,7 @@ static u32 msrs_to_save[] = {
+ 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
+ #endif
+ 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+-	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS
++	MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ };
+ 
+ static unsigned num_msrs_to_save;
+@@ -4117,16 +4117,17 @@ static void kvm_init_msr_list(void)
+ 
+ 		/*
+ 		 * Even MSRs that are valid in the host may not be exposed
+-		 * to the guests in some cases.  We could work around this
+-		 * in VMX with the generic MSR save/load machinery, but it
+-		 * is not really worthwhile since it will really only
+-		 * happen with nested virtualization.
++		 * to the guests in some cases.
+ 		 */
+ 		switch (msrs_to_save[i]) {
+ 		case MSR_IA32_BNDCFGS:
+ 			if (!kvm_x86_ops->mpx_supported())
+ 				continue;
+ 			break;
++		case MSR_TSC_AUX:
++			if (!kvm_x86_ops->rdtscp_supported())
++				continue;
++			break;
+ 		default:
+ 			break;
+ 		}
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index 4d1c11c07fe1..f738c61bc891 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -120,19 +120,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ 	switch (type) {
+ 	case REG_TYPE_RM:
+ 		regno = X86_MODRM_RM(insn->modrm.value);
+-		if (X86_REX_B(insn->rex_prefix.value) == 1)
++		if (X86_REX_B(insn->rex_prefix.value))
+ 			regno += 8;
+ 		break;
+ 
+ 	case REG_TYPE_INDEX:
+ 		regno = X86_SIB_INDEX(insn->sib.value);
+-		if (X86_REX_X(insn->rex_prefix.value) == 1)
++		if (X86_REX_X(insn->rex_prefix.value))
+ 			regno += 8;
+ 		break;
+ 
+ 	case REG_TYPE_BASE:
+ 		regno = X86_SIB_BASE(insn->sib.value);
+-		if (X86_REX_B(insn->rex_prefix.value) == 1)
++		if (X86_REX_B(insn->rex_prefix.value))
+ 			regno += 8;
+ 		break;
+ 
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 90b924acd982..061e0114005e 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -160,7 +160,10 @@ void flush_tlb_current_task(void)
+ 	preempt_disable();
+ 
+ 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
++
++	/* This is an implicit full barrier that synchronizes with switch_mm. */
+ 	local_flush_tlb();
++
+ 	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
+ 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+ 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
+@@ -187,17 +190,29 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+ 	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
+ 
+ 	preempt_disable();
+-	if (current->active_mm != mm)
++	if (current->active_mm != mm) {
++		/* Synchronize with switch_mm. */
++		smp_mb();
++
+ 		goto out;
++	}
+ 
+ 	if (!current->mm) {
+ 		leave_mm(smp_processor_id());
++
++		/* Synchronize with switch_mm. */
++		smp_mb();
++
+ 		goto out;
+ 	}
+ 
+ 	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
+ 		base_pages_to_flush = (end - start) >> PAGE_SHIFT;
+ 
++	/*
++	 * Both branches below are implicit full barriers (MOV to CR or
++	 * INVLPG) that synchronize with switch_mm.
++	 */
+ 	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
+ 		base_pages_to_flush = TLB_FLUSH_ALL;
+ 		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+@@ -227,10 +242,18 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
+ 	preempt_disable();
+ 
+ 	if (current->active_mm == mm) {
+-		if (current->mm)
++		if (current->mm) {
++			/*
++			 * Implicit full barrier (INVLPG) that synchronizes
++			 * with switch_mm.
++			 */
+ 			__flush_tlb_one(start);
+-		else
++		} else {
+ 			leave_mm(smp_processor_id());
++
++			/* Synchronize with switch_mm. */
++			smp_mb();
++		}
+ 	}
+ 
+ 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
+index 53b4c0811f4f..6d3415144dab 100644
+--- a/arch/x86/xen/suspend.c
++++ b/arch/x86/xen/suspend.c
+@@ -32,7 +32,8 @@ static void xen_hvm_post_suspend(int suspend_cancelled)
+ {
+ #ifdef CONFIG_XEN_PVHVM
+ 	int cpu;
+-	xen_hvm_init_shared_info();
++	if (!suspend_cancelled)
++	    xen_hvm_init_shared_info();
+ 	xen_callback_vector();
+ 	xen_unplug_emulated_devices();
+ 	if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
+diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
+index 8a45e92ff60c..05222706dc66 100644
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -404,18 +404,42 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
+ 	return rv;
+ }
+ 
+-static void start_check_enables(struct smi_info *smi_info)
++static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
++{
++	smi_info->last_timeout_jiffies = jiffies;
++	mod_timer(&smi_info->si_timer, new_val);
++	smi_info->timer_running = true;
++}
++
++/*
++ * Start a new message and (re)start the timer and thread.
++ */
++static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
++			  unsigned int size)
++{
++	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
++
++	if (smi_info->thread)
++		wake_up_process(smi_info->thread);
++
++	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
++}
++
++static void start_check_enables(struct smi_info *smi_info, bool start_timer)
+ {
+ 	unsigned char msg[2];
+ 
+ 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
+ 
+-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
++	if (start_timer)
++		start_new_msg(smi_info, msg, 2);
++	else
++		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+ 	smi_info->si_state = SI_CHECKING_ENABLES;
+ }
+ 
+-static void start_clear_flags(struct smi_info *smi_info)
++static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
+ {
+ 	unsigned char msg[3];
+ 
+@@ -424,7 +448,10 @@ static void start_clear_flags(struct smi_info *smi_info)
+ 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
+ 	msg[2] = WDT_PRE_TIMEOUT_INT;
+ 
+-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
++	if (start_timer)
++		start_new_msg(smi_info, msg, 3);
++	else
++		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+ 	smi_info->si_state = SI_CLEARING_FLAGS;
+ }
+ 
+@@ -434,10 +461,8 @@ static void start_getting_msg_queue(struct smi_info *smi_info)
+ 	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
+ 	smi_info->curr_msg->data_size = 2;
+ 
+-	smi_info->handlers->start_transaction(
+-		smi_info->si_sm,
+-		smi_info->curr_msg->data,
+-		smi_info->curr_msg->data_size);
++	start_new_msg(smi_info, smi_info->curr_msg->data,
++		      smi_info->curr_msg->data_size);
+ 	smi_info->si_state = SI_GETTING_MESSAGES;
+ }
+ 
+@@ -447,20 +472,11 @@ static void start_getting_events(struct smi_info *smi_info)
+ 	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
+ 	smi_info->curr_msg->data_size = 2;
+ 
+-	smi_info->handlers->start_transaction(
+-		smi_info->si_sm,
+-		smi_info->curr_msg->data,
+-		smi_info->curr_msg->data_size);
++	start_new_msg(smi_info, smi_info->curr_msg->data,
++		      smi_info->curr_msg->data_size);
+ 	smi_info->si_state = SI_GETTING_EVENTS;
+ }
+ 
+-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+-{
+-	smi_info->last_timeout_jiffies = jiffies;
+-	mod_timer(&smi_info->si_timer, new_val);
+-	smi_info->timer_running = true;
+-}
+-
+ /*
+  * When we have a situation where we run out of memory and cannot
+  * allocate messages, we just leave them in the BMC and run the system
+@@ -470,11 +486,11 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+  * Note that we cannot just use disable_irq(), since the interrupt may
+  * be shared.
+  */
+-static inline bool disable_si_irq(struct smi_info *smi_info)
++static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
+ {
+ 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+ 		smi_info->interrupt_disabled = true;
+-		start_check_enables(smi_info);
++		start_check_enables(smi_info, start_timer);
+ 		return true;
+ 	}
+ 	return false;
+@@ -484,7 +500,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
+ {
+ 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
+ 		smi_info->interrupt_disabled = false;
+-		start_check_enables(smi_info);
++		start_check_enables(smi_info, true);
+ 		return true;
+ 	}
+ 	return false;
+@@ -502,7 +518,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
+ 
+ 	msg = ipmi_alloc_smi_msg();
+ 	if (!msg) {
+-		if (!disable_si_irq(smi_info))
++		if (!disable_si_irq(smi_info, true))
+ 			smi_info->si_state = SI_NORMAL;
+ 	} else if (enable_si_irq(smi_info)) {
+ 		ipmi_free_smi_msg(msg);
+@@ -518,7 +534,7 @@ static void handle_flags(struct smi_info *smi_info)
+ 		/* Watchdog pre-timeout */
+ 		smi_inc_stat(smi_info, watchdog_pretimeouts);
+ 
+-		start_clear_flags(smi_info);
++		start_clear_flags(smi_info, true);
+ 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
+ 		if (smi_info->intf)
+ 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
+@@ -870,8 +886,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ 			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+ 
+-			smi_info->handlers->start_transaction(
+-				smi_info->si_sm, msg, 2);
++			start_new_msg(smi_info, msg, 2);
+ 			smi_info->si_state = SI_GETTING_FLAGS;
+ 			goto restart;
+ 		}
+@@ -901,7 +916,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
+ 		 * disable and messages disabled.
+ 		 */
+ 		if (smi_info->supports_event_msg_buff || smi_info->irq) {
+-			start_check_enables(smi_info);
++			start_check_enables(smi_info, true);
+ 		} else {
+ 			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+ 			if (!smi_info->curr_msg)
+@@ -1203,14 +1218,14 @@ static int smi_start_processing(void       *send_info,
+ 
+ 	new_smi->intf = intf;
+ 
+-	/* Try to claim any interrupts. */
+-	if (new_smi->irq_setup)
+-		new_smi->irq_setup(new_smi);
+-
+ 	/* Set up the timer that drives the interface. */
+ 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ 	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
+ 
++	/* Try to claim any interrupts. */
++	if (new_smi->irq_setup)
++		new_smi->irq_setup(new_smi);
++
+ 	/*
+ 	 * Check if the user forcefully enabled the daemon.
+ 	 */
+@@ -3515,7 +3530,7 @@ static int try_smi_init(struct smi_info *new_smi)
+ 	 * Start clearing the flags before we enable interrupts or the
+ 	 * timer to avoid racing with the timer.
+ 	 */
+-	start_clear_flags(new_smi);
++	start_clear_flags(new_smi, false);
+ 
+ 	/*
+ 	 * IRQ is defined to be set when non-zero.  req_events will
+@@ -3817,7 +3832,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
+ 		poll(to_clean);
+ 		schedule_timeout_uninterruptible(1);
+ 	}
+-	disable_si_irq(to_clean);
++	disable_si_irq(to_clean, false);
+ 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
+ 		poll(to_clean);
+ 		schedule_timeout_uninterruptible(1);
+diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
+index 30f522848c73..c19e7fc717c3 100644
+--- a/drivers/connector/connector.c
++++ b/drivers/connector/connector.c
+@@ -178,26 +178,21 @@ static int cn_call_callback(struct sk_buff *skb)
+  *
+  * It checks skb, netlink header and msg sizes, and calls callback helper.
+  */
+-static void cn_rx_skb(struct sk_buff *__skb)
++static void cn_rx_skb(struct sk_buff *skb)
+ {
+ 	struct nlmsghdr *nlh;
+-	struct sk_buff *skb;
+ 	int len, err;
+ 
+-	skb = skb_get(__skb);
+-
+ 	if (skb->len >= NLMSG_HDRLEN) {
+ 		nlh = nlmsg_hdr(skb);
+ 		len = nlmsg_len(nlh);
+ 
+ 		if (len < (int)sizeof(struct cn_msg) ||
+ 		    skb->len < nlh->nlmsg_len ||
+-		    len > CONNECTOR_MAX_MSG_SIZE) {
+-			kfree_skb(skb);
++		    len > CONNECTOR_MAX_MSG_SIZE)
+ 			return;
+-		}
+ 
+-		err = cn_call_callback(skb);
++		err = cn_call_callback(skb_get(skb));
+ 		if (err < 0)
+ 			kfree_skb(skb);
+ 	}
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 722a925795a2..9ce9dfeb1258 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1589,7 +1589,7 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
+ 		"Multi-Axis Controller"
+ 	};
+ 	const char *type, *bus;
+-	char buf[64];
++	char buf[64] = "";
+ 	unsigned int i;
+ 	int len;
+ 	int ret;
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 8b0178db6a04..b85a8614c128 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -3928,14 +3928,17 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+ 	dev = pci_physfn(dev);
+ 	for (bus = dev->bus; bus; bus = bus->parent) {
+ 		bridge = bus->self;
+-		if (!bridge || !pci_is_pcie(bridge) ||
++		/* If it's an integrated device, allow ATS */
++		if (!bridge)
++			return 1;
++		/* Connected via non-PCIe: no ATS */
++		if (!pci_is_pcie(bridge) ||
+ 		    pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
+ 			return 0;
++		/* If we found the root port, look it up in the ATSR */
+ 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
+ 			break;
+ 	}
+-	if (!bridge)
+-		return 0;
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
+index c4198fa490bf..9c1e8adaf4fc 100644
+--- a/drivers/isdn/i4l/isdn_ppp.c
++++ b/drivers/isdn/i4l/isdn_ppp.c
+@@ -301,6 +301,8 @@ isdn_ppp_open(int min, struct file *file)
+ 	is->compflags = 0;
+ 
+ 	is->reset = isdn_ppp_ccp_reset_alloc(is);
++	if (!is->reset)
++		return -ENOMEM;
+ 
+ 	is->lp = NULL;
+ 	is->mp_seqno = 0;       /* MP sequence number */
+@@ -320,6 +322,10 @@ isdn_ppp_open(int min, struct file *file)
+ 	 * VJ header compression init
+ 	 */
+ 	is->slcomp = slhc_init(16, 16);	/* not necessary for 2. link in bundle */
++	if (IS_ERR(is->slcomp)) {
++		isdn_ppp_ccp_reset_free(is);
++		return PTR_ERR(is->slcomp);
++	}
+ #endif
+ #ifdef CONFIG_IPPP_FILTER
+ 	is->pass_filter = NULL;
+@@ -567,10 +573,8 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
+ 			is->maxcid = val;
+ #ifdef CONFIG_ISDN_PPP_VJ
+ 			sltmp = slhc_init(16, val);
+-			if (!sltmp) {
+-				printk(KERN_ERR "ippp, can't realloc slhc struct\n");
+-				return -ENOMEM;
+-			}
++			if (IS_ERR(sltmp))
++				return PTR_ERR(sltmp);
+ 			if (is->slcomp)
+ 				slhc_free(is->slcomp);
+ 			is->slcomp = sltmp;
+diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
+index 084d346fb4c4..e15eef6a94e5 100644
+--- a/drivers/media/platform/vivid/vivid-osd.c
++++ b/drivers/media/platform/vivid/vivid-osd.c
+@@ -85,6 +85,7 @@ static int vivid_fb_ioctl(struct fb_info *info, unsigned cmd, unsigned long arg)
+ 	case FBIOGET_VBLANK: {
+ 		struct fb_vblank vblank;
+ 
++		memset(&vblank, 0, sizeof(vblank));
+ 		vblank.flags = FB_VBLANK_HAVE_COUNT | FB_VBLANK_HAVE_VCOUNT |
+ 			FB_VBLANK_HAVE_VSYNC;
+ 		vblank.count = 0;
+diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
+index 4069234abed5..a50750ce511d 100644
+--- a/drivers/media/usb/airspy/airspy.c
++++ b/drivers/media/usb/airspy/airspy.c
+@@ -132,7 +132,7 @@ struct airspy {
+ 	int            urbs_submitted;
+ 
+ 	/* USB control message buffer */
+-	#define BUF_SIZE 24
++	#define BUF_SIZE 128
+ 	u8 buf[BUF_SIZE];
+ 
+ 	/* Current configuration */
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 16d87bf8ac3c..72ba774df7a7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1194,7 +1194,6 @@ static int bond_master_upper_dev_link(struct net_device *bond_dev,
+ 	err = netdev_master_upper_dev_link_private(slave_dev, bond_dev, slave);
+ 	if (err)
+ 		return err;
+-	slave_dev->flags |= IFF_SLAVE;
+ 	rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE, GFP_KERNEL);
+ 	return 0;
+ }
+@@ -1452,6 +1451,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
+ 		}
+ 	}
+ 
++	/* set slave flag before open to prevent IPv6 addrconf */
++	slave_dev->flags |= IFF_SLAVE;
++
+ 	/* open the slave since the application closed it */
+ 	res = dev_open(slave_dev);
+ 	if (res) {
+@@ -1712,6 +1714,7 @@ err_close:
+ 	dev_close(slave_dev);
+ 
+ err_restore_mac:
++	slave_dev->flags &= ~IFF_SLAVE;
+ 	if (!bond->params.fail_over_mac ||
+ 	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ 		/* XXX TODO - fom follow mode needs to change master's
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 9d15566521a7..cfe49a07c7c1 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -715,10 +715,8 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 			val &= 0xffff;
+ 		}
+ 		vj = slhc_init(val2+1, val+1);
+-		if (!vj) {
+-			netdev_err(ppp->dev,
+-				   "PPP: no memory (VJ compressor)\n");
+-			err = -ENOMEM;
++		if (IS_ERR(vj)) {
++			err = PTR_ERR(vj);
+ 			break;
+ 		}
+ 		ppp_lock(ppp);
+diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
+index 079f7adfcde5..27ed25252aac 100644
+--- a/drivers/net/slip/slhc.c
++++ b/drivers/net/slip/slhc.c
+@@ -84,8 +84,9 @@ static long decode(unsigned char **cpp);
+ static unsigned char * put16(unsigned char *cp, unsigned short x);
+ static unsigned short pull16(unsigned char **cpp);
+ 
+-/* Initialize compression data structure
++/* Allocate compression data structure
+  *	slots must be in range 0 to 255 (zero meaning no compression)
++ * Returns pointer to structure or ERR_PTR() on error.
+  */
+ struct slcompress *
+ slhc_init(int rslots, int tslots)
+@@ -94,11 +95,14 @@ slhc_init(int rslots, int tslots)
+ 	register struct cstate *ts;
+ 	struct slcompress *comp;
+ 
++	if (rslots < 0 || rslots > 255 || tslots < 0 || tslots > 255)
++		return ERR_PTR(-EINVAL);
++
+ 	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
+ 	if (! comp)
+ 		goto out_fail;
+ 
+-	if ( rslots > 0  &&  rslots < 256 ) {
++	if (rslots > 0) {
+ 		size_t rsize = rslots * sizeof(struct cstate);
+ 		comp->rstate = kzalloc(rsize, GFP_KERNEL);
+ 		if (! comp->rstate)
+@@ -106,7 +110,7 @@ slhc_init(int rslots, int tslots)
+ 		comp->rslot_limit = rslots - 1;
+ 	}
+ 
+-	if ( tslots > 0  &&  tslots < 256 ) {
++	if (tslots > 0) {
+ 		size_t tsize = tslots * sizeof(struct cstate);
+ 		comp->tstate = kzalloc(tsize, GFP_KERNEL);
+ 		if (! comp->tstate)
+@@ -141,7 +145,7 @@ out_free2:
+ out_free:
+ 	kfree(comp);
+ out_fail:
+-	return NULL;
++	return ERR_PTR(-ENOMEM);
+ }
+ 
+ 
+diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
+index 05387b1e2e95..a17d86a57734 100644
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -164,7 +164,7 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
+ 	if (cbuff == NULL)
+ 		goto err_exit;
+ 	slcomp = slhc_init(16, 16);
+-	if (slcomp == NULL)
++	if (IS_ERR(slcomp))
+ 		goto err_exit;
+ #endif
+ 	spin_lock_bh(&sl->lock);
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 6928448f6b7f..2b45d0168c3c 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1845,10 +1845,10 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+ 	struct team *team = netdev_priv(dev);
+ 	struct team_port *port;
+ 
+-	rcu_read_lock();
+-	list_for_each_entry_rcu(port, &team->port_list, list)
++	mutex_lock(&team->lock);
++	list_for_each_entry(port, &team->port_list, list)
+ 		vlan_vid_del(port->dev, proto, vid);
+-	rcu_read_unlock();
++	mutex_unlock(&team->lock);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
+index e4b7a47a825c..5efaa9ab5af5 100644
+--- a/drivers/net/usb/cdc_mbim.c
++++ b/drivers/net/usb/cdc_mbim.c
+@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
+ 	.ndo_stop             = usbnet_stop,
+ 	.ndo_start_xmit       = usbnet_start_xmit,
+ 	.ndo_tx_timeout       = usbnet_tx_timeout,
+-	.ndo_change_mtu       = usbnet_change_mtu,
++	.ndo_change_mtu       = cdc_ncm_change_mtu,
+ 	.ndo_set_mac_address  = eth_mac_addr,
+ 	.ndo_validate_addr    = eth_validate_addr,
+ 	.ndo_vlan_rx_add_vid  = cdc_mbim_rx_add_vid,
+diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
+index 8067b8fbb0ee..0b481c30979b 100644
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -41,6 +41,7 @@
+ #include <linux/module.h>
+ #include <linux/netdevice.h>
+ #include <linux/ctype.h>
++#include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+ #include <linux/workqueue.h>
+ #include <linux/mii.h>
+@@ -687,6 +688,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+ 	kfree(ctx);
+ }
+ 
++/* we need to override the usbnet change_mtu ndo for two reasons:
++ *  - respect the negotiated maximum datagram size
++ *  - avoid unwanted changes to rx and tx buffers
++ */
++int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
++{
++	struct usbnet *dev = netdev_priv(net);
++	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
++	int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
++
++	if (new_mtu <= 0 || new_mtu > maxmtu)
++		return -EINVAL;
++	net->mtu = new_mtu;
++	return 0;
++}
++EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
++
++static const struct net_device_ops cdc_ncm_netdev_ops = {
++	.ndo_open	     = usbnet_open,
++	.ndo_stop	     = usbnet_stop,
++	.ndo_start_xmit	     = usbnet_start_xmit,
++	.ndo_tx_timeout	     = usbnet_tx_timeout,
++	.ndo_change_mtu	     = cdc_ncm_change_mtu,
++	.ndo_set_mac_address = eth_mac_addr,
++	.ndo_validate_addr   = eth_validate_addr,
++};
++
+ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting)
+ {
+ 	const struct usb_cdc_union_desc *union_desc = NULL;
+@@ -861,6 +889,9 @@ advance:
+ 	/* add our sysfs attrs */
+ 	dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
+ 
++	/* must handle MTU changes */
++	dev->net->netdev_ops = &cdc_ncm_netdev_ops;
++
+ 	return 0;
+ 
+ error2:
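
cdc_ncm_change_mtu() above boils down to one bound check: the usable MTU is the negotiated maximum datagram size minus the link-layer header. A hedged userspace sketch with made-up sizes:

#include <errno.h>
#include <stdio.h>

/* same bound as cdc_ncm_change_mtu(); datagram/header sizes are invented */
static int change_mtu(int *mtu, int new_mtu, int max_datagram, int hlen)
{
	int maxmtu = max_datagram - hlen;

	if (new_mtu <= 0 || new_mtu > maxmtu)
		return -EINVAL;
	*mtu = new_mtu;
	return 0;
}

int main(void)
{
	int mtu = 1500;
	int err;

	err = change_mtu(&mtu, 2048, 2048, 14);
	printf("set 2048 -> %d\n", err);	/* -22: exceeds 2048 - 14 */

	err = change_mtu(&mtu, 2000, 2048, 14);
	printf("set 2000 -> %d, mtu now %d\n", err, mtu);	/* 0, 2000 */
	return 0;
}
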
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index c8186ffda1a3..2e61a799f32a 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		kfree_skb(skb);
+ 		goto drop;
+ 	}
+-	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
+-	 * will cause bad checksum on forwarded packets
+-	 */
+-	if (skb->ip_summed == CHECKSUM_NONE &&
+-	    rcv->features & NETIF_F_RXCSUM)
+-		skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+ 	if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
+ 		struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 0085b8df83e2..940f78e41993 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2581,7 +2581,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ 			 struct nlattr *tb[], struct nlattr *data[])
+ {
+ 	struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
+-	struct vxlan_dev *vxlan = netdev_priv(dev);
++	struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
+ 	struct vxlan_rdst *dst = &vxlan->default_dst;
+ 	__u32 vni;
+ 	int err;
+@@ -2714,9 +2714,13 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
+ 	if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
+ 		vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL;
+ 
+-	if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET,
+-			   vxlan->dst_port, vxlan->flags)) {
+-		pr_info("duplicate VNI %u\n", vni);
++	list_for_each_entry(tmp, &vn->vxlan_list, next) {
++		if (tmp->default_dst.remote_vni == vni &&
++		    (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
++		     tmp->saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
++		    tmp->dst_port == vxlan->dst_port &&
++		    (tmp->flags & VXLAN_F_RCV_FLAGS) ==
++		    (vxlan->flags & VXLAN_F_RCV_FLAGS))
+ 		return -EEXIST;
+ 	}
+ 
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 0866c5dfdf87..5e5b6184e720 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -2007,8 +2007,11 @@ static int __init netback_init(void)
+ 	if (!xen_domain())
+ 		return -ENODEV;
+ 
+-	/* Allow as many queues as there are CPUs, by default */
+-	xenvif_max_queues = num_online_cpus();
++	/* Allow as many queues as there are CPUs if user has not
++	 * specified a value.
++	 */
++	if (xenvif_max_queues == 0)
++		xenvif_max_queues = num_online_cpus();
+ 
+ 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
+ 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 52f081f4dfd5..fd51626e859e 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1710,19 +1710,19 @@ static void xennet_destroy_queues(struct netfront_info *info)
+ }
+ 
+ static int xennet_create_queues(struct netfront_info *info,
+-				unsigned int num_queues)
++				unsigned int *num_queues)
+ {
+ 	unsigned int i;
+ 	int ret;
+ 
+-	info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
++	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
+ 			       GFP_KERNEL);
+ 	if (!info->queues)
+ 		return -ENOMEM;
+ 
+ 	rtnl_lock();
+ 
+-	for (i = 0; i < num_queues; i++) {
++	for (i = 0; i < *num_queues; i++) {
+ 		struct netfront_queue *queue = &info->queues[i];
+ 
+ 		queue->id = i;
+@@ -1732,7 +1732,7 @@ static int xennet_create_queues(struct netfront_info *info,
+ 		if (ret < 0) {
+ 			dev_warn(&info->netdev->dev,
+ 				 "only created %d queues\n", i);
+-			num_queues = i;
++			*num_queues = i;
+ 			break;
+ 		}
+ 
+@@ -1742,11 +1742,11 @@ static int xennet_create_queues(struct netfront_info *info,
+ 			napi_enable(&queue->napi);
+ 	}
+ 
+-	netif_set_real_num_tx_queues(info->netdev, num_queues);
++	netif_set_real_num_tx_queues(info->netdev, *num_queues);
+ 
+ 	rtnl_unlock();
+ 
+-	if (num_queues == 0) {
++	if (*num_queues == 0) {
+ 		dev_err(&info->netdev->dev, "no queues\n");
+ 		return -EINVAL;
+ 	}
+@@ -1792,7 +1792,7 @@ static int talk_to_netback(struct xenbus_device *dev,
+ 	if (info->queues)
+ 		xennet_destroy_queues(info);
+ 
+-	err = xennet_create_queues(info, num_queues);
++	err = xennet_create_queues(info, &num_queues);
+ 	if (err < 0)
+ 		goto destroy_ring;
+ 
+@@ -2140,8 +2140,11 @@ static int __init netif_init(void)
+ 
+ 	pr_info("Initialising Xen virtual ethernet driver\n");
+ 
+-	/* Allow as many queues as there are CPUs, by default */
+-	xennet_max_queues = num_online_cpus();
++	/* Allow as many queues as there are CPUs if user has not
++	 * specified a value.
++	 */
++	if (xennet_max_queues == 0)
++		xennet_max_queues = num_online_cpus();
+ 
+ 	return xenbus_register_frontend(&netfront_driver);
+ }
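
Both Xen hunks above apply the same "default only if unset" idiom for a module parameter, where 0 means the user supplied no value. A rough userspace analogue, with sysconf() standing in for num_online_cpus():

#include <stdio.h>
#include <unistd.h>

static unsigned int max_queues;	/* 0 means the user supplied no value */

static void backend_init(void)
{
	/* only fill in the default when the parameter was left unset */
	if (max_queues == 0)
		max_queues = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN);
	printf("using %u queues\n", max_queues);
}

int main(void)
{
	backend_init();
	return 0;
}
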
+diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
+index 761e77bfce5d..e56f1569f6c3 100644
+--- a/drivers/parisc/iommu-helpers.h
++++ b/drivers/parisc/iommu-helpers.h
+@@ -104,7 +104,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ 	struct scatterlist *contig_sg;	   /* contig chunk head */
+ 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
+ 	unsigned int n_mappings = 0;
+-	unsigned int max_seg_size = dma_get_max_seg_size(dev);
++	unsigned int max_seg_size = min(dma_get_max_seg_size(dev),
++					(unsigned)DMA_CHUNK_SIZE);
++	unsigned int max_seg_boundary = dma_get_seg_boundary(dev) + 1;
++	if (max_seg_boundary)	/* check if the addition above didn't overflow */
++		max_seg_size = min(max_seg_size, max_seg_boundary);
+ 
+ 	while (nents > 0) {
+ 
+@@ -138,14 +142,11 @@ iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+ 
+ 			/*
+ 			** First make sure current dma stream won't
+-			** exceed DMA_CHUNK_SIZE if we coalesce the
++			** exceed max_seg_size if we coalesce the
+ 			** next entry.
+ 			*/   
+-			if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
+-					    IOVP_SIZE) > DMA_CHUNK_SIZE))
+-				break;
+-
+-			if (startsg->length + dma_len > max_seg_size)
++			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length, IOVP_SIZE) >
++				     max_seg_size))
+ 				break;
+ 
+ 			/*
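
The iommu hunk above folds the segment boundary into max_seg_size, using wraparound of the "+ 1" as the "no boundary" sentinel (an all-ones mask plus one is zero). A small self-contained sketch of that check:

#include <stdio.h>

static unsigned int clamp_seg_size(unsigned int max_seg_size,
				   unsigned int boundary_mask)
{
	/* an all-ones mask means "no boundary"; +1 then wraps to 0 */
	unsigned int limit = boundary_mask + 1;

	if (limit && limit < max_seg_size)
		max_seg_size = limit;
	return max_seg_size;
}

int main(void)
{
	printf("%u\n", clamp_seg_size(65536, 0x0fff));		/* 4096 */
	printf("%u\n", clamp_seg_size(65536, 0xffffffffu));	/* 65536 */
	return 0;
}
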
+diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
+index d542e06d6cd3..10e520d6bb75 100644
+--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
++++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
+@@ -1268,6 +1268,7 @@ static int
+ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ {
+ 	struct lov_stripe_md *ulsm = _ulsm;
++	struct lov_oinfo **p;
+ 	int nob, i;
+ 
+ 	nob = offsetof(struct lov_stripe_md, lsm_oinfo[lsm->lsm_stripe_count]);
+@@ -1277,9 +1278,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ 	if (copy_to_user(ulsm, lsm, sizeof(*ulsm)))
+ 		return -EFAULT;
+ 
+-	for (i = 0; i < lsm->lsm_stripe_count; i++) {
+-		if (copy_to_user(ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i],
+-				      sizeof(lsm->lsm_oinfo[0])))
++	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++		struct lov_oinfo __user *up;
++		if (get_user(up, ulsm->lsm_oinfo + i) ||
++		    copy_to_user(up, *p, sizeof(struct lov_oinfo)))
+ 			return -EFAULT;
+ 	}
+ 	return 0;
+@@ -1287,9 +1289,10 @@ echo_copyout_lsm(struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob)
+ 
+ static int
+ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+-		 void *ulsm, int ulsm_nob)
++		struct lov_stripe_md __user *ulsm, int ulsm_nob)
+ {
+ 	struct echo_client_obd *ec = ed->ed_ec;
++	struct lov_oinfo **p;
+ 	int		     i;
+ 
+ 	if (ulsm_nob < sizeof(*lsm))
+@@ -1305,11 +1308,10 @@ echo_copyin_lsm(struct echo_device *ed, struct lov_stripe_md *lsm,
+ 		return -EINVAL;
+ 
+ 
+-	for (i = 0; i < lsm->lsm_stripe_count; i++) {
+-		if (copy_from_user(lsm->lsm_oinfo[i],
+-				       ((struct lov_stripe_md *)ulsm)-> \
+-				       lsm_oinfo[i],
+-				       sizeof(lsm->lsm_oinfo[0])))
++	for (i = 0, p = lsm->lsm_oinfo; i < lsm->lsm_stripe_count; i++, p++) {
++		struct lov_oinfo __user *up;
++		if (get_user(up, ulsm->lsm_oinfo + i) ||
++		    copy_from_user(*p, up, sizeof(struct lov_oinfo)))
+ 			return -EFAULT;
+ 	}
+ 	return 0;
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index d68c4a4db682..ee11b301f3da 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -1034,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 	unsigned delay;
+ 
+ 	/* Continue a partial initialization */
+-	if (type == HUB_INIT2)
+-		goto init2;
+-	if (type == HUB_INIT3)
++	if (type == HUB_INIT2 || type == HUB_INIT3) {
++		device_lock(hub->intfdev);
++
++		/* Was the hub disconnected while we were waiting? */
++		if (hub->disconnected) {
++			device_unlock(hub->intfdev);
++			kref_put(&hub->kref, hub_release);
++			return;
++		}
++		if (type == HUB_INIT2)
++			goto init2;
+ 		goto init3;
++	}
++	kref_get(&hub->kref);
+ 
+ 	/* The superspeed hub except for root hub has to use Hub Depth
+ 	 * value as an offset into the route string to locate the bits
+@@ -1235,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 			queue_delayed_work(system_power_efficient_wq,
+ 					&hub->init_work,
+ 					msecs_to_jiffies(delay));
++			device_unlock(hub->intfdev);
+ 			return;		/* Continues at init3: below */
+ 		} else {
+ 			msleep(delay);
+@@ -1256,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
+ 	/* Allow autosuspend if it was suppressed */
+ 	if (type <= HUB_INIT3)
+ 		usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
++
++	if (type == HUB_INIT2 || type == HUB_INIT3)
++		device_unlock(hub->intfdev);
++
++	kref_put(&hub->kref, hub_release);
+ }
+ 
+ /* Implement the continuations for the delays above */
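
The hub_activate() changes above hinge on the kref pattern: pin the object before deferring work, drop the pin when the continuation runs, free on the last put. A userspace approximation with C11 atomics (struct and function names invented):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct hub {
	atomic_int refcount;
	int disconnected;
};

static void hub_get(struct hub *h)
{
	atomic_fetch_add(&h->refcount, 1);
}

static void hub_put(struct hub *h)
{
	if (atomic_fetch_sub(&h->refcount, 1) == 1) {	/* that was the last ref */
		printf("hub released\n");
		free(h);
	}
}

/* stands in for the queued HUB_INIT2/HUB_INIT3 continuation */
static void init_continuation(struct hub *h)
{
	if (!h->disconnected)
		printf("continuing initialization\n");
	hub_put(h);	/* drop the reference taken before deferring */
}

int main(void)
{
	struct hub *h = calloc(1, sizeof(*h));

	if (!h)
		return 1;
	atomic_init(&h->refcount, 1);	/* caller's reference */

	hub_get(h);		/* pin across the deferred step */
	init_continuation(h);	/* would normally run from a workqueue */
	hub_put(h);		/* caller drops its own reference */
	return 0;
}
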
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 1e6d7579709e..f6bb118e4501 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4794,8 +4794,16 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+ 	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+ 	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
+ 	slot_ctx->dev_info |= cpu_to_le32(DEV_HUB);
++	/*
++	 * refer to section 6.2.2: MTT should be 0 for full speed hub,
++	 * but it may be already set to 1 when setup an xHCI virtual
++	 * device, so clear it anyway.
++	 */
+ 	if (tt->multi)
+ 		slot_ctx->dev_info |= cpu_to_le32(DEV_MTT);
++	else if (hdev->speed == USB_SPEED_FULL)
++		slot_ctx->dev_info &= cpu_to_le32(~DEV_MTT);
++
+ 	if (xhci->hci_version > 0x95) {
+ 		xhci_dbg(xhci, "xHCI version %x needs hub "
+ 				"TT think time and number of ports\n",
+@@ -5046,6 +5054,10 @@ static int __init xhci_hcd_init(void)
+ 	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
+ 	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
+ 	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
++
++	if (usb_disabled())
++		return -ENODEV;
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index 7d4f51a32e66..59b2126b21a3 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -160,6 +160,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
+ 	{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
+ 	{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
++	{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ 	{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+ 	{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
+ 	{ USB_DEVICE(0x1BA4, 0x0002) },	/* Silicon Labs 358x factory default */
+diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
+index f51a5d52c0ed..ec1b8f2c1183 100644
+--- a/drivers/usb/serial/ipaq.c
++++ b/drivers/usb/serial/ipaq.c
+@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
+ 	 * through. Since this has a reasonably high failure rate, we retry
+ 	 * several times.
+ 	 */
+-	while (retries--) {
++	while (retries) {
++		retries--;
+ 		result = usb_control_msg(serial->dev,
+ 				usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
+ 				0x1, 0, NULL, 0, 100);
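
The ipaq_open() change above fixes a classic post-decrement off-by-one: "while (retries--)" leaves the counter at -1 after exhaustion, so a later "if (!retries)" test in the surrounding driver code would never see zero. The difference is easy to demonstrate in isolation:

#include <stdio.h>

int main(void)
{
	int retries;

	retries = 3;
	while (retries--)
		;	/* pretend every attempt fails */
	printf("post-decrement loop ends with retries = %d\n", retries);	/* -1 */

	retries = 3;
	while (retries) {
		retries--;
		/* pretend every attempt fails */
	}
	printf("fixed loop ends with retries = %d\n", retries);	/* 0 */
	return 0;
}
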
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 4bd23bba816f..ee71baddbb10 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
+ 
+ 	vma->vm_ops = &gntdev_vmops;
+ 
+-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
++	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ 
+ 	if (use_ptemod)
+ 		vma->vm_flags |= VM_DONTCOPY;
+diff --git a/fs/direct-io.c b/fs/direct-io.c
+index 745d2342651a..d83a021a659f 100644
+--- a/fs/direct-io.c
++++ b/fs/direct-io.c
+@@ -1159,6 +1159,16 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ 		}
+ 	}
+ 
++	/* Once we sampled i_size check for reads beyond EOF */
++	dio->i_size = i_size_read(inode);
++	if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
++		if (dio->flags & DIO_LOCKING)
++			mutex_unlock(&inode->i_mutex);
++		kmem_cache_free(dio_cache, dio);
++		retval = 0;
++		goto out;
++	}
++
+ 	/*
+ 	 * For file extending writes updating i_size before data writeouts
+ 	 * complete can expose uninitialized blocks in dumb filesystems.
+@@ -1212,7 +1222,6 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+ 	sdio.next_block_for_io = -1;
+ 
+ 	dio->iocb = iocb;
+-	dio->i_size = i_size_read(inode);
+ 
+ 	spin_lock_init(&dio->bio_lock);
+ 	dio->refcount = 1;
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index fa11b3a367be..1ce6e1049a3b 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -428,6 +428,25 @@ static inline void bpf_jit_free(struct bpf_prog *fp)
+ 
+ #define BPF_ANC		BIT(15)
+ 
++static inline bool bpf_needs_clear_a(const struct sock_filter *first)
++{
++	switch (first->code) {
++	case BPF_RET | BPF_K:
++	case BPF_LD | BPF_W | BPF_LEN:
++		return false;
++
++	case BPF_LD | BPF_W | BPF_ABS:
++	case BPF_LD | BPF_H | BPF_ABS:
++	case BPF_LD | BPF_B | BPF_ABS:
++		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
++			return true;
++		return false;
++
++	default:
++		return true;
++	}
++}
++
+ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
+ {
+ 	BUG_ON(ftest->code & BPF_ANC);
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 61f4f2d5c882..9128b4e9f541 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -802,6 +802,7 @@ struct user_struct {
+ 	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
+ #endif
+ 	unsigned long locked_shm; /* How many pages of mlocked shm ? */
++	unsigned long unix_inflight;	/* How many files in flight in unix sockets */
+ 
+ #ifdef CONFIG_KEYS
+ 	struct key *uid_keyring;	/* UID specific keyring */
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 4307e20a4a4a..1f17abe23725 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -3320,7 +3320,8 @@ struct skb_gso_cb {
+ 	int	encap_level;
+ 	__u16	csum_start;
+ };
+-#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)
++#define SKB_SGO_CB_OFFSET	32
++#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)((skb)->cb + SKB_SGO_CB_OFFSET))
+ 
+ static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
+ {
+diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
+index 76d1e38aabe1..0c53fd51bf9b 100644
+--- a/include/linux/syscalls.h
++++ b/include/linux/syscalls.h
+@@ -524,7 +524,7 @@ asmlinkage long sys_chown(const char __user *filename,
+ asmlinkage long sys_lchown(const char __user *filename,
+ 				uid_t user, gid_t group);
+ asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ asmlinkage long sys_chown16(const char __user *filename,
+ 				old_uid_t user, old_gid_t group);
+ asmlinkage long sys_lchown16(const char __user *filename,
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 8715287c3b1f..69c44d981da3 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -35,7 +35,7 @@ typedef __kernel_gid16_t        gid16_t;
+ 
+ typedef unsigned long		uintptr_t;
+ 
+-#ifdef CONFIG_UID16
++#ifdef CONFIG_HAVE_UID16
+ /* This is defined by include/asm-{arch}/posix_types.h */
+ typedef __kernel_old_uid_t	old_uid_t;
+ typedef __kernel_old_gid_t	old_gid_t;
+diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
+index 7c9b484735c5..e7827ae2462c 100644
+--- a/include/linux/usb/cdc_ncm.h
++++ b/include/linux/usb/cdc_ncm.h
+@@ -133,6 +133,7 @@ struct cdc_ncm_ctx {
+ };
+ 
+ u8 cdc_ncm_select_altsetting(struct usb_interface *intf);
++int cdc_ncm_change_mtu(struct net_device *net, int new_mtu);
+ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting);
+ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf);
+ struct sk_buff *cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign);
+diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
+index 84b20835b736..0dc0a51da38f 100644
+--- a/include/net/inet_ecn.h
++++ b/include/net/inet_ecn.h
+@@ -111,11 +111,24 @@ static inline void ipv4_copy_dscp(unsigned int dscp, struct iphdr *inner)
+ 
+ struct ipv6hdr;
+ 
+-static inline int IP6_ECN_set_ce(struct ipv6hdr *iph)
++/* Note:
++ * IP_ECN_set_ce() has to tweak IPV4 checksum when setting CE,
++ * meaning both changes have no effect on skb->csum if/when CHECKSUM_COMPLETE
++ * In IPv6 case, no checksum compensates the change in IPv6 header,
++ * so we have to update skb->csum.
++ */
++static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
+ {
++	__be32 from, to;
++
+ 	if (INET_ECN_is_not_ect(ipv6_get_dsfield(iph)))
+ 		return 0;
+-	*(__be32*)iph |= htonl(INET_ECN_CE << 20);
++
++	from = *(__be32 *)iph;
++	to = from | htonl(INET_ECN_CE << 20);
++	*(__be32 *)iph = to;
++	if (skb->ip_summed == CHECKSUM_COMPLETE)
++		skb->csum = csum_add(csum_sub(skb->csum, from), to);
+ 	return 1;
+ }
+ 
+@@ -142,7 +155,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
+ 	case cpu_to_be16(ETH_P_IPV6):
+ 		if (skb_network_header(skb) + sizeof(struct ipv6hdr) <=
+ 		    skb_tail_pointer(skb))
+-			return IP6_ECN_set_ce(ipv6_hdr(skb));
++			return IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ 		break;
+ 	}
+ 
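
IP6_ECN_set_ce() above compensates skb->csum with csum_add(csum_sub(...)). The one's-complement arithmetic behind those helpers, sketched in userspace (byte-order handling omitted, constants made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t csum_add(uint32_t csum, uint32_t addend)
{
	csum += addend;
	return csum + (csum < addend);	/* end-around carry */
}

static uint32_t csum_sub(uint32_t csum, uint32_t addend)
{
	return csum_add(csum, ~addend);	/* one's-complement subtract */
}

int main(void)
{
	uint32_t sum  = 0x11112222;		/* pretend CHECKSUM_COMPLETE value */
	uint32_t from = 0x60000000;		/* old first word of the IPv6 header */
	uint32_t to   = from | (3u << 20);	/* same word with CE (binary 11) set */

	sum = csum_add(csum_sub(sum, from), to);
	printf("updated csum: 0x%08x\n", sum);	/* 0x11412222 */
	return 0;
}
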
+diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
+index 360c4802288d..7682cb2ae237 100644
+--- a/include/net/inet_timewait_sock.h
++++ b/include/net/inet_timewait_sock.h
+@@ -112,7 +112,19 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
+ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 			   struct inet_hashinfo *hashinfo);
+ 
+-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo);
++void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
++			  bool rearm);
++
++static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
++{
++	__inet_twsk_schedule(tw, timeo, false);
++}
++
++static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
++{
++	__inet_twsk_schedule(tw, timeo, true);
++}
++
+ void inet_twsk_deschedule(struct inet_timewait_sock *tw);
+ 
+ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 47dcd3aa6e23..141d562064a7 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1019,6 +1019,16 @@ static int check_alu_op(struct reg_state *regs, struct bpf_insn *insn)
+ 			return -EINVAL;
+ 		}
+ 
++		if ((opcode == BPF_LSH || opcode == BPF_RSH ||
++		     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
++			int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
++
++			if (insn->imm < 0 || insn->imm >= size) {
++				verbose("invalid shift %d\n", insn->imm);
++				return -EINVAL;
++			}
++		}
++
+ 		/* pattern match 'bpf_add Rx, imm' instruction */
+ 		if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 &&
+ 		    regs[insn->dst_reg].type == FRAME_PTR &&
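
The verifier hunk above rejects shift immediates that reach or exceed the operand width, since such shifts are undefined in C and CPU-dependent in hardware (x86 masks the count, other architectures may not). The accepted range is easy to state standalone:

#include <stdio.h>

/* mirrors the new check: BPF_ALU64 shifts take 0..63, BPF_ALU 0..31 */
static int shift_imm_ok(int imm, int size)
{
	return imm >= 0 && imm < size;
}

int main(void)
{
	printf("32-bit lsh by 31: %s\n", shift_imm_ok(31, 32) ? "ok" : "invalid shift");
	printf("32-bit lsh by 32: %s\n", shift_imm_ok(32, 32) ? "ok" : "invalid shift");
	printf("64-bit lsh by 63: %s\n", shift_imm_ok(63, 64) ? "ok" : "invalid shift");
	return 0;
}

The matching bpf_check_classic() hunk further down enforces the same 0..31 bound for classic BPF programs before they ever reach a JIT.
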
+diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
+index ac4b96eccade..bd3357e69c5c 100644
+--- a/net/batman-adv/bridge_loop_avoidance.c
++++ b/net/batman-adv/bridge_loop_avoidance.c
+@@ -112,21 +112,17 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
+ }
+ 
+ /* finally deinitialize the claim */
+-static void batadv_claim_free_rcu(struct rcu_head *rcu)
++static void batadv_claim_release(struct batadv_bla_claim *claim)
+ {
+-	struct batadv_bla_claim *claim;
+-
+-	claim = container_of(rcu, struct batadv_bla_claim, rcu);
+-
+ 	batadv_backbone_gw_free_ref(claim->backbone_gw);
+-	kfree(claim);
++	kfree_rcu(claim, rcu);
+ }
+ 
+ /* free a claim, call claim_free_rcu if its the last reference */
+ static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
+ {
+ 	if (atomic_dec_and_test(&claim->refcount))
+-		call_rcu(&claim->rcu, batadv_claim_free_rcu);
++		batadv_claim_release(claim);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
+index 1918cd50b62e..b6bff9c1877a 100644
+--- a/net/batman-adv/hard-interface.h
++++ b/net/batman-adv/hard-interface.h
+@@ -64,18 +64,6 @@ batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
+ 		call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
+ }
+ 
+-/**
+- * batadv_hardif_free_ref_now - decrement the hard interface refcounter and
+- *  possibly free it (without rcu callback)
+- * @hard_iface: the hard interface to free
+- */
+-static inline void
+-batadv_hardif_free_ref_now(struct batadv_hard_iface *hard_iface)
+-{
+-	if (atomic_dec_and_test(&hard_iface->refcount))
+-		batadv_hardif_free_rcu(&hard_iface->rcu);
+-}
+-
+ static inline struct batadv_hard_iface *
+ batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
+ {
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index a449195c5b2b..2fbd3a6bde9a 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -175,28 +175,25 @@ void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
+ }
+ 
+ /**
+- * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
+- *  its refcount on the orig_node
+- * @rcu: rcu pointer of the nc node
++ * batadv_nc_node_release - release nc_node from lists and queue for free after
++ *  rcu grace period
++ * @nc_node: the nc node to free
+  */
+-static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
++static void batadv_nc_node_release(struct batadv_nc_node *nc_node)
+ {
+-	struct batadv_nc_node *nc_node;
+-
+-	nc_node = container_of(rcu, struct batadv_nc_node, rcu);
+ 	batadv_orig_node_free_ref(nc_node->orig_node);
+-	kfree(nc_node);
++	kfree_rcu(nc_node, rcu);
+ }
+ 
+ /**
+- * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
+- * frees it
++ * batadv_nc_node_free_ref - decrement the nc node refcounter and possibly
++ *  release it
+  * @nc_node: the nc node to free
+  */
+ static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
+ {
+ 	if (atomic_dec_and_test(&nc_node->refcount))
+-		call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
++		batadv_nc_node_release(nc_node);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index dfae97408628..77ea1d4de2ba 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -150,86 +150,58 @@ err:
+ }
+ 
+ /**
+- * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
+- * @rcu: rcu pointer of the neigh_ifinfo object
+- */
+-static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
+-{
+-	struct batadv_neigh_ifinfo *neigh_ifinfo;
+-
+-	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
+-
+-	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+-		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
+-
+-	kfree(neigh_ifinfo);
+-}
+-
+-/**
+- * batadv_neigh_ifinfo_free_now - decrement the refcounter and possibly free
+- *  the neigh_ifinfo (without rcu callback)
++ * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
++ *  free after rcu grace period
+  * @neigh_ifinfo: the neigh_ifinfo object to release
+  */
+ static void
+-batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
++batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
+ {
+-	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
+-		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
++	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
++		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
++
++	kfree_rcu(neigh_ifinfo, rcu);
+ }
+ 
+ /**
+- * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
++ * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
+  *  the neigh_ifinfo
+  * @neigh_ifinfo: the neigh_ifinfo object to release
+  */
+ void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
+ {
+ 	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
+-		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
++		batadv_neigh_ifinfo_release(neigh_ifinfo);
+ }
+ 
+ /**
+- * batadv_neigh_node_free_rcu - free the neigh_node
+- * @rcu: rcu pointer of the neigh_node
++ * batadv_neigh_node_release - release neigh_node from lists and queue for
++ *  free after rcu grace period
++ * @neigh_node: neigh neighbor to free
+  */
+-static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
++static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
+ {
+ 	struct hlist_node *node_tmp;
+-	struct batadv_neigh_node *neigh_node;
+ 	struct batadv_neigh_ifinfo *neigh_ifinfo;
+ 
+-	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
+-
+ 	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
+ 				  &neigh_node->ifinfo_list, list) {
+-		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
++		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
+ 	}
+-	batadv_hardif_free_ref_now(neigh_node->if_incoming);
++	batadv_hardif_free_ref(neigh_node->if_incoming);
+ 
+-	kfree(neigh_node);
+-}
+-
+-/**
+- * batadv_neigh_node_free_ref_now - decrement the neighbors refcounter
+- *  and possibly free it (without rcu callback)
+- * @neigh_node: neigh neighbor to free
+- */
+-static void
+-batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
+-{
+-	if (atomic_dec_and_test(&neigh_node->refcount))
+-		batadv_neigh_node_free_rcu(&neigh_node->rcu);
++	kfree_rcu(neigh_node, rcu);
+ }
+ 
+ /**
+  * batadv_neigh_node_free_ref - decrement the neighbors refcounter
+- *  and possibly free it
++ *  and possibly release it
+  * @neigh_node: neigh neighbor to free
+  */
+ void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
+ {
+ 	if (atomic_dec_and_test(&neigh_node->refcount))
+-		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
++		batadv_neigh_node_release(neigh_node);
+ }
+ 
+ /**
+@@ -495,108 +467,99 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ }
+ 
+ /**
+- * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
+- * @rcu: rcu pointer of the orig_ifinfo object
++ * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
++ *  free after rcu grace period
++ * @orig_ifinfo: the orig_ifinfo object to release
+  */
+-static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
++static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
+ {
+-	struct batadv_orig_ifinfo *orig_ifinfo;
+ 	struct batadv_neigh_node *router;
+ 
+-	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
+-
+ 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
+-		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
++		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
+ 
+ 	/* this is the last reference to this object */
+ 	router = rcu_dereference_protected(orig_ifinfo->router, true);
+ 	if (router)
+-		batadv_neigh_node_free_ref_now(router);
+-	kfree(orig_ifinfo);
++		batadv_neigh_node_free_ref(router);
++
++	kfree_rcu(orig_ifinfo, rcu);
+ }
+ 
+ /**
+- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+- *  the orig_ifinfo (without rcu callback)
++ * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
++ *  the orig_ifinfo
+  * @orig_ifinfo: the orig_ifinfo object to release
+  */
+-static void
+-batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
++void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
+ {
+ 	if (atomic_dec_and_test(&orig_ifinfo->refcount))
+-		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
++		batadv_orig_ifinfo_release(orig_ifinfo);
+ }
+ 
+ /**
+- * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
+- *  the orig_ifinfo
+- * @orig_ifinfo: the orig_ifinfo object to release
++ * batadv_orig_node_free_rcu - free the orig_node
++ * @rcu: rcu pointer of the orig_node
+  */
+-void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
++static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
+ {
+-	if (atomic_dec_and_test(&orig_ifinfo->refcount))
+-		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
++	struct batadv_orig_node *orig_node;
++
++	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
++
++	batadv_mcast_purge_orig(orig_node);
++
++	batadv_frag_purge_orig(orig_node, NULL);
++
++	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
++		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
++
++	kfree(orig_node->tt_buff);
++	kfree(orig_node);
+ }
+ 
+-static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
++/**
++ * batadv_orig_node_release - release orig_node from lists and queue for
++ *  free after rcu grace period
++ * @orig_node: the orig node to free
++ */
++static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
+ {
+ 	struct hlist_node *node_tmp;
+ 	struct batadv_neigh_node *neigh_node;
+-	struct batadv_orig_node *orig_node;
+ 	struct batadv_orig_ifinfo *orig_ifinfo;
+ 
+-	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
+-
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
+ 
+ 	/* for all neighbors towards this originator ... */
+ 	hlist_for_each_entry_safe(neigh_node, node_tmp,
+ 				  &orig_node->neigh_list, list) {
+ 		hlist_del_rcu(&neigh_node->list);
+-		batadv_neigh_node_free_ref_now(neigh_node);
++		batadv_neigh_node_free_ref(neigh_node);
+ 	}
+ 
+ 	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
+ 				  &orig_node->ifinfo_list, list) {
+ 		hlist_del_rcu(&orig_ifinfo->list);
+-		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
++		batadv_orig_ifinfo_free_ref(orig_ifinfo);
+ 	}
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 
+-	batadv_mcast_purge_orig(orig_node);
+-
+ 	/* Free nc_nodes */
+ 	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
+ 
+-	batadv_frag_purge_orig(orig_node, NULL);
+-
+-	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
+-		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
+-
+-	kfree(orig_node->tt_buff);
+-	kfree(orig_node);
++	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+ }
+ 
+ /**
+  * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
+- * schedule an rcu callback for freeing it
++ *  release it
+  * @orig_node: the orig node to free
+  */
+ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
+ {
+ 	if (atomic_dec_and_test(&orig_node->refcount))
+-		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
+-}
+-
+-/**
+- * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
+- * possibly free it (without rcu callback)
+- * @orig_node: the orig node to free
+- */
+-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
+-{
+-	if (atomic_dec_and_test(&orig_node->refcount))
+-		batadv_orig_node_free_rcu(&orig_node->rcu);
++		batadv_orig_node_release(orig_node);
+ }
+ 
+ void batadv_originator_free(struct batadv_priv *bat_priv)
+diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
+index aa4a43696295..28b751ad549c 100644
+--- a/net/batman-adv/originator.h
++++ b/net/batman-adv/originator.h
+@@ -25,7 +25,6 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
+ void batadv_originator_free(struct batadv_priv *bat_priv);
+ void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
+ void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+-void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
+ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
+ 					      const uint8_t *addr);
+ struct batadv_neigh_node *
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 4f2a9d2c56db..ddd62c9af5b4 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -219,20 +219,6 @@ int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
+ 	return count;
+ }
+ 
+-static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+-{
+-	struct batadv_tt_orig_list_entry *orig_entry;
+-
+-	orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+-
+-	/* We are in an rcu callback here, therefore we cannot use
+-	 * batadv_orig_node_free_ref() and its call_rcu():
+-	 * An rcu_barrier() wouldn't wait for that to finish
+-	 */
+-	batadv_orig_node_free_ref_now(orig_entry->orig_node);
+-	kfree(orig_entry);
+-}
+-
+ /**
+  * batadv_tt_local_size_mod - change the size by v of the local table identified
+  *  by vid
+@@ -328,13 +314,25 @@ static void batadv_tt_global_size_dec(struct batadv_orig_node *orig_node,
+ 	batadv_tt_global_size_mod(orig_node, vid, -1);
+ }
+ 
++/**
++ * batadv_tt_orig_list_entry_release - release tt orig entry from lists and
++ *  queue for free after rcu grace period
++ * @orig_entry: tt orig entry to be free'd
++ */
++static void
++batadv_tt_orig_list_entry_release(struct batadv_tt_orig_list_entry *orig_entry)
++{
++	batadv_orig_node_free_ref(orig_entry->orig_node);
++	kfree_rcu(orig_entry, rcu);
++}
++
+ static void
+ batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
+ {
+ 	if (!atomic_dec_and_test(&orig_entry->refcount))
+ 		return;
+ 
+-	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
++	batadv_tt_orig_list_entry_release(orig_entry);
+ }
+ 
+ /**
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 4ff77a16956c..3d6c8e222391 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -28,6 +28,8 @@
+ const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_br_ops);
+ 
++static struct lock_class_key bridge_netdev_addr_lock_key;
++
+ /* net device transmit always called with BH disabled */
+ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -87,6 +89,11 @@ out:
+ 	return NETDEV_TX_OK;
+ }
+ 
++static void br_set_lockdep_class(struct net_device *dev)
++{
++	lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
++}
++
+ static int br_dev_init(struct net_device *dev)
+ {
+ 	struct net_bridge *br = netdev_priv(dev);
+@@ -99,6 +106,7 @@ static int br_dev_init(struct net_device *dev)
+ 	err = br_vlan_init(br);
+ 	if (err)
+ 		free_percpu(br->stats);
++	br_set_lockdep_class(dev);
+ 
+ 	return err;
+ }
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 7832d07f48f6..ce658abdc2c8 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -128,7 +128,10 @@ static void br_stp_start(struct net_bridge *br)
+ 	char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
+ 	char *envp[] = { NULL };
+ 
+-	r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
++	if (net_eq(dev_net(br->dev), &init_net))
++		r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
++	else
++		r = -ENOENT;
+ 
+ 	spin_lock_bh(&br->lock);
+ 
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a42b232805a5..185a3398c651 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2479,6 +2479,8 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+  *
+  *	It may return NULL if the skb requires no segmentation.  This is
+  *	only possible when GSO is used for verifying header integrity.
++ *
++ *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
+  */
+ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ 				  netdev_features_t features, bool tx_path)
+@@ -2493,6 +2495,9 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
+ 			return ERR_PTR(err);
+ 	}
+ 
++	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
++		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));
++
+ 	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
+ 	SKB_GSO_CB(skb)->encap_level = 0;
+ 
+diff --git a/net/core/dst.c b/net/core/dst.c
+index f8db4032d45a..540066cb33ef 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -282,10 +282,11 @@ void dst_release(struct dst_entry *dst)
+ {
+ 	if (dst) {
+ 		int newrefcnt;
++		unsigned short nocache = dst->flags & DST_NOCACHE;
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		WARN_ON(newrefcnt < 0);
+-		if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
++		if (!newrefcnt && unlikely(nocache))
+ 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
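
The dst_release() fix above is about ordering: DST_NOCACHE must be sampled while the caller still holds its reference, because after the decrement another CPU may already be freeing the entry. A userspace sketch of the same rule (names and flag value invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_NOCACHE 0x0010	/* stands in for DST_NOCACHE */

struct entry {
	atomic_int refcnt;
	unsigned short flags;
};

static void entry_release(struct entry *e)
{
	/* sample the flag while our reference still keeps 'e' alive ... */
	bool nocache = e->flags & FLAG_NOCACHE;

	/* ... because after this decrement another thread may free it */
	int newrefcnt = atomic_fetch_sub(&e->refcnt, 1) - 1;

	if (newrefcnt == 0 && nocache)
		printf("deferred free scheduled\n");	/* call_rcu() upstream */
}

int main(void)
{
	struct entry e = { .flags = FLAG_NOCACHE };

	atomic_init(&e.refcnt, 1);
	entry_release(&e);
	return 0;
}
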
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 0fa2613b5e35..238bb3f9c51d 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -775,6 +775,11 @@ int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
+ 			if (ftest->k == 0)
+ 				return -EINVAL;
+ 			break;
++		case BPF_ALU | BPF_LSH | BPF_K:
++		case BPF_ALU | BPF_RSH | BPF_K:
++			if (ftest->k >= 32)
++				return -EINVAL;
++			break;
+ 		case BPF_LD | BPF_MEM:
+ 		case BPF_LDX | BPF_MEM:
+ 		case BPF_ST:
+diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
+index 30addee2dd03..838f524cf11a 100644
+--- a/net/dccp/minisocks.c
++++ b/net/dccp/minisocks.c
+@@ -48,8 +48,6 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
+ 			tw->tw_ipv6only = sk->sk_ipv6only;
+ 		}
+ #endif
+-		/* Linkage updates. */
+-		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
+ 
+ 		/* Get the TIME_WAIT timeout firing. */
+ 		if (timeo < rto)
+@@ -60,6 +58,8 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
+ 			timeo = DCCP_TIMEWAIT_LEN;
+ 
+ 		inet_twsk_schedule(tw, timeo);
++		/* Linkage updates. */
++		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);
+ 		inet_twsk_put(tw);
+ 	} else {
+ 		/* Sorry, if we're out of memory, just CLOSE this
+diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
+index 00ec8d5d7e7e..bb96c1c4edd6 100644
+--- a/net/ipv4/inet_timewait_sock.c
++++ b/net/ipv4/inet_timewait_sock.c
+@@ -153,13 +153,15 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
+ 	/*
+ 	 * Step 2: Hash TW into tcp ehash chain.
+ 	 * Notes :
+-	 * - tw_refcnt is set to 3 because :
++	 * - tw_refcnt is set to 4 because :
+ 	 * - We have one reference from bhash chain.
+ 	 * - We have one reference from ehash chain.
++	 * - We have one reference from timer.
++	 * - One reference for ourself (our caller will release it).
+ 	 * We can use atomic_set() because prior spin_lock()/spin_unlock()
+ 	 * committed into memory all tw fields.
+ 	 */
+-	atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
++	atomic_set(&tw->tw_refcnt, 4);
+ 	inet_twsk_add_node_rcu(tw, &ehead->chain);
+ 
+ 	/* Step 3: Remove SK from hash chain */
+@@ -243,7 +245,7 @@ void inet_twsk_deschedule(struct inet_timewait_sock *tw)
+ }
+ EXPORT_SYMBOL(inet_twsk_deschedule);
+ 
+-void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
++void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
+ {
+ 	/* timeout := RTO * 3.5
+ 	 *
+@@ -271,12 +273,14 @@ void inet_twsk_schedule(struct inet_timewait_sock *tw, const int timeo)
+ 	 */
+ 
+ 	tw->tw_kill = timeo <= 4*HZ;
+-	if (!mod_timer_pinned(&tw->tw_timer, jiffies + timeo)) {
+-		atomic_inc(&tw->tw_refcnt);
++	if (!rearm) {
++		BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
+ 		atomic_inc(&tw->tw_dr->tw_count);
++	} else {
++		mod_timer_pending(&tw->tw_timer, jiffies + timeo);
+ 	}
+ }
+-EXPORT_SYMBOL_GPL(inet_twsk_schedule);
++EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
+ 
+ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
+ 		     struct inet_timewait_death_row *twdr, int family)
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index c65b93a7b711..51573f8a39bc 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -235,6 +235,7 @@ static int ip_finish_output_gso(struct sock *sk, struct sk_buff *skb)
+ 	 * from host network stack.
+ 	 */
+ 	features = netif_skb_features(skb);
++	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
+ 	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ 	if (IS_ERR_OR_NULL(segs)) {
+ 		kfree_skb(skb);
+@@ -893,7 +894,7 @@ static int __ip_append_data(struct sock *sk,
+ 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+ 	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+-	    (sk->sk_type == SOCK_DGRAM)) {
++	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+ 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+ 					 hh_len, fragheaderlen, transhdrlen,
+ 					 maxfraglen, flags);
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 17e7339ee5ca..fec2907b85e8 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -163,9 +163,9 @@ kill_with_rst:
+ 		if (tcp_death_row.sysctl_tw_recycle &&
+ 		    tcptw->tw_ts_recent_stamp &&
+ 		    tcp_tw_remember_stamp(tw))
+-			inet_twsk_schedule(tw, tw->tw_timeout);
++			inet_twsk_reschedule(tw, tw->tw_timeout);
+ 		else
+-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
++			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ 		return TCP_TW_ACK;
+ 	}
+ 
+@@ -203,7 +203,7 @@ kill:
+ 				return TCP_TW_SUCCESS;
+ 			}
+ 		}
+-		inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
++		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ 
+ 		if (tmp_opt.saw_tstamp) {
+ 			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
+@@ -253,7 +253,7 @@ kill:
+ 		 * Do not reschedule in the last case.
+ 		 */
+ 		if (paws_reject || th->ack)
+-			inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
++			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
+ 
+ 		return tcp_timewait_check_oow_rate_limit(
+ 			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
+@@ -324,9 +324,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ 		} while (0);
+ #endif
+ 
+-		/* Linkage updates. */
+-		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
+-
+ 		/* Get the TIME_WAIT timeout firing. */
+ 		if (timeo < rto)
+ 			timeo = rto;
+@@ -340,6 +337,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
+ 		}
+ 
+ 		inet_twsk_schedule(tw, timeo);
++		/* Linkage updates. */
++		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
+ 		inet_twsk_put(tw);
+ 	} else {
+ 		/* Sorry, if we're out of memory, just CLOSE this
+diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
+index 17d35662930d..3e6a472e6b88 100644
+--- a/net/ipv4/tcp_yeah.c
++++ b/net/ipv4/tcp_yeah.c
+@@ -219,7 +219,7 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
+ 	yeah->fast_count = 0;
+ 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
+ 
+-	return tp->snd_cwnd - reduction;
++	return max_t(int, tp->snd_cwnd - reduction, 2);
+ }
+ 
+ static struct tcp_congestion_ops tcp_yeah __read_mostly = {
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index bff69746e05f..78526087126d 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -230,7 +230,7 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ 	xfrm_dst_ifdown(dst, dev);
+ }
+ 
+-static struct dst_ops xfrm4_dst_ops = {
++static struct dst_ops xfrm4_dst_ops_template = {
+ 	.family =		AF_INET,
+ 	.gc =			xfrm4_garbage_collect,
+ 	.update_pmtu =		xfrm4_update_pmtu,
+@@ -244,7 +244,7 @@ static struct dst_ops xfrm4_dst_ops = {
+ 
+ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
+ 	.family = 		AF_INET,
+-	.dst_ops =		&xfrm4_dst_ops,
++	.dst_ops =		&xfrm4_dst_ops_template,
+ 	.dst_lookup =		xfrm4_dst_lookup,
+ 	.get_saddr =		xfrm4_get_saddr,
+ 	.decode_session =	_decode_session4,
+@@ -266,7 +266,7 @@ static struct ctl_table xfrm4_policy_table[] = {
+ 	{ }
+ };
+ 
+-static int __net_init xfrm4_net_init(struct net *net)
++static int __net_init xfrm4_net_sysctl_init(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 	struct ctl_table_header *hdr;
+@@ -294,7 +294,7 @@ err_alloc:
+ 	return -ENOMEM;
+ }
+ 
+-static void __net_exit xfrm4_net_exit(struct net *net)
++static void __net_exit xfrm4_net_sysctl_exit(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 
+@@ -306,12 +306,44 @@ static void __net_exit xfrm4_net_exit(struct net *net)
+ 	if (!net_eq(net, &init_net))
+ 		kfree(table);
+ }
++#else /* CONFIG_SYSCTL */
++static int inline xfrm4_net_sysctl_init(struct net *net)
++{
++	return 0;
++}
++
++static void inline xfrm4_net_sysctl_exit(struct net *net)
++{
++}
++#endif
++
++static int __net_init xfrm4_net_init(struct net *net)
++{
++	int ret;
++
++	memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
++	       sizeof(xfrm4_dst_ops_template));
++	ret = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
++	if (ret)
++		return ret;
++
++	ret = xfrm4_net_sysctl_init(net);
++	if (ret)
++		dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
++
++	return ret;
++}
++
++static void __net_exit xfrm4_net_exit(struct net *net)
++{
++	xfrm4_net_sysctl_exit(net);
++	dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
++}
+ 
+ static struct pernet_operations __net_initdata xfrm4_net_ops = {
+ 	.init	= xfrm4_net_init,
+ 	.exit	= xfrm4_net_exit,
+ };
+-#endif
+ 
+ static void __init xfrm4_policy_init(void)
+ {
+@@ -320,13 +352,9 @@ static void __init xfrm4_policy_init(void)
+ 
+ void __init xfrm4_init(void)
+ {
+-	dst_entries_init(&xfrm4_dst_ops);
+-
+ 	xfrm4_state_init();
+ 	xfrm4_policy_init();
+ 	xfrm4_protocol_init();
+-#ifdef CONFIG_SYSCTL
+ 	register_pernet_subsys(&xfrm4_net_ops);
+-#endif
+ }
+ 
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index a2d685030a34..f4795b0d6e6e 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -5267,13 +5267,10 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
+ 		goto out;
+ 	}
+ 
+-	if (!write) {
+-		err = snprintf(str, sizeof(str), "%pI6",
+-			       &secret->secret);
+-		if (err >= sizeof(str)) {
+-			err = -EIO;
+-			goto out;
+-		}
++	err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
++	if (err >= sizeof(str)) {
++		err = -EIO;
++		goto out;
+ 	}
+ 
+ 	err = proc_dostring(&lctl, write, buffer, lenp, ppos);
+diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
+index 882124ebb438..a8f6986dcbe5 100644
+--- a/net/ipv6/addrlabel.c
++++ b/net/ipv6/addrlabel.c
+@@ -552,7 +552,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
+ 
+ 	rcu_read_lock();
+ 	p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
+-	if (p && ip6addrlbl_hold(p))
++	if (p && !ip6addrlbl_hold(p))
+ 		p = NULL;
+ 	lseq = ip6addrlbl_table.seq;
+ 	rcu_read_unlock();
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index bc09cb97b840..f50228b0abe5 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1329,7 +1329,7 @@ emsgsize:
+ 	     (skb && skb_is_gso(skb))) &&
+ 	    (sk->sk_protocol == IPPROTO_UDP) &&
+ 	    (rt->dst.dev->features & NETIF_F_UFO) &&
+-	    (sk->sk_type == SOCK_DGRAM)) {
++	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+ 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
+ 					  hh_len, fragheaderlen,
+ 					  transhdrlen, mtu, flags, rt);
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index c1938ad39f8c..c1147acbc8c4 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -465,8 +465,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
+ 
+ 		skb_set_queue_mapping(skb, queue_mapping);
++		rcu_read_lock();
+ 		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
+ 			       np->tclass);
++		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
+index 901ef6f8addc..5266ad2d6419 100644
+--- a/net/ipv6/xfrm6_mode_tunnel.c
++++ b/net/ipv6/xfrm6_mode_tunnel.c
+@@ -24,7 +24,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
+ 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
+ 
+ 	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
+-		IP6_ECN_set_ce(inner_iph);
++		IP6_ECN_set_ce(skb, inner_iph);
+ }
+ 
+ /* Add encapsulation header.
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index f337a908a76a..4fb94f6ee15b 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -289,7 +289,7 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ 	xfrm_dst_ifdown(dst, dev);
+ }
+ 
+-static struct dst_ops xfrm6_dst_ops = {
++static struct dst_ops xfrm6_dst_ops_template = {
+ 	.family =		AF_INET6,
+ 	.gc =			xfrm6_garbage_collect,
+ 	.update_pmtu =		xfrm6_update_pmtu,
+@@ -303,7 +303,7 @@ static struct dst_ops xfrm6_dst_ops = {
+ 
+ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
+ 	.family =		AF_INET6,
+-	.dst_ops =		&xfrm6_dst_ops,
++	.dst_ops =		&xfrm6_dst_ops_template,
+ 	.dst_lookup =		xfrm6_dst_lookup,
+ 	.get_saddr =		xfrm6_get_saddr,
+ 	.decode_session =	_decode_session6,
+@@ -336,7 +336,7 @@ static struct ctl_table xfrm6_policy_table[] = {
+ 	{ }
+ };
+ 
+-static int __net_init xfrm6_net_init(struct net *net)
++static int __net_init xfrm6_net_sysctl_init(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 	struct ctl_table_header *hdr;
+@@ -364,7 +364,7 @@ err_alloc:
+ 	return -ENOMEM;
+ }
+ 
+-static void __net_exit xfrm6_net_exit(struct net *net)
++static void __net_exit xfrm6_net_sysctl_exit(struct net *net)
+ {
+ 	struct ctl_table *table;
+ 
+@@ -376,24 +376,52 @@ static void __net_exit xfrm6_net_exit(struct net *net)
+ 	if (!net_eq(net, &init_net))
+ 		kfree(table);
+ }
++#else /* CONFIG_SYSCTL */
++static int inline xfrm6_net_sysctl_init(struct net *net)
++{
++	return 0;
++}
++
++static void inline xfrm6_net_sysctl_exit(struct net *net)
++{
++}
++#endif
++
++static int __net_init xfrm6_net_init(struct net *net)
++{
++	int ret;
++
++	memcpy(&net->xfrm.xfrm6_dst_ops, &xfrm6_dst_ops_template,
++	       sizeof(xfrm6_dst_ops_template));
++	ret = dst_entries_init(&net->xfrm.xfrm6_dst_ops);
++	if (ret)
++		return ret;
++
++	ret = xfrm6_net_sysctl_init(net);
++	if (ret)
++		dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
++
++	return ret;
++}
++
++static void __net_exit xfrm6_net_exit(struct net *net)
++{
++	xfrm6_net_sysctl_exit(net);
++	dst_entries_destroy(&net->xfrm.xfrm6_dst_ops);
++}
+ 
+ static struct pernet_operations xfrm6_net_ops = {
+ 	.init	= xfrm6_net_init,
+ 	.exit	= xfrm6_net_exit,
+ };
+-#endif
+ 
+ int __init xfrm6_init(void)
+ {
+ 	int ret;
+ 
+-	dst_entries_init(&xfrm6_dst_ops);
+-
+ 	ret = xfrm6_policy_init();
+-	if (ret) {
+-		dst_entries_destroy(&xfrm6_dst_ops);
++	if (ret)
+ 		goto out;
+-	}
+ 	ret = xfrm6_state_init();
+ 	if (ret)
+ 		goto out_policy;
+@@ -402,9 +430,7 @@ int __init xfrm6_init(void)
+ 	if (ret)
+ 		goto out_state;
+ 
+-#ifdef CONFIG_SYSCTL
+ 	register_pernet_subsys(&xfrm6_net_ops);
+-#endif
+ out:
+ 	return ret;
+ out_state:
+@@ -416,11 +442,8 @@ out_policy:
+ 
+ void xfrm6_fini(void)
+ {
+-#ifdef CONFIG_SYSCTL
+ 	unregister_pernet_subsys(&xfrm6_net_ops);
+-#endif
+ 	xfrm6_protocol_fini();
+ 	xfrm6_policy_fini();
+ 	xfrm6_state_fini();
+-	dst_entries_destroy(&xfrm6_dst_ops);
+ }
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 27e14962b504..b3fe02a2339e 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -337,12 +337,10 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ 	unsigned short gso_type = skb_shinfo(skb)->gso_type;
+ 	struct sw_flow_key later_key;
+ 	struct sk_buff *segs, *nskb;
+-	struct ovs_skb_cb ovs_cb;
+ 	int err;
+ 
+-	ovs_cb = *OVS_CB(skb);
++	BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
+ 	segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+-	*OVS_CB(skb) = ovs_cb;
+ 	if (IS_ERR(segs))
+ 		return PTR_ERR(segs);
+ 	if (segs == NULL)
+@@ -360,7 +358,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+ 	/* Queue all of the segments. */
+ 	skb = segs;
+ 	do {
+-		*OVS_CB(skb) = ovs_cb;
+ 		if (gso_type & SKB_GSO_UDP && skb != segs)
+ 			key = &later_key;
+ 
+diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
+index 32ab87d34828..11d0b29ce4b8 100644
+--- a/net/phonet/af_phonet.c
++++ b/net/phonet/af_phonet.c
+@@ -377,6 +377,10 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
+ 	struct sockaddr_pn sa;
+ 	u16 len;
+ 
++	skb = skb_share_check(skb, GFP_ATOMIC);
++	if (!skb)
++		return NET_RX_DROP;
++
+ 	/* check we have at least a full Phonet header */
+ 	if (!pskb_pull(skb, sizeof(struct phonethdr)))
+ 		goto out;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index b453270be3fd..3c6f6b774ba6 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -666,8 +666,10 @@ static void qdisc_rcu_free(struct rcu_head *head)
+ {
+ 	struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
+ 
+-	if (qdisc_is_percpu_stats(qdisc))
++	if (qdisc_is_percpu_stats(qdisc)) {
+ 		free_percpu(qdisc->cpu_bstats);
++		free_percpu(qdisc->cpu_qstats);
++	}
+ 
+ 	kfree((char *) qdisc - qdisc->padded);
+ }
+diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
+index fef2acdf4a2e..ecae5561b912 100644
+--- a/net/sctp/sm_sideeffect.c
++++ b/net/sctp/sm_sideeffect.c
+@@ -244,12 +244,13 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
+ 	int error;
+ 	struct sctp_transport *transport = (struct sctp_transport *) peer;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 
+ 	/* Check whether a task is in the sock.  */
+ 
+-	bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -272,10 +273,10 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
+ 			   transport, GFP_ATOMIC);
+ 
+ 	if (error)
+-		asoc->base.sk->sk_err = -error;
++		sk->sk_err = -error;
+ 
+ out_unlock:
+-	bh_unlock_sock(asoc->base.sk);
++	bh_unlock_sock(sk);
+ 	sctp_transport_put(transport);
+ }
+ 
+@@ -285,11 +286,12 @@ out_unlock:
+ static void sctp_generate_timeout_event(struct sctp_association *asoc,
+ 					sctp_event_timeout_t timeout_type)
+ {
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 	int error = 0;
+ 
+-	bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy: timer %d\n", __func__,
+ 			 timeout_type);
+ 
+@@ -312,10 +314,10 @@ static void sctp_generate_timeout_event(struct sctp_association *asoc,
+ 			   (void *)timeout_type, GFP_ATOMIC);
+ 
+ 	if (error)
+-		asoc->base.sk->sk_err = -error;
++		sk->sk_err = -error;
+ 
+ out_unlock:
+-	bh_unlock_sock(asoc->base.sk);
++	bh_unlock_sock(sk);
+ 	sctp_association_put(asoc);
+ }
+ 
+@@ -365,10 +367,11 @@ void sctp_generate_heartbeat_event(unsigned long data)
+ 	int error = 0;
+ 	struct sctp_transport *transport = (struct sctp_transport *) data;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 
+-	bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -389,10 +392,10 @@ void sctp_generate_heartbeat_event(unsigned long data)
+ 			   transport, GFP_ATOMIC);
+ 
+ 	 if (error)
+-		 asoc->base.sk->sk_err = -error;
++		sk->sk_err = -error;
+ 
+ out_unlock:
+-	bh_unlock_sock(asoc->base.sk);
++	bh_unlock_sock(sk);
+ 	sctp_transport_put(transport);
+ }
+ 
+@@ -403,10 +406,11 @@ void sctp_generate_proto_unreach_event(unsigned long data)
+ {
+ 	struct sctp_transport *transport = (struct sctp_transport *) data;
+ 	struct sctp_association *asoc = transport->asoc;
+-	struct net *net = sock_net(asoc->base.sk);
++	struct sock *sk = asoc->base.sk;
++	struct net *net = sock_net(sk);
+ 
+-	bh_lock_sock(asoc->base.sk);
+-	if (sock_owned_by_user(asoc->base.sk)) {
++	bh_lock_sock(sk);
++	if (sock_owned_by_user(sk)) {
+ 		pr_debug("%s: sock is busy\n", __func__);
+ 
+ 		/* Try again later.  */
+@@ -427,7 +431,7 @@ void sctp_generate_proto_unreach_event(unsigned long data)
+ 		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
+ 
+ out_unlock:
+-	bh_unlock_sock(asoc->base.sk);
++	bh_unlock_sock(sk);
+ 	sctp_association_put(asoc);
+ }
+ 
+diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
+index 3ee27b7704ff..e6bb98e583fb 100644
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -4829,7 +4829,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+ 
+ 	retval = SCTP_DISPOSITION_CONSUME;
+ 
+-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++	if (abort)
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ 
+ 	/* Even if we can't send the ABORT due to low memory delete the
+ 	 * TCB.  This is a departure from our typical NOMEM handling.
+@@ -4966,7 +4967,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+ 			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ 	retval = SCTP_DISPOSITION_CONSUME;
+ 
+-	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
++	if (abort)
++		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+ 
+ 	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ 			SCTP_STATE(SCTP_STATE_CLOSED));
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index a63c2c87a0c6..76e6ec62cf92 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1513,8 +1513,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 			struct sctp_chunk *chunk;
+ 
+ 			chunk = sctp_make_abort_user(asoc, NULL, 0);
+-			if (chunk)
+-				sctp_primitive_ABORT(net, asoc, chunk);
++			sctp_primitive_ABORT(net, asoc, chunk);
+ 		} else
+ 			sctp_primitive_SHUTDOWN(net, asoc, NULL);
+ 	}
+diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
+index 26d50c565f54..3e0fc5127225 100644
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -320,7 +320,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ 	struct ctl_table tbl;
+ 	bool changed = false;
+ 	char *none = "none";
+-	char tmp[8];
++	char tmp[8] = {0};
+ 	int ret;
+ 
+ 	memset(&tbl, 0, sizeof(struct ctl_table));
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index a398f624c28d..cb3a01a9ed38 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1481,6 +1481,21 @@ static void unix_destruct_scm(struct sk_buff *skb)
+ 	sock_wfree(skb);
+ }
+ 
++/*
++ * The "user->unix_inflight" variable is protected by the garbage
++ * collection lock, and we just read it locklessly here. If you go
++ * over the limit, there might be a tiny race in actually noticing
++ * it across threads. Tough.
++ */
++static inline bool too_many_unix_fds(struct task_struct *p)
++{
++	struct user_struct *user = current_user();
++
++	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
++		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
++	return false;
++}
++
+ #define MAX_RECURSION_LEVEL 4
+ 
+ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+@@ -1489,6 +1504,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	unsigned char max_level = 0;
+ 	int unix_sock_count = 0;
+ 
++	if (too_many_unix_fds(current))
++		return -ETOOMANYREFS;
++
+ 	for (i = scm->fp->count - 1; i >= 0; i--) {
+ 		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+ 
+@@ -1510,10 +1528,8 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+ 	if (!UNIXCB(skb).fp)
+ 		return -ENOMEM;
+ 
+-	if (unix_sock_count) {
+-		for (i = scm->fp->count - 1; i >= 0; i--)
+-			unix_inflight(scm->fp->fp[i]);
+-	}
++	for (i = scm->fp->count - 1; i >= 0; i--)
++		unix_inflight(scm->fp->fp[i]);
+ 	return max_level;
+ }
+ 
+diff --git a/net/unix/garbage.c b/net/unix/garbage.c
+index a73a226f2d33..8fcdc2283af5 100644
+--- a/net/unix/garbage.c
++++ b/net/unix/garbage.c
+@@ -120,11 +120,11 @@ void unix_inflight(struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		spin_lock(&unix_gc_lock);
+-
+ 		if (atomic_long_inc_return(&u->inflight) == 1) {
+ 			BUG_ON(!list_empty(&u->link));
+ 			list_add_tail(&u->link, &gc_inflight_list);
+@@ -132,25 +132,28 @@ void unix_inflight(struct file *fp)
+ 			BUG_ON(list_empty(&u->link));
+ 		}
+ 		unix_tot_inflight++;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	fp->f_cred->user->unix_inflight++;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+ void unix_notinflight(struct file *fp)
+ {
+ 	struct sock *s = unix_get_socket(fp);
+ 
++	spin_lock(&unix_gc_lock);
++
+ 	if (s) {
+ 		struct unix_sock *u = unix_sk(s);
+ 
+-		spin_lock(&unix_gc_lock);
+ 		BUG_ON(list_empty(&u->link));
+ 
+ 		if (atomic_long_dec_and_test(&u->inflight))
+ 			list_del_init(&u->link);
+ 		unix_tot_inflight--;
+-		spin_unlock(&unix_gc_lock);
+ 	}
++	fp->f_cred->user->unix_inflight--;
++	spin_unlock(&unix_gc_lock);
+ }
+ 
+ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
+diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
+index fbcedbe33190..5097dce5b916 100644
+--- a/net/xfrm/xfrm_output.c
++++ b/net/xfrm/xfrm_output.c
+@@ -153,6 +153,8 @@ static int xfrm_output_gso(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct sk_buff *segs;
+ 
++	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
++	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_SGO_CB_OFFSET);
+ 	segs = skb_gso_segment(skb, 0);
+ 	kfree_skb(skb);
+ 	if (IS_ERR(segs))
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index 638af0655aaf..4cd2076ff84b 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2806,7 +2806,6 @@ static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+ 
+ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ {
+-	struct net *net;
+ 	int err = 0;
+ 	if (unlikely(afinfo == NULL))
+ 		return -EINVAL;
+@@ -2837,26 +2836,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
+ 	}
+ 	spin_unlock(&xfrm_policy_afinfo_lock);
+ 
+-	rtnl_lock();
+-	for_each_net(net) {
+-		struct dst_ops *xfrm_dst_ops;
+-
+-		switch (afinfo->family) {
+-		case AF_INET:
+-			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
+-			break;
+-#if IS_ENABLED(CONFIG_IPV6)
+-		case AF_INET6:
+-			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
+-			break;
+-#endif
+-		default:
+-			BUG();
+-		}
+-		*xfrm_dst_ops = *afinfo->dst_ops;
+-	}
+-	rtnl_unlock();
+-
+ 	return err;
+ }
+ EXPORT_SYMBOL(xfrm_policy_register_afinfo);
+@@ -2892,22 +2871,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
+ }
+ EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
+ 
+-static void __net_init xfrm_dst_ops_init(struct net *net)
+-{
+-	struct xfrm_policy_afinfo *afinfo;
+-
+-	rcu_read_lock();
+-	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
+-	if (afinfo)
+-		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
+-#if IS_ENABLED(CONFIG_IPV6)
+-	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
+-	if (afinfo)
+-		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
+-#endif
+-	rcu_read_unlock();
+-}
+-
+ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+ {
+ 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+@@ -3056,7 +3019,6 @@ static int __net_init xfrm_net_init(struct net *net)
+ 	rv = xfrm_policy_init(net);
+ 	if (rv < 0)
+ 		goto out_policy;
+-	xfrm_dst_ops_init(net);
+ 	rv = xfrm_sysctl_init(net);
+ 	if (rv < 0)
+ 		goto out_sysctl;
+diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
+index 3d1984e59a30..e00bcd129336 100644
+--- a/scripts/recordmcount.c
++++ b/scripts/recordmcount.c
+@@ -42,6 +42,7 @@
+ 
+ #ifndef EM_AARCH64
+ #define EM_AARCH64	183
++#define R_AARCH64_NONE		0
+ #define R_AARCH64_ABS64	257
+ #endif
+ 
+@@ -160,6 +161,22 @@ static int make_nop_x86(void *map, size_t const offset)
+ 	return 0;
+ }
+ 
++static unsigned char ideal_nop4_arm64[4] = {0x1f, 0x20, 0x03, 0xd5};
++static int make_nop_arm64(void *map, size_t const offset)
++{
++	uint32_t *ptr;
++
++	ptr = map + offset;
++	/* bl <_mcount> is 0x94000000 before relocation */
++	if (*ptr != 0x94000000)
++		return -1;
++
++	/* Convert to nop */
++	ulseek(fd_map, offset, SEEK_SET);
++	uwrite(fd_map, ideal_nop, 4);
++	return 0;
++}
++
+ /*
+  * Get the whole file as a programming convenience in order to avoid
+  * malloc+lseek+read+free of many pieces.  If successful, then mmap
+@@ -353,7 +370,12 @@ do_file(char const *const fname)
+ 			 altmcount = "__gnu_mcount_nc";
+ 			 break;
+ 	case EM_AARCH64:
+-			 reltype = R_AARCH64_ABS64; gpfx = '_'; break;
++			reltype = R_AARCH64_ABS64;
++			make_nop = make_nop_arm64;
++			rel_type_nop = R_AARCH64_NONE;
++			ideal_nop = ideal_nop4_arm64;
++			gpfx = '_';
++			break;
+ 	case EM_IA_64:	 reltype = R_IA64_IMM64;   gpfx = '_'; break;
+ 	case EM_METAG:	 reltype = R_METAG_ADDR32;
+ 			 altmcount = "_mcount_wrapper";
+diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
+index 49b582a225b0..b9897e2be404 100644
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *const relhdr,
+ 
+ 		if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+ 			if (make_nop)
+-				ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
++				ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
+ 			if (warn_on_notrace_sect && !once) {
+ 				printf("Section %s has mcount callers being ignored\n",
+ 				       txtname);
+diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
+index 826470d7f000..96e2486a6fc4 100755
+--- a/scripts/recordmcount.pl
++++ b/scripts/recordmcount.pl
+@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
+ 
+ } elsif ($arch eq "powerpc") {
+     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
+-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
++    # See comment in the sparc64 section for why we use '\w'.
++    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
+     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
+ 
+     if ($bits == 64) {
+diff --git a/sound/core/control.c b/sound/core/control.c
+index 196a6fe100ca..a85d45595d02 100644
+--- a/sound/core/control.c
++++ b/sound/core/control.c
+@@ -1405,6 +1405,8 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
+ 		return -EFAULT;
+ 	if (tlv.length < sizeof(unsigned int) * 2)
+ 		return -EINVAL;
++	if (!tlv.numid)
++		return -EINVAL;
+ 	down_read(&card->controls_rwsem);
+ 	kctl = snd_ctl_find_numid(card, tlv.numid);
+ 	if (kctl == NULL) {
+diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
+index 886be7da989d..38514ed6e55c 100644
+--- a/sound/core/hrtimer.c
++++ b/sound/core/hrtimer.c
+@@ -90,7 +90,7 @@ static int snd_hrtimer_start(struct snd_timer *t)
+ 	struct snd_hrtimer *stime = t->private_data;
+ 
+ 	atomic_set(&stime->running, 0);
+-	hrtimer_cancel(&stime->hrt);
++	hrtimer_try_to_cancel(&stime->hrt);
+ 	hrtimer_start(&stime->hrt, ns_to_ktime(t->sticks * resolution),
+ 		      HRTIMER_MODE_REL);
+ 	atomic_set(&stime->running, 1);
+@@ -101,6 +101,7 @@ static int snd_hrtimer_stop(struct snd_timer *t)
+ {
+ 	struct snd_hrtimer *stime = t->private_data;
+ 	atomic_set(&stime->running, 0);
++	hrtimer_try_to_cancel(&stime->hrt);
+ 	return 0;
+ }
+ 
+diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
+index b48b434444ed..9630e9f72b7b 100644
+--- a/sound/core/pcm_compat.c
++++ b/sound/core/pcm_compat.c
+@@ -255,10 +255,15 @@ static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
+ 	if (! (runtime = substream->runtime))
+ 		return -ENOTTY;
+ 
+-	/* only fifo_size is different, so just copy all */
+-	data = memdup_user(data32, sizeof(*data32));
+-	if (IS_ERR(data))
+-		return PTR_ERR(data);
++	data = kmalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
++
++	/* only fifo_size (RO from userspace) is different, so just copy all */
++	if (copy_from_user(data, data32, sizeof(*data32))) {
++		err = -EFAULT;
++		goto error;
++	}
+ 
+ 	if (refine)
+ 		err = snd_pcm_hw_refine(substream, data);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index edbdab85fc02..bd4741442909 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1962,7 +1962,7 @@ static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
+ 		 * No restrictions so for a user client we can clear
+ 		 * the whole fifo
+ 		 */
+-		if (client->type == USER_CLIENT)
++		if (client->type == USER_CLIENT && client->data.user.fifo)
+ 			snd_seq_fifo_clear(client->data.user.fifo);
+ 	}
+ 
+diff --git a/sound/core/seq/seq_compat.c b/sound/core/seq/seq_compat.c
+index 81f7c109dc46..65175902a68a 100644
+--- a/sound/core/seq/seq_compat.c
++++ b/sound/core/seq/seq_compat.c
+@@ -49,11 +49,12 @@ static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned
+ 	struct snd_seq_port_info *data;
+ 	mm_segment_t fs;
+ 
+-	data = memdup_user(data32, sizeof(*data32));
+-	if (IS_ERR(data))
+-		return PTR_ERR(data);
++	data = kmalloc(sizeof(*data), GFP_KERNEL);
++	if (!data)
++		return -ENOMEM;
+ 
+-	if (get_user(data->flags, &data32->flags) ||
++	if (copy_from_user(data, data32, sizeof(*data32)) ||
++	    get_user(data->flags, &data32->flags) ||
+ 	    get_user(data->time_queue, &data32->time_queue))
+ 		goto error;
+ 	data->kernel = NULL;
+diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
+index a0cda38205b9..77ec21420355 100644
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -142,8 +142,10 @@ static struct snd_seq_queue *queue_new(int owner, int locked)
+ static void queue_delete(struct snd_seq_queue *q)
+ {
+ 	/* stop and release the timer */
++	mutex_lock(&q->timer_mutex);
+ 	snd_seq_timer_stop(q->timer);
+ 	snd_seq_timer_close(q);
++	mutex_unlock(&q->timer_mutex);
+ 	/* wait until access free */
+ 	snd_use_lock_sync(&q->use_lock);
+ 	/* release resources... */
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index a9a1a047c521..a419878901c4 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -65,6 +65,7 @@ struct snd_timer_user {
+ 	int qtail;
+ 	int qused;
+ 	int queue_size;
++	bool disconnected;
+ 	struct snd_timer_read *queue;
+ 	struct snd_timer_tread *tqueue;
+ 	spinlock_t qlock;
+@@ -73,7 +74,7 @@ struct snd_timer_user {
+ 	struct timespec tstamp;		/* trigger tstamp */
+ 	wait_queue_head_t qchange_sleep;
+ 	struct fasync_struct *fasync;
+-	struct mutex tread_sem;
++	struct mutex ioctl_lock;
+ };
+ 
+ /* list of timers */
+@@ -215,11 +216,13 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
+ 		    slave->slave_id == master->slave_id) {
+ 			list_move_tail(&slave->open_list, &master->slave_list_head);
+ 			spin_lock_irq(&slave_active_lock);
++			spin_lock(&master->timer->lock);
+ 			slave->master = master;
+ 			slave->timer = master->timer;
+ 			if (slave->flags & SNDRV_TIMER_IFLG_RUNNING)
+ 				list_add_tail(&slave->active_list,
+ 					      &master->slave_active_head);
++			spin_unlock(&master->timer->lock);
+ 			spin_unlock_irq(&slave_active_lock);
+ 		}
+ 	}
+@@ -288,6 +291,9 @@ int snd_timer_open(struct snd_timer_instance **ti,
+ 		mutex_unlock(&register_mutex);
+ 		return -ENOMEM;
+ 	}
++	/* take a card refcount for safe disconnection */
++	if (timer->card)
++		get_device(&timer->card->card_dev);
+ 	timeri->slave_class = tid->dev_sclass;
+ 	timeri->slave_id = slave_id;
+ 	if (list_empty(&timer->open_list_head) && timer->hw.open)
+@@ -346,15 +352,21 @@ int snd_timer_close(struct snd_timer_instance *timeri)
+ 		    timer->hw.close)
+ 			timer->hw.close(timer);
+ 		/* remove slave links */
++		spin_lock_irq(&slave_active_lock);
++		spin_lock(&timer->lock);
+ 		list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
+ 					 open_list) {
+-			spin_lock_irq(&slave_active_lock);
+-			_snd_timer_stop(slave, 1, SNDRV_TIMER_EVENT_RESOLUTION);
+ 			list_move_tail(&slave->open_list, &snd_timer_slave_list);
+ 			slave->master = NULL;
+ 			slave->timer = NULL;
+-			spin_unlock_irq(&slave_active_lock);
++			list_del_init(&slave->ack_list);
++			list_del_init(&slave->active_list);
+ 		}
++		spin_unlock(&timer->lock);
++		spin_unlock_irq(&slave_active_lock);
++		/* release a card refcount for safe disconnection */
++		if (timer->card)
++			put_device(&timer->card->card_dev);
+ 		mutex_unlock(&register_mutex);
+ 	}
+  out:
+@@ -441,9 +453,12 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
+ 
+ 	spin_lock_irqsave(&slave_active_lock, flags);
+ 	timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
+-	if (timeri->master)
++	if (timeri->master && timeri->timer) {
++		spin_lock(&timeri->timer->lock);
+ 		list_add_tail(&timeri->active_list,
+ 			      &timeri->master->slave_active_head);
++		spin_unlock(&timeri->timer->lock);
++	}
+ 	spin_unlock_irqrestore(&slave_active_lock, flags);
+ 	return 1; /* delayed start */
+ }
+@@ -467,6 +482,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
+ 	timer = timeri->timer;
+ 	if (timer == NULL)
+ 		return -EINVAL;
++	if (timer->card && timer->card->shutdown)
++		return -ENODEV;
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	timeri->ticks = timeri->cticks = ticks;
+ 	timeri->pticks = 0;
+@@ -489,6 +506,8 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 		if (!keep_flag) {
+ 			spin_lock_irqsave(&slave_active_lock, flags);
+ 			timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
++			list_del_init(&timeri->ack_list);
++			list_del_init(&timeri->active_list);
+ 			spin_unlock_irqrestore(&slave_active_lock, flags);
+ 		}
+ 		goto __end;
+@@ -499,6 +518,10 @@ static int _snd_timer_stop(struct snd_timer_instance * timeri,
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	list_del_init(&timeri->ack_list);
+ 	list_del_init(&timeri->active_list);
++	if (timer->card && timer->card->shutdown) {
++		spin_unlock_irqrestore(&timer->lock, flags);
++		return 0;
++	}
+ 	if ((timeri->flags & SNDRV_TIMER_IFLG_RUNNING) &&
+ 	    !(--timer->running)) {
+ 		timer->hw.stop(timer);
+@@ -561,6 +584,8 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
+ 	timer = timeri->timer;
+ 	if (! timer)
+ 		return -EINVAL;
++	if (timer->card && timer->card->shutdown)
++		return -ENODEV;
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	if (!timeri->cticks)
+ 		timeri->cticks = 1;
+@@ -624,6 +649,9 @@ static void snd_timer_tasklet(unsigned long arg)
+ 	unsigned long resolution, ticks;
+ 	unsigned long flags;
+ 
++	if (timer->card && timer->card->shutdown)
++		return;
++
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 	/* now process all callbacks */
+ 	while (!list_empty(&timer->sack_list_head)) {
+@@ -664,6 +692,9 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ 	if (timer == NULL)
+ 		return;
+ 
++	if (timer->card && timer->card->shutdown)
++		return;
++
+ 	spin_lock_irqsave(&timer->lock, flags);
+ 
+ 	/* remember the current resolution */
+@@ -694,7 +725,7 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
+ 		} else {
+ 			ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
+ 			if (--timer->running)
+-				list_del(&ti->active_list);
++				list_del_init(&ti->active_list);
+ 		}
+ 		if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
+ 		    (ti->flags & SNDRV_TIMER_IFLG_FAST))
+@@ -874,11 +905,28 @@ static int snd_timer_dev_register(struct snd_device *dev)
+ 	return 0;
+ }
+ 
++/* just for reference in snd_timer_dev_disconnect() below */
++static void snd_timer_user_ccallback(struct snd_timer_instance *timeri,
++				     int event, struct timespec *tstamp,
++				     unsigned long resolution);
++
+ static int snd_timer_dev_disconnect(struct snd_device *device)
+ {
+ 	struct snd_timer *timer = device->device_data;
++	struct snd_timer_instance *ti;
++
+ 	mutex_lock(&register_mutex);
+ 	list_del_init(&timer->device_list);
++	/* wake up pending sleepers */
++	list_for_each_entry(ti, &timer->open_list_head, open_list) {
++		/* FIXME: better to have a ti.disconnect() op */
++		if (ti->ccallback == snd_timer_user_ccallback) {
++			struct snd_timer_user *tu = ti->callback_data;
++
++			tu->disconnected = true;
++			wake_up(&tu->qchange_sleep);
++		}
++	}
+ 	mutex_unlock(&register_mutex);
+ 	return 0;
+ }
+@@ -889,6 +937,8 @@ void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstam
+ 	unsigned long resolution = 0;
+ 	struct snd_timer_instance *ti, *ts;
+ 
++	if (timer->card && timer->card->shutdown)
++		return;
+ 	if (! (timer->hw.flags & SNDRV_TIMER_HW_SLAVE))
+ 		return;
+ 	if (snd_BUG_ON(event < SNDRV_TIMER_EVENT_MSTART ||
+@@ -1047,6 +1097,8 @@ static void snd_timer_proc_read(struct snd_info_entry *entry,
+ 
+ 	mutex_lock(&register_mutex);
+ 	list_for_each_entry(timer, &snd_timer_list, device_list) {
++		if (timer->card && timer->card->shutdown)
++			continue;
+ 		switch (timer->tmr_class) {
+ 		case SNDRV_TIMER_CLASS_GLOBAL:
+ 			snd_iprintf(buffer, "G%i: ", timer->tmr_device);
+@@ -1253,7 +1305,7 @@ static int snd_timer_user_open(struct inode *inode, struct file *file)
+ 		return -ENOMEM;
+ 	spin_lock_init(&tu->qlock);
+ 	init_waitqueue_head(&tu->qchange_sleep);
+-	mutex_init(&tu->tread_sem);
++	mutex_init(&tu->ioctl_lock);
+ 	tu->ticks = 1;
+ 	tu->queue_size = 128;
+ 	tu->queue = kmalloc(tu->queue_size * sizeof(struct snd_timer_read),
+@@ -1273,8 +1325,10 @@ static int snd_timer_user_release(struct inode *inode, struct file *file)
+ 	if (file->private_data) {
+ 		tu = file->private_data;
+ 		file->private_data = NULL;
++		mutex_lock(&tu->ioctl_lock);
+ 		if (tu->timeri)
+ 			snd_timer_close(tu->timeri);
++		mutex_unlock(&tu->ioctl_lock);
+ 		kfree(tu->queue);
+ 		kfree(tu->tqueue);
+ 		kfree(tu);
+@@ -1512,7 +1566,6 @@ static int snd_timer_user_tselect(struct file *file,
+ 	int err = 0;
+ 
+ 	tu = file->private_data;
+-	mutex_lock(&tu->tread_sem);
+ 	if (tu->timeri) {
+ 		snd_timer_close(tu->timeri);
+ 		tu->timeri = NULL;
+@@ -1556,7 +1609,6 @@ static int snd_timer_user_tselect(struct file *file,
+ 	}
+ 
+       __err:
+-      	mutex_unlock(&tu->tread_sem);
+ 	return err;
+ }
+ 
+@@ -1769,7 +1821,7 @@ enum {
+ 	SNDRV_TIMER_IOCTL_PAUSE_OLD = _IO('T', 0x23),
+ };
+ 
+-static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
++static long __snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 				 unsigned long arg)
+ {
+ 	struct snd_timer_user *tu;
+@@ -1786,17 +1838,11 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 	{
+ 		int xarg;
+ 
+-		mutex_lock(&tu->tread_sem);
+-		if (tu->timeri)	{	/* too late */
+-			mutex_unlock(&tu->tread_sem);
++		if (tu->timeri)	/* too late */
+ 			return -EBUSY;
+-		}
+-		if (get_user(xarg, p)) {
+-			mutex_unlock(&tu->tread_sem);
++		if (get_user(xarg, p))
+ 			return -EFAULT;
+-		}
+ 		tu->tread = xarg ? 1 : 0;
+-		mutex_unlock(&tu->tread_sem);
+ 		return 0;
+ 	}
+ 	case SNDRV_TIMER_IOCTL_GINFO:
+@@ -1829,6 +1875,18 @@ static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
+ 	return -ENOTTY;
+ }
+ 
++static long snd_timer_user_ioctl(struct file *file, unsigned int cmd,
++				 unsigned long arg)
++{
++	struct snd_timer_user *tu = file->private_data;
++	long ret;
++
++	mutex_lock(&tu->ioctl_lock);
++	ret = __snd_timer_user_ioctl(file, cmd, arg);
++	mutex_unlock(&tu->ioctl_lock);
++	return ret;
++}
++
+ static int snd_timer_user_fasync(int fd, struct file * file, int on)
+ {
+ 	struct snd_timer_user *tu;
+@@ -1866,6 +1924,10 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
+ 
+ 			remove_wait_queue(&tu->qchange_sleep, &wait);
+ 
++			if (tu->disconnected) {
++				err = -ENODEV;
++				break;
++			}
+ 			if (signal_pending(current)) {
+ 				err = -ERESTARTSYS;
+ 				break;
+@@ -1915,6 +1977,8 @@ static unsigned int snd_timer_user_poll(struct file *file, poll_table * wait)
+ 	mask = 0;
+ 	if (tu->qused)
+ 		mask |= POLLIN | POLLRDNORM;
++	if (tu->disconnected)
++		mask |= POLLERR;
+ 
+ 	return mask;
+ }
+diff --git a/sound/firewire/bebob/Makefile b/sound/firewire/bebob/Makefile
+index 6cf470c80d1f..af7ed6643266 100644
+--- a/sound/firewire/bebob/Makefile
++++ b/sound/firewire/bebob/Makefile
+@@ -1,4 +1,4 @@
+ snd-bebob-objs := bebob_command.o bebob_stream.o bebob_proc.o bebob_midi.o \
+ 		  bebob_pcm.o bebob_hwdep.o bebob_terratec.o bebob_yamaha.o \
+ 		  bebob_focusrite.o bebob_maudio.o bebob.o
+-obj-m += snd-bebob.o
++obj-$(CONFIG_SND_BEBOB) += snd-bebob.o
+diff --git a/sound/firewire/dice/Makefile b/sound/firewire/dice/Makefile
+index 9ef228ef7baf..55b4be9b0034 100644
+--- a/sound/firewire/dice/Makefile
++++ b/sound/firewire/dice/Makefile
+@@ -1,3 +1,3 @@
+ snd-dice-objs := dice-transaction.o dice-stream.o dice-proc.o dice-midi.o \
+ 		 dice-pcm.o dice-hwdep.o dice.o
+-obj-m += snd-dice.o
++obj-$(CONFIG_SND_DICE) += snd-dice.o
+diff --git a/sound/firewire/fireworks/Makefile b/sound/firewire/fireworks/Makefile
+index 0c7440826db8..15ef7f75a8ef 100644
+--- a/sound/firewire/fireworks/Makefile
++++ b/sound/firewire/fireworks/Makefile
+@@ -1,4 +1,4 @@
+ snd-fireworks-objs := fireworks_transaction.o fireworks_command.o \
+ 		      fireworks_stream.o fireworks_proc.o fireworks_midi.o \
+ 		      fireworks_pcm.o fireworks_hwdep.o fireworks.o
+-obj-m += snd-fireworks.o
++obj-$(CONFIG_SND_FIREWORKS) += snd-fireworks.o
+diff --git a/sound/firewire/oxfw/Makefile b/sound/firewire/oxfw/Makefile
+index a926850864f6..06ff50f4e6c0 100644
+--- a/sound/firewire/oxfw/Makefile
++++ b/sound/firewire/oxfw/Makefile
+@@ -1,3 +1,3 @@
+ snd-oxfw-objs := oxfw-command.o oxfw-stream.o oxfw-control.o oxfw-pcm.o \
+ 		 oxfw-proc.o oxfw-midi.o oxfw-hwdep.o oxfw.o
+-obj-m += snd-oxfw.o
++obj-$(CONFIG_SND_OXFW) += snd-oxfw.o
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 44dfc7b92bc3..09920ba55ba1 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -329,6 +329,7 @@ enum {
+ 
+ #define AZX_DCAPS_PRESET_CTHDA \
+ 	(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB |\
++	 AZX_DCAPS_NO_64BIT |\
+ 	 AZX_DCAPS_4K_BDLE_BOUNDARY | AZX_DCAPS_SNOOP_OFF)
+ 
+ /*
+@@ -839,6 +840,36 @@ static int azx_resume(struct device *dev)
+ }
+ #endif /* CONFIG_PM_SLEEP || SUPPORT_VGA_SWITCHEROO */
+ 
++#ifdef CONFIG_PM_SLEEP
++/* put codec down to D3 at hibernation for Intel SKL+;
++ * otherwise BIOS may still access the codec and screw up the driver
++ */
++#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
++#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
++#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
++#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
++
++static int azx_freeze_noirq(struct device *dev)
++{
++	struct pci_dev *pci = to_pci_dev(dev);
++
++	if (IS_SKL_PLUS(pci))
++		pci_set_power_state(pci, PCI_D3hot);
++
++	return 0;
++}
++
++static int azx_thaw_noirq(struct device *dev)
++{
++	struct pci_dev *pci = to_pci_dev(dev);
++
++	if (IS_SKL_PLUS(pci))
++		pci_set_power_state(pci, PCI_D0);
++
++	return 0;
++}
++#endif /* CONFIG_PM_SLEEP */
++
+ #ifdef CONFIG_PM
+ static int azx_runtime_suspend(struct device *dev)
+ {
+@@ -939,6 +970,10 @@ static int azx_runtime_idle(struct device *dev)
+ 
+ static const struct dev_pm_ops azx_pm = {
+ 	SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
++#ifdef CONFIG_PM_SLEEP
++	.freeze_noirq = azx_freeze_noirq,
++	.thaw_noirq = azx_thaw_noirq,
++#endif
+ 	SET_RUNTIME_PM_OPS(azx_runtime_suspend, azx_runtime_resume, azx_runtime_idle)
+ };
+ 
+@@ -1937,9 +1972,17 @@ out_free:
+ static void azx_remove(struct pci_dev *pci)
+ {
+ 	struct snd_card *card = pci_get_drvdata(pci);
++	struct azx *chip;
++	struct hda_intel *hda;
++
++	if (card) {
++		/* flush the pending probing work */
++		chip = card->private_data;
++		hda = container_of(chip, struct hda_intel, chip);
++		flush_work(&hda->probe_work);
+ 
+-	if (card)
+ 		snd_card_free(card);
++	}
+ }
+ 
+ static void azx_shutdown(struct pci_dev *pci)
+@@ -1976,6 +2019,11 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	{ PCI_DEVICE(0x8086, 0x8d21),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	/* Lewisburg */
++	{ PCI_DEVICE(0x8086, 0xa1f0),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
++	{ PCI_DEVICE(0x8086, 0xa270),
++	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ 	/* Lynx Point-LP */
+ 	{ PCI_DEVICE(0x8086, 0x9c20),
+ 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+@@ -2156,11 +2204,13 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ 	  .class_mask = 0xffffff,
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++	  AZX_DCAPS_NO_64BIT |
+ 	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #else
+ 	/* this entry seems still valid -- i.e. without emu20kx chip */
+ 	{ PCI_DEVICE(0x1102, 0x0009),
+ 	  .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
++	  AZX_DCAPS_NO_64BIT |
+ 	  AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+ #endif
+ 	/* CM8888 */
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 57bb5a559f8e..8189f02f8446 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -111,6 +111,7 @@ struct alc_spec {
+ 	void (*power_hook)(struct hda_codec *codec);
+ #endif
+ 	void (*shutup)(struct hda_codec *codec);
++	void (*reboot_notify)(struct hda_codec *codec);
+ 
+ 	int init_amp;
+ 	int codec_variant;	/* flag for other variants */
+@@ -773,6 +774,25 @@ static inline void alc_shutup(struct hda_codec *codec)
+ 		snd_hda_shutup_pins(codec);
+ }
+ 
++static void alc_reboot_notify(struct hda_codec *codec)
++{
++	struct alc_spec *spec = codec->spec;
++
++	if (spec && spec->reboot_notify)
++		spec->reboot_notify(codec);
++	else
++		alc_shutup(codec);
++}
++
++/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
++static void alc_d3_at_reboot(struct hda_codec *codec)
++{
++	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
++	snd_hda_codec_write(codec, codec->core.afg, 0,
++			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
++	msleep(10);
++}
++
+ #define alc_free	snd_hda_gen_free
+ 
+ #ifdef CONFIG_PM
+@@ -818,7 +838,7 @@ static const struct hda_codec_ops alc_patch_ops = {
+ 	.suspend = alc_suspend,
+ 	.check_power_status = snd_hda_gen_check_power_status,
+ #endif
+-	.reboot_notify = alc_shutup,
++	.reboot_notify = alc_reboot_notify,
+ };
+ 
+ 
+@@ -1767,10 +1787,12 @@ enum {
+ 	ALC889_FIXUP_MBA11_VREF,
+ 	ALC889_FIXUP_MBA21_VREF,
+ 	ALC889_FIXUP_MP11_VREF,
++	ALC889_FIXUP_MP41_VREF,
+ 	ALC882_FIXUP_INV_DMIC,
+ 	ALC882_FIXUP_NO_PRIMARY_HP,
+ 	ALC887_FIXUP_ASUS_BASS,
+ 	ALC887_FIXUP_BASS_CHMAP,
++	ALC882_FIXUP_DISABLE_AAMIX,
+ };
+ 
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -1854,7 +1876,7 @@ static void alc889_fixup_mbp_vref(struct hda_codec *codec,
+ 				  const struct hda_fixup *fix, int action)
+ {
+ 	struct alc_spec *spec = codec->spec;
+-	static hda_nid_t nids[2] = { 0x14, 0x15 };
++	static hda_nid_t nids[3] = { 0x14, 0x15, 0x19 };
+ 	int i;
+ 
+ 	if (action != HDA_FIXUP_ACT_INIT)
+@@ -1932,6 +1954,8 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
+ 
+ static void alc_fixup_bass_chmap(struct hda_codec *codec,
+ 				 const struct hda_fixup *fix, int action);
++static void alc_fixup_disable_aamix(struct hda_codec *codec,
++				    const struct hda_fixup *fix, int action);
+ 
+ static const struct hda_fixup alc882_fixups[] = {
+ 	[ALC882_FIXUP_ABIT_AW9D_MAX] = {
+@@ -2142,6 +2166,12 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
+ 	},
++	[ALC889_FIXUP_MP41_VREF] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc889_fixup_mbp_vref,
++		.chained = true,
++		.chain_id = ALC885_FIXUP_MACPRO_GPIO,
++	},
+ 	[ALC882_FIXUP_INV_DMIC] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_inv_dmic,
+@@ -2163,6 +2193,10 @@ static const struct hda_fixup alc882_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_bass_chmap,
+ 	},
++	[ALC882_FIXUP_DISABLE_AAMIX] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++	},
+ };
+ 
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2220,7 +2254,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
+-	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
++	SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 4,1/5,1", ALC889_FIXUP_MP41_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
+@@ -2230,6 +2264,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ 	SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
+ 	SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
+ 	SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
+@@ -4194,6 +4229,8 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+ 	struct alc_spec *spec = codec->spec;
+ 
+ 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->shutup = alc_no_shutup; /* reduce click noise */
++		spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ 		codec->power_save_node = 0; /* avoid click noises */
+ 		snd_hda_apply_pincfgs(codec, pincfgs);
+@@ -4525,6 +4562,7 @@ enum {
+ 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC292_FIXUP_TPT440_DOCK,
++	ALC292_FIXUP_TPT440,
+ 	ALC283_FIXUP_BXBT2807_MIC,
+ 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ 	ALC282_FIXUP_ASPIRE_V5_PINS,
+@@ -4993,6 +5031,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ 	},
++	[ALC292_FIXUP_TPT440] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC292_FIXUP_TPT440_DOCK,
++	},
+ 	[ALC283_FIXUP_BXBT2807_MIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -5107,6 +5151,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+ 	SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
++	SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+ 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+ 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
+@@ -5116,6 +5161,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
++	SND_PCI_QUIRK(0x1028, 0x062c, "Dell Latitude E5550", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5227,12 +5273,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+-	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
++	SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440),
+ 	SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+@@ -5322,6 +5369,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ 	{.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
+ 	{.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
+ 	{.id = ALC292_FIXUP_TPT440_DOCK, .name = "tpt440-dock"},
++	{.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
+ 	{}
+ };
+ 
+@@ -5448,6 +5496,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		ALC255_STANDARD_PINS,
+ 		{0x12, 0x90a60170},
++		{0x14, 0x90171130},
++		{0x21, 0x02211040}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x90a60170},
+ 		{0x14, 0x90170140},
+ 		{0x17, 0x40000000},
+ 		{0x1d, 0x40700001},
+@@ -6456,6 +6508,7 @@ static const struct hda_fixup alc662_fixups[] = {
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+ 	SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
++	SND_PCI_QUIRK(0x1025, 0x0241, "Packard Bell DOTS", ALC662_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+ 	SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+@@ -6473,6 +6526,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+ 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
++	SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
+ 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
+ 	SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+ 	SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index b1bc66783974..8e7d4c087a7a 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -702,6 +702,7 @@ static bool hp_bnb2011_with_dock(struct hda_codec *codec)
+ static bool hp_blike_system(u32 subsystem_id)
+ {
+ 	switch (subsystem_id) {
++	case 0x103c1473: /* HP ProBook 6550b */
+ 	case 0x103c1520:
+ 	case 0x103c1521:
+ 	case 0x103c1523:
+@@ -3109,6 +3110,29 @@ static void stac92hd71bxx_fixup_hp_hdx(struct hda_codec *codec,
+ 	spec->gpio_led = 0x08;
+ }
+ 
++static bool is_hp_output(struct hda_codec *codec, hda_nid_t pin)
++{
++	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
++
++	/* count line-out, too, as BIOS sets often so */
++	return get_defcfg_connect(pin_cfg) != AC_JACK_PORT_NONE &&
++		(get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
++		 get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT);
++}
++
++static void fixup_hp_headphone(struct hda_codec *codec, hda_nid_t pin)
++{
++	unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, pin);
++
++	/* It was changed in the BIOS to just satisfy MS DTM.
++	 * Lets turn it back into slaved HP
++	 */
++	pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE)) |
++		(AC_JACK_HP_OUT << AC_DEFCFG_DEVICE_SHIFT);
++	pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC | AC_DEFCFG_SEQUENCE))) |
++		0x1f;
++	snd_hda_codec_set_pincfg(codec, pin, pin_cfg);
++}
+ 
+ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
+ 				   const struct hda_fixup *fix, int action)
+@@ -3118,22 +3142,12 @@ static void stac92hd71bxx_fixup_hp(struct hda_codec *codec,
+ 	if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ 		return;
+ 
+-	if (hp_blike_system(codec->core.subsystem_id)) {
+-		unsigned int pin_cfg = snd_hda_codec_get_pincfg(codec, 0x0f);
+-		if (get_defcfg_device(pin_cfg) == AC_JACK_LINE_OUT ||
+-			get_defcfg_device(pin_cfg) == AC_JACK_SPEAKER  ||
+-			get_defcfg_device(pin_cfg) == AC_JACK_HP_OUT) {
+-			/* It was changed in the BIOS to just satisfy MS DTM.
+-			 * Lets turn it back into slaved HP
+-			 */
+-			pin_cfg = (pin_cfg & (~AC_DEFCFG_DEVICE))
+-					| (AC_JACK_HP_OUT <<
+-						AC_DEFCFG_DEVICE_SHIFT);
+-			pin_cfg = (pin_cfg & (~(AC_DEFCFG_DEF_ASSOC
+-							| AC_DEFCFG_SEQUENCE)))
+-								| 0x1f;
+-			snd_hda_codec_set_pincfg(codec, 0x0f, pin_cfg);
+-		}
++	/* when both output A and F are assigned, these are supposedly
++	 * dock and built-in headphones; fix both pin configs
++	 */
++	if (is_hp_output(codec, 0x0a) && is_hp_output(codec, 0x0f)) {
++		fixup_hp_headphone(codec, 0x0a);
++		fixup_hp_headphone(codec, 0x0f);
+ 	}
+ 
+ 	if (find_mute_led_cfg(codec, 1))
+diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
+index 2306ccf7281e..77c963ced67a 100644
+--- a/sound/pci/rme96.c
++++ b/sound/pci/rme96.c
+@@ -741,10 +741,11 @@ snd_rme96_playback_setrate(struct rme96 *rme96,
+ 	{
+ 		/* change to/from double-speed: reset the DAC (if available) */
+ 		snd_rme96_reset_dac(rme96);
++		return 1; /* need to restore volume */
+ 	} else {
+ 		writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
++		return 0;
+ 	}
+-	return 0;
+ }
+ 
+ static int
+@@ -980,6 +981,7 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 	struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+ 	struct snd_pcm_runtime *runtime = substream->runtime;
+ 	int err, rate, dummy;
++	bool apply_dac_volume = false;
+ 
+ 	runtime->dma_area = (void __force *)(rme96->iobase +
+ 					     RME96_IO_PLAY_BUFFER);
+@@ -993,24 +995,26 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 	{
+                 /* slave clock */
+                 if ((int)params_rate(params) != rate) {
+-			spin_unlock_irq(&rme96->lock);
+-			return -EIO;                    
+-                }
+-	} else if ((err = snd_rme96_playback_setrate(rme96, params_rate(params))) < 0) {
+-		spin_unlock_irq(&rme96->lock);
+-		return err;
+-	}
+-	if ((err = snd_rme96_playback_setformat(rme96, params_format(params))) < 0) {
+-		spin_unlock_irq(&rme96->lock);
+-		return err;
++			err = -EIO;
++			goto error;
++		}
++	} else {
++		err = snd_rme96_playback_setrate(rme96, params_rate(params));
++		if (err < 0)
++			goto error;
++		apply_dac_volume = err > 0; /* need to restore volume later? */
+ 	}
++
++	err = snd_rme96_playback_setformat(rme96, params_format(params));
++	if (err < 0)
++		goto error;
+ 	snd_rme96_setframelog(rme96, params_channels(params), 1);
+ 	if (rme96->capture_periodsize != 0) {
+ 		if (params_period_size(params) << rme96->playback_frlog !=
+ 		    rme96->capture_periodsize)
+ 		{
+-			spin_unlock_irq(&rme96->lock);
+-			return -EBUSY;
++			err = -EBUSY;
++			goto error;
+ 		}
+ 	}
+ 	rme96->playback_periodsize =
+@@ -1021,9 +1025,16 @@ snd_rme96_playback_hw_params(struct snd_pcm_substream *substream,
+ 		rme96->wcreg &= ~(RME96_WCR_PRO | RME96_WCR_DOLBY | RME96_WCR_EMP);
+ 		writel(rme96->wcreg |= rme96->wcreg_spdif_stream, rme96->iobase + RME96_IO_CONTROL_REGISTER);
+ 	}
++
++	err = 0;
++ error:
+ 	spin_unlock_irq(&rme96->lock);
+-		
+-	return 0;
++	if (apply_dac_volume) {
++		usleep_range(3000, 10000);
++		snd_rme96_apply_dac_volume(rme96);
++	}
++
++	return err;
+ }
+ 
+ static int
+diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
+index ee91edcf3cb0..13191891fc4c 100644
+--- a/sound/soc/codecs/arizona.c
++++ b/sound/soc/codecs/arizona.c
+@@ -1354,7 +1354,7 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
+ 	bool reconfig;
+ 	unsigned int aif_tx_state, aif_rx_state;
+ 
+-	if (params_rate(params) % 8000)
++	if (params_rate(params) % 4000)
+ 		rates = &arizona_44k1_bclk_rates[0];
+ 	else
+ 		rates = &arizona_48k_bclk_rates[0];
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index c5f35a07e8e4..3ad7f5be1cfa 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -85,7 +85,15 @@ static const DECLARE_TLV_DB_SCALE(pga_tlv, 0, 300, 0);
+ static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
+ static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 300, 0);
+ 
+-static const int deemph_settings[] = { 0, 32000, 44100, 48000 };
++static const struct {
++	int rate;
++	unsigned int val;
++} deemph_settings[] = {
++	{ 0,     ES8328_DACCONTROL6_DEEMPH_OFF },
++	{ 32000, ES8328_DACCONTROL6_DEEMPH_32k },
++	{ 44100, ES8328_DACCONTROL6_DEEMPH_44_1k },
++	{ 48000, ES8328_DACCONTROL6_DEEMPH_48k },
++};
+ 
+ static int es8328_set_deemph(struct snd_soc_codec *codec)
+ {
+@@ -97,21 +105,22 @@ static int es8328_set_deemph(struct snd_soc_codec *codec)
+ 	 * rate.
+ 	 */
+ 	if (es8328->deemph) {
+-		best = 1;
+-		for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
+-			if (abs(deemph_settings[i] - es8328->playback_fs) <
+-			    abs(deemph_settings[best] - es8328->playback_fs))
++		best = 0;
++		for (i = 1; i < ARRAY_SIZE(deemph_settings); i++) {
++			if (abs(deemph_settings[i].rate - es8328->playback_fs) <
++			    abs(deemph_settings[best].rate - es8328->playback_fs))
+ 				best = i;
+ 		}
+ 
+-		val = best << 1;
++		val = deemph_settings[best].val;
+ 	} else {
+-		val = 0;
++		val = ES8328_DACCONTROL6_DEEMPH_OFF;
+ 	}
+ 
+ 	dev_dbg(codec->dev, "Set deemphasis %d\n", val);
+ 
+-	return snd_soc_update_bits(codec, ES8328_DACCONTROL6, 0x6, val);
++	return snd_soc_update_bits(codec, ES8328_DACCONTROL6,
++			ES8328_DACCONTROL6_DEEMPH_MASK, val);
+ }
+ 
+ static int es8328_get_deemph(struct snd_kcontrol *kcontrol,
+diff --git a/sound/soc/codecs/es8328.h b/sound/soc/codecs/es8328.h
+index cb36afe10c0e..156c748c89c7 100644
+--- a/sound/soc/codecs/es8328.h
++++ b/sound/soc/codecs/es8328.h
+@@ -153,6 +153,7 @@ int es8328_probe(struct device *dev, struct regmap *regmap);
+ #define ES8328_DACCONTROL6_CLICKFREE (1 << 3)
+ #define ES8328_DACCONTROL6_DAC_INVR (1 << 4)
+ #define ES8328_DACCONTROL6_DAC_INVL (1 << 5)
++#define ES8328_DACCONTROL6_DEEMPH_MASK (3 << 6)
+ #define ES8328_DACCONTROL6_DEEMPH_OFF (0 << 6)
+ #define ES8328_DACCONTROL6_DEEMPH_32k (1 << 6)
+ #define ES8328_DACCONTROL6_DEEMPH_44_1k (2 << 6)
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 118b0034ba23..154c1a24a303 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -365,8 +365,8 @@ static struct reg_default wm8962_reg[] = {
+ 	{ 16924, 0x0059 },   /* R16924 - HDBASS_PG_1 */
+ 	{ 16925, 0x999A },   /* R16925 - HDBASS_PG_0 */
+ 
+-	{ 17048, 0x0083 },   /* R17408 - HPF_C_1 */
+-	{ 17049, 0x98AD },   /* R17409 - HPF_C_0 */
++	{ 17408, 0x0083 },   /* R17408 - HPF_C_1 */
++	{ 17409, 0x98AD },   /* R17409 - HPF_C_0 */
+ 
+ 	{ 17920, 0x007F },   /* R17920 - ADCL_RETUNE_C1_1 */
+ 	{ 17921, 0xFFFF },   /* R17921 - ADCL_RETUNE_C1_0 */
+diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
+index ff0e4646b934..88317c1b7f96 100644
+--- a/sound/soc/codecs/wm8974.c
++++ b/sound/soc/codecs/wm8974.c
+@@ -575,6 +575,7 @@ static const struct regmap_config wm8974_regmap = {
+ 	.max_register = WM8974_MONOMIX,
+ 	.reg_defaults = wm8974_reg_defaults,
+ 	.num_reg_defaults = ARRAY_SIZE(wm8974_reg_defaults),
++	.cache_type = REGCACHE_FLAT,
+ };
+ 
+ static int wm8974_probe(struct snd_soc_codec *codec)
+diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
+index 23c91fa65ab8..76dd8c6aa4f0 100644
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -221,8 +221,8 @@ static void mcasp_start_tx(struct davinci_mcasp *mcasp)
+ 
+ 	/* wait for XDATA to be cleared */
+ 	cnt = 0;
+-	while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
+-		 ~XRDATA) && (cnt < 100000))
++	while ((mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) & XRDATA) &&
++	       (cnt < 100000))
+ 		cnt++;
+ 
+ 	/* Release TX state machine */
+diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
+index 8c7dc51b1c4f..f7a0cb786d5c 100644
+--- a/sound/soc/sh/rcar/gen.c
++++ b/sound/soc/sh/rcar/gen.c
+@@ -214,7 +214,7 @@ static int rsnd_gen2_probe(struct platform_device *pdev,
+ 		RSND_GEN_S_REG(SCU_SYS_STATUS0,	0x1c8),
+ 		RSND_GEN_S_REG(SCU_SYS_INT_EN0,	0x1cc),
+ 		RSND_GEN_S_REG(SCU_SYS_STATUS1,	0x1d0),
+-		RSND_GEN_S_REG(SCU_SYS_INT_EN1,	0x1c4),
++		RSND_GEN_S_REG(SCU_SYS_INT_EN1,	0x1d4),
+ 		RSND_GEN_M_REG(SRC_SWRSR,	0x200,	0x40),
+ 		RSND_GEN_M_REG(SRC_SRCIR,	0x204,	0x40),
+ 		RSND_GEN_M_REG(SRC_ADINR,	0x214,	0x40),
+diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
+index 025c38fbe3c0..1874cf0e6cab 100644
+--- a/sound/soc/soc-compress.c
++++ b/sound/soc/soc-compress.c
+@@ -623,6 +623,7 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 	struct snd_pcm *be_pcm;
+ 	char new_name[64];
+ 	int ret = 0, direction = 0;
++	int playback = 0, capture = 0;
+ 
+ 	if (rtd->num_codecs > 1) {
+ 		dev_err(rtd->card->dev, "Multicodec not supported for compressed stream\n");
+@@ -634,11 +635,27 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
+ 			rtd->dai_link->stream_name, codec_dai->name, num);
+ 
+ 	if (codec_dai->driver->playback.channels_min)
++		playback = 1;
++	if (codec_dai->driver->capture.channels_min)
++		capture = 1;
++
++	capture = capture && cpu_dai->driver->capture.channels_min;
++	playback = playback && cpu_dai->driver->playback.channels_min;
++
++	/*
++	 * Compress devices are unidirectional so only one of the directions
++	 * should be set, check for that (xor)
++	 */
++	if (playback + capture != 1) {
++		dev_err(rtd->card->dev, "Invalid direction for compress P %d, C %d\n",
++				playback, capture);
++		return -EINVAL;
++	}
++
++	if(playback)
+ 		direction = SND_COMPRESS_PLAYBACK;
+-	else if (codec_dai->driver->capture.channels_min)
+-		direction = SND_COMPRESS_CAPTURE;
+ 	else
+-		return -EINVAL;
++		direction = SND_COMPRESS_CAPTURE;
+ 
+ 	compr = kzalloc(sizeof(*compr), GFP_KERNEL);
+ 	if (compr == NULL) {
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index cd8ed2e393a2..f9a9752d4dbc 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1336,6 +1336,8 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
+ 		}
+ 	}
+ 
++	snd_usb_mixer_fu_apply_quirk(state->mixer, cval, unitid, kctl);
++
+ 	range = (cval->max - cval->min) / cval->res;
+ 	/*
+ 	 * Are there devices with volume range more than 255? I use a bit more
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index 6a803eff87f7..ddca6547399b 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -348,13 +348,6 @@ static struct usbmix_name_map bose_companion5_map[] = {
+ 	{ 0 }	/* terminator */
+ };
+ 
+-/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
+-static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
+-static struct usbmix_name_map dragonfly_1_2_map[] = {
+-	{ 7, NULL, .dB = &dragonfly_1_2_dB },
+-	{ 0 }	/* terminator */
+-};
+-
+ /*
+  * Control map entries
+  */
+@@ -470,11 +463,6 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x05a7, 0x1020),
+ 		.map = bose_companion5_map,
+ 	},
+-	{
+-		/* Dragonfly DAC 1.2 */
+-		.id = USB_ID(0x21b4, 0x0081),
+-		.map = dragonfly_1_2_map,
+-	},
+ 	{ 0 } /* terminator */
+ };
+ 
+diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
+index 337c317ead6f..db9547d04f38 100644
+--- a/sound/usb/mixer_quirks.c
++++ b/sound/usb/mixer_quirks.c
+@@ -37,6 +37,7 @@
+ #include <sound/control.h>
+ #include <sound/hwdep.h>
+ #include <sound/info.h>
++#include <sound/tlv.h>
+ 
+ #include "usbaudio.h"
+ #include "mixer.h"
+@@ -802,7 +803,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol,
+ 		return 0;
+ 
+ 	kcontrol->private_value &= ~(0xff << 24);
+-	kcontrol->private_value |= newval;
++	kcontrol->private_value |= (unsigned int)newval << 24;
+ 	err = snd_ni_update_cur_val(list);
+ 	return err < 0 ? err : 1;
+ }
+@@ -1843,3 +1844,39 @@ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
+ 	}
+ }
+ 
++static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer,
++					 struct snd_kcontrol *kctl)
++{
++	/* Approximation using 10 ranges based on output measurement on hw v1.2.
++	 * This seems close to the cubic mapping e.g. alsamixer uses. */
++	static const DECLARE_TLV_DB_RANGE(scale,
++		 0,  1, TLV_DB_MINMAX_ITEM(-5300, -4970),
++		 2,  5, TLV_DB_MINMAX_ITEM(-4710, -4160),
++		 6,  7, TLV_DB_MINMAX_ITEM(-3884, -3710),
++		 8, 14, TLV_DB_MINMAX_ITEM(-3443, -2560),
++		15, 16, TLV_DB_MINMAX_ITEM(-2475, -2324),
++		17, 19, TLV_DB_MINMAX_ITEM(-2228, -2031),
++		20, 26, TLV_DB_MINMAX_ITEM(-1910, -1393),
++		27, 31, TLV_DB_MINMAX_ITEM(-1322, -1032),
++		32, 40, TLV_DB_MINMAX_ITEM(-968, -490),
++		41, 50, TLV_DB_MINMAX_ITEM(-441, 0),
++	);
++
++	usb_audio_info(mixer->chip, "applying DragonFly dB scale quirk\n");
++	kctl->tlv.p = scale;
++	kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ;
++	kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
++}
++
++void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
++				  struct usb_mixer_elem_info *cval, int unitid,
++				  struct snd_kcontrol *kctl)
++{
++	switch (mixer->chip->usb_id) {
++	case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
++		if (unitid == 7 && cval->min == 0 && cval->max == 50)
++			snd_dragonfly_quirk_db_scale(mixer, kctl);
++		break;
++	}
++}
++
+diff --git a/sound/usb/mixer_quirks.h b/sound/usb/mixer_quirks.h
+index bdbfab093816..177c329cd4dd 100644
+--- a/sound/usb/mixer_quirks.h
++++ b/sound/usb/mixer_quirks.h
+@@ -9,5 +9,9 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ void snd_usb_mixer_rc_memory_change(struct usb_mixer_interface *mixer,
+ 				    int unitid);
+ 
++void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer,
++				  struct usb_mixer_elem_info *cval, int unitid,
++				  struct snd_kcontrol *kctl);
++
+ #endif /* SND_USB_MIXER_QUIRKS_H */
+ 
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index eef9b8e4b949..fb9a8a5787a6 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1122,6 +1122,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+ 	case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ 	case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+ 	case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
++	case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
+ 		return true;
+ 	}
+ 	return false;
+@@ -1265,6 +1266,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
+ 	case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+ 	case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
++	case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
+ 		if (fp->altsetting == 2)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-01-23 18:30 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-01-23 18:30 UTC (permalink / raw
  To: gentoo-commits

commit:     4071022681de7405f4ef7b2778add1b9d08845cf
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jan 23 18:30:39 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jan 23 18:30:39 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=40710226

Linux 4.1.16. Includes patch for CVE-2016-0728

 0000_README                                        |    8 +-
 1015_linux-4.1.16.patch                            | 1479 ++++++++++++++++++++
 ...ing-refleak-in-join-session-CVE-2016-0728.patch |   81 --
 3 files changed, 1483 insertions(+), 85 deletions(-)

diff --git a/0000_README b/0000_README
index 18a8ebc..1ca97cd 100644
--- a/0000_README
+++ b/0000_README
@@ -103,6 +103,10 @@ Patch:  1014_linux-4.1.15.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.15
 
+Patch:  1015_linux-4.1.16.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.16
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
@@ -111,10 +115,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
-From:   https://bugs.gentoo.org/show_bug.cgi?id=572384
-Desc:   Ensure that thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.
-
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1015_linux-4.1.16.patch b/1015_linux-4.1.16.patch
new file mode 100644
index 0000000..44d7080
--- /dev/null
+++ b/1015_linux-4.1.16.patch
@@ -0,0 +1,1479 @@
+diff --git a/Makefile b/Makefile
+index cf35f6bcffd8..7609f1dcdcb9 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 15
++SUBLEVEL = 16
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
+index f8319a0860fd..39be5acc9c48 100644
+--- a/drivers/char/tpm/tpm.h
++++ b/drivers/char/tpm/tpm.h
+@@ -115,6 +115,13 @@ enum tpm2_startup_types {
+ 	TPM2_SU_STATE	= 0x0001,
+ };
+ 
++enum tpm2_start_method {
++	TPM2_START_ACPI = 2,
++	TPM2_START_FIFO = 6,
++	TPM2_START_CRB = 7,
++	TPM2_START_CRB_WITH_ACPI = 8,
++};
++
+ struct tpm_chip;
+ 
+ struct tpm_vendor_specific {
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 1267322595da..2b971b3e5c1c 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -34,12 +34,6 @@ enum crb_defaults {
+ 	CRB_ACPI_START_INDEX = 1,
+ };
+ 
+-enum crb_start_method {
+-	CRB_SM_ACPI_START = 2,
+-	CRB_SM_CRB = 7,
+-	CRB_SM_CRB_WITH_ACPI_START = 8,
+-};
+-
+ struct acpi_tpm2 {
+ 	struct acpi_table_header hdr;
+ 	u16 platform_class;
+@@ -220,12 +214,6 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	u64 pa;
+ 	int rc;
+ 
+-	chip = tpmm_chip_alloc(dev, &tpm_crb);
+-	if (IS_ERR(chip))
+-		return PTR_ERR(chip);
+-
+-	chip->flags = TPM_CHIP_FLAG_TPM2;
+-
+ 	status = acpi_get_table(ACPI_SIG_TPM2, 1,
+ 				(struct acpi_table_header **) &buf);
+ 	if (ACPI_FAILURE(status)) {
+@@ -233,13 +221,15 @@ static int crb_acpi_add(struct acpi_device *device)
+ 		return -ENODEV;
+ 	}
+ 
+-	/* At least some versions of AMI BIOS have a bug that TPM2 table has
+-	 * zero address for the control area and therefore we must fail.
+-	*/
+-	if (!buf->control_area_pa) {
+-		dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
+-		return -EINVAL;
+-	}
++	/* Should the FIFO driver handle this? */
++	if (buf->start_method == TPM2_START_FIFO)
++		return -ENODEV;
++
++	chip = tpmm_chip_alloc(dev, &tpm_crb);
++	if (IS_ERR(chip))
++		return PTR_ERR(chip);
++
++	chip->flags = TPM_CHIP_FLAG_TPM2;
+ 
+ 	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
+ 		dev_err(dev, "TPM2 ACPI table has wrong size");
+@@ -259,11 +249,11 @@ static int crb_acpi_add(struct acpi_device *device)
+ 	 * report only ACPI start but in practice seems to require both
+ 	 * ACPI start and CRB start.
+ 	 */
+-	if (sm == CRB_SM_CRB || sm == CRB_SM_CRB_WITH_ACPI_START ||
++	if (sm == TPM2_START_CRB || sm == TPM2_START_FIFO ||
+ 	    !strcmp(acpi_device_hid(device), "MSFT0101"))
+ 		priv->flags |= CRB_FL_CRB_START;
+ 
+-	if (sm == CRB_SM_ACPI_START || sm == CRB_SM_CRB_WITH_ACPI_START)
++	if (sm == TPM2_START_ACPI || sm == TPM2_START_CRB_WITH_ACPI)
+ 		priv->flags |= CRB_FL_ACPI_START;
+ 
+ 	priv->cca = (struct crb_control_area __iomem *)
+diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
+index f2dffa770b8e..696ef1d56b4f 100644
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -1,6 +1,6 @@
+ /*
+  * Copyright (C) 2005, 2006 IBM Corporation
+- * Copyright (C) 2014 Intel Corporation
++ * Copyright (C) 2014, 2015 Intel Corporation
+  *
+  * Authors:
+  * Leendert van Doorn <leendert@watson.ibm.com>
+@@ -28,6 +28,7 @@
+ #include <linux/wait.h>
+ #include <linux/acpi.h>
+ #include <linux/freezer.h>
++#include <acpi/actbl2.h>
+ #include "tpm.h"
+ 
+ enum tis_access {
+@@ -65,6 +66,17 @@ enum tis_defaults {
+ 	TIS_LONG_TIMEOUT = 2000,	/* 2 sec */
+ };
+ 
++struct tpm_info {
++	unsigned long start;
++	unsigned long len;
++	unsigned int irq;
++};
++
++static struct tpm_info tis_default_info = {
++	.start = TIS_MEM_BASE,
++	.len = TIS_MEM_LEN,
++	.irq = 0,
++};
+ 
+ /* Some timeout values are needed before it is known whether the chip is
+  * TPM 1.0 or TPM 2.0.
+@@ -91,26 +103,54 @@ struct priv_data {
+ };
+ 
+ #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
+-static int is_itpm(struct pnp_dev *dev)
++static int has_hid(struct acpi_device *dev, const char *hid)
+ {
+-	struct acpi_device *acpi = pnp_acpi_device(dev);
+ 	struct acpi_hardware_id *id;
+ 
+-	if (!acpi)
+-		return 0;
+-
+-	list_for_each_entry(id, &acpi->pnp.ids, list) {
+-		if (!strcmp("INTC0102", id->id))
++	list_for_each_entry(id, &dev->pnp.ids, list)
++		if (!strcmp(hid, id->id))
+ 			return 1;
+-	}
+ 
+ 	return 0;
+ }
++
++static inline int is_itpm(struct acpi_device *dev)
++{
++	return has_hid(dev, "INTC0102");
++}
++
++static inline int is_fifo(struct acpi_device *dev)
++{
++	struct acpi_table_tpm2 *tbl;
++	acpi_status st;
++
++	/* TPM 1.2 FIFO */
++	if (!has_hid(dev, "MSFT0101"))
++		return 1;
++
++	st = acpi_get_table(ACPI_SIG_TPM2, 1,
++			    (struct acpi_table_header **) &tbl);
++	if (ACPI_FAILURE(st)) {
++		dev_err(&dev->dev, "failed to get TPM2 ACPI table\n");
++		return 0;
++	}
++
++	if (le32_to_cpu(tbl->start_method) != TPM2_START_FIFO)
++		return 0;
++
++	/* TPM 2.0 FIFO */
++	return 1;
++}
+ #else
+-static inline int is_itpm(struct pnp_dev *dev)
++static inline int is_itpm(struct acpi_device *dev)
+ {
+ 	return 0;
+ }
++
++static inline int is_fifo(struct acpi_device *dev)
++{
++	return 1;
++}
+ #endif
+ 
+ /* Before we attempt to access the TPM we must see that the valid bit is set.
+@@ -600,9 +640,8 @@ static void tpm_tis_remove(struct tpm_chip *chip)
+ 	release_locality(chip, chip->vendor.locality, 1);
+ }
+ 
+-static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
+-			resource_size_t start, resource_size_t len,
+-			unsigned int irq)
++static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info,
++			acpi_handle acpi_dev_handle)
+ {
+ 	u32 vendor, intfcaps, intmask;
+ 	int rc, i, irq_s, irq_e, probe;
+@@ -622,7 +661,7 @@ static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
+ 	chip->acpi_dev_handle = acpi_dev_handle;
+ #endif
+ 
+-	chip->vendor.iobase = devm_ioremap(dev, start, len);
++	chip->vendor.iobase = devm_ioremap(dev, tpm_info->start, tpm_info->len);
+ 	if (!chip->vendor.iobase)
+ 		return -EIO;
+ 
+@@ -707,7 +746,7 @@ static int tpm_tis_init(struct device *dev, acpi_handle acpi_dev_handle,
+ 		  chip->vendor.iobase +
+ 		  TPM_INT_ENABLE(chip->vendor.locality));
+ 	if (interrupts)
+-		chip->vendor.irq = irq;
++		chip->vendor.irq = tpm_info->irq;
+ 	if (interrupts && !chip->vendor.irq) {
+ 		irq_s =
+ 		    ioread8(chip->vendor.iobase +
+@@ -890,27 +929,27 @@ static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
+ 				      const struct pnp_device_id *pnp_id)
+ {
+-	resource_size_t start, len;
+-	unsigned int irq = 0;
++	struct tpm_info tpm_info = tis_default_info;
+ 	acpi_handle acpi_dev_handle = NULL;
+ 
+-	start = pnp_mem_start(pnp_dev, 0);
+-	len = pnp_mem_len(pnp_dev, 0);
++	tpm_info.start = pnp_mem_start(pnp_dev, 0);
++	tpm_info.len = pnp_mem_len(pnp_dev, 0);
+ 
+ 	if (pnp_irq_valid(pnp_dev, 0))
+-		irq = pnp_irq(pnp_dev, 0);
++		tpm_info.irq = pnp_irq(pnp_dev, 0);
+ 	else
+ 		interrupts = false;
+ 
+-	if (is_itpm(pnp_dev))
+-		itpm = true;
+-
+ #ifdef CONFIG_ACPI
+-	if (pnp_acpi_device(pnp_dev))
++	if (pnp_acpi_device(pnp_dev)) {
++		if (is_itpm(pnp_acpi_device(pnp_dev)))
++			itpm = true;
++
+ 		acpi_dev_handle = pnp_acpi_device(pnp_dev)->handle;
++	}
+ #endif
+ 
+-	return tpm_tis_init(&pnp_dev->dev, acpi_dev_handle, start, len, irq);
++	return tpm_tis_init(&pnp_dev->dev, &tpm_info, acpi_dev_handle);
+ }
+ 
+ static struct pnp_device_id tpm_pnp_tbl[] = {
+@@ -930,6 +969,7 @@ MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
+ static void tpm_tis_pnp_remove(struct pnp_dev *dev)
+ {
+ 	struct tpm_chip *chip = pnp_get_drvdata(dev);
++
+ 	tpm_chip_unregister(chip);
+ 	tpm_tis_remove(chip);
+ }
+@@ -950,6 +990,79 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
+ MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
+ #endif
+ 
++#ifdef CONFIG_ACPI
++static int tpm_check_resource(struct acpi_resource *ares, void *data)
++{
++	struct tpm_info *tpm_info = (struct tpm_info *) data;
++	struct resource res;
++
++	if (acpi_dev_resource_interrupt(ares, 0, &res)) {
++		tpm_info->irq = res.start;
++	} else if (acpi_dev_resource_memory(ares, &res)) {
++		tpm_info->start = res.start;
++		tpm_info->len = resource_size(&res);
++	}
++
++	return 1;
++}
++
++static int tpm_tis_acpi_init(struct acpi_device *acpi_dev)
++{
++	struct list_head resources;
++	struct tpm_info tpm_info = tis_default_info;
++	int ret;
++
++	if (!is_fifo(acpi_dev))
++		return -ENODEV;
++
++	INIT_LIST_HEAD(&resources);
++	ret = acpi_dev_get_resources(acpi_dev, &resources, tpm_check_resource,
++				     &tpm_info);
++	if (ret < 0)
++		return ret;
++
++	acpi_dev_free_resource_list(&resources);
++
++	if (!tpm_info.irq)
++		interrupts = false;
++
++	if (is_itpm(acpi_dev))
++		itpm = true;
++
++	return tpm_tis_init(&acpi_dev->dev, &tpm_info, acpi_dev->handle);
++}
++
++static int tpm_tis_acpi_remove(struct acpi_device *dev)
++{
++	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
++
++	tpm_chip_unregister(chip);
++	tpm_tis_remove(chip);
++
++	return 0;
++}
++
++static struct acpi_device_id tpm_acpi_tbl[] = {
++	{"MSFT0101", 0},	/* TPM 2.0 */
++	/* Add new here */
++	{"", 0},		/* User Specified */
++	{"", 0}			/* Terminator */
++};
++MODULE_DEVICE_TABLE(acpi, tpm_acpi_tbl);
++
++static struct acpi_driver tis_acpi_driver = {
++	.name = "tpm_tis",
++	.ids = tpm_acpi_tbl,
++	.ops = {
++		.add = tpm_tis_acpi_init,
++		.remove = tpm_tis_acpi_remove,
++	},
++	.drv = {
++		.pm = &tpm_tis_pm,
++	},
++};
++#endif
++
+ static struct platform_driver tis_drv = {
+ 	.driver = {
+ 		.name		= "tpm_tis",
+@@ -966,9 +1079,25 @@ static int __init init_tis(void)
+ {
+ 	int rc;
+ #ifdef CONFIG_PNP
+-	if (!force)
+-		return pnp_register_driver(&tis_pnp_driver);
++	if (!force) {
++		rc = pnp_register_driver(&tis_pnp_driver);
++		if (rc)
++			return rc;
++	}
++#endif
++#ifdef CONFIG_ACPI
++	if (!force) {
++		rc = acpi_bus_register_driver(&tis_acpi_driver);
++		if (rc) {
++#ifdef CONFIG_PNP
++			pnp_unregister_driver(&tis_pnp_driver);
+ #endif
++			return rc;
++		}
++	}
++#endif
++	if (!force)
++		return 0;
+ 
+ 	rc = platform_driver_register(&tis_drv);
+ 	if (rc < 0)
+@@ -978,7 +1107,7 @@ static int __init init_tis(void)
+ 		rc = PTR_ERR(pdev);
+ 		goto err_dev;
+ 	}
+-	rc = tpm_tis_init(&pdev->dev, NULL, TIS_MEM_BASE, TIS_MEM_LEN, 0);
++	rc = tpm_tis_init(&pdev->dev, &tis_default_info, NULL);
+ 	if (rc)
+ 		goto err_init;
+ 	return 0;
+@@ -992,9 +1121,14 @@ err_dev:
+ static void __exit cleanup_tis(void)
+ {
+ 	struct tpm_chip *chip;
+-#ifdef CONFIG_PNP
++#if defined(CONFIG_PNP) || defined(CONFIG_ACPI)
+ 	if (!force) {
++#ifdef CONFIG_ACPI
++		acpi_bus_unregister_driver(&tis_acpi_driver);
++#endif
++#ifdef CONFIG_PNP
+ 		pnp_unregister_driver(&tis_pnp_driver);
++#endif
+ 		return;
+ 	}
+ #endif
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 932bd1862f7a..6e9036a06515 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -1014,13 +1014,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
+ 		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
+ 		8 * 4;
+ 
+-	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+-				&ring_header->dma);
++	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
++						&ring_header->dma, GFP_KERNEL);
+ 	if (unlikely(!ring_header->desc)) {
+-		dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
++		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
+ 		goto err_nomem;
+ 	}
+-	memset(ring_header->desc, 0, ring_header->size);
+ 	/* init TPD ring */
+ 
+ 	tpd_ring[0].dma = roundup(ring_header->dma, 8);
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 6af028d5f9bc..97e4df9bf407 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
+ 	netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
+ 		    jiffies, jiffies - dev->trans_start);
+ 	qca->net_dev->stats.tx_errors++;
+-	/* wake the queue if there is room */
+-	if (qcaspi_tx_ring_has_space(&qca->txr))
+-		netif_wake_queue(dev);
++	/* Trigger tx queue flush and QCA7000 reset */
++	qca->sync = QCASPI_SYNC_UNKNOWN;
+ }
+ 
+ static int
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 7fb244f565b2..13463c4acc86 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1481,6 +1481,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 		if (mdp->cd->shift_rd0)
+ 			desc_status >>= 16;
+ 
++		skb = mdp->rx_skbuff[entry];
+ 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
+ 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
+ 			ndev->stats.rx_errors++;
+@@ -1496,12 +1497,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
+ 				ndev->stats.rx_missed_errors++;
+ 			if (desc_status & RD_RFS10)
+ 				ndev->stats.rx_over_errors++;
+-		} else {
++		} else	if (skb) {
+ 			if (!mdp->cd->hw_swap)
+ 				sh_eth_soft_swap(
+ 					phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ 					pkt_len + 2);
+-			skb = mdp->rx_skbuff[entry];
+ 			mdp->rx_skbuff[entry] = NULL;
+ 			if (mdp->cd->rpadir)
+ 				skb_reserve(skb, NET_IP_ALIGN);
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index ab33262ed826..9c8fabed4444 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock)
+ 	sk->sk_family		= PF_PPPOX;
+ 	sk->sk_protocol		= PX_PROTO_OE;
+ 
++	INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
++		  pppoe_unbind_sock_work);
++
+ 	return 0;
+ }
+ 
+@@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 
+ 	lock_sock(sk);
+ 
+-	INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
+-
+ 	error = -EINVAL;
+ 	if (sp->sa_protocol != PX_PROTO_OE)
+ 		goto end;
+@@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 			po->pppoe_dev = NULL;
+ 		}
+ 
+-		memset(sk_pppox(po) + 1, 0,
+-		       sizeof(struct pppox_sock) - sizeof(struct sock));
++		po->pppoe_ifindex = 0;
++		memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
++		memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
++		memset(&po->chan, 0, sizeof(po->chan));
++		po->next = NULL;
++		po->num = 0;
++
+ 		sk->sk_state = PPPOX_NONE;
+ 	}
+ 
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index e3bfbd4d0136..0bacabfa486e 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -420,6 +420,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct pptp_opt *opt = &po->proto.pptp;
+ 	int error = 0;
+ 
++	if (sockaddr_len < sizeof(struct sockaddr_pppox))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	opt->src_addr = sp->sa_addr.pptp;
+@@ -441,6 +444,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
+ 	struct flowi4 fl4;
+ 	int error = 0;
+ 
++	if (sockaddr_len < sizeof(struct sockaddr_pppox))
++		return -EINVAL;
++
+ 	if (sp->sa_protocol != PX_PROTO_PPTP)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
+index aafa1a1898e4..ce6fad1c43e6 100644
+--- a/drivers/net/usb/r8152.c
++++ b/drivers/net/usb/r8152.c
+@@ -3006,17 +3006,6 @@ static int rtl8152_open(struct net_device *netdev)
+ 
+ 	mutex_lock(&tp->control);
+ 
+-	/* The WORK_ENABLE may be set when autoresume occurs */
+-	if (test_bit(WORK_ENABLE, &tp->flags)) {
+-		clear_bit(WORK_ENABLE, &tp->flags);
+-		usb_kill_urb(tp->intr_urb);
+-		cancel_delayed_work_sync(&tp->schedule);
+-
+-		/* disable the tx/rx, if the workqueue has enabled them. */
+-		if (netif_carrier_ok(netdev))
+-			tp->rtl_ops.disable(tp);
+-	}
+-
+ 	tp->rtl_ops.up(tp);
+ 
+ 	rtl8152_set_speed(tp, AUTONEG_ENABLE,
+@@ -3063,12 +3052,6 @@ static int rtl8152_close(struct net_device *netdev)
+ 	} else {
+ 		mutex_lock(&tp->control);
+ 
+-		/* The autosuspend may have been enabled and wouldn't
+-		 * be disable when autoresume occurs, because the
+-		 * netif_running() would be false.
+-		 */
+-		rtl_runtime_suspend_enable(tp, false);
+-
+ 		tp->rtl_ops.down(tp);
+ 
+ 		mutex_unlock(&tp->control);
+@@ -3369,7 +3352,7 @@ static int rtl8152_resume(struct usb_interface *intf)
+ 		netif_device_attach(tp->netdev);
+ 	}
+ 
+-	if (netif_running(tp->netdev)) {
++	if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
+ 		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ 			rtl_runtime_suspend_enable(tp, false);
+ 			clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+@@ -3387,6 +3370,8 @@ static int rtl8152_resume(struct usb_interface *intf)
+ 		}
+ 		usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ 	} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
++		if (tp->netdev->flags & IFF_UP)
++			rtl_runtime_suspend_enable(tp, false);
+ 		clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ 	}
+ 
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 93e54a0f471a..35882dd690a6 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -2764,6 +2764,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
+ 	ret = toshiba_function_keys_get(dev, &special_functions);
+ 	dev->kbd_function_keys_supported = !ret;
+ 
++	dev->hotkey_event_type = 0;
+ 	if (toshiba_acpi_setup_keyboard(dev))
+ 		pr_info("Unable to activate hotkeys\n");
+ 
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index a086e1d69bc7..0fe15aec7ed0 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1848,6 +1848,11 @@ static const struct usb_device_id acm_ids[] = {
+ 	},
+ #endif
+ 
++	/* Exclude Infineon Flash Loader utility */
++	{ USB_DEVICE(0x058b, 0x0041),
++	.driver_info = IGNORE_DEVICE,
++	},
++
+ 	/* control interfaces without any protocol set */
+ 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
+ 		USB_CDC_PROTO_NONE) },
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b9ddf0c1ffe5..894894f2ff93 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 		   USB_SS_MULT(desc->bmAttributes) > 3) {
+ 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+-				"setting to 3\n", desc->bmAttributes + 1,
++				"setting to 3\n",
++				USB_SS_MULT(desc->bmAttributes),
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bmAttributes = 2;
+ 	}
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 1e9a8c9aa531..d68c4a4db682 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
+ 
+ int usb_device_supports_lpm(struct usb_device *udev)
+ {
++	/* Some devices have trouble with LPM */
++	if (udev->quirks & USB_QUIRK_NO_LPM)
++		return 0;
++
+ 	/* USB 2.1 (and greater) devices indicate LPM support through
+ 	 * their USB 2.0 Extended Capabilities BOS descriptor.
+ 	 */
+@@ -4493,6 +4497,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
+ 		goto fail;
+ 	}
+ 
++	usb_detect_quirks(udev);
++
+ 	if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
+ 		retval = usb_get_bos_descriptor(udev);
+ 		if (!retval) {
+@@ -4691,7 +4697,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
+ 		if (status < 0)
+ 			goto loop;
+ 
+-		usb_detect_quirks(udev);
+ 		if (udev->quirks & USB_QUIRK_DELAY_INIT)
+ 			msleep(1000);
+ 
+@@ -5307,9 +5312,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	if (udev->usb2_hw_lpm_enabled == 1)
+ 		usb_set_usb2_hardware_lpm(udev, 0);
+ 
+-	bos = udev->bos;
+-	udev->bos = NULL;
+-
+ 	/* Disable LPM and LTM while we reset the device and reinstall the alt
+ 	 * settings.  Device-initiated LPM settings, and system exit latency
+ 	 * settings are cleared when the device is reset, so we have to set
+@@ -5318,15 +5320,18 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ 	ret = usb_unlocked_disable_lpm(udev);
+ 	if (ret) {
+ 		dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
+-		goto re_enumerate;
++		goto re_enumerate_no_bos;
+ 	}
+ 	ret = usb_disable_ltm(udev);
+ 	if (ret) {
+ 		dev_err(&udev->dev, "%s Failed to disable LTM\n.",
+ 				__func__);
+-		goto re_enumerate;
++		goto re_enumerate_no_bos;
+ 	}
+ 
++	bos = udev->bos;
++	udev->bos = NULL;
++
+ 	for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+ 
+ 		/* ep0 maxpacket size may change; let the HCD know about it.
+@@ -5423,10 +5428,11 @@ done:
+ 	return 0;
+ 
+ re_enumerate:
+-	/* LPM state doesn't matter when we're about to destroy the device. */
+-	hub_port_logical_disconnect(parent_hub, port1);
+ 	usb_release_bos_descriptor(udev);
+ 	udev->bos = bos;
++re_enumerate_no_bos:
++	/* LPM state doesn't matter when we're about to destroy the device. */
++	hub_port_logical_disconnect(parent_hub, port1);
+ 	return -ENODEV;
+ }
+ 
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index f5a381945db2..017c1de53aa5 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -199,6 +199,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ 			USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ 
++	/* Blackmagic Design Intensity Shuttle */
++	{ USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
++
++	/* Blackmagic Design UltraStudio SDI */
++	{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
++
+ 	{ }  /* terminating entry must be last */
+ };
+ 
+diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
+index b51226abade6..7a454708e948 100644
+--- a/drivers/usb/gadget/udc/pxa27x_udc.c
++++ b/drivers/usb/gadget/udc/pxa27x_udc.c
+@@ -2535,6 +2535,9 @@ static int pxa_udc_suspend(struct platform_device *_dev, pm_message_t state)
+ 	udc->pullup_resume = udc->pullup_on;
+ 	dplus_pullup(udc, 0);
+ 
++	if (udc->driver)
++		udc->driver->disconnect(&udc->gadget);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
+index dc31c425ce01..9f1c0538b211 100644
+--- a/drivers/usb/host/whci/qset.c
++++ b/drivers/usb/host/whci/qset.c
+@@ -377,6 +377,10 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
+ 	if (std->pl_virt == NULL)
+ 		return -ENOMEM;
+ 	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
++	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
++		kfree(std->pl_virt);
++		return -EFAULT;
++	}
+ 
+ 	for (p = 0; p < std->num_pointers; p++) {
+ 		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
+diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
+index 39db8b603627..d1b9e0c7fb0d 100644
+--- a/drivers/usb/musb/Kconfig
++++ b/drivers/usb/musb/Kconfig
+@@ -147,7 +147,7 @@ config USB_TI_CPPI_DMA
+ 
+ config USB_TI_CPPI41_DMA
+ 	bool 'TI CPPI 4.1 (AM335x)'
+-	depends on ARCH_OMAP
++	depends on ARCH_OMAP && DMADEVICES
+ 	select TI_CPPI41
+ 
+ config USB_TUSB_OMAP_DMA
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index eac7ccaa3c85..7d4f51a32e66 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -132,7 +132,6 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
+-	{ USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */
+ 	{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ 	{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
+ 	{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
+diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
+index 3658662898fc..a204782ae530 100644
+--- a/drivers/usb/serial/usb-serial-simple.c
++++ b/drivers/usb/serial/usb-serial-simple.c
+@@ -53,6 +53,7 @@ DEVICE(funsoft, FUNSOFT_IDS);
+ 
+ /* Infineon Flashloader driver */
+ #define FLASHLOADER_IDS()		\
++	{ USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \
+ 	{ USB_DEVICE(0x8087, 0x0716) }
+ DEVICE(flashloader, FLASHLOADER_IDS);
+ 
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 6d3122afeed3..75e4979e6c15 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -796,6 +796,10 @@ static int uas_slave_configure(struct scsi_device *sdev)
+ 	if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
+ 		sdev->no_report_opcodes = 1;
+ 
++	/* A few buggy USB-ATA bridges don't understand FUA */
++	if (devinfo->flags & US_FL_BROKEN_FUA)
++		sdev->broken_fua = 1;
++
+ 	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
+ 	return 0;
+ }
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index 87898ca2ed17..4095824c8c6d 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1987,7 +1987,7 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
+ 		US_FL_IGNORE_RESIDUE ),
+ 
+ /* Reported by Michael Büsch <m@bues.ch> */
+-UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0114,
++UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0116,
+ 		"JMicron",
+ 		"USB to ATA/ATAPI Bridge",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
+index c85ea530085f..ccc113e83d88 100644
+--- a/drivers/usb/storage/unusual_uas.h
++++ b/drivers/usb/storage/unusual_uas.h
+@@ -132,7 +132,7 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
+ 		"JMicron",
+ 		"JMS567",
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+-		US_FL_NO_REPORT_OPCODES),
++		US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES),
+ 
+ /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
+diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
+index 9948c874e3f1..1d0043dc34e4 100644
+--- a/include/linux/usb/quirks.h
++++ b/include/linux/usb/quirks.h
+@@ -47,4 +47,7 @@
+ /* device generates spurious wakeup, ignore remote wakeup capability */
+ #define USB_QUIRK_IGNORE_REMOTE_WAKEUP		BIT(9)
+ 
++/* device can't handle Link Power Management */
++#define USB_QUIRK_NO_LPM			BIT(10)
++
+ #endif /* __LINUX_USB_QUIRKS_H */
+diff --git a/include/net/dst.h b/include/net/dst.h
+index 0fb99a26e973..182b812d45e1 100644
+--- a/include/net/dst.h
++++ b/include/net/dst.h
+@@ -312,6 +312,39 @@ static inline void skb_dst_force(struct sk_buff *skb)
+ 	}
+ }
+ 
++/**
++ * dst_hold_safe - Take a reference on a dst if possible
++ * @dst: pointer to dst entry
++ *
++ * This helper returns false if it could not safely
++ * take a reference on a dst.
++ */
++static inline bool dst_hold_safe(struct dst_entry *dst)
++{
++	if (dst->flags & DST_NOCACHE)
++		return atomic_inc_not_zero(&dst->__refcnt);
++	dst_hold(dst);
++	return true;
++}
++
++/**
++ * skb_dst_force_safe - makes sure skb dst is refcounted
++ * @skb: buffer
++ *
++ * If dst is not yet refcounted and not destroyed, grab a ref on it.
++ */
++static inline void skb_dst_force_safe(struct sk_buff *skb)
++{
++	if (skb_dst_is_noref(skb)) {
++		struct dst_entry *dst = skb_dst(skb);
++
++		if (!dst_hold_safe(dst))
++			dst = NULL;
++
++		skb->_skb_refdst = (unsigned long)dst;
++	}
++}
++
+ 
+ /**
+  *	__skb_tunnel_rx - prepare skb for rx reinsert
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ed01a012f8d5..4c4b21c00828 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -386,6 +386,7 @@ struct sock {
+ 				sk_no_check_rx : 1,
+ 				sk_userlocks : 4,
+ 				sk_protocol  : 8,
++#define SK_PROTOCOL_MAX U8_MAX
+ 				sk_type      : 16;
+ 	kmemcheck_bitfield_end(flags);
+ 	int			sk_wmem_queued;
+@@ -722,6 +723,8 @@ enum sock_flags {
+ 	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
+ };
+ 
++#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
++
+ static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+ {
+ 	nsk->sk_flags = osk->sk_flags;
+@@ -796,7 +799,7 @@ void sk_stream_write_space(struct sock *sk);
+ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ {
+ 	/* dont let skb dst not refcounted, we are going to leave rcu lock */
+-	skb_dst_force(skb);
++	skb_dst_force_safe(skb);
+ 
+ 	if (!sk->sk_backlog.tail)
+ 		sk->sk_backlog.head = skb;
+diff --git a/include/net/vxlan.h b/include/net/vxlan.h
+index 0082b5d33d7d..7ef9272a405a 100644
+--- a/include/net/vxlan.h
++++ b/include/net/vxlan.h
+@@ -78,7 +78,7 @@ struct vxlanhdr {
+ };
+ 
+ /* VXLAN header flags. */
+-#define VXLAN_HF_RCO BIT(24)
++#define VXLAN_HF_RCO BIT(21)
+ #define VXLAN_HF_VNI BIT(27)
+ #define VXLAN_HF_GBP BIT(31)
+ 
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index cf910e48f8f2..5b17447efa8b 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -506,10 +506,11 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+ 	if (!iter->walker)
+ 		return -ENOMEM;
+ 
+-	mutex_lock(&ht->mutex);
+-	iter->walker->tbl = rht_dereference(ht->tbl, ht);
++	spin_lock(&ht->lock);
++	iter->walker->tbl =
++		rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock));
+ 	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
+-	mutex_unlock(&ht->mutex);
++	spin_unlock(&ht->lock);
+ 
+ 	return 0;
+ }
+@@ -523,10 +524,10 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init);
+  */
+ void rhashtable_walk_exit(struct rhashtable_iter *iter)
+ {
+-	mutex_lock(&iter->ht->mutex);
++	spin_lock(&iter->ht->lock);
+ 	if (iter->walker->tbl)
+ 		list_del(&iter->walker->list);
+-	mutex_unlock(&iter->ht->mutex);
++	spin_unlock(&iter->ht->lock);
+ 	kfree(iter->walker);
+ }
+ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
+@@ -550,14 +551,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
+ {
+ 	struct rhashtable *ht = iter->ht;
+ 
+-	mutex_lock(&ht->mutex);
++	rcu_read_lock();
+ 
++	spin_lock(&ht->lock);
+ 	if (iter->walker->tbl)
+ 		list_del(&iter->walker->list);
+-
+-	rcu_read_lock();
+-
+-	mutex_unlock(&ht->mutex);
++	spin_unlock(&ht->lock);
+ 
+ 	if (!iter->walker->tbl) {
+ 		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
+@@ -730,9 +729,6 @@ int rhashtable_init(struct rhashtable *ht,
+ 	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
+ 		return -EINVAL;
+ 
+-	if (params->nelem_hint)
+-		size = rounded_hashtable_size(params);
+-
+ 	memset(ht, 0, sizeof(*ht));
+ 	mutex_init(&ht->mutex);
+ 	spin_lock_init(&ht->lock);
+@@ -752,6 +748,9 @@ int rhashtable_init(struct rhashtable *ht,
+ 
+ 	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
+ 
++	if (params->nelem_hint)
++		size = rounded_hashtable_size(&ht->p);
++
+ 	/* The maximum (not average) chain length grows with the
+ 	 * size of the hash table, at a rate of (log N)/(log log N).
+ 	 * The value of 16 is selected so that even if the hash
+diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
+index 330c1f4a5a0b..a64884bbf0ce 100644
+--- a/net/ax25/af_ax25.c
++++ b/net/ax25/af_ax25.c
+@@ -806,6 +806,9 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
+ 	struct sock *sk;
+ 	ax25_cb *ax25;
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (!net_eq(net, &init_net))
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 4322c833e748..8611bc7bdd32 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -520,6 +520,9 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_le
+ 	if (!addr || addr->sa_family != AF_BLUETOOTH)
+ 		return -EINVAL;
+ 
++	if (addr_len < sizeof(struct sockaddr_sco))
++		return -EINVAL;
++
+ 	lock_sock(sk);
+ 
+ 	if (sk->sk_state != BT_OPEN) {
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 075d2e78c87e..2e5fcda16570 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3661,7 +3661,8 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
+ 	serr->ee.ee_info = tstype;
+ 	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+ 		serr->ee.ee_data = skb_shinfo(skb)->tskey;
+-		if (sk->sk_protocol == IPPROTO_TCP)
++		if (sk->sk_protocol == IPPROTO_TCP &&
++		    sk->sk_type == SOCK_STREAM)
+ 			serr->ee.ee_data -= sk->sk_tskey;
+ 	}
+ 
+@@ -4200,7 +4201,8 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
+ 		return NULL;
+ 	}
+ 
+-	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
++	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
++		2 * ETH_ALEN);
+ 	skb->mac_header += VLAN_HLEN;
+ 	return skb;
+ }
+diff --git a/net/core/sock.c b/net/core/sock.c
+index dc30dc5bb1b8..47fc8bb3b946 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -421,8 +421,6 @@ static void sock_warn_obsolete_bsdism(const char *name)
+ 	}
+ }
+ 
+-#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
+-
+ static void sock_disable_timestamp(struct sock *sk, unsigned long flags)
+ {
+ 	if (sk->sk_flags & flags) {
+@@ -861,7 +859,8 @@ set_rcvbuf:
+ 
+ 		if (val & SOF_TIMESTAMPING_OPT_ID &&
+ 		    !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) {
+-			if (sk->sk_protocol == IPPROTO_TCP) {
++			if (sk->sk_protocol == IPPROTO_TCP &&
++			    sk->sk_type == SOCK_STREAM) {
+ 				if (sk->sk_state != TCP_ESTABLISHED) {
+ 					ret = -EINVAL;
+ 					break;
+diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
+index 754484b3cd0e..2783c538ec19 100644
+--- a/net/decnet/af_decnet.c
++++ b/net/decnet/af_decnet.c
+@@ -678,6 +678,9 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
+ {
+ 	struct sock *sk;
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (!net_eq(net, &init_net))
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index a5aa54ea6533..0cc98b135b8f 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -259,6 +259,9 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
+ 	int try_loading_module = 0;
+ 	int err;
+ 
++	if (protocol < 0 || protocol >= IPPROTO_MAX)
++		return -EINVAL;
++
+ 	sock->state = SS_UNCONNECTED;
+ 
+ 	/* Look for the requested type/protocol pair. */
+diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
+index 34968cd5c146..4b67937692c9 100644
+--- a/net/ipv4/fou.c
++++ b/net/ipv4/fou.c
+@@ -24,6 +24,7 @@ struct fou {
+ 	u16 type;
+ 	struct udp_offload udp_offloads;
+ 	struct list_head list;
++	struct rcu_head rcu;
+ };
+ 
+ #define FOU_F_REMCSUM_NOPARTIAL BIT(0)
+@@ -421,7 +422,7 @@ static void fou_release(struct fou *fou)
+ 	list_del(&fou->list);
+ 	udp_tunnel_sock_release(sock);
+ 
+-	kfree(fou);
++	kfree_rcu(fou, rcu);
+ }
+ 
+ static int fou_encap_init(struct sock *sk, struct fou *fou, struct fou_cfg *cfg)
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 88203e755af8..cd18c3d3251e 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1509,7 +1509,7 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
+ 	if (likely(sk->sk_rx_dst))
+ 		skb_dst_drop(skb);
+ 	else
+-		skb_dst_force(skb);
++		skb_dst_force_safe(skb);
+ 
+ 	__skb_queue_tail(&tp->ucopy.prequeue, skb);
+ 	tp->ucopy.memory += skb->truesize;
+@@ -1714,8 +1714,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 
+-	if (dst) {
+-		dst_hold(dst);
++	if (dst && dst_hold_safe(dst)) {
+ 		sk->sk_rx_dst = dst;
+ 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ 	}
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 986440b24978..1ea4322c3b0c 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3143,7 +3143,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ {
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct tcp_fastopen_request *fo = tp->fastopen_req;
+-	int syn_loss = 0, space, err = 0, copied;
++	int syn_loss = 0, space, err = 0;
+ 	unsigned long last_syn_loss = 0;
+ 	struct sk_buff *syn_data;
+ 
+@@ -3181,17 +3181,18 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+ 		goto fallback;
+ 	syn_data->ip_summed = CHECKSUM_PARTIAL;
+ 	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
+-	copied = copy_from_iter(skb_put(syn_data, space), space,
+-				&fo->data->msg_iter);
+-	if (unlikely(!copied)) {
+-		kfree_skb(syn_data);
+-		goto fallback;
+-	}
+-	if (copied != space) {
+-		skb_trim(syn_data, copied);
+-		space = copied;
++	if (space) {
++		int copied = copy_from_iter(skb_put(syn_data, space), space,
++					    &fo->data->msg_iter);
++		if (unlikely(!copied)) {
++			kfree_skb(syn_data);
++			goto fallback;
++		}
++		if (copied != space) {
++			skb_trim(syn_data, copied);
++			space = copied;
++		}
+ 	}
+-
+ 	/* No more data pending in inet_wait_for_connect() */
+ 	if (space == fo->size)
+ 		fo->data = NULL;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index fd3aa6148dd1..a2d685030a34 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -343,6 +343,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
+ 	setup_timer(&ndev->rs_timer, addrconf_rs_timer,
+ 		    (unsigned long)ndev);
+ 	memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
++
++	if (ndev->cnf.stable_secret.initialized)
++		ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
++	else
++		ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
++
+ 	ndev->cnf.mtu6 = dev->mtu;
+ 	ndev->cnf.sysctl = NULL;
+ 	ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
+@@ -2384,7 +2390,7 @@ ok:
+ #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+ 			if (in6_dev->cnf.optimistic_dad &&
+ 			    !net->ipv6.devconf_all->forwarding && sllao)
+-				addr_flags = IFA_F_OPTIMISTIC;
++				addr_flags |= IFA_F_OPTIMISTIC;
+ #endif
+ 
+ 			/* Do not allow to create too much of autoconfigured
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index 2d044d2a2ccf..bad62fa5e70f 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -109,6 +109,9 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
+ 	int try_loading_module = 0;
+ 	int err;
+ 
++	if (protocol < 0 || protocol >= IPPROTO_MAX)
++		return -EINVAL;
++
+ 	/* Look for the requested type/protocol pair. */
+ lookup_protocol:
+ 	err = -ESOCKTNOSUPPORT;
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index 69f4f689f06a..76be7d311cc4 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -1553,13 +1553,11 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[],
+ 			return -EEXIST;
+ 	} else {
+ 		t = nt;
+-
+-		ip6gre_tunnel_unlink(ign, t);
+-		ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
+-		ip6gre_tunnel_link(ign, t);
+-		netdev_state_change(dev);
+ 	}
+ 
++	ip6gre_tunnel_unlink(ign, t);
++	ip6gre_tnl_change(t, &p, !tb[IFLA_MTU]);
++	ip6gre_tunnel_link(ign, t);
+ 	return 0;
+ }
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index cfb27f56c62f..c1938ad39f8c 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -93,10 +93,9 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ {
+ 	struct dst_entry *dst = skb_dst(skb);
+ 
+-	if (dst) {
++	if (dst && dst_hold_safe(dst)) {
+ 		const struct rt6_info *rt = (const struct rt6_info *)dst;
+ 
+-		dst_hold(dst);
+ 		sk->sk_rx_dst = dst;
+ 		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
+ 		if (rt->rt6i_node)
+diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
+index ee0ea25c8e7a..9a1edcde4ba5 100644
+--- a/net/irda/af_irda.c
++++ b/net/irda/af_irda.c
+@@ -1086,6 +1086,9 @@ static int irda_create(struct net *net, struct socket *sock, int protocol,
+ 	struct sock *sk;
+ 	struct irda_sock *self;
+ 
++	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
++		return -EINVAL;
++
+ 	if (net != &init_net)
+ 		return -EAFNOSUPPORT;
+ 
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index d4b6f3682c14..68c599a5e1d1 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -950,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+ 		}
+ 		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
+ 		if (!netif_is_multiqueue(dev))
+-			sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
++			sch->flags |= TCQ_F_ONETXQUEUE;
+ 	}
+ 
+ 	sch->handle = handle;
+diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
+index 0e4198ee2370..3267a5cbb3e8 100644
+--- a/net/sctp/ipv6.c
++++ b/net/sctp/ipv6.c
+@@ -634,6 +634,7 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+ 	struct sock *newsk;
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ 	struct sctp6_sock *newsctp6sk;
++	struct ipv6_txoptions *opt;
+ 
+ 	newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot);
+ 	if (!newsk)
+@@ -653,6 +654,13 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
+ 
+ 	memcpy(newnp, np, sizeof(struct ipv6_pinfo));
+ 
++	rcu_read_lock();
++	opt = rcu_dereference(np->opt);
++	if (opt)
++		opt = ipv6_dup_options(newsk, opt);
++	RCU_INIT_POINTER(newnp->opt, opt);
++	rcu_read_unlock();
++
+ 	/* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname()
+ 	 * and getpeername().
+ 	 */
+diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
+index 06320c8c1c86..83a07d468644 100644
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -1652,7 +1652,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
+ 
+ 	/* Set an expiration time for the cookie.  */
+ 	cookie->c.expiration = ktime_add(asoc->cookie_life,
+-					 ktime_get());
++					 ktime_get_real());
+ 
+ 	/* Copy the peer's init packet.  */
+ 	memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr,
+@@ -1780,7 +1780,7 @@ no_hmac:
+ 	if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
+ 		kt = skb_get_ktime(skb);
+ 	else
+-		kt = ktime_get();
++		kt = ktime_get_real();
+ 
+ 	if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
+ 		/*
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 66d796075050..a63c2c87a0c6 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7175,6 +7175,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newsk->sk_type = sk->sk_type;
+ 	newsk->sk_bound_dev_if = sk->sk_bound_dev_if;
+ 	newsk->sk_flags = sk->sk_flags;
++	newsk->sk_tsflags = sk->sk_tsflags;
+ 	newsk->sk_no_check_tx = sk->sk_no_check_tx;
+ 	newsk->sk_no_check_rx = sk->sk_no_check_rx;
+ 	newsk->sk_reuse = sk->sk_reuse;
+@@ -7207,6 +7208,9 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->mc_ttl = 1;
+ 	newinet->mc_index = 0;
+ 	newinet->mc_list = NULL;
++
++	if (newsk->sk_flags & SK_FLAGS_TIMESTAMP)
++		net_enable_timestamp();
+ }
+ 
+ static inline void sctp_copy_descendant(struct sock *sk_to,
+diff --git a/net/socket.c b/net/socket.c
+index 884e32997698..dcbfa868e398 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1705,6 +1705,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ 	msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
+ 	/* We assume all kernel code knows the size of sockaddr_storage */
+ 	msg.msg_namelen = 0;
++	msg.msg_iocb = NULL;
+ 	if (sock->file->f_flags & O_NONBLOCK)
+ 		flags |= MSG_DONTWAIT;
+ 	err = sock_recvmsg(sock, &msg, iov_iter_count(&msg.msg_iter), flags);
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 1975fd8d1c10..a398f624c28d 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2072,14 +2072,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ 
+ 	memset(&scm, 0, sizeof(scm));
+ 
+-	err = mutex_lock_interruptible(&u->readlock);
+-	if (unlikely(err)) {
+-		/* recvmsg() in non blocking mode is supposed to return -EAGAIN
+-		 * sk_rcvtimeo is not honored by mutex_lock_interruptible()
+-		 */
+-		err = noblock ? -EAGAIN : -ERESTARTSYS;
+-		goto out;
+-	}
++	mutex_lock(&u->readlock);
+ 
+ 	if (flags & MSG_PEEK)
+ 		skip = sk_peek_offset(sk, flags);
+@@ -2120,12 +2113,12 @@ again:
+ 
+ 			timeo = unix_stream_data_wait(sk, timeo, last);
+ 
+-			if (signal_pending(current)
+-			    ||  mutex_lock_interruptible(&u->readlock)) {
++			if (signal_pending(current)) {
+ 				err = sock_intr_errno(timeo);
+ 				goto out;
+ 			}
+ 
++			mutex_lock(&u->readlock);
+ 			continue;
+  unlock:
+ 			unix_state_unlock(sk);
+diff --git a/security/keys/gc.c b/security/keys/gc.c
+index c7952375ac53..addf060399e0 100644
+--- a/security/keys/gc.c
++++ b/security/keys/gc.c
+@@ -134,6 +134,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		kdebug("- %u", key->serial);
+ 		key_check(key);
+ 
++		/* Throw away the key data if the key is instantiated */
++		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
++		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
++		    key->type->destroy)
++			key->type->destroy(key);
++
+ 		security_key_free(key);
+ 
+ 		/* deal with the user's key tracking and quota */
+@@ -148,10 +154,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
+ 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ 			atomic_dec(&key->user->nikeys);
+ 
+-		/* now throw away the key memory */
+-		if (key->type->destroy)
+-			key->type->destroy(key);
+-
+ 		key_user_put(key->user);
+ 
+ 		kfree(key->description);
+diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
+index 0b9ec78a7a7a..26f0e0a11ed6 100644
+--- a/security/keys/keyctl.c
++++ b/security/keys/keyctl.c
+@@ -757,16 +757,16 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
+ 
+ 	/* the key is probably readable - now try to read it */
+ can_read_key:
+-	ret = key_validate(key);
+-	if (ret == 0) {
+-		ret = -EOPNOTSUPP;
+-		if (key->type->read) {
+-			/* read the data with the semaphore held (since we
+-			 * might sleep) */
+-			down_read(&key->sem);
++	ret = -EOPNOTSUPP;
++	if (key->type->read) {
++		/* Read the data with the semaphore held (since we might sleep)
++		 * to protect against the key being updated or revoked.
++		 */
++		down_read(&key->sem);
++		ret = key_validate(key);
++		if (ret == 0)
+ 			ret = key->type->read(key, buffer, buflen);
+-			up_read(&key->sem);
+-		}
++		up_read(&key->sem);
+ 	}
+ 
+ error2:
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index bd536cb221e2..db91639c81e3 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
+ 		ret = PTR_ERR(keyring);
+ 		goto error2;
+ 	} else if (keyring == new->session_keyring) {
++		key_put(keyring);
+ 		ret = 0;
+ 		goto error2;
+ 	}

diff --git a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
deleted file mode 100644
index 49020d7..0000000
--- a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
+++ /dev/null
@@ -1,81 +0,0 @@
-From 23567fd052a9abb6d67fe8e7a9ccdd9800a540f2 Mon Sep 17 00:00:00 2001
-From: Yevgeny Pats <yevgeny@perception-point.io>
-Date: Tue, 19 Jan 2016 22:09:04 +0000
-Subject: KEYS: Fix keyring ref leak in join_session_keyring()
-
-This fixes CVE-2016-0728.
-
-If a thread is asked to join as a session keyring the keyring that's already
-set as its session, we leak a keyring reference.
-
-This can be tested with the following program:
-
-	#include <stddef.h>
-	#include <stdio.h>
-	#include <sys/types.h>
-	#include <keyutils.h>
-
-	int main(int argc, const char *argv[])
-	{
-		int i = 0;
-		key_serial_t serial;
-
-		serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
-				"leaked-keyring");
-		if (serial < 0) {
-			perror("keyctl");
-			return -1;
-		}
-
-		if (keyctl(KEYCTL_SETPERM, serial,
-			   KEY_POS_ALL | KEY_USR_ALL) < 0) {
-			perror("keyctl");
-			return -1;
-		}
-
-		for (i = 0; i < 100; i++) {
-			serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
-					"leaked-keyring");
-			if (serial < 0) {
-				perror("keyctl");
-				return -1;
-			}
-		}
-
-		return 0;
-	}
-
-If, after the program has run, there something like the following line in
-/proc/keys:
-
-3f3d898f I--Q---   100 perm 3f3f0000     0     0 keyring   leaked-keyring: empty
-
-with a usage count of 100 * the number of times the program has been run,
-then the kernel is malfunctioning.  If leaked-keyring has zero usages or
-has been garbage collected, then the problem is fixed.
-
-Reported-by: Yevgeny Pats <yevgeny@perception-point.io>
-Signed-off-by: David Howells <dhowells@redhat.com>
-Acked-by: Don Zickus <dzickus@redhat.com>
-Acked-by: Prarit Bhargava <prarit@redhat.com>
-Acked-by: Jarod Wilson <jarod@redhat.com>
-Signed-off-by: James Morris <james.l.morris@oracle.com>
----
- security/keys/process_keys.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
-index a3f85d2..e6d50172 100644
---- a/security/keys/process_keys.c
-+++ b/security/keys/process_keys.c
-@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
- 		ret = PTR_ERR(keyring);
- 		goto error2;
- 	} else if (keyring == new->session_keyring) {
-+		key_put(keyring);
- 		ret = 0;
- 		goto error2;
- 	}
--- 
-cgit v0.12
-


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2016-01-20 13:54 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2016-01-20 13:54 UTC (permalink / raw
  To: gentoo-commits

commit:     ec211dd8b245afecea707cdc1aa7cd258866312d
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 20 13:54:44 2016 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jan 20 13:54:44 2016 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ec211dd8

Ensure that thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.

 0000_README                                        |  4 ++
 ...ing-refleak-in-join-session-CVE-2016-0728.patch | 81 ++++++++++++++++++++++
 2 files changed, 85 insertions(+)

diff --git a/0000_README b/0000_README
index 2294ce5..18a8ebc 100644
--- a/0000_README
+++ b/0000_README
@@ -111,6 +111,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=572384
+Desc:   Ensure that thread joining a session keyring does not leak the keyring reference. CVE-2016-0728.
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
new file mode 100644
index 0000000..49020d7
--- /dev/null
+++ b/1520_keyring-refleak-in-join-session-CVE-2016-0728.patch
@@ -0,0 +1,81 @@
+From 23567fd052a9abb6d67fe8e7a9ccdd9800a540f2 Mon Sep 17 00:00:00 2001
+From: Yevgeny Pats <yevgeny@perception-point.io>
+Date: Tue, 19 Jan 2016 22:09:04 +0000
+Subject: KEYS: Fix keyring ref leak in join_session_keyring()
+
+This fixes CVE-2016-0728.
+
+If a thread is asked to join as a session keyring the keyring that's already
+set as its session, we leak a keyring reference.
+
+This can be tested with the following program:
+
+	#include <stddef.h>
+	#include <stdio.h>
+	#include <sys/types.h>
+	#include <keyutils.h>
+
+	int main(int argc, const char *argv[])
+	{
+		int i = 0;
+		key_serial_t serial;
+
+		serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
+				"leaked-keyring");
+		if (serial < 0) {
+			perror("keyctl");
+			return -1;
+		}
+
+		if (keyctl(KEYCTL_SETPERM, serial,
+			   KEY_POS_ALL | KEY_USR_ALL) < 0) {
+			perror("keyctl");
+			return -1;
+		}
+
+		for (i = 0; i < 100; i++) {
+			serial = keyctl(KEYCTL_JOIN_SESSION_KEYRING,
+					"leaked-keyring");
+			if (serial < 0) {
+				perror("keyctl");
+				return -1;
+			}
+		}
+
+		return 0;
+	}
+
+If, after the program has run, there something like the following line in
+/proc/keys:
+
+3f3d898f I--Q---   100 perm 3f3f0000     0     0 keyring   leaked-keyring: empty
+
+with a usage count of 100 * the number of times the program has been run,
+then the kernel is malfunctioning.  If leaked-keyring has zero usages or
+has been garbage collected, then the problem is fixed.
+
+Reported-by: Yevgeny Pats <yevgeny@perception-point.io>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Acked-by: Don Zickus <dzickus@redhat.com>
+Acked-by: Prarit Bhargava <prarit@redhat.com>
+Acked-by: Jarod Wilson <jarod@redhat.com>
+Signed-off-by: James Morris <james.l.morris@oracle.com>
+---
+ security/keys/process_keys.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
+index a3f85d2..e6d50172 100644
+--- a/security/keys/process_keys.c
++++ b/security/keys/process_keys.c
+@@ -794,6 +794,7 @@ long join_session_keyring(const char *name)
+ 		ret = PTR_ERR(keyring);
+ 		goto error2;
+ 	} else if (keyring == new->session_keyring) {
++		key_put(keyring);
+ 		ret = 0;
+ 		goto error2;
+ 	}
+-- 
+cgit v0.12
+


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-12-15 11:17 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-12-15 11:17 UTC (permalink / raw
  To: gentoo-commits

commit:     56e69a84070c5284582a1a12b7670397aaa78c7e
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Dec 15 11:16:50 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Dec 15 11:16:50 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=56e69a84

Linux patch 4.1.15

 0000_README             |    4 +
 1014_linux-4.1.15.patch | 2619 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2623 insertions(+)

diff --git a/0000_README b/0000_README
index bb7a9d9..2294ce5 100644
--- a/0000_README
+++ b/0000_README
@@ -99,6 +99,10 @@ Patch:  1013_linux-4.1.14.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.14
 
+Patch:  1014_linux-4.1.15.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.15
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1014_linux-4.1.15.patch b/1014_linux-4.1.15.patch
new file mode 100644
index 0000000..114781b
--- /dev/null
+++ b/1014_linux-4.1.15.patch
@@ -0,0 +1,2619 @@
+diff --git a/Makefile b/Makefile
+index 091280d66452..cf35f6bcffd8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 14
++SUBLEVEL = 15
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 1ec6441fe2a5..09138ceba046 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3417,6 +3417,7 @@ static void rbd_queue_workfn(struct work_struct *work)
+ 		goto err_rq;
+ 	}
+ 	img_request->rq = rq;
++	snapc = NULL; /* img_request consumes a ref */
+ 
+ 	if (op_type == OBJ_OP_DISCARD)
+ 		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
+diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
+index f51d376d10ba..c2f5117fd8cb 100644
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -3675,6 +3675,11 @@ static int pci_probe(struct pci_dev *dev,
+ 
+ 	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
+ 	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
++	/* JMicron JMB38x often shows 0 at first read, just ignore it */
++	if (!ohci->it_context_support) {
++		ohci_notice(ohci, "overriding IsoXmitIntMask\n");
++		ohci->it_context_support = 0xf;
++	}
+ 	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
+ 	ohci->it_context_mask = ohci->it_context_support;
+ 	ohci->n_it = hweight32(ohci->it_context_mask);
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 9c71295f2fef..85e640440bd9 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -675,7 +675,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ 	{ PHY_ID_BCM5461, 0xfffffff0 },
+ 	{ PHY_ID_BCM54616S, 0xfffffff0 },
+ 	{ PHY_ID_BCM5464, 0xfffffff0 },
+-	{ PHY_ID_BCM5482, 0xfffffff0 },
++	{ PHY_ID_BCM5481, 0xfffffff0 },
+ 	{ PHY_ID_BCM5482, 0xfffffff0 },
+ 	{ PHY_ID_BCM50610, 0xfffffff0 },
+ 	{ PHY_ID_BCM50610M, 0xfffffff0 },
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 4e0470d396a3..71190dc1eacf 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -774,6 +774,7 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+ 	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
++	{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)},	/* XS Stick W100-2 from 4G Systems */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},	/* Olivetti Olicard 100 */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},	/* Olivetti Olicard 120 */
+ 	{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},	/* Olivetti Olicard 140 */
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index b072e17479aa..2b0d84d32db4 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -756,8 +756,16 @@ next_slot:
+ 		}
+ 
+ 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+-		if (key.objectid > ino ||
+-		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
++
++		if (key.objectid > ino)
++			break;
++		if (WARN_ON_ONCE(key.objectid < ino) ||
++		    key.type < BTRFS_EXTENT_DATA_KEY) {
++			ASSERT(del_nr == 0);
++			path->slots[0]++;
++			goto next_slot;
++		}
++		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
+ 			break;
+ 
+ 		fi = btrfs_item_ptr(leaf, path->slots[0],
+@@ -776,8 +784,8 @@ next_slot:
+ 				btrfs_file_extent_inline_len(leaf,
+ 						     path->slots[0], fi);
+ 		} else {
+-			WARN_ON(1);
+-			extent_end = search_start;
++			/* can't happen */
++			BUG();
+ 		}
+ 
+ 		/*
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index e3b39f0c4666..5136c73b3dce 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1294,8 +1294,14 @@ next_slot:
+ 		num_bytes = 0;
+ 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ 
+-		if (found_key.objectid > ino ||
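
The arraymap hunk above matters because elem_size is the value size rounded up
for alignment; copying elem_size bytes would read past the end of the
caller-supplied value. A small illustrative sketch of the distinction (the
constants and names are made up, not the BPF internals):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define round_up(x, a)	(((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		size_t value_size = 12;			/* what the caller provides */
		size_t elem_size = round_up(value_size, 8);	/* 16, for alignment */
		char value[12] = "hello world";
		char *slot = calloc(1, elem_size);

		memcpy(slot, value, value_size);	/* correct: 12 bytes */
		/* memcpy(slot, value, elem_size) would read 4 bytes past
		 * the end of 'value' -- the bug being fixed. */
		printf("slot=%s (elem_size=%zu, value_size=%zu)\n",
		       slot, elem_size, value_size);
		free(slot);
		return 0;
	}
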
+-		    found_key.type > BTRFS_EXTENT_DATA_KEY ||
++		if (found_key.objectid > ino)
++			break;
++		if (WARN_ON_ONCE(found_key.objectid < ino) ||
++		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
++			path->slots[0]++;
++			goto next_slot;
++		}
++		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
+ 		    found_key.offset > end)
+ 			break;
+ 
+@@ -4184,6 +4190,47 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
+ 
+ }
+ 
++static int truncate_inline_extent(struct inode *inode,
++				  struct btrfs_path *path,
++				  struct btrfs_key *found_key,
++				  const u64 item_end,
++				  const u64 new_size)
++{
++	struct extent_buffer *leaf = path->nodes[0];
++	int slot = path->slots[0];
++	struct btrfs_file_extent_item *fi;
++	u32 size = (u32)(new_size - found_key->offset);
++	struct btrfs_root *root = BTRFS_I(inode)->root;
++
++	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
++
++	if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
++		loff_t offset = new_size;
++		loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE);
++
++		/*
++		 * Zero out the remainder of the last page of our inline extent,
++		 * instead of directly truncating our inline extent here - that
++		 * would be much more complex (decompressing all the data, then
++		 * compressing the truncated data, which might be bigger than
++		 * the size of the inline extent, resize the extent, etc).
++		 * We release the path because to get the page we might need to
++		 * read the extent item from disk (data not in the page cache).
++		 */
++		btrfs_release_path(path);
++		return btrfs_truncate_page(inode, offset, page_end - offset, 0);
++	}
++
++	btrfs_set_file_extent_ram_bytes(leaf, fi, size);
++	size = btrfs_file_extent_calc_inline_size(size);
++	btrfs_truncate_item(root, path, size, 1);
++
++	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
++		inode_sub_bytes(inode, item_end + 1 - new_size);
++
++	return 0;
++}
++
+ /*
+  * this can truncate away extent items, csum items and directory items.
+  * It starts at a high offset and removes keys until it can't find
+@@ -4378,27 +4425,40 @@ search_again:
+ 			 * special encodings
+ 			 */
+ 			if (!del_item &&
+-			    btrfs_file_extent_compression(leaf, fi) == 0 &&
+ 			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
+ 			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {
+-				u32 size = new_size - found_key.offset;
+-
+-				if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+-					inode_sub_bytes(inode, item_end + 1 -
+-							new_size);
+ 
+ 				/*
+-				 * update the ram bytes to properly reflect
+-				 * the new size of our item
++				 * Need to release path in order to truncate a
++				 * compressed extent. So delete any accumulated
++				 * extent items so far.
+ 				 */
+-				btrfs_set_file_extent_ram_bytes(leaf, fi, size);
+-				size =
+-				    btrfs_file_extent_calc_inline_size(size);
+-				btrfs_truncate_item(root, path, size, 1);
++				if (btrfs_file_extent_compression(leaf, fi) !=
++				    BTRFS_COMPRESS_NONE && pending_del_nr) {
++					err = btrfs_del_items(trans, root, path,
++							      pending_del_slot,
++							      pending_del_nr);
++					if (err) {
++						btrfs_abort_transaction(trans,
++									root,
++									err);
++						goto error;
++					}
++					pending_del_nr = 0;
++				}
++
++				err = truncate_inline_extent(inode, path,
++							     &found_key,
++							     item_end,
++							     new_size);
++				if (err) {
++					btrfs_abort_transaction(trans,
++								root, err);
++					goto error;
++				}
+ 			} else if (test_bit(BTRFS_ROOT_REF_COWS,
+ 					    &root->state)) {
+-				inode_sub_bytes(inode, item_end + 1 -
+-						found_key.offset);
++				inode_sub_bytes(inode, item_end + 1 - new_size);
+ 			}
+ 		}
+ delete:
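
The truncate_inline_extent() helper introduced above sidesteps rewriting a
compressed inline extent by zeroing the tail of its last page instead. The
arithmetic in isolation, with an ordinary buffer standing in for the page
cache (illustrative sizes):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE	4096UL
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long new_size = 1000;
		unsigned long page_end = ALIGN(new_size, PAGE_SIZE);	/* 4096 */
		char page[PAGE_SIZE];

		memset(page, 'x', sizeof(page));
		/* Everything from the new size to the page boundary is cleared,
		 * leaving the compressed extent itself untouched on disk. */
		memset(page + new_size, 0, page_end - new_size);
		printf("zeroed %lu bytes starting at offset %lu\n",
		       page_end - new_size, new_size);
		return 0;
	}
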
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 8b2c82ce36b3..87c720865ebf 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3166,6 +3166,150 @@ static void clone_update_extent_map(struct inode *inode,
+ 			&BTRFS_I(inode)->runtime_flags);
+ }
+ 
++/*
++ * Make sure we do not end up inserting an inline extent into a file that
++ * already has other (non-inline) extents. If a file has an inline extent, it
++ * cannot have any other extents and the (single) inline extent must start at
++ * file offset 0. Failing to respect these rules will lead to file corruption,
++ * resulting in EIO errors on read/write operations, hitting BUG_ON's in mm, etc
++ *
++ * We can have extents that have been already written to disk or we can have
++ * dirty ranges still in delalloc, in which case the extent maps and items are
++ * created only when we run delalloc, and the delalloc ranges might fall outside
++ * the range we are currently locking in the inode's io tree. So we check the
++ * inode's i_size because of that (i_size updates are done while holding the
++ * i_mutex, which we are holding here).
++ * We also check to see if the inode has a size not greater than "datal" but has
++ * extents beyond it, due to a fallocate with FALLOC_FL_KEEP_SIZE (and we are
++ * protected against such concurrent fallocate calls by the i_mutex).
++ *
++ * If the file has no extents but a size greater than datal, do not allow the
++ * copy because we would need to turn the inline extent into a non-inline one (even
++ * with NO_HOLES enabled). If we find our destination inode only has one inline
++ * extent, just overwrite it with the source inline extent if its size is less
++ * than the source extent's size, or we could copy the source inline extent's
++ * data into the destination inode's inline extent if the later is greater then
++ * the former.
++ */
++static int clone_copy_inline_extent(struct inode *src,
++				    struct inode *dst,
++				    struct btrfs_trans_handle *trans,
++				    struct btrfs_path *path,
++				    struct btrfs_key *new_key,
++				    const u64 drop_start,
++				    const u64 datal,
++				    const u64 skip,
++				    const u64 size,
++				    char *inline_data)
++{
++	struct btrfs_root *root = BTRFS_I(dst)->root;
++	const u64 aligned_end = ALIGN(new_key->offset + datal,
++				      root->sectorsize);
++	int ret;
++	struct btrfs_key key;
++
++	if (new_key->offset > 0)
++		return -EOPNOTSUPP;
++
++	key.objectid = btrfs_ino(dst);
++	key.type = BTRFS_EXTENT_DATA_KEY;
++	key.offset = 0;
++	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++	if (ret < 0) {
++		return ret;
++	} else if (ret > 0) {
++		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
++			ret = btrfs_next_leaf(root, path);
++			if (ret < 0)
++				return ret;
++			else if (ret > 0)
++				goto copy_inline_extent;
++		}
++		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
++		if (key.objectid == btrfs_ino(dst) &&
++		    key.type == BTRFS_EXTENT_DATA_KEY) {
++			ASSERT(key.offset > 0);
++			return -EOPNOTSUPP;
++		}
++	} else if (i_size_read(dst) <= datal) {
++		struct btrfs_file_extent_item *ei;
++		u64 ext_len;
++
++		/*
++		 * If the file size is <= datal, make sure there are no other
++		 * extents following (can happen due to a fallocate call with
++		 * the flag FALLOC_FL_KEEP_SIZE).
++		 */
++		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
++				    struct btrfs_file_extent_item);
++		/*
++		 * If it's an inline extent, it cannot have other extents
++		 * following it.
++		 */
++		if (btrfs_file_extent_type(path->nodes[0], ei) ==
++		    BTRFS_FILE_EXTENT_INLINE)
++			goto copy_inline_extent;
++
++		ext_len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
++		if (ext_len > aligned_end)
++			return -EOPNOTSUPP;
++
++		ret = btrfs_next_item(root, path);
++		if (ret < 0) {
++			return ret;
++		} else if (ret == 0) {
++			btrfs_item_key_to_cpu(path->nodes[0], &key,
++					      path->slots[0]);
++			if (key.objectid == btrfs_ino(dst) &&
++			    key.type == BTRFS_EXTENT_DATA_KEY)
++				return -EOPNOTSUPP;
++		}
++	}
++
++copy_inline_extent:
++	/*
++	 * We have no extent items, or we have an extent at offset 0 which may
++	 * or may not be inlined. All these cases are dealt with in the same way.
++	 */
++	if (i_size_read(dst) > datal) {
++		/*
++		 * If the destination inode has an inline extent...
++		 * This would require copying the data from the source inline
++		 * extent into the beginning of the destination's inline extent.
++		 * But this is really complex, both extents can be compressed
++		 * or just one of them, which would require decompressing and
++		 * re-compressing data (which could increase the new compressed
++		 * size, not allowing the compressed data to fit anymore in an
++		 * inline extent).
++		 * So just don't support this case for now (it should be rare,
++		 * we are not really saving space when cloning inline extents).
++		 */
++		return -EOPNOTSUPP;
++	}
++
++	btrfs_release_path(path);
++	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
++	if (ret)
++		return ret;
++	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
++	if (ret)
++		return ret;
++
++	if (skip) {
++		const u32 start = btrfs_file_extent_calc_inline_size(0);
++
++		memmove(inline_data + start, inline_data + start + skip, datal);
++	}
++
++	write_extent_buffer(path->nodes[0], inline_data,
++			    btrfs_item_ptr_offset(path->nodes[0],
++						  path->slots[0]),
++			    size);
++	inode_add_bytes(dst, datal);
++
++	return 0;
++}
++
+ /**
+  * btrfs_clone() - clone a range from inode file to another
+  *
+@@ -3432,21 +3576,6 @@ process_slot:
+ 			} else if (type == BTRFS_FILE_EXTENT_INLINE) {
+ 				u64 skip = 0;
+ 				u64 trim = 0;
+-				u64 aligned_end = 0;
+-
+-				/*
+-				 * Don't copy an inline extent into an offset
+-				 * greater than zero. Having an inline extent
+-				 * at such an offset results in chaos as btrfs
+-				 * isn't prepared for such cases. Just skip
+-				 * this case for the same reasons as commented
+-				 * at btrfs_ioctl_clone().
+-				 */
+-				if (last_dest_end > 0) {
+-					ret = -EOPNOTSUPP;
+-					btrfs_end_transaction(trans, root);
+-					goto out;
+-				}
+ 
+ 				if (off > key.offset) {
+ 					skip = off - key.offset;
+@@ -3464,42 +3593,22 @@ process_slot:
+ 				size -= skip + trim;
+ 				datal -= skip + trim;
+ 
+-				aligned_end = ALIGN(new_key.offset + datal,
+-						    root->sectorsize);
+-				ret = btrfs_drop_extents(trans, root, inode,
+-							 drop_start,
+-							 aligned_end,
+-							 1);
++				ret = clone_copy_inline_extent(src, inode,
++							       trans, path,
++							       &new_key,
++							       drop_start,
++							       datal,
++							       skip, size, buf);
+ 				if (ret) {
+ 					if (ret != -EOPNOTSUPP)
+ 						btrfs_abort_transaction(trans,
+-							root, ret);
+-					btrfs_end_transaction(trans, root);
+-					goto out;
+-				}
+-
+-				ret = btrfs_insert_empty_item(trans, root, path,
+-							      &new_key, size);
+-				if (ret) {
+-					btrfs_abort_transaction(trans, root,
+-								ret);
++									root,
++									ret);
+ 					btrfs_end_transaction(trans, root);
+ 					goto out;
+ 				}
+-
+-				if (skip) {
+-					u32 start =
+-					  btrfs_file_extent_calc_inline_size(0);
+-					memmove(buf+start, buf+start+skip,
+-						datal);
+-				}
+-
+ 				leaf = path->nodes[0];
+ 				slot = path->slots[0];
+-				write_extent_buffer(leaf, buf,
+-					    btrfs_item_ptr_offset(leaf, slot),
+-					    size);
+-				inode_add_bytes(inode, datal);
+ 			}
+ 
+ 			/* If we have an implicit hole (NO_HOLES feature). */
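
The long comment before clone_copy_inline_extent() boils down to a single
invariant: an inline extent may only live at file offset 0, and a file that
has one may have no other extents at all. A toy validator expressing that
rule (hypothetical types, not btrfs code):

	#include <stdbool.h>
	#include <stdio.h>

	struct extent {
		unsigned long long offset;
		bool is_inline;
	};

	/* Valid iff any inline extent sits at offset 0 and is alone. */
	static bool layout_valid(const struct extent *ext, int n)
	{
		for (int i = 0; i < n; i++)
			if (ext[i].is_inline && (ext[i].offset != 0 || n != 1))
				return false;
		return true;
	}

	int main(void)
	{
		struct extent ok[]  = { { 0, true } };
		struct extent bad[] = { { 0, true }, { 4096, false } };

		printf("ok=%d bad=%d\n", layout_valid(ok, 1),
		       layout_valid(bad, 2));	/* ok=1 bad=0 */
		return 0;
	}
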
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 6f518c90e1c1..1fcd7b6e7564 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -313,8 +313,10 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+ 		/* check to make sure this item is what we want */
+ 		if (found_key.objectid != key.objectid)
+ 			break;
+-		if (found_key.type != BTRFS_XATTR_ITEM_KEY)
++		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
+ 			break;
++		if (found_key.type < BTRFS_XATTR_ITEM_KEY)
++			goto next;
+ 
+ 		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ 		if (verify_dir_item(root, leaf, di))
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 84f37f34f9aa..1e99b29650a9 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1905,7 +1905,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
+ 
+ 	len = sizeof(*head) +
+ 		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
+-		sizeof(struct timespec);
++		sizeof(struct ceph_timespec);
+ 
+ 	/* calculate (max) length for cap releases */
+ 	len += sizeof(struct ceph_mds_request_release) *
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 12756040ca20..8bec8f1e4b31 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -276,8 +276,12 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+ 		dput(dentry);
+ 		dentry = ERR_PTR(-EEXIST);
+ 	}
+-	if (IS_ERR(dentry))
++
++	if (IS_ERR(dentry)) {
+ 		mutex_unlock(&d_inode(parent)->i_mutex);
++		simple_release_fs(&debugfs_mount, &debugfs_mount_count);
++	}
++
+ 	return dentry;
+ }
+ 
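
The debugfs hunk is a cleanup-on-error fix: the filesystem reference taken at
the top of start_creating() was not dropped when the function bailed out with
an error pointer. The shape of the fix, reduced to a hypothetical resource
pair:

	#include <stdio.h>

	static int held;

	static void grab(void)    { held++; }
	static void release(void) { held--; }

	static void *create(int fail)
	{
		grab();
		if (fail) {
			release();	/* the release the fix adds */
			return NULL;
		}
		return &held;
	}

	int main(void)
	{
		create(1);
		printf("held after failed create: %d\n", held);	/* 0 */
		return 0;
	}
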
+diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
+index d41843181818..e770c1ee4613 100644
+--- a/fs/ext4/ext4_jbd2.c
++++ b/fs/ext4/ext4_jbd2.c
+@@ -88,13 +88,13 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
+ 		return 0;
+ 	}
+ 
++	err = handle->h_err;
+ 	if (!handle->h_transaction) {
+-		err = jbd2_journal_stop(handle);
+-		return handle->h_err ? handle->h_err : err;
++		rc = jbd2_journal_stop(handle);
++		return err ? err : rc;
+ 	}
+ 
+ 	sb = handle->h_transaction->t_journal->j_private;
+-	err = handle->h_err;
+ 	rc = jbd2_journal_stop(handle);
+ 
+ 	if (!err)
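
The __ext4_journal_stop() hunk is the classic first-error-wins shape: the
error already recorded on the handle takes precedence over whatever
jbd2_journal_stop() returns, so the earliest failure is the one reported.
Reduced to its essence (illustrative names):

	#include <stdio.h>

	/* Report the error recorded first, falling back to the
	 * teardown's own return code. */
	static int stop_handle(int recorded_err, int stop_rc)
	{
		return recorded_err ? recorded_err : stop_rc;
	}

	int main(void)
	{
		printf("%d\n", stop_handle(-5, 0));	/* -5: handle error kept */
		printf("%d\n", stop_handle(0, -30));	/* -30: stop error surfaces */
		printf("%d\n", stop_handle(-5, -30));	/* -5: first error wins */
		return 0;
	}
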
+diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
+index 5765f88b3904..8082565c59a9 100644
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -426,6 +426,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 	struct buffer_head *bh, *head;
+ 	int ret = 0;
+ 	int nr_submitted = 0;
++	int nr_to_submit = 0;
+ 
+ 	blocksize = 1 << inode->i_blkbits;
+ 
+@@ -478,11 +479,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
+ 			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
+ 		}
+ 		set_buffer_async_write(bh);
++		nr_to_submit++;
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+ 	bh = head = page_buffers(page);
+ 
+-	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
++	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
++	    nr_to_submit) {
+ 		data_page = ext4_encrypt(inode, page);
+ 		if (IS_ERR(data_page)) {
+ 			ret = PTR_ERR(data_page);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ff89971e3ee0..8a3b9f14d198 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -396,9 +396,13 @@ static void ext4_handle_error(struct super_block *sb)
+ 		smp_wmb();
+ 		sb->s_flags |= MS_RDONLY;
+ 	}
+-	if (test_opt(sb, ERRORS_PANIC))
++	if (test_opt(sb, ERRORS_PANIC)) {
++		if (EXT4_SB(sb)->s_journal &&
++		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++			return;
+ 		panic("EXT4-fs (device %s): panic forced after error\n",
+ 			sb->s_id);
++	}
+ }
+ 
+ #define ext4_error_ratelimit(sb)					\
+@@ -587,8 +591,12 @@ void __ext4_abort(struct super_block *sb, const char *function,
+ 			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+ 		save_error_info(sb, function, line);
+ 	}
+-	if (test_opt(sb, ERRORS_PANIC))
++	if (test_opt(sb, ERRORS_PANIC)) {
++		if (EXT4_SB(sb)->s_journal &&
++		  !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
++			return;
+ 		panic("EXT4-fs panic from previous error\n");
++	}
+ }
+ 
+ void __ext4_msg(struct super_block *sb,
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 7003c0925760..0469f32918a5 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -2086,8 +2086,12 @@ static void __journal_abort_soft (journal_t *journal, int errno)
+ 
+ 	__jbd2_journal_abort_hard(journal);
+ 
+-	if (errno)
++	if (errno) {
+ 		jbd2_journal_update_sb_errno(journal);
++		write_lock(&journal->j_state_lock);
++		journal->j_flags |= JBD2_REC_ERR;
++		write_unlock(&journal->j_state_lock);
++	}
+ }
+ 
+ /**
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 976ba792fbc6..7f22b6c6fb50 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1813,7 +1813,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 		if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
+ 			nfsi->attr_gencount = fattr->gencount;
+ 	}
+-	invalid &= ~NFS_INO_INVALID_ATTR;
++
++	/* Don't declare attrcache up to date if there were no attrs! */
++	if (fattr->valid != 0)
++		invalid &= ~NFS_INO_INVALID_ATTR;
++
+ 	/* Don't invalidate the data if we were to blame */
+ 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
+ 				|| S_ISLNK(inode->i_mode)))
+diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
+index e42be52a8c18..5dea913baf46 100644
+--- a/fs/nfs/nfs4client.c
++++ b/fs/nfs/nfs4client.c
+@@ -33,7 +33,7 @@ static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+ 		return ret;
+ 	idr_preload(GFP_KERNEL);
+ 	spin_lock(&nn->nfs_client_lock);
+-	ret = idr_alloc(&nn->cb_ident_idr, clp, 0, 0, GFP_NOWAIT);
++	ret = idr_alloc(&nn->cb_ident_idr, clp, 1, 0, GFP_NOWAIT);
+ 	if (ret >= 0)
+ 		clp->cl_cb_ident = ret;
+ 	spin_unlock(&nn->nfs_client_lock);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 397798368b1a..bb6c324f1f3d 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -765,16 +765,68 @@ void nfs4_unhash_stid(struct nfs4_stid *s)
+ 	s->sc_type = 0;
+ }
+ 
+-static void
++/**
++ * nfs4_get_existing_delegation - Discover if this delegation already exists
++ * @clp:     a pointer to the nfs4_client we're granting a delegation to
++ * @fp:      a pointer to the nfs4_file we're granting a delegation on
++ *
++ * Return:
++ *      On success: 0 if an existing delegation was not found.
++ *
++ *      On error: -EAGAIN if one was previously granted to this nfs4_client
++ *                 for this nfs4_file.
++ *
++ */
++
++static int
++nfs4_get_existing_delegation(struct nfs4_client *clp, struct nfs4_file *fp)
++{
++	struct nfs4_delegation *searchdp = NULL;
++	struct nfs4_client *searchclp = NULL;
++
++	lockdep_assert_held(&state_lock);
++	lockdep_assert_held(&fp->fi_lock);
++
++	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
++		searchclp = searchdp->dl_stid.sc_client;
++		if (clp == searchclp) {
++			return -EAGAIN;
++		}
++	}
++	return 0;
++}
++
++/**
++ * hash_delegation_locked - Add a delegation to the appropriate lists
++ * @dp:     a pointer to the nfs4_delegation we are adding.
++ * @fp:     a pointer to the nfs4_file we're granting a delegation on
++ *
++ * Return:
++ *      On success: 0 if the delegation was successfully hashed.
++ *
++ *      On error: -EAGAIN if one was previously granted to this
++ *                 nfs4_client for this nfs4_file. Delegation is not hashed.
++ *
++ */
++
++static int
+ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+ {
++	int status;
++	struct nfs4_client *clp = dp->dl_stid.sc_client;
++
+ 	lockdep_assert_held(&state_lock);
+ 	lockdep_assert_held(&fp->fi_lock);
+ 
++	status = nfs4_get_existing_delegation(clp, fp);
++	if (status)
++		return status;
++	++fp->fi_delegees;
+ 	atomic_inc(&dp->dl_stid.sc_count);
+ 	dp->dl_stid.sc_type = NFS4_DELEG_STID;
+ 	list_add(&dp->dl_perfile, &fp->fi_delegations);
+-	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
++	list_add(&dp->dl_perclnt, &clp->cl_delegations);
++	return 0;
+ }
+ 
+ static bool
+@@ -3351,6 +3403,7 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = 0;
+ 	stp->st_openstp = NULL;
++	init_rwsem(&stp->st_rwsem);
+ 	spin_lock(&oo->oo_owner.so_client->cl_lock);
+ 	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
+ 	spin_lock(&fp->fi_lock);
+@@ -3940,6 +3993,18 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
+ 	return fl;
+ }
+ 
++/**
++ * nfs4_setlease - Obtain a delegation by requesting lease from vfs layer
++ * @dp:   a pointer to the nfs4_delegation we're adding.
++ *
++ * Return:
++ *      On success: 0.
++ *
++ *      On error: -EAGAIN if there was an existing delegation.
++ *                 nonzero if there is an error in other cases.
++ *
++ */
++
+ static int nfs4_setlease(struct nfs4_delegation *dp)
+ {
+ 	struct nfs4_file *fp = dp->dl_stid.sc_file;
+@@ -3971,16 +4036,19 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
+ 		goto out_unlock;
+ 	/* Race breaker */
+ 	if (fp->fi_deleg_file) {
+-		status = 0;
+-		++fp->fi_delegees;
+-		hash_delegation_locked(dp, fp);
++		status = hash_delegation_locked(dp, fp);
+ 		goto out_unlock;
+ 	}
+ 	fp->fi_deleg_file = filp;
+-	fp->fi_delegees = 1;
+-	hash_delegation_locked(dp, fp);
++	fp->fi_delegees = 0;
++	status = hash_delegation_locked(dp, fp);
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&state_lock);
++	if (status) {
++		/* Should never happen, this is a new fi_deleg_file  */
++		WARN_ON_ONCE(1);
++		goto out_fput;
++	}
+ 	return 0;
+ out_unlock:
+ 	spin_unlock(&fp->fi_lock);
+@@ -4000,6 +4068,15 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ 	if (fp->fi_had_conflict)
+ 		return ERR_PTR(-EAGAIN);
+ 
++	spin_lock(&state_lock);
++	spin_lock(&fp->fi_lock);
++	status = nfs4_get_existing_delegation(clp, fp);
++	spin_unlock(&fp->fi_lock);
++	spin_unlock(&state_lock);
++
++	if (status)
++		return ERR_PTR(status);
++
+ 	dp = alloc_init_deleg(clp, fh, odstate);
+ 	if (!dp)
+ 		return ERR_PTR(-ENOMEM);
+@@ -4018,9 +4095,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
+ 		status = -EAGAIN;
+ 		goto out_unlock;
+ 	}
+-	++fp->fi_delegees;
+-	hash_delegation_locked(dp, fp);
+-	status = 0;
++	status = hash_delegation_locked(dp, fp);
+ out_unlock:
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&state_lock);
+@@ -4181,15 +4256,20 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	 */
+ 	if (stp) {
+ 		/* Stateid was found, this is an OPEN upgrade */
++		down_read(&stp->st_rwsem);
+ 		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
+-		if (status)
++		if (status) {
++			up_read(&stp->st_rwsem);
+ 			goto out;
++		}
+ 	} else {
+ 		stp = open->op_stp;
+ 		open->op_stp = NULL;
+ 		init_open_stateid(stp, fp, open);
++		down_read(&stp->st_rwsem);
+ 		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
+ 		if (status) {
++			up_read(&stp->st_rwsem);
+ 			release_open_stateid(stp);
+ 			goto out;
+ 		}
+@@ -4201,6 +4281,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
+ 	}
+ 	update_stateid(&stp->st_stid.sc_stateid);
+ 	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++	up_read(&stp->st_rwsem);
+ 
+ 	if (nfsd4_has_session(&resp->cstate)) {
+ 		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
+@@ -4777,10 +4858,13 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ 		 * revoked delegations are kept only for free_stateid.
+ 		 */
+ 		return nfserr_bad_stateid;
++	down_write(&stp->st_rwsem);
+ 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+-	if (status)
+-		return status;
+-	return nfs4_check_fh(current_fh, &stp->st_stid);
++	if (status == nfs_ok)
++		status = nfs4_check_fh(current_fh, &stp->st_stid);
++	if (status != nfs_ok)
++		up_write(&stp->st_rwsem);
++	return status;
+ }
+ 
+ /* 
+@@ -4827,6 +4911,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
+ 		return status;
+ 	oo = openowner(stp->st_stateowner);
+ 	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
++		up_write(&stp->st_rwsem);
+ 		nfs4_put_stid(&stp->st_stid);
+ 		return nfserr_bad_stateid;
+ 	}
+@@ -4857,11 +4942,14 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		goto out;
+ 	oo = openowner(stp->st_stateowner);
+ 	status = nfserr_bad_stateid;
+-	if (oo->oo_flags & NFS4_OO_CONFIRMED)
++	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
++		up_write(&stp->st_rwsem);
+ 		goto put_stateid;
++	}
+ 	oo->oo_flags |= NFS4_OO_CONFIRMED;
+ 	update_stateid(&stp->st_stid.sc_stateid);
+ 	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++	up_write(&stp->st_rwsem);
+ 	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
+ 		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
+ 
+@@ -4940,6 +5028,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
+ 	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
+ 	status = nfs_ok;
+ put_stateid:
++	up_write(&stp->st_rwsem);
+ 	nfs4_put_stid(&stp->st_stid);
+ out:
+ 	nfsd4_bump_seqid(cstate, status);
+@@ -4993,6 +5082,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		goto out; 
+ 	update_stateid(&stp->st_stid.sc_stateid);
+ 	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
++	up_write(&stp->st_rwsem);
+ 
+ 	nfsd4_close_open_stateid(stp);
+ 
+@@ -5223,6 +5313,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ 	stp->st_access_bmap = 0;
+ 	stp->st_deny_bmap = open_stp->st_deny_bmap;
+ 	stp->st_openstp = open_stp;
++	init_rwsem(&stp->st_rwsem);
+ 	list_add(&stp->st_locks, &open_stp->st_locks);
+ 	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ 	spin_lock(&fp->fi_lock);
+@@ -5391,6 +5482,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 					&open_stp, nn);
+ 		if (status)
+ 			goto out;
++		up_write(&open_stp->st_rwsem);
+ 		open_sop = openowner(open_stp->st_stateowner);
+ 		status = nfserr_bad_stateid;
+ 		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
+@@ -5398,6 +5490,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 			goto out;
+ 		status = lookup_or_create_lock_state(cstate, open_stp, lock,
+ 							&lock_stp, &new);
++		if (status == nfs_ok)
++			down_write(&lock_stp->st_rwsem);
+ 	} else {
+ 		status = nfs4_preprocess_seqid_op(cstate,
+ 				       lock->lk_old_lock_seqid,
+@@ -5503,6 +5597,8 @@ out:
+ 		    seqid_mutating_err(ntohl(status)))
+ 			lock_sop->lo_owner.so_seqid++;
+ 
++		up_write(&lock_stp->st_rwsem);
++
+ 		/*
+ 		 * If this is a new, never-before-used stateid, and we are
+ 		 * returning an error, then just go ahead and release it.
+@@ -5673,6 +5769,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ fput:
+ 	fput(filp);
+ put_stateid:
++	up_write(&stp->st_rwsem);
+ 	nfs4_put_stid(&stp->st_stid);
+ out:
+ 	nfsd4_bump_seqid(cstate, status);
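
The nfsd changes above implement grant-once semantics: before hashing a new
delegation, the file's existing delegations are scanned for the same client
and -EAGAIN is returned if one is found. A toy in-memory version of the
check-before-insert (single-threaded; the kernel holds state_lock and fi_lock
across both steps so the scan cannot race with the insert):

	#include <stdio.h>
	#include <string.h>

	#define EAGAIN		11	/* local stand-in; errno.h not needed */
	#define MAX_GRANTS	8

	static const char *grants[MAX_GRANTS];
	static int ngrants;

	static int grant_delegation(const char *client)
	{
		for (int i = 0; i < ngrants; i++)
			if (strcmp(grants[i], client) == 0)
				return -EAGAIN;	/* already granted */
		if (ngrants == MAX_GRANTS)
			return -EAGAIN;		/* toy table full */
		grants[ngrants++] = client;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", grant_delegation("clientA"));	/* 0 */
		printf("%d\n", grant_delegation("clientA"));	/* -11 */
		return 0;
	}
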
+diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
+index dbc4f85a5008..67685b6cfef3 100644
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -533,15 +533,16 @@ struct nfs4_file {
+  * Better suggestions welcome.
+  */
+ struct nfs4_ol_stateid {
+-	struct nfs4_stid    st_stid; /* must be first field */
+-	struct list_head              st_perfile;
+-	struct list_head              st_perstateowner;
+-	struct list_head              st_locks;
+-	struct nfs4_stateowner      * st_stateowner;
+-	struct nfs4_clnt_odstate    * st_clnt_odstate;
+-	unsigned char                 st_access_bmap;
+-	unsigned char                 st_deny_bmap;
+-	struct nfs4_ol_stateid         * st_openstp;
++	struct nfs4_stid		st_stid;
++	struct list_head		st_perfile;
++	struct list_head		st_perstateowner;
++	struct list_head		st_locks;
++	struct nfs4_stateowner		*st_stateowner;
++	struct nfs4_clnt_odstate	*st_clnt_odstate;
++	unsigned char			st_access_bmap;
++	unsigned char			st_deny_bmap;
++	struct nfs4_ol_stateid		*st_openstp;
++	struct rw_semaphore		st_rwsem;
+ };
+ 
+ static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
+diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
+index 176fe6afd94e..4d5e0a573f4f 100644
+--- a/fs/ocfs2/namei.c
++++ b/fs/ocfs2/namei.c
+@@ -365,6 +365,8 @@ static int ocfs2_mknod(struct inode *dir,
+ 		mlog_errno(status);
+ 		goto leave;
+ 	}
++	/* update inode->i_mode after mask with "umask". */
++	inode->i_mode = mode;
+ 
+ 	handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
+ 							    S_ISDIR(mode),
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index 82806c60aa42..e4b464983322 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -224,7 +224,7 @@ struct ipv6_pinfo {
+ 	struct ipv6_ac_socklist	*ipv6_ac_list;
+ 	struct ipv6_fl_socklist __rcu *ipv6_fl_list;
+ 
+-	struct ipv6_txoptions	*opt;
++	struct ipv6_txoptions __rcu	*opt;
+ 	struct sk_buff		*pktoptions;
+ 	struct sk_buff		*rxpmtu;
+ 	struct inet6_cork	cork;
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index eb1cebed3f36..c90c9b70e568 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1007,6 +1007,7 @@ struct journal_s
+ #define JBD2_ABORT_ON_SYNCDATA_ERR	0x040	/* Abort the journal on file
+ 						 * data write error in ordered
+ 						 * mode */
++#define JBD2_REC_ERR	0x080	/* The errno in the sb has been recorded */
+ 
+ /*
+  * Function declarations for the journaling transaction and buffer
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index dfe4ddfbb43c..e830c3dff61a 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -63,6 +63,7 @@ struct unix_sock {
+ #define UNIX_GC_CANDIDATE	0
+ #define UNIX_GC_MAYBE_CYCLE	1
+ 	struct socket_wq	peer_wq;
++	wait_queue_t		peer_wake;
+ };
+ 
+ static inline struct unix_sock *unix_sk(struct sock *sk)
+diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
+index b8529aa1dae7..b0f7445c0fdc 100644
+--- a/include/net/ip6_tunnel.h
++++ b/include/net/ip6_tunnel.h
+@@ -83,11 +83,12 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb,
+ 	err = ip6_local_out_sk(sk, skb);
+ 
+ 	if (net_xmit_eval(err) == 0) {
+-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
++		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+ 		u64_stats_update_begin(&tstats->syncp);
+ 		tstats->tx_bytes += pkt_len;
+ 		tstats->tx_packets++;
+ 		u64_stats_update_end(&tstats->syncp);
++		put_cpu_ptr(tstats);
+ 	} else {
+ 		stats->tx_errors++;
+ 		stats->tx_aborted_errors++;
+diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
+index d8214cb88bbc..9c2897e56ee1 100644
+--- a/include/net/ip_tunnels.h
++++ b/include/net/ip_tunnels.h
+@@ -207,12 +207,13 @@ static inline void iptunnel_xmit_stats(int err,
+ 				       struct pcpu_sw_netstats __percpu *stats)
+ {
+ 	if (err > 0) {
+-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
++		struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
+ 
+ 		u64_stats_update_begin(&tstats->syncp);
+ 		tstats->tx_bytes += err;
+ 		tstats->tx_packets++;
+ 		u64_stats_update_end(&tstats->syncp);
++		put_cpu_ptr(tstats);
+ 	} else if (err < 0) {
+ 		err_stats->tx_errors++;
+ 		err_stats->tx_aborted_errors++;
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index eec8ad3c9843..df555ecd4002 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -205,6 +205,7 @@ extern rwlock_t ip6_ra_lock;
+  */
+ 
+ struct ipv6_txoptions {
++	atomic_t		refcnt;
+ 	/* Length of this structure */
+ 	int			tot_len;
+ 
+@@ -217,7 +218,7 @@ struct ipv6_txoptions {
+ 	struct ipv6_opt_hdr	*dst0opt;
+ 	struct ipv6_rt_hdr	*srcrt;	/* Routing Header */
+ 	struct ipv6_opt_hdr	*dst1opt;
+-
++	struct rcu_head		rcu;
+ 	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
+ };
+ 
+@@ -250,6 +251,24 @@ struct ipv6_fl_socklist {
+ 	struct rcu_head			rcu;
+ };
+ 
++static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
++{
++	struct ipv6_txoptions *opt;
++
++	rcu_read_lock();
++	opt = rcu_dereference(np->opt);
++	if (opt && !atomic_inc_not_zero(&opt->refcnt))
++		opt = NULL;
++	rcu_read_unlock();
++	return opt;
++}
++
++static inline void txopt_put(struct ipv6_txoptions *opt)
++{
++	if (opt && atomic_dec_and_test(&opt->refcnt))
++		kfree_rcu(opt, rcu);
++}
++
+ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
+ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
+ 					 struct ip6_flowlabel *fl,
+@@ -488,6 +507,7 @@ struct ip6_create_arg {
+ 	u32 user;
+ 	const struct in6_addr *src;
+ 	const struct in6_addr *dst;
++	int iif;
+ 	u8 ecn;
+ };
+ 
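
txopt_get() above only takes a reference while the count is still non-zero, so
a reader racing with the final txopt_put() can never resurrect an option block
that is already on its way to kfree_rcu(); the surrounding rcu_read_lock()
keeps the memory readable while the attempt is made. A userspace sketch of the
inc-not-zero step alone, using C11 atomics (the RCU half is omitted):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Take a reference only if the count has not already hit zero.
	 * Returns true if a reference was taken. */
	static bool inc_not_zero(atomic_int *refcnt)
	{
		int old = atomic_load(refcnt);

		while (old != 0) {
			if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
				return true;
			/* 'old' was reloaded by the failed CAS; retry. */
		}
		return false;
	}

	int main(void)
	{
		atomic_int live = 2, dying = 0;

		printf("live:  %d\n", inc_not_zero(&live));	/* 1: ref taken */
		printf("dying: %d\n", inc_not_zero(&dying));	/* 0: too late */
		return 0;
	}
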
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 6d778efcfdfd..080b657ef8fb 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -61,6 +61,9 @@ struct Qdisc {
+ 				      */
+ #define TCQ_F_WARN_NONWC	(1 << 16)
+ #define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
++#define TCQ_F_NOPARENT		0x40 /* root of its hierarchy:
++				      * qdisc_tree_decrease_qlen() should stop.
++				      */
+ 	u32			limit;
+ 	const struct Qdisc_ops	*ops;
+ 	struct qdisc_size_table	__rcu *stab;
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 8a6616583f38..1c1b8ab34037 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -109,7 +109,7 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
+ 		/* all elements already exist */
+ 		return -EEXIST;
+ 
+-	memcpy(array->value + array->elem_size * index, value, array->elem_size);
++	memcpy(array->value + array->elem_size * index, value, map->value_size);
+ 	return 0;
+ }
+ 
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 2237c1b3cdd2..d6e8cfcb6f7c 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2207,7 +2207,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
+ 	ndm->ndm_pad2    = 0;
+ 	ndm->ndm_flags	 = pn->flags | NTF_PROXY;
+ 	ndm->ndm_type	 = RTN_UNICAST;
+-	ndm->ndm_ifindex = pn->dev->ifindex;
++	ndm->ndm_ifindex = pn->dev ? pn->dev->ifindex : 0;
+ 	ndm->ndm_state	 = NUD_NONE;
+ 
+ 	if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
+@@ -2282,7 +2282,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ 		if (h > s_h)
+ 			s_idx = 0;
+ 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+-			if (dev_net(n->dev) != net)
++			if (pneigh_net(n) != net)
+ 				continue;
+ 			if (idx < s_idx)
+ 				goto next;
+diff --git a/net/core/scm.c b/net/core/scm.c
+index 3b6899b7d810..8a1741b14302 100644
+--- a/net/core/scm.c
++++ b/net/core/scm.c
+@@ -305,6 +305,8 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
+ 			err = put_user(cmlen, &cm->cmsg_len);
+ 		if (!err) {
+ 			cmlen = CMSG_SPACE(i*sizeof(int));
++			if (msg->msg_controllen < cmlen)
++				cmlen = msg->msg_controllen;
+ 			msg->msg_control += cmlen;
+ 			msg->msg_controllen -= cmlen;
+ 		}
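
The scm_detach_fds() hunk clamps the cmsg advance to the space that actually
remains, so the unsigned msg_controllen can never wrap below zero. The clamp
in isolation:

	#include <stdio.h>

	/* Limit the step to what remains before subtracting; without the
	 * clamp an oversized step would wrap the unsigned counter. */
	static void consume(size_t *remaining, size_t step)
	{
		if (*remaining < step)
			step = *remaining;
		*remaining -= step;
	}

	int main(void)
	{
		size_t controllen = 20;

		consume(&controllen, 24);	/* clamped to 20 */
		printf("remaining: %zu\n", controllen);	/* 0, not a wrapped value */
		return 0;
	}
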
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index 5165571f397a..a0490508d213 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -202,7 +202,9 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ 	security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+ 
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ 	if (IS_ERR(dst)) {
+@@ -219,7 +221,10 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
+ 							 &ireq->ir_v6_loc_addr,
+ 							 &ireq->ir_v6_rmt_addr);
+ 		fl6.daddr = ireq->ir_v6_rmt_addr;
+-		err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++		rcu_read_lock();
++		err = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++			       np->tclass);
++		rcu_read_unlock();
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -415,6 +420,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ {
+ 	struct inet_request_sock *ireq = inet_rsk(req);
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
++	struct ipv6_txoptions *opt;
+ 	struct inet_sock *newinet;
+ 	struct dccp6_sock *newdp6;
+ 	struct sock *newsk;
+@@ -534,13 +540,15 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
+ 	 * Yes, keeping reference count would be much more clever, but we make
+ 	 * one more one thing there: reattach optmem to newsk.
+ 	 */
+-	if (np->opt != NULL)
+-		newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++	opt = rcu_dereference(np->opt);
++	if (opt) {
++		opt = ipv6_dup_options(newsk, opt);
++		RCU_INIT_POINTER(newnp->opt, opt);
++	}
+ 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
+-	if (newnp->opt != NULL)
+-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+-						     newnp->opt->opt_flen);
++	if (opt)
++		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++						    opt->opt_flen;
+ 
+ 	dccp_sync_mss(newsk, dst_mtu(dst));
+ 
+@@ -793,6 +801,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct dccp_sock *dp = dccp_sk(sk);
+ 	struct in6_addr *saddr = NULL, *final_p, final;
++	struct ipv6_txoptions *opt;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+ 	int addr_type;
+@@ -892,7 +901,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	fl6.fl6_sport = inet->inet_sport;
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++	final_p = fl6_update_dst(&fl6, opt, &final);
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ 	if (IS_ERR(dst)) {
+@@ -912,9 +922,8 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	__ip6_dst_store(sk, dst, NULL, NULL);
+ 
+ 	icsk->icsk_ext_hdr_len = 0;
+-	if (np->opt != NULL)
+-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+-					  np->opt->opt_nflen);
++	if (opt)
++		icsk->icsk_ext_hdr_len = opt->opt_flen + opt->opt_nflen;
+ 
+ 	inet->inet_dport = usin->sin6_port;
+ 
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index df28693f32e1..c3bfebd501ed 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -134,7 +134,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+ 			      struct mfc_cache *c, struct rtmsg *rtm);
+ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
+ 				 int cmd);
+-static void mroute_clean_tables(struct mr_table *mrt);
++static void mroute_clean_tables(struct mr_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+ 
+ #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+@@ -351,7 +351,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+ static void ipmr_free_table(struct mr_table *mrt)
+ {
+ 	del_timer_sync(&mrt->ipmr_expire_timer);
+-	mroute_clean_tables(mrt);
++	mroute_clean_tables(mrt, true);
+ 	kfree(mrt);
+ }
+ 
+@@ -1209,7 +1209,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+  *	Close the multicast socket, and clear the vif tables etc
+  */
+ 
+-static void mroute_clean_tables(struct mr_table *mrt)
++static void mroute_clean_tables(struct mr_table *mrt, bool all)
+ {
+ 	int i;
+ 	LIST_HEAD(list);
+@@ -1218,8 +1218,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
+ 	/* Shut down all active vif entries */
+ 
+ 	for (i = 0; i < mrt->maxvif; i++) {
+-		if (!(mrt->vif_table[i].flags & VIFF_STATIC))
+-			vif_delete(mrt, i, 0, &list);
++		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
++			continue;
++		vif_delete(mrt, i, 0, &list);
+ 	}
+ 	unregister_netdevice_many(&list);
+ 
+@@ -1227,7 +1228,7 @@ static void mroute_clean_tables(struct mr_table *mrt)
+ 
+ 	for (i = 0; i < MFC_LINES; i++) {
+ 		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+-			if (c->mfc_flags & MFC_STATIC)
++			if (!all && (c->mfc_flags & MFC_STATIC))
+ 				continue;
+ 			list_del_rcu(&c->list);
+ 			mroute_netlink_event(mrt, c, RTM_DELROUTE);
+@@ -1262,7 +1263,7 @@ static void mrtsock_destruct(struct sock *sk)
+ 						    NETCONFA_IFINDEX_ALL,
+ 						    net->ipv4.devconf_all);
+ 			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
+-			mroute_clean_tables(mrt);
++			mroute_clean_tables(mrt, false);
+ 		}
+ 	}
+ 	rtnl_unlock();
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index c9ab964189a0..87463c814896 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4438,19 +4438,34 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
+ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ {
+ 	struct sk_buff *skb;
++	int err = -ENOMEM;
++	int data_len = 0;
+ 	bool fragstolen;
+ 
+ 	if (size == 0)
+ 		return 0;
+ 
+-	skb = alloc_skb(size, sk->sk_allocation);
++	if (size > PAGE_SIZE) {
++		int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
++
++		data_len = npages << PAGE_SHIFT;
++		size = data_len + (size & ~PAGE_MASK);
++	}
++	skb = alloc_skb_with_frags(size - data_len, data_len,
++				   PAGE_ALLOC_COSTLY_ORDER,
++				   &err, sk->sk_allocation);
+ 	if (!skb)
+ 		goto err;
+ 
++	skb_put(skb, size - data_len);
++	skb->data_len = data_len;
++	skb->len = size;
++
+ 	if (tcp_try_rmem_schedule(sk, skb, skb->truesize))
+ 		goto err_free;
+ 
+-	if (memcpy_from_msg(skb_put(skb, size), msg, size))
++	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
++	if (err)
+ 		goto err_free;
+ 
+ 	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
+@@ -4466,7 +4481,8 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ err_free:
+ 	kfree_skb(skb);
+ err:
+-	return -ENOMEM;
++	return err;
++
+ }
+ 
+ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+@@ -5622,6 +5638,7 @@ discard:
+ 		}
+ 
+ 		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
++		tp->copied_seq = tp->rcv_nxt;
+ 		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
+ 
+ 		/* RFC1323: The window in SYN & SYN/ACK segments is
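
The tcp_send_rcvq() hunk stops allocating one huge linear skb for repair-mode
writes: whole pages go into fragments, capped at MAX_SKB_FRAGS, and only the
sub-page remainder stays in the linear head. The split arithmetic in isolation
(illustrative constants; MAX_SKB_FRAGS varies with configuration):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define MAX_SKB_FRAGS	17	/* typical value */

	int main(void)
	{
		size_t size = 3 * PAGE_SIZE + 100;	/* 12388 bytes requested */
		size_t data_len = 0;

		if (size > PAGE_SIZE) {
			size_t npages = size >> PAGE_SHIFT;

			if (npages > MAX_SKB_FRAGS)
				npages = MAX_SKB_FRAGS;
			data_len = npages << PAGE_SHIFT;
			size = data_len + (size & ~PAGE_MASK);
		}
		printf("linear head: %zu bytes, paged: %zu bytes\n",
		       size - data_len, data_len);	/* 100 and 12288 */
		return 0;
	}
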
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 441ca6f38981..88203e755af8 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -922,7 +922,8 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
+ 	}
+ 
+ 	md5sig = rcu_dereference_protected(tp->md5sig_info,
+-					   sock_owned_by_user(sk));
++					   sock_owned_by_user(sk) ||
++					   lockdep_is_held(&sk->sk_lock.slock));
+ 	if (!md5sig) {
+ 		md5sig = kmalloc(sizeof(*md5sig), gfp);
+ 		if (!md5sig)
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 8c65dc147d8b..c8f97858d6f6 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -176,6 +176,18 @@ static int tcp_write_timeout(struct sock *sk)
+ 		syn_set = true;
+ 	} else {
+ 		if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
++			/* Some middle-boxes may black-hole Fast Open _after_
++			 * the handshake. Therefore we conservatively disable
++			 * Fast Open on this path on recurring timeouts with
++			 * few or zero bytes acked after Fast Open.
++			 */
++			if (tp->syn_data_acked &&
++			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
++				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
++				if (icsk->icsk_retransmits == sysctl_tcp_retries1)
++					NET_INC_STATS_BH(sock_net(sk),
++							 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
++			}
+ 			/* Black hole detection */
+ 			tcp_mtu_probing(icsk, sk);
+ 
+diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
+index eef63b394c5a..2d044d2a2ccf 100644
+--- a/net/ipv6/af_inet6.c
++++ b/net/ipv6/af_inet6.c
+@@ -425,9 +425,11 @@ void inet6_destroy_sock(struct sock *sk)
+ 
+ 	/* Free tx options */
+ 
+-	opt = xchg(&np->opt, NULL);
+-	if (opt)
+-		sock_kfree_s(sk, opt, opt->tot_len);
++	opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
++	if (opt) {
++		atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++		txopt_put(opt);
++	}
+ }
+ EXPORT_SYMBOL_GPL(inet6_destroy_sock);
+ 
+@@ -656,7 +658,10 @@ int inet6_sk_rebuild_header(struct sock *sk)
+ 		fl6.fl6_sport = inet->inet_sport;
+ 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-		final_p = fl6_update_dst(&fl6, np->opt, &final);
++		rcu_read_lock();
++		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
++					 &final);
++		rcu_read_unlock();
+ 
+ 		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ 		if (IS_ERR(dst)) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index b10a88986a98..13ca4cf5616f 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -167,8 +167,10 @@ ipv4_connected:
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+-	opt = flowlabel ? flowlabel->opt : np->opt;
++	rcu_read_lock();
++	opt = flowlabel ? flowlabel->opt : rcu_dereference(np->opt);
+ 	final_p = fl6_update_dst(&fl6, opt, &final);
++	rcu_read_unlock();
+ 
+ 	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
+ 	err = 0;
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index a7bbbe45570b..adbd6958c398 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -727,6 +727,7 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
+ 			*((char **)&opt2->dst1opt) += dif;
+ 		if (opt2->srcrt)
+ 			*((char **)&opt2->srcrt) += dif;
++		atomic_set(&opt2->refcnt, 1);
+ 	}
+ 	return opt2;
+ }
+@@ -790,7 +791,7 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ 		return ERR_PTR(-ENOBUFS);
+ 
+ 	memset(opt2, 0, tot_len);
+-
++	atomic_set(&opt2->refcnt, 1);
+ 	opt2->tot_len = tot_len;
+ 	p = (char *)(opt2 + 1);
+ 
+diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
+index 6927f3fb5597..9beed302eb36 100644
+--- a/net/ipv6/inet6_connection_sock.c
++++ b/net/ipv6/inet6_connection_sock.c
+@@ -77,7 +77,9 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ 	memset(fl6, 0, sizeof(*fl6));
+ 	fl6->flowi6_proto = IPPROTO_TCP;
+ 	fl6->daddr = ireq->ir_v6_rmt_addr;
+-	final_p = fl6_update_dst(fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 	fl6->saddr = ireq->ir_v6_loc_addr;
+ 	fl6->flowi6_oif = ireq->ir_iif;
+ 	fl6->flowi6_mark = ireq->ir_mark;
+@@ -207,7 +209,9 @@ static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ 	fl6->fl6_dport = inet->inet_dport;
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+ 
+-	final_p = fl6_update_dst(fl6, np->opt, &final);
++	rcu_read_lock();
++	final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
++	rcu_read_unlock();
+ 
+ 	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ 	if (!dst) {
+@@ -240,7 +244,8 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
+ 	/* Restore final destination back after routing done */
+ 	fl6.daddr = sk->sk_v6_daddr;
+ 
+-	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
++	res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
++		       np->tclass);
+ 	rcu_read_unlock();
+ 	return res;
+ }
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 5f36266b1f5e..a7aef4b52d65 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -118,7 +118,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
+ 			      int cmd);
+ static int ip6mr_rtm_dumproute(struct sk_buff *skb,
+ 			       struct netlink_callback *cb);
+-static void mroute_clean_tables(struct mr6_table *mrt);
++static void mroute_clean_tables(struct mr6_table *mrt, bool all);
+ static void ipmr_expire_process(unsigned long arg);
+ 
+ #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
+@@ -335,7 +335,7 @@ static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
+ static void ip6mr_free_table(struct mr6_table *mrt)
+ {
+ 	del_timer_sync(&mrt->ipmr_expire_timer);
+-	mroute_clean_tables(mrt);
++	mroute_clean_tables(mrt, true);
+ 	kfree(mrt);
+ }
+ 
+@@ -1543,7 +1543,7 @@ static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
+  *	Close the multicast socket, and clear the vif tables etc
+  */
+ 
+-static void mroute_clean_tables(struct mr6_table *mrt)
++static void mroute_clean_tables(struct mr6_table *mrt, bool all)
+ {
+ 	int i;
+ 	LIST_HEAD(list);
+@@ -1553,8 +1553,9 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ 	 *	Shut down all active vif entries
+ 	 */
+ 	for (i = 0; i < mrt->maxvif; i++) {
+-		if (!(mrt->vif6_table[i].flags & VIFF_STATIC))
+-			mif6_delete(mrt, i, &list);
++		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
++			continue;
++		mif6_delete(mrt, i, &list);
+ 	}
+ 	unregister_netdevice_many(&list);
+ 
+@@ -1563,7 +1564,7 @@ static void mroute_clean_tables(struct mr6_table *mrt)
+ 	 */
+ 	for (i = 0; i < MFC6_LINES; i++) {
+ 		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
+-			if (c->mfc_flags & MFC_STATIC)
++			if (!all && (c->mfc_flags & MFC_STATIC))
+ 				continue;
+ 			write_lock_bh(&mrt_lock);
+ 			list_del(&c->list);
+@@ -1626,7 +1627,7 @@ int ip6mr_sk_done(struct sock *sk)
+ 						     net->ipv6.devconf_all);
+ 			write_unlock_bh(&mrt_lock);
+ 
+-			mroute_clean_tables(mrt);
++			mroute_clean_tables(mrt, false);
+ 			err = 0;
+ 			break;
+ 		}
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 63e6956917c9..4449ad1f8114 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -111,7 +111,8 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
+ 			icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ 		}
+ 	}
+-	opt = xchg(&inet6_sk(sk)->opt, opt);
++	opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
++		   opt);
+ 	sk_dst_reset(sk);
+ 
+ 	return opt;
+@@ -231,9 +232,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 				sk->sk_socket->ops = &inet_dgram_ops;
+ 				sk->sk_family = PF_INET;
+ 			}
+-			opt = xchg(&np->opt, NULL);
+-			if (opt)
+-				sock_kfree_s(sk, opt, opt->tot_len);
++			opt = xchg((__force struct ipv6_txoptions **)&np->opt,
++				   NULL);
++			if (opt) {
++				atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++				txopt_put(opt);
++			}
+ 			pktopt = xchg(&np->pktoptions, NULL);
+ 			kfree_skb(pktopt);
+ 
+@@ -403,7 +407,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 		if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+ 			break;
+ 
+-		opt = ipv6_renew_options(sk, np->opt, optname,
++		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++		opt = ipv6_renew_options(sk, opt, optname,
+ 					 (struct ipv6_opt_hdr __user *)optval,
+ 					 optlen);
+ 		if (IS_ERR(opt)) {
+@@ -432,8 +437,10 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ 		retv = 0;
+ 		opt = ipv6_update_options(sk, opt);
+ sticky_done:
+-		if (opt)
+-			sock_kfree_s(sk, opt, opt->tot_len);
++		if (opt) {
++			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++			txopt_put(opt);
++		}
+ 		break;
+ 	}
+ 
+@@ -486,6 +493,7 @@ sticky_done:
+ 			break;
+ 
+ 		memset(opt, 0, sizeof(*opt));
++		atomic_set(&opt->refcnt, 1);
+ 		opt->tot_len = sizeof(*opt) + optlen;
+ 		retv = -EFAULT;
+ 		if (copy_from_user(opt+1, optval, optlen))
+@@ -502,8 +510,10 @@ update:
+ 		retv = 0;
+ 		opt = ipv6_update_options(sk, opt);
+ done:
+-		if (opt)
+-			sock_kfree_s(sk, opt, opt->tot_len);
++		if (opt) {
++			atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
++			txopt_put(opt);
++		}
+ 		break;
+ 	}
+ 	case IPV6_UNICAST_HOPS:
+@@ -1110,10 +1120,11 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
+ 	case IPV6_RTHDR:
+ 	case IPV6_DSTOPTS:
+ 	{
++		struct ipv6_txoptions *opt;
+ 
+ 		lock_sock(sk);
+-		len = ipv6_getsockopt_sticky(sk, np->opt,
+-					     optname, optval, len);
++		opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++		len = ipv6_getsockopt_sticky(sk, opt, optname, optval, len);
+ 		release_sock(sk);
+ 		/* check if ipv6_getsockopt_sticky() returns err code */
+ 		if (len < 0)
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 083b2927fc67..41e3b5ee8d0b 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -1651,7 +1651,6 @@ out:
+ 	if (!err) {
+ 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
+ 	} else {
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 	}
+@@ -2014,7 +2013,6 @@ out:
+ 	if (!err) {
+ 		ICMP6MSGOUT_INC_STATS(net, idev, type);
+ 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
+-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
+ 	} else
+ 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+ 
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 6f187c8d8a1b..d235ed7f47ab 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -190,7 +190,7 @@ static void nf_ct_frag6_expire(unsigned long data)
+ /* Creation primitives. */
+ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ 					 u32 user, struct in6_addr *src,
+-					 struct in6_addr *dst, u8 ecn)
++					 struct in6_addr *dst, int iif, u8 ecn)
+ {
+ 	struct inet_frag_queue *q;
+ 	struct ip6_create_arg arg;
+@@ -200,6 +200,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
+ 	arg.user = user;
+ 	arg.src = src;
+ 	arg.dst = dst;
++	arg.iif = iif;
+ 	arg.ecn = ecn;
+ 
+ 	local_bh_disable();
+@@ -603,7 +604,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
+ 	fhdr = (struct frag_hdr *)skb_transport_header(clone);
+ 
+ 	fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+-		     ip6_frag_ecn(hdr));
++		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ 	if (fq == NULL) {
+ 		pr_debug("Can't find and can't create new queue\n");
+ 		goto ret_orig;
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 8072bd4139b7..2c639aee12cb 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -731,6 +731,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
+ 
+ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ {
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ipv6_txoptions opt_space;
+ 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
+ 	struct in6_addr *daddr, *final_p, final;
+@@ -837,8 +838,10 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 		if (!(opt->opt_nflen|opt->opt_flen))
+ 			opt = NULL;
+ 	}
+-	if (!opt)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++		}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -901,6 +904,7 @@ done:
+ 	dst_release(dst);
+ out:
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 	return err < 0 ? err : len;
+ do_confirm:
+ 	dst_confirm(dst);
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 8ffa2c8cce77..9d1f6a28b284 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -108,7 +108,10 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
+ 	return	fq->id == arg->id &&
+ 		fq->user == arg->user &&
+ 		ipv6_addr_equal(&fq->saddr, arg->src) &&
+-		ipv6_addr_equal(&fq->daddr, arg->dst);
++		ipv6_addr_equal(&fq->daddr, arg->dst) &&
++		(arg->iif == fq->iif ||
++		 !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
++					       IPV6_ADDR_LINKLOCAL)));
+ }
+ EXPORT_SYMBOL(ip6_frag_match);
+ 
+@@ -180,7 +183,7 @@ static void ip6_frag_expire(unsigned long data)
+ 
+ static struct frag_queue *
+ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+-	const struct in6_addr *dst, u8 ecn)
++	const struct in6_addr *dst, int iif, u8 ecn)
+ {
+ 	struct inet_frag_queue *q;
+ 	struct ip6_create_arg arg;
+@@ -190,6 +193,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
+ 	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
+ 	arg.src = src;
+ 	arg.dst = dst;
++	arg.iif = iif;
+ 	arg.ecn = ecn;
+ 
+ 	hash = inet6_hash_frag(id, src, dst);
+@@ -551,7 +555,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ 	}
+ 
+ 	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+-		     ip6_frag_ecn(hdr));
++		     skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ 	if (fq) {
+ 		int ret;
+ 
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 21bc2eb53c57..a4cf004f44d0 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -242,7 +242,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
+ 		memset(&fl6, 0, sizeof(fl6));
+ 		fl6.flowi6_proto = IPPROTO_TCP;
+ 		fl6.daddr = ireq->ir_v6_rmt_addr;
+-		final_p = fl6_update_dst(&fl6, np->opt, &final);
++		final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt), &final);
+ 		fl6.saddr = ireq->ir_v6_loc_addr;
+ 		fl6.flowi6_oif = sk->sk_bound_dev_if;
+ 		fl6.flowi6_mark = ireq->ir_mark;
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index e541d68dba8b..cfb27f56c62f 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -121,6 +121,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
+ 	struct tcp_sock *tp = tcp_sk(sk);
+ 	struct in6_addr *saddr = NULL, *final_p, final;
++	struct ipv6_txoptions *opt;
+ 	struct rt6_info *rt;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+@@ -237,7 +238,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 	fl6.fl6_dport = usin->sin6_port;
+ 	fl6.fl6_sport = inet->inet_sport;
+ 
+-	final_p = fl6_update_dst(&fl6, np->opt, &final);
++	opt = rcu_dereference_protected(np->opt, sock_owned_by_user(sk));
++	final_p = fl6_update_dst(&fl6, opt, &final);
+ 
+ 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ 
+@@ -266,9 +268,9 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ 		tcp_fetch_timewait_stamp(sk, dst);
+ 
+ 	icsk->icsk_ext_hdr_len = 0;
+-	if (np->opt)
+-		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
+-					  np->opt->opt_nflen);
++	if (opt)
++		icsk->icsk_ext_hdr_len = opt->opt_flen +
++					 opt->opt_nflen;
+ 
+ 	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
+ 
+@@ -464,7 +466,8 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ 			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
+ 
+ 		skb_set_queue_mapping(skb, queue_mapping);
+-		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
++		err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
++			       np->tclass);
+ 		err = net_xmit_eval(err);
+ 	}
+ 
+@@ -994,6 +997,7 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ 	struct inet_request_sock *ireq;
+ 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
+ 	struct tcp6_sock *newtcp6sk;
++	struct ipv6_txoptions *opt;
+ 	struct inet_sock *newinet;
+ 	struct tcp_sock *newtp;
+ 	struct sock *newsk;
+@@ -1129,13 +1133,15 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
+ 	   but we make one more one thing there: reattach optmem
+ 	   to newsk.
+ 	 */
+-	if (np->opt)
+-		newnp->opt = ipv6_dup_options(newsk, np->opt);
+-
++	opt = rcu_dereference(np->opt);
++	if (opt) {
++		opt = ipv6_dup_options(newsk, opt);
++		RCU_INIT_POINTER(newnp->opt, opt);
++	}
+ 	inet_csk(newsk)->icsk_ext_hdr_len = 0;
+-	if (newnp->opt)
+-		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
+-						     newnp->opt->opt_flen);
++	if (opt)
++		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
++						    opt->opt_flen;
+ 
+ 	tcp_ca_openreq_child(newsk, dst);
+ 
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index e51fc3eee6db..7333f3575fc5 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -1107,6 +1107,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
+ 	struct in6_addr *daddr, *final_p, final;
+ 	struct ipv6_txoptions *opt = NULL;
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ip6_flowlabel *flowlabel = NULL;
+ 	struct flowi6 fl6;
+ 	struct dst_entry *dst;
+@@ -1260,8 +1261,10 @@ do_udp_sendmsg:
+ 			opt = NULL;
+ 		connected = 0;
+ 	}
+-	if (!opt)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++	}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -1370,6 +1373,7 @@ release_dst:
+ out:
+ 	dst_release(dst);
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 	if (!err)
+ 		return len;
+ 	/*
+diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
+index d1ded3777815..0ce9da948ad7 100644
+--- a/net/l2tp/l2tp_ip6.c
++++ b/net/l2tp/l2tp_ip6.c
+@@ -486,6 +486,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 	DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name);
+ 	struct in6_addr *daddr, *final_p, final;
+ 	struct ipv6_pinfo *np = inet6_sk(sk);
++	struct ipv6_txoptions *opt_to_free = NULL;
+ 	struct ipv6_txoptions *opt = NULL;
+ 	struct ip6_flowlabel *flowlabel = NULL;
+ 	struct dst_entry *dst = NULL;
+@@ -575,8 +576,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ 			opt = NULL;
+ 	}
+ 
+-	if (opt == NULL)
+-		opt = np->opt;
++	if (!opt) {
++		opt = txopt_get(np);
++		opt_to_free = opt;
++	}
+ 	if (flowlabel)
+ 		opt = fl6_merge_options(&opt_space, flowlabel, opt);
+ 	opt = ipv6_fixup_options(&opt_space, opt);
+@@ -631,6 +634,7 @@ done:
+ 	dst_release(dst);
+ out:
+ 	fl6_sock_release(flowlabel);
++	txopt_put(opt_to_free);
+ 
+ 	return err < 0 ? err : len;
+ 
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 686e60187401..ebc39e66d704 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1524,6 +1524,20 @@ static void fanout_release(struct sock *sk)
+ 	mutex_unlock(&fanout_mutex);
+ }
+ 
++static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
++					  struct sk_buff *skb)
++{
++	/* Earlier code assumed this would be a VLAN pkt, double-check
++	 * this now that we have the actual packet in hand. We can only
++	 * do this check on Ethernet devices.
++	 */
++	if (unlikely(dev->type != ARPHRD_ETHER))
++		return false;
++
++	skb_reset_mac_header(skb);
++	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
++}
++
+ static const struct proto_ops packet_ops;
+ 
+ static const struct proto_ops packet_ops_spkt;
+@@ -1685,18 +1699,10 @@ retry:
+ 		goto retry;
+ 	}
+ 
+-	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
+-		/* Earlier code assumed this would be a VLAN pkt,
+-		 * double-check this now that we have the actual
+-		 * packet in hand.
+-		 */
+-		struct ethhdr *ehdr;
+-		skb_reset_mac_header(skb);
+-		ehdr = eth_hdr(skb);
+-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+-			err = -EMSGSIZE;
+-			goto out_unlock;
+-		}
++	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
++	    !packet_extra_vlan_len_allowed(dev, skb)) {
++		err = -EMSGSIZE;
++		goto out_unlock;
+ 	}
+ 
+ 	skb->protocol = proto;
+@@ -2115,6 +2121,15 @@ static bool ll_header_truncated(const struct net_device *dev, int len)
+ 	return false;
+ }
+ 
++static void tpacket_set_protocol(const struct net_device *dev,
++				 struct sk_buff *skb)
++{
++	if (dev->type == ARPHRD_ETHER) {
++		skb_reset_mac_header(skb);
++		skb->protocol = eth_hdr(skb)->h_proto;
++	}
++}
++
+ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		void *frame, struct net_device *dev, int size_max,
+ 		__be16 proto, unsigned char *addr, int hlen)
+@@ -2151,8 +2166,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 	skb_reserve(skb, hlen);
+ 	skb_reset_network_header(skb);
+ 
+-	if (!packet_use_direct_xmit(po))
+-		skb_probe_transport_header(skb, 0);
+ 	if (unlikely(po->tp_tx_has_off)) {
+ 		int off_min, off_max, off;
+ 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+@@ -2198,6 +2211,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 				dev->hard_header_len);
+ 		if (unlikely(err))
+ 			return err;
++		if (!skb->protocol)
++			tpacket_set_protocol(dev, skb);
+ 
+ 		data += dev->hard_header_len;
+ 		to_write -= dev->hard_header_len;
+@@ -2232,6 +2247,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ 		len = ((to_write > len_max) ? len_max : to_write);
+ 	}
+ 
++	skb_probe_transport_header(skb, 0);
++
+ 	return tp_len;
+ }
+ 
+@@ -2276,12 +2293,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 	if (unlikely(!(dev->flags & IFF_UP)))
+ 		goto out_put;
+ 
+-	reserve = dev->hard_header_len + VLAN_HLEN;
++	if (po->sk.sk_socket->type == SOCK_RAW)
++		reserve = dev->hard_header_len;
+ 	size_max = po->tx_ring.frame_size
+ 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
+ 
+-	if (size_max > dev->mtu + reserve)
+-		size_max = dev->mtu + reserve;
++	if (size_max > dev->mtu + reserve + VLAN_HLEN)
++		size_max = dev->mtu + reserve + VLAN_HLEN;
+ 
+ 	do {
+ 		ph = packet_current_frame(po, &po->tx_ring,
+@@ -2308,18 +2326,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
+ 					  addr, hlen);
+ 		if (likely(tp_len >= 0) &&
+-		    tp_len > dev->mtu + dev->hard_header_len) {
+-			struct ethhdr *ehdr;
+-			/* Earlier code assumed this would be a VLAN pkt,
+-			 * double-check this now that we have the actual
+-			 * packet in hand.
+-			 */
++		    tp_len > dev->mtu + reserve &&
++		    !packet_extra_vlan_len_allowed(dev, skb))
++			tp_len = -EMSGSIZE;
+ 
+-			skb_reset_mac_header(skb);
+-			ehdr = eth_hdr(skb);
+-			if (ehdr->h_proto != htons(ETH_P_8021Q))
+-				tp_len = -EMSGSIZE;
+-		}
+ 		if (unlikely(tp_len < 0)) {
+ 			if (po->tp_loss) {
+ 				__packet_set_status(po, ph,
+@@ -2540,18 +2550,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 
+ 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+ 
+-	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
+-		/* Earlier code assumed this would be a VLAN pkt,
+-		 * double-check this now that we have the actual
+-		 * packet in hand.
+-		 */
+-		struct ethhdr *ehdr;
+-		skb_reset_mac_header(skb);
+-		ehdr = eth_hdr(skb);
+-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
+-			err = -EMSGSIZE;
+-			goto out_free;
+-		}
++	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
++	    !packet_extra_vlan_len_allowed(dev, skb)) {
++		err = -EMSGSIZE;
++		goto out_free;
+ 	}
+ 
+ 	skb->protocol = proto;
+@@ -2582,8 +2584,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ 		len += vnet_hdr_len;
+ 	}
+ 
+-	if (!packet_use_direct_xmit(po))
+-		skb_probe_transport_header(skb, reserve);
++	skb_probe_transport_header(skb, reserve);
++
+ 	if (unlikely(extra_len == 4))
+ 		skb->no_fcs = 1;
+ 
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index 9d66705f9d41..da6da57e5f36 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -187,12 +187,6 @@ new_conn:
+ 		}
+ 	}
+ 
+-	if (trans == NULL) {
+-		kmem_cache_free(rds_conn_slab, conn);
+-		conn = ERR_PTR(-ENODEV);
+-		goto out;
+-	}
+-
+ 	conn->c_trans = trans;
+ 
+ 	ret = trans->conn_alloc(conn, gfp);
+diff --git a/net/rds/send.c b/net/rds/send.c
+index e9430f537f9c..7b30c0f3180d 100644
+--- a/net/rds/send.c
++++ b/net/rds/send.c
+@@ -986,11 +986,13 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
+ 		release_sock(sk);
+ 	}
+ 
+-	/* racing with another thread binding seems ok here */
++	lock_sock(sk);
+ 	if (daddr == 0 || rs->rs_bound_addr == 0) {
++		release_sock(sk);
+ 		ret = -ENOTCONN; /* XXX not a great errno */
+ 		goto out;
+ 	}
++	release_sock(sk);
+ 
+ 	/* size of rm including all sgs */
+ 	ret = rds_rm_size(msg, payload_len);
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 1e1c89e51a11..d4b6f3682c14 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -253,7 +253,8 @@ int qdisc_set_default(const char *name)
+ }
+ 
+ /* We know handle. Find qdisc among all qdisc's attached to device
+-   (root qdisc, all its children, children of children etc.)
++ * (root qdisc, all its children, children of children etc.)
++ * Note: caller either uses rtnl or rcu_read_lock()
+  */
+ 
+ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+@@ -264,7 +265,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+ 	    root->handle == handle)
+ 		return root;
+ 
+-	list_for_each_entry(q, &root->list, list) {
++	list_for_each_entry_rcu(q, &root->list, list) {
+ 		if (q->handle == handle)
+ 			return q;
+ 	}
+@@ -277,15 +278,18 @@ void qdisc_list_add(struct Qdisc *q)
+ 		struct Qdisc *root = qdisc_dev(q)->qdisc;
+ 
+ 		WARN_ON_ONCE(root == &noop_qdisc);
+-		list_add_tail(&q->list, &root->list);
++		ASSERT_RTNL();
++		list_add_tail_rcu(&q->list, &root->list);
+ 	}
+ }
+ EXPORT_SYMBOL(qdisc_list_add);
+ 
+ void qdisc_list_del(struct Qdisc *q)
+ {
+-	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+-		list_del(&q->list);
++	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
++		ASSERT_RTNL();
++		list_del_rcu(&q->list);
++	}
+ }
+ EXPORT_SYMBOL(qdisc_list_del);
+ 
+@@ -750,14 +754,18 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ 	if (n == 0)
+ 		return;
+ 	drops = max_t(int, n, 0);
++	rcu_read_lock();
+ 	while ((parentid = sch->parent)) {
+ 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+-			return;
++			break;
+ 
++		if (sch->flags & TCQ_F_NOPARENT)
++			break;
++		/* TODO: perform the search on a per txq basis */
+ 		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
+ 		if (sch == NULL) {
+-			WARN_ON(parentid != TC_H_ROOT);
+-			return;
++			WARN_ON_ONCE(parentid != TC_H_ROOT);
++			break;
+ 		}
+ 		cops = sch->ops->cl_ops;
+ 		if (cops->qlen_notify) {
+@@ -768,6 +776,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
+ 		sch->q.qlen -= n;
+ 		__qdisc_qstats_drop(sch, drops);
+ 	}
++	rcu_read_unlock();
+ }
+ EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
+ 
+@@ -941,7 +950,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+ 		}
+ 		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
+ 		if (!netif_is_multiqueue(dev))
+-			sch->flags |= TCQ_F_ONETXQUEUE;
++			sch->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+ 
+ 	sch->handle = handle;
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 6efca30894aa..b453270be3fd 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -743,7 +743,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
+ 			return;
+ 		}
+ 		if (!netif_is_multiqueue(dev))
+-			qdisc->flags |= TCQ_F_ONETXQUEUE;
++			qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+ 	dev_queue->qdisc_sleeping = qdisc;
+ }
+diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
+index f3cbaecd283a..3e82f047caaf 100644
+--- a/net/sched/sch_mq.c
++++ b/net/sched/sch_mq.c
+@@ -63,7 +63,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+ 		if (qdisc == NULL)
+ 			goto err;
+ 		priv->qdiscs[ntx] = qdisc;
+-		qdisc->flags |= TCQ_F_ONETXQUEUE;
++		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+ 
+ 	sch->flags |= TCQ_F_MQROOT;
+@@ -156,7 +156,7 @@ static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ 
+ 	*old = dev_graft_qdisc(dev_queue, new);
+ 	if (new)
+-		new->flags |= TCQ_F_ONETXQUEUE;
++		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	if (dev->flags & IFF_UP)
+ 		dev_activate(dev);
+ 	return 0;
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 3811a745452c..ad70ecf57ce7 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -132,7 +132,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
+ 			goto err;
+ 		}
+ 		priv->qdiscs[i] = qdisc;
+-		qdisc->flags |= TCQ_F_ONETXQUEUE;
++		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 	}
+ 
+ 	/* If the mqprio options indicate that hardware should own
+@@ -209,7 +209,7 @@ static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ 	*old = dev_graft_qdisc(dev_queue, new);
+ 
+ 	if (new)
+-		new->flags |= TCQ_F_ONETXQUEUE;
++		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
+ 
+ 	if (dev->flags & IFF_UP)
+ 		dev_activate(dev);
+diff --git a/net/sctp/auth.c b/net/sctp/auth.c
+index 4f15b7d730e1..1543e39f47c3 100644
+--- a/net/sctp/auth.c
++++ b/net/sctp/auth.c
+@@ -809,8 +809,8 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
+ 	if (!has_sha1)
+ 		return -EINVAL;
+ 
+-	memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
+-		hmacs->shmac_num_idents * sizeof(__u16));
++	for (i = 0; i < hmacs->shmac_num_idents; i++)
++		ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
+ 	ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
+ 				hmacs->shmac_num_idents * sizeof(__u16));
+ 	return 0;
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 5f6c4e61325b..66d796075050 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -7387,6 +7387,13 @@ struct proto sctp_prot = {
+ 
+ #if IS_ENABLED(CONFIG_IPV6)
+ 
++#include <net/transp_v6.h>
++static void sctp_v6_destroy_sock(struct sock *sk)
++{
++	sctp_destroy_sock(sk);
++	inet6_destroy_sock(sk);
++}
++
+ struct proto sctpv6_prot = {
+ 	.name		= "SCTPv6",
+ 	.owner		= THIS_MODULE,
+@@ -7396,7 +7403,7 @@ struct proto sctpv6_prot = {
+ 	.accept		= sctp_accept,
+ 	.ioctl		= sctp_ioctl,
+ 	.init		= sctp_init_sock,
+-	.destroy	= sctp_destroy_sock,
++	.destroy	= sctp_v6_destroy_sock,
+ 	.shutdown	= sctp_shutdown,
+ 	.setsockopt	= sctp_setsockopt,
+ 	.getsockopt	= sctp_getsockopt,
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 76e66695621c..1975fd8d1c10 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -316,6 +316,118 @@ found:
+ 	return s;
+ }
+ 
++/* Support code for asymmetrically connected dgram sockets
++ *
++ * If a datagram socket is connected to a socket not itself connected
++ * to the first socket (eg, /dev/log), clients may only enqueue more
++ * messages if the present receive queue of the server socket is not
++ * "too large". This means there's a second writeability condition
++ * poll and sendmsg need to test. The dgram recv code will do a wake
++ * up on the peer_wait wait queue of a socket upon reception of a
++ * datagram which needs to be propagated to sleeping would-be writers
++ * since these might not have sent anything so far. This can't be
++ * accomplished via poll_wait because the lifetime of the server
++ * socket might be less than that of its clients if these break their
++ * association with it or if the server socket is closed while clients
++ * are still connected to it and there's no way to inform "a polling
++ * implementation" that it should let go of a certain wait queue
++ *
++ * In order to propagate a wake up, a wait_queue_t of the client
++ * socket is enqueued on the peer_wait queue of the server socket
++ * whose wake function does a wake_up on the ordinary client socket
++ * wait queue. This connection is established whenever a write (or
++ * poll for write) hit the flow control condition and broken when the
++ * association to the server socket is dissolved or after a wake up
++ * was relayed.
++ */
++
++static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
++				      void *key)
++{
++	struct unix_sock *u;
++	wait_queue_head_t *u_sleep;
++
++	u = container_of(q, struct unix_sock, peer_wake);
++
++	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
++			    q);
++	u->peer_wake.private = NULL;
++
++	/* relaying can only happen while the wq still exists */
++	u_sleep = sk_sleep(&u->sk);
++	if (u_sleep)
++		wake_up_interruptible_poll(u_sleep, key);
++
++	return 0;
++}
++
++static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
++{
++	struct unix_sock *u, *u_other;
++	int rc;
++
++	u = unix_sk(sk);
++	u_other = unix_sk(other);
++	rc = 0;
++	spin_lock(&u_other->peer_wait.lock);
++
++	if (!u->peer_wake.private) {
++		u->peer_wake.private = other;
++		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
++
++		rc = 1;
++	}
++
++	spin_unlock(&u_other->peer_wait.lock);
++	return rc;
++}
++
++static void unix_dgram_peer_wake_disconnect(struct sock *sk,
++					    struct sock *other)
++{
++	struct unix_sock *u, *u_other;
++
++	u = unix_sk(sk);
++	u_other = unix_sk(other);
++	spin_lock(&u_other->peer_wait.lock);
++
++	if (u->peer_wake.private == other) {
++		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
++		u->peer_wake.private = NULL;
++	}
++
++	spin_unlock(&u_other->peer_wait.lock);
++}
++
++static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
++						   struct sock *other)
++{
++	unix_dgram_peer_wake_disconnect(sk, other);
++	wake_up_interruptible_poll(sk_sleep(sk),
++				   POLLOUT |
++				   POLLWRNORM |
++				   POLLWRBAND);
++}
++
++/* preconditions:
++ *	- unix_peer(sk) == other
++ *	- association is stable
++ */
++static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
++{
++	int connected;
++
++	connected = unix_dgram_peer_wake_connect(sk, other);
++
++	if (unix_recvq_full(other))
++		return 1;
++
++	if (connected)
++		unix_dgram_peer_wake_disconnect(sk, other);
++
++	return 0;
++}
++
+ static inline int unix_writable(struct sock *sk)
+ {
+ 	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+@@ -420,6 +532,8 @@ static void unix_release_sock(struct sock *sk, int embrion)
+ 			skpair->sk_state_change(skpair);
+ 			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
+ 		}
++
++		unix_dgram_peer_wake_disconnect(sk, skpair);
+ 		sock_put(skpair); /* It may now die */
+ 		unix_peer(sk) = NULL;
+ 	}
+@@ -648,6 +762,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
+ 	INIT_LIST_HEAD(&u->link);
+ 	mutex_init(&u->readlock); /* single task reading lock */
+ 	init_waitqueue_head(&u->peer_wait);
++	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
+ 	unix_insert_socket(unix_sockets_unbound(sk), sk);
+ out:
+ 	if (sk == NULL)
+@@ -1015,6 +1130,8 @@ restart:
+ 	if (unix_peer(sk)) {
+ 		struct sock *old_peer = unix_peer(sk);
+ 		unix_peer(sk) = other;
++		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
++
+ 		unix_state_double_unlock(sk, other);
+ 
+ 		if (other != old_peer)
+@@ -1453,6 +1570,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ 	struct scm_cookie scm;
+ 	int max_level;
+ 	int data_len = 0;
++	int sk_locked;
+ 
+ 	wait_for_unix_gc();
+ 	err = scm_send(sock, msg, &scm, false);
+@@ -1532,12 +1650,14 @@ restart:
+ 		goto out_free;
+ 	}
+ 
++	sk_locked = 0;
+ 	unix_state_lock(other);
++restart_locked:
+ 	err = -EPERM;
+ 	if (!unix_may_send(sk, other))
+ 		goto out_unlock;
+ 
+-	if (sock_flag(other, SOCK_DEAD)) {
++	if (unlikely(sock_flag(other, SOCK_DEAD))) {
+ 		/*
+ 		 *	Check with 1003.1g - what should
+ 		 *	datagram error
+@@ -1545,10 +1665,14 @@ restart:
+ 		unix_state_unlock(other);
+ 		sock_put(other);
+ 
++		if (!sk_locked)
++			unix_state_lock(sk);
++
+ 		err = 0;
+-		unix_state_lock(sk);
+ 		if (unix_peer(sk) == other) {
+ 			unix_peer(sk) = NULL;
++			unix_dgram_peer_wake_disconnect_wakeup(sk, other);
++
+ 			unix_state_unlock(sk);
+ 
+ 			unix_dgram_disconnected(sk, other);
+@@ -1574,21 +1698,38 @@ restart:
+ 			goto out_unlock;
+ 	}
+ 
+-	if (unix_peer(other) != sk && unix_recvq_full(other)) {
+-		if (!timeo) {
+-			err = -EAGAIN;
+-			goto out_unlock;
++	if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++		if (timeo) {
++			timeo = unix_wait_for_peer(other, timeo);
++
++			err = sock_intr_errno(timeo);
++			if (signal_pending(current))
++				goto out_free;
++
++			goto restart;
+ 		}
+ 
+-		timeo = unix_wait_for_peer(other, timeo);
++		if (!sk_locked) {
++			unix_state_unlock(other);
++			unix_state_double_lock(sk, other);
++		}
+ 
+-		err = sock_intr_errno(timeo);
+-		if (signal_pending(current))
+-			goto out_free;
++		if (unix_peer(sk) != other ||
++		    unix_dgram_peer_wake_me(sk, other)) {
++			err = -EAGAIN;
++			sk_locked = 1;
++			goto out_unlock;
++		}
+ 
+-		goto restart;
++		if (!sk_locked) {
++			sk_locked = 1;
++			goto restart_locked;
++		}
+ 	}
+ 
++	if (unlikely(sk_locked))
++		unix_state_unlock(sk);
++
+ 	if (sock_flag(other, SOCK_RCVTSTAMP))
+ 		__net_timestamp(skb);
+ 	maybe_add_creds(skb, sock, other);
+@@ -1602,6 +1743,8 @@ restart:
+ 	return len;
+ 
+ out_unlock:
++	if (sk_locked)
++		unix_state_unlock(sk);
+ 	unix_state_unlock(other);
+ out_free:
+ 	kfree_skb(skb);
+@@ -2245,14 +2388,16 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
+ 		return mask;
+ 
+ 	writable = unix_writable(sk);
+-	other = unix_peer_get(sk);
+-	if (other) {
+-		if (unix_peer(other) != sk) {
+-			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+-			if (unix_recvq_full(other))
+-				writable = 0;
+-		}
+-		sock_put(other);
++	if (writable) {
++		unix_state_lock(sk);
++
++		other = unix_peer(sk);
++		if (other && unix_peer(other) != sk &&
++		    unix_recvq_full(other) &&
++		    unix_dgram_peer_wake_me(sk, other))
++			writable = 0;
++
++		unix_state_unlock(sk);
+ 	}
+ 
+ 	if (writable)
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 225b78b4ef12..d02eccd51f6e 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -48,8 +48,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+ #define is_haswell(codec)  ((codec)->core.vendor_id == 0x80862807)
+ #define is_broadwell(codec)    ((codec)->core.vendor_id == 0x80862808)
+ #define is_skylake(codec) ((codec)->core.vendor_id == 0x80862809)
++#define is_broxton(codec) ((codec)->core.vendor_id == 0x8086280a)
+ #define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
+-					|| is_skylake(codec))
++				|| is_skylake(codec) || is_broxton(codec))
+ 
+ #define is_valleyview(codec) ((codec)->core.vendor_id == 0x80862882)
+ #define is_cherryview(codec) ((codec)->core.vendor_id == 0x80862883)
+diff --git a/tools/net/Makefile b/tools/net/Makefile
+index ee577ea03ba5..ddf888010652 100644
+--- a/tools/net/Makefile
++++ b/tools/net/Makefile
+@@ -4,6 +4,9 @@ CC = gcc
+ LEX = flex
+ YACC = bison
+ 
++CFLAGS += -Wall -O2
++CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
++
+ %.yacc.c: %.y
+ 	$(YACC) -o $@ -d $<
+ 
+@@ -12,15 +15,13 @@ YACC = bison
+ 
+ all : bpf_jit_disasm bpf_dbg bpf_asm
+ 
+-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
++bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
+ bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
+ bpf_jit_disasm : bpf_jit_disasm.o
+ 
+-bpf_dbg : CFLAGS = -Wall -O2
+ bpf_dbg : LDLIBS = -lreadline
+ bpf_dbg : bpf_dbg.o
+ 
+-bpf_asm : CFLAGS = -Wall -O2 -I.
+ bpf_asm : LDLIBS =
+ bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
+ bpf_exp.lex.o : bpf_exp.yacc.c


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-12-10 13:54 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-12-10 13:54 UTC (permalink / raw
  To: gentoo-commits

commit:     cc2721358925bd19d1cee58f5cb7a68c054b5272
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Dec 10 13:54:25 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Dec 10 13:54:25 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cc272135

Linux patch 4.1.14

 0000_README             |    4 +
 1013_linux-4.1.14.patch | 3152 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3156 insertions(+)

diff --git a/0000_README b/0000_README
index acad761..bb7a9d9 100644
--- a/0000_README
+++ b/0000_README
@@ -95,6 +95,10 @@ Patch:  1012_linux-4.1.13.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.13
 
+Patch:  1013_linux-4.1.14.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.14
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1013_linux-4.1.14.patch b/1013_linux-4.1.14.patch
new file mode 100644
index 0000000..86576c9
--- /dev/null
+++ b/1013_linux-4.1.14.patch
@@ -0,0 +1,3152 @@
+diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
+index c3b6b301d8b0..749b7bae0c00 100644
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -140,7 +140,8 @@ Table 1-1: Process specific entries in /proc
+  stat		Process status
+  statm		Process memory status information
+  status		Process status in human readable form
+- wchan		If CONFIG_KALLSYMS is set, a pre-decoded wchan
++ wchan		Present with CONFIG_KALLSYMS=y: it shows the kernel function
++		symbol the task is blocked in - or "0" if not blocked.
+  pagemap	Page table
+  stack		Report full stack trace, enable via CONFIG_STACKTRACE
+  smaps		a extension based on maps, showing the memory consumption of
+@@ -309,7 +310,7 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
+   blocked       bitmap of blocked signals
+   sigign        bitmap of ignored signals
+   sigcatch      bitmap of caught signals
+-  wchan         address where process went to sleep
++  0		(place holder, used to be the wchan address, use /proc/PID/wchan instead)
+   0             (place holder)
+   0             (place holder)
+   exit_signal   signal to send to parent thread on exit
+diff --git a/Makefile b/Makefile
+index d5d229db61d5..091280d66452 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 13
++SUBLEVEL = 14
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
+index bc215e4b75fd..6a87233d0b19 100644
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -477,7 +477,10 @@
+ 				compatible = "fsl,imx27-usb";
+ 				reg = <0x10024000 0x200>;
+ 				interrupts = <56>;
+-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
++				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
++					<&clks IMX27_CLK_USB_AHB_GATE>,
++					<&clks IMX27_CLK_USB_DIV>;
++				clock-names = "ipg", "ahb", "per";
+ 				fsl,usbmisc = <&usbmisc 0>;
+ 				status = "disabled";
+ 			};
+@@ -486,7 +489,10 @@
+ 				compatible = "fsl,imx27-usb";
+ 				reg = <0x10024200 0x200>;
+ 				interrupts = <54>;
+-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
++				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
++					<&clks IMX27_CLK_USB_AHB_GATE>,
++					<&clks IMX27_CLK_USB_DIV>;
++				clock-names = "ipg", "ahb", "per";
+ 				fsl,usbmisc = <&usbmisc 1>;
+ 				dr_mode = "host";
+ 				status = "disabled";
+@@ -496,7 +502,10 @@
+ 				compatible = "fsl,imx27-usb";
+ 				reg = <0x10024400 0x200>;
+ 				interrupts = <55>;
+-				clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
++				clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
++					<&clks IMX27_CLK_USB_AHB_GATE>,
++					<&clks IMX27_CLK_USB_DIV>;
++				clock-names = "ipg", "ahb", "per";
+ 				fsl,usbmisc = <&usbmisc 2>;
+ 				dr_mode = "host";
+ 				status = "disabled";
+@@ -506,7 +515,6 @@
+ 				#index-cells = <1>;
+ 				compatible = "fsl,imx27-usbmisc";
+ 				reg = <0x10024600 0x200>;
+-				clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
+ 			};
+ 
+ 			sahara2: sahara@10025000 {
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 1b958e92d674..2e7c1364cb00 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -31,6 +31,24 @@
+ 		regulator-max-microvolt = <3000000>;
+ 	};
+ 
++	mmc3_pwrseq: sdhci0_pwrseq {
++		compatible = "mmc-pwrseq-simple";
++		clocks = <&clk32kgaudio>;
++		clock-names = "ext_clock";
++	};
++
++	vmmcsdio_fixed: fixedregulator-mmcsdio {
++		compatible = "regulator-fixed";
++		regulator-name = "vmmcsdio_fixed";
++		regulator-min-microvolt = <1800000>;
++		regulator-max-microvolt = <1800000>;
++		gpio = <&gpio5 12 GPIO_ACTIVE_HIGH>;	/* gpio140 WLAN_EN */
++		enable-active-high;
++		startup-delay-us = <70000>;
++		pinctrl-names = "default";
++		pinctrl-0 = <&wlan_pins>;
++	};
++
+ 	/* HS USB Host PHY on PORT 2 */
+ 	hsusb2_phy: hsusb2_phy {
+ 		compatible = "usb-nop-xceiv";
+@@ -197,12 +215,20 @@
+ 		>;
+ 	};
+ 
+-	mcspi4_pins: pinmux_mcspi4_pins {
++	mmc3_pins: pinmux_mmc3_pins {
++		pinctrl-single,pins = <
++			OMAP5_IOPAD(0x01a4, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_clk */
++			OMAP5_IOPAD(0x01a6, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_cmd */
++			OMAP5_IOPAD(0x01a8, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data0 */
++			OMAP5_IOPAD(0x01aa, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data1 */
++			OMAP5_IOPAD(0x01ac, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data2 */
++			OMAP5_IOPAD(0x01ae, PIN_INPUT_PULLUP | MUX_MODE0) /* wlsdio_data3 */
++		>;
++	};
++
++	wlan_pins: pinmux_wlan_pins {
+ 		pinctrl-single,pins = <
+-			0x164 (PIN_INPUT | MUX_MODE1)		/*  mcspi4_clk */
+-			0x168 (PIN_INPUT | MUX_MODE1)		/*  mcspi4_simo */
+-			0x16a (PIN_INPUT | MUX_MODE1)		/*  mcspi4_somi */
+-			0x16c (PIN_INPUT | MUX_MODE1)		/*  mcspi4_cs0 */
++			OMAP5_IOPAD(0x1bc, PIN_OUTPUT | MUX_MODE6) /* mcspi1_clk.gpio5_140 */
+ 		>;
+ 	};
+ 
+@@ -276,6 +302,12 @@
+ 			0x1A (PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
+ 		>;
+ 	};
++
++	wlcore_irq_pin: pinmux_wlcore_irq_pin {
++		pinctrl-single,pins = <
++			OMAP5_IOPAD(0x040, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6)	/* llia_wakereqin.gpio1_wk14 */
++		>;
++	};
+ };
+ 
+ &mmc1 {
+@@ -290,8 +322,25 @@
+ };
+ 
+ &mmc3 {
++	vmmc-supply = <&vmmcsdio_fixed>;
++	mmc-pwrseq = <&mmc3_pwrseq>;
+ 	bus-width = <4>;
+-	ti,non-removable;
++	non-removable;
++	cap-power-off-card;
++	pinctrl-names = "default";
++	pinctrl-0 = <&mmc3_pins &wlcore_irq_pin>;
++	interrupts-extended = <&gic GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH
++			       &omap5_pmx_core 0x168>;
++
++	#address-cells = <1>;
++	#size-cells = <0>;
++	wlcore: wlcore@2 {
++		compatible = "ti,wl1271";
++		reg = <2>;
++		interrupt-parent = <&gpio1>;
++		interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;	/* gpio 14 */
++		ref-clock-frequency = <26000000>;
++	};
+ };
+ 
+ &mmc4 {
+@@ -591,11 +640,6 @@
+ 	pinctrl-0 = <&mcspi3_pins>;
+ };
+ 
+-&mcspi4 {
+-	pinctrl-names = "default";
+-	pinctrl-0 = <&mcspi4_pins>;
+-};
+-
+ &uart1 {
+         pinctrl-names = "default";
+         pinctrl-0 = <&uart1_pins>;
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index a5f5f4090af6..9cf0ab62db7d 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -918,11 +918,11 @@
+ 				reg = <0xf8018000 0x4000>;
+ 				interrupts = <33 IRQ_TYPE_LEVEL_HIGH 6>;
+ 				dmas = <&dma1
+-					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(4)>,
++					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(4))>,
+ 				       <&dma1
+-					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(5)>;
++					(AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(5))>;
+ 				dma-names = "tx", "rx";
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&pinctrl_i2c1>;
+diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
+index 5662a872689b..30613204da15 100644
+--- a/arch/arm/common/edma.c
++++ b/arch/arm/common/edma.c
+@@ -406,7 +406,8 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
+ 					BIT(slot));
+ 			if (edma_cc[ctlr]->intr_data[channel].callback)
+ 				edma_cc[ctlr]->intr_data[channel].callback(
+-					channel, EDMA_DMA_COMPLETE,
++					EDMA_CTLR_CHAN(ctlr, channel),
++					EDMA_DMA_COMPLETE,
+ 					edma_cc[ctlr]->intr_data[channel].data);
+ 		}
+ 	} while (sh_ipr);
+@@ -460,7 +461,8 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
+ 					if (edma_cc[ctlr]->intr_data[k].
+ 								callback) {
+ 						edma_cc[ctlr]->intr_data[k].
+-						callback(k,
++						callback(
++						EDMA_CTLR_CHAN(ctlr, k),
+ 						EDMA_DMA_CC_ERROR,
+ 						edma_cc[ctlr]->intr_data
+ 						[k].data);
+diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
+index bd22b2c8a051..d3161c7ee1fd 100644
+--- a/arch/arm/mach-at91/pm_suspend.S
++++ b/arch/arm/mach-at91/pm_suspend.S
+@@ -81,6 +81,8 @@ tmp2	.req	r5
+  *	@r2: base address of second SDRAM Controller or 0 if not present
+  *	@r3: pm information
+  */
++/* at91_pm_suspend_in_sram must be 8-byte aligned per the requirements of fncpy() */
++	.align 3
+ ENTRY(at91_pm_suspend_in_sram)
+ 	/* Save registers on stack */
+ 	stmfd	sp!, {r4 - r12, lr}
+diff --git a/arch/arm/mach-pxa/include/mach/pxa27x.h b/arch/arm/mach-pxa/include/mach/pxa27x.h
+index 599b925a657c..1a4291936c58 100644
+--- a/arch/arm/mach-pxa/include/mach/pxa27x.h
++++ b/arch/arm/mach-pxa/include/mach/pxa27x.h
+@@ -19,7 +19,7 @@
+ #define ARB_CORE_PARK		(1<<24)	   /* Be parked with core when idle */
+ #define ARB_LOCK_FLAG		(1<<23)	   /* Only Locking masters gain access to the bus */
+ 
+-extern int __init pxa27x_set_pwrmode(unsigned int mode);
++extern int pxa27x_set_pwrmode(unsigned int mode);
+ extern void pxa27x_cpu_pm_enter(suspend_state_t state);
+ 
+ #endif /* __MACH_PXA27x_H */
+diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
+index af423a48c2e3..782e6b98dd9a 100644
+--- a/arch/arm/mach-pxa/pxa27x.c
++++ b/arch/arm/mach-pxa/pxa27x.c
+@@ -251,7 +251,7 @@ static struct clk_lookup pxa27x_clkregs[] = {
+  */
+ static unsigned int pwrmode = PWRMODE_SLEEP;
+ 
+-int __init pxa27x_set_pwrmode(unsigned int mode)
++int pxa27x_set_pwrmode(unsigned int mode)
+ {
+ 	switch (mode) {
+ 	case PWRMODE_SLEEP:
+diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
+index fbe74c6806f3..49d1110cff53 100644
+--- a/arch/arm/mach-tegra/board-paz00.c
++++ b/arch/arm/mach-tegra/board-paz00.c
+@@ -39,8 +39,8 @@ static struct platform_device wifi_rfkill_device = {
+ static struct gpiod_lookup_table wifi_gpio_lookup = {
+ 	.dev_id = "rfkill_gpio",
+ 	.table = {
+-		GPIO_LOOKUP_IDX("tegra-gpio", 25, NULL, 0, 0),
+-		GPIO_LOOKUP_IDX("tegra-gpio", 85, NULL, 1, 0),
++		GPIO_LOOKUP("tegra-gpio", 25, "reset", 0),
++		GPIO_LOOKUP("tegra-gpio", 85, "shutdown", 0),
+ 		{ },
+ 	},
+ };
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 6e4b9ff22ef3..64d7486262e5 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1395,12 +1395,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ 	unsigned long uaddr = vma->vm_start;
+ 	unsigned long usize = vma->vm_end - vma->vm_start;
+ 	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
++	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++	unsigned long off = vma->vm_pgoff;
+ 
+ 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+ 
+ 	if (!pages)
+ 		return -ENXIO;
+ 
++	if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
++		return -ENXIO;
++
++	pages += off;
++
+ 	do {
+ 		int ret = vm_insert_page(vma, uaddr, *pages++);
+ 		if (ret) {
+diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
+index d6dd9fdbc3be..d4264bb0a409 100644
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -83,14 +83,14 @@
+ #define compat_sp	regs[13]
+ #define compat_lr	regs[14]
+ #define compat_sp_hyp	regs[15]
+-#define compat_sp_irq	regs[16]
+-#define compat_lr_irq	regs[17]
+-#define compat_sp_svc	regs[18]
+-#define compat_lr_svc	regs[19]
+-#define compat_sp_abt	regs[20]
+-#define compat_lr_abt	regs[21]
+-#define compat_sp_und	regs[22]
+-#define compat_lr_und	regs[23]
++#define compat_lr_irq	regs[16]
++#define compat_sp_irq	regs[17]
++#define compat_lr_svc	regs[18]
++#define compat_sp_svc	regs[19]
++#define compat_lr_abt	regs[20]
++#define compat_sp_abt	regs[21]
++#define compat_lr_und	regs[22]
++#define compat_sp_und	regs[23]
+ #define compat_r8_fiq	regs[24]
+ #define compat_r9_fiq	regs[25]
+ #define compat_r10_fiq	regs[26]
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index a2c29865c3fe..aff07bcad882 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -54,9 +54,12 @@ PECOFF_FILE_ALIGNMENT = 0x200;
+ #define PECOFF_EDATA_PADDING
+ #endif
+ 
+-#ifdef CONFIG_DEBUG_ALIGN_RODATA
++#if defined(CONFIG_DEBUG_ALIGN_RODATA)
+ #define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
+ #define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
++#elif defined(CONFIG_DEBUG_RODATA)
++#define ALIGN_DEBUG_RO			. = ALIGN(1<<PAGE_SHIFT);
++#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+ #else
+ #define ALIGN_DEBUG_RO
+ #define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
+diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
+index d5fa3eaf39a1..41b1b090f56f 100644
+--- a/arch/mips/kvm/emulate.c
++++ b/arch/mips/kvm/emulate.c
+@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+ 
+ 	base = (inst >> 21) & 0x1f;
+ 	op_inst = (inst >> 16) & 0x1f;
+-	offset = inst & 0xffff;
++	offset = (int16_t)inst;
+ 	cache = (inst >> 16) & 0x3;
+ 	op = (inst >> 18) & 0x7;
+ 
+diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
+index c567240386a0..d1ee95a7f7dd 100644
+--- a/arch/mips/kvm/locore.S
++++ b/arch/mips/kvm/locore.S
+@@ -165,9 +165,11 @@ FEXPORT(__kvm_mips_vcpu_run)
+ 
+ FEXPORT(__kvm_mips_load_asid)
+ 	/* Set the ASID for the Guest Kernel */
+-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
+-			        /* addresses shift to 0x80000000 */
+-	bltz	t0, 1f		/* If kernel */
++	PTR_L	t0, VCPU_COP0(k1)
++	LONG_L	t0, COP0_STATUS(t0)
++	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
++	xori	t0, KSU_USER
++	bnez	t0, 1f		/* If kernel */
+ 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+ 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+ 1:
+@@ -482,9 +484,11 @@ __kvm_mips_return_to_guest:
+ 	mtc0	t0, CP0_EPC
+ 
+ 	/* Set the ASID for the Guest Kernel */
+-	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
+-				/* addresses shift to 0x80000000 */
+-	bltz	t0, 1f		/* If kernel */
++	PTR_L	t0, VCPU_COP0(k1)
++	LONG_L	t0, COP0_STATUS(t0)
++	andi	t0, KSU_USER | ST0_ERL | ST0_EXL
++	xori	t0, KSU_USER
++	bnez	t0, 1f		/* If kernel */
+ 	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+ 	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+ 1:
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 52f205ae1281..22ee0afc7d5d 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -277,7 +277,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ 
+ 	if (!gebase) {
+ 		err = -ENOMEM;
+-		goto out_free_cpu;
++		goto out_uninit_cpu;
+ 	}
+ 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+ 		  ALIGN(size, PAGE_SIZE), gebase);
+@@ -341,6 +341,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+ out_free_gebase:
+ 	kfree(gebase);
+ 
++out_uninit_cpu:
++	kvm_vcpu_uninit(vcpu);
++
+ out_free_cpu:
+ 	kfree(vcpu);
+ 
+diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
+index 3fc2e6d70c77..a0706fd4ce0a 100644
+--- a/arch/mips/lantiq/clk.c
++++ b/arch/mips/lantiq/clk.c
+@@ -99,6 +99,23 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
+ }
+ EXPORT_SYMBOL(clk_set_rate);
+ 
++long clk_round_rate(struct clk *clk, unsigned long rate)
++{
++	if (unlikely(!clk_good(clk)))
++		return 0;
++	if (clk->rates && *clk->rates) {
++		unsigned long *r = clk->rates;
++
++		while (*r && (*r != rate))
++			r++;
++		if (!*r) {
++			return clk->rate;
++		}
++	}
++	return rate;
++}
++EXPORT_SYMBOL(clk_round_rate);
++
+ int clk_enable(struct clk *clk)
+ {
+ 	if (unlikely(!clk_good(clk)))
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index b745a109bfc1..3dbba9a2bb0f 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1054,8 +1054,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+ 				   src_id, 0, 2);
+ 
+ 	/* sending vcpu invalid */
+-	if (src_id >= KVM_MAX_VCPUS ||
+-	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
++	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
+ 		return -EINVAL;
+ 
+ 	if (sclp_has_sigpif())
+@@ -1134,6 +1133,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
+ 	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
+ 				   irq->u.emerg.code, 0, 2);
+ 
++	/* sending vcpu invalid */
++	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
++		return -EINVAL;
++
+ 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
+ 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
+ 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 8cd8e7b288c5..c3805cf4b982 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -283,12 +283,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
+ 		r = 0;
+ 		break;
+ 	case KVM_CAP_S390_VECTOR_REGISTERS:
+-		if (MACHINE_HAS_VX) {
++		mutex_lock(&kvm->lock);
++		if (atomic_read(&kvm->online_vcpus)) {
++			r = -EBUSY;
++		} else if (MACHINE_HAS_VX) {
+ 			set_kvm_facility(kvm->arch.model.fac->mask, 129);
+ 			set_kvm_facility(kvm->arch.model.fac->list, 129);
+ 			r = 0;
+ 		} else
+ 			r = -EINVAL;
++		mutex_unlock(&kvm->lock);
+ 		break;
+ 	case KVM_CAP_S390_USER_STSI:
+ 		kvm->arch.user_stsi = 1;
+@@ -1031,7 +1035,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ 	if (!kvm->arch.sca)
+ 		goto out_err;
+ 	spin_lock(&kvm_lock);
+-	sca_offset = (sca_offset + 16) & 0x7f0;
++	sca_offset += 16;
++	if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
++		sca_offset = 0;
+ 	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
+ 	spin_unlock(&kvm_lock);
+ 
+diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
+index 72e58bd2bee7..7171056fc24d 100644
+--- a/arch/s390/kvm/sigp.c
++++ b/arch/s390/kvm/sigp.c
+@@ -294,12 +294,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
+ 			   u16 cpu_addr, u32 parameter, u64 *status_reg)
+ {
+ 	int rc;
+-	struct kvm_vcpu *dst_vcpu;
++	struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
+ 
+-	if (cpu_addr >= KVM_MAX_VCPUS)
+-		return SIGP_CC_NOT_OPERATIONAL;
+-
+-	dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ 	if (!dst_vcpu)
+ 		return SIGP_CC_NOT_OPERATIONAL;
+ 
+@@ -481,7 +477,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
+ 	trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
+ 
+ 	if (order_code == SIGP_EXTERNAL_CALL) {
+-		dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
++		dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
+ 		BUG_ON(dest_vcpu == NULL);
+ 
+ 		kvm_s390_vcpu_wakeup(dest_vcpu);
+diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
+index b5d7640abc5d..8a4add8e4639 100644
+--- a/arch/x86/include/uapi/asm/svm.h
++++ b/arch/x86/include/uapi/asm/svm.h
+@@ -100,6 +100,7 @@
+ 	{ SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
++	{ SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
+ 	{ SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
+ 	{ SVM_EXIT_INTR,        "interrupt" }, \
+ 	{ SVM_EXIT_NMI,         "nmi" }, \
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 205e0f3df501..5732326ec126 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -291,10 +291,9 @@ __setup("nosmap", setup_disable_smap);
+ 
+ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
+ {
+-	unsigned long eflags;
++	unsigned long eflags = native_save_fl();
+ 
+ 	/* This should have been cleared long ago */
+-	raw_local_save_flags(eflags);
+ 	BUG_ON(eflags & X86_EFLAGS_AC);
+ 
+ 	if (cpu_has(c, X86_FEATURE_SMAP)) {
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index 7e5da2cbe59e..174fa035a09a 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -65,6 +65,9 @@ startup_64:
+ 	 * tables and then reload them.
+ 	 */
+ 
++	/* Sanitize CPU configuration */
++	call verify_cpu
++
+ 	/*
+ 	 * Compute the delta between the address I am compiled to run at and the
+ 	 * address I am actually running at.
+@@ -174,6 +177,9 @@ ENTRY(secondary_startup_64)
+ 	 * after the boot processor executes this code.
+ 	 */
+ 
++	/* Sanitize CPU configuration */
++	call verify_cpu
++
+ 	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+ 1:
+ 
+@@ -288,6 +294,8 @@ ENTRY(secondary_startup_64)
+ 	pushq	%rax		# target address in negative space
+ 	lretq
+ 
++#include "verify_cpu.S"
++
+ #ifdef CONFIG_HOTPLUG_CPU
+ /*
+  * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index d74ac33290ae..1473a02e6ccb 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1194,6 +1194,14 @@ void __init setup_arch(char **cmdline_p)
+ 	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+ 			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
+ 			KERNEL_PGD_PTRS);
++
++	/*
++	 * sync back low identity map too.  It is used for example
++	 * in the 32-bit EFI stub.
++	 */
++	clone_pgd_range(initial_page_table,
++			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
++			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+ #endif
+ 
+ 	tboot_probe();
+diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
+index b9242bacbe59..4cf401f581e7 100644
+--- a/arch/x86/kernel/verify_cpu.S
++++ b/arch/x86/kernel/verify_cpu.S
+@@ -34,10 +34,11 @@
+ #include <asm/msr-index.h>
+ 
+ verify_cpu:
+-	pushfl				# Save caller passed flags
+-	pushl	$0			# Kill any dangerous flags
+-	popfl
++	pushf				# Save caller passed flags
++	push	$0			# Kill any dangerous flags
++	popf
+ 
++#ifndef __x86_64__
+ 	pushfl				# standard way to check for cpuid
+ 	popl	%eax
+ 	movl	%eax,%ebx
+@@ -48,6 +49,7 @@ verify_cpu:
+ 	popl	%eax
+ 	cmpl	%eax,%ebx
+ 	jz	verify_cpu_no_longmode	# cpu has no cpuid
++#endif
+ 
+ 	movl	$0x0,%eax		# See if cpuid 1 is implemented
+ 	cpuid
+@@ -130,10 +132,10 @@ verify_cpu_sse_test:
+ 	jmp	verify_cpu_sse_test	# try again
+ 
+ verify_cpu_no_longmode:
+-	popfl				# Restore caller passed flags
++	popf				# Restore caller passed flags
+ 	movl $1,%eax
+ 	ret
+ verify_cpu_sse_ok:
+-	popfl				# Restore caller passed flags
++	popf				# Restore caller passed flags
+ 	xorl %eax, %eax
+ 	ret
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 67d07e051436..7dd9a8d3911a 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -339,6 +339,8 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+ 	struct kvm_lapic *apic = vcpu->arch.apic;
+ 
+ 	__kvm_apic_update_irr(pir, apic->regs);
++
++	kvm_make_request(KVM_REQ_EVENT, vcpu);
+ }
+ EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+ 
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 7858cd9acfe4..454ccb082e18 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1105,6 +1105,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ 	set_exception_intercept(svm, PF_VECTOR);
+ 	set_exception_intercept(svm, UD_VECTOR);
+ 	set_exception_intercept(svm, MC_VECTOR);
++	set_exception_intercept(svm, AC_VECTOR);
+ 
+ 	set_intercept(svm, INTERCEPT_INTR);
+ 	set_intercept(svm, INTERCEPT_NMI);
+@@ -1791,6 +1792,12 @@ static int ud_interception(struct vcpu_svm *svm)
+ 	return 1;
+ }
+ 
++static int ac_interception(struct vcpu_svm *svm)
++{
++	kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
++	return 1;
++}
++
+ static void svm_fpu_activate(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+@@ -3361,6 +3368,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
+ 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
+ 	[SVM_EXIT_EXCP_BASE + MC_VECTOR]	= mc_interception,
++	[SVM_EXIT_EXCP_BASE + AC_VECTOR]	= ac_interception,
+ 	[SVM_EXIT_INTR]				= intr_interception,
+ 	[SVM_EXIT_NMI]				= nmi_interception,
+ 	[SVM_EXIT_SMI]				= nop_on_interception,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index bc3041e1abbc..a243854c35d5 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1567,7 +1567,7 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
+ 	u32 eb;
+ 
+ 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+-	     (1u << NM_VECTOR) | (1u << DB_VECTOR);
++	     (1u << NM_VECTOR) | (1u << DB_VECTOR) | (1u << AC_VECTOR);
+ 	if ((vcpu->guest_debug &
+ 	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+ 	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+@@ -5127,6 +5127,9 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ 		return handle_rmode_exception(vcpu, ex_no, error_code);
+ 
+ 	switch (ex_no) {
++	case AC_VECTOR:
++		kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
++		return 1;
+ 	case DB_VECTOR:
+ 		dr6 = vmcs_readl(EXIT_QUALIFICATION);
+ 		if (!(vcpu->guest_debug &
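
The lapic/svm/vmx hunks above make both vendors intercept vector 17 (#AC, alignment check) and immediately reinject it into the guest with its error code; this appears to be the stable backport of the hardening against guests that abuse #AC delivery to wedge the host (the CVE-2015-5307 class of issue). A minimal userspace sketch of the dispatch-table pattern svm_exit_handlers[] uses, with illustrative names and an illustrative EXCP_BASE value:

    /*
     * Exception intercepts index a handler table, and the #AC handler
     * simply queues the fault back into the guest. Not the kernel's
     * actual types; a sketch of the pattern only.
     */
    #include <stdio.h>

    #define AC_VECTOR 17
    #define EXCP_BASE 0x40              /* stand-in for SVM_EXIT_EXCP_BASE */

    struct vcpu { int pending_vector; unsigned int err; };

    static int ac_interception(struct vcpu *v)
    {
        v->pending_vector = AC_VECTOR;  /* reflect #AC into the guest */
        v->err = 0;
        return 1;                       /* handled; resume the guest */
    }

    static int (*const exit_handlers[256])(struct vcpu *) = {
        [EXCP_BASE + AC_VECTOR] = ac_interception,
    };

    int main(void)
    {
        struct vcpu v = { -1, 0 };
        int exit_code = EXCP_BASE + AC_VECTOR;

        if (exit_handlers[exit_code] && exit_handlers[exit_code](&v))
            printf("reinjected vector %d into the guest\n", v.pending_vector);
        return 0;
    }
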
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index e527a3e13939..fa893c3ec408 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
++	{ USB_DEVICE(0x0930, 0x021c) },
+ 	{ USB_DEVICE(0x0930, 0x0220) },
+ 	{ USB_DEVICE(0x0930, 0x0227) },
+ 	{ USB_DEVICE(0x0b05, 0x17d0) },
+@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0CF3, 0x311F) },
+ 	{ USB_DEVICE(0x0cf3, 0x3121) },
+ 	{ USB_DEVICE(0x0CF3, 0x817a) },
++	{ USB_DEVICE(0x0CF3, 0x817b) },
+ 	{ USB_DEVICE(0x0cf3, 0xe003) },
+ 	{ USB_DEVICE(0x0CF3, 0xE004) },
+ 	{ USB_DEVICE(0x0CF3, 0xE005) },
+@@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+@@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index c65501539224..7bf87d9bfd7d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -191,6 +191,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
+@@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
+index bc96f103bd7c..9064636a867f 100644
+--- a/drivers/clk/versatile/clk-icst.c
++++ b/drivers/clk/versatile/clk-icst.c
+@@ -156,8 +156,10 @@ struct clk *icst_clk_register(struct device *dev,
+ 	icst->lockreg = base + desc->lock_offset;
+ 
+ 	clk = clk_register(dev, &icst->hw);
+-	if (IS_ERR(clk))
++	if (IS_ERR(clk)) {
++		kfree(pclone);
+ 		kfree(icst);
++	}
+ 
+ 	return clk;
+ }
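
The clk-icst fix above plugs a leak: on clk_register() failure only the icst wrapper was freed, while the pclone descriptor copy made earlier in the function leaked. A minimal userspace sketch of the allocate-then-unwind idiom, with hypothetical names:

    /*
     * icst_clk_register() duplicates the ICST parameters (pclone) before
     * allocating the clk wrapper, so a failed registration must free
     * both. Simplified; not the kernel driver.
     */
    #include <stdlib.h>

    struct icst_clk { const void *params; };

    static int clk_register_demo(struct icst_clk *icst)
    {
        (void)icst;
        return -1;                      /* simulate registration failure */
    }

    static struct icst_clk *icst_register_demo(const void *desc, size_t len)
    {
        void *pclone;
        struct icst_clk *icst;

        (void)desc;
        pclone = malloc(len);           /* private copy of caller's params */
        if (!pclone)
            return NULL;

        icst = malloc(sizeof(*icst));
        if (!icst) {
            free(pclone);
            return NULL;
        }
        icst->params = pclone;

        if (clk_register_demo(icst)) {
            free(pclone);               /* the leak the patch plugs */
            free(icst);
            return NULL;
        }
        return icst;
    }

    int main(void)
    {
        return icst_register_demo("desc", 16) ? 0 : 1;
    }
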
+diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
+index c5265c1262c5..6aacd205a774 100644
+--- a/drivers/mfd/twl6040.c
++++ b/drivers/mfd/twl6040.c
+@@ -647,6 +647,8 @@ static int twl6040_probe(struct i2c_client *client,
+ 
+ 	twl6040->clk32k = devm_clk_get(&client->dev, "clk32k");
+ 	if (IS_ERR(twl6040->clk32k)) {
++		if (PTR_ERR(twl6040->clk32k) == -EPROBE_DEFER)
++			return -EPROBE_DEFER;
+ 		dev_info(&client->dev, "clk32k is not handled\n");
+ 		twl6040->clk32k = NULL;
+ 	}
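
The twl6040 hunk above distinguishes "the clock provider has not probed yet" (-EPROBE_DEFER, which must be returned so the driver core retries later) from "the clock genuinely is not wired up" (treated as optional). A kernel-style sketch of that idiom, assuming a hypothetical demo driver:

    /*
     * Only -EPROBE_DEFER is propagated, because it means the clock
     * provider has not probed yet; any other error means the clock
     * really is absent and the device works without it. A sketch, not
     * the twl6040 driver itself.
     */
    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/i2c.h>

    struct demo_chip { struct clk *clk32k; };

    static int demo_get_clk(struct i2c_client *client, struct demo_chip *chip)
    {
        chip->clk32k = devm_clk_get(&client->dev, "clk32k");
        if (IS_ERR(chip->clk32k)) {
            if (PTR_ERR(chip->clk32k) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not ready: retry later */
            dev_info(&client->dev, "clk32k is not handled\n");
            chip->clk32k = NULL;        /* genuinely optional */
        }
        return 0;
    }

On later kernels devm_clk_get_optional() expresses the optional half of this directly, though the deferral check here predates it.
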
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index aede704605c6..141c2a42d7ed 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -915,7 +915,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
+ 	     nla_put(skb, IFLA_CAN_BITTIMING_CONST,
+ 		     sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
+ 
+-	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
++	    nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
+ 	    nla_put_u32(skb, IFLA_CAN_STATE, state) ||
+ 	    nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
+ 	    nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
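
The can_fill_info() fix above corrects a sizeof mismatch: the clock attribute was put on the netlink message with sizeof(cm) (the ctrlmode struct) instead of sizeof(priv->clock); the mlx4 hunks further down address the same class of issue in memset()/memcpy() sizes. A tiny userspace demo of the bug class:

    /*
     * Taking the size from an unrelated object either truncates the
     * payload or over-reads past it. Here a 4-byte clock struct would be
     * sent with the 8-byte ctrlmode size.
     */
    #include <stdio.h>

    struct can_clock    { unsigned int freq; };
    struct can_ctrlmode { unsigned int mask, flags; };

    int main(void)
    {
        struct can_clock clock = { 8000000 };
        struct can_ctrlmode cm = { 0, 0 };

        /* buggy:  nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &clock)
         *         copies sizeof(cm) bytes from a sizeof(clock) object */
        printf("buggy length:   %zu bytes\n", sizeof(cm));
        /* fixed:  size taken from the object actually being sent */
        printf("correct length: %zu bytes\n", sizeof(clock));
        return 0;
    }
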
+diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
+index 32bd7f451aa4..0c048e261ee6 100644
+--- a/drivers/net/can/sja1000/sja1000.c
++++ b/drivers/net/can/sja1000/sja1000.c
+@@ -218,6 +218,9 @@ static void sja1000_start(struct net_device *dev)
+ 	priv->write_reg(priv, SJA1000_RXERR, 0x0);
+ 	priv->read_reg(priv, SJA1000_ECC);
+ 
++	/* clear interrupt flags */
++	priv->read_reg(priv, SJA1000_IR);
++
+ 	/* leave reset mode */
+ 	set_normal_mode(dev);
+ }
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 4d608f0117cd..e07afc673d7a 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -949,7 +949,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ 	/* Set CPU queue access map - all CPUs have access to all RX
+ 	 * queues and to all TX queues
+ 	 */
+-	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
++	for_each_present_cpu(cpu)
+ 		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
+ 			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
+ 			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+index 529ef0594b90..3756e45d8cec 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
+@@ -2382,7 +2382,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
+ 			}
+ 		}
+ 
+-		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
++		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ 		INIT_WORK(&priv->mfunc.master.comm_work,
+ 			  mlx4_master_comm_channel);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 983b1d51244d..337811d208bd 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -185,7 +185,7 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+ 		return;
+ 	}
+ 
+-	memcpy(s_eqe, eqe, dev->caps.eqe_size - 1);
++	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ 	s_eqe->slave_id = slave;
+ 	/* ensure all information is written before setting the ownership bit */
+ 	dma_wmb();
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index fbb6cfa0f5f1..feca46efa12f 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -1344,7 +1344,9 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+ 	unsigned int write_ptr;
+ 	efx_qword_t *txd;
+ 
+-	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
++	tx_queue->xmit_more_available = false;
++	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
++		return;
+ 
+ 	do {
+ 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
+index bb89e96a125e..6d4e0047a31d 100644
+--- a/drivers/net/ethernet/sfc/farch.c
++++ b/drivers/net/ethernet/sfc/farch.c
+@@ -319,7 +319,9 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+ 	unsigned write_ptr;
+ 	unsigned old_write_count = tx_queue->write_count;
+ 
+-	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
++	tx_queue->xmit_more_available = false;
++	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
++		return;
+ 
+ 	do {
+ 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
+index 325dd94bca46..0bdef4a074dd 100644
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -218,6 +218,7 @@ struct efx_tx_buffer {
+  * @tso_packets: Number of packets via the TSO xmit path
+  * @pushes: Number of times the TX push feature has been used
+  * @pio_packets: Number of times the TX PIO feature has been used
++ * @xmit_more_available: Are any packets waiting to be pushed to the NIC
+  * @empty_read_count: If the completion path has seen the queue as empty
+  *	and the transmission path has not yet checked this, the value of
+  *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
+@@ -250,6 +251,7 @@ struct efx_tx_queue {
+ 	unsigned int tso_packets;
+ 	unsigned int pushes;
+ 	unsigned int pio_packets;
++	bool xmit_more_available;
+ 	/* Statistics to supplement MAC stats */
+ 	unsigned long tx_packets;
+ 
+diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
+index aaf2987512b5..e70edc3dea7e 100644
+--- a/drivers/net/ethernet/sfc/tx.c
++++ b/drivers/net/ethernet/sfc/tx.c
+@@ -431,8 +431,20 @@ finish_packet:
+ 	efx_tx_maybe_stop_queue(tx_queue);
+ 
+ 	/* Pass off to hardware */
+-	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
++	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
++		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
++
++		/* There could be packets left on the partner queue if those
++		 * SKBs had skb->xmit_more set. If we do not push them, they
++		 * could sit there indefinitely and trigger the netdev watchdog.
++		 */
++		if (txq2->xmit_more_available)
++			efx_nic_push_buffers(txq2);
++
+ 		efx_nic_push_buffers(tx_queue);
++	} else {
++		tx_queue->xmit_more_available = skb->xmit_more;
++	}
+ 
+ 	tx_queue->tx_packets++;
+ 
+@@ -721,6 +733,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+ 	tx_queue->read_count = 0;
+ 	tx_queue->old_read_count = 0;
+ 	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
++	tx_queue->xmit_more_available = false;
+ 
+ 	/* Set up TX descriptor ring */
+ 	efx_nic_init_tx(tx_queue);
+@@ -746,6 +759,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+ 
+ 		++tx_queue->read_count;
+ 	}
++	tx_queue->xmit_more_available = false;
+ 	netdev_tx_reset_queue(tx_queue->core_txq);
+ }
+ 
+@@ -1301,8 +1315,20 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
+ 	efx_tx_maybe_stop_queue(tx_queue);
+ 
+ 	/* Pass off to hardware */
+-	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq))
++	if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
++		struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
++
++		/* There could be packets left on the partner queue if those
++		 * SKBs had skb->xmit_more set. If we do not push them, they
++		 * could sit there indefinitely and trigger the netdev watchdog.
++		 */
++		if (txq2->xmit_more_available)
++			efx_nic_push_buffers(txq2);
++
+ 		efx_nic_push_buffers(tx_queue);
++	} else {
++		tx_queue->xmit_more_available = skb->xmit_more;
++	}
+ 
+ 	tx_queue->tso_bursts++;
+ 	return NETDEV_TX_OK;
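
The sfc hunks above implement xmit_more-aware doorbell batching: the push is deferred while the stack promises more skbs, the queue records that a push is still owed (xmit_more_available), and the final push also flushes the partner queue so deferred packets cannot linger until the watchdog fires. A simplified sketch of the pattern, not the sfc driver itself:

    /*
     * Defer the (expensive) doorbell write while the stack promises more
     * packets, and record that a push is still owed so a later flush can
     * find it.
     */
    struct demo_txq {
        unsigned int insert_count;      /* descriptors written to the ring */
        unsigned int write_count;       /* descriptors pushed to the NIC */
        int xmit_more_available;        /* doorbell still owed? */
    };

    static void push_buffers(struct demo_txq *q)
    {
        q->xmit_more_available = 0;
        if (q->write_count == q->insert_count)
            return;                     /* nothing pending: no-op, not BUG() */
        /* ... write the doorbell register here ... */
        q->write_count = q->insert_count;
    }

    static void start_xmit(struct demo_txq *q, struct demo_txq *partner,
                           int xmit_more, int queue_stopped)
    {
        q->insert_count++;              /* one more descriptor queued */
        if (!xmit_more || queue_stopped) {
            if (partner->xmit_more_available)
                push_buffers(partner);  /* flush deferred partner work too */
            push_buffers(q);
        } else {
            q->xmit_more_available = 1; /* a later skb must ring the bell */
        }
    }
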
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+index 771cda2a48b2..2e51b816a7e8 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+@@ -721,10 +721,13 @@ static int stmmac_get_ts_info(struct net_device *dev,
+ {
+ 	struct stmmac_priv *priv = netdev_priv(dev);
+ 
+-	if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
++	if ((priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
+ 
+-		info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
++		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
++					SOF_TIMESTAMPING_TX_HARDWARE |
++					SOF_TIMESTAMPING_RX_SOFTWARE |
+ 					SOF_TIMESTAMPING_RX_HARDWARE |
++					SOF_TIMESTAMPING_SOFTWARE |
+ 					SOF_TIMESTAMPING_RAW_HARDWARE;
+ 
+ 		if (priv->ptp_clock)
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 58858c5589db..4dba5fbc735e 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -82,7 +82,7 @@ static const struct proto_ops macvtap_socket_ops;
+ #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
+ 		      NETIF_F_TSO6 | NETIF_F_UFO)
+ #define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+-#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
++#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)
+ 
+ static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
+ {
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index db2c3cdf2c40..ab33262ed826 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -589,7 +589,7 @@ static int pppoe_release(struct socket *sock)
+ 
+ 	po = pppox_sk(sk);
+ 
+-	if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
++	if (po->pppoe_dev) {
+ 		dev_put(po->pppoe_dev);
+ 		po->pppoe_dev = NULL;
+ 	}
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index f603f362504b..4e0470d396a3 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -764,6 +764,10 @@ static const struct usb_device_id products[] = {
+ 	{QMI_FIXED_INTF(0x1199, 0x9056, 8)},	/* Sierra Wireless Modem */
+ 	{QMI_FIXED_INTF(0x1199, 0x9057, 8)},
+ 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
++	{QMI_FIXED_INTF(0x1199, 0x9070, 8)},	/* Sierra Wireless MC74xx/EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9070, 10)},	/* Sierra Wireless MC74xx/EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx/EM74xx */
++	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx/EM74xx */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
+ 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 7fbca37a1adf..237f8e5e493d 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	/* Do we support "hardware" checksums? */
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ 		/* This opens up the world of extra features. */
+-		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 		if (csum)
+-			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
++			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+ 
+ 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 5e021b0b3f9e..1734cc50ded8 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3183,7 +3183,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+ 
+ static u32 get_nss_from_chainmask(u16 chain_mask)
+ {
+-	if ((chain_mask & 0x15) == 0x15)
++	if ((chain_mask & 0xf) == 0xf)
+ 		return 4;
+ 	else if ((chain_mask & 0x7) == 0x7)
+ 		return 3;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index 8b16949a9cb9..88bf80a942b4 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -421,14 +421,21 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ /* 8000 Series */
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+-	{IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+@@ -437,18 +444,28 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
+ #endif /* CONFIG_IWLMVM */
+ 
+ 	{0}
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 699a4802835f..1de80a8e357a 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -572,10 +572,8 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 
+ 		do {
+ 			ret = iwl_pcie_set_hw_ready(trans);
+-			if (ret >= 0) {
+-				ret = 0;
+-				goto out;
+-			}
++			if (ret >= 0)
++				return 0;
+ 
+ 			usleep_range(200, 1000);
+ 			t += 200;
+@@ -585,10 +583,6 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 
+ 	IWL_ERR(trans, "Couldn't prepare the card\n");
+ 
+-out:
+-	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+-		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+-
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
+index 1fb329dc6744..24e48bddf186 100644
+--- a/drivers/net/wireless/mwifiex/debugfs.c
++++ b/drivers/net/wireless/mwifiex/debugfs.c
+@@ -593,7 +593,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 		(struct mwifiex_private *) file->private_data;
+ 	unsigned long addr = get_zeroed_page(GFP_KERNEL);
+ 	char *buf = (char *) addr;
+-	int pos = 0, ret = 0, i;
++	int pos, ret, i;
+ 	u8 value[MAX_EEPROM_DATA];
+ 
+ 	if (!buf)
+@@ -601,7 +601,7 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 
+ 	if (saved_offset == -1) {
+ 		/* No command has been given */
+-		pos += snprintf(buf, PAGE_SIZE, "0");
++		pos = snprintf(buf, PAGE_SIZE, "0");
+ 		goto done;
+ 	}
+ 
+@@ -610,17 +610,17 @@ mwifiex_rdeeprom_read(struct file *file, char __user *ubuf,
+ 				  (u16) saved_bytes, value);
+ 	if (ret) {
+ 		ret = -EINVAL;
+-		goto done;
++		goto out_free;
+ 	}
+ 
+-	pos += snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
++	pos = snprintf(buf, PAGE_SIZE, "%d %d ", saved_offset, saved_bytes);
+ 
+ 	for (i = 0; i < saved_bytes; i++)
+-		pos += snprintf(buf + strlen(buf), PAGE_SIZE, "%d ", value[i]);
+-
+-	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
++		pos += scnprintf(buf + pos, PAGE_SIZE - pos, "%d ", value[i]);
+ 
+ done:
++	ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
++out_free:
+ 	free_page(addr);
+ 	return ret;
+ }
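
The mwifiex change above fixes the classic snprintf()-as-offset bug: snprintf() returns the length that would have been written, so accumulating its return value can push pos past the buffer, while scnprintf() returns the bytes actually stored. A userspace demo, with scnprintf() re-implemented here because it is a kernel helper:

    /*
     * snprintf() reports the would-be length on truncation, so using its
     * return value to advance an offset can walk off the end of the
     * buffer; the clamped variant returns what was actually written.
     */
    #include <stdarg.h>
    #include <stdio.h>

    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int n;

        va_start(ap, fmt);
        n = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (n >= (int)size)
            n = size ? (int)size - 1 : 0;   /* clamp to bytes stored */
        return n;
    }

    int main(void)
    {
        char buf[8];
        int pos;

        pos = snprintf(buf, sizeof(buf), "0123456789");     /* returns 10! */
        printf("snprintf pos=%d (past the 8-byte buffer)\n", pos);

        pos = my_scnprintf(buf, sizeof(buf), "0123456789"); /* returns 7 */
        printf("scnprintf pos=%d (safe offset)\n", pos);
        return 0;
    }
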
+diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+index 0038d29a37fe..a470e32c49c1 100644
+--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
++++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+@@ -874,7 +874,7 @@ void lnet_debug_peer(lnet_nid_t nid);
+ 
+ static inline void lnet_peer_set_alive(lnet_peer_t *lp)
+ {
+-	lp->lp_last_alive = lp->lp_last_query = get_seconds();
++	lp->lp_last_alive = lp->lp_last_query = jiffies;
+ 	if (!lp->lp_alive)
+ 		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);
+ }
+diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
+index f8b5b332e7c3..943a0e204532 100644
+--- a/drivers/staging/rtl8712/usb_intf.c
++++ b/drivers/staging/rtl8712/usb_intf.c
+@@ -144,6 +144,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
+ 	{USB_DEVICE(0x0DF6, 0x0058)},
+ 	{USB_DEVICE(0x0DF6, 0x0049)},
+ 	{USB_DEVICE(0x0DF6, 0x004C)},
++	{USB_DEVICE(0x0DF6, 0x006C)},
+ 	{USB_DEVICE(0x0DF6, 0x0064)},
+ 	/* Skyworth */
+ 	{USB_DEVICE(0x14b2, 0x3300)},
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 6b6c6606af5f..e5edf45e9d4c 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -169,7 +169,7 @@ static inline int tty_copy_to_user(struct tty_struct *tty,
+ {
+ 	struct n_tty_data *ldata = tty->disc_data;
+ 
+-	tty_audit_add_data(tty, to, n, ldata->icanon);
++	tty_audit_add_data(tty, from, n, ldata->icanon);
+ 	return copy_to_user(to, from, n);
+ }
+ 
+diff --git a/drivers/tty/tty_audit.c b/drivers/tty/tty_audit.c
+index 90ca082935f6..3d245cd3d8e6 100644
+--- a/drivers/tty/tty_audit.c
++++ b/drivers/tty/tty_audit.c
+@@ -265,7 +265,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
+  *
+  *	Audit @data of @size from @tty, if necessary.
+  */
+-void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ 			size_t size, unsigned icanon)
+ {
+ 	struct tty_audit_buf *buf;
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index 21837f14a403..5a5c1ab5a375 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -1287,18 +1287,22 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
+ 	int	was_stopped = tty->stopped;
+ 
+ 	if (tty->ops->send_xchar) {
++		down_read(&tty->termios_rwsem);
+ 		tty->ops->send_xchar(tty, ch);
++		up_read(&tty->termios_rwsem);
+ 		return 0;
+ 	}
+ 
+ 	if (tty_write_lock(tty, 0) < 0)
+ 		return -ERESTARTSYS;
+ 
++	down_read(&tty->termios_rwsem);
+ 	if (was_stopped)
+ 		start_tty(tty);
+ 	tty->ops->write(tty, &ch, 1);
+ 	if (was_stopped)
+ 		stop_tty(tty);
++	up_read(&tty->termios_rwsem);
+ 	tty_write_unlock(tty);
+ 	return 0;
+ }
+diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
+index 8e53fe469664..7bbf86b94716 100644
+--- a/drivers/tty/tty_ioctl.c
++++ b/drivers/tty/tty_ioctl.c
+@@ -1144,16 +1144,12 @@ int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
+ 			spin_unlock_irq(&tty->flow_lock);
+ 			break;
+ 		case TCIOFF:
+-			down_read(&tty->termios_rwsem);
+ 			if (STOP_CHAR(tty) != __DISABLED_CHAR)
+ 				retval = tty_send_xchar(tty, STOP_CHAR(tty));
+-			up_read(&tty->termios_rwsem);
+ 			break;
+ 		case TCION:
+-			down_read(&tty->termios_rwsem);
+ 			if (START_CHAR(tty) != __DISABLED_CHAR)
+ 				retval = tty_send_xchar(tty, START_CHAR(tty));
+-			up_read(&tty->termios_rwsem);
+ 			break;
+ 		default:
+ 			return -EINVAL;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index fa774323ebda..846ceb91ec14 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -68,6 +68,12 @@ struct ci_hdrc_imx_data {
+ 	struct imx_usbmisc_data *usbmisc_data;
+ 	bool supports_runtime_pm;
+ 	bool in_lpm;
++	/* SoCs before i.mx6 (except imx23/imx28) need three clocks */
++	bool need_three_clks;
++	struct clk *clk_ipg;
++	struct clk *clk_ahb;
++	struct clk *clk_per;
++	/* --------------------------------- */
+ };
+ 
+ /* Common functions shared by usbmisc drivers */
+@@ -119,6 +125,102 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
+ }
+ 
+ /* End of common functions shared by usbmisc drivers*/
++static int imx_get_clks(struct device *dev)
++{
++	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++	int ret = 0;
++
++	data->clk_ipg = devm_clk_get(dev, "ipg");
++	if (IS_ERR(data->clk_ipg)) {
++		/* If the platform only needs one clock */
++		data->clk = devm_clk_get(dev, NULL);
++		if (IS_ERR(data->clk)) {
++			ret = PTR_ERR(data->clk);
++			dev_err(dev,
++				"Failed to get clks, err=%ld,%ld\n",
++				PTR_ERR(data->clk), PTR_ERR(data->clk_ipg));
++			return ret;
++		}
++		return ret;
++	}
++
++	data->clk_ahb = devm_clk_get(dev, "ahb");
++	if (IS_ERR(data->clk_ahb)) {
++		ret = PTR_ERR(data->clk_ahb);
++		dev_err(dev,
++			"Failed to get ahb clock, err=%d\n", ret);
++		return ret;
++	}
++
++	data->clk_per = devm_clk_get(dev, "per");
++	if (IS_ERR(data->clk_per)) {
++		ret = PTR_ERR(data->clk_per);
++		dev_err(dev,
++			"Failed to get per clock, err=%d\n", ret);
++		return ret;
++	}
++
++	data->need_three_clks = true;
++	return ret;
++}
++
++static int imx_prepare_enable_clks(struct device *dev)
++{
++	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++	int ret = 0;
++
++	if (data->need_three_clks) {
++		ret = clk_prepare_enable(data->clk_ipg);
++		if (ret) {
++			dev_err(dev,
++				"Failed to prepare/enable ipg clk, err=%d\n",
++				ret);
++			return ret;
++		}
++
++		ret = clk_prepare_enable(data->clk_ahb);
++		if (ret) {
++			dev_err(dev,
++				"Failed to prepare/enable ahb clk, err=%d\n",
++				ret);
++			clk_disable_unprepare(data->clk_ipg);
++			return ret;
++		}
++
++		ret = clk_prepare_enable(data->clk_per);
++		if (ret) {
++			dev_err(dev,
++				"Failed to prepare/enable per clk, err=%d\n",
++				ret);
++			clk_disable_unprepare(data->clk_ahb);
++			clk_disable_unprepare(data->clk_ipg);
++			return ret;
++		}
++	} else {
++		ret = clk_prepare_enable(data->clk);
++		if (ret) {
++			dev_err(dev,
++				"Failed to prepare/enable clk, err=%d\n",
++				ret);
++			return ret;
++		}
++	}
++
++	return ret;
++}
++
++static void imx_disable_unprepare_clks(struct device *dev)
++{
++	struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
++
++	if (data->need_three_clks) {
++		clk_disable_unprepare(data->clk_per);
++		clk_disable_unprepare(data->clk_ahb);
++		clk_disable_unprepare(data->clk_ipg);
++	} else {
++		clk_disable_unprepare(data->clk);
++	}
++}
+ 
+ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ {
+@@ -137,23 +239,18 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 	if (!data)
+ 		return -ENOMEM;
+ 
++	platform_set_drvdata(pdev, data);
+ 	data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
+ 	if (IS_ERR(data->usbmisc_data))
+ 		return PTR_ERR(data->usbmisc_data);
+ 
+-	data->clk = devm_clk_get(&pdev->dev, NULL);
+-	if (IS_ERR(data->clk)) {
+-		dev_err(&pdev->dev,
+-			"Failed to get clock, err=%ld\n", PTR_ERR(data->clk));
+-		return PTR_ERR(data->clk);
+-	}
++	ret = imx_get_clks(&pdev->dev);
++	if (ret)
++		return ret;
+ 
+-	ret = clk_prepare_enable(data->clk);
+-	if (ret) {
+-		dev_err(&pdev->dev,
+-			"Failed to prepare or enable clock, err=%d\n", ret);
++	ret = imx_prepare_enable_clks(&pdev->dev);
++	if (ret)
+ 		return ret;
+-	}
+ 
+ 	data->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "fsl,usbphy", 0);
+ 	if (IS_ERR(data->phy)) {
+@@ -196,8 +293,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ 		goto disable_device;
+ 	}
+ 
+-	platform_set_drvdata(pdev, data);
+-
+ 	if (data->supports_runtime_pm) {
+ 		pm_runtime_set_active(&pdev->dev);
+ 		pm_runtime_enable(&pdev->dev);
+@@ -210,7 +305,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ disable_device:
+ 	ci_hdrc_remove_device(data->ci_pdev);
+ err_clk:
+-	clk_disable_unprepare(data->clk);
++	imx_disable_unprepare_clks(&pdev->dev);
+ 	return ret;
+ }
+ 
+@@ -224,7 +319,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
+ 		pm_runtime_put_noidle(&pdev->dev);
+ 	}
+ 	ci_hdrc_remove_device(data->ci_pdev);
+-	clk_disable_unprepare(data->clk);
++	imx_disable_unprepare_clks(&pdev->dev);
+ 
+ 	return 0;
+ }
+@@ -236,7 +331,7 @@ static int imx_controller_suspend(struct device *dev)
+ 
+ 	dev_dbg(dev, "at %s\n", __func__);
+ 
+-	clk_disable_unprepare(data->clk);
++	imx_disable_unprepare_clks(dev);
+ 	data->in_lpm = true;
+ 
+ 	return 0;
+@@ -254,7 +349,7 @@ static int imx_controller_resume(struct device *dev)
+ 		return 0;
+ 	}
+ 
+-	ret = clk_prepare_enable(data->clk);
++	ret = imx_prepare_enable_clks(dev);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -269,7 +364,7 @@ static int imx_controller_resume(struct device *dev)
+ 	return 0;
+ 
+ clk_disable:
+-	clk_disable_unprepare(data->clk);
++	imx_disable_unprepare_clks(dev);
+ 	return ret;
+ }
+ 
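
The ci_hdrc_imx rework above handles SoCs that need three named clocks (ipg/ahb/per) as well as those with a single anonymous clock, using hand-rolled enable/unwind chains. On kernels well after 4.1 the same shape can be expressed with the clk_bulk helpers; a hedged sketch, not a change backportable to this tree:

    /*
     * One call acquires the named set, one enables it with automatic
     * unwind on failure, and one disables it, replacing the hand-rolled
     * chains in the patch above.
     */
    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/kernel.h>

    static struct clk_bulk_data imx_clks[] = {
        { .id = "ipg" },
        { .id = "ahb" },
        { .id = "per" },
    };

    static int demo_get_enable_clks(struct device *dev)
    {
        int ret;

        ret = devm_clk_bulk_get(dev, ARRAY_SIZE(imx_clks), imx_clks);
        if (ret)
            return ret;                 /* -EPROBE_DEFER propagates naturally */

        /* enables in order and unwinds automatically on failure */
        return clk_bulk_prepare_enable(ARRAY_SIZE(imx_clks), imx_clks);
    }

    static void demo_disable_clks(void)
    {
        clk_bulk_disable_unprepare(ARRAY_SIZE(imx_clks), imx_clks);
    }
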
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 6e53c24fa1cb..92937c14f818 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -1730,6 +1730,22 @@ static int ci_udc_start(struct usb_gadget *gadget,
+ 	return retval;
+ }
+ 
++static void ci_udc_stop_for_otg_fsm(struct ci_hdrc *ci)
++{
++	if (!ci_otg_is_fsm_mode(ci))
++		return;
++
++	mutex_lock(&ci->fsm.lock);
++	if (ci->fsm.otg->state == OTG_STATE_A_PERIPHERAL) {
++		ci->fsm.a_bidl_adis_tmout = 1;
++		ci_hdrc_otg_fsm_start(ci);
++	} else if (ci->fsm.otg->state == OTG_STATE_B_PERIPHERAL) {
++		ci->fsm.protocol = PROTO_UNDEF;
++		ci->fsm.otg->state = OTG_STATE_UNDEFINED;
++	}
++	mutex_unlock(&ci->fsm.lock);
++}
++
+ /**
+  * ci_udc_stop: unregister a gadget driver
+  */
+@@ -1754,6 +1770,7 @@ static int ci_udc_stop(struct usb_gadget *gadget)
+ 	ci->driver = NULL;
+ 	spin_unlock_irqrestore(&ci->lock, flags);
+ 
++	ci_udc_stop_for_otg_fsm(ci);
+ 	return 0;
+ }
+ 
+diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
+index 0924ee40a966..b9adc2ec49dd 100644
+--- a/drivers/usb/class/usblp.c
++++ b/drivers/usb/class/usblp.c
+@@ -869,11 +869,11 @@ static int usblp_wwait(struct usblp *usblp, int nonblock)
+ 
+ 	add_wait_queue(&usblp->wwait, &waita);
+ 	for (;;) {
+-		set_current_state(TASK_INTERRUPTIBLE);
+ 		if (mutex_lock_interruptible(&usblp->mut)) {
+ 			rc = -EINTR;
+ 			break;
+ 		}
++		set_current_state(TASK_INTERRUPTIBLE);
+ 		rc = usblp_wtest(usblp, nonblock);
+ 		mutex_unlock(&usblp->mut);
+ 		if (rc <= 0)
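
The usblp fix above restores the canonical sleep-loop ordering: a task must not call a sleeping primitive such as mutex_lock_interruptible() after marking itself TASK_INTERRUPTIBLE, because the primitive clobbers the task state and the later schedule() can then miss a wakeup. A kernel-style sketch of the corrected loop, with illustrative names:

    /*
     * The TASK_INTERRUPTIBLE marking must come after anything that can
     * itself sleep, and immediately before the condition re-check and
     * schedule().
     */
    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static int demo_wait(struct mutex *lock, wait_queue_head_t *wq,
                         int (*test_ready)(void))
    {
        DECLARE_WAITQUEUE(waita, current);
        int rc;

        add_wait_queue(wq, &waita);
        for (;;) {
            if (mutex_lock_interruptible(lock)) {   /* may sleep: state must
                                                     * still be RUNNING here */
                rc = -EINTR;
                break;
            }
            set_current_state(TASK_INTERRUPTIBLE);  /* mark the sleep now */
            rc = test_ready();
            mutex_unlock(lock);
            if (rc <= 0)                /* ready (0) or error (<0): stop */
                break;
            schedule();                 /* nothing slept since the marking */
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(wq, &waita);
        return rc;
    }
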
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index 2bbab3d86fff..8e9518fe7763 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -445,12 +445,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
+ 
+ 	reg = dwc3_readl(dwc->regs, DWC3_GSNPSID);
+ 	/* This should read as U3 followed by revision number */
+-	if ((reg & DWC3_GSNPSID_MASK) != 0x55330000) {
++	if ((reg & DWC3_GSNPSID_MASK) == 0x55330000) {
++		/* Detected DWC_usb3 IP */
++		dwc->revision = reg;
++	} else if ((reg & DWC3_GSNPSID_MASK) == 0x33310000) {
++		/* Detected DWC_usb31 IP */
++		dwc->revision = dwc3_readl(dwc->regs, DWC3_VER_NUMBER);
++		dwc->revision |= DWC3_REVISION_IS_DWC31;
++	} else {
+ 		dev_err(dwc->dev, "this is not a DesignWare USB3 DRD Core\n");
+ 		ret = -ENODEV;
+ 		goto err0;
+ 	}
+-	dwc->revision = reg;
+ 
+ 	/*
+ 	 * Write Linux Version Code to our GUID register so it's easy to figure
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index c0eafa6fd403..173edd4ca20e 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -107,6 +107,9 @@
+ #define DWC3_GPRTBIMAP_FS0	0xc188
+ #define DWC3_GPRTBIMAP_FS1	0xc18c
+ 
++#define DWC3_VER_NUMBER		0xc1a0
++#define DWC3_VER_TYPE		0xc1a4
++
+ #define DWC3_GUSB2PHYCFG(n)	(0xc200 + (n * 0x04))
+ #define DWC3_GUSB2I2CCTL(n)	(0xc240 + (n * 0x04))
+ 
+@@ -752,6 +755,14 @@ struct dwc3 {
+ 	u32			num_event_buffers;
+ 	u32			u1u2;
+ 	u32			maximum_speed;
++
++	/*
++	 * All 3.1 IP version constants are greater than the 3.0 IP
++	 * version constants. This works for most version checks in
++	 * dwc3. However, in the future, this may not apply as
++	 * features may be developed on newer versions of the 3.0 IP
++	 * that are not in the 3.1 IP.
++	 */
+ 	u32			revision;
+ 
+ #define DWC3_REVISION_173A	0x5533173a
+@@ -774,6 +785,13 @@ struct dwc3 {
+ #define DWC3_REVISION_270A	0x5533270a
+ #define DWC3_REVISION_280A	0x5533280a
+ 
++/*
++ * NOTICE: we're using bit 31 as a "is usb 3.1" flag. This is really
++ * just so dwc31 revisions are always larger than dwc3.
++ */
++#define DWC3_REVISION_IS_DWC31		0x80000000
+#define DWC3_USB31_REVISION_110A	(0x3131302a | DWC3_REVISION_IS_DWC31)
++
+ 	enum dwc3_ep0_next	ep0_next_event;
+ 	enum dwc3_ep0_state	ep0state;
+ 	enum dwc3_link_state	link_state;
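
The core.h hunk above folds two differently-encoded ID registers into one ordered u32: DWC_usb31 parts get bit 31 set, so every 3.1 revision compares greater than every 3.0 revision and existing "revision >= X" checks keep working. A small userspace demo using the constants from the patch:

    /*
     * Bit 31 tags DWC_usb31 parts so plain integer comparisons order the
     * two IP families correctly.
     */
    #include <stdio.h>

    #define DWC3_REVISION_280A          0x5533280au
    #define DWC3_REVISION_IS_DWC31      0x80000000u
    #define DWC3_USB31_REVISION_110A    (0x3131302au | DWC3_REVISION_IS_DWC31)

    int main(void)
    {
        unsigned int rev30 = DWC3_REVISION_280A;
        unsigned int rev31 = DWC3_USB31_REVISION_110A;

        printf("3.1 sorts above 3.0: %s\n", rev31 > rev30 ? "yes" : "no");
        printf("is dwc31 flag set:   %s\n",
               (rev31 & DWC3_REVISION_IS_DWC31) ? "yes" : "no");
        return 0;
    }
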
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index b773fb53d6a7..830f020230c4 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -25,6 +25,8 @@
+ #include "platform_data.h"
+ 
+ #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3	0xabcd
++#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI 0xabce
++#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31 0xabcf
+ #define PCI_DEVICE_ID_INTEL_BYT		0x0f37
+ #define PCI_DEVICE_ID_INTEL_MRFLD	0x119e
+ #define PCI_DEVICE_ID_INTEL_BSW		0x22B7
+@@ -65,6 +67,21 @@ static int dwc3_pci_quirks(struct pci_dev *pdev)
+ 						sizeof(pdata));
+ 	}
+ 
++	if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
++	    (pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
++	     pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
++	     pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
++
++		struct dwc3_platform_data pdata;
++
++		memset(&pdata, 0, sizeof(pdata));
++		pdata.usb3_lpm_capable = true;
++		pdata.has_lpm_erratum = true;
++
++		return platform_device_add_data(pci_get_drvdata(pdev), &pdata,
++						sizeof(pdata));
++	}
++
+ 	return 0;
+ }
+ 
+@@ -136,6 +153,14 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ 		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
+ 				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
+ 	},
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
++				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI),
++	},
++	{
++		PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
++				PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31),
++	},
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
+ 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 333a7c0078fc..6fbf461d523c 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1859,27 +1859,32 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
+ 	unsigned int		i;
+ 	int			ret;
+ 
+-	req = next_request(&dep->req_queued);
+-	if (!req) {
+-		WARN_ON_ONCE(1);
+-		return 1;
+-	}
+-	i = 0;
+ 	do {
+-		slot = req->start_slot + i;
+-		if ((slot == DWC3_TRB_NUM - 1) &&
++		req = next_request(&dep->req_queued);
++		if (!req) {
++			WARN_ON_ONCE(1);
++			return 1;
++		}
++		i = 0;
++		do {
++			slot = req->start_slot + i;
++			if ((slot == DWC3_TRB_NUM - 1) &&
+ 				usb_endpoint_xfer_isoc(dep->endpoint.desc))
+-			slot++;
+-		slot %= DWC3_TRB_NUM;
+-		trb = &dep->trb_pool[slot];
++				slot++;
++			slot %= DWC3_TRB_NUM;
++			trb = &dep->trb_pool[slot];
++
++			ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
++					event, status);
++			if (ret)
++				break;
++		} while (++i < req->request.num_mapped_sgs);
++
++		dwc3_gadget_giveback(dep, req, status);
+ 
+-		ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
+-				event, status);
+ 		if (ret)
+ 			break;
+-	} while (++i < req->request.num_mapped_sgs);
+-
+-	dwc3_gadget_giveback(dep, req, status);
++	} while (1);
+ 
+ 	if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
+ 			list_empty(&dep->req_queued)) {
+@@ -2709,12 +2714,34 @@ int dwc3_gadget_init(struct dwc3 *dwc)
+ 	}
+ 
+ 	dwc->gadget.ops			= &dwc3_gadget_ops;
+-	dwc->gadget.max_speed		= USB_SPEED_SUPER;
+ 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
+ 	dwc->gadget.sg_supported	= true;
+ 	dwc->gadget.name		= "dwc3-gadget";
+ 
+ 	/*
++	 * FIXME We might be setting max_speed to <SUPER, however versions
++	 * <2.20a of dwc3 have an issue with metastability (documented
++	 * elsewhere in this driver) which tells us we can't set max speed to
++	 * anything lower than SUPER.
++	 *
++	 * Because gadget.max_speed is only used by composite.c and function
++	 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
++	 * to happen so we avoid sending the SuperSpeed Capability descriptor
++	 * together with our BOS descriptor, as that could confuse the host
++	 * into thinking we can handle super speed.
++	 *
++	 * Note that, in fact, we won't even support GetBOS requests when
++	 * speed is less than super speed, because we don't yet have a way
++	 * to tell composite.c that we are USB 2.0 + LPM ECN.
++	 */
++	if (dwc->revision < DWC3_REVISION_220A)
++		dwc3_trace(trace_dwc3_gadget,
++				"Changing max_speed on rev %08x\n",
++				dwc->revision);
++
++	dwc->gadget.max_speed		= dwc->maximum_speed;
++
++	/*
+ 	 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
+ 	 * on ep out.
+ 	 */
+diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
+index 351d48550c33..d6ca3697d3c8 100644
+--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
++++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
+@@ -1634,7 +1634,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid)
+ 	spin_lock(&udc->lock);
+ 
+ 	int_enb = usba_int_enb_get(udc);
+-	status = usba_readl(udc, INT_STA) & int_enb;
++	status = usba_readl(udc, INT_STA) & (int_enb | USBA_HIGH_SPEED);
+ 	DBG(DBG_INT, "irq, status=%#08x\n", status);
+ 
+ 	if (status & USBA_DET_SUSPEND) {
+diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
+index bfcbb9aa8816..ee8d5faa0194 100644
+--- a/drivers/usb/host/ehci-orion.c
++++ b/drivers/usb/host/ehci-orion.c
+@@ -224,7 +224,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
+ 	priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
+ 	if (IS_ERR(priv->phy)) {
+ 		err = PTR_ERR(priv->phy);
+-		goto err_phy_get;
++		if (err != -ENOSYS)
++			goto err_phy_get;
+ 	} else {
+ 		err = phy_init(priv->phy);
+ 		if (err)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 26f62b2b33f8..1e6d7579709e 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -175,6 +175,16 @@ int xhci_reset(struct xhci_hcd *xhci)
+ 	command |= CMD_RESET;
+ 	writel(command, &xhci->op_regs->command);
+ 
++	/* Existing Intel xHCI controllers require a delay of 1 ms
++	 * after setting the CMD_RESET bit and before accessing any
++	 * HC registers. This allows the HC to complete the
++	 * reset operation and be ready for HC register access.
++	 * Without this delay, the subsequent HC register access
++	 * may, very rarely, result in a system hang.
++	 */
++	if (xhci->quirks & XHCI_INTEL_HOST)
++		udelay(1000);
++
+ 	ret = xhci_handshake(&xhci->op_regs->command,
+ 			CMD_RESET, 0, 10 * 1000 * 1000);
+ 	if (ret)
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 6dca3d794ced..9f65d8477372 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -132,7 +132,7 @@ static inline struct musb *dev_to_musb(struct device *dev)
+ /*-------------------------------------------------------------------------*/
+ 
+ #ifndef CONFIG_BLACKFIN
+-static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
++static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
+ {
+ 	void __iomem *addr = phy->io_priv;
+ 	int	i = 0;
+@@ -151,7 +151,7 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
+ 	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
+ 	 */
+ 
+-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
++	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
+ 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
+ 			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
+ 
+@@ -176,7 +176,7 @@ out:
+ 	return ret;
+ }
+ 
+-static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
++static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
+ {
+ 	void __iomem *addr = phy->io_priv;
+ 	int	i = 0;
+@@ -191,8 +191,8 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
+ 	power &= ~MUSB_POWER_SUSPENDM;
+ 	musb_writeb(addr, MUSB_POWER, power);
+ 
+-	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
+-	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
++	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
++	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
+ 	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
+ 
+ 	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 7c8eb4c4c175..4021846139c9 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -162,6 +162,7 @@ static void option_instat_callback(struct urb *urb);
+ #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED	0x9001
+ #define NOVATELWIRELESS_PRODUCT_E362		0x9010
+ #define NOVATELWIRELESS_PRODUCT_E371		0x9011
++#define NOVATELWIRELESS_PRODUCT_U620L		0x9022
+ #define NOVATELWIRELESS_PRODUCT_G2		0xA010
+ #define NOVATELWIRELESS_PRODUCT_MC551		0xB001
+ 
+@@ -357,6 +358,7 @@ static void option_instat_callback(struct urb *urb);
+ /* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+  * It seems to contain a Qualcomm QSC6240/6290 chipset            */
+ #define FOUR_G_SYSTEMS_PRODUCT_W14		0x9603
++#define FOUR_G_SYSTEMS_PRODUCT_W100		0x9b01
+ 
+ /* iBall 3.5G connect wireless modem */
+ #define IBALL_3_5G_CONNECT			0x9605
+@@ -522,6 +524,11 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
+ };
+ 
++static const struct option_blacklist_info four_g_w100_blacklist = {
++	.sendsetup = BIT(1) | BIT(2),
++	.reserved = BIT(3),
++};
++
+ static const struct option_blacklist_info alcatel_x200_blacklist = {
+ 	.sendsetup = BIT(0) | BIT(1),
+ 	.reserved = BIT(4),
+@@ -1060,6 +1067,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E371, 0xff, 0xff, 0xff) },
++	{ USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_U620L, 0xff, 0x00, 0x00) },
+ 
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
+ 	{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
+@@ -1653,6 +1661,9 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+   	},
++	{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
++	  .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
++	},
+ 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
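
The option.c entries above use per-interface bitmaps: .sendsetup marks interfaces that need the modem setup sequence and .reserved marks interfaces the driver must not claim, with BIT(n) selecting bInterfaceNumber n. A tiny demo of how the W100 entry reads:

    /*
     * BIT(n) flags USB interface number n, so the W100 entry asks for
     * the setup sequence on interfaces 1 and 2 and keeps the driver off
     * interface 3.
     */
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    struct blacklist_info { unsigned int sendsetup, reserved; };

    static const struct blacklist_info four_g_w100 = {
        .sendsetup = BIT(1) | BIT(2),
        .reserved  = BIT(3),
    };

    int main(void)
    {
        unsigned int ifnum;

        for (ifnum = 0; ifnum < 4; ifnum++)
            printf("interface %u: sendsetup=%d reserved=%d\n", ifnum,
                   !!(four_g_w100.sendsetup & BIT(ifnum)),
                   !!(four_g_w100.reserved & BIT(ifnum)));
        return 0;
    }
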
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index f49d262e926b..514fa91cf74e 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -22,6 +22,8 @@
+ #define DRIVER_AUTHOR "Qualcomm Inc"
+ #define DRIVER_DESC "Qualcomm USB Serial driver"
+ 
++#define QUECTEL_EC20_PID	0x9215
++
+ /* standard device layouts supported by this driver */
+ enum qcserial_layouts {
+ 	QCSERIAL_G2K = 0,	/* Gobi 2000 */
+@@ -169,6 +171,38 @@ static const struct usb_device_id id_table[] = {
+ };
+ MODULE_DEVICE_TABLE(usb, id_table);
+ 
++static int handle_quectel_ec20(struct device *dev, int ifnum)
++{
++	int altsetting = 0;
++
++	/*
++	 * Quectel EC20 Mini PCIe LTE module layout:
++	 * 0: DM/DIAG (use libqcdm from ModemManager for communication)
++	 * 1: NMEA
++	 * 2: AT-capable modem port
++	 * 3: Modem interface
++	 * 4: NDIS
++	 */
++	switch (ifnum) {
++	case 0:
++		dev_dbg(dev, "Quectel EC20 DM/DIAG interface found\n");
++		break;
++	case 1:
++		dev_dbg(dev, "Quectel EC20 NMEA GPS interface found\n");
++		break;
++	case 2:
++	case 3:
++		dev_dbg(dev, "Quectel EC20 Modem port found\n");
++		break;
++	case 4:
++		/* Don't claim the QMI/net interface */
++		altsetting = -1;
++		break;
++	}
++
++	return altsetting;
++}
++
+ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ {
+ 	struct usb_host_interface *intf = serial->interface->cur_altsetting;
+@@ -178,6 +212,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 	__u8 ifnum;
+ 	int altsetting = -1;
+ 
++	/* we only support vendor specific functions */
++	if (intf->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
++		goto done;
++
+ 	nintf = serial->dev->actconfig->desc.bNumInterfaces;
+ 	dev_dbg(dev, "Num Interfaces = %d\n", nintf);
+ 	ifnum = intf->desc.bInterfaceNumber;
+@@ -237,6 +275,12 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 			altsetting = -1;
+ 		break;
+ 	case QCSERIAL_G2K:
++		/* handle non-standard layouts */
++		if (nintf == 5 && id->idProduct == QUECTEL_EC20_PID) {
++			altsetting = handle_quectel_ec20(dev, ifnum);
++			goto done;
++		}
++
+ 		/*
+ 		 * Gobi 2K+ USB layout:
+ 		 * 0: QMI/net
+@@ -297,29 +341,39 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
+ 		break;
+ 	case QCSERIAL_HWI:
+ 		/*
+-		 * Huawei layout:
+-		 * 0: AT-capable modem port
+-		 * 1: DM/DIAG
+-		 * 2: AT-capable modem port
+-		 * 3: CCID-compatible PCSC interface
+-		 * 4: QMI/net
+-		 * 5: NMEA
++		 * Huawei devices map functions by subclass + protocol
++		 * instead of interface numbers. The protocol identifies
++		 * a specific function, while the subclass indicates a
++		 * specific firmware source.
++		 *
++		 * This is a blacklist of functions known to be
++		 * non-serial.  The rest are assumed to be serial and
++		 * will be handled by this driver.
+ 		 */
+-		switch (ifnum) {
+-		case 0:
+-		case 2:
+-			dev_dbg(dev, "Modem port found\n");
+-			break;
+-		case 1:
+-			dev_dbg(dev, "DM/DIAG interface found\n");
+-			break;
+-		case 5:
+-			dev_dbg(dev, "NMEA GPS interface found\n");
+-			break;
+-		default:
+-			/* don't claim any unsupported interface */
++		switch (intf->desc.bInterfaceProtocol) {
++			/* QMI combined (qmi_wwan) */
++		case 0x07:
++		case 0x37:
++		case 0x67:
++			/* QMI data (qmi_wwan) */
++		case 0x08:
++		case 0x38:
++		case 0x68:
++			/* QMI control (qmi_wwan) */
++		case 0x09:
++		case 0x39:
++		case 0x69:
++			/* NCM like (huawei_cdc_ncm) */
++		case 0x16:
++		case 0x46:
++		case 0x76:
+ 			altsetting = -1;
+ 			break;
++		default:
++			dev_dbg(dev, "Huawei type serial port found (%02x/%02x/%02x)\n",
++				intf->desc.bInterfaceClass,
++				intf->desc.bInterfaceSubClass,
++				intf->desc.bInterfaceProtocol);
+ 		}
+ 		break;
+ 	default:
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
+index e9da41d9fe7f..2694df2f4559 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -159,6 +159,7 @@ static const struct usb_device_id ti_id_table_3410[] = {
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
++	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+@@ -191,6 +192,7 @@ static const struct usb_device_id ti_id_table_combined[] = {
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+ 	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
+ 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
++	{ USB_DEVICE(HONEYWELL_VENDOR_ID, HONEYWELL_HGI80_PRODUCT_ID) },
+ 	{ }	/* terminator */
+ };
+ 
+diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
+index 4a2423e84d55..98f35c656c02 100644
+--- a/drivers/usb/serial/ti_usb_3410_5052.h
++++ b/drivers/usb/serial/ti_usb_3410_5052.h
+@@ -56,6 +56,10 @@
+ #define ABBOTT_PRODUCT_ID		ABBOTT_STEREO_PLUG_ID
+ #define ABBOTT_STRIP_PORT_ID		0x3420
+ 
++/* Honeywell vendor and product IDs */
++#define HONEYWELL_VENDOR_ID		0x10ac
++#define HONEYWELL_HGI80_PRODUCT_ID	0x0102  /* Honeywell HGI80 */
++
+ /* Commands */
+ #define TI_GET_VERSION			0x01
+ #define TI_GET_PORT_STATUS		0x02
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index fd02a9ebfc30..70f9c4cba31f 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -364,7 +364,7 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 			struct pid *pid, struct task_struct *task, int whole)
+ {
+-	unsigned long vsize, eip, esp, wchan = ~0UL;
++	unsigned long vsize, eip, esp, wchan = 0;
+ 	int priority, nice;
+ 	int tty_pgrp = -1, tty_nr = 0;
+ 	sigset_t sigign, sigcatch;
+@@ -496,7 +496,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ 	seq_put_decimal_ull(m, ' ', task->blocked.sig[0] & 0x7fffffffUL);
+ 	seq_put_decimal_ull(m, ' ', sigign.sig[0] & 0x7fffffffUL);
+ 	seq_put_decimal_ull(m, ' ', sigcatch.sig[0] & 0x7fffffffUL);
+-	seq_put_decimal_ull(m, ' ', wchan);
++
++	/*
++	 * We used to output the absolute kernel address, but that's an
++	 * information leak - so instead we show a 0/1 flag here, to signal
++	 * to user-space whether there's a wchan field in /proc/PID/wchan.
++	 *
++	 * This works with older implementations of procps as well.
++	 */
++	if (wchan)
++		seq_puts(m, " 1");
++	else
++		seq_puts(m, " 0");
++
+ 	seq_put_decimal_ull(m, ' ', 0);
+ 	seq_put_decimal_ull(m, ' ', 0);
+ 	seq_put_decimal_ll(m, ' ', task->exit_signal);
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 093ca14f5701..fcdeb1eb3921 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -238,13 +238,10 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
+ 
+ 	wchan = get_wchan(task);
+ 
+-	if (lookup_symbol_name(wchan, symname) < 0) {
+-		if (!ptrace_may_access(task, PTRACE_MODE_READ))
+-			return 0;
+-		seq_printf(m, "%lu", wchan);
+-	} else {
++	if (wchan && ptrace_may_access(task, PTRACE_MODE_READ) && !lookup_symbol_name(wchan, symname))
+ 		seq_printf(m, "%s", symname);
+-	}
++	else
++		seq_putc(m, '0');
+ 
+ 	return 0;
+ }
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index ad45054309a0..29a57a5b7cee 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -423,6 +423,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+ 	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
+ 	     idx++)
+ 
++static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
++{
++	struct kvm_vcpu *vcpu;
++	int i;
++
++	kvm_for_each_vcpu(i, vcpu, kvm)
++		if (vcpu->vcpu_id == id)
++			return vcpu;
++	return NULL;
++}
++
+ #define kvm_for_each_memslot(memslot, slots)	\
+ 	for (memslot = &slots->memslots[0];	\
+ 	      memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
+diff --git a/include/linux/tty.h b/include/linux/tty.h
+index d76631f615c2..9580c09afdbe 100644
+--- a/include/linux/tty.h
++++ b/include/linux/tty.h
+@@ -605,7 +605,7 @@ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+ 
+ /* tty_audit.c */
+ #ifdef CONFIG_AUDIT
+-extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
++extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
+ 			       size_t size, unsigned icanon);
+ extern void tty_audit_exit(void);
+ extern void tty_audit_fork(struct signal_struct *sig);
+@@ -613,8 +613,8 @@ extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
+ extern void tty_audit_push(struct tty_struct *tty);
+ extern int tty_audit_push_current(void);
+ #else
+-static inline void tty_audit_add_data(struct tty_struct *tty,
+-		unsigned char *data, size_t size, unsigned icanon)
++static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
++				      size_t size, unsigned icanon)
+ {
+ }
+ static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
+diff --git a/include/net/inet_common.h b/include/net/inet_common.h
+index 4a92423eefa5..82669da2540c 100644
+--- a/include/net/inet_common.h
++++ b/include/net/inet_common.h
+@@ -41,7 +41,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
+ 
+ static inline void inet_ctl_sock_destroy(struct sock *sk)
+ {
+-	sk_release_kernel(sk);
++	if (sk)
++		sk_release_kernel(sk);
+ }
+ 
+ #endif
+diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
+index 9070dfd6b4ad..4a0015e16d4f 100644
+--- a/net/bluetooth/hidp/core.c
++++ b/net/bluetooth/hidp/core.c
+@@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
+ {
+ 	struct hidp_session *session = (struct hidp_session *) arg;
+ 
++	/* The HIDP user-space API only contains calls to add and remove
++	 * devices. There is no way to forward events of any kind. Therefore,
++	 * we have to forcefully disconnect a device on idle-timeouts. This is
++	 * unfortunate and weird API design, but it is spec-compliant and
++	 * required for backwards-compatibility. Hence, on idle-timeout, we
++	 * signal driver-detach events, so poll() will be woken up with an
++	 * error-condition on both sockets.
++	 */
++
++	session->intr_sock->sk->sk_err = EUNATCH;
++	session->ctrl_sock->sk->sk_err = EUNATCH;
++	wake_up_interruptible(sk_sleep(session->intr_sock->sk));
++	wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));
++
+ 	hidp_session_terminate(session);
+ }
+ 
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 7fd87e7135b5..58d60cbbc33f 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -2962,6 +2962,11 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 	} else {
+ 		u8 addr_type;
+ 
++		if (cp->addr.type == BDADDR_LE_PUBLIC)
++			addr_type = ADDR_LE_DEV_PUBLIC;
++		else
++			addr_type = ADDR_LE_DEV_RANDOM;
++
+ 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
+ 					       &cp->addr.bdaddr);
+ 		if (conn) {
+@@ -2977,13 +2982,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
+ 			 */
+ 			if (!cp->disconnect)
+ 				conn = NULL;
++		} else {
++			hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
+ 		}
+ 
+-		if (cp->addr.type == BDADDR_LE_PUBLIC)
+-			addr_type = ADDR_LE_DEV_PUBLIC;
+-		else
+-			addr_type = ADDR_LE_DEV_RANDOM;
+-
+ 		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
+ 
+ 		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+diff --git a/net/core/dst.c b/net/core/dst.c
+index e956ce6d1378..f8db4032d45a 100644
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -285,7 +285,7 @@ void dst_release(struct dst_entry *dst)
+ 
+ 		newrefcnt = atomic_dec_return(&dst->__refcnt);
+ 		WARN_ON(newrefcnt < 0);
+-		if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++		if (!newrefcnt && unlikely(dst->flags & DST_NOCACHE))
+ 			call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ 	}
+ }
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 0ca933db1b41..93b802984819 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1547,7 +1547,7 @@ static struct key_vector *leaf_walk_rcu(struct key_vector **tn, t_key key)
+ 	do {
+ 		/* record parent and next child index */
+ 		pn = n;
+-		cindex = key ? get_index(key, pn) : 0;
++		cindex = (key > pn->key) ? get_index(key, pn) : 0;
+ 
+ 		if (cindex >> pn->bits)
+ 			break;
+diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
+index 5aa46d4b44ef..5a8ee3282550 100644
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -36,7 +36,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
+ 				  SKB_GSO_TCP_ECN |
+ 				  SKB_GSO_GRE |
+ 				  SKB_GSO_GRE_CSUM |
+-				  SKB_GSO_IPIP)))
++				  SKB_GSO_IPIP |
++				  SKB_GSO_SIT)))
+ 		goto out;
+ 
+ 	if (!skb->encapsulation)
+diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
+index 3a2c0162c3ba..df28693f32e1 100644
+--- a/net/ipv4/ipmr.c
++++ b/net/ipv4/ipmr.c
+@@ -1683,8 +1683,8 @@ static inline int ipmr_forward_finish(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct ip_options *opt = &(IPCB(skb)->opt);
+ 
+-	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+-	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
++	IP_INC_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
++	IP_ADD_STATS(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
+ 
+ 	if (unlikely(opt->optlen))
+ 		ip_forward_options(skb);
+@@ -1746,7 +1746,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+ 		 * to blackhole.
+ 		 */
+ 
+-		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
++		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
+ 		ip_rt_put(rt);
+ 		goto out_free;
+ 	}
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index c3852a7ff3c7..f0e829735968 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -45,10 +45,10 @@ static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+ /* Update system visible IP port range */
+ static void set_local_port_range(struct net *net, int range[2])
+ {
+-	write_seqlock(&net->ipv4.ip_local_ports.lock);
++	write_seqlock_bh(&net->ipv4.ip_local_ports.lock);
+ 	net->ipv4.ip_local_ports.range[0] = range[0];
+ 	net->ipv4.ip_local_ports.range[1] = range[1];
+-	write_sequnlock(&net->ipv4.ip_local_ports.lock);
++	write_sequnlock_bh(&net->ipv4.ip_local_ports.lock);
+ }
+ 
+ /* Validate changes from /proc interface. */
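
The switch to the _bh variants matters because this seqlock is also read
with bottom halves involved: if a BH-context reader interrupts the writer
on the same CPU mid-update, the reader spins on the sequence counter
forever. A minimal sketch of the pattern (names invented, assuming a
reader that can run from BH context):

	#include <linux/seqlock.h>

	static DEFINE_SEQLOCK(demo_lock);
	static int demo_range[2];

	static void demo_set_range(int lo, int hi)	/* process context */
	{
		write_seqlock_bh(&demo_lock);	/* keep BH readers off this CPU */
		demo_range[0] = lo;
		demo_range[1] = hi;
		write_sequnlock_bh(&demo_lock);
	}

	static void demo_get_range(int *lo, int *hi)	/* may run in BH */
	{
		unsigned int seq;

		do {
			seq = read_seqbegin(&demo_lock);
			*lo = demo_range[0];
			*hi = demo_range[1];
		} while (read_seqretry(&demo_lock, seq));
	}
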
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 37b70e82bff8..fd3aa6148dd1 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -411,6 +411,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
+ 	if (err) {
+ 		ipv6_mc_destroy_dev(ndev);
+ 		del_timer(&ndev->regen_timer);
++		snmp6_unregister_dev(ndev);
+ 		goto err_release;
+ 	}
+ 	/* protected by rtnl_lock */
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index ac35a28599be..85c4b2fff504 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1394,34 +1394,20 @@ static int ipip6_tunnel_init(struct net_device *dev)
+ 	return 0;
+ }
+ 
+-static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
++static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	struct iphdr *iph = &tunnel->parms.iph;
+ 	struct net *net = dev_net(dev);
+ 	struct sit_net *sitn = net_generic(net, sit_net_id);
+ 
+-	tunnel->dev = dev;
+-	tunnel->net = dev_net(dev);
+-
+ 	iph->version		= 4;
+ 	iph->protocol		= IPPROTO_IPV6;
+ 	iph->ihl		= 5;
+ 	iph->ttl		= 64;
+ 
+-	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+-	if (!dev->tstats)
+-		return -ENOMEM;
+-
+-	tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+-	if (!tunnel->dst_cache) {
+-		free_percpu(dev->tstats);
+-		return -ENOMEM;
+-	}
+-
+ 	dev_hold(dev);
+ 	rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
+-	return 0;
+ }
+ 
+ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[])
+@@ -1831,23 +1817,19 @@ static int __net_init sit_init_net(struct net *net)
+ 	 */
+ 	sitn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
+ 
+-	err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+-	if (err)
+-		goto err_dev_free;
+-
+-	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
+ 	err = register_netdev(sitn->fb_tunnel_dev);
+ 	if (err)
+ 		goto err_reg_dev;
+ 
++	ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
++	ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
++
+ 	t = netdev_priv(sitn->fb_tunnel_dev);
+ 
+ 	strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
+ 	return 0;
+ 
+ err_reg_dev:
+-	dev_put(sitn->fb_tunnel_dev);
+-err_dev_free:
+ 	ipip6_dev_free(sitn->fb_tunnel_dev);
+ err_alloc_dev:
+ 	return err;
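
This is a double-allocation fix for the fallback sit0 device:
register_netdev() already runs the ndo_init hook (ipip6_tunnel_init()),
which allocates dev->tstats and the dst cache, so repeating those
allocations in ipip6_fb_tunnel_init() leaked one set per network
namespace. With the allocations gone the function can no longer fail,
hence the void return and the simplified error unwinding.
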
+diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
+index a26c401ef4a4..43964594aa12 100644
+--- a/net/irda/irlmp.c
++++ b/net/irda/irlmp.c
+@@ -1839,7 +1839,7 @@ static void *irlmp_seq_hb_idx(struct irlmp_iter_state *iter, loff_t *off)
+ 	for (element = hashbin_get_first(iter->hashbin);
+ 	     element != NULL;
+ 	     element = hashbin_get_next(iter->hashbin)) {
+-		if (!off || *off-- == 0) {
++		if (!off || (*off)-- == 0) {
+ 			/* NB: hashbin left locked */
+ 			return element;
+ 		}
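
The one-character fix above is pure C operator precedence: postfix "--"
binds tighter than unary "*", so *off-- stepped the pointer backwards
instead of decrementing the offset it points to. A standalone user-space
sketch of the difference:

	#include <stdio.h>

	int main(void)
	{
		long vals[2] = { 5, 7 };
		long *off = &vals[1];

		(void)*off--;	/* "--" applies to off: it now points at vals[0] */
		(*off)--;	/* decrements the pointee: vals[0] becomes 4 */

		printf("%ld %ld\n", vals[0], vals[1]);	/* prints "4 7" */
		return 0;
	}
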
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 26053bf2faa8..a93906103f8b 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -3340,7 +3340,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
+ 
+ 	if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
+ 	    ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
+-		int sig = ifmgd->ave_beacon_signal;
++		int sig = ifmgd->ave_beacon_signal / 16;
+ 		int last_sig = ifmgd->last_ave_beacon_signal;
+ 		struct ieee80211_event event = {
+ 			.type = RSSI_EVENT,
+@@ -4946,6 +4946,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ 		return 0;
+ 	}
+ 
++	if (ifmgd->assoc_data &&
++	    ether_addr_equal(ifmgd->assoc_data->bss->bssid, req->bssid)) {
++		sdata_info(sdata,
++			   "aborting association with %pM by local choice (Reason: %u=%s)\n",
++			   req->bssid, req->reason_code,
++			   ieee80211_get_reason_code_string(req->reason_code));
++
++		drv_mgd_prepare_tx(sdata->local, sdata);
++		ieee80211_send_deauth_disassoc(sdata, req->bssid,
++					       IEEE80211_STYPE_DEAUTH,
++					       req->reason_code, tx,
++					       frame_buf);
++		ieee80211_destroy_assoc_data(sdata, false);
++		ieee80211_report_disconnect(sdata, frame_buf,
++					    sizeof(frame_buf), true,
++					    req->reason_code);
++		return 0;
++	}
++
+ 	if (ifmgd->associated &&
+ 	    ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
+ 		sdata_info(sdata,
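
Two separate fixes here: ave_beacon_signal is an exponentially weighted
average kept in 1/16 dBm units, so comparing it unscaled against the
driver's plain-dBm RSSI thresholds made the rssi events fire at the wrong
levels, hence the division by 16. The second hunk lets a deauth request
also abort an association that is still in progress, sending the deauth
frame and destroying the pending assoc data instead of only handling the
fully associated case.
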
+diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
+index 4c2e7690226a..ab19f3c2104d 100644
+--- a/net/mac80211/trace.h
++++ b/net/mac80211/trace.h
+@@ -33,11 +33,11 @@
+ 			__field(u32, chan_width)					\
+ 			__field(u32, center_freq1)					\
+ 			__field(u32, center_freq2)
+-#define CHANDEF_ASSIGN(c)								\
+-			__entry->control_freq = (c)->chan ? (c)->chan->center_freq : 0;	\
+-			__entry->chan_width = (c)->width;				\
+-			__entry->center_freq1 = (c)->center_freq1;			\
+-			__entry->center_freq2 = (c)->center_freq2;
++#define CHANDEF_ASSIGN(c)							\
++			__entry->control_freq = (c) ? ((c)->chan ? (c)->chan->center_freq : 0) : 0;	\
++			__entry->chan_width = (c) ? (c)->width : 0;			\
++			__entry->center_freq1 = (c) ? (c)->center_freq1 : 0;		\
++			__entry->center_freq2 = (c) ? (c)->center_freq2 : 0;
+ #define CHANDEF_PR_FMT	" control:%d MHz width:%d center: %d/%d MHz"
+ #define CHANDEF_PR_ARG	__entry->control_freq, __entry->chan_width,			\
+ 			__entry->center_freq1, __entry->center_freq2
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index b864ebc6ab8f..67fec9ba97fc 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -2984,6 +2984,13 @@ ieee80211_extend_noa_desc(struct ieee80211_noa_data *data, u32 tsf, int i)
+ 	if (end > 0)
+ 		return false;
+ 
++	/* One shot NOA  */
++	if (data->count[i] == 1)
++		return false;
++
++	if (data->desc[i].interval == 0)
++		return false;
++
+ 	/* End time is in the past, check for repetitions */
+ 	skip = DIV_ROUND_UP(-end, data->desc[i].interval);
+ 	if (data->count[i] < 255) {
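
Both added checks guard the DIV_ROUND_UP() just below: a one-shot NoA
descriptor (count == 1) has nothing to extend, and a descriptor with
interval == 0 would otherwise divide by zero.
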
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index d139c43ac6e5..0d6038c87bef 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1118,6 +1118,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 		if (err == -EEXIST)
+ 			err = -EADDRINUSE;
+ 		sock_put(sk);
++		goto err;
+ 	}
+ 
+ 	/* We need to ensure that the socket is hashed and visible. */
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index b33fed6d1584..91ecbd1c2ec1 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -101,6 +101,20 @@ struct nci_hcp_packet {
+ #define NCI_HCP_MSG_GET_CMD(header)  (header & 0x3f)
+ #define NCI_HCP_MSG_GET_PIPE(header) (header & 0x7f)
+ 
++static int nci_hci_result_to_errno(u8 result)
++{
++	switch (result) {
++	case NCI_HCI_ANY_OK:
++		return 0;
++	case NCI_HCI_ANY_E_REG_PAR_UNKNOWN:
++		return -EOPNOTSUPP;
++	case NCI_HCI_ANY_E_TIMEOUT:
++		return -ETIME;
++	default:
++		return -1;
++	}
++}
++
+ /* HCI core */
+ static void nci_hci_reset_pipes(struct nci_hci_dev *hdev)
+ {
+@@ -146,18 +160,18 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+ 	if (!conn_info)
+ 		return -EPROTO;
+ 
+-	skb = nci_skb_alloc(ndev, 2 + conn_info->max_pkt_payload_len +
++	i = 0;
++	skb = nci_skb_alloc(ndev, conn_info->max_pkt_payload_len +
+ 			    NCI_DATA_HDR_SIZE, GFP_KERNEL);
+ 	if (!skb)
+ 		return -ENOMEM;
+ 
+-	skb_reserve(skb, 2 + NCI_DATA_HDR_SIZE);
++	skb_reserve(skb, NCI_DATA_HDR_SIZE + 2);
+ 	*skb_push(skb, 1) = data_type;
+ 
+-	i = 0;
+-	len = conn_info->max_pkt_payload_len;
+-
+ 	do {
++		len = conn_info->max_pkt_payload_len;
++
+ 		/* If last packet add NCI_HFP_NO_CHAINING */
+ 		if (i + conn_info->max_pkt_payload_len -
+ 		    (skb->len + 1) >= data_len) {
+@@ -177,9 +191,15 @@ static int nci_hci_send_data(struct nci_dev *ndev, u8 pipe,
+ 			return r;
+ 
+ 		i += len;
++
+ 		if (i < data_len) {
+-			skb_trim(skb, 0);
+-			skb_pull(skb, len);
++			skb = nci_skb_alloc(ndev,
++					    conn_info->max_pkt_payload_len +
++					    NCI_DATA_HDR_SIZE, GFP_KERNEL);
++			if (!skb)
++				return -ENOMEM;
++
++			skb_reserve(skb, NCI_DATA_HDR_SIZE + 1);
+ 		}
+ 	} while (i < data_len);
+ 
+@@ -212,7 +232,8 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ 		     const u8 *param, size_t param_len,
+ 		     struct sk_buff **skb)
+ {
+-	struct nci_conn_info    *conn_info;
++	struct nci_hcp_message *message;
++	struct nci_conn_info   *conn_info;
+ 	struct nci_data data;
+ 	int r;
+ 	u8 pipe = ndev->hci_dev->gate2pipe[gate];
+@@ -232,9 +253,15 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ 
+ 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+-
+-	if (r == NCI_STATUS_OK && skb)
+-		*skb = conn_info->rx_skb;
++	if (r == NCI_STATUS_OK) {
++		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
++		r = nci_hci_result_to_errno(
++			NCI_HCP_MSG_GET_CMD(message->header));
++		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
++
++		if (!r && skb)
++			*skb = conn_info->rx_skb;
++	}
+ 
+ 	return r;
+ }
+@@ -328,9 +355,6 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
+ 	struct nci_conn_info    *conn_info;
+ 	u8 status = result;
+ 
+-	if (result != NCI_HCI_ANY_OK)
+-		goto exit;
+-
+ 	conn_info = ndev->hci_dev->conn_info;
+ 	if (!conn_info) {
+ 		status = NCI_STATUS_REJECTED;
+@@ -340,7 +364,7 @@ static void nci_hci_resp_received(struct nci_dev *ndev, u8 pipe,
+ 	conn_info->rx_skb = skb;
+ 
+ exit:
+-	nci_req_complete(ndev, status);
++	nci_req_complete(ndev, NCI_STATUS_OK);
+ }
+ 
+ /* Receive hcp message for pipe, with type and cmd.
+@@ -378,7 +402,7 @@ static void nci_hci_msg_rx_work(struct work_struct *work)
+ 	u8 pipe, type, instruction;
+ 
+ 	while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+-		pipe = skb->data[0];
++		pipe = NCI_HCP_MSG_GET_PIPE(skb->data[0]);
+ 		skb_pull(skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
+ 		message = (struct nci_hcp_message *)skb->data;
+ 		type = NCI_HCP_MSG_GET_TYPE(message->header);
+@@ -395,7 +419,7 @@ void nci_hci_data_received_cb(void *context,
+ {
+ 	struct nci_dev *ndev = (struct nci_dev *)context;
+ 	struct nci_hcp_packet *packet;
+-	u8 pipe, type, instruction;
++	u8 pipe, type;
+ 	struct sk_buff *hcp_skb;
+ 	struct sk_buff *frag_skb;
+ 	int msg_len;
+@@ -415,7 +439,7 @@ void nci_hci_data_received_cb(void *context,
+ 
+ 	/* it's the last fragment. Does it need re-aggregation? */
+ 	if (skb_queue_len(&ndev->hci_dev->rx_hcp_frags)) {
+-		pipe = packet->header & NCI_HCI_FRAGMENT;
++		pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
+ 		skb_queue_tail(&ndev->hci_dev->rx_hcp_frags, skb);
+ 
+ 		msg_len = 0;
+@@ -434,7 +458,7 @@ void nci_hci_data_received_cb(void *context,
+ 		*skb_put(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN) = pipe;
+ 
+ 		skb_queue_walk(&ndev->hci_dev->rx_hcp_frags, frag_skb) {
+-		       msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
++			msg_len = frag_skb->len - NCI_HCI_HCP_PACKET_HEADER_LEN;
+ 			memcpy(skb_put(hcp_skb, msg_len), frag_skb->data +
+ 			       NCI_HCI_HCP_PACKET_HEADER_LEN, msg_len);
+ 		}
+@@ -452,11 +476,10 @@ void nci_hci_data_received_cb(void *context,
+ 	packet = (struct nci_hcp_packet *)hcp_skb->data;
+ 	type = NCI_HCP_MSG_GET_TYPE(packet->message.header);
+ 	if (type == NCI_HCI_HCP_RESPONSE) {
+-		pipe = packet->header;
+-		instruction = NCI_HCP_MSG_GET_CMD(packet->message.header);
+-		skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN +
+-			 NCI_HCI_HCP_MESSAGE_HEADER_LEN);
+-		nci_hci_hcp_message_rx(ndev, pipe, type, instruction, hcp_skb);
++		pipe = NCI_HCP_MSG_GET_PIPE(packet->header);
++		skb_pull(hcp_skb, NCI_HCI_HCP_PACKET_HEADER_LEN);
++		nci_hci_hcp_message_rx(ndev, pipe, type,
++				       NCI_STATUS_OK, hcp_skb);
+ 	} else {
+ 		skb_queue_tail(&ndev->hci_dev->msg_rx_queue, hcp_skb);
+ 		schedule_work(&ndev->hci_dev->msg_rx_work);
+@@ -488,6 +511,7 @@ EXPORT_SYMBOL(nci_hci_open_pipe);
+ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ 		      const u8 *param, size_t param_len)
+ {
++	struct nci_hcp_message *message;
+ 	struct nci_conn_info *conn_info;
+ 	struct nci_data data;
+ 	int r;
+@@ -520,6 +544,12 @@ int nci_hci_set_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ 	r = nci_request(ndev, nci_hci_send_data_req,
+ 			(unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
++	if (r == NCI_STATUS_OK) {
++		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
++		r = nci_hci_result_to_errno(
++			NCI_HCP_MSG_GET_CMD(message->header));
++		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
++	}
+ 
+ 	kfree(tmp);
+ 	return r;
+@@ -529,6 +559,7 @@ EXPORT_SYMBOL(nci_hci_set_param);
+ int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ 		      struct sk_buff **skb)
+ {
++	struct nci_hcp_message *message;
+ 	struct nci_conn_info    *conn_info;
+ 	struct nci_data data;
+ 	int r;
+@@ -553,8 +584,15 @@ int nci_hci_get_param(struct nci_dev *ndev, u8 gate, u8 idx,
+ 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ 
+-	if (r == NCI_STATUS_OK)
+-		*skb = conn_info->rx_skb;
++	if (r == NCI_STATUS_OK) {
++		message = (struct nci_hcp_message *)conn_info->rx_skb->data;
++		r = nci_hci_result_to_errno(
++			NCI_HCP_MSG_GET_CMD(message->header));
++		skb_pull(conn_info->rx_skb, NCI_HCI_HCP_MESSAGE_HEADER_LEN);
++
++		if (!r && skb)
++			*skb = conn_info->rx_skb;
++	}
+ 
+ 	return r;
+ }
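
Taken together, the nci/hci changes above do three things: HCI responses
are now mapped to ordinary errnos through nci_hci_result_to_errno() and
the HCP message header is stripped before rx_skb is handed to callers;
fragmented sends allocate a fresh skb per fragment instead of trimming and
reusing the first one; and pipe numbers are consistently extracted with
NCI_HCP_MSG_GET_PIPE() rather than read from the raw header byte, which
previously was even masked with NCI_HCI_FRAGMENT by mistake.
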
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index e1ea5d43b01e..686e60187401 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2686,22 +2686,40 @@ static int packet_release(struct socket *sock)
+  *	Attach a packet hook.
+  */
+ 
+-static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
++static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
++			  __be16 proto)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+ 	struct net_device *dev_curr;
+ 	__be16 proto_curr;
+ 	bool need_rehook;
++	struct net_device *dev = NULL;
++	int ret = 0;
++	bool unlisted = false;
+ 
+-	if (po->fanout) {
+-		if (dev)
+-			dev_put(dev);
+-
++	if (po->fanout)
+ 		return -EINVAL;
+-	}
+ 
+ 	lock_sock(sk);
+ 	spin_lock(&po->bind_lock);
++	rcu_read_lock();
++
++	if (name) {
++		dev = dev_get_by_name_rcu(sock_net(sk), name);
++		if (!dev) {
++			ret = -ENODEV;
++			goto out_unlock;
++		}
++	} else if (ifindex) {
++		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
++		if (!dev) {
++			ret = -ENODEV;
++			goto out_unlock;
++		}
++	}
++
++	if (dev)
++		dev_hold(dev);
+ 
+ 	proto_curr = po->prot_hook.type;
+ 	dev_curr = po->prot_hook.dev;
+@@ -2709,14 +2727,29 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ 	need_rehook = proto_curr != proto || dev_curr != dev;
+ 
+ 	if (need_rehook) {
+-		unregister_prot_hook(sk, true);
++		if (po->running) {
++			rcu_read_unlock();
++			__unregister_prot_hook(sk, true);
++			rcu_read_lock();
++			dev_curr = po->prot_hook.dev;
++			if (dev)
++				unlisted = !dev_get_by_index_rcu(sock_net(sk),
++								 dev->ifindex);
++		}
+ 
+ 		po->num = proto;
+ 		po->prot_hook.type = proto;
+-		po->prot_hook.dev = dev;
+ 
+-		po->ifindex = dev ? dev->ifindex : 0;
+-		packet_cached_dev_assign(po, dev);
++		if (unlikely(unlisted)) {
++			dev_put(dev);
++			po->prot_hook.dev = NULL;
++			po->ifindex = -1;
++			packet_cached_dev_reset(po);
++		} else {
++			po->prot_hook.dev = dev;
++			po->ifindex = dev ? dev->ifindex : 0;
++			packet_cached_dev_assign(po, dev);
++		}
+ 	}
+ 	if (dev_curr)
+ 		dev_put(dev_curr);
+@@ -2724,7 +2757,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ 	if (proto == 0 || !need_rehook)
+ 		goto out_unlock;
+ 
+-	if (!dev || (dev->flags & IFF_UP)) {
++	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
+ 		register_prot_hook(sk);
+ 	} else {
+ 		sk->sk_err = ENETDOWN;
+@@ -2733,9 +2766,10 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ 	}
+ 
+ out_unlock:
++	rcu_read_unlock();
+ 	spin_unlock(&po->bind_lock);
+ 	release_sock(sk);
+-	return 0;
++	return ret;
+ }
+ 
+ /*
+@@ -2747,8 +2781,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ {
+ 	struct sock *sk = sock->sk;
+ 	char name[15];
+-	struct net_device *dev;
+-	int err = -ENODEV;
+ 
+ 	/*
+ 	 *	Check legality
+@@ -2758,19 +2790,13 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ 		return -EINVAL;
+ 	strlcpy(name, uaddr->sa_data, sizeof(name));
+ 
+-	dev = dev_get_by_name(sock_net(sk), name);
+-	if (dev)
+-		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
+-	return err;
++	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
+ }
+ 
+ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
+ 	struct sock *sk = sock->sk;
+-	struct net_device *dev = NULL;
+-	int err;
+-
+ 
+ 	/*
+ 	 *	Check legality
+@@ -2781,16 +2807,8 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
+ 	if (sll->sll_family != AF_PACKET)
+ 		return -EINVAL;
+ 
+-	if (sll->sll_ifindex) {
+-		err = -ENODEV;
+-		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
+-		if (dev == NULL)
+-			goto out;
+-	}
+-	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
+-
+-out:
+-	return err;
++	return packet_do_bind(sk, NULL, sll->sll_ifindex,
++			      sll->sll_protocol ? : pkt_sk(sk)->num);
+ }
+ 
+ static struct proto packet_proto = {
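
The packet_do_bind() rewrite moves the device lookup from the two bind()
entry points into the function itself, done by name or ifindex under
bind_lock with rcu_read_lock() held; that closes a race in which the
interface could be unregistered between the caller's lookup and the hook
re-registration, and the new unlisted bookkeeping copes with a device that
vanished while the hook was briefly dropped. As an aside, the expression
sll->sll_protocol ? : pkt_sk(sk)->num in packet_bind() is the GNU C
shorthand x ?: y, equivalent to x ? x : y with x evaluated once.
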
+diff --git a/net/rds/connection.c b/net/rds/connection.c
+index da6da57e5f36..9d66705f9d41 100644
+--- a/net/rds/connection.c
++++ b/net/rds/connection.c
+@@ -187,6 +187,12 @@ new_conn:
+ 		}
+ 	}
+ 
++	if (trans == NULL) {
++		kmem_cache_free(rds_conn_slab, conn);
++		conn = ERR_PTR(-ENODEV);
++		goto out;
++	}
++
+ 	conn->c_trans = trans;
+ 
+ 	ret = trans->conn_alloc(conn, gfp);
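
Without this check, an address that no transport claims leaves trans NULL
and the trans->conn_alloc() call just below would dereference it; bailing
out with -ENODEV and returning the half-built conn to the slab closes the
hole.
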
+diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
+index fbc5ef88bc0e..27a992154804 100644
+--- a/net/rds/tcp_recv.c
++++ b/net/rds/tcp_recv.c
+@@ -214,8 +214,15 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ 			}
+ 
+ 			to_copy = min(tc->t_tinc_data_rem, left);
+-			pskb_pull(clone, offset);
+-			pskb_trim(clone, to_copy);
++			if (!pskb_pull(clone, offset) ||
++			    pskb_trim(clone, to_copy)) {
++				pr_warn("rds_tcp_data_recv: pull/trim failed "
++					"left %zu data_rem %zu skb_len %d\n",
++					left, tc->t_tinc_data_rem, skb->len);
++				kfree_skb(clone);
++				desc->error = -ENOMEM;
++				goto out;
++			}
+ 			skb_queue_tail(&tinc->ti_skb_list, clone);
+ 
+ 			rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
+diff --git a/net/tipc/msg.c b/net/tipc/msg.c
+index c3e96e815418..e9333147d6f1 100644
+--- a/net/tipc/msg.c
++++ b/net/tipc/msg.c
+@@ -121,7 +121,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ {
+ 	struct sk_buff *head = *headbuf;
+ 	struct sk_buff *frag = *buf;
+-	struct sk_buff *tail;
++	struct sk_buff *tail = NULL;
+ 	struct tipc_msg *msg;
+ 	u32 fragid;
+ 	int delta;
+@@ -141,9 +141,15 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
+ 		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+ 			goto err;
+ 		head = *headbuf = frag;
+-		skb_frag_list_init(head);
+-		TIPC_SKB_CB(head)->tail = NULL;
+ 		*buf = NULL;
++		TIPC_SKB_CB(head)->tail = NULL;
++		if (skb_is_nonlinear(head)) {
++			skb_walk_frags(head, tail) {
++				TIPC_SKB_CB(head)->tail = tail;
++			}
++		} else {
++			skb_frag_list_init(head);
++		}
+ 		return 0;
+ 	}
+ 
+diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
+index 66deebc66aa1..f8dfee5072c0 100644
+--- a/net/tipc/udp_media.c
++++ b/net/tipc/udp_media.c
+@@ -48,6 +48,7 @@
+ #include <linux/tipc_netlink.h>
+ #include "core.h"
+ #include "bearer.h"
++#include "msg.h"
+ 
+ /* IANA assigned UDP port */
+ #define UDP_PORT_DEFAULT	6118
+@@ -216,6 +217,10 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
+ {
+ 	struct udp_bearer *ub;
+ 	struct tipc_bearer *b;
++	int usr = msg_user(buf_msg(skb));
++
++	if ((usr == LINK_PROTOCOL) || (usr == NAME_DISTRIBUTOR))
++		skb_linearize(skb);
+ 
+ 	ub = rcu_dereference_sk_user_data(sk);
+ 	if (!ub) {
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index dd78445c7d50..04b6f3f6ee0b 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -3407,12 +3407,6 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
+ 				     wdev->iftype))
+ 		return -EINVAL;
+ 
+-	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
+-		params.acl = parse_acl_data(&rdev->wiphy, info);
+-		if (IS_ERR(params.acl))
+-			return PTR_ERR(params.acl);
+-	}
+-
+ 	if (info->attrs[NL80211_ATTR_SMPS_MODE]) {
+ 		params.smps_mode =
+ 			nla_get_u8(info->attrs[NL80211_ATTR_SMPS_MODE]);
+@@ -3436,6 +3430,12 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
+ 		params.smps_mode = NL80211_SMPS_OFF;
+ 	}
+ 
++	if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
++		params.acl = parse_acl_data(&rdev->wiphy, info);
++		if (IS_ERR(params.acl))
++			return PTR_ERR(params.acl);
++	}
++
+ 	wdev_lock(wdev);
+ 	err = rdev_start_ap(rdev, dev, &params);
+ 	if (!err) {
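
Moving the ACL parse after the SMPS validation is a memory-leak fix:
parse_acl_data() allocates params.acl, and the SMPS checks in between can
bail out with an error return that never reached a kfree. Doing the
allocation last keeps every earlier early-return path allocation-free.
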
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 417ebb11cf48..bec63e0d2605 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -174,6 +174,8 @@ struct snd_usb_midi_in_endpoint {
+ 		u8 running_status_length;
+ 	} ports[0x10];
+ 	u8 seen_f5;
++	bool in_sysex;
++	u8 last_cin;
+ 	u8 error_resubmit;
+ 	int current_port;
+ };
+@@ -468,6 +470,39 @@ static void snd_usbmidi_maudio_broken_running_status_input(
+ }
+ 
+ /*
++ * QinHeng CH345 is buggy: every second packet inside a SysEx does not carry
++ * CIN 4 but the previously seen CIN, though still with three data bytes.
++ */
++static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep,
++				     uint8_t *buffer, int buffer_length)
++{
++	unsigned int i, cin, length;
++
++	for (i = 0; i + 3 < buffer_length; i += 4) {
++		if (buffer[i] == 0 && i > 0)
++			break;
++		cin = buffer[i] & 0x0f;
++		if (ep->in_sysex &&
++		    cin == ep->last_cin &&
++		    (buffer[i + 1 + (cin == 0x6)] & 0x80) == 0)
++			cin = 0x4;
++#if 0
++		if (buffer[i + 1] == 0x90) {
++			/*
++			 * Either a corrupted running status or a real note-on
++			 * message; impossible to detect reliably.
++			 */
++		}
++#endif
++		length = snd_usbmidi_cin_length[cin];
++		snd_usbmidi_input_data(ep, 0, &buffer[i + 1], length);
++		ep->in_sysex = cin == 0x4;
++		if (!ep->in_sysex)
++			ep->last_cin = cin;
++	}
++}
++
++/*
+  * CME protocol: like the standard protocol, but SysEx commands are sent as a
+  * single USB packet preceded by a 0x0F byte.
+  */
+@@ -660,6 +695,12 @@ static struct usb_protocol_ops snd_usbmidi_cme_ops = {
+ 	.output_packet = snd_usbmidi_output_standard_packet,
+ };
+ 
++static struct usb_protocol_ops snd_usbmidi_ch345_broken_sysex_ops = {
++	.input = ch345_broken_sysex_input,
++	.output = snd_usbmidi_standard_output,
++	.output_packet = snd_usbmidi_output_standard_packet,
++};
++
+ /*
+  * AKAI MPD16 protocol:
+  *
+@@ -1341,6 +1382,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
+ 		 * Various chips declare a packet size larger than 4 bytes, but
+ 		 * do not actually work with larger packets:
+ 		 */
++	case USB_ID(0x0a67, 0x5011): /* Medeli DD305 */
+ 	case USB_ID(0x0a92, 0x1020): /* ESI M4U */
+ 	case USB_ID(0x1430, 0x474b): /* RedOctane GH MIDI INTERFACE */
+ 	case USB_ID(0x15ca, 0x0101): /* Textech USB Midi Cable */
+@@ -2375,6 +2417,10 @@ int snd_usbmidi_create(struct snd_card *card,
+ 
+ 		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
+ 		break;
++	case QUIRK_MIDI_CH345:
++		umidi->usb_protocol_ops = &snd_usbmidi_ch345_broken_sysex_ops;
++		err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
++		break;
+ 	default:
+ 		dev_err(&umidi->dev->dev, "invalid quirk type %d\n",
+ 			quirk->type);
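
For reference, the Code Index Numbers the CH345 workaround juggles are
defined by the USB MIDI 1.0 spec as follows (each a 4-byte USB packet):

	0x4  SysEx starts or continues, 3 data bytes
	0x5  SysEx ends with 1 data byte (also single-byte system common)
	0x6  SysEx ends with 2 data bytes
	0x7  SysEx ends with 3 data bytes

ch345_broken_sysex_input() therefore rewrites a packet back to CIN 0x4
when the device repeats the previous CIN mid-SysEx and the payload carries
no status byte (top bit clear).
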
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index e4756651a52c..ecc2a4ea014d 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2820,6 +2820,17 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	.idProduct = 0x1020,
+ },
+ 
++/* QinHeng devices */
++{
++	USB_DEVICE(0x1a86, 0x752d),
++	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++		.vendor_name = "QinHeng",
++		.product_name = "CH345",
++		.ifnum = 1,
++		.type = QUIRK_MIDI_CH345
++	}
++},
++
+ /* KeithMcMillen Stringport */
+ {
+ 	USB_DEVICE(0x1f38, 0x0001),
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 00ebc0ca008e..eef9b8e4b949 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -535,6 +535,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
+ 		[QUIRK_MIDI_CME] = create_any_midi_quirk,
+ 		[QUIRK_MIDI_AKAI] = create_any_midi_quirk,
+ 		[QUIRK_MIDI_FTDI] = create_any_midi_quirk,
++		[QUIRK_MIDI_CH345] = create_any_midi_quirk,
+ 		[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
+ 		[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
+ 		[QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
+@@ -1271,6 +1272,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 	case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
+ 	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+ 	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
++	case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+ 		if (fp->altsetting == 3)
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
+index 91d0380431b4..991aa84491cd 100644
+--- a/sound/usb/usbaudio.h
++++ b/sound/usb/usbaudio.h
+@@ -94,6 +94,7 @@ enum quirk_type {
+ 	QUIRK_MIDI_AKAI,
+ 	QUIRK_MIDI_US122L,
+ 	QUIRK_MIDI_FTDI,
++	QUIRK_MIDI_CH345,
+ 	QUIRK_AUDIO_STANDARD_INTERFACE,
+ 	QUIRK_AUDIO_FIXED_ENDPOINT,
+ 	QUIRK_AUDIO_EDIROL_UAXX,


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-11-10  0:30 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-11-10  0:30 UTC (permalink / raw
  To: gentoo-commits

commit:     fe5de491080fffef58f47ac59a6dbab9324f4599
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Nov 10 00:29:54 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Nov 10 00:29:54 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=fe5de491

Linux patch 4.1.13

 0000_README             |    4 +
 1012_linux-4.1.13.patch | 3220 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3224 insertions(+)

diff --git a/0000_README b/0000_README
index ff278e3..acad761 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1011_linux-4.1.12.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.12
 
+Patch:  1012_linux-4.1.13.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.13
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-4.1.13.patch b/1012_linux-4.1.13.patch
new file mode 100644
index 0000000..449c35f
--- /dev/null
+++ b/1012_linux-4.1.13.patch
@@ -0,0 +1,3220 @@
+diff --git a/Documentation/devicetree/bindings/mfd/mfd.txt b/Documentation/devicetree/bindings/mfd/mfd.txt
+new file mode 100644
+index 000000000000..af9d6931a1a2
+--- /dev/null
++++ b/Documentation/devicetree/bindings/mfd/mfd.txt
+@@ -0,0 +1,41 @@
++Multi-Function Devices (MFD)
++
++These devices act as a nexus for heterogeneous hardware blocks, combining
++several distinct hardware functions in a single device.
++
++A typical MFD can be:
++
++- A mixed signal ASIC on an external bus, sometimes a PMIC (Power Management
++  Integrated Circuit) that is manufactured in a lower technology node (rough
++  silicon) that handles analog drivers for things like audio amplifiers, LED
++  drivers, level shifters, PHY (physical interfaces to things like USB or
++  ethernet), regulators etc.
++
++- A range of memory registers containing "miscellaneous system registers" also
++  known as a system controller "syscon" or any other memory range containing a
++  mix of unrelated hardware devices.
++
++Optional properties:
++
++- compatible : "simple-mfd" - this signifies that the operating system should
++  consider all subnodes of the MFD device as separate devices akin to how
++  "simple-bus" inidicates when to see subnodes as children for a simple
++  memory-mapped bus. For more complex devices, when the nexus driver has to
++  probe registers to figure out what child devices exist etc, this should not
++  be used. In the latter case the child devices will be determined by the
++  operating system.
++
++Example:
++
++foo@1000 {
++	compatible = "syscon", "simple-mfd";
++	reg = <0x01000 0x1000>;
++
++	led@08.0 {
++		compatible = "register-bit-led";
++		offset = <0x08>;
++		mask = <0x01>;
++		label = "myled";
++		default-state = "on";
++	};
++};
+diff --git a/Makefile b/Makefile
+index 2320f1911404..d5d229db61d5 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+index c9df40e5cd3b..e8397879d0a7 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+@@ -354,11 +354,12 @@
+ 				/* SMPS9 unused */
+ 
+ 				ldo1_reg: ldo1 {
+-					/* VDD_SD  */
++					/* VDD_SD / VDDSHV8  */
+ 					regulator-name = "ldo1";
+ 					regulator-min-microvolt = <1800000>;
+ 					regulator-max-microvolt = <3300000>;
+ 					regulator-boot-on;
++					regulator-always-on;
+ 				};
+ 
+ 				ldo2_reg: ldo2 {
+diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
+index 7219ac3a3d90..9f730e8e9f87 100644
+--- a/arch/arm/boot/dts/armada-385-db-ap.dts
++++ b/arch/arm/boot/dts/armada-385-db-ap.dts
+@@ -46,7 +46,7 @@
+ 
+ / {
+ 	model = "Marvell Armada 385 Access Point Development Board";
+-	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
++	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada380";
+ 
+ 	chosen {
+ 		stdout-path = "serial1:115200n8";
+diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+index 146e71118a72..a0ec8bff83dd 100644
+--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
++++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
+@@ -915,6 +915,11 @@
+ 	};
+ };
+ 
++&pmu_system_controller {
++	assigned-clocks = <&pmu_system_controller 0>;
++	assigned-clock-parents = <&clock CLK_FIN_PLL>;
++};
++
+ &rtc {
+ 	status = "okay";
+ 	clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
+diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+index 02eb8b15374f..1171f347878a 100644
+--- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
++++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
+@@ -878,6 +878,11 @@
+ 	};
+ };
+ 
++&pmu_system_controller {
++	assigned-clocks = <&pmu_system_controller 0>;
++	assigned-clock-parents = <&clock CLK_FIN_PLL>;
++};
++
+ &rtc {
+ 	status = "okay";
+ 	clocks = <&clock CLK_RTC>, <&max77802 MAX77802_CLK_32K_AP>;
+diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
+index 43cb3fd76be7..5111f5170d53 100644
+--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
++++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
+@@ -305,8 +305,8 @@
+ &usdhc2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+-	cd-gpios = <&gpio1 4 0>;
+-	wp-gpios = <&gpio1 2 0>;
++	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+@@ -314,8 +314,8 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 0>;
+-	wp-gpios = <&gpio7 1 0>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
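
This and the long run of similar DTS hunks below are one mechanical fix:
on these boards the SD card-detect line is active-low (an inserted card
pulls CD to ground), while a bare flags cell of 0 means GPIO_ACTIVE_HIGH,
so the polarity was described backwards. Spelling out GPIO_ACTIVE_LOW for
cd-gpios - and GPIO_ACTIVE_HIGH for the write-protect lines, which really
are active-high - lets the MMC core, which interprets these flags, detect
card insertion correctly.
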
+diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
+index 78df05e9d1ce..d6515f7a56c4 100644
+--- a/arch/arm/boot/dts/imx6q-arm2.dts
++++ b/arch/arm/boot/dts/imx6q-arm2.dts
+@@ -11,6 +11,7 @@
+  */
+ 
+ /dts-v1/;
++#include <dt-bindings/gpio/gpio.h>
+ #include "imx6q.dtsi"
+ 
+ / {
+@@ -196,8 +197,8 @@
+ };
+ 
+ &usdhc3 {
+-	cd-gpios = <&gpio6 11 0>;
+-	wp-gpios = <&gpio6 14 0>;
++	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3
+diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
+index 703539cf36d3..00bd63e63d0c 100644
+--- a/arch/arm/boot/dts/imx6q-gk802.dts
++++ b/arch/arm/boot/dts/imx6q-gk802.dts
+@@ -7,6 +7,7 @@
+  */
+ 
+ /dts-v1/;
++#include <dt-bindings/gpio/gpio.h>
+ #include "imx6q.dtsi"
+ 
+ / {
+@@ -161,7 +162,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	bus-width = <4>;
+-	cd-gpios = <&gpio6 11 0>;
++	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
+index a43abfa21e33..5645d52850a7 100644
+--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
++++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
+@@ -251,7 +251,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <4>;
+-	cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+@@ -260,7 +260,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	bus-width = <4>;
+-	cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+index e6d9195a1da7..f4d6ae564ead 100644
+--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+@@ -173,7 +173,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+ 	vmmc-supply = <&reg_3p3v>;
+-	cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+ 
+@@ -181,7 +181,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	vmmc-supply = <&reg_3p3v>;
+-	cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+index d033bb182060..6a846e0ef505 100644
+--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+@@ -259,6 +259,6 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
+ 	vmmc-supply = <&reg_3p3v>;
+-	cd-gpios = <&gpio1 4 0>;
++	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+index 2c253d6d20bd..45e7c39e80d5 100644
+--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+@@ -1,3 +1,5 @@
++#include <dt-bindings/gpio/gpio.h>
++
+ / {
+ 	regulators {
+ 		compatible = "simple-bus";
+@@ -181,7 +183,7 @@
+ &usdhc2 { /* module slot */
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+-	cd-gpios = <&gpio2 2 0>;
++	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+index b5756c21ea1d..4493f6e99330 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+@@ -318,7 +318,7 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+index 86f03c1b147c..a857d1294609 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+@@ -324,7 +324,7 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+index 4a8d97f47759..1afe3385e2d2 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+@@ -417,7 +417,7 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+index 151a3db2aea9..c6833d2b4ff5 100644
+--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+@@ -41,6 +41,7 @@
+  */
+ #include "imx6qdl-microsom.dtsi"
+ #include "imx6qdl-microsom-ar8035.dtsi"
++#include <dt-bindings/gpio/gpio.h>
+ 
+ / {
+ 	chosen {
+@@ -288,6 +289,6 @@
+ 		&pinctrl_hummingboard_usdhc2
+ 	>;
+ 	vmmc-supply = <&reg_3p3v>;
+-	cd-gpios = <&gpio1 4 0>;
++	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+index 08218120e770..64e0b6178bf4 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+@@ -449,7 +449,7 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 0>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+@@ -457,7 +457,7 @@
+ &usdhc4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc4>;
+-	cd-gpios = <&gpio2 6 0>;
++	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 1ce6133b67f5..9e6ecd99b472 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -409,8 +409,8 @@
+ &usdhc2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+-	cd-gpios = <&gpio1 4 0>;
+-	wp-gpios = <&gpio1 2 0>;
++	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ 	status = "disabled";
+ };
+ 
+@@ -418,7 +418,7 @@
+         pinctrl-names = "default";
+         pinctrl-0 = <&pinctrl_usdhc3
+ 		     &pinctrl_usdhc3_cdwp>;
+-        cd-gpios = <&gpio1 27 0>;
+-        wp-gpios = <&gpio1 29 0>;
++	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+         status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+index 394a4ace351a..a50356243888 100644
+--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+@@ -340,7 +340,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <4>;
+ 	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+-	wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+@@ -349,6 +349,6 @@
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	bus-width = <4>;
+ 	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+-	wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+index 3b24b12651b2..e329ca5c3322 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+@@ -467,8 +467,8 @@
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+-	cd-gpios = <&gpio6 15 0>;
+-	wp-gpios = <&gpio1 13 0>;
++	cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+index 0b28a9d5241e..1e27485e4293 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+@@ -444,8 +444,8 @@
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio7 0 0>;
+-	wp-gpios = <&gpio7 1 0>;
++	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+@@ -453,7 +453,7 @@
+ &usdhc4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc4>;
+-	cd-gpios = <&gpio2 6 0>;
++	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+index a626e6dd8022..944eb81cb2b8 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+@@ -562,8 +562,8 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio2 2 0>;
+-	wp-gpios = <&gpio2 3 0>;
++	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+@@ -571,8 +571,8 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio2 0 0>;
+-	wp-gpios = <&gpio2 1 0>;
++	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+index f02b80b41d4f..da08de324e9e 100644
+--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+@@ -680,7 +680,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+ 	bus-width = <4>;
+ 	no-1-8-v;
+-	cd-gpios = <&gpio7 2 0>;
++	cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
+ 	fsl,wp-controller;
+ 	status = "okay";
+ };
+@@ -690,7 +690,7 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	bus-width = <4>;
+ 	no-1-8-v;
+-	cd-gpios = <&gpio7 3 0>;
++	cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
+ 	fsl,wp-controller;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+index 5fb091675582..9e096d811bed 100644
+--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+@@ -9,6 +9,8 @@
+  *
+  */
+ 
++#include <dt-bindings/gpio/gpio.h>
++
+ / {
+ 	regulators {
+ 		compatible = "simple-bus";
+@@ -250,13 +252,13 @@
+ &usdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+-	cd-gpios = <&gpio1 2 0>;
++	cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+ 
+ &usdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+-	cd-gpios = <&gpio3 9 0>;
++	cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
+index 945887d3fdb3..b84dff2e94ea 100644
+--- a/arch/arm/boot/dts/imx6sl-evk.dts
++++ b/arch/arm/boot/dts/imx6sl-evk.dts
+@@ -617,8 +617,8 @@
+ 	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio4 7 0>;
+-	wp-gpios = <&gpio4 6 0>;
++	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+@@ -627,8 +627,8 @@
+ 	pinctrl-0 = <&pinctrl_usdhc2>;
+ 	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
+-	cd-gpios = <&gpio5 0 0>;
+-	wp-gpios = <&gpio4 29 0>;
++	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+@@ -637,6 +637,6 @@
+ 	pinctrl-0 = <&pinctrl_usdhc3>;
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+-	cd-gpios = <&gpio3 22 0>;
++	cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+index e3c0b63c2205..115f3fd78971 100644
+--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
++++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+@@ -49,7 +49,7 @@
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ 	keep-power-in-suspend;
+ 	enable-sdio-wakeup;
+@@ -61,7 +61,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc4>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
+ 	no-1-8-v;
+ 	keep-power-in-suspend;
+ 	enable-sdio-wakup;
+diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+index cef04cef3a80..ac88c3467078 100644
+--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
++++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
+@@ -293,7 +293,7 @@
+ 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ 	bus-width = <8>;
+-	cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
+ 	keep-power-in-suspend;
+ 	enable-sdio-wakeup;
+@@ -304,7 +304,7 @@
+ &usdhc4 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc4>;
+-	cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
+ 	wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 2b4847c7cbd4..fa36571b755a 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -111,7 +111,7 @@
+ 				720000  1200000
+ 				528000  1100000
+ 				312000  1000000
+-				144000  900000
++				144000  1000000
+ 				>;
+ 			#cooling-cells = <2>;
+ 			cooling-min-level = <0>;
+diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
+index f1f79d104309..60c1a0f4d67a 100644
+--- a/arch/arm/kvm/Kconfig
++++ b/arch/arm/kvm/Kconfig
+@@ -21,6 +21,7 @@ config KVM
+ 	depends on MMU && OF
+ 	select PREEMPT_NOTIFIERS
+ 	select ANON_INODES
++	select ARM_GIC
+ 	select HAVE_KVM_CPU_RELAX_INTERCEPT
+ 	select HAVE_KVM_ARCH_TLB_FLUSH_ALL
+ 	select KVM_MMIO
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index f5b00f41c4f6..b8b6e22f9987 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -499,7 +499,7 @@ void __init orion_ge00_switch_init(struct dsa_platform_data *d, int irq)
+ 
+ 	d->netdev = &orion_ge00.dev;
+ 	for (i = 0; i < d->nr_chips; i++)
+-		d->chip[i].host_dev = &orion_ge00_shared.dev;
++		d->chip[i].host_dev = &orion_ge_mvmdio.dev;
+ 	orion_switch_device.dev.platform_data = d;
+ 
+ 	platform_device_register(&orion_switch_device);
+diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
+index aedec81d1198..f6455273b2f8 100644
+--- a/arch/arm/vdso/vdsomunge.c
++++ b/arch/arm/vdso/vdsomunge.c
+@@ -45,7 +45,6 @@
+  * it does.
+  */
+ 
+-#include <byteswap.h>
+ #include <elf.h>
+ #include <errno.h>
+ #include <fcntl.h>
+@@ -59,6 +58,16 @@
+ #include <sys/types.h>
+ #include <unistd.h>
+ 
++#define swab16(x) \
++	((((x) & 0x00ff) << 8) | \
++	 (((x) & 0xff00) >> 8))
++
++#define swab32(x) \
++	((((x) & 0x000000ff) << 24) | \
++	 (((x) & 0x0000ff00) <<  8) | \
++	 (((x) & 0x00ff0000) >>  8) | \
++	 (((x) & 0xff000000) >> 24))
++
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define HOST_ORDER ELFDATA2LSB
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+@@ -104,17 +113,17 @@ static void cleanup(void)
+ 
+ static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
+ {
+-	return swap ? bswap_32(word) : word;
++	return swap ? swab32(word) : word;
+ }
+ 
+ static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
+ {
+-	return swap ? bswap_16(half) : half;
++	return swap ? swab16(half) : half;
+ }
+ 
+ static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
+ {
+-	*dst = swap ? bswap_32(val) : val;
++	*dst = swap ? swab32(val) : val;
+ }
+ 
+ int main(int argc, char **argv)
+diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
+index 141b2fcabaa6..0f74f05d662a 100644
+--- a/arch/arm64/include/asm/cpuidle.h
++++ b/arch/arm64/include/asm/cpuidle.h
+@@ -5,20 +5,16 @@
+ 
+ #ifdef CONFIG_CPU_IDLE
+ extern int arm_cpuidle_init(unsigned int cpu);
+-extern int cpu_suspend(unsigned long arg);
++extern int arm_cpuidle_suspend(int index);
+ #else
+ static inline int arm_cpuidle_init(unsigned int cpu)
+ {
+ 	return -EOPNOTSUPP;
+ }
+ 
+-static inline int cpu_suspend(unsigned long arg)
++static inline int arm_cpuidle_suspend(int index)
+ {
+ 	return -EOPNOTSUPP;
+ }
+ #endif
+-static inline int arm_cpuidle_suspend(int index)
+-{
+-	return cpu_suspend(index);
+-}
+ #endif
+diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
+index 003802f58963..59a5b0f1e81c 100644
+--- a/arch/arm64/include/asm/suspend.h
++++ b/arch/arm64/include/asm/suspend.h
+@@ -21,6 +21,6 @@ struct sleep_save_sp {
+ 	phys_addr_t save_ptr_stash_phys;
+ };
+ 
+-extern int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
++extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
+ extern void cpu_resume(void);
+ #endif
+diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
+index 7922c2e710ca..7ac3920b1356 100644
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -279,22 +279,24 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
+  */
+ #define __user_swpX_asm(data, addr, res, temp, B)		\
+ 	__asm__ __volatile__(					\
+-	"	mov		%w2, %w1\n"			\
+-	"0:	ldxr"B"		%w1, [%3]\n"			\
+-	"1:	stxr"B"		%w0, %w2, [%3]\n"		\
++	"0:	ldxr"B"		%w2, [%3]\n"			\
++	"1:	stxr"B"		%w0, %w1, [%3]\n"		\
+ 	"	cbz		%w0, 2f\n"			\
+ 	"	mov		%w0, %w4\n"			\
++	"	b		3f\n"				\
+ 	"2:\n"							\
++	"	mov		%w1, %w2\n"			\
++	"3:\n"							\
+ 	"	.pushsection	 .fixup,\"ax\"\n"		\
+ 	"	.align		2\n"				\
+-	"3:	mov		%w0, %w5\n"			\
+-	"	b		2b\n"				\
++	"4:	mov		%w0, %w5\n"			\
++	"	b		3b\n"				\
+ 	"	.popsection"					\
+ 	"	.pushsection	 __ex_table,\"a\"\n"		\
+ 	"	.align		3\n"				\
+-	"	.quad		0b, 3b\n"			\
+-	"	.quad		1b, 3b\n"			\
+-	"	.popsection"					\
++	"	.quad		0b, 4b\n"			\
++	"	.quad		1b, 4b\n"			\
++	"	.popsection\n"					\
+ 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
+ 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
+ 	: "memory")
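
The rewritten sequence only copies the freshly loaded value into the data
register once the store-exclusive has succeeded. Previously that register
was overwritten immediately after the load-exclusive, so when stxr failed
and the emulation returned -EAGAIN, the retried SWP would store the value
it had just read back from memory instead of the one the user supplied.
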
+diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
+index a78143a5c99f..2bbd0fee084f 100644
+--- a/arch/arm64/kernel/cpuidle.c
++++ b/arch/arm64/kernel/cpuidle.c
+@@ -37,7 +37,7 @@ int arm_cpuidle_init(unsigned int cpu)
+  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
+  * operations back-end error code otherwise.
+  */
+-int cpu_suspend(unsigned long arg)
++int arm_cpuidle_suspend(int index)
+ {
+ 	int cpu = smp_processor_id();
+ 
+@@ -47,5 +47,5 @@ int cpu_suspend(unsigned long arg)
+ 	 */
+ 	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ 		return -EOPNOTSUPP;
+-	return cpu_ops[cpu]->cpu_suspend(arg);
++	return cpu_ops[cpu]->cpu_suspend(index);
+ }
+diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
+index ea18cb53921e..24d4733b7e3c 100644
+--- a/arch/arm64/kernel/psci.c
++++ b/arch/arm64/kernel/psci.c
+@@ -546,7 +546,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
+ 	if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
+ 		ret = psci_ops.cpu_suspend(state[index - 1], 0);
+ 	else
+-		ret = __cpu_suspend(index, psci_suspend_finisher);
++		ret = cpu_suspend(index, psci_suspend_finisher);
+ 
+ 	return ret;
+ }
+diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
+index 407991bf79f5..ccb6078ed9f2 100644
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackframe *frame)
+ 
+ 	frame->sp = fp + 0x10;
+ 	frame->fp = *(unsigned long *)(fp);
+-	/*
+-	 * -4 here because we care about the PC at time of bl,
+-	 * not where the return will go.
+-	 */
+-	frame->pc = *(unsigned long *)(fp + 8) - 4;
++	frame->pc = *(unsigned long *)(fp + 8);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index d7daf45ae7a2..53f1f8dccf6c 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -51,13 +51,13 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+ }
+ 
+ /*
+- * __cpu_suspend
++ * cpu_suspend
+  *
+  * arg: argument to pass to the finisher function
+  * fn: finisher function pointer
+  *
+  */
+-int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
++int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ {
+ 	struct mm_struct *mm = current->active_mm;
+ 	int ret;
+@@ -80,17 +80,21 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+ 	if (ret == 0) {
+ 		/*
+ 		 * We are resuming from reset with TTBR0_EL1 set to the
+-		 * idmap to enable the MMU; restore the active_mm mappings in
+-		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+-		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
+-		 * reserved TTBR0 page tables and should be restored as such.
++		 * idmap to enable the MMU; set the TTBR0 to the reserved
++		 * page tables to prevent speculative TLB allocations, flush
++		 * the local tlb and set the default tcr_el1.t0sz so that
++		 * the TTBR0 address space set-up is properly restored.
++		 * If the current active_mm != &init_mm we entered cpu_suspend
++		 * with mappings in TTBR0 that must be restored, so we switch
++		 * them back to complete the address space configuration
++		 * restoration before returning.
+ 		 */
+-		if (mm == &init_mm)
+-			cpu_set_reserved_ttbr0();
+-		else
+-			cpu_switch_mm(mm->pgd, mm);
+-
++		cpu_set_reserved_ttbr0();
+ 		flush_tlb_all();
++		cpu_set_default_tcr_t0sz();
++
++		if (mm != &init_mm)
++			cpu_switch_mm(mm->pgd, mm);
+ 
+ 		/*
+ 		 * Restore per-cpu offset before any kernel
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index caffb10e7aa3..5607693f35cf 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -1041,6 +1041,9 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
+ 	if (!capable(CAP_SYS_ADMIN))
+ 		return -EPERM;
+ 
++	if (!rtas.entry)
++		return -EINVAL;
++
+ 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
+ 		return -EFAULT;
+ 
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index 0cdc154a22b5..4c3f76b425c1 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -667,6 +667,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
+ 		bool conout_found = false;
+ 		void *dummy = NULL;
+ 		u32 h = handles[i];
++		u32 current_fb_base;
+ 
+ 		status = efi_call_early(handle_protocol, h,
+ 					proto, (void **)&gop32);
+@@ -678,7 +679,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
+ 		if (status == EFI_SUCCESS)
+ 			conout_found = true;
+ 
+-		status = __gop_query32(gop32, &info, &size, &fb_base);
++		status = __gop_query32(gop32, &info, &size, &current_fb_base);
+ 		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ 			/*
+ 			 * Systems that use the UEFI Console Splitter may
+@@ -692,6 +693,7 @@ setup_gop32(struct screen_info *si, efi_guid_t *proto,
+ 			pixel_format = info->pixel_format;
+ 			pixel_info = info->pixel_information;
+ 			pixels_per_scan_line = info->pixels_per_scan_line;
++			fb_base = current_fb_base;
+ 
+ 			/*
+ 			 * Once we've found a GOP supporting ConOut,
+@@ -770,6 +772,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
+ 		bool conout_found = false;
+ 		void *dummy = NULL;
+ 		u64 h = handles[i];
++		u32 current_fb_base;
+ 
+ 		status = efi_call_early(handle_protocol, h,
+ 					proto, (void **)&gop64);
+@@ -781,7 +784,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
+ 		if (status == EFI_SUCCESS)
+ 			conout_found = true;
+ 
+-		status = __gop_query64(gop64, &info, &size, &fb_base);
++		status = __gop_query64(gop64, &info, &size, &current_fb_base);
+ 		if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+ 			/*
+ 			 * Systems that use the UEFI Console Splitter may
+@@ -795,6 +798,7 @@ setup_gop64(struct screen_info *si, efi_guid_t *proto,
+ 			pixel_format = info->pixel_format;
+ 			pixel_info = info->pixel_information;
+ 			pixels_per_scan_line = info->pixels_per_scan_line;
++			fb_base = current_fb_base;
+ 
+ 			/*
+ 			 * Once we've found a GOP supporting ConOut,
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 0cc657160cb6..a10ed8915bf4 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,7 +33,7 @@
+ #include <linux/memblock.h>
+ #include <linux/edd.h>
+ 
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_KEXEC
+ #include <linux/kexec.h>
+ #endif
+ 
+@@ -1802,7 +1802,7 @@ static struct notifier_block xen_hvm_cpu_notifier = {
+ 	.notifier_call	= xen_hvm_cpu_notify,
+ };
+ 
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_KEXEC
+ static void xen_hvm_shutdown(void)
+ {
+ 	native_machine_shutdown();
+@@ -1836,7 +1836,7 @@ static void __init xen_hvm_guest_init(void)
+ 	x86_init.irqs.intr_init = xen_init_IRQ;
+ 	xen_hvm_init_time_ops();
+ 	xen_hvm_init_mmu_ops();
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_KEXEC
+ 	machine_ops.shutdown = xen_hvm_shutdown;
+ 	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+ #endif
+diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
+index db201bca1581..523dd10e1751 100644
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -698,7 +698,7 @@ struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index d2627a3d4ed8..dda720c6ab08 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -337,7 +337,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
+ 		crypto_alg_tested(larval->alg.cra_driver_name, 0);
+ 	}
+ 
+-	err = wait_for_completion_interruptible(&larval->completion);
++	err = wait_for_completion_killable(&larval->completion);
+ 	WARN_ON(err);
+ 
+ out:
+diff --git a/crypto/api.c b/crypto/api.c
+index afe4610afc4b..bbc147cb5dec 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -172,7 +172,7 @@ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
+ 	struct crypto_larval *larval = (void *)alg;
+ 	long timeout;
+ 
+-	timeout = wait_for_completion_interruptible_timeout(
++	timeout = wait_for_completion_killable_timeout(
+ 		&larval->completion, 60 * HZ);
+ 
+ 	alg = larval->adult;
+@@ -445,7 +445,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+@@ -562,7 +562,7 @@ void *crypto_alloc_tfm(const char *alg_name,
+ err:
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
+index 41dfe762b7fb..edf2e3ea1740 100644
+--- a/crypto/crypto_user.c
++++ b/crypto/crypto_user.c
+@@ -381,7 +381,7 @@ static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
+ 		err = PTR_ERR(alg);
+ 		if (err != -EAGAIN)
+ 			break;
+-		if (signal_pending(current)) {
++		if (fatal_signal_pending(current)) {
+ 			err = -EINTR;
+ 			break;
+ 		}
+diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
+index 683dff272562..04c0e8f3183c 100644
+--- a/drivers/block/nvme-core.c
++++ b/drivers/block/nvme-core.c
+@@ -590,6 +590,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
+ 	struct nvme_iod *iod = ctx;
+ 	struct request *req = iod_get_private(iod);
+ 	struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
++	bool requeue = false;
+ 
+ 	u16 status = le16_to_cpup(&cqe->status) >> 1;
+ 
+@@ -598,12 +599,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
+ 		    && (jiffies - req->start_time) < req->timeout) {
+ 			unsigned long flags;
+ 
++			requeue = true;
+ 			blk_mq_requeue_request(req);
+ 			spin_lock_irqsave(req->q->queue_lock, flags);
+ 			if (!blk_queue_stopped(req->q))
+ 				blk_mq_kick_requeue_list(req->q);
+ 			spin_unlock_irqrestore(req->q->queue_lock, flags);
+-			return;
++			goto release_iod;
+ 		}
+ 		req->errors = nvme_error_status(status);
+ 	} else
+@@ -613,7 +615,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
+ 		dev_warn(&nvmeq->dev->pci_dev->dev,
+ 			"completing aborted command with status:%04x\n",
+ 			status);
+-
++ release_iod:
+ 	if (iod->nents) {
+ 		dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
+ 			rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+@@ -626,7 +628,8 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
+ 	}
+ 	nvme_free_iod(nvmeq->dev, iod);
+ 
+-	blk_mq_complete_request(req);
++	if (likely(!requeue))
++		blk_mq_complete_request(req);
+ }
+ 
+ /* length is in bytes.  gfp flags indicates whether we may sleep. */
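The hunk above turns the early return on the requeue path into a fall-through: the iod is now unmapped and freed in every case, and only the final blk_mq_complete_request() is skipped for requeued requests. A sketch of that control flow under hypothetical names:

#include <stdio.h>
#include <stdbool.h>

/* Models the release_iod fall-through: resources are released on both
 * paths, completion only on the non-requeue one. */
static void handle_completion(bool retryable, int *iods_outstanding)
{
	bool requeue = false;

	if (retryable) {
		requeue = true;
		printf("requeued\n");
		goto release;         /* was: return, leaking the iod */
	}
	printf("error recorded\n");

release:
	(*iods_outstanding)--;        /* dma_unmap_sg()/nvme_free_iod() stand-in */
	if (!requeue)
		printf("completed\n");
}

int main(void)
{
	int outstanding = 2;
	handle_completion(true, &outstanding);
	handle_completion(false, &outstanding);
	printf("outstanding iods: %d\n", outstanding); /* 0: nothing leaked */
	return 0;
}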
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index fe8f1e4b4c7c..1ec6441fe2a5 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -96,6 +96,8 @@ static int atomic_dec_return_safe(atomic_t *v)
+ #define RBD_MINORS_PER_MAJOR		256
+ #define RBD_SINGLE_MAJOR_PART_SHIFT	4
+ 
++#define RBD_MAX_PARENT_CHAIN_LEN	16
++
+ #define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
+ #define RBD_MAX_SNAP_NAME_LEN	\
+ 			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
+@@ -425,7 +427,7 @@ static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
+ 				    size_t count);
+ static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
+ 				       size_t count);
+-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
++static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
+ static void rbd_spec_put(struct rbd_spec *spec);
+ 
+ static int rbd_dev_id_to_minor(int dev_id)
+@@ -3797,6 +3799,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
+ 	q->limits.discard_zeroes_data = 1;
+ 
+ 	blk_queue_merge_bvec(q, rbd_merge_bvec);
++	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
++		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
++
+ 	disk->queue = q;
+ 
+ 	q->queuedata = rbd_dev;
+@@ -5142,44 +5147,50 @@ out_err:
+ 	return ret;
+ }
+ 
+-static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
++/*
++ * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
++ * rbd_dev_image_probe() recursion depth, which means it's also the
++ * length of the already discovered part of the parent chain.
++ */
++static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
+ {
+ 	struct rbd_device *parent = NULL;
+-	struct rbd_spec *parent_spec;
+-	struct rbd_client *rbdc;
+ 	int ret;
+ 
+ 	if (!rbd_dev->parent_spec)
+ 		return 0;
+-	/*
+-	 * We need to pass a reference to the client and the parent
+-	 * spec when creating the parent rbd_dev.  Images related by
+-	 * parent/child relationships always share both.
+-	 */
+-	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
+-	rbdc = __rbd_get_client(rbd_dev->rbd_client);
+ 
+-	ret = -ENOMEM;
+-	parent = rbd_dev_create(rbdc, parent_spec);
+-	if (!parent)
++	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
++		pr_info("parent chain is too long (%d)\n", depth);
++		ret = -EINVAL;
+ 		goto out_err;
++	}
+ 
+-	ret = rbd_dev_image_probe(parent, false);
++	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
++	if (!parent) {
++		ret = -ENOMEM;
++		goto out_err;
++	}
++
++	/*
++	 * Images related by parent/child relationships always share
++	 * rbd_client and spec/parent_spec, so bump their refcounts.
++	 */
++	__rbd_get_client(rbd_dev->rbd_client);
++	rbd_spec_get(rbd_dev->parent_spec);
++
++	ret = rbd_dev_image_probe(parent, depth);
+ 	if (ret < 0)
+ 		goto out_err;
++
+ 	rbd_dev->parent = parent;
+ 	atomic_set(&rbd_dev->parent_ref, 1);
+-
+ 	return 0;
++
+ out_err:
+-	if (parent) {
+-		rbd_dev_unparent(rbd_dev);
++	rbd_dev_unparent(rbd_dev);
++	if (parent)
+ 		rbd_dev_destroy(parent);
+-	} else {
+-		rbd_put_client(rbdc);
+-		rbd_spec_put(parent_spec);
+-	}
+-
+ 	return ret;
+ }
+ 
+@@ -5297,7 +5308,7 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
+  * parent), initiate a watch on its header object before using that
+  * object to get detailed information about the rbd image.
+  */
+-static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
++static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
+ {
+ 	int ret;
+ 
+@@ -5315,7 +5326,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ 	if (ret)
+ 		goto err_out_format;
+ 
+-	if (mapping) {
++	if (!depth) {
+ 		ret = rbd_dev_header_watch_sync(rbd_dev);
+ 		if (ret) {
+ 			if (ret == -ENOENT)
+@@ -5336,7 +5347,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ 	 * Otherwise this is a parent image, identified by pool, image
+ 	 * and snap ids - need to fill in names for those ids.
+ 	 */
+-	if (mapping)
++	if (!depth)
+ 		ret = rbd_spec_fill_snap_id(rbd_dev);
+ 	else
+ 		ret = rbd_spec_fill_names(rbd_dev);
+@@ -5358,12 +5369,12 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ 		 * Need to warn users if this image is the one being
+ 		 * mapped and has a parent.
+ 		 */
+-		if (mapping && rbd_dev->parent_spec)
++		if (!depth && rbd_dev->parent_spec)
+ 			rbd_warn(rbd_dev,
+ 				 "WARNING: kernel layering is EXPERIMENTAL!");
+ 	}
+ 
+-	ret = rbd_dev_probe_parent(rbd_dev);
++	ret = rbd_dev_probe_parent(rbd_dev, depth);
+ 	if (ret)
+ 		goto err_out_probe;
+ 
+@@ -5374,7 +5385,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
+ err_out_probe:
+ 	rbd_dev_unprobe(rbd_dev);
+ err_out_watch:
+-	if (mapping)
++	if (!depth)
+ 		rbd_dev_header_unwatch_sync(rbd_dev);
+ out_header_name:
+ 	kfree(rbd_dev->header_name);
+@@ -5439,7 +5450,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
+ 	rbdc = NULL;		/* rbd_dev now owns this */
+ 	spec = NULL;		/* rbd_dev now owns this */
+ 
+-	rc = rbd_dev_image_probe(rbd_dev, true);
++	rc = rbd_dev_image_probe(rbd_dev, 0);
+ 	if (rc < 0)
+ 		goto err_out_rbd_dev;
+ 
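The recursion being bounded here is rbd_dev_image_probe() -> rbd_dev_probe_parent() -> rbd_dev_image_probe(), so a corrupted or hostile parent chain can no longer recurse without limit. A minimal model of the depth accounting (struct and names are hypothetical):

#include <stdio.h>

#define MAX_PARENT_CHAIN_LEN 16

struct image { const char *name; struct image *parent; };

/* Mirrors the new cap: depth counts already-discovered parents. */
static int probe(struct image *img, int depth)
{
	if (!img->parent)
		return 0;
	if (++depth > MAX_PARENT_CHAIN_LEN) {
		fprintf(stderr, "parent chain is too long (%d)\n", depth);
		return -1;
	}
	return probe(img->parent, depth);
}

int main(void)
{
	struct image base  = { "base",  NULL };
	struct image clone = { "clone", &base };

	printf("probe: %d\n", probe(&clone, 0)); /* 0: chain of length 1 is fine */
	return 0;
}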
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 89c7371ab2dc..42ef86c409b6 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1925,7 +1925,8 @@ static void blkback_changed(struct xenbus_device *dev,
+ 			break;
+ 		/* Missed the backend's Closing state -- fallthrough */
+ 	case XenbusStateClosing:
+-		blkfront_closing(info);
++		if (info)
++			blkfront_closing(info);
+ 		break;
+ 	}
+ }
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index e8d16997c5cb..1ee2ab58e37d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -761,6 +761,11 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
+ 	local_irq_save(flags);
+ 	rdmsrl(MSR_IA32_APERF, aperf);
+ 	rdmsrl(MSR_IA32_MPERF, mperf);
++	if (cpu->prev_mperf == mperf) {
++		local_irq_restore(flags);
++		return;
++	}
++
+ 	local_irq_restore(flags);
+ 
+ 	cpu->last_sample_time = cpu->sample.time;
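The sample math divides by the MPERF delta, so if MPERF has not advanced since the previous read the sample must be dropped rather than computed. The guard in isolation, as a runnable sketch:

#include <stdio.h>
#include <stdint.h>

/* If mperf did not move, the delta is zero and any aperf/mperf ratio
 * would divide by zero, so the sample is skipped as in the hunk above. */
static int sample(uint64_t aperf, uint64_t mperf,
		  uint64_t *prev_aperf, uint64_t *prev_mperf)
{
	if (mperf == *prev_mperf)
		return -1;                       /* drop this sample */

	double ratio = (double)(aperf - *prev_aperf) / (mperf - *prev_mperf);
	printf("busy ratio: %.2f\n", ratio);
	*prev_aperf = aperf;
	*prev_mperf = mperf;
	return 0;
}

int main(void)
{
	uint64_t pa = 100, pm = 100;
	sample(150, 100, &pa, &pm);  /* dropped: mperf unchanged */
	sample(300, 200, &pa, &pm);  /* (300-100)/(200-100) = 2.00 */
	return 0;
}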
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 1acf57ba4c86..cd6b9c72c8ac 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -1608,6 +1608,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
+ {
+ 	struct sbridge_pvt *pvt = mci->pvt_info;
+ 	struct pci_dev *pdev;
++	u8 saw_chan_mask = 0;
+ 	int i;
+ 
+ 	for (i = 0; i < sbridge_dev->n_devs; i++) {
+@@ -1641,6 +1642,7 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
+ 		{
+ 			int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
+ 			pvt->pci_tad[id] = pdev;
++			saw_chan_mask |= 1 << id;
+ 		}
+ 			break;
+ 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
+@@ -1661,10 +1663,8 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
+ 	    !pvt-> pci_tad || !pvt->pci_ras  || !pvt->pci_ta)
+ 		goto enodev;
+ 
+-	for (i = 0; i < NUM_CHANNELS; i++) {
+-		if (!pvt->pci_tad[i])
+-			goto enodev;
+-	}
++	if (saw_chan_mask != 0x0f)
++		goto enodev;
+ 	return 0;
+ 
+ enodev:
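Instead of requiring every pci_tad[] slot to be populated after the loop, the binding now records each TAD device it actually saw in a bitmask and compares against the full set once. A standalone model of that check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t saw_chan_mask = 0;
	int found[] = { 0, 1, 3 };   /* TAD devices discovered; channel 2 missing */

	for (unsigned i = 0; i < sizeof(found) / sizeof(found[0]); i++)
		saw_chan_mask |= 1 << found[i];

	if (saw_chan_mask != 0x0f)   /* all four channels required */
		printf("missing channel(s), mask=0x%02x\n", saw_chan_mask);
	return 0;
}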
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 2a2eb96caeda..109e776345d3 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1179,17 +1179,18 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 
+ 		list_for_each_entry(port, &mstb->ports, next) {
+ 			if (port->port_num == port_num) {
+-				if (!port->mstb) {
++				mstb = port->mstb;
++				if (!mstb) {
+ 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+-					return NULL;
++					goto out;
+ 				}
+ 
+-				mstb = port->mstb;
+ 				break;
+ 			}
+ 		}
+ 	}
+ 	kref_get(&mstb->kref);
++out:
+ 	mutex_unlock(&mgr->lock);
+ 	return mstb;
+ }
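The functional change above is that the failure path now reaches mutex_unlock(&mgr->lock) via the new out label instead of returning with the lock still held. A minimal pthreads model of the single-unlock-label pattern (build with -lpthread):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *lookup(int found)
{
	void *obj = NULL;

	pthread_mutex_lock(&lock);
	if (!found)
		goto out;         /* not: return NULL with the lock held */
	obj = &lock;              /* stand-in for the ref-counted branch */
out:
	pthread_mutex_unlock(&lock);
	return obj;
}

int main(void)
{
	printf("%p\n", lookup(0));
	printf("%p\n", lookup(1));
	return 0;
}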
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index 1719078c763a..ce175d05260b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -776,7 +776,10 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
+  * Also note, that the object created here is not currently a "first class"
+  * object, in that several ioctls are banned. These are the CPU access
+  * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
+- * direct access via your pointer rather than use those ioctls.
++ * direct access via your pointer rather than use those ioctls. Another
++ * restriction is that we do not allow userptr surfaces to be pinned to the
++ * hardware and so we reject any attempt to create a framebuffer out of a
++ * userptr.
+  *
+  * If you think this is a good interface to use to pass GPU memory between
+  * drivers, please use dma-buf instead. In fact, wherever possible use
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index f208bbc6d58e..7b27a114b030 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1699,6 +1699,8 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
+ 			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
+ 	}
+ 
++	I915_WRITE(reg, dpll);
++
+ 	/* Wait for the clocks to stabilize. */
+ 	POSTING_READ(reg);
+ 	udelay(150);
+@@ -13212,6 +13214,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ 	struct drm_i915_gem_object *obj = intel_fb->obj;
+ 
++	if (obj->userptr.mm) {
++		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
++		return -EINVAL;
++	}
++
+ 	return drm_gem_handle_create(file, &obj->base, handle);
+ }
+ 
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 9ab7c1c758ae..72f1bb8b0499 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -1298,6 +1298,7 @@ static int gen8_emit_flush_render(struct intel_ringbuffer *ringbuf,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 
+ 	if (invalidate_domains) {
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
+index 005b5e04de4d..b7e20dee64c4 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -342,6 +342,7 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 	if (invalidate_domains) {
+ 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+@@ -412,6 +413,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
+ 	if (flush_domains) {
+ 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+ 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
++		flags |= PIPE_CONTROL_FLUSH_ENABLE;
+ 	}
+ 	if (invalidate_domains) {
+ 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 0e690bf19fc9..58c959265b1a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -227,11 +227,12 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+ 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ 	struct nvkm_vma *vma;
+ 
+-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
++	if (is_power_of_2(nvbo->valid_domains))
++		rep->domain = nvbo->valid_domains;
++	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+ 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
+ 	else
+ 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
+-
+ 	rep->offset = nvbo->bo.offset;
+ 	if (cli->vm) {
+ 		vma = nouveau_bo_vma_find(nvbo, cli->vm);
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index b4ff4c134fbb..5be50ef2b30e 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -237,6 +237,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
+ 	backlight_update_status(bd);
+ 
+ 	DRM_INFO("radeon atom DIG backlight initialized\n");
++	rdev->mode_info.bl_encoder = radeon_encoder;
+ 
+ 	return;
+ 
+@@ -1624,9 +1625,14 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+ 		} else
+ 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+-			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++			if (rdev->mode_info.bl_encoder) {
++				struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ 
+-			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
++				atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
++			} else {
++				args.ucAction = ATOM_LCD_BLON;
++				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++			}
+ 		}
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+@@ -1706,8 +1712,13 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ 			if (ASIC_IS_DCE4(rdev))
+ 				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+ 		}
+-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+-			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
++		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
++			if (rdev->mode_info.bl_encoder)
++				atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
++			else
++				atombios_dig_transmitter_setup(encoder,
++							       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
++		}
+ 		if (ext_encoder)
+ 			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ 		break;
+diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
+index 46eb0fa75a61..91c3f60f8bac 100644
+--- a/drivers/gpu/drm/radeon/radeon.h
++++ b/drivers/gpu/drm/radeon/radeon.h
+@@ -1656,6 +1656,7 @@ struct radeon_pm {
+ 	u8                      fan_max_rpm;
+ 	/* dpm */
+ 	bool                    dpm_enabled;
++	bool                    sysfs_initialized;
+ 	struct radeon_dpm       dpm;
+ };
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
+index ef99917f000d..c6ee80216cf4 100644
+--- a/drivers/gpu/drm/radeon/radeon_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_encoders.c
+@@ -194,7 +194,6 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
+ 			radeon_atom_backlight_init(radeon_encoder, connector);
+ 		else
+ 			radeon_legacy_backlight_init(radeon_encoder, connector);
+-		rdev->mode_info.bl_encoder = radeon_encoder;
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+index 45715307db71..30de43366eae 100644
+--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+@@ -441,6 +441,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
+ 	backlight_update_status(bd);
+ 
+ 	DRM_INFO("radeon legacy LVDS backlight initialized\n");
++	rdev->mode_info.bl_encoder = radeon_encoder;
+ 
+ 	return;
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index 948c33105801..91764320c56f 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -720,10 +720,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
+ 	struct radeon_device *rdev = dev_get_drvdata(dev);
+ 	umode_t effective_mode = attr->mode;
+ 
+-	/* Skip limit attributes if DPM is not enabled */
++	/* Skip attributes if DPM is not enabled */
+ 	if (rdev->pm.pm_method != PM_METHOD_DPM &&
+ 	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
+-	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
++	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
++	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
++	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
++	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
++	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ 		return 0;
+ 
+ 	/* Skip fan attributes if fan is not present */
+@@ -1529,19 +1533,23 @@ int radeon_pm_late_init(struct radeon_device *rdev)
+ 
+ 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+ 		if (rdev->pm.dpm_enabled) {
+-			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+-			if (ret)
+-				DRM_ERROR("failed to create device file for dpm state\n");
+-			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+-			if (ret)
+-				DRM_ERROR("failed to create device file for dpm state\n");
+-			/* XXX: these are noops for dpm but are here for backwards compat */
+-			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+-			if (ret)
+-				DRM_ERROR("failed to create device file for power profile\n");
+-			ret = device_create_file(rdev->dev, &dev_attr_power_method);
+-			if (ret)
+-				DRM_ERROR("failed to create device file for power method\n");
++			if (!rdev->pm.sysfs_initialized) {
++				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
++				if (ret)
++					DRM_ERROR("failed to create device file for dpm state\n");
++				ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
++				if (ret)
++					DRM_ERROR("failed to create device file for dpm state\n");
++				/* XXX: these are noops for dpm but are here for backwards compat */
++				ret = device_create_file(rdev->dev, &dev_attr_power_profile);
++				if (ret)
++					DRM_ERROR("failed to create device file for power profile\n");
++				ret = device_create_file(rdev->dev, &dev_attr_power_method);
++				if (ret)
++					DRM_ERROR("failed to create device file for power method\n");
++				if (!ret)
++					rdev->pm.sysfs_initialized = true;
++			}
+ 
+ 			mutex_lock(&rdev->pm.mutex);
+ 			ret = radeon_dpm_late_enable(rdev);
+@@ -1557,7 +1565,8 @@ int radeon_pm_late_init(struct radeon_device *rdev)
+ 			}
+ 		}
+ 	} else {
+-		if (rdev->pm.num_power_states > 1) {
++		if ((rdev->pm.num_power_states > 1) &&
++		    (!rdev->pm.sysfs_initialized)) {
+ 			/* where's the best place to put these? */
+ 			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+ 			if (ret)
+@@ -1565,6 +1574,8 @@ int radeon_pm_late_init(struct radeon_device *rdev)
+ 			ret = device_create_file(rdev->dev, &dev_attr_power_method);
+ 			if (ret)
+ 				DRM_ERROR("failed to create device file for power method\n");
++			if (!ret)
++				rdev->pm.sysfs_initialized = true;
+ 		}
+ 	}
+ 	return ret;
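The new sysfs_initialized flag makes the sysfs file creation a one-shot, so a later pass through late init (it can run more than once over the device's life) does not try to recreate files that already exist. A sketch of the guard:

#include <stdio.h>
#include <stdbool.h>

static bool sysfs_initialized;

static int late_init(void)
{
	int ret = 0;

	if (!sysfs_initialized) {
		printf("creating sysfs files\n"); /* device_create_file() calls */
		if (!ret)
			sysfs_initialized = true;
	}
	return ret;
}

int main(void)
{
	late_init();  /* creates the files */
	late_init();  /* no-op on repeat */
	return 0;
}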
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+index 620bb5cf617c..15a8d7746fd2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+@@ -1458,6 +1458,9 @@ static void __exit vmwgfx_exit(void)
+ 	drm_pci_exit(&driver, &vmw_pci_driver);
+ }
+ 
++MODULE_INFO(vmw_patch, "ed7d78b2");
++MODULE_INFO(vmw_patch, "54c12bc3");
++
+ module_init(vmwgfx_init);
+ module_exit(vmwgfx_exit);
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+index d26a6daa9719..d8896ed41b9e 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+@@ -636,7 +636,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ 				 uint32_t size,
+ 				 bool shareable,
+ 				 uint32_t *handle,
+-				 struct vmw_dma_buffer **p_dma_buf);
++				 struct vmw_dma_buffer **p_dma_buf,
++				 struct ttm_base_object **p_base);
+ extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ 				     struct vmw_dma_buffer *dma_buf,
+ 				     uint32_t *handle);
+@@ -650,7 +651,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
+ 					 uint32_t cur_validate_node);
+ extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
+ extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+-				  uint32_t id, struct vmw_dma_buffer **out);
++				  uint32_t id, struct vmw_dma_buffer **out,
++				  struct ttm_base_object **base);
+ extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
+ 				  struct drm_file *file_priv);
+ extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 97ad3bcb99a7..aee1c6ccc52d 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -887,7 +887,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+ 
+-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
++	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
++				     NULL);
+ 	if (unlikely(ret != 0)) {
+ 		DRM_ERROR("Could not find or use MOB buffer.\n");
+ 		ret = -EINVAL;
+@@ -949,7 +950,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
+ 	struct vmw_relocation *reloc;
+ 	int ret;
+ 
+-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
++	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
++				     NULL);
+ 	if (unlikely(ret != 0)) {
+ 		DRM_ERROR("Could not find or use GMR region.\n");
+ 		ret = -EINVAL;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+index 87e39f68e9d0..e1898982b44a 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
+ 		goto out_unlock;
+ 	}
+ 
+-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
++	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
+ 	if (ret)
+ 		goto out_unlock;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+index 210ef15b1d09..c5b4c47e86d6 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+@@ -356,7 +356,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ 	}
+ 
+ 	*out_surf = NULL;
+-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
++	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
+ 	return ret;
+ }
+ 
+@@ -483,7 +483,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ 			  uint32_t size,
+ 			  bool shareable,
+ 			  uint32_t *handle,
+-			  struct vmw_dma_buffer **p_dma_buf)
++			  struct vmw_dma_buffer **p_dma_buf,
++			  struct ttm_base_object **p_base)
+ {
+ 	struct vmw_user_dma_buffer *user_bo;
+ 	struct ttm_buffer_object *tmp;
+@@ -517,6 +518,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ 	}
+ 
+ 	*p_dma_buf = &user_bo->dma;
++	if (p_base) {
++		*p_base = &user_bo->prime.base;
++		kref_get(&(*p_base)->refcount);
++	}
+ 	*handle = user_bo->prime.base.hash.key;
+ 
+ out_no_base_object:
+@@ -633,6 +638,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ 	struct vmw_dma_buffer *dma_buf;
+ 	struct vmw_user_dma_buffer *user_bo;
+ 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
++	struct ttm_base_object *buffer_base;
+ 	int ret;
+ 
+ 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+@@ -645,7 +651,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ 
+ 	switch (arg->op) {
+ 	case drm_vmw_synccpu_grab:
+-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
++		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
++					     &buffer_base);
+ 		if (unlikely(ret != 0))
+ 			return ret;
+ 
+@@ -653,6 +660,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ 				       dma);
+ 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ 		vmw_dmabuf_unreference(&dma_buf);
++		ttm_base_object_unref(&buffer_base);
+ 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ 			     ret != -EBUSY)) {
+ 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+@@ -694,7 +702,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
+ 		return ret;
+ 
+ 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+-				    req->size, false, &handle, &dma_buf);
++				    req->size, false, &handle, &dma_buf,
++				    NULL);
+ 	if (unlikely(ret != 0))
+ 		goto out_no_dmabuf;
+ 
+@@ -723,7 +732,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
+ }
+ 
+ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+-			   uint32_t handle, struct vmw_dma_buffer **out)
++			   uint32_t handle, struct vmw_dma_buffer **out,
++			   struct ttm_base_object **p_base)
+ {
+ 	struct vmw_user_dma_buffer *vmw_user_bo;
+ 	struct ttm_base_object *base;
+@@ -745,7 +755,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
+ 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ 				   prime.base);
+ 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
+-	ttm_base_object_unref(&base);
++	if (p_base)
++		*p_base = base;
++	else
++		ttm_base_object_unref(&base);
+ 	*out = &vmw_user_bo->dma;
+ 
+ 	return 0;
+@@ -1006,7 +1019,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
+ 
+ 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ 				    args->size, false, &args->handle,
+-				    &dma_buf);
++				    &dma_buf, NULL);
+ 	if (unlikely(ret != 0))
+ 		goto out_no_dmabuf;
+ 
+@@ -1034,7 +1047,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
+ 	struct vmw_dma_buffer *out_buf;
+ 	int ret;
+ 
+-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
++	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
+ 	if (ret != 0)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+index 6a4584a43aa6..d2751ada19b1 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+@@ -470,7 +470,7 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
+ 
+ 	if (arg->buffer_handle != SVGA3D_INVALID_ID) {
+ 		ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
+-					     &buffer);
++					     &buffer, NULL);
+ 		if (unlikely(ret != 0)) {
+ 			DRM_ERROR("Could not find buffer for shader "
+ 				  "creation.\n");
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+index 4ecdbf3e59da..17a4107639b2 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+@@ -43,6 +43,7 @@ struct vmw_user_surface {
+ 	struct vmw_surface srf;
+ 	uint32_t size;
+ 	struct drm_master *master;
++	struct ttm_base_object *backup_base;
+ };
+ 
+ /**
+@@ -652,6 +653,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+ 	struct vmw_resource *res = &user_srf->srf.res;
+ 
+ 	*p_base = NULL;
++	if (user_srf->backup_base)
++		ttm_base_object_unref(&user_srf->backup_base);
+ 	vmw_resource_unreference(&res);
+ }
+ 
+@@ -846,7 +849,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
+ 					    res->backup_size,
+ 					    true,
+ 					    &backup_handle,
+-					    &res->backup);
++					    &res->backup,
++					    &user_srf->backup_base);
+ 		if (unlikely(ret != 0)) {
+ 			vmw_resource_unreference(&res);
+ 			goto out_unlock;
+@@ -1309,7 +1313,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ 
+ 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
+ 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
+-					     &res->backup);
++					     &res->backup,
++					     &user_srf->backup_base);
+ 	} else if (req->drm_surface_flags &
+ 		   drm_vmw_surface_flag_create_buffer)
+ 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+@@ -1317,7 +1322,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
+ 					    req->drm_surface_flags &
+ 					    drm_vmw_surface_flag_shareable,
+ 					    &backup_handle,
+-					    &res->backup);
++					    &res->backup,
++					    &user_srf->backup_base);
+ 
+ 	if (unlikely(ret != 0)) {
+ 		vmw_resource_unreference(&res);
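The recurring change across these vmwgfx hunks is an optional out-parameter: when the caller passes p_base, lookup/alloc hand back the base object with its reference still held, and the caller owns the matching unref; a NULL p_base keeps the old drop-immediately behaviour. An illustrative refcount model:

#include <stdio.h>

struct base { int refcount; };

static struct base *lookup(struct base *obj, struct base **p_base)
{
	obj->refcount++;              /* reference taken by the lookup */
	if (p_base)
		*p_base = obj;        /* transfer it to the caller */
	else
		obj->refcount--;      /* old behaviour: drop it right away */
	return obj;
}

int main(void)
{
	struct base obj = { 1 };
	struct base *held = NULL;

	lookup(&obj, &held);
	printf("refcount=%d\n", obj.refcount);  /* 2: caller holds a ref */
	held->refcount--;                       /* ttm_base_object_unref() stand-in */
	printf("refcount=%d\n", obj.refcount);  /* 1 */
	return 0;
}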
+diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
+index 30059c1df2a3..5801227b97ab 100644
+--- a/drivers/i2c/busses/i2c-mv64xxx.c
++++ b/drivers/i2c/busses/i2c-mv64xxx.c
+@@ -669,8 +669,6 @@ mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
+ 	struct i2c_msg *msgs = drv_data->msgs;
+ 	int num = drv_data->num_msgs;
+ 
+-	return false;
+-
+ 	if (!drv_data->offload_enabled)
+ 		return false;
+ 
+diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
+index 211b13271c61..2ae7150442fc 100644
+--- a/drivers/iio/accel/st_accel_core.c
++++ b/drivers/iio/accel/st_accel_core.c
+@@ -149,8 +149,6 @@
+ #define ST_ACCEL_4_BDU_MASK			0x40
+ #define ST_ACCEL_4_DRDY_IRQ_ADDR		0x21
+ #define ST_ACCEL_4_DRDY_IRQ_INT1_MASK		0x04
+-#define ST_ACCEL_4_IG1_EN_ADDR			0x21
+-#define ST_ACCEL_4_IG1_EN_MASK			0x08
+ #define ST_ACCEL_4_MULTIREAD_BIT		true
+ 
+ static const struct iio_chan_spec st_accel_12bit_channels[] = {
+@@ -446,10 +444,6 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
+ 		.drdy_irq = {
+ 			.addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
+ 			.mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+-			.ig1 = {
+-				.en_addr = ST_ACCEL_4_IG1_EN_ADDR,
+-				.en_mask = ST_ACCEL_4_IG1_EN_MASK,
+-			},
+ 		},
+ 		.multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
+ 		.bootime = 2, /* guess */
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 0271608a51c4..0962b6821ce1 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -859,6 +859,11 @@ retest:
+ 	case IB_CM_SIDR_REQ_RCVD:
+ 		spin_unlock_irq(&cm_id_priv->lock);
+ 		cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
++		spin_lock_irq(&cm.lock);
++		if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node))
++			rb_erase(&cm_id_priv->sidr_id_node,
++				 &cm.remote_sidr_table);
++		spin_unlock_irq(&cm.lock);
+ 		break;
+ 	case IB_CM_REQ_SENT:
+ 	case IB_CM_MRA_REQ_RCVD:
+@@ -3098,7 +3103,10 @@ int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
+ 	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ 
+ 	spin_lock_irqsave(&cm.lock, flags);
+-	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
++	if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
++		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
++		RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
++	}
+ 	spin_unlock_irqrestore(&cm.lock, flags);
+ 	return 0;
+ 
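Both erase sites now check RB_EMPTY_NODE() first, and the sidr_rep path clears the node after erasing, so whichever of ib_send_cm_sidr_rep() and the cm_id teardown runs second sees an already-unlinked node and skips the rb_erase(). The rbtree convention can be modelled with a parent pointer that points at the node itself when unlinked:

#include <stdio.h>

struct node { struct node *parent; }; /* parent == self means "not in a tree" */

static int  node_linked(struct node *n) { return n->parent != n; }
static void node_clear(struct node *n)  { n->parent = n; }

static void erase(struct node *n)
{
	if (!node_linked(n))
		return;              /* the guard the hunk above adds */
	printf("rb_erase()\n");
	node_clear(n);               /* mark unlinked: a second erase is safe */
}

int main(void)
{
	struct node a;
	a.parent = NULL;             /* pretend it was inserted (at the root) */
	erase(&a);                   /* erases once */
	erase(&a);                   /* second caller is a no-op */
	return 0;
}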
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index bc7eed67998a..4b9e31a5b3f8 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -100,7 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
+ #define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
+ #define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
+ 					   6-byte ALPS packet */
+-#define ALPS_DELL		0x100	/* device is a Dell laptop */
++#define ALPS_STICK_BITS		0x100	/* separate stick button bits */
+ #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
+ 
+ static const struct alps_model_info alps_model_data[] = {
+@@ -159,6 +159,43 @@ static const struct alps_protocol_info alps_v8_protocol_data = {
+ 	ALPS_PROTO_V8, 0x18, 0x18, 0
+ };
+ 
++/*
++ * Some v2 models report the stick buttons in separate bits
++ */
++static const struct dmi_system_id alps_dmi_has_separate_stick_buttons[] = {
++#if defined(CONFIG_DMI) && defined(CONFIG_X86)
++	{
++		/* Extrapolated from other entries */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D420"),
++		},
++	},
++	{
++		/* Reported-by: Hans de Bruin <jmdebruin@xmsnet.nl> */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D430"),
++		},
++	},
++	{
++		/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D620"),
++		},
++	},
++	{
++		/* Extrapolated from other entries */
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude D630"),
++		},
++	},
++#endif
++	{ }
++};
++
+ static void alps_set_abs_params_st(struct alps_data *priv,
+ 				   struct input_dev *dev1);
+ static void alps_set_abs_params_mt(struct alps_data *priv,
+@@ -253,9 +290,8 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
+ 		return;
+ 	}
+ 
+-	/* Dell non interleaved V2 dualpoint has separate stick button bits */
+-	if (priv->proto_version == ALPS_PROTO_V2 &&
+-	    priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
++	/* Some models have separate stick button bits */
++	if (priv->flags & ALPS_STICK_BITS) {
+ 		left |= packet[0] & 1;
+ 		right |= packet[0] & 2;
+ 		middle |= packet[0] & 4;
+@@ -2544,8 +2580,6 @@ static int alps_set_protocol(struct psmouse *psmouse,
+ 	priv->byte0 = protocol->byte0;
+ 	priv->mask0 = protocol->mask0;
+ 	priv->flags = protocol->flags;
+-	if (dmi_name_in_vendors("Dell"))
+-		priv->flags |= ALPS_DELL;
+ 
+ 	priv->x_max = 2000;
+ 	priv->y_max = 1400;
+@@ -2560,6 +2594,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
+ 		priv->set_abs_params = alps_set_abs_params_st;
+ 		priv->x_max = 1023;
+ 		priv->y_max = 767;
++		if (dmi_check_system(alps_dmi_has_separate_stick_buttons))
++			priv->flags |= ALPS_STICK_BITS;
+ 		break;
+ 
+ 	case ALPS_PROTO_V3:
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index ca9f4edbb940..f0fd5352f8ef 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2099,8 +2099,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
+ static void clear_dte_entry(u16 devid)
+ {
+ 	/* remove entry from the device table seen by the hardware */
+-	amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
+-	amd_iommu_dev_table[devid].data[1] = 0;
++	amd_iommu_dev_table[devid].data[0]  = IOMMU_PTE_P | IOMMU_PTE_TV;
++	amd_iommu_dev_table[devid].data[1] &= DTE_FLAG_MASK;
+ 
+ 	amd_iommu_apply_erratum_63(devid);
+ }
+diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
+index 05030e523771..cbfd0f4c4608 100644
+--- a/drivers/iommu/amd_iommu_types.h
++++ b/drivers/iommu/amd_iommu_types.h
+@@ -295,6 +295,7 @@
+ #define IOMMU_PTE_IR (1ULL << 61)
+ #define IOMMU_PTE_IW (1ULL << 62)
+ 
++#define DTE_FLAG_MASK	(0x3ffULL << 32)
+ #define DTE_FLAG_IOTLB	(0x01UL << 32)
+ #define DTE_FLAG_GV	(0x01ULL << 55)
+ #define DTE_GLX_SHIFT	(56)
+diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
+index 3465faf1809e..45087c3e5c57 100644
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -508,6 +508,13 @@ static void do_fault(struct work_struct *work)
+ 		goto out;
+ 	}
+ 
++	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
++		/* handle_mm_fault would BUG_ON() */
++		up_read(&mm->mmap_sem);
++		handle_fault_error(fault);
++		goto out;
++	}
++
+ 	ret = handle_mm_fault(mm, vma, address, write);
+ 	if (ret & VM_FAULT_ERROR) {
+ 		/* failed to service fault */
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index c23427951ec1..8b0178db6a04 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -2033,15 +2033,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
+ 				return -ENOMEM;
+ 			/* It is large page*/
+ 			if (largepage_lvl > 1) {
++				unsigned long nr_superpages, end_pfn;
++
+ 				pteval |= DMA_PTE_LARGE_PAGE;
+ 				lvl_pages = lvl_to_nr_pages(largepage_lvl);
++
++				nr_superpages = sg_res / lvl_pages;
++				end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
++
+ 				/*
+ 				 * Ensure that old small page tables are
+-				 * removed to make room for superpage,
+-				 * if they exist.
++				 * removed to make room for superpage(s).
+ 				 */
+-				dma_pte_free_pagetable(domain, iov_pfn,
+-						       iov_pfn + lvl_pages - 1);
++				dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
+ 			} else {
+ 				pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
+ 			}
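When mapping a run of superpages, the old code freed only the first superpage's worth of small page tables; the new end_pfn covers every superpage the scatterlist entry will occupy. The arithmetic in isolation, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned long iov_pfn   = 0x1000;
	unsigned long sg_res    = 2048;   /* 4 KiB pages covered by this entry */
	unsigned long lvl_pages = 512;    /* pages per 2 MiB superpage */

	unsigned long nr_superpages = sg_res / lvl_pages;
	unsigned long end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;

	/* old code freed only up to the end of the first superpage */
	printf("old end: 0x%lx\n", iov_pfn + lvl_pages - 1);
	printf("new end: 0x%lx (%lu superpages)\n", end_pfn, nr_superpages);
	return 0;
}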
+diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
+index f67bbd80433e..ab5353a96a82 100644
+--- a/drivers/irqchip/irq-tegra.c
++++ b/drivers/irqchip/irq-tegra.c
+@@ -215,6 +215,7 @@ static struct irq_chip tegra_ictlr_chip = {
+ 	.irq_unmask		= tegra_unmask,
+ 	.irq_retrigger		= tegra_retrigger,
+ 	.irq_set_wake		= tegra_set_wake,
++	.irq_set_type		= irq_chip_set_type_parent,
+ 	.flags			= IRQCHIP_MASK_ON_SUSPEND,
+ #ifdef CONFIG_SMP
+ 	.irq_set_affinity	= irq_chip_set_affinity_parent,
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e8c44fcb1ad1..78c1f77e7903 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8013,8 +8013,7 @@ static int remove_and_add_spares(struct mddev *mddev,
+ 		       !test_bit(Bitmap_sync, &rdev->flags)))
+ 			continue;
+ 
+-		if (rdev->saved_raid_disk < 0)
+-			rdev->recovery_offset = 0;
++		rdev->recovery_offset = 0;
+ 		if (mddev->pers->
+ 		    hot_add_disk(mddev, rdev) == 0) {
+ 			if (sysfs_link_rdev(mddev, rdev))
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index 7c0d75547ccf..92cd09f3c69b 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -301,11 +301,16 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ {
+ 	int s;
+ 	uint32_t max_entries = le32_to_cpu(left->header.max_entries);
+-	unsigned target = (nr_left + nr_center + nr_right) / 3;
+-	BUG_ON(target > max_entries);
++	unsigned total = nr_left + nr_center + nr_right;
++	unsigned target_right = total / 3;
++	unsigned remainder = (target_right * 3) != total;
++	unsigned target_left = target_right + remainder;
++
++	BUG_ON(target_left > max_entries);
++	BUG_ON(target_right > max_entries);
+ 
+ 	if (nr_left < nr_right) {
+-		s = nr_left - target;
++		s = nr_left - target_left;
+ 
+ 		if (s < 0 && nr_center < -s) {
+ 			/* not enough in central node */
+@@ -316,10 +321,10 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		} else
+ 			shift(left, center, s);
+ 
+-		shift(center, right, target - nr_right);
++		shift(center, right, target_right - nr_right);
+ 
+ 	} else {
+-		s = target - nr_right;
++		s = target_right - nr_right;
+ 		if (s > 0 && nr_center < s) {
+ 			/* not enough in central node */
+ 			shift(center, right, nr_center);
+@@ -329,7 +334,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		} else
+ 			shift(center, right, s);
+ 
+-		shift(left, center, nr_left - target);
++		shift(left, center, nr_left - target_left);
+ 	}
+ 
+ 	*key_ptr(parent, c->index) = center->keys[0];
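The old code used a single target of total/3 for both outer nodes, so totals that are not a multiple of three left the remainder unaccounted for; the left target now absorbs it. The arithmetic on its own:

#include <stdio.h>
#include <assert.h>

/* Mirrors the new split: right target is total/3, left target takes +1
 * whenever the division has a remainder. */
static void targets(unsigned nl, unsigned nc, unsigned nr,
		    unsigned *tl, unsigned *tr)
{
	unsigned total = nl + nc + nr;

	*tr = total / 3;
	*tl = *tr + ((*tr * 3) != total);
}

int main(void)
{
	unsigned tl, tr;

	targets(10, 11, 12, &tl, &tr);   /* total 33: 11/11 */
	assert(tl == 11 && tr == 11);
	targets(10, 11, 13, &tl, &tr);   /* total 34: a single target of 11
					    leaves one entry unplaced */
	assert(tl == 12 && tr == 11);
	printf("ok\n");
	return 0;
}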
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index c7726cebc495..d6e47033b5e0 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -523,7 +523,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
+ 
+ 	r = new_block(s->info, &right);
+ 	if (r < 0) {
+-		/* FIXME: put left */
++		unlock_block(s->info, left);
+ 		return r;
+ 	}
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 5ce3cd5c4e1d..bff6c1c7fecb 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2248,7 +2248,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
+ 		bio_trim(wbio, sector - r1_bio->sector, sectors);
+ 		wbio->bi_iter.bi_sector += rdev->data_offset;
+ 		wbio->bi_bdev = rdev->bdev;
+-		if (submit_bio_wait(WRITE, wbio) == 0)
++		if (submit_bio_wait(WRITE, wbio) < 0)
+ 			/* failure! */
+ 			ok = rdev_set_badblocks(rdev, sector,
+ 						sectors, 0)
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index fe0122771642..adfc83a0f023 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2590,7 +2590,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
+ 				   choose_data_offset(r10_bio, rdev) +
+ 				   (sector - r10_bio->sector));
+ 		wbio->bi_bdev = rdev->bdev;
+-		if (submit_bio_wait(WRITE, wbio) == 0)
++		if (submit_bio_wait(WRITE, wbio) < 0)
+ 			/* Failure! */
+ 			ok = rdev_set_badblocks(rdev, sector,
+ 						sectors, 0)
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index 23af6772f146..0d767e31f455 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3494,6 +3494,7 @@ returnbi:
+ 		}
+ 	if (!discard_pending &&
+ 	    test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) {
++		int hash;
+ 		clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags);
+ 		clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags);
+ 		if (sh->qd_idx >= 0) {
+@@ -3507,16 +3508,17 @@ returnbi:
+ 		 * no updated data, so remove it from hash list and the stripe
+ 		 * will be reinitialized
+ 		 */
+-		spin_lock_irq(&conf->device_lock);
+ unhash:
++		hash = sh->hash_lock_index;
++		spin_lock_irq(conf->hash_locks + hash);
+ 		remove_hash(sh);
++		spin_unlock_irq(conf->hash_locks + hash);
+ 		if (head_sh->batch_head) {
+ 			sh = list_first_entry(&sh->batch_list,
+ 					      struct stripe_head, batch_list);
+ 			if (sh != head_sh)
+ 					goto unhash;
+ 		}
+-		spin_unlock_irq(&conf->device_lock);
+ 		sh = head_sh;
+ 
+ 		if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state))
+diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
+index 5db588ebfc24..391e98395b41 100644
+--- a/drivers/media/dvb-frontends/si2168.c
++++ b/drivers/media/dvb-frontends/si2168.c
+@@ -457,6 +457,10 @@ static int si2168_init(struct dvb_frontend *fe)
+ 		/* firmware is in the new format */
+ 		for (remaining = fw->size; remaining > 0; remaining -= 17) {
+ 			len = fw->data[fw->size - remaining];
++			if (len > SI2168_ARGLEN) {
++				ret = -EINVAL;
++				break;
++			}
+ 			memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+ 			cmd.wlen = len;
+ 			cmd.rlen = 1;
+diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
+index d74ae26621ca..c5dbba5b5bc9 100644
+--- a/drivers/media/tuners/si2157.c
++++ b/drivers/media/tuners/si2157.c
+@@ -165,6 +165,10 @@ static int si2157_init(struct dvb_frontend *fe)
+ 
+ 	for (remaining = fw->size; remaining > 0; remaining -= 17) {
+ 		len = fw->data[fw->size - remaining];
++		if (len > SI2157_ARGLEN) {
++			dev_err(&client->dev, "Bad firmware length\n");
++			goto err_release_firmware;
++		}
+ 		memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
+ 		cmd.wlen = len;
+ 		cmd.rlen = 1;
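In both si2168 and si2157 the per-record length byte comes straight from the firmware image, so the new check rejects any record longer than the fixed command buffer before the memcpy(). A standalone model (the ARGLEN value here is illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ARGLEN 30   /* size of the fixed command-argument buffer */

int main(void)
{
	uint8_t args[ARGLEN];
	/* firmware record: 1 length byte + payload */
	uint8_t record[65] = { 0x40 /* 64: corrupt/hostile length */ };

	uint8_t len = record[0];
	if (len > sizeof(args)) {
		fprintf(stderr, "bad firmware length %u\n", len);
		return 1;    /* was: memcpy() overflowing args[] */
	}
	memcpy(args, &record[1], len);
	return 0;
}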
+diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+index 895441fe90f7..e862554952c1 100644
+--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
++++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+@@ -34,6 +34,14 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
+ 	unsigned int pipe;
+ 	u8 requesttype;
+ 
++	mutex_lock(&d->usb_mutex);
++
++	if (req->size > sizeof(dev->buf)) {
++		dev_err(&d->intf->dev, "too large message %u\n", req->size);
++		ret = -EINVAL;
++		goto err_mutex_unlock;
++	}
++
+ 	if (req->index & CMD_WR_FLAG) {
+ 		/* write */
+ 		memcpy(dev->buf, req->data, req->size);
+@@ -50,14 +58,17 @@ static int rtl28xxu_ctrl_msg(struct dvb_usb_device *d, struct rtl28xxu_req *req)
+ 	dvb_usb_dbg_usb_control_msg(d->udev, 0, requesttype, req->value,
+ 			req->index, dev->buf, req->size);
+ 	if (ret < 0)
+-		goto err;
++		goto err_mutex_unlock;
+ 
+ 	/* read request, copy returned data to return buf */
+ 	if (requesttype == (USB_TYPE_VENDOR | USB_DIR_IN))
+ 		memcpy(req->data, dev->buf, req->size);
+ 
++	mutex_unlock(&d->usb_mutex);
++
+ 	return 0;
+-err:
++err_mutex_unlock:
++	mutex_unlock(&d->usb_mutex);
+ 	dev_dbg(&d->intf->dev, "failed=%d\n", ret);
+ 	return ret;
+ }
+diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+index 1b5d7ffb685e..1bdeda05d332 100644
+--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
++++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.h
+@@ -69,7 +69,7 @@
+ 
+ 
+ struct rtl28xxu_dev {
+-	u8 buf[28];
++	u8 buf[128];
+ 	u8 chip_id;
+ 	u8 tuner;
+ 	char *tuner_name;
+diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
+index f8d11efa7b0f..46a389c20bfc 100644
+--- a/drivers/net/wireless/ath/ath9k/init.c
++++ b/drivers/net/wireless/ath/ath9k/init.c
+@@ -874,6 +874,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+ 	hw->max_rate_tries = 10;
+ 	hw->sta_data_size = sizeof(struct ath_node);
+ 	hw->vif_data_size = sizeof(struct ath_vif);
++	hw->extra_tx_headroom = 4;
+ 
+ 	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
+ 	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
+diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
+index 1d2223df5cb0..e7d3566c714b 100644
+--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
++++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
+@@ -1022,7 +1022,7 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ 			u8 *pn = seq.ccmp.pn;
+ 
+ 			ieee80211_get_key_rx_seq(key, i, &seq);
+-			aes_sc->pn = cpu_to_le64(
++			aes_sc[i].pn = cpu_to_le64(
+ 					(u64)pn[5] |
+ 					((u64)pn[4] << 8) |
+ 					((u64)pn[3] << 16) |
+diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
+index 74ad278116be..fd83e30eaf00 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
++++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
+@@ -325,6 +325,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
+ };
+ 
+ MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+-MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
++MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+ MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+ MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
+index 4310cf102d78..89d6a6100c88 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
++++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
+@@ -298,12 +298,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+ 			u8 *pn = seq.ccmp.pn;
+ 
+ 			ieee80211_get_key_rx_seq(key, i, &seq);
+-			aes_sc->pn = cpu_to_le64((u64)pn[5] |
+-						 ((u64)pn[4] << 8) |
+-						 ((u64)pn[3] << 16) |
+-						 ((u64)pn[2] << 24) |
+-						 ((u64)pn[1] << 32) |
+-						 ((u64)pn[0] << 40));
++			aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
++						   ((u64)pn[4] << 8) |
++						   ((u64)pn[3] << 16) |
++						   ((u64)pn[2] << 24) |
++						   ((u64)pn[1] << 32) |
++						   ((u64)pn[0] << 40));
+ 		}
+ 		data->use_rsc_tsc = true;
+ 		break;
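The dvm and mvm hunks fix the same bug: the per-index loop wrote every packed packet number to aes_sc->pn, i.e. element 0, so only one replay counter was ever programmed. The byte packing and the corrected indexing, modelled standalone (the array length is illustrative):

#include <stdio.h>
#include <stdint.h>

#define NUM_RSC 4   /* one replay counter per index */

/* Pack a 48-bit CCMP packet number, pn[0] most significant, exactly as
 * the shifts above do. */
static uint64_t pack_pn(const uint8_t pn[6])
{
	return  (uint64_t)pn[5]         | ((uint64_t)pn[4] << 8)  |
		((uint64_t)pn[3] << 16) | ((uint64_t)pn[2] << 24) |
		((uint64_t)pn[1] << 32) | ((uint64_t)pn[0] << 40);
}

int main(void)
{
	uint64_t aes_sc[NUM_RSC] = { 0 };
	const uint8_t pn[6] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };

	for (int i = 0; i < NUM_RSC; i++)
		aes_sc[i] = pack_pn(pn); /* was: aes_sc->pn, slot 0 only */

	printf("%#llx\n", (unsigned long long)aes_sc[1]); /* 0x10203040506 */
	return 0;
}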
+diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
+index df869633f4dd..1e1c77a59760 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
+@@ -364,7 +364,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+ 	 * abort after reading the nvm in case RF Kill is on, we will complete
+ 	 * the init seq later when RF kill will switch to off
+ 	 */
+-	if (iwl_mvm_is_radio_killed(mvm)) {
++	if (iwl_mvm_is_radio_hw_killed(mvm)) {
+ 		IWL_DEBUG_RF_KILL(mvm,
+ 				  "jump over all phy activities due to RF kill\n");
+ 		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+@@ -397,7 +397,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+ 	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+ 			MVM_UCODE_CALIB_TIMEOUT);
+ 
+-	if (ret && iwl_mvm_is_radio_killed(mvm)) {
++	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
+ 		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+ 		ret = 1;
+ 	}
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index 60c138a9bf4f..9779c1e5688c 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -2277,6 +2277,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+ 		iwl_mvm_remove_time_event(mvm, mvmvif,
+ 					  &mvmvif->time_event_data);
+ 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
++		mvmvif->csa_countdown = false;
+ 	}
+ 
+ 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index 6af21daaaaef..83273adfabdd 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -870,6 +870,11 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+ 	       test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+ }
+ 
++static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
++{
++	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
++}
++
+ /* Must be called with rcu_read_lock() held and it can only be
+  * released when mvmsta is not needed anymore.
+  */
+diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
+index 2ea01238754e..8d4f287dca3b 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
+@@ -589,6 +589,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ 	ieee80211_unregister_hw(mvm->hw);
+ 	iwl_mvm_leds_exit(mvm);
+  out_free:
++	flush_delayed_work(&mvm->fw_dump_wk);
+ 	iwl_phy_db_free(mvm->phy_db);
+ 	kfree(mvm->scan_cmd);
+ 	if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
+diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
+index b18569734922..8b16949a9cb9 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
++++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
+@@ -412,6 +412,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
+ 	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+ 	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
++	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
+ 
+ /* 8000 Series */
+ 	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
+index d4567d12e07e..5da6703942d9 100644
+--- a/drivers/net/wireless/rtlwifi/pci.h
++++ b/drivers/net/wireless/rtlwifi/pci.h
+@@ -247,6 +247,8 @@ struct rtl_pci {
+ 	/* MSI support */
+ 	bool msi_support;
+ 	bool using_msi;
++	/* interrupt clear before set */
++	bool int_clear;
+ };
+ 
+ struct mp_adapter {
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+index 3fa2fb7c8e4e..76e52dfb2be5 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+@@ -2253,11 +2253,28 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
++static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
++{
++	struct rtl_priv *rtlpriv = rtl_priv(hw);
++	u32 tmp = rtl_read_dword(rtlpriv, REG_HISR);
++
++	rtl_write_dword(rtlpriv, REG_HISR, tmp);
++
++	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
++	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
++
++	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
++	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
++}
++
+ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 
++	if (!rtlpci->int_clear)
++		rtl8821ae_clear_interrupt(hw);/*clear it here first*/
++
+ 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ 	rtlpci->irq_enabled = true;
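
The new rtl8821ae_clear_interrupt() above uses the common write-one-to-clear
idiom: read the interrupt status register and write the value straight back,
which acknowledges exactly the bits that were pending. A self-contained model
of that idiom, with an array standing in for the MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regs[3] = { 0x0000000c, 0x00000001, 0x00000000 };

    static uint32_t read_reg(int i)          { return regs[i]; }
    static void write_reg(int i, uint32_t v) { regs[i] &= ~v; } /* W1C */

    static void clear_pending(void)
    {
        for (int i = 0; i < 3; i++)
            write_reg(i, read_reg(i));   /* ack everything pending */
    }

    int main(void)
    {
        clear_pending();
        printf("%#x %#x %#x\n", regs[0], regs[1], regs[2]);  /* 0 0 0 */
        return 0;
    }
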
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+index a4988121e1ab..8ee141a55bc5 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/sw.c
+@@ -96,6 +96,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ 
+ 	rtl8821ae_bt_reg_init(hw);
+ 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
+ 	rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
+ 
+ 	rtlpriv->dm.dm_initialgain_enable = 1;
+@@ -167,6 +168,7 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw)
+ 	rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ 	rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+ 	rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
++	rtlpci->int_clear = rtlpriv->cfg->mod_params->int_clear;
+ 	if (rtlpriv->cfg->mod_params->disable_watchdog)
+ 		pr_info("watchdog disabled\n");
+ 	rtlpriv->psc.reg_fwctrl_lps = 3;
+@@ -308,6 +310,7 @@ static struct rtl_mod_params rtl8821ae_mod_params = {
+ 	.swctrl_lps = false,
+ 	.fwctrl_lps = true,
+ 	.msi_support = true,
++	.int_clear = true,
+ 	.debug = DBG_EMERG,
+ 	.disable_watchdog = 0,
+ };
+@@ -437,6 +440,7 @@ module_param_named(fwlps, rtl8821ae_mod_params.fwctrl_lps, bool, 0444);
+ module_param_named(msi, rtl8821ae_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8821ae_mod_params.disable_watchdog,
+ 		   bool, 0444);
++module_param_named(int_clear, rtl8821ae_mod_params.int_clear, bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+ MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
+ MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
+@@ -444,6 +448,7 @@ MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
+ MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 1)\n");
+ MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
+ MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
++MODULE_PARM_DESC(int_clear, "Set to 1 to disable interrupt clear before set (default 1)\n");
+ 
+ static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
+ 
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index 51572912c53d..f1fa8100f288 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -2233,6 +2233,9 @@ struct rtl_mod_params {
+ 
+ 	/* default 0: 1 means disable */
+ 	bool disable_watchdog;
++
++	/* default 1: 1 means do not clear interrupts before setting the mask */
++	bool int_clear;
+ };
+ 
+ struct rtl_hal_usbint_cfg {
+diff --git a/drivers/of/platform.c b/drivers/of/platform.c
+index a01f57c9e34e..ddf8e42c9367 100644
+--- a/drivers/of/platform.c
++++ b/drivers/of/platform.c
+@@ -25,6 +25,7 @@
+ 
+ const struct of_device_id of_default_bus_match_table[] = {
+ 	{ .compatible = "simple-bus", },
++	{ .compatible = "simple-mfd", },
+ #ifdef CONFIG_ARM_AMBA
+ 	{ .compatible = "arm,amba-bus", },
+ #endif /* CONFIG_ARM_AMBA */
+diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
+index 312f23a8429c..92618686604c 100644
+--- a/drivers/pci/pci-sysfs.c
++++ b/drivers/pci/pci-sysfs.c
+@@ -216,7 +216,7 @@ static ssize_t numa_node_store(struct device *dev,
+ 	if (ret)
+ 		return ret;
+ 
+-	if (!node_online(node))
++	if (node >= MAX_NUMNODES || !node_online(node))
+ 		return -EINVAL;
+ 
+ 	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
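
The pci-sysfs fix above is a bounds check on untrusted input: node_online()
indexes a fixed-size bitmap, so a large value written by userspace must be
rejected before the lookup. Sketch of the same rule (MAX_NUMNODES value and
the map are illustrative stand-ins):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_NUMNODES 64                    /* illustrative bound */
    static int node_online_map[MAX_NUMNODES];  /* toy nodemask */

    /* Range-check first, exactly as the patch does with
     * 'node >= MAX_NUMNODES', then consult the map. */
    static int validate_node(long node)
    {
        if (node < 0 || node >= MAX_NUMNODES || !node_online_map[node])
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        node_online_map[0] = 1;
        printf("%d %d %d\n", validate_node(0), validate_node(3),
               validate_node(1 << 20));   /* 0 -22 -22 */
        return 0;
    }
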
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index 2062c224e32f..b2602210784d 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -146,7 +146,7 @@ struct byt_gpio_pin_context {
+ struct byt_gpio {
+ 	struct gpio_chip		chip;
+ 	struct platform_device		*pdev;
+-	spinlock_t			lock;
++	raw_spinlock_t			lock;
+ 	void __iomem			*reg_base;
+ 	struct pinctrl_gpio_range	*range;
+ 	struct byt_gpio_pin_context	*saved_context;
+@@ -174,11 +174,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned offset)
+ 	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 	value = readl(reg);
+ 	value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
+ 	writel(value, reg);
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ }
+ 
+ static u32 byt_get_gpio_mux(struct byt_gpio *vg, unsigned offset)
+@@ -201,6 +201,9 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
+ 	struct byt_gpio *vg = to_byt_gpio(chip);
+ 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_CONF0_REG);
+ 	u32 value, gpio_mux;
++	unsigned long flags;
++
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 
+ 	/*
+ 	 * In most cases, func pin mux 000 means GPIO function.
+@@ -214,18 +217,16 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
+ 	value = readl(reg) & BYT_PIN_MUX;
+ 	gpio_mux = byt_get_gpio_mux(vg, offset);
+ 	if (WARN_ON(gpio_mux != value)) {
+-		unsigned long flags;
+-
+-		spin_lock_irqsave(&vg->lock, flags);
+ 		value = readl(reg) & ~BYT_PIN_MUX;
+ 		value |= gpio_mux;
+ 		writel(value, reg);
+-		spin_unlock_irqrestore(&vg->lock, flags);
+ 
+ 		dev_warn(&vg->pdev->dev,
+ 			 "pin %u forcibly re-configured as GPIO\n", offset);
+ 	}
+ 
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
++
+ 	pm_runtime_get(&vg->pdev->dev);
+ 
+ 	return 0;
+@@ -250,7 +251,7 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
+ 	if (offset >= vg->chip.ngpio)
+ 		return -EINVAL;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 	value = readl(reg);
+ 
+ 	WARN(value & BYT_DIRECT_IRQ_EN,
+@@ -269,7 +270,7 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
+ 	else if (type & IRQ_TYPE_LEVEL_MASK)
+ 		__irq_set_handler_locked(d->irq, handle_level_irq);
+ 
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -277,7 +278,15 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
+ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
+ {
+ 	void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
+-	return readl(reg) & BYT_LEVEL;
++	struct byt_gpio *vg = to_byt_gpio(chip);
++	unsigned long flags;
++	u32 val;
++
++	raw_spin_lock_irqsave(&vg->lock, flags);
++	val = readl(reg);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
++
++	return val & BYT_LEVEL;
+ }
+ 
+ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+@@ -287,7 +296,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 	unsigned long flags;
+ 	u32 old_val;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 
+ 	old_val = readl(reg);
+ 
+@@ -296,7 +305,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+ 	else
+ 		writel(old_val & ~BYT_LEVEL, reg);
+ 
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ }
+ 
+ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+@@ -306,13 +315,13 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+ 	unsigned long flags;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 
+ 	value = readl(reg) | BYT_DIR_MASK;
+ 	value &= ~BYT_INPUT_EN;		/* active low */
+ 	writel(value, reg);
+ 
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -326,7 +335,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
+ 	unsigned long flags;
+ 	u32 reg_val;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 
+ 	/*
+ 	 * Before making any direction modifications, do a check if gpio
+@@ -345,7 +354,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
+ 	else
+ 		writel(reg_val & ~BYT_LEVEL, reg);
+ 
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ 
+ 	return 0;
+ }
+@@ -354,18 +363,19 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+ {
+ 	struct byt_gpio *vg = to_byt_gpio(chip);
+ 	int i;
+-	unsigned long flags;
+ 	u32 conf0, val, offs;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
+-
+ 	for (i = 0; i < vg->chip.ngpio; i++) {
+ 		const char *pull_str = NULL;
+ 		const char *pull = NULL;
++		unsigned long flags;
+ 		const char *label;
+ 		offs = vg->range->pins[i] * 16;
++
++		raw_spin_lock_irqsave(&vg->lock, flags);
+ 		conf0 = readl(vg->reg_base + offs + BYT_CONF0_REG);
+ 		val = readl(vg->reg_base + offs + BYT_VAL_REG);
++		raw_spin_unlock_irqrestore(&vg->lock, flags);
+ 
+ 		label = gpiochip_is_requested(chip, i);
+ 		if (!label)
+@@ -418,7 +428,6 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+ 
+ 		seq_puts(s, "\n");
+ 	}
+-	spin_unlock_irqrestore(&vg->lock, flags);
+ }
+ 
+ static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+@@ -450,8 +459,10 @@ static void byt_irq_ack(struct irq_data *d)
+ 	unsigned offset = irqd_to_hwirq(d);
+ 	void __iomem *reg;
+ 
++	raw_spin_lock(&vg->lock);
+ 	reg = byt_gpio_reg(&vg->chip, offset, BYT_INT_STAT_REG);
+ 	writel(BIT(offset % 32), reg);
++	raw_spin_unlock(&vg->lock);
+ }
+ 
+ static void byt_irq_unmask(struct irq_data *d)
+@@ -463,9 +474,9 @@ static void byt_irq_unmask(struct irq_data *d)
+ 	void __iomem *reg;
+ 	u32 value;
+ 
+-	spin_lock_irqsave(&vg->lock, flags);
+-
+ 	reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
++
++	raw_spin_lock_irqsave(&vg->lock, flags);
+ 	value = readl(reg);
+ 
+ 	switch (irqd_get_trigger_type(d)) {
+@@ -486,7 +497,7 @@ static void byt_irq_unmask(struct irq_data *d)
+ 
+ 	writel(value, reg);
+ 
+-	spin_unlock_irqrestore(&vg->lock, flags);
++	raw_spin_unlock_irqrestore(&vg->lock, flags);
+ }
+ 
+ static void byt_irq_mask(struct irq_data *d)
+@@ -578,7 +589,7 @@ static int byt_gpio_probe(struct platform_device *pdev)
+ 	if (IS_ERR(vg->reg_base))
+ 		return PTR_ERR(vg->reg_base);
+ 
+-	spin_lock_init(&vg->lock);
++	raw_spin_lock_init(&vg->lock);
+ 
+ 	gc = &vg->chip;
+ 	gc->label = dev_name(&pdev->dev);
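
The Baytrail conversion above enforces two rules: every read-modify-write of a
shared register happens under one lock, and the lock is held only for the
access itself (raw_spinlock_t keeps that safe from hard-irq context on
PREEMPT_RT, where plain spinlock_t can sleep). A userspace analogue with a
mutex playing the lock's role and a variable standing in for the register:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t conf0;                 /* toy pin-config register */

    #define TRIG_MASK 0x07000000u          /* illustrative bit mask */

    static void clear_triggering(void)
    {
        pthread_mutex_lock(&lock);
        uint32_t v = conf0;                /* read */
        v &= ~TRIG_MASK;                   /* modify */
        conf0 = v;                         /* write */
        pthread_mutex_unlock(&lock);       /* scope ends with the access */
    }

    int main(void)
    {
        conf0 = 0x07000001u;
        clear_triggering();
        printf("conf0=%#x\n", conf0);      /* conf0=0x1 */
        return 0;
    }
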
+diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
+index 454536c49315..9c780740fb82 100644
+--- a/drivers/scsi/mvsas/mv_sas.c
++++ b/drivers/scsi/mvsas/mv_sas.c
+@@ -887,6 +887,8 @@ static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+ 			  struct mvs_slot_info *slot, u32 slot_idx)
+ {
++	if (!slot)
++		return;
+ 	if (!slot->task)
+ 		return;
+ 	if (!sas_protocol_ata(task->task_proto))
+diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
+index 8589eade1057..de65a8730d88 100644
+--- a/drivers/staging/iio/accel/sca3000_ring.c
++++ b/drivers/staging/iio/accel/sca3000_ring.c
+@@ -116,7 +116,7 @@ static int sca3000_read_first_n_hw_rb(struct iio_buffer *r,
+ 	if (ret)
+ 		goto error_ret;
+ 
+-	for (i = 0; i < num_read; i++)
++	for (i = 0; i < num_read / sizeof(u16); i++)
+ 		*(((u16 *)rx) + i) = be16_to_cpup((__be16 *)rx + i);
+ 
+ 	if (copy_to_user(buf, rx, num_read))
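
The sca3000 bug in one picture: num_read counts bytes, but the loop converts
16-bit samples, so it must run num_read / sizeof(u16) times or it walks off
the end of the buffer. A runnable demonstration with toy data:

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t swap16(uint16_t v)
    {
        return (uint16_t)((v << 8) | (v >> 8));
    }

    int main(void)
    {
        uint16_t rx[4] = { 0x1234, 0x5678, 0x9abc, 0xdef0 };
        size_t num_read = sizeof(rx);            /* 8 bytes, 4 samples */

        /* 4 iterations, not 8: divide the byte count by the element size */
        for (size_t i = 0; i < num_read / sizeof(uint16_t); i++)
            rx[i] = swap16(rx[i]);

        printf("%#x %#x\n", rx[0], rx[3]);       /* 0x3412 0xf0de */
        return 0;
    }
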
+diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
+index d7c5223f1c3e..2931ea9b75d1 100644
+--- a/drivers/staging/iio/adc/mxs-lradc.c
++++ b/drivers/staging/iio/adc/mxs-lradc.c
+@@ -919,11 +919,12 @@ static int mxs_lradc_read_raw(struct iio_dev *iio_dev,
+ 	case IIO_CHAN_INFO_OFFSET:
+ 		if (chan->type == IIO_TEMP) {
+ 			/* The calculated value from the ADC is in Kelvin, we
+-			 * want Celsius for hwmon so the offset is
+-			 * -272.15 * scale
++			 * want Celsius for hwmon so the offset is -273.15
++			 * The offset is applied before scaling so it is
++			 * actually -273.15 * 4 / 1.012 = -1079.644268
+ 			 */
+-			*val = -1075;
+-			*val2 = 691699;
++			*val = -1079;
++			*val2 = 644268;
+ 
+ 			return IIO_VAL_INT_PLUS_MICRO;
+ 		}
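
Re-deriving the constants in that hunk: the Kelvin-to-Celsius offset is
-273.15, applied before the channel's 1.012/4 scale, and IIO splits the result
into an integer part and a truncated microdegree part. A quick check:

    #include <stdio.h>

    int main(void)
    {
        /* Kelvin->Celsius offset, applied before the 1.012/4 scale */
        double off = -273.15 * 4.0 / 1.012;    /* = -1079.6442687... */
        int val  = (int)off;                   /* -1079 */
        int val2 = (int)((val - off) * 1e6);   /* 644268 microdegrees */

        printf("val=%d val2=%d\n", val, val2);
        return 0;
    }
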
+diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
+index 21d01a491405..e508939daea3 100644
+--- a/drivers/tty/serial/8250/8250_dma.c
++++ b/drivers/tty/serial/8250/8250_dma.c
+@@ -80,10 +80,6 @@ int serial8250_tx_dma(struct uart_8250_port *p)
+ 		return 0;
+ 
+ 	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+-	if (dma->tx_size < p->port.fifosize) {
+-		ret = -EINVAL;
+-		goto err;
+-	}
+ 
+ 	desc = dmaengine_prep_slave_single(dma->txchan,
+ 					   dma->tx_addr + xmit->tail,
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 9373cca121d3..eb8adc2e68c1 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -1998,6 +1998,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
+ #define PCIE_DEVICE_ID_WCH_CH382_2S1P	0x3250
+ #define PCIE_DEVICE_ID_WCH_CH384_4S	0x3470
+ 
++#define PCI_DEVICE_ID_EXAR_XR17V4358	0x4358
+ #define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
+ 
+ #define PCI_VENDOR_ID_PERICOM			0x12D8
+@@ -2515,6 +2516,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 	},
+ 	{
+ 		.vendor = PCI_VENDOR_ID_EXAR,
++		.device = PCI_DEVICE_ID_EXAR_XR17V4358,
++		.subvendor	= PCI_ANY_ID,
++		.subdevice	= PCI_ANY_ID,
++		.setup		= pci_xr17v35x_setup,
++	},
++	{
++		.vendor = PCI_VENDOR_ID_EXAR,
+ 		.device = PCI_DEVICE_ID_EXAR_XR17V8358,
+ 		.subvendor	= PCI_ANY_ID,
+ 		.subdevice	= PCI_ANY_ID,
+@@ -2999,6 +3007,7 @@ enum pci_board_num_t {
+ 	pbn_exar_XR17V352,
+ 	pbn_exar_XR17V354,
+ 	pbn_exar_XR17V358,
++	pbn_exar_XR17V4358,
+ 	pbn_exar_XR17V8358,
+ 	pbn_exar_ibm_saturn,
+ 	pbn_pasemi_1682M,
+@@ -3690,6 +3699,14 @@ static struct pciserial_board pci_boards[] = {
+ 		.reg_shift	= 0,
+ 		.first_offset	= 0,
+ 	},
++	[pbn_exar_XR17V4358] = {
++		.flags		= FL_BASE0,
++		.num_ports	= 12,
++		.base_baud	= 7812500,
++		.uart_offset	= 0x400,
++		.reg_shift	= 0,
++		.first_offset	= 0,
++	},
+ 	[pbn_exar_XR17V8358] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 16,
+@@ -5133,6 +5150,10 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0,
+ 		0, pbn_exar_XR17V358 },
++	{	PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V4358,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_exar_XR17V4358 },
+ 	{	PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
+ 		PCI_ANY_ID, PCI_ANY_ID,
+ 		0,
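
The 8250_pci change above is the table-driven pattern at work: supporting a
new chip (the 12-port XR17V4358) means one ID-to-board mapping plus one board
description, with no code-path changes. A simplified sketch of the lookup; the
struct layout is invented for illustration, though 0x13a8 is the real Exar
vendor ID:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct board { const char *name; int num_ports; long base_baud; };

    static const struct board boards[] = {
        { "XR17V358",  8,  7812500 },
        { "XR17V4358", 12, 7812500 },   /* the newly added entry */
    };

    struct id_map { uint16_t vendor, device; size_t board; };

    static const struct id_map ids[] = {
        { 0x13a8, 0x8358, 0 },
        { 0x13a8, 0x4358, 1 },
    };

    int main(void)
    {
        uint16_t vendor = 0x13a8, device = 0x4358;

        for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            if (ids[i].vendor == vendor && ids[i].device == device)
                printf("%s: %d ports\n", boards[ids[i].board].name,
                       boards[ids[i].board].num_ports);
        return 0;
    }
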
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index 2af32e26fafc..7e5c90eebb9c 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -135,6 +135,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+ 		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
++		xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ 	}
+ 	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ 		(pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index ad975a2975ca..41d7a05f8af4 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -2239,6 +2239,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 	u32 trb_comp_code;
+ 	int ret = 0;
+ 	int td_num = 0;
++	bool handling_skipped_tds = false;
+ 
+ 	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ 	xdev = xhci->devs[slot_id];
+@@ -2372,6 +2373,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 		ep->skip = true;
+ 		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
+ 		goto cleanup;
++	case COMP_PING_ERR:
++		ep->skip = true;
++		xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
++		goto cleanup;
+ 	default:
+ 		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
+ 			status = 0;
+@@ -2508,13 +2513,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+ 						 ep, &status);
+ 
+ cleanup:
++
++
++		handling_skipped_tds = ep->skip &&
++			trb_comp_code != COMP_MISSED_INT &&
++			trb_comp_code != COMP_PING_ERR;
++
+ 		/*
+-		 * Do not update event ring dequeue pointer if ep->skip is set.
+-		 * Will roll back to continue process missed tds.
++		 * Do not update event ring dequeue pointer if we're in a loop
++		 * processing missed tds.
+ 		 */
+-		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
++		if (!handling_skipped_tds)
+ 			inc_deq(xhci, xhci->event_ring);
+-		}
+ 
+ 		if (ret) {
+ 			urb = td->urb;
+@@ -2549,7 +2559,7 @@ cleanup:
+ 	 * Process them as short transfer until reach the td pointed by
+ 	 * the event.
+ 	 */
+-	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);
++	} while (handling_skipped_tds);
+ 
+ 	return 0;
+ }
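
The xhci fix in miniature: the "still handling skipped TDs" condition used to
be evaluated in two places and had to grow a second completion code
(COMP_PING_ERR), so the patch names it once and reuses it for both the
dequeue-pointer decision and the loop condition. A toy version of that
predicate (enum values illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum comp { COMP_OK, COMP_MISSED_INT, COMP_PING_ERR };

    /* true = do not advance the dequeue pointer yet; keep looping */
    static bool handling_skipped_tds(bool ep_skip, enum comp code)
    {
        return ep_skip && code != COMP_MISSED_INT && code != COMP_PING_ERR;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               handling_skipped_tds(true, COMP_OK),         /* 1: roll back */
               handling_skipped_tds(true, COMP_PING_ERR),   /* 0: advance */
               handling_skipped_tds(false, COMP_OK));       /* 0: advance */
        return 0;
    }
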
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index ebcec8cda858..f49d262e926b 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -153,6 +153,8 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x9056)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9060)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9061)},	/* Sierra Wireless Modem */
++	{DEVICE_SWI(0x1199, 0x9070)},	/* Sierra Wireless MC74xx/EM74xx */
++	{DEVICE_SWI(0x1199, 0x9071)},	/* Sierra Wireless MC74xx/EM74xx */
+ 	{DEVICE_SWI(0x413c, 0x81a2)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a3)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index af3dd3c55ef1..8b2c82ce36b3 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4494,7 +4494,7 @@ locked:
+ 
+ 	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
+ 		ret = -EINVAL;
+-		goto out_bargs;
++		goto out_bctl;
+ 	}
+ 
+ do_balance:
+@@ -4508,12 +4508,15 @@ do_balance:
+ 	need_unlock = false;
+ 
+ 	ret = btrfs_balance(bctl, bargs);
++	bctl = NULL;
+ 
+ 	if (arg) {
+ 		if (copy_to_user(arg, bargs, sizeof(*bargs)))
+ 			ret = -EFAULT;
+ 	}
+ 
++out_bctl:
++	kfree(bctl);
+ out_bargs:
+ 	kfree(bargs);
+ out_unlock:
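
The btrfs hunk above is an ownership-transfer cleanup: btrfs_balance() frees
bctl itself, so the caller NULLs its pointer right after the call and a single
cleanup label can free unconditionally (freeing NULL is a no-op). A compact
model of that shape, with illustrative names and errno values:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctl { int flags; };

    static int do_balance(struct ctl *c) { free(c); return 0; } /* consumes c */

    static int ioctl_balance(int bad_flags)
    {
        int ret;
        struct ctl *ctl = calloc(1, sizeof(*ctl));

        if (!ctl)
            return -12;                   /* -ENOMEM */

        if (bad_flags) {
            ret = -22;                    /* -EINVAL */
            goto out_ctl;                 /* ctl still ours: freed below */
        }

        ret = do_balance(ctl);
        ctl = NULL;                       /* ownership moved; don't free */

    out_ctl:
        free(ctl);                        /* safe on both paths */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", ioctl_balance(1), ioctl_balance(0));  /* -22 0 */
        return 0;
    }
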
+diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
+index 84d693d37428..871fcb67be97 100644
+--- a/fs/overlayfs/copy_up.c
++++ b/fs/overlayfs/copy_up.c
+@@ -81,11 +81,11 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
+ 	if (len == 0)
+ 		return 0;
+ 
+-	old_file = ovl_path_open(old, O_RDONLY);
++	old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
+ 	if (IS_ERR(old_file))
+ 		return PTR_ERR(old_file);
+ 
+-	new_file = ovl_path_open(new, O_WRONLY);
++	new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
+ 	if (IS_ERR(new_file)) {
+ 		error = PTR_ERR(new_file);
+ 		goto out_fput;
+@@ -267,7 +267,7 @@ out:
+ 
+ out_cleanup:
+ 	ovl_cleanup(wdir, newdentry);
+-	goto out;
++	goto out2;
+ }
+ 
+ /*
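
Why the copy_up change matters: without O_LARGEFILE, an open that follows
32-bit semantics fails with -EOVERFLOW on files larger than 2 GiB, so copy-up
of large files broke. The userspace equivalent of what the patch enables (the
default path is arbitrary):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        const char *path = argc > 1 ? argv[1] : "/etc/hostname";
        int fd = open(path, O_RDONLY | O_LARGEFILE);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        close(fd);
        puts("opened with O_LARGEFILE");
        return 0;
    }
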
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 33f2d27a6792..d74af7f78fec 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -474,6 +474,7 @@ static void ovl_put_super(struct super_block *sb)
+ 	mntput(ufs->upper_mnt);
+ 	for (i = 0; i < ufs->numlower; i++)
+ 		mntput(ufs->lower_mnt[i]);
++	kfree(ufs->lower_mnt);
+ 
+ 	kfree(ufs->config.lowerdir);
+ 	kfree(ufs->config.upperdir);
+@@ -981,6 +982,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
+ 		oe->lowerstack[i].dentry = stack[i].dentry;
+ 		oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+ 	}
++	kfree(stack);
+ 
+ 	root_dentry->d_fsdata = oe;
+ 
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index f6226914acfe..8d948aa9c5c9 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -85,7 +85,7 @@
+ 	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ 	SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ 	.tlv.p  = (tlv_array),\
+-	.info = snd_soc_info_volsw, \
++	.info = snd_soc_info_volsw_sx, \
+ 	.get = snd_soc_get_volsw_sx,\
+ 	.put = snd_soc_put_volsw_sx, \
+ 	.private_value = (unsigned long)&(struct soc_mixer_control) \
+@@ -155,7 +155,7 @@
+ 	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+ 	SNDRV_CTL_ELEM_ACCESS_READWRITE, \
+ 	.tlv.p  = (tlv_array), \
+-	.info = snd_soc_info_volsw, \
++	.info = snd_soc_info_volsw_sx, \
+ 	.get = snd_soc_get_volsw_sx, \
+ 	.put = snd_soc_put_volsw_sx, \
+ 	.private_value = (unsigned long)&(struct soc_mixer_control) \
+@@ -563,6 +563,8 @@ int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
+ 	struct snd_ctl_elem_value *ucontrol);
+ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
+ 	struct snd_ctl_elem_info *uinfo);
++int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
++			  struct snd_ctl_elem_info *uinfo);
+ #define snd_soc_info_bool_ext		snd_ctl_boolean_mono_info
+ int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
+ 	struct snd_ctl_elem_value *ucontrol);
+diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
+index 898be3a8db9a..6d8f8fba3341 100644
+--- a/include/sound/wm8904.h
++++ b/include/sound/wm8904.h
+@@ -119,7 +119,7 @@
+ #define WM8904_MIC_REGS  2
+ #define WM8904_GPIO_REGS 4
+ #define WM8904_DRC_REGS  4
+-#define WM8904_EQ_REGS   25
++#define WM8904_EQ_REGS   24
+ 
+ /**
+  * DRC configurations are specified with a label and a set of register
+diff --git a/kernel/module.c b/kernel/module.c
+index cfc9e843a924..3b9ff966edb9 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -906,11 +906,15 @@ void symbol_put_addr(void *addr)
+ 	if (core_kernel_text(a))
+ 		return;
+ 
+-	/* module_text_address is safe here: we're supposed to have reference
+-	 * to module from symbol_get, so it can't go away. */
++	/*
++	 * Even though we hold a reference on the module; we still need to
++	 * disable preemption in order to safely traverse the data structure.
++	 */
++	preempt_disable();
+ 	modaddr = __module_text_address(a);
+ 	BUG_ON(!modaddr);
+ 	module_put(modaddr);
++	preempt_enable();
+ }
+ EXPORT_SYMBOL_GPL(symbol_put_addr);
+ 
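
The module.c change encodes a subtle rule: holding a reference on an object
does not make it safe to walk the list that contains it; the traversal needs
its own critical section (preempt_disable() here, since the module data
structures are protected by RCU-sched). A loose userspace analogue, with a
rwlock read side playing the traversal-protection role:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
    static int module_list[4] = { 10, 20, 30, 40 };   /* toy "module list" */

    static int find_owner(int addr)
    {
        int owner = -1;

        pthread_rwlock_rdlock(&list_lock);    /* ~ preempt_disable() */
        for (int i = 0; i < 4; i++)
            if (module_list[i] == addr)
                owner = i;
        pthread_rwlock_unlock(&list_lock);    /* ~ preempt_enable() */

        return owner;
    }

    int main(void)
    {
        printf("owner=%d\n", find_owner(30));  /* owner=2 */
        return 0;
    }
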
+diff --git a/lib/fault-inject.c b/lib/fault-inject.c
+index f1cdeb024d17..6a823a53e357 100644
+--- a/lib/fault-inject.c
++++ b/lib/fault-inject.c
+@@ -44,7 +44,7 @@ static void fail_dump(struct fault_attr *attr)
+ 		printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
+ 		       "name %pd, interval %lu, probability %lu, "
+ 		       "space %d, times %d\n", attr->dname,
+-		       attr->probability, attr->interval,
++		       attr->interval, attr->probability,
+ 		       atomic_read(&attr->space),
+ 		       atomic_read(&attr->times));
+ 		if (attr->verbose > 1)
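
The fault-inject fix is a classic printf argument-order bug: the format names
"interval" before "probability", so the values must be passed in that order
too. Compilers catch mismatched types via format checking, but not swapped
arguments of the same type:

    #include <stdio.h>

    int main(void)
    {
        unsigned long interval = 10, probability = 50;

        /* wrong (the old code): prints interval 50, probability 10 */
        printf("interval %lu, probability %lu\n", probability, interval);

        /* right (the patch): values line up with the format */
        printf("interval %lu, probability %lu\n", interval, probability);
        return 0;
    }
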
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 6bf5e42d560a..1ffef05f1c1f 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2461,6 +2461,11 @@ again:
+ 			break;
+ 		}
+ 
++		if (fatal_signal_pending(current)) {
++			status = -EINTR;
++			break;
++		}
++
+ 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+ 						&page, &fsdata);
+ 		if (unlikely(status < 0))
+@@ -2498,10 +2503,6 @@ again:
+ 		written += copied;
+ 
+ 		balance_dirty_pages_ratelimited(mapping);
+-		if (fatal_signal_pending(current)) {
+-			status = -EINTR;
+-			break;
+-		}
+ 	} while (iov_iter_count(i));
+ 
+ 	return written ? written : status;
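
Shape of the filemap change: check for a fatal signal at the top of the copy
loop, before the write_begin-style setup, instead of after a page has already
been dirtied. A toy loop showing the control flow, with a flag standing in for
fatal_signal_pending(current):

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t fatal_pending;

    static long write_loop(long chunks)
    {
        long written = 0;

        while (chunks--) {
            if (fatal_pending)                  /* bail before doing work */
                return written ? written : -4;  /* -EINTR */
            /* ... write_begin / copy / write_end would go here ... */
            written++;
        }
        return written;
    }

    int main(void)
    {
        printf("%ld\n", write_loop(3));   /* 3 */
        fatal_pending = 1;
        printf("%ld\n", write_loop(3));   /* -4 */
        return 0;
    }
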
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 078832cf3636..8e792ec5e84c 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2137,7 +2137,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
+ 	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
+ 	     _pte++, address += PAGE_SIZE) {
+ 		pte_t pteval = *_pte;
+-		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
++		if (pte_none(pteval) || (pte_present(pteval) &&
++			is_zero_pfn(pte_pfn(pteval)))) {
+ 			if (++none_or_zero <= khugepaged_max_ptes_none)
+ 				continue;
+ 			else
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 36e8f1236637..57197bef5f5b 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3833,10 +3833,8 @@ int snd_hda_codec_build_pcms(struct hda_codec *codec)
+ 		return -EINVAL;
+ 
+ 	err = snd_hda_codec_parse_pcms(codec);
+-	if (err < 0) {
+-		snd_hda_codec_reset(codec);
++	if (err < 0)
+ 		return err;
+-	}
+ 
+ 	/* attach a new PCM streams */
+ 	list_for_each_entry(cpcm, &codec->pcm_list_head, list) {
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 06cc9d57ba3d..488f4c7be33e 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -819,6 +819,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
+ 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
++	SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
+diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
+index 100d92b5b77e..05977ae1ff2a 100644
+--- a/sound/soc/soc-ops.c
++++ b/sound/soc/soc-ops.c
+@@ -207,6 +207,34 @@ int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
+ EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
+ 
+ /**
++ * snd_soc_info_volsw_sx - Mixer info callback for SX TLV controls
++ * @kcontrol: mixer control
++ * @uinfo: control element information
++ *
++ * Callback to provide information about a single mixer control, or a double
++ * mixer control that spans 2 registers of the SX TLV type. SX TLV controls
++ * have a range that represents both positive and negative values either side
++ * of zero but without a sign bit.
++ *
++ * Returns 0 for success.
++ */
++int snd_soc_info_volsw_sx(struct snd_kcontrol *kcontrol,
++			  struct snd_ctl_elem_info *uinfo)
++{
++	struct soc_mixer_control *mc =
++		(struct soc_mixer_control *)kcontrol->private_value;
++
++	snd_soc_info_volsw(kcontrol, uinfo);
++	/* Max represents the number of levels in an SX control not the
++	 * maximum value, so add the minimum value back on
++	 */
++	uinfo->value.integer.max += mc->min;
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(snd_soc_info_volsw_sx);
++
++/**
+  * snd_soc_get_volsw - single mixer get callback
+  * @kcontrol: mixer control
+  * @ucontrol: control element information
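
Worked example of the arithmetic the new snd_soc_info_volsw_sx() performs: for
an SX control, max counts levels rather than naming the top value, so the
reported maximum must have the (negative) minimum added back on. The numbers
below are hypothetical; only the max += min correction is from the patch:

    #include <stdio.h>

    int main(void)
    {
        int mc_min = -8, mc_max = 16;      /* hypothetical SX parameters */

        int old_max   = mc_max;            /* what snd_soc_info_volsw gave */
        int fixed_max = mc_max + mc_min;   /* what the _sx callback reports */

        printf("old max=%d, corrected max=%d\n", old_max, fixed_max);
        return 0;
    }
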


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-11-05 23:29 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-11-05 23:29 UTC (permalink / raw
  To: gentoo-commits

commit:     713b0f04dcd8afaa0d668b6f9a07572dc9d65102
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  5 23:29:46 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  5 23:29:46 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=713b0f04

update README

 0000_README | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/0000_README b/0000_README
index 8ed7605..ff278e3 100644
--- a/0000_README
+++ b/0000_README
@@ -142,7 +142,3 @@ Desc:   BFQ v7r8 patch 3 for 4.1: Early Queue Merge (EQM)
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
-
-Patch:  5015_kdbus-8-12-2015.patch
-From:   https://lkml.org
-Desc:   Kernel-level IPC implementation


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-11-05 23:29 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-11-05 23:29 UTC (permalink / raw
  To: gentoo-commits

commit:     a345c8eee693aff99ddd3ce3beb816cb355fc388
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Thu Nov  5 23:29:08 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Thu Nov  5 23:29:08 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a345c8ee

Removing kdbus as per upstream developers. See http://lwn.net/Articles/663062/

 5015_kdbus-8-12-2015.patch | 34349 -------------------------------------------
 1 file changed, 34349 deletions(-)

diff --git a/5015_kdbus-8-12-2015.patch b/5015_kdbus-8-12-2015.patch
deleted file mode 100644
index 4e018f2..0000000
--- a/5015_kdbus-8-12-2015.patch
+++ /dev/null
@@ -1,34349 +0,0 @@
-diff --git a/Documentation/Makefile b/Documentation/Makefile
-index bc05482..e2127a7 100644
---- a/Documentation/Makefile
-+++ b/Documentation/Makefile
-@@ -1,4 +1,4 @@
- subdir-y := accounting auxdisplay blackfin connector \
--	filesystems filesystems ia64 laptops mic misc-devices \
-+	filesystems filesystems ia64 kdbus laptops mic misc-devices \
- 	networking pcmcia prctl ptp spi timers vDSO video4linux \
- 	watchdog
-diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
-index 51f4221..ec7c81b 100644
---- a/Documentation/ioctl/ioctl-number.txt
-+++ b/Documentation/ioctl/ioctl-number.txt
-@@ -292,6 +292,7 @@ Code  Seq#(hex)	Include File		Comments
- 0x92	00-0F	drivers/usb/mon/mon_bin.c
- 0x93	60-7F	linux/auto_fs.h
- 0x94	all	fs/btrfs/ioctl.h
-+0x95	all	uapi/linux/kdbus.h	kdbus IPC driver
- 0x97	00-7F	fs/ceph/ioctl.h		Ceph file system
- 0x99	00-0F				537-Addinboard driver
- 					<mailto:buk@buks.ipn.de>
-diff --git a/Documentation/kdbus/.gitignore b/Documentation/kdbus/.gitignore
-new file mode 100644
-index 0000000..b4a77cc
---- /dev/null
-+++ b/Documentation/kdbus/.gitignore
-@@ -0,0 +1,2 @@
-+*.7
-+*.html
-diff --git a/Documentation/kdbus/Makefile b/Documentation/kdbus/Makefile
-new file mode 100644
-index 0000000..8caffe5
---- /dev/null
-+++ b/Documentation/kdbus/Makefile
-@@ -0,0 +1,44 @@
-+DOCS :=	\
-+	kdbus.xml		\
-+	kdbus.bus.xml		\
-+	kdbus.connection.xml	\
-+	kdbus.endpoint.xml	\
-+	kdbus.fs.xml		\
-+	kdbus.item.xml		\
-+	kdbus.match.xml		\
-+	kdbus.message.xml	\
-+	kdbus.name.xml		\
-+	kdbus.policy.xml	\
-+	kdbus.pool.xml
-+
-+XMLFILES := $(addprefix $(obj)/,$(DOCS))
-+MANFILES := $(patsubst %.xml, %.7, $(XMLFILES))
-+HTMLFILES := $(patsubst %.xml, %.html, $(XMLFILES))
-+
-+XMLTO_ARGS := -m $(srctree)/$(src)/stylesheet.xsl --skip-validation
-+
-+quiet_cmd_db2man = MAN     $@
-+      cmd_db2man = xmlto man $(XMLTO_ARGS) -o $(obj) $<
-+%.7: %.xml
-+	@(which xmlto > /dev/null 2>&1) || \
-+	 (echo "*** You need to install xmlto ***"; \
-+	  exit 1)
-+	$(call cmd,db2man)
-+
-+quiet_cmd_db2html = HTML    $@
-+      cmd_db2html = xmlto html-nochunks $(XMLTO_ARGS) -o $(obj) $<
-+%.html: %.xml
-+	@(which xmlto > /dev/null 2>&1) || \
-+	 (echo "*** You need to install xmlto ***"; \
-+	  exit 1)
-+	$(call cmd,db2html)
-+
-+mandocs: $(MANFILES)
-+
-+htmldocs: $(HTMLFILES)
-+
-+clean-files := $(MANFILES) $(HTMLFILES)
-+
-+# we don't support other %docs targets right now
-+%docs:
-+	@true
-diff --git a/Documentation/kdbus/kdbus.bus.xml b/Documentation/kdbus/kdbus.bus.xml
-new file mode 100644
-index 0000000..83f1198
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.bus.xml
-@@ -0,0 +1,344 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.bus">
-+
-+  <refentryinfo>
-+    <title>kdbus.bus</title>
-+    <productname>kdbus.bus</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.bus</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.bus</refname>
-+    <refpurpose>kdbus bus</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      A bus is a resource that is shared between connections in order to
-+      transmit messages (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.message</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>).
-+      Each bus is independent, and operations on the bus will not have any
-+      effect on other buses. A bus is a management entity that controls the
-+      addresses of its connections, their policies and message transactions
-+      performed via this bus.
-+    </para>
-+    <para>
-+      Each bus is bound to the mount instance it was created on. It has a
-+      custom name that is unique across all buses of a domain. In
-+      <citerefentry>
-+        <refentrytitle>kdbus.fs</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      a bus is presented as a directory. No operations can be performed on
-+      the bus itself; instead you need to perform the operations on an endpoint
-+      associated with the bus. Endpoints are accessible as files underneath the
-+      bus directory. A default endpoint called <constant>bus</constant> is
-+      provided on each bus.
-+    </para>
-+    <para>
-+      Bus names may be chosen freely except for one restriction: the name must
-+      be prefixed with the numeric effective UID of the creator and a dash. This
-+      is required to avoid namespace clashes between different users. When
-+      creating a bus, the name that is passed in must be properly formatted, or
-+      the kernel will refuse creation of the bus. Example:
-+      <literal>1047-foobar</literal> is an acceptable name for a bus
-+      registered by a user with UID 1047. However,
-+      <literal>1024-foobar</literal> is not, and neither is
-+      <literal>foobar</literal>. The UID must be provided in the
-+      user-namespace of the bus owner.
-+    </para>
-+    <para>
-+      To create a new bus, you need to open the control file of a domain and
-+      employ the <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl. The control
-+      file descriptor that was used to issue
-+      <constant>KDBUS_CMD_BUS_MAKE</constant> must not previously have been
-+      used for any other control-ioctl and must be kept open for the entire
-+      life-time of the created bus. Closing it will immediately cleanup the
-+      entire bus and all its associated resources and endpoints. Every control
-+      file descriptor can only be used to create a single new bus; from that
-+      point on, it is not used for any further communication until the final
-+      <citerefentry>
-+        <refentrytitle>close</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>
-+      .
-+    </para>
-+    <para>
-+      Each bus will generate a random, 128-bit UUID upon creation. This UUID
-+      will be returned to creators of connections through
-+      <varname>kdbus_cmd_hello.id128</varname> and can be used to uniquely
-+      identify buses, even across different machines or containers. The UUID
-+      will have its variant bits set to <literal>DCE</literal>, and denote
-+      version 4 (random). For more details on UUIDs, see <ulink
-+      url="https://en.wikipedia.org/wiki/Universally_unique_identifier">
-+      the Wikipedia article on UUIDs</ulink>.
-+    </para>
-+
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Creating buses</title>
-+    <para>
-+      To create a new bus, the <constant>KDBUS_CMD_BUS_MAKE</constant>
-+      command is used. It takes a <type>struct kdbus_cmd</type> argument.
-+    </para>
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>The flags for creation.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_MAKE_ACCESS_GROUP</constant></term>
-+              <listitem>
-+                <para>Make the bus file group-accessible.</para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_MAKE_ACCESS_WORLD</constant></term>
-+              <listitem>
-+                <para>Make the bus file world-accessible.</para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Requests a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following items (see
-+            <citerefentry>
-+              <refentrytitle>kdbus.item</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>)
-+            are expected for <constant>KDBUS_CMD_BUS_MAKE</constant>.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
-+              <listitem>
-+                <para>
-+                  Contains a null-terminated string that identifies the
-+                  bus. The name must be unique across the kdbus domain and
-+                  must start with the effective UID of the caller, followed by
-+                  a '<literal>-</literal>' (dash). This item is mandatory.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
-+              <listitem>
-+                <para>
-+                  Bus-wide bloom parameters passed in a
-+                  <type>struct kdbus_bloom_parameter</type>. These settings are
-+                  copied back to new connections verbatim. This item is
-+                  mandatory. See
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.item</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>
-+                  for a more detailed description of this item.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
-+              <listitem>
-+                <para>
-+                  An optional item that contains a set of attach flags that are
-+                  returned to connections when they query the bus creator
-+                  metadata. If not set, no metadata is returned.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Unrecognized items are rejected, and the ioctl will fail with
-+      <varname>errno</varname> set to <constant>EINVAL</constant>.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_BUS_MAKE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EBADMSG</constant></term>
-+          <listitem><para>
-+            A mandatory item is missing.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The flags supplied in the <constant>struct kdbus_cmd</constant>
-+            are invalid or the supplied name does not start with the current
-+            UID and a '<literal>-</literal>' (dash).
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EEXIST</constant></term>
-+          <listitem><para>
-+            A bus of that name already exists.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ESHUTDOWN</constant></term>
-+          <listitem><para>
-+            The kdbus mount instance for the bus was already shut down.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMFILE</constant></term>
-+          <listitem><para>
-+            The maximum number of buses for the current user is exhausted.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.connection.xml b/Documentation/kdbus/kdbus.connection.xml
-new file mode 100644
-index 0000000..4bb5f30
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.connection.xml
-@@ -0,0 +1,1244 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.connection">
-+
-+  <refentryinfo>
-+    <title>kdbus.connection</title>
-+    <productname>kdbus.connection</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.connection</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.connection</refname>
-+    <refpurpose>kdbus connection</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      Connections are identified by their <emphasis>connection ID</emphasis>,
-+      internally implemented as a <type>uint64_t</type> counter.
-+      The IDs of every newly created bus start at <constant>1</constant>, and
-+      every new connection will increment the counter by <constant>1</constant>.
-+      The IDs are not reused.
-+    </para>
-+    <para>
-+      In higher level tools, the user visible representation of a connection is
-+      defined by the D-Bus protocol specification as
-+      <constant>":1.&lt;ID&gt;"</constant>.
-+    </para>
-+    <para>
-+      Messages with a specific <type>uint64_t</type> destination ID are
-+      directly delivered to the connection with the corresponding ID. Signal
-+      messages (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.message</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>)
-+      may be addressed to the special destination ID
-+      <constant>KDBUS_DST_ID_BROADCAST</constant> (~0ULL) and will then
-+      potentially be delivered to all currently active connections on the bus.
-+      However, in order to receive any signal messages, clients must subscribe
-+      to them by installing a match (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.match</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>).
-+    </para>
-+    <para>
-+      Messages synthesized and sent directly by the kernel will carry the
-+      special source ID <constant>KDBUS_SRC_ID_KERNEL</constant> (0).
-+    </para>
-+    <para>
-+      In addition to the unique <type>uint64_t</type> connection ID,
-+      established connections can request the ownership of
-+      <emphasis>well-known names</emphasis>, under which they can be found and
-+      addressed by other bus clients. A well-known name is associated with one
-+      and only one connection at a time. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.name</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      on name acquisition, the name registry, and the validity of names.
-+    </para>
-+    <para>
-+      Messages can specify the special destination ID
-+      <constant>KDBUS_DST_ID_NAME</constant> (0) and carry a well-known name
-+      in the message data. Such a message is delivered to the destination
-+      connection which owns that well-known name.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+  +-------------------------------------------------------------------------+
-+  | +---------------+     +---------------------------+                     |
-+  | | Connection    |     | Message                   | -----------------+  |
-+  | | :1.22         | --> | src: 22                   |                  |  |
-+  | |               |     | dst: 25                   |                  |  |
-+  | |               |     |                           |                  |  |
-+  | |               |     |                           |                  |  |
-+  | |               |     +---------------------------+                  |  |
-+  | |               |                                                    |  |
-+  | |               | <--------------------------------------+           |  |
-+  | +---------------+                                        |           |  |
-+  |                                                          |           |  |
-+  | +---------------+     +---------------------------+      |           |  |
-+  | | Connection    |     | Message                   | -----+           |  |
-+  | | :1.25         | --> | src: 25                   |                  |  |
-+  | |               |     | dst: 0xffffffffffffffff   | -------------+   |  |
-+  | |               |     |  (KDBUS_DST_ID_BROADCAST) |              |   |  |
-+  | |               |     |                           | ---------+   |   |  |
-+  | |               |     +---------------------------+          |   |   |  |
-+  | |               |                                            |   |   |  |
-+  | |               | <--------------------------------------------------+  |
-+  | +---------------+                                            |   |      |
-+  |                                                              |   |      |
-+  | +---------------+     +---------------------------+          |   |      |
-+  | | Connection    |     | Message                   | --+      |   |      |
-+  | | :1.55         | --> | src: 55                   |   |      |   |      |
-+  | |               |     | dst: 0 / org.foo.bar      |   |      |   |      |
-+  | |               |     |                           |   |      |   |      |
-+  | |               |     |                           |   |      |   |      |
-+  | |               |     +---------------------------+   |      |   |      |
-+  | |               |                                     |      |   |      |
-+  | |               | <------------------------------------------+   |      |
-+  | +---------------+                                     |          |      |
-+  |                                                       |          |      |
-+  | +---------------+                                     |          |      |
-+  | | Connection    |                                     |          |      |
-+  | | :1.81         |                                     |          |      |
-+  | | org.foo.bar   |                                     |          |      |
-+  | |               |                                     |          |      |
-+  | |               |                                     |          |      |
-+  | |               | <-----------------------------------+          |      |
-+  | |               |                                                |      |
-+  | |               | <----------------------------------------------+      |
-+  | +---------------+                                                       |
-+  +-------------------------------------------------------------------------+
-+    ]]></programlisting>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Privileged connections</title>
-+    <para>
-+      A connection is considered <emphasis>privileged</emphasis> if the user
-+      it was created by is the same one that created the bus, or if the creating
-+      task had <constant>CAP_IPC_OWNER</constant> set when it called
-+      <constant>KDBUS_CMD_HELLO</constant> (see below).
-+    </para>
-+    <para>
-+      Privileged connections have permission to employ certain restricted
-+      functions and commands, which are explained below and in other kdbus
-+      man-pages.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Activator and policy holder connection</title>
-+    <para>
-+      An <emphasis>activator</emphasis> connection is a placeholder for a
-+      <emphasis>well-known name</emphasis>. Messages sent to such a connection
-+      can be used to start an implementer connection, which will then get all
-+      the messages from the activator copied over. An activator connection
-+      cannot be used to send any message.
-+    </para>
-+    <para>
-+      A <emphasis>policy holder</emphasis> connection only installs a policy
-+      for one or more names. These policy entries are kept active as long as
-+      the connection is alive, and are removed once it terminates. Such a
-+      policy connection type can be used to deploy restrictions for names that
-+      are not yet active on the bus. A policy holder connection cannot be used
-+      to send any message.
-+    </para>
-+    <para>
-+      The creation of activator or policy holder connections is restricted to
-+      privileged users on the bus (see above).
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Monitor connections</title>
-+    <para>
-+      Monitors are eavesdropping connections that receive all the traffic on the
-+      bus, but are invisible to other connections. Such connections have all
-+      the properties of any other regular connection, except for the following
-+      details:
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem><para>
-+        They will get every message sent over the bus, both unicasts and
-+        broadcasts.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        Installing matches for signal messages is neither necessary
-+        nor allowed.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        They cannot send messages or be directly addressed as receiver.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        They cannot own well-known names. Therefore, they also can't operate as
-+        activators.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        Their creation and destruction will not cause
-+        <constant>KDBUS_ITEM_ID_{ADD,REMOVE}</constant> (see
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>).
-+      </para></listitem>
-+
-+      <listitem><para>
-+        They are not listed with their unique name in name registry dumps
-+        (see <constant>KDBUS_CMD_NAME_LIST</constant> in
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>), so other connections cannot detect the presence of
-+	a monitor.
-+      </para></listitem>
-+    </itemizedlist>
-+    <para>
-+      The creation of monitor connections is restricted to privileged users on
-+      the bus (see above).
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Creating connections</title>
-+    <para>
-+      A connection to a bus is created by opening an endpoint file (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.endpoint</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>)
-+      of a bus and becoming an active client with the
-+      <constant>KDBUS_CMD_HELLO</constant> ioctl. Every connection has a unique
-+      identifier on the bus and can address messages to every other connection
-+      on the same bus by using the peer's connection ID as the destination.
-+    </para>
-+    <para>
-+      The <constant>KDBUS_CMD_HELLO</constant> ioctl takes a <type>struct
-+      kdbus_cmd_hello</type> as argument.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_hello {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  __u64 attach_flags_send;
-+  __u64 attach_flags_recv;
-+  __u64 bus_flags;
-+  __u64 id;
-+  __u64 pool_size;
-+  __u64 offset;
-+  __u8 id128[16];
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem>
-+          <para>Flags to apply to this connection</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_HELLO_ACCEPT_FD</constant></term>
-+              <listitem>
-+                <para>
-+                  When this flag is set, file descriptors may be passed to
-+                  this connection in the payload of unicast messages. If it
-+                  is not set, an attempt to send file descriptors will fail
-+                  with <constant>-ECOMM</constant> on the sender's side.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_HELLO_ACTIVATOR</constant></term>
-+              <listitem>
-+                <para>
-+                  Make this connection an activator (see above). With this bit
-+                  set, an item of type <constant>KDBUS_ITEM_NAME</constant> has
-+                  to be attached. This item describes the well-known name this
-+                  connection should be an activator for.
-+                  A connection cannot be an activator and a policy holder at
-+                  the same time, so this bit is not allowed together with
-+                  <constant>KDBUS_HELLO_POLICY_HOLDER</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_HELLO_POLICY_HOLDER</constant></term>
-+              <listitem>
-+                <para>
-+                  Make this connection a policy holder (see above). With this
-+                  bit set, an item of type <constant>KDBUS_ITEM_NAME</constant>
-+                  has to be attached. This item describes the well-known name
-+                  this connection should hold a policy for.
-+                  A connection cannot be an activator and a policy holder at
-+                  the same time, so this bit is not allowed together with
-+                  <constant>KDBUS_HELLO_ACTIVATOR</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_HELLO_MONITOR</constant></term>
-+              <listitem>
-+                <para>
-+                  Make this connection a monitor connection (see above).
-+                </para>
-+                <para>
-+                  This flag can only be set by privileged bus connections. See
-+                  below for more information.
-+                  A connection cannot be a monitor and an activator or a
-+                  policy holder at the same time, so this bit is not allowed
-+                  together with <constant>KDBUS_HELLO_ACTIVATOR</constant> or
-+                  <constant>KDBUS_HELLO_POLICY_HOLDER</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Requests a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>attach_flags_send</varname></term>
-+        <listitem><para>
-+          Set the bits for metadata this connection permits to be sent to the
-+          receiving peer. Only metadata items that are both allowed to be sent
-+          by the sender and that are requested by the receiver will be attached
-+          to the message.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>attach_flags_recv</varname></term>
-+        <listitem><para>
-+          Request the attachment of metadata for each message received by this
-+          connection. See
-+          <citerefentry>
-+            <refentrytitle>kdbus</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for information about metadata, and
-+          <citerefentry>
-+            <refentrytitle>kdbus.item</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          regarding items in general.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>bus_flags</varname></term>
-+        <listitem><para>
-+          Upon successful completion of the ioctl, this member will contain the
-+          flags of the bus it connected to.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+          Upon successful completion of the command, this member will contain
-+          the numerical ID of the new connection.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>pool_size</varname></term>
-+        <listitem><para>
-+          The size of the communication pool, in bytes. The pool can be
-+          accessed by calling
-+          <citerefentry>
-+            <refentrytitle>mmap</refentrytitle>
-+            <manvolnum>2</manvolnum>
-+          </citerefentry>
-+          on the file descriptor that was used to issue the
-+          <constant>KDBUS_CMD_HELLO</constant> ioctl.
-+          The pool size of a connection must be greater than
-+          <constant>0</constant> and a multiple of
-+          <constant>PAGE_SIZE</constant>. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.pool</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for more information.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>offset</varname></term>
-+        <listitem><para>
-+          Upon return, this field contains the offset in the pool at which
-+          the returned details are stored. See below.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id128</varname></term>
-+        <listitem><para>
-+          Upon successful completion of the ioctl, this member will contain the
-+          <emphasis>128-bit UUID</emphasis> of the connected bus.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Variable list of items containing optional additional information.
-+            The following items are currently expected/valid:
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_CONN_DESCRIPTION</constant></term>
-+              <listitem>
-+                <para>
-+                  Contains a string that describes this connection, so it can
-+                  be identified later.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME</constant></term>
-+              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
-+              <listitem>
-+                <para>
-+                  For activators and policy holders only, combinations of
-+                  these two items describe policy access entries. See
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.policy</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>
-+                  for further details.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_CREDS</constant></term>
-+              <term><constant>KDBUS_ITEM_PIDS</constant></term>
-+              <term><constant>KDBUS_ITEM_SECLABEL</constant></term>
-+              <listitem>
-+                <para>
-+                  Privileged bus users may submit these types in order to
-+                  create connections with faked credentials. This information
-+                  will be returned when peer information is queried by
-+                  <constant>KDBUS_CMD_CONN_INFO</constant>. See below for more
-+                  information on retrieving information on connections.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      At the offset returned in the <varname>offset</varname> field of
-+      <type>struct kdbus_cmd_hello</type>, the kernel will store items
-+      of the following types:
-+    </para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
-+        <listitem>
-+          <para>
-+            Bloom filter parameter as defined by the bus creator.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      The offset in the pool has to be freed with the
-+      <constant>KDBUS_CMD_FREE</constant> ioctl. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for further information.
-+    </para>
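-+
-+    <para>
-+      An illustrative sketch of this sequence is shown below: open the
-+      endpoint file of a bus, issue <constant>KDBUS_CMD_HELLO</constant>,
-+      and map the pool. It is not part of the kdbus tree; the header
-+      location <literal>linux/kdbus.h</literal>, the pool size and the use
-+      of <constant>KDBUS_HELLO_ACCEPT_FD</constant> are assumptions, and
-+      error paths are compressed.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <fcntl.h>
-+#include <stdio.h>
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <sys/mman.h>
-+#include <linux/kdbus.h>   /* assumed location of the kdbus UAPI header */
-+
-+#define POOL_SIZE (16UL * 1024UL * 1024UL) /* > 0, multiple of PAGE_SIZE */
-+
-+/* Returns the connection's file descriptor, or -1 on error. */
-+int connect_to_bus(const char *ep_path)
-+{
-+    struct kdbus_cmd_hello hello;
-+    void *pool;
-+    int fd;
-+
-+    fd = open(ep_path, O_RDWR | O_CLOEXEC);
-+    if (fd < 0)
-+        return -1;
-+
-+    memset(&hello, 0, sizeof(hello));
-+    hello.size = sizeof(hello);          /* no items attached */
-+    hello.flags = KDBUS_HELLO_ACCEPT_FD; /* allow fd passing to us */
-+    hello.pool_size = POOL_SIZE;
-+
-+    if (ioctl(fd, KDBUS_CMD_HELLO, &hello) < 0)
-+        return -1;
-+
-+    printf("connected, unique ID %llu\n", (unsigned long long)hello.id);
-+
-+    /* The pool is mapped from the same file descriptor; a real client
-+     * would keep this mapping around for reading received messages. */
-+    pool = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
-+    if (pool == MAP_FAILED)
-+        return -1;
-+
-+    /*
-+     * The slice at hello.offset (the bloom filter parameter item)
-+     * should be parsed and then released with KDBUS_CMD_FREE, as
-+     * described above.
-+     */
-+    return fd;
-+}
-+]]></programlisting>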
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Retrieving information on a connection</title>
-+    <para>
-+      The <constant>KDBUS_CMD_CONN_INFO</constant> ioctl can be used to
-+      retrieve credentials and properties of the initial creator of a
-+      connection. This ioctl uses the following struct.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_info {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  __u64 id;
-+  __u64 attach_flags;
-+  __u64 offset;
-+  __u64 info_size;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Currently, no flags are supported.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
-+          and the <varname>flags</varname> field is set to
-+          <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+          The numerical ID of the connection for which information is to be
-+          retrieved. If set to a non-zero value, the
-+          <constant>KDBUS_ITEM_OWNED_NAME</constant> item is ignored.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>attach_flags</varname></term>
-+        <listitem><para>
-+          Specifies which metadata items should be attached to the answer. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.message</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>offset</varname></term>
-+        <listitem><para>
-+          When the ioctl returns, this field will contain the offset of the
-+          connection information inside the caller's pool. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.pool</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for further information.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>info_size</varname></term>
-+        <listitem><para>
-+          The kernel will return the size of the returned information, so
-+          applications can optionally
-+          <citerefentry>
-+            <refentrytitle>mmap</refentrytitle>
-+            <manvolnum>2</manvolnum>
-+          </citerefentry>
-+          specific parts of the pool. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.pool</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for further information.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following items are expected for
-+            <constant>KDBUS_CMD_CONN_INFO</constant>.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_OWNED_NAME</constant></term>
-+              <listitem>
-+                <para>
-+                  Contains the well-known name of the connection to look up.
-+                  This item is mandatory if the <varname>id</varname> field is
-+                  set to 0.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      When the ioctl returns, the following struct will be stored in the
-+      caller's pool at <varname>offset</varname>. The fields in this struct
-+      are described below.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_info {
-+  __u64 size;
-+  __u64 id;
-+  __u64 flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+          The connection's unique ID.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          The connection's flags as specified when it was created.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Depending on the <varname>flags</varname> field in
-+            <type>struct kdbus_cmd_info</type>, items of types
-+            <constant>KDBUS_ITEM_OWNED_NAME</constant> and
-+            <constant>KDBUS_ITEM_CONN_DESCRIPTION</constant> may follow here.
-+            <constant>KDBUS_ITEM_NEGOTIATE</constant> is also allowed.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Once the caller is finished with parsing the return buffer, it needs to
-+      employ the <constant>KDBUS_CMD_FREE</constant> command for the offset, in
-+      order to free the buffer part. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for further information.
-+    </para>
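-+
-+    <para>
-+      A minimal sketch of this sequence is shown below: query a peer by its
-+      unique ID, read the result from the pool, and release the slice
-+      again. It assumes the connection's pool is already mapped at
-+      <varname>pool</varname>, that the UAPI header is available as
-+      <literal>linux/kdbus.h</literal>, and that the
-+      <constant>KDBUS_CMD_FREE</constant> argument layout matches
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>;
-+      error handling is compressed.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <stdint.h>
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <linux/kdbus.h>
-+
-+int query_peer(int conn_fd, const void *pool, __u64 peer_id)
-+{
-+    struct kdbus_cmd_info cmd;
-+    struct kdbus_cmd_free cmd_free;
-+    const struct kdbus_info *info;
-+
-+    memset(&cmd, 0, sizeof(cmd));
-+    cmd.size = sizeof(cmd);
-+    cmd.id = peer_id;                      /* look up by unique ID */
-+    cmd.attach_flags = KDBUS_ATTACH_CREDS; /* request credential metadata */
-+
-+    if (ioctl(conn_fd, KDBUS_CMD_CONN_INFO, &cmd) < 0)
-+        return -1;
-+
-+    /* The answer lives in the caller's pool at the returned offset. */
-+    info = (const struct kdbus_info *)((const uint8_t *)pool + cmd.offset);
-+    (void)info; /* ... iterate info->items here ... */
-+
-+    /* Release the slice once the buffer has been parsed. */
-+    memset(&cmd_free, 0, sizeof(cmd_free));
-+    cmd_free.size = sizeof(cmd_free);
-+    cmd_free.offset = cmd.offset;
-+    return ioctl(conn_fd, KDBUS_CMD_FREE, &cmd_free);
-+}
-+]]></programlisting>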
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Getting information about a connection's bus creator</title>
-+    <para>
-+      The <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant> ioctl takes the same
-+      struct as <constant>KDBUS_CMD_CONN_INFO</constant>, but is used to
-+      retrieve information about the creator of the bus the connection is
-+      attached to. The metadata returned by this call is collected during the
-+      creation of the bus and is never altered afterwards, so it provides
-+      pristine information on the task that created the bus, at the moment when
-+      it did so.
-+    </para>
-+    <para>
-+      In response to this call, a slice in the connection's pool is allocated
-+      and filled with an object of type <type>struct kdbus_info</type>,
-+      pointed to by the ioctl's <varname>offset</varname> field.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_info {
-+  __u64 size;
-+  __u64 id;
-+  __u64 flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+          The bus ID.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          The bus flags as specified when it was created.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Metadata information is stored in items here. The item list
-+            contains a <constant>KDBUS_ITEM_MAKE_NAME</constant> item that
-+            indicates the bus name of the calling connection.
-+            <constant>KDBUS_ITEM_NEGOTIATE</constant> is allowed to probe
-+            for known item types.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Once the caller is finished with parsing the return buffer, it needs to
-+      employ the <constant>KDBUS_CMD_FREE</constant> command for the offset, in
-+      order to free the buffer part. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for further information.
-+    </para>
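-+
-+    <para>
-+      Apart from the ioctl constant, the call sequence is the same as for
-+      <constant>KDBUS_CMD_CONN_INFO</constant> (see the sketch in the
-+      previous section): issue the ioctl, parse the
-+      <type>struct kdbus_info</type> at the returned pool offset, and
-+      release the slice with <constant>KDBUS_CMD_FREE</constant>.
-+    </para>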
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Updating connection details</title>
-+    <para>
-+      Some of a connection's details can be updated with the
-+      <constant>KDBUS_CMD_CONN_UPDATE</constant> ioctl, using the file
-+      descriptor that was used to create the connection. The update command
-+      uses the following struct.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Currently, no flags are supported.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
-+          and the <varname>flags</varname> field is set to
-+          <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Items to describe the connection details to be updated. The
-+            following item types are supported.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
-+              <listitem>
-+                <para>
-+                  Supply a new set of metadata items that this connection
-+                  permits to be sent along with messages.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
-+              <listitem>
-+                <para>
-+                  Supply a new set of metadata items that this connection
-+                  requests to be attached to each message.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME</constant></term>
-+              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
-+              <listitem>
-+                <para>
-+                  Policy holder connections may supply a new set of policy
-+                  information with these items. For other connection types,
-+                  <constant>EOPNOTSUPP</constant> is returned in
-+                  <varname>errno</varname>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
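-+
-+    <para>
-+      The sketch below chains a single item after
-+      <type>struct kdbus_cmd</type> to request timestamp metadata for all
-+      received messages. The inline item layout mirrors the item header
-+      documented in
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>;
-+      the header location and the flag chosen are assumptions.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <linux/kdbus.h>
-+
-+int request_timestamps(int conn_fd)
-+{
-+    struct {
-+        struct kdbus_cmd cmd;
-+        struct {             /* inline KDBUS_ITEM_ATTACH_FLAGS_RECV item */
-+            __u64 size;
-+            __u64 type;
-+            __u64 flags;     /* item payload: the new attach flags */
-+        } item;
-+    } update;
-+
-+    memset(&update, 0, sizeof(update));
-+    update.cmd.size = sizeof(update);        /* covers the chained item */
-+    update.item.size = sizeof(update.item);  /* 24 bytes, 8-byte aligned */
-+    update.item.type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
-+    update.item.flags = KDBUS_ATTACH_TIMESTAMP;
-+
-+    return ioctl(conn_fd, KDBUS_CMD_CONN_UPDATE, &update);
-+}
-+]]></programlisting>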
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Termination of connections</title>
-+    <para>
-+      A connection can be terminated by simply calling
-+      <citerefentry>
-+        <refentrytitle>close</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>
-+      on its file descriptor. All pending incoming messages will be discarded,
-+      and the memory allocated for the pool will be freed.
-+    </para>
-+
-+    <para>
-+      An alternative way of closing down a connection is via the
-+      <constant>KDBUS_CMD_BYEBYE</constant> ioctl. This ioctl will succeed only
-+      if the message queue of the connection is empty at the time of closing;
-+      otherwise, the ioctl will fail with <varname>errno</varname> set to
-+      <constant>EBUSY</constant>. When this ioctl returns
-+      successfully, the connection has been terminated and won't accept any new
-+      messages from remote peers. This way, a connection can be terminated
-+      race-free, without losing any messages. The ioctl takes an argument of
-+      type <type>struct kdbus_cmd</type>.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Currently, no flags are supported.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will fail with
-+          <varname>errno</varname> set to <constant>EPROTO</constant>, and
-+          the <varname>flags</varname> field is set to <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following item types are supported.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
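-+
-+    <para>
-+      A race-free shutdown could hence look like the sketch below: retry
-+      <constant>KDBUS_CMD_BYEBYE</constant> while draining the queue, then
-+      close the file descriptor. The receive step is elided; see
-+      <citerefentry>
-+        <refentrytitle>kdbus.message</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for how messages are dequeued.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <errno.h>
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <unistd.h>
-+#include <linux/kdbus.h>
-+
-+void disconnect(int conn_fd)
-+{
-+    struct kdbus_cmd cmd;
-+
-+    memset(&cmd, 0, sizeof(cmd));
-+    cmd.size = sizeof(cmd);
-+
-+    /* EBUSY means messages are still queued; drain and try again. */
-+    while (ioctl(conn_fd, KDBUS_CMD_BYEBYE, &cmd) < 0) {
-+        if (errno != EBUSY)
-+            break;
-+        /* ... dequeue and process pending messages here ... */
-+    }
-+
-+    close(conn_fd); /* releases the connection and its pool */
-+}
-+]]></programlisting>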
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_HELLO</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EFAULT</constant></term>
-+          <listitem><para>
-+            The supplied pool size was 0 or not a multiple of the page size.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The flags supplied in <type>struct kdbus_cmd_hello</type>
-+            are invalid.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            An illegal combination of
-+            <constant>KDBUS_HELLO_MONITOR</constant>,
-+            <constant>KDBUS_HELLO_ACTIVATOR</constant> and
-+            <constant>KDBUS_HELLO_POLICY_HOLDER</constant> was passed in
-+            <varname>flags</varname>.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            An invalid set of items was supplied.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ECONNREFUSED</constant></term>
-+          <listitem><para>
-+            The <varname>attach_flags_send</varname> field did not satisfy
-+            the requirements of the bus.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EPERM</constant></term>
-+          <listitem><para>
-+            A <constant>KDBUS_ITEM_CREDS</constant> item was supplied, but the
-+            current user is not privileged.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ESHUTDOWN</constant></term>
-+          <listitem><para>
-+            The bus you were trying to connect to has already been shut down.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMFILE</constant></term>
-+          <listitem><para>
-+            The maximum number of connections on the bus has been reached.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EOPNOTSUPP</constant></term>
-+          <listitem><para>
-+            The endpoint does not support the connection flags supplied in
-+            <type>struct kdbus_cmd_hello</type>.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_BYEBYE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EALREADY</constant></term>
-+          <listitem><para>
-+            The connection has already been shut down.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EBUSY</constant></term>
-+          <listitem><para>
-+            There are still messages queued up in the connection's pool.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_CONN_INFO</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Invalid flags, or neither an ID nor a name was provided, or the
-+            name is invalid.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ESRCH</constant></term>
-+          <listitem><para>
-+            Connection lookup by name failed.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ENXIO</constant></term>
-+          <listitem><para>
-+            No connection with the provided connection ID was found.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_CONN_UPDATE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal flags or items.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Wildcards submitted in policy entries, or illegal sequence
-+            of policy items.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EOPNOTSUPP</constant></term>
-+          <listitem><para>
-+            Operation not supported by connection.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>E2BIG</constant></term>
-+          <listitem><para>
-+            Too many policy items attached.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.policy</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.endpoint.xml b/Documentation/kdbus/kdbus.endpoint.xml
-new file mode 100644
-index 0000000..6632485
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.endpoint.xml
-@@ -0,0 +1,429 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.endpoint">
-+
-+  <refentryinfo>
-+    <title>kdbus.endpoint</title>
-+    <productname>kdbus.endpoint</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.endpoint</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.endpoint</refname>
-+    <refpurpose>kdbus endpoint</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      Endpoints are entry points to a bus (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.bus</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>).
-+      Each bus provides a default endpoint called
-+      <literal>bus</literal>. The bus owner has the ability to create custom
-+      endpoints with specific names, permissions, and policy databases
-+      (see below). An endpoint is presented as a file underneath the
-+      directory of the parent bus.
-+    </para>
-+    <para>
-+      To create a custom endpoint, open the default endpoint
-+      (<literal>bus</literal>) and use the
-+      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> ioctl with
-+      <type>struct kdbus_cmd</type>. Custom endpoints always have a policy
-+      database that, by default, forbids any operation. You have to explicitly
-+      install policy entries to allow any operation on this endpoint.
-+    </para>
-+    <para>
-+      Once <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> has succeeded, the new
-+      endpoint will appear in the filesystem
-+      (<citerefentry>
-+        <refentrytitle>kdbus.bus</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>), and the used file descriptor will manage the
-+      newly created endpoint resource. It cannot be used to manage further
-+      resources and must be kept open as long as the endpoint is needed. The
-+      endpoint will be terminated as soon as the file descriptor is closed.
-+    </para>
-+    <para>
-+      Endpoint names may be chosen freely except for one restriction: the name
-+      must be prefixed with the numeric effective UID of the creator and a dash.
-+      This is required to avoid namespace clashes between different users. When
-+      creating an endpoint, the name that is passed in must be properly
-+      formatted or the kernel will refuse creation of the endpoint. Example:
-+      <literal>1047-my-endpoint</literal> is an acceptable name for an
-+      endpoint registered by a user with UID 1047. However,
-+      <literal>1024-my-endpoint</literal> is not, and neither is
-+      <literal>my-endpoint</literal>. The UID must be provided in the
-+      user-namespace of the bus.
-+    </para>
-+    <para>
-+      To create connections to a bus, use <constant>KDBUS_CMD_HELLO</constant>
-+      on a file descriptor returned by <function>open()</function> on an
-+      endpoint node. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for further details.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Creating custom endpoints</title>
-+    <para>
-+      To create a new endpoint, the
-+      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> command is used. Along with
-+      the endpoint's name, which will be used to expose the endpoint in the
-+      <citerefentry>
-+        <refentrytitle>kdbus.fs</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>,
-+      the command also optionally takes items to set up the endpoint's
-+      <citerefentry>
-+        <refentrytitle>kdbus.policy</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> takes a
-+      <type>struct kdbus_cmd</type> argument.
-+    </para>
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>The flags for creation.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_MAKE_ACCESS_GROUP</constant></term>
-+              <listitem>
-+                <para>Make the endpoint file group-accessible.</para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_MAKE_ACCESS_WORLD</constant></term>
-+              <listitem>
-+                <para>Make the endpoint file world-accessible.</para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Requests a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following items are expected for
-+            <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
-+              <listitem>
-+                <para>Contains a string to identify the endpoint name.</para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME</constant></term>
-+              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
-+              <listitem>
-+                <para>
-+                  These items are used to set the policy attached to the
-+                  endpoint. For more details on bus and endpoint policies, see
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.policy</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
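-+
-+    <para>
-+      The following sketch creates a custom endpoint on the bus whose
-+      default endpoint was opened as <varname>bus_fd</varname>, using a
-+      name that carries the mandatory UID prefix described above. The
-+      name suffix <literal>example</literal> and the header location are
-+      illustrative assumptions; error handling is compressed.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <stdio.h>
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <unistd.h>
-+#include <linux/kdbus.h>
-+
-+int make_endpoint(int bus_fd)
-+{
-+    struct {
-+        struct kdbus_cmd cmd;
-+        struct {             /* inline KDBUS_ITEM_MAKE_NAME item */
-+            __u64 size;
-+            __u64 type;
-+            char str[32];    /* room for "<uid>-example" plus NUL */
-+        } name;
-+    } make;
-+
-+    memset(&make, 0, sizeof(make));
-+
-+    /* Endpoint names must be prefixed with the effective UID and a dash. */
-+    snprintf(make.name.str, sizeof(make.name.str), "%u-example",
-+             (unsigned int)geteuid());
-+
-+    make.name.type = KDBUS_ITEM_MAKE_NAME;
-+    make.name.size = 2 * sizeof(__u64) + strlen(make.name.str) + 1;
-+    make.cmd.flags = KDBUS_MAKE_ACCESS_WORLD;
-+    make.cmd.size = sizeof(make.cmd) +
-+                    ((make.name.size + 7) & ~7ULL); /* 8-byte alignment */
-+
-+    /* On success, bus_fd manages the new endpoint; keep it open. */
-+    return ioctl(bus_fd, KDBUS_CMD_ENDPOINT_MAKE, &make);
-+}
-+]]></programlisting>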
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Updating endpoints</title>
-+    <para>
-+      To update an existing endpoint, the
-+      <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> command is used on the
-+      file descriptor that created the endpoint via
-+      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>. The only detail of the
-+      endpoint that can be updated is its policy. When the command is
-+      employed, the policy of the endpoint is <emphasis>replaced</emphasis>
-+      atomically with the new set of rules.
-+      The command takes a <type>struct kdbus_cmd</type> argument.
-+    </para>
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Unused for this command.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
-+          and the <varname>flags</varname> field is set to
-+          <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following items are expected for
-+            <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant>.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME</constant></term>
-+              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
-+              <listitem>
-+                <para>
-+                  These items are used to set the policy attached to the
-+                  endpoint. For more details on bus and endpoint policies, see
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.policy</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>.
-+                  Existing policy is atomically replaced with the new rules
-+                  provided.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
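-+
-+    <para>
-+      A sketch of such an update is shown below: it replaces the endpoint's
-+      policy with a single rule that lets any user talk to one well-known
-+      name. The name <literal>com.example.Service</literal> is illustrative,
-+      and the access constants and
-+      <type>struct kdbus_policy_access</type> layout are assumed to follow
-+      <citerefentry>
-+        <refentrytitle>kdbus.policy</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <linux/kdbus.h>
-+
-+int allow_talk(int ep_fd)
-+{
-+    struct {
-+        struct kdbus_cmd cmd;
-+        struct {             /* inline KDBUS_ITEM_NAME item */
-+            __u64 size;
-+            __u64 type;
-+            char str[24];    /* padded so the next item stays aligned */
-+        } name;
-+        struct {             /* inline KDBUS_ITEM_POLICY_ACCESS item */
-+            __u64 size;
-+            __u64 type;
-+            struct kdbus_policy_access access;
-+        } policy;
-+    } update;
-+
-+    memset(&update, 0, sizeof(update));
-+    update.cmd.size = sizeof(update);
-+
-+    update.name.type = KDBUS_ITEM_NAME;
-+    strcpy(update.name.str, "com.example.Service");
-+    update.name.size = 2 * sizeof(__u64) + strlen(update.name.str) + 1;
-+
-+    update.policy.type = KDBUS_ITEM_POLICY_ACCESS;
-+    update.policy.size = sizeof(update.policy);
-+    update.policy.access.type = KDBUS_POLICY_ACCESS_WORLD;
-+    update.policy.access.access = KDBUS_POLICY_TALK;
-+
-+    return ioctl(ep_fd, KDBUS_CMD_ENDPOINT_UPDATE, &update);
-+}
-+]]></programlisting>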
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> may fail with the
-+        following errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The flags supplied in the <type>struct kdbus_cmd</type>
-+            are invalid.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal combination of <constant>KDBUS_ITEM_NAME</constant> and
-+            <constant>KDBUS_ITEM_POLICY_ACCESS</constant> was provided.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EEXIST</constant></term>
-+          <listitem><para>
-+            An endpoint of that name already exists.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EPERM</constant></term>
-+          <listitem><para>
-+            The calling user is not privileged. See
-+            <citerefentry>
-+              <refentrytitle>kdbus</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for information about privileged users.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> may fail with the
-+        following errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The flags supplied in <type>struct kdbus_cmd</type>
-+            are invalid.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal combination of <constant>KDBUS_ITEM_NAME</constant> and
-+            <constant>KDBUS_ITEM_POLICY_ACCESS</constant> was provided.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.fs.xml b/Documentation/kdbus/kdbus.fs.xml
-new file mode 100644
-index 0000000..8c2a90e
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.fs.xml
-@@ -0,0 +1,124 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus_fs">
-+
-+  <refentryinfo>
-+    <title>kdbus.fs</title>
-+    <productname>kdbus.fs</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.fs</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.fs</refname>
-+    <refpurpose>kdbus file system</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>File-system Layout</title>
-+
-+    <para>
-+      The <emphasis>kdbusfs</emphasis> pseudo filesystem provides access to
-+      kdbus entities, such as <emphasis>buses</emphasis> and
-+      <emphasis>endpoints</emphasis>. Each time the filesystem is mounted,
-+      a new, isolated kdbus instance is created, which is independent from the
-+      other instances.
-+    </para>
-+    <para>
-+      The system-wide standard mount point for <emphasis>kdbusfs</emphasis> is
-+      <constant>/sys/fs/kdbus</constant>.
-+    </para>
-+
-+    <para>
-+      Buses are represented as directories in the file system layout, whereas
-+      endpoints are exposed as files inside these directories. At the top-level,
-+      a <emphasis>control</emphasis> node is present, which can be opened to
-+      create new buses via the <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl.
-+      Each <emphasis>bus</emphasis> shows a default endpoint called
-+      <varname>bus</varname>, which can be opened either to create a connection
-+      with the <constant>KDBUS_CMD_HELLO</constant> ioctl, or to create new
-+      custom endpoints for the bus with
-+      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.bus</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>,
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry> and
-+      <citerefentry>
-+        <refentrytitle>kdbus.endpoint</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more details.
-+    </para>
-+
-+    <para>Below is an example layout of the
-+    <emphasis>kdbusfs</emphasis> filesystem:</para>
-+
-+<programlisting>
-+        /sys/fs/kdbus/                          ; mount-point
-+        |-- 0-system                            ; bus directory
-+        |   |-- bus                             ; default endpoint
-+        |   `-- 1017-custom                     ; custom endpoint
-+        |-- 1000-user                           ; bus directory
-+        |   |-- bus                             ; default endpoint
-+        |   |-- 1000-service-A                  ; custom endpoint
-+        |   `-- 1000-service-B                  ; custom endpoint
-+        `-- control                             ; control file
-+</programlisting>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Mounting instances</title>
-+    <para>
-+      In order to get a new and separate kdbus environment, a new instance
-+      of <emphasis>kdbusfs</emphasis> can be mounted like this:
-+    </para>
-+<programlisting>
-+  # mount -t kdbusfs kdbusfs /tmp/new_kdbus/
-+</programlisting>
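-+
-+    <para>
-+      The same operation can be performed from C via
-+      <citerefentry>
-+        <refentrytitle>mount</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>,
-+      as in the sketch below. The source argument is not interpreted by a
-+      pseudo filesystem and is repeated here by convention; the target
-+      directory must already exist.
-+    </para>
-+
-+<programlisting><![CDATA[
-+#include <stdio.h>
-+#include <sys/mount.h>
-+
-+int main(void)
-+{
-+    /* Equivalent of: mount -t kdbusfs kdbusfs /tmp/new_kdbus/ */
-+    if (mount("kdbusfs", "/tmp/new_kdbus", "kdbusfs", 0, NULL) < 0) {
-+        perror("mount kdbusfs");
-+        return 1;
-+    }
-+    return 0;
-+}
-+]]></programlisting>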
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>mount</refentrytitle>
-+          <manvolnum>8</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.item.xml b/Documentation/kdbus/kdbus.item.xml
-new file mode 100644
-index 0000000..ee09dfa
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.item.xml
-@@ -0,0 +1,839 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus">
-+
-+  <refentryinfo>
-+    <title>kdbus.item</title>
-+    <productname>kdbus item</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.item</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.item</refname>
-+    <refpurpose>kdbus item structure, layout and usage</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      To flexibly augment transport structures, data blobs of type
-+      <type>struct kdbus_item</type> can be attached to the structs passed
-+      into the ioctls. Some ioctls make items of certain types mandatory,
-+      others are optional. Items that are unsupported by ioctls they are
-+      attached to will cause the ioctl to fail with <varname>errno</varname>
-+      set to <constant>EINVAL</constant>.
-+      Items are also used for information stored in a connection's
-+      <emphasis>pool</emphasis>, such as received messages, name lists or
-+      requested connection or bus owner information. Depending on the type of
-+      an item, its total size is either fixed or variable.
-+    </para>
-+
-+    <refsect2>
-+      <title>Chaining items</title>
-+      <para>
-+        Whenever items are used as part of the kdbus kernel API, they are
-+        embedded in structs that themselves include a size field containing
-+        the overall size of the structure.
-+        This allows multiple items to be chained up, and an item iterator
-+        (see below) is capable of detecting the end of an item chain.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Alignment</title>
-+      <para>
-+        The kernel expects all items to be aligned to 8-byte boundaries.
-+        Unaligned items will cause the ioctl they are used with to fail
-+        with <varname>errno</varname> set to <constant>EINVAL</constant>.
-+        An item that has an unaligned size itself hence needs to be padded
-+        if it is followed by another item.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Iterating items</title>
-+      <para>
-+        A simple iterator walks the item chain until it reaches the end of
-+        the embedding structure, as given by its overall size. An example
-+        implementation is shown below.
-+      </para>
-+
-+      <programlisting><![CDATA[
-+#define KDBUS_ALIGN8(val) (((val) + 7) & ~7)
-+
-+#define KDBUS_ITEM_NEXT(item) \
-+    (typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
-+
-+#define KDBUS_ITEM_FOREACH(item, head, first)                      \
-+    for ((item) = (head)->first;                                   \
-+         ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) && \
-+          ((uint8_t *)(item) >= (uint8_t *)(head));                \
-+         (item) = KDBUS_ITEM_NEXT(item))
-+      ]]></programlisting>
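-+
-+      <para>
-+        As a usage sketch, assuming <varname>cmd</varname> points to a
-+        command struct whose item array member is named
-+        <varname>items</varname>, the iterator can be used like this:
-+      </para>
-+
-+      <programlisting><![CDATA[
-+/* Sketch: walk all items attached to a command struct. */
-+struct kdbus_item *item;
-+
-+KDBUS_ITEM_FOREACH(item, cmd, items) {
-+    if (item->type == KDBUS_ITEM_NEGOTIATE)
-+        ; /* handle the negotiation item */
-+}
-+      ]]></programlisting>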
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Item layout</title>
-+    <para>
-+      A <type>struct kdbus_item</type> consists of a
-+      <varname>size</varname> field, describing its overall size, and a
-+      <varname>type</varname> field, both 64 bit wide. They are followed by
-+      a union to store information that is specific to the item's type.
-+      The struct layout is shown below.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_item {
-+  __u64 size;
-+  __u64 type;
-+  /* item payload - see below */
-+  union {
-+    __u8 data[0];
-+    __u32 data32[0];
-+    __u64 data64[0];
-+    char str[0];
-+
-+    __u64 id;
-+    struct kdbus_vec vec;
-+    struct kdbus_creds creds;
-+    struct kdbus_pids pids;
-+    struct kdbus_audit audit;
-+    struct kdbus_caps caps;
-+    struct kdbus_timestamp timestamp;
-+    struct kdbus_name name;
-+    struct kdbus_bloom_parameter bloom_parameter;
-+    struct kdbus_bloom_filter bloom_filter;
-+    struct kdbus_memfd memfd;
-+    int fds[0];
-+    struct kdbus_notify_name_change name_change;
-+    struct kdbus_notify_id_change id_change;
-+    struct kdbus_policy_access policy_access;
-+  };
-+};
-+    </programlisting>
-+
-+    <para>
-+      <type>struct kdbus_item</type> should never be used to allocate
-+      an item instance, as its size may grow in future releases of the API.
-+      Instead, it should be manually assembled by storing the
-+      <varname>size</varname>, <varname>type</varname> and payload to a
-+      struct of its own.
-+    </para>
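-+
-+    <para>
-+      As a sketch of such a manual assembly, the following declares a local
-+      struct that mimics the <varname>size</varname>/<varname>type</varname>
-+      header and carries a string payload, padded to the 8-byte alignment
-+      required for chained items (the name and item type are only examples):
-+    </para>
-+
-+    <programlisting>
-+/* Sketch: hand-assembled item with a string payload. The declared
-+ * array is padded to 8 bytes so another item may follow; the size
-+ * field records the unpadded payload size. */
-+struct {
-+  __u64 size;
-+  __u64 type;
-+  char str[KDBUS_ALIGN8(sizeof("example.name"))];
-+} name_item = {
-+  .size = 2 * sizeof(__u64) + sizeof("example.name"),
-+  .type = KDBUS_ITEM_MAKE_NAME,
-+  .str  = "example.name",
-+};
-+    </programlisting>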
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Item types</title>
-+
-+    <refsect2>
-+      <title>Negotiation item</title>
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+          <listitem><para>
-+            When this item is attached to any ioctl, programs can
-+            <emphasis>probe</emphasis> the kernel for known item types.
-+            The item carries an array of <type>uint64_t</type> values in
-+            <varname>item.data64</varname>, each set to an item type to
-+            probe. The kernel will reset each member of this array that is
-+            not recognized as valid item type to <constant>0</constant>.
-+            This way, users can negotiate kernel features at start-up to
-+            keep newer userspace compatible with older kernels. This item
-+            is never attached by the kernel in response to any command.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
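-+
-+      <para>
-+        A minimal sketch of such a probe is shown below. It attaches a
-+        <constant>KDBUS_ITEM_NEGOTIATE</constant> item carrying two type
-+        constants to an otherwise ordinary command; the command struct,
-+        ioctl and <varname>conn_fd</varname> are placeholders here (the
-+        match command is described in
-+        <citerefentry>
-+          <refentrytitle>kdbus.match</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>).
-+      </para>
-+
-+      <programlisting><![CDATA[
-+/* Sketch: probe two item types; entries the kernel does not
-+ * recognize read back as 0 after the ioctl. */
-+struct {
-+  struct kdbus_cmd_match cmd;
-+  struct {
-+    __u64 size;
-+    __u64 type;
-+    __u64 types[2];
-+  } probe;
-+} p;
-+
-+memset(&p, 0, sizeof(p));
-+p.cmd.size = sizeof(p);
-+p.probe.size = sizeof(p.probe);
-+p.probe.type = KDBUS_ITEM_NEGOTIATE;
-+p.probe.types[0] = KDBUS_ITEM_BLOOM_MASK;
-+p.probe.types[1] = KDBUS_ITEM_NAME;
-+
-+ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, &p);
-+
-+if (p.probe.types[1] != 0)
-+    ; /* KDBUS_ITEM_NAME is recognized by this kernel */
-+      ]]></programlisting>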
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Command specific items</title>
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
-+          <term><constant>KDBUS_ITEM_PAYLOAD_OFF</constant></term>
-+          <listitem><para>
-+            Messages are directly copied by the sending process into the
-+            receiver's
-+            <citerefentry>
-+              <refentrytitle>kdbus.pool</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+            This way, two peers can exchange data by effectively doing a
-+            single-copy from one process to another; the kernel will not buffer
-+            the data anywhere else. <constant>KDBUS_ITEM_PAYLOAD_VEC</constant>
-+            is used when <emphasis>sending</emphasis> messages. The item
-+            references a memory address where the payload data can be found.
-+            <constant>KDBUS_ITEM_PAYLOAD_OFF</constant> is used when messages
-+            are <emphasis>received</emphasis>, and the
-+            <constant>offset</constant> value describes the offset inside the
-+            receiving connection's
-+            <citerefentry>
-+              <refentrytitle>kdbus.pool</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            where the message payload can be found. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on passing of payload data along with a
-+            message.
-+            <programlisting>
-+struct kdbus_vec {
-+  __u64 size;
-+  union {
-+    __u64 address;
-+    __u64 offset;
-+  };
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
-+          <listitem><para>
-+            Transports a file descriptor of a <emphasis>memfd</emphasis> in
-+            <type>struct kdbus_memfd</type> in <varname>item.memfd</varname>.
-+            The <varname>size</varname> field has to match the actual size of
-+            the memfd that was specified when it was created. The
-+            <varname>start</varname> parameter denotes the offset inside the
-+            memfd at which the referenced payload starts. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on passing of payload data along with a
-+            message.
-+            <programlisting>
-+struct kdbus_memfd {
-+  __u64 start;
-+  __u64 size;
-+  int fd;
-+  __u32 __pad;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_FDS</constant></term>
-+          <listitem><para>
-+            Contains an array of <emphasis>file descriptors</emphasis>.
-+            When used with <constant>KDBUS_CMD_SEND</constant>, the values of
-+            this array must be filled with valid file descriptor numbers.
-+            When received as item attached to a message, the array will
-+            contain the numbers of the installed file descriptors, or
-+            <constant>-1</constant> in case an error occurred.
-+            In either case, the number of entries in the array is derived
-+            from the item's total size; the sketch following this list shows
-+            this, along with a <constant>KDBUS_ITEM_PAYLOAD_VEC</constant>
-+            example. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
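-+
-+      <para>
-+        The sketch below shows both patterns from this list: a
-+        <constant>KDBUS_ITEM_PAYLOAD_VEC</constant> item pointing at a local
-+        buffer for sending, and the entry count of a received
-+        <constant>KDBUS_ITEM_FDS</constant> item (here the hypothetical
-+        <varname>fds_item</varname>), derived from the item's total size.
-+        The 16-byte <varname>size</varname>/<varname>type</varname> header
-+        matches the layout shown in the <emphasis>Item layout</emphasis>
-+        section above.
-+      </para>
-+
-+      <programlisting><![CDATA[
-+/* Sketch: a payload vector item referencing a buffer to send. */
-+char payload[] = "hello";
-+
-+struct {
-+  __u64 size;
-+  __u64 type;
-+  struct kdbus_vec vec;
-+} item = {
-+  .size = sizeof(item),
-+  .type = KDBUS_ITEM_PAYLOAD_VEC,
-+  .vec = {
-+    .size = sizeof(payload),
-+    .address = (__u64)(uintptr_t)payload,
-+  },
-+};
-+
-+/* Sketch: entry count of a received KDBUS_ITEM_FDS item. */
-+size_t n_fds = (fds_item->size - offsetof(struct kdbus_item, fds)) /
-+               sizeof(int);
-+      ]]></programlisting>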
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Items specific to some commands</title>
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CANCEL_FD</constant></term>
-+          <listitem><para>
-+            Transports a file descriptor that can be used to cancel a
-+            synchronous <constant>KDBUS_CMD_SEND</constant> operation by
-+            writing to it. The file descriptor is stored in
-+            <varname>item.fds[0]</varname>. The item may only contain one
-+            file descriptor. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on this item and how to use it.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
-+          <listitem><para>
-+            Contains a set of <emphasis>bloom parameters</emphasis> as
-+            <type>struct kdbus_bloom_parameter</type> in
-+            <varname>item.bloom_parameter</varname>.
-+            The item is passed from userspace to kernel during the
-+            <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl, and returned
-+            verbatim when <constant>KDBUS_CMD_HELLO</constant> is called.
-+            The kernel does not use the bloom parameters, but they need to
-+            be known by each connection on the bus in order to define the
-+            bloom filter hash details. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on matching and bloom filters.
-+            <programlisting>
-+struct kdbus_bloom_parameter {
-+  __u64 size;
-+  __u64 n_hash;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_BLOOM_FILTER</constant></term>
-+          <listitem><para>
-+            Carries a <emphasis>bloom filter</emphasis> as
-+            <type>struct kdbus_bloom_filter</type> in
-+            <varname>item.bloom_filter</varname>. It is mandatory to send this
-+            item attached to a <type>struct kdbus_msg</type>, in case the
-+            message is a signal. This item is never transported from kernel to
-+            userspace. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on matching and bloom filters.
-+            <programlisting>
-+struct kdbus_bloom_filter {
-+  __u64 generation;
-+  __u64 data[0];
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_BLOOM_MASK</constant></term>
-+          <listitem><para>
-+            Transports a <emphasis>bloom mask</emphasis> as binary data blob
-+            stored in <varname>item.data</varname>. This item is used to
-+            describe a match entry in a connection's match database. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on matching and bloom filters.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_DST_NAME</constant></term>
-+          <listitem><para>
-+            Contains a <emphasis>well-known name</emphasis> to send a
-+            message to, as null-terminated string in
-+            <varname>item.str</varname>. This item is used with
-+            <constant>KDBUS_CMD_SEND</constant>. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on how to send a message.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
-+          <listitem><para>
-+            Contains a <emphasis>bus name</emphasis> or
-+            <emphasis>endpoint name</emphasis>, stored as null-terminated
-+            string in <varname>item.str</varname>. This item is sent from
-+            userspace to kernel when buses or endpoints are created, and
-+            returned to userspace when the bus creator information is
-+            queried. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.bus</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            and
-+            <citerefentry>
-+              <refentrytitle>kdbus.endpoint</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
-+          <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
-+          <listitem><para>
-+            Contains a set of <emphasis>attach flags</emphasis> at
-+            <emphasis>send</emphasis> or <emphasis>receive</emphasis> time. See
-+            <citerefentry>
-+              <refentrytitle>kdbus</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>,
-+            <citerefentry>
-+              <refentrytitle>kdbus.bus</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry> and
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on attach flags.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_ID</constant></term>
-+          <listitem><para>
-+            Transports the <emphasis>numerical ID</emphasis> of a
-+            connection as <type>uint64_t</type> value in
-+            <varname>item.id</varname>.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_NAME</constant></term>
-+          <listitem><para>
-+            Transports a name associated with the
-+            <emphasis>name registry</emphasis>, stored as
-+            <type>struct kdbus_name</type> in
-+            <varname>item.name</varname>; the name itself is a
-+            null-terminated string. The <varname>flags</varname> field
-+            contains the flags of the name. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.name</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on how to access the name registry of a bus.
-+            <programlisting>
-+struct kdbus_name {
-+  __u64 flags;
-+  char name[0];
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Items attached by the kernel as metadata</title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_TIMESTAMP</constant></term>
-+          <listitem><para>
-+            Contains both the <emphasis>monotonic</emphasis> and the
-+            <emphasis>realtime</emphasis> timestamp, taken when the message
-+            was processed on the kernel side.
-+            Stored as <type>struct kdbus_timestamp</type> in
-+            <varname>item.timestamp</varname>.
-+            <programlisting>
-+struct kdbus_timestamp {
-+  __u64 seqnum;
-+  __u64 monotonic_ns;
-+  __u64 realtime_ns;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CREDS</constant></term>
-+          <listitem><para>
-+            Contains a set of <emphasis>user</emphasis> and
-+            <emphasis>group</emphasis> information as 32-bit values, in the
-+            usual four flavors: real, effective, saved and filesystem related.
-+            Stored as <type>struct kdbus_creds</type> in
-+            <varname>item.creds</varname>.
-+            <programlisting>
-+struct kdbus_creds {
-+  __u32 uid;
-+  __u32 euid;
-+  __u32 suid;
-+  __u32 fsuid;
-+  __u32 gid;
-+  __u32 egid;
-+  __u32 sgid;
-+  __u32 fsgid;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_PIDS</constant></term>
-+          <listitem><para>
-+            Contains the <emphasis>PID</emphasis>, <emphasis>TID</emphasis>
-+            and <emphasis>parent PID (PPID)</emphasis> of a remote peer.
-+            Stored as <type>struct kdbus_pids</type> in
-+            <varname>item.pids</varname>.
-+            <programlisting>
-+struct kdbus_pids {
-+  __u64 pid;
-+  __u64 tid;
-+  __u64 ppid;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_AUXGROUPS</constant></term>
-+          <listitem><para>
-+            Contains the <emphasis>auxiliary (supplementary) groups</emphasis>
-+            a remote peer is a member of, stored as an array of
-+            <type>uint32_t</type> values in <varname>item.data32</varname>.
-+            The array length can be determined by taking the item's total
-+            size, subtracting the size of the header and dividing the
-+            remainder by <constant>sizeof(uint32_t)</constant>; a sketch
-+            follows this list.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_OWNED_NAME</constant></term>
-+          <listitem><para>
-+            Contains a <emphasis>well-known name</emphasis> currently owned
-+            by a connection. The name is stored as null-terminated string in
-+            <varname>item.str</varname>. Its length can also be derived from
-+            the item's total size.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_TID_COMM</constant> [*]</term>
-+          <listitem><para>
-+            Contains the <emphasis>comm</emphasis> string of a task's
-+            <emphasis>TID</emphasis> (thread ID), stored as null-terminated
-+            string in <varname>item.str</varname>. Its length can also be
-+            derived from the item's total size. Receivers of this item should
-+            not use its contents for any kind of security measures. See below.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_PID_COMM</constant> [*]</term>
-+          <listitem><para>
-+            Contains the <emphasis>comm</emphasis> string of a task's
-+            <emphasis>PID</emphasis> (process ID), stored as null-terminated
-+            string in <varname>item.str</varname>. Its length can also be
-+            derived from the item's total size. Receivers of this item should
-+            not use its contents for any kind of security measures. See below.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_EXE</constant> [*]</term>
-+          <listitem><para>
-+            Contains the <emphasis>path to the executable</emphasis> of a task,
-+            stored as null-terminated string in <varname>item.str</varname>. Its
-+            length can also be derived from the item's total size. Receivers of
-+            this item should not use its contents for any kind of security
-+            measures. See below.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CMDLINE</constant> [*]</term>
-+          <listitem><para>
-+            Contains the <emphasis>command line arguments</emphasis> of a
-+            task, stored as an <emphasis>array</emphasis> of null-terminated
-+            strings in <varname>item.str</varname>. The total length of all
-+            strings in the array can be derived from the item's total size.
-+            Receivers of this item should not use its contents for any kind
-+            of security measures. See below.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CGROUP</constant></term>
-+          <listitem><para>
-+            Contains the <emphasis>cgroup path</emphasis> of a task, stored
-+            as null-terminated string in <varname>item.str</varname>. Its
-+            length can also be derived from the item's total size.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CAPS</constant></term>
-+          <listitem><para>
-+            Contains sets of <emphasis>capabilities</emphasis>, stored as
-+            <type>struct kdbus_caps</type> in <varname>item.caps</varname>.
-+            As the item size may increase in the future, programs should be
-+            written in a way that takes
-+            <varname>item.caps.last_cap</varname> into account, and derive
-+            the number of sets and rows from the item size and the reported
-+            number of valid capability bits.
-+            <programlisting>
-+struct kdbus_caps {
-+  __u32 last_cap;
-+  __u32 caps[0];
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_SECLABEL</constant></term>
-+          <listitem><para>
-+            Contains the <emphasis>LSM label</emphasis> of a task, stored as
-+            null-terminated string in <varname>item.str</varname>. Its length
-+            can also be derived from the item's total size.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_AUDIT</constant></term>
-+          <listitem><para>
-+            Contains the audit <emphasis>sessionid</emphasis> and
-+            <emphasis>loginuid</emphasis> of a task, stored as
-+            <type>struct kdbus_audit</type> in
-+            <varname>item.audit</varname>.
-+            <programlisting>
-+struct kdbus_audit {
-+  __u32 sessionid;
-+  __u32 loginuid;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_CONN_DESCRIPTION</constant></term>
-+          <listitem><para>
-+            Contains the <emphasis>connection description</emphasis>, as set
-+            by <constant>KDBUS_CMD_HELLO</constant> or
-+            <constant>KDBUS_CMD_CONN_UPDATE</constant>, stored as
-+            null-terminated string in <varname>item.str</varname>. Its length
-+            can also be derived from the item's total size.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
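-+
-+      <para>
-+        As a sketch of the length calculation described for
-+        <constant>KDBUS_ITEM_AUXGROUPS</constant> above (the header size is
-+        the offset of the payload union inside
-+        <type>struct kdbus_item</type>):
-+      </para>
-+
-+      <programlisting><![CDATA[
-+/* Sketch: number of auxiliary groups carried by a
-+ * KDBUS_ITEM_AUXGROUPS item 'item'. */
-+size_t n_groups = (item->size - offsetof(struct kdbus_item, data32)) /
-+                  sizeof(uint32_t);
-+      ]]></programlisting>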
-+
-+      <para>
-+        All metadata is automatically translated into the
-+        <emphasis>namespaces</emphasis> of the task that receives it. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more information.
-+      </para>
-+
-+      <para>
-+        [*] Note that the content stored in metadata items of type
-+        <constant>KDBUS_ITEM_TID_COMM</constant>,
-+        <constant>KDBUS_ITEM_PID_COMM</constant>,
-+        <constant>KDBUS_ITEM_EXE</constant> and
-+        <constant>KDBUS_ITEM_CMDLINE</constant>
-+        can easily be tampered with by the sending tasks. Therefore, they should
-+        <emphasis>not</emphasis> be used for any sort of security relevant
-+        assumptions. The only reason they are transmitted is to let
-+        receivers know about details that were set when metadata was
-+        collected, even though the task they were collected from is not
-+        active any longer when the items are received.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Items used for policy entries, matches and notifications</title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
-+          <listitem><para>
-+            This item describes a <emphasis>policy access</emphasis> entry to
-+            access the policy database of a
-+            <citerefentry>
-+              <refentrytitle>kdbus.bus</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry> or
-+            <citerefentry>
-+              <refentrytitle>kdbus.endpoint</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+            Please refer to
-+            <citerefentry>
-+              <refentrytitle>kdbus.policy</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on the policy database and how to access it.
-+            <programlisting>
-+struct kdbus_policy_access {
-+  __u64 type;
-+  __u64 access;
-+  __u64 id;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_ID_ADD</constant></term>
-+          <term><constant>KDBUS_ITEM_ID_REMOVE</constant></term>
-+          <listitem><para>
-+            This item is sent as attachment to a
-+            <emphasis>kernel notification</emphasis> and indicates that a
-+            new connection was created on the bus, or that a connection was
-+            disconnected, respectively. It stores a
-+            <type>struct kdbus_notify_id_change</type> in
-+            <varname>item.id_change</varname>.
-+            The <varname>id</varname> field contains the numeric ID of the
-+            connection that was added or removed, and <varname>flags</varname>
-+            is set to the connection flags, as passed by
-+            <constant>KDBUS_CMD_HELLO</constant>. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            and
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on matches and notification messages.
-+            <programlisting>
-+struct kdbus_notify_id_change {
-+  __u64 id;
-+  __u64 flags;
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_NAME_ADD</constant></term>
-+          <term><constant>KDBUS_ITEM_NAME_REMOVE</constant></term>
-+          <term><constant>KDBUS_ITEM_NAME_CHANGE</constant></term>
-+          <listitem><para>
-+            This item is sent as attachment to a
-+            <emphasis>kernel notification</emphasis> and indicates that a
-+            <emphasis>well-known name</emphasis> appeared, disappeared or
-+            transferred to another owner on the bus. It stores a
-+            <type>struct kdbus_notify_name_change</type> in
-+            <varname>item.name_change</varname>.
-+            <varname>old_id</varname> describes the former owner of the name
-+            and has all fields set to <constant>0</constant> in case of
-+            <constant>KDBUS_ITEM_NAME_ADD</constant>.
-+            <varname>new_id</varname> describes the new owner of the name and
-+            has all fields set to <constant>0</constant> in case of
-+            <constant>KDBUS_ITEM_NAME_REMOVE</constant>.
-+            The <varname>name</varname> field contains the well-known name the
-+            notification is about, as null-terminated string. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            and
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information on matches and notification messages.
-+            <programlisting>
-+struct kdbus_notify_name_change {
-+  struct kdbus_notify_id_change old_id;
-+  struct kdbus_notify_id_change new_id;
-+  char name[0];
-+};
-+            </programlisting>
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_REPLY_TIMEOUT</constant></term>
-+          <listitem><para>
-+            This item is sent as attachment to a
-+            <emphasis>kernel notification</emphasis>. It informs the receiver
-+            that an expected reply to a message was not received in time.
-+            The remote peer ID and the message cookie are stored in the message
-+            header. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information about messages, timeouts and notifications.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ITEM_REPLY_DEAD</constant></term>
-+          <listitem><para>
-+            This item is sent as attachment to a
-+            <emphasis>kernel notification</emphasis>. It informs the receiver
-+            that a remote connection from which a reply was expected was
-+            disconnected before that reply was sent. The remote peer ID
-+            and the message
-+            cookie are stored in the message header. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for more information about messages, timeouts and notifications.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>memfd_create</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.match.xml b/Documentation/kdbus/kdbus.match.xml
-new file mode 100644
-index 0000000..ae38e04
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.match.xml
-@@ -0,0 +1,555 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.match">
-+
-+  <refentryinfo>
-+    <title>kdbus.match</title>
-+    <productname>kdbus.match</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.match</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.match</refname>
-+    <refpurpose>kdbus match</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      kdbus connections can install matches in order to subscribe to signal
-+      messages sent on the bus. Such signal messages can be either directed
-+      to a single connection (by setting a specific connection ID in
-+      <varname>struct kdbus_msg.dst_id</varname> or by sending it to a
-+      well-known name), or to potentially <emphasis>all</emphasis> currently
-+      active connections on the bus (by setting
-+      <varname>struct kdbus_msg.dst_id</varname> to
-+      <constant>KDBUS_DST_ID_BROADCAST</constant>).
-+      A signal message always has the <constant>KDBUS_MSG_SIGNAL</constant>
-+      bit set in the <varname>flags</varname> bitfield.
-+      Also, signal messages can originate from either the kernel (called
-+      <emphasis>notifications</emphasis>), or from other bus connections.
-+      In either case, a bus connection needs to have a suitable
-+      <emphasis>match</emphasis> installed in order to receive any signal
-+      message. Without any rules installed in the connection, no signal message
-+      will be received.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Matches for signal messages from other connections</title>
-+    <para>
-+      Matches for messages from other connections (not kernel notifications)
-+      are implemented as bloom filters (see below). The sender adds certain
-+      properties of the message as elements to a bloom filter bit field, and
-+      sends that along with the signal message.
-+
-+      The receiving connection adds the message properties it is interested in
-+      as elements to a bloom mask bit field, and uploads the mask as match rule,
-+      possibly along with some other rules to further limit the match.
-+
-+      The kernel will match the signal message's bloom filter against the
-+      connection's bloom mask (simply by &amp;-ing it), and will decide whether
-+      the message should be delivered to a connection.
-+    </para>
-+    <para>
-+      The kernel has no notion of any specific properties of the signal message;
-+      all it sees are the bit fields of the bloom filter and the mask to match
-+      against. The use of bloom filters allows simple and efficient matching,
-+      without exposing any message properties or internals to the kernel side.
-+      Clients need to deal with the fact that they might receive signal messages
-+      which they did not subscribe to, as the bloom filter might allow
-+      false positives to pass the filter.
-+
-+      To allow the future extension of the set of elements in the bloom filter,
-+      the filter specifies a <emphasis>generation</emphasis> number. A later
-+      generation must always contain all elements of the set of the previous
-+      generation, but can add new elements to the set. The match rules mask can
-+      carry an array with all previous generations of masks individually stored.
-+      When the filter and mask are matched by the kernel, the mask with the
-+      closest matching generation is selected as the index into the mask array.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Bloom filters</title>
-+    <para>
-+      Bloom filters allow checking whether a given word is present in a
-+      dictionary. This allows a connection to set up a mask for the
-+      information it is interested in, so that it is only delivered signal
-+      messages that have a matching filter.
-+
-+      For general information, see
-+      <ulink url="https://en.wikipedia.org/wiki/Bloom_filter">the Wikipedia
-+      article on bloom filters</ulink>.
-+    </para>
-+    <para>
-+      The size of the bloom filter is defined per bus when it is created, in
-+      <varname>kdbus_bloom_parameter.size</varname>. All bloom filters attached
-+      to signal messages on the bus must match this size, and all bloom filter
-+      matches uploaded by connections must also match the size, or a multiple
-+      thereof (see below).
-+
-+      The calculation of the mask has to be done in userspace applications. The
-+      kernel just checks the bitmasks to decide whether or not to let the
-+      message pass. All bits set in the filter must also be set in the mask
-+      (checked with bit-wise <emphasis>AND</emphasis> logic), but the mask
-+      may have more bits set than
-+      the filter. Consequently, false positive matches are expected to happen,
-+      and programs must deal with that fact by checking the contents of the
-+      payload again at receive time.
-+    </para>
-+    <para>
-+      Masks are entities that are always passed to the kernel as part of a
-+      match (with an item of type <constant>KDBUS_ITEM_BLOOM_MASK</constant>),
-+      and filters can be attached to signals, with an item of type
-+      <constant>KDBUS_ITEM_BLOOM_FILTER</constant>. For a filter to match, all
-+      its bits have to be set in the match mask as well.
-+    </para>
-+    <para>
-+      For example, consider a bus that has a bloom size of 8 bytes, and the
-+      following mask/filter combinations:
-+    </para>
-+    <programlisting><![CDATA[
-+          filter  0x0101010101010101
-+          mask    0x0101010101010101
-+                  -> matches
-+
-+          filter  0x0303030303030303
-+          mask    0x0101010101010101
-+                  -> doesn't match
-+
-+          filter  0x0101010101010101
-+          mask    0x0303030303030303
-+                  -> matches
-+    ]]></programlisting>
-+
-+    <para>
-+      Hence, in order to catch all messages, a mask filled with
-+      <constant>0xff</constant> bytes can be installed as a wildcard match rule.
-+    </para>
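-+
-+    <para>
-+      The check the kernel performs can be sketched in a few lines of C:
-+      a filter matches a mask if every bit set in the filter is also set
-+      in the mask.
-+    </para>
-+
-+    <programlisting><![CDATA[
-+/* Sketch: bloom filter/mask matching as described above. */
-+static int bloom_match(const __u64 *filter, const __u64 *mask,
-+                       size_t n_words)
-+{
-+    size_t i;
-+
-+    for (i = 0; i < n_words; i++)
-+        if ((filter[i] & mask[i]) != filter[i])
-+            return 0;
-+
-+    return 1;
-+}
-+    ]]></programlisting>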
-+
-+    <refsect2>
-+      <title>Generations</title>
-+
-+      <para>
-+        Uploaded matches may contain multiple masks, which have to be as large
-+        as the bloom filter size defined by the bus. Each block of a mask is
-+        called a <emphasis>generation</emphasis>, starting at index 0.
-+
-+        At match time, when a signal is about to be delivered, a bloom mask
-+        generation is passed, which denotes which of the bloom masks the filter
-+        should be matched against. This allows programs to provide backward
-+        compatible masks at upload time, while older clients can still match
-+        against older versions of filters.
-+      </para>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Matches for kernel notifications</title>
-+    <para>
-+      To receive kernel generated notifications (see
-+      <citerefentry>
-+        <refentrytitle>kdbus.message</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>),
-+      a connection must install match rules that are different from
-+      the bloom filter matches described in the section above. They can be
-+      filtered by the connection ID that caused the notification to be sent, by
-+      one of the names it currently owns, or by the type of the notification
-+      (ID/name add/remove/change).
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Adding a match</title>
-+    <para>
-+      To add a match, the <constant>KDBUS_CMD_MATCH_ADD</constant> ioctl is
-+      used, which takes a <type>struct kdbus_cmd_match</type> as an argument
-+      described below.
-+
-+      Note that each of the items attached to this command will internally
-+      create one match <emphasis>rule</emphasis>, and the collection of them,
-+      which is submitted as one block via the ioctl, is called a
-+      <emphasis>match</emphasis>. To allow a message to pass, all rules of a
-+      match have to be satisfied. Hence, adding more items to the command
-+      will only narrow the conditions under which a message passes, and will
-+      decrease the chance that the connection's process is woken up
-+      needlessly.
-+
-+      Multiple matches can be installed per connection. As long as one of
-+      them has a set of rules which allows the message to pass, that match
-+      is decisive.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_match {
-+  __u64 size;
-+  __u64 cookie;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>Flags to control the behavior of the ioctl.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_MATCH_REPLACE</constant></term>
-+              <listitem>
-+                <para>
-+                  Replace an existing match that was installed with the
-+                  same <varname>cookie</varname>, rather than adding a
-+                  second match alongside it.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Requests a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>cookie</varname></term>
-+        <listitem><para>
-+          A cookie which identifies the match, so it can be referred to when
-+          removing it.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+        <para>
-+          Items to define the actual rules of the matches. The following item
-+          types are expected. Each item will create one new match rule.
-+        </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_BLOOM_MASK</constant></term>
-+              <listitem>
-+                <para>
-+                  An item that carries the bloom filter mask to match against
-+                  in its data field. The payload size must match the bloom
-+                  filter size that was specified when the bus was created.
-+                  See the "Bloom filters" section above for more information on
-+                  bloom filters.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME</constant></term>
-+              <listitem>
-+                <para>
-+                  When used as part of kernel notifications, this item specifies
-+                  a name that is acquired, lost or that changed its owner (see
-+                  below). When used as part of a match for user-generated signal
-+                  messages, it specifies a name that the sending connection must
-+                  own at the time of sending the signal.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ID</constant></term>
-+              <listitem>
-+                <para>
-+                  Specify a sender connection's ID that will match this rule.
-+                  For kernel notifications, this specifies the ID of a
-+                  connection that was added to or removed from the bus.
-+                  For user-generated signals, it specifies the ID of the
-+                  connection that sent the signal message.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NAME_ADD</constant></term>
-+              <term><constant>KDBUS_ITEM_NAME_REMOVE</constant></term>
-+              <term><constant>KDBUS_ITEM_NAME_CHANGE</constant></term>
-+              <listitem>
-+                <para>
-+                  These items request delivery of kernel notifications that
-+                  describe a name acquisition, loss, or change. The details
-+                  are stored in the item's
-+                  <varname>kdbus_notify_name_change</varname> member.
-+                  All information specified must be matched in order to make
-+                  the message pass. Use
-+                  <constant>KDBUS_MATCH_ID_ANY</constant> to
-+                  match against any unique connection ID.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ID_ADD</constant></term>
-+              <term><constant>KDBUS_ITEM_ID_REMOVE</constant></term>
-+              <listitem>
-+                <para>
-+                  These items request delivery of kernel notifications that are
-+                  generated when a connection is created or terminated.
-+                  <type>struct kdbus_notify_id_change</type> is used to
-+                  store the actual match information. This item can be used to
-+                  monitor one particular connection ID, or, when the ID field
-+                  is set to <constant>KDBUS_MATCH_ID_ANY</constant>,
-+                  all of them.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
-+              <listitem><para>
-+                With this item, programs can <emphasis>probe</emphasis> the
-+                kernel for known item types. See
-+                <citerefentry>
-+                  <refentrytitle>kdbus.item</refentrytitle>
-+                  <manvolnum>7</manvolnum>
-+                </citerefentry>
-+                for more details.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Refer to
-+      <citerefentry>
-+        <refentrytitle>kdbus.message</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information on message types.
-+    </para>
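-+
-+    <para>
-+      A minimal sketch, assuming a connection file descriptor
-+      <varname>conn_fd</varname> (a placeholder) and a bus bloom size of
-+      8 bytes as in the example above, installs a wildcard bloom mask
-+      match:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+/* Sketch: install a catch-all bloom mask match. */
-+struct {
-+  struct kdbus_cmd_match cmd;
-+  struct {
-+    __u64 size;
-+    __u64 type;
-+    __u8 mask[8];
-+  } item;
-+} m;
-+
-+memset(&m, 0, sizeof(m));
-+m.cmd.size = sizeof(m);
-+m.cmd.cookie = 1;                 /* referred to again on removal */
-+m.item.size = sizeof(m.item);
-+m.item.type = KDBUS_ITEM_BLOOM_MASK;
-+memset(m.item.mask, 0xff, sizeof(m.item.mask));
-+
-+if (ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, &m) < 0)
-+    perror("KDBUS_CMD_MATCH_ADD");
-+    ]]></programlisting>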
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Removing a match</title>
-+    <para>
-+      Matches can be removed with the
-+      <constant>KDBUS_CMD_MATCH_REMOVE</constant> ioctl, which takes
-+      <type>struct kdbus_cmd_match</type> as argument, but the usage of its
-+      fields differs slightly from that of
-+      <constant>KDBUS_CMD_MATCH_ADD</constant>.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_match {
-+  __u64 size;
-+  __u64 cookie;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>cookie</varname></term>
-+        <listitem><para>
-+          The cookie of the match, as it was passed when the match was added.
-+          All matches that have this cookie will be removed.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          No flags are supported for this use case.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will fail with
-+          <errorcode>-1</errorcode>, <varname>errno</varname> will be set to
-+          <constant>EPROTO</constant>, and the <varname>flags</varname> field
-+          will be set to <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            No items are supported for this use case, but
-+            <constant>KDBUS_ITEM_NEGOTIATE</constant> is allowed nevertheless.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
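-+
-+    <para>
-+      A matching removal sketch, using the same hypothetical
-+      <varname>conn_fd</varname> and cookie as in the sketch in the
-+      previous section:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+/* Sketch: remove all matches installed with cookie 1. */
-+struct kdbus_cmd_match cmd;
-+
-+memset(&cmd, 0, sizeof(cmd));
-+cmd.size = sizeof(cmd);
-+cmd.cookie = 1;
-+
-+if (ioctl(conn_fd, KDBUS_CMD_MATCH_REMOVE, &cmd) < 0)
-+    perror("KDBUS_CMD_MATCH_REMOVE");
-+    ]]></programlisting>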
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_MATCH_ADD</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal flags or items.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EDOM</constant></term>
-+          <listitem><para>
-+            Illegal bloom filter size.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMFILE</constant></term>
-+          <listitem><para>
-+            Too many matches for this connection.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_MATCH_REMOVE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal flags.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EBADSLT</constant></term>
-+          <listitem><para>
-+            A match entry with the given cookie could not be found.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.match</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.message.xml b/Documentation/kdbus/kdbus.message.xml
-new file mode 100644
-index 0000000..0115d9d
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.message.xml
-@@ -0,0 +1,1276 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.message">
-+
-+  <refentryinfo>
-+    <title>kdbus.message</title>
-+    <productname>kdbus.message</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.message</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.message</refname>
-+    <refpurpose>kdbus message</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      A kdbus message is used to exchange information between two connections
-+      on a bus, or to transport notifications from the kernel to one or many
-+      connections. This document describes the layout of messages, how payload
-+      is added to them and how they are sent and received.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Message layout</title>
-+
-+    <para>The layout of a message is shown below.</para>
-+
-+    <programlisting>
-+  +-------------------------------------------------------------------------+
-+  | Message                                                                 |
-+  | +---------------------------------------------------------------------+ |
-+  | | Header                                                              | |
-+  | | size:          overall message size, including the data records     | |
-+  | | destination:   connection ID of the receiver                        | |
-+  | | source:        connection ID of the sender (set by kernel)          | |
-+  | | payload_type:  "DBusDBus" textual identifier stored as uint64_t     | |
-+  | +---------------------------------------------------------------------+ |
-+  | +---------------------------------------------------------------------+ |
-+  | | Data Record                                                         | |
-+  | | size:  overall record size (without padding)                        | |
-+  | | type:  type of data                                                 | |
-+  | | data:  reference to data (address or file descriptor)               | |
-+  | +---------------------------------------------------------------------+ |
-+  | +---------------------------------------------------------------------+ |
-+  | | padding bytes to the next 8 byte alignment                          | |
-+  | +---------------------------------------------------------------------+ |
-+  | +---------------------------------------------------------------------+ |
-+  | | Data Record                                                         | |
-+  | | size:  overall record size (without padding)                        | |
-+  | | ...                                                                 | |
-+  | +---------------------------------------------------------------------+ |
-+  | +---------------------------------------------------------------------+ |
-+  | | padding bytes to the next 8 byte alignment                          | |
-+  | +---------------------------------------------------------------------+ |
-+  | +---------------------------------------------------------------------+ |
-+  | | Data Record                                                         | |
-+  | | size:  overall record size                                          | |
-+  | | ...                                                                 | |
-+  | +---------------------------------------------------------------------+ |
-+  |   ... further data records ...                                          |
-+  +-------------------------------------------------------------------------+
-+    </programlisting>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Message payload</title>
-+
-+    <para>
-+      When connecting to the bus, receivers request a memory pool of a given
-+      size, large enough to carry the entire backlog of data enqueued for the
-+      connection. The pool is internally backed by a shared memory file which
-+      can be <function>mmap()</function>ed by the receiver. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information.
-+    </para>
-+
-+    <para>
-+      Message payload must be described in items attached to a message when
-+      it is sent. A receiver can access the payload by looking at the items
-+      that are attached to a message in its pool. The following items are used.
-+    </para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
-+        <listitem>
-+          <para>
-+            This item references a piece of memory on the sender side which is
-+            directly copied into the receiver's pool. This way, two peers can
-+            exchange data by effectively doing a single-copy from one process
-+            to another; the kernel will not buffer the data anywhere else.
-+            This item is never found in a message received by a connection.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><constant>KDBUS_ITEM_PAYLOAD_OFF</constant></term>
-+        <listitem>
-+          <para>
-+            This item is attached to messages on the receiving side and points
-+            to a memory area inside the receiver's pool. The
-+            <varname>offset</varname> variable in the item denotes the memory
-+            location relative to the message itself.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
-+        <listitem>
-+          <para>
-+            Messages can reference <emphasis>memfd</emphasis> files which
-+            contain the data. memfd files are tmpfs-backed files whose
-+            content can be sealed, which prevents all further writable
-+            access to it.
-+          </para>
-+          <para>
-+            Only memfds that have
-+            <constant>(F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_SEAL)
-+            </constant>
-+            set are accepted as payload data, which enforces reliable passing
-+            of data. The receiver can assume that neither the sender nor
-+            anyone else can alter the content after the message is sent. If
-+            those seals are not set on the memfd, the ioctl will fail with
-+            <errorcode>-1</errorcode>, and <varname>errno</varname> will be
-+            set to <constant>ETXTBSY</constant>. A sketch of creating such a
-+            sealed memfd follows this list.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><constant>KDBUS_ITEM_FDS</constant></term>
-+        <listitem>
-+          <para>
-+            Messages can transport regular file descriptors via
-+            <constant>KDBUS_ITEM_FDS</constant>. This item carries an array
-+            of <type>int</type> values in <varname>item.fds</varname>. The
-+            maximum number of file descriptors in the item is
-+            <constant>253</constant>, and only one item of this type is
-+            accepted per message. All passed values must be valid file
-+            descriptors; the open count of each file descriptor is increased
-+            by installing it into the receiver's task. This item can only be
-+            used for directed messages, not for broadcasts, and only to
-+            remote peers that have opted in to receiving file descriptors
-+            at connection time (<constant>KDBUS_HELLO_ACCEPT_FD</constant>).
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
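-+
-+    <para>
-+      As an illustration of the sealing requirement above, the following
-+      sketch creates a memfd suitable for
-+      <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> using only standard
-+      Linux interfaces. It assumes a glibc that exposes
-+      <function>memfd_create()</function> (2.27 or newer; older systems can
-+      use <function>syscall()</function> instead), and error handling is
-+      kept to a minimum.
-+    </para>
-+
-+    <programlisting>
-+#define _GNU_SOURCE
-+#include &lt;sys/mman.h>
-+#include &lt;fcntl.h>
-+#include &lt;unistd.h>
-+
-+/* Create a memfd holding a copy of data[0..len) and seal it with the
-+ * four seals required for KDBUS_ITEM_PAYLOAD_MEMFD. */
-+static int make_sealed_memfd(const void *data, size_t len)
-+{
-+  int fd = memfd_create("kdbus-payload", MFD_ALLOW_SEALING);
-+
-+  if (fd &lt; 0)
-+    return -1;
-+
-+  if (write(fd, data, len) != (ssize_t)len ||
-+      fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
-+                             F_SEAL_WRITE | F_SEAL_SEAL) &lt; 0) {
-+    close(fd);
-+    return -1;
-+  }
-+
-+  return fd;
-+}
-+    </programlisting>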
-+
-+    <para>
-+      The sender must not make any assumptions about the form in which data
-+      is received by the remote peer. The kernel is free to re-pack multiple
-+      <constant>KDBUS_ITEM_PAYLOAD_VEC</constant> and
-+      <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> payloads. For instance, the
-+      kernel may decide to merge multiple <constant>VECs</constant> into a
-+      single <constant>VEC</constant>, inline <constant>MEMFD</constant>
-+      payloads into memory, or merge all passed <constant>VECs</constant> into
-+      a single <constant>MEMFD</constant>. However, the kernel preserves the
-+      order of passed data: the order of all <constant>VEC</constant> and
-+      <constant>MEMFD</constant> items is not changed with respect to each
-+      other. In other words, all passed <constant>VEC</constant> and
-+      <constant>MEMFD</constant> data payloads are treated as a single stream
-+      of data that may be received by the remote peer in a different set of
-+      chunks than it was sent as.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Sending messages</title>
-+
-+    <para>
-+      Messages are passed to the kernel with the
-+      <constant>KDBUS_CMD_SEND</constant> ioctl. Depending on the destination
-+      address of the message, the kernel delivers the message to the specific
-+      destination connection, or to some subset of all connections on the same
-+      bus. Sending messages across buses is not possible. Messages are always
-+      queued in the memory pool of the destination connection (see above).
-+    </para>
-+
-+    <para>
-+      The <constant>KDBUS_CMD_SEND</constant> ioctl uses a
-+      <type>struct kdbus_cmd_send</type> to describe the message
-+      transfer.
-+    </para>
-+    <programlisting>
-+struct kdbus_cmd_send {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  __u64 msg_address;
-+  struct kdbus_msg_info reply;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>Flags for message delivery.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_SEND_SYNC_REPLY</constant></term>
-+              <listitem>
-+                <para>
-+                  By default, all calls to kdbus are considered asynchronous
-+                  and non-blocking. However, as many use cases need to wait
-+                  for a remote peer to answer a method call, there is a way
-+                  to send a message and wait for a reply in a synchronous
-+                  fashion; this is what the
-+                  <constant>KDBUS_SEND_SYNC_REPLY</constant> flag controls.
-+                  The <constant>KDBUS_CMD_SEND</constant> ioctl will block
-+                  until the reply has arrived, the timeout limit is reached,
-+                  the remote connection is shut down, or a signal arrives
-+                  before any reply; see
-+                  <citerefentry>
-+                    <refentrytitle>signal</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>.
-+
-+                  The offset of the reply message in the sender's pool is stored
-+                  in <varname>reply</varname> when the ioctl has returned without
-+                  error. Hence, there is no need for another
-+                  <constant>KDBUS_CMD_RECV</constant> ioctl or anything else to
-+                  receive the reply.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Request a set of valid flags for this ioctl. When this bit
-+                  is set, no action is taken; the ioctl will fail with
-+                  <errorcode>-1</errorcode>, and <varname>errno</varname>
-+                  will be set to <constant>EPROTO</constant>.
-+                  Once the ioctl has returned, the <varname>flags</varname>
-+                  field will have all bits set that the kernel recognizes as
-+                  valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>msg_address</varname></term>
-+        <listitem><para>
-+          In this field, users have to provide a pointer to a message
-+          (<type>struct kdbus_msg</type>) to send. See below for a
-+          detailed description.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>reply</varname></term>
-+        <listitem><para>
-+          Only used for synchronous replies. See description of
-+          <type>struct kdbus_cmd_recv</type> for more details.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            The following items are currently recognized.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_CANCEL_FD</constant></term>
-+              <listitem>
-+                <para>
-+                  When this optional item is passed in, and the call is
-+                  executed as a SYNC call, the passed-in file descriptor can
-+                  be used as an alternative cancellation point. The kernel
-+                  will call
-+                  <citerefentry>
-+                    <refentrytitle>poll</refentrytitle>
-+                    <manvolnum>2</manvolnum>
-+                  </citerefentry>
-+                  on this file descriptor, and once it reports any incoming
-+                  bytes, the blocking send operation will be canceled; the
-+                  blocking, synchronous ioctl call will return
-+                  <errorcode>-1</errorcode>, and <varname>errno</varname> will
-+                  be set to <errorname>ECANCELED</errorname>.
-+                  Any type of file descriptor on which
-+                  <citerefentry>
-+                    <refentrytitle>poll</refentrytitle>
-+                    <manvolnum>2</manvolnum>
-+                  </citerefentry>
-+                  can be called can be used as payload to this item; for
-+                  example, an eventfd can be used for this purpose, see
-+                  <citerefentry>
-+                    <refentrytitle>eventfd</refentrytitle>
-+                    <manvolnum>2</manvolnum>
-+                  </citerefentry>.
-+                  For asynchronous message sending, this item is allowed but
-+                  ignored. A sketch using an eventfd this way follows this
-+                  field list.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
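-+
-+    <para>
-+      As a sketch of the <constant>KDBUS_ITEM_CANCEL_FD</constant> mechanism
-+      described above, the function below attaches an eventfd to a
-+      synchronous send. This is illustrative only: the kdbus UAPI header is
-+      assumed to live at <filename>linux/kdbus.h</filename>, the
-+      <constant>ALIGN8()</constant> macro is a local helper for the 8-byte
-+      item alignment, and the use of the <varname>fds</varname> union member
-+      for this item follows
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+
-+    <programlisting>
-+#include &lt;linux/kdbus.h>    /* assumed UAPI header location */
-+#include &lt;sys/eventfd.h>
-+#include &lt;sys/ioctl.h>
-+#include &lt;stddef.h>
-+#include &lt;stdint.h>
-+#include &lt;stdlib.h>
-+#include &lt;unistd.h>
-+
-+#define ALIGN8(x) (((x) + 7ULL) &amp; ~7ULL)  /* local helper */
-+
-+/* Send 'msg' synchronously; a write() to the eventfd returned via
-+ * 'efd_out' from another thread aborts the blocking ioctl with
-+ * errno == ECANCELED. 'msg' must be fully prepared (see below) and
-+ * carry KDBUS_MSG_EXPECT_REPLY plus a timeout and cookie. */
-+static int send_sync_cancellable(int conn_fd, struct kdbus_msg *msg,
-+                                 int *efd_out)
-+{
-+  uint64_t item_size = offsetof(struct kdbus_item, fds) + sizeof(int);
-+  uint64_t cmd_size = sizeof(struct kdbus_cmd_send) + ALIGN8(item_size);
-+  struct kdbus_cmd_send *cmd;
-+  int efd, ret;
-+
-+  efd = eventfd(0, EFD_CLOEXEC);
-+  if (efd &lt; 0)
-+    return -1;
-+
-+  cmd = calloc(1, cmd_size);
-+  if (!cmd) {
-+    close(efd);
-+    return -1;
-+  }
-+
-+  cmd->size = cmd_size;
-+  cmd->flags = KDBUS_SEND_SYNC_REPLY;  /* block until the reply arrives */
-+  cmd->msg_address = (uintptr_t)msg;
-+  cmd->items[0].size = item_size;      /* item sizes are unpadded */
-+  cmd->items[0].type = KDBUS_ITEM_CANCEL_FD;
-+  cmd->items[0].fds[0] = efd;
-+
-+  *efd_out = efd;                      /* hand it to the canceller */
-+  ret = ioctl(conn_fd, KDBUS_CMD_SEND, cmd);
-+
-+  free(cmd);
-+  return ret;
-+}
-+    </programlisting>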
-+
-+    <para>
-+      The message referenced by the <varname>msg_address</varname> above has
-+      the following layout.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_msg {
-+  __u64 size;
-+  __u64 flags;
-+  __s64 priority;
-+  __u64 dst_id;
-+  __u64 src_id;
-+  __u64 payload_type;
-+  __u64 cookie;
-+  __u64 timeout_ns;
-+  __u64 cookie_reply;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>Flags to describe message details.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_MSG_EXPECT_REPLY</constant></term>
-+              <listitem>
-+                <para>
-+                  Expect a reply to this message from the remote peer. With
-+                  this bit set, the <varname>timeout_ns</varname> field must
-+                  be set to a non-zero deadline by which the receiving peer
-+                  is expected to reply. If such a reply is not received in
-+                  time, the sender will be notified with a timeout message
-+                  (see below). The value must be an absolute timestamp, in
-+                  nanoseconds and based on
-+                  <constant>CLOCK_MONOTONIC</constant>.
-+                </para><para>
-+                  For a message to be accepted as reply, it must be a direct
-+                  message to the original sender (not a broadcast and not a
-+                  signal message), and its
-+                  <varname>kdbus_msg.cookie_reply</varname> must match the
-+                  previous message's <varname>kdbus_msg.cookie</varname>.
-+                </para><para>
-+                  Expected replies also temporarily open the policy of the
-+                  sending connection, so the other peer is allowed to respond
-+                  within the given time window.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_MSG_NO_AUTO_START</constant></term>
-+              <listitem>
-+                <para>
-+                  By default, when a message is sent to an activator
-+                  connection, the activator is notified and will start an
-+                  implementer. This flag inhibits that behavior. With this bit
-+                  set, and the remote being an activator, the ioctl will fail
-+                  with <varname>errno</varname> set to
-+                  <constant>EADDRNOTAVAIL</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Requests a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>priority</varname></term>
-+        <listitem><para>
-+          The priority of this message. Receiving messages (see below) may
-+          optionally be constrained to messages of at least a given priority.
-+          This allows for use cases where timing-critical data is interleaved
-+          with control data on the same connection. If unused, the priority
-+          field should be set to <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>dst_id</varname></term>
-+        <listitem><para>
-+          The numeric ID of the destination connection, or
-+          <constant>KDBUS_DST_ID_BROADCAST</constant>
-+          (~0ULL) to address every peer on the bus, or
-+          <constant>KDBUS_DST_ID_NAME</constant> (0) to resolve the
-+          destination dynamically from the bus' name registry.
-+          In the latter case, an item of type
-+          <constant>KDBUS_ITEM_DST_NAME</constant> is mandatory.
-+          Also see
-+          <citerefentry>
-+            <refentrytitle>kdbus.name</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>src_id</varname></term>
-+        <listitem><para>
-+          Upon return of the ioctl, this member will contain the sending
-+          connection's numerical ID. It should be set to 0 at send time.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>payload_type</varname></term>
-+        <listitem><para>
-+          Type of the payload in the actual data records. Currently, only
-+          <constant>KDBUS_PAYLOAD_DBUS</constant> is accepted as input value
-+          of this field. When receiving messages that are generated by the
-+          kernel (notifications), this field will contain
-+          <constant>KDBUS_PAYLOAD_KERNEL</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>cookie</varname></term>
-+        <listitem><para>
-+          Cookie of this message, for later recognition. Also, when replying
-+          to a message (see above), the <varname>cookie_reply</varname>
-+          field must match this value.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>timeout_ns</varname></term>
-+        <listitem><para>
-+          If the message sent requires a reply from the remote peer (see above),
-+          this field contains the timeout in absolute nanoseconds based on
-+          <constant>CLOCK_MONOTONIC</constant>. Also see
-+          <citerefentry>
-+            <refentrytitle>clock_gettime</refentrytitle>
-+            <manvolnum>2</manvolnum>
-+          </citerefentry>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>cookie_reply</varname></term>
-+        <listitem><para>
-+          If the message sent is a reply to another message, this field must
-+          match the cookie of the previously received message.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            A dynamically sized list of items to contain additional information.
-+            The following items are expected/valid:
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
-+              <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
-+              <term><constant>KDBUS_ITEM_FDS</constant></term>
-+              <listitem>
-+                <para>
-+                  Actual data records containing the payload. See section
-+                  "Message payload".
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_BLOOM_FILTER</constant></term>
-+              <listitem>
-+                <para>
-+                  Bloom filter for matches (see below).
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_ITEM_DST_NAME</constant></term>
-+              <listitem>
-+                <para>
-+                  Well-known name to send this message to. Required if
-+                  <varname>dst_id</varname> is set to
-+                  <constant>KDBUS_DST_ID_NAME</constant>.
-+                  If a connection holding the given name can't be found,
-+                  the ioctl will fail with <varname>errno</varname> set to
-+                  <constant>ESRCH</constant>.
-+                </para>
-+                <para>
-+                  For messages to a unique name (ID), this item is optional. If
-+                  present, the kernel will make sure the name owner matches the
-+                  given unique name. This allows programs to tie the message
-+                  sending to the condition that a name is currently owned by a
-+                  certain unique name.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      The message will be augmented by the requested metadata items when
-+      queued into the receiver's pool. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      and
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information on metadata.
-+    </para>
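-+
-+    <para>
-+      Putting the pieces together, the following sketch sends a memory
-+      buffer to a well-known name using a single
-+      <constant>KDBUS_ITEM_PAYLOAD_VEC</constant> item. The same header and
-+      <constant>ALIGN8()</constant> assumptions as in the earlier sketch
-+      apply, item field names follow
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>,
-+      and the cookie value is an arbitrary example.
-+    </para>
-+
-+    <programlisting>
-+#include &lt;string.h>
-+
-+static int send_to_name(int conn_fd, const char *name,
-+                        const void *buf, uint64_t buf_len)
-+{
-+  uint64_t name_size = offsetof(struct kdbus_item, str) + strlen(name) + 1;
-+  uint64_t vec_size = offsetof(struct kdbus_item, vec) +
-+                      sizeof(struct kdbus_vec);
-+  uint64_t msg_size = sizeof(struct kdbus_msg) +
-+                      ALIGN8(name_size) + ALIGN8(vec_size);
-+  struct kdbus_msg *msg = calloc(1, msg_size);
-+  struct kdbus_cmd_send cmd = {
-+    .size = sizeof(cmd),
-+    .msg_address = (uintptr_t)msg,
-+  };
-+  struct kdbus_item *item;
-+  int ret;
-+
-+  if (!msg)
-+    return -1;
-+
-+  msg->size = msg_size;
-+  msg->dst_id = KDBUS_DST_ID_NAME;     /* resolve via the name registry */
-+  msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+  msg->cookie = 1;                     /* arbitrary example cookie */
-+
-+  item = msg->items;                   /* mandatory for KDBUS_DST_ID_NAME */
-+  item->size = name_size;              /* item sizes are unpadded */
-+  item->type = KDBUS_ITEM_DST_NAME;
-+  strcpy(item->str, name);
-+
-+  item = (struct kdbus_item *)((uint8_t *)item + ALIGN8(name_size));
-+  item->size = vec_size;
-+  item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+  item->vec.address = (uintptr_t)buf;  /* copied into the receiver's pool */
-+  item->vec.size = buf_len;
-+
-+  ret = ioctl(conn_fd, KDBUS_CMD_SEND, &amp;cmd);
-+  free(msg);
-+  return ret;
-+}
-+    </programlisting>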
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Receiving messages</title>
-+
-+    <para>
-+      Messages are received by the client with the
-+      <constant>KDBUS_CMD_RECV</constant> ioctl. The endpoint file of the bus
-+      supports <function>poll()/epoll()/select()</function>; when new messages
-+      are available on the connection's file descriptor,
-+      <constant>POLLIN</constant> is reported. For compatibility reasons,
-+      <constant>POLLOUT</constant> is always reported as well. Note, however,
-+      that the latter does not guarantee that a message can in fact be sent, as
-+      this depends on how many pending messages the receiver has in its pool.
-+    </para>
-+
-+    <para>
-+      With the <constant>KDBUS_CMD_RECV</constant> ioctl, a
-+      <type>struct kdbus_cmd_recv</type> is used.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_recv {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  __s64 priority;
-+  __u64 dropped_msgs;
-+  struct kdbus_msg_info msg;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>Flags to control the receive command.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_RECV_PEEK</constant></term>
-+              <listitem>
-+                <para>
-+                  Just return the location of the next message. Do not install
-+                  file descriptors or anything else. This is usually used to
-+                  determine the sender of the next queued message.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_RECV_DROP</constant></term>
-+              <listitem>
-+                <para>
-+                  Drop the next message without doing anything else with it,
-+                  and free the pool slice. This is a shortcut for
-+                  <constant>KDBUS_RECV_PEEK</constant> followed by
-+                  <constant>KDBUS_CMD_FREE</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_RECV_USE_PRIORITY</constant></term>
-+              <listitem>
-+                <para>
-+                  Dequeue messages ordered by their priority, filtering
-+                  them by the <varname>priority</varname> field (see below).
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Request a set of valid flags for this ioctl. When this bit
-+                  is set, no action is taken; the ioctl will fail with
-+                  <errorcode>-1</errorcode>, and <varname>errno</varname>
-+                  will be set to <constant>EPROTO</constant>.
-+                  Once the ioctl has returned, the <varname>flags</varname>
-+                  field will have all bits set that the kernel recognizes as
-+                  valid for this command.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. If the <varname>dropped_msgs</varname>
-+          field is non-zero, <constant>KDBUS_RECV_RETURN_DROPPED_MSGS</constant>
-+          is set. If a file descriptor could not be installed, the
-+          <constant>KDBUS_RECV_RETURN_INCOMPLETE_FDS</constant> flag is set.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>priority</varname></term>
-+        <listitem><para>
-+          With <constant>KDBUS_RECV_USE_PRIORITY</constant> set in
-+          <varname>flags</varname>, messages will be dequeued ordered by their
-+          priority, starting with the highest value. Also, messages will be
-+          filtered by the value given in this field, so the returned message
-+          will at least have the requested priority. If no such message is
-+          waiting in the queue, the ioctl will fail, and
-+          <varname>errno</varname> will be set to <constant>EAGAIN</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>dropped_msgs</varname></term>
-+        <listitem><para>
-+          Whenever a message with <constant>KDBUS_MSG_SIGNAL</constant> is
-+          sent but cannot be queued on a peer (e.g., because it contains FDs
-+          but the peer does not support FDs, or because there is no space
-+          left in the peer's pool), the <varname>dropped_msgs</varname>
-+          counter of the peer is incremented. On the next RECV ioctl, the
-+          <varname>dropped_msgs</varname> field is copied into the ioctl
-+          struct and cleared on the peer. If it was non-zero, the
-+          <constant>KDBUS_RECV_RETURN_DROPPED_MSGS</constant> flag will be
-+          set in <varname>return_flags</varname>. Note that this will only
-+          happen if the ioctl succeeded or failed with
-+          <constant>EAGAIN</constant>. In other error cases, the
-+          <varname>dropped_msgs</varname> field of the peer is left
-+          untouched.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>msg</varname></term>
-+        <listitem><para>
-+          Embedded struct containing information on the received message when
-+          this command succeeded (see below).
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem><para>
-+          Items to specify further details for the receive command.
-+          Currently unused, and all items will be rejected with
-+          <varname>errno</varname> set to <constant>EINVAL</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Both <type>struct kdbus_cmd_recv</type> and
-+      <type>struct kdbus_cmd_send</type> embed
-+      <type>struct kdbus_msg_info</type>.
-+      For the <constant>KDBUS_CMD_SEND</constant> ioctl, it is used to catch
-+      synchronous replies, if one was requested, and is unused otherwise.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_msg_info {
-+  __u64 offset;
-+  __u64 msg_size;
-+  __u64 return_flags;
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>offset</varname></term>
-+        <listitem><para>
-+          Upon return of the ioctl, this field contains the offset in the
-+          receiver's memory pool. The memory must be freed with
-+          <constant>KDBUS_CMD_FREE</constant>. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.pool</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for further details.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>msg_size</varname></term>
-+        <listitem><para>
-+          Upon successful return of the ioctl, this field contains the size of
-+          the allocated slice at offset <varname>offset</varname>.
-+          It is the size of the stored <type>struct kdbus_msg</type>
-+          object plus the size of all appended VECs.
-+          You can use it in combination with <varname>offset</varname> to map
-+          a single message, instead of mapping the entire pool. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.pool</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for further details.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem>
-+          <para>
-+            Kernel-provided return flags. Currently, the following flags are
-+            defined.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_RECV_RETURN_INCOMPLETE_FDS</constant></term>
-+              <listitem>
-+                <para>
-+                  The message contained memfds or file descriptors, and the
-+                  kernel failed to install one or more of them at receive
-+                  time. Most probably, that happened because the maximum
-+                  number of file descriptors for the receiver's task was
-+                  exceeded. In such cases, the message is still delivered, so
-+                  this is not a fatal condition. File descriptor numbers
-+                  inside the <constant>KDBUS_ITEM_FDS</constant> item or
-+                  memfd files referenced by
-+                  <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> items which
-+                  could not be installed will be set to
-+                  <constant>-1</constant>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      Unless <constant>KDBUS_RECV_DROP</constant> was passed, the
-+      <varname>offset</varname> field contains the location of the new message
-+      inside the receiver's pool once the <constant>KDBUS_CMD_RECV</constant>
-+      ioctl has returned. The message is stored as <type>struct kdbus_msg</type>
-+      at this offset, and can be interpreted with the semantics described above.
-+    </para>
-+    <para>
-+      Also, if the connection allowed for file descriptors to be passed
-+      (<constant>KDBUS_HELLO_ACCEPT_FD</constant>), and if the message contained
-+      any, they will be installed into the receiving process when the
-+      <constant>KDBUS_CMD_RECV</constant> ioctl is called.
-+      <emphasis>memfds</emphasis> may always be part of the message payload.
-+      The receiving task is obliged to close all file descriptors appropriately
-+      once no longer needed. If <constant>KDBUS_RECV_PEEK</constant> is set, no
-+      file descriptors are installed. This allows for peeking at a message,
-+      looking at its metadata only and dropping it via
-+      <constant>KDBUS_RECV_DROP</constant>, without installing any of the file
-+      descriptors into the receiving process.
-+    </para>
-+    <para>
-+      The caller is obliged to call the <constant>KDBUS_CMD_FREE</constant>
-+      ioctl with the returned offset when the memory is no longer needed.
-+    </para>
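-+
-+    <para>
-+      The following sketch makes the receive path concrete: it dequeues one
-+      message, walks its items for
-+      <constant>KDBUS_ITEM_PAYLOAD_OFF</constant> payloads, and releases the
-+      pool slice again. It assumes the same headers and
-+      <constant>ALIGN8()</constant> helper as the sending sketches, a pool
-+      that was <function>mmap()</function>ed at <varname>pool_base</varname>,
-+      a <type>struct kdbus_cmd_free</type> layout as documented in
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>,
-+      and a hypothetical <function>handle_payload()</function> consumer.
-+    </para>
-+
-+    <programlisting>
-+static void handle_payload(const void *data, uint64_t len); /* user-defined */
-+
-+static int recv_one(int conn_fd, uint8_t *pool_base)
-+{
-+  struct kdbus_cmd_recv cmd_recv = { .size = sizeof(cmd_recv) };
-+  struct kdbus_cmd_free cmd_free = { .size = sizeof(cmd_free) };
-+  struct kdbus_msg *msg;
-+  struct kdbus_item *item;
-+  uint64_t seen;
-+
-+  if (ioctl(conn_fd, KDBUS_CMD_RECV, &amp;cmd_recv) &lt; 0)
-+    return -1;                /* EAGAIN: the queue is empty */
-+
-+  msg = (struct kdbus_msg *)(pool_base + cmd_recv.msg.offset);
-+
-+  /* Walk the items; PAYLOAD_OFF offsets are relative to the message. */
-+  item = msg->items;
-+  seen = sizeof(*msg);
-+  while (seen &lt; msg->size) {
-+    if (item->type == KDBUS_ITEM_PAYLOAD_OFF)
-+      handle_payload((uint8_t *)msg + item->vec.offset, item->vec.size);
-+    seen += ALIGN8(item->size);
-+    item = (struct kdbus_item *)((uint8_t *)msg + seen);
-+  }
-+
-+  /* Release the slice once the message has been processed. */
-+  cmd_free.offset = cmd_recv.msg.offset;
-+  return ioctl(conn_fd, KDBUS_CMD_FREE, &amp;cmd_free);
-+}
-+    </programlisting>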
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Notifications</title>
-+    <para>
-+      A kernel notification is a regular kdbus message with the following
-+      details.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem><para>
-+          kdbus_msg.src_id == <constant>KDBUS_SRC_ID_KERNEL</constant>
-+      </para></listitem>
-+      <listitem><para>
-+        kdbus_msg.dst_id == <constant>KDBUS_DST_ID_BROADCAST</constant>
-+      </para></listitem>
-+      <listitem><para>
-+        kdbus_msg.payload_type == <constant>KDBUS_PAYLOAD_KERNEL</constant>
-+      </para></listitem>
-+      <listitem><para>
-+        Has exactly one of the items described below attached.
-+      </para></listitem>
-+      <listitem><para>
-+        Always has a timestamp item (<constant>KDBUS_ITEM_TIMESTAMP</constant>)
-+        attached.
-+      </para></listitem>
-+    </itemizedlist>
-+
-+    <para>
-+      The kernel will notify its users of the following events; a small
-+      dispatching sketch follows the list.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem><para>
-+        When connection <emphasis>A</emphasis> is terminated while connection
-+        <emphasis>B</emphasis> is waiting for a reply from it, connection
-+        <emphasis>B</emphasis> is notified with a message with an item of
-+        type <constant>KDBUS_ITEM_REPLY_DEAD</constant>.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        When connection <emphasis>A</emphasis> does not receive a reply from
-+        connection <emphasis>B</emphasis> within the specified timeout window,
-+        connection <emphasis>A</emphasis> will receive a message with an
-+        item of type <constant>KDBUS_ITEM_REPLY_TIMEOUT</constant>.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        When an ordinary connection (not a monitor) is created on or removed
-+        from a bus, messages with an item of type
-+        <constant>KDBUS_ITEM_ID_ADD</constant> or
-+        <constant>KDBUS_ITEM_ID_REMOVE</constant>, respectively, are delivered
-+        to all bus members that match these messages through their match
-+        database. Eavesdroppers (monitor connections) do not cause such
-+        notifications to be sent. They are invisible on the bus.
-+      </para></listitem>
-+
-+      <listitem><para>
-+        When a connection gains or loses ownership of a name, messages with an
-+        item of type <constant>KDBUS_ITEM_NAME_ADD</constant>,
-+        <constant>KDBUS_ITEM_NAME_REMOVE</constant> or
-+        <constant>KDBUS_ITEM_NAME_CHANGE</constant> are delivered to all bus
-+        members that match these messages through their match database.
-+      </para></listitem>
-+    </itemizedlist>
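-+
-+    <para>
-+      The sketch below shows how a client might classify and dispatch such
-+      notifications once a message has been located in the pool (as in the
-+      receive sketch above); the headers and <constant>ALIGN8()</constant>
-+      helper from the earlier sketches are assumed, and the constant names
-+      are those listed in this section.
-+    </para>
-+
-+    <programlisting>
-+static void handle_notification(const struct kdbus_msg *msg)
-+{
-+  const struct kdbus_item *item = msg->items;
-+  uint64_t seen = sizeof(*msg);
-+
-+  if (msg->src_id != KDBUS_SRC_ID_KERNEL ||
-+      msg->payload_type != KDBUS_PAYLOAD_KERNEL)
-+    return;                            /* an ordinary message */
-+
-+  while (seen &lt; msg->size) {
-+    switch (item->type) {
-+    case KDBUS_ITEM_REPLY_TIMEOUT:     /* our peer missed its deadline */
-+    case KDBUS_ITEM_REPLY_DEAD:        /* our peer died before replying */
-+    case KDBUS_ITEM_ID_ADD:            /* a connection joined the bus */
-+    case KDBUS_ITEM_ID_REMOVE:         /* a connection left the bus */
-+    case KDBUS_ITEM_NAME_ADD:
-+    case KDBUS_ITEM_NAME_REMOVE:
-+    case KDBUS_ITEM_NAME_CHANGE:       /* name ownership changed */
-+      /* ... dispatch to the application as appropriate ... */
-+      break;
-+    case KDBUS_ITEM_TIMESTAMP:         /* always attached, see above */
-+      break;
-+    }
-+    seen += ALIGN8(item->size);
-+    item = (const struct kdbus_item *)((const uint8_t *)msg + seen);
-+  }
-+}
-+    </programlisting>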
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_SEND</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EOPNOTSUPP</constant></term>
-+          <listitem><para>
-+            The connection is not an ordinary connection, or the file
-+            descriptors passed in a <constant>KDBUS_ITEM_FDS</constant> item
-+            are either kdbus handles or unix domain sockets. Both are
-+            currently unsupported.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The submitted payload type is
-+            <constant>KDBUS_PAYLOAD_KERNEL</constant>,
-+            <constant>KDBUS_MSG_EXPECT_REPLY</constant> was set without timeout
-+            or cookie values, <constant>KDBUS_SEND_SYNC_REPLY</constant> was
-+            set without <constant>KDBUS_MSG_EXPECT_REPLY</constant>, an invalid
-+            item was supplied, <constant>src_id</constant> was non-zero and was
-+            different from the current connection's ID, a supplied memfd had a
-+            size of 0, or a string was not properly null-terminated.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ENOTUNIQ</constant></term>
-+          <listitem><para>
-+            The supplied destination is
-+            <constant>KDBUS_DST_ID_BROADCAST</constant> and either
-+            file descriptors were passed, or
-+            <constant>KDBUS_MSG_EXPECT_REPLY</constant> was set,
-+            or a timeout was given.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>E2BIG</constant></term>
-+          <listitem><para>
-+            Too many items.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMSGSIZE</constant></term>
-+          <listitem><para>
-+            The size of the message header and items or the payload vector
-+            is excessive.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EEXIST</constant></term>
-+          <listitem><para>
-+            Multiple <constant>KDBUS_ITEM_FDS</constant>,
-+            <constant>KDBUS_ITEM_BLOOM_FILTER</constant> or
-+            <constant>KDBUS_ITEM_DST_NAME</constant> items were supplied.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EBADF</constant></term>
-+          <listitem><para>
-+            The supplied <constant>KDBUS_ITEM_FDS</constant> or
-+            <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> items
-+            contained an illegal file descriptor.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMEDIUMTYPE</constant></term>
-+          <listitem><para>
-+            The supplied memfd is not a sealed kdbus memfd.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EMFILE</constant></term>
-+          <listitem><para>
-+            Too many file descriptors inside a
-+            <constant>KDBUS_ITEM_FDS</constant>.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EBADMSG</constant></term>
-+          <listitem><para>
-+            An item had an illegal size, both a <varname>dst_id</varname> and
-+            a <constant>KDBUS_ITEM_DST_NAME</constant> were given, or both a
-+            name and a bloom filter were given.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ETXTBSY</constant></term>
-+          <listitem><para>
-+            The supplied kdbus memfd file cannot be sealed or the seal
-+            was removed, because it is shared with other processes or
-+            still mapped with
-+            <citerefentry>
-+              <refentrytitle>mmap</refentrytitle>
-+              <manvolnum>2</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ECOMM</constant></term>
-+          <listitem><para>
-+            A peer does not accept the file descriptors addressed to it.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EFAULT</constant></term>
-+          <listitem><para>
-+            The supplied bloom filter size was not 64-bit aligned, or supplied
-+            memory could not be accessed by the kernel.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EDOM</constant></term>
-+          <listitem><para>
-+            The supplied bloom filter size did not match the bloom filter
-+            size of the bus.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EDESTADDRREQ</constant></term>
-+          <listitem><para>
-+            <constant>dst_id</constant> was set to
-+            <constant>KDBUS_DST_ID_NAME</constant>, but no
-+            <constant>KDBUS_ITEM_DST_NAME</constant> was attached.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ESRCH</constant></term>
-+          <listitem><para>
-+            The name to look up was not found in the name registry.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EADDRNOTAVAIL</constant></term>
-+          <listitem><para>
-+            <constant>KDBUS_MSG_NO_AUTO_START</constant> was given but the
-+            destination connection is an activator.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ENXIO</constant></term>
-+          <listitem><para>
-+            The passed numeric destination connection ID couldn't be found,
-+            or is not connected.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ECONNRESET</constant></term>
-+          <listitem><para>
-+            The destination connection is no longer active.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ETIMEDOUT</constant></term>
-+          <listitem><para>
-+            Timeout while synchronously waiting for a reply.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINTR</constant></term>
-+          <listitem><para>
-+            Interrupted system call while synchronously waiting for a reply.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EPIPE</constant></term>
-+          <listitem><para>
-+            When sending a message, a synchronous reply from the receiving
-+            connection was expected but the connection died before answering.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ENOBUFS</constant></term>
-+          <listitem><para>
-+            Too many pending messages on the receiver side.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EREMCHG</constant></term>
-+          <listitem><para>
-+            Both a well-known name and a unique name (ID) were given, but
-+            the name is not currently owned by that connection.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EXFULL</constant></term>
-+          <listitem><para>
-+            The memory pool of the receiver is full.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EREMOTEIO</constant></term>
-+          <listitem><para>
-+            While synchronously waiting for a reply, the remote peer
-+            failed with an I/O error.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_RECV</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EOPNOTSUPP</constant></term>
-+          <listitem><para>
-+            The connection is not an ordinary connection, or the passed
-+            file descriptors are either kdbus handles or unix domain
-+            sockets. Both are currently unsupported.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Invalid flags or offset.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EAGAIN</constant></term>
-+          <listitem><para>
-+            No message found in the queue.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>clock_gettime</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>ioctl</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>poll</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>select</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>epoll</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>eventfd</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>memfd_create</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.name.xml b/Documentation/kdbus/kdbus.name.xml
-new file mode 100644
-index 0000000..3f5f6a6
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.name.xml
-@@ -0,0 +1,711 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.name">
-+
-+  <refentryinfo>
-+    <title>kdbus.name</title>
-+    <productname>kdbus.name</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.name</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.name</refname>
-+    <refpurpose>kdbus.name</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+    <para>
-+      Each
-+      <citerefentry>
-+        <refentrytitle>kdbus.bus</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      instantiates a name registry to resolve well-known names into unique
-+      connection IDs for message delivery. The registry will be queried when a
-+      message is sent with <varname>kdbus_msg.dst_id</varname> set to
-+      <constant>KDBUS_DST_ID_NAME</constant>, or when a registry dump is
-+      requested with <constant>KDBUS_CMD_NAME_LIST</constant>.
-+    </para>
-+
-+    <para>
-+      All of the below is subject to policy rules for <emphasis>SEE</emphasis>
-+      and <emphasis>OWN</emphasis> permissions. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.policy</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Name validity</title>
-+    <para>
-+      A name has to comply with the following rules in order to be considered
-+      valid; a sketch of an equivalent userspace check follows the list.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem>
-+        <para>
-+          The name has two or more elements separated by a
-+          '<literal>.</literal>' (period) character.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          All elements must contain at least one character.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          Each element must only contain the ASCII characters
-+          <literal>[A-Z][a-z][0-9]_</literal> and must not begin with a
-+          digit.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          The name must contain at least one '<literal>.</literal>' (period)
-+          character (and thus at least two elements).
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          The name must not begin with a '<literal>.</literal>' (period)
-+          character.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          The name must not exceed <constant>255</constant> characters in
-+          length.
-+        </para>
-+      </listitem>
-+    </itemizedlist>
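-+
-+    <para>
-+      For illustration, the rules above translate into a small userspace
-+      check like the following. This mirrors the rules as stated here; it is
-+      not the kernel's own validation code.
-+    </para>
-+
-+    <programlisting>
-+#include &lt;stdbool.h>
-+#include &lt;string.h>
-+
-+static bool is_name_char(char c)
-+{
-+  return (c >= 'A' &amp;&amp; c &lt;= 'Z') || (c >= 'a' &amp;&amp; c &lt;= 'z') ||
-+         (c >= '0' &amp;&amp; c &lt;= '9') || c == '_';
-+}
-+
-+static bool name_is_valid(const char *name)
-+{
-+  bool at_element_start = true;  /* true at index 0 and after each '.' */
-+  bool seen_dot = false;
-+  size_t len = strlen(name);
-+
-+  if (len &lt; 3 || len > 255)      /* the shortest valid name is "a.b" */
-+    return false;
-+
-+  for (const char *p = name; *p; p++) {
-+    if (*p == '.') {
-+      if (at_element_start)      /* leading '.' or an empty element */
-+        return false;
-+      seen_dot = true;
-+      at_element_start = true;
-+    } else if (!is_name_char(*p) ||
-+               (at_element_start &amp;&amp; *p >= '0' &amp;&amp; *p &lt;= '9')) {
-+      return false;              /* bad character, or an element that
-+                                    begins with a digit */
-+    } else {
-+      at_element_start = false;
-+    }
-+  }
-+
-+  /* At least two elements, and no trailing '.'. */
-+  return seen_dot &amp;&amp; !at_element_start;
-+}
-+    </programlisting>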
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Acquiring a name</title>
-+    <para>
-+      To acquire a name, a client uses the
-+      <constant>KDBUS_CMD_NAME_ACQUIRE</constant> ioctl with
-+      <type>struct kdbus_cmd</type> as argument; a usage sketch follows
-+      the field descriptions below.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>Flags to control details in the name acquisition.</para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_REPLACE_EXISTING</constant></term>
-+              <listitem>
-+                <para>
-+                  Acquiring a name that is already present usually fails,
-+                  unless this flag is set in the call, and
-+                  <constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant> (see below)
-+                  was set when the current owner of the name acquired it, or
-+                  if the current owner is an activator connection (see
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.connection</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>).
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant></term>
-+              <listitem>
-+                <para>
-+                  Allow other connections to take over this name. When this
-+                  happens, the former owner of the name will be notified
-+                  of the loss.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_QUEUE</constant></term>
-+              <listitem>
-+                <para>
-+                  A name that is already acquired by a connection cannot be
-+                  acquired again (unless the
-+                  <constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant> flag was
-+                  set during acquisition; see above).
-+                  However, a connection can put itself in a queue of
-+                  connections waiting for the name to be released. Once that
-+                  happens, the first connection in that queue becomes the new
-+                  owner and is notified accordingly.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Request a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will fail with
-+                  <errorcode>-1</errorcode>, and <varname>errno</varname>
-+                  is set to <constant>EPROTO</constant>.
-+                  Once the ioctl has returned, the <varname>flags</varname>
-+                  field will have all bits set that the kernel recognizes as
-+                  valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem>
-+          <para>
-+            Flags returned by the kernel. Currently, the following may be
-+            returned by the kernel.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_IN_QUEUE</constant></term>
-+              <listitem>
-+                <para>
-+                  The name was not acquired yet, but the connection was
-+                  placed in the queue of peers waiting for the name.
-+                  This can only happen if <constant>KDBUS_NAME_QUEUE</constant>
-+                  was set in the <varname>flags</varname> member (see above).
-+                  The connection will receive a name owner change notification
-+                  once the current owner has given up the name and its
-+                  ownership was transferred.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Items used to pass in the name. Currently, one item of type
-+            <constant>KDBUS_ITEM_NAME</constant> is expected and allowed, and
-+            the contained string must be a valid bus name.
-+            <constant>KDBUS_ITEM_NEGOTIATE</constant> may be used to probe for
-+            valid item types. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.item</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for a detailed description of how this item is used.
-+          </para>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
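-+
-+    <para>
-+      The following is a minimal sketch (not part of the kernel sources)
-+      of how a name might be acquired. It assumes that
-+      <varname>conn_fd</varname> is a connection created via
-+      <constant>KDBUS_CMD_HELLO</constant>, that
-+      <filename>linux/kdbus.h</filename> provides the definitions used
-+      below (including the <varname>str</varname> member of
-+      <type>struct kdbus_item</type>; see
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>),
-+      and it keeps error handling to a minimum:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <stddef.h>
-+#include <stdint.h>
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <string.h>
-+#include <sys/ioctl.h>
-+#include <linux/kdbus.h>
-+
-+/* items are 8-byte aligned; helper defined locally for this sketch */
-+#define ALIGN8(l) (((l) + 7) & ~7ULL)
-+
-+static int acquire_name(int conn_fd, const char *name)
-+{
-+  size_t slen = strlen(name) + 1;
-+  size_t item_size = offsetof(struct kdbus_item, str) + slen;
-+  size_t size = sizeof(struct kdbus_cmd) + ALIGN8(item_size);
-+  struct kdbus_cmd *cmd;
-+  struct kdbus_item *item;
-+  int ret;
-+
-+  cmd = calloc(1, size);
-+  if (!cmd)
-+    return -1;
-+
-+  cmd->size = size;               /* overall size, including items */
-+  cmd->flags = KDBUS_NAME_QUEUE;  /* wait in line if already taken */
-+
-+  item = cmd->items;
-+  item->size = item_size;
-+  item->type = KDBUS_ITEM_NAME;
-+  memcpy(item->str, name, slen);
-+
-+  ret = ioctl(conn_fd, KDBUS_CMD_NAME_ACQUIRE, cmd);
-+  if (ret == 0 && (cmd->return_flags & KDBUS_NAME_IN_QUEUE))
-+    printf("queued; waiting for %s to be released\n", name);
-+
-+  free(cmd);
-+  return ret;
-+}
-+    ]]></programlisting>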
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Releasing a name</title>
-+    <para>
-+      A connection may release a name explicitly with the
-+      <constant>KDBUS_CMD_NAME_RELEASE</constant> ioctl. If the connection was
-+      an implementer of an activatable name, its pending messages are moved
-+      back to the activator. If there are any connections queued up as waiters
-+      for the name, the first one in the queue (the oldest entry) will become
-+      the new owner. The same happens implicitly for all names once a
-+      connection terminates. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information on connections.
-+    </para>
-+    <para>
-+      The <constant>KDBUS_CMD_NAME_RELEASE</constant> ioctl uses the same data
-+      structure as the acquisition call
-+      (<constant>KDBUS_CMD_NAME_ACQUIRE</constant>),
-+      but with slightly different field usage.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Flags to the command. Currently unused.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
-+          and the <varname>flags</varname> field is set to
-+          <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Items to submit the name. Currently, one item of type
-+            <constant>KDBUS_ITEM_NAME</constant> is expected and allowed, and
-+            the contained string must be a valid bus name.
-+            <constant>KDBUS_ITEM_NEGOTIATE</constant> may be used to probe for
-+            valid item types. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.item</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+            for a detailed description of how this item is used.
-+          </para>
-+          <para>
-+            Unrecognized items are rejected, and the ioctl will fail with
-+            <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          </para>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
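-+
-+    <para>
-+      As a usage sketch, releasing reuses the exact buffer layout shown
-+      in the acquisition example of the previous section (the
-+      hypothetical <function>acquire_name()</function> helper); only the
-+      command and flags change:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+/* cmd built exactly as in the acquisition sketch above */
-+cmd->flags = 0;  /* flags are currently unused here */
-+if (ioctl(conn_fd, KDBUS_CMD_NAME_RELEASE, cmd) < 0)
-+  perror("KDBUS_CMD_NAME_RELEASE");
-+    ]]></programlisting>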
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Dumping the name registry</title>
-+    <para>
-+      A connection may request a complete or filtered dump of currently active
-+      bus names with the <constant>KDBUS_CMD_LIST</constant> ioctl, which
-+      takes a <type>struct kdbus_cmd_list</type> as argument.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_cmd_list {
-+  __u64 flags;
-+  __u64 return_flags;
-+  __u64 offset;
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem>
-+          <para>
-+            Any combination of flags to specify which names should be dumped.
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_LIST_UNIQUE</constant></term>
-+              <listitem>
-+                <para>
-+                  List the unique (numeric) IDs of connections, whether they
-+                  own a name or not.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_LIST_NAMES</constant></term>
-+              <listitem>
-+                <para>
-+                  List well-known names stored in the database which are
-+                  actively owned by a real connection (not an activator).
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_LIST_ACTIVATORS</constant></term>
-+              <listitem>
-+                <para>
-+                  List names that are owned by an activator.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_LIST_QUEUED</constant></term>
-+              <listitem>
-+                <para>
-+                  List connections that do not own a name yet, but are
-+                  waiting for one to become available.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Request a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will fail with
-+                  <errorcode>-1</errorcode>, and <varname>errno</varname>
-+                  is set to <constant>EPROTO</constant>.
-+                  Once the ioctl has returned, the <varname>flags</varname>
-+                  field will have all bits set that the kernel recognizes as
-+                  valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>offset</varname></term>
-+        <listitem><para>
-+          When the ioctl returns successfully, the offset to the name registry
-+          dump inside the connection's pool will be stored in this field.
-+        </para></listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      The returned list of names is stored in a <type>struct kdbus_list</type>
-+      that in turn contains an array of type <type>struct kdbus_info</type>.
-+      The size of the array, in bytes, is given as <varname>list_size</varname>.
-+      The fields inside <type>struct kdbus_info</type> are described next.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_info {
-+  __u64 size;
-+  __u64 id;
-+  __u64 flags;
-+  struct kdbus_item items[0];
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+          The owning connection's unique ID.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          The flags of the owning connection.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem>
-+          <para>
-+            Items containing the actual name. Currently, one item of type
-+            <constant>KDBUS_ITEM_OWNED_NAME</constant> will be attached,
-+            including the name's flags. In that item, the flags field of the
-+            name may carry the following bits:
-+          </para>
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant></term>
-+              <listitem>
-+                <para>
-+                  Other connections are allowed to take over this name from the
-+                  connection that owns it.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_IN_QUEUE</constant></term>
-+              <listitem>
-+                <para>
-+                  When retrieving a list of currently acquired names in the
-+                  registry, this flag indicates whether the connection
-+                  actually owns the name or is currently waiting for it to
-+                  become available.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_NAME_ACTIVATOR</constant></term>
-+              <listitem>
-+                <para>
-+                  An activator connection owns a name as a placeholder for an
-+                  implementer, which is started on demand by programs as soon
-+                  as the first message arrives. More information on this
-+                  topic is available in
-+                  <citerefentry>
-+                    <refentrytitle>kdbus.connection</refentrytitle>
-+                    <manvolnum>7</manvolnum>
-+                  </citerefentry>.
-+                </para>
-+                <para>
-+                  In contrast to
-+                  <constant>KDBUS_NAME_REPLACE_EXISTING</constant>,
-+                  when a name is taken over from an activator connection, all
-+                  the messages that have been queued in the activator
-+                  connection will be moved over to the new owner. The activator
-+                  connection will still be tracked for the name and will take
-+                  control again if the implementer connection terminates.
-+                </para>
-+                <para>
-+                  This flag cannot be used when acquiring a name, but is
-+                  implicitly set through <constant>KDBUS_CMD_HELLO</constant>
-+                  with <constant>KDBUS_HELLO_ACTIVATOR</constant> set in
-+                  <varname>kdbus_cmd_hello.conn_flags</varname>.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
-+              <listitem>
-+                <para>
-+                  Request a set of valid flags for this ioctl. When this bit is
-+                  set, no action is taken; the ioctl will return
-+                  <errorcode>0</errorcode>, and the <varname>flags</varname>
-+                  field will have all bits set that are valid for this command.
-+                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
-+                  cleared by the operation.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      The returned buffer must be freed with the
-+      <constant>KDBUS_CMD_FREE</constant> ioctl when the user is finished with
-+      it. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information.
-+    </para>
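-+
-+    <para>
-+      As a sketch only: issuing the list command and walking the returned
-+      records might look as follows, assuming <varname>buf</varname> is
-+      the connection's pool mapped as described in
-+      <citerefentry>
-+        <refentrytitle>kdbus.pool</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>,
-+      and that the kernel reports the dump size in a
-+      <varname>list_size</varname> field as mentioned above:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+/* fragment: assumes the includes and ALIGN8 from the earlier sketch */
-+struct kdbus_cmd_list cmd = {
-+  .flags = KDBUS_LIST_NAMES,  /* names owned by real connections */
-+};
-+
-+if (ioctl(conn_fd, KDBUS_CMD_LIST, &cmd) == 0) {
-+  uint64_t pos = 0;
-+
-+  while (pos < cmd.list_size) {
-+    struct kdbus_info *info =
-+      (struct kdbus_info *)(buf + cmd.offset + pos);
-+
-+    printf("connection ID %llu, flags 0x%llx\n",
-+           (unsigned long long)info->id,
-+           (unsigned long long)info->flags);
-+
-+    /* records are 8-byte aligned */
-+    pos += ALIGN8(info->size);
-+  }
-+
-+  /* hand the slice at cmd.offset back to the pool when done */
-+}
-+    ]]></programlisting>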
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_NAME_ACQUIRE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Illegal command flags, illegal name provided, or an activator
-+            tried to acquire a second name.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EPERM</constant></term>
-+          <listitem><para>
-+            Policy prohibited name ownership.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EALREADY</constant></term>
-+          <listitem><para>
-+            Connection already owns that name.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EEXIST</constant></term>
-+          <listitem><para>
-+            The name already exists and cannot be taken over.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>E2BIG</constant></term>
-+          <listitem><para>
-+            The maximum number of well-known names per connection is exhausted.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_NAME_RELEASE</constant>
-+        may fail with the following errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Invalid command flags, or invalid name provided.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ESRCH</constant></term>
-+          <listitem><para>
-+            Name is not found in the registry.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EADDRINUSE</constant></term>
-+          <listitem><para>
-+            Name is owned by a different connection and can't be released.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_LIST</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Invalid command flags.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>ENOBUFS</constant></term>
-+          <listitem><para>
-+            No available memory in the connection's pool.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.policy</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.policy.xml b/Documentation/kdbus/kdbus.policy.xml
-new file mode 100644
-index 0000000..6732416
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.policy.xml
-@@ -0,0 +1,406 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.policy">
-+
-+  <refentryinfo>
-+    <title>kdbus.policy</title>
-+    <productname>kdbus.policy</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.policy</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.policy</refname>
-+    <refpurpose>kdbus policy</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+
-+    <para>
-+      A kdbus policy restricts the possibilities of connections to own, see and
-+      talk to well-known names. A policy can be associated with a bus (through a
-+      policy holder connection) or a custom endpoint. kdbus stores its policy
-+      information in a database that can be accessed through the following
-+      ioctl commands:
-+    </para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><constant>KDBUS_CMD_HELLO</constant></term>
-+        <listitem><para>
-+          When creating, or updating, a policy holder connection. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.connection</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><constant>KDBUS_CMD_ENDPOINT_MAKE</constant></term>
-+        <term><constant>KDBUS_CMD_ENDPOINT_UPDATE</constant></term>
-+        <listitem><para>
-+          When creating, or updating, a bus custom endpoint. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.endpoint</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>.
-+        </para></listitem>
-+      </varlistentry>
-+    </variablelist>
-+
-+    <para>
-+      In all cases, the name and policy access information is stored in items
-+      of type <constant>KDBUS_ITEM_NAME</constant> and
-+      <constant>KDBUS_ITEM_POLICY_ACCESS</constant>. For this transport, the
-+      following rules apply.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem>
-+        <para>
-+          An item of type <constant>KDBUS_ITEM_NAME</constant> must be followed
-+          by at least one <constant>KDBUS_ITEM_POLICY_ACCESS</constant> item.
-+        </para>
-+      </listitem>
-+
-+      <listitem>
-+        <para>
-+          An item of type <constant>KDBUS_ITEM_NAME</constant> can be followed
-+          by an arbitrary number of
-+          <constant>KDBUS_ITEM_POLICY_ACCESS</constant> items.
-+        </para>
-+      </listitem>
-+
-+      <listitem>
-+        <para>
-+          An arbitrary number of groups of names and access levels can be given.
-+        </para>
-+      </listitem>
-+    </itemizedlist>
-+
-+    <para>
-+      Names passed in items of type <constant>KDBUS_ITEM_NAME</constant> must
-+      comply with the rules for valid kdbus names. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.name</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information.
-+
-+      The payload of an item of type
-+      <constant>KDBUS_ITEM_POLICY_ACCESS</constant> is defined by the following
-+      struct. For more information on the layout of items, please refer to
-+      <citerefentry>
-+        <refentrytitle>kdbus.item</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_policy_access {
-+  __u64 type;
-+  __u64 access;
-+  __u64 id;
-+};
-+    </programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>type</varname></term>
-+        <listitem>
-+          <para>
-+            One of the following.
-+          </para>
-+
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_ACCESS_USER</constant></term>
-+              <listitem><para>
-+                Grant access to a user with the UID stored in the
-+                <varname>id</varname> field.
-+              </para></listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_ACCESS_GROUP</constant></term>
-+              <listitem><para>
-+                Grant access to a user with the GID stored in the
-+                <varname>id</varname> field.
-+              </para></listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_ACCESS_WORLD</constant></term>
-+              <listitem><para>
-+                Grant access to everyone. The <varname>id</varname> field
-+                is ignored.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>access</varname></term>
-+        <listitem>
-+          <para>
-+            The access to grant. One of the following.
-+          </para>
-+
-+          <variablelist>
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_SEE</constant></term>
-+              <listitem><para>
-+                Allow the name to be seen.
-+              </para></listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_TALK</constant></term>
-+              <listitem><para>
-+                Allow the name to be talked to.
-+              </para></listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
-+              <term><constant>KDBUS_POLICY_OWN</constant></term>
-+              <listitem><para>
-+                Allow the name to be owned.
-+              </para></listitem>
-+            </varlistentry>
-+          </variablelist>
-+        </listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>id</varname></term>
-+        <listitem><para>
-+           For <constant>KDBUS_POLICY_ACCESS_USER</constant>, stores the UID.
-+           For <constant>KDBUS_POLICY_ACCESS_GROUP</constant>, stores the GID.
-+        </para></listitem>
-+      </varlistentry>
-+
-+    </variablelist>
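-+
-+    <para>
-+      As an illustrative sketch (the values are arbitrary), a policy
-+      entry granting name ownership to UID 1000 would be filled in like
-+      this; in a real upload, the struct forms the payload of a
-+      <constant>KDBUS_ITEM_POLICY_ACCESS</constant> item that follows a
-+      <constant>KDBUS_ITEM_NAME</constant> item:
-+    </para>
-+
-+    <programlisting>
-+struct kdbus_policy_access access = {
-+  .type   = KDBUS_POLICY_ACCESS_USER,  /* match on a UID */
-+  .access = KDBUS_POLICY_OWN,          /* allow owning the name */
-+  .id     = 1000,                      /* the UID to grant access to */
-+};
-+    </programlisting>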
-+
-+    <para>
-+      All endpoints of buses have an empty policy database by default.
-+      Therefore, unless policy rules are added, all operations will be
-+      denied by default. Also see
-+      <citerefentry>
-+        <refentrytitle>kdbus.endpoint</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Wildcard names</title>
-+    <para>
-+      Policy holder connections may upload names that contain the wildcard
-+      suffix (<literal>".*"</literal>). Such a policy entry is effective for
-+      every well-known name that extends the provided name by exactly one more
-+      level.
-+
-+      For example, the name <literal>foo.bar.*</literal> matches both
-+      <literal>"foo.bar.baz"</literal> and
-+      <literal>"foo.bar.bazbaz"</literal> are, but not
-+      <literal>"foo.bar.baz.baz"</literal>.
-+
-+      This allows connections to take control over multiple names that the
-+      policy holder doesn't need to know about when uploading the policy.
-+
-+      Such wildcard entries are not allowed for custom endpoints.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Privileged connections</title>
-+    <para>
-+      The policy database is overruled when action is taken by a privileged
-+      connection. Please refer to
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information on what makes a connection privileged.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Examples</title>
-+    <para>
-+      For instance, a set of policy rules may look like this:
-+    </para>
-+
-+    <programlisting>
-+KDBUS_ITEM_NAME: str='org.foo.bar'
-+KDBUS_ITEM_POLICY_ACCESS: type=USER, access=OWN, ID=1000
-+KDBUS_ITEM_POLICY_ACCESS: type=USER, access=TALK, ID=1001
-+KDBUS_ITEM_POLICY_ACCESS: type=WORLD, access=SEE
-+
-+KDBUS_ITEM_NAME: str='org.blah.baz'
-+KDBUS_ITEM_POLICY_ACCESS: type=USER, access=OWN, ID=0
-+KDBUS_ITEM_POLICY_ACCESS: type=WORLD, access=TALK
-+    </programlisting>
-+
-+    <para>
-+      That means that 'org.foo.bar' may only be owned by UID 1000, but every
-+      user on the bus is allowed to see the name. However, only UID 1001 may
-+      actually send a message to the connection and receive a reply from it.
-+
-+      The second rule allows 'org.blah.baz' to be owned by UID 0 only, but
-+      every user may talk to it.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>TALK access and multiple well-known names per connection</title>
-+    <para>
-+      Note that TALK access is checked against all names of a connection. For
-+      example, if a connection owns both <constant>'org.foo.bar'</constant> and
-+      <constant>'org.blah.baz'</constant>, and the policy database allows
-+      <constant>'org.blah.baz'</constant> to be talked to by WORLD, then this
-+      permission is also granted to <constant>'org.foo.bar'</constant>. That
-+      might sound illogical, but after all, we allow messages to be directed to
-+      either the ID or a well-known name, and policy is applied to the
-+      connection, not the name. In other words, the effective TALK policy for a
-+      connection is the most permissive of all names the connection owns.
-+
-+      For broadcast messages, the receiver needs TALK access to the sender
-+      in order to receive the broadcast.
-+    </para>
-+    <para>
-+      Both the endpoint and the bus policy databases are consulted to allow
-+      name registry listing, owning a well-known name and message delivery.
-+      If either one fails, the operation is failed with
-+      <varname>errno</varname> set to <constant>EPERM</constant>.
-+
-+      As a best practice, connections that own names with restricted TALK
-+      access should not install matches. This avoids cases where a sent
-+      message passes the bloom filter due to a false positive and also
-+      satisfies the policy rules.
-+
-+      Also see
-+      <citerefentry>
-+        <refentrytitle>kdbus.match</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Implicit policies</title>
-+    <para>
-+      Depending on the type of the endpoint, a set of implicit rules that
-+      override installed policies might be enforced.
-+
-+      On default endpoints, the following rules are enforced before any
-+      user-supplied policy is consulted.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem>
-+        <para>
-+          Privileged connections always override any installed policy. Those
-+          connections could easily install their own policies, so there is no
-+          reason to enforce installed policies.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          Connections can always talk to connections of the same user. This
-+          includes broadcast messages.
-+        </para>
-+      </listitem>
-+    </itemizedlist>
-+
-+    <para>
-+      Custom endpoints have stricter policies. The following rules apply:
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem>
-+        <para>
-+          Policy rules are always enforced, even if the connection is a
-+          privileged connection.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          Policy rules are always enforced for <constant>TALK</constant> access,
-+          even if both ends are running under the same user. This includes
-+          broadcast messages.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          To restrict the set of names that can be seen, endpoint policies can
-+          install <constant>SEE</constant> policies.
-+        </para>
-+      </listitem>
-+    </itemizedlist>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.pool.xml b/Documentation/kdbus/kdbus.pool.xml
-new file mode 100644
-index 0000000..a9e16f1
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.pool.xml
-@@ -0,0 +1,326 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus.pool">
-+
-+  <refentryinfo>
-+    <title>kdbus.pool</title>
-+    <productname>kdbus.pool</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus.pool</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus.pool</refname>
-+    <refpurpose>kdbus pool</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Description</title>
-+    <para>
-+      A pool for data received from the kernel is installed for every
-+      <emphasis>connection</emphasis> of the <emphasis>bus</emphasis>, and
-+      is sized according to the information stored in the
-+      <varname>pool_size</varname> member of <type>struct kdbus_cmd_hello</type>
-+      when <constant>KDBUS_CMD_HELLO</constant> is employed. Internally, the
-+      pool is segmented into <emphasis>slices</emphasis>, each referenced by its
-+      <emphasis>offset</emphasis> in the pool, expressed in <type>bytes</type>.
-+      See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more information about <constant>KDBUS_CMD_HELLO</constant>.
-+    </para>
-+
-+    <para>
-+      The pool is written to by the kernel when one of the following
-+      <emphasis>ioctls</emphasis> is issued:
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_CMD_HELLO</constant></term>
-+          <listitem><para>
-+            ... to receive details about the bus the connection was made to
-+          </para></listitem>
-+        </varlistentry>
-+        <varlistentry>
-+          <term><constant>KDBUS_CMD_RECV</constant></term>
-+          <listitem><para>
-+            ... to receive a message
-+          </para></listitem>
-+        </varlistentry>
-+        <varlistentry>
-+          <term><constant>KDBUS_CMD_LIST</constant></term>
-+          <listitem><para>
-+            ... to dump the name registry
-+          </para></listitem>
-+        </varlistentry>
-+        <varlistentry>
-+          <term><constant>KDBUS_CMD_CONN_INFO</constant></term>
-+          <listitem><para>
-+            ... to retrieve information on a connection
-+          </para></listitem>
-+        </varlistentry>
-+        <varlistentry>
-+          <term><constant>KDBUS_CMD_BUS_CREATOR_INFO</constant></term>
-+          <listitem><para>
-+            ... to retrieve information about a connection's bus creator
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+
-+    </para>
-+    <para>
-+      The <varname>offset</varname> fields returned by any of the
-+      aforementioned ioctls describe offsets inside the pool. In order to make
-+      the slice available for subsequent calls,
-+      <constant>KDBUS_CMD_FREE</constant> has to be called on that offset
-+      (see below). Otherwise, the pool will fill up, and the connection won't
-+      be able to receive any more information through its pool.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Pool slice allocation</title>
-+    <para>
-+      Pool slices are allocated by the kernel in order to report information
-+      back to a task, such as messages or returned name lists.
-+      Allocation of pool slices cannot be initiated by userspace. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      and
-+      <citerefentry>
-+        <refentrytitle>kdbus.name</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for examples of commands that use the <emphasis>pool</emphasis> to
-+      return data.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Accessing the pool memory</title>
-+    <para>
-+      Memory in the pool is read-only for userspace and may only be written
-+      to by the kernel. To read from the pool memory, the caller is expected to
-+      <citerefentry>
-+        <refentrytitle>mmap</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>
-+      the buffer into its task, like this:
-+    </para>
-+    <programlisting>
-+uint8_t *buf = mmap(NULL, size, PROT_READ, MAP_SHARED, conn_fd, 0);
-+    </programlisting>
-+
-+    <para>
-+      In order to map the entire pool, the <varname>size</varname> parameter in
-+      the example above should be set to the value of the
-+      <varname>pool_size</varname> member of
-+      <type>struct kdbus_cmd_hello</type> when
-+      <constant>KDBUS_CMD_HELLO</constant> was employed to create the
-+      connection (see above).
-+    </para>
-+
-+    <para>
-+      The <emphasis>file descriptor</emphasis> used to map the memory must be
-+      the one that was used to create the <emphasis>connection</emphasis>.
-+      In other words, the one that was used to call
-+      <constant>KDBUS_CMD_HELLO</constant>. See
-+      <citerefentry>
-+        <refentrytitle>kdbus.connection</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>
-+      for more details.
-+    </para>
-+
-+    <para>
-+      Alternatively, instead of mapping the entire pool buffer, only parts
-+      of it can be mapped. Every kdbus command that returns an
-+      <emphasis>offset</emphasis> (see above) also reports a
-+      <emphasis>size</emphasis> along with it, so programs can be written
-+      to map only portions of the pool in order to access a specific
-+      <emphasis>slice</emphasis>.
-+    </para>
-+
-+    <para>
-+      When access to the pool memory is no longer needed, programs should
-+      call <function>munmap()</function> on the pointer returned by
-+      <function>mmap()</function>.
-+    </para>
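-+
-+    <para>
-+      As a short usage sketch, the matching teardown for the mapping
-+      shown above (with <varname>buf</varname> and <varname>size</varname>
-+      as in that example) is:
-+    </para>
-+
-+    <programlisting>
-+munmap(buf, size);  /* release the mapping established above */
-+    </programlisting>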
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Freeing pool slices</title>
-+    <para>
-+      The <constant>KDBUS_CMD_FREE</constant> ioctl is used to free a slice
-+      inside the pool, describing an offset that was returned in an
-+      <varname>offset</varname> field of another ioctl struct.
-+      The <constant>KDBUS_CMD_FREE</constant> command takes a
-+      <type>struct kdbus_cmd_free</type> as argument.
-+    </para>
-+
-+<programlisting>
-+struct kdbus_cmd_free {
-+  __u64 size;
-+  __u64 flags;
-+  __u64 return_flags;
-+  __u64 offset;
-+  struct kdbus_item items[0];
-+};
-+</programlisting>
-+
-+    <para>The fields in this struct are described below.</para>
-+
-+    <variablelist>
-+      <varlistentry>
-+        <term><varname>size</varname></term>
-+        <listitem><para>
-+          The overall size of the struct, including its items.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>flags</varname></term>
-+        <listitem><para>
-+          Currently unused.
-+          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
-+          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
-+          and the <varname>flags</varname> field is set to
-+          <constant>0</constant>.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>return_flags</varname></term>
-+        <listitem><para>
-+          Flags returned by the kernel. Currently unused and always set to
-+          <constant>0</constant> by the kernel.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>offset</varname></term>
-+        <listitem><para>
-+          The offset to free, as returned by other ioctls that allocated
-+          memory for returned information.
-+        </para></listitem>
-+      </varlistentry>
-+
-+      <varlistentry>
-+        <term><varname>items</varname></term>
-+        <listitem><para>
-+          Items to specify further details for the free command.
-+          Currently unused.
-+          Unrecognized items are rejected, and the ioctl will fail with
-+          <varname>errno</varname> set to <constant>EINVAL</constant>.
-+          All items except for
-+          <constant>KDBUS_ITEM_NEGOTIATE</constant> (see
-+            <citerefentry>
-+              <refentrytitle>kdbus.item</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>
-+          ) will be rejected.
-+        </para></listitem>
-+      </varlistentry>
-+    </variablelist>
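-+
-+    <para>
-+      As a sketch only, freeing a slice might look like this, assuming
-+      <varname>offset</varname> was returned by one of the ioctls that
-+      write to the pool (see above):
-+    </para>
-+
-+    <programlisting><![CDATA[
-+struct kdbus_cmd_free cmd = {
-+  .size = sizeof(cmd),
-+  .offset = offset,  /* as returned by, e.g., KDBUS_CMD_RECV */
-+};
-+
-+if (ioctl(conn_fd, KDBUS_CMD_FREE, &cmd) < 0)
-+  perror("KDBUS_CMD_FREE");
-+    ]]></programlisting>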
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Return value</title>
-+    <para>
-+      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
-+      on error, <errorcode>-1</errorcode> is returned, and
-+      <varname>errno</varname> is set to indicate the error.
-+      If the issued ioctl is illegal for the file descriptor used,
-+      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
-+    </para>
-+
-+    <refsect2>
-+      <title>
-+        <constant>KDBUS_CMD_FREE</constant> may fail with the following
-+        errors
-+      </title>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>ENXIO</constant></term>
-+          <listitem><para>
-+            No pool slice found at given offset.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            Invalid flags provided.
-+          </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>EINVAL</constant></term>
-+          <listitem><para>
-+            The offset is valid, but the user is not allowed to free the slice.
-+            This happens, for example, if the offset was retrieved with
-+            <constant>KDBUS_RECV_PEEK</constant>.
-+          </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>mmap</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>munmap</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+</refentry>
-diff --git a/Documentation/kdbus/kdbus.xml b/Documentation/kdbus/kdbus.xml
-new file mode 100644
-index 0000000..d8e7400
---- /dev/null
-+++ b/Documentation/kdbus/kdbus.xml
-@@ -0,0 +1,1012 @@
-+<?xml version='1.0'?> <!--*-nxml-*-->
-+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
-+        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
-+
-+<refentry id="kdbus">
-+
-+  <refentryinfo>
-+    <title>kdbus</title>
-+    <productname>kdbus</productname>
-+  </refentryinfo>
-+
-+  <refmeta>
-+    <refentrytitle>kdbus</refentrytitle>
-+    <manvolnum>7</manvolnum>
-+  </refmeta>
-+
-+  <refnamediv>
-+    <refname>kdbus</refname>
-+    <refpurpose>Kernel Message Bus</refpurpose>
-+  </refnamediv>
-+
-+  <refsect1>
-+    <title>Synopsis</title>
-+    <para>
-+      kdbus is an inter-process communication bus system controlled by the
-+      kernel. It provides user-space with an API to create buses and send
-+      unicast and multicast messages to one, or many, peers connected to the
-+      same bus. It does not enforce any layout on the transmitted data, but
-+      only provides the transport layer used for message interchange between
-+      peers.
-+    </para>
-+    <para>
-+      This set of man-pages gives a comprehensive overview of the kernel-level
-+      API, with all ioctl commands, associated structs and bit masks. However,
-+      most people will not use this API level directly, but rather let one of
-+      the high-level abstraction libraries help them integrate D-Bus
-+      functionality into their applications.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Description</title>
-+    <para>
-+      kdbus provides a pseudo filesystem called <emphasis>kdbusfs</emphasis>,
-+      which is usually mounted on <filename>/sys/fs/kdbus</filename>. Bus
-+      primitives can be accessed as files and sub-directories underneath this
-+      mount-point. Any advanced operations are done via
-+      <function>ioctl()</function> on files created by
-+      <emphasis>kdbusfs</emphasis>. Multiple mount-points of
-+      <emphasis>kdbusfs</emphasis> are independent of each other. This allows
-+      namespacing of kdbus by mounting a new instance of
-+      <emphasis>kdbusfs</emphasis> in a new mount-namespace. kdbus calls these
-+      mount instances domains and each bus belongs to exactly one domain.
-+    </para>
-+
-+    <para>
-+      kdbus was designed as a transport layer for D-Bus, but is in no way
-+      limited, nor controlled by the D-Bus protocol specification. The D-Bus
-+      protocol is one possible application layer on top of kdbus.
-+    </para>
-+
-+    <para>
-+      For the general D-Bus protocol specification, its payload format, its
-+      marshaling, and its communication semantics, please refer to the
-+      <ulink url="http://dbus.freedesktop.org/doc/dbus-specification.html">
-+      D-Bus specification</ulink>.
-+    </para>
-+
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Terminology</title>
-+
-+    <refsect2>
-+      <title>Domain</title>
-+      <para>
-+        A domain is a <emphasis>kdbusfs</emphasis> mount-point containing all
-+        the bus primitives. Each domain is independent, and separate domains
-+        do not affect each other.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Bus</title>
-+      <para>
-+        A bus is a named object inside a domain. Clients exchange messages
-+        over a bus. Separate buses have no connection to each other;
-+        messages can only be exchanged on the same bus. The default endpoint of
-+        a bus, to which clients establish connections, is the "bus" file
-+        /sys/fs/kdbus/&lt;bus name&gt;/bus.
-+        Common operating system setups create one "system bus" per system,
-+        and one "user bus" for every logged-in user. Applications or services
-+        may create their own private buses. The kernel driver does not
-+        distinguish between different bus types; they are all handled the same
-+        way. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Endpoint</title>
-+      <para>
-+        An endpoint provides a file to talk to a bus. Opening an endpoint
-+        creates a new connection to the bus to which the endpoint belongs. All
-+        endpoints have unique names and are accessible as files underneath the
-+        directory of a bus, e.g., /sys/fs/kdbus/&lt;bus&gt;/&lt;endpoint&gt;.
-+        Every bus has a default endpoint called "bus".
-+        A bus can optionally offer additional endpoints with custom names
-+        to provide restricted access to the bus. Custom endpoints carry
-+        additional policy which can be used to create sandboxes with
-+        locked-down, limited, filtered access to a bus. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Connection</title>
-+      <para>
-+        A connection to a bus is created by opening an endpoint file of a
-+        bus. Every ordinary client connection has a unique identifier on the
-+        bus and can address messages to every other connection on the same
-+        bus by using the peer's connection ID as the destination. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Pool</title>
-+      <para>
-+        Each connection allocates a piece of shmem-backed memory that is
-+        used to receive messages and answers to ioctl commands from the kernel.
-+        It is never used to send anything to the kernel. In order to access that
-+        memory, an application must mmap() it into its address space. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Well-known Name</title>
-+      <para>
-+        A connection can, in addition to its implicit unique connection ID,
-+        request the ownership of a textual well-known name. Well-known names are
-+        noted in reverse-domain notation, such as com.example.service1. A
-+        connection that offers a service on a bus is usually reached by its
-+        well-known name. An analogy of connection ID and well-known name is an
-+        IP address and a DNS name associated with that address. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Message</title>
-+      <para>
-+        Connections can exchange messages with other connections by addressing
-+        the peers with their connection ID or well-known name. A message
-+        consists of a message header with information on how to route the
-+        message, and the message payload, which is a logical byte stream of
-+        arbitrary size. Messages can carry additional file descriptors to be
-+        passed from one connection to another, just like passing file
-+        descriptors over UNIX domain sockets. Every connection can specify which
-+        set of metadata the kernel should attach to the message when it is
-+        delivered to the receiving connection. Metadata contains information
-+        like: system time stamps, UID, GID, TID, proc-starttime, well-known
-+        names, process comm, process exe, process argv, cgroup, capabilities,
-+        seclabel, audit session, loginuid and the connection's human-readable
-+        name. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Item</title>
-+      <para>
-+        The API of kdbus implements the notion of items, submitted through and
-+        returned by most ioctls, and stored inside data structures in the
-+        connection's pool. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Broadcast, signal, filter, match</title>
-+      <para>
-+        Signals are messages that a receiver opts in for by installing a blob of
-+        bytes, called a 'match'. Signal messages must always carry a
-+        counterpart blob, called a 'filter', and signals are only delivered
-+        to peers that have a match that whitelists the message's filter. Senders
-+        of signal messages can use either a single connection ID as receiver,
-+        or the special connection ID
-+        <constant>KDBUS_DST_ID_BROADCAST</constant> to potentially send it to
-+        all connections of a bus, following the logic described above. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.match</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        and
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Policy</title>
-+      <para>
-+        A policy is a set of rules that define which connections can see, talk
-+        to, or register a well-known name on the bus. A policy is attached to
-+        buses and custom endpoints, and modified by policy holder connections or
-+        owners of custom endpoints. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.policy</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Privileged bus users</title>
-+      <para>
-+        A user connecting to the bus is considered privileged if it is either
-+        the creator of the bus, or if it has the CAP_IPC_OWNER capability flag
-+        set. See
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for more details.
-+      </para>
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Bus Layout</title>
-+
-+    <para>
-+      A <emphasis>bus</emphasis> provides and defines an environment that peers
-+      can connect to for message interchange. A bus is created via the kdbus
-+      control interface and can be modified by the bus creator. It applies
-+      the policy that controls all bus operations. The bus creator itself
-+      does not participate as a peer. To establish a peer
-+      <emphasis>connection</emphasis>, you have to open one of the
-+      <emphasis>endpoints</emphasis> of a bus. Each bus provides a default
-+      endpoint, but further endpoints can be created on demand. Endpoints
-+      are used to apply additional policies to all connections on that
-+      endpoint. Thus, they provide additional filters to further restrict
-+      access of specific connections to the bus.
-+    </para>
-+
-+    <para>
-+      The following diagram shows an example bus layout:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+                                  Bus Creator
-+                                       |
-+                                       |
-+                                    +-----+
-+                                    | Bus |
-+                                    +-----+
-+                                       |
-+                    __________________/ \__________________
-+                   /                                       \
-+                   |                                       |
-+             +----------+                             +----------+
-+             | Endpoint |                             | Endpoint |
-+             +----------+                             +----------+
-+         _________/|\_________                   _________/|\_________
-+        /          |          \                 /          |          \
-+        |          |          |                 |          |          |
-+        |          |          |                 |          |          |
-+   Connection  Connection  Connection      Connection  Connection  Connection
-+    ]]></programlisting>
-+
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Data structures and interconnections</title>
-+    <programlisting><![CDATA[
-+  +--------------------------------------------------------------------------+
-+  | Domain (Mount Point)                                                     |
-+  | /sys/fs/kdbus/control                                                    |
-+  | +----------------------------------------------------------------------+ |
-+  | | Bus (System Bus)                                                     | |
-+  | | /sys/fs/kdbus/0-system/                                              | |
-+  | | +-------------------------------+ +--------------------------------+ | |
-+  | | | Endpoint                      | | Endpoint                       | | |
-+  | | | /sys/fs/kdbus/0-system/bus    | | /sys/fs/kdbus/0-system/ep.app  | | |
-+  | | +-------------------------------+ +--------------------------------+ | |
-+  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
-+  | | | Connection   | | Connection   | | Connection   | | Connection    | | |
-+  | | | :1.22        | | :1.25        | | :1.55        | | :1.81         | | |
-+  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
-+  | +----------------------------------------------------------------------+ |
-+  |                                                                          |
-+  | +----------------------------------------------------------------------+ |
-+  | | Bus (User Bus for UID 2702)                                          | |
-+  | | /sys/fs/kdbus/2702-user/                                             | |
-+  | | +-------------------------------+ +--------------------------------+ | |
-+  | | | Endpoint                      | | Endpoint                       | | |
-+  | | | /sys/fs/kdbus/2702-user/bus   | | /sys/fs/kdbus/2702-user/ep.app | | |
-+  | | +-------------------------------+ +--------------------------------+ | |
-+  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
-+  | | | Connection   | | Connection   | | Connection   | | Connection    | | |
-+  | | | :1.22        | | :1.25        | | :1.55        | | :1.81         | | |
-+  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
-+  | +----------------------------------------------------------------------+ |
-+  +--------------------------------------------------------------------------+
-+    ]]></programlisting>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>Metadata</title>
-+
-+    <refsect2>
-+      <title>When metadata is collected</title>
-+      <para>
-+        kdbus records data about the system in certain situations. Such metadata
-+        can refer to the currently active process (creds, PIDs, current user
-+        groups, process names and its executable path, cgroup membership,
-+        capabilities, security label and audit information), connection
-+        information (description string, currently owned names) and time stamps.
-+      </para>
-+      <para>
-+        Metadata is collected at the following times.
-+      </para>
-+
-+      <itemizedlist>
-+        <listitem><para>
-+          When a bus is created (<constant>KDBUS_CMD_MAKE</constant>),
-+          information about the calling task is collected. This data is returned
-+          by the kernel via the <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant>
-+          call.
-+        </para></listitem>
-+
-+        <listitem>
-+          <para>
-+            When a connection is created (<constant>KDBUS_CMD_HELLO</constant>),
-+            information about the calling task is collected. Alternatively, a
-+            privileged connection may provide 'faked' information about
-+            credentials, PIDs and security labels which will be stored instead.
-+            This data is returned by the kernel as information on a connection
-+            (<constant>KDBUS_CMD_CONN_INFO</constant>). Only metadata that
-+            the connection has allowed to be sent (by setting its bit in
-+            <varname>attach_flags_send</varname>) will be exported in this
-+            way.
-+          </para>
-+        </listitem>
-+
-+        <listitem>
-+          <para>
-+            When a message is sent (<constant>KDBUS_CMD_SEND</constant>),
-+            information about the sending task and the sending connection is
-+            collected. This metadata will be attached to the message when it
-+            arrives in the receiver's pool. If the connection sending the
-+            message installed faked credentials (see
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>),
-+            the message will not be augmented by any information about the
-+            currently sending task. Note that only metadata that was requested
-+            by the receiving connection will be collected and attached to
-+            messages.
-+          </para>
-+        </listitem>
-+      </itemizedlist>
-+
-+      <para>
-+        Which metadata items are actually delivered depends on the following
-+        sets and masks:
-+      </para>
-+
-+      <itemizedlist>
-+        <listitem><para>
-+          (a) the system-wide kernel module creds mask
-+          (module parameter <varname>attach_flags_mask</varname>)
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (b) the per-connection send creds mask, set by the connecting client
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (c) the per-connection receive creds mask, set by the connecting
-+          client
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (d) the per-bus minimal creds mask, set by the bus creator
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (e) the per-bus owner creds mask, set by the bus creator
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (f) the mask specified when querying creds of a bus peer
-+        </para></listitem>
-+
-+        <listitem><para>
-+          (g) the mask specified when querying creds of a bus owner
-+        </para></listitem>
-+      </itemizedlist>
-+
-+      <para>
-+        With the following rules:
-+      </para>
-+
-+      <itemizedlist>
-+        <listitem>
-+          <para>
-+            [1] The creds attached to messages are determined as
-+            <constant>a &amp; b &amp; c</constant>.
-+          </para>
-+        </listitem>
-+
-+        <listitem>
-+          <para>
-+            [2] When connecting to a bus (<constant>KDBUS_CMD_HELLO</constant>),
-+            and <constant>~b &amp; d != 0</constant>, the call will fail
-+            with <errorcode>-1</errorcode>, and <varname>errno</varname> is
-+            set to <constant>ECONNREFUSED</constant>.
-+          </para>
-+        </listitem>
-+
-+        <listitem>
-+          <para>
-+            [3] When querying creds of a bus peer, the creds returned are
-+            <constant>a &amp; b &amp; f</constant>.
-+          </para>
-+        </listitem>
-+
-+        <listitem>
-+          <para>
-+            [4] When querying creds of a bus owner, the creds returned are
-+            <constant>a &amp; e &amp; g</constant>.
-+          </para>
-+        </listitem>
-+      </itemizedlist>
-+
-+      <para>
-+        Hence, programs might not always get all the metadata items they
-+        requested. Code must be written so that it can cope with this fact,
-+        as the sketch below illustrates.
-+      </para>
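-+
-+      <para>
-+        As a rough illustration of rules [1] and [2] (a sketch only, not a
-+        copy of the kernel implementation), the mask arithmetic could be
-+        expressed in C like this:
-+      </para>
-+
-+      <programlisting><![CDATA[
-+#include <linux/types.h>
-+
-+/* Rule [1]: metadata actually attached to a message */
-+__u64 effective_creds(__u64 a /* kmod mask */,
-+                      __u64 b /* sender's send mask */,
-+                      __u64 c /* receiver's receive mask */)
-+{
-+	return a & b & c;
-+}
-+
-+/* Rule [2]: a HELLO is refused if the client does not offer
-+ * everything the per-bus minimal creds mask (d) requires. */
-+int hello_allowed(__u64 b, __u64 d)
-+{
-+	return (~b & d) == 0;
-+}
-+      ]]></programlisting>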
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Benefits and heads-up</title>
-+      <para>
-+        Attaching metadata to messages has two major benefits.
-+
-+        <itemizedlist>
-+          <listitem>
-+            <para>
-+              Metadata attached to messages is gathered at the moment the
-+              other side calls <constant>KDBUS_CMD_SEND</constant> or,
-+              respectively, when the kernel notification is generated. There
-+              is no need for the receiving peer to retrieve information
-+              about the task in a second step. This closes a race gap that
-+              would otherwise be inherent.
-+            </para>
-+          </listitem>
-+          <listitem>
-+            <para>
-+              As metadata is delivered along with messages in the same data
-+              blob, no extra calls into the kernel are needed to gather it.
-+            </para>
-+          </listitem>
-+        </itemizedlist>
-+
-+        Note, however, that collecting metadata does come at a price in
-+        performance, so developers should carefully assess which metadata
-+        to really opt in to. As a best practice, data that is not needed as
-+        part of a message should not be requested by the connection in the
-+        first place (see <varname>attach_flags_recv</varname> in
-+        <constant>KDBUS_CMD_HELLO</constant>).
-+      </para>
-+    </refsect2>
-+
-+    <refsect2>
-+      <title>Attach flags for metadata items</title>
-+      <para>
-+        The kernel is told which metadata items to attach to the
-+        aforementioned commands through a bitmask. In such bitmasks, the
-+        following <emphasis>attach flags</emphasis> are currently supported.
-+        Both the <varname>attach_flags_recv</varname> and
-+        <varname>attach_flags_send</varname> fields of
-+        <type>struct kdbus_cmd_hello</type>, as well as the payload of the
-+        <constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant> and
-+        <constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant> items follow this
-+        scheme.
-+      </para>
-+
-+      <variablelist>
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_TIMESTAMP</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_TIMESTAMP</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_CREDS</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_CREDS</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_PIDS</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_PIDS</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_AUXGROUPS</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_AUXGROUPS</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_NAMES</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_OWNED_NAME</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_TID_COMM</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_TID_COMM</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_PID_COMM</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_PID_COMM</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_EXE</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_EXE</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_CMDLINE</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_CMDLINE</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_CGROUP</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_CGROUP</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_CAPS</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_CAPS</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_SECLABEL</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_SECLABEL</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_AUDIT</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_AUDIT</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+
-+        <varlistentry>
-+          <term><constant>KDBUS_ATTACH_CONN_DESCRIPTION</constant></term>
-+            <listitem><para>
-+              Requests the attachment of an item of type
-+              <constant>KDBUS_ITEM_CONN_DESCRIPTION</constant>.
-+            </para></listitem>
-+        </varlistentry>
-+      </variablelist>
-+
-+      <para>
-+        Please refer to
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+        for detailed information about the layout and payload of items and
-+        how each kind of metadata is used.
-+      </para>
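-+
-+      <para>
-+        As a short example (a sketch; the constants are those defined in the
-+        kdbus UAPI header), a connection interested in timestamps,
-+        credentials and PIDs on received messages would compose its bitmask
-+        as:
-+      </para>
-+
-+      <programlisting><![CDATA[
-+__u64 attach_flags_recv = KDBUS_ATTACH_TIMESTAMP |
-+                          KDBUS_ATTACH_CREDS |
-+                          KDBUS_ATTACH_PIDS;
-+      ]]></programlisting>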
-+    </refsect2>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>The ioctl interface</title>
-+
-+    <para>
-+      As stated in the 'synopsis' section above, application developers are
-+      strongly encouraged to use kdbus through one of the high-level D-Bus
-+      abstraction libraries, rather than using the low-level API directly.
-+    </para>
-+
-+    <para>
-+      kdbus on the kernel level exposes its functions exclusively through
-+      <citerefentry>
-+        <refentrytitle>ioctl</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>,
-+      employed on file descriptors returned by
-+      <citerefentry>
-+        <refentrytitle>open</refentrytitle>
-+        <manvolnum>2</manvolnum>
-+      </citerefentry>
-+      on pseudo files exposed by
-+      <citerefentry>
-+        <refentrytitle>kdbus.fs</refentrytitle>
-+        <manvolnum>7</manvolnum>
-+      </citerefentry>.
-+    </para>
-+    <para>
-+      Following is a list of all the ioctls, along with the command structs
-+      they must be used with.
-+    </para>
-+
-+    <informaltable frame="none">
-+      <tgroup cols="3" colsep="1">
-+        <thead>
-+          <row>
-+            <entry>ioctl signature</entry>
-+            <entry>command</entry>
-+            <entry>transported struct</entry>
-+          </row>
-+        </thead>
-+        <tbody>
-+          <row>
-+            <entry><constant>0x40189500</constant></entry>
-+            <entry><constant>KDBUS_CMD_BUS_MAKE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40189510</constant></entry>
-+            <entry><constant>KDBUS_CMD_ENDPOINT_MAKE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0xc0609580</constant></entry>
-+            <entry><constant>KDBUS_CMD_HELLO</constant></entry>
-+            <entry><type>struct kdbus_cmd_hello *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40189582</constant></entry>
-+            <entry><constant>KDBUS_CMD_BYEBYE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40389590</constant></entry>
-+            <entry><constant>KDBUS_CMD_SEND</constant></entry>
-+            <entry><type>struct kdbus_cmd_send *</type></entry>
-+          </row><row>
-+            <entry><constant>0x80409591</constant></entry>
-+            <entry><constant>KDBUS_CMD_RECV</constant></entry>
-+            <entry><type>struct kdbus_cmd_recv *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40209583</constant></entry>
-+            <entry><constant>KDBUS_CMD_FREE</constant></entry>
-+            <entry><type>struct kdbus_cmd_free *</type></entry>
-+          </row><row>
-+            <entry><constant>0x401895a0</constant></entry>
-+            <entry><constant>KDBUS_CMD_NAME_ACQUIRE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x401895a1</constant></entry>
-+            <entry><constant>KDBUS_CMD_NAME_RELEASE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x80289586</constant></entry>
-+            <entry><constant>KDBUS_CMD_LIST</constant></entry>
-+            <entry><type>struct kdbus_cmd_list *</type></entry>
-+          </row><row>
-+            <entry><constant>0x80309584</constant></entry>
-+            <entry><constant>KDBUS_CMD_CONN_INFO</constant></entry>
-+            <entry><type>struct kdbus_cmd_info *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40209551</constant></entry>
-+            <entry><constant>KDBUS_CMD_UPDATE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x80309585</constant></entry>
-+            <entry><constant>KDBUS_CMD_BUS_CREATOR_INFO</constant></entry>
-+            <entry><type>struct kdbus_cmd_info *</type></entry>
-+          </row><row>
-+            <entry><constant>0x40189511</constant></entry>
-+            <entry><constant>KDBUS_CMD_ENDPOINT_UPDATE</constant></entry>
-+            <entry><type>struct kdbus_cmd *</type></entry>
-+          </row><row>
-+            <entry><constant>0x402095b0</constant></entry>
-+            <entry><constant>KDBUS_CMD_MATCH_ADD</constant></entry>
-+            <entry><type>struct kdbus_cmd_match *</type></entry>
-+          </row><row>
-+            <entry><constant>0x402095b1</constant></entry>
-+            <entry><constant>KDBUS_CMD_MATCH_REMOVE</constant></entry>
-+            <entry><type>struct kdbus_cmd_match *</type></entry>
-+          </row>
-+        </tbody>
-+      </tgroup>
-+    </informaltable>
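-+
-+    <para>
-+      The numbers in the first column follow the conventional Linux ioctl
-+      encoding: the transfer direction and the size of the transported
-+      struct are encoded together with the kdbus magic byte
-+      (<constant>0x95</constant>) and a command number. As an illustration
-+      (a sketch; it assumes <type>struct kdbus_cmd</type>, defined elsewhere
-+      in the kdbus UAPI header, is 24 bytes, matching the size encoded in
-+      the number), the first table entry can be reconstructed as:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <linux/ioctl.h>
-+
-+/* dir=_IOC_WRITE, size=0x18, type=0x95, nr=0x00  ->  0x40189500 */
-+#define EXAMPLE_CMD_BUS_MAKE _IOW(KDBUS_IOCTL_MAGIC, 0x00, struct kdbus_cmd)
-+    ]]></programlisting>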
-+
-+    <para>
-+      Depending on the type of <emphasis>kdbusfs</emphasis> node that was
-+      opened and which ioctls have been executed on a file descriptor
-+      before, a different subset of ioctl commands is allowed. A minimal
-+      usage sketch follows the list below.
-+    </para>
-+
-+    <itemizedlist>
-+      <listitem>
-+        <para>
-+          On a file descriptor resulting from opening a
-+          <emphasis>control node</emphasis>, only the
-+          <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl may be executed.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          On a file descriptor resulting from opening a
-+          <emphasis>bus endpoint node</emphasis>, only the
-+          <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> and
-+          <constant>KDBUS_CMD_HELLO</constant> ioctls may be executed.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          A file descriptor that was used to create a bus
-+          (via <constant>KDBUS_CMD_BUS_MAKE</constant>) is called a
-+          <emphasis>bus owner</emphasis> file descriptor. The bus will be
-+          active as long as the file descriptor is kept open.
-+          A bus owner file descriptor cannot be used to
-+          issue any further ioctls. As soon as
-+          <citerefentry>
-+            <refentrytitle>close</refentrytitle>
-+            <manvolnum>2</manvolnum>
-+          </citerefentry>
-+          is called on it, the bus will be shut down, along with all associated
-+          endpoints and connections. See
-+          <citerefentry>
-+            <refentrytitle>kdbus.bus</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for more details.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          A file descriptor that was used to create an endpoint
-+          (via <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>) is called an
-+          <emphasis>endpoint owner</emphasis> file descriptor. The endpoint
-+          will be active as long as the file descriptor is kept open.
-+          An endpoint owner file descriptor can only be used
-+          to update details of an endpoint through the
-+          <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> ioctl. As soon as
-+          <citerefentry>
-+            <refentrytitle>close</refentrytitle>
-+            <manvolnum>2</manvolnum>
-+          </citerefentry>
-+          is called on it, the endpoint will be removed from the bus, and all
-+          connections that are connected to the bus through it are shut down.
-+          See
-+          <citerefentry>
-+            <refentrytitle>kdbus.endpoint</refentrytitle>
-+            <manvolnum>7</manvolnum>
-+          </citerefentry>
-+          for more details.
-+        </para>
-+      </listitem>
-+      <listitem>
-+        <para>
-+          A file descriptor that was used to create a connection
-+          (via <constant>KDBUS_CMD_HELLO</constant>) is called a
-+          <emphasis>connection owner</emphasis> file descriptor. The connection
-+          will be active as long as the file descriptor is kept open.
-+          A connection owner file descriptor may be used to
-+          issue any of the following ioctls.
-+        </para>
-+
-+        <itemizedlist>
-+          <listitem><para>
-+            <constant>KDBUS_CMD_UPDATE</constant> to tweak details of the
-+            connection. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_BYEBYE</constant> to shut down a connection
-+            without losing messages. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_FREE</constant> to free a slice of memory in
-+            the pool. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.pool</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_CONN_INFO</constant> to retrieve information
-+            on other connections on the bus. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant> to retrieve
-+            information on the bus creator. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.connection</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_LIST</constant> to retrieve a list of
-+            currently active well-known names and unique IDs on the bus. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.name</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_SEND</constant> and
-+            <constant>KDBUS_CMD_RECV</constant> to send or receive a message.
-+            See
-+            <citerefentry>
-+              <refentrytitle>kdbus.message</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_NAME_ACQUIRE</constant> and
-+            <constant>KDBUS_CMD_NAME_RELEASE</constant> to acquire or release
-+            a well-known name on the bus. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.name</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+
-+          <listitem><para>
-+            <constant>KDBUS_CMD_MATCH_ADD</constant> and
-+            <constant>KDBUS_CMD_MATCH_REMOVE</constant> to add or remove
-+            a match for signal messages. See
-+            <citerefentry>
-+              <refentrytitle>kdbus.match</refentrytitle>
-+              <manvolnum>7</manvolnum>
-+            </citerefentry>.
-+          </para></listitem>
-+        </itemizedlist>
-+      </listitem>
-+    </itemizedlist>
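-+
-+    <para>
-+      The following minimal sketch (error handling abbreviated; it assumes
-+      the system bus default endpoint from the diagram above, and the ioctl
-+      numbers from the table) shows how a connection is typically
-+      established and its pool mapped:
-+    </para>
-+
-+    <programlisting><![CDATA[
-+#include <fcntl.h>
-+#include <sys/ioctl.h>
-+#include <sys/mman.h>
-+#include <linux/kdbus.h>
-+
-+int connect_to_bus(void)
-+{
-+	struct kdbus_cmd_hello hello = {
-+		.size = sizeof(hello),
-+		.pool_size = 1024 * 1024,
-+	};
-+	int fd;
-+	void *pool;
-+
-+	fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR | O_CLOEXEC);
-+	if (fd < 0 || ioctl(fd, KDBUS_CMD_HELLO, &hello) < 0)
-+		return -1;
-+
-+	/* received messages are placed in the pool; map it read-only */
-+	pool = mmap(NULL, hello.pool_size, PROT_READ, MAP_SHARED, fd, 0);
-+	(void)pool;
-+	return fd;
-+}
-+    ]]></programlisting>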
-+
-+    <para>
-+      These ioctls, along with the structs they transport, are explained in
-+      detail in the other documents linked to in the "See Also" section below.
-+    </para>
-+  </refsect1>
-+
-+  <refsect1>
-+    <title>See Also</title>
-+    <simplelist type="inline">
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.bus</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.connection</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.endpoint</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.fs</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.item</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.message</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.name</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>kdbus.pool</refentrytitle>
-+          <manvolnum>7</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>ioctl</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>mmap</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>open</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <citerefentry>
-+          <refentrytitle>close</refentrytitle>
-+          <manvolnum>2</manvolnum>
-+        </citerefentry>
-+      </member>
-+      <member>
-+        <ulink url="http://freedesktop.org/wiki/Software/dbus">D-Bus</ulink>
-+      </member>
-+    </simplelist>
-+  </refsect1>
-+
-+</refentry>
-diff --git a/Documentation/kdbus/stylesheet.xsl b/Documentation/kdbus/stylesheet.xsl
-new file mode 100644
-index 0000000..52565ea
---- /dev/null
-+++ b/Documentation/kdbus/stylesheet.xsl
-@@ -0,0 +1,16 @@
-+<?xml version="1.0" encoding="UTF-8"?>
-+<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
-+	<param name="chunk.quietly">1</param>
-+	<param name="funcsynopsis.style">ansi</param>
-+	<param name="funcsynopsis.tabular.threshold">80</param>
-+	<param name="callout.graphics">0</param>
-+	<param name="paper.type">A4</param>
-+	<param name="generate.section.toc.level">2</param>
-+	<param name="use.id.as.filename">1</param>
-+	<param name="citerefentry.link">1</param>
-+	<strip-space elements="*"/>
-+	<template name="generate.citerefentry.link">
-+		<value-of select="refentrytitle"/>
-+		<text>.html</text>
-+	</template>
-+</stylesheet>
-diff --git a/MAINTAINERS b/MAINTAINERS
-index d8afd29..02f7668 100644
---- a/MAINTAINERS
-+++ b/MAINTAINERS
-@@ -5585,6 +5585,19 @@ S:	Maintained
- F:	Documentation/kbuild/kconfig-language.txt
- F:	scripts/kconfig/
- 
-+KDBUS
-+M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+M:	Daniel Mack <daniel@zonque.org>
-+M:	David Herrmann <dh.herrmann@googlemail.com>
-+M:	Djalal Harouni <tixxdz@opendz.org>
-+L:	linux-kernel@vger.kernel.org
-+S:	Maintained
-+F:	ipc/kdbus/*
-+F:	samples/kdbus/*
-+F:	Documentation/kdbus/*
-+F:	include/uapi/linux/kdbus.h
-+F:	tools/testing/selftests/kdbus/
-+
- KDUMP
- M:	Vivek Goyal <vgoyal@redhat.com>
- M:	Haren Myneni <hbabu@us.ibm.com>
-diff --git a/Makefile b/Makefile
-index f5c8983..a1c8d57 100644
---- a/Makefile
-+++ b/Makefile
-@@ -1343,6 +1343,7 @@ $(help-board-dirs): help-%:
- %docs: scripts_basic FORCE
- 	$(Q)$(MAKE) $(build)=scripts build_docproc
- 	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
-+	$(Q)$(MAKE) $(build)=Documentation/kdbus $@
- 
- else # KBUILD_EXTMOD
- 
-diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
-index 1a0006a..4842a98 100644
---- a/include/uapi/linux/Kbuild
-+++ b/include/uapi/linux/Kbuild
-@@ -215,6 +215,7 @@ header-y += ixjuser.h
- header-y += jffs2.h
- header-y += joystick.h
- header-y += kcmp.h
-+header-y += kdbus.h
- header-y += kdev_t.h
- header-y += kd.h
- header-y += kernelcapi.h
-diff --git a/include/uapi/linux/kdbus.h b/include/uapi/linux/kdbus.h
-new file mode 100644
-index 0000000..4fc44cb
---- /dev/null
-+++ b/include/uapi/linux/kdbus.h
-@@ -0,0 +1,984 @@
-+/*
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef _UAPI_KDBUS_H_
-+#define _UAPI_KDBUS_H_
-+
-+#include <linux/ioctl.h>
-+#include <linux/types.h>
-+
-+#define KDBUS_IOCTL_MAGIC		0x95
-+#define KDBUS_SRC_ID_KERNEL		(0)
-+#define KDBUS_DST_ID_NAME		(0)
-+#define KDBUS_MATCH_ID_ANY		(~0ULL)
-+#define KDBUS_DST_ID_BROADCAST		(~0ULL)
-+#define KDBUS_FLAG_NEGOTIATE		(1ULL << 63)
-+
-+/**
-+ * struct kdbus_notify_id_change - connection ID change message
-+ * @id:			Affected connection ID
-+ * @flags:		Flags field from KDBUS_HELLO_*
-+ *
-+ * Sent from kernel to userspace when a connection ID appears on or
-+ * vanishes from the bus.
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_ID_ADD
-+ *   KDBUS_ITEM_ID_REMOVE
-+ */
-+struct kdbus_notify_id_change {
-+	__u64 id;
-+	__u64 flags;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_notify_name_change - name registry change message
-+ * @old_id:		ID and flags of former owner of a name
-+ * @new_id:		ID and flags of new owner of a name
-+ * @name:		Well-known name
-+ *
-+ * Sent from kernel to userspace when the owner or activator of
-+ * a well-known name changes.
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_NAME_ADD
-+ *   KDBUS_ITEM_NAME_REMOVE
-+ *   KDBUS_ITEM_NAME_CHANGE
-+ */
-+struct kdbus_notify_name_change {
-+	struct kdbus_notify_id_change old_id;
-+	struct kdbus_notify_id_change new_id;
-+	char name[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_creds - process credentials
-+ * @uid:		User ID
-+ * @euid:		Effective UID
-+ * @suid:		Saved UID
-+ * @fsuid:		Filesystem UID
-+ * @gid:		Group ID
-+ * @egid:		Effective GID
-+ * @sgid:		Saved GID
-+ * @fsgid:		Filesystem GID
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_CREDS
-+ */
-+struct kdbus_creds {
-+	__u64 uid;
-+	__u64 euid;
-+	__u64 suid;
-+	__u64 fsuid;
-+	__u64 gid;
-+	__u64 egid;
-+	__u64 sgid;
-+	__u64 fsgid;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_pids - process identifiers
-+ * @pid:		Process ID
-+ * @tid:		Thread ID
-+ * @ppid:		Parent process ID
-+ *
-+ * The PID, TID and parent PID (PPID) of a process.
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_PIDS
-+ */
-+struct kdbus_pids {
-+	__u64 pid;
-+	__u64 tid;
-+	__u64 ppid;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_caps - process capabilities
-+ * @last_cap:	Highest currently known capability bit
-+ * @caps:	Variable number of 32-bit capabilities flags
-+ *
-+ * Contains a variable number of 32-bit capabilities flags.
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_CAPS
-+ */
-+struct kdbus_caps {
-+	__u32 last_cap;
-+	__u32 caps[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_audit - audit information
-+ * @sessionid:		The audit session ID
-+ * @loginuid:		The audit login uid
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_AUDIT
-+ */
-+struct kdbus_audit {
-+	__u32 sessionid;
-+	__u32 loginuid;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_timestamp
-+ * @seqnum:		Global per-domain message sequence number
-+ * @monotonic_ns:	Monotonic timestamp, in nanoseconds
-+ * @realtime_ns:	Realtime timestamp, in nanoseconds
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_TIMESTAMP
-+ */
-+struct kdbus_timestamp {
-+	__u64 seqnum;
-+	__u64 monotonic_ns;
-+	__u64 realtime_ns;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_vec - I/O vector for kdbus payload items
-+ * @size:		The size of the vector
-+ * @address:		Memory address of data buffer
-+ * @offset:		Offset in the in-message payload memory,
-+ *			relative to the message head
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_PAYLOAD_VEC, KDBUS_ITEM_PAYLOAD_OFF
-+ */
-+struct kdbus_vec {
-+	__u64 size;
-+	union {
-+		__u64 address;
-+		__u64 offset;
-+	};
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_bloom_parameter - bus-wide bloom parameters
-+ * @size:		Size of the bit field in bytes (m / 8)
-+ * @n_hash:		Number of hash functions used (k)
-+ */
-+struct kdbus_bloom_parameter {
-+	__u64 size;
-+	__u64 n_hash;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_bloom_filter - bloom filter containing n elements
-+ * @generation:		Generation of the element set in the filter
-+ * @data:		Bit field, multiple of 8 bytes
-+ */
-+struct kdbus_bloom_filter {
-+	__u64 generation;
-+	__u64 data[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_memfd - a kdbus memfd
-+ * @start:		The offset into the memfd where the segment starts
-+ * @size:		The size of the memfd segment
-+ * @fd:			The file descriptor number
-+ * @__pad:		Padding to ensure proper alignment and size
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_PAYLOAD_MEMFD
-+ */
-+struct kdbus_memfd {
-+	__u64 start;
-+	__u64 size;
-+	int fd;
-+	__u32 __pad;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * struct kdbus_name - a registered well-known name with its flags
-+ * @flags:		Flags from KDBUS_NAME_*
-+ * @name:		Well-known name
-+ *
-+ * Attached to:
-+ *   KDBUS_ITEM_OWNED_NAME
-+ */
-+struct kdbus_name {
-+	__u64 flags;
-+	char name[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * enum kdbus_policy_access_type - permissions of a policy record
-+ * @_KDBUS_POLICY_ACCESS_NULL:	Uninitialized/invalid
-+ * @KDBUS_POLICY_ACCESS_USER:	Grant access to a uid
-+ * @KDBUS_POLICY_ACCESS_GROUP:	Grant access to gid
-+ * @KDBUS_POLICY_ACCESS_WORLD:	World-accessible
-+ */
-+enum kdbus_policy_access_type {
-+	_KDBUS_POLICY_ACCESS_NULL,
-+	KDBUS_POLICY_ACCESS_USER,
-+	KDBUS_POLICY_ACCESS_GROUP,
-+	KDBUS_POLICY_ACCESS_WORLD,
-+};
-+
-+/**
-+ * enum kdbus_policy_type - mode flags
-+ * @KDBUS_POLICY_OWN:		Allow to own a well-known name
-+ *				Implies KDBUS_POLICY_TALK and KDBUS_POLICY_SEE
-+ * @KDBUS_POLICY_TALK:		Allow communication to a well-known name
-+ *				Implies KDBUS_POLICY_SEE
-+ * @KDBUS_POLICY_SEE:		Allow to see a well-known name
-+ */
-+enum kdbus_policy_type {
-+	KDBUS_POLICY_SEE	= 0,
-+	KDBUS_POLICY_TALK,
-+	KDBUS_POLICY_OWN,
-+};
-+
-+/**
-+ * struct kdbus_policy_access - policy access item
-+ * @type:		One of KDBUS_POLICY_ACCESS_* types
-+ * @access:		Access to grant
-+ * @id:			For KDBUS_POLICY_ACCESS_USER, the uid
-+ *			For KDBUS_POLICY_ACCESS_GROUP, the gid
-+ */
-+struct kdbus_policy_access {
-+	__u64 type;	/* USER, GROUP, WORLD */
-+	__u64 access;	/* OWN, TALK, SEE */
-+	__u64 id;	/* uid, gid, 0 */
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * enum kdbus_attach_flags - flags for metadata attachments
-+ * @KDBUS_ATTACH_TIMESTAMP:		Timestamp
-+ * @KDBUS_ATTACH_CREDS:			Credentials
-+ * @KDBUS_ATTACH_PIDS:			PIDs
-+ * @KDBUS_ATTACH_AUXGROUPS:		Auxiliary groups
-+ * @KDBUS_ATTACH_NAMES:			Well-known names
-+ * @KDBUS_ATTACH_TID_COMM:		The "comm" process identifier of the TID
-+ * @KDBUS_ATTACH_PID_COMM:		The "comm" process identifier of the PID
-+ * @KDBUS_ATTACH_EXE:			The path of the executable
-+ * @KDBUS_ATTACH_CMDLINE:		The process command line
-+ * @KDBUS_ATTACH_CGROUP:		The cgroup membership
-+ * @KDBUS_ATTACH_CAPS:			The process capabilities
-+ * @KDBUS_ATTACH_SECLABEL:		The security label
-+ * @KDBUS_ATTACH_AUDIT:			The audit IDs
-+ * @KDBUS_ATTACH_CONN_DESCRIPTION:	The human-readable connection name
-+ * @_KDBUS_ATTACH_ALL:			All of the above
-+ * @_KDBUS_ATTACH_ANY:			Wildcard match to enable any kind of
-+ *					metadata.
-+ */
-+enum kdbus_attach_flags {
-+	KDBUS_ATTACH_TIMESTAMP		=  1ULL <<  0,
-+	KDBUS_ATTACH_CREDS		=  1ULL <<  1,
-+	KDBUS_ATTACH_PIDS		=  1ULL <<  2,
-+	KDBUS_ATTACH_AUXGROUPS		=  1ULL <<  3,
-+	KDBUS_ATTACH_NAMES		=  1ULL <<  4,
-+	KDBUS_ATTACH_TID_COMM		=  1ULL <<  5,
-+	KDBUS_ATTACH_PID_COMM		=  1ULL <<  6,
-+	KDBUS_ATTACH_EXE		=  1ULL <<  7,
-+	KDBUS_ATTACH_CMDLINE		=  1ULL <<  8,
-+	KDBUS_ATTACH_CGROUP		=  1ULL <<  9,
-+	KDBUS_ATTACH_CAPS		=  1ULL << 10,
-+	KDBUS_ATTACH_SECLABEL		=  1ULL << 11,
-+	KDBUS_ATTACH_AUDIT		=  1ULL << 12,
-+	KDBUS_ATTACH_CONN_DESCRIPTION	=  1ULL << 13,
-+	_KDBUS_ATTACH_ALL		=  (1ULL << 14) - 1,
-+	_KDBUS_ATTACH_ANY		=  ~0ULL
-+};
-+
-+/**
-+ * enum kdbus_item_type - item types to chain data in a list
-+ * @_KDBUS_ITEM_NULL:			Uninitialized/invalid
-+ * @_KDBUS_ITEM_USER_BASE:		Start of user items
-+ * @KDBUS_ITEM_NEGOTIATE:		Negotiate supported items
-+ * @KDBUS_ITEM_PAYLOAD_VEC:		Vector to data
-+ * @KDBUS_ITEM_PAYLOAD_OFF:		Data at returned offset to message head
-+ * @KDBUS_ITEM_PAYLOAD_MEMFD:		Data as sealed memfd
-+ * @KDBUS_ITEM_FDS:			Attached file descriptors
-+ * @KDBUS_ITEM_CANCEL_FD:		FD used to cancel a synchronous
-+ *					operation by writing to it from
-+ *					userspace
-+ * @KDBUS_ITEM_BLOOM_PARAMETER:		Bus-wide bloom parameters, used with
-+ *					KDBUS_CMD_BUS_MAKE, carries a
-+ *					struct kdbus_bloom_parameter
-+ * @KDBUS_ITEM_BLOOM_FILTER:		Bloom filter carried with a message,
-+ *					used to match against a bloom mask of a
-+ *					connection, carries a struct
-+ *					kdbus_bloom_filter
-+ * @KDBUS_ITEM_BLOOM_MASK:		Bloom mask used to match against a
-+ *					message's bloom filter
-+ * @KDBUS_ITEM_DST_NAME:		Destination's well-known name
-+ * @KDBUS_ITEM_MAKE_NAME:		Name of domain, bus, endpoint
-+ * @KDBUS_ITEM_ATTACH_FLAGS_SEND:	Attach-flags, used for updating which
-+ *					metadata a connection opts in to send
-+ * @KDBUS_ITEM_ATTACH_FLAGS_RECV:	Attach-flags, used for updating which
-+ *					metadata a connection requests to
-+ *					receive for each received message
-+ * @KDBUS_ITEM_ID:			Connection ID
-+ * @KDBUS_ITEM_NAME:			Well-known name with flags
-+ * @_KDBUS_ITEM_ATTACH_BASE:		Start of metadata attach items
-+ * @KDBUS_ITEM_TIMESTAMP:		Timestamp
-+ * @KDBUS_ITEM_CREDS:			Process credentials
-+ * @KDBUS_ITEM_PIDS:			Process identifiers
-+ * @KDBUS_ITEM_AUXGROUPS:		Auxiliary process groups
-+ * @KDBUS_ITEM_OWNED_NAME:		A name owned by the associated
-+ *					connection
-+ * @KDBUS_ITEM_TID_COMM:		Thread ID "comm" identifier
-+ *					(Don't trust this, see below.)
-+ * @KDBUS_ITEM_PID_COMM:		Process ID "comm" identifier
-+ *					(Don't trust this, see below.)
-+ * @KDBUS_ITEM_EXE:			The path of the executable
-+ *					(Don't trust this, see below.)
-+ * @KDBUS_ITEM_CMDLINE:			The process command line
-+ *					(Don't trust this, see below.)
-+ * @KDBUS_ITEM_CGROUP:		The cgroup membership
-+ * @KDBUS_ITEM_CAPS:			The process capabilities
-+ * @KDBUS_ITEM_SECLABEL:		The security label
-+ * @KDBUS_ITEM_AUDIT:			The audit IDs
-+ * @KDBUS_ITEM_CONN_DESCRIPTION:	The connection's human-readable name
-+ *					(debugging)
-+ * @_KDBUS_ITEM_POLICY_BASE:		Start of policy items
-+ * @KDBUS_ITEM_POLICY_ACCESS:		Policy access block
-+ * @_KDBUS_ITEM_KERNEL_BASE:		Start of kernel-generated message items
-+ * @KDBUS_ITEM_NAME_ADD:		Notification in kdbus_notify_name_change
-+ * @KDBUS_ITEM_NAME_REMOVE:		Notification in kdbus_notify_name_change
-+ * @KDBUS_ITEM_NAME_CHANGE:		Notification in kdbus_notify_name_change
-+ * @KDBUS_ITEM_ID_ADD:			Notification in kdbus_notify_id_change
-+ * @KDBUS_ITEM_ID_REMOVE:		Notification in kdbus_notify_id_change
-+ * @KDBUS_ITEM_REPLY_TIMEOUT:		Timeout has been reached
-+ * @KDBUS_ITEM_REPLY_DEAD:		Destination died
-+ *
-+ * N.B.: The process and thread COMM fields, as well as the CMDLINE and
-+ * EXE fields, may be altered by unprivileged processes and should
-+ * hence *not* be used for security decisions. Peers should make use of
-+ * these items only for informational purposes, such as generating log
-+ * records.
-+ */
-+enum kdbus_item_type {
-+	_KDBUS_ITEM_NULL,
-+	_KDBUS_ITEM_USER_BASE,
-+	KDBUS_ITEM_NEGOTIATE	= _KDBUS_ITEM_USER_BASE,
-+	KDBUS_ITEM_PAYLOAD_VEC,
-+	KDBUS_ITEM_PAYLOAD_OFF,
-+	KDBUS_ITEM_PAYLOAD_MEMFD,
-+	KDBUS_ITEM_FDS,
-+	KDBUS_ITEM_CANCEL_FD,
-+	KDBUS_ITEM_BLOOM_PARAMETER,
-+	KDBUS_ITEM_BLOOM_FILTER,
-+	KDBUS_ITEM_BLOOM_MASK,
-+	KDBUS_ITEM_DST_NAME,
-+	KDBUS_ITEM_MAKE_NAME,
-+	KDBUS_ITEM_ATTACH_FLAGS_SEND,
-+	KDBUS_ITEM_ATTACH_FLAGS_RECV,
-+	KDBUS_ITEM_ID,
-+	KDBUS_ITEM_NAME,
-+	KDBUS_ITEM_DST_ID,
-+
-+	/* keep these item types in sync with KDBUS_ATTACH_* flags */
-+	_KDBUS_ITEM_ATTACH_BASE	= 0x1000,
-+	KDBUS_ITEM_TIMESTAMP	= _KDBUS_ITEM_ATTACH_BASE,
-+	KDBUS_ITEM_CREDS,
-+	KDBUS_ITEM_PIDS,
-+	KDBUS_ITEM_AUXGROUPS,
-+	KDBUS_ITEM_OWNED_NAME,
-+	KDBUS_ITEM_TID_COMM,
-+	KDBUS_ITEM_PID_COMM,
-+	KDBUS_ITEM_EXE,
-+	KDBUS_ITEM_CMDLINE,
-+	KDBUS_ITEM_CGROUP,
-+	KDBUS_ITEM_CAPS,
-+	KDBUS_ITEM_SECLABEL,
-+	KDBUS_ITEM_AUDIT,
-+	KDBUS_ITEM_CONN_DESCRIPTION,
-+
-+	_KDBUS_ITEM_POLICY_BASE	= 0x2000,
-+	KDBUS_ITEM_POLICY_ACCESS = _KDBUS_ITEM_POLICY_BASE,
-+
-+	_KDBUS_ITEM_KERNEL_BASE	= 0x8000,
-+	KDBUS_ITEM_NAME_ADD	= _KDBUS_ITEM_KERNEL_BASE,
-+	KDBUS_ITEM_NAME_REMOVE,
-+	KDBUS_ITEM_NAME_CHANGE,
-+	KDBUS_ITEM_ID_ADD,
-+	KDBUS_ITEM_ID_REMOVE,
-+	KDBUS_ITEM_REPLY_TIMEOUT,
-+	KDBUS_ITEM_REPLY_DEAD,
-+};
-+
-+/**
-+ * struct kdbus_item - chain of data blocks
-+ * @size:		Overall data record size
-+ * @type:		Kdbus_item type of data
-+ * @data:		Generic bytes
-+ * @data32:		Generic 32 bit array
-+ * @data64:		Generic 64 bit array
-+ * @str:		Generic string
-+ * @id:			Connection ID
-+ * @vec:		KDBUS_ITEM_PAYLOAD_VEC
-+ * @creds:		KDBUS_ITEM_CREDS
-+ * @audit:		KDBUS_ITEM_AUDIT
-+ * @timestamp:		KDBUS_ITEM_TIMESTAMP
-+ * @name:		KDBUS_ITEM_NAME
-+ * @bloom_parameter:	KDBUS_ITEM_BLOOM_PARAMETER
-+ * @bloom_filter:	KDBUS_ITEM_BLOOM_FILTER
-+ * @memfd:		KDBUS_ITEM_PAYLOAD_MEMFD
-+ * @name_change:	KDBUS_ITEM_NAME_ADD
-+ *			KDBUS_ITEM_NAME_REMOVE
-+ *			KDBUS_ITEM_NAME_CHANGE
-+ * @id_change:		KDBUS_ITEM_ID_ADD
-+ *			KDBUS_ITEM_ID_REMOVE
-+ * @policy:		KDBUS_ITEM_POLICY_ACCESS
-+ */
-+struct kdbus_item {
-+	__u64 size;
-+	__u64 type;
-+	union {
-+		__u8 data[0];
-+		__u32 data32[0];
-+		__u64 data64[0];
-+		char str[0];
-+
-+		__u64 id;
-+		struct kdbus_vec vec;
-+		struct kdbus_creds creds;
-+		struct kdbus_pids pids;
-+		struct kdbus_audit audit;
-+		struct kdbus_caps caps;
-+		struct kdbus_timestamp timestamp;
-+		struct kdbus_name name;
-+		struct kdbus_bloom_parameter bloom_parameter;
-+		struct kdbus_bloom_filter bloom_filter;
-+		struct kdbus_memfd memfd;
-+		int fds[0];
-+		struct kdbus_notify_name_change name_change;
-+		struct kdbus_notify_id_change id_change;
-+		struct kdbus_policy_access policy_access;
-+	};
-+} __attribute__((__aligned__(8)));
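-+
-+/*
-+ * Illustrative sketch (not part of the ABI beyond the 8-byte alignment
-+ * stated above): items in a chain are advanced by their 'size' field,
-+ * rounded up to the next multiple of 8, e.g.:
-+ *
-+ *	#define EXAMPLE_ALIGN8(v) (((v) + 7) & ~7ULL)
-+ *
-+ *	const struct kdbus_item *item = msg->items;
-+ *
-+ *	while ((__u8 *)item < (__u8 *)msg + msg->size) {
-+ *		switch (item->type) {
-+ *		...
-+ *		}
-+ *		item = (void *)((__u8 *)item + EXAMPLE_ALIGN8(item->size));
-+ *	}
-+ */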
-+
-+/**
-+ * enum kdbus_msg_flags - type of message
-+ * @KDBUS_MSG_EXPECT_REPLY:	Expect a reply message, used for
-+ *				method calls. The userspace-supplied
-+ *				cookie identifies the message and the
-+ *				respective reply carries the cookie
-+ *				in cookie_reply
-+ * @KDBUS_MSG_NO_AUTO_START:	Do not start a service if the addressed
-+ *				name is not currently active. This flag is
-+ *				not looked at by the kernel but only
-+ *				serves as hint for userspace implementations.
-+ * @KDBUS_MSG_SIGNAL:		Treat this message as signal
-+ */
-+enum kdbus_msg_flags {
-+	KDBUS_MSG_EXPECT_REPLY	= 1ULL << 0,
-+	KDBUS_MSG_NO_AUTO_START	= 1ULL << 1,
-+	KDBUS_MSG_SIGNAL	= 1ULL << 2,
-+};
-+
-+/**
-+ * enum kdbus_payload_type - type of payload carried by message
-+ * @KDBUS_PAYLOAD_KERNEL:	Kernel-generated simple message
-+ * @KDBUS_PAYLOAD_DBUS:		D-Bus marshalling "DBusDBus"
-+ *
-+ * Any payload-type is accepted. Common types will get added here once
-+ * established.
-+ */
-+enum kdbus_payload_type {
-+	KDBUS_PAYLOAD_KERNEL,
-+	KDBUS_PAYLOAD_DBUS	= 0x4442757344427573ULL,
-+};
-+
-+/**
-+ * struct kdbus_msg - the representation of a kdbus message
-+ * @size:		Total size of the message
-+ * @flags:		Message flags (KDBUS_MSG_*), userspace → kernel
-+ * @priority:		Message queue priority value
-+ * @dst_id:		64-bit ID of the destination connection
-+ * @src_id:		64-bit ID of the source connection
-+ * @payload_type:	Payload type (KDBUS_PAYLOAD_*)
-+ * @cookie:		Userspace-supplied cookie, for the connection
-+ *			to identify its messages
-+ * @timeout_ns:		The time to wait for a message reply from the peer.
-+ *			If there is no reply, and the send command is
-+ *			executed asynchronously, a kernel-generated message
-+ *			with an attached KDBUS_ITEM_REPLY_TIMEOUT item
-+ *			is sent to @src_id. For a synchronously executed send
-+ *			command, the value denotes the maximum time the call
-+ *			blocks to wait for a reply. The timeout is expected in
-+ *			nanoseconds, as an absolute CLOCK_MONOTONIC value.
-+ * @cookie_reply:	A reply to the requesting message with the same
-+ *			cookie. The requesting connection can match its
-+ *			request and the reply with this value
-+ * @items:		A list of kdbus_items containing the message payload
-+ */
-+struct kdbus_msg {
-+	__u64 size;
-+	__u64 flags;
-+	__s64 priority;
-+	__u64 dst_id;
-+	__u64 src_id;
-+	__u64 payload_type;
-+	__u64 cookie;
-+	union {
-+		__u64 timeout_ns;
-+		__u64 cookie_reply;
-+	};
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
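-+
-+/*
-+ * Illustrative sketch of a minimal message setup (assumes 'buf' points
-+ * to zeroed, 8-byte-aligned memory large enough for the message plus
-+ * one item; see kdbus.message(7) for the normative layout rules):
-+ *
-+ *	struct kdbus_msg *msg = buf;
-+ *
-+ *	msg->size = size;
-+ *	msg->dst_id = dst_id;
-+ *	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+ *	msg->cookie = cookie;
-+ *	msg->items[0].size = offsetof(struct kdbus_item, vec) +
-+ *			     sizeof(struct kdbus_vec);
-+ *	msg->items[0].type = KDBUS_ITEM_PAYLOAD_VEC;
-+ *	msg->items[0].vec.address = (__u64)(uintptr_t)data;
-+ *	msg->items[0].vec.size = data_size;
-+ */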
-+
-+/**
-+ * struct kdbus_msg_info - returned message container
-+ * @offset:		Offset of kdbus_msg slice in pool
-+ * @msg_size:		Copy of the kdbus_msg.size field
-+ * @return_flags:	Command return flags, kernel → userspace
-+ */
-+struct kdbus_msg_info {
-+	__u64 offset;
-+	__u64 msg_size;
-+	__u64 return_flags;
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * enum kdbus_send_flags - flags for sending messages
-+ * @KDBUS_SEND_SYNC_REPLY:	Wait for destination connection to
-+ *				reply to this message. The
-+ *				KDBUS_CMD_SEND ioctl() will block
-+ *				until the reply is received, and the
-+ *				@reply member of struct kdbus_cmd_send
-+ *				will yield the offset in the sender's
-+ *				pool where the reply can be found.
-+ *				This flag is only valid if
-+ *				@KDBUS_MSG_EXPECT_REPLY is set as well.
-+ */
-+enum kdbus_send_flags {
-+	KDBUS_SEND_SYNC_REPLY		= 1ULL << 0,
-+};
-+
-+/**
-+ * struct kdbus_cmd_send - send message
-+ * @size:		Overall size of this structure
-+ * @flags:		Flags to change send behavior (KDBUS_SEND_*)
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @msg_address:	Storage address of the kdbus_msg to send
-+ * @reply:		Storage for message reply if KDBUS_SEND_SYNC_REPLY
-+ *			was given
-+ * @items:		Additional items for this command
-+ */
-+struct kdbus_cmd_send {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 msg_address;
-+	struct kdbus_msg_info reply;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
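-+
-+/*
-+ * Illustrative synchronous method call (a sketch; 'fd' is a connection
-+ * file descriptor and 'msg' a fully assembled struct kdbus_msg with
-+ * KDBUS_MSG_EXPECT_REPLY set and timeout_ns filled in):
-+ *
-+ *	struct kdbus_cmd_send cmd = {
-+ *		.size = sizeof(cmd),
-+ *		.flags = KDBUS_SEND_SYNC_REPLY,
-+ *		.msg_address = (__u64)(uintptr_t)msg,
-+ *	};
-+ *
-+ *	if (ioctl(fd, KDBUS_CMD_SEND, &cmd) == 0)
-+ *		the reply is found at cmd.reply.offset in the pool
-+ */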
-+
-+/**
-+ * enum kdbus_recv_flags - flags for de-queuing messages
-+ * @KDBUS_RECV_PEEK:		Return the next queued message without
-+ *				actually de-queuing it, and without installing
-+ *				any file descriptors or other resources. It is
-+ *				usually used to determine the activating
-+ *				connection of a bus name.
-+ * @KDBUS_RECV_DROP:		Drop and free the next queued message and all
-+ *				its resources without actually receiving it.
-+ * @KDBUS_RECV_USE_PRIORITY:	Only de-queue messages with the specified or
-+ *				higher priority (lowest values); if not set,
-+ *				the priority value is ignored.
-+ */
-+enum kdbus_recv_flags {
-+	KDBUS_RECV_PEEK		= 1ULL <<  0,
-+	KDBUS_RECV_DROP		= 1ULL <<  1,
-+	KDBUS_RECV_USE_PRIORITY	= 1ULL <<  2,
-+};
-+
-+/**
-+ * enum kdbus_recv_return_flags - return flags for message receive commands
-+ * @KDBUS_RECV_RETURN_INCOMPLETE_FDS:	One or more file descriptors could not
-+ *					be installed. These descriptors in
-+ *					KDBUS_ITEM_FDS will carry the value -1.
-+ * @KDBUS_RECV_RETURN_DROPPED_MSGS:	There have been dropped messages since
-+ *					the last time a message was received.
-+ *					The 'dropped_msgs' counter contains the
-+ *					number of messages dropped due to
-+ *					pool overflows or other missed
-+ *					broadcasts.
-+ */
-+enum kdbus_recv_return_flags {
-+	KDBUS_RECV_RETURN_INCOMPLETE_FDS	= 1ULL <<  0,
-+	KDBUS_RECV_RETURN_DROPPED_MSGS		= 1ULL <<  1,
-+};
-+
-+/**
-+ * struct kdbus_cmd_recv - struct to de-queue a buffered message
-+ * @size:		Overall size of this object
-+ * @flags:		KDBUS_RECV_* flags, userspace → kernel
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @priority:		Minimum priority of the messages to de-queue. Lowest
-+ *			values have the highest priority.
-+ * @dropped_msgs:	In case there were any dropped messages since the last
-+ *			time a message was received, this will be set to the
-+ *			number of lost messages and
-+ *			KDBUS_RECV_RETURN_DROPPED_MSGS will be set in
-+ *			'return_flags'. This can only happen if the ioctl
-+ *			returns 0 or EAGAIN.
-+ * @msg:		Return storage for received message.
-+ * @items:		Additional items for this command.
-+ *
-+ * This struct is used with the KDBUS_CMD_RECV ioctl.
-+ */
-+struct kdbus_cmd_recv {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__s64 priority;
-+	__u64 dropped_msgs;
-+	struct kdbus_msg_info msg;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
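-+
-+/*
-+ * Illustrative receive path (a sketch; 'pool' is the connection's pool
-+ * mapped via mmap(2)):
-+ *
-+ *	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+ *	struct kdbus_cmd_free cmd_free = { .size = sizeof(cmd_free) };
-+ *
-+ *	if (ioctl(fd, KDBUS_CMD_RECV, &recv) == 0) {
-+ *		struct kdbus_msg *msg =
-+ *			(void *)((__u8 *)pool + recv.msg.offset);
-+ *
-+ *		... process the items attached to 'msg' ...
-+ *
-+ *		cmd_free.offset = recv.msg.offset;
-+ *		ioctl(fd, KDBUS_CMD_FREE, &cmd_free);
-+ *	}
-+ */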
-+
-+/**
-+ * struct kdbus_cmd_free - struct to free a slice of memory in the pool
-+ * @size:		Overall size of this structure
-+ * @flags:		Flags for the free command, userspace → kernel
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @offset:		The offset of the memory slice, as returned by other
-+ *			ioctls
-+ * @items:		Additional items to modify the behavior
-+ *
-+ * This struct is used with the KDBUS_CMD_FREE ioctl.
-+ */
-+struct kdbus_cmd_free {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 offset;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * enum kdbus_hello_flags - flags for struct kdbus_cmd_hello
-+ * @KDBUS_HELLO_ACCEPT_FD:	The connection allows the reception of
-+ *				any passed file descriptors
-+ * @KDBUS_HELLO_ACTIVATOR:	Special-purpose connection which registers
-+ *				a well-known name for a process to be started
-+ *				when traffic arrives
-+ * @KDBUS_HELLO_POLICY_HOLDER:	Special-purpose connection which registers
-+ *				policy entries for a name. The provided name
-+ *				is not activated and not registered with the
-+ *				name database; it only allows unprivileged
-+ *				connections to acquire a name, or to talk to
-+ *				or discover a service
-+ * @KDBUS_HELLO_MONITOR:	Special-purpose connection to monitor
-+ *				bus traffic
-+ */
-+enum kdbus_hello_flags {
-+	KDBUS_HELLO_ACCEPT_FD		=  1ULL <<  0,
-+	KDBUS_HELLO_ACTIVATOR		=  1ULL <<  1,
-+	KDBUS_HELLO_POLICY_HOLDER	=  1ULL <<  2,
-+	KDBUS_HELLO_MONITOR		=  1ULL <<  3,
-+};
-+
-+/**
-+ * struct kdbus_cmd_hello - struct to say hello to kdbus
-+ * @size:		The total size of the structure
-+ * @flags:		Connection flags (KDBUS_HELLO_*), userspace → kernel
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @attach_flags_send:	Mask of metadata to attach to each message sent
-+ *			off by this connection (KDBUS_ATTACH_*)
-+ * @attach_flags_recv:	Mask of metadata to attach to each message received
-+ *			by the new connection (KDBUS_ATTACH_*)
-+ * @bus_flags:		The flags field copied verbatim from the original
-+ *			KDBUS_CMD_BUS_MAKE ioctl. It is intended to allow
-+ *			negotiation of features of the payload that is
-+ *			transferred (kernel → userspace)
-+ * @id:			The ID of this connection (kernel → userspace)
-+ * @pool_size:		Size of the connection's buffer where the received
-+ *			messages are placed
-+ * @offset:		Pool offset where items are returned to report
-+ *			additional information about the bus and the newly
-+ *			created connection.
-+ * @items_size:		Size of buffer returned in the pool slice at @offset.
-+ * @id128:		Unique 128-bit ID of the bus (kernel → userspace)
-+ * @items:		A list of items
-+ *
-+ * This struct is used with the KDBUS_CMD_HELLO ioctl.
-+ */
-+struct kdbus_cmd_hello {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 attach_flags_send;
-+	__u64 attach_flags_recv;
-+	__u64 bus_flags;
-+	__u64 id;
-+	__u64 pool_size;
-+	__u64 offset;
-+	__u64 items_size;
-+	__u8 id128[16];
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
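A plausible handshake, as a sketch only: the bus path and pool size are assumptions (the pool size must be page-aligned, see kdbus_conn_new() later in this patch), error handling is elided, and the includes from the earlier sketch plus <fcntl.h>, <sys/mman.h> and <err.h> are assumed.

	#define POOL_SIZE (16UL * 1024 * 1024)	/* page-aligned, assumed */

	int fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR | O_CLOEXEC);

	struct kdbus_cmd_hello hello = {
		.size = sizeof(hello),
		.flags = KDBUS_HELLO_ACCEPT_FD,
		.pool_size = POOL_SIZE,
	};

	if (ioctl(fd, KDBUS_CMD_HELLO, &hello) < 0)
		err(1, "KDBUS_CMD_HELLO");

	/* map the receive pool; offsets from RECV et al. index into it */
	uint8_t *pool = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);

	printf("connected, unique id %llu\n", (unsigned long long)hello.id);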
-+/**
-+ * struct kdbus_info - connection information
-+ * @size:		total size of the struct
-+ * @id:			64bit object ID
-+ * @flags:		object creation flags
-+ * @items:		list of items
-+ *
-+ * Note that the user is responsible for freeing the allocated memory with
-+ * the KDBUS_CMD_FREE ioctl.
-+ */
-+struct kdbus_info {
-+	__u64 size;
-+	__u64 id;
-+	__u64 flags;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * enum kdbus_list_flags - what to include into the returned list
-+ * @KDBUS_LIST_UNIQUE:		active connections
-+ * @KDBUS_LIST_ACTIVATORS:	activator connections
-+ * @KDBUS_LIST_NAMES:		known well-known names
-+ * @KDBUS_LIST_QUEUED:		queued-up names
-+ */
-+enum kdbus_list_flags {
-+	KDBUS_LIST_UNIQUE		= 1ULL <<  0,
-+	KDBUS_LIST_NAMES		= 1ULL <<  1,
-+	KDBUS_LIST_ACTIVATORS		= 1ULL <<  2,
-+	KDBUS_LIST_QUEUED		= 1ULL <<  3,
-+};
-+
-+/**
-+ * struct kdbus_cmd_list - list connections
-+ * @size:		overall size of this object
-+ * @flags:		flags for the query (KDBUS_LIST_*), userspace → kernel
-+ * @return_flags:	command return flags, kernel → userspace
-+ * @offset:		Offset in the caller's pool buffer where an array of
-+ *			kdbus_info objects is stored.
-+ *			The user must use KDBUS_CMD_FREE to free the
-+ *			allocated memory.
-+ * @list_size:		size of returned list in bytes
-+ * @items:		Items for the command. Reserved for future use.
-+ *
-+ * This structure is used with the KDBUS_CMD_LIST ioctl.
-+ */
-+struct kdbus_cmd_list {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 offset;
-+	__u64 list_size;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
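To make the pool round-trip concrete, a hedged enumeration sketch continuing the examples above (record stride is assumed to be the 8-byte-aligned @size of each entry):

	struct kdbus_cmd_list cmd = {
		.size = sizeof(cmd),
		.flags = KDBUS_LIST_UNIQUE | KDBUS_LIST_NAMES,
	};

	if (ioctl(fd, KDBUS_CMD_LIST, &cmd) == 0) {
		uint64_t pos;

		for (pos = 0; pos < cmd.list_size; ) {
			struct kdbus_info *info =
				(struct kdbus_info *)(pool + cmd.offset + pos);

			printf("connection id %llu\n",
			       (unsigned long long)info->id);
			pos += info->size;	/* assumed 8-byte aligned */
		}

		free_slice(fd, cmd.offset);	/* see KDBUS_CMD_FREE above */
	}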
-+/**
-+ * struct kdbus_cmd_info - struct used for KDBUS_CMD_CONN_INFO ioctl
-+ * @size:		The total size of the struct
-+ * @flags:		Flags for this ioctl, userspace → kernel
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @id:			The 64-bit ID of the connection. If set to zero, passing
-+ *			@name is required. kdbus will look up the name to
-+ *			determine the ID in this case.
-+ * @attach_flags:	Set of attach flags to specify the set of information
-+ *			to receive, userspace → kernel
-+ * @offset:		Returned offset in the caller's pool buffer where the
-+ *			kdbus_info struct result is stored. The user must
-+ *			use KDBUS_CMD_FREE to free the allocated memory.
-+ * @info_size:		Output buffer to report size of data at @offset.
-+ * @items:		The optional item list, containing the
-+ *			well-known name to look up as a KDBUS_ITEM_NAME.
-+ *			Only needed in case @id is zero.
-+ *
-+ * On success, the KDBUS_CMD_CONN_INFO ioctl will return 0 and @offset will
-+ * tell the user the offset in the connection pool buffer at which to find the
-+ * result in a struct kdbus_info.
-+ */
-+struct kdbus_cmd_info {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 id;
-+	__u64 attach_flags;
-+	__u64 offset;
-+	__u64 info_size;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
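As an illustration of the by-ID lookup path (the by-name variant would instead set @id to zero and append a KDBUS_ITEM_NAME item; sketch only, 'peer_id' is an assumed variable):

	struct kdbus_cmd_info cmd = {
		.size = sizeof(cmd),
		.id = peer_id,	/* unique id learned from LIST or a message */
		.attach_flags = KDBUS_ATTACH_CREDS | KDBUS_ATTACH_PIDS,
	};

	if (ioctl(fd, KDBUS_CMD_CONN_INFO, &cmd) == 0) {
		struct kdbus_info *info =
			(struct kdbus_info *)(pool + cmd.offset);

		/* walk info->items for the requested metadata ... */
		free_slice(fd, cmd.offset);
	}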
-+/**
-+ * enum kdbus_cmd_match_flags - flags to control the KDBUS_CMD_MATCH_ADD ioctl
-+ * @KDBUS_MATCH_REPLACE:	If entries with the supplied cookie already
-+ *				exist, remove them before installing the new
-+ *				matches.
-+ */
-+enum kdbus_cmd_match_flags {
-+	KDBUS_MATCH_REPLACE	= 1ULL <<  0,
-+};
-+
-+/**
-+ * struct kdbus_cmd_match - struct to add or remove matches
-+ * @size:		The total size of the struct
-+ * @flags:		Flags for match command (KDBUS_MATCH_*),
-+ *			userspace → kernel
-+ * @return_flags:	Command return flags, kernel → userspace
-+ * @cookie:		Userspace supplied cookie. When removing, the cookie
-+ *			identifies the match to remove
-+ * @items:		A list of items for additional information
-+ *
-+ * This structure is used with the KDBUS_CMD_MATCH_ADD and
-+ * KDBUS_CMD_MATCH_REMOVE ioctl.
-+ */
-+struct kdbus_cmd_match {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	__u64 cookie;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
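A hedged sketch of the cookie handling; real match rules would be appended as items (their types are defined earlier in this patch and omitted here):

	struct kdbus_cmd_match cmd = {
		.size = sizeof(cmd),
		.flags = KDBUS_MATCH_REPLACE,	/* drop same-cookie matches first */
		.cookie = 0xbeef,		/* arbitrary userspace tag */
	};

	ioctl(fd, KDBUS_CMD_MATCH_ADD, &cmd);

	/* later: the same cookie identifies the match for removal */
	cmd.flags = 0;
	ioctl(fd, KDBUS_CMD_MATCH_REMOVE, &cmd);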
-+/**
-+ * enum kdbus_make_flags - Flags for KDBUS_CMD_{BUS,ENDPOINT}_MAKE
-+ * @KDBUS_MAKE_ACCESS_GROUP:	Make the bus or endpoint node group-accessible
-+ * @KDBUS_MAKE_ACCESS_WORLD:	Make the bus or endpoint node world-accessible
-+ */
-+enum kdbus_make_flags {
-+	KDBUS_MAKE_ACCESS_GROUP		= 1ULL <<  0,
-+	KDBUS_MAKE_ACCESS_WORLD		= 1ULL <<  1,
-+};
-+
-+/**
-+ * enum kdbus_name_flags - flags for KDBUS_CMD_NAME_ACQUIRE
-+ * @KDBUS_NAME_REPLACE_EXISTING:	Try to replace a name owned by another connection
-+ * @KDBUS_NAME_ALLOW_REPLACEMENT:	Allow the replacement of the name
-+ * @KDBUS_NAME_QUEUE:			Name should be queued if busy
-+ * @KDBUS_NAME_IN_QUEUE:		Name is queued
-+ * @KDBUS_NAME_ACTIVATOR:		Name is owned by an activator connection
-+ * @KDBUS_NAME_PRIMARY:			Primary owner of the name
-+ * @KDBUS_NAME_ACQUIRED:		Name was acquired/queued _now_
-+ */
-+enum kdbus_name_flags {
-+	KDBUS_NAME_REPLACE_EXISTING	= 1ULL <<  0,
-+	KDBUS_NAME_ALLOW_REPLACEMENT	= 1ULL <<  1,
-+	KDBUS_NAME_QUEUE		= 1ULL <<  2,
-+	KDBUS_NAME_IN_QUEUE		= 1ULL <<  3,
-+	KDBUS_NAME_ACTIVATOR		= 1ULL <<  4,
-+	KDBUS_NAME_PRIMARY		= 1ULL <<  5,
-+	KDBUS_NAME_ACQUIRED		= 1ULL <<  6,
-+};
-+
-+/**
-+ * struct kdbus_cmd - generic ioctl payload
-+ * @size:		Overall size of this structure
-+ * @flags:		Flags for this ioctl, userspace → kernel
-+ * @return_flags:	Ioctl return flags, kernel → userspace
-+ * @items:		Additional items to modify the behavior
-+ *
-+ * This is a generic ioctl payload object. It's used by all ioctls that only
-+ * take flags and items as input.
-+ */
-+struct kdbus_cmd {
-+	__u64 size;
-+	__u64 flags;
-+	__u64 return_flags;
-+	struct kdbus_item items[0];
-+} __attribute__((__aligned__(8)));
-+
-+/**
-+ * Ioctl API
-+ *
-+ * KDBUS_CMD_BUS_MAKE:		After opening the "control" node, this command
-+ *				creates a new bus with the specified
-+ *				name. The bus is immediately shut down and
-+ *				cleaned up when the opened file descriptor is
-+ *				closed.
-+ *
-+ * KDBUS_CMD_ENDPOINT_MAKE:	Creates a new named special endpoint to talk to
-+ *				the bus. Such endpoints usually carry a more
-+ *				restrictive policy and grant restricted access
-+ *				to specific applications.
-+ * KDBUS_CMD_ENDPOINT_UPDATE:	Update the properties of a custom endpoint. Used
-+ *				to update the policy.
-+ *
-+ * KDBUS_CMD_HELLO:		By opening the bus node, a connection is
-+ *				created. After a HELLO the opened connection
-+ *				becomes an active peer on the bus.
-+ * KDBUS_CMD_UPDATE:		Update the properties of a connection. Used to
-+ *				update the metadata subscription mask and
-+ *				policy.
-+ * KDBUS_CMD_BYEBYE:		Disconnect a connection. If there are no
-+ *				messages queued up in the connection's pool,
-+ *				the call succeeds, and the handle is rendered
-+ *				unusable. Otherwise, -EBUSY is returned without
-+ *				any further side-effects.
-+ * KDBUS_CMD_FREE:		Release the allocated memory in the receiver's
-+ *				pool.
-+ * KDBUS_CMD_CONN_INFO:		Retrieve credentials and properties of the
-+ *				initial creator of the connection. The data was
-+ *				stored at registration time and does not
-+ *				necessarily represent the connected process or
-+ *				the actual state of the process.
-+ * KDBUS_CMD_BUS_CREATOR_INFO:	Retrieve information of the creator of the bus
-+ *				a connection is attached to.
-+ *
-+ * KDBUS_CMD_SEND:		Send a message and pass data from userspace to
-+ *				the kernel.
-+ * KDBUS_CMD_RECV:		Receive a message from the kernel which is
-+ *				placed in the receiver's pool.
-+ *
-+ * KDBUS_CMD_NAME_ACQUIRE:	Request a well-known bus name to associate with
-+ *				the connection. Well-known names are used to
-+ *				address a peer on the bus.
-+ * KDBUS_CMD_NAME_RELEASE:	Release a well-known name the connection
-+ *				currently owns.
-+ * KDBUS_CMD_LIST:		Retrieve the list of all currently registered
-+ *				well-known and unique names.
-+ *
-+ * KDBUS_CMD_MATCH_ADD:		Install a match for broadcast messages that
-+ *				should be delivered to the connection.
-+ * KDBUS_CMD_MATCH_REMOVE:	Remove a current match for broadcast messages.
-+ */
-+enum kdbus_ioctl_type {
-+	/* bus owner (00-0f) */
-+	KDBUS_CMD_BUS_MAKE =		_IOW(KDBUS_IOCTL_MAGIC, 0x00,
-+					     struct kdbus_cmd),
-+
-+	/* endpoint owner (10-1f) */
-+	KDBUS_CMD_ENDPOINT_MAKE =	_IOW(KDBUS_IOCTL_MAGIC, 0x10,
-+					     struct kdbus_cmd),
-+	KDBUS_CMD_ENDPOINT_UPDATE =	_IOW(KDBUS_IOCTL_MAGIC, 0x11,
-+					     struct kdbus_cmd),
-+
-+	/* connection owner (80-ff) */
-+	KDBUS_CMD_HELLO =		_IOWR(KDBUS_IOCTL_MAGIC, 0x80,
-+					      struct kdbus_cmd_hello),
-+	KDBUS_CMD_UPDATE =		_IOW(KDBUS_IOCTL_MAGIC, 0x81,
-+					     struct kdbus_cmd),
-+	KDBUS_CMD_BYEBYE =		_IOW(KDBUS_IOCTL_MAGIC, 0x82,
-+					     struct kdbus_cmd),
-+	KDBUS_CMD_FREE =		_IOW(KDBUS_IOCTL_MAGIC, 0x83,
-+					     struct kdbus_cmd_free),
-+	KDBUS_CMD_CONN_INFO =		_IOR(KDBUS_IOCTL_MAGIC, 0x84,
-+					     struct kdbus_cmd_info),
-+	KDBUS_CMD_BUS_CREATOR_INFO =	_IOR(KDBUS_IOCTL_MAGIC, 0x85,
-+					     struct kdbus_cmd_info),
-+	KDBUS_CMD_LIST =		_IOR(KDBUS_IOCTL_MAGIC, 0x86,
-+					     struct kdbus_cmd_list),
-+
-+	KDBUS_CMD_SEND =		_IOW(KDBUS_IOCTL_MAGIC, 0x90,
-+					     struct kdbus_cmd_send),
-+	KDBUS_CMD_RECV =		_IOR(KDBUS_IOCTL_MAGIC, 0x91,
-+					     struct kdbus_cmd_recv),
-+
-+	KDBUS_CMD_NAME_ACQUIRE =	_IOW(KDBUS_IOCTL_MAGIC, 0xa0,
-+					     struct kdbus_cmd),
-+	KDBUS_CMD_NAME_RELEASE =	_IOW(KDBUS_IOCTL_MAGIC, 0xa1,
-+					     struct kdbus_cmd),
-+
-+	KDBUS_CMD_MATCH_ADD =		_IOW(KDBUS_IOCTL_MAGIC, 0xb0,
-+					     struct kdbus_cmd_match),
-+	KDBUS_CMD_MATCH_REMOVE =	_IOW(KDBUS_IOCTL_MAGIC, 0xb1,
-+					     struct kdbus_cmd_match),
-+};
-+
-+#endif /* _UAPI_KDBUS_H_ */
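Tying the command table above together, a hedged bus-creation sketch. The mandatory KDBUS_ITEM_MAKE_NAME and KDBUS_ITEM_BLOOM_PARAMETER items match the argument list of kdbus_cmd_bus_make() later in this patch; the item header layout (two 64-bit words), the exact string-size rule, and the control-node path are assumptions based on earlier parts of the patch, and the sketch assumes a root caller since the name prefix must match the caller's UID.

	struct {
		struct kdbus_cmd cmd;
		uint64_t bp_size, bp_type;	/* item header, assumed layout */
		struct kdbus_bloom_parameter bloom;
		uint64_t nm_size, nm_type;
		char name[16];
	} make = {
		.cmd.size  = sizeof(make),
		.cmd.flags = KDBUS_MAKE_ACCESS_WORLD,
		.bp_size   = 16 + sizeof(struct kdbus_bloom_parameter),
		.bp_type   = KDBUS_ITEM_BLOOM_PARAMETER,
		.bloom     = { .size = 64, .n_hash = 1 },  /* valid per kdbus_bus_new() */
		.nm_size   = 16 + sizeof(make.name),
		.nm_type   = KDBUS_ITEM_MAKE_NAME,
		.name      = "0-example-bus00",	/* "<uid>-..." required */
	};

	int control = open("/sys/fs/kdbus/control", O_RDWR | O_CLOEXEC);
	if (ioctl(control, KDBUS_CMD_BUS_MAKE, &make) < 0)
		err(1, "KDBUS_CMD_BUS_MAKE");
	/* the bus stays alive until 'control' is closed */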
-diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
-index 7b1425a..ce2ac5a 100644
---- a/include/uapi/linux/magic.h
-+++ b/include/uapi/linux/magic.h
-@@ -76,4 +76,6 @@
- #define BTRFS_TEST_MAGIC	0x73727279
- #define NSFS_MAGIC		0x6e736673
- 
-+#define KDBUS_SUPER_MAGIC	0x44427573
-+
- #endif /* __LINUX_MAGIC_H__ */
-diff --git a/init/Kconfig b/init/Kconfig
-index dc24dec..9388071 100644
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -261,6 +261,19 @@ config POSIX_MQUEUE_SYSCTL
- 	depends on SYSCTL
- 	default y
- 
-+config KDBUS
-+	tristate "kdbus interprocess communication"
-+	depends on TMPFS
-+	help
-+	  D-Bus is a system for low-latency, low-overhead, easy-to-use
-+	  interprocess communication (IPC).
-+
-+	  See the man-pages and HTML files in Documentation/kdbus/
-+	  that are generated by 'make mandocs' and 'make htmldocs'.
-+
-+	  If you have an ordinary machine, select M here. The module
-+	  will be called kdbus.
-+
- config CROSS_MEMORY_ATTACH
- 	bool "Enable process_vm_readv/writev syscalls"
- 	depends on MMU
-diff --git a/ipc/Makefile b/ipc/Makefile
-index 86c7300..68ec416 100644
---- a/ipc/Makefile
-+++ b/ipc/Makefile
-@@ -9,4 +9,4 @@ obj_mq-$(CONFIG_COMPAT) += compat_mq.o
- obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
- obj-$(CONFIG_IPC_NS) += namespace.o
- obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
--
-+obj-$(CONFIG_KDBUS) += kdbus/
-diff --git a/ipc/kdbus/Makefile b/ipc/kdbus/Makefile
-new file mode 100644
-index 0000000..66663a1
---- /dev/null
-+++ b/ipc/kdbus/Makefile
-@@ -0,0 +1,33 @@
-+#
-+# By setting KDBUS_EXT=2, the kdbus module will be built as kdbus2.ko, and
-+# KBUILD_MODNAME=kdbus2. This has the effect that all exported objects have
-+# different names than usual (kdbus2fs, /sys/fs/kdbus2/), and you can run
-+# your test-infrastructure against the kdbus2.ko, while running your system
-+# on kdbus.ko.
-+#
-+# To just build the module, use:
-+#     make KDBUS_EXT=2 M=ipc/kdbus
-+#
-+
-+kdbus$(KDBUS_EXT)-y := \
-+	bus.o \
-+	connection.o \
-+	endpoint.o \
-+	fs.o \
-+	handle.o \
-+	item.o \
-+	main.o \
-+	match.o \
-+	message.o \
-+	metadata.o \
-+	names.o \
-+	node.o \
-+	notify.o \
-+	domain.o \
-+	policy.o \
-+	pool.o \
-+	reply.o \
-+	queue.o \
-+	util.o
-+
-+obj-$(CONFIG_KDBUS) += kdbus$(KDBUS_EXT).o
-diff --git a/ipc/kdbus/bus.c b/ipc/kdbus/bus.c
-new file mode 100644
-index 0000000..a67f825
---- /dev/null
-+++ b/ipc/kdbus/bus.c
-@@ -0,0 +1,514 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/hashtable.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/random.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/uio.h>
-+
-+#include "bus.h"
-+#include "notify.h"
-+#include "connection.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "match.h"
-+#include "message.h"
-+#include "metadata.h"
-+#include "names.h"
-+#include "policy.h"
-+#include "util.h"
-+
-+static void kdbus_bus_free(struct kdbus_node *node)
-+{
-+	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);
-+
-+	WARN_ON(!list_empty(&bus->monitors_list));
-+	WARN_ON(!hash_empty(bus->conn_hash));
-+
-+	kdbus_notify_free(bus);
-+
-+	kdbus_user_unref(bus->creator);
-+	kdbus_name_registry_free(bus->name_registry);
-+	kdbus_domain_unref(bus->domain);
-+	kdbus_policy_db_clear(&bus->policy_db);
-+	kdbus_meta_proc_unref(bus->creator_meta);
-+	kfree(bus);
-+}
-+
-+static void kdbus_bus_release(struct kdbus_node *node, bool was_active)
-+{
-+	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);
-+
-+	if (was_active)
-+		atomic_dec(&bus->creator->buses);
-+}
-+
-+static struct kdbus_bus *kdbus_bus_new(struct kdbus_domain *domain,
-+				       const char *name,
-+				       struct kdbus_bloom_parameter *bloom,
-+				       const u64 *pattach_owner,
-+				       u64 flags, kuid_t uid, kgid_t gid)
-+{
-+	struct kdbus_bus *b;
-+	u64 attach_owner;
-+	int ret;
-+
-+	if (bloom->size < 8 || bloom->size > KDBUS_BUS_BLOOM_MAX_SIZE ||
-+	    !KDBUS_IS_ALIGNED8(bloom->size) || bloom->n_hash < 1)
-+		return ERR_PTR(-EINVAL);
-+
-+	ret = kdbus_sanitize_attach_flags(pattach_owner ? *pattach_owner : 0,
-+					  &attach_owner);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	ret = kdbus_verify_uid_prefix(name, domain->user_namespace, uid);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	b = kzalloc(sizeof(*b), GFP_KERNEL);
-+	if (!b)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kdbus_node_init(&b->node, KDBUS_NODE_BUS);
-+
-+	b->node.free_cb = kdbus_bus_free;
-+	b->node.release_cb = kdbus_bus_release;
-+	b->node.uid = uid;
-+	b->node.gid = gid;
-+	b->node.mode = S_IRUSR | S_IXUSR;
-+
-+	if (flags & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
-+		b->node.mode |= S_IRGRP | S_IXGRP;
-+	if (flags & KDBUS_MAKE_ACCESS_WORLD)
-+		b->node.mode |= S_IROTH | S_IXOTH;
-+
-+	b->id = atomic64_inc_return(&domain->last_id);
-+	b->bus_flags = flags;
-+	b->attach_flags_owner = attach_owner;
-+	generate_random_uuid(b->id128);
-+	b->bloom = *bloom;
-+	b->domain = kdbus_domain_ref(domain);
-+
-+	kdbus_policy_db_init(&b->policy_db);
-+
-+	init_rwsem(&b->conn_rwlock);
-+	hash_init(b->conn_hash);
-+	INIT_LIST_HEAD(&b->monitors_list);
-+
-+	INIT_LIST_HEAD(&b->notify_list);
-+	spin_lock_init(&b->notify_lock);
-+	mutex_init(&b->notify_flush_lock);
-+
-+	ret = kdbus_node_link(&b->node, &domain->node, name);
-+	if (ret < 0)
-+		goto exit_unref;
-+
-+	/* cache the metadata/credentials of the creator */
-+	b->creator_meta = kdbus_meta_proc_new();
-+	if (IS_ERR(b->creator_meta)) {
-+		ret = PTR_ERR(b->creator_meta);
-+		b->creator_meta = NULL;
-+		goto exit_unref;
-+	}
-+
-+	ret = kdbus_meta_proc_collect(b->creator_meta,
-+				      KDBUS_ATTACH_CREDS |
-+				      KDBUS_ATTACH_PIDS |
-+				      KDBUS_ATTACH_AUXGROUPS |
-+				      KDBUS_ATTACH_TID_COMM |
-+				      KDBUS_ATTACH_PID_COMM |
-+				      KDBUS_ATTACH_EXE |
-+				      KDBUS_ATTACH_CMDLINE |
-+				      KDBUS_ATTACH_CGROUP |
-+				      KDBUS_ATTACH_CAPS |
-+				      KDBUS_ATTACH_SECLABEL |
-+				      KDBUS_ATTACH_AUDIT);
-+	if (ret < 0)
-+		goto exit_unref;
-+
-+	b->name_registry = kdbus_name_registry_new();
-+	if (IS_ERR(b->name_registry)) {
-+		ret = PTR_ERR(b->name_registry);
-+		b->name_registry = NULL;
-+		goto exit_unref;
-+	}
-+
-+	/*
-+	 * Bus-limits of the creator are accounted on its real UID, just like
-+	 * all other per-user limits.
-+	 */
-+	b->creator = kdbus_user_lookup(domain, current_uid());
-+	if (IS_ERR(b->creator)) {
-+		ret = PTR_ERR(b->creator);
-+		b->creator = NULL;
-+		goto exit_unref;
-+	}
-+
-+	return b;
-+
-+exit_unref:
-+	kdbus_node_deactivate(&b->node);
-+	kdbus_node_unref(&b->node);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_bus_ref() - increase the reference counter of a kdbus_bus
-+ * @bus:		The bus to reference
-+ *
-+ * Every user of a bus, except for its creator, must add a reference to the
-+ * kdbus_bus using this function.
-+ *
-+ * Return: the bus itself
-+ */
-+struct kdbus_bus *kdbus_bus_ref(struct kdbus_bus *bus)
-+{
-+	if (bus)
-+		kdbus_node_ref(&bus->node);
-+	return bus;
-+}
-+
-+/**
-+ * kdbus_bus_unref() - decrease the reference counter of a kdbus_bus
-+ * @bus:		The bus to unref
-+ *
-+ * Release a reference. If the reference count drops to 0, the bus will be
-+ * freed.
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_bus *kdbus_bus_unref(struct kdbus_bus *bus)
-+{
-+	if (bus)
-+		kdbus_node_unref(&bus->node);
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_bus_find_conn_by_id() - find a connection with a given id
-+ * @bus:		The bus to look for the connection
-+ * @id:			The 64-bit connection id
-+ *
-+ * Looks up a connection with a given id. The returned connection
-+ * is ref'ed, and needs to be unref'ed by the user. Returns NULL if
-+ * the connection can't be found.
-+ */
-+struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id)
-+{
-+	struct kdbus_conn *conn, *found = NULL;
-+
-+	down_read(&bus->conn_rwlock);
-+	hash_for_each_possible(bus->conn_hash, conn, hentry, id)
-+		if (conn->id == id) {
-+			found = kdbus_conn_ref(conn);
-+			break;
-+		}
-+	up_read(&bus->conn_rwlock);
-+
-+	return found;
-+}
-+
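A one-line reminder of the reference contract described above (kernel-side sketch):

	struct kdbus_conn *c;

	c = kdbus_bus_find_conn_by_id(bus, id);
	if (c) {
		/* ... use the connection ... */
		kdbus_conn_unref(c);	/* drop the lookup reference */
	}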
-+/**
-+ * kdbus_bus_broadcast() - send a message to all subscribed connections
-+ * @bus:	The bus the connections are connected to
-+ * @conn_src:	The source connection, may be %NULL for kernel notifications
-+ * @staging:	Staging object containing the message to send
-+ *
-+ * Send message to all connections that are currently active on the bus.
-+ * Connections must still have matches installed in order to let the message
-+ * pass.
-+ *
-+ * The caller must hold the name-registry lock of @bus.
-+ */
-+void kdbus_bus_broadcast(struct kdbus_bus *bus,
-+			 struct kdbus_conn *conn_src,
-+			 struct kdbus_staging *staging)
-+{
-+	struct kdbus_conn *conn_dst;
-+	unsigned int i;
-+	int ret;
-+
-+	lockdep_assert_held(&bus->name_registry->rwlock);
-+
-+	/*
-+	 * Make sure broadcasts are queued on monitors before we send them out
-+	 * to anyone else. Otherwise, connections might react to broadcasts before
-+	 * the monitor gets the broadcast queued. In the worst case, the
-+	 * monitor sees a reaction to the broadcast before the broadcast itself.
-+	 * We don't give ordering guarantees across connections (and monitors
-+	 * can re-construct order via sequence numbers), but we should at least
-+	 * try to avoid re-ordering for monitors.
-+	 */
-+	kdbus_bus_eavesdrop(bus, conn_src, staging);
-+
-+	down_read(&bus->conn_rwlock);
-+	hash_for_each(bus->conn_hash, i, conn_dst, hentry) {
-+		if (!kdbus_conn_is_ordinary(conn_dst))
-+			continue;
-+
-+		/*
-+		 * Check if there is a match for the kmsg object in
-+		 * the destination connection match db
-+		 */
-+		if (!kdbus_match_db_match_msg(conn_dst->match_db, conn_src,
-+					      staging))
-+			continue;
-+
-+		if (conn_src) {
-+			/*
-+			 * Anyone can send broadcasts, as they have no
-+			 * destination. But a receiver needs TALK access to
-+			 * the sender in order to receive broadcasts.
-+			 */
-+			if (!kdbus_conn_policy_talk(conn_dst, NULL, conn_src))
-+				continue;
-+		} else {
-+			/*
-+			 * Check if there is a policy db that prevents the
-+			 * destination connection from receiving this kernel
-+			 * notification
-+			 */
-+			if (!kdbus_conn_policy_see_notification(conn_dst, NULL,
-+								staging->msg))
-+				continue;
-+		}
-+
-+		ret = kdbus_conn_entry_insert(conn_src, conn_dst, staging,
-+					      NULL, NULL);
-+		if (ret < 0)
-+			kdbus_conn_lost_message(conn_dst);
-+	}
-+	up_read(&bus->conn_rwlock);
-+}
-+
-+/**
-+ * kdbus_bus_eavesdrop() - send a message to all subscribed monitors
-+ * @bus:	The bus the monitors are connected to
-+ * @conn_src:	The source connection, may be %NULL for kernel notifications
-+ * @staging:	Staging object containing the message to send
-+ *
-+ * Send message to all monitors that are currently active on the bus. Monitors
-+ * must still have matches installed in order to let the message pass.
-+ *
-+ * The caller must hold the name-registry lock of @bus.
-+ */
-+void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
-+			 struct kdbus_conn *conn_src,
-+			 struct kdbus_staging *staging)
-+{
-+	struct kdbus_conn *conn_dst;
-+	int ret;
-+
-+	/*
-+	 * Monitor connections get all messages; ignore possible errors
-+	 * when sending messages to monitor connections.
-+	 */
-+
-+	lockdep_assert_held(&bus->name_registry->rwlock);
-+
-+	down_read(&bus->conn_rwlock);
-+	list_for_each_entry(conn_dst, &bus->monitors_list, monitor_entry) {
-+		ret = kdbus_conn_entry_insert(conn_src, conn_dst, staging,
-+					      NULL, NULL);
-+		if (ret < 0)
-+			kdbus_conn_lost_message(conn_dst);
-+	}
-+	up_read(&bus->conn_rwlock);
-+}
-+
-+/**
-+ * kdbus_cmd_bus_make() - handle KDBUS_CMD_BUS_MAKE
-+ * @domain:		domain to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: NULL or newly created bus on success, ERR_PTR on failure.
-+ */
-+struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
-+				     void __user *argp)
-+{
-+	struct kdbus_bus *bus = NULL;
-+	struct kdbus_cmd *cmd;
-+	struct kdbus_ep *ep = NULL;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
-+		{ .type = KDBUS_ITEM_BLOOM_PARAMETER, .mandatory = true },
-+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_MAKE_ACCESS_GROUP |
-+				 KDBUS_MAKE_ACCESS_WORLD,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+	if (ret > 0)
-+		return NULL;
-+
-+	bus = kdbus_bus_new(domain,
-+			    argv[1].item->str, &argv[2].item->bloom_parameter,
-+			    argv[3].item ? argv[3].item->data64 : NULL,
-+			    cmd->flags, current_euid(), current_egid());
-+	if (IS_ERR(bus)) {
-+		ret = PTR_ERR(bus);
-+		bus = NULL;
-+		goto exit;
-+	}
-+
-+	if (atomic_inc_return(&bus->creator->buses) > KDBUS_USER_MAX_BUSES) {
-+		atomic_dec(&bus->creator->buses);
-+		ret = -EMFILE;
-+		goto exit;
-+	}
-+
-+	if (!kdbus_node_activate(&bus->node)) {
-+		atomic_dec(&bus->creator->buses);
-+		ret = -ESHUTDOWN;
-+		goto exit;
-+	}
-+
-+	ep = kdbus_ep_new(bus, "bus", cmd->flags, bus->node.uid, bus->node.gid,
-+			  false);
-+	if (IS_ERR(ep)) {
-+		ret = PTR_ERR(ep);
-+		ep = NULL;
-+		goto exit;
-+	}
-+
-+	if (!kdbus_node_activate(&ep->node)) {
-+		ret = -ESHUTDOWN;
-+		goto exit;
-+	}
-+
-+	/*
-+	 * Drop our own reference, effectively causing the endpoint to be
-+	 * deactivated and released when the parent bus is.
-+	 */
-+	ep = kdbus_ep_unref(ep);
-+
-+exit:
-+	ret = kdbus_args_clear(&args, ret);
-+	if (ret < 0) {
-+		if (ep) {
-+			kdbus_node_deactivate(&ep->node);
-+			kdbus_ep_unref(ep);
-+		}
-+		if (bus) {
-+			kdbus_node_deactivate(&bus->node);
-+			kdbus_bus_unref(bus);
-+		}
-+		return ERR_PTR(ret);
-+	}
-+	return bus;
-+}
-+
-+/**
-+ * kdbus_cmd_bus_creator_info() - handle KDBUS_CMD_BUS_CREATOR_INFO
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_cmd_info *cmd;
-+	struct kdbus_bus *bus = conn->ep->bus;
-+	struct kdbus_pool_slice *slice = NULL;
-+	struct kdbus_item *meta_items = NULL;
-+	struct kdbus_item_header item_hdr;
-+	struct kdbus_info info = {};
-+	size_t meta_size, name_len, cnt = 0;
-+	struct kvec kvec[6];
-+	u64 attach_flags, size = 0;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
-+	if (ret < 0)
-+		goto exit;
-+
-+	attach_flags &= bus->attach_flags_owner;
-+
-+	ret = kdbus_meta_emit(bus->creator_meta, NULL, NULL, conn,
-+			      attach_flags, &meta_items, &meta_size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	name_len = strlen(bus->node.name) + 1;
-+	info.id = bus->id;
-+	info.flags = bus->bus_flags;
-+	item_hdr.type = KDBUS_ITEM_MAKE_NAME;
-+	item_hdr.size = KDBUS_ITEM_HEADER_SIZE + name_len;
-+
-+	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &size);
-+	kdbus_kvec_set(&kvec[cnt++], &item_hdr, sizeof(item_hdr), &size);
-+	kdbus_kvec_set(&kvec[cnt++], bus->node.name, name_len, &size);
-+	cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
-+	if (meta_size > 0) {
-+		kdbus_kvec_set(&kvec[cnt++], meta_items, meta_size, &size);
-+		cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
-+	}
-+
-+	info.size = size;
-+
-+	slice = kdbus_pool_slice_alloc(conn->pool, size, false);
-+	if (IS_ERR(slice)) {
-+		ret = PTR_ERR(slice);
-+		slice = NULL;
-+		goto exit;
-+	}
-+
-+	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
-+
-+	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
-+	    kdbus_member_set_user(&cmd->info_size, argp,
-+				  typeof(*cmd), info_size))
-+		ret = -EFAULT;
-+
-+exit:
-+	kdbus_pool_slice_release(slice);
-+	kfree(meta_items);
-+	return kdbus_args_clear(&args, ret);
-+}
-diff --git a/ipc/kdbus/bus.h b/ipc/kdbus/bus.h
-new file mode 100644
-index 0000000..8c2acae
---- /dev/null
-+++ b/ipc/kdbus/bus.h
-@@ -0,0 +1,101 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_BUS_H
-+#define __KDBUS_BUS_H
-+
-+#include <linux/hashtable.h>
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/rwsem.h>
-+#include <linux/spinlock.h>
-+#include <uapi/linux/kdbus.h>
-+
-+#include "metadata.h"
-+#include "names.h"
-+#include "node.h"
-+#include "policy.h"
-+
-+struct kdbus_conn;
-+struct kdbus_domain;
-+struct kdbus_staging;
-+struct kdbus_user;
-+
-+/**
-+ * struct kdbus_bus - bus in a domain
-+ * @node:		kdbus_node
-+ * @id:			ID of this bus in the domain
-+ * @bus_flags:		Simple pass-through flags from userspace to userspace
-+ * @attach_flags_owner:	KDBUS_ATTACH_* flags of bus creator that other
-+ *			connections can see or query
-+ * @id128:		Unique random 128 bit ID of this bus
-+ * @bloom:		Bloom parameters
-+ * @domain:		Domain of this bus
-+ * @creator:		Creator of the bus
-+ * @creator_meta:	Meta information about the bus creator
-+ * @last_message_id:	Last used message id
-+ * @policy_db:		Policy database for this bus
-+ * @name_registry:	Name registry of this bus
-+ * @conn_rwlock:	Read/Write lock for all lists of child connections
-+ * @conn_hash:		Map of connection IDs
-+ * @monitors_list:	Connections that monitor this bus
-+ * @notify_list:	List of pending kernel-generated messages
-+ * @notify_lock:	Notification list lock
-+ * @notify_flush_lock:	Notification flushing lock
-+ */
-+struct kdbus_bus {
-+	struct kdbus_node node;
-+
-+	/* static */
-+	u64 id;
-+	u64 bus_flags;
-+	u64 attach_flags_owner;
-+	u8 id128[16];
-+	struct kdbus_bloom_parameter bloom;
-+	struct kdbus_domain *domain;
-+	struct kdbus_user *creator;
-+	struct kdbus_meta_proc *creator_meta;
-+
-+	/* protected by own locks */
-+	atomic64_t last_message_id;
-+	struct kdbus_policy_db policy_db;
-+	struct kdbus_name_registry *name_registry;
-+
-+	/* protected by conn_rwlock */
-+	struct rw_semaphore conn_rwlock;
-+	DECLARE_HASHTABLE(conn_hash, 8);
-+	struct list_head monitors_list;
-+
-+	/* protected by notify_lock */
-+	struct list_head notify_list;
-+	spinlock_t notify_lock;
-+	struct mutex notify_flush_lock;
-+};
-+
-+struct kdbus_bus *kdbus_bus_ref(struct kdbus_bus *bus);
-+struct kdbus_bus *kdbus_bus_unref(struct kdbus_bus *bus);
-+
-+struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id);
-+void kdbus_bus_broadcast(struct kdbus_bus *bus,
-+			 struct kdbus_conn *conn_src,
-+			 struct kdbus_staging *staging);
-+void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
-+			 struct kdbus_conn *conn_src,
-+			 struct kdbus_staging *staging);
-+
-+struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
-+				     void __user *argp);
-+int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp);
-+
-+#endif
-diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
-new file mode 100644
-index 0000000..ef63d65
---- /dev/null
-+++ b/ipc/kdbus/connection.c
-@@ -0,0 +1,2227 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/audit.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/fs_struct.h>
-+#include <linux/hashtable.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/math64.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/path.h>
-+#include <linux/poll.h>
-+#include <linux/sched.h>
-+#include <linux/shmem_fs.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/syscalls.h>
-+#include <linux/uio.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "match.h"
-+#include "message.h"
-+#include "metadata.h"
-+#include "names.h"
-+#include "domain.h"
-+#include "item.h"
-+#include "notify.h"
-+#include "policy.h"
-+#include "pool.h"
-+#include "reply.h"
-+#include "util.h"
-+#include "queue.h"
-+
-+#define KDBUS_CONN_ACTIVE_BIAS	(INT_MIN + 2)
-+#define KDBUS_CONN_ACTIVE_NEW	(INT_MIN + 1)
-+
-+static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep,
-+					 struct file *file,
-+					 struct kdbus_cmd_hello *hello,
-+					 const char *name,
-+					 const struct kdbus_creds *creds,
-+					 const struct kdbus_pids *pids,
-+					 const char *seclabel,
-+					 const char *conn_description)
-+{
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	static struct lock_class_key __key;
-+#endif
-+	struct kdbus_pool_slice *slice = NULL;
-+	struct kdbus_bus *bus = ep->bus;
-+	struct kdbus_conn *conn;
-+	u64 attach_flags_send;
-+	u64 attach_flags_recv;
-+	u64 items_size = 0;
-+	bool is_policy_holder;
-+	bool is_activator;
-+	bool is_monitor;
-+	bool privileged;
-+	bool owner;
-+	struct kvec kvec;
-+	int ret;
-+
-+	struct {
-+		u64 size;
-+		u64 type;
-+		struct kdbus_bloom_parameter bloom;
-+	} bloom_item;
-+
-+	privileged = kdbus_ep_is_privileged(ep, file);
-+	owner = kdbus_ep_is_owner(ep, file);
-+
-+	is_monitor = hello->flags & KDBUS_HELLO_MONITOR;
-+	is_activator = hello->flags & KDBUS_HELLO_ACTIVATOR;
-+	is_policy_holder = hello->flags & KDBUS_HELLO_POLICY_HOLDER;
-+
-+	if (!hello->pool_size || !IS_ALIGNED(hello->pool_size, PAGE_SIZE))
-+		return ERR_PTR(-EINVAL);
-+	if (is_monitor + is_activator + is_policy_holder > 1)
-+		return ERR_PTR(-EINVAL);
-+	if (name && !is_activator && !is_policy_holder)
-+		return ERR_PTR(-EINVAL);
-+	if (!name && (is_activator || is_policy_holder))
-+		return ERR_PTR(-EINVAL);
-+	if (name && !kdbus_name_is_valid(name, true))
-+		return ERR_PTR(-EINVAL);
-+	if (is_monitor && ep->user)
-+		return ERR_PTR(-EOPNOTSUPP);
-+	if (!owner && (is_activator || is_policy_holder || is_monitor))
-+		return ERR_PTR(-EPERM);
-+	if (!owner && (creds || pids || seclabel))
-+		return ERR_PTR(-EPERM);
-+
-+	ret = kdbus_sanitize_attach_flags(hello->attach_flags_send,
-+					  &attach_flags_send);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	ret = kdbus_sanitize_attach_flags(hello->attach_flags_recv,
-+					  &attach_flags_recv);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
-+	if (!conn)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kref_init(&conn->kref);
-+	atomic_set(&conn->active, KDBUS_CONN_ACTIVE_NEW);
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	lockdep_init_map(&conn->dep_map, "s_active", &__key, 0);
-+#endif
-+	mutex_init(&conn->lock);
-+	INIT_LIST_HEAD(&conn->names_list);
-+	INIT_LIST_HEAD(&conn->reply_list);
-+	atomic_set(&conn->request_count, 0);
-+	atomic_set(&conn->lost_count, 0);
-+	INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
-+	conn->cred = get_cred(file->f_cred);
-+	conn->pid = get_pid(task_pid(current));
-+	get_fs_root(current->fs, &conn->root_path);
-+	init_waitqueue_head(&conn->wait);
-+	kdbus_queue_init(&conn->queue);
-+	conn->privileged = privileged;
-+	conn->owner = owner;
-+	conn->ep = kdbus_ep_ref(ep);
-+	conn->id = atomic64_inc_return(&bus->domain->last_id);
-+	conn->flags = hello->flags;
-+	atomic64_set(&conn->attach_flags_send, attach_flags_send);
-+	atomic64_set(&conn->attach_flags_recv, attach_flags_recv);
-+	INIT_LIST_HEAD(&conn->monitor_entry);
-+
-+	if (conn_description) {
-+		conn->description = kstrdup(conn_description, GFP_KERNEL);
-+		if (!conn->description) {
-+			ret = -ENOMEM;
-+			goto exit_unref;
-+		}
-+	}
-+
-+	conn->pool = kdbus_pool_new(conn->description, hello->pool_size);
-+	if (IS_ERR(conn->pool)) {
-+		ret = PTR_ERR(conn->pool);
-+		conn->pool = NULL;
-+		goto exit_unref;
-+	}
-+
-+	conn->match_db = kdbus_match_db_new();
-+	if (IS_ERR(conn->match_db)) {
-+		ret = PTR_ERR(conn->match_db);
-+		conn->match_db = NULL;
-+		goto exit_unref;
-+	}
-+
-+	/* return properties of this connection to the caller */
-+	hello->bus_flags = bus->bus_flags;
-+	hello->id = conn->id;
-+
-+	BUILD_BUG_ON(sizeof(bus->id128) != sizeof(hello->id128));
-+	memcpy(hello->id128, bus->id128, sizeof(hello->id128));
-+
-+	/* privileged processes can impersonate somebody else */
-+	if (creds || pids || seclabel) {
-+		conn->meta_fake = kdbus_meta_fake_new();
-+		if (IS_ERR(conn->meta_fake)) {
-+			ret = PTR_ERR(conn->meta_fake);
-+			conn->meta_fake = NULL;
-+			goto exit_unref;
-+		}
-+
-+		ret = kdbus_meta_fake_collect(conn->meta_fake,
-+					      creds, pids, seclabel);
-+		if (ret < 0)
-+			goto exit_unref;
-+	} else {
-+		conn->meta_proc = kdbus_meta_proc_new();
-+		if (IS_ERR(conn->meta_proc)) {
-+			ret = PTR_ERR(conn->meta_proc);
-+			conn->meta_proc = NULL;
-+			goto exit_unref;
-+		}
-+
-+		ret = kdbus_meta_proc_collect(conn->meta_proc,
-+					      KDBUS_ATTACH_CREDS |
-+					      KDBUS_ATTACH_PIDS |
-+					      KDBUS_ATTACH_AUXGROUPS |
-+					      KDBUS_ATTACH_TID_COMM |
-+					      KDBUS_ATTACH_PID_COMM |
-+					      KDBUS_ATTACH_EXE |
-+					      KDBUS_ATTACH_CMDLINE |
-+					      KDBUS_ATTACH_CGROUP |
-+					      KDBUS_ATTACH_CAPS |
-+					      KDBUS_ATTACH_SECLABEL |
-+					      KDBUS_ATTACH_AUDIT);
-+		if (ret < 0)
-+			goto exit_unref;
-+	}
-+
-+	/*
-+	 * Account the connection against the current user (UID), or for
-+	 * custom endpoints use the anonymous user assigned to the endpoint.
-+	 * Note that limits are always accounted against the real UID, not
-+	 * the effective UID (cred->user always points to the accounting of
-+	 * cred->uid, not cred->euid).
-+	 * In case the caller is privileged, we allow changing the accounting
-+	 * to the faked user.
-+	 */
-+	if (ep->user) {
-+		conn->user = kdbus_user_ref(ep->user);
-+	} else {
-+		kuid_t uid;
-+
-+		if (conn->meta_fake && uid_valid(conn->meta_fake->uid) &&
-+		    conn->privileged)
-+			uid = conn->meta_fake->uid;
-+		else
-+			uid = conn->cred->uid;
-+
-+		conn->user = kdbus_user_lookup(ep->bus->domain, uid);
-+		if (IS_ERR(conn->user)) {
-+			ret = PTR_ERR(conn->user);
-+			conn->user = NULL;
-+			goto exit_unref;
-+		}
-+	}
-+
-+	if (atomic_inc_return(&conn->user->connections) > KDBUS_USER_MAX_CONN) {
-+		/* decremented by destructor as conn->user is valid */
-+		ret = -EMFILE;
-+		goto exit_unref;
-+	}
-+
-+	bloom_item.size = sizeof(bloom_item);
-+	bloom_item.type = KDBUS_ITEM_BLOOM_PARAMETER;
-+	bloom_item.bloom = bus->bloom;
-+	kdbus_kvec_set(&kvec, &bloom_item, bloom_item.size, &items_size);
-+
-+	slice = kdbus_pool_slice_alloc(conn->pool, items_size, false);
-+	if (IS_ERR(slice)) {
-+		ret = PTR_ERR(slice);
-+		slice = NULL;
-+		goto exit_unref;
-+	}
-+
-+	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, items_size);
-+	if (ret < 0)
-+		goto exit_unref;
-+
-+	kdbus_pool_slice_publish(slice, &hello->offset, &hello->items_size);
-+	kdbus_pool_slice_release(slice);
-+
-+	return conn;
-+
-+exit_unref:
-+	kdbus_pool_slice_release(slice);
-+	kdbus_conn_unref(conn);
-+	return ERR_PTR(ret);
-+}
-+
-+static void __kdbus_conn_free(struct kref *kref)
-+{
-+	struct kdbus_conn *conn = container_of(kref, struct kdbus_conn, kref);
-+
-+	WARN_ON(kdbus_conn_active(conn));
-+	WARN_ON(delayed_work_pending(&conn->work));
-+	WARN_ON(!list_empty(&conn->queue.msg_list));
-+	WARN_ON(!list_empty(&conn->names_list));
-+	WARN_ON(!list_empty(&conn->reply_list));
-+
-+	if (conn->user) {
-+		atomic_dec(&conn->user->connections);
-+		kdbus_user_unref(conn->user);
-+	}
-+
-+	kdbus_meta_fake_free(conn->meta_fake);
-+	kdbus_meta_proc_unref(conn->meta_proc);
-+	kdbus_match_db_free(conn->match_db);
-+	kdbus_pool_free(conn->pool);
-+	kdbus_ep_unref(conn->ep);
-+	path_put(&conn->root_path);
-+	put_pid(conn->pid);
-+	put_cred(conn->cred);
-+	kfree(conn->description);
-+	kfree(conn->quota);
-+	kfree(conn);
-+}
-+
-+/**
-+ * kdbus_conn_ref() - take a connection reference
-+ * @conn:		Connection, may be %NULL
-+ *
-+ * Return: the connection itself
-+ */
-+struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn)
-+{
-+	if (conn)
-+		kref_get(&conn->kref);
-+	return conn;
-+}
-+
-+/**
-+ * kdbus_conn_unref() - drop a connection reference
-+ * @conn:		Connection (may be NULL)
-+ *
-+ * When the last reference is dropped, the connection's internal structure
-+ * is freed.
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn)
-+{
-+	if (conn)
-+		kref_put(&conn->kref, __kdbus_conn_free);
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_conn_active() - connection is not disconnected
-+ * @conn:		Connection to check
-+ *
-+ * Returns true if the connection has not been disconnected yet. Note that a
-+ * connection might be disconnected asynchronously, unless you hold the
-+ * connection lock. If that's not suitable for you, see kdbus_conn_acquire() to
-+ * suppress connection shutdown for a short period.
-+ *
-+ * Return: true if the connection is still active
-+ */
-+bool kdbus_conn_active(const struct kdbus_conn *conn)
-+{
-+	return atomic_read(&conn->active) >= 0;
-+}
-+
-+/**
-+ * kdbus_conn_acquire() - acquire an active connection reference
-+ * @conn:		Connection
-+ *
-+ * Users can close a connection via KDBUS_BYEBYE (or by destroying the
-+ * endpoint/bus/...) at any time. Whenever this happens, we should deny any
-+ * user-visible action on this connection and signal ECONNRESET instead.
-+ * To avoid testing for connection availability every time you take the
-+ * connection-lock, you can acquire a connection for short periods.
-+ *
-+ * By calling kdbus_conn_acquire(), you gain an "active reference" to the
-+ * connection. You must also hold a regular reference at any time! As long as
-+ * you hold the active-ref, the connection will not be shut down. However, if
-+ * the connection was shut down, you can never acquire an active-ref again.
-+ *
-+ * kdbus_conn_disconnect() disables the connection and then waits for all active
-+ * references to be dropped. It will also wake up any pending operation.
-+ * However, you must not sleep for an indefinite period while holding an
-+ * active-reference. Otherwise, kdbus_conn_disconnect() might stall. If you need
-+ * to sleep for an indefinite period, either release the reference and try to
-+ * acquire it again after waking up, or make kdbus_conn_disconnect() wake up
-+ * your wait-queue.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_conn_acquire(struct kdbus_conn *conn)
-+{
-+	if (!atomic_inc_unless_negative(&conn->active))
-+		return -ECONNRESET;
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
-+#endif
-+
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_conn_release() - release an active connection reference
-+ * @conn:		Connection
-+ *
-+ * This releases an active reference that has been acquired via
-+ * kdbus_conn_acquire(). If the connection was already disabled and this is the
-+ * last active-ref that is dropped, the disconnect-waiter will be woken up and
-+ * properly close the connection.
-+ */
-+void kdbus_conn_release(struct kdbus_conn *conn)
-+{
-+	int v;
-+
-+	if (!conn)
-+		return;
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	rwsem_release(&conn->dep_map, 1, _RET_IP_);
-+#endif
-+
-+	v = atomic_dec_return(&conn->active);
-+	if (v != KDBUS_CONN_ACTIVE_BIAS)
-+		return;
-+
-+	wake_up_all(&conn->wait);
-+}
-+
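The contract above, condensed into a kernel-side usage sketch:

	int ret = kdbus_conn_acquire(conn);
	if (ret < 0)
		return ret;	/* -ECONNRESET: connection already shut down */

	/* ... short, non-blocking, user-visible operation on 'conn' ... */

	kdbus_conn_release(conn);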
-+static int kdbus_conn_connect(struct kdbus_conn *conn, const char *name)
-+{
-+	struct kdbus_ep *ep = conn->ep;
-+	struct kdbus_bus *bus = ep->bus;
-+	int ret;
-+
-+	if (WARN_ON(atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_NEW))
-+		return -EALREADY;
-+
-+	/* make sure the ep-node is active while we add our connection */
-+	if (!kdbus_node_acquire(&ep->node))
-+		return -ESHUTDOWN;
-+
-+	/* lock order: domain -> bus -> ep -> names -> conn */
-+	mutex_lock(&ep->lock);
-+	down_write(&bus->conn_rwlock);
-+
-+	/* link into monitor list */
-+	if (kdbus_conn_is_monitor(conn))
-+		list_add_tail(&conn->monitor_entry, &bus->monitors_list);
-+
-+	/* link into bus and endpoint */
-+	list_add_tail(&conn->ep_entry, &ep->conn_list);
-+	hash_add(bus->conn_hash, &conn->hentry, conn->id);
-+
-+	/* enable lookups and acquire active ref */
-+	atomic_set(&conn->active, 1);
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
-+#endif
-+
-+	up_write(&bus->conn_rwlock);
-+	mutex_unlock(&ep->lock);
-+
-+	kdbus_node_release(&ep->node);
-+
-+	/*
-+	 * Notify subscribers about the new active connection, unless it is
-+	 * a monitor. Monitors are invisible on the bus, can't be addressed
-+	 * directly, and won't cause any notifications.
-+	 */
-+	if (!kdbus_conn_is_monitor(conn)) {
-+		ret = kdbus_notify_id_change(bus, KDBUS_ITEM_ID_ADD,
-+					     conn->id, conn->flags);
-+		if (ret < 0)
-+			goto exit_disconnect;
-+	}
-+
-+	if (kdbus_conn_is_activator(conn)) {
-+		u64 flags = KDBUS_NAME_ACTIVATOR;
-+
-+		if (WARN_ON(!name)) {
-+			ret = -EINVAL;
-+			goto exit_disconnect;
-+		}
-+
-+		ret = kdbus_name_acquire(bus->name_registry, conn, name,
-+					 flags, NULL);
-+		if (ret < 0)
-+			goto exit_disconnect;
-+	}
-+
-+	kdbus_conn_release(conn);
-+	kdbus_notify_flush(bus);
-+	return 0;
-+
-+exit_disconnect:
-+	kdbus_conn_release(conn);
-+	kdbus_conn_disconnect(conn, false);
-+	return ret;
-+}
-+
-+/**
-+ * kdbus_conn_disconnect() - disconnect a connection
-+ * @conn:		The connection to disconnect
-+ * @ensure_queue_empty:	Flag to indicate if the call should fail in
-+ *			case the connection's message list is not
-+ *			empty
-+ *
-+ * If @ensure_queue_empty is true and the connection has pending messages,
-+ * -EBUSY is returned.
-+ *
-+ * Return: 0 on success, negative errno on failure
-+ */
-+int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty)
-+{
-+	struct kdbus_queue_entry *entry, *tmp;
-+	struct kdbus_bus *bus = conn->ep->bus;
-+	struct kdbus_reply *r, *r_tmp;
-+	struct kdbus_conn *c;
-+	int i, v;
-+
-+	mutex_lock(&conn->lock);
-+	v = atomic_read(&conn->active);
-+	if (v == KDBUS_CONN_ACTIVE_NEW) {
-+		/* was never connected */
-+		mutex_unlock(&conn->lock);
-+		return 0;
-+	}
-+	if (v < 0) {
-+		/* already dead */
-+		mutex_unlock(&conn->lock);
-+		return -ECONNRESET;
-+	}
-+	if (ensure_queue_empty && !list_empty(&conn->queue.msg_list)) {
-+		/* still busy */
-+		mutex_unlock(&conn->lock);
-+		return -EBUSY;
-+	}
-+
-+	atomic_add(KDBUS_CONN_ACTIVE_BIAS, &conn->active);
-+	mutex_unlock(&conn->lock);
-+
-+	wake_up_interruptible(&conn->wait);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	rwsem_acquire(&conn->dep_map, 0, 0, _RET_IP_);
-+	if (atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_BIAS)
-+		lock_contended(&conn->dep_map, _RET_IP_);
-+#endif
-+
-+	wait_event(conn->wait,
-+		   atomic_read(&conn->active) == KDBUS_CONN_ACTIVE_BIAS);
-+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	lock_acquired(&conn->dep_map, _RET_IP_);
-+	rwsem_release(&conn->dep_map, 1, _RET_IP_);
-+#endif
-+
-+	cancel_delayed_work_sync(&conn->work);
-+	kdbus_policy_remove_owner(&conn->ep->bus->policy_db, conn);
-+
-+	/* lock order: domain -> bus -> ep -> names -> conn */
-+	mutex_lock(&conn->ep->lock);
-+	down_write(&bus->conn_rwlock);
-+
-+	/* remove from bus and endpoint */
-+	hash_del(&conn->hentry);
-+	list_del(&conn->monitor_entry);
-+	list_del(&conn->ep_entry);
-+
-+	up_write(&bus->conn_rwlock);
-+	mutex_unlock(&conn->ep->lock);
-+
-+	/*
-+	 * Remove all names associated with this connection; this possibly
-+	 * moves queued messages back to the activator connection.
-+	 */
-+	kdbus_name_release_all(bus->name_registry, conn);
-+
-+	/* if we die while other connections wait for our reply, notify them */
-+	mutex_lock(&conn->lock);
-+	list_for_each_entry_safe(entry, tmp, &conn->queue.msg_list, entry) {
-+		if (entry->reply)
-+			kdbus_notify_reply_dead(bus,
-+						entry->reply->reply_dst->id,
-+						entry->reply->cookie);
-+		kdbus_queue_entry_free(entry);
-+	}
-+
-+	list_for_each_entry_safe(r, r_tmp, &conn->reply_list, entry)
-+		kdbus_reply_unlink(r);
-+	mutex_unlock(&conn->lock);
-+
-+	/* lock order: domain -> bus -> ep -> names -> conn */
-+	down_read(&bus->conn_rwlock);
-+	hash_for_each(bus->conn_hash, i, c, hentry) {
-+		mutex_lock(&c->lock);
-+		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
-+			if (r->reply_src != conn)
-+				continue;
-+
-+			if (r->sync)
-+				kdbus_sync_reply_wakeup(r, -EPIPE);
-+			else
-+				/* send a 'connection dead' notification */
-+				kdbus_notify_reply_dead(bus, c->id, r->cookie);
-+
-+			kdbus_reply_unlink(r);
-+		}
-+		mutex_unlock(&c->lock);
-+	}
-+	up_read(&bus->conn_rwlock);
-+
-+	if (!kdbus_conn_is_monitor(conn))
-+		kdbus_notify_id_change(bus, KDBUS_ITEM_ID_REMOVE,
-+				       conn->id, conn->flags);
-+
-+	kdbus_notify_flush(bus);
-+
-+	return 0;
-+}
-+
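Seen from userspace, the -EBUSY contract of this function (exposed as KDBUS_CMD_BYEBYE) suggests a drain-and-retry loop; a sketch under the same assumptions as the earlier examples:

	struct kdbus_cmd byebye = { .size = sizeof(byebye) };

	while (ioctl(fd, KDBUS_CMD_BYEBYE, &byebye) < 0 && errno == EBUSY) {
		struct kdbus_msg *msg = recv_one(fd, pool);	/* drain queue */
		if (msg)
			free_slice(fd, (uint8_t *)msg - pool);
	}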
-+/**
-+ * kdbus_conn_has_name() - check if a connection owns a name
-+ * @conn:		Connection
-+ * @name:		Well-known name to check for
-+ *
-+ * The caller must hold the registry lock of conn->ep->bus.
-+ *
-+ * Return: true if the name is currently owned by the connection
-+ */
-+bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name)
-+{
-+	struct kdbus_name_owner *owner;
-+
-+	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
-+
-+	list_for_each_entry(owner, &conn->names_list, conn_entry)
-+		if (!(owner->flags & KDBUS_NAME_IN_QUEUE) &&
-+		    !strcmp(name, owner->name->name))
-+			return true;
-+
-+	return false;
-+}
-+
-+struct kdbus_quota {
-+	u32 memory;
-+	u16 msgs;
-+	u8 fds;
-+};
-+
-+/**
-+ * kdbus_conn_quota_inc() - increase quota accounting
-+ * @c:		connection owning the quota tracking
-+ * @u:		user to account for (or NULL for kernel accounting)
-+ * @memory:	size of memory to account for
-+ * @fds:	number of FDs to account for
-+ *
-+ * This call manages the quotas on resource @c. That is, it's used if other
-+ * users want to use the resources of connection @c, which so far only concerns
-+ * the receive queue of the destination.
-+ *
-+ * This increases the quota-accounting for user @u by @memory bytes and @fds
-+ * file descriptors. If the user has already reached the quota limits, this call
-+ * will not do any accounting but return a negative error code indicating the
-+ * failure.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
-+			 size_t memory, size_t fds)
-+{
-+	struct kdbus_quota *quota;
-+	size_t available, accounted;
-+	unsigned int id;
-+
-+	/*
-+	 * Pool Layout:
-+	 * 50% of a pool is always owned by the connection. It is reserved for
-+	 * kernel queries, handling received messages and other tasks that are
-+	 * under control of the pool owner. The other 50% of the pool is used
-+	 * as the incoming queue.
-+	 * As we optionally support user-space based policies, we need fair
-+	 * allocation schemes. Furthermore, resource utilization should be
-+	 * maximized, so only minimal resources stay reserved. However, we need
-+	 * to adapt to a dynamic number of users, as we cannot know how many
-+	 * users will talk to a connection. Therefore, the current allocation
-+	 * works like this:
-+	 * We limit the number of bytes in a destination's pool per sending
-+	 * user. The space available for a user is 33% of the unused pool space
-+	 * (whereas the space used by the user itself is also treated as
-+	 * 'unused'). This way, we favor users coming first, but keep enough
-+	 * pool space available for any following users. Given that messages are
-+	 * dequeued in FIFO order, this should balance nicely if the number of
-+	 * users grows. At the same time, this algorithm guarantees that the
-+	 * space available to a connection is reduced dynamically, the more
-+	 * concurrent users talk to a connection.
-+	 */
-+
-+	/* per user-accounting is expensive, so we keep state small */
-+	BUILD_BUG_ON(sizeof(quota->memory) != 4);
-+	BUILD_BUG_ON(sizeof(quota->msgs) != 2);
-+	BUILD_BUG_ON(sizeof(quota->fds) != 1);
-+	BUILD_BUG_ON(KDBUS_CONN_MAX_MSGS > U16_MAX);
-+	BUILD_BUG_ON(KDBUS_CONN_MAX_FDS_PER_USER > U8_MAX);
-+
-+	id = u ? u->id : KDBUS_USER_KERNEL_ID;
-+	if (id >= c->n_quota) {
-+		unsigned int users;
-+
-+		users = max(KDBUS_ALIGN8(id) + 8, id);
-+		quota = krealloc(c->quota, users * sizeof(*quota),
-+				 GFP_KERNEL | __GFP_ZERO);
-+		if (!quota)
-+			return -ENOMEM;
-+
-+		c->n_quota = users;
-+		c->quota = quota;
-+	}
-+
-+	quota = &c->quota[id];
-+	kdbus_pool_accounted(c->pool, &available, &accounted);
-+
-+	/* half the pool is _always_ reserved for the pool owner */
-+	available /= 2;
-+
-+	/*
-+	 * Pool owner slices are un-accounted slices; they can claim more
-+	 * than 50% of the queue. However, the slices we're dealing with here
-+	 * belong to the incoming queue, hence they are 'accounted' slices
-+	 * to which the 50%-limit applies.
-+	 */
-+	if (available < accounted)
-+		return -ENOBUFS;
-+
-+	/* 1/3 of the remaining space (including your own memory) */
-+	available = (available - accounted + quota->memory) / 3;
-+
-+	if (available < quota->memory ||
-+	    available - quota->memory < memory ||
-+	    quota->memory + memory > U32_MAX)
-+		return -ENOBUFS;
-+	if (quota->msgs >= KDBUS_CONN_MAX_MSGS)
-+		return -ENOBUFS;
-+	if (quota->fds + fds < quota->fds ||
-+	    quota->fds + fds > KDBUS_CONN_MAX_FDS_PER_USER)
-+		return -EMFILE;
-+
-+	quota->memory += memory;
-+	quota->fds += fds;
-+	++quota->msgs;
-+	return 0;
-+}
-+
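A worked example of the allowance computed above, with illustrative numbers: an 8 MiB pool leaves available = 4 MiB after the owner's half is reserved. With accounted = 1 MiB across all senders, of which this user already holds quota->memory = 256 KiB:

	(4096 KiB - 1024 KiB + 256 KiB) / 3  =  ~1109 KiB

so this user may queue roughly 1109 - 256 = 853 KiB more before new messages are rejected with -ENOBUFS.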
-+/**
-+ * kdbus_conn_quota_dec() - decrease quota accounting
-+ * @c:		connection owning the quota tracking
-+ * @u:		user which was accounted for (or NULL for kernel accounting)
-+ * @memory:	size of memory which was accounted for
-+ * @fds:	number of FDs which were accounted for
-+ *
-+ * This does the reverse of kdbus_conn_quota_inc(). You have to release any
-+ * accounted resources that you called kdbus_conn_quota_inc() for. However, you
-+ * must not call kdbus_conn_quota_dec() if the accounting failed (that is,
-+ * kdbus_conn_quota_inc() failed).
-+ */
-+void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
-+			  size_t memory, size_t fds)
-+{
-+	struct kdbus_quota *quota;
-+	unsigned int id;
-+
-+	id = u ? u->id : KDBUS_USER_KERNEL_ID;
-+	if (WARN_ON(id >= c->n_quota))
-+		return;
-+
-+	quota = &c->quota[id];
-+
-+	if (!WARN_ON(quota->msgs == 0))
-+		--quota->msgs;
-+	if (!WARN_ON(quota->memory < memory))
-+		quota->memory -= memory;
-+	if (!WARN_ON(quota->fds < fds))
-+		quota->fds -= fds;
-+}
-+
-+/**
-+ * kdbus_conn_lost_message() - handle lost messages
-+ * @c:		connection that lost a message
-+ *
-+ * kdbus is reliable. That means we try hard never to lose messages. However,
-+ * memory is limited, so we cannot rely on transmissions to never fail.
-+ * Therefore, we use quota-limits to let callers know if their unicast message
-+ * cannot be transmitted to a peer. This works fine for unicasts, but for
-+ * broadcasts we cannot make the caller handle the transmission failure.
-+ * Instead, we must let the destination know that it couldn't receive a
-+ * broadcast.
-+ * As this is an unlikely scenario, we keep it simple. A single lost-counter
-+ * remembers the number of lost messages since the last call to RECV. The next
-+ * message retrieval will notify the connection that it lost messages since the
-+ * last message retrieval and thus should resync its state.
-+ */
-+void kdbus_conn_lost_message(struct kdbus_conn *c)
-+{
-+	if (atomic_inc_return(&c->lost_count) == 1)
-+		wake_up_interruptible(&c->wait);
-+}
-+
-+/* Callers should take the conn_dst lock */
-+static struct kdbus_queue_entry *
-+kdbus_conn_entry_make(struct kdbus_conn *conn_src,
-+		      struct kdbus_conn *conn_dst,
-+		      struct kdbus_staging *staging)
-+{
-+	/* The remote connection was disconnected */
-+	if (!kdbus_conn_active(conn_dst))
-+		return ERR_PTR(-ECONNRESET);
-+
-+	/*
-+	 * If the connection does not accept file descriptors but the message
-+	 * has some attached, refuse it.
-+	 *
-+	 * If this is a monitor connection, accept the message. In that
-+	 * case, all file descriptors will be set to -1 at receive time.
-+	 */
-+	if (!kdbus_conn_is_monitor(conn_dst) &&
-+	    !(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
-+	    staging->gaps && staging->gaps->n_fds > 0)
-+		return ERR_PTR(-ECOMM);
-+
-+	return kdbus_queue_entry_new(conn_src, conn_dst, staging);
-+}
-+
-+/*
-+ * Synchronously responding to a message, allocate a queue entry
-+ * and attach it to the reply tracking object.
-+ * The connection's queue will never get to see it.
-+ */
-+static int kdbus_conn_entry_sync_attach(struct kdbus_conn *conn_dst,
-+					struct kdbus_staging *staging,
-+					struct kdbus_reply *reply_wake)
-+{
-+	struct kdbus_queue_entry *entry;
-+	int remote_ret, ret = 0;
-+
-+	mutex_lock(&reply_wake->reply_dst->lock);
-+
-+	/*
-+	 * If we are still waiting then proceed, allocate a queue
-+	 * entry and attach it to the reply object
-+	 */
-+	if (reply_wake->waiting) {
-+		entry = kdbus_conn_entry_make(reply_wake->reply_src, conn_dst,
-+					      staging);
-+		if (IS_ERR(entry))
-+			ret = PTR_ERR(entry);
-+		else
-+			/* Attach the entry to the reply object */
-+			reply_wake->queue_entry = entry;
-+	} else {
-+		ret = -ECONNRESET;
-+	}
-+
-+	/*
-+	 * Update the reply object and wake up remote peer only
-+	 * on appropriate return codes
-+	 *
-+	 * * -ECOMM: if the replying connection failed with -ECOMM,
-+	 *           wake up the remote peer with -EREMOTEIO
-+	 *
-+	 *           This lets the original sender differentiate between
-+	 *           the two -ECOMM cases: an -ECOMM error during the
-+	 *           sync send is reported as-is, while an -ECOMM error
-+	 *           during the sync reply is rewritten to -EREMOTEIO.
-+	 *
-+	 * * Wake up on all other return codes.
-+	 */
-+	remote_ret = ret;
-+
-+	if (ret == -ECOMM)
-+		remote_ret = -EREMOTEIO;
-+
-+	kdbus_sync_reply_wakeup(reply_wake, remote_ret);
-+	kdbus_reply_unlink(reply_wake);
-+	mutex_unlock(&reply_wake->reply_dst->lock);
-+
-+	return ret;
-+}
-+
-+/**
-+ * kdbus_conn_entry_insert() - enqueue a message into the receiver's pool
-+ * @conn_src:		The sending connection
-+ * @conn_dst:		The connection to queue into
-+ * @staging:		Message to send
-+ * @reply:		The reply tracker to attach to the queue entry
-+ * @name:		Destination name this msg is sent to, or NULL
-+ *
-+ * Return: 0 on success, negative error code otherwise.
-+ */
-+int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
-+			    struct kdbus_conn *conn_dst,
-+			    struct kdbus_staging *staging,
-+			    struct kdbus_reply *reply,
-+			    const struct kdbus_name_entry *name)
-+{
-+	struct kdbus_queue_entry *entry;
-+	int ret;
-+
-+	kdbus_conn_lock2(conn_src, conn_dst);
-+
-+	entry = kdbus_conn_entry_make(conn_src, conn_dst, staging);
-+	if (IS_ERR(entry)) {
-+		ret = PTR_ERR(entry);
-+		goto exit_unlock;
-+	}
-+
-+	if (reply) {
-+		kdbus_reply_link(reply);
-+		if (!reply->sync)
-+			schedule_delayed_work(&conn_src->work, 0);
-+	}
-+
-+	/*
-+	 * Record the sequence number of the registered name; it will
-+	 * be remembered by the queue, in case messages addressed to a
-+	 * name need to be moved from or to an activator.
-+	 */
-+	if (name)
-+		entry->dst_name_id = name->name_id;
-+
-+	kdbus_queue_entry_enqueue(entry, reply);
-+	wake_up_interruptible(&conn_dst->wait);
-+
-+	ret = 0;
-+
-+exit_unlock:
-+	kdbus_conn_unlock2(conn_src, conn_dst);
-+	return ret;
-+}
-+
-+static int kdbus_conn_wait_reply(struct kdbus_conn *conn_src,
-+				 struct kdbus_cmd_send *cmd_send,
-+				 struct file *ioctl_file,
-+				 struct file *cancel_fd,
-+				 struct kdbus_reply *reply_wait,
-+				 ktime_t expire)
-+{
-+	struct kdbus_queue_entry *entry;
-+	struct poll_wqueues pwq = {};
-+	int ret;
-+
-+	if (WARN_ON(!reply_wait))
-+		return -EIO;
-+
-+	/*
-+	 * Block until the reply arrives. reply_wait is left untouched
-+	 * by the timeout scans that might be conducted for other,
-+	 * asynchronous replies of conn_src.
-+	 */
-+
-+	poll_initwait(&pwq);
-+	poll_wait(ioctl_file, &conn_src->wait, &pwq.pt);
-+
-+	for (;;) {
-+		/*
-+		 * Any of the following conditions will stop our synchronously
-+		 * blocking SEND command:
-+		 *
-+		 * a) The origin sender closed its connection
-+		 * b) The remote peer answered, setting reply_wait->waiting = 0
-+		 * c) The cancel FD was written to
-+		 * d) A signal was received
-+		 * e) The specified timeout was reached, and none of the above
-+		 *    conditions kicked in.
-+		 */
-+
-+		/*
-+		 * We have already acquired an active reference when
-+		 * entering here, but another thread may call
-+		 * KDBUS_CMD_BYEBYE which does not acquire an active
-+		 * reference, therefore kdbus_conn_disconnect() will
-+		 * not wait for us.
-+		 */
-+		if (!kdbus_conn_active(conn_src)) {
-+			ret = -ECONNRESET;
-+			break;
-+		}
-+
-+		/*
-+		 * After the replying peer unsets the waiting variable,
-+		 * it will wake us up.
-+		 */
-+		if (!reply_wait->waiting) {
-+			ret = reply_wait->err;
-+			break;
-+		}
-+
-+		if (cancel_fd) {
-+			unsigned int r;
-+
-+			r = cancel_fd->f_op->poll(cancel_fd, &pwq.pt);
-+			if (r & POLLIN) {
-+				ret = -ECANCELED;
-+				break;
-+			}
-+		}
-+
-+		if (signal_pending(current)) {
-+			ret = -EINTR;
-+			break;
-+		}
-+
-+		if (!poll_schedule_timeout(&pwq, TASK_INTERRUPTIBLE,
-+					   &expire, 0)) {
-+			ret = -ETIMEDOUT;
-+			break;
-+		}
-+
-+		/*
-+		 * Reset the poll worker func, so the waitqueues are not
-+		 * added to the poll table again. We just reuse what we've
-+		 * collected earlier for further iterations.
-+		 */
-+		init_poll_funcptr(&pwq.pt, NULL);
-+	}
-+
-+	poll_freewait(&pwq);
-+
-+	if (ret == -EINTR) {
-+		/*
-+		 * Interrupted system call. Unref the reply object, and pass
-+		 * the return value down the chain. Mark the reply as
-+		 * interrupted, so the cleanup work can remove it, but do not
-+		 * unlink it from the list. Once the syscall restarts, we'll
-+		 * pick it up and wait on it again.
-+		 */
-+		mutex_lock(&conn_src->lock);
-+		reply_wait->interrupted = true;
-+		schedule_delayed_work(&conn_src->work, 0);
-+		mutex_unlock(&conn_src->lock);
-+
-+		return -ERESTARTSYS;
-+	}
-+
-+	mutex_lock(&conn_src->lock);
-+	reply_wait->waiting = false;
-+	entry = reply_wait->queue_entry;
-+	if (entry) {
-+		ret = kdbus_queue_entry_install(entry,
-+						&cmd_send->reply.return_flags,
-+						true);
-+		kdbus_pool_slice_publish(entry->slice, &cmd_send->reply.offset,
-+					 &cmd_send->reply.msg_size);
-+		kdbus_queue_entry_free(entry);
-+	}
-+	kdbus_reply_unlink(reply_wait);
-+	mutex_unlock(&conn_src->lock);
-+
-+	return ret;
-+}
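Condition (c) above means a synchronous SEND can be aborted from another thread. Assuming the sender attached an eventfd as KDBUS_ITEM_CANCEL_FD when issuing KDBUS_CMD_SEND (setup not shown; cancel_efd is hypothetical), the aborting side reduces to:

	uint64_t one = 1;

	/* POLLIN on the cancel FD makes the blocked SEND return -ECANCELED */
	write(cancel_efd, &one, sizeof(one));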
-+
-+static int kdbus_pin_dst(struct kdbus_bus *bus,
-+			 struct kdbus_staging *staging,
-+			 struct kdbus_name_entry **out_name,
-+			 struct kdbus_conn **out_dst)
-+{
-+	const struct kdbus_msg *msg = staging->msg;
-+	struct kdbus_name_owner *owner = NULL;
-+	struct kdbus_name_entry *name = NULL;
-+	struct kdbus_conn *dst = NULL;
-+	int ret;
-+
-+	lockdep_assert_held(&bus->name_registry->rwlock);
-+
-+	if (!staging->dst_name) {
-+		dst = kdbus_bus_find_conn_by_id(bus, msg->dst_id);
-+		if (!dst)
-+			return -ENXIO;
-+
-+		if (!kdbus_conn_is_ordinary(dst)) {
-+			ret = -ENXIO;
-+			goto error;
-+		}
-+	} else {
-+		name = kdbus_name_lookup_unlocked(bus->name_registry,
-+						  staging->dst_name);
-+		if (name)
-+			owner = kdbus_name_get_owner(name);
-+		if (!owner)
-+			return -ESRCH;
-+
-+		/*
-+		 * If both a name and a connection ID are given as destination
-+		 * of a message, check that the currently owning connection of
-+		 * the name matches the specified ID.
-+		 * This way, we allow userspace to send the message to a
-+		 * specific connection by ID only if the connection currently
-+		 * owns the given name.
-+		 */
-+		if (msg->dst_id != KDBUS_DST_ID_NAME &&
-+		    msg->dst_id != owner->conn->id)
-+			return -EREMCHG;
-+
-+		if ((msg->flags & KDBUS_MSG_NO_AUTO_START) &&
-+		    kdbus_conn_is_activator(owner->conn))
-+			return -EADDRNOTAVAIL;
-+
-+		dst = kdbus_conn_ref(owner->conn);
-+	}
-+
-+	*out_name = name;
-+	*out_dst = dst;
-+	return 0;
-+
-+error:
-+	kdbus_conn_unref(dst);
-+	return ret;
-+}
-+
-+static int kdbus_conn_reply(struct kdbus_conn *src,
-+			    struct kdbus_staging *staging)
-+{
-+	const struct kdbus_msg *msg = staging->msg;
-+	struct kdbus_name_entry *name = NULL;
-+	struct kdbus_reply *reply, *wake = NULL;
-+	struct kdbus_conn *dst = NULL;
-+	struct kdbus_bus *bus = src->ep->bus;
-+	int ret;
-+
-+	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
-+	    WARN_ON(msg->flags & KDBUS_MSG_SIGNAL))
-+		return -EINVAL;
-+
-+	/* name-registry must be locked for lookup *and* collecting data */
-+	down_read(&bus->name_registry->rwlock);
-+
-+	/* find and pin destination */
-+
-+	ret = kdbus_pin_dst(bus, staging, &name, &dst);
-+	if (ret < 0)
-+		goto exit;
-+
-+	mutex_lock(&dst->lock);
-+	reply = kdbus_reply_find(src, dst, msg->cookie_reply);
-+	if (reply) {
-+		if (reply->sync)
-+			wake = kdbus_reply_ref(reply);
-+		kdbus_reply_unlink(reply);
-+	}
-+	mutex_unlock(&dst->lock);
-+
-+	if (!reply) {
-+		ret = -EBADSLT;
-+		goto exit;
-+	}
-+
-+	/* send message */
-+
-+	kdbus_bus_eavesdrop(bus, src, staging);
-+
-+	if (wake)
-+		ret = kdbus_conn_entry_sync_attach(dst, staging, wake);
-+	else
-+		ret = kdbus_conn_entry_insert(src, dst, staging, NULL, name);
-+
-+exit:
-+	up_read(&bus->name_registry->rwlock);
-+	kdbus_reply_unref(wake);
-+	kdbus_conn_unref(dst);
-+	return ret;
-+}
-+
-+static struct kdbus_reply *kdbus_conn_call(struct kdbus_conn *src,
-+					   struct kdbus_staging *staging,
-+					   ktime_t exp)
-+{
-+	const struct kdbus_msg *msg = staging->msg;
-+	struct kdbus_name_entry *name = NULL;
-+	struct kdbus_reply *wait = NULL;
-+	struct kdbus_conn *dst = NULL;
-+	struct kdbus_bus *bus = src->ep->bus;
-+	int ret;
-+
-+	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(msg->flags & KDBUS_MSG_SIGNAL) ||
-+	    WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY)))
-+		return ERR_PTR(-EINVAL);
-+
-+	/* resume previous wait-context, if available */
-+
-+	mutex_lock(&src->lock);
-+	wait = kdbus_reply_find(NULL, src, msg->cookie);
-+	if (wait) {
-+		if (wait->interrupted) {
-+			kdbus_reply_ref(wait);
-+			wait->interrupted = false;
-+		} else {
-+			wait = NULL;
-+		}
-+	}
-+	mutex_unlock(&src->lock);
-+
-+	if (wait)
-+		return wait;
-+
-+	if (ktime_compare(ktime_get(), exp) >= 0)
-+		return ERR_PTR(-ETIMEDOUT);
-+
-+	/* name-registry must be locked for lookup *and* collecting data */
-+	down_read(&bus->name_registry->rwlock);
-+
-+	/* find and pin destination */
-+
-+	ret = kdbus_pin_dst(bus, staging, &name, &dst);
-+	if (ret < 0)
-+		goto exit;
-+
-+	if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
-+		ret = -EPERM;
-+		goto exit;
-+	}
-+
-+	wait = kdbus_reply_new(dst, src, msg, name, true);
-+	if (IS_ERR(wait)) {
-+		ret = PTR_ERR(wait);
-+		wait = NULL;
-+		goto exit;
-+	}
-+
-+	/* send message */
-+
-+	kdbus_bus_eavesdrop(bus, src, staging);
-+
-+	ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
-+	if (ret < 0)
-+		goto exit;
-+
-+	ret = 0;
-+
-+exit:
-+	up_read(&bus->name_registry->rwlock);
-+	if (ret < 0) {
-+		kdbus_reply_unref(wait);
-+		wait = ERR_PTR(ret);
-+	}
-+	kdbus_conn_unref(dst);
-+	return wait;
-+}
-+
-+static int kdbus_conn_unicast(struct kdbus_conn *src,
-+			      struct kdbus_staging *staging)
-+{
-+	const struct kdbus_msg *msg = staging->msg;
-+	struct kdbus_name_entry *name = NULL;
-+	struct kdbus_reply *wait = NULL;
-+	struct kdbus_conn *dst = NULL;
-+	struct kdbus_bus *bus = src->ep->bus;
-+	bool is_signal = (msg->flags & KDBUS_MSG_SIGNAL);
-+	int ret = 0;
-+
-+	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY) &&
-+		    msg->cookie_reply != 0))
-+		return -EINVAL;
-+
-+	/* name-registry must be locked for lookup *and* collecting data */
-+	down_read(&bus->name_registry->rwlock);
-+
-+	/* find and pin destination */
-+
-+	ret = kdbus_pin_dst(bus, staging, &name, &dst);
-+	if (ret < 0)
-+		goto exit;
-+
-+	if (is_signal) {
-+		/* like broadcasts we eavesdrop even if the msg is dropped */
-+		kdbus_bus_eavesdrop(bus, src, staging);
-+
-+		/* drop silently if peer is not interested or not privileged */
-+		if (!kdbus_match_db_match_msg(dst->match_db, src, staging) ||
-+		    !kdbus_conn_policy_talk(dst, NULL, src))
-+			goto exit;
-+	} else if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
-+		ret = -EPERM;
-+		goto exit;
-+	} else if (msg->flags & KDBUS_MSG_EXPECT_REPLY) {
-+		wait = kdbus_reply_new(dst, src, msg, name, false);
-+		if (IS_ERR(wait)) {
-+			ret = PTR_ERR(wait);
-+			wait = NULL;
-+			goto exit;
-+		}
-+	}
-+
-+	/* send message */
-+
-+	if (!is_signal)
-+		kdbus_bus_eavesdrop(bus, src, staging);
-+
-+	ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
-+	if (ret < 0 && !is_signal)
-+		goto exit;
-+
-+	/* signals are treated like broadcasts, recv-errors are ignored */
-+	ret = 0;
-+
-+exit:
-+	up_read(&bus->name_registry->rwlock);
-+	kdbus_reply_unref(wait);
-+	kdbus_conn_unref(dst);
-+	return ret;
-+}
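A short recap of the dispatch rules implemented above:

	/*
	 * KDBUS_MSG_SIGNAL       - treated like a broadcast: the receiver's
	 *                          match rules and TALK policy are checked,
	 *                          and delivery errors are dropped silently.
	 * KDBUS_MSG_EXPECT_REPLY - the sender's TALK policy is checked and an
	 *                          asynchronous reply tracker is installed
	 *                          before the entry is queued.
	 * plain unicast          - only the sender's TALK policy is checked.
	 */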
-+
-+/**
-+ * kdbus_conn_move_messages() - move messages from one connection to another
-+ * @conn_dst:		Connection to copy to
-+ * @conn_src:		Connection to copy from
-+ * @name_id:		Filter for the sequence number of the registered
-+ *			name, 0 means no filtering.
-+ *
-+ * Move all messages from one connection to another. This is used when
-+ * an implementer connection is taking over/giving back a well-known name
-+ * from/to an activator connection.
-+ */
-+void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
-+			      struct kdbus_conn *conn_src,
-+			      u64 name_id)
-+{
-+	struct kdbus_queue_entry *e, *e_tmp;
-+	struct kdbus_reply *r, *r_tmp;
-+	struct kdbus_bus *bus;
-+	struct kdbus_conn *c;
-+	LIST_HEAD(msg_list);
-+	int i, ret = 0;
-+
-+	if (WARN_ON(conn_src == conn_dst))
-+		return;
-+
-+	bus = conn_src->ep->bus;
-+
-+	/* lock order: domain -> bus -> ep -> names -> conn */
-+	down_read(&bus->conn_rwlock);
-+	hash_for_each(bus->conn_hash, i, c, hentry) {
-+		if (c == conn_src || c == conn_dst)
-+			continue;
-+
-+		mutex_lock(&c->lock);
-+		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
-+			if (r->reply_src != conn_src)
-+				continue;
-+
-+			/* filter messages for a specific name */
-+			if (name_id > 0 && r->name_id != name_id)
-+				continue;
-+
-+			kdbus_conn_unref(r->reply_src);
-+			r->reply_src = kdbus_conn_ref(conn_dst);
-+		}
-+		mutex_unlock(&c->lock);
-+	}
-+	up_read(&bus->conn_rwlock);
-+
-+	kdbus_conn_lock2(conn_src, conn_dst);
-+	list_for_each_entry_safe(e, e_tmp, &conn_src->queue.msg_list, entry) {
-+		/* filter messages for a specific name */
-+		if (name_id > 0 && e->dst_name_id != name_id)
-+			continue;
-+
-+		if (!(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
-+		    e->gaps && e->gaps->n_fds > 0) {
-+			kdbus_conn_lost_message(conn_dst);
-+			kdbus_queue_entry_free(e);
-+			continue;
-+		}
-+
-+		ret = kdbus_queue_entry_move(e, conn_dst);
-+		if (ret < 0) {
-+			kdbus_conn_lost_message(conn_dst);
-+			kdbus_queue_entry_free(e);
-+			continue;
-+		}
-+	}
-+	kdbus_conn_unlock2(conn_src, conn_dst);
-+
-+	/* wake up poll() */
-+	wake_up_interruptible(&conn_dst->wait);
-+}
-+
-+/* query the policy-database for all names of @whom */
-+static bool kdbus_conn_policy_query_all(struct kdbus_conn *conn,
-+					const struct cred *conn_creds,
-+					struct kdbus_policy_db *db,
-+					struct kdbus_conn *whom,
-+					unsigned int access)
-+{
-+	struct kdbus_name_owner *owner;
-+	bool pass = false;
-+	int res;
-+
-+	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
-+
-+	down_read(&db->entries_rwlock);
-+	mutex_lock(&whom->lock);
-+
-+	list_for_each_entry(owner, &whom->names_list, conn_entry) {
-+		if (owner->flags & KDBUS_NAME_IN_QUEUE)
-+			continue;
-+
-+		res = kdbus_policy_query_unlocked(db,
-+					conn_creds ? : conn->cred,
-+					owner->name->name,
-+					kdbus_strhash(owner->name->name));
-+		if (res >= (int)access) {
-+			pass = true;
-+			break;
-+		}
-+	}
-+
-+	mutex_unlock(&whom->lock);
-+	up_read(&db->entries_rwlock);
-+
-+	return pass;
-+}
-+
-+/**
-+ * kdbus_conn_policy_own_name() - verify a connection can own the given name
-+ * @conn:		Connection
-+ * @conn_creds:		Credentials of @conn to use for policy check
-+ * @name:		Name
-+ *
-+ * This verifies that @conn is allowed to acquire the well-known name @name.
-+ *
-+ * Return: true if allowed, false if not.
-+ */
-+bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
-+				const struct cred *conn_creds,
-+				const char *name)
-+{
-+	unsigned int hash = kdbus_strhash(name);
-+	int res;
-+
-+	if (!conn_creds)
-+		conn_creds = conn->cred;
-+
-+	if (conn->ep->user) {
-+		res = kdbus_policy_query(&conn->ep->policy_db, conn_creds,
-+					 name, hash);
-+		if (res < KDBUS_POLICY_OWN)
-+			return false;
-+	}
-+
-+	if (conn->owner)
-+		return true;
-+
-+	res = kdbus_policy_query(&conn->ep->bus->policy_db, conn_creds,
-+				 name, hash);
-+	return res >= KDBUS_POLICY_OWN;
-+}
-+
-+/**
-+ * kdbus_conn_policy_talk() - verify a connection can talk to a given peer
-+ * @conn:		Connection that tries to talk
-+ * @conn_creds:		Credentials of @conn to use for policy check
-+ * @to:			Connection that is talked to
-+ *
-+ * This verifies that @conn is allowed to talk to @to.
-+ *
-+ * Return: true if allowed, false if not.
-+ */
-+bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
-+			    const struct cred *conn_creds,
-+			    struct kdbus_conn *to)
-+{
-+	if (!conn_creds)
-+		conn_creds = conn->cred;
-+
-+	if (conn->ep->user &&
-+	    !kdbus_conn_policy_query_all(conn, conn_creds, &conn->ep->policy_db,
-+					 to, KDBUS_POLICY_TALK))
-+		return false;
-+
-+	if (conn->owner)
-+		return true;
-+	if (uid_eq(conn_creds->euid, to->cred->uid))
-+		return true;
-+
-+	return kdbus_conn_policy_query_all(conn, conn_creds,
-+					   &conn->ep->bus->policy_db, to,
-+					   KDBUS_POLICY_TALK);
-+}
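The order of these checks matters; paraphrasing the body above:

	/*
	 * 1. On a custom endpoint, the endpoint policy must grant TALK
	 *    first - no bypass applies at this stage.
	 * 2. The bus owner (@conn->owner) may talk to anyone.
	 * 3. A connection may always talk to peers of its own user
	 *    (the sender's euid equals the destination's uid).
	 * 4. Otherwise, the bus policy-db must grant TALK on at least
	 *    one name currently owned by @to.
	 */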
-+
-+/**
-+ * kdbus_conn_policy_see_name_unlocked() - verify a connection can see a given
-+ *					   name
-+ * @conn:		Connection
-+ * @conn_creds:		Credentials of @conn to use for policy check
-+ * @name:		Name
-+ *
-+ * This verifies that @conn is allowed to see the well-known name @name. Caller
-+ * must hold policy-lock.
-+ *
-+ * Return: true if allowed, false if not.
-+ */
-+bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
-+					 const struct cred *conn_creds,
-+					 const char *name)
-+{
-+	int res;
-+
-+	/*
-+	 * By default, all names are visible on a bus. SEE policies can only be
-+	 * installed on custom endpoints, where by default no name is visible.
-+	 */
-+	if (!conn->ep->user)
-+		return true;
-+
-+	res = kdbus_policy_query_unlocked(&conn->ep->policy_db,
-+					  conn_creds ? : conn->cred,
-+					  name, kdbus_strhash(name));
-+	return res >= KDBUS_POLICY_SEE;
-+}
-+
-+static bool kdbus_conn_policy_see_name(struct kdbus_conn *conn,
-+				       const struct cred *conn_creds,
-+				       const char *name)
-+{
-+	bool res;
-+
-+	down_read(&conn->ep->policy_db.entries_rwlock);
-+	res = kdbus_conn_policy_see_name_unlocked(conn, conn_creds, name);
-+	up_read(&conn->ep->policy_db.entries_rwlock);
-+
-+	return res;
-+}
-+
-+static bool kdbus_conn_policy_see(struct kdbus_conn *conn,
-+				  const struct cred *conn_creds,
-+				  struct kdbus_conn *whom)
-+{
-+	/*
-+	 * By default, all names are visible on a bus, so a connection can
-+	 * always see other connections. SEE policies can only be installed on
-+	 * custom endpoints, where by default no name is visible and we hide
-+	 * peers from each other, unless you see at least _one_ name of the
-+	 * peer.
-+	 */
-+	return !conn->ep->user ||
-+	       kdbus_conn_policy_query_all(conn, conn_creds,
-+					   &conn->ep->policy_db, whom,
-+					   KDBUS_POLICY_SEE);
-+}
-+
-+/**
-+ * kdbus_conn_policy_see_notification() - verify a connection is allowed to
-+ *					  receive a given kernel notification
-+ * @conn:		Connection
-+ * @conn_creds:		Credentials of @conn to use for policy check
-+ * @msg:		Notification message
-+ *
-+ * This checks whether @conn is allowed to see the kernel notification.
-+ *
-+ * Return: true if allowed, false if not.
-+ */
-+bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
-+					const struct cred *conn_creds,
-+					const struct kdbus_msg *msg)
-+{
-+	/*
-+	 * Depending on the notification type, broadcasted kernel notifications
-+	 * have to be filtered:
-+	 *
-+	 * KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}: This notification is forwarded
-+	 *     to a peer if, and only if, that peer can see the name this
-+	 *     notification is for.
-+	 *
-+	 * KDBUS_ITEM_ID_{ADD,REMOVE}: Notifications for ID changes are
-+	 *     broadcast to everyone, to allow tracking peers.
-+	 */
-+
-+	switch (msg->items[0].type) {
-+	case KDBUS_ITEM_NAME_ADD:
-+	case KDBUS_ITEM_NAME_REMOVE:
-+	case KDBUS_ITEM_NAME_CHANGE:
-+		return kdbus_conn_policy_see_name(conn, conn_creds,
-+					msg->items[0].name_change.name);
-+
-+	case KDBUS_ITEM_ID_ADD:
-+	case KDBUS_ITEM_ID_REMOVE:
-+		return true;
-+
-+	default:
-+		WARN(1, "Invalid type for notification broadcast: %llu\n",
-+		     (unsigned long long)msg->items[0].type);
-+		return false;
-+	}
-+}
-+
-+/**
-+ * kdbus_cmd_hello() - handle KDBUS_CMD_HELLO
-+ * @ep:			Endpoint to operate on
-+ * @file:		File this connection is opened on
-+ * @argp:		Command payload
-+ *
-+ * Return: NULL or newly created connection on success, ERR_PTR on failure.
-+ */
-+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, struct file *file,
-+				   void __user *argp)
-+{
-+	struct kdbus_cmd_hello *cmd;
-+	struct kdbus_conn *c = NULL;
-+	const char *item_name;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_NAME },
-+		{ .type = KDBUS_ITEM_CREDS },
-+		{ .type = KDBUS_ITEM_PIDS },
-+		{ .type = KDBUS_ITEM_SECLABEL },
-+		{ .type = KDBUS_ITEM_CONN_DESCRIPTION },
-+		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_HELLO_ACCEPT_FD |
-+				 KDBUS_HELLO_ACTIVATOR |
-+				 KDBUS_HELLO_POLICY_HOLDER |
-+				 KDBUS_HELLO_MONITOR,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+	if (ret > 0)
-+		return NULL;
-+
-+	item_name = argv[1].item ? argv[1].item->str : NULL;
-+
-+	c = kdbus_conn_new(ep, file, cmd, item_name,
-+			   argv[2].item ? &argv[2].item->creds : NULL,
-+			   argv[3].item ? &argv[3].item->pids : NULL,
-+			   argv[4].item ? argv[4].item->str : NULL,
-+			   argv[5].item ? argv[5].item->str : NULL);
-+	if (IS_ERR(c)) {
-+		ret = PTR_ERR(c);
-+		c = NULL;
-+		goto exit;
-+	}
-+
-+	ret = kdbus_conn_connect(c, item_name);
-+	if (ret < 0)
-+		goto exit;
-+
-+	if (kdbus_conn_is_activator(c) || kdbus_conn_is_policy_holder(c)) {
-+		ret = kdbus_conn_acquire(c);
-+		if (ret < 0)
-+			goto exit;
-+
-+		ret = kdbus_policy_set(&c->ep->bus->policy_db, args.items,
-+				       args.items_size, 1,
-+				       kdbus_conn_is_policy_holder(c), c);
-+		kdbus_conn_release(c);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	if (copy_to_user(argp, cmd, sizeof(*cmd)))
-+		ret = -EFAULT;
-+
-+exit:
-+	ret = kdbus_args_clear(&args, ret);
-+	if (ret < 0) {
-+		if (c) {
-+			kdbus_conn_disconnect(c, false);
-+			kdbus_conn_unref(c);
-+		}
-+		return ERR_PTR(ret);
-+	}
-+	return c;
-+}
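For orientation, a minimal userspace counterpart of this handler could look like the sketch below; the bus path and pool size are illustrative and error handling is omitted:

	struct kdbus_cmd_hello hello = {
		.size = sizeof(hello),
		.flags = KDBUS_HELLO_ACCEPT_FD,
		.pool_size = 1024 * 1024,	/* receive pool */
	};
	int fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR | O_CLOEXEC);

	if (fd >= 0 && ioctl(fd, KDBUS_CMD_HELLO, &hello) == 0)
		printf("connected, id %llu\n", (unsigned long long)hello.id);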
-+
-+/**
-+ * kdbus_cmd_byebye_unlocked() - handle KDBUS_CMD_BYEBYE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * The caller must not hold any active reference to @conn or this will deadlock.
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_cmd *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	ret = kdbus_conn_disconnect(conn, true);
-+	return kdbus_args_clear(&args, ret);
-+}
-+
-+/**
-+ * kdbus_cmd_conn_info() - handle KDBUS_CMD_CONN_INFO
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_meta_conn *conn_meta = NULL;
-+	struct kdbus_pool_slice *slice = NULL;
-+	struct kdbus_name_entry *entry = NULL;
-+	struct kdbus_name_owner *owner = NULL;
-+	struct kdbus_conn *owner_conn = NULL;
-+	struct kdbus_item *meta_items = NULL;
-+	struct kdbus_info info = {};
-+	struct kdbus_cmd_info *cmd;
-+	struct kdbus_bus *bus = conn->ep->bus;
-+	struct kvec kvec[3];
-+	size_t meta_size, cnt = 0;
-+	const char *name;
-+	u64 attach_flags, size = 0;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_NAME },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	/* registry must be held throughout lookup *and* collecting data */
-+	down_read(&bus->name_registry->rwlock);
-+
-+	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
-+	if (ret < 0)
-+		goto exit;
-+
-+	name = argv[1].item ? argv[1].item->str : NULL;
-+
-+	if (name) {
-+		entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
-+		if (entry)
-+			owner = kdbus_name_get_owner(entry);
-+		if (!owner ||
-+		    !kdbus_conn_policy_see_name(conn, current_cred(), name) ||
-+		    (cmd->id != 0 && owner->conn->id != cmd->id)) {
-+			/* pretend a name doesn't exist if you cannot see it */
-+			ret = -ESRCH;
-+			goto exit;
-+		}
-+
-+		owner_conn = kdbus_conn_ref(owner->conn);
-+	} else if (cmd->id > 0) {
-+		owner_conn = kdbus_bus_find_conn_by_id(bus, cmd->id);
-+		if (!owner_conn || !kdbus_conn_policy_see(conn, current_cred(),
-+							  owner_conn)) {
-+			/* pretend an id doesn't exist if you cannot see it */
-+			ret = -ENXIO;
-+			goto exit;
-+		}
-+	} else {
-+		ret = -EINVAL;
-+		goto exit;
-+	}
-+
-+	attach_flags &= atomic64_read(&owner_conn->attach_flags_send);
-+
-+	conn_meta = kdbus_meta_conn_new();
-+	if (IS_ERR(conn_meta)) {
-+		ret = PTR_ERR(conn_meta);
-+		conn_meta = NULL;
-+		goto exit;
-+	}
-+
-+	ret = kdbus_meta_conn_collect(conn_meta, owner_conn, 0, attach_flags);
-+	if (ret < 0)
-+		goto exit;
-+
-+	ret = kdbus_meta_emit(owner_conn->meta_proc, owner_conn->meta_fake,
-+			      conn_meta, conn, attach_flags,
-+			      &meta_items, &meta_size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	info.id = owner_conn->id;
-+	info.flags = owner_conn->flags;
-+
-+	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &size);
-+	if (meta_size > 0) {
-+		kdbus_kvec_set(&kvec[cnt++], meta_items, meta_size, &size);
-+		cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
-+	}
-+
-+	info.size = size;
-+
-+	slice = kdbus_pool_slice_alloc(conn->pool, size, false);
-+	if (IS_ERR(slice)) {
-+		ret = PTR_ERR(slice);
-+		slice = NULL;
-+		goto exit;
-+	}
-+
-+	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
-+
-+	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
-+	    kdbus_member_set_user(&cmd->info_size, argp,
-+				  typeof(*cmd), info_size)) {
-+		ret = -EFAULT;
-+		goto exit;
-+	}
-+
-+	ret = 0;
-+
-+exit:
-+	up_read(&bus->name_registry->rwlock);
-+	kdbus_pool_slice_release(slice);
-+	kfree(meta_items);
-+	kdbus_meta_conn_unref(conn_meta);
-+	kdbus_conn_unref(owner_conn);
-+	return kdbus_args_clear(&args, ret);
-+}
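The reply lands in the caller's pool rather than in the ioctl argument itself. A sketch of the userspace side, assuming the pool is mmap()-ed at `pool` and peer_id names the connection to query:

	struct kdbus_cmd_info cmd = { .size = sizeof(cmd), .id = peer_id };

	if (ioctl(fd, KDBUS_CMD_CONN_INFO, &cmd) == 0) {
		struct kdbus_info *info = (struct kdbus_info *)(pool + cmd.offset);
		struct kdbus_cmd_free f = { .size = sizeof(f), .offset = cmd.offset };

		/* ... walk info->items for the requested metadata ... */

		ioctl(fd, KDBUS_CMD_FREE, &f);	/* release the slice */
	}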
-+
-+/**
-+ * kdbus_cmd_update() - handle KDBUS_CMD_UPDATE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_item *item_policy;
-+	u64 *item_attach_send = NULL;
-+	u64 *item_attach_recv = NULL;
-+	struct kdbus_cmd *cmd;
-+	u64 attach_send;
-+	u64 attach_recv;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
-+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
-+		{ .type = KDBUS_ITEM_NAME, .multiple = true },
-+		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	item_attach_send = argv[1].item ? &argv[1].item->data64[0] : NULL;
-+	item_attach_recv = argv[2].item ? &argv[2].item->data64[0] : NULL;
-+	item_policy = argv[3].item ? : argv[4].item;
-+
-+	if (item_attach_send) {
-+		if (!kdbus_conn_is_ordinary(conn) &&
-+		    !kdbus_conn_is_monitor(conn)) {
-+			ret = -EOPNOTSUPP;
-+			goto exit;
-+		}
-+
-+		ret = kdbus_sanitize_attach_flags(*item_attach_send,
-+						  &attach_send);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	if (item_attach_recv) {
-+		if (!kdbus_conn_is_ordinary(conn) &&
-+		    !kdbus_conn_is_monitor(conn) &&
-+		    !kdbus_conn_is_activator(conn)) {
-+			ret = -EOPNOTSUPP;
-+			goto exit;
-+		}
-+
-+		ret = kdbus_sanitize_attach_flags(*item_attach_recv,
-+						  &attach_recv);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	if (item_policy && !kdbus_conn_is_policy_holder(conn)) {
-+		ret = -EOPNOTSUPP;
-+		goto exit;
-+	}
-+
-+	/* now that we verified the input, update the connection */
-+
-+	if (item_policy) {
-+		ret = kdbus_policy_set(&conn->ep->bus->policy_db, cmd->items,
-+				       KDBUS_ITEMS_SIZE(cmd, items),
-+				       1, true, conn);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	if (item_attach_send)
-+		atomic64_set(&conn->attach_flags_send, attach_send);
-+
-+	if (item_attach_recv)
-+		atomic64_set(&conn->attach_flags_recv, attach_recv);
-+
-+exit:
-+	return kdbus_args_clear(&args, ret);
-+}
-+
-+/**
-+ * kdbus_cmd_send() - handle KDBUS_CMD_SEND
-+ * @conn:		connection to operate on
-+ * @f:			file this command was called on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
-+{
-+	struct kdbus_cmd_send *cmd;
-+	struct kdbus_staging *staging = NULL;
-+	struct kdbus_msg *msg = NULL;
-+	struct file *cancel_fd = NULL;
-+	int ret, ret2;
-+
-+	/* command arguments */
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_CANCEL_FD },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_SEND_SYNC_REPLY,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	/* message arguments */
-+	struct kdbus_arg msg_argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_PAYLOAD_VEC, .multiple = true },
-+		{ .type = KDBUS_ITEM_PAYLOAD_MEMFD, .multiple = true },
-+		{ .type = KDBUS_ITEM_FDS },
-+		{ .type = KDBUS_ITEM_BLOOM_FILTER },
-+		{ .type = KDBUS_ITEM_DST_NAME },
-+	};
-+	struct kdbus_args msg_args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_MSG_EXPECT_REPLY |
-+				 KDBUS_MSG_NO_AUTO_START |
-+				 KDBUS_MSG_SIGNAL,
-+		.argv = msg_argv,
-+		.argc = ARRAY_SIZE(msg_argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	/* make sure to parse both @cmd and @msg on negotiation */
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret < 0)
-+		goto exit;
-+	else if (ret > 0 && !cmd->msg_address) /* negotiation without msg */
-+		goto exit;
-+
-+	ret2 = kdbus_args_parse_msg(&msg_args, KDBUS_PTR(cmd->msg_address),
-+				    &msg);
-+	if (ret2 < 0) { /* cannot parse message */
-+		ret = ret2;
-+		goto exit;
-+	} else if (ret2 > 0 && !ret) { /* msg-negot implies cmd-negot */
-+		ret = -EINVAL;
-+		goto exit;
-+	} else if (ret > 0) { /* negotiation */
-+		goto exit;
-+	}
-+
-+	/* here we parsed both @cmd and @msg; neither wants negotiation */
-+
-+	cmd->reply.return_flags = 0;
-+	kdbus_pool_publish_empty(conn->pool, &cmd->reply.offset,
-+				 &cmd->reply.msg_size);
-+
-+	if (argv[1].item) {
-+		cancel_fd = fget(argv[1].item->fds[0]);
-+		if (!cancel_fd) {
-+			ret = -EBADF;
-+			goto exit;
-+		}
-+
-+		if (!cancel_fd->f_op->poll) {
-+			ret = -EINVAL;
-+			goto exit;
-+		}
-+	}
-+
-+	/* patch-in the source of this message */
-+	if (msg->src_id > 0 && msg->src_id != conn->id) {
-+		ret = -EINVAL;
-+		goto exit;
-+	}
-+	msg->src_id = conn->id;
-+
-+	staging = kdbus_staging_new_user(conn->ep->bus, cmd, msg);
-+	if (IS_ERR(staging)) {
-+		ret = PTR_ERR(staging);
-+		staging = NULL;
-+		goto exit;
-+	}
-+
-+	if (msg->dst_id == KDBUS_DST_ID_BROADCAST) {
-+		down_read(&conn->ep->bus->name_registry->rwlock);
-+		kdbus_bus_broadcast(conn->ep->bus, conn, staging);
-+		up_read(&conn->ep->bus->name_registry->rwlock);
-+	} else if (cmd->flags & KDBUS_SEND_SYNC_REPLY) {
-+		struct kdbus_reply *r;
-+		ktime_t exp;
-+
-+		exp = ns_to_ktime(msg->timeout_ns);
-+		r = kdbus_conn_call(conn, staging, exp);
-+		if (IS_ERR(r)) {
-+			ret = PTR_ERR(r);
-+			goto exit;
-+		}
-+
-+		ret = kdbus_conn_wait_reply(conn, cmd, f, cancel_fd, r, exp);
-+		kdbus_reply_unref(r);
-+		if (ret < 0)
-+			goto exit;
-+	} else if ((msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
-+		   msg->cookie_reply == 0) {
-+		ret = kdbus_conn_unicast(conn, staging);
-+		if (ret < 0)
-+			goto exit;
-+	} else {
-+		ret = kdbus_conn_reply(conn, staging);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	if (kdbus_member_set_user(&cmd->reply, argp, typeof(*cmd), reply))
-+		ret = -EFAULT;
-+
-+exit:
-+	if (cancel_fd)
-+		fput(cancel_fd);
-+	kdbus_staging_free(staging);
-+	ret = kdbus_args_clear(&msg_args, ret);
-+	return kdbus_args_clear(&args, ret);
-+}
-+
-+/**
-+ * kdbus_cmd_recv() - handle KDBUS_CMD_RECV
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_queue_entry *entry;
-+	struct kdbus_cmd_recv *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_RECV_PEEK |
-+				 KDBUS_RECV_DROP |
-+				 KDBUS_RECV_USE_PRIORITY,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn) &&
-+	    !kdbus_conn_is_monitor(conn) &&
-+	    !kdbus_conn_is_activator(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	cmd->dropped_msgs = 0;
-+	cmd->msg.return_flags = 0;
-+	kdbus_pool_publish_empty(conn->pool, &cmd->msg.offset,
-+				 &cmd->msg.msg_size);
-+
-+	/* DROP+priority is not reliable, so prevent it */
-+	if ((cmd->flags & KDBUS_RECV_DROP) &&
-+	    (cmd->flags & KDBUS_RECV_USE_PRIORITY)) {
-+		ret = -EINVAL;
-+		goto exit;
-+	}
-+
-+	mutex_lock(&conn->lock);
-+
-+	entry = kdbus_queue_peek(&conn->queue, cmd->priority,
-+				 cmd->flags & KDBUS_RECV_USE_PRIORITY);
-+	if (!entry) {
-+		mutex_unlock(&conn->lock);
-+		ret = -EAGAIN;
-+	} else if (cmd->flags & KDBUS_RECV_DROP) {
-+		struct kdbus_reply *reply = kdbus_reply_ref(entry->reply);
-+
-+		kdbus_queue_entry_free(entry);
-+
-+		mutex_unlock(&conn->lock);
-+
-+		if (reply) {
-+			mutex_lock(&reply->reply_dst->lock);
-+			if (!list_empty(&reply->entry)) {
-+				kdbus_reply_unlink(reply);
-+				if (reply->sync)
-+					kdbus_sync_reply_wakeup(reply, -EPIPE);
-+				else
-+					kdbus_notify_reply_dead(conn->ep->bus,
-+							reply->reply_dst->id,
-+							reply->cookie);
-+			}
-+			mutex_unlock(&reply->reply_dst->lock);
-+			kdbus_notify_flush(conn->ep->bus);
-+		}
-+
-+		kdbus_reply_unref(reply);
-+	} else {
-+		bool install_fds;
-+
-+		/*
-+		 * PEEK just returns the location of the next message. Do not
-+		 * install FDs, memfds, or anything else. The only
-+		 * information of interest should be the message header and
-+		 * metadata. Any FD numbers in the payload are undefined for
-+		 * PEEK'ed messages.
-+		 * Also make sure to never install FDs into a connection that
-+		 * has refused to receive any. Ordinary connections will not get
-+		 * messages with FDs queued (the sender gets -ECOMM), but
-+		 * eavesdroppers might.
-+		 */
-+		install_fds = (conn->flags & KDBUS_HELLO_ACCEPT_FD) &&
-+			      !(cmd->flags & KDBUS_RECV_PEEK);
-+
-+		ret = kdbus_queue_entry_install(entry,
-+						&cmd->msg.return_flags,
-+						install_fds);
-+		if (ret < 0) {
-+			mutex_unlock(&conn->lock);
-+			goto exit;
-+		}
-+
-+		kdbus_pool_slice_publish(entry->slice, &cmd->msg.offset,
-+					 &cmd->msg.msg_size);
-+
-+		if (!(cmd->flags & KDBUS_RECV_PEEK))
-+			kdbus_queue_entry_free(entry);
-+
-+		mutex_unlock(&conn->lock);
-+	}
-+
-+	cmd->dropped_msgs = atomic_xchg(&conn->lost_count, 0);
-+	if (cmd->dropped_msgs > 0)
-+		cmd->return_flags |= KDBUS_RECV_RETURN_DROPPED_MSGS;
-+
-+	if (kdbus_member_set_user(&cmd->msg, argp, typeof(*cmd), msg) ||
-+	    kdbus_member_set_user(&cmd->dropped_msgs, argp, typeof(*cmd),
-+				  dropped_msgs))
-+		ret = -EFAULT;
-+
-+exit:
-+	return kdbus_args_clear(&args, ret);
-+}
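Built on this command, a receive cycle over the mmap()-ed pool looks roughly as follows (hypothetical helpers, no error handling); every returned offset must eventually be handed back via KDBUS_CMD_FREE:

	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };

	while (ioctl(fd, KDBUS_CMD_RECV, &recv) == 0) {	/* -EAGAIN: empty */
		struct kdbus_msg *msg = (struct kdbus_msg *)(pool + recv.msg.offset);
		struct kdbus_cmd_free f =
			{ .size = sizeof(f), .offset = recv.msg.offset };

		handle_msg(msg);		/* hypothetical dispatcher */
		ioctl(fd, KDBUS_CMD_FREE, &f);	/* return the slice */
	}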
-+
-+/**
-+ * kdbus_cmd_free() - handle KDBUS_CMD_FREE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_cmd_free *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn) &&
-+	    !kdbus_conn_is_monitor(conn) &&
-+	    !kdbus_conn_is_activator(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	ret = kdbus_pool_release_offset(conn->pool, cmd->offset);
-+
-+	return kdbus_args_clear(&args, ret);
-+}
-diff --git a/ipc/kdbus/connection.h b/ipc/kdbus/connection.h
-new file mode 100644
-index 0000000..1ad0820
---- /dev/null
-+++ b/ipc/kdbus/connection.h
-@@ -0,0 +1,260 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_CONNECTION_H
-+#define __KDBUS_CONNECTION_H
-+
-+#include <linux/atomic.h>
-+#include <linux/kref.h>
-+#include <linux/lockdep.h>
-+#include <linux/path.h>
-+
-+#include "limits.h"
-+#include "metadata.h"
-+#include "pool.h"
-+#include "queue.h"
-+#include "util.h"
-+
-+#define KDBUS_HELLO_SPECIAL_CONN	(KDBUS_HELLO_ACTIVATOR | \
-+					 KDBUS_HELLO_POLICY_HOLDER | \
-+					 KDBUS_HELLO_MONITOR)
-+
-+struct kdbus_name_entry;
-+struct kdbus_quota;
-+struct kdbus_staging;
-+
-+/**
-+ * struct kdbus_conn - connection to a bus
-+ * @kref:		Reference count
-+ * @active:		Active references to the connection
-+ * @id:			Connection ID
-+ * @flags:		KDBUS_HELLO_* flags
-+ * @attach_flags_send:	KDBUS_ATTACH_* flags for sending
-+ * @attach_flags_recv:	KDBUS_ATTACH_* flags for receiving
-+ * @description:	Human-readable connection description, used for
-+ *			debugging. This field is only set when the
-+ *			connection is created.
-+ * @ep:			The endpoint this connection belongs to
-+ * @lock:		Connection data lock
-+ * @hentry:		Entry in ID <-> connection map
-+ * @ep_entry:		Entry in endpoint
-+ * @monitor_entry:	Entry in monitor, if the connection is a monitor
-+ * @reply_list:		List of connections this connection should
-+ *			reply to
-+ * @work:		Delayed work to handle timeouts
-+ * @match_db:		Subscription filter to broadcast messages
-+ * @meta_proc:		Process metadata of connection creator, or NULL
-+ * @meta_fake:		Faked metadata, or NULL
-+ * @pool:		The user's buffer to receive messages
-+ * @user:		Owner of the connection
-+ * @cred:		The credentials of the connection at creation time
-+ * @pid:		Pid at creation time
-+ * @root_path:		Root path at creation time
-+ * @request_count:	Number of pending requests issued by this
-+ *			connection that are waiting for replies from
-+ *			other peers
-+ * @lost_count:		Number of lost broadcast messages
-+ * @wait:		Wait queue to wake up this connection
-+ * @queue:		The message queue associated with this connection
-+ * @quota:		Array of per-user quota indexed by user->id
-+ * @n_quota:		Number of elements in quota array
-+ * @names_list:		List of well-known names
-+ * @name_count:		Number of owned well-known names
-+ * @privileged:		Whether this connection is privileged on the domain
-+ * @owner:		Owned by the same user as the bus owner
-+ */
-+struct kdbus_conn {
-+	struct kref kref;
-+	atomic_t active;
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+	struct lockdep_map dep_map;
-+#endif
-+	u64 id;
-+	u64 flags;
-+	atomic64_t attach_flags_send;
-+	atomic64_t attach_flags_recv;
-+	const char *description;
-+	struct kdbus_ep *ep;
-+	struct mutex lock;
-+	struct hlist_node hentry;
-+	struct list_head ep_entry;
-+	struct list_head monitor_entry;
-+	struct list_head reply_list;
-+	struct delayed_work work;
-+	struct kdbus_match_db *match_db;
-+	struct kdbus_meta_proc *meta_proc;
-+	struct kdbus_meta_fake *meta_fake;
-+	struct kdbus_pool *pool;
-+	struct kdbus_user *user;
-+	const struct cred *cred;
-+	struct pid *pid;
-+	struct path root_path;
-+	atomic_t request_count;
-+	atomic_t lost_count;
-+	wait_queue_head_t wait;
-+	struct kdbus_queue queue;
-+
-+	struct kdbus_quota *quota;
-+	unsigned int n_quota;
-+
-+	/* protected by registry->rwlock */
-+	struct list_head names_list;
-+	unsigned int name_count;
-+
-+	bool privileged:1;
-+	bool owner:1;
-+};
-+
-+struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn);
-+struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn);
-+bool kdbus_conn_active(const struct kdbus_conn *conn);
-+int kdbus_conn_acquire(struct kdbus_conn *conn);
-+void kdbus_conn_release(struct kdbus_conn *conn);
-+int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty);
-+bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name);
-+int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
-+			 size_t memory, size_t fds);
-+void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
-+			  size_t memory, size_t fds);
-+void kdbus_conn_lost_message(struct kdbus_conn *c);
-+int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
-+			    struct kdbus_conn *conn_dst,
-+			    struct kdbus_staging *staging,
-+			    struct kdbus_reply *reply,
-+			    const struct kdbus_name_entry *name);
-+void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
-+			      struct kdbus_conn *conn_src,
-+			      u64 name_id);
-+
-+/* policy */
-+bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
-+				const struct cred *conn_creds,
-+				const char *name);
-+bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
-+			    const struct cred *conn_creds,
-+			    struct kdbus_conn *to);
-+bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
-+					 const struct cred *curr_creds,
-+					 const char *name);
-+bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
-+					const struct cred *curr_creds,
-+					const struct kdbus_msg *msg);
-+
-+/* command dispatcher */
-+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, struct file *file,
-+				   void __user *argp);
-+int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp);
-+int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp);
-+
-+/**
-+ * kdbus_conn_is_ordinary() - Check if connection is ordinary
-+ * @conn:		The connection to check
-+ *
-+ * Return: Non-zero if the connection is an ordinary connection
-+ */
-+static inline int kdbus_conn_is_ordinary(const struct kdbus_conn *conn)
-+{
-+	return !(conn->flags & KDBUS_HELLO_SPECIAL_CONN);
-+}
-+
-+/**
-+ * kdbus_conn_is_activator() - Check if connection is an activator
-+ * @conn:		The connection to check
-+ *
-+ * Return: Non-zero if the connection is an activator
-+ */
-+static inline int kdbus_conn_is_activator(const struct kdbus_conn *conn)
-+{
-+	return conn->flags & KDBUS_HELLO_ACTIVATOR;
-+}
-+
-+/**
-+ * kdbus_conn_is_policy_holder() - Check if connection is a policy holder
-+ * @conn:		The connection to check
-+ *
-+ * Return: Non-zero if the connection is a policy holder
-+ */
-+static inline int kdbus_conn_is_policy_holder(const struct kdbus_conn *conn)
-+{
-+	return conn->flags & KDBUS_HELLO_POLICY_HOLDER;
-+}
-+
-+/**
-+ * kdbus_conn_is_monitor() - Check if connection is a monitor
-+ * @conn:		The connection to check
-+ *
-+ * Return: Non-zero if the connection is a monitor
-+ */
-+static inline int kdbus_conn_is_monitor(const struct kdbus_conn *conn)
-+{
-+	return conn->flags & KDBUS_HELLO_MONITOR;
-+}
-+
-+/**
-+ * kdbus_conn_lock2() - Lock two connections
-+ * @a:		connection A to lock or NULL
-+ * @b:		connection B to lock or NULL
-+ *
-+ * Lock two connections at once. As we need to have a stable locking order, we
-+ * always lock the connection with lower memory address first.
-+ */
-+static inline void kdbus_conn_lock2(struct kdbus_conn *a, struct kdbus_conn *b)
-+{
-+	if (a < b) {
-+		if (a)
-+			mutex_lock(&a->lock);
-+		if (b && b != a)
-+			mutex_lock_nested(&b->lock, !!a);
-+	} else {
-+		if (b)
-+			mutex_lock(&b->lock);
-+		if (a && a != b)
-+			mutex_lock_nested(&a->lock, !!b);
-+	}
-+}
-+
-+/**
-+ * kdbus_conn_unlock2() - Unlock two connections
-+ * @a:		connection A to unlock or NULL
-+ * @b:		connection B to unlock or NULL
-+ *
-+ * Unlock two connections at once. See kdbus_conn_lock2().
-+ */
-+static inline void kdbus_conn_unlock2(struct kdbus_conn *a,
-+				      struct kdbus_conn *b)
-+{
-+	if (a)
-+		mutex_unlock(&a->lock);
-+	if (b && b != a)
-+		mutex_unlock(&b->lock);
-+}
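Typical usage, as in kdbus_conn_entry_insert() and kdbus_conn_move_messages() above:

	kdbus_conn_lock2(conn_src, conn_dst);	/* lower address locked first */
	/* ... operate on both connections' queues ... */
	kdbus_conn_unlock2(conn_src, conn_dst);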
-+
-+/**
-+ * kdbus_conn_assert_active() - lockdep assert on active lock
-+ * @conn:	connection that shall be active
-+ *
-+ * This verifies via lockdep that the caller holds an active reference to the
-+ * given connection.
-+ */
-+static inline void kdbus_conn_assert_active(struct kdbus_conn *conn)
-+{
-+	lockdep_assert_held(conn);
-+}
-+
-+#endif
-diff --git a/ipc/kdbus/domain.c b/ipc/kdbus/domain.c
-new file mode 100644
-index 0000000..ac9f760
---- /dev/null
-+++ b/ipc/kdbus/domain.c
-@@ -0,0 +1,296 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+
-+#include "bus.h"
-+#include "domain.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "limits.h"
-+#include "util.h"
-+
-+static void kdbus_domain_control_free(struct kdbus_node *node)
-+{
-+	kfree(node);
-+}
-+
-+static struct kdbus_node *kdbus_domain_control_new(struct kdbus_domain *domain,
-+						   unsigned int access)
-+{
-+	struct kdbus_node *node;
-+	int ret;
-+
-+	node = kzalloc(sizeof(*node), GFP_KERNEL);
-+	if (!node)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kdbus_node_init(node, KDBUS_NODE_CONTROL);
-+
-+	node->free_cb = kdbus_domain_control_free;
-+	node->mode = S_IRUSR | S_IWUSR;
-+	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
-+		node->mode |= S_IRGRP | S_IWGRP;
-+	if (access & KDBUS_MAKE_ACCESS_WORLD)
-+		node->mode |= S_IROTH | S_IWOTH;
-+
-+	ret = kdbus_node_link(node, &domain->node, "control");
-+	if (ret < 0)
-+		goto exit_free;
-+
-+	return node;
-+
-+exit_free:
-+	kdbus_node_deactivate(node);
-+	kdbus_node_unref(node);
-+	return ERR_PTR(ret);
-+}
-+
-+static void kdbus_domain_free(struct kdbus_node *node)
-+{
-+	struct kdbus_domain *domain =
-+		container_of(node, struct kdbus_domain, node);
-+
-+	put_user_ns(domain->user_namespace);
-+	ida_destroy(&domain->user_ida);
-+	idr_destroy(&domain->user_idr);
-+	kfree(domain);
-+}
-+
-+/**
-+ * kdbus_domain_new() - create a new domain
-+ * @access:		The access mode for this node (KDBUS_MAKE_ACCESS_*)
-+ *
-+ * Return: a new kdbus_domain on success, ERR_PTR on failure
-+ */
-+struct kdbus_domain *kdbus_domain_new(unsigned int access)
-+{
-+	struct kdbus_domain *d;
-+	int ret;
-+
-+	d = kzalloc(sizeof(*d), GFP_KERNEL);
-+	if (!d)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kdbus_node_init(&d->node, KDBUS_NODE_DOMAIN);
-+
-+	d->node.free_cb = kdbus_domain_free;
-+	d->node.mode = S_IRUSR | S_IXUSR;
-+	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
-+		d->node.mode |= S_IRGRP | S_IXGRP;
-+	if (access & KDBUS_MAKE_ACCESS_WORLD)
-+		d->node.mode |= S_IROTH | S_IXOTH;
-+
-+	mutex_init(&d->lock);
-+	idr_init(&d->user_idr);
-+	ida_init(&d->user_ida);
-+
-+	/* Pin user namespace so we can guarantee domain-unique bus names. */
-+	d->user_namespace = get_user_ns(current_user_ns());
-+
-+	ret = kdbus_node_link(&d->node, NULL, NULL);
-+	if (ret < 0)
-+		goto exit_unref;
-+
-+	return d;
-+
-+exit_unref:
-+	kdbus_node_deactivate(&d->node);
-+	kdbus_node_unref(&d->node);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_domain_ref() - take a domain reference
-+ * @domain:		Domain
-+ *
-+ * Return: the domain itself
-+ */
-+struct kdbus_domain *kdbus_domain_ref(struct kdbus_domain *domain)
-+{
-+	if (domain)
-+		kdbus_node_ref(&domain->node);
-+	return domain;
-+}
-+
-+/**
-+ * kdbus_domain_unref() - drop a domain reference
-+ * @domain:		Domain
-+ *
-+ * When the last reference is dropped, the domain internal structure
-+ * is freed.
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_domain *kdbus_domain_unref(struct kdbus_domain *domain)
-+{
-+	if (domain)
-+		kdbus_node_unref(&domain->node);
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_domain_populate() - populate static domain nodes
-+ * @domain:	domain to populate
-+ * @access:	KDBUS_MAKE_ACCESS_* access restrictions for new nodes
-+ *
-+ * Allocate and activate static sub-nodes of the given domain. This will fail if
-+ * you call it on a non-active node or if the domain was already populated.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access)
-+{
-+	struct kdbus_node *control;
-+
-+	/*
-+	 * Create a control-node for this domain. We drop our own reference
-+	 * immediately, effectively causing the node to be deactivated and
-+	 * released when the parent domain is.
-+	 */
-+	control = kdbus_domain_control_new(domain, access);
-+	if (IS_ERR(control))
-+		return PTR_ERR(control);
-+
-+	kdbus_node_activate(control);
-+	kdbus_node_unref(control);
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_user_lookup() - lookup a kdbus_user object
-+ * @domain:		domain of the user
-+ * @uid:		uid of the user; INVALID_UID for an anon user
-+ *
-+ * Lookup the kdbus user accounting object for the given domain. If INVALID_UID
-+ * is passed, a new anonymous user is created which is private to the caller.
-+ *
-+ * Return: The user object is returned, ERR_PTR on failure.
-+ */
-+struct kdbus_user *kdbus_user_lookup(struct kdbus_domain *domain, kuid_t uid)
-+{
-+	struct kdbus_user *u = NULL, *old = NULL;
-+	int ret;
-+
-+	mutex_lock(&domain->lock);
-+
-+	if (uid_valid(uid)) {
-+		old = idr_find(&domain->user_idr, __kuid_val(uid));
-+		/*
-+		 * If the object is about to be destroyed, ignore it and
-+		 * replace the slot in the IDR later on.
-+		 */
-+		if (old && kref_get_unless_zero(&old->kref)) {
-+			mutex_unlock(&domain->lock);
-+			return old;
-+		}
-+	}
-+
-+	u = kzalloc(sizeof(*u), GFP_KERNEL);
-+	if (!u) {
-+		ret = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	kref_init(&u->kref);
-+	u->domain = kdbus_domain_ref(domain);
-+	u->uid = uid;
-+	atomic_set(&u->buses, 0);
-+	atomic_set(&u->connections, 0);
-+
-+	if (uid_valid(uid)) {
-+		if (old) {
-+			idr_replace(&domain->user_idr, u, __kuid_val(uid));
-+			old->uid = INVALID_UID; /* mark old as removed */
-+		} else {
-+			ret = idr_alloc(&domain->user_idr, u, __kuid_val(uid),
-+					__kuid_val(uid) + 1, GFP_KERNEL);
-+			if (ret < 0)
-+				goto exit;
-+		}
-+	}
-+
-+	/*
-+	 * Allocate the smallest possible index for this user; used
-+	 * in arrays for accounting user quota in receiver queues.
-+	 */
-+	ret = ida_simple_get(&domain->user_ida, 1, 0, GFP_KERNEL);
-+	if (ret < 0)
-+		goto exit;
-+
-+	u->id = ret;
-+	mutex_unlock(&domain->lock);
-+	return u;
-+
-+exit:
-+	if (u) {
-+		if (uid_valid(u->uid))
-+			idr_remove(&domain->user_idr, __kuid_val(u->uid));
-+		kdbus_domain_unref(u->domain);
-+		kfree(u);
-+	}
-+	mutex_unlock(&domain->lock);
-+	return ERR_PTR(ret);
-+}
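This is the common 'lookup or create under one lock' idiom, hardened with kref_get_unless_zero() against objects whose refcount already hit zero but whose release function has not yet taken the domain lock. Reduced to its core, with hypothetical names:

	mutex_lock(&lock);
	obj = idr_find(&idr, key);
	if (obj && kref_get_unless_zero(&obj->kref)) {
		mutex_unlock(&lock);
		return obj;		/* still alive: reuse it */
	}
	/*
	 * Absent or dying: insert a fresh object. A dying entry is
	 * replaced in place and marked, so its release function skips
	 * the idr_remove() for a slot it no longer owns.
	 */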
-+
-+static void __kdbus_user_free(struct kref *kref)
-+{
-+	struct kdbus_user *user = container_of(kref, struct kdbus_user, kref);
-+
-+	WARN_ON(atomic_read(&user->buses) > 0);
-+	WARN_ON(atomic_read(&user->connections) > 0);
-+
-+	mutex_lock(&user->domain->lock);
-+	ida_simple_remove(&user->domain->user_ida, user->id);
-+	if (uid_valid(user->uid))
-+		idr_remove(&user->domain->user_idr, __kuid_val(user->uid));
-+	mutex_unlock(&user->domain->lock);
-+
-+	kdbus_domain_unref(user->domain);
-+	kfree(user);
-+}
-+
-+/**
-+ * kdbus_user_ref() - take a user reference
-+ * @u:		User
-+ *
-+ * Return: @u is returned
-+ */
-+struct kdbus_user *kdbus_user_ref(struct kdbus_user *u)
-+{
-+	if (u)
-+		kref_get(&u->kref);
-+	return u;
-+}
-+
-+/**
-+ * kdbus_user_unref() - drop a user reference
-+ * @u:		User
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_user *kdbus_user_unref(struct kdbus_user *u)
-+{
-+	if (u)
-+		kref_put(&u->kref, __kdbus_user_free);
-+	return NULL;
-+}
-diff --git a/ipc/kdbus/domain.h b/ipc/kdbus/domain.h
-new file mode 100644
-index 0000000..447a2bd
---- /dev/null
-+++ b/ipc/kdbus/domain.h
-@@ -0,0 +1,77 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_DOMAIN_H
-+#define __KDBUS_DOMAIN_H
-+
-+#include <linux/fs.h>
-+#include <linux/idr.h>
-+#include <linux/kref.h>
-+#include <linux/user_namespace.h>
-+
-+#include "node.h"
-+
-+/**
-+ * struct kdbus_domain - domain for buses
-+ * @node:		Underlying API node
-+ * @lock:		Domain data lock
-+ * @last_id:		Last used object id
-+ * @user_idr:		Set of all users indexed by UID
-+ * @user_ida:		Set of all users to compute small indices
-+ * @user_namespace:	User namespace, pinned at creation time
-+ * @dentry:		Root dentry of VFS mount (don't use outside of kdbusfs)
-+ */
-+struct kdbus_domain {
-+	struct kdbus_node node;
-+	struct mutex lock;
-+	atomic64_t last_id;
-+	struct idr user_idr;
-+	struct ida user_ida;
-+	struct user_namespace *user_namespace;
-+	struct dentry *dentry;
-+};
-+
-+/**
-+ * struct kdbus_user - resource accounting for users
-+ * @kref:		Reference counter
-+ * @domain:		Domain of the user
-+ * @id:			Index of this user
-+ * @uid:		UID of the user
-+ * @buses:		Number of buses the user has created
-+ * @connections:	Number of connections the user has created
-+ */
-+struct kdbus_user {
-+	struct kref kref;
-+	struct kdbus_domain *domain;
-+	unsigned int id;
-+	kuid_t uid;
-+	atomic_t buses;
-+	atomic_t connections;
-+};
-+
-+#define kdbus_domain_from_node(_node) \
-+	container_of((_node), struct kdbus_domain, node)
-+
-+struct kdbus_domain *kdbus_domain_new(unsigned int access);
-+struct kdbus_domain *kdbus_domain_ref(struct kdbus_domain *domain);
-+struct kdbus_domain *kdbus_domain_unref(struct kdbus_domain *domain);
-+int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access);
-+
-+#define KDBUS_USER_KERNEL_ID 0 /* ID 0 is reserved for kernel accounting */
-+
-+struct kdbus_user *kdbus_user_lookup(struct kdbus_domain *domain, kuid_t uid);
-+struct kdbus_user *kdbus_user_ref(struct kdbus_user *u);
-+struct kdbus_user *kdbus_user_unref(struct kdbus_user *u);
-+
-+#endif
-diff --git a/ipc/kdbus/endpoint.c b/ipc/kdbus/endpoint.c
-new file mode 100644
-index 0000000..44e7a20
---- /dev/null
-+++ b/ipc/kdbus/endpoint.c
-@@ -0,0 +1,303 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/uio.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "message.h"
-+#include "policy.h"
-+
-+static void kdbus_ep_free(struct kdbus_node *node)
-+{
-+	struct kdbus_ep *ep = container_of(node, struct kdbus_ep, node);
-+
-+	WARN_ON(!list_empty(&ep->conn_list));
-+
-+	kdbus_policy_db_clear(&ep->policy_db);
-+	kdbus_bus_unref(ep->bus);
-+	kdbus_user_unref(ep->user);
-+	kfree(ep);
-+}
-+
-+static void kdbus_ep_release(struct kdbus_node *node, bool was_active)
-+{
-+	struct kdbus_ep *ep = container_of(node, struct kdbus_ep, node);
-+
-+	/* disconnect all connections to this endpoint */
-+	for (;;) {
-+		struct kdbus_conn *conn;
-+
-+		mutex_lock(&ep->lock);
-+		conn = list_first_entry_or_null(&ep->conn_list,
-+						struct kdbus_conn,
-+						ep_entry);
-+		if (!conn) {
-+			mutex_unlock(&ep->lock);
-+			break;
-+		}
-+
-+		/* take reference, release lock, disconnect without lock */
-+		kdbus_conn_ref(conn);
-+		mutex_unlock(&ep->lock);
-+
-+		kdbus_conn_disconnect(conn, false);
-+		kdbus_conn_unref(conn);
-+	}
-+}
-+
-+/**
-+ * kdbus_ep_new() - create a new endpoint
-+ * @bus:		The bus this endpoint will be created for
-+ * @name:		The name of the endpoint
-+ * @access:		The access flags for this node (KDBUS_MAKE_ACCESS_*)
-+ * @uid:		The uid of the node
-+ * @gid:		The gid of the node
-+ * @is_custom:		Whether this is a custom endpoint
-+ *
-+ * This function will create a new endpoint with the given
-+ * name and properties for a given bus.
-+ *
-+ * Return: a new kdbus_ep on success, ERR_PTR on failure.
-+ */
-+struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
-+			      unsigned int access, kuid_t uid, kgid_t gid,
-+			      bool is_custom)
-+{
-+	struct kdbus_ep *e;
-+	int ret;
-+
-+	/*
-+	 * Validate only custom endpoints names, default endpoints
-+	 * Validate only custom endpoint names; default endpoints
-+	 * with a "bus" name are created when the bus is created.
-+	if (is_custom) {
-+		ret = kdbus_verify_uid_prefix(name, bus->domain->user_namespace,
-+					      uid);
-+		if (ret < 0)
-+			return ERR_PTR(ret);
-+	}
-+
-+	e = kzalloc(sizeof(*e), GFP_KERNEL);
-+	if (!e)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kdbus_node_init(&e->node, KDBUS_NODE_ENDPOINT);
-+
-+	e->node.free_cb = kdbus_ep_free;
-+	e->node.release_cb = kdbus_ep_release;
-+	e->node.uid = uid;
-+	e->node.gid = gid;
-+	e->node.mode = S_IRUSR | S_IWUSR;
-+	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
-+		e->node.mode |= S_IRGRP | S_IWGRP;
-+	if (access & KDBUS_MAKE_ACCESS_WORLD)
-+		e->node.mode |= S_IROTH | S_IWOTH;
-+
-+	mutex_init(&e->lock);
-+	INIT_LIST_HEAD(&e->conn_list);
-+	kdbus_policy_db_init(&e->policy_db);
-+	e->bus = kdbus_bus_ref(bus);
-+
-+	ret = kdbus_node_link(&e->node, &bus->node, name);
-+	if (ret < 0)
-+		goto exit_unref;
-+
-+	/*
-+	 * Transactions on custom endpoints are never accounted on the global
-+	 * user limits. Instead, for each custom endpoint, we create a custom,
-+ * unique user, which all transactions are accounted on. Regardless of
-+ * which user uses that endpoint, everything is accounted on the same
-+	 * user-object. This budget is not shared with ordinary users on
-+	 * non-custom endpoints.
-+	 */
-+	if (is_custom) {
-+		e->user = kdbus_user_lookup(bus->domain, INVALID_UID);
-+		if (IS_ERR(e->user)) {
-+			ret = PTR_ERR(e->user);
-+			e->user = NULL;
-+			goto exit_unref;
-+		}
-+	}
-+
-+	return e;
-+
-+exit_unref:
-+	kdbus_node_deactivate(&e->node);
-+	kdbus_node_unref(&e->node);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_ep_ref() - increase the reference counter of a kdbus_ep
-+ * @ep:			The endpoint to reference
-+ *
-+ * Every user of an endpoint, except for its creator, must add a reference to
-+ * the kdbus_ep instance using this function.
-+ *
-+ * Return: the ep itself
-+ */
-+struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep)
-+{
-+	if (ep)
-+		kdbus_node_ref(&ep->node);
-+	return ep;
-+}
-+
-+/**
-+ * kdbus_ep_unref() - decrease the reference counter of a kdbus_ep
-+ * @ep:		The ep to unref
-+ *
-+ * Release a reference. If the reference count drops to 0, the ep will be
-+ * freed.
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_ep *kdbus_ep_unref(struct kdbus_ep *ep)
-+{
-+	if (ep)
-+		kdbus_node_unref(&ep->node);
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_ep_is_privileged() - check whether a file is privileged
-+ * @ep:		endpoint to operate on
-+ * @file:	file to test
-+ *
-+ * Return: True if @file is privileged in the domain of @ep.
-+ */
-+bool kdbus_ep_is_privileged(struct kdbus_ep *ep, struct file *file)
-+{
-+	return !ep->user &&
-+		file_ns_capable(file, ep->bus->domain->user_namespace,
-+				CAP_IPC_OWNER);
-+}
-+
-+/**
-+ * kdbus_ep_is_owner() - check whether a file should be treated as bus owner
-+ * @ep:		endpoint to operate on
-+ * @file:	file to test
-+ *
-+ * Return: True if @file should be treated as bus owner on @ep
-+ */
-+bool kdbus_ep_is_owner(struct kdbus_ep *ep, struct file *file)
-+{
-+	return !ep->user &&
-+		(uid_eq(file->f_cred->euid, ep->bus->node.uid) ||
-+		 kdbus_ep_is_privileged(ep, file));
-+}
-+
-+/**
-+ * kdbus_cmd_ep_make() - handle KDBUS_CMD_ENDPOINT_MAKE
-+ * @bus:		bus to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: NULL or newly created endpoint on success, ERR_PTR on failure.
-+ */
-+struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp)
-+{
-+	const char *item_make_name;
-+	struct kdbus_ep *ep = NULL;
-+	struct kdbus_cmd *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_MAKE_ACCESS_GROUP |
-+				 KDBUS_MAKE_ACCESS_WORLD,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+	if (ret > 0)
-+		return NULL;
-+
-+	item_make_name = argv[1].item->str;
-+
-+	ep = kdbus_ep_new(bus, item_make_name, cmd->flags,
-+			  current_euid(), current_egid(), true);
-+	if (IS_ERR(ep)) {
-+		ret = PTR_ERR(ep);
-+		ep = NULL;
-+		goto exit;
-+	}
-+
-+	if (!kdbus_node_activate(&ep->node)) {
-+		ret = -ESHUTDOWN;
-+		goto exit;
-+	}
-+
-+exit:
-+	ret = kdbus_args_clear(&args, ret);
-+	if (ret < 0) {
-+		if (ep) {
-+			kdbus_node_deactivate(&ep->node);
-+			kdbus_ep_unref(ep);
-+		}
-+		return ERR_PTR(ret);
-+	}
-+	return ep;
-+}
-+
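-+/*
-+ * User-space sketch of the matching ioctl (bus path and helper are
-+ * hypothetical; unprivileged callers must uid-prefix the name, see
-+ * kdbus_verify_uid_prefix() above):
-+ *
-+ *	int fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR);
-+ *	struct kdbus_cmd *cmd = build_ep_make_cmd("1000-custom");
-+ *			// assumed helper: fills cmd->size, the access
-+ *			// flags and one KDBUS_ITEM_MAKE_NAME item
-+ *	ioctl(fd, KDBUS_CMD_ENDPOINT_MAKE, cmd);
-+ *
-+ * The file descriptor then owns the endpoint; closing it destroys the
-+ * node again (see kdbus_handle_release() in handle.c).
-+ */
-+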
-+/**
-+ * kdbus_cmd_ep_update() - handle KDBUS_CMD_ENDPOINT_UPDATE
-+ * @ep:			endpoint to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp)
-+{
-+	struct kdbus_cmd *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_NAME, .multiple = true },
-+		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	ret = kdbus_policy_set(&ep->policy_db, args.items, args.items_size,
-+			       0, true, ep);
-+	return kdbus_args_clear(&args, ret);
-+}
-diff --git a/ipc/kdbus/endpoint.h b/ipc/kdbus/endpoint.h
-new file mode 100644
-index 0000000..e0da59f
---- /dev/null
-+++ b/ipc/kdbus/endpoint.h
-@@ -0,0 +1,70 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_ENDPOINT_H
-+#define __KDBUS_ENDPOINT_H
-+
-+#include <linux/list.h>
-+#include <linux/mutex.h>
-+#include <linux/uidgid.h>
-+#include "node.h"
-+#include "policy.h"
-+
-+struct kdbus_bus;
-+struct kdbus_user;
-+
-+/**
-+ * struct kdbus_ep - endpoint to access a bus
-+ * @node:		The kdbus node
-+ * @lock:		Endpoint data lock
-+ * @bus:		Bus behind this endpoint
-+ * @user:		Custom endpoints account against an anonymous user
-+ * @policy_db:		Uploaded policy
-+ * @conn_list:		Connections of this endpoint
-+ *
-+ * An endpoint offers access to a bus; the default endpoint node name is "bus".
-+ * Additional custom endpoints to the same bus can be created and they can
-+ * carry their own policies/filters.
-+ */
-+struct kdbus_ep {
-+	struct kdbus_node node;
-+	struct mutex lock;
-+
-+	/* static */
-+	struct kdbus_bus *bus;
-+	struct kdbus_user *user;
-+
-+	/* protected by own locks */
-+	struct kdbus_policy_db policy_db;
-+
-+	/* protected by ep->lock */
-+	struct list_head conn_list;
-+};
-+
-+#define kdbus_ep_from_node(_node) \
-+	container_of((_node), struct kdbus_ep, node)
-+
-+struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
-+			      unsigned int access, kuid_t uid, kgid_t gid,
-+			      bool policy);
-+struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep);
-+struct kdbus_ep *kdbus_ep_unref(struct kdbus_ep *ep);
-+
-+bool kdbus_ep_is_privileged(struct kdbus_ep *ep, struct file *file);
-+bool kdbus_ep_is_owner(struct kdbus_ep *ep, struct file *file);
-+
-+struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp);
-+int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp);
-+
-+#endif
-diff --git a/ipc/kdbus/fs.c b/ipc/kdbus/fs.c
-new file mode 100644
-index 0000000..09c4809
---- /dev/null
-+++ b/ipc/kdbus/fs.c
-@@ -0,0 +1,508 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/dcache.h>
-+#include <linux/fs.h>
-+#include <linux/fsnotify.h>
-+#include <linux/init.h>
-+#include <linux/ipc_namespace.h>
-+#include <linux/magic.h>
-+#include <linux/module.h>
-+#include <linux/mount.h>
-+#include <linux/mutex.h>
-+#include <linux/namei.h>
-+#include <linux/pagemap.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+
-+#include "bus.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "fs.h"
-+#include "handle.h"
-+#include "node.h"
-+
-+#define kdbus_node_from_dentry(_dentry) \
-+	((struct kdbus_node *)(_dentry)->d_fsdata)
-+
-+static struct inode *fs_inode_get(struct super_block *sb,
-+				  struct kdbus_node *node);
-+
-+/*
-+ * Directory Management
-+ */
-+
-+static inline unsigned char kdbus_dt_type(struct kdbus_node *node)
-+{
-+	switch (node->type) {
-+	case KDBUS_NODE_DOMAIN:
-+	case KDBUS_NODE_BUS:
-+		return DT_DIR;
-+	case KDBUS_NODE_CONTROL:
-+	case KDBUS_NODE_ENDPOINT:
-+		return DT_REG;
-+	}
-+
-+	return DT_UNKNOWN;
-+}
-+
-+static int fs_dir_fop_iterate(struct file *file, struct dir_context *ctx)
-+{
-+	struct dentry *dentry = file->f_path.dentry;
-+	struct kdbus_node *parent = kdbus_node_from_dentry(dentry);
-+	struct kdbus_node *old, *next = file->private_data;
-+
-+	/*
-+	 * kdbusfs directory iterator (modelled after sysfs/kernfs)
-+	 * When iterating kdbusfs directories, we iterate all children of the
-+	 * parent kdbus_node object. We use ctx->pos to store the hash of the
-+	 * child and file->private_data to store a reference to the next node
-+	 * object. If ctx->pos is not modified via llseek while you iterate a
-+	 * directory, then we use the file->private_data node pointer to
-+	 * directly access the next node in the tree.
-+	 * However, if you directly seek on the directory, we have to find the
-+	 * closest node to that position and cannot use our node pointer. This
-+	 * means iterating the rb-tree to find the closest match and starting over
-+	 * from there.
-+	 * Note that hash values are not necessarily unique. Therefore, llseek
-+	 * is not guaranteed to seek to the same node that you got when you
-+	 * retrieved the position. Seeking to 0, 1, 2 and >=INT_MAX is safe,
-+	 * though. We could use the inode-number as position, but this would
-+	 * require another rb-tree for fast access. Kernfs and others already
-+	 * ignore those conflicts, so we should be fine, too.
-+	 */
-+
-+	if (!dir_emit_dots(file, ctx))
-+		return 0;
-+
-+	/* acquire @next; if deactivated, or seek detected, find next node */
-+	old = next;
-+	if (next && ctx->pos == next->hash) {
-+		if (kdbus_node_acquire(next))
-+			kdbus_node_ref(next);
-+		else
-+			next = kdbus_node_next_child(parent, next);
-+	} else {
-+		next = kdbus_node_find_closest(parent, ctx->pos);
-+	}
-+	kdbus_node_unref(old);
-+
-+	while (next) {
-+		/* emit @next */
-+		file->private_data = next;
-+		ctx->pos = next->hash;
-+
-+		kdbus_node_release(next);
-+
-+		if (!dir_emit(ctx, next->name, strlen(next->name), next->id,
-+			      kdbus_dt_type(next)))
-+			return 0;
-+
-+		/* find next node after @next */
-+		old = next;
-+		next = kdbus_node_next_child(parent, next);
-+		kdbus_node_unref(old);
-+	}
-+
-+	file->private_data = NULL;
-+	ctx->pos = INT_MAX;
-+
-+	return 0;
-+}
-+
-+static loff_t fs_dir_fop_llseek(struct file *file, loff_t offset, int whence)
-+{
-+	struct inode *inode = file_inode(file);
-+	loff_t ret;
-+
-+	/* protect f_off against fop_iterate */
-+	mutex_lock(&inode->i_mutex);
-+	ret = generic_file_llseek(file, offset, whence);
-+	mutex_unlock(&inode->i_mutex);
-+
-+	return ret;
-+}
-+
-+static int fs_dir_fop_release(struct inode *inode, struct file *file)
-+{
-+	kdbus_node_unref(file->private_data);
-+	return 0;
-+}
-+
-+static const struct file_operations fs_dir_fops = {
-+	.read		= generic_read_dir,
-+	.iterate	= fs_dir_fop_iterate,
-+	.llseek		= fs_dir_fop_llseek,
-+	.release	= fs_dir_fop_release,
-+};
-+
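-+/*
-+ * Seek-semantics sketch (user-visible effect; the path is hypothetical):
-+ * since ctx->pos carries a name hash rather than a dense index, a
-+ * telldir()/seekdir() round-trip resumes at the closest hash, so on
-+ * collisions entries may repeat or be skipped:
-+ *
-+ *	DIR *d = opendir("/sys/fs/kdbus/0-system");
-+ *	long pos = telldir(d);
-+ *	// ... read some entries ...
-+ *	seekdir(d, pos);	// nearest node by hash, not necessarily
-+ *				// the exact entry telldir() referred to
-+ */
-+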
-+static struct dentry *fs_dir_iop_lookup(struct inode *dir,
-+					struct dentry *dentry,
-+					unsigned int flags)
-+{
-+	struct dentry *dnew = NULL;
-+	struct kdbus_node *parent;
-+	struct kdbus_node *node;
-+	struct inode *inode;
-+
-+	parent = kdbus_node_from_dentry(dentry->d_parent);
-+	if (!kdbus_node_acquire(parent))
-+		return NULL;
-+
-+	/* returns reference to _acquired_ child node */
-+	node = kdbus_node_find_child(parent, dentry->d_name.name);
-+	if (node) {
-+		dentry->d_fsdata = node;
-+		inode = fs_inode_get(dir->i_sb, node);
-+		if (IS_ERR(inode))
-+			dnew = ERR_CAST(inode);
-+		else
-+			dnew = d_splice_alias(inode, dentry);
-+
-+		kdbus_node_release(node);
-+	}
-+
-+	kdbus_node_release(parent);
-+	return dnew;
-+}
-+
-+static const struct inode_operations fs_dir_iops = {
-+	.permission	= generic_permission,
-+	.lookup		= fs_dir_iop_lookup,
-+};
-+
-+/*
-+ * Inode Management
-+ */
-+
-+static const struct inode_operations fs_inode_iops = {
-+	.permission	= generic_permission,
-+};
-+
-+static struct inode *fs_inode_get(struct super_block *sb,
-+				  struct kdbus_node *node)
-+{
-+	struct inode *inode;
-+
-+	inode = iget_locked(sb, node->id);
-+	if (!inode)
-+		return ERR_PTR(-ENOMEM);
-+	if (!(inode->i_state & I_NEW))
-+		return inode;
-+
-+	inode->i_private = kdbus_node_ref(node);
-+	inode->i_mapping->a_ops = &empty_aops;
-+	inode->i_mode = node->mode & S_IALLUGO;
-+	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME;
-+	inode->i_uid = node->uid;
-+	inode->i_gid = node->gid;
-+
-+	switch (node->type) {
-+	case KDBUS_NODE_DOMAIN:
-+	case KDBUS_NODE_BUS:
-+		inode->i_mode |= S_IFDIR;
-+		inode->i_op = &fs_dir_iops;
-+		inode->i_fop = &fs_dir_fops;
-+		set_nlink(inode, 2);
-+		break;
-+	case KDBUS_NODE_CONTROL:
-+	case KDBUS_NODE_ENDPOINT:
-+		inode->i_mode |= S_IFREG;
-+		inode->i_op = &fs_inode_iops;
-+		inode->i_fop = &kdbus_handle_ops;
-+		break;
-+	}
-+
-+	unlock_new_inode(inode);
-+
-+	return inode;
-+}
-+
-+/*
-+ * Superblock Management
-+ */
-+
-+static int fs_super_dop_revalidate(struct dentry *dentry, unsigned int flags)
-+{
-+	struct kdbus_node *node;
-+
-+	/* Force lookup on negatives */
-+	if (!dentry->d_inode)
-+		return 0;
-+
-+	node = kdbus_node_from_dentry(dentry);
-+
-+	/* see whether the node has been removed */
-+	if (!kdbus_node_is_active(node))
-+		return 0;
-+
-+	return 1;
-+}
-+
-+static void fs_super_dop_release(struct dentry *dentry)
-+{
-+	kdbus_node_unref(dentry->d_fsdata);
-+}
-+
-+static const struct dentry_operations fs_super_dops = {
-+	.d_revalidate	= fs_super_dop_revalidate,
-+	.d_release	= fs_super_dop_release,
-+};
-+
-+static void fs_super_sop_evict_inode(struct inode *inode)
-+{
-+	struct kdbus_node *node = kdbus_node_from_inode(inode);
-+
-+	truncate_inode_pages_final(&inode->i_data);
-+	clear_inode(inode);
-+	kdbus_node_unref(node);
-+}
-+
-+static const struct super_operations fs_super_sops = {
-+	.statfs		= simple_statfs,
-+	.drop_inode	= generic_delete_inode,
-+	.evict_inode	= fs_super_sop_evict_inode,
-+};
-+
-+static int fs_super_fill(struct super_block *sb)
-+{
-+	struct kdbus_domain *domain = sb->s_fs_info;
-+	struct inode *inode;
-+	int ret;
-+
-+	sb->s_blocksize = PAGE_CACHE_SIZE;
-+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-+	sb->s_magic = KDBUS_SUPER_MAGIC;
-+	sb->s_maxbytes = MAX_LFS_FILESIZE;
-+	sb->s_op = &fs_super_sops;
-+	sb->s_time_gran = 1;
-+
-+	inode = fs_inode_get(sb, &domain->node);
-+	if (IS_ERR(inode))
-+		return PTR_ERR(inode);
-+
-+	sb->s_root = d_make_root(inode);
-+	if (!sb->s_root) {
-+		/* d_make_root iput()s the inode on failure */
-+		return -ENOMEM;
-+	}
-+
-+	/* sb holds domain reference */
-+	sb->s_root->d_fsdata = &domain->node;
-+	sb->s_d_op = &fs_super_dops;
-+
-+	/* sb holds root reference */
-+	domain->dentry = sb->s_root;
-+
-+	if (!kdbus_node_activate(&domain->node))
-+		return -ESHUTDOWN;
-+
-+	ret = kdbus_domain_populate(domain, KDBUS_MAKE_ACCESS_WORLD);
-+	if (ret < 0)
-+		return ret;
-+
-+	sb->s_flags |= MS_ACTIVE;
-+	return 0;
-+}
-+
-+static void fs_super_kill(struct super_block *sb)
-+{
-+	struct kdbus_domain *domain = sb->s_fs_info;
-+
-+	if (domain) {
-+		kdbus_node_deactivate(&domain->node);
-+		domain->dentry = NULL;
-+	}
-+
-+	kill_anon_super(sb);
-+	kdbus_domain_unref(domain);
-+}
-+
-+static int fs_super_set(struct super_block *sb, void *data)
-+{
-+	int ret;
-+
-+	ret = set_anon_super(sb, data);
-+	if (!ret)
-+		sb->s_fs_info = data;
-+
-+	return ret;
-+}
-+
-+static struct dentry *fs_super_mount(struct file_system_type *fs_type,
-+				     int flags, const char *dev_name,
-+				     void *data)
-+{
-+	struct kdbus_domain *domain;
-+	struct super_block *sb;
-+	int ret;
-+
-+	domain = kdbus_domain_new(KDBUS_MAKE_ACCESS_WORLD);
-+	if (IS_ERR(domain))
-+		return ERR_CAST(domain);
-+
-+	sb = sget(fs_type, NULL, fs_super_set, flags, domain);
-+	if (IS_ERR(sb)) {
-+		kdbus_node_deactivate(&domain->node);
-+		kdbus_domain_unref(domain);
-+		return ERR_CAST(sb);
-+	}
-+
-+	WARN_ON(sb->s_fs_info != domain);
-+	WARN_ON(sb->s_root);
-+
-+	ret = fs_super_fill(sb);
-+	if (ret < 0) {
-+		/* calls into ->kill_sb() when done */
-+		deactivate_locked_super(sb);
-+		return ERR_PTR(ret);
-+	}
-+
-+	return dget(sb->s_root);
-+}
-+
-+static struct file_system_type fs_type = {
-+	.name		= KBUILD_MODNAME "fs",
-+	.owner		= THIS_MODULE,
-+	.mount		= fs_super_mount,
-+	.kill_sb	= fs_super_kill,
-+	.fs_flags	= FS_USERNS_MOUNT,
-+};
-+
-+/**
-+ * kdbus_fs_init() - register kdbus filesystem
-+ *
-+ * This registers a filesystem with the VFS layer. The filesystem is called
-+ * `KBUILD_MODNAME "fs"', which usually resolves to `kdbusfs'. The naming
-+ * scheme allows setting KBUILD_MODNAME to "kdbus2" to get an independent
-+ * filesystem for developers.
-+ *
-+ * Each mount of the kdbusfs filesystem has a kdbus_domain attached.
-+ * Operations on this mount will only affect the attached domain. On each mount
-+ * a new domain is automatically created and used for this mount exclusively.
-+ * If you want to share a domain across multiple mounts, you need to bind-mount
-+ * it.
-+ *
-+ * Mounts of kdbusfs (with a different domain each) are unrelated to each other
-+ * and will never have any effect on any domain but their own.
-+ *
-+ * Return: 0 on success, negative error otherwise.
-+ */
-+int kdbus_fs_init(void)
-+{
-+	return register_filesystem(&fs_type);
-+}
-+
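-+/*
-+ * Mount sketch (the mount point is an assumption, nothing mandates it):
-+ * once the filesystem is registered, a fresh domain is instantiated per
-+ * mount, e.g. from user space via mount(2):
-+ *
-+ *	#include <sys/mount.h>
-+ *
-+ *	if (mount("kdbusfs", "/sys/fs/kdbus", "kdbusfs", 0, NULL) < 0)
-+ *		perror("mount kdbusfs");
-+ */
-+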
-+/**
-+ * kdbus_fs_exit() - unregister kdbus filesystem
-+ *
-+ * This does the reverse to kdbus_fs_init(). It unregisters the kdbusfs
-+ * filesystem from VFS and cleans up any allocated resources.
-+ */
-+void kdbus_fs_exit(void)
-+{
-+	unregister_filesystem(&fs_type);
-+}
-+
-+/* acquire domain of @node, making sure all ancestors are active */
-+static struct kdbus_domain *fs_acquire_domain(struct kdbus_node *node)
-+{
-+	struct kdbus_domain *domain;
-+	struct kdbus_node *iter;
-+
-+	/* caller must guarantee that @node is linked */
-+	for (iter = node; iter->parent; iter = iter->parent)
-+		if (!kdbus_node_is_active(iter->parent))
-+			return NULL;
-+
-+	/* root nodes are always domains */
-+	if (WARN_ON(iter->type != KDBUS_NODE_DOMAIN))
-+		return NULL;
-+
-+	domain = kdbus_domain_from_node(iter);
-+	if (!kdbus_node_acquire(&domain->node))
-+		return NULL;
-+
-+	return domain;
-+}
-+
-+/**
-+ * kdbus_fs_flush() - flush dcache entries of a node
-+ * @node:		Node to flush entries of
-+ *
-+ * This flushes all VFS filesystem cache entries for a node and all its
-+ * children. This should be called whenever a node is destroyed during
-+ * runtime. It will flush the cache entries so the linked objects can be
-+ * deallocated.
-+ *
-+ * This is a no-op if you call it on active nodes (they really should stay in
-+ * cache) or on nodes with deactivated parents (flushing the parent is enough).
-+ * Furthermore, there is no need to call it on nodes whose lifetime is bound to
-+ * their parents'. In those cases, the parent-flush will always also flush the
-+ * children.
-+ */
-+void kdbus_fs_flush(struct kdbus_node *node)
-+{
-+	struct dentry *dentry, *parent_dentry = NULL;
-+	struct kdbus_domain *domain;
-+	struct qstr name;
-+
-+	/* active nodes should remain in cache */
-+	if (!kdbus_node_is_deactivated(node))
-+		return;
-+
-+	/* nodes that were never linked were never instantiated */
-+	if (!node->parent)
-+		return;
-+
-+	/* acquire domain and verify all ancestors are active */
-+	domain = fs_acquire_domain(node);
-+	if (!domain)
-+		return;
-+
-+	switch (node->type) {
-+	case KDBUS_NODE_ENDPOINT:
-+		if (WARN_ON(!node->parent || !node->parent->name))
-+			goto exit;
-+
-+		name.name = node->parent->name;
-+		name.len = strlen(node->parent->name);
-+		parent_dentry = d_hash_and_lookup(domain->dentry, &name);
-+		if (IS_ERR_OR_NULL(parent_dentry))
-+			goto exit;
-+
-+		/* fallthrough */
-+	case KDBUS_NODE_BUS:
-+		if (WARN_ON(!node->name))
-+			goto exit;
-+
-+		name.name = node->name;
-+		name.len = strlen(node->name);
-+		dentry = d_hash_and_lookup(parent_dentry ? : domain->dentry,
-+					   &name);
-+		if (!IS_ERR_OR_NULL(dentry)) {
-+			d_invalidate(dentry);
-+			dput(dentry);
-+		}
-+
-+		dput(parent_dentry);
-+		break;
-+
-+	default:
-+		/* all other types are bound to their parent lifetime */
-+		break;
-+	}
-+
-+exit:
-+	kdbus_node_release(&domain->node);
-+}
-diff --git a/ipc/kdbus/fs.h b/ipc/kdbus/fs.h
-new file mode 100644
-index 0000000..62f7d6a
---- /dev/null
-+++ b/ipc/kdbus/fs.h
-@@ -0,0 +1,28 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUSFS_H
-+#define __KDBUSFS_H
-+
-+#include <linux/kernel.h>
-+
-+struct kdbus_node;
-+
-+int kdbus_fs_init(void);
-+void kdbus_fs_exit(void);
-+void kdbus_fs_flush(struct kdbus_node *node);
-+
-+#define kdbus_node_from_inode(_inode) \
-+	((struct kdbus_node *)(_inode)->i_private)
-+
-+#endif
-diff --git a/ipc/kdbus/handle.c b/ipc/kdbus/handle.c
-new file mode 100644
-index 0000000..fc60932
---- /dev/null
-+++ b/ipc/kdbus/handle.c
-@@ -0,0 +1,691 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/kdev_t.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/poll.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/syscalls.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "fs.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "match.h"
-+#include "message.h"
-+#include "names.h"
-+#include "domain.h"
-+#include "policy.h"
-+
-+static int kdbus_args_verify(struct kdbus_args *args)
-+{
-+	struct kdbus_item *item;
-+	size_t i;
-+	int ret;
-+
-+	KDBUS_ITEMS_FOREACH(item, args->items, args->items_size) {
-+		struct kdbus_arg *arg = NULL;
-+
-+		if (!KDBUS_ITEM_VALID(item, args->items, args->items_size))
-+			return -EINVAL;
-+
-+		for (i = 0; i < args->argc; ++i)
-+			if (args->argv[i].type == item->type)
-+				break;
-+		if (i >= args->argc)
-+			return -EINVAL;
-+
-+		arg = &args->argv[i];
-+
-+		ret = kdbus_item_validate(item);
-+		if (ret < 0)
-+			return ret;
-+
-+		if (arg->item && !arg->multiple)
-+			return -EINVAL;
-+
-+		arg->item = item;
-+	}
-+
-+	if (!KDBUS_ITEMS_END(item, args->items, args->items_size))
-+		return -EINVAL;
-+
-+	return 0;
-+}
-+
-+static int kdbus_args_negotiate(struct kdbus_args *args)
-+{
-+	struct kdbus_item __user *user;
-+	struct kdbus_item *negotiation;
-+	size_t i, j, num;
-+
-+	/*
-+	 * If KDBUS_FLAG_NEGOTIATE is set, we overwrite the flags field with
-+	 * the set of supported flags. Furthermore, if a KDBUS_ITEM_NEGOTIATE
-+	 * item is passed, we iterate its payload (array of u64, each set to an
-+	 * item type) and clear all unsupported item-types to 0.
-+	 * The caller might do this recursively, if other flags or objects are
-+	 * embedded in the payload itself.
-+	 */
-+
-+	if (args->cmd->flags & KDBUS_FLAG_NEGOTIATE) {
-+		if (put_user(args->allowed_flags & ~KDBUS_FLAG_NEGOTIATE,
-+			     &args->user->flags))
-+			return -EFAULT;
-+	}
-+
-+	if (args->argc < 1 || args->argv[0].type != KDBUS_ITEM_NEGOTIATE ||
-+	    !args->argv[0].item)
-+		return 0;
-+
-+	negotiation = args->argv[0].item;
-+	user = (struct kdbus_item __user *)
-+		((u8 __user *)args->user +
-+		 ((u8 *)negotiation - (u8 *)args->cmd));
-+	num = KDBUS_ITEM_PAYLOAD_SIZE(negotiation) / sizeof(u64);
-+
-+	for (i = 0; i < num; ++i) {
-+		for (j = 0; j < args->argc; ++j)
-+			if (negotiation->data64[i] == args->argv[j].type)
-+				break;
-+
-+		if (j < args->argc)
-+			continue;
-+
-+		/* this item is not supported, clear it out */
-+		negotiation->data64[i] = 0;
-+		if (put_user(negotiation->data64[i], &user->data64[i]))
-+			return -EFAULT;
-+	}
-+
-+	return 0;
-+}
-+
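-+/*
-+ * Negotiation sketch (hypothetical user-space probe): a client asks
-+ * which item types a command supports by passing KDBUS_FLAG_NEGOTIATE
-+ * plus a KDBUS_ITEM_NEGOTIATE item whose payload lists candidate types
-+ * as u64 values:
-+ *
-+ *	u64 probe[2] = { KDBUS_ITEM_MAKE_NAME, KDBUS_ITEM_BLOOM_PARAMETER };
-+ *	// packed as the payload of a KDBUS_ITEM_NEGOTIATE item in cmd
-+ *	ioctl(fd, KDBUS_CMD_BUS_MAKE, cmd);
-+ *	// entries the kernel cleared to 0 in the copied-back payload are
-+ *	// unsupported; entries left intact are supported
-+ */
-+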
-+/**
-+ * __kdbus_args_parse() - parse payload of kdbus command
-+ * @args:		object to parse data into
-+ * @is_cmd:		whether this is a command or msg payload
-+ * @argp:		user-space location of command payload to parse
-+ * @type_size:		overall size of command payload to parse
-+ * @items_offset:	offset of items array in command payload
-+ * @out:		output variable to store pointer to copied payload
-+ *
-+ * This parses the ioctl payload at user-space location @argp into @args. @args
-+ * must be pre-initialized by the caller to reflect the supported flags and
-+ * items of this command. This parser will then copy the command payload into
-+ * kernel-space, verify correctness and consistency and cache pointers to parsed
-+ * items and other data in @args.
-+ *
-+ * If this function succeeded, you must call kdbus_args_clear() to release
-+ * allocated resources before destroying @args.
-+ *
-+ * This can also be used to import kdbus_msg objects. In that case, @is_cmd must
-+ * be set to 'false' and the 'return_flags' field will not be touched (as it
-+ * doesn't exist on kdbus_msg).
-+ *
-+ * Return: On failure a negative error code is returned. Otherwise, 1 is
-+ * returned if negotiation was requested, 0 if not.
-+ */
-+int __kdbus_args_parse(struct kdbus_args *args, bool is_cmd, void __user *argp,
-+		       size_t type_size, size_t items_offset, void **out)
-+{
-+	u64 user_size;
-+	int ret, i;
-+
-+	ret = kdbus_copy_from_user(&user_size, argp, sizeof(user_size));
-+	if (ret < 0)
-+		return ret;
-+
-+	if (user_size < type_size)
-+		return -EINVAL;
-+	if (user_size > KDBUS_CMD_MAX_SIZE)
-+		return -EMSGSIZE;
-+
-+	if (user_size <= sizeof(args->cmd_buf)) {
-+		if (copy_from_user(args->cmd_buf, argp, user_size))
-+			return -EFAULT;
-+		args->cmd = (void *)args->cmd_buf;
-+	} else {
-+		args->cmd = memdup_user(argp, user_size);
-+		if (IS_ERR(args->cmd))
-+			return PTR_ERR(args->cmd);
-+	}
-+
-+	if (args->cmd->size != user_size) {
-+		ret = -EINVAL;
-+		goto error;
-+	}
-+
-+	if (is_cmd)
-+		args->cmd->return_flags = 0;
-+	args->user = argp;
-+	args->items = (void *)((u8 *)args->cmd + items_offset);
-+	args->items_size = args->cmd->size - items_offset;
-+	args->is_cmd = is_cmd;
-+
-+	if (args->cmd->flags & ~args->allowed_flags) {
-+		ret = -EINVAL;
-+		goto error;
-+	}
-+
-+	ret = kdbus_args_verify(args);
-+	if (ret < 0)
-+		goto error;
-+
-+	ret = kdbus_args_negotiate(args);
-+	if (ret < 0)
-+		goto error;
-+
-+	/* mandatory items must be given (but not on negotiation) */
-+	if (!(args->cmd->flags & KDBUS_FLAG_NEGOTIATE)) {
-+		for (i = 0; i < args->argc; ++i)
-+			if (args->argv[i].mandatory && !args->argv[i].item) {
-+				ret = -EINVAL;
-+				goto error;
-+			}
-+	}
-+
-+	*out = args->cmd;
-+	return !!(args->cmd->flags & KDBUS_FLAG_NEGOTIATE);
-+
-+error:
-+	return kdbus_args_clear(args, ret);
-+}
-+
-+/**
-+ * kdbus_args_clear() - release allocated command resources
-+ * @args:	object to release resources of
-+ * @ret:	return value of this command
-+ *
-+ * This frees all allocated resources on @args and copies the command result
-+ * flags into user-space. @ret is usually returned unchanged by this function,
-+ * so it can be used in the final 'return' statement of the command handler.
-+ *
-+ * Return: -EFAULT if return values cannot be copied into user-space, otherwise
-+ *         @ret is returned unchanged.
-+ */
-+int kdbus_args_clear(struct kdbus_args *args, int ret)
-+{
-+	if (!args)
-+		return ret;
-+
-+	if (!IS_ERR_OR_NULL(args->cmd)) {
-+		if (args->is_cmd && put_user(args->cmd->return_flags,
-+					     &args->user->return_flags))
-+			ret = -EFAULT;
-+		if (args->cmd != (void *)args->cmd_buf)
-+			kfree(args->cmd);
-+		args->cmd = NULL;
-+	}
-+
-+	return ret;
-+}
-+
-+/**
-+ * enum kdbus_handle_type - type a handle can be of
-+ * @KDBUS_HANDLE_NONE:		no type set, yet
-+ * @KDBUS_HANDLE_BUS_OWNER:	bus owner
-+ * @KDBUS_HANDLE_EP_OWNER:	endpoint owner
-+ * @KDBUS_HANDLE_CONNECTED:	endpoint connection after HELLO
-+ */
-+enum kdbus_handle_type {
-+	KDBUS_HANDLE_NONE,
-+	KDBUS_HANDLE_BUS_OWNER,
-+	KDBUS_HANDLE_EP_OWNER,
-+	KDBUS_HANDLE_CONNECTED,
-+};
-+
-+/**
-+ * struct kdbus_handle - handle to the kdbus system
-+ * @lock:		handle lock
-+ * @type:		type of this handle (KDBUS_HANDLE_*)
-+ * @bus_owner:		bus this handle owns
-+ * @ep_owner:		endpoint this handle owns
-+ * @conn:		connection this handle owns
-+ */
-+struct kdbus_handle {
-+	struct mutex lock;
-+
-+	enum kdbus_handle_type type;
-+	union {
-+		struct kdbus_bus *bus_owner;
-+		struct kdbus_ep *ep_owner;
-+		struct kdbus_conn *conn;
-+	};
-+};
-+
-+static int kdbus_handle_open(struct inode *inode, struct file *file)
-+{
-+	struct kdbus_handle *handle;
-+	struct kdbus_node *node;
-+	int ret;
-+
-+	node = kdbus_node_from_inode(inode);
-+	if (!kdbus_node_acquire(node))
-+		return -ESHUTDOWN;
-+
-+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-+	if (!handle) {
-+		ret = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	mutex_init(&handle->lock);
-+	handle->type = KDBUS_HANDLE_NONE;
-+
-+	file->private_data = handle;
-+	ret = 0;
-+
-+exit:
-+	kdbus_node_release(node);
-+	return ret;
-+}
-+
-+static int kdbus_handle_release(struct inode *inode, struct file *file)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+
-+	switch (handle->type) {
-+	case KDBUS_HANDLE_BUS_OWNER:
-+		if (handle->bus_owner) {
-+			kdbus_node_deactivate(&handle->bus_owner->node);
-+			kdbus_bus_unref(handle->bus_owner);
-+		}
-+		break;
-+	case KDBUS_HANDLE_EP_OWNER:
-+		if (handle->ep_owner) {
-+			kdbus_node_deactivate(&handle->ep_owner->node);
-+			kdbus_ep_unref(handle->ep_owner);
-+		}
-+		break;
-+	case KDBUS_HANDLE_CONNECTED:
-+		kdbus_conn_disconnect(handle->conn, false);
-+		kdbus_conn_unref(handle->conn);
-+		break;
-+	case KDBUS_HANDLE_NONE:
-+		/* nothing to clean up */
-+		break;
-+	}
-+
-+	kfree(handle);
-+
-+	return 0;
-+}
-+
-+static long kdbus_handle_ioctl_control(struct file *file, unsigned int cmd,
-+				       void __user *argp)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	struct kdbus_node *node = file_inode(file)->i_private;
-+	struct kdbus_domain *domain;
-+	int ret = 0;
-+
-+	if (!kdbus_node_acquire(node))
-+		return -ESHUTDOWN;
-+
-+	/*
-+	 * The parent of control-nodes is always a domain; make sure to pin it
-+	 * so the parent is actually valid.
-+	 */
-+	domain = kdbus_domain_from_node(node->parent);
-+	if (!kdbus_node_acquire(&domain->node)) {
-+		kdbus_node_release(node);
-+		return -ESHUTDOWN;
-+	}
-+
-+	switch (cmd) {
-+	case KDBUS_CMD_BUS_MAKE: {
-+		struct kdbus_bus *bus;
-+
-+		bus = kdbus_cmd_bus_make(domain, argp);
-+		if (IS_ERR_OR_NULL(bus)) {
-+			ret = PTR_ERR_OR_ZERO(bus);
-+			break;
-+		}
-+
-+		handle->bus_owner = bus;
-+		ret = KDBUS_HANDLE_BUS_OWNER;
-+		break;
-+	}
-+
-+	default:
-+		ret = -EBADFD;
-+		break;
-+	}
-+
-+	kdbus_node_release(&domain->node);
-+	kdbus_node_release(node);
-+	return ret;
-+}
-+
-+static long kdbus_handle_ioctl_ep(struct file *file, unsigned int cmd,
-+				  void __user *buf)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	struct kdbus_node *node = file_inode(file)->i_private;
-+	struct kdbus_ep *ep, *file_ep = kdbus_ep_from_node(node);
-+	struct kdbus_bus *bus = file_ep->bus;
-+	struct kdbus_conn *conn;
-+	int ret = 0;
-+
-+	if (!kdbus_node_acquire(node))
-+		return -ESHUTDOWN;
-+
-+	switch (cmd) {
-+	case KDBUS_CMD_ENDPOINT_MAKE: {
-+		/* creating custom endpoints is a privileged operation */
-+		if (!kdbus_ep_is_owner(file_ep, file)) {
-+			ret = -EPERM;
-+			break;
-+		}
-+
-+		ep = kdbus_cmd_ep_make(bus, buf);
-+		if (IS_ERR_OR_NULL(ep)) {
-+			ret = PTR_ERR_OR_ZERO(ep);
-+			break;
-+		}
-+
-+		handle->ep_owner = ep;
-+		ret = KDBUS_HANDLE_EP_OWNER;
-+		break;
-+	}
-+
-+	case KDBUS_CMD_HELLO:
-+		conn = kdbus_cmd_hello(file_ep, file, buf);
-+		if (IS_ERR_OR_NULL(conn)) {
-+			ret = PTR_ERR_OR_ZERO(conn);
-+			break;
-+		}
-+
-+		handle->conn = conn;
-+		ret = KDBUS_HANDLE_CONNECTED;
-+		break;
-+
-+	default:
-+		ret = -EBADFD;
-+		break;
-+	}
-+
-+	kdbus_node_release(node);
-+	return ret;
-+}
-+
-+static long kdbus_handle_ioctl_ep_owner(struct file *file, unsigned int command,
-+					void __user *buf)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	struct kdbus_ep *ep = handle->ep_owner;
-+	int ret;
-+
-+	if (!kdbus_node_acquire(&ep->node))
-+		return -ESHUTDOWN;
-+
-+	switch (command) {
-+	case KDBUS_CMD_ENDPOINT_UPDATE:
-+		ret = kdbus_cmd_ep_update(ep, buf);
-+		break;
-+	default:
-+		ret = -EBADFD;
-+		break;
-+	}
-+
-+	kdbus_node_release(&ep->node);
-+	return ret;
-+}
-+
-+static long kdbus_handle_ioctl_connected(struct file *file,
-+					 unsigned int command, void __user *buf)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	struct kdbus_conn *conn = handle->conn;
-+	struct kdbus_conn *release_conn = NULL;
-+	int ret;
-+
-+	release_conn = conn;
-+	ret = kdbus_conn_acquire(release_conn);
-+	if (ret < 0)
-+		return ret;
-+
-+	switch (command) {
-+	case KDBUS_CMD_BYEBYE:
-+		/*
-+		 * BYEBYE is special; we must not acquire a connection when
-+		 * calling into kdbus_conn_disconnect() or we will deadlock,
-+		 * because kdbus_conn_disconnect() will wait for all acquired
-+		 * references to be dropped.
-+		 */
-+		kdbus_conn_release(release_conn);
-+		release_conn = NULL;
-+		ret = kdbus_cmd_byebye_unlocked(conn, buf);
-+		break;
-+	case KDBUS_CMD_NAME_ACQUIRE:
-+		ret = kdbus_cmd_name_acquire(conn, buf);
-+		break;
-+	case KDBUS_CMD_NAME_RELEASE:
-+		ret = kdbus_cmd_name_release(conn, buf);
-+		break;
-+	case KDBUS_CMD_LIST:
-+		ret = kdbus_cmd_list(conn, buf);
-+		break;
-+	case KDBUS_CMD_CONN_INFO:
-+		ret = kdbus_cmd_conn_info(conn, buf);
-+		break;
-+	case KDBUS_CMD_BUS_CREATOR_INFO:
-+		ret = kdbus_cmd_bus_creator_info(conn, buf);
-+		break;
-+	case KDBUS_CMD_UPDATE:
-+		ret = kdbus_cmd_update(conn, buf);
-+		break;
-+	case KDBUS_CMD_MATCH_ADD:
-+		ret = kdbus_cmd_match_add(conn, buf);
-+		break;
-+	case KDBUS_CMD_MATCH_REMOVE:
-+		ret = kdbus_cmd_match_remove(conn, buf);
-+		break;
-+	case KDBUS_CMD_SEND:
-+		ret = kdbus_cmd_send(conn, file, buf);
-+		break;
-+	case KDBUS_CMD_RECV:
-+		ret = kdbus_cmd_recv(conn, buf);
-+		break;
-+	case KDBUS_CMD_FREE:
-+		ret = kdbus_cmd_free(conn, buf);
-+		break;
-+	default:
-+		ret = -EBADFD;
-+		break;
-+	}
-+
-+	kdbus_conn_release(release_conn);
-+	return ret;
-+}
-+
-+static long kdbus_handle_ioctl(struct file *file, unsigned int cmd,
-+			       unsigned long arg)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	struct kdbus_node *node = kdbus_node_from_inode(file_inode(file));
-+	void __user *argp = (void __user *)arg;
-+	long ret = -EBADFD;
-+
-+	switch (cmd) {
-+	case KDBUS_CMD_BUS_MAKE:
-+	case KDBUS_CMD_ENDPOINT_MAKE:
-+	case KDBUS_CMD_HELLO:
-+		mutex_lock(&handle->lock);
-+		if (handle->type == KDBUS_HANDLE_NONE) {
-+			if (node->type == KDBUS_NODE_CONTROL)
-+				ret = kdbus_handle_ioctl_control(file, cmd,
-+								 argp);
-+			else if (node->type == KDBUS_NODE_ENDPOINT)
-+				ret = kdbus_handle_ioctl_ep(file, cmd, argp);
-+
-+			if (ret > 0) {
-+				/*
-+				 * The data given via open() is not sufficient
-+				 * to set up a kdbus handle. Hence, we require
-+				 * the user to perform a setup ioctl. This setup
-+				 * can only be performed once and defines the
-+				 * type of the handle. The different setup
-+				 * ioctls are locked against each other so they
-+				 * cannot race. Once the handle type is set,
-+				 * the type-dependent ioctls are enabled. To
-+				 * improve performance, we don't lock those via
-+				 * handle->lock. Instead, we issue a
-+				 * write-barrier before performing the
-+				 * type-change, which pairs with smp_rmb() in
-+				 * all handlers that access the type field. This
-+				 * guarantees the handle is fully set up if
-+				 * handle->type is set. If handle->type is
-+				 * unset, you must not make any assumptions
-+				 * without taking handle->lock.
-+				 * Note that handle->type is only set once. It
-+				 * will never change afterwards.
-+				 */
-+				smp_wmb();
-+				handle->type = ret;
-+			}
-+		}
-+		mutex_unlock(&handle->lock);
-+		break;
-+
-+	case KDBUS_CMD_ENDPOINT_UPDATE:
-+	case KDBUS_CMD_BYEBYE:
-+	case KDBUS_CMD_NAME_ACQUIRE:
-+	case KDBUS_CMD_NAME_RELEASE:
-+	case KDBUS_CMD_LIST:
-+	case KDBUS_CMD_CONN_INFO:
-+	case KDBUS_CMD_BUS_CREATOR_INFO:
-+	case KDBUS_CMD_UPDATE:
-+	case KDBUS_CMD_MATCH_ADD:
-+	case KDBUS_CMD_MATCH_REMOVE:
-+	case KDBUS_CMD_SEND:
-+	case KDBUS_CMD_RECV:
-+	case KDBUS_CMD_FREE: {
-+		enum kdbus_handle_type type;
-+
-+		/*
-+		 * This read-barrier pairs with smp_wmb() of the handle setup.
-+		 * it guarantees the handle is fully written, in case the
-+		 * It guarantees the handle is fully written, in case the
-+		 * taking handle->lock, given the guarantee that the type is
-+		 * only ever set once, and stays constant afterwards.
-+		 * Furthermore, the handle object itself is not modified in any
-+		 * way after the type is set. That is, the type-field is the
-+		 * last field that is written on any handle. If it has not been
-+		 * set, we must not access the handle here.
-+		 */
-+		type = handle->type;
-+		smp_rmb();
-+
-+		if (type == KDBUS_HANDLE_EP_OWNER)
-+			ret = kdbus_handle_ioctl_ep_owner(file, cmd, argp);
-+		else if (type == KDBUS_HANDLE_CONNECTED)
-+			ret = kdbus_handle_ioctl_connected(file, cmd, argp);
-+
-+		break;
-+	}
-+	default:
-+		ret = -ENOTTY;
-+		break;
-+	}
-+
-+	return ret < 0 ? ret : 0;
-+}
-+
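-+/*
-+ * The wmb/rmb pairing above is the classic one-shot publish pattern,
-+ * reduced to its core (sketch, not literal kdbus code):
-+ *
-+ *	writer (setup ioctl):		reader (fast-path handlers):
-+ *	handle->conn = conn;		type = handle->type;
-+ *	smp_wmb();			smp_rmb();
-+ *	handle->type = ret;		if (type == KDBUS_HANDLE_CONNECTED)
-+ *						use(handle->conn);
-+ */
-+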
-+static unsigned int kdbus_handle_poll(struct file *file,
-+				      struct poll_table_struct *wait)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	enum kdbus_handle_type type;
-+	unsigned int mask = POLLOUT | POLLWRNORM;
-+
-+	/*
-+	 * This pairs with smp_wmb() during handle setup. It guarantees that
-+	 * _iff_ the handle type is set, handle->conn is valid. Furthermore,
-+	 * _iff_ the type is set, the handle object is constant and never
-+	 * changed again. If it's not set, we must not access the handle but
-+	 * bail out. We also must assume no setup has taken place, yet.
-+	 */
-+	type = handle->type;
-+	smp_rmb();
-+
-+	/* Only a connected endpoint can read/write data */
-+	if (type != KDBUS_HANDLE_CONNECTED)
-+		return POLLERR | POLLHUP;
-+
-+	poll_wait(file, &handle->conn->wait, wait);
-+
-+	/*
-+	 * Verify the connection hasn't been deactivated _after_ adding the
-+	 * wait-queue. This guarantees, that if the connection is deactivated
-+	 * after we checked it, the waitqueue is signaled and we're called
-+	 * again.
-+	 */
-+	if (!kdbus_conn_active(handle->conn))
-+		return POLLERR | POLLHUP;
-+
-+	if (!list_empty(&handle->conn->queue.msg_list) ||
-+	    atomic_read(&handle->conn->lost_count) > 0)
-+		mask |= POLLIN | POLLRDNORM;
-+
-+	return mask;
-+}
-+
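-+/*
-+ * User-space sketch (illustrative): waiting for incoming messages on a
-+ * connected handle is a plain poll() on the connection fd:
-+ *
-+ *	struct pollfd pfd = { .fd = conn_fd, .events = POLLIN };
-+ *
-+ *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
-+ *		ioctl(conn_fd, KDBUS_CMD_RECV, &recv_cmd);
-+ */
-+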
-+static int kdbus_handle_mmap(struct file *file, struct vm_area_struct *vma)
-+{
-+	struct kdbus_handle *handle = file->private_data;
-+	enum kdbus_handle_type type;
-+	int ret = -EBADFD;
-+
-+	/*
-+	 * This pairs with smp_wmb() during handle setup. It guarantees that
-+	 * _iff_ the handle type is set, handle->conn is valid. Furthermore,
-+	 * _iff_ the type is set, the handle object is constant and never
-+	 * changed again. If it's not set, we must not access the handle but
-+	 * bail out. We also must assume no setup has taken place, yet.
-+	 */
-+	type = handle->type;
-+	smp_rmb();
-+
-+	/* Only connected handles have a pool we can map */
-+	if (type == KDBUS_HANDLE_CONNECTED)
-+		ret = kdbus_pool_mmap(handle->conn->pool, vma);
-+
-+	return ret;
-+}
-+
-+const struct file_operations kdbus_handle_ops = {
-+	.owner =		THIS_MODULE,
-+	.open =			kdbus_handle_open,
-+	.release =		kdbus_handle_release,
-+	.poll =			kdbus_handle_poll,
-+	.llseek =		noop_llseek,
-+	.unlocked_ioctl =	kdbus_handle_ioctl,
-+	.mmap =			kdbus_handle_mmap,
-+#ifdef CONFIG_COMPAT
-+	.compat_ioctl =		kdbus_handle_ioctl,
-+#endif
-+};
-diff --git a/ipc/kdbus/handle.h b/ipc/kdbus/handle.h
-new file mode 100644
-index 0000000..5dde2c1
---- /dev/null
-+++ b/ipc/kdbus/handle.h
-@@ -0,0 +1,103 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_HANDLE_H
-+#define __KDBUS_HANDLE_H
-+
-+#include <linux/fs.h>
-+#include <uapi/linux/kdbus.h>
-+
-+extern const struct file_operations kdbus_handle_ops;
-+
-+/**
-+ * kdbus_arg - information and state of a single ioctl command item
-+ * @type:		item type
-+ * @item:		set by the parser to the first found item of this type
-+ * @multiple:		whether multiple items of this type are allowed
-+ * @mandatory:		whether at least one item of this type is required
-+ *
-+ * This structure describes a single item in an ioctl command payload. The
-+ * caller has to pre-fill the type and flags; the parser will then use this
-+ * information to verify the ioctl payload. @item is set by the parser to point
-+ * to the first occurrence of the item.
-+ */
-+struct kdbus_arg {
-+	u64 type;
-+	struct kdbus_item *item;
-+	bool multiple : 1;
-+	bool mandatory : 1;
-+};
-+
-+/**
-+ * kdbus_args - information and state of ioctl command parser
-+ * @allowed_flags:	set of flags this command supports
-+ * @argc:		number of items in @argv
-+ * @argv:		array of items this command supports
-+ * @user:		set by parser to user-space location of current command
-+ * @cmd:		set by parser to kernel copy of command payload
-+ * @cmd_buf:		inline buf to avoid kmalloc() on small cmds
-+ * @items:		points to item array in @cmd
-+ * @items_size:		size of @items in bytes
-+ * @is_cmd:		whether this is a command-payload or msg-payload
-+ *
-+ * This structure is used to parse ioctl command payloads on each invocation.
-+ * The ioctl handler has to pre-fill the flags and allowed items before passing
-+ * the object to kdbus_args_parse(). The parser will copy the command payload
-+ * into kernel-space and verify the correctness of the data.
-+ *
-+ * We use a 256-byte buffer for small command payloads, to be allocated on
-+ * the stack at syscall entry.
-+ */
-+struct kdbus_args {
-+	u64 allowed_flags;
-+	size_t argc;
-+	struct kdbus_arg *argv;
-+
-+	struct kdbus_cmd __user *user;
-+	struct kdbus_cmd *cmd;
-+	u8 cmd_buf[256];
-+
-+	struct kdbus_item *items;
-+	size_t items_size;
-+	bool is_cmd : 1;
-+};
-+
-+int __kdbus_args_parse(struct kdbus_args *args, bool is_cmd, void __user *argp,
-+		       size_t type_size, size_t items_offset, void **out);
-+int kdbus_args_clear(struct kdbus_args *args, int ret);
-+
-+#define kdbus_args_parse(_args, _argp, _v)                              \
-+	({                                                              \
-+		BUILD_BUG_ON(offsetof(typeof(**(_v)), size) !=          \
-+			     offsetof(struct kdbus_cmd, size));         \
-+		BUILD_BUG_ON(offsetof(typeof(**(_v)), flags) !=         \
-+			     offsetof(struct kdbus_cmd, flags));        \
-+		BUILD_BUG_ON(offsetof(typeof(**(_v)), return_flags) !=  \
-+			     offsetof(struct kdbus_cmd, return_flags)); \
-+		__kdbus_args_parse((_args), 1, (_argp), sizeof(**(_v)), \
-+				   offsetof(typeof(**(_v)), items),     \
-+				   (void **)(_v));                      \
-+	})
-+
-+#define kdbus_args_parse_msg(_args, _argp, _v)                          \
-+	({                                                              \
-+		BUILD_BUG_ON(offsetof(typeof(**(_v)), size) !=          \
-+			     offsetof(struct kdbus_cmd, size));         \
-+		BUILD_BUG_ON(offsetof(typeof(**(_v)), flags) !=         \
-+			     offsetof(struct kdbus_cmd, flags));        \
-+		__kdbus_args_parse((_args), 0, (_argp), sizeof(**(_v)), \
-+				   offsetof(typeof(**(_v)), items),     \
-+				   (void **)(_v));                      \
-+	})
-+
-+#endif
-diff --git a/ipc/kdbus/item.c b/ipc/kdbus/item.c
-new file mode 100644
-index 0000000..ce78dba
---- /dev/null
-+++ b/ipc/kdbus/item.c
-@@ -0,0 +1,293 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/ctype.h>
-+#include <linux/fs.h>
-+#include <linux/string.h>
-+
-+#include "item.h"
-+#include "limits.h"
-+#include "util.h"
-+
-+/*
-+ * This verifies that the string at position @str with size @size is properly
-+ * zero-terminated and contains no 0-byte except at the very end.
-+ */
-+static bool kdbus_str_valid(const char *str, size_t size)
-+{
-+	return size > 0 && memchr(str, '\0', size) == str + size - 1;
-+}
-+
-+/**
-+ * kdbus_item_validate_name() - validate an item containing a name
-+ * @item:		Item to validate
-+ *
-+ * Return: zero on success or a negative error code on failure
-+ */
-+int kdbus_item_validate_name(const struct kdbus_item *item)
-+{
-+	const char *name = item->str;
-+	unsigned int i;
-+	size_t len;
-+
-+	if (item->size < KDBUS_ITEM_HEADER_SIZE + 2)
-+		return -EINVAL;
-+
-+	if (item->size > KDBUS_ITEM_HEADER_SIZE +
-+			 KDBUS_SYSNAME_MAX_LEN + 1)
-+		return -ENAMETOOLONG;
-+
-+	if (!kdbus_str_valid(name, KDBUS_ITEM_PAYLOAD_SIZE(item)))
-+		return -EINVAL;
-+
-+	len = strlen(name);
-+	if (len == 0)
-+		return -EINVAL;
-+
-+	for (i = 0; i < len; i++) {
-+		if (isalpha(name[i]))
-+			continue;
-+		if (isdigit(name[i]))
-+			continue;
-+		if (name[i] == '_')
-+			continue;
-+		if (i > 0 && i + 1 < len && (name[i] == '-' || name[i] == '.'))
-+			continue;
-+
-+		return -EINVAL;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_item_validate() - validate a single item
-+ * @item:	item to validate
-+ *
-+ * Return: 0 if item is valid, negative error code if not.
-+ */
-+int kdbus_item_validate(const struct kdbus_item *item)
-+{
-+	size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
-+	size_t l;
-+	int ret;
-+
-+	BUILD_BUG_ON(KDBUS_ITEM_HEADER_SIZE !=
-+		     sizeof(struct kdbus_item_header));
-+
-+	if (item->size < KDBUS_ITEM_HEADER_SIZE)
-+		return -EINVAL;
-+
-+	switch (item->type) {
-+	case KDBUS_ITEM_NEGOTIATE:
-+		if (payload_size % sizeof(u64) != 0)
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_PAYLOAD_VEC:
-+	case KDBUS_ITEM_PAYLOAD_OFF:
-+		if (payload_size != sizeof(struct kdbus_vec))
-+			return -EINVAL;
-+		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_PAYLOAD_MEMFD:
-+		if (payload_size != sizeof(struct kdbus_memfd))
-+			return -EINVAL;
-+		if (item->memfd.size == 0 || item->memfd.size > SIZE_MAX)
-+			return -EINVAL;
-+		if (item->memfd.fd < 0)
-+			return -EBADF;
-+		break;
-+
-+	case KDBUS_ITEM_FDS:
-+		if (payload_size % sizeof(int) != 0)
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_CANCEL_FD:
-+		if (payload_size != sizeof(int))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_BLOOM_PARAMETER:
-+		if (payload_size != sizeof(struct kdbus_bloom_parameter))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_BLOOM_FILTER:
-+		/* followed by the bloom-mask, depends on the bloom-size */
-+		if (payload_size < sizeof(struct kdbus_bloom_filter))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_BLOOM_MASK:
-+		/* size depends on bloom-size of bus */
-+		break;
-+
-+	case KDBUS_ITEM_CONN_DESCRIPTION:
-+	case KDBUS_ITEM_MAKE_NAME:
-+		ret = kdbus_item_validate_name(item);
-+		if (ret < 0)
-+			return ret;
-+		break;
-+
-+	case KDBUS_ITEM_ATTACH_FLAGS_SEND:
-+	case KDBUS_ITEM_ATTACH_FLAGS_RECV:
-+	case KDBUS_ITEM_ID:
-+	case KDBUS_ITEM_DST_ID:
-+		if (payload_size != sizeof(u64))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_TIMESTAMP:
-+		if (payload_size != sizeof(struct kdbus_timestamp))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_CREDS:
-+		if (payload_size != sizeof(struct kdbus_creds))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_AUXGROUPS:
-+		if (payload_size % sizeof(u32) != 0)
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_NAME:
-+	case KDBUS_ITEM_DST_NAME:
-+	case KDBUS_ITEM_PID_COMM:
-+	case KDBUS_ITEM_TID_COMM:
-+	case KDBUS_ITEM_EXE:
-+	case KDBUS_ITEM_CMDLINE:
-+	case KDBUS_ITEM_CGROUP:
-+	case KDBUS_ITEM_SECLABEL:
-+		if (!kdbus_str_valid(item->str, payload_size))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_CAPS:
-+		if (payload_size < sizeof(u32))
-+			return -EINVAL;
-+		if (payload_size < sizeof(u32) +
-+		    4 * CAP_TO_INDEX(item->caps.last_cap) * sizeof(u32))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_AUDIT:
-+		if (payload_size != sizeof(struct kdbus_audit))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_POLICY_ACCESS:
-+		if (payload_size != sizeof(struct kdbus_policy_access))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_NAME_ADD:
-+	case KDBUS_ITEM_NAME_REMOVE:
-+	case KDBUS_ITEM_NAME_CHANGE:
-+		if (payload_size < sizeof(struct kdbus_notify_name_change))
-+			return -EINVAL;
-+		l = payload_size - offsetof(struct kdbus_notify_name_change,
-+					    name);
-+		if (l > 0 && !kdbus_str_valid(item->name_change.name, l))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_ID_ADD:
-+	case KDBUS_ITEM_ID_REMOVE:
-+		if (payload_size != sizeof(struct kdbus_notify_id_change))
-+			return -EINVAL;
-+		break;
-+
-+	case KDBUS_ITEM_REPLY_TIMEOUT:
-+	case KDBUS_ITEM_REPLY_DEAD:
-+		if (payload_size != 0)
-+			return -EINVAL;
-+		break;
-+
-+	default:
-+		break;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_items_validate() - validate items passed by user-space
-+ * @items:		items to validate
-+ * @items_size:		size of @items in bytes
-+ *
-+ * This verifies that the passed items pointer is consistent and valid.
-+ * Furthermore, each item is checked for:
-+ *  - valid "size" value
-+ *  - payload is of expected type
-+ *  - payload is fully included in the item
-+ *  - string payloads are zero-terminated
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_items_validate(const struct kdbus_item *items, size_t items_size)
-+{
-+	const struct kdbus_item *item;
-+	int ret;
-+
-+	KDBUS_ITEMS_FOREACH(item, items, items_size) {
-+		if (!KDBUS_ITEM_VALID(item, items, items_size))
-+			return -EINVAL;
-+
-+		ret = kdbus_item_validate(item);
-+		if (ret < 0)
-+			return ret;
-+	}
-+
-+	if (!KDBUS_ITEMS_END(item, items, items_size))
-+		return -EINVAL;
-+
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_item_set() - Set item content
-+ * @item:	The item to modify
-+ * @type:	The item type to set (KDBUS_ITEM_*)
-+ * @data:	Data to copy to item->data, may be %NULL
-+ * @len:	Number of bytes in @data
-+ *
-+ * This sets type, size and data fields of an item. If @data is NULL, the data
-+ * memory is cleared.
-+ *
-+ * Note that you must align your @data memory to 8 bytes. Trailing padding (in
-+ * case @len is not 8-byte aligned) is cleared by this call.
-+ *
-+ * Return: Pointer to the following item.
-+ */
-+struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
-+				  const void *data, size_t len)
-+{
-+	item->type = type;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + len;
-+
-+	if (data) {
-+		memcpy(item->data, data, len);
-+		memset(item->data + len, 0, KDBUS_ALIGN8(len) - len);
-+	} else {
-+		memset(item->data, 0, KDBUS_ALIGN8(len));
-+	}
-+
-+	return KDBUS_ITEM_NEXT(item);
-+}
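(Editorial aside, not part of the patch: the helpers above compose nicely because kdbus_item_set() returns the next aligned slot. The sketch below, under the assumption of a hypothetical caller named example_serialize and the usual slab/string includes, shows two items chained into one buffer sized with KDBUS_ITEM_SIZE().)

	/*
	 * Editorial sketch, not part of the patch. KDBUS_ITEM_SIZE()
	 * already accounts for each item's 8-byte alignment padding,
	 * so the buffer ends exactly where KDBUS_ITEMS_END() expects.
	 */
	static int example_serialize(u64 id, const char *name)
	{
		size_t nlen = strlen(name) + 1;
		size_t size = KDBUS_ITEM_SIZE(sizeof(id)) +
			      KDBUS_ITEM_SIZE(nlen);
		struct kdbus_item *first, *next;

		first = kzalloc(size, GFP_KERNEL);
		if (!first)
			return -ENOMEM;

		next = kdbus_item_set(first, KDBUS_ITEM_ID, &id, sizeof(id));
		next = kdbus_item_set(next, KDBUS_ITEM_MAKE_NAME, name, nlen);

		/* 'first'/'size' now form a stream that passes
		 * kdbus_items_validate(), provided 'name' is a valid name */
		kfree(first);
		return 0;
	}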
-diff --git a/ipc/kdbus/item.h b/ipc/kdbus/item.h
-new file mode 100644
-index 0000000..3a7e6cc
---- /dev/null
-+++ b/ipc/kdbus/item.h
-@@ -0,0 +1,61 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_ITEM_H
-+#define __KDBUS_ITEM_H
-+
-+#include <linux/kernel.h>
-+#include <uapi/linux/kdbus.h>
-+
-+#include "util.h"
-+
-+/* generic access and iterators over a stream of items */
-+#define KDBUS_ITEM_NEXT(_i) (typeof(_i))((u8 *)(_i) + KDBUS_ALIGN8((_i)->size))
-+#define KDBUS_ITEMS_SIZE(_h, _is) ((_h)->size - offsetof(typeof(*(_h)), _is))
-+#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
-+#define KDBUS_ITEM_SIZE(_s) KDBUS_ALIGN8(KDBUS_ITEM_HEADER_SIZE + (_s))
-+#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((_i)->size - KDBUS_ITEM_HEADER_SIZE)
-+
-+#define KDBUS_ITEMS_FOREACH(_i, _is, _s)				\
-+	for ((_i) = (_is);						\
-+	     ((u8 *)(_i) < (u8 *)(_is) + (_s)) &&			\
-+	       ((u8 *)(_i) >= (u8 *)(_is));				\
-+	     (_i) = KDBUS_ITEM_NEXT(_i))
-+
-+#define KDBUS_ITEM_VALID(_i, _is, _s)					\
-+	((_i)->size >= KDBUS_ITEM_HEADER_SIZE &&			\
-+	 (u8 *)(_i) + (_i)->size > (u8 *)(_i) &&			\
-+	 (u8 *)(_i) + (_i)->size <= (u8 *)(_is) + (_s) &&		\
-+	 (u8 *)(_i) >= (u8 *)(_is))
-+
-+#define KDBUS_ITEMS_END(_i, _is, _s)					\
-+	((u8 *)(_i) == ((u8 *)(_is) + KDBUS_ALIGN8(_s)))
-+
-+/**
-+ * struct kdbus_item_header - Describes the fixed part of an item
-+ * @size:	The total size of the item
-+ * @type:	The item type, one of KDBUS_ITEM_*
-+ */
-+struct kdbus_item_header {
-+	u64 size;
-+	u64 type;
-+};
-+
-+int kdbus_item_validate_name(const struct kdbus_item *item);
-+int kdbus_item_validate(const struct kdbus_item *item);
-+int kdbus_items_validate(const struct kdbus_item *items, size_t items_size);
-+struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
-+				  const void *data, size_t len);
-+
-+#endif
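(Editorial aside, not part of the patch: the size macros above are easiest to verify with concrete numbers; the note below works through a 5-byte payload.)

	/*
	 * Editorial note, not part of the patch. KDBUS_ITEM_HEADER_SIZE
	 * is 16 (two u64 fields), so for a 5-byte payload:
	 *
	 *   item->size                 == 16 + 5 == 21   (unpadded, as stored)
	 *   KDBUS_ITEM_SIZE(5)         == KDBUS_ALIGN8(21) == 24
	 *   KDBUS_ITEM_PAYLOAD_SIZE(i) == 21 - 16 == 5
	 *
	 * KDBUS_ITEMS_FOREACH() advances by the aligned size (24), while
	 * KDBUS_ITEMS_END() only holds if that aligned stepping lands
	 * exactly on the end of the buffer.
	 */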
-diff --git a/ipc/kdbus/limits.h b/ipc/kdbus/limits.h
-new file mode 100644
-index 0000000..c54925a
---- /dev/null
-+++ b/ipc/kdbus/limits.h
-@@ -0,0 +1,61 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_DEFAULTS_H
-+#define __KDBUS_DEFAULTS_H
-+
-+#include <linux/kernel.h>
-+
-+/* maximum size of message header and items */
-+#define KDBUS_MSG_MAX_SIZE		SZ_8K
-+
-+/* maximum number of memfd items per message */
-+#define KDBUS_MSG_MAX_MEMFD_ITEMS	16
-+
-+/* max size of ioctl command data */
-+#define KDBUS_CMD_MAX_SIZE		SZ_32K
-+
-+/* maximum number of inflight fds in a target queue per user */
-+#define KDBUS_CONN_MAX_FDS_PER_USER	16
-+
-+/* maximum message payload size */
-+#define KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE		SZ_2M
-+
-+/* maximum size of bloom bit field in bytes */
-+#define KDBUS_BUS_BLOOM_MAX_SIZE		SZ_4K
-+
-+/* maximum length of well-known bus name */
-+#define KDBUS_NAME_MAX_LEN			255
-+
-+/* maximum length of bus, domain, ep name */
-+#define KDBUS_SYSNAME_MAX_LEN			63
-+
-+/* maximum number of matches per connection */
-+#define KDBUS_MATCH_MAX				256
-+
-+/* maximum number of queued messages from the same individual user */
-+#define KDBUS_CONN_MAX_MSGS			256
-+
-+/* maximum number of well-known names per connection */
-+#define KDBUS_CONN_MAX_NAMES			256
-+
-+/* maximum number of queued requests waiting for a reply */
-+#define KDBUS_CONN_MAX_REQUESTS_PENDING		128
-+
-+/* maximum number of connections per user in one domain */
-+#define KDBUS_USER_MAX_CONN			1024
-+
-+/* maximum number of buses per user in one domain */
-+#define KDBUS_USER_MAX_BUSES			16
-+
-+#endif
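(Editorial aside, not part of the patch: these constants are enforced at their call sites rather than centrally. As one pointer, taken verbatim from kdbus_cmd_match_add() in ipc/kdbus/match.c later in this patch, the match database caps its entry count like this:)

	if (++mdb->entries_count > KDBUS_MATCH_MAX) {
		--mdb->entries_count;
		ret = -EMFILE;
	}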
-diff --git a/ipc/kdbus/main.c b/ipc/kdbus/main.c
-new file mode 100644
-index 0000000..1ad4dc8
---- /dev/null
-+++ b/ipc/kdbus/main.c
-@@ -0,0 +1,114 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
-+#include <linux/fs.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+
-+#include "util.h"
-+#include "fs.h"
-+#include "handle.h"
-+#include "metadata.h"
-+#include "node.h"
-+
-+/*
-+ * This is a simplified outline of the internal kdbus object relations, for
-+ * those interested in the inner life of the driver implementation.
-+ *
-+ * From a mount point's (domain's) perspective:
-+ *
-+ * struct kdbus_domain
-+ *   |» struct kdbus_user *user (many, owned)
-+ *   '» struct kdbus_node node (embedded)
-+ *       |» struct kdbus_node children (many, referenced)
-+ *       |» struct kdbus_node *parent (pinned)
-+ *       '» struct kdbus_bus (many, pinned)
-+ *           |» struct kdbus_node node (embedded)
-+ *           '» struct kdbus_ep (many, pinned)
-+ *               |» struct kdbus_node node (embedded)
-+ *               |» struct kdbus_bus *bus (pinned)
-+ *               |» struct kdbus_conn conn_list (many, pinned)
-+ *               |   |» struct kdbus_ep *ep (pinned)
-+ *               |   |» struct kdbus_name_entry *activator_of (owned)
-+ *               |   |» struct kdbus_meta *meta (owned)
-+ *               |   |» struct kdbus_match_db *match_db (owned)
-+ *               |   |    '» struct kdbus_match_entry (many, owned)
-+ *               |   |
-+ *               |   |» struct kdbus_pool *pool (owned)
-+ *               |   |    '» struct kdbus_pool_slice *slices (many, owned)
-+ *               |   |       '» struct kdbus_pool *pool (pinned)
-+ *               |   |
-+ *               |   |» struct kdbus_user *user (pinned)
-+ *               |   `» struct kdbus_queue_entry entries (many, embedded)
-+ *               |        |» struct kdbus_pool_slice *slice (pinned)
-+ *               |        |» struct kdbus_conn_reply *reply (owned)
-+ *               |        '» struct kdbus_user *user (pinned)
-+ *               |
-+ *               '» struct kdbus_user *user (pinned)
-+ *                   '» struct kdbus_policy_db policy_db (embedded)
-+ *                        |» struct kdbus_policy_db_entry (many, owned)
-+ *                        |   |» struct kdbus_conn (pinned)
-+ *                        |   '» struct kdbus_ep (pinned)
-+ *                        |
-+ *                        '» struct kdbus_policy_db_cache_entry (many, owned)
-+ *                            '» struct kdbus_conn (pinned)
-+ *
-+ * For the life-time of a file descriptor derived from calling open() on a file
-+ * inside the mount point:
-+ *
-+ * struct kdbus_handle
-+ *  |» struct kdbus_meta *meta (owned)
-+ *  |» struct kdbus_ep *ep (pinned)
-+ *  |» struct kdbus_conn *conn (owned)
-+ *  '» struct kdbus_ep *ep (owned)
-+ */
-+
-+/* kdbus mount-point /sys/fs/kdbus */
-+static struct kobject *kdbus_dir;
-+
-+static int __init kdbus_init(void)
-+{
-+	int ret;
-+
-+	kdbus_dir = kobject_create_and_add(KBUILD_MODNAME, fs_kobj);
-+	if (!kdbus_dir)
-+		return -ENOMEM;
-+
-+	ret = kdbus_fs_init();
-+	if (ret < 0) {
-+		pr_err("cannot register filesystem: %d\n", ret);
-+		goto exit_dir;
-+	}
-+
-+	pr_info("initialized\n");
-+	return 0;
-+
-+exit_dir:
-+	kobject_put(kdbus_dir);
-+	return ret;
-+}
-+
-+static void __exit kdbus_exit(void)
-+{
-+	kdbus_fs_exit();
-+	kobject_put(kdbus_dir);
-+	ida_destroy(&kdbus_node_ida);
-+}
-+
-+module_init(kdbus_init);
-+module_exit(kdbus_exit);
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("D-Bus, powerful, easy to use interprocess communication");
-+MODULE_ALIAS_FS(KBUILD_MODNAME "fs");
-diff --git a/ipc/kdbus/match.c b/ipc/kdbus/match.c
-new file mode 100644
-index 0000000..4ee6a1f
---- /dev/null
-+++ b/ipc/kdbus/match.c
-@@ -0,0 +1,546 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/hash.h>
-+#include <linux/init.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "match.h"
-+#include "message.h"
-+#include "names.h"
-+
-+/**
-+ * struct kdbus_match_db - message filters
-+ * @entries_list:	List of matches
-+ * @mdb_rwlock:		Match data lock
-+ * @entries_count:	Number of entries in database
-+ */
-+struct kdbus_match_db {
-+	struct list_head entries_list;
-+	struct rw_semaphore mdb_rwlock;
-+	unsigned int entries_count;
-+};
-+
-+/**
-+ * struct kdbus_match_entry - a match database entry
-+ * @cookie:		User-supplied cookie to lookup the entry
-+ * @list_entry:		The list entry element for the db list
-+ * @rules_list:		The list head for tracking rules of this entry
-+ */
-+struct kdbus_match_entry {
-+	u64 cookie;
-+	struct list_head list_entry;
-+	struct list_head rules_list;
-+};
-+
-+/**
-+ * struct kdbus_bloom_mask - mask to match against filter
-+ * @generations:	Number of generations carried
-+ * @data:		Array of bloom bit fields
-+ */
-+struct kdbus_bloom_mask {
-+	u64 generations;
-+	u64 *data;
-+};
-+
-+/**
-+ * struct kdbus_match_rule - a rule appended to a match entry
-+ * @type:		An item type to match against
-+ * @bloom_mask:		Bloom mask to match a message's filter against, used
-+ *			with KDBUS_ITEM_BLOOM_MASK
-+ * @name:		Name to match against, used with KDBUS_ITEM_NAME,
-+ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}
-+ * @old_id:		ID to match against, used with
-+ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE},
-+ *			KDBUS_ITEM_ID_REMOVE
-+ * @new_id:		ID to match against, used with
-+ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE},
-+ *			KDBUS_ITEM_ID_REMOVE
-+ * @src_id:		ID to match against, used with KDBUS_ITEM_ID
-+ * @dst_id:		Message destination ID, used with KDBUS_ITEM_DST_ID
-+ * @rules_entry:	Entry in the entry's rules list
-+ */
-+struct kdbus_match_rule {
-+	u64 type;
-+	union {
-+		struct kdbus_bloom_mask bloom_mask;
-+		struct {
-+			char *name;
-+			u64 old_id;
-+			u64 new_id;
-+		};
-+		u64 src_id;
-+		u64 dst_id;
-+	};
-+	struct list_head rules_entry;
-+};
-+
-+static void kdbus_match_rule_free(struct kdbus_match_rule *rule)
-+{
-+	if (!rule)
-+		return;
-+
-+	switch (rule->type) {
-+	case KDBUS_ITEM_BLOOM_MASK:
-+		kfree(rule->bloom_mask.data);
-+		break;
-+
-+	case KDBUS_ITEM_NAME:
-+	case KDBUS_ITEM_NAME_ADD:
-+	case KDBUS_ITEM_NAME_REMOVE:
-+	case KDBUS_ITEM_NAME_CHANGE:
-+		kfree(rule->name);
-+		break;
-+
-+	case KDBUS_ITEM_ID:
-+	case KDBUS_ITEM_DST_ID:
-+	case KDBUS_ITEM_ID_ADD:
-+	case KDBUS_ITEM_ID_REMOVE:
-+		break;
-+
-+	default:
-+		BUG();
-+	}
-+
-+	list_del(&rule->rules_entry);
-+	kfree(rule);
-+}
-+
-+static void kdbus_match_entry_free(struct kdbus_match_entry *entry)
-+{
-+	struct kdbus_match_rule *r, *tmp;
-+
-+	if (!entry)
-+		return;
-+
-+	list_for_each_entry_safe(r, tmp, &entry->rules_list, rules_entry)
-+		kdbus_match_rule_free(r);
-+
-+	list_del(&entry->list_entry);
-+	kfree(entry);
-+}
-+
-+/**
-+ * kdbus_match_db_free() - free match db resources
-+ * @mdb:		The match database
-+ */
-+void kdbus_match_db_free(struct kdbus_match_db *mdb)
-+{
-+	struct kdbus_match_entry *entry, *tmp;
-+
-+	if (!mdb)
-+		return;
-+
-+	list_for_each_entry_safe(entry, tmp, &mdb->entries_list, list_entry)
-+		kdbus_match_entry_free(entry);
-+
-+	kfree(mdb);
-+}
-+
-+/**
-+ * kdbus_match_db_new() - create a new match database
-+ *
-+ * Return: a new kdbus_match_db on success, ERR_PTR on failure.
-+ */
-+struct kdbus_match_db *kdbus_match_db_new(void)
-+{
-+	struct kdbus_match_db *d;
-+
-+	d = kzalloc(sizeof(*d), GFP_KERNEL);
-+	if (!d)
-+		return ERR_PTR(-ENOMEM);
-+
-+	init_rwsem(&d->mdb_rwlock);
-+	INIT_LIST_HEAD(&d->entries_list);
-+
-+	return d;
-+}
-+
-+static bool kdbus_match_bloom(const struct kdbus_bloom_filter *filter,
-+			      const struct kdbus_bloom_mask *mask,
-+			      const struct kdbus_conn *conn)
-+{
-+	size_t n = conn->ep->bus->bloom.size / sizeof(u64);
-+	const u64 *m;
-+	size_t i;
-+
-+	/*
-+	 * The message's filter carries a generation identifier, the
-+	 * match's mask possibly carries an array of multiple generations
-+	 * of the mask. Select the mask with the closest match of the
-+	 * filter's generation.
-+	 */
-+	m = mask->data + (min(filter->generation, mask->generations - 1) * n);
-+
-+	/*
-+	 * The message's filter contains the message's properties, the
-+	 * match's mask contains the properties to look for in the
-+	 * message. Check the mask bit field against the filter bit field
-+	 * to see whether the message possibly carries the properties the
-+	 * connection has subscribed to.
-+	 */
-+	for (i = 0; i < n; i++)
-+		if ((filter->data[i] & m[i]) != m[i])
-+			return false;
-+
-+	return true;
-+}
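(Editorial aside, not part of the patch: a worked example of the subset test above. With an 8-byte bloom size, n == 1 and the loop reduces to a single AND.)

	/*
	 * Editorial example, not part of the patch. With
	 * bus->bloom.size == 8 and a mask m[0] == 0x5 (bits 0 and 2):
	 *
	 *   filter->data[0] == 0xd:  (0xd & 0x5) == 0x5  -> match
	 *   filter->data[0] == 0x9:  (0x9 & 0x5) == 0x1  -> no match
	 *
	 * i.e. the mask must be a bit-subset of the filter in every
	 * u64 word for the entry to match.
	 */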
-+
-+static bool kdbus_match_rule_conn(const struct kdbus_match_rule *r,
-+				  struct kdbus_conn *c,
-+				  const struct kdbus_staging *s)
-+{
-+	lockdep_assert_held(&c->ep->bus->name_registry->rwlock);
-+
-+	switch (r->type) {
-+	case KDBUS_ITEM_BLOOM_MASK:
-+		return kdbus_match_bloom(s->bloom_filter, &r->bloom_mask, c);
-+	case KDBUS_ITEM_ID:
-+		return r->src_id == c->id || r->src_id == KDBUS_MATCH_ID_ANY;
-+	case KDBUS_ITEM_DST_ID:
-+		return r->dst_id == s->msg->dst_id ||
-+		       r->dst_id == KDBUS_MATCH_ID_ANY;
-+	case KDBUS_ITEM_NAME:
-+		return kdbus_conn_has_name(c, r->name);
-+	default:
-+		return false;
-+	}
-+}
-+
-+static bool kdbus_match_rule_kernel(const struct kdbus_match_rule *r,
-+				    const struct kdbus_staging *s)
-+{
-+	struct kdbus_item *n = s->notify;
-+
-+	if (WARN_ON(!n) || n->type != r->type)
-+		return false;
-+
-+	switch (r->type) {
-+	case KDBUS_ITEM_ID_ADD:
-+		return r->new_id == KDBUS_MATCH_ID_ANY ||
-+		       r->new_id == n->id_change.id;
-+	case KDBUS_ITEM_ID_REMOVE:
-+		return r->old_id == KDBUS_MATCH_ID_ANY ||
-+		       r->old_id == n->id_change.id;
-+	case KDBUS_ITEM_NAME_ADD:
-+	case KDBUS_ITEM_NAME_CHANGE:
-+	case KDBUS_ITEM_NAME_REMOVE:
-+		return (r->old_id == KDBUS_MATCH_ID_ANY ||
-+		        r->old_id == n->name_change.old_id.id) &&
-+		       (r->new_id == KDBUS_MATCH_ID_ANY ||
-+		        r->new_id == n->name_change.new_id.id) &&
-+		       (!r->name || !strcmp(r->name, n->name_change.name));
-+	default:
-+		return false;
-+	}
-+}
-+
-+static bool kdbus_match_rules(const struct kdbus_match_entry *entry,
-+			      struct kdbus_conn *c,
-+			      const struct kdbus_staging *s)
-+{
-+	struct kdbus_match_rule *r;
-+
-+	list_for_each_entry(r, &entry->rules_list, rules_entry)
-+		if ((c && !kdbus_match_rule_conn(r, c, s)) ||
-+		    (!c && !kdbus_match_rule_kernel(r, s)))
-+			return false;
-+
-+	return true;
-+}
-+
-+/**
-+ * kdbus_match_db_match_msg() - match a msg object against the database entries
-+ * @mdb:		The match database
-+ * @conn_src:		The connection object originating the message
-+ * @staging:		Staging object containing the message to match against
-+ *
-+ * This function will walk through all the database entries previously uploaded
-+ * with kdbus_cmd_match_add(). As soon as any of them has an all-satisfied rule
-+ * set, this function will return true.
-+ *
-+ * The caller must hold the registry lock of conn_src->ep->bus if conn_src
-+ * is non-NULL.
-+ *
-+ * Return: true if there was a matching database entry, false otherwise.
-+ */
-+bool kdbus_match_db_match_msg(struct kdbus_match_db *mdb,
-+			      struct kdbus_conn *conn_src,
-+			      const struct kdbus_staging *staging)
-+{
-+	struct kdbus_match_entry *entry;
-+	bool matched = false;
-+
-+	down_read(&mdb->mdb_rwlock);
-+	list_for_each_entry(entry, &mdb->entries_list, list_entry) {
-+		matched = kdbus_match_rules(entry, conn_src, staging);
-+		if (matched)
-+			break;
-+	}
-+	up_read(&mdb->mdb_rwlock);
-+
-+	return matched;
-+}
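(Editorial aside, not part of the patch: the matching semantics are OR-of-ANDs, which the note below spells out with two hypothetical entries.)

	/*
	 * Editorial note, not part of the patch: entries are OR'ed while
	 * the rules inside one entry are AND'ed. Given
	 *
	 *   entry A: { KDBUS_ITEM_ID: 5, KDBUS_ITEM_NAME: "org.foo" }
	 *   entry B: { KDBUS_ITEM_ID: 7 }
	 *
	 * a message from connection 5 matches only if that connection
	 * also owns "org.foo", while any message from connection 7
	 * matches via entry B alone.
	 */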
-+
-+static int kdbus_match_db_remove_unlocked(struct kdbus_match_db *mdb,
-+					  u64 cookie)
-+{
-+	struct kdbus_match_entry *entry, *tmp;
-+	bool found = false;
-+
-+	list_for_each_entry_safe(entry, tmp, &mdb->entries_list, list_entry)
-+		if (entry->cookie == cookie) {
-+			kdbus_match_entry_free(entry);
-+			--mdb->entries_count;
-+			found = true;
-+		}
-+
-+	return found ? 0 : -EBADSLT;
-+}
-+
-+/**
-+ * kdbus_cmd_match_add() - handle KDBUS_CMD_MATCH_ADD
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * One call to this function (or one ioctl(KDBUS_CMD_MATCH_ADD), respectively)
-+ * adds one new database entry with n rules attached to it. Each rule is
-+ * described with a kdbus_item, and an entry is considered matching if all
-+ * its rules are satisfied.
-+ *
-+ * The items attached to a kdbus_cmd_match struct have the following mapping:
-+ *
-+ * KDBUS_ITEM_BLOOM_MASK:	A bloom mask
-+ * KDBUS_ITEM_NAME:		A connection's source name
-+ * KDBUS_ITEM_ID:		A connection ID
-+ * KDBUS_ITEM_DST_ID:		A connection ID
-+ * KDBUS_ITEM_NAME_ADD:
-+ * KDBUS_ITEM_NAME_REMOVE:
-+ * KDBUS_ITEM_NAME_CHANGE:	Well-known name changes, carry
-+ *				kdbus_notify_name_change
-+ * KDBUS_ITEM_ID_ADD:
-+ * KDBUS_ITEM_ID_REMOVE:	Connection ID changes, carry
-+ *				kdbus_notify_id_change
-+ *
-+ * For kdbus_notify_{id,name}_change structs, only the ID and name fields
-+ * are looked at when adding an entry. The flags are unused.
-+ *
-+ * Also note that KDBUS_ITEM_BLOOM_MASK, KDBUS_ITEM_NAME, KDBUS_ITEM_ID,
-+ * and KDBUS_ITEM_DST_ID are used to match messages from userspace, while the
-+ * others apply to kernel-generated notifications.
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_match_db *mdb = conn->match_db;
-+	struct kdbus_match_entry *entry = NULL;
-+	struct kdbus_cmd_match *cmd;
-+	struct kdbus_item *item;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_BLOOM_MASK, .multiple = true },
-+		{ .type = KDBUS_ITEM_NAME, .multiple = true },
-+		{ .type = KDBUS_ITEM_ID, .multiple = true },
-+		{ .type = KDBUS_ITEM_DST_ID, .multiple = true },
-+		{ .type = KDBUS_ITEM_NAME_ADD, .multiple = true },
-+		{ .type = KDBUS_ITEM_NAME_REMOVE, .multiple = true },
-+		{ .type = KDBUS_ITEM_NAME_CHANGE, .multiple = true },
-+		{ .type = KDBUS_ITEM_ID_ADD, .multiple = true },
-+		{ .type = KDBUS_ITEM_ID_REMOVE, .multiple = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_MATCH_REPLACE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-+	if (!entry) {
-+		ret = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	entry->cookie = cmd->cookie;
-+	INIT_LIST_HEAD(&entry->list_entry);
-+	INIT_LIST_HEAD(&entry->rules_list);
-+
-+	KDBUS_ITEMS_FOREACH(item, cmd->items, KDBUS_ITEMS_SIZE(cmd, items)) {
-+		struct kdbus_match_rule *rule;
-+		size_t size = item->size - offsetof(struct kdbus_item, data);
-+
-+		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-+		if (!rule) {
-+			ret = -ENOMEM;
-+			goto exit;
-+		}
-+
-+		rule->type = item->type;
-+		INIT_LIST_HEAD(&rule->rules_entry);
-+
-+		switch (item->type) {
-+		case KDBUS_ITEM_BLOOM_MASK: {
-+			u64 bsize = conn->ep->bus->bloom.size;
-+			u64 generations;
-+			u64 remainder;
-+
-+			generations = div64_u64_rem(size, bsize, &remainder);
-+			if (size < bsize || remainder > 0) {
-+				ret = -EDOM;
-+				break;
-+			}
-+
-+			rule->bloom_mask.data = kmemdup(item->data,
-+							size, GFP_KERNEL);
-+			if (!rule->bloom_mask.data) {
-+				ret = -ENOMEM;
-+				break;
-+			}
-+
-+			rule->bloom_mask.generations = generations;
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_NAME:
-+			if (!kdbus_name_is_valid(item->str, false)) {
-+				ret = -EINVAL;
-+				break;
-+			}
-+
-+			rule->name = kstrdup(item->str, GFP_KERNEL);
-+			if (!rule->name)
-+				ret = -ENOMEM;
-+
-+			break;
-+
-+		case KDBUS_ITEM_ID:
-+			rule->src_id = item->id;
-+			break;
-+
-+		case KDBUS_ITEM_DST_ID:
-+			rule->dst_id = item->id;
-+			break;
-+
-+		case KDBUS_ITEM_NAME_ADD:
-+		case KDBUS_ITEM_NAME_REMOVE:
-+		case KDBUS_ITEM_NAME_CHANGE:
-+			rule->old_id = item->name_change.old_id.id;
-+			rule->new_id = item->name_change.new_id.id;
-+
-+			if (size > sizeof(struct kdbus_notify_name_change)) {
-+				rule->name = kstrdup(item->name_change.name,
-+						     GFP_KERNEL);
-+				if (!rule->name)
-+					ret = -ENOMEM;
-+			}
-+
-+			break;
-+
-+		case KDBUS_ITEM_ID_ADD:
-+		case KDBUS_ITEM_ID_REMOVE:
-+			if (item->type == KDBUS_ITEM_ID_ADD)
-+				rule->new_id = item->id_change.id;
-+			else
-+				rule->old_id = item->id_change.id;
-+
-+			break;
-+		}
-+
-+		if (ret < 0) {
-+			kdbus_match_rule_free(rule);
-+			goto exit;
-+		}
-+
-+		list_add_tail(&rule->rules_entry, &entry->rules_list);
-+	}
-+
-+	down_write(&mdb->mdb_rwlock);
-+
-+	/* Remove any entry that has the same cookie as the current one. */
-+	if (cmd->flags & KDBUS_MATCH_REPLACE)
-+		kdbus_match_db_remove_unlocked(mdb, entry->cookie);
-+
-+	/*
-+	 * If the above removal caught any entry, there will be room for the
-+	 * new one.
-+	 */
-+	if (++mdb->entries_count > KDBUS_MATCH_MAX) {
-+		--mdb->entries_count;
-+		ret = -EMFILE;
-+	} else {
-+		list_add_tail(&entry->list_entry, &mdb->entries_list);
-+		entry = NULL;
-+	}
-+
-+	up_write(&mdb->mdb_rwlock);
-+
-+exit:
-+	kdbus_match_entry_free(entry);
-+	return kdbus_args_clear(&args, ret);
-+}
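(Editorial aside, not part of the patch: for orientation, a minimal user-space caller might look like the sketch below. 'conn_fd' is a hypothetical connection fd; the struct layouts and the KDBUS_CMD_MATCH_ADD ioctl number are assumed to follow the uapi header added elsewhere in this patch, and error handling is elided.)

	/* Editorial sketch, not part of the patch: install a match on
	 * messages from connection ID 5. The item is laid out manually
	 * so that cmd.size covers exactly one 24-byte KDBUS_ITEM_ID item.
	 */
	struct {
		struct kdbus_cmd_match cmd;
		struct {
			__u64 size;
			__u64 type;
			__u64 id;
		} item;
	} m = {};

	m.item.size = sizeof(m.item);	/* 24: item header plus one u64 */
	m.item.type = KDBUS_ITEM_ID;
	m.item.id   = 5;
	m.cmd.cookie = 0xc00c1e;
	m.cmd.size   = sizeof(m);

	if (ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, &m) < 0)
		return -errno;	/* real code would inspect errno */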
-+
-+/**
-+ * kdbus_cmd_match_remove() - handle KDBUS_CMD_MATCH_REMOVE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_cmd_match *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	down_write(&conn->match_db->mdb_rwlock);
-+	ret = kdbus_match_db_remove_unlocked(conn->match_db, cmd->cookie);
-+	up_write(&conn->match_db->mdb_rwlock);
-+
-+	return kdbus_args_clear(&args, ret);
-+}
-diff --git a/ipc/kdbus/match.h b/ipc/kdbus/match.h
-new file mode 100644
-index 0000000..ceb492f
---- /dev/null
-+++ b/ipc/kdbus/match.h
-@@ -0,0 +1,35 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_MATCH_H
-+#define __KDBUS_MATCH_H
-+
-+struct kdbus_conn;
-+struct kdbus_match_db;
-+struct kdbus_staging;
-+
-+struct kdbus_match_db *kdbus_match_db_new(void);
-+void kdbus_match_db_free(struct kdbus_match_db *db);
-+bool kdbus_match_db_match_msg(struct kdbus_match_db *db,
-+			      struct kdbus_conn *conn_src,
-+			      const struct kdbus_staging *staging);
-+
-+int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp);
-+
-+#endif
-diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
-new file mode 100644
-index 0000000..ae565cd
---- /dev/null
-+++ b/ipc/kdbus/message.c
-@@ -0,0 +1,1040 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/capability.h>
-+#include <linux/cgroup.h>
-+#include <linux/cred.h>
-+#include <linux/file.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/shmem_fs.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <net/sock.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "match.h"
-+#include "message.h"
-+#include "names.h"
-+#include "policy.h"
-+
-+static const char * const zeros = "\0\0\0\0\0\0\0";
-+
-+static struct kdbus_gaps *kdbus_gaps_new(size_t n_memfds, size_t n_fds)
-+{
-+	size_t size_offsets, size_memfds, size_fds, size;
-+	struct kdbus_gaps *gaps;
-+
-+	size_offsets = n_memfds * sizeof(*gaps->memfd_offsets);
-+	size_memfds = n_memfds * sizeof(*gaps->memfd_files);
-+	size_fds = n_fds * sizeof(*gaps->fd_files);
-+	size = sizeof(*gaps) + size_offsets + size_memfds + size_fds;
-+
-+	gaps = kzalloc(size, GFP_KERNEL);
-+	if (!gaps)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kref_init(&gaps->kref);
-+	gaps->n_memfds = 0; /* we reserve n_memfds, but don't enforce them */
-+	gaps->memfd_offsets = (void *)(gaps + 1);
-+	gaps->memfd_files = (void *)((u8 *)gaps->memfd_offsets + size_offsets);
-+	gaps->n_fds = 0; /* we reserve n_fds, but don't enforce them */
-+	gaps->fd_files = (void *)((u8 *)gaps->memfd_files + size_memfds);
-+
-+	return gaps;
-+}
-+
-+static void kdbus_gaps_free(struct kref *kref)
-+{
-+	struct kdbus_gaps *gaps = container_of(kref, struct kdbus_gaps, kref);
-+	size_t i;
-+
-+	for (i = 0; i < gaps->n_fds; ++i)
-+		if (gaps->fd_files[i])
-+			fput(gaps->fd_files[i]);
-+	for (i = 0; i < gaps->n_memfds; ++i)
-+		if (gaps->memfd_files[i])
-+			fput(gaps->memfd_files[i]);
-+
-+	kfree(gaps);
-+}
-+
-+/**
-+ * kdbus_gaps_ref() - gain reference
-+ * @gaps:	gaps object
-+ *
-+ * Return: @gaps is returned
-+ */
-+struct kdbus_gaps *kdbus_gaps_ref(struct kdbus_gaps *gaps)
-+{
-+	if (gaps)
-+		kref_get(&gaps->kref);
-+	return gaps;
-+}
-+
-+/**
-+ * kdbus_gaps_unref() - drop reference
-+ * @gaps:	gaps object
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_gaps *kdbus_gaps_unref(struct kdbus_gaps *gaps)
-+{
-+	if (gaps)
-+		kref_put(&gaps->kref, kdbus_gaps_free);
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_gaps_install() - install file-descriptors
-+ * @gaps:		gaps object, or NULL
-+ * @slice:		pool slice that contains the message
-+ * @out_incomplete:	output variable to note incomplete fds
-+ *
-+ * This function installs all file-descriptors of @gaps into the current
-+ * process and copies the file-descriptor numbers into the target pool slice.
-+ *
-+ * If the file-descriptors were only partially installed, then @out_incomplete
-+ * will be set to true. Otherwise, it's set to false.
-+ *
-+ * Return: 0 on success, negative error code on failure
-+ */
-+int kdbus_gaps_install(struct kdbus_gaps *gaps, struct kdbus_pool_slice *slice,
-+		       bool *out_incomplete)
-+{
-+	bool incomplete_fds = false;
-+	struct kvec kvec;
-+	size_t i, n_fds;
-+	int ret, *fds;
-+
-+	if (!gaps) {
-+		/* nothing to do */
-+		*out_incomplete = incomplete_fds;
-+		return 0;
-+	}
-+
-+	n_fds = gaps->n_fds + gaps->n_memfds;
-+	if (n_fds < 1) {
-+		/* nothing to do */
-+		*out_incomplete = incomplete_fds;
-+		return 0;
-+	}
-+
-+	fds = kmalloc_array(n_fds, sizeof(*fds), GFP_TEMPORARY);
-+	n_fds = 0;
-+	if (!fds)
-+		return -ENOMEM;
-+
-+	/* 1) allocate fds and copy them over */
-+
-+	if (gaps->n_fds > 0) {
-+		for (i = 0; i < gaps->n_fds; ++i) {
-+			int fd;
-+
-+			fd = get_unused_fd_flags(O_CLOEXEC);
-+			if (fd < 0)
-+				incomplete_fds = true;
-+
-+			WARN_ON(!gaps->fd_files[i]);
-+
-+			fds[n_fds++] = fd < 0 ? -1 : fd;
-+		}
-+
-+		/*
-+		 * The file-descriptor array can only be present once per
-+		 * message. Hence, prepare all fds and then copy them over with
-+		 * a single kvec.
-+		 */
-+
-+		WARN_ON(!gaps->fd_offset);
-+
-+		kvec.iov_base = fds;
-+		kvec.iov_len = gaps->n_fds * sizeof(*fds);
-+		ret = kdbus_pool_slice_copy_kvec(slice, gaps->fd_offset,
-+						 &kvec, 1, kvec.iov_len);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	for (i = 0; i < gaps->n_memfds; ++i) {
-+		int memfd;
-+
-+		memfd = get_unused_fd_flags(O_CLOEXEC);
-+		if (memfd < 0) {
-+			incomplete_fds = true;
-+			/* memfds are initialized to -1, skip copying it */
-+			continue;
-+		}
-+
-+		fds[n_fds++] = memfd;
-+
-+		/*
-+		 * memfds have to be copied individually, as each is put
-+		 * into a separate item. This should not be an issue, though,
-+		 * as usually there is no need to send more than one memfd per
-+		 * message.
-+		 */
-+
-+		WARN_ON(!gaps->memfd_offsets[i]);
-+		WARN_ON(!gaps->memfd_files[i]);
-+
-+		kvec.iov_base = &memfd;
-+		kvec.iov_len = sizeof(memfd);
-+		ret = kdbus_pool_slice_copy_kvec(slice, gaps->memfd_offsets[i],
-+						 &kvec, 1, kvec.iov_len);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	/* 2) install fds now that everything was successful */
-+
-+	for (i = 0; i < gaps->n_fds; ++i)
-+		if (fds[i] >= 0)
-+			fd_install(fds[i], get_file(gaps->fd_files[i]));
-+	for (i = 0; i < gaps->n_memfds; ++i)
-+		if (fds[gaps->n_fds + i] >= 0)
-+			fd_install(fds[gaps->n_fds + i],
-+				   get_file(gaps->memfd_files[i]));
-+
-+	ret = 0;
-+
-+exit:
-+	if (ret < 0)
-+		for (i = 0; i < n_fds; ++i)
-+			put_unused_fd(fds[i]);
-+	kfree(fds);
-+	*out_incomplete = incomplete_fds;
-+	return ret;
-+}
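(Editorial aside, not part of the patch: the function above follows the kernel's standard reserve-then-publish pattern for descriptors, condensed below.)

	/*
	 * Editorial summary, not part of the patch:
	 *
	 *	fd = get_unused_fd_flags(O_CLOEXEC);  - phase 1: reserve number
	 *	...copy fd numbers into the pool slice (may still fail)...
	 *	fd_install(fd, get_file(file));       - phase 2: publish
	 *
	 * put_unused_fd() can revoke a reserved-but-unpublished number in
	 * the error path; once fd_install() has run, the descriptor is
	 * visible to user-space and can no longer be taken back.
	 */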
-+
-+static struct file *kdbus_get_fd(int fd)
-+{
-+	struct file *f, *ret;
-+	struct inode *inode;
-+	struct socket *sock;
-+
-+	if (fd < 0)
-+		return ERR_PTR(-EBADF);
-+
-+	f = fget_raw(fd);
-+	if (!f)
-+		return ERR_PTR(-EBADF);
-+
-+	inode = file_inode(f);
-+	sock = S_ISSOCK(inode->i_mode) ? SOCKET_I(inode) : NULL;
-+
-+	if (f->f_mode & FMODE_PATH)
-+		ret = f; /* O_PATH is always allowed */
-+	else if (f->f_op == &kdbus_handle_ops)
-+		ret = ERR_PTR(-EOPNOTSUPP); /* disallow kdbus-fd over kdbus */
-+	else if (sock && sock->sk && sock->ops && sock->ops->family == PF_UNIX)
-+		ret = ERR_PTR(-EOPNOTSUPP); /* disallow UDS over kdbus */
-+	else
-+		ret = f; /* all other are allowed */
-+
-+	if (f != ret)
-+		fput(f);
-+
-+	return ret;
-+}
-+
-+static struct file *kdbus_get_memfd(const struct kdbus_memfd *memfd)
-+{
-+	const int m = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL;
-+	struct file *f, *ret;
-+	int s;
-+
-+	if (memfd->fd < 0)
-+		return ERR_PTR(-EBADF);
-+
-+	f = fget(memfd->fd);
-+	if (!f)
-+		return ERR_PTR(-EBADF);
-+
-+	s = shmem_get_seals(f);
-+	if (s < 0)
-+		ret = ERR_PTR(-EMEDIUMTYPE);
-+	else if ((s & m) != m)
-+		ret = ERR_PTR(-ETXTBSY);
-+	else if (memfd->start + memfd->size > (u64)i_size_read(file_inode(f)))
-+		ret = ERR_PTR(-EFAULT);
-+	else
-+		ret = f;
-+
-+	if (f != ret)
-+		fput(f);
-+
-+	return ret;
-+}
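(Editorial aside, not part of the patch: since the check above rejects unsealed memfds with -ETXTBSY, a sender must seal before attaching. A minimal user-space sketch follows; 'data'/'len' are hypothetical, error checks are elided, and older libcs need a raw syscall() wrapper for memfd_create().)

	/* Editorial sketch, not part of the patch. */
	int fd = memfd_create("payload", MFD_ALLOW_SEALING);

	write(fd, data, len);	/* fill first; F_SEAL_WRITE forbids it after */
	fcntl(fd, F_ADD_SEALS,
	      F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);

	/* fd now passes kdbus_get_memfd() when referenced from a
	 * KDBUS_ITEM_PAYLOAD_MEMFD item */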
-+
-+static int kdbus_msg_examine(struct kdbus_msg *msg, struct kdbus_bus *bus,
-+			     struct kdbus_cmd_send *cmd, size_t *out_n_memfds,
-+			     size_t *out_n_fds, size_t *out_n_parts)
-+{
-+	struct kdbus_item *item, *fds = NULL, *bloom = NULL, *dstname = NULL;
-+	u64 n_parts, n_memfds, n_fds, vec_size;
-+
-+	/*
-+	 * Step 1:
-+	 * Validate the message and command parameters.
-+	 */
-+
-+	/* KDBUS_PAYLOAD_KERNEL is reserved to kernel messages */
-+	if (msg->payload_type == KDBUS_PAYLOAD_KERNEL)
-+		return -EINVAL;
-+
-+	if (msg->dst_id == KDBUS_DST_ID_BROADCAST) {
-+		/* broadcasts must be marked as signals */
-+		if (!(msg->flags & KDBUS_MSG_SIGNAL))
-+			return -EBADMSG;
-+		/* broadcasts cannot have timeouts */
-+		if (msg->timeout_ns > 0)
-+			return -ENOTUNIQ;
-+	}
-+
-+	if (msg->flags & KDBUS_MSG_EXPECT_REPLY) {
-+		/* if you expect a reply, you must specify a timeout */
-+		if (msg->timeout_ns == 0)
-+			return -EINVAL;
-+		/* signals cannot have replies */
-+		if (msg->flags & KDBUS_MSG_SIGNAL)
-+			return -ENOTUNIQ;
-+	} else {
-+		/* must expect reply if sent as synchronous call */
-+		if (cmd->flags & KDBUS_SEND_SYNC_REPLY)
-+			return -EINVAL;
-+		/* cannot mark replies as signal */
-+		if (msg->cookie_reply && (msg->flags & KDBUS_MSG_SIGNAL))
-+			return -EINVAL;
-+	}
-+
-+	/*
-+	 * Step 2:
-+	 * Validate all passed items. While at it, select some statistics that
-+	 * are required to allocate state objects later on.
-+	 *
-+	 * Generic item validation has already been done via
-+	 * kdbus_item_validate(). Furthermore, the number of items is naturally
-+	 * limited by the maximum message size. Hence, only non-generic item
-+	 * checks are performed here (mainly integer overflow tests).
-+	 */
-+
-+	n_parts = 0;
-+	n_memfds = 0;
-+	n_fds = 0;
-+	vec_size = 0;
-+
-+	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
-+		switch (item->type) {
-+		case KDBUS_ITEM_PAYLOAD_VEC: {
-+			void __force __user *ptr = KDBUS_PTR(item->vec.address);
-+			u64 size = item->vec.size;
-+
-+			if (vec_size + size < vec_size)
-+				return -EMSGSIZE;
-+			if (vec_size + size > KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE)
-+				return -EMSGSIZE;
-+			if (ptr && unlikely(!access_ok(VERIFY_READ, ptr, size)))
-+				return -EFAULT;
-+
-+			if (ptr || size % 8) /* data or padding */
-+				++n_parts;
-+			break;
-+		}
-+		case KDBUS_ITEM_PAYLOAD_MEMFD: {
-+			u64 start = item->memfd.start;
-+			u64 size = item->memfd.size;
-+
-+			if (start + size < start)
-+				return -EMSGSIZE;
-+			if (n_memfds >= KDBUS_MSG_MAX_MEMFD_ITEMS)
-+				return -E2BIG;
-+
-+			++n_memfds;
-+			if (size % 8) /* vec-padding required */
-+				++n_parts;
-+			break;
-+		}
-+		case KDBUS_ITEM_FDS: {
-+			if (fds)
-+				return -EEXIST;
-+
-+			fds = item;
-+			n_fds = KDBUS_ITEM_PAYLOAD_SIZE(item) / sizeof(int);
-+			if (n_fds > KDBUS_CONN_MAX_FDS_PER_USER)
-+				return -EMFILE;
-+
-+			break;
-+		}
-+		case KDBUS_ITEM_BLOOM_FILTER: {
-+			u64 bloom_size;
-+
-+			if (bloom)
-+				return -EEXIST;
-+
-+			bloom = item;
-+			bloom_size = KDBUS_ITEM_PAYLOAD_SIZE(item) -
-+				     offsetof(struct kdbus_bloom_filter, data);
-+			if (!KDBUS_IS_ALIGNED8(bloom_size))
-+				return -EFAULT;
-+			if (bloom_size != bus->bloom.size)
-+				return -EDOM;
-+
-+			break;
-+		}
-+		case KDBUS_ITEM_DST_NAME: {
-+			if (dstname)
-+				return -EEXIST;
-+
-+			dstname = item;
-+			if (!kdbus_name_is_valid(item->str, false))
-+				return -EINVAL;
-+			if (msg->dst_id == KDBUS_DST_ID_BROADCAST)
-+				return -EBADMSG;
-+
-+			break;
-+		}
-+		default:
-+			return -EINVAL;
-+		}
-+	}
-+
-+	/*
-+	 * Step 3:
-+	 * Validate that required items were actually passed, and that no item
-+	 * contradicts the message flags.
-+	 */
-+
-+	/* bloom filters must be attached _iff_ it's a signal */
-+	if (!(msg->flags & KDBUS_MSG_SIGNAL) != !bloom)
-+		return -EBADMSG;
-+	/* destination name is required if no ID is given */
-+	if (msg->dst_id == KDBUS_DST_ID_NAME && !dstname)
-+		return -EDESTADDRREQ;
-+	/* cannot send file-descriptors attached to broadcasts */
-+	if (msg->dst_id == KDBUS_DST_ID_BROADCAST && fds)
-+		return -ENOTUNIQ;
-+
-+	*out_n_memfds = n_memfds;
-+	*out_n_fds = n_fds;
-+	*out_n_parts = n_parts;
-+
-+	return 0;
-+}
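(Editorial aside, not part of the patch: the bloom/signal consistency check in step 3 uses a double-negation idiom worth unpacking.)

	/*
	 * Editorial note, not part of the patch. In
	 *
	 *	!(msg->flags & KDBUS_MSG_SIGNAL) != !bloom
	 *
	 * both operands collapse to 0 or 1, so the expression is true
	 * exactly when one side is present without the other: a signal
	 * without a bloom filter, or a bloom filter on a non-signal.
	 * Both mixed cases take the -EBADMSG path.
	 */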
-+
-+static bool kdbus_staging_merge_vecs(struct kdbus_staging *staging,
-+				     struct kdbus_item **prev_item,
-+				     struct iovec **prev_vec,
-+				     const struct kdbus_item *merge)
-+{
-+	void __user *ptr = (void __user *)KDBUS_PTR(merge->vec.address);
-+	u64 padding = merge->vec.size % 8;
-+	struct kdbus_item *prev = *prev_item;
-+	struct iovec *vec = *prev_vec;
-+
-+	/* XXX: merging is disabled so far */
-+	if (0 && prev && prev->type == KDBUS_ITEM_PAYLOAD_OFF &&
-+	    !merge->vec.address == !prev->vec.address) {
-+		/*
-+		 * If we merge two VECs, we can always drop the second
-+		 * PAYLOAD_VEC item. Hence, include its size in the previous
-+		 * one.
-+		 */
-+		prev->vec.size += merge->vec.size;
-+
-+		if (ptr) {
-+			/*
-+			 * If we merge two data VECs, we need two iovecs to copy
-+			 * the data. But the items can be easily merged by
-+			 * summing their lengths.
-+			 */
-+			vec = &staging->parts[staging->n_parts++];
-+			vec->iov_len = merge->vec.size;
-+			vec->iov_base = ptr;
-+			staging->n_payload += vec->iov_len;
-+		} else if (padding) {
-+			/*
-+			 * If we merge two 0-vecs with the second 0-vec
-+			 * requiring padding, we need to insert an iovec to copy
-+			 * the 0-padding. We try merging it with the previous
-+			 * 0-padding iovec. This might end up with an
-+			 * iov_len==0, in which case we simply drop the iovec.
-+			 */
-+			if (vec) {
-+				staging->n_payload -= vec->iov_len;
-+				vec->iov_len = prev->vec.size % 8;
-+				if (!vec->iov_len) {
-+					--staging->n_parts;
-+					vec = NULL;
-+				} else {
-+					staging->n_payload += vec->iov_len;
-+				}
-+			} else {
-+				vec = &staging->parts[staging->n_parts++];
-+				vec->iov_len = padding;
-+				vec->iov_base = (char __user *)zeros;
-+				staging->n_payload += vec->iov_len;
-+			}
-+		} else {
-+			/*
-+			 * If we merge two 0-vecs with the second 0-vec having
-+			 * no padding, we know the padding of the first stays
-+			 * the same. Hence, @vec needs no adjustment.
-+			 */
-+		}
-+
-+		/* successfully merged with previous item */
-+		merge = prev;
-+	} else {
-+		/*
-+		 * If we cannot merge the payload item with the previous one,
-+		 * we simply insert a new iovec for the data/padding.
-+		 */
-+		if (ptr) {
-+			vec = &staging->parts[staging->n_parts++];
-+			vec->iov_len = merge->vec.size;
-+			vec->iov_base = ptr;
-+			staging->n_payload += vec->iov_len;
-+		} else if (padding) {
-+			vec = &staging->parts[staging->n_parts++];
-+			vec->iov_len = padding;
-+			vec->iov_base = (char __user *)zeros;
-+			staging->n_payload += vec->iov_len;
-+		} else {
-+			vec = NULL;
-+		}
-+	}
-+
-+	*prev_item = (struct kdbus_item *)merge;
-+	*prev_vec = vec;
-+
-+	return merge == prev;
-+}
-+
-+static int kdbus_staging_import(struct kdbus_staging *staging)
-+{
-+	struct kdbus_item *it, *item, *last, *prev_payload;
-+	struct kdbus_gaps *gaps = staging->gaps;
-+	struct kdbus_msg *msg = staging->msg;
-+	struct iovec *part, *prev_part;
-+	bool drop_item;
-+
-+	drop_item = false;
-+	last = NULL;
-+	prev_payload = NULL;
-+	prev_part = NULL;
-+
-+	/*
-+	 * We modify msg->items along the way; make sure to use @item as offset
-+	 * to the next item (instead of the iterator @it).
-+	 */
-+	for (it = item = msg->items;
-+	     it >= msg->items &&
-+	             (u8 *)it < (u8 *)msg + msg->size &&
-+	             (u8 *)it + it->size <= (u8 *)msg + msg->size; ) {
-+		/*
-+		 * If we dropped items along the way, move current item to
-+		 * front. We must not access @it afterwards, but use @item
-+		 * instead!
-+		 */
-+		if (it != item)
-+			memmove(item, it, it->size);
-+		it = (void *)((u8 *)it + KDBUS_ALIGN8(item->size));
-+
-+		switch (item->type) {
-+		case KDBUS_ITEM_PAYLOAD_VEC: {
-+			size_t offset = staging->n_payload;
-+
-+			if (kdbus_staging_merge_vecs(staging, &prev_payload,
-+						     &prev_part, item)) {
-+				drop_item = true;
-+			} else if (item->vec.address) {
-+				/* real offset is patched later on */
-+				item->type = KDBUS_ITEM_PAYLOAD_OFF;
-+				item->vec.offset = offset;
-+			} else {
-+				item->type = KDBUS_ITEM_PAYLOAD_OFF;
-+				item->vec.offset = ~0ULL;
-+			}
-+
-+			break;
-+		}
-+		case KDBUS_ITEM_PAYLOAD_MEMFD: {
-+			struct file *f;
-+
-+			f = kdbus_get_memfd(&item->memfd);
-+			if (IS_ERR(f))
-+				return PTR_ERR(f);
-+
-+			gaps->memfd_files[gaps->n_memfds] = f;
-+			gaps->memfd_offsets[gaps->n_memfds] =
-+					(u8 *)&item->memfd.fd - (u8 *)msg;
-+			++gaps->n_memfds;
-+
-+			/* memfds cannot be merged */
-+			prev_payload = item;
-+			prev_part = NULL;
-+
-+			/* insert padding to make following VECs aligned */
-+			if (item->memfd.size % 8) {
-+				part = &staging->parts[staging->n_parts++];
-+				part->iov_len = item->memfd.size % 8;
-+				part->iov_base = (char __user *)zeros;
-+				staging->n_payload += part->iov_len;
-+			}
-+
-+			break;
-+		}
-+		case KDBUS_ITEM_FDS: {
-+			size_t i, n_fds;
-+
-+			n_fds = KDBUS_ITEM_PAYLOAD_SIZE(item) / sizeof(int);
-+			for (i = 0; i < n_fds; ++i) {
-+				struct file *f;
-+
-+				f = kdbus_get_fd(item->fds[i]);
-+				if (IS_ERR(f))
-+					return PTR_ERR(f);
-+
-+				gaps->fd_files[gaps->n_fds++] = f;
-+			}
-+
-+			gaps->fd_offset = (u8 *)item->fds - (u8 *)msg;
-+
-+			break;
-+		}
-+		case KDBUS_ITEM_BLOOM_FILTER:
-+			staging->bloom_filter = &item->bloom_filter;
-+			break;
-+		case KDBUS_ITEM_DST_NAME:
-+			staging->dst_name = item->str;
-+			break;
-+		}
-+
-+		/* drop item if we merged it with a previous one */
-+		if (drop_item) {
-+			drop_item = false;
-+		} else {
-+			last = item;
-+			item = KDBUS_ITEM_NEXT(item);
-+		}
-+	}
-+
-+	/* adjust message size regarding dropped items */
-+	msg->size = offsetof(struct kdbus_msg, items);
-+	if (last)
-+		msg->size += ((u8 *)last - (u8 *)msg->items) + last->size;
-+
-+	return 0;
-+}
-+
-+static void kdbus_staging_reserve(struct kdbus_staging *staging)
-+{
-+	struct iovec *part;
-+
-+	part = &staging->parts[staging->n_parts++];
-+	part->iov_base = (void __user *)zeros;
-+	part->iov_len = 0;
-+}
-+
-+static struct kdbus_staging *kdbus_staging_new(struct kdbus_bus *bus,
-+					       size_t n_parts,
-+					       size_t msg_extra_size)
-+{
-+	const size_t reserved_parts = 5; /* see below for explanation */
-+	struct kdbus_staging *staging;
-+	int ret;
-+
-+	n_parts += reserved_parts;
-+
-+	staging = kzalloc(sizeof(*staging) + n_parts * sizeof(*staging->parts) +
-+			  msg_extra_size, GFP_TEMPORARY);
-+	if (!staging)
-+		return ERR_PTR(-ENOMEM);
-+
-+	staging->msg_seqnum = atomic64_inc_return(&bus->last_message_id);
-+	staging->n_parts = 0; /* we reserve n_parts, but don't enforce them */
-+	staging->parts = (void *)(staging + 1);
-+
-+	if (msg_extra_size) /* if requested, allocate message, too */
-+		staging->msg = (void *)((u8 *)staging->parts +
-+				        n_parts * sizeof(*staging->parts));
-+
-+	staging->meta_proc = kdbus_meta_proc_new();
-+	if (IS_ERR(staging->meta_proc)) {
-+		ret = PTR_ERR(staging->meta_proc);
-+		staging->meta_proc = NULL;
-+		goto error;
-+	}
-+
-+	staging->meta_conn = kdbus_meta_conn_new();
-+	if (IS_ERR(staging->meta_conn)) {
-+		ret = PTR_ERR(staging->meta_conn);
-+		staging->meta_conn = NULL;
-+		goto error;
-+	}
-+
-+	/*
-+	 * Prepare iovecs to copy the message into the target pool. We use the
-+	 * following iovecs:
-+	 *   * iovec to copy "kdbus_msg.size"
-+	 *   * iovec to copy "struct kdbus_msg" (minus size) plus items
-+	 *   * iovec for possible padding after the items
-+	 *   * iovec for metadata items
-+	 *   * iovec for possible padding after the metadata items
-+	 *
-+	 * Make sure to update @reserved_parts if you add more parts here.
-+	 */
-+
-+	kdbus_staging_reserve(staging); /* msg.size */
-+	kdbus_staging_reserve(staging); /* msg (minus msg.size) plus items */
-+	kdbus_staging_reserve(staging); /* msg padding */
-+	kdbus_staging_reserve(staging); /* meta */
-+	kdbus_staging_reserve(staging); /* meta padding */
-+
-+	return staging;
-+
-+error:
-+	kdbus_staging_free(staging);
-+	return ERR_PTR(ret);
-+}
-+
-+struct kdbus_staging *kdbus_staging_new_kernel(struct kdbus_bus *bus,
-+					       u64 dst, u64 cookie_timeout,
-+					       size_t it_size, size_t it_type)
-+{
-+	struct kdbus_staging *staging;
-+	size_t size;
-+
-+	size = offsetof(struct kdbus_msg, items) +
-+	       KDBUS_ITEM_HEADER_SIZE + it_size;
-+
-+	staging = kdbus_staging_new(bus, 0, KDBUS_ALIGN8(size));
-+	if (IS_ERR(staging))
-+		return ERR_CAST(staging);
-+
-+	staging->msg->size = size;
-+	staging->msg->flags = (dst == KDBUS_DST_ID_BROADCAST) ?
-+							KDBUS_MSG_SIGNAL : 0;
-+	staging->msg->dst_id = dst;
-+	staging->msg->src_id = KDBUS_SRC_ID_KERNEL;
-+	staging->msg->payload_type = KDBUS_PAYLOAD_KERNEL;
-+	staging->msg->cookie_reply = cookie_timeout;
-+	staging->notify = staging->msg->items;
-+	staging->notify->size = KDBUS_ITEM_HEADER_SIZE + it_size;
-+	staging->notify->type = it_type;
-+
-+	return staging;
-+}
-+
-+struct kdbus_staging *kdbus_staging_new_user(struct kdbus_bus *bus,
-+					     struct kdbus_cmd_send *cmd,
-+					     struct kdbus_msg *msg)
-+{
-+	const size_t reserved_parts = 1; /* see below for explanation */
-+	size_t n_memfds, n_fds, n_parts;
-+	struct kdbus_staging *staging;
-+	int ret;
-+
-+	/*
-+	 * Examine user-supplied message and figure out how many resources we
-+	 * need to allocate in our staging area. This requires us to iterate
-+	 * the message twice, but saves us from re-allocating our resources
-+	 * all the time.
-+	 */
-+
-+	ret = kdbus_msg_examine(msg, bus, cmd, &n_memfds, &n_fds, &n_parts);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	n_parts += reserved_parts;
-+
-+	/*
-+	 * Allocate staging area with the number of required resources. Make
-+	 * sure that we have enough iovecs for all required parts pre-allocated
-+	 * so this will hopefully be the only memory allocation for this
-+	 * message transaction.
-+	 */
-+
-+	staging = kdbus_staging_new(bus, n_parts, 0);
-+	if (IS_ERR(staging))
-+		return ERR_CAST(staging);
-+
-+	staging->msg = msg;
-+
-+	/*
-+	 * If the message contains memfds or fd items, we need to remember some
-+	 * state so we can fill in the requested information at RECV time.
-+	 * File-descriptors cannot be installed at SEND time. Hence, allocate a
-+	 * gaps-object to remember that state. That gaps object is linked to
-+	 * from the staging area, but will also be linked to from the message
-+	 * queue of each peer. Hence, each receiver owns a reference to it, and
-+	 * it will later be used to fill the 'gaps' in message that couldn't be
-+	 * filled at SEND time.
-+	 * Note that the 'gaps' object is read-only once the staging-allocator
-+	 * returns. There might be connections receiving a queued message while
-+	 * the sender still broadcasts the message to other receivers.
-+	 */
-+
-+	if (n_memfds > 0 || n_fds > 0) {
-+		staging->gaps = kdbus_gaps_new(n_memfds, n_fds);
-+		if (IS_ERR(staging->gaps)) {
-+			ret = PTR_ERR(staging->gaps);
-+			staging->gaps = NULL;
-+			kdbus_staging_free(staging);
-+			return ERR_PTR(ret);
-+		}
-+	}
-+
-+	/*
-+	 * kdbus_staging_new() already reserves parts for message setup. For
-+	 * user-supplied messages, we add the following iovecs:
-+	 *   ... variable number of iovecs for payload ...
-+	 *   * final iovec for possible padding of payload
-+	 *
-+	 * Make sure to update @reserved_parts if you add more parts here.
-+	 */
-+
-+	ret = kdbus_staging_import(staging); /* payload */
-+	kdbus_staging_reserve(staging); /* payload padding */
-+
-+	if (ret < 0)
-+		goto error;
-+
-+	return staging;
-+
-+error:
-+	kdbus_staging_free(staging);
-+	return ERR_PTR(ret);
-+}
-+
-+struct kdbus_staging *kdbus_staging_free(struct kdbus_staging *staging)
-+{
-+	if (!staging)
-+		return NULL;
-+
-+	kdbus_meta_conn_unref(staging->meta_conn);
-+	kdbus_meta_proc_unref(staging->meta_proc);
-+	kdbus_gaps_unref(staging->gaps);
-+	kfree(staging);
-+
-+	return NULL;
-+}
-+
-+static int kdbus_staging_collect_metadata(struct kdbus_staging *staging,
-+					  struct kdbus_conn *src,
-+					  struct kdbus_conn *dst,
-+					  u64 *out_attach)
-+{
-+	u64 attach;
-+	int ret;
-+
-+	if (src)
-+		attach = kdbus_meta_msg_mask(src, dst);
-+	else
-+		attach = KDBUS_ATTACH_TIMESTAMP; /* metadata for kernel msgs */
-+
-+	if (src && !src->meta_fake) {
-+		ret = kdbus_meta_proc_collect(staging->meta_proc, attach);
-+		if (ret < 0)
-+			return ret;
-+	}
-+
-+	ret = kdbus_meta_conn_collect(staging->meta_conn, src,
-+				      staging->msg_seqnum, attach);
-+	if (ret < 0)
-+		return ret;
-+
-+	*out_attach = attach;
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_staging_emit() - emit linearized message in target pool
-+ * @staging:		staging object to create message from
-+ * @src:		sender of the message (or NULL)
-+ * @dst:		target connection to allocate message for
-+ *
-+ * This allocates a pool-slice for @dst and copies the message provided by
-+ * @staging into it. The new slice is then returned to the caller for further
-+ * processing. It's not linked into any queue, yet.
-+ *
-+ * Return: Newly allocated slice or ERR_PTR on failure.
-+ */
-+struct kdbus_pool_slice *kdbus_staging_emit(struct kdbus_staging *staging,
-+					    struct kdbus_conn *src,
-+					    struct kdbus_conn *dst)
-+{
-+	struct kdbus_item *item, *meta_items = NULL;
-+	struct kdbus_pool_slice *slice = NULL;
-+	size_t off, size, meta_size;
-+	struct iovec *v;
-+	u64 attach, msg_size;
-+	int ret;
-+
-+	/*
-+	 * Step 1:
-+	 * Collect metadata from @src depending on the attach-flags allowed for
-+	 * @dst. Translate it into the namespaces pinned by @dst.
-+	 */
-+
-+	ret = kdbus_staging_collect_metadata(staging, src, dst, &attach);
-+	if (ret < 0)
-+		goto error;
-+
-+	ret = kdbus_meta_emit(staging->meta_proc, NULL, staging->meta_conn,
-+			      dst, attach, &meta_items, &meta_size);
-+	if (ret < 0)
-+		goto error;
-+
-+	/*
-+	 * Step 2:
-+	 * Setup iovecs for the message. See kdbus_staging_new() for allocation
-+	 * of those iovecs. All reserved iovecs have been initialized with
-+	 * iov_len=0 + iov_base=zeros. Furthermore, the iovecs to copy the
-+	 * actual message payload have already been initialized and need not be
-+	 * touched.
-+	 */
-+
-+	v = staging->parts;
-+	msg_size = staging->msg->size;
-+
-+	/* msg.size */
-+	v->iov_len = sizeof(msg_size);
-+	v->iov_base = (void __user *)&msg_size;
-+	++v;
-+
-+	/* msg (after msg.size) plus items */
-+	v->iov_len = staging->msg->size - sizeof(staging->msg->size);
-+	v->iov_base = (void __user *)((u8 *)staging->msg +
-+				      sizeof(staging->msg->size));
-+	++v;
-+
-+	/* padding after msg */
-+	v->iov_len = KDBUS_ALIGN8(staging->msg->size) - staging->msg->size;
-+	v->iov_base = (void __user *)zeros;
-+	++v;
-+
-+	if (meta_size > 0) {
-+		/* metadata items */
-+		v->iov_len = meta_size;
-+		v->iov_base = (void __user *)meta_items;
-+		++v;
-+
-+		/* padding after metadata */
-+		v->iov_len = KDBUS_ALIGN8(meta_size) - meta_size;
-+		v->iov_base = (void __user *)zeros;
-+		++v;
-+
-+		msg_size = KDBUS_ALIGN8(msg_size) + meta_size;
-+	} else {
-+		/* metadata items */
-+		v->iov_len = 0;
-+		v->iov_base = (void __user *)zeros;
-+		++v;
-+
-+		/* padding after metadata */
-+		v->iov_len = 0;
-+		v->iov_base = (void __user *)zeros;
-+		++v;
-+	}
-+
-+	/* ... payload iovecs are already filled in ... */
-+
-+	/* compute overall size and fill in padding after payload */
-+	size = KDBUS_ALIGN8(msg_size);
-+
-+	if (staging->n_payload > 0) {
-+		size += staging->n_payload;
-+
-+		v = &staging->parts[staging->n_parts - 1];
-+		v->iov_len = KDBUS_ALIGN8(size) - size;
-+		v->iov_base = (void __user *)zeros;
-+
-+		size = KDBUS_ALIGN8(size);
-+	}
-+
-+	/*
-+	 * Step 3:
-+	 * The PAYLOAD_OFF items in the message contain a relative 'offset'
-+	 * field that tells the receiver where to find the actual payload. This
-+	 * offset is relative to the start of the message, and as such depends
-+	 * on the size of the metadata items we inserted. This size is variable
-+	 * and changes for each peer we send the message to. Hence, we remember
-+	 * the last relative offset that was used to calculate the 'offset'
-+	 * fields. For each message, we re-calculate it and patch all items, in
-+	 * case it changed.
-+	 */
-+
-+	off = KDBUS_ALIGN8(msg_size);
-+
-+	if (off != staging->i_payload) {
-+		KDBUS_ITEMS_FOREACH(item, staging->msg->items,
-+				    KDBUS_ITEMS_SIZE(staging->msg, items)) {
-+			if (item->type != KDBUS_ITEM_PAYLOAD_OFF)
-+				continue;
-+
-+			item->vec.offset -= staging->i_payload;
-+			item->vec.offset += off;
-+		}
-+
-+		staging->i_payload = off;
-+	}
-+
-+	/*
-+	 * Step 4:
-+	 * Allocate pool slice and copy over all data. Make sure to properly
-+	 * account on user quota.
-+	 */
-+
-+	ret = kdbus_conn_quota_inc(dst, src ? src->user : NULL, size,
-+				   staging->gaps ? staging->gaps->n_fds : 0);
-+	if (ret < 0)
-+		goto error;
-+
-+	slice = kdbus_pool_slice_alloc(dst->pool, size, true);
-+	if (IS_ERR(slice)) {
-+		ret = PTR_ERR(slice);
-+		slice = NULL;
-+		goto error;
-+	}
-+
-+	WARN_ON(kdbus_pool_slice_size(slice) != size);
-+
-+	ret = kdbus_pool_slice_copy_iovec(slice, 0, staging->parts,
-+					  staging->n_parts, size);
-+	if (ret < 0)
-+		goto error;
-+
-+	/* all done, return slice to caller */
-+	goto exit;
-+
-+error:
-+	if (slice)
-+		kdbus_conn_quota_dec(dst, src ? src->user : NULL, size,
-+				     staging->gaps ? staging->gaps->n_fds : 0);
-+	kdbus_pool_slice_release(slice);
-+	slice = ERR_PTR(ret);
-+exit:
-+	kfree(meta_items);
-+	return slice;
-+}
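(Editorial aside, not part of the patch: step 3's offset patching is easiest to follow with numbers; the example below assumes two payload VECs imported at relative offsets 0 and 16.)

	/*
	 * Editorial example, not part of the patch. Assume
	 * msg->size == 200 and two PAYLOAD_OFF items imported at
	 * relative offsets 0 and 16.
	 *
	 * First peer, 64 bytes of metadata:
	 *   msg_size = KDBUS_ALIGN8(200) + 64 = 264
	 *   off      = KDBUS_ALIGN8(264)      = 264
	 *   items patched to 264 and 280; i_payload = 264
	 *
	 * Second peer, 24 bytes of metadata:
	 *   msg_size = 224, off = 224
	 *   items re-patched: 264 - 264 + 224 = 224, and 280 -> 240
	 */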
-diff --git a/ipc/kdbus/message.h b/ipc/kdbus/message.h
-new file mode 100644
-index 0000000..298f9c9
---- /dev/null
-+++ b/ipc/kdbus/message.h
-@@ -0,0 +1,120 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_MESSAGE_H
-+#define __KDBUS_MESSAGE_H
-+
-+#include <linux/fs.h>
-+#include <linux/kref.h>
-+#include <uapi/linux/kdbus.h>
-+
-+struct kdbus_bus;
-+struct kdbus_conn;
-+struct kdbus_meta_conn;
-+struct kdbus_meta_proc;
-+struct kdbus_pool_slice;
-+
-+/**
-+ * struct kdbus_gaps - gaps in message to be filled later
-+ * @kref:		Reference counter
-+ * @n_memfds:		Number of memfds
-+ * @memfd_offsets:	Offsets of kdbus_memfd items in target slice
-+ * @memfd_files:	Array of pinned memfd files
-+ * @n_fds:		Number of fds
-+ * @fd_files:		Array of pinned, sent files
-+ * @fd_offset:		Offset of fd-array in target slice
-+ *
-+ * The 'gaps' object is used to track data that is needed to fill gaps in a
-+ * message at RECV time. Usually, we try to compile the whole message at SEND
-+ * time. This has the advantage that we don't have to cache any information and
-+ * can keep memory consumption small. Furthermore, all copy operations can
-+ * be combined into a single function call, which speeds up transactions
-+ * considerably.
-+ * However, things like file-descriptors can only be fully installed at RECV
-+ * time. The gaps object tracks this data and pins it until a message is
-+ * received. The gaps object is shared between all receivers of the same
-+ * message.
-+ */
-+struct kdbus_gaps {
-+	struct kref kref;
-+
-+	/* state tracking for KDBUS_ITEM_PAYLOAD_MEMFD entries */
-+	size_t n_memfds;
-+	u64 *memfd_offsets;
-+	struct file **memfd_files;
-+
-+	/* state tracking for KDBUS_ITEM_FDS */
-+	size_t n_fds;
-+	struct file **fd_files;
-+	u64 fd_offset;
-+};
-+
-+struct kdbus_gaps *kdbus_gaps_ref(struct kdbus_gaps *gaps);
-+struct kdbus_gaps *kdbus_gaps_unref(struct kdbus_gaps *gaps);
-+int kdbus_gaps_install(struct kdbus_gaps *gaps, struct kdbus_pool_slice *slice,
-+		       bool *out_incomplete);
-+
-+/**
-+ * struct kdbus_staging - staging area to import messages
-+ * @msg:		User-supplied message
-+ * @gaps:		Gaps-object created during import (or NULL if empty)
-+ * @msg_seqnum:		Message sequence number
-+ * @notify_entry:	Entry into list of kernel-generated notifications
-+ * @i_payload:		Current relative index of start of payload
-+ * @n_payload:		Total number of bytes needed for payload
-+ * @n_parts:		Number of parts
-+ * @parts:		Array of iovecs that make up the whole message
-+ * @meta_proc:		Process metadata of the sender (or NULL if empty)
-+ * @meta_conn:		Connection metadata of the sender (or NULL if empty)
-+ * @bloom_filter:	Pointer to the bloom-item in @msg, or NULL
-+ * @dst_name:		Pointer to the dst-name-item in @msg, or NULL
-+ * @notify:		Pointer to the notification item in @msg, or NULL
-+ *
-+ * The kdbus_staging object is a temporary staging area to import user-supplied
-+ * messages into the kernel. It is only used during SEND and dropped once the
-+ * message is queued. Any data that cannot be collected during SEND is
-+ * collected in a kdbus_gaps object and attached to the message queue.
-+ */
-+struct kdbus_staging {
-+	struct kdbus_msg *msg;
-+	struct kdbus_gaps *gaps;
-+	u64 msg_seqnum;
-+	struct list_head notify_entry;
-+
-+	/* crafted iovecs to copy the message */
-+	size_t i_payload;
-+	size_t n_payload;
-+	size_t n_parts;
-+	struct iovec *parts;
-+
-+	/* metadata state */
-+	struct kdbus_meta_proc *meta_proc;
-+	struct kdbus_meta_conn *meta_conn;
-+
-+	/* cached pointers into @msg */
-+	const struct kdbus_bloom_filter *bloom_filter;
-+	const char *dst_name;
-+	struct kdbus_item *notify;
-+};
-+
-+struct kdbus_staging *kdbus_staging_new_kernel(struct kdbus_bus *bus,
-+					       u64 dst, u64 cookie_timeout,
-+					       size_t it_size, size_t it_type);
-+struct kdbus_staging *kdbus_staging_new_user(struct kdbus_bus *bus,
-+					     struct kdbus_cmd_send *cmd,
-+					     struct kdbus_msg *msg);
-+struct kdbus_staging *kdbus_staging_free(struct kdbus_staging *staging);
-+struct kdbus_pool_slice *kdbus_staging_emit(struct kdbus_staging *staging,
-+					    struct kdbus_conn *src,
-+					    struct kdbus_conn *dst);
-+
-+#endif
-diff --git a/ipc/kdbus/metadata.c b/ipc/kdbus/metadata.c
-new file mode 100644
-index 0000000..71ca475
---- /dev/null
-+++ b/ipc/kdbus/metadata.c
-@@ -0,0 +1,1347 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/audit.h>
-+#include <linux/capability.h>
-+#include <linux/cgroup.h>
-+#include <linux/cred.h>
-+#include <linux/file.h>
-+#include <linux/fs_struct.h>
-+#include <linux/init.h>
-+#include <linux/kref.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/security.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/uidgid.h>
-+#include <linux/uio.h>
-+#include <linux/user_namespace.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "item.h"
-+#include "message.h"
-+#include "metadata.h"
-+#include "names.h"
-+
-+/**
-+ * struct kdbus_meta_proc - Process metadata
-+ * @kref:		Reference counting
-+ * @lock:		Object lock
-+ * @collected:		Bitmask of collected items
-+ * @valid:		Bitmask of collected and valid items
-+ * @cred:		Credentials
-+ * @pid:		PID of process
-+ * @tgid:		TGID of process
-+ * @ppid:		PPID of process
-+ * @tid_comm:		TID comm line
-+ * @pid_comm:		PID comm line
-+ * @exe_path:		Executable path
-+ * @root_path:		Root-FS path
-+ * @cmdline:		Command-line
-+ * @cgroup:		Full cgroup path
-+ * @seclabel:		Seclabel
-+ * @audit_loginuid:	Audit login-UID
-+ * @audit_sessionid:	Audit session-ID
-+ */
-+struct kdbus_meta_proc {
-+	struct kref kref;
-+	struct mutex lock;
-+	u64 collected;
-+	u64 valid;
-+
-+	/* KDBUS_ITEM_CREDS */
-+	/* KDBUS_ITEM_AUXGROUPS */
-+	/* KDBUS_ITEM_CAPS */
-+	const struct cred *cred;
-+
-+	/* KDBUS_ITEM_PIDS */
-+	struct pid *pid;
-+	struct pid *tgid;
-+	struct pid *ppid;
-+
-+	/* KDBUS_ITEM_TID_COMM */
-+	char tid_comm[TASK_COMM_LEN];
-+	/* KDBUS_ITEM_PID_COMM */
-+	char pid_comm[TASK_COMM_LEN];
-+
-+	/* KDBUS_ITEM_EXE */
-+	struct path exe_path;
-+	struct path root_path;
-+
-+	/* KDBUS_ITEM_CMDLINE */
-+	char *cmdline;
-+
-+	/* KDBUS_ITEM_CGROUP */
-+	char *cgroup;
-+
-+	/* KDBUS_ITEM_SECLABEL */
-+	char *seclabel;
-+
-+	/* KDBUS_ITEM_AUDIT */
-+	kuid_t audit_loginuid;
-+	unsigned int audit_sessionid;
-+};
-+
-+/**
-+ * struct kdbus_meta_conn - Connection metadata
-+ * @kref:		Reference counting
-+ * @lock:		Object lock
-+ * @collected:		Bitmask of collected items
-+ * @valid:		Bitmask of collected and valid items
-+ * @ts:			Timestamp values
-+ * @owned_names_items:	Serialized items for owned names
-+ * @owned_names_size:	Size of @owned_names_items
-+ * @conn_description:	Connection description
-+ */
-+struct kdbus_meta_conn {
-+	struct kref kref;
-+	struct mutex lock;
-+	u64 collected;
-+	u64 valid;
-+
-+	/* KDBUS_ITEM_TIMESTAMP */
-+	struct kdbus_timestamp ts;
-+
-+	/* KDBUS_ITEM_OWNED_NAME */
-+	struct kdbus_item *owned_names_items;
-+	size_t owned_names_size;
-+
-+	/* KDBUS_ITEM_CONN_DESCRIPTION */
-+	char *conn_description;
-+};
-+
-+/* fixed size equivalent of "kdbus_caps" */
-+struct kdbus_meta_caps {
-+	u32 last_cap;
-+	struct {
-+		u32 caps[_KERNEL_CAPABILITY_U32S];
-+	} set[4];
-+};
-+
-+/**
-+ * kdbus_meta_proc_new() - Create process metadata object
-+ *
-+ * Return: Pointer to new object on success, ERR_PTR on failure.
-+ */
-+struct kdbus_meta_proc *kdbus_meta_proc_new(void)
-+{
-+	struct kdbus_meta_proc *mp;
-+
-+	mp = kzalloc(sizeof(*mp), GFP_KERNEL);
-+	if (!mp)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kref_init(&mp->kref);
-+	mutex_init(&mp->lock);
-+
-+	return mp;
-+}
-+
-+static void kdbus_meta_proc_free(struct kref *kref)
-+{
-+	struct kdbus_meta_proc *mp = container_of(kref, struct kdbus_meta_proc,
-+						  kref);
-+
-+	path_put(&mp->exe_path);
-+	path_put(&mp->root_path);
-+	if (mp->cred)
-+		put_cred(mp->cred);
-+	put_pid(mp->ppid);
-+	put_pid(mp->tgid);
-+	put_pid(mp->pid);
-+
-+	kfree(mp->seclabel);
-+	kfree(mp->cmdline);
-+	kfree(mp->cgroup);
-+	kfree(mp);
-+}
-+
-+/**
-+ * kdbus_meta_proc_ref() - Gain reference
-+ * @mp:		Process metadata object
-+ *
-+ * Return: @mp is returned
-+ */
-+struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp)
-+{
-+	if (mp)
-+		kref_get(&mp->kref);
-+	return mp;
-+}
-+
-+/**
-+ * kdbus_meta_proc_unref() - Drop reference
-+ * @mp:		Process metadata object
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp)
-+{
-+	if (mp)
-+		kref_put(&mp->kref, kdbus_meta_proc_free);
-+	return NULL;
-+}
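-+
-+/*
-+ * Usage sketch for the ref/unref idiom above: since unref returns
-+ * NULL, callers can drop their reference and clear the pointer in a
-+ * single statement (illustrative example only).
-+ */
-+static void kdbus_meta_proc_example_put(struct kdbus_meta_proc **mpp)
-+{
-+	*mpp = kdbus_meta_proc_unref(*mpp);	/* *mpp is now NULL */
-+}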
-+
-+static void kdbus_meta_proc_collect_pids(struct kdbus_meta_proc *mp)
-+{
-+	struct task_struct *parent;
-+
-+	mp->pid = get_pid(task_pid(current));
-+	mp->tgid = get_pid(task_tgid(current));
-+
-+	rcu_read_lock();
-+	parent = rcu_dereference(current->real_parent);
-+	mp->ppid = get_pid(task_tgid(parent));
-+	rcu_read_unlock();
-+
-+	mp->valid |= KDBUS_ATTACH_PIDS;
-+}
-+
-+static void kdbus_meta_proc_collect_tid_comm(struct kdbus_meta_proc *mp)
-+{
-+	get_task_comm(mp->tid_comm, current);
-+	mp->valid |= KDBUS_ATTACH_TID_COMM;
-+}
-+
-+static void kdbus_meta_proc_collect_pid_comm(struct kdbus_meta_proc *mp)
-+{
-+	get_task_comm(mp->pid_comm, current->group_leader);
-+	mp->valid |= KDBUS_ATTACH_PID_COMM;
-+}
-+
-+static void kdbus_meta_proc_collect_exe(struct kdbus_meta_proc *mp)
-+{
-+	struct file *exe_file;
-+
-+	rcu_read_lock();
-+	exe_file = rcu_dereference(current->mm->exe_file);
-+	if (exe_file) {
-+		mp->exe_path = exe_file->f_path;
-+		path_get(&mp->exe_path);
-+		get_fs_root(current->fs, &mp->root_path);
-+		mp->valid |= KDBUS_ATTACH_EXE;
-+	}
-+	rcu_read_unlock();
-+}
-+
-+static int kdbus_meta_proc_collect_cmdline(struct kdbus_meta_proc *mp)
-+{
-+	struct mm_struct *mm = current->mm;
-+	char *cmdline;
-+
-+	if (!mm->arg_end)
-+		return 0;
-+
-+	cmdline = strndup_user((const char __user *)mm->arg_start,
-+			       mm->arg_end - mm->arg_start);
-+	if (IS_ERR(cmdline))
-+		return PTR_ERR(cmdline);
-+
-+	mp->cmdline = cmdline;
-+	mp->valid |= KDBUS_ATTACH_CMDLINE;
-+
-+	return 0;
-+}
-+
-+static int kdbus_meta_proc_collect_cgroup(struct kdbus_meta_proc *mp)
-+{
-+#ifdef CONFIG_CGROUPS
-+	void *page;
-+	char *s;
-+
-+	page = (void *)__get_free_page(GFP_TEMPORARY);
-+	if (!page)
-+		return -ENOMEM;
-+
-+	s = task_cgroup_path(current, page, PAGE_SIZE);
-+	if (s) {
-+		mp->cgroup = kstrdup(s, GFP_KERNEL);
-+		if (!mp->cgroup) {
-+			free_page((unsigned long)page);
-+			return -ENOMEM;
-+		}
-+	}
-+
-+	free_page((unsigned long)page);
-+	mp->valid |= KDBUS_ATTACH_CGROUP;
-+#endif
-+
-+	return 0;
-+}
-+
-+static int kdbus_meta_proc_collect_seclabel(struct kdbus_meta_proc *mp)
-+{
-+#ifdef CONFIG_SECURITY
-+	char *ctx = NULL;
-+	u32 sid, len;
-+	int ret;
-+
-+	security_task_getsecid(current, &sid);
-+	ret = security_secid_to_secctx(sid, &ctx, &len);
-+	if (ret < 0) {
-+		/*
-+		 * EOPNOTSUPP means no security module is active, so
-+		 * let's skip adding the seclabel. This effectively
-+		 * drops the SECLABEL item.
-+		 */
-+		return (ret == -EOPNOTSUPP) ? 0 : ret;
-+	}
-+
-+	mp->seclabel = kstrdup(ctx, GFP_KERNEL);
-+	security_release_secctx(ctx, len);
-+	if (!mp->seclabel)
-+		return -ENOMEM;
-+
-+	mp->valid |= KDBUS_ATTACH_SECLABEL;
-+#endif
-+
-+	return 0;
-+}
-+
-+static void kdbus_meta_proc_collect_audit(struct kdbus_meta_proc *mp)
-+{
-+#ifdef CONFIG_AUDITSYSCALL
-+	mp->audit_loginuid = audit_get_loginuid(current);
-+	mp->audit_sessionid = audit_get_sessionid(current);
-+	mp->valid |= KDBUS_ATTACH_AUDIT;
-+#endif
-+}
-+
-+/**
-+ * kdbus_meta_proc_collect() - Collect process metadata
-+ * @mp:		Process metadata object
-+ * @what:	Attach flags to collect
-+ *
-+ * This collects process metadata from current and saves it in @mp.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_meta_proc_collect(struct kdbus_meta_proc *mp, u64 what)
-+{
-+	int ret;
-+
-+	if (!mp || !(what & (KDBUS_ATTACH_CREDS |
-+			     KDBUS_ATTACH_PIDS |
-+			     KDBUS_ATTACH_AUXGROUPS |
-+			     KDBUS_ATTACH_TID_COMM |
-+			     KDBUS_ATTACH_PID_COMM |
-+			     KDBUS_ATTACH_EXE |
-+			     KDBUS_ATTACH_CMDLINE |
-+			     KDBUS_ATTACH_CGROUP |
-+			     KDBUS_ATTACH_CAPS |
-+			     KDBUS_ATTACH_SECLABEL |
-+			     KDBUS_ATTACH_AUDIT)))
-+		return 0;
-+
-+	mutex_lock(&mp->lock);
-+
-+	/* creds, auxgrps and caps share "struct cred" as context */
-+	{
-+		const u64 m_cred = KDBUS_ATTACH_CREDS |
-+				   KDBUS_ATTACH_AUXGROUPS |
-+				   KDBUS_ATTACH_CAPS;
-+
-+		if ((what & m_cred) && !(mp->collected & m_cred)) {
-+			mp->cred = get_current_cred();
-+			mp->valid |= m_cred;
-+			mp->collected |= m_cred;
-+		}
-+	}
-+
-+	if ((what & KDBUS_ATTACH_PIDS) &&
-+	    !(mp->collected & KDBUS_ATTACH_PIDS)) {
-+		kdbus_meta_proc_collect_pids(mp);
-+		mp->collected |= KDBUS_ATTACH_PIDS;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_TID_COMM) &&
-+	    !(mp->collected & KDBUS_ATTACH_TID_COMM)) {
-+		kdbus_meta_proc_collect_tid_comm(mp);
-+		mp->collected |= KDBUS_ATTACH_TID_COMM;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_PID_COMM) &&
-+	    !(mp->collected & KDBUS_ATTACH_PID_COMM)) {
-+		kdbus_meta_proc_collect_pid_comm(mp);
-+		mp->collected |= KDBUS_ATTACH_PID_COMM;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_EXE) &&
-+	    !(mp->collected & KDBUS_ATTACH_EXE)) {
-+		kdbus_meta_proc_collect_exe(mp);
-+		mp->collected |= KDBUS_ATTACH_EXE;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_CMDLINE) &&
-+	    !(mp->collected & KDBUS_ATTACH_CMDLINE)) {
-+		ret = kdbus_meta_proc_collect_cmdline(mp);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mp->collected |= KDBUS_ATTACH_CMDLINE;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_CGROUP) &&
-+	    !(mp->collected & KDBUS_ATTACH_CGROUP)) {
-+		ret = kdbus_meta_proc_collect_cgroup(mp);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mp->collected |= KDBUS_ATTACH_CGROUP;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_SECLABEL) &&
-+	    !(mp->collected & KDBUS_ATTACH_SECLABEL)) {
-+		ret = kdbus_meta_proc_collect_seclabel(mp);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mp->collected |= KDBUS_ATTACH_SECLABEL;
-+	}
-+
-+	if ((what & KDBUS_ATTACH_AUDIT) &&
-+	    !(mp->collected & KDBUS_ATTACH_AUDIT)) {
-+		kdbus_meta_proc_collect_audit(mp);
-+		mp->collected |= KDBUS_ATTACH_AUDIT;
-+	}
-+
-+	ret = 0;
-+
-+exit_unlock:
-+	mutex_unlock(&mp->lock);
-+	return ret;
-+}
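-+
-+/*
-+ * Illustrative sketch of the collection flow: allocate a process
-+ * metadata object, collect a set of items from 'current' and drop the
-+ * reference again. The attach-flag selection is an arbitrary example.
-+ */
-+static int kdbus_meta_proc_example_collect(void)
-+{
-+	struct kdbus_meta_proc *mp;
-+	int ret;
-+
-+	mp = kdbus_meta_proc_new();
-+	if (IS_ERR(mp))
-+		return PTR_ERR(mp);
-+
-+	ret = kdbus_meta_proc_collect(mp, KDBUS_ATTACH_CREDS |
-+					  KDBUS_ATTACH_PIDS |
-+					  KDBUS_ATTACH_EXE);
-+	kdbus_meta_proc_unref(mp);
-+	return ret;
-+}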
-+
-+/**
-+ * kdbus_meta_fake_new() - Create fake metadata object
-+ *
-+ * Return: Pointer to new object on success, ERR_PTR on failure.
-+ */
-+struct kdbus_meta_fake *kdbus_meta_fake_new(void)
-+{
-+	struct kdbus_meta_fake *mf;
-+
-+	mf = kzalloc(sizeof(*mf), GFP_KERNEL);
-+	if (!mf)
-+		return ERR_PTR(-ENOMEM);
-+
-+	return mf;
-+}
-+
-+/**
-+ * kdbus_meta_fake_free() - Free fake metadata object
-+ * @mf:		Fake metadata object
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_meta_fake *kdbus_meta_fake_free(struct kdbus_meta_fake *mf)
-+{
-+	if (mf) {
-+		put_pid(mf->ppid);
-+		put_pid(mf->tgid);
-+		put_pid(mf->pid);
-+		kfree(mf->seclabel);
-+		kfree(mf);
-+	}
-+
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_meta_fake_collect() - Fill fake metadata from faked credentials
-+ * @mf:		Fake metadata object
-+ * @creds:	Creds to set, may be %NULL
-+ * @pids:	PIDs to set, may be %NULL
-+ * @seclabel:	Seclabel to set, may be %NULL
-+ *
-+ * This function takes information stored in @creds, @pids and @seclabel and
-+ * resolves them to kernel representations, if possible. This call uses the
-+ * current task's namespaces to resolve the given information.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_meta_fake_collect(struct kdbus_meta_fake *mf,
-+			    const struct kdbus_creds *creds,
-+			    const struct kdbus_pids *pids,
-+			    const char *seclabel)
-+{
-+	if (mf->valid)
-+		return -EALREADY;
-+
-+	if (creds) {
-+		struct user_namespace *ns = current_user_ns();
-+
-+		mf->uid		= make_kuid(ns, creds->uid);
-+		mf->euid	= make_kuid(ns, creds->euid);
-+		mf->suid	= make_kuid(ns, creds->suid);
-+		mf->fsuid	= make_kuid(ns, creds->fsuid);
-+
-+		mf->gid		= make_kgid(ns, creds->gid);
-+		mf->egid	= make_kgid(ns, creds->egid);
-+		mf->sgid	= make_kgid(ns, creds->sgid);
-+		mf->fsgid	= make_kgid(ns, creds->fsgid);
-+
-+		if ((creds->uid   != (uid_t)-1 && !uid_valid(mf->uid))   ||
-+		    (creds->euid  != (uid_t)-1 && !uid_valid(mf->euid))  ||
-+		    (creds->suid  != (uid_t)-1 && !uid_valid(mf->suid))  ||
-+		    (creds->fsuid != (uid_t)-1 && !uid_valid(mf->fsuid)) ||
-+		    (creds->gid   != (gid_t)-1 && !gid_valid(mf->gid))   ||
-+		    (creds->egid  != (gid_t)-1 && !gid_valid(mf->egid))  ||
-+		    (creds->sgid  != (gid_t)-1 && !gid_valid(mf->sgid))  ||
-+		    (creds->fsgid != (gid_t)-1 && !gid_valid(mf->fsgid)))
-+			return -EINVAL;
-+
-+		mf->valid |= KDBUS_ATTACH_CREDS;
-+	}
-+
-+	if (pids) {
-+		mf->pid = get_pid(find_vpid(pids->tid));
-+		mf->tgid = get_pid(find_vpid(pids->pid));
-+		mf->ppid = get_pid(find_vpid(pids->ppid));
-+
-+		if ((pids->tid != 0 && !mf->pid) ||
-+		    (pids->pid != 0 && !mf->tgid) ||
-+		    (pids->ppid != 0 && !mf->ppid)) {
-+			put_pid(mf->pid);
-+			put_pid(mf->tgid);
-+			put_pid(mf->ppid);
-+			mf->pid = NULL;
-+			mf->tgid = NULL;
-+			mf->ppid = NULL;
-+			return -EINVAL;
-+		}
-+
-+		mf->valid |= KDBUS_ATTACH_PIDS;
-+	}
-+
-+	if (seclabel) {
-+		mf->seclabel = kstrdup(seclabel, GFP_KERNEL);
-+		if (!mf->seclabel)
-+			return -ENOMEM;
-+
-+		mf->valid |= KDBUS_ATTACH_SECLABEL;
-+	}
-+
-+	return 0;
-+}
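-+
-+/*
-+ * Illustrative sketch: faking metadata for a proxy connection. The
-+ * creds/pids/seclabel inputs are assumed to come from a privileged
-+ * caller; on failure the half-filled object is freed again.
-+ */
-+static int kdbus_meta_fake_example(const struct kdbus_creds *creds,
-+				   const struct kdbus_pids *pids,
-+				   const char *seclabel,
-+				   struct kdbus_meta_fake **out)
-+{
-+	struct kdbus_meta_fake *mf;
-+	int ret;
-+
-+	mf = kdbus_meta_fake_new();
-+	if (IS_ERR(mf))
-+		return PTR_ERR(mf);
-+
-+	ret = kdbus_meta_fake_collect(mf, creds, pids, seclabel);
-+	if (ret < 0) {
-+		kdbus_meta_fake_free(mf);
-+		return ret;
-+	}
-+
-+	*out = mf;
-+	return 0;
-+}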
-+
-+/**
-+ * kdbus_meta_conn_new() - Create connection metadata object
-+ *
-+ * Return: Pointer to new object on success, ERR_PTR on failure.
-+ */
-+struct kdbus_meta_conn *kdbus_meta_conn_new(void)
-+{
-+	struct kdbus_meta_conn *mc;
-+
-+	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
-+	if (!mc)
-+		return ERR_PTR(-ENOMEM);
-+
-+	kref_init(&mc->kref);
-+	mutex_init(&mc->lock);
-+
-+	return mc;
-+}
-+
-+static void kdbus_meta_conn_free(struct kref *kref)
-+{
-+	struct kdbus_meta_conn *mc =
-+		container_of(kref, struct kdbus_meta_conn, kref);
-+
-+	kfree(mc->conn_description);
-+	kfree(mc->owned_names_items);
-+	kfree(mc);
-+}
-+
-+/**
-+ * kdbus_meta_conn_ref() - Gain reference
-+ * @mc:		Connection metadata object
-+ *
-+ * Return: @mc is returned
-+ */
-+struct kdbus_meta_conn *kdbus_meta_conn_ref(struct kdbus_meta_conn *mc)
-+{
-+	if (mc)
-+		kref_get(&mc->kref);
-+	return mc;
-+}
-+
-+/**
-+ * kdbus_meta_conn_unref() - Drop reference
-+ * @mc:		Connection metadata object
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_meta_conn *kdbus_meta_conn_unref(struct kdbus_meta_conn *mc)
-+{
-+	if (mc)
-+		kref_put(&mc->kref, kdbus_meta_conn_free);
-+	return NULL;
-+}
-+
-+static void kdbus_meta_conn_collect_timestamp(struct kdbus_meta_conn *mc,
-+					      u64 msg_seqnum)
-+{
-+	mc->ts.monotonic_ns = ktime_get_ns();
-+	mc->ts.realtime_ns = ktime_get_real_ns();
-+
-+	if (msg_seqnum)
-+		mc->ts.seqnum = msg_seqnum;
-+
-+	mc->valid |= KDBUS_ATTACH_TIMESTAMP;
-+}
-+
-+static int kdbus_meta_conn_collect_names(struct kdbus_meta_conn *mc,
-+					 struct kdbus_conn *conn)
-+{
-+	const struct kdbus_name_owner *owner;
-+	struct kdbus_item *item;
-+	size_t slen, size;
-+
-+	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
-+
-+	size = 0;
-+	/* open-code length calculation to avoid final padding */
-+	list_for_each_entry(owner, &conn->names_list, conn_entry)
-+		if (!(owner->flags & KDBUS_NAME_IN_QUEUE))
-+			size = KDBUS_ALIGN8(size) + KDBUS_ITEM_HEADER_SIZE +
-+				sizeof(struct kdbus_name) +
-+				strlen(owner->name->name) + 1;
-+
-+	if (!size)
-+		return 0;
-+
-+	/* make sure we include zeroed padding for convenience helpers */
-+	item = kmalloc(KDBUS_ALIGN8(size), GFP_KERNEL);
-+	if (!item)
-+		return -ENOMEM;
-+
-+	mc->owned_names_items = item;
-+	mc->owned_names_size = size;
-+
-+	list_for_each_entry(owner, &conn->names_list, conn_entry) {
-+		if (owner->flags & KDBUS_NAME_IN_QUEUE)
-+			continue;
-+
-+		slen = strlen(owner->name->name) + 1;
-+		kdbus_item_set(item, KDBUS_ITEM_OWNED_NAME, NULL,
-+			       sizeof(struct kdbus_name) + slen);
-+		item->name.flags = owner->flags;
-+		memcpy(item->name.name, owner->name->name, slen);
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+
-+	/* sanity check: the buffer should be completely written now */
-+	WARN_ON((u8 *)item !=
-+			(u8 *)mc->owned_names_items + KDBUS_ALIGN8(size));
-+
-+	mc->valid |= KDBUS_ATTACH_NAMES;
-+	return 0;
-+}
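-+
-+/*
-+ * Worked example for the open-coded size calculation above, assuming
-+ * the common 16-byte item header and an 8-byte struct kdbus_name:
-+ *
-+ *	"org.a"         -> 16 + 8 + 6  = 30, padded to 32
-+ *	"org.example.b" -> 16 + 8 + 14 = 38, no trailing padding
-+ *
-+ * 'size' is 32 + 38 = 70, while the buffer itself is allocated with
-+ * KDBUS_ALIGN8(70) = 72 bytes so iterators may touch the padded tail.
-+ */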
-+
-+static int kdbus_meta_conn_collect_description(struct kdbus_meta_conn *mc,
-+					       struct kdbus_conn *conn)
-+{
-+	if (!conn->description)
-+		return 0;
-+
-+	mc->conn_description = kstrdup(conn->description, GFP_KERNEL);
-+	if (!mc->conn_description)
-+		return -ENOMEM;
-+
-+	mc->valid |= KDBUS_ATTACH_CONN_DESCRIPTION;
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_meta_conn_collect() - Collect connection metadata
-+ * @mc:		Message metadata object
-+ * @conn:	Connection to collect data from
-+ * @msg_seqnum:	Sequence number of the message to send
-+ * @what:	Attach flags to collect
-+ *
-+ * This collects connection metadata from @msg_seqnum and @conn and saves it
-+ * in @mc.
-+ *
-+ * If KDBUS_ATTACH_NAMES is set in @what and @conn is non-NULL, the caller must
-+ * hold the name-registry read-lock of conn->ep->bus->name_registry.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
-+			    struct kdbus_conn *conn,
-+			    u64 msg_seqnum, u64 what)
-+{
-+	int ret;
-+
-+	if (!mc || !(what & (KDBUS_ATTACH_TIMESTAMP |
-+			     KDBUS_ATTACH_NAMES |
-+			     KDBUS_ATTACH_CONN_DESCRIPTION)))
-+		return 0;
-+
-+	mutex_lock(&mc->lock);
-+
-+	if (msg_seqnum && (what & KDBUS_ATTACH_TIMESTAMP) &&
-+	    !(mc->collected & KDBUS_ATTACH_TIMESTAMP)) {
-+		kdbus_meta_conn_collect_timestamp(mc, msg_seqnum);
-+		mc->collected |= KDBUS_ATTACH_TIMESTAMP;
-+	}
-+
-+	if (conn && (what & KDBUS_ATTACH_NAMES) &&
-+	    !(mc->collected & KDBUS_ATTACH_NAMES)) {
-+		ret = kdbus_meta_conn_collect_names(mc, conn);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mc->collected |= KDBUS_ATTACH_NAMES;
-+	}
-+
-+	if (conn && (what & KDBUS_ATTACH_CONN_DESCRIPTION) &&
-+	    !(mc->collected & KDBUS_ATTACH_CONN_DESCRIPTION)) {
-+		ret = kdbus_meta_conn_collect_description(mc, conn);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mc->collected |= KDBUS_ATTACH_CONN_DESCRIPTION;
-+	}
-+
-+	ret = 0;
-+
-+exit_unlock:
-+	mutex_unlock(&mc->lock);
-+	return ret;
-+}
-+
-+static void kdbus_meta_export_caps(struct kdbus_meta_caps *out,
-+				   const struct kdbus_meta_proc *mp,
-+				   struct user_namespace *user_ns)
-+{
-+	struct user_namespace *iter;
-+	const struct cred *cred = mp->cred;
-+	bool parent = false, owner = false;
-+	int i;
-+
-+	/*
-+	 * This translates the effective capabilities of 'cred' into the given
-+	 * user-namespace. If the given user-namespace is a child-namespace of
-+	 * the user-namespace of 'cred', the mask can be copied verbatim. If
-+	 * not, the mask is cleared.
-+	 * There's one exception: If 'cred' is the owner of any user-namespace
-+	 * in the path between the given user-namespace and the user-namespace
-+	 * of 'cred', then it has all effective capabilities set. This means
-+	 * the user who created a user-namespace always has all effective
-+	 * capabilities in any child namespaces. Note that this is based on the
-+	 * uid of the namespace creator, not the task hierarchy.
-+	 */
-+	for (iter = user_ns; iter; iter = iter->parent) {
-+		if (iter == cred->user_ns) {
-+			parent = true;
-+			break;
-+		}
-+
-+		if (iter == &init_user_ns)
-+			break;
-+
-+		if ((iter->parent == cred->user_ns) &&
-+		    uid_eq(iter->owner, cred->euid)) {
-+			owner = true;
-+			break;
-+		}
-+	}
-+
-+	out->last_cap = CAP_LAST_CAP;
-+
-+	CAP_FOR_EACH_U32(i) {
-+		if (parent) {
-+			out->set[0].caps[i] = cred->cap_inheritable.cap[i];
-+			out->set[1].caps[i] = cred->cap_permitted.cap[i];
-+			out->set[2].caps[i] = cred->cap_effective.cap[i];
-+			out->set[3].caps[i] = cred->cap_bset.cap[i];
-+		} else if (owner) {
-+			out->set[0].caps[i] = 0U;
-+			out->set[1].caps[i] = ~0U;
-+			out->set[2].caps[i] = ~0U;
-+			out->set[3].caps[i] = ~0U;
-+		} else {
-+			out->set[0].caps[i] = 0U;
-+			out->set[1].caps[i] = 0U;
-+			out->set[2].caps[i] = 0U;
-+			out->set[3].caps[i] = 0U;
-+		}
-+	}
-+
-+	/* clear unused bits */
-+	for (i = 0; i < 4; i++)
-+		out->set[i].caps[CAP_TO_INDEX(CAP_LAST_CAP)] &=
-+					CAP_LAST_U32_VALID_MASK;
-+}
-+
-+/* This is equivalent to from_kuid_munged(), but maps INVALID_UID to itself */
-+static uid_t kdbus_from_kuid_keep(struct user_namespace *ns, kuid_t uid)
-+{
-+	return uid_valid(uid) ? from_kuid_munged(ns, uid) : ((uid_t)-1);
-+}
-+
-+/* This is equivalent to from_kgid_munged(), but maps INVALID_GID to itself */
-+static gid_t kdbus_from_kgid_keep(struct user_namespace *ns, kgid_t gid)
-+{
-+	return gid_valid(gid) ? from_kgid_munged(ns, gid) : ((gid_t)-1);
-+}
-+
-+struct kdbus_meta_staging {
-+	const struct kdbus_meta_proc *mp;
-+	const struct kdbus_meta_fake *mf;
-+	const struct kdbus_meta_conn *mc;
-+	const struct kdbus_conn *conn;
-+	u64 mask;
-+
-+	void *exe;
-+	const char *exe_path;
-+};
-+
-+static size_t kdbus_meta_measure(struct kdbus_meta_staging *staging)
-+{
-+	const struct kdbus_meta_proc *mp = staging->mp;
-+	const struct kdbus_meta_fake *mf = staging->mf;
-+	const struct kdbus_meta_conn *mc = staging->mc;
-+	const u64 mask = staging->mask;
-+	size_t size = 0;
-+
-+	/* process metadata */
-+
-+	if (mf && (mask & KDBUS_ATTACH_CREDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
-+	else if (mp && (mask & KDBUS_ATTACH_CREDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
-+
-+	if (mf && (mask & KDBUS_ATTACH_PIDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
-+	else if (mp && (mask & KDBUS_ATTACH_PIDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
-+
-+	if (mp && (mask & KDBUS_ATTACH_AUXGROUPS))
-+		size += KDBUS_ITEM_SIZE(mp->cred->group_info->ngroups *
-+					sizeof(u64));
-+
-+	if (mp && (mask & KDBUS_ATTACH_TID_COMM))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->tid_comm) + 1);
-+
-+	if (mp && (mask & KDBUS_ATTACH_PID_COMM))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->pid_comm) + 1);
-+
-+	if (staging->exe_path && (mask & KDBUS_ATTACH_EXE))
-+		size += KDBUS_ITEM_SIZE(strlen(staging->exe_path) + 1);
-+
-+	if (mp && (mask & KDBUS_ATTACH_CMDLINE))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->cmdline) + 1);
-+
-+	if (mp && (mask & KDBUS_ATTACH_CGROUP))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->cgroup) + 1);
-+
-+	if (mp && (mask & KDBUS_ATTACH_CAPS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_meta_caps));
-+
-+	if (mf && (mask & KDBUS_ATTACH_SECLABEL))
-+		size += KDBUS_ITEM_SIZE(strlen(mf->seclabel) + 1);
-+	else if (mp && (mask & KDBUS_ATTACH_SECLABEL))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->seclabel) + 1);
-+
-+	if (mp && (mask & KDBUS_ATTACH_AUDIT))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_audit));
-+
-+	/* connection metadata */
-+
-+	if (mc && (mask & KDBUS_ATTACH_NAMES))
-+		size += KDBUS_ALIGN8(mc->owned_names_size);
-+
-+	if (mc && (mask & KDBUS_ATTACH_CONN_DESCRIPTION))
-+		size += KDBUS_ITEM_SIZE(strlen(mc->conn_description) + 1);
-+
-+	if (mc && (mask & KDBUS_ATTACH_TIMESTAMP))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
-+
-+	return size;
-+}
-+
-+static struct kdbus_item *kdbus_write_head(struct kdbus_item **iter,
-+					   u64 type, u64 size)
-+{
-+	struct kdbus_item *item = *iter;
-+	size_t padding;
-+
-+	item->type = type;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + size;
-+
-+	/* clear padding */
-+	padding = KDBUS_ALIGN8(item->size) - item->size;
-+	if (padding)
-+		memset(item->data + size, 0, padding);
-+
-+	*iter = KDBUS_ITEM_NEXT(item);
-+	return item;
-+}
-+
-+static struct kdbus_item *kdbus_write_full(struct kdbus_item **iter,
-+					   u64 type, u64 size, const void *data)
-+{
-+	struct kdbus_item *item;
-+
-+	item = kdbus_write_head(iter, type, size);
-+	memcpy(item->data, data, size);
-+	return item;
-+}
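-+
-+/*
-+ * Illustrative sketch of the two writers above: one variable-length
-+ * string item followed by one fixed-size item, serialized into a
-+ * buffer that was sized via kdbus_meta_measure(). The audit values
-+ * are example data.
-+ */
-+static void kdbus_write_example(void *mem, const char *comm)
-+{
-+	struct kdbus_item *items = mem;
-+	struct kdbus_item *item;
-+
-+	kdbus_write_full(&items, KDBUS_ITEM_PID_COMM,
-+			 strlen(comm) + 1, comm);
-+
-+	item = kdbus_write_head(&items, KDBUS_ITEM_AUDIT,
-+				sizeof(struct kdbus_audit));
-+	item->audit = (struct kdbus_audit){
-+		.loginuid = 1000,
-+		.sessionid = 1,
-+	};
-+}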
-+
-+static size_t kdbus_meta_write(struct kdbus_meta_staging *staging, void *mem,
-+			       size_t size)
-+{
-+	struct user_namespace *user_ns = staging->conn->cred->user_ns;
-+	struct pid_namespace *pid_ns = ns_of_pid(staging->conn->pid);
-+	struct kdbus_item *item = NULL, *items = mem;
-+	u8 *end, *owned_names_end = NULL;
-+
-+	/* process metadata */
-+
-+	if (staging->mf && (staging->mask & KDBUS_ATTACH_CREDS)) {
-+		const struct kdbus_meta_fake *mf = staging->mf;
-+
-+		item = kdbus_write_head(&items, KDBUS_ITEM_CREDS,
-+					sizeof(struct kdbus_creds));
-+		item->creds = (struct kdbus_creds){
-+			.uid	= kdbus_from_kuid_keep(user_ns, mf->uid),
-+			.euid	= kdbus_from_kuid_keep(user_ns, mf->euid),
-+			.suid	= kdbus_from_kuid_keep(user_ns, mf->suid),
-+			.fsuid	= kdbus_from_kuid_keep(user_ns, mf->fsuid),
-+			.gid	= kdbus_from_kgid_keep(user_ns, mf->gid),
-+			.egid	= kdbus_from_kgid_keep(user_ns, mf->egid),
-+			.sgid	= kdbus_from_kgid_keep(user_ns, mf->sgid),
-+			.fsgid	= kdbus_from_kgid_keep(user_ns, mf->fsgid),
-+		};
-+	} else if (staging->mp && (staging->mask & KDBUS_ATTACH_CREDS)) {
-+		const struct cred *c = staging->mp->cred;
-+
-+		item = kdbus_write_head(&items, KDBUS_ITEM_CREDS,
-+					sizeof(struct kdbus_creds));
-+		item->creds = (struct kdbus_creds){
-+			.uid	= kdbus_from_kuid_keep(user_ns, c->uid),
-+			.euid	= kdbus_from_kuid_keep(user_ns, c->euid),
-+			.suid	= kdbus_from_kuid_keep(user_ns, c->suid),
-+			.fsuid	= kdbus_from_kuid_keep(user_ns, c->fsuid),
-+			.gid	= kdbus_from_kgid_keep(user_ns, c->gid),
-+			.egid	= kdbus_from_kgid_keep(user_ns, c->egid),
-+			.sgid	= kdbus_from_kgid_keep(user_ns, c->sgid),
-+			.fsgid	= kdbus_from_kgid_keep(user_ns, c->fsgid),
-+		};
-+	}
-+
-+	if (staging->mf && (staging->mask & KDBUS_ATTACH_PIDS)) {
-+		item = kdbus_write_head(&items, KDBUS_ITEM_PIDS,
-+					sizeof(struct kdbus_pids));
-+		item->pids = (struct kdbus_pids){
-+			.pid = pid_nr_ns(staging->mf->tgid, pid_ns),
-+			.tid = pid_nr_ns(staging->mf->pid, pid_ns),
-+			.ppid = pid_nr_ns(staging->mf->ppid, pid_ns),
-+		};
-+	} else if (staging->mp && (staging->mask & KDBUS_ATTACH_PIDS)) {
-+		item = kdbus_write_head(&items, KDBUS_ITEM_PIDS,
-+					sizeof(struct kdbus_pids));
-+		item->pids = (struct kdbus_pids){
-+			.pid = pid_nr_ns(staging->mp->tgid, pid_ns),
-+			.tid = pid_nr_ns(staging->mp->pid, pid_ns),
-+			.ppid = pid_nr_ns(staging->mp->ppid, pid_ns),
-+		};
-+	}
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_AUXGROUPS)) {
-+		const struct group_info *info = staging->mp->cred->group_info;
-+		size_t i;
-+
-+		item = kdbus_write_head(&items, KDBUS_ITEM_AUXGROUPS,
-+					info->ngroups * sizeof(u64));
-+		for (i = 0; i < info->ngroups; ++i)
-+			item->data64[i] = from_kgid_munged(user_ns,
-+							   GROUP_AT(info, i));
-+	}
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_TID_COMM))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_TID_COMM,
-+					strlen(staging->mp->tid_comm) + 1,
-+					staging->mp->tid_comm);
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_PID_COMM))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_PID_COMM,
-+					strlen(staging->mp->pid_comm) + 1,
-+					staging->mp->pid_comm);
-+
-+	if (staging->exe_path && (staging->mask & KDBUS_ATTACH_EXE))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_EXE,
-+					strlen(staging->exe_path) + 1,
-+					staging->exe_path);
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_CMDLINE))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_CMDLINE,
-+					strlen(staging->mp->cmdline) + 1,
-+					staging->mp->cmdline);
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_CGROUP))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_CGROUP,
-+					strlen(staging->mp->cgroup) + 1,
-+					staging->mp->cgroup);
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_CAPS)) {
-+		item = kdbus_write_head(&items, KDBUS_ITEM_CAPS,
-+					sizeof(struct kdbus_meta_caps));
-+		kdbus_meta_export_caps((void *)&item->caps, staging->mp,
-+				       user_ns);
-+	}
-+
-+	if (staging->mf && (staging->mask & KDBUS_ATTACH_SECLABEL))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_SECLABEL,
-+					strlen(staging->mf->seclabel) + 1,
-+					staging->mf->seclabel);
-+	else if (staging->mp && (staging->mask & KDBUS_ATTACH_SECLABEL))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_SECLABEL,
-+					strlen(staging->mp->seclabel) + 1,
-+					staging->mp->seclabel);
-+
-+	if (staging->mp && (staging->mask & KDBUS_ATTACH_AUDIT)) {
-+		item = kdbus_write_head(&items, KDBUS_ITEM_AUDIT,
-+					sizeof(struct kdbus_audit));
-+		item->audit = (struct kdbus_audit){
-+			.loginuid = from_kuid(user_ns,
-+					      staging->mp->audit_loginuid),
-+			.sessionid = staging->mp->audit_sessionid,
-+		};
-+	}
-+
-+	/* connection metadata */
-+
-+	if (staging->mc && (staging->mask & KDBUS_ATTACH_NAMES)) {
-+		memcpy(items, staging->mc->owned_names_items,
-+		       KDBUS_ALIGN8(staging->mc->owned_names_size));
-+		owned_names_end = (u8 *)items + staging->mc->owned_names_size;
-+		items = (void *)KDBUS_ALIGN8((unsigned long)owned_names_end);
-+	}
-+
-+	if (staging->mc && (staging->mask & KDBUS_ATTACH_CONN_DESCRIPTION))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_CONN_DESCRIPTION,
-+				strlen(staging->mc->conn_description) + 1,
-+				staging->mc->conn_description);
-+
-+	if (staging->mc && (staging->mask & KDBUS_ATTACH_TIMESTAMP))
-+		item = kdbus_write_full(&items, KDBUS_ITEM_TIMESTAMP,
-+					sizeof(staging->mc->ts),
-+					&staging->mc->ts);
-+
-+	/*
-+	 * Return real size (minus trailing padding). In case of 'owned_names'
-+	 * we cannot deduce it from item->size, so treat it specially.
-+	 */
-+
-+	if (items == (void *)KDBUS_ALIGN8((unsigned long)owned_names_end))
-+		end = owned_names_end;
-+	else if (item)
-+		end = (u8 *)item + item->size;
-+	else
-+		end = mem;
-+
-+	WARN_ON((u8 *)items - (u8 *)mem != size);
-+	WARN_ON((void *)KDBUS_ALIGN8((unsigned long)end) != (void *)items);
-+
-+	return end - (u8 *)mem;
-+}
-+
-+int kdbus_meta_emit(struct kdbus_meta_proc *mp,
-+		    struct kdbus_meta_fake *mf,
-+		    struct kdbus_meta_conn *mc,
-+		    struct kdbus_conn *conn,
-+		    u64 mask,
-+		    struct kdbus_item **out_items,
-+		    size_t *out_size)
-+{
-+	struct kdbus_meta_staging staging = {};
-+	struct kdbus_item *items = NULL;
-+	size_t size = 0;
-+	int ret;
-+
-+	if (WARN_ON(mf && mp))
-+		mp = NULL;
-+
-+	staging.mp = mp;
-+	staging.mf = mf;
-+	staging.mc = mc;
-+	staging.conn = conn;
-+
-+	/* get mask of valid items */
-+	if (mf)
-+		staging.mask |= mf->valid;
-+	if (mp) {
-+		mutex_lock(&mp->lock);
-+		staging.mask |= mp->valid;
-+		mutex_unlock(&mp->lock);
-+	}
-+	if (mc) {
-+		mutex_lock(&mc->lock);
-+		staging.mask |= mc->valid;
-+		mutex_unlock(&mc->lock);
-+	}
-+
-+	staging.mask &= mask;
-+
-+	if (!staging.mask) { /* bail out if nothing to do */
-+		ret = 0;
-+		goto exit;
-+	}
-+
-+	/* EXE is special as it needs a temporary page to assemble */
-+	if (mp && (staging.mask & KDBUS_ATTACH_EXE)) {
-+		struct path p;
-+
-+		/*
-+		 * XXX: We need access to __d_path() so we can write the path
-+		 * relative to conn->root_path. Once upstream, we need
-+		 * EXPORT_SYMBOL(__d_path) or an equivalent of d_path() that
-+		 * takes the root path directly. Until then, we drop this item
-+		 * if the root-paths differ.
-+		 */
-+
-+		get_fs_root(current->fs, &p);
-+		if (path_equal(&p, &conn->root_path)) {
-+			staging.exe = (void *)__get_free_page(GFP_TEMPORARY);
-+			if (!staging.exe) {
-+				path_put(&p);
-+				ret = -ENOMEM;
-+				goto exit;
-+			}
-+
-+			staging.exe_path = d_path(&mp->exe_path, staging.exe,
-+						  PAGE_SIZE);
-+			if (IS_ERR(staging.exe_path)) {
-+				path_put(&p);
-+				ret = PTR_ERR(staging.exe_path);
-+				goto exit;
-+			}
-+		}
-+		path_put(&p);
-+	}
-+
-+	size = kdbus_meta_measure(&staging);
-+	if (!size) { /* bail out if nothing to do */
-+		ret = 0;
-+		goto exit;
-+	}
-+
-+	items = kmalloc(size, GFP_KERNEL);
-+	if (!items) {
-+		ret = -ENOMEM;
-+		goto exit;
-+	}
-+
-+	size = kdbus_meta_write(&staging, items, size);
-+	if (!size) {
-+		kfree(items);
-+		items = NULL;
-+	}
-+
-+	ret = 0;
-+
-+exit:
-+	if (staging.exe)
-+		free_page((unsigned long)staging.exe);
-+	if (ret >= 0) {
-+		*out_items = items;
-+		*out_size = size;
-+	}
-+	return ret;
-+}
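-+
-+/*
-+ * Illustrative sketch: emitting merged metadata for a receiver. The
-+ * mask would normally come from kdbus_meta_msg_mask() below; the one
-+ * used here is an example. 'items' is kmalloc'ed and owned by the
-+ * caller.
-+ */
-+static int kdbus_meta_emit_example(struct kdbus_meta_proc *mp,
-+				   struct kdbus_meta_conn *mc,
-+				   struct kdbus_conn *rcv)
-+{
-+	struct kdbus_item *items = NULL;
-+	size_t size = 0;
-+	int ret;
-+
-+	ret = kdbus_meta_emit(mp, NULL, mc, rcv,
-+			      KDBUS_ATTACH_CREDS | KDBUS_ATTACH_TIMESTAMP,
-+			      &items, &size);
-+	if (ret < 0)
-+		return ret;
-+
-+	/* ... copy 'items'/'size' into the receiver's pool slice ... */
-+
-+	kfree(items);
-+	return 0;
-+}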
-+
-+enum {
-+	KDBUS_META_PROC_NONE,
-+	KDBUS_META_PROC_NORMAL,
-+};
-+
-+/**
-+ * kdbus_proc_permission() - check /proc permissions on target pid
-+ * @pid_ns:		namespace we operate in
-+ * @cred:		credentials of requestor
-+ * @target:		target process
-+ *
-+ * This checks whether a process with credentials @cred can access information
-+ * of @target in the namespace @pid_ns. This tries to follow /proc permissions,
-+ * but is slightly more restrictive.
-+ *
-+ * Return: The /proc access level (KDBUS_META_PROC_*) is returned.
-+ */
-+static unsigned int kdbus_proc_permission(const struct pid_namespace *pid_ns,
-+					  const struct cred *cred,
-+					  struct pid *target)
-+{
-+	if (pid_ns->hide_pid < 1)
-+		return KDBUS_META_PROC_NORMAL;
-+
-+	/* XXX: we need groups_search() exported for aux-groups */
-+	if (gid_eq(cred->egid, pid_ns->pid_gid))
-+		return KDBUS_META_PROC_NORMAL;
-+
-+	/*
-+	 * XXX: If ptrace_may_access(PTRACE_MODE_READ) is granted, you can
-+	 * overwrite hide_pid. However, ptrace_may_access() only supports
-+	 * checking 'current', so we cannot use it here. We simply decide
-+	 * not to support this override, so there is no need to worry.
-+	 */
-+
-+	return KDBUS_META_PROC_NONE;
-+}
-+
-+/**
-+ * kdbus_meta_proc_mask() - calculate which metadata would be visible to
-+ *			    a connection via /proc
-+ * @prv_pid:		pid of metadata provider
-+ * @req_pid:		pid of metadata requestor
-+ * @req_cred:		credentials of metadata requestor
-+ * @wanted:		metadata that is requested
-+ *
-+ * This checks which metadata items of @prv_pid can be read via /proc by the
-+ * requestor @req_pid.
-+ *
-+ * Return: Set of metadata flags the requestor can see (limited by @wanted).
-+ */
-+static u64 kdbus_meta_proc_mask(struct pid *prv_pid,
-+				struct pid *req_pid,
-+				const struct cred *req_cred,
-+				u64 wanted)
-+{
-+	struct pid_namespace *prv_ns, *req_ns;
-+	unsigned int proc;
-+
-+	prv_ns = ns_of_pid(prv_pid);
-+	req_ns = ns_of_pid(req_pid);
-+
-+	/*
-+	 * If the sender is not visible in the receiver namespace, then the
-+	 * receiver cannot access the sender via its own procfs. Hence, we do
-+	 * not attach any additional metadata.
-+	 */
-+	if (!pid_nr_ns(prv_pid, req_ns))
-+		return 0;
-+
-+	/*
-+	 * If the pid-namespace of the receiver has hide_pid set, it cannot see
-+	 * any process but its own. We shortcut this /proc permission check if
-+	 * provider and requestor are the same. If not, we perform rather
-+	 * expensive /proc permission checks.
-+	 */
-+	if (prv_pid == req_pid)
-+		proc = KDBUS_META_PROC_NORMAL;
-+	else
-+		proc = kdbus_proc_permission(req_ns, req_cred, prv_pid);
-+
-+	/* you need /proc access to read standard process attributes */
-+	if (proc < KDBUS_META_PROC_NORMAL)
-+		wanted &= ~(KDBUS_ATTACH_TID_COMM |
-+			    KDBUS_ATTACH_PID_COMM |
-+			    KDBUS_ATTACH_SECLABEL |
-+			    KDBUS_ATTACH_CMDLINE |
-+			    KDBUS_ATTACH_CGROUP |
-+			    KDBUS_ATTACH_AUDIT |
-+			    KDBUS_ATTACH_CAPS |
-+			    KDBUS_ATTACH_EXE);
-+
-+	/* clear all non-/proc flags */
-+	return wanted & (KDBUS_ATTACH_TID_COMM |
-+			 KDBUS_ATTACH_PID_COMM |
-+			 KDBUS_ATTACH_SECLABEL |
-+			 KDBUS_ATTACH_CMDLINE |
-+			 KDBUS_ATTACH_CGROUP |
-+			 KDBUS_ATTACH_AUDIT |
-+			 KDBUS_ATTACH_CAPS |
-+			 KDBUS_ATTACH_EXE);
-+}
-+
-+/**
-+ * kdbus_meta_get_mask() - calculate attach flags mask for metadata request
-+ * @prv_pid:		pid of metadata provider
-+ * @prv_mask:		mask of metadata the provider grants unchecked
-+ * @req_pid:		pid of metadata requestor
-+ * @req_cred:		credentials of metadata requestor
-+ * @req_mask:		mask of metadata that is requested
-+ *
-+ * This calculates the metadata items that the requestor @req_pid can access
-+ * from the metadata provider @prv_pid. This permission check consists of
-+ * several different parts:
-+ *  - Providers can grant metadata items unchecked. Regardless of their type,
-+ *    they're always granted to the requestor. This mask is passed as @prv_mask.
-+ *  - Basic items (credentials and connection metadata) are granted implicitly
-+ *    to everyone. They're publicly available to any bus-user that can see the
-+ *    provider.
-+ *  - Process credentials that are not granted implicitly follow the same
-+ *    permission checks as /proc. This means we always assume a requestor
-+ *    process has access to its *own* /proc mount, if it has access to
-+ *    kdbusfs.
-+ *
-+ * Return: Mask of metadata that is granted.
-+ */
-+static u64 kdbus_meta_get_mask(struct pid *prv_pid, u64 prv_mask,
-+			       struct pid *req_pid,
-+			       const struct cred *req_cred, u64 req_mask)
-+{
-+	u64 missing, impl_mask, proc_mask = 0;
-+
-+	/*
-+	 * Connection metadata and basic unix process credentials are
-+	 * transmitted implicitly, and cannot be suppressed. Both are required
-+	 * to perform user-space policies on the receiver-side. Furthermore,
-+	 * connection metadata is public state anyway, and unix credentials
-+	 * are needed for UDS compatibility. We extend them slightly with
-+	 * auxiliary groups and additional uids/gids/pids.
-+	 */
-+	impl_mask = /* connection metadata */
-+		    KDBUS_ATTACH_CONN_DESCRIPTION |
-+		    KDBUS_ATTACH_TIMESTAMP |
-+		    KDBUS_ATTACH_NAMES |
-+		    /* credentials and pids */
-+		    KDBUS_ATTACH_AUXGROUPS |
-+		    KDBUS_ATTACH_CREDS |
-+		    KDBUS_ATTACH_PIDS;
-+
-+	/*
-+	 * Calculate the set of metadata that is granted neither implicitly nor
-+	 * the sender, but still requested by the receiver. If any are left,
-+	 * perform rather expensive /proc access checks for them.
-+	 */
-+	missing = req_mask & ~((prv_mask | impl_mask) & req_mask);
-+	if (missing)
-+		proc_mask = kdbus_meta_proc_mask(prv_pid, req_pid, req_cred,
-+						 missing);
-+
-+	return (prv_mask | impl_mask | proc_mask) & req_mask;
-+}
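-+
-+/*
-+ * Worked example for the mask calculation above: assume prv_mask is 0
-+ * and the requestor asks for KDBUS_ATTACH_CREDS | KDBUS_ATTACH_EXE.
-+ * CREDS is part of impl_mask and thus granted unconditionally; EXE
-+ * ends up in 'missing' and is only granted if kdbus_meta_proc_mask()
-+ * finds that the requestor may read the provider's /proc entry.
-+ */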
-+
-+/**
-+ * kdbus_meta_info_mask() - calculate attach flags mask for info queries
-+ * @conn:	connection that provides the metadata
-+ * @mask:	metadata that is requested
-+ *
-+ * This calculates the metadata the current task may read from @conn via an
-+ * info query, based on kdbus_meta_get_mask().
-+ *
-+ * Return: Mask of metadata that is granted.
-+ */
-+u64 kdbus_meta_info_mask(const struct kdbus_conn *conn, u64 mask)
-+{
-+	return kdbus_meta_get_mask(conn->pid,
-+				   atomic64_read(&conn->attach_flags_send),
-+				   task_pid(current),
-+				   current_cred(),
-+				   mask);
-+}
-+
-+/**
-+ * kdbus_meta_msg_mask() - calculate attach flags mask for message transfers
-+ * @snd:	connection that sends the message
-+ * @rcv:	connection that receives the message
-+ *
-+ * This calculates the metadata @rcv may see of @snd for a given message
-+ * transaction, based on kdbus_meta_get_mask().
-+ *
-+ * Return: Mask of metadata that is granted.
-+ */
-+u64 kdbus_meta_msg_mask(const struct kdbus_conn *snd,
-+			const struct kdbus_conn *rcv)
-+{
-+	return kdbus_meta_get_mask(task_pid(current),
-+				   atomic64_read(&snd->attach_flags_send),
-+				   rcv->pid,
-+				   rcv->cred,
-+				   atomic64_read(&rcv->attach_flags_recv));
-+}
-diff --git a/ipc/kdbus/metadata.h b/ipc/kdbus/metadata.h
-new file mode 100644
-index 0000000..dba7cc7
---- /dev/null
-+++ b/ipc/kdbus/metadata.h
-@@ -0,0 +1,86 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_METADATA_H
-+#define __KDBUS_METADATA_H
-+
-+#include <linux/kernel.h>
-+
-+struct kdbus_conn;
-+struct kdbus_pool_slice;
-+
-+struct kdbus_meta_proc;
-+struct kdbus_meta_conn;
-+
-+/**
-+ * struct kdbus_meta_fake - Fake metadata
-+ * @valid:		Bitmask of collected and valid items
-+ * @uid:		UID of process
-+ * @euid:		EUID of process
-+ * @suid:		SUID of process
-+ * @fsuid:		FSUID of process
-+ * @gid:		GID of process
-+ * @egid:		EGID of process
-+ * @sgid:		SGID of process
-+ * @fsgid:		FSGID of process
-+ * @pid:		PID of process
-+ * @tgid:		TGID of process
-+ * @ppid:		PPID of process
-+ * @seclabel:		Seclabel
-+ */
-+struct kdbus_meta_fake {
-+	u64 valid;
-+
-+	/* KDBUS_ITEM_CREDS */
-+	kuid_t uid, euid, suid, fsuid;
-+	kgid_t gid, egid, sgid, fsgid;
-+
-+	/* KDBUS_ITEM_PIDS */
-+	struct pid *pid, *tgid, *ppid;
-+
-+	/* KDBUS_ITEM_SECLABEL */
-+	char *seclabel;
-+};
-+
-+struct kdbus_meta_proc *kdbus_meta_proc_new(void);
-+struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp);
-+struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp);
-+int kdbus_meta_proc_collect(struct kdbus_meta_proc *mp, u64 what);
-+
-+struct kdbus_meta_fake *kdbus_meta_fake_new(void);
-+struct kdbus_meta_fake *kdbus_meta_fake_free(struct kdbus_meta_fake *mf);
-+int kdbus_meta_fake_collect(struct kdbus_meta_fake *mf,
-+			    const struct kdbus_creds *creds,
-+			    const struct kdbus_pids *pids,
-+			    const char *seclabel);
-+
-+struct kdbus_meta_conn *kdbus_meta_conn_new(void);
-+struct kdbus_meta_conn *kdbus_meta_conn_ref(struct kdbus_meta_conn *mc);
-+struct kdbus_meta_conn *kdbus_meta_conn_unref(struct kdbus_meta_conn *mc);
-+int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
-+			    struct kdbus_conn *conn,
-+			    u64 msg_seqnum, u64 what);
-+
-+int kdbus_meta_emit(struct kdbus_meta_proc *mp,
-+		    struct kdbus_meta_fake *mf,
-+		    struct kdbus_meta_conn *mc,
-+		    struct kdbus_conn *conn,
-+		    u64 mask,
-+		    struct kdbus_item **out_items,
-+		    size_t *out_size);
-+u64 kdbus_meta_info_mask(const struct kdbus_conn *conn, u64 mask);
-+u64 kdbus_meta_msg_mask(const struct kdbus_conn *snd,
-+			const struct kdbus_conn *rcv);
-+
-+#endif
-diff --git a/ipc/kdbus/names.c b/ipc/kdbus/names.c
-new file mode 100644
-index 0000000..bf44ca3
---- /dev/null
-+++ b/ipc/kdbus/names.c
-@@ -0,0 +1,854 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/ctype.h>
-+#include <linux/fs.h>
-+#include <linux/hash.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/uio.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "handle.h"
-+#include "item.h"
-+#include "names.h"
-+#include "notify.h"
-+#include "policy.h"
-+
-+#define KDBUS_NAME_SAVED_MASK (KDBUS_NAME_ALLOW_REPLACEMENT |	\
-+			       KDBUS_NAME_QUEUE)
-+
-+static bool kdbus_name_owner_is_used(struct kdbus_name_owner *owner)
-+{
-+	return !list_empty(&owner->name_entry) ||
-+	       owner == owner->name->activator;
-+}
-+
-+static struct kdbus_name_owner *
-+kdbus_name_owner_new(struct kdbus_conn *conn, struct kdbus_name_entry *name,
-+		     u64 flags)
-+{
-+	struct kdbus_name_owner *owner;
-+
-+	kdbus_conn_assert_active(conn);
-+
-+	if (conn->name_count >= KDBUS_CONN_MAX_NAMES)
-+		return ERR_PTR(-E2BIG);
-+
-+	owner = kmalloc(sizeof(*owner), GFP_KERNEL);
-+	if (!owner)
-+		return ERR_PTR(-ENOMEM);
-+
-+	owner->flags = flags & KDBUS_NAME_SAVED_MASK;
-+	owner->conn = conn;
-+	owner->name = name;
-+	list_add_tail(&owner->conn_entry, &conn->names_list);
-+	INIT_LIST_HEAD(&owner->name_entry);
-+
-+	++conn->name_count;
-+	return owner;
-+}
-+
-+static void kdbus_name_owner_free(struct kdbus_name_owner *owner)
-+{
-+	if (!owner)
-+		return;
-+
-+	WARN_ON(kdbus_name_owner_is_used(owner));
-+	--owner->conn->name_count;
-+	list_del(&owner->conn_entry);
-+	kfree(owner);
-+}
-+
-+static struct kdbus_name_owner *
-+kdbus_name_owner_find(struct kdbus_name_entry *name, struct kdbus_conn *conn)
-+{
-+	struct kdbus_name_owner *owner;
-+
-+	/*
-+	 * Use conn->names_list over name->queue to make sure boundaries of
-+	 * this linear search are controlled by the connection itself.
-+	 * Furthermore, this will find normal owners as well as activators
-+	 * without any additional code.
-+	 */
-+	list_for_each_entry(owner, &conn->names_list, conn_entry)
-+		if (owner->name == name)
-+			return owner;
-+
-+	return NULL;
-+}
-+
-+static bool kdbus_name_entry_is_used(struct kdbus_name_entry *name)
-+{
-+	return !list_empty(&name->queue) || name->activator;
-+}
-+
-+static struct kdbus_name_owner *
-+kdbus_name_entry_first(struct kdbus_name_entry *name)
-+{
-+	return list_first_entry_or_null(&name->queue, struct kdbus_name_owner,
-+					name_entry);
-+}
-+
-+static struct kdbus_name_entry *
-+kdbus_name_entry_new(struct kdbus_name_registry *r, u32 hash,
-+		     const char *name_str)
-+{
-+	struct kdbus_name_entry *name;
-+	size_t namelen;
-+
-+	lockdep_assert_held(&r->rwlock);
-+
-+	namelen = strlen(name_str);
-+
-+	name = kmalloc(sizeof(*name) + namelen + 1, GFP_KERNEL);
-+	if (!name)
-+		return ERR_PTR(-ENOMEM);
-+
-+	name->name_id = ++r->name_seq_last;
-+	name->activator = NULL;
-+	INIT_LIST_HEAD(&name->queue);
-+	hash_add(r->entries_hash, &name->hentry, hash);
-+	memcpy(name->name, name_str, namelen + 1);
-+
-+	return name;
-+}
-+
-+static void kdbus_name_entry_free(struct kdbus_name_entry *name)
-+{
-+	if (!name)
-+		return;
-+
-+	WARN_ON(kdbus_name_entry_is_used(name));
-+	hash_del(&name->hentry);
-+	kfree(name);
-+}
-+
-+static struct kdbus_name_entry *
-+kdbus_name_entry_find(struct kdbus_name_registry *r, u32 hash,
-+		      const char *name_str)
-+{
-+	struct kdbus_name_entry *name;
-+
-+	lockdep_assert_held(&r->rwlock);
-+
-+	hash_for_each_possible(r->entries_hash, name, hentry, hash)
-+		if (!strcmp(name->name, name_str))
-+			return name;
-+
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_name_registry_new() - create a new name registry
-+ *
-+ * Return: a new kdbus_name_registry on success, ERR_PTR on failure.
-+ */
-+struct kdbus_name_registry *kdbus_name_registry_new(void)
-+{
-+	struct kdbus_name_registry *r;
-+
-+	r = kmalloc(sizeof(*r), GFP_KERNEL);
-+	if (!r)
-+		return ERR_PTR(-ENOMEM);
-+
-+	hash_init(r->entries_hash);
-+	init_rwsem(&r->rwlock);
-+	r->name_seq_last = 0;
-+
-+	return r;
-+}
-+
-+/**
-+ * kdbus_name_registry_free() - free name registry
-+ * @r:		name registry to free, or NULL
-+ *
-+ * Free a name registry and cleanup all internal objects. This is a no-op if
-+ * you pass NULL as registry.
-+ */
-+void kdbus_name_registry_free(struct kdbus_name_registry *r)
-+{
-+	if (!r)
-+		return;
-+
-+	WARN_ON(!hash_empty(r->entries_hash));
-+	kfree(r);
-+}
-+
-+/**
-+ * kdbus_name_lookup_unlocked() - lookup name in registry
-+ * @reg:		name registry
-+ * @name:		name to lookup
-+ *
-+ * This looks up @name in the given name-registry and returns the
-+ * kdbus_name_entry object. The caller must hold the registry-lock and must not
-+ * access the returned object after releasing the lock.
-+ *
-+ * Return: Pointer to name-entry, or NULL if not found.
-+ */
-+struct kdbus_name_entry *
-+kdbus_name_lookup_unlocked(struct kdbus_name_registry *reg, const char *name)
-+{
-+	return kdbus_name_entry_find(reg, kdbus_strhash(name), name);
-+}
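-+
-+/*
-+ * Illustrative sketch: resolving a name to the id of its current
-+ * primary owner. As documented above, the registry lock must be held
-+ * across both the lookup and any access to the returned entry.
-+ */
-+static u64 kdbus_name_example_owner_id(struct kdbus_name_registry *reg,
-+				       const char *name)
-+{
-+	struct kdbus_name_entry *e;
-+	struct kdbus_name_owner *o;
-+	u64 id = 0;
-+
-+	down_read(&reg->rwlock);
-+	e = kdbus_name_lookup_unlocked(reg, name);
-+	if (e) {
-+		o = kdbus_name_entry_first(e);
-+		if (o)
-+			id = o->conn->id;
-+	}
-+	up_read(&reg->rwlock);
-+
-+	return id;
-+}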
-+
-+static int kdbus_name_become_activator(struct kdbus_name_owner *owner,
-+				       u64 *return_flags)
-+{
-+	if (kdbus_name_owner_is_used(owner))
-+		return -EALREADY;
-+	if (owner->name->activator)
-+		return -EEXIST;
-+
-+	owner->name->activator = owner;
-+	owner->flags |= KDBUS_NAME_ACTIVATOR;
-+
-+	if (kdbus_name_entry_first(owner->name)) {
-+		owner->flags |= KDBUS_NAME_IN_QUEUE;
-+	} else {
-+		owner->flags |= KDBUS_NAME_PRIMARY;
-+		kdbus_notify_name_change(owner->conn->ep->bus,
-+					 KDBUS_ITEM_NAME_ADD,
-+					 0, owner->conn->id,
-+					 0, owner->flags,
-+					 owner->name->name);
-+	}
-+
-+	if (return_flags)
-+		*return_flags = owner->flags | KDBUS_NAME_ACQUIRED;
-+
-+	return 0;
-+}
-+
-+static int kdbus_name_update(struct kdbus_name_owner *owner, u64 flags,
-+			     u64 *return_flags)
-+{
-+	struct kdbus_name_owner *primary, *activator;
-+	struct kdbus_name_entry *name;
-+	struct kdbus_bus *bus;
-+	u64 nflags = 0;
-+	int ret = 0;
-+
-+	name = owner->name;
-+	bus = owner->conn->ep->bus;
-+	primary = kdbus_name_entry_first(name);
-+	activator = name->activator;
-+
-+	/* cannot be activator and acquire a name */
-+	if (owner == activator)
-+		return -EUCLEAN;
-+
-+	/* update saved flags */
-+	owner->flags = flags & KDBUS_NAME_SAVED_MASK;
-+
-+	if (!primary) {
-+		/*
-+		 * No primary owner (but maybe an activator). Take over the
-+		 * name.
-+		 */
-+
-+		list_add(&owner->name_entry, &name->queue);
-+		owner->flags |= KDBUS_NAME_PRIMARY;
-+		nflags |= KDBUS_NAME_ACQUIRED;
-+
-+		/* move messages to new owner on activation */
-+		if (activator) {
-+			kdbus_conn_move_messages(owner->conn, activator->conn,
-+						 name->name_id);
-+			kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_CHANGE,
-+					activator->conn->id, owner->conn->id,
-+					activator->flags, owner->flags,
-+					name->name);
-+			activator->flags &= ~KDBUS_NAME_PRIMARY;
-+			activator->flags |= KDBUS_NAME_IN_QUEUE;
-+		} else {
-+			kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_ADD,
-+						 0, owner->conn->id,
-+						 0, owner->flags,
-+						 name->name);
-+		}
-+
-+	} else if (owner == primary) {
-+		/*
-+		 * Already the primary owner of the name, flags were already
-+		 * updated. Nothing to do.
-+		 */
-+
-+		owner->flags |= KDBUS_NAME_PRIMARY;
-+
-+	} else if ((primary->flags & KDBUS_NAME_ALLOW_REPLACEMENT) &&
-+		   (flags & KDBUS_NAME_REPLACE_EXISTING)) {
-+		/*
-+		 * We're not the primary owner but can replace it. Move us
-+		 * ahead of the primary owner and acquire the name (possibly
-+		 * skipping queued owners ahead of us).
-+		 */
-+
-+		list_del_init(&owner->name_entry);
-+		list_add(&owner->name_entry, &name->queue);
-+		owner->flags |= KDBUS_NAME_PRIMARY;
-+		nflags |= KDBUS_NAME_ACQUIRED;
-+
-+		kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_CHANGE,
-+					 primary->conn->id, owner->conn->id,
-+					 primary->flags, owner->flags,
-+					 name->name);
-+
-+		/* requeue old primary, or drop if queueing not wanted */
-+		if (primary->flags & KDBUS_NAME_QUEUE) {
-+			primary->flags &= ~KDBUS_NAME_PRIMARY;
-+			primary->flags |= KDBUS_NAME_IN_QUEUE;
-+		} else {
-+			list_del_init(&primary->name_entry);
-+			kdbus_name_owner_free(primary);
-+		}
-+
-+	} else if (flags & KDBUS_NAME_QUEUE) {
-+		/*
-+		 * Name is already occupied and we cannot take it over, but
-+		 * queuing is allowed. Put us silently on the queue, if not
-+		 * already there.
-+		 */
-+
-+		owner->flags |= KDBUS_NAME_IN_QUEUE;
-+		if (!kdbus_name_owner_is_used(owner)) {
-+			list_add_tail(&owner->name_entry, &name->queue);
-+			nflags |= KDBUS_NAME_ACQUIRED;
-+		}
-+	} else if (kdbus_name_owner_is_used(owner)) {
-+		/*
-+		 * Already queued on name, but re-queueing was not requested.
-+		 * Make sure to unlink it from the name, the caller is
-+		 * responsible for releasing it.
-+		 */
-+
-+		list_del_init(&owner->name_entry);
-+	} else {
-+		/*
-+		 * Name is already claimed and queueing is not requested.
-+		 * Return error to the caller.
-+		 */
-+
-+		ret = -EEXIST;
-+	}
-+
-+	if (return_flags)
-+		*return_flags = owner->flags | nflags;
-+
-+	return ret;
-+}
-+
-+int kdbus_name_acquire(struct kdbus_name_registry *reg,
-+		       struct kdbus_conn *conn, const char *name_str,
-+		       u64 flags, u64 *return_flags)
-+{
-+	struct kdbus_name_entry *name = NULL;
-+	struct kdbus_name_owner *owner = NULL;
-+	u32 hash;
-+	int ret;
-+
-+	kdbus_conn_assert_active(conn);
-+
-+	down_write(&reg->rwlock);
-+
-+	/*
-+	 * Verify the connection has access to the name. Do this before testing
-+	 * for double-acquisitions and other errors to make sure we do not leak
-+	 * information about this name through possible custom endpoints.
-+	 */
-+	if (!kdbus_conn_policy_own_name(conn, current_cred(), name_str)) {
-+		ret = -EPERM;
-+		goto exit;
-+	}
-+
-+	/*
-+	 * Lookup the name entry. If it already exists, search for an owner
-+	 * entry as we might already own that name. If either does not exist,
-+	 * we will allocate a fresh one.
-+	 */
-+	hash = kdbus_strhash(name_str);
-+	name = kdbus_name_entry_find(reg, hash, name_str);
-+	if (name) {
-+		owner = kdbus_name_owner_find(name, conn);
-+	} else {
-+		name = kdbus_name_entry_new(reg, hash, name_str);
-+		if (IS_ERR(name)) {
-+			ret = PTR_ERR(name);
-+			name = NULL;
-+			goto exit;
-+		}
-+	}
-+
-+	/* create name owner object if not already queued */
-+	if (!owner) {
-+		owner = kdbus_name_owner_new(conn, name, flags);
-+		if (IS_ERR(owner)) {
-+			ret = PTR_ERR(owner);
-+			owner = NULL;
-+			goto exit;
-+		}
-+	}
-+
-+	if (flags & KDBUS_NAME_ACTIVATOR)
-+		ret = kdbus_name_become_activator(owner, return_flags);
-+	else
-+		ret = kdbus_name_update(owner, flags, return_flags);
-+	if (ret < 0)
-+		goto exit;
-+
-+exit:
-+	if (owner && !kdbus_name_owner_is_used(owner))
-+		kdbus_name_owner_free(owner);
-+	if (name && !kdbus_name_entry_is_used(name))
-+		kdbus_name_entry_free(name);
-+	up_write(&reg->rwlock);
-+	kdbus_notify_flush(conn->ep->bus);
-+	return ret;
-+}
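
For orientation, here is a hedged user-space sketch of how a client might drive this acquisition path through the KDBUS_CMD_NAME_ACQUIRE ioctl. The uapi details (struct kdbus_cmd with a trailing item array, struct kdbus_item's 'str' payload, KDBUS_ALIGN8, KDBUS_ITEM_NAME) are assumed from the matching <linux/kdbus.h>; the helper itself is illustrative and not part of this patch.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kdbus.h>

static int acquire_name(int conn_fd, const char *name, uint64_t flags)
{
	char buf[512] __attribute__((aligned(8))) = { 0 };
	struct kdbus_cmd *cmd = (struct kdbus_cmd *)buf;
	struct kdbus_item *item = cmd->items;
	size_t nlen = strlen(name) + 1;
	size_t isz = offsetof(struct kdbus_item, str) + nlen;

	if (sizeof(*cmd) + KDBUS_ALIGN8(isz) > sizeof(buf))
		return -ENAMETOOLONG;

	item->size = isz;
	item->type = KDBUS_ITEM_NAME;
	memcpy(item->str, name, nlen);

	cmd->size = sizeof(*cmd) + KDBUS_ALIGN8(isz);
	cmd->flags = flags;	/* e.g. KDBUS_NAME_QUEUE */

	if (ioctl(conn_fd, KDBUS_CMD_NAME_ACQUIRE, cmd) < 0)
		return -errno;

	/* cmd->return_flags now carries KDBUS_NAME_PRIMARY or _IN_QUEUE */
	return cmd->return_flags & KDBUS_NAME_IN_QUEUE ? 1 : 0;
}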
-+
-+static void kdbus_name_release_unlocked(struct kdbus_name_owner *owner)
-+{
-+	struct kdbus_name_owner *primary, *next;
-+	struct kdbus_name_entry *name;
-+
-+	name = owner->name;
-+	primary = kdbus_name_entry_first(name);
-+
-+	list_del_init(&owner->name_entry);
-+	if (owner == name->activator)
-+		name->activator = NULL;
-+
-+	if (!primary || owner == primary) {
-+		next = kdbus_name_entry_first(name);
-+		if (!next)
-+			next = name->activator;
-+
-+		if (next) {
-+			/* hand to next in queue */
-+			next->flags &= ~KDBUS_NAME_IN_QUEUE;
-+			next->flags |= KDBUS_NAME_PRIMARY;
-+			if (next == name->activator)
-+				kdbus_conn_move_messages(next->conn,
-+							 owner->conn,
-+							 name->name_id);
-+
-+			kdbus_notify_name_change(owner->conn->ep->bus,
-+					KDBUS_ITEM_NAME_CHANGE,
-+					owner->conn->id, next->conn->id,
-+					owner->flags, next->flags,
-+					name->name);
-+		} else {
-+			kdbus_notify_name_change(owner->conn->ep->bus,
-+						 KDBUS_ITEM_NAME_REMOVE,
-+						 owner->conn->id, 0,
-+						 owner->flags, 0,
-+						 name->name);
-+		}
-+	}
-+
-+	kdbus_name_owner_free(owner);
-+	if (!kdbus_name_entry_is_used(name))
-+		kdbus_name_entry_free(name);
-+}
-+
-+static int kdbus_name_release(struct kdbus_name_registry *reg,
-+			      struct kdbus_conn *conn,
-+			      const char *name_str)
-+{
-+	struct kdbus_name_owner *owner;
-+	struct kdbus_name_entry *name;
-+	int ret = 0;
-+
-+	down_write(&reg->rwlock);
-+	name = kdbus_name_entry_find(reg, kdbus_strhash(name_str), name_str);
-+	if (name) {
-+		owner = kdbus_name_owner_find(name, conn);
-+		if (owner)
-+			kdbus_name_release_unlocked(owner);
-+		else
-+			ret = -EADDRINUSE;
-+	} else {
-+		ret = -ESRCH;
-+	}
-+	up_write(&reg->rwlock);
-+
-+	kdbus_notify_flush(conn->ep->bus);
-+	return ret;
-+}
-+
-+/**
-+ * kdbus_name_release_all() - remove all name entries of a given connection
-+ * @reg:		name registry
-+ * @conn:		connection
-+ */
-+void kdbus_name_release_all(struct kdbus_name_registry *reg,
-+			    struct kdbus_conn *conn)
-+{
-+	struct kdbus_name_owner *owner;
-+
-+	down_write(&reg->rwlock);
-+
-+	while ((owner = list_first_entry_or_null(&conn->names_list,
-+						 struct kdbus_name_owner,
-+						 conn_entry)))
-+		kdbus_name_release_unlocked(owner);
-+
-+	up_write(&reg->rwlock);
-+
-+	kdbus_notify_flush(conn->ep->bus);
-+}
-+
-+/**
-+ * kdbus_name_is_valid() - check if a name is valid
-+ * @p:			The name to check
-+ * @allow_wildcard:	Whether or not to allow a wildcard name
-+ *
-+ * A name is valid if all of the following criteria are met:
-+ *
-+ *  - The name has two or more elements separated by a period ('.') character.
-+ *  - All elements must contain at least one character.
-+ *  - Each element must only contain the ASCII characters "[A-Z][a-z][0-9]_-"
-+ *    and must not begin with a digit.
-+ *  - The name must not exceed KDBUS_NAME_MAX_LEN.
-+ *  - If @allow_wildcard is true, the name may end in '.*'
-+ */
-+bool kdbus_name_is_valid(const char *p, bool allow_wildcard)
-+{
-+	bool dot, found_dot = false;
-+	const char *q;
-+
-+	for (dot = true, q = p; *q; q++) {
-+		if (*q == '.') {
-+			if (dot)
-+				return false;
-+
-+			found_dot = true;
-+			dot = true;
-+		} else {
-+			bool good;
-+
-+			good = isalpha(*q) || (!dot && isdigit(*q)) ||
-+				*q == '_' || *q == '-' ||
-+				(allow_wildcard && dot &&
-+					*q == '*' && *(q + 1) == '\0');
-+
-+			if (!good)
-+				return false;
-+
-+			dot = false;
-+		}
-+	}
-+
-+	if (q - p > KDBUS_NAME_MAX_LEN)
-+		return false;
-+
-+	if (dot)
-+		return false;
-+
-+	if (!found_dot)
-+		return false;
-+
-+	return true;
-+}
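
The rules above are easy to experiment with outside the kernel. The following standalone program is a sketch that mirrors kdbus_name_is_valid(); KDBUS_NAME_MAX_LEN is assumed to be 255 as in the kdbus uapi header.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>

#define NAME_MAX_LEN 255	/* assumed value of KDBUS_NAME_MAX_LEN */

static bool name_is_valid(const char *p, bool allow_wildcard)
{
	bool dot = true, found_dot = false;
	const char *q;

	for (q = p; *q; q++) {
		if (*q == '.') {
			if (dot)
				return false;	/* empty element */
			found_dot = true;
			dot = true;
		} else if (isalpha((unsigned char)*q) ||
			   (!dot && isdigit((unsigned char)*q)) ||
			   *q == '_' || *q == '-' ||
			   (allow_wildcard && dot &&
			    *q == '*' && q[1] == '\0')) {
			dot = false;
		} else {
			return false;
		}
	}

	return q - p <= NAME_MAX_LEN && !dot && found_dot;
}

int main(void)
{
	const char *samples[] = { "org.example.App", "org..bad",
				  "0rg.example", "org.example.*", "single" };
	for (size_t i = 0; i < sizeof(samples) / sizeof(*samples); i++)
		printf("%-16s -> %s\n", samples[i],
		       name_is_valid(samples[i], true) ? "valid" : "invalid");
	return 0;
}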
-+
-+/**
-+ * kdbus_cmd_name_acquire() - handle KDBUS_CMD_NAME_ACQUIRE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_name_acquire(struct kdbus_conn *conn, void __user *argp)
-+{
-+	const char *item_name;
-+	struct kdbus_cmd *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_NAME, .mandatory = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_NAME_REPLACE_EXISTING |
-+				 KDBUS_NAME_ALLOW_REPLACEMENT |
-+				 KDBUS_NAME_QUEUE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	item_name = argv[1].item->str;
-+	if (!kdbus_name_is_valid(item_name, false)) {
-+		ret = -EINVAL;
-+		goto exit;
-+	}
-+
-+	ret = kdbus_name_acquire(conn->ep->bus->name_registry, conn, item_name,
-+				 cmd->flags, &cmd->return_flags);
-+
-+exit:
-+	return kdbus_args_clear(&args, ret);
-+}
-+
-+/**
-+ * kdbus_cmd_name_release() - handle KDBUS_CMD_NAME_RELEASE
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_cmd *cmd;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+		{ .type = KDBUS_ITEM_NAME, .mandatory = true },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	if (!kdbus_conn_is_ordinary(conn))
-+		return -EOPNOTSUPP;
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	ret = kdbus_name_release(conn->ep->bus->name_registry, conn,
-+				 argv[1].item->str);
-+	return kdbus_args_clear(&args, ret);
-+}
-+
-+static int kdbus_list_write(struct kdbus_conn *conn,
-+			    struct kdbus_conn *c,
-+			    struct kdbus_pool_slice *slice,
-+			    size_t *pos,
-+			    struct kdbus_name_owner *o,
-+			    bool write)
-+{
-+	struct kvec kvec[4];
-+	size_t cnt = 0;
-+	int ret;
-+
-+	/* info header */
-+	struct kdbus_info info = {
-+		.size = 0,
-+		.id = c->id,
-+		.flags = c->flags,
-+	};
-+
-+	/* fake the header of a kdbus_name item */
-+	struct {
-+		u64 size;
-+		u64 type;
-+		u64 flags;
-+	} h = {};
-+
-+	if (o && !kdbus_conn_policy_see_name_unlocked(conn, current_cred(),
-+						      o->name->name))
-+		return 0;
-+
-+	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &info.size);
-+
-+	/* append name */
-+	if (o) {
-+		size_t slen = strlen(o->name->name) + 1;
-+
-+		h.size = offsetof(struct kdbus_item, name.name) + slen;
-+		h.type = KDBUS_ITEM_OWNED_NAME;
-+		h.flags = o->flags;
-+
-+		kdbus_kvec_set(&kvec[cnt++], &h, sizeof(h), &info.size);
-+		kdbus_kvec_set(&kvec[cnt++], o->name->name, slen, &info.size);
-+		cnt += !!kdbus_kvec_pad(&kvec[cnt], &info.size);
-+	}
-+
-+	if (write) {
-+		ret = kdbus_pool_slice_copy_kvec(slice, *pos, kvec,
-+						 cnt, info.size);
-+		if (ret < 0)
-+			return ret;
-+	}
-+
-+	*pos += info.size;
-+	return 0;
-+}
-+
-+static int kdbus_list_all(struct kdbus_conn *conn, u64 flags,
-+			  struct kdbus_pool_slice *slice,
-+			  size_t *pos, bool write)
-+{
-+	struct kdbus_conn *c;
-+	size_t p = *pos;
-+	int ret, i;
-+
-+	hash_for_each(conn->ep->bus->conn_hash, i, c, hentry) {
-+		bool added = false;
-+
-+		/* skip monitors */
-+		if (kdbus_conn_is_monitor(c))
-+			continue;
-+
-+		/* all names the connection owns */
-+		if (flags & (KDBUS_LIST_NAMES |
-+			     KDBUS_LIST_ACTIVATORS |
-+			     KDBUS_LIST_QUEUED)) {
-+			struct kdbus_name_owner *o;
-+
-+			list_for_each_entry(o, &c->names_list, conn_entry) {
-+				if (o->flags & KDBUS_NAME_ACTIVATOR) {
-+					if (!(flags & KDBUS_LIST_ACTIVATORS))
-+						continue;
-+
-+					ret = kdbus_list_write(conn, c, slice,
-+							       &p, o, write);
-+					if (ret < 0)
-+						return ret;
-+
-+					added = true;
-+				} else if (o->flags & KDBUS_NAME_IN_QUEUE) {
-+					if (!(flags & KDBUS_LIST_QUEUED))
-+						continue;
-+
-+					ret = kdbus_list_write(conn, c, slice,
-+							       &p, o, write);
-+					if (ret < 0)
-+						return ret;
-+
-+					added = true;
-+				} else if (flags & KDBUS_LIST_NAMES) {
-+					ret = kdbus_list_write(conn, c, slice,
-+							       &p, o, write);
-+					if (ret < 0)
-+						return ret;
-+
-+					added = true;
-+				}
-+			}
-+		}
-+
-+		/* nothing added so far, just add the unique ID */
-+		if (!added && (flags & KDBUS_LIST_UNIQUE)) {
-+			ret = kdbus_list_write(conn, c, slice, &p, NULL, write);
-+			if (ret < 0)
-+				return ret;
-+		}
-+	}
-+
-+	*pos = p;
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_cmd_list() - handle KDBUS_CMD_LIST
-+ * @conn:		connection to operate on
-+ * @argp:		command payload
-+ *
-+ * Return: >=0 on success, negative error code on failure.
-+ */
-+int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp)
-+{
-+	struct kdbus_name_registry *reg = conn->ep->bus->name_registry;
-+	struct kdbus_pool_slice *slice = NULL;
-+	struct kdbus_cmd_list *cmd;
-+	size_t pos, size;
-+	int ret;
-+
-+	struct kdbus_arg argv[] = {
-+		{ .type = KDBUS_ITEM_NEGOTIATE },
-+	};
-+	struct kdbus_args args = {
-+		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
-+				 KDBUS_LIST_UNIQUE |
-+				 KDBUS_LIST_NAMES |
-+				 KDBUS_LIST_ACTIVATORS |
-+				 KDBUS_LIST_QUEUED,
-+		.argv = argv,
-+		.argc = ARRAY_SIZE(argv),
-+	};
-+
-+	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
-+
-+	/* lock order: domain -> bus -> ep -> names -> conn */
-+	down_read(&reg->rwlock);
-+	down_read(&conn->ep->bus->conn_rwlock);
-+	down_read(&conn->ep->policy_db.entries_rwlock);
-+
-+	/* size of records */
-+	size = 0;
-+	ret = kdbus_list_all(conn, cmd->flags, NULL, &size, false);
-+	if (ret < 0)
-+		goto exit_unlock;
-+
-+	if (size == 0) {
-+		kdbus_pool_publish_empty(conn->pool, &cmd->offset,
-+					 &cmd->list_size);
-+	} else {
-+		slice = kdbus_pool_slice_alloc(conn->pool, size, false);
-+		if (IS_ERR(slice)) {
-+			ret = PTR_ERR(slice);
-+			slice = NULL;
-+			goto exit_unlock;
-+		}
-+
-+		/* copy the records */
-+		pos = 0;
-+		ret = kdbus_list_all(conn, cmd->flags, slice, &pos, true);
-+		if (ret < 0)
-+			goto exit_unlock;
-+
-+		WARN_ON(pos != size);
-+		kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->list_size);
-+	}
-+
-+	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
-+	    kdbus_member_set_user(&cmd->list_size, argp,
-+				  typeof(*cmd), list_size))
-+		ret = -EFAULT;
-+
-+exit_unlock:
-+	up_read(&conn->ep->policy_db.entries_rwlock);
-+	up_read(&conn->ep->bus->conn_rwlock);
-+	up_read(&reg->rwlock);
-+	kdbus_pool_slice_release(slice);
-+	return kdbus_args_clear(&args, ret);
-+}
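
To make the record layout concrete, here is a hedged user-space counterpart: it issues KDBUS_CMD_LIST and walks the kdbus_info records that kdbus_list_write() produced. It assumes 'pool' is the mmap()ed receive pool of the connection and that the uapi kdbus_cmd_list/kdbus_info layouts match the kernel structures above; error handling is trimmed.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kdbus.h>

static void list_peers(int conn_fd, const uint8_t *pool)
{
	struct kdbus_cmd_list cmd = {
		.size = sizeof(cmd),
		.flags = KDBUS_LIST_UNIQUE | KDBUS_LIST_NAMES,
	};
	uint64_t pos = 0;

	if (ioctl(conn_fd, KDBUS_CMD_LIST, &cmd) < 0)
		return;

	while (pos < cmd.list_size) {
		const struct kdbus_info *info =
			(const void *)(pool + cmd.offset + pos);

		printf("connection id: %llu\n", (unsigned long long)info->id);
		pos += info->size;	/* each record is 8-byte padded */
	}

	/* the published slice should be returned via KDBUS_CMD_FREE */
}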
-diff --git a/ipc/kdbus/names.h b/ipc/kdbus/names.h
-new file mode 100644
-index 0000000..edac59d
---- /dev/null
-+++ b/ipc/kdbus/names.h
-@@ -0,0 +1,105 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_NAMES_H
-+#define __KDBUS_NAMES_H
-+
-+#include <linux/hashtable.h>
-+#include <linux/rwsem.h>
-+
-+struct kdbus_name_entry;
-+struct kdbus_name_owner;
-+struct kdbus_name_registry;
-+
-+/**
-+ * struct kdbus_name_registry - names registered for a bus
-+ * @entries_hash:	Map of entries
-+ * @rwlock:		Registry data lock
-+ * @name_seq_last:	Last used sequence number to assign to a name entry
-+ */
-+struct kdbus_name_registry {
-+	DECLARE_HASHTABLE(entries_hash, 8);
-+	struct rw_semaphore rwlock;
-+	u64 name_seq_last;
-+};
-+
-+/**
-+ * struct kdbus_name_entry - well-known name entry
-+ * @name_id:		sequence number of name entry to be able to uniquely
-+ *			identify a name over its registration lifetime
-+ * @activator:		activator of this name, or NULL
-+ * @queue:		list of queued owners
-+ * @hentry:		entry in registry map
-+ * @name:		well-known name
-+ */
-+struct kdbus_name_entry {
-+	u64 name_id;
-+	struct kdbus_name_owner *activator;
-+	struct list_head queue;
-+	struct hlist_node hentry;
-+	char name[];
-+};
-+
-+/**
-+ * struct kdbus_name_owner - owner of a well-known name
-+ * @flags:		KDBUS_NAME_* flags of this owner
-+ * @conn:		connection owning the name
-+ * @name:		name that is owned
-+ * @conn_entry:		link into @conn
-+ * @name_entry:		link into @name
-+ */
-+struct kdbus_name_owner {
-+	u64 flags;
-+	struct kdbus_conn *conn;
-+	struct kdbus_name_entry *name;
-+	struct list_head conn_entry;
-+	struct list_head name_entry;
-+};
-+
-+bool kdbus_name_is_valid(const char *p, bool allow_wildcard);
-+
-+struct kdbus_name_registry *kdbus_name_registry_new(void);
-+void kdbus_name_registry_free(struct kdbus_name_registry *reg);
-+
-+struct kdbus_name_entry *
-+kdbus_name_lookup_unlocked(struct kdbus_name_registry *reg, const char *name);
-+
-+int kdbus_name_acquire(struct kdbus_name_registry *reg,
-+		       struct kdbus_conn *conn, const char *name,
-+		       u64 flags, u64 *return_flags);
-+void kdbus_name_release_all(struct kdbus_name_registry *reg,
-+			    struct kdbus_conn *conn);
-+
-+int kdbus_cmd_name_acquire(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp);
-+int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp);
-+
-+/**
-+ * kdbus_name_get_owner() - get current owner of a name
-+ * @name:	name to get current owner of
-+ *
-+ * This returns a pointer to the current owner of a name (or its activator if
-+ * there is no owner). The caller must make sure @name is valid and does not
-+ * vanish.
-+ *
-+ * Return: Pointer to current owner or NULL if there is none.
-+ */
-+static inline struct kdbus_name_owner *
-+kdbus_name_get_owner(struct kdbus_name_entry *name)
-+{
-+	return list_first_entry_or_null(&name->queue, struct kdbus_name_owner,
-+					name_entry) ? : name->activator;
-+}
-+
-+#endif
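
As a usage sketch for the helpers declared here, resolving the current owner of a name under the registry lock looks roughly like the function below. The helper itself is hypothetical (not part of this patch) and assumes struct kdbus_conn exposes its numeric id as used elsewhere in these files.

#include <linux/rwsem.h>
#include "connection.h"
#include "names.h"

/* hypothetical helper: 0 is returned if the name is currently unowned */
static u64 kdbus_name_peek_owner_id(struct kdbus_name_registry *reg,
				    const char *name)
{
	struct kdbus_name_entry *e;
	struct kdbus_name_owner *o = NULL;
	u64 id = 0;

	down_read(&reg->rwlock);
	e = kdbus_name_lookup_unlocked(reg, name);
	if (e)
		o = kdbus_name_get_owner(e);	/* owner, or activator */
	if (o)
		id = o->conn->id;
	up_read(&reg->rwlock);

	return id;
}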
-diff --git a/ipc/kdbus/node.c b/ipc/kdbus/node.c
-new file mode 100644
-index 0000000..89f58bc
---- /dev/null
-+++ b/ipc/kdbus/node.c
-@@ -0,0 +1,897 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/atomic.h>
-+#include <linux/fs.h>
-+#include <linux/idr.h>
-+#include <linux/kdev_t.h>
-+#include <linux/rbtree.h>
-+#include <linux/rwsem.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+#include <linux/wait.h>
-+
-+#include "bus.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "fs.h"
-+#include "handle.h"
-+#include "node.h"
-+#include "util.h"
-+
-+/**
-+ * DOC: kdbus nodes
-+ *
-+ * Nodes unify lifetime management across exposed kdbus objects and provide a
-+ * hierarchy. Each kdbus object that might be exposed to user-space has a
-+ * kdbus_node object embedded and is linked into the hierarchy. Each node can
-+ * have any number (0-n) of child nodes linked. Each child retains a reference
-+ * to its parent node. For root-nodes, the parent is NULL.
-+ *
-+ * Each node object goes through a bunch of states during its lifetime:
-+ *     * NEW
-+ *       * LINKED    (can be skipped by NEW->FREED transition)
-+ *         * ACTIVE  (can be skipped by LINKED->INACTIVE transition)
-+ *       * INACTIVE
-+ *       * DRAINED
-+ *     * FREED
-+ *
-+ * Each node is allocated by the caller and initialized via kdbus_node_init().
-+ * This never fails and sets the object into state NEW. From now on, ref-counts
-+ * on the node manage its lifetime. During init, the ref-count is set to 1. Once
-+ * it drops to 0, the node goes to state FREED and the node->free_cb() callback
-+ * is called to deallocate any memory.
-+ *
-+ * After initializing a node, you usually link it into the hierarchy. You need
-+ * to provide a parent node and a name. The node will be linked as child to the
-+ * parent and a globally unique ID is assigned to the child. The name of the
-+ * child must be unique for all children of this parent. Otherwise, linking the
-+ * child will fail with -EEXIST.
-+ * Note that the child is not marked active, yet. Admittedly, it prevents any
-+ * other node from being linked with the same name (thus, it reserves that
-+ * name), but any child-lookup (via name or unique ID) will never return this
-+ * child unless it has been marked active.
-+ *
-+ * Once successfully linked, you can use kdbus_node_activate() to activate a
-+ * child. This will mark the child active. This state can be skipped by directly
-+ * deactivating the child via kdbus_node_deactivate() (see below).
-+ * By activating a child, you enable any lookups on this child to succeed from
-+ * now on. Furthermore, any code that got its hands on a reference to the node
-+ * can from now on "acquire" the node.
-+ *
-+ *     Active References (or: 'acquiring' and 'releasing' a node)
-+ *     Additionally to normal object references, nodes support something we call
-+ *     "active references". An active reference can be acquired via
-+ *     kdbus_node_acquire() and released via kdbus_node_release(). A caller
-+ *     _must_ own a normal object reference whenever calling those functions.
-+ *     Unlike object references, acquiring an active reference can fail (by
-+ *     returning 'false' from kdbus_node_acquire()). An active reference can
-+ *     only be acquired if the node is marked active. If it is not marked
-+ *     active, yet, or if it was already deactivated, no more active references
-+ *     can be acquired, ever!
-+ *     Active references are used to track tasks working on a node. Whenever a
-+ *     task enters kernel-space to perform an action on a node, it acquires an
-+ *     active reference, performs the action and releases the reference again.
-+ *     While holding an active reference, the node is guaranteed to stay active.
-+ *     If the node is deactivated in parallel, the node is marked as
-+ *     deactivated, then we wait for all active references to be dropped, before
-+ *     we finally proceed with any cleanups. That is, if you hold an active
-+ *     reference to a node, any resources that are bound to the "active" state
-+ *     are guaranteed to stay accessible until you release your reference.
-+ *
-+ *     Active-references are very similar to rw-locks, where acquiring a node is
-+ *     equal to try-read-lock and releasing to read-unlock. Deactivating a node
-+ *     means write-lock and never releasing it again.
-+ *     Unlike rw-locks, the 'active reference' concept is more versatile and
-+ *     avoids unusual rw-lock usage (never releasing a write-lock..).
-+ *
-+ *     It is safe to acquire multiple active-references recursively. But you
-+ *     need to check the return value of kdbus_node_acquire() on _each_ call. It
-+ *     may stop granting references at _any_ time.
-+ *
-+ *     You're free to perform any operations you want while holding an active
-+ *     reference, except sleeping for an indefinite period. Sleeping for a fixed
-+ *     amount of time is fine, but you usually should not wait on wait-queues
-+ *     without a timeout.
-+ *     For example, if you wait for I/O to happen, you should gather all data
-+ *     and schedule the I/O operation, then release your active reference and
-+ *     wait for it to complete. Then try to acquire a new reference. If it
-+ *     fails, perform any cleanup (the node is now dead). Otherwise, you can
-+ *     finish your operation.
-+ *
-+ * All nodes can be deactivated via kdbus_node_deactivate() at any time. You can
-+ * call this multiple times, even in parallel or on nodes that were never
-+ * linked, and it will just work. The only restriction is, you must not hold an
-+ * active reference when calling kdbus_node_deactivate().
-+ * By deactivating a node, it is immediately marked inactive. Then, we wait for
-+ * all active references to be released (called 'draining' the node). This
-+ * shouldn't take very long as we don't perform long-lasting operations while
-+ * holding an active reference. Note that once the node is marked inactive, no
-+ * new active references can be acquired.
-+ * Once all active references are dropped, the node is considered 'drained'. Now
-+ * kdbus_node_deactivate() is called on each child of the node before we
-+ * continue deactivating our node. That is, once all children are entirely
-+ * deactivated, we call ->release_cb() of our node. ->release_cb() can release
-+ * any resources on that node which are bound to the "active" state of a node.
-+ * When done, we unlink the node from its parent rb-tree, mark it as
-+ * 'released' and return.
-+ * If kdbus_node_deactivate() is called multiple times (even in parallel), all
-+ * but one caller will just wait until the node is fully deactivated. That is,
-+ * one random caller of kdbus_node_deactivate() is selected to call
-+ * ->release_cb() and cleanup the node. Only once all this is done, all other
-+ * callers will return from kdbus_node_deactivate(). That is, it doesn't matter
-+ * whether you're the selected caller or not, it will only return after
-+ * everything is fully done.
-+ *
-+ * When a node is activated, we acquire a normal object reference to the node.
-+ * This reference is dropped after deactivation is fully done (and only if the
-+ * node really was activated). This allows callers to link+activate a child node
-+ * and then drop all refs. The node will be deactivated together with the
-+ * parent, and then be freed when this reference is dropped.
-+ *
-+ * Currently, nodes provide a bunch of resources that external code can use
-+ * directly. This includes:
-+ *
-+ *     * node->waitq: Each node has its own wait-queue that is used to manage
-+ *                    the 'active' state. When a node is deactivated, we wait on
-+ *                    this queue until all active refs are dropped. Analogously,
-+ *                    when you release an active reference on a deactivated
-+ *                    node, and the active ref-count drops to 0, we wake up a
-+ *                    single thread on this queue. Furthermore, once the
-+ *                    ->release_cb() callback finished, we wake up all waiters.
-+ *                    The node-owner is free to re-use this wait-queue for other
-+ *                    purposes. As node-management uses this queue only during
-+ *                    deactivation, it is usually totally fine to re-use the
-+ *                    queue for other, preferably low-overhead, use-cases.
-+ *
-+ *     * node->type: This field defines the type of the owner of this node. It
-+ *                   must be set during node initialization and must remain
-+ *                   constant. The node management never looks at this value,
-+ *                   but external users might use it to gain access to the owner
-+ *                   object of a node.
-+ *                   It is totally up to the owner of the node to define what
-+ *                   their type means. Usually it means you can access the
-+ *                   parent structure via container_of(), as long as you hold an
-+ *                   active reference to the node.
-+ *
-+ *     * node->free_cb:    callback after all references are dropped
-+ *       node->release_cb: callback during node deactivation
-+ *                         These fields must be set by the node owner during
-+ *                         node initialization. They must remain constant. If
-+ *                         NULL, they're skipped.
-+ *
-+ *     * node->mode: filesystem access modes
-+ *       node->uid:  filesystem owner uid
-+ *       node->gid:  filesystem owner gid
-+ *                   These fields must be set by the node owner during node
-+ *                   initialization. They must remain constant and may be
-+ *                   accessed by other callers to properly initialize
-+ *                   filesystem nodes.
-+ *
-+ *     * node->id: This is an unsigned 32bit integer allocated by an IDA. It is
-+ *                 always kept as small as possible during allocation and is
-+ *                 globally unique across all nodes allocated by this module. 0
-+ *                 is reserved as "not assigned" and is the default.
-+ *                 The ID is assigned during kdbus_node_link() and is kept until
-+ *                 the object is freed. Thus, the ID outlives the active
-+ *                 lifetime of a node. As long as you hold an object reference
-+ *                 to a node (and the node was linked once), the ID is valid and
-+ *                 unique.
-+ *
-+ *     * node->name: name of this node
-+ *       node->hash: 31bit hash-value of @name (range [2..INT_MAX-1])
-+ *                   These values follow the same lifetime rules as node->id.
-+ *                   They're initialized when the node is linked and then remain
-+ *                   constant until the last object reference is dropped.
-+ *                   Unlike the id, the name is only unique across all siblings
-+ *                   and only until the node is deactivated. Currently, the name
-+ *                   is even unique if linked but not activated, yet. This might
-+ *                   change in the future, though. Code should not rely on this.
-+ *
-+ *     * node->lock:     lock to protect node->children, node->rb, node->parent
-+ *     * node->parent: Reference to parent node. This is set during LINK time
-+ *                     and is dropped during destruction. You must not access
-+ *                     it unless you hold an active reference to the node or if
-+ *                     you know the node is dead.
-+ *     * node->children: rb-tree of all linked children of this node. You must
-+ *                       not access this directly, but use one of the iterator
-+ *                       or lookup helpers.
-+ */
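
The active-reference scheme described above can be modeled in isolation. The following standalone C11 program is a minimal sketch of the same inc-unless-negative/BIAS idea; the constant mirrors node.c, but the names and the trivial main() are illustrative only.

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NODE_BIAS (INT_MIN + 5)

static atomic_int active = 0;	/* 0 == active, no refs held */

static bool node_acquire(void)
{
	int v = atomic_load(&active);

	/* inc-unless-negative: only grant refs while the node is active */
	while (v >= 0) {
		if (atomic_compare_exchange_weak(&active, &v, v + 1))
			return true;
	}
	return false;
}

static void node_release(void)
{
	/* when the counter falls back to BIAS, all active refs are gone */
	if (atomic_fetch_sub(&active, 1) - 1 == NODE_BIAS)
		printf("drained: last active ref dropped after deactivation\n");
}

static void node_deactivate(void)
{
	/* mark inactive; existing refs keep counting above BIAS */
	atomic_fetch_add(&active, NODE_BIAS);
}

int main(void)
{
	bool a = node_acquire();	/* granted: node is active */
	node_deactivate();		/* no new refs from now on */
	bool b = node_acquire();	/* refused */

	printf("first=%d second=%d\n", a, b);
	if (a)
		node_release();		/* drops to BIAS -> "drained" */
	return 0;
}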
-+
-+/*
-+ * Bias values track states of "active references". They're all negative. If a
-+ * node is active, its active-ref-counter is >=0 and tracks all active
-+ * references. Once a node is deactivated, we subtract NODE_BIAS. This means the
-+ * counter is now negative but still counts the active references. Once it drops
-+ * to exactly NODE_BIAS, we know all active references were dropped. Exactly one
-+ * thread will change it to NODE_RELEASE now, perform cleanup and then put it
-+ * into NODE_DRAINED. Once drained, all other threads that tried deactivating
-+ * the node will now be woken up (thus, they wait until the node is fully done).
-+ * The initial state during node-setup is NODE_NEW. If a node is directly
-+ * deactivated without having ever been active, it is put into
-+ * NODE_RELEASE_DIRECT instead of NODE_BIAS. This tracks this one-bit state
-+ * across node-deactivation. The task putting it into NODE_RELEASE now knows
-+ * whether the node was active before or not.
-+ *
-+ * Some archs implement atomic_sub(v) with atomic_add(-v), so reserve INT_MIN
-+ * to avoid overflows if multiplied by -1.
-+ */
-+#define KDBUS_NODE_BIAS			(INT_MIN + 5)
-+#define KDBUS_NODE_RELEASE_DIRECT	(KDBUS_NODE_BIAS - 1)
-+#define KDBUS_NODE_RELEASE		(KDBUS_NODE_BIAS - 2)
-+#define KDBUS_NODE_DRAINED		(KDBUS_NODE_BIAS - 3)
-+#define KDBUS_NODE_NEW			(KDBUS_NODE_BIAS - 4)
-+
-+/* global unique ID mapping for kdbus nodes */
-+DEFINE_IDA(kdbus_node_ida);
-+
-+/**
-+ * kdbus_node_name_hash() - hash a name
-+ * @name:	The string to hash
-+ *
-+ * This computes the hash of @name. It is guaranteed to be in the range
-+ * [2..INT_MAX-1]. The values 0, 1 and INT_MAX are unused as they are reserved
-+ * for the filesystem code.
-+ *
-+ * Return: hash value of the passed string
-+ */
-+static unsigned int kdbus_node_name_hash(const char *name)
-+{
-+	unsigned int hash;
-+
-+	/* reserve hash numbers 0, 1 and >=INT_MAX for magic directories */
-+	hash = kdbus_strhash(name) & INT_MAX;
-+	if (hash < 2)
-+		hash += 2;
-+	if (hash >= INT_MAX)
-+		hash = INT_MAX - 1;
-+
-+	return hash;
-+}
-+
-+/**
-+ * kdbus_node_name_compare() - compare a name with a node's name
-+ * @hash:	hash of the string to compare the node with
-+ * @name:	name to compare the node with
-+ * @node:	node to compare the name with
-+ *
-+ * Return: 0 if @name and @hash exactly match the information in @node, or
-+ * an integer less than or greater than zero if @name is found, respectively,
-+ * to be less than or be greater than the string stored in @node.
-+ */
-+static int kdbus_node_name_compare(unsigned int hash, const char *name,
-+				   const struct kdbus_node *node)
-+{
-+	if (hash != node->hash)
-+		return hash - node->hash;
-+
-+	return strcmp(name, node->name);
-+}
-+
-+/**
-+ * kdbus_node_init() - initialize a kdbus_node
-+ * @node:	Pointer to the node to initialize
-+ * @type:	The type the node will have (KDBUS_NODE_*)
-+ *
-+ * The caller is responsible for allocating @node and initializing it to zero.
-+ * Once this call returns, you must use the kdbus_node_ref() and
-+ * kdbus_node_unref() functions to manage this node.
-+ */
-+void kdbus_node_init(struct kdbus_node *node, unsigned int type)
-+{
-+	atomic_set(&node->refcnt, 1);
-+	mutex_init(&node->lock);
-+	node->id = 0;
-+	node->type = type;
-+	RB_CLEAR_NODE(&node->rb);
-+	node->children = RB_ROOT;
-+	init_waitqueue_head(&node->waitq);
-+	atomic_set(&node->active, KDBUS_NODE_NEW);
-+}
-+
-+/**
-+ * kdbus_node_link() - link a node into the nodes system
-+ * @node:	Pointer to the node to initialize
-+ * @parent:	Pointer to a parent node, may be %NULL
-+ * @name:	The name of the node (or NULL if root node)
-+ *
-+ * This links a node into the hierarchy. This must not be called multiple times.
-+ * If @parent is NULL, the node becomes a new root node.
-+ *
-+ * This call will fail if @name is not unique across all its siblings or if no
-+ * ID could be allocated. You must not activate a node if linking failed! It is
-+ * safe to deactivate it, though.
-+ *
-+ * Once you linked a node, you must call kdbus_node_deactivate() before you drop
-+ * the last reference (even if you never activate the node).
-+ *
-+ * Return: 0 on success, negative error otherwise.
-+ */
-+int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
-+		    const char *name)
-+{
-+	int ret;
-+
-+	if (WARN_ON(node->type != KDBUS_NODE_DOMAIN && !parent))
-+		return -EINVAL;
-+
-+	if (WARN_ON(parent && !name))
-+		return -EINVAL;
-+
-+	if (name) {
-+		node->name = kstrdup(name, GFP_KERNEL);
-+		if (!node->name)
-+			return -ENOMEM;
-+
-+		node->hash = kdbus_node_name_hash(name);
-+	}
-+
-+	ret = ida_simple_get(&kdbus_node_ida, 1, 0, GFP_KERNEL);
-+	if (ret < 0)
-+		return ret;
-+
-+	node->id = ret;
-+	ret = 0;
-+
-+	if (parent) {
-+		struct rb_node **n, *prev;
-+
-+		if (!kdbus_node_acquire(parent))
-+			return -ESHUTDOWN;
-+
-+		mutex_lock(&parent->lock);
-+
-+		n = &parent->children.rb_node;
-+		prev = NULL;
-+
-+		while (*n) {
-+			struct kdbus_node *pos;
-+			int result;
-+
-+			pos = kdbus_node_from_rb(*n);
-+			prev = *n;
-+			result = kdbus_node_name_compare(node->hash,
-+							 node->name,
-+							 pos);
-+			if (result == 0) {
-+				ret = -EEXIST;
-+				goto exit_unlock;
-+			}
-+
-+			if (result < 0)
-+				n = &pos->rb.rb_left;
-+			else
-+				n = &pos->rb.rb_right;
-+		}
-+
-+		/* add new node and rebalance the tree */
-+		rb_link_node(&node->rb, prev, n);
-+		rb_insert_color(&node->rb, &parent->children);
-+		node->parent = kdbus_node_ref(parent);
-+
-+exit_unlock:
-+		mutex_unlock(&parent->lock);
-+		kdbus_node_release(parent);
-+	}
-+
-+	return ret;
-+}
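
Putting init, link and activation together, the expected call sequence looks roughly like the sketch below. struct my_obj and its callbacks are hypothetical, and the error path follows the rule stated above that a linked node must be deactivated before its last reference is dropped.

#include <linux/slab.h>
#include "node.h"

struct my_obj {
	struct kdbus_node node;
};

static void my_obj_free(struct kdbus_node *node)
{
	kfree(container_of(node, struct my_obj, node));
}

static int my_obj_create(struct kdbus_node *parent, const char *name)
{
	struct my_obj *o;
	int ret;

	o = kzalloc(sizeof(*o), GFP_KERNEL);
	if (!o)
		return -ENOMEM;

	kdbus_node_init(&o->node, KDBUS_NODE_ENDPOINT);
	o->node.free_cb = my_obj_free;

	ret = kdbus_node_link(&o->node, parent, name);
	if (ret < 0 || !kdbus_node_activate(&o->node)) {
		/* never activate after a failed link; deactivate + unref */
		kdbus_node_deactivate(&o->node);
		kdbus_node_unref(&o->node);
		return ret < 0 ? ret : -ESHUTDOWN;
	}

	return 0;
}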
-+
-+/**
-+ * kdbus_node_ref() - Acquire object reference
-+ * @node:	node to acquire reference to (or NULL)
-+ *
-+ * This acquires a new reference to @node. You must already own a reference when
-+ * calling this!
-+ * If @node is NULL, this is a no-op.
-+ *
-+ * Return: @node is returned
-+ */
-+struct kdbus_node *kdbus_node_ref(struct kdbus_node *node)
-+{
-+	if (node)
-+		atomic_inc(&node->refcnt);
-+	return node;
-+}
-+
-+/**
-+ * kdbus_node_unref() - Drop object reference
-+ * @node:	node to drop reference to (or NULL)
-+ *
-+ * This drops an object reference to @node. You must not access the node if you
-+ * no longer own a reference.
-+ * If the ref-count drops to 0, the object will be destroyed (->free_cb will be
-+ * called).
-+ *
-+ * If you linked or activated the node, you must deactivate the node before you
-+ * drop your last reference! If you didn't link or activate the node, you can
-+ * drop any reference you want.
-+ *
-+ * Note that this calls into ->free_cb() and thus _might_ sleep. The ->free_cb()
-+ * callbacks must not acquire any outer locks, though. So you can safely drop
-+ * references while holding locks.
-+ *
-+ * If @node is NULL, this is a no-op.
-+ *
-+ * Return: This always returns NULL
-+ */
-+struct kdbus_node *kdbus_node_unref(struct kdbus_node *node)
-+{
-+	if (node && atomic_dec_and_test(&node->refcnt)) {
-+		struct kdbus_node safe = *node;
-+
-+		WARN_ON(atomic_read(&node->active) != KDBUS_NODE_DRAINED);
-+		WARN_ON(!RB_EMPTY_NODE(&node->rb));
-+
-+		if (node->free_cb)
-+			node->free_cb(node);
-+		if (safe.id > 0)
-+			ida_simple_remove(&kdbus_node_ida, safe.id);
-+
-+		kfree(safe.name);
-+
-+		/*
-+		 * kdbusfs relies on the parent to be available even after the
-+		 * node was deactivated and unlinked. Therefore, we pin it
-+		 * until a node is destroyed.
-+		 */
-+		kdbus_node_unref(safe.parent);
-+	}
-+
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_node_is_active() - test whether a node is active
-+ * @node:	node to test
-+ *
-+ * This checks whether @node is active. That means, @node was linked and
-+ * activated by the node owner and hasn't been deactivated, yet. If, and only
-+ * if, a node is active, kdbus_node_acquire() will be able to acquire active
-+ * references.
-+ *
-+ * Note that this function does not give any lifetime guarantees. After this
-+ * call returns, the node might be deactivated immediately. Normally, what you
-+ * want is to acquire a real active reference via kdbus_node_acquire().
-+ *
-+ * Return: true if @node is active, false otherwise
-+ */
-+bool kdbus_node_is_active(struct kdbus_node *node)
-+{
-+	return atomic_read(&node->active) >= 0;
-+}
-+
-+/**
-+ * kdbus_node_is_deactivated() - test whether a node was already deactivated
-+ * @node:	node to test
-+ *
-+ * This checks whether kdbus_node_deactivate() was called on @node. Note that
-+ * this might be true even if you never deactivated the node directly, but only
-+ * one of its ancestors.
-+ *
-+ * Note that even if this returns 'false', the node might get deactivated
-+ * immediately after the call returns.
-+ *
-+ * Return: true if @node was already deactivated, false if not
-+ */
-+bool kdbus_node_is_deactivated(struct kdbus_node *node)
-+{
-+	int v;
-+
-+	v = atomic_read(&node->active);
-+	return v != KDBUS_NODE_NEW && v < 0;
-+}
-+
-+/**
-+ * kdbus_node_activate() - activate a node
-+ * @node:	node to activate
-+ *
-+ * This marks @node as active if, and only if, the node wasn't activated nor
-+ * deactivated, yet, and the parent is still active. Any but the first call to
-+ * kdbus_node_activate() is a no-op.
-+ * If you called kdbus_node_deactivate() before, then even the first call to
-+ * kdbus_node_activate() will be a no-op.
-+ *
-+ * This call doesn't give any lifetime guarantees. The node might get
-+ * deactivated immediately after this call returns. Or the parent might already
-+ * be deactivated, which will make this call a no-op.
-+ *
-+ * If this call successfully activated a node, it will take an object reference
-+ * to it. This reference is dropped after the node is deactivated. Therefore,
-+ * the object owner can safely drop their reference to @node iff they know that
-+ * its parent node will get deactivated at some point. Once the parent node is
-+ * deactivated, it will deactivate all its children and thus drop this reference
-+ * again.
-+ *
-+ * Return: True if this call successfully activated the node, otherwise false.
-+ *         Note that this might return false, even if the node is still active
-+ *         (eg., if you called this a second time).
-+ */
-+bool kdbus_node_activate(struct kdbus_node *node)
-+{
-+	bool res = false;
-+
-+	mutex_lock(&node->lock);
-+	if (atomic_read(&node->active) == KDBUS_NODE_NEW) {
-+		atomic_sub(KDBUS_NODE_NEW, &node->active);
-+		/* activated nodes have ref +1 */
-+		kdbus_node_ref(node);
-+		res = true;
-+	}
-+	mutex_unlock(&node->lock);
-+
-+	return res;
-+}
-+
-+/**
-+ * kdbus_node_deactivate() - deactivate a node
-+ * @node:	The node to deactivate.
-+ *
-+ * This function recursively deactivates this node and all its children. It
-+ * returns only once all children and the node itself were recursively disabled
-+ * (even if you call this function multiple times in parallel).
-+ *
-+ * It is safe to call this function on _any_ node that was initialized _any_
-+ * number of times.
-+ *
-+ * This call may sleep, as it waits for all active references to be dropped.
-+ */
-+void kdbus_node_deactivate(struct kdbus_node *node)
-+{
-+	struct kdbus_node *pos, *child;
-+	struct rb_node *rb;
-+	int v_pre, v_post;
-+
-+	pos = node;
-+
-+	/*
-+	 * To avoid recursion, we perform back-tracking while deactivating
-+	 * nodes. For each node we enter, we first mark the active-counter as
-+	 * deactivated by adding BIAS. If the node has children, we set the first
-+	 * child as current position and start over. If the node has no
-+	 * children, we drain the node by waiting for all active refs to be
-+	 * dropped and then releasing the node.
-+	 *
-+	 * After the node is released, we set its parent as current position
-+	 * and start over. If the current position was the initial node, we're
-+	 * done.
-+	 *
-+	 * Note that this function can be called in parallel by multiple
-+	 * callers. We make sure that each node is only released once, and any
-+	 * racing caller will wait until the other thread fully released that
-+	 * node.
-+	 */
-+
-+	for (;;) {
-+		/*
-+		 * Add BIAS to node->active to mark it as inactive. If it was
-+		 * never active before, immediately mark it as RELEASE_DIRECT
-+		 * so we remember this state.
-+		 * We cannot remember v_pre as we might iterate into the
-+		 * children, overwriting v_pre, before we can release our node.
-+		 */
-+		mutex_lock(&pos->lock);
-+		v_pre = atomic_read(&pos->active);
-+		if (v_pre >= 0)
-+			atomic_add_return(KDBUS_NODE_BIAS, &pos->active);
-+		else if (v_pre == KDBUS_NODE_NEW)
-+			atomic_set(&pos->active, KDBUS_NODE_RELEASE_DIRECT);
-+		mutex_unlock(&pos->lock);
-+
-+		/* wait until all active references were dropped */
-+		wait_event(pos->waitq,
-+			   atomic_read(&pos->active) <= KDBUS_NODE_BIAS);
-+
-+		mutex_lock(&pos->lock);
-+		/* recurse into first child if any */
-+		rb = rb_first(&pos->children);
-+		if (rb) {
-+			child = kdbus_node_ref(kdbus_node_from_rb(rb));
-+			mutex_unlock(&pos->lock);
-+			pos = child;
-+			continue;
-+		}
-+
-+		/* mark object as RELEASE */
-+		v_post = atomic_read(&pos->active);
-+		if (v_post == KDBUS_NODE_BIAS ||
-+		    v_post == KDBUS_NODE_RELEASE_DIRECT)
-+			atomic_set(&pos->active, KDBUS_NODE_RELEASE);
-+		mutex_unlock(&pos->lock);
-+
-+		/*
-+		 * If this is the thread that marked the object as RELEASE, we
-+		 * perform the actual release. Otherwise, we wait until the
-+		 * release is done and the node is marked as DRAINED.
-+		 */
-+		if (v_post == KDBUS_NODE_BIAS ||
-+		    v_post == KDBUS_NODE_RELEASE_DIRECT) {
-+			if (pos->release_cb)
-+				pos->release_cb(pos, v_post == KDBUS_NODE_BIAS);
-+
-+			if (pos->parent) {
-+				mutex_lock(&pos->parent->lock);
-+				if (!RB_EMPTY_NODE(&pos->rb)) {
-+					rb_erase(&pos->rb,
-+						 &pos->parent->children);
-+					RB_CLEAR_NODE(&pos->rb);
-+				}
-+				mutex_unlock(&pos->parent->lock);
-+			}
-+
-+			/* mark as DRAINED */
-+			atomic_set(&pos->active, KDBUS_NODE_DRAINED);
-+			wake_up_all(&pos->waitq);
-+
-+			/* drop VFS cache */
-+			kdbus_fs_flush(pos);
-+
-+			/*
-+			 * If the node was activated and someone subtracted BIAS
-+			 * from it to deactivate it, we, and only us, are
-+			 * responsible to release the extra ref-count that was
-+			 * taken once in kdbus_node_activate().
-+			 * If the node was never activated, no-one ever
-+			 * subtracted BIAS, but instead skipped that state and
-+			 * immediately went to NODE_RELEASE_DIRECT. In that case
-+			 * we must not drop the reference.
-+			 */
-+			if (v_post == KDBUS_NODE_BIAS)
-+				kdbus_node_unref(pos);
-+		} else {
-+			/* wait until object is DRAINED */
-+			wait_event(pos->waitq,
-+			    atomic_read(&pos->active) == KDBUS_NODE_DRAINED);
-+		}
-+
-+		/*
-+		 * We're done with the current node. Continue on its parent
-+		 * again, which will try deactivating its next child, or itself
-+		 * if no child is left.
-+		 * If we've reached our initial node again, we are done and
-+		 * can safely return.
-+		 */
-+		if (pos == node)
-+			break;
-+
-+		child = pos;
-+		pos = pos->parent;
-+		kdbus_node_unref(child);
-+	}
-+}
-+
-+/**
-+ * kdbus_node_acquire() - Acquire an active ref on a node
-+ * @node:	The node
-+ *
-+ * This acquires an active-reference to @node. This will only succeed if the
-+ * node is active. You must release this active reference via
-+ * kdbus_node_release() again.
-+ *
-+ * See the introduction to "active references" for more details.
-+ *
-+ * Return: %true if @node was non-NULL and active
-+ */
-+bool kdbus_node_acquire(struct kdbus_node *node)
-+{
-+	return node && atomic_inc_unless_negative(&node->active);
-+}
-+
-+/**
-+ * kdbus_node_release() - Release an active ref on a node
-+ * @node:	The node
-+ *
-+ * This releases an active reference that was previously acquired via
-+ * kdbus_node_acquire(). See kdbus_node_acquire() for details.
-+ */
-+void kdbus_node_release(struct kdbus_node *node)
-+{
-+	if (node && atomic_dec_return(&node->active) == KDBUS_NODE_BIAS)
-+		wake_up(&node->waitq);
-+}
-+
-+/**
-+ * kdbus_node_find_child() - Find child by name
-+ * @node:	parent node to search through
-+ * @name:	name of child node
-+ *
-+ * This searches through all children of @node for a child-node with name @name.
-+ * If not found, or if the child is deactivated, NULL is returned. Otherwise,
-+ * the child is acquired and a new reference is returned.
-+ *
-+ * If you're done with the child, you need to release it and drop your
-+ * reference.
-+ *
-+ * This function does not acquire the parent node. However, if the parent was
-+ * already deactivated, then kdbus_node_deactivate() will, at some point, also
-+ * deactivate the child. Therefore, we can rely on the explicit ordering during
-+ * deactivation.
-+ *
-+ * Return: Reference to acquired child node, or NULL if not found / not active.
-+ */
-+struct kdbus_node *kdbus_node_find_child(struct kdbus_node *node,
-+					 const char *name)
-+{
-+	struct kdbus_node *child;
-+	struct rb_node *rb;
-+	unsigned int hash;
-+	int ret;
-+
-+	hash = kdbus_node_name_hash(name);
-+
-+	mutex_lock(&node->lock);
-+	rb = node->children.rb_node;
-+	while (rb) {
-+		child = kdbus_node_from_rb(rb);
-+		ret = kdbus_node_name_compare(hash, name, child);
-+		if (ret < 0)
-+			rb = rb->rb_left;
-+		else if (ret > 0)
-+			rb = rb->rb_right;
-+		else
-+			break;
-+	}
-+	if (rb && kdbus_node_acquire(child))
-+		kdbus_node_ref(child);
-+	else
-+		child = NULL;
-+	mutex_unlock(&node->lock);
-+
-+	return child;
-+}
-+
-+static struct kdbus_node *node_find_closest_unlocked(struct kdbus_node *node,
-+						     unsigned int hash,
-+						     const char *name)
-+{
-+	struct kdbus_node *n, *pos = NULL;
-+	struct rb_node *rb;
-+	int res;
-+
-+	/*
-+	 * Find the closest child with ``node->hash >= hash'', or, if @name is
-+	 * valid, ``node->name >= name'' (where '>=' is the lex. order).
-+	 */
-+
-+	rb = node->children.rb_node;
-+	while (rb) {
-+		n = kdbus_node_from_rb(rb);
-+
-+		if (name)
-+			res = kdbus_node_name_compare(hash, name, n);
-+		else
-+			res = hash - n->hash;
-+
-+		if (res <= 0) {
-+			rb = rb->rb_left;
-+			pos = n;
-+		} else { /* ``hash > n->hash'', ``name > n->name'' */
-+			rb = rb->rb_right;
-+		}
-+	}
-+
-+	return pos;
-+}
-+
-+/**
-+ * kdbus_node_find_closest() - Find closest child-match
-+ * @node:	parent node to search through
-+ * @hash:	hash value to find closest match for
-+ *
-+ * Find the closest child of @node with a hash greater than or equal to @hash.
-+ * The closest match is the left-most child of @node with this property. Which
-+ * means, it is the first child with that hash returned by
-+ * kdbus_node_next_child(), if you'd iterate the whole parent node.
-+ *
-+ * Return: Reference to acquired child, or NULL if none found.
-+ */
-+struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
-+					   unsigned int hash)
-+{
-+	struct kdbus_node *child;
-+	struct rb_node *rb;
-+
-+	mutex_lock(&node->lock);
-+
-+	child = node_find_closest_unlocked(node, hash, NULL);
-+	while (child && !kdbus_node_acquire(child)) {
-+		rb = rb_next(&child->rb);
-+		if (rb)
-+			child = kdbus_node_from_rb(rb);
-+		else
-+			child = NULL;
-+	}
-+	kdbus_node_ref(child);
-+
-+	mutex_unlock(&node->lock);
-+
-+	return child;
-+}
-+
-+/**
-+ * kdbus_node_next_child() - Acquire next child
-+ * @node:	parent node
-+ * @prev:	previous child-node position or NULL
-+ *
-+ * This function returns a reference to the next active child of @node, after
-+ * the passed position @prev. If @prev is NULL, a reference to the first active
-+ * child is returned. If no more active children are found, NULL is returned.
-+ *
-+ * This function acquires the next child it returns. If you're done with the
-+ * returned pointer, you need to release _and_ unref it.
-+ *
-+ * The passed in pointer @prev is not modified by this function, and it does
-+ * *not* have to be active. If @prev was acquired via different means, or if it
-+ * was unlinked from its parent before you pass it in, then this iterator will
-+ * still return the next active child (it will have to search through the
-+ * rb-tree based on the node-name, though).
-+ * However, @prev must not be linked to a different parent than @node!
-+ *
-+ * Return: Reference to next acquired child, or NULL if at the end.
-+ */
-+struct kdbus_node *kdbus_node_next_child(struct kdbus_node *node,
-+					 struct kdbus_node *prev)
-+{
-+	struct kdbus_node *pos = NULL;
-+	struct rb_node *rb;
-+
-+	mutex_lock(&node->lock);
-+
-+	if (!prev) {
-+		/*
-+		 * New iteration; find first node in rb-tree and try to acquire
-+		 * it. If we got it, directly return it as first element.
-+		 * Otherwise, the loop below will find the next active node.
-+		 */
-+		rb = rb_first(&node->children);
-+		if (!rb)
-+			goto exit;
-+		pos = kdbus_node_from_rb(rb);
-+		if (kdbus_node_acquire(pos))
-+			goto exit;
-+	} else if (RB_EMPTY_NODE(&prev->rb)) {
-+		/*
-+		 * The current iterator is no longer linked to the rb-tree. Use
-+		 * its hash value and name to find the next _higher_ node and
-+		 * acquire it. If we got it, return it as next element.
-+		 * Otherwise, the loop below will find the next active node.
-+		 */
-+		pos = node_find_closest_unlocked(node, prev->hash, prev->name);
-+		if (!pos)
-+			goto exit;
-+		if (kdbus_node_acquire(pos))
-+			goto exit;
-+	} else {
-+		/*
-+		 * The current iterator is still linked to the parent. Set it
-+		 * as current position and use the loop below to find the next
-+		 * active element.
-+		 */
-+		pos = prev;
-+	}
-+
-+	/* @pos was already returned or is inactive; find next active node */
-+	do {
-+		rb = rb_next(&pos->rb);
-+		if (rb)
-+			pos = kdbus_node_from_rb(rb);
-+		else
-+			pos = NULL;
-+	} while (pos && !kdbus_node_acquire(pos));
-+
-+exit:
-+	/* @pos is NULL or acquired. Take ref if non-NULL and return it */
-+	kdbus_node_ref(pos);
-+	mutex_unlock(&node->lock);
-+	return pos;
-+}
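
A typical consumer of the iterator above would walk all active children as in the following sketch (the function itself is hypothetical). Note how each returned child is released right away but kept referenced until the next iteration, matching the release-and-unref contract documented on kdbus_node_next_child().

#include <linux/printk.h>
#include "node.h"

static void my_dump_children(struct kdbus_node *parent)
{
	struct kdbus_node *child, *prev = NULL;

	while ((child = kdbus_node_next_child(parent, prev))) {
		pr_info("child '%s' (id %u)\n", child->name, child->id);

		/* done using the child: drop the active ref right away... */
		kdbus_node_release(child);

		/* ...but keep the object ref so it can serve as iterator */
		kdbus_node_unref(prev);
		prev = child;
	}

	kdbus_node_unref(prev);	/* no-op if the tree was empty */
}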
-diff --git a/ipc/kdbus/node.h b/ipc/kdbus/node.h
-new file mode 100644
-index 0000000..970e02b
---- /dev/null
-+++ b/ipc/kdbus/node.h
-@@ -0,0 +1,86 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_NODE_H
-+#define __KDBUS_NODE_H
-+
-+#include <linux/atomic.h>
-+#include <linux/kernel.h>
-+#include <linux/mutex.h>
-+#include <linux/wait.h>
-+
-+struct kdbus_node;
-+
-+enum kdbus_node_type {
-+	KDBUS_NODE_DOMAIN,
-+	KDBUS_NODE_CONTROL,
-+	KDBUS_NODE_BUS,
-+	KDBUS_NODE_ENDPOINT,
-+};
-+
-+typedef void (*kdbus_node_free_t) (struct kdbus_node *node);
-+typedef void (*kdbus_node_release_t) (struct kdbus_node *node, bool was_active);
-+
-+struct kdbus_node {
-+	atomic_t refcnt;
-+	atomic_t active;
-+	wait_queue_head_t waitq;
-+
-+	/* static members */
-+	unsigned int type;
-+	kdbus_node_free_t free_cb;
-+	kdbus_node_release_t release_cb;
-+	umode_t mode;
-+	kuid_t uid;
-+	kgid_t gid;
-+
-+	/* valid once linked */
-+	char *name;
-+	unsigned int hash;
-+	unsigned int id;
-+	struct kdbus_node *parent; /* may be NULL */
-+
-+	/* valid iff active */
-+	struct mutex lock;
-+	struct rb_node rb;
-+	struct rb_root children;
-+};
-+
-+#define kdbus_node_from_rb(_node) rb_entry((_node), struct kdbus_node, rb)
-+
-+extern struct ida kdbus_node_ida;
-+
-+void kdbus_node_init(struct kdbus_node *node, unsigned int type);
-+
-+int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
-+		    const char *name);
-+
-+struct kdbus_node *kdbus_node_ref(struct kdbus_node *node);
-+struct kdbus_node *kdbus_node_unref(struct kdbus_node *node);
-+
-+bool kdbus_node_is_active(struct kdbus_node *node);
-+bool kdbus_node_is_deactivated(struct kdbus_node *node);
-+bool kdbus_node_activate(struct kdbus_node *node);
-+void kdbus_node_deactivate(struct kdbus_node *node);
-+
-+bool kdbus_node_acquire(struct kdbus_node *node);
-+void kdbus_node_release(struct kdbus_node *node);
-+
-+struct kdbus_node *kdbus_node_find_child(struct kdbus_node *node,
-+					 const char *name);
-+struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
-+					   unsigned int hash);
-+struct kdbus_node *kdbus_node_next_child(struct kdbus_node *node,
-+					 struct kdbus_node *prev);
-+
-+#endif
-diff --git a/ipc/kdbus/notify.c b/ipc/kdbus/notify.c
-new file mode 100644
-index 0000000..375758c
---- /dev/null
-+++ b/ipc/kdbus/notify.c
-@@ -0,0 +1,204 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/fs.h>
-+#include <linux/init.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/spinlock.h>
-+#include <linux/sched.h>
-+#include <linux/slab.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "domain.h"
-+#include "endpoint.h"
-+#include "item.h"
-+#include "message.h"
-+#include "notify.h"
-+
-+static inline void kdbus_notify_add_tail(struct kdbus_staging *staging,
-+					 struct kdbus_bus *bus)
-+{
-+	spin_lock(&bus->notify_lock);
-+	list_add_tail(&staging->notify_entry, &bus->notify_list);
-+	spin_unlock(&bus->notify_lock);
-+}
-+
-+static int kdbus_notify_reply(struct kdbus_bus *bus, u64 id,
-+			      u64 cookie, u64 msg_type)
-+{
-+	struct kdbus_staging *s;
-+
-+	s = kdbus_staging_new_kernel(bus, id, cookie, 0, msg_type);
-+	if (IS_ERR(s))
-+		return PTR_ERR(s);
-+
-+	kdbus_notify_add_tail(s, bus);
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_notify_reply_timeout() - queue a timeout reply
-+ * @bus:		Bus which queues the messages
-+ * @id:			The destination's connection ID
-+ * @cookie:		The cookie to set in the reply.
-+ *
-+ * Queues a message that has a KDBUS_ITEM_REPLY_TIMEOUT item attached.
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_notify_reply_timeout(struct kdbus_bus *bus, u64 id, u64 cookie)
-+{
-+	return kdbus_notify_reply(bus, id, cookie, KDBUS_ITEM_REPLY_TIMEOUT);
-+}
-+
-+/**
-+ * kdbus_notify_reply_dead() - queue a 'dead' reply
-+ * @bus:		Bus which queues the messages
-+ * @id:			The destination's connection ID
-+ * @cookie:		The cookie to set in the reply.
-+ *
-+ * Queues a message that has a KDBUS_ITEM_REPLY_DEAD item attached.
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_notify_reply_dead(struct kdbus_bus *bus, u64 id, u64 cookie)
-+{
-+	return kdbus_notify_reply(bus, id, cookie, KDBUS_ITEM_REPLY_DEAD);
-+}
-+
-+/**
-+ * kdbus_notify_name_change() - queue a notification about a name owner change
-+ * @bus:		Bus which queues the messages
-+ * @type:		The type of the notification; KDBUS_ITEM_NAME_ADD,
-+ *			KDBUS_ITEM_NAME_CHANGE or KDBUS_ITEM_NAME_REMOVE
-+ * @old_id:		The id of the connection that used to own the name
-+ * @new_id:		The id of the new owner connection
-+ * @old_flags:		The flags to pass in the KDBUS_ITEM flags field for
-+ *			the old owner
-+ * @new_flags:		The flags to pass in the KDBUS_ITEM flags field for
-+ *			the new owner
-+ * @name:		The name that was removed or assigned to a new owner
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_notify_name_change(struct kdbus_bus *bus, u64 type,
-+			     u64 old_id, u64 new_id,
-+			     u64 old_flags, u64 new_flags,
-+			     const char *name)
-+{
-+	size_t name_len, extra_size;
-+	struct kdbus_staging *s;
-+
-+	name_len = strlen(name) + 1;
-+	extra_size = sizeof(struct kdbus_notify_name_change) + name_len;
-+
-+	s = kdbus_staging_new_kernel(bus, KDBUS_DST_ID_BROADCAST, 0,
-+				     extra_size, type);
-+	if (IS_ERR(s))
-+		return PTR_ERR(s);
-+
-+	s->notify->name_change.old_id.id = old_id;
-+	s->notify->name_change.old_id.flags = old_flags;
-+	s->notify->name_change.new_id.id = new_id;
-+	s->notify->name_change.new_id.flags = new_flags;
-+	memcpy(s->notify->name_change.name, name, name_len);
-+
-+	kdbus_notify_add_tail(s, bus);
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_notify_id_change() - queue a notification about a unique ID change
-+ * @bus:		Bus which queues the messages
-+ * @type:		The type of the notification; KDBUS_ITEM_ID_ADD or
-+ *			KDBUS_ITEM_ID_REMOVE
-+ * @id:			The id of the connection that was added or removed
-+ * @flags:		The flags to pass in the KDBUS_ITEM flags field
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags)
-+{
-+	struct kdbus_staging *s;
-+	size_t extra_size;
-+
-+	extra_size = sizeof(struct kdbus_notify_id_change);
-+	s = kdbus_staging_new_kernel(bus, KDBUS_DST_ID_BROADCAST, 0,
-+				     extra_size, type);
-+	if (IS_ERR(s))
-+		return PTR_ERR(s);
-+
-+	s->notify->id_change.id = id;
-+	s->notify->id_change.flags = flags;
-+
-+	kdbus_notify_add_tail(s, bus);
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_notify_flush() - send a list of collected messages
-+ * @bus:		Bus which queues the messages
-+ *
-+ * The list is empty after sending the messages.
-+ */
-+void kdbus_notify_flush(struct kdbus_bus *bus)
-+{
-+	LIST_HEAD(notify_list);
-+	struct kdbus_staging *s, *tmp;
-+
-+	mutex_lock(&bus->notify_flush_lock);
-+	down_read(&bus->name_registry->rwlock);
-+
-+	spin_lock(&bus->notify_lock);
-+	list_splice_init(&bus->notify_list, &notify_list);
-+	spin_unlock(&bus->notify_lock);
-+
-+	list_for_each_entry_safe(s, tmp, &notify_list, notify_entry) {
-+		if (s->msg->dst_id != KDBUS_DST_ID_BROADCAST) {
-+			struct kdbus_conn *conn;
-+
-+			conn = kdbus_bus_find_conn_by_id(bus, s->msg->dst_id);
-+			if (conn) {
-+				kdbus_bus_eavesdrop(bus, NULL, s);
-+				kdbus_conn_entry_insert(NULL, conn, s, NULL,
-+							NULL);
-+				kdbus_conn_unref(conn);
-+			}
-+		} else {
-+			kdbus_bus_broadcast(bus, NULL, s);
-+		}
-+
-+		list_del(&s->notify_entry);
-+		kdbus_staging_free(s);
-+	}
-+
-+	up_read(&bus->name_registry->rwlock);
-+	mutex_unlock(&bus->notify_flush_lock);
-+}
-+
-+/**
-+ * kdbus_notify_free() - free a list of collected messages
-+ * @bus:		Bus which queues the messages
-+ */
-+void kdbus_notify_free(struct kdbus_bus *bus)
-+{
-+	struct kdbus_staging *s, *tmp;
-+
-+	list_for_each_entry_safe(s, tmp, &bus->notify_list, notify_entry) {
-+		list_del(&s->notify_entry);
-+		kdbus_staging_free(s);
-+	}
-+}
-diff --git a/ipc/kdbus/notify.h b/ipc/kdbus/notify.h
-new file mode 100644
-index 0000000..03df464
---- /dev/null
-+++ b/ipc/kdbus/notify.h
-@@ -0,0 +1,30 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_NOTIFY_H
-+#define __KDBUS_NOTIFY_H
-+
-+struct kdbus_bus;
-+
-+int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags);
-+int kdbus_notify_reply_timeout(struct kdbus_bus *bus, u64 id, u64 cookie);
-+int kdbus_notify_reply_dead(struct kdbus_bus *bus, u64 id, u64 cookie);
-+int kdbus_notify_name_change(struct kdbus_bus *bus, u64 type,
-+			     u64 old_id, u64 new_id,
-+			     u64 old_flags, u64 new_flags,
-+			     const char *name);
-+void kdbus_notify_flush(struct kdbus_bus *bus);
-+void kdbus_notify_free(struct kdbus_bus *bus);
-+
-+#endif
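
Taken together, these helpers implement a queue-then-flush pattern:
notifications are first collected on the bus and only sent out in one batch
once kdbus_notify_flush() runs. A minimal usage sketch, assuming a freshly
registered connection conn with the usual id and flags fields (error
handling shortened):

	int ret;

	/* queue an ID-add notification for the new connection */
	ret = kdbus_notify_id_change(bus, KDBUS_ITEM_ID_ADD,
				     conn->id, conn->flags);
	if (ret < 0)
		return ret;

	/* send everything collected so far; the list is empty afterwards */
	kdbus_notify_flush(bus);
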
-diff --git a/ipc/kdbus/policy.c b/ipc/kdbus/policy.c
-new file mode 100644
-index 0000000..f2618e15
---- /dev/null
-+++ b/ipc/kdbus/policy.c
-@@ -0,0 +1,489 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/dcache.h>
-+#include <linux/fs.h>
-+#include <linux/init.h>
-+#include <linux/mutex.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "domain.h"
-+#include "item.h"
-+#include "names.h"
-+#include "policy.h"
-+
-+#define KDBUS_POLICY_HASH_SIZE	64
-+
-+/**
-+ * struct kdbus_policy_db_entry_access - a database entry access item
-+ * @type:		One of KDBUS_POLICY_ACCESS_* types
-+ * @access:		Access to grant. One of KDBUS_POLICY_*
-+ * @uid:		For KDBUS_POLICY_ACCESS_USER, the global uid
-+ * @gid:		For KDBUS_POLICY_ACCESS_GROUP, the global gid
-+ * @list:		List entry item for the entry's list
-+ *
-+ * This is the internal version of struct kdbus_policy_db_access.
-+ */
-+struct kdbus_policy_db_entry_access {
-+	u8 type;		/* USER, GROUP, WORLD */
-+	u8 access;		/* OWN, TALK, SEE */
-+	union {
-+		kuid_t uid;	/* global uid */
-+		kgid_t gid;	/* global gid */
-+	};
-+	struct list_head list;
-+};
-+
-+/**
-+ * struct kdbus_policy_db_entry - a policy database entry
-+ * @name:		The name to match the policy entry against
-+ * @hentry:		The hash entry for the database's entries_hash
-+ * @access_list:	List head for keeping track of the entry's
-+ *			access items.
-+ * @owner:		The owner of this entry. Can be a kdbus_conn or
-+ *			a kdbus_ep object.
-+ * @wildcard:		The name is a wildcard, i.e., it ends with '.*'
-+ */
-+struct kdbus_policy_db_entry {
-+	char *name;
-+	struct hlist_node hentry;
-+	struct list_head access_list;
-+	const void *owner;
-+	bool wildcard:1;
-+};
-+
-+static void kdbus_policy_entry_free(struct kdbus_policy_db_entry *e)
-+{
-+	struct kdbus_policy_db_entry_access *a, *tmp;
-+
-+	list_for_each_entry_safe(a, tmp, &e->access_list, list) {
-+		list_del(&a->list);
-+		kfree(a);
-+	}
-+
-+	kfree(e->name);
-+	kfree(e);
-+}
-+
-+static unsigned int kdbus_strnhash(const char *str, size_t len)
-+{
-+	unsigned long hash = init_name_hash();
-+
-+	while (len--)
-+		hash = partial_name_hash(*str++, hash);
-+
-+	return end_name_hash(hash);
-+}
-+
-+static const struct kdbus_policy_db_entry *
-+kdbus_policy_lookup(struct kdbus_policy_db *db, const char *name, u32 hash)
-+{
-+	struct kdbus_policy_db_entry *e;
-+	const char *dot;
-+	size_t len;
-+
-+	/* find exact match */
-+	hash_for_each_possible(db->entries_hash, e, hentry, hash)
-+		if (strcmp(e->name, name) == 0 && !e->wildcard)
-+			return e;
-+
-+	/* find wildcard match */
-+
-+	dot = strrchr(name, '.');
-+	if (!dot)
-+		return NULL;
-+
-+	len = dot - name;
-+	hash = kdbus_strnhash(name, len);
-+
-+	hash_for_each_possible(db->entries_hash, e, hentry, hash)
-+		if (e->wildcard && !strncmp(e->name, name, len) &&
-+		    !e->name[len])
-+			return e;
-+
-+	return NULL;
-+}
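
To make the two-pass lookup above concrete: a wildcard entry is stored with
its '.*' suffix cut off (see kdbus_policy_set() below), so the second pass
hashes only the part of the queried name before its last dot. A worked
example, with illustrative names:

	/*
	 * stored entry:  e->name = "com.example", e->wildcard = true
	 * queried name:  name    = "com.example.foo"
	 *
	 *   dot  = strrchr(name, '.');        points at ".foo"
	 *   len  = dot - name;                11, i.e. "com.example"
	 *   hash = kdbus_strnhash(name, len);
	 *
	 * strncmp() plus the e->name[len] terminator check then confirm
	 * that the stored prefix matches exactly, so "com.example.foo"
	 * falls under the "com.example.*" policy.
	 */
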
-+
-+/**
-+ * kdbus_policy_db_clear() - release all memory from a policy db
-+ * @db:		The policy database
-+ */
-+void kdbus_policy_db_clear(struct kdbus_policy_db *db)
-+{
-+	struct kdbus_policy_db_entry *e;
-+	struct hlist_node *tmp;
-+	unsigned int i;
-+
-+	/* purge entries */
-+	down_write(&db->entries_rwlock);
-+	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry) {
-+		hash_del(&e->hentry);
-+		kdbus_policy_entry_free(e);
-+	}
-+	up_write(&db->entries_rwlock);
-+}
-+
-+/**
-+ * kdbus_policy_db_init() - initialize a new policy database
-+ * @db:		The location of the database
-+ *
-+ * This initializes a new policy-db. The underlying memory must have been
-+ * cleared to zero by the caller.
-+ */
-+void kdbus_policy_db_init(struct kdbus_policy_db *db)
-+{
-+	hash_init(db->entries_hash);
-+	init_rwsem(&db->entries_rwlock);
-+}
-+
-+/**
-+ * kdbus_policy_query_unlocked() - Query the policy database
-+ * @db:		Policy database
-+ * @cred:	Credentials to test against
-+ * @name:	Name to query
-+ * @hash:	Hash value of @name
-+ *
-+ * Same as kdbus_policy_query() but requires the caller to lock the policy
-+ * database against concurrent writes.
-+ *
-+ * Return: The highest KDBUS_POLICY_* access type found, or -EPERM if none.
-+ */
-+int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
-+				const struct cred *cred, const char *name,
-+				unsigned int hash)
-+{
-+	struct kdbus_policy_db_entry_access *a;
-+	const struct kdbus_policy_db_entry *e;
-+	int i, highest = -EPERM;
-+
-+	e = kdbus_policy_lookup(db, name, hash);
-+	if (!e)
-+		return -EPERM;
-+
-+	list_for_each_entry(a, &e->access_list, list) {
-+		if ((int)a->access <= highest)
-+			continue;
-+
-+		switch (a->type) {
-+		case KDBUS_POLICY_ACCESS_USER:
-+			if (uid_eq(cred->euid, a->uid))
-+				highest = a->access;
-+			break;
-+		case KDBUS_POLICY_ACCESS_GROUP:
-+			if (gid_eq(cred->egid, a->gid)) {
-+				highest = a->access;
-+				break;
-+			}
-+
-+			for (i = 0; i < cred->group_info->ngroups; i++) {
-+				kgid_t gid = GROUP_AT(cred->group_info, i);
-+
-+				if (gid_eq(gid, a->gid)) {
-+					highest = a->access;
-+					break;
-+				}
-+			}
-+
-+			break;
-+		case KDBUS_POLICY_ACCESS_WORLD:
-+			highest = a->access;
-+			break;
-+		}
-+
-+		/* OWN is the highest possible policy */
-+		if (highest >= KDBUS_POLICY_OWN)
-+			break;
-+	}
-+
-+	return highest;
-+}
-+
-+/**
-+ * kdbus_policy_query() - Query the policy database
-+ * @db:		Policy database
-+ * @cred:	Credentials to test against
-+ * @name:	Name to query
-+ * @hash:	Hash value of @name
-+ *
-+ * Query the policy database @db for the access rights of @cred to the name
-+ * @name. The access rights of @cred are returned, or -EPERM if no access is
-+ * granted.
-+ *
-+ * This call effectively searches for the highest access-right granted to
-+ * @cred. The caller should really cache those as policy lookups are rather
-+ * expensive.
-+ *
-+ * Return: The highest KDBUS_POLICY_* access type found, or -EPERM if none.
-+ */
-+int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
-+		       const char *name, unsigned int hash)
-+{
-+	int ret;
-+
-+	down_read(&db->entries_rwlock);
-+	ret = kdbus_policy_query_unlocked(db, cred, name, hash);
-+	up_read(&db->entries_rwlock);
-+
-+	return ret;
-+}
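
A hypothetical call site might look like this, assuming the full-name hash
is computed with kdbus_strhash() from util.h (the same helper the duplicate
check in kdbus_policy_set() uses); the name is purely illustrative:

	const char *name = "com.example.Service";
	int access;

	access = kdbus_policy_query(db, current_cred(), name,
				    kdbus_strhash(name));
	if (access < KDBUS_POLICY_TALK)
		return -EPERM;	/* neither TALK nor OWN was granted */
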
-+
-+static void __kdbus_policy_remove_owner(struct kdbus_policy_db *db,
-+					const void *owner)
-+{
-+	struct kdbus_policy_db_entry *e;
-+	struct hlist_node *tmp;
-+	int i;
-+
-+	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry)
-+		if (e->owner == owner) {
-+			hash_del(&e->hentry);
-+			kdbus_policy_entry_free(e);
-+		}
-+}
-+
-+/**
-+ * kdbus_policy_remove_owner() - remove all entries related to a connection
-+ * @db:		The policy database
-+ * @owner:	The connection which items to remove
-+ */
-+void kdbus_policy_remove_owner(struct kdbus_policy_db *db,
-+			       const void *owner)
-+{
-+	down_write(&db->entries_rwlock);
-+	__kdbus_policy_remove_owner(db, owner);
-+	up_write(&db->entries_rwlock);
-+}
-+
-+/*
-+ * Convert user-provided policy access to internal kdbus policy
-+ * access
-+ */
-+static struct kdbus_policy_db_entry_access *
-+kdbus_policy_make_access(const struct kdbus_policy_access *uaccess)
-+{
-+	int ret;
-+	struct kdbus_policy_db_entry_access *a;
-+
-+	a = kzalloc(sizeof(*a), GFP_KERNEL);
-+	if (!a)
-+		return ERR_PTR(-ENOMEM);
-+
-+	ret = -EINVAL;
-+	switch (uaccess->access) {
-+	case KDBUS_POLICY_SEE:
-+	case KDBUS_POLICY_TALK:
-+	case KDBUS_POLICY_OWN:
-+		a->access = uaccess->access;
-+		break;
-+	default:
-+		goto err;
-+	}
-+
-+	switch (uaccess->type) {
-+	case KDBUS_POLICY_ACCESS_USER:
-+		a->uid = make_kuid(current_user_ns(), uaccess->id);
-+		if (!uid_valid(a->uid))
-+			goto err;
-+
-+		break;
-+	case KDBUS_POLICY_ACCESS_GROUP:
-+		a->gid = make_kgid(current_user_ns(), uaccess->id);
-+		if (!gid_valid(a->gid))
-+			goto err;
-+
-+		break;
-+	case KDBUS_POLICY_ACCESS_WORLD:
-+		break;
-+	default:
-+		goto err;
-+	}
-+
-+	a->type = uaccess->type;
-+
-+	return a;
-+
-+err:
-+	kfree(a);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_policy_set() - set a connection's policy rules
-+ * @db:				The policy database
-+ * @items:			A list of kdbus_item elements that contain both
-+ *				names and access rules to set.
-+ * @items_size:			The total size of the items.
-+ * @max_policies:		The maximum number of policy entries to allow.
-+ *				Pass 0 for no limit.
-+ * @allow_wildcards:		Boolean value whether wildcard entries (such
-+ *				as those ending in '.*') should be allowed.
-+ * @owner:			The owner of the new policy items.
-+ *
-+ * This function sets a new set of policies for a given owner. The names and
-+ * access rules are gathered by walking the list of items passed in as
-+ * argument. An item of type KDBUS_ITEM_NAME is expected before any number of
-+ * KDBUS_ITEM_POLICY_ACCESS items. If there are more repetitions of this
-+ * pattern than denoted in @max_policies, -E2BIG is returned.
-+ *
-+ * In order to allow atomic replacement of rules, the function first removes
-+ * all entries that have been created for the given owner previously.
-+ *
-+ * Callers of this function must make sure that the owner is a custom
-+ * endpoint, or if the endpoint is a default endpoint, then it must be
-+ * either a policy holder or an activator.
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_policy_set(struct kdbus_policy_db *db,
-+		     const struct kdbus_item *items,
-+		     size_t items_size,
-+		     size_t max_policies,
-+		     bool allow_wildcards,
-+		     const void *owner)
-+{
-+	struct kdbus_policy_db_entry_access *a;
-+	struct kdbus_policy_db_entry *e, *p;
-+	const struct kdbus_item *item;
-+	struct hlist_node *tmp;
-+	HLIST_HEAD(entries);
-+	HLIST_HEAD(restore);
-+	size_t count = 0;
-+	int i, ret = 0;
-+	u32 hash;
-+
-+	/* Walk the list of items and look for new policies */
-+	e = NULL;
-+	KDBUS_ITEMS_FOREACH(item, items, items_size) {
-+		switch (item->type) {
-+		case KDBUS_ITEM_NAME: {
-+			size_t len;
-+
-+			if (max_policies && ++count > max_policies) {
-+				ret = -E2BIG;
-+				goto exit;
-+			}
-+
-+			if (!kdbus_name_is_valid(item->str, true)) {
-+				ret = -EINVAL;
-+				goto exit;
-+			}
-+
-+			e = kzalloc(sizeof(*e), GFP_KERNEL);
-+			if (!e) {
-+				ret = -ENOMEM;
-+				goto exit;
-+			}
-+
-+			INIT_LIST_HEAD(&e->access_list);
-+			e->owner = owner;
-+			hlist_add_head(&e->hentry, &entries);
-+
-+			e->name = kstrdup(item->str, GFP_KERNEL);
-+			if (!e->name) {
-+				ret = -ENOMEM;
-+				goto exit;
-+			}
-+
-+			/*
-+			 * If a supplied name ends with an '.*', cut off that
-+			 * part, only store anything before it, and mark the
-+			 * entry as wildcard.
-+			 */
-+			len = strlen(e->name);
-+			if (len > 2 &&
-+			    e->name[len - 2] == '.' &&
-+			    e->name[len - 1] == '*') {
-+				if (!allow_wildcards) {
-+					ret = -EINVAL;
-+					goto exit;
-+				}
-+
-+				e->name[len - 2] = '\0';
-+				e->wildcard = true;
-+			}
-+
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_POLICY_ACCESS:
-+			if (!e) {
-+				ret = -EINVAL;
-+				goto exit;
-+			}
-+
-+			a = kdbus_policy_make_access(&item->policy_access);
-+			if (IS_ERR(a)) {
-+				ret = PTR_ERR(a);
-+				goto exit;
-+			}
-+
-+			list_add_tail(&a->list, &e->access_list);
-+			break;
-+		}
-+	}
-+
-+	down_write(&db->entries_rwlock);
-+
-+	/* remember previous entries to restore in case of failure */
-+	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry)
-+		if (e->owner == owner) {
-+			hash_del(&e->hentry);
-+			hlist_add_head(&e->hentry, &restore);
-+		}
-+
-+	hlist_for_each_entry_safe(e, tmp, &entries, hentry) {
-+		/* prevent duplicates */
-+		hash = kdbus_strhash(e->name);
-+		hash_for_each_possible(db->entries_hash, p, hentry, hash)
-+			if (strcmp(e->name, p->name) == 0 &&
-+			    e->wildcard == p->wildcard) {
-+				ret = -EEXIST;
-+				goto restore;
-+			}
-+
-+		hlist_del(&e->hentry);
-+		hash_add(db->entries_hash, &e->hentry, hash);
-+	}
-+
-+restore:
-+	/* if we failed, flush all entries we added so far */
-+	if (ret < 0)
-+		__kdbus_policy_remove_owner(db, owner);
-+
-+	/* if we failed, restore entries, otherwise release them */
-+	hlist_for_each_entry_safe(e, tmp, &restore, hentry) {
-+		hlist_del(&e->hentry);
-+		if (ret < 0) {
-+			hash = kdbus_strhash(e->name);
-+			hash_add(db->entries_hash, &e->hentry, hash);
-+		} else {
-+			kdbus_policy_entry_free(e);
-+		}
-+	}
-+
-+	up_write(&db->entries_rwlock);
-+
-+exit:
-+	hlist_for_each_entry_safe(e, tmp, &entries, hentry) {
-+		hlist_del(&e->hentry);
-+		kdbus_policy_entry_free(e);
-+	}
-+
-+	return ret;
-+}
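
The item stream this function expects can be pictured as follows; the
names, IDs and access levels here are purely illustrative:

	/*
	 * KDBUS_ITEM_NAME           "com.example.*"      <- opens entry #1
	 * KDBUS_ITEM_POLICY_ACCESS  USER, uid 1000, OWN
	 * KDBUS_ITEM_POLICY_ACCESS  WORLD,          TALK
	 * KDBUS_ITEM_NAME           "com.example.Agent"  <- opens entry #2
	 * KDBUS_ITEM_POLICY_ACCESS  WORLD,          SEE
	 *
	 * A KDBUS_ITEM_POLICY_ACCESS item arriving before the first
	 * KDBUS_ITEM_NAME is rejected with -EINVAL, as is a wildcard
	 * name when @allow_wildcards is false.
	 */
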
-diff --git a/ipc/kdbus/policy.h b/ipc/kdbus/policy.h
-new file mode 100644
-index 0000000..15dd7bc
---- /dev/null
-+++ b/ipc/kdbus/policy.h
-@@ -0,0 +1,51 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_POLICY_H
-+#define __KDBUS_POLICY_H
-+
-+#include <linux/hashtable.h>
-+#include <linux/rwsem.h>
-+
-+struct kdbus_conn;
-+struct kdbus_item;
-+
-+/**
-+ * struct kdbus_policy_db - policy database
-+ * @entries_hash:	Hashtable of entries
-+ * @entries_rwlock:	RW semaphore to protect the database's access entries
-+ */
-+struct kdbus_policy_db {
-+	DECLARE_HASHTABLE(entries_hash, 6);
-+	struct rw_semaphore entries_rwlock;
-+};
-+
-+void kdbus_policy_db_init(struct kdbus_policy_db *db);
-+void kdbus_policy_db_clear(struct kdbus_policy_db *db);
-+
-+int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
-+				const struct cred *cred, const char *name,
-+				unsigned int hash);
-+int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
-+		       const char *name, unsigned int hash);
-+
-+void kdbus_policy_remove_owner(struct kdbus_policy_db *db,
-+			       const void *owner);
-+int kdbus_policy_set(struct kdbus_policy_db *db,
-+		     const struct kdbus_item *items,
-+		     size_t items_size,
-+		     size_t max_policies,
-+		     bool allow_wildcards,
-+		     const void *owner);
-+
-+#endif
-diff --git a/ipc/kdbus/pool.c b/ipc/kdbus/pool.c
-new file mode 100644
-index 0000000..63ccd55
---- /dev/null
-+++ b/ipc/kdbus/pool.c
-@@ -0,0 +1,728 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/aio.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/highmem.h>
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/pagemap.h>
-+#include <linux/rbtree.h>
-+#include <linux/sched.h>
-+#include <linux/shmem_fs.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/uaccess.h>
-+#include <linux/uio.h>
-+
-+#include "pool.h"
-+#include "util.h"
-+
-+/**
-+ * struct kdbus_pool - the receiver's buffer
-+ * @f:			The backing shmem file
-+ * @size:		The size of the file
-+ * @accounted_size:	Currently accounted memory in bytes
-+ * @lock:		Pool data lock
-+ * @slices:		All slices sorted by address
-+ * @slices_busy:	Tree of allocated slices
-+ * @slices_free:	Tree of free slices
-+ *
-+ * The receiver's buffer, managed as a pool of allocated and free
-+ * slices containing the queued messages.
-+ *
-+ * Messages sent with KDBUS_CMD_SEND are copied directly by the
-+ * sending process into the receiver's pool.
-+ *
-+ * Messages received with KDBUS_CMD_RECV just return the offset
-+ * to the data placed in the pool.
-+ *
-+ * The internally allocated memory needs to be returned by the receiver
-+ * with KDBUS_CMD_FREE.
-+ */
-+struct kdbus_pool {
-+	struct file *f;
-+	size_t size;
-+	size_t accounted_size;
-+	struct mutex lock;
-+
-+	struct list_head slices;
-+	struct rb_root slices_busy;
-+	struct rb_root slices_free;
-+};
-+
-+/**
-+ * struct kdbus_pool_slice - allocated element in kdbus_pool
-+ * @pool:		Pool this slice belongs to
-+ * @off:		Offset of slice in the shmem file
-+ * @size:		Size of slice
-+ * @entry:		Entry in "all slices" list
-+ * @rb_node:		Entry in free or busy list
-+ * @free:		Unused slice
-+ * @accounted:		Accounted as queue slice
-+ * @ref_kernel:		Kernel holds a reference
-+ * @ref_user:		Userspace holds a reference
-+ *
-+ * The pool has one or more slices, always spanning the entire size of the
-+ * pool.
-+ *
-+ * Every slice is an element in a list sorted by the buffer address, to
-+ * provide access to the next neighbor slice.
-+ *
-+ * Every slice is a member of either the busy or the free tree. The free
-+ * tree is organized by slice size, the busy tree organized by buffer
-+ * offset.
-+ */
-+struct kdbus_pool_slice {
-+	struct kdbus_pool *pool;
-+	size_t off;
-+	size_t size;
-+
-+	struct list_head entry;
-+	struct rb_node rb_node;
-+
-+	bool free:1;
-+	bool accounted:1;
-+	bool ref_kernel:1;
-+	bool ref_user:1;
-+};
-+
-+static struct kdbus_pool_slice *kdbus_pool_slice_new(struct kdbus_pool *pool,
-+						     size_t off, size_t size)
-+{
-+	struct kdbus_pool_slice *slice;
-+
-+	slice = kzalloc(sizeof(*slice), GFP_KERNEL);
-+	if (!slice)
-+		return NULL;
-+
-+	slice->pool = pool;
-+	slice->off = off;
-+	slice->size = size;
-+	slice->free = true;
-+	return slice;
-+}
-+
-+/* insert a slice into the free tree */
-+static void kdbus_pool_add_free_slice(struct kdbus_pool *pool,
-+				      struct kdbus_pool_slice *slice)
-+{
-+	struct rb_node **n;
-+	struct rb_node *pn = NULL;
-+
-+	n = &pool->slices_free.rb_node;
-+	while (*n) {
-+		struct kdbus_pool_slice *pslice;
-+
-+		pn = *n;
-+		pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
-+		if (slice->size < pslice->size)
-+			n = &pn->rb_left;
-+		else
-+			n = &pn->rb_right;
-+	}
-+
-+	rb_link_node(&slice->rb_node, pn, n);
-+	rb_insert_color(&slice->rb_node, &pool->slices_free);
-+}
-+
-+/* insert a slice into the busy tree */
-+static void kdbus_pool_add_busy_slice(struct kdbus_pool *pool,
-+				      struct kdbus_pool_slice *slice)
-+{
-+	struct rb_node **n;
-+	struct rb_node *pn = NULL;
-+
-+	n = &pool->slices_busy.rb_node;
-+	while (*n) {
-+		struct kdbus_pool_slice *pslice;
-+
-+		pn = *n;
-+		pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
-+		if (slice->off < pslice->off)
-+			n = &pn->rb_left;
-+		else if (slice->off > pslice->off)
-+			n = &pn->rb_right;
-+		else
-+			BUG();
-+	}
-+
-+	rb_link_node(&slice->rb_node, pn, n);
-+	rb_insert_color(&slice->rb_node, &pool->slices_busy);
-+}
-+
-+static struct kdbus_pool_slice *kdbus_pool_find_slice(struct kdbus_pool *pool,
-+						      size_t off)
-+{
-+	struct rb_node *n;
-+
-+	n = pool->slices_busy.rb_node;
-+	while (n) {
-+		struct kdbus_pool_slice *s;
-+
-+		s = rb_entry(n, struct kdbus_pool_slice, rb_node);
-+		if (off < s->off)
-+			n = n->rb_left;
-+		else if (off > s->off)
-+			n = n->rb_right;
-+		else
-+			return s;
-+	}
-+
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_pool_slice_alloc() - allocate memory from a pool
-+ * @pool:	The receiver's pool
-+ * @size:	The number of bytes to allocate
-+ * @accounted:	Whether this slice should be accounted for
-+ *
-+ * The returned slice is passed to kdbus_pool_slice_release() to free the
-+ * allocated memory. Data can be copied into the slice afterwards, via
-+ * kdbus_pool_slice_copy_kvec() for kernel memory or
-+ * kdbus_pool_slice_copy_iovec() for userspace memory.
-+ *
-+ * Return: the allocated slice on success, ERR_PTR on failure.
-+ */
-+struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
-+						size_t size, bool accounted)
-+{
-+	size_t slice_size = KDBUS_ALIGN8(size);
-+	struct rb_node *n, *found = NULL;
-+	struct kdbus_pool_slice *s;
-+	int ret = 0;
-+
-+	if (WARN_ON(!size))
-+		return ERR_PTR(-EINVAL);
-+
-+	/* search a free slice with the closest matching size */
-+	mutex_lock(&pool->lock);
-+	n = pool->slices_free.rb_node;
-+	while (n) {
-+		s = rb_entry(n, struct kdbus_pool_slice, rb_node);
-+		if (slice_size < s->size) {
-+			found = n;
-+			n = n->rb_left;
-+		} else if (slice_size > s->size) {
-+			n = n->rb_right;
-+		} else {
-+			found = n;
-+			break;
-+		}
-+	}
-+
-+	/* no slice with the minimum size found in the pool */
-+	if (!found) {
-+		ret = -EXFULL;
-+		goto exit_unlock;
-+	}
-+
-+	/* no exact match, use the closest one */
-+	if (!n) {
-+		struct kdbus_pool_slice *s_new;
-+
-+		s = rb_entry(found, struct kdbus_pool_slice, rb_node);
-+
-+		/* split-off the remainder of the size to its own slice */
-+		s_new = kdbus_pool_slice_new(pool, s->off + slice_size,
-+					     s->size - slice_size);
-+		if (!s_new) {
-+			ret = -ENOMEM;
-+			goto exit_unlock;
-+		}
-+
-+		list_add(&s_new->entry, &s->entry);
-+		kdbus_pool_add_free_slice(pool, s_new);
-+
-+		/* adjust our size now that we split-off another slice */
-+		s->size = slice_size;
-+	}
-+
-+	/* move slice from free to the busy tree */
-+	rb_erase(found, &pool->slices_free);
-+	kdbus_pool_add_busy_slice(pool, s);
-+
-+	WARN_ON(s->ref_kernel || s->ref_user);
-+
-+	s->ref_kernel = true;
-+	s->free = false;
-+	s->accounted = accounted;
-+	if (accounted)
-+		pool->accounted_size += s->size;
-+	mutex_unlock(&pool->lock);
-+
-+	return s;
-+
-+exit_unlock:
-+	mutex_unlock(&pool->lock);
-+	return ERR_PTR(ret);
-+}
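
A worked example of the best-fit search and split above, assuming a fresh
64 KiB pool (a single free slice spanning everything) and a 200-byte
allocation:

	/*
	 * request:    size = 200, so slice_size = KDBUS_ALIGN8(200) = 200
	 * free tree:  { off = 0, size = 65536 }
	 *
	 * There is no exact size match, so the closest larger slice is
	 * taken and the remainder is split off:
	 *
	 *   busy tree: { off = 0,   size = 200   }   <- returned slice
	 *   free tree: { off = 200, size = 65336 }   <- new remainder
	 */
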
-+
-+static void __kdbus_pool_slice_release(struct kdbus_pool_slice *slice)
-+{
-+	struct kdbus_pool *pool = slice->pool;
-+
-+	/* don't free the slice if either has a reference */
-+	if (slice->ref_kernel || slice->ref_user)
-+		return;
-+
-+	if (WARN_ON(slice->free))
-+		return;
-+
-+	rb_erase(&slice->rb_node, &pool->slices_busy);
-+
-+	/* merge with the next free slice */
-+	if (!list_is_last(&slice->entry, &pool->slices)) {
-+		struct kdbus_pool_slice *s;
-+
-+		s = list_entry(slice->entry.next,
-+			       struct kdbus_pool_slice, entry);
-+		if (s->free) {
-+			rb_erase(&s->rb_node, &pool->slices_free);
-+			list_del(&s->entry);
-+			slice->size += s->size;
-+			kfree(s);
-+		}
-+	}
-+
-+	/* merge with previous free slice */
-+	if (pool->slices.next != &slice->entry) {
-+		struct kdbus_pool_slice *s;
-+
-+		s = list_entry(slice->entry.prev,
-+			       struct kdbus_pool_slice, entry);
-+		if (s->free) {
-+			rb_erase(&s->rb_node, &pool->slices_free);
-+			list_del(&slice->entry);
-+			s->size += slice->size;
-+			kfree(slice);
-+			slice = s;
-+		}
-+	}
-+
-+	slice->free = true;
-+	kdbus_pool_add_free_slice(pool, slice);
-+}
-+
-+/**
-+ * kdbus_pool_slice_release() - drop kernel-reference on allocated slice
-+ * @slice:		Slice allocated from the pool
-+ *
-+ * This releases the kernel-reference on the given slice. If the
-+ * kernel-reference and the user-reference on a slice are dropped, the slice is
-+ * returned to the pool.
-+ *
-+ * So far, we do not implement full ref-counting on slices. Each of kernel
-+ * and user-space can hold exactly one reference to a slice. Once both
-+ * references are dropped, the slice is released.
-+ */
-+void kdbus_pool_slice_release(struct kdbus_pool_slice *slice)
-+{
-+	struct kdbus_pool *pool;
-+
-+	if (!slice)
-+		return;
-+
-+	/* @slice may be freed, so keep local ptr to @pool */
-+	pool = slice->pool;
-+
-+	mutex_lock(&pool->lock);
-+	/* kernel must own a ref to @slice to drop it */
-+	WARN_ON(!slice->ref_kernel);
-+	slice->ref_kernel = false;
-+	/* no longer kernel-owned, de-account slice */
-+	if (slice->accounted && !WARN_ON(pool->accounted_size < slice->size))
-+		pool->accounted_size -= slice->size;
-+	__kdbus_pool_slice_release(slice);
-+	mutex_unlock(&pool->lock);
-+}
-+
-+/**
-+ * kdbus_pool_release_offset() - release a public offset
-+ * @pool:		pool to operate on
-+ * @off:		offset to release
-+ *
-+ * This should be called whenever user-space frees a slice given to it. It
-+ * verifies the slice is available and public, and then drops it. It ensures
-+ * correct locking and barriers against queues.
-+ *
-+ * Return: 0 on success, -ENXIO if the offset is invalid or not public.
-+ */
-+int kdbus_pool_release_offset(struct kdbus_pool *pool, size_t off)
-+{
-+	struct kdbus_pool_slice *slice;
-+	int ret = 0;
-+
-+	/* 'pool->size' is used as dummy offset for empty slices */
-+	if (off == pool->size)
-+		return 0;
-+
-+	mutex_lock(&pool->lock);
-+	slice = kdbus_pool_find_slice(pool, off);
-+	if (slice && slice->ref_user) {
-+		slice->ref_user = false;
-+		__kdbus_pool_slice_release(slice);
-+	} else {
-+		ret = -ENXIO;
-+	}
-+	mutex_unlock(&pool->lock);
-+
-+	return ret;
-+}
-+
-+/**
-+ * kdbus_pool_publish_empty() - publish empty slice to user-space
-+ * @pool:		pool to operate on
-+ * @off:		output storage for offset, or NULL
-+ * @size:		output storage for size, or NULL
-+ *
-+ * This is the same as kdbus_pool_slice_publish(), but uses a dummy slice with
-+ * size 0. The returned offset points to the end of the pool and is never
-+ * returned for real slices.
-+ */
-+void kdbus_pool_publish_empty(struct kdbus_pool *pool, u64 *off, u64 *size)
-+{
-+	if (off)
-+		*off = pool->size;
-+	if (size)
-+		*size = 0;
-+}
-+
-+/**
-+ * kdbus_pool_slice_publish() - publish slice to user-space
-+ * @slice:		The slice
-+ * @out_offset:		Output storage for offset, or NULL
-+ * @out_size:		Output storage for size, or NULL
-+ *
-+ * This prepares a slice to be published to user-space.
-+ *
-+ * This call combines the following operations:
-+ *   * the memory region is flushed so the user's memory view is consistent
-+ *   * the slice is marked as referenced by user-space, so user-space has to
-+ *     call KDBUS_CMD_FREE to release it
-+ *   * the offset and size of the slice are written to the given output
-+ *     arguments, if non-NULL
-+ */
-+void kdbus_pool_slice_publish(struct kdbus_pool_slice *slice,
-+			      u64 *out_offset, u64 *out_size)
-+{
-+	mutex_lock(&slice->pool->lock);
-+	/* kernel must own a ref to @slice to gain a user-space ref */
-+	WARN_ON(!slice->ref_kernel);
-+	slice->ref_user = true;
-+	mutex_unlock(&slice->pool->lock);
-+
-+	if (out_offset)
-+		*out_offset = slice->off;
-+	if (out_size)
-+		*out_size = slice->size;
-+}
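
The slice reference life-cycle implied by the calls above, sketched end to
end; this simplifies the real send/receive paths and elides error handling:

	/* sender side (in-kernel) */
	slice = kdbus_pool_slice_alloc(pool, size, true); /* ref_kernel set */
	/* copy the message payload into the slice here */
	kdbus_pool_slice_publish(slice, &off, &sz);       /* ref_user set */
	kdbus_pool_slice_release(slice);                  /* ref_kernel gone */

	/* receiver side, once user-space has consumed the data at off */
	kdbus_pool_release_offset(pool, off);             /* ref_user gone;
							   * slice returns to
							   * the free tree */
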
-+
-+/**
-+ * kdbus_pool_slice_offset() - Get a slice's offset inside the pool
-+ * @slice:	Slice to return the offset of
-+ *
-+ * Return: The internal offset of @slice inside the pool.
-+ */
-+off_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice)
-+{
-+	return slice->off;
-+}
-+
-+/**
-+ * kdbus_pool_slice_size() - get size of a pool slice
-+ * @slice:	slice to query
-+ *
-+ * Return: size of the given slice
-+ */
-+size_t kdbus_pool_slice_size(const struct kdbus_pool_slice *slice)
-+{
-+	return slice->size;
-+}
-+
-+/**
-+ * kdbus_pool_new() - create a new pool
-+ * @name:		Name of the (deleted) file which shows up in
-+ *			/proc, used for debugging
-+ * @size:		Maximum size of the pool
-+ *
-+ * Return: a new kdbus_pool on success, ERR_PTR on failure.
-+ */
-+struct kdbus_pool *kdbus_pool_new(const char *name, size_t size)
-+{
-+	struct kdbus_pool_slice *s;
-+	struct kdbus_pool *p;
-+	struct file *f;
-+	char *n = NULL;
-+	int ret;
-+
-+	p = kzalloc(sizeof(*p), GFP_KERNEL);
-+	if (!p)
-+		return ERR_PTR(-ENOMEM);
-+
-+	if (name) {
-+		n = kasprintf(GFP_KERNEL, KBUILD_MODNAME "-conn:%s", name);
-+		if (!n) {
-+			ret = -ENOMEM;
-+			goto exit_free;
-+		}
-+	}
-+
-+	f = shmem_file_setup(n ?: KBUILD_MODNAME "-conn", size, 0);
-+	kfree(n);
-+
-+	if (IS_ERR(f)) {
-+		ret = PTR_ERR(f);
-+		goto exit_free;
-+	}
-+
-+	ret = get_write_access(file_inode(f));
-+	if (ret < 0)
-+		goto exit_put_shmem;
-+
-+	/* allocate first slice spanning the entire pool */
-+	s = kdbus_pool_slice_new(p, 0, size);
-+	if (!s) {
-+		ret = -ENOMEM;
-+		goto exit_put_write;
-+	}
-+
-+	p->f = f;
-+	p->size = size;
-+	p->slices_free = RB_ROOT;
-+	p->slices_busy = RB_ROOT;
-+	mutex_init(&p->lock);
-+
-+	INIT_LIST_HEAD(&p->slices);
-+	list_add(&s->entry, &p->slices);
-+
-+	kdbus_pool_add_free_slice(p, s);
-+	return p;
-+
-+exit_put_write:
-+	put_write_access(file_inode(f));
-+exit_put_shmem:
-+	fput(f);
-+exit_free:
-+	kfree(p);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_pool_free() - destroy pool
-+ * @pool:		The receiver's pool
-+ */
-+void kdbus_pool_free(struct kdbus_pool *pool)
-+{
-+	struct kdbus_pool_slice *s, *tmp;
-+
-+	if (!pool)
-+		return;
-+
-+	list_for_each_entry_safe(s, tmp, &pool->slices, entry) {
-+		list_del(&s->entry);
-+		kfree(s);
-+	}
-+
-+	put_write_access(file_inode(pool->f));
-+	fput(pool->f);
-+	kfree(pool);
-+}
-+
-+/**
-+ * kdbus_pool_accounted() - retrieve accounting information
-+ * @pool:		pool to query
-+ * @size:		output for overall pool size
-+ * @acc:		output for currently accounted size
-+ *
-+ * This returns accounting information of the pool. Note that the data might
-+ * change after the function returns, as the pool lock is dropped. You need to
-+ * protect the data via other means, if you need reliable accounting.
-+ */
-+void kdbus_pool_accounted(struct kdbus_pool *pool, size_t *size, size_t *acc)
-+{
-+	mutex_lock(&pool->lock);
-+	if (size)
-+		*size = pool->size;
-+	if (acc)
-+		*acc = pool->accounted_size;
-+	mutex_unlock(&pool->lock);
-+}
-+
-+/**
-+ * kdbus_pool_slice_copy_iovec() - copy user memory to a slice
-+ * @slice:		The slice to write to
-+ * @off:		Offset in the slice to write to
-+ * @iov:		iovec array, pointing to data to copy
-+ * @iov_len:		Number of elements in @iov
-+ * @total_len:		Total number of bytes described in members of @iov
-+ *
-+ * User memory referenced by @iov will be copied into @slice at offset @off.
-+ *
-+ * Return: the number of bytes copied, negative errno on failure.
-+ */
-+ssize_t
-+kdbus_pool_slice_copy_iovec(const struct kdbus_pool_slice *slice, loff_t off,
-+			    struct iovec *iov, size_t iov_len, size_t total_len)
-+{
-+	struct iov_iter iter;
-+	ssize_t len;
-+
-+	if (WARN_ON(off + total_len > slice->size))
-+		return -EFAULT;
-+
-+	off += slice->off;
-+	iov_iter_init(&iter, WRITE, iov, iov_len, total_len);
-+	len = vfs_iter_write(slice->pool->f, &iter, &off);
-+
-+	return (len >= 0 && len != total_len) ? -EFAULT : len;
-+}
-+
-+/**
-+ * kdbus_pool_slice_copy_kvec() - copy kernel memory to a slice
-+ * @slice:		The slice to write to
-+ * @off:		Offset in the slice to write to
-+ * @kvec:		kvec array, pointing to data to copy
-+ * @kvec_len:		Number of elements in @kvec
-+ * @total_len:		Total number of bytes described in members of @kvec
-+ *
-+ * Kernel memory referenced by @kvec will be copied into @slice at offset @off.
-+ *
-+ * Return: the number of bytes copied, negative errno on failure.
-+ */
-+ssize_t kdbus_pool_slice_copy_kvec(const struct kdbus_pool_slice *slice,
-+				   loff_t off, struct kvec *kvec,
-+				   size_t kvec_len, size_t total_len)
-+{
-+	struct iov_iter iter;
-+	mm_segment_t old_fs;
-+	ssize_t len;
-+
-+	if (WARN_ON(off + total_len > slice->size))
-+		return -EFAULT;
-+
-+	off += slice->off;
-+	iov_iter_kvec(&iter, WRITE | ITER_KVEC, kvec, kvec_len, total_len);
-+
-+	old_fs = get_fs();
-+	set_fs(get_ds());
-+	len = vfs_iter_write(slice->pool->f, &iter, &off);
-+	set_fs(old_fs);
-+
-+	return (len >= 0 && len != total_len) ? -EFAULT : len;
-+}
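
For example, writing a small kernel structure into the start of a slice
could look like this; hdr is a hypothetical, caller-owned message header:

	struct kvec kvec = {
		.iov_base = &hdr,		/* kernel memory to copy */
		.iov_len = sizeof(hdr),
	};
	ssize_t n;

	n = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, sizeof(hdr));
	if (n < 0)
		return n;	/* short or failed writes yield -EFAULT */
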
-+
-+/**
-+ * kdbus_pool_slice_copy() - copy data from one slice into another
-+ * @slice_dst:		destination slice
-+ * @slice_src:		source slice
-+ *
-+ * Return: 0 on success, negative error number on failure.
-+ */
-+int kdbus_pool_slice_copy(const struct kdbus_pool_slice *slice_dst,
-+			  const struct kdbus_pool_slice *slice_src)
-+{
-+	struct file *f_src = slice_src->pool->f;
-+	struct file *f_dst = slice_dst->pool->f;
-+	struct inode *i_dst = file_inode(f_dst);
-+	struct address_space *mapping_dst = f_dst->f_mapping;
-+	const struct address_space_operations *aops = mapping_dst->a_ops;
-+	unsigned long len = slice_src->size;
-+	loff_t off_src = slice_src->off;
-+	loff_t off_dst = slice_dst->off;
-+	mm_segment_t old_fs;
-+	int ret = 0;
-+
-+	if (WARN_ON(slice_src->size != slice_dst->size) ||
-+	    WARN_ON(slice_src->free || slice_dst->free))
-+		return -EINVAL;
-+
-+	mutex_lock(&i_dst->i_mutex);
-+	old_fs = get_fs();
-+	set_fs(get_ds());
-+	while (len > 0) {
-+		unsigned long page_off;
-+		unsigned long copy_len;
-+		char __user *kaddr;
-+		struct page *page;
-+		ssize_t n_read;
-+		void *fsdata;
-+		long status;
-+
-+		page_off = off_dst & (PAGE_CACHE_SIZE - 1);
-+		copy_len = min_t(unsigned long,
-+				 PAGE_CACHE_SIZE - page_off, len);
-+
-+		status = aops->write_begin(f_dst, mapping_dst, off_dst,
-+					   copy_len, 0, &page, &fsdata);
-+		if (unlikely(status < 0)) {
-+			ret = status;
-+			break;
-+		}
-+
-+		kaddr = (char __force __user *)kmap(page) + page_off;
-+		n_read = __vfs_read(f_src, kaddr, copy_len, &off_src);
-+		kunmap(page);
-+		mark_page_accessed(page);
-+		flush_dcache_page(page);
-+
-+		if (unlikely(n_read != copy_len)) {
-+			ret = -EFAULT;
-+			break;
-+		}
-+
-+		status = aops->write_end(f_dst, mapping_dst, off_dst,
-+					 copy_len, copy_len, page, fsdata);
-+		if (unlikely(status != copy_len)) {
-+			ret = -EFAULT;
-+			break;
-+		}
-+
-+		off_dst += copy_len;
-+		len -= copy_len;
-+	}
-+	set_fs(old_fs);
-+	mutex_unlock(&i_dst->i_mutex);
-+
-+	return ret;
-+}
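
To illustrate the page-wise clipping in the loop above: with 4 KiB pages, a
destination offset of 4000 and 1000 bytes left, the first iteration only
finishes the current page:

	/*
	 * off_dst = 4000, len = 1000, PAGE_CACHE_SIZE = 4096
	 *
	 *   page_off = 4000 & 4095            = 4000
	 *   copy_len = min(4096 - 4000, 1000) =   96
	 *
	 * Those 96 bytes complete the current page; the next iteration
	 * starts page-aligned and copies the remaining 904 bytes.
	 */
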
-+
-+/**
-+ * kdbus_pool_mmap() -  map the pool into the process
-+ * @pool:		The receiver's pool
-+ * @vma:		passed by mmap() syscall
-+ *
-+ * Return: the result of the mmap() call, negative errno on failure.
-+ */
-+int kdbus_pool_mmap(const struct kdbus_pool *pool, struct vm_area_struct *vma)
-+{
-+	/* deny write access to the pool */
-+	if (vma->vm_flags & VM_WRITE)
-+		return -EPERM;
-+	vma->vm_flags &= ~VM_MAYWRITE;
-+
-+	/* do not allow to map more than the size of the file */
-+	if ((vma->vm_end - vma->vm_start) > pool->size)
-+		return -EFAULT;
-+
-+	/* replace the connection file with our shmem file */
-+	if (vma->vm_file)
-+		fput(vma->vm_file);
-+	vma->vm_file = get_file(pool->f);
-+
-+	return pool->f->f_op->mmap(pool->f, vma);
-+}
-diff --git a/ipc/kdbus/pool.h b/ipc/kdbus/pool.h
-new file mode 100644
-index 0000000..a903821
---- /dev/null
-+++ b/ipc/kdbus/pool.h
-@@ -0,0 +1,46 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_POOL_H
-+#define __KDBUS_POOL_H
-+
-+#include <linux/uio.h>
-+
-+struct kdbus_pool;
-+struct kdbus_pool_slice;
-+
-+struct kdbus_pool *kdbus_pool_new(const char *name, size_t size);
-+void kdbus_pool_free(struct kdbus_pool *pool);
-+void kdbus_pool_accounted(struct kdbus_pool *pool, size_t *size, size_t *acc);
-+int kdbus_pool_mmap(const struct kdbus_pool *pool, struct vm_area_struct *vma);
-+int kdbus_pool_release_offset(struct kdbus_pool *pool, size_t off);
-+void kdbus_pool_publish_empty(struct kdbus_pool *pool, u64 *off, u64 *size);
-+
-+struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
-+						size_t size, bool accounted);
-+void kdbus_pool_slice_release(struct kdbus_pool_slice *slice);
-+void kdbus_pool_slice_publish(struct kdbus_pool_slice *slice,
-+			      u64 *out_offset, u64 *out_size);
-+off_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice);
-+size_t kdbus_pool_slice_size(const struct kdbus_pool_slice *slice);
-+int kdbus_pool_slice_copy(const struct kdbus_pool_slice *slice_dst,
-+			  const struct kdbus_pool_slice *slice_src);
-+ssize_t kdbus_pool_slice_copy_kvec(const struct kdbus_pool_slice *slice,
-+				   loff_t off, struct kvec *kvec,
-+				   size_t kvec_count, size_t total_len);
-+ssize_t kdbus_pool_slice_copy_iovec(const struct kdbus_pool_slice *slice,
-+				    loff_t off, struct iovec *iov,
-+				    size_t iov_count, size_t total_len);
-+
-+#endif
-diff --git a/ipc/kdbus/queue.c b/ipc/kdbus/queue.c
-new file mode 100644
-index 0000000..f9c44d7
---- /dev/null
-+++ b/ipc/kdbus/queue.c
-@@ -0,0 +1,363 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/audit.h>
-+#include <linux/file.h>
-+#include <linux/fs.h>
-+#include <linux/hashtable.h>
-+#include <linux/idr.h>
-+#include <linux/init.h>
-+#include <linux/math64.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/poll.h>
-+#include <linux/sched.h>
-+#include <linux/sizes.h>
-+#include <linux/slab.h>
-+#include <linux/syscalls.h>
-+#include <linux/uio.h>
-+
-+#include "util.h"
-+#include "domain.h"
-+#include "connection.h"
-+#include "item.h"
-+#include "message.h"
-+#include "metadata.h"
-+#include "queue.h"
-+#include "reply.h"
-+
-+/**
-+ * kdbus_queue_init() - initialize data structure related to a queue
-+ * @queue:	The queue to initialize
-+ */
-+void kdbus_queue_init(struct kdbus_queue *queue)
-+{
-+	INIT_LIST_HEAD(&queue->msg_list);
-+	queue->msg_prio_queue = RB_ROOT;
-+}
-+
-+/**
-+ * kdbus_queue_peek() - Retrieves an entry from a queue
-+ * @queue:		The queue
-+ * @priority:		The minimum priority of the entry to peek
-+ * @use_priority:	Boolean flag whether or not to peek by priority
-+ *
-+ * Look for an entry in a queue, either by priority or the oldest one (FIFO).
-+ * The entry is neither freed nor removed from the queue's lists.
-+ *
-+ * Return: the peeked queue entry on success, NULL if no suitable msg is found
-+ */
-+struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
-+					   s64 priority, bool use_priority)
-+{
-+	struct kdbus_queue_entry *e;
-+
-+	if (list_empty(&queue->msg_list))
-+		return NULL;
-+
-+	if (use_priority) {
-+		/* get next entry with highest priority */
-+		e = rb_entry(queue->msg_prio_highest,
-+			     struct kdbus_queue_entry, prio_node);
-+
-+		/* no entry with the requested priority */
-+		if (e->priority > priority)
-+			return NULL;
-+	} else {
-+		/* ignore the priority, return the next entry in the list */
-+		e = list_first_entry(&queue->msg_list,
-+				     struct kdbus_queue_entry, entry);
-+	}
-+
-+	return e;
-+}
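
A hypothetical caller on the receive path, peeking under the connection
lock (the entry remains queued until it is explicitly freed); priority and
use_priority come from the caller:

	struct kdbus_queue_entry *e;

	mutex_lock(&conn->lock);
	e = kdbus_queue_peek(&conn->queue, priority, use_priority);
	if (e) {
		/* install FDs / copy data out here, then drop the entry */
		kdbus_queue_entry_free(e);	/* also unlinks it */
	}
	mutex_unlock(&conn->lock);
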
-+
-+static void kdbus_queue_entry_link(struct kdbus_queue_entry *entry)
-+{
-+	struct kdbus_queue *queue = &entry->conn->queue;
-+	struct rb_node **n, *pn = NULL;
-+	bool highest = true;
-+
-+	lockdep_assert_held(&entry->conn->lock);
-+	if (WARN_ON(!list_empty(&entry->entry)))
-+		return;
-+
-+	/* sort into priority entry tree */
-+	n = &queue->msg_prio_queue.rb_node;
-+	while (*n) {
-+		struct kdbus_queue_entry *e;
-+
-+		pn = *n;
-+		e = rb_entry(pn, struct kdbus_queue_entry, prio_node);
-+
-+		/* existing node for this priority, add to its list */
-+		if (likely(entry->priority == e->priority)) {
-+			list_add_tail(&entry->prio_entry, &e->prio_entry);
-+			goto prio_done;
-+		}
-+
-+		if (entry->priority < e->priority) {
-+			n = &pn->rb_left;
-+		} else {
-+			n = &pn->rb_right;
-+			highest = false;
-+		}
-+	}
-+
-+	/* cache highest-priority entry */
-+	if (highest)
-+		queue->msg_prio_highest = &entry->prio_node;
-+
-+	/* new node for this priority */
-+	rb_link_node(&entry->prio_node, pn, n);
-+	rb_insert_color(&entry->prio_node, &queue->msg_prio_queue);
-+	INIT_LIST_HEAD(&entry->prio_entry);
-+
-+prio_done:
-+	/* add to unsorted fifo list */
-+	list_add_tail(&entry->entry, &queue->msg_list);
-+}
-+
-+static void kdbus_queue_entry_unlink(struct kdbus_queue_entry *entry)
-+{
-+	struct kdbus_queue *queue = &entry->conn->queue;
-+
-+	lockdep_assert_held(&entry->conn->lock);
-+	if (list_empty(&entry->entry))
-+		return;
-+
-+	list_del_init(&entry->entry);
-+
-+	if (list_empty(&entry->prio_entry)) {
-+		/*
-+		 * Single entry for this priority, update cached
-+		 * highest-priority entry, remove the tree node.
-+		 */
-+		if (queue->msg_prio_highest == &entry->prio_node)
-+			queue->msg_prio_highest = rb_next(&entry->prio_node);
-+
-+		rb_erase(&entry->prio_node, &queue->msg_prio_queue);
-+	} else {
-+		struct kdbus_queue_entry *q;
-+
-+		/*
-+		 * Multiple entries for this priority, get the next one in
-+		 * the list. Update cached highest-priority entry, store the
-+		 * new one as the tree node.
-+		 */
-+		q = list_first_entry(&entry->prio_entry,
-+				     struct kdbus_queue_entry, prio_entry);
-+		list_del(&entry->prio_entry);
-+
-+		if (queue->msg_prio_highest == &entry->prio_node)
-+			queue->msg_prio_highest = &q->prio_node;
-+
-+		rb_replace_node(&entry->prio_node, &q->prio_node,
-+				&queue->msg_prio_queue);
-+	}
-+}
-+
-+/**
-+ * kdbus_queue_entry_new() - allocate a queue entry
-+ * @src:	source connection, or NULL
-+ * @dst:	destination connection
-+ * @s:		staging object carrying the message
-+ *
-+ * Allocates a queue entry based on a given msg and allocates space for
-+ * the message payload and the requested metadata in the connection's pool.
-+ * The entry is not actually added to the queue's lists at this point.
-+ *
-+ * Return: the allocated entry on success, or an ERR_PTR on failures.
-+ */
-+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *src,
-+						struct kdbus_conn *dst,
-+						struct kdbus_staging *s)
-+{
-+	struct kdbus_queue_entry *entry;
-+	int ret;
-+
-+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-+	if (!entry)
-+		return ERR_PTR(-ENOMEM);
-+
-+	INIT_LIST_HEAD(&entry->entry);
-+	entry->priority = s->msg->priority;
-+	entry->conn = kdbus_conn_ref(dst);
-+	entry->gaps = kdbus_gaps_ref(s->gaps);
-+
-+	entry->slice = kdbus_staging_emit(s, src, dst);
-+	if (IS_ERR(entry->slice)) {
-+		ret = PTR_ERR(entry->slice);
-+		entry->slice = NULL;
-+		goto error;
-+	}
-+
-+	entry->user = src ? kdbus_user_ref(src->user) : NULL;
-+	return entry;
-+
-+error:
-+	kdbus_queue_entry_free(entry);
-+	return ERR_PTR(ret);
-+}
-+
-+/**
-+ * kdbus_queue_entry_free() - free resources of an entry
-+ * @entry:	The entry to free
-+ *
-+ * Removes resources allocated by a queue entry, along with the entry itself.
-+ * Note that the entry's slice is not freed while user-space references it.
-+ */
-+void kdbus_queue_entry_free(struct kdbus_queue_entry *entry)
-+{
-+	if (!entry)
-+		return;
-+
-+	lockdep_assert_held(&entry->conn->lock);
-+
-+	kdbus_queue_entry_unlink(entry);
-+	kdbus_reply_unref(entry->reply);
-+
-+	if (entry->slice) {
-+		kdbus_conn_quota_dec(entry->conn, entry->user,
-+				     kdbus_pool_slice_size(entry->slice),
-+				     entry->gaps ? entry->gaps->n_fds : 0);
-+		kdbus_pool_slice_release(entry->slice);
-+	}
-+
-+	kdbus_user_unref(entry->user);
-+	kdbus_gaps_unref(entry->gaps);
-+	kdbus_conn_unref(entry->conn);
-+	kfree(entry);
-+}
-+
-+/**
-+ * kdbus_queue_entry_install() - install message components into the
-+ *				 receiver's process
-+ * @entry:		The queue entry to install
-+ * @return_flags:	Pointer to store the return flags for userspace
-+ * @install_fds:	Whether or not to install associated file descriptors
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
-+			      u64 *return_flags, bool install_fds)
-+{
-+	bool incomplete_fds = false;
-+	int ret;
-+
-+	lockdep_assert_held(&entry->conn->lock);
-+
-+	ret = kdbus_gaps_install(entry->gaps, entry->slice, &incomplete_fds);
-+	if (ret < 0)
-+		return ret;
-+
-+	if (incomplete_fds)
-+		*return_flags |= KDBUS_RECV_RETURN_INCOMPLETE_FDS;
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_queue_entry_enqueue() - enqueue an entry
-+ * @entry:		entry to enqueue
-+ * @reply:		reply to link to this entry (or NULL if none)
-+ *
-+ * This enqueues an unqueued entry into the message queue of the linked
-+ * connection. It also binds a reply object to the entry so we can remember it
-+ * when the message is moved.
-+ *
-+ * Once this call returns (and the connection lock is released), this entry can
-+ * be dequeued by the target connection. Note that the entry will not be removed
-+ * from the queue until it is destroyed.
-+ */
-+void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
-+			       struct kdbus_reply *reply)
-+{
-+	lockdep_assert_held(&entry->conn->lock);
-+
-+	if (WARN_ON(entry->reply) || WARN_ON(!list_empty(&entry->entry)))
-+		return;
-+
-+	entry->reply = kdbus_reply_ref(reply);
-+	kdbus_queue_entry_link(entry);
-+}
-+
-+/**
-+ * kdbus_queue_entry_move() - move queue entry
-+ * @e:		queue entry to move
-+ * @dst:	destination connection to queue the entry on
-+ *
-+ * This moves a queue entry onto a different connection. It allocates a new
-+ * slice on the target connection and copies the message over. If the copy
-+ * succeeded, we move the entry from its old connection to @dst.
-+ *
-+ * On failure, the entry is left untouched.
-+ *
-+ * The queue entry must currently be queued; after the call succeeds, it will
-+ * be queued on the destination, but no longer on the source.
-+ *
-+ * The caller must hold the connection lock of the source *and* destination.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_queue_entry_move(struct kdbus_queue_entry *e,
-+			   struct kdbus_conn *dst)
-+{
-+	struct kdbus_pool_slice *slice = NULL;
-+	struct kdbus_conn *src = e->conn;
-+	size_t size, fds;
-+	int ret;
-+
-+	lockdep_assert_held(&src->lock);
-+	lockdep_assert_held(&dst->lock);
-+
-+	if (WARN_ON(list_empty(&e->entry)))
-+		return -EINVAL;
-+	if (src == dst)
-+		return 0;
-+
-+	size = kdbus_pool_slice_size(e->slice);
-+	fds = e->gaps ? e->gaps->n_fds : 0;
-+
-+	ret = kdbus_conn_quota_inc(dst, e->user, size, fds);
-+	if (ret < 0)
-+		return ret;
-+
-+	slice = kdbus_pool_slice_alloc(dst->pool, size, true);
-+	if (IS_ERR(slice)) {
-+		ret = PTR_ERR(slice);
-+		slice = NULL;
-+		goto error;
-+	}
-+
-+	ret = kdbus_pool_slice_copy(slice, e->slice);
-+	if (ret < 0)
-+		goto error;
-+
-+	kdbus_queue_entry_unlink(e);
-+	kdbus_conn_quota_dec(src, e->user, size, fds);
-+	kdbus_pool_slice_release(e->slice);
-+	kdbus_conn_unref(e->conn);
-+
-+	e->slice = slice;
-+	e->conn = kdbus_conn_ref(dst);
-+	kdbus_queue_entry_link(e);
-+
-+	return 0;
-+
-+error:
-+	kdbus_pool_slice_release(slice);
-+	kdbus_conn_quota_dec(dst, e->user, size, fds);
-+	return ret;
-+}
-diff --git a/ipc/kdbus/queue.h b/ipc/kdbus/queue.h
-new file mode 100644
-index 0000000..bf686d1
---- /dev/null
-+++ b/ipc/kdbus/queue.h
-@@ -0,0 +1,84 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_QUEUE_H
-+#define __KDBUS_QUEUE_H
-+
-+#include <linux/list.h>
-+#include <linux/rbtree.h>
-+
-+struct kdbus_conn;
-+struct kdbus_pool_slice;
-+struct kdbus_reply;
-+struct kdbus_staging;
-+struct kdbus_user;
-+
-+/**
-+ * struct kdbus_queue - a connection's message queue
-+ * @msg_list:		List head for kdbus_queue_entry objects
-+ * @msg_prio_queue:	RB tree root for messages, sorted by priority
-+ * @msg_prio_highest:	Link to the RB node referencing the message with the
-+ *			highest priority in the tree.
-+ */
-+struct kdbus_queue {
-+	struct list_head msg_list;
-+	struct rb_root msg_prio_queue;
-+	struct rb_node *msg_prio_highest;
-+};
-+
-+/**
-+ * struct kdbus_queue_entry - messages waiting to be read
-+ * @entry:		Entry in the connection's list
-+ * @prio_node:		Entry in the priority queue tree
-+ * @prio_entry:		Queue tree node entry in the list of one priority
-+ * @priority:		Message priority
-+ * @dst_name_id:	The sequence number of the name this message is
-+ *			addressed to, 0 for messages sent to an ID
-+ * @conn:		Connection this entry is queued on
-+ * @gaps:		Gaps object to fill message gaps at RECV time
-+ * @user:		User used for accounting
-+ * @slice:		Slice in the receiver's pool for the message
-+ * @reply:		The reply block if a reply to this message is expected
-+ */
-+struct kdbus_queue_entry {
-+	struct list_head entry;
-+	struct rb_node prio_node;
-+	struct list_head prio_entry;
-+
-+	s64 priority;
-+	u64 dst_name_id;
-+
-+	struct kdbus_conn *conn;
-+	struct kdbus_gaps *gaps;
-+	struct kdbus_user *user;
-+	struct kdbus_pool_slice *slice;
-+	struct kdbus_reply *reply;
-+};
-+
-+void kdbus_queue_init(struct kdbus_queue *queue);
-+struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
-+					   s64 priority, bool use_priority);
-+
-+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *src,
-+						struct kdbus_conn *dst,
-+						struct kdbus_staging *s);
-+void kdbus_queue_entry_free(struct kdbus_queue_entry *entry);
-+int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
-+			      u64 *return_flags, bool install_fds);
-+void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
-+			       struct kdbus_reply *reply);
-+int kdbus_queue_entry_move(struct kdbus_queue_entry *entry,
-+			   struct kdbus_conn *dst);
-+
-+#endif /* __KDBUS_QUEUE_H */
-diff --git a/ipc/kdbus/reply.c b/ipc/kdbus/reply.c
-new file mode 100644
-index 0000000..e6791d8
---- /dev/null
-+++ b/ipc/kdbus/reply.c
-@@ -0,0 +1,252 @@
-+#include <linux/init.h>
-+#include <linux/mm.h>
-+#include <linux/module.h>
-+#include <linux/mutex.h>
-+#include <linux/slab.h>
-+#include <linux/uio.h>
-+
-+#include "bus.h"
-+#include "connection.h"
-+#include "endpoint.h"
-+#include "message.h"
-+#include "metadata.h"
-+#include "names.h"
-+#include "domain.h"
-+#include "item.h"
-+#include "notify.h"
-+#include "policy.h"
-+#include "reply.h"
-+#include "util.h"
-+
-+/**
-+ * kdbus_reply_new() - Allocate and set up a new kdbus_reply object
-+ * @reply_src:		The connection a reply is expected from
-+ * @reply_dst:		The connection this reply object belongs to
-+ * @msg:		Message associated with the reply
-+ * @name_entry:		Name entry used to send the message
-+ * @sync:		Whether or not to make this reply synchronous
-+ *
-+ * Allocate and fill a new kdbus_reply object.
-+ *
-+ * Return: New kdbus_reply object on success, ERR_PTR on error.
-+ */
-+struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
-+				    struct kdbus_conn *reply_dst,
-+				    const struct kdbus_msg *msg,
-+				    struct kdbus_name_entry *name_entry,
-+				    bool sync)
-+{
-+	struct kdbus_reply *r;
-+	int ret;
-+
-+	if (atomic_inc_return(&reply_dst->request_count) >
-+	    KDBUS_CONN_MAX_REQUESTS_PENDING) {
-+		ret = -EMLINK;
-+		goto exit_dec_request_count;
-+	}
-+
-+	r = kzalloc(sizeof(*r), GFP_KERNEL);
-+	if (!r) {
-+		ret = -ENOMEM;
-+		goto exit_dec_request_count;
-+	}
-+
-+	kref_init(&r->kref);
-+	INIT_LIST_HEAD(&r->entry);
-+	r->reply_src = kdbus_conn_ref(reply_src);
-+	r->reply_dst = kdbus_conn_ref(reply_dst);
-+	r->cookie = msg->cookie;
-+	r->name_id = name_entry ? name_entry->name_id : 0;
-+	r->deadline_ns = msg->timeout_ns;
-+
-+	if (sync) {
-+		r->sync = true;
-+		r->waiting = true;
-+	}
-+
-+	return r;
-+
-+exit_dec_request_count:
-+	atomic_dec(&reply_dst->request_count);
-+	return ERR_PTR(ret);
-+}
-+
-+static void __kdbus_reply_free(struct kref *kref)
-+{
-+	struct kdbus_reply *reply =
-+		container_of(kref, struct kdbus_reply, kref);
-+
-+	atomic_dec(&reply->reply_dst->request_count);
-+	kdbus_conn_unref(reply->reply_src);
-+	kdbus_conn_unref(reply->reply_dst);
-+	kfree(reply);
-+}
-+
-+/**
-+ * kdbus_reply_ref() - Increase reference on kdbus_reply
-+ * @r:		The reply, may be %NULL
-+ *
-+ * Return: The reply object with an extra reference
-+ */
-+struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r)
-+{
-+	if (r)
-+		kref_get(&r->kref);
-+	return r;
-+}
-+
-+/**
-+ * kdbus_reply_unref() - Decrease reference on kdbus_reply
-+ * @r:		The reply, may be %NULL
-+ *
-+ * Return: NULL
-+ */
-+struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r)
-+{
-+	if (r)
-+		kref_put(&r->kref, __kdbus_reply_free);
-+	return NULL;
-+}
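-+
-+/*
-+ * Editorial usage sketch, not part of the original patch: because
-+ * kdbus_reply_unref() always returns NULL, a caller holding a
-+ * reference (e.g. one taken via kdbus_reply_ref()) can drop it and
-+ * reset its local pointer in a single statement:
-+ *
-+ *	struct kdbus_reply *r = kdbus_reply_ref(reply);
-+ *
-+ *	(... use r ...)
-+ *
-+ *	r = kdbus_reply_unref(r);	(r is now NULL)
-+ */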
-+
-+/**
-+ * kdbus_reply_link() - Link reply object into target connection
-+ * @r:		Reply to link
-+ */
-+void kdbus_reply_link(struct kdbus_reply *r)
-+{
-+	if (WARN_ON(!list_empty(&r->entry)))
-+		return;
-+
-+	list_add(&r->entry, &r->reply_dst->reply_list);
-+	kdbus_reply_ref(r);
-+}
-+
-+/**
-+ * kdbus_reply_unlink() - Unlink reply object from target connection
-+ * @r:		Reply to unlink
-+ */
-+void kdbus_reply_unlink(struct kdbus_reply *r)
-+{
-+	if (!list_empty(&r->entry)) {
-+		list_del_init(&r->entry);
-+		kdbus_reply_unref(r);
-+	}
-+}
-+
-+/**
-+ * kdbus_sync_reply_wakeup() - Wake a synchronously blocking reply
-+ * @reply:	The reply object
-+ * @err:	Error code to set on the remote side
-+ *
-+ * Wake up the remote peer (method origin) with the appropriate synchronous
-+ * reply code.
-+ */
-+void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err)
-+{
-+	if (WARN_ON(!reply->sync))
-+		return;
-+
-+	reply->waiting = false;
-+	reply->err = err;
-+	wake_up_interruptible(&reply->reply_dst->wait);
-+}
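-+
-+/*
-+ * Editorial sketch, not part of the original patch: a simplified,
-+ * hypothetical version of the waiting side that kdbus_sync_reply_wakeup()
-+ * unblocks. It assumes @conn is the method origin (reply->reply_dst),
-+ * whose wait queue is kicked above; the real send path is more involved.
-+ */
-+static inline int kdbus_example_sync_wait(struct kdbus_conn *conn,
-+					  struct kdbus_reply *r)
-+{
-+	int ret;
-+
-+	/* sleep until kdbus_sync_reply_wakeup() clears r->waiting */
-+	ret = wait_event_interruptible(conn->wait, !r->waiting);
-+	if (ret < 0) {
-+		/* interrupted; flag it so scan-work skips the notification */
-+		r->interrupted = true;
-+		return ret;
-+	}
-+
-+	return r->err;
-+}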
-+
-+/**
-+ * kdbus_reply_find() - Find the corresponding reply object
-+ * @replying:	The replying connection or NULL
-+ * @reply_dst:	The connection the reply will be sent to
-+ *		(method origin)
-+ * @cookie:	The cookie of the requesting message
-+ *
-+ * Lookup a reply object that should be sent as a reply by
-+ * @replying to @reply_dst with the given cookie.
-+ *
-+ * Callers must take the @reply_dst lock.
-+ *
-+ * Return: the corresponding reply object or NULL if not found
-+ */
-+struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
-+				     struct kdbus_conn *reply_dst,
-+				     u64 cookie)
-+{
-+	struct kdbus_reply *r;
-+
-+	list_for_each_entry(r, &reply_dst->reply_list, entry) {
-+		if (r->cookie == cookie &&
-+		    (!replying || r->reply_src == replying))
-+			return r;
-+	}
-+
-+	return NULL;
-+}
-+
-+/**
-+ * kdbus_reply_list_scan_work() - Worker callback to scan the replies of a
-+ *				  connection for exceeded timeouts
-+ * @work:		Work struct of the connection to scan
-+ *
-+ * Walk the list of replies stored with a connection and look for entries
-+ * that have exceeded their timeout. If such an entry is found, a timeout
-+ * notification is sent to the waiting peer, and the reply is removed from
-+ * the list.
-+ *
-+ * The work is rescheduled to the nearest timeout found during the list
-+ * iteration.
-+ */
-+void kdbus_reply_list_scan_work(struct work_struct *work)
-+{
-+	struct kdbus_conn *conn =
-+		container_of(work, struct kdbus_conn, work.work);
-+	struct kdbus_reply *reply, *reply_tmp;
-+	u64 deadline = ~0ULL;
-+	u64 now;
-+
-+	now = ktime_get_ns();
-+
-+	mutex_lock(&conn->lock);
-+	if (!kdbus_conn_active(conn)) {
-+		mutex_unlock(&conn->lock);
-+		return;
-+	}
-+
-+	list_for_each_entry_safe(reply, reply_tmp, &conn->reply_list, entry) {
-+		/*
-+		 * If the reply block is waiting for synchronous I/O,
-+		 * the timeout is handled by wait_event_*_timeout(),
-+		 * so we don't have to care about it here.
-+		 */
-+		if (reply->sync && !reply->interrupted)
-+			continue;
-+
-+		WARN_ON(reply->reply_dst != conn);
-+
-+		if (reply->deadline_ns > now) {
-+			/* remember next timeout */
-+			if (deadline > reply->deadline_ns)
-+				deadline = reply->deadline_ns;
-+
-+			continue;
-+		}
-+
-+		/*
-+		 * A zero deadline means the connection died, was
-+		 * cleaned up already and the notification was sent.
-+		 * Don't send notifications for reply trackers that were
-+		 * left in an interrupted syscall state.
-+		 */
-+		if (reply->deadline_ns != 0 && !reply->interrupted)
-+			kdbus_notify_reply_timeout(conn->ep->bus, conn->id,
-+						   reply->cookie);
-+
-+		kdbus_reply_unlink(reply);
-+	}
-+
-+	/* rearm delayed work with next timeout */
-+	if (deadline != ~0ULL)
-+		schedule_delayed_work(&conn->work,
-+				      nsecs_to_jiffies(deadline - now));
-+
-+	mutex_unlock(&conn->lock);
-+
-+	kdbus_notify_flush(conn->ep->bus);
-+}
-diff --git a/ipc/kdbus/reply.h b/ipc/kdbus/reply.h
-new file mode 100644
-index 0000000..68d5232
---- /dev/null
-+++ b/ipc/kdbus/reply.h
-@@ -0,0 +1,68 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_REPLY_H
-+#define __KDBUS_REPLY_H
-+
-+/**
-+ * struct kdbus_reply - an entry of kdbus_conn's list of replies
-+ * @kref:		Ref-count of this object
-+ * @entry:		The entry of the connection's reply_list
-+ * @reply_src:		The connection the reply will be sent from
-+ * @reply_dst:		The connection the reply will be sent to
-+ * @queue_entry:	The queue entry item that is prepared by the replying
-+ *			connection
-+ * @deadline_ns:	The deadline of the reply, in nanoseconds
-+ * @cookie:		The cookie of the requesting message
-+ * @name_id:		ID of the well-known name the original msg was sent to
-+ * @sync:		The reply block is waiting for synchronous I/O
-+ * @waiting:		The condition to synchronously wait for
-+ * @interrupted:	The sync reply was left in an interrupted state
-+ * @err:		The error code for the synchronous reply
-+ */
-+struct kdbus_reply {
-+	struct kref kref;
-+	struct list_head entry;
-+	struct kdbus_conn *reply_src;
-+	struct kdbus_conn *reply_dst;
-+	struct kdbus_queue_entry *queue_entry;
-+	u64 deadline_ns;
-+	u64 cookie;
-+	u64 name_id;
-+	bool sync:1;
-+	bool waiting:1;
-+	bool interrupted:1;
-+	int err;
-+};
-+
-+struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
-+				    struct kdbus_conn *reply_dst,
-+				    const struct kdbus_msg *msg,
-+				    struct kdbus_name_entry *name_entry,
-+				    bool sync);
-+
-+struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r);
-+struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r);
-+
-+void kdbus_reply_link(struct kdbus_reply *r);
-+void kdbus_reply_unlink(struct kdbus_reply *r);
-+
-+struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
-+				     struct kdbus_conn *reply_dst,
-+				     u64 cookie);
-+
-+void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err);
-+void kdbus_reply_list_scan_work(struct work_struct *work);
-+
-+#endif /* __KDBUS_REPLY_H */
-diff --git a/ipc/kdbus/util.c b/ipc/kdbus/util.c
-new file mode 100644
-index 0000000..72b1883
---- /dev/null
-+++ b/ipc/kdbus/util.c
-@@ -0,0 +1,156 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <linux/capability.h>
-+#include <linux/cred.h>
-+#include <linux/ctype.h>
-+#include <linux/err.h>
-+#include <linux/file.h>
-+#include <linux/slab.h>
-+#include <linux/string.h>
-+#include <linux/uaccess.h>
-+#include <linux/uio.h>
-+#include <linux/user_namespace.h>
-+
-+#include "limits.h"
-+#include "util.h"
-+
-+/**
-+ * kdbus_copy_from_user() - copy aligned data from user-space
-+ * @dest:	target buffer in kernel memory
-+ * @user_ptr:	user-provided source buffer
-+ * @size:	memory size to copy from user
-+ *
-+ * This copies @size bytes from @user_ptr into the kernel, just like
-+ * copy_from_user() does. But we enforce an 8-byte alignment and reject any
-+ * unaligned user-space pointers.
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_copy_from_user(void *dest, void __user *user_ptr, size_t size)
-+{
-+	if (!KDBUS_IS_ALIGNED8((uintptr_t)user_ptr))
-+		return -EFAULT;
-+
-+	if (copy_from_user(dest, user_ptr, size))
-+		return -EFAULT;
-+
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_verify_uid_prefix() - verify UID prefix of a user-supplied name
-+ * @name:	user-supplied name to verify
-+ * @user_ns:	user-namespace to act in
-+ * @kuid:	Kernel internal uid of user
-+ *
-+ * This verifies that the user-supplied name @name is prefixed with the
-+ * caller's UID. This is the default name-spacing policy we enforce on
-+ * user-supplied names for public kdbus entities like buses and endpoints.
-+ *
-+ * The user must supply names prefixed with "<UID>-", where the UID is
-+ * interpreted in the user-namespace of the domain. If the user fails to supply
-+ * such a prefixed name, we reject it.
-+ *
-+ * Return: 0 on success, negative error code on failure
-+ */
-+int kdbus_verify_uid_prefix(const char *name, struct user_namespace *user_ns,
-+			    kuid_t kuid)
-+{
-+	uid_t uid;
-+	char prefix[16];
-+
-+	/*
-+	 * The kuid must have a mapping into the userns of the domain,
-+	 * otherwise creation of buses and endpoints is not allowed.
-+	 */
-+	uid = from_kuid(user_ns, kuid);
-+	if (uid == (uid_t) -1)
-+		return -EINVAL;
-+
-+	snprintf(prefix, sizeof(prefix), "%u-", uid);
-+	if (strncmp(name, prefix, strlen(prefix)) != 0)
-+		return -EINVAL;
-+
-+	return 0;
-+}
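-+
-+/*
-+ * Editorial example, not part of the original patch: for a caller whose
-+ * kuid maps to UID 1000 inside @user_ns, the name "1000-mybus" passes
-+ * this check, while "mybus" or "0-mybus" are rejected with -EINVAL.
-+ */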
-+
-+/**
-+ * kdbus_sanitize_attach_flags() - Sanitize attach flags from user-space
-+ * @flags:		Attach flags provided by userspace
-+ * @attach_flags:	A pointer where to store the valid attach flags
-+ *
-+ * Convert attach-flags provided by user-space into a valid mask. If the mask
-+ * is invalid, an error is returned. The sanitized attach flags are stored in
-+ * the output parameter.
-+ *
-+ * Return: 0 on success, negative error on failure.
-+ */
-+int kdbus_sanitize_attach_flags(u64 flags, u64 *attach_flags)
-+{
-+	/* 'any' degrades to 'all' for compatibility */
-+	if (flags == _KDBUS_ATTACH_ANY)
-+		flags = _KDBUS_ATTACH_ALL;
-+
-+	/* reject unknown attach flags */
-+	if (flags & ~_KDBUS_ATTACH_ALL)
-+		return -EINVAL;
-+
-+	*attach_flags = flags;
-+	return 0;
-+}
-+
-+/**
-+ * kdbus_kvec_set - helper utility to assemble kvec arrays
-+ * @kvec:	kvec entry to use
-+ * @src:	Source address to set in @kvec
-+ * @len:	Number of bytes in @src
-+ * @total_len:	Pointer to total length variable
-+ *
-+ * Set @src and @len in @kvec, and increase @total_len by @len.
-+ */
-+void kdbus_kvec_set(struct kvec *kvec, void *src, size_t len, u64 *total_len)
-+{
-+	kvec->iov_base = src;
-+	kvec->iov_len = len;
-+	*total_len += len;
-+}
-+
-+static const char * const zeros = "\0\0\0\0\0\0\0";
-+
-+/**
-+ * kdbus_kvec_pad - conditionally write a padding kvec
-+ * @kvec:	kvec entry to use
-+ * @len:	Total length used for kvec array
-+ *
-+ * Check if the current total byte length of the array in @len is aligned to
-+ * 8 bytes. If it isn't, fill @kvec with padding information and increase @len
-+ * by the number of bytes stored in @kvec.
-+ *
-+ * Return: the number of added padding bytes.
-+ */
-+size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len)
-+{
-+	size_t pad = KDBUS_ALIGN8(*len) - *len;
-+
-+	if (!pad)
-+		return 0;
-+
-+	kvec->iov_base = (void *)zeros;
-+	kvec->iov_len = pad;
-+
-+	*len += pad;
-+
-+	return pad;
-+}
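-+
-+/*
-+ * Editorial sketch, not part of the original patch: a typical way the two
-+ * kvec helpers above combine to build an 8-byte aligned array from a
-+ * header and a payload. The layout is made up for illustration; @vecs
-+ * must provide room for up to four entries.
-+ */
-+static inline size_t kdbus_example_kvecs(struct kvec *vecs, void *hdr,
-+					 size_t hdr_len, void *payload,
-+					 size_t payload_len, u64 *total_len)
-+{
-+	size_t n = 0;
-+
-+	kdbus_kvec_set(&vecs[n++], hdr, hdr_len, total_len);
-+	/* add a padding vec only if @hdr_len was not 8-byte aligned */
-+	if (kdbus_kvec_pad(&vecs[n], total_len) > 0)
-+		n++;
-+
-+	kdbus_kvec_set(&vecs[n++], payload, payload_len, total_len);
-+	if (kdbus_kvec_pad(&vecs[n], total_len) > 0)
-+		n++;
-+
-+	return n;	/* number of kvec entries used */
-+}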
-diff --git a/ipc/kdbus/util.h b/ipc/kdbus/util.h
-new file mode 100644
-index 0000000..5297166
---- /dev/null
-+++ b/ipc/kdbus/util.h
-@@ -0,0 +1,73 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-+ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ * Copyright (C) 2013-2015 Linux Foundation
-+ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#ifndef __KDBUS_UTIL_H
-+#define __KDBUS_UTIL_H
-+
-+#include <linux/dcache.h>
-+#include <linux/ioctl.h>
-+
-+#include <uapi/linux/kdbus.h>
-+
-+/* all exported addresses are 64 bit */
-+#define KDBUS_PTR(addr) ((void __user *)(uintptr_t)(addr))
-+
-+/* all exported sizes are 64 bit and data aligned to 64 bit */
-+#define KDBUS_ALIGN8(s) ALIGN((s), 8)
-+#define KDBUS_IS_ALIGNED8(s) (IS_ALIGNED(s, 8))
-+
-+/**
-+ * kdbus_member_set_user - write a structure member to user memory
-+ * @_s:		Variable to copy from
-+ * @_b:		Buffer to write to
-+ * @_t:		Structure type
-+ * @_m:		Member name in the passed structure
-+ *
-+ * Return: the result of copy_to_user()
-+ */
-+#define kdbus_member_set_user(_s, _b, _t, _m)				\
-+({									\
-+	u64 __user *_sz =						\
-+		(void __user *)((u8 __user *)(_b) + offsetof(_t, _m));	\
-+	copy_to_user(_sz, _s, FIELD_SIZEOF(_t, _m));			\
-+})
-+
-+/**
-+ * kdbus_strhash - calculate a hash
-+ * @str:	String
-+ *
-+ * Return: hash value
-+ */
-+static inline unsigned int kdbus_strhash(const char *str)
-+{
-+	unsigned long hash = init_name_hash();
-+
-+	while (*str)
-+		hash = partial_name_hash(*str++, hash);
-+
-+	return end_name_hash(hash);
-+}
-+
-+int kdbus_verify_uid_prefix(const char *name, struct user_namespace *user_ns,
-+			    kuid_t kuid);
-+int kdbus_sanitize_attach_flags(u64 flags, u64 *attach_flags);
-+
-+int kdbus_copy_from_user(void *dest, void __user *user_ptr, size_t size);
-+
-+struct kvec;
-+
-+void kdbus_kvec_set(struct kvec *kvec, void *src, size_t len, u64 *total_len);
-+size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len);
-+
-+#endif
-diff --git a/samples/Kconfig b/samples/Kconfig
-index 224ebb4..a4c6b2f 100644
---- a/samples/Kconfig
-+++ b/samples/Kconfig
-@@ -55,6 +55,13 @@ config SAMPLE_KDB
- 	  Build an example of how to dynamically add the hello
- 	  command to the kdb shell.
- 
-+config SAMPLE_KDBUS
-+	bool "Build kdbus API example"
-+	depends on KDBUS
-+	help
-+	  Build an example of how the kdbus API can be used from
-+	  userspace.
-+
- config SAMPLE_RPMSG_CLIENT
- 	tristate "Build rpmsg client sample -- loadable modules only"
- 	depends on RPMSG && m
-diff --git a/samples/Makefile b/samples/Makefile
-index f00257b..f0ad51e 100644
---- a/samples/Makefile
-+++ b/samples/Makefile
-@@ -1,4 +1,5 @@
- # Makefile for Linux samples code
- 
- obj-$(CONFIG_SAMPLES)	+= kobject/ kprobes/ trace_events/ livepatch/ \
--			   hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
-+			   hw_breakpoint/ kfifo/ kdb/ kdbus/ hidraw/ rpmsg/ \
-+			   seccomp/
-diff --git a/samples/kdbus/.gitignore b/samples/kdbus/.gitignore
-new file mode 100644
-index 0000000..ee07d98
---- /dev/null
-+++ b/samples/kdbus/.gitignore
-@@ -0,0 +1 @@
-+kdbus-workers
-diff --git a/samples/kdbus/Makefile b/samples/kdbus/Makefile
-new file mode 100644
-index 0000000..137f842
---- /dev/null
-+++ b/samples/kdbus/Makefile
-@@ -0,0 +1,9 @@
-+# kbuild trick to avoid linker error. Can be omitted if a module is built.
-+obj- := dummy.o
-+
-+hostprogs-$(CONFIG_SAMPLE_KDBUS) += kdbus-workers
-+
-+always := $(hostprogs-y)
-+
-+HOSTCFLAGS_kdbus-workers.o += -I$(objtree)/usr/include
-+HOSTLOADLIBES_kdbus-workers := -lrt
-diff --git a/samples/kdbus/kdbus-api.h b/samples/kdbus/kdbus-api.h
-new file mode 100644
-index 0000000..7f3abae
---- /dev/null
-+++ b/samples/kdbus/kdbus-api.h
-@@ -0,0 +1,114 @@
-+#ifndef KDBUS_API_H
-+#define KDBUS_API_H
-+
-+#include <sys/ioctl.h>
-+#include <linux/kdbus.h>
-+
-+#define KDBUS_ALIGN8(l) (((l) + 7) & ~7)
-+#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
-+#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
-+#define KDBUS_ITEM_NEXT(item) \
-+	(typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
-+#define KDBUS_FOREACH(iter, first, _size)				\
-+	for ((iter) = (first);						\
-+	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
-+	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
-+	     (iter) = (void *)((uint8_t *)(iter) + KDBUS_ALIGN8((iter)->size)))
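-+
-+/*
-+ * Editorial note, not part of the original patch: KDBUS_FOREACH walks a
-+ * packed item array of '_size' bytes starting at 'first', advancing by
-+ * each item's 8-byte aligned size. Typical use, as seen later in the
-+ * sample:
-+ *
-+ *	const struct kdbus_item *item;
-+ *
-+ *	KDBUS_FOREACH(item, msg->items,
-+ *		      msg->size - offsetof(struct kdbus_msg, items)) {
-+ *		(... dispatch on item->type ...)
-+ *	}
-+ */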
-+
-+static inline int kdbus_cmd_bus_make(int control_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(control_fd, KDBUS_CMD_BUS_MAKE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_endpoint_make(int bus_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(bus_fd, KDBUS_CMD_ENDPOINT_MAKE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_endpoint_update(int ep_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(ep_fd, KDBUS_CMD_ENDPOINT_UPDATE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_hello(int bus_fd, struct kdbus_cmd_hello *cmd)
-+{
-+	int ret = ioctl(bus_fd, KDBUS_CMD_HELLO, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_update(int fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(fd, KDBUS_CMD_UPDATE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_byebye(int conn_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_BYEBYE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_free(int conn_fd, struct kdbus_cmd_free *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_FREE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_conn_info(int conn_fd, struct kdbus_cmd_info *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_CONN_INFO, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_bus_creator_info(int conn_fd, struct kdbus_cmd_info *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_BUS_CREATOR_INFO, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_list(int fd, struct kdbus_cmd_list *cmd)
-+{
-+	int ret = ioctl(fd, KDBUS_CMD_LIST, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_send(int conn_fd, struct kdbus_cmd_send *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_SEND, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_recv(int conn_fd, struct kdbus_cmd_recv *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_RECV, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_name_acquire(int conn_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_NAME_ACQUIRE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_name_release(int conn_fd, struct kdbus_cmd *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_NAME_RELEASE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_match_add(int conn_fd, struct kdbus_cmd_match *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
-+
-+static inline int kdbus_cmd_match_remove(int conn_fd, struct kdbus_cmd_match *cmd)
-+{
-+	int ret = ioctl(conn_fd, KDBUS_CMD_MATCH_REMOVE, cmd);
-+	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
-+}
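-+
-+/*
-+ * Editorial usage sketch, not part of the original patch: the wrappers
-+ * above normalize the classic ioctl convention (return -1 with the error
-+ * in errno) into plain negative error codes, so call sites can test the
-+ * return value directly. A hypothetical peer query:
-+ *
-+ *	struct kdbus_cmd_info cmd = {
-+ *		.size = sizeof(cmd),
-+ *		.id = peer_id,
-+ *	};
-+ *
-+ *	int r = kdbus_cmd_conn_info(conn_fd, &cmd);
-+ *	if (r < 0)
-+ *		fprintf(stderr, "CONN_INFO: %s\n", strerror(-r));
-+ */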
-+
-+#endif /* KDBUS_API_H */
-diff --git a/samples/kdbus/kdbus-workers.c b/samples/kdbus/kdbus-workers.c
-new file mode 100644
-index 0000000..5a6dfdc
---- /dev/null
-+++ b/samples/kdbus/kdbus-workers.c
-@@ -0,0 +1,1346 @@
-+/*
-+ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+/*
-+ * Example: Workers
-+ * This program computes prime-numbers based on the sieve of Eratosthenes. The
-+ * master sets up a shared memory region and spawns workers which clear out the
-+ * non-primes. The master reacts to keyboard input and to client-requests to
-+ * control what each worker does. Note that this is in no way meant as an
-+ * efficient way to compute primes. It serves only as an example of how a
-+ * master/worker concept can be implemented with kdbus used for control
-+ * messages.
-+ *
-+ * The main process is called the 'master'. It creates a new, private bus which
-+ * will be used between the master and its workers to communicate. The master
-+ * then spawns a fixed number of workers. Whenever a worker dies (detected via
-+ * SIGCHLD), the master spawns a new worker. When done, the master waits for all
-+ * workers to exit, prints a status report and exits itself.
-+ *
-+ * The master process does *not* keep track of its workers. Instead, this
-+ * example implements a PULL model. That is, the master acquires a well-known
-+ * name on the bus which each worker uses to request tasks from the master. If
-+ * there are no more tasks, the master will return an empty task-list, which
-+ * causes a worker to exit immediately.
-+ *
-+ * As tasks can be computationally expensive, we support cancellation. Whenever
-+ * the master process is interrupted, it will drop its well-known name on the
-+ * bus. This causes kdbus to broadcast a name-change notification. The workers
-+ * check for broadcast messages regularly and will exit if they receive one.
-+ *
-+ * This example consists of 4 objects:
-+ *  * master: The master object contains the context of the master process. This
-+ *            process manages the prime-context, spawns workers and assigns
-+ *            prime-ranges to each worker to compute.
-+ *            The master does not do any prime-computations itself.
-+ *  * child:  The child object contains the context of a worker. It inherits the
-+ *            prime context from its parent (the master) and then creates a new
-+ *            bus context to request prime-ranges to compute.
-+ *  * prime:  The "prime" object is used to abstract how we compute primes. When
-+ *            allocated, it prepares a memory region to hold 1 bit for each
-+ *            natural number up to a fixed maximum ('MAX_PRIMES').
-+ *            The memory region is backed by a memfd which we share between
-+ *            processes. Each worker gets assigned a range of natural
-+ *            numbers whose multiples it clears from the memory region. The
-+ *            master process is responsible for distributing all natural numbers
-+ *            up to the fixed maximum to its workers.
-+ *  * bus:    The bus object is an abstraction of the kdbus API. It is pretty
-+ *            straightforward and only manages the connection-fd plus the
-+ *            memory-mapped pool in a single object.
-+ *
-+ * This example is written in reverse order, which should make it easier to read
-+ * top-down, but requires some forward-declarations. Just ignore those.
-+ */
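-+
-+/*
-+ * Editorial summary, not part of the original source: the task protocol
-+ * implemented below is a single synchronous round-trip. A worker sends
-+ * the one-byte request "r" to the master's well-known name; the master
-+ * replies with two size_t values packed into one payload vector:
-+ *
-+ *	reply[0] = numbers handed out so far (the new range starts there)
-+ *	reply[1] = number of sieve steps to run (0: no work left, exit)
-+ */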
-+
-+#include <stdio.h>
-+#include <stdlib.h>
-+#include <sys/syscall.h>
-+
-+/* glibc < 2.7 does not ship sys/signalfd.h */
-+/* we require kernels with __NR_memfd_create */
-+#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 7 && defined(__NR_memfd_create)
-+
-+#include <ctype.h>
-+#include <errno.h>
-+#include <fcntl.h>
-+#include <linux/memfd.h>
-+#include <signal.h>
-+#include <stdbool.h>
-+#include <stddef.h>
-+#include <stdint.h>
-+#include <string.h>
-+#include <sys/mman.h>
-+#include <sys/poll.h>
-+#include <sys/signalfd.h>
-+#include <sys/time.h>
-+#include <sys/wait.h>
-+#include <time.h>
-+#include <unistd.h>
-+#include "kdbus-api.h"
-+
-+/* FORWARD DECLARATIONS */
-+
-+#define POOL_SIZE (16 * 1024 * 1024)
-+#define MAX_PRIMES (2UL << 24)
-+#define WORKER_COUNT (16)
-+#define PRIME_STEPS (65536 * 4)
-+
-+static const char *arg_busname = "example-workers";
-+static const char *arg_modname = "kdbus";
-+static const char *arg_master = "org.freedesktop.master";
-+
-+static int err_assert(int r_errno, const char *msg, const char *func, int line,
-+		      const char *file)
-+{
-+	r_errno = (r_errno != 0) ? -abs(r_errno) : -EFAULT;
-+	if (r_errno < 0) {
-+		errno = -r_errno;
-+		fprintf(stderr, "ERR: %s: %m (%s:%d in %s)\n",
-+			msg, func, line, file);
-+	}
-+	return r_errno;
-+}
-+
-+#define err_r(_r, _msg) err_assert((_r), (_msg), __func__, __LINE__, __FILE__)
-+#define err(_msg) err_r(errno, (_msg))
-+
-+struct prime;
-+struct bus;
-+struct master;
-+struct child;
-+
-+struct prime {
-+	int fd;
-+	uint8_t *area;
-+	size_t max;
-+	size_t done;
-+	size_t status;
-+};
-+
-+static int prime_new(struct prime **out);
-+static void prime_free(struct prime *p);
-+static bool prime_done(struct prime *p);
-+static void prime_consume(struct prime *p, size_t amount);
-+static int prime_run(struct prime *p, struct bus *cancel, size_t number);
-+static void prime_print(struct prime *p);
-+
-+struct bus {
-+	int fd;
-+	uint8_t *pool;
-+};
-+
-+static int bus_open_connection(struct bus **out, uid_t uid, const char *name,
-+			       uint64_t recv_flags);
-+static void bus_close_connection(struct bus *b);
-+static void bus_poool_free_slice(struct bus *b, uint64_t offset);
-+static int bus_acquire_name(struct bus *b, const char *name);
-+static int bus_install_name_loss_match(struct bus *b, const char *name);
-+static int bus_poll(struct bus *b);
-+static int bus_make(uid_t uid, const char *name);
-+
-+struct master {
-+	size_t n_workers;
-+	size_t max_workers;
-+
-+	int signal_fd;
-+	int control_fd;
-+
-+	struct prime *prime;
-+	struct bus *bus;
-+};
-+
-+static int master_new(struct master **out);
-+static void master_free(struct master *m);
-+static int master_run(struct master *m);
-+static int master_poll(struct master *m);
-+static int master_handle_stdin(struct master *m);
-+static int master_handle_signal(struct master *m);
-+static int master_handle_bus(struct master *m);
-+static int master_reply(struct master *m, const struct kdbus_msg *msg);
-+static int master_waitpid(struct master *m);
-+static int master_spawn(struct master *m);
-+
-+struct child {
-+	struct bus *bus;
-+	struct prime *prime;
-+};
-+
-+static int child_new(struct child **out, struct prime *p);
-+static void child_free(struct child *c);
-+static int child_run(struct child *c);
-+
-+/* END OF FORWARD DECLARATIONS */
-+
-+/*
-+ * This is the main entrypoint of this example. It is pretty straightforward. We
-+ * create a master object, run the computation, print a status report and then
-+ * exit. Nothing particularly interesting here, so let's look into the master
-+ * object...
-+ */
-+int main(int argc, char **argv)
-+{
-+	struct master *m = NULL;
-+	int r;
-+
-+	r = master_new(&m);
-+	if (r < 0)
-+		goto out;
-+
-+	r = master_run(m);
-+	if (r < 0)
-+		goto out;
-+
-+	if (0)
-+		prime_print(m->prime);
-+
-+out:
-+	master_free(m);
-+	if (r < 0 && r != -EINTR)
-+		fprintf(stderr, "failed\n");
-+	else
-+		fprintf(stderr, "done\n");
-+	return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
-+}
-+
-+/*
-+ * ...this will allocate a new master context. It keeps track of the current
-+ * number of children/workers that are running, manages a signalfd to track
-+ * SIGCHLD, and creates a private kdbus bus. Afterwards, it opens its connection
-+ * to the bus and acquires a well known-name (arg_master).
-+ */
-+static int master_new(struct master **out)
-+{
-+	struct master *m;
-+	sigset_t smask;
-+	int r;
-+
-+	m = calloc(1, sizeof(*m));
-+	if (!m)
-+		return err("cannot allocate master");
-+
-+	m->max_workers = WORKER_COUNT;
-+	m->signal_fd = -1;
-+	m->control_fd = -1;
-+
-+	/* Block SIGINT and SIGCHLD signals */
-+	sigemptyset(&smask);
-+	sigaddset(&smask, SIGINT);
-+	sigaddset(&smask, SIGCHLD);
-+	sigprocmask(SIG_BLOCK, &smask, NULL);
-+
-+	m->signal_fd = signalfd(-1, &smask, SFD_CLOEXEC);
-+	if (m->signal_fd < 0) {
-+		r = err("cannot create signalfd");
-+		goto error;
-+	}
-+
-+	r = prime_new(&m->prime);
-+	if (r < 0)
-+		goto error;
-+
-+	m->control_fd = bus_make(getuid(), arg_busname);
-+	if (m->control_fd < 0) {
-+		r = m->control_fd;
-+		goto error;
-+	}
-+
-+	/*
-+	 * Open a bus connection for the master, and require each received
-+	 * message to have a metadata item of type KDBUS_ITEM_PIDS attached.
-+	 * The current UID is needed to compute the name of the bus node to
-+	 * connect to.
-+	 */
-+	r = bus_open_connection(&m->bus, getuid(),
-+				arg_busname, KDBUS_ATTACH_PIDS);
-+	if (r < 0)
-+		goto error;
-+
-+	/*
-+	 * Acquire a well-known name on the bus, so children can address
-+	 * messages to the master using KDBUS_DST_ID_NAME as destination-ID
-+	 * of messages.
-+	 */
-+	r = bus_acquire_name(m->bus, arg_master);
-+	if (r < 0)
-+		goto error;
-+
-+	*out = m;
-+	return 0;
-+
-+error:
-+	master_free(m);
-+	return r;
-+}
-+
-+/* pretty straightforward destructor of a master object */
-+static void master_free(struct master *m)
-+{
-+	if (!m)
-+		return;
-+
-+	bus_close_connection(m->bus);
-+	if (m->control_fd >= 0)
-+		close(m->control_fd);
-+	prime_free(m->prime);
-+	if (m->signal_fd >= 0)
-+		close(m->signal_fd);
-+	free(m);
-+}
-+
-+static int master_run(struct master *m)
-+{
-+	int res, r = 0;
-+
-+	while (!prime_done(m->prime)) {
-+		while (m->n_workers < m->max_workers) {
-+			r = master_spawn(m);
-+			if (r < 0)
-+				break;
-+		}
-+
-+		r = master_poll(m);
-+		if (r < 0)
-+			break;
-+	}
-+
-+	if (r < 0) {
-+		bus_close_connection(m->bus);
-+		m->bus = NULL;
-+	}
-+
-+	while (m->n_workers > 0) {
-+		res = master_poll(m);
-+		if (res < 0) {
-+			if (m->bus) {
-+				bus_close_connection(m->bus);
-+				m->bus = NULL;
-+			}
-+			r = res;
-+		}
-+	}
-+
-+	return r == -EINTR ? 0 : r;
-+}
-+
-+static int master_poll(struct master *m)
-+{
-+	struct pollfd fds[3] = {};
-+	int r = 0, n = 0;
-+
-+	/*
-+	 * Add stdin, the signalfd and the connection owner file descriptor to
-+	 * the pollfd table, and handle incoming traffic on the latter in
-+	 * master_handle_bus().
-+	 */
-+	fds[n].fd = STDIN_FILENO;
-+	fds[n++].events = POLLIN;
-+	fds[n].fd = m->signal_fd;
-+	fds[n++].events = POLLIN;
-+	if (m->bus) {
-+		fds[n].fd = m->bus->fd;
-+		fds[n++].events = POLLIN;
-+	}
-+
-+	r = poll(fds, n, -1);
-+	if (r < 0)
-+		return err("poll() failed");
-+
-+	if (fds[0].revents & POLLIN)
-+		r = master_handle_stdin(m);
-+	else if (fds[0].revents)
-+		r = err("ERR/HUP on stdin");
-+	if (r < 0)
-+		return r;
-+
-+	if (fds[1].revents & POLLIN)
-+		r = master_handle_signal(m);
-+	else if (fds[1].revents)
-+		r = err("ERR/HUP on signalfd");
-+	if (r < 0)
-+		return r;
-+
-+	if (fds[2].revents & POLLIN)
-+		r = master_handle_bus(m);
-+	else if (fds[2].revents)
-+		r = err("ERR/HUP on bus");
-+
-+	return r;
-+}
-+
-+static int master_handle_stdin(struct master *m)
-+{
-+	char buf[128];
-+	ssize_t l;
-+	int r = 0;
-+
-+	l = read(STDIN_FILENO, buf, sizeof(buf));
-+	if (l < 0)
-+		return err("cannot read stdin");
-+	if (l == 0)
-+		return err_r(-EINVAL, "EOF on stdin");
-+
-+	while (l-- > 0) {
-+		switch (buf[l]) {
-+		case 'q':
-+			/* quit */
-+			r = -EINTR;
-+			break;
-+		case '\n':
-+		case ' ':
-+			/* ignore */
-+			break;
-+		default:
-+			if (isgraph(buf[l]))
-+				fprintf(stderr, "invalid input '%c'\n", buf[l]);
-+			else
-+				fprintf(stderr, "invalid input 0x%x\n", buf[l]);
-+			break;
-+		}
-+	}
-+
-+	return r;
-+}
-+
-+static int master_handle_signal(struct master *m)
-+{
-+	struct signalfd_siginfo val;
-+	ssize_t l;
-+
-+	l = read(m->signal_fd, &val, sizeof(val));
-+	if (l < 0)
-+		return err("cannot read signalfd");
-+	if (l != sizeof(val))
-+		return err_r(-EINVAL, "invalid data from signalfd");
-+
-+	switch (val.ssi_signo) {
-+	case SIGCHLD:
-+		return master_waitpid(m);
-+	case SIGINT:
-+		return err_r(-EINTR, "interrupted");
-+	default:
-+		return err_r(-EINVAL, "caught invalid signal");
-+	}
-+}
-+
-+static int master_handle_bus(struct master *m)
-+{
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+	const struct kdbus_msg *msg = NULL;
-+	const struct kdbus_item *item;
-+	const struct kdbus_vec *vec = NULL;
-+	int r = 0;
-+
-+	/*
-+	 * To receive a message, the KDBUS_CMD_RECV ioctl is used.
-+	 * It takes an argument of type 'struct kdbus_cmd_recv', which
-+	 * will contain information on the received message when the call
-+	 * returns. See kdbus.message(7).
-+	 */
-+	r = kdbus_cmd_recv(m->bus->fd, &recv);
-+	/*
-+	 * EAGAIN is returned when there is no message waiting on this
-+	 * connection. This is not an error - simply bail out.
-+	 */
-+	if (r == -EAGAIN)
-+		return 0;
-+	if (r < 0)
-+		return err_r(r, "cannot receive message");
-+
-+	/*
-+	 * Messages received by a connection are stored inside the connection's
-+	 * pool, at an offset that has been returned in the 'recv' command
-+	 * struct above. The value describes the relative offset from the
-+	 * start address of the pool. A message is described with
-+	 * 'struct kdbus_msg'. See kdbus.message(7).
-+	 */
-+	msg = (void *)(m->bus->pool + recv.msg.offset);
-+
-+	/*
-+	 * A message describes its actual payload in an array of items.
-+	 * KDBUS_FOREACH() is a simple iterator that walks such an array.
-+	 * struct kdbus_msg has a field to denote its total size, which is
-+	 * needed to determine the number of items in the array.
-+	 */
-+	KDBUS_FOREACH(item, msg->items,
-+		      msg->size - offsetof(struct kdbus_msg, items)) {
-+		/*
-+		 * An item of type PAYLOAD_OFF describes in-line memory
-+		 * stored in the pool at a described offset. That offset is
-+		 * relative to the start address of the message header.
-+		 * This example program only expects one single item of that
-+		 * type, remembers the struct kdbus_vec member of the item
-+		 * when it sees it, and bails out if there is more than one
-+		 * of them.
-+		 */
-+		if (item->type == KDBUS_ITEM_PAYLOAD_OFF) {
-+			if (vec) {
-+				r = err_r(-EEXIST,
-+					  "message with multiple vecs");
-+				break;
-+			}
-+			vec = &item->vec;
-+			if (vec->size != 1) {
-+				r = err_r(-EINVAL, "invalid message size");
-+				break;
-+			}
-+
-+		/*
-+		 * MEMFDs are transported as items of type PAYLOAD_MEMFD.
-+		 * If such an item is attached, a new file descriptor was
-+		 * installed into the task when KDBUS_CMD_RECV was called, and
-+		 * its number is stored in item->memfd.fd.
-+		 * Implementers *must* handle this item type and close the
-+		 * file descriptor when no longer needed in order to prevent
-+		 * file descriptor exhaustion. This example program just bails
-+		 * out with an error in this case, as memfds are not expected
-+		 * in this context.
-+		 */
-+		} else if (item->type == KDBUS_ITEM_PAYLOAD_MEMFD) {
-+			r = err_r(-EINVAL, "message with memfd");
-+			break;
-+		}
-+	}
-+	if (r < 0)
-+		goto exit;
-+	if (!vec) {
-+		r = err_r(-EINVAL, "empty message");
-+		goto exit;
-+	}
-+
-+	switch (*((const uint8_t *)msg + vec->offset)) {
-+	case 'r': {
-+		r = master_reply(m, msg);
-+		break;
-+	}
-+	default:
-+		r = err_r(-EINVAL, "invalid message type");
-+		break;
-+	}
-+
-+exit:
-+	/*
-+	 * We are done with the memory slice that was given to us through
-+	 * recv.msg.offset. Tell the kernel it can use it for other content
-+	 * in the future. See kdbus.pool(7).
-+	 */
-+	bus_poool_free_slice(m->bus, recv.msg.offset);
-+	return r;
-+}
-+
-+static int master_reply(struct master *m, const struct kdbus_msg *msg)
-+{
-+	struct kdbus_cmd_send cmd;
-+	struct kdbus_item *item;
-+	struct kdbus_msg *reply;
-+	size_t size, status, p[2];
-+	int r;
-+
-+	/*
-+	 * This function sends a message over kdbus. To do this, it uses the
-+	 * KDBUS_CMD_SEND ioctl, which takes a command struct argument of type
-+	 * 'struct kdbus_cmd_send'. This struct stores a pointer to the actual
-+	 * message to send. See kdbus.message(7).
-+	 */
-+	p[0] = m->prime->done;
-+	p[1] = prime_done(m->prime) ? 0 : PRIME_STEPS;
-+
-+	size = sizeof(*reply);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	/* Prepare the message to send */
-+	reply = alloca(size);
-+	memset(reply, 0, size);
-+	reply->size = size;
-+
-+	/* Each message has a cookie that can be used to send replies */
-+	reply->cookie = 1;
-+
-+	/* The payload_type is arbitrary, but it must be non-zero */
-+	reply->payload_type = 0xdeadbeef;
-+
-+	/*
-+	 * We are sending a reply. Let the kernel know the cookie of the
-+	 * message we are replying to.
-+	 */
-+	reply->cookie_reply = msg->cookie;
-+
-+	/*
-+	 * Messages can either be directed to a well-known name (stored as
-+	 * string) or to a unique name (stored as number). This example does
-+	 * the latter. If the message would be directed to a well-known name
-+	 * instead, the message's dst_id field would be set to
-+	 * KDBUS_DST_ID_NAME, and the name would be attached in an item of type
-+	 * KDBUS_ITEM_DST_NAME. See below for an example, and also refer to
-+	 * kdbus.message(7).
-+	 */
-+	reply->dst_id = msg->src_id;
-+
-+	/* Our message has exactly one item to store its payload */
-+	item = reply->items;
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)p;
-+	item->vec.size = sizeof(p);
-+
-+	/*
-+	 * Now prepare the command struct, and reference the message we want
-+	 * to send.
-+	 */
-+	memset(&cmd, 0, sizeof(cmd));
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)reply;
-+
-+	/*
-+	 * Finally, employ the command on the connection owner
-+	 * file descriptor.
-+	 */
-+	r = kdbus_cmd_send(m->bus->fd, &cmd);
-+	if (r < 0)
-+		return err_r(r, "cannot send reply");
-+
-+	if (p[1]) {
-+		prime_consume(m->prime, p[1]);
-+		status = m->prime->done * 10000 / m->prime->max;
-+		if (status != m->prime->status) {
-+			m->prime->status = status;
-+			fprintf(stderr, "status: %7.3lf%%\n",
-+				(double)status / 100);
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static int master_waitpid(struct master *m)
-+{
-+	pid_t pid;
-+	int r = 0;
-+
-+	while ((pid = waitpid(-1, &r, WNOHANG)) > 0) {
-+		if (m->n_workers > 0)
-+			--m->n_workers;
-+		if (!WIFEXITED(r))
-+			r = err_r(-EINVAL, "child died unexpectedly");
-+		else if (WEXITSTATUS(r) != 0)
-+			r = err_r(-WEXITSTATUS(r), "child failed");
-+	}
-+
-+	return r;
-+}
-+
-+static int master_spawn(struct master *m)
-+{
-+	struct child *c = NULL;
-+	struct prime *p = NULL;
-+	pid_t pid;
-+	int r;
-+
-+	/* Spawn off one child and call child_run() inside it */
-+
-+	pid = fork();
-+	if (pid < 0)
-+		return err("cannot fork");
-+	if (pid > 0) {
-+		/* parent */
-+		++m->n_workers;
-+		return 0;
-+	}
-+
-+	/* child */
-+
-+	p = m->prime;
-+	m->prime = NULL;
-+	master_free(m);
-+
-+	r = child_new(&c, p);
-+	if (r < 0)
-+		goto exit;
-+
-+	r = child_run(c);
-+
-+exit:
-+	child_free(c);
-+	exit(abs(r));
-+}
-+
-+static int child_new(struct child **out, struct prime *p)
-+{
-+	struct child *c;
-+	int r;
-+
-+	c = calloc(1, sizeof(*c));
-+	if (!c)
-+		return err("cannot allocate child");
-+
-+	c->prime = p;
-+
-+	/*
-+	 * Open a connection to the bus and require each received message to
-+	 * carry a list of the well-known names the sending connection currently
-+	 * owns. The current UID is needed in order to determine the name of the
-+	 * bus node to connect to.
-+	 */
-+	r = bus_open_connection(&c->bus, getuid(),
-+				arg_busname, KDBUS_ATTACH_NAMES);
-+	if (r < 0)
-+		goto error;
-+
-+	/*
-+	 * Install a kdbus match so the child's connection gets notified when
-+	 * the master loses its well-known name.
-+	 */
-+	r = bus_install_name_loss_match(c->bus, arg_master);
-+	if (r < 0)
-+		goto error;
-+
-+	*out = c;
-+	return 0;
-+
-+error:
-+	child_free(c);
-+	return r;
-+}
-+
-+static void child_free(struct child *c)
-+{
-+	if (!c)
-+		return;
-+
-+	bus_close_connection(c->bus);
-+	prime_free(c->prime);
-+	free(c);
-+}
-+
-+static int child_run(struct child *c)
-+{
-+	struct kdbus_cmd_send cmd;
-+	struct kdbus_item *item;
-+	struct kdbus_vec *vec = NULL;
-+	struct kdbus_msg *msg;
-+	struct timespec spec;
-+	size_t n, steps, size;
-+	int r = 0;
-+
-+	/*
-+	 * Let's send a message to the master and ask for work. To do this,
-+	 * we use the KDBUS_CMD_SEND ioctl, which takes an argument of type
-+	 * 'struct kdbus_cmd_send'. This struct stores a pointer to the actual
-+	 * message to send. See kdbus.message(7).
-+	 */
-+	size = sizeof(*msg);
-+	size += KDBUS_ITEM_SIZE(strlen(arg_master) + 1);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	msg = alloca(size);
-+	memset(msg, 0, size);
-+	msg->size = size;
-+
-+	/*
-+	 * Tell the kernel that we expect a reply to this message. This means
-+	 * that
-+	 *
-+	 * a) The remote peer will gain temporary permission to talk to us
-+	 *    even if it would not be allowed to normally.
-+	 *
-+	 * b) A timeout value is required.
-+	 *
-+	 *    For asynchronous send commands, if no reply is received, we will
-+	 *    get a kernel notification with an item of type
-+	 *    KDBUS_ITEM_REPLY_TIMEOUT attached.
-+	 *
-+	 *    For synchronous send commands (which this example uses), the
-+	 *    ioctl will block until a reply is received or the timeout is
-+	 *    exceeded.
-+	 */
-+	msg->flags = KDBUS_MSG_EXPECT_REPLY;
-+
-+	/* Set our cookie. Replies must use this cookie to send their reply. */
-+	msg->cookie = 1;
-+
-+	/* The payload_type is arbitrary, but it must be non-zero */
-+	msg->payload_type = 0xdeadbeef;
-+
-+	/*
-+	 * We are sending our message to the current owner of a well-known
-+	 * name. This makes an item of type KDBUS_ITEM_DST_NAME mandatory.
-+	 */
-+	msg->dst_id = KDBUS_DST_ID_NAME;
-+
-+	/*
-+	 * Set the reply timeout to 5 seconds. Timeouts are always set in
-+	 * absolute timestamps, based on CLOCK_MONOTONIC. See kdbus.message(7).
-+	 */
-+	clock_gettime(CLOCK_MONOTONIC_COARSE, &spec);
-+	msg->timeout_ns += (5 + spec.tv_sec) * 1000ULL * 1000ULL * 1000ULL;
-+	msg->timeout_ns += spec.tv_nsec;
-+
-+	/*
-+	 * Fill the appended items. First, set the well-known name of the
-+	 * destination we want to talk to.
-+	 */
-+	item = msg->items;
-+	item->type = KDBUS_ITEM_DST_NAME;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(arg_master) + 1;
-+	strcpy(item->str, arg_master);
-+
-+	/*
-+	 * The 2nd item contains a vector to memory we want to send. It
-+	 * can carry content of any type. In our case, we're sending a one-byte
-+	 * string only. The memory referenced by this item will be copied into
-+	 * the pool of the receiver connection, and does not need to be valid
-+	 * after the command is employed.
-+	 */
-+	item = KDBUS_ITEM_NEXT(item);
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)"r";
-+	item->vec.size = 1;
-+
-+	/* Set up the command struct and reference the message we prepared */
-+	memset(&cmd, 0, sizeof(cmd));
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	/*
-+	 * The send command supports a mode in which it will block until a
-+	 * reply to a message is received. This example uses that mode.
-+	 * The pool offset to the received reply will be stored in the command
-+	 * struct after the send command has returned. See below.
-+	 */
-+	cmd.flags = KDBUS_SEND_SYNC_REPLY;
-+
-+	/*
-+	 * Finally, employ the command on the connection owner
-+	 * file descriptor.
-+	 */
-+	r = kdbus_cmd_send(c->bus->fd, &cmd);
-+	if (r == -ESRCH || r == -EPIPE || r == -ECONNRESET)
-+		return 0;
-+	if (r < 0)
-+		return err_r(r, "cannot send request to master");
-+
-+	/*
-+	 * The command was sent with the KDBUS_SEND_SYNC_REPLY flag set,
-+	 * and returned successfully, which means that cmd.reply.offset now
-+	 * points to a message inside our connection's pool where the reply
-+	 * is found. This is equivalent to receiving the reply with
-+	 * KDBUS_CMD_RECV, but it doesn't require waiting for the reply with
-+	 * poll() and also saves the ioctl to receive the message.
-+	 */
-+	msg = (void *)(c->bus->pool + cmd.reply.offset);
-+
-+	/*
-+	 * A message describes its actual payload in an array of items.
-+	 * KDBUS_FOREACH() is a simple iterator that walks such an array.
-+	 * struct kdbus_msg has a field to denote its total size, which is
-+	 * needed to determine the number of items in the array.
-+	 */
-+	KDBUS_FOREACH(item, msg->items,
-+		      msg->size - offsetof(struct kdbus_msg, items)) {
-+		/*
-+		 * An item of type PAYLOAD_OFF describes in-line memory
-+		 * stored in the pool at a described offset. That offset is
-+		 * relative to the start address of the message header.
-+		 * This example program only expects one single item of that
-+		 * type, remembers the struct kdbus_vec member of the item
-+		 * when it sees it, and bails out if there is more than one
-+		 * of them.
-+		 */
-+		if (item->type == KDBUS_ITEM_PAYLOAD_OFF) {
-+			if (vec) {
-+				r = err_r(-EEXIST,
-+					  "message with multiple vecs");
-+				break;
-+			}
-+			vec = &item->vec;
-+			if (vec->size != 2 * sizeof(size_t)) {
-+				r = err_r(-EINVAL, "invalid message size");
-+				break;
-+			}
-+		/*
-+		 * MEMFDs are transported as items of type PAYLOAD_MEMFD.
-+		 * If such an item is attached, a new file descriptor was
-+		 * installed into the task when KDBUS_CMD_RECV was called, and
-+		 * its number is stored in item->memfd.fd.
-+		 * Implementers *must* handle this item type and close the
-+		 * file descriptor when no longer needed in order to prevent
-+		 * file descriptor exhaustion. This example program just bails
-+		 * out with an error in this case, as memfds are not expected
-+		 * in this context.
-+		 */
-+		} else if (item->type == KDBUS_ITEM_PAYLOAD_MEMFD) {
-+			r = err_r(-EINVAL, "message with memfd");
-+			break;
-+		}
-+	}
-+	if (r < 0)
-+		goto exit;
-+	if (!vec) {
-+		r = err_r(-EINVAL, "empty message");
-+		goto exit;
-+	}
-+
-+	n = ((size_t *)((const uint8_t *)msg + vec->offset))[0];
-+	steps = ((size_t *)((const uint8_t *)msg + vec->offset))[1];
-+
-+	while (steps-- > 0) {
-+		++n;
-+		r = prime_run(c->prime, c->bus, n);
-+		if (r < 0)
-+			break;
-+		r = bus_poll(c->bus);
-+		if (r != 0) {
-+			r = r < 0 ? r : -EINTR;
-+			break;
-+		}
-+	}
-+
-+exit:
-+	/*
-+	 * We are done with the memory slice that was given to us through
-+	 * cmd.reply.offset. Tell the kernel it can use it for other content
-+	 * in the future. See kdbus.pool(7).
-+	 */
-+	bus_poool_free_slice(c->bus, cmd.reply.offset);
-+	return r;
-+}
-+
-+/*
-+ * Prime Computation
-+ *
-+ */
-+
-+static int prime_new(struct prime **out)
-+{
-+	struct prime *p;
-+	int r;
-+
-+	p = calloc(1, sizeof(*p));
-+	if (!p)
-+		return err("cannot allocate prime memory");
-+
-+	p->fd = -1;
-+	p->area = MAP_FAILED;
-+	p->max = MAX_PRIMES;
-+
-+	/*
-+	 * Prepare and map a memfd to store the bit-fields for the number
-+	 * ranges we want to perform the prime detection on.
-+	 */
-+	p->fd = syscall(__NR_memfd_create, "prime-area", MFD_CLOEXEC);
-+	if (p->fd < 0) {
-+		r = err("cannot create memfd");
-+		goto error;
-+	}
-+
-+	r = ftruncate(p->fd, p->max / 8 + 1);
-+	if (r < 0) {
-+		r = err("cannot ftruncate area");
-+		goto error;
-+	}
-+
-+	p->area = mmap(NULL, p->max / 8 + 1, PROT_READ | PROT_WRITE,
-+		       MAP_SHARED, p->fd, 0);
-+	if (p->area == MAP_FAILED) {
-+		r = err("cannot mmap memfd");
-+		goto error;
-+	}
-+
-+	*out = p;
-+	return 0;
-+
-+error:
-+	prime_free(p);
-+	return r;
-+}
-+
-+static void prime_free(struct prime *p)
-+{
-+	if (!p)
-+		return;
-+
-+	if (p->area != MAP_FAILED)
-+		munmap(p->area, p->max / 8 + 1);
-+	if (p->fd >= 0)
-+		close(p->fd);
-+	free(p);
-+}
-+
-+static bool prime_done(struct prime *p)
-+{
-+	return p->done >= p->max;
-+}
-+
-+static void prime_consume(struct prime *p, size_t amount)
-+{
-+	p->done += amount;
-+}
-+
-+static int prime_run(struct prime *p, struct bus *cancel, size_t number)
-+{
-+	size_t i, n = 0;
-+	int r;
-+
-+	if (number < 2 || number > 65535)
-+		return 0;
-+
-+	for (i = number * number;
-+	     i < p->max && i > number;
-+	     i += number) {
-+		p->area[i / 8] |= 1 << (i % 8);
-+
-+		if (!(++n % (1 << 20))) {
-+			r = bus_poll(cancel);
-+			if (r != 0)
-+				return r < 0 ? r : -EINTR;
-+		}
-+	}
-+
-+	return 0;
-+}
-+
-+static void prime_print(struct prime *p)
-+{
-+	size_t i, l = 0;
-+
-+	fprintf(stderr, "PRIMES:");
-+	for (i = 0; i < p->max; ++i) {
-+		if (!(p->area[i / 8] & (1 << (i % 8))))
-+			fprintf(stderr, "%c%7zu", !(l++ % 16) ? '\n' : ' ', i);
-+	}
-+	fprintf(stderr, "\nEND\n");
-+}
-+
-+static int bus_open_connection(struct bus **out, uid_t uid, const char *name,
-+			       uint64_t recv_flags)
-+{
-+	struct kdbus_cmd_hello hello;
-+	char path[128];
-+	struct bus *b;
-+	int r;
-+
-+	/*
-+	 * The 'bus' object is our representation of a kdbus connection which
-+	 * stores two details: the connection owner file descriptor, and the
-+	 * mmap()ed memory of its associated pool. See kdbus.connection(7) and
-+	 * kdbus.pool(7).
-+	 */
-+	b = calloc(1, sizeof(*b));
-+	if (!b)
-+		return err("cannot allocate bus memory");
-+
-+	b->fd = -1;
-+	b->pool = MAP_FAILED;
-+
-+	/* Compute the name of the bus node to connect to. */
-+	snprintf(path, sizeof(path), "/sys/fs/%s/%lu-%s/bus",
-+		 arg_modname, (unsigned long)uid, name);
-+	b->fd = open(path, O_RDWR | O_CLOEXEC);
-+	if (b->fd < 0) {
-+		r = err("cannot open bus");
-+		goto error;
-+	}
-+
-+	/*
-+	 * To make a connection to the bus, the KDBUS_CMD_HELLO ioctl is used.
-+	 * It takes an argument of type 'struct kdbus_cmd_hello'.
-+	 */
-+	memset(&hello, 0, sizeof(hello));
-+	hello.size = sizeof(hello);
-+
-+	/*
-+	 * Specify a mask of metadata attach flags, describing metadata items
-+	 * that this new connection allows to be sent.
-+	 */
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+
-+	/*
-+	 * Specify a mask of metadata attach flags, describing metadata items
-+	 * that this new connection wants to receive along with each message.
-+	 */
-+	hello.attach_flags_recv = recv_flags;
-+
-+	/*
-+	 * A connection may choose the size of its pool, but the number has to
-+	 * comply with two rules: a) it must be greater than 0, and b) it must
-+	 * be a multiple of PAGE_SIZE. See kdbus.pool(7).
-+	 */
-+	hello.pool_size = POOL_SIZE;
-+
-+	/*
-+	 * Now employ the command on the file descriptor opened above.
-+	 * This command will turn the file descriptor into a connection-owner
-+	 * file descriptor that controls the life-time of the connection; once
-+	 * it's closed, the connection is shut down.
-+	 */
-+	r = kdbus_cmd_hello(b->fd, &hello);
-+	if (r < 0) {
-+		err_r(r, "HELLO failed");
-+		goto error;
-+	}
-+
-+	bus_poool_free_slice(b, hello.offset);
-+
-+	/*
-+	 * Map the pool of the connection. Its size has been set in the
-+	 * command struct above. See kdbus.pool(7).
-+	 */
-+	b->pool = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, b->fd, 0);
-+	if (b->pool == MAP_FAILED) {
-+		r = err("cannot mmap pool");
-+		goto error;
-+	}
-+
-+	*out = b;
-+	return 0;
-+
-+error:
-+	bus_close_connection(b);
-+	return r;
-+}
-+
-+static void bus_close_connection(struct bus *b)
-+{
-+	if (!b)
-+		return;
-+
-+	/*
-+	 * A bus connection is closed by simply calling close() on the
-+	 * connection owner file descriptor. The unique name and all owned
-+	 * well-known names of the connection will disappear.
-+	 * See kdbus.connection(7).
-+	 */
-+	if (b->pool != MAP_FAILED)
-+		munmap(b->pool, POOL_SIZE);
-+	if (b->fd >= 0)
-+		close(b->fd);
-+	free(b);
-+}
-+
-+static void bus_poool_free_slice(struct bus *b, uint64_t offset)
-+{
-+	struct kdbus_cmd_free cmd = {
-+		.size = sizeof(cmd),
-+		.offset = offset,
-+	};
-+	int r;
-+
-+	/*
-+	 * Once we're done with a piece of pool memory that was returned
-+	 * by a command, we have to call the KDBUS_CMD_FREE ioctl on it so it
-+	 * can be reused. The command takes an argument of type
-+	 * 'struct kdbus_cmd_free', in which the pool offset of the slice to
-+	 * free is stored. The ioctl is employed on the connection owner
-+	 * file descriptor. See kdbus.pool(7).
-+	 */
-+	r = kdbus_cmd_free(b->fd, &cmd);
-+	if (r < 0)
-+		err_r(r, "cannot free pool slice");
-+}
-+
-+static int bus_acquire_name(struct bus *b, const char *name)
-+{
-+	struct kdbus_item *item;
-+	struct kdbus_cmd *cmd;
-+	size_t size;
-+	int r;
-+
-+	/*
-+	 * This function acquires a well-known name on the bus through the
-+	 * KDBUS_CMD_NAME_ACQUIRE ioctl. This ioctl takes an argument of type
-+	 * 'struct kdbus_cmd', which is assembled below. See kdbus.name(7).
-+	 */
-+	size = sizeof(*cmd);
-+	size += KDBUS_ITEM_SIZE(strlen(name) + 1);
-+
-+	cmd = alloca(size);
-+	memset(cmd, 0, size);
-+	cmd->size = size;
-+
-+	/*
-+	 * The command requires an item of type KDBUS_ITEM_NAME, and its
-+	 * content must be a valid bus name.
-+	 */
-+	item = cmd->items;
-+	item->type = KDBUS_ITEM_NAME;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+	strcpy(item->str, name);
-+
-+	/*
-+	 * Employ the command on the connection owner file descriptor.
-+	 */
-+	r = kdbus_cmd_name_acquire(b->fd, cmd);
-+	if (r < 0)
-+		return err_r(r, "cannot acquire name");
-+
-+	return 0;
-+}
-+
-+static int bus_install_name_loss_match(struct bus *b, const char *name)
-+{
-+	struct kdbus_cmd_match *match;
-+	struct kdbus_item *item;
-+	size_t size;
-+	int r;
-+
-+	/*
-+	 * In order to install a match for signal messages, we have to
-+	 * assemble a 'struct kdbus_cmd_match' and use it along with the
-+	 * KDBUS_CMD_MATCH_ADD ioctl. See kdbus.match(7).
-+	 */
-+	size = sizeof(*match);
-+	size += KDBUS_ITEM_SIZE(sizeof(item->name_change) + strlen(name) + 1);
-+
-+	match = alloca(size);
-+	memset(match, 0, size);
-+	match->size = size;
-+
-+	/*
-+	 * A match is comprised of many 'rules', each of which describes a
-+	 * mandatory detail of the message. All rules of a match must be
-+	 * satisfied in order to make a message pass.
-+	 */
-+	item = match->items;
-+
-+	/*
-+	 * In this case, we're interested in notifications that inform us
-+	 * about a well-known name being removed from the bus.
-+	 */
-+	item->type = KDBUS_ITEM_NAME_REMOVE;
-+	item->size = KDBUS_ITEM_HEADER_SIZE +
-+			sizeof(item->name_change) + strlen(name) + 1;
-+
-+	/*
-+	 * We could limit the match further and require a specific unique-ID
-+	 * to be the new or the old owner of the name. In this case, however,
-+	 * we don't, and allow 'any' id.
-+	 */
-+	item->name_change.old_id.id = KDBUS_MATCH_ID_ANY;
-+	item->name_change.new_id.id = KDBUS_MATCH_ID_ANY;
-+
-+	/* Copy in the well-known name we're interested in */
-+	strcpy(item->name_change.name, name);
-+
-+	/*
-+	 * Add the match through the KDBUS_CMD_MATCH_ADD ioctl, employed on
-+	 * the connection owner fd.
-+	 */
-+	r = kdbus_cmd_match_add(b->fd, match);
-+	if (r < 0)
-+		return err_r(r, "cannot add match");
-+
-+	return 0;
-+}
-+
-+static int bus_poll(struct bus *b)
-+{
-+	struct pollfd fds[1] = {};
-+	int r;
-+
-+	/*
-+	 * A connection endpoint supports poll() and will wake up the
-+	 * task with POLLIN set once a message has arrived.
-+	 */
-+	fds[0].fd = b->fd;
-+	fds[0].events = POLLIN;
-+	r = poll(fds, sizeof(fds) / sizeof(*fds), 0);
-+	if (r < 0)
-+		return err("cannot poll bus");
-+	return !!(fds[0].revents & POLLIN);
-+}
-+
-+static int bus_make(uid_t uid, const char *name)
-+{
-+	struct kdbus_item *item;
-+	struct kdbus_cmd *make;
-+	char path[128], busname[128];
-+	size_t size;
-+	int r, fd;
-+
-+	/*
-+	 * Compute the full path to the 'control' node. 'arg_modname' may be
-+	 * set to a different value than 'kdbus' for development purposes.
-+	 * The 'control' node is the primary entry point to kdbus that must be
-+	 * used in order to create a bus. See kdbus(7) and kdbus.bus(7).
-+	 */
-+	snprintf(path, sizeof(path), "/sys/fs/%s/control", arg_modname);
-+
-+	/*
-+	 * Compute the bus name. A valid bus name must always be prefixed with
-+	 * the EUID of the currently running process in order to avoid name
-+	 * conflicts. See kdbus.bus(7).
-+	 */
-+	snprintf(busname, sizeof(busname), "%lu-%s", (unsigned long)uid, name);
-+
-+	fd = open(path, O_RDWR | O_CLOEXEC);
-+	if (fd < 0)
-+		return err("cannot open control file");
-+
-+	/*
-+	 * The KDBUS_CMD_BUS_MAKE ioctl takes an argument of type
-+	 * 'struct kdbus_cmd', and expects at least two items attached to
-+	 * it: one to describe the bloom parameters to be propagated to
-+	 * connections of the bus, and the name of the bus that was computed
-+	 * above. Assemble this struct now, and fill it with values.
-+	 */
-+	size = sizeof(*make);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_parameter));
-+	size += KDBUS_ITEM_SIZE(strlen(busname) + 1);
-+
-+	make = alloca(size);
-+	memset(make, 0, size);
-+	make->size = size;
-+
-+	/*
-+	 * Each item has a 'type' and 'size' field, and must be stored at an
-+	 * 8-byte aligned address. The KDBUS_ITEM_NEXT macro is used to advance
-+	 * the pointer. See kdbus.item(7) for more details.
-+	 */
-+	item = make->items;
-+	item->type = KDBUS_ITEM_BLOOM_PARAMETER;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(item->bloom_parameter);
-+	item->bloom_parameter.size = 8;
-+	item->bloom_parameter.n_hash = 1;
-+
-+	/* The name of the new bus is stored in the next item. */
-+	item = KDBUS_ITEM_NEXT(item);
-+	item->type = KDBUS_ITEM_MAKE_NAME;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(busname) + 1;
-+	strcpy(item->str, busname);
-+
-+	/*
-+	 * Now create the bus via the KDBUS_CMD_BUS_MAKE ioctl and return the
-+	 * fd that was used back to the caller of this function. This fd is now
-+	 * called a 'bus owner file descriptor', and it controls the life-time
-+	 * of the newly created bus; once the file descriptor is closed, the
-+	 * bus goes away, and all connections are shut down. See kdbus.bus(7).
-+	 */
-+	r = kdbus_cmd_bus_make(fd, make);
-+	if (r < 0) {
-+		err_r(r, "cannot make bus");
-+		close(fd);
-+		return r;
-+	}
-+
-+	return fd;
-+}
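
With the bus created, a node named 'bus' appears underneath the new bus
directory, and clients connect by opening it and issuing KDBUS_CMD_HELLO.
A sketch of that follow-up step, assuming the default module name 'kdbus':

	int owner_fd, conn_fd;
	char buspath[160];

	owner_fd = bus_make(getuid(), "example");  /* keeps the bus alive */
	if (owner_fd < 0)
		return owner_fd;

	snprintf(buspath, sizeof(buspath), "/sys/fs/kdbus/%lu-example/bus",
		 (unsigned long)getuid());
	conn_fd = open(buspath, O_RDWR | O_CLOEXEC);
	/* ... KDBUS_CMD_HELLO on conn_fd, see kdbus.connection(7) ... */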
-+
-+#else
-+
-+#warning "Skipping compilation due to unsupported libc version"
-+
-+int main(int argc, char **argv)
-+{
-+	fprintf(stderr,
-+		"Compilation of %s was skipped due to unsupported libc.\n",
-+		argv[0]);
-+
-+	return EXIT_FAILURE;
-+}
-+
-+#endif /* libc sanity check */
-diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
-index 95abddc..b57100c 100644
---- a/tools/testing/selftests/Makefile
-+++ b/tools/testing/selftests/Makefile
-@@ -5,6 +5,7 @@ TARGETS += exec
- TARGETS += firmware
- TARGETS += ftrace
- TARGETS += kcmp
-+TARGETS += kdbus
- TARGETS += memfd
- TARGETS += memory-hotplug
- TARGETS += mount
-diff --git a/tools/testing/selftests/kdbus/.gitignore b/tools/testing/selftests/kdbus/.gitignore
-new file mode 100644
-index 0000000..d3ef42f
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/.gitignore
-@@ -0,0 +1 @@
-+kdbus-test
-diff --git a/tools/testing/selftests/kdbus/Makefile b/tools/testing/selftests/kdbus/Makefile
-new file mode 100644
-index 0000000..8f36cb5
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/Makefile
-@@ -0,0 +1,49 @@
-+CFLAGS += -I../../../../usr/include/
-+CFLAGS += -I../../../../samples/kdbus/
-+CFLAGS += -I../../../../include/uapi/
-+CFLAGS += -std=gnu99
-+CFLAGS += -DKBUILD_MODNAME=\"kdbus\" -D_GNU_SOURCE
-+LDLIBS = -pthread -lcap -lm
-+
-+OBJS= \
-+	kdbus-enum.o		\
-+	kdbus-util.o		\
-+	kdbus-test.o		\
-+	test-activator.o	\
-+	test-benchmark.o	\
-+	test-bus.o		\
-+	test-chat.o		\
-+	test-connection.o	\
-+	test-daemon.o		\
-+	test-endpoint.o		\
-+	test-fd.o		\
-+	test-free.o		\
-+	test-match.o		\
-+	test-message.o		\
-+	test-metadata-ns.o	\
-+	test-monitor.o		\
-+	test-names.o		\
-+	test-policy.o		\
-+	test-policy-ns.o	\
-+	test-policy-priv.o	\
-+	test-sync.o		\
-+	test-timeout.o
-+
-+all: kdbus-test
-+
-+include ../lib.mk
-+
-+%.o: %.c kdbus-enum.h kdbus-test.h kdbus-util.h
-+	$(CC) $(CFLAGS) -c $< -o $@
-+
-+kdbus-test: $(OBJS)
-+	$(CC) $(CFLAGS) $^ $(LDLIBS) -o $@
-+
-+TEST_PROGS := kdbus-test
-+
-+run_tests:
-+	./kdbus-test --tap
-+
-+clean:
-+	rm -f *.o kdbus-test
-diff --git a/tools/testing/selftests/kdbus/kdbus-enum.c b/tools/testing/selftests/kdbus/kdbus-enum.c
-new file mode 100644
-index 0000000..4f1e579
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-enum.c
-@@ -0,0 +1,94 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+struct kdbus_enum_table {
-+	long long id;
-+	const char *name;
-+};
-+
-+#define TABLE(what) static struct kdbus_enum_table kdbus_table_##what[]
-+#define ENUM(_id) { .id = _id, .name = STRINGIFY(_id) }
-+#define LOOKUP(what)							\
-+	const char *enum_##what(long long id)				\
-+	{								\
-+		for (size_t i = 0; i < ELEMENTSOF(kdbus_table_##what); i++) \
-+			if (id == kdbus_table_##what[i].id)		\
-+				return kdbus_table_##what[i].name;	\
-+		return "UNKNOWN";					\
-+	}
-+
-+TABLE(CMD) = {
-+	ENUM(KDBUS_CMD_BUS_MAKE),
-+	ENUM(KDBUS_CMD_ENDPOINT_MAKE),
-+	ENUM(KDBUS_CMD_HELLO),
-+	ENUM(KDBUS_CMD_SEND),
-+	ENUM(KDBUS_CMD_RECV),
-+	ENUM(KDBUS_CMD_LIST),
-+	ENUM(KDBUS_CMD_NAME_RELEASE),
-+	ENUM(KDBUS_CMD_CONN_INFO),
-+	ENUM(KDBUS_CMD_MATCH_ADD),
-+	ENUM(KDBUS_CMD_MATCH_REMOVE),
-+};
-+LOOKUP(CMD);
-+
-+TABLE(MSG) = {
-+	ENUM(_KDBUS_ITEM_NULL),
-+	ENUM(KDBUS_ITEM_PAYLOAD_VEC),
-+	ENUM(KDBUS_ITEM_PAYLOAD_OFF),
-+	ENUM(KDBUS_ITEM_PAYLOAD_MEMFD),
-+	ENUM(KDBUS_ITEM_FDS),
-+	ENUM(KDBUS_ITEM_BLOOM_PARAMETER),
-+	ENUM(KDBUS_ITEM_BLOOM_FILTER),
-+	ENUM(KDBUS_ITEM_DST_NAME),
-+	ENUM(KDBUS_ITEM_MAKE_NAME),
-+	ENUM(KDBUS_ITEM_ATTACH_FLAGS_SEND),
-+	ENUM(KDBUS_ITEM_ATTACH_FLAGS_RECV),
-+	ENUM(KDBUS_ITEM_ID),
-+	ENUM(KDBUS_ITEM_NAME),
-+	ENUM(KDBUS_ITEM_TIMESTAMP),
-+	ENUM(KDBUS_ITEM_CREDS),
-+	ENUM(KDBUS_ITEM_PIDS),
-+	ENUM(KDBUS_ITEM_AUXGROUPS),
-+	ENUM(KDBUS_ITEM_OWNED_NAME),
-+	ENUM(KDBUS_ITEM_TID_COMM),
-+	ENUM(KDBUS_ITEM_PID_COMM),
-+	ENUM(KDBUS_ITEM_EXE),
-+	ENUM(KDBUS_ITEM_CMDLINE),
-+	ENUM(KDBUS_ITEM_CGROUP),
-+	ENUM(KDBUS_ITEM_CAPS),
-+	ENUM(KDBUS_ITEM_SECLABEL),
-+	ENUM(KDBUS_ITEM_AUDIT),
-+	ENUM(KDBUS_ITEM_CONN_DESCRIPTION),
-+	ENUM(KDBUS_ITEM_NAME_ADD),
-+	ENUM(KDBUS_ITEM_NAME_REMOVE),
-+	ENUM(KDBUS_ITEM_NAME_CHANGE),
-+	ENUM(KDBUS_ITEM_ID_ADD),
-+	ENUM(KDBUS_ITEM_ID_REMOVE),
-+	ENUM(KDBUS_ITEM_REPLY_TIMEOUT),
-+	ENUM(KDBUS_ITEM_REPLY_DEAD),
-+};
-+LOOKUP(MSG);
-+
-+TABLE(PAYLOAD) = {
-+	ENUM(KDBUS_PAYLOAD_KERNEL),
-+	ENUM(KDBUS_PAYLOAD_DBUS),
-+};
-+LOOKUP(PAYLOAD);
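
The TABLE/ENUM/LOOKUP trio stringifies each constant at compile time:
ENUM(KDBUS_CMD_HELLO) stores both the value and the literal string
"KDBUS_CMD_HELLO", and LOOKUP(CMD) expands to an enum_CMD() that linearly
scans kdbus_table_CMD. Typical use, as seen throughout kdbus-util.c:

	printf("ioctl was %s\n", enum_CMD(KDBUS_CMD_HELLO));
	/* prints "ioctl was KDBUS_CMD_HELLO"; unknown ids yield "UNKNOWN" */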
-diff --git a/tools/testing/selftests/kdbus/kdbus-enum.h b/tools/testing/selftests/kdbus/kdbus-enum.h
-new file mode 100644
-index 0000000..ed28cca
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-enum.h
-@@ -0,0 +1,15 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#pragma once
-+
-+const char *enum_CMD(long long id);
-+const char *enum_MSG(long long id);
-+const char *enum_MATCH(long long id);
-+const char *enum_PAYLOAD(long long id);
-diff --git a/tools/testing/selftests/kdbus/kdbus-test.c b/tools/testing/selftests/kdbus/kdbus-test.c
-new file mode 100644
-index 0000000..db57381
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-test.c
-@@ -0,0 +1,905 @@
-+#include <errno.h>
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <time.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <assert.h>
-+#include <getopt.h>
-+#include <stdbool.h>
-+#include <signal.h>
-+#include <sys/mount.h>
-+#include <sys/prctl.h>
-+#include <sys/wait.h>
-+#include <sys/syscall.h>
-+#include <sys/eventfd.h>
-+#include <linux/sched.h>
-+
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+enum {
-+	TEST_CREATE_BUS		= 1 << 0,
-+	TEST_CREATE_CONN	= 1 << 1,
-+};
-+
-+struct kdbus_test {
-+	const char *name;
-+	const char *desc;
-+	int (*func)(struct kdbus_test_env *env);
-+	unsigned int flags;
-+};
-+
-+struct kdbus_test_args {
-+	bool mntns;
-+	bool pidns;
-+	bool userns;
-+	char *uid_map;
-+	char *gid_map;
-+	int loop;
-+	int wait;
-+	int fork;
-+	int tap_output;
-+	char *module;
-+	char *root;
-+	char *test;
-+	char *busname;
-+};
-+
-+static const struct kdbus_test tests[] = {
-+	{
-+		.name	= "bus-make",
-+		.desc	= "bus make functions",
-+		.func	= kdbus_test_bus_make,
-+		.flags	= 0,
-+	},
-+	{
-+		.name	= "hello",
-+		.desc	= "the HELLO command",
-+		.func	= kdbus_test_hello,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "byebye",
-+		.desc	= "the BYEBYE command",
-+		.func	= kdbus_test_byebye,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "chat",
-+		.desc	= "a chat pattern",
-+		.func	= kdbus_test_chat,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "daemon",
-+		.desc	= "a simple daemon",
-+		.func	= kdbus_test_daemon,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "fd-passing",
-+		.desc	= "file descriptor passing",
-+		.func	= kdbus_test_fd_passing,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "endpoint",
-+		.desc	= "custom endpoint",
-+		.func	= kdbus_test_custom_endpoint,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "monitor",
-+		.desc	= "monitor functionality",
-+		.func	= kdbus_test_monitor,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "name-basics",
-+		.desc	= "basic name registry functions",
-+		.func	= kdbus_test_name_basic,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "name-conflict",
-+		.desc	= "name registry conflict details",
-+		.func	= kdbus_test_name_conflict,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "name-queue",
-+		.desc	= "queuing of names",
-+		.func	= kdbus_test_name_queue,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "name-takeover",
-+		.desc	= "takeover of names",
-+		.func	= kdbus_test_name_takeover,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "message-basic",
-+		.desc	= "basic message handling",
-+		.func	= kdbus_test_message_basic,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "message-prio",
-+		.desc	= "handling of messages with priority",
-+		.func	= kdbus_test_message_prio,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "message-quota",
-+		.desc	= "message quotas are enforced",
-+		.func	= kdbus_test_message_quota,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "memory-access",
-+		.desc	= "memory access",
-+		.func	= kdbus_test_memory_access,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "timeout",
-+		.desc	= "timeout",
-+		.func	= kdbus_test_timeout,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "sync-byebye",
-+		.desc	= "synchronous replies vs. BYEBYE",
-+		.func	= kdbus_test_sync_byebye,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "sync-reply",
-+		.desc	= "synchronous replies",
-+		.func	= kdbus_test_sync_reply,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "message-free",
-+		.desc	= "freeing of memory",
-+		.func	= kdbus_test_free,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "connection-info",
-+		.desc	= "retrieving connection information",
-+		.func	= kdbus_test_conn_info,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "connection-update",
-+		.desc	= "updating connection information",
-+		.func	= kdbus_test_conn_update,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "writable-pool",
-+		.desc	= "verifying pools are never writable",
-+		.func	= kdbus_test_writable_pool,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "policy",
-+		.desc	= "policy",
-+		.func	= kdbus_test_policy,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "policy-priv",
-+		.desc	= "unprivileged bus access",
-+		.func	= kdbus_test_policy_priv,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "policy-ns",
-+		.desc	= "policy in user namespaces",
-+		.func	= kdbus_test_policy_ns,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "metadata-ns",
-+		.desc	= "metadata in different namespaces",
-+		.func	= kdbus_test_metadata_ns,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-id-add",
-+		.desc	= "adding of matches by id",
-+		.func	= kdbus_test_match_id_add,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-id-remove",
-+		.desc	= "removing of matches by id",
-+		.func	= kdbus_test_match_id_remove,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-replace",
-+		.desc	= "replace of matches with the same cookie",
-+		.func	= kdbus_test_match_replace,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-name-add",
-+		.desc	= "adding of matches by name",
-+		.func	= kdbus_test_match_name_add,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-name-remove",
-+		.desc	= "removing of matches by name",
-+		.func	= kdbus_test_match_name_remove,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-name-change",
-+		.desc	= "matching for name changes",
-+		.func	= kdbus_test_match_name_change,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "match-bloom",
-+		.desc	= "matching with bloom filters",
-+		.func	= kdbus_test_match_bloom,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "activator",
-+		.desc	= "activator connections",
-+		.func	= kdbus_test_activator,
-+		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
-+	},
-+	{
-+		.name	= "benchmark",
-+		.desc	= "benchmark",
-+		.func	= kdbus_test_benchmark,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "benchmark-nomemfds",
-+		.desc	= "benchmark without using memfds",
-+		.func	= kdbus_test_benchmark_nomemfds,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+	{
-+		.name	= "benchmark-uds",
-+		.desc	= "benchmark comparison to UDS",
-+		.func	= kdbus_test_benchmark_uds,
-+		.flags	= TEST_CREATE_BUS,
-+	},
-+};
-+
-+#define N_TESTS ((int) (sizeof(tests) / sizeof(tests[0])))
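
The flags field is the only coupling between a test and the harness:
TEST_CREATE_BUS hands the callback a freshly made bus, and TEST_CREATE_CONN
additionally a connected peer in env->conn. Adding a test therefore means
appending one row; a purely hypothetical entry (kdbus_test_my_feature is not
part of this patch) would look like:

	{
		.name	= "my-feature",
		.desc	= "exercise my feature",
		.func	= kdbus_test_my_feature,	/* hypothetical */
		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
	},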
-+
-+static int test_prepare_env(const struct kdbus_test *t,
-+			    const struct kdbus_test_args *args,
-+			    struct kdbus_test_env *env)
-+{
-+	if (t->flags & TEST_CREATE_BUS) {
-+		char *s;
-+		char *n = NULL;
-+		int ret;
-+
-+		asprintf(&s, "%s/control", args->root);
-+
-+		env->control_fd = open(s, O_RDWR);
-+		free(s);
-+		ASSERT_RETURN(env->control_fd >= 0);
-+
-+		if (!args->busname) {
-+			n = unique_name("test-bus");
-+			ASSERT_RETURN(n);
-+		}
-+
-+		ret = kdbus_create_bus(env->control_fd,
-+				       args->busname ?: n,
-+				       _KDBUS_ATTACH_ALL, &s);
-+		free(n);
-+		ASSERT_RETURN(ret == 0);
-+
-+		asprintf(&env->buspath, "%s/%s/bus", args->root, s);
-+		free(s);
-+	}
-+
-+	if (t->flags & TEST_CREATE_CONN) {
-+		env->conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_RETURN(env->conn);
-+	}
-+
-+	env->root = args->root;
-+	env->module = args->module;
-+
-+	return 0;
-+}
-+
-+void test_unprepare_env(const struct kdbus_test *t, struct kdbus_test_env *env)
-+{
-+	if (env->conn) {
-+		kdbus_conn_free(env->conn);
-+		env->conn = NULL;
-+	}
-+
-+	if (env->control_fd >= 0) {
-+		close(env->control_fd);
-+		env->control_fd = -1;
-+	}
-+
-+	if (env->buspath) {
-+		free(env->buspath);
-+		env->buspath = NULL;
-+	}
-+}
-+
-+static int test_run(const struct kdbus_test *t,
-+		    const struct kdbus_test_args *kdbus_args,
-+		    int wait)
-+{
-+	int ret;
-+	struct kdbus_test_env env = {};
-+
-+	ret = test_prepare_env(t, kdbus_args, &env);
-+	if (ret != TEST_OK)
-+		return ret;
-+
-+	if (wait > 0) {
-+		printf("Sleeping %d seconds before running test ...\n", wait);
-+		sleep(wait);
-+	}
-+
-+	ret = t->func(&env);
-+	test_unprepare_env(t, &env);
-+	return ret;
-+}
-+
-+static int test_run_forked(const struct kdbus_test *t,
-+			   const struct kdbus_test_args *kdbus_args,
-+			   int wait)
-+{
-+	int ret;
-+	pid_t pid;
-+
-+	pid = fork();
-+	if (pid < 0) {
-+		return TEST_ERR;
-+	} else if (pid == 0) {
-+		ret = test_run(t, kdbus_args, wait);
-+		_exit(ret);
-+	}
-+
-+	pid = waitpid(pid, &ret, 0);
-+	if (pid <= 0)
-+		return TEST_ERR;
-+	else if (!WIFEXITED(ret))
-+		return TEST_ERR;
-+	else
-+		return WEXITSTATUS(ret);
-+}
-+
-+static void print_test_result(int ret)
-+{
-+	switch (ret) {
-+	case TEST_OK:
-+		printf("OK");
-+		break;
-+	case TEST_SKIP:
-+		printf("SKIPPED");
-+		break;
-+	case TEST_ERR:
-+		printf("ERROR");
-+		break;
-+	}
-+}
-+
-+static int start_all_tests(struct kdbus_test_args *kdbus_args)
-+{
-+	int ret;
-+	unsigned int fail_cnt = 0;
-+	unsigned int skip_cnt = 0;
-+	unsigned int ok_cnt = 0;
-+	unsigned int i;
-+
-+	if (kdbus_args->tap_output) {
-+		printf("1..%d\n", N_TESTS);
-+		fflush(stdout);
-+	}
-+
-+	kdbus_util_verbose = false;
-+
-+	for (i = 0; i < N_TESTS; i++) {
-+		const struct kdbus_test *t = tests + i;
-+
-+		if (!kdbus_args->tap_output) {
-+			unsigned int n;
-+
-+			printf("Testing %s (%s) ", t->desc, t->name);
-+			for (n = 0; n < 60 - strlen(t->desc) - strlen(t->name); n++)
-+				printf(".");
-+			printf(" ");
-+		}
-+
-+		ret = test_run_forked(t, kdbus_args, 0);
-+		switch (ret) {
-+		case TEST_OK:
-+			ok_cnt++;
-+			break;
-+		case TEST_SKIP:
-+			skip_cnt++;
-+			break;
-+		case TEST_ERR:
-+			fail_cnt++;
-+			break;
-+		}
-+
-+		if (kdbus_args->tap_output) {
-+			printf("%sok %d - %s%s (%s)\n",
-+			       (ret == TEST_ERR) ? "not " : "", i + 1,
-+			       (ret == TEST_SKIP) ? "# SKIP " : "",
-+			       t->desc, t->name);
-+			fflush(stdout);
-+		} else {
-+			print_test_result(ret);
-+			printf("\n");
-+		}
-+	}
-+
-+	if (kdbus_args->tap_output)
-+		printf("Failed %d/%d tests, %.2f%% okay\n", fail_cnt, N_TESTS,
-+		       100.0 - (fail_cnt * 100.0) / ((float) N_TESTS));
-+	else
-+		printf("\nSUMMARY: %u tests passed, %u skipped, %u failed\n",
-+		       ok_cnt, skip_cnt, fail_cnt);
-+
-+	return fail_cnt > 0 ? TEST_ERR : TEST_OK;
-+}
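
With --tap, the loop above emits one plan line followed by one result line per
entry of the table, so a run over the 38 tests listed here starts out roughly
like:

	1..38
	ok 1 - bus make functions (bus-make)
	ok 2 - the HELLO command (hello)
	not ok 16 - memory access (memory-access)
	ok 27 - # SKIP metadata in different namespaces (metadata-ns)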
-+
-+static int start_one_test(struct kdbus_test_args *kdbus_args)
-+{
-+	int i, ret;
-+	bool test_found = false;
-+
-+	for (i = 0; i < N_TESTS; i++) {
-+		const struct kdbus_test *t = tests + i;
-+
-+		if (strcmp(t->name, kdbus_args->test))
-+			continue;
-+
-+		do {
-+			test_found = true;
-+			if (kdbus_args->fork)
-+				ret = test_run_forked(t, kdbus_args,
-+						      kdbus_args->wait);
-+			else
-+				ret = test_run(t, kdbus_args,
-+					       kdbus_args->wait);
-+
-+			printf("Testing %s: ", t->desc);
-+			print_test_result(ret);
-+			printf("\n");
-+
-+			if (ret != TEST_OK)
-+				break;
-+		} while (kdbus_args->loop);
-+
-+		return ret;
-+	}
-+
-+	if (!test_found) {
-+		printf("Unknown test-id '%s'\n", kdbus_args->test);
-+		return TEST_ERR;
-+	}
-+
-+	return TEST_OK;
-+}
-+
-+static void usage(const char *argv0)
-+{
-+	unsigned int i, j;
-+
-+	printf("Usage: %s [options]\n"
-+	       "Options:\n"
-+	       "\t-a, --tap		Output test results in TAP format\n"
-+	       "\t-m, --module <module>	Kdbus module name\n"
-+	       "\t-x, --loop		Run in a loop\n"
-+	       "\t-f, --fork		Fork before running a test\n"
-+	       "\t-h, --help		Print this help\n"
-+	       "\t-r, --root <root>	Toplevel of the kdbus hierarchy\n"
-+	       "\t-t, --test <test-id>	Run one specific test only, in verbose mode\n"
-+	       "\t-b, --bus <busname>	Instead of generating a random bus name, take <busname>.\n"
-+	       "\t-w, --wait <secs>	Wait <secs> before actually starting test\n"
-+	       "\t    --mntns		New mount namespace\n"
-+	       "\t    --pidns		New PID namespace\n"
-+	       "\t    --userns		New user namespace\n"
-+	       "\t    --uidmap uid_map	UID map for user namespace\n"
-+	       "\t    --gidmap gid_map	GID map for user namespace\n"
-+	       "\n", argv0);
-+
-+	printf("By default, all tests are run once, and a summary is printed.\n"
-+	       "Available tests for --test:\n\n");
-+
-+	for (i = 0; i < N_TESTS; i++) {
-+		const struct kdbus_test *t = tests + i;
-+
-+		printf("\t%s", t->name);
-+
-+		for (j = 0; j < 24 - strlen(t->name); j++)
-+			printf(" ");
-+
-+		printf("Test %s\n", t->desc);
-+	}
-+
-+	printf("\n");
-+	printf("Note that some tests may behave differently when run "
-+	       "individually via --test, and may not terminate by themselves.\n");
-+
-+	exit(EXIT_FAILURE);
-+}
-+
-+void print_kdbus_test_args(struct kdbus_test_args *args)
-+{
-+	if (args->userns || args->pidns || args->mntns)
-+		printf("# Starting tests in new %s%s%s namespaces%s\n",
-+			args->mntns ? "MOUNT " : "",
-+			args->pidns ? "PID " : "",
-+			args->userns ? "USER " : "",
-+			args->mntns ? ", kdbusfs will be remounted" : "");
-+	else
-+		printf("# Starting tests in the same namespaces\n");
-+}
-+
-+void print_metadata_support(void)
-+{
-+	bool no_meta_audit, no_meta_cgroups, no_meta_seclabel;
-+
-+	/*
-+	 * KDBUS_ATTACH_CGROUP, KDBUS_ATTACH_AUDIT and
-+	 * KDBUS_ATTACH_SECLABEL
-+	 */
-+	no_meta_audit = !config_auditsyscall_is_enabled();
-+	no_meta_cgroups = !config_cgroups_is_enabled();
-+	no_meta_seclabel = !config_security_is_enabled();
-+
-+	if (no_meta_audit || no_meta_cgroups || no_meta_seclabel)
-+		printf("# Starting tests without %s%s%s metadata support\n",
-+		       no_meta_audit ? "AUDIT " : "",
-+		       no_meta_cgroups ? "CGROUP " : "",
-+		       no_meta_seclabel ? "SECLABEL " : "");
-+	else
-+		printf("# Starting tests with full metadata support\n");
-+}
-+
-+int run_tests(struct kdbus_test_args *kdbus_args)
-+{
-+	int ret;
-+	static char control[4096];
-+
-+	snprintf(control, sizeof(control), "%s/control", kdbus_args->root);
-+
-+	if (access(control, W_OK) < 0) {
-+		printf("Unable to locate control node at '%s'.\n",
-+			control);
-+		return TEST_ERR;
-+	}
-+
-+	if (kdbus_args->test) {
-+		ret = start_one_test(kdbus_args);
-+	} else {
-+		do {
-+			ret = start_all_tests(kdbus_args);
-+			if (ret != TEST_OK)
-+				break;
-+		} while (kdbus_args->loop);
-+	}
-+
-+	return ret;
-+}
-+
-+static void nop_handler(int sig) {}
-+
-+static int test_prepare_mounts(struct kdbus_test_args *kdbus_args)
-+{
-+	int ret;
-+	char kdbusfs[64] = {'\0'};
-+
-+	snprintf(kdbusfs, sizeof(kdbusfs), "%sfs", kdbus_args->module);
-+
-+	/* make current mount slave */
-+	ret = mount(NULL, "/", NULL, MS_SLAVE|MS_REC, NULL);
-+	if (ret < 0) {
-+		ret = -errno;
-+		printf("error mount() root: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	/* Remount procfs since we need it in our tests */
-+	if (kdbus_args->pidns) {
-+		ret = mount("proc", "/proc", "proc",
-+			    MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL);
-+		if (ret < 0) {
-+			ret = -errno;
-+			printf("error mount() /proc : %d (%m)\n", ret);
-+			return ret;
-+		}
-+	}
-+
-+	/* Remount kdbusfs */
-+	ret = mount(kdbusfs, kdbus_args->root, kdbusfs,
-+		    MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL);
-+	if (ret < 0) {
-+		ret = -errno;
-+		printf("error mount() %s: %d (%m)\n", kdbusfs, ret);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+int run_tests_in_namespaces(struct kdbus_test_args *kdbus_args)
-+{
-+	int ret;
-+	int efd = -1;
-+	int status;
-+	pid_t pid, rpid;
-+	struct sigaction oldsa;
-+	struct sigaction sa = {
-+		.sa_handler = nop_handler,
-+		.sa_flags = SA_NOCLDSTOP,
-+	};
-+
-+	efd = eventfd(0, EFD_CLOEXEC);
-+	if (efd < 0) {
-+		ret = -errno;
-+		printf("eventfd() failed: %d (%m)\n", ret);
-+		return TEST_ERR;
-+	}
-+
-+	ret = sigaction(SIGCHLD, &sa, &oldsa);
-+	if (ret < 0) {
-+		ret = -errno;
-+		printf("sigaction() failed: %d (%m)\n", ret);
-+		return TEST_ERR;
-+	}
-+
-+	/* setup namespaces */
-+	pid = syscall(__NR_clone, SIGCHLD|
-+		      (kdbus_args->userns ? CLONE_NEWUSER : 0) |
-+		      (kdbus_args->mntns ? CLONE_NEWNS : 0) |
-+		      (kdbus_args->pidns ? CLONE_NEWPID : 0), NULL);
-+	if (pid < 0) {
-+		printf("clone() failed: %d (%m)\n", -errno);
-+		return TEST_ERR;
-+	}
-+
-+	if (pid == 0) {
-+		eventfd_t event_status = 0;
-+
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		if (ret < 0) {
-+			ret = -errno;
-+			printf("error prctl(): %d (%m)\n", ret);
-+			_exit(TEST_ERR);
-+		}
-+
-+		/* reset signal handlers in the child */
-+		ret = sigaction(SIGCHLD, &oldsa, NULL);
-+		if (ret < 0) {
-+			ret = -errno;
-+			printf("sigaction() failed: %d (%m)\n", ret);
-+			_exit(TEST_ERR);
-+		}
-+
-+		ret = eventfd_read(efd, &event_status);
-+		if (ret < 0 || event_status != 1) {
-+			printf("error eventfd_read()\n");
-+			_exit(TEST_ERR);
-+		}
-+
-+		if (kdbus_args->mntns) {
-+			ret = test_prepare_mounts(kdbus_args);
-+			if (ret < 0) {
-+				printf("error preparing mounts\n");
-+				_exit(TEST_ERR);
-+			}
-+		}
-+
-+		ret = run_tests(kdbus_args);
-+		_exit(ret);
-+	}
-+
-+	/* Setup userns mapping */
-+	if (kdbus_args->userns) {
-+		ret = userns_map_uid_gid(pid, kdbus_args->uid_map,
-+					 kdbus_args->gid_map);
-+		if (ret < 0) {
-+			printf("error mapping uid and gid in userns\n");
-+			eventfd_write(efd, 2);
-+			return TEST_ERR;
-+		}
-+	}
-+
-+	ret = eventfd_write(efd, 1);
-+	if (ret < 0) {
-+		ret = -errno;
-+		printf("error eventfd_write(): %d (%m)\n", ret);
-+		return TEST_ERR;
-+	}
-+
-+	rpid = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(rpid == pid, TEST_ERR);
-+
-+	close(efd);
-+
-+	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
-+		return TEST_ERR;
-+
-+	return TEST_OK;
-+}
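
The eventfd here implements a simple parent/child handshake: the child blocks
in eventfd_read() until the parent has finished its setup (writing the uid/gid
map) and signals readiness with eventfd_write(efd, 1); note that the error
path writes 2, which the child's 'event_status != 1' check treats as an abort.
Distilled to its essence, independent of kdbus:

	#include <sys/eventfd.h>

	/* child side: block until the parent says "go" */
	static int wait_for_parent(int efd)
	{
		eventfd_t token = 0;

		if (eventfd_read(efd, &token) < 0 || token != 1)
			return -1;	/* setup failed or was aborted */
		return 0;
	}

	/* parent side: release the child once setup succeeded */
	static int release_child(int efd)
	{
		return eventfd_write(efd, 1);
	}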
-+
-+int start_tests(struct kdbus_test_args *kdbus_args)
-+{
-+	int ret;
-+	bool namespaces;
-+	static char fspath[4096];
-+
-+	namespaces = (kdbus_args->mntns || kdbus_args->pidns ||
-+		      kdbus_args->userns);
-+
-+	/* for pidns we need mntns set */
-+	if (kdbus_args->pidns && !kdbus_args->mntns) {
-+		printf("Failed: please set both pid and mnt namespaces\n");
-+		return TEST_ERR;
-+	}
-+
-+	if (kdbus_args->userns) {
-+		if (!config_user_ns_is_enabled()) {
-+			printf("User namespace not supported\n");
-+			return TEST_ERR;
-+		}
-+
-+		if (!kdbus_args->uid_map || !kdbus_args->gid_map) {
-+			printf("Failed: please specify uid or gid mapping\n");
-+			return TEST_ERR;
-+		}
-+	}
-+
-+	print_kdbus_test_args(kdbus_args);
-+	print_metadata_support();
-+
-+	/* setup kdbus paths */
-+	if (!kdbus_args->module)
-+		kdbus_args->module = "kdbus";
-+
-+	if (!kdbus_args->root) {
-+		snprintf(fspath, sizeof(fspath), "/sys/fs/%s",
-+			 kdbus_args->module);
-+		kdbus_args->root = fspath;
-+	}
-+
-+	/* Start tests */
-+	if (namespaces)
-+		ret = run_tests_in_namespaces(kdbus_args);
-+	else
-+		ret = run_tests(kdbus_args);
-+
-+	return ret;
-+}
-+
-+int main(int argc, char *argv[])
-+{
-+	int t, ret = 0;
-+	struct kdbus_test_args *kdbus_args;
-+	enum {
-+		ARG_MNTNS = 0x100,
-+		ARG_PIDNS,
-+		ARG_USERNS,
-+		ARG_UIDMAP,
-+		ARG_GIDMAP,
-+	};
-+
-+	kdbus_args = malloc(sizeof(*kdbus_args));
-+	if (!kdbus_args) {
-+		printf("unable to malloc() kdbus_args\n");
-+		return EXIT_FAILURE;
-+	}
-+
-+	memset(kdbus_args, 0, sizeof(*kdbus_args));
-+
-+	static const struct option options[] = {
-+		{ "loop",	no_argument,		NULL, 'x' },
-+		{ "help",	no_argument,		NULL, 'h' },
-+		{ "root",	required_argument,	NULL, 'r' },
-+		{ "test",	required_argument,	NULL, 't' },
-+		{ "bus",	required_argument,	NULL, 'b' },
-+		{ "wait",	required_argument,	NULL, 'w' },
-+		{ "fork",	no_argument,		NULL, 'f' },
-+		{ "module",	required_argument,	NULL, 'm' },
-+		{ "tap",	no_argument,		NULL, 'a' },
-+		{ "mntns",	no_argument,		NULL, ARG_MNTNS },
-+		{ "pidns",	no_argument,		NULL, ARG_PIDNS },
-+		{ "userns",	no_argument,		NULL, ARG_USERNS },
-+		{ "uidmap",	required_argument,	NULL, ARG_UIDMAP },
-+		{ "gidmap",	required_argument,	NULL, ARG_GIDMAP },
-+		{}
-+	};
-+
-+	srand(time(NULL));
-+
-+	while ((t = getopt_long(argc, argv, "hxfm:r:t:b:w:a", options, NULL)) >= 0) {
-+		switch (t) {
-+		case 'x':
-+			kdbus_args->loop = 1;
-+			break;
-+
-+		case 'm':
-+			kdbus_args->module = optarg;
-+			break;
-+
-+		case 'r':
-+			kdbus_args->root = optarg;
-+			break;
-+
-+		case 't':
-+			kdbus_args->test = optarg;
-+			break;
-+
-+		case 'b':
-+			kdbus_args->busname = optarg;
-+			break;
-+
-+		case 'w':
-+			kdbus_args->wait = strtol(optarg, NULL, 10);
-+			break;
-+
-+		case 'f':
-+			kdbus_args->fork = 1;
-+			break;
-+
-+		case 'a':
-+			kdbus_args->tap_output = 1;
-+			break;
-+
-+		case ARG_MNTNS:
-+			kdbus_args->mntns = true;
-+			break;
-+
-+		case ARG_PIDNS:
-+			kdbus_args->pidns = true;
-+			break;
-+
-+		case ARG_USERNS:
-+			kdbus_args->userns = true;
-+			break;
-+
-+		case ARG_UIDMAP:
-+			kdbus_args->uid_map = optarg;
-+			break;
-+
-+		case ARG_GIDMAP:
-+			kdbus_args->gid_map = optarg;
-+			break;
-+
-+		default:
-+		case 'h':
-+			usage(argv[0]);
-+		}
-+	}
-+
-+	ret = start_tests(kdbus_args);
-+
-+	free(kdbus_args);
-+
-+	if (ret == TEST_ERR)
-+		return EXIT_FAILURE;
-+
-+	return 0;
-+}
-diff --git a/tools/testing/selftests/kdbus/kdbus-test.h b/tools/testing/selftests/kdbus/kdbus-test.h
-new file mode 100644
-index 0000000..ee937f9
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-test.h
-@@ -0,0 +1,84 @@
-+#ifndef _TEST_KDBUS_H_
-+#define _TEST_KDBUS_H_
-+
-+struct kdbus_test_env {
-+	char *buspath;
-+	const char *root;
-+	const char *module;
-+	int control_fd;
-+	struct kdbus_conn *conn;
-+};
-+
-+enum {
-+	TEST_OK,
-+	TEST_SKIP,
-+	TEST_ERR,
-+};
-+
-+#define ASSERT_RETURN_VAL(cond, val)		\
-+	if (!(cond)) {			\
-+		fprintf(stderr,	"Assertion '%s' failed in %s(), %s:%d\n", \
-+			#cond, __func__, __FILE__, __LINE__);	\
-+		return val;	\
-+	}
-+
-+#define ASSERT_EXIT_VAL(cond, val)		\
-+	if (!(cond)) {			\
-+		fprintf(stderr, "Assertion '%s' failed in %s(), %s:%d\n", \
-+			#cond, __func__, __FILE__, __LINE__);	\
-+		_exit(val);	\
-+	}
-+
-+#define ASSERT_BREAK(cond)		\
-+	if (!(cond)) {			\
-+		fprintf(stderr, "Assertion '%s' failed in %s(), %s:%d\n", \
-+			#cond, __func__, __FILE__, __LINE__);	\
-+		break; \
-+	}
-+
-+#define ASSERT_RETURN(cond)		\
-+	ASSERT_RETURN_VAL(cond, TEST_ERR)
-+
-+#define ASSERT_EXIT(cond)		\
-+	ASSERT_EXIT_VAL(cond, EXIT_FAILURE)
-+
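One caveat worth noting: because these macros expand to a bare
'if (!(cond)) { ... }', using one as the body of an outer if/else can
misparse. A more defensive formulation wraps the expansion in the usual
do/while(0), sketched here for ASSERT_RETURN_VAL:

	#define ASSERT_RETURN_VAL(cond, val)				\
		do {							\
			if (!(cond)) {					\
				fprintf(stderr,				\
					"Assertion '%s' failed in %s(), %s:%d\n", \
					#cond, __func__, __FILE__, __LINE__); \
				return val;				\
			}						\
		} while (0)
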
-+int kdbus_test_activator(struct kdbus_test_env *env);
-+int kdbus_test_benchmark(struct kdbus_test_env *env);
-+int kdbus_test_benchmark_nomemfds(struct kdbus_test_env *env);
-+int kdbus_test_benchmark_uds(struct kdbus_test_env *env);
-+int kdbus_test_bus_make(struct kdbus_test_env *env);
-+int kdbus_test_byebye(struct kdbus_test_env *env);
-+int kdbus_test_chat(struct kdbus_test_env *env);
-+int kdbus_test_conn_info(struct kdbus_test_env *env);
-+int kdbus_test_conn_update(struct kdbus_test_env *env);
-+int kdbus_test_daemon(struct kdbus_test_env *env);
-+int kdbus_test_custom_endpoint(struct kdbus_test_env *env);
-+int kdbus_test_fd_passing(struct kdbus_test_env *env);
-+int kdbus_test_free(struct kdbus_test_env *env);
-+int kdbus_test_hello(struct kdbus_test_env *env);
-+int kdbus_test_match_bloom(struct kdbus_test_env *env);
-+int kdbus_test_match_id_add(struct kdbus_test_env *env);
-+int kdbus_test_match_id_remove(struct kdbus_test_env *env);
-+int kdbus_test_match_replace(struct kdbus_test_env *env);
-+int kdbus_test_match_name_add(struct kdbus_test_env *env);
-+int kdbus_test_match_name_change(struct kdbus_test_env *env);
-+int kdbus_test_match_name_remove(struct kdbus_test_env *env);
-+int kdbus_test_message_basic(struct kdbus_test_env *env);
-+int kdbus_test_message_prio(struct kdbus_test_env *env);
-+int kdbus_test_message_quota(struct kdbus_test_env *env);
-+int kdbus_test_memory_access(struct kdbus_test_env *env);
-+int kdbus_test_metadata_ns(struct kdbus_test_env *env);
-+int kdbus_test_monitor(struct kdbus_test_env *env);
-+int kdbus_test_name_basic(struct kdbus_test_env *env);
-+int kdbus_test_name_conflict(struct kdbus_test_env *env);
-+int kdbus_test_name_queue(struct kdbus_test_env *env);
-+int kdbus_test_name_takeover(struct kdbus_test_env *env);
-+int kdbus_test_policy(struct kdbus_test_env *env);
-+int kdbus_test_policy_ns(struct kdbus_test_env *env);
-+int kdbus_test_policy_priv(struct kdbus_test_env *env);
-+int kdbus_test_sync_byebye(struct kdbus_test_env *env);
-+int kdbus_test_sync_reply(struct kdbus_test_env *env);
-+int kdbus_test_timeout(struct kdbus_test_env *env);
-+int kdbus_test_writable_pool(struct kdbus_test_env *env);
-+
-+#endif /* _TEST_KDBUS_H_ */
-diff --git a/tools/testing/selftests/kdbus/kdbus-util.c b/tools/testing/selftests/kdbus/kdbus-util.c
-new file mode 100644
-index 0000000..82fa89b
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-util.c
-@@ -0,0 +1,1612 @@
-+/*
-+ * Copyright (C) 2013-2015 Daniel Mack
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <stdio.h>
-+#include <stdarg.h>
-+#include <string.h>
-+#include <time.h>
-+#include <inttypes.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <grp.h>
-+#include <sys/capability.h>
-+#include <sys/mman.h>
-+#include <sys/stat.h>
-+#include <sys/time.h>
-+#include <linux/unistd.h>
-+#include <linux/memfd.h>
-+
-+#ifndef __NR_memfd_create
-+  #ifdef __x86_64__
-+    #define __NR_memfd_create 319
-+  #elif defined __arm__
-+    #define __NR_memfd_create 385
-+  #else
-+    #define __NR_memfd_create 356
-+  #endif
-+#endif
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+#ifndef F_ADD_SEALS
-+#define F_LINUX_SPECIFIC_BASE	1024
-+#define F_ADD_SEALS     (F_LINUX_SPECIFIC_BASE + 9)
-+#define F_GET_SEALS     (F_LINUX_SPECIFIC_BASE + 10)
-+
-+#define F_SEAL_SEAL     0x0001  /* prevent further seals from being set */
-+#define F_SEAL_SHRINK   0x0002  /* prevent file from shrinking */
-+#define F_SEAL_GROW     0x0004  /* prevent file from growing */
-+#define F_SEAL_WRITE    0x0008  /* prevent writes */
-+#endif
-+
-+int kdbus_util_verbose = true;
-+
-+int kdbus_sysfs_get_parameter_mask(const char *path, uint64_t *mask)
-+{
-+	int ret;
-+	FILE *file;
-+	unsigned long long value;
-+
-+	file = fopen(path, "r");
-+	if (!file) {
-+		ret = -errno;
-+		kdbus_printf("--- error fopen(): %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	ret = fscanf(file, "%llu", &value);
-+	if (ret != 1) {
-+		if (ferror(file))
-+			ret = -errno;
-+		else
-+			ret = -EIO;
-+
-+		kdbus_printf("--- error fscanf(): %d\n", ret);
-+		fclose(file);
-+		return ret;
-+	}
-+
-+	*mask = (uint64_t)value;
-+
-+	fclose(file);
-+
-+	return 0;
-+}
-+
-+int kdbus_sysfs_set_parameter_mask(const char *path, uint64_t mask)
-+{
-+	int ret;
-+	FILE *file;
-+
-+	file = fopen(path, "w");
-+	if (!file) {
-+		ret = -errno;
-+		kdbus_printf("--- error open(): %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	ret = fprintf(file, "%llu", (unsigned long long)mask);
-+	if (ret <= 0) {
-+		ret = -EIO;
-+		kdbus_printf("--- error fprintf(): %d\n", ret);
-+	}
-+
-+	fclose(file);
-+
-+	return ret > 0 ? 0 : ret;
-+}
-+
-+int kdbus_create_bus(int control_fd, const char *name,
-+		     uint64_t owner_meta, char **path)
-+{
-+	struct {
-+		struct kdbus_cmd cmd;
-+
-+		/* bloom size item */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_bloom_parameter bloom;
-+		} bp;
-+
-+		/* owner metadata items */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			uint64_t flags;
-+		} attach;
-+
-+		/* name item */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			char str[64];
-+		} name;
-+	} bus_make;
-+	int ret;
-+
-+	memset(&bus_make, 0, sizeof(bus_make));
-+	bus_make.bp.size = sizeof(bus_make.bp);
-+	bus_make.bp.type = KDBUS_ITEM_BLOOM_PARAMETER;
-+	bus_make.bp.bloom.size = 64;
-+	bus_make.bp.bloom.n_hash = 1;
-+
-+	snprintf(bus_make.name.str, sizeof(bus_make.name.str),
-+		 "%u-%s", getuid(), name);
-+
-+	bus_make.attach.type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
-+	bus_make.attach.size = sizeof(bus_make.attach);
-+	bus_make.attach.flags = owner_meta;
-+
-+	bus_make.name.type = KDBUS_ITEM_MAKE_NAME;
-+	bus_make.name.size = KDBUS_ITEM_HEADER_SIZE +
-+			     strlen(bus_make.name.str) + 1;
-+
-+	bus_make.cmd.flags = KDBUS_MAKE_ACCESS_WORLD;
-+	bus_make.cmd.size = sizeof(bus_make.cmd) +
-+			     bus_make.bp.size +
-+			     bus_make.attach.size +
-+			     bus_make.name.size;
-+
-+	kdbus_printf("Creating bus with name >%s< on control fd %d ...\n",
-+		     name, control_fd);
-+
-+	ret = kdbus_cmd_bus_make(control_fd, &bus_make.cmd);
-+	if (ret < 0) {
-+		kdbus_printf("--- error when making bus: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	if (ret == 0 && path)
-+		*path = strdup(bus_make.name.str);
-+
-+	return ret;
-+}
-+
-+struct kdbus_conn *
-+kdbus_hello(const char *path, uint64_t flags,
-+	    const struct kdbus_item *item, size_t item_size)
-+{
-+	struct kdbus_cmd_free cmd_free = {};
-+	int fd, ret;
-+	struct {
-+		struct kdbus_cmd_hello hello;
-+
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			char str[16];
-+		} conn_name;
-+
-+		uint8_t extra_items[item_size];
-+	} h;
-+	struct kdbus_conn *conn;
-+
-+	memset(&h, 0, sizeof(h));
-+
-+	if (item_size > 0)
-+		memcpy(h.extra_items, item, item_size);
-+
-+	kdbus_printf("-- opening bus connection %s\n", path);
-+	fd = open(path, O_RDWR|O_CLOEXEC);
-+	if (fd < 0) {
-+		kdbus_printf("--- error %d (%m)\n", fd);
-+		return NULL;
-+	}
-+
-+	h.hello.flags = flags | KDBUS_HELLO_ACCEPT_FD;
-+	h.hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	h.hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
-+	h.conn_name.type = KDBUS_ITEM_CONN_DESCRIPTION;
-+	strcpy(h.conn_name.str, "this-is-my-name");
-+	h.conn_name.size = KDBUS_ITEM_HEADER_SIZE + strlen(h.conn_name.str) + 1;
-+
-+	h.hello.size = sizeof(h);
-+	h.hello.pool_size = POOL_SIZE;
-+
-+	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) &h.hello);
-+	if (ret < 0) {
-+		kdbus_printf("--- error when saying hello: %d (%m)\n", ret);
-+		close(fd);
-+		return NULL;
-+	}
-+	kdbus_printf("-- Our peer ID for %s: %llu -- bus uuid: '%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x'\n",
-+		     path, (unsigned long long)h.hello.id,
-+		     h.hello.id128[0],  h.hello.id128[1],  h.hello.id128[2],
-+		     h.hello.id128[3],  h.hello.id128[4],  h.hello.id128[5],
-+		     h.hello.id128[6],  h.hello.id128[7],  h.hello.id128[8],
-+		     h.hello.id128[9],  h.hello.id128[10], h.hello.id128[11],
-+		     h.hello.id128[12], h.hello.id128[13], h.hello.id128[14],
-+		     h.hello.id128[15]);
-+
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = h.hello.offset;
-+	kdbus_cmd_free(fd, &cmd_free);
-+
-+	conn = malloc(sizeof(*conn));
-+	if (!conn) {
-+		kdbus_printf("unable to malloc()!?\n");
-+		close(fd);
-+		return NULL;
-+	}
-+
-+	conn->buf = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
-+	if (conn->buf == MAP_FAILED) {
-+		free(conn);
-+		close(fd);
-+		kdbus_printf("--- error mmap (%m)\n");
-+		return NULL;
-+	}
-+
-+	conn->fd = fd;
-+	conn->id = h.hello.id;
-+	return conn;
-+}
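
The pool is deliberately mapped PROT_READ only: the kernel refuses writable
mappings of a connection pool, which is exactly what the 'writable-pool' test
exercises. A sketch of that negative check (the exact errno is an assumption;
the test itself is authoritative):

	void *p = mmap(NULL, POOL_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED, conn->fd, 0);
	if (p == MAP_FAILED)
		/* expected; kdbus rejects writable mappings (-EPERM) */;
	else
		munmap(p, POOL_SIZE);	/* would indicate a kernel bug */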
-+
-+struct kdbus_conn *
-+kdbus_hello_registrar(const char *path, const char *name,
-+		      const struct kdbus_policy_access *access,
-+		      size_t num_access, uint64_t flags)
-+{
-+	struct kdbus_item *item, *items;
-+	size_t i, size;
-+
-+	size = KDBUS_ITEM_SIZE(strlen(name) + 1) +
-+		num_access * KDBUS_ITEM_SIZE(sizeof(*access));
-+
-+	items = alloca(size);
-+
-+	item = items;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+	item->type = KDBUS_ITEM_NAME;
-+	strcpy(item->str, name);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	for (i = 0; i < num_access; i++) {
-+		item->size = KDBUS_ITEM_HEADER_SIZE +
-+			     sizeof(struct kdbus_policy_access);
-+		item->type = KDBUS_ITEM_POLICY_ACCESS;
-+
-+		item->policy_access.type = access[i].type;
-+		item->policy_access.access = access[i].access;
-+		item->policy_access.id = access[i].id;
-+
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+
-+	return kdbus_hello(path, flags, items, size);
-+}
-+
-+struct kdbus_conn *kdbus_hello_activator(const char *path, const char *name,
-+				   const struct kdbus_policy_access *access,
-+				   size_t num_access)
-+{
-+	return kdbus_hello_registrar(path, name, access, num_access,
-+				     KDBUS_HELLO_ACTIVATOR);
-+}
-+
-+bool kdbus_item_in_message(struct kdbus_msg *msg, uint64_t type)
-+{
-+	const struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items)
-+		if (item->type == type)
-+			return true;
-+
-+	return false;
-+}
-+
-+int kdbus_bus_creator_info(struct kdbus_conn *conn,
-+			   uint64_t flags,
-+			   uint64_t *offset)
-+{
-+	struct kdbus_cmd_info *cmd;
-+	size_t size = sizeof(*cmd);
-+	int ret;
-+
-+	cmd = alloca(size);
-+	memset(cmd, 0, size);
-+	cmd->size = size;
-+	cmd->attach_flags = flags;
-+
-+	ret = kdbus_cmd_bus_creator_info(conn->fd, cmd);
-+	if (ret < 0) {
-+		kdbus_printf("--- error when requesting info: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	if (offset)
-+		*offset = cmd->offset;
-+	else
-+		kdbus_free(conn, cmd->offset);
-+
-+	return 0;
-+}
-+
-+int kdbus_conn_info(struct kdbus_conn *conn, uint64_t id,
-+		    const char *name, uint64_t flags,
-+		    uint64_t *offset)
-+{
-+	struct kdbus_cmd_info *cmd;
-+	size_t size = sizeof(*cmd);
-+	struct kdbus_info *info;
-+	int ret;
-+
-+	if (name)
-+		size += KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+
-+	cmd = alloca(size);
-+	memset(cmd, 0, size);
-+	cmd->size = size;
-+	cmd->attach_flags = flags;
-+
-+	if (name) {
-+		cmd->items[0].size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+		cmd->items[0].type = KDBUS_ITEM_NAME;
-+		strcpy(cmd->items[0].str, name);
-+	} else {
-+		cmd->id = id;
-+	}
-+
-+	ret = kdbus_cmd_conn_info(conn->fd, cmd);
-+	if (ret < 0) {
-+		kdbus_printf("--- error when requesting info: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	info = (struct kdbus_info *) (conn->buf + cmd->offset);
-+	if (info->size != cmd->info_size) {
-+		kdbus_printf("%s(): size mismatch: %d != %d\n", __func__,
-+				(int) info->size, (int) cmd->info_size);
-+		return -EIO;
-+	}
-+
-+	if (offset)
-+		*offset = cmd->offset;
-+	else
-+		kdbus_free(conn, cmd->offset);
-+
-+	return 0;
-+}
-+
-+void kdbus_conn_free(struct kdbus_conn *conn)
-+{
-+	if (!conn)
-+		return;
-+
-+	if (conn->buf)
-+		munmap(conn->buf, POOL_SIZE);
-+
-+	if (conn->fd >= 0)
-+		close(conn->fd);
-+
-+	free(conn);
-+}
-+
-+int sys_memfd_create(const char *name, __u64 size)
-+{
-+	int ret, fd;
-+
-+	fd = syscall(__NR_memfd_create, name, MFD_ALLOW_SEALING);
-+	if (fd < 0)
-+		return fd;
-+
-+	ret = ftruncate(fd, size);
-+	if (ret < 0) {
-+		close(fd);
-+		return ret;
-+	}
-+
-+	return fd;
-+}
-+
-+int sys_memfd_seal_set(int fd)
-+{
-+	return fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK |
-+			 F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
-+}
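
Together these two helpers produce the sealed memfds used as zero-copy
payloads below: create and size the file, write the payload, then seal it so
the receiver can trust the contents not to change. A short usage sketch:

	int fd = sys_memfd_create("example", 4096);

	if (fd >= 0) {
		write(fd, "payload", 7);
		/* no writes, resizes or further seals after this point */
		sys_memfd_seal_set(fd);
	}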
-+
-+off_t sys_memfd_get_size(int fd, off_t *size)
-+{
-+	struct stat stat;
-+	int ret;
-+
-+	ret = fstat(fd, &stat);
-+	if (ret < 0) {
-+		kdbus_printf("fstat() failed: %m\n");
-+		return ret;
-+	}
-+
-+	*size = stat.st_size;
-+	return 0;
-+}
-+
-+static int __kdbus_msg_send(const struct kdbus_conn *conn,
-+			    const char *name,
-+			    uint64_t cookie,
-+			    uint64_t flags,
-+			    uint64_t timeout,
-+			    int64_t priority,
-+			    uint64_t dst_id,
-+			    uint64_t cmd_flags,
-+			    int cancel_fd)
-+{
-+	struct kdbus_cmd_send *cmd = NULL;
-+	struct kdbus_msg *msg = NULL;
-+	const char ref1[1024 * 128 + 3] = "0123456789_0";
-+	const char ref2[] = "0123456789_1";
-+	struct kdbus_item *item;
-+	struct timespec now;
-+	uint64_t size;
-+	int memfd = -1;
-+	int ret;
-+
-+	size = sizeof(*msg) + 3 * KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST)
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+	else {
-+		memfd = sys_memfd_create("my-name-is-nice", 1024 * 1024);
-+		if (memfd < 0) {
-+			kdbus_printf("failed to create memfd: %m\n");
-+			return memfd;
-+		}
-+
-+		if (write(memfd, "kdbus memfd 1234567", 19) != 19) {
-+			ret = -errno;
-+			kdbus_printf("writing to memfd failed: %m\n");
-+			goto out;
-+		}
-+
-+		ret = sys_memfd_seal_set(memfd);
-+		if (ret < 0) {
-+			ret = -errno;
-+			kdbus_printf("memfd sealing failed: %m\n");
-+			goto out;
-+		}
-+
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
-+	}
-+
-+	if (name)
-+		size += KDBUS_ITEM_SIZE(strlen(name) + 1);
-+
-+	msg = malloc(size);
-+	if (!msg) {
-+		ret = -errno;
-+		kdbus_printf("unable to malloc()!?\n");
-+		goto out;
-+	}
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST)
-+		flags |= KDBUS_MSG_SIGNAL;
-+
-+	memset(msg, 0, size);
-+	msg->flags = flags;
-+	msg->priority = priority;
-+	msg->size = size;
-+	msg->src_id = conn->id;
-+	msg->dst_id = name ? 0 : dst_id;
-+	msg->cookie = cookie;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	if (timeout) {
-+		ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
-+		if (ret < 0)
-+			goto out;
-+
-+		msg->timeout_ns = now.tv_sec * 1000000000ULL +
-+				  now.tv_nsec + timeout;
-+	}
-+
-+	item = msg->items;
-+
-+	if (name) {
-+		item->type = KDBUS_ITEM_DST_NAME;
-+		item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+		strcpy(item->str, name);
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)&ref1;
-+	item->vec.size = sizeof(ref1);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	/* data padding for ref1 */
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)NULL;
-+	item->vec.size =  KDBUS_ALIGN8(sizeof(ref1)) - sizeof(ref1);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)&ref2;
-+	item->vec.size = sizeof(ref2);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST) {
-+		item->type = KDBUS_ITEM_BLOOM_FILTER;
-+		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+		item->bloom_filter.generation = 0;
-+	} else {
-+		item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
-+		item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_memfd);
-+		item->memfd.size = 16;
-+		item->memfd.fd = memfd;
-+	}
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	size = sizeof(*cmd);
-+	if (cancel_fd != -1)
-+		size += KDBUS_ITEM_SIZE(sizeof(cancel_fd));
-+
-+	cmd = malloc(size);
-+	if (!cmd) {
-+		ret = -errno;
-+		kdbus_printf("unable to malloc()!?\n");
-+		goto out;
-+	}
-+
-+	cmd->size = size;
-+	cmd->flags = cmd_flags;
-+	cmd->msg_address = (uintptr_t)msg;
-+
-+	item = cmd->items;
-+
-+	if (cancel_fd != -1) {
-+		item->type = KDBUS_ITEM_CANCEL_FD;
-+		item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(cancel_fd);
-+		item->fds[0] = cancel_fd;
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+
-+	ret = kdbus_cmd_send(conn->fd, cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		goto out;
-+	}
-+
-+	if (cmd_flags & KDBUS_SEND_SYNC_REPLY) {
-+		struct kdbus_msg *reply;
-+
-+		kdbus_printf("SYNC REPLY @offset %llu:\n", cmd->reply.offset);
-+		reply = (struct kdbus_msg *)(conn->buf + cmd->reply.offset);
-+		kdbus_msg_dump(conn, reply);
-+
-+		kdbus_msg_free(reply);
-+
-+		ret = kdbus_free(conn, cmd->reply.offset);
-+		if (ret < 0)
-+			goto out;
-+	}
-+
-+out:
-+	free(msg);
-+	free(cmd);
-+
-+	if (memfd >= 0)
-+		close(memfd);
-+
-+	return ret < 0 ? ret : 0;
-+}
-+
-+int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
-+		   uint64_t cookie, uint64_t flags, uint64_t timeout,
-+		   int64_t priority, uint64_t dst_id)
-+{
-+	return __kdbus_msg_send(conn, name, cookie, flags, timeout, priority,
-+				dst_id, 0, -1);
-+}
-+
-+int kdbus_msg_send_sync(const struct kdbus_conn *conn, const char *name,
-+			uint64_t cookie, uint64_t flags, uint64_t timeout,
-+			int64_t priority, uint64_t dst_id, int cancel_fd)
-+{
-+	return __kdbus_msg_send(conn, name, cookie, flags, timeout, priority,
-+				dst_id, KDBUS_SEND_SYNC_REPLY, cancel_fd);
-+}
-+
-+int kdbus_msg_send_reply(const struct kdbus_conn *conn,
-+			 uint64_t reply_cookie,
-+			 uint64_t dst_id)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_msg *msg;
-+	const char ref1[1024 * 128 + 3] = "0123456789_0";
-+	struct kdbus_item *item;
-+	uint64_t size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	msg = malloc(size);
-+	if (!msg) {
-+		kdbus_printf("unable to malloc()!?\n");
-+		return -ENOMEM;
-+	}
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = conn->id;
-+	msg->dst_id = dst_id;
-+	msg->cookie_reply = reply_cookie;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	item = msg->items;
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)&ref1;
-+	item->vec.size = sizeof(ref1);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	if (ret < 0)
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+
-+	free(msg);
-+
-+	return ret;
-+}
-+
-+static char *msg_id(uint64_t id, char *buf)
-+{
-+	if (id == 0)
-+		return "KERNEL";
-+	if (id == ~0ULL)
-+		return "BROADCAST";
-+	sprintf(buf, "%llu", (unsigned long long)id);
-+	return buf;
-+}
-+
-+int kdbus_msg_dump(const struct kdbus_conn *conn, const struct kdbus_msg *msg)
-+{
-+	const struct kdbus_item *item = msg->items;
-+	char buf_src[32];
-+	char buf_dst[32];
-+	uint64_t timeout = 0;
-+	uint64_t cookie_reply = 0;
-+	int ret = 0;
-+
-+	if (msg->flags & KDBUS_MSG_EXPECT_REPLY)
-+		timeout = msg->timeout_ns;
-+	else
-+		cookie_reply = msg->cookie_reply;
-+
-+	kdbus_printf("MESSAGE: %s (%llu bytes) flags=0x%08llx, %s → %s, "
-+		     "cookie=%llu, timeout=%llu cookie_reply=%llu priority=%lli\n",
-+		enum_PAYLOAD(msg->payload_type), (unsigned long long)msg->size,
-+		(unsigned long long)msg->flags,
-+		msg_id(msg->src_id, buf_src), msg_id(msg->dst_id, buf_dst),
-+		(unsigned long long)msg->cookie, (unsigned long long)timeout,
-+		(unsigned long long)cookie_reply, (long long)msg->priority);
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items) {
-+		if (item->size < KDBUS_ITEM_HEADER_SIZE) {
-+			kdbus_printf("  +%s (%llu bytes) invalid data record\n",
-+				     enum_MSG(item->type), item->size);
-+			ret = -EINVAL;
-+			break;
-+		}
-+
-+		switch (item->type) {
-+		case KDBUS_ITEM_PAYLOAD_OFF: {
-+			char *s;
-+
-+			if (item->vec.offset == ~0ULL)
-+				s = "[\\0-bytes]";
-+			else
-+				s = (char *)msg + item->vec.offset;
-+
-+			kdbus_printf("  +%s (%llu bytes) off=%llu size=%llu '%s'\n",
-+			       enum_MSG(item->type), item->size,
-+			       (unsigned long long)item->vec.offset,
-+			       (unsigned long long)item->vec.size, s);
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_FDS: {
-+			int i, n = (item->size - KDBUS_ITEM_HEADER_SIZE) /
-+					sizeof(int);
-+
-+			kdbus_printf("  +%s (%llu bytes, %d fds)\n",
-+			       enum_MSG(item->type), item->size, n);
-+
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("    fd[%d] = %d\n",
-+					     i, item->fds[i]);
-+
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_PAYLOAD_MEMFD: {
-+			char *buf;
-+			off_t size;
-+
-+			buf = mmap(NULL, item->memfd.size, PROT_READ,
-+				   MAP_PRIVATE, item->memfd.fd, 0);
-+			if (buf == MAP_FAILED) {
-+				kdbus_printf("mmap() fd=%i size=%llu failed: %m\n",
-+					     item->memfd.fd, item->memfd.size);
-+				break;
-+			}
-+
-+			if (sys_memfd_get_size(item->memfd.fd, &size) < 0) {
-+				kdbus_printf("KDBUS_CMD_MEMFD_SIZE_GET failed: %m\n");
-+				break;
-+			}
-+
-+			kdbus_printf("  +%s (%llu bytes) fd=%i size=%llu filesize=%llu '%s'\n",
-+			       enum_MSG(item->type), item->size, item->memfd.fd,
-+			       (unsigned long long)item->memfd.size,
-+			       (unsigned long long)size, buf);
-+			munmap(buf, item->memfd.size);
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_CREDS:
-+			kdbus_printf("  +%s (%llu bytes) uid=%lld, euid=%lld, suid=%lld, fsuid=%lld, "
-+							"gid=%lld, egid=%lld, sgid=%lld, fsgid=%lld\n",
-+				enum_MSG(item->type), item->size,
-+				item->creds.uid, item->creds.euid,
-+				item->creds.suid, item->creds.fsuid,
-+				item->creds.gid, item->creds.egid,
-+				item->creds.sgid, item->creds.fsgid);
-+			break;
-+
-+		case KDBUS_ITEM_PIDS:
-+			kdbus_printf("  +%s (%llu bytes) pid=%lld, tid=%lld, ppid=%lld\n",
-+				enum_MSG(item->type), item->size,
-+				item->pids.pid, item->pids.tid,
-+				item->pids.ppid);
-+			break;
-+
-+		case KDBUS_ITEM_AUXGROUPS: {
-+			int i, n;
-+
-+			kdbus_printf("  +%s (%llu bytes)\n",
-+				     enum_MSG(item->type), item->size);
-+			n = (item->size - KDBUS_ITEM_HEADER_SIZE) /
-+				sizeof(uint64_t);
-+
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("    gid[%d] = %lld\n",
-+					     i, item->data64[i]);
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_NAME:
-+		case KDBUS_ITEM_PID_COMM:
-+		case KDBUS_ITEM_TID_COMM:
-+		case KDBUS_ITEM_EXE:
-+		case KDBUS_ITEM_CGROUP:
-+		case KDBUS_ITEM_SECLABEL:
-+		case KDBUS_ITEM_DST_NAME:
-+		case KDBUS_ITEM_CONN_DESCRIPTION:
-+			kdbus_printf("  +%s (%llu bytes) '%s' (%zu)\n",
-+				     enum_MSG(item->type), item->size,
-+				     item->str, strlen(item->str));
-+			break;
-+
-+		case KDBUS_ITEM_OWNED_NAME: {
-+			kdbus_printf("  +%s (%llu bytes) '%s' (%zu) flags=0x%08llx\n",
-+				     enum_MSG(item->type), item->size,
-+				     item->name.name, strlen(item->name.name),
-+				     item->name.flags);
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_CMDLINE: {
-+			size_t size = item->size - KDBUS_ITEM_HEADER_SIZE;
-+			const char *str = item->str;
-+			int count = 0;
-+
-+			kdbus_printf("  +%s (%llu bytes) ",
-+				     enum_MSG(item->type), item->size);
-+			while (size) {
-+				kdbus_printf("'%s' ", str);
-+				size -= strlen(str) + 1;
-+				str += strlen(str) + 1;
-+				count++;
-+			}
-+
-+			kdbus_printf("(%d string%s)\n",
-+				     count, (count == 1) ? "" : "s");
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_AUDIT:
-+			kdbus_printf("  +%s (%llu bytes) loginuid=%u sessionid=%u\n",
-+			       enum_MSG(item->type), item->size,
-+			       item->audit.loginuid, item->audit.sessionid);
-+			break;
-+
-+		case KDBUS_ITEM_CAPS: {
-+			const uint32_t *cap;
-+			int n, i;
-+
-+			kdbus_printf("  +%s (%llu bytes) len=%llu bytes, last_cap %d\n",
-+				     enum_MSG(item->type), item->size,
-+				     (unsigned long long)item->size -
-+					KDBUS_ITEM_HEADER_SIZE,
-+				     (int) item->caps.last_cap);
-+
-+			cap = item->caps.caps;
-+			n = (item->size - offsetof(struct kdbus_item, caps.caps))
-+				/ 4 / sizeof(uint32_t);
-+
-+			kdbus_printf("    CapInh=");
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("%08x", cap[(0 * n) + (n - i - 1)]);
-+
-+			kdbus_printf(" CapPrm=");
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("%08x", cap[(1 * n) + (n - i - 1)]);
-+
-+			kdbus_printf(" CapEff=");
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("%08x", cap[(2 * n) + (n - i - 1)]);
-+
-+			kdbus_printf(" CapBnd=");
-+			for (i = 0; i < n; i++)
-+				kdbus_printf("%08x", cap[(3 * n) + (n - i - 1)]);
-+			kdbus_printf("\n");
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_TIMESTAMP:
-+			kdbus_printf("  +%s (%llu bytes) seq=%llu realtime=%lluns monotonic=%lluns\n",
-+			       enum_MSG(item->type), item->size,
-+			       (unsigned long long)item->timestamp.seqnum,
-+			       (unsigned long long)item->timestamp.realtime_ns,
-+			       (unsigned long long)item->timestamp.monotonic_ns);
-+			break;
-+
-+		case KDBUS_ITEM_REPLY_TIMEOUT:
-+			kdbus_printf("  +%s (%llu bytes) cookie=%llu\n",
-+			       enum_MSG(item->type), item->size,
-+			       msg->cookie_reply);
-+			break;
-+
-+		case KDBUS_ITEM_NAME_ADD:
-+		case KDBUS_ITEM_NAME_REMOVE:
-+		case KDBUS_ITEM_NAME_CHANGE:
-+			kdbus_printf("  +%s (%llu bytes) '%s', old id=%lld, now id=%lld, old_flags=0x%llx new_flags=0x%llx\n",
-+				enum_MSG(item->type),
-+				(unsigned long long) item->size,
-+				item->name_change.name,
-+				item->name_change.old_id.id,
-+				item->name_change.new_id.id,
-+				item->name_change.old_id.flags,
-+				item->name_change.new_id.flags);
-+			break;
-+
-+		case KDBUS_ITEM_ID_ADD:
-+		case KDBUS_ITEM_ID_REMOVE:
-+			kdbus_printf("  +%s (%llu bytes) id=%llu flags=%llu\n",
-+			       enum_MSG(item->type),
-+			       (unsigned long long) item->size,
-+			       (unsigned long long) item->id_change.id,
-+			       (unsigned long long) item->id_change.flags);
-+			break;
-+
-+		default:
-+			kdbus_printf("  +%s (%llu bytes)\n",
-+				     enum_MSG(item->type), item->size);
-+			break;
-+		}
-+	}
-+
-+	if ((char *)item - ((char *)msg + msg->size) >= 8) {
-+		kdbus_printf("invalid padding at end of message\n");
-+		ret = -EINVAL;
-+	}
-+
-+	kdbus_printf("\n");
-+
-+	return ret;
-+}
-+
-+void kdbus_msg_free(struct kdbus_msg *msg)
-+{
-+	const struct kdbus_item *item;
-+	int nfds, i;
-+
-+	if (!msg)
-+		return;
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items) {
-+		switch (item->type) {
-+		/* close all memfds */
-+		case KDBUS_ITEM_PAYLOAD_MEMFD:
-+			close(item->memfd.fd);
-+			break;
-+		case KDBUS_ITEM_FDS:
-+			nfds = (item->size - KDBUS_ITEM_HEADER_SIZE) /
-+				sizeof(int);
-+
-+			for (i = 0; i < nfds; i++)
-+				close(item->fds[i]);
-+
-+			break;
-+		}
-+	}
-+}
-+
-+int kdbus_msg_recv(struct kdbus_conn *conn,
-+		   struct kdbus_msg **msg_out,
-+		   uint64_t *offset)
-+{
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+	struct kdbus_msg *msg;
-+	int ret;
-+
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	if (ret < 0)
-+		return ret;
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+	ret = kdbus_msg_dump(conn, msg);
-+	if (ret < 0) {
-+		kdbus_msg_free(msg);
-+		return ret;
-+	}
-+
-+	if (msg_out) {
-+		*msg_out = msg;
-+
-+		if (offset)
-+			*offset = recv.msg.offset;
-+	} else {
-+		kdbus_msg_free(msg);
-+
-+		ret = kdbus_free(conn, recv.msg.offset);
-+		if (ret < 0)
-+			return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+/*
-+ * Returns: 0 on success, negative errno on failure.
-+ *
-+ * Possible errors include -ETIMEDOUT, -ECONNRESET and -EAGAIN; otherwise
-+ * the result of kdbus_msg_recv() is passed through.
-+ */
-+int kdbus_msg_recv_poll(struct kdbus_conn *conn,
-+			int timeout_ms,
-+			struct kdbus_msg **msg_out,
-+			uint64_t *offset)
-+{
-+	int ret;
-+
-+	do {
-+		struct timeval before, after, diff;
-+		struct pollfd fd;
-+
-+		fd.fd = conn->fd;
-+		fd.events = POLLIN | POLLPRI | POLLHUP;
-+		fd.revents = 0;
-+
-+		gettimeofday(&before, NULL);
-+		ret = poll(&fd, 1, timeout_ms);
-+		gettimeofday(&after, NULL);
-+
-+		if (ret == 0) {
-+			ret = -ETIMEDOUT;
-+			break;
-+		}
-+
-+		if (ret > 0) {
-+			if (fd.revents & POLLIN)
-+				ret = kdbus_msg_recv(conn, msg_out, offset);
-+
-+			if (fd.revents & (POLLHUP | POLLERR))
-+				ret = -ECONNRESET;
-+		}
-+
-+		if (ret == 0 || ret != -EAGAIN)
-+			break;
-+
-+		timersub(&after, &before, &diff);
-+		timeout_ms -= diff.tv_sec * 1000UL +
-+			      diff.tv_usec / 1000UL;
-+	} while (timeout_ms > 0);
-+
-+	return ret;
-+}
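-+
-+/*
-+ * Usage sketch (illustrative only, not part of the original helpers):
-+ * receive one message with a 100ms timeout, then release its resources:
-+ *
-+ *	struct kdbus_msg *msg;
-+ *	uint64_t offset;
-+ *
-+ *	ret = kdbus_msg_recv_poll(conn, 100, &msg, &offset);
-+ *	if (ret == 0) {
-+ *		kdbus_msg_free(msg);       // close fds carried in items
-+ *		kdbus_free(conn, offset);  // release the pool slice
-+ *	}
-+ */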
-+
-+int kdbus_free(const struct kdbus_conn *conn, uint64_t offset)
-+{
-+	struct kdbus_cmd_free cmd_free = {};
-+	int ret;
-+
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = offset;
-+	cmd_free.flags = 0;
-+
-+	ret = kdbus_cmd_free(conn->fd, &cmd_free);
-+	if (ret < 0) {
-+		kdbus_printf("KDBUS_CMD_FREE failed: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+int kdbus_name_acquire(struct kdbus_conn *conn,
-+		       const char *name, uint64_t *flags)
-+{
-+	struct kdbus_cmd *cmd_name;
-+	size_t name_len = strlen(name) + 1;
-+	uint64_t size = sizeof(*cmd_name) + KDBUS_ITEM_SIZE(name_len);
-+	struct kdbus_item *item;
-+	int ret;
-+
-+	cmd_name = alloca(size);
-+
-+	memset(cmd_name, 0, size);
-+
-+	item = cmd_name->items;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + name_len;
-+	item->type = KDBUS_ITEM_NAME;
-+	strcpy(item->str, name);
-+
-+	cmd_name->size = size;
-+	if (flags)
-+		cmd_name->flags = *flags;
-+
-+	ret = kdbus_cmd_name_acquire(conn->fd, cmd_name);
-+	if (ret < 0) {
-+		kdbus_printf("error acquiring name: %s\n", strerror(-ret));
-+		return ret;
-+	}
-+
-+	kdbus_printf("%s(): flags after call: 0x%llx\n", __func__,
-+		     cmd_name->return_flags);
-+
-+	if (flags)
-+		*flags = cmd_name->return_flags;
-+
-+	return 0;
-+}
-+
-+int kdbus_name_release(struct kdbus_conn *conn, const char *name)
-+{
-+	struct kdbus_cmd *cmd_name;
-+	size_t name_len = strlen(name) + 1;
-+	uint64_t size = sizeof(*cmd_name) + KDBUS_ITEM_SIZE(name_len);
-+	struct kdbus_item *item;
-+	int ret;
-+
-+	cmd_name = alloca(size);
-+
-+	memset(cmd_name, 0, size);
-+
-+	item = cmd_name->items;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + name_len;
-+	item->type = KDBUS_ITEM_NAME;
-+	strcpy(item->str, name);
-+
-+	cmd_name->size = size;
-+
-+	kdbus_printf("conn %lld giving up name '%s'\n",
-+		     (unsigned long long) conn->id, name);
-+
-+	ret = kdbus_cmd_name_release(conn->fd, cmd_name);
-+	if (ret < 0) {
-+		kdbus_printf("error releasing name: %s\n", strerror(-ret));
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+int kdbus_list(struct kdbus_conn *conn, uint64_t flags)
-+{
-+	struct kdbus_cmd_list cmd_list = {};
-+	struct kdbus_info *list, *name;
-+	int ret;
-+
-+	cmd_list.size = sizeof(cmd_list);
-+	cmd_list.flags = flags;
-+
-+	ret = kdbus_cmd_list(conn->fd, &cmd_list);
-+	if (ret < 0) {
-+		kdbus_printf("error listing names: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	kdbus_printf("REGISTRY:\n");
-+	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
-+
-+	KDBUS_FOREACH(name, list, cmd_list.list_size) {
-+		uint64_t flags = 0;
-+		struct kdbus_item *item;
-+		const char *n = "MISSING-NAME";
-+
-+		if (name->size == sizeof(struct kdbus_cmd))
-+			continue;
-+
-+		KDBUS_ITEM_FOREACH(item, name, items)
-+			if (item->type == KDBUS_ITEM_OWNED_NAME) {
-+				n = item->name.name;
-+				flags = item->name.flags;
-+
-+				kdbus_printf("%8llu flags=0x%08llx conn=0x%08llx '%s'\n",
-+					     name->id,
-+					     (unsigned long long) flags,
-+					     name->flags, n);
-+			}
-+	}
-+	kdbus_printf("\n");
-+
-+	ret = kdbus_free(conn, cmd_list.offset);
-+
-+	return ret;
-+}
-+
-+int kdbus_conn_update_attach_flags(struct kdbus_conn *conn,
-+				   uint64_t attach_flags_send,
-+				   uint64_t attach_flags_recv)
-+{
-+	int ret;
-+	size_t size;
-+	struct kdbus_cmd *update;
-+	struct kdbus_item *item;
-+
-+	size = sizeof(struct kdbus_cmd);
-+	size += KDBUS_ITEM_SIZE(sizeof(uint64_t)) * 2;
-+
-+	update = malloc(size);
-+	if (!update) {
-+		kdbus_printf("error malloc: %m\n");
-+		return -ENOMEM;
-+	}
-+
-+	memset(update, 0, size);
-+	update->size = size;
-+
-+	item = update->items;
-+
-+	item->type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(uint64_t);
-+	item->data64[0] = attach_flags_send;
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	item->type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(uint64_t);
-+	item->data64[0] = attach_flags_recv;
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	ret = kdbus_cmd_update(conn->fd, update);
-+	if (ret < 0)
-+		kdbus_printf("error conn update: %d (%m)\n", ret);
-+
-+	free(update);
-+
-+	return ret;
-+}
-+
-+int kdbus_conn_update_policy(struct kdbus_conn *conn, const char *name,
-+			     const struct kdbus_policy_access *access,
-+			     size_t num_access)
-+{
-+	struct kdbus_cmd *update;
-+	struct kdbus_item *item;
-+	size_t i, size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_cmd);
-+	size += KDBUS_ITEM_SIZE(strlen(name) + 1);
-+	size += num_access * KDBUS_ITEM_SIZE(sizeof(struct kdbus_policy_access));
-+
-+	update = malloc(size);
-+	if (!update) {
-+		kdbus_printf("error malloc: %m\n");
-+		return -ENOMEM;
-+	}
-+
-+	memset(update, 0, size);
-+	update->size = size;
-+
-+	item = update->items;
-+
-+	item->type = KDBUS_ITEM_NAME;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
-+	strcpy(item->str, name);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	for (i = 0; i < num_access; i++) {
-+		item->size = KDBUS_ITEM_HEADER_SIZE +
-+			     sizeof(struct kdbus_policy_access);
-+		item->type = KDBUS_ITEM_POLICY_ACCESS;
-+
-+		item->policy_access.type = access[i].type;
-+		item->policy_access.access = access[i].access;
-+		item->policy_access.id = access[i].id;
-+
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+
-+	ret = kdbus_cmd_update(conn->fd, update);
-+	if (ret < 0)
-+		kdbus_printf("error conn update: %d (%m)\n", ret);
-+
-+	free(update);
-+
-+	return ret;
-+}
-+
-+int kdbus_add_match_id(struct kdbus_conn *conn, uint64_t cookie,
-+		       uint64_t type, uint64_t id)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_id_change chg;
-+		} item;
-+	} buf;
-+	int ret;
-+
-+	memset(&buf, 0, sizeof(buf));
-+
-+	buf.cmd.size = sizeof(buf);
-+	buf.cmd.cookie = cookie;
-+	buf.item.size = sizeof(buf.item);
-+	buf.item.type = type;
-+	buf.item.chg.id = id;
-+
-+	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
-+	if (ret < 0)
-+		kdbus_printf("--- error adding conn match: %d (%m)\n", ret);
-+
-+	return ret;
-+}
-+
-+int kdbus_add_match_empty(struct kdbus_conn *conn)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct kdbus_item item;
-+	} buf;
-+	int ret;
-+
-+	memset(&buf, 0, sizeof(buf));
-+
-+	buf.item.size = sizeof(uint64_t) * 3;
-+	buf.item.type = KDBUS_ITEM_ID;
-+	buf.item.id = KDBUS_MATCH_ID_ANY;
-+
-+	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
-+
-+	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
-+	if (ret < 0)
-+		kdbus_printf("--- error adding conn match: %d (%m)\n", ret);
-+
-+	return ret;
-+}
-+
-+static int all_ids_are_mapped(const char *path)
-+{
-+	int ret;
-+	FILE *file;
-+	uint32_t inside_id, length;
-+
-+	file = fopen(path, "r");
-+	if (!file) {
-+		ret = -errno;
-+		kdbus_printf("error fopen() %s: %d (%m)\n",
-+			     path, ret);
-+		return ret;
-+	}
-+
-+	ret = fscanf(file, "%u\t%*u\t%u", &inside_id, &length);
-+	if (ret != 2) {
-+		if (ferror(file))
-+			ret = -errno;
-+		else
-+			ret = -EIO;
-+
-+		kdbus_printf("--- error fscanf(): %d\n", ret);
-+		fclose(file);
-+		return ret;
-+	}
-+
-+	fclose(file);
-+
-+	/*
-+	 * If the length is 4294967295, i.e. the invalid uid
-+	 * (uid_t) -1, then we are able to map all uids/gids.
-+	 */
-+	if (inside_id == 0 && length == (uid_t) -1)
-+		return 1;
-+
-+	return 0;
-+}
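-+
-+/*
-+ * For reference (illustrative): a complete mapping in
-+ * /proc/self/uid_map is a single line of the form
-+ *
-+ *	0          0 4294967295
-+ *
-+ * i.e. inside-id 0, outside-id 0 and length (uid_t)-1, which is
-+ * exactly what the check above accepts.
-+ */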
-+
-+int all_uids_gids_are_mapped(void)
-+{
-+	int ret;
-+
-+	ret = all_ids_are_mapped("/proc/self/uid_map");
-+	if (ret <= 0) {
-+		kdbus_printf("--- error not all uids are mapped\n");
-+		return 0;
-+	}
-+
-+	ret = all_ids_are_mapped("/proc/self/gid_map");
-+	if (ret <= 0) {
-+		kdbus_printf("--- error not all gids are mapped\n");
-+		return 0;
-+	}
-+
-+	return 1;
-+}
-+
-+int drop_privileges(uid_t uid, gid_t gid)
-+{
-+	int ret;
-+
-+	ret = setgroups(0, NULL);
-+	if (ret < 0) {
-+		ret = -errno;
-+		kdbus_printf("error setgroups: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	ret = setresgid(gid, gid, gid);
-+	if (ret < 0) {
-+		ret = -errno;
-+		kdbus_printf("error setresgid: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	ret = setresuid(uid, uid, uid);
-+	if (ret < 0) {
-+		ret = -errno;
-+		kdbus_printf("error setresuid: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return ret;
-+}
-+
-+uint64_t now(clockid_t clock)
-+{
-+	struct timespec spec;
-+
-+	clock_gettime(clock, &spec);
-+	return spec.tv_sec * 1000ULL * 1000ULL * 1000ULL + spec.tv_nsec;
-+}
-+
-+char *unique_name(const char *prefix)
-+{
-+	unsigned int i;
-+	uint64_t u_now;
-+	char n[17];
-+	char *str;
-+	int r;
-+
-+	/*
-+	 * This returns a random string which is guaranteed to be
-+	 * globally unique across all calls to unique_name(). We
-+	 * compose the string as:
-+	 *   <prefix>-<random>-<time>
-+	 * With:
-+	 *   <prefix>: string provided by the caller
-+	 *   <random>: a random alpha string of 16 characters
-+	 *   <time>: the current time in nanoseconds since boot
-+	 *
-+	 * The <random> part makes the string always look vastly different,
-+	 * the <time> part makes sure no two calls return the same string.
-+	 */
-+
-+	u_now = now(CLOCK_MONOTONIC);
-+
-+	for (i = 0; i < sizeof(n) - 1; ++i)
-+		n[i] = 'a' + (rand() % ('z' - 'a'));
-+	n[sizeof(n) - 1] = 0;
-+
-+	r = asprintf(&str, "%s-%s-%" PRIu64, prefix, n, u_now);
-+	if (r < 0)
-+		return NULL;
-+
-+	return str;
-+}
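-+
-+/*
-+ * Usage sketch (illustrative): the returned string is heap-allocated
-+ * and owned by the caller, e.g.
-+ *
-+ *	char *name = unique_name("bus");   // "bus-<random>-<time>"
-+ *	...
-+ *	free(name);
-+ */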
-+
-+static int do_userns_map_id(pid_t pid,
-+			    const char *map_file,
-+			    const char *map_id)
-+{
-+	int ret;
-+	int fd;
-+	char *map;
-+	unsigned int i;
-+
-+	map = strndupa(map_id, strlen(map_id));
-+	if (!map) {
-+		ret = -errno;
-+		kdbus_printf("error strndupa %s: %d (%m)\n",
-+			map_file, ret);
-+		return ret;
-+	}
-+
-+	for (i = 0; i < strlen(map); i++)
-+		if (map[i] == ',')
-+			map[i] = '\n';
-+
-+	fd = open(map_file, O_RDWR);
-+	if (fd < 0) {
-+		ret = -errno;
-+		kdbus_printf("error open %s: %d (%m)\n",
-+			map_file, ret);
-+		return ret;
-+	}
-+
-+	ret = write(fd, map, strlen(map));
-+	if (ret < 0) {
-+		ret = -errno;
-+		kdbus_printf("error write to %s: %d (%m)\n",
-+			     map_file, ret);
-+		goto out;
-+	}
-+
-+	ret = 0;
-+
-+out:
-+	close(fd);
-+	return ret;
-+}
-+
-+int userns_map_uid_gid(pid_t pid,
-+		       const char *map_uid,
-+		       const char *map_gid)
-+{
-+	int fd, ret;
-+	char file_id[128] = {'\0'};
-+
-+	snprintf(file_id, sizeof(file_id), "/proc/%ld/uid_map",
-+		 (long) pid);
-+
-+	ret = do_userns_map_id(pid, file_id, map_uid);
-+	if (ret < 0)
-+		return ret;
-+
-+	snprintf(file_id, sizeof(file_id), "/proc/%ld/setgroups",
-+		 (long) pid);
-+
-+	fd = open(file_id, O_WRONLY);
-+	if (fd >= 0) {
-+		write(fd, "deny\n", 5);
-+		close(fd);
-+	}
-+
-+	snprintf(file_id, sizeof(file_id), "/proc/%ld/gid_map",
-+		 (long) pid);
-+
-+	return do_userns_map_id(pid, file_id, map_gid);
-+}
-+
-+static int do_cap_get_flag(cap_t caps, cap_value_t cap)
-+{
-+	int ret;
-+	cap_flag_value_t flag_set;
-+
-+	ret = cap_get_flag(caps, cap, CAP_EFFECTIVE, &flag_set);
-+	if (ret < 0) {
-+		ret = -errno;
-+		kdbus_printf("error cap_get_flag(): %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return (flag_set == CAP_SET);
-+}
-+
-+/*
-+ * Returns:
-+ *  1 in case all the requested effective capabilities are set.
-+ *  0 in case we do not have the requested capabilities. This value
-+ *    will be used to abort tests with TEST_SKIP
-+ *  Negative errno on failure.
-+ *
-+ *  Terminate args with a negative value.
-+ */
-+int test_is_capable(int cap, ...)
-+{
-+	int ret;
-+	va_list ap;
-+	cap_t caps;
-+
-+	caps = cap_get_proc();
-+	if (!caps) {
-+		ret = -errno;
-+		kdbus_printf("error cap_get_proc(): %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	ret = do_cap_get_flag(caps, (cap_value_t)cap);
-+	if (ret <= 0)
-+		goto out;
-+
-+	va_start(ap, cap);
-+	while ((cap = va_arg(ap, int)) > 0) {
-+		ret = do_cap_get_flag(caps, (cap_value_t)cap);
-+		if (ret <= 0)
-+			break;
-+	}
-+	va_end(ap);
-+
-+out:
-+	cap_free(caps);
-+	return ret;
-+}
-+
-+int config_user_ns_is_enabled(void)
-+{
-+	return (access("/proc/self/uid_map", F_OK) == 0);
-+}
-+
-+int config_auditsyscall_is_enabled(void)
-+{
-+	return (access("/proc/self/loginuid", F_OK) == 0);
-+}
-+
-+int config_cgroups_is_enabled(void)
-+{
-+	return (access("/proc/self/cgroup", F_OK) == 0);
-+}
-+
-+int config_security_is_enabled(void)
-+{
-+	int fd;
-+	int ret;
-+	char buf[128];
-+
-+	/* CONFIG_SECURITY is disabled */
-+	if (access("/proc/self/attr/current", F_OK) != 0)
-+		return 0;
-+
-+	/*
-+	 * Only if read() fails with -EINVAL do we assume that
-+	 * SECLABEL and LSM are disabled.
-+	 */
-+	fd = open("/proc/self/attr/current", O_RDONLY|O_CLOEXEC);
-+	if (fd < 0)
-+		return 1;
-+
-+	ret = read(fd, buf, sizeof(buf));
-+	if (ret == -1 && errno == EINVAL)
-+		ret = 0;
-+	else
-+		ret = 1;
-+
-+	close(fd);
-+
-+	return ret;
-+}
-diff --git a/tools/testing/selftests/kdbus/kdbus-util.h b/tools/testing/selftests/kdbus/kdbus-util.h
-new file mode 100644
-index 0000000..e1e18b9
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/kdbus-util.h
-@@ -0,0 +1,218 @@
-+/*
-+ * Copyright (C) 2013-2015 Kay Sievers
-+ * Copyright (C) 2013-2015 Daniel Mack
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#pragma once
-+
-+#define BIT(X) (1 << (X))
-+
-+#include <time.h>
-+#include <stdbool.h>
-+#include <linux/kdbus.h>
-+
-+#define _STRINGIFY(x) #x
-+#define STRINGIFY(x) _STRINGIFY(x)
-+#define ELEMENTSOF(x) (sizeof(x)/sizeof((x)[0]))
-+
-+#define KDBUS_PTR(addr) ((void *)(uintptr_t)(addr))
-+
-+#define KDBUS_ALIGN8(l) (((l) + 7) & ~7)
-+#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
-+#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
-+
-+#define KDBUS_ITEM_NEXT(item) \
-+	(typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
-+#define KDBUS_ITEM_FOREACH(item, head, first)				\
-+	for ((item) = (head)->first;					\
-+	     ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) &&	\
-+	       ((uint8_t *)(item) >= (uint8_t *)(head));		\
-+	     (item) = KDBUS_ITEM_NEXT(item))
-+#define KDBUS_FOREACH(iter, first, _size)				\
-+	for ((iter) = (first);						\
-+	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
-+	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
-+	     (iter) = (void *)((uint8_t *)(iter) + KDBUS_ALIGN8((iter)->size)))
-+
-+#define _KDBUS_ATTACH_BITS_SET_NR (__builtin_popcountll(_KDBUS_ATTACH_ALL))
-+
-+/* Sum of KDBUS_ITEM_* that reflects _KDBUS_ATTACH_ALL */
-+#define KDBUS_ATTACH_ITEMS_TYPE_SUM					\
-+	((((_KDBUS_ATTACH_BITS_SET_NR - 1) *				\
-+	((_KDBUS_ATTACH_BITS_SET_NR - 1) + 1)) / 2) +			\
-+	(_KDBUS_ITEM_ATTACH_BASE * _KDBUS_ATTACH_BITS_SET_NR))
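-+
-+/*
-+ * The n = _KDBUS_ATTACH_BITS_SET_NR attach item types are the consecutive
-+ * values _KDBUS_ITEM_ATTACH_BASE .. _KDBUS_ITEM_ATTACH_BASE + n - 1, so
-+ * their sum is n * _KDBUS_ITEM_ATTACH_BASE + n * (n - 1) / 2, which is
-+ * what the macro above computes.
-+ */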
-+
-+#define POOL_SIZE (16 * 1024LU * 1024LU)
-+
-+#define UNPRIV_UID 65534
-+#define UNPRIV_GID 65534
-+
-+/* Dump as user of process, useful for user namespace testing */
-+#define SUID_DUMP_USER	1
-+
-+extern int kdbus_util_verbose;
-+
-+#define kdbus_printf(X...) \
-+	if (kdbus_util_verbose) \
-+		printf(X)
-+
-+#define RUN_UNPRIVILEGED(child_uid, child_gid, _child_, _parent_) ({	\
-+		pid_t pid, rpid;					\
-+		int ret;						\
-+									\
-+		pid = fork();						\
-+		if (pid == 0) {						\
-+			ret = drop_privileges(child_uid, child_gid);	\
-+			ASSERT_EXIT_VAL(ret == 0, ret);			\
-+									\
-+			_child_;					\
-+			_exit(0);					\
-+		} else if (pid > 0) {					\
-+			_parent_;					\
-+			rpid = waitpid(pid, &ret, 0);			\
-+			ASSERT_RETURN(rpid == pid);			\
-+			ASSERT_RETURN(WIFEXITED(ret));			\
-+			ASSERT_RETURN(WEXITSTATUS(ret) == 0);		\
-+			ret = TEST_OK;					\
-+		} else {						\
-+			ret = pid;					\
-+		}							\
-+									\
-+		ret;							\
-+	})
-+
-+#define RUN_UNPRIVILEGED_CONN(_var_, _bus_, _code_)			\
-+	RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({			\
-+		struct kdbus_conn *_var_;				\
-+		_var_ = kdbus_hello(_bus_, 0, NULL, 0);			\
-+		ASSERT_EXIT(_var_);					\
-+		_code_;							\
-+		kdbus_conn_free(_var_);					\
-+	}), ({ 0; }))
-+
-+#define RUN_CLONE_CHILD(clone_ret, flags, _setup_, _child_body_,	\
-+			_parent_setup_, _parent_body_) ({		\
-+	pid_t pid, rpid;						\
-+	int ret;							\
-+	int efd = -1;							\
-+									\
-+	_setup_;							\
-+	efd = eventfd(0, EFD_CLOEXEC);					\
-+	ASSERT_RETURN(efd >= 0);					\
-+	*(clone_ret) = 0;						\
-+	pid = syscall(__NR_clone, flags, NULL);				\
-+	if (pid == 0) {							\
-+		eventfd_t event_status = 0;				\
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);			\
-+		ASSERT_EXIT(ret == 0);					\
-+		ret = eventfd_read(efd, &event_status);			\
-+		if (ret < 0 || event_status != 1) {			\
-+			kdbus_printf("error eventfd_read()\n");		\
-+			_exit(EXIT_FAILURE);				\
-+		}							\
-+		_child_body_;						\
-+		_exit(0);						\
-+	} else if (pid > 0) {						\
-+		_parent_setup_;						\
-+		ret = eventfd_write(efd, 1);				\
-+		ASSERT_RETURN(ret >= 0);				\
-+		_parent_body_;						\
-+		rpid = waitpid(pid, &ret, 0);				\
-+		ASSERT_RETURN(rpid == pid);				\
-+		ASSERT_RETURN(WIFEXITED(ret));				\
-+		ASSERT_RETURN(WEXITSTATUS(ret) == 0);			\
-+		ret = TEST_OK;						\
-+	} else {							\
-+		ret = -errno;						\
-+		*(clone_ret) = -errno;					\
-+	}								\
-+	close(efd);							\
-+	ret;								\
-+})
-+
-+/* Enum telling the parent whether it should drop privileges */
-+enum kdbus_drop_parent {
-+	DO_NOT_DROP,
-+	DROP_SAME_UNPRIV,
-+	DROP_OTHER_UNPRIV,
-+};
-+
-+struct kdbus_conn {
-+	int fd;
-+	uint64_t id;
-+	unsigned char *buf;
-+};
-+
-+int kdbus_sysfs_get_parameter_mask(const char *path, uint64_t *mask);
-+int kdbus_sysfs_set_parameter_mask(const char *path, uint64_t mask);
-+
-+int sys_memfd_create(const char *name, __u64 size);
-+int sys_memfd_seal_set(int fd);
-+off_t sys_memfd_get_size(int fd, off_t *size);
-+
-+int kdbus_list(struct kdbus_conn *conn, uint64_t flags);
-+int kdbus_name_release(struct kdbus_conn *conn, const char *name);
-+int kdbus_name_acquire(struct kdbus_conn *conn, const char *name,
-+		       uint64_t *flags);
-+void kdbus_msg_free(struct kdbus_msg *msg);
-+int kdbus_msg_recv(struct kdbus_conn *conn,
-+		   struct kdbus_msg **msg, uint64_t *offset);
-+int kdbus_msg_recv_poll(struct kdbus_conn *conn, int timeout_ms,
-+			struct kdbus_msg **msg_out, uint64_t *offset);
-+int kdbus_free(const struct kdbus_conn *conn, uint64_t offset);
-+int kdbus_msg_dump(const struct kdbus_conn *conn,
-+		   const struct kdbus_msg *msg);
-+int kdbus_create_bus(int control_fd, const char *name,
-+		     uint64_t owner_meta, char **path);
-+int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
-+		   uint64_t cookie, uint64_t flags, uint64_t timeout,
-+		   int64_t priority, uint64_t dst_id);
-+int kdbus_msg_send_sync(const struct kdbus_conn *conn, const char *name,
-+			uint64_t cookie, uint64_t flags, uint64_t timeout,
-+			int64_t priority, uint64_t dst_id, int cancel_fd);
-+int kdbus_msg_send_reply(const struct kdbus_conn *conn,
-+			 uint64_t reply_cookie,
-+			 uint64_t dst_id);
-+struct kdbus_conn *kdbus_hello(const char *path, uint64_t hello_flags,
-+			       const struct kdbus_item *item,
-+			       size_t item_size);
-+struct kdbus_conn *kdbus_hello_registrar(const char *path, const char *name,
-+					 const struct kdbus_policy_access *access,
-+					 size_t num_access, uint64_t flags);
-+struct kdbus_conn *kdbus_hello_activator(const char *path, const char *name,
-+					 const struct kdbus_policy_access *access,
-+					 size_t num_access);
-+bool kdbus_item_in_message(struct kdbus_msg *msg, uint64_t type);
-+int kdbus_bus_creator_info(struct kdbus_conn *conn,
-+			   uint64_t flags,
-+			   uint64_t *offset);
-+int kdbus_conn_info(struct kdbus_conn *conn, uint64_t id,
-+		    const char *name, uint64_t flags, uint64_t *offset);
-+void kdbus_conn_free(struct kdbus_conn *conn);
-+int kdbus_conn_update_attach_flags(struct kdbus_conn *conn,
-+				   uint64_t attach_flags_send,
-+				   uint64_t attach_flags_recv);
-+int kdbus_conn_update_policy(struct kdbus_conn *conn, const char *name,
-+			     const struct kdbus_policy_access *access,
-+			     size_t num_access);
-+
-+int kdbus_add_match_id(struct kdbus_conn *conn, uint64_t cookie,
-+		       uint64_t type, uint64_t id);
-+int kdbus_add_match_empty(struct kdbus_conn *conn);
-+
-+int all_uids_gids_are_mapped(void);
-+int drop_privileges(uid_t uid, gid_t gid);
-+uint64_t now(clockid_t clock);
-+char *unique_name(const char *prefix);
-+
-+int userns_map_uid_gid(pid_t pid, const char *map_uid, const char *map_gid);
-+int test_is_capable(int cap, ...);
-+int config_user_ns_is_enabled(void);
-+int config_auditsyscall_is_enabled(void);
-+int config_cgroups_is_enabled(void);
-+int config_security_is_enabled(void);
-diff --git a/tools/testing/selftests/kdbus/test-activator.c b/tools/testing/selftests/kdbus/test-activator.c
-new file mode 100644
-index 0000000..3d1b763
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-activator.c
-@@ -0,0 +1,318 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stdbool.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <sys/capability.h>
-+#include <sys/types.h>
-+#include <sys/wait.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+static int kdbus_starter_poll(struct kdbus_conn *conn)
-+{
-+	int ret;
-+	struct pollfd fd;
-+
-+	fd.fd = conn->fd;
-+	fd.events = POLLIN | POLLPRI | POLLHUP;
-+	fd.revents = 0;
-+
-+	ret = poll(&fd, 1, 100);
-+	if (ret == 0)
-+		return -ETIMEDOUT;
-+	else if (ret > 0) {
-+		if (fd.revents & POLLIN)
-+			return 0;
-+
-+		if (fd.revents & (POLLHUP | POLLERR))
-+			ret = -ECONNRESET;
-+	}
-+
-+	return ret;
-+}
-+
-+/* Ensure that kdbus activator logic is safe */
-+static int kdbus_priv_activator(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	struct kdbus_msg *msg = NULL;
-+	uint64_t cookie = 0xdeadbeef;
-+	uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
-+	struct kdbus_conn *activator;
-+	struct kdbus_conn *service;
-+	struct kdbus_conn *client;
-+	struct kdbus_conn *holder;
-+	struct kdbus_policy_access *access;
-+
-+	access = (struct kdbus_policy_access[]){
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = getuid(),
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = getuid(),
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+	};
-+
-+	activator = kdbus_hello_activator(env->buspath, "foo.priv.activator",
-+					  access, 2);
-+	ASSERT_RETURN(activator);
-+
-+	service = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(service);
-+
-+	client = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(client);
-+
-+	/*
-+	 * Make sure that other users can't TALK to the activator
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		/* Try to talk using the ID */
-+		ret = kdbus_msg_send(unpriv, NULL, 0xdeadbeef, 0, 0,
-+				     0, activator->id);
-+		ASSERT_EXIT(ret == -ENXIO);
-+
-+		/* Try to talk to the name */
-+		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
-+				     0xdeadbeef, 0, 0, 0,
-+				     KDBUS_DST_ID_NAME);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure that we did not receive anything, so the
-+	 * service will not be started automatically
-+	 */
-+
-+	ret = kdbus_starter_poll(activator);
-+	ASSERT_RETURN(ret == -ETIMEDOUT);
-+
-+	/*
-+	 * Now try to emulate the starter/service logic and
-+	 * acquire the name.
-+	 */
-+
-+	cookie++;
-+	ret = kdbus_msg_send(service, "foo.priv.activator", cookie,
-+			     0, 0, 0, KDBUS_DST_ID_NAME);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_starter_poll(activator);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Policies are still checked, access denied */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "foo.priv.activator",
-+					 &flags);
-+		ASSERT_RETURN(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_name_acquire(service, "foo.priv.activator",
-+				 &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* We read our previous starter message */
-+
-+	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Try to talk, we still fail */
-+
-+	cookie++;
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		/* Try to talk to the name */
-+		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
-+				     cookie, 0, 0, 0,
-+				     KDBUS_DST_ID_NAME);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* Still nothing to read */
-+
-+	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == -ETIMEDOUT);
-+
-+	/* We receive everything now */
-+
-+	cookie++;
-+	ret = kdbus_msg_send(client, "foo.priv.activator", cookie,
-+			     0, 0, 0, KDBUS_DST_ID_NAME);
-+	ASSERT_RETURN(ret == 0);
-+	ret = kdbus_msg_recv_poll(service, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/* Policies default to deny TALK now */
-+	kdbus_conn_free(activator);
-+
-+	cookie++;
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		/* Try to talk to the name */
-+		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
-+				     cookie, 0, 0, 0,
-+				     KDBUS_DST_ID_NAME);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == -ETIMEDOUT);
-+
-+	/* Same user is able to TALK */
-+	cookie++;
-+	ret = kdbus_msg_send(client, "foo.priv.activator", cookie,
-+			     0, 0, 0, KDBUS_DST_ID_NAME);
-+	ASSERT_RETURN(ret == 0);
-+	ret = kdbus_msg_recv_poll(service, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	access = (struct kdbus_policy_access []){
-+		{
-+			.type = KDBUS_POLICY_ACCESS_WORLD,
-+			.id = getuid(),
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+	};
-+
-+	holder = kdbus_hello_registrar(env->buspath, "foo.priv.activator",
-+				       access, 1, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(holder);
-+
-+	/* Now we are able to TALK to the name */
-+
-+	cookie++;
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		/* Try to talk to the name */
-+		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
-+				     cookie, 0, 0, 0,
-+				     KDBUS_DST_ID_NAME);
-+		ASSERT_EXIT(ret == 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "foo.priv.activator",
-+					 &flags);
-+		ASSERT_RETURN(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	kdbus_conn_free(service);
-+	kdbus_conn_free(client);
-+	kdbus_conn_free(holder);
-+
-+	return 0;
-+}
-+
-+int kdbus_test_activator(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	struct kdbus_conn *activator;
-+	struct pollfd fds[2];
-+	bool activator_done = false;
-+	struct kdbus_policy_access access[2];
-+
-+	access[0].type = KDBUS_POLICY_ACCESS_USER;
-+	access[0].id = getuid();
-+	access[0].access = KDBUS_POLICY_OWN;
-+
-+	access[1].type = KDBUS_POLICY_ACCESS_WORLD;
-+	access[1].access = KDBUS_POLICY_TALK;
-+
-+	activator = kdbus_hello_activator(env->buspath, "foo.test.activator",
-+					  access, 2);
-+	ASSERT_RETURN(activator);
-+
-+	ret = kdbus_add_match_empty(env->conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_list(env->conn, KDBUS_LIST_NAMES |
-+				    KDBUS_LIST_UNIQUE |
-+				    KDBUS_LIST_ACTIVATORS |
-+				    KDBUS_LIST_QUEUED);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_send(env->conn, "foo.test.activator", 0xdeafbeef,
-+			     0, 0, 0, KDBUS_DST_ID_NAME);
-+	ASSERT_RETURN(ret == 0);
-+
-+	fds[0].fd = activator->fd;
-+	fds[1].fd = env->conn->fd;
-+
-+	kdbus_printf("-- entering poll loop ...\n");
-+
-+	for (;;) {
-+		int i, nfds = sizeof(fds) / sizeof(fds[0]);
-+
-+		for (i = 0; i < nfds; i++) {
-+			fds[i].events = POLLIN | POLLPRI;
-+			fds[i].revents = 0;
-+		}
-+
-+		ret = poll(fds, nfds, 3000);
-+		ASSERT_RETURN(ret >= 0);
-+
-+		ret = kdbus_list(env->conn, KDBUS_LIST_NAMES);
-+		ASSERT_RETURN(ret == 0);
-+
-+		if ((fds[0].revents & POLLIN) && !activator_done) {
-+			uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
-+
-+			kdbus_printf("Starter was called back!\n");
-+
-+			ret = kdbus_name_acquire(env->conn,
-+						 "foo.test.activator", &flags);
-+			ASSERT_RETURN(ret == 0);
-+
-+			activator_done = true;
-+		}
-+
-+		if (fds[1].revents & POLLIN) {
-+			kdbus_msg_recv(env->conn, NULL, NULL);
-+			break;
-+		}
-+	}
-+
-+	/* Check if all uids/gids are mapped */
-+	if (!all_uids_gids_are_mapped())
-+		return TEST_SKIP;
-+
-+	/* Check capabilities only now, so the previous tests still run */
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	if (!ret)
-+		return TEST_SKIP;
-+
-+	ret = kdbus_priv_activator(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(activator);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-benchmark.c b/tools/testing/selftests/kdbus/test-benchmark.c
-new file mode 100644
-index 0000000..8a9744b
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-benchmark.c
-@@ -0,0 +1,451 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <locale.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <sys/time.h>
-+#include <sys/mman.h>
-+#include <sys/socket.h>
-+#include <math.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+#define SERVICE_NAME "foo.bar.echo"
-+
-+/*
-+ * For a benchmark comparison with unix sockets, set:
-+ * use_memfd	= false;
-+ * compare_uds	= true;
-+ * attach_none	= true;		do not attach metadata
-+ * (kdbus_test_benchmark_uds() below sets exactly this combination)
-+ */
-+
-+static bool use_memfd = true;		/* transmit memfd? */
-+static bool compare_uds = false;		/* unix-socket comparison? */
-+static bool attach_none = false;		/* clear attach-flags? */
-+static char stress_payload[8192];
-+
-+struct stats {
-+	uint64_t count;
-+	uint64_t latency_acc;
-+	uint64_t latency_low;
-+	uint64_t latency_high;
-+	uint64_t latency_avg;
-+	uint64_t latency_ssquares;
-+};
-+
-+static struct stats stats;
-+
-+static void reset_stats(void)
-+{
-+	stats.count = 0;
-+	stats.latency_acc = 0;
-+	stats.latency_low = UINT64_MAX;
-+	stats.latency_high = 0;
-+	stats.latency_avg = 0;
-+	stats.latency_ssquares = 0;
-+}
-+
-+static void dump_stats(bool is_uds)
-+{
-+	if (stats.count > 0) {
-+		kdbus_printf("stats %s: %'llu packets processed, latency (nsecs) min/max/avg/dev %'7llu // %'7llu // %'7llu // %'7.f\n",
-+			     is_uds ? " (UNIX)" : "(KDBUS)",
-+			     (unsigned long long) stats.count,
-+			     (unsigned long long) stats.latency_low,
-+			     (unsigned long long) stats.latency_high,
-+			     (unsigned long long) stats.latency_avg,
-+			     sqrt(stats.latency_ssquares / stats.count));
-+	} else {
-+		kdbus_printf("*** no packets received. bus stuck?\n");
-+	}
-+}
-+
-+static void add_stats(uint64_t prev)
-+{
-+	uint64_t diff, latency_avg_prev;
-+
-+	diff = now(CLOCK_THREAD_CPUTIME_ID) - prev;
-+
-+	stats.count++;
-+	stats.latency_acc += diff;
-+
-+	/* see Welford62 */
-+	latency_avg_prev = stats.latency_avg;
-+	stats.latency_avg = stats.latency_acc / stats.count;
-+	stats.latency_ssquares += (diff - latency_avg_prev) * (diff - stats.latency_avg);
-+
-+	if (stats.latency_low > diff)
-+		stats.latency_low = diff;
-+
-+	if (stats.latency_high < diff)
-+		stats.latency_high = diff;
-+}
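-+
-+/*
-+ * On the "Welford62" reference above: the sum of squared deviations is
-+ * kept incrementally as
-+ *
-+ *	S += (x - mean_prev) * (x - mean_new)
-+ *
-+ * so that S / count yields the variance in a single pass; dump_stats()
-+ * then takes sqrt() of it to report the deviation.
-+ */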
-+
-+static int setup_simple_kdbus_msg(struct kdbus_conn *conn,
-+				  uint64_t dst_id,
-+				  struct kdbus_msg **msg_out)
-+{
-+	struct kdbus_msg *msg;
-+	struct kdbus_item *item;
-+	uint64_t size;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	msg = malloc(size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = conn->id;
-+	msg->dst_id = dst_id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	item = msg->items;
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t) stress_payload;
-+	item->vec.size = sizeof(stress_payload);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	*msg_out = msg;
-+
-+	return 0;
-+}
-+
-+static int setup_memfd_kdbus_msg(struct kdbus_conn *conn,
-+				 uint64_t dst_id,
-+				 off_t *memfd_item_offset,
-+				 struct kdbus_msg **msg_out)
-+{
-+	struct kdbus_msg *msg;
-+	struct kdbus_item *item;
-+	uint64_t size;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
-+
-+	msg = malloc(size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = conn->id;
-+	msg->dst_id = dst_id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	item = msg->items;
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t) stress_payload;
-+	item->vec.size = sizeof(stress_payload);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_memfd);
-+	item->memfd.size = sizeof(uint64_t);
-+
-+	*memfd_item_offset = (unsigned char *)item - (unsigned char *)msg;
-+	*msg_out = msg;
-+
-+	return 0;
-+}
-+
-+static int
-+send_echo_request(struct kdbus_conn *conn, uint64_t dst_id,
-+		  void *kdbus_msg, off_t memfd_item_offset)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	int memfd = -1;
-+	int ret;
-+
-+	if (use_memfd) {
-+		uint64_t now_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+		struct kdbus_item *item = memfd_item_offset + kdbus_msg;
-+		memfd = sys_memfd_create("memfd-name", 0);
-+		ASSERT_RETURN_VAL(memfd >= 0, memfd);
-+
-+		ret = write(memfd, &now_ns, sizeof(now_ns));
-+		ASSERT_RETURN_VAL(ret == sizeof(now_ns), -EAGAIN);
-+
-+		ret = sys_memfd_seal_set(memfd);
-+		ASSERT_RETURN_VAL(ret == 0, -errno);
-+
-+		item->memfd.fd = memfd;
-+	}
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)kdbus_msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	if (memfd >= 0)
-+		close(memfd);
-+
-+	return 0;
-+}
-+
-+static int
-+handle_echo_reply(struct kdbus_conn *conn, uint64_t send_ns)
-+{
-+	int ret;
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+	struct kdbus_msg *msg;
-+	const struct kdbus_item *item;
-+	bool has_memfd = false;
-+
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	if (ret == -EAGAIN)
-+		return ret;
-+
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	if (!use_memfd)
-+		goto out;
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items) {
-+		switch (item->type) {
-+		case KDBUS_ITEM_PAYLOAD_MEMFD: {
-+			char *buf;
-+
-+			buf = mmap(NULL, item->memfd.size, PROT_READ,
-+				   MAP_PRIVATE, item->memfd.fd, 0);
-+			ASSERT_RETURN_VAL(buf != MAP_FAILED, -EINVAL);
-+			ASSERT_RETURN_VAL(item->memfd.size == sizeof(uint64_t),
-+					  -EINVAL);
-+
-+			add_stats(*(uint64_t*)buf);
-+			munmap(buf, item->memfd.size);
-+			close(item->memfd.fd);
-+			has_memfd = true;
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_PAYLOAD_OFF:
-+			/* ignore */
-+			break;
-+		}
-+	}
-+
-+out:
-+	if (!has_memfd)
-+		add_stats(send_ns);
-+
-+	ret = kdbus_free(conn, recv.msg.offset);
-+	ASSERT_RETURN_VAL(ret == 0, -errno);
-+
-+	return 0;
-+}
-+
-+static int benchmark(struct kdbus_test_env *env)
-+{
-+	static char buf[sizeof(stress_payload)];
-+	struct kdbus_msg *kdbus_msg = NULL;
-+	off_t memfd_cached_offset = 0;
-+	int ret;
-+	struct kdbus_conn *conn_a, *conn_b;
-+	struct pollfd fds[2];
-+	uint64_t start, send_ns, now_ns, diff;
-+	unsigned int i;
-+	int uds[2];
-+
-+	setlocale(LC_ALL, "");
-+
-+	for (i = 0; i < sizeof(stress_payload); i++)
-+		stress_payload[i] = i;
-+
-+	/* setup kdbus pair */
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	ret = kdbus_add_match_empty(conn_a);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_empty(conn_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(conn_a, SERVICE_NAME, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	if (attach_none) {
-+		ret = kdbus_conn_update_attach_flags(conn_a,
-+						     _KDBUS_ATTACH_ALL,
-+						     0);
-+		ASSERT_RETURN(ret == 0);
-+	}
-+
-+	/* setup UDS pair */
-+
-+	ret = socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK, 0, uds);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* setup a kdbus msg now */
-+	if (use_memfd) {
-+		ret = setup_memfd_kdbus_msg(conn_b, conn_a->id,
-+					    &memfd_cached_offset,
-+					    &kdbus_msg);
-+		ASSERT_RETURN(ret == 0);
-+	} else {
-+		ret = setup_simple_kdbus_msg(conn_b, conn_a->id, &kdbus_msg);
-+		ASSERT_RETURN(ret == 0);
-+	}
-+
-+	/* start benchmark */
-+
-+	kdbus_printf("-- entering poll loop ...\n");
-+
-+	do {
-+		/* run kdbus benchmark */
-+		fds[0].fd = conn_a->fd;
-+		fds[1].fd = conn_b->fd;
-+
-+		/* cancel any pending message */
-+		handle_echo_reply(conn_a, 0);
-+
-+		start = now(CLOCK_THREAD_CPUTIME_ID);
-+		reset_stats();
-+
-+		send_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+		ret = send_echo_request(conn_b, conn_a->id,
-+					kdbus_msg, memfd_cached_offset);
-+		ASSERT_RETURN(ret == 0);
-+
-+		while (1) {
-+			unsigned int nfds = sizeof(fds) / sizeof(fds[0]);
-+			unsigned int i;
-+
-+			for (i = 0; i < nfds; i++) {
-+				fds[i].events = POLLIN | POLLPRI | POLLHUP;
-+				fds[i].revents = 0;
-+			}
-+
-+			ret = poll(fds, nfds, 10);
-+			if (ret < 0)
-+				break;
-+
-+			if (fds[0].revents & POLLIN) {
-+				ret = handle_echo_reply(conn_a, send_ns);
-+				ASSERT_RETURN(ret == 0);
-+
-+				send_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+				ret = send_echo_request(conn_b, conn_a->id,
-+							kdbus_msg,
-+							memfd_cached_offset);
-+				ASSERT_RETURN(ret == 0);
-+			}
-+
-+			now_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+			diff = now_ns - start;
-+			if (diff > 1000000000ULL) {
-+				start = now_ns;
-+
-+				dump_stats(false);
-+				break;
-+			}
-+		}
-+
-+		if (!compare_uds)
-+			continue;
-+
-+		/* run unix-socket benchmark as comparison */
-+
-+		fds[0].fd = uds[0];
-+		fds[1].fd = uds[1];
-+
-+		/* cancel any pending message */
-+		read(uds[1], buf, sizeof(buf));
-+
-+		start = now(CLOCK_THREAD_CPUTIME_ID);
-+		reset_stats();
-+
-+		send_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+		ret = write(uds[0], stress_payload, sizeof(stress_payload));
-+		ASSERT_RETURN(ret == sizeof(stress_payload));
-+
-+		while (1) {
-+			unsigned int nfds = sizeof(fds) / sizeof(fds[0]);
-+			unsigned int i;
-+
-+			for (i = 0; i < nfds; i++) {
-+				fds[i].events = POLLIN | POLLPRI | POLLHUP;
-+				fds[i].revents = 0;
-+			}
-+
-+			ret = poll(fds, nfds, 10);
-+			if (ret < 0)
-+				break;
-+
-+			if (fds[1].revents & POLLIN) {
-+				ret = read(uds[1], buf, sizeof(buf));
-+				ASSERT_RETURN(ret == sizeof(buf));
-+
-+				add_stats(send_ns);
-+
-+				send_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+				ret = write(uds[0], buf, sizeof(buf));
-+				ASSERT_RETURN(ret == sizeof(buf));
-+			}
-+
-+			now_ns = now(CLOCK_THREAD_CPUTIME_ID);
-+			diff = now_ns - start;
-+			if (diff > 1000000000ULL) {
-+				start = now_ns;
-+
-+				dump_stats(true);
-+				break;
-+			}
-+		}
-+
-+	} while (kdbus_util_verbose);
-+
-+	kdbus_printf("-- closing bus connections\n");
-+
-+	free(kdbus_msg);
-+
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	return (stats.count > 1) ? TEST_OK : TEST_ERR;
-+}
-+
-+int kdbus_test_benchmark(struct kdbus_test_env *env)
-+{
-+	use_memfd = true;
-+	attach_none = false;
-+	compare_uds = false;
-+	return benchmark(env);
-+}
-+
-+int kdbus_test_benchmark_nomemfds(struct kdbus_test_env *env)
-+{
-+	use_memfd = false;
-+	attach_none = false;
-+	compare_uds = false;
-+	return benchmark(env);
-+}
-+
-+int kdbus_test_benchmark_uds(struct kdbus_test_env *env)
-+{
-+	use_memfd = false;
-+	attach_none = true;
-+	compare_uds = true;
-+	return benchmark(env);
-+}
-diff --git a/tools/testing/selftests/kdbus/test-bus.c b/tools/testing/selftests/kdbus/test-bus.c
-new file mode 100644
-index 0000000..762fb30
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-bus.c
-@@ -0,0 +1,175 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <limits.h>
-+#include <sys/mman.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+static struct kdbus_item *kdbus_get_item(struct kdbus_info *info,
-+					 uint64_t type)
-+{
-+	struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		if (item->type == type)
-+			return item;
-+
-+	return NULL;
-+}
-+
-+static int test_bus_creator_info(const char *bus_path)
-+{
-+	int ret;
-+	uint64_t offset;
-+	struct kdbus_conn *conn;
-+	struct kdbus_info *info;
-+	struct kdbus_item *item;
-+	char *tmp, *busname;
-+
-+	/* extract the bus-name from @bus_path */
-+	tmp = strdup(bus_path);
-+	ASSERT_RETURN(tmp);
-+	busname = strrchr(tmp, '/');
-+	ASSERT_RETURN(busname);
-+	*busname = 0;
-+	busname = strrchr(tmp, '/');
-+	ASSERT_RETURN(busname);
-+	++busname;
-+
-+	conn = kdbus_hello(bus_path, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_bus_creator_info(conn, _KDBUS_ATTACH_ALL, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+
-+	item = kdbus_get_item(info, KDBUS_ITEM_MAKE_NAME);
-+	ASSERT_RETURN(item);
-+	ASSERT_RETURN(!strcmp(item->str, busname));
-+
-+	ret = kdbus_free(conn, offset);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	free(tmp);
-+	kdbus_conn_free(conn);
-+	return 0;
-+}
-+
-+int kdbus_test_bus_make(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd cmd;
-+
-+		/* bloom size item */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_bloom_parameter bloom;
-+		} bs;
-+
-+		/* name item */
-+		uint64_t n_size;
-+		uint64_t n_type;
-+		char name[64];
-+	} bus_make;
-+	char s[PATH_MAX], *name;
-+	int ret, control_fd2;
-+	uid_t uid;
-+
-+	name = unique_name("");
-+	ASSERT_RETURN(name);
-+
-+	snprintf(s, sizeof(s), "%s/control", env->root);
-+	env->control_fd = open(s, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(env->control_fd >= 0);
-+
-+	control_fd2 = open(s, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(control_fd2 >= 0);
-+
-+	memset(&bus_make, 0, sizeof(bus_make));
-+
-+	bus_make.bs.size = sizeof(bus_make.bs);
-+	bus_make.bs.type = KDBUS_ITEM_BLOOM_PARAMETER;
-+	bus_make.bs.bloom.size = 64;
-+	bus_make.bs.bloom.n_hash = 1;
-+
-+	bus_make.n_type = KDBUS_ITEM_MAKE_NAME;
-+
-+	uid = getuid();
-+
-+	/* missing uid prefix */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "foo");
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* non-alphanumeric character */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "%u-blah@123", uid);
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* '-' at the end */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "%u-blah-", uid);
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* create a new bus */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-1", uid, name);
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_cmd_bus_make(control_fd2, &bus_make.cmd);
-+	ASSERT_RETURN(ret == -EEXIST);
-+
-+	snprintf(s, sizeof(s), "%s/%u-%s-1/bus", env->root, uid, name);
-+	ASSERT_RETURN(access(s, F_OK) == 0);
-+
-+	ret = test_bus_creator_info(s);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* can't use the same fd for bus make twice, even though a different
-+	 * bus name is used
-+	 */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-2", uid, name);
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
-+	ASSERT_RETURN(ret == -EBADFD);
-+
-+	/* create a new bus, with different fd and different bus name */
-+	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-2", uid, name);
-+	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
-+	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
-+			    sizeof(bus_make.bs) + bus_make.n_size;
-+	ret = kdbus_cmd_bus_make(control_fd2, &bus_make.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	close(control_fd2);
-+	free(name);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-chat.c b/tools/testing/selftests/kdbus/test-chat.c
-new file mode 100644
-index 0000000..41e5b53
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-chat.c
-@@ -0,0 +1,124 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+int kdbus_test_chat(struct kdbus_test_env *env)
-+{
-+	int ret, cookie;
-+	struct kdbus_conn *conn_a, *conn_b;
-+	struct pollfd fds[2];
-+	uint64_t flags;
-+	int count;
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
-+	ret = kdbus_name_acquire(conn_a, "foo.bar.test", &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(conn_a, "foo.bar.baz", NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	flags = KDBUS_NAME_QUEUE;
-+	ret = kdbus_name_acquire(conn_b, "foo.bar.baz", &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	flags = 0;
-+	ret = kdbus_name_acquire(conn_a, "foo.bar.double", &flags);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(!(flags & KDBUS_NAME_ACQUIRED));
-+
-+	ret = kdbus_name_release(conn_a, "foo.bar.double");
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_release(conn_a, "foo.bar.double");
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
-+				 KDBUS_LIST_NAMES  |
-+				 KDBUS_LIST_QUEUED |
-+				 KDBUS_LIST_ACTIVATORS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_empty(conn_a);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_empty(conn_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cookie = 0;
-+	ret = kdbus_msg_send(conn_b, NULL, 0xc0000000 | cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	fds[0].fd = conn_a->fd;
-+	fds[1].fd = conn_b->fd;
-+
-+	kdbus_printf("-- entering poll loop ...\n");
-+
-+	for (count = 0;; count++) {
-+		int i, nfds = sizeof(fds) / sizeof(fds[0]);
-+
-+		for (i = 0; i < nfds; i++) {
-+			fds[i].events = POLLIN | POLLPRI | POLLHUP;
-+			fds[i].revents = 0;
-+		}
-+
-+		ret = poll(fds, nfds, 3000);
-+		ASSERT_RETURN(ret >= 0);
-+
-+		if (fds[0].revents & POLLIN) {
-+			if (count > 2)
-+				kdbus_name_release(conn_a, "foo.bar.baz");
-+
-+			ret = kdbus_msg_recv(conn_a, NULL, NULL);
-+			ASSERT_RETURN(ret == 0);
-+			ret = kdbus_msg_send(conn_a, NULL,
-+					     0xc0000000 | cookie++,
-+					     0, 0, 0, conn_b->id);
-+			ASSERT_RETURN(ret == 0);
-+		}
-+
-+		if (fds[1].revents & POLLIN) {
-+			ret = kdbus_msg_recv(conn_b, NULL, NULL);
-+			ASSERT_RETURN(ret == 0);
-+			ret = kdbus_msg_send(conn_b, NULL,
-+					     0xc0000000 | cookie++,
-+					     0, 0, 0, conn_a->id);
-+			ASSERT_RETURN(ret == 0);
-+		}
-+
-+		ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
-+					 KDBUS_LIST_NAMES  |
-+					 KDBUS_LIST_QUEUED |
-+					 KDBUS_LIST_ACTIVATORS);
-+		ASSERT_RETURN(ret == 0);
-+
-+		if (count > 10)
-+			break;
-+	}
-+
-+	kdbus_printf("-- closing bus connections\n");
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-connection.c b/tools/testing/selftests/kdbus/test-connection.c
-new file mode 100644
-index 0000000..4688ce8
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-connection.c
-@@ -0,0 +1,597 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <limits.h>
-+#include <sys/types.h>
-+#include <sys/capability.h>
-+#include <sys/mman.h>
-+#include <sys/syscall.h>
-+#include <sys/wait.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+int kdbus_test_hello(struct kdbus_test_env *env)
-+{
-+	struct kdbus_cmd_free cmd_free = {};
-+	struct kdbus_cmd_hello hello;
-+	int fd, ret;
-+
-+	memset(&hello, 0, sizeof(hello));
-+
-+	fd = open(env->buspath, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(fd >= 0);
-+
-+	hello.flags = KDBUS_HELLO_ACCEPT_FD;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
-+	hello.size = sizeof(struct kdbus_cmd_hello);
-+	hello.pool_size = POOL_SIZE;
-+
-+	/* an unaligned hello must result in -EFAULT */
-+	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) ((char *) &hello + 1));
-+	ASSERT_RETURN(ret == -EFAULT);
-+
-+	/* an invalid (too small) size must result in -EINVAL */
-+	hello.size = 1;
-+	hello.flags = KDBUS_HELLO_ACCEPT_FD;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	hello.size = sizeof(struct kdbus_cmd_hello);
-+
-+	/* check faulty flags */
-+	hello.flags = 1ULL << 32;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* check for faulty pool sizes */
-+	hello.pool_size = 0;
-+	hello.flags = KDBUS_HELLO_ACCEPT_FD;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	hello.pool_size = 4097;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	hello.pool_size = POOL_SIZE;
-+
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	hello.offset = (__u64)-1;
-+
-+	/* success test */
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* The kernel should have returned some items */
-+	ASSERT_RETURN(hello.offset != (__u64)-1);
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = hello.offset;
-+	ret = kdbus_cmd_free(fd, &cmd_free);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	close(fd);
-+
-+	fd = open(env->buspath, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(fd >= 0);
-+
-+	/* no ACTIVATOR flag without a name */
-+	hello.flags = KDBUS_HELLO_ACTIVATOR;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	close(fd);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_byebye(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	struct kdbus_cmd_recv cmd_recv = { .size = sizeof(cmd_recv) };
-+	struct kdbus_cmd cmd_byebye = { .size = sizeof(cmd_byebye) };
-+	int ret;
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	ret = kdbus_add_match_empty(conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_empty(env->conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* send over 1st connection */
-+	ret = kdbus_msg_send(env->conn, NULL, 0, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* say byebye on the 2nd, which must fail */
-+	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
-+	ASSERT_RETURN(ret == -EBUSY);
-+
-+	/* receive the message */
-+	ret = kdbus_cmd_recv(conn->fd, &cmd_recv);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_free(conn, cmd_recv.msg.offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* and try again */
-+	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* a 2nd try should result in -ECONNRESET */
-+	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
-+	ASSERT_RETURN(ret == -ECONNRESET);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+/* Get only the first item */
-+static struct kdbus_item *kdbus_get_item(struct kdbus_info *info,
-+					 uint64_t type)
-+{
-+	struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		if (item->type == type)
-+			return item;
-+
-+	return NULL;
-+}
-+
-+static unsigned int kdbus_count_item(struct kdbus_info *info,
-+				     uint64_t type)
-+{
-+	unsigned int i = 0;
-+	const struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		if (item->type == type)
-+			i++;
-+
-+	return i;
-+}
-+
-+static int kdbus_fuzz_conn_info(struct kdbus_test_env *env, int capable)
-+{
-+	int ret;
-+	unsigned int cnt = 0;
-+	uint64_t offset = 0;
-+	struct kdbus_info *info;
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *privileged;
-+	const struct kdbus_item *item;
-+	uint64_t valid_flags = KDBUS_ATTACH_NAMES |
-+			       KDBUS_ATTACH_CREDS |
-+			       KDBUS_ATTACH_PIDS |
-+			       KDBUS_ATTACH_CONN_DESCRIPTION;
-+
-+	uint64_t invalid_flags = KDBUS_ATTACH_NAMES	|
-+				 KDBUS_ATTACH_CREDS	|
-+				 KDBUS_ATTACH_PIDS	|
-+				 KDBUS_ATTACH_CAPS	|
-+				 KDBUS_ATTACH_CGROUP	|
-+				 KDBUS_ATTACH_CONN_DESCRIPTION;
-+
-+	struct kdbus_creds cached_creds;
-+	uid_t ruid, euid, suid;
-+	gid_t rgid, egid, sgid;
-+
-+	getresuid(&ruid, &euid, &suid);
-+	getresgid(&rgid, &egid, &sgid);
-+
-+	cached_creds.uid = ruid;
-+	cached_creds.euid = euid;
-+	cached_creds.suid = suid;
-+	cached_creds.fsuid = ruid;
-+
-+	cached_creds.gid = rgid;
-+	cached_creds.egid = egid;
-+	cached_creds.sgid = sgid;
-+	cached_creds.fsgid = rgid;
-+
-+	struct kdbus_pids cached_pids = {
-+		.pid	= getpid(),
-+		.tid	= syscall(SYS_gettid),
-+		.ppid	= getppid(),
-+	};
-+
-+	ret = kdbus_conn_info(env->conn, env->conn->id, NULL,
-+			      valid_flags, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(env->conn->buf + offset);
-+	ASSERT_RETURN(info->id == env->conn->id);
-+
-+	/* We do not have any well-known name */
-+	item = kdbus_get_item(info, KDBUS_ITEM_NAME);
-+	ASSERT_RETURN(item == NULL);
-+
-+	item = kdbus_get_item(info, KDBUS_ITEM_CONN_DESCRIPTION);
-+	if (valid_flags & KDBUS_ATTACH_CONN_DESCRIPTION) {
-+		ASSERT_RETURN(item);
-+	} else {
-+		ASSERT_RETURN(item == NULL);
-+	}
-+
-+	kdbus_free(env->conn, offset);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	privileged = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(privileged);
-+
-+	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	/* We do not have any well-known name */
-+	item = kdbus_get_item(info, KDBUS_ITEM_NAME);
-+	ASSERT_RETURN(item == NULL);
-+
-+	cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
-+	if (valid_flags & KDBUS_ATTACH_CREDS) {
-+		ASSERT_RETURN(cnt == 1);
-+
-+		item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
-+		ASSERT_RETURN(item);
-+
-+		/* Compare received items with cached creds */
-+		ASSERT_RETURN(memcmp(&item->creds, &cached_creds,
-+				      sizeof(struct kdbus_creds)) == 0);
-+	} else {
-+		ASSERT_RETURN(cnt == 0);
-+	}
-+
-+	item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
-+	if (valid_flags & KDBUS_ATTACH_PIDS) {
-+		ASSERT_RETURN(item);
-+
-+		/* Compare item->pids with cached PIDs */
-+		ASSERT_RETURN(item->pids.pid == cached_pids.pid &&
-+			      item->pids.tid == cached_pids.tid &&
-+			      item->pids.ppid == cached_pids.ppid);
-+	} else {
-+		ASSERT_RETURN(item == NULL);
-+	}
-+
-+	/* We did not request KDBUS_ITEM_CAPS */
-+	item = kdbus_get_item(info, KDBUS_ITEM_CAPS);
-+	ASSERT_RETURN(item == NULL);
-+
-+	kdbus_free(conn, offset);
-+
-+	ret = kdbus_name_acquire(conn, "com.example.a", NULL);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
-+	if (valid_flags & KDBUS_ATTACH_NAMES) {
-+		ASSERT_RETURN(item && !strcmp(item->name.name, "com.example.a"));
-+	} else {
-+		ASSERT_RETURN(item == NULL);
-+	}
-+
-+	kdbus_free(conn, offset);
-+
-+	ret = kdbus_conn_info(conn, 0, "com.example.a", valid_flags, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	kdbus_free(conn, offset);
-+
-+	/* we lack the caps needed to drop to an unprivileged user */
-+	if (!capable)
-+		goto continue_test;
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
-+		ret = kdbus_conn_info(conn, conn->id, NULL,
-+				      valid_flags, &offset);
-+		ASSERT_EXIT(ret == 0);
-+
-+		info = (struct kdbus_info *)(conn->buf + offset);
-+		ASSERT_EXIT(info->id == conn->id);
-+
-+		if (valid_flags & KDBUS_ATTACH_NAMES) {
-+			item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
-+			ASSERT_EXIT(item &&
-+				    strcmp(item->name.name,
-+				           "com.example.a") == 0);
-+		}
-+
-+		if (valid_flags & KDBUS_ATTACH_CREDS) {
-+			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
-+			ASSERT_EXIT(item);
-+
-+			/* Compare received items with cached creds */
-+			ASSERT_EXIT(memcmp(&item->creds, &cached_creds,
-+				    sizeof(struct kdbus_creds)) == 0);
-+		}
-+
-+		if (valid_flags & KDBUS_ATTACH_PIDS) {
-+			item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
-+			ASSERT_EXIT(item);
-+
-+			/*
-+			 * Compare item->pids with cached pids of
-+			 * privileged one.
-+			 *
-+			 * cmd_info will always return cached pids.
-+			 */
-+			ASSERT_EXIT(item->pids.pid == cached_pids.pid &&
-+				    item->pids.tid == cached_pids.tid);
-+		}
-+
-+		kdbus_free(conn, offset);
-+
-+		/*
-+		 * Use invalid_flags and make sure that userspace
-+		 * cannot play games with us.
-+		 */
-+		ret = kdbus_conn_info(conn, conn->id, NULL,
-+				      invalid_flags, &offset);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * Make sure that the kernel returns only one creds
-+		 * item and that it matches the cached creds.
-+		 */
-+		cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
-+		if (invalid_flags & KDBUS_ATTACH_CREDS) {
-+			ASSERT_EXIT(cnt == 1);
-+
-+			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
-+			ASSERT_EXIT(item);
-+
-+			/* Compare received items with cached creds */
-+			ASSERT_EXIT(memcmp(&item->creds, &cached_creds,
-+				    sizeof(struct kdbus_creds)) == 0);
-+		} else {
-+			ASSERT_EXIT(cnt == 0);
-+		}
-+
-+		if (invalid_flags & KDBUS_ATTACH_PIDS) {
-+			cnt = kdbus_count_item(info, KDBUS_ITEM_PIDS);
-+			ASSERT_EXIT(cnt == 1);
-+
-+			item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
-+			ASSERT_EXIT(item);
-+
-+			/* Compare item->pids with cached pids */
-+			ASSERT_EXIT(item->pids.pid == cached_pids.pid &&
-+				    item->pids.tid == cached_pids.tid);
-+		}
-+
-+		cnt = kdbus_count_item(info, KDBUS_ITEM_CGROUP);
-+		if (invalid_flags & KDBUS_ATTACH_CGROUP) {
-+			ASSERT_EXIT(cnt == 1);
-+		} else {
-+			ASSERT_EXIT(cnt == 0);
-+		}
-+
-+		cnt = kdbus_count_item(info, KDBUS_ITEM_CAPS);
-+		if (invalid_flags & KDBUS_ATTACH_CAPS) {
-+			ASSERT_EXIT(cnt == 1);
-+		} else {
-+			ASSERT_EXIT(cnt == 0);
-+		}
-+
-+		kdbus_free(conn, offset);
-+	}),
-+	({ 0; }));
-+	ASSERT_RETURN(ret == 0);
-+
-+continue_test:
-+
-+	/* A second name */
-+	ret = kdbus_name_acquire(conn, "com.example.b", NULL);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	cnt = kdbus_count_item(info, KDBUS_ITEM_OWNED_NAME);
-+	if (valid_flags & KDBUS_ATTACH_NAMES) {
-+		ASSERT_RETURN(cnt == 2);
-+	} else {
-+		ASSERT_RETURN(cnt == 0);
-+	}
-+
-+	kdbus_free(conn, offset);
-+
-+	ASSERT_RETURN(ret == 0);
-+
-+	return 0;
-+}
-+
-+int kdbus_test_conn_info(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	int have_caps;
-+	struct {
-+		struct kdbus_cmd_info cmd_info;
-+
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			char str[64];
-+		} name;
-+	} buf;
-+
-+	buf.cmd_info.size = sizeof(struct kdbus_cmd_info);
-+	buf.cmd_info.flags = 0;
-+	buf.cmd_info.attach_flags = 0;
-+	buf.cmd_info.id = env->conn->id;
-+
-+	ret = kdbus_conn_info(env->conn, env->conn->id, NULL, 0, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* try to pass a name that is longer than the buffer's size */
-+	buf.name.size = KDBUS_ITEM_HEADER_SIZE + 1;
-+	buf.name.type = KDBUS_ITEM_NAME;
-+	strcpy(buf.name.str, "foo.bar.bla");
-+
-+	buf.cmd_info.id = 0;
-+	buf.cmd_info.size = sizeof(buf.cmd_info) + buf.name.size;
-+	ret = kdbus_cmd_conn_info(env->conn->fd, (struct kdbus_cmd_info *) &buf);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* Pass a non-existent name */
-+	ret = kdbus_conn_info(env->conn, 0, "non.existent.name", 0, NULL);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	if (!all_uids_gids_are_mapped())
-+		return TEST_SKIP;
-+
-+	/* Test for caps here, so we run the previous test */
-+	have_caps = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(have_caps >= 0);
-+
-+	ret = kdbus_fuzz_conn_info(env, have_caps);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* If we skipped some tests above, let the user know */
-+	if (!have_caps)
-+		return TEST_SKIP;
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_conn_update(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	int found = 0;
-+	int ret;
-+
-+	/*
-+	 * kdbus_hello() sets all attach flags. Receive a message by this
-+	 * connection, and make sure a timestamp item (just to pick one) is
-+	 * present.
-+	 */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_msg_send(env->conn, NULL, 0x12345678, 0, 0, 0, conn->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	found = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
-+	ASSERT_RETURN(found == 1);
-+
-+	kdbus_msg_free(msg);
-+
-+	/*
-+	 * Now, modify the attach flags and repeat the action. The item must
-+	 * now be missing.
-+	 */
-+	found = 0;
-+
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     _KDBUS_ATTACH_ALL,
-+					     _KDBUS_ATTACH_ALL &
-+					     ~KDBUS_ATTACH_TIMESTAMP);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_send(env->conn, NULL, 0x12345678, 0, 0, 0, conn->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	found = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
-+	ASSERT_RETURN(found == 0);
-+
-+	/* Provide a bogus attach_flags value */
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     _KDBUS_ATTACH_ALL + 1,
-+					     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	kdbus_msg_free(msg);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_writable_pool(struct kdbus_test_env *env)
-+{
-+	struct kdbus_cmd_free cmd_free = {};
-+	struct kdbus_cmd_hello hello;
-+	int fd, ret;
-+	void *map;
-+
-+	fd = open(env->buspath, O_RDWR | O_CLOEXEC);
-+	ASSERT_RETURN(fd >= 0);
-+
-+	memset(&hello, 0, sizeof(hello));
-+	hello.flags = KDBUS_HELLO_ACCEPT_FD;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+	hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
-+	hello.size = sizeof(struct kdbus_cmd_hello);
-+	hello.pool_size = POOL_SIZE;
-+	hello.offset = (__u64)-1;
-+
-+	/* success test */
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* The kernel should have returned some items */
-+	ASSERT_RETURN(hello.offset != (__u64)-1);
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = hello.offset;
-+	ret = kdbus_cmd_free(fd, &cmd_free);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* pools cannot be mapped writable */
-+	map = mmap(NULL, POOL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
-+	ASSERT_RETURN(map == MAP_FAILED);
-+
-+	/* pools can always be mapped readable */
-+	map = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
-+	ASSERT_RETURN(map != MAP_FAILED);
-+
-+	/* make sure we cannot change protection masks to writable */
-+	ret = mprotect(map, POOL_SIZE, PROT_READ | PROT_WRITE);
-+	ASSERT_RETURN(ret < 0);
-+
-+	munmap(map, POOL_SIZE);
-+	close(fd);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-daemon.c b/tools/testing/selftests/kdbus/test-daemon.c
-new file mode 100644
-index 0000000..8bc2386
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-daemon.c
-@@ -0,0 +1,65 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+int kdbus_test_daemon(struct kdbus_test_env *env)
-+{
-+	struct pollfd fds[2];
-+	int count;
-+	int ret;
-+
-+	/* This test doesn't make any sense in non-interactive mode */
-+	if (!kdbus_util_verbose)
-+		return TEST_OK;
-+
-+	printf("Created connection %llu on bus '%s'\n",
-+		(unsigned long long) env->conn->id, env->buspath);
-+
-+	ret = kdbus_name_acquire(env->conn, "com.example.kdbus-test", NULL);
-+	ASSERT_RETURN(ret == 0);
-+	printf("  Aquired name: com.example.kdbus-test\n");
-+
-+	fds[0].fd = env->conn->fd;
-+	fds[1].fd = STDIN_FILENO;
-+
-+	printf("Monitoring connections:\n");
-+
-+	for (count = 0;; count++) {
-+		int i, nfds = sizeof(fds) / sizeof(fds[0]);
-+
-+		for (i = 0; i < nfds; i++) {
-+			fds[i].events = POLLIN | POLLPRI | POLLHUP;
-+			fds[i].revents = 0;
-+		}
-+
-+		ret = poll(fds, nfds, -1);
-+		if (ret <= 0)
-+			break;
-+
-+		if (fds[0].revents & POLLIN) {
-+			ret = kdbus_msg_recv(env->conn, NULL, NULL);
-+			ASSERT_RETURN(ret == 0);
-+		}
-+
-+		/* stdin */
-+		if (fds[1].revents & POLLIN)
-+			break;
-+	}
-+
-+	printf("Closing bus connection\n");
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-endpoint.c b/tools/testing/selftests/kdbus/test-endpoint.c
-new file mode 100644
-index 0000000..34a7be4
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-endpoint.c
-@@ -0,0 +1,352 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <libgen.h>
-+#include <sys/capability.h>
-+#include <sys/wait.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+#define KDBUS_SYSNAME_MAX_LEN			63
-+
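-+/* Subscribe to NAME_ADD notifications for the given well-known name */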
-+static int install_name_add_match(struct kdbus_conn *conn, const char *name)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_name_change chg;
-+		} item;
-+		char name[64];
-+	} buf;
-+	int ret;
-+
-+	/* install the match rule */
-+	memset(&buf, 0, sizeof(buf));
-+	buf.item.type = KDBUS_ITEM_NAME_ADD;
-+	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
-+	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
-+	strncpy(buf.name, name, sizeof(buf.name) - 1);
-+	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
-+	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
-+
-+	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
-+	if (ret < 0)
-+		return ret;
-+
-+	return 0;
-+}
-+
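-+/* Create a custom endpoint "<uid>-<name>" on the bus behind buspath */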
-+static int create_endpoint(const char *buspath, uid_t uid, const char *name,
-+			   uint64_t flags)
-+{
-+	struct {
-+		struct kdbus_cmd cmd;
-+
-+		/* name item */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			/* max should be KDBUS_SYSNAME_MAX_LEN */
-+			char str[128];
-+		} name;
-+	} ep_make;
-+	int fd, ret;
-+
-+	fd = open(buspath, O_RDWR);
-+	if (fd < 0)
-+		return fd;
-+
-+	memset(&ep_make, 0, sizeof(ep_make));
-+
-+	snprintf(ep_make.name.str,
-+		 /* Use the KDBUS_SYSNAME_MAX_LEN or sizeof(str) */
-+		 KDBUS_SYSNAME_MAX_LEN > strlen(name) ?
-+		 KDBUS_SYSNAME_MAX_LEN : sizeof(ep_make.name.str),
-+		 "%u-%s", uid, name);
-+
-+	ep_make.name.type = KDBUS_ITEM_MAKE_NAME;
-+	ep_make.name.size = KDBUS_ITEM_HEADER_SIZE +
-+			    strlen(ep_make.name.str) + 1;
-+
-+	ep_make.cmd.flags = flags;
-+	ep_make.cmd.size = sizeof(ep_make.cmd) + ep_make.name.size;
-+
-+	ret = kdbus_cmd_endpoint_make(fd, &ep_make.cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error creating endpoint: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return fd;
-+}
-+
-+static int unpriv_test_custom_ep(const char *buspath)
-+{
-+	int ret, ep_fd1, ep_fd2;
-+	char *ep1, *ep2, *tmp1, *tmp2;
-+
-+	tmp1 = strdup(buspath);
-+	tmp2 = strdup(buspath);
-+	ASSERT_RETURN(tmp1 && tmp2);
-+
-+	ret = asprintf(&ep1, "%s/%u-%s", dirname(tmp1), getuid(), "apps1");
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = asprintf(&ep2, "%s/%u-%s", dirname(tmp2), getuid(), "apps2");
-+	ASSERT_RETURN(ret >= 0);
-+
-+	free(tmp1);
-+	free(tmp2);
-+
-+	/* endpoint only accessible to current uid */
-+	ep_fd1 = create_endpoint(buspath, getuid(), "apps1", 0);
-+	ASSERT_RETURN(ep_fd1 >= 0);
-+
-+	/* endpoint world accessible */
-+	ep_fd2 = create_endpoint(buspath, getuid(), "apps2",
-+				  KDBUS_MAKE_ACCESS_WORLD);
-+	ASSERT_RETURN(ep_fd2 >= 0);
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_UID, ({
-+		int ep_fd;
-+		struct kdbus_conn *ep_conn;
-+
-+		/*
-+		 * Make sure that we are not able to create custom
-+		 * endpoints
-+		 */
-+		ep_fd = create_endpoint(buspath, getuid(),
-+					"unpriv_custom_ep", 0);
-+		ASSERT_EXIT(ep_fd == -EPERM);
-+
-+		/*
-+		 * Endpoint "apps1" is only accessible to the user
-+		 * that owns the endpoint; access is denied by the VFS
-+		 */
-+		ep_conn = kdbus_hello(ep1, 0, NULL, 0);
-+		ASSERT_EXIT(!ep_conn && errno == EACCES);
-+
-+		/* Endpoint "apps2" world accessible */
-+		ep_conn = kdbus_hello(ep2, 0, NULL, 0);
-+		ASSERT_EXIT(ep_conn);
-+
-+		kdbus_conn_free(ep_conn);
-+
-+		_exit(EXIT_SUCCESS);
-+	}),
-+	({ 0; }));
-+	ASSERT_RETURN(ret == 0);
-+
-+	close(ep_fd1);
-+	close(ep_fd2);
-+	free(ep1);
-+	free(ep2);
-+
-+	return 0;
-+}
-+
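-+/* Update the endpoint policy: grant everyone KDBUS_POLICY_SEE on the name */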
-+static int update_endpoint(int fd, const char *name)
-+{
-+	int len = strlen(name) + 1;
-+	struct {
-+		struct kdbus_cmd cmd;
-+
-+		/* name item */
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			char str[KDBUS_ALIGN8(len)];
-+		} name;
-+
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_policy_access access;
-+		} access;
-+	} ep_update;
-+	int ret;
-+
-+	memset(&ep_update, 0, sizeof(ep_update));
-+
-+	ep_update.name.size = KDBUS_ITEM_HEADER_SIZE + len;
-+	ep_update.name.type = KDBUS_ITEM_NAME;
-+	strncpy(ep_update.name.str, name, sizeof(ep_update.name.str) - 1);
-+
-+	ep_update.access.size = sizeof(ep_update.access);
-+	ep_update.access.type = KDBUS_ITEM_POLICY_ACCESS;
-+	ep_update.access.access.type = KDBUS_POLICY_ACCESS_WORLD;
-+	ep_update.access.access.access = KDBUS_POLICY_SEE;
-+
-+	ep_update.cmd.size = sizeof(ep_update);
-+
-+	ret = kdbus_cmd_endpoint_update(fd, &ep_update.cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error updating endpoint: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+int kdbus_test_custom_endpoint(struct kdbus_test_env *env)
-+{
-+	char *ep, *tmp;
-+	int ret, ep_fd;
-+	struct kdbus_msg *msg;
-+	struct kdbus_conn *ep_conn;
-+	struct kdbus_conn *reader;
-+	const char *name = "foo.bar.baz";
-+	const char *epname = "foo";
-+	char fake_ep[KDBUS_SYSNAME_MAX_LEN + 1] = {'\0'};
-+
-+	memset(fake_ep, 'X', sizeof(fake_ep) - 1);
-+
-+	/* Try to create a custom endpoint with a long name */
-+	ret = create_endpoint(env->buspath, getuid(), fake_ep, 0);
-+	ASSERT_RETURN(ret == -ENAMETOOLONG);
-+
-+	/* Try to create a custom endpoint with a different uid */
-+	ret = create_endpoint(env->buspath, getuid() + 1, "foobar", 0);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* create a custom endpoint, and open a connection on it */
-+	ep_fd = create_endpoint(env->buspath, getuid(), "foo", 0);
-+	ASSERT_RETURN(ep_fd >= 0);
-+
-+	tmp = strdup(env->buspath);
-+	ASSERT_RETURN(tmp);
-+
-+	ret = asprintf(&ep, "%s/%u-%s", dirname(tmp), getuid(), epname);
-+	free(tmp);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* Register a connection that listen to broadcasts */
-+	reader = kdbus_hello(ep, 0, NULL, 0);
-+	ASSERT_RETURN(reader);
-+
-+	/* Register to kernel signals */
-+	ret = kdbus_add_match_id(reader, 0x1, KDBUS_ITEM_ID_ADD,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_id(reader, 0x2, KDBUS_ITEM_ID_REMOVE,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = install_name_add_match(reader, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Monitor connections are not supported on custom endpoints */
-+	ep_conn = kdbus_hello(ep, KDBUS_HELLO_MONITOR, NULL, 0);
-+	ASSERT_RETURN(!ep_conn && errno == EOPNOTSUPP);
-+
-+	ep_conn = kdbus_hello(ep, 0, NULL, 0);
-+	ASSERT_RETURN(ep_conn);
-+
-+	/* Check that the reader got the IdAdd notification */
-+	ret = kdbus_msg_recv(reader, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_ADD);
-+	ASSERT_RETURN(msg->items[0].id_change.id == ep_conn->id);
-+	kdbus_msg_free(msg);
-+
-+	/*
-+	 * Add a name-add match on the endpoint connection, acquire the
-+	 * name from the unfiltered connection, and make sure the filtered
-+	 * connection did not get the notification on the name owner change.
-+	 * Also, the endpoint connection must not be able to call conn_info,
-+	 * either on the name or on the ID.
-+	 */
-+	ret = install_name_add_match(ep_conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(ep_conn, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	ret = kdbus_conn_info(ep_conn, 0, name, 0, NULL);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	ret = kdbus_conn_info(ep_conn, 0, "random.crappy.name", 0, NULL);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	ret = kdbus_conn_info(ep_conn, env->conn->id, NULL, 0, NULL);
-+	ASSERT_RETURN(ret == -ENXIO);
-+
-+	ret = kdbus_conn_info(ep_conn, 0x0fffffffffffffffULL, NULL, 0, NULL);
-+	ASSERT_RETURN(ret == -ENXIO);
-+
-+	/* Check that the reader did not receive the name notification */
-+	ret = kdbus_msg_recv(reader, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/*
-+	 * Release the name again, update the custom endpoint policy,
-+	 * and try again. This time, the connection on the custom endpoint
-+	 * should have gotten it.
-+	 */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Check that the reader did not receive the name notification */
-+	ret = kdbus_msg_recv(reader, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	ret = update_endpoint(ep_fd, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(ep_conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_ADD);
-+	ASSERT_RETURN(msg->items[0].name_change.old_id.id == 0);
-+	ASSERT_RETURN(msg->items[0].name_change.new_id.id == env->conn->id);
-+	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_msg_recv(reader, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_conn_info(ep_conn, 0, name, 0, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_info(ep_conn, env->conn->id, NULL, 0, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* If we have privileges test custom endpoints */
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * All uids/gids are mapped and we have the necessary caps
-+	 */
-+	if (ret && all_uids_gids_are_mapped()) {
-+		ret = unpriv_test_custom_ep(env->buspath);
-+		ASSERT_RETURN(ret == 0);
-+	}
-+
-+	kdbus_conn_free(reader);
-+	kdbus_conn_free(ep_conn);
-+	close(ep_fd);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-fd.c b/tools/testing/selftests/kdbus/test-fd.c
-new file mode 100644
-index 0000000..2ae0f5a
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-fd.c
-@@ -0,0 +1,789 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stdbool.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <sys/types.h>
-+#include <sys/mman.h>
-+#include <sys/socket.h>
-+#include <sys/wait.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+#define KDBUS_MSG_MAX_ITEMS     128
-+#define KDBUS_USER_MAX_CONN	256
-+
-+/* maximum number of inflight fds in a target queue per user */
-+#define KDBUS_CONN_MAX_FDS_PER_USER	16
-+
-+/* maximum number of memfd items per message */
-+#define KDBUS_MSG_MAX_MEMFD_ITEMS       16
-+
-+static int make_msg_payload_dbus(uint64_t src_id, uint64_t dst_id,
-+				 uint64_t msg_size,
-+				 struct kdbus_msg **msg_dbus)
-+{
-+	struct kdbus_msg *msg;
-+
-+	msg = malloc(msg_size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, msg_size);
-+	msg->size = msg_size;
-+	msg->src_id = src_id;
-+	msg->dst_id = dst_id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	*msg_dbus = msg;
-+
-+	return 0;
-+}
-+
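-+/* Fill consecutive KDBUS_ITEM_PAYLOAD_MEMFD items, one per memfd */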
-+static void make_item_memfds(struct kdbus_item *item,
-+			     int *memfds, size_t memfd_size)
-+{
-+	size_t i;
-+
-+	for (i = 0; i < memfd_size; i++) {
-+		item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
-+		item->size = KDBUS_ITEM_HEADER_SIZE +
-+			     sizeof(struct kdbus_memfd);
-+		item->memfd.fd = memfds[i];
-+		item->memfd.size = sizeof(uint64_t); /* const size */
-+		item = KDBUS_ITEM_NEXT(item);
-+	}
-+}
-+
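-+/* Pack all fds into a single KDBUS_ITEM_FDS item */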
-+static void make_item_fds(struct kdbus_item *item,
-+			  int *fd_array, size_t fd_size)
-+{
-+	size_t i;
-+	item->type = KDBUS_ITEM_FDS;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + (sizeof(int) * fd_size);
-+
-+	for (i = 0; i < fd_size; i++)
-+		item->fds[i] = fd_array[i];
-+}
-+
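-+/* Create a sealed memfd pre-filled with the given buffer */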
-+static int memfd_write(const char *name, void *buf, size_t bufsize)
-+{
-+	ssize_t ret;
-+	int memfd;
-+
-+	memfd = sys_memfd_create(name, 0);
-+	ASSERT_RETURN_VAL(memfd >= 0, memfd);
-+
-+	ret = write(memfd, buf, bufsize);
-+	ASSERT_RETURN_VAL(ret == (ssize_t)bufsize, -EAGAIN);
-+
-+	ret = sys_memfd_seal_set(memfd);
-+	ASSERT_RETURN_VAL(ret == 0, -errno);
-+
-+	return memfd;
-+}
-+
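-+/* Send one KDBUS_ITEM_PAYLOAD_MEMFD item per memfd; broadcasts get a bloom filter */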
-+static int send_memfds(struct kdbus_conn *conn, uint64_t dst_id,
-+		       int *memfds_array, size_t memfd_count)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+	uint64_t size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += memfd_count * KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST)
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+
-+	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	item = msg->items;
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST) {
-+		item->type = KDBUS_ITEM_BLOOM_FILTER;
-+		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+		item = KDBUS_ITEM_NEXT(item);
-+
-+		msg->flags |= KDBUS_MSG_SIGNAL;
-+	}
-+
-+	make_item_memfds(item, memfds_array, memfd_count);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	free(msg);
-+	return 0;
-+}
-+
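-+/* Send all fds in one KDBUS_ITEM_FDS item; broadcasts get a bloom filter */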
-+static int send_fds(struct kdbus_conn *conn, uint64_t dst_id,
-+		    int *fd_array, size_t fd_count)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+	uint64_t size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(int) * fd_count);
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST)
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+
-+	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	item = msg->items;
-+
-+	if (dst_id == KDBUS_DST_ID_BROADCAST) {
-+		item->type = KDBUS_ITEM_BLOOM_FILTER;
-+		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
-+		item = KDBUS_ITEM_NEXT(item);
-+
-+		msg->flags |= KDBUS_MSG_SIGNAL;
-+	}
-+
-+	make_item_fds(item, fd_array, fd_count);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	free(msg);
-+	return ret;
-+}
-+
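-+/* Combine a single KDBUS_ITEM_FDS item with multiple memfd items in one message */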
-+static int send_fds_memfds(struct kdbus_conn *conn, uint64_t dst_id,
-+			   int *fds_array, size_t fd_count,
-+			   int *memfds_array, size_t memfd_count)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+	uint64_t size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += memfd_count * KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
-+	size += KDBUS_ITEM_SIZE(sizeof(int) * fd_count);
-+
-+	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	item = msg->items;
-+
-+	make_item_fds(item, fds_array, fd_count);
-+	item = KDBUS_ITEM_NEXT(item);
-+	make_item_memfds(item, memfds_array, memfd_count);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	free(msg);
-+	return ret;
-+}
-+
-+/* Return the number of received fds */
-+static unsigned int kdbus_item_get_nfds(struct kdbus_msg *msg)
-+{
-+	unsigned int fds = 0;
-+	const struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items) {
-+		switch (item->type) {
-+		case KDBUS_ITEM_FDS: {
-+			fds += (item->size - KDBUS_ITEM_HEADER_SIZE) /
-+				sizeof(int);
-+			break;
-+		}
-+
-+		case KDBUS_ITEM_PAYLOAD_MEMFD:
-+			fds++;
-+			break;
-+
-+		default:
-+			break;
-+		}
-+	}
-+
-+	return fds;
-+}
-+
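-+/* Allocate a message with the given cookie, optionally carrying one fd */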
-+static struct kdbus_msg *
-+get_kdbus_msg_with_fd(struct kdbus_conn *conn_src,
-+		      uint64_t dst_id, uint64_t cookie, int fd)
-+{
-+	int ret;
-+	uint64_t size;
-+	struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+
-+	size = sizeof(struct kdbus_msg);
-+	if (fd >= 0)
-+		size += KDBUS_ITEM_SIZE(sizeof(int));
-+
-+	ret = make_msg_payload_dbus(conn_src->id, dst_id, size, &msg);
-+	ASSERT_RETURN_VAL(ret == 0, NULL);
-+
-+	msg->cookie = cookie;
-+
-+	if (fd >= 0) {
-+		item = msg->items;
-+
-+		make_item_fds(item, (int *)&fd, 1);
-+	}
-+
-+	return msg;
-+}
-+
-+static int kdbus_test_no_fds(struct kdbus_test_env *env,
-+			     int *fds, int *memfd)
-+{
-+	pid_t pid;
-+	int ret, status;
-+	uint64_t cookie;
-+	int connfd1, connfd2;
-+	struct kdbus_msg *msg, *msg_sync_reply;
-+	struct kdbus_cmd_hello hello;
-+	struct kdbus_conn *conn_src, *conn_dst, *conn_dummy;
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_cmd_free cmd_free = {};
-+
-+	conn_src = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_src);
-+
-+	connfd1 = open(env->buspath, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(connfd1 >= 0);
-+
-+	connfd2 = open(env->buspath, O_RDWR|O_CLOEXEC);
-+	ASSERT_RETURN(connfd2 >= 0);
-+
-+	/*
-+	 * Create connections without KDBUS_HELLO_ACCEPT_FD
-+	 * to test that fd sends to them are blocked
-+	 */
-+	conn_dst = malloc(sizeof(*conn_dst));
-+	ASSERT_RETURN(conn_dst);
-+
-+	conn_dummy = malloc(sizeof(*conn_dummy));
-+	ASSERT_RETURN(conn_dummy);
-+
-+	memset(&hello, 0, sizeof(hello));
-+	hello.size = sizeof(struct kdbus_cmd_hello);
-+	hello.pool_size = POOL_SIZE;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+
-+	ret = kdbus_cmd_hello(connfd1, &hello);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = hello.offset;
-+	ret = kdbus_cmd_free(connfd1, &cmd_free);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	conn_dst->fd = connfd1;
-+	conn_dst->id = hello.id;
-+
-+	memset(&hello, 0, sizeof(hello));
-+	hello.size = sizeof(struct kdbus_cmd_hello);
-+	hello.pool_size = POOL_SIZE;
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
-+
-+	ret = kdbus_cmd_hello(connfd2, &hello);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = hello.offset;
-+	ret = kdbus_cmd_free(connfd2, &cmd_free);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	conn_dummy->fd = connfd2;
-+	conn_dummy->id = hello.id;
-+
-+	conn_dst->buf = mmap(NULL, POOL_SIZE, PROT_READ,
-+			     MAP_SHARED, connfd1, 0);
-+	ASSERT_RETURN(conn_dst->buf != MAP_FAILED);
-+
-+	conn_dummy->buf = mmap(NULL, POOL_SIZE, PROT_READ,
-+			       MAP_SHARED, connfd2, 0);
-+	ASSERT_RETURN(conn_dummy->buf != MAP_FAILED);
-+
-+	/*
-+	 * Send fds to a connection that does not accept fd passing
-+	 */
-+	ret = send_fds(conn_src, conn_dst->id, fds, 1);
-+	ASSERT_RETURN(ret == -ECOMM);
-+
-+	/*
-+	 * memfds are kdbus payload items, so they are accepted even
-+	 * without KDBUS_HELLO_ACCEPT_FD
-+	 */
-+	ret = send_memfds(conn_src, conn_dst->id, memfd, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_dst, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cookie = time(NULL);
-+
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		struct timespec now;
-+
-+		/*
-+		 * A sync send/reply to a connection that does not
-+		 * accept fds should fail if it contains an fd
-+		 */
-+		msg_sync_reply = get_kdbus_msg_with_fd(conn_dst,
-+						       conn_dummy->id,
-+						       cookie, fds[0]);
-+		ASSERT_EXIT(msg_sync_reply);
-+
-+		ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
-+		ASSERT_EXIT(ret == 0);
-+
-+		msg_sync_reply->timeout_ns = now.tv_sec * 1000000000ULL +
-+					     now.tv_nsec + 100000000ULL;
-+		msg_sync_reply->flags = KDBUS_MSG_EXPECT_REPLY;
-+
-+		memset(&cmd, 0, sizeof(cmd));
-+		cmd.size = sizeof(cmd);
-+		cmd.msg_address = (uintptr_t)msg_sync_reply;
-+		cmd.flags = KDBUS_SEND_SYNC_REPLY;
-+
-+		ret = kdbus_cmd_send(conn_dst->fd, &cmd);
-+		ASSERT_EXIT(ret == -ECOMM);
-+
-+		/*
-+		 * Now send a normal message, but the sync reply
-+		 * will fail since it contains an fd that the
-+		 * original sender does not want.
-+		 *
-+		 * The original sender will fail with -ETIMEDOUT
-+		 */
-+		cookie++;
-+		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
-+					  KDBUS_MSG_EXPECT_REPLY,
-+					  5000000000ULL, 0, conn_src->id, -1);
-+		ASSERT_EXIT(ret == -EREMOTEIO);
-+
-+		cookie++;
-+		ret = kdbus_msg_recv_poll(conn_dst, 100, &msg, NULL);
-+		ASSERT_EXIT(ret == 0);
-+		ASSERT_EXIT(msg->cookie == cookie);
-+
-+		free(msg_sync_reply);
-+		kdbus_msg_free(msg);
-+
-+		_exit(EXIT_SUCCESS);
-+	}
-+
-+	ret = kdbus_msg_recv_poll(conn_dummy, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == -ETIMEDOUT);
-+
-+	cookie++;
-+	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/*
-+	 * Try to reply with a kdbus connection handle; this should
-+	 * fail with -EOPNOTSUPP
-+	 */
-+	msg_sync_reply = get_kdbus_msg_with_fd(conn_src,
-+					       conn_dst->id,
-+					       cookie, conn_dst->fd);
-+	ASSERT_RETURN(msg_sync_reply);
-+
-+	msg_sync_reply->cookie_reply = cookie;
-+
-+	memset(&cmd, 0, sizeof(cmd));
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg_sync_reply;
-+
-+	ret = kdbus_cmd_send(conn_src->fd, &cmd);
-+	ASSERT_RETURN(ret == -EOPNOTSUPP);
-+
-+	free(msg_sync_reply);
-+
-+	/*
-+	 * Try to reply with a normal fd; this should fail even
-+	 * if the response is a sync reply.
-+	 *
-+	 * From the sender's view we fail with -ECOMM
-+	 */
-+	msg_sync_reply = get_kdbus_msg_with_fd(conn_src,
-+					       conn_dst->id,
-+					       cookie, fds[0]);
-+	ASSERT_RETURN(msg_sync_reply);
-+
-+	msg_sync_reply->cookie_reply = cookie;
-+
-+	memset(&cmd, 0, sizeof(cmd));
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg_sync_reply;
-+
-+	ret = kdbus_cmd_send(conn_src->fd, &cmd);
-+	ASSERT_RETURN(ret == -ECOMM);
-+
-+	free(msg_sync_reply);
-+
-+	/*
-+	 * Send another normal message and check that the queue
-+	 * is clear
-+	 */
-+	cookie++;
-+	ret = kdbus_msg_send(conn_src, NULL, cookie, 0, 0, 0,
-+			     conn_dst->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	kdbus_conn_free(conn_dummy);
-+	kdbus_conn_free(conn_dst);
-+	kdbus_conn_free(conn_src);
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
-+
-+static int kdbus_send_multiple_fds(struct kdbus_conn *conn_src,
-+				   struct kdbus_conn *conn_dst)
-+{
-+	int ret, i;
-+	unsigned int nfds;
-+	int fds[KDBUS_CONN_MAX_FDS_PER_USER + 1];
-+	int memfds[KDBUS_MSG_MAX_ITEMS + 1];
-+	struct kdbus_msg *msg;
-+	uint64_t dummy_value;
-+
-+	dummy_value = time(NULL);
-+
-+	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++) {
-+		fds[i] = open("/dev/null", O_RDWR|O_CLOEXEC);
-+		ASSERT_RETURN_VAL(fds[i] >= 0, -errno);
-+	}
-+
-+	/* Send KDBUS_CONN_MAX_FDS_PER_USER fds plus one more */
-+	ret = send_fds(conn_src, conn_dst->id, fds,
-+		       KDBUS_CONN_MAX_FDS_PER_USER + 1);
-+	ASSERT_RETURN(ret == -EMFILE);
-+
-+	/* Retry with the correct KDBUS_CONN_MAX_FDS_PER_USER */
-+	ret = send_fds(conn_src, conn_dst->id, fds,
-+		       KDBUS_CONN_MAX_FDS_PER_USER);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Check we got the right number of fds */
-+	nfds = kdbus_item_get_nfds(msg);
-+	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER);
-+
-+	kdbus_msg_free(msg);
-+
-+	for (i = 0; i < KDBUS_MSG_MAX_ITEMS + 1; i++, dummy_value++) {
-+		memfds[i] = memfd_write("memfd-name",
-+					&dummy_value,
-+					sizeof(dummy_value));
-+		ASSERT_RETURN_VAL(memfds[i] >= 0, memfds[i]);
-+	}
-+
-+	/* Send KDBUS_MSG_MAX_ITEMS memfds plus one more */
-+	ret = send_memfds(conn_src, conn_dst->id,
-+			  memfds, KDBUS_MSG_MAX_ITEMS + 1);
-+	ASSERT_RETURN(ret == -E2BIG);
-+
-+	ret = send_memfds(conn_src, conn_dst->id,
-+			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS + 1);
-+	ASSERT_RETURN(ret == -E2BIG);
-+
-+	/* Retry with the correct KDBUS_MSG_MAX_MEMFD_ITEMS */
-+	ret = send_memfds(conn_src, conn_dst->id,
-+			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Check we got the right number of fds */
-+	nfds = kdbus_item_get_nfds(msg);
-+	ASSERT_RETURN(nfds == KDBUS_MSG_MAX_MEMFD_ITEMS);
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/*
-+	 * Combine KDBUS_CONN_MAX_FDS_PER_USER + 1 fds with
-+	 * 10 memfds
-+	 */
-+	ret = send_fds_memfds(conn_src, conn_dst->id,
-+			      fds, KDBUS_CONN_MAX_FDS_PER_USER + 1,
-+			      memfds, 10);
-+	ASSERT_RETURN(ret == -EMFILE);
-+
-+	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/*
-+	 * Combine KDBUS_CONN_MAX_FDS_PER_USER fds with
-+	 * (KDBUS_MSG_MAX_ITEMS - 1) + 1 memfds; all fds share a
-+	 * single item, while each memfd takes an item of its own
-+	 */
-+	ret = send_fds_memfds(conn_src, conn_dst->id,
-+			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
-+			      memfds, (KDBUS_MSG_MAX_ITEMS - 1) + 1);
-+	ASSERT_RETURN(ret == -E2BIG);
-+
-+	ret = send_fds_memfds(conn_src, conn_dst->id,
-+			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
-+			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS + 1);
-+	ASSERT_RETURN(ret == -E2BIG);
-+
-+	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/*
-+	 * Send KDBUS_CONN_MAX_FDS_PER_USER fds +
-+	 * KDBUS_MSG_MAX_MEMFD_ITEMS memfds
-+	 */
-+	ret = send_fds_memfds(conn_src, conn_dst->id,
-+			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
-+			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Check we got the right number of fds */
-+	nfds = kdbus_item_get_nfds(msg);
-+	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER +
-+			      KDBUS_MSG_MAX_MEMFD_ITEMS);
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/*
-+	 * Re-send fds + memfds, close them, but do not receive them
-+	 * and try to queue more
-+	 */
-+	ret = send_fds_memfds(conn_src, conn_dst->id,
-+			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
-+			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* close the old references and get new ones */
-+	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++) {
-+		close(fds[i]);
-+		fds[i] = open("/dev/null", O_RDWR|O_CLOEXEC);
-+		ASSERT_RETURN_VAL(fds[i] >= 0, -errno);
-+	}
-+
-+	/* should fail since we already have fds in the queue */
-+	ret = send_fds(conn_src, conn_dst->id, fds,
-+		       KDBUS_CONN_MAX_FDS_PER_USER);
-+	ASSERT_RETURN(ret == -EMFILE);
-+
-+	/* This should succeed */
-+	ret = send_memfds(conn_src, conn_dst->id,
-+			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	nfds = kdbus_item_get_nfds(msg);
-+	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER +
-+			      KDBUS_MSG_MAX_MEMFD_ITEMS);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	nfds = kdbus_item_get_nfds(msg);
-+	ASSERT_RETURN(nfds == KDBUS_MSG_MAX_MEMFD_ITEMS);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++)
-+		close(fds[i]);
-+
-+	for (i = 0; i < KDBUS_MSG_MAX_ITEMS + 1; i++)
-+		close(memfds[i]);
-+
-+	return 0;
-+}
-+
-+int kdbus_test_fd_passing(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn_src, *conn_dst;
-+	const char *str = "stackenblocken";
-+	const struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+	unsigned int i;
-+	uint64_t now;
-+	int fds_conn[2];
-+	int sock_pair[2];
-+	int fds[2];
-+	int memfd;
-+	int ret;
-+
-+	now = (uint64_t) time(NULL);
-+
-+	/* create two connections */
-+	conn_src = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_dst = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_src && conn_dst);
-+
-+	fds_conn[0] = conn_src->fd;
-+	fds_conn[1] = conn_dst->fd;
-+
-+	ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_pair);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Setup memfd */
-+	memfd = memfd_write("memfd-name", &now, sizeof(now));
-+	ASSERT_RETURN(memfd >= 0);
-+
-+	/* Setup pipes */
-+	ret = pipe(fds);
-+	ASSERT_RETURN(ret == 0);
-+
-+	i = write(fds[1], str, strlen(str));
-+	ASSERT_RETURN(i == strlen(str));
-+
-+	/*
-+	 * Try to pass the handle of a connection as a message payload.
-+	 * This must fail.
-+	 */
-+	ret = send_fds(conn_src, conn_dst->id, fds_conn, 2);
-+	ASSERT_RETURN(ret == -ENOTSUP);
-+
-+	ret = send_fds(conn_dst, conn_src->id, fds_conn, 2);
-+	ASSERT_RETURN(ret == -ENOTSUP);
-+
-+	ret = send_fds(conn_src, conn_dst->id, sock_pair, 2);
-+	ASSERT_RETURN(ret == -ENOTSUP);
-+
-+	/*
-+	 * Send fds and memfds to a connection that does not accept fds
-+	 */
-+	ret = kdbus_test_no_fds(env, fds, (int *)&memfd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Try to broadcast file descriptors. This must fail. */
-+	ret = send_fds(conn_src, KDBUS_DST_ID_BROADCAST, fds, 1);
-+	ASSERT_RETURN(ret == -ENOTUNIQ);
-+
-+	/* Try to broadcast memfd. This must succeed. */
-+	ret = send_memfds(conn_src, KDBUS_DST_ID_BROADCAST, (int *)&memfd, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Open-code this loop so it can be restarted with a goto */
-+loop_send_fds:
-+
-+	/*
-+	 * Send the read end of the pipe and close it.
-+	 */
-+	ret = send_fds(conn_src, conn_dst->id, fds, 1);
-+	ASSERT_RETURN(ret == 0);
-+	close(fds[0]);
-+
-+	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items) {
-+		if (item->type == KDBUS_ITEM_FDS) {
-+			char tmp[14];
-+			int nfds = (item->size - KDBUS_ITEM_HEADER_SIZE) /
-+					sizeof(int);
-+			ASSERT_RETURN(nfds == 1);
-+
-+			i = read(item->fds[0], tmp, sizeof(tmp));
-+			if (i != 0) {
-+				ASSERT_RETURN(i == sizeof(tmp));
-+				ASSERT_RETURN(memcmp(tmp, str, sizeof(tmp)) == 0);
-+
-+				/* Write EOF */
-+				close(fds[1]);
-+
-+				/*
-+				 * Resend the read end of the pipe,
-+				 * the receiver still holds a reference
-+				 * to it...
-+				 */
-+				goto loop_send_fds;
-+			}
-+
-+			/* Got EOF */
-+
-+			/*
-+			 * Close the last reference to the read end
-+			 * of the pipe, other references are
-+			 * automatically closed just after send.
-+			 */
-+			close(item->fds[0]);
-+		}
-+	}
-+
-+	/*
-+	 * Try to resend the read end of the pipe. Must fail with
-+	 * -EBADF since both the sender and receiver closed their
-+	 * references to it. We assume the above since sender and
-+	 * receiver are in the same process.
-+	 */
-+	ret = send_fds(conn_src, conn_dst->id, fds, 1);
-+	ASSERT_RETURN(ret == -EBADF);
-+
-+	/* Finally, free the received data */
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_send_multiple_fds(conn_src, conn_dst);
-+	ASSERT_RETURN(ret == 0);
-+
-+	close(sock_pair[0]);
-+	close(sock_pair[1]);
-+	close(memfd);
-+
-+	kdbus_conn_free(conn_src);
-+	kdbus_conn_free(conn_dst);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-free.c b/tools/testing/selftests/kdbus/test-free.c
-new file mode 100644
-index 0000000..f666da3
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-free.c
-@@ -0,0 +1,64 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+static int sample_ioctl_call(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	struct kdbus_cmd_list cmd_list = {
-+		.flags = KDBUS_LIST_QUEUED,
-+		.size = sizeof(cmd_list),
-+	};
-+
-+	ret = kdbus_cmd_list(env->conn->fd, &cmd_list);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* DON'T FREE THIS SLICE OF MEMORY! */
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_free(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	struct kdbus_cmd_free cmd_free = {};
-+
-+	/* free an unallocated buffer */
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.flags = 0;
-+	cmd_free.offset = 0;
-+	ret = kdbus_cmd_free(env->conn->fd, &cmd_free);
-+	ASSERT_RETURN(ret == -ENXIO);
-+
-+	/* free a buffer out of the pool's bounds */
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = POOL_SIZE + 1;
-+	ret = kdbus_cmd_free(env->conn->fd, &cmd_free);
-+	ASSERT_RETURN(ret == -ENXIO);
-+
-+	/*
-+	 * The user application is responsible for freeing the allocated
-+	 * memory with the KDBUS_CMD_FREE ioctl, so let's test what happens
-+	 * if we forget about it.
-+	 */
-+
-+	ret = sample_ioctl_call(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = sample_ioctl_call(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-match.c b/tools/testing/selftests/kdbus/test-match.c
-new file mode 100644
-index 0000000..2360dc1
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-match.c
-@@ -0,0 +1,441 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+int kdbus_test_match_id_add(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_id_change chg;
-+		} item;
-+	} buf;
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	int ret;
-+
-+	memset(&buf, 0, sizeof(buf));
-+
-+	buf.cmd.size = sizeof(buf);
-+	buf.cmd.cookie = 0xdeafbeefdeaddead;
-+	buf.item.size = sizeof(buf.item);
-+	buf.item.type = KDBUS_ITEM_ID_ADD;
-+	buf.item.chg.id = KDBUS_MATCH_ID_ANY;
-+
-+	/* match on id add */
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* create 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* 1st connection should have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_ADD);
-+	ASSERT_RETURN(msg->items[0].id_change.id == conn->id);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_match_id_remove(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_id_change chg;
-+		} item;
-+	} buf;
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	size_t id;
-+	int ret;
-+
-+	/* create 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+	id = conn->id;
-+
-+	memset(&buf, 0, sizeof(buf));
-+	buf.cmd.size = sizeof(buf);
-+	buf.cmd.cookie = 0xdeafbeefdeaddead;
-+	buf.item.size = sizeof(buf.item);
-+	buf.item.type = KDBUS_ITEM_ID_REMOVE;
-+	buf.item.chg.id = id;
-+
-+	/* register match on 2nd connection */
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* remove 2nd connection again */
-+	kdbus_conn_free(conn);
-+
-+	/* 1st connection should have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_REMOVE);
-+	ASSERT_RETURN(msg->items[0].id_change.id == id);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_match_replace(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_id_change chg;
-+		} item;
-+	} buf;
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	size_t id;
-+	int ret;
-+
-+	/* add a match to id_add */
-+	ASSERT_RETURN(kdbus_test_match_id_add(env) == TEST_OK);
-+
-+	/* do a replace of the match from id_add to id_remove */
-+	memset(&buf, 0, sizeof(buf));
-+
-+	buf.cmd.size = sizeof(buf);
-+	buf.cmd.cookie = 0xdeafbeefdeaddead;
-+	buf.cmd.flags = KDBUS_MATCH_REPLACE;
-+	buf.item.size = sizeof(buf.item);
-+	buf.item.type = KDBUS_ITEM_ID_REMOVE;
-+	buf.item.chg.id = KDBUS_MATCH_ID_ANY;
-+
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* create 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+	id = conn->id;
-+
-+	/* 1st connection should _not_ have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret != 0);
-+
-+	/* remove 2nd connection */
-+	kdbus_conn_free(conn);
-+
-+	/* 1st connection should _now_ have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_REMOVE);
-+	ASSERT_RETURN(msg->items[0].id_change.id == id);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_match_name_add(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_name_change chg;
-+		} item;
-+		char name[64];
-+	} buf;
-+	struct kdbus_msg *msg;
-+	char *name;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+
-+	/* install the match rule */
-+	memset(&buf, 0, sizeof(buf));
-+	buf.item.type = KDBUS_ITEM_NAME_ADD;
-+	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
-+	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
-+	strncpy(buf.name, name, sizeof(buf.name) - 1);
-+	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
-+	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
-+
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* acquire the name */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* we should have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_ADD);
-+	ASSERT_RETURN(msg->items[0].name_change.old_id.id == 0);
-+	ASSERT_RETURN(msg->items[0].name_change.new_id.id == env->conn->id);
-+	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_match_name_remove(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_name_change chg;
-+		} item;
-+		char name[64];
-+	} buf;
-+	struct kdbus_msg *msg;
-+	char *name;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+
-+	/* acquire the name */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* install the match rule */
-+	memset(&buf, 0, sizeof(buf));
-+	buf.item.type = KDBUS_ITEM_NAME_REMOVE;
-+	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
-+	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
-+	strncpy(buf.name, name, sizeof(buf.name) - 1);
-+	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
-+	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
-+
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* release the name again */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* we should have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_REMOVE);
-+	ASSERT_RETURN(msg->items[0].name_change.old_id.id == env->conn->id);
-+	ASSERT_RETURN(msg->items[0].name_change.new_id.id == 0);
-+	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_match_name_change(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			struct kdbus_notify_name_change chg;
-+		} item;
-+		char name[64];
-+	} buf;
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	uint64_t flags;
-+	char *name = "foo.bla.baz";
-+	int ret;
-+
-+	/* acquire the name */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* install the match rule */
-+	memset(&buf, 0, sizeof(buf));
-+	buf.item.type = KDBUS_ITEM_NAME_CHANGE;
-+	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
-+	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
-+	strncpy(buf.name, name, sizeof(buf.name) - 1);
-+	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
-+	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
-+
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* queue the 2nd connection as a waiting owner of the same name */
-+	flags = KDBUS_NAME_QUEUE;
-+	ret = kdbus_name_acquire(conn, name, &flags);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(flags & KDBUS_NAME_IN_QUEUE);
-+
-+	/* release name from 1st connection */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* we should have received a notification */
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_CHANGE);
-+	ASSERT_RETURN(msg->items[0].name_change.old_id.id == env->conn->id);
-+	ASSERT_RETURN(msg->items[0].name_change.new_id.id == conn->id);
-+	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
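-+/* Broadcast a KDBUS_MSG_SIGNAL carrying the given bloom filter data */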
-+static int send_bloom_filter(const struct kdbus_conn *conn,
-+			     uint64_t cookie,
-+			     const uint8_t *filter,
-+			     size_t filter_size,
-+			     uint64_t filter_generation)
-+{
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_msg *msg;
-+	struct kdbus_item *item;
-+	uint64_t size;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + filter_size;
-+
-+	msg = alloca(size);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = conn->id;
-+	msg->dst_id = KDBUS_DST_ID_BROADCAST;
-+	msg->flags = KDBUS_MSG_SIGNAL;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+	msg->cookie = cookie;
-+
-+	item = msg->items;
-+	item->type = KDBUS_ITEM_BLOOM_FILTER;
-+	item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) +
-+				filter_size;
-+
-+	item->bloom_filter.generation = filter_generation;
-+	memcpy(item->bloom_filter.data, filter, filter_size);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(conn->fd, &cmd);
-+	if (ret < 0) {
-+		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	return 0;
-+}
-+
-+int kdbus_test_match_bloom(struct kdbus_test_env *env)
-+{
-+	struct {
-+		struct kdbus_cmd_match cmd;
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			uint8_t data_gen0[64];
-+			uint8_t data_gen1[64];
-+		} item;
-+	} buf;
-+	struct kdbus_conn *conn;
-+	struct kdbus_msg *msg;
-+	uint64_t cookie = 0xf000f00f;
-+	uint8_t filter[64];
-+	int ret;
-+
-+	/* install the match rule */
-+	memset(&buf, 0, sizeof(buf));
-+	buf.cmd.size = sizeof(buf);
-+
-+	buf.item.size = sizeof(buf.item);
-+	buf.item.type = KDBUS_ITEM_BLOOM_MASK;
-+	buf.item.data_gen0[0] = 0x55;
-+	buf.item.data_gen0[63] = 0x80;
-+
-+	buf.item.data_gen1[1] = 0xaa;
-+	buf.item.data_gen1[9] = 0x02;
-+
-+	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* a message with a zeroed-out filter must not reach the other peer */
-+	memset(filter, 0, sizeof(filter));
-+	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/* now set the filter to the connection's mask and expect success */
-+	filter[0] = 0x55;
-+	filter[63] = 0x80;
-+	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	/* broaden the filter and try again. this should also succeed. */
-+	filter[0] = 0xff;
-+	filter[8] = 0xff;
-+	filter[63] = 0xff;
-+	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	/* the same filter must not match against bloom generation 1 */
-+	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/* set a different filter and try again */
-+	filter[1] = 0xaa;
-+	filter[9] = 0x02;
-+	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(env->conn, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
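The accept/drop pattern asserted in kdbus_test_match_bloom() is consistent with a subset rule: a broadcast is delivered only when, for the matching generation, every bit set in the receiver's mask is also set in the sender's filter. A small userspace model of that rule (an assumption inferred from the test's expectations, not code taken from the kernel):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* deliver iff (filter & mask) == mask, checked bytewise */
static bool bloom_subset_match(const uint8_t *filter, const uint8_t *mask,
			       size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if ((filter[i] & mask[i]) != mask[i])
			return false;

	return true;
}

With the masks installed above, a zeroed filter fails, the exact mask and any broader filter pass, and the generation-1 mask only passes once bytes 1 and 9 carry its bits.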
-diff --git a/tools/testing/selftests/kdbus/test-message.c b/tools/testing/selftests/kdbus/test-message.c
-new file mode 100644
-index 0000000..563dc85
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-message.c
-@@ -0,0 +1,734 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <time.h>
-+#include <stdbool.h>
-+#include <sys/eventfd.h>
-+#include <sys/types.h>
-+#include <sys/wait.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+/* maximum number of queued messages from the same individual user */
-+#define KDBUS_CONN_MAX_MSGS			256
-+
-+/* maximum number of queued requests waiting for a reply */
-+#define KDBUS_CONN_MAX_REQUESTS_PENDING		128
-+
-+/* maximum message payload size */
-+#define KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE		(2 * 1024UL * 1024UL)
-+
-+int kdbus_test_message_basic(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *sender;
-+	struct kdbus_msg *msg;
-+	uint64_t cookie = 0x1234abcd5678eeff;
-+	uint64_t offset;
-+	int ret;
-+
-+	sender = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(sender != NULL);
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	ret = kdbus_add_match_empty(conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_empty(sender);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* send over 1st connection */
-+	ret = kdbus_msg_send(sender, NULL, cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Make sure that we do get our own broadcasts */
-+	ret = kdbus_msg_recv(sender, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/* ... and receive on the 2nd */
-+	ret = kdbus_msg_recv_poll(conn, 100, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/* Msgs that expect a reply must have timeout and cookie */
-+	ret = kdbus_msg_send(sender, NULL, 0, KDBUS_MSG_EXPECT_REPLY,
-+			     0, 0, conn->id);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* Faked replies with a valid reply cookie are rejected */
-+	ret = kdbus_msg_send_reply(conn, time(NULL) ^ cookie, sender->id);
-+	ASSERT_RETURN(ret == -EBADSLT);
-+
-+	ret = kdbus_free(conn, offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(sender);
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+static int msg_recv_prio(struct kdbus_conn *conn,
-+			 int64_t requested_prio,
-+			 int64_t expected_prio)
-+{
-+	struct kdbus_cmd_recv recv = {
-+		.size = sizeof(recv),
-+		.flags = KDBUS_RECV_USE_PRIORITY,
-+		.priority = requested_prio,
-+	};
-+	struct kdbus_msg *msg;
-+	int ret;
-+
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	if (ret < 0) {
-+		kdbus_printf("error receiving message: %d (%m)\n", -errno);
-+		return ret;
-+	}
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+	kdbus_msg_dump(conn, msg);
-+
-+	if (msg->priority != expected_prio) {
-+		kdbus_printf("expected message prio %lld, got %lld\n",
-+			     (long long) expected_prio,
-+			     (long long) msg->priority);
-+		return -EINVAL;
-+	}
-+
-+	kdbus_msg_free(msg);
-+	ret = kdbus_free(conn, recv.msg.offset);
-+	if (ret < 0)
-+		return ret;
-+
-+	return 0;
-+}
-+
-+int kdbus_test_message_prio(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *a, *b;
-+	uint64_t cookie = 0;
-+
-+	a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(a && b);
-+
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   25, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -600, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   10, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -35, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -100, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   20, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -15, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -800, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -150, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   10, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -800, a->id) == 0);
-+	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -10, a->id) == 0);
-+
-+	ASSERT_RETURN(msg_recv_prio(a, -200, -800) == 0);
-+	ASSERT_RETURN(msg_recv_prio(a, -100, -800) == 0);
-+	ASSERT_RETURN(msg_recv_prio(a, -400, -600) == 0);
-+	ASSERT_RETURN(msg_recv_prio(a, -400, -600) == -EAGAIN);
-+	ASSERT_RETURN(msg_recv_prio(a, 10, -150) == 0);
-+	ASSERT_RETURN(msg_recv_prio(a, 10, -100) == 0);
-+
-+	kdbus_printf("--- get priority (all)\n");
-+	ASSERT_RETURN(kdbus_msg_recv(a, NULL, NULL) == 0);
-+
-+	kdbus_conn_free(a);
-+	kdbus_conn_free(b);
-+
-+	return TEST_OK;
-+}
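The expected values in the sequence above follow from the assumed KDBUS_RECV_USE_PRIORITY semantics: lower numeric priority is more urgent, and a prioritized receive dequeues the most urgent message whose priority is less than or equal to the requested one, or fails with -EAGAIN if none qualifies. A toy model of that selection (names are illustrative only):

#include <errno.h>
#include <stdint.h>
#include <stddef.h>

/* pick the smallest priority <= requested; -EAGAIN if none qualifies */
static int pick_by_priority(const int64_t *queued, size_t n,
			    int64_t requested, size_t *out)
{
	size_t i, best = 0;
	int found = 0;

	for (i = 0; i < n; i++) {
		if (queued[i] > requested)
			continue;
		if (!found || queued[i] < queued[best]) {
			best = i;
			found = 1;
		}
	}

	if (!found)
		return -EAGAIN;

	*out = best;
	return 0;
}

Applied to the twelve messages sent above, requests at -200 and -100 each drain one of the -800 messages, -400 then yields -600 and afterwards -EAGAIN, and the two requests at 10 yield -150 and then -100.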
-+
-+static int kdbus_test_notify_kernel_quota(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	unsigned int i;
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *reader;
-+	struct kdbus_msg *msg = NULL;
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+
-+	reader = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(reader);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/* Register for ID signals */
-+	ret = kdbus_add_match_id(reader, 0x1, KDBUS_ITEM_ID_ADD,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_id(reader, 0x2, KDBUS_ITEM_ID_REMOVE,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Each iteration two notifications: add and remove ID */
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS / 2; i++) {
-+		struct kdbus_conn *notifier;
-+
-+		notifier = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_RETURN(notifier);
-+
-+		kdbus_conn_free(notifier);
-+	}
-+
-+	/*
-+	 * Now the reader queue is full of kernel notifications,
-+	 * but as a user we still have room to push our messages.
-+	 */
-+	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0, 0, reader->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* More ID kernel notifications that will be lost */
-+	kdbus_conn_free(conn);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	kdbus_conn_free(conn);
-+
-+	/*
-+	 * We lost exactly 3 packets: only signal messages are
-+	 * accounted against the queue, and the ID remove notification
-+	 * of the first freed connection plus the ID add/remove pair
-+	 * of the second one were dropped.
-+	 */
-+	ret = kdbus_cmd_recv(reader->fd, &recv);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv.return_flags & KDBUS_RECV_RETURN_DROPPED_MSGS);
-+	ASSERT_RETURN(recv.dropped_msgs == 3);
-+
-+	msg = (struct kdbus_msg *)(reader->buf + recv.msg.offset);
-+	kdbus_msg_free(msg);
-+
-+	/* Read our queue */
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS - 1; i++) {
-+		memset(&recv, 0, sizeof(recv));
-+		recv.size = sizeof(recv);
-+
-+		ret = kdbus_cmd_recv(reader->fd, &recv);
-+		ASSERT_RETURN(ret == 0);
-+		ASSERT_RETURN(!(recv.return_flags &
-+			        KDBUS_RECV_RETURN_DROPPED_MSGS));
-+
-+		msg = (struct kdbus_msg *)(reader->buf + recv.msg.offset);
-+		kdbus_msg_free(msg);
-+	}
-+
-+	ret = kdbus_msg_recv(reader, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(reader, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	kdbus_conn_free(reader);
-+
-+	return 0;
-+}
-+
-+/* Return the number of messages successfully sent */
-+static int kdbus_fill_conn_queue(struct kdbus_conn *conn_src,
-+				 uint64_t dst_id,
-+				 unsigned int max_msgs)
-+{
-+	unsigned int i;
-+	uint64_t cookie = 0;
-+	size_t size;
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_msg *msg;
-+	int ret;
-+
-+	size = sizeof(struct kdbus_msg);
-+	msg = malloc(size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = conn_src->id;
-+	msg->dst_id = dst_id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	for (i = 0; i < max_msgs; i++) {
-+		msg->cookie = cookie++;
-+		ret = kdbus_cmd_send(conn_src->fd, &cmd);
-+		if (ret < 0)
-+			break;
-+	}
-+
-+	free(msg);
-+
-+	return i;
-+}
-+
-+static int kdbus_test_activator_quota(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	unsigned int i;
-+	unsigned int activator_msgs_count = 0;
-+	uint64_t cookie = time(NULL);
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *sender;
-+	struct kdbus_conn *activator;
-+	struct kdbus_msg *msg;
-+	uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+	struct kdbus_policy_access access = {
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	activator = kdbus_hello_activator(env->buspath, "foo.test.activator",
-+					  &access, 1);
-+	ASSERT_RETURN(activator);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	sender = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn && sender);
-+
-+	ret = kdbus_list(sender, KDBUS_LIST_NAMES |
-+				 KDBUS_LIST_UNIQUE |
-+				 KDBUS_LIST_ACTIVATORS |
-+				 KDBUS_LIST_QUEUED);
-+	ASSERT_RETURN(ret == 0);
-+
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
-+		ret = kdbus_msg_send(sender, "foo.test.activator",
-+				     cookie++, 0, 0, 0,
-+				     KDBUS_DST_ID_NAME);
-+		if (ret < 0)
-+			break;
-+		activator_msgs_count++;
-+	}
-+
-+	/* we must have at least sent one message */
-+	ASSERT_RETURN_VAL(i > 0, -errno);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	/* Good, activator queue is full now */
-+
-+	/* ENXIO on direct send (activators can never be addressed by ID) */
-+	ret = kdbus_msg_send(conn, NULL, cookie++, 0, 0, 0, activator->id);
-+	ASSERT_RETURN(ret == -ENXIO);
-+
-+	/* can't queue more */
-+	ret = kdbus_msg_send(conn, "foo.test.activator", cookie++,
-+			     0, 0, 0, KDBUS_DST_ID_NAME);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	/* no match installed, so the broadcast will not increment dropped_msgs */
-+	ret = kdbus_msg_send(sender, NULL, cookie++, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Check activator queue */
-+	ret = kdbus_cmd_recv(activator->fd, &recv);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv.dropped_msgs == 0);
-+
-+	activator_msgs_count--;
-+
-+	msg = (struct kdbus_msg *)(activator->buf + recv.msg.offset);
-+	kdbus_msg_free(msg);
-+
-+
-+	/* Stage 1) of the test: check the pool memory quota */
-+
-+	/* Consume the connection pool memory */
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
-+		ret = kdbus_msg_send(sender, NULL,
-+				     cookie++, 0, 0, 0, conn->id);
-+		if (ret < 0)
-+			break;
-+	}
-+
-+	/* consume one message, so later at least one can be moved */
-+	memset(&recv, 0, sizeof(recv));
-+	recv.size = sizeof(recv);
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv.dropped_msgs == 0);
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+	kdbus_msg_free(msg);
-+
-+	/* Try to acquire the name now */
-+	ret = kdbus_name_acquire(conn, "foo.test.activator", &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* try to read messages and see if we have lost some */
-+	memset(&recv, 0, sizeof(recv));
-+	recv.size = sizeof(recv);
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv.dropped_msgs != 0);
-+
-+	/* number of dropped msgs < received ones (at least one was moved) */
-+	ASSERT_RETURN(recv.dropped_msgs < activator_msgs_count);
-+
-+	/* Deduct the number of dropped msgs from the activator msgs */
-+	activator_msgs_count -= recv.dropped_msgs;
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+	kdbus_msg_free(msg);
-+
-+	/*
-+	 * Release the name and hand it back to activator, now
-+	 * we should have 'activator_msgs_count' msgs again in
-+	 * the activator queue
-+	 */
-+	ret = kdbus_name_release(conn, "foo.test.activator");
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* make sure that we got our previous activator msgs */
-+	ret = kdbus_msg_recv(activator, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->src_id == sender->id);
-+
-+	activator_msgs_count--;
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/* Stage 2) of the test: check the max message quota */
-+
-+	/* Empty conn queue */
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
-+		ret = kdbus_msg_recv(conn, NULL, NULL);
-+		if (ret == -EAGAIN)
-+			break;
-+	}
-+
-+	/* fill queue with max msgs quota */
-+	ret = kdbus_fill_conn_queue(sender, conn->id, KDBUS_CONN_MAX_MSGS);
-+	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
-+
-+	/* This one is lost but it is not accounted */
-+	ret = kdbus_msg_send(sender, NULL,
-+			     cookie++, 0, 0, 0, conn->id);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	/* Acquire the name again */
-+	ret = kdbus_name_acquire(conn, "foo.test.activator", &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	memset(&recv, 0, sizeof(recv));
-+	recv.size = sizeof(recv);
-+
-+	/*
-+	 * Try to read messages and make sure that we have lost all
-+	 * the activator messages due to quota checks. Our queue is
-+	 * already full.
-+	 */
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv.dropped_msgs == activator_msgs_count);
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+	kdbus_msg_free(msg);
-+
-+	kdbus_conn_free(sender);
-+	kdbus_conn_free(conn);
-+	kdbus_conn_free(activator);
-+
-+	return 0;
-+}
-+
-+static int kdbus_test_expected_reply_quota(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	unsigned int i, n;
-+	unsigned int count;
-+	uint64_t cookie = 0x1234abcd5678eeff;
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *connections[9];
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	for (i = 0; i < 9; i++) {
-+		connections[i] = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_RETURN(connections[i]);
-+	}
-+
-+	count = 0;
-+	/* Send 16 messages to 8 different connections */
-+	for (i = 0; i < 8; i++) {
-+		for (n = 0; n < 16; n++) {
-+			ret = kdbus_msg_send(conn, NULL, cookie++,
-+					     KDBUS_MSG_EXPECT_REPLY,
-+					     100000000ULL, 0,
-+					     connections[i]->id);
-+			if (ret < 0)
-+				break;
-+
-+			count++;
-+		}
-+	}
-+
-+	/*
-+	 * We should have queued exactly
-+	 * KDBUS_CONN_MAX_REQUESTS_PENDING method calls
-+	 * (see the arithmetic note after this function)
-+	 */
-+	ASSERT_RETURN(count == KDBUS_CONN_MAX_REQUESTS_PENDING);
-+
-+	/*
-+	 * Now try to send a message to the last connection,
-+	 * if we have reached KDBUS_CONN_MAX_REQUESTS_PENDING
-+	 * no further requests are allowed
-+	 */
-+	ret = kdbus_msg_send(conn, NULL, cookie++, KDBUS_MSG_EXPECT_REPLY,
-+			     1000000000ULL, 0, connections[8]->id);
-+	ASSERT_RETURN(ret == -EMLINK);
-+
-+	for (i = 0; i < 9; i++)
-+		kdbus_conn_free(connections[i]);
-+
-+	kdbus_conn_free(conn);
-+
-+	return 0;
-+}
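The loop bounds above are sized to hit the per-sender quota exactly; the quota is evidently accounted per sending connection rather than per destination, which is why the very next request to a ninth, untouched peer still fails with -EMLINK. The arithmetic, as a compile-time check:

/* 8 peers x 16 method calls = 128 pending requests from one sender */
_Static_assert(8 * 16 == 128, "matches KDBUS_CONN_MAX_REQUESTS_PENDING");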
-+
-+int kdbus_test_pool_quota(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *a, *b, *c;
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_item *item;
-+	struct kdbus_msg *recv_msg;
-+	struct kdbus_msg *msg;
-+	uint64_t cookie = time(NULL);
-+	uint64_t size;
-+	unsigned int i;
-+	char *payload;
-+	int ret;
-+
-+	/* just a guard */
-+	if (POOL_SIZE <= KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE ||
-+	    POOL_SIZE % KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE != 0)
-+		return 0;
-+
-+	payload = calloc(KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE, sizeof(char));
-+	ASSERT_RETURN_VAL(payload, -ENOMEM);
-+
-+	a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	c = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(a && b && c);
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	msg = malloc(size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = a->id;
-+	msg->dst_id = c->id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	item = msg->items;
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = (uintptr_t)payload;
-+	item->vec.size = KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE;
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	/*
-+	 * Send messages of 2097248 bytes each; a user is only allowed
-+	 * to claim 33% of half of the free space of the pool, and
-+	 * already used space is accounted as free space (see the
-+	 * worked numbers after this function)
-+	 */
-+	size += KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE;
-+	for (i = size; i < (POOL_SIZE / 2 / 3); i += size) {
-+		msg->cookie = cookie++;
-+
-+		ret = kdbus_cmd_send(a->fd, &cmd);
-+		ASSERT_RETURN_VAL(ret == 0, ret);
-+	}
-+
-+	/* Try to get more than 33% */
-+	msg->cookie = cookie++;
-+	ret = kdbus_cmd_send(a->fd, &cmd);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	/* We can still pass small messages */
-+	ret = kdbus_msg_send(b, NULL, cookie++, 0, 0, 0, c->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	for (i = size; i < (POOL_SIZE / 2 / 3); i += size) {
-+		ret = kdbus_msg_recv(c, &recv_msg, NULL);
-+		ASSERT_RETURN(ret == 0);
-+		ASSERT_RETURN(recv_msg->src_id == a->id);
-+
-+		kdbus_msg_free(recv_msg);
-+	}
-+
-+	ret = kdbus_msg_recv(c, &recv_msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(recv_msg->src_id == b->id);
-+
-+	kdbus_msg_free(recv_msg);
-+
-+	ret = kdbus_msg_recv(c, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	free(msg);
-+	free(payload);
-+
-+	kdbus_conn_free(c);
-+	kdbus_conn_free(b);
-+	kdbus_conn_free(a);
-+
-+	return 0;
-+}
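Worked numbers for the bound exercised above, assuming the suite's usual 16 MiB pool (POOL_SIZE is defined elsewhere in these selftests; the concrete value here is an assumption for illustration):

/*
 * limit per sender = POOL_SIZE / 2 / 3 = 16 MiB / 6 ~= 2796202 bytes
 * bytes per send   = 2097248 (2 MiB payload + message/item headers)
 *
 * Only one full-sized message fits under that limit, so the following
 * kdbus_cmd_send() fails with -ENOBUFS, while a small message from a
 * different sender (a different quota bucket) still gets through.
 */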
-+
-+int kdbus_test_message_quota(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *a, *b;
-+	uint64_t cookie = 0;
-+	int ret;
-+	int i;
-+
-+	ret = kdbus_test_activator_quota(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_test_notify_kernel_quota(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_test_pool_quota(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_test_expected_reply_quota(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	b = kdbus_hello(env->buspath, 0, NULL, 0);
-+
-+	ret = kdbus_fill_conn_queue(b, a->id, KDBUS_CONN_MAX_MSGS);
-+	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
-+
-+	ret = kdbus_msg_send(b, NULL, ++cookie, 0, 0, 0, a->id);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	for (i = 0; i < KDBUS_CONN_MAX_MSGS; ++i) {
-+		ret = kdbus_msg_recv(a, NULL, NULL);
-+		ASSERT_RETURN(ret == 0);
-+	}
-+
-+	ret = kdbus_msg_recv(a, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	ret = kdbus_fill_conn_queue(b, a->id, KDBUS_CONN_MAX_MSGS + 1);
-+	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
-+
-+	ret = kdbus_msg_send(b, NULL, ++cookie, 0, 0, 0, a->id);
-+	ASSERT_RETURN(ret == -ENOBUFS);
-+
-+	kdbus_conn_free(a);
-+	kdbus_conn_free(b);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_memory_access(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *a, *b;
-+	struct kdbus_cmd_send cmd = {};
-+	struct kdbus_item *item;
-+	struct kdbus_msg *msg;
-+	uint64_t test_addr = 0;
-+	char line[256];
-+	uint64_t size;
-+	FILE *f;
-+	int ret;
-+
-+	/*
-+	 * Search in /proc/kallsyms for the address of a kernel symbol that
-+	 * should always be there, regardless of the config. Use that address
-+	 * in a PAYLOAD_VEC item and make sure it's inaccessible.
-+	 */
-+
-+	f = fopen("/proc/kallsyms", "r");
-+	if (!f)
-+		return TEST_SKIP;
-+
-+	while (fgets(line, sizeof(line), f)) {
-+		char *s = line;
-+
-+		if (!strsep(&s, " "))
-+			continue;
-+
-+		if (!strsep(&s, " "))
-+			continue;
-+
-+		if (!strncmp(s, "mutex_lock", 10)) {
-+			test_addr = strtoull(line, NULL, 16);
-+			break;
-+		}
-+	}
-+
-+	fclose(f);
-+
-+	if (!test_addr)
-+		return TEST_SKIP;
-+
-+	a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(a && b);
-+
-+	size = sizeof(struct kdbus_msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+	msg = alloca(size);
-+	ASSERT_RETURN_VAL(msg, -ENOMEM);
-+
-+	memset(msg, 0, size);
-+	msg->size = size;
-+	msg->src_id = a->id;
-+	msg->dst_id = b->id;
-+	msg->payload_type = KDBUS_PAYLOAD_DBUS;
-+
-+	item = msg->items;
-+	item->type = KDBUS_ITEM_PAYLOAD_VEC;
-+	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
-+	item->vec.address = test_addr;
-+	item->vec.size = sizeof(void*);
-+	item = KDBUS_ITEM_NEXT(item);
-+
-+	cmd.size = sizeof(cmd);
-+	cmd.msg_address = (uintptr_t)msg;
-+
-+	ret = kdbus_cmd_send(a->fd, &cmd);
-+	ASSERT_RETURN(ret == -EFAULT);
-+
-+	kdbus_conn_free(b);
-+	kdbus_conn_free(a);
-+
-+	return 0;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-metadata-ns.c b/tools/testing/selftests/kdbus/test-metadata-ns.c
-new file mode 100644
-index 0000000..1f6edc0
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-metadata-ns.c
-@@ -0,0 +1,500 @@
-+/*
-+ * Test metadata in new namespaces. Even though our tests can run
-+ * in a namespaced setup, this test is necessary so we can inspect
-+ * metadata on the same kdbusfs across multiple namespaces.
-+ */
-+
-+#include <stdio.h>
-+#include <string.h>
-+#include <sched.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <signal.h>
-+#include <sys/wait.h>
-+#include <sys/prctl.h>
-+#include <sys/eventfd.h>
-+#include <sys/syscall.h>
-+#include <sys/capability.h>
-+#include <linux/sched.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+static const struct kdbus_creds privileged_creds = {};
-+
-+static const struct kdbus_creds unmapped_creds = {
-+	.uid	= UNPRIV_UID,
-+	.euid	= UNPRIV_UID,
-+	.suid	= UNPRIV_UID,
-+	.fsuid	= UNPRIV_UID,
-+	.gid	= UNPRIV_GID,
-+	.egid	= UNPRIV_GID,
-+	.sgid	= UNPRIV_GID,
-+	.fsgid	= UNPRIV_GID,
-+};
-+
-+static const struct kdbus_pids unmapped_pids = {};
-+
-+/* Get only the first item of the given type */
-+static struct kdbus_item *kdbus_get_item(struct kdbus_msg *msg,
-+					 uint64_t type)
-+{
-+	struct kdbus_item *item;
-+
-+	KDBUS_ITEM_FOREACH(item, msg, items)
-+		if (item->type == type)
-+			return item;
-+
-+	return NULL;
-+}
-+
-+static int kdbus_match_kdbus_creds(struct kdbus_msg *msg,
-+				   const struct kdbus_creds *expected_creds)
-+{
-+	struct kdbus_item *item;
-+
-+	item = kdbus_get_item(msg, KDBUS_ITEM_CREDS);
-+	ASSERT_RETURN(item);
-+
-+	ASSERT_RETURN(memcmp(&item->creds, expected_creds,
-+			     sizeof(struct kdbus_creds)) == 0);
-+
-+	return 0;
-+}
-+
-+static int kdbus_match_kdbus_pids(struct kdbus_msg *msg,
-+				  const struct kdbus_pids *expected_pids)
-+{
-+	struct kdbus_item *item;
-+
-+	item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
-+	ASSERT_RETURN(item);
-+
-+	ASSERT_RETURN(memcmp(&item->pids, expected_pids,
-+			     sizeof(struct kdbus_pids)) == 0);
-+
-+	return 0;
-+}
-+
-+static int __kdbus_clone_userns_test(const char *bus,
-+				     struct kdbus_conn *conn,
-+				     uint64_t grandpa_pid,
-+				     int signal_fd)
-+{
-+	int clone_ret;
-+	int ret;
-+	struct kdbus_msg *msg = NULL;
-+	const struct kdbus_item *item;
-+	uint64_t cookie = time(NULL) ^ 0xdeadbeef;
-+	struct kdbus_conn *unpriv_conn = NULL;
-+	struct kdbus_pids parent_pids = {
-+		.pid = getppid(),
-+		.tid = getppid(),
-+		.ppid = grandpa_pid,
-+	};
-+
-+	ret = drop_privileges(UNPRIV_UID, UNPRIV_GID);
-+	ASSERT_EXIT(ret == 0);
-+
-+	unpriv_conn = kdbus_hello(bus, 0, NULL, 0);
-+	ASSERT_EXIT(unpriv_conn);
-+
-+	ret = kdbus_add_match_empty(unpriv_conn);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/*
-+	 * ping privileged connection from this new unprivileged
-+	 * one
-+	 */
-+
-+	ret = kdbus_msg_send(unpriv_conn, NULL, cookie, 0, 0,
-+			     0, conn->id);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/*
-+	 * Since we just dropped privileges, the dumpable flag was
-+	 * cleared, which makes /proc/$clone_child/uid_map owned by
-+	 * root; hence any userns uid mapping will fail with -EPERM,
-+	 * since the mapping will be done by uid 65534.
-+	 *
-+	 * To avoid this, set the dumpable flag again, which makes
-+	 * procfs update the owner of the /proc/$clone_child/ inodes
-+	 * to 65534.
-+	 *
-+	 * Using this we will be able to write to
-+	 * /proc/$clone_child/uid_map as uid 65534 and map uid 65534
-+	 * to 0 inside the user namespace. (A sketch of the mapping
-+	 * helper follows this function.)
-+	 */
-+	ret = prctl(PR_SET_DUMPABLE, SUID_DUMP_USER);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/* Make child privileged in its new userns and run tests */
-+
-+	ret = RUN_CLONE_CHILD(&clone_ret,
-+			      SIGCHLD | CLONE_NEWUSER | CLONE_NEWPID,
-+	({ 0;  /* Clone setup, nothing */ }),
-+	({
-+		eventfd_t event_status = 0;
-+		struct kdbus_conn *userns_conn;
-+
-+		/* ping connection from the new user namespace */
-+		userns_conn = kdbus_hello(bus, 0, NULL, 0);
-+		ASSERT_EXIT(userns_conn);
-+
-+		ret = kdbus_add_match_empty(userns_conn);
-+		ASSERT_EXIT(ret == 0);
-+
-+		cookie++;
-+		ret = kdbus_msg_send(userns_conn, NULL, cookie,
-+				     0, 0, 0, conn->id);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/* Parent did send */
-+		ret = eventfd_read(signal_fd, &event_status);
-+		ASSERT_RETURN(ret >= 0 && event_status == 1);
-+
-+		/*
-+		 * Receive from privileged connection
-+		 */
-+		kdbus_printf("Privileged → unprivileged/privileged "
-+			     "in its userns "
-+			     "(different userns and pidns):\n");
-+		ret = kdbus_msg_recv_poll(userns_conn, 300, &msg, NULL);
-+		ASSERT_EXIT(ret == 0);
-+		ASSERT_EXIT(msg->dst_id == userns_conn->id);
-+
-+		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+		ASSERT_EXIT(item);
-+
-+		/* uid/gid not mapped, so we have unpriv cached creds */
-+		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * Different pid namespaces. This is the child pidns,
-+		 * so it should not see its parent's kdbus_pids.
-+		 */
-+		ret = kdbus_match_kdbus_pids(msg, &unmapped_pids);
-+		ASSERT_EXIT(ret == 0);
-+
-+		kdbus_msg_free(msg);
-+
-+
-+		/*
-+		 * Receive broadcast from privileged connection
-+		 */
-+		kdbus_printf("Privileged → unprivileged/privileged "
-+			     "in its userns "
-+			     "(different userns and pidns):\n");
-+		ret = kdbus_msg_recv_poll(userns_conn, 300, &msg, NULL);
-+		ASSERT_EXIT(ret == 0);
-+		ASSERT_EXIT(msg->dst_id == KDBUS_DST_ID_BROADCAST);
-+
-+		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+		ASSERT_EXIT(item);
-+
-+		/* uid/gid not mapped, so we have unpriv cached creds */
-+		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * Different pid namespaces. This is the child pidns,
-+		 * so it should not see its parent's kdbus_pids.
-+		 */
-+		ret = kdbus_match_kdbus_pids(msg, &unmapped_pids);
-+		ASSERT_EXIT(ret == 0);
-+
-+		kdbus_msg_free(msg);
-+
-+		kdbus_conn_free(userns_conn);
-+	}),
-+	({
-+		/* Parent setup map child uid/gid */
-+		ret = userns_map_uid_gid(pid, "0 65534 1", "0 65534 1");
-+		ASSERT_EXIT(ret == 0);
-+	}),
-+	({ 0; }));
-+	/* Unprivileged was not able to create user namespace */
-+	if (clone_ret == -EPERM) {
-+		kdbus_printf("-- CLONE_NEWUSER TEST Failed for "
-+			     "uid: %u\n -- Make sure that your kernel "
-+			     "does not allow CLONE_NEWUSER for "
-+			     "unprivileged users\n", UNPRIV_UID);
-+		ret = 0;
-+		goto out;
-+	}
-+
-+	ASSERT_EXIT(ret == 0);
-+
-+
-+	/*
-+	 * Receive from privileged connection
-+	 */
-+	kdbus_printf("\nPrivileged → unprivileged (same namespaces):\n");
-+	ret = kdbus_msg_recv_poll(unpriv_conn, 300, &msg, NULL);
-+
-+	ASSERT_EXIT(ret == 0);
-+	ASSERT_EXIT(msg->dst_id == unpriv_conn->id);
-+
-+	/* will get the privileged creds */
-+	ret = kdbus_match_kdbus_creds(msg, &privileged_creds);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/* Same pidns so will get the kdbus_pids */
-+	ret = kdbus_match_kdbus_pids(msg, &parent_pids);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/*
-+	 * Receive broadcast from privileged connection
-+	 */
-+	kdbus_printf("\nPrivileged → unprivileged (same namespaces):\n");
-+	ret = kdbus_msg_recv_poll(unpriv_conn, 300, &msg, NULL);
-+
-+	ASSERT_EXIT(ret == 0);
-+	ASSERT_EXIT(msg->dst_id == KDBUS_DST_ID_BROADCAST);
-+
-+	/* will get the privileged creds */
-+	ret = kdbus_match_kdbus_creds(msg, &privileged_creds);
-+	ASSERT_EXIT(ret == 0);
-+
-+	ret = kdbus_match_kdbus_pids(msg, &parent_pids);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_msg_free(msg);
-+
-+out:
-+	kdbus_conn_free(unpriv_conn);
-+
-+	return ret;
-+}
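userns_map_uid_gid() is a helper from the suite's kdbus-util code; a sketch of what it is assumed to do, namely write "inside outside count" triplets into the clone child's map files (error handling trimmed for brevity):

#include <stdio.h>
#include <sys/types.h>

/* e.g. write_id_map(pid, "uid_map", "0 65534 1") */
static int write_id_map(pid_t pid, const char *file, const char *map)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%ld/%s", (long)pid, file);
	f = fopen(path, "w");
	if (!f)
		return -1;

	fprintf(f, "%s\n", map);
	fclose(f);

	return 0;
}

With the dumpable flag restored as described above, these proc files are owned by uid 65534, so the unprivileged parent can perform the mapping itself.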
-+
-+static int kdbus_clone_userns_test(const char *bus,
-+				   struct kdbus_conn *conn)
-+{
-+	int ret, status, efd;
-+	pid_t pid, ppid;
-+	uint64_t unpriv_conn_id, userns_conn_id;
-+	struct kdbus_msg *msg;
-+	const struct kdbus_item *item;
-+	struct kdbus_pids expected_pids;
-+	struct kdbus_conn *monitor;
-+
-+	kdbus_printf("STARTING TEST 'metadata-ns'.\n");
-+
-+	monitor = kdbus_hello(bus, KDBUS_HELLO_MONITOR, NULL, 0);
-+	ASSERT_EXIT(monitor);
-+
-+	/*
-+	 * The parent will signal the child, which is in its own
-+	 * userns, to read its queue.
-+	 */
-+	efd = eventfd(0, EFD_CLOEXEC);
-+	ASSERT_RETURN_VAL(efd >= 0, efd);
-+
-+	ppid = getppid();
-+
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, -errno);
-+
-+	if (pid == 0) {
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		ASSERT_EXIT_VAL(ret == 0, -errno);
-+
-+		ret = __kdbus_clone_userns_test(bus, conn, ppid, efd);
-+		_exit(ret);
-+	}
-+
-+
-+	/* Phase 1) privileged receives from unprivileged */
-+
-+	/*
-+	 * Receive from the unprivileged child
-+	 */
-+	kdbus_printf("\nUnprivileged → privileged (same namespaces):\n");
-+	ret = kdbus_msg_recv_poll(conn, 300, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	unpriv_conn_id = msg->src_id;
-+
-+	/* Unprivileged user */
-+	ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Set the expected creds_pids */
-+	expected_pids = (struct kdbus_pids) {
-+		.pid = pid,
-+		.tid = pid,
-+		.ppid = getpid(),
-+	};
-+	ret = kdbus_match_kdbus_pids(msg, &expected_pids);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/*
-+	 * Receive from the unprivileged child that is in its own
-+	 * userns and pidns.
-+	 */
-+
-+	kdbus_printf("\nUnprivileged/privileged in its userns → privileged "
-+		     "(different userns and pidns)\n");
-+	ret = kdbus_msg_recv_poll(conn, 300, &msg, NULL);
-+	if (ret == -ETIMEDOUT)
-+		/* perhaps unprivileged userns is not allowed */
-+		goto wait;
-+
-+	ASSERT_RETURN(ret == 0);
-+
-+	userns_conn_id = msg->src_id;
-+
-+	item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+	ASSERT_RETURN(item);
-+
-+	/*
-+	 * Compare received items, creds must be translated into
-+	 * the receiver user namespace, so the user is unprivileged
-+	 */
-+	ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * We should have the kdbus_pids since we are the parent
-+	 * pidns
-+	 */
-+	item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
-+	ASSERT_RETURN(item);
-+
-+	ASSERT_RETURN(memcmp(&item->pids, &unmapped_pids,
-+			     sizeof(struct kdbus_pids)) != 0);
-+
-+	/*
-+	 * Parent pid of the unprivileged/privileged in its userns
-+	 * is the unprivileged child pid that was forked here.
-+	 */
-+	ASSERT_RETURN((uint64_t)pid == item->pids.ppid);
-+
-+	kdbus_msg_free(msg);
-+
-+
-+	/* Phase 2) Privileged connection sends now 3 packets */
-+
-+	/*
-+	 * Send a unicast to the unprivileged connection
-+	 */
-+	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
-+			     0, unpriv_conn_id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* signal to child that is in its userns */
-+	ret = eventfd_write(efd, 1);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/*
-+	 * Send a unicast to the connection that is unprivileged here
-+	 * but privileged in its own userns
-+	 */
-+	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
-+			     0, userns_conn_id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Send a broadcast to the unprivileged connections
-+	 */
-+	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
-+			     0, KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+wait:
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ASSERT_RETURN(WIFEXITED(status));
-+	ASSERT_RETURN(!WEXITSTATUS(status));
-+
-+	/* Dump monitor queue */
-+	kdbus_printf("\n\nMonitor queue:\n");
-+	for (;;) {
-+		ret = kdbus_msg_recv_poll(monitor, 100, &msg, NULL);
-+		if (ret < 0)
-+			break;
-+
-+		if (msg->payload_type == KDBUS_PAYLOAD_DBUS) {
-+			/*
-+			 * Parent pidns should see all the
-+			 * pids
-+			 */
-+			item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
-+			ASSERT_RETURN(item);
-+
-+			ASSERT_RETURN(item->pids.pid != 0 &&
-+				      item->pids.tid != 0 &&
-+				      item->pids.ppid != 0);
-+		}
-+
-+		kdbus_msg_free(msg);
-+	}
-+
-+	kdbus_conn_free(monitor);
-+	close(efd);
-+
-+	return 0;
-+}
-+
-+int kdbus_test_metadata_ns(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	struct kdbus_conn *holder, *conn;
-+	struct kdbus_policy_access policy_access = {
-+		/* Allow world so we can inspect metadata in namespace */
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	/*
-+	 * We require user namespaces and that all uids/gids are
-+	 * mapped (strictly, only the necessary ones would do)
-+	 */
-+	if (!config_user_ns_is_enabled() ||
-+	    !all_uids_gids_are_mapped())
-+		return TEST_SKIP;
-+
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, CAP_SYS_ADMIN, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* not enough privileges, skip the test */
-+	if (!ret)
-+		return TEST_SKIP;
-+
-+	holder = kdbus_hello_registrar(env->buspath, "com.example.metadata",
-+				       &policy_access, 1,
-+				       KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(holder);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_add_match_empty(conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(conn, "com.example.metadata", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	ret = kdbus_clone_userns_test(env->buspath, conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(holder);
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-monitor.c b/tools/testing/selftests/kdbus/test-monitor.c
-new file mode 100644
-index 0000000..e00d738
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-monitor.c
-@@ -0,0 +1,176 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <signal.h>
-+#include <sys/time.h>
-+#include <sys/mman.h>
-+#include <sys/capability.h>
-+#include <sys/wait.h>
-+
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+int kdbus_test_monitor(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *monitor, *conn;
-+	unsigned int cookie = 0xdeadbeef;
-+	struct kdbus_msg *msg;
-+	uint64_t offset = 0;
-+	int ret;
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/* add matches to make sure the monitor does not trigger an item
-+	 * add or remove on connect and disconnect, respectively.
-+	 */
-+	ret = kdbus_add_match_id(conn, 0x1, KDBUS_ITEM_ID_ADD,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_add_match_id(conn, 0x2, KDBUS_ITEM_ID_REMOVE,
-+				 KDBUS_MATCH_ID_ANY);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* register a monitor */
-+	monitor = kdbus_hello(env->buspath, KDBUS_HELLO_MONITOR, NULL, 0);
-+	ASSERT_RETURN(monitor);
-+
-+	/* make sure we did not receive a monitor connect notification */
-+	ret = kdbus_msg_recv(conn, &msg, &offset);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/* check that a monitor cannot acquire a name */
-+	ret = kdbus_name_acquire(monitor, "foo.bar.baz", NULL);
-+	ASSERT_RETURN(ret == -EOPNOTSUPP);
-+
-+	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0,  0, conn->id);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* the recipient should have gotten the message */
-+	ret = kdbus_msg_recv(conn, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+	kdbus_msg_free(msg);
-+	kdbus_free(conn, offset);
-+
-+	/* and so should the monitor */
-+	ret = kdbus_msg_recv(monitor, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+	kdbus_free(monitor, offset);
-+
-+	/* Installing matches for monitors must fail */
-+	ret = kdbus_add_match_empty(monitor);
-+	ASSERT_RETURN(ret == -EOPNOTSUPP);
-+
-+	cookie++;
-+	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* The monitor should get the message. */
-+	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+	kdbus_free(monitor, offset);
-+
-+	/*
-+	 * Since we are the only monitor, update the attach flags
-+	 * and declare that we are not interested in any attached
-+	 * items on receive
-+	 */
-+
-+	ret = kdbus_conn_update_attach_flags(monitor,
-+					     _KDBUS_ATTACH_ALL,
-+					     0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cookie++;
-+	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	ret = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_msg_free(msg);
-+	kdbus_free(monitor, offset);
-+
-+	/*
-+	 * Now we are interested in KDBUS_ITEM_TIMESTAMP and
-+	 * KDBUS_ITEM_CREDS
-+	 */
-+	ret = kdbus_conn_update_attach_flags(monitor,
-+					     _KDBUS_ATTACH_ALL,
-+					     KDBUS_ATTACH_TIMESTAMP |
-+					     KDBUS_ATTACH_CREDS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cookie++;
-+	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == cookie);
-+
-+	ret = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
-+	ASSERT_RETURN(ret == 1);
-+
-+	ret = kdbus_item_in_message(msg, KDBUS_ITEM_CREDS);
-+	ASSERT_RETURN(ret == 1);
-+
-+	/* the KDBUS_ITEM_PID_COMM was not requested */
-+	ret = kdbus_item_in_message(msg, KDBUS_ITEM_PID_COMM);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_msg_free(msg);
-+	kdbus_free(monitor, offset);
-+
-+	kdbus_conn_free(monitor);
-+	/* make sure we did not receive a monitor disconnect notification */
-+	ret = kdbus_msg_recv(conn, &msg, &offset);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	kdbus_conn_free(conn);
-+
-+	/* Make sure that registering a monitor as unprivileged is not allowed */
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	if (ret && all_uids_gids_are_mapped()) {
-+		ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_UID, ({
-+			monitor = kdbus_hello(env->buspath,
-+					      KDBUS_HELLO_MONITOR,
-+					      NULL, 0);
-+			ASSERT_EXIT(!monitor && errno == EPERM);
-+
-+			_exit(EXIT_SUCCESS);
-+		}),
-+		({ 0; }));
-+		ASSERT_RETURN(ret == 0);
-+	}
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-names.c b/tools/testing/selftests/kdbus/test-names.c
-new file mode 100644
-index 0000000..e400dc8
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-names.c
-@@ -0,0 +1,272 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <limits.h>
-+#include <getopt.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+#include "kdbus-test.h"
-+
-+struct test_name {
-+	const char *name;
-+	__u64 owner_id;
-+	__u64 flags;
-+};
-+
-+static bool conn_test_names(const struct kdbus_conn *conn,
-+			    const struct test_name *tests,
-+			    unsigned int n_tests)
-+{
-+	struct kdbus_cmd_list cmd_list = {};
-+	struct kdbus_info *name, *list;
-+	unsigned int i;
-+	int ret;
-+
-+	cmd_list.size = sizeof(cmd_list);
-+	cmd_list.flags = KDBUS_LIST_NAMES |
-+			 KDBUS_LIST_ACTIVATORS |
-+			 KDBUS_LIST_QUEUED;
-+
-+	ret = kdbus_cmd_list(conn->fd, &cmd_list);
-+	ASSERT_RETURN(ret == 0);
-+
-+	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
-+
-+	for (i = 0; i < n_tests; i++) {
-+		const struct test_name *t = tests + i;
-+		bool found = false;
-+
-+		KDBUS_FOREACH(name, list, cmd_list.list_size) {
-+			struct kdbus_item *item;
-+
-+			KDBUS_ITEM_FOREACH(item, name, items) {
-+				if (item->type != KDBUS_ITEM_OWNED_NAME ||
-+				    strcmp(item->name.name, t->name) != 0)
-+					continue;
-+
-+				if (t->owner_id == name->id &&
-+				    t->flags == item->name.flags) {
-+					found = true;
-+					break;
-+				}
-+			}
-+		}
-+
-+		if (!found)
-+			return false;
-+	}
-+
-+	return true;
-+}
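conn_test_names() assumes the usual kdbus list layout: the KDBUS_CMD_LIST reply is a packed run of variable-sized kdbus_info records in the connection's pool, walked by advancing each record's 8-byte-aligned size. A sketch of the iteration macro it relies on, assumed to mirror the UAPI helper (the iterator type only needs a 64-bit 'size' member):

#include <stdint.h>

/* iterate variable-sized records laid out back to back in 'total' bytes */
#define KDBUS_FOREACH_SKETCH(iter, first, total)			\
	for ((iter) = (first);						\
	     (uint8_t *)(iter) < (uint8_t *)(first) + (total);		\
	     (iter) = (void *)((uint8_t *)(iter) +			\
			       (((iter)->size + 7) & ~7ULL)))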
-+
-+static bool conn_is_name_primary_owner(const struct kdbus_conn *conn,
-+				       const char *needle)
-+{
-+	struct test_name t = {
-+		.name = needle,
-+		.owner_id = conn->id,
-+		.flags = KDBUS_NAME_PRIMARY,
-+	};
-+
-+	return conn_test_names(conn, &t, 1);
-+}
-+
-+int kdbus_test_name_basic(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	char *name, *dot_name, *invalid_name, *wildcard_name;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+	dot_name = ".bla.blaz";
-+	invalid_name = "foo";
-+	wildcard_name = "foo.bla.bl.*";
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* acquire the name "foo.bar.xxx" */
-+	ret = kdbus_name_acquire(conn, "foo.bar.xxx", NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Name is not valid, must fail */
-+	ret = kdbus_name_acquire(env->conn, dot_name, NULL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	ret = kdbus_name_acquire(env->conn, invalid_name, NULL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	ret = kdbus_name_acquire(env->conn, wildcard_name, NULL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* check that we can acquire a name */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = conn_is_name_primary_owner(env->conn, name);
-+	ASSERT_RETURN(ret == true);
-+
-+	/* ... and release it again */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = conn_is_name_primary_owner(env->conn, name);
-+	ASSERT_RETURN(ret == false);
-+
-+	/* check that we can't release it again */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	/* check that we can't release a name that we don't own */
-+	ret = kdbus_name_release(env->conn, "foo.bar.xxx");
-+	ASSERT_RETURN(ret == -EADDRINUSE);
-+
-+	/* Name is not valid, must fail */
-+	ret = kdbus_name_release(env->conn, dot_name);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	ret = kdbus_name_release(env->conn, invalid_name);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	ret = kdbus_name_release(env->conn, wildcard_name);
-+	ASSERT_RETURN(ret == -ESRCH);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_name_conflict(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	char *name;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* acquire the name from the 1st connection */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = conn_is_name_primary_owner(env->conn, name);
-+	ASSERT_RETURN(ret == true);
-+
-+	/* check that we also can't acquire it again from the 2nd connection */
-+	ret = kdbus_name_acquire(conn, name, NULL);
-+	ASSERT_RETURN(ret == -EEXIST);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_name_queue(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	struct test_name t[2];
-+	const char *name;
-+	uint64_t flags;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+
-+	flags = 0;
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* acquire the name from the 1st connection */
-+	ret = kdbus_name_acquire(env->conn, name, &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = conn_is_name_primary_owner(env->conn, name);
-+	ASSERT_RETURN(ret == true);
-+
-+	/* queue the 2nd connection as waiting owner */
-+	flags = KDBUS_NAME_QUEUE;
-+	ret = kdbus_name_acquire(conn, name, &flags);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(flags & KDBUS_NAME_IN_QUEUE);
-+
-+	t[0].name = name;
-+	t[0].owner_id = env->conn->id;
-+	t[0].flags = KDBUS_NAME_PRIMARY;
-+	t[1].name = name;
-+	t[1].owner_id = conn->id;
-+	t[1].flags = KDBUS_NAME_QUEUE | KDBUS_NAME_IN_QUEUE;
-+	ret = conn_test_names(conn, t, 2);
-+	ASSERT_RETURN(ret == true);
-+
-+	/* release name from 1st connection */
-+	ret = kdbus_name_release(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* now the name should be owned by the 2nd connection */
-+	t[0].name = name;
-+	t[0].owner_id = conn->id;
-+	t[0].flags = KDBUS_NAME_PRIMARY | KDBUS_NAME_QUEUE;
-+	ret = conn_test_names(conn, t, 1);
-+	ASSERT_RETURN(ret == true);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-+
-+int kdbus_test_name_takeover(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn;
-+	struct test_name t;
-+	const char *name;
-+	uint64_t flags;
-+	int ret;
-+
-+	name = "foo.bla.blaz";
-+
-+	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
-+
-+	/* create a 2nd connection */
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn != NULL);
-+
-+	/* acquire name for 1st connection */
-+	ret = kdbus_name_acquire(env->conn, name, &flags);
-+	ASSERT_RETURN(ret == 0);
-+
-+	t.name = name;
-+	t.owner_id = env->conn->id;
-+	t.flags = KDBUS_NAME_ALLOW_REPLACEMENT | KDBUS_NAME_PRIMARY;
-+	ret = conn_test_names(conn, &t, 1);
-+	ASSERT_RETURN(ret == true);
-+
-+	/* now steal name with 2nd connection */
-+	flags = KDBUS_NAME_REPLACE_EXISTING;
-+	ret = kdbus_name_acquire(conn, name, &flags);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(flags & KDBUS_NAME_ACQUIRED);
-+
-+	ret = conn_is_name_primary_owner(conn, name);
-+	ASSERT_RETURN(ret == true);
-+
-+	kdbus_conn_free(conn);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-policy-ns.c b/tools/testing/selftests/kdbus/test-policy-ns.c
-new file mode 100644
-index 0000000..3437012
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-policy-ns.c
-@@ -0,0 +1,632 @@
-+/*
-+ * Test metadata and policies in new namespaces. Even though our
-+ * tests can run in a namespaced setup, this test is necessary so we
-+ * can inspect policies on the same kdbusfs across multiple
-+ * namespaces.
-+ *
-+ * Copyright (C) 2014-2015 Djalal Harouni
-+ *
-+ * kdbus is free software; you can redistribute it and/or modify it under
-+ * the terms of the GNU Lesser General Public License as published by the
-+ * Free Software Foundation; either version 2.1 of the License, or (at
-+ * your option) any later version.
-+ */
-+
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <pthread.h>
-+#include <sched.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <unistd.h>
-+#include <errno.h>
-+#include <signal.h>
-+#include <sys/wait.h>
-+#include <sys/prctl.h>
-+#include <sys/eventfd.h>
-+#include <sys/syscall.h>
-+#include <sys/capability.h>
-+#include <linux/sched.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+#define MAX_CONN	64
-+#define POLICY_NAME	"foo.test.policy-test"
-+
-+#define KDBUS_CONN_MAX_MSGS_PER_USER            16
-+
-+/**
-+ * Note: this test can be used to inspect policy_db->talk_access_hash
-+ *
-+ * The purpose of these tests:
-+ * 1) Check KDBUS_POLICY_TALK
-+ * 2) Check the cache state: kdbus_policy_db->talk_access_hash
-+ * Should be extended
-+ */
-+
-+/**
-+ * Check a list of connections against conn_db[0]
-+ * conn_db[0] will own the name "foo.test.policy-test" and the
-+ * policy holder connection for this name will update the policy
-+ * entries, so different use cases can be tested.
-+ */
-+static struct kdbus_conn **conn_db;
-+
-+static void *kdbus_recv_echo(void *ptr)
-+{
-+	int ret;
-+	struct kdbus_conn *conn = ptr;
-+
-+	ret = kdbus_msg_recv_poll(conn, 200, NULL, NULL);
-+
-+	return (void *)(long)ret;
-+}
-+
-+/* Trigger kdbus_policy_set() */
-+static int kdbus_set_policy_talk(struct kdbus_conn *conn,
-+				 const char *name,
-+				 uid_t id, unsigned int type)
-+{
-+	int ret;
-+	struct kdbus_policy_access access = {
-+		.type = type,
-+		.id = id,
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn, name, &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	return TEST_OK;
-+}
-+
-+/* Return TEST_OK on success, TEST_ERR on failure */
-+static int kdbus_register_same_activator(char *bus, const char *name,
-+					 struct kdbus_conn **c)
-+{
-+	int ret;
-+	struct kdbus_conn *activator;
-+
-+	activator = kdbus_hello_activator(bus, name, NULL, 0);
-+	if (activator) {
-+		*c = activator;
-+		fprintf(stderr, "--- error: was able to register name '%s' twice.\n",
-+			name);
-+		return TEST_ERR;
-+	}
-+
-+	ret = -errno;
-+	/* -EEXIST means test succeeded */
-+	if (ret == -EEXIST)
-+		return TEST_OK;
-+
-+	return TEST_ERR;
-+}
-+
-+/* Return TEST_OK on success, TEST_ERR on failure */
-+static int kdbus_register_policy_holder(char *bus, const char *name,
-+					struct kdbus_conn **conn)
-+{
-+	struct kdbus_conn *c;
-+	struct kdbus_policy_access access[2];
-+
-+	access[0].type = KDBUS_POLICY_ACCESS_USER;
-+	access[0].access = KDBUS_POLICY_OWN;
-+	access[0].id = geteuid();
-+
-+	access[1].type = KDBUS_POLICY_ACCESS_WORLD;
-+	access[1].access = KDBUS_POLICY_TALK;
-+	access[1].id = geteuid();
-+
-+	c = kdbus_hello_registrar(bus, name, access, 2,
-+				  KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(c);
-+
-+	*conn = c;
-+
-+	return TEST_OK;
-+}
-+
-+/**
-+ * Create new threads for receiving from multiple senders.
-+ * 'conn_db' will be populated with the newly created connections.
-+ * Caller should free all allocated connections.
-+ *
-+ * return 0 on success, negative errno on failure.
-+ */
-+static int kdbus_recv_in_threads(const char *bus, const char *name,
-+				 struct kdbus_conn **conn_db)
-+{
-+	int ret;
-+	bool pool_full = false;
-+	unsigned int sent_packets = 0;
-+	unsigned int lost_packets = 0;
-+	unsigned int i, tid;
-+	unsigned long dst_id;
-+	unsigned long cookie = 1;
-+	unsigned int thread_nr = MAX_CONN - 1;
-+	pthread_t thread_id[MAX_CONN - 1] = {'\0'};
-+
-+	dst_id = name ? KDBUS_DST_ID_NAME : conn_db[0]->id;
-+
-+	for (tid = 0, i = 1; tid < thread_nr; tid++, i++) {
-+		ret = pthread_create(&thread_id[tid], NULL,
-+				     kdbus_recv_echo, (void *)conn_db[0]);
-+		if (ret) {
-+			/* pthread_create() returns a positive errno value */
-+			ret = -ret;
-+			kdbus_printf("error pthread_create: %d (%s)\n",
-+				      ret, strerror(-ret));
-+			break;
-+		}
-+
-+		/* just free before re-using */
-+		kdbus_conn_free(conn_db[i]);
-+		conn_db[i] = NULL;
-+
-+		/* We need to create connections here */
-+		conn_db[i] = kdbus_hello(bus, 0, NULL, 0);
-+		if (!conn_db[i]) {
-+			ret = -errno;
-+			break;
-+		}
-+
-+		ret = kdbus_add_match_empty(conn_db[i]);
-+		if (ret < 0)
-+			break;
-+
-+		ret = kdbus_msg_send(conn_db[i], name, cookie++,
-+				     0, 0, 0, dst_id);
-+		if (ret < 0) {
-+			/*
-+			 * The receivers are not reading their messages,
-+			 * perhaps they were not scheduled yet.
-+			 *
-+			 * So flag the pool as full here; the connection
-+			 * pool or queue may have filled up, and we will
-+			 * recheck the receivers' errors later.
-+			 */
-+			if (ret == -ENOBUFS || ret == -EXFULL)
-+				pool_full = true;
-+			break;
-+		}
-+
-+		sent_packets++;
-+	}
-+
-+	for (tid = 0; tid < thread_nr; tid++) {
-+		int thread_ret = 0;
-+
-+		if (thread_id[tid]) {
-+			pthread_join(thread_id[tid], (void *)&thread_ret);
-+			if (thread_ret < 0) {
-+				/* Update only if send did not fail */
-+				if (ret == 0)
-+					ret = thread_ret;
-+
-+				lost_packets++;
-+			}
-+		}
-+	}
-+
-+	/*
-+	 * If sending failed with -ENOBUFS or -EXFULL, then some
-+	 * packets must have been lost, and we should have sent at
-+	 * least KDBUS_CONN_MAX_MSGS_PER_USER messages
-+	 */
-+	if (pool_full) {
-+		ASSERT_RETURN(lost_packets > 0);
-+
-+		/*
-+		 * We should at least send KDBUS_CONN_MAX_MSGS_PER_USER
-+		 *
-+		 * For every send operation we create a thread to
-+		 * recv the packet, so we keep the queue clean
-+		 */
-+		ASSERT_RETURN(sent_packets >= KDBUS_CONN_MAX_MSGS_PER_USER);
-+
-+		/*
-+		 * Set ret to zero since we only failed due to
-+		 * the receiving threads that have not been
-+		 * scheduled
-+		 */
-+		ret = 0;
-+	}
-+
-+	return ret;
-+}
-+
-+/* Return: TEST_OK on success, TEST_ERR on failure */
-+static int kdbus_normal_test(const char *bus, const char *name,
-+			     struct kdbus_conn **conn_db)
-+{
-+	int ret;
-+
-+	ret = kdbus_recv_in_threads(bus, name, conn_db);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	return TEST_OK;
-+}
-+
-+static int kdbus_fork_test_by_id(const char *bus,
-+				 struct kdbus_conn **conn_db,
-+				 int parent_status, int child_status)
-+{
-+	int ret;
-+	pid_t pid;
-+	uint64_t cookie = 0x9876ecba;
-+	struct kdbus_msg *msg = NULL;
-+	uint64_t offset = 0;
-+	int status = 0;
-+
-+	/*
-+	 * If the child_status is not EXIT_SUCCESS, then we expect
-+	 * that sending from the child will fail, so the receive
-+	 * in the parent must time out with -ETIMEDOUT, and vice versa.
-+	 */
-+	bool parent_timedout = !!child_status;
-+	bool child_timedout = !!parent_status;
-+
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		struct kdbus_conn *conn_src;
-+
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = drop_privileges(65534, 65534);
-+		ASSERT_EXIT(ret == 0);
-+
-+		conn_src = kdbus_hello(bus, 0, NULL, 0);
-+		ASSERT_EXIT(conn_src);
-+
-+		ret = kdbus_add_match_empty(conn_src);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * child_status is always checked against send
-+		 * operations; if the check fails, always return
-+		 * EXIT_FAILURE.
-+		 */
-+		ret = kdbus_msg_send(conn_src, NULL, cookie,
-+				     0, 0, 0, conn_db[0]->id);
-+		ASSERT_EXIT(ret == child_status);
-+
-+		ret = kdbus_msg_recv_poll(conn_src, 100, NULL, NULL);
-+
-+		kdbus_conn_free(conn_src);
-+
-+		/*
-+		 * The child's kdbus_msg_recv_poll() should time out
-+		 * since parent_status was set to a non-EXIT_SUCCESS
-+		 * value.
-+		 */
-+		if (child_timedout)
-+			_exit(ret == -ETIMEDOUT ? EXIT_SUCCESS : EXIT_FAILURE);
-+
-+		_exit(ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
-+	}
-+
-+	ret = kdbus_msg_recv_poll(conn_db[0], 100, &msg, &offset);
-+	/*
-+	 * If parent_timedout is set then this should fail with
-+	 * -ETIMEDOUT since child_status was set to a
-+	 * non-EXIT_SUCCESS value. Otherwise, assume
-+	 * that kdbus_msg_recv_poll() has succeeded.
-+	 */
-+	if (parent_timedout) {
-+		ASSERT_RETURN_VAL(ret == -ETIMEDOUT, TEST_ERR);
-+
-+		/* Timed out; no need to continue since we don't have the
-+		 * child connection ID, so just terminate. */
-+		goto out;
-+	} else {
-+		ASSERT_RETURN_VAL(ret == 0, ret);
-+	}
-+
-+	ret = kdbus_msg_send(conn_db[0], NULL, ++cookie,
-+			     0, 0, 0, msg->src_id);
-+	/*
-+	 * parent_status is checked against send operations,
-+	 * on failures always return TEST_ERR.
-+	 */
-+	ASSERT_RETURN_VAL(ret == parent_status, TEST_ERR);
-+
-+	kdbus_msg_free(msg);
-+	kdbus_free(conn_db[0], offset);
-+
-+out:
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
-+
-+/*
-+ * Return: TEST_OK, TEST_ERR or TEST_SKIP.
-+ * We return TEST_OK only if the child exits with the expected
-+ * 'expected_status' that is specified as an argument.
-+ */
-+static int kdbus_fork_test(const char *bus, const char *name,
-+			   struct kdbus_conn **conn_db, int expected_status)
-+{
-+	pid_t pid;
-+	int ret = 0;
-+	int status = 0;
-+
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = drop_privileges(65534, 65534);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = kdbus_recv_in_threads(bus, name, conn_db);
-+		_exit(ret == expected_status ? EXIT_SUCCESS : EXIT_FAILURE);
-+	}
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
-+
-+/* Return EXIT_SUCCESS, EXIT_FAILURE or negative errno */
-+static int __kdbus_clone_userns_test(const char *bus,
-+				     const char *name,
-+				     struct kdbus_conn **conn_db,
-+				     int expected_status)
-+{
-+	int efd;
-+	pid_t pid;
-+	int ret = 0;
-+	unsigned int uid = 65534;
-+	int status;
-+
-+	ret = drop_privileges(uid, uid);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	/*
-+	 * Since we just dropped privileges, the dumpable flag was
-+	 * cleared, which causes /proc/$clone_child/uid_map to be
-+	 * owned by root; any userns uid mapping would then fail with
-+	 * -EPERM since the mapping is done by uid 65534.
-+	 *
-+	 * To avoid this, set the dumpable flag again, which makes
-+	 * procfs update the owner of the /proc/$clone_child/ inodes
-+	 * to 65534.
-+	 *
-+	 * With that we are able to write to /proc/$clone_child/uid_map
-+	 * as uid 65534 and map uid 65534 to 0 inside the user
-+	 * namespace.
-+	 */
-+	ret = prctl(PR_SET_DUMPABLE, SUID_DUMP_USER);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	/* sync parent/child */
-+	efd = eventfd(0, EFD_CLOEXEC);
-+	ASSERT_RETURN_VAL(efd >= 0, efd);
-+
-+	pid = syscall(__NR_clone, SIGCHLD|CLONE_NEWUSER, NULL);
-+	if (pid < 0) {
-+		ret = -errno;
-+		kdbus_printf("error clone: %d (%m)\n", ret);
-+		/*
-+		 * A normal user is not allowed to create a userns,
-+		 * so there is nothing to worry about.
-+		 */
-+		if (ret == -EPERM) {
-+			kdbus_printf("-- CLONE_NEWUSER TEST Failed for uid: %u\n"
-+				"-- Make sure that your kernel does not allow "
-+				"CLONE_NEWUSER for unprivileged users\n"
-+				"-- Upstream Commit: "
-+				"https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=5eaf563e\n",
-+				uid);
-+			ret = 0;
-+		}
-+
-+		return ret;
-+	}
-+
-+	if (pid == 0) {
-+		struct kdbus_conn *conn_src;
-+		eventfd_t event_status = 0;
-+
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = eventfd_read(efd, &event_status);
-+		ASSERT_EXIT(ret >= 0 && event_status == 1);
-+
-+		/* ping connection from the new user namespace */
-+		conn_src = kdbus_hello(bus, 0, NULL, 0);
-+		ASSERT_EXIT(conn_src);
-+
-+		ret = kdbus_add_match_empty(conn_src);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = kdbus_msg_send(conn_src, name, 0xabcd1234,
-+				     0, 0, 0, KDBUS_DST_ID_NAME);
-+		kdbus_conn_free(conn_src);
-+
-+		_exit(ret == expected_status ? EXIT_SUCCESS : EXIT_FAILURE);
-+	}
-+
-+	ret = userns_map_uid_gid(pid, "0 65534 1", "0 65534 1");
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	/* Tell child we are ready */
-+	ret = eventfd_write(efd, 1);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	close(efd);
-+
-+	return status == EXIT_SUCCESS ? TEST_OK : TEST_ERR;
-+}
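
userns_map_uid_gid() is a helper from the suite's kdbus-util code; below is a hedged sketch of what such a helper plausibly has to do from the parent side. The procfs paths and the "inside-id outside-id count" map format are standard; map_uid_gid() and write_file() are illustrative names. On kernels from 3.19 onward, an unprivileged writer must also write "deny" to /proc/$pid/setgroups before gid_map will accept a mapping:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static int write_file(const char *path, const char *buf)
{
	ssize_t len = strlen(buf);
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

static int map_uid_gid(pid_t pid, const char *uid_map, const char *gid_map)
{
	char path[64];

	snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
	if (write_file(path, uid_map) < 0)
		return -1;

	/* required before gid_map on >= 3.19; may be absent earlier */
	snprintf(path, sizeof(path), "/proc/%d/setgroups", (int)pid);
	write_file(path, "deny");

	snprintf(path, sizeof(path), "/proc/%d/gid_map", (int)pid);
	return write_file(path, gid_map);
}

With a mapping string such as "0 65534 1", uid 65534 outside the namespace becomes uid 0 inside it, which is exactly what the test above sets up.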
-+
-+static int kdbus_clone_userns_test(const char *bus,
-+				   const char *name,
-+				   struct kdbus_conn **conn_db,
-+				   int expected_status)
-+{
-+	pid_t pid;
-+	int ret = 0;
-+	int status;
-+
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, -errno);
-+
-+	if (pid == 0) {
-+		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
-+		if (ret < 0)
-+			_exit(EXIT_FAILURE);
-+
-+		ret = __kdbus_clone_userns_test(bus, name, conn_db,
-+						expected_status);
-+		_exit(ret);
-+	}
-+
-+	/*
-+	 * Receive in the original (root privileged) user namespace,
-+	 * must fail with -ETIMEDOUT.
-+	 */
-+	ret = kdbus_msg_recv_poll(conn_db[0], 100, NULL, NULL);
-+	ASSERT_RETURN_VAL(ret == -ETIMEDOUT, ret);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
-+
-+int kdbus_test_policy_ns(struct kdbus_test_env *env)
-+{
-+	int i;
-+	int ret;
-+	struct kdbus_conn *activator = NULL;
-+	struct kdbus_conn *policy_holder = NULL;
-+	char *bus = env->buspath;
-+
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* not enough privileges, SKIP test */
-+	if (!ret)
-+		return TEST_SKIP;
-+
-+	/* we require user-namespaces */
-+	if (access("/proc/self/uid_map", F_OK) != 0)
-+		return TEST_SKIP;
-+
-+	/* uids/gids must be mapped */
-+	if (!all_uids_gids_are_mapped())
-+		return TEST_SKIP;
-+
-+	conn_db = calloc(MAX_CONN, sizeof(struct kdbus_conn *));
-+	ASSERT_RETURN(conn_db);
-+
-+	memset(conn_db, 0, MAX_CONN * sizeof(struct kdbus_conn *));
-+
-+	conn_db[0] = kdbus_hello(bus, 0, NULL, 0);
-+	ASSERT_RETURN(conn_db[0]);
-+
-+	ret = kdbus_add_match_empty(conn_db[0]);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_fork_test_by_id(bus, conn_db, -EPERM, -EPERM);
-+	ASSERT_EXIT(ret == 0);
-+
-+	ret = kdbus_register_policy_holder(bus, POLICY_NAME,
-+					   &policy_holder);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Try to register the same name with an activator */
-+	ret = kdbus_register_same_activator(bus, POLICY_NAME,
-+					    &activator);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Acquire POLICY_NAME */
-+	ret = kdbus_name_acquire(conn_db[0], POLICY_NAME, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_normal_test(bus, POLICY_NAME, conn_db);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_list(conn_db[0], KDBUS_LIST_NAMES |
-+				     KDBUS_LIST_UNIQUE |
-+				     KDBUS_LIST_ACTIVATORS |
-+				     KDBUS_LIST_QUEUED);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_fork_test(bus, POLICY_NAME, conn_db, EXIT_SUCCESS);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Child connections are able to talk to conn_db[0] since the
-+	 * current POLICY_NAME TALK type is KDBUS_POLICY_ACCESS_WORLD,
-+	 * so expect EXIT_SUCCESS when sending from the child. The
-+	 * child's connection does not own any well-known name, so the
-+	 * parent conn_db[0] would normally fail with -EPERM, but
-+	 * since it is a privileged bus user the TALK is allowed.
-+	 */
-+	ret = kdbus_fork_test_by_id(bus, conn_db,
-+				    EXIT_SUCCESS, EXIT_SUCCESS);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/*
-+	 * The connections that were allowed to talk may have been
-+	 * destroyed by now. Restrict the policy and purge cache
-+	 * entries where conn_db[0] is the destination.
-+	 *
-+	 * Now only connections with uid == 0 are allowed to talk.
-+	 */
-+	ret = kdbus_set_policy_talk(policy_holder, POLICY_NAME,
-+				    geteuid(), KDBUS_POLICY_ACCESS_USER);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test connections (FORK+DROP) again:
-+	 * after setting the policy, re-check the connections;
-+	 * we expect the children to fail with -EPERM.
-+	 */
-+	ret = kdbus_fork_test(bus, POLICY_NAME, conn_db, -EPERM);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Now expect both parent and child to fail.
-+	 *
-+	 * The child should fail with -EPERM since we just restricted
-+	 * the POLICY_NAME TALK to uid 0 and its uid is 65534.
-+	 *
-+	 * The parent's connection will time out when receiving from
-+	 * the child, so we never get past that point; the -EPERM
-+	 * passed for the parent is just a placeholder.
-+	 */
-+	ret = kdbus_fork_test_by_id(bus, conn_db, -EPERM, -EPERM);
-+	ASSERT_EXIT(ret == 0);
-+
-+	/* Check if the name can be reached in a new userns */
-+	ret = kdbus_clone_userns_test(bus, POLICY_NAME, conn_db, -EPERM);
-+	ASSERT_RETURN(ret == 0);
-+
-+	for (i = 0; i < MAX_CONN; i++)
-+		kdbus_conn_free(conn_db[i]);
-+
-+	kdbus_conn_free(activator);
-+	kdbus_conn_free(policy_holder);
-+
-+	free(conn_db);
-+
-+	return ret;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-policy-priv.c b/tools/testing/selftests/kdbus/test-policy-priv.c
-new file mode 100644
-index 0000000..0208638
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-policy-priv.c
-@@ -0,0 +1,1285 @@
-+#include <errno.h>
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <unistd.h>
-+#include <time.h>
-+#include <sys/capability.h>
-+#include <sys/eventfd.h>
-+#include <sys/wait.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+static int test_policy_priv_by_id(const char *bus,
-+				  struct kdbus_conn *conn_dst,
-+				  bool drop_second_user,
-+				  int parent_status,
-+				  int child_status)
-+{
-+	int ret = 0;
-+	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
-+
-+	ASSERT_RETURN(conn_dst);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, bus, ({
-+		ret = kdbus_msg_send(unpriv, NULL,
-+				     expected_cookie, 0, 0, 0,
-+				     conn_dst->id);
-+		ASSERT_EXIT(ret == child_status);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_dst, 300, NULL, NULL);
-+	ASSERT_RETURN(ret == parent_status);
-+
-+	return 0;
-+}
-+
-+static int test_policy_priv_by_broadcast(const char *bus,
-+					 struct kdbus_conn *conn_dst,
-+					 int drop_second_user,
-+					 int parent_status,
-+					 int child_status)
-+{
-+	int efd;
-+	int ret = 0;
-+	eventfd_t event_status = 0;
-+	struct kdbus_msg *msg = NULL;
-+	uid_t second_uid = UNPRIV_UID;
-+	gid_t second_gid = UNPRIV_GID;
-+	struct kdbus_conn *child_2 = conn_dst;
-+	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
-+
-+	/* Drop to an unprivileged user other than UNPRIV_UID */
-+	if (drop_second_user == DROP_OTHER_UNPRIV) {
-+		second_uid = UNPRIV_UID - 1;
-+		second_gid = UNPRIV_GID - 1;
-+	}
-+
-+	/* child will signal parent to send broadcast */
-+	efd = eventfd(0, EFD_CLOEXEC);
-+	ASSERT_RETURN_VAL(efd >= 0, efd);
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
-+		struct kdbus_conn *child;
-+
-+		child = kdbus_hello(bus, 0, NULL, 0);
-+		ASSERT_EXIT(child);
-+
-+		ret = kdbus_add_match_empty(child);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/* signal parent */
-+		ret = eventfd_write(efd, 1);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/* Use a slightly longer timeout */
-+		ret = kdbus_msg_recv_poll(child, 500, &msg, NULL);
-+		ASSERT_EXIT(ret == child_status);
-+
-+		/*
-+		 * If we expect the child to get the broadcast
-+		 * message, then check the received cookie.
-+		 */
-+		if (ret == 0) {
-+			ASSERT_EXIT(expected_cookie == msg->cookie);
-+		}
-+
-+		/* Use expected_cookie since 'msg' might be NULL */
-+		ret = kdbus_msg_send(child, NULL, expected_cookie + 1,
-+				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
-+		ASSERT_EXIT(ret == 0);
-+
-+		kdbus_msg_free(msg);
-+		kdbus_conn_free(child);
-+	}),
-+	({
-+		if (drop_second_user == DO_NOT_DROP) {
-+			ASSERT_RETURN(child_2);
-+
-+			ret = eventfd_read(efd, &event_status);
-+			ASSERT_RETURN(ret >= 0 && event_status == 1);
-+
-+			ret = kdbus_msg_send(child_2, NULL,
-+					     expected_cookie, 0, 0, 0,
-+					     KDBUS_DST_ID_BROADCAST);
-+			ASSERT_RETURN(ret == 0);
-+
-+			/* drop own broadcast */
-+			ret = kdbus_msg_recv(child_2, &msg, NULL);
-+			ASSERT_RETURN(ret == 0);
-+			ASSERT_RETURN(msg->src_id == child_2->id);
-+			kdbus_msg_free(msg);
-+
-+			/* Use a slightly longer timeout */
-+			ret = kdbus_msg_recv_poll(child_2, 1000,
-+						  &msg, NULL);
-+			ASSERT_RETURN(ret == parent_status);
-+
-+			/*
-+			 * Check returned cookie in case we expect
-+			 * success.
-+			 */
-+			if (ret == 0) {
-+				ASSERT_RETURN(msg->cookie ==
-+					      expected_cookie + 1);
-+			}
-+
-+			kdbus_msg_free(msg);
-+		} else {
-+			/*
-+			 * Two unprivileged users will try to
-+			 * communicate using broadcast.
-+			 */
-+			ret = RUN_UNPRIVILEGED(second_uid, second_gid, ({
-+				child_2 = kdbus_hello(bus, 0, NULL, 0);
-+				ASSERT_EXIT(child_2);
-+
-+				ret = kdbus_add_match_empty(child_2);
-+				ASSERT_EXIT(ret == 0);
-+
-+				ret = eventfd_read(efd, &event_status);
-+				ASSERT_EXIT(ret >= 0 && event_status == 1);
-+
-+				ret = kdbus_msg_send(child_2, NULL,
-+						expected_cookie, 0, 0, 0,
-+						KDBUS_DST_ID_BROADCAST);
-+				ASSERT_EXIT(ret == 0);
-+
-+				/* drop own broadcast */
-+				ret = kdbus_msg_recv(child_2, &msg, NULL);
-+				ASSERT_RETURN(ret == 0);
-+				ASSERT_RETURN(msg->src_id == child_2->id);
-+				kdbus_msg_free(msg);
-+
-+				/* Use a slightly longer timeout */
-+				ret = kdbus_msg_recv_poll(child_2, 1000,
-+							  &msg, NULL);
-+				ASSERT_EXIT(ret == parent_status);
-+
-+				/*
-+				 * Check returned cookie in case we expect
-+				 * success.
-+				 */
-+				if (ret == 0) {
-+					ASSERT_EXIT(msg->cookie ==
-+						    expected_cookie + 1);
-+				}
-+
-+				kdbus_msg_free(msg);
-+				kdbus_conn_free(child_2);
-+			}),
-+			({ 0; }));
-+			ASSERT_RETURN(ret == 0);
-+		}
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	close(efd);
-+
-+	return ret;
-+}
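
Both this function and the userns test earlier use an eventfd as a one-shot barrier: the reader blocks in eventfd_read() until the writer posts a count. A self-contained sketch of just that handshake, with everything kdbus-specific stripped out:

#include <sys/eventfd.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>

int main(void)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0)
		return EXIT_FAILURE;

	pid_t pid = fork();
	if (pid < 0)
		return EXIT_FAILURE;

	if (pid == 0) {
		eventfd_t v;
		/* blocks until the parent signals readiness */
		if (eventfd_read(efd, &v) < 0 || v != 1)
			_exit(EXIT_FAILURE);
		_exit(EXIT_SUCCESS);
	}

	/* ... parent-side setup would go here (e.g. uid/gid maps) ... */
	if (eventfd_write(efd, 1) < 0)
		return EXIT_FAILURE;

	int status;
	waitpid(pid, &status, 0);
	close(efd);
	return status == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}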
-+
-+static void nosig(int sig)
-+{
-+}
-+
-+static int test_priv_before_policy_upload(struct kdbus_test_env *env)
-+{
-+	int ret = 0;
-+	struct kdbus_conn *conn;
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/*
-+	 * Make sure unprivileged bus users cannot acquire names
-+	 * before any policy holder is registered.
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret < 0);
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users cannot talk by default
-+	 * to privileged ones, unless a policy holder that allows
-+	 * this was uploaded.
-+	 */
-+
-+	ret = test_policy_priv_by_id(env->buspath, conn, false,
-+				     -ETIMEDOUT, -EPERM);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Activate matching for a privileged connection */
-+	ret = kdbus_add_match_empty(conn);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * First make sure that BROADCAST with msg flag
-+	 * KDBUS_MSG_EXPECT_REPLY will fail with -ENOTUNIQ
-+	 */
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, NULL, 0xdeadbeef,
-+				     KDBUS_MSG_EXPECT_REPLY,
-+				     5000000000ULL, 0,
-+				     KDBUS_DST_ID_BROADCAST);
-+		ASSERT_EXIT(ret == -ENOTUNIQ);
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test broadcast with a privileged connection.
-+	 *
-+	 * The first unprivileged receiver should not get the
-+	 * broadcast message sent by the privileged connection,
-+	 * since there is no TALK policy that allows the
-+	 * unprivileged connection to TALK to the privileged one. It
-+	 * will fail with -ETIMEDOUT.
-+	 *
-+	 * The second case:
-+	 * The privileged connection should get the broadcast
-+	 * message from the unprivileged one. Since the receiver is
-+	 * a privileged bus user with default TALK access to all
-+	 * connections, it will receive the broadcast.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, conn,
-+					    DO_NOT_DROP,
-+					    0, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+	/*
-+	 * Test broadcast with two unprivileged connections running
-+	 * under the same user.
-+	 *
-+	 * Both connections should succeed.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
-+					    DROP_SAME_UNPRIV, 0, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test broadcast with two unprivileged connections running
-+	 * under different users.
-+	 *
-+	 * Both connections will fail with -ETIMEDOUT.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
-+					    DROP_OTHER_UNPRIV,
-+					    -ETIMEDOUT, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+
-+	return ret;
-+}
-+
-+static int test_broadcast_after_policy_upload(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	int efd;
-+	eventfd_t event_status = 0;
-+	struct kdbus_msg *msg = NULL;
-+	struct kdbus_conn *owner_a, *owner_b;
-+	struct kdbus_conn *holder_a, *holder_b;
-+	struct kdbus_policy_access access = {};
-+	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
-+
-+	owner_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(owner_a);
-+
-+	ret = kdbus_name_acquire(owner_a, "com.example.broadcastA", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users cannot talk by default
-+	 * to privileged ones, unless a policy holder that allows
-+	 * this was uploaded.
-+	 */
-+
-+	++expected_cookie;
-+	ret = test_policy_priv_by_id(env->buspath, owner_a, false,
-+				     -ETIMEDOUT, -EPERM);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Make sure that the privileged connection won't receive
-+	 * broadcasts unless it installs a match; it will fail with
-+	 * -ETIMEDOUT.
-+	 *
-+	 * At the same time, check that the unprivileged connection
-+	 * will not receive the broadcast message from the privileged
-+	 * one, since the privileged one owns a name with a restricted
-+	 * TALK policy (actually the TALK policy is not registered
-+	 * yet, so we fail by default); thus the unprivileged
-+	 * receiver is not able to TALK to that name.
-+	 */
-+
-+	/* Activate matching for a privileged connection */
-+	ret = kdbus_add_match_empty(owner_a);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Redo the previous test. The privileged conn owner_a is
-+	 * able to TALK to any connection so it will receive the
-+	 * broadcast message now.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
-+					    DO_NOT_DROP,
-+					    0, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test that broadcasts between two unprivileged connections
-+	 * running under the same user still succeed.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
-+					    DROP_SAME_UNPRIV, 0, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test broadcast with two unprivileged connections running
-+	 * under different users.
-+	 *
-+	 * Both connections will fail with -ETIMEDOUT.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
-+					    DROP_OTHER_UNPRIV,
-+					    -ETIMEDOUT, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	holder_a = kdbus_hello_registrar(env->buspath,
-+					 "com.example.broadcastA",
-+					 &access, 1,
-+					 KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(holder_a);
-+
-+	holder_b = kdbus_hello_registrar(env->buspath,
-+					 "com.example.broadcastB",
-+					 &access, 1,
-+					 KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(holder_b);
-+
-+	/* Free connections and their received messages and restart */
-+	kdbus_conn_free(owner_a);
-+
-+	owner_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(owner_a);
-+
-+	/* Activate matching for a privileged connection */
-+	ret = kdbus_add_match_empty(owner_a);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_name_acquire(owner_a, "com.example.broadcastA", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	owner_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(owner_b);
-+
-+	ret = kdbus_name_acquire(owner_b, "com.example.broadcastB", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/* Activate matching for a privileged connection */
-+	ret = kdbus_add_match_empty(owner_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test that even though the names "com.example.broadcastA"
-+	 * and "com.example.broadcastB" grant no TALK access by
-+	 * default, the two owners can still signal each other using
-+	 * broadcasts: they are privileged connections, so they
-+	 * receive all broadcasts if the match allows it.
-+	 */
-+
-+	++expected_cookie;
-+	ret = kdbus_msg_send(owner_a, NULL, expected_cookie, 0,
-+			     0, 0, KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv_poll(owner_a, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+	/* Check src ID */
-+	ASSERT_RETURN(msg->src_id == owner_a->id);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_msg_recv_poll(owner_b, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+	ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+	/* Check src ID */
-+	ASSERT_RETURN(msg->src_id == owner_a->id);
-+
-+	kdbus_msg_free(msg);
-+
-+	/* Release name "com.example.broadcastB" */
-+
-+	ret = kdbus_name_release(owner_b, "com.example.broadcastB");
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/* KDBUS_POLICY_OWN for unprivileged connections */
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	/* Update the policy so unprivileged will own the name */
-+
-+	ret = kdbus_conn_update_policy(holder_b,
-+				       "com.example.broadcastB",
-+				       &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Send broadcasts from an unprivileged connection that
-+	 * owns a name "com.example.broadcastB".
-+	 *
-+	 * We'll have four destinations here:
-+	 *
-+	 * 1) owner_a: privileged connection that owns
-+	 * "com.example.broadcastA". It will receive the broadcast
-+	 * since it is privileged, has default TALK access to all
-+	 * connections, and is subscribed via its match.
-+	 * Will succeed.
-+	 *
-+	 * 2) owner_b: privileged connection (running under a
-+	 * different uid) that does not own any name, but has an
-+	 * empty broadcast match, so it will receive broadcasts
-+	 * since it has default TALK access to all connections.
-+	 *
-+	 * 3) unpriv_a: unprivileged connection that does not own
-+	 * any name. It will receive the broadcast since it runs
-+	 * under the same user as the broadcaster and did install
-+	 * matches. It should get the message.
-+	 *
-+	 * 4) unpriv_b: unprivileged connection that is not
-+	 * interested in broadcast messages, so it did not install
-+	 * broadcast matches. Should fail with -ETIMEDOUT.
-+	 */
-+
-+	++expected_cookie;
-+	efd = eventfd(0, EFD_CLOEXEC);
-+	ASSERT_RETURN_VAL(efd >= 0, efd);
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_UID, ({
-+		struct kdbus_conn *unpriv_owner;
-+		struct kdbus_conn *unpriv_a, *unpriv_b;
-+
-+		unpriv_owner = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_EXIT(unpriv_owner);
-+
-+		unpriv_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_EXIT(unpriv_a);
-+
-+		unpriv_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_EXIT(unpriv_b);
-+
-+		ret = kdbus_name_acquire(unpriv_owner,
-+					 "com.example.broadcastB",
-+					 NULL);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		ret = kdbus_add_match_empty(unpriv_a);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/* Signal that we are doing broadcasts */
-+		ret = eventfd_write(efd, 1);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * Broadcast from a connection that owns the
-+		 * name "com.example.broadcastB".
-+		 */
-+		ret = kdbus_msg_send(unpriv_owner, NULL,
-+				     expected_cookie,
-+				     0, 0, 0,
-+				     KDBUS_DST_ID_BROADCAST);
-+		ASSERT_EXIT(ret == 0);
-+
-+		/*
-+		 * Unprivileged connection running under the same
-+		 * user. It should succeed.
-+		 */
-+		ret = kdbus_msg_recv_poll(unpriv_a, 300, &msg, NULL);
-+		ASSERT_EXIT(ret == 0 && msg->cookie == expected_cookie);
-+
-+		/*
-+		 * unpriv_b did not install matches; it is not
-+		 * interested in broadcasts.
-+		 */
-+		ret = kdbus_msg_recv_poll(unpriv_b, 300, NULL, NULL);
-+		ASSERT_EXIT(ret == -ETIMEDOUT);
-+	}),
-+	({
-+		ret = eventfd_read(efd, &event_status);
-+		ASSERT_RETURN(ret >= 0 && event_status == 1);
-+
-+		/*
-+		 * owner_a is privileged and has a match installed,
-+		 * so it receives the broadcast even though the TALK
-+		 * access to its name "com.example.broadcastA" is
-+		 * restricted.
-+		 */
-+		ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
-+		ASSERT_RETURN(ret == 0);
-+
-+		/* confirm the received cookie */
-+		ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+		kdbus_msg_free(msg);
-+
-+		/*
-+		 * owner_b got the broadcast from an unprivileged
-+		 * connection.
-+		 */
-+		ret = kdbus_msg_recv_poll(owner_b, 300, &msg, NULL);
-+		ASSERT_RETURN(ret == 0);
-+
-+		/* confirm the received cookie */
-+		ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+		kdbus_msg_free(msg);
-+
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	close(efd);
-+
-+	/*
-+	 * Test broadcast with two unprivileged connections running
-+	 * under different users.
-+	 *
-+	 * Both connections will fail with -ETIMEDOUT.
-+	 */
-+
-+	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
-+					    DROP_OTHER_UNPRIV,
-+					    -ETIMEDOUT, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* Drop the broadcasts received by the privileged connections */
-+	ret = kdbus_msg_recv_poll(owner_a, 100, NULL, NULL);
-+	ret = kdbus_msg_recv_poll(owner_a, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(owner_a, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	ret = kdbus_msg_recv_poll(owner_b, 100, NULL, NULL);
-+	ret = kdbus_msg_recv_poll(owner_b, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_msg_recv(owner_b, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
-+
-+	/*
-+	 * Perform the last tests: allow others to talk to the name
-+	 * "com.example.broadcastA". Receiving broadcasts from it
-+	 * should now succeed since the TALK policy allows it.
-+	 */
-+
-+	/* KDBUS_POLICY_TALK for everyone */
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(holder_a,
-+				       "com.example.broadcastA",
-+				       &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * The unprivileged connection is now able to TALK to
-+	 * "com.example.broadcastA", so it will receive its broadcasts.
-+	 */
-+	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
-+					    DO_NOT_DROP, 0, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	++expected_cookie;
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.broadcastB",
-+					 NULL);
-+		ASSERT_EXIT(ret >= 0);
-+		ret = kdbus_msg_send(unpriv, NULL, expected_cookie,
-+				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
-+		ASSERT_EXIT(ret == 0);
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* owner_a is privileged, so it will get the broadcast now. */
-+	ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* confirm the received cookie */
-+	ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/*
-+	 * owner_a released the name "com.example.broadcastA". It
-+	 * should still receive broadcasts since it remains privileged
-+	 * and has the right match.
-+	 *
-+	 * An unprivileged connection will own a name and try to
-+	 * signal the privileged connection.
-+	 */
-+
-+	ret = kdbus_name_release(owner_a, "com.example.broadcastA");
-+	ASSERT_EXIT(ret >= 0);
-+
-+	++expected_cookie;
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.broadcastB",
-+					 NULL);
-+		ASSERT_EXIT(ret >= 0);
-+		ret = kdbus_msg_send(unpriv, NULL, expected_cookie,
-+				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
-+		ASSERT_EXIT(ret == 0);
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* owner_a will get the broadcast now. */
-+	ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* confirm the received cookie */
-+	ASSERT_RETURN(msg->cookie == expected_cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	kdbus_conn_free(owner_a);
-+	kdbus_conn_free(owner_b);
-+	kdbus_conn_free(holder_a);
-+	kdbus_conn_free(holder_b);
-+
-+	return 0;
-+}
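
Condensing what the two broadcast test functions above document: a connection sees a broadcast only if it installed a match, and, when the sender's name carries a restrictive policy, only if the receiver is privileged or explicitly allowed to TALK to the sender. A toy model of that rule; struct peer and receives_broadcast() are illustrative, not kdbus API:

#include <stdbool.h>

struct peer {			/* illustrative model, not kdbus API */
	bool privileged;	/* root-equivalent bus user */
	bool has_match;		/* installed a broadcast match */
	bool talk_allowed;	/* may TALK to the sender's name */
};

static bool receives_broadcast(const struct peer *rx)
{
	if (!rx->has_match)	/* no match, no broadcasts at all */
		return false;
	/* privileged receivers have implicit TALK access to everyone */
	return rx->privileged || rx->talk_allowed;
}

This reproduces the outcomes asserted above: unpriv_b (no match) times out, owner_a and owner_b (privileged, matched) succeed, and an unprivileged receiver without TALK access to the named sender times out.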
-+
-+static int test_policy_priv(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn_a, *conn_b, *conn, *owner;
-+	struct kdbus_policy_access access, *acc;
-+	sigset_t sset;
-+	size_t num;
-+	int ret;
-+
-+	/*
-+	 * Make sure we have CAP_SETUID/SETGID so we can drop privileges
-+	 */
-+
-+	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	if (!ret)
-+		return TEST_SKIP;
-+
-+	/* make sure that uids and gids are mapped */
-+	if (!all_uids_gids_are_mapped())
-+		return TEST_SKIP;
-+
-+	/*
-+	 * Setup:
-+	 *  conn_a: policy holder for com.example.a
-+	 *  conn_b: name holder of com.example.b
-+	 */
-+
-+	signal(SIGUSR1, nosig);
-+	sigemptyset(&sset);
-+	sigaddset(&sset, SIGUSR1);
-+	sigprocmask(SIG_BLOCK, &sset, NULL);
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/*
-+	 * Before registering any policy holder, make sure that the
-+	 * bus is secure by default. This test is necessary: it catches
-+	 * several cases where the old D-Bus was vulnerable.
-+	 */
-+
-+	ret = test_priv_before_policy_upload(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Make sure unprivileged users are not able to register
-+	 * policy holders.
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
-+		struct kdbus_conn *holder;
-+
-+		holder = kdbus_hello_registrar(env->buspath,
-+					       "com.example.a", NULL, 0,
-+					       KDBUS_HELLO_POLICY_HOLDER);
-+		ASSERT_EXIT(holder == NULL && errno == EPERM);
-+	}),
-+	({ 0; }));
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+	/* Register policy holder */
-+
-+	conn_a = kdbus_hello_registrar(env->buspath, "com.example.a",
-+				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn_a);
-+
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_b);
-+
-+	ret = kdbus_name_acquire(conn_b, "com.example.b", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure bus-owners can always acquire names.
-+	 */
-+	ret = kdbus_name_acquire(conn, "com.example.a", NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	kdbus_conn_free(conn);
-+
-+	/*
-+	 * Make sure unprivileged users cannot acquire names with default
-+	 * policy assigned.
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret < 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged users can acquire names if we make them
-+	 * world-accessible.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = 0,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	/*
-+	 * Make sure unprivileged/normal connections are not able
-+	 * to update policies
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_conn_update_policy(unpriv, "com.example.a",
-+					       &access, 1);
-+		ASSERT_EXIT(ret == -EOPNOTSUPP);
-+	}));
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged users can acquire names if we make them
-+	 * gid-accessible. But only if the gid matches.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_GROUP,
-+		.id = UNPRIV_GID,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_GROUP,
-+		.id = 1,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret < 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged users can acquire names if we make them
-+	 * uid-accessible. But only if the uid matches.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = UNPRIV_UID,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = 1,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret < 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged users cannot acquire names if no owner-policy
-+	 * matches, even if SEE/TALK policies match.
-+	 */
-+
-+	num = 4;
-+	acc = (struct kdbus_policy_access[]){
-+		{
-+			.type = KDBUS_POLICY_ACCESS_GROUP,
-+			.id = UNPRIV_GID,
-+			.access = KDBUS_POLICY_SEE,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = UNPRIV_UID,
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_WORLD,
-+			.id = 0,
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_WORLD,
-+			.id = 0,
-+			.access = KDBUS_POLICY_SEE,
-+		},
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", acc, num);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret < 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged users can acquire names if the only matching
-+	 * policy is somewhere in the middle.
-+	 */
-+
-+	num = 5;
-+	acc = (struct kdbus_policy_access[]){
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 1,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 2,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = UNPRIV_UID,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 3,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 4,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", acc, num);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Clear policies
-+	 */
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", NULL, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Make sure privileged bus users can _always_ talk to others.
-+	 */
-+
-+	conn = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_msg_send(conn, "com.example.b", 0xdeadbeef, 0, 0, 0, 0);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_b, 300, NULL, NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	kdbus_conn_free(conn);
-+
-+	/*
-+	 * Make sure unprivileged bus users cannot talk by default.
-+	 */
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users can talk to equals, even without
-+	 * policy.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = UNPRIV_UID,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.c", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		struct kdbus_conn *owner;
-+
-+		owner = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_RETURN(owner);
-+
-+		ret = kdbus_name_acquire(owner, "com.example.c", NULL);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+		ret = kdbus_msg_recv_poll(owner, 100, NULL, NULL);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		kdbus_conn_free(owner);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users can talk to privileged users if a
-+	 * suitable UID policy is set.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = UNPRIV_UID,
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users can talk to privileged users if a
-+	 * suitable GID policy is set.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_GROUP,
-+		.id = UNPRIV_GID,
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users can talk to privileged users if a
-+	 * suitable WORLD policy is set.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = 0,
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users cannot talk to privileged users if
-+	 * no suitable policy is set.
-+	 */
-+
-+	num = 5;
-+	acc = (struct kdbus_policy_access[]){
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 0,
-+			.access = KDBUS_POLICY_OWN,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 1,
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = UNPRIV_UID,
-+			.access = KDBUS_POLICY_SEE,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 3,
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+		{
-+			.type = KDBUS_POLICY_ACCESS_USER,
-+			.id = 4,
-+			.access = KDBUS_POLICY_TALK,
-+		},
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", acc, num);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure unprivileged bus users can talk to privileged users if a
-+	 * suitable OWN privilege overrides TALK.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = 0,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
-+	ASSERT_EXIT(ret >= 0);
-+
-+	/*
-+	 * Make sure the TALK cache is reset correctly when policies are
-+	 * updated.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = 0,
-+		.access = KDBUS_POLICY_TALK,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		ret = kdbus_conn_update_policy(conn_a, "com.example.b",
-+					       NULL, 0);
-+		ASSERT_RETURN(ret == 0);
-+
-+		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret == -EPERM);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/*
-+	 * Make sure the TALK cache is reset correctly when policy holders
-+	 * disconnect.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_WORLD,
-+		.id = 0,
-+		.access = KDBUS_POLICY_OWN,
-+	};
-+
-+	conn = kdbus_hello_registrar(env->buspath, "com.example.c",
-+				     NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_conn_update_policy(conn, "com.example.c", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	owner = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(owner);
-+
-+	ret = kdbus_name_acquire(owner, "com.example.c", NULL);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
-+		struct kdbus_conn *unpriv;
-+
-+		/* wait for parent to be finished */
-+		sigemptyset(&sset);
-+		ret = sigsuspend(&sset);
-+		ASSERT_RETURN(ret == -1 && errno == EINTR);
-+
-+		unpriv = kdbus_hello(env->buspath, 0, NULL, 0);
-+		ASSERT_RETURN(unpriv);
-+
-+		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		ret = kdbus_msg_recv_poll(owner, 100, NULL, NULL);
-+		ASSERT_EXIT(ret >= 0);
-+
-+		/* free policy holder */
-+		kdbus_conn_free(conn);
-+
-+		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
-+				     0, 0);
-+		ASSERT_EXIT(ret == -EPERM);
-+
-+		kdbus_conn_free(unpriv);
-+	}), ({
-+		/* make sure policy holder is only valid in child */
-+		kdbus_conn_free(conn);
-+		kill(pid, SIGUSR1);
-+	}));
-+	ASSERT_RETURN(ret >= 0);
-+
-+
-+	/*
-+	 * Finally, re-run the broadcast tests now that policies were uploaded.
-+	 */
-+
-+	ret = test_broadcast_after_policy_upload(env);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(owner);
-+
-+	/*
-+	 * cleanup resources
-+	 */
-+
-+	kdbus_conn_free(conn_b);
-+	kdbus_conn_free(conn_a);
-+
-+	return TEST_OK;
-+}
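
The OWN-overrides-TALK case above relies on the access levels forming a strict hierarchy: OWN implies TALK implies SEE. A sketch of that ordering; the enum values here are for the sketch only, the real constants live in the kdbus UAPI header:

enum access_level { ACC_SEE = 1, ACC_TALK = 2, ACC_OWN = 3 };

static int grants(enum access_level granted, enum access_level wanted)
{
	return granted >= wanted;	/* each level includes the lower ones */
}

Under this reading, a WORLD/OWN policy entry also satisfies an unprivileged sender's TALK check, which is exactly what the test asserts.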
-+
-+int kdbus_test_policy_priv(struct kdbus_test_env *env)
-+{
-+	pid_t pid;
-+	int ret;
-+
-+	/* make sure to exit() if a child returns from fork() */
-+	pid = getpid();
-+	ret = test_policy_priv(env);
-+	if (pid != getpid())
-+		exit(1);
-+
-+	return ret;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-policy.c b/tools/testing/selftests/kdbus/test-policy.c
-new file mode 100644
-index 0000000..96d20d5
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-policy.c
-@@ -0,0 +1,80 @@
-+#include <errno.h>
-+#include <stdio.h>
-+#include <string.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stdint.h>
-+#include <stdbool.h>
-+#include <unistd.h>
-+
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+int kdbus_test_policy(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn_a, *conn_b;
-+	struct kdbus_policy_access access;
-+	int ret;
-+
-+	/* Invalid name */
-+	conn_a = kdbus_hello_registrar(env->buspath, ".example.a",
-+				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn_a == NULL);
-+
-+	conn_a = kdbus_hello_registrar(env->buspath, "example",
-+				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn_a == NULL);
-+
-+	conn_a = kdbus_hello_registrar(env->buspath, "com.example.a",
-+				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn_a);
-+
-+	conn_b = kdbus_hello_registrar(env->buspath, "com.example.b",
-+				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
-+	ASSERT_RETURN(conn_b);
-+
-+	/*
-+	 * Verify there cannot be any duplicate entries, except for specific vs.
-+	 * wildcard entries.
-+	 */
-+
-+	access = (struct kdbus_policy_access){
-+		.type = KDBUS_POLICY_ACCESS_USER,
-+		.id = geteuid(),
-+		.access = KDBUS_POLICY_SEE,
-+	};
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_policy(conn_b, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == -EEXIST);
-+
-+	ret = kdbus_conn_update_policy(conn_b, "com.example.a.*", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.a.*", &access, 1);
-+	ASSERT_RETURN(ret == -EEXIST);
-+
-+	ret = kdbus_conn_update_policy(conn_a, "com.example.*", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_policy(conn_b, "com.example.a", &access, 1);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_policy(conn_b, "com.example.*", &access, 1);
-+	ASSERT_RETURN(ret == -EEXIST);
-+
-+	/* Invalid name */
-+	ret = kdbus_conn_update_policy(conn_b, ".example.*", &access, 1);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	ret = kdbus_conn_update_policy(conn_b, "example", &access, 1);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	kdbus_conn_free(conn_b);
-+	kdbus_conn_free(conn_a);
-+
-+	return TEST_OK;
-+}
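
The -EINVAL cases at the start and end of this test probe name validity. A hedged sketch of just the two rules the test exercises — a well-known name must not start with '.' and must contain at least one '.' — noting that the real validator enforces more (per-label characters, length limits) than this test touches; name_looks_valid() is an illustrative helper:

#include <stdbool.h>
#include <string.h>

static bool name_looks_valid(const char *name)
{
	if (name == NULL || name[0] == '.')
		return false;			/* rejects ".example.*" */
	return strchr(name, '.') != NULL;	/* rejects "example" */
}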
-diff --git a/tools/testing/selftests/kdbus/test-sync.c b/tools/testing/selftests/kdbus/test-sync.c
-new file mode 100644
-index 0000000..0655a54
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-sync.c
-@@ -0,0 +1,369 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <pthread.h>
-+#include <stdbool.h>
-+#include <signal.h>
-+#include <sys/wait.h>
-+#include <sys/eventfd.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+static struct kdbus_conn *conn_a, *conn_b;
-+static unsigned int cookie = 0xdeadbeef;
-+
-+static void nop_handler(int sig) {}
-+
-+static int interrupt_sync(struct kdbus_conn *conn_src,
-+			  struct kdbus_conn *conn_dst)
-+{
-+	pid_t pid;
-+	int ret, status;
-+	struct kdbus_msg *msg = NULL;
-+	struct sigaction sa = {
-+		.sa_handler = nop_handler,
-+		.sa_flags = SA_NOCLDSTOP|SA_RESTART,
-+	};
-+
-+	cookie++;
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		ret = sigaction(SIGINT, &sa, NULL);
-+		ASSERT_EXIT(ret == 0);
-+
-+		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
-+					  KDBUS_MSG_EXPECT_REPLY,
-+					  100000000ULL, 0, conn_src->id, -1);
-+		ASSERT_EXIT(ret == -ETIMEDOUT);
-+
-+		_exit(EXIT_SUCCESS);
-+	}
-+
-+	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kill(pid, SIGINT);
-+	ASSERT_RETURN_VAL(ret == 0, ret);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	if (WIFSIGNALED(status))
-+		return TEST_ERR;
-+
-+	ret = kdbus_msg_recv_poll(conn_src, 100, NULL, NULL);
-+	ASSERT_RETURN(ret == -ETIMEDOUT);
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
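
interrupt_sync() hinges on SA_RESTART: the no-op SIGINT handler runs, the blocking synchronous send is transparently restarted by the kernel, and the call ultimately returns -ETIMEDOUT rather than being aborted. A small sketch of installing such a handler; without SA_RESTART, the interrupted call would instead fail with EINTR:

#include <signal.h>

static void nop(int sig) { (void)sig; }

static int install_nop_sigint(int restart)
{
	struct sigaction sa = {
		.sa_handler = nop,
		/* with SA_RESTART the interrupted syscall resumes;
		 * without it, the call returns -1 with errno == EINTR */
		.sa_flags = restart ? SA_RESTART : 0,
	};

	return sigaction(SIGINT, &sa, NULL);
}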
-+
-+static int close_epipe_sync(const char *bus)
-+{
-+	pid_t pid;
-+	int ret, status;
-+	struct kdbus_conn *conn_src;
-+	struct kdbus_conn *conn_dst;
-+	struct kdbus_msg *msg = NULL;
-+
-+	conn_src = kdbus_hello(bus, 0, NULL, 0);
-+	ASSERT_RETURN(conn_src);
-+
-+	ret = kdbus_add_match_empty(conn_src);
-+	ASSERT_RETURN(ret == 0);
-+
-+	conn_dst = kdbus_hello(bus, 0, NULL, 0);
-+	ASSERT_RETURN(conn_dst);
-+
-+	cookie++;
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		uint64_t dst_id;
-+
-+		/* close our reference */
-+		dst_id = conn_dst->id;
-+		kdbus_conn_free(conn_dst);
-+
-+		ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
-+		ASSERT_EXIT(ret == 0 && msg->cookie == cookie);
-+		ASSERT_EXIT(msg->src_id == dst_id);
-+
-+		cookie++;
-+		ret = kdbus_msg_send_sync(conn_src, NULL, cookie,
-+					  KDBUS_MSG_EXPECT_REPLY,
-+					  100000000ULL, 0, dst_id, -1);
-+		ASSERT_EXIT(ret == -EPIPE);
-+
-+		_exit(EXIT_SUCCESS);
-+	}
-+
-+	ret = kdbus_msg_send(conn_dst, NULL, cookie, 0, 0, 0,
-+			     KDBUS_DST_ID_BROADCAST);
-+	ASSERT_RETURN(ret == 0);
-+
-+	cookie++;
-+	ret = kdbus_msg_recv_poll(conn_dst, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	/* destroy connection */
-+	kdbus_conn_free(conn_dst);
-+	kdbus_conn_free(conn_src);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	if (!WIFEXITED(status))
-+		return TEST_ERR;
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
-+
-+static int cancel_fd_sync(struct kdbus_conn *conn_src,
-+			  struct kdbus_conn *conn_dst)
-+{
-+	pid_t pid;
-+	int cancel_fd;
-+	int ret, status;
-+	uint64_t counter = 1;
-+	struct kdbus_msg *msg = NULL;
-+
-+	cancel_fd = eventfd(0, 0);
-+	ASSERT_RETURN_VAL(cancel_fd >= 0, cancel_fd);
-+
-+	cookie++;
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
-+					  KDBUS_MSG_EXPECT_REPLY,
-+					  100000000ULL, 0, conn_src->id,
-+					  cancel_fd);
-+		ASSERT_EXIT(ret == -ECANCELED);
-+
-+		_exit(EXIT_SUCCESS);
-+	}
-+
-+	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
-+	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = write(cancel_fd, &counter, sizeof(counter));
-+	ASSERT_RETURN(ret == sizeof(counter));
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	if (WIFSIGNALED(status))
-+		return TEST_ERR;
-+
-+	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
-+}
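
The cancellation mechanism above passes an eventfd as the final argument of kdbus_msg_send_sync(); the blocked sender is woken with -ECANCELED once the fd becomes readable. Only the signaling side is sketched here, since the wait side lives in the suite's helper; note that an eventfd write must be exactly 8 bytes:

#include <stdint.h>
#include <unistd.h>

static int signal_cancel(int cancel_fd)
{
	uint64_t counter = 1;	/* eventfd writes must be exactly 8 bytes */

	return write(cancel_fd, &counter,
		     sizeof(counter)) == sizeof(counter) ? 0 : -1;
}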
-+
-+static int no_cancel_sync(struct kdbus_conn *conn_src,
-+			  struct kdbus_conn *conn_dst)
-+{
-+	pid_t pid;
-+	int cancel_fd;
-+	int ret, status;
-+	struct kdbus_msg *msg = NULL;
-+
-+	/* pass eventfd, but never signal it so it shouldn't have any effect */
-+
-+	cancel_fd = eventfd(0, 0);
-+	ASSERT_RETURN_VAL(cancel_fd >= 0, cancel_fd);
-+
-+	cookie++;
-+	pid = fork();
-+	ASSERT_RETURN_VAL(pid >= 0, pid);
-+
-+	if (pid == 0) {
-+		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
-+					  KDBUS_MSG_EXPECT_REPLY,
-+					  100000000ULL, 0, conn_src->id,
-+					  cancel_fd);
-+		ASSERT_EXIT(ret == 0);
-+
-+		_exit(EXIT_SUCCESS);
-+	}
-+
-+	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
-+	ASSERT_RETURN_VAL(ret == 0 && msg->cookie == cookie, -1);
-+
-+	kdbus_msg_free(msg);
-+
-+	ret = kdbus_msg_send_reply(conn_src, cookie, conn_dst->id);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	ret = waitpid(pid, &status, 0);
-+	ASSERT_RETURN_VAL(ret >= 0, ret);
-+
-+	if (WIFSIGNALED(status))
-+		return -1;
-+
-+	return (status == EXIT_SUCCESS) ? 0 : -1;
-+}
-+
-+static void *run_thread_reply(void *data)
-+{
-+	int ret;
-+	unsigned long status = TEST_OK;
-+
-+	ret = kdbus_msg_recv_poll(conn_a, 3000, NULL, NULL);
-+	if (ret < 0)
-+		goto exit_thread;
-+
-+	kdbus_printf("Thread received message, sending reply ...\n");
-+
-+	/* using an unknown cookie must fail */
-+	ret = kdbus_msg_send_reply(conn_a, ~cookie, conn_b->id);
-+	if (ret != -EBADSLT) {
-+		status = TEST_ERR;
-+		goto exit_thread;
-+	}
-+
-+	ret = kdbus_msg_send_reply(conn_a, cookie, conn_b->id);
-+	if (ret != 0) {
-+		status = TEST_ERR;
-+		goto exit_thread;
-+	}
-+
-+exit_thread:
-+	pthread_exit(NULL);
-+	return (void *) status;
-+}
-+
-+int kdbus_test_sync_reply(struct kdbus_test_env *env)
-+{
-+	unsigned long status;
-+	pthread_t thread;
-+	int ret;
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	pthread_create(&thread, NULL, run_thread_reply, NULL);
-+
-+	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
-+				  KDBUS_MSG_EXPECT_REPLY,
-+				  5000000000ULL, 0, conn_a->id, -1);
-+
-+	pthread_join(thread, (void *) &status);
-+	ASSERT_RETURN(status == 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = interrupt_sync(conn_a, conn_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = close_epipe_sync(env->buspath);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = cancel_fd_sync(conn_a, conn_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = no_cancel_sync(conn_a, conn_b);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_printf("-- closing bus connections\n");
-+
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	return TEST_OK;
-+}
-+
-+#define BYEBYE_ME ((void*)0L)
-+#define BYEBYE_THEM ((void*)1L)
-+
-+static void *run_thread_byebye(void *data)
-+{
-+	struct kdbus_cmd cmd_byebye = { .size = sizeof(cmd_byebye) };
-+	int ret;
-+
-+	ret = kdbus_msg_recv_poll(conn_a, 3000, NULL, NULL);
-+	if (ret == 0) {
-+		kdbus_printf("Thread received message, invoking BYEBYE ...\n");
-+		kdbus_msg_recv(conn_a, NULL, NULL);
-+		if (data == BYEBYE_ME)
-+			kdbus_cmd_byebye(conn_b->fd, &cmd_byebye);
-+		else if (data == BYEBYE_THEM)
-+			kdbus_cmd_byebye(conn_a->fd, &cmd_byebye);
-+	}
-+
-+	pthread_exit(NULL);
-+	return NULL;
-+}
-+
-+int kdbus_test_sync_byebye(struct kdbus_test_env *env)
-+{
-+	pthread_t thread;
-+	int ret;
-+
-+	/*
-+	 * This sends a synchronous message to a thread, which waits until it
-+	 * has received the message and then invokes BYEBYE on the *ORIGINAL*
-+	 * connection. That is, on the same connection that synchronously waits
-+	 * for a reply.
-+	 * This should properly wake the connection up and cause ECONNRESET as
-+	 * the connection is disconnected now.
-+	 *
-+	 * The second time, we do the same but invoke BYEBYE on the *TARGET*
-+	 * connection. This should also wake up the synchronous sender as the
-+	 * reply cannot be sent by a disconnected target.
-+	 */
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	pthread_create(&thread, NULL, run_thread_byebye, BYEBYE_ME);
-+
-+	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
-+				  KDBUS_MSG_EXPECT_REPLY,
-+				  5000000000ULL, 0, conn_a->id, -1);
-+
-+	ASSERT_RETURN(ret == -ECONNRESET);
-+
-+	pthread_join(thread, NULL);
-+
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	pthread_create(&thread, NULL, run_thread_byebye, BYEBYE_THEM);
-+
-+	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
-+				  KDBUS_MSG_EXPECT_REPLY,
-+				  5000000000ULL, 0, conn_a->id, -1);
-+
-+	ASSERT_RETURN(ret == -EPIPE);
-+
-+	pthread_join(thread, NULL);
-+
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	return TEST_OK;
-+}
-diff --git a/tools/testing/selftests/kdbus/test-timeout.c b/tools/testing/selftests/kdbus/test-timeout.c
-new file mode 100644
-index 0000000..cfd1930
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-timeout.c
-@@ -0,0 +1,99 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <time.h>
-+#include <fcntl.h>
-+#include <stdlib.h>
-+#include <stddef.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <poll.h>
-+#include <stdbool.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+int timeout_msg_recv(struct kdbus_conn *conn, uint64_t *expected)
-+{
-+	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
-+	struct kdbus_msg *msg;
-+	int ret;
-+
-+	ret = kdbus_cmd_recv(conn->fd, &recv);
-+	if (ret < 0) {
-+		kdbus_printf("error receiving message: %d (%m)\n", ret);
-+		return ret;
-+	}
-+
-+	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
-+
-+	ASSERT_RETURN_VAL(msg->payload_type == KDBUS_PAYLOAD_KERNEL, -EINVAL);
-+	ASSERT_RETURN_VAL(msg->src_id == KDBUS_SRC_ID_KERNEL, -EINVAL);
-+	ASSERT_RETURN_VAL(msg->dst_id == conn->id, -EINVAL);
-+
-+	*expected &= ~(1ULL << msg->cookie_reply);
-+	kdbus_printf("Got message timeout for cookie %llu\n",
-+		     msg->cookie_reply);
-+
-+	ret = kdbus_free(conn, recv.msg.offset);
-+	if (ret < 0)
-+		return ret;
-+
-+	return 0;
-+}
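
timeout_msg_recv() tracks outstanding replies as bits in a 64-bit mask, clearing one bit per kernel timeout notification. A sketch of that bookkeeping; the sketch masks the shift count to keep it well defined, whereas the test shifts by the raw cookie value (0xdeadbeef and up), which is formally undefined for a 64-bit type and works in practice only because common targets reduce the count modulo 64 and the cookies' low six bits happen to be distinct:

#include <stdint.h>

static uint64_t expected;

static void expect_reply(uint64_t cookie)
{
	expected |= 1ULL << (cookie & 63);	/* mask keeps the shift defined */
}

static void reply_timed_out(uint64_t cookie_reply)
{
	expected &= ~(1ULL << (cookie_reply & 63));
}

static int all_timeouts_seen(void)
{
	return expected == 0;
}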
-+
-+int kdbus_test_timeout(struct kdbus_test_env *env)
-+{
-+	struct kdbus_conn *conn_a, *conn_b;
-+	struct pollfd fd;
-+	int ret, i, n_msgs = 4;
-+	uint64_t expected = 0;
-+	uint64_t cookie = 0xdeadbeef;
-+
-+	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
-+	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
-+	ASSERT_RETURN(conn_a && conn_b);
-+
-+	fd.fd = conn_b->fd;
-+
-+	/*
-+	 * Send messages that expect a reply (with timeouts rising in
-+	 * 100 msec steps), but never answer them.
-+	 */
-+	for (i = 0; i < n_msgs; i++, cookie++) {
-+		kdbus_printf("Sending message with cookie %llu ...\n",
-+			     (unsigned long long)cookie);
-+		ASSERT_RETURN(kdbus_msg_send(conn_b, NULL, cookie,
-+			      KDBUS_MSG_EXPECT_REPLY,
-+			      (i + 1) * 100ULL * 1000000ULL, 0,
-+			      conn_a->id) == 0);
-+		expected |= 1ULL << cookie;
-+	}
-+
-+	for (;;) {
-+		fd.events = POLLIN | POLLPRI | POLLHUP;
-+		fd.revents = 0;
-+
-+		ret = poll(&fd, 1, (n_msgs + 1) * 100);
-+		if (ret == 0)
-+			kdbus_printf("--- timeout\n");
-+		if (ret <= 0)
-+			break;
-+
-+		if (fd.revents & POLLIN)
-+			ASSERT_RETURN(!timeout_msg_recv(conn_b, &expected));
-+
-+		if (expected == 0)
-+			break;
-+	}
-+
-+	ASSERT_RETURN(expected == 0);
-+
-+	kdbus_conn_free(conn_a);
-+	kdbus_conn_free(conn_b);
-+
-+	return TEST_OK;
-+}


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-10-27 13:19 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-10-27 13:19 UTC (permalink / raw
  To: gentoo-commits

commit:     149745ed61e0ffc43e55b6682710f9553c3ceb45
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Oct 27 13:19:39 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Oct 27 13:19:39 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=149745ed

Linux patch 4.1.12

 0000_README             |    4 +
 1011_linux-4.1.12.patch | 1494 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1498 insertions(+)

diff --git a/0000_README b/0000_README
index 18e95dd..8ed7605 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch:  1010_linux-4.1.11.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.11
 
+Patch:  1011_linux-4.1.12.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.12
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1011_linux-4.1.12.patch b/1011_linux-4.1.12.patch
new file mode 100644
index 0000000..07910df
--- /dev/null
+++ b/1011_linux-4.1.12.patch
@@ -0,0 +1,1494 @@
+diff --git a/Makefile b/Makefile
+index c7d877b1c248..2320f1911404 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 81151663ef38..3258174e6152 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -31,7 +31,7 @@ endif
+ CHECKFLAGS	+= -D__aarch64__
+ 
+ ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+-CFLAGS_MODULE	+= -mcmodel=large
++KBUILD_CFLAGS_MODULE	+= -mcmodel=large
+ endif
+ 
+ # Default value
+diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
+index 56283f8a675c..cf7319422768 100644
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -80,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
+ #define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+ #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
+ 
+-#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
++#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+ #define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+@@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+ 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
+-			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
++			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+ 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
+ 	return pte;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index df81caab7383..f1e0e5522e3a 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+ 		vc->runner = vcpu;
+ 		if (n_ceded == vc->n_runnable) {
+ 			kvmppc_vcore_blocked(vc);
+-		} else if (should_resched()) {
++		} else if (need_resched()) {
+ 			vc->vcore_state = VCORE_PREEMPT;
+ 			/* Let something else run */
+ 			cond_resched_lock(&vc->lock);
+diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
+index 2e48eb8813ff..c90930de76ba 100644
+--- a/arch/sparc/crypto/aes_glue.c
++++ b/arch/sparc/crypto/aes_glue.c
+@@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= AES_MIN_KEY_SIZE,
+ 			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
+ 			.setkey		= aes_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+@@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= AES_MIN_KEY_SIZE,
+ 			.max_keysize	= AES_MAX_KEY_SIZE,
++			.ivsize		= AES_BLOCK_SIZE,
+ 			.setkey		= aes_set_key,
+ 			.encrypt	= ctr_crypt,
+ 			.decrypt	= ctr_crypt,
+diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
+index 6bf2479a12fb..561a84d93cf6 100644
+--- a/arch/sparc/crypto/camellia_glue.c
++++ b/arch/sparc/crypto/camellia_glue.c
+@@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= CAMELLIA_MIN_KEY_SIZE,
+ 			.max_keysize	= CAMELLIA_MAX_KEY_SIZE,
++			.ivsize		= CAMELLIA_BLOCK_SIZE,
+ 			.setkey		= camellia_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
+index dd6a34fa6e19..61af794aa2d3 100644
+--- a/arch/sparc/crypto/des_glue.c
++++ b/arch/sparc/crypto/des_glue.c
+@@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= DES_KEY_SIZE,
+ 			.max_keysize	= DES_KEY_SIZE,
++			.ivsize		= DES_BLOCK_SIZE,
+ 			.setkey		= des_set_key,
+ 			.encrypt	= cbc_encrypt,
+ 			.decrypt	= cbc_decrypt,
+@@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
+ 		.blkcipher = {
+ 			.min_keysize	= DES3_EDE_KEY_SIZE,
+ 			.max_keysize	= DES3_EDE_KEY_SIZE,
++			.ivsize		= DES3_EDE_BLOCK_SIZE,
+ 			.setkey		= des3_ede_set_key,
+ 			.encrypt	= cbc3_encrypt,
+ 			.decrypt	= cbc3_decrypt,
+diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
+index 8f3271842533..67b6cd00a44f 100644
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-	return unlikely(!raw_cpu_read_4(__preempt_count));
++	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/crypto/ahash.c b/crypto/ahash.c
+index 8acb886032ae..9c1dc8d6106a 100644
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
+ 	struct crypto_alg *base = &alg->halg.base;
+ 
+ 	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
+-	    alg->halg.statesize > PAGE_SIZE / 8)
++	    alg->halg.statesize > PAGE_SIZE / 8 ||
++	    alg->halg.statesize == 0)
+ 		return -EINVAL;
+ 
+ 	base->cra_type = &crypto_ahash_type;
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 010ce0b1f517..fe8f1e4b4c7c 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -5174,7 +5174,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
+ out_err:
+ 	if (parent) {
+ 		rbd_dev_unparent(rbd_dev);
+-		kfree(rbd_dev->header_name);
+ 		rbd_dev_destroy(parent);
+ 	} else {
+ 		rbd_put_client(rbdc);
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 7f467fdc9107..2a2eb96caeda 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -2766,12 +2766,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
+ 	if (msgs[num - 1].flags & I2C_M_RD)
+ 		reading = true;
+ 
+-	if (!reading) {
++	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
+ 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ 		ret = -EIO;
+ 		goto out;
+ 	}
+ 
++	memset(&msg, 0, sizeof(msg));
+ 	msg.req_type = DP_REMOTE_I2C_READ;
+ 	msg.u.i2c_read.num_transactions = num - 1;
+ 	msg.u.i2c_read.port_number = port->port_num;
+diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
+index eb7e61078a5b..92586b0af3ab 100644
+--- a/drivers/gpu/drm/drm_sysfs.c
++++ b/drivers/gpu/drm/drm_sysfs.c
+@@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
+ 			   char *buf)
+ {
+ 	struct drm_connector *connector = to_drm_connector(device);
+-	struct drm_device *dev = connector->dev;
+-	uint64_t dpms_status;
+-	int ret;
++	int dpms;
+ 
+-	ret = drm_object_property_get_value(&connector->base,
+-					    dev->mode_config.dpms_property,
+-					    &dpms_status);
+-	if (ret)
+-		return 0;
++	dpms = READ_ONCE(connector->dpms);
+ 
+ 	return snprintf(buf, PAGE_SIZE, "%s\n",
+-			drm_get_dpms_name((int)dpms_status));
++			drm_get_dpms_name(dpms));
+ }
+ 
+ static ssize_t enabled_show(struct device *device,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+index 6751553abe4a..567791b27d6d 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+@@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
+ 	return 0;
+ }
+ 
++static int
++nouveau_fbcon_open(struct fb_info *info, int user)
++{
++	struct nouveau_fbdev *fbcon = info->par;
++	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++	int ret = pm_runtime_get_sync(drm->dev->dev);
++	if (ret < 0 && ret != -EACCES)
++		return ret;
++	return 0;
++}
++
++static int
++nouveau_fbcon_release(struct fb_info *info, int user)
++{
++	struct nouveau_fbdev *fbcon = info->par;
++	struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
++	pm_runtime_put(drm->dev->dev);
++	return 0;
++}
++
+ static struct fb_ops nouveau_fbcon_ops = {
+ 	.owner = THIS_MODULE,
++	.fb_open = nouveau_fbcon_open,
++	.fb_release = nouveau_fbcon_release,
+ 	.fb_check_var = drm_fb_helper_check_var,
+ 	.fb_set_par = drm_fb_helper_set_par,
+ 	.fb_fillrect = nouveau_fbcon_fillrect,
+@@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
+ 
+ static struct fb_ops nouveau_fbcon_sw_ops = {
+ 	.owner = THIS_MODULE,
++	.fb_open = nouveau_fbcon_open,
++	.fb_release = nouveau_fbcon_release,
+ 	.fb_check_var = drm_fb_helper_check_var,
+ 	.fb_set_par = drm_fb_helper_set_par,
+ 	.fb_fillrect = cfb_fillrect,
+diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
+index d2e9e9efc159..6743174acdbc 100644
+--- a/drivers/gpu/drm/radeon/radeon_display.c
++++ b/drivers/gpu/drm/radeon/radeon_display.c
+@@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
+ 	radeon_fbdev_init(rdev);
+ 	drm_kms_helper_poll_init(rdev->ddev);
+ 
+-	if (rdev->pm.dpm_enabled) {
+-		/* do dpm late init */
+-		ret = radeon_pm_late_init(rdev);
+-		if (ret) {
+-			rdev->pm.dpm_enabled = false;
+-			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
+-		}
+-		/* set the dpm state for PX since there won't be
+-		 * a modeset to call this.
+-		 */
+-		radeon_pm_compute_clocks(rdev);
+-	}
++	/* do pm late init */
++	ret = radeon_pm_late_init(rdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+index 257b10be5cda..42986130cc63 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
+@@ -283,6 +283,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
+ 	radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
+ 
+ 	drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
++	drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+ 	drm_mode_connector_set_path_property(connector, pathprop);
+ 	drm_reinit_primary_mode_group(dev);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
+index c1ba83a8dd8c..948c33105801 100644
+--- a/drivers/gpu/drm/radeon/radeon_pm.c
++++ b/drivers/gpu/drm/radeon/radeon_pm.c
+@@ -1331,14 +1331,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
+ 	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+ 
+ 	if (rdev->pm.num_power_states > 1) {
+-		/* where's the best place to put these? */
+-		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+-		if (ret)
+-			DRM_ERROR("failed to create device file for power profile\n");
+-		ret = device_create_file(rdev->dev, &dev_attr_power_method);
+-		if (ret)
+-			DRM_ERROR("failed to create device file for power method\n");
+-
+ 		if (radeon_debugfs_pm_init(rdev)) {
+ 			DRM_ERROR("Failed to register debugfs file for PM!\n");
+ 		}
+@@ -1396,20 +1388,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
+ 		goto dpm_failed;
+ 	rdev->pm.dpm_enabled = true;
+ 
+-	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
+-	if (ret)
+-		DRM_ERROR("failed to create device file for dpm state\n");
+-	ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
+-	if (ret)
+-		DRM_ERROR("failed to create device file for dpm state\n");
+-	/* XXX: these are noops for dpm but are here for backwards compat */
+-	ret = device_create_file(rdev->dev, &dev_attr_power_profile);
+-	if (ret)
+-		DRM_ERROR("failed to create device file for power profile\n");
+-	ret = device_create_file(rdev->dev, &dev_attr_power_method);
+-	if (ret)
+-		DRM_ERROR("failed to create device file for power method\n");
+-
+ 	if (radeon_debugfs_pm_init(rdev)) {
+ 		DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ 	}
+@@ -1550,9 +1528,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
+ 	int ret = 0;
+ 
+ 	if (rdev->pm.pm_method == PM_METHOD_DPM) {
+-		mutex_lock(&rdev->pm.mutex);
+-		ret = radeon_dpm_late_enable(rdev);
+-		mutex_unlock(&rdev->pm.mutex);
++		if (rdev->pm.dpm_enabled) {
++			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
++			if (ret)
++				DRM_ERROR("failed to create device file for dpm state\n");
++			ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
++			if (ret)
++				DRM_ERROR("failed to create device file for dpm state\n");
++			/* XXX: these are noops for dpm but are here for backwards compat */
++			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
++			if (ret)
++				DRM_ERROR("failed to create device file for power profile\n");
++			ret = device_create_file(rdev->dev, &dev_attr_power_method);
++			if (ret)
++				DRM_ERROR("failed to create device file for power method\n");
++
++			mutex_lock(&rdev->pm.mutex);
++			ret = radeon_dpm_late_enable(rdev);
++			mutex_unlock(&rdev->pm.mutex);
++			if (ret) {
++				rdev->pm.dpm_enabled = false;
++				DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
++			} else {
++				/* set the dpm state for PX since there won't be
++				 * a modeset to call this.
++				 */
++				radeon_pm_compute_clocks(rdev);
++			}
++		}
++	} else {
++		if (rdev->pm.num_power_states > 1) {
++			/* where's the best place to put these? */
++			ret = device_create_file(rdev->dev, &dev_attr_power_profile);
++			if (ret)
++				DRM_ERROR("failed to create device file for power profile\n");
++			ret = device_create_file(rdev->dev, &dev_attr_power_method);
++			if (ret)
++				DRM_ERROR("failed to create device file for power method\n");
++		}
+ 	}
+ 	return ret;
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
+index 0a80e4aabaed..3f7d4876937e 100644
+--- a/drivers/i2c/busses/i2c-designware-platdrv.c
++++ b/drivers/i2c/busses/i2c-designware-platdrv.c
+@@ -24,6 +24,7 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/delay.h>
++#include <linux/dmi.h>
+ #include <linux/i2c.h>
+ #include <linux/clk.h>
+ #include <linux/clk-provider.h>
+@@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
+ }
+ 
+ #ifdef CONFIG_ACPI
++/*
++ * The HCNT/LCNT information coming from ACPI should be the most accurate
++ * for a given platform. However, some systems get it wrong. On such systems
++ * we get better results by calculating those based on the input clock.
++ */
++static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
++	{
++		.ident = "Dell Inspiron 7348",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
++		},
++	},
++	{ }
++};
++
+ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ 			       u16 *hcnt, u16 *lcnt, u32 *sda_hold)
+ {
+@@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ 	acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ 	union acpi_object *obj;
+ 
++	if (dmi_check_system(dw_i2c_no_acpi_params))
++		return;
++
+ 	if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
+ 		return;
+ 
+@@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
+ 	adap->dev.parent = &pdev->dev;
+ 	adap->dev.of_node = pdev->dev.of_node;
+ 
+-	r = i2c_add_numbered_adapter(adap);
+-	if (r) {
+-		dev_err(&pdev->dev, "failure adding adapter\n");
+-		return r;
+-	}
+-
+ 	if (dev->pm_runtime_disabled) {
+ 		pm_runtime_forbid(&pdev->dev);
+ 	} else {
+@@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
+ 		pm_runtime_enable(&pdev->dev);
+ 	}
+ 
++	r = i2c_add_numbered_adapter(adap);
++	if (r) {
++		dev_err(&pdev->dev, "failure adding adapter\n");
++		pm_runtime_disable(&pdev->dev);
++		return r;
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
+index 5a84bea5b845..d9d022cdfff0 100644
+--- a/drivers/i2c/busses/i2c-rcar.c
++++ b/drivers/i2c/busses/i2c-rcar.c
+@@ -688,15 +688,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
+ 		return ret;
+ 	}
+ 
++	pm_runtime_enable(dev);
++	platform_set_drvdata(pdev, priv);
++
+ 	ret = i2c_add_numbered_adapter(adap);
+ 	if (ret < 0) {
+ 		dev_err(dev, "reg adap failed: %d\n", ret);
++		pm_runtime_disable(dev);
+ 		return ret;
+ 	}
+ 
+-	pm_runtime_enable(dev);
+-	platform_set_drvdata(pdev, priv);
+-
+ 	dev_info(dev, "probed\n");
+ 
+ 	return 0;
+diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
+index 297e9c9ac943..424794271703 100644
+--- a/drivers/i2c/busses/i2c-s3c2410.c
++++ b/drivers/i2c/busses/i2c-s3c2410.c
+@@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
+ 	i2c->adap.nr = i2c->pdata->bus_num;
+ 	i2c->adap.dev.of_node = pdev->dev.of_node;
+ 
++	platform_set_drvdata(pdev, i2c);
++
++	pm_runtime_enable(&pdev->dev);
++
+ 	ret = i2c_add_numbered_adapter(&i2c->adap);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "failed to add bus to i2c core\n");
++		pm_runtime_disable(&pdev->dev);
+ 		s3c24xx_i2c_deregister_cpufreq(i2c);
+ 		clk_unprepare(i2c->clk);
+ 		return ret;
+ 	}
+ 
+-	platform_set_drvdata(pdev, i2c);
+-
+-	pm_runtime_enable(&pdev->dev);
+ 	pm_runtime_enable(&i2c->adap.dev);
+ 
+ 	dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index e22e6c892b8a..7073b22d4cb4 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2959,7 +2959,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ 						metadata_low_callback,
+ 						pool);
+ 	if (r)
+-		goto out_free_pt;
++		goto out_flags_changed;
+ 
+ 	pt->callbacks.congested_fn = pool_is_congested;
+ 	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
+diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
+index a354ac677ec7..1074a0d68680 100644
+--- a/drivers/mfd/max77843.c
++++ b/drivers/mfd/max77843.c
+@@ -79,7 +79,7 @@ static int max77843_chg_init(struct max77843 *max77843)
+ 	if (!max77843->i2c_chg) {
+ 		dev_err(&max77843->i2c->dev,
+ 				"Cannot allocate I2C device for Charger\n");
+-		return PTR_ERR(max77843->i2c_chg);
++		return -ENODEV;
+ 	}
+ 	i2c_set_clientdata(max77843->i2c_chg, max77843);
+ 
+diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
+index 28df37420da9..ac02c675c59c 100644
+--- a/drivers/net/ethernet/ibm/emac/core.h
++++ b/drivers/net/ethernet/ibm/emac/core.h
+@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
+ 	u32 index;
+ };
+ 
+-#define EMAC_ETHTOOL_REGS_VER		0
+-#define EMAC4_ETHTOOL_REGS_VER		1
+-#define EMAC4SYNC_ETHTOOL_REGS_VER	2
++#define EMAC_ETHTOOL_REGS_VER		3
++#define EMAC4_ETHTOOL_REGS_VER		4
++#define EMAC4SYNC_ETHTOOL_REGS_VER	5
+ 
+ #endif /* __IBM_NEWEMAC_CORE_H */
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index b62a5e3a1c65..db2c3cdf2c40 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
+ 			if (po->pppoe_dev == dev &&
+ 			    sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
+ 				pppox_unbind_sock(sk);
+-				sk->sk_state = PPPOX_ZOMBIE;
+ 				sk->sk_state_change(sk);
+ 				po->pppoe_dev = NULL;
+ 				dev_put(dev);
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
+index faf635654312..293ed4381cc0 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx25.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
+@@ -26,7 +26,8 @@
+ #include "pinctrl-imx.h"
+ 
+ enum imx25_pads {
+-	MX25_PAD_RESERVE0 = 1,
++	MX25_PAD_RESERVE0 = 0,
++	MX25_PAD_RESERVE1 = 1,
+ 	MX25_PAD_A10 = 2,
+ 	MX25_PAD_A13 = 3,
+ 	MX25_PAD_A14 = 4,
+@@ -169,6 +170,7 @@ enum imx25_pads {
+ /* Pad names for the pinmux subsystem */
+ static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
+ 	IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
++	IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A10),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A13),
+ 	IMX_PINCTRL_PIN(MX25_PAD_A14),
+diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
+index a1800c150839..08cb419eb4e6 100644
+--- a/drivers/xen/preempt.c
++++ b/drivers/xen/preempt.c
+@@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
+ asmlinkage __visible void xen_maybe_preempt_hcall(void)
+ {
+ 	if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
+-		     && should_resched())) {
++		     && need_resched())) {
+ 		/*
+ 		 * Clear flag as we may be rescheduled on a different
+ 		 * cpu.
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 614aaa1969bd..723470850b94 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -1786,7 +1786,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 	int found = 0;
+ 	struct extent_buffer *eb;
+ 	struct btrfs_inode_extref *extref;
+-	struct extent_buffer *leaf;
+ 	u32 item_size;
+ 	u32 cur_offset;
+ 	unsigned long ptr;
+@@ -1814,9 +1813,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ 		btrfs_release_path(path);
+ 
+-		leaf = path->nodes[0];
+-		item_size = btrfs_item_size_nr(leaf, slot);
+-		ptr = btrfs_item_ptr_offset(leaf, slot);
++		item_size = btrfs_item_size_nr(eb, slot);
++		ptr = btrfs_item_ptr_offset(eb, slot);
+ 		cur_offset = 0;
+ 
+ 		while (cur_offset < item_size) {
+@@ -1830,7 +1828,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
+ 			if (ret)
+ 				break;
+ 
+-			cur_offset += btrfs_inode_extref_name_len(leaf, extref);
++			cur_offset += btrfs_inode_extref_name_len(eb, extref);
+ 			cur_offset += sizeof(*extref);
+ 		}
+ 		btrfs_tree_read_unlock_blocking(eb);
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 37d456a9a3b8..af3dd3c55ef1 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -4492,6 +4492,11 @@ locked:
+ 		bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
+ 	}
+ 
++	if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
++		ret = -EINVAL;
++		goto out_bargs;
++	}
++
+ do_balance:
+ 	/*
+ 	 * Ownership of bctl and mutually_exclusive_operation_running
+diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
+index ebc31331a837..e1cc5b45069a 100644
+--- a/fs/btrfs/volumes.h
++++ b/fs/btrfs/volumes.h
+@@ -372,6 +372,14 @@ struct map_lookup {
+ #define BTRFS_BALANCE_ARGS_VRANGE	(1ULL << 4)
+ #define BTRFS_BALANCE_ARGS_LIMIT	(1ULL << 5)
+ 
++#define BTRFS_BALANCE_ARGS_MASK			\
++	(BTRFS_BALANCE_ARGS_PROFILES |		\
++	 BTRFS_BALANCE_ARGS_USAGE |		\
++	 BTRFS_BALANCE_ARGS_DEVID | 		\
++	 BTRFS_BALANCE_ARGS_DRANGE |		\
++	 BTRFS_BALANCE_ARGS_VRANGE |		\
++	 BTRFS_BALANCE_ARGS_LIMIT)
++
+ /*
+  * Profile changing flags.  When SOFT is set we won't relocate chunk if
+  * it already has the target profile (even though it may be
+diff --git a/fs/locks.c b/fs/locks.c
+index 653faabb07f4..d3d558ba4da7 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -862,12 +862,11 @@ static int posix_locks_deadlock(struct file_lock *caller_fl,
+  * whether or not a lock was successfully freed by testing the return
+  * value for -ENOENT.
+  */
+-static int flock_lock_file(struct file *filp, struct file_lock *request)
++static int flock_lock_inode(struct inode *inode, struct file_lock *request)
+ {
+ 	struct file_lock *new_fl = NULL;
+ 	struct file_lock *fl;
+ 	struct file_lock_context *ctx;
+-	struct inode *inode = file_inode(filp);
+ 	int error = 0;
+ 	bool found = false;
+ 	LIST_HEAD(dispose);
+@@ -890,7 +889,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
+ 		goto find_conflict;
+ 
+ 	list_for_each_entry(fl, &ctx->flc_flock, fl_list) {
+-		if (filp != fl->fl_file)
++		if (request->fl_file != fl->fl_file)
+ 			continue;
+ 		if (request->fl_type == fl->fl_type)
+ 			goto out;
+@@ -1164,20 +1163,19 @@ int posix_lock_file(struct file *filp, struct file_lock *fl,
+ EXPORT_SYMBOL(posix_lock_file);
+ 
+ /**
+- * posix_lock_file_wait - Apply a POSIX-style lock to a file
+- * @filp: The file to apply the lock to
++ * posix_lock_inode_wait - Apply a POSIX-style lock to a file
++ * @inode: inode of file to which lock request should be applied
+  * @fl: The lock to be applied
+  *
+- * Add a POSIX style lock to a file.
+- * We merge adjacent & overlapping locks whenever possible.
+- * POSIX locks are sorted by owner task, then by starting address
++ * Variant of posix_lock_file_wait that does not take a filp, and so can be
++ * used after the filp has already been torn down.
+  */
+-int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ 	int error;
+ 	might_sleep ();
+ 	for (;;) {
+-		error = posix_lock_file(filp, fl, NULL);
++		error = __posix_lock_file(inode, fl, NULL);
+ 		if (error != FILE_LOCK_DEFERRED)
+ 			break;
+ 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1189,7 +1187,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
+ 	}
+ 	return error;
+ }
+-EXPORT_SYMBOL(posix_lock_file_wait);
++EXPORT_SYMBOL(posix_lock_inode_wait);
+ 
+ /**
+  * locks_mandatory_locked - Check for an active lock
+@@ -1851,18 +1849,18 @@ int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+ }
+ 
+ /**
+- * flock_lock_file_wait - Apply a FLOCK-style lock to a file
+- * @filp: The file to apply the lock to
++ * flock_lock_inode_wait - Apply a FLOCK-style lock to a file
++ * @inode: inode of the file to apply to
+  * @fl: The lock to be applied
+  *
+- * Add a FLOCK style lock to a file.
++ * Apply a FLOCK style lock request to an inode.
+  */
+-int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
+ {
+ 	int error;
+ 	might_sleep();
+ 	for (;;) {
+-		error = flock_lock_file(filp, fl);
++		error = flock_lock_inode(inode, fl);
+ 		if (error != FILE_LOCK_DEFERRED)
+ 			break;
+ 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
+@@ -1874,8 +1872,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
+ 	}
+ 	return error;
+ }
+-
+-EXPORT_SYMBOL(flock_lock_file_wait);
++EXPORT_SYMBOL(flock_lock_inode_wait);
+ 
+ /**
+  *	sys_flock: - flock() system call.
+@@ -2401,7 +2398,8 @@ locks_remove_flock(struct file *filp)
+ 		.fl_type = F_UNLCK,
+ 		.fl_end = OFFSET_MAX,
+ 	};
+-	struct file_lock_context *flctx = file_inode(filp)->i_flctx;
++	struct inode *inode = file_inode(filp);
++	struct file_lock_context *flctx = inode->i_flctx;
+ 
+ 	if (list_empty(&flctx->flc_flock))
+ 		return;
+@@ -2409,7 +2407,7 @@ locks_remove_flock(struct file *filp)
+ 	if (filp->f_op->flock)
+ 		filp->f_op->flock(filp, F_SETLKW, &fl);
+ 	else
+-		flock_lock_file(filp, &fl);
++		flock_lock_inode(inode, &fl);
+ 
+ 	if (fl.fl_ops && fl.fl_ops->fl_release_private)
+ 		fl.fl_ops->fl_release_private(&fl);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index c245874d7e9d..8f393fcc313b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -5367,15 +5367,15 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
+ 	return err;
+ }
+ 
+-static int do_vfs_lock(struct file *file, struct file_lock *fl)
++static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
+ {
+ 	int res = 0;
+ 	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
+ 		case FL_POSIX:
+-			res = posix_lock_file_wait(file, fl);
++			res = posix_lock_inode_wait(inode, fl);
+ 			break;
+ 		case FL_FLOCK:
+-			res = flock_lock_file_wait(file, fl);
++			res = flock_lock_inode_wait(inode, fl);
+ 			break;
+ 		default:
+ 			BUG();
+@@ -5435,7 +5435,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
+ 	switch (task->tk_status) {
+ 		case 0:
+ 			renew_lease(calldata->server, calldata->timestamp);
+-			do_vfs_lock(calldata->fl.fl_file, &calldata->fl);
++			do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
+ 			if (nfs4_update_lock_stateid(calldata->lsp,
+ 					&calldata->res.stateid))
+ 				break;
+@@ -5543,7 +5543,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
+ 	mutex_lock(&sp->so_delegreturn_mutex);
+ 	/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
+ 	down_read(&nfsi->rwsem);
+-	if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
++	if (do_vfs_lock(inode, request) == -ENOENT) {
+ 		up_read(&nfsi->rwsem);
+ 		mutex_unlock(&sp->so_delegreturn_mutex);
+ 		goto out;
+@@ -5684,7 +5684,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
+ 				data->timestamp);
+ 		if (data->arg.new_lock) {
+ 			data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
+-			if (do_vfs_lock(data->fl.fl_file, &data->fl) < 0) {
++			if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
+ 				rpc_restart_call_prepare(task);
+ 				break;
+ 			}
+@@ -5926,7 +5926,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
+ 	if (status != 0)
+ 		goto out;
+ 	request->fl_flags |= FL_ACCESS;
+-	status = do_vfs_lock(request->fl_file, request);
++	status = do_vfs_lock(state->inode, request);
+ 	if (status < 0)
+ 		goto out;
+ 	down_read(&nfsi->rwsem);
+@@ -5934,7 +5934,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
+ 		/* Yes: cache locks! */
+ 		/* ...but avoid races with delegation recall... */
+ 		request->fl_flags = fl_flags & ~FL_SLEEP;
+-		status = do_vfs_lock(request->fl_file, request);
++		status = do_vfs_lock(state->inode, request);
+ 		up_read(&nfsi->rwsem);
+ 		goto out;
+ 	}
+diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
+index cdefaa331a07..c29d9421bd5e 100644
+--- a/fs/nfsd/blocklayout.c
++++ b/fs/nfsd/blocklayout.c
+@@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ 	u32 device_generation = 0;
+ 	int error;
+ 
+-	/*
+-	 * We do not attempt to support I/O smaller than the fs block size,
+-	 * or not aligned to it.
+-	 */
+-	if (args->lg_minlength < block_size) {
+-		dprintk("pnfsd: I/O too small\n");
+-		goto out_layoutunavailable;
+-	}
+ 	if (seg->offset & (block_size - 1)) {
+ 		dprintk("pnfsd: I/O misaligned\n");
+ 		goto out_layoutunavailable;
+diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
+index eb6f9e6c3075..b6a53e8e526a 100644
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-	return unlikely(!preempt_count() && tif_need_resched());
++	return unlikely(preempt_count() == preempt_offset &&
++			tif_need_resched());
+ }
+ 
+ #ifdef CONFIG_PREEMPT
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index 86d0b25ed054..a89f505c856b 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
+ 	u8 *bytes;
+ };
+ 
++#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
+ struct drm_dp_remote_i2c_read {
+ 	u8 num_transactions;
+ 	u8 port_number;
+@@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
+ 		u8 *bytes;
+ 		u8 no_stop_bit;
+ 		u8 i2c_transaction_delay;
+-	} transactions[4];
++	} transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
+ 	u8 read_i2c_device_id;
+ 	u8 num_bytes_read;
+ };
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index f93192333b37..fdc369fa69e8 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1036,12 +1036,12 @@ extern void locks_remove_file(struct file *);
+ extern void locks_release_private(struct file_lock *);
+ extern void posix_test_lock(struct file *, struct file_lock *);
+ extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *);
+-extern int posix_lock_file_wait(struct file *, struct file_lock *);
++extern int posix_lock_inode_wait(struct inode *, struct file_lock *);
+ extern int posix_unblock_lock(struct file_lock *);
+ extern int vfs_test_lock(struct file *, struct file_lock *);
+ extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *);
+ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
+-extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
++extern int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl);
+ extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
+ extern void lease_get_mtime(struct inode *, struct timespec *time);
+ extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
+@@ -1127,7 +1127,8 @@ static inline int posix_lock_file(struct file *filp, struct file_lock *fl,
+ 	return -ENOLCK;
+ }
+ 
+-static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++static inline int posix_lock_inode_wait(struct inode *inode,
++					struct file_lock *fl)
+ {
+ 	return -ENOLCK;
+ }
+@@ -1153,8 +1154,8 @@ static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
+ 	return 0;
+ }
+ 
+-static inline int flock_lock_file_wait(struct file *filp,
+-				       struct file_lock *request)
++static inline int flock_lock_inode_wait(struct inode *inode,
++					struct file_lock *request)
+ {
+ 	return -ENOLCK;
+ }
+@@ -1192,6 +1193,20 @@ static inline void show_fd_locks(struct seq_file *f,
+ 			struct file *filp, struct files_struct *files) {}
+ #endif /* !CONFIG_FILE_LOCKING */
+ 
++static inline struct inode *file_inode(const struct file *f)
++{
++	return f->f_inode;
++}
++
++static inline int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++	return posix_lock_inode_wait(file_inode(filp), fl);
++}
++
++static inline int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
++{
++	return flock_lock_inode_wait(file_inode(filp), fl);
++}
+ 
+ struct fasync_struct {
+ 	spinlock_t		fa_lock;
+@@ -1991,11 +2006,6 @@ extern void ihold(struct inode * inode);
+ extern void iput(struct inode *);
+ extern int generic_update_time(struct inode *, struct timespec *, int);
+ 
+-static inline struct inode *file_inode(const struct file *f)
+-{
+-	return f->f_inode;
+-}
+-
+ /* /sys/fs */
+ extern struct kobject *fs_kobj;
+ 
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index de83b4eb1642..8cd6725c5758 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -20,7 +20,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++	({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val)	__preempt_count_add(val)
+ #define preempt_count_sub(val)	__preempt_count_sub(val)
+@@ -59,7 +60,7 @@ do { \
+ 
+ #define preempt_check_resched() \
+ do { \
+-	if (should_resched()) \
++	if (should_resched(0)) \
+ 		__preempt_schedule(); \
+ } while (0)
+ 
+diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
+index dbeec4d4a3be..5cb25f17331a 100644
+--- a/include/linux/preempt_mask.h
++++ b/include/linux/preempt_mask.h
+@@ -71,13 +71,21 @@
+  */
+ #define in_nmi()	(preempt_count() & NMI_MASK)
+ 
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_CHECK_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
+ #else
+-# define PREEMPT_CHECK_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET	0
+ #endif
+ 
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
++
++/*
+  * The preempt_count offset needed for things like:
+  *
+  *  spin_lock_bh()
+@@ -90,7 +98,7 @@
+  *
+  * Work as expected.
+  */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+ 
+ /*
+  * Are we running in atomic context?  WARNING: this macro cannot
+@@ -106,7 +114,7 @@
+  * (used by the scheduler, *after* releasing the kernel lock)
+  */
+ #define in_atomic_preempt_off() \
+-		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
++		((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+ 
+ #ifdef CONFIG_PREEMPT_COUNT
+ # define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 26a2e6122734..61f4f2d5c882 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2834,12 +2834,6 @@ extern int _cond_resched(void);
+ 
+ extern int __cond_resched_lock(spinlock_t *lock);
+ 
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET	0
+-#endif
+-
+ #define cond_resched_lock(lock) ({				\
+ 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+ 	__cond_resched_lock(lock);				\
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index eb1c55b8255a..4307e20a4a4a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2588,6 +2588,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
+ {
+ 	if (skb->ip_summed == CHECKSUM_COMPLETE)
+ 		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
++	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
++		 skb_checksum_start_offset(skb) < 0)
++		skb->ip_summed = CHECKSUM_NONE;
+ }
+ 
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+diff --git a/include/net/af_unix.h b/include/net/af_unix.h
+index a175ba4a7adb..dfe4ddfbb43c 100644
+--- a/include/net/af_unix.h
++++ b/include/net/af_unix.h
+@@ -64,7 +64,11 @@ struct unix_sock {
+ #define UNIX_GC_MAYBE_CYCLE	1
+ 	struct socket_wq	peer_wq;
+ };
+-#define unix_sk(__sk) ((struct unix_sock *)__sk)
++
++static inline struct unix_sock *unix_sk(struct sock *sk)
++{
++	return (struct unix_sock *)sk;
++}
+ 
+ #define peer_wait peer_wq.wait
+ 
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 3a4898ec8c67..ed01a012f8d5 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -826,6 +826,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
+ 	if (sk_rcvqueues_full(sk, limit))
+ 		return -ENOBUFS;
+ 
++	/*
++	 * If the skb was allocated from pfmemalloc reserves, only
++	 * allow SOCK_MEMALLOC sockets to use it as this socket is
++	 * helping free memory
++	 */
++	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
++		return -ENOMEM;
++
+ 	__sk_add_backlog(sk, skb);
+ 	sk->sk_backlog.len += skb->truesize;
+ 	return 0;
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 8476206a1e19..4d870eb6086b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4232,7 +4232,7 @@ SYSCALL_DEFINE0(sched_yield)
+ 
+ int __sched _cond_resched(void)
+ {
+-	if (should_resched()) {
++	if (should_resched(0)) {
+ 		preempt_schedule_common();
+ 		return 1;
+ 	}
+@@ -4250,7 +4250,7 @@ EXPORT_SYMBOL(_cond_resched);
+  */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+-	int resched = should_resched();
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ 	int ret = 0;
+ 
+ 	lockdep_assert_held(lock);
+@@ -4272,7 +4272,7 @@ int __sched __cond_resched_softirq(void)
+ {
+ 	BUG_ON(!in_softirq());
+ 
+-	if (should_resched()) {
++	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+ 		local_bh_enable();
+ 		preempt_schedule_common();
+ 		local_bh_disable();
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 586ad91300b0..5c01664c26e2 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -1451,13 +1451,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
+ 	timer_stats_timer_set_start_info(&dwork->timer);
+ 
+ 	dwork->wq = wq;
++	/* timer isn't guaranteed to run in this cpu, record earlier */
++	if (cpu == WORK_CPU_UNBOUND)
++		cpu = raw_smp_processor_id();
+ 	dwork->cpu = cpu;
+ 	timer->expires = jiffies + delay;
+ 
+-	if (unlikely(cpu != WORK_CPU_UNBOUND))
+-		add_timer_on(timer, cpu);
+-	else
+-		add_timer(timer);
++	add_timer_on(timer, cpu);
+ }
+ 
+ /**
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index a04225d372ba..68dea90334cb 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -3677,6 +3677,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
+ 	ret = page_counter_memparse(args, "-1", &threshold);
+ 	if (ret)
+ 		return ret;
++	threshold <<= PAGE_SHIFT;
+ 
+ 	mutex_lock(&memcg->thresholds_lock);
+ 
+diff --git a/net/core/ethtool.c b/net/core/ethtool.c
+index 1d00b8922902..4a6824767f3d 100644
+--- a/net/core/ethtool.c
++++ b/net/core/ethtool.c
+@@ -1273,7 +1273,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+ 
+ 	gstrings.len = ret;
+ 
+-	data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
++	data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
+ 	if (!data)
+ 		return -ENOMEM;
+ 
+diff --git a/net/core/filter.c b/net/core/filter.c
+index bf831a85c315..0fa2613b5e35 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -1526,9 +1526,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
+ 		goto out;
+ 
+ 	/* We're copying the filter that has been originally attached,
+-	 * so no conversion/decode needed anymore.
++	 * so no conversion/decode needed anymore. eBPF programs that
++	 * have no original program cannot be dumped through this.
+ 	 */
++	ret = -EACCES;
+ 	fprog = filter->prog->orig_prog;
++	if (!fprog)
++		goto out;
+ 
+ 	ret = fprog->len;
+ 	if (!len)
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index a2e4e47b2839..075d2e78c87e 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2976,11 +2976,12 @@ EXPORT_SYMBOL(skb_append_datato_frags);
+  */
+ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
+ {
++	unsigned char *data = skb->data;
++
+ 	BUG_ON(len > skb->len);
+-	skb->len -= len;
+-	BUG_ON(skb->len < skb->data_len);
+-	skb_postpull_rcsum(skb, skb->data, len);
+-	return skb->data += len;
++	__skb_pull(skb, len);
++	skb_postpull_rcsum(skb, data, len);
++	return skb->data;
+ }
+ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
+ 
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index e664706b350c..4d2bc8c6694f 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -568,21 +568,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
+ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ 			       struct request_sock *req)
+ {
+-	struct listen_sock *lopt = queue->listen_opt;
+ 	struct request_sock **prev;
++	struct listen_sock *lopt;
+ 	bool found = false;
+ 
+ 	spin_lock(&queue->syn_wait_lock);
+-
+-	for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+-	     prev = &(*prev)->dl_next) {
+-		if (*prev == req) {
+-			*prev = req->dl_next;
+-			found = true;
+-			break;
++	lopt = queue->listen_opt;
++	if (lopt) {
++		for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
++		     prev = &(*prev)->dl_next) {
++			if (*prev == req) {
++				*prev = req->dl_next;
++				found = true;
++				break;
++			}
+ 		}
+ 	}
+-
+ 	spin_unlock(&queue->syn_wait_lock);
+ 	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+ 		reqsk_put(req);
+@@ -676,20 +677,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
+ 	req->num_timeout = 0;
+ 	req->sk = NULL;
+ 
++	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
++	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
++	req->rsk_hash = hash;
++
+ 	/* before letting lookups find us, make sure all req fields
+ 	 * are committed to memory and refcnt initialized.
+ 	 */
+ 	smp_wmb();
+ 	atomic_set(&req->rsk_refcnt, 2);
+-	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+-	req->rsk_hash = hash;
+ 
+ 	spin_lock(&queue->syn_wait_lock);
+ 	req->dl_next = lopt->syn_table[hash];
+ 	lopt->syn_table[hash] = req;
+ 	spin_unlock(&queue->syn_wait_lock);
+-
+-	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+ }
+ EXPORT_SYMBOL(reqsk_queue_hash_req);
+ 
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index a29a504492af..e3db498f0233 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 	tunnel = container_of(work, struct l2tp_tunnel, del_work);
+ 	sk = l2tp_tunnel_sock_lookup(tunnel);
+ 	if (!sk)
+-		return;
++		goto out;
+ 
+ 	sock = sk->sk_socket;
+ 
+@@ -1340,6 +1340,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
+ 	}
+ 
+ 	l2tp_tunnel_sock_put(sk);
++out:
++	l2tp_tunnel_dec_refcount(tunnel);
+ }
+ 
+ /* Create a socket for the tunnel, if one isn't set up by
+@@ -1639,8 +1641,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
+  */
+ int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
+ {
++	l2tp_tunnel_inc_refcount(tunnel);
+ 	l2tp_tunnel_closeall(tunnel);
+-	return (false == queue_work(l2tp_wq, &tunnel->del_work));
++	if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
++		l2tp_tunnel_dec_refcount(tunnel);
++		return 1;
++	}
++	return 0;
+ }
+ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 980121e75d2e..d139c43ac6e5 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -2683,6 +2683,7 @@ static int netlink_dump(struct sock *sk)
+ 	struct sk_buff *skb = NULL;
+ 	struct nlmsghdr *nlh;
+ 	int len, err = -ENOBUFS;
++	int alloc_min_size;
+ 	int alloc_size;
+ 
+ 	mutex_lock(nlk->cb_mutex);
+@@ -2691,9 +2692,6 @@ static int netlink_dump(struct sock *sk)
+ 		goto errout_skb;
+ 	}
+ 
+-	cb = &nlk->cb;
+-	alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
+-
+ 	if (!netlink_rx_is_mmaped(sk) &&
+ 	    atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
+ 		goto errout_skb;
+@@ -2703,23 +2701,35 @@ static int netlink_dump(struct sock *sk)
+ 	 * to reduce number of system calls on dump operations, if user
+ 	 * ever provided a big enough buffer.
+ 	 */
+-	if (alloc_size < nlk->max_recvmsg_len) {
+-		skb = netlink_alloc_skb(sk,
+-					nlk->max_recvmsg_len,
+-					nlk->portid,
++	cb = &nlk->cb;
++	alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
++
++	if (alloc_min_size < nlk->max_recvmsg_len) {
++		alloc_size = nlk->max_recvmsg_len;
++		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+ 					GFP_KERNEL |
+ 					__GFP_NOWARN |
+ 					__GFP_NORETRY);
+-		/* available room should be exact amount to avoid MSG_TRUNC */
+-		if (skb)
+-			skb_reserve(skb, skb_tailroom(skb) -
+-					 nlk->max_recvmsg_len);
+ 	}
+-	if (!skb)
++	if (!skb) {
++		alloc_size = alloc_min_size;
+ 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
+ 					GFP_KERNEL);
++	}
+ 	if (!skb)
+ 		goto errout_skb;
++
++	/* Trim skb to allocated size. User is expected to provide buffer as
++	 * large as max(min_dump_alloc, 16KiB (max_recvmsg_len capped at
++	 * netlink_recvmsg())). dump will pack as many smaller messages as
++	 * could fit within the allocated skb. skb is typically allocated
++	 * with larger space than required (could be as much as near 2x the
++	 * requested size with align to next power of 2 approach). Allowing
++	 * dump to use the excess space makes it difficult for a user to have a
++	 * reasonable static buffer based on the expected largest dump of a
++	 * single netdev. The outcome is MSG_TRUNC error.
++	 */
++	skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+ 	netlink_skb_set_owner_r(skb, sk);
+ 
+ 	len = cb->dump(skb, cb);
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index aa349514e4cb..eed562295c78 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -92,7 +92,8 @@ struct sw_flow *ovs_flow_alloc(void)
+ 
+ 	/* Initialize the default stat node. */
+ 	stats = kmem_cache_alloc_node(flow_stats_cache,
+-				      GFP_KERNEL | __GFP_ZERO, 0);
++				      GFP_KERNEL | __GFP_ZERO,
++				      node_online(0) ? 0 : NUMA_NO_NODE);
+ 	if (!stats)
+ 		goto err;
+ 
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 3f63ceac8e01..844dd85426dc 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -166,6 +166,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
+ 
+ 	skb2->skb_iif = skb->dev->ifindex;
+ 	skb2->dev = dev;
++	skb_sender_cpu_clear(skb2);
+ 	err = dev_queue_xmit(skb2);
+ 
+ out:
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index f9f13a32ddb8..2873b8d65608 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -146,7 +146,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
+ 	ctxt->read_hdr = head;
+ 	pages_needed =
+ 		min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
+-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++		     rs_length);
+ 
+ 	for (pno = 0; pno < pages_needed; pno++) {
+ 		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
+@@ -245,7 +246,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
+ 	ctxt->direction = DMA_FROM_DEVICE;
+ 	ctxt->frmr = frmr;
+ 	pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
+-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
++	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
++		     rs_length);
+ 
+ 	frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
+ 	frmr->direction = DMA_FROM_DEVICE;
+diff --git a/net/tipc/msg.h b/net/tipc/msg.h
+index e1d3595e2ee9..4cbb0fbad046 100644
+--- a/net/tipc/msg.h
++++ b/net/tipc/msg.h
+@@ -353,7 +353,7 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
+ static inline u32 msg_importance(struct tipc_msg *m)
+ {
+ 	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+-		return msg_bits(m, 5, 13, 0x7);
++		return msg_bits(m, 9, 0, 0x7);
+ 	if (likely(msg_isdata(m) && !msg_errcode(m)))
+ 		return msg_user(m);
+ 	return TIPC_SYSTEM_IMPORTANCE;
+@@ -362,7 +362,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
+ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
+ {
+ 	if (unlikely(msg_user(m) == MSG_FRAGMENTER))
+-		msg_set_bits(m, 5, 13, 0x7, i);
++		msg_set_bits(m, 9, 0, 0x7, i);
+ 	else if (likely(i < TIPC_SYSTEM_IMPORTANCE))
+ 		msg_set_user(m, i);
+ 	else
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 06430598cf51..76e66695621c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1938,6 +1938,11 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
+ 		goto out;
+ 	}
+ 
++	if (flags & MSG_PEEK)
++		skip = sk_peek_offset(sk, flags);
++	else
++		skip = 0;
++
+ 	do {
+ 		int chunk;
+ 		struct sk_buff *skb, *last;
+@@ -1984,7 +1989,6 @@ again:
+ 			break;
+ 		}
+ 
+-		skip = sk_peek_offset(sk, flags);
+ 		while (skip >= unix_skb_len(skb)) {
+ 			skip -= unix_skb_len(skb);
+ 			last = skb;
+@@ -2048,6 +2052,16 @@ again:
+ 
+ 			sk_peek_offset_fwd(sk, chunk);
+ 
++			if (UNIXCB(skb).fp)
++				break;
++
++			skip = 0;
++			last = skb;
++			unix_state_lock(sk);
++			skb = skb_peek_next(skb, &sk->sk_receive_queue);
++			if (skb)
++				goto again;
++			unix_state_unlock(sk);
+ 			break;
+ 		}
+ 	} while (size);


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-10-26 20:51 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-10-26 20:51 UTC (permalink / raw
  To: gentoo-commits

commit:     19c36945db482f1174588877bf34ec85a8b039af
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 26 20:51:30 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Oct 26 20:51:30 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=19c36945

Remove redundant patchsets.

 0000_README                                        |  8 ---
 1600_dm-crypt-limit-max-segment-size.patch         | 84 ----------------------
 2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch | 32 ---------
 3 files changed, 124 deletions(-)

diff --git a/0000_README b/0000_README
index fa3fbdb..18e95dd 100644
--- a/0000_README
+++ b/0000_README
@@ -95,14 +95,6 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
-Patch:  1600_dm-crypt-limit-max-segment-size.patch
-From:   https://bugzilla.kernel.org/show_bug.cgi?id=104421
-Desc:   dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE.
-
-Patch:  2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
-From:   http://git.kernel.org/
-Desc:   inet: Patch to fix potential deadlock in reqsk_queue_unlink()
-
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1600_dm-crypt-limit-max-segment-size.patch b/1600_dm-crypt-limit-max-segment-size.patch
deleted file mode 100644
index 82aca44..0000000
--- a/1600_dm-crypt-limit-max-segment-size.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From 586b286b110e94eb31840ac5afc0c24e0881fe34 Mon Sep 17 00:00:00 2001
-From: Mike Snitzer <snitzer@redhat.com>
-Date: Wed, 9 Sep 2015 21:34:51 -0400
-Subject: dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE
-
-Setting the dm-crypt device's max_segment_size to PAGE_SIZE is an
-unfortunate constraint that is required to avoid the potential for
-exceeding dm-crypt's underlying device's max_segments limits -- due to
-crypt_alloc_buffer() possibly allocating pages for the encryption bio
-that are not as physically contiguous as the original bio.
-
-It is interesting to note that this problem was already fixed back in
-2007 via commit 91e106259 ("dm crypt: use bio_add_page").  But Linux 4.0
-commit cf2f1abfb ("dm crypt: don't allocate pages for a partial
-request") regressed dm-crypt back to _not_ using bio_add_page().  But
-given dm-crypt's cpu parallelization changes all depend on commit
-cf2f1abfb's abandoning of the more complex io fragments processing that
-dm-crypt previously had we cannot easily go back to using
-bio_add_page().
-
-So all said the cleanest way to resolve this issue is to fix dm-crypt to
-properly constrain the original bios entering dm-crypt so the encryption
-bios that dm-crypt generates from the original bios are always
-compatible with the underlying device's max_segments queue limits.
-
-It should be noted that technically Linux 4.3 does _not_ need this fix
-because of the block core's new late bio-splitting capability.  But, it
-is reasoned, there is little to be gained by having the block core split
-the encrypted bio that is composed of PAGE_SIZE segments.  That said, in
-the future we may revert this change.
-
-Fixes: cf2f1abfb ("dm crypt: don't allocate pages for a partial request")
-Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=104421
-Suggested-by: Jeff Moyer <jmoyer@redhat.com>
-Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-Cc: stable@vger.kernel.org # 4.0+
-
-diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
-index d60c88d..4b3b6f8 100644
---- a/drivers/md/dm-crypt.c
-+++ b/drivers/md/dm-crypt.c
-@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
- 
- /*
-  * Generate a new unfragmented bio with the given size
-- * This should never violate the device limitations
-+ * This should never violate the device limitations (but only because
-+ * max_segment_size is being constrained to PAGE_SIZE).
-  *
-  * This function may be called concurrently. If we allocate from the mempool
-  * concurrently, there is a possibility of deadlock. For example, if we have
-@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
- 	return fn(ti, cc->dev, cc->start, ti->len, data);
- }
- 
-+static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
-+{
-+	/*
-+	 * Unfortunate constraint that is required to avoid the potential
-+	 * for exceeding underlying device's max_segments limits -- due to
-+	 * crypt_alloc_buffer() possibly allocating pages for the encryption
-+	 * bio that are not as physically contiguous as the original bio.
-+	 */
-+	limits->max_segment_size = PAGE_SIZE;
-+}
-+
- static struct target_type crypt_target = {
- 	.name   = "crypt",
--	.version = {1, 14, 0},
-+	.version = {1, 14, 1},
- 	.module = THIS_MODULE,
- 	.ctr    = crypt_ctr,
- 	.dtr    = crypt_dtr,
-@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
- 	.resume = crypt_resume,
- 	.message = crypt_message,
- 	.iterate_devices = crypt_iterate_devices,
-+	.io_hints = crypt_io_hints,
- };
- 
- static int __init dm_crypt_init(void)
--- 
-cgit v0.10.2
-

diff --git a/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch b/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
deleted file mode 100644
index 890f5e5..0000000
--- a/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From 83fccfc3940c4a2db90fd7e7079f5b465cd8c6af Mon Sep 17 00:00:00 2001
-From: Eric Dumazet <edumazet@google.com>
-Date: Thu, 13 Aug 2015 15:44:51 -0700
-Subject: inet: fix potential deadlock in reqsk_queue_unlink()
-
-When replacing del_timer() with del_timer_sync(), I introduced
-a deadlock condition :
-
-reqsk_queue_unlink() is called from inet_csk_reqsk_queue_drop()
-
-inet_csk_reqsk_queue_drop() can be called from many contexts,
-one being the timer handler itself (reqsk_timer_handler()).
-
-In this case, del_timer_sync() loops forever.
-
-Simple fix is to test if timer is pending.
-
-Fixes: 2235f2ac75fd ("inet: fix races with reqsk timers")
-Signed-off-by: Eric Dumazet <edumazet@google.com>
-Signed-off-by: David S. Miller <davem@davemloft.net>
-
---- a/net/ipv4/inet_connection_sock.c	2015-10-02 07:49:42.759957268 -0400
-+++ b/net/ipv4/inet_connection_sock.c	2015-10-02 07:50:12.929957111 -0400
-@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct re
- 	}
- 
- 	spin_unlock(&queue->syn_wait_lock);
--	if (del_timer_sync(&req->rsk_timer))
-+	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
- 		reqsk_put(req);
- 	return found;
- }

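The one-line guard above exists because inet_csk_reqsk_queue_drop() can be
reached from the timer handler itself, and del_timer_sync() called from
the handler waits on the current context forever; timer_pending() is false
while the handler runs, so the synchronous wait is skipped exactly then. A
small pthread sketch of the same self-wait hazard and guard (illustrative
names, compile with -pthread; this is an analogue, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_t worker;
static pthread_mutex_t ready = PTHREAD_MUTEX_INITIALIZER;

/* Synchronously reap the worker -- unless we *are* the worker, in which
 * case waiting would be waiting on ourselves, mirroring the
 * timer_pending() check in the patch above. */
static void cancel_worker_sync(void)
{
	if (pthread_equal(pthread_self(), worker)) {
		puts("worker context: skipping the synchronous wait");
		return;
	}
	pthread_join(worker, NULL);
}

static void *worker_fn(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&ready);     /* wait until 'worker' is published */
	pthread_mutex_unlock(&ready);
	cancel_worker_sync();           /* the dangerous call path */
	return NULL;
}

int main(void)
{
	pthread_mutex_lock(&ready);
	pthread_create(&worker, NULL, worker_fn, NULL);
	pthread_mutex_unlock(&ready);   /* 'worker' is now safe to read */
	pthread_join(worker, NULL);
	return 0;
}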

* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-10-26 20:49 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-10-26 20:49 UTC
  To: gentoo-commits

commit:     a96b0651fc6a971fe0c2d4a77f574c77dfbddd0b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Oct 26 20:49:29 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Oct 26 20:49:29 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a96b0651

Linux patch 4.1.11

 0000_README             |    4 +
 1010_linux-4.1.11.patch | 8151 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 8155 insertions(+)

diff --git a/0000_README b/0000_README
index b9b941a..fa3fbdb 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1009_linux-4.1.10.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.10
 
+Patch:  1010_linux-4.1.11.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.11
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1010_linux-4.1.11.patch b/1010_linux-4.1.11.patch
new file mode 100644
index 0000000..0200b32
--- /dev/null
+++ b/1010_linux-4.1.11.patch
@@ -0,0 +1,8151 @@
+diff --git a/Documentation/HOWTO b/Documentation/HOWTO
+index 93aa8604630e..21152d397b88 100644
+--- a/Documentation/HOWTO
++++ b/Documentation/HOWTO
+@@ -218,16 +218,16 @@ The development process
+ Linux kernel development process currently consists of a few different
+ main kernel "branches" and lots of different subsystem-specific kernel
+ branches.  These different branches are:
+-  - main 3.x kernel tree
+-  - 3.x.y -stable kernel tree
+-  - 3.x -git kernel patches
++  - main 4.x kernel tree
++  - 4.x.y -stable kernel tree
++  - 4.x -git kernel patches
+   - subsystem specific kernel trees and patches
+-  - the 3.x -next kernel tree for integration tests
++  - the 4.x -next kernel tree for integration tests
+ 
+-3.x kernel tree
++4.x kernel tree
+ -----------------
+-3.x kernels are maintained by Linus Torvalds, and can be found on
+-kernel.org in the pub/linux/kernel/v3.x/ directory.  Its development
++4.x kernels are maintained by Linus Torvalds, and can be found on
++kernel.org in the pub/linux/kernel/v4.x/ directory.  Its development
+ process is as follows:
+   - As soon as a new kernel is released a two weeks window is open,
+     during this period of time maintainers can submit big diffs to
+@@ -262,20 +262,20 @@ mailing list about kernel releases:
+ 	released according to perceived bug status, not according to a
+ 	preconceived timeline."
+ 
+-3.x.y -stable kernel tree
++4.x.y -stable kernel tree
+ ---------------------------
+ Kernels with 3-part versions are -stable kernels. They contain
+ relatively small and critical fixes for security problems or significant
+-regressions discovered in a given 3.x kernel.
++regressions discovered in a given 4.x kernel.
+ 
+ This is the recommended branch for users who want the most recent stable
+ kernel and are not interested in helping test development/experimental
+ versions.
+ 
+-If no 3.x.y kernel is available, then the highest numbered 3.x
++If no 4.x.y kernel is available, then the highest numbered 4.x
+ kernel is the current stable kernel.
+ 
+-3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
++4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+ are released as needs dictate.  The normal release period is approximately
+ two weeks, but it can be longer if there are no pressing problems.  A
+ security-related problem, instead, can cause a release to happen almost
+@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
+ documents what kinds of changes are acceptable for the -stable tree, and
+ how the release process works.
+ 
+-3.x -git patches
++4.x -git patches
+ ------------------
+ These are daily snapshots of Linus' kernel tree which are managed in a
+ git repository (hence the name.) These patches are usually released
+@@ -317,9 +317,9 @@ revisions to it, and maintainers can mark patches as under review,
+ accepted, or rejected.  Most of these patchwork sites are listed at
+ http://patchwork.kernel.org/.
+ 
+-3.x -next kernel tree for integration tests
++4.x -next kernel tree for integration tests
+ ---------------------------------------------
+-Before updates from subsystem trees are merged into the mainline 3.x
++Before updates from subsystem trees are merged into the mainline 4.x
+ tree, they need to be integration-tested.  For this purpose, a special
+ testing repository exists into which virtually all subsystem trees are
+ pulled on an almost daily basis:
+diff --git a/Makefile b/Makefile
+index d02f16b510dc..c7d877b1c248 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 10
++SUBLEVEL = 11
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/Makefile b/arch/arm/Makefile
+index 985227cbbd1b..47f10e7ad1f6 100644
+--- a/arch/arm/Makefile
++++ b/arch/arm/Makefile
+@@ -50,6 +50,14 @@ AS		+= -EL
+ LD		+= -EL
+ endif
+ 
++#
++# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
++# later may result in code being generated that handles signed short and signed
++# char struct members incorrectly. So disable it.
++# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
++#
++KBUILD_CFLAGS	+= $(call cc-option,-fno-ipa-sra)
++
+ # This selects which instruction set is used.
+ # Note that GCC does not numerically define an architecture version
+ # macro, but instead defines a whole series of macros which makes
+diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
+index dd45e6971bc3..9351296356dc 100644
+--- a/arch/arm/boot/dts/imx25-pdk.dts
++++ b/arch/arm/boot/dts/imx25-pdk.dts
+@@ -10,6 +10,7 @@
+  */
+ 
+ /dts-v1/;
++#include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/input/input.h>
+ #include "imx25.dtsi"
+ 
+@@ -114,8 +115,8 @@
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+-	cd-gpios = <&gpio2 1 0>;
+-	wp-gpios = <&gpio2 0 0>;
++	cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
+index 93d3ea12328c..0f3fe29b816e 100644
+--- a/arch/arm/boot/dts/imx51-apf51dev.dts
++++ b/arch/arm/boot/dts/imx51-apf51dev.dts
+@@ -98,7 +98,7 @@
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+-	cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ 	bus-width = <4>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
+index e9337ad52f59..3bc18835fb4b 100644
+--- a/arch/arm/boot/dts/imx53-ard.dts
++++ b/arch/arm/boot/dts/imx53-ard.dts
+@@ -103,8 +103,8 @@
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+-	cd-gpios = <&gpio1 1 0>;
+-	wp-gpios = <&gpio1 9 0>;
++	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
+index d0e0f57eb432..53f40885c530 100644
+--- a/arch/arm/boot/dts/imx53-m53evk.dts
++++ b/arch/arm/boot/dts/imx53-m53evk.dts
+@@ -124,8 +124,8 @@
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+-	cd-gpios = <&gpio1 1 0>;
+-	wp-gpios = <&gpio1 9 0>;
++	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
+index 181ae5ebf23f..1f55187ed9ce 100644
+--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
++++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
+@@ -147,8 +147,8 @@
+ &esdhc3 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc3>;
+-	cd-gpios = <&gpio3 11 0>;
+-	wp-gpios = <&gpio3 12 0>;
++	cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
+ 	bus-width = <8>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
+index 1d325576bcc0..fc89ce1e5763 100644
+--- a/arch/arm/boot/dts/imx53-smd.dts
++++ b/arch/arm/boot/dts/imx53-smd.dts
+@@ -41,8 +41,8 @@
+ &esdhc1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+-	cd-gpios = <&gpio3 13 0>;
+-	wp-gpios = <&gpio4 11 0>;
++	cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
+ 	status = "okay";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
+index 4f1f0e2868bf..e03373a58760 100644
+--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
++++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
+@@ -41,8 +41,8 @@
+ 	pinctrl-0 = <&pinctrl_esdhc2>,
+ 		    <&pinctrl_esdhc2_cdwp>;
+ 	vmmc-supply = <&reg_3p3v>;
+-	wp-gpios = <&gpio1 2 0>;
+-	cd-gpios = <&gpio1 4 0>;
++	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+ 	status = "disabled";
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
+index 704bd72cbfec..d3e50b22064f 100644
+--- a/arch/arm/boot/dts/imx53-tx53.dtsi
++++ b/arch/arm/boot/dts/imx53-tx53.dtsi
+@@ -183,7 +183,7 @@
+ };
+ 
+ &esdhc1 {
+-	cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ 	fsl,wp-controller;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc1>;
+@@ -191,7 +191,7 @@
+ };
+ 
+ &esdhc2 {
+-	cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
++	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ 	fsl,wp-controller;
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc2>;
+diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
+index c17d3ad6dba5..fc51b87ad208 100644
+--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
++++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
+@@ -119,8 +119,8 @@
+ &esdhc2 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_esdhc2>;
+-	cd-gpios = <&gpio3 25 0>;
+-	wp-gpios = <&gpio2 19 0>;
++	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
++	wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	status = "okay";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+index 488a640796ac..394a4ace351a 100644
+--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+@@ -35,7 +35,6 @@
+ 			compatible = "regulator-fixed";
+ 			reg = <1>;
+ 			pinctrl-names = "default";
+-			pinctrl-0 = <&pinctrl_usbh1>;
+ 			regulator-name = "usbh1_vbus";
+ 			regulator-min-microvolt = <5000000>;
+ 			regulator-max-microvolt = <5000000>;
+@@ -47,7 +46,6 @@
+ 			compatible = "regulator-fixed";
+ 			reg = <2>;
+ 			pinctrl-names = "default";
+-			pinctrl-0 = <&pinctrl_usbotg>;
+ 			regulator-name = "usb_otg_vbus";
+ 			regulator-min-microvolt = <5000000>;
+ 			regulator-max-microvolt = <5000000>;
+diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
+index a5474113cd50..67659a0ed13e 100644
+--- a/arch/arm/boot/dts/omap3-beagle.dts
++++ b/arch/arm/boot/dts/omap3-beagle.dts
+@@ -202,7 +202,7 @@
+ 
+ 	tfp410_pins: pinmux_tfp410_pins {
+ 		pinctrl-single,pins = <
+-			0x194 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
++			0x196 (PIN_OUTPUT | MUX_MODE4)	/* hdq_sio.gpio_170 */
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
+index 74777a6e200a..1b958e92d674 100644
+--- a/arch/arm/boot/dts/omap5-uevm.dts
++++ b/arch/arm/boot/dts/omap5-uevm.dts
+@@ -174,8 +174,8 @@
+ 
+ 	i2c5_pins: pinmux_i2c5_pins {
+ 		pinctrl-single,pins = <
+-			0x184 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
+-			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
++			0x186 (PIN_INPUT | MUX_MODE0)		/* i2c5_scl */
++			0x188 (PIN_INPUT | MUX_MODE0)		/* i2c5_sda */
+ 		>;
+ 	};
+ 
+diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
+index a6ad93c9bce3..fd9eefce0a7b 100644
+--- a/arch/arm/kernel/kgdb.c
++++ b/arch/arm/kernel/kgdb.c
+@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+ 	if (err)
+ 		return err;
+ 
+-	patch_text((void *)bpt->bpt_addr,
+-		   *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
++	/* Machine is already stopped, so we can use __patch_text() directly */
++	__patch_text((void *)bpt->bpt_addr,
++		     *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ 
+ 	return err;
+ }
+ 
+ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+ {
+-	patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
++	/* Machine is already stopped, so we can use __patch_text() directly */
++	__patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ 
+ 	return 0;
+ }
+diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
+index 423663e23791..586eef26203d 100644
+--- a/arch/arm/kernel/signal.c
++++ b/arch/arm/kernel/signal.c
+@@ -343,12 +343,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
+ 		 */
+ 		thumb = handler & 1;
+ 
+-#if __LINUX_ARM_ARCH__ >= 7
++#if __LINUX_ARM_ARCH__ >= 6
+ 		/*
+-		 * Clear the If-Then Thumb-2 execution state
+-		 * ARM spec requires this to be all 000s in ARM mode
+-		 * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
+-		 * signal transition without this.
++		 * Clear the If-Then Thumb-2 execution state.  ARM spec
++		 * requires this to be all 000s in ARM mode.  Snapdragon
++		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
++		 * without this.
++		 *
++		 * We must do this whenever we are running on a Thumb-2
++		 * capable CPU, which includes ARMv6T2.  However, we elect
++		 * to do this whenever we're on an ARMv6 or later CPU for
++		 * simplicity.
+ 		 */
+ 		cpsr &= ~PSR_IT_MASK;
+ #endif
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 48efe2ee452c..58048b333d31 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -518,8 +518,7 @@ ARM_BE8(rev	r6, r6  )
+ 
+ 	mrc	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
+ 	str	r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
+-	bic	r2, #1			@ Clear ENABLE
+-	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
++
+ 	isb
+ 
+ 	mrrc	p15, 3, rr_lo_hi(r2, r3), c14	@ CNTV_CVAL
+@@ -532,6 +531,9 @@ ARM_BE8(rev	r6, r6  )
+ 	mcrr	p15, 4, r2, r2, c14	@ CNTVOFF
+ 
+ 1:
++	mov	r2, #0			@ Clear ENABLE
++	mcr	p15, 0, r2, c14, c3, 1	@ CNTV_CTL
++
+ 	@ Allow physical timer/counter access for the host
+ 	mrc	p15, 4, r2, c14, c1, 0	@ CNTHCTL
+ 	orr	r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
+diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
+index 1d5accbd3dcf..191dcfab9f60 100644
+--- a/arch/arm/kvm/mmu.c
++++ b/arch/arm/kvm/mmu.c
+@@ -1790,8 +1790,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ 		if (vma->vm_flags & VM_PFNMAP) {
+ 			gpa_t gpa = mem->guest_phys_addr +
+ 				    (vm_start - mem->userspace_addr);
+-			phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
+-					 vm_start - vma->vm_start;
++			phys_addr_t pa;
++
++			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
++			pa += vm_start - vma->vm_start;
+ 
+ 			/* IO region dirty page logging not allowed */
+ 			if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
+diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
+index 9bdf54795f05..56978199c479 100644
+--- a/arch/arm/mach-exynos/mcpm-exynos.c
++++ b/arch/arm/mach-exynos/mcpm-exynos.c
+@@ -20,6 +20,7 @@
+ #include <asm/cputype.h>
+ #include <asm/cp15.h>
+ #include <asm/mcpm.h>
++#include <asm/smp_plat.h>
+ 
+ #include "regs-pmu.h"
+ #include "common.h"
+@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
+ 		cluster >= EXYNOS5420_NR_CLUSTERS)
+ 		return -EINVAL;
+ 
+-	exynos_cpu_power_up(cpunr);
++	if (!exynos_cpu_power_state(cpunr)) {
++		exynos_cpu_power_up(cpunr);
++
++		/*
++		 * This assumes the cluster number of the big cores(Cortex A15)
++		 * is 0 and the Little cores(Cortex A7) is 1.
++		 * When the system was booted from the Little core,
++		 * they should be reset during power up cpu.
++		 */
++		if (cluster &&
++		    cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
++			/*
++			 * Before we reset the Little cores, we should wait
++			 * the SPARE2 register is set to 1 because the init
++			 * codes of the iROM will set the register after
++			 * initialization.
++			 */
++			while (!pmu_raw_readl(S5P_PMU_SPARE2))
++				udelay(10);
++
++			pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
++					EXYNOS_SWRESET);
++		}
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
+index b7614333d296..fba9068ed260 100644
+--- a/arch/arm/mach-exynos/regs-pmu.h
++++ b/arch/arm/mach-exynos/regs-pmu.h
+@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
+ #define SPREAD_ENABLE						0xF
+ #define SPREAD_USE_STANDWFI					0xF
+ 
++#define EXYNOS5420_KFC_CORE_RESET0				BIT(8)
++#define EXYNOS5420_KFC_ETM_RESET0				BIT(20)
++
++#define EXYNOS5420_KFC_CORE_RESET(_nr)				\
++	((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
++
+ #define EXYNOS5420_BB_CON1					0x0784
+ #define EXYNOS5420_BB_SEL_EN					BIT(31)
+ #define EXYNOS5420_BB_PMOS_EN					BIT(7)
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index 352962bc2e78..5170fd5c8e97 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -257,7 +257,8 @@ static bool __init efi_virtmap_init(void)
+ 		 */
+ 		if (!is_normal_ram(md))
+ 			prot = __pgprot(PROT_DEVICE_nGnRE);
+-		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
++		else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
++			 !PAGE_ALIGNED(md->phys_addr))
+ 			prot = PAGE_KERNEL_EXEC;
+ 		else
+ 			prot = PAGE_KERNEL;
+diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
+index 08cafc518b9a..0f03a8fe2314 100644
+--- a/arch/arm64/kernel/entry-ftrace.S
++++ b/arch/arm64/kernel/entry-ftrace.S
+@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
+ ENDPROC(ftrace_stub)
+ 
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
++	/* save return value regs*/
++	.macro save_return_regs
++	sub sp, sp, #64
++	stp x0, x1, [sp]
++	stp x2, x3, [sp, #16]
++	stp x4, x5, [sp, #32]
++	stp x6, x7, [sp, #48]
++	.endm
++
++	/* restore return value regs*/
++	.macro restore_return_regs
++	ldp x0, x1, [sp]
++	ldp x2, x3, [sp, #16]
++	ldp x4, x5, [sp, #32]
++	ldp x6, x7, [sp, #48]
++	add sp, sp, #64
++	.endm
++
+ /*
+  * void ftrace_graph_caller(void)
+  *
+@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
+  * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
+  */
+ ENTRY(return_to_handler)
+-	str	x0, [sp, #-16]!
++	save_return_regs
+ 	mov	x0, x29			//     parent's fp
+ 	bl	ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
+ 	mov	x30, x0			// restore the original return address
+-	ldr	x0, [sp], #16
++	restore_return_regs
+ 	ret
+ END(return_to_handler)
+ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 96da13167d4a..fa5efaa5c3ac 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -279,6 +279,7 @@ retry:
+ 			 * starvation.
+ 			 */
+ 			mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
++			mm_flags |= FAULT_FLAG_TRIED;
+ 			goto retry;
+ 		}
+ 	}
+diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
+index 5a822bb790f7..066e74f666ae 100644
+--- a/arch/m68k/include/asm/linkage.h
++++ b/arch/m68k/include/asm/linkage.h
+@@ -4,4 +4,34 @@
+ #define __ALIGN .align 4
+ #define __ALIGN_STR ".align 4"
+ 
++/*
++ * Make sure the compiler doesn't do anything stupid with the
++ * arguments on the stack - they are owned by the *caller*, not
++ * the callee. This just fools gcc into not spilling into them,
++ * and keeps it from doing tailcall recursion and/or using the
++ * stack slots for temporaries, since they are live and "used"
++ * all the way to the end of the function.
++ */
++#define asmlinkage_protect(n, ret, args...) \
++	__asmlinkage_protect##n(ret, ##args)
++#define __asmlinkage_protect_n(ret, args...) \
++	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
++#define __asmlinkage_protect0(ret) \
++	__asmlinkage_protect_n(ret)
++#define __asmlinkage_protect1(ret, arg1) \
++	__asmlinkage_protect_n(ret, "m" (arg1))
++#define __asmlinkage_protect2(ret, arg1, arg2) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
++#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
++#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4))
++#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4), "m" (arg5))
++#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
++	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
++			      "m" (arg4), "m" (arg5), "m" (arg6))
++
+ #endif
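
The m68k asmlinkage_protect() macros above use an empty asm statement
with "m" operands so gcc treats the caller-owned stacked arguments as
live to the end of the function, preventing it from reusing their slots
or tail-calling through them. A compile-and-run sketch of the same trick,
reduced to the two-argument case (gcc/clang extended asm assumed):

#include <stdio.h>

#define protect2(ret, a1, a2) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), "m" (a1), "m" (a2))

static long add2(long a, long b)
{
	long ret = a + b;

	protect2(ret, a, b);    /* keep a and b "used" until we return */
	return ret;
}

int main(void)
{
	printf("%ld\n", add2(2, 3));    /* prints 5 */
	return 0;
}
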
+diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
+index 22f04ca2ff3e..2efb18aafa4f 100644
+--- a/arch/mips/loongson/common/env.c
++++ b/arch/mips/loongson/common/env.c
+@@ -64,6 +64,9 @@ void __init prom_init_env(void)
+ 	}
+ 	if (memsize == 0)
+ 		memsize = 256;
++
++	loongson_sysconf.nr_uarts = 1;
++
+ 	pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
+ #else
+ 	struct boot_params *boot_p;
+diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
+index 609d1241b0c4..371eec113659 100644
+--- a/arch/mips/mm/dma-default.c
++++ b/arch/mips/mm/dma-default.c
+@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+ 	else
+ #endif
+ #if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
+-	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
++	     if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+ 		dma_flag = __GFP_DMA;
+ 	else
+ #endif
+diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
+index 453a8a47a467..964c0ce584ce 100644
+--- a/arch/powerpc/kvm/book3s.c
++++ b/arch/powerpc/kvm/book3s.c
+@@ -826,12 +826,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
+ 	unsigned long size = kvmppc_get_gpr(vcpu, 4);
+ 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ 	u64 buf;
++	int srcu_idx;
+ 	int ret;
+ 
+ 	if (!is_power_of_2(size) || (size > sizeof(buf)))
+ 		return H_TOO_HARD;
+ 
++	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
++	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ 	if (ret != 0)
+ 		return H_TOO_HARD;
+ 
+@@ -866,6 +869,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
+ 	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
+ 	unsigned long val = kvmppc_get_gpr(vcpu, 6);
+ 	u64 buf;
++	int srcu_idx;
+ 	int ret;
+ 
+ 	switch (size) {
+@@ -889,7 +893,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
+ 		return H_TOO_HARD;
+ 	}
+ 
++	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ 	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
++	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ 	if (ret != 0)
+ 		return H_TOO_HARD;
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 3b2d2c5b6376..ffd98b2bfa16 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1171,6 +1171,7 @@ mc_cont:
+ 	bl	kvmhv_accumulate_time
+ #endif
+ 
++	mr 	r3, r12
+ 	/* Increment exit count, poke other threads to exit */
+ 	bl	kvmhv_commence_exit
+ 	nop
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index bca2aeb6e4b6..3ff29cf6d05c 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -99,6 +99,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+ 	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ 	struct pnv_phb *phb = hose->private_data;
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+ 	if (WARN_ON(!phb))
+ 		return;
+@@ -106,10 +107,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+ 	list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&phb->msi_bmp,
+-			virq_to_hw(entry->irq) - phb->msi_base, 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
+ 	}
+ }
+ #endif /* CONFIG_PCI_MSI */
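
This hunk and the similar MSI teardown hunks that follow all make the
same move: read the hardware irq number with virq_to_hw() *before*
irq_dispose_mapping(), because the lookup is invalid once the mapping is
disposed. A minimal sketch of that capture-before-teardown ordering, with
an invented mapping table standing in for the irq domain:

#include <assert.h>
#include <stdio.h>

#define NO_HW (-1)

static int virq_to_hw_tbl[4] = { 10, 11, 12, 13 };

static int virq_to_hw(int virq)       { return virq_to_hw_tbl[virq]; }
static void dispose_mapping(int virq) { virq_to_hw_tbl[virq] = NO_HW; }

int main(void)
{
	int virq = 2;
	int hwirq = virq_to_hw(virq);   /* capture first...              */

	dispose_mapping(virq);          /* ...then tear the mapping down */
	assert(hwirq == 12);            /* ...and free by the saved value */
	printf("freeing hwirq %d after dispose\n", hwirq);
	return 0;
}
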
+diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
+index f086c6f22dc9..fd16cb5d83f3 100644
+--- a/arch/powerpc/sysdev/fsl_msi.c
++++ b/arch/powerpc/sysdev/fsl_msi.c
+@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
+ 	struct fsl_msi *msi_data;
++	irq_hw_number_t hwirq;
+ 
+ 	list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		msi_data = irq_get_chip_data(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-				       virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+index a3f660eed6de..89496cf4e04d 100644
+--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
++++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
+@@ -65,6 +65,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
+ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+ 	pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
+ 
+@@ -72,10 +73,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
+ 
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-				       virq_to_hw(entry->irq), ALLOC_CHUNK);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
++				       hwirq, ALLOC_CHUNK);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
+index b2cef1809389..13a34b237559 100644
+--- a/arch/powerpc/sysdev/mpic_u3msi.c
++++ b/arch/powerpc/sysdev/mpic_u3msi.c
+@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
+ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
+ {
+ 	struct msi_desc *entry;
++	irq_hw_number_t hwirq;
+ 
+         list_for_each_entry(entry, &pdev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
+ 
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+-				       virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
+ 	}
+ 
+ 	return;
+diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
+index 6e2e6aa378bb..02a137daa182 100644
+--- a/arch/powerpc/sysdev/ppc4xx_msi.c
++++ b/arch/powerpc/sysdev/ppc4xx_msi.c
+@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
+ {
+ 	struct msi_desc *entry;
+ 	struct ppc4xx_msi *msi_data = &ppc4xx_msi;
++	irq_hw_number_t hwirq;
+ 
+ 	dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
+ 
+ 	list_for_each_entry(entry, &dev->msi_list, list) {
+ 		if (entry->irq == NO_IRQ)
+ 			continue;
++		hwirq = virq_to_hw(entry->irq);
+ 		irq_set_msi_desc(entry->irq, NULL);
+-		msi_bitmap_free_hwirqs(&msi_data->bitmap,
+-				virq_to_hw(entry->irq), 1);
+ 		irq_dispose_mapping(entry->irq);
++		msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
+ 	}
+ }
+ 
+diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
+index d4788111c161..fac6ac9790fa 100644
+--- a/arch/s390/boot/compressed/Makefile
++++ b/arch/s390/boot/compressed/Makefile
+@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o
+ 
+ KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
+ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
+-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
++KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
+ KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
+ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
+ 
+diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
+index fe8d6924efaa..c78ba51ae285 100644
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -48,6 +48,19 @@ typedef struct
+ 	struct ucontext32 uc;
+ } rt_sigframe32;
+ 
++static inline void sigset_to_sigset32(unsigned long *set64,
++				      compat_sigset_word *set32)
++{
++	set32[0] = (compat_sigset_word) set64[0];
++	set32[1] = (compat_sigset_word)(set64[0] >> 32);
++}
++
++static inline void sigset32_to_sigset(compat_sigset_word *set32,
++				      unsigned long *set64)
++{
++	set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
++}
++
+ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ {
+ 	int err;
+@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
+ {
+ 	struct pt_regs *regs = task_pt_regs(current);
+ 	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
++	compat_sigset_t cset;
+ 	sigset_t set;
+ 
+-	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
++	if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ 		goto badframe;
++	sigset32_to_sigset(cset.sig, set.sig);
+ 	set_current_blocked(&set);
+ 	if (restore_sigregs32(regs, &frame->sregs))
+ 		goto badframe;
+@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
+ {
+ 	struct pt_regs *regs = task_pt_regs(current);
+ 	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
++	compat_sigset_t cset;
+ 	sigset_t set;
+ 
+-	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
++	if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
+ 		goto badframe;
++	sigset32_to_sigset(cset.sig, set.sig);
+ 	set_current_blocked(&set);
+ 	if (compat_restore_altstack(&frame->uc.uc_stack))
+ 		goto badframe;
+@@ -397,7 +414,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
+ 		return -EFAULT;
+ 
+ 	/* Create struct sigcontext32 on the signal stack */
+-	memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
++	sigset_to_sigset32(set->sig, sc.oldmask);
+ 	sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
+ 	if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
+ 		return -EFAULT;
+@@ -458,6 +475,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
+ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
+ 			    struct pt_regs *regs)
+ {
++	compat_sigset_t cset;
+ 	rt_sigframe32 __user *frame;
+ 	unsigned long restorer;
+ 	size_t frame_size;
+@@ -505,11 +523,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
+ 	store_sigregs();
+ 
+ 	/* Create ucontext on the signal stack. */
++	sigset_to_sigset32(set->sig, cset.sig);
+ 	if (__put_user(uc_flags, &frame->uc.uc_flags) ||
+ 	    __put_user(0, &frame->uc.uc_link) ||
+ 	    __compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
+ 	    save_sigregs32(regs, &frame->uc.uc_mcontext) ||
+-	    __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
++	    __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
+ 	    save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
+ 		return -EFAULT;
+ 
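The compat signal fix above stops memcpy'ing raw bytes between a 64-bit
sigset and two 32-bit words and converts explicitly instead, which is
what makes it correct on big-endian s390. The conversion itself is just a
split and a join; a self-contained round-trip check with analogous
helpers (plain C, not the kernel source):

#include <assert.h>
#include <stdint.h>

/* analogous to sigset_to_sigset32(): low word first, then high word */
static void split64(uint64_t v, uint32_t out[2])
{
	out[0] = (uint32_t)v;
	out[1] = (uint32_t)(v >> 32);
}

/* analogous to sigset32_to_sigset() */
static uint64_t join64(const uint32_t in[2])
{
	return (uint64_t)in[0] | ((uint64_t)in[1] << 32);
}

int main(void)
{
	uint32_t w[2];

	split64(0x1122334455667788ULL, w);
	assert(w[0] == 0x55667788u && w[1] == 0x11223344u);
	assert(join64(w) == 0x1122334455667788ULL);
	return 0;
}
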
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index aef653193160..d1918a8c4393 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -325,10 +325,15 @@ done:
+ 
+ static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
+ {
++	unsigned long flags;
++
+ 	if (instr[0] != 0x90)
+ 		return;
+ 
++	local_irq_save(flags);
+ 	add_nops(instr + (a->instrlen - a->padlen), a->padlen);
++	sync_core();
++	local_irq_restore(flags);
+ 
+ 	DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
+ 		   instr, a->instrlen - a->padlen, a->padlen);
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index cde732c1b495..307a49828826 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+ 	apic_write(APIC_LVTT, lvtt_value);
+ 
+ 	if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
++		/*
++		 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
++		 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
++		 * According to Intel, MFENCE can do the serialization here.
++		 */
++		asm volatile("mfence" : : : "memory");
++
+ 		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
+ 		return;
+ 	}
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 2813ea0f142e..22212615a137 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -2098,9 +2098,12 @@ static struct event_constraint *
+ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ 			    struct perf_event *event)
+ {
+-	struct event_constraint *c1 = cpuc->event_constraint[idx];
++	struct event_constraint *c1 = NULL;
+ 	struct event_constraint *c2;
+ 
++	if (idx >= 0) /* fake does < 0 */
++		c1 = cpuc->event_constraint[idx];
++
+ 	/*
+ 	 * first time only
+ 	 * - static constraint: no change across incremental scheduling calls
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index c76d3e37c6e1..403ace539b73 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -184,10 +184,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ }
+ 
+ #ifdef CONFIG_KEXEC_FILE
+-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
+-				unsigned long nr_pfn, void *arg)
++static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
+ {
+-	int *nr_ranges = arg;
++	unsigned int *nr_ranges = arg;
+ 
+ 	(*nr_ranges)++;
+ 	return 0;
+@@ -213,7 +212,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
+ 
+ 	ced->image = image;
+ 
+-	walk_system_ram_range(0, -1, &nr_ranges,
++	walk_system_ram_res(0, -1, &nr_ranges,
+ 				get_nr_ram_ranges_callback);
+ 
+ 	ced->max_nr_ranges = nr_ranges;
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 4bd6c197563d..6c9cb6073832 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -1393,7 +1393,18 @@ END(error_exit)
+ /* Runs on exception stack */
+ ENTRY(nmi)
+ 	INTR_FRAME
++	/*
++	 * Fix up the exception frame if we're on Xen.
++	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
++	 * one value to the stack on native, so it may clobber the rdx
++	 * scratch slot, but it won't clobber any of the important
++	 * slots past it.
++	 *
++	 * Xen is a different story, because the Xen frame itself overlaps
++	 * the "NMI executing" variable.
++	 */
+ 	PARAVIRT_ADJUST_EXCEPTION_FRAME
++
+ 	/*
+ 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
+ 	 * the iretq it performs will take us out of NMI context.
+@@ -1445,9 +1456,12 @@ ENTRY(nmi)
+ 	 * we don't want to enable interrupts, because then we'll end
+ 	 * up in an awkward situation in which IRQs are on but NMIs
+ 	 * are off.
++	 *
++	 * We also must not push anything to the stack before switching
++	 * stacks lest we corrupt the "NMI executing" variable.
+ 	 */
+ 
+-	SWAPGS
++	SWAPGS_UNSAFE_STACK
+ 	cld
+ 	movq	%rsp, %rdx
+ 	movq	PER_CPU_VAR(kernel_stack), %rsp
+diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
+index c614dd492f5f..1f316f066c49 100644
+--- a/arch/x86/kernel/paravirt.c
++++ b/arch/x86/kernel/paravirt.c
+@@ -41,10 +41,18 @@
+ #include <asm/timer.h>
+ #include <asm/special_insns.h>
+ 
+-/* nop stub */
+-void _paravirt_nop(void)
+-{
+-}
++/*
++ * nop stub, which must not clobber anything *including the stack* to
++ * avoid confusing the entry prologues.
++ */
++extern void _paravirt_nop(void);
++asm (".pushsection .entry.text, \"ax\"\n"
++     ".global _paravirt_nop\n"
++     "_paravirt_nop:\n\t"
++     "ret\n\t"
++     ".size _paravirt_nop, . - _paravirt_nop\n\t"
++     ".type _paravirt_nop, @function\n\t"
++     ".popsection");
+ 
+ /* identity function, which can be inlined */
+ u32 _paravirt_ident_32(u32 x)
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index 5e0bf57d9944..58e02d938218 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -499,27 +499,59 @@ void set_personality_ia32(bool x32)
+ }
+ EXPORT_SYMBOL_GPL(set_personality_ia32);
+ 
++/*
++ * Called from fs/proc with a reference on @p to find the function
++ * which called into schedule(). This needs to be done carefully
++ * because the task might wake up and we might look at a stack
++ * changing under us.
++ */
+ unsigned long get_wchan(struct task_struct *p)
+ {
+-	unsigned long stack;
+-	u64 fp, ip;
++	unsigned long start, bottom, top, sp, fp, ip;
+ 	int count = 0;
+ 
+ 	if (!p || p == current || p->state == TASK_RUNNING)
+ 		return 0;
+-	stack = (unsigned long)task_stack_page(p);
+-	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
++
++	start = (unsigned long)task_stack_page(p);
++	if (!start)
++		return 0;
++
++	/*
++	 * Layout of the stack page:
++	 *
++	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
++	 * PADDING
++	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
++	 * stack
++	 * ----------- bottom = start + sizeof(thread_info)
++	 * thread_info
++	 * ----------- start
++	 *
++	 * The tasks stack pointer points at the location where the
++	 * framepointer is stored. The data on the stack is:
++	 * ... IP FP ... IP FP
++	 *
++	 * We need to read FP and IP, so we need to adjust the upper
++	 * bound by another unsigned long.
++	 */
++	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
++	top -= 2 * sizeof(unsigned long);
++	bottom = start + sizeof(struct thread_info);
++
++	sp = READ_ONCE(p->thread.sp);
++	if (sp < bottom || sp > top)
+ 		return 0;
+-	fp = *(u64 *)(p->thread.sp);
++
++	fp = READ_ONCE(*(unsigned long *)sp);
+ 	do {
+-		if (fp < (unsigned long)stack ||
+-		    fp >= (unsigned long)stack+THREAD_SIZE)
++		if (fp < bottom || fp > top)
+ 			return 0;
+-		ip = *(u64 *)(fp+8);
++		ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+ 		if (!in_sched_functions(ip))
+ 			return ip;
+-		fp = *(u64 *)fp;
+-	} while (count++ < 16);
++		fp = READ_ONCE(*(unsigned long *)fp);
++	} while (count++ < 16 && p->state != TASK_RUNNING);
+ 	return 0;
+ }
+ 
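Most of the rewritten get_wchan() above is bounds arithmetic: derive the
valid [bottom, top] window of the stack page, then refuse any saved SP or
FP outside it before dereferencing. The same window check in isolation --
the padding and thread_info sizes below are placeholders, not the
kernel's values:

#include <stdio.h>

#define THREAD_SIZE	16384UL
#define PADDING		8UL	/* stands in for TOP_OF_KERNEL_STACK_PADDING */
#define TI_SIZE		192UL	/* stands in for sizeof(struct thread_info) */

static int in_window(unsigned long start, unsigned long p)
{
	/* leave room to read the FP/IP pair, as the comment above explains */
	unsigned long top = start + THREAD_SIZE - PADDING
			    - 2 * sizeof(unsigned long);
	unsigned long bottom = start + TI_SIZE;

	return p >= bottom && p <= top;
}

int main(void)
{
	unsigned long start = 0x10000;

	printf("%d\n", in_window(start, start + 4096));        /* 1: inside */
	printf("%d\n", in_window(start, start + THREAD_SIZE)); /* 0: past top */
	printf("%d\n", in_window(start, start + 16));          /* 0: thread_info */
	return 0;
}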
+diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
+index 505449700e0c..21187ebee7d0 100644
+--- a/arch/x86/kernel/tsc.c
++++ b/arch/x86/kernel/tsc.c
+@@ -21,6 +21,7 @@
+ #include <asm/hypervisor.h>
+ #include <asm/nmi.h>
+ #include <asm/x86_init.h>
++#include <asm/geode.h>
+ 
+ unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
+ EXPORT_SYMBOL(cpu_khz);
+@@ -1004,15 +1005,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+ 
+ static void __init check_system_tsc_reliable(void)
+ {
+-#ifdef CONFIG_MGEODE_LX
+-	/* RTSC counts during suspend */
++#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
++	if (is_geode_lx()) {
++		/* RTSC counts during suspend */
+ #define RTSC_SUSP 0x100
+-	unsigned long res_low, res_high;
++		unsigned long res_low, res_high;
+ 
+-	rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+-	/* Geode_LX - the OLPC CPU has a very reliable TSC */
+-	if (res_low & RTSC_SUSP)
+-		tsc_clocksource_reliable = 1;
++		rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
++		/* Geode_LX - the OLPC CPU has a very reliable TSC */
++		if (res_low & RTSC_SUSP)
++			tsc_clocksource_reliable = 1;
++	}
+ #endif
+ 	if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
+ 		tsc_clocksource_reliable = 1;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 4911bf19122b..7858cd9acfe4 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -512,7 +512,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+ 	if (svm->vmcb->control.next_rip != 0) {
+-		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
++		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
+ 		svm->next_rip = svm->vmcb->control.next_rip;
+ 	}
+ 
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 2d73807f0d31..bc3041e1abbc 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6144,6 +6144,8 @@ static __init int hardware_setup(void)
+ 	memcpy(vmx_msr_bitmap_longmode_x2apic,
+ 			vmx_msr_bitmap_longmode, PAGE_SIZE);
+ 
++	set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
++
+ 	if (enable_apicv) {
+ 		for (msr = 0x800; msr <= 0x8ff; msr++)
+ 			vmx_disable_intercept_msr_read_x2apic(msr);
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 3fba623e3ba5..f9977a7a9444 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
+ 	 * has been zapped already via cleanup_highmem().
+ 	 */
+ 	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+-	set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
++	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+ 
+ 	rodata_test();
+ 
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 841ea05e1b02..477384985ac9 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -679,6 +679,70 @@ out:
+ }
+ 
+ /*
++ * Iterate the EFI memory map in reverse order because the regions
++ * will be mapped top-down. The end result is the same as if we had
++ * mapped things forward, but doesn't require us to change the
++ * existing implementation of efi_map_region().
++ */
++static inline void *efi_map_next_entry_reverse(void *entry)
++{
++	/* Initial call */
++	if (!entry)
++		return memmap.map_end - memmap.desc_size;
++
++	entry -= memmap.desc_size;
++	if (entry < memmap.map)
++		return NULL;
++
++	return entry;
++}
++
++/*
++ * efi_map_next_entry - Return the next EFI memory map descriptor
++ * @entry: Previous EFI memory map descriptor
++ *
++ * This is a helper function to iterate over the EFI memory map, which
++ * we do in different orders depending on the current configuration.
++ *
++ * To begin traversing the memory map @entry must be %NULL.
++ *
++ * Returns %NULL when we reach the end of the memory map.
++ */
++static void *efi_map_next_entry(void *entry)
++{
++	if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
++		/*
++		 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
++		 * config table feature requires us to map all entries
++		 * in the same order as they appear in the EFI memory
++		 * map. That is to say, entry N must have a lower
++		 * virtual address than entry N+1. This is because the
++		 * firmware toolchain leaves relative references in
++		 * the code/data sections, which are split and become
++		 * separate EFI memory regions. Mapping things
++		 * out-of-order leads to the firmware accessing
++		 * unmapped addresses.
++		 *
++		 * Since we need to map things this way whether or not
++		 * the kernel actually makes use of
++		 * EFI_PROPERTIES_TABLE, let's just switch to this
++		 * scheme by default for 64-bit.
++		 */
++		return efi_map_next_entry_reverse(entry);
++	}
++
++	/* Initial call */
++	if (!entry)
++		return memmap.map;
++
++	entry += memmap.desc_size;
++	if (entry >= memmap.map_end)
++		return NULL;
++
++	return entry;
++}
++
++/*
+  * Map the efi memory ranges of the runtime services and update new_mmap with
+  * virtual addresses.
+  */
+@@ -688,7 +752,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
+ 	unsigned long left = 0;
+ 	efi_memory_desc_t *md;
+ 
+-	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++	p = NULL;
++	while ((p = efi_map_next_entry(p))) {
+ 		md = p;
+ 		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+ #ifdef CONFIG_X86_64
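
efi_map_next_entry_reverse() above is a cursor over a packed array of
desc_size-byte records, walked backwards from map_end using only the map
bounds. The identical cursor logic over an ordinary byte buffer (invented
demo code, for illustration only):

#include <stddef.h>
#include <stdio.h>

static void *next_entry_reverse(void *entry, void *map, void *map_end,
				size_t desc_size)
{
	if (!entry)                     /* initial call: start at last entry */
		return (char *)map_end - desc_size;

	entry = (char *)entry - desc_size;
	return (char *)entry < (char *)map ? NULL : entry;
}

int main(void)
{
	int vals[4] = { 1, 2, 3, 4 };
	void *p = NULL;

	while ((p = next_entry_reverse(p, vals, vals + 4, sizeof(int))))
		printf("%d ", *(int *)p);       /* prints 4 3 2 1 */
	putchar('\n');
	return 0;
}
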
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index a671e837228d..0cc657160cb6 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -33,6 +33,10 @@
+ #include <linux/memblock.h>
+ #include <linux/edd.h>
+ 
++#ifdef CONFIG_KEXEC_CORE
++#include <linux/kexec.h>
++#endif
++
+ #include <xen/xen.h>
+ #include <xen/events.h>
+ #include <xen/interface/xen.h>
+@@ -1798,6 +1802,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
+ 	.notifier_call	= xen_hvm_cpu_notify,
+ };
+ 
++#ifdef CONFIG_KEXEC_CORE
++static void xen_hvm_shutdown(void)
++{
++	native_machine_shutdown();
++	if (kexec_in_progress)
++		xen_reboot(SHUTDOWN_soft_reset);
++}
++
++static void xen_hvm_crash_shutdown(struct pt_regs *regs)
++{
++	native_machine_crash_shutdown(regs);
++	xen_reboot(SHUTDOWN_soft_reset);
++}
++#endif
++
+ static void __init xen_hvm_guest_init(void)
+ {
+ 	if (xen_pv_domain())
+@@ -1817,6 +1836,10 @@ static void __init xen_hvm_guest_init(void)
+ 	x86_init.irqs.intr_init = xen_init_IRQ;
+ 	xen_hvm_init_time_ops();
+ 	xen_hvm_init_mmu_ops();
++#ifdef CONFIG_KEXEC_CORE
++	machine_ops.shutdown = xen_hvm_shutdown;
++	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
++#endif
+ }
+ #endif
+ 
+diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
+index df0c66cb7ad3..fdba441457ec 100644
+--- a/drivers/base/cacheinfo.c
++++ b/drivers/base/cacheinfo.c
+@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ 
+ 			if (sibling == cpu) /* skip itself */
+ 				continue;
++
+ 			sib_cpu_ci = get_cpu_cacheinfo(sibling);
++			if (!sib_cpu_ci->info_list)
++				continue;
++
+ 			sib_leaf = sib_cpu_ci->info_list + index;
+ 			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ 			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
+ 
+ static void free_cache_attributes(unsigned int cpu)
+ {
++	if (!per_cpu_cacheinfo(cpu))
++		return;
++
+ 	cache_shared_cpu_map_remove(cpu);
+ 
+ 	kfree(per_cpu_cacheinfo(cpu));
+@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+ 		break;
+ 	case CPU_DEAD:
+ 		cache_remove_dev(cpu);
+-		if (per_cpu_cacheinfo(cpu))
+-			free_cache_attributes(cpu);
++		free_cache_attributes(cpu);
+ 		break;
+ 	}
+ 	return notifier_from_errno(rc);
+diff --git a/drivers/base/property.c b/drivers/base/property.c
+index 1d0b116cae95..0a60ef1500cd 100644
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -26,9 +26,10 @@
+  */
+ void device_add_property_set(struct device *dev, struct property_set *pset)
+ {
+-	if (pset)
+-		pset->fwnode.type = FWNODE_PDATA;
++	if (!pset)
++		return;
+ 
++	pset->fwnode.type = FWNODE_PDATA;
+ 	set_secondary_fwnode(dev, &pset->fwnode);
+ }
+ EXPORT_SYMBOL_GPL(device_add_property_set);
+diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
+index 5799a0b9e6cc..c8941f39c919 100644
+--- a/drivers/base/regmap/regmap-debugfs.c
++++ b/drivers/base/regmap/regmap-debugfs.c
+@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
+ /* Calculate the length of a fixed format  */
+ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
+ {
+-	snprintf(buf, buf_size, "%x", max_val);
+-	return strlen(buf);
++	return snprintf(NULL, 0, "%x", max_val);
+ }
+ 
+ static ssize_t regmap_name_read_file(struct file *file,
+@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
+ 		/* If we're in the region the user is trying to read */
+ 		if (p >= *ppos) {
+ 			/* ...but not beyond it */
+-			if (buf_pos >= count - 1 - tot_len)
++			if (buf_pos + tot_len + 1 >= count)
+ 				break;
+ 
+ 			/* Format the register */
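
The regmap-debugfs hunk above leans on the C99 guarantee that
snprintf(NULL, 0, ...) returns the number of characters that *would* have
been written, so the scratch buffer and strlen() are unnecessary. A quick
check that the two approaches agree:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int max_val = 0xbeef;
	char buf[32];

	snprintf(buf, sizeof(buf), "%x", max_val);
	assert((size_t)snprintf(NULL, 0, "%x", max_val) == strlen(buf));
	printf("reg field width: %d\n", snprintf(NULL, 0, "%x", max_val));
	return 0;
}
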
+diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
+index 757636d166cf..4ab28cfb8d2a 100644
+--- a/drivers/clk/ti/clk-3xxx.c
++++ b/drivers/clk/ti/clk-3xxx.c
+@@ -163,7 +163,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
+ 	DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
+ 	DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
+ 	DT_CLK(NULL, "uart3_ick", "uart3_ick"),
+-	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
+ 	DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
+ 	DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
+ 	DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
+@@ -308,6 +307,7 @@ static struct ti_dt_clk am35xx_clks[] = {
+ static struct ti_dt_clk omap36xx_clks[] = {
+ 	DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
+ 	DT_CLK(NULL, "uart4_fck", "uart4_fck"),
++	DT_CLK(NULL, "uart4_ick", "uart4_ick"),
+ 	{ .node_name = NULL },
+ };
+ 
+diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
+index bab67db54b7e..663045ce6fac 100644
+--- a/drivers/cpufreq/cpufreq-dt.c
++++ b/drivers/cpufreq/cpufreq-dt.c
+@@ -255,7 +255,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
+ 			rcu_read_unlock();
+ 
+ 			tol_uV = opp_uV * priv->voltage_tolerance / 100;
+-			if (regulator_is_supported_voltage(cpu_reg, opp_uV,
++			if (regulator_is_supported_voltage(cpu_reg,
++							   opp_uV - tol_uV,
+ 							   opp_uV + tol_uV)) {
+ 				if (opp_uV < min_uV)
+ 					min_uV = opp_uV;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 6f9d27f9001c..e8d16997c5cb 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -48,9 +48,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
+ 	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
+ }
+ 
+-static inline int32_t div_fp(int32_t x, int32_t y)
++static inline int32_t div_fp(s64 x, s64 y)
+ {
+-	return div_s64((int64_t)x << FRAC_BITS, y);
++	return div64_s64((int64_t)x << FRAC_BITS, y);
+ }
+ 
+ static inline int ceiling_fp(int32_t x)
+@@ -795,7 +795,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
+ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
+ {
+ 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
+-	u32 duration_us;
++	s64 duration_us;
+ 	u32 sample_time;
+ 
+ 	/*
+@@ -822,8 +822,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
+ 	 * to adjust our busyness.
+ 	 */
+ 	sample_time = pid_params.sample_rate_ms  * USEC_PER_MSEC;
+-	duration_us = (u32) ktime_us_delta(cpu->sample.time,
+-					   cpu->last_sample_time);
++	duration_us = ktime_us_delta(cpu->sample.time,
++				     cpu->last_sample_time);
+ 	if (duration_us > sample_time * 3) {
+ 		sample_ratio = div_fp(int_tofp(sample_time),
+ 				      int_tofp(duration_us));
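
The intel_pstate hunks above widen div_fp() and duration_us to 64 bits
because int_tofp(duration_us) overflows a 32-bit operand once the sample
gap exceeds roughly INT32_MAX >> FRAC_BITS microseconds (about 8.4 s with
FRAC_BITS == 8). A before/after comparison on such a value -- a
standalone rewrite of the helpers, not the driver source:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)

/* pre-patch shape: operands silently truncated to 32 bits at the call */
static int32_t div_fp_32(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

/* post-patch shape: full 64-bit operands survive the division */
static int32_t div_fp_64(int64_t x, int64_t y)
{
	return (int32_t)((x << FRAC_BITS) / y);
}

int main(void)
{
	int64_t sample_time = 3000000;  /* 3 s, in microseconds */
	int64_t duration_us = 9000000;  /* 9 s: int_tofp() exceeds INT32_MAX */

	/* the casts reproduce the old truncation (wraps on two's complement),
	 * so the "old" result is garbage */
	printf("old: %" PRId32 "\n",
	       div_fp_32((int32_t)int_tofp(sample_time),
			 (int32_t)int_tofp(duration_us)));
	/* the widened version yields 1/3 in Q.8 fixed point, i.e. 85 */
	printf("new: %" PRId32 "\n",
	       div_fp_64(int_tofp(sample_time), int_tofp(duration_us)));
	return 0;
}
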
+diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
+index 1022c2e1a2b0..9e504d3b0d4f 100644
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 	INIT_LIST_HEAD(&dw->dma.channels);
+ 	for (i = 0; i < nr_channels; i++) {
+ 		struct dw_dma_chan	*dwc = &dw->chan[i];
+-		int			r = nr_channels - i - 1;
+ 
+ 		dwc->chan.device = &dw->dma;
+ 		dma_cookie_init(&dwc->chan);
+@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 
+ 		/* 7 is highest priority & 0 is lowest. */
+ 		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
+-			dwc->priority = r;
++			dwc->priority = nr_channels - i - 1;
+ 		else
+ 			dwc->priority = i;
+ 
+@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
+ 		/* Hardware configuration */
+ 		if (autocfg) {
+ 			unsigned int dwc_params;
++			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
+ 			void __iomem *addr = chip->regs + r * sizeof(u32);
+ 
+ 			dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
+diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
+index e29560e6b40b..950c87f5d279 100644
+--- a/drivers/firmware/efi/libstub/arm-stub.c
++++ b/drivers/firmware/efi/libstub/arm-stub.c
+@@ -13,6 +13,7 @@
+  */
+ 
+ #include <linux/efi.h>
++#include <linux/sort.h>
+ #include <asm/efi.h>
+ 
+ #include "efistub.h"
+@@ -305,6 +306,44 @@ fail:
+  */
+ #define EFI_RT_VIRTUAL_BASE	0x40000000
+ 
++static int cmp_mem_desc(const void *l, const void *r)
++{
++	const efi_memory_desc_t *left = l, *right = r;
++
++	return (left->phys_addr > right->phys_addr) ? 1 : -1;
++}
++
++/*
++ * Returns whether region @left ends exactly where region @right starts,
++ * or false if either argument is NULL.
++ */
++static bool regions_are_adjacent(efi_memory_desc_t *left,
++				 efi_memory_desc_t *right)
++{
++	u64 left_end;
++
++	if (left == NULL || right == NULL)
++		return false;
++
++	left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
++
++	return left_end == right->phys_addr;
++}
++
++/*
++ * Returns whether region @left and region @right have compatible memory type
++ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
++ */
++static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
++						      efi_memory_desc_t *right)
++{
++	static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
++					 EFI_MEMORY_WC | EFI_MEMORY_UC |
++					 EFI_MEMORY_RUNTIME;
++
++	return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
++}
++
+ /*
+  * efi_get_virtmap() - create a virtual mapping for the EFI memory map
+  *
+@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
+ 		     int *count)
+ {
+ 	u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
+-	efi_memory_desc_t *out = runtime_map;
++	efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
+ 	int l;
+ 
+-	for (l = 0; l < map_size; l += desc_size) {
+-		efi_memory_desc_t *in = (void *)memory_map + l;
++	/*
++	 * To work around potential issues with the Properties Table feature
++	 * introduced in UEFI 2.5, which may split PE/COFF executable images
++	 * in memory into several RuntimeServicesCode and RuntimeServicesData
++	 * regions, we need to preserve the relative offsets between adjacent
++	 * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
++	 * The easiest way to find adjacent regions is to sort the memory map
++	 * before traversing it.
++	 */
++	sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
++
++	for (l = 0; l < map_size; l += desc_size, prev = in) {
+ 		u64 paddr, size;
+ 
++		in = (void *)memory_map + l;
+ 		if (!(in->attribute & EFI_MEMORY_RUNTIME))
+ 			continue;
+ 
++		paddr = in->phys_addr;
++		size = in->num_pages * EFI_PAGE_SIZE;
++
+ 		/*
+ 		 * Make the mapping compatible with 64k pages: this allows
+ 		 * a 4k page size kernel to kexec a 64k page size kernel and
+ 		 * vice versa.
+ 		 */
+-		paddr = round_down(in->phys_addr, SZ_64K);
+-		size = round_up(in->num_pages * EFI_PAGE_SIZE +
+-				in->phys_addr - paddr, SZ_64K);
+-
+-		/*
+-		 * Avoid wasting memory on PTEs by choosing a virtual base that
+-		 * is compatible with section mappings if this region has the
+-		 * appropriate size and physical alignment. (Sections are 2 MB
+-		 * on 4k granule kernels)
+-		 */
+-		if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+-			efi_virt_base = round_up(efi_virt_base, SZ_2M);
++		if (!regions_are_adjacent(prev, in) ||
++		    !regions_have_compatible_memory_type_attrs(prev, in)) {
++
++			paddr = round_down(in->phys_addr, SZ_64K);
++			size += in->phys_addr - paddr;
++
++			/*
++			 * Avoid wasting memory on PTEs by choosing a virtual
++			 * base that is compatible with section mappings if this
++			 * region has the appropriate size and physical
++			 * alignment. (Sections are 2 MB on 4k granule kernels)
++			 */
++			if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
++				efi_virt_base = round_up(efi_virt_base, SZ_2M);
++			else
++				efi_virt_base = round_up(efi_virt_base, SZ_64K);
++		}
+ 
+ 		in->virt_addr = efi_virt_base + in->phys_addr - paddr;
+ 		efi_virt_base += size;
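
The reworked loop above only rounds the virtual base to a fresh 64 KiB (or 2 MiB) boundary when a run of adjacent, like-attributed EFI_MEMORY_RUNTIME regions ends, which is why the map has to be sorted first. A compilable userspace sketch of that sort-then-scan shape, with a simplified stand-in for efi_memory_desc_t:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>

	#define PAGE_SZ 4096ULL

	struct region {
		uint64_t phys_addr;
		uint64_t num_pages;
		uint64_t attribute;
	};

	static int cmp_region(const void *l, const void *r)
	{
		const struct region *a = l, *b = r;

		return (a->phys_addr > b->phys_addr) ? 1 : -1;
	}

	static bool adjacent(const struct region *left, const struct region *right)
	{
		return left && right &&
		       left->phys_addr + left->num_pages * PAGE_SZ == right->phys_addr;
	}

	static void assign_virt(struct region *map, size_t n)
	{
		qsort(map, n, sizeof(*map), cmp_region);
		for (size_t i = 0; i < n; i++) {
			bool new_run = i == 0 || !adjacent(&map[i - 1], &map[i]) ||
				       map[i - 1].attribute != map[i].attribute;
			/* round the virtual base only when new_run is true,
			 * preserving relative offsets inside a run */
			(void)new_run;
		}
	}
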
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index b0487c9f018c..7f467fdc9107 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
+ 	struct drm_dp_mst_port *port, *tmp;
+ 	bool wake_tx = false;
+ 
+-	cancel_work_sync(&mstb->mgr->work);
+-
+ 	/*
+ 	 * destroy all ports - don't need lock
+ 	 * as there are no more references to the mst branch
+@@ -1977,6 +1975,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
+ 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
+ 	mutex_unlock(&mgr->lock);
++	flush_work(&mgr->work);
++	flush_work(&mgr->destroy_connector_work);
+ }
+ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
+ 
+@@ -2730,6 +2730,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
+  */
+ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+ {
++	flush_work(&mgr->work);
+ 	flush_work(&mgr->destroy_connector_work);
+ 	mutex_lock(&mgr->payload_lock);
+ 	kfree(mgr->payloads);
+diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
+index f861361a635e..4924d381b664 100644
+--- a/drivers/gpu/drm/drm_lock.c
++++ b/drivers/gpu/drm/drm_lock.c
+@@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
+ 	struct drm_master *master = file_priv->master;
+ 	int ret = 0;
+ 
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	++file_priv->lock_count;
+ 
+ 	if (lock->context == DRM_KERNEL_CONTEXT) {
+@@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
+ 	struct drm_lock *lock = data;
+ 	struct drm_master *master = file_priv->master;
+ 
++	if (drm_core_check_feature(dev, DRIVER_MODESET))
++		return -EINVAL;
++
+ 	if (lock->context == DRM_KERNEL_CONTEXT) {
+ 		DRM_ERROR("Process %d using kernel context %d\n",
+ 			  task_pid_nr(current), lock->context);
+diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
+index c684085cb56a..fadf9865709e 100644
+--- a/drivers/gpu/drm/i915/intel_bios.c
++++ b/drivers/gpu/drm/i915/intel_bios.c
+@@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id)
+ {
+ 	u8 *base = (u8 *)bdb;
+ 	int index = 0;
+-	u16 total, current_size;
++	u32 total, current_size;
+ 	u8 current_id;
+ 
+ 	/* skip to first section */
+@@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id)
+ 		current_size = *((u16 *)(base + index));
+ 		index += 2;
+ 
++		/* The MIPI Sequence Block v3+ has a separate size field. */
++		if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
++			current_size = *((const u32 *)(base + index + 1));
++
+ 		if (index + current_size > total)
+ 			return NULL;
+ 
+@@ -845,6 +849,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
+ 		return;
+ 	}
+ 
++	/* Fail gracefully for forward incompatible sequence block. */
++	if (sequence->version >= 3) {
++		DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
++		return;
++	}
++
+ 	DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+ 
+ 	block_size = get_blocksize(sequence);
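
find_section() now tracks the section size in a u32 because a MIPI Sequence Block v3+ carries its real length in a separate 32-bit field after the version byte, and the 16-bit header field can no longer be trusted for it. A standalone sketch of the bounds-checked walk; the block id value and exact field layout are assumptions taken from the hunk, and a little-endian host is assumed for brevity:

	#include <stdint.h>
	#include <string.h>

	#define BDB_MIPI_SEQUENCE 53	/* assumed block id */

	static const uint8_t *find_section(const uint8_t *base, uint32_t total,
					   uint8_t section_id)
	{
		uint32_t index = 0;

		/* each block: 1-byte id, 2-byte size, then payload */
		while (index + 3 <= total) {
			uint8_t id = base[index++];
			uint16_t size16;
			uint32_t size;

			memcpy(&size16, base + index, sizeof(size16));
			index += 2;
			size = size16;

			/* v3+ MIPI sequence block: 32-bit size after version byte */
			if (id == BDB_MIPI_SEQUENCE && total - index >= 5 &&
			    base[index] >= 3)
				memcpy(&size, base + index + 1, sizeof(size));

			if (size > total - index)	/* truncated block */
				return NULL;
			if (id == section_id)
				return base + index;
			index += size;
		}
		return NULL;
	}
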
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 32248791bc4b..52921a871230 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
+ 		  adjusted_mode->hdisplay,
+ 		  adjusted_mode->vdisplay);
+ 
+-	if (qcrtc->index == 0)
++	if (bo->is_primary == false)
+ 		recreate_primary = true;
+ 
+ 	if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
+@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
+ 		drm_connector_to_qxl_output(connector);
+ 	struct drm_device *ddev = connector->dev;
+ 	struct qxl_device *qdev = ddev->dev_private;
+-	int connected;
++	bool connected = false;
+ 
+ 	/* The first monitor is always connected */
+-	connected = (output->index == 0) ||
+-		    (qdev->client_monitors_config &&
+-		     qdev->client_monitors_config->count > output->index &&
+-		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
++	if (!qdev->client_monitors_config) {
++		if (output->index == 0)
++			connected = true;
++	} else
++		connected = qdev->client_monitors_config->count > output->index &&
++		     qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
+ 
+ 	DRM_DEBUG("#%d connected: %d\n", output->index, connected);
+ 	if (!connected)
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index dd39f434b4a7..b4ff4c134fbb 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+ 		} else
+ 			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+-			args.ucAction = ATOM_LCD_BLON;
+-			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
++			struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
++
++			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ 		}
+ 		break;
+ 	case DRM_MODE_DPMS_STANDBY:
+@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ 				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+ 		}
+ 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+-			atombios_dig_transmitter_setup(encoder,
+-						       ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
++			atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
+ 		if (ext_encoder)
+ 			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+ 		break;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index bd1c99deac71..2aaedbe0b023 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -354,6 +354,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
+ 
+ /* NCT6776 specific data */
+ 
++/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
++#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
++#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
++
+ static const s8 NCT6776_ALARM_BITS[] = {
+ 	0, 1, 2, 3, 8, 21, 20, 16,	/* in0.. in7 */
+ 	17, -1, -1, -1, -1, -1, -1,	/* in8..in14 */
+@@ -3528,8 +3532,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3600,8 +3604,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+@@ -3677,8 +3681,8 @@ static int nct6775_probe(struct platform_device *pdev)
+ 		data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ 		data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ 		data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+-		data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+-		data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
++		data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
++		data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
+ 		data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ 		data->REG_PWM[0] = NCT6775_REG_PWM;
+ 		data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index 575a072d765f..c32a934f7693 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -2996,9 +2996,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
+ static int
+ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
+ {
+-	int ret;
++	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
++	int ret = 0;
+ 
+ 	switch (state) {
++	case ISTATE_REMOVE:
++		spin_lock_bh(&conn->cmd_lock);
++		list_del_init(&cmd->i_conn_node);
++		spin_unlock_bh(&conn->cmd_lock);
++		isert_put_cmd(isert_cmd, true);
++		break;
+ 	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
+ 		ret = isert_put_nopin(cmd, conn, false);
+ 		break;
+@@ -3363,6 +3370,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
+ 	wait_for_completion(&isert_conn->wait_comp_err);
+ }
+ 
++/**
++ * isert_put_unsol_pending_cmds() - Drop commands waiting for
++ *     unsolicited dataout
++ * @conn:    iscsi connection
++ *
++ * We might still have commands that are waiting for unsolicited
++ * dataout messages. We must put the extra reference on those
++ * before blocking on target_wait_for_session_cmds().
++ */
++static void
++isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
++{
++	struct iscsi_cmd *cmd, *tmp;
++	static LIST_HEAD(drop_cmd_list);
++
++	spin_lock_bh(&conn->cmd_lock);
++	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
++		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
++		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
++		    (cmd->write_data_done < cmd->se_cmd.data_length))
++			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
++	}
++	spin_unlock_bh(&conn->cmd_lock);
++
++	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
++		list_del_init(&cmd->i_conn_node);
++		if (cmd->i_state != ISTATE_REMOVE) {
++			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
++
++			isert_info("conn %p dropping cmd %p\n", conn, cmd);
++			isert_put_cmd(isert_cmd, true);
++		}
++	}
++}
++
+ static void isert_wait_conn(struct iscsi_conn *conn)
+ {
+ 	struct isert_conn *isert_conn = conn->context;
+@@ -3381,8 +3423,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
+ 	isert_conn_terminate(isert_conn);
+ 	mutex_unlock(&isert_conn->mutex);
+ 
+-	isert_wait4cmds(conn);
+ 	isert_wait4flush(isert_conn);
++	isert_put_unsol_pending_cmds(conn);
++	isert_wait4cmds(conn);
+ 	isert_wait4logout(isert_conn);
+ 
+ 	queue_work(isert_release_wq, &isert_conn->release_work);
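
isert_put_unsol_pending_cmds() is an instance of a common pattern: unlink matching entries onto a private list while the lock is held, then drop the lock and release them. A toy rendition with a pthread mutex and a hand-rolled singly linked list standing in for the kernel primitives:

	#include <pthread.h>
	#include <stdlib.h>

	struct cmd {
		struct cmd *next;
		int pending;	/* stands in for the unsol-dataout test */
	};

	static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct cmd *conn_cmds;

	static void drop_pending(void)
	{
		struct cmd *drop = NULL, **pp, *c;

		pthread_mutex_lock(&cmd_lock);
		for (pp = &conn_cmds; (c = *pp) != NULL; ) {
			if (c->pending) {
				*pp = c->next;	/* unlink from the live list */
				c->next = drop;	/* park on the private list */
				drop = c;
			} else {
				pp = &c->next;
			}
		}
		pthread_mutex_unlock(&cmd_lock);

		while (drop) {		/* release without holding the lock */
			c = drop;
			drop = drop->next;
			free(c);	/* stands in for isert_put_cmd() */
		}
	}
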
+diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
+index a2e8c3f876cb..c2c578f0b268 100644
+--- a/drivers/irqchip/irq-atmel-aic5.c
++++ b/drivers/irqchip/irq-atmel-aic5.c
+@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
+ {
+ 	struct irq_domain *domain = d->domain;
+ 	struct irq_domain_chip_generic *dgc = domain->gc;
+-	struct irq_chip_generic *gc = dgc->gc[0];
++	struct irq_chip_generic *bgc = dgc->gc[0];
++	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 
+-	/* Disable interrupt on AIC5 */
+-	irq_gc_lock(gc);
++	/*
++	 * Disable interrupt on AIC5. We always take the lock of the
++	 * first irq chip as all chips share the same registers.
++	 */
++	irq_gc_lock(bgc);
+ 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+ 	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
+ 	gc->mask_cache &= ~d->mask;
+-	irq_gc_unlock(gc);
++	irq_gc_unlock(bgc);
+ }
+ 
+ static void aic5_unmask(struct irq_data *d)
+ {
+ 	struct irq_domain *domain = d->domain;
+ 	struct irq_domain_chip_generic *dgc = domain->gc;
+-	struct irq_chip_generic *gc = dgc->gc[0];
++	struct irq_chip_generic *bgc = dgc->gc[0];
++	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ 
+-	/* Enable interrupt on AIC5 */
+-	irq_gc_lock(gc);
++	/*
++	 * Enable interrupt on AIC5. We always take the lock of the
++	 * first irq chip as all chips share the same registers.
++	 */
++	irq_gc_lock(bgc);
+ 	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
+ 	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
+ 	gc->mask_cache |= d->mask;
+-	irq_gc_unlock(gc);
++	irq_gc_unlock(bgc);
+ }
+ 
+ static int aic5_retrigger(struct irq_data *d)
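
The aic5 fix pins every mask/unmask sequence to the lock of dgc->gc[0] because all the generic chips share one bank-switched register window (SSR selects the line, then IDCR/IECR act on it), so per-chip locks cannot serialize the two writes. A toy rendition of that rule, with a pthread mutex standing in for irq_gc_lock():

	#include <pthread.h>

	struct gc {
		pthread_mutex_t lock;
		/* per-chip state such as mask_cache lives here */
	};

	/* Serialize on the base chip's lock (dgc->gc[0]); the chip owning the
	 * irq only contributes its own bookkeeping. */
	static void write_masked(struct gc *bgc, struct gc *gc, unsigned int hwirq)
	{
		pthread_mutex_lock(&bgc->lock);
		/* irq_reg_writel(gc, hwirq, AT91_AIC5_SSR);
		 * irq_reg_writel(gc, 1, AT91_AIC5_IDCR); */
		(void)gc;
		(void)hwirq;
		pthread_mutex_unlock(&bgc->lock);
	}
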
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index c00e2db351ba..9a791dd52199 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -921,8 +921,10 @@ retry_baser:
+ 			 * non-cacheable as well.
+ 			 */
+ 			shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+-			if (!shr)
++			if (!shr) {
+ 				cache = GITS_BASER_nC;
++				__flush_dcache_area(base, alloc_size);
++			}
+ 			goto retry_baser;
+ 		}
+ 
+@@ -1163,6 +1165,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ 		return NULL;
+ 	}
+ 
++	__flush_dcache_area(itt, sz);
++
+ 	dev->its = its;
+ 	dev->itt = itt;
+ 	dev->nr_ites = nr_ites;
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 7fb2a19ac649..557f8a53a062 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -223,12 +223,15 @@ static int led_classdev_next_name(const char *init_name, char *name,
+ {
+ 	unsigned int i = 0;
+ 	int ret = 0;
++	struct device *dev;
+ 
+ 	strlcpy(name, init_name, len);
+ 
+-	while (class_find_device(leds_class, NULL, name, match_name) &&
+-	       (ret < len))
++	while ((ret < len) &&
++	       (dev = class_find_device(leds_class, NULL, name, match_name))) {
++		put_device(dev);
+ 		ret = snprintf(name, len, "%s_%u", init_name, ++i);
++	}
+ 
+ 	if (ret >= len)
+ 		return -ENOMEM;
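
The led-class fix is a refcount pairing rule: class_find_device() returns its match with a reference held, so a loop that probes candidate names must put_device() on every hit or it leaks one reference per collision. A sketch of the corrected loop shape; find_dev()/put_dev() are hypothetical stand-ins for the driver-core calls:

	#include <stddef.h>
	#include <stdio.h>

	struct device { int ref; };

	/* Hypothetical stand-ins for class_find_device()/put_device(); the
	 * real lookup returns with a reference held that the caller owns. */
	static struct device *find_dev(const char *name) { (void)name; return NULL; }
	static void put_dev(struct device *dev) { dev->ref--; }

	static int pick_free_name(char *name, size_t len, const char *init)
	{
		struct device *dev;
		unsigned int i = 0;
		int ret = snprintf(name, len, "%s", init);

		while (ret < (int)len && (dev = find_dev(name)) != NULL) {
			put_dev(dev);	/* drop the lookup's reference each pass */
			ret = snprintf(name, len, "%s_%u", init, ++i);
		}
		return ret >= (int)len ? -1 : 0;
	}
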
+diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
+index 3ee198b65843..cc7ece1712b5 100644
+--- a/drivers/macintosh/windfarm_core.c
++++ b/drivers/macintosh/windfarm_core.c
+@@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
+ {
+ 	mutex_lock(&wf_lock);
+ 	blocking_notifier_chain_unregister(&wf_client_list, nb);
+-	wf_client_count++;
++	wf_client_count--;
+ 	if (wf_client_count == 0)
+ 		wf_stop_thread();
+ 	mutex_unlock(&wf_lock);
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index c90118e90708..a7621a258936 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -2000,7 +2000,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ 	if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
+ 		ret = bitmap_storage_alloc(&store, chunks,
+ 					   !bitmap->mddev->bitmap_info.external,
+-					   bitmap->cluster_slot);
++					   mddev_is_clustered(bitmap->mddev)
++					   ? bitmap->cluster_slot : 0);
+ 	if (ret)
+ 		goto err;
+ 
+diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
+index 004e463c9423..8308f4b434ec 100644
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -435,7 +435,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ static struct dm_cache_policy_type wb_policy_type = {
+ 	.name = "cleaner",
+ 	.version = {1, 0, 0},
+-	.hint_size = 0,
++	.hint_size = 4,
+ 	.owner = THIS_MODULE,
+ 	.create = wb_create
+ };
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 5503e43e5f28..049282e6482f 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -955,7 +955,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+ 
+ /*
+  * Generate a new unfragmented bio with the given size
+- * This should never violate the device limitations
++ * This should never violate the device limitations (but only because
++ * max_segment_size is being constrained to PAGE_SIZE).
+  *
+  * This function may be called concurrently. If we allocate from the mempool
+  * concurrently, there is a possibility of deadlock. For example, if we have
+@@ -2040,9 +2041,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
+ 	return fn(ti, cc->dev, cc->start, ti->len, data);
+ }
+ 
++static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
++{
++	/*
++	 * Unfortunate constraint that is required to avoid the potential
++	 * for exceeding underlying device's max_segments limits -- due to
++	 * crypt_alloc_buffer() possibly allocating pages for the encryption
++	 * bio that are not as physically contiguous as the original bio.
++	 */
++	limits->max_segment_size = PAGE_SIZE;
++}
++
+ static struct target_type crypt_target = {
+ 	.name   = "crypt",
+-	.version = {1, 14, 0},
++	.version = {1, 14, 1},
+ 	.module = THIS_MODULE,
+ 	.ctr    = crypt_ctr,
+ 	.dtr    = crypt_dtr,
+@@ -2054,6 +2066,7 @@ static struct target_type crypt_target = {
+ 	.message = crypt_message,
+ 	.merge  = crypt_merge,
+ 	.iterate_devices = crypt_iterate_devices,
++	.io_hints = crypt_io_hints,
+ };
+ 
+ static int __init dm_crypt_init(void)
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 88e4c7f24986..2c1f2e13719e 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -327,8 +327,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
+ 		 */
+ 		if (min_region_size > (1 << 13)) {
+ 			/* If not a power of 2, make it the next power of 2 */
+-			if (min_region_size & (min_region_size - 1))
+-				region_size = 1 << fls(region_size);
++			region_size = roundup_pow_of_two(min_region_size);
+ 			DMINFO("Choosing default region size of %lu sectors",
+ 			       region_size);
+ 		} else {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 697f34fba06b..8b72ceee0f61 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -2925,8 +2925,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 
+ 	might_sleep();
+ 
+-	map = dm_get_live_table(md, &srcu_idx);
+-
+ 	spin_lock(&_minor_lock);
+ 	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
+ 	set_bit(DMF_FREEING, &md->flags);
+@@ -2940,14 +2938,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
+ 	 * do not race with internal suspend.
+ 	 */
+ 	mutex_lock(&md->suspend_lock);
++	map = dm_get_live_table(md, &srcu_idx);
+ 	if (!dm_suspended_md(md)) {
+ 		dm_table_presuspend_targets(map);
+ 		dm_table_postsuspend_targets(map);
+ 	}
+-	mutex_unlock(&md->suspend_lock);
+-
+ 	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ 	dm_put_live_table(md, srcu_idx);
++	mutex_unlock(&md->suspend_lock);
+ 
+ 	/*
+ 	 * Rare, but there may be I/O requests still going to complete,
+diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
+index bf2b80d5c470..8731b6ea026b 100644
+--- a/drivers/md/persistent-data/dm-btree-internal.h
++++ b/drivers/md/persistent-data/dm-btree-internal.h
+@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
+ 
+ extern struct dm_block_validator btree_node_validator;
+ 
++/*
++ * Value type for upper levels of multi-level btrees.
++ */
++extern void init_le64_type(struct dm_transaction_manager *tm,
++			   struct dm_btree_value_type *vt);
++
+ #endif	/* DM_BTREE_INTERNAL_H */
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index a03178e91a79..7c0d75547ccf 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
+ 	return r;
+ }
+ 
+-static struct dm_btree_value_type le64_type = {
+-	.context = NULL,
+-	.size = sizeof(__le64),
+-	.inc = NULL,
+-	.dec = NULL,
+-	.equal = NULL
+-};
+-
+ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 		    uint64_t *keys, dm_block_t *new_root)
+ {
+@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
+ 	int index = 0, r = 0;
+ 	struct shadow_spine spine;
+ 	struct btree_node *n;
++	struct dm_btree_value_type le64_vt;
+ 
++	init_le64_type(info->tm, &le64_vt);
+ 	init_shadow_spine(&spine, info);
+ 	for (level = 0; level < info->levels; level++) {
+ 		r = remove_raw(&spine, info,
+ 			       (level == last_level ?
+-				&info->value_type : &le64_type),
++				&info->value_type : &le64_vt),
+ 			       root, keys[level], (unsigned *)&index);
+ 		if (r < 0)
+ 			break;
+diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
+index 1b5e13ec7f96..0dee514ba4c5 100644
+--- a/drivers/md/persistent-data/dm-btree-spine.c
++++ b/drivers/md/persistent-data/dm-btree-spine.c
+@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
+ {
+ 	return s->root;
+ }
++
++static void le64_inc(void *context, const void *value_le)
++{
++	struct dm_transaction_manager *tm = context;
++	__le64 v_le;
++
++	memcpy(&v_le, value_le, sizeof(v_le));
++	dm_tm_inc(tm, le64_to_cpu(v_le));
++}
++
++static void le64_dec(void *context, const void *value_le)
++{
++	struct dm_transaction_manager *tm = context;
++	__le64 v_le;
++
++	memcpy(&v_le, value_le, sizeof(v_le));
++	dm_tm_dec(tm, le64_to_cpu(v_le));
++}
++
++static int le64_equal(void *context, const void *value1_le, const void *value2_le)
++{
++	__le64 v1_le, v2_le;
++
++	memcpy(&v1_le, value1_le, sizeof(v1_le));
++	memcpy(&v2_le, value2_le, sizeof(v2_le));
++	return v1_le == v2_le;
++}
++
++void init_le64_type(struct dm_transaction_manager *tm,
++		    struct dm_btree_value_type *vt)
++{
++	vt->context = tm;
++	vt->size = sizeof(__le64);
++	vt->inc = le64_inc;
++	vt->dec = le64_dec;
++	vt->equal = le64_equal;
++}
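
The new helpers stage each on-disk __le64 through memcpy() before converting or comparing, because values inside a btree node carry no alignment guarantee. A sketch of that alignment-safe load in plain C, assuming a glibc-style <endian.h> for the byte-order conversion:

	#include <endian.h>	/* le64toh(); glibc/musl extension */
	#include <stdint.h>
	#include <string.h>

	/* Alignment-safe little-endian load: the value may sit at any byte
	 * offset inside a node, so it is staged through memcpy() first. */
	static uint64_t load_le64(const void *value_le)
	{
		uint64_t v;

		memcpy(&v, value_le, sizeof(v));
		return le64toh(v);
	}
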
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index fdd3793e22f9..c7726cebc495 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
+ 	struct btree_node *n;
+ 	struct dm_btree_value_type le64_type;
+ 
+-	le64_type.context = NULL;
+-	le64_type.size = sizeof(__le64);
+-	le64_type.inc = NULL;
+-	le64_type.dec = NULL;
+-	le64_type.equal = NULL;
+-
++	init_le64_type(info->tm, &le64_type);
+ 	init_shadow_spine(&spine, info);
+ 
+ 	for (level = 0; level < (info->levels - 1); level++) {
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index efb654eb5399..0875e5e7e09a 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 	char b[BDEVNAME_SIZE];
+ 	char b2[BDEVNAME_SIZE];
+ 	struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
+-	bool discard_supported = false;
++	unsigned short blksize = 512;
+ 
+ 	if (!conf)
+ 		return -ENOMEM;
+@@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 		sector_div(sectors, mddev->chunk_sectors);
+ 		rdev1->sectors = sectors * mddev->chunk_sectors;
+ 
++		blksize = max(blksize, queue_logical_block_size(
++				      rdev1->bdev->bd_disk->queue));
++
+ 		rdev_for_each(rdev2, mddev) {
+ 			pr_debug("md/raid0:%s:   comparing %s(%llu)"
+ 				 " with %s(%llu)\n",
+@@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 	}
+ 	pr_debug("md/raid0:%s: FINAL %d zones\n",
+ 		 mdname(mddev), conf->nr_strip_zones);
++	/*
++	 * now since we have the hard sector sizes, we can make sure
++	 * chunk size is a multiple of that sector size
++	 */
++	if ((mddev->chunk_sectors << 9) % blksize) {
++		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
++		       mdname(mddev),
++		       mddev->chunk_sectors << 9, blksize);
++		err = -EINVAL;
++		goto abort;
++	}
++
+ 	err = -ENOMEM;
+ 	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
+ 				conf->nr_strip_zones, GFP_KERNEL);
+@@ -188,19 +203,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 		}
+ 		dev[j] = rdev1;
+ 
+-		if (mddev->queue)
+-			disk_stack_limits(mddev->gendisk, rdev1->bdev,
+-					  rdev1->data_offset << 9);
+-
+ 		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
+ 			conf->has_merge_bvec = 1;
+ 
+ 		if (!smallest || (rdev1->sectors < smallest->sectors))
+ 			smallest = rdev1;
+ 		cnt++;
+-
+-		if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
+-			discard_supported = true;
+ 	}
+ 	if (cnt != mddev->raid_disks) {
+ 		printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
+@@ -261,28 +269,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
+ 			 (unsigned long long)smallest->sectors);
+ 	}
+ 
+-	/*
+-	 * now since we have the hard sector sizes, we can make sure
+-	 * chunk size is a multiple of that sector size
+-	 */
+-	if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
+-		printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
+-		       mdname(mddev),
+-		       mddev->chunk_sectors << 9);
+-		goto abort;
+-	}
+-
+-	if (mddev->queue) {
+-		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+-		blk_queue_io_opt(mddev->queue,
+-				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
+-
+-		if (!discard_supported)
+-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+-		else
+-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+-	}
+-
+ 	pr_debug("md/raid0:%s: done.\n", mdname(mddev));
+ 	*private_conf = conf;
+ 
+@@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev)
+ 	if (md_check_no_bitmap(mddev))
+ 		return -EINVAL;
+ 
+-	if (mddev->queue) {
+-		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+-		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
+-		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
+-	}
+-
+ 	/* if private is not null, we are here after takeover */
+ 	if (mddev->private == NULL) {
+ 		ret = create_strip_zones(mddev, &conf);
+@@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev)
+ 		mddev->private = conf;
+ 	}
+ 	conf = mddev->private;
++	if (mddev->queue) {
++		struct md_rdev *rdev;
++		bool discard_supported = false;
++
++		blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
++		blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
++		blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
++
++		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
++		blk_queue_io_opt(mddev->queue,
++				 (mddev->chunk_sectors << 9) * mddev->raid_disks);
++
++		rdev_for_each(rdev, mddev) {
++			disk_stack_limits(mddev->gendisk, rdev->bdev,
++					  rdev->data_offset << 9);
++			if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
++				discard_supported = true;
++		}
++		if (!discard_supported)
++			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++		else
++			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
++	}
+ 
+ 	/* calculate array device size */
+ 	md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
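
Two of the md hunks reduce to simple arithmetic: validate_region_size() now uses roundup_pow_of_two() instead of the broken fls() expression (which shifted by the wrong variable), and create_strip_zones() rejects a chunk size that is not a multiple of the largest member device's logical block size. A self-contained sketch of both checks:

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t roundup_pow2(uint64_t v)	/* roundup_pow_of_two() */
	{
		uint64_t r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		uint32_t chunk_sectors = 1;	/* 512-byte chunks */
		uint32_t blksize = 4096;	/* largest member block size */

		if (((uint64_t)chunk_sectors << 9) % blksize)
			printf("chunk_size %u not a multiple of block size %u\n",
			       chunk_sectors << 9, blksize);
		printf("region_size(10000) -> %llu\n",
		       (unsigned long long)roundup_pow2(10000));
		return 0;
	}
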
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index 8be0df758e68..a0b1b460377d 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -373,7 +373,7 @@ int mmc_of_parse(struct mmc_host *host)
+ 					   0, &cd_gpio_invert);
+ 		if (!ret)
+ 			dev_info(host->parent, "Got CD GPIO\n");
+-		else if (ret != -ENOENT)
++		else if (ret != -ENOENT && ret != -ENOSYS)
+ 			return ret;
+ 
+ 		/*
+@@ -397,7 +397,7 @@ int mmc_of_parse(struct mmc_host *host)
+ 	ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
+ 	if (!ret)
+ 		dev_info(host->parent, "Got WP GPIO\n");
+-	else if (ret != -ENOENT)
++	else if (ret != -ENOENT && ret != -ENOSYS)
+ 		return ret;
+ 
+ 	/* See the comment on CD inversion above */
+diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
+index 5f5adafb253a..b354c8bffb9e 100644
+--- a/drivers/mmc/host/dw_mmc.c
++++ b/drivers/mmc/host/dw_mmc.c
+@@ -99,6 +99,9 @@ struct idmac_desc {
+ 
+ 	__le32		des3;	/* buffer 2 physical address */
+ };
++
++/* Each descriptor can transfer up to 4KB of data in chained mode */
++#define DW_MCI_DESC_DATA_LENGTH	0x1000
+ #endif /* CONFIG_MMC_DW_IDMAC */
+ 
+ static bool dw_mci_reset(struct dw_mci *host);
+@@ -462,66 +465,96 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+ 				    unsigned int sg_len)
+ {
++	unsigned int desc_len;
+ 	int i;
+ 	if (host->dma_64bit_address == 1) {
+-		struct idmac_desc_64addr *desc = host->sg_cpu;
++		struct idmac_desc_64addr *desc_first, *desc_last, *desc;
++
++		desc_first = desc_last = desc = host->sg_cpu;
+ 
+-		for (i = 0; i < sg_len; i++, desc++) {
++		for (i = 0; i < sg_len; i++) {
+ 			unsigned int length = sg_dma_len(&data->sg[i]);
+ 			u64 mem_addr = sg_dma_address(&data->sg[i]);
+ 
+-			/*
+-			 * Set the OWN bit and disable interrupts for this
+-			 * descriptor
+-			 */
+-			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
+-						IDMAC_DES0_CH;
+-			/* Buffer length */
+-			IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
+-
+-			/* Physical address to DMA to/from */
+-			desc->des4 = mem_addr & 0xffffffff;
+-			desc->des5 = mem_addr >> 32;
++			for ( ; length ; desc++) {
++				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
++					   length : DW_MCI_DESC_DATA_LENGTH;
++
++				length -= desc_len;
++
++				/*
++				 * Set the OWN bit and disable interrupts
++				 * for this descriptor
++				 */
++				desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
++							IDMAC_DES0_CH;
++
++				/* Buffer length */
++				IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
++
++				/* Physical address to DMA to/from */
++				desc->des4 = mem_addr & 0xffffffff;
++				desc->des5 = mem_addr >> 32;
++
++				/* Update physical address for the next desc */
++				mem_addr += desc_len;
++
++				/* Save pointer to the last descriptor */
++				desc_last = desc;
++			}
+ 		}
+ 
+ 		/* Set first descriptor */
+-		desc = host->sg_cpu;
+-		desc->des0 |= IDMAC_DES0_FD;
++		desc_first->des0 |= IDMAC_DES0_FD;
+ 
+ 		/* Set last descriptor */
+-		desc = host->sg_cpu + (i - 1) *
+-				sizeof(struct idmac_desc_64addr);
+-		desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+-		desc->des0 |= IDMAC_DES0_LD;
++		desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
++		desc_last->des0 |= IDMAC_DES0_LD;
+ 
+ 	} else {
+-		struct idmac_desc *desc = host->sg_cpu;
++		struct idmac_desc *desc_first, *desc_last, *desc;
++
++		desc_first = desc_last = desc = host->sg_cpu;
+ 
+-		for (i = 0; i < sg_len; i++, desc++) {
++		for (i = 0; i < sg_len; i++) {
+ 			unsigned int length = sg_dma_len(&data->sg[i]);
+ 			u32 mem_addr = sg_dma_address(&data->sg[i]);
+ 
+-			/*
+-			 * Set the OWN bit and disable interrupts for this
+-			 * descriptor
+-			 */
+-			desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
+-					IDMAC_DES0_DIC | IDMAC_DES0_CH);
+-			/* Buffer length */
+-			IDMAC_SET_BUFFER1_SIZE(desc, length);
++			for ( ; length ; desc++) {
++				desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
++					   length : DW_MCI_DESC_DATA_LENGTH;
++
++				length -= desc_len;
++
++				/*
++				 * Set the OWN bit and disable interrupts
++				 * for this descriptor
++				 */
++				desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
++							 IDMAC_DES0_DIC |
++							 IDMAC_DES0_CH);
++
++				/* Buffer length */
++				IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
+ 
+-			/* Physical address to DMA to/from */
+-			desc->des2 = cpu_to_le32(mem_addr);
++				/* Physical address to DMA to/from */
++				desc->des2 = cpu_to_le32(mem_addr);
++
++				/* Update physical address for the next desc */
++				mem_addr += desc_len;
++
++				/* Save pointer to the last descriptor */
++				desc_last = desc;
++			}
+ 		}
+ 
+ 		/* Set first descriptor */
+-		desc = host->sg_cpu;
+-		desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
++		desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
+ 
+ 		/* Set last descriptor */
+-		desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+-		desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
+-		desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
++		desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
++					       IDMAC_DES0_DIC));
++		desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
+ 	}
+ 
+ 	wmb();
+@@ -2406,7 +2439,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+ #ifdef CONFIG_MMC_DW_IDMAC
+ 		mmc->max_segs = host->ring_size;
+ 		mmc->max_blk_size = 65536;
+-		mmc->max_seg_size = 0x1000;
++		mmc->max_seg_size = DW_MCI_DESC_DATA_LENGTH;
+ 		mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ 		mmc->max_blk_count = mmc->max_req_size / 512;
+ #else
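
The core of the dw_mmc change is the inner splitting loop: one scatterlist segment becomes as many chained descriptors as needed, each carrying at most DW_MCI_DESC_DATA_LENGTH (4 KiB) of data, with the physical address advanced per descriptor. A minimal sketch of that loop on its own:

	#include <stdint.h>
	#include <stdio.h>

	#define DESC_MAX 0x1000u	/* mirrors DW_MCI_DESC_DATA_LENGTH */

	static unsigned int emit_descs(uint64_t addr, unsigned int length)
	{
		unsigned int n = 0;

		while (length) {
			unsigned int len = length <= DESC_MAX ? length : DESC_MAX;

			printf("desc %u: addr=%#llx len=%u\n",
			       n, (unsigned long long)addr, len);
			addr += len;	/* advance for the next descriptor */
			length -= len;
			n++;
		}
		return n;
	}

	int main(void)
	{
		/* a 10 KiB segment becomes three descriptors: 4K + 4K + 2K */
		emit_descs(0x80000000ULL, 10 * 1024);
		return 0;
	}
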
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index 82f512d87cb8..461698b038f7 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -868,6 +868,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ 			 struct esdhc_platform_data *boarddata)
+ {
+ 	struct device_node *np = pdev->dev.of_node;
++	int ret;
+ 
+ 	if (!np)
+ 		return -ENODEV;
+@@ -903,6 +904,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
+ 
+ 	mmc_of_parse_voltage(np, &host->ocr_mask);
+ 
++	/* call to generic mmc_of_parse to support additional capabilities */
++	ret = mmc_of_parse(host->mmc);
++	if (ret)
++		return ret;
++
++	if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
++		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
++
+ 	return 0;
+ }
+ #else
+@@ -924,6 +933,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 	struct esdhc_platform_data *boarddata;
+ 	int err;
+ 	struct pltfm_imx_data *imx_data;
++	bool dt = true;
+ 
+ 	host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
+ 	if (IS_ERR(host))
+@@ -1011,11 +1021,44 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 		}
+ 		imx_data->boarddata = *((struct esdhc_platform_data *)
+ 					host->mmc->parent->platform_data);
++		dt = false;
++	}
++	/* write_protect */
++	if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
++		err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
++		if (err) {
++			dev_err(mmc_dev(host->mmc),
++				"failed to request write-protect gpio!\n");
++			goto disable_clk;
++		}
++		host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ 	}
+ 
+ 	/* card_detect */
+-	if (boarddata->cd_type == ESDHC_CD_CONTROLLER)
++	switch (boarddata->cd_type) {
++	case ESDHC_CD_GPIO:
++		if (dt)
++			break;
++		err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
++		if (err) {
++			dev_err(mmc_dev(host->mmc),
++				"failed to request card-detect gpio!\n");
++			goto disable_clk;
++		}
++		/* fall through */
++
++	case ESDHC_CD_CONTROLLER:
++		/* we have a working card_detect again */
+ 		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
++		break;
++
++	case ESDHC_CD_PERMANENT:
++		host->mmc->caps |= MMC_CAP_NONREMOVABLE;
++		break;
++
++	case ESDHC_CD_NONE:
++		break;
++	}
+ 
+ 	switch (boarddata->max_bus_width) {
+ 	case 8:
+@@ -1048,11 +1091,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
+ 		host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
+ 	}
+ 
+-	/* call to generic mmc_of_parse to support additional capabilities */
+-	err = mmc_of_parse(host->mmc);
+-	if (err)
+-		goto disable_clk;
+-
+ 	err = sdhci_add_host(host);
+ 	if (err)
+ 		goto disable_clk;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index fd41b91436ec..cbaf3df3ebd9 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -55,8 +55,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+ static void sdhci_tuning_timer(unsigned long data);
+ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+-					struct mmc_data *data,
+-					struct sdhci_host_next *next);
++					struct mmc_data *data);
+ static int sdhci_do_get_cd(struct sdhci_host *host);
+ 
+ #ifdef CONFIG_PM
+@@ -510,7 +509,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
+ 		goto fail;
+ 	BUG_ON(host->align_addr & host->align_mask);
+ 
+-	host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
++	host->sg_count = sdhci_pre_dma_transfer(host, data);
+ 	if (host->sg_count < 0)
+ 		goto unmap_align;
+ 
+@@ -649,9 +648,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
+ 		}
+ 	}
+ 
+-	if (!data->host_cookie)
++	if (data->host_cookie == COOKIE_MAPPED) {
+ 		dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ 			data->sg_len, direction);
++		data->host_cookie = COOKIE_UNMAPPED;
++	}
+ }
+ 
+ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+@@ -847,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 		} else {
+ 			int sg_cnt;
+ 
+-			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
++			sg_cnt = sdhci_pre_dma_transfer(host, data);
+ 			if (sg_cnt <= 0) {
+ 				/*
+ 				 * This only happens when someone fed
+@@ -963,11 +964,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
+ 		if (host->flags & SDHCI_USE_ADMA)
+ 			sdhci_adma_table_post(host, data);
+ 		else {
+-			if (!data->host_cookie)
++			if (data->host_cookie == COOKIE_MAPPED) {
+ 				dma_unmap_sg(mmc_dev(host->mmc),
+ 					data->sg, data->sg_len,
+ 					(data->flags & MMC_DATA_READ) ?
+ 					DMA_FROM_DEVICE : DMA_TO_DEVICE);
++				data->host_cookie = COOKIE_UNMAPPED;
++			}
+ 		}
+ 	}
+ 
+@@ -2131,49 +2134,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ 	struct mmc_data *data = mrq->data;
+ 
+ 	if (host->flags & SDHCI_REQ_USE_DMA) {
+-		if (data->host_cookie)
++		if (data->host_cookie == COOKIE_GIVEN ||
++				data->host_cookie == COOKIE_MAPPED)
+ 			dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ 					 data->flags & MMC_DATA_WRITE ?
+ 					 DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-		mrq->data->host_cookie = 0;
++		data->host_cookie = COOKIE_UNMAPPED;
+ 	}
+ }
+ 
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+-				       struct mmc_data *data,
+-				       struct sdhci_host_next *next)
++				       struct mmc_data *data)
+ {
+ 	int sg_count;
+ 
+-	if (!next && data->host_cookie &&
+-	    data->host_cookie != host->next_data.cookie) {
+-		pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
+-			__func__, data->host_cookie, host->next_data.cookie);
+-		data->host_cookie = 0;
++	if (data->host_cookie == COOKIE_MAPPED) {
++		data->host_cookie = COOKIE_GIVEN;
++		return data->sg_count;
+ 	}
+ 
+-	/* Check if next job is already prepared */
+-	if (next ||
+-	    (!next && data->host_cookie != host->next_data.cookie)) {
+-		sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
+-				     data->sg_len,
+-				     data->flags & MMC_DATA_WRITE ?
+-				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-
+-	} else {
+-		sg_count = host->next_data.sg_count;
+-		host->next_data.sg_count = 0;
+-	}
++	WARN_ON(data->host_cookie == COOKIE_GIVEN);
+ 
++	sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++				data->flags & MMC_DATA_WRITE ?
++				DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ 
+ 	if (sg_count == 0)
+-		return -EINVAL;
++		return -ENOSPC;
+ 
+-	if (next) {
+-		next->sg_count = sg_count;
+-		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+-	} else
+-		host->sg_count = sg_count;
++	data->sg_count = sg_count;
++	data->host_cookie = COOKIE_MAPPED;
+ 
+ 	return sg_count;
+ }
+@@ -2183,16 +2173,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ {
+ 	struct sdhci_host *host = mmc_priv(mmc);
+ 
+-	if (mrq->data->host_cookie) {
+-		mrq->data->host_cookie = 0;
+-		return;
+-	}
++	mrq->data->host_cookie = COOKIE_UNMAPPED;
+ 
+ 	if (host->flags & SDHCI_REQ_USE_DMA)
+-		if (sdhci_pre_dma_transfer(host,
+-					mrq->data,
+-					&host->next_data) < 0)
+-			mrq->data->host_cookie = 0;
++		sdhci_pre_dma_transfer(host, mrq->data);
+ }
+ 
+ static void sdhci_card_event(struct mmc_host *mmc)
+@@ -3090,7 +3074,6 @@ int sdhci_add_host(struct sdhci_host *host)
+ 		host->max_clk = host->ops->get_max_clock(host);
+ 	}
+ 
+-	host->next_data.cookie = 1;
+ 	/*
+ 	 * In case of Host Controller v3.00, find out whether clock
+ 	 * multiplier is supported.
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index e639b7f435e5..eea23f62356a 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
+  */
+ #define SDHCI_MAX_SEGS		128
+ 
+-struct sdhci_host_next {
+-	unsigned int	sg_count;
+-	s32		cookie;
++enum sdhci_cookie {
++	COOKIE_UNMAPPED,
++	COOKIE_MAPPED,
++	COOKIE_GIVEN,
+ };
+ 
+ struct sdhci_host {
+@@ -506,7 +507,6 @@ struct sdhci_host {
+ #define SDHCI_TUNING_MODE_1	0
+ 	struct timer_list	tuning_timer;	/* Timer for tuning */
+ 
+-	struct sdhci_host_next	next_data;
+ 	unsigned long private[0] ____cacheline_aligned;
+ };
+ 
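
The sdhci rework replaces the fragile next_data cookie counter with a three-state lifecycle: buffers go COOKIE_UNMAPPED to COOKIE_MAPPED when dma_map_sg() succeeds, COOKIE_MAPPED to COOKIE_GIVEN when the prepared mapping is handed to the issuing path, and only MAPPED or GIVEN buffers are ever unmapped. A stripped-down sketch of that state machine (the sg_count value is faked):

	enum cookie { COOKIE_UNMAPPED, COOKIE_MAPPED, COOKIE_GIVEN };

	struct req {
		enum cookie cookie;
		int sg_count;
	};

	static int pre_dma(struct req *r)
	{
		if (r->cookie == COOKIE_MAPPED) {	/* prepared by pre_req() */
			r->cookie = COOKIE_GIVEN;
			return r->sg_count;
		}
		r->sg_count = 4;	/* pretend dma_map_sg() mapped 4 entries */
		r->cookie = COOKIE_MAPPED;
		return r->sg_count;
	}

	static void post_dma(struct req *r)
	{
		if (r->cookie == COOKIE_MAPPED || r->cookie == COOKIE_GIVEN) {
			/* dma_unmap_sg() would run here */
			r->cookie = COOKIE_UNMAPPED;
		}
	}
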
+diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
+index a4615fcc3d00..94a357d93bab 100644
+--- a/drivers/mtd/nand/pxa3xx_nand.c
++++ b/drivers/mtd/nand/pxa3xx_nand.c
+@@ -1475,6 +1475,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
+ 	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
+ 		goto KEEP_CONFIG;
+ 
++	/* Set a default chunk size */
++	info->chunk_size = 512;
++
+ 	ret = pxa3xx_nand_sensing(info);
+ 	if (ret) {
+ 		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
+index 6f93b2990d25..499b8e433d3d 100644
+--- a/drivers/mtd/nand/sunxi_nand.c
++++ b/drivers/mtd/nand/sunxi_nand.c
+@@ -138,6 +138,10 @@
+ #define NFC_ECC_MODE		GENMASK(15, 12)
+ #define NFC_RANDOM_SEED		GENMASK(30, 16)
+ 
++/* NFC_USER_DATA helper macros */
++#define NFC_BUF_TO_USER_DATA(buf)	((buf)[0] | ((buf)[1] << 8) | \
++					((buf)[2] << 16) | ((buf)[3] << 24))
++
+ #define NFC_DEFAULT_TIMEOUT_MS	1000
+ 
+ #define NFC_SRAM_SIZE		1024
+@@ -632,15 +636,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+ 		offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
+ 
+ 		/* Fill OOB data in */
+-		if (oob_required) {
+-			tmp = 0xffffffff;
+-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+-				    4);
+-		} else {
+-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
+-				    chip->oob_poi + offset - mtd->writesize,
+-				    4);
+-		}
++		writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
++					    layout->oobfree[i].offset),
++		       nfc->regs + NFC_REG_USER_DATA_BASE);
+ 
+ 		chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
+ 
+@@ -770,14 +768,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+ 		offset += ecc->size;
+ 
+ 		/* Fill OOB data in */
+-		if (oob_required) {
+-			tmp = 0xffffffff;
+-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
+-				    4);
+-		} else {
+-			memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
+-				    4);
+-		}
++		writel(NFC_BUF_TO_USER_DATA(oob),
++		       nfc->regs + NFC_REG_USER_DATA_BASE);
+ 
+ 		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
+ 		      (1 << 30);
+@@ -1312,6 +1304,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+ 					node);
+ 		nand_release(&chip->mtd);
+ 		sunxi_nand_ecc_cleanup(&chip->nand.ecc);
++		list_del(&chip->node);
+ 	}
+ }
+ 
+diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
+index 5bbd1f094f4e..1fc23e48fe8e 100644
+--- a/drivers/mtd/ubi/io.c
++++ b/drivers/mtd/ubi/io.c
+@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
+ 		goto bad;
+ 	}
+ 
++	if (data_size > ubi->leb_size) {
++		ubi_err(ubi, "bad data_size");
++		goto bad;
++	}
++
+ 	if (vol_type == UBI_VID_STATIC) {
+ 		/*
+ 		 * Although from high-level point of view static volumes may
+diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
+index 68c9c5ea676f..bf2f916df4e2 100644
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -646,6 +646,7 @@ static int init_volumes(struct ubi_device *ubi,
+ 		if (ubi->corr_peb_count)
+ 			ubi_err(ubi, "%d PEBs are corrupted and not used",
+ 				ubi->corr_peb_count);
++		return -ENOSPC;
+ 	}
+ 	ubi->rsvd_pebs += reserved_pebs;
+ 	ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 16214d3d57a4..18fef94542f8 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ 		if (ubi->corr_peb_count)
+ 			ubi_err(ubi, "%d PEBs are corrupted and not used",
+ 				ubi->corr_peb_count);
++		err = -ENOSPC;
+ 		goto out_free;
+ 	}
+ 	ubi->avail_pebs -= reserved_pebs;
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index 4f6bf996851e..7dfbcde34509 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2864,7 +2864,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
+ 		return;
+ 
+ 	pci_sriov_set_totalvfs(pdev, 7);
+-	igb_pci_enable_sriov(pdev, max_vfs);
++	igb_enable_sriov(pdev, max_vfs);
+ 
+ #endif /* CONFIG_PCI_IOV */
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 2fd9e180272b..c5dc6b57212e 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -163,8 +163,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
+ 	skb_cb->eid = eid;
+ 	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ 	ret = dma_mapping_error(dev, skb_cb->paddr);
+-	if (ret)
++	if (ret) {
++		ret = -EIO;
+ 		goto err_credits;
++	}
+ 
+ 	sg_item.transfer_id = ep->eid;
+ 	sg_item.transfer_context = skb;
+diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
+index cbd2bc9e6202..7f4854a52a7c 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -371,8 +371,10 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+ 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ 				       DMA_TO_DEVICE);
+ 	res = dma_mapping_error(dev, skb_cb->paddr);
+-	if (res)
++	if (res) {
++		res = -EIO;
+ 		goto err_free_txdesc;
++	}
+ 
+ 	skb_put(txdesc, len);
+ 	cmd = (struct htt_cmd *)txdesc->data;
+@@ -463,8 +465,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+ 	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ 				       DMA_TO_DEVICE);
+ 	res = dma_mapping_error(dev, skb_cb->paddr);
+-	if (res)
++	if (res) {
++		res = -EIO;
+ 		goto err_free_txbuf;
++	}
+ 
+ 	if (likely(use_frags)) {
+ 		frags = skb_cb->htt.txbuf->frags;
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 973485bd4121..5e021b0b3f9e 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -4464,6 +4464,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+ 	return ret;
+ }
+ 
++static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
++{
++	/* Even though there's a WMI enum for fragmentation threshold, no known
++	 * firmware actually implements it. Moreover, it is not possible to
++	 * delegate frame fragmentation to mac80211 because firmware clears the
++	 * "more fragments" bit in frame control, making it impossible for remote
++	 * devices to reassemble frames.
++	 *
++	 * Hence implement a dummy callback just to say fragmentation isn't
++	 * supported. This effectively prevents mac80211 from doing frame
++	 * fragmentation in software.
++	 */
++	return -EOPNOTSUPP;
++}
++
+ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ 			 u32 queues, bool drop)
+ {
+@@ -5108,6 +5123,7 @@ static const struct ieee80211_ops ath10k_ops = {
+ 	.remain_on_channel		= ath10k_remain_on_channel,
+ 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
+ 	.set_rts_threshold		= ath10k_set_rts_threshold,
++	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
+ 	.flush				= ath10k_flush,
+ 	.tx_last_beacon			= ath10k_tx_last_beacon,
+ 	.set_antenna			= ath10k_set_antenna,
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index ead543282128..3c4c800ab505 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1378,8 +1378,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ 
+ 	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ 	ret = dma_mapping_error(ar->dev, req_paddr);
+-	if (ret)
++	if (ret) {
++		ret = -EIO;
+ 		goto err_dma;
++	}
+ 
+ 	if (resp && resp_len) {
+ 		tresp = kzalloc(*resp_len, GFP_KERNEL);
+@@ -1391,8 +1393,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ 		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ 					    DMA_FROM_DEVICE);
+ 		ret = dma_mapping_error(ar->dev, resp_paddr);
+-		if (ret)
++		if (ret) {
++			ret = -EIO;
+ 			goto err_req;
++		}
+ 
+ 		xfer.wait_for_resp = true;
+ 		xfer.resp_len = 0;
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index c7ea77edce24..408ecd98e61b 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -2517,6 +2517,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+ 				ath10k_warn(ar, "failed to map beacon: %d\n",
+ 					    ret);
+ 				dev_kfree_skb_any(bcn);
++				ret = -EIO;
+ 				goto skip;
+ 			}
+ 
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+index 1c6788aecc62..40d72312f3df 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+@@ -203,8 +203,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 
+ 	/* Copy firmware into DMA-accessible memory */
+ 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+-	if (!fw)
+-		return -ENOMEM;
++	if (!fw) {
++		status = -ENOMEM;
++		goto out;
++	}
+ 	len = fw_entry->size;
+ 
+ 	if (len % 4)
+@@ -217,6 +219,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 
+ 	status = rsi_copy_to_card(common, fw, len, num_blocks);
+ 	kfree(fw);
++
++out:
+ 	release_firmware(fw_entry);
+ 	return status;
+ }
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+index 30c2cf7fa93b..de4900862836 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+@@ -148,8 +148,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 
+ 	/* Copy firmware into DMA-accessible memory */
+ 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+-	if (!fw)
+-		return -ENOMEM;
++	if (!fw) {
++		status = -ENOMEM;
++		goto out;
++	}
+ 	len = fw_entry->size;
+ 
+ 	if (len % 4)
+@@ -162,6 +164,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 
+ 	status = rsi_copy_to_card(common, fw, len, num_blocks);
+ 	kfree(fw);
++
++out:
+ 	release_firmware(fw_entry);
+ 	return status;
+ }
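
Both rsi hunks converge the error paths on a single exit so release_firmware() runs on every path, including the kmemdup() failure that previously returned directly and leaked the firmware entry. A sketch of that goto-out shape with userspace stand-ins:

	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>

	static int load_blob(const void *src, size_t size)
	{
		void *fw;
		int status = 0;

		fw = malloc(size);	/* stands in for kmemdup() */
		if (!fw) {
			status = -ENOMEM;
			goto out;	/* still release the firmware entry */
		}
		memcpy(fw, src, size);
		/* ... pad to a 4-byte multiple, copy to the card ... */
		free(fw);
	out:
		/* release_firmware(fw_entry) belongs here, on every path */
		return status;
	}
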
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index e031c943286e..52f081f4dfd5 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1353,7 +1353,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
+ 		queue->tx_evtchn = queue->rx_evtchn = 0;
+ 		queue->tx_irq = queue->rx_irq = 0;
+ 
+-		napi_synchronize(&queue->napi);
++		if (netif_running(info->netdev))
++			napi_synchronize(&queue->napi);
+ 
+ 		xennet_release_tx_bufs(queue);
+ 		xennet_release_rx_bufs(queue);
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index b965c12168b7..502a82ca1db0 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ 			       void *arg)
+ {
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	struct pci_dev *tdev = pci_get_slot(dev->bus,
++					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ 	ssize_t ret;
+ 
+ 	if (!tdev)
+@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ 				const void *arg)
+ {
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	struct pci_dev *tdev = pci_get_slot(dev->bus,
++					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ 	ssize_t ret;
+ 
+ 	if (!tdev)
+@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
+ 	.release = pci_vpd_pci22_release,
+ };
+ 
+-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+-{
+-	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+-	int ret = 0;
+-
+-	if (!tdev)
+-		return -ENODEV;
+-	if (!tdev->vpd || !tdev->multifunction ||
+-	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
+-	    dev->device != tdev->device)
+-		ret = -ENODEV;
+-
+-	pci_dev_put(tdev);
+-	return ret;
+-}
+-
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+ 	struct pci_vpd_pci22 *vpd;
+@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+ 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ 	if (!cap)
+ 		return -ENODEV;
+-	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+-		int ret = pci_vpd_f0_dev_check(dev);
+ 
+-		if (ret)
+-			return ret;
+-	}
+ 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ 	if (!vpd)
+ 		return -ENOMEM;
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 6fbd3f2b5992..d3346d23963b 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+ 
+ 		res->start = start;
+ 		res->end = end;
++		res->flags &= ~IORESOURCE_UNSET;
++		orig_res.flags &= ~IORESOURCE_UNSET;
+ 		dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ 				 &orig_res, res);
+ 
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 804cd3b02c66..4a6933f02cd0 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1915,11 +1915,27 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++/*
++ * Quirk non-zero PCI functions to route VPD access through function 0 for
++ * devices that share VPD resources between functions.  The functions are
++ * expected to be identical devices.
++ */
+ static void quirk_f0_vpd_link(struct pci_dev *dev)
+ {
+-	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++	struct pci_dev *f0;
++
++	if (!PCI_FUNC(dev->devfn))
+ 		return;
+-	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++	f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
++	if (!f0)
++		return;
++
++	if (f0->vpd && dev->class == f0->class &&
++	    dev->vendor == f0->vendor && dev->device == f0->device)
++		dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++
++	pci_dev_put(f0);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ 			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
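
The access.c and quirks.c hunks above all need "function 0 in the same
slot as this device".  A devfn packs the slot and function bits
together, so pci_get_slot() must be handed a complete devfn; the old
code passed a bare PCI_SLOT(dev->devfn), which is a slot number, not a
devfn.  An illustrative helper (the name is made up):

	#include <linux/pci.h>

	static struct pci_dev *example_get_func0(struct pci_dev *dev)
	{
		/* PCI_DEVFN() rebuilds a devfn from slot + function 0 */
		return pci_get_slot(dev->bus,
				    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	}
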
+diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
+index 803945259da8..42861cc70158 100644
+--- a/drivers/pcmcia/sa1100_generic.c
++++ b/drivers/pcmcia/sa1100_generic.c
+@@ -93,7 +93,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
+ 	for (i = 0; i < sinfo->nskt; i++)
+ 		soc_pcmcia_remove_one(&sinfo->skt[i]);
+ 
+-	clk_put(sinfo->clk);
+ 	kfree(sinfo);
+ 	return 0;
+ }
+diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
+index cf6de2c2b329..553d70a67f80 100644
+--- a/drivers/pcmcia/sa11xx_base.c
++++ b/drivers/pcmcia/sa11xx_base.c
+@@ -222,7 +222,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
+ 	int i, ret = 0;
+ 	struct clk *clk;
+ 
+-	clk = clk_get(dev, NULL);
++	clk = devm_clk_get(dev, NULL);
+ 	if (IS_ERR(clk))
+ 		return PTR_ERR(clk);
+ 
+@@ -251,7 +251,6 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
+ 	if (ret) {
+ 		while (--i >= 0)
+ 			soc_pcmcia_remove_one(&sinfo->skt[i]);
+-		clk_put(clk);
+ 		kfree(sinfo);
+ 	} else {
+ 		dev_set_drvdata(dev, sinfo);
+diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
+index 9956b9902bb4..93e54a0f471a 100644
+--- a/drivers/platform/x86/toshiba_acpi.c
++++ b/drivers/platform/x86/toshiba_acpi.c
+@@ -2525,11 +2525,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
+ 	if (error)
+ 		return error;
+ 
+-	error = toshiba_hotkey_event_type_get(dev, &events_type);
+-	if (error) {
+-		pr_err("Unable to query Hotkey Event Type\n");
+-		return error;
+-	}
++	if (toshiba_hotkey_event_type_get(dev, &events_type))
++		pr_notice("Unable to query Hotkey Event Type\n");
++
+ 	dev->hotkey_event_type = events_type;
+ 
+ 	dev->hotkey_dev = input_allocate_device();
+diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
+index 7f3d389bd601..a67eeace6a89 100644
+--- a/drivers/power/avs/Kconfig
++++ b/drivers/power/avs/Kconfig
+@@ -13,7 +13,7 @@ menuconfig POWER_AVS
+ 
+ config ROCKCHIP_IODOMAIN
+         tristate "Rockchip IO domain support"
+-        depends on ARCH_ROCKCHIP && OF
++        depends on POWER_AVS && ARCH_ROCKCHIP && OF
+         help
+           Say y here to enable support io domains on Rockchip SoCs. It is
+           necessary for the io domain setting of the SoC to match the
+diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
+index add419d6ff34..a56a7b243e91 100644
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
+ 	.llseek		= noop_llseek,
+ };
+ 
++/*
++ * The controllers use an inline buffer instead of a mapped SGL for small,
++ * single entry buffers.  Note that we treat a zero-length transfer like
++ * a mapped SGL.
++ */
++static bool twa_command_mapped(struct scsi_cmnd *cmd)
++{
++	return scsi_sg_count(cmd) != 1 ||
++		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
++}
++
+ /* This function will complete an aen request from the isr */
+ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+ {
+@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
+ 				}
+ 
+ 				/* Now complete the io */
+-				scsi_dma_unmap(cmd);
++				if (twa_command_mapped(cmd))
++					scsi_dma_unmap(cmd);
+ 				cmd->scsi_done(cmd);
+ 				tw_dev->state[request_id] = TW_S_COMPLETED;
+ 				twa_free_request_id(tw_dev, request_id);
+@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
+ 				struct scsi_cmnd *cmd = tw_dev->srb[i];
+ 
+ 				cmd->result = (DID_RESET << 16);
+-				scsi_dma_unmap(cmd);
++				if (twa_command_mapped(cmd))
++					scsi_dma_unmap(cmd);
+ 				cmd->scsi_done(cmd);
+ 			}
+ 		}
+@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
+ 	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+ 	switch (retval) {
+ 	case SCSI_MLQUEUE_HOST_BUSY:
+-		scsi_dma_unmap(SCpnt);
++		if (twa_command_mapped(SCpnt))
++			scsi_dma_unmap(SCpnt);
+ 		twa_free_request_id(tw_dev, request_id);
+ 		break;
+ 	case 1:
+ 		SCpnt->result = (DID_ERROR << 16);
+-		scsi_dma_unmap(SCpnt);
++		if (twa_command_mapped(SCpnt))
++			scsi_dma_unmap(SCpnt);
+ 		done(SCpnt);
+ 		tw_dev->state[request_id] = TW_S_COMPLETED;
+ 		twa_free_request_id(tw_dev, request_id);
+@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+ 		/* Map sglist from scsi layer to cmd packet */
+ 
+ 		if (scsi_sg_count(srb)) {
+-			if ((scsi_sg_count(srb) == 1) &&
+-			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
++			if (!twa_command_mapped(srb)) {
+ 				if (srb->sc_data_direction == DMA_TO_DEVICE ||
+ 				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
+ 					scsi_sg_copy_to_buffer(srb,
+@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
+ {
+ 	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+ 
+-	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
++	if (!twa_command_mapped(cmd) &&
+ 	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+ 	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+ 		if (scsi_sg_count(cmd) == 1) {
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index a9aa38903efe..cccab6188328 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -4554,7 +4554,7 @@ static ssize_t ipr_store_raw_mode(struct device *dev,
+ 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ 	res = (struct ipr_resource_entry *)sdev->hostdata;
+ 	if (res) {
+-		if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
++		if (ipr_is_af_dasd_device(res)) {
+ 			res->raw_mode = simple_strtoul(buf, NULL, 10);
+ 			len = strlen(buf);
+ 			if (res->sdev)
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index ce6c770d74d5..c6b93d273799 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -2169,8 +2169,17 @@ int scsi_error_handler(void *data)
+ 	 * We never actually get interrupted because kthread_run
+ 	 * disables signal delivery for the created thread.
+ 	 */
+-	while (!kthread_should_stop()) {
++	while (true) {
++		/*
++		 * The sequence in kthread_stop() sets the stop flag first,
++		 * then wakes the process.  To avoid missed wakeups, the task
++		 * should always be in a non-running state before the stop
++		 * flag is checked.
++		 */
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_stop())
++			break;
++
+ 		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
+ 		    shost->host_failed != atomic_read(&shost->host_busy)) {
+ 			SCSI_LOG_ERROR_RECOVERY(1,
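
The loop rewrite above is the canonical shutdown pattern for any kernel
thread, restated here as a generic sketch (the work step is elided):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int example_thread(void *data)
	{
		while (true) {
			/* publish the sleep state *before* testing the stop
			 * flag: kthread_stop() sets the flag and then wakes
			 * the task, so this order cannot lose the wake-up */
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;
			schedule();	/* woken by work or by kthread_stop() */
			/* ... handle one batch of work ... */
		}
		__set_current_state(TASK_RUNNING);
		return 0;
	}
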
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index e3223ac75a7c..f089082c00e1 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -624,6 +624,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
+ 	if (!(sccr1_reg & SSCR1_TIE))
+ 		mask &= ~SSSR_TFS;
+ 
++	/* Ignore RX timeout interrupt if it is disabled */
++	if (!(sccr1_reg & SSCR1_TINTE))
++		mask &= ~SSSR_TINT;
++
+ 	if (!(status & mask))
+ 		return IRQ_NONE;
+ 
+diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
+index 2e32ea2f194f..be6155cba9de 100644
+--- a/drivers/spi/spi-xtensa-xtfpga.c
++++ b/drivers/spi/spi-xtensa-xtfpga.c
+@@ -34,13 +34,13 @@ struct xtfpga_spi {
+ static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ 				      unsigned addr, u32 val)
+ {
+-	iowrite32(val, spi->regs + addr);
++	__raw_writel(val, spi->regs + addr);
+ }
+ 
+ static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ 					     unsigned addr)
+ {
+-	return ioread32(spi->regs + addr);
++	return __raw_readl(spi->regs + addr);
+ }
+ 
+ static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index d35c1a13217c..029dbd33b4b2 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1427,8 +1427,7 @@ static struct class spi_master_class = {
+  *
+  * The caller is responsible for assigning the bus number and initializing
+  * the master's methods before calling spi_register_master(); and (after errors
+- * adding the device) calling spi_master_put() and kfree() to prevent a memory
+- * leak.
++ * adding the device) calling spi_master_put() to prevent a memory leak.
+  */
+ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+ {
+diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
+index 92c909eed6b5..8fab566e0f0b 100644
+--- a/drivers/spi/spidev.c
++++ b/drivers/spi/spidev.c
+@@ -664,7 +664,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
+ 		kfree(spidev->rx_buffer);
+ 		spidev->rx_buffer = NULL;
+ 
+-		spidev->speed_hz = spidev->spi->max_speed_hz;
++		if (spidev->spi)
++			spidev->speed_hz = spidev->spi->max_speed_hz;
+ 
+ 		/* ... after we unbound from the underlying device? */
+ 		spin_lock_irq(&spidev->spi_lock);
+diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
+index b0b96ab31954..abbc42a56e7c 100644
+--- a/drivers/staging/android/ion/ion.c
++++ b/drivers/staging/android/ion/ion.c
+@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+ 		mutex_unlock(&client->lock);
+ 		goto end;
+ 	}
+-	mutex_unlock(&client->lock);
+ 
+ 	handle = ion_handle_create(client, buffer);
+-	if (IS_ERR(handle))
++	if (IS_ERR(handle)) {
++		mutex_unlock(&client->lock);
+ 		goto end;
++	}
+ 
+-	mutex_lock(&client->lock);
+ 	ret = ion_handle_add(client, handle);
+ 	mutex_unlock(&client->lock);
+ 	if (ret) {
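
The ion change above keeps client->lock held from buffer lookup through
handle insertion instead of dropping and retaking it.  A compact sketch
of that shape; the types and the handle_create()/handle_add() helpers
are hypothetical stand-ins, not the ion API:

	#include <linux/mutex.h>
	#include <linux/err.h>

	struct example_client { struct mutex lock; };
	struct example_handle;
	static struct example_handle *handle_create(struct example_client *c);
	static int handle_add(struct example_client *c,
			      struct example_handle *h);

	static struct example_handle *get_handle(struct example_client *c)
	{
		struct example_handle *h;

		mutex_lock(&c->lock);
		h = handle_create(c);	/* no unlock/relock window here */
		if (!IS_ERR(h) && handle_add(c, h))
			h = ERR_PTR(-EINVAL);	/* real code also drops h */
		mutex_unlock(&c->lock);
		return h;
	}
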
+diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
+index 4299cf45f947..5e1f16c36b49 100644
+--- a/drivers/staging/speakup/fakekey.c
++++ b/drivers/staging/speakup/fakekey.c
+@@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
+ 	__this_cpu_write(reporting_keystroke, true);
+ 	input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
+ 	input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
++	input_sync(virt_keyboard);
+ 	__this_cpu_write(reporting_keystroke, false);
+ 
+ 	/* reenable preemption */
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 0ab6e2efd28c..330bbe831066 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -341,7 +341,6 @@ static struct iscsi_np *iscsit_get_np(
+ 
+ struct iscsi_np *iscsit_add_np(
+ 	struct __kernel_sockaddr_storage *sockaddr,
+-	char *ip_str,
+ 	int network_transport)
+ {
+ 	struct sockaddr_in *sock_in;
+@@ -370,11 +369,9 @@ struct iscsi_np *iscsit_add_np(
+ 	np->np_flags |= NPF_IP_NETWORK;
+ 	if (sockaddr->ss_family == AF_INET6) {
+ 		sock_in6 = (struct sockaddr_in6 *)sockaddr;
+-		snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
+ 		np->np_port = ntohs(sock_in6->sin6_port);
+ 	} else {
+ 		sock_in = (struct sockaddr_in *)sockaddr;
+-		sprintf(np->np_ip, "%s", ip_str);
+ 		np->np_port = ntohs(sock_in->sin_port);
+ 	}
+ 
+@@ -411,8 +408,8 @@ struct iscsi_np *iscsit_add_np(
+ 	list_add_tail(&np->np_list, &g_np_list);
+ 	mutex_unlock(&np_lock);
+ 
+-	pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
+-		np->np_ip, np->np_port, np->np_transport->name);
++	pr_debug("CORE[0] - Added Network Portal: %pISc:%hu on %s\n",
++		&np->np_sockaddr, np->np_port, np->np_transport->name);
+ 
+ 	return np;
+ }
+@@ -481,8 +478,8 @@ int iscsit_del_np(struct iscsi_np *np)
+ 	list_del(&np->np_list);
+ 	mutex_unlock(&np_lock);
+ 
+-	pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
+-		np->np_ip, np->np_port, np->np_transport->name);
++	pr_debug("CORE[0] - Removed Network Portal: %pISc:%hu on %s\n",
++		&np->np_sockaddr, np->np_port, np->np_transport->name);
+ 
+ 	iscsit_put_transport(np->np_transport);
+ 	kfree(np);
+@@ -3467,7 +3464,6 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+ 						tpg_np_list) {
+ 				struct iscsi_np *np = tpg_np->tpg_np;
+ 				bool inaddr_any = iscsit_check_inaddr_any(np);
+-				char *fmt_str;
+ 
+ 				if (np->np_network_transport != network_transport)
+ 					continue;
+@@ -3495,15 +3491,18 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
+ 					}
+ 				}
+ 
+-				if (np->np_sockaddr.ss_family == AF_INET6)
+-					fmt_str = "TargetAddress=[%s]:%hu,%hu";
+-				else
+-					fmt_str = "TargetAddress=%s:%hu,%hu";
+-
+-				len = sprintf(buf, fmt_str,
+-					inaddr_any ? conn->local_ip : np->np_ip,
+-					np->np_port,
+-					tpg->tpgt);
++				if (inaddr_any) {
++					len = sprintf(buf, "TargetAddress="
++						      "%s:%hu,%hu",
++						      conn->local_ip,
++						      np->np_port,
++						      tpg->tpgt);
++				} else {
++					len = sprintf(buf, "TargetAddress="
++						      "%pISpc,%hu",
++						      &np->np_sockaddr,
++						      tpg->tpgt);
++				}
+ 				len += 1;
+ 
+ 				if ((len + payload_len) > buffer_len) {
+diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
+index 7d0f9c00d9c2..d294f030a097 100644
+--- a/drivers/target/iscsi/iscsi_target.h
++++ b/drivers/target/iscsi/iscsi_target.h
+@@ -13,7 +13,7 @@ extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
+ extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
+ 				struct iscsi_np *, int);
+ extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
+-				char *, int);
++				int);
+ extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
+ 				struct iscsi_portal_group *, bool);
+ extern int iscsit_del_np(struct iscsi_np *);
+diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
+index 469fce44ebad..6f2fb546477e 100644
+--- a/drivers/target/iscsi/iscsi_target_configfs.c
++++ b/drivers/target/iscsi/iscsi_target_configfs.c
+@@ -100,7 +100,7 @@ static ssize_t lio_target_np_store_sctp(
+ 		 * Use existing np->np_sockaddr for SCTP network portal reference
+ 		 */
+ 		tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+-					np->np_ip, tpg_np, ISCSI_SCTP_TCP);
++					tpg_np, ISCSI_SCTP_TCP);
+ 		if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
+ 			goto out;
+ 	} else {
+@@ -178,7 +178,7 @@ static ssize_t lio_target_np_store_iser(
+ 		}
+ 
+ 		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
+-				np->np_ip, tpg_np, ISCSI_INFINIBAND);
++				tpg_np, ISCSI_INFINIBAND);
+ 		if (IS_ERR(tpg_np_iser)) {
+ 			rc = PTR_ERR(tpg_np_iser);
+ 			goto out;
+@@ -249,8 +249,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
+ 			return ERR_PTR(-EINVAL);
+ 		}
+ 		str++; /* Skip over leading "[" */
+-		*str2 = '\0'; /* Terminate the IPv6 address */
+-		str2++; /* Skip over the "]" */
++		*str2 = '\0'; /* Terminate the unbracketed IPv6 address */
++		str2++; /* Skip over the \0 */
+ 		port_str = strstr(str2, ":");
+ 		if (!port_str) {
+ 			pr_err("Unable to locate \":port\""
+@@ -317,7 +317,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
+ 	 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
+ 	 *
+ 	 */
+-	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
++	tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
+ 				ISCSI_TCP);
+ 	if (IS_ERR(tpg_np)) {
+ 		iscsit_put_tpg(tpg);
+@@ -345,8 +345,8 @@ static void lio_target_call_delnpfromtpg(
+ 
+ 	se_tpg = &tpg->tpg_se_tpg;
+ 	pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
+-		" PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
+-		tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
++		" PORTAL: %pISc:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
++		tpg->tpgt, &tpg_np->tpg_np->np_sockaddr, tpg_np->tpg_np->np_port);
+ 
+ 	ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
+ 	if (ret < 0)
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index c3bccaddb592..39654e917cd8 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -879,8 +879,8 @@ static void iscsi_handle_login_thread_timeout(unsigned long data)
+ 	struct iscsi_np *np = (struct iscsi_np *) data;
+ 
+ 	spin_lock_bh(&np->np_thread_lock);
+-	pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
+-			np->np_ip, np->np_port);
++	pr_err("iSCSI Login timeout on Network Portal %pISc:%hu\n",
++			&np->np_sockaddr, np->np_port);
+ 
+ 	if (np->np_login_timer_flags & ISCSI_TF_STOP) {
+ 		spin_unlock_bh(&np->np_thread_lock);
+@@ -1358,8 +1358,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	spin_lock_bh(&np->np_thread_lock);
+ 	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
+ 		spin_unlock_bh(&np->np_thread_lock);
+-		pr_err("iSCSI Network Portal on %s:%hu currently not"
+-			" active.\n", np->np_ip, np->np_port);
++		pr_err("iSCSI Network Portal on %pISc:%hu currently not"
++			" active.\n", &np->np_sockaddr, np->np_port);
+ 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ 				ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+ 		goto new_sess_out;
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
+index 5e3295fe404d..3bc7d62c0a65 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.c
++++ b/drivers/target/iscsi/iscsi_target_tpg.c
+@@ -460,7 +460,6 @@ static bool iscsit_tpg_check_network_portal(
+ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ 	struct iscsi_portal_group *tpg,
+ 	struct __kernel_sockaddr_storage *sockaddr,
+-	char *ip_str,
+ 	struct iscsi_tpg_np *tpg_np_parent,
+ 	int network_transport)
+ {
+@@ -470,8 +469,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ 	if (!tpg_np_parent) {
+ 		if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
+ 				network_transport)) {
+-			pr_err("Network Portal: %s already exists on a"
+-				" different TPG on %s\n", ip_str,
++			pr_err("Network Portal: %pISc already exists on a"
++				" different TPG on %s\n", sockaddr,
+ 				tpg->tpg_tiqn->tiqn);
+ 			return ERR_PTR(-EEXIST);
+ 		}
+@@ -484,7 +483,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ 		return ERR_PTR(-ENOMEM);
+ 	}
+ 
+-	np = iscsit_add_np(sockaddr, ip_str, network_transport);
++	np = iscsit_add_np(sockaddr, network_transport);
+ 	if (IS_ERR(np)) {
+ 		kfree(tpg_np);
+ 		return ERR_CAST(np);
+@@ -514,8 +513,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
+ 		spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
+ 	}
+ 
+-	pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
+-		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
++	pr_debug("CORE[%s] - Added Network Portal: %pISc:%hu,%hu on %s\n",
++		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
+ 		np->np_transport->name);
+ 
+ 	return tpg_np;
+@@ -528,8 +527,8 @@ static int iscsit_tpg_release_np(
+ {
+ 	iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
+ 
+-	pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
+-		tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
++	pr_debug("CORE[%s] - Removed Network Portal: %pISc:%hu,%hu on %s\n",
++		tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
+ 		np->np_transport->name);
+ 
+ 	tpg_np->tpg_np = NULL;
+diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
+index 95ff5bdecd71..28abda89ea98 100644
+--- a/drivers/target/iscsi/iscsi_target_tpg.h
++++ b/drivers/target/iscsi/iscsi_target_tpg.h
+@@ -22,7 +22,7 @@ extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session
+ extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
+ extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
+ extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
+-			struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
++			struct __kernel_sockaddr_storage *, struct iscsi_tpg_np *,
+ 			int);
+ extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
+ 			struct iscsi_tpg_np *);
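
The iscsi hunks above replace cached IP strings with printk's %pIS
extension, which formats a struct sockaddr directly and so handles IPv4
and IPv6 with one format string.  A sketch of the flags involved (the
function and message are made up for illustration):

	#include <linux/socket.h>
	#include <linux/printk.h>

	static void example_log_portal(struct sockaddr_storage *ss)
	{
		/* 'c' compresses IPv6 ("::1"); adding 'p' appends ":port" */
		pr_debug("portal %pISc (with port: %pISpc)\n", ss, ss);
	}
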
+diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
+index a15411c79ae9..08aa7cc58694 100644
+--- a/drivers/target/target_core_pr.c
++++ b/drivers/target/target_core_pr.c
+@@ -328,6 +328,9 @@ static int core_scsi3_pr_seq_non_holder(
+ 	int legacy = 0; /* Act like a legacy device and return
+ 			 * RESERVATION CONFLICT on some CDBs */
+ 
++	if (!se_sess->se_node_acl->device_list)
++		return 0;
++
+ 	se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
+ 	/*
+ 	 * Determine if the registration should be ignored due to
+diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
+index 1738b1646988..9fc33e84439a 100644
+--- a/drivers/target/target_core_ua.c
++++ b/drivers/target/target_core_ua.c
+@@ -48,7 +48,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
+ 		return 0;
+ 
+ 	nacl = sess->se_node_acl;
+-	if (!nacl)
++	if (!nacl || !nacl->device_list)
+ 		return 0;
+ 
+ 	deve = nacl->device_list[cmd->orig_fe_lun];
+@@ -90,7 +90,7 @@ int core_scsi3_ua_allocate(
+ 	/*
+ 	 * PASSTHROUGH OPS
+ 	 */
+-	if (!nacl)
++	if (!nacl || !nacl->device_list)
+ 		return -EINVAL;
+ 
+ 	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
+@@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
+ 		return;
+ 
+ 	nacl = sess->se_node_acl;
+-	if (!nacl)
++	if (!nacl || !nacl->device_list)
+ 		return;
+ 
+ 	spin_lock_irq(&nacl->device_list_lock);
+@@ -276,7 +276,7 @@ int core_scsi3_ua_clear_for_request_sense(
+ 		return -EINVAL;
+ 
+ 	nacl = sess->se_node_acl;
+-	if (!nacl)
++	if (!nacl || !nacl->device_list)
+ 		return -EINVAL;
+ 
+ 	spin_lock_irq(&nacl->device_list_lock);
+diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
+index 8fd680ac941b..4609305a1591 100644
+--- a/drivers/target/target_core_xcopy.c
++++ b/drivers/target/target_core_xcopy.c
+@@ -465,6 +465,8 @@ int target_xcopy_setup_pt(void)
+ 	memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
+ 	INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
+ 	INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
++	INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
++	spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
+ 
+ 	xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
+ 	xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
+@@ -666,7 +668,7 @@ static int target_xcopy_read_source(
+ 	pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
+ 		(unsigned long long)src_lba, src_sectors, length);
+ 
+-	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
++	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ 			      DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+ 	xop->src_pt_cmd = xpt_cmd;
+ 
+@@ -726,7 +728,7 @@ static int target_xcopy_write_destination(
+ 	pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
+ 		(unsigned long long)dst_lba, dst_sectors, length);
+ 
+-	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
++	transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
+ 			      DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
+ 	xop->dst_pt_cmd = xpt_cmd;
+ 
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 16ed0b6c7f9c..6b6c6606af5f 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
+ 		spin_lock_irqsave(&tty->ctrl_lock, flags);
+ 		tty->ctrl_status |= TIOCPKT_FLUSHREAD;
+ 		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+-		if (waitqueue_active(&tty->link->read_wait))
+-			wake_up_interruptible(&tty->link->read_wait);
++		wake_up_interruptible(&tty->link->read_wait);
+ 	}
+ }
+ 
+@@ -1383,8 +1382,7 @@ handle_newline:
+ 			put_tty_queue(c, ldata);
+ 			smp_store_release(&ldata->canon_head, ldata->read_head);
+ 			kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+-			if (waitqueue_active(&tty->read_wait))
+-				wake_up_interruptible_poll(&tty->read_wait, POLLIN);
++			wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ 			return 0;
+ 		}
+ 	}
+@@ -1670,8 +1668,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ 
+ 	if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
+ 		kill_fasync(&tty->fasync, SIGIO, POLL_IN);
+-		if (waitqueue_active(&tty->read_wait))
+-			wake_up_interruptible_poll(&tty->read_wait, POLLIN);
++		wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+ 	}
+ }
+ 
+@@ -1890,10 +1887,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
+ 	}
+ 
+ 	/* The termios change make the tty ready for I/O */
+-	if (waitqueue_active(&tty->write_wait))
+-		wake_up_interruptible(&tty->write_wait);
+-	if (waitqueue_active(&tty->read_wait))
+-		wake_up_interruptible(&tty->read_wait);
++	wake_up_interruptible(&tty->write_wait);
++	wake_up_interruptible(&tty->read_wait);
+ }
+ 
+ /**
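
The n_tty hunks above drop the waitqueue_active() guards because,
without a memory barrier, the emptiness check can be reordered against
a waiter adding itself to the queue, losing a wake-up.  Waking
unconditionally, as the patch does, is always safe; if the guard is
kept as a micro-optimization it needs an explicit barrier, roughly:

	#include <linux/wait.h>

	static void example_wake_readers(wait_queue_head_t *wq)
	{
		/* pairs with the implicit barrier in the waiter's
		 * set_current_state(); without it the check below can be
		 * reordered before the stores that made data available */
		smp_mb();
		if (waitqueue_active(wq))
			wake_up_interruptible(wq);
	}
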
+diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
+index 4506e405c8f3..b4fd8debf941 100644
+--- a/drivers/tty/serial/8250/8250_core.c
++++ b/drivers/tty/serial/8250/8250_core.c
+@@ -339,6 +339,14 @@ configured less than Maximum supported fifo bytes */
+ 				  UART_FCR7_64BYTE,
+ 		.flags		= UART_CAP_FIFO,
+ 	},
++	[PORT_RT2880] = {
++		.name		= "Palmchip BK-3103",
++		.fifo_size	= 16,
++		.tx_loadsz	= 16,
++		.fcr		= UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
++		.rxtrig_bytes	= {1, 4, 8, 14},
++		.flags		= UART_CAP_FIFO,
++	},
+ };
+ 
+ /* Uart divisor latch read */
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 763eb20fe321..0cc622afb67d 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -1360,9 +1360,9 @@ static void pl011_tx_softirq(struct work_struct *work)
+ 	struct uart_amba_port *uap =
+ 		container_of(dwork, struct uart_amba_port, tx_softirq_work);
+ 
+-	spin_lock(&uap->port.lock);
++	spin_lock_irq(&uap->port.lock);
+ 	while (pl011_tx_chars(uap)) ;
+-	spin_unlock(&uap->port.lock);
++	spin_unlock_irq(&uap->port.lock);
+ }
+ 
+ static void pl011_tx_irq_seen(struct uart_amba_port *uap)
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 5ca1dfb0561c..85323ff75edf 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -2640,7 +2640,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
+ 	ret = atmel_init_gpios(port, &pdev->dev);
+ 	if (ret < 0) {
+ 		dev_err(&pdev->dev, "Failed to initialize GPIOs.");
+-		goto err;
++		goto err_clear_bit;
+ 	}
+ 
+ 	ret = atmel_init_port(port, pdev);
+diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
+index e5695467598f..21837f14a403 100644
+--- a/drivers/tty/tty_io.c
++++ b/drivers/tty/tty_io.c
+@@ -2144,8 +2144,24 @@ retry_open:
+ 	if (!noctty &&
+ 	    current->signal->leader &&
+ 	    !current->signal->tty &&
+-	    tty->session == NULL)
+-		__proc_set_tty(tty);
++	    tty->session == NULL) {
++		/*
++		 * Don't let a process that only has write access to the tty
++		 * obtain the privileges associated with having a tty as
++		 * controlling terminal (being able to reopen it with full
++		 * access through /dev/tty, being able to perform pushback).
++		 * Many distributions set the group of all ttys to "tty" and
++		 * grant write-only access to all terminals for setgid tty
++		 * binaries, which should not imply full privileges on all ttys.
++		 *
++		 * This could theoretically break old code that performs open()
++		 * on a write-only file descriptor. In that case, it might be
++		 * necessary to also permit this if
++		 * inode_permission(inode, MAY_READ) == 0.
++		 */
++		if (filp->f_mode & FMODE_READ)
++			__proc_set_tty(tty);
++	}
+ 	spin_unlock_irq(&current->sighand->siglock);
+ 	read_unlock(&tasklist_lock);
+ 	tty_unlock(tty);
+@@ -2434,7 +2450,7 @@ static int fionbio(struct file *file, int __user *p)
+  *		Takes ->siglock() when updating signal->tty
+  */
+ 
+-static int tiocsctty(struct tty_struct *tty, int arg)
++static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
+ {
+ 	int ret = 0;
+ 
+@@ -2468,6 +2484,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
+ 			goto unlock;
+ 		}
+ 	}
++
++	/* See the comment in tty_open(). */
++	if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
++		ret = -EPERM;
++		goto unlock;
++	}
++
+ 	proc_set_tty(tty);
+ unlock:
+ 	read_unlock(&tasklist_lock);
+@@ -2860,7 +2883,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ 		no_tty();
+ 		return 0;
+ 	case TIOCSCTTY:
+-		return tiocsctty(tty, arg);
++		return tiocsctty(tty, file, arg);
+ 	case TIOCGPGRP:
+ 		return tiocgpgrp(tty, real_tty, p);
+ 	case TIOCSPGRP:
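
The two tty_io.c changes above enforce one rule in two places.  A
hypothetical predicate condensing them, for illustration only: the
automatic grant in tty_open() applies just the FMODE_READ half, while
the explicit TIOCSCTTY path also accepts a privileged caller.

	#include <linux/fs.h>
	#include <linux/capability.h>

	static bool example_may_acquire_ctty(struct file *filp, bool via_ioctl)
	{
		if (filp->f_mode & FMODE_READ)
			return true;
		/* TIOCSCTTY alone may override with CAP_SYS_ADMIN */
		return via_ioctl && capable(CAP_SYS_ADMIN);
	}
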
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 389f0e034259..fa774323ebda 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -56,7 +56,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ 	{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
+ 	{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
+ 	{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
+-	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
++	{ .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
+ 	{ /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 764f668d45a9..6e53c24fa1cb 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -656,6 +656,44 @@ __acquires(hwep->lock)
+ 	return 0;
+ }
+ 
++static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
++{
++	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
++	int direction, retval = 0;
++	unsigned long flags;
++
++	if (ep == NULL || hwep->ep.desc == NULL)
++		return -EINVAL;
++
++	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
++		return -EOPNOTSUPP;
++
++	spin_lock_irqsave(hwep->lock, flags);
++
++	if (value && hwep->dir == TX && check_transfer &&
++		!list_empty(&hwep->qh.queue) &&
++			!usb_endpoint_xfer_control(hwep->ep.desc)) {
++		spin_unlock_irqrestore(hwep->lock, flags);
++		return -EAGAIN;
++	}
++
++	direction = hwep->dir;
++	do {
++		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
++
++		if (!value)
++			hwep->wedge = 0;
++
++		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
++			hwep->dir = (hwep->dir == TX) ? RX : TX;
++
++	} while (hwep->dir != direction);
++
++	spin_unlock_irqrestore(hwep->lock, flags);
++	return retval;
++}
++
++
+ /**
+  * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
+  * @gadget: gadget
+@@ -1051,7 +1089,7 @@ __acquires(ci->lock)
+ 				num += ci->hw_ep_max / 2;
+ 
+ 			spin_unlock(&ci->lock);
+-			err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
++			err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
+ 			spin_lock(&ci->lock);
+ 			if (!err)
+ 				isr_setup_status_phase(ci);
+@@ -1110,8 +1148,8 @@ delegate:
+ 
+ 	if (err < 0) {
+ 		spin_unlock(&ci->lock);
+-		if (usb_ep_set_halt(&hwep->ep))
+-			dev_err(ci->dev, "error: ep_set_halt\n");
++		if (_ep_set_halt(&hwep->ep, 1, false))
++			dev_err(ci->dev, "error: _ep_set_halt\n");
+ 		spin_lock(&ci->lock);
+ 	}
+ }
+@@ -1142,9 +1180,9 @@ __acquires(ci->lock)
+ 					err = isr_setup_status_phase(ci);
+ 				if (err < 0) {
+ 					spin_unlock(&ci->lock);
+-					if (usb_ep_set_halt(&hwep->ep))
++					if (_ep_set_halt(&hwep->ep, 1, false))
+ 						dev_err(ci->dev,
+-							"error: ep_set_halt\n");
++						"error: _ep_set_halt\n");
+ 					spin_lock(&ci->lock);
+ 				}
+ 			}
+@@ -1390,41 +1428,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+  */
+ static int ep_set_halt(struct usb_ep *ep, int value)
+ {
+-	struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+-	int direction, retval = 0;
+-	unsigned long flags;
+-
+-	if (ep == NULL || hwep->ep.desc == NULL)
+-		return -EINVAL;
+-
+-	if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+-		return -EOPNOTSUPP;
+-
+-	spin_lock_irqsave(hwep->lock, flags);
+-
+-#ifndef STALL_IN
+-	/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
+-	if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
+-	    !list_empty(&hwep->qh.queue)) {
+-		spin_unlock_irqrestore(hwep->lock, flags);
+-		return -EAGAIN;
+-	}
+-#endif
+-
+-	direction = hwep->dir;
+-	do {
+-		retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+-
+-		if (!value)
+-			hwep->wedge = 0;
+-
+-		if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+-			hwep->dir = (hwep->dir == TX) ? RX : TX;
+-
+-	} while (hwep->dir != direction);
+-
+-	spin_unlock_irqrestore(hwep->lock, flags);
+-	return retval;
++	return _ep_set_halt(ep, value, true);
+ }
+ 
+ /**
+diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
+index b2a540b43f97..b9ddf0c1ffe5 100644
+--- a/drivers/usb/core/config.c
++++ b/drivers/usb/core/config.c
+@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 				cfgno, inum, asnum, ep->desc.bEndpointAddress);
+ 		ep->ss_ep_comp.bmAttributes = 16;
+ 	} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
+-			desc->bmAttributes > 2) {
++		   USB_SS_MULT(desc->bmAttributes) > 3) {
+ 		dev_warn(ddev, "Isoc endpoint has Mult of %d in "
+ 				"config %d interface %d altsetting %d ep %d: "
+ 				"setting to 3\n", desc->bmAttributes + 1,
+@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
+ 	}
+ 
+ 	if (usb_endpoint_xfer_isoc(&ep->desc))
+-		max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
++		max_tx = (desc->bMaxBurst + 1) *
++			(USB_SS_MULT(desc->bmAttributes)) *
+ 			usb_endpoint_maxp(&ep->desc);
+ 	else if (usb_endpoint_xfer_int(&ep->desc))
+ 		max_tx = usb_endpoint_maxp(&ep->desc) *
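
A worked form of the corrected isoc math above, using the USB_SS_MULT()
accessor the hunk switches to; the sample numbers are illustrative:

	#include <linux/usb/ch9.h>

	static unsigned int example_ss_isoc_bytes(u8 bMaxBurst,
						  u8 bmAttributes, u16 maxp)
	{
		/* e.g. bMaxBurst = 15, mult bits = 2, maxp = 1024:
		 * 16 * USB_SS_MULT(2) * 1024 = 16 * 3 * 1024 = 49152 bytes
		 * per interval.  The old code used the raw bmAttributes
		 * byte instead of its low 2-bit Mult field. */
		return (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;
	}
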
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index d85abfed84cc..f5a381945db2 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ 	{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+ 
++	/* Logitech ConferenceCam CC3000e */
++	{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
++	{ USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
++
++	/* Logitech PTZ Pro Camera */
++	{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
++
+ 	/* Logitech Quickcam Fusion */
+ 	{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
+@@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
+ 	/* Philips PSC805 audio device */
+ 	{ USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
+ 
++	/* Plantronic Audio 655 DSP */
++	{ USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
++
++	/* Plantronic Audio 648 USB */
++	{ USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ 	/* Artisman Watchdog Dongle */
+ 	{ USB_DEVICE(0x04b4, 0x0526), .driver_info =
+ 			USB_QUIRK_CONFIG_INTF_STRINGS },
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 9a8c936cd42c..41f841fa6c4d 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 	 * use Event Data TRBs, and we don't chain in a link TRB on short
+ 	 * transfers, we're basically dividing by 1.
+ 	 *
+-	 * xHCI 1.0 specification indicates that the Average TRB Length should
+-	 * be set to 8 for control endpoints.
++	 * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
++	 * should be set to 8 for control endpoints.
+ 	 */
+-	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
++	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
+ 		ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
+ 	else
+ 		ep_ctx->tx_info |=
+@@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	int size;
+ 	int i, j, num_ports;
+ 
+-	if (timer_pending(&xhci->cmd_timer))
+-		del_timer_sync(&xhci->cmd_timer);
++	del_timer_sync(&xhci->cmd_timer);
+ 
+ 	/* Free the Event Ring Segment Table and the actual Event Ring */
+ 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+@@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 
+ 	INIT_LIST_HEAD(&xhci->cmd_list);
+ 
++	/* init command timeout timer */
++	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
++		    (unsigned long)xhci);
++
+ 	page_size = readl(&xhci->op_regs->page_size);
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ 			"Supported page size register = 0x%x", page_size);
+@@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+ 			"Wrote ERST address to ir_set 0.");
+ 	xhci_print_ir_set(xhci, 0);
+ 
+-	/* init command timeout timer */
+-	setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+-		    (unsigned long)xhci);
+-
+ 	/*
+ 	 * XXX: Might need to set the Interrupter Moderation Register to
+ 	 * something other than the default (~1ms minimum between interrupts).
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index b3a0a2275f5a..ad975a2975ca 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+ 	ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+ 			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+ 	if (ret < 0) {
++		/* we are about to kill xhci, give it one more chance */
++		xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
++			      &xhci->op_regs->cmd_ring);
++		udelay(1000);
++		ret = xhci_handshake(&xhci->op_regs->cmd_ring,
++				     CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
++		if (ret == 0)
++			return 0;
++
+ 		xhci_err(xhci, "Stopped the command ring failed, "
+ 				"maybe the host is dead\n");
+ 		xhci->xhc_state |= XHCI_STATE_DYING;
+@@ -3041,9 +3050,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	struct xhci_td *td;
+ 	struct scatterlist *sg;
+ 	int num_sgs;
+-	int trb_buff_len, this_sg_len, running_total;
++	int trb_buff_len, this_sg_len, running_total, ret;
+ 	unsigned int total_packet_count;
++	bool zero_length_needed;
+ 	bool first_trb;
++	int last_trb_num;
+ 	u64 addr;
+ 	bool more_trbs_coming;
+ 
+@@ -3059,13 +3070,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
+ 			usb_endpoint_maxp(&urb->ep->desc));
+ 
+-	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
++	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ 			ep_index, urb->stream_id,
+ 			num_trbs, urb, 0, mem_flags);
+-	if (trb_buff_len < 0)
+-		return trb_buff_len;
++	if (ret < 0)
++		return ret;
+ 
+ 	urb_priv = urb->hcpriv;
++
++	/* Deal with URB_ZERO_PACKET - need one more td/trb */
++	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++		urb_priv->length == 2;
++	if (zero_length_needed) {
++		num_trbs++;
++		xhci_dbg(xhci, "Creating zero length td.\n");
++		ret = prepare_transfer(xhci, xhci->devs[slot_id],
++				ep_index, urb->stream_id,
++				1, urb, 1, mem_flags);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	td = urb_priv->td[0];
+ 
+ 	/*
+@@ -3095,6 +3120,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length;
+ 
+ 	first_trb = true;
++	last_trb_num = zero_length_needed ? 2 : 1;
+ 	/* Queue the first TRB, even if it's zero-length */
+ 	do {
+ 		u32 field = 0;
+@@ -3112,12 +3138,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		/* Chain all the TRBs together; clear the chain bit in the last
+ 		 * TRB to indicate it's the last TRB in the chain.
+ 		 */
+-		if (num_trbs > 1) {
++		if (num_trbs > last_trb_num) {
+ 			field |= TRB_CHAIN;
+-		} else {
+-			/* FIXME - add check for ZERO_PACKET flag before this */
++		} else if (num_trbs == last_trb_num) {
+ 			td->last_trb = ep_ring->enqueue;
+ 			field |= TRB_IOC;
++		} else if (zero_length_needed && num_trbs == 1) {
++			trb_buff_len = 0;
++			urb_priv->td[1]->last_trb = ep_ring->enqueue;
++			field |= TRB_IOC;
+ 		}
+ 
+ 		/* Only set interrupt on short packet for IN endpoints */
+@@ -3179,7 +3208,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		if (running_total + trb_buff_len > urb->transfer_buffer_length)
+ 			trb_buff_len =
+ 				urb->transfer_buffer_length - running_total;
+-	} while (running_total < urb->transfer_buffer_length);
++	} while (num_trbs > 0);
+ 
+ 	check_trb_math(urb, num_trbs, running_total);
+ 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3197,7 +3226,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	int num_trbs;
+ 	struct xhci_generic_trb *start_trb;
+ 	bool first_trb;
++	int last_trb_num;
+ 	bool more_trbs_coming;
++	bool zero_length_needed;
+ 	int start_cycle;
+ 	u32 field, length_field;
+ 
+@@ -3228,7 +3259,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		num_trbs++;
+ 		running_total += TRB_MAX_BUFF_SIZE;
+ 	}
+-	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
+ 
+ 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
+ 			ep_index, urb->stream_id,
+@@ -3237,6 +3267,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		return ret;
+ 
+ 	urb_priv = urb->hcpriv;
++
++	/* Deal with URB_ZERO_PACKET - need one more td/trb */
++	zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
++		urb_priv->length == 2;
++	if (zero_length_needed) {
++		num_trbs++;
++		xhci_dbg(xhci, "Creating zero length td.\n");
++		ret = prepare_transfer(xhci, xhci->devs[slot_id],
++				ep_index, urb->stream_id,
++				1, urb, 1, mem_flags);
++		if (ret < 0)
++			return ret;
++	}
++
+ 	td = urb_priv->td[0];
+ 
+ 	/*
+@@ -3258,7 +3302,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length;
+ 
+ 	first_trb = true;
+-
++	last_trb_num = zero_length_needed ? 2 : 1;
+ 	/* Queue the first TRB, even if it's zero-length */
+ 	do {
+ 		u32 remainder = 0;
+@@ -3275,12 +3319,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		/* Chain all the TRBs together; clear the chain bit in the last
+ 		 * TRB to indicate it's the last TRB in the chain.
+ 		 */
+-		if (num_trbs > 1) {
++		if (num_trbs > last_trb_num) {
+ 			field |= TRB_CHAIN;
+-		} else {
+-			/* FIXME - add check for ZERO_PACKET flag before this */
++		} else if (num_trbs == last_trb_num) {
+ 			td->last_trb = ep_ring->enqueue;
+ 			field |= TRB_IOC;
++		} else if (zero_length_needed && num_trbs == 1) {
++			trb_buff_len = 0;
++			urb_priv->td[1]->last_trb = ep_ring->enqueue;
++			field |= TRB_IOC;
+ 		}
+ 
+ 		/* Only set interrupt on short packet for IN endpoints */
+@@ -3318,7 +3365,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 		trb_buff_len = urb->transfer_buffer_length - running_total;
+ 		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
+ 			trb_buff_len = TRB_MAX_BUFF_SIZE;
+-	} while (running_total < urb->transfer_buffer_length);
++	} while (num_trbs > 0);
+ 
+ 	check_trb_math(urb, num_trbs, running_total);
+ 	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
+@@ -3385,8 +3432,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
+ 	if (start_cycle == 0)
+ 		field |= 0x1;
+ 
+-	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
+-	if (xhci->hci_version == 0x100) {
++	/* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
++	if (xhci->hci_version >= 0x100) {
+ 		if (urb->transfer_buffer_length > 0) {
+ 			if (setup->bRequestType & USB_DIR_IN)
+ 				field |= TRB_TX_TYPE(TRB_DATA_IN);
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index c502c2277aeb..26f62b2b33f8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
+ 				"waited %u microseconds.\n",
+ 				XHCI_MAX_HALT_USEC);
+ 	if (!ret)
+-		xhci->xhc_state &= ~XHCI_STATE_HALTED;
++		xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
++
+ 	return ret;
+ }
+ 
+@@ -683,8 +684,11 @@ void xhci_stop(struct usb_hcd *hcd)
+ 	u32 temp;
+ 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ 
++	mutex_lock(&xhci->mutex);
++
+ 	if (!usb_hcd_is_primary_hcd(hcd)) {
+ 		xhci_only_stop_hcd(xhci->shared_hcd);
++		mutex_unlock(&xhci->mutex);
+ 		return;
+ 	}
+ 
+@@ -723,6 +727,7 @@ void xhci_stop(struct usb_hcd *hcd)
+ 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ 			"xhci_stop completed - status = %x",
+ 			readl(&xhci->op_regs->status));
++	mutex_unlock(&xhci->mutex);
+ }
+ 
+ /*
+@@ -1340,6 +1345,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+ 
+ 	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
+ 		size = urb->number_of_packets;
++	else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
++	    urb->transfer_buffer_length > 0 &&
++	    urb->transfer_flags & URB_ZERO_PACKET &&
++	    !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
++		size = 2;
+ 	else
+ 		size = 1;
+ 
+@@ -3790,6 +3800,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
+ 
+ 	mutex_lock(&xhci->mutex);
+ 
++	if (xhci->xhc_state)	/* dying or halted */
++		goto out;
++
+ 	if (!udev->slot_id) {
+ 		xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ 				"Bad Slot ID %d", udev->slot_id);
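
The xhci_urb_enqueue() hunk above decides when a trailing zero-length
TD must be planned (size = 2), which the ring code then terminates with
a zero-length, interrupt-on-completion TRB.  Restated as a predicate
with an illustrative name:

	#include <linux/usb.h>

	static bool example_needs_zlp_td(const struct urb *urb)
	{
		unsigned int maxp = usb_endpoint_maxp(&urb->ep->desc);

		/* bulk OUT, non-empty, ZLP requested, and an exact
		 * multiple of the endpoint's max packet size */
		return usb_endpoint_is_bulk_out(&urb->ep->desc) &&
		       urb->transfer_buffer_length > 0 &&
		       (urb->transfer_flags & URB_ZERO_PACKET) &&
		       !(urb->transfer_buffer_length % maxp);
	}
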
+diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
+index 3ad5d19e4d04..23c794813e6a 100644
+--- a/drivers/usb/misc/chaoskey.c
++++ b/drivers/usb/misc/chaoskey.c
+@@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
+ 	if (this_time > max)
+ 		this_time = max;
+ 
+-	memcpy(data, dev->buf, this_time);
++	memcpy(data, dev->buf + dev->used, this_time);
+ 
+ 	dev->used += this_time;
+ 
+diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
+index 8bd8c5e26921..d5a140745640 100644
+--- a/drivers/usb/musb/musb_cppi41.c
++++ b/drivers/usb/musb/musb_cppi41.c
+@@ -614,7 +614,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+ {
+ 	struct musb *musb = controller->musb;
+ 	struct device *dev = musb->controller;
+-	struct device_node *np = dev->of_node;
++	struct device_node *np = dev->parent->of_node;
+ 	struct cppi41_dma_channel *cppi41_channel;
+ 	int count;
+ 	int i;
+@@ -664,7 +664,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+ 		musb_dma->status = MUSB_DMA_STATUS_FREE;
+ 		musb_dma->max_len = SZ_4M;
+ 
+-		dc = dma_request_slave_channel(dev, str);
++		dc = dma_request_slave_channel(dev->parent, str);
+ 		if (!dc) {
+ 			dev_err(dev, "Failed to request %s.\n", str);
+ 			ret = -EPROBE_DEFER;
+@@ -694,7 +694,7 @@ struct dma_controller *dma_controller_create(struct musb *musb,
+ 	struct cppi41_dma_controller *controller;
+ 	int ret = 0;
+ 
+-	if (!musb->controller->of_node) {
++	if (!musb->controller->parent->of_node) {
+ 		dev_err(musb->controller, "Need DT for the DMA engine.\n");
+ 		return NULL;
+ 	}
+diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
+index 65d931a28a14..dcac5e7f19e0 100644
+--- a/drivers/usb/musb/musb_dsps.c
++++ b/drivers/usb/musb/musb_dsps.c
+@@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
+ 
+ 	dsps_writel(reg_base, wrp->epintr_set, epmask);
+ 	dsps_writel(reg_base, wrp->coreintr_set, coremask);
+-	/* start polling for ID change. */
+-	mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
++	/* start polling for ID change in dual-role idle mode */
++	if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
++			musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
++		mod_timer(&glue->timer, jiffies +
++				msecs_to_jiffies(wrp->poll_timeout));
+ 	dsps_musb_try_idle(musb, 0);
+ }
+ 
+diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
+index deee68eafb72..0cd85f2ccddd 100644
+--- a/drivers/usb/phy/phy-generic.c
++++ b/drivers/usb/phy/phy-generic.c
+@@ -230,7 +230,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
+ 		clk_rate = pdata->clk_rate;
+ 		needs_vcc = pdata->needs_vcc;
+ 		if (gpio_is_valid(pdata->gpio_reset)) {
+-			err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
++			err = devm_gpio_request_one(dev, pdata->gpio_reset,
++						    GPIOF_ACTIVE_LOW,
+ 						    dev_name(dev));
+ 			if (!err)
+ 				nop->gpiod_reset =
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 876423b8892c..7c8eb4c4c175 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
+ #define ZTE_PRODUCT_MF622			0x0001
+ #define ZTE_PRODUCT_MF628			0x0015
+ #define ZTE_PRODUCT_MF626			0x0031
++#define ZTE_PRODUCT_ZM8620_X			0x0396
++#define ZTE_PRODUCT_ME3620_MBIM			0x0426
++#define ZTE_PRODUCT_ME3620_X			0x1432
++#define ZTE_PRODUCT_ME3620_L			0x1433
+ #define ZTE_PRODUCT_AC2726			0xfff1
+ #define ZTE_PRODUCT_MG880			0xfffd
+ #define ZTE_PRODUCT_CDMA_TECH			0xfffe
+@@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
+ 	.sendsetup = BIT(1) | BIT(2) | BIT(3),
+ };
+ 
++static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
++	.reserved = BIT(2) | BIT(3) | BIT(4),
++};
++
++static const struct option_blacklist_info zte_me3620_xl_blacklist = {
++	.reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
++static const struct option_blacklist_info zte_zm8620_x_blacklist = {
++	.reserved = BIT(3) | BIT(4) | BIT(5),
++};
++
+ static const struct option_blacklist_info huawei_cdc12_blacklist = {
+ 	.reserved = BIT(1) | BIT(2),
+ };
+@@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
+ 	 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
+ 	 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
++	 .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
++	{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
++	 .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
+ 	{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
+index 6c3734d2b45a..d3ea90bef84d 100644
+--- a/drivers/usb/serial/whiteheat.c
++++ b/drivers/usb/serial/whiteheat.c
+@@ -80,6 +80,8 @@ static int  whiteheat_firmware_download(struct usb_serial *serial,
+ static int  whiteheat_firmware_attach(struct usb_serial *serial);
+ 
+ /* function prototypes for the Connect Tech WhiteHEAT serial converter */
++static int whiteheat_probe(struct usb_serial *serial,
++				const struct usb_device_id *id);
+ static int  whiteheat_attach(struct usb_serial *serial);
+ static void whiteheat_release(struct usb_serial *serial);
+ static int  whiteheat_port_probe(struct usb_serial_port *port);
+@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
+ 	.description =		"Connect Tech - WhiteHEAT",
+ 	.id_table =		id_table_std,
+ 	.num_ports =		4,
++	.probe =		whiteheat_probe,
+ 	.attach =		whiteheat_attach,
+ 	.release =		whiteheat_release,
+ 	.port_probe =		whiteheat_port_probe,
+@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
+ /*****************************************************************************
+  * Connect Tech's White Heat serial driver functions
+  *****************************************************************************/
++
++static int whiteheat_probe(struct usb_serial *serial,
++				const struct usb_device_id *id)
++{
++	struct usb_host_interface *iface_desc;
++	struct usb_endpoint_descriptor *endpoint;
++	size_t num_bulk_in = 0;
++	size_t num_bulk_out = 0;
++	size_t min_num_bulk;
++	unsigned int i;
++
++	iface_desc = serial->interface->cur_altsetting;
++
++	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
++		endpoint = &iface_desc->endpoint[i].desc;
++		if (usb_endpoint_is_bulk_in(endpoint))
++			++num_bulk_in;
++		if (usb_endpoint_is_bulk_out(endpoint))
++			++num_bulk_out;
++	}
++
++	min_num_bulk = COMMAND_PORT + 1;
++	if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
++		return -ENODEV;
++
++	return 0;
++}
++
+ static int whiteheat_attach(struct usb_serial *serial)
+ {
+ 	struct usb_serial_port *command_port;
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 109462303087..d1e1e1704da1 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -298,7 +298,7 @@ config FB_ARMCLCD
+ 
+ # Helper logic selected only by the ARM Versatile platform family.
+ config PLAT_VERSATILE_CLCD
+-	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
++	def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
+ 	depends on ARM
+ 	depends on FB_ARMCLCD && FB=y
+ 
+diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
+index a29afb37c48c..47bd8a14d01f 100644
+--- a/drivers/watchdog/sunxi_wdt.c
++++ b/drivers/watchdog/sunxi_wdt.c
+@@ -184,7 +184,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
+ 	/* Set system reset function */
+ 	reg = readl(wdt_base + regs->wdt_cfg);
+ 	reg &= ~(regs->wdt_reset_mask);
+-	reg |= ~(regs->wdt_reset_val);
++	reg |= regs->wdt_reset_val;
+ 	writel(reg, wdt_base + regs->wdt_cfg);
+ 
+ 	/* Enable watchdog */
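
The sunxi fix above is a one-character bug: "reg |= ~(val)" sets every
bit *outside* the value.  The intended read-modify-write of a register
field, written generically (names are placeholders):

	#include <linux/io.h>

	static void example_set_field(void __iomem *addr, u32 mask, u32 val)
	{
		u32 reg = readl(addr);

		reg &= ~mask;	/* clear the field */
		reg |= val;	/* then OR in the value itself */
		writel(reg, addr);
	}
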
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index c7e4163ede87..ccfd31f1df3a 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1234,6 +1234,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ 				goto out_clear;
+ 			}
+ 			bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
++			/*
++			 * If the partition is not aligned on a page
++			 * boundary, we can't do dax I/O to it.
++			 */
++			if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
++			    (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
++				bdev->bd_inode->i_flags &= ~S_DAX;
+ 		}
+ 	} else {
+ 		if (bdev->bd_contains == bdev) {
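
The block_dev check above gates DAX on page alignment: direct access
works in page-sized units, and with 512-byte sectors a page spans
PAGE_SIZE / 512 sectors, so both the partition start and its length
must be multiples of that.  As a predicate (illustrative name):

	#include <linux/types.h>
	#include <linux/mm.h>

	static bool example_dax_aligned(sector_t start_sect, sector_t nr_sects)
	{
		const unsigned int spp = PAGE_SIZE / 512; /* sectors/page */

		return !(start_sect % spp) && !(nr_sects % spp);
	}
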
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index c32d226bfecc..885f533a34d9 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -2795,7 +2795,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ 			      bio_end_io_t end_io_func,
+ 			      int mirror_num,
+ 			      unsigned long prev_bio_flags,
+-			      unsigned long bio_flags)
++			      unsigned long bio_flags,
++			      bool force_bio_submit)
+ {
+ 	int ret = 0;
+ 	struct bio *bio;
+@@ -2813,6 +2814,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
+ 			contig = bio_end_sector(bio) == sector;
+ 
+ 		if (prev_bio_flags != bio_flags || !contig ||
++		    force_bio_submit ||
+ 		    merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+ 		    bio_add_page(bio, page, page_size, offset) < page_size) {
+ 			ret = submit_one_bio(rw, bio, mirror_num,
+@@ -2906,7 +2908,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 			 get_extent_t *get_extent,
+ 			 struct extent_map **em_cached,
+ 			 struct bio **bio, int mirror_num,
+-			 unsigned long *bio_flags, int rw)
++			 unsigned long *bio_flags, int rw,
++			 u64 *prev_em_start)
+ {
+ 	struct inode *inode = page->mapping->host;
+ 	u64 start = page_offset(page);
+@@ -2954,6 +2957,7 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 	}
+ 	while (cur <= end) {
+ 		unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
++		bool force_bio_submit = false;
+ 
+ 		if (cur >= last_byte) {
+ 			char *userpage;
+@@ -3004,6 +3008,49 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 		block_start = em->block_start;
+ 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ 			block_start = EXTENT_MAP_HOLE;
++
++		/*
++		 * If we have a file range that points to a compressed extent
++		 * and it's followed by a consecutive file range that points
++		 * to the same compressed extent (possibly with a different
++		 * offset and/or length, so it either points to the whole extent
++		 * or only part of it), we must make sure we do not submit a
++		 * single bio to populate the pages for the 2 ranges because
++		 * this makes the compressed extent read zero out the pages
++		 * belonging to the 2nd range. Imagine the following scenario:
++		 *
++		 *  File layout
++		 *  [0 - 8K]                     [8K - 24K]
++		 *    |                               |
++		 *    |                               |
++		 * points to extent X,         points to extent X,
++		 * offset 4K, length of 8K     offset 0, length 16K
++		 *
++		 * [extent X, compressed length = 4K, uncompressed length = 16K]
++		 *
++		 * If the bio to read the compressed extent covers both ranges,
++		 * it will decompress extent X into the pages belonging to the
++		 * first range and then it will stop, zeroing out the remaining
++		 * pages that belong to the other range that points to extent X.
++		 * So here we make sure we submit 2 bios, one for the first
++		 * range and another one for the second range. Both will target
++		 * the same physical extent from disk, but we can't currently
++		 * make the compressed bio endio callback populate the pages
++		 * for both ranges because each compressed bio is tightly
++		 * coupled with a single extent map, and each range can have
++		 * an extent map with a different offset value relative to the
++		 * uncompressed data of our extent and different lengths. This
++		 * is a corner case, so we prioritize correctness over
++		 * performance (submitting 2 bios for the same extent).
++		 */
++		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
++		    prev_em_start && *prev_em_start != (u64)-1 &&
++		    *prev_em_start != em->orig_start)
++			force_bio_submit = true;
++
++		if (prev_em_start)
++			*prev_em_start = em->orig_start;
++
+ 		free_extent_map(em);
+ 		em = NULL;
+ 
+@@ -3053,7 +3100,8 @@ static int __do_readpage(struct extent_io_tree *tree,
+ 					 bdev, bio, pnr,
+ 					 end_bio_extent_readpage, mirror_num,
+ 					 *bio_flags,
+-					 this_bio_flag);
++					 this_bio_flag,
++					 force_bio_submit);
+ 		if (!ret) {
+ 			nr++;
+ 			*bio_flags = this_bio_flag;
+@@ -3080,7 +3128,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+ 					     get_extent_t *get_extent,
+ 					     struct extent_map **em_cached,
+ 					     struct bio **bio, int mirror_num,
+-					     unsigned long *bio_flags, int rw)
++					     unsigned long *bio_flags, int rw,
++					     u64 *prev_em_start)
+ {
+ 	struct inode *inode;
+ 	struct btrfs_ordered_extent *ordered;
+@@ -3100,7 +3149,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
+ 
+ 	for (index = 0; index < nr_pages; index++) {
+ 		__do_readpage(tree, pages[index], get_extent, em_cached, bio,
+-			      mirror_num, bio_flags, rw);
++			      mirror_num, bio_flags, rw, prev_em_start);
+ 		page_cache_release(pages[index]);
+ 	}
+ }
+@@ -3110,7 +3159,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 			       int nr_pages, get_extent_t *get_extent,
+ 			       struct extent_map **em_cached,
+ 			       struct bio **bio, int mirror_num,
+-			       unsigned long *bio_flags, int rw)
++			       unsigned long *bio_flags, int rw,
++			       u64 *prev_em_start)
+ {
+ 	u64 start = 0;
+ 	u64 end = 0;
+@@ -3131,7 +3181,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 						  index - first_index, start,
+ 						  end, get_extent, em_cached,
+ 						  bio, mirror_num, bio_flags,
+-						  rw);
++						  rw, prev_em_start);
+ 			start = page_start;
+ 			end = start + PAGE_CACHE_SIZE - 1;
+ 			first_index = index;
+@@ -3142,7 +3192,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
+ 		__do_contiguous_readpages(tree, &pages[first_index],
+ 					  index - first_index, start,
+ 					  end, get_extent, em_cached, bio,
+-					  mirror_num, bio_flags, rw);
++					  mirror_num, bio_flags, rw,
++					  prev_em_start);
+ }
+ 
+ static int __extent_read_full_page(struct extent_io_tree *tree,
+@@ -3168,7 +3219,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
+ 	}
+ 
+ 	ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
+-			    bio_flags, rw);
++			    bio_flags, rw, NULL);
+ 	return ret;
+ }
+ 
+@@ -3194,7 +3245,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
+ 	int ret;
+ 
+ 	ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
+-				      &bio_flags, READ);
++			    &bio_flags, READ, NULL);
+ 	if (bio)
+ 		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ 	return ret;
+@@ -3447,7 +3498,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
+ 						 sector, iosize, pg_offset,
+ 						 bdev, &epd->bio, max_nr,
+ 						 end_bio_extent_writepage,
+-						 0, 0, 0);
++						 0, 0, 0, false);
+ 			if (ret)
+ 				SetPageError(page);
+ 		}
+@@ -3749,7 +3800,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
+ 		ret = submit_extent_page(rw, tree, p, offset >> 9,
+ 					 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
+ 					 -1, end_bio_extent_buffer_writepage,
+-					 0, epd->bio_flags, bio_flags);
++					 0, epd->bio_flags, bio_flags, false);
+ 		epd->bio_flags = bio_flags;
+ 		if (ret) {
+ 			set_btree_ioerr(p);
+@@ -4153,6 +4204,7 @@ int extent_readpages(struct extent_io_tree *tree,
+ 	struct page *page;
+ 	struct extent_map *em_cached = NULL;
+ 	int nr = 0;
++	u64 prev_em_start = (u64)-1;
+ 
+ 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+ 		page = list_entry(pages->prev, struct page, lru);
+@@ -4169,12 +4221,12 @@ int extent_readpages(struct extent_io_tree *tree,
+ 		if (nr < ARRAY_SIZE(pagepool))
+ 			continue;
+ 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+-				   &bio, 0, &bio_flags, READ);
++				   &bio, 0, &bio_flags, READ, &prev_em_start);
+ 		nr = 0;
+ 	}
+ 	if (nr)
+ 		__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
+-				   &bio, 0, &bio_flags, READ);
++				   &bio, 0, &bio_flags, READ, &prev_em_start);
+ 
+ 	if (em_cached)
+ 		free_extent_map(em_cached);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 8bb013672aee..e3b39f0c4666 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5035,7 +5035,8 @@ void btrfs_evict_inode(struct inode *inode)
+ 		goto no_delete;
+ 	}
+ 	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
+-	btrfs_wait_ordered_range(inode, 0, (u64)-1);
++	if (!special_file(inode->i_mode))
++		btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ 
+ 	btrfs_free_io_failure_record(inode, 0, (u64)-1);
+ 
+diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
+index aa0dc2573374..afa09fce8151 100644
+--- a/fs/cifs/cifsencrypt.c
++++ b/fs/cifs/cifsencrypt.c
+@@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	return 0;
+ }
+ 
++/* The server has provided av pairs/target info in the type 2 challenge
++ * packet and we have plucked it and stored it within the smb session.
++ * We parse that blob here to find the server-given timestamp
++ * as part of ntlmv2 authentication (or the local current time as
++ * a default in case of failure).
++ */
++static __le64
++find_timestamp(struct cifs_ses *ses)
++{
++	unsigned int attrsize;
++	unsigned int type;
++	unsigned int onesize = sizeof(struct ntlmssp2_name);
++	unsigned char *blobptr;
++	unsigned char *blobend;
++	struct ntlmssp2_name *attrptr;
++
++	if (!ses->auth_key.len || !ses->auth_key.response)
++		return 0;
++
++	blobptr = ses->auth_key.response;
++	blobend = blobptr + ses->auth_key.len;
++
++	while (blobptr + onesize < blobend) {
++		attrptr = (struct ntlmssp2_name *) blobptr;
++		type = le16_to_cpu(attrptr->type);
++		if (type == NTLMSSP_AV_EOL)
++			break;
++		blobptr += 2; /* advance attr type */
++		attrsize = le16_to_cpu(attrptr->length);
++		blobptr += 2; /* advance attr size */
++		if (blobptr + attrsize > blobend)
++			break;
++		if (type == NTLMSSP_AV_TIMESTAMP) {
++			if (attrsize == sizeof(u64))
++				return *((__le64 *)blobptr);
++		}
++		blobptr += attrsize; /* advance attr value */
++	}
++
++	return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++}
++
+ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
+ 			    const struct nls_table *nls_cp)
+ {
+@@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 	struct ntlmv2_resp *ntlmv2;
+ 	char ntlmv2_hash[16];
+ 	unsigned char *tiblob = NULL; /* target info blob */
++	__le64 rsp_timestamp;
+ 
+ 	if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
+ 		if (!ses->domainName) {
+@@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 		}
+ 	}
+ 
++	/* Must be within 5 minutes of the server (or in range +/-2h
++	 * in case of Mac OS X), so simply carry over the server timestamp
++	 * (as Windows 7 does)
++	 */
++	rsp_timestamp = find_timestamp(ses);
++
+ 	baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
+ 	tilen = ses->auth_key.len;
+ 	tiblob = ses->auth_key.response;
+@@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
+ 			(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
+ 	ntlmv2->blob_signature = cpu_to_le32(0x00000101);
+ 	ntlmv2->reserved = 0;
+-	/* Must be within 5 minutes of the server */
+-	ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
++	ntlmv2->time = rsp_timestamp;
++
+ 	get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
+ 	ntlmv2->reserved2 = 0;
+ 
+diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
+index f621b44cb800..6b66dd5d1540 100644
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+ 	struct tcon_link *tlink = NULL;
+ 	struct cifs_tcon *tcon = NULL;
+ 	struct TCP_Server_Info *server;
+-	struct cifs_io_parms io_parms;
+ 
+ 	/*
+ 	 * To avoid spurious oplock breaks from server, in the case of
+@@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+ 			rc = -ENOSYS;
+ 		cifsFileInfo_put(open_file);
+ 		cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
+-		if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+-			unsigned int bytes_written;
+-
+-			io_parms.netfid = open_file->fid.netfid;
+-			io_parms.pid = open_file->pid;
+-			io_parms.tcon = tcon;
+-			io_parms.offset = 0;
+-			io_parms.length = attrs->ia_size;
+-			rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
+-					  NULL, NULL, 1);
+-			cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
+-		}
+ 	} else
+ 		rc = -EINVAL;
+ 
+@@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
+ 	else
+ 		rc = -ENOSYS;
+ 	cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
+-	if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
+-		__u16 netfid;
+-		int oplock = 0;
+ 
+-		rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
+-				   GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
+-				   &oplock, NULL, cifs_sb->local_nls,
+-				   cifs_remap(cifs_sb));
+-		if (rc == 0) {
+-			unsigned int bytes_written;
+-
+-			io_parms.netfid = netfid;
+-			io_parms.pid = current->tgid;
+-			io_parms.tcon = tcon;
+-			io_parms.offset = 0;
+-			io_parms.length = attrs->ia_size;
+-			rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
+-					  NULL,  1);
+-			cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
+-			CIFSSMBClose(xid, tcon, netfid);
+-		}
+-	}
+ 	if (tlink)
+ 		cifs_put_tlink(tlink);
+ 
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 54daee5ad4c1..1678b9cb94c7 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server)
+ 		break;
+ 	default:
+ 		server->echoes = true;
+-		server->oplocks = true;
++		if (enable_oplocks) {
++			server->oplocks = true;
++			server->oplock_credits = 1;
++		} else
++			server->oplocks = false;
++
+ 		server->echo_credits = 1;
+-		server->oplock_credits = 1;
+ 	}
+ 	server->credits -= server->echo_credits + server->oplock_credits;
+ 	return 0;
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 54cbe19d9c08..894f259d3989 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -46,6 +46,7 @@
+ #include "smb2status.h"
+ #include "smb2glob.h"
+ #include "cifspdu.h"
++#include "cifs_spnego.h"
+ 
+ /*
+  *  The following table defines the expected "StructureSize" of SMB2 requests
+@@ -427,19 +428,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+ 		cifs_dbg(FYI, "missing security blob on negprot\n");
+ 
+ 	rc = cifs_enable_signing(server, ses->sign);
+-#ifdef CONFIG_SMB2_ASN1  /* BB REMOVEME when updated asn1.c ready */
+ 	if (rc)
+ 		goto neg_exit;
+-	if (blob_length)
++	if (blob_length) {
+ 		rc = decode_negTokenInit(security_blob, blob_length, server);
+-	if (rc == 1)
+-		rc = 0;
+-	else if (rc == 0) {
+-		rc = -EIO;
+-		goto neg_exit;
++		if (rc == 1)
++			rc = 0;
++		else if (rc == 0)
++			rc = -EIO;
+ 	}
+-#endif
+-
+ neg_exit:
+ 	free_rsp_buf(resp_buftype, rsp);
+ 	return rc;
+@@ -533,7 +530,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
+ 	struct TCP_Server_Info *server = ses->server;
+ 	u16 blob_length = 0;
+-	char *security_blob;
++	struct key *spnego_key = NULL;
++	char *security_blob = NULL;
+ 	char *ntlmssp_blob = NULL;
+ 	bool use_spnego = false; /* else use raw ntlmssp */
+ 
+@@ -561,7 +559,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ 	ses->ntlmssp->sesskey_per_smbsess = true;
+ 
+ 	/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
+-	ses->sectype = RawNTLMSSP;
++	if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
++		ses->sectype = RawNTLMSSP;
+ 
+ ssetup_ntlmssp_authenticate:
+ 	if (phase == NtLmChallenge)
+@@ -590,7 +589,48 @@ ssetup_ntlmssp_authenticate:
+ 	iov[0].iov_base = (char *)req;
+ 	/* 4 for rfc1002 length field and 1 for pad */
+ 	iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
+-	if (phase == NtLmNegotiate) {
++
++	if (ses->sectype == Kerberos) {
++#ifdef CONFIG_CIFS_UPCALL
++		struct cifs_spnego_msg *msg;
++
++		spnego_key = cifs_get_spnego_key(ses);
++		if (IS_ERR(spnego_key)) {
++			rc = PTR_ERR(spnego_key);
++			spnego_key = NULL;
++			goto ssetup_exit;
++		}
++
++		msg = spnego_key->payload.data;
++		/*
++		 * check version field to make sure that cifs.upcall is
++		 * sending us a response in an expected form
++		 */
++		if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
++			cifs_dbg(VFS,
++				  "bad cifs.upcall version. Expected %d got %d",
++				  CIFS_SPNEGO_UPCALL_VERSION, msg->version);
++			rc = -EKEYREJECTED;
++			goto ssetup_exit;
++		}
++		ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
++						 GFP_KERNEL);
++		if (!ses->auth_key.response) {
++			cifs_dbg(VFS,
++				"Kerberos can't allocate (%u bytes) memory",
++				msg->sesskey_len);
++			rc = -ENOMEM;
++			goto ssetup_exit;
++		}
++		ses->auth_key.len = msg->sesskey_len;
++		blob_length = msg->secblob_len;
++		iov[1].iov_base = msg->data + msg->sesskey_len;
++		iov[1].iov_len = blob_length;
++#else
++		rc = -EOPNOTSUPP;
++		goto ssetup_exit;
++#endif /* CONFIG_CIFS_UPCALL */
++	} else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
+ 		ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
+ 				       GFP_KERNEL);
+ 		if (ntlmssp_blob == NULL) {
+@@ -613,6 +653,8 @@ ssetup_ntlmssp_authenticate:
+ 			/* with raw NTLMSSP we don't encapsulate in SPNEGO */
+ 			security_blob = ntlmssp_blob;
+ 		}
++		iov[1].iov_base = security_blob;
++		iov[1].iov_len = blob_length;
+ 	} else if (phase == NtLmAuthenticate) {
+ 		req->hdr.SessionId = ses->Suid;
+ 		ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
+@@ -640,6 +682,8 @@ ssetup_ntlmssp_authenticate:
+ 		} else {
+ 			security_blob = ntlmssp_blob;
+ 		}
++		iov[1].iov_base = security_blob;
++		iov[1].iov_len = blob_length;
+ 	} else {
+ 		cifs_dbg(VFS, "illegal ntlmssp phase\n");
+ 		rc = -EIO;
+@@ -651,8 +695,6 @@ ssetup_ntlmssp_authenticate:
+ 				cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
+ 					    1 /* pad */ - 4 /* rfc1001 len */);
+ 	req->SecurityBufferLength = cpu_to_le16(blob_length);
+-	iov[1].iov_base = security_blob;
+-	iov[1].iov_len = blob_length;
+ 
+ 	inc_rfc1001_len(req, blob_length - 1 /* pad */);
+ 
+@@ -663,6 +705,7 @@ ssetup_ntlmssp_authenticate:
+ 
+ 	kfree(security_blob);
+ 	rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
++	ses->Suid = rsp->hdr.SessionId;
+ 	if (resp_buftype != CIFS_NO_BUFFER &&
+ 	    rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
+ 		if (phase != NtLmNegotiate) {
+@@ -680,7 +723,6 @@ ssetup_ntlmssp_authenticate:
+ 		/* NTLMSSP Negotiate sent now processing challenge (response) */
+ 		phase = NtLmChallenge; /* process ntlmssp challenge */
+ 		rc = 0; /* MORE_PROCESSING is not an error here but expected */
+-		ses->Suid = rsp->hdr.SessionId;
+ 		rc = decode_ntlmssp_challenge(rsp->Buffer,
+ 				le16_to_cpu(rsp->SecurityBufferLength), ses);
+ 	}
+@@ -737,6 +779,10 @@ keygen_exit:
+ 		kfree(ses->auth_key.response);
+ 		ses->auth_key.response = NULL;
+ 	}
++	if (spnego_key) {
++		key_invalidate(spnego_key);
++		key_put(spnego_key);
++	}
+ 	kfree(ses->ntlmssp);
+ 
+ 	return rc;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 5d03eb0ec0ac..0046ab7d4f3d 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -1676,7 +1676,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 				DCACHE_OP_COMPARE	|
+ 				DCACHE_OP_REVALIDATE	|
+ 				DCACHE_OP_WEAK_REVALIDATE	|
+-				DCACHE_OP_DELETE ));
++				DCACHE_OP_DELETE	|
++				DCACHE_OP_SELECT_INODE));
+ 	dentry->d_op = op;
+ 	if (!op)
+ 		return;
+@@ -1692,6 +1693,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
+ 		dentry->d_flags |= DCACHE_OP_DELETE;
+ 	if (op->d_prune)
+ 		dentry->d_flags |= DCACHE_OP_PRUNE;
++	if (op->d_select_inode)
++		dentry->d_flags |= DCACHE_OP_SELECT_INODE;
+ 
+ }
+ EXPORT_SYMBOL(d_set_d_op);
+@@ -2923,6 +2926,13 @@ restart:
+ 
+ 		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ 			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
++			/* Escaped? */
++			if (dentry != vfsmnt->mnt_root) {
++				bptr = *buffer;
++				blen = *buflen;
++				error = 3;
++				break;
++			}
+ 			/* Global root? */
+ 			if (mnt != parent) {
+ 				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
+diff --git a/fs/internal.h b/fs/internal.h
+index 01dce1d1476b..4d5af583ab03 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -107,6 +107,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
+ extern long do_handle_open(int mountdirfd,
+ 			   struct file_handle __user *ufh, int open_flag);
+ extern int open_check_o_direct(struct file *f);
++extern int vfs_open(const struct path *, struct file *, const struct cred *);
+ 
+ /*
+  * inode.c
+diff --git a/fs/namei.c b/fs/namei.c
+index fe30d3be43a8..ccd7f98d85b9 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -505,6 +505,24 @@ struct nameidata {
+ 	char *saved_names[MAX_NESTED_LINKS + 1];
+ };
+ 
++/**
++ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
++ * @path: the path to verify
++ *
++ * Rename can sometimes move a file or directory outside of a bind
++ * mount; path_connected allows those cases to be detected.
++ */
++static bool path_connected(const struct path *path)
++{
++	struct vfsmount *mnt = path->mnt;
++
++	/* Only bind mounts can have disconnected paths */
++	if (mnt->mnt_root == mnt->mnt_sb->s_root)
++		return true;
++
++	return is_subdir(path->dentry, mnt->mnt_root);
++}
++
+ /*
+  * Path walking has 2 modes, rcu-walk and ref-walk (see
+  * Documentation/filesystems/path-lookup.txt).  In situations when we can't
+@@ -1194,6 +1212,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
+ 				goto failed;
+ 			nd->path.dentry = parent;
+ 			nd->seq = seq;
++			if (unlikely(!path_connected(&nd->path)))
++				goto failed;
+ 			break;
+ 		}
+ 		if (!follow_up_rcu(&nd->path))
+@@ -1290,7 +1310,7 @@ static void follow_mount(struct path *path)
+ 	}
+ }
+ 
+-static void follow_dotdot(struct nameidata *nd)
++static int follow_dotdot(struct nameidata *nd)
+ {
+ 	if (!nd->root.mnt)
+ 		set_root(nd);
+@@ -1306,6 +1326,10 @@ static void follow_dotdot(struct nameidata *nd)
+ 			/* rare case of legitimate dget_parent()... */
+ 			nd->path.dentry = dget_parent(nd->path.dentry);
+ 			dput(old);
++			if (unlikely(!path_connected(&nd->path))) {
++				path_put(&nd->path);
++				return -ENOENT;
++			}
+ 			break;
+ 		}
+ 		if (!follow_up(&nd->path))
+@@ -1313,6 +1337,7 @@ static void follow_dotdot(struct nameidata *nd)
+ 	}
+ 	follow_mount(&nd->path);
+ 	nd->inode = nd->path.dentry->d_inode;
++	return 0;
+ }
+ 
+ /*
+@@ -1428,8 +1453,6 @@ static int lookup_fast(struct nameidata *nd,
+ 		negative = d_is_negative(dentry);
+ 		if (read_seqcount_retry(&dentry->d_seq, seq))
+ 			return -ECHILD;
+-		if (negative)
+-			return -ENOENT;
+ 
+ 		/*
+ 		 * This sequence count validates that the parent had no
+@@ -1450,6 +1473,12 @@ static int lookup_fast(struct nameidata *nd,
+ 				goto unlazy;
+ 			}
+ 		}
++		/*
++		 * Note: do negative dentry check after revalidation in
++		 * case that drops it.
++		 */
++		if (negative)
++			return -ENOENT;
+ 		path->mnt = mnt;
+ 		path->dentry = dentry;
+ 		if (likely(__follow_mount_rcu(nd, path, inode)))
+@@ -1541,7 +1570,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
+ 			if (follow_dotdot_rcu(nd))
+ 				return -ECHILD;
+ 		} else
+-			follow_dotdot(nd);
++			return follow_dotdot(nd);
+ 	}
+ 	return 0;
+ }
+@@ -2290,7 +2319,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
+ 	if (unlikely(nd->last_type != LAST_NORM)) {
+ 		error = handle_dots(nd, nd->last_type);
+ 		if (error)
+-			goto out;
++			return error;
+ 		dentry = dget(nd->path.dentry);
+ 		goto done;
+ 	}
+diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
+index a46bf6de9ce4..fb1fb2774d34 100644
+--- a/fs/nfs/filelayout/filelayout.c
++++ b/fs/nfs/filelayout/filelayout.c
+@@ -628,23 +628,18 @@ out_put:
+ 	goto out;
+ }
+ 
+-static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
++static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
+ {
+ 	int i;
+ 
+-	for (i = 0; i < fl->num_fh; i++) {
+-		if (!fl->fh_array[i])
+-			break;
+-		kfree(fl->fh_array[i]);
++	if (fl->fh_array) {
++		for (i = 0; i < fl->num_fh; i++) {
++			if (!fl->fh_array[i])
++				break;
++			kfree(fl->fh_array[i]);
++		}
++		kfree(fl->fh_array);
+ 	}
+-	kfree(fl->fh_array);
+-	fl->fh_array = NULL;
+-}
+-
+-static void
+-_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
+-{
+-	filelayout_free_fh_array(fl);
+ 	kfree(fl);
+ }
+ 
+@@ -715,21 +710,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ 		/* Do we want to use a mempool here? */
+ 		fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
+ 		if (!fl->fh_array[i])
+-			goto out_err_free;
++			goto out_err;
+ 
+ 		p = xdr_inline_decode(&stream, 4);
+ 		if (unlikely(!p))
+-			goto out_err_free;
++			goto out_err;
+ 		fl->fh_array[i]->size = be32_to_cpup(p++);
+ 		if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
+ 			printk(KERN_ERR "NFS: Too big fh %d received %d\n",
+ 			       i, fl->fh_array[i]->size);
+-			goto out_err_free;
++			goto out_err;
+ 		}
+ 
+ 		p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
+ 		if (unlikely(!p))
+-			goto out_err_free;
++			goto out_err;
+ 		memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
+ 		dprintk("DEBUG: %s: fh len %d\n", __func__,
+ 			fl->fh_array[i]->size);
+@@ -738,8 +733,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
+ 	__free_page(scratch);
+ 	return 0;
+ 
+-out_err_free:
+-	filelayout_free_fh_array(fl);
+ out_err:
+ 	__free_page(scratch);
+ 	return -EIO;
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 069914ce7641..93d355c8b467 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
+ 	 * for it without upsetting the slab allocator.
+ 	 */
+ 	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
+-			sizeof(struct page) > PAGE_SIZE)
++			sizeof(struct page *) > PAGE_SIZE)
+ 		return 0;
+ 
+ 	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
+diff --git a/fs/nfs/read.c b/fs/nfs/read.c
+index ae0ff7a11b40..01b8cc8e8cfc 100644
+--- a/fs/nfs/read.c
++++ b/fs/nfs/read.c
+@@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
+ {
+ 	struct nfs_pgio_mirror *mirror;
+ 
++	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
++		pgio->pg_ops->pg_cleanup(pgio);
++
+ 	pgio->pg_ops = &nfs_pgio_rw_ops;
+ 
+ 	/* read path should never have more than one mirror */
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 07115b9b1ad2..d9851a6a2813 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1203,7 +1203,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
+ 		return 1;
+ 	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+ 		       list_empty_careful(&flctx->flc_posix)))
+-		return 0;
++		return 1;
+ 
+ 	/* Check to see if there are whole file write locks */
+ 	ret = 0;
+@@ -1331,6 +1331,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
+ {
+ 	struct nfs_pgio_mirror *mirror;
+ 
++	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
++		pgio->pg_ops->pg_cleanup(pgio);
++
+ 	pgio->pg_ops = &nfs_pgio_rw_ops;
+ 
+ 	nfs_pageio_stop_mirroring(pgio);
+diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
+index fdf4b41d0609..482cfd34472d 100644
+--- a/fs/ocfs2/dlm/dlmmaster.c
++++ b/fs/ocfs2/dlm/dlmmaster.c
+@@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
+ 	int found, ret;
+ 	int set_maybe;
+ 	int dispatch_assert = 0;
++	int dispatched = 0;
+ 
+ 	if (!dlm_grab(dlm))
+ 		return DLM_MASTER_RESP_NO;
+@@ -1658,15 +1659,18 @@ send_response:
+ 			mlog(ML_ERROR, "failed to dispatch assert master work\n");
+ 			response = DLM_MASTER_RESP_ERROR;
+ 			dlm_lockres_put(res);
+-		} else
++		} else {
++			dispatched = 1;
+ 			__dlm_lockres_grab_inflight_worker(dlm, res);
++		}
+ 		spin_unlock(&res->spinlock);
+ 	} else {
+ 		if (res)
+ 			dlm_lockres_put(res);
+ 	}
+ 
+-	dlm_put(dlm);
++	if (!dispatched)
++		dlm_put(dlm);
+ 	return response;
+ }
+ 
+@@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
+ 
+ 
+ 	/* queue up work for dlm_assert_master_worker */
+-	dlm_grab(dlm);  /* get an extra ref for the work item */
+ 	dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
+ 	item->u.am.lockres = res; /* already have a ref */
+ 	/* can optionally ignore node numbers higher than this node */
+diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
+index ce12e0b1a31f..3d90ad7ff91f 100644
+--- a/fs/ocfs2/dlm/dlmrecovery.c
++++ b/fs/ocfs2/dlm/dlmrecovery.c
+@@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+ 	unsigned int hash;
+ 	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
+ 	u32 flags = DLM_ASSERT_MASTER_REQUERY;
++	int dispatched = 0;
+ 
+ 	if (!dlm_grab(dlm)) {
+ 		/* since the domain has gone away on this
+@@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+ 				dlm_put(dlm);
+ 				/* sender will take care of this and retry */
+ 				return ret;
+-			} else
++			} else {
++				dispatched = 1;
+ 				__dlm_lockres_grab_inflight_worker(dlm, res);
++			}
+ 			spin_unlock(&res->spinlock);
+ 		} else {
+ 			/* put.. incase we are not the master */
+@@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
+ 	}
+ 	spin_unlock(&dlm->spinlock);
+ 
+-	dlm_put(dlm);
++	if (!dispatched)
++		dlm_put(dlm);
+ 	return master;
+ }
+ 
+diff --git a/fs/open.c b/fs/open.c
+index 98e5a52dc68c..f9d2bf935099 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -678,18 +678,18 @@ int open_check_o_direct(struct file *f)
+ }
+ 
+ static int do_dentry_open(struct file *f,
++			  struct inode *inode,
+ 			  int (*open)(struct inode *, struct file *),
+ 			  const struct cred *cred)
+ {
+ 	static const struct file_operations empty_fops = {};
+-	struct inode *inode;
+ 	int error;
+ 
+ 	f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
+ 				FMODE_PREAD | FMODE_PWRITE;
+ 
+ 	path_get(&f->f_path);
+-	inode = f->f_inode = f->f_path.dentry->d_inode;
++	f->f_inode = inode;
+ 	f->f_mapping = inode->i_mapping;
+ 
+ 	if (unlikely(f->f_flags & O_PATH)) {
+@@ -793,7 +793,8 @@ int finish_open(struct file *file, struct dentry *dentry,
+ 	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
+ 
+ 	file->f_path.dentry = dentry;
+-	error = do_dentry_open(file, open, current_cred());
++	error = do_dentry_open(file, d_backing_inode(dentry), open,
++			       current_cred());
+ 	if (!error)
+ 		*opened |= FILE_OPENED;
+ 
+@@ -822,6 +823,28 @@ int finish_no_open(struct file *file, struct dentry *dentry)
+ }
+ EXPORT_SYMBOL(finish_no_open);
+ 
++/**
++ * vfs_open - open the file at the given path
++ * @path: path to open
++ * @file: newly allocated file with f_flags initialized
++ * @cred: credentials to use
++ */
++int vfs_open(const struct path *path, struct file *file,
++	     const struct cred *cred)
++{
++	struct dentry *dentry = path->dentry;
++	struct inode *inode = dentry->d_inode;
++
++	file->f_path = *path;
++	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
++		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
++		if (IS_ERR(inode))
++			return PTR_ERR(inode);
++	}
++
++	return do_dentry_open(file, inode, NULL, cred);
++}
++
+ struct file *dentry_open(const struct path *path, int flags,
+ 			 const struct cred *cred)
+ {
+@@ -853,26 +876,6 @@ struct file *dentry_open(const struct path *path, int flags,
+ }
+ EXPORT_SYMBOL(dentry_open);
+ 
+-/**
+- * vfs_open - open the file at the given path
+- * @path: path to open
+- * @filp: newly allocated file with f_flag initialized
+- * @cred: credentials to use
+- */
+-int vfs_open(const struct path *path, struct file *filp,
+-	     const struct cred *cred)
+-{
+-	struct inode *inode = path->dentry->d_inode;
+-
+-	if (inode->i_op->dentry_open)
+-		return inode->i_op->dentry_open(path->dentry, filp, cred);
+-	else {
+-		filp->f_path = *path;
+-		return do_dentry_open(filp, NULL, cred);
+-	}
+-}
+-EXPORT_SYMBOL(vfs_open);
+-
+ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
+ {
+ 	int lookup_flags = 0;
+diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
+index 04f124884687..ba0db2638946 100644
+--- a/fs/overlayfs/inode.c
++++ b/fs/overlayfs/inode.c
+@@ -336,37 +336,33 @@ static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
+ 	return true;
+ }
+ 
+-static int ovl_dentry_open(struct dentry *dentry, struct file *file,
+-		    const struct cred *cred)
++struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
+ {
+ 	int err;
+ 	struct path realpath;
+ 	enum ovl_path_type type;
+-	bool want_write = false;
++
++	if (d_is_dir(dentry))
++		return d_backing_inode(dentry);
+ 
+ 	type = ovl_path_real(dentry, &realpath);
+-	if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
+-		want_write = true;
++	if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
+ 		err = ovl_want_write(dentry);
+ 		if (err)
+-			goto out;
++			return ERR_PTR(err);
+ 
+-		if (file->f_flags & O_TRUNC)
++		if (file_flags & O_TRUNC)
+ 			err = ovl_copy_up_last(dentry, NULL, true);
+ 		else
+ 			err = ovl_copy_up(dentry);
++		ovl_drop_write(dentry);
+ 		if (err)
+-			goto out_drop_write;
++			return ERR_PTR(err);
+ 
+ 		ovl_path_upper(dentry, &realpath);
+ 	}
+ 
+-	err = vfs_open(&realpath, file, cred);
+-out_drop_write:
+-	if (want_write)
+-		ovl_drop_write(dentry);
+-out:
+-	return err;
++	return d_backing_inode(realpath.dentry);
+ }
+ 
+ static const struct inode_operations ovl_file_inode_operations = {
+@@ -377,7 +373,6 @@ static const struct inode_operations ovl_file_inode_operations = {
+ 	.getxattr	= ovl_getxattr,
+ 	.listxattr	= ovl_listxattr,
+ 	.removexattr	= ovl_removexattr,
+-	.dentry_open	= ovl_dentry_open,
+ };
+ 
+ static const struct inode_operations ovl_symlink_inode_operations = {
+diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
+index 17ac5afc9ffb..ea5a40b06e3a 100644
+--- a/fs/overlayfs/overlayfs.h
++++ b/fs/overlayfs/overlayfs.h
+@@ -173,6 +173,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
+ 		     void *value, size_t size);
+ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
+ int ovl_removexattr(struct dentry *dentry, const char *name);
++struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
+ 
+ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
+ 			    struct ovl_entry *oe);
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index 155989455a72..33f2d27a6792 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -275,6 +275,7 @@ static void ovl_dentry_release(struct dentry *dentry)
+ 
+ static const struct dentry_operations ovl_dentry_operations = {
+ 	.d_release = ovl_dentry_release,
++	.d_select_inode = ovl_d_select_inode,
+ };
+ 
+ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
+diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
+index 96f3448b6eb4..fd65b3f1923c 100644
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
+ {
+ 	int err;
+ 
+-	mutex_lock(&inode->i_mutex);
+ 	err = security_inode_init_security(inode, dentry, qstr,
+ 					   &init_xattrs, 0);
+-	mutex_unlock(&inode->i_mutex);
+-
+ 	if (err) {
+ 		struct ubifs_info *c = dentry->i_sb->s_fs_info;
+ 		ubifs_err(c, "cannot initialize security for inode %lu, error %d",
+diff --git a/include/linux/dcache.h b/include/linux/dcache.h
+index df334cbacc6d..167ec0934049 100644
+--- a/include/linux/dcache.h
++++ b/include/linux/dcache.h
+@@ -160,6 +160,7 @@ struct dentry_operations {
+ 	char *(*d_dname)(struct dentry *, char *, int);
+ 	struct vfsmount *(*d_automount)(struct path *);
+ 	int (*d_manage)(struct dentry *, bool);
++	struct inode *(*d_select_inode)(struct dentry *, unsigned);
+ } ____cacheline_aligned;
+ 
+ /*
+@@ -225,6 +226,7 @@ struct dentry_operations {
+ 
+ #define DCACHE_MAY_FREE			0x00800000
+ #define DCACHE_FALLTHRU			0x01000000 /* Fall through to lower layer */
++#define DCACHE_OP_SELECT_INODE		0x02000000 /* Unioned entry: dcache op selects inode */
+ 
+ extern seqlock_t rename_lock;
+ 
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 571aab91bfc0..f93192333b37 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1641,7 +1641,6 @@ struct inode_operations {
+ 	int (*set_acl)(struct inode *, struct posix_acl *, int);
+ 
+ 	/* WARNING: probably going away soon, do not use! */
+-	int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
+ } ____cacheline_aligned;
+ 
+ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
+@@ -2193,7 +2192,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
+ extern struct file *filp_open(const char *, int, umode_t);
+ extern struct file *file_open_root(struct dentry *, struct vfsmount *,
+ 				   const char *, int);
+-extern int vfs_open(const struct path *, struct file *, const struct cred *);
+ extern struct file * dentry_open(const struct path *, int, const struct cred *);
+ extern int filp_close(struct file *, fl_owner_t id);
+ 
+diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
+index de722d4e9d61..258daf914c6d 100644
+--- a/include/linux/mmc/core.h
++++ b/include/linux/mmc/core.h
+@@ -121,6 +121,7 @@ struct mmc_data {
+ 	struct mmc_request	*mrq;		/* associated request */
+ 
+ 	unsigned int		sg_len;		/* size of scatter list */
++	int			sg_count;	/* mapped sg entries */
+ 	struct scatterlist	*sg;		/* I/O scatter list */
+ 	s32			host_cookie;	/* host private data */
+ };
+diff --git a/include/linux/security.h b/include/linux/security.h
+index 18264ea9e314..5d45b4fd91d2 100644
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -2527,7 +2527,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
+ 				      unsigned long arg4,
+ 				      unsigned long arg5)
+ {
+-	return cap_task_prctl(option, arg2, arg3, arg3, arg5);
++	return cap_task_prctl(option, arg2, arg3, arg4, arg5);
+ }
+ 
+ static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
+diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
+index d81d584157e1..e8635854a55b 100644
+--- a/include/net/netfilter/nf_queue.h
++++ b/include/net/netfilter/nf_queue.h
+@@ -24,6 +24,8 @@ struct nf_queue_entry {
+ struct nf_queue_handler {
+ 	int			(*outfn)(struct nf_queue_entry *entry,
+ 					 unsigned int queuenum);
++	void			(*nf_hook_drop)(struct net *net,
++						struct nf_hook_ops *ops);
+ };
+ 
+ void nf_register_queue_handler(const struct nf_queue_handler *qh);
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index e6bcf55dcf20..fd0ca42b1d63 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
+ 
+ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
+ {
+-	return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
++	return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
+ }
+ 
+ unsigned int nft_parse_register(const struct nlattr *attr);
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 73abbc54063d..7bd03f867fca 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -787,7 +787,6 @@ struct iscsi_np {
+ 	enum iscsi_timer_flags_table np_login_timer_flags;
+ 	u32			np_exports;
+ 	enum np_flags_table	np_flags;
+-	unsigned char		np_ip[IPV6_ADDRESS_SPACE];
+ 	u16			np_port;
+ 	spinlock_t		np_thread_lock;
+ 	struct completion	np_restart_comp;
+diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
+index 9ce083960a25..f18490985fc8 100644
+--- a/include/xen/interface/sched.h
++++ b/include/xen/interface/sched.h
+@@ -107,5 +107,13 @@ struct sched_watchdog {
+ #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
+ #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
+ #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
++/*
++ * Domain asked to perform a 'soft reset' for itself. The expected behavior is
++ * to reset internal Xen state for the domain, returning it to the point where
++ * it was created but leaving the domain's memory contents and vCPU contexts
++ * intact. This will allow the domain to start over and set up all Xen specific
++ * interfaces again.
++ */
++#define SHUTDOWN_soft_reset 5
+ 
+ #endif /* __XEN_PUBLIC_SCHED_H__ */
+diff --git a/ipc/msg.c b/ipc/msg.c
+index 2b6fdbb9e0e9..652540613d26 100644
+--- a/ipc/msg.c
++++ b/ipc/msg.c
+@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ 		return retval;
+ 	}
+ 
+-	/* ipc_addid() locks msq upon success. */
+-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+-	if (id < 0) {
+-		ipc_rcu_putref(msq, msg_rcu_free);
+-		return id;
+-	}
+-
+ 	msq->q_stime = msq->q_rtime = 0;
+ 	msq->q_ctime = get_seconds();
+ 	msq->q_cbytes = msq->q_qnum = 0;
+@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
+ 	INIT_LIST_HEAD(&msq->q_receivers);
+ 	INIT_LIST_HEAD(&msq->q_senders);
+ 
++	/* ipc_addid() locks msq upon success. */
++	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
++	if (id < 0) {
++		ipc_rcu_putref(msq, msg_rcu_free);
++		return id;
++	}
++
+ 	ipc_unlock_object(&msq->q_perm);
+ 	rcu_read_unlock();
+ 
+diff --git a/ipc/shm.c b/ipc/shm.c
+index 6d767071c367..499a8bd22fad 100644
+--- a/ipc/shm.c
++++ b/ipc/shm.c
+@@ -550,12 +550,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ 	if (IS_ERR(file))
+ 		goto no_file;
+ 
+-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+-	if (id < 0) {
+-		error = id;
+-		goto no_id;
+-	}
+-
+ 	shp->shm_cprid = task_tgid_vnr(current);
+ 	shp->shm_lprid = 0;
+ 	shp->shm_atim = shp->shm_dtim = 0;
+@@ -564,6 +558,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
+ 	shp->shm_nattch = 0;
+ 	shp->shm_file = file;
+ 	shp->shm_creator = current;
++
++	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
++	if (id < 0) {
++		error = id;
++		goto no_id;
++	}
++
+ 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
+ 
+ 	/*
+diff --git a/ipc/util.c b/ipc/util.c
+index ff3323ef8d8b..c917e9fd10b1 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
+ 	rcu_read_lock();
+ 	spin_lock(&new->lock);
+ 
++	current_euid_egid(&euid, &egid);
++	new->cuid = new->uid = euid;
++	new->gid = new->cgid = egid;
++
+ 	id = idr_alloc(&ids->ipcs_idr, new,
+ 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+ 		       GFP_NOWAIT);
+@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
+ 
+ 	ids->in_use++;
+ 
+-	current_euid_egid(&euid, &egid);
+-	new->cuid = new->uid = euid;
+-	new->gid = new->cgid = egid;
+-
+ 	if (next_id < 0) {
+ 		new->seq = ids->seq++;
+ 		if (ids->seq > IPCID_SEQ_MAX)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 94817491407b..e1af58e23bee 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4411,14 +4411,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
+ 	rcu_read_unlock();
+ }
+ 
+-static void rb_free_rcu(struct rcu_head *rcu_head)
+-{
+-	struct ring_buffer *rb;
+-
+-	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
+-	rb_free(rb);
+-}
+-
+ struct ring_buffer *ring_buffer_get(struct perf_event *event)
+ {
+ 	struct ring_buffer *rb;
+diff --git a/kernel/events/internal.h b/kernel/events/internal.h
+index 9f6ce9ba4a04..a6adc36a3732 100644
+--- a/kernel/events/internal.h
++++ b/kernel/events/internal.h
+@@ -11,6 +11,7 @@
+ struct ring_buffer {
+ 	atomic_t			refcount;
+ 	struct rcu_head			rcu_head;
++	struct irq_work			irq_work;
+ #ifdef CONFIG_PERF_USE_VMALLOC
+ 	struct work_struct		work;
+ 	int				page_order;	/* allocation order  */
+@@ -55,6 +56,15 @@ struct ring_buffer {
+ };
+ 
+ extern void rb_free(struct ring_buffer *rb);
++
++static inline void rb_free_rcu(struct rcu_head *rcu_head)
++{
++	struct ring_buffer *rb;
++
++	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
++	rb_free(rb);
++}
++
+ extern struct ring_buffer *
+ rb_alloc(int nr_pages, long watermark, int cpu, int flags);
+ extern void perf_event_wakeup(struct perf_event *event);
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index a7604c81168e..7f63ad978cb8 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
+ 	rcu_read_unlock();
+ }
+ 
++static void rb_irq_work(struct irq_work *work);
++
+ static void
+ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+ {
+@@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
+ 
+ 	INIT_LIST_HEAD(&rb->event_list);
+ 	spin_lock_init(&rb->event_lock);
++	init_irq_work(&rb->irq_work, rb_irq_work);
++}
++
++static void ring_buffer_put_async(struct ring_buffer *rb)
++{
++	if (!atomic_dec_and_test(&rb->refcount))
++		return;
++
++	rb->rcu_head.next = (void *)rb;
++	irq_work_queue(&rb->irq_work);
+ }
+ 
+ /*
+@@ -319,7 +331,7 @@ err_put:
+ 	rb_free_aux(rb);
+ 
+ err:
+-	ring_buffer_put(rb);
++	ring_buffer_put_async(rb);
+ 	handle->event = NULL;
+ 
+ 	return NULL;
+@@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
+ 
+ 	local_set(&rb->aux_nest, 0);
+ 	rb_free_aux(rb);
+-	ring_buffer_put(rb);
++	ring_buffer_put_async(rb);
+ }
+ 
+ /*
+@@ -559,7 +571,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
+ void rb_free_aux(struct ring_buffer *rb)
+ {
+ 	if (atomic_dec_and_test(&rb->aux_refcount))
++		irq_work_queue(&rb->irq_work);
++}
++
++static void rb_irq_work(struct irq_work *work)
++{
++	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
++
++	if (!atomic_read(&rb->aux_refcount))
+ 		__rb_free_aux(rb);
++
++	if (rb->rcu_head.next == (void *)rb)
++		call_rcu(&rb->rcu_head, rb_free_rcu);
+ }
+ 
+ #ifndef CONFIG_PERF_USE_VMALLOC
+diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
+index df2f4642d1e7..5c38f59741e2 100644
+--- a/kernel/irq/proc.c
++++ b/kernel/irq/proc.c
+@@ -12,6 +12,7 @@
+ #include <linux/seq_file.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel_stat.h>
++#include <linux/mutex.h>
+ 
+ #include "internals.h"
+ 
+@@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
+ 
+ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ {
++	static DEFINE_MUTEX(register_lock);
+ 	char name [MAX_NAMELEN];
+ 
+-	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
++	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
+ 		return;
+ 
++	/*
++	 * irq directories are registered only when a handler is
++	 * added, not when the descriptor is created, so multiple
++	 * tasks might try to register at the same time.
++	 */
++	mutex_lock(&register_lock);
++
++	if (desc->dir)
++		goto out_unlock;
++
+ 	memset(name, 0, MAX_NAMELEN);
+ 	sprintf(name, "%d", irq);
+ 
+ 	/* create /proc/irq/1234 */
+ 	desc->dir = proc_mkdir(name, root_irq_dir);
+ 	if (!desc->dir)
+-		return;
++		goto out_unlock;
+ 
+ #ifdef CONFIG_SMP
+ 	/* create /proc/irq/<irq>/smp_affinity */
+@@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
+ 
+ 	proc_create_data("spurious", 0444, desc->dir,
+ 			 &irq_spurious_proc_fops, (void *)(long)irq);
++
++out_unlock:
++	mutex_unlock(&register_lock);
+ }
+ 
+ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index e6910526c84b..8476206a1e19 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2217,11 +2217,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ 	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+ 	 * schedule one last time. The schedule call will never return, and
+ 	 * the scheduled task must drop that reference.
+-	 * The test for TASK_DEAD must occur while the runqueue locks are
+-	 * still held, otherwise prev could be scheduled on another cpu, die
+-	 * there before we look at prev->state, and then the reference would
+-	 * be dropped twice.
+-	 *		Manfred Spraul <manfred@colorfullife.com>
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
+ 	 */
+ 	prev_state = prev->state;
+ 	vtime_task_switch(prev);
+@@ -2358,13 +2358,20 @@ unsigned long nr_running(void)
+ 
+ /*
+  * Check if only the current task is running on the cpu.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race.  The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptable section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
+  */
+ bool single_task_running(void)
+ {
+-	if (cpu_rq(smp_processor_id())->nr_running == 1)
+-		return true;
+-	else
+-		return false;
++	return raw_rq()->nr_running == 1;
+ }
+ EXPORT_SYMBOL(single_task_running);
+ 
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c2980e8733bc..77690b653ca9 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5126,18 +5126,21 @@ again:
+ 		 * entity, update_curr() will update its vruntime, otherwise
+ 		 * forget we've ever seen it.
+ 		 */
+-		if (curr && curr->on_rq)
+-			update_curr(cfs_rq);
+-		else
+-			curr = NULL;
++		if (curr) {
++			if (curr->on_rq)
++				update_curr(cfs_rq);
++			else
++				curr = NULL;
+ 
+-		/*
+-		 * This call to check_cfs_rq_runtime() will do the throttle and
+-		 * dequeue its entity in the parent(s). Therefore the 'simple'
+-		 * nr_running test will indeed be correct.
+-		 */
+-		if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+-			goto simple;
++			/*
++			 * This call to check_cfs_rq_runtime() will do the
++			 * throttle and dequeue its entity in the parent(s).
++			 * Therefore the 'simple' nr_running test will indeed
++			 * be correct.
++			 */
++			if (unlikely(check_cfs_rq_runtime(cfs_rq)))
++				goto simple;
++		}
+ 
+ 		se = pick_next_entity(cfs_rq, curr);
+ 		cfs_rq = group_cfs_rq(se);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index e0e129993958..aa1f059de4f7 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1068,9 +1068,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
+ 	 * After ->on_cpu is cleared, the task can be moved to a different CPU.
+ 	 * We must ensure this doesn't happen until the switch is completely
+ 	 * finished.
++	 *
++	 * Pairs with the control dependency and rmb in try_to_wake_up().
+ 	 */
+-	smp_wmb();
+-	prev->on_cpu = 0;
++	smp_store_release(&prev->on_cpu, 0);
+ #endif
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ 	/* this is a valid case when another task releases the spinlock */
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 946acb72179f..414d9df94724 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1615,7 +1615,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
+ 	negative = (tick_error < 0);
+ 
+ 	/* Sort out the magnitude of the correction */
+-	tick_error = abs(tick_error);
++	tick_error = abs64(tick_error);
+ 	for (adj = 0; tick_error > interval; adj++)
+ 		tick_error >>= 1;
+ 
+diff --git a/lib/iommu-common.c b/lib/iommu-common.c
+index df30632f0bef..4fdeee02e0a9 100644
+--- a/lib/iommu-common.c
++++ b/lib/iommu-common.c
+@@ -21,8 +21,7 @@ static	DEFINE_PER_CPU(unsigned int, iommu_hash_common);
+ 
+ static inline bool need_flush(struct iommu_map_table *iommu)
+ {
+-	return (iommu->lazy_flush != NULL &&
+-		(iommu->flags & IOMMU_NEED_FLUSH) != 0);
++	return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
+ }
+ 
+ static inline void set_flush(struct iommu_map_table *iommu)
+@@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
+ 			goto bail;
+ 		}
+ 	}
+-	if (n < pool->hint || need_flush(iommu)) {
++	if (iommu->lazy_flush &&
++	    (n < pool->hint || need_flush(iommu))) {
+ 		clear_flush(iommu);
+ 		iommu->lazy_flush(iommu);
+ 	}
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 8c4c1f9f9a9a..a6ff935476e3 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2897,6 +2897,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			continue;
+ 
+ 		/*
++		 * Shared VMAs have their own reserves and do not affect
++		 * MAP_PRIVATE accounting but it is possible that a shared
++		 * VMA is using the same page so check and skip such VMAs.
++		 */
++		if (iter_vma->vm_flags & VM_MAYSHARE)
++			continue;
++
++		/*
+ 		 * Unmap the page from other VMAs without their own reserves.
+ 		 * They get marked to be SIGKILLed if they fault in these
+ 		 * areas. This is because a future no-page fault on this VMA
+diff --git a/mm/migrate.c b/mm/migrate.c
+index f53838fe3dfe..2c37b1a44a8c 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -1062,7 +1062,7 @@ out:
+ 	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+ 		put_new_page(new_hpage, private);
+ 	else
+-		put_page(new_hpage);
++		putback_active_hugepage(new_hpage);
+ 
+ 	if (result) {
+ 		if (rc)
+diff --git a/mm/slab.c b/mm/slab.c
+index 3dd2d1ff9d5d..330039fdcf18 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -2189,9 +2189,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
+ 			size += BYTES_PER_WORD;
+ 	}
+ #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
+-	if (size >= kmalloc_size(INDEX_NODE + 1)
+-	    && cachep->object_size > cache_line_size()
+-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
++	/*
++	 * To activate debug pagealloc, off-slab management is a necessary
++	 * requirement. In the early phase of initialization, small sized
++	 * slabs don't get initialized, so it would not be possible then.
++	 * So we need to check size >= 256, which guarantees that all the
++	 * necessary small sized slabs have been initialized by that point.
++	 */
++	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
++		size >= 256 && cachep->object_size > cache_line_size() &&
++		ALIGN(size, cachep->align) < PAGE_SIZE) {
+ 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
+ 		size = PAGE_SIZE;
+ 	}
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index aad022dd15df..95b3167cf036 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -15,6 +15,7 @@
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+ 
++#include <linux/bitops.h>
+ #include <linux/if_ether.h>
+ #include <linux/if_arp.h>
+ #include <linux/if_vlan.h>
+@@ -422,7 +423,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
+ 	int j;
+ 
+ 	/* check if orig node candidate is running DAT */
+-	if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
++	if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
+ 		goto out;
+ 
+ 	/* Check if this node has already been selected... */
+@@ -682,9 +683,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ 					   uint16_t tvlv_value_len)
+ {
+ 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
++		clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
+ 	else
+-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
++		set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
+ }
+ 
+ /**
+diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
+index b24e4bb64fb5..8653c1a506f4 100644
+--- a/net/batman-adv/multicast.c
++++ b/net/batman-adv/multicast.c
+@@ -15,6 +15,8 @@
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+ 
++#include <linux/bitops.h>
++#include <linux/bug.h>
+ #include "main.h"
+ #include "multicast.h"
+ #include "originator.h"
+@@ -565,19 +567,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
+  *
+  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
+  * orig, has toggled then this method updates counter and list accordingly.
++ *
++ * Caller needs to hold orig->mcast_handler_lock.
+  */
+ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
+ 					     struct batadv_orig_node *orig,
+ 					     uint8_t mcast_flags)
+ {
++	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
++	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
++
+ 	/* switched from flag unset to set */
+ 	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
+ 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
+ 		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
+-				   &bat_priv->mcast.want_all_unsnoopables_list);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(!hlist_unhashed(node));
++
++		hlist_add_head_rcu(node, head);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	/* switched from flag set to unset */
+ 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
+@@ -585,7 +594,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
+ 		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(hlist_unhashed(node));
++
++		hlist_del_init_rcu(node);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	}
+ }
+@@ -598,19 +610,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
+  *
+  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
+  * toggled then this method updates counter and list accordingly.
++ *
++ * Caller needs to hold orig->mcast_handler_lock.
+  */
+ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
+ 					  struct batadv_orig_node *orig,
+ 					  uint8_t mcast_flags)
+ {
++	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
++	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
++
+ 	/* switched from flag unset to set */
+ 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
+ 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
+ 		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
+-				   &bat_priv->mcast.want_all_ipv4_list);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(!hlist_unhashed(node));
++
++		hlist_add_head_rcu(node, head);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	/* switched from flag set to unset */
+ 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
+@@ -618,7 +637,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
+ 		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(hlist_unhashed(node));
++
++		hlist_del_init_rcu(node);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	}
+ }
+@@ -631,19 +653,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
+  *
+  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
+  * toggled then this method updates counter and list accordingly.
++ *
++ * Caller needs to hold orig->mcast_handler_lock.
+  */
+ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
+ 					  struct batadv_orig_node *orig,
+ 					  uint8_t mcast_flags)
+ {
++	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
++	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
++
+ 	/* switched from flag unset to set */
+ 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
+ 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
+ 		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
+-				   &bat_priv->mcast.want_all_ipv6_list);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(!hlist_unhashed(node));
++
++		hlist_add_head_rcu(node, head);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	/* switched from flag set to unset */
+ 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
+@@ -651,7 +680,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
+ 		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
+ 
+ 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
+-		hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
++		/* flag checks above + mcast_handler_lock prevents this */
++		WARN_ON(hlist_unhashed(node));
++
++		hlist_del_init_rcu(node);
+ 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
+ 	}
+ }
+@@ -674,39 +706,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ 	uint8_t mcast_flags = BATADV_NO_FLAGS;
+ 	bool orig_initialized;
+ 
+-	orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
++	if (orig_mcast_enabled && tvlv_value &&
++	    (tvlv_value_len >= sizeof(mcast_flags)))
++		mcast_flags = *(uint8_t *)tvlv_value;
++
++	spin_lock_bh(&orig->mcast_handler_lock);
++	orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
++				    &orig->capa_initialized);
+ 
+ 	/* If mcast support is turned on decrease the disabled mcast node
+ 	 * counter only if we had increased it for this node before. If this
+ 	 * is a completely new orig_node no need to decrease the counter.
+ 	 */
+ 	if (orig_mcast_enabled &&
+-	    !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
++	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
+ 		if (orig_initialized)
+ 			atomic_dec(&bat_priv->mcast.num_disabled);
+-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
++		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
+ 	/* If mcast support is being switched off or if this is an initial
+ 	 * OGM without mcast support then increase the disabled mcast
+ 	 * node counter.
+ 	 */
+ 	} else if (!orig_mcast_enabled &&
+-		   (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
++		   (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
+ 		    !orig_initialized)) {
+ 		atomic_inc(&bat_priv->mcast.num_disabled);
+-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
++		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
+ 	}
+ 
+-	orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
+-
+-	if (orig_mcast_enabled && tvlv_value &&
+-	    (tvlv_value_len >= sizeof(mcast_flags)))
+-		mcast_flags = *(uint8_t *)tvlv_value;
++	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
+ 
+ 	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
+ 	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
+ 	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
+ 
+ 	orig->mcast_flags = mcast_flags;
++	spin_unlock_bh(&orig->mcast_handler_lock);
+ }
+ 
+ /**
+@@ -740,11 +775,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
+ {
+ 	struct batadv_priv *bat_priv = orig->bat_priv;
+ 
+-	if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+-	    orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
++	spin_lock_bh(&orig->mcast_handler_lock);
++
++	if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
++	    test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
+ 		atomic_dec(&bat_priv->mcast.num_disabled);
+ 
+ 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
+ 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
+ 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
++
++	spin_unlock_bh(&orig->mcast_handler_lock);
+ }
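
The switch from hlist_del_rcu() to hlist_del_init_rcu() above is what makes
the added hlist_unhashed() checks meaningful. A minimal sketch of that
pattern, with hypothetical names not taken from the patch:

	#include <linux/rculist.h>

	static void set_membership(struct hlist_head *head,
				   struct hlist_node *node, bool member)
	{
		/* INIT_HLIST_NODE() and hlist_del_init_rcu() both leave
		 * node->pprev == NULL, so hlist_unhashed() reliably reports
		 * whether the node is currently on a list.
		 */
		if (member && hlist_unhashed(node))
			hlist_add_head_rcu(node, head);
		else if (!member && !hlist_unhashed(node))
			hlist_del_init_rcu(node);
	}
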
+diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
+index 127cc4d7380a..a449195c5b2b 100644
+--- a/net/batman-adv/network-coding.c
++++ b/net/batman-adv/network-coding.c
+@@ -15,6 +15,7 @@
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+ 
++#include <linux/bitops.h>
+ #include <linux/debugfs.h>
+ 
+ #include "main.h"
+@@ -105,9 +106,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
+ 					  uint16_t tvlv_value_len)
+ {
+ 	if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
+-		orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
++		clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
+ 	else
+-		orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
++		set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
+ }
+ 
+ /**
+@@ -871,7 +872,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
+ 		goto out;
+ 
+ 	/* check if orig node is network coding enabled */
+-	if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
++	if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
+ 		goto out;
+ 
+ 	/* accept ogms from 'good' neighbors and single hop neighbors */
+diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
+index 90e805aba379..dfae97408628 100644
+--- a/net/batman-adv/originator.c
++++ b/net/batman-adv/originator.c
+@@ -678,8 +678,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
+ 	orig_node->last_seen = jiffies;
+ 	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
+ 	orig_node->bcast_seqno_reset = reset_time;
++
+ #ifdef CONFIG_BATMAN_ADV_MCAST
+ 	orig_node->mcast_flags = BATADV_NO_FLAGS;
++	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
++	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
++	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
++	spin_lock_init(&orig_node->mcast_handler_lock);
+ #endif
+ 
+ 	/* create a vlan object for the "untagged" LAN */
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index 5ec31d7de24f..a0b1b861b968 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -172,6 +172,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
+ 	int gw_mode;
+ 	enum batadv_forw_mode forw_mode;
+ 	struct batadv_orig_node *mcast_single_orig = NULL;
++	int network_offset = ETH_HLEN;
+ 
+ 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
+ 		goto dropped;
+@@ -184,14 +185,18 @@ static int batadv_interface_tx(struct sk_buff *skb,
+ 	case ETH_P_8021Q:
+ 		vhdr = vlan_eth_hdr(skb);
+ 
+-		if (vhdr->h_vlan_encapsulated_proto != ethertype)
++		if (vhdr->h_vlan_encapsulated_proto != ethertype) {
++			network_offset += VLAN_HLEN;
+ 			break;
++		}
+ 
+ 		/* fall through */
+ 	case ETH_P_BATMAN:
+ 		goto dropped;
+ 	}
+ 
++	skb_set_network_header(skb, network_offset);
++
+ 	if (batadv_bla_tx(bat_priv, skb, vid))
+ 		goto dropped;
+ 
+@@ -449,6 +454,9 @@ out:
+  */
+ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
+ {
++	if (!vlan)
++		return;
++
+ 	if (atomic_dec_and_test(&vlan->refcount)) {
+ 		spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+ 		hlist_del_rcu(&vlan->list);
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 07b263a437d1..4f2a9d2c56db 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -15,6 +15,7 @@
+  * along with this program; if not, see <http://www.gnu.org/licenses/>.
+  */
+ 
++#include <linux/bitops.h>
+ #include "main.h"
+ #include "translation-table.h"
+ #include "soft-interface.h"
+@@ -575,6 +576,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ 
+ 	/* increase the refcounter of the related vlan */
+ 	vlan = batadv_softif_vlan_get(bat_priv, vid);
++	if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
++		 addr, BATADV_PRINT_VID(vid)))
++		goto out;
+ 
+ 	batadv_dbg(BATADV_DBG_TT, bat_priv,
+ 		   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
+@@ -1015,6 +1019,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ 	struct batadv_tt_local_entry *tt_local_entry;
+ 	uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+ 	struct batadv_softif_vlan *vlan;
++	void *tt_entry_exists;
+ 
+ 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
+ 	if (!tt_local_entry)
+@@ -1042,11 +1047,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ 	 * immediately purge it
+ 	 */
+ 	batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
+-	hlist_del_rcu(&tt_local_entry->common.hash_entry);
++
++	tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
++					     batadv_compare_tt,
++					     batadv_choose_tt,
++					     &tt_local_entry->common);
++	if (!tt_entry_exists)
++		goto out;
++
++	/* extra call to free the local tt entry */
+ 	batadv_tt_local_entry_free_ref(tt_local_entry);
+ 
+ 	/* decrease the reference held for this vlan */
+ 	vlan = batadv_softif_vlan_get(bat_priv, vid);
++	if (!vlan)
++		goto out;
++
+ 	batadv_softif_vlan_free_ref(vlan);
+ 	batadv_softif_vlan_free_ref(vlan);
+ 
+@@ -1147,8 +1163,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
+ 			/* decrease the reference held for this vlan */
+ 			vlan = batadv_softif_vlan_get(bat_priv,
+ 						      tt_common_entry->vid);
+-			batadv_softif_vlan_free_ref(vlan);
+-			batadv_softif_vlan_free_ref(vlan);
++			if (vlan) {
++				batadv_softif_vlan_free_ref(vlan);
++				batadv_softif_vlan_free_ref(vlan);
++			}
+ 
+ 			batadv_tt_local_entry_free_ref(tt_local);
+ 		}
+@@ -1843,7 +1861,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ 		}
+ 		spin_unlock_bh(list_lock);
+ 	}
+-	orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
++	clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
+ }
+ 
+ static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
+@@ -2802,7 +2820,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ 				return;
+ 		}
+ 	}
+-	orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
++	set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
+ }
+ 
+ static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
+@@ -3188,8 +3206,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
+ 
+ 			/* decrease the reference held for this vlan */
+ 			vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+-			batadv_softif_vlan_free_ref(vlan);
+-			batadv_softif_vlan_free_ref(vlan);
++			if (vlan) {
++				batadv_softif_vlan_free_ref(vlan);
++				batadv_softif_vlan_free_ref(vlan);
++			}
+ 
+ 			batadv_tt_local_entry_free_ref(tt_local);
+ 		}
+@@ -3302,7 +3322,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ 	bool has_tt_init;
+ 
+ 	tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
+-	has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
++	has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
++			       &orig_node->capa_initialized);
+ 
+ 	/* orig table not initialised AND first diff is in the OGM OR the ttvn
+ 	 * increased by one -> we can apply the attached changes
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 9398c3fb4174..26c37be2aa05 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -204,6 +204,7 @@ struct batadv_orig_bat_iv {
+  * @batadv_dat_addr_t:  address of the orig node in the distributed hash
+  * @last_seen: time when last packet from this node was received
+  * @bcast_seqno_reset: time when the broadcast seqno window was reset
++ * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
+  * @mcast_flags: multicast flags announced by the orig node
+  * @mcast_want_all_unsnoop_node: a list node for the
+  *  mcast.want_all_unsnoopables list
+@@ -251,13 +252,15 @@ struct batadv_orig_node {
+ 	unsigned long last_seen;
+ 	unsigned long bcast_seqno_reset;
+ #ifdef CONFIG_BATMAN_ADV_MCAST
++	/* synchronizes mcast tvlv specific orig changes */
++	spinlock_t mcast_handler_lock;
+ 	uint8_t mcast_flags;
+ 	struct hlist_node mcast_want_all_unsnoopables_node;
+ 	struct hlist_node mcast_want_all_ipv4_node;
+ 	struct hlist_node mcast_want_all_ipv6_node;
+ #endif
+-	uint8_t capabilities;
+-	uint8_t capa_initialized;
++	unsigned long capabilities;
++	unsigned long capa_initialized;
+ 	atomic_t last_ttvn;
+ 	unsigned char *tt_buff;
+ 	int16_t tt_buff_len;
+@@ -296,10 +299,10 @@ struct batadv_orig_node {
+  *  (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
+  */
+ enum batadv_orig_capabilities {
+-	BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
+-	BATADV_ORIG_CAPA_HAS_NC = BIT(1),
+-	BATADV_ORIG_CAPA_HAS_TT = BIT(2),
+-	BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
++	BATADV_ORIG_CAPA_HAS_DAT,
++	BATADV_ORIG_CAPA_HAS_NC,
++	BATADV_ORIG_CAPA_HAS_TT,
++	BATADV_ORIG_CAPA_HAS_MCAST,
+ };
+ 
+ /**
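
For reference, a sketch of why the enum values change from BIT(n) masks to
plain indices: set_bit()/clear_bit()/test_bit() take a bit *number* and an
unsigned long word, which is also why the capability fields widen above.
The names here are illustrative only:

	#include <linux/bitops.h>

	enum example_caps {		/* bit numbers, not BIT() masks */
		EXAMPLE_CAPA_FOO,	/* == 0 */
		EXAMPLE_CAPA_BAR,	/* == 1 */
	};

	static unsigned long example_capabilities;

	static void example_toggle(void)
	{
		set_bit(EXAMPLE_CAPA_FOO, &example_capabilities); /* atomic RMW */
		if (test_bit(EXAMPLE_CAPA_FOO, &example_capabilities))
			clear_bit(EXAMPLE_CAPA_FOO, &example_capabilities);
	}
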
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 7b815bcc8c9b..69ad5091e2ce 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2294,12 +2294,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ 	if (!conn)
+ 		return 1;
+ 
+-	chan = conn->smp;
+-	if (!chan) {
+-		BT_ERR("SMP security requested but not available");
+-		return 1;
+-	}
+-
+ 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
+ 		return 1;
+ 
+@@ -2313,6 +2307,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ 		if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+ 			return 0;
+ 
++	chan = conn->smp;
++	if (!chan) {
++		BT_ERR("SMP security requested but not available");
++		return 1;
++	}
++
+ 	l2cap_chan_lock(chan);
+ 
+ 	/* If SMP is already in progress ignore this request */
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index b27fc401c6a9..e664706b350c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ 	}
+ 
+ 	spin_unlock(&queue->syn_wait_lock);
+-	if (del_timer_sync(&req->rsk_timer))
++	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+ 		reqsk_put(req);
+ 	return found;
+ }
+diff --git a/net/netfilter/core.c b/net/netfilter/core.c
+index e6163017c42d..5d0c6fd59475 100644
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -89,6 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
+ 	static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+ #endif
+ 	synchronize_net();
++	nf_queue_nf_hook_drop(reg);
+ }
+ EXPORT_SYMBOL(nf_unregister_hook);
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index 5d2b806a862e..38fbc194b9cb 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
+ 		 * return *ignored=0 i.e. ICMP and NF_DROP
+ 		 */
+ 		sched = rcu_dereference(svc->scheduler);
+-		dest = sched->schedule(svc, skb, iph);
++		if (sched) {
++			/* read svc->sched_data after svc->scheduler */
++			smp_rmb();
++			dest = sched->schedule(svc, skb, iph);
++		} else {
++			dest = NULL;
++		}
+ 		if (!dest) {
+ 			IP_VS_DBG(1, "p-schedule: no dest found.\n");
+ 			kfree(param.pe_data);
+@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
+ 	}
+ 
+ 	sched = rcu_dereference(svc->scheduler);
+-	dest = sched->schedule(svc, skb, iph);
++	if (sched) {
++		/* read svc->sched_data after svc->scheduler */
++		smp_rmb();
++		dest = sched->schedule(svc, skb, iph);
++	} else {
++		dest = NULL;
++	}
+ 	if (dest == NULL) {
+ 		IP_VS_DBG(1, "Schedule: no dest found.\n");
+ 		return NULL;
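
The smp_rmb() in both call sites is needed because svc->sched_data is a
sibling field of the RCU-protected svc->scheduler pointer, so the dependency
ordering of rcu_dereference() does not cover it. A minimal sketch of the
pairing under assumed struct shapes (names invented for illustration):

	#include <linux/rcupdate.h>

	struct sched { void *(*schedule)(void *); };	/* illustrative */
	struct svc {
		struct sched __rcu *scheduler;
		void *sched_data;
	};

	/* Publisher: rcu_assign_pointer() has release semantics, so the
	 * sched_data store is visible before scheduler becomes non-NULL. */
	static void publish(struct svc *svc, struct sched *s, void *data)
	{
		svc->sched_data = data;
		rcu_assign_pointer(svc->scheduler, s);
	}

	/* Subscriber: re-check the pointer, then order the data read;
	 * the smp_rmb() pairs with the release above. */
	static void *lookup(struct svc *svc)
	{
		struct sched *s = rcu_dereference(svc->scheduler);

		if (!s)
			return NULL;
		smp_rmb();		/* read sched_data after scheduler */
		return svc->sched_data;
	}
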
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 285eae3a1454..24c554201a76 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
+ 	__ip_vs_dst_cache_reset(dest);
+ 	spin_unlock_bh(&dest->dst_lock);
+ 
+-	sched = rcu_dereference_protected(svc->scheduler, 1);
+ 	if (add) {
+ 		ip_vs_start_estimator(svc->net, &dest->stats);
+ 		list_add_rcu(&dest->n_list, &svc->destinations);
+ 		svc->num_dests++;
+-		if (sched->add_dest)
++		sched = rcu_dereference_protected(svc->scheduler, 1);
++		if (sched && sched->add_dest)
+ 			sched->add_dest(svc, dest);
+ 	} else {
+-		if (sched->upd_dest)
++		sched = rcu_dereference_protected(svc->scheduler, 1);
++		if (sched && sched->upd_dest)
+ 			sched->upd_dest(svc, dest);
+ 	}
+ }
+@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
+ 		struct ip_vs_scheduler *sched;
+ 
+ 		sched = rcu_dereference_protected(svc->scheduler, 1);
+-		if (sched->del_dest)
++		if (sched && sched->del_dest)
+ 			sched->del_dest(svc, dest);
+ 	}
+ }
+@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
+ 	ip_vs_use_count_inc();
+ 
+ 	/* Lookup the scheduler by 'u->sched_name' */
+-	sched = ip_vs_scheduler_get(u->sched_name);
+-	if (sched == NULL) {
+-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
+-		ret = -ENOENT;
+-		goto out_err;
++	if (strcmp(u->sched_name, "none")) {
++		sched = ip_vs_scheduler_get(u->sched_name);
++		if (!sched) {
++			pr_info("Scheduler module ip_vs_%s not found\n",
++				u->sched_name);
++			ret = -ENOENT;
++			goto out_err;
++		}
+ 	}
+ 
+ 	if (u->pe_name && *u->pe_name) {
+@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
+ 	spin_lock_init(&svc->stats.lock);
+ 
+ 	/* Bind the scheduler */
+-	ret = ip_vs_bind_scheduler(svc, sched);
+-	if (ret)
+-		goto out_err;
+-	sched = NULL;
++	if (sched) {
++		ret = ip_vs_bind_scheduler(svc, sched);
++		if (ret)
++			goto out_err;
++		sched = NULL;
++	}
+ 
+ 	/* Bind the ct retriever */
+ 	RCU_INIT_POINTER(svc->pe, pe);
+@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
+ static int
+ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
+ {
+-	struct ip_vs_scheduler *sched, *old_sched;
++	struct ip_vs_scheduler *sched = NULL, *old_sched;
+ 	struct ip_vs_pe *pe = NULL, *old_pe = NULL;
+ 	int ret = 0;
+ 
+ 	/*
+ 	 * Lookup the scheduler, by 'u->sched_name'
+ 	 */
+-	sched = ip_vs_scheduler_get(u->sched_name);
+-	if (sched == NULL) {
+-		pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
+-		return -ENOENT;
++	if (strcmp(u->sched_name, "none")) {
++		sched = ip_vs_scheduler_get(u->sched_name);
++		if (!sched) {
++			pr_info("Scheduler module ip_vs_%s not found\n",
++				u->sched_name);
++			return -ENOENT;
++		}
+ 	}
+ 	old_sched = sched;
+ 
+@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
+ 
+ 	old_sched = rcu_dereference_protected(svc->scheduler, 1);
+ 	if (sched != old_sched) {
++		if (old_sched) {
++			ip_vs_unbind_scheduler(svc, old_sched);
++			RCU_INIT_POINTER(svc->scheduler, NULL);
++			/* Wait all svc->sched_data users */
++			synchronize_rcu();
++		}
+ 		/* Bind the new scheduler */
+-		ret = ip_vs_bind_scheduler(svc, sched);
+-		if (ret) {
+-			old_sched = sched;
+-			goto out;
++		if (sched) {
++			ret = ip_vs_bind_scheduler(svc, sched);
++			if (ret) {
++				ip_vs_scheduler_put(sched);
++				goto out;
++			}
+ 		}
+-		/* Unbind the old scheduler on success */
+-		ip_vs_unbind_scheduler(svc, old_sched);
+ 	}
+ 
+ 	/*
+@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+ 		const struct ip_vs_iter *iter = seq->private;
+ 		const struct ip_vs_dest *dest;
+ 		struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
++		char *sched_name = sched ? sched->name : "none";
+ 
+ 		if (iter->table == ip_vs_svc_table) {
+ #ifdef CONFIG_IP_VS_IPV6
+@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
+ 					   ip_vs_proto_name(svc->protocol),
+ 					   &svc->addr.in6,
+ 					   ntohs(svc->port),
+-					   sched->name);
++					   sched_name);
+ 			else
+ #endif
+ 				seq_printf(seq, "%s  %08X:%04X %s %s ",
+ 					   ip_vs_proto_name(svc->protocol),
+ 					   ntohl(svc->addr.ip),
+ 					   ntohs(svc->port),
+-					   sched->name,
++					   sched_name,
+ 					   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
+ 		} else {
+ 			seq_printf(seq, "FWM  %08X %s %s",
+-				   svc->fwmark, sched->name,
++				   svc->fwmark, sched_name,
+ 				   (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
+ 		}
+ 
+@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
+ {
+ 	struct ip_vs_scheduler *sched;
+ 	struct ip_vs_kstats kstats;
++	char *sched_name;
+ 
+ 	sched = rcu_dereference_protected(src->scheduler, 1);
++	sched_name = sched ? sched->name : "none";
+ 	dst->protocol = src->protocol;
+ 	dst->addr = src->addr.ip;
+ 	dst->port = src->port;
+ 	dst->fwmark = src->fwmark;
+-	strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
++	strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
+ 	dst->flags = src->flags;
+ 	dst->timeout = src->timeout / HZ;
+ 	dst->netmask = src->netmask;
+@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
+ 	struct ip_vs_flags flags = { .flags = svc->flags,
+ 				     .mask = ~0 };
+ 	struct ip_vs_kstats kstats;
++	char *sched_name;
+ 
+ 	nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
+ 	if (!nl_service)
+@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
+ 	}
+ 
+ 	sched = rcu_dereference_protected(svc->scheduler, 1);
++	sched_name = sched ? sched->name : "none";
+ 	pe = rcu_dereference_protected(svc->pe, 1);
+-	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
++	if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
+ 	    (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
+ 	    nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
+ 	    nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
+diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
+index 199760c71f39..7e8141647943 100644
+--- a/net/netfilter/ipvs/ip_vs_sched.c
++++ b/net/netfilter/ipvs/ip_vs_sched.c
+@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
+ 
+ 	if (sched->done_service)
+ 		sched->done_service(svc);
+-	/* svc->scheduler can not be set to NULL */
++	/* svc->scheduler can be set to NULL only by the caller */
+ }
+ 
+ 
+@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
+ 
+ void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+ {
+-	struct ip_vs_scheduler *sched;
++	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
++	char *sched_name = sched ? sched->name : "none";
+ 
+-	sched = rcu_dereference(svc->scheduler);
+ 	if (svc->fwmark) {
+ 		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+-			     sched->name, svc->fwmark, svc->fwmark, msg);
++			     sched_name, svc->fwmark, svc->fwmark, msg);
+ #ifdef CONFIG_IP_VS_IPV6
+ 	} else if (svc->af == AF_INET6) {
+ 		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
+-			     sched->name, ip_vs_proto_name(svc->protocol),
++			     sched_name, ip_vs_proto_name(svc->protocol),
+ 			     &svc->addr.in6, ntohs(svc->port), msg);
+ #endif
+ 	} else {
+ 		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+-			     sched->name, ip_vs_proto_name(svc->protocol),
++			     sched_name, ip_vs_proto_name(svc->protocol),
+ 			     &svc->addr.ip, ntohs(svc->port), msg);
+ 	}
+ }
+diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
+index 19b9cce6c210..150047c739fa 100644
+--- a/net/netfilter/ipvs/ip_vs_sync.c
++++ b/net/netfilter/ipvs/ip_vs_sync.c
+@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
+ 			pkts = atomic_add_return(1, &cp->in_pkts);
+ 		else
+ 			pkts = sysctl_sync_threshold(ipvs);
+-		ip_vs_sync_conn(net, cp->control, pkts);
++		ip_vs_sync_conn(net, cp, pkts);
+ 	}
+ }
+ 
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 19986ec5f21a..258f1e05250f 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+ 
+ 	memset(&fl4, 0, sizeof(fl4));
+ 	fl4.daddr = daddr;
+-	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
+ 	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
+ 			   FLOWI_FLAG_KNOWN_NH : 0;
+ 
+@@ -519,10 +518,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
+ 	if (ret == NF_ACCEPT) {
+ 		nf_reset(skb);
+ 		skb_forward_csum(skb);
++		if (!skb->sk)
++			skb_sender_cpu_clear(skb);
+ 	}
+ 	return ret;
+ }
+ 
++/* In the event of a remote destination, it's possible that we would have
++ * matches against an old socket (particularly a TIME-WAIT socket). This
++ * causes havoc down the line (ip_local_out et al. expect regular sockets
++ * and invalid memory accesses would happen), so simply drop the
++ * association in this case.
++ */
++static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
++{
++	/* If dev is set, the packet came from the LOCAL_IN callback and
++	 * not from a local TCP socket.
++	 */
++	if (skb->dev)
++		skb_orphan(skb);
++}
++
+ /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
+ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
+ 					 struct ip_vs_conn *cp, int local)
+@@ -534,12 +550,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
+ 		ip_vs_notrack(skb);
+ 	else
+ 		ip_vs_update_conntrack(skb, cp, 1);
++
++	/* Remove the early_demux association unless it's bound for the
++	 * exact same port and address on this host after translation.
++	 */
++	if (!local || cp->vport != cp->dport ||
++	    !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
++		ip_vs_drop_early_demux_sk(skb);
++
+ 	if (!local) {
+ 		skb_forward_csum(skb);
++		if (!skb->sk)
++			skb_sender_cpu_clear(skb);
+ 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+ 			NULL, skb_dst(skb)->dev, dst_output_sk);
+ 	} else
+ 		ret = NF_ACCEPT;
++
+ 	return ret;
+ }
+ 
+@@ -553,7 +580,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
+ 	if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
+ 		ip_vs_notrack(skb);
+ 	if (!local) {
++		ip_vs_drop_early_demux_sk(skb);
+ 		skb_forward_csum(skb);
++		if (!skb->sk)
++			skb_sender_cpu_clear(skb);
+ 		NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
+ 			NULL, skb_dst(skb)->dev, dst_output_sk);
+ 	} else
+@@ -841,6 +871,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
+ 	struct ipv6hdr *old_ipv6h = NULL;
+ #endif
+ 
++	ip_vs_drop_early_demux_sk(skb);
++
+ 	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
+ 		new_skb = skb_realloc_headroom(skb, max_headroom);
+ 		if (!new_skb)
+diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
+index 7a17070c5dab..b45a4223cb05 100644
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
+ 			a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+ 	}
+ 
+-	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
++	return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
++	       nf_ct_zone(a->master) == nf_ct_zone(b->master);
+ }
+ 
+ static inline int expect_matches(const struct nf_conntrack_expect *a,
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index d1c23940a86a..6b8b0abbfab4 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
+ 	}
+ 
+ 	err = nf_ct_expect_related_report(exp, portid, report);
+-	if (err < 0)
+-		goto err_exp;
+-
+-	return 0;
+-err_exp:
+ 	nf_ct_expect_put(exp);
+ err_ct:
+ 	nf_ct_put(ct);
+diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
+index ea7f36784b3d..399210693c2a 100644
+--- a/net/netfilter/nf_internals.h
++++ b/net/netfilter/nf_internals.h
+@@ -19,6 +19,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
+ /* nf_queue.c */
+ int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
+ 	     struct nf_hook_state *state, unsigned int queuenum);
++void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
+ int __init netfilter_queue_init(void);
+ 
+ /* nf_log.c */
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 675d12c69e32..a5d41dfa9f05 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);
+ 
+ void nf_log_unregister(struct nf_logger *logger)
+ {
++	const struct nf_logger *log;
+ 	int i;
+ 
+ 	mutex_lock(&nf_log_mutex);
+-	for (i = 0; i < NFPROTO_NUMPROTO; i++)
+-		RCU_INIT_POINTER(loggers[i][logger->type], NULL);
++	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
++		log = nft_log_dereference(loggers[i][logger->type]);
++		if (log == logger)
++			RCU_INIT_POINTER(loggers[i][logger->type], NULL);
++	}
+ 	mutex_unlock(&nf_log_mutex);
++	synchronize_rcu();
+ }
+ EXPORT_SYMBOL(nf_log_unregister);
+ 
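
The added synchronize_rcu() follows the standard RCU unregistration pattern:
clear the published pointer, then wait for all rcu_read_lock() readers to
finish before the caller may free the logger. A minimal sketch, with names
invented for illustration:

	#include <linux/rcupdate.h>

	struct logger;
	static struct logger __rcu *active_logger;

	static void unregister_logger(struct logger *l)
	{
		/* Only clear the slot if it still points at this logger,
		 * mirroring the nft_log_dereference() comparison above.
		 */
		if (rcu_access_pointer(active_logger) == l)
			RCU_INIT_POINTER(active_logger, NULL);
		synchronize_rcu();	/* readers done; safe to free l */
	}
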
+diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
+index 2e88032cd5ad..cd60d397fe05 100644
+--- a/net/netfilter/nf_queue.c
++++ b/net/netfilter/nf_queue.c
+@@ -105,6 +105,23 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
+ }
+ EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
+ 
++void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
++{
++	const struct nf_queue_handler *qh;
++	struct net *net;
++
++	rtnl_lock();
++	rcu_read_lock();
++	qh = rcu_dereference(queue_handler);
++	if (qh) {
++		for_each_net(net) {
++			qh->nf_hook_drop(net, ops);
++		}
++	}
++	rcu_read_unlock();
++	rtnl_unlock();
++}
++
+ /*
+  * Any packet that leaves via this function must come back
+  * through nf_reinject().
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
+index f153b07073af..f77bad46ac68 100644
+--- a/net/netfilter/nf_tables_core.c
++++ b/net/netfilter/nf_tables_core.c
+@@ -114,7 +114,8 @@ unsigned int
+ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+ {
+ 	const struct nft_chain *chain = ops->priv, *basechain = chain;
+-	const struct net *net = read_pnet(&nft_base_chain(basechain)->pnet);
++	const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
++	const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
+ 	const struct nft_rule *rule;
+ 	const struct nft_expr *expr, *last;
+ 	struct nft_regs regs;
+@@ -124,6 +125,10 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
+ 	int rulenum;
+ 	unsigned int gencursor = nft_genmask_cur(net);
+ 
++	/* Ignore chains that are not for the current network namespace */
++	if (!net_eq(net, chain_net))
++		return NF_ACCEPT;
++
+ do_chain:
+ 	rulenum = 0;
+ 	rule = list_entry(&chain->rules, struct nft_rule, list);
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 8b117c90ecd7..69e3ceffa14d 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -432,6 +432,7 @@ done:
+ static void nfnetlink_rcv(struct sk_buff *skb)
+ {
+ 	struct nlmsghdr *nlh = nlmsg_hdr(skb);
++	u_int16_t res_id;
+ 	int msglen;
+ 
+ 	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+@@ -456,7 +457,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
+ 
+ 		nfgenmsg = nlmsg_data(nlh);
+ 		skb_pull(skb, msglen);
+-		nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
++		/* Work around old nft using host byte order */
++		if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
++			res_id = NFNL_SUBSYS_NFTABLES;
++		else
++			res_id = ntohs(nfgenmsg->res_id);
++		nfnetlink_rcv_batch(skb, nlh, res_id);
+ 	} else {
+ 		netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
+ 	}
+diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
+index 11c7682fa0ea..32d0437abdd8 100644
+--- a/net/netfilter/nfnetlink_queue_core.c
++++ b/net/netfilter/nfnetlink_queue_core.c
+@@ -824,6 +824,27 @@ static struct notifier_block nfqnl_dev_notifier = {
+ 	.notifier_call	= nfqnl_rcv_dev_event,
+ };
+ 
++static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr)
++{
++	return entry->elem == (struct nf_hook_ops *)ops_ptr;
++}
++
++static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook)
++{
++	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
++	int i;
++
++	rcu_read_lock();
++	for (i = 0; i < INSTANCE_BUCKETS; i++) {
++		struct nfqnl_instance *inst;
++		struct hlist_head *head = &q->instance_table[i];
++
++		hlist_for_each_entry_rcu(inst, head, hlist)
++			nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook);
++	}
++	rcu_read_unlock();
++}
++
+ static int
+ nfqnl_rcv_nl_event(struct notifier_block *this,
+ 		   unsigned long event, void *ptr)
+@@ -1031,7 +1052,8 @@ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
+ };
+ 
+ static const struct nf_queue_handler nfqh = {
+-	.outfn	= &nfqnl_enqueue_packet,
++	.outfn		= &nfqnl_enqueue_packet,
++	.nf_hook_drop	= &nfqnl_nf_hook_drop,
+ };
+ 
+ static int
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 7f29cfc76349..4d05c7bf5a03 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -617,6 +617,13 @@ struct nft_xt {
+ 
+ static struct nft_expr_type nft_match_type;
+ 
++static bool nft_match_cmp(const struct xt_match *match,
++			  const char *name, u32 rev, u32 family)
++{
++	return strcmp(match->name, name) == 0 && match->revision == rev &&
++	       (match->family == NFPROTO_UNSPEC || match->family == family);
++}
++
+ static const struct nft_expr_ops *
+ nft_match_select_ops(const struct nft_ctx *ctx,
+ 		     const struct nlattr * const tb[])
+@@ -624,7 +631,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ 	struct nft_xt *nft_match;
+ 	struct xt_match *match;
+ 	char *mt_name;
+-	__u32 rev, family;
++	u32 rev, family;
+ 
+ 	if (tb[NFTA_MATCH_NAME] == NULL ||
+ 	    tb[NFTA_MATCH_REV] == NULL ||
+@@ -639,8 +646,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
+ 	list_for_each_entry(nft_match, &nft_match_list, head) {
+ 		struct xt_match *match = nft_match->ops.data;
+ 
+-		if (strcmp(match->name, mt_name) == 0 &&
+-		    match->revision == rev && match->family == family) {
++		if (nft_match_cmp(match, mt_name, rev, family)) {
+ 			if (!try_module_get(match->me))
+ 				return ERR_PTR(-ENOENT);
+ 
+@@ -691,6 +697,13 @@ static LIST_HEAD(nft_target_list);
+ 
+ static struct nft_expr_type nft_target_type;
+ 
++static bool nft_target_cmp(const struct xt_target *tg,
++			   const char *name, u32 rev, u32 family)
++{
++	return strcmp(tg->name, name) == 0 && tg->revision == rev &&
++	       (tg->family == NFPROTO_UNSPEC || tg->family == family);
++}
++
+ static const struct nft_expr_ops *
+ nft_target_select_ops(const struct nft_ctx *ctx,
+ 		      const struct nlattr * const tb[])
+@@ -698,7 +711,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ 	struct nft_xt *nft_target;
+ 	struct xt_target *target;
+ 	char *tg_name;
+-	__u32 rev, family;
++	u32 rev, family;
+ 
+ 	if (tb[NFTA_TARGET_NAME] == NULL ||
+ 	    tb[NFTA_TARGET_REV] == NULL ||
+@@ -713,8 +726,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ 	list_for_each_entry(nft_target, &nft_target_list, head) {
+ 		struct xt_target *target = nft_target->ops.data;
+ 
+-		if (strcmp(target->name, tg_name) == 0 &&
+-		    target->revision == rev && target->family == family) {
++		if (nft_target_cmp(target, tg_name, rev, family)) {
+ 			if (!try_module_get(target->me))
+ 				return ERR_PTR(-ENOENT);
+ 
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+index 7de33d1af9b6..7fa6d78331ed 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+@@ -382,6 +382,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 		      int byte_count)
+ {
+ 	struct ib_send_wr send_wr;
++	u32 xdr_off;
+ 	int sge_no;
+ 	int sge_bytes;
+ 	int page_no;
+@@ -416,8 +417,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 	ctxt->direction = DMA_TO_DEVICE;
+ 
+ 	/* Map the payload indicated by 'byte_count' */
++	xdr_off = 0;
+ 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
+-		int xdr_off = 0;
+ 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
+ 		byte_count -= sge_bytes;
+ 		ctxt->sge[sge_no].addr =
+@@ -455,6 +456,13 @@ static int send_reply(struct svcxprt_rdma *rdma,
+ 	}
+ 	rqstp->rq_next_page = rqstp->rq_respages + 1;
+ 
++	/* The loop above bumps sc_dma_used for each sge. The
++	 * xdr_buf.tail gets a separate sge, but resides in the
++	 * same page as xdr_buf.head. Don't count it twice.
++	 */
++	if (sge_no > ctxt->count)
++		atomic_dec(&rdma->sc_dma_used);
++
+ 	if (sge_no > rdma->sc_max_sge) {
+ 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
+ 		goto err;
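
Hoisting xdr_off out of the loop above fixes a common bug class: a running
offset declared inside the loop restarts at zero on every iteration, so each
sge would have mapped the start of the buffer instead of consecutive ranges.
A sketch with hypothetical helpers:

	static void map_payload(struct example_vec *vec, int count)
	{
		u32 xdr_off = 0;	/* correct: persists across iterations */
		int sge_no;

		for (sge_no = 1; sge_no < count; sge_no++) {
			/* u32 xdr_off = 0;  <- the buggy placement removed above */
			map_one_sge(vec, xdr_off, sge_len(vec, sge_no));
			xdr_off += sge_len(vec, sge_no);
		}
	}
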
+diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
+index 885683a3b0bd..e0406211716b 100644
+--- a/sound/arm/Kconfig
++++ b/sound/arm/Kconfig
+@@ -9,6 +9,14 @@ menuconfig SND_ARM
+ 	  Drivers that are implemented on ASoC can be found in
+ 	  "ALSA for SoC audio support" section.
+ 
++config SND_PXA2XX_LIB
++	tristate
++	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
++	select SND_DMAENGINE_PCM
++
++config SND_PXA2XX_LIB_AC97
++	bool
++
+ if SND_ARM
+ 
+ config SND_ARMAACI
+@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
+ 	tristate
+ 	select SND_PCM
+ 
+-config SND_PXA2XX_LIB
+-	tristate
+-	select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+-
+-config SND_PXA2XX_LIB_AC97
+-	bool
+-
+ config SND_PXA2XX_AC97
+ 	tristate "AC97 driver for the Intel PXA2xx chip"
+ 	depends on ARCH_PXA
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 3a24f7739aaa..b791529bf31c 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -634,6 +634,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+ 	SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
+ 	SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
++	SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
+ 	{} /* terminator */
+ };
+ 
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6fe862594e9b..57bb5a559f8e 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4182,6 +4182,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
+ 	}
+ }
+ 
++/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
++static void alc_fixup_tpt440_dock(struct hda_codec *codec,
++				  const struct hda_fixup *fix, int action)
++{
++	static const struct hda_pintbl pincfgs[] = {
++		{ 0x16, 0x21211010 }, /* dock headphone */
++		{ 0x19, 0x21a11010 }, /* dock mic */
++		{ }
++	};
++	struct alc_spec *spec = codec->spec;
++
++	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
++		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++		codec->power_save_node = 0; /* avoid click noises */
++		snd_hda_apply_pincfgs(codec, pincfgs);
++	}
++}
++
+ static void alc_shutup_dell_xps13(struct hda_codec *codec)
+ {
+ 	struct alc_spec *spec = codec->spec;
+@@ -4507,7 +4525,6 @@ enum {
+ 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC292_FIXUP_TPT440_DOCK,
+-	ALC292_FIXUP_TPT440_DOCK2,
+ 	ALC283_FIXUP_BXBT2807_MIC,
+ 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ 	ALC282_FIXUP_ASPIRE_V5_PINS,
+@@ -4972,17 +4989,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ 	},
+ 	[ALC292_FIXUP_TPT440_DOCK] = {
+ 		.type = HDA_FIXUP_FUNC,
+-		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+-		.chained = true,
+-		.chain_id = ALC292_FIXUP_TPT440_DOCK2
+-	},
+-	[ALC292_FIXUP_TPT440_DOCK2] = {
+-		.type = HDA_FIXUP_PINS,
+-		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x16, 0x21211010 }, /* dock headphone */
+-			{ 0x19, 0x21a11010 }, /* dock mic */
+-			{ }
+-		},
++		.v.func = alc_fixup_tpt440_dock,
+ 		.chained = true,
+ 		.chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
+ 	},
+@@ -5226,6 +5233,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
++	SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
+ 	SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+ 	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 25f0f45e6640..b1bc66783974 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -4522,7 +4522,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
+ 		return err;
+ 
+ 	spec = codec->spec;
+-	codec->power_save_node = 1;
++	/* enable power_save_node only for new 92HD89xx chips, as it causes
++	 * click noises on old 92HD73xx chips.
++	 */
++	if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
++		codec->power_save_node = 1;
+ 	spec->linear_tone_beep = 0;
+ 	spec->gen.mixer_nid = 0x1d;
+ 	spec->have_spdif_mux = 1;
+diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
+index c75995f2779c..b914a08258ea 100644
+--- a/sound/soc/au1x/db1200.c
++++ b/sound/soc/au1x/db1200.c
+@@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
+ 	.cpu_dai_name	= "au1xpsc_i2s.2",
+ 	.platform_name	= "au1xpsc-pcm.2",
+ 	.codec_name	= "wm8731.0-001b",
++	.dai_fmt	= SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++			  SND_SOC_DAIFMT_CBM_CFM,
+ 	.ops		= &db1200_i2s_wm8731_ops,
+ };
+ 
+@@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
+ 	.cpu_dai_name	= "au1xpsc_i2s.3",
+ 	.platform_name	= "au1xpsc-pcm.3",
+ 	.codec_name	= "wm8731.0-001b",
++	.dai_fmt	= SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
++			  SND_SOC_DAIFMT_CBM_CFM,
+ 	.ops		= &db1200_i2s_wm8731_ops,
+ };
+ 
+diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
+index 3593a1496056..3a29c0ac5d8a 100644
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -1339,8 +1339,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
+ 			sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
+ 
+ 	snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
+-			SGTL5000_BIAS_R_MASK,
+-			sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
++			SGTL5000_BIAS_VOLT_MASK,
++			sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
+ 	/*
+ 	 * disable DAP
+ 	 * TODO:
+diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
+index a3e97b46b64e..0d28e3b356f6 100644
+--- a/sound/soc/dwc/designware_i2s.c
++++ b/sound/soc/dwc/designware_i2s.c
+@@ -131,10 +131,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
+ 
+ 	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ 		for (i = 0; i < 4; i++)
+-			i2s_write_reg(dev->i2s_base, TOR(i), 0);
++			i2s_read_reg(dev->i2s_base, TOR(i));
+ 	} else {
+ 		for (i = 0; i < 4; i++)
+-			i2s_write_reg(dev->i2s_base, ROR(i), 0);
++			i2s_read_reg(dev->i2s_base, ROR(i));
+ 	}
+ }
+ 
+diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
+index 39cea80846c3..f2bf8661dd21 100644
+--- a/sound/soc/pxa/Kconfig
++++ b/sound/soc/pxa/Kconfig
+@@ -1,7 +1,6 @@
+ config SND_PXA2XX_SOC
+ 	tristate "SoC Audio for the Intel PXA2xx chip"
+ 	depends on ARCH_PXA
+-	select SND_ARM
+ 	select SND_PXA2XX_LIB
+ 	help
+ 	  Say Y or M if you want to add support for codecs attached to
+@@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
+ config SND_PXA2XX_SOC_AC97
+ 	tristate
+ 	select AC97_BUS
+-	select SND_ARM
+ 	select SND_PXA2XX_LIB_AC97
+ 	select SND_SOC_AC97_BUS
+ 
+diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
+index 1f6054650991..9e4b04e0fbd1 100644
+--- a/sound/soc/pxa/pxa2xx-ac97.c
++++ b/sound/soc/pxa/pxa2xx-ac97.c
+@@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
+ 	.reset	= pxa2xx_ac97_cold_reset,
+ };
+ 
+-static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
++static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ 	.addr		= __PREG(PCDR),
+ 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+@@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ 	.filter_data	= &pxa2xx_ac97_pcm_stereo_in_req,
+ };
+ 
+-static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
++static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
+ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+ 	.addr		= __PREG(PCDR),
+ 	.addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
+diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
+index 82e350e9501c..ac75816ada7c 100644
+--- a/sound/synth/emux/emux_oss.c
++++ b/sound/synth/emux/emux_oss.c
+@@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
+ 	struct snd_seq_oss_reg *arg;
+ 	struct snd_seq_device *dev;
+ 
+-	if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
++	/* use device #1 here to avoid conflicts with OPL3 */
++	if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
+ 			       sizeof(struct snd_seq_oss_reg), &dev) < 0)
+ 		return;
+ 
+diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
+index 29f94f6f0d9e..ed5461f065bd 100644
+--- a/tools/lib/traceevent/event-parse.c
++++ b/tools/lib/traceevent/event-parse.c
+@@ -3721,7 +3721,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
+ 	struct format_field *field;
+ 	struct printk_map *printk;
+ 	long long val, fval;
+-	unsigned long addr;
++	unsigned long long addr;
+ 	char *str;
+ 	unsigned char *hex;
+ 	int print;
+@@ -3754,13 +3754,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
+ 		 */
+ 		if (!(field->flags & FIELD_IS_ARRAY) &&
+ 		    field->size == pevent->long_size) {
+-			addr = *(unsigned long *)(data + field->offset);
++
++			/* Handle heterogeneous recording and processing
++			 * architectures
++			 *
++			 * CASE I:
++			 * Traces recorded on 32-bit devices (32-bit
++			 * addressing) and processed on 64-bit devices:
++			 * In this case, only 32 bits should be read.
++			 *
++			 * CASE II:
++			 * Traces recorded on 64-bit devices and processed
++			 * on 32-bit devices:
++			 * In this case, 64 bits must be read.
++			 */
++			addr = (pevent->long_size == 8) ?
++				*(unsigned long long *)(data + field->offset) :
++				(unsigned long long)*(unsigned int *)(data + field->offset);
++
+ 			/* Check if it matches a print format */
+ 			printk = find_printk(pevent, addr);
+ 			if (printk)
+ 				trace_seq_puts(s, printk->printk);
+ 			else
+-				trace_seq_printf(s, "%lx", addr);
++				trace_seq_printf(s, "%llx", addr);
+ 			break;
+ 		}
+ 		str = malloc(len + 1);
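
A standalone sketch of the width-safe read introduced above; this
read_traced_long() helper is hypothetical and not part of libtraceevent:

	#include <stdint.h>
	#include <string.h>

	/* Read a "long" recorded with the tracing machine's word size
	 * (pevent->long_size: 4 or 8) into a host 64-bit value.
	 */
	static unsigned long long read_traced_long(const void *p, int long_size)
	{
		if (long_size == 8) {
			uint64_t v;
			memcpy(&v, p, sizeof(v));
			return v;
		} else {
			uint32_t v;
			memcpy(&v, p, sizeof(v));
			return (unsigned long long)v;
		}
	}
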
+diff --git a/tools/perf/arch/alpha/Build b/tools/perf/arch/alpha/Build
+new file mode 100644
+index 000000000000..1bb8bf6d7fd4
+--- /dev/null
++++ b/tools/perf/arch/alpha/Build
+@@ -0,0 +1 @@
++# empty
+diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
+new file mode 100644
+index 000000000000..1bb8bf6d7fd4
+--- /dev/null
++++ b/tools/perf/arch/mips/Build
+@@ -0,0 +1 @@
++# empty
+diff --git a/tools/perf/arch/parisc/Build b/tools/perf/arch/parisc/Build
+new file mode 100644
+index 000000000000..1bb8bf6d7fd4
+--- /dev/null
++++ b/tools/perf/arch/parisc/Build
+@@ -0,0 +1 @@
++# empty
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index f7b8218785f6..a1f3ffc2786d 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -1227,7 +1227,7 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
+ static void print_aggr(char *prefix)
+ {
+ 	struct perf_evsel *counter;
+-	int cpu, cpu2, s, s2, id, nr;
++	int cpu, s, s2, id, nr;
+ 	double uval;
+ 	u64 ena, run, val;
+ 
+@@ -1240,8 +1240,7 @@ static void print_aggr(char *prefix)
+ 			val = ena = run = 0;
+ 			nr = 0;
+ 			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
+-				cpu2 = perf_evsel__cpus(counter)->map[cpu];
+-				s2 = aggr_get_id(evsel_list->cpus, cpu2);
++				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
+ 				if (s2 != id)
+ 					continue;
+ 				val += counter->counts->cpu[cpu].val;
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index 918fd8ae2d80..23eea5e7fa94 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1426,7 +1426,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ 	if (ph->needs_swap)
+ 		nr = bswap_32(nr);
+ 
+-	ph->env.nr_cpus_online = nr;
++	ph->env.nr_cpus_avail = nr;
+ 
+ 	ret = readn(fd, &nr, sizeof(nr));
+ 	if (ret != sizeof(nr))
+@@ -1435,7 +1435,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
+ 	if (ph->needs_swap)
+ 		nr = bswap_32(nr);
+ 
+-	ph->env.nr_cpus_avail = nr;
++	ph->env.nr_cpus_online = nr;
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
+index cc22b9158b93..c7966c0fa13e 100644
+--- a/tools/perf/util/hist.c
++++ b/tools/perf/util/hist.c
+@@ -151,6 +151,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
+ 	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
+ 	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ 
++	if (h->srcline)
++		hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
++
+ 	if (h->transaction)
+ 		hists__new_col_len(hists, HISTC_TRANSACTION,
+ 				   hist_entry__transaction_len());
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index a7ab6063e038..3ddfab315e19 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1253,8 +1253,6 @@ out_close:
+ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
+ 		       bool temp)
+ {
+-	GElf_Ehdr *ehdr;
+-
+ 	kcore->elfclass = elfclass;
+ 
+ 	if (temp)
+@@ -1271,9 +1269,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
+ 	if (!gelf_newehdr(kcore->elf, elfclass))
+ 		goto out_end;
+ 
+-	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
+-	if (!ehdr)
+-		goto out_end;
++	memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
+ 
+ 	return 0;
+ 
+@@ -1330,23 +1326,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
+ static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
+ 			   u64 addr, u64 len)
+ {
+-	GElf_Phdr gphdr;
+-	GElf_Phdr *phdr;
+-
+-	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
+-	if (!phdr)
+-		return -1;
+-
+-	phdr->p_type	= PT_LOAD;
+-	phdr->p_flags	= PF_R | PF_W | PF_X;
+-	phdr->p_offset	= offset;
+-	phdr->p_vaddr	= addr;
+-	phdr->p_paddr	= 0;
+-	phdr->p_filesz	= len;
+-	phdr->p_memsz	= len;
+-	phdr->p_align	= page_size;
+-
+-	if (!gelf_update_phdr(kcore->elf, idx, phdr))
++	GElf_Phdr phdr = {
++		.p_type		= PT_LOAD,
++		.p_flags	= PF_R | PF_W | PF_X,
++		.p_offset	= offset,
++		.p_vaddr	= addr,
++		.p_paddr	= 0,
++		.p_filesz	= len,
++		.p_memsz	= len,
++		.p_align	= page_size,
++	};
++
++	if (!gelf_update_phdr(kcore->elf, idx, &phdr))
+ 		return -1;
+ 
+ 	return 0;
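
The rewrite above can drop the gelf_getphdr() fetch because a C designated
initializer zeroes every member it does not name, e.g. (hypothetical struct):

	struct example_phdr { int p_type; long p_offset, p_vaddr, p_memsz; };

	struct example_phdr ph = {
		.p_type   = 1,
		.p_offset = 4096,
	};	/* .p_vaddr and .p_memsz are guaranteed to be zero */
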
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 9ff4193dfa49..79db45336e3a 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
+ 	return KVM_MMIO_BUS;
+ }
+ 
+-static int
+-kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
++				enum kvm_bus bus_idx,
++				struct kvm_ioeventfd *args)
+ {
+-	enum kvm_bus              bus_idx;
+-	struct _ioeventfd        *p;
+-	struct eventfd_ctx       *eventfd;
+-	int                       ret;
+-
+-	bus_idx = ioeventfd_bus_from_flags(args->flags);
+-	/* must be natural-word sized, or 0 to ignore length */
+-	switch (args->len) {
+-	case 0:
+-	case 1:
+-	case 2:
+-	case 4:
+-	case 8:
+-		break;
+-	default:
+-		return -EINVAL;
+-	}
+-
+-	/* check for range overflow */
+-	if (args->addr + args->len < args->addr)
+-		return -EINVAL;
+ 
+-	/* check for extra flags that we don't understand */
+-	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
+-		return -EINVAL;
+-
+-	/* ioeventfd with no length can't be combined with DATAMATCH */
+-	if (!args->len &&
+-	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
+-			   KVM_IOEVENTFD_FLAG_DATAMATCH))
+-		return -EINVAL;
++	struct eventfd_ctx *eventfd;
++	struct _ioeventfd *p;
++	int ret;
+ 
+ 	eventfd = eventfd_ctx_fdget(args->fd);
+ 	if (IS_ERR(eventfd))
+@@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 	if (ret < 0)
+ 		goto unlock_fail;
+ 
+-	/* When length is ignored, MMIO is also put on a separate bus, for
+-	 * faster lookups.
+-	 */
+-	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
+-		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
+-					      p->addr, 0, &p->dev);
+-		if (ret < 0)
+-			goto register_fail;
+-	}
+-
+ 	kvm->buses[bus_idx]->ioeventfd_count++;
+ 	list_add_tail(&p->list, &kvm->ioeventfds);
+ 
+@@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 
+ 	return 0;
+ 
+-register_fail:
+-	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+ unlock_fail:
+ 	mutex_unlock(&kvm->slots_lock);
+ 
+@@ -873,14 +835,13 @@ fail:
+ }
+ 
+ static int
+-kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
++			   struct kvm_ioeventfd *args)
+ {
+-	enum kvm_bus              bus_idx;
+ 	struct _ioeventfd        *p, *tmp;
+ 	struct eventfd_ctx       *eventfd;
+ 	int                       ret = -ENOENT;
+ 
+-	bus_idx = ioeventfd_bus_from_flags(args->flags);
+ 	eventfd = eventfd_ctx_fdget(args->fd);
+ 	if (IS_ERR(eventfd))
+ 		return PTR_ERR(eventfd);
+@@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 			continue;
+ 
+ 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
+-		if (!p->length) {
+-			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
+-						  &p->dev);
+-		}
+ 		kvm->buses[bus_idx]->ioeventfd_count--;
+ 		ioeventfd_release(p);
+ 		ret = 0;
+@@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ 	return ret;
+ }
+ 
++static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++{
++	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
++	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++
++	if (!args->len && bus_idx == KVM_MMIO_BUS)
++		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
++
++	return ret;
++}
++
++static int
++kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
++{
++	enum kvm_bus              bus_idx;
++	int ret;
++
++	bus_idx = ioeventfd_bus_from_flags(args->flags);
++	/* must be natural-word sized, or 0 to ignore length */
++	switch (args->len) {
++	case 0:
++	case 1:
++	case 2:
++	case 4:
++	case 8:
++		break;
++	default:
++		return -EINVAL;
++	}
++
++	/* check for range overflow */
++	if (args->addr + args->len < args->addr)
++		return -EINVAL;
++
++	/* check for extra flags that we don't understand */
++	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
++		return -EINVAL;
++
++	/* ioeventfd with no length can't be combined with DATAMATCH */
++	if (!args->len &&
++	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
++			   KVM_IOEVENTFD_FLAG_DATAMATCH))
++		return -EINVAL;
++
++	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
++	if (ret)
++		goto fail;
++
++	/* When length is ignored, MMIO is also put on a separate bus, for
++	 * faster lookups.
++	 */
++	if (!args->len && bus_idx == KVM_MMIO_BUS) {
++		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
++		if (ret < 0)
++			goto fast_fail;
++	}
++
++	return 0;
++
++fast_fail:
++	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
++fail:
++	return ret;
++}
++
+ int
+ kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
+ {
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 90977418aeb6..85422985235f 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2935,10 +2935,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+ static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+ 				 const struct kvm_io_range *r2)
+ {
+-	if (r1->addr < r2->addr)
++	gpa_t addr1 = r1->addr;
++	gpa_t addr2 = r2->addr;
++
++	if (addr1 < addr2)
+ 		return -1;
+-	if (r1->addr + r1->len > r2->addr + r2->len)
++
++	/* If r2->len == 0, match the exact address.  If r2->len != 0,
++	 * accept any overlapping write.  Any order is acceptable for
++	 * overlapping ranges, because kvm_io_bus_get_first_dev ensures
++	 * we process all of them.
++	 */
++	if (r2->len) {
++		addr1 += r1->len;
++		addr2 += r2->len;
++	}
++
++	if (addr1 > addr2)
+ 		return 1;
++
+ 	return 0;
+ }
+ 


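The kvm_io_bus_cmp() hunk that closes this patch is easier to follow in isolation. Below is a reduced, standalone model of the new comparator semantics (gpa_t and io_range here are simplified stand-ins, not the kernel's types): a key compared against a zero-length registered range must match its start address exactly, while a range with a non-zero length matches any overlapping access.

/* Reduced model of the fixed comparator; simplified stand-in types. */
#include <stdio.h>

typedef unsigned long long gpa_t;

struct io_range {
	gpa_t addr;
	int len;
};

static int io_range_cmp(const struct io_range *r1, const struct io_range *r2)
{
	gpa_t addr1 = r1->addr;
	gpa_t addr2 = r2->addr;

	if (addr1 < addr2)
		return -1;

	/* r2->len == 0 requests an exact start-address match; otherwise
	 * compare range ends so that overlapping ranges sort as equal. */
	if (r2->len) {
		addr1 += r1->len;
		addr2 += r2->len;
	}

	if (addr1 > addr2)
		return 1;

	return 0;
}

int main(void)
{
	struct io_range dev = { .addr = 0x1000, .len = 8 };
	struct io_range key = { .addr = 0x1004, .len = 0 };

	/* Prints 0: the zero-length key at 0x1004 is treated as hitting
	 * the registered range [0x1000, 0x1008). */
	printf("%d\n", io_range_cmp(&key, &dev));
	return 0;
}

This exact-match-versus-overlap split is what lets the reworked eventfd code register the same zero-length ioeventfd on both the regular and the fast MMIO bus without the two lookups interfering.
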
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-10-03 16:07 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-10-03 16:07 UTC (permalink / raw
  To: gentoo-commits

commit:     516de23c184688cd1071c1c815fbb2e9827612ad
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Oct  3 16:07:16 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Oct  3 16:07:16 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=516de23c

Linux patch 4.1.10

 0000_README             |    4 +
 1009_linux-4.1.10.patch | 1353 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 1357 insertions(+)

diff --git a/0000_README b/0000_README
index 348e8f5..b9b941a 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-4.1.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.9
 
+Patch:  1009_linux-4.1.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1009_linux-4.1.10.patch b/1009_linux-4.1.10.patch
new file mode 100644
index 0000000..8d80808
--- /dev/null
+++ b/1009_linux-4.1.10.patch
@@ -0,0 +1,1353 @@
+diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
+index 41b3f3f864e8..5d88f37480b6 100644
+--- a/Documentation/devicetree/bindings/net/ethernet.txt
++++ b/Documentation/devicetree/bindings/net/ethernet.txt
+@@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
+   flow control thresholds.
+ - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
+   is used for components that can have configurable fifo sizes.
++- managed: string, specifies the PHY management type. Supported values are:
++  "auto", "in-band-status". "auto" is the default, it uses MDIO for
++  management if fixed-link is not specified.
+ 
+ Child nodes of the Ethernet controller are typically the individual PHY devices
+ connected via the MDIO bus (sometimes the MDIO bus controller is separate).
+ They are described in the phy.txt file in this same directory.
++For non-MDIO PHY management see fixed-link.txt.
+diff --git a/Makefile b/Makefile
+index e071176b2ce6..d02f16b510dc 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
+index f1ff39a3d1c1..54d946a9eee6 100644
+--- a/drivers/block/zram/zcomp.c
++++ b/drivers/block/zram/zcomp.c
+@@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp)
+  * allocate new zcomp and initialize it. return compressing
+  * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
+  * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
+- * case of allocation error.
++ * case of allocation error, or any other error potentially
++ * returned by functions zcomp_strm_{multi,single}_create.
+  */
+ struct zcomp *zcomp_create(const char *compress, int max_strm)
+ {
+ 	struct zcomp *comp;
+ 	struct zcomp_backend *backend;
++	int error;
+ 
+ 	backend = find_backend(compress);
+ 	if (!backend)
+@@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
+ 
+ 	comp->backend = backend;
+ 	if (max_strm > 1)
+-		zcomp_strm_multi_create(comp, max_strm);
++		error = zcomp_strm_multi_create(comp, max_strm);
+ 	else
+-		zcomp_strm_single_create(comp);
+-	if (!comp->stream) {
++		error = zcomp_strm_single_create(comp);
++	if (error) {
+ 		kfree(comp);
+-		return ERR_PTR(-ENOMEM);
++		return ERR_PTR(error);
+ 	}
+ 	return comp;
+ }
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index cedb572bf25a..db9ebbc1a732 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -417,7 +417,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch  *ds, int port)
+ 	core_writel(priv, port, CORE_FAST_AGE_PORT);
+ 
+ 	reg = core_readl(priv, CORE_FAST_AGE_CTRL);
+-	reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
++	reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
+ 	core_writel(priv, reg, CORE_FAST_AGE_CTRL);
+ 
+ 	do {
+@@ -431,6 +431,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch  *ds, int port)
+ 	if (!timeout)
+ 		return -ETIMEDOUT;
+ 
++	core_writel(priv, 0, CORE_FAST_AGE_CTRL);
++
+ 	return 0;
+ }
+ 
+@@ -506,7 +508,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+ 	u32 reg;
+ 
+ 	reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+-	cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
++	cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
+ 
+ 	switch (state) {
+ 	case BR_STATE_DISABLED:
+@@ -530,10 +532,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+ 	}
+ 
+ 	/* Fast-age ARL entries if we are moving a port from Learning or
+-	 * Forwarding state to Disabled, Blocking or Listening state
++	 * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
++	 * state (hw_state)
+ 	 */
+ 	if (cur_hw_state != hw_state) {
+-		if (cur_hw_state & 4 && !(hw_state & 4)) {
++		if (cur_hw_state >= G_MISTP_LEARN_STATE &&
++		    hw_state <= G_MISTP_LISTEN_STATE) {
+ 			ret = bcm_sf2_sw_fast_age_port(ds, port);
+ 			if (ret) {
+ 				pr_err("%s: fast-ageing failed\n", __func__);
+@@ -889,15 +893,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
+ 					 struct fixed_phy_status *status)
+ {
+ 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
+-	u32 duplex, pause, speed;
++	u32 duplex, pause;
+ 	u32 reg;
+ 
+ 	duplex = core_readl(priv, CORE_DUPSTS);
+ 	pause = core_readl(priv, CORE_PAUSESTS);
+-	speed = core_readl(priv, CORE_SPDSTS);
+-
+-	speed >>= (port * SPDSTS_SHIFT);
+-	speed &= SPDSTS_MASK;
+ 
+ 	status->link = 0;
+ 
+@@ -925,18 +925,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
+ 		reg &= ~LINK_STS;
+ 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ 
+-	switch (speed) {
+-	case SPDSTS_10:
+-		status->speed = SPEED_10;
+-		break;
+-	case SPDSTS_100:
+-		status->speed = SPEED_100;
+-		break;
+-	case SPDSTS_1000:
+-		status->speed = SPEED_1000;
+-		break;
+-	}
+-
+ 	if ((pause & (1 << port)) &&
+ 	    (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
+ 		status->asym_pause = 1;
+diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
+index 22e2ebf31333..789d7b7737da 100644
+--- a/drivers/net/dsa/bcm_sf2.h
++++ b/drivers/net/dsa/bcm_sf2.h
+@@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)	\
+ 	spin_unlock(&priv->indir_lock);					\
+ 	return (u64)indir << 32 | dir;					\
+ }									\
+-static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off,	\
+-							u64 val)	\
++static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,	\
++							u32 off)	\
+ {									\
+ 	spin_lock(&priv->indir_lock);					\
+ 	reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE);	\
+diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
+index da48e66377b5..8207877d6237 100644
+--- a/drivers/net/ethernet/altera/altera_tse_main.c
++++ b/drivers/net/ethernet/altera/altera_tse_main.c
+@@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
+ 
+ 	if (rxcomplete < budget) {
+ 
+-		napi_gro_flush(napi, false);
+-		__napi_complete(napi);
++		napi_complete(napi);
+ 
+ 		netdev_dbg(priv->dev,
+ 			   "NAPI Complete, did %d packets with budget %d\n",
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 66d47e448e4d..570390b5cd42 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -1396,6 +1396,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+ 		if ((status & BD_ENET_RX_LAST) == 0)
+ 			netdev_err(ndev, "rcv is not +last\n");
+ 
++		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+ 
+ 		/* Check for errors. */
+ 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 74d0389bf233..4d608f0117cd 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3029,8 +3029,8 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	const char *dt_mac_addr;
+ 	char hw_mac_addr[ETH_ALEN];
+ 	const char *mac_from;
++	const char *managed;
+ 	int phy_mode;
+-	int fixed_phy = 0;
+ 	int err;
+ 
+ 	/* Our multiqueue support is not complete, so for now, only
+@@ -3064,7 +3064,6 @@ static int mvneta_probe(struct platform_device *pdev)
+ 			dev_err(&pdev->dev, "cannot register fixed PHY\n");
+ 			goto err_free_irq;
+ 		}
+-		fixed_phy = 1;
+ 
+ 		/* In the case of a fixed PHY, the DT node associated
+ 		 * to the PHY is the Ethernet MAC DT node.
+@@ -3088,8 +3087,10 @@ static int mvneta_probe(struct platform_device *pdev)
+ 	pp = netdev_priv(dev);
+ 	pp->phy_node = phy_node;
+ 	pp->phy_interface = phy_mode;
+-	pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
+-				fixed_phy;
++
++	err = of_property_read_string(dn, "managed", &managed);
++	pp->use_inband_status = (err == 0 &&
++				 strcmp(managed, "in-band-status") == 0);
+ 
+ 	pp->clk = devm_clk_get(&pdev->dev, NULL);
+ 	if (IS_ERR(pp->clk)) {
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index eab4e080ebd2..80aac20104de 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -1256,8 +1256,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+ 		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
+ 		memcpy(rss_context->rss_key, priv->rss_key,
+ 		       MLX4_EN_RSS_KEY_SIZE);
+-		netdev_rss_key_fill(rss_context->rss_key,
+-				    MLX4_EN_RSS_KEY_SIZE);
+ 	} else {
+ 		en_err(priv, "Unknown RSS hash function requested\n");
+ 		err = -EINVAL;
+diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
+index 8c350c5d54ad..58858c5589db 100644
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -1054,10 +1054,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ 		return 0;
+ 
+ 	case TUNSETSNDBUF:
+-		if (get_user(u, up))
++		if (get_user(s, sp))
+ 			return -EFAULT;
+ 
+-		q->sk.sk_sndbuf = u;
++		q->sk.sk_sndbuf = s;
+ 		return 0;
+ 
+ 	case TUNGETVNETHDRSZ:
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+index 1960b46add65..479b93f9581c 100644
+--- a/drivers/net/phy/fixed_phy.c
++++ b/drivers/net/phy/fixed_phy.c
+@@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
+ 	u16 lpagb = 0;
+ 	u16 lpa = 0;
+ 
++	if (!fp->status.link)
++		goto done;
++	bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
++
+ 	if (fp->status.duplex) {
+ 		bmcr |= BMCR_FULLDPLX;
+ 
+@@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
+ 		}
+ 	}
+ 
+-	if (fp->status.link)
+-		bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
+-
+ 	if (fp->status.pause)
+ 		lpa |= LPA_PAUSE_CAP;
+ 
+ 	if (fp->status.asym_pause)
+ 		lpa |= LPA_PAUSE_ASYM;
+ 
++done:
+ 	fp->regs[MII_PHYSID1] = 0;
+ 	fp->regs[MII_PHYSID2] = 0;
+ 
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 3c86b107275a..e0498571ae26 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
+ {
+ 	struct usbnet		*dev = netdev_priv(net);
+ 	struct driver_info	*info = dev->driver_info;
+-	int			retval, pm;
++	int			retval, pm, mpn;
+ 
+ 	clear_bit(EVENT_DEV_OPEN, &dev->flags);
+ 	netif_stop_queue (net);
+@@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)
+ 
+ 	usbnet_purge_paused_rxq(dev);
+ 
++	mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
++
+ 	/* deferred work (task, timer, softirq) must also stop.
+ 	 * can't flush_scheduled_work() until we drop rtnl (later),
+ 	 * else workers could deadlock; so make workers a NOP.
+@@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
+ 	if (!pm)
+ 		usb_autopm_put_interface(dev->intf);
+ 
+-	if (info->manage_power &&
+-	    !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
++	if (info->manage_power && mpn)
+ 		info->manage_power(dev, 0);
+ 	else
+ 		usb_autopm_put_interface(dev->intf);
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 21a0fbf1ed94..0085b8df83e2 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -2212,6 +2212,8 @@ static int vxlan_open(struct net_device *dev)
+ 
+ 	if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
+ 		ret = vxlan_igmp_join(vxlan);
++		if (ret == -EADDRINUSE)
++			ret = 0;
+ 		if (ret) {
+ 			vxlan_sock_release(vs);
+ 			return ret;
+diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
+index 0c064485d1c2..bec8ec2b31f6 100644
+--- a/drivers/of/of_mdio.c
++++ b/drivers/of/of_mdio.c
+@@ -263,7 +263,8 @@ EXPORT_SYMBOL(of_phy_attach);
+ bool of_phy_is_fixed_link(struct device_node *np)
+ {
+ 	struct device_node *dn;
+-	int len;
++	int len, err;
++	const char *managed;
+ 
+ 	/* New binding */
+ 	dn = of_get_child_by_name(np, "fixed-link");
+@@ -272,6 +273,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
+ 		return true;
+ 	}
+ 
++	err = of_property_read_string(np, "managed", &managed);
++	if (err == 0 && strcmp(managed, "auto") != 0)
++		return true;
++
+ 	/* Old binding */
+ 	if (of_get_property(np, "fixed-link", &len) &&
+ 	    len == (5 * sizeof(__be32)))
+@@ -286,8 +291,18 @@ int of_phy_register_fixed_link(struct device_node *np)
+ 	struct fixed_phy_status status = {};
+ 	struct device_node *fixed_link_node;
+ 	const __be32 *fixed_link_prop;
+-	int len;
++	int len, err;
+ 	struct phy_device *phy;
++	const char *managed;
++
++	err = of_property_read_string(np, "managed", &managed);
++	if (err == 0) {
++		if (strcmp(managed, "in-band-status") == 0) {
++			/* status is zeroed, namely its .link member */
++			phy = fixed_phy_register(PHY_POLL, &status, np);
++			return IS_ERR(phy) ? PTR_ERR(phy) : 0;
++		}
++	}
+ 
+ 	/* New binding */
+ 	fixed_link_node = of_get_child_by_name(np, "fixed-link");
+diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
+index 06697315a088..fb4dd7b3ee71 100644
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+ #define HPWMI_HARDWARE_QUERY 0x4
+ #define HPWMI_WIRELESS_QUERY 0x5
+ #define HPWMI_BIOS_QUERY 0x9
++#define HPWMI_FEATURE_QUERY 0xb
+ #define HPWMI_HOTKEY_QUERY 0xc
+-#define HPWMI_FEATURE_QUERY 0xd
++#define HPWMI_FEATURE2_QUERY 0xd
+ #define HPWMI_WIRELESS2_QUERY 0x1b
+ #define HPWMI_POSTCODEERROR_QUERY 0x2a
+ 
+@@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void)
+ 	return (state & 0x4) ? 1 : 0;
+ }
+ 
+-static int __init hp_wmi_bios_2009_later(void)
++static int __init hp_wmi_bios_2008_later(void)
+ {
+ 	int state = 0;
+ 	int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
+ 				       sizeof(state), sizeof(state));
+-	if (ret)
+-		return ret;
++	if (!ret)
++		return 1;
+ 
+-	return (state & 0x10) ? 1 : 0;
++	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+ }
+ 
+-static int hp_wmi_enable_hotkeys(void)
++static int __init hp_wmi_bios_2009_later(void)
+ {
+-	int ret;
+-	int query = 0x6e;
++	int state = 0;
++	int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
++				       sizeof(state), sizeof(state));
++	if (!ret)
++		return 1;
+ 
+-	ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
+-				   0);
++	return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
++}
+ 
++static int __init hp_wmi_enable_hotkeys(void)
++{
++	int value = 0x6e;
++	int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
++				       sizeof(value), 0);
+ 	if (ret)
+ 		return -EINVAL;
+ 	return 0;
+@@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void)
+ 			    hp_wmi_tablet_state());
+ 	input_sync(hp_wmi_input_dev);
+ 
+-	if (hp_wmi_bios_2009_later() == 4)
++	if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
+ 		hp_wmi_enable_hotkeys();
+ 
+ 	status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index ff667e18b2d6..9ba383f5b0c4 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -980,7 +980,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+ 
+ 	ih = igmpv3_report_hdr(skb);
+ 	num = ntohs(ih->ngrec);
+-	len = sizeof(*ih);
++	len = skb_transport_offset(skb) + sizeof(*ih);
+ 
+ 	for (i = 0; i < num; i++) {
+ 		len += sizeof(*grec);
+@@ -1035,7 +1035,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+ 
+ 	icmp6h = icmp6_hdr(skb);
+ 	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
+-	len = sizeof(*icmp6h);
++	len = skb_transport_offset(skb) + sizeof(*icmp6h);
+ 
+ 	for (i = 0; i < num; i++) {
+ 		__be16 *nsrcs, _nsrcs;
+diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
+index 9a12668f7d62..0ad144fb0c79 100644
+--- a/net/core/fib_rules.c
++++ b/net/core/fib_rules.c
+@@ -615,15 +615,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
+ {
+ 	int idx = 0;
+ 	struct fib_rule *rule;
++	int err = 0;
+ 
+ 	rcu_read_lock();
+ 	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
+ 		if (idx < cb->args[1])
+ 			goto skip;
+ 
+-		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
+-				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
+-				     NLM_F_MULTI, ops) < 0)
++		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
++				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
++				       NLM_F_MULTI, ops);
++		if (err)
+ 			break;
+ skip:
+ 		idx++;
+@@ -632,7 +634,7 @@ skip:
+ 	cb->args[1] = idx;
+ 	rules_ops_put(ops);
+ 
+-	return skb->len;
++	return err;
+ }
+ 
+ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+@@ -648,7 +650,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
+ 		if (ops == NULL)
+ 			return -EAFNOSUPPORT;
+ 
+-		return dump_rules(skb, cb, ops);
++		dump_rules(skb, cb, ops);
++
++		return skb->len;
+ 	}
+ 
+ 	rcu_read_lock();
+diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
+index 74dddf84adcd..556ecf96a385 100644
+--- a/net/core/sock_diag.c
++++ b/net/core/sock_diag.c
+@@ -86,6 +86,9 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
+ 		goto out;
+ 
+ 	fprog = filter->prog->orig_prog;
++	if (!fprog)
++		goto out;
++
+ 	flen = bpf_classic_proglen(fprog);
+ 
+ 	attr = nla_reserve(skb, attrtype, flen);
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a369e8a70b2c..986440b24978 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2893,6 +2893,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
+ 	skb_reserve(skb, MAX_TCP_HEADER);
+ 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
+ 			     TCPHDR_ACK | TCPHDR_RST);
++	skb_mstamp_get(&skb->skb_mstamp);
+ 	/* Send it off. */
+ 	if (tcp_transmit_skb(sk, skb, 0, priority))
+ 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
+diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
+index 447a7fbd1bb6..f5e2ba1c18bf 100644
+--- a/net/ipv6/exthdrs_offload.c
++++ b/net/ipv6/exthdrs_offload.c
+@@ -36,6 +36,6 @@ out:
+ 	return ret;
+ 
+ out_rt:
+-	inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
++	inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
+ 	goto out;
+ }
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index a38d3ac0f18f..69f4f689f06a 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
+ 	struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
+ 
+ 	ip6gre_tunnel_unlink(ign, t);
++	ip6_tnl_dst_reset(t);
+ 	dev_put(dev);
+ }
+ 
+diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
+index 74ceb73c1c9a..5f36266b1f5e 100644
+--- a/net/ipv6/ip6mr.c
++++ b/net/ipv6/ip6mr.c
+@@ -550,7 +550,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
+ 
+ 	if (it->cache == &mrt->mfc6_unres_queue)
+ 		spin_unlock_bh(&mfc_unres_lock);
+-	else if (it->cache == mrt->mfc6_cache_array)
++	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
+ 		read_unlock(&mrt_lock);
+ }
+ 
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index c73ae5039e46..f371fefa7fdc 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1515,7 +1515,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
+ 	return -EINVAL;
+ }
+ 
+-int ip6_route_add(struct fib6_config *cfg)
++int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
+ {
+ 	int err;
+ 	struct net *net = cfg->fc_nlinfo.nl_net;
+@@ -1523,7 +1523,6 @@ int ip6_route_add(struct fib6_config *cfg)
+ 	struct net_device *dev = NULL;
+ 	struct inet6_dev *idev = NULL;
+ 	struct fib6_table *table;
+-	struct mx6_config mxc = { .mx = NULL, };
+ 	int addr_type;
+ 
+ 	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
+@@ -1719,6 +1718,32 @@ install_route:
+ 
+ 	cfg->fc_nlinfo.nl_net = dev_net(dev);
+ 
++	*rt_ret = rt;
++
++	return 0;
++out:
++	if (dev)
++		dev_put(dev);
++	if (idev)
++		in6_dev_put(idev);
++	if (rt)
++		dst_free(&rt->dst);
++
++	*rt_ret = NULL;
++
++	return err;
++}
++
++int ip6_route_add(struct fib6_config *cfg)
++{
++	struct mx6_config mxc = { .mx = NULL, };
++	struct rt6_info *rt = NULL;
++	int err;
++
++	err = ip6_route_info_create(cfg, &rt);
++	if (err)
++		goto out;
++
+ 	err = ip6_convert_metrics(&mxc, cfg);
+ 	if (err)
+ 		goto out;
+@@ -1726,14 +1751,12 @@ install_route:
+ 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
+ 
+ 	kfree(mxc.mx);
++
+ 	return err;
+ out:
+-	if (dev)
+-		dev_put(dev);
+-	if (idev)
+-		in6_dev_put(idev);
+ 	if (rt)
+ 		dst_free(&rt->dst);
++
+ 	return err;
+ }
+ 
+@@ -2496,19 +2519,78 @@ errout:
+ 	return err;
+ }
+ 
+-static int ip6_route_multipath(struct fib6_config *cfg, int add)
++struct rt6_nh {
++	struct rt6_info *rt6_info;
++	struct fib6_config r_cfg;
++	struct mx6_config mxc;
++	struct list_head next;
++};
++
++static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
++{
++	struct rt6_nh *nh;
++
++	list_for_each_entry(nh, rt6_nh_list, next) {
++		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
++		        &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
++		        nh->r_cfg.fc_ifindex);
++	}
++}
++
++static int ip6_route_info_append(struct list_head *rt6_nh_list,
++				 struct rt6_info *rt, struct fib6_config *r_cfg)
++{
++	struct rt6_nh *nh;
++	struct rt6_info *rtnh;
++	int err = -EEXIST;
++
++	list_for_each_entry(nh, rt6_nh_list, next) {
++		/* check if rt6_info already exists */
++		rtnh = nh->rt6_info;
++
++		if (rtnh->dst.dev == rt->dst.dev &&
++		    rtnh->rt6i_idev == rt->rt6i_idev &&
++		    ipv6_addr_equal(&rtnh->rt6i_gateway,
++				    &rt->rt6i_gateway))
++			return err;
++	}
++
++	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
++	if (!nh)
++		return -ENOMEM;
++	nh->rt6_info = rt;
++	err = ip6_convert_metrics(&nh->mxc, r_cfg);
++	if (err) {
++		kfree(nh);
++		return err;
++	}
++	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
++	list_add_tail(&nh->next, rt6_nh_list);
++
++	return 0;
++}
++
++static int ip6_route_multipath_add(struct fib6_config *cfg)
+ {
+ 	struct fib6_config r_cfg;
+ 	struct rtnexthop *rtnh;
++	struct rt6_info *rt;
++	struct rt6_nh *err_nh;
++	struct rt6_nh *nh, *nh_safe;
+ 	int remaining;
+ 	int attrlen;
+-	int err = 0, last_err = 0;
++	int err = 1;
++	int nhn = 0;
++	int replace = (cfg->fc_nlinfo.nlh &&
++		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
++	LIST_HEAD(rt6_nh_list);
+ 
+ 	remaining = cfg->fc_mp_len;
+-beginning:
+ 	rtnh = (struct rtnexthop *)cfg->fc_mp;
+ 
+-	/* Parse a Multipath Entry */
++	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
++	 * rt6_info structs per nexthop
++	 */
+ 	while (rtnh_ok(rtnh, remaining)) {
+ 		memcpy(&r_cfg, cfg, sizeof(*cfg));
+ 		if (rtnh->rtnh_ifindex)
+@@ -2524,22 +2606,32 @@ beginning:
+ 				r_cfg.fc_flags |= RTF_GATEWAY;
+ 			}
+ 		}
+-		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
++
++		err = ip6_route_info_create(&r_cfg, &rt);
++		if (err)
++			goto cleanup;
++
++		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
+ 		if (err) {
+-			last_err = err;
+-			/* If we are trying to remove a route, do not stop the
+-			 * loop when ip6_route_del() fails (because next hop is
+-			 * already gone), we should try to remove all next hops.
+-			 */
+-			if (add) {
+-				/* If add fails, we should try to delete all
+-				 * next hops that have been already added.
+-				 */
+-				add = 0;
+-				remaining = cfg->fc_mp_len - remaining;
+-				goto beginning;
+-			}
++			dst_free(&rt->dst);
++			goto cleanup;
++		}
++
++		rtnh = rtnh_next(rtnh, &remaining);
++	}
++
++	err_nh = NULL;
++	list_for_each_entry(nh, &rt6_nh_list, next) {
++		err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
++		/* nh->rt6_info is used or freed at this point, reset to NULL*/
++		nh->rt6_info = NULL;
++		if (err) {
++			if (replace && nhn)
++				ip6_print_replace_route_err(&rt6_nh_list);
++			err_nh = nh;
++			goto add_errout;
+ 		}
++
+ 		/* Because each route is added like a single route we remove
+ 		 * these flags after the first nexthop: if there is a collision,
+ 		 * we have already failed to add the first nexthop:
+@@ -2549,6 +2641,63 @@ beginning:
+ 		 */
+ 		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+ 						     NLM_F_REPLACE);
++		nhn++;
++	}
++
++	goto cleanup;
++
++add_errout:
++	/* Delete routes that were already added */
++	list_for_each_entry(nh, &rt6_nh_list, next) {
++		if (err_nh == nh)
++			break;
++		ip6_route_del(&nh->r_cfg);
++	}
++
++cleanup:
++	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
++		if (nh->rt6_info)
++			dst_free(&nh->rt6_info->dst);
++		if (nh->mxc.mx)
++			kfree(nh->mxc.mx);
++		list_del(&nh->next);
++		kfree(nh);
++	}
++
++	return err;
++}
++
++static int ip6_route_multipath_del(struct fib6_config *cfg)
++{
++	struct fib6_config r_cfg;
++	struct rtnexthop *rtnh;
++	int remaining;
++	int attrlen;
++	int err = 1, last_err = 0;
++
++	remaining = cfg->fc_mp_len;
++	rtnh = (struct rtnexthop *)cfg->fc_mp;
++
++	/* Parse a Multipath Entry */
++	while (rtnh_ok(rtnh, remaining)) {
++		memcpy(&r_cfg, cfg, sizeof(*cfg));
++		if (rtnh->rtnh_ifindex)
++			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
++
++		attrlen = rtnh_attrlen(rtnh);
++		if (attrlen > 0) {
++			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
++
++			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
++			if (nla) {
++				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
++				r_cfg.fc_flags |= RTF_GATEWAY;
++			}
++		}
++		err = ip6_route_del(&r_cfg);
++		if (err)
++			last_err = err;
++
+ 		rtnh = rtnh_next(rtnh, &remaining);
+ 	}
+ 
+@@ -2565,7 +2714,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		return err;
+ 
+ 	if (cfg.fc_mp)
+-		return ip6_route_multipath(&cfg, 0);
++		return ip6_route_multipath_del(&cfg);
+ 	else
+ 		return ip6_route_del(&cfg);
+ }
+@@ -2580,7 +2729,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
+ 		return err;
+ 
+ 	if (cfg.fc_mp)
+-		return ip6_route_multipath(&cfg, 1);
++		return ip6_route_multipath_add(&cfg);
+ 	else
+ 		return ip6_route_add(&cfg);
+ }
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index 4856d975492d..980121e75d2e 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -123,6 +123,24 @@ static inline u32 netlink_group_mask(u32 group)
+ 	return group ? 1 << (group - 1) : 0;
+ }
+ 
++static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
++					   gfp_t gfp_mask)
++{
++	unsigned int len = skb_end_offset(skb);
++	struct sk_buff *new;
++
++	new = alloc_skb(len, gfp_mask);
++	if (new == NULL)
++		return NULL;
++
++	NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
++	NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
++	NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
++
++	memcpy(skb_put(new, len), skb->data, len);
++	return new;
++}
++
+ int netlink_add_tap(struct netlink_tap *nt)
+ {
+ 	if (unlikely(nt->dev->type != ARPHRD_NETLINK))
+@@ -204,7 +222,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
+ 	int ret = -ENOMEM;
+ 
+ 	dev_hold(dev);
+-	nskb = skb_clone(skb, GFP_ATOMIC);
++
++	if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
++		nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
++	else
++		nskb = skb_clone(skb, GFP_ATOMIC);
+ 	if (nskb) {
+ 		nskb->dev = dev;
+ 		nskb->protocol = htons((u16) sk->sk_protocol);
+@@ -276,11 +298,6 @@ static void netlink_rcv_wake(struct sock *sk)
+ }
+ 
+ #ifdef CONFIG_NETLINK_MMAP
+-static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
+-{
+-	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
+-}
+-
+ static bool netlink_rx_is_mmaped(struct sock *sk)
+ {
+ 	return nlk_sk(sk)->rx_ring.pg_vec != NULL;
+@@ -832,7 +849,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
+ }
+ 
+ #else /* CONFIG_NETLINK_MMAP */
+-#define netlink_skb_is_mmaped(skb)	false
+ #define netlink_rx_is_mmaped(sk)	false
+ #define netlink_tx_is_mmaped(sk)	false
+ #define netlink_mmap			sock_no_mmap
+@@ -1080,8 +1096,8 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 
+ 	lock_sock(sk);
+ 
+-	err = -EBUSY;
+-	if (nlk_sk(sk)->portid)
++	err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
++	if (nlk_sk(sk)->bound)
+ 		goto err;
+ 
+ 	err = -ENOMEM;
+@@ -1101,10 +1117,13 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 			err = -EOVERFLOW;
+ 		if (err == -EEXIST)
+ 			err = -EADDRINUSE;
+-		nlk_sk(sk)->portid = 0;
+ 		sock_put(sk);
+ 	}
+ 
++	/* We need to ensure that the socket is hashed and visible. */
++	smp_wmb();
++	nlk_sk(sk)->bound = portid;
++
+ err:
+ 	release_sock(sk);
+ 	return err;
+@@ -1484,6 +1503,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
+ 	int err;
+ 	long unsigned int groups = nladdr->nl_groups;
++	bool bound;
+ 
+ 	if (addr_len < sizeof(struct sockaddr_nl))
+ 		return -EINVAL;
+@@ -1500,9 +1520,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 			return err;
+ 	}
+ 
+-	if (nlk->portid)
++	bound = nlk->bound;
++	if (bound) {
++		/* Ensure nlk->portid is up-to-date. */
++		smp_rmb();
++
+ 		if (nladdr->nl_pid != nlk->portid)
+ 			return -EINVAL;
++	}
+ 
+ 	if (nlk->netlink_bind && groups) {
+ 		int group;
+@@ -1518,7 +1543,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
+ 		}
+ 	}
+ 
+-	if (!nlk->portid) {
++	/* No need for barriers here as we return to user-space without
++	 * using any of the bound attributes.
++	 */
++	if (!bound) {
+ 		err = nladdr->nl_pid ?
+ 			netlink_insert(sk, nladdr->nl_pid) :
+ 			netlink_autobind(sock);
+@@ -1566,7 +1594,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
+ 	    !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
+ 		return -EPERM;
+ 
+-	if (!nlk->portid)
++	/* No need for barriers here as we return to user-space without
++	 * using any of the bound attributes.
++	 */
++	if (!nlk->bound)
+ 		err = netlink_autobind(sock);
+ 
+ 	if (err == 0) {
+@@ -2323,10 +2354,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 		dst_group = nlk->dst_group;
+ 	}
+ 
+-	if (!nlk->portid) {
++	if (!nlk->bound) {
+ 		err = netlink_autobind(sock);
+ 		if (err)
+ 			goto out;
++	} else {
++		/* Ensure nlk is hashed and visible. */
++		smp_rmb();
+ 	}
+ 
+ 	/* It's a really convoluted way for userland to ask for mmaped
+diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
+index 89008405d6b4..14437d9b1965 100644
+--- a/net/netlink/af_netlink.h
++++ b/net/netlink/af_netlink.h
+@@ -35,6 +35,7 @@ struct netlink_sock {
+ 	unsigned long		state;
+ 	size_t			max_recvmsg_len;
+ 	wait_queue_head_t	wait;
++	bool			bound;
+ 	bool			cb_running;
+ 	struct netlink_callback	cb;
+ 	struct mutex		*cb_mutex;
+@@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
+ 	return container_of(sk, struct netlink_sock, sk);
+ }
+ 
++static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
++{
++#ifdef CONFIG_NETLINK_MMAP
++	return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
++#else
++	return false;
++#endif /* CONFIG_NETLINK_MMAP */
++}
++
+ struct netlink_table {
+ 	struct rhashtable	hash;
+ 	struct hlist_head	mc_list;
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 096c6276e6b9..27e14962b504 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -906,7 +906,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ 	if (error)
+ 		goto err_kfree_flow;
+ 
+-	ovs_flow_mask_key(&new_flow->key, &key, &mask);
++	ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
+ 
+ 	/* Extract flow identifier. */
+ 	error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
+@@ -1033,7 +1033,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+ 	struct sw_flow_key masked_key;
+ 	int error;
+ 
+-	ovs_flow_mask_key(&masked_key, key, mask);
++	ovs_flow_mask_key(&masked_key, key, true, mask);
+ 	error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
+ 	if (error) {
+ 		OVS_NLERR(log,
+diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
+index 4613df8c8290..aa349514e4cb 100644
+--- a/net/openvswitch/flow_table.c
++++ b/net/openvswitch/flow_table.c
+@@ -56,20 +56,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
+ }
+ 
+ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-		       const struct sw_flow_mask *mask)
++		       bool full, const struct sw_flow_mask *mask)
+ {
+-	const long *m = (const long *)((const u8 *)&mask->key +
+-				mask->range.start);
+-	const long *s = (const long *)((const u8 *)src +
+-				mask->range.start);
+-	long *d = (long *)((u8 *)dst + mask->range.start);
++	int start = full ? 0 : mask->range.start;
++	int len = full ? sizeof *dst : range_n_bytes(&mask->range);
++	const long *m = (const long *)((const u8 *)&mask->key + start);
++	const long *s = (const long *)((const u8 *)src + start);
++	long *d = (long *)((u8 *)dst + start);
+ 	int i;
+ 
+-	/* The memory outside of the 'mask->range' are not set since
+-	 * further operations on 'dst' only uses contents within
+-	 * 'mask->range'.
++	/* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
++	 * if 'full' is false the memory outside of the 'mask->range' is left
++	 * uninitialized. This can be used as an optimization when further
++	 * operations on 'dst' only use contents within 'mask->range'.
+ 	 */
+-	for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
++	for (i = 0; i < len; i += sizeof(long))
+ 		*d++ = *s++ & *m++;
+ }
+ 
+@@ -473,7 +474,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
+ 	u32 hash;
+ 	struct sw_flow_key masked_key;
+ 
+-	ovs_flow_mask_key(&masked_key, unmasked, mask);
++	ovs_flow_mask_key(&masked_key, unmasked, false, mask);
+ 	hash = flow_hash(&masked_key, &mask->range);
+ 	head = find_bucket(ti, hash);
+ 	hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
+diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
+index 616eda10d955..2dd9900f533d 100644
+--- a/net/openvswitch/flow_table.h
++++ b/net/openvswitch/flow_table.h
+@@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
+ bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
+ 
+ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
+-		       const struct sw_flow_mask *mask);
++		       bool full, const struct sw_flow_mask *mask);
+ #endif /* flow_table.h */
+diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
+index 715e01e5910a..f23a3b68bba6 100644
+--- a/net/sched/cls_fw.c
++++ b/net/sched/cls_fw.c
+@@ -33,7 +33,6 @@
+ 
+ struct fw_head {
+ 	u32			mask;
+-	bool			mask_set;
+ 	struct fw_filter __rcu	*ht[HTSIZE];
+ 	struct rcu_head		rcu;
+ };
+@@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ 			}
+ 		}
+ 	} else {
+-		/* old method */
++		/* Old method: classify the packet using its skb mark. */
+ 		if (id && (TC_H_MAJ(id) == 0 ||
+ 			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
+ 			res->classid = id;
+@@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
+ 
+ static int fw_init(struct tcf_proto *tp)
+ {
+-	struct fw_head *head;
+-
+-	head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
+-	if (head == NULL)
+-		return -ENOBUFS;
+-
+-	head->mask_set = false;
+-	rcu_assign_pointer(tp->root, head);
++	/* We don't allocate fw_head here, because in the old method
++	 * we don't need it at all.
++	 */
+ 	return 0;
+ }
+ 
+@@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
+ 	int err;
+ 
+ 	if (!opt)
+-		return handle ? -EINVAL : 0;
++		return handle ? -EINVAL : 0; /* Succeed if it is old method. */
+ 
+ 	err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
+ 	if (err < 0)
+@@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!handle)
+ 		return -EINVAL;
+ 
+-	if (!head->mask_set) {
+-		head->mask = 0xFFFFFFFF;
++	if (!head) {
++		u32 mask = 0xFFFFFFFF;
+ 		if (tb[TCA_FW_MASK])
+-			head->mask = nla_get_u32(tb[TCA_FW_MASK]);
+-		head->mask_set = true;
++			mask = nla_get_u32(tb[TCA_FW_MASK]);
++
++		head = kzalloc(sizeof(*head), GFP_KERNEL);
++		if (!head)
++			return -ENOBUFS;
++		head->mask = mask;
++
++		rcu_assign_pointer(tp->root, head);
+ 	}
+ 
+ 	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
+diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
+index cab9e9b43967..4fbb67430ce4 100644
+--- a/net/sched/cls_u32.c
++++ b/net/sched/cls_u32.c
+@@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force)
+ 					return false;
+ 			}
+ 		}
++
++		if (tp_c->refcnt > 1)
++			return false;
++
++		if (tp_c->refcnt == 1) {
++			struct tc_u_hnode *ht;
++
++			for (ht = rtnl_dereference(tp_c->hlist);
++			     ht;
++			     ht = rtnl_dereference(ht->next))
++				if (!ht_empty(ht))
++					return false;
++		}
+ 	}
+ 
+ 	if (root_ht && --root_ht->refcnt == 0)
+diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
+index 53b7acde9aa3..e13c3c3ea4ac 100644
+--- a/net/sctp/protocol.c
++++ b/net/sctp/protocol.c
+@@ -1166,7 +1166,7 @@ static void sctp_v4_del_protocol(void)
+ 	unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
+ }
+ 
+-static int __net_init sctp_net_init(struct net *net)
++static int __net_init sctp_defaults_init(struct net *net)
+ {
+ 	int status;
+ 
+@@ -1259,12 +1259,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+ 	sctp_dbg_objcnt_init(net);
+ 
+-	/* Initialize the control inode/socket for handling OOTB packets.  */
+-	if ((status = sctp_ctl_sock_init(net))) {
+-		pr_err("Failed to initialize the SCTP control sock\n");
+-		goto err_ctl_sock_init;
+-	}
+-
+ 	/* Initialize the local address list. */
+ 	INIT_LIST_HEAD(&net->sctp.local_addr_list);
+ 	spin_lock_init(&net->sctp.local_addr_lock);
+@@ -1280,9 +1274,6 @@ static int __net_init sctp_net_init(struct net *net)
+ 
+ 	return 0;
+ 
+-err_ctl_sock_init:
+-	sctp_dbg_objcnt_exit(net);
+-	sctp_proc_exit(net);
+ err_init_proc:
+ 	cleanup_sctp_mibs(net);
+ err_init_mibs:
+@@ -1291,15 +1282,12 @@ err_sysctl_register:
+ 	return status;
+ }
+ 
+-static void __net_exit sctp_net_exit(struct net *net)
++static void __net_exit sctp_defaults_exit(struct net *net)
+ {
+ 	/* Free the local address list */
+ 	sctp_free_addr_wq(net);
+ 	sctp_free_local_addr_list(net);
+ 
+-	/* Free the control endpoint.  */
+-	inet_ctl_sock_destroy(net->sctp.ctl_sock);
+-
+ 	sctp_dbg_objcnt_exit(net);
+ 
+ 	sctp_proc_exit(net);
+@@ -1307,9 +1295,32 @@ static void __net_exit sctp_net_exit(struct net *net)
+ 	sctp_sysctl_net_unregister(net);
+ }
+ 
+-static struct pernet_operations sctp_net_ops = {
+-	.init = sctp_net_init,
+-	.exit = sctp_net_exit,
++static struct pernet_operations sctp_defaults_ops = {
++	.init = sctp_defaults_init,
++	.exit = sctp_defaults_exit,
++};
++
++static int __net_init sctp_ctrlsock_init(struct net *net)
++{
++	int status;
++
++	/* Initialize the control inode/socket for handling OOTB packets.  */
++	status = sctp_ctl_sock_init(net);
++	if (status)
++		pr_err("Failed to initialize the SCTP control sock\n");
++
++	return status;
++}
++
++static void __net_init sctp_ctrlsock_exit(struct net *net)
++{
++	/* Free the control endpoint.  */
++	inet_ctl_sock_destroy(net->sctp.ctl_sock);
++}
++
++static struct pernet_operations sctp_ctrlsock_ops = {
++	.init = sctp_ctrlsock_init,
++	.exit = sctp_ctrlsock_exit,
+ };
+ 
+ /* Initialize the universe into something sensible.  */
+@@ -1442,8 +1453,11 @@ static __init int sctp_init(void)
+ 	sctp_v4_pf_init();
+ 	sctp_v6_pf_init();
+ 
+-	status = sctp_v4_protosw_init();
++	status = register_pernet_subsys(&sctp_defaults_ops);
++	if (status)
++		goto err_register_defaults;
+ 
++	status = sctp_v4_protosw_init();
+ 	if (status)
+ 		goto err_protosw_init;
+ 
+@@ -1451,9 +1465,9 @@ static __init int sctp_init(void)
+ 	if (status)
+ 		goto err_v6_protosw_init;
+ 
+-	status = register_pernet_subsys(&sctp_net_ops);
++	status = register_pernet_subsys(&sctp_ctrlsock_ops);
+ 	if (status)
+-		goto err_register_pernet_subsys;
++		goto err_register_ctrlsock;
+ 
+ 	status = sctp_v4_add_protocol();
+ 	if (status)
+@@ -1469,12 +1483,14 @@ out:
+ err_v6_add_protocol:
+ 	sctp_v4_del_protocol();
+ err_add_protocol:
+-	unregister_pernet_subsys(&sctp_net_ops);
+-err_register_pernet_subsys:
++	unregister_pernet_subsys(&sctp_ctrlsock_ops);
++err_register_ctrlsock:
+ 	sctp_v6_protosw_exit();
+ err_v6_protosw_init:
+ 	sctp_v4_protosw_exit();
+ err_protosw_init:
++	unregister_pernet_subsys(&sctp_defaults_ops);
++err_register_defaults:
+ 	sctp_v4_pf_exit();
+ 	sctp_v6_pf_exit();
+ 	sctp_sysctl_unregister();
+@@ -1507,12 +1523,14 @@ static __exit void sctp_exit(void)
+ 	sctp_v6_del_protocol();
+ 	sctp_v4_del_protocol();
+ 
+-	unregister_pernet_subsys(&sctp_net_ops);
++	unregister_pernet_subsys(&sctp_ctrlsock_ops);
+ 
+ 	/* Free protosw registrations */
+ 	sctp_v6_protosw_exit();
+ 	sctp_v4_protosw_exit();
+ 
++	unregister_pernet_subsys(&sctp_defaults_ops);
++
+ 	/* Unregister with socket layer. */
+ 	sctp_v6_pf_exit();
+ 	sctp_v4_pf_exit();


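Of the fixes above, the openvswitch ovs_flow_mask_key() change lends itself to a standalone illustration: the masked copy can now run either over just mask->range (the lookup fast path) or over the whole key, so callers that later read bytes outside the range see fully initialized data. A reduced model follows, with flat byte arrays standing in for struct sw_flow_key and a byte-wise loop where the kernel works in longs (names and sizes here are illustrative only):

#include <stdio.h>
#include <string.h>

#define KEY_LEN 16

/* Reduced model of the masked copy: dst[i] = src[i] & mask[i], either
 * over the whole key (full != 0) or only over [start, start + len). */
static void mask_key(unsigned char *dst, const unsigned char *src,
		     const unsigned char *mask, int start, int len, int full)
{
	int from = full ? 0 : start;
	int n = full ? KEY_LEN : len;
	int i;

	for (i = 0; i < n; i++)
		dst[from + i] = src[from + i] & mask[from + i];
}

int main(void)
{
	unsigned char src[KEY_LEN], mask[KEY_LEN], dst[KEY_LEN];

	memset(src, 0xAB, sizeof(src));
	memset(mask, 0x0F, sizeof(mask));
	memset(dst, 0xEE, sizeof(dst));		/* poison: visible if untouched */

	mask_key(dst, src, mask, 4, 8, 0);	/* partial: dst[0] stays 0xEE */
	printf("partial: dst[0]=%02x dst[4]=%02x\n", dst[0], dst[4]);

	mask_key(dst, src, mask, 4, 8, 1);	/* full: every byte defined */
	printf("full:    dst[0]=%02x dst[4]=%02x\n", dst[0], dst[4]);
	return 0;
}
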
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-10-02 12:08 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-10-02 12:08 UTC (permalink / raw
  To: gentoo-commits

commit:     cb0333eb392976ebff5a7d56008620f7c0862790
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Oct  2 12:08:15 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Oct  2 12:08:15 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=cb0333eb

inet: Patch to fix potential deadlock in reqsk_queue_unlink()

 0000_README                                        |  4 +++
 2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch | 32 ++++++++++++++++++++++
 2 files changed, 36 insertions(+)

diff --git a/0000_README b/0000_README
index 46b8cb0..348e8f5 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch:  1600_dm-crypt-limit-max-segment-size.patch
 From:   https://bugzilla.kernel.org/show_bug.cgi?id=104421
 Desc:   dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE.
 
+Patch:  2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
+From:   http://git.kernel.org/
+Desc:   inet: Patch to fix potential deadlock in reqsk_queue_unlink()
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch b/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
new file mode 100644
index 0000000..890f5e5
--- /dev/null
+++ b/2000_inet-deadlock-in-reqsk-queue-unlink-fix.patch
@@ -0,0 +1,32 @@
+From 83fccfc3940c4a2db90fd7e7079f5b465cd8c6af Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 13 Aug 2015 15:44:51 -0700
+Subject: inet: fix potential deadlock in reqsk_queue_unlink()
+
+When replacing del_timer() with del_timer_sync(), I introduced
+a deadlock condition :
+
+reqsk_queue_unlink() is called from inet_csk_reqsk_queue_drop()
+
+inet_csk_reqsk_queue_drop() can be called from many contexts,
+one being the timer handler itself (reqsk_timer_handler()).
+
+In this case, del_timer_sync() loops forever.
+
+Simple fix is to test if timer is pending.
+
+Fixes: 2235f2ac75fd ("inet: fix races with reqsk timers")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+--- a/net/ipv4/inet_connection_sock.c	2015-10-02 07:49:42.759957268 -0400
++++ b/net/ipv4/inet_connection_sock.c	2015-10-02 07:50:12.929957111 -0400
+@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct re
+ 	}
+ 
+ 	spin_unlock(&queue->syn_wait_lock);
+-	if (del_timer_sync(&req->rsk_timer))
++	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+ 		reqsk_put(req);
+ 	return found;
+ }


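The failure mode described above is mechanical: del_timer_sync() waits for a running timer handler to complete, so invoking it from that same handler can never return. A small userspace model of the guard, with the kernel timer API simulated by flags (an illustration of the pattern, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool handler_running;
static bool timer_is_pending;

/* Stand-in for del_timer_sync(): deactivates the timer, then waits for
 * any running handler to finish -- a wait that can never end when it is
 * the handler itself doing the waiting. */
static void del_timer_sync_model(void)
{
	timer_is_pending = false;
	while (handler_running)
		;	/* unbounded spin when invoked from the handler */
}

/* Stand-in for reqsk_timer_handler() reaching the unlink path. */
static void timer_handler(void)
{
	handler_running = true;
	timer_is_pending = false;	/* a firing timer was already dequeued */

	/* The fix: only attempt the synchronous cancel while the timer is
	 * still pending, which is never the case from inside the handler. */
	if (timer_is_pending)
		del_timer_sync_model();

	handler_running = false;
}

int main(void)
{
	timer_handler();
	puts("handler returned: no self-deadlock");
	return 0;
}

The timer_pending() test is what breaks the cycle: by the time the handler runs, the timer has already been dequeued, so the synchronous cancel is simply skipped in that context.
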
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-09-29 17:50 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-09-29 17:50 UTC (permalink / raw
  To: gentoo-commits

commit:     a246795e14884680031e6838755d88dfa0ce1790
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Sep 29 17:50:31 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Sep 29 17:50:31 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a246795e

Linux patch 4.1.9

 0000_README            |    4 +
 1008_linux-4.1.9.patch | 5955 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5959 insertions(+)

diff --git a/0000_README b/0000_README
index 4a96d2e..46b8cb0 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch:  1007_linux-4.1.8.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.8
 
+Patch:  1008_linux-4.1.9.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.9
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1008_linux-4.1.9.patch b/1008_linux-4.1.9.patch
new file mode 100644
index 0000000..000c373
--- /dev/null
+++ b/1008_linux-4.1.9.patch
@@ -0,0 +1,5955 @@
+diff --git a/Makefile b/Makefile
+index dbf3baa5fabb..e071176b2ce6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c
+index bd245d34952d..a0765e7ed6c7 100644
+--- a/arch/arm/boot/compressed/decompress.c
++++ b/arch/arm/boot/compressed/decompress.c
+@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
+ 
+ int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
+ {
+-	return decompress(input, len, NULL, NULL, output, NULL, error);
++	return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
+ }
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index d9631ecddd56..d6223cbcb661 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ 	 * Map the VGIC hardware resources before running a vcpu the first
+ 	 * time on this VM.
+ 	 */
+-	if (unlikely(!vgic_ready(kvm))) {
++	if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) {
+ 		ret = kvm_vgic_map_resources(kvm);
+ 		if (ret)
+ 			return ret;
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7796af4b1d6f..6f0a3b41b009 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -101,6 +101,10 @@ config NO_IOPORT_MAP
+ config STACKTRACE_SUPPORT
+ 	def_bool y
+ 
++config ILLEGAL_POINTER_VALUE
++	hex
++	default 0xdead000000000000
++
+ config LOCKDEP_SUPPORT
+ 	def_bool y
+ 
+@@ -409,6 +413,22 @@ config ARM64_ERRATUM_845719
+ 
+ 	  If unsure, say Y.
+ 
++config ARM64_ERRATUM_843419
++	bool "Cortex-A53: 843419: A load or store might access an incorrect address"
++	depends on MODULES
++	default y
++	help
++	  This option builds kernel modules using the large memory model in
++	  order to avoid the use of the ADRP instruction, which can cause
++	  a subsequent memory access to use an incorrect address on Cortex-A53
++	  parts up to r0p4.
++
++	  Note that the kernel itself must be linked with a version of ld
++	  which fixes potentially affected ADRP instructions through the
++	  use of veneers.
++
++	  If unsure, say Y.
++
+ endmenu
+ 
+ 
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 4d2a925998f9..81151663ef38 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -30,6 +30,10 @@ endif
+ 
+ CHECKFLAGS	+= -D__aarch64__
+ 
++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
++CFLAGS_MODULE	+= -mcmodel=large
++endif
++
+ # Default value
+ head-y		:= arch/arm64/kernel/head.o
+ 
+diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
+index f800d45ea226..44a59c20e773 100644
+--- a/arch/arm64/include/asm/memory.h
++++ b/arch/arm64/include/asm/memory.h
+@@ -114,6 +114,14 @@ extern phys_addr_t		memstart_addr;
+ #define PHYS_OFFSET		({ memstart_addr; })
+ 
+ /*
++ * The maximum physical address that the linear direct mapping
++ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
++ * a 2's complement signed quantity and negated to derive the
++ * maximum size of the linear mapping.)
++ */
++#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
++
++/*
+  * PFNs are used to describe any physical page; this means
+  * PFN 0 == physical address 0.
+  *
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 3dca15634e69..c31e59fe2cb8 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next)
+ void fpsimd_flush_thread(void)
+ {
+ 	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
++	fpsimd_flush_task_state(current);
+ 	set_thread_flag(TIF_FOREIGN_FPSTATE);
+ }
+ 
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index 19f915e8f6e0..36aa31ff2c06 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -565,6 +565,11 @@ CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
+ 	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
+ #endif
+ 
++	/* EL2 debug */
++	mrs	x0, pmcr_el0			// Disable debug access traps
++	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
++	msr	mdcr_el2, x0			// all PMU counters from EL1
++
+ 	/* Stage-2 translation */
+ 	msr	vttbr_el2, xzr
+ 
+diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
+index 67bf4107f6ef..876eb8df50bf 100644
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ 			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
+ 					     AARCH64_INSN_IMM_ADR);
+ 			break;
++#ifndef CONFIG_ARM64_ERRATUM_843419
+ 		case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ 			overflow_check = false;
+ 		case R_AARCH64_ADR_PREL_PG_HI21:
+ 			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
+ 					     AARCH64_INSN_IMM_ADR);
+ 			break;
++#endif
+ 		case R_AARCH64_ADD_ABS_LO12_NC:
+ 		case R_AARCH64_LDST8_ABS_LO12_NC:
+ 			overflow_check = false;
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index c0cff3410166..c58aee062590 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ 
+ /*
+  * VFP save/restore code.
++ *
++ * We have to be careful with endianness, since the fpsimd context-switch
++ * code operates on 128-bit (Q) register values whereas the compat ABI
++ * uses an array of 64-bit (D) registers. Consequently, we need to swap
++ * the two halves of each Q register when running on a big-endian CPU.
+  */
++union __fpsimd_vreg {
++	__uint128_t	raw;
++	struct {
++#ifdef __AARCH64EB__
++		u64	hi;
++		u64	lo;
++#else
++		u64	lo;
++		u64	hi;
++#endif
++	};
++};
++
+ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ {
+ 	struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr, fpexc;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	/*
+ 	 * Save the hardware registers to the fpsimd_state structure.
+@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	/*
+ 	 * Now copy the FP registers. Since the registers are packed,
+ 	 * we can copy the prefix we want (V0-V15) as it is.
+-	 * FIXME: Won't work if big endian.
+ 	 */
+-	err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+-			      sizeof(frame->ufp.fpregs));
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg = {
++			.raw = fpsimd->vregs[i >> 1],
++		};
++
++		__put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++	}
+ 
+ 	/* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ 	fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	compat_ulong_t magic = VFP_MAGIC;
+ 	compat_ulong_t size = VFP_STORAGE_SIZE;
+ 	compat_ulong_t fpscr;
+-	int err = 0;
++	int i, err = 0;
+ 
+ 	__get_user_error(magic, &frame->magic, err);
+ 	__get_user_error(size, &frame->size, err);
+@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+ 	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ 		return -EINVAL;
+ 
+-	/*
+-	 * Copy the FP registers into the start of the fpsimd_state.
+-	 * FIXME: Won't work if big endian.
+-	 */
+-	err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+-				sizeof(frame->ufp.fpregs));
++	/* Copy the FP registers into the start of the fpsimd_state. */
++	for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
++		union __fpsimd_vreg vreg;
++
++		__get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
++		__get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
++		fpsimd.vregs[i >> 1] = vreg.raw;
++	}
+ 
+ 	/* Extract the fpsr and the fpcr from the fpscr */
+ 	__get_user_error(fpscr, &frame->ufp.fpscr, err);
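The union above is the whole trick, and it compiles essentially unchanged in user space with GCC. A stand-alone sketch (values are made up; the kernel's __AARCH64EB__ test is spelled here via the compiler's generic byte-order macro):

    #include <stdint.h>
    #include <stdio.h>

    union vreg {
            __uint128_t raw;                /* one 128-bit Q register */
            struct {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
                    uint64_t hi, lo;        /* big endian: high half stored first */
    #else
                    uint64_t lo, hi;        /* little endian: low half stored first */
    #endif
            };
    };

    int main(void)
    {
            union vreg v;

            v.raw = ((__uint128_t)0x1111222233334444ULL << 64) | 0x5555666677778888ULL;
            /* Whatever the host byte order, the compat frame must see
             * D[2i] = low half and D[2i+1] = high half, which is exactly
             * what copying .lo then .hi guarantees. */
            printf("lo=%#llx hi=%#llx\n",
                   (unsigned long long)v.lo, (unsigned long long)v.hi);
            return 0;
    }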
+diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
+index 5befd010e232..64f9e60b31da 100644
+--- a/arch/arm64/kvm/hyp.S
++++ b/arch/arm64/kvm/hyp.S
+@@ -844,8 +844,6 @@
+ 	mrs	x3, cntv_ctl_el0
+ 	and	x3, x3, #3
+ 	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
+-	bic	x3, x3, #1		// Clear Enable
+-	msr	cntv_ctl_el0, x3
+ 
+ 	isb
+ 
+@@ -853,6 +851,9 @@
+ 	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]
+ 
+ 1:
++	// Disable the virtual timer
++	msr	cntv_ctl_el0, xzr
++
+ 	// Allow physical timer/counter access for the host
+ 	mrs	x2, cnthctl_el2
+ 	orr	x2, x2, #3
+@@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run)
+ 	// Guest context
+ 	add	x2, x0, #VCPU_CONTEXT
+ 
++	// We must restore the 32-bit state before the sysregs, thanks
++	// to Cortex-A57 erratum #852523.
++	restore_guest_32bit_state
+ 	bl __restore_sysregs
+ 	bl __restore_fpsimd
+ 
+ 	skip_debug_state x3, 1f
+ 	bl	__restore_debug
+ 1:
+-	restore_guest_32bit_state
+ 	restore_guest_regs
+ 
+ 	// That's it, no more messing around.
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
+index 28a09529f206..3a7692745868 100644
+--- a/arch/m32r/boot/compressed/misc.c
++++ b/arch/m32r/boot/compressed/misc.c
+@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
+ 	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
+ 
+ 	puts("\nDecompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output_data, 0,
++			NULL, error);
+ 	puts("done.\nBooting the kernel.\n");
+ }
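This and the following misc.c hunks track an interface change: the old decompress() entry point gained an output-length bound and was renamed. The call sites in this patch are consistent with a prototype of roughly this shape (a sketch inferred from the callers, not copied from the decompressor headers):

    int __decompress(unsigned char *buf, long len,
                     long (*fill)(void *, unsigned long),
                     long (*flush)(void *, unsigned long),
                     unsigned char *out_buf, long out_len,
                     long *pos,
                     void (*error)(char *x));

Callers that cannot bound the output, as here, pass 0 for out_len; the x86 hunk further down passes the real output_len so the decompressor can stop before overrunning its buffer.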
+diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
+index 54831069a206..080cd53bac36 100644
+--- a/arch/mips/boot/compressed/decompress.c
++++ b/arch/mips/boot/compressed/decompress.c
+@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
+ 	puts("\n");
+ 
+ 	/* Decompress the kernel with according algorithm */
+-	decompress((char *)zimage_start, zimage_size, 0, 0,
+-		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
++	__decompress((char *)zimage_start, zimage_size, 0, 0,
++		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+ 
+ 	/* FIXME: should we flush cache here? */
+ 	puts("Now, booting the kernel...\n");
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 6983fcd48131..2b95e34fa9e8 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -1137,7 +1137,7 @@ emul:
+ 			break;
+ 
+ 		case mfhc_op:
+-			if (!cpu_has_mips_r2)
++			if (!cpu_has_mips_r2_r6)
+ 				goto sigill;
+ 
+ 			/* copregister rd -> gpr[rt] */
+@@ -1148,7 +1148,7 @@ emul:
+ 			break;
+ 
+ 		case mthc_op:
+-			if (!cpu_has_mips_r2)
++			if (!cpu_has_mips_r2_r6)
+ 				goto sigill;
+ 
+ 			/* copregister rd <- gpr[rt] */
+@@ -1181,6 +1181,24 @@ emul:
+ 			}
+ 			break;
+ 
++		case bc1eqz_op:
++		case bc1nez_op:
++			if (!cpu_has_mips_r6 || delay_slot(xcp))
++				return SIGILL;
++
++			cond = likely = 0;
++			switch (MIPSInst_RS(ir)) {
++			case bc1eqz_op:
++				if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
++				    cond = 1;
++				break;
++			case bc1nez_op:
++				if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
++				    cond = 1;
++				break;
++			}
++			goto branch_common;
++
+ 		case bc_op:
+ 			if (delay_slot(xcp))
+ 				return SIGILL;
+@@ -1207,7 +1225,7 @@ emul:
+ 			case bct_op:
+ 				break;
+ 			}
+-
++branch_common:
+ 			set_delay_slot(xcp);
+ 			if (cond) {
+ 				/*
+diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
+index f3191db6e2e9..c0eab24f6a9e 100644
+--- a/arch/parisc/kernel/irq.c
++++ b/arch/parisc/kernel/irq.c
+@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 	struct pt_regs *old_regs;
+ 	unsigned long eirr_val;
+ 	int irq, cpu = smp_processor_id();
+-#ifdef CONFIG_SMP
+ 	struct irq_desc *desc;
++#ifdef CONFIG_SMP
+ 	cpumask_t dest;
+ #endif
+ 
+@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
+ 		goto set_out;
+ 	irq = eirr_to_irq(eirr_val);
+ 
+-#ifdef CONFIG_SMP
++	/* Filter out spurious interrupts, mostly from the serial port at bootup */
+ 	desc = irq_to_desc(irq);
++	if (unlikely(!desc->action))
++		goto set_out;
++
++#ifdef CONFIG_SMP
+ 	cpumask_copy(&dest, desc->irq_data.affinity);
+ 	if (irqd_is_per_cpu(&desc->irq_data) &&
+ 	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 7ef22e3387e0..0b8d26d3ba43 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -821,7 +821,7 @@ cas2_action:
+ 	/* 64bit CAS */
+ #ifdef CONFIG_64BIT
+ 19:	ldd,ma	0(%sr3,%r26), %r29
+-	sub,=	%r29, %r25, %r0
++	sub,*=	%r29, %r25, %r0
+ 	b,n	cas2_end
+ 20:	std,ma	%r24, 0(%sr3,%r26)
+ 	copy	%r0, %r28
+diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
+index 73eddda53b8e..4eec430d8fa8 100644
+--- a/arch/powerpc/boot/Makefile
++++ b/arch/powerpc/boot/Makefile
+@@ -28,6 +28,9 @@ BOOTCFLAGS	+= -m64
+ endif
+ ifdef CONFIG_CPU_BIG_ENDIAN
+ BOOTCFLAGS	+= -mbig-endian
++else
++BOOTCFLAGS	+= -mlittle-endian
++BOOTCFLAGS	+= $(call cc-option,-mabi=elfv2)
+ endif
+ 
+ BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
+diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
+index 43e6ad424c7f..88d27e3258d2 100644
+--- a/arch/powerpc/include/asm/pgtable-ppc64.h
++++ b/arch/powerpc/include/asm/pgtable-ppc64.h
+@@ -135,7 +135,19 @@
+ #define pte_iterate_hashed_end() } while(0)
+ 
+ #ifdef CONFIG_PPC_HAS_HASH_64K
+-#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
++/*
++ * We expect this to be called only for user addresses or kernel virtual
++ * addresses other than the linear mapping.
++ */
++#define pte_pagesize_index(mm, addr, pte)			\
++	({							\
++		unsigned int psize;				\
++		if (is_kernel_addr(addr))			\
++			psize = MMU_PAGE_4K;			\
++		else						\
++			psize = get_slice_psize(mm, addr);	\
++		psize;						\
++	})
+ #else
+ #define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+ #endif
+diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
+index 7a4ede16b283..b77ef369c0f0 100644
+--- a/arch/powerpc/include/asm/rtas.h
++++ b/arch/powerpc/include/asm/rtas.h
+@@ -343,6 +343,7 @@ extern void rtas_power_off(void);
+ extern void rtas_halt(void);
+ extern void rtas_os_term(char *str);
+ extern int rtas_get_sensor(int sensor, int index, int *state);
++extern int rtas_get_sensor_fast(int sensor, int index, int *state);
+ extern int rtas_get_power_level(int powerdomain, int *level);
+ extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
+ extern bool rtas_indicator_present(int token, int *maxindex);
+diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
+index 58abeda64cb7..15cca17cba4b 100644
+--- a/arch/powerpc/include/asm/switch_to.h
++++ b/arch/powerpc/include/asm/switch_to.h
+@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {}
+ 
+ extern void enable_kernel_fp(void);
+ extern void enable_kernel_altivec(void);
++extern void enable_kernel_vsx(void);
+ extern int emulate_altivec(struct pt_regs *);
+ extern void __giveup_vsx(struct task_struct *);
+ extern void giveup_vsx(struct task_struct *);
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index 9ee61d15653d..cb565ad0a5b6 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
+ 	if (!(pe->type & EEH_PE_PHB)) {
+ 		if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ 			eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
++
++		/*
++		 * The config space of some PCI devices can't be accessed
++		 * when their PEs are in frozen state. Otherwise, fenced
++		 * when their PEs are in the frozen state; otherwise, a fenced
++		 * PHB might be seen. Those PEs are identified with the flag
++		 * is set automatically when the PE is put to EEH_PE_ISOLATED.
++		 *
++		 * Restoring BARs possibly triggers PCI config access in
++		 * (OPAL) firmware and then causes fenced PHB. If the
++		 * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's
++		 * pointless to restore BARs and dump config space.
++		 */
+ 		eeh_ops->configure_bridge(pe);
+-		eeh_pe_restore_bars(pe);
++		if (!(pe->state & EEH_PE_CFG_BLOCKED)) {
++			eeh_pe_restore_bars(pe);
+ 
+-		pci_regs_buf[0] = 0;
+-		eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++			pci_regs_buf[0] = 0;
++			eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen);
++		}
+ 	}
+ 
+ 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
+@@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev)
+ 		return;
+ 	}
+ 
+-	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+-		eeh_ops->probe(pdn, NULL);
+-
+ 	/*
+ 	 * The EEH cache might not be removed correctly because of
+ 	 * unbalanced kref to the device during unplug time, which
+@@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev)
+ 		dev->dev.archdata.edev = NULL;
+ 	}
+ 
++	if (eeh_has_flag(EEH_PROBE_MODE_DEV))
++		eeh_ops->probe(pdn, NULL);
++
+ 	edev->pdev = dev;
+ 	dev->dev.archdata.edev = edev;
+ 
+diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
+index febb50dd5328..0596373cd1c3 100644
+--- a/arch/powerpc/kernel/process.c
++++ b/arch/powerpc/kernel/process.c
+@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
+ #endif /* CONFIG_ALTIVEC */
+ 
+ #ifdef CONFIG_VSX
+-#if 0
+-/* not currently used, but some crazy RAID module might want to later */
+ void enable_kernel_vsx(void)
+ {
+ 	WARN_ON(preemptible());
+@@ -220,7 +218,6 @@ void enable_kernel_vsx(void)
+ #endif /* CONFIG_SMP */
+ }
+ EXPORT_SYMBOL(enable_kernel_vsx);
+-#endif
+ 
+ void giveup_vsx(struct task_struct *tsk)
+ {
+diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
+index 7a488c108410..caffb10e7aa3 100644
+--- a/arch/powerpc/kernel/rtas.c
++++ b/arch/powerpc/kernel/rtas.c
+@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state)
+ }
+ EXPORT_SYMBOL(rtas_get_sensor);
+ 
++int rtas_get_sensor_fast(int sensor, int index, int *state)
++{
++	int token = rtas_token("get-sensor-state");
++	int rc;
++
++	if (token == RTAS_UNKNOWN_SERVICE)
++		return -ENOENT;
++
++	rc = rtas_call(token, 2, 2, state, sensor, index);
++	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
++				    rc <= RTAS_EXTENDED_DELAY_MAX));
++
++	if (rc < 0)
++		return rtas_error_rc(rc);
++	return rc;
++}
++
+ bool rtas_indicator_present(int token, int *maxindex)
+ {
+ 	int proplen, count, i;
+diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
+index 43dafb9d6a46..4d87122cf6a7 100644
+--- a/arch/powerpc/mm/hugepage-hash64.c
++++ b/arch/powerpc/mm/hugepage-hash64.c
+@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	BUG_ON(index >= 4096);
+ 
+ 	vpn = hpt_vpn(ea, vsid, ssize);
+-	hash = hpt_hash(vpn, shift, ssize);
+ 	hpte_slot_array = get_hpte_slot_array(pmdp);
+ 	if (psize == MMU_PAGE_4K) {
+ 		/*
+@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	valid = hpte_valid(hpte_slot_array, index);
+ 	if (valid) {
+ 		/* update the hpte bits */
++		hash = hpt_hash(vpn, shift, ssize);
+ 		hidx =  hpte_hash_index(hpte_slot_array, index);
+ 		if (hidx & _PTEIDX_SECONDARY)
+ 			hash = ~hash;
+@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
+ 	if (!valid) {
+ 		unsigned long hpte_group;
+ 
++		hash = hpt_hash(vpn, shift, ssize);
+ 		/* insert new entry */
+ 		pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
+ 		new_pmd |= _PAGE_HASHPTE;
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 02e4a1745516..3b6647e574b6 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
+ 	int state;
+ 	int critical;
+ 
+-	status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
++	status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
++				      &state);
+ 
+ 	if (state > 3)
+ 		critical = 1;		/* Time Critical */
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index df6a7041922b..e6e8b241d717 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
+ 			eeh_dev_init(PCI_DN(np), pci->phb);
+ 		}
+ 		break;
++	case OF_RECONFIG_DETACH_NODE:
++		pci = PCI_DN(np);
++		if (pci)
++			list_del(&pci->list);
++		break;
+ 	default:
+ 		err = NOTIFY_DONE;
+ 		break;
+diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
+index 42506b371b74..4da604ebf6fd 100644
+--- a/arch/s390/boot/compressed/misc.c
++++ b/arch/s390/boot/compressed/misc.c
+@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
+ #endif
+ 
+ 	puts("Uncompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ 	puts("Ok, booting the kernel.\n");
+ 	return (unsigned long) output;
+ }
+diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
+index 95470a472d2c..208a9753ab38 100644
+--- a/arch/sh/boot/compressed/misc.c
++++ b/arch/sh/boot/compressed/misc.c
+@@ -132,7 +132,7 @@ void decompress_kernel(void)
+ 
+ 	puts("Uncompressing Linux... ");
+ 	cache_control(CACHE_ENABLE);
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
+ 	cache_control(CACHE_DISABLE);
+ 	puts("Ok, booting the kernel.\n");
+ }
+diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c
+index 176d5bda3559..5c65dfee278c 100644
+--- a/arch/unicore32/boot/compressed/misc.c
++++ b/arch/unicore32/boot/compressed/misc.c
+@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
+ 	output_ptr = get_unaligned_le32(tmp);
+ 
+ 	arch_decomp_puts("Uncompressing Linux...");
+-	decompress(input_data, input_data_end - input_data, NULL, NULL,
+-			output_data, NULL, error);
++	__decompress(input_data, input_data_end - input_data, NULL, NULL,
++			output_data, 0, NULL, error);
+ 	arch_decomp_puts(" done, booting the kernel.\n");
+ 	return output_ptr;
+ }
+diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
+index a107b935e22f..e28437e0f708 100644
+--- a/arch/x86/boot/compressed/misc.c
++++ b/arch/x86/boot/compressed/misc.c
+@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
+ #endif
+ 
+ 	debug_putstr("\nDecompressing Linux... ");
+-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
++	__decompress(input_data, input_len, NULL, NULL, output, output_len,
++			NULL, error);
+ 	parse_elf(output);
+ 	/*
+ 	 * 32-bit always performs relocations. 64-bit relocations are only
+diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
+index c8140e12816a..c23ab1ee3a9a 100644
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end)
+ 
+ 	vaddr = start;
+ 	pgd_idx = pgd_index(vaddr);
++	pmd_idx = pmd_index(vaddr);
+ 
+ 	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+ 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
+index b79685e06b70..279c5d674edf 100644
+--- a/block/blk-mq-sysfs.c
++++ b/block/blk-mq-sysfs.c
+@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
+ 
+ static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
+ {
+-	char *start_page = page;
+ 	struct request *rq;
++	int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
++
++	list_for_each_entry(rq, list, queuelist) {
++		const int rq_len = 2 * sizeof(rq) + 2;
++
++		/* if the output will be truncated */
++		if (PAGE_SIZE - 1 < len + rq_len) {
++		/* back up one entry if there is no room for '\t...\n' */
++			if (PAGE_SIZE - 1 < len + 5)
++				len -= rq_len;
++			len += snprintf(page + len, PAGE_SIZE - 1 - len,
++					"\t...\n");
++			break;
++		}
++		len += snprintf(page + len, PAGE_SIZE - 1 - len,
++				"\t%p\n", rq);
++	}
+ 
+-	page += sprintf(page, "%s:\n", msg);
+-
+-	list_for_each_entry(rq, list, queuelist)
+-		page += sprintf(page, "\t%p\n", rq);
+-
+-	return page - start_page;
++	return len;
+ }
+ 
+ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
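The pattern above -- append with snprintf() while pre-checking that a fixed-width entry still fits, and back up one entry to make room for a '\t...\n' marker -- is easy to exercise in user space. A sketch with made-up sizes:

    #include <stdio.h>

    #define BUF_SIZE  40    /* stand-in for PAGE_SIZE */
    #define ENTRY_LEN 6     /* width of one "\t%04x\n" entry */

    static int list_show(char *buf, const int *items, int n)
    {
            int len = snprintf(buf, BUF_SIZE - 1, "items:\n");
            int i;

            for (i = 0; i < n; i++) {
                    if (BUF_SIZE - 1 < len + ENTRY_LEN) {
                            /* back up one entry if '\t...\n' won't fit */
                            if (BUF_SIZE - 1 < len + 5)
                                    len -= ENTRY_LEN;
                            len += snprintf(buf + len, BUF_SIZE - 1 - len,
                                            "\t...\n");
                            break;
                    }
                    len += snprintf(buf + len, BUF_SIZE - 1 - len,
                                    "\t%04x\n", items[i]);
            }
            return len;
    }

    int main(void)
    {
            char buf[BUF_SIZE];
            const int items[] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            int len = list_show(buf, items, 8);

            fwrite(buf, 1, len, stdout);
            return 0;
    }

Because every entry has the same width, dropping the last written entry is always enough to fit the marker, which is why the kernel code can use the constant rq_len for the backspace.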
+diff --git a/drivers/base/node.c b/drivers/base/node.c
+index a2aa65b4215d..b10479c87357 100644
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
+ 	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
+ 		int page_nid;
+ 
++		/*
++		 * memory block could have several absent sections from start.
++		 * A memory block can have several absent sections from its
++		 * start; skip the pfn range covered by an absent section.
++		if (!pfn_present(pfn)) {
++			pfn = round_down(pfn + PAGES_PER_SECTION,
++					 PAGES_PER_SECTION) - 1;
++			continue;
++		}
++
+ 		page_nid = get_nid_for_pfn(pfn);
+ 		if (page_nid < 0)
+ 			continue;
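As a worked example (illustrative numbers): with PAGES_PER_SECTION = 0x8000, a pfn of 0x8123 inside an absent section yields round_down(0x8123 + 0x8000, 0x8000) - 1 = 0xffff, and the loop's pfn++ then resumes at 0x10000, the first pfn of the following section -- so each absent section costs one iteration instead of PAGES_PER_SECTION of them.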
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index ab300ea19434..41f93334cc44 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 
+     pagefault_disable();
+     enable_kernel_altivec();
++    enable_kernel_vsx();
+     ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+     ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+     pagefault_enable();
+@@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+     } else {
+         pagefault_disable();
+         enable_kernel_altivec();
++        enable_kernel_vsx();
+         aes_p8_encrypt(src, dst, &ctx->enc_key);
+         pagefault_enable();
+     }
+@@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+     } else {
+         pagefault_disable();
+         enable_kernel_altivec();
++        enable_kernel_vsx();
+         aes_p8_decrypt(src, dst, &ctx->dec_key);
+         pagefault_enable();
+     }
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 1a559b7dddb5..c8e7f653e5d3 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 
+     pagefault_disable();
+     enable_kernel_altivec();
++    enable_kernel_vsx();
+     ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+     ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+     pagefault_enable();
+@@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+     } else {
+         pagefault_disable();
+         enable_kernel_altivec();
++        enable_kernel_vsx();
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+         ret = blkcipher_walk_virt(desc, &walk);
+@@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+     } else {
+         pagefault_disable();
+         enable_kernel_altivec();
++        enable_kernel_vsx();
+ 
+ 	blkcipher_walk_init(&walk, dst, src, nbytes);
+         ret = blkcipher_walk_virt(desc, &walk);
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 96dbee4bf4a6..266e708d63df 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ 
+     pagefault_disable();
+     enable_kernel_altivec();
++    enable_kernel_vsx();
+     ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+     pagefault_enable();
+ 
+@@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+ 
+     pagefault_disable();
+     enable_kernel_altivec();
++    enable_kernel_vsx();
+     aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+     pagefault_enable();
+ 
+@@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
+         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+             pagefault_disable();
+             enable_kernel_altivec();
++            enable_kernel_vsx();
+             aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
+                 (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
+             pagefault_enable();
+diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
+index d0ffe277af5c..917b3f09e724 100644
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+ 
+     pagefault_disable();
+     enable_kernel_altivec();
++    enable_kernel_vsx();
+     enable_kernel_fp();
+     gcm_init_p8(ctx->htable, (const u64 *) key);
+     pagefault_enable();
+@@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+                     GHASH_DIGEST_SIZE - dctx->bytes);
+             pagefault_disable();
+             enable_kernel_altivec();
++            enable_kernel_vsx();
+             enable_kernel_fp();
+             gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+                     GHASH_DIGEST_SIZE);
+@@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc,
+         if (len) {
+             pagefault_disable();
+             enable_kernel_altivec();
++            enable_kernel_vsx();
+             enable_kernel_fp();
+             gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+             pagefault_enable();
+@@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+                 dctx->buffer[i] = 0;
+             pagefault_disable();
+             enable_kernel_altivec();
++            enable_kernel_vsx();
+             enable_kernel_fp();
+             gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
+                     GHASH_DIGEST_SIZE);
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index c097d3a82bda..a9b01bcf7d0a 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
+ 	    rdev->pdev->subsystem_device == 0x30ae)
+ 		return;
+ 
++	/* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
++	 * - it hangs on resume inside the dynclk 1 table.
++	 */
++	if (rdev->family == CHIP_RS480 &&
++	    rdev->pdev->subsystem_vendor == 0x103c &&
++	    rdev->pdev->subsystem_device == 0x280a)
++		return;
++
+ 	/* DYN CLK 1 */
+ 	table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
+ 	if (table)
+diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
+index b716b0815644..bebf11a6622a 100644
+--- a/drivers/infiniband/core/uverbs.h
++++ b/drivers/infiniband/core/uverbs.h
+@@ -85,7 +85,7 @@
+  */
+ 
+ struct ib_uverbs_device {
+-	struct kref				ref;
++	atomic_t				refcount;
+ 	int					num_comp_vectors;
+ 	struct completion			comp;
+ 	struct device			       *dev;
+@@ -94,6 +94,7 @@ struct ib_uverbs_device {
+ 	struct cdev			        cdev;
+ 	struct rb_root				xrcd_tree;
+ 	struct mutex				xrcd_tree_mutex;
++	struct kobject				kobj;
+ };
+ 
+ struct ib_uverbs_event_file {
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index a9f048990dfc..ccc2494b4ea7 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 		next->send_flags = user_wr->send_flags;
+ 
+ 		if (is_ud) {
++			if (next->opcode != IB_WR_SEND &&
++			    next->opcode != IB_WR_SEND_WITH_IMM) {
++				ret = -EINVAL;
++				goto out_put;
++			}
++
+ 			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
+ 						     file->ucontext);
+ 			if (!next->wr.ud.ah) {
+@@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
+ 					user_wr->wr.atomic.compare_add;
+ 				next->wr.atomic.swap = user_wr->wr.atomic.swap;
+ 				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
++			case IB_WR_SEND:
+ 				break;
+ 			default:
+-				break;
++				ret = -EINVAL;
++				goto out_put;
+ 			}
+ 		}
+ 
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 88cce9bb72fe..09686d49d4c1 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
+ static void ib_uverbs_add_one(struct ib_device *device);
+ static void ib_uverbs_remove_one(struct ib_device *device);
+ 
+-static void ib_uverbs_release_dev(struct kref *ref)
++static void ib_uverbs_release_dev(struct kobject *kobj)
+ {
+ 	struct ib_uverbs_device *dev =
+-		container_of(ref, struct ib_uverbs_device, ref);
++		container_of(kobj, struct ib_uverbs_device, kobj);
+ 
+-	complete(&dev->comp);
++	kfree(dev);
+ }
+ 
++static struct kobj_type ib_uverbs_dev_ktype = {
++	.release = ib_uverbs_release_dev,
++};
++
+ static void ib_uverbs_release_event_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_event_file *file =
+@@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
+ 	return context->device->dealloc_ucontext(context);
+ }
+ 
++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
++{
++	complete(&dev->comp);
++}
++
+ static void ib_uverbs_release_file(struct kref *ref)
+ {
+ 	struct ib_uverbs_file *file =
+ 		container_of(ref, struct ib_uverbs_file, ref);
+ 
+ 	module_put(file->device->ib_dev->owner);
+-	kref_put(&file->device->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&file->device->refcount))
++		ib_uverbs_comp_dev(file->device);
+ 
+ 	kfree(file);
+ }
+@@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	int ret;
+ 
+ 	dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
+-	if (dev)
+-		kref_get(&dev->ref);
+-	else
++	if (!atomic_inc_not_zero(&dev->refcount))
+ 		return -ENXIO;
+ 
+ 	if (!try_module_get(dev->ib_dev->owner)) {
+@@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
+ 	mutex_init(&file->mutex);
+ 
+ 	filp->private_data = file;
++	kobject_get(&dev->kobj);
+ 
+ 	return nonseekable_open(inode, filp);
+ 
+@@ -772,13 +781,16 @@ err_module:
+ 	module_put(dev->ib_dev->owner);
+ 
+ err:
+-	kref_put(&dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&dev->refcount))
++		ib_uverbs_comp_dev(dev);
++
+ 	return ret;
+ }
+ 
+ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ {
+ 	struct ib_uverbs_file *file = filp->private_data;
++	struct ib_uverbs_device *dev = file->device;
+ 
+ 	ib_uverbs_cleanup_ucontext(file, file->ucontext);
+ 
+@@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
+ 		kref_put(&file->async_file->ref, ib_uverbs_release_event_file);
+ 
+ 	kref_put(&file->ref, ib_uverbs_release_file);
++	kobject_put(&dev->kobj);
+ 
+ 	return 0;
+ }
+@@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	if (!uverbs_dev)
+ 		return;
+ 
+-	kref_init(&uverbs_dev->ref);
++	atomic_set(&uverbs_dev->refcount, 1);
+ 	init_completion(&uverbs_dev->comp);
+ 	uverbs_dev->xrcd_tree = RB_ROOT;
+ 	mutex_init(&uverbs_dev->xrcd_tree_mutex);
++	kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
+ 
+ 	spin_lock(&map_lock);
+ 	devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+@@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
+ 	cdev_init(&uverbs_dev->cdev, NULL);
+ 	uverbs_dev->cdev.owner = THIS_MODULE;
+ 	uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
++	uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
+ 	kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ 	if (cdev_add(&uverbs_dev->cdev, base, 1))
+ 		goto err_cdev;
+@@ -941,9 +956,10 @@ err_cdev:
+ 		clear_bit(devnum, overflow_map);
+ 
+ err:
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ 	return;
+ }
+ 
+@@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
+ 	else
+ 		clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
+ 
+-	kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
++	if (atomic_dec_and_test(&uverbs_dev->refcount))
++		ib_uverbs_comp_dev(uverbs_dev);
+ 	wait_for_completion(&uverbs_dev->comp);
+-	kfree(uverbs_dev);
++	kobject_put(&uverbs_dev->kobj);
+ }
+ 
+ static char *uverbs_devnode(struct device *dev, umode_t *mode)
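The conversion above replaces kref (whose kref_get() would happily resurrect a device already on its way out) with the look-up rule "take a reference only while the count is still non-zero". A user-space sketch of that rule with C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* analogue of atomic_inc_not_zero(): fail if the object is dying */
    static bool get_ref_if_live(atomic_int *refcount)
    {
            int old = atomic_load(refcount);

            while (old != 0) {
                    if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                            return true;    /* got a reference */
            }
            return false;                   /* open() must fail with -ENXIO */
    }

    /* returns true on the final put, where the completion is signalled */
    static bool put_ref(atomic_int *refcount)
    {
            return atomic_fetch_sub(refcount, 1) == 1;
    }

    int main(void)
    {
            atomic_int ref;

            atomic_init(&ref, 1);           /* creator's reference */
            if (get_ref_if_live(&ref))      /* an open() succeeds... */
                    put_ref(&ref);          /* ...and later closes */
            return put_ref(&ref) ? 0 : 1;   /* final put: complete(&comp) */
    }

The separate kobject reference taken in ib_uverbs_open() then keeps the memory itself alive until the last file is closed, independent of the completion handshake.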
+diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
+index f50a546224ad..33fdd50123f7 100644
+--- a/drivers/infiniband/hw/mlx4/ah.c
++++ b/drivers/infiniband/hw/mlx4/ah.c
+@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
+ 	enum rdma_link_layer ll;
+ 
+ 	memset(ah_attr, 0, sizeof *ah_attr);
+-	ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+ 	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
+ 	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
++	if (ll == IB_LINK_LAYER_ETHERNET)
++		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
++	else
++		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
++
+ 	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
+ 	if (ah->av.ib.stat_rate)
+ 		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
+diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
+index 0176caa5792c..2857ed89725e 100644
+--- a/drivers/infiniband/hw/mlx4/cq.c
++++ b/drivers/infiniband/hw/mlx4/cq.c
+@@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
+ 	 * simulated FLUSH_ERR completions
+ 	 */
+ 	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
+-		mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1);
++		mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1);
+ 		if (*npolled >= num_entries)
+ 			goto out;
+ 	}
+diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
+index ed327e6c8fdc..a0559a8af4f4 100644
+--- a/drivers/infiniband/hw/mlx4/mcg.c
++++ b/drivers/infiniband/hw/mlx4/mcg.c
+@@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
+ {
+ 	struct mlx4_ib_dev *dev = ctx->dev;
+ 	struct ib_ah_attr	ah_attr;
++	unsigned long flags;
+ 
+-	spin_lock(&dev->sm_lock);
++	spin_lock_irqsave(&dev->sm_lock, flags);
+ 	if (!dev->sm_ah[ctx->port - 1]) {
+ 		/* port is not yet Active, sm_ah not ready */
+-		spin_unlock(&dev->sm_lock);
++		spin_unlock_irqrestore(&dev->sm_lock, flags);
+ 		return -EAGAIN;
+ 	}
+ 	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
+-	spin_unlock(&dev->sm_lock);
++	spin_unlock_irqrestore(&dev->sm_lock, flags);
+ 	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
+ 				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
+ 				    &ah_attr, NULL, mad);
+diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
+index 6797108ce873..69fb5ba94d0f 100644
+--- a/drivers/infiniband/hw/mlx4/sysfs.c
++++ b/drivers/infiniband/hw/mlx4/sysfs.c
+@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 	struct mlx4_port *p;
+ 	int i;
+ 	int ret;
++	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
++			IB_LINK_LAYER_ETHERNET;
+ 
+ 	p = kzalloc(sizeof *p, GFP_KERNEL);
+ 	if (!p)
+@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave)
+ 
+ 	p->pkey_group.name  = "pkey_idx";
+ 	p->pkey_group.attrs =
+-		alloc_group_attrs(show_port_pkey, store_port_pkey,
++		alloc_group_attrs(show_port_pkey,
++				  is_eth ? NULL : store_port_pkey,
+ 				  dev->dev->caps.pkey_table_len[port_num]);
+ 	if (!p->pkey_group.attrs) {
+ 		ret = -ENOMEM;
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 71c593583864..0c52f078759c 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ 	return &mr->ibmr;
+ 
+ error:
+-	/*
+-	 * Destroy the umem *before* destroying the MR, to ensure we
+-	 * will not have any in-flight notifiers when destroying the
+-	 * MR.
+-	 *
+-	 * As the MR is completely invalid to begin with, and this
+-	 * error path is only taken if we can't push the mr entry into
+-	 * the pagefault tree, this is safe.
+-	 */
+-
+ 	ib_umem_release(umem);
+-	/* Kill the MR, and return an error code. */
+-	clean_mr(mr);
+ 	return ERR_PTR(err);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
+index ad843c786e72..5afaa218508d 100644
+--- a/drivers/infiniband/hw/qib/qib_keys.c
++++ b/drivers/infiniband/hw/qib/qib_keys.c
+@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
+ 	 * unrestricted LKEY.
+ 	 */
+ 	rkt->gen++;
++	/*
++	 * bits are capped in qib_verbs.c to ensure enough bits
++	 * for the generation number
++	 */
+ 	mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
+ 		((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
+ 		 << 8);
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
+index 4a3599890ea5..9dd5d9a0556b 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.c
++++ b/drivers/infiniband/hw/qib/qib_verbs.c
+@@ -40,6 +40,7 @@
+ #include <linux/rculist.h>
+ #include <linux/mm.h>
+ #include <linux/random.h>
++#include <linux/vmalloc.h>
+ 
+ #include "qib.h"
+ #include "qib_common.h"
+@@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
+ 	 * the LKEY).  The remaining bits act as a generation number or tag.
+ 	 */
+ 	spin_lock_init(&dev->lk_table.lock);
++	/* ensure generation is at least 4 bits; see keys.c */
++	if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
++		qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
++			ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
++		ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
++	}
+ 	dev->lk_table.max = 1 << ib_qib_lkey_table_size;
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+ 	dev->lk_table.table = (struct qib_mregion __rcu **)
+-		__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
++		vmalloc(lk_tab_size);
+ 	if (dev->lk_table.table == NULL) {
+ 		ret = -ENOMEM;
+ 		goto err_lk;
+@@ -2265,7 +2272,7 @@ err_tx:
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ err_hdrs:
+-	free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ err_lk:
+ 	kfree(dev->qp_table);
+ err_qpt:
+@@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
+ 					sizeof(struct qib_pio_header),
+ 				  dev->pio_hdrs, dev->pio_hdrs_phys);
+ 	lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
+-	free_pages((unsigned long) dev->lk_table.table,
+-		   get_order(lk_tab_size));
++	vfree(dev->lk_table.table);
+ 	kfree(dev->qp_table);
+ }
+ 
+diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
+index bfc8948fdd35..44ca28c83fe6 100644
+--- a/drivers/infiniband/hw/qib/qib_verbs.h
++++ b/drivers/infiniband/hw/qib/qib_verbs.h
+@@ -647,6 +647,8 @@ struct qib_qpn_table {
+ 	struct qpn_map map[QPNMAP_ENTRIES];
+ };
+ 
++#define MAX_LKEY_TABLE_BITS 23
++
+ struct qib_lkey_table {
+ 	spinlock_t lock; /* protect changes in this struct */
+ 	u32 next;               /* next unused index (speeds search) */
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
+index 6a594aac2290..c933d882c35c 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
+@@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
+ 		goto out;
+ 	}
+ 
++	tx_desc->mapped = true;
+ 	tx_desc->dma_addr = dma_addr;
+ 	tx_desc->tx_sg[0].addr   = tx_desc->dma_addr;
+ 	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+@@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
+ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
+ {
+ 	struct iscsi_iser_task *iser_task = task->dd_data;
+-	struct iser_tx_desc    *tx_desc   = &iser_task->desc;
+-	struct iser_conn       *iser_conn	  = task->conn->dd_data;
++	struct iser_tx_desc *tx_desc = &iser_task->desc;
++	struct iser_conn *iser_conn = task->conn->dd_data;
+ 	struct iser_device *device = iser_conn->ib_conn.device;
+ 
+ 	/* DEVICE_REMOVAL event might have already released the device */
+ 	if (!device)
+ 		return;
+ 
+-	ib_dma_unmap_single(device->ib_device,
+-		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
++	if (likely(tx_desc->mapped)) {
++		ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
++				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
++		tx_desc->mapped = false;
++	}
+ 
+ 	/* mgmt tasks do not need special cleanup */
+ 	if (!task->sc)
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 262ba1f8ee50..d2b6caf7694d 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -270,6 +270,7 @@ enum iser_desc_type {
+  *                 sg[1] optionally points to either of immediate data
+  *                 unsolicited data-out or control
+  * @num_sge:       number sges used on this TX task
++ * @mapped:        Is the task header mapped
+  */
+ struct iser_tx_desc {
+ 	struct iser_hdr              iser_header;
+@@ -278,6 +279,7 @@ struct iser_tx_desc {
+ 	u64		             dma_addr;
+ 	struct ib_sge		     tx_sg[2];
+ 	int                          num_sge;
++	bool			     mapped;
+ };
+ 
+ #define ISER_RX_PAD_SIZE	(256 - (ISER_RX_PAYLOAD_SIZE + \
+diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
+index 3e2118e8ed87..0a47f42fec24 100644
+--- a/drivers/infiniband/ulp/iser/iser_initiator.c
++++ b/drivers/infiniband/ulp/iser/iser_initiator.c
+@@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 	unsigned long buf_offset;
+ 	unsigned long data_seg_len;
+ 	uint32_t itt;
+-	int err = 0;
++	int err;
+ 	struct ib_sge *tx_dsg;
+ 
+ 	itt = (__force uint32_t)hdr->itt;
+@@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 	memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
+ 
+ 	/* build the tx desc */
+-	iser_initialize_task_headers(task, tx_desc);
++	err = iser_initialize_task_headers(task, tx_desc);
++	if (err)
++		goto send_data_out_error;
+ 
+ 	mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
+ 	tx_dsg = &tx_desc->tx_sg[1];
+@@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
+ 
+ send_data_out_error:
+ 	kmem_cache_free(ig.desc_cache, tx_desc);
+-	iser_err("conn %p failed err %d\n",conn, err);
++	iser_err("conn %p failed err %d\n", conn, err);
+ 	return err;
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 75c01b27bd0b..025f93105444 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host)
+ 	return c;
+ }
+ 
++/*
++ * Return values:
++ * < 0 upon failure. Caller is responsible for SRP target port cleanup.
++ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
++ *    removal has been scheduled.
++ * 0 and target->state != SRP_TARGET_REMOVED upon success.
++ */
+ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ {
+ 	struct srp_rport_identifiers ids;
+@@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 					srp_free_ch_ib(target, ch);
+ 					srp_free_req_data(target, ch);
+ 					target->ch_count = ch - target->ch;
+-					break;
++					goto connected;
+ 				}
+ 			}
+ 
+@@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 		node_idx++;
+ 	}
+ 
++connected:
+ 	target->scsi_host->nr_hw_queues = target->ch_count;
+ 
+ 	ret = srp_add_target(host, target);
+@@ -3298,6 +3306,8 @@ out:
+ 	mutex_unlock(&host->add_target_mutex);
+ 
+ 	scsi_host_put(target->scsi_host);
++	if (ret < 0)
++		scsi_host_put(target->scsi_host);
+ 
+ 	return ret;
+ 
+diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
+index a18f41b89b6a..2ae522f0d2b2 100644
+--- a/drivers/input/evdev.c
++++ b/drivers/input/evdev.c
+@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
+ {
+ 	struct evdev_client *client = file->private_data;
+ 	struct evdev *evdev = client->evdev;
+-	int retval;
+ 
+-	retval = mutex_lock_interruptible(&evdev->mutex);
+-	if (retval)
+-		return retval;
++	mutex_lock(&evdev->mutex);
+ 
+-	if (!evdev->exist || client->revoked)
+-		retval = -ENODEV;
+-	else
+-		retval = input_flush_device(&evdev->handle, file);
++	if (evdev->exist && !client->revoked)
++		input_flush_device(&evdev->handle, file);
+ 
+ 	mutex_unlock(&evdev->mutex);
+-	return retval;
++	return 0;
+ }
+ 
+ static void evdev_free(struct device *dev)
+diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
+index abeedc9a78c2..2570f2a25dc4 100644
+--- a/drivers/iommu/fsl_pamu.c
++++ b/drivers/iommu/fsl_pamu.c
+@@ -41,7 +41,6 @@ struct pamu_isr_data {
+ 
+ static struct paace *ppaact;
+ static struct paace *spaact;
+-static struct ome *omt __initdata;
+ 
+ /*
+  * Table for matching compatible strings, for device tree
+@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
+  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
+  * string would be used.
+  */
+-static const struct of_device_id guts_device_ids[] __initconst = {
++static const struct of_device_id guts_device_ids[] = {
+ 	{ .compatible = "fsl,qoriq-device-config-1.0", },
+ 	{ .compatible = "fsl,qoriq-device-config-2.0", },
+ 	{}
+@@ -599,7 +598,7 @@ found_cpu_node:
+  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
+  * clear the PAACE entry coherency attribute for them.
+  */
+-static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
++static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
+ {
+ 	switch (paace_type) {
+ 	case QMAN_PAACE:
+@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int  paace_type)
+  * this table to translate device transaction to appropriate corenet
+  * transaction.
+  */
+-static void __init setup_omt(struct ome *omt)
++static void setup_omt(struct ome *omt)
+ {
+ 	struct ome *ome;
+ 
+@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
+  * Get the maximum number of PAACT table entries
+  * and subwindows supported by PAMU
+  */
+-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
++static void get_pamu_cap_values(unsigned long pamu_reg_base)
+ {
+ 	u32 pc_val;
+ 
+@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+ }
+ 
+ /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
+-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+-				 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+-				 phys_addr_t omt_phys)
++static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
++			  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
++			  phys_addr_t omt_phys)
+ {
+ 	u32 *pc;
+ 	struct pamu_mmap_regs *pamu_regs;
+@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
+ }
+ 
+ /* Enable all device LIODNS */
+-static void __init setup_liodns(void)
++static void setup_liodns(void)
+ {
+ 	int i, len;
+ 	struct paace *ppaace;
+@@ -846,7 +845,7 @@ struct ccsr_law {
+ /*
+  * Create a coherence subdomain for a given memory block.
+  */
+-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
++static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+ {
+ 	struct device_node *np;
+ 	const __be32 *iprop;
+@@ -988,7 +987,7 @@ error:
+ static const struct {
+ 	u32 svr;
+ 	u32 port_id;
+-} port_id_map[] __initconst = {
++} port_id_map[] = {
+ 	{(SVR_P2040 << 8) | 0x10, 0xFF000000},	/* P2040 1.0 */
+ 	{(SVR_P2040 << 8) | 0x11, 0xFF000000},	/* P2040 1.1 */
+ 	{(SVR_P2041 << 8) | 0x10, 0xFF000000},	/* P2041 1.0 */
+@@ -1006,7 +1005,7 @@ static const struct {
+ 
+ #define SVR_SECURITY	0x80000	/* The Security (E) bit */
+ 
+-static int __init fsl_pamu_probe(struct platform_device *pdev)
++static int fsl_pamu_probe(struct platform_device *pdev)
+ {
+ 	struct device *dev = &pdev->dev;
+ 	void __iomem *pamu_regs = NULL;
+@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
+ 	int irq;
+ 	phys_addr_t ppaact_phys;
+ 	phys_addr_t spaact_phys;
++	struct ome *omt;
+ 	phys_addr_t omt_phys;
+ 	size_t mem_size = 0;
+ 	unsigned int order = 0;
+@@ -1200,7 +1200,7 @@ error:
+ 	return ret;
+ }
+ 
+-static struct platform_driver fsl_of_pamu_driver __initdata = {
++static struct platform_driver fsl_of_pamu_driver = {
+ 	.driver = {
+ 		.name = "fsl-of-pamu",
+ 	},
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index c87c4b1bfc00..c23427951ec1 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
+ 	struct context_entry *context;
+ 	u64 *entry;
+ 
++	entry = &root->lo;
+ 	if (ecs_enabled(iommu)) {
+ 		if (devfn >= 0x80) {
+ 			devfn -= 0x80;
+@@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
+ 		}
+ 		devfn *= 2;
+ 	}
+-	entry = &root->lo;
+ 	if (*entry & 1)
+ 		context = phys_to_virt(*entry & VTD_PAGE_MASK);
+ 	else {
+diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
+index 4e460216bd16..e29d5d7fe220 100644
+--- a/drivers/iommu/io-pgtable-arm.c
++++ b/drivers/iommu/io-pgtable-arm.c
+@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte;
+ 
+ static bool selftest_running = false;
+ 
++static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
++			    unsigned long iova, size_t size, int lvl,
++			    arm_lpae_iopte *ptep);
++
+ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ 			     unsigned long iova, phys_addr_t paddr,
+ 			     arm_lpae_iopte prot, int lvl,
+@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
+ {
+ 	arm_lpae_iopte pte = prot;
+ 
+-	/* We require an unmap first */
+ 	if (iopte_leaf(*ptep, lvl)) {
++		/* We require an unmap first */
+ 		WARN_ON(!selftest_running);
+ 		return -EEXIST;
++	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
++		/*
++		 * We need to unmap and free the old table before
++		 * overwriting it with a block entry.
++		 */
++		arm_lpae_iopte *tblp;
++		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
++
++		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
++		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
++			return -EINVAL;
+ 	}
+ 
+ 	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
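Concretely (an illustrative case, not spelled out in the patch): if a 2 MiB region was first mapped with 4 KiB pages, the level-2 entry holds a pointer to a level-3 table; remapping that region as a single 2 MiB block must first unmap through the existing entry, since simply overwriting it would leak the level-3 table it points to.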
+diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
+index c845d99ecf6b..e0ff5f4d7fed 100644
+--- a/drivers/iommu/tegra-smmu.c
++++ b/drivers/iommu/tegra-smmu.c
+@@ -26,6 +26,7 @@ struct tegra_smmu {
+ 	const struct tegra_smmu_soc *soc;
+ 
+ 	unsigned long pfn_mask;
++	unsigned long tlb_mask;
+ 
+ 	unsigned long *asids;
+ 	struct mutex lock;
+@@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
+ #define SMMU_TLB_CONFIG 0x14
+ #define  SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
+ #define  SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
+-#define  SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f)
++#define  SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
++	((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)
+ 
+ #define SMMU_PTC_CONFIG 0x18
+ #define  SMMU_PTC_CONFIG_ENABLE (1 << 29)
+@@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
+ 	dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
+ 		mc->soc->num_address_bits, smmu->pfn_mask);
++	smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
++	dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
++		smmu->tlb_mask);
+ 
+ 	value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
+ 
+@@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
+ 	smmu_writel(smmu, value, SMMU_PTC_CONFIG);
+ 
+ 	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
+-		SMMU_TLB_CONFIG_ACTIVE_LINES(0x20);
++		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);
+ 
+ 	if (soc->supports_round_robin_arbitration)
+ 		value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;
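Worked example: for an SoC whose num_tlb_lines is 0x20 (the value the driver previously hard-coded), tlb_mask = (0x20 << 1) - 1 = 0x3f, so ACTIVE_LINES yields 0x20 & 0x3f = 0x20 -- unchanged behaviour there, while SoCs with a different number of TLB lines now program their own count instead of an unconditional 0x20.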
+diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
+index 8c91fd5eb6fd..3ac9c4194814 100644
+--- a/drivers/isdn/gigaset/ser-gigaset.c
++++ b/drivers/isdn/gigaset/ser-gigaset.c
+@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
+ 	cs->hw.ser->tty = tty;
+ 	atomic_set(&cs->hw.ser->refcnt, 1);
+ 	init_completion(&cs->hw.ser->dead_cmp);
+-
+ 	tty->disc_data = cs;
+ 
++	/* Set the amount of data we're willing to receive per call
++	 * from the hardware driver to half of the input buffer size
++	 * to leave some reserve.
++	 * Note: We don't do flow control towards the hardware driver.
++	 * If more data is received than will fit into the input buffer,
++	 * it will be dropped and an error will be logged. This should
++	 * never happen as the device is slow and the buffer size ample.
++	 */
++	tty->receive_room = RBUFSIZE/2;
++
+ 	/* OK.. Initialization of the datastructures and the HW is done.. Now
+ 	 * startup system and notify the LL that we are ready to run
+ 	 */
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index e4621511d118..e8c44fcb1ad1 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5365,6 +5365,8 @@ static void __md_stop(struct mddev *mddev)
+ {
+ 	struct md_personality *pers = mddev->pers;
+ 	mddev_detach(mddev);
++	/* Ensure ->event_work is done */
++	flush_workqueue(md_misc_wq);
+ 	spin_lock(&mddev->lock);
+ 	mddev->ready = 0;
+ 	mddev->pers = NULL;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index f55c3f35b746..fe0122771642 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3566,6 +3566,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
+ 			/* far_copies must be 1 */
+ 			conf->prev.stride = conf->dev_sectors;
+ 	}
++	conf->reshape_safe = conf->reshape_progress;
+ 	spin_lock_init(&conf->device_lock);
+ 	INIT_LIST_HEAD(&conf->retry_list);
+ 
+@@ -3770,7 +3771,6 @@ static int run(struct mddev *mddev)
+ 		}
+ 		conf->offset_diff = min_offset_diff;
+ 
+-		conf->reshape_safe = conf->reshape_progress;
+ 		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ 		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ 		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
+@@ -4113,6 +4113,7 @@ static int raid10_start_reshape(struct mddev *mddev)
+ 		conf->reshape_progress = size;
+ 	} else
+ 		conf->reshape_progress = 0;
++	conf->reshape_safe = conf->reshape_progress;
+ 	spin_unlock_irq(&conf->device_lock);
+ 
+ 	if (mddev->delta_disks && mddev->bitmap) {
+@@ -4180,6 +4181,7 @@ abort:
+ 		rdev->new_data_offset = rdev->data_offset;
+ 	smp_wmb();
+ 	conf->reshape_progress = MaxSector;
++	conf->reshape_safe = MaxSector;
+ 	mddev->reshape_position = MaxSector;
+ 	spin_unlock_irq(&conf->device_lock);
+ 	return ret;
+@@ -4534,6 +4536,7 @@ static void end_reshape(struct r10conf *conf)
+ 	md_finish_reshape(conf->mddev);
+ 	smp_wmb();
+ 	conf->reshape_progress = MaxSector;
++	conf->reshape_safe = MaxSector;
+ 	spin_unlock_irq(&conf->device_lock);
+ 
+ 	/* read-ahead size must cover two whole stripes, which is
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index b6793d2e051f..23af6772f146 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -2151,6 +2151,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 	if (!sc)
+ 		return -ENOMEM;
+ 
++	/* Need to ensure auto-resizing doesn't interfere */
++	mutex_lock(&conf->cache_size_mutex);
++
+ 	for (i = conf->max_nr_stripes; i; i--) {
+ 		nsh = alloc_stripe(sc, GFP_KERNEL);
+ 		if (!nsh)
+@@ -2167,6 +2170,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 			kmem_cache_free(sc, nsh);
+ 		}
+ 		kmem_cache_destroy(sc);
++		mutex_unlock(&conf->cache_size_mutex);
+ 		return -ENOMEM;
+ 	}
+ 	/* Step 2 - Must use GFP_NOIO now.
+@@ -2213,6 +2217,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ 	} else
+ 		err = -ENOMEM;
+ 
++	mutex_unlock(&conf->cache_size_mutex);
+ 	/* Step 4, return new stripes to service */
+ 	while(!list_empty(&newstripes)) {
+ 		nsh = list_entry(newstripes.next, struct stripe_head, lru);
+@@ -2240,7 +2245,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
+ static int drop_one_stripe(struct r5conf *conf)
+ {
+ 	struct stripe_head *sh;
+-	int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS;
++	int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
+ 
+ 	spin_lock_irq(conf->hash_locks + hash);
+ 	sh = get_free_stripe(conf, hash);
+@@ -5846,12 +5851,14 @@ static void raid5d(struct md_thread *thread)
+ 	pr_debug("%d stripes handled\n", handled);
+ 
+ 	spin_unlock_irq(&conf->device_lock);
+-	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) {
++	if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
++	    mutex_trylock(&conf->cache_size_mutex)) {
+ 		grow_one_stripe(conf, __GFP_NOWARN);
+ 		/* Set flag even if allocation failed.  This helps
+ 		 * slow down allocation requests when mem is short
+ 		 */
+ 		set_bit(R5_DID_ALLOC, &conf->cache_state);
++		mutex_unlock(&conf->cache_size_mutex);
+ 	}
+ 
+ 	async_tx_issue_pending_all();
+@@ -5883,18 +5890,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
+ 		return -EINVAL;
+ 
+ 	conf->min_nr_stripes = size;
++	mutex_lock(&conf->cache_size_mutex);
+ 	while (size < conf->max_nr_stripes &&
+ 	       drop_one_stripe(conf))
+ 		;
++	mutex_unlock(&conf->cache_size_mutex);
+ 
+ 
+ 	err = md_allow_write(mddev);
+ 	if (err)
+ 		return err;
+ 
++	mutex_lock(&conf->cache_size_mutex);
+ 	while (size > conf->max_nr_stripes)
+ 		if (!grow_one_stripe(conf, GFP_KERNEL))
+ 			break;
++	mutex_unlock(&conf->cache_size_mutex);
+ 
+ 	return 0;
+ }
+@@ -6360,11 +6371,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
+ 				      struct shrink_control *sc)
+ {
+ 	struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
+-	int ret = 0;
+-	while (ret < sc->nr_to_scan) {
+-		if (drop_one_stripe(conf) == 0)
+-			return SHRINK_STOP;
+-		ret++;
++	unsigned long ret = SHRINK_STOP;
++
++	if (mutex_trylock(&conf->cache_size_mutex)) {
++		ret= 0;
++		while (ret < sc->nr_to_scan &&
++		       conf->max_nr_stripes > conf->min_nr_stripes) {
++			if (drop_one_stripe(conf) == 0) {
++				ret = SHRINK_STOP;
++				break;
++			}
++			ret++;
++		}
++		mutex_unlock(&conf->cache_size_mutex);
+ 	}
+ 	return ret;
+ }
+@@ -6433,6 +6452,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
+ 		goto abort;
+ 	spin_lock_init(&conf->device_lock);
+ 	seqcount_init(&conf->gen_lock);
++	mutex_init(&conf->cache_size_mutex);
+ 	init_waitqueue_head(&conf->wait_for_stripe);
+ 	init_waitqueue_head(&conf->wait_for_overlap);
+ 	INIT_LIST_HEAD(&conf->handle_list);
+diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
+index 896d603ad0da..03472fbbd882 100644
+--- a/drivers/md/raid5.h
++++ b/drivers/md/raid5.h
+@@ -482,7 +482,8 @@ struct r5conf {
+ 	 */
+ 	int			active_name;
+ 	char			cache_name[2][32];
+-	struct kmem_cache		*slab_cache; /* for allocating stripes */
++	struct kmem_cache	*slab_cache; /* for allocating stripes */
++	struct mutex		cache_size_mutex; /* Protect changes to cache size */
+ 
+ 	int			seq_flush, seq_write;
+ 	int			quiesce;
+diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
+index a30cc2f7e4f1..ddf59ee5ca40 100644
+--- a/drivers/media/platform/am437x/am437x-vpfe.c
++++ b/drivers/media/platform/am437x/am437x-vpfe.c
+@@ -1185,14 +1185,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe)
+ static int vpfe_release(struct file *file)
+ {
+ 	struct vpfe_device *vpfe = video_drvdata(file);
++	bool fh_singular;
+ 	int ret;
+ 
+ 	mutex_lock(&vpfe->lock);
+ 
+-	if (v4l2_fh_is_singular_file(file))
+-		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
++	/* Save the singular status before we call the clean-up helper */
++	fh_singular = v4l2_fh_is_singular_file(file);
++
++	/* the release helper will cleanup any on-going streaming */
+ 	ret = _vb2_fop_release(file, NULL);
+ 
++	/*
++	 * If this was the last open file.
++	 * Then de-initialize hw module.
++	 */
++	if (fh_singular)
++		vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev);
++
+ 	mutex_unlock(&vpfe->lock);
+ 
+ 	return ret;
+@@ -1577,7 +1587,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
+ 		return -EBUSY;
+ 	}
+ 
+-	ret = vpfe_try_fmt(file, priv, fmt);
++	ret = vpfe_try_fmt(file, priv, &format);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 18d0a871747f..947d8be7b245 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags,
+ 	int ret;
+ 
+ 	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
+-	    !(link->flags & MEDIA_LNK_FL_ENABLED)) {
++	    !(flags & MEDIA_LNK_FL_ENABLED)) {
+ 		/* Powering off entities is assumed to never fail. */
+ 		isp_pipeline_pm_power(source, -sink_use);
+ 		isp_pipeline_pm_power(sink, -source_use);
+ 		return 0;
+ 	}
+ 
+-	if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
++	if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
+ 		(flags & MEDIA_LNK_FL_ENABLED)) {
+ 
+ 		ret = isp_pipeline_pm_power(source, sink_use);
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index f8c5e47a30aa..0aba9ff92102 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+ {
+ 	struct rc_dev *dev = to_rc_dev(device);
+ 
+-	if (!dev || !dev->input_dev)
+-		return -ENODEV;
+-
+ 	if (dev->rc_map.name)
+ 		ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
+ 	if (dev->driver_name)
+diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
+index 511e9a25c151..16c4d26f51e7 100644
+--- a/drivers/memory/tegra/tegra114.c
++++ b/drivers/memory/tegra/tegra114.c
+@@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
+ 	.supports_round_robin_arbitration = false,
+ 	.supports_request_limit = false,
++	.num_tlb_lines = 32,
+ 	.num_asids = 4,
+ 	.ops = &tegra114_smmu_ops,
+ };
+diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
+index 278d40b854c1..b153d0b732cf 100644
+--- a/drivers/memory/tegra/tegra124.c
++++ b/drivers/memory/tegra/tegra124.c
+@@ -981,6 +981,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
+ 	.supports_round_robin_arbitration = true,
+ 	.supports_request_limit = true,
++	.num_tlb_lines = 32,
+ 	.num_asids = 128,
+ 	.ops = &tegra124_smmu_ops,
+ };
+diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
+index 71fe9376fe53..f422b18f45f3 100644
+--- a/drivers/memory/tegra/tegra30.c
++++ b/drivers/memory/tegra/tegra30.c
+@@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
+ 	.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
+ 	.supports_round_robin_arbitration = false,
+ 	.supports_request_limit = false,
++	.num_tlb_lines = 16,
+ 	.num_asids = 4,
+ 	.ops = &tegra30_smmu_ops,
+ };
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 1ef01647265f..4f1b0bdb9cf8 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -778,14 +778,9 @@ int cxl_reset(struct cxl *adapter)
+ {
+ 	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
+ 	int rc;
+-	int i;
+-	u32 val;
+ 
+ 	dev_info(&dev->dev, "CXL reset\n");
+ 
+-	for (i = 0; i < adapter->slices; i++)
+-		cxl_remove_afu(adapter->afu[i]);
+-
+ 	/* pcie_warm_reset requests a fundamental pci reset which includes a
+ 	 * PERST assert/deassert.  PERST triggers a loading of the image
+ 	 * if "user" or "factory" is selected in sysfs */
+@@ -794,20 +789,6 @@ int cxl_reset(struct cxl *adapter)
+ 		return rc;
+ 	}
+ 
+-	/* the PERST done above fences the PHB.  So, reset depends on EEH
+-	 * to unbind the driver, tell Sapphire to reinit the PHB, and rebind
+-	 * the driver.  Do an mmio read explictly to ensure EEH notices the
+-	 * fenced PHB.  Retry for a few seconds before giving up. */
+-	i = 0;
+-	while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) &&
+-		(i < 5)) {
+-		msleep(500);
+-		i++;
+-	}
+-
+-	if (val != 0xffffffff)
+-		dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n");
+-
+ 	return rc;
+ }
+ 
+@@ -1062,8 +1043,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ 	int slice;
+ 	int rc;
+ 
+-	pci_dev_get(dev);
+-
+ 	if (cxl_verbose)
+ 		dump_cxl_config_space(dev);
+ 
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index 92e7671426eb..588fb7908642 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -330,8 +330,10 @@ EXPORT_SYMBOL(mmc_start_bkops);
+  */
+ static void mmc_wait_data_done(struct mmc_request *mrq)
+ {
+-	mrq->host->context_info.is_done_rcv = true;
+-	wake_up_interruptible(&mrq->host->context_info.wait);
++	struct mmc_context_info *context_info = &mrq->host->context_info;
++
++	context_info->is_done_rcv = true;
++	wake_up_interruptible(&context_info->wait);
+ }
+ 
+ static void mmc_wait_done(struct mmc_request *mrq)
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 7a3fc16d0a6c..53cfc7cedefe 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -549,6 +549,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
+ static const struct sdhci_pci_fixes sdhci_o2 = {
+ 	.probe = sdhci_pci_o2_probe,
+ 	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
++	.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
+ 	.probe_slot = sdhci_pci_o2_probe_slot,
+ 	.resume = sdhci_pci_o2_resume,
+ };
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index bec8a307f8cd..fd41b91436ec 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1146,6 +1146,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host)
+ 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
+ 		break;
+ 	case MMC_TIMING_UHS_DDR50:
++	case MMC_TIMING_MMC_DDR52:
+ 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
+ 		break;
+ 	case MMC_TIMING_MMC_HS400:
+@@ -1598,7 +1599,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
+ 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
+ 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
+ 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
+-				 (ios->timing == MMC_TIMING_UHS_DDR50))) {
++				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
++				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
+ 			u16 preset;
+ 
+ 			sdhci_enable_preset_value(host, true);
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index d5fe5d5f490f..16d87bf8ac3c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
+ 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+ }
+ 
++static struct slave *bond_get_old_active(struct bonding *bond,
++					 struct slave *new_active)
++{
++	struct slave *slave;
++	struct list_head *iter;
++
++	bond_for_each_slave(bond, slave, iter) {
++		if (slave == new_active)
++			continue;
++
++		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
++			return slave;
++	}
++
++	return NULL;
++}
++
+ /* bond_do_fail_over_mac
+  *
+  * Perform special MAC address swapping for fail_over_mac settings
+@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
+ 		if (!new_active)
+ 			return;
+ 
++		if (!old_active)
++			old_active = bond_get_old_active(bond, new_active);
++
+ 		if (old_active) {
+ 			ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ 			ether_addr_copy(saddr.sa_data,
+@@ -1902,6 +1922,7 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
+ 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+ 		netdev_info(bond_dev, "Destroying bond %s\n",
+ 			    bond_dev->name);
++		bond_remove_proc_entry(bond);
+ 		unregister_netdevice(bond_dev);
+ 	}
+ 	return ret;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index 069952fa5d64..0d8af5bb5907 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev,
+ 	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ 				sizeof(temperature));
+ 	spin_unlock_bh(&tp->lock);
+-	return sprintf(buf, "%u\n", temperature);
++	return sprintf(buf, "%u\n", temperature * 1000);
+ }
+ 
+ 
+diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
+index caae6cb2bc1a..a1c30ee60888 100644
+--- a/drivers/net/ethernet/brocade/bna/bnad.c
++++ b/drivers/net/ethernet/brocade/bna/bnad.c
+@@ -675,6 +675,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+ 			if (!next_cmpl->valid)
+ 				break;
+ 		}
++		packets++;
+ 
+ 		/* TODO: BNA_CQ_EF_LOCAL ? */
+ 		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
+@@ -691,7 +692,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
+ 		else
+ 			bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
+ 
+-		packets++;
+ 		rcb->rxq->rx_packets++;
+ 		rcb->rxq->rx_bytes += totlen;
+ 		ccb->bytes_per_intr += totlen;
+diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+index c754b2027281..c9da1b5d4804 100644
+--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
+ 
+ static inline bool fm10k_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
+diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
+index c2bd4f98a837..212d668dabb3 100644
+--- a/drivers/net/ethernet/intel/igb/igb.h
++++ b/drivers/net/ethernet/intel/igb/igb.h
+@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+ 			 struct sk_buff *skb);
+ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
++void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
+ #ifdef CONFIG_IGB_HWMON
+ void igb_sysfs_exit(struct igb_adapter *adapter);
+ int igb_sysfs_init(struct igb_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+index d5673eb90c54..0afc0913e5b9 100644
+--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
+@@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev,
+ {
+ 	struct igb_adapter *adapter = netdev_priv(netdev);
+ 	unsigned int count = ch->combined_count;
++	unsigned int max_combined = 0;
+ 
+ 	/* Verify they are not requesting separate vectors */
+ 	if (!count || ch->rx_count || ch->tx_count)
+@@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev,
+ 		return -EINVAL;
+ 
+ 	/* Verify the number of channels doesn't exceed hw limits */
+-	if (count > igb_max_channels(adapter))
++	max_combined = igb_max_channels(adapter);
++	if (count > max_combined)
+ 		return -EINVAL;
+ 
+ 	if (count != adapter->rss_queues) {
+ 		adapter->rss_queues = count;
++		igb_set_flag_queue_pairs(adapter, max_combined);
+ 
+ 		/* Hardware has to reinitialize queues and interrupts to
+ 		 * match the new configuration.
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
+index a0a9b1fcb5e8..4f6bf996851e 100644
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ 
+ 	/* allocate q_vector and rings */
+ 	q_vector = adapter->q_vector[v_idx];
+-	if (!q_vector)
++	if (!q_vector) {
+ 		q_vector = kzalloc(size, GFP_KERNEL);
+-	else
++	} else if (size > ksize(q_vector)) {
++		kfree_rcu(q_vector, rcu);
++		q_vector = kzalloc(size, GFP_KERNEL);
++	} else {
+ 		memset(q_vector, 0, size);
++	}
+ 	if (!q_vector)
+ 		return -ENOMEM;
+ 
+@@ -2901,6 +2905,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
+ 
+ 	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+ 
++	igb_set_flag_queue_pairs(adapter, max_rss_queues);
++}
++
++void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
++			      const u32 max_rss_queues)
++{
++	struct e1000_hw *hw = &adapter->hw;
++
+ 	/* Determine if we need to pair queues. */
+ 	switch (hw->mac.type) {
+ 	case e1000_82575:
+@@ -6584,7 +6596,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ 
+ static inline bool igb_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 5be12a00e1f4..463ff47200f1 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
+ 
+ static inline bool ixgbe_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+index e71cdde9cb01..1d7b00b038a2 100644
+--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ 
+ static inline bool ixgbevf_page_is_reserved(struct page *page)
+ {
+-	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
++	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+ }
+ 
+ /**
+diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
+index 2619c9fbf42d..983b1d51244d 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
+@@ -573,7 +573,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 							continue;
+ 						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
+ 							 __func__, i, port);
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ 							eqe->event.port_change.port =
+ 								cpu_to_be32(
+@@ -608,7 +608,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
+ 							continue;
+ 						if (i == mlx4_master_func_num(dev))
+ 							continue;
+-						s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
++						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
+ 						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
+ 							eqe->event.port_change.port =
+ 								cpu_to_be32(
+diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
+index cf98cc9bbc8d..73b6fc21ea00 100644
+--- a/drivers/net/ethernet/rocker/rocker.c
++++ b/drivers/net/ethernet/rocker/rocker.c
+@@ -4587,6 +4587,7 @@ static void rocker_remove_ports(struct rocker *rocker)
+ 		rocker_port = rocker->ports[i];
+ 		rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
+ 		unregister_netdev(rocker_port->dev);
++		free_netdev(rocker_port->dev);
+ 	}
+ 	kfree(rocker->ports);
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
+index ad3996038018..799c2929c536 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
+@@ -158,6 +158,8 @@ struct dma_desc {
+ 			u32 buffer2_size:13;
+ 			u32 reserved4:3;
+ 		} etx;		/* -- enhanced -- */
++
++		u64 all_flags;
+ 	} des01;
+ 	unsigned int des2;
+ 	unsigned int des3;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+index 6249a4ec08f0..573708123338 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+@@ -38,7 +38,6 @@ struct rk_priv_data {
+ 	bool clock_input;
+ 
+ 	struct clk *clk_mac;
+-	struct clk *clk_mac_pll;
+ 	struct clk *gmac_clkin;
+ 	struct clk *mac_clk_rx;
+ 	struct clk *mac_clk_tx;
+@@ -208,7 +207,7 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv)
+ 		dev_info(dev, "%s: clock input from PHY\n", __func__);
+ 	} else {
+ 		if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+-			clk_set_rate(bsp_priv->clk_mac_pll, 50000000);
++			clk_set_rate(bsp_priv->clk_mac, 50000000);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+index 1e2bcf5f89e1..7d944449f5ef 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ 				  int mode, int end)
+ {
++	p->des01.all_flags = 0;
+ 	p->des01.erx.own = 1;
+ 	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
+ 
+@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ 
+ static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-	p->des01.etx.own = 0;
++	p->des01.all_flags = 0;
+ 	if (mode == STMMAC_CHAIN_MODE)
+ 		ehn_desc_tx_set_on_chain(p, end);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+index 35ad4f427ae2..48c3456445b2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ 			       int end)
+ {
++	p->des01.all_flags = 0;
+ 	p->des01.rx.own = 1;
+ 	p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
+ 
+@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ 
+ static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+ {
+-	p->des01.tx.own = 0;
++	p->des01.all_flags = 0;
+ 	if (mode == STMMAC_CHAIN_MODE)
+ 		ndesc_tx_set_on_chain(p, end);
+ 	else
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 2c5ce2baca87..c274cdc5df1e 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -829,8 +829,11 @@ static int stmmac_init_phy(struct net_device *dev)
+ 
+ 	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+ 
+-	if (IS_ERR(phydev)) {
++	if (IS_ERR_OR_NULL(phydev)) {
+ 		pr_err("%s: Could not attach to PHY\n", dev->name);
++		if (!phydev)
++			return -ENODEV;
++
+ 		return PTR_ERR(phydev);
+ 	}
+ 
+@@ -1189,41 +1192,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+ 		goto err_tx_skbuff;
+ 
+ 	if (priv->extend_desc) {
+-		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+-						   sizeof(struct
+-							  dma_extended_desc),
+-						   &priv->dma_rx_phy,
+-						   GFP_KERNEL);
++		priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize *
++						    sizeof(struct
++							   dma_extended_desc),
++						    &priv->dma_rx_phy,
++						    GFP_KERNEL);
+ 		if (!priv->dma_erx)
+ 			goto err_dma;
+ 
+-		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+-						   sizeof(struct
+-							  dma_extended_desc),
+-						   &priv->dma_tx_phy,
+-						   GFP_KERNEL);
++		priv->dma_etx = dma_zalloc_coherent(priv->device, txsize *
++						    sizeof(struct
++							   dma_extended_desc),
++						    &priv->dma_tx_phy,
++						    GFP_KERNEL);
+ 		if (!priv->dma_etx) {
+ 			dma_free_coherent(priv->device, priv->dma_rx_size *
+-					sizeof(struct dma_extended_desc),
+-					priv->dma_erx, priv->dma_rx_phy);
++					  sizeof(struct dma_extended_desc),
++					  priv->dma_erx, priv->dma_rx_phy);
+ 			goto err_dma;
+ 		}
+ 	} else {
+-		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_rx_phy,
+-						  GFP_KERNEL);
++		priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_rx_phy,
++						   GFP_KERNEL);
+ 		if (!priv->dma_rx)
+ 			goto err_dma;
+ 
+-		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+-						  sizeof(struct dma_desc),
+-						  &priv->dma_tx_phy,
+-						  GFP_KERNEL);
++		priv->dma_tx = dma_zalloc_coherent(priv->device, txsize *
++						   sizeof(struct dma_desc),
++						   &priv->dma_tx_phy,
++						   GFP_KERNEL);
+ 		if (!priv->dma_tx) {
+ 			dma_free_coherent(priv->device, priv->dma_rx_size *
+-					sizeof(struct dma_desc),
+-					priv->dma_rx, priv->dma_rx_phy);
++					  sizeof(struct dma_desc),
++					  priv->dma_rx, priv->dma_rx_phy);
+ 			goto err_dma;
+ 		}
+ 	}
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 63c7810e1545..7fbca37a1adf 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ 	else
+ 		vi->hdr_len = sizeof(struct virtio_net_hdr);
+ 
+-	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
++	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
++	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
+ 		vi->any_header_sg = true;
+ 
+ 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+index 23806c243a53..fd4a5353d216 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
+ 	{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
+ 	{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ 	{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
++	{RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+ 	{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
+ 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+index 57966e3c8e8d..3fa2fb7c8e4e 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
+ 
+ 	rtl_write_byte(rtlpriv, MSR, bt_msr);
+ 	rtlpriv->cfg->ops->led_control(hw, ledaction);
+-	if ((bt_msr & 0xfc) == MSR_AP)
++	if ((bt_msr & MSR_MASK) == MSR_AP)
+ 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ 	else
+ 		rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+index 53668fc8f23e..1d6110f9c1fb 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h
+@@ -429,6 +429,7 @@
+ #define	MSR_ADHOC				0x01
+ #define	MSR_INFRA				0x02
+ #define	MSR_AP					0x03
++#define MSR_MASK				0x03
+ 
+ #define	RRSR_RSC_OFFSET				21
+ #define	RRSR_SHORT_OFFSET			23
+diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
+index 0d2594395ffb..0866c5dfdf87 100644
+--- a/drivers/net/xen-netback/netback.c
++++ b/drivers/net/xen-netback/netback.c
+@@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
+ 		smp_rmb();
+ 
+ 		while (dc != dp) {
+-			BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS);
++			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
+ 			pending_idx =
+ 				queue->dealloc_ring[pending_index(dc++)];
+ 
+-			pending_idx_release[gop-queue->tx_unmap_ops] =
++			pending_idx_release[gop - queue->tx_unmap_ops] =
+ 				pending_idx;
+-			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
++			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
+ 				queue->mmap_pages[pending_idx];
+ 			gnttab_set_unmap_op(gop,
+ 					    idx_to_kaddr(queue, pending_idx),
+diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c
+index d251f7229c4e..051286562fab 100644
+--- a/drivers/nfc/st21nfca/st21nfca.c
++++ b/drivers/nfc/st21nfca/st21nfca.c
+@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 				ST21NFCA_DEVICE_MGNT_GATE,
+ 				ST21NFCA_DEVICE_MGNT_PIPE);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Get pipe list */
+ 	r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE,
+ 			ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list),
+ 			&skb_pipe_list);
+ 	if (r < 0)
+-		goto free_info;
++		return r;
+ 
+ 	/* Complete the existing gate_pipe table */
+ 	for (i = 0; i < skb_pipe_list->len; i++) {
+@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 			info->src_host_id != ST21NFCA_ESE_HOST_ID) {
+ 			pr_err("Unexpected apdu_reader pipe on host %x\n",
+ 				info->src_host_id);
++			kfree_skb(skb_pipe_info);
+ 			continue;
+ 		}
+ 
+@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 			hdev->pipes[st21nfca_gates[j].pipe].dest_host =
+ 							info->src_host_id;
+ 		}
++		kfree_skb(skb_pipe_info);
+ 	}
+ 
+ 	/*
+@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev)
+ 					st21nfca_gates[i].gate,
+ 					st21nfca_gates[i].pipe);
+ 			if (r < 0)
+-				goto free_info;
++				goto free_list;
+ 		}
+ 	}
+ 
+ 	memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates));
+-free_info:
+-	kfree_skb(skb_pipe_info);
++free_list:
+ 	kfree_skb(skb_pipe_list);
+ 	return r;
+ }
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index cde35c5d0191..d91f721a05b6 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -955,7 +955,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
+ }
+ 
+ #ifdef CONFIG_HAVE_MEMBLOCK
+-#define MAX_PHYS_ADDR	((phys_addr_t)~0)
++#ifndef MAX_MEMBLOCK_ADDR
++#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
++#endif
+ 
+ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+ {
+@@ -972,16 +974,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
+ 	}
+ 	size &= PAGE_MASK;
+ 
+-	if (base > MAX_PHYS_ADDR) {
++	if (base > MAX_MEMBLOCK_ADDR) {
+ 		pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ 				base, base + size);
+ 		return;
+ 	}
+ 
+-	if (base + size - 1 > MAX_PHYS_ADDR) {
++	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
+ 		pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+-				((u64)MAX_PHYS_ADDR) + 1, base + size);
+-		size = MAX_PHYS_ADDR - base + 1;
++				((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
++		size = MAX_MEMBLOCK_ADDR - base + 1;
+ 	}
+ 
+ 	if (base + size < phys_offset) {
+diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
+index dceb9ddfd99a..a32c1f6c252c 100644
+--- a/drivers/parisc/lba_pci.c
++++ b/drivers/parisc/lba_pci.c
+@@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev)
+ 	if (lba_dev->hba.lmmio_space.flags)
+ 		pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
+ 					lba_dev->hba.lmmio_space_offset);
+-	if (lba_dev->hba.gmmio_space.flags)
+-		pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
++	if (lba_dev->hba.gmmio_space.flags) {
++		/* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */
++		pr_warn("LBA: Not registering GMMIO space %pR\n",
++			&lba_dev->hba.gmmio_space);
++	}
+ 
+ 	pci_add_resource(&resources, &lba_dev->hba.bus_num);
+ 
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 944f50015ed0..73de4efcbe6e 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -2,7 +2,7 @@
+ # PCI configuration
+ #
+ config PCI_BUS_ADDR_T_64BIT
+-	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
++	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
+ 	depends on PCI
+ 
+ config PCI_MSI
+diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
+index 2f797cb7e205..774781450885 100644
+--- a/drivers/pinctrl/pinctrl-at91.c
++++ b/drivers/pinctrl/pinctrl-at91.c
+@@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = {
+ static void __iomem *pin_to_controller(struct at91_pinctrl *info,
+ 				 unsigned int bank)
+ {
++	if (!gpio_chips[bank])
++		return NULL;
++
+ 	return gpio_chips[bank]->regbase;
+ }
+ 
+@@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
+ 		pin = &pins_conf[i];
+ 		at91_pin_dbg(info->dev, pin);
+ 		pio = pin_to_controller(info, pin->bank);
++
++		if (!pio)
++			continue;
++
+ 		mask = pin_to_mask(pin->pin);
+ 		at91_mux_disable_interrupt(pio, mask);
+ 		switch (pin->mux) {
+@@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
+ 	*config = 0;
+ 	dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id);
+ 	pio = pin_to_controller(info, pin_to_bank(pin_id));
++
++	if (!pio)
++		return -EINVAL;
++
+ 	pin = pin_id % MAX_NB_GPIO_PER_BANK;
+ 
+ 	if (at91_mux_get_multidrive(pio, pin))
+@@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev,
+ 			"%s:%d, pin_id=%d, config=0x%lx",
+ 			__func__, __LINE__, pin_id, config);
+ 		pio = pin_to_controller(info, pin_to_bank(pin_id));
++
++		if (!pio)
++			return -EINVAL;
++
+ 		pin = pin_id % MAX_NB_GPIO_PER_BANK;
+ 		mask = pin_to_mask(pin);
+ 
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index cb7cd8d79329..cd78f1166b33 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -852,6 +852,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo Yoga 3 14",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 3 Pro 1370",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
+index 4337c3bc6ace..afea84c7a155 100644
+--- a/drivers/rtc/rtc-abx80x.c
++++ b/drivers/rtc/rtc-abx80x.c
+@@ -28,7 +28,7 @@
+ #define ABX8XX_REG_WD		0x07
+ 
+ #define ABX8XX_REG_CTRL1	0x10
+-#define ABX8XX_CTRL_WRITE	BIT(1)
++#define ABX8XX_CTRL_WRITE	BIT(0)
+ #define ABX8XX_CTRL_12_24	BIT(6)
+ 
+ #define ABX8XX_REG_CFG_KEY	0x1f
+diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
+index 76cbad7a99d3..c5a2523b0185 100644
+--- a/drivers/rtc/rtc-s3c.c
++++ b/drivers/rtc/rtc-s3c.c
+@@ -39,6 +39,7 @@ struct s3c_rtc {
+ 	void __iomem *base;
+ 	struct clk *rtc_clk;
+ 	struct clk *rtc_src_clk;
++	bool clk_disabled;
+ 
+ 	struct s3c_rtc_data *data;
+ 
+@@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info)
+ 	unsigned long irq_flags;
+ 
+ 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+-	clk_enable(info->rtc_clk);
+-	if (info->data->needs_src_clk)
+-		clk_enable(info->rtc_src_clk);
++	if (info->clk_disabled) {
++		clk_enable(info->rtc_clk);
++		if (info->data->needs_src_clk)
++			clk_enable(info->rtc_src_clk);
++		info->clk_disabled = false;
++	}
+ 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+ }
+ 
+@@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
+ 	unsigned long irq_flags;
+ 
+ 	spin_lock_irqsave(&info->alarm_clk_lock, irq_flags);
+-	if (info->data->needs_src_clk)
+-		clk_disable(info->rtc_src_clk);
+-	clk_disable(info->rtc_clk);
++	if (!info->clk_disabled) {
++		if (info->data->needs_src_clk)
++			clk_disable(info->rtc_src_clk);
++		clk_disable(info->rtc_clk);
++		info->clk_disabled = true;
++	}
+ 	spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
+ }
+ 
+@@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
+ 
+ 	s3c_rtc_disable_clk(info);
+ 
++	if (enabled)
++		s3c_rtc_enable_clk(info);
++	else
++		s3c_rtc_disable_clk(info);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
+index 8c70d785ba73..ab60287ee72d 100644
+--- a/drivers/rtc/rtc-s5m.c
++++ b/drivers/rtc/rtc-s5m.c
+@@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
+ 	case S2MPS13X:
+ 		data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
+ 		ret = regmap_write(info->regmap, info->regs->ctrl, data[0]);
++		if (ret < 0)
++			break;
++
++		/*
++		 * Should set WUDR & (RUDR or AUDR) bits to high after writing
++		 * RTC_CTRL register like writing Alarm registers. We can't find
++		 * the description from datasheet but vendor code does that
++		 * really.
++		 */
++		ret = s5m8767_rtc_set_alarm_reg(info);
+ 		break;
+ 
+ 	default:
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 94e909c5a503..00d18c2bdb0f 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -1875,8 +1875,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 
+ 			wait_for_commit(root, prev_trans);
++			ret = prev_trans->aborted;
+ 
+ 			btrfs_put_transaction(prev_trans);
++			if (ret)
++				goto cleanup_transaction;
+ 		} else {
+ 			spin_unlock(&root->fs_info->trans_lock);
+ 		}
+diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
+index 8b7898b7670f..64a9bca976d0 100644
+--- a/fs/cifs/ioctl.c
++++ b/fs/cifs/ioctl.c
+@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
+ 		goto out_drop_write;
+ 	}
+ 
++	if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) {
++		rc = -EBADF;
++		cifs_dbg(VFS, "src file seems to be from a different filesystem type\n");
++		goto out_fput;
++	}
++
+ 	if ((!src_file.file->private_data) || (!dst_file->private_data)) {
+ 		rc = -EBADF;
+ 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
+diff --git a/fs/coredump.c b/fs/coredump.c
+index bbbe139ab280..8dd099dc5f9b 100644
+--- a/fs/coredump.c
++++ b/fs/coredump.c
+@@ -506,10 +506,10 @@ void do_coredump(const siginfo_t *siginfo)
+ 	const struct cred *old_cred;
+ 	struct cred *cred;
+ 	int retval = 0;
+-	int flag = 0;
+ 	int ispipe;
+ 	struct files_struct *displaced;
+-	bool need_nonrelative = false;
++	/* require nonrelative corefile path and be extra careful */
++	bool need_suid_safe = false;
+ 	bool core_dumped = false;
+ 	static atomic_t core_dump_count = ATOMIC_INIT(0);
+ 	struct coredump_params cprm = {
+@@ -543,9 +543,8 @@ void do_coredump(const siginfo_t *siginfo)
+ 	 */
+ 	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
+ 		/* Setuid core dump mode */
+-		flag = O_EXCL;		/* Stop rewrite attacks */
+ 		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
+-		need_nonrelative = true;
++		need_suid_safe = true;
+ 	}
+ 
+ 	retval = coredump_wait(siginfo->si_signo, &core_state);
+@@ -626,7 +625,7 @@ void do_coredump(const siginfo_t *siginfo)
+ 		if (cprm.limit < binfmt->min_coredump)
+ 			goto fail_unlock;
+ 
+-		if (need_nonrelative && cn.corename[0] != '/') {
++		if (need_suid_safe && cn.corename[0] != '/') {
+ 			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
+ 				"to fully qualified path!\n",
+ 				task_tgid_vnr(current), current->comm);
+@@ -634,8 +633,35 @@ void do_coredump(const siginfo_t *siginfo)
+ 			goto fail_unlock;
+ 		}
+ 
++		/*
++		 * Unlink the file if it exists unless this is a SUID
++		 * binary - in that case, we're running around with root
++		 * privs and don't want to unlink another user's coredump.
++		 */
++		if (!need_suid_safe) {
++			mm_segment_t old_fs;
++
++			old_fs = get_fs();
++			set_fs(KERNEL_DS);
++			/*
++			 * If it doesn't exist, that's fine. If there's some
++			 * other problem, we'll catch it at the filp_open().
++			 */
++			(void) sys_unlink((const char __user *)cn.corename);
++			set_fs(old_fs);
++		}
++
++		/*
++		 * There is a race between unlinking and creating the
++		 * file, but if that causes an EEXIST here, that's
++		 * fine - another process raced with us while creating
++		 * the corefile, and the other process won. To userspace,
++		 * what matters is that at least one of the two processes
++		 * writes its coredump successfully, not which one.
++		 */
+ 		cprm.file = filp_open(cn.corename,
+-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
++				 O_CREAT | 2 | O_NOFOLLOW |
++				 O_LARGEFILE | O_EXCL,
+ 				 0600);
+ 		if (IS_ERR(cprm.file))
+ 			goto fail_unlock;
+@@ -652,11 +678,15 @@ void do_coredump(const siginfo_t *siginfo)
+ 		if (!S_ISREG(inode->i_mode))
+ 			goto close_fail;
+ 		/*
+-		 * Dont allow local users get cute and trick others to coredump
+-		 * into their pre-created files.
++		 * Don't dump core if the filesystem changed owner or mode
++		 * of the file during file creation. This is an issue when
++		 * a process dumps core while its cwd is e.g. on a vfat
++		 * filesystem.
+ 		 */
+ 		if (!uid_eq(inode->i_uid, current_fsuid()))
+ 			goto close_fail;
++		if ((inode->i_mode & 0677) != 0600)
++			goto close_fail;
+ 		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
+ 			goto close_fail;
+ 		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
+diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
+index 8db0b464483f..63cd2c147221 100644
+--- a/fs/ecryptfs/dentry.c
++++ b/fs/ecryptfs/dentry.c
+@@ -45,20 +45,20 @@
+ static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+ {
+ 	struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+-	int rc;
+-
+-	if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE))
+-		return 1;
++	int rc = 1;
+ 
+ 	if (flags & LOOKUP_RCU)
+ 		return -ECHILD;
+ 
+-	rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
++	if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE)
++		rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
++
+ 	if (d_really_is_positive(dentry)) {
+-		struct inode *lower_inode =
+-			ecryptfs_inode_to_lower(d_inode(dentry));
++		struct inode *inode = d_inode(dentry);
+ 
+-		fsstack_copy_attr_all(d_inode(dentry), lower_inode);
++		fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
++		if (!inode->i_nlink)
++			return 0;
+ 	}
+ 	return rc;
+ }
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 6b4eb94b04a5..ff89971e3ee0 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -324,6 +324,22 @@ static void save_error_info(struct super_block *sb, const char *func,
+ 	ext4_commit_super(sb, 1);
+ }
+ 
++/*
++ * The del_gendisk() function uninitializes the disk-specific data
++ * structures, including the bdi structure, without telling anyone
++ * else.  Once this happens, any attempt to call mark_buffer_dirty()
++ * (for example, by ext4_commit_super), will cause a kernel OOPS.
++ * This is a kludge to prevent these oops until we can put in a proper
++ * hook in del_gendisk() to inform the VFS and file system layers.
++ */
++static int block_device_ejected(struct super_block *sb)
++{
++	struct inode *bd_inode = sb->s_bdev->bd_inode;
++	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);
++
++	return bdi->dev == NULL;
++}
++
+ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
+ {
+ 	struct super_block		*sb = journal->j_private;
+@@ -4591,7 +4607,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+ 	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+ 	int error = 0;
+ 
+-	if (!sbh)
++	if (!sbh || block_device_ejected(sb))
+ 		return error;
+ 	if (buffer_write_io_error(sbh)) {
+ 		/*
+@@ -4807,10 +4823,11 @@ static int ext4_freeze(struct super_block *sb)
+ 		error = jbd2_journal_flush(journal);
+ 		if (error < 0)
+ 			goto out;
++
++		/* Journal blocked and flushed, clear needs_recovery flag. */
++		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 	}
+ 
+-	/* Journal blocked and flushed, clear needs_recovery flag. */
+-	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
+ 	error = ext4_commit_super(sb, 1);
+ out:
+ 	if (journal)
+@@ -4828,8 +4845,11 @@ static int ext4_unfreeze(struct super_block *sb)
+ 	if (sb->s_flags & MS_RDONLY)
+ 		return 0;
+ 
+-	/* Reset the needs_recovery flag before the fs is unlocked. */
+-	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
++	if (EXT4_SB(sb)->s_journal) {
++		/* Reset the needs_recovery flag before the fs is unlocked. */
++		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
++	}
++
+ 	ext4_commit_super(sb, 1);
+ 	return 0;
+ }
+diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
+index d3fa6bd9503e..221719eac5de 100644
+--- a/fs/hfs/bnode.c
++++ b/fs/hfs/bnode.c
+@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -398,11 +397,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-	//int i;
++	int i;
+ 
+-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
+-	//	if (node->page[i])
+-	//		page_cache_release(node->page[i]);
++	for (i = 0; i < node->tree->pages_per_bnode; i++)
++		if (node->page[i])
++			page_cache_release(node->page[i]);
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
+index 9f4ee7f52026..6fc766df0461 100644
+--- a/fs/hfs/brec.c
++++ b/fs/hfs/brec.c
+@@ -131,13 +131,16 @@ skip:
+ 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
+ 	hfs_bnode_dump(node);
+ 
+-	if (new_node) {
+-		/* update parent key if we inserted a key
+-		 * at the start of the first node
+-		 */
+-		if (!rec && new_node != node)
+-			hfs_brec_update_parent(fd);
++	/*
++	 * update parent key if we inserted a key
++	 * at the start of the node and it is not the new node
++	 */
++	if (!rec && new_node != node) {
++		hfs_bnode_read_key(node, fd->search_key, data_off + size);
++		hfs_brec_update_parent(fd);
++	}
+ 
++	if (new_node) {
+ 		hfs_bnode_put(fd->bnode);
+ 		if (!new_node->parent) {
+ 			hfs_btree_inc_height(tree);
+@@ -166,9 +169,6 @@ skip:
+ 		goto again;
+ 	}
+ 
+-	if (!rec)
+-		hfs_brec_update_parent(fd);
+-
+ 	return 0;
+ }
+ 
+@@ -366,6 +366,8 @@ again:
+ 	if (IS_ERR(parent))
+ 		return PTR_ERR(parent);
+ 	__hfs_brec_find(parent, fd);
++	if (fd->record < 0)
++		return -ENOENT;
+ 	hfs_bnode_dump(parent);
+ 	rec = fd->record;
+ 
+diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
+index 759708fd9331..63924662aaf3 100644
+--- a/fs/hfsplus/bnode.c
++++ b/fs/hfsplus/bnode.c
+@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
+ 			page_cache_release(page);
+ 			goto fail;
+ 		}
+-		page_cache_release(page);
+ 		node->page[i] = page;
+ 	}
+ 
+@@ -566,13 +565,11 @@ node_error:
+ 
+ void hfs_bnode_free(struct hfs_bnode *node)
+ {
+-#if 0
+ 	int i;
+ 
+ 	for (i = 0; i < node->tree->pages_per_bnode; i++)
+ 		if (node->page[i])
+ 			page_cache_release(node->page[i]);
+-#endif
+ 	kfree(node);
+ }
+ 
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 4227dc4f7437..8c44654ce274 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+  * journal_clean_one_cp_list
+  *
+  * Find all the written-back checkpoint buffers in the given list and
+- * release them.
++ * release them. If 'destroy' is set, clean all buffers unconditionally.
+  *
+  * Called with j_list_lock held.
+  * Returns 1 if we freed the transaction, 0 otherwise.
+  */
+-static int journal_clean_one_cp_list(struct journal_head *jh)
++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy)
+ {
+ 	struct journal_head *last_jh;
+ 	struct journal_head *next_jh = jh;
+@@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
+ 	do {
+ 		jh = next_jh;
+ 		next_jh = jh->b_cpnext;
+-		ret = __try_to_free_cp_buf(jh);
++		if (!destroy)
++			ret = __try_to_free_cp_buf(jh);
++		else
++			ret = __jbd2_journal_remove_checkpoint(jh) + 1;
+ 		if (!ret)
+ 			return freed;
+ 		if (ret == 2)
+@@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh)
+  * journal_clean_checkpoint_list
+  *
+  * Find all the written-back checkpoint buffers in the journal and release them.
++ * If 'destroy' is set, release all buffers unconditionally.
+  *
+  * Called with j_list_lock held.
+  */
+-void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
+ {
+ 	transaction_t *transaction, *last_transaction, *next_transaction;
+ 	int ret;
+@@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 	do {
+ 		transaction = next_transaction;
+ 		next_transaction = transaction->t_cpnext;
+-		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
++		ret = journal_clean_one_cp_list(transaction->t_checkpoint_list,
++						destroy);
+ 		/*
+ 		 * This function only frees up some memory if possible so we
+ 		 * dont have an obligation to finish processing. Bail out if
+@@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ 		 * we can possibly see not yet submitted buffers on io_list
+ 		 */
+ 		ret = journal_clean_one_cp_list(transaction->
+-				t_checkpoint_io_list);
++				t_checkpoint_io_list, destroy);
+ 		if (need_resched())
+ 			return;
+ 		/*
+@@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+ }
+ 
+ /*
++ * Remove buffers from all checkpoint lists as journal is aborted and we just
++ * need to free memory
++ */
++void jbd2_journal_destroy_checkpoint(journal_t *journal)
++{
++	/*
++	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
++	 * early due to a need of rescheduling.
++	 */
++	while (1) {
++		spin_lock(&journal->j_list_lock);
++		if (!journal->j_checkpoint_transactions) {
++			spin_unlock(&journal->j_list_lock);
++			break;
++		}
++		__jbd2_journal_clean_checkpoint_list(journal, true);
++		spin_unlock(&journal->j_list_lock);
++		cond_resched();
++	}
++}
++
++/*
+  * journal_remove_checkpoint: called after a buffer has been committed
+  * to disk (either by being write-back flushed to disk, or being
+  * committed to the log).
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
+index b73e0215baa7..362e5f614450 100644
+--- a/fs/jbd2/commit.c
++++ b/fs/jbd2/commit.c
+@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
+ 	 * frees some memory
+ 	 */
+ 	spin_lock(&journal->j_list_lock);
+-	__jbd2_journal_clean_checkpoint_list(journal);
++	__jbd2_journal_clean_checkpoint_list(journal, false);
+ 	spin_unlock(&journal->j_list_lock);
+ 
+ 	jbd_debug(3, "JBD2: commit phase 1\n");
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index 112fad9e1e20..7003c0925760 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal)
+ 	while (journal->j_checkpoint_transactions != NULL) {
+ 		spin_unlock(&journal->j_list_lock);
+ 		mutex_lock(&journal->j_checkpoint_mutex);
+-		jbd2_log_do_checkpoint(journal);
++		err = jbd2_log_do_checkpoint(journal);
+ 		mutex_unlock(&journal->j_checkpoint_mutex);
++		/*
++		 * If checkpointing failed, just free the buffers to avoid
++		 * looping forever
++		 */
++		if (err) {
++			jbd2_journal_destroy_checkpoint(journal);
++			spin_lock(&journal->j_list_lock);
++			break;
++		}
+ 		spin_lock(&journal->j_list_lock);
+ 	}
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 6f5f0f425e86..fecd9201dbad 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -1039,6 +1039,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
+ 	    hdr->res.verf->committed == NFS_DATA_SYNC)
+ 		ff_layout_set_layoutcommit(hdr);
+ 
++	/* zero out fattr since we don't care DS attr at all */
++	hdr->fattr.valid = 0;
++	if (task->tk_status >= 0)
++		nfs_writeback_update_inode(hdr);
++
+ 	return 0;
+ }
+ 
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index f13e1969eedd..b28fa4cbea52 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo,
+ 					   range->offset, range->length))
+ 			continue;
+ 		/* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE)
+-		 * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4)
++		 * + array length + deviceid(NFS4_DEVICEID4_SIZE)
++		 * + status(4) + opnum(4)
+ 		 */
+ 		p = xdr_reserve_space(xdr,
+-				24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
++				28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE);
+ 		if (unlikely(!p))
+ 			return -ENOBUFS;
+ 		p = xdr_encode_hyper(p, err->offset);
+ 		p = xdr_encode_hyper(p, err->length);
+ 		p = xdr_encode_opaque_fixed(p, &err->stateid,
+ 					    NFS4_STATEID_SIZE);
++		/* Encode 1 error */
++		*p++ = cpu_to_be32(1);
+ 		p = xdr_encode_opaque_fixed(p, &err->deviceid,
+ 					    NFS4_DEVICEID4_SIZE);
+ 		*p++ = cpu_to_be32(err->status);
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index 5d25b9d97c29..976ba792fbc6 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1270,13 +1270,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
+ 	return 0;
+ }
+ 
+-static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
+-{
+-	if (!(fattr->valid & NFS_ATTR_FATTR_CTIME))
+-		return 0;
+-	return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0;
+-}
+-
+ static atomic_long_t nfs_attr_generation_counter;
+ 
+ static unsigned long nfs_read_attr_generation_counter(void)
+@@ -1425,7 +1418,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n
+ 	const struct nfs_inode *nfsi = NFS_I(inode);
+ 
+ 	return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
+-		nfs_ctime_need_update(inode, fattr) ||
+ 		((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
+ }
+ 
+@@ -1488,6 +1480,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr
+ {
+ 	unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ 
++	/*
++	 * Don't revalidate the pagecache if we hold a delegation, but do
++	 * force an attribute update
++	 */
++	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
++		invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
++
+ 	if (S_ISDIR(inode->i_mode))
+ 		invalid |= NFS_INO_INVALID_DATA;
+ 	nfs_set_cache_invalid(inode, invalid);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index d3f205126609..c245874d7e9d 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1152,6 +1152,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
+ 		return 0;
+ 	if ((delegation->type & fmode) != fmode)
+ 		return 0;
++	if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
++		return 0;
+ 	if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+ 		return 0;
+ 	nfs_mark_delegation_referenced(delegation);
+@@ -1216,6 +1218,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
+ }
+ 
+ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
++		nfs4_stateid *arg_stateid,
+ 		nfs4_stateid *stateid, fmode_t fmode)
+ {
+ 	clear_bit(NFS_O_RDWR_STATE, &state->flags);
+@@ -1234,8 +1237,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+ 	if (stateid == NULL)
+ 		return;
+ 	/* Handle races with OPEN */
+-	if (!nfs4_stateid_match_other(stateid, &state->open_stateid) ||
+-	    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
++	if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
++	    (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
++	    !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+ 		nfs_resync_open_stateid_locked(state);
+ 		return;
+ 	}
+@@ -1244,10 +1248,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+ 	nfs4_stateid_copy(&state->open_stateid, stateid);
+ }
+ 
+-static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
++static void nfs_clear_open_stateid(struct nfs4_state *state,
++	nfs4_stateid *arg_stateid,
++	nfs4_stateid *stateid, fmode_t fmode)
+ {
+ 	write_seqlock(&state->seqlock);
+-	nfs_clear_open_stateid_locked(state, stateid, fmode);
++	nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+ 	write_sequnlock(&state->seqlock);
+ 	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
+ 		nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
+@@ -2413,7 +2419,7 @@ static int _nfs4_do_open(struct inode *dir,
+ 		goto err_free_label;
+ 	state = ctx->state;
+ 
+-	if ((opendata->o_arg.open_flags & O_EXCL) &&
++	if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
+ 	    (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
+ 		nfs4_exclusive_attrset(opendata, sattr);
+ 
+@@ -2672,7 +2678,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
+ 				goto out_release;
+ 			}
+ 	}
+-	nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode);
++	nfs_clear_open_stateid(state, &calldata->arg.stateid,
++			res_stateid, calldata->arg.fmode);
+ out_release:
+ 	nfs_release_seqid(calldata->arg.seqid);
+ 	nfs_refresh_inode(calldata->inode, calldata->res.fattr);
+@@ -8571,6 +8578,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
+ 	.reboot_recovery_ops = &nfs41_reboot_recovery_ops,
+ 	.nograce_recovery_ops = &nfs41_nograce_recovery_ops,
+ 	.state_renewal_ops = &nfs41_state_renewal_ops,
++	.mig_recovery_ops = &nfs41_mig_recovery_ops,
+ };
+ #endif
+ 
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 7b4552678536..069914ce7641 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
+ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
+ {
+ 	spin_lock(&hdr->lock);
+-	if (pos < hdr->io_start + hdr->good_bytes) {
+-		set_bit(NFS_IOHDR_ERROR, &hdr->flags);
++	if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
++	    || pos < hdr->io_start + hdr->good_bytes) {
+ 		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
+ 		hdr->good_bytes = pos - hdr->io_start;
+ 		hdr->error = error;
+diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
+index f37e25b6311c..1705c78ee2d8 100644
+--- a/fs/nfs/pnfs_nfs.c
++++ b/fs/nfs/pnfs_nfs.c
+@@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
+ 	return false;
+ }
+ 
++/*
++ * Checks if 'dsaddrs1' is a subset of 'dsaddrs2'. If it is,
++ * declare a match.
++ */
+ static bool
+ _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
+ 			       const struct list_head *dsaddrs2)
+ {
+ 	struct nfs4_pnfs_ds_addr *da1, *da2;
+-
+-	/* step through both lists, comparing as we go */
+-	for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
+-	     da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
+-	     da1 != NULL && da2 != NULL;
+-	     da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
+-	     da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
+-		if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
+-				   (struct sockaddr *)&da2->da_addr))
+-			return false;
++	struct sockaddr *sa1, *sa2;
++	bool match = false;
++
++	list_for_each_entry(da1, dsaddrs1, da_node) {
++		sa1 = (struct sockaddr *)&da1->da_addr;
++		match = false;
++		list_for_each_entry(da2, dsaddrs2, da_node) {
++			sa2 = (struct sockaddr *)&da2->da_addr;
++			match = same_sockaddr(sa1, sa2);
++			if (match)
++				break;
++		}
++		if (!match)
++			break;
+ 	}
+-	if (da1 == NULL && da2 == NULL)
+-		return true;
+-
+-	return false;
++	return match;
+ }
+ 
+ /*
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index daf355642845..07115b9b1ad2 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1383,24 +1383,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
+ {
+ 	struct nfs_pgio_args *argp = &hdr->args;
+ 	struct nfs_pgio_res *resp = &hdr->res;
++	u64 size = argp->offset + resp->count;
+ 
+ 	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
++		fattr->size = size;
++	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
++		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
+ 		return;
+-	if (argp->offset + resp->count != fattr->size)
+-		return;
+-	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
++	}
++	if (size != fattr->size)
+ 		return;
+ 	/* Set attribute barrier */
+ 	nfs_fattr_set_barrier(fattr);
++	/* ...and update size */
++	fattr->valid |= NFS_ATTR_FATTR_SIZE;
+ }
+ 
+ void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
+ {
+-	struct nfs_fattr *fattr = hdr->res.fattr;
++	struct nfs_fattr *fattr = &hdr->fattr;
+ 	struct inode *inode = hdr->inode;
+ 
+-	if (fattr == NULL)
+-		return;
+ 	spin_lock(&inode->i_lock);
+ 	nfs_writeback_check_extend(hdr, fattr);
+ 	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 6e13504f736e..397798368b1a 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+ 	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+ }
+ 
+-static void
++static bool
+ unhash_delegation_locked(struct nfs4_delegation *dp)
+ {
+ 	struct nfs4_file *fp = dp->dl_stid.sc_file;
+ 
+ 	lockdep_assert_held(&state_lock);
+ 
++	if (list_empty(&dp->dl_perfile))
++		return false;
++
+ 	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
+ 	/* Ensure that deleg break won't try to requeue it */
+ 	++dp->dl_time;
+@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
+ 	list_del_init(&dp->dl_recall_lru);
+ 	list_del_init(&dp->dl_perfile);
+ 	spin_unlock(&fp->fi_lock);
++	return true;
+ }
+ 
+ static void destroy_delegation(struct nfs4_delegation *dp)
+ {
++	bool unhashed;
++
+ 	spin_lock(&state_lock);
+-	unhash_delegation_locked(dp);
++	unhashed = unhash_delegation_locked(dp);
+ 	spin_unlock(&state_lock);
+-	put_clnt_odstate(dp->dl_clnt_odstate);
+-	nfs4_put_deleg_lease(dp->dl_stid.sc_file);
+-	nfs4_put_stid(&dp->dl_stid);
++	if (unhashed) {
++		put_clnt_odstate(dp->dl_clnt_odstate);
++		nfs4_put_deleg_lease(dp->dl_stid.sc_file);
++		nfs4_put_stid(&dp->dl_stid);
++	}
+ }
+ 
+ static void revoke_delegation(struct nfs4_delegation *dp)
+@@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
+ 	sop->so_ops->so_free(sop);
+ }
+ 
+-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp)
++static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_file *fp = stp->st_stid.sc_file;
+ 
+ 	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
+ 
++	if (list_empty(&stp->st_perfile))
++		return false;
++
+ 	spin_lock(&fp->fi_lock);
+-	list_del(&stp->st_perfile);
++	list_del_init(&stp->st_perfile);
+ 	spin_unlock(&fp->fi_lock);
+ 	list_del(&stp->st_perstateowner);
++	return true;
+ }
+ 
+ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
+@@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
+ 	list_add(&stp->st_locks, reaplist);
+ }
+ 
+-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp)
++static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
+ 
+ 	lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
+ 
+ 	list_del_init(&stp->st_locks);
+-	unhash_ol_stateid(stp);
+ 	nfs4_unhash_stid(&stp->st_stid);
++	return unhash_ol_stateid(stp);
+ }
+ 
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ 	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
++	bool unhashed;
+ 
+ 	spin_lock(&oo->oo_owner.so_client->cl_lock);
+-	unhash_lock_stateid(stp);
++	unhashed = unhash_lock_stateid(stp);
+ 	spin_unlock(&oo->oo_owner.so_client->cl_lock);
+-	nfs4_put_stid(&stp->st_stid);
++	if (unhashed)
++		nfs4_put_stid(&stp->st_stid);
+ }
+ 
+ static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
+@@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo)
+ 	while (!list_empty(&lo->lo_owner.so_stateids)) {
+ 		stp = list_first_entry(&lo->lo_owner.so_stateids,
+ 				struct nfs4_ol_stateid, st_perstateowner);
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		put_ol_stateid_locked(stp, &reaplist);
+ 	}
+ 	spin_unlock(&clp->cl_lock);
+@@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ {
+ 	struct nfs4_ol_stateid *stp;
+ 
++	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
++
+ 	while (!list_empty(&open_stp->st_locks)) {
+ 		stp = list_entry(open_stp->st_locks.next,
+ 				struct nfs4_ol_stateid, st_locks);
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		put_ol_stateid_locked(stp, reaplist);
+ 	}
+ }
+ 
+-static void unhash_open_stateid(struct nfs4_ol_stateid *stp,
++static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
+ 				struct list_head *reaplist)
+ {
++	bool unhashed;
++
+ 	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+ 
+-	unhash_ol_stateid(stp);
++	unhashed = unhash_ol_stateid(stp);
+ 	release_open_stateid_locks(stp, reaplist);
++	return unhashed;
+ }
+ 
+ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+@@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+ 	LIST_HEAD(reaplist);
+ 
+ 	spin_lock(&stp->st_stid.sc_client->cl_lock);
+-	unhash_open_stateid(stp, &reaplist);
+-	put_ol_stateid_locked(stp, &reaplist);
++	if (unhash_open_stateid(stp, &reaplist))
++		put_ol_stateid_locked(stp, &reaplist);
+ 	spin_unlock(&stp->st_stid.sc_client->cl_lock);
+ 	free_ol_stateid_reaplist(&reaplist);
+ }
+@@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo)
+ 	while (!list_empty(&oo->oo_owner.so_stateids)) {
+ 		stp = list_first_entry(&oo->oo_owner.so_stateids,
+ 				struct nfs4_ol_stateid, st_perstateowner);
+-		unhash_open_stateid(stp, &reaplist);
+-		put_ol_stateid_locked(stp, &reaplist);
++		if (unhash_open_stateid(stp, &reaplist))
++			put_ol_stateid_locked(stp, &reaplist);
+ 	}
+ 	spin_unlock(&clp->cl_lock);
+ 	free_ol_stateid_reaplist(&reaplist);
+@@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp)
+ 	spin_lock(&state_lock);
+ 	while (!list_empty(&clp->cl_delegations)) {
+ 		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+@@ -4346,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn)
+ 			new_timeo = min(new_timeo, t);
+ 			break;
+ 		}
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+@@ -4714,7 +4733,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ 		if (check_for_locks(stp->st_stid.sc_file,
+ 				    lockowner(stp->st_stateowner)))
+ 			break;
+-		unhash_lock_stateid(stp);
++		WARN_ON(!unhash_lock_stateid(stp));
+ 		spin_unlock(&cl->cl_lock);
+ 		nfs4_put_stid(s);
+ 		ret = nfs_ok;
+@@ -4930,20 +4949,23 @@ out:
+ static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
+ {
+ 	struct nfs4_client *clp = s->st_stid.sc_client;
++	bool unhashed;
+ 	LIST_HEAD(reaplist);
+ 
+ 	s->st_stid.sc_type = NFS4_CLOSED_STID;
+ 	spin_lock(&clp->cl_lock);
+-	unhash_open_stateid(s, &reaplist);
++	unhashed = unhash_open_stateid(s, &reaplist);
+ 
+ 	if (clp->cl_minorversion) {
+-		put_ol_stateid_locked(s, &reaplist);
++		if (unhashed)
++			put_ol_stateid_locked(s, &reaplist);
+ 		spin_unlock(&clp->cl_lock);
+ 		free_ol_stateid_reaplist(&reaplist);
+ 	} else {
+ 		spin_unlock(&clp->cl_lock);
+ 		free_ol_stateid_reaplist(&reaplist);
+-		move_to_close_lru(s, clp->net);
++		if (unhashed)
++			move_to_close_lru(s, clp->net);
+ 	}
+ }
+ 
+@@ -5982,7 +6004,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
+ 
+ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
+ 				    struct list_head *collect,
+-				    void (*func)(struct nfs4_ol_stateid *))
++				    bool (*func)(struct nfs4_ol_stateid *))
+ {
+ 	struct nfs4_openowner *oop;
+ 	struct nfs4_ol_stateid *stp, *st_next;
+@@ -5996,9 +6018,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
+ 			list_for_each_entry_safe(lst, lst_next,
+ 					&stp->st_locks, st_locks) {
+ 				if (func) {
+-					func(lst);
+-					nfsd_inject_add_lock_to_list(lst,
+-								collect);
++					if (func(lst))
++						nfsd_inject_add_lock_to_list(lst,
++									collect);
+ 				}
+ 				++count;
+ 				/*
+@@ -6268,7 +6290,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
+ 				continue;
+ 
+ 			atomic_inc(&clp->cl_refcount);
+-			unhash_delegation_locked(dp);
++			WARN_ON(!unhash_delegation_locked(dp));
+ 			list_add(&dp->dl_recall_lru, victims);
+ 		}
+ 		++count;
+@@ -6598,7 +6620,7 @@ nfs4_state_shutdown_net(struct net *net)
+ 	spin_lock(&state_lock);
+ 	list_for_each_safe(pos, next, &nn->del_recall_lru) {
+ 		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
+-		unhash_delegation_locked(dp);
++		WARN_ON(!unhash_delegation_locked(dp));
+ 		list_add(&dp->dl_recall_lru, &reaplist);
+ 	}
+ 	spin_unlock(&state_lock);
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index d4d84451e0e6..3dd1b616b92b 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2139,6 +2139,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ 		return nfsd4_encode_user(xdr, rqstp, ace->who_uid);
+ }
+ 
++static inline __be32
++nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
++{
++	__be32 *p;
++
++	if (layout_type) {
++		p = xdr_reserve_space(xdr, 8);
++		if (!p)
++			return nfserr_resource;
++		*p++ = cpu_to_be32(1);
++		*p++ = cpu_to_be32(layout_type);
++	} else {
++		p = xdr_reserve_space(xdr, 4);
++		if (!p)
++			return nfserr_resource;
++		*p++ = cpu_to_be32(0);
++	}
++
++	return 0;
++}
++
+ #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
+ 			      FATTR4_WORD0_RDATTR_ERROR)
+ #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
+@@ -2692,20 +2713,16 @@ out_acl:
+ 		p = xdr_encode_hyper(p, stat.ino);
+ 	}
+ #ifdef CONFIG_NFSD_PNFS
+-	if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) ||
+-	    (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) {
+-		if (exp->ex_layout_type) {
+-			p = xdr_reserve_space(xdr, 8);
+-			if (!p)
+-				goto out_resource;
+-			*p++ = cpu_to_be32(1);
+-			*p++ = cpu_to_be32(exp->ex_layout_type);
+-		} else {
+-			p = xdr_reserve_space(xdr, 4);
+-			if (!p)
+-				goto out_resource;
+-			*p++ = cpu_to_be32(0);
+-		}
++	if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
++		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
++		if (status)
++			goto out;
++	}
++
++	if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
++		status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
++		if (status)
++			goto out;
+ 	}
+ 
+ 	if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) {
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index edb640ae9a94..eb1cebed3f36 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ extern void jbd2_journal_commit_transaction(journal_t *);
+ 
+ /* Checkpoint list management */
+-void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
+ int __jbd2_journal_remove_checkpoint(struct journal_head *);
++void jbd2_journal_destroy_checkpoint(journal_t *journal);
+ void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
+ 
+ 
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 0755b9fd03a7..b2085582d44e 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page)
+ }
+ 
+ /*
++ * Return true only if the page has been allocated with
++ * ALLOC_NO_WATERMARKS and the low watermark was not
++ * met, implying that the system is under some pressure.
++ */
++static inline bool page_is_pfmemalloc(struct page *page)
++{
++	/*
++	 * Page index cannot be this large so this must be
++	 * a pfmemalloc page.
++	 */
++	return page->index == -1UL;
++}
++
++/*
++ * Only to be called by the page allocator on a freshly allocated
++ * page.
++ */
++static inline void set_page_pfmemalloc(struct page *page)
++{
++	page->index = -1UL;
++}
++
++static inline void clear_page_pfmemalloc(struct page *page)
++{
++	page->index = 0;
++}
++
++/*
+  * Different kinds of faults, as returned by handle_mm_fault().
+  * Used to decide whether a process gets delivered SIGBUS or
+  * just gets major/minor fault counters bumped up.
+diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
+index 8d37e26a1007..c0c6b33535fb 100644
+--- a/include/linux/mm_types.h
++++ b/include/linux/mm_types.h
+@@ -63,15 +63,6 @@ struct page {
+ 		union {
+ 			pgoff_t index;		/* Our offset within mapping. */
+ 			void *freelist;		/* sl[aou]b first free object */
+-			bool pfmemalloc;	/* If set by the page allocator,
+-						 * ALLOC_NO_WATERMARKS was set
+-						 * and the low watermark was not
+-						 * met implying that the system
+-						 * is under some pressure. The
+-						 * caller should try ensure
+-						 * this page is only used to
+-						 * free other pages.
+-						 */
+ 		};
+ 
+ 		union {
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f15154a879c7..eb1c55b8255a 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
+ 	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ 
+ 	/*
+-	 * Propagate page->pfmemalloc to the skb if we can. The problem is
+-	 * that not all callers have unique ownership of the page. If
+-	 * pfmemalloc is set, we check the mapping as a mapping implies
+-	 * page->index is set (index and pfmemalloc share space).
+-	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
+-	 * do not lose pfmemalloc information as the pages would not be
+-	 * allocated using __GFP_MEMALLOC.
++	 * Propagate page pfmemalloc to the skb if we can. The problem is
++	 * that not all callers have unique ownership of the page but rely
++	 * on page_is_pfmemalloc doing the right thing(tm).
+ 	 */
+ 	frag->page.p		  = page;
+ 	frag->page_offset	  = off;
+ 	skb_frag_size_set(frag, size);
+ 
+ 	page = compound_head(page);
+-	if (page->pfmemalloc && !page->mapping)
++	if (page_is_pfmemalloc(page))
+ 		skb->pfmemalloc	= true;
+ }
+ 
+@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void)
+ static inline void skb_propagate_pfmemalloc(struct page *page,
+ 					     struct sk_buff *skb)
+ {
+-	if (page && page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		skb->pfmemalloc = true;
+ }
+ 
+diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
+index 7591788e9fbf..357e44c1a46b 100644
+--- a/include/linux/sunrpc/xprtsock.h
++++ b/include/linux/sunrpc/xprtsock.h
+@@ -42,6 +42,7 @@ struct sock_xprt {
+ 	/*
+ 	 * Connection of transports
+ 	 */
++	unsigned long		sock_state;
+ 	struct delayed_work	connect_worker;
+ 	struct sockaddr_storage	srcaddr;
+ 	unsigned short		srcport;
+@@ -76,6 +77,8 @@ struct sock_xprt {
+  */
+ #define TCP_RPC_REPLY		(1UL << 6)
+ 
++#define XPRT_SOCK_CONNECTING	1U
++
+ #endif /* __KERNEL__ */
+ 
+ #endif /* _LINUX_SUNRPC_XPRTSOCK_H */
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index 3ee4c92afd1b..931738bc5bba 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -99,7 +99,6 @@ struct tc_action_ops {
+ 
+ int tcf_hash_search(struct tc_action *a, u32 index);
+ void tcf_hash_destroy(struct tc_action *a);
+-int tcf_hash_release(struct tc_action *a, int bind);
+ u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
+ int tcf_hash_check(u32 index, struct tc_action *a, int bind);
+ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
+ void tcf_hash_insert(struct tc_action *a);
+ 
++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
++
++static inline int tcf_hash_release(struct tc_action *a, bool bind)
++{
++	return __tcf_hash_release(a, bind, false);
++}
++
+ int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
+ int tcf_unregister_action(struct tc_action_ops *a);
+ int tcf_action_destroy(struct list_head *actions, int bind);
+diff --git a/include/net/ip.h b/include/net/ip.h
+index d14af7edd197..f41fc497b21b 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
+ }
+ 
+ /* datagram.c */
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
+ 
+ void ip4_datagram_release_cb(struct sock *sk);
+diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
+index 63deb8d9f82a..d298857cd845 100644
+--- a/include/soc/tegra/mc.h
++++ b/include/soc/tegra/mc.h
+@@ -59,6 +59,7 @@ struct tegra_smmu_soc {
+ 	bool supports_round_robin_arbitration;
+ 	bool supports_request_limit;
+ 
++	unsigned int num_tlb_lines;
+ 	unsigned int num_asids;
+ 
+ 	const struct tegra_smmu_ops *ops;
+diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
+index fd1a02cb3c82..003dca933803 100644
+--- a/include/trace/events/sunrpc.h
++++ b/include/trace/events/sunrpc.h
+@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue,
+ 
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+-		__field(struct svc_rqst *, rqst)
++		__field_struct(struct sockaddr_storage, ss)
++		__field(int, pid)
++		__field(unsigned long, flags)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
+-		__entry->rqst = rqst;
++		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
++		__entry->pid = rqst? rqst->rq_task->pid : 0;
++		__entry->flags = xprt ? xprt->xpt_flags : 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->xprt->xpt_remote,
+-		__entry->rqst ? __entry->rqst->rq_task->pid : 0,
+-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
++		(struct sockaddr *)&__entry->ss,
++		__entry->pid, show_svc_xprt_flags(__entry->flags))
+ );
+ 
+ TRACE_EVENT(svc_xprt_dequeue,
+@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt,
+ 	TP_STRUCT__entry(
+ 		__field(struct svc_xprt *, xprt)
+ 		__field(int, len)
++		__field_struct(struct sockaddr_storage, ss)
++		__field(unsigned long, flags)
+ 	),
+ 
+ 	TP_fast_assign(
+ 		__entry->xprt = xprt;
++		xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss));
+ 		__entry->len = len;
++		__entry->flags = xprt ? xprt->xpt_flags : 0;
+ 	),
+ 
+ 	TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt,
+-		(struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len,
+-		show_svc_xprt_flags(__entry->xprt->xpt_flags))
++		(struct sockaddr *)&__entry->ss,
++		__entry->len, show_svc_xprt_flags(__entry->flags))
+ );
+ #endif /* _TRACE_SUNRPC_H */
+ 
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 03c1eaaa6ef5..8209fa2d36ef 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -1854,13 +1854,21 @@ static int check_unshare_flags(unsigned long unshare_flags)
+ 				CLONE_NEWUSER|CLONE_NEWPID))
+ 		return -EINVAL;
+ 	/*
+-	 * Not implemented, but pretend it works if there is nothing to
+-	 * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
+-	 * needs to unshare vm.
++	 * Not implemented, but pretend it works if there is nothing
++	 * to unshare.  Note that unsharing the address space or the
++	 * signal handlers also needs to unshare the signal queues (aka
++	 * CLONE_THREAD).
+ 	 */
+ 	if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
+-		/* FIXME: get_task_mm() increments ->mm_users */
+-		if (atomic_read(&current->mm->mm_users) > 1)
++		if (!thread_group_empty(current))
++			return -EINVAL;
++	}
++	if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) {
++		if (atomic_read(&current->sighand->count) > 1)
++			return -EINVAL;
++	}
++	if (unshare_flags & CLONE_VM) {
++		if (!current_is_single_threaded())
+ 			return -EINVAL;
+ 	}
+ 
+@@ -1929,16 +1937,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
+ 	if (unshare_flags & CLONE_NEWUSER)
+ 		unshare_flags |= CLONE_THREAD | CLONE_FS;
+ 	/*
+-	 * If unsharing a thread from a thread group, must also unshare vm.
+-	 */
+-	if (unshare_flags & CLONE_THREAD)
+-		unshare_flags |= CLONE_VM;
+-	/*
+ 	 * If unsharing vm, must also unshare signal handlers.
+ 	 */
+ 	if (unshare_flags & CLONE_VM)
+ 		unshare_flags |= CLONE_SIGHAND;
+ 	/*
++	 * If unsharing signal handlers, must also unshare the signal queues.
++	 */
++	if (unshare_flags & CLONE_SIGHAND)
++		unshare_flags |= CLONE_THREAD;
++	/*
+ 	 * If unsharing namespace, must also unshare filesystem information.
+ 	 */
+ 	if (unshare_flags & CLONE_NEWNS)
+diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
+index 6dd0335ea61b..0234361b24b8 100644
+--- a/lib/decompress_bunzip2.c
++++ b/lib/decompress_bunzip2.c
+@@ -743,12 +743,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long len,
++STATIC int INIT __decompress(unsigned char *buf, long len,
+ 			long (*fill)(void*, unsigned long),
+ 			long (*flush)(void*, unsigned long),
+-			unsigned char *outbuf,
++			unsigned char *outbuf, long olen,
+ 			long *pos,
+-			void(*error)(char *x))
++			void (*error)(char *x))
+ {
+ 	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
+ }
+diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
+index d4c7891635ec..555c06bf20da 100644
+--- a/lib/decompress_inflate.c
++++ b/lib/decompress_inflate.c
+@@ -1,4 +1,5 @@
+ #ifdef STATIC
++#define PREBOOT
+ /* Pre-boot environment: included */
+ 
+ /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
+@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len)
+ }
+ 
+ /* Included from initramfs et al code */
+-STATIC int INIT gunzip(unsigned char *buf, long len,
++STATIC int INIT __gunzip(unsigned char *buf, long len,
+ 		       long (*fill)(void*, unsigned long),
+ 		       long (*flush)(void*, unsigned long),
+-		       unsigned char *out_buf,
++		       unsigned char *out_buf, long out_len,
+ 		       long *pos,
+ 		       void(*error)(char *x)) {
+ 	u8 *zbuf;
+ 	struct z_stream_s *strm;
+ 	int rc;
+-	size_t out_len;
+ 
+ 	rc = -1;
+ 	if (flush) {
+ 		out_len = 0x8000; /* 32 K */
+ 		out_buf = malloc(out_len);
+ 	} else {
+-		out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
++		if (!out_len)
++			out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */
+ 	}
+ 	if (!out_buf) {
+ 		error("Out of memory while allocating output buffer");
+@@ -181,4 +182,24 @@ gunzip_nomem1:
+ 	return rc; /* returns Z_OK (0) if successful */
+ }
+ 
+-#define decompress gunzip
++#ifndef PREBOOT
++STATIC int INIT gunzip(unsigned char *buf, long len,
++		       long (*fill)(void*, unsigned long),
++		       long (*flush)(void*, unsigned long),
++		       unsigned char *out_buf,
++		       long *pos,
++		       void (*error)(char *x))
++{
++	return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error);
++}
++#else
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long out_len,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error);
++}
++#endif
+diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
+index 40f66ebe57b7..036fc882cd72 100644
+--- a/lib/decompress_unlz4.c
++++ b/lib/decompress_unlz4.c
+@@ -196,12 +196,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long in_len,
++STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ 			      long (*fill)(void*, unsigned long),
+ 			      long (*flush)(void*, unsigned long),
+-			      unsigned char *output,
++			      unsigned char *output, long out_len,
+ 			      long *posp,
+-			      void(*error)(char *x)
++			      void (*error)(char *x)
+ 	)
+ {
+ 	return unlz4(buf, in_len - 4, fill, flush, output, posp, error);
+diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
+index 0be83af62b88..decb64629c14 100644
+--- a/lib/decompress_unlzma.c
++++ b/lib/decompress_unlzma.c
+@@ -667,13 +667,12 @@ exit_0:
+ }
+ 
+ #ifdef PREBOOT
+-STATIC int INIT decompress(unsigned char *buf, long in_len,
++STATIC int INIT __decompress(unsigned char *buf, long in_len,
+ 			      long (*fill)(void*, unsigned long),
+ 			      long (*flush)(void*, unsigned long),
+-			      unsigned char *output,
++			      unsigned char *output, long out_len,
+ 			      long *posp,
+-			      void(*error)(char *x)
+-	)
++			      void (*error)(char *x))
+ {
+ 	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
+ }
+diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
+index b94a31bdd87d..f4c158e3a022 100644
+--- a/lib/decompress_unlzo.c
++++ b/lib/decompress_unlzo.c
+@@ -31,6 +31,7 @@
+  */
+ 
+ #ifdef STATIC
++#define PREBOOT
+ #include "lzo/lzo1x_decompress_safe.c"
+ #else
+ #include <linux/decompress/unlzo.h>
+@@ -287,4 +288,14 @@ exit:
+ 	return ret;
+ }
+ 
+-#define decompress unlzo
++#ifdef PREBOOT
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long olen,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return unlzo(buf, len, fill, flush, out_buf, pos, error);
++}
++#endif
+diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
+index b07a78340e9d..25d59a95bd66 100644
+--- a/lib/decompress_unxz.c
++++ b/lib/decompress_unxz.c
+@@ -394,4 +394,14 @@ error_alloc_state:
+  * This macro is used by architecture-specific files to decompress
+  * the kernel image.
+  */
+-#define decompress unxz
++#ifdef XZ_PREBOOT
++STATIC int INIT __decompress(unsigned char *buf, long len,
++			   long (*fill)(void*, unsigned long),
++			   long (*flush)(void*, unsigned long),
++			   unsigned char *out_buf, long olen,
++			   long *pos,
++			   void (*error)(char *x))
++{
++	return unxz(buf, len, fill, flush, out_buf, pos, error);
++}
++#endif
+diff --git a/lib/rhashtable.c b/lib/rhashtable.c
+index 8609378e6505..cf910e48f8f2 100644
+--- a/lib/rhashtable.c
++++ b/lib/rhashtable.c
+@@ -612,6 +612,8 @@ next:
+ 		iter->skip = 0;
+ 	}
+ 
++	iter->p = NULL;
++
+ 	/* Ensure we see any new tables. */
+ 	smp_rmb();
+ 
+@@ -622,8 +624,6 @@ next:
+ 		return ERR_PTR(-EAGAIN);
+ 	}
+ 
+-	iter->p = NULL;
+-
+ out:
+ 
+ 	return obj;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ebffa0e4a9c0..18490f3bd7f1 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -983,12 +983,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
+ 	set_page_owner(page, order, gfp_flags);
+ 
+ 	/*
+-	 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to
++	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
+ 	 * allocate the page. The expectation is that the caller is taking
+ 	 * steps that will free more memory. The caller should avoid the page
+ 	 * being used for !PFMEMALLOC purposes.
+ 	 */
+-	page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
++	if (alloc_flags & ALLOC_NO_WATERMARKS)
++		set_page_pfmemalloc(page);
++	else
++		clear_page_pfmemalloc(page);
+ 
+ 	return 0;
+ }
+diff --git a/mm/slab.c b/mm/slab.c
+index 7eb38dd1cefa..3dd2d1ff9d5d 100644
+--- a/mm/slab.c
++++ b/mm/slab.c
+@@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ 	}
+ 
+ 	/* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
+-	if (unlikely(page->pfmemalloc))
++	if (page_is_pfmemalloc(page))
+ 		pfmemalloc_active = true;
+ 
+ 	nr_pages = (1 << cachep->gfporder);
+@@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
+ 		add_zone_page_state(page_zone(page),
+ 			NR_SLAB_UNRECLAIMABLE, nr_pages);
+ 	__SetPageSlab(page);
+-	if (page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		SetPageSlabPfmemalloc(page);
+ 
+ 	if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
+diff --git a/mm/slub.c b/mm/slub.c
+index 54c0876b43d5..08342c523a85 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
+ 	inc_slabs_node(s, page_to_nid(page), page->objects);
+ 	page->slab_cache = s;
+ 	__SetPageSlab(page);
+-	if (page->pfmemalloc)
++	if (page_is_pfmemalloc(page))
+ 		SetPageSlabPfmemalloc(page);
+ 
+ 	start = page_address(page);
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 0d024fc8aa8e..1a17bd7c0ce5 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1153,7 +1153,7 @@ cull_mlocked:
+ 		if (PageSwapCache(page))
+ 			try_to_free_swap(page);
+ 		unlock_page(page);
+-		putback_lru_page(page);
++		list_add(&page->lru, &ret_pages);
+ 		continue;
+ 
+ activate_locked:
+diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
+index e97572b5d2cc..0ff6e1bbca91 100644
+--- a/net/bridge/br_forward.c
++++ b/net/bridge/br_forward.c
+@@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
+ 	} else {
+ 		skb_push(skb, ETH_HLEN);
+ 		br_drop_fake_rtable(skb);
++		skb_sender_cpu_clear(skb);
+ 		dev_queue_xmit(skb);
+ 	}
+ 
+diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
+index e29ad70b3000..d1f910c0d586 100644
+--- a/net/bridge/br_mdb.c
++++ b/net/bridge/br_mdb.c
+@@ -348,7 +348,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
+ 		return -ENOMEM;
+ 	rcu_assign_pointer(*pp, p);
+ 
+-	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
+ 	return 0;
+ }
+ 
+@@ -371,6 +370,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
+ 	if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ 		return -EINVAL;
+ 
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP))
+ 		ip.u.ip4 = entry->addr.u.ip4;
+@@ -417,6 +417,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+ 	if (!netif_running(br->dev) || br->multicast_disabled)
+ 		return -EINVAL;
+ 
++	memset(&ip, 0, sizeof(ip));
+ 	ip.proto = entry->addr.proto;
+ 	if (ip.proto == htons(ETH_P_IP)) {
+ 		if (timer_pending(&br->ip4_other_query.timer))
+diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
+index 4b5c236998ff..a7559ef312bd 100644
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
+ 		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
+ 		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
+ 		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
++		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
++		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
+ 		+ 0;
+ }
+ 
+@@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
+ 	[IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
+ 	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
+ 	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
++	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
++	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
+ };
+ 
+ /* Change the state of the port and notify spanning tree */
+@@ -711,9 +715,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
+ 				    struct nlattr *tb[],
+ 				    struct nlattr *data[])
+ {
++	struct net_bridge *br = netdev_priv(brdev);
++	int ret;
++
+ 	if (!data)
+ 		return 0;
+-	return br_setport(br_port_get_rtnl(dev), data);
++
++	spin_lock_bh(&br->lock);
++	ret = br_setport(br_port_get_rtnl(dev), data);
++	spin_unlock_bh(&br->lock);
++
++	return ret;
+ }
+ 
+ static int br_port_fill_slave_info(struct sk_buff *skb,
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index b80fb91bb3f7..617088aee21d 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -131,6 +131,35 @@ out_noerr:
+ 	goto out;
+ }
+ 
++static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
++{
++	struct sk_buff *nskb;
++
++	if (skb->peeked)
++		return skb;
++
++	/* We have to unshare an skb before modifying it. */
++	if (!skb_shared(skb))
++		goto done;
++
++	nskb = skb_clone(skb, GFP_ATOMIC);
++	if (!nskb)
++		return ERR_PTR(-ENOMEM);
++
++	skb->prev->next = nskb;
++	skb->next->prev = nskb;
++	nskb->prev = skb->prev;
++	nskb->next = skb->next;
++
++	consume_skb(skb);
++	skb = nskb;
++
++done:
++	skb->peeked = 1;
++
++	return skb;
++}
++
+ /**
+  *	__skb_recv_datagram - Receive a datagram skbuff
+  *	@sk: socket
+@@ -165,7 +194,9 @@ out_noerr:
+ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 				    int *peeked, int *off, int *err)
+ {
++	struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 	struct sk_buff *skb, *last;
++	unsigned long cpu_flags;
+ 	long timeo;
+ 	/*
+ 	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
+@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 		 * Look at current nfs client by the way...
+ 		 * However, this function was correct in any case. 8)
+ 		 */
+-		unsigned long cpu_flags;
+-		struct sk_buff_head *queue = &sk->sk_receive_queue;
+ 		int _off = *off;
+ 
+ 		last = (struct sk_buff *)queue;
+@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 					_off -= skb->len;
+ 					continue;
+ 				}
+-				skb->peeked = 1;
++
++				skb = skb_set_peeked(skb);
++				error = PTR_ERR(skb);
++				if (IS_ERR(skb))
++					goto unlock_err;
++
+ 				atomic_inc(&skb->users);
+ 			} else
+ 				__skb_unlink(skb, queue);
+@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
+ 
+ 	return NULL;
+ 
++unlock_err:
++	spin_unlock_irqrestore(&queue->lock, cpu_flags);
+ no_packet:
+ 	*err = error;
+ 	return NULL;
+@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
+ 		    !skb->csum_complete_sw)
+ 			netdev_rx_csum_fault(skb->dev);
+ 	}
+-	skb->csum_valid = !sum;
++	if (!skb_shared(skb))
++		skb->csum_valid = !sum;
+ 	return sum;
+ }
+ EXPORT_SYMBOL(__skb_checksum_complete_head);
+@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
+ 			netdev_rx_csum_fault(skb->dev);
+ 	}
+ 
+-	/* Save full packet checksum */
+-	skb->csum = csum;
+-	skb->ip_summed = CHECKSUM_COMPLETE;
+-	skb->csum_complete_sw = 1;
+-	skb->csum_valid = !sum;
++	if (!skb_shared(skb)) {
++		/* Save full packet checksum */
++		skb->csum = csum;
++		skb->ip_summed = CHECKSUM_COMPLETE;
++		skb->csum_complete_sw = 1;
++		skb->csum_valid = !sum;
++	}
+ 
+ 	return sum;
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index aa82f9ab6a36..a42b232805a5 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -672,10 +672,6 @@ int dev_get_iflink(const struct net_device *dev)
+ 	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
+ 		return dev->netdev_ops->ndo_get_iflink(dev);
+ 
+-	/* If dev->rtnl_link_ops is set, it's a virtual interface. */
+-	if (dev->rtnl_link_ops)
+-		return 0;
+-
+ 	return dev->ifindex;
+ }
+ EXPORT_SYMBOL(dev_get_iflink);
+@@ -3341,6 +3337,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
+ 	local_irq_save(flags);
+ 
+ 	rps_lock(sd);
++	if (!netif_running(skb->dev))
++		goto drop;
+ 	qlen = skb_queue_len(&sd->input_pkt_queue);
+ 	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
+ 		if (qlen) {
+@@ -3362,6 +3360,7 @@ enqueue:
+ 		goto enqueue;
+ 	}
+ 
++drop:
+ 	sd->dropped++;
+ 	rps_unlock(sd);
+ 
+@@ -3667,8 +3666,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
+ 
+ 	pt_prev = NULL;
+ 
+-	rcu_read_lock();
+-
+ another_round:
+ 	skb->skb_iif = skb->dev->ifindex;
+ 
+@@ -3678,7 +3675,7 @@ another_round:
+ 	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+ 		skb = skb_vlan_untag(skb);
+ 		if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+ 
+ #ifdef CONFIG_NET_CLS_ACT
+@@ -3708,7 +3705,7 @@ skip_taps:
+ 	if (static_key_false(&ingress_needed)) {
+ 		skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ 		if (!skb)
+-			goto unlock;
++			goto out;
+ 	}
+ 
+ 	skb->tc_verd = 0;
+@@ -3725,7 +3722,7 @@ ncls:
+ 		if (vlan_do_receive(&skb))
+ 			goto another_round;
+ 		else if (unlikely(!skb))
+-			goto unlock;
++			goto out;
+ 	}
+ 
+ 	rx_handler = rcu_dereference(skb->dev->rx_handler);
+@@ -3737,7 +3734,7 @@ ncls:
+ 		switch (rx_handler(&skb)) {
+ 		case RX_HANDLER_CONSUMED:
+ 			ret = NET_RX_SUCCESS;
+-			goto unlock;
++			goto out;
+ 		case RX_HANDLER_ANOTHER:
+ 			goto another_round;
+ 		case RX_HANDLER_EXACT:
+@@ -3791,8 +3788,7 @@ drop:
+ 		ret = NET_RX_DROP;
+ 	}
+ 
+-unlock:
+-	rcu_read_unlock();
++out:
+ 	return ret;
+ }
+ 
+@@ -3823,29 +3819,30 @@ static int __netif_receive_skb(struct sk_buff *skb)
+ 
+ static int netif_receive_skb_internal(struct sk_buff *skb)
+ {
++	int ret;
++
+ 	net_timestamp_check(netdev_tstamp_prequeue, skb);
+ 
+ 	if (skb_defer_rx_timestamp(skb))
+ 		return NET_RX_SUCCESS;
+ 
++	rcu_read_lock();
++
+ #ifdef CONFIG_RPS
+ 	if (static_key_false(&rps_needed)) {
+ 		struct rps_dev_flow voidflow, *rflow = &voidflow;
+-		int cpu, ret;
+-
+-		rcu_read_lock();
+-
+-		cpu = get_rps_cpu(skb->dev, skb, &rflow);
++		int cpu = get_rps_cpu(skb->dev, skb, &rflow);
+ 
+ 		if (cpu >= 0) {
+ 			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
+ 			rcu_read_unlock();
+ 			return ret;
+ 		}
+-		rcu_read_unlock();
+ 	}
+ #endif
+-	return __netif_receive_skb(skb);
++	ret = __netif_receive_skb(skb);
++	rcu_read_unlock();
++	return ret;
+ }
+ 
+ /**
+@@ -4390,8 +4387,10 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ 		struct sk_buff *skb;
+ 
+ 		while ((skb = __skb_dequeue(&sd->process_queue))) {
++			rcu_read_lock();
+ 			local_irq_enable();
+ 			__netif_receive_skb(skb);
++			rcu_read_unlock();
+ 			local_irq_disable();
+ 			input_queue_head_incr(sd);
+ 			if (++work >= quota) {
+@@ -6027,6 +6026,7 @@ static void rollback_registered_many(struct list_head *head)
+ 		unlist_netdevice(dev);
+ 
+ 		dev->reg_state = NETREG_UNREGISTERING;
++		on_each_cpu(flush_backlog, dev, 1);
+ 	}
+ 
+ 	synchronize_net();
+@@ -6297,7 +6297,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev)
+ 	struct netdev_queue *tx;
+ 	size_t sz = count * sizeof(*tx);
+ 
+-	BUG_ON(count < 1 || count > 0xffff);
++	if (count < 1 || count > 0xffff)
++		return -EINVAL;
+ 
+ 	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ 	if (!tx) {
+@@ -6650,8 +6651,6 @@ void netdev_run_todo(void)
+ 
+ 		dev->reg_state = NETREG_UNREGISTERED;
+ 
+-		on_each_cpu(flush_backlog, dev, 1);
+-
+ 		netdev_wait_allrefs(dev);
+ 
+ 		/* paranoia */
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index 508155b283dd..043ea1867d0f 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg)
+ 	pktgen_rem_thread(t);
+ 
+ 	/* Wait for kthread_stop */
+-	while (!kthread_should_stop()) {
++	for (;;) {
+ 		set_current_state(TASK_INTERRUPTIBLE);
++		if (kthread_should_stop())
++			break;
+ 		schedule();
+ 	}
+ 	__set_current_state(TASK_RUNNING);
+diff --git a/net/core/request_sock.c b/net/core/request_sock.c
+index 87b22c0bc08c..b42f0e26f89e 100644
+--- a/net/core/request_sock.c
++++ b/net/core/request_sock.c
+@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
+ 			spin_lock_bh(&queue->syn_wait_lock);
+ 			while ((req = lopt->syn_table[i]) != NULL) {
+ 				lopt->syn_table[i] = req->dl_next;
++				/* Because of the following del_timer_sync(),
++				 * we must release the spinlock here
++				 * or risk a deadlock.
++				 */
++				spin_unlock_bh(&queue->syn_wait_lock);
+ 				atomic_inc(&lopt->qlen_dec);
+-				if (del_timer(&req->rsk_timer))
++				if (del_timer_sync(&req->rsk_timer))
+ 					reqsk_put(req);
+ 				reqsk_put(req);
++				spin_lock_bh(&queue->syn_wait_lock);
+ 			}
+ 			spin_unlock_bh(&queue->syn_wait_lock);
+ 		}
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 8de36824018d..fe95cb704aaa 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1287,10 +1287,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ 	[IFLA_INFO_SLAVE_DATA]	= { .type = NLA_NESTED },
+ };
+ 
+-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
+-	[IFLA_VF_INFO]		= { .type = NLA_NESTED },
+-};
+-
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+ 	[IFLA_VF_MAC]		= { .len = sizeof(struct ifla_vf_mac) },
+ 	[IFLA_VF_VLAN]		= { .len = sizeof(struct ifla_vf_vlan) },
+@@ -1437,96 +1433,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+ 	return 0;
+ }
+ 
+-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
+ {
+-	int rem, err = -EINVAL;
+-	struct nlattr *vf;
+ 	const struct net_device_ops *ops = dev->netdev_ops;
++	int err = -EINVAL;
+ 
+-	nla_for_each_nested(vf, attr, rem) {
+-		switch (nla_type(vf)) {
+-		case IFLA_VF_MAC: {
+-			struct ifla_vf_mac *ivm;
+-			ivm = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_mac)
+-				err = ops->ndo_set_vf_mac(dev, ivm->vf,
+-							  ivm->mac);
+-			break;
+-		}
+-		case IFLA_VF_VLAN: {
+-			struct ifla_vf_vlan *ivv;
+-			ivv = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_vlan)
+-				err = ops->ndo_set_vf_vlan(dev, ivv->vf,
+-							   ivv->vlan,
+-							   ivv->qos);
+-			break;
+-		}
+-		case IFLA_VF_TX_RATE: {
+-			struct ifla_vf_tx_rate *ivt;
+-			struct ifla_vf_info ivf;
+-			ivt = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_get_vf_config)
+-				err = ops->ndo_get_vf_config(dev, ivt->vf,
+-							     &ivf);
+-			if (err)
+-				break;
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_rate)
+-				err = ops->ndo_set_vf_rate(dev, ivt->vf,
+-							   ivf.min_tx_rate,
+-							   ivt->rate);
+-			break;
+-		}
+-		case IFLA_VF_RATE: {
+-			struct ifla_vf_rate *ivt;
+-			ivt = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_rate)
+-				err = ops->ndo_set_vf_rate(dev, ivt->vf,
+-							   ivt->min_tx_rate,
+-							   ivt->max_tx_rate);
+-			break;
+-		}
+-		case IFLA_VF_SPOOFCHK: {
+-			struct ifla_vf_spoofchk *ivs;
+-			ivs = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_spoofchk)
+-				err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
+-							       ivs->setting);
+-			break;
+-		}
+-		case IFLA_VF_LINK_STATE: {
+-			struct ifla_vf_link_state *ivl;
+-			ivl = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_link_state)
+-				err = ops->ndo_set_vf_link_state(dev, ivl->vf,
+-								 ivl->link_state);
+-			break;
+-		}
+-		case IFLA_VF_RSS_QUERY_EN: {
+-			struct ifla_vf_rss_query_en *ivrssq_en;
++	if (tb[IFLA_VF_MAC]) {
++		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
+ 
+-			ivrssq_en = nla_data(vf);
+-			err = -EOPNOTSUPP;
+-			if (ops->ndo_set_vf_rss_query_en)
+-				err = ops->ndo_set_vf_rss_query_en(dev,
+-							    ivrssq_en->vf,
+-							    ivrssq_en->setting);
+-			break;
+-		}
+-		default:
+-			err = -EINVAL;
+-			break;
+-		}
+-		if (err)
+-			break;
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_mac)
++			err = ops->ndo_set_vf_mac(dev, ivm->vf,
++						  ivm->mac);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_VLAN]) {
++		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_vlan)
++			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
++						   ivv->qos);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_TX_RATE]) {
++		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
++		struct ifla_vf_info ivf;
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_get_vf_config)
++			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
++		if (err < 0)
++			return err;
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_rate)
++			err = ops->ndo_set_vf_rate(dev, ivt->vf,
++						   ivf.min_tx_rate,
++						   ivt->rate);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_RATE]) {
++		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_rate)
++			err = ops->ndo_set_vf_rate(dev, ivt->vf,
++						   ivt->min_tx_rate,
++						   ivt->max_tx_rate);
++		if (err < 0)
++			return err;
+ 	}
++
++	if (tb[IFLA_VF_SPOOFCHK]) {
++		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_spoofchk)
++			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
++						       ivs->setting);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_LINK_STATE]) {
++		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
++
++		err = -EOPNOTSUPP;
++		if (ops->ndo_set_vf_link_state)
++			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
++							 ivl->link_state);
++		if (err < 0)
++			return err;
++	}
++
++	if (tb[IFLA_VF_RSS_QUERY_EN]) {
++		struct ifla_vf_rss_query_en *ivrssq_en;
++
++		err = -EOPNOTSUPP;
++		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
++		if (ops->ndo_set_vf_rss_query_en)
++			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
++							   ivrssq_en->setting);
++		if (err < 0)
++			return err;
++	}
++
+ 	return err;
+ }
+ 
+@@ -1722,14 +1720,21 @@ static int do_setlink(const struct sk_buff *skb,
+ 	}
+ 
+ 	if (tb[IFLA_VFINFO_LIST]) {
++		struct nlattr *vfinfo[IFLA_VF_MAX + 1];
+ 		struct nlattr *attr;
+ 		int rem;
++
+ 		nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
+-			if (nla_type(attr) != IFLA_VF_INFO) {
++			if (nla_type(attr) != IFLA_VF_INFO ||
++			    nla_len(attr) < NLA_HDRLEN) {
+ 				err = -EINVAL;
+ 				goto errout;
+ 			}
+-			err = do_setvfinfo(dev, attr);
++			err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr,
++					       ifla_vf_policy);
++			if (err < 0)
++				goto errout;
++			err = do_setvfinfo(dev, vfinfo);
+ 			if (err < 0)
+ 				goto errout;
+ 			status |= DO_SETLINK_NOTIFY;
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 41ec02242ea7..a2e4e47b2839 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
+ 
+ 	if (skb && frag_size) {
+ 		skb->head_frag = 1;
+-		if (virt_to_head_page(data)->pfmemalloc)
++		if (page_is_pfmemalloc(virt_to_head_page(data)))
+ 			skb->pfmemalloc = 1;
+ 	}
+ 	return skb;
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index 827cda560a55..57978c5b2c91 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -732,7 +732,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
+ 		return -ENODEV;
+ 
+ 	/* Use already configured phy mode */
+-	p->phy_interface = p->phy->interface;
++	if (p->phy_interface == PHY_INTERFACE_MODE_NA)
++		p->phy_interface = p->phy->interface;
+ 	phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ 			   p->phy_interface);
+ 
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 90c0e8386116..574fad9cca05 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -20,7 +20,7 @@
+ #include <net/route.h>
+ #include <net/tcp_states.h>
+ 
+-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct inet_sock *inet = inet_sk(sk);
+ 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 
+ 	sk_dst_reset(sk);
+ 
+-	lock_sock(sk);
+-
+ 	oif = sk->sk_bound_dev_if;
+ 	saddr = inet->inet_saddr;
+ 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
+@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	sk_dst_set(sk, &rt->dst);
+ 	err = 0;
+ out:
+-	release_sock(sk);
+ 	return err;
+ }
++EXPORT_SYMBOL(__ip4_datagram_connect);
++
++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip4_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL(ip4_datagram_connect);
+ 
+ /* Because UDP xmit path can manipulate sk_dst_cache without holding
+diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
+index 09b62e17dd8c..0ca933db1b41 100644
+--- a/net/ipv4/fib_trie.c
++++ b/net/ipv4/fib_trie.c
+@@ -1780,8 +1780,6 @@ void fib_table_flush_external(struct fib_table *tb)
+ 		if (hlist_empty(&n->leaf)) {
+ 			put_child_root(pn, n->key, NULL);
+ 			node_free(n);
+-		} else {
+-			leaf_pull_suffix(pn, n);
+ 		}
+ 	}
+ }
+@@ -1852,8 +1850,6 @@ int fib_table_flush(struct fib_table *tb)
+ 		if (hlist_empty(&n->leaf)) {
+ 			put_child_root(pn, n->key, NULL);
+ 			node_free(n);
+-		} else {
+-			leaf_pull_suffix(pn, n);
+ 		}
+ 	}
+ 
+@@ -2457,7 +2453,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter,
+ 		key = l->key + 1;
+ 		iter->pos++;
+ 
+-		if (pos-- <= 0)
++		if (--pos <= 0)
+ 			break;
+ 
+ 		l = NULL;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 8976ca423a07..b27fc401c6a9 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ 	}
+ 
+ 	spin_unlock(&queue->syn_wait_lock);
+-	if (del_timer(&req->rsk_timer))
++	if (del_timer_sync(&req->rsk_timer))
+ 		reqsk_put(req);
+ 	return found;
+ }
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index cc1da6d9cb35..cae22a1a8777 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 	ihl = ip_hdrlen(skb);
+ 
+ 	/* Determine the position of this fragment. */
+-	end = offset + skb->len - ihl;
++	end = offset + skb->len - skb_network_offset(skb) - ihl;
+ 	err = -EINVAL;
+ 
+ 	/* Is this the final fragment? */
+@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ 		goto err;
+ 
+ 	err = -ENOMEM;
+-	if (!pskb_pull(skb, ihl))
++	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
+ 		goto err;
+ 
+ 	err = pskb_trim_rcsum(skb, end - offset);
+@@ -613,6 +613,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+ 	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
+ 	iph->tot_len = htons(len);
+ 	iph->tos |= ecn;
++
++	ip_send_check(iph);
++
+ 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+ 	qp->q.fragments = NULL;
+ 	qp->q.fragments_tail = NULL;
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 4c2c3ba4ba65..626d9e56a6bd 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
+ EXPORT_SYMBOL(ip_tunnel_encap);
+ 
+ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+-			    struct rtable *rt, __be16 df)
++			    struct rtable *rt, __be16 df,
++			    const struct iphdr *inner_iph)
+ {
+ 	struct ip_tunnel *tunnel = netdev_priv(dev);
+ 	int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len;
+@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
+ 
+ 	if (skb->protocol == htons(ETH_P_IP)) {
+ 		if (!skb_is_gso(skb) &&
+-		    (df & htons(IP_DF)) && mtu < pkt_size) {
++		    (inner_iph->frag_off & htons(IP_DF)) &&
++		    mtu < pkt_size) {
+ 			memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ 			return -E2BIG;
+@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ 		goto tx_error;
+ 	}
+ 
+-	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) {
++	if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) {
+ 		ip_rt_put(rt);
+ 		goto tx_error;
+ 	}
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index fc1c658ec6c1..441ca6f38981 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
+ 	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
+ 	if (req) {
+ 		nsk = tcp_check_req(sk, skb, req, false);
+-		if (!nsk)
++		if (!nsk || nsk == sk)
+ 			reqsk_put(req);
+ 		return nsk;
+ 	}
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 83aa604f9273..1b8c5ba7d5f7 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
+ 
+ 	skb->sk = sk;
+ 	skb->destructor = sock_efree;
+-	dst = sk->sk_rx_dst;
++	dst = READ_ONCE(sk->sk_rx_dst);
+ 
+ 	if (dst)
+ 		dst = dst_check(dst, 0);
+-	if (dst)
+-		skb_dst_set_noref(skb, dst);
++	if (dst) {
++		/* DST_NOCACHE can not be used without taking a reference */
++		if (dst->flags & DST_NOCACHE) {
++			if (likely(atomic_inc_not_zero(&dst->__refcnt)))
++				skb_dst_set(skb, dst);
++		} else {
++			skb_dst_set_noref(skb, dst);
++		}
++	}
+ }
+ 
+ int udp_rcv(struct sk_buff *skb)
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 62d908e64eeb..b10a88986a98 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
+ 	return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
+ }
+ 
+-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ {
+ 	struct sockaddr_in6	*usin = (struct sockaddr_in6 *) uaddr;
+ 	struct inet_sock	*inet = inet_sk(sk);
+@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 	if (usin->sin6_family == AF_INET) {
+ 		if (__ipv6_only_sock(sk))
+ 			return -EAFNOSUPPORT;
+-		err = ip4_datagram_connect(sk, uaddr, addr_len);
++		err = __ip4_datagram_connect(sk, uaddr, addr_len);
+ 		goto ipv4_connected;
+ 	}
+ 
+@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ 		sin.sin_addr.s_addr = daddr->s6_addr32[3];
+ 		sin.sin_port = usin->sin6_port;
+ 
+-		err = ip4_datagram_connect(sk,
+-					   (struct sockaddr *) &sin,
+-					   sizeof(sin));
++		err = __ip4_datagram_connect(sk,
++					     (struct sockaddr *) &sin,
++					     sizeof(sin));
+ 
+ ipv4_connected:
+ 		if (err)
+@@ -204,6 +204,16 @@ out:
+ 	fl6_sock_release(flowlabel);
+ 	return err;
+ }
++
++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
++{
++	int res;
++
++	lock_sock(sk);
++	res = __ip6_datagram_connect(sk, uaddr, addr_len);
++	release_sock(sk);
++	return res;
++}
+ EXPORT_SYMBOL_GPL(ip6_datagram_connect);
+ 
+ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
+diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
+index f2e464eba5ef..57990c929cd8 100644
+--- a/net/ipv6/ip6_input.c
++++ b/net/ipv6/ip6_input.c
+@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb)
+ 				if (offset < 0)
+ 					goto out;
+ 
+-				if (!ipv6_is_mld(skb, nexthdr, offset))
+-					goto out;
++				if (ipv6_is_mld(skb, nexthdr, offset))
++					deliver = true;
+ 
+-				deliver = true;
++				goto out;
+ 			}
+ 			/* unknown RA - process it normally */
+ 		}
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index e893cd18612f..08b62047c67f 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
+ static const struct net_offload sit_offload = {
+ 	.callbacks = {
+ 		.gso_segment	= ipv6_gso_segment,
+-		.gro_receive	= ipv6_gro_receive,
+-		.gro_complete	= ipv6_gro_complete,
+ 	},
+ };
+ 
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 3adffb300238..e541d68dba8b 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -946,7 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
+ 				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
+ 	if (req) {
+ 		nsk = tcp_check_req(sk, skb, req, false);
+-		if (!nsk)
++		if (!nsk || nsk == sk)
+ 			reqsk_put(req);
+ 		return nsk;
+ 	}
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index 667111ee6a20..5787f15a3a12 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -301,9 +301,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
+ 	if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
+ 		return TX_CONTINUE;
+ 
+-	if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+-		return TX_CONTINUE;
+-
+ 	if (tx->flags & IEEE80211_TX_PS_BUFFERED)
+ 		return TX_CONTINUE;
+ 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index bf6e76643f78..4856d975492d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -355,25 +355,52 @@ err1:
+ 	return NULL;
+ }
+ 
++
++static void
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
++		   unsigned int order)
++{
++	struct netlink_sock *nlk = nlk_sk(sk);
++	struct sk_buff_head *queue;
++	struct netlink_ring *ring;
++
++	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
++	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
++
++	spin_lock_bh(&queue->lock);
++
++	ring->frame_max		= req->nm_frame_nr - 1;
++	ring->head		= 0;
++	ring->frame_size	= req->nm_frame_size;
++	ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
++
++	swap(ring->pg_vec_len, req->nm_block_nr);
++	swap(ring->pg_vec_order, order);
++	swap(ring->pg_vec, pg_vec);
++
++	__skb_queue_purge(queue);
++	spin_unlock_bh(&queue->lock);
++
++	WARN_ON(atomic_read(&nlk->mapped));
++
++	if (pg_vec)
++		free_pg_vec(pg_vec, order, req->nm_block_nr);
++}
++
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-			    bool closing, bool tx_ring)
++			    bool tx_ring)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	struct netlink_ring *ring;
+-	struct sk_buff_head *queue;
+ 	void **pg_vec = NULL;
+ 	unsigned int order = 0;
+-	int err;
+ 
+ 	ring  = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ 
+-	if (!closing) {
+-		if (atomic_read(&nlk->mapped))
+-			return -EBUSY;
+-		if (atomic_read(&ring->pending))
+-			return -EBUSY;
+-	}
++	if (atomic_read(&nlk->mapped))
++		return -EBUSY;
++	if (atomic_read(&ring->pending))
++		return -EBUSY;
+ 
+ 	if (req->nm_block_nr) {
+ 		if (ring->pg_vec != NULL)
+@@ -405,31 +432,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+ 			return -EINVAL;
+ 	}
+ 
+-	err = -EBUSY;
+ 	mutex_lock(&nlk->pg_vec_lock);
+-	if (closing || atomic_read(&nlk->mapped) == 0) {
+-		err = 0;
+-		spin_lock_bh(&queue->lock);
+-
+-		ring->frame_max		= req->nm_frame_nr - 1;
+-		ring->head		= 0;
+-		ring->frame_size	= req->nm_frame_size;
+-		ring->pg_vec_pages	= req->nm_block_size / PAGE_SIZE;
+-
+-		swap(ring->pg_vec_len, req->nm_block_nr);
+-		swap(ring->pg_vec_order, order);
+-		swap(ring->pg_vec, pg_vec);
+-
+-		__skb_queue_purge(queue);
+-		spin_unlock_bh(&queue->lock);
+-
+-		WARN_ON(atomic_read(&nlk->mapped));
++	if (atomic_read(&nlk->mapped) == 0) {
++		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
++		mutex_unlock(&nlk->pg_vec_lock);
++		return 0;
+ 	}
++
+ 	mutex_unlock(&nlk->pg_vec_lock);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-	return err;
++
++	return -EBUSY;
+ }
+ 
+ static void netlink_mm_open(struct vm_area_struct *vma)
+@@ -898,10 +913,10 @@ static void netlink_sock_destruct(struct sock *sk)
+ 
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->rx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, false);
++			__netlink_set_ring(sk, &req, false, NULL, 0);
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->tx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, true);
++			__netlink_set_ring(sk, &req, true, NULL, 0);
+ 	}
+ #endif /* CONFIG_NETLINK_MMAP */
+ 
+@@ -1079,6 +1094,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 
+ 	err = __netlink_insert(table, sk);
+ 	if (err) {
++		/* In case the hashtable backend returns with -EBUSY
++		 * from here, it must not escape to the caller.
++		 */
++		if (unlikely(err == -EBUSY))
++			err = -EOVERFLOW;
+ 		if (err == -EEXIST)
+ 			err = -EADDRINUSE;
+ 		nlk_sk(sk)->portid = 0;
+@@ -2197,7 +2217,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 			return -EINVAL;
+ 		if (copy_from_user(&req, optval, sizeof(req)))
+ 			return -EFAULT;
+-		err = netlink_set_ring(sk, &req, false,
++		err = netlink_set_ring(sk, &req,
+ 				       optname == NETLINK_TX_RING);
+ 		break;
+ 	}
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index ed54ec533836..b33fed6d1584 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ 
+-	if (r == NCI_STATUS_OK)
++	if (r == NCI_STATUS_OK && skb)
+ 		*skb = conn_info->rx_skb;
+ 
+ 	return r;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index fe1610ddeacf..e1ea5d43b01e 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2307,7 +2307,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 		}
+ 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
+ 					  addr, hlen);
+-		if (tp_len > dev->mtu + dev->hard_header_len) {
++		if (likely(tp_len >= 0) &&
++		    tp_len > dev->mtu + dev->hard_header_len) {
+ 			struct ethhdr *ehdr;
+ 			/* Earlier code assumed this would be a VLAN pkt,
+ 			 * double-check this now that we have the actual
+@@ -2688,7 +2689,7 @@ static int packet_release(struct socket *sock)
+ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+-	const struct net_device *dev_curr;
++	struct net_device *dev_curr;
+ 	__be16 proto_curr;
+ 	bool need_rehook;
+ 
+@@ -2712,15 +2713,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ 
+ 		po->num = proto;
+ 		po->prot_hook.type = proto;
+-
+-		if (po->prot_hook.dev)
+-			dev_put(po->prot_hook.dev);
+-
+ 		po->prot_hook.dev = dev;
+ 
+ 		po->ifindex = dev ? dev->ifindex : 0;
+ 		packet_cached_dev_assign(po, dev);
+ 	}
++	if (dev_curr)
++		dev_put(dev_curr);
+ 
+ 	if (proto == 0 || !need_rehook)
+ 		goto out_unlock;
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f66187c..140a44a5f7b7 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+ 
+ 	/* check for all kinds of wrapping and the like */
+ 	start = (unsigned long)optval;
+-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 3d43e4979f27..f8d9c2a2c451 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
+ }
+ EXPORT_SYMBOL(tcf_hash_destroy);
+ 
+-int tcf_hash_release(struct tc_action *a, int bind)
++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
+ {
+ 	struct tcf_common *p = a->priv;
+ 	int ret = 0;
+@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
+ 	if (p) {
+ 		if (bind)
+ 			p->tcfc_bindcnt--;
+-		else if (p->tcfc_bindcnt > 0)
++		else if (strict && p->tcfc_bindcnt > 0)
+ 			return -EPERM;
+ 
+ 		p->tcfc_refcnt--;
+@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
+ 			ret = 1;
+ 		}
+ 	}
++
+ 	return ret;
+ }
+-EXPORT_SYMBOL(tcf_hash_release);
++EXPORT_SYMBOL(__tcf_hash_release);
+ 
+ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ 			   struct tc_action *a)
+@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
+ 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ 		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
+ 			a->priv = p;
+-			ret = tcf_hash_release(a, 0);
++			ret = __tcf_hash_release(a, false, true);
+ 			if (ret == ACT_P_DELETED) {
+ 				module_put(a->ops->owner);
+ 				n_i++;
+@@ -413,7 +414,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
+ 	int ret = 0;
+ 
+ 	list_for_each_entry_safe(a, tmp, actions, list) {
+-		ret = tcf_hash_release(a, bind);
++		ret = __tcf_hash_release(a, bind, true);
+ 		if (ret == ACT_P_DELETED)
+ 			module_put(a->ops->owner);
+ 		else if (ret < 0)
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index dc6a2d324bd8..521ffca91228 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -27,9 +27,10 @@
+ struct tcf_bpf_cfg {
+ 	struct bpf_prog *filter;
+ 	struct sock_filter *bpf_ops;
+-	char *bpf_name;
++	const char *bpf_name;
+ 	u32 bpf_fd;
+ 	u16 bpf_num_ops;
++	bool is_ebpf;
+ };
+ 
+ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
+@@ -200,6 +201,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 	cfg->bpf_ops = bpf_ops;
+ 	cfg->bpf_num_ops = bpf_num_ops;
+ 	cfg->filter = fp;
++	cfg->is_ebpf = false;
+ 
+ 	return 0;
+ }
+@@ -234,18 +236,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 	cfg->bpf_fd = bpf_fd;
+ 	cfg->bpf_name = name;
+ 	cfg->filter = fp;
++	cfg->is_ebpf = true;
+ 
+ 	return 0;
+ }
+ 
++static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
++{
++	if (cfg->is_ebpf)
++		bpf_prog_put(cfg->filter);
++	else
++		bpf_prog_destroy(cfg->filter);
++
++	kfree(cfg->bpf_ops);
++	kfree(cfg->bpf_name);
++}
++
++static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
++				  struct tcf_bpf_cfg *cfg)
++{
++	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
++	cfg->filter = prog->filter;
++
++	cfg->bpf_ops = prog->bpf_ops;
++	cfg->bpf_name = prog->bpf_name;
++}
++
+ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 			struct nlattr *est, struct tc_action *act,
+ 			int replace, int bind)
+ {
+ 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
++	struct tcf_bpf_cfg cfg, old;
+ 	struct tc_act_bpf *parm;
+ 	struct tcf_bpf *prog;
+-	struct tcf_bpf_cfg cfg;
+ 	bool is_bpf, is_ebpf;
+ 	int ret;
+ 
+@@ -294,6 +318,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 	prog = to_bpf(act);
+ 	spin_lock_bh(&prog->tcf_lock);
+ 
++	if (ret != ACT_P_CREATED)
++		tcf_bpf_prog_fill_cfg(prog, &old);
++
+ 	prog->bpf_ops = cfg.bpf_ops;
+ 	prog->bpf_name = cfg.bpf_name;
+ 
+@@ -309,29 +336,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 
+ 	if (ret == ACT_P_CREATED)
+ 		tcf_hash_insert(act);
++	else
++		tcf_bpf_cfg_cleanup(&old);
+ 
+ 	return ret;
+ 
+ destroy_fp:
+-	if (is_ebpf)
+-		bpf_prog_put(cfg.filter);
+-	else
+-		bpf_prog_destroy(cfg.filter);
+-
+-	kfree(cfg.bpf_ops);
+-	kfree(cfg.bpf_name);
+-
++	tcf_bpf_cfg_cleanup(&cfg);
+ 	return ret;
+ }
+ 
+ static void tcf_bpf_cleanup(struct tc_action *act, int bind)
+ {
+-	const struct tcf_bpf *prog = act->priv;
++	struct tcf_bpf_cfg tmp;
+ 
+-	if (tcf_bpf_is_ebpf(prog))
+-		bpf_prog_put(prog->filter);
+-	else
+-		bpf_prog_destroy(prog->filter);
++	tcf_bpf_prog_fill_cfg(act->priv, &tmp);
++	tcf_bpf_cfg_cleanup(&tmp);
+ }
+ 
+ static struct tc_action_ops act_bpf_ops __read_mostly = {
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index 91bd9c19471d..c0b86f2bfe22 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -364,7 +364,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 		goto errout;
+ 
+ 	if (oldprog) {
+-		list_replace_rcu(&prog->link, &oldprog->link);
++		list_replace_rcu(&oldprog->link, &prog->link);
+ 		tcf_unbind_filter(tp, &oldprog->res);
+ 		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
+ 	} else {
+diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
+index a620c4e288a5..75df923f5c03 100644
+--- a/net/sched/cls_flow.c
++++ b/net/sched/cls_flow.c
+@@ -419,6 +419,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!fnew)
+ 		goto err2;
+ 
++	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
++
+ 	fold = (struct flow_filter *)*arg;
+ 	if (fold) {
+ 		err = -EINVAL;
+@@ -480,7 +482,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 		fnew->mask  = ~0U;
+ 		fnew->tp = tp;
+ 		get_random_bytes(&fnew->hashrnd, 4);
+-		tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ 	}
+ 
+ 	fnew->perturb_timer.function = flow_perturbation;
+@@ -520,7 +521,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 	if (*arg == 0)
+ 		list_add_tail_rcu(&fnew->list, &head->filters);
+ 	else
+-		list_replace_rcu(&fnew->list, &fold->list);
++		list_replace_rcu(&fold->list, &fnew->list);
+ 
+ 	*arg = (unsigned long)fnew;
+ 
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index c244c45b78d7..9291598b5aad 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -162,10 +162,10 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
+ 	skb = dequeue_head(flow);
+ 	len = qdisc_pkt_len(skb);
+ 	q->backlogs[idx] -= len;
+-	kfree_skb(skb);
+ 	sch->q.qlen--;
+ 	qdisc_qstats_drop(sch);
+ 	qdisc_qstats_backlog_dec(sch, skb);
++	kfree_skb(skb);
+ 	flow->dropped++;
+ 	return idx;
+ }
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 1d4fe24af06a..d109d308ec3a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -611,6 +611,7 @@ static void xprt_autoclose(struct work_struct *work)
+ 	xprt->ops->close(xprt);
+ 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ 	xprt_release_write(xprt, NULL);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -720,6 +721,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
+ 	xprt->ops->release_xprt(xprt, NULL);
+ out:
+ 	spin_unlock_bh(&xprt->transport_lock);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -1389,6 +1391,10 @@ out:
+ static void xprt_destroy(struct rpc_xprt *xprt)
+ {
+ 	dprintk("RPC:       destroying transport %p\n", xprt);
++
++	/* Exclude transport connect/disconnect handlers */
++	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
++
+ 	del_timer_sync(&xprt->timer);
+ 
+ 	rpc_xprt_debugfs_unregister(xprt);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 66891e32c5e3..5e3ad598d3f5 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -834,6 +834,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
+ 	sk->sk_user_data = NULL;
+ 
+ 	xs_restore_old_callbacks(transport, sk);
++	xprt_clear_connected(xprt);
+ 	write_unlock_bh(&sk->sk_callback_lock);
+ 	xs_sock_reset_connection_flags(xprt);
+ 
+@@ -1433,6 +1434,7 @@ out:
+ static void xs_tcp_state_change(struct sock *sk)
+ {
+ 	struct rpc_xprt *xprt;
++	struct sock_xprt *transport;
+ 
+ 	read_lock_bh(&sk->sk_callback_lock);
+ 	if (!(xprt = xprt_from_sock(sk)))
+@@ -1444,13 +1446,12 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			sock_flag(sk, SOCK_ZAPPED),
+ 			sk->sk_shutdown);
+ 
++	transport = container_of(xprt, struct sock_xprt, xprt);
+ 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
+ 	switch (sk->sk_state) {
+ 	case TCP_ESTABLISHED:
+ 		spin_lock(&xprt->transport_lock);
+ 		if (!xprt_test_and_set_connected(xprt)) {
+-			struct sock_xprt *transport = container_of(xprt,
+-					struct sock_xprt, xprt);
+ 
+ 			/* Reset TCP record info */
+ 			transport->tcp_offset = 0;
+@@ -1459,6 +1460,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			transport->tcp_flags =
+ 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+ 			xprt->connect_cookie++;
++			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
++			xprt_clear_connecting(xprt);
+ 
+ 			xprt_wake_pending_tasks(xprt, -EAGAIN);
+ 		}
+@@ -1494,6 +1497,9 @@ static void xs_tcp_state_change(struct sock *sk)
+ 		smp_mb__after_atomic();
+ 		break;
+ 	case TCP_CLOSE:
++		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
++					&transport->sock_state))
++			xprt_clear_connecting(xprt);
+ 		xs_sock_mark_closed(xprt);
+ 	}
+  out:
+@@ -2110,6 +2116,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ 	/* Tell the socket layer to start connecting... */
+ 	xprt->stat.connect_count++;
+ 	xprt->stat.connect_start = jiffies;
++	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+ 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ 	switch (ret) {
+ 	case 0:
+@@ -2174,7 +2181,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -EINPROGRESS:
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+-		xprt_clear_connecting(xprt);
+ 		return;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+@@ -2216,13 +2222,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+ 
+ 	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
+ 
+-	/* Start by resetting any existing state */
+-	xs_reset_transport(transport);
+-
+-	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
++	if (transport->sock != NULL) {
+ 		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
+ 				"seconds\n",
+ 				xprt, xprt->reestablish_timeout / HZ);
++
++		/* Start by resetting any existing state */
++		xs_reset_transport(transport);
++
+ 		queue_delayed_work(rpciod_workqueue,
+ 				   &transport->connect_worker,
+ 				   xprt->reestablish_timeout);
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index f485600c4507..20cc6df07157 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2009,6 +2009,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+ 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+ 	if (res)
+ 		goto exit;
++	security_sk_clone(sock->sk, new_sock->sk);
+ 
+ 	new_sk = new_sock->sk;
+ 	new_tsock = tipc_sk(new_sk);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 91f6928560e1..6fe862594e9b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1134,7 +1134,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* override all pins as BIOS on old Amilo is broken */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x99030130 }, /* bass speaker */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1154,7 +1154,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* almost compatible with FUJITSU, but no bass and SPDIF */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x411111f0 }, /* N/A */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1363,7 +1363,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+ 	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+ 	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
+-	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
++	SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+ 	SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
+@@ -5118,8 +5118,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -6454,6 +6457,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13),
++	SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 8b7e391dd0b8..cd8ed2e393a2 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
+ 		for (c = 0; c < MAX_CHANNELS; c++) {
+ 			if (!(cval->cmask & (1 << c)))
+ 				continue;
+-			if (cval->cached & (1 << c)) {
++			if (cval->cached & (1 << (c + 1))) {
+ 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
+ 							cval->cache_val[idx]);
+ 				if (err < 0)


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-09-28 23:57 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-09-28 23:57 UTC (permalink / raw
  To: gentoo-commits

commit:     c90ba25cf30f0a5d1aadc37493e2d3eb8e44c748
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 28 23:57:34 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 28 23:57:34 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c90ba25c

dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE. See bug #561558. Thanks to kipplasterjoe for reporting.

 0000_README                                |  4 ++
 1600_dm-crypt-limit-max-segment-size.patch | 84 ++++++++++++++++++++++++++++++
 2 files changed, 88 insertions(+)

diff --git a/0000_README b/0000_README
index b88684d..4a96d2e 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,10 @@ Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
 From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
 Desc:   Enable link security restrictions by default.
 
+Patch:  1600_dm-crypt-limit-max-segment-size.patch
+From:   https://bugzilla.kernel.org/show_bug.cgi?id=104421
+Desc:   dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE.
+
 Patch:  2700_ThinkPad-30-brightness-control-fix.patch
 From:   Seth Forshee <seth.forshee@canonical.com>
 Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.

diff --git a/1600_dm-crypt-limit-max-segment-size.patch b/1600_dm-crypt-limit-max-segment-size.patch
new file mode 100644
index 0000000..82aca44
--- /dev/null
+++ b/1600_dm-crypt-limit-max-segment-size.patch
@@ -0,0 +1,84 @@
+From 586b286b110e94eb31840ac5afc0c24e0881fe34 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Wed, 9 Sep 2015 21:34:51 -0400
+Subject: dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE
+
+Setting the dm-crypt device's max_segment_size to PAGE_SIZE is an
+unfortunate constraint that is required to avoid the potential for
+exceeding dm-crypt's underlying device's max_segments limits -- due to
+crypt_alloc_buffer() possibly allocating pages for the encryption bio
+that are not as physically contiguous as the original bio.
+
+It is interesting to note that this problem was already fixed back in
+2007 via commit 91e106259 ("dm crypt: use bio_add_page").  But Linux 4.0
+commit cf2f1abfb ("dm crypt: don't allocate pages for a partial
+request") regressed dm-crypt back to _not_ using bio_add_page().  But
+given dm-crypt's cpu parallelization changes all depend on commit
+cf2f1abfb's abandoning of the more complex io fragments processing that
+dm-crypt previously had we cannot easily go back to using
+bio_add_page().
+
+So all said the cleanest way to resolve this issue is to fix dm-crypt to
+properly constrain the original bios entering dm-crypt so the encryption
+bios that dm-crypt generates from the original bios are always
+compatible with the underlying device's max_segments queue limits.
+
+It should be noted that technically Linux 4.3 does _not_ need this fix
+because of the block core's new late bio-splitting capability.  But, it
+is reasoned, there is little to be gained by having the block core split
+the encrypted bio that is composed of PAGE_SIZE segments.  That said, in
+the future we may revert this change.
+
+Fixes: cf2f1abfb ("dm crypt: don't allocate pages for a partial request")
+Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=104421
+Suggested-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Cc: stable@vger.kernel.org # 4.0+
+
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index d60c88d..4b3b6f8 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+ 
+ /*
+  * Generate a new unfragmented bio with the given size
+- * This should never violate the device limitations
++ * This should never violate the device limitations (but only because
++ * max_segment_size is being constrained to PAGE_SIZE).
+  *
+  * This function may be called concurrently. If we allocate from the mempool
+  * concurrently, there is a possibility of deadlock. For example, if we have
+@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
+ 	return fn(ti, cc->dev, cc->start, ti->len, data);
+ }
+ 
++static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
++{
++	/*
++	 * Unfortunate constraint that is required to avoid the potential
++	 * for exceeding underlying device's max_segments limits -- due to
++	 * crypt_alloc_buffer() possibly allocating pages for the encryption
++	 * bio that are not as physically contiguous as the original bio.
++	 */
++	limits->max_segment_size = PAGE_SIZE;
++}
++
+ static struct target_type crypt_target = {
+ 	.name   = "crypt",
+-	.version = {1, 14, 0},
++	.version = {1, 14, 1},
+ 	.module = THIS_MODULE,
+ 	.ctr    = crypt_ctr,
+ 	.dtr    = crypt_dtr,
+@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
+ 	.resume = crypt_resume,
+ 	.message = crypt_message,
+ 	.iterate_devices = crypt_iterate_devices,
++	.io_hints = crypt_io_hints,
+ };
+ 
+ static int __init dm_crypt_init(void)
+-- 
+cgit v0.10.2
+


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-09-21 22:16 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-09-21 22:16 UTC (permalink / raw
  To: gentoo-commits

commit:     8fe6c9dc74e88b375ffa56515338a480f17ef3d1
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 21 22:16:05 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 21 22:16:05 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8fe6c9dc

Linux patch 4.1.8

 0000_README            |    4 +
 1007_linux-4.1.8.patch | 4367 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4371 insertions(+)

diff --git a/0000_README b/0000_README
index ad474e3..b88684d 100644
--- a/0000_README
+++ b/0000_README
@@ -71,6 +71,10 @@ Patch:  1006_linux-4.1.7.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.7
 
+Patch:  1007_linux-4.1.8.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.8
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1007_linux-4.1.8.patch b/1007_linux-4.1.8.patch
new file mode 100644
index 0000000..a41c476
--- /dev/null
+++ b/1007_linux-4.1.8.patch
@@ -0,0 +1,4367 @@
+diff --git a/Documentation/ABI/testing/configfs-usb-gadget-loopback b/Documentation/ABI/testing/configfs-usb-gadget-loopback
+index 9aae5bfb9908..06beefbcf061 100644
+--- a/Documentation/ABI/testing/configfs-usb-gadget-loopback
++++ b/Documentation/ABI/testing/configfs-usb-gadget-loopback
+@@ -5,4 +5,4 @@ Description:
+ 		The attributes:
+ 
+ 		qlen		- depth of loopback queue
+-		bulk_buflen	- buffer length
++		buflen		- buffer length
+diff --git a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
+index 29477c319f61..bc7ff731aa0c 100644
+--- a/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
++++ b/Documentation/ABI/testing/configfs-usb-gadget-sourcesink
+@@ -9,4 +9,4 @@ Description:
+ 		isoc_maxpacket	- 0 - 1023 (fs), 0 - 1024 (hs/ss)
+ 		isoc_mult	- 0..2 (hs/ss only)
+ 		isoc_maxburst	- 0..15 (ss only)
+-		qlen		- buffer length
++		buflen		- buffer length
+diff --git a/Documentation/usb/gadget-testing.txt b/Documentation/usb/gadget-testing.txt
+index f45b2bf4b41d..820664af8f6a 100644
+--- a/Documentation/usb/gadget-testing.txt
++++ b/Documentation/usb/gadget-testing.txt
+@@ -237,9 +237,7 @@ Testing the LOOPBACK function
+ -----------------------------
+ 
+ device: run the gadget
+-host: test-usb
+-
+-http://www.linux-usb.org/usbtest/testusb.c
++host: test-usb (tools/usb/testusb.c)
+ 
+ 8. MASS STORAGE function
+ ========================
+@@ -588,9 +586,8 @@ Testing the SOURCESINK function
+ -------------------------------
+ 
+ device: run the gadget
+-host: test-usb
++host: test-usb (tools/usb/testusb.c)
+ 
+-http://www.linux-usb.org/usbtest/testusb.c
+ 
+ 16. UAC1 function
+ =================
+diff --git a/Makefile b/Makefile
+index b8591e5f79b8..dbf3baa5fabb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 7
++SUBLEVEL = 8
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 45df48ba0b12..19f4cc634b0e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -538,6 +538,7 @@ config ARCH_ORION5X
+ 	select MVEBU_MBUS
+ 	select PCI
+ 	select PLAT_ORION_LEGACY
++	select MULTI_IRQ_HANDLER
+ 	help
+ 	  Support for the following Marvell Orion 5x series SoCs:
+ 	  Orion-1 (5181), Orion-VoIP (5181L), Orion-NAS (5182),
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index 0b9906880c0c..75aba40c69e1 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -181,7 +181,7 @@
+ 
+ 		display-timings {
+ 			timing-0 {
+-				clock-frequency = <0>;
++				clock-frequency = <4600000>;
+ 				hactive = <320>;
+ 				vactive = <320>;
+ 				hfront-porch = <1>;
+diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
+index 165968d51d8f..8eca5878a877 100644
+--- a/arch/arm/boot/dts/rk3288.dtsi
++++ b/arch/arm/boot/dts/rk3288.dtsi
+@@ -584,7 +584,7 @@
+ 		compatible = "rockchip,rk3288-wdt", "snps,dw-wdt";
+ 		reg = <0xff800000 0x100>;
+ 		clocks = <&cru PCLK_WDT>;
+-		interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
++		interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ 		status = "disabled";
+ 	};
+ 
+diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
+index 57d5df0c1fbd..7581e036bda6 100644
+--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
++++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
+@@ -331,7 +331,7 @@ static struct clockdomain l4per2_7xx_clkdm = {
+ 	.dep_bit	  = DRA7XX_L4PER2_STATDEP_SHIFT,
+ 	.wkdep_srcs	  = l4per2_wkup_sleep_deps,
+ 	.sleepdep_srcs	  = l4per2_wkup_sleep_deps,
+-	.flags		  = CLKDM_CAN_HWSUP_SWSUP,
++	.flags		  = CLKDM_CAN_SWSUP,
+ };
+ 
+ static struct clockdomain mpu0_7xx_clkdm = {
+diff --git a/arch/arm/mach-orion5x/include/mach/irqs.h b/arch/arm/mach-orion5x/include/mach/irqs.h
+index a6fa9d8f12d8..2431d9923427 100644
+--- a/arch/arm/mach-orion5x/include/mach/irqs.h
++++ b/arch/arm/mach-orion5x/include/mach/irqs.h
+@@ -16,42 +16,42 @@
+ /*
+  * Orion Main Interrupt Controller
+  */
+-#define IRQ_ORION5X_BRIDGE		0
+-#define IRQ_ORION5X_DOORBELL_H2C	1
+-#define IRQ_ORION5X_DOORBELL_C2H	2
+-#define IRQ_ORION5X_UART0		3
+-#define IRQ_ORION5X_UART1		4
+-#define IRQ_ORION5X_I2C			5
+-#define IRQ_ORION5X_GPIO_0_7		6
+-#define IRQ_ORION5X_GPIO_8_15		7
+-#define IRQ_ORION5X_GPIO_16_23		8
+-#define IRQ_ORION5X_GPIO_24_31		9
+-#define IRQ_ORION5X_PCIE0_ERR		10
+-#define IRQ_ORION5X_PCIE0_INT		11
+-#define IRQ_ORION5X_USB1_CTRL		12
+-#define IRQ_ORION5X_DEV_BUS_ERR		14
+-#define IRQ_ORION5X_PCI_ERR		15
+-#define IRQ_ORION5X_USB_BR_ERR		16
+-#define IRQ_ORION5X_USB0_CTRL		17
+-#define IRQ_ORION5X_ETH_RX		18
+-#define IRQ_ORION5X_ETH_TX		19
+-#define IRQ_ORION5X_ETH_MISC		20
+-#define IRQ_ORION5X_ETH_SUM		21
+-#define IRQ_ORION5X_ETH_ERR		22
+-#define IRQ_ORION5X_IDMA_ERR		23
+-#define IRQ_ORION5X_IDMA_0		24
+-#define IRQ_ORION5X_IDMA_1		25
+-#define IRQ_ORION5X_IDMA_2		26
+-#define IRQ_ORION5X_IDMA_3		27
+-#define IRQ_ORION5X_CESA		28
+-#define IRQ_ORION5X_SATA		29
+-#define IRQ_ORION5X_XOR0		30
+-#define IRQ_ORION5X_XOR1		31
++#define IRQ_ORION5X_BRIDGE		(1 + 0)
++#define IRQ_ORION5X_DOORBELL_H2C	(1 + 1)
++#define IRQ_ORION5X_DOORBELL_C2H	(1 + 2)
++#define IRQ_ORION5X_UART0		(1 + 3)
++#define IRQ_ORION5X_UART1		(1 + 4)
++#define IRQ_ORION5X_I2C			(1 + 5)
++#define IRQ_ORION5X_GPIO_0_7		(1 + 6)
++#define IRQ_ORION5X_GPIO_8_15		(1 + 7)
++#define IRQ_ORION5X_GPIO_16_23		(1 + 8)
++#define IRQ_ORION5X_GPIO_24_31		(1 + 9)
++#define IRQ_ORION5X_PCIE0_ERR		(1 + 10)
++#define IRQ_ORION5X_PCIE0_INT		(1 + 11)
++#define IRQ_ORION5X_USB1_CTRL		(1 + 12)
++#define IRQ_ORION5X_DEV_BUS_ERR		(1 + 14)
++#define IRQ_ORION5X_PCI_ERR		(1 + 15)
++#define IRQ_ORION5X_USB_BR_ERR		(1 + 16)
++#define IRQ_ORION5X_USB0_CTRL		(1 + 17)
++#define IRQ_ORION5X_ETH_RX		(1 + 18)
++#define IRQ_ORION5X_ETH_TX		(1 + 19)
++#define IRQ_ORION5X_ETH_MISC		(1 + 20)
++#define IRQ_ORION5X_ETH_SUM		(1 + 21)
++#define IRQ_ORION5X_ETH_ERR		(1 + 22)
++#define IRQ_ORION5X_IDMA_ERR		(1 + 23)
++#define IRQ_ORION5X_IDMA_0		(1 + 24)
++#define IRQ_ORION5X_IDMA_1		(1 + 25)
++#define IRQ_ORION5X_IDMA_2		(1 + 26)
++#define IRQ_ORION5X_IDMA_3		(1 + 27)
++#define IRQ_ORION5X_CESA		(1 + 28)
++#define IRQ_ORION5X_SATA		(1 + 29)
++#define IRQ_ORION5X_XOR0		(1 + 30)
++#define IRQ_ORION5X_XOR1		(1 + 31)
+ 
+ /*
+  * Orion General Purpose Pins
+  */
+-#define IRQ_ORION5X_GPIO_START	32
++#define IRQ_ORION5X_GPIO_START	33
+ #define NR_GPIO_IRQS		32
+ 
+ #define NR_IRQS			(IRQ_ORION5X_GPIO_START + NR_GPIO_IRQS)
+diff --git a/arch/arm/mach-orion5x/irq.c b/arch/arm/mach-orion5x/irq.c
+index cd4bac4d7e43..086ecb87d885 100644
+--- a/arch/arm/mach-orion5x/irq.c
++++ b/arch/arm/mach-orion5x/irq.c
+@@ -42,7 +42,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
+ 	stat = readl_relaxed(MAIN_IRQ_CAUSE);
+ 	stat &= readl_relaxed(MAIN_IRQ_MASK);
+ 	if (stat) {
+-		unsigned int hwirq = __fls(stat);
++		unsigned int hwirq = 1 + __fls(stat);
+ 		handle_IRQ(hwirq, regs);
+ 		return;
+ 	}
+@@ -51,7 +51,7 @@ __exception_irq_entry orion5x_legacy_handle_irq(struct pt_regs *regs)
+ 
+ void __init orion5x_init_irq(void)
+ {
+-	orion_irq_init(0, MAIN_IRQ_MASK);
++	orion_irq_init(1, MAIN_IRQ_MASK);
+ 
+ #ifdef CONFIG_MULTI_IRQ_HANDLER
+ 	set_handle_irq(orion5x_legacy_handle_irq);
+diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
+index 2e6ab67e2284..611a5f96d3ca 100644
+--- a/arch/arm/mach-rockchip/platsmp.c
++++ b/arch/arm/mach-rockchip/platsmp.c
+@@ -72,29 +72,22 @@ static struct reset_control *rockchip_get_core_reset(int cpu)
+ static int pmu_set_power_domain(int pd, bool on)
+ {
+ 	u32 val = (on) ? 0 : BIT(pd);
++	struct reset_control *rstc = rockchip_get_core_reset(pd);
+ 	int ret;
+ 
++	if (IS_ERR(rstc) && read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
++		pr_err("%s: could not get reset control for core %d\n",
++		       __func__, pd);
++		return PTR_ERR(rstc);
++	}
++
+ 	/*
+ 	 * We need to soft reset the cpu when we turn off the cpu power domain,
+ 	 * or else the active processors might be stalled when the individual
+ 	 * processor is powered down.
+ 	 */
+-	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9) {
+-		struct reset_control *rstc = rockchip_get_core_reset(pd);
+-
+-		if (IS_ERR(rstc)) {
+-			pr_err("%s: could not get reset control for core %d\n",
+-			       __func__, pd);
+-			return PTR_ERR(rstc);
+-		}
+-
+-		if (on)
+-			reset_control_deassert(rstc);
+-		else
+-			reset_control_assert(rstc);
+-
+-		reset_control_put(rstc);
+-	}
++	if (!IS_ERR(rstc) && !on)
++		reset_control_assert(rstc);
+ 
+ 	ret = regmap_update_bits(pmu, PMU_PWRDN_CON, BIT(pd), val);
+ 	if (ret < 0) {
+@@ -112,6 +105,12 @@ static int pmu_set_power_domain(int pd, bool on)
+ 		}
+ 	}
+ 
++	if (!IS_ERR(rstc)) {
++		if (on)
++			reset_control_deassert(rstc);
++		reset_control_put(rstc);
++	}
++
+ 	return 0;
+ }
+ 
+@@ -147,8 +146,12 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
+ 		 * the mailbox:
+ 		 * sram_base_addr + 4: 0xdeadbeaf
+ 		 * sram_base_addr + 8: start address for pc
++		 * The cpu0 need to wait the other cpus other than cpu0 entering
++		 * the wfe state.The wait time is affected by many aspects.
++		 * (e.g: cpu frequency, bootrom frequency, sram frequency, ...)
+ 		 * */
+-		udelay(10);
++		mdelay(1); /* ensure the cpus other than cpu0 to startup */
++
+ 		writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
+ 		writel(0xDEADBEAF, sram_base_addr + 4);
+ 		dsb_sev();
+diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+index b027a89737b6..c6d601cc9764 100644
+--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
++++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+@@ -421,14 +421,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
+ 	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+ 	v = pte & ~HPTE_V_HVLOCK;
+ 	if (v & HPTE_V_VALID) {
+-		u64 pte1;
+-
+-		pte1 = be64_to_cpu(hpte[1]);
+ 		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
+-		rb = compute_tlbie_rb(v, pte1, pte_index);
++		rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
+ 		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
+-		/* Read PTE low word after tlbie to get final R/C values */
+-		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
++		/*
++		 * The reference (R) and change (C) bits in a HPT
++		 * entry can be set by hardware at any time up until
++		 * the HPTE is invalidated and the TLB invalidation
++		 * sequence has completed.  This means that when
++		 * removing a HPTE, we need to re-read the HPTE after
++		 * the invalidation sequence has completed in order to
++		 * obtain reliable values of R and C.
++		 */
++		remove_revmap_chain(kvm, pte_index, rev, v,
++				    be64_to_cpu(hpte[1]));
+ 	}
+ 	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
+ 	note_hpte_modification(kvm, rev);
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 4d70df26c402..3b2d2c5b6376 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -1127,6 +1127,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ 	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
+ 	bne	3f
+ 	lbz	r0, HSTATE_HOST_IPI(r13)
++	cmpwi	r0, 0
+ 	beq	4f
+ 	b	guest_exit_cont
+ 3:
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 7262fe438c99..1942f22e6694 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -683,7 +683,7 @@ static void __init setup_memory(void)
+ /*
+  * Setup hardware capabilities.
+  */
+-static void __init setup_hwcaps(void)
++static int __init setup_hwcaps(void)
+ {
+ 	static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
+ 	struct cpuid cpu_id;
+@@ -749,9 +749,11 @@ static void __init setup_hwcaps(void)
+ 		elf_hwcap |= HWCAP_S390_TE;
+ 
+ 	/*
+-	 * Vector extension HWCAP_S390_VXRS is bit 11.
++	 * Vector extension HWCAP_S390_VXRS is bit 11. The Vector extension
++	 * can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
++	 * instead of facility bit 129.
+ 	 */
+-	if (test_facility(129))
++	if (MACHINE_HAS_VX)
+ 		elf_hwcap |= HWCAP_S390_VXRS;
+ 	get_cpu_id(&cpu_id);
+ 	add_device_randomness(&cpu_id, sizeof(cpu_id));
+@@ -788,7 +790,9 @@ static void __init setup_hwcaps(void)
+ 		strcpy(elf_platform, "z13");
+ 		break;
+ 	}
++	return 0;
+ }
++arch_initcall(setup_hwcaps);
+ 
+ /*
+  * Add system information as device randomness
+@@ -871,11 +875,6 @@ void __init setup_arch(char **cmdline_p)
+         cpu_init();
+ 
+ 	/*
+-	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
+-	 */
+-	setup_hwcaps();
+-
+-	/*
+ 	 * Create kernel page tables and switch to virtual addressing.
+ 	 */
+         paging_init();
+diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+index 2079baf06bdd..daf8d2b9a217 100644
+--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
++++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
+@@ -294,6 +294,7 @@ static struct ahash_alg ghash_async_alg = {
+ 			.cra_name		= "ghash",
+ 			.cra_driver_name	= "ghash-clmulni",
+ 			.cra_priority		= 400,
++			.cra_ctxsize		= sizeof(struct ghash_async_ctx),
+ 			.cra_flags		= CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ 			.cra_blocksize		= GHASH_BLOCK_SIZE,
+ 			.cra_type		= &crypto_ahash_type,
+diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
+index a0bf89fd2647..4e10d73cf018 100644
+--- a/arch/x86/include/asm/desc.h
++++ b/arch/x86/include/asm/desc.h
+@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
+ 	set_ldt(NULL, 0);
+ }
+ 
+-/*
+- * load one particular LDT into the current CPU
+- */
+-static inline void load_LDT_nolock(mm_context_t *pc)
+-{
+-	set_ldt(pc->ldt, pc->size);
+-}
+-
+-static inline void load_LDT(mm_context_t *pc)
+-{
+-	preempt_disable();
+-	load_LDT_nolock(pc);
+-	preempt_enable();
+-}
+-
+ static inline unsigned long get_desc_base(const struct desc_struct *desc)
+ {
+ 	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
+diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
+index 09b9620a73b4..364d27481a52 100644
+--- a/arch/x86/include/asm/mmu.h
++++ b/arch/x86/include/asm/mmu.h
+@@ -9,8 +9,7 @@
+  * we put the segment information here.
+  */
+ typedef struct {
+-	void *ldt;
+-	int size;
++	struct ldt_struct *ldt;
+ 
+ #ifdef CONFIG_X86_64
+ 	/* True if mm supports a task running in 32 bit compatibility mode. */
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index e997f70f80c4..80d67dd80351 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
+ #endif
+ 
+ /*
++ * ldt_structs can be allocated, used, and freed, but they are never
++ * modified while live.
++ */
++struct ldt_struct {
++	/*
++	 * Xen requires page-aligned LDTs with special permissions.  This is
++	 * needed to prevent us from installing evil descriptors such as
++	 * call gates.  On native, we could merge the ldt_struct and LDT
++	 * allocations, but it's not worth trying to optimize.
++	 */
++	struct desc_struct *entries;
++	int size;
++};
++
++static inline void load_mm_ldt(struct mm_struct *mm)
++{
++	struct ldt_struct *ldt;
++
++	/* lockless_dereference synchronizes with smp_store_release */
++	ldt = lockless_dereference(mm->context.ldt);
++
++	/*
++	 * Any change to mm->context.ldt is followed by an IPI to all
++	 * CPUs with the mm active.  The LDT will not be freed until
++	 * after the IPI is handled by all such CPUs.  This means that,
++	 * if the ldt_struct changes before we return, the values we see
++	 * will be safe, and the new values will be loaded before we run
++	 * any user code.
++	 *
++	 * NB: don't try to convert this to use RCU without extreme care.
++	 * We would still need IRQs off, because we don't want to change
++	 * the local LDT after an IPI loaded a newer value than the one
++	 * that we can see.
++	 */
++
++	if (unlikely(ldt))
++		set_ldt(ldt->entries, ldt->size);
++	else
++		clear_LDT();
++
++	DEBUG_LOCKS_WARN_ON(preemptible());
++}
++
++/*
+  * Used for LDT copy/destruction.
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 		 * was called and then modify_ldt changed
+ 		 * prev->context.ldt but suppressed an IPI to this CPU.
+ 		 * In this case, prev->context.ldt != NULL, because we
+-		 * never free an LDT while the mm still exists.  That
+-		 * means that next->context.ldt != prev->context.ldt,
+-		 * because mms never share an LDT.
++		 * never set context.ldt to NULL while the mm still
++		 * exists.  That means that next->context.ldt !=
++		 * prev->context.ldt, because mms never share an LDT.
+ 		 */
+ 		if (unlikely(prev->context.ldt != next->context.ldt))
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 	}
+ #ifdef CONFIG_SMP
+ 	  else {
+@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ 			load_cr3(next->pgd);
+ 			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ 			load_mm_cr4(next);
+-			load_LDT_nolock(&next->context);
++			load_mm_ldt(next);
+ 		}
+ 	}
+ #endif
+diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
+index dbe76a14c3c9..07bea80223f6 100644
+--- a/arch/x86/kernel/acpi/boot.c
++++ b/arch/x86/kernel/acpi/boot.c
+@@ -489,6 +489,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger,
+ 		polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
+ 
+ 	mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
++	acpi_penalize_sci_irq(bus_irq, trigger, polarity);
+ 
+ 	/*
+ 	 * stash over-ride to indicate we've been here
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index a62cf04dac8a..205e0f3df501 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -1434,7 +1434,7 @@ void cpu_init(void)
+ 	load_sp0(t, &current->thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	clear_all_debug_regs();
+ 	dbg_restore_debug_regs();
+@@ -1483,7 +1483,7 @@ void cpu_init(void)
+ 	load_sp0(t, thread);
+ 	set_tss_desc(cpu, t);
+ 	load_TR_desc();
+-	load_LDT(&init_mm.context);
++	load_mm_ldt(&init_mm);
+ 
+ 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 
+diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
+index b4a41cf030ed..e166d833cf63 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
++++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
+@@ -116,6 +116,27 @@ void mce_intel_hcpu_update(unsigned long cpu)
+ 	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
+ }
+ 
++static void cmci_toggle_interrupt_mode(bool on)
++{
++	unsigned long flags, *owned;
++	int bank;
++	u64 val;
++
++	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
++	owned = this_cpu_ptr(mce_banks_owned);
++	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
++		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
++
++		if (on)
++			val |= MCI_CTL2_CMCI_EN;
++		else
++			val &= ~MCI_CTL2_CMCI_EN;
++
++		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
++	}
++	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
++}
++
+ unsigned long cmci_intel_adjust_timer(unsigned long interval)
+ {
+ 	if ((this_cpu_read(cmci_backoff_cnt) > 0) &&
+@@ -145,7 +166,7 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
+ 		 */
+ 		if (!atomic_read(&cmci_storm_on_cpus)) {
+ 			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
+-			cmci_reenable();
++			cmci_toggle_interrupt_mode(true);
+ 			cmci_recheck();
+ 		}
+ 		return CMCI_POLL_INTERVAL;
+@@ -156,22 +177,6 @@ unsigned long cmci_intel_adjust_timer(unsigned long interval)
+ 	}
+ }
+ 
+-static void cmci_storm_disable_banks(void)
+-{
+-	unsigned long flags, *owned;
+-	int bank;
+-	u64 val;
+-
+-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+-	owned = this_cpu_ptr(mce_banks_owned);
+-	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+-		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+-		val &= ~MCI_CTL2_CMCI_EN;
+-		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+-	}
+-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+-}
+-
+ static bool cmci_storm_detect(void)
+ {
+ 	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
+@@ -193,7 +198,7 @@ static bool cmci_storm_detect(void)
+ 	if (cnt <= CMCI_STORM_THRESHOLD)
+ 		return false;
+ 
+-	cmci_storm_disable_banks();
++	cmci_toggle_interrupt_mode(false);
+ 	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
+ 	r = atomic_add_return(1, &cmci_storm_on_cpus);
+ 	mce_timer_kick(CMCI_STORM_INTERVAL);
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index aa4e3a74e541..4cc98a4e8ea9 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -2170,21 +2170,25 @@ static unsigned long get_segment_base(unsigned int segment)
+ 	int idx = segment >> 3;
+ 
+ 	if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
++		struct ldt_struct *ldt;
++
+ 		if (idx > LDT_ENTRIES)
+ 			return 0;
+ 
+-		if (idx > current->active_mm->context.size)
++		/* IRQs are off, so this synchronizes with smp_store_release */
++		ldt = lockless_dereference(current->active_mm->context.ldt);
++		if (!ldt || idx > ldt->size)
+ 			return 0;
+ 
+-		desc = current->active_mm->context.ldt;
++		desc = &ldt->entries[idx];
+ 	} else {
+ 		if (idx > GDT_ENTRIES)
+ 			return 0;
+ 
+-		desc = raw_cpu_ptr(gdt_page.gdt);
++		desc = raw_cpu_ptr(gdt_page.gdt) + idx;
+ 	}
+ 
+-	return get_desc_base(desc + idx);
++	return get_desc_base(desc);
+ }
+ 
+ #ifdef CONFIG_COMPAT
+diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
+index c37886d759cc..2bcc0525f1c1 100644
+--- a/arch/x86/kernel/ldt.c
++++ b/arch/x86/kernel/ldt.c
+@@ -12,6 +12,7 @@
+ #include <linux/string.h>
+ #include <linux/mm.h>
+ #include <linux/smp.h>
++#include <linux/slab.h>
+ #include <linux/vmalloc.h>
+ #include <linux/uaccess.h>
+ 
+@@ -20,82 +21,82 @@
+ #include <asm/mmu_context.h>
+ #include <asm/syscalls.h>
+ 
+-#ifdef CONFIG_SMP
++/* context.lock is held for us, so we don't need any locking. */
+ static void flush_ldt(void *current_mm)
+ {
+-	if (current->active_mm == current_mm)
+-		load_LDT(&current->active_mm->context);
++	mm_context_t *pc;
++
++	if (current->active_mm != current_mm)
++		return;
++
++	pc = &current->active_mm->context;
++	set_ldt(pc->ldt->entries, pc->ldt->size);
+ }
+-#endif
+ 
+-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
++static struct ldt_struct *alloc_ldt_struct(int size)
+ {
+-	void *oldldt, *newldt;
+-	int oldsize;
+-
+-	if (mincount <= pc->size)
+-		return 0;
+-	oldsize = pc->size;
+-	mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
+-			(~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
+-	if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
+-		newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
++	struct ldt_struct *new_ldt;
++	int alloc_size;
++
++	if (size > LDT_ENTRIES)
++		return NULL;
++
++	new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
++	if (!new_ldt)
++		return NULL;
++
++	BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
++	alloc_size = size * LDT_ENTRY_SIZE;
++
++	/*
++	 * Xen is very picky: it requires a page-aligned LDT that has no
++	 * trailing nonzero bytes in any page that contains LDT descriptors.
++	 * Keep it simple: zero the whole allocation and never allocate less
++	 * than PAGE_SIZE.
++	 */
++	if (alloc_size > PAGE_SIZE)
++		new_ldt->entries = vzalloc(alloc_size);
+ 	else
+-		newldt = (void *)__get_free_page(GFP_KERNEL);
+-
+-	if (!newldt)
+-		return -ENOMEM;
++		new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ 
+-	if (oldsize)
+-		memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
+-	oldldt = pc->ldt;
+-	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
+-	       (mincount - oldsize) * LDT_ENTRY_SIZE);
++	if (!new_ldt->entries) {
++		kfree(new_ldt);
++		return NULL;
++	}
+ 
+-	paravirt_alloc_ldt(newldt, mincount);
++	new_ldt->size = size;
++	return new_ldt;
++}
+ 
+-#ifdef CONFIG_X86_64
+-	/* CHECKME: Do we really need this ? */
+-	wmb();
+-#endif
+-	pc->ldt = newldt;
+-	wmb();
+-	pc->size = mincount;
+-	wmb();
+-
+-	if (reload) {
+-#ifdef CONFIG_SMP
+-		preempt_disable();
+-		load_LDT(pc);
+-		if (!cpumask_equal(mm_cpumask(current->mm),
+-				   cpumask_of(smp_processor_id())))
+-			smp_call_function(flush_ldt, current->mm, 1);
+-		preempt_enable();
+-#else
+-		load_LDT(pc);
+-#endif
+-	}
+-	if (oldsize) {
+-		paravirt_free_ldt(oldldt, oldsize);
+-		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(oldldt);
+-		else
+-			put_page(virt_to_page(oldldt));
+-	}
+-	return 0;
++/* After calling this, the LDT is immutable. */
++static void finalize_ldt_struct(struct ldt_struct *ldt)
++{
++	paravirt_alloc_ldt(ldt->entries, ldt->size);
+ }
+ 
+-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++/* context.lock is held */
++static void install_ldt(struct mm_struct *current_mm,
++			struct ldt_struct *ldt)
+ {
+-	int err = alloc_ldt(new, old->size, 0);
+-	int i;
++	/* Synchronizes with lockless_dereference in load_mm_ldt. */
++	smp_store_release(&current_mm->context.ldt, ldt);
++
++	/* Activate the LDT for all CPUs using current_mm. */
++	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
++}
+ 
+-	if (err < 0)
+-		return err;
++static void free_ldt_struct(struct ldt_struct *ldt)
++{
++	if (likely(!ldt))
++		return;
+ 
+-	for (i = 0; i < old->size; i++)
+-		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
+-	return 0;
++	paravirt_free_ldt(ldt->entries, ldt->size);
++	if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
++		vfree(ldt->entries);
++	else
++		kfree(ldt->entries);
++	kfree(ldt);
+ }
+ 
+ /*
+@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+  */
+ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+ {
++	struct ldt_struct *new_ldt;
+ 	struct mm_struct *old_mm;
+ 	int retval = 0;
+ 
+ 	mutex_init(&mm->context.lock);
+-	mm->context.size = 0;
+ 	old_mm = current->mm;
+-	if (old_mm && old_mm->context.size > 0) {
+-		mutex_lock(&old_mm->context.lock);
+-		retval = copy_ldt(&mm->context, &old_mm->context);
+-		mutex_unlock(&old_mm->context.lock);
++	if (!old_mm) {
++		mm->context.ldt = NULL;
++		return 0;
+ 	}
++
++	mutex_lock(&old_mm->context.lock);
++	if (!old_mm->context.ldt) {
++		mm->context.ldt = NULL;
++		goto out_unlock;
++	}
++
++	new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
++	if (!new_ldt) {
++		retval = -ENOMEM;
++		goto out_unlock;
++	}
++
++	memcpy(new_ldt->entries, old_mm->context.ldt->entries,
++	       new_ldt->size * LDT_ENTRY_SIZE);
++	finalize_ldt_struct(new_ldt);
++
++	mm->context.ldt = new_ldt;
++
++out_unlock:
++	mutex_unlock(&old_mm->context.lock);
+ 	return retval;
+ }
+ 
+@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+  */
+ void destroy_context(struct mm_struct *mm)
+ {
+-	if (mm->context.size) {
+-#ifdef CONFIG_X86_32
+-		/* CHECKME: Can this ever happen ? */
+-		if (mm == current->active_mm)
+-			clear_LDT();
+-#endif
+-		paravirt_free_ldt(mm->context.ldt, mm->context.size);
+-		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
+-			vfree(mm->context.ldt);
+-		else
+-			put_page(virt_to_page(mm->context.ldt));
+-		mm->context.size = 0;
+-	}
++	free_ldt_struct(mm->context.ldt);
++	mm->context.ldt = NULL;
+ }
+ 
+ static int read_ldt(void __user *ptr, unsigned long bytecount)
+ {
+-	int err;
++	int retval;
+ 	unsigned long size;
+ 	struct mm_struct *mm = current->mm;
+ 
+-	if (!mm->context.size)
+-		return 0;
++	mutex_lock(&mm->context.lock);
++
++	if (!mm->context.ldt) {
++		retval = 0;
++		goto out_unlock;
++	}
++
+ 	if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
+ 		bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
+ 
+-	mutex_lock(&mm->context.lock);
+-	size = mm->context.size * LDT_ENTRY_SIZE;
++	size = mm->context.ldt->size * LDT_ENTRY_SIZE;
+ 	if (size > bytecount)
+ 		size = bytecount;
+ 
+-	err = 0;
+-	if (copy_to_user(ptr, mm->context.ldt, size))
+-		err = -EFAULT;
+-	mutex_unlock(&mm->context.lock);
+-	if (err < 0)
+-		goto error_return;
++	if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
++		retval = -EFAULT;
++		goto out_unlock;
++	}
++
+ 	if (size != bytecount) {
+-		/* zero-fill the rest */
+-		if (clear_user(ptr + size, bytecount - size) != 0) {
+-			err = -EFAULT;
+-			goto error_return;
++		/* Zero-fill the rest and pretend we read bytecount bytes. */
++		if (clear_user(ptr + size, bytecount - size)) {
++			retval = -EFAULT;
++			goto out_unlock;
+ 		}
+ 	}
+-	return bytecount;
+-error_return:
+-	return err;
++	retval = bytecount;
++
++out_unlock:
++	mutex_unlock(&mm->context.lock);
++	return retval;
+ }
+ 
+ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
+@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 	struct desc_struct ldt;
+ 	int error;
+ 	struct user_desc ldt_info;
++	int oldsize, newsize;
++	struct ldt_struct *new_ldt, *old_ldt;
+ 
+ 	error = -EINVAL;
+ 	if (bytecount != sizeof(ldt_info))
+@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
+ 			goto out;
+ 	}
+ 
+-	mutex_lock(&mm->context.lock);
+-	if (ldt_info.entry_number >= mm->context.size) {
+-		error = alloc_ldt(&current->mm->context,
+-				  ldt_info.entry_number + 1, 1);
+-		if (error < 0)
+-			goto out_unlock;
+-	}
+-
+-	/* Allow LDTs to be cleared by the user. */
+-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
+-		if (oldmode || LDT_empty(&ldt_info)) {
+-			memset(&ldt, 0, sizeof(ldt));
+-			goto install;
++	if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
++	    LDT_empty(&ldt_info)) {
++		/* The user wants to clear the entry. */
++		memset(&ldt, 0, sizeof(ldt));
++	} else {
++		if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
++			error = -EINVAL;
++			goto out;
+ 		}
++
++		fill_ldt(&ldt, &ldt_info);
++		if (oldmode)
++			ldt.avl = 0;
+ 	}
+ 
+-	if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+-		error = -EINVAL;
++	mutex_lock(&mm->context.lock);
++
++	old_ldt = mm->context.ldt;
++	oldsize = old_ldt ? old_ldt->size : 0;
++	newsize = max((int)(ldt_info.entry_number + 1), oldsize);
++
++	error = -ENOMEM;
++	new_ldt = alloc_ldt_struct(newsize);
++	if (!new_ldt)
+ 		goto out_unlock;
+-	}
+ 
+-	fill_ldt(&ldt, &ldt_info);
+-	if (oldmode)
+-		ldt.avl = 0;
++	if (old_ldt)
++		memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
++	new_ldt->entries[ldt_info.entry_number] = ldt;
++	finalize_ldt_struct(new_ldt);
+ 
+-	/* Install the new entry ...  */
+-install:
+-	write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
++	install_ldt(mm, new_ldt);
++	free_ldt_struct(old_ldt);
+ 	error = 0;
+ 
+ out_unlock:
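
The modify_ldt() rework above replaces in-place mutation of a shared LDT with an allocate-copy-publish scheme: a new ldt_struct is built and finalized under context.lock, published with a release store, and only then is the old table freed. A condensed sketch of the pattern (illustrative only — helper names as in the patch, error paths trimmed):

	static int ldt_set_entry(struct mm_struct *mm, int idx,
				 const struct desc_struct *d)
	{
		struct ldt_struct *old, *new;
		int size;

		mutex_lock(&mm->context.lock);
		old = mm->context.ldt;
		size = max(idx + 1, old ? old->size : 0);

		new = alloc_ldt_struct(size);	/* zeroed, at least a page */
		if (!new) {
			mutex_unlock(&mm->context.lock);
			return -ENOMEM;
		}
		if (old)
			memcpy(new->entries, old->entries,
			       old->size * LDT_ENTRY_SIZE);
		new->entries[idx] = *d;
		finalize_ldt_struct(new);	/* paravirt hook; table now immutable */
		install_ldt(mm, new);		/* smp_store_release + cross-CPU reload */
		free_ldt_struct(old);		/* safe: no CPU references it anymore */
		mutex_unlock(&mm->context.lock);
		return 0;
	}
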
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index ddfdbf74f174..5e0bf57d9944 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -122,11 +122,11 @@ void __show_regs(struct pt_regs *regs, int all)
+ void release_thread(struct task_struct *dead_task)
+ {
+ 	if (dead_task->mm) {
+-		if (dead_task->mm->context.size) {
++		if (dead_task->mm->context.ldt) {
+ 			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
+ 				dead_task->comm,
+ 				dead_task->mm->context.ldt,
+-				dead_task->mm->context.size);
++				dead_task->mm->context.ldt->size);
+ 			BUG();
+ 		}
+ 	}
+diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
+index 9b4d51d0c0d0..0ccb53a9fcd9 100644
+--- a/arch/x86/kernel/step.c
++++ b/arch/x86/kernel/step.c
+@@ -5,6 +5,7 @@
+ #include <linux/mm.h>
+ #include <linux/ptrace.h>
+ #include <asm/desc.h>
++#include <asm/mmu_context.h>
+ 
+ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
+ {
+@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
+ 		struct desc_struct *desc;
+ 		unsigned long base;
+ 
+-		seg &= ~7UL;
++		seg >>= 3;
+ 
+ 		mutex_lock(&child->mm->context.lock);
+-		if (unlikely((seg >> 3) >= child->mm->context.size))
++		if (unlikely(!child->mm->context.ldt ||
++			     seg >= child->mm->context.ldt->size))
+ 			addr = -1L; /* bogus selector, access would fault */
+ 		else {
+-			desc = child->mm->context.ldt + seg;
++			desc = &child->mm->context.ldt->entries[seg];
+ 			base = get_desc_base(desc);
+ 
+ 			/* 16-bit code segment? */
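
The `seg >>= 3` above reflects the x86 segment selector layout: the table index lives in bits 15:3, with the table indicator and privilege level in the low three bits. Decoding, as a sketch (not part of the patch):

	/* x86 segment selector: | index (13 bits) | TI | RPL (2 bits) | */
	static inline unsigned int sel_index(unsigned short sel)
	{
		return sel >> 3;	/* descriptor table index */
	}

	static inline bool sel_uses_ldt(unsigned short sel)
	{
		return sel & 0x4;	/* TI=1 selects the LDT */
	}

	static inline unsigned int sel_rpl(unsigned short sel)
	{
		return sel & 0x3;	/* requested privilege level */
	}
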
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index b73337634214..554e877e0bc4 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -357,12 +357,6 @@ static u64 __get_spte_lockless(u64 *sptep)
+ {
+ 	return ACCESS_ONCE(*sptep);
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-	/* It is valid if the spte is zapped. */
+-	return spte == 0ull;
+-}
+ #else
+ union split_spte {
+ 	struct {
+@@ -478,23 +472,6 @@ retry:
+ 
+ 	return spte.spte;
+ }
+-
+-static bool __check_direct_spte_mmio_pf(u64 spte)
+-{
+-	union split_spte sspte = (union split_spte)spte;
+-	u32 high_mmio_mask = shadow_mmio_mask >> 32;
+-
+-	/* It is valid if the spte is zapped. */
+-	if (spte == 0ull)
+-		return true;
+-
+-	/* It is valid if the spte is being zapped. */
+-	if (sspte.spte_low == 0ull &&
+-	    (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
+-		return true;
+-
+-	return false;
+-}
+ #endif
+ 
+ static bool spte_is_locklessly_modifiable(u64 spte)
+@@ -3343,21 +3320,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+ 	return vcpu_match_mmio_gva(vcpu, addr);
+ }
+ 
+-
+-/*
+- * On direct hosts, the last spte is only allows two states
+- * for mmio page fault:
+- *   - It is the mmio spte
+- *   - It is zapped or it is being zapped.
+- *
+- * This function completely checks the spte when the last spte
+- * is not the mmio spte.
+- */
+-static bool check_direct_spte_mmio_pf(u64 spte)
+-{
+-	return __check_direct_spte_mmio_pf(spte);
+-}
+-
+ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
+ {
+ 	struct kvm_shadow_walk_iterator iterator;
+@@ -3400,13 +3362,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+ 	}
+ 
+ 	/*
+-	 * It's ok if the gva is remapped by other cpus on shadow guest,
+-	 * it's a BUG if the gfn is not a mmio page.
+-	 */
+-	if (direct && !check_direct_spte_mmio_pf(spte))
+-		return RET_MMIO_PF_BUG;
+-
+-	/*
+ 	 * If the page table is zapped by other cpus, let CPU fault again on
+ 	 * the address.
+ 	 */
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ea306adbbc13..47a32f743a91 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2192,7 +2192,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ 		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+ 			if (!msr_info->host_initiated) {
+ 				s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+-				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
++				adjust_tsc_offset_guest(vcpu, adj);
+ 			}
+ 			vcpu->arch.ia32_tsc_adjust_msr = data;
+ 		}
+diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
+index 9b868124128d..274a52b1183e 100644
+--- a/arch/x86/math-emu/fpu_entry.c
++++ b/arch/x86/math-emu/fpu_entry.c
+@@ -29,7 +29,6 @@
+ 
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+-#include <asm/desc.h>
+ #include <asm/user.h>
+ #include <asm/i387.h>
+ 
+@@ -185,7 +184,7 @@ void math_emulate(struct math_emu_info *info)
+ 			math_abort(FPU_info, SIGILL);
+ 		}
+ 
+-		code_descriptor = LDT_DESCRIPTOR(FPU_CS);
++		code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
+ 		if (SEG_D_SIZE(code_descriptor)) {
+ 			/* The above test may be wrong, the book is not clear */
+ 			/* Segmented 32 bit protected mode */
+diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
+index 2c614410a5f3..d342fce49447 100644
+--- a/arch/x86/math-emu/fpu_system.h
++++ b/arch/x86/math-emu/fpu_system.h
+@@ -16,9 +16,24 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ 
+-/* s is always from a cpu register, and the cpu does bounds checking
+- * during register load --> no further bounds checks needed */
+-#define LDT_DESCRIPTOR(s)	(((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
++{
++	static struct desc_struct zero_desc;
++	struct desc_struct ret = zero_desc;
++
++#ifdef CONFIG_MODIFY_LDT_SYSCALL
++	seg >>= 3;
++	mutex_lock(&current->mm->context.lock);
++	if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
++		ret = current->mm->context.ldt->entries[seg];
++	mutex_unlock(&current->mm->context.lock);
++#endif
++	return ret;
++}
++
+ #define SEG_D_SIZE(x)		((x).b & (3 << 21))
+ #define SEG_G_BIT(x)		((x).b & (1 << 23))
+ #define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
+diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
+index 6ef5e99380f9..8300db71c2a6 100644
+--- a/arch/x86/math-emu/get_address.c
++++ b/arch/x86/math-emu/get_address.c
+@@ -20,7 +20,6 @@
+ #include <linux/stddef.h>
+ 
+ #include <asm/uaccess.h>
+-#include <asm/desc.h>
+ 
+ #include "fpu_system.h"
+ #include "exception.h"
+@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
+ 		addr->selector = PM_REG_(segment);
+ 	}
+ 
+-	descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
++	descriptor = FPU_get_ldt_descriptor(addr->selector);
+ 	base_address = SEG_BASE_ADDR(descriptor);
+ 	address = base_address + offset;
+ 	limit = base_address
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 757678fb26e1..bf9384488399 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -23,6 +23,7 @@
+ #include <asm/debugreg.h>
+ #include <asm/fpu-internal.h> /* pcntxt_mask */
+ #include <asm/cpu.h>
++#include <asm/mmu_context.h>
+ 
+ #ifdef CONFIG_X86_32
+ __visible unsigned long saved_context_ebx;
+@@ -154,7 +155,7 @@ static void fix_processor_context(void)
+ 	syscall_init();				/* This sets MSR_*STAR and related */
+ #endif
+ 	load_TR_desc();				/* This does ltr */
+-	load_LDT(&current->active_mm->context);	/* This does lldt */
++	load_mm_ldt(current->active_mm);	/* This does lldt */
+ }
+ 
+ /**
+diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
+index 677bfcf4ee5d..28f33a8b7f5f 100644
+--- a/arch/xtensa/include/asm/traps.h
++++ b/arch/xtensa/include/asm/traps.h
+@@ -25,30 +25,39 @@ static inline void spill_registers(void)
+ {
+ #if XCHAL_NUM_AREGS > 16
+ 	__asm__ __volatile__ (
+-		"	call12	1f\n"
++		"	call8	1f\n"
+ 		"	_j	2f\n"
+ 		"	retw\n"
+ 		"	.align	4\n"
+ 		"1:\n"
++#if XCHAL_NUM_AREGS == 32
++		"	_entry	a1, 32\n"
++		"	addi	a8, a0, 3\n"
++		"	_entry	a1, 16\n"
++		"	mov	a12, a12\n"
++		"	retw\n"
++#else
+ 		"	_entry	a1, 48\n"
+-		"	addi	a12, a0, 3\n"
+-#if XCHAL_NUM_AREGS > 32
+-		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
++		"	call12	1f\n"
++		"	retw\n"
++		"	.align	4\n"
++		"1:\n"
++		"	.rept	(" __stringify(XCHAL_NUM_AREGS) " - 16) / 12\n"
+ 		"	_entry	a1, 48\n"
+ 		"	mov	a12, a0\n"
+ 		"	.endr\n"
+-#endif
+-		"	_entry	a1, 48\n"
++		"	_entry	a1, 16\n"
+ #if XCHAL_NUM_AREGS % 12 == 0
+-		"	mov	a8, a8\n"
+-#elif XCHAL_NUM_AREGS % 12 == 4
+ 		"	mov	a12, a12\n"
+-#elif XCHAL_NUM_AREGS % 12 == 8
++#elif XCHAL_NUM_AREGS % 12 == 4
+ 		"	mov	a4, a4\n"
++#elif XCHAL_NUM_AREGS % 12 == 8
++		"	mov	a8, a8\n"
+ #endif
+ 		"	retw\n"
++#endif
+ 		"2:\n"
+-		: : : "a12", "a13", "memory");
++		: : : "a8", "a9", "memory");
+ #else
+ 	__asm__ __volatile__ (
+ 		"	mov	a12, a12\n"
+diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
+index 82bbfa5a05b3..a2a902140c4e 100644
+--- a/arch/xtensa/kernel/entry.S
++++ b/arch/xtensa/kernel/entry.S
+@@ -568,12 +568,13 @@ user_exception_exit:
+ 	 *	 (if we have restored WSBITS-1 frames).
+ 	 */
+ 
++2:
+ #if XCHAL_HAVE_THREADPTR
+ 	l32i	a3, a1, PT_THREADPTR
+ 	wur	a3, threadptr
+ #endif
+ 
+-2:	j	common_exception_exit
++	j	common_exception_exit
+ 
+ 	/* This is the kernel exception exit.
+ 	 * We avoided to do a MOVSP when we entered the exception, but we
+@@ -1820,7 +1821,7 @@ ENDPROC(system_call)
+ 	mov	a12, a0
+ 	.endr
+ #endif
+-	_entry	a1, 48
++	_entry	a1, 16
+ #if XCHAL_NUM_AREGS % 12 == 0
+ 	mov	a8, a8
+ #elif XCHAL_NUM_AREGS % 12 == 4
+@@ -1844,7 +1845,7 @@ ENDPROC(system_call)
+ 
+ ENTRY(_switch_to)
+ 
+-	entry	a1, 16
++	entry	a1, 48
+ 
+ 	mov	a11, a3			# and 'next' (a3)
+ 
+diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
+index ff6d8adc9cda..fb765524cc3d 100644
+--- a/drivers/acpi/acpi_pnp.c
++++ b/drivers/acpi/acpi_pnp.c
+@@ -153,6 +153,7 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
+ 	{"AEI0250"},		/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
+ 	{"AEI1240"},		/* Actiontec ISA PNP 56K X2 Fax Modem */
+ 	{"AKY1021"},		/* Rockwell 56K ACF II Fax+Data+Voice Modem */
++	{"ALI5123"},		/* ALi Fast Infrared Controller */
+ 	{"AZT4001"},		/* AZT3005 PnP SOUND DEVICE */
+ 	{"BDP3336"},		/* Best Data Products Inc. Smart One 336F PnP Modem */
+ 	{"BRI0A49"},		/* Boca Complete Ofc Communicator 14.4 Data-FAX */
+diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
+index cfd7581cc19f..b09ad554430a 100644
+--- a/drivers/acpi/pci_link.c
++++ b/drivers/acpi/pci_link.c
+@@ -826,6 +826,22 @@ void acpi_penalize_isa_irq(int irq, int active)
+ }
+ 
+ /*
++ * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
++ * PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be used for
++ * PCI IRQs.
++ */
++void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
++{
++	if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
++		if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
++		    polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
++			acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
++		else
++			acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
++	}
++}
++
++/*
+  * Over-ride default table to reserve additional IRQs for use by ISA
+  * e.g. acpi_irq_isa=5
+  * Useful for telling ACPI how not to interfere with your ISA sound card.
+diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
+index 65ee94454bbd..e6ea912aee31 100644
+--- a/drivers/ata/ahci.c
++++ b/drivers/ata/ahci.c
+@@ -349,6 +349,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
+ 	/* JMicron 362B and 362C have an AHCI function with IDE class code */
+ 	{ PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr },
+ 	{ PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr },
++	/* May need to update quirk_jmicron_async_suspend() for additions */
+ 
+ 	/* ATI */
+ 	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
+@@ -1377,18 +1378,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ 	else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000)
+ 		ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS;
+ 
+-	/*
+-	 * The JMicron chip 361/363 contains one SATA controller and one
+-	 * PATA controller,for powering on these both controllers, we must
+-	 * follow the sequence one by one, otherwise one of them can not be
+-	 * powered on successfully, so here we disable the async suspend
+-	 * method for these chips.
+-	 */
+-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+-		(pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+-		pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+-		device_disable_async_suspend(&pdev->dev);
+-
+ 	/* acquire resources */
+ 	rc = pcim_enable_device(pdev);
+ 	if (rc)
+diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
+index 47e418b8c8ba..4d1a5d2c4287 100644
+--- a/drivers/ata/pata_jmicron.c
++++ b/drivers/ata/pata_jmicron.c
+@@ -143,18 +143,6 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i
+ 	};
+ 	const struct ata_port_info *ppi[] = { &info, NULL };
+ 
+-	/*
+-	 * The JMicron chip 361/363 contains one SATA controller and one
+-	 * PATA controller,for powering on these both controllers, we must
+-	 * follow the sequence one by one, otherwise one of them can not be
+-	 * powered on successfully, so here we disable the async suspend
+-	 * method for these chips.
+-	 */
+-	if (pdev->vendor == PCI_VENDOR_ID_JMICRON &&
+-		(pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 ||
+-		pdev->device == PCI_DEVICE_ID_JMICRON_JMB361))
+-		device_disable_async_suspend(&pdev->dev);
+-
+ 	return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0);
+ }
+ 
+diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
+index 5b93852392b8..0d752851a1ee 100644
+--- a/drivers/auxdisplay/ks0108.c
++++ b/drivers/auxdisplay/ks0108.c
+@@ -139,6 +139,7 @@ static int __init ks0108_init(void)
+ 
+ 	ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
+ 		NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
++	parport_put_port(ks0108_parport);
+ 	if (ks0108_pardevice == NULL) {
+ 		printk(KERN_ERR KS0108_NAME ": ERROR: "
+ 			"parport didn't register new device\n");
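
The added parport_put_port() balances the reference taken when the port was looked up; parport_register_device() holds its own reference for the lifetime of the device, so the lookup reference would otherwise leak. The pairing, sketched (hypothetical driver, error paths trimmed):

	port = parport_find_base(io_base);	/* takes a port reference */
	if (!port)
		return -ENODEV;
	pardev = parport_register_device(port, "mydrv", NULL, NULL, NULL,
					 PARPORT_DEV_EXCL, NULL);
	parport_put_port(port);			/* drop the lookup ref either way */
	if (!pardev)
		return -ENODEV;
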
+diff --git a/drivers/base/devres.c b/drivers/base/devres.c
+index c8a53d1e019f..875464690117 100644
+--- a/drivers/base/devres.c
++++ b/drivers/base/devres.c
+@@ -297,10 +297,10 @@ void * devres_get(struct device *dev, void *new_res,
+ 	if (!dr) {
+ 		add_dr(dev, &new_dr->node);
+ 		dr = new_dr;
+-		new_dr = NULL;
++		new_res = NULL;
+ 	}
+ 	spin_unlock_irqrestore(&dev->devres_lock, flags);
+-	devres_free(new_dr);
++	devres_free(new_res);
+ 
+ 	return dr->data;
+ }
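
devres_get() frees the caller's candidate allocation itself when a matching entry already exists, which is why the fix frees `new_res` (the data pointer devres_free() expects) rather than the internal `new_dr` wrapper. Typical usage, sketched with hypothetical names:

	struct my_state { int id; };

	static void my_release(struct device *dev, void *res)
	{
		/* nothing to undo for plain memory */
	}

	static struct my_state *my_get_state(struct device *dev)
	{
		struct my_state *new;

		new = devres_alloc(my_release, sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;
		new->id = 42;
		/* returns the existing entry (freeing @new) or adopts @new */
		return devres_get(dev, new, NULL, NULL);
	}
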
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index ebf034b97278..7403de94832c 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -375,9 +375,7 @@ int platform_device_add(struct platform_device *pdev)
+ 
+ 	while (--i >= 0) {
+ 		struct resource *r = &pdev->resource[i];
+-		unsigned long type = resource_type(r);
+-
+-		if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++		if (r->parent)
+ 			release_resource(r);
+ 	}
+ 
+@@ -408,9 +406,7 @@ void platform_device_del(struct platform_device *pdev)
+ 
+ 		for (i = 0; i < pdev->num_resources; i++) {
+ 			struct resource *r = &pdev->resource[i];
+-			unsigned long type = resource_type(r);
+-
+-			if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
++			if (r->parent)
+ 				release_resource(r);
+ 		}
+ 	}
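
Keying the cleanup on `r->parent` is more robust than re-testing the resource type: a resource only gains a parent when it was successfully claimed, so entries that were never inserted (or whose insertion failed) are skipped automatically. The invariant, sketched:

	static int claim_mem(struct resource *r)
	{
		/* on success the kernel sets r->parent to the tree root */
		return request_resource(&iomem_resource, r);
	}

	static void unclaim_mem(struct resource *r)
	{
		if (r->parent)			/* only if actually claimed */
			release_resource(r);	/* clears r->parent again */
	}
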
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index c7b0fcebf168..ac3c07db92f1 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -37,7 +37,7 @@ struct pm_clock_entry {
+  * @dev: The device for the given clock
+  * @ce: PM clock entry corresponding to the clock.
+  */
+-static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
++static inline void __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+ {
+ 	int ret;
+ 
+@@ -49,8 +49,6 @@ static inline int __pm_clk_enable(struct device *dev, struct pm_clock_entry *ce)
+ 			dev_err(dev, "%s: failed to enable clk %p, error %d\n",
+ 				__func__, ce->clk, ret);
+ 	}
+-
+-	return ret;
+ }
+ 
+ /**
+diff --git a/drivers/clk/pistachio/clk-pistachio.c b/drivers/clk/pistachio/clk-pistachio.c
+index 8c0fe8828f99..c4ceb5eaf46c 100644
+--- a/drivers/clk/pistachio/clk-pistachio.c
++++ b/drivers/clk/pistachio/clk-pistachio.c
+@@ -159,9 +159,15 @@ PNAME(mux_debug) = { "mips_pll_mux", "rpu_v_pll_mux",
+ 		     "wifi_pll_mux", "bt_pll_mux" };
+ static u32 mux_debug_idx[] = { 0x0, 0x1, 0x2, 0x4, 0x8, 0x10 };
+ 
+-static unsigned int pistachio_critical_clks[] __initdata = {
+-	CLK_MIPS,
+-	CLK_PERIPH_SYS,
++static unsigned int pistachio_critical_clks_core[] __initdata = {
++	CLK_MIPS
++};
++
++static unsigned int pistachio_critical_clks_sys[] __initdata = {
++	PERIPH_CLK_SYS,
++	PERIPH_CLK_SYS_BUS,
++	PERIPH_CLK_DDR,
++	PERIPH_CLK_ROM,
+ };
+ 
+ static void __init pistachio_clk_init(struct device_node *np)
+@@ -193,8 +199,8 @@ static void __init pistachio_clk_init(struct device_node *np)
+ 
+ 	pistachio_clk_register_provider(p);
+ 
+-	pistachio_clk_force_enable(p, pistachio_critical_clks,
+-				   ARRAY_SIZE(pistachio_critical_clks));
++	pistachio_clk_force_enable(p, pistachio_critical_clks_core,
++				   ARRAY_SIZE(pistachio_critical_clks_core));
+ }
+ CLK_OF_DECLARE(pistachio_clk, "img,pistachio-clk", pistachio_clk_init);
+ 
+@@ -261,6 +267,9 @@ static void __init pistachio_clk_periph_init(struct device_node *np)
+ 				    ARRAY_SIZE(pistachio_periph_gates));
+ 
+ 	pistachio_clk_register_provider(p);
++
++	pistachio_clk_force_enable(p, pistachio_critical_clks_sys,
++				   ARRAY_SIZE(pistachio_critical_clks_sys));
+ }
+ CLK_OF_DECLARE(pistachio_clk_periph, "img,pistachio-clk-periph",
+ 	       pistachio_clk_periph_init);
+diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
+index de537560bf70..ebd0d2a3b5da 100644
+--- a/drivers/clk/pistachio/clk-pll.c
++++ b/drivers/clk/pistachio/clk-pll.c
+@@ -115,8 +115,7 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw)
+ 	u32 val;
+ 
+ 	val = pll_readl(pll, PLL_CTRL3);
+-	val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_DACPD |
+-		 PLL_FRAC_CTRL3_DSMPD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
++	val &= ~(PLL_FRAC_CTRL3_PD | PLL_FRAC_CTRL3_FOUTPOSTDIVPD |
+ 		 PLL_FRAC_CTRL3_FOUT4PHASEPD | PLL_FRAC_CTRL3_FOUTVCOPD);
+ 	pll_writel(pll, val, PLL_CTRL3);
+ 
+@@ -233,7 +232,7 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw)
+ 	u32 val;
+ 
+ 	val = pll_readl(pll, PLL_CTRL1);
+-	val &= ~(PLL_INT_CTRL1_PD | PLL_INT_CTRL1_DSMPD |
++	val &= ~(PLL_INT_CTRL1_PD |
+ 		 PLL_INT_CTRL1_FOUTPOSTDIVPD | PLL_INT_CTRL1_FOUTVCOPD);
+ 	pll_writel(pll, val, PLL_CTRL1);
+ 
+diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
+index 6cd88d963a7f..542e45ef5087 100644
+--- a/drivers/clk/pxa/clk-pxa25x.c
++++ b/drivers/clk/pxa/clk-pxa25x.c
+@@ -79,7 +79,7 @@ unsigned int pxa25x_get_clk_frequency_khz(int info)
+ 			clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+ 	}
+ 
+-	return (unsigned int)clks[0];
++	return (unsigned int)clks[0] / KHz;
+ }
+ 
+ static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
+diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
+index 5f9b54b024b9..267511df1e59 100644
+--- a/drivers/clk/pxa/clk-pxa27x.c
++++ b/drivers/clk/pxa/clk-pxa27x.c
+@@ -80,7 +80,7 @@ unsigned int pxa27x_get_clk_frequency_khz(int info)
+ 		pr_info("System bus clock: %ld.%02ldMHz\n",
+ 			clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+ 	}
+-	return (unsigned int)clks[0];
++	return (unsigned int)clks[0] / KHz;
+ }
+ 
+ bool pxa27x_is_ppll_disabled(void)
+diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
+index ac03ba49e9d1..4af4eed5f89f 100644
+--- a/drivers/clk/pxa/clk-pxa3xx.c
++++ b/drivers/clk/pxa/clk-pxa3xx.c
+@@ -78,7 +78,7 @@ unsigned int pxa3xx_get_clk_frequency_khz(int info)
+ 		pr_info("System bus clock: %ld.%02ldMHz\n",
+ 			clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+ 	}
+-	return (unsigned int)clks[0];
++	return (unsigned int)clks[0] / KHz;
+ }
+ 
+ static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
+diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
+index 54a756b90a37..457c540585f9 100644
+--- a/drivers/clk/qcom/gcc-apq8084.c
++++ b/drivers/clk/qcom/gcc-apq8084.c
+@@ -2105,6 +2105,7 @@ static struct clk_branch gcc_ce1_clk = {
+ 				"ce1_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
+index c66f7bc2ae87..5d75bffab141 100644
+--- a/drivers/clk/qcom/gcc-msm8916.c
++++ b/drivers/clk/qcom/gcc-msm8916.c
+@@ -2278,7 +2278,7 @@ static struct clk_branch gcc_prng_ahb_clk = {
+ 	.halt_check = BRANCH_HALT_VOTED,
+ 	.clkr = {
+ 		.enable_reg = 0x45004,
+-		.enable_mask = BIT(0),
++		.enable_mask = BIT(8),
+ 		.hw.init = &(struct clk_init_data){
+ 			.name = "gcc_prng_ahb_clk",
+ 			.parent_names = (const char *[]){
+diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
+index c39d09874e74..f06a082e3e87 100644
+--- a/drivers/clk/qcom/gcc-msm8974.c
++++ b/drivers/clk/qcom/gcc-msm8974.c
+@@ -1783,6 +1783,7 @@ static struct clk_branch gcc_ce1_clk = {
+ 				"ce1_clk_src",
+ 			},
+ 			.num_parents = 1,
++			.flags = CLK_SET_RATE_PARENT,
+ 			.ops = &clk_branch2_ops,
+ 		},
+ 	},
+diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
+index d17eb4528a28..37f96117fd3d 100644
+--- a/drivers/clk/rockchip/clk-rk3288.c
++++ b/drivers/clk/rockchip/clk-rk3288.c
+@@ -578,7 +578,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+ 	COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
+ 			RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
+ 			RK3288_CLKGATE_CON(2), 5, GFLAGS),
+-	MUX(SCLK_MAC, "mac_clk", mux_mac_p, 0,
++	MUX(SCLK_MAC, "mac_clk", mux_mac_p, CLK_SET_RATE_PARENT,
+ 			RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
+ 	GATE(SCLK_MACREF_OUT, "sclk_macref_out", "mac_clk", 0,
+ 			RK3288_CLKGATE_CON(5), 3, GFLAGS),
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 714d6ba782c8..f7890bf652e6 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -85,6 +85,7 @@
+ #define DIV_PERIL4		0xc560
+ #define DIV_PERIL5		0xc564
+ #define E4X12_DIV_CAM1		0xc568
++#define E4X12_GATE_BUS_FSYS1	0xc744
+ #define GATE_SCLK_CAM		0xc820
+ #define GATE_IP_CAM		0xc920
+ #define GATE_IP_TV		0xc924
+@@ -1095,6 +1096,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
+ 		0),
+ 	GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
+ 		0),
++	GATE(CLK_TSADC, "tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0),
+ 	GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
+ 	GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
+ 	GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
+diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
+index e668e479a697..bdd284249cc3 100644
+--- a/drivers/clk/samsung/clk-s5pv210.c
++++ b/drivers/clk/samsung/clk-s5pv210.c
+@@ -828,6 +828,8 @@ static void __init __s5pv210_clk_init(struct device_node *np,
+ 
+ 	s5pv210_clk_sleep_init();
+ 
++	samsung_clk_of_add_provider(np, ctx);
++
+ 	pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
+ 		"\tmout_epll = %ld, mout_vpll = %ld\n",
+ 		is_s5p6442 ? "S5P6442" : "S5PV210",
+diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c
+index c6e86a9a2aa3..5122ef25f595 100644
+--- a/drivers/clk/versatile/clk-sp810.c
++++ b/drivers/clk/versatile/clk-sp810.c
+@@ -128,8 +128,8 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec,
+ {
+ 	struct clk_sp810 *sp810 = data;
+ 
+-	if (WARN_ON(clkspec->args_count != 1 || clkspec->args[0] >
+-			ARRAY_SIZE(sp810->timerclken)))
++	if (WARN_ON(clkspec->args_count != 1 ||
++		    clkspec->args[0] >=	ARRAY_SIZE(sp810->timerclken)))
+ 		return NULL;
+ 
+ 	return sp810->timerclken[clkspec->args[0]].clk;
+diff --git a/drivers/crypto/vmx/ghashp8-ppc.pl b/drivers/crypto/vmx/ghashp8-ppc.pl
+index 0a6f899839dd..d8429cb71f02 100644
+--- a/drivers/crypto/vmx/ghashp8-ppc.pl
++++ b/drivers/crypto/vmx/ghashp8-ppc.pl
+@@ -61,6 +61,12 @@ $code=<<___;
+ 	mtspr		256,r0
+ 	li		r10,0x30
+ 	lvx_u		$H,0,r4			# load H
++	le?xor		r7,r7,r7
++	le?addi		r7,r7,0x8		# need a vperm start with 08
++	le?lvsr		5,0,r7
++	le?vspltisb	6,0x0f
++	le?vxor		5,5,6			# set a b-endian mask
++	le?vperm	$H,$H,$H,5
+ 
+ 	vspltisb	$xC2,-16		# 0xf0
+ 	vspltisb	$t0,1			# one
+diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
+index a19d2c71e205..fb91df1631d9 100644
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -647,15 +647,18 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
+ 
+ 	pci_disable_device(drm_dev->pdev);
+ 	/*
+-	 * During hibernation on some GEN4 platforms the BIOS may try to access
++	 * During hibernation on some platforms the BIOS may try to access
+ 	 * the device even though it's already in D3 and hang the machine. So
+ 	 * leave the device in D0 on those platforms and hope the BIOS will
+-	 * power down the device properly. Platforms where this was seen:
+-	 * Lenovo Thinkpad X301, X61s
++	 * power down the device properly. The issue was seen on multiple old
++	 * GENs with different BIOS vendors, so having an explicit blacklist
++	 * is impractical; apply the workaround on everything pre GEN6. The
++	 * platforms where the issue was seen:
++	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
++	 * Fujitsu FSC S7110
++	 * Acer Aspire 1830T
+ 	 */
+-	if (!(hibernation &&
+-	      drm_dev->pdev->subsystem_vendor == PCI_VENDOR_ID_LENOVO &&
+-	      INTEL_INFO(dev_priv)->gen == 4))
++	if (!(hibernation && INTEL_INFO(dev_priv)->gen < 6))
+ 		pci_set_power_state(drm_dev->pdev, PCI_D3hot);
+ 
+ 	return 0;
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 683a9b004c11..7d53d7e15455 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3190,13 +3190,13 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+ #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+ 
+ #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
+-	u32 upper, lower, tmp;						\
+-	tmp = I915_READ(upper_reg);					\
++	u32 upper, lower, old_upper, loop = 0;				\
++	upper = I915_READ(upper_reg);					\
+ 	do {								\
+-		upper = tmp;						\
++		old_upper = upper;					\
+ 		lower = I915_READ(lower_reg);				\
+-		tmp = I915_READ(upper_reg);				\
+-	} while (upper != tmp);						\
++		upper = I915_READ(upper_reg);				\
++	} while (upper != old_upper && loop++ < 2);			\
+ 	(u64)upper << 32 | lower; })
+ 
+ #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
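
The reworked macro reads the high half first, then retries when it changed underneath the low-half read — and, unlike the old version, bounds the retries so hardware that keeps returning unstable values cannot livelock the CPU. The same logic as a plain function (illustrative; the register accessor is a stand-in):

	static u64 read64_from_2x32(u32 (*rd)(u32 reg), u32 lo_reg, u32 hi_reg)
	{
		u32 lo, hi, old_hi;
		int loop = 0;

		hi = rd(hi_reg);
		do {
			old_hi = hi;
			lo = rd(lo_reg);
			hi = rd(hi_reg);	/* re-check for carry between halves */
		} while (hi != old_hi && loop++ < 2);

		return (u64)hi << 32 | lo;
	}
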
+diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+index a3190e793ed4..479024a4caad 100644
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1025,6 +1025,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+ 		u32 old_read = obj->base.read_domains;
+ 		u32 old_write = obj->base.write_domain;
+ 
++		obj->dirty = 1; /* be paranoid */
+ 		obj->base.write_domain = obj->base.pending_write_domain;
+ 		if (obj->base.write_domain == 0)
+ 			obj->base.pending_read_domains |= obj->base.read_domains;
+@@ -1032,7 +1033,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
+ 
+ 		i915_vma_move_to_active(vma, ring);
+ 		if (obj->base.write_domain) {
+-			obj->dirty = 1;
+ 			i915_gem_request_assign(&obj->last_write_req, req);
+ 
+ 			intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index 57c887843dc3..f208bbc6d58e 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -13781,6 +13781,24 @@ void intel_modeset_init(struct drm_device *dev)
+ 	if (INTEL_INFO(dev)->num_pipes == 0)
+ 		return;
+ 
++	/*
++	 * There may be no VBT; and if the BIOS enabled SSC we can
++	 * just keep using it to avoid unnecessary flicker.  Whereas if the
++	 * BIOS isn't using it, don't assume it will work even if the VBT
++	 * indicates as much.
++	 */
++	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
++		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
++					    DREF_SSC1_ENABLE);
++
++		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
++			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
++				     bios_lvds_use_ssc ? "en" : "dis",
++				     dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
++			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
++		}
++	}
++
+ 	intel_init_display(dev);
+ 	intel_init_audio(dev);
+ 
+@@ -14266,7 +14284,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
+ 
+ void intel_modeset_gem_init(struct drm_device *dev)
+ {
+-	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_crtc *c;
+ 	struct drm_i915_gem_object *obj;
+ 	int ret;
+@@ -14275,16 +14292,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
+ 	intel_init_gt_powersave(dev);
+ 	mutex_unlock(&dev->struct_mutex);
+ 
+-	/*
+-	 * There may be no VBT; and if the BIOS enabled SSC we can
+-	 * just keep using it to avoid unnecessary flicker.  Whereas if the
+-	 * BIOS isn't using it, don't assume it will work even if the VBT
+-	 * indicates as much.
+-	 */
+-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+-		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
+-						DREF_SSC1_ENABLE);
+-
+ 	intel_modeset_init_hw(dev);
+ 
+ 	intel_setup_overlay(dev);
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index b1fe32b119ef..fb2983f77141 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -4691,9 +4691,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+ 
+ 		intel_dp_probe_oui(intel_dp);
+ 
+-		if (!intel_dp_probe_mst(intel_dp))
++		if (!intel_dp_probe_mst(intel_dp)) {
++			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
++			intel_dp_check_link_status(intel_dp);
++			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ 			goto mst_fail;
+-
++		}
+ 	} else {
+ 		if (intel_dp->is_mst) {
+ 			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
+@@ -4701,10 +4704,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
+ 		}
+ 
+ 		if (!intel_dp->is_mst) {
+-			/*
+-			 * we'll check the link status via the normal hot plug path later -
+-			 * but for short hpds we should check it now
+-			 */
+ 			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ 			intel_dp_check_link_status(intel_dp);
+ 			drm_modeset_unlock(&dev->mode_config.connection_mutex);
+diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
+index 51966426addf..c7a0b8d8fac9 100644
+--- a/drivers/gpu/drm/i915/intel_dsi.c
++++ b/drivers/gpu/drm/i915/intel_dsi.c
+@@ -1036,11 +1036,7 @@ void intel_dsi_init(struct drm_device *dev)
+ 	intel_connector->unregister = intel_connector_unregister;
+ 
+ 	/* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */
+-	if (dev_priv->vbt.dsi.config->dual_link) {
+-		/* XXX: does dual link work on either pipe? */
+-		intel_encoder->crtc_mask = (1 << PIPE_A);
+-		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
+-	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
++	if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) {
+ 		intel_encoder->crtc_mask = (1 << PIPE_A);
+ 		intel_dsi->ports = (1 << PORT_A);
+ 	} else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) {
+@@ -1048,6 +1044,9 @@ void intel_dsi_init(struct drm_device *dev)
+ 		intel_dsi->ports = (1 << PORT_C);
+ 	}
+ 
++	if (dev_priv->vbt.dsi.config->dual_link)
++		intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C));
++
+ 	/* Create a DSI host (and a device) for each port. */
+ 	for_each_dsi_port(port, intel_dsi->ports) {
+ 		struct intel_dsi_host *host;
+diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
+index 4a0a8b29b0a1..32248791bc4b 100644
+--- a/drivers/gpu/drm/qxl/qxl_display.c
++++ b/drivers/gpu/drm/qxl/qxl_display.c
+@@ -160,9 +160,35 @@ static int qxl_add_monitors_config_modes(struct drm_connector *connector,
+ 	*pwidth = head->width;
+ 	*pheight = head->height;
+ 	drm_mode_probed_add(connector, mode);
++	/* remember the last custom size for mode validation */
++	qdev->monitors_config_width = mode->hdisplay;
++	qdev->monitors_config_height = mode->vdisplay;
+ 	return 1;
+ }
+ 
++static struct mode_size {
++	int w;
++	int h;
++} common_modes[] = {
++	{ 640,  480},
++	{ 720,  480},
++	{ 800,  600},
++	{ 848,  480},
++	{1024,  768},
++	{1152,  768},
++	{1280,  720},
++	{1280,  800},
++	{1280,  854},
++	{1280,  960},
++	{1280, 1024},
++	{1440,  900},
++	{1400, 1050},
++	{1680, 1050},
++	{1600, 1200},
++	{1920, 1080},
++	{1920, 1200}
++};
++
+ static int qxl_add_common_modes(struct drm_connector *connector,
+                                 unsigned pwidth,
+                                 unsigned pheight)
+@@ -170,29 +196,6 @@ static int qxl_add_common_modes(struct drm_connector *connector,
+ 	struct drm_device *dev = connector->dev;
+ 	struct drm_display_mode *mode = NULL;
+ 	int i;
+-	struct mode_size {
+-		int w;
+-		int h;
+-	} common_modes[] = {
+-		{ 640,  480},
+-		{ 720,  480},
+-		{ 800,  600},
+-		{ 848,  480},
+-		{1024,  768},
+-		{1152,  768},
+-		{1280,  720},
+-		{1280,  800},
+-		{1280,  854},
+-		{1280,  960},
+-		{1280, 1024},
+-		{1440,  900},
+-		{1400, 1050},
+-		{1680, 1050},
+-		{1600, 1200},
+-		{1920, 1080},
+-		{1920, 1200}
+-	};
+-
+ 	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ 		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h,
+ 				    60, false, false, false);
+@@ -823,11 +826,22 @@ static int qxl_conn_get_modes(struct drm_connector *connector)
+ static int qxl_conn_mode_valid(struct drm_connector *connector,
+ 			       struct drm_display_mode *mode)
+ {
++	struct drm_device *ddev = connector->dev;
++	struct qxl_device *qdev = ddev->dev_private;
++	int i;
++
+ 	/* TODO: is this called for user defined modes? (xrandr --add-mode)
+ 	 * TODO: check that the mode fits in the framebuffer */
+-	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
+-		  mode->vdisplay, mode->status);
+-	return MODE_OK;
++
++	if (qdev->monitors_config_width == mode->hdisplay &&
++	   qdev->monitors_config_height == mode->vdisplay)
++		return MODE_OK;
++
++	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
++		if (common_modes[i].w == mode->hdisplay && common_modes[i].h == mode->vdisplay)
++			return MODE_OK;
++	}
++	return MODE_BAD;
+ }
+ 
+ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 7c6cafe21f5f..e66143cc1a7a 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -325,6 +325,8 @@ struct qxl_device {
+ 	struct work_struct fb_work;
+ 
+ 	struct drm_property *hotplug_mode_update_property;
++	int monitors_config_width;
++	int monitors_config_height;
+ };
+ 
+ /* forward declaration for QXL_INFO_IO */
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index b435c859dcbc..447dbfa6c793 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -171,8 +171,9 @@ radeon_dp_aux_transfer_atom(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
+ 		return -E2BIG;
+ 
+ 	tx_buf[0] = msg->address & 0xff;
+-	tx_buf[1] = msg->address >> 8;
+-	tx_buf[2] = msg->request << 4;
++	tx_buf[1] = (msg->address >> 8) & 0xff;
++	tx_buf[2] = (msg->request << 4) |
++		((msg->address >> 16) & 0xf);
+ 	tx_buf[3] = msg->size ? (msg->size - 1) : 0;
+ 
+ 	switch (msg->request & ~DP_AUX_I2C_MOT) {
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 59b3d3221294..d77dd1430d58 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -522,13 +522,15 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
+ 		return err;
+ 	}
+ 
+-	if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
+-		if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
+-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
+-		else
+-			frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
+-	} else {
+-		frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
++	if (radeon_encoder->output_csc != RADEON_OUTPUT_CSC_BYPASS) {
++		if (drm_rgb_quant_range_selectable(radeon_connector_edid(connector))) {
++			if (radeon_encoder->output_csc == RADEON_OUTPUT_CSC_TVRGB)
++				frame.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED;
++			else
++				frame.quantization_range = HDMI_QUANTIZATION_RANGE_FULL;
++		} else {
++			frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
++		}
+ 	}
+ 
+ 	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 94b21ae70ef7..5a2cafb4f1bc 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -95,6 +95,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+ 			if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ 			} else if (radeon_dp_needs_link_train(radeon_connector)) {
++				/* Don't try to start link training before we
++				 * have the dpcd */
++				if (!radeon_dp_getdpcd(radeon_connector))
++					return;
++
+ 				/* set it to OFF so that drm_helper_connector_dpms()
+ 				 * won't return immediately since the current state
+ 				 * is ON at this point.
+diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+index fcbd60bb0349..3b0c229d7dcd 100644
+--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
++++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+@@ -116,8 +116,8 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
+ 	       AUX_SW_WR_BYTES(bytes));
+ 
+ 	/* write the data header into the registers */
+-	/* request, addres, msg size */
+-	byte = (msg->request << 4);
++	/* request, address, msg size */
++	byte = (msg->request << 4) | ((msg->address >> 16) & 0xf);
+ 	WREG32(AUX_SW_DATA + aux_offset[instance],
+ 	       AUX_SW_DATA_MASK(byte) | AUX_SW_AUTOINCREMENT_DISABLE);
+ 
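
Both radeon hunks above fix the same truncation: a DisplayPort AUX address is 20 bits wide, and its top nibble shares a byte with the 4-bit request code, so dropping it broke access to DPCD addresses above 0xffff. The header packing, sketched:

	/* DP AUX header: byte0 = addr[7:0], byte1 = addr[15:8],
	 * byte2 = request[3:0] << 4 | addr[19:16] */
	static void dp_aux_pack_header(u8 hdr[3], u32 addr, u8 request)
	{
		hdr[0] = addr & 0xff;
		hdr[1] = (addr >> 8) & 0xff;
		hdr[2] = (request << 4) | ((addr >> 16) & 0xf);
	}
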
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index a2dbbbe0d8d7..39bf74793b8b 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -537,7 +537,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ 	struct cp2112_device *dev = (struct cp2112_device *)adap->algo_data;
+ 	struct hid_device *hdev = dev->hdev;
+ 	u8 buf[64];
+-	__be16 word;
++	__le16 word;
+ 	ssize_t count;
+ 	size_t read_length = 0;
+ 	unsigned int retries;
+@@ -554,7 +554,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ 		if (I2C_SMBUS_READ == read_write)
+ 			count = cp2112_read_req(buf, addr, read_length);
+ 		else
+-			count = cp2112_write_req(buf, addr, data->byte, NULL,
++			count = cp2112_write_req(buf, addr, command, NULL,
+ 						 0);
+ 		break;
+ 	case I2C_SMBUS_BYTE_DATA:
+@@ -569,7 +569,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ 		break;
+ 	case I2C_SMBUS_WORD_DATA:
+ 		read_length = 2;
+-		word = cpu_to_be16(data->word);
++		word = cpu_to_le16(data->word);
+ 
+ 		if (I2C_SMBUS_READ == read_write)
+ 			count = cp2112_write_read_req(buf, addr, read_length,
+@@ -582,7 +582,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ 		size = I2C_SMBUS_WORD_DATA;
+ 		read_write = I2C_SMBUS_READ;
+ 		read_length = 2;
+-		word = cpu_to_be16(data->word);
++		word = cpu_to_le16(data->word);
+ 
+ 		count = cp2112_write_read_req(buf, addr, read_length, command,
+ 					      (u8 *)&word, 2);
+@@ -675,7 +675,7 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
+ 		data->byte = buf[0];
+ 		break;
+ 	case I2C_SMBUS_WORD_DATA:
+-		data->word = be16_to_cpup((__be16 *)buf);
++		data->word = le16_to_cpup((__le16 *)buf);
+ 		break;
+ 	case I2C_SMBUS_BLOCK_DATA:
+ 		if (read_length > I2C_SMBUS_BLOCK_MAX) {
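
The __be16 to __le16 switch matters because SMBus sends word data least-significant byte first; converting to big-endian put the bytes on the wire in the wrong order. The correct conversions, sketched:

	/* SMBus word data travels LSB first, i.e. little-endian */
	static void smbus_put_word(u8 wire[2], u16 word)
	{
		__le16 w = cpu_to_le16(word);

		memcpy(wire, &w, sizeof(w));	/* wire[0] = low byte */
	}

	static u16 smbus_get_word(const u8 wire[2])
	{
		__le16 w;

		memcpy(&w, wire, sizeof(w));
		return le16_to_cpu(w);
	}
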
+diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
+index bfbe1bedda7f..eab5bd6a2442 100644
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -164,7 +164,7 @@ static void hid_io_error(struct hid_device *hid)
+ 	if (time_after(jiffies, usbhid->stop_retry)) {
+ 
+ 		/* Retries failed, so do a port reset unless we lack bandwidth*/
+-		if (test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
++		if (!test_bit(HID_NO_BANDWIDTH, &usbhid->iofl)
+ 		     && !test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
+ 
+ 			schedule_work(&usbhid->reset_work);
+diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
+index b3d0e94f72eb..8d2439345673 100644
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -53,7 +53,8 @@ config ADXRS450
+ config BMG160
+ 	tristate "BOSCH BMG160 Gyro Sensor"
+ 	depends on I2C
+-	select IIO_TRIGGERED_BUFFER if IIO_BUFFER
++	select IIO_BUFFER
++	select IIO_TRIGGERED_BUFFER
+ 	help
+ 	  Say yes here to build support for Bosch BMG160 Tri-axis Gyro Sensor
+ 	  driver. This driver also supports BMI055 gyroscope.
+diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
+index 2fd68f2219a7..d42e4fe2c7ed 100644
+--- a/drivers/iio/imu/adis16400_core.c
++++ b/drivers/iio/imu/adis16400_core.c
+@@ -780,7 +780,7 @@ static struct adis16400_chip_info adis16400_chips[] = {
+ 		.flags = ADIS16400_HAS_PROD_ID |
+ 				ADIS16400_HAS_SERIAL_NUMBER |
+ 				ADIS16400_BURST_DIAG_STAT,
+-		.gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
++		.gyro_scale_micro = IIO_DEGREE_TO_RAD(40000), /* 0.04 deg/s */
+ 		.accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
+ 		.temp_scale_nano = 73860000, /* 0.07386 C */
+ 		.temp_offset = 31000000 / 73860, /* 31 C = 0x00 */
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 989605dd6f78..b94bfd3f595b 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -110,6 +110,10 @@
+ struct adis16480_chip_info {
+ 	unsigned int num_channels;
+ 	const struct iio_chan_spec *channels;
++	unsigned int gyro_max_val;
++	unsigned int gyro_max_scale;
++	unsigned int accel_max_val;
++	unsigned int accel_max_scale;
+ };
+ 
+ struct adis16480 {
+@@ -497,19 +501,21 @@ static int adis16480_set_filter_freq(struct iio_dev *indio_dev,
+ static int adis16480_read_raw(struct iio_dev *indio_dev,
+ 	const struct iio_chan_spec *chan, int *val, int *val2, long info)
+ {
++	struct adis16480 *st = iio_priv(indio_dev);
++
+ 	switch (info) {
+ 	case IIO_CHAN_INFO_RAW:
+ 		return adis_single_conversion(indio_dev, chan, 0, val);
+ 	case IIO_CHAN_INFO_SCALE:
+ 		switch (chan->type) {
+ 		case IIO_ANGL_VEL:
+-			*val = 0;
+-			*val2 = IIO_DEGREE_TO_RAD(20000); /* 0.02 degree/sec */
+-			return IIO_VAL_INT_PLUS_MICRO;
++			*val = st->chip_info->gyro_max_scale;
++			*val2 = st->chip_info->gyro_max_val;
++			return IIO_VAL_FRACTIONAL;
+ 		case IIO_ACCEL:
+-			*val = 0;
+-			*val2 = IIO_G_TO_M_S_2(800); /* 0.8 mg */
+-			return IIO_VAL_INT_PLUS_MICRO;
++			*val = st->chip_info->accel_max_scale;
++			*val2 = st->chip_info->accel_max_val;
++			return IIO_VAL_FRACTIONAL;
+ 		case IIO_MAGN:
+ 			*val = 0;
+ 			*val2 = 100; /* 0.0001 gauss */
+@@ -674,18 +680,39 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
+ 	[ADIS16375] = {
+ 		.channels = adis16485_channels,
+ 		.num_channels = ARRAY_SIZE(adis16485_channels),
++		/*
++		 * storing the value in rad/degree and the scale in degree
++		 * gives us the result in rad and better precession than
++		 * storing the scale directly in rad.
++		 */
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22887),
++		.gyro_max_scale = 300,
++		.accel_max_val = IIO_M_S_2_TO_G(21973),
++		.accel_max_scale = 18,
+ 	},
+ 	[ADIS16480] = {
+ 		.channels = adis16480_channels,
+ 		.num_channels = ARRAY_SIZE(adis16480_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(12500),
++		.accel_max_scale = 5,
+ 	},
+ 	[ADIS16485] = {
+ 		.channels = adis16485_channels,
+ 		.num_channels = ARRAY_SIZE(adis16485_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(20000),
++		.accel_max_scale = 5,
+ 	},
+ 	[ADIS16488] = {
+ 		.channels = adis16480_channels,
+ 		.num_channels = ARRAY_SIZE(adis16480_channels),
++		.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
++		.gyro_max_scale = 450,
++		.accel_max_val = IIO_M_S_2_TO_G(22500),
++		.accel_max_scale = 18,
+ 	},
+ };
+ 
+diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
+index df919f44d513..7fa280b28ecb 100644
+--- a/drivers/iio/industrialio-buffer.c
++++ b/drivers/iio/industrialio-buffer.c
+@@ -151,7 +151,7 @@ unsigned int iio_buffer_poll(struct file *filp,
+ 	struct iio_buffer *rb = indio_dev->buffer;
+ 
+ 	if (!indio_dev->info)
+-		return -ENODEV;
++		return 0;
+ 
+ 	poll_wait(filp, &rb->pollq, wait);
+ 	if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
+diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
+index a99692ba91bc..69b8c338fa89 100644
+--- a/drivers/iio/industrialio-event.c
++++ b/drivers/iio/industrialio-event.c
+@@ -84,7 +84,7 @@ static unsigned int iio_event_poll(struct file *filep,
+ 	unsigned int events = 0;
+ 
+ 	if (!indio_dev->info)
+-		return -ENODEV;
++		return events;
+ 
+ 	poll_wait(filep, &ev_int->wait, wait);
+ 
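
Both IIO poll handlers are fixed the same way: poll() returns an event mask, not an errno, so a negative return such as -ENODEV would be interpreted as a mask with nearly every event bit set. The safe shape of such a handler (sketch; the device and readiness helpers are hypothetical):

	static unsigned int my_poll(struct file *filp, poll_table *wait)
	{
		struct my_dev *dev = filp->private_data;

		if (!dev->alive)
			return 0;	/* nothing pollable; never an errno */

		poll_wait(filp, &dev->waitq, wait);
		return my_data_ready(dev) ? POLLIN | POLLRDNORM : 0;
	}
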
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 8bfda6ade2c0..384574c3987c 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -845,10 +845,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
+ 	struct resource res;
+ 
+ 	while (dn) {
+-		if (of_address_to_resource(dn, 0, &res))
+-			continue;
+-		if (res.start == base_address)
++		if (!of_address_to_resource(dn, 0, &res) &&
++		    res.start == base_address)
+ 			return dn;
++
+ 		dn = of_find_matching_node(dn, matches);
+ 	}
+ 
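
The old loop could spin forever: `continue` skipped the of_find_matching_node() call at the bottom, so `dn` never advanced when of_address_to_resource() failed. Folding the test keeps exactly one advance per iteration — the general shape, sketched with hypothetical helpers:

	while (node) {
		if (get_resource(node, &res) == 0 && res.start == base)
			return node;		/* match found */
		node = next_node(node);		/* always advance; no 'continue' */
	}
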
+diff --git a/drivers/pci/access.c b/drivers/pci/access.c
+index d9b64a175990..b965c12168b7 100644
+--- a/drivers/pci/access.c
++++ b/drivers/pci/access.c
+@@ -439,6 +439,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
+ 	.release = pci_vpd_pci22_release,
+ };
+ 
++static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
++			       void *arg)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	ssize_t ret;
++
++	if (!tdev)
++		return -ENODEV;
++
++	ret = pci_read_vpd(tdev, pos, count, arg);
++	pci_dev_put(tdev);
++	return ret;
++}
++
++static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
++				const void *arg)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	ssize_t ret;
++
++	if (!tdev)
++		return -ENODEV;
++
++	ret = pci_write_vpd(tdev, pos, count, arg);
++	pci_dev_put(tdev);
++	return ret;
++}
++
++static const struct pci_vpd_ops pci_vpd_f0_ops = {
++	.read = pci_vpd_f0_read,
++	.write = pci_vpd_f0_write,
++	.release = pci_vpd_pci22_release,
++};
++
++static int pci_vpd_f0_dev_check(struct pci_dev *dev)
++{
++	struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
++	int ret = 0;
++
++	if (!tdev)
++		return -ENODEV;
++	if (!tdev->vpd || !tdev->multifunction ||
++	    dev->class != tdev->class || dev->vendor != tdev->vendor ||
++	    dev->device != tdev->device)
++		ret = -ENODEV;
++
++	pci_dev_put(tdev);
++	return ret;
++}
++
+ int pci_vpd_pci22_init(struct pci_dev *dev)
+ {
+ 	struct pci_vpd_pci22 *vpd;
+@@ -447,12 +497,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
+ 	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
+ 	if (!cap)
+ 		return -ENODEV;
++	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
++		int ret = pci_vpd_f0_dev_check(dev);
++
++		if (ret)
++			return ret;
++	}
+ 	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
+ 	if (!vpd)
+ 		return -ENOMEM;
+ 
+ 	vpd->base.len = PCI_VPD_PCI22_SIZE;
+-	vpd->base.ops = &pci_vpd_pci22_ops;
++	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
++		vpd->base.ops = &pci_vpd_f0_ops;
++	else
++		vpd->base.ops = &pci_vpd_pci22_ops;
+ 	mutex_init(&vpd->lock);
+ 	vpd->cap = cap;
+ 	vpd->busy = false;
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index c6dc1dfd25d5..804cd3b02c66 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -1576,6 +1576,18 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3
+ 
+ #endif
+ 
++static void quirk_jmicron_async_suspend(struct pci_dev *dev)
++{
++	if (dev->multifunction) {
++		device_disable_async_suspend(&dev->dev);
++		dev_info(&dev->dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
++	}
++}
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
++
+ #ifdef CONFIG_X86_IO_APIC
+ static void quirk_alder_ioapic(struct pci_dev *pdev)
+ {
+@@ -1903,6 +1915,15 @@ static void quirk_netmos(struct pci_dev *dev)
+ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
+ 			 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+ 
++static void quirk_f0_vpd_link(struct pci_dev *dev)
++{
++	if (!dev->multifunction || !PCI_FUNC(dev->devfn))
++		return;
++	dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
++}
++DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
++			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
++
+ static void quirk_e100_interrupt(struct pci_dev *dev)
+ {
+ 	u16 command, pmcsr;
+@@ -2838,12 +2859,15 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
+ 
+ static void fixup_ti816x_class(struct pci_dev *dev)
+ {
++	u32 class = dev->class;
++
+ 	/* TI 816x devices do not have class code set when in PCIe boot mode */
+-	dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
+-	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
++	dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
++	dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
++		 class, dev->class);
+ }
+ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
+-				 PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
++			      PCI_CLASS_NOT_DEFINED, 0, fixup_ti816x_class);
+ 
+ /* Some PCIe devices do not work reliably with the claimed maximum
+  * payload size supported.
+diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
+index bd2b75c0d1d1..4fa7bcaf454e 100644
+--- a/drivers/regulator/pbias-regulator.c
++++ b/drivers/regulator/pbias-regulator.c
+@@ -30,6 +30,7 @@
+ struct pbias_reg_info {
+ 	u32 enable;
+ 	u32 enable_mask;
++	u32 disable_val;
+ 	u32 vmode;
+ 	unsigned int enable_time;
+ 	char *name;
+@@ -62,6 +63,7 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = {
+ 	.enable = BIT(1),
+ 	.enable_mask = BIT(1),
+ 	.vmode = BIT(0),
++	.disable_val = 0,
+ 	.enable_time = 100,
+ 	.name = "pbias_mmc_omap2430"
+ };
+@@ -77,6 +79,7 @@ static const struct pbias_reg_info pbias_sim_omap3 = {
+ static const struct pbias_reg_info pbias_mmc_omap4 = {
+ 	.enable = BIT(26) | BIT(22),
+ 	.enable_mask = BIT(26) | BIT(25) | BIT(22),
++	.disable_val = BIT(25),
+ 	.vmode = BIT(21),
+ 	.enable_time = 100,
+ 	.name = "pbias_mmc_omap4"
+@@ -85,6 +88,7 @@ static const struct pbias_reg_info pbias_mmc_omap4 = {
+ static const struct pbias_reg_info pbias_mmc_omap5 = {
+ 	.enable = BIT(27) | BIT(26),
+ 	.enable_mask = BIT(27) | BIT(25) | BIT(26),
++	.disable_val = BIT(25),
+ 	.vmode = BIT(21),
+ 	.enable_time = 100,
+ 	.name = "pbias_mmc_omap5"
+@@ -159,6 +163,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
+ 		drvdata[data_idx].desc.enable_reg = res->start;
+ 		drvdata[data_idx].desc.enable_mask = info->enable_mask;
+ 		drvdata[data_idx].desc.enable_val = info->enable;
++		drvdata[data_idx].desc.disable_val = info->disable_val;
+ 
+ 		cfg.init_data = pbias_matches[idx].init_data;
+ 		cfg.driver_data = &drvdata[data_idx];
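
The effect of the new disable_val can be sketched with the update the regmap
regulator helpers are expected to perform, reg = (reg & ~enable_mask) | (val &
enable_mask), where val is enable_val on enable and disable_val on disable. On
OMAP4 this keeps the BIT(25) pull-down asserted instead of clearing the whole
field. A standalone sketch under those assumptions:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for regmap_update_bits(): reg = (reg & ~mask) | (val & mask) */
static uint32_t update_bits(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t mask = (1u << 26) | (1u << 25) | (1u << 22);	/* omap4 enable_mask */
	uint32_t enable_val = (1u << 26) | (1u << 22);
	uint32_t disable_val = 1u << 25;	/* new in this patch */
	uint32_t reg = 0;

	reg = update_bits(reg, mask, enable_val);
	printf("enabled:  %#010x\n", reg);
	reg = update_bits(reg, mask, disable_val);	/* not plain 0 */
	printf("disabled: %#010x\n", reg);	/* BIT(25) stays asserted */
	return 0;
}
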
+diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
+index 1efa4fdb7fe2..f45cd0cb1b32 100644
+--- a/drivers/s390/char/sclp_early.c
++++ b/drivers/s390/char/sclp_early.c
+@@ -7,6 +7,7 @@
+ #define KMSG_COMPONENT "sclp_early"
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+ 
++#include <linux/errno.h>
+ #include <asm/ctl_reg.h>
+ #include <asm/sclp.h>
+ #include <asm/ipl.h>
+diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
+index c956395cf46f..c89bada875f8 100644
+--- a/drivers/soc/tegra/pmc.c
++++ b/drivers/soc/tegra/pmc.c
+@@ -732,12 +732,12 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+ 	u32 value, checksum;
+ 
+ 	if (!pmc->soc->has_tsense_reset)
+-		goto out;
++		return;
+ 
+ 	np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
+ 	if (!np) {
+ 		dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
+-		goto out;
++		return;
+ 	}
+ 
+ 	if (of_property_read_u32(np, "nvidia,i2c-controller-id", &ctrl_id)) {
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 37875cf942f7..a5067739ee93 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -257,13 +257,11 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
+ 	spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+ 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+ 
+-	/* handle all the modes */
++	/* handle the 3-wire mode */
+ 	if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ 		cs |= BCM2835_SPI_CS_REN;
+-	if (spi->mode & SPI_CPOL)
+-		cs |= BCM2835_SPI_CS_CPOL;
+-	if (spi->mode & SPI_CPHA)
+-		cs |= BCM2835_SPI_CS_CPHA;
++	else
++		cs &= ~BCM2835_SPI_CS_REN;
+ 
+ 	/* for gpio_cs set dummy CS so that no HW-CS gets changed;
+ 	 * we cannot run this in bcm2835_spi_set_cs, as it does
+@@ -291,6 +289,25 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
+ 	return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
+ }
+ 
++static int bcm2835_spi_prepare_message(struct spi_master *master,
++				       struct spi_message *msg)
++{
++	struct spi_device *spi = msg->spi;
++	struct bcm2835_spi *bs = spi_master_get_devdata(master);
++	u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
++
++	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
++
++	if (spi->mode & SPI_CPOL)
++		cs |= BCM2835_SPI_CS_CPOL;
++	if (spi->mode & SPI_CPHA)
++		cs |= BCM2835_SPI_CS_CPHA;
++
++	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
++
++	return 0;
++}
++
+ static void bcm2835_spi_handle_err(struct spi_master *master,
+ 				   struct spi_message *msg)
+ {
+@@ -429,6 +446,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
+ 	master->set_cs = bcm2835_spi_set_cs;
+ 	master->transfer_one = bcm2835_spi_transfer_one;
+ 	master->handle_err = bcm2835_spi_handle_err;
++	master->prepare_message = bcm2835_spi_prepare_message;
+ 	master->dev.of_node = pdev->dev.of_node;
+ 
+ 	bs = spi_master_get_devdata(master);
+diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
+index 06b34e5bcfa3..47bb9b898dfd 100644
+--- a/drivers/spi/spi-bitbang-txrx.h
++++ b/drivers/spi/spi-bitbang-txrx.h
+@@ -49,7 +49,7 @@ bitbang_txrx_be_cpha0(struct spi_device *spi,
+ {
+ 	/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ 
+-	bool oldbit = !(word & 1);
++	u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
+ 	/* clock starts at inactive polarity */
+ 	for (word <<= (32 - bits); likely(bits); bits--) {
+ 
+@@ -81,7 +81,7 @@ bitbang_txrx_be_cpha1(struct spi_device *spi,
+ {
+ 	/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ 
+-	bool oldbit = !(word & (1 << 31));
++	u32 oldbit = (!(word & (1<<(bits-1)))) << 31;
+ 	/* clock starts at inactive polarity */
+ 	for (word <<= (32 - bits); likely(bits); bits--) {
+ 
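
The intent of the fix above, as far as the surrounding loop suggests: oldbit
tracks the last level written to MOSI as a bit-31 mask, so it must start as
the inverse of the first bit that will actually be shifted out (bit bits-1
before the word is left-aligned), guaranteeing the first comparison fires.
A standalone sketch for an 8-bit word:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int bits = 8;
	uint32_t word = 0xA5;

	/* Fixed form: inverse of the first bit to be sent, placed in bit 31 */
	uint32_t oldbit = (!(word & (1u << (bits - 1)))) << 31;

	word <<= (32 - bits);	/* MSB-first: bit 7 moves to bit 31 */
	printf("first tx bit=%u oldbit=%#x -> setmosi %s\n",
	       !!(word & (1u << 31)), oldbit,
	       (word & (1u << 31)) != oldbit ? "fires" : "skipped");
	return 0;
}
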
+diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
+index eb03e1215195..7edede6e024b 100644
+--- a/drivers/spi/spi-dw-mmio.c
++++ b/drivers/spi/spi-dw-mmio.c
+@@ -74,6 +74,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
+ 
+ 	dws->max_freq = clk_get_rate(dwsmmio->clk);
+ 
++	of_property_read_u32(pdev->dev.of_node, "reg-io-width",
++			     &dws->reg_io_width);
++
+ 	num_cs = 4;
+ 
+ 	if (pdev->dev.of_node)
+diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
+index 8d67d03c71eb..4fbfcdc5cb24 100644
+--- a/drivers/spi/spi-dw.c
++++ b/drivers/spi/spi-dw.c
+@@ -194,7 +194,7 @@ static void dw_writer(struct dw_spi *dws)
+ 			else
+ 				txw = *(u16 *)(dws->tx);
+ 		}
+-		dw_writel(dws, DW_SPI_DR, txw);
++		dw_write_io_reg(dws, DW_SPI_DR, txw);
+ 		dws->tx += dws->n_bytes;
+ 	}
+ }
+@@ -205,7 +205,7 @@ static void dw_reader(struct dw_spi *dws)
+ 	u16 rxw;
+ 
+ 	while (max--) {
+-		rxw = dw_readl(dws, DW_SPI_DR);
++		rxw = dw_read_io_reg(dws, DW_SPI_DR);
+ 		/* Only care about rx if the transfer's original "rx" is not null */
+ 		if (dws->rx_end - dws->len) {
+ 			if (dws->n_bytes == 1)
+diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
+index 6c91391c1a4f..b75ed327d5a2 100644
+--- a/drivers/spi/spi-dw.h
++++ b/drivers/spi/spi-dw.h
+@@ -109,6 +109,7 @@ struct dw_spi {
+ 	u32			fifo_len;	/* depth of the FIFO buffer */
+ 	u32			max_freq;	/* max bus freq supported */
+ 
++	u32			reg_io_width;	/* DR I/O width in bytes */
+ 	u16			bus_num;
+ 	u16			num_cs;		/* supported slave numbers */
+ 
+@@ -145,11 +146,45 @@ static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
+ 	return __raw_readl(dws->regs + offset);
+ }
+ 
++static inline u16 dw_readw(struct dw_spi *dws, u32 offset)
++{
++	return __raw_readw(dws->regs + offset);
++}
++
+ static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
+ {
+ 	__raw_writel(val, dws->regs + offset);
+ }
+ 
++static inline void dw_writew(struct dw_spi *dws, u32 offset, u16 val)
++{
++	__raw_writew(val, dws->regs + offset);
++}
++
++static inline u32 dw_read_io_reg(struct dw_spi *dws, u32 offset)
++{
++	switch (dws->reg_io_width) {
++	case 2:
++		return dw_readw(dws, offset);
++	case 4:
++	default:
++		return dw_readl(dws, offset);
++	}
++}
++
++static inline void dw_write_io_reg(struct dw_spi *dws, u32 offset, u32 val)
++{
++	switch (dws->reg_io_width) {
++	case 2:
++		dw_writew(dws, offset, val);
++		break;
++	case 4:
++	default:
++		dw_writel(dws, offset, val);
++		break;
++	}
++}
++
+ static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+ {
+ 	dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
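
The new accessors select the MMIO access width from the reg-io-width DT
property read in spi-dw-mmio.c above (2 or 4 bytes, defaulting to 4). The
dispatch pattern, sketched standalone with a plain array standing in for the
register block (0x60 assumed as the data-register offset; 16-bit result shown
for a little-endian host):

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[128];	/* stand-in for the mapped register block */

static uint32_t read_io_reg(unsigned int reg_io_width, unsigned int off)
{
	switch (reg_io_width) {
	case 2:
		return *(uint16_t *)(regs + off);	/* 16-bit access */
	case 4:
	default:
		return *(uint32_t *)(regs + off);	/* full 32-bit access */
	}
}

int main(void)
{
	*(uint32_t *)(regs + 0x60) = 0xdeadbeef;	/* pretend DR at 0x60 */
	printf("w=2: %#x  w=4: %#x\n",
	       read_io_reg(2, 0x60), read_io_reg(4, 0x60));
	return 0;
}
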
+diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
+index acce90ac7371..bb916c8d40db 100644
+--- a/drivers/spi/spi-img-spfi.c
++++ b/drivers/spi/spi-img-spfi.c
+@@ -105,6 +105,10 @@ struct img_spfi {
+ 	bool rx_dma_busy;
+ };
+ 
++struct img_spfi_device_data {
++	bool gpio_requested;
++};
++
+ static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
+ {
+ 	return readl(spfi->regs + reg);
+@@ -267,15 +271,15 @@ static int img_spfi_start_pio(struct spi_master *master,
+ 		cpu_relax();
+ 	}
+ 
+-	ret = spfi_wait_all_done(spfi);
+-	if (ret < 0)
+-		return ret;
+-
+ 	if (rx_bytes > 0 || tx_bytes > 0) {
+ 		dev_err(spfi->dev, "PIO transfer timed out\n");
+ 		return -ETIMEDOUT;
+ 	}
+ 
++	ret = spfi_wait_all_done(spfi);
++	if (ret < 0)
++		return ret;
++
+ 	return 0;
+ }
+ 
+@@ -440,21 +444,50 @@ static int img_spfi_unprepare(struct spi_master *master,
+ 
+ static int img_spfi_setup(struct spi_device *spi)
+ {
+-	int ret;
+-
+-	ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
+-			       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+-			       dev_name(&spi->dev));
+-	if (ret)
+-		dev_err(&spi->dev, "can't request chipselect gpio %d\n",
++	int ret = -EINVAL;
++	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
++
++	if (!spfi_data) {
++		spfi_data = kzalloc(sizeof(*spfi_data), GFP_KERNEL);
++		if (!spfi_data)
++			return -ENOMEM;
++		spfi_data->gpio_requested = false;
++		spi_set_ctldata(spi, spfi_data);
++	}
++	if (!spfi_data->gpio_requested) {
++		ret = gpio_request_one(spi->cs_gpio,
++				       (spi->mode & SPI_CS_HIGH) ?
++				       GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
++				       dev_name(&spi->dev));
++		if (ret)
++			dev_err(&spi->dev, "can't request chipselect gpio %d\n",
+ 				spi->cs_gpio);
+-
++		else
++			spfi_data->gpio_requested = true;
++	} else {
++		if (gpio_is_valid(spi->cs_gpio)) {
++			int mode = ((spi->mode & SPI_CS_HIGH) ?
++				    GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
++
++			ret = gpio_direction_output(spi->cs_gpio, mode);
++			if (ret)
++				dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
++					spi->cs_gpio, ret);
++		}
++	}
+ 	return ret;
+ }
+ 
+ static void img_spfi_cleanup(struct spi_device *spi)
+ {
+-	gpio_free(spi->cs_gpio);
++	struct img_spfi_device_data *spfi_data = spi_get_ctldata(spi);
++
++	if (spfi_data) {
++		if (spfi_data->gpio_requested)
++			gpio_free(spi->cs_gpio);
++		kfree(spfi_data);
++		spi_set_ctldata(spi, NULL);
++	}
+ }
+ 
+ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
+index bcc7c635d8e7..7872f3c78b51 100644
+--- a/drivers/spi/spi-sh-msiof.c
++++ b/drivers/spi/spi-sh-msiof.c
+@@ -48,8 +48,8 @@ struct sh_msiof_spi_priv {
+ 	const struct sh_msiof_chipdata *chipdata;
+ 	struct sh_msiof_spi_info *info;
+ 	struct completion done;
+-	int tx_fifo_size;
+-	int rx_fifo_size;
++	unsigned int tx_fifo_size;
++	unsigned int rx_fifo_size;
+ 	void *tx_dma_page;
+ 	void *rx_dma_page;
+ 	dma_addr_t tx_dma_addr;
+@@ -95,8 +95,6 @@ struct sh_msiof_spi_priv {
+ #define MDR2_WDLEN1(i)	(((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
+ #define MDR2_GRPMASK1	0x00000001 /* Group Output Mask 1 (SH, A1) */
+ 
+-#define MAX_WDLEN	256U
+-
+ /* TSCR and RSCR */
+ #define SCR_BRPS_MASK	    0x1f00 /* Prescaler Setting (1-32) */
+ #define SCR_BRPS(i)	(((i) - 1) << 8)
+@@ -850,7 +848,12 @@ static int sh_msiof_transfer_one(struct spi_master *master,
+ 		 *  DMA supports 32-bit words only, hence pack 8-bit and 16-bit
+ 		 *  words, with byte or word swapping respectively.
+ 		 */
+-		unsigned int l = min(len, MAX_WDLEN * 4);
++		unsigned int l = 0;
++
++		if (tx_buf)
++			l = min(len, p->tx_fifo_size * 4);
++		if (rx_buf)
++			l = min(len, p->rx_fifo_size * 4);
+ 
+ 		if (bits <= 8) {
+ 			if (l & 3)
+@@ -963,7 +966,7 @@ static const struct sh_msiof_chipdata sh_data = {
+ 
+ static const struct sh_msiof_chipdata r8a779x_data = {
+ 	.tx_fifo_size = 64,
+-	.rx_fifo_size = 256,
++	.rx_fifo_size = 64,
+ 	.master_flags = SPI_MASTER_MUST_TX,
+ };
+ 
+diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
+index 133f53a9c1d4..a339c1e9997a 100644
+--- a/drivers/spi/spi-xilinx.c
++++ b/drivers/spi/spi-xilinx.c
+@@ -249,19 +249,23 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+ 	xspi->tx_ptr = t->tx_buf;
+ 	xspi->rx_ptr = t->rx_buf;
+ 	remaining_words = t->len / xspi->bytes_per_word;
+-	reinit_completion(&xspi->done);
+ 
+ 	if (xspi->irq >= 0 &&  remaining_words > xspi->buffer_size) {
++		u32 isr;
+ 		use_irq = true;
+-		xspi->write_fn(XSPI_INTR_TX_EMPTY,
+-				xspi->regs + XIPIF_V123B_IISR_OFFSET);
+-		/* Enable the global IPIF interrupt */
+-		xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+-				xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+ 		/* Inhibit irq to avoid spurious irqs on tx_empty*/
+ 		cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ 		xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ 			       xspi->regs + XSPI_CR_OFFSET);
++		/* ACK old irqs (if any) */
++		isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
++		if (isr)
++			xspi->write_fn(isr,
++				       xspi->regs + XIPIF_V123B_IISR_OFFSET);
++		/* Enable the global IPIF interrupt */
++		xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
++				xspi->regs + XIPIF_V123B_DGIER_OFFSET);
++		reinit_completion(&xspi->done);
+ 	}
+ 
+ 	while (remaining_words) {
+@@ -302,8 +306,10 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+ 		remaining_words -= n_words;
+ 	}
+ 
+-	if (use_irq)
++	if (use_irq) {
+ 		xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
++		xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
++	}
+ 
+ 	return t->len;
+ }
+diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+index 934af3ff7897..b0fc027cf485 100644
+--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
++++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
+@@ -120,8 +120,20 @@ static int adl_pci7x3x_do_insn_bits(struct comedi_device *dev,
+ {
+ 	unsigned long reg = (unsigned long)s->private;
+ 
+-	if (comedi_dio_update_state(s, data))
+-		outl(s->state, dev->iobase + reg);
++	if (comedi_dio_update_state(s, data)) {
++		unsigned int val = s->state;
++
++		if (s->n_chan == 16) {
++			/*
++			 * It seems the PCI-7230 needs the 16-bit DO state
++			 * to be shifted left by 16 bits before being written
++			 * to the 32-bit register.  Set the value in both
++			 * halves of the register to be sure.
++			 */
++			val |= val << 16;
++		}
++		outl(val, dev->iobase + reg);
++	}
+ 
+ 	data[1] = s->state;
+ 
+diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
+index eaa9add491df..dc0b25a54088 100644
+--- a/drivers/staging/comedi/drivers/usbduxsigma.c
++++ b/drivers/staging/comedi/drivers/usbduxsigma.c
+@@ -550,27 +550,6 @@ static int usbduxsigma_ai_cmdtest(struct comedi_device *dev,
+ 	if (err)
+ 		return 3;
+ 
+-	/* Step 4: fix up any arguments */
+-
+-	if (high_speed) {
+-		/*
+-		 * every 2 channels get a time window of 125us. Thus, if we
+-		 * sample all 16 channels we need 1ms. If we sample only one
+-		 * channel we need only 125us
+-		 */
+-		devpriv->ai_interval = interval;
+-		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
+-	} else {
+-		/* interval always 1ms */
+-		devpriv->ai_interval = 1;
+-		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
+-	}
+-	if (devpriv->ai_timer < 1)
+-		err |= -EINVAL;
+-
+-	if (err)
+-		return 4;
+-
+ 	return 0;
+ }
+ 
+@@ -668,6 +647,22 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
+ 
+ 	down(&devpriv->sem);
+ 
++	if (devpriv->high_speed) {
++		/*
++		 * every 2 channels get a time window of 125us. Thus, if we
++		 * sample all 16 channels we need 1ms. If we sample only one
++		 * channel we need only 125us
++		 */
++		unsigned int interval = usbduxsigma_chans_to_interval(len);
++
++		devpriv->ai_interval = interval;
++		devpriv->ai_timer = cmd->scan_begin_arg / (125000 * interval);
++	} else {
++		/* interval always 1ms */
++		devpriv->ai_interval = 1;
++		devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
++	}
++
+ 	for (i = 0; i < len; i++) {
+ 		unsigned int chan  = CR_CHAN(cmd->chanlist[i]);
+ 
+@@ -917,25 +912,6 @@ static int usbduxsigma_ao_cmdtest(struct comedi_device *dev,
+ 	if (err)
+ 		return 3;
+ 
+-	/* Step 4: fix up any arguments */
+-
+-	/* we count in timer steps */
+-	if (high_speed) {
+-		/* timing of the conversion itself: every 125 us */
+-		devpriv->ao_timer = cmd->convert_arg / 125000;
+-	} else {
+-		/*
+-		 * timing of the scan: every 1ms
+-		 * we get all channels at once
+-		 */
+-		devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+-	}
+-	if (devpriv->ao_timer < 1)
+-		err |= -EINVAL;
+-
+-	if (err)
+-		return 4;
+-
+ 	return 0;
+ }
+ 
+@@ -948,6 +924,20 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
+ 
+ 	down(&devpriv->sem);
+ 
++	if (cmd->convert_src == TRIG_TIMER) {
++		/*
++		 * timing of the conversion itself: every 125 us
++		 * at high speed (not used yet)
++		 */
++		devpriv->ao_timer = cmd->convert_arg / 125000;
++	} else {
++		/*
++		 * timing of the scan: every 1ms
++		 * we get all channels at once
++		 */
++		devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
++	}
++
+ 	devpriv->ao_counter = devpriv->ao_timer;
+ 
+ 	if (cmd->start_src == TRIG_NOW) {
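
Assuming usbduxsigma_chans_to_interval() works out to one 125 us slot per two
channels (which is what the comment in the AI hunk above implies), the
high-speed timer arithmetic for a 16-channel, 1 ms scan looks like this
(scan_begin_arg is in nanoseconds):

#include <stdio.h>

int main(void)
{
	unsigned int scan_begin_arg = 1000000;	/* 1 ms scan period, in ns */
	unsigned int nchan = 16;
	unsigned int interval = (nchan + 1) / 2;	/* assumed: 125 us per 2 chans */
	unsigned int ai_timer = scan_begin_arg / (125000 * interval);

	printf("interval=%u ai_timer=%u\n", interval, ai_timer);	/* 8, 1 */
	return 0;
}
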
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 46bcebba54b2..9373cca121d3 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -2000,6 +2000,12 @@ pci_wch_ch38x_setup(struct serial_private *priv,
+ 
+ #define PCI_DEVICE_ID_EXAR_XR17V8358	0x8358
+ 
++#define PCI_VENDOR_ID_PERICOM			0x12D8
++#define PCI_DEVICE_ID_PERICOM_PI7C9X7951	0x7951
++#define PCI_DEVICE_ID_PERICOM_PI7C9X7952	0x7952
++#define PCI_DEVICE_ID_PERICOM_PI7C9X7954	0x7954
++#define PCI_DEVICE_ID_PERICOM_PI7C9X7958	0x7958
++
+ /* Unknown vendors/cards - this should not be in linux/pci_ids.h */
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584	0x1584
+ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1588	0x1588
+@@ -2314,27 +2320,12 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ 	 * Pericom
+ 	 */
+ 	{
+-		.vendor		= 0x12d8,
+-		.device		= 0x7952,
+-		.subvendor	= PCI_ANY_ID,
+-		.subdevice	= PCI_ANY_ID,
+-		.setup		= pci_pericom_setup,
+-	},
+-	{
+-		.vendor		= 0x12d8,
+-		.device		= 0x7954,
+-		.subvendor	= PCI_ANY_ID,
+-		.subdevice	= PCI_ANY_ID,
+-		.setup		= pci_pericom_setup,
+-	},
+-	{
+-		.vendor		= 0x12d8,
+-		.device		= 0x7958,
+-		.subvendor	= PCI_ANY_ID,
+-		.subdevice	= PCI_ANY_ID,
+-		.setup		= pci_pericom_setup,
++		.vendor         = PCI_VENDOR_ID_PERICOM,
++		.device         = PCI_ANY_ID,
++		.subvendor      = PCI_ANY_ID,
++		.subdevice      = PCI_ANY_ID,
++		.setup          = pci_pericom_setup,
+ 	},
+-
+ 	/*
+ 	 * PLX
+ 	 */
+@@ -3031,6 +3022,10 @@ enum pci_board_num_t {
+ 	pbn_fintek_8,
+ 	pbn_fintek_12,
+ 	pbn_wch384_4,
++	pbn_pericom_PI7C9X7951,
++	pbn_pericom_PI7C9X7952,
++	pbn_pericom_PI7C9X7954,
++	pbn_pericom_PI7C9X7958,
+ };
+ 
+ /*
+@@ -3848,7 +3843,6 @@ static struct pciserial_board pci_boards[] = {
+ 		.base_baud	= 115200,
+ 		.first_offset	= 0x40,
+ 	},
+-
+ 	[pbn_wch384_4] = {
+ 		.flags		= FL_BASE0,
+ 		.num_ports	= 4,
+@@ -3856,6 +3850,33 @@ static struct pciserial_board pci_boards[] = {
+ 		.uart_offset    = 8,
+ 		.first_offset   = 0xC0,
+ 	},
++	/*
++	 * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
++	 */
++	[pbn_pericom_PI7C9X7951] = {
++		.flags          = FL_BASE0,
++		.num_ports      = 1,
++		.base_baud      = 921600,
++		.uart_offset	= 0x8,
++	},
++	[pbn_pericom_PI7C9X7952] = {
++		.flags          = FL_BASE0,
++		.num_ports      = 2,
++		.base_baud      = 921600,
++		.uart_offset	= 0x8,
++	},
++	[pbn_pericom_PI7C9X7954] = {
++		.flags          = FL_BASE0,
++		.num_ports      = 4,
++		.base_baud      = 921600,
++		.uart_offset	= 0x8,
++	},
++	[pbn_pericom_PI7C9X7958] = {
++		.flags          = FL_BASE0,
++		.num_ports      = 8,
++		.base_baud      = 921600,
++		.uart_offset	= 0x8,
++	},
+ };
+ 
+ static const struct pci_device_id blacklist[] = {
+@@ -5117,6 +5138,25 @@ static struct pci_device_id serial_pci_tbl[] = {
+ 		0,
+ 		0, pbn_exar_XR17V8358 },
+ 	/*
++	 * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
++	 */
++	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_pericom_PI7C9X7951 },
++	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_pericom_PI7C9X7952 },
++	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_pericom_PI7C9X7954 },
++	{   PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
++		PCI_ANY_ID, PCI_ANY_ID,
++		0,
++		0, pbn_pericom_PI7C9X7958 },
++	/*
+ 	 * Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
+ 	 */
+ 	{	PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
+diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
+index 50a09cd76d50..658b392d1170 100644
+--- a/drivers/tty/serial/8250/8250_pnp.c
++++ b/drivers/tty/serial/8250/8250_pnp.c
+@@ -41,6 +41,12 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ 	{	"AEI1240",		0	},
+ 	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
+ 	{	"AKY1021",		0 /*SPCI_FL_NO_SHIRQ*/	},
++	/*
++	 * ALi Fast Infrared Controller
++	 * Native driver (ali-ircc) is broken so at least
++	 * The native driver (ali-ircc) is broken, so at least
++	 * this way it can be used with irtty-sir.
++	{	"ALI5123",		0	},
+ 	/* AZT3005 PnP SOUND DEVICE */
+ 	{	"AZT4001",		0	},
+ 	/* Best Data Products Inc. Smart One 336F PnP Modem */
+@@ -364,6 +370,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ 	/* Winbond CIR port, should not be probed. We should keep track
+ 	   of it to prevent the legacy serial driver from probing it */
+ 	{	"WEC1022",		CIR_PORT	},
++	/*
++	 * SMSC IrCC SIR/FIR port; should not be probed by the serial
++	 * driver either, so that its own driver can bind to it.
++	 */
++	{	"SMCF010",		CIR_PORT	},
+ 	{	"",			0	}
+ };
+ 
+diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c
+index 35c55505b3eb..5a41b8fbb10a 100644
+--- a/drivers/tty/serial/men_z135_uart.c
++++ b/drivers/tty/serial/men_z135_uart.c
+@@ -392,7 +392,6 @@ static irqreturn_t men_z135_intr(int irq, void *data)
+ 	struct men_z135_port *uart = (struct men_z135_port *)data;
+ 	struct uart_port *port = &uart->port;
+ 	bool handled = false;
+-	unsigned long flags;
+ 	int irq_id;
+ 
+ 	uart->stat_reg = ioread32(port->membase + MEN_Z135_STAT_REG);
+@@ -401,7 +400,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
+ 	if (!irq_id)
+ 		goto out;
+ 
+-	spin_lock_irqsave(&port->lock, flags);
++	spin_lock(&port->lock);
+ 	/* It's safe to write to IIR[7:6] RXC[9:8] */
+ 	iowrite8(irq_id, port->membase + MEN_Z135_STAT_REG);
+ 
+@@ -427,7 +426,7 @@ static irqreturn_t men_z135_intr(int irq, void *data)
+ 		handled = true;
+ 	}
+ 
+-	spin_unlock_irqrestore(&port->lock, flags);
++	spin_unlock(&port->lock);
+ out:
+ 	return IRQ_RETVAL(handled);
+ }
+@@ -717,7 +716,7 @@ static void men_z135_set_termios(struct uart_port *port,
+ 
+ 	baud = uart_get_baud_rate(port, termios, old, 0, uart_freq / 16);
+ 
+-	spin_lock(&port->lock);
++	spin_lock_irq(&port->lock);
+ 	if (tty_termios_baud_rate(termios))
+ 		tty_termios_encode_baud_rate(termios, baud, baud);
+ 
+@@ -725,7 +724,7 @@ static void men_z135_set_termios(struct uart_port *port,
+ 	iowrite32(bd_reg, port->membase + MEN_Z135_BAUD_REG);
+ 
+ 	uart_update_timeout(port, termios->c_cflag, baud);
+-	spin_unlock(&port->lock);
++	spin_unlock_irq(&port->lock);
+ }
+ 
+ static const char *men_z135_type(struct uart_port *port)
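
The two changes above follow the usual lock-context rule: inside the hardirq
handler local interrupts are already disabled, so the _irqsave variant is
redundant, while set_termios runs in process context and must block the
handler with spin_lock_irq(). A minimal kernel-style sketch of the pattern
(names hypothetical):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

static irqreturn_t demo_irq(int irq, void *data)
{
	/* hardirq context: local interrupts already off, plain lock suffices */
	spin_lock(&demo_lock);
	/* ... touch state shared with process context ... */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

static void demo_set_config(void)
{
	/* process context: must disable local interrupts, or demo_irq()
	 * could preempt us on this CPU while we hold the lock and deadlock */
	spin_lock_irq(&demo_lock);
	/* ... update the same shared state ... */
	spin_unlock_irq(&demo_lock);
}
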
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index a0ae942d9562..1e0d9b8c48c9 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -295,15 +295,6 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
+ 	if (ourport->tx_mode != S3C24XX_TX_DMA)
+ 		enable_tx_dma(ourport);
+ 
+-	while (xmit->tail & (dma_get_cache_alignment() - 1)) {
+-		if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
+-			return 0;
+-		wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+-		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+-		port->icount.tx++;
+-		count--;
+-	}
+-
+ 	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
+ 	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
+ 
+@@ -342,7 +333,9 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport)
+ 		return;
+ 	}
+ 
+-	if (!ourport->dma || !ourport->dma->tx_chan || count < port->fifosize)
++	if (!ourport->dma || !ourport->dma->tx_chan ||
++	    count < ourport->min_dma_size ||
++	    xmit->tail & (dma_get_cache_alignment() - 1))
+ 		s3c24xx_serial_start_tx_pio(ourport);
+ 	else
+ 		s3c24xx_serial_start_tx_dma(ourport, count);
+@@ -736,15 +729,20 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
+ 	struct uart_port *port = &ourport->port;
+ 	struct circ_buf *xmit = &port->state->xmit;
+ 	unsigned long flags;
+-	int count;
++	int count, dma_count = 0;
+ 
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
+ 	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ 
+-	if (ourport->dma && ourport->dma->tx_chan && count >= port->fifosize) {
+-		s3c24xx_serial_start_tx_dma(ourport, count);
+-		goto out;
++	if (ourport->dma && ourport->dma->tx_chan &&
++	    count >= ourport->min_dma_size) {
++		int align = dma_get_cache_alignment() -
++			(xmit->tail & (dma_get_cache_alignment() - 1));
++		if (count-align >= ourport->min_dma_size) {
++			dma_count = count-align;
++			count = align;
++		}
+ 	}
+ 
+ 	if (port->x_char) {
+@@ -765,14 +763,24 @@ static irqreturn_t s3c24xx_serial_tx_chars(int irq, void *id)
+ 
+ 	/* try and drain the buffer... */
+ 
+-	count = port->fifosize;
+-	while (!uart_circ_empty(xmit) && count-- > 0) {
++	if (count > port->fifosize) {
++		count = port->fifosize;
++		dma_count = 0;
++	}
++
++	while (!uart_circ_empty(xmit) && count > 0) {
+ 		if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)
+ 			break;
+ 
+ 		wr_regb(port, S3C2410_UTXH, xmit->buf[xmit->tail]);
+ 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ 		port->icount.tx++;
++		count--;
++	}
++
++	if (!count && dma_count) {
++		s3c24xx_serial_start_tx_dma(ourport, dma_count);
++		goto out;
+ 	}
+ 
+ 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) {
+@@ -1838,6 +1846,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
+ 	else if (ourport->info->fifosize)
+ 		ourport->port.fifosize = ourport->info->fifosize;
+ 
++	/*
++	 * DMA transfers must be aligned at least to the cache line size,
++	 * so find the minimal transfer size suitable for DMA mode
++	 */
++	ourport->min_dma_size = max_t(int, ourport->port.fifosize,
++				    dma_get_cache_alignment());
++
+ 	probe_index++;
+ 
+ 	dbg("%s: initialising port %p...\n", __func__, ourport);
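
With min_dma_size in place, the tx path above splits each run at a cache-line
boundary: a short PIO head brings xmit->tail up to alignment, and the rest
goes out by DMA provided the remainder is still at least min_dma_size. A
rough arithmetic sketch (all values assumed):

#include <stdio.h>

int main(void)
{
	unsigned int cacheline = 64;	/* dma_get_cache_alignment() stand-in */
	unsigned int min_dma = 64;	/* max(fifosize, cacheline) */
	unsigned int tail = 100, count = 300;	/* circ-buffer tail, bytes pending */

	unsigned int align = cacheline - (tail & (cacheline - 1));
	if (count - align >= min_dma)
		printf("PIO %u byte(s) to align, then DMA %u\n",
		       align, count - align);	/* PIO 28, DMA 272 */
	else
		printf("all %u bytes by PIO\n", count);
	return 0;
}
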
+diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
+index d275032aa68d..fc5deaa4f382 100644
+--- a/drivers/tty/serial/samsung.h
++++ b/drivers/tty/serial/samsung.h
+@@ -82,6 +82,7 @@ struct s3c24xx_uart_port {
+ 	unsigned char			tx_claimed;
+ 	unsigned int			pm_level;
+ 	unsigned long			baudclk_rate;
++	unsigned int			min_dma_size;
+ 
+ 	unsigned int			rx_irq;
+ 	unsigned int			tx_irq;
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 69e769c35cf5..06ecd1e6871c 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -820,6 +820,11 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
+ 		unsigned maxp = ep0->endpoint.maxpacket;
+ 
+ 		transfer_size += (maxp - (transfer_size % maxp));
++
++		/* At most DWC3_EP0_BOUNCE_SIZE bytes can be received */
++		if (transfer_size > DWC3_EP0_BOUNCE_SIZE)
++			transfer_size = DWC3_EP0_BOUNCE_SIZE;
++
+ 		transferred = min_t(u32, ur->length,
+ 				transfer_size - length);
+ 		memcpy(ur->buf, dwc->ep0_bounce, transferred);
+@@ -941,11 +946,14 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
+ 			return;
+ 		}
+ 
+-		WARN_ON(req->request.length > DWC3_EP0_BOUNCE_SIZE);
+-
+ 		maxpacket = dep->endpoint.maxpacket;
+ 		transfer_size = roundup(req->request.length, maxpacket);
+ 
++		if (transfer_size > DWC3_EP0_BOUNCE_SIZE) {
++			dev_WARN(dwc->dev, "bounce buf can't handle req len\n");
++			transfer_size = DWC3_EP0_BOUNCE_SIZE;
++		}
++
+ 		dwc->ep0_bounced = true;
+ 
+ 		/*
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 531861547253..96d935b00504 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -975,6 +975,29 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
+ 			"%s:%d Error!\n", __func__, __LINE__);
+ }
+ 
++static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
++	struct usb_endpoint_descriptor *ep_desc,
++	unsigned int factor, bool is_playback)
++{
++	int chmask, srate, ssize;
++	u16 max_packet_size;
++
++	if (is_playback) {
++		chmask = uac2_opts->p_chmask;
++		srate = uac2_opts->p_srate;
++		ssize = uac2_opts->p_ssize;
++	} else {
++		chmask = uac2_opts->c_chmask;
++		srate = uac2_opts->c_srate;
++		ssize = uac2_opts->c_ssize;
++	}
++
++	max_packet_size = num_channels(chmask) * ssize *
++		DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
++	ep_desc->wMaxPacketSize = cpu_to_le16(min(max_packet_size,
++				le16_to_cpu(ep_desc->wMaxPacketSize)));
++}
++
+ static int
+ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ {
+@@ -1070,10 +1093,14 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
+ 	uac2->p_prm.uac2 = uac2;
+ 	uac2->c_prm.uac2 = uac2;
+ 
++	/* Calculate wMaxPacketSize according to audio bandwidth */
++	set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
++	set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
++	set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
++	set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
++
+ 	hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
+-	hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize;
+ 	hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+-	hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize;
+ 
+ 	ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
+ 	if (ret)
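
A worked example of set_ep_max_packet_size() above, assuming a stereo 48 kHz
16-bit stream; factor is 1000 for full speed (1 ms frames) and 8000 for high
speed (125 us microframes), and the result is then clamped to the
descriptor's existing wMaxPacketSize by the min() in the helper. The
bInterval values here (1 for FS, 4 for HS) are assumptions for illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int channels = 2, srate = 48000, ssize = 2;

	/* Full speed: factor 1000, bInterval 1 -> one packet per 1 ms */
	unsigned int fs = channels * ssize *
		DIV_ROUND_UP(srate, 1000 / (1 << (1 - 1)));

	/* High speed: factor 8000, bInterval 4 -> one packet per 1 ms */
	unsigned int hs = channels * ssize *
		DIV_ROUND_UP(srate, 8000 / (1 << (4 - 1)));

	printf("fs=%u hs=%u\n", fs, hs);	/* 192 192 */
	return 0;
}
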
+diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
+index 309706fe4bf0..9704053dfe05 100644
+--- a/drivers/usb/gadget/udc/m66592-udc.c
++++ b/drivers/usb/gadget/udc/m66592-udc.c
+@@ -1052,7 +1052,7 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
+ 				tmp = m66592_read(m66592, M66592_INTSTS0) &
+ 								M66592_CTSQ;
+ 				udelay(1);
+-			} while (tmp != M66592_CS_IDST || timeout-- > 0);
++			} while (tmp != M66592_CS_IDST && timeout-- > 0);
+ 
+ 			if (tmp == M66592_CS_IDST)
+ 				m66592_bset(m66592,
+diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
+index 5e44407aa099..5216f2b09d63 100644
+--- a/drivers/usb/host/ehci-sysfs.c
++++ b/drivers/usb/host/ehci-sysfs.c
+@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
+ 	int			count = PAGE_SIZE;
+ 	char			*ptr = buf;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	nports = HCS_N_PORTS(ehci->hcs_params);
+ 
+ 	for (index = 0; index < nports; ++index) {
+@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
+ 	struct ehci_hcd		*ehci;
+ 	int			portnum, new_owner;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	new_owner = PORT_OWNER;		/* Owned by companion */
+ 	if (sscanf(buf, "%d", &portnum) != 1)
+ 		return -EINVAL;
+@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
+ 	struct ehci_hcd		*ehci;
+ 	int			n;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
+ 	return n;
+ }
+@@ -101,7 +101,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
+ 	unsigned long		flags;
+ 	ssize_t			ret;
+ 
+-	ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
++	ehci = hcd_to_ehci(dev_get_drvdata(dev));
+ 	if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ 		return -EINVAL;
+ 
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 4c8b3b82103d..a5a0376bbd48 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -605,6 +605,10 @@ static const struct usb_device_id id_table_combined[] = {
+ 	{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
+ 		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ 	{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
++	{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
+ 	/*
+ 	 * ELV devices:
+ 	 */
+diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
+index 792e054126de..2943b97b2a83 100644
+--- a/drivers/usb/serial/ftdi_sio_ids.h
++++ b/drivers/usb/serial/ftdi_sio_ids.h
+@@ -568,6 +568,14 @@
+  */
+ #define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */
+ 
++/*
++ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
++ */
++#define FTDI_CUSTOMWARE_MINIPLEX_PID	0xfd48	/* MiniPlex first generation NMEA Multiplexer */
++#define FTDI_CUSTOMWARE_MINIPLEX2_PID	0xfd49	/* MiniPlex-USB and MiniPlex-2 series */
++#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID	0xfd4a	/* MiniPlex-2Wi */
++#define FTDI_CUSTOMWARE_MINIPLEX3_PID	0xfd4b	/* MiniPlex-3 series */
++
+ 
+ /********************************/
+ /** third-party VID/PID combos **/
+diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
+index f5257af33ecf..ae682e4eeaef 100644
+--- a/drivers/usb/serial/pl2303.c
++++ b/drivers/usb/serial/pl2303.c
+@@ -362,21 +362,38 @@ static speed_t pl2303_encode_baud_rate_direct(unsigned char buf[4],
+ static speed_t pl2303_encode_baud_rate_divisor(unsigned char buf[4],
+ 								speed_t baud)
+ {
+-	unsigned int tmp;
++	unsigned int baseline, mantissa, exponent;
+ 
+ 	/*
+ 	 * Apparently the formula is:
+-	 * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
++	 *   baudrate = 12M * 32 / (mantissa * 4^exponent)
++	 * where
++	 *   mantissa = buf[8:0]
++	 *   exponent = buf[11:9]
+ 	 */
+-	tmp = 12000000 * 32 / baud;
++	baseline = 12000000 * 32;
++	mantissa = baseline / baud;
++	if (mantissa == 0)
++		mantissa = 1;	/* Avoid dividing by zero if baud > 32*12M. */
++	exponent = 0;
++	while (mantissa >= 512) {
++		if (exponent < 7) {
++			mantissa >>= 2;	/* divide by 4 */
++			exponent++;
++		} else {
++			/* Exponent is maxed. Trim mantissa and leave. */
++			mantissa = 511;
++			break;
++		}
++	}
++
+ 	buf[3] = 0x80;
+ 	buf[2] = 0;
+-	buf[1] = (tmp >= 256);
+-	while (tmp >= 256) {
+-		tmp >>= 2;
+-		buf[1] <<= 1;
+-	}
+-	buf[0] = tmp;
++	buf[1] = exponent << 1 | mantissa >> 8;
++	buf[0] = mantissa & 0xff;
++
++	/* Calculate and return the exact baud rate. */
++	baud = (baseline / mantissa) >> (exponent << 1);
+ 
+ 	return baud;
+ }
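
A worked example of the encoding above for 115200 baud (the
exponent-saturation branch is omitted here for brevity, since it never
triggers at realistic rates):

#include <stdio.h>

int main(void)
{
	unsigned int baseline = 12000000 * 32;	/* 384 MHz reference */
	unsigned int baud = 115200;
	unsigned int mantissa = baseline / baud;	/* 3333 */
	unsigned int exponent = 0;

	while (mantissa >= 512 && exponent < 7) {
		mantissa >>= 2;		/* divide by 4 */
		exponent++;
	}
	/* 3333 -> 833 (exp 1) -> 208 (exp 2) */
	printf("mantissa=%u exponent=%u actual=%u baud\n", mantissa, exponent,
	       (baseline / mantissa) >> (exponent << 1));	/* 208, 2, 115384 */
	return 0;
}
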
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index d156545728c2..ebcec8cda858 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -139,6 +139,7 @@ static const struct usb_device_id id_table[] = {
+ 	{USB_DEVICE(0x0AF0, 0x8120)},	/* Option GTM681W */
+ 
+ 	/* non-Gobi Sierra Wireless devices */
++	{DEVICE_SWI(0x03f0, 0x4e1d)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ 	{DEVICE_SWI(0x0f3d, 0x68a2)},	/* Sierra Wireless MC7700 */
+ 	{DEVICE_SWI(0x114f, 0x68a2)},	/* Sierra Wireless MC7750 */
+ 	{DEVICE_SWI(0x1199, 0x68a2)},	/* Sierra Wireless MC7710 */
+diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
+index 8fceec7298e0..6ed804450a5a 100644
+--- a/drivers/usb/serial/symbolserial.c
++++ b/drivers/usb/serial/symbolserial.c
+@@ -94,7 +94,7 @@ exit:
+ 
+ static int symbol_open(struct tty_struct *tty, struct usb_serial_port *port)
+ {
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 	unsigned long flags;
+ 	int result = 0;
+ 
+@@ -120,7 +120,7 @@ static void symbol_close(struct usb_serial_port *port)
+ static void symbol_throttle(struct tty_struct *tty)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 
+ 	spin_lock_irq(&priv->lock);
+ 	priv->throttled = true;
+@@ -130,7 +130,7 @@ static void symbol_throttle(struct tty_struct *tty)
+ static void symbol_unthrottle(struct tty_struct *tty)
+ {
+ 	struct usb_serial_port *port = tty->driver_data;
+-	struct symbol_private *priv = usb_get_serial_data(port->serial);
++	struct symbol_private *priv = usb_get_serial_port_data(port);
+ 	int result;
+ 	bool was_throttled;
+ 
+diff --git a/fs/ceph/super.c b/fs/ceph/super.c
+index 4e9905374078..0d47422e3548 100644
+--- a/fs/ceph/super.c
++++ b/fs/ceph/super.c
+@@ -466,7 +466,7 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
+ 	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
+ 		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
+ 	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
+-		seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
++		seq_show_option(m, "snapdirname", fsopt->snapdir_name);
+ 
+ 	return 0;
+ }
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 0a9fb6b53126..6a1119e87fbb 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -394,17 +394,17 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
+ 	struct sockaddr *srcaddr;
+ 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
+ 
+-	seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
++	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
+ 	cifs_show_security(s, tcon->ses);
+ 	cifs_show_cache_flavor(s, cifs_sb);
+ 
+ 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
+ 		seq_puts(s, ",multiuser");
+ 	else if (tcon->ses->user_name)
+-		seq_printf(s, ",username=%s", tcon->ses->user_name);
++		seq_show_option(s, "username", tcon->ses->user_name);
+ 
+ 	if (tcon->ses->domainName)
+-		seq_printf(s, ",domain=%s", tcon->ses->domainName);
++		seq_show_option(s, "domain", tcon->ses->domainName);
+ 
+ 	if (srcaddr->sa_family != AF_UNSPEC) {
+ 		struct sockaddr_in *saddr4;
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ca12affdba96..6b4eb94b04a5 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1738,10 +1738,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
+ 	}
+ 
+ 	if (sbi->s_qf_names[USRQUOTA])
+-		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
++		seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);
+ 
+ 	if (sbi->s_qf_names[GRPQUOTA])
+-		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
++		seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);
+ #endif
+ }
+ 
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 859c6edbf81a..c18b49dc5d4f 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1334,11 +1334,11 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ 	if (is_ancestor(root, sdp->sd_master_dir))
+ 		seq_puts(s, ",meta");
+ 	if (args->ar_lockproto[0])
+-		seq_printf(s, ",lockproto=%s", args->ar_lockproto);
++		seq_show_option(s, "lockproto", args->ar_lockproto);
+ 	if (args->ar_locktable[0])
+-		seq_printf(s, ",locktable=%s", args->ar_locktable);
++		seq_show_option(s, "locktable", args->ar_locktable);
+ 	if (args->ar_hostdata[0])
+-		seq_printf(s, ",hostdata=%s", args->ar_hostdata);
++		seq_show_option(s, "hostdata", args->ar_hostdata);
+ 	if (args->ar_spectator)
+ 		seq_puts(s, ",spectator");
+ 	if (args->ar_localflocks)
+diff --git a/fs/hfs/super.c b/fs/hfs/super.c
+index eee7206c38d1..410b65eea683 100644
+--- a/fs/hfs/super.c
++++ b/fs/hfs/super.c
+@@ -135,9 +135,9 @@ static int hfs_show_options(struct seq_file *seq, struct dentry *root)
+ 	struct hfs_sb_info *sbi = HFS_SB(root->d_sb);
+ 
+ 	if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
+-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
++		seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
+ 	if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
+-		seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
++		seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
+ 	seq_printf(seq, ",uid=%u,gid=%u",
+ 			from_kuid_munged(&init_user_ns, sbi->s_uid),
+ 			from_kgid_munged(&init_user_ns, sbi->s_gid));
+diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
+index c90b72ee676d..bb806e58c977 100644
+--- a/fs/hfsplus/options.c
++++ b/fs/hfsplus/options.c
+@@ -218,9 +218,9 @@ int hfsplus_show_options(struct seq_file *seq, struct dentry *root)
+ 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(root->d_sb);
+ 
+ 	if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
+-		seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
++		seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
+ 	if (sbi->type != HFSPLUS_DEF_CR_TYPE)
+-		seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
++		seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
+ 	seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
+ 			from_kuid_munged(&init_user_ns, sbi->uid),
+ 			from_kgid_munged(&init_user_ns, sbi->gid));
+diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
+index 07d8d8f52faf..de2d6245e9fa 100644
+--- a/fs/hostfs/hostfs_kern.c
++++ b/fs/hostfs/hostfs_kern.c
+@@ -260,7 +260,7 @@ static int hostfs_show_options(struct seq_file *seq, struct dentry *root)
+ 	size_t offset = strlen(root_ino) + 1;
+ 
+ 	if (strlen(root_path) > offset)
+-		seq_printf(seq, ",%s", root_path + offset);
++		seq_show_option(seq, root_path + offset, NULL);
+ 
+ 	if (append)
+ 		seq_puts(seq, ",append");
+diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
+index a0872f239f04..9e92c9c2d319 100644
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -8,6 +8,17 @@
+ #include <linux/sched.h>
+ #include "hpfs_fn.h"
+ 
++static void hpfs_update_directory_times(struct inode *dir)
++{
++	time_t t = get_seconds();
++	if (t == dir->i_mtime.tv_sec &&
++	    t == dir->i_ctime.tv_sec)
++		return;
++	dir->i_mtime.tv_sec = dir->i_ctime.tv_sec = t;
++	dir->i_mtime.tv_nsec = dir->i_ctime.tv_nsec = 0;
++	hpfs_write_inode_nolock(dir);
++}
++
+ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ {
+ 	const unsigned char *name = dentry->d_name.name;
+@@ -99,6 +110,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+ 		result->i_mode = mode | S_IFDIR;
+ 		hpfs_write_inode_nolock(result);
+ 	}
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -187,6 +199,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, b
+ 		result->i_mode = mode | S_IFREG;
+ 		hpfs_write_inode_nolock(result);
+ 	}
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -262,6 +275,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, de
+ 	insert_inode_hash(result);
+ 
+ 	hpfs_write_inode_nolock(result);
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	brelse(bh);
+ 	hpfs_unlock(dir->i_sb);
+@@ -340,6 +354,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
+ 	insert_inode_hash(result);
+ 
+ 	hpfs_write_inode_nolock(result);
++	hpfs_update_directory_times(dir);
+ 	d_instantiate(dentry, result);
+ 	hpfs_unlock(dir->i_sb);
+ 	return 0;
+@@ -423,6 +438,8 @@ again:
+ out1:
+ 	hpfs_brelse4(&qbh);
+ out:
++	if (!err)
++		hpfs_update_directory_times(dir);
+ 	hpfs_unlock(dir->i_sb);
+ 	return err;
+ }
+@@ -477,6 +494,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
+ out1:
+ 	hpfs_brelse4(&qbh);
+ out:
++	if (!err)
++		hpfs_update_directory_times(dir);
+ 	hpfs_unlock(dir->i_sb);
+ 	return err;
+ }
+@@ -595,7 +614,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		goto end1;
+ 	}
+ 
+-	end:
++end:
+ 	hpfs_i(i)->i_parent_dir = new_dir->i_ino;
+ 	if (S_ISDIR(i->i_mode)) {
+ 		inc_nlink(new_dir);
+@@ -610,6 +629,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ 		brelse(bh);
+ 	}
+ end1:
++	if (!err) {
++		hpfs_update_directory_times(old_dir);
++		hpfs_update_directory_times(new_dir);
++	}
+ 	hpfs_unlock(i->i_sb);
+ 	return err;
+ }
+diff --git a/fs/libfs.c b/fs/libfs.c
+index 02813592e121..f4641fd27bda 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -1176,7 +1176,7 @@ void make_empty_dir_inode(struct inode *inode)
+ 	inode->i_uid = GLOBAL_ROOT_UID;
+ 	inode->i_gid = GLOBAL_ROOT_GID;
+ 	inode->i_rdev = 0;
+-	inode->i_size = 2;
++	inode->i_size = 0;
+ 	inode->i_blkbits = PAGE_SHIFT;
+ 	inode->i_blocks = 0;
+ 
+diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
+index 403c5660b306..a482e312c7b2 100644
+--- a/fs/ocfs2/super.c
++++ b/fs/ocfs2/super.c
+@@ -1550,8 +1550,8 @@ static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
+ 		seq_printf(s, ",localflocks,");
+ 
+ 	if (osb->osb_cluster_stack[0])
+-		seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
+-			   osb->osb_cluster_stack);
++		seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack,
++				  OCFS2_STACK_LABEL_LEN);
+ 	if (opts & OCFS2_MOUNT_USRQUOTA)
+ 		seq_printf(s, ",usrquota");
+ 	if (opts & OCFS2_MOUNT_GRPQUOTA)
+diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
+index bf8537c7f455..155989455a72 100644
+--- a/fs/overlayfs/super.c
++++ b/fs/overlayfs/super.c
+@@ -517,10 +517,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
+ 	struct super_block *sb = dentry->d_sb;
+ 	struct ovl_fs *ufs = sb->s_fs_info;
+ 
+-	seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
++	seq_show_option(m, "lowerdir", ufs->config.lowerdir);
+ 	if (ufs->config.upperdir) {
+-		seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+-		seq_printf(m, ",workdir=%s", ufs->config.workdir);
++		seq_show_option(m, "upperdir", ufs->config.upperdir);
++		seq_show_option(m, "workdir", ufs->config.workdir);
+ 	}
+ 	return 0;
+ }
+diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
+index 0111ad0466ed..cf6fa25f884b 100644
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -714,18 +714,20 @@ static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
+ 		seq_puts(seq, ",acl");
+ 
+ 	if (REISERFS_SB(s)->s_jdev)
+-		seq_printf(seq, ",jdev=%s", REISERFS_SB(s)->s_jdev);
++		seq_show_option(seq, "jdev", REISERFS_SB(s)->s_jdev);
+ 
+ 	if (journal->j_max_commit_age != journal->j_default_max_commit_age)
+ 		seq_printf(seq, ",commit=%d", journal->j_max_commit_age);
+ 
+ #ifdef CONFIG_QUOTA
+ 	if (REISERFS_SB(s)->s_qf_names[USRQUOTA])
+-		seq_printf(seq, ",usrjquota=%s", REISERFS_SB(s)->s_qf_names[USRQUOTA]);
++		seq_show_option(seq, "usrjquota",
++				REISERFS_SB(s)->s_qf_names[USRQUOTA]);
+ 	else if (opts & (1 << REISERFS_USRQUOTA))
+ 		seq_puts(seq, ",usrquota");
+ 	if (REISERFS_SB(s)->s_qf_names[GRPQUOTA])
+-		seq_printf(seq, ",grpjquota=%s", REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
++		seq_show_option(seq, "grpjquota",
++				REISERFS_SB(s)->s_qf_names[GRPQUOTA]);
+ 	else if (opts & (1 << REISERFS_GRPQUOTA))
+ 		seq_puts(seq, ",grpquota");
+ 	if (REISERFS_SB(s)->s_jquota_fmt) {
+diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
+index 74bcbabfa523..b14bbd6bb05f 100644
+--- a/fs/xfs/libxfs/xfs_da_format.h
++++ b/fs/xfs/libxfs/xfs_da_format.h
+@@ -680,8 +680,15 @@ typedef struct xfs_attr_leaf_name_remote {
+ typedef struct xfs_attr_leafblock {
+ 	xfs_attr_leaf_hdr_t	hdr;	/* constant-structure header block */
+ 	xfs_attr_leaf_entry_t	entries[1];	/* sorted on key, not name */
+-	xfs_attr_leaf_name_local_t namelist;	/* grows from bottom of buf */
+-	xfs_attr_leaf_name_remote_t valuelist;	/* grows from bottom of buf */
++	/*
++	 * The rest of the block contains the following structures after the
++	 * leaf entries, growing from the bottom up. The variables are never
++	 * referenced and defining them can actually make gcc optimize away
++	 * accesses to the 'entries' array above index 0 so don't do that.
++	 *
++	 * xfs_attr_leaf_name_local_t namelist;
++	 * xfs_attr_leaf_name_remote_t valuelist;
++	 */
+ } xfs_attr_leafblock_t;
+ 
+ /*
+diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
+index de1ea16f5748..534bbf283d6b 100644
+--- a/fs/xfs/libxfs/xfs_dir2_data.c
++++ b/fs/xfs/libxfs/xfs_dir2_data.c
+@@ -252,7 +252,8 @@ xfs_dir3_data_reada_verify(
+ 		return;
+ 	case cpu_to_be32(XFS_DIR2_DATA_MAGIC):
+ 	case cpu_to_be32(XFS_DIR3_DATA_MAGIC):
+-		xfs_dir3_data_verify(bp);
++		bp->b_ops = &xfs_dir3_data_buf_ops;
++		bp->b_ops->verify_read(bp);
+ 		return;
+ 	default:
+ 		xfs_buf_ioerror(bp, -EFSCORRUPTED);
+diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
+index 41b80d3d3877..06bb4218b362 100644
+--- a/fs/xfs/libxfs/xfs_dir2_node.c
++++ b/fs/xfs/libxfs/xfs_dir2_node.c
+@@ -2132,6 +2132,7 @@ xfs_dir2_node_replace(
+ 	int			error;		/* error return value */
+ 	int			i;		/* btree level */
+ 	xfs_ino_t		inum;		/* new inode number */
++	int			ftype;		/* new file type */
+ 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
+ 	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry being changed */
+ 	int			rval;		/* internal return value */
+@@ -2145,7 +2146,14 @@ xfs_dir2_node_replace(
+ 	state = xfs_da_state_alloc();
+ 	state->args = args;
+ 	state->mp = args->dp->i_mount;
++
++	/*
++	 * We have to save the new inode number and ftype since
++	 * xfs_da3_node_lookup_int() is going to overwrite them
++	 */
+ 	inum = args->inumber;
++	ftype = args->filetype;
++
+ 	/*
+ 	 * Lookup the entry to change in the btree.
+ 	 */
+@@ -2183,7 +2191,7 @@ xfs_dir2_node_replace(
+ 		 * Fill in the new inode number and log the entry.
+ 		 */
+ 		dep->inumber = cpu_to_be64(inum);
+-		args->dp->d_ops->data_put_ftype(dep, args->filetype);
++		args->dp->d_ops->data_put_ftype(dep, ftype);
+ 		xfs_dir2_data_log_entry(args, state->extrablk.bp, dep);
+ 		rval = 0;
+ 	}
+diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
+index 858e1e62bbaa..65a45372fb1f 100644
+--- a/fs/xfs/xfs_super.c
++++ b/fs/xfs/xfs_super.c
+@@ -504,9 +504,9 @@ xfs_showargs(
+ 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
+ 
+ 	if (mp->m_logname)
+-		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
++		seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
+ 	if (mp->m_rtname)
+-		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
++		seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);
+ 
+ 	if (mp->m_dalign > 0)
+ 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 4550be3bb63b..808c43afa8ac 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -198,7 +198,7 @@ struct pci_dev;
+ 
+ int acpi_pci_irq_enable (struct pci_dev *dev);
+ void acpi_penalize_isa_irq(int irq, int active);
+-
++void acpi_penalize_sci_irq(int irq, int trigger, int polarity);
+ void acpi_pci_irq_disable (struct pci_dev *dev);
+ 
+ extern int ec_read(u8 addr, u8 *val);
+diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
+index d86b753e9b30..5ed7771ad386 100644
+--- a/include/linux/iio/iio.h
++++ b/include/linux/iio/iio.h
+@@ -642,6 +642,15 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+ #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
+ 
+ /**
++ * IIO_RAD_TO_DEGREE() - Convert rad to degree
++ * @rad: A value in rad
++ *
++ * Returns the given value converted from rad to degree
++ */
++#define IIO_RAD_TO_DEGREE(rad) \
++	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
++
++/**
+  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
+  * @g: A value in g
+  *
+@@ -649,4 +658,12 @@ int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
+  */
+ #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
+ 
++/**
++ * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
++ * @ms2: A value in meter / second**2
++ *
++ * Returns the given value converted from meter / second**2 to g
++ */
++#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
++
+ #endif /* _INDUSTRIAL_IO_H_ */
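
Both helpers round to nearest by adding half the divisor before the integer
division, and they are scale-agnostic: feed them micro-units and you get
micro-units back. A quick check of the new macros:

#include <stdio.h>

#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

int main(void)
{
	/* pi/2 rad in micro-radians -> ~90 degrees in micro-degrees */
	printf("%llu\n", IIO_RAD_TO_DEGREE(1570796ULL));	/* 90000057 */

	/* 9.80665 m/s^2 in micro-(m/s^2) -> exactly 1 g in micro-g */
	printf("%llu\n", IIO_M_S_2_TO_G(9806650ULL));	/* 1000000 */
	return 0;
}
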
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 3ef3a52068df..6e935e5eab56 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -180,6 +180,8 @@ enum pci_dev_flags {
+ 	PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
+ 	/* Do not use PM reset even if device advertises NoSoftRst- */
+ 	PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
++	/* Get VPD from function 0 VPD */
++	PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ };
+ 
+ enum pci_irq_reroute_variant {
+diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
+index afbb1fd77c77..7848473a5bc8 100644
+--- a/include/linux/seq_file.h
++++ b/include/linux/seq_file.h
+@@ -148,6 +148,41 @@ static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
+ #endif
+ }
+ 
++/**
++ * seq_show_options - display mount options with appropriate escapes.
++ * @m: the seq_file handle
++ * @name: the mount option name
++ * @value: the mount option name's value, can be NULL
++ */
++static inline void seq_show_option(struct seq_file *m, const char *name,
++				   const char *value)
++{
++	seq_putc(m, ',');
++	seq_escape(m, name, ",= \t\n\\");
++	if (value) {
++		seq_putc(m, '=');
++		seq_escape(m, value, ", \t\n\\");
++	}
++}
++
++/**
++ * seq_show_option_n - display mount options with appropriate escapes
++ *		       where @value must be a specific length.
++ * @m: the seq_file handle
++ * @name: the mount option name
++ * @value: the mount option name's value, cannot be NULL
++ * @length: the length of @value to display
++ *
++ * This is a macro since it uses "length" to define the size of the
++ * stack buffer.
++ */
++#define seq_show_option_n(m, name, value, length) {	\
++	char val_buf[length + 1];			\
++	strncpy(val_buf, value, length);		\
++	val_buf[length] = '\0';				\
++	seq_show_option(m, name, val_buf);		\
++}
++
+ #define SEQ_START_TOKEN ((void *)1)
+ /*
+  * Helpers for iteration over list_head-s in seq_files
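
Typical use, sketched for a hypothetical filesystem's ->show_options(): each
option comes out as ",name=value" with commas, whitespace and backslashes
(plus '=' in names) escaped, so a crafted value cannot inject fake options
into /proc/mounts:

struct examplefs_sb_info {	/* hypothetical */
	const char *datadir;
	int snap;
};

static int examplefs_show_options(struct seq_file *m, struct dentry *root)
{
	struct examplefs_sb_info *sbi = root->d_sb->s_fs_info;

	seq_show_option(m, "datadir", sbi->datadir);	/* ",datadir=..." */
	if (sbi->snap)
		seq_show_option(m, "snap", NULL);	/* flag option, no value */
	return 0;
}
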
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index e8a5491be756..4d65b66ae60d 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1319,7 +1319,7 @@ static int cgroup_show_options(struct seq_file *seq,
+ 
+ 	for_each_subsys(ss, ssid)
+ 		if (root->subsys_mask & (1 << ssid))
+-			seq_printf(seq, ",%s", ss->name);
++			seq_show_option(seq, ss->name, NULL);
+ 	if (root->flags & CGRP_ROOT_NOPREFIX)
+ 		seq_puts(seq, ",noprefix");
+ 	if (root->flags & CGRP_ROOT_XATTR)
+@@ -1327,13 +1327,14 @@ static int cgroup_show_options(struct seq_file *seq,
+ 
+ 	spin_lock(&release_agent_path_lock);
+ 	if (strlen(root->release_agent_path))
+-		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
++		seq_show_option(seq, "release_agent",
++				root->release_agent_path);
+ 	spin_unlock(&release_agent_path_lock);
+ 
+ 	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
+ 		seq_puts(seq, ",clone_children");
+ 	if (strlen(root->name))
+-		seq_printf(seq, ",name=%s", root->name);
++		seq_show_option(seq, "name", root->name);
+ 	return 0;
+ }
+ 
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 123673291ffb..e6910526c84b 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5328,6 +5328,14 @@ static int sched_cpu_active(struct notifier_block *nfb,
+ 	case CPU_STARTING:
+ 		set_cpu_rq_start_time();
+ 		return NOTIFY_OK;
++	case CPU_ONLINE:
++		/*
++		 * At this point a starting CPU has marked itself as online via
++		 * set_cpu_online(). But it might not yet have marked itself
++		 * as active, which is essential from here on.
++		 *
++		 * Thus, fall-through and help the starting CPU along.
++		 */
+ 	case CPU_DOWN_FAILED:
+ 		set_cpu_active((long)hcpu, true);
+ 		return NOTIFY_OK;
+diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
+index 79e8f71aef5b..3f76eb84b395 100644
+--- a/net/ceph/ceph_common.c
++++ b/net/ceph/ceph_common.c
+@@ -495,8 +495,11 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client)
+ 	struct ceph_options *opt = client->options;
+ 	size_t pos = m->count;
+ 
+-	if (opt->name)
+-		seq_printf(m, "name=%s,", opt->name);
++	if (opt->name) {
++		seq_puts(m, "name=");
++		seq_escape(m, opt->name, ", \t\n\\");
++		seq_putc(m, ',');
++	}
+ 	if (opt->key)
+ 		seq_puts(m, "secret=<hidden>,");
+ 
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 7f8d7f19e044..280235cc3a98 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1095,7 +1095,7 @@ static void selinux_write_opts(struct seq_file *m,
+ 		seq_puts(m, prefix);
+ 		if (has_comma)
+ 			seq_putc(m, '\"');
+-		seq_puts(m, opts->mnt_opts[i]);
++		seq_escape(m, opts->mnt_opts[i], "\"\n\\");
+ 		if (has_comma)
+ 			seq_putc(m, '\"');
+ 	}
+diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
+index 4373ada95648..3a91a00fb973 100644
+--- a/sound/soc/codecs/adav80x.c
++++ b/sound/soc/codecs/adav80x.c
+@@ -864,7 +864,6 @@ const struct regmap_config adav80x_regmap_config = {
+ 	.val_bits = 8,
+ 	.pad_bits = 1,
+ 	.reg_bits = 7,
+-	.read_flag_mask = 0x01,
+ 
+ 	.max_register = ADAV80X_PLL_OUTE,
+ 
+diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
+index eff4b4d512b7..ee91edcf3cb0 100644
+--- a/sound/soc/codecs/arizona.c
++++ b/sound/soc/codecs/arizona.c
+@@ -1610,17 +1610,6 @@ int arizona_init_dai(struct arizona_priv *priv, int id)
+ }
+ EXPORT_SYMBOL_GPL(arizona_init_dai);
+ 
+-static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
+-{
+-	struct arizona_fll *fll = data;
+-
+-	arizona_fll_dbg(fll, "clock OK\n");
+-
+-	complete(&fll->ok);
+-
+-	return IRQ_HANDLED;
+-}
+-
+ static struct {
+ 	unsigned int min;
+ 	unsigned int max;
+@@ -1902,17 +1891,18 @@ static int arizona_is_enabled_fll(struct arizona_fll *fll)
+ static int arizona_enable_fll(struct arizona_fll *fll)
+ {
+ 	struct arizona *arizona = fll->arizona;
+-	unsigned long time_left;
+ 	bool use_sync = false;
+ 	int already_enabled = arizona_is_enabled_fll(fll);
+ 	struct arizona_fll_cfg cfg;
++	int i;
++	unsigned int val;
+ 
+ 	if (already_enabled < 0)
+ 		return already_enabled;
+ 
+ 	if (already_enabled) {
+ 		/* Facilitate smooth refclk across the transition */
+-		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x7,
++		regmap_update_bits_async(fll->arizona->regmap, fll->base + 0x9,
+ 					 ARIZONA_FLL1_GAIN_MASK, 0);
+ 		regmap_update_bits_async(fll->arizona->regmap, fll->base + 1,
+ 					 ARIZONA_FLL1_FREERUN,
+@@ -1964,9 +1954,6 @@ static int arizona_enable_fll(struct arizona_fll *fll)
+ 	if (!already_enabled)
+ 		pm_runtime_get(arizona->dev);
+ 
+-	/* Clear any pending completions */
+-	try_wait_for_completion(&fll->ok);
+-
+ 	regmap_update_bits_async(arizona->regmap, fll->base + 1,
+ 				 ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
+ 	if (use_sync)
+@@ -1978,10 +1965,24 @@ static int arizona_enable_fll(struct arizona_fll *fll)
+ 		regmap_update_bits_async(arizona->regmap, fll->base + 1,
+ 					 ARIZONA_FLL1_FREERUN, 0);
+ 
+-	time_left = wait_for_completion_timeout(&fll->ok,
+-					  msecs_to_jiffies(250));
+-	if (time_left == 0)
++	arizona_fll_dbg(fll, "Waiting for FLL lock...\n");
++	val = 0;
++	for (i = 0; i < 15; i++) {
++		if (i < 5)
++			usleep_range(200, 400);
++		else
++			msleep(20);
++
++		regmap_read(arizona->regmap,
++			    ARIZONA_INTERRUPT_RAW_STATUS_5,
++			    &val);
++		if (val & (ARIZONA_FLL1_CLOCK_OK_STS << (fll->id - 1)))
++			break;
++	}
++	if (i == 15)
+ 		arizona_fll_warn(fll, "Timed out waiting for lock\n");
++	else
++		arizona_fll_dbg(fll, "FLL locked (%d polls)\n", i);
+ 
+ 	return 0;
+ }
+@@ -2066,11 +2067,8 @@ EXPORT_SYMBOL_GPL(arizona_set_fll);
+ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
+ 		     int ok_irq, struct arizona_fll *fll)
+ {
+-	int ret;
+ 	unsigned int val;
+ 
+-	init_completion(&fll->ok);
+-
+ 	fll->id = id;
+ 	fll->base = base;
+ 	fll->arizona = arizona;
+@@ -2092,13 +2090,6 @@ int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
+ 	snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
+ 		 "FLL%d clock OK", id);
+ 
+-	ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
+-				  arizona_fll_clock_ok, fll);
+-	if (ret != 0) {
+-		dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
+-			id, ret);
+-	}
+-
+ 	regmap_update_bits(arizona->regmap, fll->base + 1,
+ 			   ARIZONA_FLL1_FREERUN, 0);
+ 
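
The loop above trades the clock-OK interrupt for a two-phase poll:
five short sleeps of 200 to 400 us for the common fast-lock case, then
20 ms sleeps, giving up after roughly 200 ms. A generic sketch of that
pattern follows; the function, register and mask names are made up:

	#include <linux/delay.h>
	#include <linux/regmap.h>

	/*
	 * Sketch only: poll a hypothetical status register until
	 * ready_mask is set, polling fast at first and backing off
	 * later, as the FLL lock wait above does.
	 */
	static bool demo_wait_ready(struct regmap *map, unsigned int status_reg,
				    unsigned int ready_mask)
	{
		unsigned int val;
		int i;

		for (i = 0; i < 15; i++) {
			if (i < 5)
				usleep_range(200, 400);	/* fast path */
			else
				msleep(20);		/* back off */

			if (regmap_read(map, status_reg, &val) == 0 &&
			    (val & ready_mask))
				return true;
		}
		return false;	/* timed out, roughly 200 ms later */
	}
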
+diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
+index 11ff899b0272..14e8485b5585 100644
+--- a/sound/soc/codecs/arizona.h
++++ b/sound/soc/codecs/arizona.h
+@@ -233,7 +233,6 @@ struct arizona_fll {
+ 	int id;
+ 	unsigned int base;
+ 	unsigned int vco_mult;
+-	struct completion ok;
+ 
+ 	unsigned int fout;
+ 	int sync_src;
+diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
+index 178e55d4d481..06317f7d945f 100644
+--- a/sound/soc/codecs/rt5640.c
++++ b/sound/soc/codecs/rt5640.c
+@@ -985,6 +985,35 @@ static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
+ 	return 0;
+ }
+ 
++static int rt5640_lout_event(struct snd_soc_dapm_widget *w,
++	struct snd_kcontrol *kcontrol, int event)
++{
++	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
++
++	switch (event) {
++	case SND_SOC_DAPM_POST_PMU:
++		hp_amp_power_on(codec);
++		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
++			RT5640_PWR_LM, RT5640_PWR_LM);
++		snd_soc_update_bits(codec, RT5640_OUTPUT,
++			RT5640_L_MUTE | RT5640_R_MUTE, 0);
++		break;
++
++	case SND_SOC_DAPM_PRE_PMD:
++		snd_soc_update_bits(codec, RT5640_OUTPUT,
++			RT5640_L_MUTE | RT5640_R_MUTE,
++			RT5640_L_MUTE | RT5640_R_MUTE);
++		snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
++			RT5640_PWR_LM, 0);
++		break;
++
++	default:
++		return 0;
++	}
++
++	return 0;
++}
++
+ static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
+ 			   struct snd_kcontrol *kcontrol, int event)
+ {
+@@ -1180,13 +1209,16 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
+ 		0, rt5640_spo_l_mix, ARRAY_SIZE(rt5640_spo_l_mix)),
+ 	SND_SOC_DAPM_MIXER("SPOR MIX", SND_SOC_NOPM, 0,
+ 		0, rt5640_spo_r_mix, ARRAY_SIZE(rt5640_spo_r_mix)),
+-	SND_SOC_DAPM_MIXER("LOUT MIX", RT5640_PWR_ANLG1, RT5640_PWR_LM_BIT, 0,
++	SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
+ 		rt5640_lout_mix, ARRAY_SIZE(rt5640_lout_mix)),
+ 	SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
+ 		0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
+ 	SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
+ 		rt5640_hp_event,
+ 		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
++	SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
++		rt5640_lout_event,
++		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ 	SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
+ 		RT5640_PWR_HP_L_BIT, 0, NULL, 0),
+ 	SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
+@@ -1501,8 +1533,10 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
+ 	{"HP R Playback", "Switch", "HP Amp"},
+ 	{"HPOL", NULL, "HP L Playback"},
+ 	{"HPOR", NULL, "HP R Playback"},
+-	{"LOUTL", NULL, "LOUT MIX"},
+-	{"LOUTR", NULL, "LOUT MIX"},
++
++	{"LOUT amp", NULL, "LOUT MIX"},
++	{"LOUTL", NULL, "LOUT amp"},
++	{"LOUTR", NULL, "LOUT amp"},
+ };
+ 
+ static const struct snd_soc_dapm_route rt5640_specific_dapm_routes[] = {
+diff --git a/sound/soc/samsung/arndale_rt5631.c b/sound/soc/samsung/arndale_rt5631.c
+index 8bf2e2c4bafb..9e371eb3e4fa 100644
+--- a/sound/soc/samsung/arndale_rt5631.c
++++ b/sound/soc/samsung/arndale_rt5631.c
+@@ -116,15 +116,6 @@ static int arndale_audio_probe(struct platform_device *pdev)
+ 	return ret;
+ }
+ 
+-static int arndale_audio_remove(struct platform_device *pdev)
+-{
+-	struct snd_soc_card *card = platform_get_drvdata(pdev);
+-
+-	snd_soc_unregister_card(card);
+-
+-	return 0;
+-}
+-
+ static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
+ 	{ .compatible = "samsung,arndale-rt5631", },
+ 	{ .compatible = "samsung,arndale-alc5631", },
+@@ -139,7 +130,6 @@ static struct platform_driver arndale_audio_driver = {
+ 		.of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
+ 	},
+ 	.probe = arndale_audio_probe,
+-	.remove = arndale_audio_remove,
+ };
+ 
+ module_platform_driver(arndale_audio_driver);


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-09-14 15:20 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-09-14 15:20 UTC (permalink / raw
  To: gentoo-commits

commit:     8f2b461875c44efdd4cc8d0f80537f69c8ccf5a3
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 14 15:20:19 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 14 15:20:19 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8f2b4618

Linux patch 4.1.7

 0000_README            |    4 +
 1006_linux-4.1.7.patch | 3075 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3079 insertions(+)

diff --git a/0000_README b/0000_README
index 83ddebf..ad474e3 100644
--- a/0000_README
+++ b/0000_README
@@ -67,6 +67,10 @@ Patch:  1005_linux-4.1.6.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.6
 
+Patch:  1006_linux-4.1.7.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.7
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1006_linux-4.1.7.patch b/1006_linux-4.1.7.patch
new file mode 100644
index 0000000..8d4a5d9
--- /dev/null
+++ b/1006_linux-4.1.7.patch
@@ -0,0 +1,3075 @@
+diff --git a/Makefile b/Makefile
+index 838dabcb7f48..b8591e5f79b8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 6
++SUBLEVEL = 7
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
+index f03a091cd076..dfcc0dd637e5 100644
+--- a/arch/arm/boot/dts/dra7.dtsi
++++ b/arch/arm/boot/dts/dra7.dtsi
+@@ -116,7 +116,7 @@
+ 				ranges = <0 0x2000 0x2000>;
+ 
+ 				scm_conf: scm_conf@0 {
+-					compatible = "syscon";
++					compatible = "syscon", "simple-bus";
+ 					reg = <0x0 0x1400>;
+ 					#address-cells = <1>;
+ 					#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index f74a8ded515f..38c786018a09 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -153,10 +153,10 @@
+ 			interrupt-names = "msi";
+ 			#interrupt-cells = <1>;
+ 			interrupt-map-mask = <0 0 0 0x7>;
+-			interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+-			                <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+-			                <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+-			                <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
++			interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
++			                <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
++			                <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
++			                <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
+ 			clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
+ 				 <&clks IMX6QDL_CLK_LVDS1_GATE>,
+ 				 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
+diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
+index 11a7963be003..2390f387c271 100644
+--- a/arch/arm/boot/dts/omap2430.dtsi
++++ b/arch/arm/boot/dts/omap2430.dtsi
+@@ -51,7 +51,8 @@
+ 				};
+ 
+ 				scm_conf: scm_conf@270 {
+-					compatible = "syscon";
++					compatible = "syscon",
++						     "simple-bus";
+ 					reg = <0x270 0x240>;
+ 					#address-cells = <1>;
+ 					#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
+index f884d6adb71e..84be9da74c7e 100644
+--- a/arch/arm/boot/dts/omap4.dtsi
++++ b/arch/arm/boot/dts/omap4.dtsi
+@@ -191,7 +191,8 @@
+ 				};
+ 
+ 				omap4_padconf_global: omap4_padconf_global@5a0 {
+-					compatible = "syscon";
++					compatible = "syscon",
++						     "simple-bus";
+ 					reg = <0x5a0 0x170>;
+ 					#address-cells = <1>;
+ 					#size-cells = <1>;
+diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
+index 7d24ae0306b5..874a26f9dc0f 100644
+--- a/arch/arm/boot/dts/omap5.dtsi
++++ b/arch/arm/boot/dts/omap5.dtsi
+@@ -180,7 +180,8 @@
+ 				};
+ 
+ 				omap5_padconf_global: omap5_padconf_global@5a0 {
+-					compatible = "syscon";
++					compatible = "syscon",
++						     "simple-bus";
+ 					reg = <0x5a0 0xec>;
+ 					#address-cells = <1>;
+ 					#size-cells = <1>;
+diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile
+index 4c38674c73ec..54d274da7ccb 100644
+--- a/arch/arm/mach-bcm/Makefile
++++ b/arch/arm/mach-bcm/Makefile
+@@ -43,5 +43,5 @@ obj-$(CONFIG_ARCH_BCM_63XX)	:= bcm63xx.o
+ ifeq ($(CONFIG_ARCH_BRCMSTB),y)
+ CFLAGS_platsmp-brcmstb.o	+= -march=armv7-a
+ obj-y				+= brcmstb.o
+-obj-$(CONFIG_SMP)		+= headsmp-brcmstb.o platsmp-brcmstb.o
++obj-$(CONFIG_SMP)		+= platsmp-brcmstb.o
+ endif
+diff --git a/arch/arm/mach-bcm/brcmstb.h b/arch/arm/mach-bcm/brcmstb.h
+deleted file mode 100644
+index ec0c3d112b36..000000000000
+--- a/arch/arm/mach-bcm/brcmstb.h
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/*
+- * Copyright (C) 2013-2014 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation version 2.
+- *
+- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+- * kind, whether express or implied; without even the implied warranty
+- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- */
+-
+-#ifndef __BRCMSTB_H__
+-#define __BRCMSTB_H__
+-
+-void brcmstb_secondary_startup(void);
+-
+-#endif /* __BRCMSTB_H__ */
+diff --git a/arch/arm/mach-bcm/headsmp-brcmstb.S b/arch/arm/mach-bcm/headsmp-brcmstb.S
+deleted file mode 100644
+index 199c1ea58248..000000000000
+--- a/arch/arm/mach-bcm/headsmp-brcmstb.S
++++ /dev/null
+@@ -1,33 +0,0 @@
+-/*
+- * SMP boot code for secondary CPUs
+- * Based on arch/arm/mach-tegra/headsmp.S
+- *
+- * Copyright (C) 2010 NVIDIA, Inc.
+- * Copyright (C) 2013-2014 Broadcom Corporation
+- *
+- * This program is free software; you can redistribute it and/or
+- * modify it under the terms of the GNU General Public License as
+- * published by the Free Software Foundation version 2.
+- *
+- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+- * kind, whether express or implied; without even the implied warranty
+- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+- * GNU General Public License for more details.
+- */
+-
+-#include <asm/assembler.h>
+-#include <linux/linkage.h>
+-#include <linux/init.h>
+-
+-        .section ".text.head", "ax"
+-
+-ENTRY(brcmstb_secondary_startup)
+-        /*
+-         * Ensure CPU is in a sane state by disabling all IRQs and switching
+-         * into SVC mode.
+-         */
+-        setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0
+-
+-        bl      v7_invalidate_l1
+-        b       secondary_startup
+-ENDPROC(brcmstb_secondary_startup)
+diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c
+index e209e6fc7caf..44d6bddf7a4e 100644
+--- a/arch/arm/mach-bcm/platsmp-brcmstb.c
++++ b/arch/arm/mach-bcm/platsmp-brcmstb.c
+@@ -30,8 +30,6 @@
+ #include <asm/mach-types.h>
+ #include <asm/smp_plat.h>
+ 
+-#include "brcmstb.h"
+-
+ enum {
+ 	ZONE_MAN_CLKEN_MASK		= BIT(0),
+ 	ZONE_MAN_RESET_CNTL_MASK	= BIT(1),
+@@ -153,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
+ 	 * Set the reset vector to point to the secondary_startup
+ 	 * routine
+ 	 */
+-	cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup));
++	cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup));
+ 
+ 	/* Unhalt the cpu */
+ 	cpu_rst_cfg_set(cpu, 0);
+diff --git a/arch/arm/mach-berlin/headsmp.S b/arch/arm/mach-berlin/headsmp.S
+index 4a4c56a58ad3..dc82a3486b05 100644
+--- a/arch/arm/mach-berlin/headsmp.S
++++ b/arch/arm/mach-berlin/headsmp.S
+@@ -12,12 +12,6 @@
+ #include <linux/init.h>
+ #include <asm/assembler.h>
+ 
+-ENTRY(berlin_secondary_startup)
+- ARM_BE8(setend be)
+-	bl	v7_invalidate_l1
+-	b       secondary_startup
+-ENDPROC(berlin_secondary_startup)
+-
+ /*
+  * If the following instruction is set in the reset exception vector, CPUs
+  * will fetch the value of the software reset address vector when being
+diff --git a/arch/arm/mach-berlin/platsmp.c b/arch/arm/mach-berlin/platsmp.c
+index 702e7982015a..34a3753e7356 100644
+--- a/arch/arm/mach-berlin/platsmp.c
++++ b/arch/arm/mach-berlin/platsmp.c
+@@ -22,7 +22,6 @@
+ #define RESET_VECT		0x00
+ #define SW_RESET_ADDR		0x94
+ 
+-extern void berlin_secondary_startup(void);
+ extern u32 boot_inst;
+ 
+ static void __iomem *cpu_ctrl;
+@@ -85,7 +84,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
+ 	 * Write the secondary startup address into the SW reset address
+ 	 * vector. This is used by boot_inst.
+ 	 */
+-	writel(virt_to_phys(berlin_secondary_startup), vectors_base + SW_RESET_ADDR);
++	writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR);
+ 
+ 	iounmap(vectors_base);
+ unmap_scu:
+diff --git a/arch/arm/mach-hisi/Makefile b/arch/arm/mach-hisi/Makefile
+index 6b7b3033de0b..659db1933ed3 100644
+--- a/arch/arm/mach-hisi/Makefile
++++ b/arch/arm/mach-hisi/Makefile
+@@ -6,4 +6,4 @@ CFLAGS_platmcpm.o	:= -march=armv7-a
+ 
+ obj-y	+= hisilicon.o
+ obj-$(CONFIG_MCPM)		+= platmcpm.o
+-obj-$(CONFIG_SMP)		+= platsmp.o hotplug.o headsmp.o
++obj-$(CONFIG_SMP)		+= platsmp.o hotplug.o
+diff --git a/arch/arm/mach-hisi/core.h b/arch/arm/mach-hisi/core.h
+index 92a682d8e939..c7648ef1825c 100644
+--- a/arch/arm/mach-hisi/core.h
++++ b/arch/arm/mach-hisi/core.h
+@@ -12,7 +12,6 @@ extern void hi3xxx_cpu_die(unsigned int cpu);
+ extern int hi3xxx_cpu_kill(unsigned int cpu);
+ extern void hi3xxx_set_cpu(int cpu, bool enable);
+ 
+-extern void hisi_secondary_startup(void);
+ extern struct smp_operations hix5hd2_smp_ops;
+ extern void hix5hd2_set_cpu(int cpu, bool enable);
+ extern void hix5hd2_cpu_die(unsigned int cpu);
+diff --git a/arch/arm/mach-hisi/headsmp.S b/arch/arm/mach-hisi/headsmp.S
+deleted file mode 100644
+index 81e35b159e75..000000000000
+--- a/arch/arm/mach-hisi/headsmp.S
++++ /dev/null
+@@ -1,16 +0,0 @@
+-/*
+- *  Copyright (c) 2014 Hisilicon Limited.
+- *  Copyright (c) 2014 Linaro Ltd.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- */
+-#include <linux/linkage.h>
+-#include <linux/init.h>
+-
+-	__CPUINIT
+-
+-ENTRY(hisi_secondary_startup)
+-	bl	v7_invalidate_l1
+-	b	secondary_startup
+diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c
+index 8880c8e8b296..51744127db66 100644
+--- a/arch/arm/mach-hisi/platsmp.c
++++ b/arch/arm/mach-hisi/platsmp.c
+@@ -118,7 +118,7 @@ static int hix5hd2_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+ 	phys_addr_t jumpaddr;
+ 
+-	jumpaddr = virt_to_phys(hisi_secondary_startup);
++	jumpaddr = virt_to_phys(secondary_startup);
+ 	hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr);
+ 	hix5hd2_set_cpu(cpu, true);
+ 	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+@@ -156,7 +156,7 @@ static int hip01_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ 	struct device_node *node;
+ 
+ 
+-	jumpaddr = virt_to_phys(hisi_secondary_startup);
++	jumpaddr = virt_to_phys(secondary_startup);
+ 	hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr);
+ 
+ 	node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
+diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
+index de5047c8a6c8..b5e976816b63 100644
+--- a/arch/arm/mach-imx/headsmp.S
++++ b/arch/arm/mach-imx/headsmp.S
+@@ -25,7 +25,6 @@ diag_reg_offset:
+ 	.endm
+ 
+ ENTRY(v7_secondary_startup)
+-	bl	v7_invalidate_l1
+ 	set_diag_reg
+ 	b	secondary_startup
+ ENDPROC(v7_secondary_startup)
+diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
+index 08d5ed46b996..48e4c4b3cd1c 100644
+--- a/arch/arm/mach-mvebu/headsmp-a9.S
++++ b/arch/arm/mach-mvebu/headsmp-a9.S
+@@ -21,7 +21,6 @@
+ 
+ ENTRY(mvebu_cortex_a9_secondary_startup)
+ ARM_BE8(setend	be)
+-	bl      v7_invalidate_l1
+ 	bl	armada_38x_scu_power_up
+ 	b	secondary_startup
+ ENDPROC(mvebu_cortex_a9_secondary_startup)
+diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
+index 3b56722dfd8a..6833df45d7b1 100644
+--- a/arch/arm/mach-omap2/omap-wakeupgen.c
++++ b/arch/arm/mach-omap2/omap-wakeupgen.c
+@@ -392,6 +392,7 @@ static struct irq_chip wakeupgen_chip = {
+ 	.irq_mask		= wakeupgen_mask,
+ 	.irq_unmask		= wakeupgen_unmask,
+ 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
++	.irq_set_type		= irq_chip_set_type_parent,
+ 	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
+ #ifdef CONFIG_SMP
+ 	.irq_set_affinity	= irq_chip_set_affinity_parent,
+diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
+index d86fe33c5f53..209d9fc5c16c 100644
+--- a/arch/arm/mach-prima2/headsmp.S
++++ b/arch/arm/mach-prima2/headsmp.S
+@@ -15,7 +15,6 @@
+  * ready for them to initialise.
+  */
+ ENTRY(sirfsoc_secondary_startup)
+-	bl v7_invalidate_l1
+         mrc     p15, 0, r0, c0, c0, 5
+         and     r0, r0, #15
+         adr     r4, 1f
+diff --git a/arch/arm/mach-rockchip/core.h b/arch/arm/mach-rockchip/core.h
+index 39bca96b555a..492c048813da 100644
+--- a/arch/arm/mach-rockchip/core.h
++++ b/arch/arm/mach-rockchip/core.h
+@@ -17,4 +17,3 @@ extern char rockchip_secondary_trampoline;
+ extern char rockchip_secondary_trampoline_end;
+ 
+ extern unsigned long rockchip_boot_fn;
+-extern void rockchip_secondary_startup(void);
+diff --git a/arch/arm/mach-rockchip/headsmp.S b/arch/arm/mach-rockchip/headsmp.S
+index 46c22dedf632..d69708b07282 100644
+--- a/arch/arm/mach-rockchip/headsmp.S
++++ b/arch/arm/mach-rockchip/headsmp.S
+@@ -15,14 +15,6 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+ 
+-ENTRY(rockchip_secondary_startup)
+-	mrc	p15, 0, r0, c0, c0, 0	@ read main ID register
+-	ldr	r1, =0x00000c09		@ Cortex-A9 primary part number
+-	teq	r0, r1
+-	beq	v7_invalidate_l1
+-	b	secondary_startup
+-ENDPROC(rockchip_secondary_startup)
+-
+ ENTRY(rockchip_secondary_trampoline)
+ 	ldr	pc, 1f
+ ENDPROC(rockchip_secondary_trampoline)
+diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c
+index 5b4ca3c3c879..2e6ab67e2284 100644
+--- a/arch/arm/mach-rockchip/platsmp.c
++++ b/arch/arm/mach-rockchip/platsmp.c
+@@ -149,8 +149,7 @@ static int __cpuinit rockchip_boot_secondary(unsigned int cpu,
+ 		 * sram_base_addr + 8: start address for pc
+ 		 * */
+ 		udelay(10);
+-		writel(virt_to_phys(rockchip_secondary_startup),
+-			sram_base_addr + 8);
++		writel(virt_to_phys(secondary_startup), sram_base_addr + 8);
+ 		writel(0xDEADBEAF, sram_base_addr + 4);
+ 		dsb_sev();
+ 	}
+@@ -189,7 +188,7 @@ static int __init rockchip_smp_prepare_sram(struct device_node *node)
+ 	}
+ 
+ 	/* set the boot function for the sram code */
+-	rockchip_boot_fn = virt_to_phys(rockchip_secondary_startup);
++	rockchip_boot_fn = virt_to_phys(secondary_startup);
+ 
+ 	/* copy the trampoline to sram, that runs during startup of the core */
+ 	memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
+index afc60bad6fd6..476092b86c6e 100644
+--- a/arch/arm/mach-shmobile/common.h
++++ b/arch/arm/mach-shmobile/common.h
+@@ -14,7 +14,6 @@ extern void shmobile_smp_sleep(void);
+ extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
+ 			      unsigned long arg);
+ extern int shmobile_smp_cpu_disable(unsigned int cpu);
+-extern void shmobile_invalidate_start(void);
+ extern void shmobile_boot_scu(void);
+ extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
+ extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
+diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
+index 69df8bfac167..fa5248c52399 100644
+--- a/arch/arm/mach-shmobile/headsmp-scu.S
++++ b/arch/arm/mach-shmobile/headsmp-scu.S
+@@ -22,7 +22,7 @@
+  * Boot code for secondary CPUs.
+  *
+  * First we turn on L1 cache coherency for our CPU. Then we jump to
+- * shmobile_invalidate_start that invalidates the cache and hands over control
++ * secondary_startup that invalidates the cache and hands over control
+  * to the common ARM startup code.
+  */
+ ENTRY(shmobile_boot_scu)
+@@ -36,7 +36,7 @@ ENTRY(shmobile_boot_scu)
+ 	bic	r2, r2, r3		@ Clear bits of our CPU (Run Mode)
+ 	str	r2, [r0, #8]		@ write back
+ 
+-	b	shmobile_invalidate_start
++	b	secondary_startup
+ ENDPROC(shmobile_boot_scu)
+ 
+ 	.text
+diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
+index 50c491567e11..330c1fc63197 100644
+--- a/arch/arm/mach-shmobile/headsmp.S
++++ b/arch/arm/mach-shmobile/headsmp.S
+@@ -16,13 +16,6 @@
+ #include <asm/assembler.h>
+ #include <asm/memory.h>
+ 
+-#ifdef CONFIG_SMP
+-ENTRY(shmobile_invalidate_start)
+-	bl	v7_invalidate_l1
+-	b	secondary_startup
+-ENDPROC(shmobile_invalidate_start)
+-#endif
+-
+ /*
+  * Reset vector for secondary CPUs.
+  * This will be mapped at address 0 by SBAR register.
+diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
+index f483b560b066..b0790fc32282 100644
+--- a/arch/arm/mach-shmobile/platsmp-apmu.c
++++ b/arch/arm/mach-shmobile/platsmp-apmu.c
+@@ -133,7 +133,7 @@ void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus,
+ int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+ 	/* For this particular CPU register boot vector */
+-	shmobile_smp_hook(cpu, virt_to_phys(shmobile_invalidate_start), 0);
++	shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0);
+ 
+ 	return apmu_wrap(cpu, apmu_power_on);
+ }
+diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
+index a0f3b1cd497c..767c09e954a0 100644
+--- a/arch/arm/mach-socfpga/core.h
++++ b/arch/arm/mach-socfpga/core.h
+@@ -31,7 +31,6 @@
+ 
+ #define RSTMGR_MPUMODRST_CPU1		0x2     /* CPU1 Reset */
+ 
+-extern void socfpga_secondary_startup(void);
+ extern void __iomem *socfpga_scu_base_addr;
+ 
+ extern void socfpga_init_clocks(void);
+diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
+index f65ea0af4af3..5bb016427107 100644
+--- a/arch/arm/mach-socfpga/headsmp.S
++++ b/arch/arm/mach-socfpga/headsmp.S
+@@ -30,8 +30,3 @@ ENTRY(secondary_trampoline)
+ 1:	.long	.
+ 	.long	socfpga_cpu1start_addr
+ ENTRY(secondary_trampoline_end)
+-
+-ENTRY(socfpga_secondary_startup)
+-       bl      v7_invalidate_l1
+-       b       secondary_startup
+-ENDPROC(socfpga_secondary_startup)
+diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
+index c64d89b7c0ca..79c5336c569f 100644
+--- a/arch/arm/mach-socfpga/platsmp.c
++++ b/arch/arm/mach-socfpga/platsmp.c
+@@ -40,7 +40,7 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ 
+ 		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+ 
+-		writel(virt_to_phys(socfpga_secondary_startup),
++		writel(virt_to_phys(secondary_startup),
+ 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
+ 
+ 		flush_cache_all();
+diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
+index e48a74458c25..fffad2426ee4 100644
+--- a/arch/arm/mach-tegra/Makefile
++++ b/arch/arm/mach-tegra/Makefile
+@@ -19,7 +19,7 @@ obj-$(CONFIG_ARCH_TEGRA_3x_SOC)		+= pm-tegra30.o
+ ifeq ($(CONFIG_CPU_IDLE),y)
+ obj-$(CONFIG_ARCH_TEGRA_3x_SOC)		+= cpuidle-tegra30.o
+ endif
+-obj-$(CONFIG_SMP)			+= platsmp.o headsmp.o
++obj-$(CONFIG_SMP)			+= platsmp.o
+ obj-$(CONFIG_HOTPLUG_CPU)               += hotplug.o
+ 
+ obj-$(CONFIG_ARCH_TEGRA_114_SOC)	+= sleep-tegra30.o
+diff --git a/arch/arm/mach-tegra/headsmp.S b/arch/arm/mach-tegra/headsmp.S
+deleted file mode 100644
+index 2072e7322c39..000000000000
+--- a/arch/arm/mach-tegra/headsmp.S
++++ /dev/null
+@@ -1,12 +0,0 @@
+-#include <linux/linkage.h>
+-#include <linux/init.h>
+-
+-#include "sleep.h"
+-
+-        .section ".text.head", "ax"
+-
+-ENTRY(tegra_secondary_startup)
+-        check_cpu_part_num 0xc09, r8, r9
+-        bleq    v7_invalidate_l1
+-        b       secondary_startup
+-ENDPROC(tegra_secondary_startup)
+diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
+index 894c5c472184..6fd9db54887e 100644
+--- a/arch/arm/mach-tegra/reset.c
++++ b/arch/arm/mach-tegra/reset.c
+@@ -94,7 +94,7 @@ void __init tegra_cpu_reset_handler_init(void)
+ 	__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] =
+ 		*((u32 *)cpu_possible_mask);
+ 	__tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] =
+-		virt_to_phys((void *)tegra_secondary_startup);
++		virt_to_phys((void *)secondary_startup);
+ #endif
+ 
+ #ifdef CONFIG_PM_SLEEP
+diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
+index 29c3dec0126a..9c479c7925b8 100644
+--- a/arch/arm/mach-tegra/reset.h
++++ b/arch/arm/mach-tegra/reset.h
+@@ -37,7 +37,6 @@ void __tegra_cpu_reset_handler_start(void);
+ void __tegra_cpu_reset_handler(void);
+ void __tegra20_cpu1_resettable_status_offset(void);
+ void __tegra_cpu_reset_handler_end(void);
+-void tegra_secondary_startup(void);
+ 
+ #ifdef CONFIG_PM_SLEEP
+ #define tegra_cpu_lp1_mask \
+diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
+index 382c60e9aa16..7038cae95ddc 100644
+--- a/arch/arm/mach-zynq/common.h
++++ b/arch/arm/mach-zynq/common.h
+@@ -17,8 +17,6 @@
+ #ifndef __MACH_ZYNQ_COMMON_H__
+ #define __MACH_ZYNQ_COMMON_H__
+ 
+-void zynq_secondary_startup(void);
+-
+ extern int zynq_slcr_init(void);
+ extern int zynq_early_slcr_init(void);
+ extern void zynq_slcr_system_reset(void);
+diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
+index dd8c071941e7..045c72720a4d 100644
+--- a/arch/arm/mach-zynq/headsmp.S
++++ b/arch/arm/mach-zynq/headsmp.S
+@@ -22,8 +22,3 @@ zynq_secondary_trampoline_jump:
+ .globl zynq_secondary_trampoline_end
+ zynq_secondary_trampoline_end:
+ ENDPROC(zynq_secondary_trampoline)
+-
+-ENTRY(zynq_secondary_startup)
+-	bl	v7_invalidate_l1
+-	b	secondary_startup
+-ENDPROC(zynq_secondary_startup)
+diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
+index 52d768ff7857..f66816c49186 100644
+--- a/arch/arm/mach-zynq/platsmp.c
++++ b/arch/arm/mach-zynq/platsmp.c
+@@ -87,10 +87,9 @@ int zynq_cpun_start(u32 address, int cpu)
+ }
+ EXPORT_SYMBOL(zynq_cpun_start);
+ 
+-static int zynq_boot_secondary(unsigned int cpu,
+-						struct task_struct *idle)
++static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle)
+ {
+-	return zynq_cpun_start(virt_to_phys(zynq_secondary_startup), cpu);
++	return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
+ }
+ 
+ /*
+diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
+index 3d1054f11a8a..7911f14c2157 100644
+--- a/arch/arm/mm/proc-v7.S
++++ b/arch/arm/mm/proc-v7.S
+@@ -268,7 +268,10 @@ __v7_ca15mp_setup:
+ __v7_b15mp_setup:
+ __v7_ca17mp_setup:
+ 	mov	r10, #0
+-1:
++1:	adr	r12, __v7_setup_stack		@ the local stack
++	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
++	bl      v7_invalidate_l1
++	ldmia	r12, {r0-r5, lr}
+ #ifdef CONFIG_SMP
+ 	ALT_SMP(mrc	p15, 0, r0, c1, c0, 1)
+ 	ALT_UP(mov	r0, #(1 << 6))		@ fake it for UP
+@@ -277,7 +280,7 @@ __v7_ca17mp_setup:
+ 	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
+ 	mcreq	p15, 0, r0, c1, c0, 1
+ #endif
+-	b	__v7_setup
++	b	__v7_setup_cont
+ 
+ __v7_pj4b_setup:
+ #ifdef CONFIG_CPU_PJ4B
+@@ -335,10 +338,11 @@ __v7_pj4b_setup:
+ 
+ __v7_setup:
+ 	adr	r12, __v7_setup_stack		@ the local stack
+-	stmia	r12, {r0-r5, r7, r9, r11, lr}
+-	bl      v7_flush_dcache_louis
+-	ldmia	r12, {r0-r5, r7, r9, r11, lr}
++	stmia	r12, {r0-r5, lr}		@ v7_invalidate_l1 touches r0-r6
++	bl      v7_invalidate_l1
++	ldmia	r12, {r0-r5, lr}
+ 
++__v7_setup_cont:
+ 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
+ 	and	r10, r0, #0xff000000		@ ARM?
+ 	teq	r10, #0x41000000
+@@ -460,7 +464,7 @@ ENDPROC(__v7_setup)
+ 
+ 	.align	2
+ __v7_setup_stack:
+-	.space	4 * 11				@ 11 registers
++	.space	4 * 7				@ 7 registers
+ 
+ 	__INITDATA
+ 
+diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
+index 8aa791051029..1160434eece0 100644
+--- a/arch/arm/vdso/Makefile
++++ b/arch/arm/vdso/Makefile
+@@ -6,9 +6,15 @@ obj-vdso := vgettimeofday.o datapage.o
+ targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.so.raw vdso.lds
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+ 
+-ccflags-y := -shared -fPIC -fno-common -fno-builtin -fno-stack-protector
+-ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 -DDISABLE_BRANCH_PROFILING
+-ccflags-y += -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++ccflags-y := -fPIC -fno-common -fno-builtin -fno-stack-protector
++ccflags-y += -DDISABLE_BRANCH_PROFILING
++
++VDSO_LDFLAGS := -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
++VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
++VDSO_LDFLAGS += -nostdlib -shared
++VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
++VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
++VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
+ 
+ obj-$(CONFIG_VDSO) += vdso.o
+ extra-$(CONFIG_VDSO) += vdso.lds
+@@ -40,10 +46,8 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
+ 
+ # Actual build commands
+ quiet_cmd_vdsold = VDSO    $@
+-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) \
+-                   $(call cc-ldoption, -Wl$(comma)--build-id) \
+-                   -Wl,-Bsymbolic -Wl,-z,max-page-size=4096 \
+-                   -Wl,-z,common-page-size=4096 -o $@
++      cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
++                   -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+ 
+ quiet_cmd_vdsomunge = MUNGE   $@
+       cmd_vdsomunge = $(objtree)/$(obj)/vdsomunge $< $@
+diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
+index cce18c85d2e8..7778453762d8 100644
+--- a/arch/arm64/kernel/perf_event.c
++++ b/arch/arm64/kernel/perf_event.c
+@@ -1318,7 +1318,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
+ 	/* Don't bother with PPIs; they're already affine */
+ 	irq = platform_get_irq(pdev, 0);
+ 	if (irq >= 0 && irq_is_percpu(irq))
+-		return 0;
++		goto out;
+ 
+ 	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+ 	if (!irqs)
+@@ -1355,6 +1355,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
+ 	else
+ 		kfree(irqs);
+ 
++out:
+ 	cpu_pmu->plat_device = pdev;
+ 	return 0;
+ }
+diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
+index f02530e726f6..85c57158dcd9 100644
+--- a/arch/arm64/kvm/inject_fault.c
++++ b/arch/arm64/kvm/inject_fault.c
+@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, false, addr);
+-
+-	inject_abt64(vcpu, false, addr);
++	else
++		inject_abt64(vcpu, false, addr);
+ }
+ 
+ /**
+@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_abt32(vcpu, true, addr);
+-
+-	inject_abt64(vcpu, true, addr);
++	else
++		inject_abt64(vcpu, true, addr);
+ }
+ 
+ /**
+@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+ {
+ 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
+ 		inject_undef32(vcpu);
+-
+-	inject_undef64(vcpu);
++	else
++		inject_undef64(vcpu);
+ }
+diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
+index ad4d44635c76..a6f6b762c47a 100644
+--- a/arch/mips/kernel/scall64-64.S
++++ b/arch/mips/kernel/scall64-64.S
+@@ -80,7 +80,7 @@ syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move	s0, t2
+ 	move	a0, sp
+-	daddiu	a1, v0, __NR_64_Linux
++	move	a1, v0
+ 	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 2f			# seccomp failed? Skip syscall
+diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
+index 446cc654da56..4b2010654c46 100644
+--- a/arch/mips/kernel/scall64-n32.S
++++ b/arch/mips/kernel/scall64-n32.S
+@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
+ 	SAVE_STATIC
+ 	move	s0, t2
+ 	move	a0, sp
+-	daddiu	a1, v0, __NR_N32_Linux
++	move	a1, v0
+ 	jal	syscall_trace_enter
+ 
+ 	bltz	v0, 2f			# seccomp failed? Skip syscall
+diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
+index 6fe6b182c998..9dfce4e0417d 100644
+--- a/arch/x86/include/asm/sigcontext.h
++++ b/arch/x86/include/asm/sigcontext.h
+@@ -57,9 +57,9 @@ struct sigcontext {
+ 	unsigned long ip;
+ 	unsigned long flags;
+ 	unsigned short cs;
+-	unsigned short __pad2;	/* Was called gs, but was always zero. */
+-	unsigned short __pad1;	/* Was called fs, but was always zero. */
+-	unsigned short ss;
++	unsigned short gs;
++	unsigned short fs;
++	unsigned short __pad0;
+ 	unsigned long err;
+ 	unsigned long trapno;
+ 	unsigned long oldmask;
+diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
+index 16dc4e8a2cd3..d8b9f9081e86 100644
+--- a/arch/x86/include/uapi/asm/sigcontext.h
++++ b/arch/x86/include/uapi/asm/sigcontext.h
+@@ -177,24 +177,9 @@ struct sigcontext {
+ 	__u64 rip;
+ 	__u64 eflags;		/* RFLAGS */
+ 	__u16 cs;
+-
+-	/*
+-	 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"),
+-	 * Linux saved and restored fs and gs in these slots.  This
+-	 * was counterproductive, as fsbase and gsbase were never
+-	 * saved, so arch_prctl was presumably unreliable.
+-	 *
+-	 * If these slots are ever needed for any other purpose, there
+-	 * is some risk that very old 64-bit binaries could get
+-	 * confused.  I doubt that many such binaries still work,
+-	 * though, since the same patch in 2.5.64 also removed the
+-	 * 64-bit set_thread_area syscall, so it appears that there is
+-	 * no TLS API that works in both pre- and post-2.5.64 kernels.
+-	 */
+-	__u16 __pad2;		/* Was gs. */
+-	__u16 __pad1;		/* Was fs. */
+-
+-	__u16 ss;
++	__u16 gs;
++	__u16 fs;
++	__u16 __pad0;
+ 	__u64 err;
+ 	__u64 trapno;
+ 	__u64 oldmask;
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index dcb52850a28f..cde732c1b495 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -1424,7 +1424,7 @@ static inline void __x2apic_disable(void)
+ {
+ 	u64 msr;
+ 
+-	if (cpu_has_apic)
++	if (!cpu_has_apic)
+ 		return;
+ 
+ 	rdmsrl(MSR_IA32_APICBASE, msr);
+@@ -1483,10 +1483,13 @@ void x2apic_setup(void)
+ 
+ static __init void x2apic_disable(void)
+ {
+-	u32 x2apic_id;
++	u32 x2apic_id, state = x2apic_state;
+ 
+-	if (x2apic_state != X2APIC_ON)
+-		goto out;
++	x2apic_mode = 0;
++	x2apic_state = X2APIC_DISABLED;
++
++	if (state != X2APIC_ON)
++		return;
+ 
+ 	x2apic_id = read_apic_id();
+ 	if (x2apic_id >= 255)
+@@ -1494,9 +1497,6 @@ static __init void x2apic_disable(void)
+ 
+ 	__x2apic_disable();
+ 	register_lapic_address(mp_lapic_addr);
+-out:
+-	x2apic_state = X2APIC_DISABLED;
+-	x2apic_mode = 0;
+ }
+ 
+ static __init void x2apic_enable(void)
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index 6e338e3b1dc0..971743774248 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -453,6 +453,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+ static void mwait_idle(void)
+ {
+ 	if (!current_set_polling_and_test()) {
++		trace_cpu_idle_rcuidle(1, smp_processor_id());
+ 		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
+ 			smp_mb(); /* quirk */
+ 			clflush((void *)&current_thread_info()->flags);
+@@ -464,6 +465,7 @@ static void mwait_idle(void)
+ 			__sti_mwait(0, 0);
+ 		else
+ 			local_irq_enable();
++		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
+ 	} else {
+ 		local_irq_enable();
+ 	}
+diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
+index 1ea14fd53933..e0fd5f47fbb9 100644
+--- a/arch/x86/kernel/signal.c
++++ b/arch/x86/kernel/signal.c
+@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+ 		COPY(r15);
+ #endif /* CONFIG_X86_64 */
+ 
++#ifdef CONFIG_X86_32
+ 		COPY_SEG_CPL3(cs);
+ 		COPY_SEG_CPL3(ss);
++#else /* !CONFIG_X86_32 */
++		/* Kernel saves and restores only the CS segment register on signals,
++		 * which is the bare minimum needed to allow mixed 32/64-bit code.
++		 * App's signal handler can save/restore other segments if needed. */
++		COPY_SEG_CPL3(cs);
++#endif /* CONFIG_X86_32 */
+ 
+ 		get_user_ex(tmpflags, &sc->flags);
+ 		regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
+ #else /* !CONFIG_X86_32 */
+ 		put_user_ex(regs->flags, &sc->flags);
+ 		put_user_ex(regs->cs, &sc->cs);
+-		put_user_ex(0, &sc->__pad2);
+-		put_user_ex(0, &sc->__pad1);
+-		put_user_ex(regs->ss, &sc->ss);
++		put_user_ex(0, &sc->gs);
++		put_user_ex(0, &sc->fs);
+ #endif /* CONFIG_X86_32 */
+ 
+ 		put_user_ex(fpstate, &sc->fpstate);
+@@ -450,19 +456,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
+ 
+ 	regs->sp = (unsigned long)frame;
+ 
+-	/*
+-	 * Set up the CS and SS registers to run signal handlers in
+-	 * 64-bit mode, even if the handler happens to be interrupting
+-	 * 32-bit or 16-bit code.
+-	 *
+-	 * SS is subtle.  In 64-bit mode, we don't need any particular
+-	 * SS descriptor, but we do need SS to be valid.  It's possible
+-	 * that the old SS is entirely bogus -- this can happen if the
+-	 * signal we're trying to deliver is #GP or #SS caused by a bad
+-	 * SS value.
+-	 */
++	/* Set up the CS register to run signal handlers in 64-bit mode,
++	   even if the handler happens to be interrupting 32-bit code. */
+ 	regs->cs = __USER_CS;
+-	regs->ss = __USER_DS;
+ 
+ 	return 0;
+ }
+diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
+index e88fda867a33..484145368a24 100644
+--- a/arch/x86/xen/Kconfig
++++ b/arch/x86/xen/Kconfig
+@@ -8,7 +8,7 @@ config XEN
+ 	select PARAVIRT_CLOCK
+ 	select XEN_HAVE_PVMMU
+ 	depends on X86_64 || (X86_32 && X86_PAE)
+-	depends on X86_TSC
++	depends on X86_LOCAL_APIC && X86_TSC
+ 	help
+ 	  This is the Linux Xen port.  Enabling this will allow the
+ 	  kernel to boot in a paravirtualized environment under the
+@@ -17,7 +17,7 @@ config XEN
+ config XEN_DOM0
+ 	def_bool y
+ 	depends on XEN && PCI_XEN && SWIOTLB_XEN
+-	depends on X86_LOCAL_APIC && X86_IO_APIC && ACPI && PCI
++	depends on X86_IO_APIC && ACPI && PCI
+ 
+ config XEN_PVHVM
+ 	def_bool y
+diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
+index 7322755f337a..4b6e29ac0968 100644
+--- a/arch/x86/xen/Makefile
++++ b/arch/x86/xen/Makefile
+@@ -13,13 +13,13 @@ CFLAGS_mmu.o			:= $(nostackp)
+ obj-y		:= enlighten.o setup.o multicalls.o mmu.o irq.o \
+ 			time.o xen-asm.o xen-asm_$(BITS).o \
+ 			grant-table.o suspend.o platform-pci-unplug.o \
+-			p2m.o
++			p2m.o apic.o
+ 
+ obj-$(CONFIG_EVENT_TRACING) += trace.o
+ 
+ obj-$(CONFIG_SMP)		+= smp.o
+ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
+ obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o
+-obj-$(CONFIG_XEN_DOM0)		+= apic.o vga.o
++obj-$(CONFIG_XEN_DOM0)		+= vga.o
+ obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
+ obj-$(CONFIG_XEN_EFI)		+= efi.o
+diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
+index 9e195c683549..bef30cbb56c4 100644
+--- a/arch/x86/xen/xen-ops.h
++++ b/arch/x86/xen/xen-ops.h
+@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
+ 
+ #ifdef CONFIG_XEN_DOM0
+ void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+-void __init xen_init_apic(void);
+ #else
+ static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+ 				       size_t size)
+ {
+ }
+-static inline void __init xen_init_apic(void)
+-{
+-}
+ #endif
+ 
++void __init xen_init_apic(void);
++
+ #ifdef CONFIG_XEN_EFI
+ extern void xen_efi_init(void);
+ #else
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 12600bfffca9..e0057d035200 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
+  * Description:
+  *    Enables a low level driver to set a hard upper limit,
+  *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+- *    the device driver based upon the combined capabilities of I/O
+- *    controller and storage device.
++ *    the device driver based upon the capabilities of the I/O
++ *    controller.
+  *
+  *    max_sectors is a soft limit imposed by the block layer for
+  *    filesystem type requests.  This value can be overridden on a
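
The distinction being documented: max_hw_sectors is the hard ceiling a
driver reports for its controller, while max_sectors is the softer
limit the block layer applies to filesystem requests (tunable via
max_sectors_kb in sysfs, but never above the hardware limit). A driver
typically sets the ceiling once at queue setup; the function name and
the value here are illustrative only:

	#include <linux/blkdev.h>

	/*
	 * Sketch: during probe, cap request size at what the
	 * controller's DMA engine can handle (value is made up).
	 */
	static void demo_setup_queue(struct request_queue *q)
	{
		blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB in 512-byte sectors */
	}
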
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 41c99be9bd41..e0064d180f04 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -694,11 +694,11 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
+  *	RETURNS:
+  *	Block address read from @tf.
+  */
+-u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
++u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
+ {
+ 	u64 block = 0;
+ 
+-	if (!dev || tf->flags & ATA_TFLAG_LBA) {
++	if (tf->flags & ATA_TFLAG_LBA) {
+ 		if (tf->flags & ATA_TFLAG_LBA48) {
+ 			block |= (u64)tf->hob_lbah << 40;
+ 			block |= (u64)tf->hob_lbam << 32;
+@@ -2147,24 +2147,6 @@ static int ata_dev_config_ncq(struct ata_device *dev,
+ 	return 0;
+ }
+ 
+-static void ata_dev_config_sense_reporting(struct ata_device *dev)
+-{
+-	unsigned int err_mask;
+-
+-	if (!ata_id_has_sense_reporting(dev->id))
+-		return;
+-
+-	if (ata_id_sense_reporting_enabled(dev->id))
+-		return;
+-
+-	err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
+-	if (err_mask) {
+-		ata_dev_dbg(dev,
+-			    "failed to enable Sense Data Reporting, Emask 0x%x\n",
+-			    err_mask);
+-	}
+-}
+-
+ /**
+  *	ata_dev_configure - Configure the specified ATA/ATAPI device
+  *	@dev: Target device to configure
+@@ -2387,7 +2369,7 @@ int ata_dev_configure(struct ata_device *dev)
+ 					dev->devslp_timing[i] = sata_setting[j];
+ 				}
+ 		}
+-		ata_dev_config_sense_reporting(dev);
++
+ 		dev->cdb_len = 16;
+ 	}
+ 
+@@ -4248,6 +4230,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
++	{ "FCCT*M500*",			NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
+ 	/* devices that don't properly handle TRIM commands */
+ 	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 7465031a893c..cb0508af1459 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1592,8 +1592,6 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
+ 	tf->hob_lbah = buf[10];
+ 	tf->nsect = buf[12];
+ 	tf->hob_nsect = buf[13];
+-	if (ata_id_has_ncq_autosense(dev->id))
+-		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
+ 
+ 	return 0;
+ }
+@@ -1630,70 +1628,6 @@ unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
+ }
+ 
+ /**
+- *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
+- *	@dev: device to perform REQUEST_SENSE_SENSE_DATA_EXT to
+- *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+- *	@dfl_sense_key: default sense key to use
+- *
+- *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
+- *	SENSE.  This function is EH helper.
+- *
+- *	LOCKING:
+- *	Kernel thread context (may sleep).
+- *
+- *	RETURNS:
+- *	encoded sense data on success, 0 on failure or if sense data
+- *	is not available.
+- */
+-static u32 ata_eh_request_sense(struct ata_queued_cmd *qc,
+-				struct scsi_cmnd *cmd)
+-{
+-	struct ata_device *dev = qc->dev;
+-	struct ata_taskfile tf;
+-	unsigned int err_mask;
+-
+-	if (!cmd)
+-		return 0;
+-
+-	DPRINTK("ATA request sense\n");
+-	ata_dev_warn(dev, "request sense\n");
+-	if (!ata_id_sense_reporting_enabled(dev->id)) {
+-		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
+-		return 0;
+-	}
+-	ata_tf_init(dev, &tf);
+-
+-	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
+-	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+-	tf.command = ATA_CMD_REQ_SENSE_DATA;
+-	tf.protocol = ATA_PROT_NODATA;
+-
+-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+-	/*
+-	 * ACS-4 states:
+-	 * The device may set the SENSE DATA AVAILABLE bit to one in the
+-	 * STATUS field and clear the ERROR bit to zero in the STATUS field
+-	 * to indicate that the command returned completion without an error
+-	 * and the sense data described in table 306 is available.
+-	 *
+-	 * IOW the 'ATA_SENSE' bit might not be set even though valid
+-	 * sense data is available.
+-	 * So check for both.
+-	 */
+-	if ((tf.command & ATA_SENSE) ||
+-		tf.lbah != 0 || tf.lbam != 0 || tf.lbal != 0) {
+-		ata_scsi_set_sense(cmd, tf.lbah, tf.lbam, tf.lbal);
+-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+-		ata_dev_warn(dev, "sense data %02x/%02x/%02x\n",
+-			     tf.lbah, tf.lbam, tf.lbal);
+-	} else {
+-		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
+-			     tf.command, err_mask);
+-	}
+-	return err_mask;
+-}
+-
+-/**
+  *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
+  *	@dev: device to perform REQUEST_SENSE to
+  *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
+@@ -1855,19 +1789,6 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
+ 	memcpy(&qc->result_tf, &tf, sizeof(tf));
+ 	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
+ 	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
+-	if (qc->result_tf.auxiliary) {
+-		char sense_key, asc, ascq;
+-
+-		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
+-		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
+-		ascq = qc->result_tf.auxiliary & 0xff;
+-		ata_dev_dbg(dev, "NCQ Autosense %02x/%02x/%02x\n",
+-			    sense_key, asc, ascq);
+-		ata_scsi_set_sense(qc->scsicmd, sense_key, asc, ascq);
+-		ata_scsi_set_sense_information(qc->scsicmd, &qc->result_tf);
+-		qc->flags |= ATA_QCFLAG_SENSE_VALID;
+-	}
+-
+ 	ehc->i.err_mask &= ~AC_ERR_DEV;
+ }
+ 
+@@ -1897,27 +1818,6 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
+ 		return ATA_EH_RESET;
+ 	}
+ 
+-	/*
+-	 * Sense data reporting does not work if the
+-	 * device fault bit is set.
+-	 */
+-	if ((stat & ATA_SENSE) && !(stat & ATA_DF) &&
+-	    !(qc->flags & ATA_QCFLAG_SENSE_VALID)) {
+-		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
+-			tmp = ata_eh_request_sense(qc, qc->scsicmd);
+-			if (tmp)
+-				qc->err_mask |= tmp;
+-			else
+-				ata_scsi_set_sense_information(qc->scsicmd, tf);
+-		} else {
+-			ata_dev_warn(qc->dev, "sense data available but port frozen\n");
+-		}
+-	}
+-
+-	/* Set by NCQ autosense or request sense above */
+-	if (qc->flags & ATA_QCFLAG_SENSE_VALID)
+-		return 0;
+-
+ 	if (stat & (ATA_ERR | ATA_DF))
+ 		qc->err_mask |= AC_ERR_DEV;
+ 	else
+@@ -2661,15 +2561,14 @@ static void ata_eh_link_report(struct ata_link *link)
+ 
+ #ifdef CONFIG_ATA_VERBOSE_ERROR
+ 		if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
+-				    ATA_SENSE | ATA_ERR)) {
++				    ATA_ERR)) {
+ 			if (res->command & ATA_BUSY)
+ 				ata_dev_err(qc->dev, "status: { Busy }\n");
+ 			else
+-				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
++				ata_dev_err(qc->dev, "status: { %s%s%s%s}\n",
+ 				  res->command & ATA_DRDY ? "DRDY " : "",
+ 				  res->command & ATA_DF ? "DF " : "",
+ 				  res->command & ATA_DRQ ? "DRQ " : "",
+-				  res->command & ATA_SENSE ? "SENSE " : "",
+ 				  res->command & ATA_ERR ? "ERR " : "");
+ 		}
+ 
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 641a61a59e89..0d7f0da3a269 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -270,28 +270,13 @@ DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR,
+ 	    ata_scsi_park_show, ata_scsi_park_store);
+ EXPORT_SYMBOL_GPL(dev_attr_unload_heads);
+ 
+-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
++static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+ {
+-	if (!cmd)
+-		return;
+-
+ 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ 
+ 	scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+ }
+ 
+-void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
+-				    const struct ata_taskfile *tf)
+-{
+-	u64 information;
+-
+-	if (!cmd)
+-		return;
+-
+-	information = ata_tf_read_block(tf, NULL);
+-	scsi_set_sense_information(cmd->sense_buffer, information);
+-}
+-
+ static ssize_t
+ ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr,
+ 			  const char *buf, size_t count)
+@@ -1792,9 +1777,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
+ 	    ((cdb[2] & 0x20) || need_sense)) {
+ 		ata_gen_passthru_sense(qc);
+ 	} else {
+-		if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
+-			cmd->result = SAM_STAT_CHECK_CONDITION;
+-		} else if (!need_sense) {
++		if (!need_sense) {
+ 			cmd->result = SAM_STAT_GOOD;
+ 		} else {
+ 			/* TODO: decide which descriptor format to use
+diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
+index a998a175f9f1..f840ca18a7c0 100644
+--- a/drivers/ata/libata.h
++++ b/drivers/ata/libata.h
+@@ -67,8 +67,7 @@ extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
+ extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
+ 			   u64 block, u32 n_block, unsigned int tf_flags,
+ 			   unsigned int tag);
+-extern u64 ata_tf_read_block(const struct ata_taskfile *tf,
+-			     struct ata_device *dev);
++extern u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev);
+ extern unsigned ata_exec_internal(struct ata_device *dev,
+ 				  struct ata_taskfile *tf, const u8 *cdb,
+ 				  int dma_dir, void *buf, unsigned int buflen,
+@@ -138,9 +137,6 @@ extern int ata_scsi_add_hosts(struct ata_host *host,
+ 			      struct scsi_host_template *sht);
+ extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
+ extern int ata_scsi_offline_dev(struct ata_device *dev);
+-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq);
+-extern void ata_scsi_set_sense_information(struct scsi_cmnd *cmd,
+-					   const struct ata_taskfile *tf);
+ extern void ata_scsi_media_change_notify(struct ata_device *dev);
+ extern void ata_scsi_hotplug(struct work_struct *work);
+ extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
+diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
+index 81751a49d8bf..56486d92c4e7 100644
+--- a/drivers/base/regmap/regcache-rbtree.c
++++ b/drivers/base/regmap/regcache-rbtree.c
+@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
+ 	if (!blk)
+ 		return -ENOMEM;
+ 
+-	present = krealloc(rbnode->cache_present,
+-		    BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+-	if (!present) {
+-		kfree(blk);
+-		return -ENOMEM;
++	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
++		present = krealloc(rbnode->cache_present,
++				   BITS_TO_LONGS(blklen) * sizeof(*present),
++				   GFP_KERNEL);
++		if (!present) {
++			kfree(blk);
++			return -ENOMEM;
++		}
++
++		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
++		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
++		       * sizeof(*present));
++	} else {
++		present = rbnode->cache_present;
+ 	}
+ 
+ 	/* insert the register value in the correct place in the rbnode block */
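
The fix above hinges on two properties of krealloc(): it preserves the
old contents but leaves any added tail uninitialized, and it is wasted
work when the buffer is already large enough. A standalone sketch of
the same grow-a-bitmap idiom, with a made-up helper name:

	#include <linux/bitops.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/*
	 * Sketch: grow a bitmap from old_bits to new_bits. krealloc()
	 * keeps the old words but does not zero the new ones, so the
	 * extension must be cleared by hand.
	 */
	static unsigned long *demo_grow_bitmap(unsigned long *map,
					       size_t old_bits, size_t new_bits)
	{
		size_t old_longs = BITS_TO_LONGS(old_bits);
		size_t new_longs = BITS_TO_LONGS(new_bits);
		unsigned long *new_map;

		if (new_longs <= old_longs)
			return map;	/* already big enough, reuse as-is */

		new_map = krealloc(map, new_longs * sizeof(*new_map), GFP_KERNEL);
		if (!new_map)
			return NULL;	/* old map is still valid for the caller */

		/* zero only the newly added words */
		memset(new_map + old_longs, 0,
		       (new_longs - old_longs) * sizeof(*new_map));
		return new_map;
	}
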
+diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
+index 713fc9ff1149..3e9ec9523f73 100644
+--- a/drivers/block/xen-blkback/blkback.c
++++ b/drivers/block/xen-blkback/blkback.c
+@@ -362,8 +362,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
+ 		return;
+ 	}
+ 
+-	if (work_pending(&blkif->persistent_purge_work)) {
+-		pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n");
++	if (work_busy(&blkif->persistent_purge_work)) {
++		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
+ 		return;
+ 	}
+ 
+diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
+index 2c61cf8c6f61..89c7371ab2dc 100644
+--- a/drivers/block/xen-blkfront.c
++++ b/drivers/block/xen-blkfront.c
+@@ -1118,8 +1118,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
+ 				 * Add the used indirect page back to the list of
+ 				 * available pages for indirect grefs.
+ 				 */
+-				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
+-				list_add(&indirect_page->lru, &info->indirect_pages);
++				if (!info->feature_persistent) {
++					indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
++					list_add(&indirect_page->lru, &info->indirect_pages);
++				}
+ 				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
+ 				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
+ 			}
+diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
+index 4b93a1efb36d..ac03ba49e9d1 100644
+--- a/drivers/clk/pxa/clk-pxa3xx.c
++++ b/drivers/clk/pxa/clk-pxa3xx.c
+@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
+ PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
+ PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
+ 
+-#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
++#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
+ #define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp,	\
+ 		    div_hp, bit, is_lp, flags)				\
+ 	PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,		\
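
The clk-pxa3xx change above is a one-character fix to an inverted ternary:
clock-enable bits 0-31 live in register CKENA and bits 32 and up in CKENB, but
the old macro returned the banks swapped. A hedged, compilable illustration of
the bank-select idiom, with plain variables standing in for the hardware
registers:

#include <stdio.h>
#include <stdint.h>

static uint32_t CKENA, CKENB;	/* stand-ins for the two enable registers */

/* Bits 0-31 live in bank A, bits 32 and up in bank B -- the fix
 * simply un-swaps the two sides of this ternary. */
static uint32_t *cken_bank(unsigned int bit)
{
	return (bit > 31) ? &CKENB : &CKENA;
}

int main(void)
{
	*cken_bank(12) |= 1u << (12 & 31);	/* lands in CKENA */
	*cken_bank(40) |= 1u << (40 & 31);	/* lands in CKENB */
	printf("CKENA=%08x CKENB=%08x\n",
	       (unsigned)CKENA, (unsigned)CKENB);
	return 0;
}
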
+diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
+index 332c8ef8dae2..0436997e054b 100644
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -909,13 +909,14 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 			  state->buflen_1;
+ 	u32 *sh_desc = ctx->sh_desc_fin, *desc;
+ 	dma_addr_t ptr = ctx->sh_desc_fin_dma;
+-	int sec4_sg_bytes;
++	int sec4_sg_bytes, sec4_sg_src_index;
+ 	int digestsize = crypto_ahash_digestsize(ahash);
+ 	struct ahash_edesc *edesc;
+ 	int ret = 0;
+ 	int sh_len;
+ 
+-	sec4_sg_bytes = (1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry);
++	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
++	sec4_sg_bytes = sec4_sg_src_index * sizeof(struct sec4_sg_entry);
+ 
+ 	/* allocate space for base edesc and hw desc commands, link tables */
+ 	edesc = kmalloc(sizeof(struct ahash_edesc) + DESC_JOB_IO_LEN +
+@@ -942,7 +943,7 @@ static int ahash_final_ctx(struct ahash_request *req)
+ 	state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
+ 						buf, state->buf_dma, buflen,
+ 						last_buflen);
+-	(edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
++	(edesc->sec4_sg + sec4_sg_src_index - 1)->len |= SEC4_SG_LEN_FIN;
+ 
+ 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+ 					    sec4_sg_bytes, DMA_TO_DEVICE);
+diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
+index 08f8d5cd6334..becb738c897b 100644
+--- a/drivers/crypto/nx/nx-sha256.c
++++ b/drivers/crypto/nx/nx-sha256.c
+@@ -71,7 +71,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 	struct sha256_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+-	struct nx_sg *in_sg;
+ 	struct nx_sg *out_sg;
+ 	u64 to_process = 0, leftover, total;
+ 	unsigned long irq_flags;
+@@ -97,7 +96,6 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ 
+-	in_sg = nx_ctx->in_sg;
+ 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ 	max_sg_len = min_t(u64, max_sg_len,
+@@ -114,17 +112,12 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 	}
+ 
+ 	do {
+-		/*
+-		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+-		 * this update. This value is also restricted by the sg list
+-		 * limits.
+-		 */
+-		to_process = total - to_process;
+-		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
++		int used_sgs = 0;
++		struct nx_sg *in_sg = nx_ctx->in_sg;
+ 
+ 		if (buf_len) {
+ 			data_len = buf_len;
+-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
++			in_sg = nx_build_sg_list(in_sg,
+ 						 (u8 *) sctx->buf,
+ 						 &data_len,
+ 						 max_sg_len);
+@@ -133,15 +126,27 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 				rc = -EINVAL;
+ 				goto out;
+ 			}
++			used_sgs = in_sg - nx_ctx->in_sg;
+ 		}
+ 
++		/* to_process: the SHA256_BLOCK_SIZE-aligned chunk to be
++		 * processed in this iteration. This value is restricted
++		 * by the sg list limits and the number of sgs already
++		 * used for leftover data (see above).
++		 * Ideally we could allow NX_PAGE_SIZE * max_sg_len, but
++		 * because the data may not be aligned, we need to
++		 * account for that too. */
++		to_process = min_t(u64, total,
++			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
++		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
++
+ 		data_len = to_process - buf_len;
+ 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ 					 &data_len, max_sg_len);
+ 
+ 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ 
+-		to_process = (data_len + buf_len);
++		to_process = data_len + buf_len;
+ 		leftover = total - to_process;
+ 
+ 		/*
+diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
+index aff0fe58eac0..b6e183d58d73 100644
+--- a/drivers/crypto/nx/nx-sha512.c
++++ b/drivers/crypto/nx/nx-sha512.c
+@@ -71,7 +71,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 	struct sha512_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+-	struct nx_sg *in_sg;
+ 	struct nx_sg *out_sg;
+ 	u64 to_process, leftover = 0, total;
+ 	unsigned long irq_flags;
+@@ -97,7 +96,6 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ 
+-	in_sg = nx_ctx->in_sg;
+ 	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ 			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ 	max_sg_len = min_t(u64, max_sg_len,
+@@ -114,18 +112,12 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 	}
+ 
+ 	do {
+-		/*
+-		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+-		 * this update. This value is also restricted by the sg list
+-		 * limits.
+-		 */
+-		to_process = total - leftover;
+-		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+-		leftover = total - to_process;
++		int used_sgs = 0;
++		struct nx_sg *in_sg = nx_ctx->in_sg;
+ 
+ 		if (buf_len) {
+ 			data_len = buf_len;
+-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
++			in_sg = nx_build_sg_list(in_sg,
+ 						 (u8 *) sctx->buf,
+ 						 &data_len, max_sg_len);
+ 
+@@ -133,8 +125,20 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 				rc = -EINVAL;
+ 				goto out;
+ 			}
++			used_sgs = in_sg - nx_ctx->in_sg;
+ 		}
+ 
++		/* to_process: the SHA512_BLOCK_SIZE-aligned chunk to be
++		 * processed in this iteration. This value is restricted
++		 * by the sg list limits and the number of sgs already
++		 * used for leftover data (see above).
++		 * Ideally we could allow NX_PAGE_SIZE * max_sg_len, but
++		 * because the data may not be aligned, we need to
++		 * account for that too. */
++		to_process = min_t(u64, total,
++			(max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE);
++		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
++
+ 		data_len = to_process - buf_len;
+ 		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+ 					 &data_len, max_sg_len);
+@@ -146,7 +150,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 			goto out;
+ 		}
+ 
+-		to_process = (data_len + buf_len);
++		to_process = data_len + buf_len;
+ 		leftover = total - to_process;
+ 
+ 		/*
+diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
+index 3515b381c131..711d8ad74f11 100644
+--- a/drivers/edac/ppc4xx_edac.c
++++ b/drivers/edac/ppc4xx_edac.c
+@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
+ 	 */
+ 
+ 	for (row = 0; row < mci->nr_csrows; row++) {
+-		struct csrow_info *csi = &mci->csrows[row];
++		struct csrow_info *csi = mci->csrows[row];
+ 
+ 		/*
+ 		 * Get the configuration settings for this
+diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+index 60b0c13d7ff5..aebc4595afa0 100644
+--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
++++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+@@ -559,7 +559,7 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
+-#ifdef CONFIG_PM
++#ifdef CONFIG_PM_SLEEP
+ static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
+ {
+ 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
+index d714a4b5711e..b1fe32b119ef 100644
+--- a/drivers/gpu/drm/i915/intel_dp.c
++++ b/drivers/gpu/drm/i915/intel_dp.c
+@@ -1150,6 +1150,19 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
+ 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
+ }
+ 
++static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
++{
++	/* WaDisableHBR2:skl */
++	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
++		return false;
++
++	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
++	    (INTEL_INFO(dev)->gen >= 9))
++		return true;
++	else
++		return false;
++}
++
+ static int
+ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+ {
+@@ -1163,11 +1176,8 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
+ 
+ 	*source_rates = default_rates;
+ 
+-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+-		/* WaDisableHBR2:skl */
+-		return (DP_LINK_BW_2_7 >> 3) + 1;
+-	else if (INTEL_INFO(dev)->gen >= 8 ||
+-	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
++	/* This depends on the fact that 5.4 is the last value in the array */
++	if (intel_dp_source_supports_hbr2(dev))
+ 		return (DP_LINK_BW_5_4 >> 3) + 1;
+ 	else
+ 		return (DP_LINK_BW_2_7 >> 3) + 1;
+@@ -3783,10 +3793,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
+ 		}
+ 	}
+ 
+-	/* Training Pattern 3 support, both source and sink */
++	/* Training Pattern 3 support: only the Intel platforms that
++	 * support HBR2 also support TP3, hence that check is used along
++	 * with the dpcd check to ensure TP3 can be enabled.
++	 * SKL < B0, due to its WaDisableHBR2, is the only exception where
++	 * TP3 is supported but still not enabled.
++	 */
+ 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
+ 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
+-	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
++	    intel_dp_source_supports_hbr2(dev)) {
+ 		intel_dp->use_tps3 = true;
+ 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
+ 	} else
+diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
+index 424e62197787..9ab7c1c758ae 100644
+--- a/drivers/gpu/drm/i915/intel_lrc.c
++++ b/drivers/gpu/drm/i915/intel_lrc.c
+@@ -848,6 +848,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
+ 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+ 		if (ret)
+ 			goto unpin_ctx_obj;
++
++		ctx_obj->dirty = true;
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+index 654c8daeb5ab..97ad3bcb99a7 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+ 				     true, NULL);
+ 	if (unlikely(ret != 0))
+-		goto out_err;
++		goto out_err_nores;
+ 
+ 	ret = vmw_validate_buffers(dev_priv, sw_context);
+ 	if (unlikely(ret != 0))
+@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 	vmw_resource_relocations_free(&sw_context->res_relocations);
+ 
+ 	vmw_fifo_commit(dev_priv, command_size);
++	mutex_unlock(&dev_priv->binding_mutex);
+ 
+ 	vmw_query_bo_switch_commit(dev_priv, sw_context);
+ 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
+ 		DRM_ERROR("Fence submission error. Syncing.\n");
+ 
+ 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
+-	mutex_unlock(&dev_priv->binding_mutex);
+ 
+ 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
+ 				    (void *) fence);
+diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
+index 008e89bf6f3c..32d52d29cc68 100644
+--- a/drivers/hid/hid-input.c
++++ b/drivers/hid/hid-input.c
+@@ -462,12 +462,15 @@ out:
+ 
+ static void hidinput_cleanup_battery(struct hid_device *dev)
+ {
++	const struct power_supply_desc *psy_desc;
++
+ 	if (!dev->battery)
+ 		return;
+ 
++	psy_desc = dev->battery->desc;
+ 	power_supply_unregister(dev->battery);
+-	kfree(dev->battery->desc->name);
+-	kfree(dev->battery->desc);
++	kfree(psy_desc->name);
++	kfree(psy_desc);
+ 	dev->battery = NULL;
+ }
+ #else  /* !CONFIG_HID_BATTERY_STRENGTH */
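
The hid-input hunk above fixes a use-after-free: power_supply_unregister()
releases the battery object, so its ->desc pointer has to be saved before the
call rather than dereferenced afterwards. Below is a small sketch of that
save-before-free shape; psy_unregister() and the structs are simplified
stand-ins, not the real power-supply API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct psy_desc { char *name; };
struct power_supply { struct psy_desc *desc; };

/* Stand-in for power_supply_unregister(): frees the supply object,
 * which is exactly why ->desc must be captured beforehand. */
static void psy_unregister(struct power_supply *psy)
{
	free(psy);
}

static void cleanup_battery(struct power_supply *battery)
{
	struct psy_desc *desc;

	if (!battery)
		return;

	desc = battery->desc;		/* save before the owner is freed */
	psy_unregister(battery);
	free(desc->name);		/* safe: desc was captured first */
	free(desc);
}

int main(void)
{
	struct psy_desc *d = malloc(sizeof(*d));
	struct power_supply *b = malloc(sizeof(*b));

	if (!d || !b)
		return 1;
	d->name = strdup("hid-battery");
	b->desc = d;
	cleanup_battery(b);
	puts("no use-after-free");
	return 0;
}
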
+diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
+index 94167310e15a..b905d501e752 100644
+--- a/drivers/hid/hid-uclogic.c
++++ b/drivers/hid/hid-uclogic.c
+@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
+ 	for (p = drvdata->rdesc;
+ 	     p <= drvdata->rdesc + drvdata->rsize - 4;) {
+ 		if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
+-		    p[3] < sizeof(params)) {
++		    p[3] < ARRAY_SIZE(params)) {
+ 			v = params[p[3]];
+ 			put_unaligned(cpu_to_le32(v), (s32 *)p);
+ 			p += 4;
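
The hid-uclogic change above is the classic sizeof()/ARRAY_SIZE() confusion:
sizeof(params) is the array's size in bytes, while a bounds check on an index
needs the element count. A self-contained demonstration:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	uint32_t params[6] = { 0 };

	/* 24 bytes vs 6 elements: bounds-checking an index against
	 * sizeof() would let indices 6..23 through and read past
	 * the end of the array. */
	printf("sizeof(params)     = %zu\n", sizeof(params));
	printf("ARRAY_SIZE(params) = %zu\n", ARRAY_SIZE(params));
	return 0;
}
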
+diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
+index 097d7216d98e..c6dc644aa580 100644
+--- a/drivers/input/keyboard/gpio_keys_polled.c
++++ b/drivers/input/keyboard/gpio_keys_polled.c
+@@ -246,7 +246,7 @@ static int gpio_keys_polled_probe(struct platform_device *pdev)
+ 		 * convert it to descriptor.
+ 		 */
+ 		if (!button->gpiod && gpio_is_valid(button->gpio)) {
+-			unsigned flags = 0;
++			unsigned flags = GPIOF_IN;
+ 
+ 			if (button->active_low)
+ 				flags |= GPIOF_ACTIVE_LOW;
+diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
+index 692fe2bc8197..c12bb93334ff 100644
+--- a/drivers/irqchip/irq-crossbar.c
++++ b/drivers/irqchip/irq-crossbar.c
+@@ -68,7 +68,9 @@ static struct irq_chip crossbar_chip = {
+ 	.irq_mask		= irq_chip_mask_parent,
+ 	.irq_unmask		= irq_chip_unmask_parent,
+ 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+-	.irq_set_wake		= irq_chip_set_wake_parent,
++	.irq_set_type		= irq_chip_set_type_parent,
++	.flags			= IRQCHIP_MASK_ON_SUSPEND |
++				  IRQCHIP_SKIP_SET_WAKE,
+ #ifdef CONFIG_SMP
+ 	.irq_set_affinity	= irq_chip_set_affinity_parent,
+ #endif
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 79f694120ddf..cde1d6749017 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -1295,8 +1295,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
+ 		return r;
+ 
+ 	disk_super = dm_block_data(copy);
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+-	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
++	dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
++	dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
+ 	dm_sm_dec_block(pmd->metadata_sm, held_root);
+ 
+ 	return dm_tm_unlock(pmd->tm, copy);
+diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
+index 6ca6dfab50eb..6523903e15fe 100644
+--- a/drivers/mfd/arizona-core.c
++++ b/drivers/mfd/arizona-core.c
+@@ -912,10 +912,6 @@ int arizona_dev_init(struct arizona *arizona)
+ 			     arizona->pdata.gpio_defaults[i]);
+ 	}
+ 
+-	pm_runtime_set_autosuspend_delay(arizona->dev, 100);
+-	pm_runtime_use_autosuspend(arizona->dev);
+-	pm_runtime_enable(arizona->dev);
+-
+ 	/* Chip default */
+ 	if (!arizona->pdata.clk32k_src)
+ 		arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
+@@ -1012,11 +1008,17 @@ int arizona_dev_init(struct arizona *arizona)
+ 					   arizona->pdata.spk_fmt[i]);
+ 	}
+ 
++	pm_runtime_set_active(arizona->dev);
++	pm_runtime_enable(arizona->dev);
++
+ 	/* Set up for interrupts */
+ 	ret = arizona_irq_init(arizona);
+ 	if (ret != 0)
+ 		goto err_reset;
+ 
++	pm_runtime_set_autosuspend_delay(arizona->dev, 100);
++	pm_runtime_use_autosuspend(arizona->dev);
++
+ 	arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
+ 			    arizona_clkgen_err, arizona);
+ 	arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
+@@ -1045,10 +1047,6 @@ int arizona_dev_init(struct arizona *arizona)
+ 		goto err_irq;
+ 	}
+ 
+-#ifdef CONFIG_PM
+-	regulator_disable(arizona->dcvdd);
+-#endif
+-
+ 	return 0;
+ 
+ err_irq:
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
+index 72427f21edff..edfec540c893 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
+@@ -855,6 +855,18 @@ static int pcan_usb_probe(struct usb_interface *intf)
+ /*
+  * describe the PCAN-USB adapter
+  */
++static const struct can_bittiming_const pcan_usb_const = {
++	.name = "pcan_usb",
++	.tseg1_min = 1,
++	.tseg1_max = 16,
++	.tseg2_min = 1,
++	.tseg2_max = 8,
++	.sjw_max = 4,
++	.brp_min = 1,
++	.brp_max = 64,
++	.brp_inc = 1,
++};
++
+ const struct peak_usb_adapter pcan_usb = {
+ 	.name = "PCAN-USB",
+ 	.device_id = PCAN_USB_PRODUCT_ID,
+@@ -863,17 +875,7 @@ const struct peak_usb_adapter pcan_usb = {
+ 	.clock = {
+ 		.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
+ 	},
+-	.bittiming_const = {
+-		.name = "pcan_usb",
+-		.tseg1_min = 1,
+-		.tseg1_max = 16,
+-		.tseg2_min = 1,
+-		.tseg2_max = 8,
+-		.sjw_max = 4,
+-		.brp_min = 1,
+-		.brp_max = 64,
+-		.brp_inc = 1,
+-	},
++	.bittiming_const = &pcan_usb_const,
+ 
+ 	/* size of device private data */
+ 	.sizeof_dev_private = sizeof(struct pcan_usb),
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 7921cff93a63..5a2e341a6d1e 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -792,9 +792,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
+ 	dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx];
+ 
+ 	dev->can.clock = peak_usb_adapter->clock;
+-	dev->can.bittiming_const = &peak_usb_adapter->bittiming_const;
++	dev->can.bittiming_const = peak_usb_adapter->bittiming_const;
+ 	dev->can.do_set_bittiming = peak_usb_set_bittiming;
+-	dev->can.data_bittiming_const = &peak_usb_adapter->data_bittiming_const;
++	dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const;
+ 	dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming;
+ 	dev->can.do_set_mode = peak_usb_set_mode;
+ 	dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+index 9e624f05ad4d..506fe506c9d3 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+@@ -48,8 +48,8 @@ struct peak_usb_adapter {
+ 	u32 device_id;
+ 	u32 ctrlmode_supported;
+ 	struct can_clock clock;
+-	const struct can_bittiming_const bittiming_const;
+-	const struct can_bittiming_const data_bittiming_const;
++	const struct can_bittiming_const * const bittiming_const;
++	const struct can_bittiming_const * const data_bittiming_const;
+ 	unsigned int ctrl_count;
+ 
+ 	int (*intf_probe)(struct usb_interface *intf);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 09d14e70abd7..ce44a033f63b 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -990,6 +990,30 @@ static void pcan_usb_fd_free(struct peak_usb_device *dev)
+ }
+ 
+ /* describes the PCAN-USB FD adapter */
++static const struct can_bittiming_const pcan_usb_fd_const = {
++	.name = "pcan_usb_fd",
++	.tseg1_min = 1,
++	.tseg1_max = 64,
++	.tseg2_min = 1,
++	.tseg2_max = 16,
++	.sjw_max = 16,
++	.brp_min = 1,
++	.brp_max = 1024,
++	.brp_inc = 1,
++};
++
++static const struct can_bittiming_const pcan_usb_fd_data_const = {
++	.name = "pcan_usb_fd",
++	.tseg1_min = 1,
++	.tseg1_max = 16,
++	.tseg2_min = 1,
++	.tseg2_max = 8,
++	.sjw_max = 4,
++	.brp_min = 1,
++	.brp_max = 1024,
++	.brp_inc = 1,
++};
++
+ const struct peak_usb_adapter pcan_usb_fd = {
+ 	.name = "PCAN-USB FD",
+ 	.device_id = PCAN_USBFD_PRODUCT_ID,
+@@ -999,28 +1023,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
+ 	.clock = {
+ 		.freq = PCAN_UFD_CRYSTAL_HZ,
+ 	},
+-	.bittiming_const = {
+-		.name = "pcan_usb_fd",
+-		.tseg1_min = 1,
+-		.tseg1_max = 64,
+-		.tseg2_min = 1,
+-		.tseg2_max = 16,
+-		.sjw_max = 16,
+-		.brp_min = 1,
+-		.brp_max = 1024,
+-		.brp_inc = 1,
+-	},
+-	.data_bittiming_const = {
+-		.name = "pcan_usb_fd",
+-		.tseg1_min = 1,
+-		.tseg1_max = 16,
+-		.tseg2_min = 1,
+-		.tseg2_max = 8,
+-		.sjw_max = 4,
+-		.brp_min = 1,
+-		.brp_max = 1024,
+-		.brp_inc = 1,
+-	},
++	.bittiming_const = &pcan_usb_fd_const,
++	.data_bittiming_const = &pcan_usb_fd_data_const,
+ 
+ 	/* size of device private data */
+ 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+@@ -1058,6 +1062,30 @@ const struct peak_usb_adapter pcan_usb_fd = {
+ };
+ 
+ /* describes the PCAN-USB Pro FD adapter */
++static const struct can_bittiming_const pcan_usb_pro_fd_const = {
++	.name = "pcan_usb_pro_fd",
++	.tseg1_min = 1,
++	.tseg1_max = 64,
++	.tseg2_min = 1,
++	.tseg2_max = 16,
++	.sjw_max = 16,
++	.brp_min = 1,
++	.brp_max = 1024,
++	.brp_inc = 1,
++};
++
++static const struct can_bittiming_const pcan_usb_pro_fd_data_const = {
++	.name = "pcan_usb_pro_fd",
++	.tseg1_min = 1,
++	.tseg1_max = 16,
++	.tseg2_min = 1,
++	.tseg2_max = 8,
++	.sjw_max = 4,
++	.brp_min = 1,
++	.brp_max = 1024,
++	.brp_inc = 1,
++};
++
+ const struct peak_usb_adapter pcan_usb_pro_fd = {
+ 	.name = "PCAN-USB Pro FD",
+ 	.device_id = PCAN_USBPROFD_PRODUCT_ID,
+@@ -1067,28 +1095,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
+ 	.clock = {
+ 		.freq = PCAN_UFD_CRYSTAL_HZ,
+ 	},
+-	.bittiming_const = {
+-		.name = "pcan_usb_pro_fd",
+-		.tseg1_min = 1,
+-		.tseg1_max = 64,
+-		.tseg2_min = 1,
+-		.tseg2_max = 16,
+-		.sjw_max = 16,
+-		.brp_min = 1,
+-		.brp_max = 1024,
+-		.brp_inc = 1,
+-	},
+-	.data_bittiming_const = {
+-		.name = "pcan_usb_pro_fd",
+-		.tseg1_min = 1,
+-		.tseg1_max = 16,
+-		.tseg2_min = 1,
+-		.tseg2_max = 8,
+-		.sjw_max = 4,
+-		.brp_min = 1,
+-		.brp_max = 1024,
+-		.brp_inc = 1,
+-	},
++	.bittiming_const = &pcan_usb_pro_fd_const,
++	.data_bittiming_const = &pcan_usb_pro_fd_data_const,
+ 
+ 	/* size of device private data */
+ 	.sizeof_dev_private = sizeof(struct pcan_usb_fd_device),
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index dec51717635e..a5ad2e6aa73a 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -1004,6 +1004,18 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
+ /*
+  * describe the PCAN-USB Pro adapter
+  */
++static const struct can_bittiming_const pcan_usb_pro_const = {
++	.name = "pcan_usb_pro",
++	.tseg1_min = 1,
++	.tseg1_max = 16,
++	.tseg2_min = 1,
++	.tseg2_max = 8,
++	.sjw_max = 4,
++	.brp_min = 1,
++	.brp_max = 1024,
++	.brp_inc = 1,
++};
++
+ const struct peak_usb_adapter pcan_usb_pro = {
+ 	.name = "PCAN-USB Pro",
+ 	.device_id = PCAN_USBPRO_PRODUCT_ID,
+@@ -1012,17 +1024,7 @@ const struct peak_usb_adapter pcan_usb_pro = {
+ 	.clock = {
+ 		.freq = PCAN_USBPRO_CRYSTAL_HZ,
+ 	},
+-	.bittiming_const = {
+-		.name = "pcan_usb_pro",
+-		.tseg1_min = 1,
+-		.tseg1_max = 16,
+-		.tseg2_min = 1,
+-		.tseg2_max = 8,
+-		.sjw_max = 4,
+-		.brp_min = 1,
+-		.brp_max = 1024,
+-		.brp_inc = 1,
+-	},
++	.bittiming_const = &pcan_usb_pro_const,
+ 
+ 	/* size of device private data */
+ 	.sizeof_dev_private = sizeof(struct pcan_usb_pro_device),
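
Across the four peak_usb files above, the embedded bittiming tables become
pointers so that adapters without CAN FD support can leave data_bittiming_const
NULL, instead of exposing an embedded, zero-filled struct that looks like a
real capability. A sketch of that nullable-pointer-as-optional-capability
pattern, with invented names:

#include <stdio.h>

struct bittiming_const { const char *name; unsigned tseg1_max; };

struct adapter {
	const char *name;
	const struct bittiming_const *bt;	/* always present */
	const struct bittiming_const *data_bt;	/* NULL => no CAN FD */
};

static const struct bittiming_const classic = { "classic", 16 };

static const struct adapter plain_usb = {
	.name = "plain", .bt = &classic, .data_bt = NULL,
};

int main(void)
{
	if (plain_usb.data_bt)
		printf("%s supports CAN FD\n", plain_usb.name);
	else
		printf("%s: classic CAN only\n", plain_usb.name);
	return 0;
}
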
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index 37e6a6f91487..699a4802835f 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -457,10 +457,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+ 		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+ 			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ 					  APMG_PCIDEV_STT_VAL_WAKE_ME);
+-		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
++		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
++			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ 			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ 				    CSR_HW_IF_CONFIG_REG_PREPARE |
+ 				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
++			mdelay(1);
++			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
++		}
+ 		mdelay(5);
+ 	}
+ 
+@@ -555,6 +561,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 	if (ret >= 0)
+ 		return 0;
+ 
++	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
++	msleep(1);
++
+ 	for (iter = 0; iter < 10; iter++) {
+ 		/* If HW is not ready, prepare the conditions to check again */
+ 		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+@@ -562,8 +572,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 
+ 		do {
+ 			ret = iwl_pcie_set_hw_ready(trans);
+-			if (ret >= 0)
+-				return 0;
++			if (ret >= 0) {
++				ret = 0;
++				goto out;
++			}
+ 
+ 			usleep_range(200, 1000);
+ 			t += 200;
+@@ -573,6 +585,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+ 
+ 	IWL_ERR(trans, "Couldn't prepare the card\n");
+ 
++out:
++	iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
++		      CSR_RESET_LINK_PWR_MGMT_DISABLED);
++
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+index b6cc9ff47fc2..1c6788aecc62 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
++++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 		(struct rsi_91x_sdiodev *)adapter->rsi_dev;
+ 	u32 len;
+ 	u32 num_blocks;
++	const u8 *fw;
+ 	const struct firmware *fw_entry = NULL;
+ 	u32 block_size = dev->tx_blk_size;
+ 	int status = 0;
+@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 		return status;
+ 	}
+ 
++	/* Copy firmware into DMA-accessible memory */
++	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
++	if (!fw)
++		return -ENOMEM;
+ 	len = fw_entry->size;
+ 
+ 	if (len % 4)
+@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 	rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
+ 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+ 
+-	status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks);
++	status = rsi_copy_to_card(common, fw, len, num_blocks);
++	kfree(fw);
+ 	release_firmware(fw_entry);
+ 	return status;
+ }
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+index 1106ce76707e..30c2cf7fa93b 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 		return status;
+ 	}
+ 
++	/* Copy firmware into DMA-accessible memory */
+ 	fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
++	if (!fw)
++		return -ENOMEM;
+ 	len = fw_entry->size;
+ 
+ 	if (len % 4)
+@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
+ 	rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
+ 
+ 	status = rsi_copy_to_card(common, fw, len, num_blocks);
++	kfree(fw);
+ 	release_firmware(fw_entry);
+ 	return status;
+ }
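
Both rsi hunks above deal with the same constraint: the buffer returned by
request_firmware() is not guaranteed to be safe for DMA, so the driver
duplicates it into kmalloc'd memory with kmemdup() before handing it to the
card (the SDIO path had also been missing the copy's NULL check). A userspace
analogue of the copy-then-free pattern, with memdup() standing in for
kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace stand-in for kmemdup(): allocate and copy in one step. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static int load_blob(const unsigned char *ro_data, size_t len)
{
	unsigned char *dma_safe = memdup(ro_data, len);

	if (!dma_safe)
		return -1;	/* the driver returns -ENOMEM here */

	/* ... hand dma_safe to the device ... */

	free(dma_safe);		/* free the copy once the transfer is done */
	return 0;
}

int main(void)
{
	static const unsigned char fw[] = { 0xde, 0xad, 0xbe, 0xef };

	return load_blob(fw, sizeof(fw)) ? 1 : 0;
}
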
+diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
+index 3b3a88b53b11..585d0883c7e5 100644
+--- a/drivers/net/wireless/rtlwifi/core.c
++++ b/drivers/net/wireless/rtlwifi/core.c
+@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
++	struct rtl_tcb_desc tcb_desc;
+ 
+-	if (skb)
+-		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL);
++	if (skb) {
++		memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
++		rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
++	}
+ }
+ 
+ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+index 1017f02d7bf7..7bf88d9dcdc3 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
+ module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
+ module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
+ module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
++module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
+ module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
+ 		   bool, 0444);
+ MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 73de4efcbe6e..944f50015ed0 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -2,7 +2,7 @@
+ # PCI configuration
+ #
+ config PCI_BUS_ADDR_T_64BIT
+-	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
++	def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC))
+ 	depends on PCI
+ 
+ config PCI_MSI
+diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
+index 26270c351624..ce129e595b55 100644
+--- a/drivers/scsi/fnic/fnic.h
++++ b/drivers/scsi/fnic/fnic.h
+@@ -39,7 +39,7 @@
+ 
+ #define DRV_NAME		"fnic"
+ #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
+-#define DRV_VERSION		"1.6.0.17"
++#define DRV_VERSION		"1.6.0.17a"
+ #define PFX			DRV_NAME ": "
+ #define DFX                     DRV_NAME "%d: "
+ 
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 155b286f1a9d..25436cd2860c 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -425,6 +425,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
+ 	unsigned long ptr;
+ 	struct fc_rport_priv *rdata;
+ 	spinlock_t *io_lock = NULL;
++	int io_lock_acquired = 0;
+ 
+ 	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ 		return SCSI_MLQUEUE_HOST_BUSY;
+@@ -518,6 +519,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
+ 	spin_lock_irqsave(io_lock, flags);
+ 
+ 	/* initialize rest of io_req */
++	io_lock_acquired = 1;
+ 	io_req->port_id = rport->port_id;
+ 	io_req->start_time = jiffies;
+ 	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
+@@ -571,7 +573,7 @@ out:
+ 		  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ 
+ 	/* if only we issued IO, will we have the io lock */
+-	if (CMD_FLAGS(sc) & FNIC_IO_INITIALIZED)
++	if (io_lock_acquired)
+ 		spin_unlock_irqrestore(io_lock, flags);
+ 
+ 	atomic_dec(&fnic->in_flight);
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 1b3a09473452..30f9ef0c0d4f 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+ 	if (resp) {
+ 		resp(sp, fp, arg);
+ 		res = true;
+-	} else if (!IS_ERR(fp)) {
+-		fc_frame_free(fp);
+ 	}
+ 
+ 	spin_lock_bh(&ep->ex_lock);
+@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+ 	 * If new exch resp handler is valid then call that
+ 	 * first.
+ 	 */
+-	fc_invoke_resp(ep, sp, fp);
++	if (!fc_invoke_resp(ep, sp, fp))
++		fc_frame_free(fp);
+ 
+ 	fc_exch_release(ep);
+ 	return;
+@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+ 	fc_exch_hold(ep);
+ 	if (!rc)
+ 		fc_exch_delete(ep);
+-	fc_invoke_resp(ep, sp, fp);
++	if (!fc_invoke_resp(ep, sp, fp))
++		fc_frame_free(fp);
+ 	if (has_rec)
+ 		fc_exch_timer_set(ep, ep->r_a_tov);
+ 	fc_exch_release(ep);
+diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
+index c6795941b45d..2d5909c4685c 100644
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -1039,11 +1039,26 @@ restart:
+ 		fc_fcp_pkt_hold(fsp);
+ 		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ 
+-		if (!fc_fcp_lock_pkt(fsp)) {
++		spin_lock_bh(&fsp->scsi_pkt_lock);
++		if (!(fsp->state & FC_SRB_COMPL)) {
++			fsp->state |= FC_SRB_COMPL;
++			/*
++			 * TODO: dropping scsi_pkt_lock and then reacquiring
++			 * it around fc_fcp_cleanup_cmd() is required, since
++			 * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp()
++			 * and that function yields the cpu via schedule().
++			 * Perhaps schedule() and the related code should be
++			 * removed instead of unlocking here, to avoid a
++			 * scheduling-while-atomic bug.
++			 */
++			spin_unlock_bh(&fsp->scsi_pkt_lock);
++
+ 			fc_fcp_cleanup_cmd(fsp, error);
++
++			spin_lock_bh(&fsp->scsi_pkt_lock);
+ 			fc_io_compl(fsp);
+-			fc_fcp_unlock_pkt(fsp);
+ 		}
++		spin_unlock_bh(&fsp->scsi_pkt_lock);
+ 
+ 		fc_fcp_pkt_release(fsp);
+ 		spin_lock_irqsave(&si->scsi_queue_lock, flags);
+diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
+index 8053f24f0349..98d9bb6ff725 100644
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ {
+ 	struct iscsi_conn *conn = cls_conn->dd_data;
+ 	struct iscsi_session *session = conn->session;
+-	unsigned long flags;
+ 
+ 	del_timer_sync(&conn->transport_timer);
+ 
++	mutex_lock(&session->eh_mutex);
+ 	spin_lock_bh(&session->frwd_lock);
+ 	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
+ 	if (session->leadconn == conn) {
+@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ 	}
+ 	spin_unlock_bh(&session->frwd_lock);
+ 
+-	/*
+-	 * Block until all in-progress commands for this connection
+-	 * time out or fail.
+-	 */
+-	for (;;) {
+-		spin_lock_irqsave(session->host->host_lock, flags);
+-		if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
+-			spin_unlock_irqrestore(session->host->host_lock, flags);
+-			break;
+-		}
+-		spin_unlock_irqrestore(session->host->host_lock, flags);
+-		msleep_interruptible(500);
+-		iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
+-				  "host_busy %d host_failed %d\n",
+-				  atomic_read(&session->host->host_busy),
+-				  session->host->host_failed);
+-		/*
+-		 * force eh_abort() to unblock
+-		 */
+-		wake_up(&conn->ehwait);
+-	}
+-
+ 	/* flush queued up work because we free the connection below */
+ 	iscsi_suspend_tx(conn);
+ 
+@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
+ 	if (session->leadconn == conn)
+ 		session->leadconn = NULL;
+ 	spin_unlock_bh(&session->frwd_lock);
++	mutex_unlock(&session->eh_mutex);
+ 
+ 	iscsi_destroy_conn(cls_conn);
+ }
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index 59c31bf88d92..ce6c770d74d5 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -26,7 +26,6 @@
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/jiffies.h>
+-#include <asm/unaligned.h>
+ 
+ #include <scsi/scsi.h>
+ #include <scsi/scsi_cmnd.h>
+@@ -2587,33 +2586,3 @@ void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq)
+ 	}
+ }
+ EXPORT_SYMBOL(scsi_build_sense_buffer);
+-
+-/**
+- * scsi_set_sense_information - set the information field in a
+- *		formatted sense data buffer
+- * @buf:	Where to build sense data
+- * @info:	64-bit information value to be set
+- *
+- **/
+-void scsi_set_sense_information(u8 *buf, u64 info)
+-{
+-	if ((buf[0] & 0x7f) == 0x72) {
+-		u8 *ucp, len;
+-
+-		len = buf[7];
+-		ucp = (char *)scsi_sense_desc_find(buf, len + 8, 0);
+-		if (!ucp) {
+-			buf[7] = len + 0xa;
+-			ucp = buf + 8 + len;
+-		}
+-		ucp[0] = 0;
+-		ucp[1] = 0xa;
+-		ucp[2] = 0x80; /* Valid bit */
+-		ucp[3] = 0;
+-		put_unaligned_be64(info, &ucp[4]);
+-	} else if ((buf[0] & 0x7f) == 0x70) {
+-		buf[0] |= 0x80;
+-		put_unaligned_be64(info, &buf[3]);
+-	}
+-}
+-EXPORT_SYMBOL(scsi_set_sense_information);
+diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
+index 9e43ae1d2163..e4b799837948 100644
+--- a/drivers/scsi/scsi_pm.c
++++ b/drivers/scsi/scsi_pm.c
+@@ -217,15 +217,15 @@ static int sdev_runtime_suspend(struct device *dev)
+ {
+ 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ 	struct scsi_device *sdev = to_scsi_device(dev);
+-	int err;
++	int err = 0;
+ 
+-	err = blk_pre_runtime_suspend(sdev->request_queue);
+-	if (err)
+-		return err;
+-	if (pm && pm->runtime_suspend)
++	if (pm && pm->runtime_suspend) {
++		err = blk_pre_runtime_suspend(sdev->request_queue);
++		if (err)
++			return err;
+ 		err = pm->runtime_suspend(dev);
+-	blk_post_runtime_suspend(sdev->request_queue, err);
+-
++		blk_post_runtime_suspend(sdev->request_queue, err);
++	}
+ 	return err;
+ }
+ 
+@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
+ 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ 	int err = 0;
+ 
+-	blk_pre_runtime_resume(sdev->request_queue);
+-	if (pm && pm->runtime_resume)
++	if (pm && pm->runtime_resume) {
++		blk_pre_runtime_resume(sdev->request_queue);
+ 		err = pm->runtime_resume(dev);
+-	blk_post_runtime_resume(sdev->request_queue, err);
+-
++		blk_post_runtime_resume(sdev->request_queue, err);
++	}
+ 	return err;
+ }
+ 
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index 7f9d65fe4fd9..11ea52b2c36b 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
+ 	max_xfer = sdkp->max_xfer_blocks;
+ 	max_xfer <<= ilog2(sdp->sector_size) - 9;
+ 
+-	max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue),
+-				max_xfer);
+-	blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer);
++	sdkp->disk->queue->limits.max_sectors =
++		min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
++
+ 	set_capacity(disk, sdkp->capacity);
+ 	sd_config_write_same(sdkp);
+ 	kfree(buffer);
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 305a5cbc099a..0ab6e2efd28c 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -968,9 +968,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ 		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;
+ 
+ 	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
+-	if (hdr->flags & ISCSI_FLAG_CMD_READ) {
++	if (hdr->flags & ISCSI_FLAG_CMD_READ)
+ 		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+-	} else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
++	else
+ 		cmd->targ_xfer_tag = 0xFFFFFFFF;
+ 	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
+ 	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
+diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
+index 96b2011d25f3..658be6cc3db6 100644
+--- a/drivers/xen/xenbus/xenbus_client.c
++++ b/drivers/xen/xenbus/xenbus_client.c
+@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+ 
+ 	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+ 			       addrs);
+-	if (!rv)
++	if (!rv) {
+ 		vunmap(vaddr);
++		free_xenballooned_pages(node->nr_handles, node->hvm.pages);
++	}
+ 	else
+ 		WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
+ 		     node->nr_handles);
+diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
+index 45c39a37f924..8bc073d297db 100644
+--- a/include/drm/drm_pciids.h
++++ b/include/drm/drm_pciids.h
+@@ -172,6 +172,7 @@
+ 	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
++	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ 	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index 533dbb6428f5..5dfbcd8887bb 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -385,8 +385,6 @@ enum {
+ 	SATA_SSP		= 0x06,	/* Software Settings Preservation */
+ 	SATA_DEVSLP		= 0x09,	/* Device Sleep */
+ 
+-	SETFEATURE_SENSE_DATA = 0xC3, /* Sense Data Reporting feature */
+-
+ 	/* feature values for SET_MAX */
+ 	ATA_SET_MAX_ADDR	= 0x00,
+ 	ATA_SET_MAX_PASSWD	= 0x01,
+@@ -530,8 +528,6 @@ struct ata_bmdma_prd {
+ #define ata_id_cdb_intr(id)	(((id)[ATA_ID_CONFIG] & 0x60) == 0x20)
+ #define ata_id_has_da(id)	((id)[ATA_ID_SATA_CAPABILITY_2] & (1 << 4))
+ #define ata_id_has_devslp(id)	((id)[ATA_ID_FEATURE_SUPP] & (1 << 8))
+-#define ata_id_has_ncq_autosense(id) \
+-				((id)[ATA_ID_FEATURE_SUPP] & (1 << 7))
+ 
+ static inline bool ata_id_has_hipm(const u16 *id)
+ {
+@@ -710,20 +706,6 @@ static inline bool ata_id_has_read_log_dma_ext(const u16 *id)
+ 	return id[ATA_ID_COMMAND_SET_3] & (1 << 3);
+ }
+ 
+-static inline bool ata_id_has_sense_reporting(const u16 *id)
+-{
+-	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+-		return false;
+-	return id[ATA_ID_COMMAND_SET_3] & (1 << 6);
+-}
+-
+-static inline bool ata_id_sense_reporting_enabled(const u16 *id)
+-{
+-	if (!(id[ATA_ID_CFS_ENABLE_2] & (1 << 15)))
+-		return false;
+-	return id[ATA_ID_COMMAND_SET_4] & (1 << 6);
+-}
+-
+ /**
+  *	ata_id_major_version	-	get ATA level of drive
+  *	@id: Identify data
+diff --git a/include/linux/irq.h b/include/linux/irq.h
+index 62c6901cab55..3532dca843f4 100644
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -467,6 +467,7 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
+ 					const struct cpumask *dest,
+ 					bool force);
+ extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
++extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
+ #endif
+ 
+ /* Handling of unhandled and spurious interrupts: */
+diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
+index 5a4bb5bb66b3..1e1421b06565 100644
+--- a/include/scsi/scsi_eh.h
++++ b/include/scsi/scsi_eh.h
+@@ -59,7 +59,6 @@ extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
+ 				   u64 * info_out);
+ 
+ extern void scsi_build_sense_buffer(int desc, u8 *buf, u8 key, u8 asc, u8 ascq);
+-extern void scsi_set_sense_information(u8 *buf, u64 info);
+ 
+ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
+ 
+diff --git a/ipc/sem.c b/ipc/sem.c
+index d1a6edd17eba..c50aa5755c62 100644
+--- a/ipc/sem.c
++++ b/ipc/sem.c
+@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
+ }
+ 
+ /*
++ * spin_unlock_wait() and !spin_is_locked() are not memory barriers;
++ * they are only control barriers.
++ * The code must pair with spin_unlock(&sem->lock) or
++ * spin_unlock(&sem_perm.lock), so the control barrier alone is insufficient.
++ *
++ * smp_rmb() is sufficient, as writes cannot pass the control barrier.
++ */
++#define ipc_smp_acquire__after_spin_is_unlocked()	smp_rmb()
++
++/*
+  * Wait until all currently ongoing simple ops have completed.
+  * Caller must own sem_perm.lock.
+  * New simple ops cannot start, because simple ops first check
+@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
+ 		sem = sma->sem_base + i;
+ 		spin_unlock_wait(&sem->lock);
+ 	}
++	ipc_smp_acquire__after_spin_is_unlocked();
+ }
+ 
+ /*
+@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ 		/* Then check that the global lock is free */
+ 		if (!spin_is_locked(&sma->sem_perm.lock)) {
+ 			/*
+-			 * The ipc object lock check must be visible on all
+-			 * cores before rechecking the complex count.  Otherwise
+-			 * we can race with  another thread that does:
++			 * We need a memory barrier with acquire semantics,
++			 * otherwise we can race with another thread that does:
+ 			 *	complex_count++;
+ 			 *	spin_unlock(sem_perm.lock);
+ 			 */
+-			smp_rmb();
++			ipc_smp_acquire__after_spin_is_unlocked();
+ 
+ 			/*
+ 			 * Now repeat the test of complex_count:
+@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
+ 		rcu_read_lock();
+ 		un = list_entry_rcu(ulp->list_proc.next,
+ 				    struct sem_undo, list_proc);
+-		if (&un->list_proc == &ulp->list_proc)
+-			semid = -1;
+-		 else
+-			semid = un->semid;
++		if (&un->list_proc == &ulp->list_proc) {
++			/*
++			 * We must wait for freeary() before freeing this ulp,
++			 * in case we raced with the last sem_undo. There is a
++			 * small window where we could exit before freeary()
++			 * has finished unlocking sem_undo_list.
++			 */
++			spin_unlock_wait(&ulp->lock);
++			rcu_read_unlock();
++			break;
++		}
++		spin_lock(&ulp->lock);
++		semid = un->semid;
++		spin_unlock(&ulp->lock);
+ 
++		/* exit_sem raced with IPC_RMID, nothing to do */
+ 		if (semid == -1) {
+ 			rcu_read_unlock();
+-			break;
++			continue;
+ 		}
+ 
+-		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
++		sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
+ 		/* exit_sem raced with IPC_RMID, nothing to do */
+ 		if (IS_ERR(sma)) {
+ 			rcu_read_unlock();
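
The ipc/sem.c hunks above hinge on a memory-ordering subtlety: seeing a lock as
free via spin_unlock_wait()/!spin_is_locked() is only a control dependency, so
a read fence is needed before re-reading state that was published under that
lock. A rough C11-atomics sketch of the pairing; the names and the lock
representation are illustrative, and this simplifies the kernel's actual
barrier semantics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool locked;
static int complex_count;

/* Shape of the pairing the sem.c comment describes: observing "the
 * lock looks free" is only a control dependency, so an acquire fence
 * is needed before re-reading state published under that lock. */
static bool fast_path_ok(void)
{
	if (atomic_load_explicit(&locked, memory_order_relaxed))
		return false;

	/* analogue of ipc_smp_acquire__after_spin_is_unlocked() */
	atomic_thread_fence(memory_order_acquire);

	return complex_count == 0;	/* now safe to re-check */
}

int main(void)
{
	printf("fast path usable: %d\n", fast_path_ok());
	return 0;
}
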
+diff --git a/kernel/cpuset.c b/kernel/cpuset.c
+index ee14e3a35a29..f0acff0f66c9 100644
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1223,7 +1223,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+ 	spin_unlock_irq(&callback_lock);
+ 
+ 	/* use trialcs->mems_allowed as a temp variable */
+-	update_nodemasks_hier(cs, &cs->mems_allowed);
++	update_nodemasks_hier(cs, &trialcs->mems_allowed);
+ done:
+ 	return retval;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0ceb386777ae..94817491407b 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1886,8 +1886,6 @@ event_sched_in(struct perf_event *event,
+ 
+ 	perf_pmu_disable(event->pmu);
+ 
+-	event->tstamp_running += tstamp - event->tstamp_stopped;
+-
+ 	perf_set_shadow_time(event, ctx, tstamp);
+ 
+ 	perf_log_itrace_start(event);
+@@ -1899,6 +1897,8 @@ event_sched_in(struct perf_event *event,
+ 		goto out;
+ 	}
+ 
++	event->tstamp_running += tstamp - event->tstamp_stopped;
++
+ 	if (!is_software_event(event))
+ 		cpuctx->active_oncpu++;
+ 	if (!ctx->nr_active++)
+@@ -3976,28 +3976,21 @@ static void perf_event_for_each(struct perf_event *event,
+ 		perf_event_for_each_child(sibling, func);
+ }
+ 
+-static int perf_event_period(struct perf_event *event, u64 __user *arg)
+-{
+-	struct perf_event_context *ctx = event->ctx;
+-	int ret = 0, active;
++struct period_event {
++	struct perf_event *event;
+ 	u64 value;
++};
+ 
+-	if (!is_sampling_event(event))
+-		return -EINVAL;
+-
+-	if (copy_from_user(&value, arg, sizeof(value)))
+-		return -EFAULT;
+-
+-	if (!value)
+-		return -EINVAL;
++static int __perf_event_period(void *info)
++{
++	struct period_event *pe = info;
++	struct perf_event *event = pe->event;
++	struct perf_event_context *ctx = event->ctx;
++	u64 value = pe->value;
++	bool active;
+ 
+-	raw_spin_lock_irq(&ctx->lock);
++	raw_spin_lock(&ctx->lock);
+ 	if (event->attr.freq) {
+-		if (value > sysctl_perf_event_sample_rate) {
+-			ret = -EINVAL;
+-			goto unlock;
+-		}
+-
+ 		event->attr.sample_freq = value;
+ 	} else {
+ 		event->attr.sample_period = value;
+@@ -4016,11 +4009,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+ 		event->pmu->start(event, PERF_EF_RELOAD);
+ 		perf_pmu_enable(ctx->pmu);
+ 	}
++	raw_spin_unlock(&ctx->lock);
+ 
+-unlock:
++	return 0;
++}
++
++static int perf_event_period(struct perf_event *event, u64 __user *arg)
++{
++	struct period_event pe = { .event = event, };
++	struct perf_event_context *ctx = event->ctx;
++	struct task_struct *task;
++	u64 value;
++
++	if (!is_sampling_event(event))
++		return -EINVAL;
++
++	if (copy_from_user(&value, arg, sizeof(value)))
++		return -EFAULT;
++
++	if (!value)
++		return -EINVAL;
++
++	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
++		return -EINVAL;
++
++	task = ctx->task;
++	pe.value = value;
++
++	if (!task) {
++		cpu_function_call(event->cpu, __perf_event_period, &pe);
++		return 0;
++	}
++
++retry:
++	if (!task_function_call(task, __perf_event_period, &pe))
++		return 0;
++
++	raw_spin_lock_irq(&ctx->lock);
++	if (ctx->is_active) {
++		raw_spin_unlock_irq(&ctx->lock);
++		task = ctx->task;
++		goto retry;
++	}
++
++	__perf_event_period(&pe);
+ 	raw_spin_unlock_irq(&ctx->lock);
+ 
+-	return ret;
++	return 0;
+ }
+ 
+ static const struct file_operations perf_fops;
+@@ -4766,12 +4801,20 @@ static const struct file_operations perf_fops = {
+  * to user-space before waking everybody up.
+  */
+ 
++static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
++{
++	/* only the parent has fasync state */
++	if (event->parent)
++		event = event->parent;
++	return &event->fasync;
++}
++
+ void perf_event_wakeup(struct perf_event *event)
+ {
+ 	ring_buffer_wakeup(event);
+ 
+ 	if (event->pending_kill) {
+-		kill_fasync(&event->fasync, SIGIO, event->pending_kill);
++		kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
+ 		event->pending_kill = 0;
+ 	}
+ }
+@@ -6117,7 +6160,7 @@ static int __perf_event_overflow(struct perf_event *event,
+ 	else
+ 		perf_event_output(event, data, regs);
+ 
+-	if (event->fasync && event->pending_kill) {
++	if (*perf_event_fasync(event) && event->pending_kill) {
+ 		event->pending_wakeup = 1;
+ 		irq_work_queue(&event->pending);
+ 	}
+diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
+index 725c416085e3..a7604c81168e 100644
+--- a/kernel/events/ring_buffer.c
++++ b/kernel/events/ring_buffer.c
+@@ -547,11 +547,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
+ 		rb->aux_priv = NULL;
+ 	}
+ 
+-	for (pg = 0; pg < rb->aux_nr_pages; pg++)
+-		rb_free_aux_page(rb, pg);
++	if (rb->aux_nr_pages) {
++		for (pg = 0; pg < rb->aux_nr_pages; pg++)
++			rb_free_aux_page(rb, pg);
+ 
+-	kfree(rb->aux_pages);
+-	rb->aux_nr_pages = 0;
++		kfree(rb->aux_pages);
++		rb->aux_nr_pages = 0;
++	}
+ }
+ 
+ void rb_free_aux(struct ring_buffer *rb)
+diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
+index eb9a4ea394ab..94bbd8fee90d 100644
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -934,6 +934,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
+ }
+ 
+ /**
++ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
++ * @data:	Pointer to interrupt specific data
++ * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
++ *
++ * Conditional, as the underlying parent chip might not implement it.
++ */
++int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
++{
++	data = data->parent_data;
++
++	if (data->chip->irq_set_type)
++		return data->chip->irq_set_type(data, type);
++
++	return -ENOSYS;
++}
++
++/**
+  * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
+  * @data:	Pointer to interrupt specific data
+  *
+@@ -946,7 +963,7 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
+ 		if (data->chip && data->chip->irq_retrigger)
+ 			return data->chip->irq_retrigger(data);
+ 
+-	return -ENOSYS;
++	return 0;
+ }
+ 
+ /**
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index 501820c815b3..9f48145c884f 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1558,6 +1558,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
+ 		 */
+ 		ret = __get_any_page(page, pfn, 0);
+ 		if (!PageLRU(page)) {
++			/* Drop page reference which is from __get_any_page() */
++			put_page(page);
+ 			pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
+ 				pfn, page->flags);
+ 			return -EIO;
+@@ -1587,13 +1589,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
+ 	unlock_page(hpage);
+ 
+ 	ret = isolate_huge_page(hpage, &pagelist);
+-	if (ret) {
+-		/*
+-		 * get_any_page() and isolate_huge_page() takes a refcount each,
+-		 * so need to drop one here.
+-		 */
+-		put_page(hpage);
+-	} else {
++	/*
++	 * get_any_page() and isolate_huge_page() each take a refcount,
++	 * so we need to drop one here.
++	 */
++	put_page(hpage);
++	if (!ret) {
+ 		pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
+ 		return -EBUSY;
+ 	}
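
The mm/memory-failure hunks above rebalance page references: get_any_page()
and isolate_huge_page() each take a reference, so the one from get_any_page()
must be dropped on every path, not only when isolation fails. A toy refcount
model of the corrected flow; the counter and helpers are stand-ins, not the
real struct page machinery:

#include <stdio.h>

static int page_ref = 1;	/* base reference held by the page cache */

static void get_page(void) { page_ref++; }
static void put_page(void) { page_ref--; }

/* get_any_page()'s reference is dropped unconditionally after the
 * isolation attempt, instead of only on the failure branch. */
static int soft_offline(int isolate_succeeds)
{
	get_page();			/* get_any_page() */
	if (isolate_succeeds)
		get_page();		/* isolate_huge_page() */
	put_page();			/* always drop get_any_page()'s ref */
	if (!isolate_succeeds)
		return -1;		/* -EBUSY in the kernel */
	put_page();			/* migration drops the isolate ref */
	return 0;
}

int main(void)
{
	soft_offline(1);
	printf("balanced after success: %d\n", page_ref == 1);
	soft_offline(0);
	printf("balanced after failure: %d\n", page_ref == 1);
	return 0;
}
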
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 81925b923318..fcf6fe063d82 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -1541,6 +1541,7 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err)
+ 	struct p9_client *clnt = fid->clnt;
+ 	struct p9_req_t *req;
+ 	int total = 0;
++	*err = 0;
+ 
+ 	p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
+ 		   fid->fid, (unsigned long long) offset, (int)iov_iter_count(to));
+@@ -1616,6 +1617,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 	struct p9_client *clnt = fid->clnt;
+ 	struct p9_req_t *req;
+ 	int total = 0;
++	*err = 0;
+ 
+ 	p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n",
+ 				fid->fid, (unsigned long long) offset,
+diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
+index 247552a7f6c2..3ece7d1034c8 100644
+--- a/net/mac80211/rc80211_minstrel.c
++++ b/net/mac80211/rc80211_minstrel.c
+@@ -92,14 +92,15 @@ int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma)
+ static inline void
+ minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
+ {
+-	int j = MAX_THR_RATES;
+-	struct minstrel_rate_stats *tmp_mrs = &mi->r[j - 1].stats;
++	int j;
++	struct minstrel_rate_stats *tmp_mrs;
+ 	struct minstrel_rate_stats *cur_mrs = &mi->r[i].stats;
+ 
+-	while (j > 0 && (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) >
+-	       minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))) {
+-		j--;
++	for (j = MAX_THR_RATES; j > 0; --j) {
+ 		tmp_mrs = &mi->r[tp_list[j - 1]].stats;
++		if (minstrel_get_tp_avg(&mi->r[i], cur_mrs->prob_ewma) <=
++		    minstrel_get_tp_avg(&mi->r[tp_list[j - 1]], tmp_mrs->prob_ewma))
++			break;
+ 	}
+ 
+ 	if (j < MAX_THR_RATES - 1)
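
The minstrel hunk above rewrites the insertion-position search: the old
while-loop compared against a tmp_mrs that was read before j was decremented,
i.e. against the wrong list entry. The for-loop reads the candidate entry
first and breaks at the first one that is not beaten. The same scan, reduced
to plain integers as a hedged sketch:

#include <stdio.h>

#define MAX_THR 4

/* Find where a new value belongs in a descending top-N list: scan
 * from the tail and stop at the first entry that is >= the value. */
static int insert_pos(const int tp[MAX_THR], int val)
{
	int j;

	for (j = MAX_THR; j > 0; --j)
		if (val <= tp[j - 1])
			break;
	return j;	/* val belongs at index j */
}

int main(void)
{
	int tp[MAX_THR] = { 90, 70, 50, 30 };

	printf("insert 60 at index %d\n", insert_pos(tp, 60)); /* 2 */
	return 0;
}
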
+diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
+index 9cb8522d8d22..f3d3fb42b873 100755
+--- a/scripts/kconfig/streamline_config.pl
++++ b/scripts/kconfig/streamline_config.pl
+@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
+ my $kconfig = $ARGV[1];
+ my $lsmod_file = $ENV{'LSMOD'};
+ 
+-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
++my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
+ chomp @makefiles;
+ 
+ my %depends;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 5645481af3d9..36e8f1236637 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -3259,7 +3259,7 @@ static int add_std_chmaps(struct hda_codec *codec)
+ 			struct snd_pcm_chmap *chmap;
+ 			const struct snd_pcm_chmap_elem *elem;
+ 
+-			if (!pcm || pcm->own_chmap ||
++			if (!pcm || !pcm->pcm || pcm->own_chmap ||
+ 			    !hinfo->substreams)
+ 				continue;
+ 			elem = hinfo->chmap ? hinfo->chmap : snd_pcm_std_chmaps;
+diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
+index ac0db1679f09..5bc7f2e2715c 100644
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -671,7 +671,8 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
+ 		}
+ 		for (i = 0; i < path->depth; i++) {
+ 			if (path->path[i] == nid) {
+-				if (dir == HDA_OUTPUT || path->idx[i] == idx)
++				if (dir == HDA_OUTPUT || idx == -1 ||
++				    path->idx[i] == idx)
+ 					return true;
+ 				break;
+ 			}
+@@ -682,7 +683,7 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
+ 
+ /* check whether the NID is referred by any active paths */
+ #define is_active_nid_for_any(codec, nid) \
+-	is_active_nid(codec, nid, HDA_OUTPUT, 0)
++	is_active_nid(codec, nid, HDA_OUTPUT, -1)
+ 
+ /* get the default amp value for the target state */
+ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
+@@ -883,8 +884,7 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
+ 	struct hda_gen_spec *spec = codec->spec;
+ 	int i;
+ 
+-	if (!enable)
+-		path->active = false;
++	path->active = enable;
+ 
+ 	/* make sure the widget is powered up */
+ 	if (enable && (spec->power_down_unused || codec->power_save_node))
+@@ -902,9 +902,6 @@ void snd_hda_activate_path(struct hda_codec *codec, struct nid_path *path,
+ 		if (has_amp_out(codec, path, i))
+ 			activate_amp_out(codec, path, i, enable);
+ 	}
+-
+-	if (enable)
+-		path->active = true;
+ }
+ EXPORT_SYMBOL_GPL(snd_hda_activate_path);
+ 
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 78b719b5b34d..06cc9d57ba3d 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -200,12 +200,33 @@ static int cx_auto_init(struct hda_codec *codec)
+ 	return 0;
+ }
+ 
+-#define cx_auto_free	snd_hda_gen_free
++static void cx_auto_reboot_notify(struct hda_codec *codec)
++{
++	struct conexant_spec *spec = codec->spec;
++
++	if (codec->core.vendor_id != 0x14f150f2)
++		return;
++
++	/* Turn the CX20722 codec into D3 to avoid spurious noises
++	   from the internal speaker during (and after) reboot */
++	cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
++
++	snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
++	snd_hda_codec_write(codec, codec->core.afg, 0,
++			    AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
++}
++
++static void cx_auto_free(struct hda_codec *codec)
++{
++	cx_auto_reboot_notify(codec);
++	snd_hda_gen_free(codec);
++}
+ 
+ static const struct hda_codec_ops cx_auto_patch_ops = {
+ 	.build_controls = cx_auto_build_controls,
+ 	.build_pcms = snd_hda_gen_build_pcms,
+ 	.init = cx_auto_init,
++	.reboot_notify = cx_auto_reboot_notify,
+ 	.free = cx_auto_free,
+ 	.unsol_event = snd_hda_jack_unsol_event,
+ #ifdef CONFIG_PM
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1e99f075a5ab..91f6928560e1 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5119,6 +5119,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/usb/card.c b/sound/usb/card.c
+index 1fab9778807a..0450593980fd 100644
+--- a/sound/usb/card.c
++++ b/sound/usb/card.c
+@@ -638,7 +638,7 @@ int snd_usb_autoresume(struct snd_usb_audio *chip)
+ 	int err = -ENODEV;
+ 
+ 	down_read(&chip->shutdown_rwsem);
+-	if (chip->probing && chip->in_pm)
++	if (chip->probing || chip->in_pm)
+ 		err = 0;
+ 	else if (!chip->shutdown)
+ 		err = usb_autopm_get_interface(chip->pm_intf);
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 754e689596a2..00ebc0ca008e 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -1268,6 +1268,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
+ 			return SNDRV_PCM_FMTBIT_DSD_U32_BE;
+ 		break;
+ 
++	case USB_ID(0x20b1, 0x000a): /* Gustard DAC-X20U */
+ 	case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+ 	case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
+ 		if (fp->altsetting == 3)


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-08-17 15:38 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-08-17 15:38 UTC (permalink / raw
  To: gentoo-commits

commit:     274174ebb8986e7755b47ecf774bcfeb13e25948
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 17 15:37:54 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug 17 15:37:54 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=274174eb

Linux patch 4.1.6

 0000_README            |    4 +
 1005_linux-4.1.6.patch | 4380 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 4384 insertions(+)

diff --git a/0000_README b/0000_README
index fd7a57d..83ddebf 100644
--- a/0000_README
+++ b/0000_README
@@ -63,6 +63,10 @@ Patch:  1004_linux-4.1.5.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.5
 
+Patch:  1005_linux-4.1.6.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.6
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1005_linux-4.1.6.patch b/1005_linux-4.1.6.patch
new file mode 100644
index 0000000..3dcd61f
--- /dev/null
+++ b/1005_linux-4.1.6.patch
@@ -0,0 +1,4380 @@
+diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt
+index 225990f79b7c..47570d207215 100644
+--- a/Documentation/devicetree/bindings/clock/keystone-pll.txt
++++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt
+@@ -15,8 +15,8 @@ Required properties:
+ - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock"
+ - clocks : parent clock phandle
+ - reg - pll control0 and pll multiplier registers
+-- reg-names : control and multiplier. The multiplier is applicable only for
+-		main pll clock
++- reg-names : control, multiplier and post-divider. The multiplier and
++		post-divider registers are applicable only for main pll clock
+ - fixed-postdiv : fixed post divider value. If absent, use clkod register bits
+ 		for postdiv
+ 
+@@ -25,8 +25,8 @@ Example:
+ 		#clock-cells = <0>;
+ 		compatible = "ti,keystone,main-pll-clock";
+ 		clocks = <&refclksys>;
+-		reg = <0x02620350 4>, <0x02310110 4>;
+-		reg-names = "control", "multiplier";
++		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
++		reg-names = "control", "multiplier", "post-divider";
+ 		fixed-postdiv = <2>;
+ 	};
+ 
+diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
+index c86f2f1ae4f6..1fec1135791d 100644
+--- a/Documentation/input/alps.txt
++++ b/Documentation/input/alps.txt
+@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
+  byte 5:  0   z6   z5   z4   z3   z2   z1   z0
+ 
+ Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
+-the DualPoint Stick. For non interleaved dualpoint devices the pointingstick
+-buttons get reported separately in the PSM, PSR and PSL bits.
++the DualPoint Stick. The M, R and L bits signal the combined status of both
++the pointingstick and touchpad buttons, except for Dell dualpoint devices
++where the pointingstick buttons get reported separately in the PSM, PSR
++and PSL bits.
+ 
+ Dualpoint device -- interleaved packet format
+ ---------------------------------------------
+diff --git a/Makefile b/Makefile
+index 068dd690933d..838dabcb7f48 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 5
++SUBLEVEL = 6
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
+index b6478e97d6a7..e6540b5cfa4c 100644
+--- a/arch/arm/boot/dts/imx35.dtsi
++++ b/arch/arm/boot/dts/imx35.dtsi
+@@ -286,8 +286,8 @@
+ 			can1: can@53fe4000 {
+ 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ 				reg = <0x53fe4000 0x1000>;
+-				clocks = <&clks 33>;
+-				clock-names = "ipg";
++				clocks = <&clks 33>, <&clks 33>;
++				clock-names = "ipg", "per";
+ 				interrupts = <43>;
+ 				status = "disabled";
+ 			};
+@@ -295,8 +295,8 @@
+ 			can2: can@53fe8000 {
+ 				compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
+ 				reg = <0x53fe8000 0x1000>;
+-				clocks = <&clks 34>;
+-				clock-names = "ipg";
++				clocks = <&clks 34>, <&clks 34>;
++				clock-names = "ipg", "per";
+ 				interrupts = <44>;
+ 				status = "disabled";
+ 			};
+diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
+index 4773d6af66a0..d56d68fe7ffc 100644
+--- a/arch/arm/boot/dts/k2e-clocks.dtsi
++++ b/arch/arm/boot/dts/k2e-clocks.dtsi
+@@ -13,9 +13,8 @@ clocks {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,keystone,main-pll-clock";
+ 		clocks = <&refclksys>;
+-		reg = <0x02620350 4>, <0x02310110 4>;
+-		reg-names = "control", "multiplier";
+-		fixed-postdiv = <2>;
++		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
++		reg-names = "control", "multiplier", "post-divider";
+ 	};
+ 
+ 	papllclk: papllclk@2620358 {
+diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
+index d5adee3c0067..af9b7190533a 100644
+--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
++++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
+@@ -22,9 +22,8 @@ clocks {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,keystone,main-pll-clock";
+ 		clocks = <&refclksys>;
+-		reg = <0x02620350 4>, <0x02310110 4>;
+-		reg-names = "control", "multiplier";
+-		fixed-postdiv = <2>;
++		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
++		reg-names = "control", "multiplier", "post-divider";
+ 	};
+ 
+ 	papllclk: papllclk@2620358 {
+diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
+index eb1e3e29f073..ef8464bb11ff 100644
+--- a/arch/arm/boot/dts/k2l-clocks.dtsi
++++ b/arch/arm/boot/dts/k2l-clocks.dtsi
+@@ -22,9 +22,8 @@ clocks {
+ 		#clock-cells = <0>;
+ 		compatible = "ti,keystone,main-pll-clock";
+ 		clocks = <&refclksys>;
+-		reg = <0x02620350 4>, <0x02310110 4>;
+-		reg-names = "control", "multiplier";
+-		fixed-postdiv = <2>;
++		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
++		reg-names = "control", "multiplier", "post-divider";
+ 	};
+ 
+ 	papllclk: papllclk@2620358 {
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index 752969ff9de0..5286e7773ed4 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
+  * registers.  This address is needed early so the OCP registers that
+  * are part of the device's address space can be ioremapped properly.
+  *
++ * If SYSC access is not needed, the registers will not be remapped
++ * and non-availability of MPU access is not treated as an error.
++ *
+  * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
+  * -ENXIO on absent or invalid register target address space.
+  */
+@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
+ 
+ 	_save_mpu_port_index(oh);
+ 
++	/* if we don't need sysc access we don't need to ioremap */
++	if (!oh->class->sysc)
++		return 0;
++
++	/* we can't continue without MPU PORT if we need sysc access */
+ 	if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
+ 		return -ENXIO;
+ 
+@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
+ 			 oh->name);
+ 
+ 		/* Extract the IO space from device tree blob */
+-		if (!np)
++		if (!np) {
++			pr_err("omap_hwmod: %s: no dt node\n", oh->name);
+ 			return -ENXIO;
++		}
+ 
+ 		va_start = of_iomap(np, index + oh->mpu_rt_idx);
+ 	} else {
+@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
+ 				oh->name, np->name);
+ 	}
+ 
+-	if (oh->class->sysc) {
+-		r = _init_mpu_rt_base(oh, NULL, index, np);
+-		if (r < 0) {
+-			WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
+-			     oh->name);
+-			return 0;
+-		}
++	r = _init_mpu_rt_base(oh, NULL, index, np);
++	if (r < 0) {
++		WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
++		     oh->name);
++		return 0;
+ 	}
+ 
+ 	r = _init_clocks(oh, NULL);
+diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
+index d26fcd4cd6e6..c0cff3410166 100644
+--- a/arch/arm64/kernel/signal32.c
++++ b/arch/arm64/kernel/signal32.c
+@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ 		break;
+@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE))
+diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
+index 7fc8397d16f2..fd2a36a79f97 100644
+--- a/arch/mips/ath79/setup.c
++++ b/arch/mips/ath79/setup.c
+@@ -186,6 +186,7 @@ int get_c0_perfcount_int(void)
+ {
+ 	return ATH79_MISC_IRQ(5);
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ unsigned int get_c0_compare_int(void)
+ {
+diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+deleted file mode 100644
+index 11d3b572b1b3..000000000000
+--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
+-#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
+-
+-#include <asm/bmips.h>
+-
+-#define plat_post_dma_flush	bmips_post_dma_flush
+-
+-#include <asm/mach-generic/dma-coherence.h>
+-
+-#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
+diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
+index 819af9d057a8..70f6e7f073b0 100644
+--- a/arch/mips/include/asm/pgtable.h
++++ b/arch/mips/include/asm/pgtable.h
+@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
+ 		 * Make sure the buddy is global too (if it's !none,
+ 		 * it better already be global)
+ 		 */
++#ifdef CONFIG_SMP
++		/*
++		 * For SMP, multiple CPUs can race, so we need to do
++		 * this atomically.
++		 */
++#ifdef CONFIG_64BIT
++#define LL_INSN "lld"
++#define SC_INSN "scd"
++#else /* CONFIG_32BIT */
++#define LL_INSN "ll"
++#define SC_INSN "sc"
++#endif
++		unsigned long page_global = _PAGE_GLOBAL;
++		unsigned long tmp;
++
++		__asm__ __volatile__ (
++			"	.set	push\n"
++			"	.set	noreorder\n"
++			"1:	" LL_INSN "	%[tmp], %[buddy]\n"
++			"	bnez	%[tmp], 2f\n"
++			"	 or	%[tmp], %[tmp], %[global]\n"
++			"	" SC_INSN "	%[tmp], %[buddy]\n"
++			"	beqz	%[tmp], 1b\n"
++			"	 nop\n"
++			"2:\n"
++			"	.set pop"
++			: [buddy] "+m" (buddy->pte),
++			  [tmp] "=&r" (tmp)
++			: [global] "r" (page_global));
++#else /* !CONFIG_SMP */
+ 		if (pte_none(*buddy))
+ 			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
++#endif /* CONFIG_SMP */
+ 	}
+ #endif
+ }
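
The inline assembly above is an LL/SC retry loop: load the buddy PTE, and only if it is still pte_none (zero) OR in _PAGE_GLOBAL and store it back, retrying if another CPU raced in between; a nonzero buddy is left untouched. The same single transition can be modelled portably as a compare-and-swap. A sketch under that assumption (C11 atomics standing in for ll/sc; this is an illustration, not the kernel's set_pte()):

#include <stdatomic.h>
#include <stdio.h>

#define PAGE_GLOBAL 0x1UL

/* Set the GLOBAL bit only if the buddy PTE is still "none" (zero);
 * if a racing CPU already populated it, leave it alone. */
static void buddy_set_global(_Atomic unsigned long *buddy)
{
	unsigned long expected = 0;

	/* one atomic 0 -> PAGE_GLOBAL transition; losing the race is fine */
	atomic_compare_exchange_strong(buddy, &expected, PAGE_GLOBAL);
}

int main(void)
{
	_Atomic unsigned long pte = 0;

	buddy_set_global(&pte);
	printf("pte = %#lx\n", (unsigned long)pte);
	return 0;
}
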
+diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
+index 28d6d9364bd1..a71da576883c 100644
+--- a/arch/mips/include/asm/stackframe.h
++++ b/arch/mips/include/asm/stackframe.h
+@@ -152,6 +152,31 @@
+ 		.set	noreorder
+ 		bltz	k0, 8f
+ 		 move	k1, sp
++#ifdef CONFIG_EVA
++		/*
++		 * Flush interAptiv's Return Prediction Stack (RPS) by writing
++		 * EntryHi. Toggling Config7.RPS is slower and less portable.
++		 *
++		 * The RPS isn't automatically flushed when exceptions are
++		 * taken, which can result in kernel mode speculative accesses
++		 * to user addresses if the RPS mispredicts. That's harmless
++		 * when user and kernel share the same address space, but with
++		 * EVA the same user segments may be unmapped to kernel mode,
++		 * even containing sensitive MMIO regions or invalid memory.
++		 *
++		 * This can happen when the kernel sets the return address to
++		 * ret_from_* and jr's to the exception handler, which looks
++		 * more like a tail call than a function call. If nested calls
++		 * don't evict the last user address in the RPS, it will
++		 * mispredict the return and fetch from a user-controlled
++		 * address into the icache.
++		 *
++		 * More recent EVA-capable cores with MAAR to restrict
++		 * speculative accesses aren't affected.
++		 */
++		MFC0	k0, CP0_ENTRYHI
++		MTC0	k0, CP0_ENTRYHI
++#endif
+ 		.set	reorder
+ 		/* Called from user mode, new stack. */
+ 		get_saved_sp
+diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
+index 3e4491aa6d6b..789d7bf4fef3 100644
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 				      unsigned long __user *user_mask_ptr)
+ {
+ 	unsigned int real_len;
+-	cpumask_t mask;
++	cpumask_t allowed, mask;
+ 	int retval;
+ 	struct task_struct *p;
+ 
+@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ 	if (retval)
+ 		goto out_unlock;
+ 
+-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
++	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++	cpumask_and(&mask, &allowed, cpu_active_mask);
+ 
+ out_unlock:
+ 	read_unlock(&tasklist_lock);
+diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
+index 74bab9ddd0e1..c6bbf2165051 100644
+--- a/arch/mips/kernel/relocate_kernel.S
++++ b/arch/mips/kernel/relocate_kernel.S
+@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
+ 
+ process_entry:
+ 	PTR_L		s2, (s0)
+-	PTR_ADD		s0, s0, SZREG
++	PTR_ADDIU	s0, s0, SZREG
+ 
+ 	/*
+ 	 * In case of a kdump/crash kernel, the indirection page is not
+@@ -61,9 +61,9 @@ copy_word:
+ 	/* copy page word by word */
+ 	REG_L		s5, (s2)
+ 	REG_S		s5, (s4)
+-	PTR_ADD		s4, s4, SZREG
+-	PTR_ADD		s2, s2, SZREG
+-	LONG_SUB	s6, s6, 1
++	PTR_ADDIU	s4, s4, SZREG
++	PTR_ADDIU	s2, s2, SZREG
++	LONG_ADDIU	s6, s6, -1
+ 	beq		s6, zero, process_entry
+ 	b		copy_word
+ 	b		process_entry
+diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
+index 19a7705f2a01..5d7f2634996f 100644
+--- a/arch/mips/kernel/signal32.c
++++ b/arch/mips/kernel/signal32.c
+@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
+index d2d1c1933bc9..5f5f44edc77d 100644
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
+ void show_stack(struct task_struct *task, unsigned long *sp)
+ {
+ 	struct pt_regs regs;
++	mm_segment_t old_fs = get_fs();
+ 	if (sp) {
+ 		regs.regs[29] = (unsigned long)sp;
+ 		regs.regs[31] = 0;
+@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
+ 			prepare_frametrace(&regs);
+ 		}
+ 	}
++	/*
++	 * show_stack() deals exclusively with kernel mode, so be sure to access
++	 * the stack in the kernel (not user) address space.
++	 */
++	set_fs(KERNEL_DS);
+ 	show_stacktrace(task, &regs);
++	set_fs(old_fs);
+ }
+ 
+ static void show_code(unsigned int __user *pc)
+@@ -1518,6 +1525,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
+ 	const int field = 2 * sizeof(unsigned long);
+ 	int multi_match = regs->cp0_status & ST0_TS;
+ 	enum ctx_state prev_state;
++	mm_segment_t old_fs = get_fs();
+ 
+ 	prev_state = exception_enter();
+ 	show_regs(regs);
+@@ -1539,8 +1547,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
+ 		dump_tlb_all();
+ 	}
+ 
++	if (!user_mode(regs))
++		set_fs(KERNEL_DS);
++
+ 	show_code((unsigned int __user *) regs->cp0_epc);
+ 
++	set_fs(old_fs);
++
+ 	/*
+ 	 * Some chips may have other causes of machine check (e.g. SB1
+ 	 * graduation timer)
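
Both hunks in this file wrap a kernel-space access in the same save/switch/restore dance around set_fs(). Reduced to its skeleton (a toy global stands in for the per-thread address-space limit; get_fs()/set_fs() here are local stubs, not the MIPS implementations):

#include <stdio.h>

enum { USER_DS, KERNEL_DS };

static int fs = USER_DS;

static int get_fs(void) { return fs; }
static void set_fs(int seg) { fs = seg; }

static void dump_kernel_stack(void)
{
	int old_fs = get_fs();

	set_fs(KERNEL_DS);		/* the dump reads kernel addresses */
	printf("dumping with fs=%d\n", get_fs());
	set_fs(old_fs);			/* always restore the caller's mode */
}

int main(void)
{
	dump_kernel_stack();
	printf("restored fs=%d\n", get_fs());
	return 0;
}
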
+diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
+index af84bef0c90d..eb3efd137fd1 100644
+--- a/arch/mips/kernel/unaligned.c
++++ b/arch/mips/kernel/unaligned.c
+@@ -438,7 +438,7 @@ do {                                                        \
+ 		: "memory");                                \
+ } while(0)
+ 
+-#define     StoreDW(addr, value, res) \
++#define     _StoreDW(addr, value, res) \
+ do {                                                        \
+ 		__asm__ __volatile__ (                      \
+ 			".set\tpush\n\t"		    \
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index 6ab10573490d..d01ade63492f 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
+ {
+ 	return ltq_perfcount_irq;
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ unsigned int get_c0_compare_int(void)
+ {
+diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
+index 185e68261f45..a7f7d9ffb402 100644
+--- a/arch/mips/mti-malta/malta-time.c
++++ b/arch/mips/mti-malta/malta-time.c
+@@ -148,6 +148,7 @@ int get_c0_perfcount_int(void)
+ 
+ 	return mips_cpu_perf_irq;
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ unsigned int get_c0_compare_int(void)
+ {
+@@ -165,14 +166,17 @@ unsigned int get_c0_compare_int(void)
+ 
+ static void __init init_rtc(void)
+ {
+-	/* stop the clock whilst setting it up */
+-	CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
++	unsigned char freq, ctrl;
+ 
+-	/* 32KHz time base */
+-	CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
++	/* Set 32KHz time base if not already set */
++	freq = CMOS_READ(RTC_FREQ_SELECT);
++	if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
++		CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
+ 
+-	/* start the clock */
+-	CMOS_WRITE(RTC_24H, RTC_CONTROL);
++	/* Ensure SET bit is clear so RTC can run */
++	ctrl = CMOS_READ(RTC_CONTROL);
++	if (ctrl & RTC_SET)
++		CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
+ }
+ 
+ void __init plat_time_init(void)
+diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
+index e1d69895fb1d..a120b7a5a8fe 100644
+--- a/arch/mips/mti-sead3/sead3-time.c
++++ b/arch/mips/mti-sead3/sead3-time.c
+@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
+ 		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ 	return -1;
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ unsigned int get_c0_compare_int(void)
+ {
+diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
+index 67889fcea8aa..ab73f6f405bb 100644
+--- a/arch/mips/pistachio/time.c
++++ b/arch/mips/pistachio/time.c
+@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
+ {
+ 	return gic_get_c0_perfcount_int();
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ void __init plat_time_init(void)
+ {
+diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
+index 7cf91b92e9d1..199ace4ca1ad 100644
+--- a/arch/mips/ralink/irq.c
++++ b/arch/mips/ralink/irq.c
+@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
+ {
+ 	return rt_perfcount_irq;
+ }
++EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+ 
+ unsigned int get_c0_compare_int(void)
+ {
+diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
+index d3a831ac0f92..da50e0c9c57e 100644
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
+ 
+ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
+ {
+-	memset(to, 0, sizeof *to);
+-
+ 	if (copy_from_user(to, from, 3*sizeof(int)) ||
+ 	    copy_from_user(to->_sifields._pad,
+ 			   from->_sifields._pad, SI_PAD_SIZE32))
+diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
+index 1f0aa2024e94..6424249d5f78 100644
+--- a/arch/sparc/include/asm/visasm.h
++++ b/arch/sparc/include/asm/visasm.h
+@@ -28,16 +28,10 @@
+  * Must preserve %o5 between VISEntryHalf and VISExitHalf */
+ 
+ #define VISEntryHalf					\
+-	rd		%fprs, %o5;			\
+-	andcc		%o5, FPRS_FEF, %g0;		\
+-	be,pt		%icc, 297f;			\
+-	 sethi		%hi(298f), %g7;			\
+-	sethi		%hi(VISenterhalf), %g1;		\
+-	jmpl		%g1 + %lo(VISenterhalf), %g0;	\
+-	 or		%g7, %lo(298f), %g7;		\
+-	clr		%o5;				\
+-297:	wr		%o5, FPRS_FEF, %fprs;		\
+-298:
++	VISEntry
++
++#define VISExitHalf					\
++	VISExit
+ 
+ #define VISEntryHalfFast(fail_label)			\
+ 	rd		%fprs, %o5;			\
+@@ -47,7 +41,7 @@
+ 	ba,a,pt		%xcc, fail_label;		\
+ 297:	wr		%o5, FPRS_FEF, %fprs;
+ 
+-#define VISExitHalf					\
++#define VISExitHalfFast					\
+ 	wr		%o5, 0, %fprs;
+ 
+ #ifndef __ASSEMBLY__
+diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
+index 140527a20e7d..83aeeb1dffdb 100644
+--- a/arch/sparc/lib/NG4memcpy.S
++++ b/arch/sparc/lib/NG4memcpy.S
+@@ -240,8 +240,11 @@ FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
+ 	add		%o0, 0x40, %o0
+ 	bne,pt		%icc, 1b
+ 	 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
++#ifdef NON_USER_COPY
++	VISExitHalfFast
++#else
+ 	VISExitHalf
+-
++#endif
+ 	brz,pn		%o2, .Lexit
+ 	 cmp		%o2, 19
+ 	ble,pn		%icc, .Lsmall_unaligned
+diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
+index b320ae9e2e2e..a063d84336d6 100644
+--- a/arch/sparc/lib/VISsave.S
++++ b/arch/sparc/lib/VISsave.S
+@@ -44,9 +44,8 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 
+ 	 stx		%g3, [%g6 + TI_GSR]
+ 2:	add		%g6, %g1, %g3
+-	cmp		%o5, FPRS_DU
+-	be,pn		%icc, 6f
+-	 sll		%g1, 3, %g1
++	mov		FPRS_DU | FPRS_DL | FPRS_FEF, %o5
++	sll		%g1, 3, %g1
+ 	stb		%o5, [%g3 + TI_FPSAVED]
+ 	rd		%gsr, %g2
+ 	add		%g6, %g1, %g3
+@@ -80,65 +79,3 @@ vis1:	ldub		[%g6 + TI_FPSAVED], %g3
+ 	.align		32
+ 80:	jmpl		%g7 + %g0, %g0
+ 	 nop
+-
+-6:	ldub		[%g3 + TI_FPSAVED], %o5
+-	or		%o5, FPRS_DU, %o5
+-	add		%g6, TI_FPREGS+0x80, %g2
+-	stb		%o5, [%g3 + TI_FPSAVED]
+-
+-	sll		%g1, 5, %g1
+-	add		%g6, TI_FPREGS+0xc0, %g3
+-	wr		%g0, FPRS_FEF, %fprs
+-	membar		#Sync
+-	stda		%f32, [%g2 + %g1] ASI_BLK_P
+-	stda		%f48, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 80f
+-	 nop
+-
+-	.align		32
+-80:	jmpl		%g7 + %g0, %g0
+-	 nop
+-
+-	.align		32
+-VISenterhalf:
+-	ldub		[%g6 + TI_FPDEPTH], %g1
+-	brnz,a,pn	%g1, 1f
+-	 cmp		%g1, 1
+-	stb		%g0, [%g6 + TI_FPSAVED]
+-	stx		%fsr, [%g6 + TI_XFSR]
+-	clr		%o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%g0, FPRS_FEF, %fprs
+-
+-1:	bne,pn		%icc, 2f
+-	 srl		%g1, 1, %g1
+-	ba,pt		%xcc, vis1
+-	 sub		%g7, 8, %g7
+-2:	addcc		%g6, %g1, %g3
+-	sll		%g1, 3, %g1
+-	andn		%o5, FPRS_DU, %g2
+-	stb		%g2, [%g3 + TI_FPSAVED]
+-
+-	rd		%gsr, %g2
+-	add		%g6, %g1, %g3
+-	stx		%g2, [%g3 + TI_GSR]
+-	add		%g6, %g1, %g2
+-	stx		%fsr, [%g2 + TI_XFSR]
+-	sll		%g1, 5, %g1
+-3:	andcc		%o5, FPRS_DL, %g0
+-	be,pn		%icc, 4f
+-	 add		%g6, TI_FPREGS, %g2
+-
+-	add		%g6, TI_FPREGS+0x40, %g3
+-	membar		#Sync
+-	stda		%f0, [%g2 + %g1] ASI_BLK_P
+-	stda		%f16, [%g3 + %g1] ASI_BLK_P
+-	membar		#Sync
+-	ba,pt		%xcc, 4f
+-	 nop
+-
+-	.align		32
+-4:	and		%o5, FPRS_DU, %o5
+-	jmpl		%g7 + %g0, %g0
+-	 wr		%o5, FPRS_FEF, %fprs
+diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
+index 1d649a95660c..8069ce12f20b 100644
+--- a/arch/sparc/lib/ksyms.c
++++ b/arch/sparc/lib/ksyms.c
+@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
+ void VISenter(void);
+ EXPORT_SYMBOL(VISenter);
+ 
+-/* CRYPTO code needs this */
+-void VISenterhalf(void);
+-EXPORT_SYMBOL(VISenterhalf);
+-
+ extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+ extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+ 		unsigned long *);
+diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
+index e8c2c04143cd..c667e104a0c2 100644
+--- a/arch/tile/kernel/compat_signal.c
++++ b/arch/tile/kernel/compat_signal.c
+@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
+ 	if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
+ 		return -EFAULT;
+ 
+-	memset(to, 0, sizeof(*to));
+-
+ 	err = __get_user(to->si_signo, &from->si_signo);
+ 	err |= __get_user(to->si_errno, &from->si_errno);
+ 	err |= __get_user(to->si_code, &from->si_code);
+diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
+index 02c2eff7478d..4bd6c197563d 100644
+--- a/arch/x86/kernel/entry_64.S
++++ b/arch/x86/kernel/entry_64.S
+@@ -793,8 +793,6 @@ retint_kernel:
+ restore_c_regs_and_iret:
+ 	RESTORE_C_REGS
+ 	REMOVE_PT_GPREGS_FROM_STACK 8
+-
+-irq_return:
+ 	INTERRUPT_RETURN
+ 
+ ENTRY(native_iret)
+@@ -1413,11 +1411,12 @@ ENTRY(nmi)
+ 	 *  If the variable is not set and the stack is not the NMI
+ 	 *  stack then:
+ 	 *    o Set the special variable on the stack
+-	 *    o Copy the interrupt frame into a "saved" location on the stack
+-	 *    o Copy the interrupt frame into a "copy" location on the stack
++	 *    o Copy the interrupt frame into an "outermost" location on the
++	 *      stack
++	 *    o Copy the interrupt frame into an "iret" location on the stack
+ 	 *    o Continue processing the NMI
+ 	 *  If the variable is set or the previous stack is the NMI stack:
+-	 *    o Modify the "copy" location to jump to the repeate_nmi
++	 *    o Modify the "iret" location to jump to the repeat_nmi
+ 	 *    o return back to the first NMI
+ 	 *
+ 	 * Now on exit of the first NMI, we first clear the stack variable
+@@ -1426,32 +1425,151 @@ ENTRY(nmi)
+ 	 * a nested NMI that updated the copy interrupt stack frame, a
+ 	 * jump will be made to the repeat_nmi code that will handle the second
+ 	 * NMI.
++	 *
++	 * However, espfix prevents us from directly returning to userspace
++	 * with a single IRET instruction.  Similarly, IRET to user mode
++	 * can fault.  We therefore handle NMIs from user space like
++	 * other IST entries.
+ 	 */
+ 
+ 	/* Use %rdx as our temp variable throughout */
+ 	pushq_cfi %rdx
+ 	CFI_REL_OFFSET rdx, 0
+ 
++	testb	$3, CS-RIP+8(%rsp)
++	jz	.Lnmi_from_kernel
++
+ 	/*
+-	 * If %cs was not the kernel segment, then the NMI triggered in user
+-	 * space, which means it is definitely not nested.
++	 * NMI from user mode.  We need to run on the thread stack, but we
++	 * can't go through the normal entry paths: NMIs are masked, and
++	 * we don't want to enable interrupts, because then we'll end
++	 * up in an awkward situation in which IRQs are on but NMIs
++	 * are off.
+ 	 */
+-	cmpl $__KERNEL_CS, 16(%rsp)
+-	jne first_nmi
++
++	SWAPGS
++	cld
++	movq	%rsp, %rdx
++	movq	PER_CPU_VAR(kernel_stack), %rsp
++	pushq	5*8(%rdx)	/* pt_regs->ss */
++	pushq	4*8(%rdx)	/* pt_regs->rsp */
++	pushq	3*8(%rdx)	/* pt_regs->flags */
++	pushq	2*8(%rdx)	/* pt_regs->cs */
++	pushq	1*8(%rdx)	/* pt_regs->rip */
++	pushq   $-1		/* pt_regs->orig_ax */
++	pushq   %rdi		/* pt_regs->di */
++	pushq   %rsi		/* pt_regs->si */
++	pushq   (%rdx)		/* pt_regs->dx */
++	pushq   %rcx		/* pt_regs->cx */
++	pushq   %rax		/* pt_regs->ax */
++	pushq   %r8		/* pt_regs->r8 */
++	pushq   %r9		/* pt_regs->r9 */
++	pushq   %r10		/* pt_regs->r10 */
++	pushq   %r11		/* pt_regs->r11 */
++	pushq	%rbx		/* pt_regs->rbx */
++	pushq	%rbp		/* pt_regs->rbp */
++	pushq	%r12		/* pt_regs->r12 */
++	pushq	%r13		/* pt_regs->r13 */
++	pushq	%r14		/* pt_regs->r14 */
++	pushq	%r15		/* pt_regs->r15 */
+ 
+ 	/*
+-	 * Check the special variable on the stack to see if NMIs are
+-	 * executing.
++	 * At this point we no longer need to worry about stack damage
++	 * due to nesting -- we're on the normal thread stack and we're
++	 * done with the NMI stack.
++	 */
++	movq	%rsp, %rdi
++	movq	$-1, %rsi
++	call	do_nmi
++
++	/*
++	 * Return back to user mode.  We must *not* do the normal exit
++	 * work, because we don't want to enable interrupts.  Fortunately,
++	 * do_nmi doesn't modify pt_regs.
++	 */
++	SWAPGS
++	jmp	restore_c_regs_and_iret
++
++.Lnmi_from_kernel:
++	/*
++	 * Here's what our stack frame will look like:
++	 * +---------------------------------------------------------+
++	 * | original SS                                             |
++	 * | original Return RSP                                     |
++	 * | original RFLAGS                                         |
++	 * | original CS                                             |
++	 * | original RIP                                            |
++	 * +---------------------------------------------------------+
++	 * | temp storage for rdx                                    |
++	 * +---------------------------------------------------------+
++	 * | "NMI executing" variable                                |
++	 * +---------------------------------------------------------+
++	 * | iret SS          } Copied from "outermost" frame        |
++	 * | iret Return RSP  } on each loop iteration; overwritten  |
++	 * | iret RFLAGS      } by a nested NMI to force another     |
++	 * | iret CS          } iteration if needed.                 |
++	 * | iret RIP         }                                      |
++	 * +---------------------------------------------------------+
++	 * | outermost SS          } initialized in first_nmi;       |
++	 * | outermost Return RSP  } will not be changed before      |
++	 * | outermost RFLAGS      } NMI processing is done.         |
++	 * | outermost CS          } Copied to "iret" frame on each  |
++	 * | outermost RIP         } iteration.                      |
++	 * +---------------------------------------------------------+
++	 * | pt_regs                                                 |
++	 * +---------------------------------------------------------+
++	 *
++	 * The "original" frame is used by hardware.  Before re-enabling
++	 * NMIs, we need to be done with it, and we need to leave enough
++	 * space for the asm code here.
++	 *
++	 * We return by executing IRET while RSP points to the "iret" frame.
++	 * That will either return for real or it will loop back into NMI
++	 * processing.
++	 *
++	 * The "outermost" frame is copied to the "iret" frame on each
++	 * iteration of the loop, so each iteration starts with the "iret"
++	 * frame pointing to the final return target.
++	 */
++
++	/*
++	 * Determine whether we're a nested NMI.
++	 *
++	 * If we interrupted kernel code between repeat_nmi and
++	 * end_repeat_nmi, then we are a nested NMI.  We must not
++	 * modify the "iret" frame because it's being written by
++	 * the outer NMI.  That's okay; the outer NMI handler is
++	 * about to call do_nmi anyway, so we can just
++	 * resume the outer NMI.
++	 */
++
++	movq	$repeat_nmi, %rdx
++	cmpq	8(%rsp), %rdx
++	ja	1f
++	movq	$end_repeat_nmi, %rdx
++	cmpq	8(%rsp), %rdx
++	ja	nested_nmi_out
++1:
++
++	/*
++	 * Now check "NMI executing".  If it's set, then we're nested.
++	 * This will not detect if we interrupted an outer NMI just
++	 * before IRET.
+ 	 */
+ 	cmpl $1, -8(%rsp)
+ 	je nested_nmi
+ 
+ 	/*
+-	 * Now test if the previous stack was an NMI stack.
+-	 * We need the double check. We check the NMI stack to satisfy the
+-	 * race when the first NMI clears the variable before returning.
+-	 * We check the variable because the first NMI could be in a
+-	 * breakpoint routine using a breakpoint stack.
++	 * Now test if the previous stack was an NMI stack.  This covers
++	 * the case where we interrupt an outer NMI after it clears
++	 * "NMI executing" but before IRET.  We need to be careful, though:
++	 * there is one case in which RSP could point to the NMI stack
++	 * despite there being no NMI active: naughty userspace controls
++	 * RSP at the very beginning of the SYSCALL targets.  We can
++	 * pull a fast one on naughty userspace, though: we program
++	 * SYSCALL to mask DF, so userspace cannot cause DF to be set
++	 * if it controls the kernel's RSP.  We set DF before we clear
++	 * "NMI executing".
+ 	 */
+ 	lea	6*8(%rsp), %rdx
+ 	/* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
+@@ -1462,25 +1580,21 @@ ENTRY(nmi)
+ 	cmpq	%rdx, 4*8(%rsp)
+ 	/* If it is below the NMI stack, it is a normal NMI */
+ 	jb	first_nmi
+-	/* Ah, it is within the NMI stack, treat it as nested */
++
++	/* Ah, it is within the NMI stack. */
++
++	testb	$(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
++	jz	first_nmi	/* RSP was user controlled. */
++
++	/* This is a nested NMI. */
+ 
+ 	CFI_REMEMBER_STATE
+ 
+ nested_nmi:
+ 	/*
+-	 * Do nothing if we interrupted the fixup in repeat_nmi.
+-	 * It's about to repeat the NMI handler, so we are fine
+-	 * with ignoring this one.
++	 * Modify the "iret" frame to point to repeat_nmi, forcing another
++	 * iteration of NMI handling.
+ 	 */
+-	movq $repeat_nmi, %rdx
+-	cmpq 8(%rsp), %rdx
+-	ja 1f
+-	movq $end_repeat_nmi, %rdx
+-	cmpq 8(%rsp), %rdx
+-	ja nested_nmi_out
+-
+-1:
+-	/* Set up the interrupted NMIs stack to jump to repeat_nmi */
+ 	leaq -1*8(%rsp), %rdx
+ 	movq %rdx, %rsp
+ 	CFI_ADJUST_CFA_OFFSET 1*8
+@@ -1499,60 +1613,23 @@ nested_nmi_out:
+ 	popq_cfi %rdx
+ 	CFI_RESTORE rdx
+ 
+-	/* No need to check faults here */
++	/* We are returning to kernel mode, so this cannot result in a fault. */
+ 	INTERRUPT_RETURN
+ 
+ 	CFI_RESTORE_STATE
+ first_nmi:
+-	/*
+-	 * Because nested NMIs will use the pushed location that we
+-	 * stored in rdx, we must keep that space available.
+-	 * Here's what our stack frame will look like:
+-	 * +-------------------------+
+-	 * | original SS             |
+-	 * | original Return RSP     |
+-	 * | original RFLAGS         |
+-	 * | original CS             |
+-	 * | original RIP            |
+-	 * +-------------------------+
+-	 * | temp storage for rdx    |
+-	 * +-------------------------+
+-	 * | NMI executing variable  |
+-	 * +-------------------------+
+-	 * | copied SS               |
+-	 * | copied Return RSP       |
+-	 * | copied RFLAGS           |
+-	 * | copied CS               |
+-	 * | copied RIP              |
+-	 * +-------------------------+
+-	 * | Saved SS                |
+-	 * | Saved Return RSP        |
+-	 * | Saved RFLAGS            |
+-	 * | Saved CS                |
+-	 * | Saved RIP               |
+-	 * +-------------------------+
+-	 * | pt_regs                 |
+-	 * +-------------------------+
+-	 *
+-	 * The saved stack frame is used to fix up the copied stack frame
+-	 * that a nested NMI may change to make the interrupted NMI iret jump
+-	 * to the repeat_nmi. The original stack frame and the temp storage
+-	 * is also used by nested NMIs and can not be trusted on exit.
+-	 */
+-	/* Do not pop rdx, nested NMIs will corrupt that part of the stack */
++	/* Restore rdx. */
+ 	movq (%rsp), %rdx
+ 	CFI_RESTORE rdx
+ 
+-	/* Set the NMI executing variable on the stack. */
++	/* Set "NMI executing" on the stack. */
+ 	pushq_cfi $1
+ 
+-	/*
+-	 * Leave room for the "copied" frame
+-	 */
++	/* Leave room for the "iret" frame */
+ 	subq $(5*8), %rsp
+ 	CFI_ADJUST_CFA_OFFSET 5*8
+ 
+-	/* Copy the stack frame to the Saved frame */
++	/* Copy the "original" frame to the "outermost" frame */
+ 	.rept 5
+ 	pushq_cfi 11*8(%rsp)
+ 	.endr
+@@ -1560,6 +1637,7 @@ first_nmi:
+ 
+ 	/* Everything up to here is safe from nested NMIs */
+ 
++repeat_nmi:
+ 	/*
+ 	 * If there was a nested NMI, the first NMI's iret will return
+ 	 * here. But NMIs are still enabled and we can take another
+@@ -1568,16 +1646,21 @@ first_nmi:
+ 	 * it will just return, as we are about to repeat an NMI anyway.
+ 	 * This makes it safe to copy to the stack frame that a nested
+ 	 * NMI will update.
+-	 */
+-repeat_nmi:
+-	/*
+-	 * Update the stack variable to say we are still in NMI (the update
+-	 * is benign for the non-repeat case, where 1 was pushed just above
+-	 * to this very stack slot).
++	 *
++	 * RSP is pointing to "outermost RIP".  gsbase is unknown, but, if
++	 * we're repeating an NMI, gsbase has the same value that it had on
++	 * the first iteration.  paranoid_entry will load the kernel
++	 * gsbase if needed before we call do_nmi.
++	 *
++	 * Set "NMI executing" in case we came back here via IRET.
+ 	 */
+ 	movq $1, 10*8(%rsp)
+ 
+-	/* Make another copy, this one may be modified by nested NMIs */
++	/*
++	 * Copy the "outermost" frame to the "iret" frame.  NMIs that nest
++	 * here must not modify the "iret" frame while we're writing to
++	 * it or it will end up containing garbage.
++	 */
+ 	addq $(10*8), %rsp
+ 	CFI_ADJUST_CFA_OFFSET -10*8
+ 	.rept 5
+@@ -1588,9 +1671,9 @@ repeat_nmi:
+ end_repeat_nmi:
+ 
+ 	/*
+-	 * Everything below this point can be preempted by a nested
+-	 * NMI if the first NMI took an exception and reset our iret stack
+-	 * so that we repeat another NMI.
++	 * Everything below this point can be preempted by a nested NMI.
++	 * If this happens, then the inner NMI will change the "iret"
++	 * frame to point back to repeat_nmi.
+ 	 */
+ 	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+ 	ALLOC_PT_GPREGS_ON_STACK
+@@ -1605,29 +1688,11 @@ end_repeat_nmi:
+ 	call paranoid_entry
+ 	DEFAULT_FRAME 0
+ 
+-	/*
+-	 * Save off the CR2 register. If we take a page fault in the NMI then
+-	 * it could corrupt the CR2 value. If the NMI preempts a page fault
+-	 * handler before it was able to read the CR2 register, and then the
+-	 * NMI itself takes a page fault, the page fault that was preempted
+-	 * will read the information from the NMI page fault and not the
+-	 * origin fault. Save it off and restore it if it changes.
+-	 * Use the r12 callee-saved register.
+-	 */
+-	movq %cr2, %r12
+-
+ 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+ 	movq %rsp,%rdi
+ 	movq $-1,%rsi
+ 	call do_nmi
+ 
+-	/* Did the NMI take a page fault? Restore cr2 if it did */
+-	movq %cr2, %rcx
+-	cmpq %rcx, %r12
+-	je 1f
+-	movq %r12, %cr2
+-1:
+-	
+ 	testl %ebx,%ebx				/* swapgs needed? */
+ 	jnz nmi_restore
+ nmi_swapgs:
+@@ -1635,12 +1700,27 @@ nmi_swapgs:
+ nmi_restore:
+ 	RESTORE_EXTRA_REGS
+ 	RESTORE_C_REGS
+-	/* Pop the extra iret frame at once */
++
++	/* Point RSP at the "iret" frame. */
+ 	REMOVE_PT_GPREGS_FROM_STACK 6*8
+ 
+-	/* Clear the NMI executing stack variable */
+-	movq $0, 5*8(%rsp)
+-	jmp irq_return
++	/*
++	 * Clear "NMI executing".  Set DF first so that we can easily
++	 * distinguish the remaining code between here and IRET from
++	 * the SYSCALL entry and exit paths.  On a native kernel, we
++	 * could just inspect RIP, but, on paravirt kernels,
++	 * INTERRUPT_RETURN can translate into a jump into a
++	 * hypercall page.
++	 */
++	std
++	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
++
++	/*
++	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
++	 * stack in a single instruction.  We are returning to kernel
++	 * mode, so this cannot result in a fault.
++	 */
++	INTERRUPT_RETURN
+ 	CFI_ENDPROC
+ END(nmi)
+ 
+diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
+index c3e985d1751c..d05bd2e2ee91 100644
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -408,15 +408,15 @@ static void default_do_nmi(struct pt_regs *regs)
+ NOKPROBE_SYMBOL(default_do_nmi);
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its
+- * NMI context with the CPU when the breakpoint does an iret.
+- */
+-#ifdef CONFIG_X86_32
+-/*
+- * For i386, NMIs use the same stack as the kernel, and we can
+- * add a workaround to the iret problem in C (preventing nested
+- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+- * can be in:
++ * NMIs can page fault or hit breakpoints, which will cause them to lose
++ * their NMI context with the CPU when the breakpoint or page fault does an IRET.
++ *
++ * As a result, NMIs can nest if NMIs get unmasked due to an IRET during
++ * NMI processing.  On x86_64, the asm glue protects us from nested NMIs
++ * if the outer NMI came from kernel mode, but we can still nest if the
++ * outer NMI came from user mode.
++ *
++ * To handle these nested NMIs, we have three states:
+  *
+  *  1) not running
+  *  2) executing
+@@ -430,15 +430,14 @@ NOKPROBE_SYMBOL(default_do_nmi);
+  * (Note, the latch is binary, thus multiple NMIs triggering,
+  *  when one is running, are ignored. Only one NMI is restarted.)
+  *
+- * If an NMI hits a breakpoint that executes an iret, another
+- * NMI can preempt it. We do not want to allow this new NMI
+- * to run, but we want to execute it when the first one finishes.
+- * We set the state to "latched", and the exit of the first NMI will
+- * perform a dec_return, if the result is zero (NOT_RUNNING), then
+- * it will simply exit the NMI handler. If not, the dec_return
+- * would have set the state to NMI_EXECUTING (what we want it to
+- * be when we are running). In this case, we simply jump back
+- * to rerun the NMI handler again, and restart the 'latched' NMI.
++ * If an NMI executes an iret, another NMI can preempt it. We do not
++ * want to allow this new NMI to run, but we want to execute it when the
++ * first one finishes.  We set the state to "latched", and the exit of
++ * the first NMI will perform a dec_return, if the result is zero
++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
++ * dec_return would have set the state to NMI_EXECUTING (what we want it
++ * to be when we are running). In this case, we simply jump back to
++ * rerun the NMI handler again, and restart the 'latched' NMI.
+  *
+  * No trap (breakpoint or page fault) should be hit before nmi_restart,
+  * thus there is no race between the first check of state for NOT_RUNNING
+@@ -461,49 +460,36 @@ enum nmi_states {
+ static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+ 
+-#define nmi_nesting_preprocess(regs)					\
+-	do {								\
+-		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {	\
+-			this_cpu_write(nmi_state, NMI_LATCHED);		\
+-			return;						\
+-		}							\
+-		this_cpu_write(nmi_state, NMI_EXECUTING);		\
+-		this_cpu_write(nmi_cr2, read_cr2());			\
+-	} while (0);							\
+-	nmi_restart:
+-
+-#define nmi_nesting_postprocess()					\
+-	do {								\
+-		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))	\
+-			write_cr2(this_cpu_read(nmi_cr2));		\
+-		if (this_cpu_dec_return(nmi_state))			\
+-			goto nmi_restart;				\
+-	} while (0)
+-#else /* x86_64 */
++#ifdef CONFIG_X86_64
+ /*
+- * In x86_64 things are a bit more difficult. This has the same problem
+- * where an NMI hitting a breakpoint that calls iret will remove the
+- * NMI context, allowing a nested NMI to enter. What makes this more
+- * difficult is that both NMIs and breakpoints have their own stack.
+- * When a new NMI or breakpoint is executed, the stack is set to a fixed
+- * point. If an NMI is nested, it will have its stack set at that same
+- * fixed address that the first NMI had, and will start corrupting the
+- * stack. This is handled in entry_64.S, but the same problem exists with
+- * the breakpoint stack.
++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint.  Without
++ * some care, the inner breakpoint will clobber the outer breakpoint's
++ * stack.
+  *
+- * If a breakpoint is being processed, and the debug stack is being used,
+- * if an NMI comes in and also hits a breakpoint, the stack pointer
+- * will be set to the same fixed address as the breakpoint that was
+- * interrupted, causing that stack to be corrupted. To handle this case,
+- * check if the stack that was interrupted is the debug stack, and if
+- * so, change the IDT so that new breakpoints will use the current stack
+- * and not switch to the fixed address. On return of the NMI, switch back
+- * to the original IDT.
++ * If a breakpoint is being processed, and the debug stack is being
++ * used, if an NMI comes in and also hits a breakpoint, the stack
++ * pointer will be set to the same fixed address as the breakpoint that
++ * was interrupted, causing that stack to be corrupted. To handle this
++ * case, check if the stack that was interrupted is the debug stack, and
++ * if so, change the IDT so that new breakpoints will use the current
++ * stack and not switch to the fixed address. On return of the NMI,
++ * switch back to the original IDT.
+  */
+ static DEFINE_PER_CPU(int, update_debug_stack);
++#endif
+ 
+-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
++dotraplinkage notrace void
++do_nmi(struct pt_regs *regs, long error_code)
+ {
++	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
++		this_cpu_write(nmi_state, NMI_LATCHED);
++		return;
++	}
++	this_cpu_write(nmi_state, NMI_EXECUTING);
++	this_cpu_write(nmi_cr2, read_cr2());
++nmi_restart:
++
++#ifdef CONFIG_X86_64
+ 	/*
+ 	 * If we interrupted a breakpoint, it is possible that
+ 	 * the nmi handler will have breakpoints too. We need to
+@@ -514,22 +500,8 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
+ 		debug_stack_set_zero();
+ 		this_cpu_write(update_debug_stack, 1);
+ 	}
+-}
+-
+-static inline void nmi_nesting_postprocess(void)
+-{
+-	if (unlikely(this_cpu_read(update_debug_stack))) {
+-		debug_stack_reset();
+-		this_cpu_write(update_debug_stack, 0);
+-	}
+-}
+ #endif
+ 
+-dotraplinkage notrace void
+-do_nmi(struct pt_regs *regs, long error_code)
+-{
+-	nmi_nesting_preprocess(regs);
+-
+ 	nmi_enter();
+ 
+ 	inc_irq_stat(__nmi_count);
+@@ -539,8 +511,17 @@ do_nmi(struct pt_regs *regs, long error_code)
+ 
+ 	nmi_exit();
+ 
+-	/* On i386, may loop back to preprocess */
+-	nmi_nesting_postprocess();
++#ifdef CONFIG_X86_64
++	if (unlikely(this_cpu_read(update_debug_stack))) {
++		debug_stack_reset();
++		this_cpu_write(update_debug_stack, 0);
++	}
++#endif
++
++	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
++		write_cr2(this_cpu_read(nmi_cr2));
++	if (this_cpu_dec_return(nmi_state))
++		goto nmi_restart;
+ }
+ NOKPROBE_SYMBOL(do_nmi);
+ 
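
The comment rewritten above describes a three-state latch, and the control flow is compact enough to model in plain C. In this single-threaded toy (handle_nmi is a sketch, not the kernel's do_nmi), a nested NMI only bumps the state to LATCHED, and the dec_return on exit decides whether to stop (EXECUTING -> 0) or take one more trip through the handler (LATCHED -> 1):

#include <stdio.h>

enum { NMI_NOT_RUNNING = 0, NMI_EXECUTING = 1, NMI_LATCHED = 2 };

static int nmi_state;

static void handle_nmi(int simulate_nested)
{
	if (nmi_state != NMI_NOT_RUNNING) {
		nmi_state = NMI_LATCHED;	/* remember it for later */
		return;
	}
	nmi_state = NMI_EXECUTING;
restart:
	printf("NMI handler body runs\n");
	if (simulate_nested) {			/* a nested NMI fires here */
		simulate_nested = 0;
		handle_nmi(0);
	}
	if (--nmi_state)			/* LATCHED -> EXECUTING */
		goto restart;
}

int main(void)
{
	handle_nmi(1);	/* prints the handler body twice */
	return 0;
}
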
+diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
+index 9d28383fc1e7..c4ea87eedf8a 100644
+--- a/arch/x86/kvm/lapic.h
++++ b/arch/x86/kvm/lapic.h
+@@ -150,7 +150,7 @@ static inline bool kvm_apic_vid_enabled(struct kvm *kvm)
+ 
+ static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
+ {
+-	return vcpu->arch.apic->pending_events;
++	return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+ }
+ 
+ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector);
+diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
+index 46957ead3060..a671e837228d 100644
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 	pte_t pte;
+ 	unsigned long pfn;
+ 	struct page *page;
++	unsigned char dummy;
+ 
+ 	ptep = lookup_address((unsigned long)v, &level);
+ 	BUG_ON(ptep == NULL);
+@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 
+ 	pte = pfn_pte(pfn, prot);
+ 
++	/*
++	 * Careful: update_va_mapping() will fail if the virtual address
++	 * we're poking isn't populated in the page tables.  We don't
++	 * need to worry about the direct map (that's always in the page
++	 * tables), but we need to be careful about vmap space.  In
++	 * particular, the top level page table can lazily propagate
++	 * entries between processes, so if we've switched mms since we
++	 * vmapped the target in the first place, we might not have the
++	 * top-level page table entry populated.
++	 *
++	 * We disable preemption because we want the same mm active when
++	 * we probe the target and when we issue the hypercall.  We'll
++	 * have the same nominal mm, but if we're a kernel thread, lazy
++	 * mm dropping could change our pgd.
++	 *
++	 * Out of an abundance of caution, this uses __get_user() to fault
++	 * in the target address just in case there's some obscure case
++	 * in which the target address isn't readable.
++	 */
++
++	preempt_disable();
++
++	pagefault_disable();	/* Avoid warnings due to being atomic. */
++	__get_user(dummy, (unsigned char __user __force *)v);
++	pagefault_enable();
++
+ 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+ 		BUG();
+ 
+@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
+ 				BUG();
+ 	} else
+ 		kmap_flush_unused();
++
++	preempt_enable();
+ }
+ 
+ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+ 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
+ 	int i;
+ 
++	/*
++	 * We need to mark all aliases of the LDT pages RO.  We
++	 * don't need to call vm_flush_aliases(), though, since that's
++	 * only responsible for flushing aliases out of the TLBs, not the
++	 * page tables, and Xen will flush the TLB for us if needed.
++	 *
++	 * To avoid confusing future readers: none of this is necessary
++	 * to load the LDT.  The hypervisor only checks this when the
++	 * LDT is faulted in due to subsequent descriptor access.
++	 */
++
+ 	for(i = 0; i < entries; i += entries_per_page)
+ 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
+ }
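
The probe added above uses __get_user() to fault the target address into the page tables before the hypercall, since update_va_mapping() fails on an unpopulated vmap entry. A loose userspace analogue of the fault-in step (assumptions: Linux; anonymous mmap stands in for the lazily-populated mapping, memset for the consumer that must not fault; none of this is Xen code):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	volatile unsigned char dummy = p[0];	/* the fault-in probe */
	(void)dummy;

	memset(p, 0xaa, len);			/* "must not fault" consumer */
	printf("first byte: %#x\n", p[0]);
	return munmap(p, len);
}
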
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 53f253574abe..010ce0b1f517 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -522,6 +522,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
+ #  define rbd_assert(expr)	((void) 0)
+ #endif /* !RBD_DEBUG */
+ 
++static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
+ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
+ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
+ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
+@@ -1797,6 +1798,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
+ 	obj_request_done_set(obj_request);
+ }
+ 
++static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
++{
++	dout("%s: obj %p\n", __func__, obj_request);
++
++	if (obj_request_img_data_test(obj_request))
++		rbd_osd_copyup_callback(obj_request);
++	else
++		obj_request_done_set(obj_request);
++}
++
+ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 				struct ceph_msg *msg)
+ {
+@@ -1845,6 +1856,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
+ 		rbd_osd_discard_callback(obj_request);
+ 		break;
+ 	case CEPH_OSD_OP_CALL:
++		rbd_osd_call_callback(obj_request);
++		break;
+ 	case CEPH_OSD_OP_NOTIFY_ACK:
+ 	case CEPH_OSD_OP_WATCH:
+ 		rbd_osd_trivial_callback(obj_request);
+@@ -2509,13 +2522,15 @@ out_unwind:
+ }
+ 
+ static void
+-rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
++rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
+ {
+ 	struct rbd_img_request *img_request;
+ 	struct rbd_device *rbd_dev;
+ 	struct page **pages;
+ 	u32 page_count;
+ 
++	dout("%s: obj %p\n", __func__, obj_request);
++
+ 	rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
+ 		obj_request->type == OBJ_REQUEST_NODATA);
+ 	rbd_assert(obj_request_img_data_test(obj_request));
+@@ -2542,9 +2557,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
+ 	if (!obj_request->result)
+ 		obj_request->xferred = obj_request->length;
+ 
+-	/* Finish up with the normal image object callback */
+-
+-	rbd_img_obj_callback(obj_request);
++	obj_request_done_set(obj_request);
+ }
+ 
+ static void
+@@ -2629,7 +2642,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
+ 
+ 	/* All set, send it off. */
+ 
+-	orig_request->callback = rbd_img_obj_copyup_callback;
+ 	osdc = &rbd_dev->rbd_client->client->osdc;
+ 	img_result = rbd_obj_request_submit(osdc, orig_request);
+ 	if (!img_result)
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index da8faf78536a..5643b65cee20 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
+ static void start_khwrngd(void)
+ {
+ 	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
+-	if (hwrng_fill == ERR_PTR(-ENOMEM)) {
++	if (IS_ERR(hwrng_fill)) {
+ 		pr_err("hwrng_fill thread creation failed");
+ 		hwrng_fill = NULL;
+ 	}
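
kthread_run() reports failure by encoding an errno into the pointer itself, and the old test only matched one specific encoding, ERR_PTR(-ENOMEM); any other error slipped through as a "valid" pointer. A small userspace re-implementation of the helpers (mirroring include/linux/err.h, simplified) makes the difference visible:

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

/* Matches every errno-encoded pointer, not just one value. */
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *t = ERR_PTR(-11);			/* -EAGAIN, not -ENOMEM */

	if (t == ERR_PTR(-12))			/* the old, buggy test */
		printf("caught by == ERR_PTR(-ENOMEM)\n");
	if (IS_ERR(t))				/* the fixed test */
		printf("caught by IS_ERR(): %ld\n", PTR_ERR(t));
	return 0;
}
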
+diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
+index a43048b5b05f..3c1a123f909c 100644
+--- a/drivers/char/i8k.c
++++ b/drivers/char/i8k.c
+@@ -900,6 +900,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
+ 
+ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+ 
++static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
++	{
++		/*
++		 * CPU fan speed going up and down on Dell Studio XPS 8100
++		 * for unknown reasons.
++		 */
++		.ident = "Dell Studio XPS 8100",
++		.matches = {
++			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
++			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
++		},
++	},
++	{ }
++};
++
+ /*
+  * Probe for the presence of a supported laptop.
+  */
+@@ -911,7 +926,8 @@ static int __init i8k_probe(void)
+ 	/*
+ 	 * Get DMI information
+ 	 */
+-	if (!dmi_check_system(i8k_dmi_table)) {
++	if (!dmi_check_system(i8k_dmi_table) ||
++	    dmi_check_system(i8k_blacklist_dmi_table)) {
+ 		if (!ignore_dmi && !force)
+ 			return -ENODEV;
+ 
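
The probe change above pairs a whitelist with a blacklist: the machine must match a supported-hardware DMI table and must not match a known-broken one. A minimal standalone sketch of that idiom, with hypothetical tables and strings (not the driver's own):

	#include <linux/dmi.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	static struct dmi_system_id supported[] __initdata = {
		{
			.ident = "Example vendor machines",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			},
		},
		{ }
	};

	static struct dmi_system_id broken[] __initdata = {
		{
			.ident = "Example model with fan quirks",
			.matches = {
				DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Example Inc."),
				DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Model X"),
			},
		},
		{ }
	};

	static int __init example_probe(void)
	{
		/* require a supported model, then veto known-broken ones */
		if (!dmi_check_system(supported) || dmi_check_system(broken))
			return -ENODEV;
		return 0;
	}
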
+diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c
+index 0dd8a4b12747..4a375ead70e9 100644
+--- a/drivers/clk/keystone/pll.c
++++ b/drivers/clk/keystone/pll.c
+@@ -37,7 +37,8 @@
+  *	Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL
+  *	or PA PLL available on keystone2. These PLLs are controlled by
+  *	this register. Main PLL is controlled by a PLL controller.
+- * @pllm: PLL register map address
++ * @pllm: PLL register map address for multiplier bits
++ * @pllod: PLL register map address for post divider bits
+  * @pll_ctl0: PLL controller map address
+  * @pllm_lower_mask: multiplier lower mask
+  * @pllm_upper_mask: multiplier upper mask
+@@ -53,6 +54,7 @@ struct clk_pll_data {
+ 	u32 phy_pllm;
+ 	u32 phy_pll_ctl0;
+ 	void __iomem *pllm;
++	void __iomem *pllod;
+ 	void __iomem *pll_ctl0;
+ 	u32 pllm_lower_mask;
+ 	u32 pllm_upper_mask;
+@@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw,
+ 		/* read post divider from od bits*/
+ 		postdiv = ((val & pll_data->clkod_mask) >>
+ 				 pll_data->clkod_shift) + 1;
+-	else
++	else if (pll_data->pllod) {
++		postdiv = readl(pll_data->pllod);
++		postdiv = ((postdiv & pll_data->clkod_mask) >>
++				pll_data->clkod_shift) + 1;
++	} else
+ 		postdiv = pll_data->postdiv;
+ 
+ 	rate /= (prediv + 1);
+@@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
+ 		/* assume the PLL has output divider register bits */
+ 		pll_data->clkod_mask = CLKOD_MASK;
+ 		pll_data->clkod_shift = CLKOD_SHIFT;
++
++		/*
++		 * Check if there is a post-divider register. If not,
++		 * assume the od bits are part of the control register.
++		 */
++		i = of_property_match_string(node, "reg-names",
++					     "post-divider");
++		pll_data->pllod = of_iomap(node, i);
+ 	}
+ 
+ 	i = of_property_match_string(node, "reg-names", "control");
+ 	pll_data->pll_ctl0 = of_iomap(node, i);
+ 	if (!pll_data->pll_ctl0) {
+ 		pr_err("%s: ioremap failed\n", __func__);
++		iounmap(pll_data->pllod);
+ 		goto out;
+ 	}
+ 
+@@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl)
+ 		pll_data->pllm = of_iomap(node, i);
+ 		if (!pll_data->pllm) {
+ 			iounmap(pll_data->pll_ctl0);
++			iounmap(pll_data->pllod);
+ 			goto out;
+ 		}
+ 	}
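
A detail worth noting in the lookup above: when "post-divider" is absent from reg-names, of_property_match_string() returns a negative errno, and of_iomap() fails cleanly (returns NULL) for a negative index, which is why the region can be treated as optional without an explicit existence check. A minimal sketch of that optional-region idiom, with a hypothetical helper name:

	#include <linux/of.h>
	#include <linux/of_address.h>

	static void __iomem *map_optional_region(struct device_node *np,
						 const char *name)
	{
		int i = of_property_match_string(np, "reg-names", name);

		/* i < 0 when the name is missing; of_iomap() then returns
		 * NULL, so a missing region simply maps to a NULL pointer */
		return of_iomap(np, i);
	}
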
+diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
+index 48f453555f1f..ede9e9e3c419 100644
+--- a/drivers/crypto/ixp4xx_crypto.c
++++ b/drivers/crypto/ixp4xx_crypto.c
+@@ -904,7 +904,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
+ 		crypt->mode |= NPE_OP_NOT_IN_PLACE;
+ 		/* This was never tested by Intel
+ 		 * for more than one dst buffer, I think. */
+-		BUG_ON(req->dst->length < nbytes);
+ 		req_ctx->dst = NULL;
+ 		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ 					flags, DMA_FROM_DEVICE))
+diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
+index 67f80813a06f..e4311ce0cd78 100644
+--- a/drivers/crypto/nx/nx-aes-ccm.c
++++ b/drivers/crypto/nx/nx-aes-ccm.c
+@@ -494,8 +494,9 @@ out:
+ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ 	struct blkcipher_desc desc;
+-	u8 *iv = nx_ctx->priv.ccm.iv;
++	u8 *iv = rctx->iv;
+ 
+ 	iv[0] = 3;
+ 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aead_request *req)
+ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ 	struct blkcipher_desc desc;
+-	u8 *iv = nx_ctx->priv.ccm.iv;
++	u8 *iv = rctx->iv;
+ 
+ 	iv[0] = 3;
+ 	memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
+index 2617cd4d54dd..dd7e9f3f5b6b 100644
+--- a/drivers/crypto/nx/nx-aes-ctr.c
++++ b/drivers/crypto/nx/nx-aes-ctr.c
+@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct crypto_tfm *tfm,
+ 	if (key_len < CTR_RFC3686_NONCE_SIZE)
+ 		return -EINVAL;
+ 
+-	memcpy(nx_ctx->priv.ctr.iv,
++	memcpy(nx_ctx->priv.ctr.nonce,
+ 	       in_key + key_len - CTR_RFC3686_NONCE_SIZE,
+ 	       CTR_RFC3686_NONCE_SIZE);
+ 
+@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
+ 				unsigned int           nbytes)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+-	u8 *iv = nx_ctx->priv.ctr.iv;
++	u8 iv[16];
+ 
++	memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
+ 	memcpy(iv + CTR_RFC3686_NONCE_SIZE,
+ 	       desc->info, CTR_RFC3686_IV_SIZE);
+ 	iv[12] = iv[13] = iv[14] = 0;
+ 	iv[15] = 1;
+ 
+-	desc->info = nx_ctx->priv.ctr.iv;
++	desc->info = iv;
+ 
+ 	return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ }
+diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
+index 88c562434bc0..c6ebeb644db4 100644
+--- a/drivers/crypto/nx/nx-aes-gcm.c
++++ b/drivers/crypto/nx/nx-aes-gcm.c
+@@ -330,6 +330,7 @@ out:
+ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ 	struct blkcipher_desc desc;
+ 	unsigned int nbytes = req->cryptlen;
+@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+ 
+ 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ 
+-	desc.info = nx_ctx->priv.gcm.iv;
++	desc.info = rctx->iv;
+ 	/* initialize the counter */
+ 	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+ 
+@@ -434,8 +435,8 @@ out:
+ 
+ static int gcm_aes_nx_encrypt(struct aead_request *req)
+ {
+-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+-	char *iv = nx_ctx->priv.gcm.iv;
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++	char *iv = rctx->iv;
+ 
+ 	memcpy(iv, req->iv, 12);
+ 
+@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aead_request *req)
+ 
+ static int gcm_aes_nx_decrypt(struct aead_request *req)
+ {
+-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+-	char *iv = nx_ctx->priv.gcm.iv;
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++	char *iv = rctx->iv;
+ 
+ 	memcpy(iv, req->iv, 12);
+ 
+@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aead_request *req)
+ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+-	char *iv = nx_ctx->priv.gcm.iv;
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++	char *iv = rctx->iv;
+ 	char *nonce = nx_ctx->priv.gcm.nonce;
+ 
+ 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+-	char *iv = nx_ctx->priv.gcm.iv;
++	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++	char *iv = rctx->iv;
+ 	char *nonce = nx_ctx->priv.gcm.nonce;
+ 
+ 	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
+index 8c2faffab4a3..c2f7d4befb55 100644
+--- a/drivers/crypto/nx/nx-aes-xcbc.c
++++ b/drivers/crypto/nx/nx-aes-xcbc.c
+@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
+ 			   unsigned int         key_len)
+ {
+ 	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
++	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ 
+ 	switch (key_len) {
+ 	case AES_KEYSIZE_128:
+@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
+ 		return -EINVAL;
+ 	}
+ 
+-	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
++	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
+ 
+ 	return 0;
+ }
+@@ -148,32 +149,29 @@ out:
+ 	return rc;
+ }
+ 
+-static int nx_xcbc_init(struct shash_desc *desc)
++static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+ {
+-	struct xcbc_state *sctx = shash_desc_ctx(desc);
+-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
++	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+-	struct nx_sg *out_sg;
+-	int len;
++	int err;
+ 
+-	nx_ctx_init(nx_ctx, HCOP_FC_AES);
++	err = nx_crypto_ctx_aes_xcbc_init(tfm);
++	if (err)
++		return err;
+ 
+-	memset(sctx, 0, sizeof *sctx);
++	nx_ctx_init(nx_ctx, HCOP_FC_AES);
+ 
+ 	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ 	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+ 
+-	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
+-	memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
+-
+-	len = AES_BLOCK_SIZE;
+-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+-				  &len, nx_ctx->ap->sglen);
++	return 0;
++}
+ 
+-	if (len != AES_BLOCK_SIZE)
+-		return -EINVAL;
++static int nx_xcbc_init(struct shash_desc *desc)
++{
++	struct xcbc_state *sctx = shash_desc_ctx(desc);
+ 
+-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++	memset(sctx, 0, sizeof *sctx);
+ 
+ 	return 0;
+ }
+@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_desc *desc,
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ 	struct nx_sg *in_sg;
++	struct nx_sg *out_sg;
+ 	u32 to_process = 0, leftover, total;
+ 	unsigned int max_sg_len;
+ 	unsigned long irq_flags;
+@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_desc *desc,
+ 	max_sg_len = min_t(u64, max_sg_len,
+ 				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+ 
++	data_len = AES_BLOCK_SIZE;
++	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++				  &len, nx_ctx->ap->sglen);
++
++	if (data_len != AES_BLOCK_SIZE) {
++		rc = -EINVAL;
++		goto out;
++	}
++
++	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
+ 	do {
+ 		to_process = total - to_process;
+ 		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
+ 						(u8 *) sctx->buffer,
+ 						&data_len,
+ 						max_sg_len);
+-			if (data_len != sctx->count)
+-				return -EINVAL;
++			if (data_len != sctx->count) {
++				rc = -EINVAL;
++				goto out;
++			}
+ 		}
+ 
+ 		data_len = to_process - sctx->count;
+@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_desc *desc,
+ 					&data_len,
+ 					max_sg_len);
+ 
+-		if (data_len != to_process - sctx->count)
+-			return -EINVAL;
++		if (data_len != to_process - sctx->count) {
++			rc = -EINVAL;
++			goto out;
++		}
+ 
+ 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ 					sizeof(struct nx_sg);
+@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
+ 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
+ 				 &len, nx_ctx->ap->sglen);
+ 
+-	if (len != sctx->count)
+-		return -EINVAL;
++	if (len != sctx->count) {
++		rc = -EINVAL;
++		goto out;
++	}
+ 
+ 	len = AES_BLOCK_SIZE;
+ 	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ 				  nx_ctx->ap->sglen);
+ 
+-	if (len != AES_BLOCK_SIZE)
+-		return -EINVAL;
++	if (len != AES_BLOCK_SIZE) {
++		rc = -EINVAL;
++		goto out;
++	}
+ 
+ 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg = {
+ 		.cra_blocksize   = AES_BLOCK_SIZE,
+ 		.cra_module      = THIS_MODULE,
+ 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+-		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
++		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
+ 		.cra_exit        = nx_crypto_ctx_exit,
+ 	}
+ };
+diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
+index 23621da624c3..08f8d5cd6334 100644
+--- a/drivers/crypto/nx/nx-sha256.c
++++ b/drivers/crypto/nx/nx-sha256.c
+@@ -29,30 +29,28 @@
+ #include "nx.h"
+ 
+ 
+-static int nx_sha256_init(struct shash_desc *desc)
++static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+ {
+-	struct sha256_state *sctx = shash_desc_ctx(desc);
+-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+-	int len;
+-	int rc;
++	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
++	int err;
+ 
+-	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
++	err = nx_crypto_ctx_sha_init(tfm);
++	if (err)
++		return err;
+ 
+-	memset(sctx, 0, sizeof *sctx);
++	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ 
+ 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
+ 
+ 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+ 
+-	len = SHA256_DIGEST_SIZE;
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+-				  &nx_ctx->op.outlen,
+-				  &len,
+-				  (u8 *) sctx->state,
+-				  NX_DS_SHA256);
++	return 0;
++}
+ 
+-	if (rc)
+-		goto out;
++static int nx_sha256_init(struct shash_desc *desc) {
++	struct sha256_state *sctx = shash_desc_ctx(desc);
++
++	memset(sctx, 0, sizeof *sctx);
+ 
+ 	sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ 	sctx->state[1] = __cpu_to_be32(SHA256_H1);
+@@ -64,7 +62,6 @@ static int nx_sha256_init(struct shash_desc *desc)
+ 	sctx->state[7] = __cpu_to_be32(SHA256_H7);
+ 	sctx->count = 0;
+ 
+-out:
+ 	return 0;
+ }
+ 
+@@ -74,10 +71,13 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 	struct sha256_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++	struct nx_sg *in_sg;
++	struct nx_sg *out_sg;
+ 	u64 to_process = 0, leftover, total;
+ 	unsigned long irq_flags;
+ 	int rc = 0;
+ 	int data_len;
++	u32 max_sg_len;
+ 	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
+ 
+ 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+@@ -97,6 +97,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ 
++	in_sg = nx_ctx->in_sg;
++	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++	max_sg_len = min_t(u64, max_sg_len,
++			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
++	data_len = SHA256_DIGEST_SIZE;
++	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++				  &data_len, max_sg_len);
++	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
++	if (data_len != SHA256_DIGEST_SIZE) {
++		rc = -EINVAL;
++		goto out;
++	}
++
+ 	do {
+ 		/*
+ 		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+@@ -108,25 +124,22 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
+ 
+ 		if (buf_len) {
+ 			data_len = buf_len;
+-			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-						  &nx_ctx->op.inlen,
+-						  &data_len,
+-						  (u8 *) sctx->buf,
+-						  NX_DS_SHA256);
++			in_sg = nx_build_sg_list(nx_ctx->in_sg,
++						 (u8 *) sctx->buf,
++						 &data_len,
++						 max_sg_len);
+ 
+-			if (rc || data_len != buf_len)
++			if (data_len != buf_len) {
++				rc = -EINVAL;
+ 				goto out;
++			}
+ 		}
+ 
+ 		data_len = to_process - buf_len;
+-		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-					  &nx_ctx->op.inlen,
+-					  &data_len,
+-					  (u8 *) data,
+-					  NX_DS_SHA256);
++		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
++					 &data_len, max_sg_len);
+ 
+-		if (rc)
+-			goto out;
++		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ 
+ 		to_process = (data_len + buf_len);
+ 		leftover = total - to_process;
+@@ -173,12 +186,19 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+ 	struct sha256_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++	struct nx_sg *in_sg, *out_sg;
+ 	unsigned long irq_flags;
+-	int rc;
++	u32 max_sg_len;
++	int rc = 0;
+ 	int len;
+ 
+ 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ 
++	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++	max_sg_len = min_t(u64, max_sg_len,
++			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ 	/* final is represented by continuing the operation and indicating that
+ 	 * this is not an intermediate operation */
+ 	if (sctx->count >= SHA256_BLOCK_SIZE) {
+@@ -195,25 +215,24 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
+ 	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
+ 
+ 	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-				  &nx_ctx->op.inlen,
+-				  &len,
+-				  (u8 *) sctx->buf,
+-				  NX_DS_SHA256);
++	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
++				 &len, max_sg_len);
+ 
+-	if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
++	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
++		rc = -EINVAL;
+ 		goto out;
++	}
+ 
+ 	len = SHA256_DIGEST_SIZE;
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+-				  &nx_ctx->op.outlen,
+-				  &len,
+-				  out,
+-				  NX_DS_SHA256);
++	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
+ 
+-	if (rc || len != SHA256_DIGEST_SIZE)
++	if (len != SHA256_DIGEST_SIZE) {
++		rc = -EINVAL;
+ 		goto out;
++	}
+ 
++	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
++	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ 	if (!nx_ctx->op.outlen) {
+ 		rc = -EINVAL;
+ 		goto out;
+@@ -268,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
+ 		.cra_blocksize   = SHA256_BLOCK_SIZE,
+ 		.cra_module      = THIS_MODULE,
+ 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+-		.cra_init        = nx_crypto_ctx_sha_init,
++		.cra_init        = nx_crypto_ctx_sha256_init,
+ 		.cra_exit        = nx_crypto_ctx_exit,
+ 	}
+ };
+diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
+index b3adf1022673..aff0fe58eac0 100644
+--- a/drivers/crypto/nx/nx-sha512.c
++++ b/drivers/crypto/nx/nx-sha512.c
+@@ -28,30 +28,29 @@
+ #include "nx.h"
+ 
+ 
+-static int nx_sha512_init(struct shash_desc *desc)
++static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+ {
+-	struct sha512_state *sctx = shash_desc_ctx(desc);
+-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+-	int len;
+-	int rc;
++	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
++	int err;
+ 
+-	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
++	err = nx_crypto_ctx_sha_init(tfm);
++	if (err)
++		return err;
+ 
+-	memset(sctx, 0, sizeof *sctx);
++	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+ 
+ 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
+ 
+ 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+ 
+-	len = SHA512_DIGEST_SIZE;
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+-				  &nx_ctx->op.outlen,
+-				  &len,
+-				  (u8 *)sctx->state,
+-				  NX_DS_SHA512);
++	return 0;
++}
+ 
+-	if (rc || len != SHA512_DIGEST_SIZE)
+-		goto out;
++static int nx_sha512_init(struct shash_desc *desc)
++{
++	struct sha512_state *sctx = shash_desc_ctx(desc);
++
++	memset(sctx, 0, sizeof *sctx);
+ 
+ 	sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ 	sctx->state[1] = __cpu_to_be64(SHA512_H1);
+@@ -63,7 +62,6 @@ static int nx_sha512_init(struct shash_desc *desc)
+ 	sctx->state[7] = __cpu_to_be64(SHA512_H7);
+ 	sctx->count[0] = 0;
+ 
+-out:
+ 	return 0;
+ }
+ 
+@@ -73,10 +71,13 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 	struct sha512_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++	struct nx_sg *in_sg;
++	struct nx_sg *out_sg;
+ 	u64 to_process, leftover = 0, total;
+ 	unsigned long irq_flags;
+ 	int rc = 0;
+ 	int data_len;
++	u32 max_sg_len;
+ 	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
+ 
+ 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+@@ -96,6 +97,22 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+ 
++	in_sg = nx_ctx->in_sg;
++	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++	max_sg_len = min_t(u64, max_sg_len,
++			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
++	data_len = SHA512_DIGEST_SIZE;
++	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++				  &data_len, max_sg_len);
++	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
++	if (data_len != SHA512_DIGEST_SIZE) {
++		rc = -EINVAL;
++		goto out;
++	}
++
+ 	do {
+ 		/*
+ 		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+@@ -108,25 +125,26 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
+ 
+ 		if (buf_len) {
+ 			data_len = buf_len;
+-			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-						  &nx_ctx->op.inlen,
+-						  &data_len,
+-						  (u8 *) sctx->buf,
+-						  NX_DS_SHA512);
++			in_sg = nx_build_sg_list(nx_ctx->in_sg,
++						 (u8 *) sctx->buf,
++						 &data_len, max_sg_len);
+ 
+-			if (rc || data_len != buf_len)
++			if (data_len != buf_len) {
++				rc = -EINVAL;
+ 				goto out;
++			}
+ 		}
+ 
+ 		data_len = to_process - buf_len;
+-		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-					  &nx_ctx->op.inlen,
+-					  &data_len,
+-					  (u8 *) data,
+-					  NX_DS_SHA512);
++		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
++					 &data_len, max_sg_len);
++
++		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ 
+-		if (rc || data_len != (to_process - buf_len))
++		if (data_len != (to_process - buf_len)) {
++			rc = -EINVAL;
+ 			goto out;
++		}
+ 
+ 		to_process = (data_len + buf_len);
+ 		leftover = total - to_process;
+@@ -172,13 +190,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+ 	struct sha512_state *sctx = shash_desc_ctx(desc);
+ 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++	struct nx_sg *in_sg, *out_sg;
++	u32 max_sg_len;
+ 	u64 count0;
+ 	unsigned long irq_flags;
+-	int rc;
++	int rc = 0;
+ 	int len;
+ 
+ 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+ 
++	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++	max_sg_len = min_t(u64, max_sg_len,
++			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ 	/* final is represented by continuing the operation and indicating that
+ 	 * this is not an intermediate operation */
+ 	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
+@@ -200,24 +225,20 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
+ 	csbcpb->cpb.sha512.message_bit_length_lo = count0;
+ 
+ 	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+-				  &nx_ctx->op.inlen,
+-				  &len,
+-				  (u8 *)sctx->buf,
+-				  NX_DS_SHA512);
++	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
++				 max_sg_len);
+ 
+-	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
++	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
++		rc = -EINVAL;
+ 		goto out;
++	}
+ 
+ 	len = SHA512_DIGEST_SIZE;
+-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+-				  &nx_ctx->op.outlen,
+-				  &len,
+-				  out,
+-				  NX_DS_SHA512);
++	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
++				 max_sg_len);
+ 
+-	if (rc)
+-		goto out;
++	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
++	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ 
+ 	if (!nx_ctx->op.outlen) {
+ 		rc = -EINVAL;
+@@ -273,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
+ 		.cra_blocksize   = SHA512_BLOCK_SIZE,
+ 		.cra_module      = THIS_MODULE,
+ 		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
+-		.cra_init        = nx_crypto_ctx_sha_init,
++		.cra_init        = nx_crypto_ctx_sha512_init,
+ 		.cra_exit        = nx_crypto_ctx_exit,
+ 	}
+ };
+diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
+index 1da6dc59d0dd..737d33dc50b8 100644
+--- a/drivers/crypto/nx/nx.c
++++ b/drivers/crypto/nx/nx.c
+@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
+  * @delta:  is the amount we need to crop in order to bound the list.
+  *
+  */
+-static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
++static long int trim_sg_list(struct nx_sg *sg,
++			     struct nx_sg *end,
++			     unsigned int delta,
++			     unsigned int *nbytes)
+ {
++	long int oplen;
++	long int data_back;
++	unsigned int is_delta = delta;
++
+ 	while (delta && end > sg) {
+ 		struct nx_sg *last = end - 1;
+ 
+@@ -228,54 +235,20 @@ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int d
+ 			delta -= last->len;
+ 		}
+ 	}
+-	return (sg - end) * sizeof(struct nx_sg);
+-}
+-
+-/**
+- * nx_sha_build_sg_list - walk and build sg list to sha modes
+- *			  using right bounds and limits.
+- * @nx_ctx: NX crypto context for the lists we're building
+- * @nx_sg: current sg list in or out list
+- * @op_len: current op_len to be used in order to build a sg list
+- * @nbytes:  number or bytes to be processed
+- * @offset: buf offset
+- * @mode: SHA256 or SHA512
+- */
+-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+-			  struct nx_sg 	      *nx_in_outsg,
+-			  s64		      *op_len,
+-			  unsigned int        *nbytes,
+-			  u8 		      *offset,
+-			  u32		      mode)
+-{
+-	unsigned int delta = 0;
+-	unsigned int total = *nbytes;
+-	struct nx_sg *nx_insg = nx_in_outsg;
+-	unsigned int max_sg_len;
+ 
+-	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+-			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+-	max_sg_len = min_t(u64, max_sg_len,
+-			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+-
+-	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+-	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+-
+-	switch (mode) {
+-	case NX_DS_SHA256:
+-		if (*nbytes < total)
+-			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+-		break;
+-	case NX_DS_SHA512:
+-		if (*nbytes < total)
+-			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+-		break;
+-	default:
+-		return -EINVAL;
++	/* There are cases where we need to crop the list in order to make it
++	 * a block-size multiple, but we also need to keep the data aligned.
++	 * To do that, we calculate how much data needs to be put back to be
++	 * processed.
++	 */
++	oplen = (sg - end) * sizeof(struct nx_sg);
++	if (is_delta) {
++		data_back = (abs(oplen) / AES_BLOCK_SIZE) *  sg->len;
++		data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
++		*nbytes -= data_back;
+ 	}
+-	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+ 
+-	return 0;
++	return oplen;
+ }
+ 
+ /**
+@@ -330,8 +303,8 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
+ 	/* these lengths should be negative, which will indicate to phyp that
+ 	 * the input and output parameters are scatterlists, not linear
+ 	 * buffers */
+-	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+-	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
++	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
++	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
+ 
+ 	return 0;
+ }
+@@ -662,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
+ /* entry points from the crypto tfm initializers */
+ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+ {
++	tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
+ 	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ 				  NX_MODE_AES_CCM);
+ }
+ 
+ int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
+ {
++	tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
+ 	return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ 				  NX_MODE_AES_GCM);
+ }
+diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
+index 6c9ecaaead52..c3ed83764fef 100644
+--- a/drivers/crypto/nx/nx.h
++++ b/drivers/crypto/nx/nx.h
+@@ -2,6 +2,8 @@
+ #ifndef __NX_H__
+ #define __NX_H__
+ 
++#include <crypto/ctr.h>
++
+ #define NX_NAME		"nx-crypto"
+ #define NX_STRING	"IBM Power7+ Nest Accelerator Crypto Driver"
+ #define NX_VERSION	"1.0"
+@@ -91,8 +93,11 @@ struct nx_crypto_driver {
+ 
+ #define NX_GCM4106_NONCE_LEN		(4)
+ #define NX_GCM_CTR_OFFSET		(12)
+-struct nx_gcm_priv {
++struct nx_gcm_rctx {
+ 	u8 iv[16];
++};
++
++struct nx_gcm_priv {
+ 	u8 iauth_tag[16];
+ 	u8 nonce[NX_GCM4106_NONCE_LEN];
+ };
+@@ -100,8 +105,11 @@ struct nx_gcm_priv {
+ #define NX_CCM_AES_KEY_LEN		(16)
+ #define NX_CCM4309_AES_KEY_LEN		(19)
+ #define NX_CCM4309_NONCE_LEN		(3)
+-struct nx_ccm_priv {
++struct nx_ccm_rctx {
+ 	u8 iv[16];
++};
++
++struct nx_ccm_priv {
+ 	u8 b0[16];
+ 	u8 iauth_tag[16];
+ 	u8 oauth_tag[16];
+@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
+ };
+ 
+ struct nx_ctr_priv {
+-	u8 iv[16];
++	u8 nonce[CTR_RFC3686_NONCE_SIZE];
+ };
+ 
+ struct nx_crypto_ctx {
+@@ -153,8 +161,6 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
+ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
+ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
+ 		  u32 may_sleep);
+-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+-			 s64 *, unsigned int *, u8 *, u32);
+ struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
+ int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
+ 		      struct scatterlist *, struct scatterlist *, unsigned int *,
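
The nx changes above all apply one reentrancy rule: state written per request, such as the IV, must live in the request context (sized via reqsize) rather than in the transform context, which is shared by every request in flight on the same tfm. A minimal sketch of the pattern against the 4.1-era AEAD interface the patch itself uses; the my_* names are hypothetical:

	#include <linux/crypto.h>
	#include <linux/string.h>

	struct my_rctx {
		u8 iv[16];	/* per-request scratch, safe under concurrency */
	};

	static int my_aead_init(struct crypto_tfm *tfm)
	{
		/* reserve my_rctx bytes in every aead_request on this tfm */
		tfm->crt_aead.reqsize = sizeof(struct my_rctx);
		return 0;
	}

	static int my_aead_encrypt(struct aead_request *req)
	{
		struct my_rctx *rctx = aead_request_ctx(req);

		memcpy(rctx->iv, req->iv, sizeof(rctx->iv));
		/* hand rctx->iv to the engine; no shared state is clobbered */
		return 0;
	}
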
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index 1dc5b0a17cf7..34139a8894a0 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -73,7 +73,8 @@
+ 				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ 				       ICP_QAT_HW_CIPHER_DECRYPT)
+ 
+-static atomic_t active_dev;
++static DEFINE_MUTEX(algs_lock);
++static unsigned int active_devs;
+ 
+ struct qat_alg_buf {
+ 	uint32_t len;
+@@ -1271,7 +1272,10 @@ static struct crypto_alg qat_algs[] = { {
+ 
+ int qat_algs_register(void)
+ {
+-	if (atomic_add_return(1, &active_dev) == 1) {
++	int ret = 0;
++
++	mutex_lock(&algs_lock);
++	if (++active_devs == 1) {
+ 		int i;
+ 
+ 		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+@@ -1280,21 +1284,25 @@ int qat_algs_register(void)
+ 				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
+ 				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+ 
+-		return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
++		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+ 	}
+-	return 0;
++	mutex_unlock(&algs_lock);
++	return ret;
+ }
+ 
+ int qat_algs_unregister(void)
+ {
+-	if (atomic_sub_return(1, &active_dev) == 0)
+-		return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+-	return 0;
++	int ret = 0;
++
++	mutex_lock(&algs_lock);
++	if (--active_devs == 0)
++		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
++	mutex_unlock(&algs_lock);
++	return ret;
+ }
+ 
+ int qat_algs_init(void)
+ {
+-	atomic_set(&active_dev, 0);
+ 	crypto_get_default_rng();
+ 	return 0;
+ }
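
The qat change swaps a bare atomic counter for a mutex-protected count so that the 0->1 and 1->0 transitions and the register/unregister calls they trigger happen as one critical section; with the atomic, two devices coming or going at once could race crypto_register_algs() against crypto_unregister_algs(). The core idiom, as a minimal sketch with hypothetical do_register()/do_unregister() callees:

	#include <linux/mutex.h>

	int do_register(void);		/* hypothetical */
	int do_unregister(void);	/* hypothetical */

	static DEFINE_MUTEX(refs_lock);
	static unsigned int refs;

	int take_ref(void)
	{
		int ret = 0;

		mutex_lock(&refs_lock);
		if (++refs == 1)	/* first user; transition runs locked */
			ret = do_register();
		mutex_unlock(&refs_lock);
		return ret;
	}

	int drop_ref(void)
	{
		int ret = 0;

		mutex_lock(&refs_lock);
		if (--refs == 0)	/* last user */
			ret = do_unregister();
		mutex_unlock(&refs_lock);
		return ret;
	}
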
+diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
+index 7992164ea9ec..c89a7abb523f 100644
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ 			desc->lld.mbr_sa = mem;
+ 			desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ 		}
+-		desc->lld.mbr_cfg = atchan->cfg;
+-		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
++		dwidth = at_xdmac_get_dwidth(atchan->cfg);
+ 		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+-			       ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
++			       ? dwidth
+ 			       : AT_XDMAC_CC_DWIDTH_BYTE;
+ 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
+ 			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
+ 			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
+ 			| (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE)		/* descriptor fetch */
+ 			| (len >> fixed_dwidth);				/* microblock length */
++		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
++				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
+ 		dev_dbg(chan2dev(chan),
+ 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 340f9e607cd8..3dabc52b9615 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
+ 			desc->txd.callback = last->txd.callback;
+ 			desc->txd.callback_param = last->txd.callback_param;
+ 		}
+-		last->last = false;
++		desc->last = false;
+ 
+ 		dma_cookie_assign(&desc->txd);
+ 
+@@ -2621,6 +2621,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
+ 		desc->rqcfg.brst_len = 1;
+ 
+ 	desc->rqcfg.brst_len = get_burst_len(desc, len);
++	desc->bytes_requested = len;
+ 
+ 	desc->txd.flags = flags;
+ 
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 778bbb6425b8..b0487c9f018c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1294,7 +1294,6 @@ retry:
+ 				goto retry;
+ 			}
+ 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+-			WARN(1, "fail\n");
+ 
+ 			return -EIO;
+ 		}
+diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
+index 8ae6f7f06b3a..683a9b004c11 100644
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3190,15 +3190,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+ #define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+ 
+ #define I915_READ64_2x32(lower_reg, upper_reg) ({			\
+-		u32 upper = I915_READ(upper_reg);			\
+-		u32 lower = I915_READ(lower_reg);			\
+-		u32 tmp = I915_READ(upper_reg);				\
+-		if (upper != tmp) {					\
+-			upper = tmp;					\
+-			lower = I915_READ(lower_reg);			\
+-			WARN_ON(I915_READ(upper_reg) != upper);		\
+-		}							\
+-		(u64)upper << 32 | lower; })
++	u32 upper, lower, tmp;						\
++	tmp = I915_READ(upper_reg);					\
++	do {								\
++		upper = tmp;						\
++		lower = I915_READ(lower_reg);				\
++		tmp = I915_READ(upper_reg);				\
++	} while (upper != tmp);						\
++	(u64)upper << 32 | lower; })
+ 
+ #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
+ #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
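
The rewritten I915_READ64_2x32 turns a single retry into a loop: a 64-bit counter exposed as two 32-bit registers can carry between the two reads, so the upper half must be re-read until it is stable around the read of the lower half. The same technique outside the macro, as a minimal sketch with a hypothetical rd() accessor:

	#include <linux/types.h>

	/* rd(reg) stands in for a 32-bit MMIO read in this sketch */
	static u64 read64_2x32(u32 (*rd)(int reg), int lo_reg, int hi_reg)
	{
		u32 upper, lower, tmp;

		tmp = rd(hi_reg);
		do {
			upper = tmp;		/* candidate upper half */
			lower = rd(lo_reg);	/* lower half in between */
			tmp = rd(hi_reg);	/* re-check the upper half */
		} while (upper != tmp);		/* retry if a carry hit */

		return (u64)upper << 32 | lower;
	}
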
+diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
+index 6377b22269ad..7ee23d1d1e74 100644
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
+ 	}
+ 
+ 	/* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+-	args->phys_swizzle_mode = args->swizzle_mode;
++	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
++		args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
++	else
++		args->phys_swizzle_mode = args->swizzle_mode;
+ 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
+index 68fd9fc677e3..44480c1b9738 100644
+--- a/drivers/gpu/drm/radeon/dce6_afmt.c
++++ b/drivers/gpu/drm/radeon/dce6_afmt.c
+@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
+ 	struct radeon_device *rdev = encoder->dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-	u32 offset;
+ 
+-	if (!dig || !dig->afmt || !dig->afmt->pin)
++	if (!dig || !dig->afmt || !dig->pin)
+ 		return;
+ 
+-	offset = dig->afmt->offset;
+-
+-	WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
+-	       AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
++	WREG32(AFMT_AUDIO_SRC_CONTROL +  dig->afmt->offset,
++	       AFMT_AUDIO_SRC_SELECT(dig->pin->id));
+ }
+ 
+ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+-		struct drm_connector *connector, struct drm_display_mode *mode)
++				    struct drm_connector *connector,
++				    struct drm_display_mode *mode)
+ {
+ 	struct radeon_device *rdev = encoder->dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-	u32 tmp = 0, offset;
++	u32 tmp = 0;
+ 
+-	if (!dig || !dig->afmt || !dig->afmt->pin)
++	if (!dig || !dig->afmt || !dig->pin)
+ 		return;
+ 
+-	offset = dig->afmt->pin->offset;
+-
+ 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ 		if (connector->latency_present[1])
+ 			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
+@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
+ 		else
+ 			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
+ 	}
+-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
++	WREG32_ENDPOINT(dig->pin->offset,
++			AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
+ }
+ 
+ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+-	u8 *sadb, int sad_count)
++					     u8 *sadb, int sad_count)
+ {
+ 	struct radeon_device *rdev = encoder->dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-	u32 offset, tmp;
++	u32 tmp;
+ 
+-	if (!dig || !dig->afmt || !dig->afmt->pin)
++	if (!dig || !dig->afmt || !dig->pin)
+ 		return;
+ 
+-	offset = dig->afmt->pin->offset;
+-
+ 	/* program the speaker allocation */
+-	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
++	tmp = RREG32_ENDPOINT(dig->pin->offset,
++			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ 	tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ 	/* set HDMI mode */
+ 	tmp |= HDMI_CONNECTION;
+@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
+ 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ 	else
+ 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
++	WREG32_ENDPOINT(dig->pin->offset,
++			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ }
+ 
+ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+-	u8 *sadb, int sad_count)
++					   u8 *sadb, int sad_count)
+ {
+ 	struct radeon_device *rdev = encoder->dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-	u32 offset, tmp;
++	u32 tmp;
+ 
+-	if (!dig || !dig->afmt || !dig->afmt->pin)
++	if (!dig || !dig->afmt || !dig->pin)
+ 		return;
+ 
+-	offset = dig->afmt->pin->offset;
+-
+ 	/* program the speaker allocation */
+-	tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
++	tmp = RREG32_ENDPOINT(dig->pin->offset,
++			      AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ 	tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ 	/* set DP mode */
+ 	tmp |= DP_CONNECTION;
+@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
+ 		tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ 	else
+ 		tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+-	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
++	WREG32_ENDPOINT(dig->pin->offset,
++			AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+ }
+ 
+ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+-	struct cea_sad *sads, int sad_count)
++			      struct cea_sad *sads, int sad_count)
+ {
+-	u32 offset;
+ 	int i;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+ 		{ AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ 	};
+ 
+-	if (!dig || !dig->afmt || !dig->afmt->pin)
++	if (!dig || !dig->afmt || !dig->pin)
+ 		return;
+ 
+-	offset = dig->afmt->pin->offset;
+-
+ 	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ 		u32 value = 0;
+ 		u8 stereo_freqs = 0;
+@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
+ 
+ 		value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
+ 
+-		WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
++		WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
+ 	}
+ }
+ 
+@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
+ }
+ 
+ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+-	struct radeon_crtc *crtc, unsigned int clock)
++			     struct radeon_crtc *crtc, unsigned int clock)
+ {
+ 	/* Two dtos; generally use dto0 for HDMI */
+ 	u32 value = 0;
+@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
+ }
+ 
+ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
+-	struct radeon_crtc *crtc, unsigned int clock)
++			   struct radeon_crtc *crtc, unsigned int clock)
+ {
+ 	/* Two dtos; generally use dto1 for DP */
+ 	u32 value = 0;
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index fa719c53449b..59b3d3221294 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
+ static void radeon_audio_enable(struct radeon_device *rdev,
+ 				struct r600_audio_pin *pin, u8 enable_mask)
+ {
++	struct drm_encoder *encoder;
++	struct radeon_encoder *radeon_encoder;
++	struct radeon_encoder_atom_dig *dig;
++	int pin_count = 0;
++
++	if (!pin)
++		return;
++
++	if (rdev->mode_info.mode_config_initialized) {
++		list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
++			if (radeon_encoder_is_digital(encoder)) {
++				radeon_encoder = to_radeon_encoder(encoder);
++				dig = radeon_encoder->enc_priv;
++				if (dig->pin == pin)
++					pin_count++;
++			}
++		}
++
++		if ((pin_count > 1) && (enable_mask == 0))
++			return;
++	}
++
+ 	if (rdev->audio.funcs->enable)
+ 		rdev->audio.funcs->enable(rdev, pin, enable_mask);
+ }
+@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
+ 
+ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
+ {
+-	struct radeon_encoder *radeon_encoder;
+-	struct drm_connector *connector;
+-	struct radeon_connector *radeon_connector = NULL;
++	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct cea_sad *sads;
+ 	int sad_count;
+ 
+-	list_for_each_entry(connector,
+-		&encoder->dev->mode_config.connector_list, head) {
+-		if (connector->encoder == encoder) {
+-			radeon_connector = to_radeon_connector(connector);
+-			break;
+-		}
+-	}
+-
+-	if (!radeon_connector) {
+-		DRM_ERROR("Couldn't find encoder's connector\n");
++	if (!connector)
+ 		return;
+-	}
+ 
+ 	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
+ 	if (sad_count <= 0) {
+@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
+ 	}
+ 	BUG_ON(!sads);
+ 
+-	radeon_encoder = to_radeon_encoder(encoder);
+-
+ 	if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
+ 		radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
+ 
+@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
+ 
+ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
+ {
++	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+-	struct drm_connector *connector;
+-	struct radeon_connector *radeon_connector = NULL;
+ 	u8 *sadb = NULL;
+ 	int sad_count;
+ 
+-	list_for_each_entry(connector,
+-			    &encoder->dev->mode_config.connector_list, head) {
+-		if (connector->encoder == encoder) {
+-			radeon_connector = to_radeon_connector(connector);
+-			break;
+-		}
+-	}
+-
+-	if (!radeon_connector) {
+-		DRM_ERROR("Couldn't find encoder's connector\n");
++	if (!connector)
+ 		return;
+-	}
+ 
+-	sad_count = drm_edid_to_speaker_allocation(
+-		radeon_connector_edid(connector), &sadb);
++	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
++						   &sadb);
+ 	if (sad_count < 0) {
+ 		DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
+ 			  sad_count);
+@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
+ }
+ 
+ static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
+-	struct drm_display_mode *mode)
++					      struct drm_display_mode *mode)
+ {
+-	struct radeon_encoder *radeon_encoder;
+-	struct drm_connector *connector;
+-	struct radeon_connector *radeon_connector = 0;
+-
+-	list_for_each_entry(connector,
+-		&encoder->dev->mode_config.connector_list, head) {
+-		if (connector->encoder == encoder) {
+-			radeon_connector = to_radeon_connector(connector);
+-			break;
+-		}
+-	}
++	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 
+-	if (!radeon_connector) {
+-		DRM_ERROR("Couldn't find encoder's connector\n");
++	if (!connector)
+ 		return;
+-	}
+-
+-	radeon_encoder = to_radeon_encoder(encoder);
+ 
+ 	if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
+ 		radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
+@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
+ }
+ 
+ void radeon_audio_detect(struct drm_connector *connector,
++			 struct drm_encoder *encoder,
+ 			 enum drm_connector_status status)
+ {
+-	struct radeon_device *rdev;
+-	struct radeon_encoder *radeon_encoder;
++	struct drm_device *dev = connector->dev;
++	struct radeon_device *rdev = dev->dev_private;
++	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig;
+ 
+-	if (!connector || !connector->encoder)
++	if (!radeon_audio_chipset_supported(rdev))
+ 		return;
+ 
+-	rdev = connector->encoder->dev->dev_private;
+-
+-	if (!radeon_audio_chipset_supported(rdev))
++	if (!radeon_encoder_is_digital(encoder))
+ 		return;
+ 
+-	radeon_encoder = to_radeon_encoder(connector->encoder);
+ 	dig = radeon_encoder->enc_priv;
+ 
+ 	if (status == connector_status_connected) {
+-		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+-			radeon_encoder->audio = NULL;
+-			return;
+-		}
+-
+ 		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ 			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 
+@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
+ 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
+ 		}
+ 
+-		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
+-		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
++		if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
++			if (!dig->pin)
++				dig->pin = radeon_audio_get_pin(encoder);
++			radeon_audio_enable(rdev, dig->pin, 0xf);
++		} else {
++			radeon_audio_enable(rdev, dig->pin, 0);
++			dig->pin = NULL;
++		}
+ 	} else {
+-		radeon_audio_enable(rdev, dig->afmt->pin, 0);
+-		dig->afmt->pin = NULL;
++		radeon_audio_enable(rdev, dig->pin, 0);
++		dig->pin = NULL;
+ 	}
+ }
+ 
+@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
+ }
+ 
+ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
+-	struct drm_display_mode *mode)
++				       struct drm_display_mode *mode)
+ {
+ 	struct radeon_device *rdev = encoder->dev->dev_private;
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+-	struct drm_connector *connector;
+-	struct radeon_connector *radeon_connector = NULL;
++	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ 	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ 	struct hdmi_avi_infoframe frame;
+ 	int err;
+ 
+-	list_for_each_entry(connector,
+-		&encoder->dev->mode_config.connector_list, head) {
+-		if (connector->encoder == encoder) {
+-			radeon_connector = to_radeon_connector(connector);
+-			break;
+-		}
+-	}
+-
+-	if (!radeon_connector) {
+-		DRM_ERROR("Couldn't find encoder's connector\n");
+-		return -ENOENT;
+-	}
++	if (!connector)
++		return -EINVAL;
+ 
+ 	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ 	if (err < 0) {
+@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
+ 		return err;
+ 	}
+ 
+-	if (dig && dig->afmt &&
+-		radeon_encoder->audio && radeon_encoder->audio->set_avi_packet)
++	if (dig && dig->afmt && radeon_encoder->audio &&
++	    radeon_encoder->audio->set_avi_packet)
+ 		radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
+ 			buffer, sizeof(buffer));
+ 
+@@ -745,7 +719,7 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
+ }
+ 
+ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+-	struct drm_display_mode *mode)
++				     struct drm_display_mode *mode)
+ {
+ 	struct drm_device *dev = encoder->dev;
+ 	struct radeon_device *rdev = dev->dev_private;
+@@ -756,6 +730,9 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ 	struct radeon_connector_atom_dig *dig_connector =
+ 		radeon_connector->con_priv;
+ 
++	if (!connector)
++		return;
++
+ 	if (!dig || !dig->afmt)
+ 		return;
+ 
+@@ -774,7 +751,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
+ }
+ 
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+-	struct drm_display_mode *mode)
++			   struct drm_display_mode *mode)
+ {
+ 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index 8438304f7139..059cc3012062 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -68,7 +68,8 @@ struct radeon_audio_funcs
+ 
+ int radeon_audio_init(struct radeon_device *rdev);
+ void radeon_audio_detect(struct drm_connector *connector,
+-	enum drm_connector_status status);
++			 struct drm_encoder *encoder,
++			 enum drm_connector_status status);
+ u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
+ 	u32 offset, u32 reg);
+ void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
+diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
+index 3e5f6b71f3ad..c097d3a82bda 100644
+--- a/drivers/gpu/drm/radeon/radeon_combios.c
++++ b/drivers/gpu/drm/radeon/radeon_combios.c
+@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
+ 
+ 			if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
+ 			    (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
++				u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++
++				if (hss > lvds->native_mode.hdisplay)
++					hss = (10 - 1) * 8;
++
+ 				lvds->native_mode.htotal = lvds->native_mode.hdisplay +
+ 					(RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
+ 				lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
+-					(RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
++					hss;
+ 				lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
+ 					(RBIOS8(tmp + 23) * 8);
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index cebb65e07e1d..94b21ae70ef7 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -1379,8 +1379,16 @@ out:
+ 	/* updated in get modes as well since we need to know if it's analog or digital */
+ 	radeon_connector_update_scratch_regs(connector, ret);
+ 
+-	if (radeon_audio != 0)
+-		radeon_audio_detect(connector, ret);
++	if ((radeon_audio != 0) && radeon_connector->use_digital) {
++		const struct drm_connector_helper_funcs *connector_funcs =
++			connector->helper_private;
++
++		encoder = connector_funcs->best_encoder(connector);
++		if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
++			radeon_connector_get_edid(connector);
++			radeon_audio_detect(connector, encoder, ret);
++		}
++	}
+ 
+ exit:
+ 	pm_runtime_mark_last_busy(connector->dev->dev);
+@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
+ 
+ 	radeon_connector_update_scratch_regs(connector, ret);
+ 
+-	if (radeon_audio != 0)
+-		radeon_audio_detect(connector, ret);
++	if ((radeon_audio != 0) && encoder) {
++		radeon_connector_get_edid(connector);
++		radeon_audio_detect(connector, encoder, ret);
++	}
+ 
+ out:
+ 	pm_runtime_mark_last_busy(connector->dev->dev);
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index f01c797b78cf..9af2d8398e90 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -237,7 +237,6 @@ struct radeon_afmt {
+ 	int offset;
+ 	bool last_buffer_filled_status;
+ 	int id;
+-	struct r600_audio_pin *pin;
+ };
+ 
+ struct radeon_mode_info {
+@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
+ 	uint8_t backlight_level;
+ 	int panel_mode;
+ 	struct radeon_afmt *afmt;
++	struct r600_audio_pin *pin;
+ 	int active_mst_links;
+ };
+ 
+diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
+index 6153df735e82..08ff89d222e5 100644
+--- a/drivers/hwmon/nct7904.c
++++ b/drivers/hwmon/nct7904.c
+@@ -575,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
+ 	{"nct7904", 0},
+ 	{}
+ };
++MODULE_DEVICE_TABLE(i2c, nct7904_id);
+ 
+ static struct i2c_driver nct7904_driver = {
+ 	.class = I2C_CLASS_HWMON,
+diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
+index a353b7de6d22..bc7eed67998a 100644
+--- a/drivers/input/mouse/alps.c
++++ b/drivers/input/mouse/alps.c
+@@ -20,6 +20,7 @@
+ #include <linux/input/mt.h>
+ #include <linux/serio.h>
+ #include <linux/libps2.h>
++#include <linux/dmi.h>
+ 
+ #include "psmouse.h"
+ #include "alps.h"
+@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
+ #define ALPS_FOUR_BUTTONS	0x40	/* 4 direction button present */
+ #define ALPS_PS2_INTERLEAVED	0x80	/* 3-byte PS/2 packet interleaved with
+ 					   6-byte ALPS packet */
++#define ALPS_DELL		0x100	/* device is a Dell laptop */
+ #define ALPS_BUTTONPAD		0x200	/* device is a clickpad */
+ 
+ static const struct alps_model_info alps_model_data[] = {
+@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
+ 		return;
+ 	}
+ 
+-	/* Non interleaved V2 dualpoint has separate stick button bits */
++	/* Dell non interleaved V2 dualpoint has separate stick button bits */
+ 	if (priv->proto_version == ALPS_PROTO_V2 &&
+-	    priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) {
++	    priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
+ 		left |= packet[0] & 1;
+ 		right |= packet[0] & 2;
+ 		middle |= packet[0] & 4;
+@@ -2542,6 +2544,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
+ 	priv->byte0 = protocol->byte0;
+ 	priv->mask0 = protocol->mask0;
+ 	priv->flags = protocol->flags;
++	if (dmi_name_in_vendors("Dell"))
++		priv->flags |= ALPS_DELL;
+ 
+ 	priv->x_max = 2000;
+ 	priv->y_max = 1400;
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index e8d84566f311..697f34fba06b 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1719,7 +1719,8 @@ static int dm_merge_bvec(struct request_queue *q,
+ 	struct mapped_device *md = q->queuedata;
+ 	struct dm_table *map = dm_get_live_table_fast(md);
+ 	struct dm_target *ti;
+-	sector_t max_sectors, max_size = 0;
++	sector_t max_sectors;
++	int max_size = 0;
+ 
+ 	if (unlikely(!map))
+ 		goto out;
+@@ -1732,18 +1733,10 @@ static int dm_merge_bvec(struct request_queue *q,
+ 	 * Find maximum amount of I/O that won't need splitting
+ 	 */
+ 	max_sectors = min(max_io_len(bvm->bi_sector, ti),
+-			  (sector_t) queue_max_sectors(q));
++			  (sector_t) BIO_MAX_SECTORS);
+ 	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
+-
+-	/*
+-	 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+-	 * to the targets' merge function since it holds sectors not bytes).
+-	 * Just doing this as an interim fix for stable@ because the more
+-	 * comprehensive cleanup of switching to sector_t will impact every
+-	 * DM target that implements a ->merge hook.
+-	 */
+-	if (max_size > INT_MAX)
+-		max_size = INT_MAX;
++	if (max_size < 0)
++		max_size = 0;
+ 
+ 	/*
+ 	 * merge_bvec_fn() returns number of bytes
+@@ -1751,13 +1744,13 @@ static int dm_merge_bvec(struct request_queue *q,
+ 	 * max is precomputed maximal io size
+ 	 */
+ 	if (max_size && ti->type->merge)
+-		max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
++		max_size = ti->type->merge(ti, bvm, biovec, max_size);
+ 	/*
+ 	 * If the target doesn't support merge method and some of the devices
+-	 * provided their merge_bvec method (we know this by looking for the
+-	 * max_hw_sectors that dm_set_device_limits may set), then we can't
+-	 * allow bios with multiple vector entries.  So always set max_size
+-	 * to 0, and the code below allows just one page.
++	 * provided their merge_bvec method (we know this by looking at
++	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
++	 * entries.  So always set max_size to 0, and the code below allows
++	 * just one page.
+ 	 */
+ 	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
+ 		max_size = 0;
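
The dm.c rework drops the INT_MAX clamp by bounding max_sectors to
BIO_MAX_SECTORS first, so the byte count always fits an int; the only
remaining hazard is bvm->bi_size exceeding it, handled by clamping negatives
to zero. A standalone sketch of that arithmetic (numbers illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define BIO_MAX_SECTORS 256  /* illustrative bound, in 512-byte sectors */

    int main(void)
    {
        uint64_t max_sectors = BIO_MAX_SECTORS;
        uint32_t bi_size = 300u << 9;  /* bytes already queued in the bio */

        /* bytes still allowed; may go negative if the bio is large */
        int64_t bytes = (int64_t)(max_sectors << 9) - bi_size;
        int max_size = bytes < 0 ? 0 : (int)bytes;  /* the clamp in the hunk */

        printf("%d\n", max_size);  /* 0, never a bogus huge value */
        return 0;
    }
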
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index b9200282fd77..e4621511d118 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5740,7 +5740,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
+ 	char *ptr;
+ 	int err;
+ 
+-	file = kmalloc(sizeof(*file), GFP_NOIO);
++	file = kzalloc(sizeof(*file), GFP_NOIO);
+ 	if (!file)
+ 		return -ENOMEM;
+ 
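
The md.c one-liner is an information-leak fix: the buffer is later copied out
to userspace, and kmalloc() leaves unwritten tail bytes holding stale kernel
heap contents, while kzalloc() zero-fills. A userspace analogue using
calloc() (struct layout illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bitmap_file { char pathname[4096]; };

    int main(void)
    {
        /* calloc(), like kzalloc(), zero-fills: if only part of pathname
         * is written before the struct is copied out, the rest is 0 */
        struct bitmap_file *f = calloc(1, sizeof(*f));
        if (!f)
            return 1;
        strcpy(f->pathname, "/dev/md0");
        printf("%d\n", f->pathname[sizeof(f->pathname) - 1]);  /* 0 */
        free(f);
        return 0;
    }
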
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index cd7b0c1e882d..5ce3cd5c4e1d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -1475,6 +1475,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ {
+ 	char b[BDEVNAME_SIZE];
+ 	struct r1conf *conf = mddev->private;
++	unsigned long flags;
+ 
+ 	/*
+ 	 * If it is not operational, then we have already marked it as dead
+@@ -1494,14 +1495,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
+ 		return;
+ 	}
+ 	set_bit(Blocked, &rdev->flags);
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	if (test_and_clear_bit(In_sync, &rdev->flags)) {
+-		unsigned long flags;
+-		spin_lock_irqsave(&conf->device_lock, flags);
+ 		mddev->degraded++;
+ 		set_bit(Faulty, &rdev->flags);
+-		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	} else
+ 		set_bit(Faulty, &rdev->flags);
++	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	/*
+ 	 * if recovery is running, make sure it aborts.
+ 	 */
+@@ -1567,7 +1567,10 @@ static int raid1_spare_active(struct mddev *mddev)
+ 	 * Find all failed disks within the RAID1 configuration
+ 	 * and mark them readable.
+ 	 * Called under mddev lock, so rcu protection not needed.
++	 * device_lock is used to avoid races with raid1_end_read_request,
++	 * which expects the 'In_sync' flags and ->degraded to be consistent.
+ 	 */
++	spin_lock_irqsave(&conf->device_lock, flags);
+ 	for (i = 0; i < conf->raid_disks; i++) {
+ 		struct md_rdev *rdev = conf->mirrors[i].rdev;
+ 		struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+@@ -1598,7 +1601,6 @@ static int raid1_spare_active(struct mddev *mddev)
+ 			sysfs_notify_dirent_safe(rdev->sysfs_state);
+ 		}
+ 	}
+-	spin_lock_irqsave(&conf->device_lock, flags);
+ 	mddev->degraded -= count;
+ 	spin_unlock_irqrestore(&conf->device_lock, flags);
+ 
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 7681237fe298..ead543282128 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -1524,12 +1524,11 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
+ 		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+ 		case QCA6174_HW_1_0_CHIP_ID_REV:
+ 		case QCA6174_HW_1_1_CHIP_ID_REV:
++		case QCA6174_HW_2_1_CHIP_ID_REV:
++		case QCA6174_HW_2_2_CHIP_ID_REV:
+ 			return 3;
+ 		case QCA6174_HW_1_3_CHIP_ID_REV:
+ 			return 2;
+-		case QCA6174_HW_2_1_CHIP_ID_REV:
+-		case QCA6174_HW_2_2_CHIP_ID_REV:
+-			return 6;
+ 		case QCA6174_HW_3_0_CHIP_ID_REV:
+ 		case QCA6174_HW_3_1_CHIP_ID_REV:
+ 		case QCA6174_HW_3_2_CHIP_ID_REV:
+diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
+index 8882afbef688..6285f46f3ddb 100644
+--- a/drivers/phy/phy-twl4030-usb.c
++++ b/drivers/phy/phy-twl4030-usb.c
+@@ -144,6 +144,16 @@
+ #define PMBR1				0x0D
+ #define GPIO_USB_4PIN_ULPI_2430C	(3 << 0)
+ 
++/*
++ * If VBUS is valid or ID is ground, then we know a
++ * cable is present and we need to be runtime-enabled
++ */
++static inline bool cable_present(enum omap_musb_vbus_id_status stat)
++{
++	return stat == OMAP_MUSB_VBUS_VALID ||
++		stat == OMAP_MUSB_ID_GROUND;
++}
++
+ struct twl4030_usb {
+ 	struct usb_phy		phy;
+ 	struct device		*dev;
+@@ -536,8 +546,10 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
+ 
+ 	mutex_lock(&twl->lock);
+ 	if (status >= 0 && status != twl->linkstat) {
++		status_changed =
++			cable_present(twl->linkstat) !=
++			cable_present(status);
+ 		twl->linkstat = status;
+-		status_changed = true;
+ 	}
+ 	mutex_unlock(&twl->lock);
+ 
+@@ -553,15 +565,11 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
+ 		 * USB_LINK_VBUS state.  musb_hdrc won't care until it
+ 		 * starts to handle softconnect right.
+ 		 */
+-		if ((status == OMAP_MUSB_VBUS_VALID) ||
+-		    (status == OMAP_MUSB_ID_GROUND)) {
+-			if (pm_runtime_suspended(twl->dev))
+-				pm_runtime_get_sync(twl->dev);
++		if (cable_present(status)) {
++			pm_runtime_get_sync(twl->dev);
+ 		} else {
+-			if (pm_runtime_active(twl->dev)) {
+-				pm_runtime_mark_last_busy(twl->dev);
+-				pm_runtime_put_autosuspend(twl->dev);
+-			}
++			pm_runtime_mark_last_busy(twl->dev);
++			pm_runtime_put_autosuspend(twl->dev);
+ 		}
+ 		omap_musb_mailbox(status);
+ 	}
+@@ -766,6 +774,9 @@ static int twl4030_usb_remove(struct platform_device *pdev)
+ 
+ 	/* disable complete OTG block */
+ 	twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
++
++	if (cable_present(twl->linkstat))
++		pm_runtime_put_noidle(twl->dev);
+ 	pm_runtime_mark_last_busy(twl->dev);
+ 	pm_runtime_put(twl->dev);
+ 
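
The twl4030 rework keys the runtime-PM reference count off cable-presence
transitions: cable_present() folds VBUS_VALID and ID_GROUND into a single
"plugged" state, so a VBUS_VALID to ID_GROUND change no longer takes a second
PM reference that would never be dropped. A standalone demo of the transition
test:

    #include <stdio.h>

    enum stat { OMAP_MUSB_UNKNOWN, OMAP_MUSB_VBUS_VALID, OMAP_MUSB_ID_GROUND };

    static int cable_present(enum stat s)
    {
        return s == OMAP_MUSB_VBUS_VALID || s == OMAP_MUSB_ID_GROUND;
    }

    int main(void)
    {
        /* both states mean "cable plugged", so this is not a transition
         * and must not trigger another pm_runtime_get_sync() */
        enum stat old = OMAP_MUSB_VBUS_VALID, new = OMAP_MUSB_ID_GROUND;

        printf("%d\n", cable_present(old) != cable_present(new));  /* 0 */
        return 0;
    }
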
+diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
+index 882744852aac..a9aa38903efe 100644
+--- a/drivers/scsi/ipr.c
++++ b/drivers/scsi/ipr.c
+@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
+ {
+ 	struct ipr_trace_entry *trace_entry;
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
++	unsigned int trace_index;
+ 
+-	trace_entry = &ioa_cfg->trace[atomic_add_return
+-			(1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
++	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
++	trace_entry = &ioa_cfg->trace[trace_index];
+ 	trace_entry->time = jiffies;
+ 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
+ 	trace_entry->type = type;
+@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
+ 
+ static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+ {
++	unsigned int hrrq;
++
+ 	if (ioa_cfg->hrrq_num == 1)
+-		return 0;
+-	else
+-		return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
++		hrrq = 0;
++	else {
++		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
++		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
++	}
++	return hrrq;
+ }
+ 
+ /**
+@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
+ 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+ 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+-	unsigned long hrrq_flags;
++	unsigned long lock_flags;
+ 
+ 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
+ 
+ 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
+ 		scsi_dma_unmap(scsi_cmd);
+ 
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
+ 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ 		scsi_cmd->scsi_done(scsi_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
+ 	} else {
+-		spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
++		spin_lock(&ipr_cmd->hrrq->_lock);
+ 		ipr_erp_start(ioa_cfg, ipr_cmd);
+-		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
++		spin_unlock(&ipr_cmd->hrrq->_lock);
++		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ 	}
+ }
+ 
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index 73790a1d0969..6b97ee45c7b4 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
+ 
+ #define IPR_NUM_TRACE_INDEX_BITS	8
+ #define IPR_NUM_TRACE_ENTRIES		(1 << IPR_NUM_TRACE_INDEX_BITS)
++#define IPR_TRACE_INDEX_MASK		(IPR_NUM_TRACE_ENTRIES - 1)
+ #define IPR_TRACE_SIZE	(sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
+ 	char trace_start[8];
+ #define IPR_TRACE_START_LABEL			"trace"
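
The ipr trace fix matters because atomic_add_return() on a 32-bit counter
eventually wraps negative, and C's '%' keeps the sign of the dividend, so the
old modulo could produce a negative (out-of-bounds) array index. Masking with
a power-of-two size cannot. A standalone demo (assumes the usual
two's-complement representation):

    #include <stdio.h>

    #define IPR_NUM_TRACE_ENTRIES (1 << 8)
    #define IPR_TRACE_INDEX_MASK  (IPR_NUM_TRACE_ENTRIES - 1)

    int main(void)
    {
        int counter = -5;  /* a wrapped atomic_add_return() result */

        printf("%% gives %d\n", counter % IPR_NUM_TRACE_ENTRIES);  /* -5  */
        printf("& gives %d\n", counter & IPR_TRACE_INDEX_MASK);    /* 251 */
        return 0;
    }
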
+diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
+index 9c934e6d2ea1..c61add46b426 100644
+--- a/drivers/staging/lustre/lustre/obdclass/debug.c
++++ b/drivers/staging/lustre/lustre/obdclass/debug.c
+@@ -40,7 +40,7 @@
+ 
+ #define DEBUG_SUBSYSTEM D_OTHER
+ 
+-#include <linux/unaligned/access_ok.h>
++#include <asm/unaligned.h>
+ 
+ #include "../include/obd_support.h"
+ #include "../include/lustre_debug.h"
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 15baacb126ad..376e4a0c15c6 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -1486,8 +1486,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 		}
+ 	}
+ 
+-	if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
+-		if (conf->assoc) {
++	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
++	    priv->op_mode != NL80211_IFTYPE_AP) {
++		if (conf->assoc && conf->beacon_rate) {
+ 			CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
+ 				       conf->sync_tsf);
+ 
+diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
+index 1d30b0975651..67098a8a7a02 100644
+--- a/drivers/thermal/samsung/exynos_tmu.c
++++ b/drivers/thermal/samsung/exynos_tmu.c
+@@ -1209,6 +1209,8 @@ err_clk_sec:
+ 	if (!IS_ERR(data->clk_sec))
+ 		clk_unprepare(data->clk_sec);
+ err_sensor:
++	if (!IS_ERR_OR_NULL(data->regulator))
++		regulator_disable(data->regulator);
+ 	thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
+ 
+ 	return ret;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index 74fea4fa41b1..3ad48e1c0c57 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
+ 	},
+ };
+ 
+-module_platform_driver(ci_hdrc_driver);
++static int __init ci_hdrc_platform_register(void)
++{
++	ci_hdrc_host_driver_init();
++	return platform_driver_register(&ci_hdrc_driver);
++}
++module_init(ci_hdrc_platform_register);
++
++static void __exit ci_hdrc_platform_unregister(void)
++{
++	platform_driver_unregister(&ci_hdrc_driver);
++}
++module_exit(ci_hdrc_platform_unregister);
+ 
+ MODULE_ALIAS("platform:ci_hdrc");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
+index 21fe1a314313..2f8af40e87ca 100644
+--- a/drivers/usb/chipidea/host.c
++++ b/drivers/usb/chipidea/host.c
+@@ -237,9 +237,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
+ 	rdrv->name	= "host";
+ 	ci->roles[CI_ROLE_HOST] = rdrv;
+ 
++	return 0;
++}
++
++void ci_hdrc_host_driver_init(void)
++{
+ 	ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
+ 	orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
+ 	ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
+-
+-	return 0;
+ }
+diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
+index 5707bf379bfb..0f12f131bdd3 100644
+--- a/drivers/usb/chipidea/host.h
++++ b/drivers/usb/chipidea/host.h
+@@ -5,6 +5,7 @@
+ 
+ int ci_hdrc_host_init(struct ci_hdrc *ci);
+ void ci_hdrc_host_destroy(struct ci_hdrc *ci);
++void ci_hdrc_host_driver_init(void);
+ 
+ #else
+ 
+@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
+ 
+ }
+ 
++static inline void ci_hdrc_host_driver_init(void)
++{
++
++}
++
+ #endif
+ 
+ #endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 6d3eb8b00a48..531861547253 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
+ 			factor = 1000;
+ 		} else {
+ 			ep_desc = &hs_epin_desc;
+-			factor = 125;
++			factor = 8000;
+ 		}
+ 
+ 		/* pre-compute some values for iso_complete() */
+ 		uac2->p_framesize = opts->p_ssize *
+ 				    num_channels(opts->p_chmask);
+ 		rate = opts->p_srate * uac2->p_framesize;
+-		uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor;
++		uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
+ 		uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
+ 					prm->max_psize);
+ 
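
In the f_uac2 hunk, p_interval changes meaning to "packets per second": high
speed runs 8000 microframes per second, so dividing that by 2^(bInterval-1)
gives the packet rate that the rate / p_interval division below it expects.
Worked numbers (stream parameters illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int bInterval = 4;  /* from the high-speed descriptor */
        unsigned int factor = 8000;  /* HS microframes per second */
        unsigned int p_interval = factor / (1 << (bInterval - 1)); /* 1000/s */

        unsigned int rate = 48000 * 2 * 2;  /* 48 kHz, 16-bit, stereo: B/s */
        printf("pktsize = %u bytes\n", rate / p_interval);  /* 192 */
        return 0;
    }
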
+diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
+index d69c35558f68..7d69931cf45d 100644
+--- a/drivers/usb/gadget/udc/udc-core.c
++++ b/drivers/usb/gadget/udc/udc-core.c
+@@ -321,6 +321,7 @@ err4:
+ 
+ err3:
+ 	put_device(&udc->dev);
++	device_del(&gadget->dev);
+ 
+ err2:
+ 	put_device(&gadget->dev);
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index 3e442f77a2b9..9a8c936cd42c 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
+ 	int size;
+ 	int i, j, num_ports;
+ 
+-	del_timer_sync(&xhci->cmd_timer);
++	if (timer_pending(&xhci->cmd_timer))
++		del_timer_sync(&xhci->cmd_timer);
+ 
+ 	/* Free the Event Ring Segment Table and the actual Event Ring */
+ 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index d095677a0702..b3a0a2275f5a 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
+ 		return 0;
+ 	/* offset in TRBs */
+ 	segment_offset = trb - seg->trbs;
+-	if (segment_offset > TRBS_PER_SEGMENT)
++	if (segment_offset >= TRBS_PER_SEGMENT)
+ 		return 0;
+ 	return seg->dma + (segment_offset * sizeof(*trb));
+ }
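
The xhci-ring.c one-liner is a textbook off-by-one: valid TRB offsets in a
segment run 0 .. TRBS_PER_SEGMENT-1, so an offset equal to TRBS_PER_SEGMENT
is already past the end, and '>' let it through. A standalone demo:

    #include <stdio.h>

    #define TRBS_PER_SEGMENT 64

    int main(void)
    {
        int segment_offset = TRBS_PER_SEGMENT;  /* one past the last TRB */

        printf("'>'  rejects it: %d\n", segment_offset >  TRBS_PER_SEGMENT); /* 0 */
        printf("'>=' rejects it: %d\n", segment_offset >= TRBS_PER_SEGMENT); /* 1 */
        return 0;
    }
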
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 19b85ee98a72..876423b8892c 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ 	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+ 	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
++	{ USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
++	  .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
+ 	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
+diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
+index 9c63897b3a56..d156545728c2 100644
+--- a/drivers/usb/serial/qcserial.c
++++ b/drivers/usb/serial/qcserial.c
+@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x1199, 0x901c)},	/* Sierra Wireless EM7700 */
+ 	{DEVICE_SWI(0x1199, 0x901f)},	/* Sierra Wireless EM7355 */
+ 	{DEVICE_SWI(0x1199, 0x9040)},	/* Sierra Wireless Modem */
+-	{DEVICE_SWI(0x1199, 0x9041)},	/* Sierra Wireless MC7305/MC7355 */
+ 	{DEVICE_SWI(0x1199, 0x9051)},	/* Netgear AirCard 340U */
+ 	{DEVICE_SWI(0x1199, 0x9053)},	/* Sierra Wireless Modem */
+ 	{DEVICE_SWI(0x1199, 0x9054)},	/* Sierra Wireless Modem */
+@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
+ 	{DEVICE_SWI(0x413c, 0x81a4)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+ 	{DEVICE_SWI(0x413c, 0x81a9)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
++	{DEVICE_SWI(0x413c, 0x81b1)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
+ 
+ 	/* Huawei devices */
+ 	{DEVICE_HWI(0x03f0, 0x581d)},	/* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
+diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
+index 46179a0828eb..07d1ecd564f7 100644
+--- a/drivers/usb/serial/sierra.c
++++ b/drivers/usb/serial/sierra.c
+@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ 	},
++	{ USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
+ 	/* AT&T Direct IP LTE modems */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
+index 89274850741b..4bd23bba816f 100644
+--- a/drivers/xen/gntdev.c
++++ b/drivers/xen/gntdev.c
+@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
+ 
+ 	pr_debug("priv %p\n", priv);
+ 
++	mutex_lock(&priv->lock);
+ 	while (!list_empty(&priv->maps)) {
+ 		map = list_entry(priv->maps.next, struct grant_map, next);
+ 		list_del(&map->next);
+ 		gntdev_put_map(NULL /* already removed */, map);
+ 	}
+ 	WARN_ON(!list_empty(&priv->freeable_maps));
++	mutex_unlock(&priv->lock);
+ 
+ 	if (use_ptemod)
+ 		mmu_notifier_unregister(&priv->mn, priv->mm);
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 039f9c8a95e8..6e13504f736e 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4397,9 +4397,9 @@ laundromat_main(struct work_struct *laundry)
+ 	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
+ }
+ 
+-static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
++static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
+ {
+-	if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
++	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
+ 		return nfserr_bad_stateid;
+ 	return nfs_ok;
+ }
+@@ -4574,20 +4574,48 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
+ 	return nfs_ok;
+ }
+ 
++static struct file *
++nfs4_find_file(struct nfs4_stid *s, int flags)
++{
++	switch (s->sc_type) {
++	case NFS4_DELEG_STID:
++		if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
++			return NULL;
++		return get_file(s->sc_file->fi_deleg_file);
++	case NFS4_OPEN_STID:
++	case NFS4_LOCK_STID:
++		if (flags & RD_STATE)
++			return find_readable_file(s->sc_file);
++		else
++			return find_writeable_file(s->sc_file);
++		break;
++	}
++
++	return NULL;
++}
++
++static __be32
++nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
++{
++	__be32 status;
++
++	status = nfsd4_check_openowner_confirmed(ols);
++	if (status)
++		return status;
++	return nfs4_check_openmode(ols, flags);
++}
++
+ /*
+-* Checks for stateid operations
+-*/
++ * Checks for stateid operations
++ */
+ __be32
+ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
+ 			   stateid_t *stateid, int flags, struct file **filpp)
+ {
+-	struct nfs4_stid *s;
+-	struct nfs4_ol_stateid *stp = NULL;
+-	struct nfs4_delegation *dp = NULL;
+-	struct svc_fh *current_fh = &cstate->current_fh;
+-	struct inode *ino = d_inode(current_fh->fh_dentry);
++	struct svc_fh *fhp = &cstate->current_fh;
++	struct inode *ino = d_inode(fhp->fh_dentry);
+ 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+-	struct file *file = NULL;
++	struct nfs4_stid *s;
+ 	__be32 status;
+ 
+ 	if (filpp)
+@@ -4597,60 +4625,39 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
+ 		return nfserr_grace;
+ 
+ 	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+-		return check_special_stateids(net, current_fh, stateid, flags);
++		return check_special_stateids(net, fhp, stateid, flags);
+ 
+ 	status = nfsd4_lookup_stateid(cstate, stateid,
+ 				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
+ 				&s, nn);
+ 	if (status)
+ 		return status;
+-	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
++	status = check_stateid_generation(stateid, &s->sc_stateid,
++			nfsd4_has_session(cstate));
+ 	if (status)
+ 		goto out;
++
+ 	switch (s->sc_type) {
+ 	case NFS4_DELEG_STID:
+-		dp = delegstateid(s);
+-		status = nfs4_check_delegmode(dp, flags);
+-		if (status)
+-			goto out;
+-		if (filpp) {
+-			file = dp->dl_stid.sc_file->fi_deleg_file;
+-			if (!file) {
+-				WARN_ON_ONCE(1);
+-				status = nfserr_serverfault;
+-				goto out;
+-			}
+-			get_file(file);
+-		}
++		status = nfs4_check_delegmode(delegstateid(s), flags);
+ 		break;
+ 	case NFS4_OPEN_STID:
+ 	case NFS4_LOCK_STID:
+-		stp = openlockstateid(s);
+-		status = nfs4_check_fh(current_fh, stp);
+-		if (status)
+-			goto out;
+-		status = nfsd4_check_openowner_confirmed(stp);
+-		if (status)
+-			goto out;
+-		status = nfs4_check_openmode(stp, flags);
+-		if (status)
+-			goto out;
+-		if (filpp) {
+-			struct nfs4_file *fp = stp->st_stid.sc_file;
+-
+-			if (flags & RD_STATE)
+-				file = find_readable_file(fp);
+-			else
+-				file = find_writeable_file(fp);
+-		}
++		status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
+ 		break;
+ 	default:
+ 		status = nfserr_bad_stateid;
++		break;
++	}
++	if (status)
+ 		goto out;
++	status = nfs4_check_fh(fhp, s);
++
++	if (!status && filpp) {
++		*filpp = nfs4_find_file(s, flags);
++		if (!*filpp)
++			status = nfserr_serverfault;
+ 	}
+-	status = nfs_ok;
+-	if (file)
+-		*filpp = file;
+ out:
+ 	nfs4_put_stid(s);
+ 	return status;
+@@ -4754,7 +4761,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
+ 	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
+ 	if (status)
+ 		return status;
+-	return nfs4_check_fh(current_fh, stp);
++	return nfs4_check_fh(current_fh, &stp->st_stid);
+ }
+ 
+ /* 
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 158badf945df..d4d84451e0e6 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2142,6 +2142,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
+ 			      FATTR4_WORD0_RDATTR_ERROR)
+ #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
++#define WORD2_ABSENT_FS_ATTRS 0
+ 
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+ static inline __be32
+@@ -2170,7 +2171,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
+ { return 0; }
+ #endif
+ 
+-static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
++static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
+ {
+ 	/* As per referral draft:  */
+ 	if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
+@@ -2183,6 +2184,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
+ 	}
+ 	*bmval0 &= WORD0_ABSENT_FS_ATTRS;
+ 	*bmval1 &= WORD1_ABSENT_FS_ATTRS;
++	*bmval2 &= WORD2_ABSENT_FS_ATTRS;
+ 	return 0;
+ }
+ 
+@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
+ 	BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
+ 
+ 	if (exp->ex_fslocs.migrated) {
+-		BUG_ON(bmval[2]);
+-		status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
++		status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
+ 		if (status)
+ 			goto out;
+ 	}
+@@ -2290,8 +2291,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
+ 	}
+ 
+ #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
+-	if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+-			bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
++	if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
++	     bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
+ 		err = security_inode_getsecctx(d_inode(dentry),
+ 						&context, &contextlen);
+ 		contextsupport = (err == 0);
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index 92e48c70f0f0..39ddcaf0918f 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -412,16 +412,36 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ 					 unsigned int flags)
+ {
+ 	struct fsnotify_mark *lmark, *mark;
++	LIST_HEAD(to_free);
+ 
++	/*
++	 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
++	 * fsnotify_clear_marks_by_inode() can come and free marks, even from
++	 * our to_free list, so we have to hold mark_mutex even when accessing
++	 * that list. And freeing a mark requires us to drop mark_mutex. So we
++	 * can reliably free only the first mark in the list. That's why we
++	 * first move the marks to be freed onto the to_free list in one go
++	 * and then free them one by one.
++	 */
+ 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+-		if (mark->flags & flags) {
+-			fsnotify_get_mark(mark);
+-			fsnotify_destroy_mark_locked(mark, group);
+-			fsnotify_put_mark(mark);
+-		}
++		if (mark->flags & flags)
++			list_move(&mark->g_list, &to_free);
+ 	}
+ 	mutex_unlock(&group->mark_mutex);
++
++	while (1) {
++		mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
++		if (list_empty(&to_free)) {
++			mutex_unlock(&group->mark_mutex);
++			break;
++		}
++		mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
++		fsnotify_get_mark(mark);
++		fsnotify_destroy_mark_locked(mark, group);
++		mutex_unlock(&group->mark_mutex);
++		fsnotify_put_mark(mark);
++	}
+ }
+ 
+ /*
+diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
+index f906a250da6a..9ea70127074d 100644
+--- a/fs/ocfs2/aops.c
++++ b/fs/ocfs2/aops.c
+@@ -686,7 +686,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
+ 
+ 	if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
+ 		u64 s = i_size_read(inode);
+-		sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) +
++		sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
+ 			(do_div(s, osb->s_clustersize) >> 9);
+ 
+ 		ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
+@@ -911,7 +911,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
+ 		BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
+ 
+ 		ret = blkdev_issue_zeroout(osb->sb->s_bdev,
+-				p_cpos << (osb->s_clustersize_bits - 9),
++				(u64)p_cpos << (osb->s_clustersize_bits - 9),
+ 				zero_len_head >> 9, GFP_NOFS, false);
+ 		if (ret < 0)
+ 			mlog_errno(ret);
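
Both ocfs2 hunks add a (u64) cast before the shift because p_cpos is a 32-bit
quantity: without the cast the shift happens in 32-bit arithmetic and the
high bits are lost before the result ever widens. A standalone demo (shift
amount illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t p_cpos = 1u << 25;  /* a large physical cluster offset */
        unsigned int shift = 12;     /* s_clustersize_bits - 9 */

        uint64_t bad  = (uint64_t)(p_cpos << shift); /* 32-bit shift: wraps */
        uint64_t good = (uint64_t)p_cpos << shift;   /* widen, then shift */

        printf("%llu vs %llu\n", (unsigned long long)bad,
               (unsigned long long)good);  /* 32 vs 137438953472 */
        return 0;
    }
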
+diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
+index 8b23aa2f52dd..23157e40dd74 100644
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
+ 	osb->dc_work_sequence = osb->dc_wake_sequence;
+ 
+ 	processed = osb->blocked_lock_count;
+-	while (processed) {
+-		BUG_ON(list_empty(&osb->blocked_lock_list));
+-
++	/*
++	 * blocked lock processing in this loop might call iput which can
++	 * remove items off osb->blocked_lock_list. Downconvert up to
++	 * 'processed' number of locks, but stop short if we had some
++	 * 'processed' locks, but stop short if some were removed by
++	 * ocfs2_mark_lockres_freeing while downconverting.
++	while (processed && !list_empty(&osb->blocked_lock_list)) {
+ 		lockres = list_entry(osb->blocked_lock_list.next,
+ 				     struct ocfs2_lock_res, l_blocked_list);
+ 		list_del_init(&lockres->l_blocked_list);
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 7e412ad74836..270221fcef42 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (kinfo->si_code == BUS_MCEERR_AR ||
+-		    kinfo->si_code == BUS_MCEERR_AO)
++		if (kinfo->si_signo == SIGBUS &&
++		    (kinfo->si_code == BUS_MCEERR_AR ||
++		     kinfo->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user((short) kinfo->si_addr_lsb,
+ 					  &uinfo->ssi_addr_lsb);
+ #endif
+diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
+index 3d4ea7eb2b68..12b75f3ba0a0 100644
+--- a/include/linux/mtd/nand.h
++++ b/include/linux/mtd/nand.h
+@@ -176,17 +176,17 @@ typedef enum {
+ /* Chip may not exist, so silence any errors in scan */
+ #define NAND_SCAN_SILENT_NODEV	0x00040000
+ /*
+- * This option could be defined by controller drivers to protect against
+- * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+- */
+-#define NAND_USE_BOUNCE_BUFFER	0x00080000
+-/*
+  * Autodetect nand buswidth with readid/onfi.
+  * This suppose the driver will configure the hardware in 8 bits mode
+  * when calling nand_scan_ident, and update its configuration
+  * before calling nand_scan_tail.
+  */
+ #define NAND_BUSWIDTH_AUTO      0x00080000
++/*
++ * This option could be defined by controller drivers to protect against
++ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
++ */
++#define NAND_USE_BOUNCE_BUFFER	0x00100000
+ 
+ /* Options set by nand scan */
+ /* Nand scan has allocated controller struct */
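
The nand.h shuffle exists because NAND_USE_BOUNCE_BUFFER and
NAND_BUSWIDTH_AUTO were both defined as 0x00080000, so setting either flag
silently implied the other. Moving one to bit 20 makes them independent
again, as this check demonstrates:

    #include <stdio.h>

    #define NAND_BUSWIDTH_AUTO     0x00080000  /* bit 19 */
    #define NAND_USE_BOUNCE_BUFFER 0x00100000  /* bit 20, was also bit 19 */

    int main(void)
    {
        unsigned int options = NAND_USE_BOUNCE_BUFFER;

        /* with distinct bits, asking for a bounce buffer no longer
         * accidentally turns on buswidth autodetection */
        printf("%d\n", !(options & NAND_BUSWIDTH_AUTO));  /* 1 */
        return 0;
    }
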
+diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
+index efe3443572ba..413417f3707b 100644
+--- a/include/uapi/linux/pci_regs.h
++++ b/include/uapi/linux/pci_regs.h
+@@ -319,6 +319,7 @@
+ #define PCI_MSIX_PBA		8	/* Pending Bit Array offset */
+ #define  PCI_MSIX_PBA_BIR	0x00000007 /* BAR index */
+ #define  PCI_MSIX_PBA_OFFSET	0xfffffff8 /* Offset into specified BAR */
++#define PCI_MSIX_FLAGS_BIRMASK	PCI_MSIX_PBA_BIR /* deprecated */
+ #define PCI_CAP_MSIX_SIZEOF	12	/* size of MSIX registers */
+ 
+ /* MSI-X Table entry format */
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index 3aaea7ffd077..c3fc5c2b63f3 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -143,7 +143,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+ 		if (!leaf)
+ 			return -ENOMEM;
+ 		INIT_LIST_HEAD(&leaf->msg_list);
+-		info->qsize += sizeof(*leaf);
+ 	}
+ 	leaf->priority = msg->m_type;
+ 	rb_link_node(&leaf->rb_node, parent, p);
+@@ -188,7 +187,6 @@ try_again:
+ 			     "lazy leaf delete!\n");
+ 		rb_erase(&leaf->rb_node, &info->msg_tree);
+ 		if (info->node_cache) {
+-			info->qsize -= sizeof(*leaf);
+ 			kfree(leaf);
+ 		} else {
+ 			info->node_cache = leaf;
+@@ -201,7 +199,6 @@ try_again:
+ 		if (list_empty(&leaf->msg_list)) {
+ 			rb_erase(&leaf->rb_node, &info->msg_tree);
+ 			if (info->node_cache) {
+-				info->qsize -= sizeof(*leaf);
+ 				kfree(leaf);
+ 			} else {
+ 				info->node_cache = leaf;
+@@ -1026,7 +1023,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 		new_leaf = NULL;
+ 	} else {
+ 		kfree(new_leaf);
+@@ -1133,7 +1129,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
+ 		/* Save our speculative allocation into the cache */
+ 		INIT_LIST_HEAD(&new_leaf->msg_list);
+ 		info->node_cache = new_leaf;
+-		info->qsize += sizeof(*new_leaf);
+ 	} else {
+ 		kfree(new_leaf);
+ 	}
+diff --git a/kernel/signal.c b/kernel/signal.c
+index d51c5ddd855c..0206be728dac 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2753,12 +2753,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
+ 		 * Other callers might not initialize the si_lsb field,
+ 		 * so check explicitly for the right codes here.
+ 		 */
+-		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
++		if (from->si_signo == SIGBUS &&
++		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
+ 			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+ #endif
+ #ifdef SEGV_BNDERR
+-		err |= __put_user(from->si_lower, &to->si_lower);
+-		err |= __put_user(from->si_upper, &to->si_upper);
++		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
++			err |= __put_user(from->si_lower, &to->si_lower);
++			err |= __put_user(from->si_upper, &to->si_upper);
++		}
+ #endif
+ 		break;
+ 	case __SI_CHLD:
+@@ -3022,7 +3025,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 	int ret = copy_siginfo_from_user32(&info, uinfo);
+ 	if (unlikely(ret))
+ 		return ret;
+@@ -3066,7 +3069,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
+ 			int, sig,
+ 			struct compat_siginfo __user *, uinfo)
+ {
+-	siginfo_t info;
++	siginfo_t info = {};
+ 
+ 	if (copy_siginfo_from_user32(&info, uinfo))
+ 		return -EFAULT;
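
The two "siginfo_t info = {};" initializers close a stack infoleak:
copy_siginfo_from_user32() fills only the fields the 32-bit layout carries,
and a later copy back to userspace could expose whatever the uninitialized
remainder held. A userspace analogue of the zero initialization (struct
layout illustrative):

    #include <stdio.h>

    struct siginfo_like {
        int  si_signo, si_errno, si_code;
        long pad[4];  /* fields a partial copy-in never writes */
    };

    int main(void)
    {
        struct siginfo_like info = { 0 };  /* the kernel spells this '= {}' */

        info.si_signo = 11;            /* the "copy-in" touches a few fields */
        printf("%ld\n", info.pad[3]);  /* 0, never stale stack contents */
        return 0;
    }
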
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 5e8eadd71bac..0d024fc8aa8e 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -937,21 +937,17 @@ static unsigned long shrink_page_list(struct list_head *page_list,
+ 		 *
+ 		 * 2) Global reclaim encounters a page, memcg encounters a
+ 		 *    page that is not marked for immediate reclaim or
+-		 *    the caller does not have __GFP_IO. In this case mark
++		 *    the caller does not have __GFP_FS (or __GFP_IO if it's
++		 *    simply going to swap, not to fs). In this case mark
+ 		 *    the page for immediate reclaim and continue scanning.
+ 		 *
+-		 *    __GFP_IO is checked  because a loop driver thread might
++		 *    Require may_enter_fs because we would wait on fs, which
++		 *    may not have submitted IO yet. And the loop driver might
+ 		 *    enter reclaim, and deadlock if it waits on a page for
+ 		 *    which it is needed to do the write (loop masks off
+ 		 *    __GFP_IO|__GFP_FS for this reason); but more thought
+ 		 *    would probably show more reasons.
+ 		 *
+-		 *    Don't require __GFP_FS, since we're not going into the
+-		 *    FS, just waiting on its writeback completion. Worryingly,
+-		 *    ext4 gfs2 and xfs allocate pages with
+-		 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
+-		 *    may_enter_fs here is liable to OOM on them.
+-		 *
+ 		 * 3) memcg encounters a page that is not already marked
+ 		 *    PageReclaim. memcg does not have any dirty pages
+ 		 *    throttling so we could easily OOM just because too many
+@@ -968,7 +964,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
+ 
+ 			/* Case 2 above */
+ 			} else if (global_reclaim(sc) ||
+-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
++			    !PageReclaim(page) || !may_enter_fs) {
+ 				/*
+ 				 * This is slightly racy - end_page_writeback()
+ 				 * might have just cleared PageReclaim, then
+diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
+index 1ab3dc9c8f99..7b815bcc8c9b 100644
+--- a/net/bluetooth/smp.c
++++ b/net/bluetooth/smp.c
+@@ -2295,6 +2295,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
+ 		return 1;
+ 
+ 	chan = conn->smp;
++	if (!chan) {
++		BT_ERR("SMP security requested but not available");
++		return 1;
++	}
+ 
+ 	if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
+ 		return 1;
+diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
+index e061355f535f..bf20593d3085 100644
+--- a/sound/firewire/amdtp.c
++++ b/sound/firewire/amdtp.c
+@@ -730,8 +730,9 @@ static void handle_in_packet(struct amdtp_stream *s,
+ 	    s->data_block_counter != UINT_MAX)
+ 		data_block_counter = s->data_block_counter;
+ 
+-	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) ||
+-	    (s->data_block_counter == UINT_MAX)) {
++	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
++	     data_block_counter == s->tx_first_dbc) ||
++	    s->data_block_counter == UINT_MAX) {
+ 		lost = false;
+ 	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
+ 		lost = data_block_counter != s->data_block_counter;
+diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
+index 8a03a91e728b..25c905537658 100644
+--- a/sound/firewire/amdtp.h
++++ b/sound/firewire/amdtp.h
+@@ -153,6 +153,8 @@ struct amdtp_stream {
+ 
+ 	/* quirk: fixed interval of dbc between previos/current packets. */
+ 	unsigned int tx_dbc_interval;
++	/* quirk: indicates the value of the dbc field in the first packet. */
++	unsigned int tx_first_dbc;
+ 
+ 	bool callbacked;
+ 	wait_queue_head_t callback_wait;
+diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
+index 2682e7e3e5c9..c94a432f7cc6 100644
+--- a/sound/firewire/fireworks/fireworks.c
++++ b/sound/firewire/fireworks/fireworks.c
+@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
+ 	err = get_hardware_info(efw);
+ 	if (err < 0)
+ 		goto error;
++	/* AudioFire8 (since 2009) and AudioFirePre8 */
+ 	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
+ 		efw->is_af9 = true;
++	/* These models use the same firmware. */
++	if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
++	    entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
++	    entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
++	    entry->model_id == MODEL_GIBSON_RIP ||
++	    entry->model_id == MODEL_GIBSON_GOLDTOP)
++		efw->is_fireworks3 = true;
+ 
+ 	snd_efw_proc_init(efw);
+ 
+diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
+index 4f0201a95222..084d414b228c 100644
+--- a/sound/firewire/fireworks/fireworks.h
++++ b/sound/firewire/fireworks/fireworks.h
+@@ -71,6 +71,7 @@ struct snd_efw {
+ 
+ 	/* for quirks */
+ 	bool is_af9;
++	bool is_fireworks3;
+ 	u32 firmware_version;
+ 
+ 	unsigned int midi_in_ports;
+diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
+index c55db1bddc80..7e353f1f7bff 100644
+--- a/sound/firewire/fireworks/fireworks_stream.c
++++ b/sound/firewire/fireworks/fireworks_stream.c
+@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
+ 	efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
+ 	/* Fireworks reset dbc at bus reset. */
+ 	efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
++	/*
++	 * But recent firmware starts packets with a non-zero dbc.
++	 * Driver version 5.7.6 installs firmware version 5.7.3.
++	 */
++	if (efw->is_fireworks3 &&
++	    (efw->firmware_version == 0x5070000 ||
++	     efw->firmware_version == 0x5070300 ||
++	     efw->firmware_version == 0x5080000))
++		efw->tx_stream.tx_first_dbc = 0x02;
+ 	/* AudioFire9 always reports wrong dbs. */
+ 	if (efw->is_af9)
+ 		efw->tx_stream.flags |= CIP_WRONG_DBS;
+diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
+index 50e9dd675579..3a24f7739aaa 100644
+--- a/sound/pci/hda/patch_cirrus.c
++++ b/sound/pci/hda/patch_cirrus.c
+@@ -1001,9 +1001,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
+ 
+ 	spec->spdif_present = spdif_present;
+ 	/* SPDIF TX on/off */
+-	if (spdif_present)
+-		snd_hda_set_pin_ctl(codec, spdif_pin,
+-				    spdif_present ? PIN_OUT : 0);
++	snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
+ 
+ 	cs_automute(codec);
+ }
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 590bcfb0e82f..1e99f075a5ab 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5118,6 +5118,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
++	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
+index 477e13d30971..e7ba557979cb 100644
+--- a/sound/soc/codecs/pcm1681.c
++++ b/sound/soc/codecs/pcm1681.c
+@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
+ 
+ 	if (val != -1) {
+ 		regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+-					PCM1681_DEEMPH_RATE_MASK, val);
++				   PCM1681_DEEMPH_RATE_MASK, val << 3);
+ 		enable = 1;
+ 	} else
+ 		enable = 0;
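
The pcm1681 fix shifts the de-emphasis rate selector into its register field
before the masked update: regmap_update_bits() applies the mask to the value
as given, so an unshifted selector landed in the wrong bits and was partly
masked away. Demo of the field arithmetic (mask position illustrative):

    #include <stdio.h>

    #define DEEMPH_RATE_MASK 0x18  /* rate field in bits [4:3], illustrative */

    int main(void)
    {
        unsigned int reg = 0xe7;  /* register value with the field cleared */
        unsigned int val = 2;     /* selector index from the rate table */

        unsigned int wrong = (reg & ~DEEMPH_RATE_MASK) | (val & DEEMPH_RATE_MASK);
        unsigned int fixed = (reg & ~DEEMPH_RATE_MASK) | ((val << 3) & DEEMPH_RATE_MASK);

        printf("0x%02x vs 0x%02x\n", wrong, fixed);  /* 0xe7 vs 0xf7 */
        return 0;
    }
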
+diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
+index a984485108cd..f7549cc7ea85 100644
+--- a/sound/soc/codecs/ssm4567.c
++++ b/sound/soc/codecs/ssm4567.c
+@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ 	if (invert_fclk)
+ 		ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
+ 
+-	return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1);
++	return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
++			SSM4567_SAI_CTRL_1_BCLK |
++			SSM4567_SAI_CTRL_1_FSYNC |
++			SSM4567_SAI_CTRL_1_LJ |
++			SSM4567_SAI_CTRL_1_TDM |
++			SSM4567_SAI_CTRL_1_PDM,
++			ctrl1);
+ }
+ 
+ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
+diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
+index 7b50a9d17ec1..edc186908358 100644
+--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
++++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
+@@ -42,6 +42,11 @@
+ #define MIN_FRAGMENT_SIZE (50 * 1024)
+ #define MAX_FRAGMENT_SIZE (1024 * 1024)
+ #define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz)  (((pcm_wd_sz + 15) >> 4) << 1)
++#ifdef CONFIG_PM
++#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
++#else
++#define GET_USAGE_COUNT(dev) 1
++#endif
+ 
+ int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
+ {
+@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
+ 	int ret = 0;
+ 	int usage_count = 0;
+ 
+-#ifdef CONFIG_PM
+-	usage_count = atomic_read(&dev->power.usage_count);
+-#else
+-	usage_count = 1;
+-#endif
+-
+ 	if (state == true) {
+ 		ret = pm_runtime_get_sync(dev);
+-
++		usage_count = GET_USAGE_COUNT(dev);
+ 		dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
+ 		if (ret < 0) {
+ 			dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
+@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
+ 			}
+ 		}
+ 	} else {
++		usage_count = GET_USAGE_COUNT(dev);
+ 		dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
+ 		return sst_pm_runtime_put(ctx);
+ 	}
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 158204d08924..b6c12dccb259 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1811,6 +1811,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ 					   size_t count, loff_t *ppos)
+ {
+ 	struct snd_soc_dapm_widget *w = file->private_data;
++	struct snd_soc_card *card = w->dapm->card;
+ 	char *buf;
+ 	int in, out;
+ 	ssize_t ret;
+@@ -1820,6 +1821,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ 	if (!buf)
+ 		return -ENOMEM;
+ 
++	mutex_lock(&card->dapm_mutex);
++
+ 	/* Supply widgets are not handled by is_connected_{input,output}_ep() */
+ 	if (w->is_supply) {
+ 		in = 0;
+@@ -1866,6 +1869,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
+ 					p->sink->name);
+ 	}
+ 
++	mutex_unlock(&card->dapm_mutex);
++
+ 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ 
+ 	kfree(buf);
+@@ -2140,11 +2145,15 @@ static ssize_t dapm_widget_show(struct device *dev,
+ 	struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ 	int i, count = 0;
+ 
++	mutex_lock(&rtd->card->dapm_mutex);
++
+ 	for (i = 0; i < rtd->num_codecs; i++) {
+ 		struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
+ 		count += dapm_widget_show_codec(codec, buf + count);
+ 	}
+ 
++	mutex_unlock(&rtd->card->dapm_mutex);
++
+ 	return count;
+ }
+ 
+@@ -3100,16 +3109,10 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+ 	}
+ 
+ 	prefix = soc_dapm_prefix(dapm);
+-	if (prefix) {
++	if (prefix)
+ 		w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
+-		if (widget->sname)
+-			w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
+-					     widget->sname);
+-	} else {
++	else
+ 		w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
+-		if (widget->sname)
+-			w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
+-	}
+ 	if (w->name == NULL) {
+ 		kfree(w);
+ 		return NULL;
+@@ -3557,7 +3560,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
+ 				break;
+ 			}
+ 
+-			if (!w->sname || !strstr(w->sname, dai_w->name))
++			if (!w->sname || !strstr(w->sname, dai_w->sname))
+ 				continue;
+ 
+ 			if (dai_w->id == snd_soc_dapm_dai_in) {


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-08-12 14:17 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-08-12 14:17 UTC (permalink / raw
  To: gentoo-commits

commit:     bf5ec0ef3757347790f7c0269c3a657e2d1fdd2b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Aug 12 14:17:29 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Aug 12 14:17:29 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bf5ec0ef

kdbus patch update 8-12-2015

 0000_README                                        |    2 +-
 ...s-7-22-2015.patch => 5015_kdbus-8-12-2015.patch | 1265 ++++++++++++--------
 2 files changed, 757 insertions(+), 510 deletions(-)

diff --git a/0000_README b/0000_README
index 148063b..fd7a57d 100644
--- a/0000_README
+++ b/0000_README
@@ -115,6 +115,6 @@ Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
 
-Patch:  5015_kdbus-7-22-15.patch
+Patch:  5015_kdbus-8-12-2015.patch
 From:   https://lkml.org
 Desc:   Kernel-level IPC implementation

diff --git a/5015_kdbus-7-22-2015.patch b/5015_kdbus-8-12-2015.patch
similarity index 97%
rename from 5015_kdbus-7-22-2015.patch
rename to 5015_kdbus-8-12-2015.patch
index b110b5c..4e018f2 100644
--- a/5015_kdbus-7-22-2015.patch
+++ b/5015_kdbus-8-12-2015.patch
@@ -7482,10 +7482,10 @@ index 1a0006a..4842a98 100644
  header-y += kernelcapi.h
 diff --git a/include/uapi/linux/kdbus.h b/include/uapi/linux/kdbus.h
 new file mode 100644
-index 0000000..ecffc6b
+index 0000000..4fc44cb
 --- /dev/null
 +++ b/include/uapi/linux/kdbus.h
-@@ -0,0 +1,980 @@
+@@ -0,0 +1,984 @@
 +/*
 + * kdbus is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the
@@ -8342,6 +8342,8 @@ index 0000000..ecffc6b
 + * @KDBUS_NAME_QUEUE:			Name should be queued if busy
 + * @KDBUS_NAME_IN_QUEUE:		Name is queued
 + * @KDBUS_NAME_ACTIVATOR:		Name is owned by a activator connection
++ * @KDBUS_NAME_PRIMARY:			Primary owner of the name
++ * @KDBUS_NAME_ACQUIRED:		Name was acquired/queued _now_
 + */
 +enum kdbus_name_flags {
 +	KDBUS_NAME_REPLACE_EXISTING	= 1ULL <<  0,
@@ -8349,6 +8351,8 @@ index 0000000..ecffc6b
 +	KDBUS_NAME_QUEUE		= 1ULL <<  2,
 +	KDBUS_NAME_IN_QUEUE		= 1ULL <<  3,
 +	KDBUS_NAME_ACTIVATOR		= 1ULL <<  4,
++	KDBUS_NAME_PRIMARY		= 1ULL <<  5,
++	KDBUS_NAME_ACQUIRED		= 1ULL <<  6,
 +};
 +
 +/**
@@ -9072,10 +9076,10 @@ index 0000000..a67f825
 +}
 diff --git a/ipc/kdbus/bus.h b/ipc/kdbus/bus.h
 new file mode 100644
-index 0000000..238986e
+index 0000000..8c2acae
 --- /dev/null
 +++ b/ipc/kdbus/bus.h
-@@ -0,0 +1,99 @@
+@@ -0,0 +1,101 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9122,6 +9126,7 @@ index 0000000..238986e
 + * @domain:		Domain of this bus
 + * @creator:		Creator of the bus
 + * @creator_meta:	Meta information about the bus creator
++ * @last_message_id:	Last used message id
 + * @policy_db:		Policy database for this bus
 + * @name_registry:	Name registry of this bus
 + * @conn_rwlock:	Read/Write lock for all lists of child connections
@@ -9145,6 +9150,7 @@ index 0000000..238986e
 +	struct kdbus_meta_proc *creator_meta;
 +
 +	/* protected by own locks */
++	atomic64_t last_message_id;
 +	struct kdbus_policy_db policy_db;
 +	struct kdbus_name_registry *name_registry;
 +
@@ -9177,10 +9183,10 @@ index 0000000..238986e
 +#endif
 diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
 new file mode 100644
-index 0000000..d94b417e
+index 0000000..ef63d65
 --- /dev/null
 +++ b/ipc/kdbus/connection.c
-@@ -0,0 +1,2207 @@
+@@ -0,0 +1,2227 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9235,7 +9241,8 @@ index 0000000..d94b417e
 +#define KDBUS_CONN_ACTIVE_BIAS	(INT_MIN + 2)
 +#define KDBUS_CONN_ACTIVE_NEW	(INT_MIN + 1)
 +
-+static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep, bool privileged,
++static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep,
++					 struct file *file,
 +					 struct kdbus_cmd_hello *hello,
 +					 const char *name,
 +					 const struct kdbus_creds *creds,
@@ -9255,6 +9262,8 @@ index 0000000..d94b417e
 +	bool is_policy_holder;
 +	bool is_activator;
 +	bool is_monitor;
++	bool privileged;
++	bool owner;
 +	struct kvec kvec;
 +	int ret;
 +
@@ -9264,6 +9273,9 @@ index 0000000..d94b417e
 +		struct kdbus_bloom_parameter bloom;
 +	} bloom_item;
 +
++	privileged = kdbus_ep_is_privileged(ep, file);
++	owner = kdbus_ep_is_owner(ep, file);
++
 +	is_monitor = hello->flags & KDBUS_HELLO_MONITOR;
 +	is_activator = hello->flags & KDBUS_HELLO_ACTIVATOR;
 +	is_policy_holder = hello->flags & KDBUS_HELLO_POLICY_HOLDER;
@@ -9280,9 +9292,9 @@ index 0000000..d94b417e
 +		return ERR_PTR(-EINVAL);
 +	if (is_monitor && ep->user)
 +		return ERR_PTR(-EOPNOTSUPP);
-+	if (!privileged && (is_activator || is_policy_holder || is_monitor))
++	if (!owner && (is_activator || is_policy_holder || is_monitor))
 +		return ERR_PTR(-EPERM);
-+	if ((creds || pids || seclabel) && !privileged)
++	if (!owner && (creds || pids || seclabel))
 +		return ERR_PTR(-EPERM);
 +
 +	ret = kdbus_sanitize_attach_flags(hello->attach_flags_send,
@@ -9306,18 +9318,17 @@ index 0000000..d94b417e
 +#endif
 +	mutex_init(&conn->lock);
 +	INIT_LIST_HEAD(&conn->names_list);
-+	INIT_LIST_HEAD(&conn->names_queue_list);
 +	INIT_LIST_HEAD(&conn->reply_list);
-+	atomic_set(&conn->name_count, 0);
 +	atomic_set(&conn->request_count, 0);
 +	atomic_set(&conn->lost_count, 0);
 +	INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
-+	conn->cred = get_current_cred();
++	conn->cred = get_cred(file->f_cred);
 +	conn->pid = get_pid(task_pid(current));
 +	get_fs_root(current->fs, &conn->root_path);
 +	init_waitqueue_head(&conn->wait);
 +	kdbus_queue_init(&conn->queue);
 +	conn->privileged = privileged;
++	conn->owner = owner;
 +	conn->ep = kdbus_ep_ref(ep);
 +	conn->id = atomic64_inc_return(&bus->domain->last_id);
 +	conn->flags = hello->flags;
@@ -9397,11 +9408,21 @@ index 0000000..d94b417e
 +	 * Note that limits are always accounted against the real UID, not
 +	 * the effective UID (cred->user always points to the accounting of
 +	 * cred->uid, not cred->euid).
++	 * In case the caller is privileged, we allow changing the accounting
++	 * to the faked user.
 +	 */
 +	if (ep->user) {
 +		conn->user = kdbus_user_ref(ep->user);
 +	} else {
-+		conn->user = kdbus_user_lookup(ep->bus->domain, current_uid());
++		kuid_t uid;
++
++		if (conn->meta_fake && uid_valid(conn->meta_fake->uid) &&
++		    conn->privileged)
++			uid = conn->meta_fake->uid;
++		else
++			uid = conn->cred->uid;
++
++		conn->user = kdbus_user_lookup(ep->bus->domain, uid);
 +		if (IS_ERR(conn->user)) {
 +			ret = PTR_ERR(conn->user);
 +			conn->user = NULL;
@@ -9450,7 +9471,6 @@ index 0000000..d94b417e
 +	WARN_ON(delayed_work_pending(&conn->work));
 +	WARN_ON(!list_empty(&conn->queue.msg_list));
 +	WARN_ON(!list_empty(&conn->names_list));
-+	WARN_ON(!list_empty(&conn->names_queue_list));
 +	WARN_ON(!list_empty(&conn->reply_list));
 +
 +	if (conn->user) {
@@ -9784,12 +9804,13 @@ index 0000000..d94b417e
 + */
 +bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name)
 +{
-+	struct kdbus_name_entry *e;
++	struct kdbus_name_owner *owner;
 +
 +	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
 +
-+	list_for_each_entry(e, &conn->names_list, conn_entry)
-+		if (strcmp(e->name, name) == 0)
++	list_for_each_entry(owner, &conn->names_list, conn_entry)
++		if (!(owner->flags & KDBUS_NAME_IN_QUEUE) &&
++		    !strcmp(name, owner->name->name))
 +			return true;
 +
 +	return false;
@@ -10218,6 +10239,7 @@ index 0000000..d94b417e
 +			 struct kdbus_conn **out_dst)
 +{
 +	const struct kdbus_msg *msg = staging->msg;
++	struct kdbus_name_owner *owner = NULL;
 +	struct kdbus_name_entry *name = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	int ret;
@@ -10236,7 +10258,9 @@ index 0000000..d94b417e
 +	} else {
 +		name = kdbus_name_lookup_unlocked(bus->name_registry,
 +						  staging->dst_name);
-+		if (!name)
++		if (name)
++			owner = kdbus_name_get_owner(name);
++		if (!owner)
 +			return -ESRCH;
 +
 +		/*
@@ -10248,19 +10272,14 @@ index 0000000..d94b417e
 +		 * owns the given name.
 +		 */
 +		if (msg->dst_id != KDBUS_DST_ID_NAME &&
-+		    msg->dst_id != name->conn->id)
++		    msg->dst_id != owner->conn->id)
 +			return -EREMCHG;
 +
-+		if (!name->conn && name->activator)
-+			dst = kdbus_conn_ref(name->activator);
-+		else
-+			dst = kdbus_conn_ref(name->conn);
-+
 +		if ((msg->flags & KDBUS_MSG_NO_AUTO_START) &&
-+		    kdbus_conn_is_activator(dst)) {
-+			ret = -EADDRNOTAVAIL;
-+			goto error;
-+		}
++		    kdbus_conn_is_activator(owner->conn))
++			return -EADDRNOTAVAIL;
++
++		dst = kdbus_conn_ref(owner->conn);
 +	}
 +
 +	*out_name = name;
@@ -10306,7 +10325,7 @@ index 0000000..d94b417e
 +	mutex_unlock(&dst->lock);
 +
 +	if (!reply) {
-+		ret = -EPERM;
++		ret = -EBADSLT;
 +		goto exit;
 +	}
 +
@@ -10549,7 +10568,7 @@ index 0000000..d94b417e
 +					struct kdbus_conn *whom,
 +					unsigned int access)
 +{
-+	struct kdbus_name_entry *ne;
++	struct kdbus_name_owner *owner;
 +	bool pass = false;
 +	int res;
 +
@@ -10558,10 +10577,14 @@ index 0000000..d94b417e
 +	down_read(&db->entries_rwlock);
 +	mutex_lock(&whom->lock);
 +
-+	list_for_each_entry(ne, &whom->names_list, conn_entry) {
-+		res = kdbus_policy_query_unlocked(db, conn_creds ? : conn->cred,
-+						  ne->name,
-+						  kdbus_strhash(ne->name));
++	list_for_each_entry(owner, &whom->names_list, conn_entry) {
++		if (owner->flags & KDBUS_NAME_IN_QUEUE)
++			continue;
++
++		res = kdbus_policy_query_unlocked(db,
++					conn_creds ? : conn->cred,
++					owner->name->name,
++					kdbus_strhash(owner->name->name));
 +		if (res >= (int)access) {
 +			pass = true;
 +			break;
@@ -10601,7 +10624,7 @@ index 0000000..d94b417e
 +			return false;
 +	}
 +
-+	if (conn->privileged)
++	if (conn->owner)
 +		return true;
 +
 +	res = kdbus_policy_query(&conn->ep->bus->policy_db, conn_creds,
@@ -10631,7 +10654,7 @@ index 0000000..d94b417e
 +					 to, KDBUS_POLICY_TALK))
 +		return false;
 +
-+	if (conn->privileged)
++	if (conn->owner)
 +		return true;
 +	if (uid_eq(conn_creds->euid, to->cred->uid))
 +		return true;
@@ -10750,12 +10773,12 @@ index 0000000..d94b417e
 +/**
 + * kdbus_cmd_hello() - handle KDBUS_CMD_HELLO
 + * @ep:			Endpoint to operate on
-+ * @privileged:		Whether the caller is privileged
++ * @file:		File this connection is opened on
 + * @argp:		Command payload
 + *
 + * Return: NULL or newly created connection on success, ERR_PTR on failure.
 + */
-+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
++struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, struct file *file,
 +				   void __user *argp)
 +{
 +	struct kdbus_cmd_hello *cmd;
@@ -10790,7 +10813,7 @@ index 0000000..d94b417e
 +
 +	item_name = argv[1].item ? argv[1].item->str : NULL;
 +
-+	c = kdbus_conn_new(ep, privileged, cmd, item_name,
++	c = kdbus_conn_new(ep, file, cmd, item_name,
 +			   argv[2].item ? &argv[2].item->creds : NULL,
 +			   argv[3].item ? &argv[3].item->pids : NULL,
 +			   argv[4].item ? argv[4].item->str : NULL,
@@ -10879,6 +10902,7 @@ index 0000000..d94b417e
 +	struct kdbus_meta_conn *conn_meta = NULL;
 +	struct kdbus_pool_slice *slice = NULL;
 +	struct kdbus_name_entry *entry = NULL;
++	struct kdbus_name_owner *owner = NULL;
 +	struct kdbus_conn *owner_conn = NULL;
 +	struct kdbus_item *meta_items = NULL;
 +	struct kdbus_info info = {};
@@ -10915,15 +10939,17 @@ index 0000000..d94b417e
 +
 +	if (name) {
 +		entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
-+		if (!entry || !entry->conn ||
++		if (entry)
++			owner = kdbus_name_get_owner(entry);
++		if (!owner ||
 +		    !kdbus_conn_policy_see_name(conn, current_cred(), name) ||
-+		    (cmd->id != 0 && entry->conn->id != cmd->id)) {
++		    (cmd->id != 0 && owner->conn->id != cmd->id)) {
 +			/* pretend a name doesn't exist if you cannot see it */
 +			ret = -ESRCH;
 +			goto exit;
 +		}
 +
-+		owner_conn = kdbus_conn_ref(entry->conn);
++		owner_conn = kdbus_conn_ref(owner->conn);
 +	} else if (cmd->id > 0) {
 +		owner_conn = kdbus_bus_find_conn_by_id(bus, cmd->id);
 +		if (!owner_conn || !kdbus_conn_policy_see(conn, current_cred(),
@@ -11390,10 +11416,10 @@ index 0000000..d94b417e
 +}
 diff --git a/ipc/kdbus/connection.h b/ipc/kdbus/connection.h
 new file mode 100644
-index 0000000..5ee864e
+index 0000000..1ad0820
 --- /dev/null
 +++ b/ipc/kdbus/connection.h
-@@ -0,0 +1,261 @@
+@@ -0,0 +1,260 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -11426,6 +11452,7 @@ index 0000000..5ee864e
 +					 KDBUS_HELLO_POLICY_HOLDER | \
 +					 KDBUS_HELLO_MONITOR)
 +
++struct kdbus_name_entry;
 +struct kdbus_quota;
 +struct kdbus_staging;
 +
@@ -11457,7 +11484,6 @@ index 0000000..5ee864e
 + * @cred:		The credentials of the connection at creation time
 + * @pid:		Pid at creation time
 + * @root_path:		Root path at creation time
-+ * @name_count:		Number of owned well-known names
 + * @request_count:	Number of pending requests issued by this
 + *			connection that are waiting for replies from
 + *			other peers
@@ -11466,10 +11492,10 @@ index 0000000..5ee864e
 + * @queue:		The message queue associated with this connection
 + * @quota:		Array of per-user quota indexed by user->id
 + * @n_quota:		Number of elements in quota array
-+ * @activator_of:	Well-known name entry this connection acts as an
 + * @names_list:		List of well-known names
-+ * @names_queue_list:	Well-known names this connection waits for
-+ * @privileged:		Whether this connection is privileged on the bus
++ * @name_count:		Number of owned well-known names
++ * @privileged:		Whether this connection is privileged on the domain
++ * @owner:		Owned by the same user as the bus owner
 + */
 +struct kdbus_conn {
 +	struct kref kref;
@@ -11497,7 +11523,6 @@ index 0000000..5ee864e
 +	const struct cred *cred;
 +	struct pid *pid;
 +	struct path root_path;
-+	atomic_t name_count;
 +	atomic_t request_count;
 +	atomic_t lost_count;
 +	wait_queue_head_t wait;
@@ -11507,11 +11532,11 @@ index 0000000..5ee864e
 +	unsigned int n_quota;
 +
 +	/* protected by registry->rwlock */
-+	struct kdbus_name_entry *activator_of;
 +	struct list_head names_list;
-+	struct list_head names_queue_list;
++	unsigned int name_count;
 +
 +	bool privileged:1;
++	bool owner:1;
 +};
 +
 +struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn);
@@ -11550,7 +11575,7 @@ index 0000000..5ee864e
 +					const struct kdbus_msg *msg);
 +
 +/* command dispatcher */
-+struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
++struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, struct file *file,
 +				   void __user *argp);
 +int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp);
 +int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp);
@@ -12042,10 +12067,10 @@ index 0000000..447a2bd
 +#endif
 diff --git a/ipc/kdbus/endpoint.c b/ipc/kdbus/endpoint.c
 new file mode 100644
-index 0000000..977964d
+index 0000000..44e7a20
 --- /dev/null
 +++ b/ipc/kdbus/endpoint.c
-@@ -0,0 +1,275 @@
+@@ -0,0 +1,303 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -12232,6 +12257,34 @@ index 0000000..977964d
 +}
 +
 +/**
++ * kdbus_ep_is_privileged() - check whether a file is privileged
++ * @ep:		endpoint to operate on
++ * @file:	file to test
++ *
++ * Return: True if @file is privileged in the domain of @ep.
++ */
++bool kdbus_ep_is_privileged(struct kdbus_ep *ep, struct file *file)
++{
++	return !ep->user &&
++		file_ns_capable(file, ep->bus->domain->user_namespace,
++				CAP_IPC_OWNER);
++}
++
++/**
++ * kdbus_ep_is_owner() - check whether a file should be treated as bus owner
++ * @ep:		endpoint to operate on
++ * @file:	file to test
++ *
++ * Return: True if @file should be treated as bus owner on @ep
++ */
++bool kdbus_ep_is_owner(struct kdbus_ep *ep, struct file *file)
++{
++	return !ep->user &&
++		(uid_eq(file->f_cred->euid, ep->bus->node.uid) ||
++		 kdbus_ep_is_privileged(ep, file));
++}
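
A connection's privilege bits now derive from these two predicates
rather than from a flag computed once at open() time (the handle.c hunk
further below drops that code). A minimal sketch of how the constructor
could record both bits, assuming kdbus_conn_new() is handed the same
struct file that the updated kdbus_cmd_hello() receives (the full
constructor body is not part of this hunk):

	/* sketch only: field names from connection.h above, predicates
	 * from this hunk; the surrounding constructor body is assumed */
	static void conn_init_privileges(struct kdbus_conn *conn,
					 struct kdbus_ep *ep,
					 struct file *file)
	{
		/* CAP_IPC_OWNER in the bus domain's user namespace */
		conn->privileged = kdbus_ep_is_privileged(ep, file);
		/* bus creator's euid, or privileged as above */
		conn->owner = kdbus_ep_is_owner(ep, file);
	}
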
++
++/**
 + * kdbus_cmd_ep_make() - handle KDBUS_CMD_ENDPOINT_MAKE
 + * @bus:		bus to operate on
 + * @argp:		command payload
@@ -12323,10 +12376,10 @@ index 0000000..977964d
 +}
 diff --git a/ipc/kdbus/endpoint.h b/ipc/kdbus/endpoint.h
 new file mode 100644
-index 0000000..bc1b94a
+index 0000000..e0da59f
 --- /dev/null
 +++ b/ipc/kdbus/endpoint.h
-@@ -0,0 +1,67 @@
+@@ -0,0 +1,70 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -12390,6 +12443,9 @@ index 0000000..bc1b94a
 +struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep);
 +struct kdbus_ep *kdbus_ep_unref(struct kdbus_ep *ep);
 +
++bool kdbus_ep_is_privileged(struct kdbus_ep *ep, struct file *file);
++bool kdbus_ep_is_owner(struct kdbus_ep *ep, struct file *file);
++
 +struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp);
 +int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp);
 +
@@ -12944,10 +13000,10 @@ index 0000000..62f7d6a
 +#endif
 diff --git a/ipc/kdbus/handle.c b/ipc/kdbus/handle.c
 new file mode 100644
-index 0000000..e0e06b0
+index 0000000..fc60932
 --- /dev/null
 +++ b/ipc/kdbus/handle.c
-@@ -0,0 +1,709 @@
+@@ -0,0 +1,691 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13214,7 +13270,6 @@ index 0000000..e0e06b0
 + * @bus_owner:		bus this handle owns
 + * @ep_owner:		endpoint this handle owns
 + * @conn:		connection this handle owns
-+ * @privileged:		Flag to mark a handle as privileged
 + */
 +struct kdbus_handle {
 +	struct mutex lock;
@@ -13225,8 +13280,6 @@ index 0000000..e0e06b0
 +		struct kdbus_ep *ep_owner;
 +		struct kdbus_conn *conn;
 +	};
-+
-+	bool privileged:1;
 +};
 +
 +static int kdbus_handle_open(struct inode *inode, struct file *file)
@@ -13248,23 +13301,6 @@ index 0000000..e0e06b0
 +	mutex_init(&handle->lock);
 +	handle->type = KDBUS_HANDLE_NONE;
 +
-+	if (node->type == KDBUS_NODE_ENDPOINT) {
-+		struct kdbus_ep *ep = kdbus_ep_from_node(node);
-+		struct kdbus_bus *bus = ep->bus;
-+
-+		/*
-+		 * A connection is privileged if it is opened on an endpoint
-+		 * without custom policy and either:
-+		 *   * the user has CAP_IPC_OWNER in the domain user namespace
-+		 * or
-+		 *   * the callers euid matches the uid of the bus creator
-+		 */
-+		if (!ep->user &&
-+		    (ns_capable(bus->domain->user_namespace, CAP_IPC_OWNER) ||
-+		     uid_eq(file->f_cred->euid, bus->node.uid)))
-+			handle->privileged = true;
-+	}
-+
 +	file->private_data = handle;
 +	ret = 0;
 +
@@ -13356,6 +13392,7 @@ index 0000000..e0e06b0
 +	struct kdbus_handle *handle = file->private_data;
 +	struct kdbus_node *node = file_inode(file)->i_private;
 +	struct kdbus_ep *ep, *file_ep = kdbus_ep_from_node(node);
++	struct kdbus_bus *bus = file_ep->bus;
 +	struct kdbus_conn *conn;
 +	int ret = 0;
 +
@@ -13363,14 +13400,14 @@ index 0000000..e0e06b0
 +		return -ESHUTDOWN;
 +
 +	switch (cmd) {
-+	case KDBUS_CMD_ENDPOINT_MAKE:
++	case KDBUS_CMD_ENDPOINT_MAKE: {
 +		/* creating custom endpoints is a privileged operation */
-+		if (!handle->privileged) {
++		if (!kdbus_ep_is_owner(file_ep, file)) {
 +			ret = -EPERM;
 +			break;
 +		}
 +
-+		ep = kdbus_cmd_ep_make(file_ep->bus, buf);
++		ep = kdbus_cmd_ep_make(bus, buf);
 +		if (IS_ERR_OR_NULL(ep)) {
 +			ret = PTR_ERR_OR_ZERO(ep);
 +			break;
@@ -13379,9 +13416,10 @@ index 0000000..e0e06b0
 +		handle->ep_owner = ep;
 +		ret = KDBUS_HANDLE_EP_OWNER;
 +		break;
++	}
 +
 +	case KDBUS_CMD_HELLO:
-+		conn = kdbus_cmd_hello(file_ep, handle->privileged, buf);
++		conn = kdbus_cmd_hello(file_ep, file, buf);
 +		if (IS_ERR_OR_NULL(conn)) {
 +			ret = PTR_ERR_OR_ZERO(conn);
 +			break;
@@ -13659,7 +13697,7 @@ index 0000000..e0e06b0
 +};
 diff --git a/ipc/kdbus/handle.h b/ipc/kdbus/handle.h
 new file mode 100644
-index 0000000..8a36c05
+index 0000000..5dde2c1
 --- /dev/null
 +++ b/ipc/kdbus/handle.h
 @@ -0,0 +1,103 @@
@@ -13710,7 +13748,7 @@ index 0000000..8a36c05
 + * @argv:		array of items this command supports
 + * @user:		set by parser to user-space location of current command
 + * @cmd:		set by parser to kernel copy of command payload
-+ * @cmd_buf:		512 bytes inline buf to avoid kmalloc() on small cmds
++ * @cmd_buf:		inline buf to avoid kmalloc() on small cmds
 + * @items:		points to item array in @cmd
 + * @items_size:		size of @items in bytes
 + * @is_cmd:		whether this is a command-payload or msg-payload
@@ -13720,7 +13758,7 @@ index 0000000..8a36c05
 + * the object to kdbus_args_parse(). The parser will copy the command payload
 + * into kernel-space and verify the correctness of the data.
 + *
-+ * We use a 512 bytes buffer for small command payloads, to be allocated on
++ * We use a 256-byte buffer for small command payloads, to be allocated on
 + * stack on syscall entrance.
 + */
 +struct kdbus_args {
@@ -13730,7 +13768,7 @@ index 0000000..8a36c05
 +
 +	struct kdbus_cmd __user *user;
 +	struct kdbus_cmd *cmd;
-+	u8 cmd_buf[512];
++	u8 cmd_buf[256];
 +
 +	struct kdbus_item *items;
 +	size_t items_size;
@@ -14914,7 +14952,7 @@ index 0000000..ceb492f
 +#endif
 diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
 new file mode 100644
-index 0000000..3520f45
+index 0000000..ae565cd
 --- /dev/null
 +++ b/ipc/kdbus/message.c
 @@ -0,0 +1,1040 @@
@@ -15591,7 +15629,7 @@ index 0000000..3520f45
 +	if (!staging)
 +		return ERR_PTR(-ENOMEM);
 +
-+	staging->msg_seqnum = atomic64_inc_return(&bus->domain->last_id);
++	staging->msg_seqnum = atomic64_inc_return(&bus->last_message_id);
 +	staging->n_parts = 0; /* we reserve n_parts, but don't enforce them */
 +	staging->parts = (void *)(staging + 1);
 +
@@ -15806,9 +15844,9 @@ index 0000000..3520f45
 +{
 +	struct kdbus_item *item, *meta_items = NULL;
 +	struct kdbus_pool_slice *slice = NULL;
-+	size_t off, size, msg_size, meta_size;
++	size_t off, size, meta_size;
 +	struct iovec *v;
-+	u64 attach;
++	u64 attach, msg_size;
 +	int ret;
 +
 +	/*
@@ -15840,7 +15878,7 @@ index 0000000..3520f45
 +
 +	/* msg.size */
 +	v->iov_len = sizeof(msg_size);
-+	v->iov_base = &msg_size;
++	v->iov_base = (void __user *)&msg_size;
 +	++v;
 +
 +	/* msg (after msg.size) plus items */
@@ -15857,7 +15895,7 @@ index 0000000..3520f45
 +	if (meta_size > 0) {
 +		/* metadata items */
 +		v->iov_len = meta_size;
-+		v->iov_base = meta_items;
++		v->iov_base = (void __user *)meta_items;
 +		++v;
 +
 +		/* padding after metadata */
@@ -16086,10 +16124,10 @@ index 0000000..298f9c9
 +#endif
 diff --git a/ipc/kdbus/metadata.c b/ipc/kdbus/metadata.c
 new file mode 100644
-index 0000000..d4973a9
+index 0000000..71ca475
 --- /dev/null
 +++ b/ipc/kdbus/metadata.c
-@@ -0,0 +1,1342 @@
+@@ -0,0 +1,1347 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -16695,7 +16733,7 @@ index 0000000..d4973a9
 +static int kdbus_meta_conn_collect_names(struct kdbus_meta_conn *mc,
 +					 struct kdbus_conn *conn)
 +{
-+	const struct kdbus_name_entry *e;
++	const struct kdbus_name_owner *owner;
 +	struct kdbus_item *item;
 +	size_t slen, size;
 +
@@ -16703,9 +16741,11 @@ index 0000000..d4973a9
 +
 +	size = 0;
 +	/* open-code length calculation to avoid final padding */
-+	list_for_each_entry(e, &conn->names_list, conn_entry)
-+		size = KDBUS_ALIGN8(size) + KDBUS_ITEM_HEADER_SIZE +
-+			sizeof(struct kdbus_name) + strlen(e->name) + 1;
++	list_for_each_entry(owner, &conn->names_list, conn_entry)
++		if (!(owner->flags & KDBUS_NAME_IN_QUEUE))
++			size = KDBUS_ALIGN8(size) + KDBUS_ITEM_HEADER_SIZE +
++				sizeof(struct kdbus_name) +
++				strlen(owner->name->name) + 1;
 +
 +	if (!size)
 +		return 0;
@@ -16718,12 +16758,15 @@ index 0000000..d4973a9
 +	mc->owned_names_items = item;
 +	mc->owned_names_size = size;
 +
-+	list_for_each_entry(e, &conn->names_list, conn_entry) {
-+		slen = strlen(e->name) + 1;
++	list_for_each_entry(owner, &conn->names_list, conn_entry) {
++		if (owner->flags & KDBUS_NAME_IN_QUEUE)
++			continue;
++
++		slen = strlen(owner->name->name) + 1;
 +		kdbus_item_set(item, KDBUS_ITEM_OWNED_NAME, NULL,
 +			       sizeof(struct kdbus_name) + slen);
-+		item->name.flags = e->flags;
-+		memcpy(item->name.name, e->name, slen);
++		item->name.flags = owner->flags;
++		memcpy(item->name.name, owner->name->name, slen);
 +		item = KDBUS_ITEM_NEXT(item);
 +	}
 +
@@ -17526,10 +17569,10 @@ index 0000000..dba7cc7
 +#endif
 diff --git a/ipc/kdbus/names.c b/ipc/kdbus/names.c
 new file mode 100644
-index 0000000..057f806
+index 0000000..bf44ca3
 --- /dev/null
 +++ b/ipc/kdbus/names.c
-@@ -0,0 +1,770 @@
+@@ -0,0 +1,854 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -17566,167 +17609,128 @@ index 0000000..057f806
 +#include "notify.h"
 +#include "policy.h"
 +
-+struct kdbus_name_pending {
-+	u64 flags;
-+	struct kdbus_conn *conn;
-+	struct kdbus_name_entry *name;
-+	struct list_head conn_entry;
-+	struct list_head name_entry;
-+};
++#define KDBUS_NAME_SAVED_MASK (KDBUS_NAME_ALLOW_REPLACEMENT |	\
++			       KDBUS_NAME_QUEUE)
 +
-+static int kdbus_name_pending_new(struct kdbus_name_entry *e,
-+				  struct kdbus_conn *conn, u64 flags)
++static bool kdbus_name_owner_is_used(struct kdbus_name_owner *owner)
 +{
-+	struct kdbus_name_pending *p;
-+
-+	kdbus_conn_assert_active(conn);
-+
-+	p = kmalloc(sizeof(*p), GFP_KERNEL);
-+	if (!p)
-+		return -ENOMEM;
-+
-+	p->flags = flags;
-+	p->conn = conn;
-+	p->name = e;
-+	list_add_tail(&p->conn_entry, &conn->names_queue_list);
-+	list_add_tail(&p->name_entry, &e->queue);
-+
-+	return 0;
++	return !list_empty(&owner->name_entry) ||
++	       owner == owner->name->activator;
 +}
 +
-+static void kdbus_name_pending_free(struct kdbus_name_pending *p)
++static struct kdbus_name_owner *
++kdbus_name_owner_new(struct kdbus_conn *conn, struct kdbus_name_entry *name,
++		     u64 flags)
 +{
-+	if (!p)
-+		return;
++	struct kdbus_name_owner *owner;
 +
-+	list_del(&p->name_entry);
-+	list_del(&p->conn_entry);
-+	kfree(p);
-+}
-+
-+static struct kdbus_name_entry *
-+kdbus_name_entry_new(struct kdbus_name_registry *r, u32 hash, const char *name)
-+{
-+	struct kdbus_name_entry *e;
-+	size_t namelen;
++	kdbus_conn_assert_active(conn);
 +
-+	namelen = strlen(name);
++	if (conn->name_count >= KDBUS_CONN_MAX_NAMES)
++		return ERR_PTR(-E2BIG);
 +
-+	e = kmalloc(sizeof(*e) + namelen + 1, GFP_KERNEL);
-+	if (!e)
++	owner = kmalloc(sizeof(*owner), GFP_KERNEL);
++	if (!owner)
 +		return ERR_PTR(-ENOMEM);
 +
-+	e->name_id = ++r->name_seq_last;
-+	e->flags = 0;
-+	e->conn = NULL;
-+	e->activator = NULL;
-+	INIT_LIST_HEAD(&e->queue);
-+	INIT_LIST_HEAD(&e->conn_entry);
-+	hash_add(r->entries_hash, &e->hentry, hash);
-+	memcpy(e->name, name, namelen + 1);
++	owner->flags = flags & KDBUS_NAME_SAVED_MASK;
++	owner->conn = conn;
++	owner->name = name;
++	list_add_tail(&owner->conn_entry, &conn->names_list);
++	INIT_LIST_HEAD(&owner->name_entry);
 +
-+	return e;
++	++conn->name_count;
++	return owner;
 +}
 +
-+static void kdbus_name_entry_free(struct kdbus_name_entry *e)
++static void kdbus_name_owner_free(struct kdbus_name_owner *owner)
 +{
-+	if (!e)
++	if (!owner)
 +		return;
 +
-+	WARN_ON(!list_empty(&e->conn_entry));
-+	WARN_ON(!list_empty(&e->queue));
-+	WARN_ON(e->activator);
-+	WARN_ON(e->conn);
-+
-+	hash_del(&e->hentry);
-+	kfree(e);
++	WARN_ON(kdbus_name_owner_is_used(owner));
++	--owner->conn->name_count;
++	list_del(&owner->conn_entry);
++	kfree(owner);
 +}
 +
-+static void kdbus_name_entry_set_owner(struct kdbus_name_entry *e,
-+				       struct kdbus_conn *conn, u64 flags)
++static struct kdbus_name_owner *
++kdbus_name_owner_find(struct kdbus_name_entry *name, struct kdbus_conn *conn)
 +{
-+	WARN_ON(e->conn);
++	struct kdbus_name_owner *owner;
++
++	/*
++	 * Use conn->names_list over name->queue to make sure boundaries of
++	 * this linear search are controlled by the connection itself.
++	 * Furthermore, this will find normal owners as well as activators
++	 * without any additional code.
++	 */
++	list_for_each_entry(owner, &conn->names_list, conn_entry)
++		if (owner->name == name)
++			return owner;
 +
-+	e->conn = kdbus_conn_ref(conn);
-+	e->flags = flags;
-+	atomic_inc(&conn->name_count);
-+	list_add_tail(&e->conn_entry, &e->conn->names_list);
++	return NULL;
 +}
 +
-+static void kdbus_name_entry_remove_owner(struct kdbus_name_entry *e)
++static bool kdbus_name_entry_is_used(struct kdbus_name_entry *name)
 +{
-+	WARN_ON(!e->conn);
-+
-+	list_del_init(&e->conn_entry);
-+	atomic_dec(&e->conn->name_count);
-+	e->flags = 0;
-+	e->conn = kdbus_conn_unref(e->conn);
++	return !list_empty(&name->queue) || name->activator;
 +}
 +
-+static void kdbus_name_entry_replace_owner(struct kdbus_name_entry *e,
-+					   struct kdbus_conn *conn, u64 flags)
++static struct kdbus_name_owner *
++kdbus_name_entry_first(struct kdbus_name_entry *name)
 +{
-+	if (WARN_ON(!e->conn) || WARN_ON(conn == e->conn))
-+		return;
-+
-+	kdbus_notify_name_change(conn->ep->bus, KDBUS_ITEM_NAME_CHANGE,
-+				 e->conn->id, conn->id,
-+				 e->flags, flags, e->name);
-+	kdbus_name_entry_remove_owner(e);
-+	kdbus_name_entry_set_owner(e, conn, flags);
++	return list_first_entry_or_null(&name->queue, struct kdbus_name_owner,
++					name_entry);
 +}
 +
-+/**
-+ * kdbus_name_is_valid() - check if a name is valid
-+ * @p:			The name to check
-+ * @allow_wildcard:	Whether or not to allow a wildcard name
-+ *
-+ * A name is valid if all of the following criterias are met:
-+ *
-+ *  - The name has two or more elements separated by a period ('.') character.
-+ *  - All elements must contain at least one character.
-+ *  - Each element must only contain the ASCII characters "[A-Z][a-z][0-9]_-"
-+ *    and must not begin with a digit.
-+ *  - The name must not exceed KDBUS_NAME_MAX_LEN.
-+ *  - If @allow_wildcard is true, the name may end on '.*'
-+ */
-+bool kdbus_name_is_valid(const char *p, bool allow_wildcard)
++static struct kdbus_name_entry *
++kdbus_name_entry_new(struct kdbus_name_registry *r, u32 hash,
++		     const char *name_str)
 +{
-+	bool dot, found_dot = false;
-+	const char *q;
++	struct kdbus_name_entry *name;
++	size_t namelen;
 +
-+	for (dot = true, q = p; *q; q++) {
-+		if (*q == '.') {
-+			if (dot)
-+				return false;
++	lockdep_assert_held(&r->rwlock);
 +
-+			found_dot = true;
-+			dot = true;
-+		} else {
-+			bool good;
++	namelen = strlen(name_str);
 +
-+			good = isalpha(*q) || (!dot && isdigit(*q)) ||
-+				*q == '_' || *q == '-' ||
-+				(allow_wildcard && dot &&
-+					*q == '*' && *(q + 1) == '\0');
++	name = kmalloc(sizeof(*name) + namelen + 1, GFP_KERNEL);
++	if (!name)
++		return ERR_PTR(-ENOMEM);
 +
-+			if (!good)
-+				return false;
++	name->name_id = ++r->name_seq_last;
++	name->activator = NULL;
++	INIT_LIST_HEAD(&name->queue);
++	hash_add(r->entries_hash, &name->hentry, hash);
++	memcpy(name->name, name_str, namelen + 1);
 +
-+			dot = false;
-+		}
-+	}
++	return name;
++}
 +
-+	if (q - p > KDBUS_NAME_MAX_LEN)
-+		return false;
++static void kdbus_name_entry_free(struct kdbus_name_entry *name)
++{
++	if (!name)
++		return;
 +
-+	if (dot)
-+		return false;
++	WARN_ON(kdbus_name_entry_is_used(name));
++	hash_del(&name->hentry);
++	kfree(name);
++}
 +
-+	if (!found_dot)
-+		return false;
++static struct kdbus_name_entry *
++kdbus_name_entry_find(struct kdbus_name_registry *r, u32 hash,
++		      const char *name_str)
++{
++	struct kdbus_name_entry *name;
 +
-+	return true;
++	lockdep_assert_held(&r->rwlock);
++
++	hash_for_each_possible(r->entries_hash, name, hentry, hash)
++		if (!strcmp(name->name, name_str))
++			return name;
++
++	return NULL;
 +}
 +
 +/**
@@ -17750,32 +17754,19 @@ index 0000000..057f806
 +}
 +
 +/**
-+ * kdbus_name_registry_free() - drop a name reg's reference
-+ * @reg:		The name registry, may be %NULL
++ * kdbus_name_registry_free() - free name registry
++ * @r:		name registry to free, or NULL
 + *
-+ * Cleanup the name registry's internal structures.
++ * Free a name registry and clean up all internal objects. This is a no-op if
++ * you pass NULL as registry.
 + */
-+void kdbus_name_registry_free(struct kdbus_name_registry *reg)
++void kdbus_name_registry_free(struct kdbus_name_registry *r)
 +{
-+	if (!reg)
++	if (!r)
 +		return;
 +
-+	WARN_ON(!hash_empty(reg->entries_hash));
-+	kfree(reg);
-+}
-+
-+static struct kdbus_name_entry *
-+kdbus_name_find(struct kdbus_name_registry *reg, u32 hash, const char *name)
-+{
-+	struct kdbus_name_entry *e;
-+
-+	lockdep_assert_held(&reg->rwlock);
-+
-+	hash_for_each_possible(reg->entries_hash, e, hentry, hash)
-+		if (strcmp(e->name, name) == 0)
-+			return e;
-+
-+	return NULL;
++	WARN_ON(!hash_empty(r->entries_hash));
++	kfree(r);
 +}
 +
 +/**
@@ -17792,169 +17783,286 @@ index 0000000..057f806
 +struct kdbus_name_entry *
 +kdbus_name_lookup_unlocked(struct kdbus_name_registry *reg, const char *name)
 +{
-+	return kdbus_name_find(reg, kdbus_strhash(name), name);
++	return kdbus_name_entry_find(reg, kdbus_strhash(name), name);
 +}
 +
-+/**
-+ * kdbus_name_acquire() - acquire a name
-+ * @reg:		The name registry
-+ * @conn:		The connection to pin this entry to
-+ * @name:		The name to acquire
-+ * @flags:		Acquisition flags (KDBUS_NAME_*)
-+ * @return_flags:	Pointer to return flags for the acquired name
-+ *			(KDBUS_NAME_*), may be %NULL
-+ *
-+ * Callers must ensure that @conn is either a privileged bus user or has
-+ * sufficient privileges in the policy-db to own the well-known name @name.
-+ *
-+ * Return: 0 success, negative error number on failure.
-+ */
-+int kdbus_name_acquire(struct kdbus_name_registry *reg,
-+		       struct kdbus_conn *conn, const char *name,
-+		       u64 flags, u64 *return_flags)
++static int kdbus_name_become_activator(struct kdbus_name_owner *owner,
++				       u64 *return_flags)
 +{
-+	struct kdbus_name_entry *e;
-+	u64 rflags = 0;
++	if (kdbus_name_owner_is_used(owner))
++		return -EALREADY;
++	if (owner->name->activator)
++		return -EEXIST;
++
++	owner->name->activator = owner;
++	owner->flags |= KDBUS_NAME_ACTIVATOR;
++
++	if (kdbus_name_entry_first(owner->name)) {
++		owner->flags |= KDBUS_NAME_IN_QUEUE;
++	} else {
++		owner->flags |= KDBUS_NAME_PRIMARY;
++		kdbus_notify_name_change(owner->conn->ep->bus,
++					 KDBUS_ITEM_NAME_ADD,
++					 0, owner->conn->id,
++					 0, owner->flags,
++					 owner->name->name);
++	}
++
++	if (return_flags)
++		*return_flags = owner->flags | KDBUS_NAME_ACQUIRED;
++
++	return 0;
++}
++
++static int kdbus_name_update(struct kdbus_name_owner *owner, u64 flags,
++			     u64 *return_flags)
++{
++	struct kdbus_name_owner *primary, *activator;
++	struct kdbus_name_entry *name;
++	struct kdbus_bus *bus;
++	u64 nflags = 0;
 +	int ret = 0;
-+	u32 hash;
 +
-+	kdbus_conn_assert_active(conn);
++	name = owner->name;
++	bus = owner->conn->ep->bus;
++	primary = kdbus_name_entry_first(name);
++	activator = name->activator;
 +
-+	down_write(&reg->rwlock);
++	/* cannot be activator and acquire a name */
++	if (owner == activator)
++		return -EUCLEAN;
 +
-+	if (!kdbus_conn_policy_own_name(conn, current_cred(), name)) {
-+		ret = -EPERM;
-+		goto exit_unlock;
-+	}
++	/* update saved flags */
++	owner->flags = flags & KDBUS_NAME_SAVED_MASK;
 +
-+	hash = kdbus_strhash(name);
-+	e = kdbus_name_find(reg, hash, name);
-+	if (!e) {
-+		/* claim new name */
++	if (!primary) {
++		/*
++		 * No primary owner (but maybe an activator). Take over the
++		 * name.
++		 */
 +
-+		if (conn->activator_of) {
-+			ret = -EINVAL;
-+			goto exit_unlock;
++		list_add(&owner->name_entry, &name->queue);
++		owner->flags |= KDBUS_NAME_PRIMARY;
++		nflags |= KDBUS_NAME_ACQUIRED;
++
++		/* move messages to new owner on activation */
++		if (activator) {
++			kdbus_conn_move_messages(owner->conn, activator->conn,
++						 name->name_id);
++			kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_CHANGE,
++					activator->conn->id, owner->conn->id,
++					activator->flags, owner->flags,
++					name->name);
++			activator->flags &= ~KDBUS_NAME_PRIMARY;
++			activator->flags |= KDBUS_NAME_IN_QUEUE;
++		} else {
++			kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_ADD,
++						 0, owner->conn->id,
++						 0, owner->flags,
++						 name->name);
 +		}
 +
-+		e = kdbus_name_entry_new(reg, hash, name);
-+		if (IS_ERR(e)) {
-+			ret = PTR_ERR(e);
-+			goto exit_unlock;
-+		}
++	} else if (owner == primary) {
++		/*
++		 * Already the primary owner of the name, flags were already
++		 * updated. Nothing to do.
++		 */
 +
-+		if (kdbus_conn_is_activator(conn)) {
-+			e->activator = kdbus_conn_ref(conn);
-+			conn->activator_of = e;
-+		}
++		owner->flags |= KDBUS_NAME_PRIMARY;
++
++	} else if ((primary->flags & KDBUS_NAME_ALLOW_REPLACEMENT) &&
++		   (flags & KDBUS_NAME_REPLACE_EXISTING)) {
++		/*
++		 * We're not the primary owner but can replace it. Move us
++		 * ahead of the primary owner and acquire the name (possibly
++		 * skipping queued owners ahead of us).
++		 */
++
++		list_del_init(&owner->name_entry);
++		list_add(&owner->name_entry, &name->queue);
++		owner->flags |= KDBUS_NAME_PRIMARY;
++		nflags |= KDBUS_NAME_ACQUIRED;
 +
-+		kdbus_name_entry_set_owner(e, conn, flags);
-+		kdbus_notify_name_change(e->conn->ep->bus, KDBUS_ITEM_NAME_ADD,
-+					 0, e->conn->id, 0, e->flags, e->name);
-+	} else if (e->conn == conn || e == conn->activator_of) {
-+		/* connection already owns that name */
-+		ret = -EALREADY;
-+	} else if (kdbus_conn_is_activator(conn)) {
-+		/* activator claims existing name */
-+
-+		if (conn->activator_of) {
-+			ret = -EINVAL; /* multiple names not allowed */
-+		} else if (e->activator) {
-+			ret = -EEXIST; /* only one activator per name */
++		kdbus_notify_name_change(bus, KDBUS_ITEM_NAME_CHANGE,
++					 primary->conn->id, owner->conn->id,
++					 primary->flags, owner->flags,
++					 name->name);
++
++		/* requeue old primary, or drop if queueing not wanted */
++		if (primary->flags & KDBUS_NAME_QUEUE) {
++			primary->flags &= ~KDBUS_NAME_PRIMARY;
++			primary->flags |= KDBUS_NAME_IN_QUEUE;
 +		} else {
-+			e->activator = kdbus_conn_ref(conn);
-+			conn->activator_of = e;
-+		}
-+	} else if (e->flags & KDBUS_NAME_ACTIVATOR) {
-+		/* claim name of an activator */
-+
-+		kdbus_conn_move_messages(conn, e->activator, 0);
-+		kdbus_name_entry_replace_owner(e, conn, flags);
-+	} else if ((flags & KDBUS_NAME_REPLACE_EXISTING) &&
-+		   (e->flags & KDBUS_NAME_ALLOW_REPLACEMENT)) {
-+		/* claim name of a previous owner */
-+
-+		if (e->flags & KDBUS_NAME_QUEUE) {
-+			/* move owner back to queue if they asked for it */
-+			ret = kdbus_name_pending_new(e, e->conn, e->flags);
-+			if (ret < 0)
-+				goto exit_unlock;
++			list_del_init(&primary->name_entry);
++			kdbus_name_owner_free(primary);
 +		}
 +
-+		kdbus_name_entry_replace_owner(e, conn, flags);
 +	} else if (flags & KDBUS_NAME_QUEUE) {
-+		/* add to waiting-queue of the name */
++		/*
++		 * Name is already occupied and we cannot take it over, but
++		 * queuing is allowed. Put us silently on the queue, if not
++		 * already there.
++		 */
 +
-+		ret = kdbus_name_pending_new(e, conn, flags);
-+		if (ret >= 0)
-+			/* tell the caller that we queued it */
-+			rflags |= KDBUS_NAME_IN_QUEUE;
++		owner->flags |= KDBUS_NAME_IN_QUEUE;
++		if (!kdbus_name_owner_is_used(owner)) {
++			list_add_tail(&owner->name_entry, &name->queue);
++			nflags |= KDBUS_NAME_ACQUIRED;
++		}
++	} else if (kdbus_name_owner_is_used(owner)) {
++		/*
++		 * Already queued on name, but re-queueing was not requested.
++		 * Make sure to unlink it from the name, the caller is
++		 * responsible for releasing it.
++		 */
++
++		list_del_init(&owner->name_entry);
 +	} else {
-+		/* the name is busy, return a failure */
++		/*
++		 * Name is already claimed and queueing is not requested.
++		 * Return error to the caller.
++		 */
++
 +		ret = -EEXIST;
 +	}
 +
-+	if (ret == 0 && return_flags)
-+		*return_flags = rflags;
++	if (return_flags)
++		*return_flags = owner->flags | nflags;
 +
-+exit_unlock:
-+	up_write(&reg->rwlock);
-+	kdbus_notify_flush(conn->ep->bus);
 +	return ret;
 +}
 +
-+static void kdbus_name_release_unlocked(struct kdbus_name_registry *reg,
-+					struct kdbus_name_entry *e)
++int kdbus_name_acquire(struct kdbus_name_registry *reg,
++		       struct kdbus_conn *conn, const char *name_str,
++		       u64 flags, u64 *return_flags)
 +{
-+	struct kdbus_name_pending *p;
++	struct kdbus_name_entry *name = NULL;
++	struct kdbus_name_owner *owner = NULL;
++	u32 hash;
++	int ret;
++
++	kdbus_conn_assert_active(conn);
 +
-+	lockdep_assert_held(&reg->rwlock);
++	down_write(&reg->rwlock);
 +
-+	p = list_first_entry_or_null(&e->queue, struct kdbus_name_pending,
-+				     name_entry);
++	/*
++	 * Verify the connection has access to the name. Do this before testing
++	 * for double-acquisitions and other errors to make sure we do not leak
++	 * information about this name through possible custom endpoints.
++	 */
++	if (!kdbus_conn_policy_own_name(conn, current_cred(), name_str)) {
++		ret = -EPERM;
++		goto exit;
++	}
 +
-+	if (p) {
-+		/* give it to first active waiter in the queue */
-+		kdbus_name_entry_replace_owner(e, p->conn, p->flags);
-+		kdbus_name_pending_free(p);
-+	} else if (e->activator && e->activator != e->conn) {
-+		/* hand it back to an active activator connection */
-+		kdbus_conn_move_messages(e->activator, e->conn, e->name_id);
-+		kdbus_name_entry_replace_owner(e, e->activator,
-+					       KDBUS_NAME_ACTIVATOR);
++	/*
++	 * Lookup the name entry. If it already exists, search for an owner
++	 * entry as we might already own that name. If either does not exist,
++	 * we will allocate a fresh one.
++	 */
++	hash = kdbus_strhash(name_str);
++	name = kdbus_name_entry_find(reg, hash, name_str);
++	if (name) {
++		owner = kdbus_name_owner_find(name, conn);
 +	} else {
-+		/* release the name */
-+		kdbus_notify_name_change(e->conn->ep->bus,
-+					 KDBUS_ITEM_NAME_REMOVE,
-+					 e->conn->id, 0, e->flags, 0, e->name);
-+		kdbus_name_entry_remove_owner(e);
-+		kdbus_name_entry_free(e);
++		name = kdbus_name_entry_new(reg, hash, name_str);
++		if (IS_ERR(name)) {
++			ret = PTR_ERR(name);
++			name = NULL;
++			goto exit;
++		}
++	}
++
++	/* create name owner object if not already queued */
++	if (!owner) {
++		owner = kdbus_name_owner_new(conn, name, flags);
++		if (IS_ERR(owner)) {
++			ret = PTR_ERR(owner);
++			owner = NULL;
++			goto exit;
++		}
++	}
++
++	if (flags & KDBUS_NAME_ACTIVATOR)
++		ret = kdbus_name_become_activator(owner, return_flags);
++	else
++		ret = kdbus_name_update(owner, flags, return_flags);
++	if (ret < 0)
++		goto exit;
++
++exit:
++	if (owner && !kdbus_name_owner_is_used(owner))
++		kdbus_name_owner_free(owner);
++	if (name && !kdbus_name_entry_is_used(name))
++		kdbus_name_entry_free(name);
++	up_write(&reg->rwlock);
++	kdbus_notify_flush(conn->ep->bus);
++	return ret;
++}
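
For callers, the visible change is that return_flags always reports the
resulting owner state, and a queued rather than immediate acquisition is
signalled through KDBUS_NAME_IN_QUEUE instead of an error code. A
minimal sketch using the selftest wrapper kdbus_name_acquire() from
kdbus-util.c below, which passes flags into the command and back out:

	uint64_t flags = KDBUS_NAME_QUEUE;
	int ret;

	ret = kdbus_name_acquire(conn, "foo.bar.name", &flags);
	if (ret == 0 && (flags & KDBUS_NAME_IN_QUEUE)) {
		/* someone else is primary owner; we wait in the queue
		 * and get the name handed over once they drop it */
	}
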
++
++static void kdbus_name_release_unlocked(struct kdbus_name_owner *owner)
++{
++	struct kdbus_name_owner *primary, *next;
++	struct kdbus_name_entry *name;
++
++	name = owner->name;
++	primary = kdbus_name_entry_first(name);
++
++	list_del_init(&owner->name_entry);
++	if (owner == name->activator)
++		name->activator = NULL;
++
++	if (!primary || owner == primary) {
++		next = kdbus_name_entry_first(name);
++		if (!next)
++			next = name->activator;
++
++		if (next) {
++			/* hand to next in queue */
++			next->flags &= ~KDBUS_NAME_IN_QUEUE;
++			next->flags |= KDBUS_NAME_PRIMARY;
++			if (next == name->activator)
++				kdbus_conn_move_messages(next->conn,
++							 owner->conn,
++							 name->name_id);
++
++			kdbus_notify_name_change(owner->conn->ep->bus,
++					KDBUS_ITEM_NAME_CHANGE,
++					owner->conn->id, next->conn->id,
++					owner->flags, next->flags,
++					name->name);
++		} else {
++			kdbus_notify_name_change(owner->conn->ep->bus,
++						 KDBUS_ITEM_NAME_REMOVE,
++						 owner->conn->id, 0,
++						 owner->flags, 0,
++						 name->name);
++		}
 +	}
++
++	kdbus_name_owner_free(owner);
++	if (!kdbus_name_entry_is_used(name))
++		kdbus_name_entry_free(name);
 +}
 +
 +static int kdbus_name_release(struct kdbus_name_registry *reg,
 +			      struct kdbus_conn *conn,
-+			      const char *name)
++			      const char *name_str)
 +{
-+	struct kdbus_name_pending *p;
-+	struct kdbus_name_entry *e;
++	struct kdbus_name_owner *owner;
++	struct kdbus_name_entry *name;
 +	int ret = 0;
 +
 +	down_write(&reg->rwlock);
-+	e = kdbus_name_find(reg, kdbus_strhash(name), name);
-+	if (!e) {
-+		ret = -ESRCH;
-+	} else if (e->conn == conn) {
-+		kdbus_name_release_unlocked(reg, e);
++	name = kdbus_name_entry_find(reg, kdbus_strhash(name_str), name_str);
++	if (name) {
++		owner = kdbus_name_owner_find(name, conn);
++		if (owner)
++			kdbus_name_release_unlocked(owner);
++		else
++			ret = -EADDRINUSE;
 +	} else {
-+		ret = -EADDRINUSE;
-+		list_for_each_entry(p, &e->queue, name_entry) {
-+			if (p->conn == conn) {
-+				kdbus_name_pending_free(p);
-+				ret = 0;
-+				break;
-+			}
-+		}
++		ret = -ESRCH;
 +	}
 +	up_write(&reg->rwlock);
 +
@@ -17970,33 +18078,74 @@ index 0000000..057f806
 +void kdbus_name_release_all(struct kdbus_name_registry *reg,
 +			    struct kdbus_conn *conn)
 +{
-+	struct kdbus_name_pending *p;
-+	struct kdbus_conn *activator = NULL;
-+	struct kdbus_name_entry *e;
++	struct kdbus_name_owner *owner;
 +
 +	down_write(&reg->rwlock);
 +
-+	if (conn->activator_of) {
-+		activator = conn->activator_of->activator;
-+		conn->activator_of->activator = NULL;
-+	}
-+
-+	while ((p = list_first_entry_or_null(&conn->names_queue_list,
-+					     struct kdbus_name_pending,
-+					     conn_entry)))
-+		kdbus_name_pending_free(p);
-+	while ((e = list_first_entry_or_null(&conn->names_list,
-+					     struct kdbus_name_entry,
-+					     conn_entry)))
-+		kdbus_name_release_unlocked(reg, e);
++	while ((owner = list_first_entry_or_null(&conn->names_list,
++						 struct kdbus_name_owner,
++						 conn_entry)))
++		kdbus_name_release_unlocked(owner);
 +
 +	up_write(&reg->rwlock);
 +
-+	kdbus_conn_unref(activator);
 +	kdbus_notify_flush(conn->ep->bus);
 +}
 +
 +/**
++ * kdbus_name_is_valid() - check if a name is valid
++ * @p:			The name to check
++ * @allow_wildcard:	Whether or not to allow a wildcard name
++ *
++ * A name is valid if all of the following criteria are met:
++ *
++ *  - The name has two or more elements separated by a period ('.') character.
++ *  - All elements must contain at least one character.
++ *  - Each element must only contain the ASCII characters "[A-Z][a-z][0-9]_-"
++ *    and must not begin with a digit.
++ *  - The name must not exceed KDBUS_NAME_MAX_LEN.
++ *  - If @allow_wildcard is true, the name may end on '.*'
++ */
++bool kdbus_name_is_valid(const char *p, bool allow_wildcard)
++{
++	bool dot, found_dot = false;
++	const char *q;
++
++	for (dot = true, q = p; *q; q++) {
++		if (*q == '.') {
++			if (dot)
++				return false;
++
++			found_dot = true;
++			dot = true;
++		} else {
++			bool good;
++
++			good = isalpha(*q) || (!dot && isdigit(*q)) ||
++				*q == '_' || *q == '-' ||
++				(allow_wildcard && dot &&
++					*q == '*' && *(q + 1) == '\0');
++
++			if (!good)
++				return false;
++
++			dot = false;
++		}
++	}
++
++	if (q - p > KDBUS_NAME_MAX_LEN)
++		return false;
++
++	if (dot)
++		return false;
++
++	if (!found_dot)
++		return false;
++
++	return true;
++}
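
The rules above are self-contained enough to exercise outside the
kernel; the following standalone sketch mirrors the kernel logic
(KDBUS_NAME_MAX_LEN is assumed to be 255, as in the kdbus uapi header)
and checks one example per rule:

	#include <assert.h>
	#include <ctype.h>
	#include <stdbool.h>

	#define KDBUS_NAME_MAX_LEN 255	/* assumed from uapi kdbus.h */

	static bool name_is_valid(const char *p, bool allow_wildcard)
	{
		bool dot, found_dot = false;
		const char *q;

		for (dot = true, q = p; *q; q++) {
			if (*q == '.') {
				if (dot)
					return false;	/* empty element */
				found_dot = true;
				dot = true;
			} else {
				bool good = isalpha((unsigned char)*q) ||
					(!dot && isdigit((unsigned char)*q)) ||
					*q == '_' || *q == '-' ||
					(allow_wildcard && dot &&
					 *q == '*' && *(q + 1) == '\0');
				if (!good)
					return false;
				dot = false;
			}
		}

		if (q - p > KDBUS_NAME_MAX_LEN)
			return false;
		if (dot)		/* must not end in '.' */
			return false;
		if (!found_dot)		/* needs two or more elements */
			return false;
		return true;
	}

	int main(void)
	{
		assert(name_is_valid("foo.bar", false));
		assert(!name_is_valid("foo", false));	   /* one element */
		assert(!name_is_valid("foo..bar", false)); /* empty element */
		assert(!name_is_valid("foo.2bar", false)); /* leading digit */
		assert(name_is_valid("foo.bar.*", true));  /* wildcard ok */
		assert(!name_is_valid("foo.bar.*", false));
		return 0;
	}
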
++
++/**
 + * kdbus_cmd_name_acquire() - handle KDBUS_CMD_NAME_ACQUIRE
 + * @conn:		connection to operate on
 + * @argp:		command payload
@@ -18035,20 +18184,9 @@ index 0000000..057f806
 +		goto exit;
 +	}
 +
-+	/*
-+	 * Do atomic_inc_return here to reserve our slot, then decrement
-+	 * it before returning.
-+	 */
-+	if (atomic_inc_return(&conn->name_count) > KDBUS_CONN_MAX_NAMES) {
-+		ret = -E2BIG;
-+		goto exit_dec;
-+	}
-+
 +	ret = kdbus_name_acquire(conn->ep->bus->name_registry, conn, item_name,
 +				 cmd->flags, &cmd->return_flags);
 +
-+exit_dec:
-+	atomic_dec(&conn->name_count);
 +exit:
 +	return kdbus_args_clear(&args, ret);
 +}
@@ -18091,7 +18229,7 @@ index 0000000..057f806
 +			    struct kdbus_conn *c,
 +			    struct kdbus_pool_slice *slice,
 +			    size_t *pos,
-+			    struct kdbus_name_entry *e,
++			    struct kdbus_name_owner *o,
 +			    bool write)
 +{
 +	struct kvec kvec[4];
@@ -18112,22 +18250,22 @@ index 0000000..057f806
 +		u64 flags;
 +	} h = {};
 +
-+	if (e && !kdbus_conn_policy_see_name_unlocked(conn, current_cred(),
-+						      e->name))
++	if (o && !kdbus_conn_policy_see_name_unlocked(conn, current_cred(),
++						      o->name->name))
 +		return 0;
 +
 +	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &info.size);
 +
 +	/* append name */
-+	if (e) {
-+		size_t slen = strlen(e->name) + 1;
++	if (o) {
++		size_t slen = strlen(o->name->name) + 1;
 +
 +		h.size = offsetof(struct kdbus_item, name.name) + slen;
 +		h.type = KDBUS_ITEM_OWNED_NAME;
-+		h.flags = e->flags;
++		h.flags = o->flags;
 +
 +		kdbus_kvec_set(&kvec[cnt++], &h, sizeof(h), &info.size);
-+		kdbus_kvec_set(&kvec[cnt++], e->name, slen, &info.size);
++		kdbus_kvec_set(&kvec[cnt++], o->name->name, slen, &info.size);
 +		cnt += !!kdbus_kvec_pad(&kvec[cnt], &info.size);
 +	}
 +
@@ -18157,63 +18295,52 @@ index 0000000..057f806
 +		if (kdbus_conn_is_monitor(c))
 +			continue;
 +
-+		/* skip activators */
-+		if (!(flags & KDBUS_LIST_ACTIVATORS) &&
-+		    kdbus_conn_is_activator(c))
-+			continue;
-+
 +		/* all names the connection owns */
-+		if (flags & (KDBUS_LIST_NAMES | KDBUS_LIST_ACTIVATORS)) {
-+			struct kdbus_name_entry *e;
++		if (flags & (KDBUS_LIST_NAMES |
++			     KDBUS_LIST_ACTIVATORS |
++			     KDBUS_LIST_QUEUED)) {
++			struct kdbus_name_owner *o;
 +
-+			list_for_each_entry(e, &c->names_list, conn_entry) {
-+				struct kdbus_conn *a = e->activator;
++			list_for_each_entry(o, &c->names_list, conn_entry) {
++				if (o->flags & KDBUS_NAME_ACTIVATOR) {
++					if (!(flags & KDBUS_LIST_ACTIVATORS))
++						continue;
 +
-+				if ((flags & KDBUS_LIST_ACTIVATORS) &&
-+				    a && a != c) {
-+					ret = kdbus_list_write(conn, a, slice,
-+							       &p, e, write);
++					ret = kdbus_list_write(conn, c, slice,
++							       &p, o, write);
 +					if (ret < 0) {
 +						mutex_unlock(&c->lock);
 +						return ret;
 +					}
 +
 +					added = true;
-+				}
++				} else if (o->flags & KDBUS_NAME_IN_QUEUE) {
++					if (!(flags & KDBUS_LIST_QUEUED))
++						continue;
 +
-+				if (flags & KDBUS_LIST_NAMES ||
-+				    kdbus_conn_is_activator(c)) {
 +					ret = kdbus_list_write(conn, c, slice,
-+							       &p, e, write);
++							       &p, o, write);
 +					if (ret < 0) {
 +						mutex_unlock(&c->lock);
 +						return ret;
 +					}
 +
 +					added = true;
-+				}
-+			}
-+		}
++				} else if (flags & KDBUS_LIST_NAMES) {
++					ret = kdbus_list_write(conn, c, slice,
++							       &p, o, write);
++					if (ret < 0) {
++						mutex_unlock(&c->lock);
++						return ret;
++					}
 +
-+		/* queue of names the connection is currently waiting for */
-+		if (flags & KDBUS_LIST_QUEUED) {
-+			struct kdbus_name_pending *q;
-+
-+			list_for_each_entry(q, &c->names_queue_list,
-+					    conn_entry) {
-+				ret = kdbus_list_write(conn, c, slice, &p,
-+						       q->name, write);
-+				if (ret < 0) {
-+					mutex_unlock(&c->lock);
-+					return ret;
++					added = true;
 +				}
-+
-+				added = true;
 +			}
 +		}
 +
 +		/* nothing added so far, just add the unique ID */
-+		if (!added && flags & KDBUS_LIST_UNIQUE) {
++		if (!added && (flags & KDBUS_LIST_UNIQUE)) {
 +			ret = kdbus_list_write(conn, c, slice, &p, NULL, write);
 +			if (ret < 0)
 +				return ret;
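
The reader side of this enumeration is shown by the reworked selftest
further below; condensed, listing all names (owned, queued and
activators) with the selftest helpers looks like this (conn->buf is the
connection's mapped pool, as set up by kdbus-util.c, and the enclosing
function with stdio includes is assumed):

	struct kdbus_cmd_list cmd_list = {
		.size = sizeof(cmd_list),
		.flags = KDBUS_LIST_NAMES | KDBUS_LIST_ACTIVATORS |
			 KDBUS_LIST_QUEUED,
	};
	struct kdbus_info *name, *list;
	struct kdbus_item *item;
	int ret;

	ret = kdbus_cmd_list(conn->fd, &cmd_list);
	if (ret < 0)
		return ret;

	/* results are placed in the connection pool at the offset */
	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
	KDBUS_FOREACH(name, list, cmd_list.list_size)
		KDBUS_ITEM_FOREACH(item, name, items)
			if (item->type == KDBUS_ITEM_OWNED_NAME)
				printf("%llu owns '%s' (flags 0x%llx)\n",
				       (unsigned long long)name->id,
				       item->name.name,
				       (unsigned long long)item->name.flags);
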
@@ -18302,10 +18429,10 @@ index 0000000..057f806
 +}
 diff --git a/ipc/kdbus/names.h b/ipc/kdbus/names.h
 new file mode 100644
-index 0000000..3dd2589
+index 0000000..edac59d
 --- /dev/null
 +++ b/ipc/kdbus/names.h
-@@ -0,0 +1,74 @@
+@@ -0,0 +1,105 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -18326,6 +18453,10 @@ index 0000000..3dd2589
 +#include <linux/hashtable.h>
 +#include <linux/rwsem.h>
 +
++struct kdbus_name_entry;
++struct kdbus_name_owner;
++struct kdbus_name_registry;
++
 +/**
 + * struct kdbus_name_registry - names registered for a bus
 + * @entries_hash:	Map of entries
@@ -18340,27 +18471,37 @@ index 0000000..3dd2589
 +
 +/**
 + * struct kdbus_name_entry - well-known name entry
-+ * @name_id:		Sequence number of name entry to be able to uniquely
++ * @name_id:		sequence number of name entry to be able to uniquely
 + *			identify a name over its registration lifetime
-+ * @flags:		KDBUS_NAME_* flags
-+ * @conn:		Connection owning the name
-+ * @activator:		Connection of the activator queuing incoming messages
-+ * @queue:		List of queued connections
-+ * @conn_entry:		Entry in connection
-+ * @hentry:		Entry in registry map
-+ * @name:		The well-known name
++ * @activator:		activator of this name, or NULL
++ * @queue:		list of queued owners
++ * @hentry:		entry in registry map
++ * @name:		well-known name
 + */
 +struct kdbus_name_entry {
 +	u64 name_id;
-+	u64 flags;
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *activator;
++	struct kdbus_name_owner *activator;
 +	struct list_head queue;
-+	struct list_head conn_entry;
 +	struct hlist_node hentry;
 +	char name[];
 +};
 +
++/**
++ * struct kdbus_name_owner - owner of a well-known name
++ * @flags:		KDBUS_NAME_* flags of this owner
++ * @conn:		connection owning the name
++ * @name:		name that is owned
++ * @conn_entry:		link into @conn
++ * @name_entry:		link into @name
++ */
++struct kdbus_name_owner {
++	u64 flags;
++	struct kdbus_conn *conn;
++	struct kdbus_name_entry *name;
++	struct list_head conn_entry;
++	struct list_head name_entry;
++};
++
 +bool kdbus_name_is_valid(const char *p, bool allow_wildcard);
 +
 +struct kdbus_name_registry *kdbus_name_registry_new(void);
@@ -18379,6 +18520,23 @@ index 0000000..3dd2589
 +int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp);
 +int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp);
 +
++/**
++ * kdbus_name_get_owner() - get current owner of a name
++ * @name:	name to get current owner of
++ *
++ * This returns a pointer to the current owner of a name (or its activator if
++ * there is no owner). The caller must make sure @name is valid and does not
++ * vanish.
++ *
++ * Return: Pointer to current owner or NULL if there is none.
++ */
++static inline struct kdbus_name_owner *
++kdbus_name_get_owner(struct kdbus_name_entry *name)
++{
++	return list_first_entry_or_null(&name->queue, struct kdbus_name_owner,
++					name_entry) ? : name->activator;
++}
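
This helper centralizes the "primary owner, else activator" resolution
that the message-send and conn-info hunks earlier in this patch now
rely on; the common call pattern there condenses to the following
(locking and error unwinding elided):

	struct kdbus_name_entry *entry;
	struct kdbus_name_owner *owner = NULL;

	entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
	if (entry)
		owner = kdbus_name_get_owner(entry);
	if (!owner)
		return -ESRCH;	/* no primary owner and no activator */

	dst = kdbus_conn_ref(owner->conn);
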
++
 +#endif
 diff --git a/ipc/kdbus/node.c b/ipc/kdbus/node.c
 new file mode 100644
@@ -22164,10 +22322,10 @@ index 0000000..7f3abae
 +#endif /* KDBUS_API_H */
 diff --git a/samples/kdbus/kdbus-workers.c b/samples/kdbus/kdbus-workers.c
 new file mode 100644
-index 0000000..c3ba958
+index 0000000..5a6dfdc
 --- /dev/null
 +++ b/samples/kdbus/kdbus-workers.c
-@@ -0,0 +1,1345 @@
+@@ -0,0 +1,1346 @@
 +/*
 + * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
 + *
@@ -22229,9 +22387,11 @@ index 0000000..c3ba958
 +
 +#include <stdio.h>
 +#include <stdlib.h>
++#include <sys/syscall.h>
 +
 +/* glibc < 2.7 does not ship sys/signalfd.h */
-+#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 7
++/* we require kernels with __NR_memfd_create */
++#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 7 && defined(__NR_memfd_create)
 +
 +#include <ctype.h>
 +#include <errno.h>
@@ -22245,7 +22405,6 @@ index 0000000..c3ba958
 +#include <sys/mman.h>
 +#include <sys/poll.h>
 +#include <sys/signalfd.h>
-+#include <sys/syscall.h>
 +#include <sys/time.h>
 +#include <sys/wait.h>
 +#include <time.h>
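
The strengthened guard makes the sample compile only where the raw
syscall number is available, since glibc did not expose a
memfd_create() wrapper until much later. A typical wrapper under that
assumption (the sample's actual helper name is not shown in this hunk)
looks like:

	#include <sys/syscall.h>
	#include <unistd.h>

	/* hypothetical helper; relies on __NR_memfd_create per the
	 * guard above rather than on a glibc wrapper */
	static int my_memfd_create(const char *name, unsigned int flags)
	{
		return syscall(__NR_memfd_create, name, flags);
	}
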
@@ -23710,10 +23869,10 @@ index 0000000..ed28cca
 +const char *enum_PAYLOAD(long long id);
 diff --git a/tools/testing/selftests/kdbus/kdbus-test.c b/tools/testing/selftests/kdbus/kdbus-test.c
 new file mode 100644
-index 0000000..db732e5
+index 0000000..db57381
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-test.c
-@@ -0,0 +1,899 @@
+@@ -0,0 +1,905 @@
 +#include <errno.h>
 +#include <stdio.h>
 +#include <string.h>
@@ -23834,6 +23993,12 @@ index 0000000..db732e5
 +		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
 +	},
 +	{
++		.name	= "name-takeover",
++		.desc	= "takeover of names",
++		.func	= kdbus_test_name_takeover,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
 +		.name	= "message-basic",
 +		.desc	= "basic message handling",
 +		.func	= kdbus_test_message_basic,
@@ -24615,10 +24780,10 @@ index 0000000..db732e5
 +}
 diff --git a/tools/testing/selftests/kdbus/kdbus-test.h b/tools/testing/selftests/kdbus/kdbus-test.h
 new file mode 100644
-index 0000000..a5c6ae8
+index 0000000..ee937f9
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-test.h
-@@ -0,0 +1,83 @@
+@@ -0,0 +1,84 @@
 +#ifndef _TEST_KDBUS_H_
 +#define _TEST_KDBUS_H_
 +
@@ -24693,6 +24858,7 @@ index 0000000..a5c6ae8
 +int kdbus_test_name_basic(struct kdbus_test_env *env);
 +int kdbus_test_name_conflict(struct kdbus_test_env *env);
 +int kdbus_test_name_queue(struct kdbus_test_env *env);
++int kdbus_test_name_takeover(struct kdbus_test_env *env);
 +int kdbus_test_policy(struct kdbus_test_env *env);
 +int kdbus_test_policy_ns(struct kdbus_test_env *env);
 +int kdbus_test_policy_priv(struct kdbus_test_env *env);
@@ -24704,10 +24870,10 @@ index 0000000..a5c6ae8
 +#endif /* _TEST_KDBUS_H_ */
 diff --git a/tools/testing/selftests/kdbus/kdbus-util.c b/tools/testing/selftests/kdbus/kdbus-util.c
 new file mode 100644
-index 0000000..a5e54ca
+index 0000000..82fa89b
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-util.c
-@@ -0,0 +1,1611 @@
+@@ -0,0 +1,1612 @@
 +/*
 + * Copyright (C) 2013-2015 Daniel Mack
 + * Copyright (C) 2013-2015 Kay Sievers
@@ -25865,11 +26031,12 @@ index 0000000..a5e54ca
 +			if (item->type == KDBUS_ITEM_OWNED_NAME) {
 +				n = item->name.name;
 +				flags = item->name.flags;
-+			}
 +
-+		kdbus_printf("%8llu flags=0x%08llx conn=0x%08llx '%s'\n",
-+			     name->id, (unsigned long long) flags,
-+			     name->flags, n);
++				kdbus_printf("%8llu flags=0x%08llx conn=0x%08llx '%s'\n",
++					     name->id,
++					     (unsigned long long) flags,
++					     name->flags, n);
++			}
 +	}
 +	kdbus_printf("\n");
 +
@@ -27507,10 +27674,10 @@ index 0000000..762fb30
 +}
 diff --git a/tools/testing/selftests/kdbus/test-chat.c b/tools/testing/selftests/kdbus/test-chat.c
 new file mode 100644
-index 0000000..71a92d8
+index 0000000..41e5b53
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-chat.c
-@@ -0,0 +1,122 @@
+@@ -0,0 +1,124 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <time.h>
@@ -27554,8 +27721,10 @@ index 0000000..71a92d8
 +	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
 +	ASSERT_RETURN(ret == 0);
 +
-+	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
-+	ASSERT_RETURN(ret == -EALREADY);
++	flags = 0;
++	ret = kdbus_name_acquire(conn_a, "foo.bar.double", &flags);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(!(flags & KDBUS_NAME_ACQUIRED));
 +
 +	ret = kdbus_name_release(conn_a, "foo.bar.double");
 +	ASSERT_RETURN(ret == 0);
@@ -29979,7 +30148,7 @@ index 0000000..2360dc1
 +}
 diff --git a/tools/testing/selftests/kdbus/test-message.c b/tools/testing/selftests/kdbus/test-message.c
 new file mode 100644
-index 0000000..ddc1e0a
+index 0000000..563dc85
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-message.c
 @@ -0,0 +1,734 @@
@@ -30060,7 +30229,7 @@ index 0000000..ddc1e0a
 +
 +	/* Faked replies with a valid reply cookie are rejected */
 +	ret = kdbus_msg_send_reply(conn, time(NULL) ^ cookie, sender->id);
-+	ASSERT_RETURN(ret == -EPERM);
++	ASSERT_RETURN(ret == -EBADSLT);
 +
 +	ret = kdbus_free(conn, offset);
 +	ASSERT_RETURN(ret == 0);
@@ -31407,10 +31576,10 @@ index 0000000..e00d738
 +}
 diff --git a/tools/testing/selftests/kdbus/test-names.c b/tools/testing/selftests/kdbus/test-names.c
 new file mode 100644
-index 0000000..66ebb47
+index 0000000..e400dc8
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-names.c
-@@ -0,0 +1,194 @@
+@@ -0,0 +1,272 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <time.h>
@@ -31430,39 +31599,68 @@ index 0000000..66ebb47
 +#include "kdbus-enum.h"
 +#include "kdbus-test.h"
 +
-+static int conn_is_name_owner(const struct kdbus_conn *conn,
-+			      const char *needle)
++struct test_name {
++	const char *name;
++	__u64 owner_id;
++	__u64 flags;
++};
++
++static bool conn_test_names(const struct kdbus_conn *conn,
++			    const struct test_name *tests,
++			    unsigned int n_tests)
 +{
-+	struct kdbus_cmd_list cmd_list = { .size = sizeof(cmd_list) };
++	struct kdbus_cmd_list cmd_list = {};
 +	struct kdbus_info *name, *list;
-+	bool found = false;
++	unsigned int i;
 +	int ret;
 +
-+	cmd_list.flags = KDBUS_LIST_NAMES;
++	cmd_list.size = sizeof(cmd_list);
++	cmd_list.flags = KDBUS_LIST_NAMES |
++			 KDBUS_LIST_ACTIVATORS |
++			 KDBUS_LIST_QUEUED;
 +
 +	ret = kdbus_cmd_list(conn->fd, &cmd_list);
 +	ASSERT_RETURN(ret == 0);
 +
 +	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
-+	KDBUS_FOREACH(name, list, cmd_list.list_size) {
-+		struct kdbus_item *item;
-+		const char *n = NULL;
 +
-+		KDBUS_ITEM_FOREACH(item, name, items)
-+			if (item->type == KDBUS_ITEM_OWNED_NAME)
-+				n = item->name.name;
++	for (i = 0; i < n_tests; i++) {
++		const struct test_name *t = tests + i;
++		bool found = false;
 +
-+		if (name->id == conn->id &&
-+		    n && strcmp(needle, n) == 0) {
-+			found = true;
-+			break;
++		KDBUS_FOREACH(name, list, cmd_list.list_size) {
++			struct kdbus_item *item;
++
++			KDBUS_ITEM_FOREACH(item, name, items) {
++				if (item->type != KDBUS_ITEM_OWNED_NAME ||
++				    strcmp(item->name.name, t->name) != 0)
++					continue;
++
++				if (t->owner_id == name->id &&
++				    t->flags == item->name.flags) {
++					found = true;
++					break;
++				}
++			}
 +		}
++
++		if (!found)
++			return false;
 +	}
 +
-+	ret = kdbus_free(conn, cmd_list.offset);
-+	ASSERT_RETURN(ret == 0);
++	return true;
++}
++
++static bool conn_is_name_primary_owner(const struct kdbus_conn *conn,
++				       const char *needle)
++{
++	struct test_name t = {
++		.name = needle,
++		.owner_id = conn->id,
++		.flags = KDBUS_NAME_PRIMARY,
++	};
 +
-+	return found ? 0 : -1;
++	return conn_test_names(conn, &t, 1);
 +}
 +
 +int kdbus_test_name_basic(struct kdbus_test_env *env)
@@ -31498,15 +31696,15 @@ index 0000000..66ebb47
 +	ret = kdbus_name_acquire(env->conn, name, NULL);
 +	ASSERT_RETURN(ret == 0);
 +
-+	ret = conn_is_name_owner(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
++	ret = conn_is_name_primary_owner(env->conn, name);
++	ASSERT_RETURN(ret == true);
 +
 +	/* ... and release it again */
 +	ret = kdbus_name_release(env->conn, name);
 +	ASSERT_RETURN(ret == 0);
 +
-+	ret = conn_is_name_owner(env->conn, name);
-+	ASSERT_RETURN(ret != 0);
++	ret = conn_is_name_primary_owner(env->conn, name);
++	ASSERT_RETURN(ret == false);
 +
 +	/* check that we can't release it again */
 +	ret = kdbus_name_release(env->conn, name);
@@ -31548,12 +31746,8 @@ index 0000000..66ebb47
 +	ret = kdbus_name_acquire(env->conn, name, NULL);
 +	ASSERT_RETURN(ret == 0);
 +
-+	ret = conn_is_name_owner(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* check that we can't acquire it again from the 1st connection */
-+	ret = kdbus_name_acquire(env->conn, name, NULL);
-+	ASSERT_RETURN(ret == -EALREADY);
++	ret = conn_is_name_primary_owner(env->conn, name);
++	ASSERT_RETURN(ret == true);
 +
 +	/* check that we also can't acquire it again from the 2nd connection */
 +	ret = kdbus_name_acquire(conn, name, NULL);
@@ -31567,13 +31761,14 @@ index 0000000..66ebb47
 +int kdbus_test_name_queue(struct kdbus_test_env *env)
 +{
 +	struct kdbus_conn *conn;
++	struct test_name t[2];
 +	const char *name;
 +	uint64_t flags;
 +	int ret;
 +
 +	name = "foo.bla.blaz";
 +
-+	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
++	flags = 0;
 +
 +	/* create a 2nd connection */
 +	conn = kdbus_hello(env->buspath, 0, NULL, 0);
@@ -31584,8 +31779,8 @@ index 0000000..66ebb47
 +	ret = kdbus_name_acquire(env->conn, name, &flags);
 +	ASSERT_RETURN(ret == 0);
 +
-+	ret = conn_is_name_owner(env->conn, name);
-+	ASSERT_RETURN(ret == 0);
++	ret = conn_is_name_primary_owner(env->conn, name);
++	ASSERT_RETURN(ret == true);
 +
 +	/* queue the 2nd connection as waiting owner */
 +	flags = KDBUS_NAME_QUEUE;
@@ -31593,13 +31788,65 @@ index 0000000..66ebb47
 +	ASSERT_RETURN(ret == 0);
 +	ASSERT_RETURN(flags & KDBUS_NAME_IN_QUEUE);
 +
++	t[0].name = name;
++	t[0].owner_id = env->conn->id;
++	t[0].flags = KDBUS_NAME_PRIMARY;
++	t[1].name = name;
++	t[1].owner_id = conn->id;
++	t[1].flags = KDBUS_NAME_QUEUE | KDBUS_NAME_IN_QUEUE;
++	ret = conn_test_names(conn, t, 2);
++	ASSERT_RETURN(ret == true);
++
 +	/* release name from 1st connection */
 +	ret = kdbus_name_release(env->conn, name);
 +	ASSERT_RETURN(ret == 0);
 +
 +	/* now the name should be owned by the 2nd connection */
-+	ret = conn_is_name_owner(conn, name);
++	t[0].name = name;
++	t[0].owner_id = conn->id;
++	t[0].flags = KDBUS_NAME_PRIMARY | KDBUS_NAME_QUEUE;
++	ret = conn_test_names(conn, t, 1);
++	ASSERT_RETURN(ret == true);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++int kdbus_test_name_takeover(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	struct test_name t;
++	const char *name;
++	uint64_t flags;
++	int ret;
++
++	name = "foo.bla.blaz";
++
++	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* acquire name for 1st connection */
++	ret = kdbus_name_acquire(env->conn, name, &flags);
++	ASSERT_RETURN(ret == 0);
++
++	t.name = name;
++	t.owner_id = env->conn->id;
++	t.flags = KDBUS_NAME_ALLOW_REPLACEMENT | KDBUS_NAME_PRIMARY;
++	ret = conn_test_names(conn, &t, 1);
++	ASSERT_RETURN(ret == true);
++
++	/* now steal name with 2nd connection */
++	flags = KDBUS_NAME_REPLACE_EXISTING;
++	ret = kdbus_name_acquire(conn, name, &flags);
 +	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(flags & KDBUS_NAME_ACQUIRED);
++
++	ret = conn_is_name_primary_owner(conn, name);
++	ASSERT_RETURN(ret == true);
 +
 +	kdbus_conn_free(conn);
 +
@@ -33622,7 +33869,7 @@ index 0000000..96d20d5
 +}
 diff --git a/tools/testing/selftests/kdbus/test-sync.c b/tools/testing/selftests/kdbus/test-sync.c
 new file mode 100644
-index 0000000..e2be910
+index 0000000..0655a54
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-sync.c
 @@ -0,0 +1,369 @@
@@ -33863,7 +34110,7 @@ index 0000000..e2be910
 +
 +	/* using an unknown cookie must fail */
 +	ret = kdbus_msg_send_reply(conn_a, ~cookie, conn_b->id);
-+	if (ret != -EPERM) {
++	if (ret != -EBADSLT) {
 +		status = TEST_ERR;
 +		goto exit_thread;
 +	}


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-08-10 23:42 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-08-10 23:42 UTC (permalink / raw
  To: gentoo-commits

commit:     570e61538e27bd5acd3e23c3b24f1f2bd4b41e19
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug 10 23:42:16 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug 10 23:42:16 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=570e6153

Linux patch 4.1.5

 0000_README            |    4 +
 1004_linux-4.1.5.patch | 5750 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 5754 insertions(+)

diff --git a/0000_README b/0000_README
index ceda226..148063b 100644
--- a/0000_README
+++ b/0000_README
@@ -59,6 +59,10 @@ Patch:  1003_linux-4.1.4.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.4
 
+Patch:  1004_linux-4.1.5.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.5
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1004_linux-4.1.5.patch b/1004_linux-4.1.5.patch
new file mode 100644
index 0000000..ed56bc8
--- /dev/null
+++ b/1004_linux-4.1.5.patch
@@ -0,0 +1,5750 @@
+diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
+index 014f112e2a14..57fffe33ebfc 100644
+--- a/Documentation/hwmon/nct7904
++++ b/Documentation/hwmon/nct7904
+@@ -35,11 +35,11 @@ temp1_input		Local temperature (1/1000 degree,
+ temp[2-9]_input		CPU temperatures (1/1000 degree,
+ 			0.125 degree resolution)
+ 
+-fan[1-4]_mode		R/W, 0/1 for manual or SmartFan mode
++pwm[1-4]_enable		R/W, 1/2 for manual or SmartFan mode
+ 			Setting SmartFan mode is supported only if it has been
+ 			previously configured by BIOS (or configuration EEPROM)
+ 
+-fan[1-4]_pwm		R/O in SmartFan mode, R/W in manual control mode
++pwm[1-4]		R/O in SmartFan mode, R/W in manual control mode
+ 
+ The driver checks sensor control registers and does not export the sensors
+ that are not enabled. Anyway, a sensor that is enabled may actually be not
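/*
 * Editorial sketch, not part of the patch: with the attribute names fixed
 * above, switching fan 1 to manual mode and setting a duty cycle from
 * userspace would look roughly like this (the hwmon0 path is hypothetical;
 * the real index depends on the system):
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/pwm1_enable", "w");
	if (!f)
		return 1;
	fputs("1", f);		/* 1 = manual, 2 = SmartFan, per the doc above */
	fclose(f);

	f = fopen("/sys/class/hwmon/hwmon0/pwm1", "w");
	if (!f)
		return 1;
	fputs("128", f);	/* mid-range duty cycle, writable in manual mode */
	fclose(f);
	return 0;
}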
+diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
+index 74b6c6d97210..d2b1c40cb666 100644
+--- a/Documentation/kbuild/makefiles.txt
++++ b/Documentation/kbuild/makefiles.txt
+@@ -952,6 +952,14 @@ When kbuild executes, the following steps are followed (roughly):
+ 	$(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
+ 	mode) if this option is supported by $(AR).
+ 
++    ARCH_CPPFLAGS, ARCH_AFLAGS, ARCH_CFLAGS   Overrides the kbuild defaults
++
++	These variables are appended to the KBUILD_CPPFLAGS,
++	KBUILD_AFLAGS, and KBUILD_CFLAGS, respectively, after the
++	top-level Makefile has set any other flags. This provides a
++	means for an architecture to override the defaults.
++
++
+ --- 6.2 Add prerequisites to archheaders:
+ 
+ 	The archheaders: rule is used to generate header files that
+diff --git a/Makefile b/Makefile
+index 36f3225cdf1f..068dd690933d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 4
++SUBLEVEL = 5
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+@@ -783,10 +783,11 @@ endif
+ include scripts/Makefile.kasan
+ include scripts/Makefile.extrawarn
+ 
+-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+-KBUILD_CPPFLAGS += $(KCPPFLAGS)
+-KBUILD_AFLAGS += $(KAFLAGS)
+-KBUILD_CFLAGS += $(KCFLAGS)
++# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
++# last assignments
++KBUILD_CPPFLAGS += $(ARCH_CPPFLAGS) $(KCPPFLAGS)
++KBUILD_AFLAGS   += $(ARCH_AFLAGS)   $(KAFLAGS)
++KBUILD_CFLAGS   += $(ARCH_CFLAGS)   $(KCFLAGS)
+ 
+ # Use --build-id when available.
+ LDFLAGS_BUILD_ID = $(patsubst -Wl$(comma)%,%,\
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index db72fec0e160..2f21e1e0ecf7 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -43,7 +43,8 @@ endif
+ 
+ ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+ # Generic build system uses -O2, we want -O3
+-cflags-y  += -O3
++# Note: No need to add to cflags-y as that happens anyways
++ARCH_CFLAGS += -O3
+ endif
+ 
+ # small data is default for elf32 tool-chain. If not usable, disable it
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index 624a9d048ca9..dae03e66fa9e 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -18,83 +18,49 @@
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+ #include <asm/barrier.h>
++#ifndef CONFIG_ARC_HAS_LLSC
++#include <asm/smp.h>
++#endif
+ 
+-/*
+- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+- * The Kconfig glue ensures that in SMP, this is only set if the container
+- * SoC/platform has cross-core coherent LLOCK/SCOND
+- */
+ #if defined(CONFIG_ARC_HAS_LLSC)
+ 
+-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned int temp;
+-
+-	m += nr >> 5;
+-
+-	/*
+-	 * ARC ISA micro-optimization:
+-	 *
+-	 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
+-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction
+-	 *  (mem pointer still needs adjustment to point to next word)
+-	 *
+-	 * Hence the masking to clamp @nr arg can be elided in general.
+-	 *
+-	 * However if @nr is a constant (above assumed it in a register),
+-	 * and greater than 31, gcc can optimize away (x << 33) to 0,
+-	 * as overflow, given the 32-bit ISA. Thus masking needs to be done
+-	 * for constant @nr, but no code is generated due to const prop.
+-	 */
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%1]	\n"
+-	"	bset    %0, %0, %2	\n"
+-	"	scond   %0, [%1]	\n"
+-	"	bnz     1b	\n"
+-	: "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
+-}
+-
+-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned int temp;
+-
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%1]	\n"
+-	"	bclr    %0, %0, %2	\n"
+-	"	scond   %0, [%1]	\n"
+-	"	bnz     1b	\n"
+-	: "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
+-}
+-
+-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned int temp;
+-
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
++/*
++ * Hardware assisted Atomic-R-M-W
++ */
+ 
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%1]	\n"
+-	"	bxor    %0, %0, %2	\n"
+-	"	scond   %0, [%1]	\n"
+-	"	bnz     1b		\n"
+-	: "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
++#define BIT_OP(op, c_op, asm_op)					\
++static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
++{									\
++	unsigned int temp;						\
++									\
++	m += nr >> 5;							\
++									\
++	/*								\
++	 * ARC ISA micro-optimization:					\
++	 *								\
++	 * Instructions dealing with bitpos only consider lower 5 bits	\
++	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
++	 *  (mem pointer still needs adjustment to point to next word)	\
++	 *								\
++	 * Hence the masking to clamp @nr arg can be elided in general.	\
++	 *								\
++	 * However if @nr is a constant (above assumed in a register),	\
++	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
++	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
++	 * done for const @nr, but no code is generated due to gcc	\
++	 * const prop.							\
++	 */								\
++	nr &= 0x1f;							\
++									\
++	__asm__ __volatile__(						\
++	"1:	llock       %0, [%1]		\n"			\
++	"	" #asm_op " %0, %0, %2	\n"				\
++	"	scond       %0, [%1]		\n"			\
++	"	bnz         1b			\n"			\
++	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
++	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */	\
++	  "ir"(nr)							\
++	: "cc");							\
+ }
+ 
+ /*
+@@ -108,91 +74,37 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+  * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally
+  * and the old value of bit is returned
+  */
+-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old, temp;
+-
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	/*
+-	 * Explicit full memory barrier needed before/after as
+-	 * LLOCK/SCOND themselves don't provide any such semantics
+-	 */
+-	smp_mb();
+-
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%2]	\n"
+-	"	bset    %1, %0, %3	\n"
+-	"	scond   %1, [%2]	\n"
+-	"	bnz     1b		\n"
+-	: "=&r"(old), "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
+-
+-	smp_mb();
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned int old, temp;
+-
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	smp_mb();
+-
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%2]	\n"
+-	"	bclr    %1, %0, %3	\n"
+-	"	scond   %1, [%2]	\n"
+-	"	bnz     1b		\n"
+-	: "=&r"(old), "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
+-
+-	smp_mb();
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned int old, temp;
+-
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	smp_mb();
+-
+-	__asm__ __volatile__(
+-	"1:	llock   %0, [%2]	\n"
+-	"	bxor    %1, %0, %3	\n"
+-	"	scond   %1, [%2]	\n"
+-	"	bnz     1b		\n"
+-	: "=&r"(old), "=&r"(temp)
+-	: "r"(m), "ir"(nr)
+-	: "cc");
+-
+-	smp_mb();
+-
+-	return (old & (1 << nr)) != 0;
++#define TEST_N_BIT_OP(op, c_op, asm_op)					\
++static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
++{									\
++	unsigned long old, temp;					\
++									\
++	m += nr >> 5;							\
++									\
++	nr &= 0x1f;							\
++									\
++	/*								\
++	 * Explicit full memory barrier needed before/after as		\
++	 * LLOCK/SCOND themselves don't provide any such semantics	\
++	 */								\
++	smp_mb();							\
++									\
++	__asm__ __volatile__(						\
++	"1:	llock       %0, [%2]	\n"				\
++	"	" #asm_op " %1, %0, %3	\n"				\
++	"	scond       %1, [%2]	\n"				\
++	"	bnz         1b		\n"				\
++	: "=&r"(old), "=&r"(temp)					\
++	: "r"(m), "ir"(nr)						\
++	: "cc");							\
++									\
++	smp_mb();							\
++									\
++	return (old & (1 << nr)) != 0;					\
+ }
+ 
+ #else	/* !CONFIG_ARC_HAS_LLSC */
+ 
+-#include <asm/smp.h>
+-
+ /*
+  * Non hardware assisted Atomic-R-M-W
+  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+@@ -209,111 +121,37 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+  *             at compile time)
+  */
+ 
+-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	bitops_lock(flags);
+-
+-	temp = *m;
+-	*m = temp | (1UL << nr);
+-
+-	bitops_unlock(flags);
++#define BIT_OP(op, c_op, asm_op)					\
++static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
++{									\
++	unsigned long temp, flags;					\
++	m += nr >> 5;							\
++									\
++	/*								\
++	 * spin lock/unlock provide the needed smp_mb() before/after	\
++	 */								\
++	bitops_lock(flags);						\
++									\
++	temp = *m;							\
++	*m = temp c_op (1UL << (nr & 0x1f));					\
++									\
++	bitops_unlock(flags);						\
+ }
+ 
+-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	bitops_lock(flags);
+-
+-	temp = *m;
+-	*m = temp & ~(1UL << nr);
+-
+-	bitops_unlock(flags);
+-}
+-
+-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	bitops_lock(flags);
+-
+-	temp = *m;
+-	*m = temp ^ (1UL << nr);
+-
+-	bitops_unlock(flags);
+-}
+-
+-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	/*
+-	 * spin lock/unlock provide the needed smp_mb() before/after
+-	 */
+-	bitops_lock(flags);
+-
+-	old = *m;
+-	*m = old | (1 << nr);
+-
+-	bitops_unlock(flags);
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	bitops_lock(flags);
+-
+-	old = *m;
+-	*m = old & ~(1 << nr);
+-
+-	bitops_unlock(flags);
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old, flags;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	bitops_lock(flags);
+-
+-	old = *m;
+-	*m = old ^ (1 << nr);
+-
+-	bitops_unlock(flags);
+-
+-	return (old & (1 << nr)) != 0;
++#define TEST_N_BIT_OP(op, c_op, asm_op)					\
++static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
++{									\
++	unsigned long old, flags;					\
++	m += nr >> 5;							\
++									\
++	bitops_lock(flags);						\
++									\
++	old = *m;							\
++	*m = old c_op (1UL << (nr & 0x1f));				\
++									\
++	bitops_unlock(flags);						\
++									\
++	return (old & (1UL << (nr & 0x1f))) != 0;			\
+ }
+ 
+ #endif /* CONFIG_ARC_HAS_LLSC */
+@@ -322,86 +160,45 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+  * Non atomic variants
+  **************************************/
+ 
+-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	temp = *m;
+-	*m = temp | (1UL << nr);
++#define __BIT_OP(op, c_op, asm_op)					\
++static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)	\
++{									\
++	unsigned long temp;						\
++	m += nr >> 5;							\
++									\
++	temp = *m;							\
++	*m = temp c_op (1UL << (nr & 0x1f));				\
+ }
+ 
+-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	temp = *m;
+-	*m = temp & ~(1UL << nr);
++#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
++static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
++{									\
++	unsigned long old;						\
++	m += nr >> 5;							\
++									\
++	old = *m;							\
++	*m = old c_op (1UL << (nr & 0x1f));				\
++									\
++	return (old & (1UL << (nr & 0x1f))) != 0;			\
+ }
+ 
+-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long temp;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	temp = *m;
+-	*m = temp ^ (1UL << nr);
+-}
+-
+-static inline int
+-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	old = *m;
+-	*m = old | (1 << nr);
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	old = *m;
+-	*m = old & ~(1 << nr);
+-
+-	return (old & (1 << nr)) != 0;
+-}
+-
+-static inline int
+-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+-{
+-	unsigned long old;
+-	m += nr >> 5;
+-
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	old = *m;
+-	*m = old ^ (1 << nr);
+-
+-	return (old & (1 << nr)) != 0;
+-}
++#define BIT_OPS(op, c_op, asm_op)					\
++									\
++	/* set_bit(), clear_bit(), change_bit() */			\
++	BIT_OP(op, c_op, asm_op)					\
++									\
++	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
++	TEST_N_BIT_OP(op, c_op, asm_op)					\
++									\
++	/* __set_bit(), __clear_bit(), __change_bit() */		\
++	__BIT_OP(op, c_op, asm_op)					\
++									\
++	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
++	__TEST_N_BIT_OP(op, c_op, asm_op)
++
++BIT_OPS(set, |, bset)
++BIT_OPS(clear, & ~, bclr)
++BIT_OPS(change, ^, bxor)
+ 
+ /*
+  * This routine doesn't need to be atomic.
+@@ -413,10 +210,7 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
+ 
+ 	addr += nr >> 5;
+ 
+-	if (__builtin_constant_p(nr))
+-		nr &= 0x1f;
+-
+-	mask = 1 << nr;
++	mask = 1UL << (nr & 0x1f);
+ 
+ 	return ((mask & *addr) != 0);
+ }
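/*
 * Editorial sketch, not part of the patch: the BIT_OPS() macro factoring
 * above can be dense on first read.  In the !CONFIG_ARC_HAS_LLSC case,
 * BIT_OPS(set, |, bset) expands to roughly the following (preprocessor
 * output paraphrased):
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *m)
{
	unsigned long temp, flags;

	m += nr >> 5;			/* advance to the word holding bit @nr */

	bitops_lock(flags);		/* irq-off on UP, spinlock on SMP */
	temp = *m;
	*m = temp | (1UL << (nr & 0x1f));	/* c_op was '|' */
	bitops_unlock(flags);
}
/* clear_bit() and change_bit() differ only in substituting '& ~' and '^'. */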
+diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
+index 1bfeec2c0558..2a58af7a2e3a 100644
+--- a/arch/arc/include/asm/ptrace.h
++++ b/arch/arc/include/asm/ptrace.h
+@@ -63,7 +63,7 @@ struct callee_regs {
+ 	long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
+ };
+ 
+-#define instruction_pointer(regs)	((regs)->ret)
++#define instruction_pointer(regs)	(unsigned long)((regs)->ret)
+ #define profile_pc(regs)		instruction_pointer(regs)
+ 
+ /* return 1 if user mode or 0 if kernel mode */
+diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+index 7128fad991ac..c9df40e5cd3b 100644
+--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
++++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
+@@ -544,6 +544,10 @@
+ 	phy-supply = <&ldousb_reg>;
+ };
+ 
++&usb2_phy2 {
++	phy-supply = <&ldousb_reg>;
++};
++
+ &usb1 {
+ 	dr_mode = "host";
+ 	pinctrl-names = "default";
+diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
+index aa465904f6cc..096f68be99e2 100644
+--- a/arch/arm/boot/dts/dra7-evm.dts
++++ b/arch/arm/boot/dts/dra7-evm.dts
+@@ -686,7 +686,8 @@
+ 
+ &dcan1 {
+ 	status = "ok";
+-	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&dcan1_pins_default>;
++	pinctrl-names = "default", "sleep", "active";
++	pinctrl-0 = <&dcan1_pins_sleep>;
+ 	pinctrl-1 = <&dcan1_pins_sleep>;
++	pinctrl-2 = <&dcan1_pins_default>;
+ };
+diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts
+index ce0390f081d9..6b05f6a0ba84 100644
+--- a/arch/arm/boot/dts/dra72-evm.dts
++++ b/arch/arm/boot/dts/dra72-evm.dts
+@@ -497,9 +497,10 @@
+ 
+ &dcan1 {
+ 	status = "ok";
+-	pinctrl-names = "default", "sleep";
+-	pinctrl-0 = <&dcan1_pins_default>;
++	pinctrl-names = "default", "sleep", "active";
++	pinctrl-0 = <&dcan1_pins_sleep>;
+ 	pinctrl-1 = <&dcan1_pins_sleep>;
++	pinctrl-2 = <&dcan1_pins_default>;
+ };
+ 
+ &qspi {
+diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
+index 6d0893a3828e..78b6fd0b86e6 100644
+--- a/arch/arm/mach-imx/gpc.c
++++ b/arch/arm/mach-imx/gpc.c
+@@ -291,8 +291,6 @@ void __init imx_gpc_check_dt(void)
+ 	}
+ }
+ 
+-#ifdef CONFIG_PM_GENERIC_DOMAINS
+-
+ static void _imx6q_pm_pu_power_off(struct generic_pm_domain *genpd)
+ {
+ 	int iso, iso2sw;
+@@ -399,7 +397,6 @@ static struct genpd_onecell_data imx_gpc_onecell_data = {
+ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
+ {
+ 	struct clk *clk;
+-	bool is_off;
+ 	int i;
+ 
+ 	imx6q_pu_domain.reg = pu_reg;
+@@ -416,18 +413,13 @@ static int imx_gpc_genpd_init(struct device *dev, struct regulator *pu_reg)
+ 	}
+ 	imx6q_pu_domain.num_clks = i;
+ 
+-	is_off = IS_ENABLED(CONFIG_PM);
+-	if (is_off) {
+-		_imx6q_pm_pu_power_off(&imx6q_pu_domain.base);
+-	} else {
+-		/*
+-		 * Enable power if compiled without CONFIG_PM in case the
+-		 * bootloader disabled it.
+-		 */
+-		imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
+-	}
++	/* Enable power always in case bootloader disabled it. */
++	imx6q_pm_pu_power_on(&imx6q_pu_domain.base);
++
++	if (!IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS))
++		return 0;
+ 
+-	pm_genpd_init(&imx6q_pu_domain.base, NULL, is_off);
++	pm_genpd_init(&imx6q_pu_domain.base, NULL, false);
+ 	return of_genpd_add_provider_onecell(dev->of_node,
+ 					     &imx_gpc_onecell_data);
+ 
+@@ -437,13 +429,6 @@ clk_err:
+ 	return -EINVAL;
+ }
+ 
+-#else
+-static inline int imx_gpc_genpd_init(struct device *dev, struct regulator *reg)
+-{
+-	return 0;
+-}
+-#endif /* CONFIG_PM_GENERIC_DOMAINS */
+-
+ static int imx_gpc_probe(struct platform_device *pdev)
+ {
+ 	struct regulator *pu_reg;
+diff --git a/arch/arm/mach-pxa/capc7117.c b/arch/arm/mach-pxa/capc7117.c
+index c092730749b9..bf366b39fa61 100644
+--- a/arch/arm/mach-pxa/capc7117.c
++++ b/arch/arm/mach-pxa/capc7117.c
+@@ -24,6 +24,7 @@
+ #include <linux/ata_platform.h>
+ #include <linux/serial_8250.h>
+ #include <linux/gpio.h>
++#include <linux/regulator/machine.h>
+ 
+ #include <asm/mach-types.h>
+ #include <asm/mach/arch.h>
+@@ -144,6 +145,8 @@ static void __init capc7117_init(void)
+ 
+ 	capc7117_uarts_init();
+ 	capc7117_ide_init();
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(CAPC7117,
+diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
+index bb99f59a36d8..a17a91eb8e9a 100644
+--- a/arch/arm/mach-pxa/cm-x2xx.c
++++ b/arch/arm/mach-pxa/cm-x2xx.c
+@@ -13,6 +13,7 @@
+ #include <linux/syscore_ops.h>
+ #include <linux/irq.h>
+ #include <linux/gpio.h>
++#include <linux/regulator/machine.h>
+ 
+ #include <linux/dm9000.h>
+ #include <linux/leds.h>
+@@ -466,6 +467,8 @@ static void __init cmx2xx_init(void)
+ 	cmx2xx_init_ac97();
+ 	cmx2xx_init_touchscreen();
+ 	cmx2xx_init_leds();
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init cmx2xx_init_irq(void)
+diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
+index 4d3588d26c2a..5851f4c254c1 100644
+--- a/arch/arm/mach-pxa/cm-x300.c
++++ b/arch/arm/mach-pxa/cm-x300.c
+@@ -835,6 +835,8 @@ static void __init cm_x300_init(void)
+ 	cm_x300_init_ac97();
+ 	cm_x300_init_wi2wi();
+ 	cm_x300_init_bl();
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
+diff --git a/arch/arm/mach-pxa/colibri-pxa270.c b/arch/arm/mach-pxa/colibri-pxa270.c
+index 5f9d9303b346..3503826333c7 100644
+--- a/arch/arm/mach-pxa/colibri-pxa270.c
++++ b/arch/arm/mach-pxa/colibri-pxa270.c
+@@ -18,6 +18,7 @@
+ #include <linux/mtd/partitions.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/platform_device.h>
++#include <linux/regulator/machine.h>
+ #include <linux/ucb1400.h>
+ 
+ #include <asm/mach/arch.h>
+@@ -294,6 +295,8 @@ static void __init colibri_pxa270_init(void)
+ 		printk(KERN_ERR "Illegal colibri_pxa270_baseboard type %d\n",
+ 				colibri_pxa270_baseboard);
+ 	}
++
++	regulator_has_full_constraints();
+ }
+ 
+ /* The "Income s.r.o. SH-Dmaster PXA270 SBC" board can be booted either
+diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
+index 51531ecffca8..9d7072b04045 100644
+--- a/arch/arm/mach-pxa/em-x270.c
++++ b/arch/arm/mach-pxa/em-x270.c
+@@ -1306,6 +1306,8 @@ static void __init em_x270_init(void)
+ 	em_x270_init_i2c();
+ 	em_x270_init_camera();
+ 	em_x270_userspace_consumers_init();
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(EM_X270, "Compulab EM-X270")
+diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
+index c98511c5abd1..9b0eb0252af6 100644
+--- a/arch/arm/mach-pxa/icontrol.c
++++ b/arch/arm/mach-pxa/icontrol.c
+@@ -26,6 +26,7 @@
+ #include <linux/spi/spi.h>
+ #include <linux/spi/pxa2xx_spi.h>
+ #include <linux/can/platform/mcp251x.h>
++#include <linux/regulator/machine.h>
+ 
+ #include "generic.h"
+ 
+@@ -185,6 +186,8 @@ static void __init icontrol_init(void)
+ 	mxm_8x10_mmc_init();
+ 
+ 	icontrol_can_init();
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(ICONTROL, "iControl/SafeTcam boards using Embedian MXM-8x10 CoM")
+diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
+index 872dcb20e757..066e3a250ee0 100644
+--- a/arch/arm/mach-pxa/trizeps4.c
++++ b/arch/arm/mach-pxa/trizeps4.c
+@@ -26,6 +26,7 @@
+ #include <linux/dm9000.h>
+ #include <linux/mtd/physmap.h>
+ #include <linux/mtd/partitions.h>
++#include <linux/regulator/machine.h>
+ #include <linux/i2c/pxa-i2c.h>
+ 
+ #include <asm/types.h>
+@@ -534,6 +535,8 @@ static void __init trizeps4_init(void)
+ 
+ 	BCR_writew(trizeps_conxs_bcr);
+ 	board_backlight_power(1);
++
++	regulator_has_full_constraints();
+ }
+ 
+ static void __init trizeps4_map_io(void)
+diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
+index aa89488f961e..54122a983ae3 100644
+--- a/arch/arm/mach-pxa/vpac270.c
++++ b/arch/arm/mach-pxa/vpac270.c
+@@ -24,6 +24,7 @@
+ #include <linux/dm9000.h>
+ #include <linux/ucb1400.h>
+ #include <linux/ata_platform.h>
++#include <linux/regulator/machine.h>
+ #include <linux/regulator/max1586.h>
+ #include <linux/i2c/pxa-i2c.h>
+ 
+@@ -711,6 +712,8 @@ static void __init vpac270_init(void)
+ 	vpac270_ts_init();
+ 	vpac270_rtc_init();
+ 	vpac270_ide_init();
++
++	regulator_has_full_constraints();
+ }
+ 
+ MACHINE_START(VPAC270, "Voipac PXA270")
+diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
+index ac2ae5c71ab4..6158566fa0f7 100644
+--- a/arch/arm/mach-pxa/zeus.c
++++ b/arch/arm/mach-pxa/zeus.c
+@@ -868,6 +868,8 @@ static void __init zeus_init(void)
+ 	i2c_register_board_info(0, ARRAY_AND_SIZE(zeus_i2c_devices));
+ 	pxa2xx_set_spi_info(3, &pxa2xx_spi_ssp3_master_info);
+ 	spi_register_board_info(zeus_spi_board_info, ARRAY_SIZE(zeus_spi_board_info));
++
++	regulator_has_full_constraints();
+ }
+ 
+ static struct map_desc zeus_io_desc[] __initdata = {
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 7e7583ddd607..6e4b9ff22ef3 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1953,7 +1953,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
+ {
+ 	int next_bitmap;
+ 
+-	if (mapping->nr_bitmaps > mapping->extensions)
++	if (mapping->nr_bitmaps >= mapping->extensions)
+ 		return -EINVAL;
+ 
+ 	next_bitmap = mapping->nr_bitmaps;
+diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
+index ab21e0d58278..352962bc2e78 100644
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -122,12 +122,12 @@ static int __init uefi_init(void)
+ 
+ 	/* Show what we know for posterity */
+ 	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+-			     sizeof(vendor));
++			     sizeof(vendor) * sizeof(efi_char16_t));
+ 	if (c16) {
+ 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+ 			vendor[i] = c16[i];
+ 		vendor[i] = '\0';
+-		early_memunmap(c16, sizeof(vendor));
++		early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+ 	}
+ 
+ 	pr_info("EFI v%u.%.02u by %s\n",
+diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
+index 23b1a97fae7a..52c179bec0cc 100644
+--- a/arch/avr32/mach-at32ap/clock.c
++++ b/arch/avr32/mach-at32ap/clock.c
+@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
+ {
+ 	unsigned long flags;
+ 
++	if (!clk)
++		return 0;
++
+ 	spin_lock_irqsave(&clk_lock, flags);
+ 	__clk_enable(clk);
+ 	spin_unlock_irqrestore(&clk_lock, flags);
+@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
+ {
+ 	unsigned long flags;
+ 
++	if (IS_ERR_OR_NULL(clk))
++		return;
++
+ 	spin_lock_irqsave(&clk_lock, flags);
+ 	__clk_disable(clk);
+ 	spin_unlock_irqrestore(&clk_lock, flags);
+@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
+ 	unsigned long flags;
+ 	unsigned long rate;
+ 
++	if (!clk)
++		return 0;
++
+ 	spin_lock_irqsave(&clk_lock, flags);
+ 	rate = clk->get_rate(clk);
+ 	spin_unlock_irqrestore(&clk_lock, flags);
+@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
+ {
+ 	unsigned long flags, actual_rate;
+ 
++	if (!clk)
++		return 0;
++
+ 	if (!clk->set_rate)
+ 		return -ENOSYS;
+ 
+@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
+ 	unsigned long flags;
+ 	long ret;
+ 
++	if (!clk)
++		return 0;
++
+ 	if (!clk->set_rate)
+ 		return -ENOSYS;
+ 
+@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
+ 	unsigned long flags;
+ 	int ret;
+ 
++	if (!clk)
++		return 0;
++
+ 	if (!clk->set_parent)
+ 		return -ENOSYS;
+ 
+@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
+ 
+ struct clk *clk_get_parent(struct clk *clk)
+ {
+-	return clk->parent;
++	return !clk ? NULL : clk->parent;
+ }
+ EXPORT_SYMBOL(clk_get_parent);
+ 
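/*
 * Editorial sketch, not part of the patch: the NULL guards added above bring
 * the avr32 clk functions in line with the common convention that a NULL clk
 * is a valid "no clock" cookie.  A hypothetical consumer that benefits:
 */
static int foo_probe(struct platform_device *pdev)
{
	/* This clock is optional on some boards. */
	struct clk *clk = clk_get(&pdev->dev, "optional_bus_clk");

	if (IS_ERR(clk))
		clk = NULL;	/* treat a missing clock as "none" */

	clk_enable(clk);	/* now a safe no-op returning 0 when clk is NULL */
	return 0;
}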
+diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
+index f5016656494f..a3b1ffe50aa0 100644
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -1417,6 +1417,7 @@ config CPU_MIPS64_R6
+ 	select CPU_SUPPORTS_HIGHMEM
+ 	select CPU_SUPPORTS_MSA
+ 	select GENERIC_CSUM
++	select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ 	help
+ 	  Choose this option to build a kernel for release 6 or later of the
+ 	  MIPS64 architecture.  New MIPS processors, starting with the Warrior
+diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
+index 084780b355aa..1b0625189835 100644
+--- a/arch/mips/include/asm/fpu.h
++++ b/arch/mips/include/asm/fpu.h
+@@ -74,7 +74,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
+ 		goto fr_common;
+ 
+ 	case FPU_64BIT:
+-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
++#if !(defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) \
+       || defined(CONFIG_64BIT))
+ 		/* we only have a 32-bit FPU */
+ 		return SIGFPE;
+diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
+index 2b25d1ba1ea0..16f1ea9ab191 100644
+--- a/arch/mips/include/asm/smp.h
++++ b/arch/mips/include/asm/smp.h
+@@ -23,6 +23,7 @@
+ extern int smp_num_siblings;
+ extern cpumask_t cpu_sibling_map[];
+ extern cpumask_t cpu_core_map[];
++extern cpumask_t cpu_foreign_map;
+ 
+ #define raw_smp_processor_id() (current_thread_info()->cpu)
+ 
+diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
+index faa46ebd9dda..d0744cc77ea7 100644
+--- a/arch/mips/kernel/smp.c
++++ b/arch/mips/kernel/smp.c
+@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
+ cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+ EXPORT_SYMBOL(cpu_core_map);
+ 
++/*
++ * A logical cpu mask containing only one VPE per core to
++ * reduce the number of IPIs on large MT systems.
++ */
++cpumask_t cpu_foreign_map __read_mostly;
++EXPORT_SYMBOL(cpu_foreign_map);
++
+ /* representing cpus for which sibling maps can be computed */
+ static cpumask_t cpu_sibling_setup_map;
+ 
+@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
+ 	}
+ }
+ 
++/*
++ * Calculate a new cpu_foreign_map mask whenever a
++ * new cpu appears or disappears.
++ */
++static inline void calculate_cpu_foreign_map(void)
++{
++	int i, k, core_present;
++	cpumask_t temp_foreign_map;
++
++	/* Re-calculate the mask */
++	for_each_online_cpu(i) {
++		core_present = 0;
++		for_each_cpu(k, &temp_foreign_map)
++			if (cpu_data[i].package == cpu_data[k].package &&
++			    cpu_data[i].core == cpu_data[k].core)
++				core_present = 1;
++		if (!core_present)
++			cpumask_set_cpu(i, &temp_foreign_map);
++	}
++
++	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
++}
++
+ struct plat_smp_ops *mp_ops;
+ EXPORT_SYMBOL(mp_ops);
+ 
+@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
+ 	set_cpu_sibling_map(cpu);
+ 	set_cpu_core_map(cpu);
+ 
++	calculate_cpu_foreign_map();
++
+ 	cpumask_set_cpu(cpu, &cpu_callin_map);
+ 
+ 	synchronise_count_slave(cpu);
+@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
+ static void stop_this_cpu(void *dummy)
+ {
+ 	/*
+-	 * Remove this CPU:
++	 * Remove this CPU. Be a bit slow here and
++	 * set the bits for every online CPU so we don't miss
++	 * any IPI whilst taking this VPE down.
+ 	 */
++
++	cpumask_copy(&cpu_foreign_map, cpu_online_mask);
++
++	/* Make it visible to every other CPU */
++	smp_mb();
++
+ 	set_cpu_online(smp_processor_id(), false);
++	calculate_cpu_foreign_map();
+ 	local_irq_disable();
+ 	while (1);
+ }
+@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
+ 	mp_ops->prepare_cpus(max_cpus);
+ 	set_cpu_sibling_map(0);
+ 	set_cpu_core_map(0);
++	calculate_cpu_foreign_map();
+ #ifndef CONFIG_HOTPLUG_CPU
+ 	init_cpu_present(cpu_possible_mask);
+ #endif
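/*
 * Editorial worked example, not part of the patch: on a hypothetical system
 * with two cores of two VPEs each (CPUs 0/1 on core 0, CPUs 2/3 on core 1),
 * calculate_cpu_foreign_map() above walks the online CPUs and keeps one
 * representative per core:
 *
 *   i=0: no CPU in temp_foreign_map shares core 0 yet -> set CPU 0
 *   i=1: CPU 0 already covers core 0                  -> skip
 *   i=2: no CPU in temp_foreign_map shares core 1 yet -> set CPU 2
 *   i=3: CPU 2 already covers core 1                  -> skip
 *
 * leaving cpu_foreign_map = {0, 2}, so r4k_on_each_cpu() sends cache-op IPIs
 * to one VPE per core instead of to every VPE.
 */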
+diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
+index 22b9b2cb9219..6983fcd48131 100644
+--- a/arch/mips/math-emu/cp1emu.c
++++ b/arch/mips/math-emu/cp1emu.c
+@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ 			/* Fall through */
+ 		case jr_op:
+ 			/* For R6, JR already emulated in jalr_op */
+-			if (NO_R6EMU && insn.r_format.opcode == jr_op)
++			if (NO_R6EMU && insn.r_format.func == jr_op)
+ 				break;
+ 			*contpc = regs->regs[insn.r_format.rs];
+ 			return 1;
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 2e03ab173591..dca0efc078c1 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -37,6 +37,7 @@
+ #include <asm/cacheflush.h> /* for run_uncached() */
+ #include <asm/traps.h>
+ #include <asm/dma-coherence.h>
++#include <asm/mips-cm.h>
+ 
+ /*
+  * Special Variant of smp_call_function for use by cache functions:
+@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
+ {
+ 	preempt_disable();
+ 
+-#ifndef CONFIG_MIPS_MT_SMP
+-	smp_call_function(func, info, 1);
+-#endif
++	/*
++	 * The Coherent Manager propagates address-based cache ops to other
++	 * cores but not index-based ops. However, r4k_on_each_cpu is used
++	 * in both cases so there is no easy way to tell what kind of op is
++	 * executed to the other cores. The best we can probably do is
++	 * to restrict that call when a CM is not present because both
++	 * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
++	 */
++	if (!mips_cm_present())
++		smp_call_function_many(&cpu_foreign_map, func, info, 1);
+ 	func(info);
+ 	preempt_enable();
+ }
+diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
+index 3a08eae3318f..3edbb9fc91b4 100644
+--- a/arch/parisc/include/asm/pgalloc.h
++++ b/arch/parisc/include/asm/pgalloc.h
+@@ -72,7 +72,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+ 
+ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ {
+-	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
++	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
+ 		/*
+ 		 * This is the permanent pmd attached to the pgd;
+ 		 * cannot free it.
+@@ -81,6 +81,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+ 		 */
+ 		mm_inc_nr_pmds(mm);
+ 		return;
++	}
+ 	free_pages((unsigned long)pmd, PMD_ORDER);
+ }
+ 
+diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
+index 0a183756d6ec..f93c4a4e6580 100644
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -16,7 +16,7 @@
+ #include <asm/processor.h>
+ #include <asm/cache.h>
+ 
+-extern spinlock_t pa_dbit_lock;
++extern spinlock_t pa_tlb_lock;
+ 
+ /*
+  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
+@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
+  */
+ #define kern_addr_valid(addr)	(1)
+ 
++/* Purge data and instruction TLB entries.  Must be called holding
++ * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
++ * machines since the purge must be broadcast to all CPUs.
++ */
++
++static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
++{
++	mtsp(mm->context, 1);
++	pdtlb(addr);
++	if (unlikely(split_tlb))
++		pitlb(addr);
++}
++
+ /* Certain architectures need to do special things when PTEs
+  * within a page table are directly modified.  Thus, the following
+  * hook is made available.
+@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
+                 *(pteptr) = (pteval);                           \
+         } while(0)
+ 
+-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
++#define pte_inserted(x)						\
++	((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED))		\
++	 == (_PAGE_PRESENT|_PAGE_ACCESSED))
+ 
+-#define set_pte_at(mm, addr, ptep, pteval)                      \
+-	do {                                                    \
++#define set_pte_at(mm, addr, ptep, pteval)			\
++	do {							\
++		pte_t old_pte;					\
+ 		unsigned long flags;				\
+-		spin_lock_irqsave(&pa_dbit_lock, flags);	\
+-		set_pte(ptep, pteval);                          \
+-		purge_tlb_entries(mm, addr);                    \
+-		spin_unlock_irqrestore(&pa_dbit_lock, flags);	\
++		spin_lock_irqsave(&pa_tlb_lock, flags);		\
++		old_pte = *ptep;				\
++		set_pte(ptep, pteval);				\
++		if (pte_inserted(old_pte))			\
++			purge_tlb_entries(mm, addr);		\
++		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+ 	} while (0)
+ 
+ #endif /* !__ASSEMBLY__ */
+@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
+ 
+ #define pte_none(x)     (pte_val(x) == 0)
+ #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+-#define pte_clear(mm,addr,xp)	do { pte_val(*(xp)) = 0; } while (0)
++#define pte_clear(mm, addr, xp)  set_pte_at(mm, addr, xp, __pte(0))
+ 
+ #define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
+ #define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
+ 	if (!pte_young(*ptep))
+ 		return 0;
+ 
+-	spin_lock_irqsave(&pa_dbit_lock, flags);
++	spin_lock_irqsave(&pa_tlb_lock, flags);
+ 	pte = *ptep;
+ 	if (!pte_young(pte)) {
+-		spin_unlock_irqrestore(&pa_dbit_lock, flags);
++		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ 		return 0;
+ 	}
+ 	set_pte(ptep, pte_mkold(pte));
+ 	purge_tlb_entries(vma->vm_mm, addr);
+-	spin_unlock_irqrestore(&pa_dbit_lock, flags);
++	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ 	return 1;
+ }
+ 
+@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ 	pte_t old_pte;
+ 	unsigned long flags;
+ 
+-	spin_lock_irqsave(&pa_dbit_lock, flags);
++	spin_lock_irqsave(&pa_tlb_lock, flags);
+ 	old_pte = *ptep;
+-	pte_clear(mm,addr,ptep);
+-	purge_tlb_entries(mm, addr);
+-	spin_unlock_irqrestore(&pa_dbit_lock, flags);
++	set_pte(ptep, __pte(0));
++	if (pte_inserted(old_pte))
++		purge_tlb_entries(mm, addr);
++	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ 
+ 	return old_pte;
+ }
+@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+ {
+ 	unsigned long flags;
+-	spin_lock_irqsave(&pa_dbit_lock, flags);
++	spin_lock_irqsave(&pa_tlb_lock, flags);
+ 	set_pte(ptep, pte_wrprotect(*ptep));
+ 	purge_tlb_entries(mm, addr);
+-	spin_unlock_irqrestore(&pa_dbit_lock, flags);
++	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+ }
+ 
+ #define pte_same(A,B)	(pte_val(A) == pte_val(B))
+diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
+index 9d086a599fa0..e84b96478193 100644
+--- a/arch/parisc/include/asm/tlbflush.h
++++ b/arch/parisc/include/asm/tlbflush.h
+@@ -13,6 +13,9 @@
+  * active at any one time on the Merced bus.  This tlb purge
+  * synchronisation is fairly lightweight and harmless so we activate
+  * it on all systems not just the N class.
++
++ * It is also used to ensure PTE updates are atomic and consistent
++ * with the TLB.
+  */
+ extern spinlock_t pa_tlb_lock;
+ 
+@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
+ 
+ #define smp_flush_tlb_all()	flush_tlb_all()
+ 
++int __flush_tlb_range(unsigned long sid,
++	unsigned long start, unsigned long end);
++
++#define flush_tlb_range(vma, start, end) \
++	__flush_tlb_range((vma)->vm_mm->context, start, end)
++
++#define flush_tlb_kernel_range(start, end) \
++	__flush_tlb_range(0, start, end)
++
+ /*
+  * flush_tlb_mm()
+  *
+- * XXX This code is NOT valid for HP-UX compatibility processes,
+- * (although it will probably work 99% of the time). HP-UX
+- * processes are free to play with the space id's and save them
+- * over long periods of time, etc. so we have to preserve the
+- * space and just flush the entire tlb. We need to check the
+- * personality in order to do that, but the personality is not
+- * currently being set correctly.
+- *
+- * Of course, Linux processes could do the same thing, but
+- * we don't support that (and the compilers, dynamic linker,
+- * etc. do not do that).
++ * The code to switch to a new context is NOT valid for processes
++ * which play with the space id's.  Thus, we have to preserve the
++ * space and just flush the entire tlb.  However, the compilers,
++ * dynamic linker, etc, do not manipulate space id's, so there
++ * could be a significant performance benefit in switching contexts
++ * and not flushing the whole tlb.
+  */
+ 
+ static inline void flush_tlb_mm(struct mm_struct *mm)
+@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
+ 	BUG_ON(mm == &init_mm); /* Should never happen */
+ 
+ #if 1 || defined(CONFIG_SMP)
++	/* Except for very small threads, flushing the whole TLB is
++	 * faster than using __flush_tlb_range.  The pdtlb and pitlb
++	 * instructions are very slow because of the TLB broadcast.
++	 * It might be faster to do local range flushes on all CPUs
++	 * on PA 2.0 systems.
++	 */
+ 	flush_tlb_all();
+ #else
+ 	/* FIXME: currently broken, causing space id and protection ids
+-	 *  to go out of sync, resulting in faults on userspace accesses.
++	 * to go out of sync, resulting in faults on userspace accesses.
++	 * This approach needs further investigation since running many
++	 * small applications (e.g., GCC testsuite) is faster on HP-UX.
+ 	 */
+ 	if (mm) {
+ 		if (mm->context != 0)
+@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
+ {
+ 	unsigned long flags, sid;
+ 
+-	/* For one page, it's not worth testing the split_tlb variable */
+-
+-	mb();
+ 	sid = vma->vm_mm->context;
+ 	purge_tlb_start(flags);
+ 	mtsp(sid, 1);
+ 	pdtlb(addr);
+-	pitlb(addr);
++	if (unlikely(split_tlb))
++		pitlb(addr);
+ 	purge_tlb_end(flags);
+ }
+-
+-void __flush_tlb_range(unsigned long sid,
+-	unsigned long start, unsigned long end);
+-
+-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
+-
+-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
+-
+ #endif
+diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
+index f6448c7c62b5..cda6dbbe9842 100644
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
+ EXPORT_SYMBOL(flush_kernel_icache_range_asm);
+ 
+ #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
+-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
++static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
++
++#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
++static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
+ 
+ void __init parisc_setup_cache_timing(void)
+ {
+ 	unsigned long rangetime, alltime;
+-	unsigned long size;
++	unsigned long size, start;
+ 
+ 	alltime = mfctl(16);
+ 	flush_data_cache();
+@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
+ 	/* Racy, but if we see an intermediate value, it's ok too... */
+ 	parisc_cache_flush_threshold = size * alltime / rangetime;
+ 
+-	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1); 
++	parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
+ 	if (!parisc_cache_flush_threshold)
+ 		parisc_cache_flush_threshold = FLUSH_THRESHOLD;
+ 
+ 	if (parisc_cache_flush_threshold > cache_info.dc_size)
+ 		parisc_cache_flush_threshold = cache_info.dc_size;
+ 
+-	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
++	printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
++		parisc_cache_flush_threshold/1024);
++
++	/* calculate TLB flush threshold */
++
++	alltime = mfctl(16);
++	flush_tlb_all();
++	alltime = mfctl(16) - alltime;
++
++	size = PAGE_SIZE;
++	start = (unsigned long) _text;
++	rangetime = mfctl(16);
++	while (start < (unsigned long) _end) {
++		flush_tlb_kernel_range(start, start + PAGE_SIZE);
++		start += PAGE_SIZE;
++		size += PAGE_SIZE;
++	}
++	rangetime = mfctl(16) - rangetime;
++
++	printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n",
++		alltime, size, rangetime);
++
++	parisc_tlb_flush_threshold = size * alltime / rangetime;
++	parisc_tlb_flush_threshold *= num_online_cpus();
++	parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
++	if (!parisc_tlb_flush_threshold)
++		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
++
++	printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
++		parisc_tlb_flush_threshold/1024);
+ }
+ 
+ extern void purge_kernel_dcache_page_asm(unsigned long);
+@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ }
+ EXPORT_SYMBOL(copy_user_page);
+ 
+-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+-{
+-	unsigned long flags;
+-
+-	/* Note: purge_tlb_entries can be called at startup with
+-	   no context.  */
+-
+-	purge_tlb_start(flags);
+-	mtsp(mm->context, 1);
+-	pdtlb(addr);
+-	pitlb(addr);
+-	purge_tlb_end(flags);
+-}
+-EXPORT_SYMBOL(purge_tlb_entries);
+-
+-void __flush_tlb_range(unsigned long sid, unsigned long start,
+-		       unsigned long end)
++/* __flush_tlb_range()
++ *
++ * returns 1 if all TLBs were flushed.
++ */
++int __flush_tlb_range(unsigned long sid, unsigned long start,
++		      unsigned long end)
+ {
+-	unsigned long npages;
++	unsigned long flags, size;
+ 
+-	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+-	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
++	size = (end - start);
++	if (size >= parisc_tlb_flush_threshold) {
+ 		flush_tlb_all();
+-	else {
+-		unsigned long flags;
++		return 1;
++	}
+ 
++	/* Purge TLB entries for small ranges using the pdtlb and
++	   pitlb instructions.  These instructions execute locally
++	   but cause a purge request to be broadcast to other TLBs.  */
++	if (likely(!split_tlb)) {
++		while (start < end) {
++			purge_tlb_start(flags);
++			mtsp(sid, 1);
++			pdtlb(start);
++			purge_tlb_end(flags);
++			start += PAGE_SIZE;
++		}
++		return 0;
++	}
++
++	/* split TLB case */
++	while (start < end) {
+ 		purge_tlb_start(flags);
+ 		mtsp(sid, 1);
+-		if (split_tlb) {
+-			while (npages--) {
+-				pdtlb(start);
+-				pitlb(start);
+-				start += PAGE_SIZE;
+-			}
+-		} else {
+-			while (npages--) {
+-				pdtlb(start);
+-				start += PAGE_SIZE;
+-			}
+-		}
++		pdtlb(start);
++		pitlb(start);
+ 		purge_tlb_end(flags);
++		start += PAGE_SIZE;
+ 	}
++	return 0;
+ }
+ 
+ static void cacheflush_h_tmp_function(void *dummy)
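/*
 * Editorial worked example, not part of the patch, with made-up cycle counts:
 * the threshold computed in parisc_setup_cache_timing() above is "the range
 * size whose page-by-page purge costs the same as one whole-TLB flush",
 * scaled by the CPU count.  If flush_tlb_all() took 50,000 cycles and purging
 * a 2 MB kernel-text range page by page took 400,000 cycles on a 2-CPU box:
 *
 *   parisc_tlb_flush_threshold = 2 MB * 50,000 / 400,000    = 256 KB
 *                                256 KB * num_online_cpus() = 512 KB
 *
 * so __flush_tlb_range() falls back to flush_tlb_all() for ranges of 512 KB
 * and up.
 */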
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 75819617f93b..c5ef4081b01d 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -45,7 +45,7 @@
+ 	.level 2.0
+ #endif
+ 
+-	.import         pa_dbit_lock,data
++	.import		pa_tlb_lock,data
+ 
+ 	/* space_to_prot macro creates a prot id from a space id */
+ 
+@@ -420,8 +420,8 @@
+ 	SHLREG		%r9,PxD_VALUE_SHIFT,\pmd
+ 	extru		\va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ 	dep		%r0,31,PAGE_SHIFT,\pmd  /* clear offset */
+-	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+-	LDREG		%r0(\pmd),\pte		/* pmd is now pte */
++	shladd		\index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
++	LDREG		%r0(\pmd),\pte
+ 	bb,>=,n		\pte,_PAGE_PRESENT_BIT,\fault
+ 	.endm
+ 
+@@ -453,57 +453,53 @@
+ 	L2_ptep		\pgd,\pte,\index,\va,\fault
+ 	.endm
+ 
+-	/* Acquire pa_dbit_lock lock. */
+-	.macro		dbit_lock	spc,tmp,tmp1
++	/* Acquire pa_tlb_lock lock and recheck page is still present. */
++	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
+ #ifdef CONFIG_SMP
+ 	cmpib,COND(=),n	0,\spc,2f
+-	load32		PA(pa_dbit_lock),\tmp
++	load32		PA(pa_tlb_lock),\tmp
+ 1:	LDCW		0(\tmp),\tmp1
+ 	cmpib,COND(=)	0,\tmp1,1b
+ 	nop
++	LDREG		0(\ptp),\pte
++	bb,<,n		\pte,_PAGE_PRESENT_BIT,2f
++	b		\fault
++	stw		 \spc,0(\tmp)
+ 2:
+ #endif
+ 	.endm
+ 
+-	/* Release pa_dbit_lock lock without reloading lock address. */
+-	.macro		dbit_unlock0	spc,tmp
++	/* Release pa_tlb_lock lock without reloading lock address. */
++	.macro		tlb_unlock0	spc,tmp
+ #ifdef CONFIG_SMP
+ 	or,COND(=)	%r0,\spc,%r0
+ 	stw             \spc,0(\tmp)
+ #endif
+ 	.endm
+ 
+-	/* Release pa_dbit_lock lock. */
+-	.macro		dbit_unlock1	spc,tmp
++	/* Release pa_tlb_lock lock. */
++	.macro		tlb_unlock1	spc,tmp
+ #ifdef CONFIG_SMP
+-	load32		PA(pa_dbit_lock),\tmp
+-	dbit_unlock0	\spc,\tmp
++	load32		PA(pa_tlb_lock),\tmp
++	tlb_unlock0	\spc,\tmp
+ #endif
+ 	.endm
+ 
+ 	/* Set the _PAGE_ACCESSED bit of the PTE.  Be clever and
+ 	 * don't needlessly dirty the cache line if it was already set */
+-	.macro		update_ptep	spc,ptep,pte,tmp,tmp1
+-#ifdef CONFIG_SMP
+-	or,COND(=)	%r0,\spc,%r0
+-	LDREG		0(\ptep),\pte
+-#endif
++	.macro		update_accessed	ptp,pte,tmp,tmp1
+ 	ldi		_PAGE_ACCESSED,\tmp1
+ 	or		\tmp1,\pte,\tmp
+ 	and,COND(<>)	\tmp1,\pte,%r0
+-	STREG		\tmp,0(\ptep)
++	STREG		\tmp,0(\ptp)
+ 	.endm
+ 
+ 	/* Set the dirty bit (and accessed bit).  No need to be
+ 	 * clever, this is only used from the dirty fault */
+-	.macro		update_dirty	spc,ptep,pte,tmp
+-#ifdef CONFIG_SMP
+-	or,COND(=)	%r0,\spc,%r0
+-	LDREG		0(\ptep),\pte
+-#endif
++	.macro		update_dirty	ptp,pte,tmp
+ 	ldi		_PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ 	or		\tmp,\pte,\pte
+-	STREG		\pte,0(\ptep)
++	STREG		\pte,0(\ptp)
+ 	.endm
+ 
+ 	/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
+@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
+ 
+ 	L3_ptep		ptp,pte,t0,va,dtlb_check_alias_20w
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20w
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 	
+ 	idtlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
+ 
+ 	L3_ptep		ptp,pte,t0,va,nadtlb_check_alias_20w
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+ 	idtlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1202,20 +1198,20 @@ dtlb_miss_11:
+ 
+ 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_11
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_11
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb_11	spc,pte,prot
+ 
+-	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
++	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+ 	mtsp		spc,%sr1
+ 
+ 	idtlba		pte,(%sr1,va)
+ 	idtlbp		prot,(%sr1,va)
+ 
+-	mtsp		t0, %sr1	/* Restore sr1 */
+-	dbit_unlock1	spc,t0
++	mtsp		t1, %sr1	/* Restore sr1 */
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
+ 
+ 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_11
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_11
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb_11	spc,pte,prot
+ 
+-
+-	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
++	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+ 	mtsp		spc,%sr1
+ 
+ 	idtlba		pte,(%sr1,va)
+ 	idtlbp		prot,(%sr1,va)
+ 
+-	mtsp		t0, %sr1	/* Restore sr1 */
+-	dbit_unlock1	spc,t0
++	mtsp		t1, %sr1	/* Restore sr1 */
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1269,16 +1264,16 @@ dtlb_miss_20:
+ 
+ 	L2_ptep		ptp,pte,t0,va,dtlb_check_alias_20
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dtlb_check_alias_20
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+-	f_extend	pte,t0
++	f_extend	pte,t1
+ 
+ 	idtlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
+ 
+ 	L2_ptep		ptp,pte,t0,va,nadtlb_check_alias_20
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,nadtlb_check_alias_20
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+-	f_extend	pte,t0
++	f_extend	pte,t1
+ 	
+-        idtlbt          pte,prot
+-	dbit_unlock1	spc,t0
++	idtlbt		pte,prot
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1406,14 +1401,14 @@ itlb_miss_20w:
+ 
+ 	L3_ptep		ptp,pte,t0,va,itlb_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 	
+ 	iitlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
+ 
+ 	L3_ptep		ptp,pte,t0,va,naitlb_check_alias_20w
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20w
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+ 	iitlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1458,20 +1453,20 @@ itlb_miss_11:
+ 
+ 	L2_ptep		ptp,pte,t0,va,itlb_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb_11	spc,pte,prot
+ 
+-	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
++	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+ 	mtsp		spc,%sr1
+ 
+ 	iitlba		pte,(%sr1,va)
+ 	iitlbp		prot,(%sr1,va)
+ 
+-	mtsp		t0, %sr1	/* Restore sr1 */
+-	dbit_unlock1	spc,t0
++	mtsp		t1, %sr1	/* Restore sr1 */
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1482,20 +1477,20 @@ naitlb_miss_11:
+ 
+ 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_11
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_11
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb_11	spc,pte,prot
+ 
+-	mfsp		%sr1,t0  /* Save sr1 so we can use it in tlb inserts */
++	mfsp		%sr1,t1  /* Save sr1 so we can use it in tlb inserts */
+ 	mtsp		spc,%sr1
+ 
+ 	iitlba		pte,(%sr1,va)
+ 	iitlbp		prot,(%sr1,va)
+ 
+-	mtsp		t0, %sr1	/* Restore sr1 */
+-	dbit_unlock1	spc,t0
++	mtsp		t1, %sr1	/* Restore sr1 */
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1516,16 +1511,16 @@ itlb_miss_20:
+ 
+ 	L2_ptep		ptp,pte,t0,va,itlb_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,itlb_fault
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+-	f_extend	pte,t0	
++	f_extend	pte,t1
+ 
+ 	iitlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1536,16 +1531,16 @@ naitlb_miss_20:
+ 
+ 	L2_ptep		ptp,pte,t0,va,naitlb_check_alias_20
+ 
+-	dbit_lock	spc,t0,t1
+-	update_ptep	spc,ptp,pte,t0,t1
++	tlb_lock	spc,ptp,pte,t0,t1,naitlb_check_alias_20
++	update_accessed	ptp,pte,t0,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+-	f_extend	pte,t0
++	f_extend	pte,t1
+ 
+ 	iitlbt          pte,prot
+-	dbit_unlock1	spc,t0
+ 
++	tlb_unlock1	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1568,14 +1563,14 @@ dbit_trap_20w:
+ 
+ 	L3_ptep		ptp,pte,t0,va,dbit_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_dirty	spc,ptp,pte,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
++	update_dirty	ptp,pte,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 		
+ 	idtlbt          pte,prot
+-	dbit_unlock0	spc,t0
+ 
++	tlb_unlock0	spc,t0
+ 	rfir
+ 	nop
+ #else
+@@ -1588,8 +1583,8 @@ dbit_trap_11:
+ 
+ 	L2_ptep		ptp,pte,t0,va,dbit_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_dirty	spc,ptp,pte,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
++	update_dirty	ptp,pte,t1
+ 
+ 	make_insert_tlb_11	spc,pte,prot
+ 
+@@ -1600,8 +1595,8 @@ dbit_trap_11:
+ 	idtlbp		prot,(%sr1,va)
+ 
+ 	mtsp            t1, %sr1     /* Restore sr1 */
+-	dbit_unlock0	spc,t0
+ 
++	tlb_unlock0	spc,t0
+ 	rfir
+ 	nop
+ 
+@@ -1612,16 +1607,16 @@ dbit_trap_20:
+ 
+ 	L2_ptep		ptp,pte,t0,va,dbit_fault
+ 
+-	dbit_lock	spc,t0,t1
+-	update_dirty	spc,ptp,pte,t1
++	tlb_lock	spc,ptp,pte,t0,t1,dbit_fault
++	update_dirty	ptp,pte,t1
+ 
+ 	make_insert_tlb	spc,pte,prot
+ 
+ 	f_extend	pte,t1
+ 	
+-        idtlbt          pte,prot
+-	dbit_unlock0	spc,t0
++	idtlbt		pte,prot
+ 
++	tlb_unlock0	spc,t0
+ 	rfir
+ 	nop
+ #endif
+diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
+index 47ee620d15d2..7f67c4c96a7a 100644
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -43,10 +43,6 @@
+ 
+ #include "../math-emu/math-emu.h"	/* for handle_fpe() */
+ 
+-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+-DEFINE_SPINLOCK(pa_dbit_lock);
+-#endif
+-
+ static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ 	struct pt_regs *regs);
+ 
+diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
+index ccde8f084ce4..112ccf497562 100644
+--- a/arch/powerpc/kernel/idle_power7.S
++++ b/arch/powerpc/kernel/idle_power7.S
+@@ -52,6 +52,22 @@
+ 	.text
+ 
+ /*
++ * Used by threads when the lock bit of core_idle_state is set.
++ * Threads will spin in HMT_LOW until the lock bit is cleared.
++ * r14 - pointer to core_idle_state
++ * r15 - used to load contents of core_idle_state
++ */
++
++core_idle_lock_held:
++	HMT_LOW
++3:	lwz	r15,0(r14)
++	andi.   r15,r15,PNV_CORE_IDLE_LOCK_BIT
++	bne	3b
++	HMT_MEDIUM
++	lwarx	r15,0,r14
++	blr
++
++/*
+  * Pass requested state in r3:
+  *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
+  *
+@@ -150,6 +166,10 @@ power7_enter_nap_mode:
+ 	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+ lwarx_loop1:
+ 	lwarx	r15,0,r14
++
++	andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
++	bnel	core_idle_lock_held
++
+ 	andc	r15,r15,r7			/* Clear thread bit */
+ 
+ 	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
+@@ -294,7 +314,7 @@ lwarx_loop2:
+ 	 * workaround undo code or resyncing timebase or restoring context
+ 	 * In either case loop until the lock bit is cleared.
+ 	 */
+-	bne	core_idle_lock_held
++	bnel	core_idle_lock_held
+ 
+ 	cmpwi	cr2,r15,0
+ 	lbz	r4,PACA_SUBCORE_SIBLING_MASK(r13)
+@@ -319,15 +339,6 @@ lwarx_loop2:
+ 	isync
+ 	b	common_exit
+ 
+-core_idle_lock_held:
+-	HMT_LOW
+-core_idle_lock_loop:
+-	lwz	r15,0(14)
+-	andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+-	bne	core_idle_lock_loop
+-	HMT_MEDIUM
+-	b	lwarx_loop2
+-
+ first_thread_in_subcore:
+ 	/* First thread in subcore to wakeup */
+ 	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
+index cfad7fca01d6..d7697ab802f6 100644
+--- a/arch/s390/include/asm/ctl_reg.h
++++ b/arch/s390/include/asm/ctl_reg.h
+@@ -57,7 +57,10 @@ union ctlreg0 {
+ 		unsigned long lap  : 1; /* Low-address-protection control */
+ 		unsigned long	   : 4;
+ 		unsigned long edat : 1; /* Enhanced-DAT-enablement control */
+-		unsigned long	   : 23;
++		unsigned long	   : 4;
++		unsigned long afp  : 1; /* AFP-register control */
++		unsigned long vx   : 1; /* Vector enablement control */
++		unsigned long	   : 17;
+ 	};
+ };
+ 
+diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
+index bff5e3b6d822..8ba32436effe 100644
+--- a/arch/s390/kernel/cache.c
++++ b/arch/s390/kernel/cache.c
+@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
+ 	union cache_topology ct;
+ 	enum cache_type ctype;
+ 
++	if (!test_facility(34))
++		return -EOPNOTSUPP;
+ 	if (!this_cpu_ci)
+ 		return -EINVAL;
+ 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
+diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
+index 505c17c0ae1a..56b550893593 100644
+--- a/arch/s390/kernel/nmi.c
++++ b/arch/s390/kernel/nmi.c
+@@ -21,6 +21,7 @@
+ #include <asm/nmi.h>
+ #include <asm/crw.h>
+ #include <asm/switch_to.h>
++#include <asm/ctl_reg.h>
+ 
+ struct mcck_struct {
+ 	int kill_task;
+@@ -129,26 +130,30 @@ static int notrace s390_revalidate_registers(struct mci *mci)
+ 	} else
+ 		asm volatile("lfpc 0(%0)" : : "a" (fpt_creg_save_area));
+ 
+-	asm volatile(
+-		"	ld	0,0(%0)\n"
+-		"	ld	1,8(%0)\n"
+-		"	ld	2,16(%0)\n"
+-		"	ld	3,24(%0)\n"
+-		"	ld	4,32(%0)\n"
+-		"	ld	5,40(%0)\n"
+-		"	ld	6,48(%0)\n"
+-		"	ld	7,56(%0)\n"
+-		"	ld	8,64(%0)\n"
+-		"	ld	9,72(%0)\n"
+-		"	ld	10,80(%0)\n"
+-		"	ld	11,88(%0)\n"
+-		"	ld	12,96(%0)\n"
+-		"	ld	13,104(%0)\n"
+-		"	ld	14,112(%0)\n"
+-		"	ld	15,120(%0)\n"
+-		: : "a" (fpt_save_area));
+-	/* Revalidate vector registers */
+-	if (MACHINE_HAS_VX && current->thread.vxrs) {
++	if (!MACHINE_HAS_VX) {
++		/* Revalidate floating point registers */
++		asm volatile(
++			"	ld	0,0(%0)\n"
++			"	ld	1,8(%0)\n"
++			"	ld	2,16(%0)\n"
++			"	ld	3,24(%0)\n"
++			"	ld	4,32(%0)\n"
++			"	ld	5,40(%0)\n"
++			"	ld	6,48(%0)\n"
++			"	ld	7,56(%0)\n"
++			"	ld	8,64(%0)\n"
++			"	ld	9,72(%0)\n"
++			"	ld	10,80(%0)\n"
++			"	ld	11,88(%0)\n"
++			"	ld	12,96(%0)\n"
++			"	ld	13,104(%0)\n"
++			"	ld	14,112(%0)\n"
++			"	ld	15,120(%0)\n"
++			: : "a" (fpt_save_area));
++	} else {
++		/* Revalidate vector registers */
++		union ctlreg0 cr0;
++
+ 		if (!mci->vr) {
+ 			/*
+ 			 * Vector registers can't be restored and therefore
+@@ -156,8 +161,12 @@ static int notrace s390_revalidate_registers(struct mci *mci)
+ 			 */
+ 			kill_task = 1;
+ 		}
++		cr0.val = S390_lowcore.cregs_save_area[0];
++		cr0.afp = cr0.vx = 1;
++		__ctl_load(cr0.val, 0, 0);
+ 		restore_vx_regs((__vector128 *)
+-				S390_lowcore.vector_save_area_addr);
++				&S390_lowcore.vector_save_area);
++		__ctl_load(S390_lowcore.cregs_save_area[0], 0, 0);
+ 	}
+ 	/* Revalidate access registers */
+ 	asm volatile(
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index dc5edc29b73a..8f587d871b9f 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
+ asmlinkage void execve_tail(void)
+ {
+ 	current->thread.fp_regs.fpc = 0;
+-	asm volatile("sfpc %0,%0" : : "d" (0));
++	asm volatile("sfpc %0" : : "d" (0));
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
+index 43c3169ea49c..ada0c07fe1a8 100644
+--- a/arch/s390/kernel/sclp.S
++++ b/arch/s390/kernel/sclp.S
+@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
+ 	jno	.Lesa2
+ 	ahi	%r15,-80
+ 	stmh	%r6,%r15,96(%r15)		# store upper register halves
++	basr	%r13,0
++	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
+ .Lesa2:
+ 	lr	%r10,%r2			# save string pointer
+ 	lhi	%r2,0
+@@ -291,6 +293,8 @@ ENTRY(_sclp_print_early)
+ .Lesa3:
+ 	lm	%r6,%r15,120(%r15)		# restore registers
+ 	br	%r14
++.Lzeroes:
++	.fill	64,4,0
+ 
+ .LwritedataS4:
+ 	.long	0x00760005			# SCLP command for write data
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 9afb9d602f84..dc2d7aa56440 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -415,13 +415,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
+ 		EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
+ 			      BPF_REG_1, offsetof(struct sk_buff, data));
+ 	}
+-	/* BPF compatibility: clear A (%b7) and X (%b8) registers */
+-	if (REG_SEEN(BPF_REG_7))
+-		/* lghi %b7,0 */
+-		EMIT4_IMM(0xa7090000, BPF_REG_7, 0);
+-	if (REG_SEEN(BPF_REG_8))
+-		/* lghi %b8,0 */
+-		EMIT4_IMM(0xa7090000, BPF_REG_8, 0);
++	/* BPF compatibility: clear A (%b0) and X (%b7) registers */
++	if (REG_SEEN(BPF_REG_A))
++		/* lghi %ba,0 */
++		EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
++	if (REG_SEEN(BPF_REG_X))
++		/* lghi %bx,0 */
++		EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
+ }
+ 
+ /*
+diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
+index d366675e4bf8..396b5c96e272 100644
+--- a/arch/tile/kernel/setup.c
++++ b/arch/tile/kernel/setup.c
+@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
+ 
+ void __init free_initrd_mem(unsigned long begin, unsigned long end)
+ {
+-	free_bootmem(__pa(begin), end - begin);
++	free_bootmem_late(__pa(begin), end - begin);
+ }
+ 
+ static int __init setup_initrd(char *str)
+diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
+index 48304b89b601..0cdc154a22b5 100644
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
+ 		unsigned int e820_type = 0;
+ 		unsigned long m = efi->efi_memmap;
+ 
++#ifdef CONFIG_X86_64
++		m |= (u64)efi->efi_memmap_hi << 32;
++#endif
++
+ 		d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
+ 		switch (d->type) {
+ 		case EFI_RESERVED_TYPE:
+diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
+index 8b22422fbad8..74a2a8dc9908 100644
+--- a/arch/x86/include/asm/kasan.h
++++ b/arch/x86/include/asm/kasan.h
+@@ -14,15 +14,11 @@
+ 
+ #ifndef __ASSEMBLY__
+ 
+-extern pte_t kasan_zero_pte[];
+-extern pte_t kasan_zero_pmd[];
+-extern pte_t kasan_zero_pud[];
+-
+ #ifdef CONFIG_KASAN
+-void __init kasan_map_early_shadow(pgd_t *pgd);
++void __init kasan_early_init(void);
+ void __init kasan_init(void);
+ #else
+-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
++static inline void kasan_early_init(void) { }
+ static inline void kasan_init(void) { }
+ #endif
+ 
+diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
+index 883f6b933fa4..e997f70f80c4 100644
+--- a/arch/x86/include/asm/mmu_context.h
++++ b/arch/x86/include/asm/mmu_context.h
+@@ -23,7 +23,7 @@ extern struct static_key rdpmc_always_available;
+ 
+ static inline void load_mm_cr4(struct mm_struct *mm)
+ {
+-	if (static_key_true(&rdpmc_always_available) ||
++	if (static_key_false(&rdpmc_always_available) ||
+ 	    atomic_read(&mm->context.perf_rdpmc_allowed))
+ 		cr4_set_bits(X86_CR4_PCE);
+ 	else
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+index e4d1b8b738fa..cb77b11bc414 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+@@ -934,6 +934,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
+ 		return 0;
+ 
+ 	/*
++	 * Getting up-to-date values requires an SMP IPI which is not
++	 * possible if we're being called in interrupt context. Return
++	 * the cached values instead.
++	 */
++	if (unlikely(in_interrupt()))
++		goto out;
++
++	/*
+ 	 * Notice that we don't perform the reading of an RMID
+ 	 * atomically, because we can't hold a spin lock across the
+ 	 * IPIs.
+diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
+index 5a4668136e98..f129a9af6357 100644
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+ 	/* Kill off the identity-map trampoline */
+ 	reset_early_page_tables();
+ 
+-	kasan_map_early_shadow(early_level4_pgt);
+-
+-	/* clear bss before set_intr_gate with early_idt_handler */
+ 	clear_bss();
+ 
++	clear_page(init_level4_pgt);
++
++	kasan_early_init();
++
+ 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
+ 		set_intr_gate(i, early_idt_handler_array[i]);
+ 	load_idt((const struct desc_ptr *)&idt_descr);
+@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
+ 	 */
+ 	load_ucode_bsp();
+ 
+-	clear_page(init_level4_pgt);
+ 	/* set init_level4_pgt kernel high mapping*/
+ 	init_level4_pgt[511] = early_level4_pgt[511];
+ 
+-	kasan_map_early_shadow(init_level4_pgt);
+-
+ 	x86_64_start_reservations(real_mode_data);
+ }
+ 
+diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
+index df7e78057ae0..7e5da2cbe59e 100644
+--- a/arch/x86/kernel/head_64.S
++++ b/arch/x86/kernel/head_64.S
+@@ -516,38 +516,9 @@ ENTRY(phys_base)
+ 	/* This must match the first entry in level2_kernel_pgt */
+ 	.quad   0x0000000000000000
+ 
+-#ifdef CONFIG_KASAN
+-#define FILL(VAL, COUNT)				\
+-	.rept (COUNT) ;					\
+-	.quad	(VAL) ;					\
+-	.endr
+-
+-NEXT_PAGE(kasan_zero_pte)
+-	FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-NEXT_PAGE(kasan_zero_pmd)
+-	FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-NEXT_PAGE(kasan_zero_pud)
+-	FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
+-
+-#undef FILL
+-#endif
+-
+-
+ #include "../../x86/xen/xen-head.S"
+ 	
+ 	__PAGE_ALIGNED_BSS
+ NEXT_PAGE(empty_zero_page)
+ 	.skip PAGE_SIZE
+ 
+-#ifdef CONFIG_KASAN
+-/*
+- * This page used as early shadow. We don't use empty_zero_page
+- * at early stages, stack instrumentation could write some garbage
+- * to this page.
+- * Latter we reuse it as zero shadow for large ranges of memory
+- * that allowed to access, but not instrumented by kasan
+- * (vmalloc/vmemmap ...).
+- */
+-NEXT_PAGE(kasan_zero_page)
+-	.skip PAGE_SIZE
+-#endif
+diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
+index 4860906c6b9f..9a54dbe98064 100644
+--- a/arch/x86/mm/kasan_init_64.c
++++ b/arch/x86/mm/kasan_init_64.c
+@@ -11,7 +11,19 @@
+ extern pgd_t early_level4_pgt[PTRS_PER_PGD];
+ extern struct range pfn_mapped[E820_X_MAX];
+ 
+-extern unsigned char kasan_zero_page[PAGE_SIZE];
++static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
++static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
++static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
++
++/*
++ * This page used as early shadow. We don't use empty_zero_page
++ * at early stages, stack instrumentation could write some garbage
++ * to this page.
++ * Latter we reuse it as zero shadow for large ranges of memory
++ * that allowed to access, but not instrumented by kasan
++ * (vmalloc/vmemmap ...).
++ */
++static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+ 
+ static int __init map_range(struct range *range)
+ {
+@@ -36,7 +48,7 @@ static void __init clear_pgds(unsigned long start,
+ 		pgd_clear(pgd_offset_k(start));
+ }
+ 
+-void __init kasan_map_early_shadow(pgd_t *pgd)
++static void __init kasan_map_early_shadow(pgd_t *pgd)
+ {
+ 	int i;
+ 	unsigned long start = KASAN_SHADOW_START;
+@@ -73,7 +85,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+ 	while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
+ 		WARN_ON(!pmd_none(*pmd));
+ 		set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
+-					| __PAGE_KERNEL_RO));
++					| _KERNPG_TABLE));
+ 		addr += PMD_SIZE;
+ 		pmd = pmd_offset(pud, addr);
+ 	}
+@@ -99,7 +111,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+ 	while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
+ 		WARN_ON(!pud_none(*pud));
+ 		set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
+-					| __PAGE_KERNEL_RO));
++					| _KERNPG_TABLE));
+ 		addr += PUD_SIZE;
+ 		pud = pud_offset(pgd, addr);
+ 	}
+@@ -124,7 +136,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
+ 	while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
+ 		WARN_ON(!pgd_none(*pgd));
+ 		set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
+-					| __PAGE_KERNEL_RO));
++					| _KERNPG_TABLE));
+ 		addr += PGDIR_SIZE;
+ 		pgd = pgd_offset_k(addr);
+ 	}
+@@ -166,6 +178,26 @@ static struct notifier_block kasan_die_notifier = {
+ };
+ #endif
+ 
++void __init kasan_early_init(void)
++{
++	int i;
++	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
++	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
++	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
++
++	for (i = 0; i < PTRS_PER_PTE; i++)
++		kasan_zero_pte[i] = __pte(pte_val);
++
++	for (i = 0; i < PTRS_PER_PMD; i++)
++		kasan_zero_pmd[i] = __pmd(pmd_val);
++
++	for (i = 0; i < PTRS_PER_PUD; i++)
++		kasan_zero_pud[i] = __pud(pud_val);
++
++	kasan_map_early_shadow(early_level4_pgt);
++	kasan_map_early_shadow(init_level4_pgt);
++}
++
+ void __init kasan_init(void)
+ {
+ 	int i;
+@@ -176,6 +208,7 @@ void __init kasan_init(void)
+ 
+ 	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
+ 	load_cr3(early_level4_pgt);
++	__flush_tlb_all();
+ 
+ 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+ 
+@@ -202,5 +235,6 @@ void __init kasan_init(void)
+ 	memset(kasan_zero_page, 0, PAGE_SIZE);
+ 
+ 	load_cr3(init_level4_pgt);
++	__flush_tlb_all();
+ 	init_task.kasan_depth = 0;
+ }
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 3250f2371aea..90b924acd982 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
+ 		} else {
+ 			unsigned long addr;
+ 			unsigned long nr_pages =
+-				f->flush_end - f->flush_start / PAGE_SIZE;
++				(f->flush_end - f->flush_start) / PAGE_SIZE;
+ 			addr = f->flush_start;
+ 			while (addr < f->flush_end) {
+ 				__flush_tlb_single(addr);
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 02744df576d5..841ea05e1b02 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -946,6 +946,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
+ 
+ static int __init arch_parse_efi_cmdline(char *str)
+ {
++	if (!str) {
++		pr_warn("need at least one option\n");
++		return -EINVAL;
++	}
++
+ 	if (parse_option_str(str, "old_map"))
+ 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
+ 	if (parse_option_str(str, "debug"))
+diff --git a/block/bio-integrity.c b/block/bio-integrity.c
+index 5cbd5d9ea61d..39ce74d10e2b 100644
+--- a/block/bio-integrity.c
++++ b/block/bio-integrity.c
+@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ 	unsigned long idx = BIO_POOL_NONE;
+ 	unsigned inline_vecs;
+ 
+-	if (!bs) {
++	if (!bs || !bs->bio_integrity_pool) {
+ 		bip = kmalloc(sizeof(struct bio_integrity_payload) +
+ 			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
+ 		inline_vecs = nr_vecs;
+@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
+ 		kfree(page_address(bip->bip_vec->bv_page) +
+ 		      bip->bip_vec->bv_offset);
+ 
+-	if (bs) {
++	if (bs && bs->bio_integrity_pool) {
+ 		if (bip->bip_slab != BIO_POOL_NONE)
+ 			bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
+ 				  bip->bip_slab);
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index 0ac817b750db..6817e28960b7 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -716,8 +716,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
+ 		return -EINVAL;
+ 
+ 	disk = get_gendisk(MKDEV(major, minor), &part);
+-	if (!disk || part)
++	if (!disk)
+ 		return -EINVAL;
++	if (part) {
++		put_disk(disk);
++		return -EINVAL;
++	}
+ 
+ 	rcu_read_lock();
+ 	spin_lock_irq(disk->queue->queue_lock);
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 594eea04266e..2dc1fd6c5bdb 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -1968,7 +1968,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+ 		goto err_hctxs;
+ 
+ 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
+-	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
++	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
+ 
+ 	q->nr_queues = nr_cpu_ids;
+ 	q->nr_hw_queues = set->nr_hw_queues;
+diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
+index 7ccc084bf1df..85aa76116a30 100644
+--- a/drivers/ata/libata-pmp.c
++++ b/drivers/ata/libata-pmp.c
+@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
+ 				       ATA_LFLAG_NO_SRST |
+ 				       ATA_LFLAG_ASSUME_ATA;
+ 		}
++	} else if (vendor == 0x11ab && devid == 0x4140) {
++		/* Marvell 4140 quirks */
++		ata_for_each_link(link, ap, EDGE) {
++			/* port 4 is for SEMB device and it doesn't like SRST */
++			if (link->pmp == 4)
++				link->flags |= ATA_LFLAG_DISABLED;
++		}
+ 	}
+ }
+ 
+diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
+index bf12a25eb3a2..0f8db28353c5 100644
+--- a/drivers/clk/st/clk-flexgen.c
++++ b/drivers/clk/st/clk-flexgen.c
+@@ -303,6 +303,8 @@ void __init st_of_flexgen_setup(struct device_node *np)
+ 	if (!rlock)
+ 		goto err;
+ 
++	spin_lock_init(rlock);
++
+ 	for (i = 0; i < clk_data->clk_num; i++) {
+ 		struct clk *clk;
+ 		const char *clk_name;
+diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
+index a917c4c7eaa9..6ae068ab07c8 100644
+--- a/drivers/clk/st/clkgen-fsyn.c
++++ b/drivers/clk/st/clkgen-fsyn.c
+@@ -340,7 +340,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
+ 		    CLKGEN_FIELD(0x30c, 0xf, 20),
+ 		    CLKGEN_FIELD(0x310, 0xf, 20) },
+ 	.lockstatus_present = true,
+-	.lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
++	.lock_status = CLKGEN_FIELD(0x2f0, 0x1, 24),
+ 	.powerup_polarity = 1,
+ 	.standby_polarity = 1,
+ 	.pll_ops	= &st_quadfs_pll_c32_ops,
+diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
+index fdcff10f6d30..ef6514636bfc 100644
+--- a/drivers/clk/st/clkgen-mux.c
++++ b/drivers/clk/st/clkgen-mux.c
+@@ -582,7 +582,7 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
+ };
+ static struct clkgen_mux_data stih407_a9_mux_data = {
+ 	.offset = 0x1a4,
+-	.shift = 1,
++	.shift = 0,
+ 	.width = 2,
+ };
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index c45d274a75c8..6f9d27f9001c 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -678,6 +678,7 @@ static struct cpu_defaults knl_params = {
+ 		.get_max = core_get_max_pstate,
+ 		.get_min = core_get_min_pstate,
+ 		.get_turbo = knl_get_turbo_pstate,
++		.get_scaling = core_get_scaling,
+ 		.set = core_set_pstate,
+ 	},
+ };
+diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
+index 46307098f8ba..0a70e46d5416 100644
+--- a/drivers/crypto/omap-des.c
++++ b/drivers/crypto/omap-des.c
+@@ -536,9 +536,6 @@ static int omap_des_crypt_dma_stop(struct omap_des_dev *dd)
+ 	dmaengine_terminate_all(dd->dma_lch_in);
+ 	dmaengine_terminate_all(dd->dma_lch_out);
+ 
+-	dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
+-	dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len, DMA_FROM_DEVICE);
+-
+ 	return err;
+ }
+ 
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index 4fd9961d552e..d42537425438 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
+ 	return ret;
+ }
+ 
+-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
++static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
++	int len)
+ {
+ 	struct cper_mem_err_compact cmem;
+ 
++	/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
++	if (len == sizeof(struct cper_sec_mem_err_old) &&
++	    (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
++		pr_err(FW_WARN "valid bits set for fields beyond structure\n");
++		return;
++	}
+ 	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
+ 		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
+ 	if (mem->validation_bits & CPER_MEM_VALID_PA)
+@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
+ 	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
+ 		struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
+ 		printk("%s""section_type: memory error\n", newpfx);
+-		if (gdata->error_data_length >= sizeof(*mem_err))
+-			cper_print_mem(newpfx, mem_err);
++		if (gdata->error_data_length >=
++		    sizeof(struct cper_sec_mem_err_old))
++			cper_print_mem(newpfx, mem_err,
++				       gdata->error_data_length);
+ 		else
+ 			goto err_section_too_small;
+ 	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index e14363d12690..63226e9036a1 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -57,6 +57,11 @@ bool efi_runtime_disabled(void)
+ 
+ static int __init parse_efi_cmdline(char *str)
+ {
++	if (!str) {
++		pr_warn("need at least one option\n");
++		return -EINVAL;
++	}
++
+ 	if (parse_option_str(str, "noruntime"))
+ 		disable_runtime = true;
+ 
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
+index 89049335b738..cd6dae08175e 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -863,8 +863,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
+ 
+ 	pm_runtime_get_sync(dev->dev);
+ 
++	mutex_lock(&cli->mutex);
+ 	if (cli->abi16)
+ 		nouveau_abi16_fini(cli->abi16);
++	mutex_unlock(&cli->mutex);
+ 
+ 	mutex_lock(&drm->client.mutex);
+ 	list_del(&cli->head);
+diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+index 4ef602c5469d..495c57644ced 100644
+--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
+@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
+ 	if (ret)
+ 		return ret;
+ 
+-	if (RING_SPACE(chan, 49)) {
++	if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
+ 		nouveau_fbcon_gpu_lockup(info);
+ 		return 0;
+ 	}
+diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
+index 7da7958556a3..981342d142ff 100644
+--- a/drivers/gpu/drm/nouveau/nv50_display.c
++++ b/drivers/gpu/drm/nouveau/nv50_display.c
+@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+ {
+ 	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+ 
+-	if (show && nv_crtc->cursor.nvbo)
++	if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
+ 		nv50_crtc_cursor_show(nv_crtc);
+ 	else
+ 		nv50_crtc_cursor_hide(nv_crtc);
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+index 80614f1b2074..282143f49d72 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
+ {
+ 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
+ 	struct nv04_instobj_priv *node = (void *)object;
++	struct nvkm_subdev *subdev = (void *)priv;
++
++	mutex_lock(&subdev->mutex);
+ 	nvkm_mm_free(&priv->heap, &node->mem);
++	mutex_unlock(&subdev->mutex);
++
+ 	nvkm_instobj_destroy(&node->base);
+ }
+ 
+@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ 	struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
+ 	struct nv04_instobj_priv *node;
+ 	struct nvkm_instobj_args *args = data;
++	struct nvkm_subdev *subdev = (void *)priv;
+ 	int ret;
+ 
+ 	if (!args->align)
+@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
+ 	if (ret)
+ 		return ret;
+ 
++	mutex_lock(&subdev->mutex);
+ 	ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
+ 			   args->align, &node->mem);
++	mutex_unlock(&subdev->mutex);
+ 	if (ret)
+ 		return ret;
+ 
+diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
+index 3318de690e00..a2dbbbe0d8d7 100644
+--- a/drivers/hid/hid-cp2112.c
++++ b/drivers/hid/hid-cp2112.c
+@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
+ 	struct cp2112_force_read_report report;
+ 	int ret;
+ 
++	if (size > sizeof(dev->read_data))
++		size = sizeof(dev->read_data);
+ 	report.report = CP2112_DATA_READ_FORCE_SEND;
+ 	report.length = cpu_to_be16(size);
+ 
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 28fcb2e246d5..fbfc02bb2cfa 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -195,7 +195,7 @@ abort:
+ }
+ 
+ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
+-				 unsigned int voltage)
++				 unsigned long voltage)
+ {
+ 	int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
+ 	int err;
+diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
+index b77b82f24480..6153df735e82 100644
+--- a/drivers/hwmon/nct7904.c
++++ b/drivers/hwmon/nct7904.c
+@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
+ 	return sprintf(buf, "%d\n", val);
+ }
+ 
+-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
+-			  const char *buf, size_t count)
++static ssize_t store_enable(struct device *dev,
++			    struct device_attribute *devattr,
++			    const char *buf, size_t count)
+ {
+ 	int index = to_sensor_dev_attr(devattr)->index;
+ 	struct nct7904_data *data = dev_get_drvdata(dev);
+@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
+ 
+ 	if (kstrtoul(buf, 10, &val) < 0)
+ 		return -EINVAL;
+-	if (val > 1 || (val && !data->fan_mode[index]))
++	if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
+ 		return -EINVAL;
+ 
+ 	ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
+-				val ? data->fan_mode[index] : 0);
++				val == 2 ? data->fan_mode[index] : 0);
+ 
+ 	return ret ? ret : count;
+ }
+ 
+-/* Return 0 for manual mode or 1 for SmartFan mode */
+-static ssize_t show_mode(struct device *dev,
+-			 struct device_attribute *devattr, char *buf)
++/* Return 1 for manual mode or 2 for SmartFan mode */
++static ssize_t show_enable(struct device *dev,
++			   struct device_attribute *devattr, char *buf)
+ {
+ 	int index = to_sensor_dev_attr(devattr)->index;
+ 	struct nct7904_data *data = dev_get_drvdata(dev);
+@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
+ 	if (val < 0)
+ 		return val;
+ 
+-	return sprintf(buf, "%d\n", val ? 1 : 0);
++	return sprintf(buf, "%d\n", val ? 2 : 1);
+ }
+ 
+ /* 2 attributes per channel: pwm and mode */
+-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
+ 			show_pwm, store_pwm, 0);
+-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR,
+-			show_mode, store_mode, 0);
+-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
++			show_enable, store_enable, 0);
++static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
+ 			show_pwm, store_pwm, 1);
+-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR,
+-			show_mode, store_mode, 1);
+-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
++			show_enable, store_enable, 1);
++static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
+ 			show_pwm, store_pwm, 2);
+-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR,
+-			show_mode, store_mode, 2);
+-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR,
++static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
++			show_enable, store_enable, 2);
++static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
+ 			show_pwm, store_pwm, 3);
+-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR,
+-			show_mode, store_mode, 3);
++static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
++			show_enable, store_enable, 3);
+ 
+ static struct attribute *nct7904_fanctl_attrs[] = {
+-	&sensor_dev_attr_fan1_pwm.dev_attr.attr,
+-	&sensor_dev_attr_fan1_mode.dev_attr.attr,
+-	&sensor_dev_attr_fan2_pwm.dev_attr.attr,
+-	&sensor_dev_attr_fan2_mode.dev_attr.attr,
+-	&sensor_dev_attr_fan3_pwm.dev_attr.attr,
+-	&sensor_dev_attr_fan3_mode.dev_attr.attr,
+-	&sensor_dev_attr_fan4_pwm.dev_attr.attr,
+-	&sensor_dev_attr_fan4_mode.dev_attr.attr,
++	&sensor_dev_attr_pwm1.dev_attr.attr,
++	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
++	&sensor_dev_attr_pwm2.dev_attr.attr,
++	&sensor_dev_attr_pwm2_enable.dev_attr.attr,
++	&sensor_dev_attr_pwm3.dev_attr.attr,
++	&sensor_dev_attr_pwm3_enable.dev_attr.attr,
++	&sensor_dev_attr_pwm4.dev_attr.attr,
++	&sensor_dev_attr_pwm4_enable.dev_attr.attr,
+ 	NULL
+ };
+ 
+diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+index e5cc43074196..2d13fd08ceb7 100644
+--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
++++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+@@ -176,7 +176,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
+ 		else
+ 			size += ipoib_recvq_size * ipoib_max_conn_qp;
+ 	} else
+-		goto out_free_wq;
++		if (ret != -ENOSYS)
++			goto out_free_wq;
+ 
+ 	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
+ 	if (IS_ERR(priv->recv_cq)) {
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 35c8d0ceabee..3a32caf06bf1 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -1199,7 +1199,7 @@ static void set_input_params(struct psmouse *psmouse,
+ 					ABS_MT_POSITION_Y);
+ 		/* Image sensors can report per-contact pressure */
+ 		input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+-		input_mt_init_slots(dev, 3, INPUT_MT_POINTER | INPUT_MT_TRACK);
++		input_mt_init_slots(dev, 2, INPUT_MT_POINTER | INPUT_MT_TRACK);
+ 
+ 		/* Image sensors can signal 4 and 5 finger clicks */
+ 		__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index f2c6c352c55a..2c41107240de 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
+ 		goto err_out;
+ 	}
+ 
++	/* TSC-25 data sheet specifies a delay after the RESET command */
++	msleep(150);
++
+ 	/* set coordinate output rate */
+ 	buf[0] = buf[1] = 0xFF;
+ 	ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 5ecfaf29933a..c87c4b1bfc00 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1756,8 +1756,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
+ 
+ static void domain_exit(struct dmar_domain *domain)
+ {
++	struct dmar_drhd_unit *drhd;
++	struct intel_iommu *iommu;
+ 	struct page *freelist = NULL;
+-	int i;
+ 
+ 	/* Domain 0 is reserved, so dont process it */
+ 	if (!domain)
+@@ -1777,8 +1778,10 @@ static void domain_exit(struct dmar_domain *domain)
+ 
+ 	/* clear attached or cached domains */
+ 	rcu_read_lock();
+-	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus)
+-		iommu_detach_domain(domain, g_iommus[i]);
++	for_each_active_iommu(iommu, drhd)
++		if (domain_type_is_vm(domain) ||
++		    test_bit(iommu->seq_id, domain->iommu_bmp))
++			iommu_detach_domain(domain, iommu);
+ 	rcu_read_unlock();
+ 
+ 	dma_free_pagelist(freelist);
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 1b7e155869f6..c00e2db351ba 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -75,6 +75,13 @@ struct its_node {
+ 
+ #define ITS_ITT_ALIGN		SZ_256
+ 
++struct event_lpi_map {
++	unsigned long		*lpi_map;
++	u16			*col_map;
++	irq_hw_number_t		lpi_base;
++	int			nr_lpis;
++};
++
+ /*
+  * The ITS view of a device - belongs to an ITS, a collection, owns an
+  * interrupt translation table, and a list of interrupts.
+@@ -82,11 +89,8 @@ struct its_node {
+ struct its_device {
+ 	struct list_head	entry;
+ 	struct its_node		*its;
+-	struct its_collection	*collection;
++	struct event_lpi_map	event_map;
+ 	void			*itt;
+-	unsigned long		*lpi_map;
+-	irq_hw_number_t		lpi_base;
+-	int			nr_lpis;
+ 	u32			nr_ites;
+ 	u32			device_id;
+ };
+@@ -99,6 +103,14 @@ static struct rdists *gic_rdists;
+ #define gic_data_rdist()		(raw_cpu_ptr(gic_rdists->rdist))
+ #define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
+ 
++static struct its_collection *dev_event_to_col(struct its_device *its_dev,
++					       u32 event)
++{
++	struct its_node *its = its_dev->its;
++
++	return its->collections + its_dev->event_map.col_map[event];
++}
++
+ /*
+  * ITS command descriptors - parameters to be encoded in a command
+  * block.
+@@ -134,7 +146,7 @@ struct its_cmd_desc {
+ 		struct {
+ 			struct its_device *dev;
+ 			struct its_collection *col;
+-			u32 id;
++			u32 event_id;
+ 		} its_movi_cmd;
+ 
+ 		struct {
+@@ -241,7 +253,7 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+ 
+ 	its_fixup_cmd(cmd);
+ 
+-	return desc->its_mapd_cmd.dev->collection;
++	return NULL;
+ }
+ 
+ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+@@ -260,52 +272,72 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+ static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd,
+ 						  struct its_cmd_desc *desc)
+ {
++	struct its_collection *col;
++
++	col = dev_event_to_col(desc->its_mapvi_cmd.dev,
++			       desc->its_mapvi_cmd.event_id);
++
+ 	its_encode_cmd(cmd, GITS_CMD_MAPVI);
+ 	its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id);
+ 	its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id);
+ 	its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id);
+-	its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id);
++	its_encode_collection(cmd, col->col_id);
+ 
+ 	its_fixup_cmd(cmd);
+ 
+-	return desc->its_mapvi_cmd.dev->collection;
++	return col;
+ }
+ 
+ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+ 						 struct its_cmd_desc *desc)
+ {
++	struct its_collection *col;
++
++	col = dev_event_to_col(desc->its_movi_cmd.dev,
++			       desc->its_movi_cmd.event_id);
++
+ 	its_encode_cmd(cmd, GITS_CMD_MOVI);
+ 	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
+-	its_encode_event_id(cmd, desc->its_movi_cmd.id);
++	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
+ 	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
+ 
+ 	its_fixup_cmd(cmd);
+ 
+-	return desc->its_movi_cmd.dev->collection;
++	return col;
+ }
+ 
+ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+ 						    struct its_cmd_desc *desc)
+ {
++	struct its_collection *col;
++
++	col = dev_event_to_col(desc->its_discard_cmd.dev,
++			       desc->its_discard_cmd.event_id);
++
+ 	its_encode_cmd(cmd, GITS_CMD_DISCARD);
+ 	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
+ 	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
+ 
+ 	its_fixup_cmd(cmd);
+ 
+-	return desc->its_discard_cmd.dev->collection;
++	return col;
+ }
+ 
+ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+ 						struct its_cmd_desc *desc)
+ {
++	struct its_collection *col;
++
++	col = dev_event_to_col(desc->its_inv_cmd.dev,
++			       desc->its_inv_cmd.event_id);
++
+ 	its_encode_cmd(cmd, GITS_CMD_INV);
+ 	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
+ 	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
+ 
+ 	its_fixup_cmd(cmd);
+ 
+-	return desc->its_inv_cmd.dev->collection;
++	return col;
+ }
+ 
+ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+@@ -497,7 +529,7 @@ static void its_send_movi(struct its_device *dev,
+ 
+ 	desc.its_movi_cmd.dev = dev;
+ 	desc.its_movi_cmd.col = col;
+-	desc.its_movi_cmd.id = id;
++	desc.its_movi_cmd.event_id = id;
+ 
+ 	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
+ }
+@@ -528,7 +560,7 @@ static void its_send_invall(struct its_node *its, struct its_collection *col)
+ static inline u32 its_get_event_id(struct irq_data *d)
+ {
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+-	return d->hwirq - its_dev->lpi_base;
++	return d->hwirq - its_dev->event_map.lpi_base;
+ }
+ 
+ static void lpi_set_config(struct irq_data *d, bool enable)
+@@ -583,7 +615,7 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ 
+ 	target_col = &its_dev->its->collections[cpu];
+ 	its_send_movi(its_dev, target_col, id);
+-	its_dev->collection = target_col;
++	its_dev->event_map.col_map[id] = cpu;
+ 
+ 	return IRQ_SET_MASK_OK_DONE;
+ }
+@@ -713,8 +745,10 @@ out:
+ 	return bitmap;
+ }
+ 
+-static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
++static void its_lpi_free(struct event_lpi_map *map)
+ {
++	int base = map->lpi_base;
++	int nr_ids = map->nr_lpis;
+ 	int lpi;
+ 
+ 	spin_lock(&lpi_lock);
+@@ -731,7 +765,8 @@ static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids)
+ 
+ 	spin_unlock(&lpi_lock);
+ 
+-	kfree(bitmap);
++	kfree(map->lpi_map);
++	kfree(map->col_map);
+ }
+ 
+ /*
+@@ -1099,11 +1134,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ 	struct its_device *dev;
+ 	unsigned long *lpi_map;
+ 	unsigned long flags;
++	u16 *col_map = NULL;
+ 	void *itt;
+ 	int lpi_base;
+ 	int nr_lpis;
+ 	int nr_ites;
+-	int cpu;
+ 	int sz;
+ 
+ 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+@@ -1117,20 +1152,24 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
+ 	itt = kzalloc(sz, GFP_KERNEL);
+ 	lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis);
++	if (lpi_map)
++		col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL);
+ 
+-	if (!dev || !itt || !lpi_map) {
++	if (!dev || !itt || !lpi_map || !col_map) {
+ 		kfree(dev);
+ 		kfree(itt);
+ 		kfree(lpi_map);
++		kfree(col_map);
+ 		return NULL;
+ 	}
+ 
+ 	dev->its = its;
+ 	dev->itt = itt;
+ 	dev->nr_ites = nr_ites;
+-	dev->lpi_map = lpi_map;
+-	dev->lpi_base = lpi_base;
+-	dev->nr_lpis = nr_lpis;
++	dev->event_map.lpi_map = lpi_map;
++	dev->event_map.col_map = col_map;
++	dev->event_map.lpi_base = lpi_base;
++	dev->event_map.nr_lpis = nr_lpis;
+ 	dev->device_id = dev_id;
+ 	INIT_LIST_HEAD(&dev->entry);
+ 
+@@ -1138,10 +1177,6 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
+ 	list_add(&dev->entry, &its->its_device_list);
+ 	raw_spin_unlock_irqrestore(&its->lock, flags);
+ 
+-	/* Bind the device to the first possible CPU */
+-	cpu = cpumask_first(cpu_online_mask);
+-	dev->collection = &its->collections[cpu];
+-
+ 	/* Map device to its ITT */
+ 	its_send_mapd(dev, 1);
+ 
+@@ -1163,12 +1198,13 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+ {
+ 	int idx;
+ 
+-	idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis);
+-	if (idx == dev->nr_lpis)
++	idx = find_first_zero_bit(dev->event_map.lpi_map,
++				  dev->event_map.nr_lpis);
++	if (idx == dev->event_map.nr_lpis)
+ 		return -ENOSPC;
+ 
+-	*hwirq = dev->lpi_base + idx;
+-	set_bit(idx, dev->lpi_map);
++	*hwirq = dev->event_map.lpi_base + idx;
++	set_bit(idx, dev->event_map.lpi_map);
+ 
+ 	return 0;
+ }
+@@ -1288,7 +1324,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ 		irq_domain_set_hwirq_and_chip(domain, virq + i,
+ 					      hwirq, &its_irq_chip, its_dev);
+ 		dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n",
+-			(int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i);
++			(int)(hwirq - its_dev->event_map.lpi_base),
++			(int)hwirq, virq + i);
+ 	}
+ 
+ 	return 0;
+@@ -1300,6 +1337,9 @@ static void its_irq_domain_activate(struct irq_domain *domain,
+ 	struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ 	u32 event = its_get_event_id(d);
+ 
++	/* Bind the LPI to the first possible CPU */
++	its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask);
++
+ 	/* Map the GIC IRQ and event to the device */
+ 	its_send_mapvi(its_dev, d->hwirq, event);
+ }
+@@ -1327,17 +1367,16 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ 		u32 event = its_get_event_id(data);
+ 
+ 		/* Mark interrupt index as unused */
+-		clear_bit(event, its_dev->lpi_map);
++		clear_bit(event, its_dev->event_map.lpi_map);
+ 
+ 		/* Nuke the entry in the domain */
+ 		irq_domain_reset_irq_data(data);
+ 	}
+ 
+ 	/* If all interrupts have been freed, start mopping the floor */
+-	if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) {
+-		its_lpi_free(its_dev->lpi_map,
+-			     its_dev->lpi_base,
+-			     its_dev->nr_lpis);
++	if (bitmap_empty(its_dev->event_map.lpi_map,
++			 its_dev->event_map.nr_lpis)) {
++		its_lpi_free(&its_dev->event_map);
+ 
+ 		/* Unmap device/itt */
+ 		its_send_mapd(its_dev, 0);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 2caf492890d6..e8d84566f311 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1053,13 +1053,10 @@ static struct dm_rq_target_io *tio_from_request(struct request *rq)
+  */
+ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+ {
+-	int nr_requests_pending;
+-
+ 	atomic_dec(&md->pending[rw]);
+ 
+ 	/* nudge anyone waiting on suspend queue */
+-	nr_requests_pending = md_in_flight(md);
+-	if (!nr_requests_pending)
++	if (!md_in_flight(md))
+ 		wake_up(&md->wait);
+ 
+ 	/*
+@@ -1071,8 +1068,7 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
+ 	if (run_queue) {
+ 		if (md->queue->mq_ops)
+ 			blk_mq_run_hw_queues(md->queue, true);
+-		else if (!nr_requests_pending ||
+-			 (nr_requests_pending >= md->queue->nr_congestion_on))
++		else
+ 			blk_run_queue_async(md->queue);
+ 	}
+ 
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 9157a29c8dbf..cd7b0c1e882d 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
+ 		spin_lock_irqsave(&conf->device_lock, flags);
+ 		if (r1_bio->mddev->degraded == conf->raid_disks ||
+ 		    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
+-		     !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags)))
++		     test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
+ 			uptodate = 1;
+ 		spin_unlock_irqrestore(&conf->device_lock, flags);
+ 	}
+diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
+index d1b55fe62817..e4dc8cdf67a3 100644
+--- a/drivers/misc/cxl/context.c
++++ b/drivers/misc/cxl/context.c
+@@ -113,11 +113,11 @@ static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+ 
+ 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ 		area = ctx->afu->psn_phys;
+-		if (offset > ctx->afu->adapter->ps_size)
++		if (offset >= ctx->afu->adapter->ps_size)
+ 			return VM_FAULT_SIGBUS;
+ 	} else {
+ 		area = ctx->psn_phys;
+-		if (offset > ctx->psn_size)
++		if (offset >= ctx->psn_size)
+ 			return VM_FAULT_SIGBUS;
+ 	}
+ 
+diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
+index 8ccddceead66..de350dd46218 100644
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -73,7 +73,7 @@ static inline void cxl_slbia_core(struct mm_struct *mm)
+ 		spin_lock(&adapter->afu_list_lock);
+ 		for (slice = 0; slice < adapter->slices; slice++) {
+ 			afu = adapter->afu[slice];
+-			if (!afu->enabled)
++			if (!afu || !afu->enabled)
+ 				continue;
+ 			rcu_read_lock();
+ 			idr_for_each_entry(&afu->contexts_idr, ctx, id)
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 3e2968159506..e40bcd03bd47 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -685,7 +685,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
+ 	/* Fill in the data structures */
+ 	devno = MKDEV(MAJOR(mei_devt), dev->minor);
+ 	cdev_init(&dev->cdev, &mei_fops);
+-	dev->cdev.owner = mei_fops.owner;
++	dev->cdev.owner = parent->driver->owner;
+ 
+ 	/* Add the device */
+ 	ret = cdev_add(&dev->cdev, devno, 1);
+diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
+index 9df2b6801f76..d0abdffb0d7c 100644
+--- a/drivers/mmc/host/omap_hsmmc.c
++++ b/drivers/mmc/host/omap_hsmmc.c
+@@ -1062,6 +1062,10 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
+ 
+ 		if (status & (CTO_EN | CCRC_EN))
+ 			end_cmd = 1;
++		if (host->data || host->response_busy) {
++			end_trans = !end_cmd;
++			host->response_busy = 0;
++		}
+ 		if (status & (CTO_EN | DTO_EN))
+ 			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
+ 		else if (status & (CCRC_EN | DCRC_EN))
+@@ -1081,10 +1085,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
+ 			}
+ 			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
+ 		}
+-		if (host->data || host->response_busy) {
+-			end_trans = !end_cmd;
+-			host->response_busy = 0;
+-		}
+ 	}
+ 
+ 	OMAP_HSMMC_WRITE(host->base, STAT, status);
+diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
+index 3497cfaf683c..a870c42731d7 100644
+--- a/drivers/mmc/host/sdhci-esdhc.h
++++ b/drivers/mmc/host/sdhci-esdhc.h
+@@ -45,6 +45,6 @@
+ #define ESDHC_DMA_SYSCTL	0x40c
+ #define ESDHC_DMA_SNOOP		0x00000040
+ 
+-#define ESDHC_HOST_CONTROL_RES	0x05
++#define ESDHC_HOST_CONTROL_RES	0x01
+ 
+ #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
+diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
+index b5103a247bc1..065dc70caa1d 100644
+--- a/drivers/mmc/host/sdhci-pxav3.c
++++ b/drivers/mmc/host/sdhci-pxav3.c
+@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
+ 			goto err_of_parse;
+ 		sdhci_get_of_property(pdev);
+ 		pdata = pxav3_get_mmc_pdata(dev);
++		pdev->dev.platform_data = pdata;
+ 	} else if (pdata) {
+ 		/* on-chip device */
+ 		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index d3dbb28057e9..bec8a307f8cd 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3037,8 +3037,11 @@ int sdhci_add_host(struct sdhci_host *host)
+ 						      GFP_KERNEL);
+ 		host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
+ 		if (!host->adma_table || !host->align_buffer) {
+-			dma_free_coherent(mmc_dev(mmc), host->adma_table_sz,
+-					  host->adma_table, host->adma_addr);
++			if (host->adma_table)
++				dma_free_coherent(mmc_dev(mmc),
++						  host->adma_table_sz,
++						  host->adma_table,
++						  host->adma_addr);
+ 			kfree(host->align_buffer);
+ 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+ 				mmc_hostname(mmc));
+diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
+index 041525d2595c..5d214d135332 100644
+--- a/drivers/net/can/c_can/c_can.c
++++ b/drivers/net/can/c_can/c_can.c
+@@ -592,6 +592,7 @@ static int c_can_start(struct net_device *dev)
+ {
+ 	struct c_can_priv *priv = netdev_priv(dev);
+ 	int err;
++	struct pinctrl *p;
+ 
+ 	/* basic c_can configuration */
+ 	err = c_can_chip_config(dev);
+@@ -604,8 +605,13 @@ static int c_can_start(struct net_device *dev)
+ 
+ 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ 
+-	/* activate pins */
+-	pinctrl_pm_select_default_state(dev->dev.parent);
++	/* Attempt to use "active" if available else use "default" */
++	p = pinctrl_get_select(priv->device, "active");
++	if (!IS_ERR(p))
++		pinctrl_put(p);
++	else
++		pinctrl_pm_select_default_state(priv->device);
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index e9b1810d319f..aede704605c6 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+ 		struct can_frame *cf = (struct can_frame *)skb->data;
+ 		u8 dlc = cf->can_dlc;
+ 
+-		if (!(skb->tstamp.tv64))
+-			__net_timestamp(skb);
+-
+ 		netif_rx(priv->echo_skb[idx]);
+ 		priv->echo_skb[idx] = NULL;
+ 
+@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -589,6 +585,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 
+ 	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+ 	memset(*cf, 0, sizeof(struct can_frame));
+@@ -607,7 +604,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CANFD);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -618,6 +614,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ 
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 
+ 	*cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ 	memset(*cfd, 0, sizeof(struct canfd_frame));
+diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
+index 7deb80dcbe8c..2f9ebad4ff56 100644
+--- a/drivers/net/can/rcar_can.c
++++ b/drivers/net/can/rcar_can.c
+@@ -526,7 +526,7 @@ static int rcar_can_open(struct net_device *ndev)
+ 	napi_enable(&priv->napi);
+ 	err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev);
+ 	if (err) {
+-		netdev_err(ndev, "error requesting interrupt %x\n", ndev->irq);
++		netdev_err(ndev, "error requesting interrupt %d\n", ndev->irq);
+ 		goto out_close;
+ 	}
+ 	can_led_event(ndev, CAN_LED_EVENT_OPEN);
+@@ -758,8 +758,9 @@ static int rcar_can_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	irq = platform_get_irq(pdev, 0);
+-	if (!irq) {
++	if (irq < 0) {
+ 		dev_err(&pdev->dev, "No IRQ resource\n");
++		err = irq;
+ 		goto fail;
+ 	}
+ 
+@@ -823,7 +824,7 @@ static int rcar_can_probe(struct platform_device *pdev)
+ 
+ 	devm_can_led_init(ndev);
+ 
+-	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
++	dev_info(&pdev->dev, "device registered (regs @ %p, IRQ%d)\n",
+ 		 priv->regs, ndev->irq);
+ 
+ 	return 0;
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index f64f5290d6f8..a23a7af8eb9a 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
+ 	if (!skb)
+ 		return;
+ 
+-	__net_timestamp(skb);
+ 	skb->dev = sl->dev;
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+@@ -215,6 +214,7 @@ static void slc_bump(struct slcan *sl)
+ 
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = sl->dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 
+ 	memcpy(skb_put(skb, sizeof(struct can_frame)),
+ 	       &cf, sizeof(struct can_frame));
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index bf63fee4e743..34c625ea2801 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1221,17 +1221,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
+ 	struct spi_device *spi = to_spi_device(dev);
+ 	struct mcp251x_priv *priv = spi_get_drvdata(spi);
+ 
+-	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
++	if (priv->after_suspend & AFTER_SUSPEND_POWER)
+ 		mcp251x_power_enable(priv->power, 1);
++
++	if (priv->after_suspend & AFTER_SUSPEND_UP) {
++		mcp251x_power_enable(priv->transceiver, 1);
+ 		queue_work(priv->wq, &priv->restart_work);
+ 	} else {
+-		if (priv->after_suspend & AFTER_SUSPEND_UP) {
+-			mcp251x_power_enable(priv->transceiver, 1);
+-			queue_work(priv->wq, &priv->restart_work);
+-		} else {
+-			priv->after_suspend = 0;
+-		}
++		priv->after_suspend = 0;
+ 	}
++
+ 	priv->force_quit = 0;
+ 	enable_irq(spi->irq);
+ 	return 0;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 0ce868de855d..674f367087c5 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
+ 	skb->dev       = dev;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+-	if (!(skb->tstamp.tv64))
+-		__net_timestamp(skb);
+-
+ 	netif_rx_ni(skb);
+ }
+ 
+diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+index 8e604a3931ca..ef20be084b24 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
+ 		hw_addr = (const u8 *)(mac_override +
+ 				 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
+ 
+-		/* The byte order is little endian 16 bit, meaning 214365 */
+-		data->hw_addr[0] = hw_addr[1];
+-		data->hw_addr[1] = hw_addr[0];
+-		data->hw_addr[2] = hw_addr[3];
+-		data->hw_addr[3] = hw_addr[2];
+-		data->hw_addr[4] = hw_addr[5];
+-		data->hw_addr[5] = hw_addr[4];
++		/*
++		 * Store the MAC address from MAO section.
++		 * No byte swapping is required in MAO section
++		 */
++		memcpy(data->hw_addr, hw_addr, ETH_ALEN);
+ 
+ 		/*
+ 		 * Force the use of the OTP MAC address in case of reserved MAC
+diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
+index ef32e177f662..281451c274ca 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
+@@ -225,7 +225,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+ 
+ 	if (info->band == IEEE80211_BAND_2GHZ &&
+ 	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+-		rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS;
++		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+ 	else
+ 		rate_flags =
+ 			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
+index dc179094e6a0..37e6a6f91487 100644
+--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
+@@ -2515,6 +2515,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ 		trans->hw_rev = (trans->hw_rev & 0xfff0) |
+ 				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
+ 
++		ret = iwl_pcie_prepare_card_hw(trans);
++		if (ret) {
++			IWL_WARN(trans, "Exit HW not ready\n");
++			goto out_pci_disable_msi;
++		}
++
+ 		/*
+ 		 * in-order to recognize C step driver should read chip version
+ 		 * id located at the AUX bus MISC address space.
+diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+index 5ac59fbb2440..d3a3be7476e1 100644
+--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
++++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
+ 			     unsigned num_configs)
+ {
+ 	struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
+-	const struct imx1_pinctrl_soc_info *info = ipctl->info;
+ 	int i;
+ 
+ 	for (i = 0; i != num_configs; ++i) {
+ 		imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
+ 
+ 		dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
+-			info->pins[pin_id].name);
++			pin_desc_get(pctldev, pin_id)->name);
+ 	}
+ 
+ 	return 0;
+diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
+index ff828117798f..8de135174e82 100644
+--- a/drivers/regulator/s2mps11.c
++++ b/drivers/regulator/s2mps11.c
+@@ -34,6 +34,8 @@
+ #include <linux/mfd/samsung/s2mps14.h>
+ #include <linux/mfd/samsung/s2mpu02.h>
+ 
++/* The highest number of possible regulators for supported devices. */
++#define S2MPS_REGULATOR_MAX		S2MPS13_REGULATOR_MAX
+ struct s2mps11_info {
+ 	unsigned int rdev_num;
+ 	int ramp_delay2;
+@@ -49,7 +51,7 @@ struct s2mps11_info {
+ 	 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
+ 	 * the suspend mode was enabled.
+ 	 */
+-	unsigned long long s2mps14_suspend_state:50;
++	DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
+ 
+ 	/* Array of size rdev_num with GPIO-s for external sleep control */
+ 	int *ext_control_gpio;
+@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+ 	switch (s2mps11->dev_type) {
+ 	case S2MPS13X:
+ 	case S2MPS14X:
+-		if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
++		if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
+ 			val = S2MPS14_ENABLE_SUSPEND;
+ 		else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
+ 			val = S2MPS14_ENABLE_EXT_CONTROL;
+@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
+ 			val = rdev->desc->enable_mask;
+ 		break;
+ 	case S2MPU02:
+-		if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev)))
++		if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
+ 			val = S2MPU02_ENABLE_SUSPEND;
+ 		else
+ 			val = rdev->desc->enable_mask;
+@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev));
++	set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
+ 	/*
+ 	 * Don't enable suspend mode if regulator is already disabled because
+ 	 * this would effectively turn on the regulator for a short time after
+@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
+ 	case S2MPS11X:
+ 		s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
+ 		regulators = s2mps11_regulators;
++		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ 		break;
+ 	case S2MPS13X:
+ 		s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
+ 		regulators = s2mps13_regulators;
++		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ 		break;
+ 	case S2MPS14X:
+ 		s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
+ 		regulators = s2mps14_regulators;
++		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ 		break;
+ 	case S2MPU02:
+ 		s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
+ 		regulators = s2mpu02_regulators;
++		BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
+ 		break;
+ 	default:
+ 		dev_err(&pdev->dev, "Invalid device type: %u\n",
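
Two problems motivate this hunk: the old 50-bit bitfield silently capped the number of regulators, and indexing it with `1 << rdev_get_id(rdev)` is an int shift, which is undefined for IDs of 31 and above. DECLARE_BITMAP() with set_bit()/test_bit() has neither limit, and the BUILD_BUG_ON() lines turn a too-small S2MPS_REGULATOR_MAX into a compile error. A simplified, non-atomic userspace rendering of the bitmap helpers (the kernel versions live in <linux/bitmap.h> and <linux/bitops.h>):

	#include <stdio.h>
	#include <limits.h>

	#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
	#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

	static void set_bit(int nr, unsigned long *map)
	{
		map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
	}

	static int test_bit(int nr, const unsigned long *map)
	{
		return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
	}

	#define S2MPS_REGULATOR_MAX 64	/* hypothetical upper bound */

	int main(void)
	{
		DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX) = { 0 };

		set_bit(40, suspend_state);	/* note: 1 << 40 would overflow int */
		printf("regulator 40 suspended: %d\n", test_bit(40, suspend_state));
		return 0;
	}
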
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index 0e6ee3ca30e6..e9ae6b924c70 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -68,7 +68,7 @@
+  * |                              |                    | 0xd101-0xd1fe	|
+  * |                              |                    | 0xd214-0xd2fe	|
+  * | Target Mode		  |	  0xe079       |		|
+- * | Target Mode Management	  |	  0xf072       | 0xf002		|
++ * | Target Mode Management	  |	  0xf080       | 0xf002		|
+  * |                              |                    | 0xf046-0xf049  |
+  * | Target Mode Task Management  |	  0x1000b      |		|
+  * ----------------------------------------------------------------------
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 285cb204f300..998498e2341b 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -2924,6 +2924,7 @@ qla2x00_rport_del(void *data)
+ 	struct fc_rport *rport;
+ 	scsi_qla_host_t *vha = fcport->vha;
+ 	unsigned long flags;
++	unsigned long vha_flags;
+ 
+ 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+ 	rport = fcport->drport ? fcport->drport: fcport->rport;
+@@ -2935,7 +2936,9 @@ qla2x00_rport_del(void *data)
+ 		 * Release the target mode FC NEXUS in qla_target.c code
+ 		 * if target mod is enabled.
+ 		 */
++		spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags);
+ 		qlt_fc_port_deleted(vha, fcport);
++		spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags);
+ 	}
+ }
+ 
+@@ -3303,6 +3306,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
+ 	 * Create target mode FC NEXUS in qla_target.c if target mode is
+ 	 * enabled..
+ 	 */
++
+ 	qlt_fc_port_added(vha, fcport);
+ 
+ 	spin_lock_irqsave(fcport->vha->host->host_lock, flags);
+@@ -3460,20 +3464,43 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 			if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
+ 				continue;
+ 
+-			if (fcport->scan_state == QLA_FCPORT_SCAN &&
+-			    atomic_read(&fcport->state) == FCS_ONLINE) {
+-				qla2x00_mark_device_lost(vha, fcport,
+-				    ql2xplogiabsentdevice, 0);
+-				if (fcport->loop_id != FC_NO_LOOP_ID &&
+-				    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
+-				    fcport->port_type != FCT_INITIATOR &&
+-				    fcport->port_type != FCT_BROADCAST) {
+-					ha->isp_ops->fabric_logout(vha,
+-					    fcport->loop_id,
+-					    fcport->d_id.b.domain,
+-					    fcport->d_id.b.area,
+-					    fcport->d_id.b.al_pa);
+-					qla2x00_clear_loop_id(fcport);
++			if (fcport->scan_state == QLA_FCPORT_SCAN) {
++				if (qla_ini_mode_enabled(base_vha) &&
++				    atomic_read(&fcport->state) == FCS_ONLINE) {
++					qla2x00_mark_device_lost(vha, fcport,
++					    ql2xplogiabsentdevice, 0);
++					if (fcport->loop_id != FC_NO_LOOP_ID &&
++					    (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
++					    fcport->port_type != FCT_INITIATOR &&
++					    fcport->port_type != FCT_BROADCAST) {
++						ha->isp_ops->fabric_logout(vha,
++						    fcport->loop_id,
++						    fcport->d_id.b.domain,
++						    fcport->d_id.b.area,
++						    fcport->d_id.b.al_pa);
++						qla2x00_clear_loop_id(fcport);
++					}
++				} else if (!qla_ini_mode_enabled(base_vha)) {
++					/*
++					 * In target mode, explicitly kill
++					 * sessions and log out of devices
++					 * that are gone, so that we don't
++					 * end up with an initiator using the
++					 * wrong ACL (if the fabric recycles
++					 * an FC address and we have a stale
++					 * session around) and so that we don't
++					 * report initiators that are no longer
++					 * on the fabric.
++					 */
++					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
++					    "port gone, logging out/killing session: "
++					    "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
++					    "scan_state %d\n",
++					    fcport->port_name,
++					    atomic_read(&fcport->state),
++					    fcport->flags, fcport->fc4_type,
++					    fcport->scan_state);
++					qlt_fc_port_deleted(vha, fcport);
+ 				}
+ 			}
+ 		}
+@@ -3494,6 +3521,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 			    (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ 				continue;
+ 
++			/*
++			 * If we're not an initiator, skip looking for devices
++			 * and logging in.  There's no reason for us to do it,
++			 * and it seems to actively cause problems in target
++			 * mode if we race with the initiator logging into us
++			 * (we might get the "port ID used" status back from
++			 * our login command and log out the initiator, which
++			 * seems to cause havoc).
++			 */
++			if (!qla_ini_mode_enabled(base_vha)) {
++				if (fcport->scan_state == QLA_FCPORT_FOUND) {
++					ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
++					    "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
++					    "scan_state %d (initiator mode disabled; skipping "
++					    "login)\n", fcport->port_name,
++					    atomic_read(&fcport->state),
++					    fcport->flags, fcport->fc4_type,
++					    fcport->scan_state);
++				}
++				continue;
++			}
++
+ 			if (fcport->loop_id == FC_NO_LOOP_ID) {
+ 				fcport->loop_id = next_loopid;
+ 				rval = qla2x00_find_new_loop_id(
+@@ -3520,16 +3569,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
+ 			    test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ 				break;
+ 
+-			/* Find a new loop ID to use. */
+-			fcport->loop_id = next_loopid;
+-			rval = qla2x00_find_new_loop_id(base_vha, fcport);
+-			if (rval != QLA_SUCCESS) {
+-				/* Ran out of IDs to use */
+-				break;
+-			}
++			/*
++			 * If we're not an initiator, skip looking for devices
++			 * and logging in.  There's no reason for us to do it,
++			 * and it seems to actively cause problems in target
++			 * mode if we race with the initiator logging into us
++			 * (we might get the "port ID used" status back from
++			 * our login command and log out the initiator, which
++			 * seems to cause havoc).
++			 */
++			if (qla_ini_mode_enabled(base_vha)) {
++				/* Find a new loop ID to use. */
++				fcport->loop_id = next_loopid;
++				rval = qla2x00_find_new_loop_id(base_vha,
++				    fcport);
++				if (rval != QLA_SUCCESS) {
++					/* Ran out of IDs to use */
++					break;
++				}
+ 
+-			/* Login and update database */
+-			qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
++				/* Login and update database */
++				qla2x00_fabric_dev_login(vha, fcport,
++				    &next_loopid);
++			} else {
++				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
++					"new port %8phC state 0x%x flags 0x%x fc4_type "
++					"0x%x scan_state %d (initiator mode disabled; "
++					"skipping login)\n",
++					fcport->port_name,
++					atomic_read(&fcport->state),
++					fcport->flags, fcport->fc4_type,
++					fcport->scan_state);
++			}
+ 
+ 			list_move_tail(&fcport->list, &vha->vp_fcports);
+ 		}
+@@ -3725,11 +3796,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ 			fcport->fp_speed = new_fcport->fp_speed;
+ 
+ 			/*
+-			 * If address the same and state FCS_ONLINE, nothing
+-			 * changed.
++			 * If address the same and state FCS_ONLINE
++			 * (or in target mode), nothing changed.
+ 			 */
+ 			if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
+-			    atomic_read(&fcport->state) == FCS_ONLINE) {
++			    (atomic_read(&fcport->state) == FCS_ONLINE ||
++			     !qla_ini_mode_enabled(base_vha))) {
+ 				break;
+ 			}
+ 
+@@ -3749,6 +3821,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ 			 * Log it out if still logged in and mark it for
+ 			 * relogin later.
+ 			 */
++			if (!qla_ini_mode_enabled(base_vha)) {
++				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
++					 "port changed FC ID, %8phC"
++					 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
++					 fcport->port_name,
++					 fcport->d_id.b.domain,
++					 fcport->d_id.b.area,
++					 fcport->d_id.b.al_pa,
++					 fcport->loop_id,
++					 new_fcport->d_id.b.domain,
++					 new_fcport->d_id.b.area,
++					 new_fcport->d_id.b.al_pa);
++				fcport->d_id.b24 = new_fcport->d_id.b24;
++				break;
++			}
++
+ 			fcport->d_id.b24 = new_fcport->d_id.b24;
+ 			fcport->flags |= FCF_LOGIN_NEEDED;
+ 			if (fcport->loop_id != FC_NO_LOOP_ID &&
+@@ -3768,6 +3856,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
+ 		if (found)
+ 			continue;
+ 		/* If device was not in our fcports list, then add it. */
++		new_fcport->scan_state = QLA_FCPORT_FOUND;
+ 		list_add_tail(&new_fcport->list, new_fcports);
+ 
+ 		/* Allocate a new replacement fcport. */
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index fe8a8d157e22..496a733d0ca3 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -113,6 +113,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+ static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ 	struct atio_from_isp *atio, uint16_t status, int qfull);
+ static void qlt_disable_vha(struct scsi_qla_host *vha);
++static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+ /*
+  * Global Variables
+  */
+@@ -431,10 +432,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+ 
+ 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ 	if (loop_id == 0xFFFF) {
+-#if 0 /* FIXME: Re-enable Global event handling.. */
+ 		/* Global event */
+-		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+-		qlt_clear_tgt_db(ha->tgt.qla_tgt);
++		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
++		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
++#if 0 /* FIXME: do we need to choose a session here? */
+ 		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ 			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ 			    typeof(*sess), sess_list_entry);
+@@ -782,25 +783,20 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 
+ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ {
+-	struct qla_hw_data *ha = vha->hw;
+ 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+ 	struct qla_tgt_sess *sess;
+-	unsigned long flags;
+ 
+ 	if (!vha->hw->tgt.tgt_ops)
+ 		return;
+ 
+-	if (!tgt || (fcport->port_type != FCT_INITIATOR))
++	if (!tgt)
+ 		return;
+ 
+-	spin_lock_irqsave(&ha->hardware_lock, flags);
+ 	if (tgt->tgt_stop) {
+-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 		return;
+ 	}
+ 	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ 	if (!sess) {
+-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ 		return;
+ 	}
+ 
+@@ -808,7 +804,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+ 
+ 	sess->local = 1;
+ 	qlt_schedule_sess_for_deletion(sess, false);
+-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+ 
+ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+@@ -2347,9 +2342,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ 		res = qlt_build_ctio_crc2_pkt(&prm, vha);
+ 	else
+ 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
+-	if (unlikely(res != 0))
++	if (unlikely(res != 0)) {
++		vha->req->cnt += full_req_cnt;
+ 		goto out_unmap_unlock;
+-
++	}
+ 
+ 	pkt = (struct ctio7_to_24xx *)prm.pkt;
+ 
+@@ -2487,8 +2483,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+ 	else
+ 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ 
+-	if (unlikely(res != 0))
++	if (unlikely(res != 0)) {
++		vha->req->cnt += prm.req_cnt;
+ 		goto out_unlock_free_unmap;
++	}
++
+ 	pkt = (struct ctio7_to_24xx *)prm.pkt;
+ 	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ 	    CTIO7_FLAGS_STATUS_MODE_0);
+@@ -2717,7 +2716,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ 	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+ {
+-	unsigned long flags;
++	unsigned long flags = 0;
+ 	int rc;
+ 
+ 	if (qlt_issue_marker(vha, ha_locked) < 0)
+@@ -2733,17 +2732,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ 	rc = __qlt_send_term_exchange(vha, cmd, atio);
+ 	if (rc == -ENOMEM)
+ 		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
+-	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+ 
+ done:
+ 	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+ 	    !cmd->cmd_sent_to_fw)) {
+-		if (!ha_locked && !in_interrupt())
+-			msleep(250); /* just in case */
+-
+-		qlt_unmap_sg(vha, cmd);
++		if (cmd->sg_mapped)
++			qlt_unmap_sg(vha, cmd);
+ 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ 	}
++
++	if (!ha_locked)
++		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
++
+ 	return;
+ }
+ 
+@@ -3347,6 +3347,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+ 	cmd->loop_id = sess->loop_id;
+ 	cmd->conf_compl_supported = sess->conf_compl_supported;
+ 
++	cmd->cmd_flags = 0;
++	cmd->jiffies_at_alloc = get_jiffies_64();
++
++	cmd->reset_count = vha->hw->chip_reset;
++
+ 	return cmd;
+ }
+ 
+@@ -3453,11 +3458,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ 		return -ENOMEM;
+ 	}
+ 
+-	cmd->cmd_flags = 0;
+-	cmd->jiffies_at_alloc = get_jiffies_64();
+-
+-	cmd->reset_count = vha->hw->chip_reset;
+-
+ 	cmd->cmd_in_wq = 1;
+ 	cmd->cmd_flags |= BIT_0;
+ 	INIT_WORK(&cmd->work, qlt_do_work);
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index c95a4e943fc6..59c31bf88d92 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
+ 			    scmd->sdb.length);
+ 		scmd->sdb.table.sgl = &ses->sense_sgl;
+ 		scmd->sc_data_direction = DMA_FROM_DEVICE;
+-		scmd->sdb.table.nents = 1;
++		scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
+ 		scmd->cmnd[0] = REQUEST_SENSE;
+ 		scmd->cmnd[4] = scmd->sdb.length;
+ 		scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index b1a263137a23..448ebdaa3d69 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
+ 
+ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
+ {
+-	if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
++	if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
+ 		return;
+ 	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
+ }
+@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
+ 
+ 	if (mq) {
+ 		if (nents <= SCSI_MAX_SG_SEGMENTS) {
+-			sdb->table.nents = nents;
+-			sg_init_table(sdb->table.sgl, sdb->table.nents);
++			sdb->table.nents = sdb->table.orig_nents = nents;
++			sg_init_table(sdb->table.sgl, nents);
+ 			return 0;
+ 		}
+ 		first_chunk = sdb->table.sgl;
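
The scsi_lib.c and scsi_error.c hunks enforce one invariant: __sg_free_table() walks orig_nents (entries allocated) rather than nents (entries holding data), so any path that builds a scatterlist table by hand must keep both fields in step or teardown frees the wrong amount. A toy illustration of the invariant:

	#include <stdio.h>

	struct sg_table {
		unsigned int nents;	 /* entries that carry data */
		unsigned int orig_nents; /* entries allocated; what free walks */
	};

	/* stand-in for __sg_free_table(): sized only by orig_nents */
	static void fake_sg_free_table(const struct sg_table *t)
	{
		printf("releasing %u entries\n", t->orig_nents);
	}

	int main(void)
	{
		struct sg_table t;

		/* the fix: assign both fields together when building inline */
		t.nents = t.orig_nents = 4;
		fake_sg_free_table(&t);
		return 0;
	}
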
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 1ac38e73df7e..9ad41168d26d 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -859,7 +859,7 @@ sdev_store_queue_depth(struct device *dev, struct device_attribute *attr,
+ 
+ 	depth = simple_strtoul(buf, NULL, 0);
+ 
+-	if (depth < 1 || depth > sht->can_queue)
++	if (depth < 1 || depth > sdev->host->can_queue)
+ 		return -EINVAL;
+ 
+ 	retval = sht->change_queue_depth(sdev, depth);
+diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
+index 9a1c34205254..525ab4c1f306 100644
+--- a/drivers/scsi/st.c
++++ b/drivers/scsi/st.c
+@@ -1274,9 +1274,9 @@ static int st_open(struct inode *inode, struct file *filp)
+ 	spin_lock(&st_use_lock);
+ 	STp->in_use = 0;
+ 	spin_unlock(&st_use_lock);
+-	scsi_tape_put(STp);
+ 	if (resumed)
+ 		scsi_autopm_put_device(STp->device);
++	scsi_tape_put(STp);
+ 	return retval;
+ 
+ }
+diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
+index 788e2b176a4f..acce90ac7371 100644
+--- a/drivers/spi/spi-img-spfi.c
++++ b/drivers/spi/spi-img-spfi.c
+@@ -40,6 +40,7 @@
+ #define SPFI_CONTROL_SOFT_RESET			BIT(11)
+ #define SPFI_CONTROL_SEND_DMA			BIT(10)
+ #define SPFI_CONTROL_GET_DMA			BIT(9)
++#define SPFI_CONTROL_SE			BIT(8)
+ #define SPFI_CONTROL_TMODE_SHIFT		5
+ #define SPFI_CONTROL_TMODE_MASK			0x7
+ #define SPFI_CONTROL_TMODE_SINGLE		0
+@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+ 	else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
+ 		 xfer->rx_nbits == SPI_NBITS_QUAD)
+ 		val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
++	val |= SPFI_CONTROL_SE;
+ 	spfi_writel(spfi, val, SPFI_CONTROL);
+ }
+ 
+diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
+index f08e812b2984..412b9c86b997 100644
+--- a/drivers/spi/spi-imx.c
++++ b/drivers/spi/spi-imx.c
+@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+ {
+ 	struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ 
+-	if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
+-	    && (transfer->len > spi_imx->tx_wml))
++	if (spi_imx->dma_is_inited
++	    && transfer->len > spi_imx->rx_wml * sizeof(u32)
++	    && transfer->len > spi_imx->tx_wml * sizeof(u32))
+ 		return true;
+ 	return false;
+ }
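
The DMA threshold compares a transfer length in bytes against rx_wml/tx_wml, which are FIFO watermarks counted in 32-bit words; without the sizeof(u32) scaling, transfers only slightly longer than the watermark in bytes were pushed onto the DMA path too early. A small sketch of the corrected predicate:

	#include <stdio.h>
	#include <stdint.h>

	/* watermark is in 32-bit FIFO words; convert before comparing bytes */
	static int can_dma(unsigned int len_bytes, unsigned int wml_words)
	{
		return len_bytes > wml_words * sizeof(uint32_t);
	}

	int main(void)
	{
		/* hypothetical watermark of 32 words == 128 bytes */
		printf("64-byte transfer uses DMA?  %d\n", can_dma(64, 32));  /* 0 */
		printf("256-byte transfer uses DMA? %d\n", can_dma(256, 32)); /* 1 */
		return 0;
	}
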
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index 74e6114ff18f..305a5cbc099a 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -4001,7 +4001,13 @@ get_immediate:
+ 	}
+ 
+ transport_err:
+-	iscsit_take_action_for_connection_exit(conn);
++	/*
++	 * Avoid the normal connection failure code-path if this connection
++	 * is still within LOGIN mode, and iscsi_np process context is
++	 * responsible for cleaning up the early connection failure.
++	 */
++	if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
++		iscsit_take_action_for_connection_exit(conn);
+ out:
+ 	return 0;
+ }
+@@ -4093,7 +4099,7 @@ reject:
+ 
+ int iscsi_target_rx_thread(void *arg)
+ {
+-	int ret;
++	int ret, rc;
+ 	u8 buffer[ISCSI_HDR_LEN], opcode;
+ 	u32 checksum = 0, digest = 0;
+ 	struct iscsi_conn *conn = arg;
+@@ -4103,10 +4109,16 @@ int iscsi_target_rx_thread(void *arg)
+ 	 * connection recovery / failure event can be triggered externally.
+ 	 */
+ 	allow_signal(SIGINT);
++	/*
++	 * Wait for iscsi_post_login_handler() to complete before allowing
++	 * incoming iscsi/tcp socket I/O, and/or failing the connection.
++	 */
++	rc = wait_for_completion_interruptible(&conn->rx_login_comp);
++	if (rc < 0)
++		return 0;
+ 
+ 	if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
+ 		struct completion comp;
+-		int rc;
+ 
+ 		init_completion(&comp);
+ 		rc = wait_for_completion_interruptible(&comp);
+@@ -4543,7 +4555,18 @@ static void iscsit_logout_post_handler_closesession(
+ 	struct iscsi_conn *conn)
+ {
+ 	struct iscsi_session *sess = conn->sess;
+-	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
++	int sleep = 1;
++	/*
++	 * Traditional iscsi/tcp will invoke this logic from TX thread
++	 * context during session logout, so clear tx_thread_active and
++	 * sleep if iscsit_close_connection() has not already occurred.
++	 *
++	 * Since iser-target invokes this logic from its own workqueue,
++	 * always sleep waiting for RX/TX thread shutdown to complete
++	 * within iscsit_close_connection().
++	 */
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+@@ -4557,7 +4580,10 @@ static void iscsit_logout_post_handler_closesession(
+ static void iscsit_logout_post_handler_samecid(
+ 	struct iscsi_conn *conn)
+ {
+-	int sleep = cmpxchg(&conn->tx_thread_active, true, false);
++	int sleep = 1;
++
++	if (conn->conn_transport->transport_type == ISCSI_TCP)
++		sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ 
+ 	atomic_set(&conn->conn_logout_remove, 0);
+ 	complete(&conn->conn_logout_comp);
+@@ -4776,6 +4802,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 	struct iscsi_session *sess;
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+ 	struct se_session *se_sess, *se_sess_tmp;
++	LIST_HEAD(free_list);
+ 	int session_count = 0;
+ 
+ 	spin_lock_bh(&se_tpg->session_lock);
+@@ -4797,14 +4824,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
+ 		}
+ 		atomic_set(&sess->session_reinstatement, 1);
+ 		spin_unlock(&sess->conn_lock);
+-		spin_unlock_bh(&se_tpg->session_lock);
+ 
+-		iscsit_free_session(sess);
+-		spin_lock_bh(&se_tpg->session_lock);
++		list_move_tail(&se_sess->sess_list, &free_list);
++	}
++	spin_unlock_bh(&se_tpg->session_lock);
++
++	list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
++		sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ 
++		iscsit_free_session(sess);
+ 		session_count++;
+ 	}
+-	spin_unlock_bh(&se_tpg->session_lock);
+ 
+ 	pr_debug("Released %d iSCSI Session(s) from Target Portal"
+ 			" Group: %hu\n", session_count, tpg->tpgt);
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index 70d799dfab03..c3bccaddb592 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
+ 	init_completion(&conn->conn_logout_comp);
+ 	init_completion(&conn->rx_half_close_comp);
+ 	init_completion(&conn->tx_half_close_comp);
++	init_completion(&conn->rx_login_comp);
+ 	spin_lock_init(&conn->cmd_lock);
+ 	spin_lock_init(&conn->conn_usage_lock);
+ 	spin_lock_init(&conn->immed_queue_lock);
+@@ -699,7 +700,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
+ 		iscsit_start_nopin_timer(conn);
+ }
+ 
+-static int iscsit_start_kthreads(struct iscsi_conn *conn)
++int iscsit_start_kthreads(struct iscsi_conn *conn)
+ {
+ 	int ret = 0;
+ 
+@@ -734,6 +735,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
+ 
+ 	return 0;
+ out_tx:
++	send_sig(SIGINT, conn->tx_thread, 1);
+ 	kthread_stop(conn->tx_thread);
+ 	conn->tx_thread_active = false;
+ out_bitmap:
+@@ -744,7 +746,7 @@ out_bitmap:
+ 	return ret;
+ }
+ 
+-int iscsi_post_login_handler(
++void iscsi_post_login_handler(
+ 	struct iscsi_np *np,
+ 	struct iscsi_conn *conn,
+ 	u8 zero_tsih)
+@@ -754,7 +756,6 @@ int iscsi_post_login_handler(
+ 	struct se_session *se_sess = sess->se_sess;
+ 	struct iscsi_portal_group *tpg = sess->tpg;
+ 	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
+-	int rc;
+ 
+ 	iscsit_inc_conn_usage_count(conn);
+ 
+@@ -795,10 +796,6 @@ int iscsi_post_login_handler(
+ 			sess->sess_ops->InitiatorName);
+ 		spin_unlock_bh(&sess->conn_lock);
+ 
+-		rc = iscsit_start_kthreads(conn);
+-		if (rc)
+-			return rc;
+-
+ 		iscsi_post_login_start_timers(conn);
+ 		/*
+ 		 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -807,15 +804,20 @@ int iscsi_post_login_handler(
+ 		iscsit_thread_get_cpumask(conn);
+ 		conn->conn_rx_reset_cpumask = 1;
+ 		conn->conn_tx_reset_cpumask = 1;
+-
++		/*
++		 * Wakeup the sleeping iscsi_target_rx_thread() now that
++		 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++		 */
++		complete(&conn->rx_login_comp);
+ 		iscsit_dec_conn_usage_count(conn);
++
+ 		if (stop_timer) {
+ 			spin_lock_bh(&se_tpg->session_lock);
+ 			iscsit_stop_time2retain_timer(sess);
+ 			spin_unlock_bh(&se_tpg->session_lock);
+ 		}
+ 		iscsit_dec_session_usage_count(sess);
+-		return 0;
++		return;
+ 	}
+ 
+ 	iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
+@@ -856,10 +858,6 @@ int iscsi_post_login_handler(
+ 		" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
+ 	spin_unlock_bh(&se_tpg->session_lock);
+ 
+-	rc = iscsit_start_kthreads(conn);
+-	if (rc)
+-		return rc;
+-
+ 	iscsi_post_login_start_timers(conn);
+ 	/*
+ 	 * Determine CPU mask to ensure connection's RX and TX kthreads
+@@ -868,10 +866,12 @@ int iscsi_post_login_handler(
+ 	iscsit_thread_get_cpumask(conn);
+ 	conn->conn_rx_reset_cpumask = 1;
+ 	conn->conn_tx_reset_cpumask = 1;
+-
++	/*
++	 * Wakeup the sleeping iscsi_target_rx_thread() now that
++	 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
++	 */
++	complete(&conn->rx_login_comp);
+ 	iscsit_dec_conn_usage_count(conn);
+-
+-	return 0;
+ }
+ 
+ static void iscsi_handle_login_thread_timeout(unsigned long data)
+@@ -1436,23 +1436,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ 	if (ret < 0)
+ 		goto new_sess_out;
+ 
+-	if (!conn->sess) {
+-		pr_err("struct iscsi_conn session pointer is NULL!\n");
+-		goto new_sess_out;
+-	}
+-
+ 	iscsi_stop_login_thread_timer(np);
+ 
+-	if (signal_pending(current))
+-		goto new_sess_out;
+-
+ 	if (ret == 1) {
+ 		tpg_np = conn->tpg_np;
+ 
+-		ret = iscsi_post_login_handler(np, conn, zero_tsih);
+-		if (ret < 0)
+-			goto new_sess_out;
+-
++		iscsi_post_login_handler(np, conn, zero_tsih);
+ 		iscsit_deaccess_np(np, tpg, tpg_np);
+ 	}
+ 
+diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
+index 29d098324b7f..55cbf4533544 100644
+--- a/drivers/target/iscsi/iscsi_target_login.h
++++ b/drivers/target/iscsi/iscsi_target_login.h
+@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+ extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+ extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
++extern int iscsit_start_kthreads(struct iscsi_conn *);
++extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
+ extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
+ 				bool, bool);
+ extern int iscsi_target_login_thread(void *);
+diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
+index 8c02fa34716f..f9cde9141836 100644
+--- a/drivers/target/iscsi/iscsi_target_nego.c
++++ b/drivers/target/iscsi/iscsi_target_nego.c
+@@ -17,6 +17,7 @@
+  ******************************************************************************/
+ 
+ #include <linux/ctype.h>
++#include <linux/kthread.h>
+ #include <scsi/iscsi_proto.h>
+ #include <target/target_core_base.h>
+ #include <target/target_core_fabric.h>
+@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 		ntohl(login_rsp->statsn), login->rsp_length);
+ 
+ 	padding = ((-login->rsp_length) & 3);
++	/*
++	 * Before sending the last login response containing the transition
++	 * bit for full-feature-phase, go ahead and start up TX/RX threads
++	 * now to avoid potential resource allocation failures after the
++	 * final login response has been sent.
++	 */
++	if (login->login_complete) {
++		int rc = iscsit_start_kthreads(conn);
++		if (rc) {
++			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
++					    ISCSI_LOGIN_STATUS_NO_RESOURCES);
++			return -1;
++		}
++	}
+ 
+ 	if (conn->conn_transport->iscsit_put_login_tx(conn, login,
+ 					login->rsp_length + padding) < 0)
+-		return -1;
++		goto err;
+ 
+ 	login->rsp_length		= 0;
+ 	mutex_lock(&sess->cmdsn_mutex);
+@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
+ 	mutex_unlock(&sess->cmdsn_mutex);
+ 
+ 	return 0;
++
++err:
++	if (login->login_complete) {
++		if (conn->rx_thread && conn->rx_thread_active) {
++			send_sig(SIGINT, conn->rx_thread, 1);
++			kthread_stop(conn->rx_thread);
++		}
++		if (conn->tx_thread && conn->tx_thread_active) {
++			send_sig(SIGINT, conn->tx_thread, 1);
++			kthread_stop(conn->tx_thread);
++		}
++		spin_lock(&iscsit_global->ts_bitmap_lock);
++		bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
++				      get_order(1));
++		spin_unlock(&iscsit_global->ts_bitmap_lock);
++	}
++	return -1;
+ }
+ 
+ static void iscsi_target_sk_data_ready(struct sock *sk)
+diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
+index 396344cb011f..16ed0b6c7f9c 100644
+--- a/drivers/tty/n_tty.c
++++ b/drivers/tty/n_tty.c
+@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
+  *	Locking: ctrl_lock
+  */
+ 
+-static void isig(int sig, struct tty_struct *tty)
++static void __isig(int sig, struct tty_struct *tty)
+ {
+-	struct n_tty_data *ldata = tty->disc_data;
+ 	struct pid *tty_pgrp = tty_get_pgrp(tty);
+ 	if (tty_pgrp) {
+ 		kill_pgrp(tty_pgrp, sig, 1);
+ 		put_pid(tty_pgrp);
+ 	}
++}
+ 
+-	if (!L_NOFLSH(tty)) {
++static void isig(int sig, struct tty_struct *tty)
++{
++	struct n_tty_data *ldata = tty->disc_data;
++
++	if (L_NOFLSH(tty)) {
++		/* signal only */
++		__isig(sig, tty);
++
++	} else { /* signal and flush */
+ 		up_read(&tty->termios_rwsem);
+ 		down_write(&tty->termios_rwsem);
+ 
++		__isig(sig, tty);
++
+ 		/* clear echo buffer */
+ 		mutex_lock(&ldata->output_lock);
+ 		ldata->echo_head = ldata->echo_tail = 0;
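
Worth noting in this n_tty refactor: rwsems cannot be upgraded in place, so the flush path has to up_read() termios_rwsem before taking it for write; factoring the signal delivery into __isig() lets the NOFLSH branch signal without the write lock at all, while the flush branch signals once the write lock is held. A sketch of the drop-then-retake dance with a POSIX rwlock (note the window where neither lock is held):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

	/* called with the read lock held; returns with it held again */
	static void signal_and_flush(void)
	{
		/* no upgrade primitive exists: release the read side first */
		pthread_rwlock_unlock(&rw);	/* up_read(&tty->termios_rwsem) */
		pthread_rwlock_wrlock(&rw);	/* down_write(...) */

		puts("deliver signal, then flush buffers under the write lock");

		pthread_rwlock_unlock(&rw);	/* drop back to read */
		pthread_rwlock_rdlock(&rw);
	}

	int main(void)
	{
		pthread_rwlock_rdlock(&rw);
		signal_and_flush();
		pthread_rwlock_unlock(&rw);
		return 0;
	}
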
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 88250395b0ce..01aa52f574e5 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1132,11 +1132,6 @@ static int imx_startup(struct uart_port *port)
+ 	while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0))
+ 		udelay(1);
+ 
+-	/* Can we enable the DMA support? */
+-	if (is_imx6q_uart(sport) && !uart_console(port) &&
+-	    !sport->dma_is_inited)
+-		imx_uart_dma_init(sport);
+-
+ 	spin_lock_irqsave(&sport->port.lock, flags);
+ 
+ 	/*
+@@ -1145,9 +1140,6 @@ static int imx_startup(struct uart_port *port)
+ 	writel(USR1_RTSD, sport->port.membase + USR1);
+ 	writel(USR2_ORE, sport->port.membase + USR2);
+ 
+-	if (sport->dma_is_inited && !sport->dma_is_enabled)
+-		imx_enable_dma(sport);
+-
+ 	temp = readl(sport->port.membase + UCR1);
+ 	temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
+ 
+@@ -1318,6 +1310,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 			} else {
+ 				ucr2 |= UCR2_CTSC;
+ 			}
++
++			/* Can we enable the DMA support? */
++			if (is_imx6q_uart(sport) && !uart_console(port)
++				&& !sport->dma_is_inited)
++				imx_uart_dma_init(sport);
+ 		} else {
+ 			termios->c_cflag &= ~CRTSCTS;
+ 		}
+@@ -1434,6 +1431,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
+ 	if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
+ 		imx_enable_ms(&sport->port);
+ 
++	if (sport->dma_is_inited && !sport->dma_is_enabled)
++		imx_enable_dma(sport);
+ 	spin_unlock_irqrestore(&sport->port.lock, flags);
+ }
+ 
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index 0b7bb12dfc68..ec540445bb71 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1409,7 +1409,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
+ 	mutex_lock(&port->mutex);
+ 	uart_shutdown(tty, state);
+ 	tty_port_tty_set(port, NULL);
+-	tty->closing = 0;
++
+ 	spin_lock_irqsave(&port->lock, flags);
+ 
+ 	if (port->blocked_open) {
+@@ -1435,6 +1435,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
+ 	mutex_unlock(&port->mutex);
+ 
+ 	tty_ldisc_flush(tty);
++	tty->closing = 0;
+ }
+ 
+ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 0827d7c96527..ee07ba41c8db 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
+ 	u32 pls = status_reg & PORT_PLS_MASK;
+ 
+ 	/* resume state is a xHCI internal state.
+-	 * Do not report it to usb core.
++	 * Do not report it to usb core, instead, pretend to be U3,
++	 * thus usb core knows it's not ready for transfer
+ 	 */
+-	if (pls == XDEV_RESUME)
++	if (pls == XDEV_RESUME) {
++		*status |= USB_SS_PORT_LS_U3;
+ 		return;
++	}
+ 
+ 	/* When the CAS bit is set then warm reset
+ 	 * should be performed on port
+@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ 		status |= USB_PORT_STAT_C_RESET << 16;
+ 	/* USB3.0 only */
+ 	if (hcd->speed == HCD_USB3) {
+-		if ((raw_port_status & PORT_PLC))
++		/* Port link change with port in resume state should not be
++		 * reported to usbcore, as this is an internal state to be
++		 * handled by xhci driver. Reporting PLC to usbcore may
++		 * cause usbcore clearing PLC first and port change event
++		 * irq won't be generated.
++		 */
++		if ((raw_port_status & PORT_PLC) &&
++			(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
+ 			status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ 		if ((raw_port_status & PORT_WRC))
+ 			status |= USB_PORT_STAT_C_BH_RESET << 16;
+@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
+ 	spin_lock_irqsave(&xhci->lock, flags);
+ 
+ 	if (hcd->self.root_hub->do_remote_wakeup) {
+-		if (bus_state->resuming_ports) {
++		if (bus_state->resuming_ports ||	/* USB2 */
++		    bus_state->port_remote_wakeup) {	/* USB3 */
+ 			spin_unlock_irqrestore(&xhci->lock, flags);
+-			xhci_dbg(xhci, "suspend failed because "
+-						"a port is resuming\n");
++			xhci_dbg(xhci, "suspend failed because a port is resuming\n");
+ 			return -EBUSY;
+ 		}
+ 	}
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 7d34cbfaf373..d095677a0702 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
+ 		usb_hcd_resume_root_hub(hcd);
+ 	}
+ 
++	if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
++		bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
++
+ 	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
+ 		xhci_dbg(xhci, "port resume event for port %d\n", port_id);
+ 
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 36bf089b708f..c502c2277aeb 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+ 			return -EINVAL;
+ 	}
+ 
++	if (virt_dev->tt_info)
++		old_active_eps = virt_dev->tt_info->active_eps;
++
+ 	if (virt_dev->udev != udev) {
+ 		/* If the virt_dev and the udev does not match, this virt_dev
+ 		 * may belong to another udev.
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 6977f8491fa7..0f26dd2697b6 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -285,6 +285,7 @@ struct xhci_op_regs {
+ #define XDEV_U0		(0x0 << 5)
+ #define XDEV_U2		(0x2 << 5)
+ #define XDEV_U3		(0x3 << 5)
++#define XDEV_INACTIVE	(0x6 << 5)
+ #define XDEV_RESUME	(0xf << 5)
+ /* true: port has power (see HCC_PPC) */
+ #define PORT_POWER	(1 << 9)
+diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
+index caf188800c67..87898ca2ed17 100644
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
+ 		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ 		US_FL_NO_READ_DISC_INFO ),
+ 
++/* Reported by Oliver Neukum <oneukum@suse.com>
++ * This device morphs spontaneously into another device if the access
++ * pattern of Windows isn't followed. Thus writable media would be dirty
++ * if the initial instance is used. So the device is limited to its
++ * virtual CD.
++ * And yes, the concept that BCD goes up to 9 is not heeded */
++UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
++		"ZTE,Incorporated",
++		"ZTE WCDMA Technologies MSM",
++		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
++		US_FL_SINGLE_LUN ),
++
+ /* Reported by Sven Geggus <sven-usbst@geggus.net>
+  * This encrypted pen drive returns bogus data for the initial READ(10).
+  */
+diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
+index 2ee28266fd07..fa49d3294cd5 100644
+--- a/drivers/vhost/vhost.c
++++ b/drivers/vhost/vhost.c
+@@ -886,6 +886,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
+ 		}
+ 		if (eventfp != d->log_file) {
+ 			filep = d->log_file;
++			d->log_file = eventfp;
+ 			ctx = d->log_ctx;
+ 			d->log_ctx = eventfp ?
+ 				eventfd_ctx_fileget(eventfp) : NULL;
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 50bb3c207621..5d03eb0ec0ac 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -642,7 +642,7 @@ static inline bool fast_dput(struct dentry *dentry)
+ 
+ 	/*
+ 	 * If we have a d_op->d_delete() operation, we should not
+-	 * let the dentry count go to zero, so use "put__or_lock".
++	 * let the dentry count go to zero, so use "put_or_lock".
+ 	 */
+ 	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
+ 		return lockref_put_or_lock(&dentry->d_lockref);
+@@ -697,7 +697,7 @@ static inline bool fast_dput(struct dentry *dentry)
+ 	 */
+ 	smp_rmb();
+ 	d_flags = ACCESS_ONCE(dentry->d_flags);
+-	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;
++	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
+ 
+ 	/* Nothing to do? Dropping the reference was all we needed? */
+ 	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
+@@ -776,6 +776,9 @@ repeat:
+ 	if (unlikely(d_unhashed(dentry)))
+ 		goto kill_it;
+ 
++	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
++		goto kill_it;
++
+ 	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
+ 		if (dentry->d_op->d_delete(dentry))
+ 			goto kill_it;
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 02c6875dd945..fce3cc1a3fa7 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -1350,6 +1350,36 @@ enum umount_tree_flags {
+ 	UMOUNT_PROPAGATE = 2,
+ 	UMOUNT_CONNECTED = 4,
+ };
++
++static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
++{
++	/* Leaving mounts connected is only valid for lazy umounts */
++	if (how & UMOUNT_SYNC)
++		return true;
++
++	/* A mount without a parent has nothing to be connected to */
++	if (!mnt_has_parent(mnt))
++		return true;
++
++	/* Because the reference counting rules change when mounts are
++	 * unmounted and connected, umounted mounts may not be
++	 * connected to mounted mounts.
++	 */
++	if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
++		return true;
++
++	/* Has it been requested that the mount remain connected? */
++	if (how & UMOUNT_CONNECTED)
++		return false;
++
++	/* Is the mount locked such that it needs to remain connected? */
++	if (IS_MNT_LOCKED(mnt))
++		return false;
++
++	/* By default disconnect the mount */
++	return true;
++}
++
+ /*
+  * mount_lock must be held
+  * namespace_sem must be held for write
+@@ -1387,10 +1417,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
+ 		if (how & UMOUNT_SYNC)
+ 			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
+ 
+-		disconnect = !(((how & UMOUNT_CONNECTED) &&
+-				mnt_has_parent(p) &&
+-				(p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
+-			       IS_MNT_LOCKED_AND_LAZY(p));
++		disconnect = disconnect_mount(p, how);
+ 
+ 		pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
+ 				 disconnect ? &unmounted : NULL);
+@@ -1527,11 +1554,8 @@ void __detach_mounts(struct dentry *dentry)
+ 	while (!hlist_empty(&mp->m_list)) {
+ 		mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
+ 		if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+-			struct mount *p, *tmp;
+-			list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
+-				hlist_add_head(&p->mnt_umount.s_list, &unmounted);
+-				umount_mnt(p);
+-			}
++			hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
++			umount_mnt(mnt);
+ 		}
+ 		else umount_tree(mnt, UMOUNT_CONNECTED);
+ 	}
+diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
+index f734562c6d24..5d25b9d97c29 100644
+--- a/fs/nfs/inode.c
++++ b/fs/nfs/inode.c
+@@ -1242,9 +1242,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
+ 	if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
+ 		cur_size = i_size_read(inode);
+ 		new_isize = nfs_size_to_loff_t(fattr->size);
+-		if (cur_size != new_isize && nfsi->nrequests == 0)
++		if (cur_size != new_isize)
+ 			invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ 	}
++	if (nfsi->nrequests != 0)
++		invalid &= ~NFS_INO_REVAL_PAGECACHE;
+ 
+ 	/* Have any file permissions changed? */
+ 	if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
+@@ -1682,8 +1684,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			invalid |= NFS_INO_INVALID_ATTR
+ 				| NFS_INO_INVALID_DATA
+ 				| NFS_INO_INVALID_ACCESS
+-				| NFS_INO_INVALID_ACL
+-				| NFS_INO_REVAL_PAGECACHE;
++				| NFS_INO_INVALID_ACL;
+ 			if (S_ISDIR(inode->i_mode))
+ 				nfs_force_lookup_revalidate(inode);
+ 			inode->i_version = fattr->change_attr;
+@@ -1715,7 +1716,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
+ 			if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
+ 				i_size_write(inode, new_isize);
+ 				invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+-				invalid &= ~NFS_INO_REVAL_PAGECACHE;
+ 			}
+ 			dprintk("NFS: isize change on server for file %s/%ld "
+ 					"(%Ld to %Ld)\n",
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 55e1e3af23a3..d3f205126609 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1204,12 +1204,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
+ 
+ static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
+ {
++	if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
++		return;
+ 	if (state->n_wronly)
+ 		set_bit(NFS_O_WRONLY_STATE, &state->flags);
+ 	if (state->n_rdonly)
+ 		set_bit(NFS_O_RDONLY_STATE, &state->flags);
+ 	if (state->n_rdwr)
+ 		set_bit(NFS_O_RDWR_STATE, &state->flags);
++	set_bit(NFS_OPEN_STATE, &state->flags);
+ }
+ 
+ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 282b39369510..7b4552678536 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -1110,8 +1110,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
+ 			nfs_list_remove_request(req);
+ 			if (__nfs_pageio_add_request(desc, req))
+ 				continue;
+-			if (desc->pg_error < 0)
++			if (desc->pg_error < 0) {
++				list_splice_tail(&head, &mirror->pg_list);
++				mirror->pg_recoalesce = 1;
+ 				return 0;
++			}
+ 			break;
+ 		}
+ 	} while (mirror->pg_recoalesce);
+diff --git a/fs/pnode.h b/fs/pnode.h
+index 7114ce6e6b9e..0fcdbe7ca648 100644
+--- a/fs/pnode.h
++++ b/fs/pnode.h
+@@ -20,8 +20,6 @@
+ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+ #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
+ #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
+-#define IS_MNT_LOCKED_AND_LAZY(m) \
+-	(((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
+ 
+ #define CL_EXPIRE    		0x01
+ #define CL_SLAVE     		0x02
+diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
+index 20de88d1bf86..dd714037c322 100644
+--- a/fs/xfs/libxfs/xfs_attr_remote.c
++++ b/fs/xfs/libxfs/xfs_attr_remote.c
+@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
+ 	struct xfs_buf	*bp)
+ {
+ 	struct xfs_mount *mp = bp->b_target->bt_mount;
+-	struct xfs_buf_log_item	*bip = bp->b_fspriv;
++	int		blksize = mp->m_attr_geo->blksize;
+ 	char		*ptr;
+ 	int		len;
+ 	xfs_daddr_t	bno;
+-	int		blksize = mp->m_attr_geo->blksize;
+ 
+ 	/* no verification of non-crc buffers */
+ 	if (!xfs_sb_version_hascrc(&mp->m_sb))
+@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
+ 	ASSERT(len >= blksize);
+ 
+ 	while (len > 0) {
++		struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
++
+ 		if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
+ 			xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ 			xfs_verifier_error(bp);
+ 			return;
+ 		}
+-		if (bip) {
+-			struct xfs_attr3_rmt_hdr *rmt;
+ 
+-			rmt = (struct xfs_attr3_rmt_hdr *)ptr;
+-			rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn);
++		/*
++		 * Ensure we aren't writing bogus LSNs to disk. See
++		 * xfs_attr3_rmt_hdr_set() for the explanation.
++		 */
++		if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
++			xfs_buf_ioerror(bp, -EFSCORRUPTED);
++			xfs_verifier_error(bp);
++			return;
+ 		}
+ 		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
+ 
+@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
+ 	rmt->rm_owner = cpu_to_be64(ino);
+ 	rmt->rm_blkno = cpu_to_be64(bno);
+ 
++	/*
++	 * Remote attribute blocks are written synchronously, so we don't
++	 * have an LSN that we can stamp in them that makes any sense to log
++	 * recovery. To ensure that log recovery handles overwrites of these
++	 * blocks sanely (i.e. once they've been freed and reallocated as some
++	 * other type of metadata) we need to ensure that the LSN has a value
++	 * that tells log recovery to ignore the LSN and overwrite the buffer
++	 * with whatever is in its log. To do this, we use the magic
++	 * NULLCOMMITLSN to indicate that the LSN is invalid.
++	 */
++	rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
++
+ 	return sizeof(struct xfs_attr3_rmt_hdr);
+ }
+ 
+@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
+ 
+ 		/*
+ 		 * Allocate a single extent, up to the size of the value.
++		 *
++		 * Note that we have to consider this a data allocation as we
++		 * write the remote attribute without logging the contents.
++		 * Hence we must ensure that we aren't using blocks that are on
++		 * the busy list so that we don't overwrite blocks which have
++		 * recently been freed but their transactions are not yet
++		 * committed to disk. If we overwrite the contents of a busy
++		 * extent and then crash then the block may not contain the
++		 * correct metadata after log recovery occurs.
+ 		 */
+ 		xfs_bmap_init(args->flist, args->firstblock);
+ 		nmap = 1;
+ 		error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
+-				  blkcnt,
+-				  XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
+-				  args->firstblock, args->total, &map, &nmap,
+-				  args->flist);
++				  blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
++				  args->total, &map, &nmap, args->flist);
+ 		if (!error) {
+ 			error = xfs_bmap_finish(&args->trans, args->flist,
+ 						&committed);
+diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
+index 4f5784f85a5b..a5d03396dda0 100644
+--- a/fs/xfs/xfs_log_recover.c
++++ b/fs/xfs/xfs_log_recover.c
+@@ -1887,9 +1887,14 @@ xlog_recover_get_buf_lsn(
+ 		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+ 		break;
+ 	case XFS_ATTR3_RMT_MAGIC:
+-		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
+-		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
+-		break;
++		/*
++		 * Remote attr blocks are written synchronously, rather than
++		 * being logged. That means they do not contain a valid LSN
++		 * (i.e. transactionally ordered) in them, and hence any time we
++		 * see a buffer to replay over the top of a remote attribute
++		 * block we should simply do so.
++		 */
++		goto recover_immediately;
+ 	case XFS_SB_MAGIC:
+ 		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+ 		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
+index b6a52a4b457a..51bb6532785c 100644
+--- a/include/linux/can/skb.h
++++ b/include/linux/can/skb.h
+@@ -27,10 +27,12 @@
+ /**
+  * struct can_skb_priv - private additional data inside CAN sk_buffs
+  * @ifindex:	ifindex of the first interface the CAN frame appeared on
++ * @skbcnt:	atomic counter to have a unique id together with skb pointer
+  * @cf:		align to the following CAN frame at skb->data
+  */
+ struct can_skb_priv {
+ 	int ifindex;
++	int skbcnt;
+ 	struct can_frame cf[0];
+ };
+ 
+diff --git a/include/linux/cper.h b/include/linux/cper.h
+index 76abba4b238e..dcacb1a72e26 100644
+--- a/include/linux/cper.h
++++ b/include/linux/cper.h
+@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
+ 	__u64	mm_reg_addr;
+ };
+ 
+-/* Memory Error Section */
++/* Old Memory Error Section UEFI 2.1, 2.2 */
++struct cper_sec_mem_err_old {
++	__u64	validation_bits;
++	__u64	error_status;
++	__u64	physical_addr;
++	__u64	physical_addr_mask;
++	__u16	node;
++	__u16	card;
++	__u16	module;
++	__u16	bank;
++	__u16	device;
++	__u16	row;
++	__u16	column;
++	__u16	bit_pos;
++	__u64	requestor_id;
++	__u64	responder_id;
++	__u64	target_id;
++	__u8	error_type;
++};
++
++/* Memory Error Section UEFI >= 2.3 */
+ struct cper_sec_mem_err {
+ 	__u64	validation_bits;
+ 	__u64	error_status;
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index 1da602982cf9..6cd8c0ee4b6f 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+  *            SAVE_REGS. If another ops with this flag set is already registered
+  *            for any of the functions that this ops will be registered for, then
+  *            this ops will fail to register or set_filter_ip.
++ * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
+  */
+ enum {
+ 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
+@@ -132,6 +133,7 @@ enum {
+ 	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
+ 	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
+ 	FTRACE_OPS_FL_IPMODIFY			= 1 << 13,
++	FTRACE_OPS_FL_PID			= 1 << 14,
+ };
+ 
+ #ifdef CONFIG_DYNAMIC_FTRACE
+@@ -159,6 +161,7 @@ struct ftrace_ops {
+ 	struct ftrace_ops		*next;
+ 	unsigned long			flags;
+ 	void				*private;
++	ftrace_func_t			saved_func;
+ 	int __percpu			*disabled;
+ #ifdef CONFIG_DYNAMIC_FTRACE
+ 	int				nr_trampolines;
+diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
+index 54e7af301888..73abbc54063d 100644
+--- a/include/target/iscsi/iscsi_target_core.h
++++ b/include/target/iscsi/iscsi_target_core.h
+@@ -606,6 +606,7 @@ struct iscsi_conn {
+ 	int			bitmap_id;
+ 	int			rx_thread_active;
+ 	struct task_struct	*rx_thread;
++	struct completion	rx_login_comp;
+ 	int			tx_thread_active;
+ 	struct task_struct	*tx_thread;
+ 	/* list_head for session connection list */
+diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
+index 9065107f083e..7a5237a1bce5 100644
+--- a/kernel/irq/resend.c
++++ b/kernel/irq/resend.c
+@@ -75,13 +75,21 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq)
+ 		    !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
+ #ifdef CONFIG_HARDIRQS_SW_RESEND
+ 			/*
+-			 * If the interrupt has a parent irq and runs
+-			 * in the thread context of the parent irq,
+-			 * retrigger the parent.
++			 * If the interrupt is running in the thread
++			 * context of the parent irq, we need to be
++			 * careful, because we cannot trigger it
++			 * directly.
+ 			 */
+-			if (desc->parent_irq &&
+-			    irq_settings_is_nested_thread(desc))
++			if (irq_settings_is_nested_thread(desc)) {
++				/*
++				 * If the parent_irq is valid, we
++				 * retrigger the parent, otherwise we
++				 * do nothing.
++				 */
++				if (!desc->parent_irq)
++					return;
+ 				irq = desc->parent_irq;
++			}
+ 			/* Set it pending and activate the softirq: */
+ 			set_bit(irq, irqs_resend);
+ 			tasklet_schedule(&resend_tasklet);
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 02bece4a99ea..eb11011b5292 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -98,6 +98,13 @@ struct ftrace_pid {
+ 	struct pid *pid;
+ };
+ 
++static bool ftrace_pids_enabled(void)
++{
++	return !list_empty(&ftrace_pids);
++}
++
++static void ftrace_update_trampoline(struct ftrace_ops *ops);
++
+ /*
+  * ftrace_disabled is set when an anomaly is discovered.
+  * ftrace_disabled is much stronger than ftrace_enabled.
+@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
+ static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
+ static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
+ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+-ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
+ static struct ftrace_ops global_ops;
+ static struct ftrace_ops control_ops;
+ 
+@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+ 	if (!test_tsk_trace_trace(current))
+ 		return;
+ 
+-	ftrace_pid_function(ip, parent_ip, op, regs);
+-}
+-
+-static void set_ftrace_pid_function(ftrace_func_t func)
+-{
+-	/* do not set ftrace_pid_function to itself! */
+-	if (func != ftrace_pid_func)
+-		ftrace_pid_function = func;
++	op->saved_func(ip, parent_ip, op, regs);
+ }
+ 
+ /**
+@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
+ void clear_ftrace_function(void)
+ {
+ 	ftrace_trace_function = ftrace_stub;
+-	ftrace_pid_function = ftrace_stub;
+ }
+ 
+ static void control_ops_disable_all(struct ftrace_ops *ops)
+@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
+ 	} else
+ 		add_ftrace_ops(&ftrace_ops_list, ops);
+ 
++	/* Always save the function, and reset at unregistering */
++	ops->saved_func = ops->func;
++
++	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
++		ops->func = ftrace_pid_func;
++
+ 	ftrace_update_trampoline(ops);
+ 
+ 	if (ftrace_enabled)
+@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
+ 	if (ftrace_enabled)
+ 		update_ftrace_function();
+ 
++	ops->func = ops->saved_func;
++
+ 	return 0;
+ }
+ 
+ static void ftrace_update_pid_func(void)
+ {
++	bool enabled = ftrace_pids_enabled();
++	struct ftrace_ops *op;
++
+ 	/* Only do something if we are tracing something */
+ 	if (ftrace_trace_function == ftrace_stub)
+ 		return;
+ 
++	do_for_each_ftrace_op(op, ftrace_ops_list) {
++		if (op->flags & FTRACE_OPS_FL_PID) {
++			op->func = enabled ? ftrace_pid_func :
++				op->saved_func;
++			ftrace_update_trampoline(op);
++		}
++	} while_for_each_ftrace_op(op);
++
+ 	update_ftrace_function();
+ }
+ 
+@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
+ 	.local_hash.filter_hash		= EMPTY_HASH,
+ 	INIT_OPS_HASH(global_ops)
+ 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
+-					  FTRACE_OPS_FL_INITIALIZED,
++					  FTRACE_OPS_FL_INITIALIZED |
++					  FTRACE_OPS_FL_PID,
+ };
+ 
+ /*
+@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
+ 
+ static struct ftrace_ops global_ops = {
+ 	.func			= ftrace_stub,
+-	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
++	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
++				  FTRACE_OPS_FL_INITIALIZED |
++				  FTRACE_OPS_FL_PID,
+ };
+ 
+ static int __init ftrace_nodyn_init(void)
+@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
+ 		if (WARN_ON(tr->ops->func != ftrace_stub))
+ 			printk("ftrace ops had %pS for function\n",
+ 			       tr->ops->func);
+-		/* Only the top level instance does pid tracing */
+-		if (!list_empty(&ftrace_pids)) {
+-			set_ftrace_pid_function(func);
+-			func = ftrace_pid_func;
+-		}
+ 	}
+ 	tr->ops->func = func;
+ 	tr->ops->private = tr;
+@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
+ {
+ 	mutex_lock(&ftrace_lock);
+ 
+-	if (list_empty(&ftrace_pids) && (!*pos))
++	if (!ftrace_pids_enabled() && (!*pos))
+ 		return (void *) 1;
+ 
+ 	return seq_list_start(&ftrace_pids, *pos);
+@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
+ 	.func			= ftrace_stub,
+ 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
+ 				   FTRACE_OPS_FL_INITIALIZED |
++				   FTRACE_OPS_FL_PID |
+ 				   FTRACE_OPS_FL_STUB,
+ #ifdef FTRACE_GRAPH_TRAMP_ADDR
+ 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
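
[The ftrace hunks above replace the single global ftrace_pid_function with
a per-ops scheme: every ftrace_ops that opts in via FTRACE_OPS_FL_PID keeps
its real callback in ops->saved_func at registration time, and
ftrace_update_pid_func() swaps ops->func between ftrace_pid_func and that
saved callback whenever pid filtering is toggled. A rough user-space sketch
of the swap; field names mirror the kernel's, the pid check and locking are
elided:

	#include <stdio.h>

	struct ops {
		void (*func)(struct ops *op, unsigned long ip);
		void (*saved_func)(struct ops *op, unsigned long ip);
	};

	static void real_handler(struct ops *op, unsigned long ip)
	{
		printf("traced ip=%#lx\n", ip);
	}

	/* stands in for ftrace_pid_func(): filter, then chain to saved_func */
	static void pid_wrapper(struct ops *op, unsigned long ip)
	{
		/* ... return early unless current is in the traced pid set ... */
		op->saved_func(op, ip);
	}

	static void update_pid_func(struct ops *op, int pids_enabled)
	{
		op->func = pids_enabled ? pid_wrapper : op->saved_func;
	}

	int main(void)
	{
		struct ops op = { .func = real_handler, .saved_func = real_handler };

		update_pid_func(&op, 1);
		op.func(&op, 0x1234);	/* routed through the pid filter */
		return 0;
	}
]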
+diff --git a/lib/dma-debug.c b/lib/dma-debug.c
+index ae4b65e17e64..dace71fe41f7 100644
+--- a/lib/dma-debug.c
++++ b/lib/dma-debug.c
+@@ -574,6 +574,9 @@ void debug_dma_assert_idle(struct page *page)
+ 	unsigned long flags;
+ 	phys_addr_t cln;
+ 
++	if (dma_debug_disabled())
++		return;
++
+ 	if (!page)
+ 		return;
+ 
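[The one-line dma-debug fix above is an early-out guard: debug_dma_assert_idle()
is called unconditionally from core mm code, so it must be a cheap no-op
whenever the debugging facility is disabled or its hash tables were never
allocated. A minimal sketch of the idiom, with a stand-in for
dma_debug_disabled():

	#include <stdbool.h>

	static bool debug_disabled(void) { return true; }	/* stand-in for dma_debug_disabled() */

	void debug_assert_idle(void *page)
	{
		if (debug_disabled())
			return;		/* facility off: exported hook stays a no-op */
		if (!page)
			return;
		/* ... per-cacheline hash-bucket checks elided ... */
	}
]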
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 689c818ed007..62c635f2bcfc 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -89,6 +89,8 @@ struct timer_list can_stattimer;   /* timer for statistics update */
+ struct s_stats    can_stats;       /* packet statistics */
+ struct s_pstats   can_pstats;      /* receive list statistics */
+ 
++static atomic_t skbcounter = ATOMIC_INIT(0);
++
+ /*
+  * af_can socket functions
+  */
+@@ -310,12 +312,8 @@ int can_send(struct sk_buff *skb, int loop)
+ 		return err;
+ 	}
+ 
+-	if (newskb) {
+-		if (!(newskb->tstamp.tv64))
+-			__net_timestamp(newskb);
+-
++	if (newskb)
+ 		netif_rx_ni(newskb);
+-	}
+ 
+ 	/* update statistics */
+ 	can_stats.tx_frames++;
+@@ -683,6 +681,10 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
+ 	can_stats.rx_frames++;
+ 	can_stats.rx_frames_delta++;
+ 
++	/* create non-zero unique skb identifier together with *skb */
++	while (!(can_skb_prv(skb)->skbcnt))
++		can_skb_prv(skb)->skbcnt = atomic_inc_return(&skbcounter);
++
+ 	rcu_read_lock();
+ 
+ 	/* deliver the packet to sockets listening on all devices */
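[The af_can.c hunks above also explain the tstamp removal in can_send():
instead of forcing a timestamp onto every looped-back frame just so
receivers can tell duplicate filter matches apart, each frame now gets a
non-zero per-skb counter drawn from a global atomic the first time
can_receive() sees it (senders initialise skbcnt to 0). A C11 sketch of the
non-zero id allocation; the loop only matters when the 32-bit counter wraps
through zero:

	#include <stdatomic.h>

	static atomic_uint skbcounter;

	static void assign_skbcnt(unsigned int *skbcnt)
	{
		/* atomic_fetch_add returns the old value; +1 mirrors atomic_inc_return() */
		while (*skbcnt == 0)
			*skbcnt = atomic_fetch_add(&skbcounter, 1) + 1;
	}
]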
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index b523453585be..a1ba6875c2a2 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -261,6 +261,7 @@ static void bcm_can_tx(struct bcm_op *op)
+ 
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 
+ 	memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
+ 
+@@ -1217,6 +1218,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
+ 	}
+ 
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 	skb->dev = dev;
+ 	can_skb_set_owner(skb, sk);
+ 	err = can_send(skb, 1); /* send with loopback */
+diff --git a/net/can/raw.c b/net/can/raw.c
+index 31b9748cbb4e..2e67b1423cd3 100644
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -75,7 +75,7 @@ MODULE_ALIAS("can-proto-1");
+  */
+ 
+ struct uniqframe {
+-	ktime_t tstamp;
++	int skbcnt;
+ 	const struct sk_buff *skb;
+ 	unsigned int join_rx_count;
+ };
+@@ -133,7 +133,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
+ 
+ 	/* eliminate multiple filter matches for the same skb */
+ 	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
+-	    ktime_equal(this_cpu_ptr(ro->uniq)->tstamp, oskb->tstamp)) {
++	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
+ 		if (ro->join_filters) {
+ 			this_cpu_inc(ro->uniq->join_rx_count);
+ 			/* drop frame until all enabled filters matched */
+@@ -144,7 +144,7 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
+ 		}
+ 	} else {
+ 		this_cpu_ptr(ro->uniq)->skb = oskb;
+-		this_cpu_ptr(ro->uniq)->tstamp = oskb->tstamp;
++		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
+ 		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
+ 		/* drop first frame to check all enabled filters? */
+ 		if (ro->join_filters && ro->count > 1)
+@@ -749,6 +749,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+ 
+ 	can_skb_reserve(skb);
+ 	can_skb_prv(skb)->ifindex = dev->ifindex;
++	can_skb_prv(skb)->skbcnt = 0;
+ 
+ 	err = memcpy_from_msg(skb_put(skb, size), msg, size);
+ 	if (err < 0)
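[raw_rcv() can fire once per matching filter for the same skb, so
net/can/raw.c keeps a per-CPU uniqframe to suppress duplicates; keying it on
(skb pointer, skbcnt) rather than (skb pointer, timestamp) keeps that
working now that loopback frames are no longer force-timestamped. A minimal
sketch of the check:

	struct uniqframe {
		int skbcnt;
		const void *skb;
	};

	/* Returns 1 when this (skb, skbcnt) pair was the last one seen. */
	static int seen_before(struct uniqframe *uniq, const void *skb, int skbcnt)
	{
		if (uniq->skb == skb && uniq->skbcnt == skbcnt)
			return 1;
		uniq->skb = skb;
		uniq->skbcnt = skbcnt;
		return 0;
	}
]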
+diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
+index 29236e832e44..c09c0131bfa2 100644
+--- a/net/mac80211/debugfs_netdev.c
++++ b/net/mac80211/debugfs_netdev.c
+@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
+ 
+ 	debugfs_remove_recursive(sdata->vif.debugfs_dir);
+ 	sdata->vif.debugfs_dir = NULL;
++	sdata->debugfs.subdir_stations = NULL;
+ }
+ 
+ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
+diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
+index 273b8bff6ba4..657ba9f5d308 100644
+--- a/net/rds/ib_rdma.c
++++ b/net/rds/ib_rdma.c
+@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
+ 	}
+ 
+ 	ibmr = rds_ib_alloc_fmr(rds_ibdev);
+-	if (IS_ERR(ibmr))
++	if (IS_ERR(ibmr)) {
++		rds_ib_dev_put(rds_ibdev);
+ 		return ibmr;
++	}
+ 
+ 	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
+ 	if (ret == 0)
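[The rds_ib_get_mr() hunk plugs a reference leak: the function takes a
reference on the device earlier on, and the error path for a failed FMR
allocation returned without dropping it. The general shape, with
illustrative names standing in for the rds_ib_* helpers:

	struct dev { int refcount; };

	static void get_ref(struct dev *d) { d->refcount++; }
	static void put_ref(struct dev *d) { d->refcount--; }
	static void *alloc_mr(struct dev *d) { (void)d; return 0; }	/* 0 plays IS_ERR here */

	void *get_mr(struct dev *d)
	{
		void *mr;

		get_ref(d);		/* taken earlier in the real function */
		mr = alloc_mr(d);
		if (!mr) {
			put_ref(d);	/* the fix: balance the reference on failure */
			return mr;
		}
		/* ... map and return ... */
		return mr;
	}
]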
+diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
+index d126c03361ae..75888dd38a7f 100644
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
+ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
+ {
+ 	if (substream->pcm->nonatomic) {
+-		down_read(&snd_pcm_link_rwsem);
++		down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
+ 		mutex_lock(&substream->self_group.mutex);
+ 	} else {
+ 		read_lock(&snd_pcm_link_rwlock);
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c403dd10d126..44dfc7b92bc3 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2056,6 +2056,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* ATI HDMI */
+ 	{ PCI_DEVICE(0x1002, 0x1308),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0x157a),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x793b),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x7919),
+@@ -2110,8 +2112,14 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0xaab0),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0xaac0),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0xaac8),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0xaad8),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
++	{ PCI_DEVICE(0x1002, 0xaae8),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	/* VIA VT8251/VT8237A */
+ 	{ PCI_DEVICE(0x1106, 0x3288),
+ 	  .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 5f44f60a6389..225b78b4ef12 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3333,6 +3333,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
+ { .id = 0x10de0070, .name = "GPU 70 HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de0072, .name = "GPU 72 HDMI/DP",	.patch = patch_nvhdmi },
++{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP",	.patch = patch_nvhdmi },
+ { .id = 0x10de8001, .name = "MCP73 HDMI",	.patch = patch_nvhdmi_2ch },
+ { .id = 0x11069f80, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
+ { .id = 0x11069f81, .name = "VX900 HDMI/DP",	.patch = patch_via_hdmi },
+@@ -3396,6 +3397,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
+ MODULE_ALIAS("snd-hda-codec-id:10de0070");
+ MODULE_ALIAS("snd-hda-codec-id:10de0071");
+ MODULE_ALIAS("snd-hda-codec-id:10de0072");
++MODULE_ALIAS("snd-hda-codec-id:10de007d");
+ MODULE_ALIAS("snd-hda-codec-id:10de8001");
+ MODULE_ALIAS("snd-hda-codec-id:11069f80");
+ MODULE_ALIAS("snd-hda-codec-id:11069f81");
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 0e75998db39f..590bcfb0e82f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2224,7 +2224,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
+ 	SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
+-	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
++	SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+ 
+ 	SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
+ 	SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+@@ -5004,7 +5004,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ 0x14, 0x90170110 },
+ 			{ 0x17, 0x40000008 },
+ 			{ 0x18, 0x411111f0 },
+-			{ 0x19, 0x411111f0 },
++			{ 0x19, 0x01a1913c },
+ 			{ 0x1a, 0x411111f0 },
+ 			{ 0x1b, 0x411111f0 },
+ 			{ 0x1d, 0x40f89b2d },
+@@ -5114,6 +5114,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
++	SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5382,6 +5383,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ 		{0x1d, 0x40700001},
+ 		{0x21, 0x02211030}),
+ 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
++		{0x12, 0x40000000},
++		{0x14, 0x90170130},
++		{0x17, 0x411111f0},
++		{0x18, 0x411111f0},
++		{0x19, 0x411111f0},
++		{0x1a, 0x411111f0},
++		{0x1b, 0x01014020},
++		{0x1d, 0x4054c029},
++		{0x1e, 0x411111f0},
++		{0x21, 0x0221103f}),
++	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 		{0x12, 0x90a60160},
+ 		{0x14, 0x90170120},
+ 		{0x17, 0x90170140},
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 6c66d7e16439..25f0f45e6640 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
+ 		      "HP Mini", STAC_92HD83XXX_HP_LED),
+ 	SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
+-	SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
++	/* match both for 0xfa91 and 0xfa93 */
++	SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
+ 		      "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
+ 	{} /* terminator */
+ };
+diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
+index 8461d6bf992f..204cc074adb9 100644
+--- a/sound/usb/line6/pcm.c
++++ b/sound/usb/line6/pcm.c
+@@ -186,12 +186,8 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
+ 	int ret = 0;
+ 
+ 	spin_lock_irqsave(&pstr->lock, flags);
+-	if (!test_and_set_bit(type, &pstr->running)) {
+-		if (pstr->active_urbs || pstr->unlink_urbs) {
+-			ret = -EBUSY;
+-			goto error;
+-		}
+-
++	if (!test_and_set_bit(type, &pstr->running) &&
++	    !(pstr->active_urbs || pstr->unlink_urbs)) {
+ 		pstr->count = 0;
+ 		/* Submit all currently available URBs */
+ 		if (direction == SNDRV_PCM_STREAM_PLAYBACK)
+@@ -199,7 +195,6 @@ static int line6_stream_start(struct snd_line6_pcm *line6pcm, int direction,
+ 		else
+ 			ret = line6_submit_audio_in_all_urbs(line6pcm);
+ 	}
+- error:
+ 	if (ret < 0)
+ 		clear_bit(type, &pstr->running);
+ 	spin_unlock_irqrestore(&pstr->lock, flags);
+diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
+index e5000da9e9d7..6a803eff87f7 100644
+--- a/sound/usb/mixer_maps.c
++++ b/sound/usb/mixer_maps.c
+@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
+ 	{ 0 }
+ };
+ 
++/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
++static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
++static struct usbmix_name_map bose_companion5_map[] = {
++	{ 3, NULL, .dB = &bose_companion5_dB },
++	{ 0 }	/* terminator */
++};
++
++/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
++static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
++static struct usbmix_name_map dragonfly_1_2_map[] = {
++	{ 7, NULL, .dB = &dragonfly_1_2_dB },
++	{ 0 }	/* terminator */
++};
++
+ /*
+  * Control map entries
+  */
+@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
+ 		.id = USB_ID(0x25c4, 0x0003),
+ 		.map = scms_usb3318_map,
+ 	},
++	{
++		/* Bose Companion 5 */
++		.id = USB_ID(0x05a7, 0x1020),
++		.map = bose_companion5_map,
++	},
++	{
++		/* Dragonfly DAC 1.2 */
++		.id = USB_ID(0x21b4, 0x0081),
++		.map = dragonfly_1_2_map,
++	},
+ 	{ 0 } /* terminator */
+ };
+ 
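[The two new usbmix_dB_map entries handle devices whose volume controls
report values on the wrong scale (USB audio nominally encodes volume in
1/256 dB steps). A quirk pins the control's true minimum and maximum in
ALSA's TLV units of 1/100 dB, and the rest of the range follows from that.
A simplified sketch, assuming a linear mapping between the raw range and
the pinned dB endpoints:

	struct db_map { int min_db_x100, max_db_x100; };	/* e.g. {-5006, -6} */

	static int raw_to_db_x100(int raw, int raw_min, int raw_max,
				  const struct db_map *m)
	{
		long long span = m->max_db_x100 - m->min_db_x100;

		return m->min_db_x100 +
		       (int)((long long)(raw - raw_min) * span / (raw_max - raw_min));
	}
]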
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 2f6d3e9a1bcd..e4756651a52c 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -2512,6 +2512,74 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ 	}
+ },
+ 
++/* Steinberg devices */
++{
++	/* Steinberg MI2 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x2040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++{
++	/* Steinberg MI4 */
++	USB_DEVICE_VENDOR_SPEC(0x0a4e, 0x4040),
++	.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
++		.ifnum = QUIRK_ANY_INTERFACE,
++		.type = QUIRK_COMPOSITE,
++		.data = & (const struct snd_usb_audio_quirk[]) {
++			{
++				.ifnum = 0,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 1,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 2,
++				.type = QUIRK_AUDIO_STANDARD_INTERFACE
++			},
++			{
++				.ifnum = 3,
++				.type = QUIRK_MIDI_FIXED_ENDPOINT,
++				.data = &(const struct snd_usb_midi_endpoint_info) {
++					.out_cables = 0x0001,
++					.in_cables  = 0x0001
++				}
++			},
++			{
++				.ifnum = -1
++			}
++		}
++	}
++},
++
+ /* TerraTec devices */
+ {
+ 	USB_DEVICE_VENDOR_SPEC(0x0ccd, 0x0012),
+diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
+index 995b7a8596b1..658b0a89796d 100644
+--- a/tools/perf/ui/browsers/hists.c
++++ b/tools/perf/ui/browsers/hists.c
+@@ -45,7 +45,7 @@ static struct rb_node *hists__filter_entries(struct rb_node *nd,
+ 
+ static bool hist_browser__has_filter(struct hist_browser *hb)
+ {
+-	return hists__has_filter(hb->hists) || hb->min_pcnt;
++	return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter;
+ }
+ 
+ static int hist_browser__get_folding(struct hist_browser *browser)
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 201f6c4ca738..99378a5c57a7 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -1893,6 +1893,8 @@ int setup_intlist(struct intlist **list, const char *list_str,
+ 		pr_err("problems parsing %s list\n", list_name);
+ 		return -1;
+ 	}
++
++	symbol_conf.has_filter = true;
+ 	return 0;
+ }
+ 
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index 09561500164a..be0217989bcc 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -105,7 +105,8 @@ struct symbol_conf {
+ 			demangle_kernel,
+ 			filter_relative,
+ 			show_hist_headers,
+-			branch_callstack;
++			branch_callstack,
++			has_filter;
+ 	const char	*vmlinux_name,
+ 			*kallsyms_name,
+ 			*source_prefix,


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-08-03 19:01 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-08-03 19:01 UTC (permalink / raw
  To: gentoo-commits

commit:     ca1be7247b8e5d2dfeb7ae78ece8b4ec4cb819a2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Aug  3 19:01:14 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Aug  3 19:01:14 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ca1be724

Linux patch 4.1.4

 0000_README            |     4 +
 1003_linux-4.1.4.patch | 11078 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11082 insertions(+)

diff --git a/0000_README b/0000_README
index eab69c9..ceda226 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch:  1002_linux-4.1.3.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.3
 
+Patch:  1003_linux-4.1.4.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.4
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.1.4.patch b/1003_linux-4.1.4.patch
new file mode 100644
index 0000000..f7598b8
--- /dev/null
+++ b/1003_linux-4.1.4.patch
@@ -0,0 +1,11078 @@
+diff --git a/Documentation/ABI/testing/ima_policy b/Documentation/ABI/testing/ima_policy
+index d0d0c578324c..0a378a88217a 100644
+--- a/Documentation/ABI/testing/ima_policy
++++ b/Documentation/ABI/testing/ima_policy
+@@ -20,17 +20,19 @@ Description:
+ 		action: measure | dont_measure | appraise | dont_appraise | audit
+ 		condition:= base | lsm  [option]
+ 			base:	[[func=] [mask=] [fsmagic=] [fsuuid=] [uid=]
+-				 [fowner]]
++				[euid=] [fowner=]]
+ 			lsm:	[[subj_user=] [subj_role=] [subj_type=]
+ 				 [obj_user=] [obj_role=] [obj_type=]]
+ 			option:	[[appraise_type=]] [permit_directio]
+ 
+ 		base: 	func:= [BPRM_CHECK][MMAP_CHECK][FILE_CHECK][MODULE_CHECK]
+ 				[FIRMWARE_CHECK]
+-			mask:= [MAY_READ] [MAY_WRITE] [MAY_APPEND] [MAY_EXEC]
++			mask:= [[^]MAY_READ] [[^]MAY_WRITE] [[^]MAY_APPEND]
++			       [[^]MAY_EXEC]
+ 			fsmagic:= hex value
+ 			fsuuid:= file system UUID (e.g 8bcbe394-4f13-4144-be8e-5aa9ea2ce2f6)
+ 			uid:= decimal value
++			euid:= decimal value
+ 			fowner:=decimal value
+ 		lsm:  	are LSM specific
+ 		option:	appraise_type:= [imasig]
+@@ -49,11 +51,25 @@ Description:
+ 			dont_measure fsmagic=0x01021994
+ 			dont_appraise fsmagic=0x01021994
+ 			# RAMFS_MAGIC
+-			dont_measure fsmagic=0x858458f6
+ 			dont_appraise fsmagic=0x858458f6
++			# DEVPTS_SUPER_MAGIC
++			dont_measure fsmagic=0x1cd1
++			dont_appraise fsmagic=0x1cd1
++			# BINFMTFS_MAGIC
++			dont_measure fsmagic=0x42494e4d
++			dont_appraise fsmagic=0x42494e4d
+ 			# SECURITYFS_MAGIC
+ 			dont_measure fsmagic=0x73636673
+ 			dont_appraise fsmagic=0x73636673
++			# SELINUX_MAGIC
++			dont_measure fsmagic=0xf97cff8c
++			dont_appraise fsmagic=0xf97cff8c
++			# CGROUP_SUPER_MAGIC
++			dont_measure fsmagic=0x27e0eb
++			dont_appraise fsmagic=0x27e0eb
++			# NSFS_MAGIC
++			dont_measure fsmagic=0x6e736673
++			dont_appraise fsmagic=0x6e736673
+ 
+ 			measure func=BPRM_CHECK
+ 			measure func=FILE_MMAP mask=MAY_EXEC
+@@ -70,10 +86,6 @@ Description:
+ 		Examples of LSM specific definitions:
+ 
+ 		SELinux:
+-			# SELINUX_MAGIC
+-			dont_measure fsmagic=0xf97cff8c
+-			dont_appraise fsmagic=0xf97cff8c
+-
+ 			dont_measure obj_type=var_log_t
+ 			dont_appraise obj_type=var_log_t
+ 			dont_measure obj_type=auditd_log_t
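[Two grammar extensions drive the doc update above: a mask bit may now be
prefixed with "^" to match opens that merely include that mode (rather than
equal it exactly), and rules can match on the effective uid via euid=.
Together they permit tcb-style rules such as the following (illustrative;
see the ima_policy= description later in this patch):

	# measure anything opened for read by root, by effective or real uid
	measure func=FILE_CHECK mask=^MAY_READ euid=0
	measure func=FILE_CHECK mask=^MAY_READ uid=0
]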
+diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata
+index 0a932155cbba..9231daef3813 100644
+--- a/Documentation/ABI/testing/sysfs-ata
++++ b/Documentation/ABI/testing/sysfs-ata
+@@ -90,6 +90,17 @@ gscr
+ 	130:	SATA_PMP_GSCR_SII_GPIO
+ 	Only valid if the device is a PM.
+ 
++trim
++
++	Shows the DSM TRIM mode currently used by the device. Valid
++	values are:
++	unsupported:		Drive does not support DSM TRIM
++	unqueued:		Drive supports unqueued DSM TRIM only
++	queued:			Drive supports queued DSM TRIM
++	forced_unqueued:	Drive's unqueued DSM support is known to be
++				buggy and only unqueued TRIM commands
++				are sent
++
+ spdn_cnt
+ 
+ 	Number of time libata decided to lower the speed of link due to errors.
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
+index 3befcb19f414..1fbdd79d1624 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio
++++ b/Documentation/ABI/testing/sysfs-bus-iio
+@@ -1165,10 +1165,8 @@ Description:
+ 		object is near the sensor, usually be observing
+ 		reflectivity of infrared or ultrasound emitted.
+ 		Often these sensors are unit less and as such conversion
+-		to SI units is not possible.  Where it is, the units should
+-		be meters.  If such a conversion is not possible, the reported
+-		values should behave in the same way as a distance, i.e. lower
+-		values indicate something is closer to the sensor.
++		to SI units is not possible. Higher proximity measurements
++		indicate closer objects, and vice versa.
+ 
+ What:		/sys/.../iio:deviceX/in_illuminance_input
+ What:		/sys/.../iio:deviceX/in_illuminance_raw
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
+index adda2a8d1d52..e357b020861d 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
+@@ -92,5 +92,5 @@ mpp61         61       gpo, dev(wen1), uart1(txd), audio(rclk)
+ mpp62         62       gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
+                        audio(mclk), uart0(cts)
+ mpp63         63       gpo, spi0(sck), tclk
+-mpp64         64       gpio, spi0(miso), spi0-1(cs1)
+-mpp65         65       gpio, spi0(mosi), spi0-1(cs2)
++mpp64         64       gpio, spi0(miso), spi0(cs1)
++mpp65         65       gpio, spi0(mosi), spi0(cs2)
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
+index 7de0cda4a379..bedbe42c8c0a 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
+@@ -22,8 +22,8 @@ mpp5          5        gpio, dev(ad7), spi0(cs2), spi1(cs2)
+ mpp6          6        gpio, dev(ad0), led(p1), audio(rclk)
+ mpp7          7        gpio, dev(ad1), ptp(clk), led(p2), audio(extclk)
+ mpp8          8        gpio, dev (bootcs), spi0(cs0), spi1(cs0)
+-mpp9          9        gpio, nf(wen), spi0(sck), spi1(sck)
+-mpp10        10        gpio, nf(ren), dram(vttctrl), led(c1)
++mpp9          9        gpio, spi0(sck), spi1(sck), nand(we)
++mpp10        10        gpio, dram(vttctrl), led(c1), nand(re)
+ mpp11        11        gpio, dev(a0), led(c2), audio(sdo)
+ mpp12        12        gpio, dev(a1), audio(bclk)
+ mpp13        13        gpio, dev(readyn), pcie0(rstoutn), pcie1(rstoutn)
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
+index b17c96849fc9..4ac138aaaf87 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-38x-pinctrl.txt
+@@ -27,15 +27,15 @@ mpp8          8        gpio, ge0(txd1), dev(ad10)
+ mpp9          9        gpio, ge0(txd2), dev(ad11)
+ mpp10         10       gpio, ge0(txd3), dev(ad12)
+ mpp11         11       gpio, ge0(txctl), dev(ad13)
+-mpp12         12       gpio, ge0(rxd0), pcie0(rstout), pcie1(rstout) [1], spi0(cs1), dev(ad14)
+-mpp13         13       gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15)
+-mpp14         14       gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1)
+-mpp15         15       gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi), pcie1(rstout) [1]
+-mpp16         16       gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq)
++mpp12         12       gpio, ge0(rxd0), pcie0(rstout), spi0(cs1), dev(ad14), pcie3(clkreq)
++mpp13         13       gpio, ge0(rxd1), pcie0(clkreq), pcie1(clkreq) [1], spi0(cs2), dev(ad15), pcie2(clkreq)
++mpp14         14       gpio, ge0(rxd2), ptp(clk), m(vtt_ctrl), spi0(cs3), dev(wen1), pcie3(clkreq)
++mpp15         15       gpio, ge0(rxd3), ge(mdc slave), pcie0(rstout), spi0(mosi)
++mpp16         16       gpio, ge0(rxctl), ge(mdio slave), m(decc_err), spi0(miso), pcie0(clkreq), pcie1(clkreq) [1]
+ mpp17         17       gpio, ge0(rxclk), ptp(clk), ua1(rxd), spi0(sck), sata1(prsnt)
+-mpp18         18       gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0), pcie1(rstout) [1]
+-mpp19         19       gpio, ge0(col), ptp(event_req), pcie0(clkreq), sata1(prsnt), ua0(cts)
+-mpp20         20       gpio, ge0(txclk), ptp(clk), pcie1(rstout) [1], sata0(prsnt), ua0(rts)
++mpp18         18       gpio, ge0(rxerr), ptp(trig_gen), ua1(txd), spi0(cs0)
++mpp19         19       gpio, ge0(col), ptp(event_req), ge0(txerr), sata1(prsnt), ua0(cts)
++mpp20         20       gpio, ge0(txclk), ptp(clk), sata0(prsnt), ua0(rts)
+ mpp21         21       gpio, spi0(cs1), ge1(rxd0), sata0(prsnt), sd0(cmd), dev(bootcs)
+ mpp22         22       gpio, spi0(mosi), dev(ad0)
+ mpp23         23       gpio, spi0(sck), dev(ad2)
+@@ -58,23 +58,23 @@ mpp39         39       gpio, i2c1(sck), ge1(rxd2), ua0(cts), sd0(d1), dev(a2)
+ mpp40         40       gpio, i2c1(sda), ge1(rxd3), ua0(rts), sd0(d2), dev(ad6)
+ mpp41         41       gpio, ua1(rxd), ge1(rxctl), ua0(cts), spi1(cs3), dev(burst/last)
+ mpp42         42       gpio, ua1(txd), ua0(rts), dev(ad7)
+-mpp43         43       gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), pcie0(rstout), dev(clkout)
+-mpp44         44       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3], pcie0(rstout)
+-mpp45         45       gpio, ref(clk_out0), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
+-mpp46         46       gpio, ref(clk_out1), pcie0(rstout), pcie1(rstout) [1], pcie2(rstout), pcie3(rstout)
+-mpp47         47       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], spi1(cs2), sata3(prsnt) [2]
+-mpp48         48       gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4)
+-mpp49         49       gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5)
+-mpp50         50       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(drx), audio(extclk), sd0(cmd)
++mpp43         43       gpio, pcie0(clkreq), m(vtt_ctrl), m(decc_err), spi1(cs2), dev(clkout)
++mpp44         44       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [3]
++mpp45         45       gpio, ref(clk_out0), pcie0(rstout)
++mpp46         46       gpio, ref(clk_out1), pcie0(rstout)
++mpp47         47       gpio, sata0(prsnt), sata1(prsnt), sata2(prsnt) [2], sata3(prsnt) [2]
++mpp48         48       gpio, sata0(prsnt), m(vtt_ctrl), tdm2c(pclk), audio(mclk), sd0(d4), pcie0(clkreq)
++mpp49         49       gpio, sata2(prsnt) [2], sata3(prsnt) [2], tdm2c(fsync), audio(lrclk), sd0(d5), pcie1(clkreq)
++mpp50         50       gpio, pcie0(rstout), tdm2c(drx), audio(extclk), sd0(cmd)
+ mpp51         51       gpio, tdm2c(dtx), audio(sdo), m(decc_err)
+-mpp52         52       gpio, pcie0(rstout), pcie1(rstout) [1], tdm2c(intn), audio(sdi), sd0(d6)
++mpp52         52       gpio, pcie0(rstout), tdm2c(intn), audio(sdi), sd0(d6)
+ mpp53         53       gpio, sata1(prsnt), sata0(prsnt), tdm2c(rstn), audio(bclk), sd0(d7)
+-mpp54         54       gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), pcie1(rstout) [1], sd0(d3)
++mpp54         54       gpio, sata0(prsnt), sata1(prsnt), pcie0(rstout), ge0(txerr), sd0(d3)
+ mpp55         55       gpio, ua1(cts), ge(mdio), pcie1(clkreq) [1], spi1(cs1), sd0(d0)
+ mpp56         56       gpio, ua1(rts), ge(mdc), m(decc_err), spi1(mosi)
+ mpp57         57       gpio, spi1(sck), sd0(clk)
+ mpp58         58       gpio, pcie1(clkreq) [1], i2c1(sck), pcie2(clkreq), spi1(miso), sd0(d1)
+-mpp59         59       gpio, pcie0(rstout), i2c1(sda), pcie1(rstout) [1], spi1(cs0), sd0(d2)
++mpp59         59       gpio, pcie0(rstout), i2c1(sda), spi1(cs0), sd0(d2)
+ 
+ [1]: only available on 88F6820 and 88F6828
+ [2]: only available on 88F6828
+diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
+index 373dbccd7ab0..96e7744cab84 100644
+--- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
++++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-xp-pinctrl.txt
+@@ -42,15 +42,15 @@ mpp20         20       gpio, ge0(rxd4), ge1(rxd2), lcd(d20), ptp(clk)
+ mpp21         21       gpio, ge0(rxd5), ge1(rxd3), lcd(d21), mem(bat)
+ mpp22         22       gpio, ge0(rxd6), ge1(rxctl), lcd(d22), sata0(prsnt)
+ mpp23         23       gpio, ge0(rxd7), ge1(rxclk), lcd(d23), sata1(prsnt)
+-mpp24         24       gpio, lcd(hsync), sata1(prsnt), nf(bootcs-re), tdm(rst)
+-mpp25         25       gpio, lcd(vsync), sata0(prsnt), nf(bootcs-we), tdm(pclk)
+-mpp26         26       gpio, lcd(clk), tdm(fsync), vdd(cpu1-pd)
++mpp24         24       gpio, lcd(hsync), sata1(prsnt), tdm(rst)
++mpp25         25       gpio, lcd(vsync), sata0(prsnt), tdm(pclk)
++mpp26         26       gpio, lcd(clk), tdm(fsync)
+ mpp27         27       gpio, lcd(e), tdm(dtx), ptp(trig)
+ mpp28         28       gpio, lcd(pwm), tdm(drx), ptp(evreq)
+-mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk), vdd(cpu0-pd)
++mpp29         29       gpio, lcd(ref-clk), tdm(int0), ptp(clk)
+ mpp30         30       gpio, tdm(int1), sd0(clk)
+-mpp31         31       gpio, tdm(int2), sd0(cmd), vdd(cpu0-pd)
+-mpp32         32       gpio, tdm(int3), sd0(d0), vdd(cpu1-pd)
++mpp31         31       gpio, tdm(int2), sd0(cmd)
++mpp32         32       gpio, tdm(int3), sd0(d0)
+ mpp33         33       gpio, tdm(int4), sd0(d1), mem(bat)
+ mpp34         34       gpio, tdm(int5), sd0(d2), sata0(prsnt)
+ mpp35         35       gpio, tdm(int6), sd0(d3), sata1(prsnt)
+@@ -58,21 +58,18 @@ mpp36         36       gpio, spi(mosi)
+ mpp37         37       gpio, spi(miso)
+ mpp38         38       gpio, spi(sck)
+ mpp39         39       gpio, spi(cs0)
+-mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), vdd(cpu1-pd),
+-                       pcie(clkreq0)
++mpp40         40       gpio, spi(cs1), uart2(cts), lcd(vga-hsync), pcie(clkreq0)
+ mpp41         41       gpio, spi(cs2), uart2(rts), lcd(vga-vsync), sata1(prsnt),
+                        pcie(clkreq1)
+-mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer),
+-                       vdd(cpu0-pd)
+-mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout),
+-                       vdd(cpu2-3-pd){1}
++mpp42         42       gpio, uart2(rxd), uart0(cts), tdm(int7), tdm-1(timer)
++mpp43         43       gpio, uart2(txd), uart0(rts), spi(cs3), pcie(rstout)
+ mpp44         44       gpio, uart2(cts), uart3(rxd), spi(cs4), pcie(clkreq2),
+                        mem(bat)
+ mpp45         45       gpio, uart2(rts), uart3(txd), spi(cs5), sata1(prsnt)
+ mpp46         46       gpio, uart3(rts), uart1(rts), spi(cs6), sata0(prsnt)
+ mpp47         47       gpio, uart3(cts), uart1(cts), spi(cs7), pcie(clkreq3),
+                        ref(clkout)
+-mpp48         48       gpio, tclk, dev(burst/last)
++mpp48         48       gpio, dev(clkout), dev(burst/last)
+ 
+ * Marvell Armada XP (mv78260 and mv78460 only)
+ 
+@@ -84,9 +81,9 @@ mpp51         51       gpio, dev(ad16)
+ mpp52         52       gpio, dev(ad17)
+ mpp53         53       gpio, dev(ad18)
+ mpp54         54       gpio, dev(ad19)
+-mpp55         55       gpio, dev(ad20), vdd(cpu0-pd)
+-mpp56         56       gpio, dev(ad21), vdd(cpu1-pd)
+-mpp57         57       gpio, dev(ad22), vdd(cpu2-3-pd){1}
++mpp55         55       gpio, dev(ad20)
++mpp56         56       gpio, dev(ad21)
++mpp57         57       gpio, dev(ad22)
+ mpp58         58       gpio, dev(ad23)
+ mpp59         59       gpio, dev(ad24)
+ mpp60         60       gpio, dev(ad25)
+@@ -96,6 +93,3 @@ mpp63         63       gpio, dev(ad28)
+ mpp64         64       gpio, dev(ad29)
+ mpp65         65       gpio, dev(ad30)
+ mpp66         66       gpio, dev(ad31)
+-
+-Notes:
+-* {1} vdd(cpu2-3-pd) only available on mv78460.
+diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt
+index e180d56c75db..de773a00e2d4 100644
+--- a/Documentation/devicetree/bindings/usb/atmel-usb.txt
++++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt
+@@ -60,9 +60,9 @@ Atmel High-Speed USB device controller
+ 
+ Required properties:
+  - compatible: Should be one of the following
+-	       "at91sam9rl-udc"
+-	       "at91sam9g45-udc"
+-	       "sama5d3-udc"
++	       "atmel,at91sam9rl-udc"
++	       "atmel,at91sam9g45-udc"
++	       "atmel,sama5d3-udc"
+  - reg: Address and length of the register set for the device
+  - interrupts: Should contain usba interrupt
+  - ep childnode: To specify the number of endpoints and their properties.
+diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
+index 6726139bd289..cd03a0faca8f 100644
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1398,7 +1398,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
+ 			The list of supported hash algorithms is defined
+ 			in crypto/hash_info.h.
+ 
+-	ima_tcb		[IMA]
++	ima_policy=	[IMA]
++			The builtin measurement policy to load during IMA
++			setup.  Specifying "tcb" as the value measures all
++			programs exec'd, files mmap'd for exec, and all files
++			opened with the read mode bit set by either the
++			effective uid (euid=0) or uid=0.
++			Format: "tcb"
++
++	ima_tcb		[IMA] Deprecated.  Use ima_policy= instead.
+ 			Load a policy which meets the needs of the Trusted
+ 			Computing Base.  This means IMA will measure all
+ 			programs exec'd, files mmap'd for exec, and all files
+diff --git a/Makefile b/Makefile
+index e3cdec4898be..36f3225cdf1f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
+index 89ef4a540db5..45e7761b7a29 100644
+--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
++++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
+@@ -108,8 +108,8 @@
+ 			mmc0: mmc@f8000000 {
+ 				pinctrl-names = "default";
+ 				pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_cd>;
+-				slot@1 {
+-					reg = <1>;
++				slot@0 {
++					reg = <0>;
+ 					bus-width = <4>;
+ 					cd-gpios = <&pioE 5 0>;
+ 				};
+diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
+index 70e59c5ceb2f..e54421176af8 100644
+--- a/arch/arm/boot/dts/at91sam9g45.dtsi
++++ b/arch/arm/boot/dts/at91sam9g45.dtsi
+@@ -1148,7 +1148,7 @@
+ 			usb2: gadget@fff78000 {
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+-				compatible = "atmel,at91sam9rl-udc";
++				compatible = "atmel,at91sam9g45-udc";
+ 				reg = <0x00600000 0x80000
+ 				       0xfff78000 0x400>;
+ 				interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>;
+diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
+index 3aa56ae3410a..3314a7303754 100644
+--- a/arch/arm/boot/dts/at91sam9x5.dtsi
++++ b/arch/arm/boot/dts/at91sam9x5.dtsi
+@@ -1062,7 +1062,7 @@
+ 			usb2: gadget@f803c000 {
+ 				#address-cells = <1>;
+ 				#size-cells = <0>;
+-				compatible = "atmel,at91sam9rl-udc";
++				compatible = "atmel,at91sam9g45-udc";
+ 				reg = <0x00500000 0x80000
+ 				       0xf803c000 0x400>;
+ 				interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
+index bbcfb5a19c77..0cb8b0b11c3f 100644
+--- a/arch/arm/boot/dts/imx23.dtsi
++++ b/arch/arm/boot/dts/imx23.dtsi
+@@ -435,6 +435,7 @@
+ 				interrupts = <36 37 38 39 40 41 42 43 44>;
+ 				status = "disabled";
+ 				clocks = <&clks 26>;
++				#io-channel-cells = <1>;
+ 			};
+ 
+ 			spdif@80054000 {
+diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
+index 57ab8587f7b9..37e6182f1470 100644
+--- a/arch/arm/boot/dts/sama5d3.dtsi
++++ b/arch/arm/boot/dts/sama5d3.dtsi
+@@ -1321,7 +1321,7 @@
+ 		usb0: gadget@00500000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			compatible = "atmel,at91sam9rl-udc";
++			compatible = "atmel,sama5d3-udc";
+ 			reg = <0x00500000 0x100000
+ 			       0xf8030000 0x4000>;
+ 			interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>;
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 6b1bb58f9c0b..a5f5f4090af6 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -123,7 +123,7 @@
+ 		usb0: gadget@00400000 {
+ 			#address-cells = <1>;
+ 			#size-cells = <0>;
+-			compatible = "atmel,at91sam9rl-udc";
++			compatible = "atmel,sama5d3-udc";
+ 			reg = <0x00400000 0x100000
+ 			       0xfc02c000 0x4000>;
+ 			interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>;
+@@ -1125,10 +1125,10 @@
+ 				compatible = "atmel,at91sam9g46-aes";
+ 				reg = <0xfc044000 0x100>;
+ 				interrupts = <12 IRQ_TYPE_LEVEL_HIGH 0>;
+-				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(41)>,
+-				       <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(40)>;
++				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(41))>,
++				       <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(40))>;
+ 				dma-names = "tx", "rx";
+ 				clocks = <&aes_clk>;
+ 				clock-names = "aes_clk";
+@@ -1139,10 +1139,10 @@
+ 				compatible = "atmel,at91sam9g46-tdes";
+ 				reg = <0xfc04c000 0x100>;
+ 				interrupts = <14 IRQ_TYPE_LEVEL_HIGH 0>;
+-				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(42)>,
+-				       <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(43)>;
++				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(42))>,
++				       <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(43))>;
+ 				dma-names = "tx", "rx";
+ 				clocks = <&tdes_clk>;
+ 				clock-names = "tdes_clk";
+@@ -1153,8 +1153,8 @@
+ 				compatible = "atmel,at91sam9g46-sha";
+ 				reg = <0xfc050000 0x100>;
+ 				interrupts = <15 IRQ_TYPE_LEVEL_HIGH 0>;
+-				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1))
+-					AT91_XDMAC_DT_PERID(44)>;
++				dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
++					| AT91_XDMAC_DT_PERID(44))>;
+ 				dma-names = "tx";
+ 				clocks = <&sha_clk>;
+ 				clock-names = "sha_clk";
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index cca5b8758185..f11d82527076 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -576,7 +576,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 
+ 	if ((unsigned)ipinr < NR_IPI) {
+-		trace_ipi_entry(ipi_types[ipinr]);
++		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ 	}
+ 
+@@ -635,7 +635,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ 	}
+ 
+ 	if ((unsigned)ipinr < NR_IPI)
+-		trace_ipi_exit(ipi_types[ipinr]);
++		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+ 	set_irq_regs(old_regs);
+ }
+ 
+diff --git a/arch/arm/mach-dove/include/mach/irqs.h b/arch/arm/mach-dove/include/mach/irqs.h
+index 03d401d20453..3f29e6bca058 100644
+--- a/arch/arm/mach-dove/include/mach/irqs.h
++++ b/arch/arm/mach-dove/include/mach/irqs.h
+@@ -14,73 +14,73 @@
+ /*
+  * Dove Low Interrupt Controller
+  */
+-#define IRQ_DOVE_BRIDGE		0
+-#define IRQ_DOVE_H2C		1
+-#define IRQ_DOVE_C2H		2
+-#define IRQ_DOVE_NAND		3
+-#define IRQ_DOVE_PDMA		4
+-#define IRQ_DOVE_SPI1		5
+-#define IRQ_DOVE_SPI0		6
+-#define IRQ_DOVE_UART_0		7
+-#define IRQ_DOVE_UART_1		8
+-#define IRQ_DOVE_UART_2		9
+-#define IRQ_DOVE_UART_3		10
+-#define IRQ_DOVE_I2C		11
+-#define IRQ_DOVE_GPIO_0_7	12
+-#define IRQ_DOVE_GPIO_8_15	13
+-#define IRQ_DOVE_GPIO_16_23	14
+-#define IRQ_DOVE_PCIE0_ERR	15
+-#define IRQ_DOVE_PCIE0		16
+-#define IRQ_DOVE_PCIE1_ERR	17
+-#define IRQ_DOVE_PCIE1		18
+-#define IRQ_DOVE_I2S0		19
+-#define IRQ_DOVE_I2S0_ERR	20
+-#define IRQ_DOVE_I2S1		21
+-#define IRQ_DOVE_I2S1_ERR	22
+-#define IRQ_DOVE_USB_ERR	23
+-#define IRQ_DOVE_USB0		24
+-#define IRQ_DOVE_USB1		25
+-#define IRQ_DOVE_GE00_RX	26
+-#define IRQ_DOVE_GE00_TX	27
+-#define IRQ_DOVE_GE00_MISC	28
+-#define IRQ_DOVE_GE00_SUM	29
+-#define IRQ_DOVE_GE00_ERR	30
+-#define IRQ_DOVE_CRYPTO		31
++#define IRQ_DOVE_BRIDGE		(1 + 0)
++#define IRQ_DOVE_H2C		(1 + 1)
++#define IRQ_DOVE_C2H		(1 + 2)
++#define IRQ_DOVE_NAND		(1 + 3)
++#define IRQ_DOVE_PDMA		(1 + 4)
++#define IRQ_DOVE_SPI1		(1 + 5)
++#define IRQ_DOVE_SPI0		(1 + 6)
++#define IRQ_DOVE_UART_0		(1 + 7)
++#define IRQ_DOVE_UART_1		(1 + 8)
++#define IRQ_DOVE_UART_2		(1 + 9)
++#define IRQ_DOVE_UART_3		(1 + 10)
++#define IRQ_DOVE_I2C		(1 + 11)
++#define IRQ_DOVE_GPIO_0_7	(1 + 12)
++#define IRQ_DOVE_GPIO_8_15	(1 + 13)
++#define IRQ_DOVE_GPIO_16_23	(1 + 14)
++#define IRQ_DOVE_PCIE0_ERR	(1 + 15)
++#define IRQ_DOVE_PCIE0		(1 + 16)
++#define IRQ_DOVE_PCIE1_ERR	(1 + 17)
++#define IRQ_DOVE_PCIE1		(1 + 18)
++#define IRQ_DOVE_I2S0		(1 + 19)
++#define IRQ_DOVE_I2S0_ERR	(1 + 20)
++#define IRQ_DOVE_I2S1		(1 + 21)
++#define IRQ_DOVE_I2S1_ERR	(1 + 22)
++#define IRQ_DOVE_USB_ERR	(1 + 23)
++#define IRQ_DOVE_USB0		(1 + 24)
++#define IRQ_DOVE_USB1		(1 + 25)
++#define IRQ_DOVE_GE00_RX	(1 + 26)
++#define IRQ_DOVE_GE00_TX	(1 + 27)
++#define IRQ_DOVE_GE00_MISC	(1 + 28)
++#define IRQ_DOVE_GE00_SUM	(1 + 29)
++#define IRQ_DOVE_GE00_ERR	(1 + 30)
++#define IRQ_DOVE_CRYPTO		(1 + 31)
+ 
+ /*
+  * Dove High Interrupt Controller
+  */
+-#define IRQ_DOVE_AC97		32
+-#define IRQ_DOVE_PMU		33
+-#define IRQ_DOVE_CAM		34
+-#define IRQ_DOVE_SDIO0		35
+-#define IRQ_DOVE_SDIO1		36
+-#define IRQ_DOVE_SDIO0_WAKEUP	37
+-#define IRQ_DOVE_SDIO1_WAKEUP	38
+-#define IRQ_DOVE_XOR_00		39
+-#define IRQ_DOVE_XOR_01		40
+-#define IRQ_DOVE_XOR0_ERR	41
+-#define IRQ_DOVE_XOR_10		42
+-#define IRQ_DOVE_XOR_11		43
+-#define IRQ_DOVE_XOR1_ERR	44
+-#define IRQ_DOVE_LCD_DCON	45
+-#define IRQ_DOVE_LCD1		46
+-#define IRQ_DOVE_LCD0		47
+-#define IRQ_DOVE_GPU		48
+-#define IRQ_DOVE_PERFORM_MNTR	49
+-#define IRQ_DOVE_VPRO_DMA1	51
+-#define IRQ_DOVE_SSP_TIMER	54
+-#define IRQ_DOVE_SSP		55
+-#define IRQ_DOVE_MC_L2_ERR	56
+-#define IRQ_DOVE_CRYPTO_ERR	59
+-#define IRQ_DOVE_GPIO_24_31	60
+-#define IRQ_DOVE_HIGH_GPIO	61
+-#define IRQ_DOVE_SATA		62
++#define IRQ_DOVE_AC97		(1 + 32)
++#define IRQ_DOVE_PMU		(1 + 33)
++#define IRQ_DOVE_CAM		(1 + 34)
++#define IRQ_DOVE_SDIO0		(1 + 35)
++#define IRQ_DOVE_SDIO1		(1 + 36)
++#define IRQ_DOVE_SDIO0_WAKEUP	(1 + 37)
++#define IRQ_DOVE_SDIO1_WAKEUP	(1 + 38)
++#define IRQ_DOVE_XOR_00		(1 + 39)
++#define IRQ_DOVE_XOR_01		(1 + 40)
++#define IRQ_DOVE_XOR0_ERR	(1 + 41)
++#define IRQ_DOVE_XOR_10		(1 + 42)
++#define IRQ_DOVE_XOR_11		(1 + 43)
++#define IRQ_DOVE_XOR1_ERR	(1 + 44)
++#define IRQ_DOVE_LCD_DCON	(1 + 45)
++#define IRQ_DOVE_LCD1		(1 + 46)
++#define IRQ_DOVE_LCD0		(1 + 47)
++#define IRQ_DOVE_GPU		(1 + 48)
++#define IRQ_DOVE_PERFORM_MNTR	(1 + 49)
++#define IRQ_DOVE_VPRO_DMA1	(1 + 51)
++#define IRQ_DOVE_SSP_TIMER	(1 + 54)
++#define IRQ_DOVE_SSP		(1 + 55)
++#define IRQ_DOVE_MC_L2_ERR	(1 + 56)
++#define IRQ_DOVE_CRYPTO_ERR	(1 + 59)
++#define IRQ_DOVE_GPIO_24_31	(1 + 60)
++#define IRQ_DOVE_HIGH_GPIO	(1 + 61)
++#define IRQ_DOVE_SATA		(1 + 62)
+ 
+ /*
+  * DOVE General Purpose Pins
+  */
+-#define IRQ_DOVE_GPIO_START	64
++#define IRQ_DOVE_GPIO_START	65
+ #define NR_GPIO_IRQS		64
+ 
+ /*
+diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
+index 4a5a7aedcb76..df0223f76fa9 100644
+--- a/arch/arm/mach-dove/irq.c
++++ b/arch/arm/mach-dove/irq.c
+@@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs)
+ 	stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF);
+ 	stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF);
+ 	if (stat) {
+-		unsigned int hwirq = __fls(stat);
++		unsigned int hwirq = 1 + __fls(stat);
+ 		handle_IRQ(hwirq, regs);
+ 		return;
+ 	}
+ 	stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF);
+ 	stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF);
+ 	if (stat) {
+-		unsigned int hwirq = 32 + __fls(stat);
++		unsigned int hwirq = 33 + __fls(stat);
+ 		handle_IRQ(hwirq, regs);
+ 		return;
+ 	}
+@@ -144,8 +144,8 @@ void __init dove_init_irq(void)
+ {
+ 	int i;
+ 
+-	orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
+-	orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
++	orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF);
++	orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF);
+ 
+ #ifdef CONFIG_MULTI_IRQ_HANDLER
+ 	set_handle_irq(dove_legacy_handle_irq);
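[The Dove renumbering above exists because Linux treats IRQ 0 as "no
interrupt", so mapping cause-register bit 0 straight to hwirq 0 left the
bridge interrupt unusable; every hwirq is shifted up by one (low cause bits
become 1..32, high bits 33..64, and the GPIO base moves to 65). Sketch of
the decode, using __builtin_clz as a stand-in for the kernel's __fls():

	/* Map the highest set bit of a cause register to a Linux IRQ number,
	 * keeping 0 reserved as "no IRQ" by using a base of at least 1. */
	static int decode_irq(unsigned int stat, int base)
	{
		if (!stat)
			return -1;
		return base + (31 - __builtin_clz(stat));	/* == base + __fls(stat) */
	}
]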
+diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
+index 9005b07296c8..aedec81d1198 100644
+--- a/arch/arm/vdso/vdsomunge.c
++++ b/arch/arm/vdso/vdsomunge.c
+@@ -45,13 +45,11 @@
+  * it does.
+  */
+ 
+-#define _GNU_SOURCE
+-
+ #include <byteswap.h>
+ #include <elf.h>
+ #include <errno.h>
+-#include <error.h>
+ #include <fcntl.h>
++#include <stdarg.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+@@ -82,11 +80,25 @@
+ #define EF_ARM_ABI_FLOAT_HARD 0x400
+ #endif
+ 
++static int failed;
++static const char *argv0;
+ static const char *outfile;
+ 
++static void fail(const char *fmt, ...)
++{
++	va_list ap;
++
++	failed = 1;
++	fprintf(stderr, "%s: ", argv0);
++	va_start(ap, fmt);
++	vfprintf(stderr, fmt, ap);
++	va_end(ap);
++	exit(EXIT_FAILURE);
++}
++
+ static void cleanup(void)
+ {
+-	if (error_message_count > 0 && outfile != NULL)
++	if (failed && outfile != NULL)
+ 		unlink(outfile);
+ }
+ 
+@@ -119,68 +131,66 @@ int main(int argc, char **argv)
+ 	int infd;
+ 
+ 	atexit(cleanup);
++	argv0 = argv[0];
+ 
+ 	if (argc != 3)
+-		error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
++		fail("Usage: %s [infile] [outfile]\n", argv[0]);
+ 
+ 	infile = argv[1];
+ 	outfile = argv[2];
+ 
+ 	infd = open(infile, O_RDONLY);
+ 	if (infd < 0)
+-		error(EXIT_FAILURE, errno, "Cannot open %s", infile);
++		fail("Cannot open %s: %s\n", infile, strerror(errno));
+ 
+ 	if (fstat(infd, &stat) != 0)
+-		error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
++		fail("Failed stat for %s: %s\n", infile, strerror(errno));
+ 
+ 	inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
+ 	if (inbuf == MAP_FAILED)
+-		error(EXIT_FAILURE, errno, "Failed to map %s", infile);
++		fail("Failed to map %s: %s\n", infile, strerror(errno));
+ 
+ 	close(infd);
+ 
+ 	inhdr = inbuf;
+ 
+ 	if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
+-		error(EXIT_FAILURE, 0, "Not an ELF file");
++		fail("Not an ELF file\n");
+ 
+ 	if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
+-		error(EXIT_FAILURE, 0, "Unsupported ELF class");
++		fail("Unsupported ELF class\n");
+ 
+ 	swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
+ 
+ 	if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
+-		error(EXIT_FAILURE, 0, "Not a shared object");
++		fail("Not a shared object\n");
+ 
+-	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
+-		error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
+-		      inhdr->e_machine);
+-	}
++	if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
++		fail("Unsupported architecture %#x\n", inhdr->e_machine);
+ 
+ 	e_flags = read_elf_word(inhdr->e_flags, swap);
+ 
+ 	if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
+-		error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
+-		      EF_ARM_EABI_VERSION(e_flags));
++		fail("Unsupported EABI version %#x\n",
++		     EF_ARM_EABI_VERSION(e_flags));
+ 	}
+ 
+ 	if (e_flags & EF_ARM_ABI_FLOAT_HARD)
+-		error(EXIT_FAILURE, 0,
+-		      "Unexpected hard-float flag set in e_flags");
++		fail("Unexpected hard-float flag set in e_flags\n");
+ 
+ 	clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
+ 
+ 	outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+ 	if (outfd < 0)
+-		error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
++		fail("Cannot open %s: %s\n", outfile, strerror(errno));
+ 
+ 	if (ftruncate(outfd, stat.st_size) != 0)
+-		error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
++		fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
+ 
+ 	outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ 		      outfd, 0);
+ 	if (outbuf == MAP_FAILED)
+-		error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
++		fail("Failed to map %s: %s\n", outfile, strerror(errno));
+ 
+ 	close(outfd);
+ 
+@@ -195,7 +205,7 @@ int main(int argc, char **argv)
+ 	}
+ 
+ 	if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
+-		error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
++		fail("Failed to sync %s: %s\n", outfile, strerror(errno));
+ 
+ 	return EXIT_SUCCESS;
+ }
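[The vdsomunge rework above is a host-tool portability fix: error() and
error_message_count come from glibc's non-standard <error.h>, so the build
broke with non-glibc host toolchains. The replacement keeps to standard
interfaces and tracks failure in a flag that the atexit() handler consults
before removing the partial output. A condensed, runnable restatement of
the pattern:

	#include <stdarg.h>
	#include <stdio.h>
	#include <stdlib.h>

	static int failed;
	static const char *outfile;

	static void cleanup(void)
	{
		if (failed && outfile)
			remove(outfile);	/* portable stand-in for unlink() */
	}

	static void fail(const char *fmt, ...)
	{
		va_list ap;

		failed = 1;
		va_start(ap, fmt);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
		exit(EXIT_FAILURE);		/* cleanup() runs via atexit() */
	}

	int main(void)
	{
		atexit(cleanup);
		outfile = "out.bin";
		fail("Usage: %s [infile] [outfile]\n", "vdsomunge");
	}
]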
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 2cb008177252..d3a202b85ba6 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -569,7 +569,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ 	struct pt_regs *old_regs = set_irq_regs(regs);
+ 
+ 	if ((unsigned)ipinr < NR_IPI) {
+-		trace_ipi_entry(ipi_types[ipinr]);
++		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
+ 		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ 	}
+ 
+@@ -612,7 +612,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
+ 	}
+ 
+ 	if ((unsigned)ipinr < NR_IPI)
+-		trace_ipi_exit(ipi_types[ipinr]);
++		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+ 	set_irq_regs(old_regs);
+ }
+ 
+diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
+index 2de9d2e59d96..0eeb4f0930a0 100644
+--- a/arch/arm64/mm/hugetlbpage.c
++++ b/arch/arm64/mm/hugetlbpage.c
+@@ -40,13 +40,13 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+ 
+ int pmd_huge(pmd_t pmd)
+ {
+-	return !(pmd_val(pmd) & PMD_TABLE_BIT);
++	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+ }
+ 
+ int pud_huge(pud_t pud)
+ {
+ #ifndef __PAGETABLE_PMD_FOLDED
+-	return !(pud_val(pud) & PUD_TABLE_BIT);
++	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
+ #else
+ 	return 0;
+ #endif
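[The hugetlb fix above closes a subtle hole: an empty (pmd_none) entry is
all zeroes, so it also has PMD_TABLE_BIT clear and the old test misreported
it as a huge mapping. A valid descriptor must be non-zero, hence the added
pmd_val()/pud_val() check. The bit logic in isolation:

	/* On arm64, bit 1 of a valid pmd/pud descriptor distinguishes a table
	 * entry (bit set) from a block/huge mapping (bit clear). */
	#define TABLE_BIT (1UL << 1)

	static int is_huge(unsigned long desc)
	{
		return desc && !(desc & TABLE_BIT);	/* zero means "none", not "huge" */
	}
]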
+diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
+index de0a81a539a0..98a26ce82d26 100644
+--- a/arch/arm64/net/bpf_jit.h
++++ b/arch/arm64/net/bpf_jit.h
+@@ -110,6 +110,10 @@
+ /* Rd = Rn >> shift; signed */
+ #define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
+ 
++/* Zero extend */
++#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
++#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
++
+ /* Move wide (immediate) */
+ #define A64_MOVEW(sf, Rd, imm16, shift, type) \
+ 	aarch64_insn_gen_movewide(Rd, imm16, shift, \
+diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
+index dc6a4842683a..c047598b09e0 100644
+--- a/arch/arm64/net/bpf_jit_comp.c
++++ b/arch/arm64/net/bpf_jit_comp.c
+@@ -113,9 +113,9 @@ static inline void emit_a64_mov_i(const int is64, const int reg,
+ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
+ 				 const struct jit_ctx *ctx)
+ {
+-	int to = ctx->offset[bpf_to + 1];
++	int to = ctx->offset[bpf_to];
+ 	/* -1 to account for the Branch instruction */
+-	int from = ctx->offset[bpf_from + 1] - 1;
++	int from = ctx->offset[bpf_from] - 1;
+ 
+ 	return to - from;
+ }
+@@ -289,23 +289,41 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+ 	case BPF_ALU | BPF_END | BPF_FROM_BE:
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ 		if (BPF_SRC(code) == BPF_FROM_BE)
+-			break;
++			goto emit_bswap_uxt;
+ #else /* !CONFIG_CPU_BIG_ENDIAN */
+ 		if (BPF_SRC(code) == BPF_FROM_LE)
+-			break;
++			goto emit_bswap_uxt;
+ #endif
+ 		switch (imm) {
+ 		case 16:
+ 			emit(A64_REV16(is64, dst, dst), ctx);
++			/* zero-extend 16 bits into 64 bits */
++			emit(A64_UXTH(is64, dst, dst), ctx);
+ 			break;
+ 		case 32:
+ 			emit(A64_REV32(is64, dst, dst), ctx);
++			/* upper 32 bits already cleared */
+ 			break;
+ 		case 64:
+ 			emit(A64_REV64(dst, dst), ctx);
+ 			break;
+ 		}
+ 		break;
++emit_bswap_uxt:
++		switch (imm) {
++		case 16:
++			/* zero-extend 16 bits into 64 bits */
++			emit(A64_UXTH(is64, dst, dst), ctx);
++			break;
++		case 32:
++			/* zero-extend 32 bits into 64 bits */
++			emit(A64_UXTW(is64, dst, dst), ctx);
++			break;
++		case 64:
++			/* nop */
++			break;
++		}
++		break;
+ 	/* dst = imm */
+ 	case BPF_ALU | BPF_MOV | BPF_K:
+ 	case BPF_ALU64 | BPF_MOV | BPF_K:
+@@ -640,10 +658,11 @@ static int build_body(struct jit_ctx *ctx)
+ 		const struct bpf_insn *insn = &prog->insnsi[i];
+ 		int ret;
+ 
++		ret = build_insn(insn, ctx);
++
+ 		if (ctx->image == NULL)
+ 			ctx->offset[i] = ctx->idx;
+ 
+-		ret = build_insn(insn, ctx);
+ 		if (ret > 0) {
+ 			i++;
+ 			continue;
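[Two separate arm64 BPF JIT bugs are fixed above. First, BPF defines 16-
and 32-bit endian conversions as zero-extending their result to 64 bits,
and that holds even when the conversion is a same-endianness no-op, so the
JIT now emits UXTH/UXTW after (or instead of) the REV instructions, which
leave the upper register bits intact. Second, the instruction-offset table
used for branch targets is now recorded after each instruction is built,
with bpf2a64_offset() adjusted to match. The byte-swap semantics the added
UXTH guarantees, as plain C:

	#include <stdint.h>

	/* bswap16 with BPF semantics: the upper 48 bits come out zero. */
	static uint64_t bpf_bswap16(uint64_t x)
	{
		uint16_t v = (uint16_t)x;

		v = (uint16_t)((v << 8) | (v >> 8));
		return (uint64_t)v;
	}
]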
+diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
+index 33013dfcd3e1..5c68c85d5dbe 100644
+--- a/arch/m68k/Kconfig.cpu
++++ b/arch/m68k/Kconfig.cpu
+@@ -125,6 +125,13 @@ endif # M68KCLASSIC
+ 
+ if COLDFIRE
+ 
++choice
++	prompt "ColdFire SoC type"
++	default M520x
++	help
++	  Select the type of ColdFire System-on-Chip (SoC) that you want
++	  to build for.
++
+ config M5206
+ 	bool "MCF5206"
+ 	depends on !MMU
+@@ -174,9 +181,6 @@ config M525x
+ 	help
+ 	  Freescale (Motorola) Coldfire 5251/5253 processor support.
+ 
+-config M527x
+-	bool
+-
+ config M5271
+ 	bool "MCF5271"
+ 	depends on !MMU
+@@ -223,9 +227,6 @@ config M5307
+ 	help
+ 	  Motorola ColdFire 5307 processor support.
+ 
+-config M53xx
+-	bool
+-
+ config M532x
+ 	bool "MCF532x"
+ 	depends on !MMU
+@@ -251,9 +252,6 @@ config M5407
+ 	help
+ 	  Motorola ColdFire 5407 processor support.
+ 
+-config M54xx
+-	bool
+-
+ config M547x
+ 	bool "MCF547x"
+ 	select M54xx
+@@ -280,6 +278,17 @@ config M5441x
+ 	help
+ 	  Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
+ 
++endchoice
++
++config M527x
++	bool
++
++config M53xx
++	bool
++
++config M54xx
++	bool
++
+ endif # COLDFIRE
+ 
+ 
+@@ -416,22 +425,10 @@ config HAVE_MBAR
+ config HAVE_IPSBAR
+ 	bool
+ 
+-config CLOCK_SET
+-	bool "Enable setting the CPU clock frequency"
+-	depends on COLDFIRE
+-	default n
+-	help
+-	  On some CPU's you do not need to know what the core CPU clock
+-	  frequency is. On these you can disable clock setting. On some
+-	  traditional 68K parts, and on all ColdFire parts you need to set
+-	  the appropriate CPU clock frequency. On these devices many of the
+-	  onboard peripherals derive their timing from the master CPU clock
+-	  frequency.
+-
+ config CLOCK_FREQ
+ 	int "Set the core clock frequency"
+ 	default "66666666"
+-	depends on CLOCK_SET
++	depends on COLDFIRE
+ 	help
+ 	  Define the CPU clock frequency in use. This is the core clock
+ 	  frequency, it may or may not be the same as the external clock
+diff --git a/arch/m68k/include/asm/coldfire.h b/arch/m68k/include/asm/coldfire.h
+index c94557b91448..50aa4dac9ca2 100644
+--- a/arch/m68k/include/asm/coldfire.h
++++ b/arch/m68k/include/asm/coldfire.h
+@@ -19,7 +19,7 @@
+  *	in any case new boards come along from time to time that have yet
+  *	another different clocking frequency.
+  */
+-#ifdef CONFIG_CLOCK_SET
++#ifdef CONFIG_CLOCK_FREQ
+ #define	MCF_CLK		CONFIG_CLOCK_FREQ
+ #else
+ #error "Don't know what your ColdFire CPU clock frequency is??"
+diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
+index e5a693b16da2..443f44de1020 100644
+--- a/arch/openrisc/Kconfig
++++ b/arch/openrisc/Kconfig
+@@ -17,6 +17,7 @@ config OPENRISC
+ 	select GENERIC_IRQ_SHOW
+ 	select GENERIC_IOMAP
+ 	select GENERIC_CPU_DEVICES
++	select HAVE_UID16
+ 	select GENERIC_ATOMIC64
+ 	select GENERIC_CLOCKEVENTS
+ 	select GENERIC_STRNCPY_FROM_USER
+@@ -31,9 +32,6 @@ config MMU
+ config HAVE_DMA_ATTRS
+ 	def_bool y
+ 
+-config UID16
+-	def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ 	def_bool y
+ 
+diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
+index 9d518d693b4b..844b06d67df4 100644
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+ 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+ 	}
+ }
++
++const char *arch_vma_name(struct vm_area_struct *vma)
++{
++	if (vma->vm_flags & VM_MPX)
++		return "[mpx]";
++	return NULL;
++}
+diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
+index c439ec478216..4d1c11c07fe1 100644
+--- a/arch/x86/mm/mpx.c
++++ b/arch/x86/mm/mpx.c
+@@ -18,26 +18,9 @@
+ #include <asm/processor.h>
+ #include <asm/fpu-internal.h>
+ 
+-static const char *mpx_mapping_name(struct vm_area_struct *vma)
+-{
+-	return "[mpx]";
+-}
+-
+-static struct vm_operations_struct mpx_vma_ops = {
+-	.name = mpx_mapping_name,
+-};
+-
+-static int is_mpx_vma(struct vm_area_struct *vma)
+-{
+-	return (vma->vm_ops == &mpx_vma_ops);
+-}
+-
+ /*
+  * This is really a simplified "vm_mmap". it only handles MPX
+  * bounds tables (the bounds directory is user-allocated).
+- *
+- * Later on, we use the vma->vm_ops to uniquely identify these
+- * VMAs.
+  */
+ static unsigned long mpx_mmap(unsigned long len)
+ {
+@@ -83,7 +66,6 @@ static unsigned long mpx_mmap(unsigned long len)
+ 		ret = -ENOMEM;
+ 		goto out;
+ 	}
+-	vma->vm_ops = &mpx_vma_ops;
+ 
+ 	if (vm_flags & VM_LOCKED) {
+ 		up_write(&mm->mmap_sem);
+@@ -661,7 +643,7 @@ static int zap_bt_entries(struct mm_struct *mm,
+ 		 * so stop immediately and return an error.  This
+ 		 * probably results in a SIGSEGV.
+ 		 */
+-		if (!is_mpx_vma(vma))
++		if (!(vma->vm_flags & VM_MPX))
+ 			return -EINVAL;
+ 
+ 		len = min(vma->vm_end, end) - addr;
+diff --git a/block/bio.c b/block/bio.c
+index f66a4eae16ee..4441522ca339 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -1814,8 +1814,9 @@ EXPORT_SYMBOL(bio_endio_nodec);
+  * Allocates and returns a new bio which represents @sectors from the start of
+  * @bio, and updates @bio to represent the remaining sectors.
+  *
+- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
+- * responsibility to ensure that @bio is not freed before the split.
++ * Unless this is a discard request, the newly allocated bio will point
++ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
++ * @bio is not freed before the split.
+  */
+ struct bio *bio_split(struct bio *bio, int sectors,
+ 		      gfp_t gfp, struct bio_set *bs)
+@@ -1825,7 +1826,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
+ 	BUG_ON(sectors <= 0);
+ 	BUG_ON(sectors >= bio_sectors(bio));
+ 
+-	split = bio_clone_fast(bio, gfp, bs);
++	/*
++	 * Discards need a mutable bio_vec to accommodate the payload
++	 * required by the DSM TRIM and UNMAP commands.
++	 */
++	if (bio->bi_rw & REQ_DISCARD)
++		split = bio_clone_bioset(bio, gfp, bs);
++	else
++		split = bio_clone_fast(bio, gfp, bs);
++
+ 	if (!split)
+ 		return NULL;
+ 
+diff --git a/crypto/asymmetric_keys/asymmetric_keys.h b/crypto/asymmetric_keys/asymmetric_keys.h
+index f97330886d58..3f5b537ab33e 100644
+--- a/crypto/asymmetric_keys/asymmetric_keys.h
++++ b/crypto/asymmetric_keys/asymmetric_keys.h
+@@ -11,6 +11,9 @@
+ 
+ extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
+ 
++extern int __asymmetric_key_hex_to_key_id(const char *id,
++					  struct asymmetric_key_id *match_id,
++					  size_t hexlen);
+ static inline
+ const struct asymmetric_key_ids *asymmetric_key_ids(const struct key *key)
+ {
+diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
+index bcbbbd794e1d..b0e4ed23d668 100644
+--- a/crypto/asymmetric_keys/asymmetric_type.c
++++ b/crypto/asymmetric_keys/asymmetric_type.c
+@@ -104,6 +104,15 @@ static bool asymmetric_match_key_ids(
+ 	return false;
+ }
+ 
++/* helper function can be called directly with pre-allocated memory */
++inline int __asymmetric_key_hex_to_key_id(const char *id,
++				   struct asymmetric_key_id *match_id,
++				   size_t hexlen)
++{
++	match_id->len = hexlen;
++	return hex2bin(match_id->data, id, hexlen);
++}
++
+ /**
+  * asymmetric_key_hex_to_key_id - Convert a hex string into a key ID.
+  * @id: The ID as a hex string.
+@@ -111,21 +120,20 @@ static bool asymmetric_match_key_ids(
+ struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id)
+ {
+ 	struct asymmetric_key_id *match_id;
+-	size_t hexlen;
++	size_t asciihexlen;
+ 	int ret;
+ 
+ 	if (!*id)
+ 		return ERR_PTR(-EINVAL);
+-	hexlen = strlen(id);
+-	if (hexlen & 1)
++	asciihexlen = strlen(id);
++	if (asciihexlen & 1)
+ 		return ERR_PTR(-EINVAL);
+ 
+-	match_id = kmalloc(sizeof(struct asymmetric_key_id) + hexlen / 2,
++	match_id = kmalloc(sizeof(struct asymmetric_key_id) + asciihexlen / 2,
+ 			   GFP_KERNEL);
+ 	if (!match_id)
+ 		return ERR_PTR(-ENOMEM);
+-	match_id->len = hexlen / 2;
+-	ret = hex2bin(match_id->data, id, hexlen / 2);
++	ret = __asymmetric_key_hex_to_key_id(id, match_id, asciihexlen / 2);
+ 	if (ret < 0) {
+ 		kfree(match_id);
+ 		return ERR_PTR(-EINVAL);
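The refactor above only splits the conversion out into a helper; the transformation itself is unchanged. For readers unfamiliar with it, a minimal userspace stand-in for the kernel's hex2bin() (the function name and error convention below are mine):

    #include <stdio.h>

    /* Stand-in for hex2bin(): returns 0 on success, -1 on a non-hex
     * character.  'len' is the binary length, i.e. half the length of
     * the ASCII hex string, matching the callers above. */
    static int hex2bin_sketch(unsigned char *dst, const char *src, size_t len)
    {
        for (size_t i = 0; i < len; i++) {
            unsigned int byte;

            if (sscanf(src + 2 * i, "%2x", &byte) != 1)
                return -1;
            dst[i] = byte;
        }
        return 0;
    }

    int main(void)
    {
        unsigned char id[4];

        if (!hex2bin_sketch(id, "deadbeef", sizeof(id)))
            printf("%02x%02x%02x%02x\n", id[0], id[1], id[2], id[3]);
        return 0;
    }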
+diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
+index a6c42031628e..24f17e6c5904 100644
+--- a/crypto/asymmetric_keys/x509_public_key.c
++++ b/crypto/asymmetric_keys/x509_public_key.c
+@@ -28,17 +28,30 @@ static bool use_builtin_keys;
+ static struct asymmetric_key_id *ca_keyid;
+ 
+ #ifndef MODULE
++static struct {
++	struct asymmetric_key_id id;
++	unsigned char data[10];
++} cakey;
++
+ static int __init ca_keys_setup(char *str)
+ {
+ 	if (!str)		/* default system keyring */
+ 		return 1;
+ 
+ 	if (strncmp(str, "id:", 3) == 0) {
+-		struct asymmetric_key_id *p;
+-		p = asymmetric_key_hex_to_key_id(str + 3);
+-		if (p == ERR_PTR(-EINVAL))
+-			pr_err("Unparsable hex string in ca_keys\n");
+-		else if (!IS_ERR(p))
++		struct asymmetric_key_id *p = &cakey.id;
++		size_t hexlen = (strlen(str) - 3) / 2;
++		int ret;
++
++		if (hexlen == 0 || hexlen > sizeof(cakey.data)) {
++			pr_err("Missing or invalid ca_keys id\n");
++			return 1;
++		}
++
++		ret = __asymmetric_key_hex_to_key_id(str + 3, p, hexlen);
++		if (ret < 0)
++			pr_err("Unparsable ca_keys id hex string\n");
++		else
+ 			ca_keyid = p;	/* owner key 'id:xxxxxx' */
+ 	} else if (strcmp(str, "builtin") == 0) {
+ 		use_builtin_keys = true;
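The practical effect of the rework above: the ca_keys id is now parsed at boot into a fixed buffer of sizeof(cakey.data) == 10 bytes, so at most 20 hex digits are accepted after "id:"; anything longer, or an empty id, hits the "Missing or invalid" path. A small standalone model of just the bounds check, with the constants taken from the patch:

    #include <stdio.h>
    #include <string.h>

    #define CAKEY_DATA_SIZE 10  /* sizeof(cakey.data) in the patch */

    /* Returns 1 if the "id:<hex>" payload fits the static buffer.
     * The caller is assumed to have verified the "id:" prefix. */
    static int ca_keys_id_ok(const char *str)
    {
        size_t hexlen = (strlen(str) - 3) / 2;

        return hexlen != 0 && hexlen <= CAKEY_DATA_SIZE;
    }

    int main(void)
    {
        printf("%d\n", ca_keys_id_ok("id:deadbeef"));               /* 1 */
        printf("%d\n", ca_keys_id_ok("id:"));                       /* 0 */
        printf("%d\n", ca_keys_id_ok("id:000102030405060708090a")); /* 0: 11 bytes */
        return 0;
    }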
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 37fb19047603..73f056a597a9 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -352,13 +352,16 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
+ 				pdata->mmio_size = resource_size(rentry->res);
+ 			pdata->mmio_base = ioremap(rentry->res->start,
+ 						   pdata->mmio_size);
+-			if (!pdata->mmio_base)
+-				goto err_out;
+ 			break;
+ 		}
+ 
+ 	acpi_dev_free_resource_list(&resource_list);
+ 
++	if (!pdata->mmio_base) {
++		ret = -ENOMEM;
++		goto err_out;
++	}
++
+ 	pdata->dev_desc = dev_desc;
+ 
+ 	if (dev_desc->setup)
+diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
+index 87b27521fcac..7f50dd9eb1d0 100644
+--- a/drivers/acpi/acpica/aclocal.h
++++ b/drivers/acpi/acpica/aclocal.h
+@@ -213,6 +213,7 @@ struct acpi_table_list {
+ 
+ #define ACPI_TABLE_INDEX_DSDT           (0)
+ #define ACPI_TABLE_INDEX_FACS           (1)
++#define ACPI_TABLE_INDEX_X_FACS         (2)
+ 
+ struct acpi_find_context {
+ 	char *search_for;
+diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
+index 7d2486005e3f..05be59c772c7 100644
+--- a/drivers/acpi/acpica/tbfadt.c
++++ b/drivers/acpi/acpica/tbfadt.c
+@@ -350,9 +350,18 @@ void acpi_tb_parse_fadt(u32 table_index)
+ 	/* If Hardware Reduced flag is set, there is no FACS */
+ 
+ 	if (!acpi_gbl_reduced_hardware) {
+-		acpi_tb_install_fixed_table((acpi_physical_address)
+-					    acpi_gbl_FADT.Xfacs, ACPI_SIG_FACS,
+-					    ACPI_TABLE_INDEX_FACS);
++		if (acpi_gbl_FADT.facs) {
++			acpi_tb_install_fixed_table((acpi_physical_address)
++						    acpi_gbl_FADT.facs,
++						    ACPI_SIG_FACS,
++						    ACPI_TABLE_INDEX_FACS);
++		}
++		if (acpi_gbl_FADT.Xfacs) {
++			acpi_tb_install_fixed_table((acpi_physical_address)
++						    acpi_gbl_FADT.Xfacs,
++						    ACPI_SIG_FACS,
++						    ACPI_TABLE_INDEX_X_FACS);
++		}
+ 	}
+ }
+ 
+@@ -491,13 +500,9 @@ static void acpi_tb_convert_fadt(void)
+ 	acpi_gbl_FADT.header.length = sizeof(struct acpi_table_fadt);
+ 
+ 	/*
+-	 * Expand the 32-bit FACS and DSDT addresses to 64-bit as necessary.
++	 * Expand the 32-bit DSDT addresses to 64-bit as necessary.
+ 	 * Later ACPICA code will always use the X 64-bit field.
+ 	 */
+-	acpi_gbl_FADT.Xfacs = acpi_tb_select_address("FACS",
+-						     acpi_gbl_FADT.facs,
+-						     acpi_gbl_FADT.Xfacs);
+-
+ 	acpi_gbl_FADT.Xdsdt = acpi_tb_select_address("DSDT",
+ 						     acpi_gbl_FADT.dsdt,
+ 						     acpi_gbl_FADT.Xdsdt);
+diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
+index 6559a58439c5..2fb1afaacc6d 100644
+--- a/drivers/acpi/acpica/tbutils.c
++++ b/drivers/acpi/acpica/tbutils.c
+@@ -68,7 +68,8 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size);
+ 
+ acpi_status acpi_tb_initialize_facs(void)
+ {
+-	acpi_status status;
++	struct acpi_table_facs *facs32;
++	struct acpi_table_facs *facs64;
+ 
+ 	/* If Hardware Reduced flag is set, there is no FACS */
+ 
+@@ -77,11 +78,22 @@ acpi_status acpi_tb_initialize_facs(void)
+ 		return (AE_OK);
+ 	}
+ 
+-	status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+-					 ACPI_CAST_INDIRECT_PTR(struct
+-								acpi_table_header,
+-								&acpi_gbl_FACS));
+-	return (status);
++	(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
++				      ACPI_CAST_INDIRECT_PTR(struct
++							     acpi_table_header,
++							     &facs32));
++	(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_X_FACS,
++				      ACPI_CAST_INDIRECT_PTR(struct
++							     acpi_table_header,
++							     &facs64));
++
++	if (acpi_gbl_use32_bit_facs_addresses) {
++		acpi_gbl_FACS = facs32 ? facs32 : facs64;
++	} else {
++		acpi_gbl_FACS = facs64 ? facs64 : facs32;
++	}
++
++	return (AE_OK);
+ }
+ #endif				/* !ACPI_REDUCED_HARDWARE */
+ 
+@@ -101,7 +113,7 @@ acpi_status acpi_tb_initialize_facs(void)
+ u8 acpi_tb_tables_loaded(void)
+ {
+ 
+-	if (acpi_gbl_root_table_list.current_table_count >= 3) {
++	if (acpi_gbl_root_table_list.current_table_count >= 4) {
+ 		return (TRUE);
+ 	}
+ 
+@@ -357,11 +369,11 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
+ 	table_entry = ACPI_ADD_PTR(u8, table, sizeof(struct acpi_table_header));
+ 
+ 	/*
+-	 * First two entries in the table array are reserved for the DSDT
+-	 * and FACS, which are not actually present in the RSDT/XSDT - they
+-	 * come from the FADT
++	 * First three entries in the table array are reserved for the DSDT
++	 * and 32bit/64bit FACS, which are not actually present in the
++	 * RSDT/XSDT - they come from the FADT
+ 	 */
+-	acpi_gbl_root_table_list.current_table_count = 2;
++	acpi_gbl_root_table_list.current_table_count = 3;
+ 
+ 	/* Initialize the root table array from the RSDT/XSDT */
+ 
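Across the tbfadt.c and tbutils.c hunks, the net FACS selection policy can be restated as a pure function: prefer the width selected by acpi_gbl_use32_bit_facs_addresses, and fall back to whichever table the FADT actually supplied. A sketch (the helper name is mine; illustrative only):

    #include <stdbool.h>

    struct acpi_table_facs;

    /* Prefer the requested width, fall back to the one that exists,
     * so a FADT carrying only facs or only Xfacs still works. */
    static struct acpi_table_facs *
    pick_facs(struct acpi_table_facs *facs32,
              struct acpi_table_facs *facs64, bool use32)
    {
        if (use32)
            return facs32 ? facs32 : facs64;
        return facs64 ? facs64 : facs32;
    }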
+diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
+index aadb3002a2dd..b63e35d6d1bf 100644
+--- a/drivers/acpi/acpica/tbxfload.c
++++ b/drivers/acpi/acpica/tbxfload.c
+@@ -166,7 +166,8 @@ static acpi_status acpi_tb_load_namespace(void)
+ 
+ 	(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+ 	for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+-		if ((!ACPI_COMPARE_NAME
++		if (!acpi_gbl_root_table_list.tables[i].address ||
++		    (!ACPI_COMPARE_NAME
+ 		     (&(acpi_gbl_root_table_list.tables[i].signature),
+ 		      ACPI_SIG_SSDT)
+ 		     &&
+diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c
+index 083a76891889..42a32a66ef22 100644
+--- a/drivers/acpi/acpica/utxfinit.c
++++ b/drivers/acpi/acpica/utxfinit.c
+@@ -179,10 +179,12 @@ acpi_status __init acpi_enable_subsystem(u32 flags)
+ 	 * Obtain a permanent mapping for the FACS. This is required for the
+ 	 * Global Lock and the Firmware Waking Vector
+ 	 */
+-	status = acpi_tb_initialize_facs();
+-	if (ACPI_FAILURE(status)) {
+-		ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
+-		return_ACPI_STATUS(status);
++	if (!(flags & ACPI_NO_FACS_INIT)) {
++		status = acpi_tb_initialize_facs();
++		if (ACPI_FAILURE(status)) {
++			ACPI_WARNING((AE_INFO, "Could not map the FACS table"));
++			return_ACPI_STATUS(status);
++		}
+ 	}
+ #endif				/* !ACPI_REDUCED_HARDWARE */
+ 
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 5226a8b921ae..98f5316aad72 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -175,10 +175,14 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
+ 	if (!addr || !length)
+ 		return;
+ 
+-	acpi_reserve_region(addr, length, gas->space_id, 0, desc);
++	/* Resources are never freed */
++	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
++		request_region(addr, length, desc);
++	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++		request_mem_region(addr, length, desc);
+ }
+ 
+-static void __init acpi_reserve_resources(void)
++static int __init acpi_reserve_resources(void)
+ {
+ 	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
+ 		"ACPI PM1a_EVT_BLK");
+@@ -207,7 +211,10 @@ static void __init acpi_reserve_resources(void)
+ 	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
+ 		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
+ 			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
++
++	return 0;
+ }
++fs_initcall_sync(acpi_reserve_resources);
+ 
+ void acpi_os_printf(const char *fmt, ...)
+ {
+@@ -1838,7 +1845,6 @@ acpi_status __init acpi_os_initialize(void)
+ 
+ acpi_status __init acpi_os_initialize1(void)
+ {
+-	acpi_reserve_resources();
+ 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
+ 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
+ 	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index fcb7807ea8b7..f1c966e05078 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -26,7 +26,6 @@
+ #include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/ioport.h>
+-#include <linux/list.h>
+ #include <linux/slab.h>
+ 
+ #ifdef CONFIG_X86
+@@ -194,6 +193,7 @@ static bool acpi_decode_space(struct resource_win *win,
+ 	u8 iodec = attr->granularity == 0xfff ? ACPI_DECODE_10 : ACPI_DECODE_16;
+ 	bool wp = addr->info.mem.write_protect;
+ 	u64 len = attr->address_length;
++	u64 start, end, offset = 0;
+ 	struct resource *res = &win->res;
+ 
+ 	/*
+@@ -205,9 +205,6 @@ static bool acpi_decode_space(struct resource_win *win,
+ 		pr_debug("ACPI: Invalid address space min_addr_fix %d, max_addr_fix %d, len %llx\n",
+ 			 addr->min_address_fixed, addr->max_address_fixed, len);
+ 
+-	res->start = attr->minimum;
+-	res->end = attr->maximum;
+-
+ 	/*
+ 	 * For bridges that translate addresses across the bridge,
+ 	 * translation_offset is the offset that must be added to the
+@@ -215,12 +212,22 @@ static bool acpi_decode_space(struct resource_win *win,
+ 	 * primary side. Non-bridge devices must list 0 for all Address
+ 	 * Translation offset bits.
+ 	 */
+-	if (addr->producer_consumer == ACPI_PRODUCER) {
+-		res->start += attr->translation_offset;
+-		res->end += attr->translation_offset;
+-	} else if (attr->translation_offset) {
++	if (addr->producer_consumer == ACPI_PRODUCER)
++		offset = attr->translation_offset;
++	else if (attr->translation_offset)
+ 		pr_debug("ACPI: translation_offset(%lld) is invalid for non-bridge device.\n",
+ 			 attr->translation_offset);
++	start = attr->minimum + offset;
++	end = attr->maximum + offset;
++
++	win->offset = offset;
++	res->start = start;
++	res->end = end;
++	if (sizeof(resource_size_t) < sizeof(u64) &&
++	    (offset != win->offset || start != res->start || end != res->end)) {
++		pr_warn("acpi resource window [%#llx-%#llx] ignored, not CPU addressable\n",
++			attr->minimum, attr->maximum);
++		return false;
+ 	}
+ 
+ 	switch (addr->resource_type) {
+@@ -237,8 +244,6 @@ static bool acpi_decode_space(struct resource_win *win,
+ 		return false;
+ 	}
+ 
+-	win->offset = attr->translation_offset;
+-
+ 	if (addr->producer_consumer == ACPI_PRODUCER)
+ 		res->flags |= IORESOURCE_WINDOW;
+ 
+@@ -622,162 +627,3 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ 	return (type & types) ? 0 : 1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
+-
+-struct reserved_region {
+-	struct list_head node;
+-	u64 start;
+-	u64 end;
+-};
+-
+-static LIST_HEAD(reserved_io_regions);
+-static LIST_HEAD(reserved_mem_regions);
+-
+-static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
+-			 char *desc)
+-{
+-	unsigned int length = end - start + 1;
+-	struct resource *res;
+-
+-	res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
+-		request_region(start, length, desc) :
+-		request_mem_region(start, length, desc);
+-	if (!res)
+-		return -EIO;
+-
+-	res->flags &= ~flags;
+-	return 0;
+-}
+-
+-static int add_region_before(u64 start, u64 end, u8 space_id,
+-			     unsigned long flags, char *desc,
+-			     struct list_head *head)
+-{
+-	struct reserved_region *reg;
+-	int error;
+-
+-	reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+-	if (!reg)
+-		return -ENOMEM;
+-
+-	error = request_range(start, end, space_id, flags, desc);
+-	if (error)
+-		return error;
+-
+-	reg->start = start;
+-	reg->end = end;
+-	list_add_tail(&reg->node, head);
+-	return 0;
+-}
+-
+-/**
+- * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
+- * @start: Starting address of the region.
+- * @length: Length of the region.
+- * @space_id: Identifier of address space to reserve the region from.
+- * @flags: Resource flags to clear for the region after requesting it.
+- * @desc: Region description (for messages).
+- *
+- * Reserve an I/O or memory region as a system resource to prevent others from
+- * using it.  If the new region overlaps with one of the regions (in the given
+- * address space) already reserved by this routine, only the non-overlapping
+- * parts of it will be reserved.
+- *
+- * Returned is either 0 (success) or a negative error code indicating a resource
+- * reservation problem.  It is the code of the first encountered error, but the
+- * routine doesn't abort until it has attempted to request all of the parts of
+- * the new region that don't overlap with other regions reserved previously.
+- *
+- * The resources requested by this routine are never released.
+- */
+-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+-			unsigned long flags, char *desc)
+-{
+-	struct list_head *regions;
+-	struct reserved_region *reg;
+-	u64 end = start + length - 1;
+-	int ret = 0, error = 0;
+-
+-	if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+-		regions = &reserved_io_regions;
+-	else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+-		regions = &reserved_mem_regions;
+-	else
+-		return -EINVAL;
+-
+-	if (list_empty(regions))
+-		return add_region_before(start, end, space_id, flags, desc, regions);
+-
+-	list_for_each_entry(reg, regions, node)
+-		if (reg->start == end + 1) {
+-			/* The new region can be prepended to this one. */
+-			ret = request_range(start, end, space_id, flags, desc);
+-			if (!ret)
+-				reg->start = start;
+-
+-			return ret;
+-		} else if (reg->start > end) {
+-			/* No overlap.  Add the new region here and get out. */
+-			return add_region_before(start, end, space_id, flags,
+-						 desc, &reg->node);
+-		} else if (reg->end == start - 1) {
+-			goto combine;
+-		} else if (reg->end >= start) {
+-			goto overlap;
+-		}
+-
+-	/* The new region goes after the last existing one. */
+-	return add_region_before(start, end, space_id, flags, desc, regions);
+-
+- overlap:
+-	/*
+-	 * The new region overlaps an existing one.
+-	 *
+-	 * The head part of the new region immediately preceding the existing
+-	 * overlapping one can be combined with it right away.
+-	 */
+-	if (reg->start > start) {
+-		error = request_range(start, reg->start - 1, space_id, flags, desc);
+-		if (error)
+-			ret = error;
+-		else
+-			reg->start = start;
+-	}
+-
+- combine:
+-	/*
+-	 * The new region is adjacent to an existing one.  If it extends beyond
+-	 * that region all the way to the next one, it is possible to combine
+-	 * all three of them.
+-	 */
+-	while (reg->end < end) {
+-		struct reserved_region *next = NULL;
+-		u64 a = reg->end + 1, b = end;
+-
+-		if (!list_is_last(&reg->node, regions)) {
+-			next = list_next_entry(reg, node);
+-			if (next->start <= end)
+-				b = next->start - 1;
+-		}
+-		error = request_range(a, b, space_id, flags, desc);
+-		if (!error) {
+-			if (next && next->start == b + 1) {
+-				reg->end = next->end;
+-				list_del(&next->node);
+-				kfree(next);
+-			} else {
+-				reg->end = end;
+-				break;
+-			}
+-		} else if (next) {
+-			if (!ret)
+-				ret = error;
+-
+-			reg = next;
+-		} else {
+-			break;
+-		}
+-	}
+-
+-	return ret ? ret : error;
+-}
+-EXPORT_SYMBOL_GPL(acpi_reserve_region);
+diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
+index 577849c6611a..41c99be9bd41 100644
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
+ 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
+ 					 dev->max_sectors);
+ 
++	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
++		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
++					 dev->max_sectors);
++
+ 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
+ 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
+ 
+@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
+ 	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_HORKAGE_MAX_SEC_LBA48 },
+ 
++	/*
++	 * Causes silent data corruption with higher max sects.
++	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
++	 */
++	{ "ST380013AS",		"3.20",		ATA_HORKAGE_MAX_SEC_1024 },
++
+ 	/* Devices we expect to fail diagnostics */
+ 
+ 	/* Devices where NCQ should be avoided */
+@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
+ 						ATA_HORKAGE_FIRMWARE_WARN },
+ 
+-	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
++	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
+ 	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
++	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
+ 
+ 	/* Blacklist entries taken from Silicon Image 3124/3132
+ 	   Windows driver .inf file - also several Linux problem reports */
+@@ -4225,11 +4236,11 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_HORKAGE_NOSETXFER },
+ 
+ 	/* devices that don't properly handle queued TRIM commands */
+-	{ "Micron_M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
++	{ "Micron_M500_*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Crucial_CT*M500*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+-	{ "Micron_M5[15]0*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
++	{ "Micron_M5[15]0_*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 	{ "Crucial_CT*M550*",		"MU01",	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
+ 	{ "Samsung SSD 8*",		NULL,	ATA_HORKAGE_NO_NCQ_TRIM |
+ 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ 
++	/* devices that don't properly handle TRIM commands */
++	{ "SuperSSpeed S238*",		NULL,	ATA_HORKAGE_NOTRIM, },
++
+ 	/*
+ 	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+ 	 * (Return Zero After Trim) flags in the ATA Command Set are
+@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
+ 	else /* In the ancient relic department - skip all of this */
+ 		return 0;
+ 
+-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
++	/* On some disks, this command causes spin-up, so we need longer timeout */
++	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
+ 
+ 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ 	return err_mask;
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index cf0022ec07f2..7465031a893c 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -1507,16 +1507,21 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+ {
+ 	struct ata_taskfile tf;
+ 	unsigned int err_mask;
++	bool dma = false;
+ 
+ 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+ 
++retry:
+ 	ata_tf_init(dev, &tf);
+-	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id)) {
++	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
++	    !(dev->horkage & ATA_HORKAGE_NO_NCQ_LOG)) {
+ 		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
+ 		tf.protocol = ATA_PROT_DMA;
++		dma = true;
+ 	} else {
+ 		tf.command = ATA_CMD_READ_LOG_EXT;
+ 		tf.protocol = ATA_PROT_PIO;
++		dma = false;
+ 	}
+ 	tf.lbal = log;
+ 	tf.lbam = page;
+@@ -1527,6 +1532,12 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
+ 	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
+ 				     buf, sectors * ATA_SECT_SIZE, 0);
+ 
++	if (err_mask && dma) {
++		dev->horkage |= ATA_HORKAGE_NO_NCQ_LOG;
++		ata_dev_warn(dev, "READ LOG DMA EXT failed, trying unqueued\n");
++		goto retry;
++	}
++
+ 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
+ 	return err_mask;
+ }
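The retry logic above is a recurring libata idiom: attempt the DMA flavour once, and on failure set a sticky horkage bit so every later call goes straight to the PIO variant. Boiled down to a standalone model (all names below are mine):

    #include <stdbool.h>
    #include <stdio.h>

    #define NO_DMA_LOG 0x1
    static unsigned int dev_horkage;

    static int issue(bool dma) { return dma ? -1 : 0; } /* pretend DMA fails */

    static int read_log(void)
    {
        bool dma;
    retry:
        dma = !(dev_horkage & NO_DMA_LOG);
        if (issue(dma)) {
            if (dma) {
                dev_horkage |= NO_DMA_LOG; /* sticky: no more DMA tries */
                goto retry;
            }
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        read_log();
        printf("horkage after first call: %#x\n", dev_horkage); /* 0x1 */
        return 0;
    }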
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 3131adcc1f87..641a61a59e89 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
+ 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
+ 		rbuf[15] = lowest_aligned;
+ 
+-		if (ata_id_has_trim(args->id)) {
++		if (ata_id_has_trim(args->id) &&
++		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
+ 			rbuf[14] |= 0x80; /* LBPME */
+ 
+ 			if (ata_id_has_zero_after_trim(args->id) &&
+diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
+index 3227b7c8a05f..e2d94972962d 100644
+--- a/drivers/ata/libata-transport.c
++++ b/drivers/ata/libata-transport.c
+@@ -560,6 +560,29 @@ show_ata_dev_gscr(struct device *dev,
+ 
+ static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL);
+ 
++static ssize_t
++show_ata_dev_trim(struct device *dev,
++		  struct device_attribute *attr, char *buf)
++{
++	struct ata_device *ata_dev = transport_class_to_dev(dev);
++	unsigned char *mode;
++
++	if (!ata_id_has_trim(ata_dev->id))
++		mode = "unsupported";
++	else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
++		mode = "forced_unsupported";
++	else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
++		mode = "forced_unqueued";
++	else if (ata_fpdma_dsm_supported(ata_dev))
++		mode = "queued";
++	else
++		mode = "unqueued";
++
++	return snprintf(buf, 20, "%s\n", mode);
++}
++
++static DEVICE_ATTR(trim, S_IRUGO, show_ata_dev_trim, NULL);
++
+ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
+ 			       "ata_device", NULL, NULL, NULL);
+ 
+@@ -733,6 +756,7 @@ struct scsi_transport_template *ata_attach_transport(void)
+ 	SETUP_DEV_ATTRIBUTE(ering);
+ 	SETUP_DEV_ATTRIBUTE(id);
+ 	SETUP_DEV_ATTRIBUTE(gscr);
++	SETUP_DEV_ATTRIBUTE(trim);
+ 	BUG_ON(count > ATA_DEV_ATTRS);
+ 	i->dev_attrs[count] = NULL;
+ 
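For completeness: the attribute registered above surfaces through the ata_device transport class, so reading something like /sys/class/ata_device/dev1.0/trim (the exact path depends on port/device numbering) returns exactly one of unsupported, forced_unsupported, forced_unqueued, queued or unqueued, which makes it easy to check whether a drive was caught by the NOTRIM or NO_NCQ_TRIM entries added elsewhere in this patch.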
+diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
+index 171841ad1008..4d1d9de4f9bf 100644
+--- a/drivers/base/firmware_class.c
++++ b/drivers/base/firmware_class.c
+@@ -544,10 +544,8 @@ static void fw_dev_release(struct device *dev)
+ 	kfree(fw_priv);
+ }
+ 
+-static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
++static int do_firmware_uevent(struct firmware_priv *fw_priv, struct kobj_uevent_env *env)
+ {
+-	struct firmware_priv *fw_priv = to_firmware_priv(dev);
+-
+ 	if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
+ 		return -ENOMEM;
+ 	if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
+@@ -558,6 +556,18 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
+ 	return 0;
+ }
+ 
++static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
++{
++	struct firmware_priv *fw_priv = to_firmware_priv(dev);
++	int err = 0;
++
++	mutex_lock(&fw_lock);
++	if (fw_priv->buf)
++		err = do_firmware_uevent(fw_priv, env);
++	mutex_unlock(&fw_lock);
++	return err;
++}
++
+ static struct class firmware_class = {
+ 	.name		= "firmware",
+ 	.class_attrs	= firmware_class_attrs,
+diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
+index 7fdd0172605a..c7b0fcebf168 100644
+--- a/drivers/base/power/clock_ops.c
++++ b/drivers/base/power/clock_ops.c
+@@ -93,7 +93,7 @@ static int __pm_clk_add(struct device *dev, const char *con_id,
+ 			return -ENOMEM;
+ 		}
+ 	} else {
+-		if (IS_ERR(ce->clk) || !__clk_get(clk)) {
++		if (IS_ERR(clk) || !__clk_get(clk)) {
+ 			kfree(ce);
+ 			return -ENOENT;
+ 		}
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index d7173cb1ea76..cef6fa83a274 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -86,8 +86,6 @@ static DEFINE_MUTEX(loop_index_mutex);
+ static int max_part;
+ static int part_shift;
+ 
+-static struct workqueue_struct *loop_wq;
+-
+ static int transfer_xor(struct loop_device *lo, int cmd,
+ 			struct page *raw_page, unsigned raw_off,
+ 			struct page *loop_page, unsigned loop_off,
+@@ -725,6 +723,12 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
+ 	size = get_loop_size(lo, file);
+ 	if ((loff_t)(sector_t)size != size)
+ 		goto out_putf;
++	error = -ENOMEM;
++	lo->wq = alloc_workqueue("kloopd%d",
++			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 16,
++			lo->lo_number);
++	if (!lo->wq)
++		goto out_putf;
+ 
+ 	error = 0;
+ 
+@@ -872,6 +876,8 @@ static int loop_clr_fd(struct loop_device *lo)
+ 	lo->lo_flags = 0;
+ 	if (!part_shift)
+ 		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
++	destroy_workqueue(lo->wq);
++	lo->wq = NULL;
+ 	mutex_unlock(&lo->lo_ctl_mutex);
+ 	/*
+ 	 * Need not hold lo_ctl_mutex to fput backing file.
+@@ -1425,9 +1431,13 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 		const struct blk_mq_queue_data *bd)
+ {
+ 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
++	struct loop_device *lo = cmd->rq->q->queuedata;
+ 
+ 	blk_mq_start_request(bd->rq);
+ 
++	if (lo->lo_state != Lo_bound)
++		return -EIO;
++
+ 	if (cmd->rq->cmd_flags & REQ_WRITE) {
+ 		struct loop_device *lo = cmd->rq->q->queuedata;
+ 		bool need_sched = true;
+@@ -1441,9 +1451,9 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ 		spin_unlock_irq(&lo->lo_lock);
+ 
+ 		if (need_sched)
+-			queue_work(loop_wq, &lo->write_work);
++			queue_work(lo->wq, &lo->write_work);
+ 	} else {
+-		queue_work(loop_wq, &cmd->read_work);
++		queue_work(lo->wq, &cmd->read_work);
+ 	}
+ 
+ 	return BLK_MQ_RQ_QUEUE_OK;
+@@ -1455,9 +1465,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
+ 	struct loop_device *lo = cmd->rq->q->queuedata;
+ 	int ret = -EIO;
+ 
+-	if (lo->lo_state != Lo_bound)
+-		goto failed;
+-
+ 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ 		goto failed;
+ 
+@@ -1806,13 +1813,6 @@ static int __init loop_init(void)
+ 		goto misc_out;
+ 	}
+ 
+-	loop_wq = alloc_workqueue("kloopd",
+-			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_UNBOUND, 0);
+-	if (!loop_wq) {
+-		err = -ENOMEM;
+-		goto misc_out;
+-	}
+-
+ 	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
+ 				  THIS_MODULE, loop_probe, NULL, NULL);
+ 
+@@ -1850,8 +1850,6 @@ static void __exit loop_exit(void)
+ 	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
+ 	unregister_blkdev(LOOP_MAJOR, "loop");
+ 
+-	destroy_workqueue(loop_wq);
+-
+ 	misc_deregister(&loop_misc);
+ }
+ 
+diff --git a/drivers/block/loop.h b/drivers/block/loop.h
+index 301c27f8323f..49564edf5581 100644
+--- a/drivers/block/loop.h
++++ b/drivers/block/loop.h
+@@ -54,6 +54,7 @@ struct loop_device {
+ 	gfp_t		old_gfp_mask;
+ 
+ 	spinlock_t		lo_lock;
++	struct workqueue_struct *wq;
+ 	struct list_head	write_cmd_head;
+ 	struct work_struct	write_work;
+ 	bool			write_started;
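A note on the design choice in the loop hunks: moving from one global "kloopd" workqueue to a per-device WQ_MEM_RECLAIM queue gives every bound device its own forward-progress guarantee (each such queue gets a rescuer under memory pressure), which matters when loop devices are stacked on top of one another; it also lets loop_clr_fd() release the queue as soon as the device is unbound instead of keeping a global one alive for the life of the module. The stacking rationale is my reading of the change, not spelled out in the patch itself.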
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index ec6c5c6e1ac9..53f253574abe 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2001,11 +2001,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
+ 	rbd_assert(obj_request_type_valid(type));
+ 
+ 	size = strlen(object_name) + 1;
+-	name = kmalloc(size, GFP_KERNEL);
++	name = kmalloc(size, GFP_NOIO);
+ 	if (!name)
+ 		return NULL;
+ 
+-	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
++	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
+ 	if (!obj_request) {
+ 		kfree(name);
+ 		return NULL;
+diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
+index 4bba86677adc..3f146c9911c1 100644
+--- a/drivers/bluetooth/btbcm.c
++++ b/drivers/bluetooth/btbcm.c
+@@ -378,12 +378,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
+ 
+ 	/* Read Verbose Config Version Info */
+ 	skb = btbcm_read_verbose_config(hdev);
+-	if (IS_ERR(skb))
+-		return PTR_ERR(skb);
+-
+-	BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+-		get_unaligned_le16(skb->data + 5));
+-	kfree_skb(skb);
++	if (!IS_ERR(skb)) {
++		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
++			get_unaligned_le16(skb->data + 5));
++		kfree_skb(skb);
++	}
+ 
+ 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
+ 
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 420cc9f3eb76..c65501539224 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -268,7 +268,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC },
+ 
+ 	/* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */
+-	{ USB_DEVICE(0x1300, 0x0001), .driver_info = BTUSB_SWAVE },
++	{ USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE },
+ 
+ 	/* Digianswer devices */
+ 	{ USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER },
+@@ -1993,6 +1993,8 @@ static int btusb_setup_intel(struct hci_dev *hdev)
+ 	}
+ 	fw_ptr = fw->data;
+ 
++	kfree_skb(skb);
++
+ 	/* This Intel specific command enables the manufacturer mode of the
+ 	 * controller.
+ 	 *
+@@ -2334,6 +2336,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ 	struct intel_boot_params *params;
+ 	const struct firmware *fw;
+ 	const u8 *fw_ptr;
++	u32 frag_len;
+ 	char fwname[64];
+ 	ktime_t calltime, delta, rettime;
+ 	unsigned long long duration;
+@@ -2540,24 +2543,33 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
+ 	}
+ 
+ 	fw_ptr = fw->data + 644;
++	frag_len = 0;
+ 
+ 	while (fw_ptr - fw->data < fw->size) {
+-		struct hci_command_hdr *cmd = (void *)fw_ptr;
+-		u8 cmd_len;
++		struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
+ 
+-		cmd_len = sizeof(*cmd) + cmd->plen;
++		frag_len += sizeof(*cmd) + cmd->plen;
+ 
+-		/* Send each command from the firmware data buffer as
+-		 * a single Data fragment.
++		/* The parameter length of the secure send command requires
++		 * a 4 byte alignment. It so happens that the firmware file
++		 * contains proper Intel_NOP commands to align the fragments
++		 * as needed.
++		 *
++		 * Send the set of commands with 4 byte alignment from the
++		 * firmware data buffer as a single Data fragment.
+ 		 */
+-		err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr);
+-		if (err < 0) {
+-			BT_ERR("%s: Failed to send firmware data (%d)",
+-			       hdev->name, err);
+-			goto done;
+-		}
++		if (!(frag_len % 4)) {
++			err = btusb_intel_secure_send(hdev, 0x01, frag_len,
++						      fw_ptr);
++			if (err < 0) {
++				BT_ERR("%s: Failed to send firmware data (%d)",
++				       hdev->name, err);
++				goto done;
++			}
+ 
+-		fw_ptr += cmd_len;
++			fw_ptr += frag_len;
++			frag_len = 0;
++		}
+ 	}
+ 
+ 	set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
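The rewritten download loop above no longer sends one HCI command per USB fragment; it accumulates consecutive commands until the running length is a multiple of four, which the firmware files guarantee is always reachable via the padding Intel_NOP commands. A standalone model of the batching over a fake command stream (struct layout and names are mine):

    #include <stdint.h>
    #include <stdio.h>

    struct cmd_hdr { uint16_t opcode; uint8_t plen; } __attribute__((packed));

    /* Walk a buffer of concatenated commands, flushing a fragment
     * whenever its accumulated length hits a 4-byte boundary. */
    static void send_fragments(const uint8_t *data, size_t size)
    {
        const uint8_t *ptr = data;
        size_t frag_len = 0;

        while ((size_t)(ptr - data) < size) {
            const struct cmd_hdr *cmd = (const void *)(ptr + frag_len);

            frag_len += sizeof(*cmd) + cmd->plen;
            if (!(frag_len % 4)) {
                printf("flush %zu bytes\n", frag_len);
                ptr += frag_len;
                frag_len = 0;
            }
        }
    }

    int main(void)
    {
        /* two 3-byte headers with one payload byte each: 4 + 4 bytes */
        uint8_t buf[8] = { 0, 0, 1, 0xaa, 0, 0, 1, 0xbb };

        send_fragments(buf, sizeof(buf)); /* flush 4, flush 4 */
        return 0;
    }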
+diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
+index aaa0f2a87118..60397ec77ff7 100644
+--- a/drivers/bus/arm-ccn.c
++++ b/drivers/bus/arm-ccn.c
+@@ -212,7 +212,7 @@ static int arm_ccn_node_to_xp_port(int node)
+ 
+ static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
+ {
+-	*config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
++	*config &= ~((0xff << 0) | (0xff << 8) | (0x3 << 24));
+ 	*config |= (node_xp << 0) | (type << 8) | (port << 24);
+ }
+ 
+diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
+index 0b4188b9af7c..c6dea3f6917b 100644
+--- a/drivers/char/agp/intel-gtt.c
++++ b/drivers/char/agp/intel-gtt.c
+@@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void)
+ 	/* Query intel_iommu to see if we need the workaround. Presumably that
+ 	 * was loaded first.
+ 	 */
+-	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
++	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG ||
+ 	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+ 	     intel_iommu_gfx_mapped)
+ 		return 1;
+diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
+index 283f00a7f036..1082d4bb016a 100644
+--- a/drivers/char/tpm/tpm-chip.c
++++ b/drivers/char/tpm/tpm-chip.c
+@@ -129,8 +129,9 @@ struct tpm_chip *tpmm_chip_alloc(struct device *dev,
+ 
+ 	device_initialize(&chip->dev);
+ 
+-	chip->cdev.owner = chip->pdev->driver->owner;
+ 	cdev_init(&chip->cdev, &tpm_fops);
++	chip->cdev.owner = chip->pdev->driver->owner;
++	chip->cdev.kobj.parent = &chip->dev.kobj;
+ 
+ 	return chip;
+ }
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index b26ceee3585e..1267322595da 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -233,6 +233,14 @@ static int crb_acpi_add(struct acpi_device *device)
+ 		return -ENODEV;
+ 	}
+ 
++	/* At least some versions of AMI BIOS have a bug where the TPM2
++	 * table has a zero address for the control area, so we must fail.
++	 */
++	if (!buf->control_area_pa) {
++		dev_err(dev, "TPM2 ACPI table has a zero address for the control area\n");
++		return -EINVAL;
++	}
++
+ 	if (buf->hdr.length < sizeof(struct acpi_tpm2)) {
+ 		dev_err(dev, "TPM2 ACPI table has wrong size");
+ 		return -EINVAL;
+@@ -267,7 +275,7 @@ static int crb_acpi_add(struct acpi_device *device)
+ 
+ 	memcpy_fromio(&pa, &priv->cca->cmd_pa, 8);
+ 	pa = le64_to_cpu(pa);
+-	priv->cmd = devm_ioremap_nocache(dev, le64_to_cpu(pa),
++	priv->cmd = devm_ioremap_nocache(dev, pa,
+ 					 ioread32(&priv->cca->cmd_size));
+ 	if (!priv->cmd) {
+ 		dev_err(dev, "ioremap of the command buffer failed\n");
+@@ -276,7 +284,7 @@ static int crb_acpi_add(struct acpi_device *device)
+ 
+ 	memcpy_fromio(&pa, &priv->cca->rsp_pa, 8);
+ 	pa = le64_to_cpu(pa);
+-	priv->rsp = devm_ioremap_nocache(dev, le64_to_cpu(pa),
++	priv->rsp = devm_ioremap_nocache(dev, pa,
+ 					 ioread32(&priv->cca->rsp_size));
+ 	if (!priv->rsp) {
+ 		dev_err(dev, "ioremap of the response buffer failed\n");
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 42ffa5e7a1e0..27ebf9511cb4 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -578,6 +578,9 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 		goto cleanup;
+ 	}
+ 
++	ibmvtpm->dev = dev;
++	ibmvtpm->vdev = vio_dev;
++
+ 	crq_q = &ibmvtpm->crq_queue;
+ 	crq_q->crq_addr = (struct ibmvtpm_crq *)get_zeroed_page(GFP_KERNEL);
+ 	if (!crq_q->crq_addr) {
+@@ -622,8 +625,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ 
+ 	crq_q->index = 0;
+ 
+-	ibmvtpm->dev = dev;
+-	ibmvtpm->vdev = vio_dev;
+ 	TPM_VPRIV(chip) = (void *)ibmvtpm;
+ 
+ 	spin_lock_init(&ibmvtpm->rtce_lock);
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 5b0f41868b42..9f9cadd00bc8 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -230,11 +230,12 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
+ 	if (!c)
+ 		return;
+ 
++	/* This should be JSON format, i.e. elements separated with a comma */
+ 	seq_printf(s, "\"%s\": { ", c->name);
+ 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
+ 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+-	seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
+-	seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
++	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
++	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
+ 	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ }
+ 
+diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
+index b95d17fbb8d7..92936f0912d2 100644
+--- a/drivers/clk/qcom/clk-rcg2.c
++++ b/drivers/clk/qcom/clk-rcg2.c
+@@ -530,19 +530,16 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
+ 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ 	struct freq_tbl f = *rcg->freq_tbl;
+ 	const struct frac_entry *frac = frac_table_pixel;
+-	unsigned long request, src_rate;
++	unsigned long request;
+ 	int delta = 100000;
+ 	u32 mask = BIT(rcg->hid_width) - 1;
+ 	u32 hid_div;
+-	int index = qcom_find_src_index(hw, rcg->parent_map, f.src);
+-	struct clk *parent = clk_get_parent_by_index(hw->clk, index);
+ 
+ 	for (; frac->num; frac++) {
+ 		request = (rate * frac->den) / frac->num;
+ 
+-		src_rate = __clk_round_rate(parent, request);
+-		if ((src_rate < (request - delta)) ||
+-			(src_rate > (request + delta)))
++		if ((parent_rate < (request - delta)) ||
++			(parent_rate > (request + delta)))
+ 			continue;
+ 
+ 		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
+diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
+index d86bc46b93bd..0a1df821860f 100644
+--- a/drivers/clk/ti/clk-dra7-atl.c
++++ b/drivers/clk/ti/clk-dra7-atl.c
+@@ -252,6 +252,11 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
+ 		}
+ 
+ 		clk = of_clk_get_from_provider(&clkspec);
++		if (IS_ERR(clk)) {
++			pr_err("%s: failed to get atl clock %d from provider\n",
++			       __func__, i);
++			return PTR_ERR(clk);
++		}
+ 
+ 		cdesc = to_atl_desc(__clk_get_hw(clk));
+ 		cdesc->cinfo = cinfo;
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index 83564c9cfdbe..c844616028d2 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -466,15 +466,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
+ 
+ 	if (mct_int_type == MCT_INT_SPI) {
+-		evt->irq = mct_irqs[MCT_L0_IRQ + cpu];
+-		if (request_irq(evt->irq, exynos4_mct_tick_isr,
+-				IRQF_TIMER | IRQF_NOBALANCING,
+-				evt->name, mevt)) {
+-			pr_err("exynos-mct: cannot register IRQ %d\n",
+-				evt->irq);
++
++		if (evt->irq == -1)
+ 			return -EIO;
+-		}
+-		irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu));
++
++		irq_force_affinity(evt->irq, cpumask_of(cpu));
++		enable_irq(evt->irq);
+ 	} else {
+ 		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
+ 	}
+@@ -487,10 +484,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt)
+ static void exynos4_local_timer_stop(struct clock_event_device *evt)
+ {
+ 	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+-	if (mct_int_type == MCT_INT_SPI)
+-		free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick));
+-	else
++	if (mct_int_type == MCT_INT_SPI) {
++		if (evt->irq != -1)
++			disable_irq_nosync(evt->irq);
++	} else {
+ 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
++	}
+ }
+ 
+ static int exynos4_mct_cpu_notify(struct notifier_block *self,
+@@ -522,7 +521,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
+ 
+ static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+ {
+-	int err;
++	int err, cpu;
+ 	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+ 	struct clk *mct_clk, *tick_clk;
+ 
+@@ -549,7 +548,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
+ 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
+ 		     mct_irqs[MCT_L0_IRQ], err);
+ 	} else {
+-		irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0));
++		for_each_possible_cpu(cpu) {
++			int mct_irq = mct_irqs[MCT_L0_IRQ + cpu];
++			struct mct_clock_event_device *pcpu_mevt =
++				per_cpu_ptr(&percpu_mct_tick, cpu);
++
++			pcpu_mevt->evt.irq = -1;
++
++			irq_set_status_flags(mct_irq, IRQ_NOAUTOEN);
++			if (request_irq(mct_irq,
++					exynos4_mct_tick_isr,
++					IRQF_TIMER | IRQF_NOBALANCING,
++					pcpu_mevt->name, pcpu_mevt)) {
++				pr_err("exynos-mct: cannot register IRQ (cpu%d)\n",
++									cpu);
++
++				continue;
++			}
++			pcpu_mevt->evt.irq = mct_irq;
++		}
+ 	}
+ 
+ 	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
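The restructuring above follows a common genirq pattern for per-cpu timers: request every local-timer line once at init with auto-enable suppressed, then merely enable and disable it from the hotplug callbacks. As a reduced kernel-style fragment (not standalone; error handling trimmed):

    /* init: request each per-cpu line once, leave it disabled */
    irq_set_status_flags(irq, IRQ_NOAUTOEN);
    request_irq(irq, handler, IRQF_TIMER | IRQF_NOBALANCING, name, dev);

    /* cpu coming online */
    irq_force_affinity(irq, cpumask_of(cpu));
    enable_irq(irq);

    /* cpu going offline */
    disable_irq_nosync(irq);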
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 1c56001df676..50f1b422dee3 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -273,7 +273,8 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 	dma_cookie_t cookie = 0;
+ 	int busy = mv_chan_is_busy(mv_chan);
+ 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
+-	int seen_current = 0;
++	int current_cleaned = 0;
++	struct mv_xor_desc *hw_desc;
+ 
+ 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
+@@ -285,38 +286,57 @@ static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+ 
+ 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
+ 					chain_node) {
+-		prefetch(_iter);
+-		prefetch(&_iter->async_tx);
+ 
+-		/* do not advance past the current descriptor loaded into the
+-		 * hardware channel, subsequent descriptors are either in
+-		 * process or have not been submitted
+-		 */
+-		if (seen_current)
+-			break;
++		/* clean finished descriptors */
++		hw_desc = iter->hw_desc;
++		if (hw_desc->status & XOR_DESC_SUCCESS) {
++			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
++								cookie);
+ 
+-		/* stop the search if we reach the current descriptor and the
+-		 * channel is busy
+-		 */
+-		if (iter->async_tx.phys == current_desc) {
+-			seen_current = 1;
+-			if (busy)
++			/* done processing desc, clean slot */
++			mv_xor_clean_slot(iter, mv_chan);
++
++			/* break if we just cleaned the current descriptor */
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 1;
++				break;
++			}
++		} else {
++			if (iter->async_tx.phys == current_desc) {
++				current_cleaned = 0;
+ 				break;
++			}
+ 		}
+-
+-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
+-
+-		if (mv_xor_clean_slot(iter, mv_chan))
+-			break;
+ 	}
+ 
+ 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
+-		struct mv_xor_desc_slot *chain_head;
+-		chain_head = list_entry(mv_chan->chain.next,
+-					struct mv_xor_desc_slot,
+-					chain_node);
+-
+-		mv_xor_start_new_chain(mv_chan, chain_head);
++		if (current_cleaned) {
++			/*
++			 * current descriptor cleaned and removed, run
++			 * from list head
++			 */
++			iter = list_entry(mv_chan->chain.next,
++					  struct mv_xor_desc_slot,
++					  chain_node);
++			mv_xor_start_new_chain(mv_chan, iter);
++		} else {
++			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
++				/*
++				 * descriptors are still waiting after
++				 * current, trigger them
++				 */
++				iter = list_entry(iter->chain_node.next,
++						  struct mv_xor_desc_slot,
++						  chain_node);
++				mv_xor_start_new_chain(mv_chan, iter);
++			} else {
++				/*
++				 * some descriptors are still waiting
++				 * to be cleaned
++				 */
++				tasklet_schedule(&mv_chan->irq_tasklet);
++			}
++		}
+ 	}
+ 
+ 	if (cookie > 0)
+diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
+index 91958dba39a2..0e302b3a33ad 100644
+--- a/drivers/dma/mv_xor.h
++++ b/drivers/dma/mv_xor.h
+@@ -31,6 +31,7 @@
+ #define XOR_OPERATION_MODE_XOR		0
+ #define XOR_OPERATION_MODE_MEMCPY	2
+ #define XOR_DESCRIPTOR_SWAP		BIT(14)
++#define XOR_DESC_SUCCESS		0x40000000
+ 
+ #define XOR_DESC_DMA_OWNED		BIT(31)
+ #define XOR_DESC_EOD_INT_EN		BIT(31)
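Taken together, the two mv_xor hunks switch the cleanup strategy from position-based inference (stop at the engine's current descriptor) to trusting the per-descriptor XOR_DESC_SUCCESS bit the hardware writes back. In reduced form (the real code additionally tracks whether the current descriptor was among the ones cleaned, to decide between restarting the chain and rescheduling the tasklet):

    #define XOR_DESC_SUCCESS 0x40000000

    struct hw_desc { unsigned int status; struct hw_desc *next; };

    /* Reduced model: complete descriptors the engine has flagged done;
     * the first unflagged one ends the walk. */
    static void cleanup(struct hw_desc *chain)
    {
        struct hw_desc *d;

        for (d = chain; d; d = d->next) {
            if (!(d->status & XOR_DESC_SUCCESS))
                break;
            /* run completion actions and reclaim d here */
        }
    }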
+diff --git a/drivers/edac/octeon_edac-l2c.c b/drivers/edac/octeon_edac-l2c.c
+index 7e98084d3645..afea7fc625cc 100644
+--- a/drivers/edac/octeon_edac-l2c.c
++++ b/drivers/edac/octeon_edac-l2c.c
+@@ -151,7 +151,7 @@ static int octeon_l2c_probe(struct platform_device *pdev)
+ 	l2c->ctl_name = "octeon_l2c_err";
+ 
+ 
+-	if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
++	if (OCTEON_IS_OCTEON1PLUS()) {
+ 		union cvmx_l2t_err l2t_err;
+ 		union cvmx_l2d_err l2d_err;
+ 
+diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
+index bb19e0732681..cda6dab5067a 100644
+--- a/drivers/edac/octeon_edac-lmc.c
++++ b/drivers/edac/octeon_edac-lmc.c
+@@ -234,7 +234,7 @@ static int octeon_lmc_edac_probe(struct platform_device *pdev)
+ 	layers[0].size = 1;
+ 	layers[0].is_virt_csrow = false;
+ 
+-	if (OCTEON_IS_MODEL(OCTEON_FAM_1_PLUS)) {
++	if (OCTEON_IS_OCTEON1PLUS()) {
+ 		union cvmx_lmcx_mem_cfg0 cfg0;
+ 
+ 		cfg0.u64 = cvmx_read_csr(CVMX_LMCX_MEM_CFG0(0));
+diff --git a/drivers/edac/octeon_edac-pc.c b/drivers/edac/octeon_edac-pc.c
+index 0f83c33a7d1f..2ab6cf24c959 100644
+--- a/drivers/edac/octeon_edac-pc.c
++++ b/drivers/edac/octeon_edac-pc.c
+@@ -73,7 +73,7 @@ static int  co_cache_error_event(struct notifier_block *this,
+ 			edac_device_handle_ce(p->ed, cpu, 0, "dcache");
+ 
+ 		/* Clear the error indication */
+-		if (OCTEON_IS_MODEL(OCTEON_FAM_2))
++		if (OCTEON_IS_OCTEON2())
+ 			write_octeon_c0_dcacheerr(1);
+ 		else
+ 			write_octeon_c0_dcacheerr(0);
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index 97b1616aa391..bba843c2b0ac 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -89,9 +89,9 @@ static void dmi_table(u8 *buf,
+ 
+ 	/*
+ 	 * Stop when we have seen all the items the table claimed to have
+-	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker OR we run
+-	 * off the end of the table (should never happen but sometimes does
+-	 * on bogus implementations.)
++	 * (SMBIOS < 3.0 only) OR we reach an end-of-table marker (SMBIOS
++	 * >= 3.0 only) OR we run off the end of the table (should never
++	 * happen but sometimes does on bogus implementations.)
+ 	 */
+ 	while ((!dmi_num || i < dmi_num) &&
+ 	       (data - buf + sizeof(struct dmi_header)) <= dmi_len) {
+@@ -110,8 +110,13 @@ static void dmi_table(u8 *buf,
+ 
+ 		/*
+ 		 * 7.45 End-of-Table (Type 127) [SMBIOS reference spec v3.0.0]
++		 * For tables behind a 64-bit entry point, we have no item
++		 * count and no exact table length, so stop on end-of-table
++		 * marker. For tables behind a 32-bit entry point, we have
++		 * seen OEM structures behind the end-of-table marker on
++		 * some systems, so don't trust it.
+ 		 */
+-		if (dm->type == DMI_ENTRY_END_OF_TABLE)
++		if (!dmi_num && dm->type == DMI_ENTRY_END_OF_TABLE)
+ 			break;
+ 
+ 		data += 2;
+diff --git a/drivers/gpu/drm/bridge/ptn3460.c b/drivers/gpu/drm/bridge/ptn3460.c
+index 9d2f053382e1..63a09e4079f3 100644
+--- a/drivers/gpu/drm/bridge/ptn3460.c
++++ b/drivers/gpu/drm/bridge/ptn3460.c
+@@ -15,6 +15,7 @@
+ 
+ #include <linux/delay.h>
+ #include <linux/gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
+index 3007b44e6bf4..800a025dd062 100644
+--- a/drivers/gpu/drm/drm_crtc.c
++++ b/drivers/gpu/drm/drm_crtc.c
+@@ -2749,8 +2749,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
+ 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ 		return -EINVAL;
+ 
+-	/* For some reason crtc x/y offsets are signed internally. */
+-	if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
++	/*
++	 * Universal plane src offsets are only 16.16, prevent havoc for
++	 * drivers using universal plane code internally.
++	 */
++	if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000)
+ 		return -ERANGE;
+ 
+ 	drm_modeset_lock_all(dev);
+@@ -5048,12 +5051,9 @@ void drm_mode_config_reset(struct drm_device *dev)
+ 		if (encoder->funcs->reset)
+ 			encoder->funcs->reset(encoder);
+ 
+-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+-		connector->status = connector_status_unknown;
+-
++	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ 		if (connector->funcs->reset)
+ 			connector->funcs->reset(connector);
+-	}
+ }
+ EXPORT_SYMBOL(drm_mode_config_reset);
+ 
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 132581ca4ad8..778bbb6425b8 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -867,8 +867,16 @@ static void drm_dp_destroy_port(struct kref *kref)
+ 		port->vcpi.num_slots = 0;
+ 
+ 		kfree(port->cached_edid);
+-		if (port->connector)
+-			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
++
++		/* we can't destroy the connector here, as
++		   we might be holding the mode_config.mutex
++		   from an EDID retrieval */
++		if (port->connector) {
++			mutex_lock(&mgr->destroy_connector_lock);
++			list_add(&port->connector->destroy_list, &mgr->destroy_connector_list);
++			mutex_unlock(&mgr->destroy_connector_lock);
++			schedule_work(&mgr->destroy_connector_work);
++		}
+ 		drm_dp_port_teardown_pdt(port, port->pdt);
+ 
+ 		if (!port->input && port->vcpi.vcpi > 0)
+@@ -1163,6 +1171,8 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 	struct drm_dp_mst_port *port;
+ 	int i;
+ 	/* find the port by iterating down */
++
++	mutex_lock(&mgr->lock);
+ 	mstb = mgr->mst_primary;
+ 
+ 	for (i = 0; i < lct - 1; i++) {
+@@ -1182,6 +1192,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
+ 		}
+ 	}
+ 	kref_get(&mstb->kref);
++	mutex_unlock(&mgr->lock);
+ 	return mstb;
+ }
+ 
+@@ -1189,7 +1200,7 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
+ 					       struct drm_dp_mst_branch *mstb)
+ {
+ 	struct drm_dp_mst_port *port;
+-
++	struct drm_dp_mst_branch *mstb_child;
+ 	if (!mstb->link_address_sent) {
+ 		drm_dp_send_link_address(mgr, mstb);
+ 		mstb->link_address_sent = true;
+@@ -1204,17 +1215,31 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
+ 		if (!port->available_pbn)
+ 			drm_dp_send_enum_path_resources(mgr, mstb, port);
+ 
+-		if (port->mstb)
+-			drm_dp_check_and_send_link_address(mgr, port->mstb);
++		if (port->mstb) {
++			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
++			if (mstb_child) {
++				drm_dp_check_and_send_link_address(mgr, mstb_child);
++				drm_dp_put_mst_branch_device(mstb_child);
++			}
++		}
+ 	}
+ }
+ 
+ static void drm_dp_mst_link_probe_work(struct work_struct *work)
+ {
+ 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
++	struct drm_dp_mst_branch *mstb;
+ 
+-	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
+-
++	mutex_lock(&mgr->lock);
++	mstb = mgr->mst_primary;
++	if (mstb) {
++		kref_get(&mstb->kref);
++	}
++	mutex_unlock(&mgr->lock);
++	if (mstb) {
++		drm_dp_check_and_send_link_address(mgr, mstb);
++		drm_dp_put_mst_branch_device(mstb);
++	}
+ }
+ 
+ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
+@@ -2632,6 +2657,30 @@ static void drm_dp_tx_work(struct work_struct *work)
+ 	mutex_unlock(&mgr->qlock);
+ }
+ 
++static void drm_dp_destroy_connector_work(struct work_struct *work)
++{
++	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
++	struct drm_connector *connector;
++
++	/*
++	 * Not a regular list traverse as we have to drop the destroy
++	 * connector lock before destroying the connector, to avoid AB->BA
++	 * ordering between this lock and the config mutex.
++	 */
++	for (;;) {
++		mutex_lock(&mgr->destroy_connector_lock);
++		connector = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_connector, destroy_list);
++		if (!connector) {
++			mutex_unlock(&mgr->destroy_connector_lock);
++			break;
++		}
++		list_del(&connector->destroy_list);
++		mutex_unlock(&mgr->destroy_connector_lock);
++
++		mgr->cbs->destroy_connector(mgr, connector);
++	}
++}
++
+ /**
+  * drm_dp_mst_topology_mgr_init - initialise a topology manager
+  * @mgr: manager struct to initialise
+@@ -2651,10 +2700,13 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ 	mutex_init(&mgr->lock);
+ 	mutex_init(&mgr->qlock);
+ 	mutex_init(&mgr->payload_lock);
++	mutex_init(&mgr->destroy_connector_lock);
+ 	INIT_LIST_HEAD(&mgr->tx_msg_upq);
+ 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
++	INIT_LIST_HEAD(&mgr->destroy_connector_list);
+ 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
+ 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
++	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+ 	init_waitqueue_head(&mgr->tx_waitq);
+ 	mgr->dev = dev;
+ 	mgr->aux = aux;
+@@ -2679,6 +2731,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
+  */
+ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
+ {
++	flush_work(&mgr->destroy_connector_work);
+ 	mutex_lock(&mgr->payload_lock);
+ 	kfree(mgr->payloads);
+ 	mgr->payloads = NULL;
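The connector teardown change above is the classic deferred-destruction pattern: when the current context may already hold a lock that the destructor also needs (here mode_config.mutex, held during EDID retrieval), the object is parked on a list under a dedicated lock and a work item performs the actual teardown later, with neither lock ever held across the other. A minimal standalone sketch of the pattern follows; the names are hypothetical and do not come from the DRM code:

	#include <linux/kernel.h>
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct destroy_queue {
		struct mutex lock;		/* guards only the list below */
		struct list_head list;		/* objects pending destruction */
		struct work_struct work;	/* teardown runs here */
	};

	struct victim {
		struct list_head entry;
		/* ... payload ... */
	};

	static void destroy_worker(struct work_struct *work)
	{
		struct destroy_queue *q =
			container_of(work, struct destroy_queue, work);
		struct victim *v;

		for (;;) {
			mutex_lock(&q->lock);
			v = list_first_entry_or_null(&q->list,
						     struct victim, entry);
			if (!v) {
				mutex_unlock(&q->lock);
				break;
			}
			list_del(&v->entry);
			/* drop the list lock before the heavyweight teardown,
			 * so it is never held together with a lock like
			 * mode_config.mutex */
			mutex_unlock(&q->lock);
			kfree(v);
		}
	}

	static void queue_destroy(struct destroy_queue *q, struct victim *v)
	{
		mutex_lock(&q->lock);
		list_add(&v->entry, &q->list);
		mutex_unlock(&q->lock);
		schedule_work(&q->work);
	}

	static void destroy_queue_init(struct destroy_queue *q)
	{
		mutex_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
		INIT_WORK(&q->work, destroy_worker);
	}

The flush_work() added to drm_dp_mst_topology_mgr_destroy() then guarantees that no deferred teardown is still in flight when the manager itself goes away.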
+diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
+index aa8bbb460c57..9cfcd0aef0df 100644
+--- a/drivers/gpu/drm/drm_ioc32.c
++++ b/drivers/gpu/drm/drm_ioc32.c
+@@ -70,6 +70,8 @@
+ 
+ #define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
+ 
++#define DRM_IOCTL_MODE_ADDFB232		DRM_IOWR(0xb8, drm_mode_fb_cmd232_t)
++
+ typedef struct drm_version_32 {
+ 	int version_major;	  /**< Major version */
+ 	int version_minor;	  /**< Minor version */
+@@ -1016,6 +1018,63 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+ 	return 0;
+ }
+ 
++typedef struct drm_mode_fb_cmd232 {
++	u32 fb_id;
++	u32 width;
++	u32 height;
++	u32 pixel_format;
++	u32 flags;
++	u32 handles[4];
++	u32 pitches[4];
++	u32 offsets[4];
++	u64 modifier[4];
++} __attribute__((packed)) drm_mode_fb_cmd232_t;
++
++static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
++				  unsigned long arg)
++{
++	struct drm_mode_fb_cmd232 __user *argp = (void __user *)arg;
++	struct drm_mode_fb_cmd232 req32;
++	struct drm_mode_fb_cmd2 __user *req64;
++	int i;
++	int err;
++
++	if (copy_from_user(&req32, argp, sizeof(req32)))
++		return -EFAULT;
++
++	req64 = compat_alloc_user_space(sizeof(*req64));
++
++	if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64))
++	    || __put_user(req32.width, &req64->width)
++	    || __put_user(req32.height, &req64->height)
++	    || __put_user(req32.pixel_format, &req64->pixel_format)
++	    || __put_user(req32.flags, &req64->flags))
++		return -EFAULT;
++
++	for (i = 0; i < 4; i++) {
++		if (__put_user(req32.handles[i], &req64->handles[i]))
++			return -EFAULT;
++		if (__put_user(req32.pitches[i], &req64->pitches[i]))
++			return -EFAULT;
++		if (__put_user(req32.offsets[i], &req64->offsets[i]))
++			return -EFAULT;
++		if (__put_user(req32.modifier[i], &req64->modifier[i]))
++			return -EFAULT;
++	}
++
++	err = drm_ioctl(file, DRM_IOCTL_MODE_ADDFB2, (unsigned long)req64);
++	if (err)
++		return err;
++
++	if (__get_user(req32.fb_id, &req64->fb_id))
++		return -EFAULT;
++
++	if (copy_to_user(argp, &req32, sizeof(req32)))
++		return -EFAULT;
++
++	return 0;
++}
++
+ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ 	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+ 	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+@@ -1048,6 +1107,7 @@ static drm_ioctl_compat_t *drm_compat_ioctls[] = {
+ 	[DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW32)] = compat_drm_update_draw,
+ #endif
+ 	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
++	[DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB232)] = compat_drm_mode_addfb2,
+ };
+ 
+ /**
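The compat_drm_mode_addfb2() thunk above follows the standard compat-ioctl recipe of this kernel era: copy the packed 32-bit layout in, rebuild the native structure in scratch space obtained from compat_alloc_user_space() (which carves the space out of the user stack, so the native handler's own copy_from_user() still works), forward to drm_ioctl(), and copy any results back. A stripped-down sketch of the shape, where foo_cmd, foo_cmd32 and do_foo_ioctl() are hypothetical stand-ins:

	#include <linux/compat.h>
	#include <linux/fs.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct foo_cmd {		/* native: u64 is 8-byte aligned */
		u32 handle;
		u64 value;
	};

	typedef struct foo_cmd32 {	/* 32-bit ABI packs this differently */
		u32 handle;
		u64 value;
	} __attribute__((packed)) foo_cmd32_t;

	/* hypothetical native handler, e.g. a drm_ioctl() wrapper */
	long do_foo_ioctl(struct file *file, unsigned int cmd, unsigned long arg);

	static long compat_foo_ioctl(struct file *file, unsigned int cmd,
				     unsigned long arg)
	{
		foo_cmd32_t __user *argp = (void __user *)arg;
		foo_cmd32_t req32;
		struct foo_cmd __user *req64;

		if (copy_from_user(&req32, argp, sizeof(req32)))
			return -EFAULT;

		/* scratch space on the user stack for the native layout */
		req64 = compat_alloc_user_space(sizeof(*req64));
		if (!access_ok(VERIFY_WRITE, req64, sizeof(*req64)) ||
		    __put_user(req32.handle, &req64->handle) ||
		    __put_user(req32.value, &req64->value))
			return -EFAULT;

		return do_foo_ioctl(file, cmd, (unsigned long)req64);
	}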
+diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
+index 2d0995e7afc3..596bce56e379 100644
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2401,6 +2401,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
+ 	}
+ 
+ 	request->emitted_jiffies = jiffies;
++	ring->last_submitted_seqno = request->seqno;
+ 	list_add_tail(&request->list, &ring->request_list);
+ 	request->file_priv = NULL;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
+index 0239fbff7bf7..ad90fa3045e5 100644
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -502,17 +502,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
+ 		struct page *page_table;
+ 
+ 		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
+-			continue;
++			break;
+ 
+ 		pd = ppgtt->pdp.page_directory[pdpe];
+ 
+ 		if (WARN_ON(!pd->page_table[pde]))
+-			continue;
++			break;
+ 
+ 		pt = pd->page_table[pde];
+ 
+ 		if (WARN_ON(!pt->page))
+-			continue;
++			break;
+ 
+ 		page_table = pt->page;
+ 
+diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
+index 176de6322e4d..23aa04cded6b 100644
+--- a/drivers/gpu/drm/i915/i915_ioc32.c
++++ b/drivers/gpu/drm/i915/i915_ioc32.c
+@@ -204,7 +204,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ 	drm_ioctl_compat_t *fn = NULL;
+ 	int ret;
+ 
+-	if (nr < DRM_COMMAND_BASE)
++	if (nr < DRM_COMMAND_BASE || nr >= DRM_COMMAND_END)
+ 		return drm_compat_ioctl(filp, cmd, arg);
+ 
+ 	if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
+diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
+index 6d494432b19f..b0df8d10482a 100644
+--- a/drivers/gpu/drm/i915/i915_irq.c
++++ b/drivers/gpu/drm/i915/i915_irq.c
+@@ -2650,18 +2650,11 @@ static void gen8_disable_vblank(struct drm_device *dev, int pipe)
+ 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+ 
+-static struct drm_i915_gem_request *
+-ring_last_request(struct intel_engine_cs *ring)
+-{
+-	return list_entry(ring->request_list.prev,
+-			  struct drm_i915_gem_request, list);
+-}
+-
+ static bool
+-ring_idle(struct intel_engine_cs *ring)
++ring_idle(struct intel_engine_cs *ring, u32 seqno)
+ {
+ 	return (list_empty(&ring->request_list) ||
+-		i915_gem_request_completed(ring_last_request(ring), false));
++		i915_seqno_passed(seqno, ring->last_submitted_seqno));
+ }
+ 
+ static bool
+@@ -2883,7 +2876,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
+ 		acthd = intel_ring_get_active_head(ring);
+ 
+ 		if (ring->hangcheck.seqno == seqno) {
+-			if (ring_idle(ring)) {
++			if (ring_idle(ring, seqno)) {
+ 				ring->hangcheck.action = HANGCHECK_IDLE;
+ 
+ 				if (waitqueue_active(&ring->irq_queue)) {
+diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
+index 773d1d24e604..a30db4b4050e 100644
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -3209,6 +3209,7 @@ enum skl_disp_power_wells {
+ #define   BLM_POLARITY_PNV			(1 << 0) /* pnv only */
+ 
+ #define BLC_HIST_CTL	(dev_priv->info.display_mmio_offset + 0x61260)
++#define  BLM_HISTOGRAM_ENABLE			(1 << 31)
+ 
+ /* New registers for PCH-split platforms. Safe where new bits show up, the
+  * register layout machtes with gen4 BLC_PWM_CTL[12]. */
+diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
+index d0f3cbc87474..57c887843dc3 100644
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -12499,6 +12499,16 @@ intel_check_primary_plane(struct drm_plane *plane,
+ 				intel_crtc->atomic.wait_vblank = true;
+ 		}
+ 
++		/*
++		 * FIXME: If any other plane is still enabled on the pipe we
++		 * could actually leave IPS enabled, but for now assume that
++		 * when the primary plane is made invisible by setting
++		 * DSPCNTR to 0 in update_primary_plane(), IPS needs to be
++		 * disabled.
++		 */
++		if (!state->visible || !fb)
++			intel_crtc->atomic.disable_ips = true;
++
+ 		intel_crtc->atomic.fb_bits |=
+ 			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
+ 
+@@ -12590,6 +12600,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc)
+ 	if (intel_crtc->atomic.disable_fbc)
+ 		intel_fbc_disable(dev);
+ 
++	if (intel_crtc->atomic.disable_ips)
++		hsw_disable_ips(intel_crtc);
++
+ 	if (intel_crtc->atomic.pre_disable_primary)
+ 		intel_pre_disable_primary(crtc);
+ 
+diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
+index 897f17db08af..68d1f74a7403 100644
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -424,6 +424,7 @@ struct intel_crtc_atomic_commit {
+ 	/* Sleepable operations to perform before commit */
+ 	bool wait_for_flips;
+ 	bool disable_fbc;
++	bool disable_ips;
+ 	bool pre_disable_primary;
+ 	bool update_wm;
+ 	unsigned disabled_planes;
+diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
+index 08532d4ffe0a..2bf92cba4a55 100644
+--- a/drivers/gpu/drm/i915/intel_panel.c
++++ b/drivers/gpu/drm/i915/intel_panel.c
+@@ -879,6 +879,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
+ 
+ 	/* XXX: combine this into above write? */
+ 	intel_panel_actually_set_backlight(connector, panel->backlight.level);
++
++	/*
++	 * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
++	 * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
++	 * that has backlight.
++	 */
++	if (IS_GEN2(dev))
++		I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ }
+ 
+ static void i965_enable_backlight(struct intel_connector *connector)
+diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
+index c761fe05ad6f..94514d364d25 100644
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -266,6 +266,13 @@ struct  intel_engine_cs {
+ 	 * Do we have some not yet emitted requests outstanding?
+ 	 */
+ 	struct drm_i915_gem_request *outstanding_lazy_request;
++	/**
++	 * Seqno of request most recently submitted to request_list.
++	 * Used exclusively by hang checker to avoid grabbing lock while
++	 * inspecting request list.
++	 */
++	u32 last_submitted_seqno;
++
+ 	bool gpu_caches_dirty;
+ 
+ 	wait_queue_head_t irq_queue;
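last_submitted_seqno exists so that the hang checker can test a ring for idleness without taking the lock that protects request_list: it samples the hardware seqno and compares it against this cached value with the usual wrap-safe comparison. The trick, sketched standalone below, is that subtracting two u32 sequence numbers and reinterpreting the difference as signed gives the right answer even across 32-bit wraparound, as long as the two values are within 2^31 of each other:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* wrap-safe "a is at or after b", in the style of i915_seqno_passed() */
	static bool seqno_passed(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(10, 5));
		assert(!seqno_passed(5, 10));
		/* still correct just after the counter wraps past 2^32 */
		assert(seqno_passed(3, 0xfffffff0u));
		return 0;
	}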
+diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
+index ff2a74651dd4..a18807ec8371 100644
+--- a/drivers/gpu/drm/i915/intel_uncore.c
++++ b/drivers/gpu/drm/i915/intel_uncore.c
+@@ -1220,10 +1220,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
+ 	struct drm_i915_private *dev_priv = dev->dev_private;
+ 	struct drm_i915_reg_read *reg = data;
+ 	struct register_whitelist const *entry = whitelist;
++	unsigned size;
++	u64 offset;
+ 	int i, ret = 0;
+ 
+ 	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+-		if (entry->offset == reg->offset &&
++		if (entry->offset == (reg->offset & -entry->size) &&
+ 		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ 			break;
+ 	}
+@@ -1231,23 +1233,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
+ 	if (i == ARRAY_SIZE(whitelist))
+ 		return -EINVAL;
+ 
++	/* We use the low bits to encode extra flags as the register should
++	 * be naturally aligned (and those that are not so aligned merely
++	 * limit the available flags for that register).
++	 */
++	offset = entry->offset;
++	size = entry->size;
++	size |= reg->offset ^ offset;
++
+ 	intel_runtime_pm_get(dev_priv);
+ 
+-	switch (entry->size) {
++	switch (size) {
++	case 8 | 1:
++		reg->val = I915_READ64_2x32(offset, offset+4);
++		break;
+ 	case 8:
+-		reg->val = I915_READ64(reg->offset);
++		reg->val = I915_READ64(offset);
+ 		break;
+ 	case 4:
+-		reg->val = I915_READ(reg->offset);
++		reg->val = I915_READ(offset);
+ 		break;
+ 	case 2:
+-		reg->val = I915_READ16(reg->offset);
++		reg->val = I915_READ16(offset);
+ 		break;
+ 	case 1:
+-		reg->val = I915_READ8(reg->offset);
++		reg->val = I915_READ8(offset);
+ 		break;
+ 	default:
+-		MISSING_CASE(entry->size);
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
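The whitelist rewrite in i915_reg_read_ioctl() leans on the fact that register sizes are powers of two: for such a size, -size in two's complement has all low bits clear, so offset & -size strips exactly the bits that can carry flags, and XOR-ing the user-supplied offset against the canonical one recovers them. Folding those flag bits into size then produces distinct switch cases such as 8 | 1 (a 64-bit register read as two 32-bit halves). A tiny standalone check of the arithmetic, with made-up values:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t entry_offset = 0x2358;	/* hypothetical whitelisted register */
		unsigned entry_size = 8;	/* 64-bit register: 3 low bits free */

		uint64_t user_offset = entry_offset | 1;	/* low bit as a flag */

		/* masking with -size strips the flag bits, recovering the
		 * canonical offset the whitelist stores */
		assert(entry_offset == (user_offset & -(uint64_t)entry_size));

		/* folding the flag bits into size yields the 8 | 1 case */
		unsigned size = entry_size | (unsigned)(user_offset ^ entry_offset);
		assert(size == (8 | 1));
		return 0;
	}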
+diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
+index 97823644d347..f33251d67914 100644
+--- a/drivers/gpu/drm/qxl/qxl_cmd.c
++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
+@@ -505,6 +505,7 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
+ 
+ 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
+ 	cmd->type = QXL_SURFACE_CMD_CREATE;
++	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
+ 	cmd->u.surface_create.format = surf->surf.format;
+ 	cmd->u.surface_create.width = surf->surf.width;
+ 	cmd->u.surface_create.height = surf->surf.height;
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index b110883f8253..7354a4cda59d 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -122,8 +122,10 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
+ 	qobj = gem_to_qxl_bo(gobj);
+ 
+ 	ret = qxl_release_list_add(release, qobj);
+-	if (ret)
++	if (ret) {
++		drm_gem_object_unreference_unlocked(gobj);
+ 		return NULL;
++	}
+ 
+ 	return qobj;
+ }
+diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
+index 8730562323a8..4a09947be244 100644
+--- a/drivers/gpu/drm/radeon/ci_dpm.c
++++ b/drivers/gpu/drm/radeon/ci_dpm.c
+@@ -5818,7 +5818,7 @@ int ci_dpm_init(struct radeon_device *rdev)
+ 			tmp |= DPM_ENABLED;
+ 			break;
+ 		default:
+-			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
++			DRM_DEBUG("Invalid PCC GPIO: %u!\n", gpio.shift);
+ 			break;
+ 		}
+ 		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index ba50f3c1c2e0..845665362475 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -4579,6 +4579,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev,
+ 	WDOORBELL32(ring->doorbell_index, ring->wptr);
+ }
+ 
++static void cik_compute_stop(struct radeon_device *rdev,
++			     struct radeon_ring *ring)
++{
++	u32 j, tmp;
++
++	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
++	/* Disable wptr polling. */
++	tmp = RREG32(CP_PQ_WPTR_POLL_CNTL);
++	tmp &= ~WPTR_POLL_EN;
++	WREG32(CP_PQ_WPTR_POLL_CNTL, tmp);
++	/* Disable HQD. */
++	if (RREG32(CP_HQD_ACTIVE) & 1) {
++		WREG32(CP_HQD_DEQUEUE_REQUEST, 1);
++		for (j = 0; j < rdev->usec_timeout; j++) {
++			if (!(RREG32(CP_HQD_ACTIVE) & 1))
++				break;
++			udelay(1);
++		}
++		WREG32(CP_HQD_DEQUEUE_REQUEST, 0);
++		WREG32(CP_HQD_PQ_RPTR, 0);
++		WREG32(CP_HQD_PQ_WPTR, 0);
++	}
++	cik_srbm_select(rdev, 0, 0, 0, 0);
++}
++
+ /**
+  * cik_cp_compute_enable - enable/disable the compute CP MEs
+  *
+@@ -4592,6 +4617,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
+ 	if (enable)
+ 		WREG32(CP_MEC_CNTL, 0);
+ 	else {
++		/*
++		 * To make hibernation reliable we need to clear compute ring
++		 * configuration before halting the compute ring.
++		 */
++		mutex_lock(&rdev->srbm_mutex);
++		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
++		cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
++		mutex_unlock(&rdev->srbm_mutex);
++
+ 		WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ 		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ 		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+@@ -7905,23 +7939,27 @@ restart_ih:
+ 		case 1: /* D1 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D1 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[0]) {
+-						drm_handle_vblank(rdev->ddev, 0);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[0]))
+-						radeon_crtc_handle_vblank(rdev, 0);
+-					rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[0]) {
++					drm_handle_vblank(rdev->ddev, 0);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[0]))
++					radeon_crtc_handle_vblank(rdev, 0);
++				rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D1 vblank\n");
++
+ 				break;
+ 			case 1: /* D1 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D1 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -7931,23 +7969,27 @@ restart_ih:
+ 		case 2: /* D2 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D2 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[1]) {
+-						drm_handle_vblank(rdev->ddev, 1);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[1]))
+-						radeon_crtc_handle_vblank(rdev, 1);
+-					rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[1]) {
++					drm_handle_vblank(rdev->ddev, 1);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[1]))
++					radeon_crtc_handle_vblank(rdev, 1);
++				rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D2 vblank\n");
++
+ 				break;
+ 			case 1: /* D2 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D2 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -7957,23 +7999,27 @@ restart_ih:
+ 		case 3: /* D3 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D3 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[2]) {
+-						drm_handle_vblank(rdev->ddev, 2);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[2]))
+-						radeon_crtc_handle_vblank(rdev, 2);
+-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[2]) {
++					drm_handle_vblank(rdev->ddev, 2);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[2]))
++					radeon_crtc_handle_vblank(rdev, 2);
++				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D3 vblank\n");
++
+ 				break;
+ 			case 1: /* D3 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D3 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -7983,23 +8029,27 @@ restart_ih:
+ 		case 4: /* D4 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D4 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[3]) {
+-						drm_handle_vblank(rdev->ddev, 3);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[3]))
+-						radeon_crtc_handle_vblank(rdev, 3);
+-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[3]) {
++					drm_handle_vblank(rdev->ddev, 3);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[3]))
++					radeon_crtc_handle_vblank(rdev, 3);
++				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D4 vblank\n");
++
+ 				break;
+ 			case 1: /* D4 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D4 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8009,23 +8059,27 @@ restart_ih:
+ 		case 5: /* D5 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D5 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[4]) {
+-						drm_handle_vblank(rdev->ddev, 4);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[4]))
+-						radeon_crtc_handle_vblank(rdev, 4);
+-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[4]) {
++					drm_handle_vblank(rdev->ddev, 4);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[4]))
++					radeon_crtc_handle_vblank(rdev, 4);
++				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D5 vblank\n");
++
+ 				break;
+ 			case 1: /* D5 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D5 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8035,23 +8089,27 @@ restart_ih:
+ 		case 6: /* D6 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D6 vblank */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[5]) {
+-						drm_handle_vblank(rdev->ddev, 5);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[5]))
+-						radeon_crtc_handle_vblank(rdev, 5);
+-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vblank\n");
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[5]) {
++					drm_handle_vblank(rdev->ddev, 5);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[5]))
++					radeon_crtc_handle_vblank(rdev, 5);
++				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D6 vblank\n");
++
+ 				break;
+ 			case 1: /* D6 vline */
+-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D6 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -8071,88 +8129,112 @@ restart_ih:
+ 		case 42: /* HPD hotplug */
+ 			switch (src_data) {
+ 			case 0:
+-				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD1\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD1\n");
++
+ 				break;
+ 			case 1:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD2\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD2\n");
++
+ 				break;
+ 			case 2:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD3\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD3\n");
++
+ 				break;
+ 			case 3:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD4\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD4\n");
++
+ 				break;
+ 			case 4:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD5\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD5\n");
++
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD6\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD6\n");
++
+ 				break;
+ 			case 6:
+-				if (rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 1\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int & DC_HPD1_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 1\n");
++
+ 				break;
+ 			case 7:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 2\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 2\n");
++
+ 				break;
+ 			case 8:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 3\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 3\n");
++
+ 				break;
+ 			case 9:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 4\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 4\n");
++
+ 				break;
+ 			case 10:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 5\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 5\n");
++
+ 				break;
+ 			case 11:
+-				if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 6\n");
+-				}
++				if (!(rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.cik.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 6\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
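cik_compute_stop() above uses the standard bounded-poll idiom for hardware handshakes: request the state change, then spin with udelay(1) until the status bit drops or a microsecond budget runs out, and carry on regardless so a wedged queue cannot hang the suspend path. The same shape as a generic helper; the names here are hypothetical, not radeon API:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	/* poll a memory-mapped status register until a bit clears,
	 * bounded by a microsecond budget */
	static int poll_bit_clear(void __iomem *reg, u32 bit,
				  unsigned int usec_timeout)
	{
		unsigned int i;

		for (i = 0; i < usec_timeout; i++) {
			if (!(readl(reg) & bit))
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;	/* caller decides whether to carry on */
	}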
+diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
+index f86eb54e7763..d16f2eebd95e 100644
+--- a/drivers/gpu/drm/radeon/cik_sdma.c
++++ b/drivers/gpu/drm/radeon/cik_sdma.c
+@@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+ 	}
+ 	rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ 	rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
++
++	/* FIXME: use something less drastic than this big hammer, but after
++	 * a few days no good combination could be found, so reset the SDMA
++	 * blocks since we do not seem to shut them down properly. This
++	 * fixes hibernation and does not affect suspend to RAM.
++	 */
++	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
++	(void)RREG32(SRBM_SOFT_RESET);
++	udelay(50);
++	WREG32(SRBM_SOFT_RESET, 0);
++	(void)RREG32(SRBM_SOFT_RESET);
+ }
+ 
+ /**
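The SDMA reset just added is a write / posting-read / delay / release sequence: reading SRBM_SOFT_RESET back after each write forces the posted write out to the device before the delay starts counting, which is what makes the 50-microsecond hold time meaningful. The same shape as a generic helper, again with hypothetical names:

	#include <linux/delay.h>
	#include <linux/io.h>

	static void pulse_soft_reset(void __iomem *reset_reg, u32 bits)
	{
		writel(bits, reset_reg);
		(void)readl(reset_reg);	/* posting read: flush the write */
		udelay(50);		/* hold the blocks in reset */
		writel(0, reset_reg);
		(void)readl(reset_reg);	/* flush the release as well */
	}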
+diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
+index f848acfd3fc8..feef136cdb55 100644
+--- a/drivers/gpu/drm/radeon/evergreen.c
++++ b/drivers/gpu/drm/radeon/evergreen.c
+@@ -4855,7 +4855,7 @@ restart_ih:
+ 		return IRQ_NONE;
+ 
+ 	rptr = rdev->ih.rptr;
+-	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
++	DRM_DEBUG("evergreen_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+ 
+ 	/* Order reading of wptr vs. reading of IH ring data */
+ 	rmb();
+@@ -4873,23 +4873,27 @@ restart_ih:
+ 		case 1: /* D1 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D1 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[0]) {
+-						drm_handle_vblank(rdev->ddev, 0);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[0]))
+-						radeon_crtc_handle_vblank(rdev, 0);
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[0]) {
++					drm_handle_vblank(rdev->ddev, 0);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[0]))
++					radeon_crtc_handle_vblank(rdev, 0);
++				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D1 vblank\n");
++
+ 				break;
+ 			case 1: /* D1 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D1 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4899,23 +4903,27 @@ restart_ih:
+ 		case 2: /* D2 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D2 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[1]) {
+-						drm_handle_vblank(rdev->ddev, 1);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[1]))
+-						radeon_crtc_handle_vblank(rdev, 1);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[1]) {
++					drm_handle_vblank(rdev->ddev, 1);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[1]))
++					radeon_crtc_handle_vblank(rdev, 1);
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D2 vblank\n");
++
+ 				break;
+ 			case 1: /* D2 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D2 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4925,23 +4933,27 @@ restart_ih:
+ 		case 3: /* D3 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D3 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[2]) {
+-						drm_handle_vblank(rdev->ddev, 2);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[2]))
+-						radeon_crtc_handle_vblank(rdev, 2);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D3 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[2]) {
++					drm_handle_vblank(rdev->ddev, 2);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[2]))
++					radeon_crtc_handle_vblank(rdev, 2);
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D3 vblank\n");
++
+ 				break;
+ 			case 1: /* D3 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D3 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D3 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4951,23 +4963,27 @@ restart_ih:
+ 		case 4: /* D4 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D4 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[3]) {
+-						drm_handle_vblank(rdev->ddev, 3);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[3]))
+-						radeon_crtc_handle_vblank(rdev, 3);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D4 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[3]) {
++					drm_handle_vblank(rdev->ddev, 3);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[3]))
++					radeon_crtc_handle_vblank(rdev, 3);
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D4 vblank\n");
++
+ 				break;
+ 			case 1: /* D4 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D4 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D4 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4977,23 +4993,27 @@ restart_ih:
+ 		case 5: /* D5 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D5 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[4]) {
+-						drm_handle_vblank(rdev->ddev, 4);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[4]))
+-						radeon_crtc_handle_vblank(rdev, 4);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D5 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[4]) {
++					drm_handle_vblank(rdev->ddev, 4);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[4]))
++					radeon_crtc_handle_vblank(rdev, 4);
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D5 vblank\n");
++
+ 				break;
+ 			case 1: /* D5 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D5 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D5 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5003,23 +5023,27 @@ restart_ih:
+ 		case 6: /* D6 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D6 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[5]) {
+-						drm_handle_vblank(rdev->ddev, 5);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[5]))
+-						radeon_crtc_handle_vblank(rdev, 5);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D6 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[5]) {
++					drm_handle_vblank(rdev->ddev, 5);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[5]))
++					radeon_crtc_handle_vblank(rdev, 5);
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D6 vblank\n");
++
+ 				break;
+ 			case 1: /* D6 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D6 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D6 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5039,88 +5063,100 @@ restart_ih:
+ 		case 42: /* HPD hotplug */
+ 			switch (src_data) {
+ 			case 0:
+-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD1\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD1\n");
+ 				break;
+ 			case 1:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD2\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD2\n");
+ 				break;
+ 			case 2:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD3\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD3\n");
+ 				break;
+ 			case 3:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD4\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD4\n");
+ 				break;
+ 			case 4:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD5\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD5\n");
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD6\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD6\n");
+ 				break;
+ 			case 6:
+-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 1\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 1\n");
+ 				break;
+ 			case 7:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 2\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 2\n");
+ 				break;
+ 			case 8:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 3\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 3\n");
+ 				break;
+ 			case 9:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 4\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 4\n");
+ 				break;
+ 			case 10:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 5\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 5\n");
+ 				break;
+ 			case 11:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 6\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 6\n");
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -5130,46 +5166,52 @@ restart_ih:
+ 		case 44: /* hdmi */
+ 			switch (src_data) {
+ 			case 0:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI0\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI0\n");
+ 				break;
+ 			case 1:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI1\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI1\n");
+ 				break;
+ 			case 2:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI2\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI2\n");
+ 				break;
+ 			case 3:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI3\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI3\n");
+ 				break;
+ 			case 4:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI4\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI4\n");
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI5\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI5\n");
+ 				break;
+ 			default:
+ 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index 8f6d862a1882..21e479fefcab 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -4039,23 +4039,27 @@ restart_ih:
+ 		case 1: /* D1 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D1 vblank */
+-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[0]) {
+-						drm_handle_vblank(rdev->ddev, 0);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[0]))
+-						radeon_crtc_handle_vblank(rdev, 0);
+-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vblank\n");
++				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[0]) {
++					drm_handle_vblank(rdev->ddev, 0);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[0]))
++					radeon_crtc_handle_vblank(rdev, 0);
++				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D1 vblank\n");
++
+ 				break;
+ 			case 1: /* D1 vline */
+-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D1 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4065,23 +4069,27 @@ restart_ih:
+ 		case 5: /* D2 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D2 vblank */
+-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[1]) {
+-						drm_handle_vblank(rdev->ddev, 1);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[1]))
+-						radeon_crtc_handle_vblank(rdev, 1);
+-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vblank\n");
++				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[1]) {
++					drm_handle_vblank(rdev->ddev, 1);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[1]))
++					radeon_crtc_handle_vblank(rdev, 1);
++				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D2 vblank\n");
++
+ 				break;
+ 			case 1: /* D1 vline */
+-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D2 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4101,46 +4109,53 @@ restart_ih:
+ 		case 19: /* HPD/DAC hotplug */
+ 			switch (src_data) {
+ 			case 0:
+-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD1\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
++					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD1\n");
+ 				break;
+ 			case 1:
+-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD2\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
++					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD2\n");
+ 				break;
+ 			case 4:
+-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD3\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
++					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD3\n");
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD4\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
++					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD4\n");
+ 				break;
+ 			case 10:
+-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD5\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
++					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD5\n");
+ 				break;
+ 			case 12:
+-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD6\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
++					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD6\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -4150,18 +4165,22 @@ restart_ih:
+ 		case 21: /* hdmi */
+ 			switch (src_data) {
+ 			case 4:
+-				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI0\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI0\n");
++
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
+-					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+-					queue_hdmi = true;
+-					DRM_DEBUG("IH: HDMI1\n");
+-				}
++				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
++					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
++				queue_hdmi = true;
++				DRM_DEBUG("IH: HDMI1\n");
++
+ 				break;
+ 			default:
+ 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
+index 25191f126f3b..fa719c53449b 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.c
++++ b/drivers/gpu/drm/radeon/radeon_audio.c
+@@ -242,6 +242,13 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
+ 	.dpms = evergreen_dp_enable,
+ };
+ 
++static void radeon_audio_enable(struct radeon_device *rdev,
++				struct r600_audio_pin *pin, u8 enable_mask)
++{
++	if (rdev->audio.funcs->enable)
++		rdev->audio.funcs->enable(rdev, pin, enable_mask);
++}
++
+ static void radeon_audio_interface_init(struct radeon_device *rdev)
+ {
+ 	if (ASIC_IS_DCE6(rdev)) {
+@@ -307,7 +314,7 @@ int radeon_audio_init(struct radeon_device *rdev)
+ 
+ 	/* disable audio.  it will be set up later */
+ 	for (i = 0; i < rdev->audio.num_pins; i++)
+-		radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
++		radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+ 
+ 	return 0;
+ }
+@@ -443,13 +450,6 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
+ 		radeon_encoder->audio->select_pin(encoder);
+ }
+ 
+-void radeon_audio_enable(struct radeon_device *rdev,
+-	struct r600_audio_pin *pin, u8 enable_mask)
+-{
+-	if (rdev->audio.funcs->enable)
+-		rdev->audio.funcs->enable(rdev, pin, enable_mask);
+-}
+-
+ void radeon_audio_detect(struct drm_connector *connector,
+ 			 enum drm_connector_status status)
+ {
+@@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector,
+ 	dig = radeon_encoder->enc_priv;
+ 
+ 	if (status == connector_status_connected) {
+-		struct radeon_connector *radeon_connector;
+-		int sink_type;
+-
+ 		if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ 			radeon_encoder->audio = NULL;
+ 			return;
+ 		}
+ 
+-		radeon_connector = to_radeon_connector(connector);
+-		sink_type = radeon_dp_getsinktype(radeon_connector);
++		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
++			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ 
+-		if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+-			sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+-			radeon_encoder->audio = rdev->audio.dp_funcs;
+-		else
++			if (radeon_dp_getsinktype(radeon_connector) ==
++			    CONNECTOR_OBJECT_ID_DISPLAYPORT)
++				radeon_encoder->audio = rdev->audio.dp_funcs;
++			else
++				radeon_encoder->audio = rdev->audio.hdmi_funcs;
++		} else {
+ 			radeon_encoder->audio = rdev->audio.hdmi_funcs;
++		}
+ 
+ 		dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
+ 		radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+@@ -502,7 +502,7 @@ void radeon_audio_fini(struct radeon_device *rdev)
+ 		return;
+ 
+ 	for (i = 0; i < rdev->audio.num_pins; i++)
+-		radeon_audio_enable(rdev, &rdev->audio.pin[i], false);
++		radeon_audio_enable(rdev, &rdev->audio.pin[i], 0);
+ 
+ 	rdev->audio.enabled = false;
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
+index c92d059ab204..8438304f7139 100644
+--- a/drivers/gpu/drm/radeon/radeon_audio.h
++++ b/drivers/gpu/drm/radeon/radeon_audio.h
+@@ -74,8 +74,6 @@ u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
+ void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
+ 	u32 offset,	u32 reg, u32 v);
+ struct r600_audio_pin *radeon_audio_get_pin(struct drm_encoder *encoder);
+-void radeon_audio_enable(struct radeon_device *rdev,
+-	struct r600_audio_pin *pin, u8 enable_mask);
+ void radeon_audio_fini(struct radeon_device *rdev);
+ void radeon_audio_mode_set(struct drm_encoder *encoder,
+ 	struct drm_display_mode *mode);
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 45e54060ee97..fa661744a1f5 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -205,8 +205,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
+ 			| (x << 16)
+ 			| y));
+ 		/* offset is from DISP(2)_BASE_ADDRESS */
+-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
+-								      (yorigin * 256)));
++		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
++		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr +
++		       yorigin * 256);
+ 	}
+ 
+ 	radeon_crtc->cursor_x = x;
+@@ -227,51 +228,32 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ 	return ret;
+ }
+ 
+-static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
++static void radeon_set_cursor(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 	struct radeon_device *rdev = crtc->dev->dev_private;
+-	struct radeon_bo *robj = gem_to_radeon_bo(obj);
+-	uint64_t gpu_addr;
+-	int ret;
+-
+-	ret = radeon_bo_reserve(robj, false);
+-	if (unlikely(ret != 0))
+-		goto fail;
+-	/* Only 27 bit offset for legacy cursor */
+-	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+-				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+-				       &gpu_addr);
+-	radeon_bo_unreserve(robj);
+-	if (ret)
+-		goto fail;
+ 
+ 	if (ASIC_IS_DCE4(rdev)) {
+ 		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+-		       upper_32_bits(gpu_addr));
++		       upper_32_bits(radeon_crtc->cursor_addr));
+ 		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+-		       gpu_addr & 0xffffffff);
++		       lower_32_bits(radeon_crtc->cursor_addr));
+ 	} else if (ASIC_IS_AVIVO(rdev)) {
+ 		if (rdev->family >= CHIP_RV770) {
+ 			if (radeon_crtc->crtc_id)
+-				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
++				WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH,
++				       upper_32_bits(radeon_crtc->cursor_addr));
+ 			else
+-				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
++				WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH,
++				       upper_32_bits(radeon_crtc->cursor_addr));
+ 		}
+ 		WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+-		       gpu_addr & 0xffffffff);
++		       lower_32_bits(radeon_crtc->cursor_addr));
+ 	} else {
+-		radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
+ 		/* offset is from DISP(2)_BASE_ADDRESS */
+-		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
++		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset,
++		       radeon_crtc->cursor_addr - radeon_crtc->legacy_display_base_addr);
+ 	}
+-
+-	return 0;
+-
+-fail:
+-	drm_gem_object_unreference_unlocked(obj);
+-
+-	return ret;
+ }
+ 
+ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+@@ -283,7 +265,9 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ 			    int32_t hot_y)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++	struct radeon_device *rdev = crtc->dev->dev_private;
+ 	struct drm_gem_object *obj;
++	struct radeon_bo *robj;
+ 	int ret;
+ 
+ 	if (!handle) {
+@@ -305,6 +289,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ 		return -ENOENT;
+ 	}
+ 
++	robj = gem_to_radeon_bo(obj);
++	ret = radeon_bo_reserve(robj, false);
++	if (ret != 0) {
++		drm_gem_object_unreference_unlocked(obj);
++		return ret;
++	}
++	/* Only 27 bit offset for legacy cursor */
++	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
++				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
++				       &radeon_crtc->cursor_addr);
++	radeon_bo_unreserve(robj);
++	if (ret) {
++		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
++		drm_gem_object_unreference_unlocked(obj);
++		return ret;
++	}
++
+ 	radeon_crtc->cursor_width = width;
+ 	radeon_crtc->cursor_height = height;
+ 
+@@ -323,13 +324,8 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
+ 		radeon_crtc->cursor_hot_y = hot_y;
+ 	}
+ 
+-	ret = radeon_set_cursor(crtc, obj);
+-
+-	if (ret)
+-		DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+-			  ret);
+-	else
+-		radeon_show_cursor(crtc);
++	radeon_set_cursor(crtc);
++	radeon_show_cursor(crtc);
+ 
+ 	radeon_lock_cursor(crtc, false);
+ 
+@@ -341,8 +337,7 @@ unpin:
+ 			radeon_bo_unpin(robj);
+ 			radeon_bo_unreserve(robj);
+ 		}
+-		if (radeon_crtc->cursor_bo != obj)
+-			drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
++		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ 	}
+ 
+ 	radeon_crtc->cursor_bo = obj;
+@@ -360,7 +355,6 @@ unpin:
+ void radeon_cursor_reset(struct drm_crtc *crtc)
+ {
+ 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+-	int ret;
+ 
+ 	if (radeon_crtc->cursor_bo) {
+ 		radeon_lock_cursor(crtc, true);
+@@ -368,12 +362,8 @@ void radeon_cursor_reset(struct drm_crtc *crtc)
+ 		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+ 					  radeon_crtc->cursor_y);
+ 
+-		ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+-		if (ret)
+-			DRM_ERROR("radeon_set_cursor returned %d, not showing "
+-				  "cursor\n", ret);
+-		else
+-			radeon_show_cursor(crtc);
++		radeon_set_cursor(crtc);
++		radeon_show_cursor(crtc);
+ 
+ 		radeon_lock_cursor(crtc, false);
+ 	}
+diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
+index a7fdfa4f0857..604c44d88e7a 100644
+--- a/drivers/gpu/drm/radeon/radeon_device.c
++++ b/drivers/gpu/drm/radeon/radeon_device.c
+@@ -1572,11 +1572,21 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
+ 		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ 	}
+ 
+-	/* unpin the front buffers */
++	/* unpin the front buffers and cursors */
+ 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
+ 		struct radeon_bo *robj;
+ 
++		if (radeon_crtc->cursor_bo) {
++			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
++			r = radeon_bo_reserve(robj, false);
++			if (r == 0) {
++				radeon_bo_unpin(robj);
++				radeon_bo_unreserve(robj);
++			}
++		}
++
+ 		if (rfb == NULL || rfb->obj == NULL) {
+ 			continue;
+ 		}
+@@ -1639,6 +1649,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ {
+ 	struct drm_connector *connector;
+ 	struct radeon_device *rdev = dev->dev_private;
++	struct drm_crtc *crtc;
+ 	int r;
+ 
+ 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+@@ -1678,6 +1689,27 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
+ 
+ 	radeon_restore_bios_scratch_regs(rdev);
+ 
++	/* pin cursors */
++	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
++		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
++
++		if (radeon_crtc->cursor_bo) {
++			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
++			r = radeon_bo_reserve(robj, false);
++			if (r == 0) {
++				/* Only 27 bit offset for legacy cursor */
++				r = radeon_bo_pin_restricted(robj,
++							     RADEON_GEM_DOMAIN_VRAM,
++							     ASIC_IS_AVIVO(rdev) ?
++							     0 : 1 << 27,
++							     &radeon_crtc->cursor_addr);
++				if (r != 0)
++					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
++				radeon_bo_unreserve(robj);
++			}
++		}
++	}
++
+ 	/* init dig PHYs, disp eng pll */
+ 	if (rdev->is_atom_bios) {
+ 		radeon_atom_encoder_init(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
+index aeb676708e60..634793ea8418 100644
+--- a/drivers/gpu/drm/radeon/radeon_fb.c
++++ b/drivers/gpu/drm/radeon/radeon_fb.c
+@@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
+ 	}
+ 
+ 	info->par = rfbdev;
+-	info->skip_vt_switch = true;
+ 
+ 	ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ 	if (ret) {
+diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
+index 5450fa95a47e..c4777c8d0312 100644
+--- a/drivers/gpu/drm/radeon/radeon_gart.c
++++ b/drivers/gpu/drm/radeon/radeon_gart.c
+@@ -260,8 +260,10 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
+ 			}
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ }
+ 
+ /**
+@@ -306,8 +308,10 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
+ 			page_base += RADEON_GPU_PAGE_SIZE;
+ 		}
+ 	}
+-	mb();
+-	radeon_gart_tlb_flush(rdev);
++	if (rdev->gart.ptr) {
++		mb();
++		radeon_gart_tlb_flush(rdev);
++	}
+ 	return 0;
+ }
+ 
+diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
+index ac3c1310b953..186d0b792a02 100644
+--- a/drivers/gpu/drm/radeon/radeon_gem.c
++++ b/drivers/gpu/drm/radeon/radeon_gem.c
+@@ -36,6 +36,7 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
+ 	if (robj) {
+ 		if (robj->gem_base.import_attach)
+ 			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
++		radeon_mn_unregister(robj);
+ 		radeon_bo_unref(&robj);
+ 	}
+ }
+@@ -471,6 +472,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ 		r = ret;
+ 
+ 	/* Flush HDP cache via MMIO if necessary */
++	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
+ 	if (rdev->asic->mmio_hdp_flush &&
+ 	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
+ 		robj->rdev->asic->mmio_hdp_flush(rdev);
+diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+index 7162c935371c..f682e5351252 100644
+--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
++++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
+@@ -79,10 +79,12 @@ static void radeon_hotplug_work_func(struct work_struct *work)
+ 	struct drm_mode_config *mode_config = &dev->mode_config;
+ 	struct drm_connector *connector;
+ 
++	mutex_lock(&mode_config->mutex);
+ 	if (mode_config->num_connector) {
+ 		list_for_each_entry(connector, &mode_config->connector_list, head)
+ 			radeon_connector_hotplug(connector);
+ 	}
++	mutex_unlock(&mode_config->mutex);
+ 	/* Just fire off a uevent and let userspace tell us what to do */
+ 	drm_helper_hpd_irq_event(dev);
+ }
+diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
+index fa91a17b81b6..f01c797b78cf 100644
+--- a/drivers/gpu/drm/radeon/radeon_mode.h
++++ b/drivers/gpu/drm/radeon/radeon_mode.h
+@@ -343,7 +343,6 @@ struct radeon_crtc {
+ 	int max_cursor_width;
+ 	int max_cursor_height;
+ 	uint32_t legacy_display_base_addr;
+-	uint32_t legacy_cursor_offset;
+ 	enum radeon_rmx_type rmx_type;
+ 	u8 h_border;
+ 	u8 v_border;
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 318165d4855c..676362769b8d 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -75,7 +75,6 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+ 	bo = container_of(tbo, struct radeon_bo, tbo);
+ 
+ 	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+-	radeon_mn_unregister(bo);
+ 
+ 	mutex_lock(&bo->rdev->gem.mutex);
+ 	list_del_init(&bo->list);
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 4c679b802bc8..e15185b16504 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -6466,23 +6466,27 @@ restart_ih:
+ 		case 1: /* D1 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D1 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[0]) {
+-						drm_handle_vblank(rdev->ddev, 0);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[0]))
+-						radeon_crtc_handle_vblank(rdev, 0);
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[0]) {
++					drm_handle_vblank(rdev->ddev, 0);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[0]))
++					radeon_crtc_handle_vblank(rdev, 0);
++				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D1 vblank\n");
++
+ 				break;
+ 			case 1: /* D1 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D1 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D1 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6492,23 +6496,27 @@ restart_ih:
+ 		case 2: /* D2 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D2 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[1]) {
+-						drm_handle_vblank(rdev->ddev, 1);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[1]))
+-						radeon_crtc_handle_vblank(rdev, 1);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[1]) {
++					drm_handle_vblank(rdev->ddev, 1);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[1]))
++					radeon_crtc_handle_vblank(rdev, 1);
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D2 vblank\n");
++
+ 				break;
+ 			case 1: /* D2 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D2 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D2 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6518,23 +6526,27 @@ restart_ih:
+ 		case 3: /* D3 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D3 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[2]) {
+-						drm_handle_vblank(rdev->ddev, 2);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[2]))
+-						radeon_crtc_handle_vblank(rdev, 2);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[2]) {
++					drm_handle_vblank(rdev->ddev, 2);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[2]))
++					radeon_crtc_handle_vblank(rdev, 2);
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D3 vblank\n");
++
+ 				break;
+ 			case 1: /* D3 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D3 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D3 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6544,23 +6556,27 @@ restart_ih:
+ 		case 4: /* D4 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D4 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[3]) {
+-						drm_handle_vblank(rdev->ddev, 3);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[3]))
+-						radeon_crtc_handle_vblank(rdev, 3);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[3]) {
++					drm_handle_vblank(rdev->ddev, 3);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[3]))
++					radeon_crtc_handle_vblank(rdev, 3);
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D4 vblank\n");
++
+ 				break;
+ 			case 1: /* D4 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D4 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D4 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6570,23 +6586,27 @@ restart_ih:
+ 		case 5: /* D5 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D5 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[4]) {
+-						drm_handle_vblank(rdev->ddev, 4);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[4]))
+-						radeon_crtc_handle_vblank(rdev, 4);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[4]) {
++					drm_handle_vblank(rdev->ddev, 4);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[4]))
++					radeon_crtc_handle_vblank(rdev, 4);
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D5 vblank\n");
++
+ 				break;
+ 			case 1: /* D5 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D5 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D5 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6596,23 +6616,27 @@ restart_ih:
+ 		case 6: /* D6 vblank/vline */
+ 			switch (src_data) {
+ 			case 0: /* D6 vblank */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+-					if (rdev->irq.crtc_vblank_int[5]) {
+-						drm_handle_vblank(rdev->ddev, 5);
+-						rdev->pm.vblank_sync = true;
+-						wake_up(&rdev->irq.vblank_queue);
+-					}
+-					if (atomic_read(&rdev->irq.pflip[5]))
+-						radeon_crtc_handle_vblank(rdev, 5);
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vblank\n");
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				if (rdev->irq.crtc_vblank_int[5]) {
++					drm_handle_vblank(rdev->ddev, 5);
++					rdev->pm.vblank_sync = true;
++					wake_up(&rdev->irq.vblank_queue);
+ 				}
++				if (atomic_read(&rdev->irq.pflip[5]))
++					radeon_crtc_handle_vblank(rdev, 5);
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
++				DRM_DEBUG("IH: D6 vblank\n");
++
+ 				break;
+ 			case 1: /* D6 vline */
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+-					DRM_DEBUG("IH: D6 vline\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
++				DRM_DEBUG("IH: D6 vline\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+@@ -6632,88 +6656,112 @@ restart_ih:
+ 		case 42: /* HPD hotplug */
+ 			switch (src_data) {
+ 			case 0:
+-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD1\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD1\n");
++
+ 				break;
+ 			case 1:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD2\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD2\n");
++
+ 				break;
+ 			case 2:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD3\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD3\n");
++
+ 				break;
+ 			case 3:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD4\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD4\n");
++
+ 				break;
+ 			case 4:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD5\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD5\n");
++
+ 				break;
+ 			case 5:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+-					queue_hotplug = true;
+-					DRM_DEBUG("IH: HPD6\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
++				queue_hotplug = true;
++				DRM_DEBUG("IH: HPD6\n");
++
+ 				break;
+ 			case 6:
+-				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 1\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 1\n");
++
+ 				break;
+ 			case 7:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 2\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 2\n");
++
+ 				break;
+ 			case 8:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 3\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 3\n");
++
+ 				break;
+ 			case 9:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 4\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 4\n");
++
+ 				break;
+ 			case 10:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 5\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 5\n");
++
+ 				break;
+ 			case 11:
+-				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+-					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+-					queue_dp = true;
+-					DRM_DEBUG("IH: HPD_RX 6\n");
+-				}
++				if (!(rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT))
++					DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
++
++				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
++				queue_dp = true;
++				DRM_DEBUG("IH: HPD_RX 6\n");
++
+ 				break;
+ 			default:
+ 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
+diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
+index ff8b83f5e929..9dfcedec05a6 100644
+--- a/drivers/gpu/drm/radeon/si_dpm.c
++++ b/drivers/gpu/drm/radeon/si_dpm.c
+@@ -2925,6 +2925,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ 	/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
++	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ 	{ 0, 0, 0, 0 },
+ };
+ 
+diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+index eb2282cc4a56..eba5f8a52fbd 100644
+--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
++++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+@@ -54,55 +54,56 @@ static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+ 		       &rk_obj->dma_attrs);
+ }
+ 
+-int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+-			  struct vm_area_struct *vma)
++static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
++					struct vm_area_struct *vma)
++
+ {
++	int ret;
+ 	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ 	struct drm_device *drm = obj->dev;
+-	unsigned long vm_size;
+ 
+-	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+-	vm_size = vma->vm_end - vma->vm_start;
+-
+-	if (vm_size > obj->size)
+-		return -EINVAL;
++	/*
++	 * dma_alloc_attrs() allocated a struct page table for rk_obj, so clear
++	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
++	 */
++	vma->vm_flags &= ~VM_PFNMAP;
+ 
+-	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
++	ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ 			     obj->size, &rk_obj->dma_attrs);
++	if (ret)
++		drm_gem_vm_close(vma);
++
++	return ret;
+ }
+ 
+-/* drm driver mmap file operations */
+-int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
++			  struct vm_area_struct *vma)
+ {
+-	struct drm_file *priv = filp->private_data;
+-	struct drm_device *dev = priv->minor->dev;
+-	struct drm_gem_object *obj;
+-	struct drm_vma_offset_node *node;
++	struct drm_device *drm = obj->dev;
+ 	int ret;
+ 
+-	if (drm_device_is_unplugged(dev))
+-		return -ENODEV;
++	mutex_lock(&drm->struct_mutex);
++	ret = drm_gem_mmap_obj(obj, obj->size, vma);
++	mutex_unlock(&drm->struct_mutex);
++	if (ret)
++		return ret;
+ 
+-	mutex_lock(&dev->struct_mutex);
++	return rockchip_drm_gem_object_mmap(obj, vma);
++}
+ 
+-	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+-					   vma->vm_pgoff,
+-					   vma_pages(vma));
+-	if (!node) {
+-		mutex_unlock(&dev->struct_mutex);
+-		DRM_ERROR("failed to find vma node.\n");
+-		return -EINVAL;
+-	} else if (!drm_vma_node_is_allowed(node, filp)) {
+-		mutex_unlock(&dev->struct_mutex);
+-		return -EACCES;
+-	}
++/* drm driver mmap file operations */
++int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++	struct drm_gem_object *obj;
++	int ret;
+ 
+-	obj = container_of(node, struct drm_gem_object, vma_node);
+-	ret = rockchip_gem_mmap_buf(obj, vma);
++	ret = drm_gem_mmap(filp, vma);
++	if (ret)
++		return ret;
+ 
+-	mutex_unlock(&dev->struct_mutex);
++	obj = vma->vm_private_data;
+ 
+-	return ret;
++	return rockchip_drm_gem_object_mmap(obj, vma);
+ }
+ 
+ struct rockchip_gem_object *
+diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
+index d6b55e3e3716..a43a836e6f88 100644
+--- a/drivers/gpu/drm/tegra/dpaux.c
++++ b/drivers/gpu/drm/tegra/dpaux.c
+@@ -72,34 +72,32 @@ static inline void tegra_dpaux_writel(struct tegra_dpaux *dpaux,
+ static void tegra_dpaux_write_fifo(struct tegra_dpaux *dpaux, const u8 *buffer,
+ 				   size_t size)
+ {
+-	unsigned long offset = DPAUX_DP_AUXDATA_WRITE(0);
+ 	size_t i, j;
+ 
+-	for (i = 0; i < size; i += 4) {
+-		size_t num = min_t(size_t, size - i, 4);
++	for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
++		size_t num = min_t(size_t, size - i * 4, 4);
+ 		unsigned long value = 0;
+ 
+ 		for (j = 0; j < num; j++)
+-			value |= buffer[i + j] << (j * 8);
++			value |= buffer[i * 4 + j] << (j * 8);
+ 
+-		tegra_dpaux_writel(dpaux, value, offset++);
++		tegra_dpaux_writel(dpaux, value, DPAUX_DP_AUXDATA_WRITE(i));
+ 	}
+ }
+ 
+ static void tegra_dpaux_read_fifo(struct tegra_dpaux *dpaux, u8 *buffer,
+ 				  size_t size)
+ {
+-	unsigned long offset = DPAUX_DP_AUXDATA_READ(0);
+ 	size_t i, j;
+ 
+-	for (i = 0; i < size; i += 4) {
+-		size_t num = min_t(size_t, size - i, 4);
++	for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
++		size_t num = min_t(size_t, size - i * 4, 4);
+ 		unsigned long value;
+ 
+-		value = tegra_dpaux_readl(dpaux, offset++);
++		value = tegra_dpaux_readl(dpaux, DPAUX_DP_AUXDATA_READ(i));
+ 
+ 		for (j = 0; j < num; j++)
+-			buffer[i + j] = value >> (j * 8);
++			buffer[i * 4 + j] = value >> (j * 8);
+ 	}
+ }
+ 
+diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
+index 7a207ca547be..6394547cf67a 100644
+--- a/drivers/gpu/drm/vgem/vgem_drv.c
++++ b/drivers/gpu/drm/vgem/vgem_drv.c
+@@ -328,6 +328,8 @@ static int __init vgem_init(void)
+ 		goto out;
+ 	}
+ 
++	drm_dev_set_unique(vgem_device, "vgem");
++
+ 	ret  = drm_dev_register(vgem_device, 0);
+ 
+ 	if (ret)
+diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
+index d219c06a857b..972444a14cca 100644
+--- a/drivers/hwmon/mcp3021.c
++++ b/drivers/hwmon/mcp3021.c
+@@ -31,14 +31,11 @@
+ /* output format */
+ #define MCP3021_SAR_SHIFT	2
+ #define MCP3021_SAR_MASK	0x3ff
+-
+ #define MCP3021_OUTPUT_RES	10	/* 10-bit resolution */
+-#define MCP3021_OUTPUT_SCALE	4
+ 
+ #define MCP3221_SAR_SHIFT	0
+ #define MCP3221_SAR_MASK	0xfff
+ #define MCP3221_OUTPUT_RES	12	/* 12-bit resolution */
+-#define MCP3221_OUTPUT_SCALE	1
+ 
+ enum chips {
+ 	mcp3021,
+@@ -54,7 +51,6 @@ struct mcp3021_data {
+ 	u16 sar_shift;
+ 	u16 sar_mask;
+ 	u8 output_res;
+-	u8 output_scale;
+ };
+ 
+ static int mcp3021_read16(struct i2c_client *client)
+@@ -84,13 +80,7 @@ static int mcp3021_read16(struct i2c_client *client)
+ 
+ static inline u16 volts_from_reg(struct mcp3021_data *data, u16 val)
+ {
+-	if (val == 0)
+-		return 0;
+-
+-	val = val * data->output_scale - data->output_scale / 2;
+-
+-	return val * DIV_ROUND_CLOSEST(data->vdd,
+-			(1 << data->output_res) * data->output_scale);
++	return DIV_ROUND_CLOSEST(data->vdd * val, 1 << data->output_res);
+ }
+ 
+ static ssize_t show_in_input(struct device *dev, struct device_attribute *attr,
+@@ -132,14 +122,12 @@ static int mcp3021_probe(struct i2c_client *client,
+ 		data->sar_shift = MCP3021_SAR_SHIFT;
+ 		data->sar_mask = MCP3021_SAR_MASK;
+ 		data->output_res = MCP3021_OUTPUT_RES;
+-		data->output_scale = MCP3021_OUTPUT_SCALE;
+ 		break;
+ 
+ 	case mcp3221:
+ 		data->sar_shift = MCP3221_SAR_SHIFT;
+ 		data->sar_mask = MCP3221_SAR_MASK;
+ 		data->output_res = MCP3221_OUTPUT_RES;
+-		data->output_scale = MCP3221_OUTPUT_SCALE;
+ 		break;
+ 	}
+ 
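
The mcp3021 hunks above collapse the old half-LSB offset-and-scale
arithmetic into one rounded division from raw SAR code to millivolts.
A minimal standalone sketch of the simplified conversion (hypothetical
names; vdd in mV, as the driver stores it):

	#include <stdio.h>

	#define DIV_ROUND_CLOSEST(x, d)	(((x) + ((d) / 2)) / (d))

	/* res-bit ADC: mV = round(vdd * code / 2^res) */
	static unsigned int volts_from_reg(unsigned int vdd_mv,
					   unsigned int val, unsigned int res)
	{
		return DIV_ROUND_CLOSEST(vdd_mv * val, 1u << res);
	}

	int main(void)
	{
		/* MCP3021: 10-bit resolution, 3.3 V supply */
		printf("%u mV\n", volts_from_reg(3300, 512, 10));	/* 1650 */
		printf("%u mV\n", volts_from_reg(3300, 1023, 10));	/* 3297 */
		return 0;
	}
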
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index 55765790907b..28fcb2e246d5 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -547,7 +547,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj,
+ 	if (index >= 9 && index < 18 &&
+ 	    (reg & 0x0c) != 0x04 && (reg & 0x0c) != 0x08)	/* RD2 */
+ 		return 0;
+-	if (index >= 18 && index < 27 && (reg & 0x30) != 0x10)	/* RD3 */
++	if (index >= 18 && index < 27 && (reg & 0x30) != 0x20)	/* RD3 */
+ 		return 0;
+ 	if (index >= 27 && index < 35)				/* local */
+ 		return attr->mode;
+diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
+index ff23d1bdd230..9bd10a9b4b50 100644
+--- a/drivers/i2c/busses/i2c-at91.c
++++ b/drivers/i2c/busses/i2c-at91.c
+@@ -65,6 +65,9 @@
+ #define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
+ #define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */
+ 
++#define	AT91_TWI_INT_MASK \
++	(AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY | AT91_TWI_NACK)
++
+ #define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
+ #define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
+ #define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
+@@ -119,13 +122,12 @@ static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
+ 
+ static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
+ {
+-	at91_twi_write(dev, AT91_TWI_IDR,
+-		       AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
++	at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_INT_MASK);
+ }
+ 
+ static void at91_twi_irq_save(struct at91_twi_dev *dev)
+ {
+-	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
++	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & AT91_TWI_INT_MASK;
+ 	at91_disable_twi_interrupts(dev);
+ }
+ 
+@@ -215,6 +217,14 @@ static void at91_twi_write_data_dma_callback(void *data)
+ 	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
+ 			 dev->buf_len, DMA_TO_DEVICE);
+ 
++	/*
++	 * When this callback is called, THR/TX FIFO is likely not to be empty
++	 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
++	 * Status Register to be sure that the STOP bit has been sent and the
++	 * transfer is completed. The NACK interrupt has already been enabled,
++	 * we just have to enable TXCOMP one.
++	 */
++	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ 	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ }
+ 
+@@ -309,7 +319,7 @@ static void at91_twi_read_data_dma_callback(void *data)
+ 	/* The last two bytes have to be read without using dma */
+ 	dev->buf += dev->buf_len - 2;
+ 	dev->buf_len = 2;
+-	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
++	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY | AT91_TWI_TXCOMP);
+ }
+ 
+ static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
+@@ -370,7 +380,7 @@ static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
+ 	/* catch error flags */
+ 	dev->transfer_status |= status;
+ 
+-	if (irqstatus & AT91_TWI_TXCOMP) {
++	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
+ 		at91_disable_twi_interrupts(dev);
+ 		complete(&dev->cmd_complete);
+ 	}
+@@ -384,6 +394,34 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 	unsigned long time_left;
+ 	bool has_unre_flag = dev->pdata->has_unre_flag;
+ 
++	/*
++	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
++	 * read flag but shows the state of the transmission at the time the
++	 * Status Register is read. According to the programmer datasheet,
++	 * TXCOMP is set when both holding register and internal shifter are
++	 * empty and STOP condition has been sent.
++	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
++	 * detect transmission failure.
++	 *
++	 * Besides, the TXCOMP bit is already set before the i2c transaction
++	 * has been started. For read transactions, this bit is cleared when
++	 * writing the START bit into the Control Register. So the
++	 * corresponding interrupt can safely be enabled just after.
++	 * However for write transactions managed by the CPU, we first write
++	 * into THR, so TXCOMP is cleared. Then we can safely enable TXCOMP
++	 * interrupt. If TXCOMP interrupt were enabled before writing into THR,
++	 * the interrupt handler would be called immediately and the i2c command
++	 * would be reported as completed.
++	 * Also when a write transaction is managed by the DMA controller,
++	 * enabling the TXCOMP interrupt in this function may lead to a race
++	 * condition since we don't know whether the TXCOMP interrupt is enabled
++	 * before or after the DMA has started to write into THR. So the TXCOMP
++	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
++	 * Immediately after in that DMA callback, we still need to send the
++	 * STOP condition manually writing the corresponding bit into the
++	 * Control Register.
++	 */
++
+ 	dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
+ 		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
+ 
+@@ -414,26 +452,24 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
+ 		 * seems to be the best solution.
+ 		 */
+ 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
++			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
+ 			at91_twi_read_data_dma(dev);
+-			/*
+-			 * It is important to enable TXCOMP irq here because
+-			 * doing it only when transferring the last two bytes
+-			 * will mask NACK errors since TXCOMP is set when a
+-			 * NACK occurs.
+-			 */
+-			at91_twi_write(dev, AT91_TWI_IER,
+-			       AT91_TWI_TXCOMP);
+-		} else
++		} else {
+ 			at91_twi_write(dev, AT91_TWI_IER,
+-			       AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
++				       AT91_TWI_TXCOMP |
++				       AT91_TWI_NACK |
++				       AT91_TWI_RXRDY);
++		}
+ 	} else {
+ 		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
++			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
+ 			at91_twi_write_data_dma(dev);
+-			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ 		} else {
+ 			at91_twi_write_next_byte(dev);
+ 			at91_twi_write(dev, AT91_TWI_IER,
+-				AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
++				       AT91_TWI_TXCOMP |
++				       AT91_TWI_NACK |
++				       AT91_TWI_TXRDY);
+ 		}
+ 	}
+ 
+diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
+index 06cc1ff088f1..2ba7c0fbc615 100644
+--- a/drivers/i2c/i2c-mux.c
++++ b/drivers/i2c/i2c-mux.c
+@@ -51,7 +51,7 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
+ 
+ 	ret = priv->select(parent, priv->mux_priv, priv->chan_id);
+ 	if (ret >= 0)
+-		ret = parent->algo->master_xfer(parent, msgs, num);
++		ret = __i2c_transfer(parent, msgs, num);
+ 	if (priv->deselect)
+ 		priv->deselect(parent, priv->mux_priv, priv->chan_id);
+ 
+@@ -144,6 +144,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
+ 	priv->adap.dev.parent = &parent->dev;
+ 	priv->adap.retries = parent->retries;
+ 	priv->adap.timeout = parent->timeout;
++	priv->adap.quirks = parent->quirks;
+ 
+ 	/* Sanity check on class */
+ 	if (i2c_mux_parent_classes(parent) & class)
+diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
+index cb772775da43..0c8d4d2cbdaf 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
++++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
+@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val)
+ 		buf[0] = command;
+ 		buf[1] = val;
+ 		msg.buf = buf;
+-		ret = adap->algo->master_xfer(adap, &msg, 1);
++		ret = __i2c_transfer(adap, &msg, 1);
+ 	} else {
+ 		union i2c_smbus_data data;
+ 
+@@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command)
+ 				.buf = &val
+ 			}
+ 		};
+-		ret = adap->algo->master_xfer(adap, msg, 2);
++		ret = __i2c_transfer(adap, msg, 2);
+ 		if (ret == 2)
+ 			ret = val;
+ 		else if (ret >= 0)
+diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
+index bea0d2de2993..ea4aa9dfcea9 100644
+--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
++++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
+@@ -134,7 +134,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap,
+ 		msg.len = 1;
+ 		buf[0] = val;
+ 		msg.buf = buf;
+-		ret = adap->algo->master_xfer(adap, &msg, 1);
++		ret = __i2c_transfer(adap, &msg, 1);
+ 	} else {
+ 		union i2c_smbus_data data;
+ 		ret = adap->algo->smbus_xfer(adap, client->addr,
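
All three mux hunks above replace direct adap->algo->master_xfer()
calls with __i2c_transfer(), the unlocked core entry point, so the
parent adapter's quirk checks and retry handling still apply instead of
being bypassed through the raw method pointer; the i2c-mux.c hunk also
copies the parent's quirks onto the mux adapter. A hedged kernel-style
sketch of the pattern (hypothetical wrapper name):

	#include <linux/i2c.h>

	/* forward msgs through the core, not the raw method pointer, so
	 * quirks and retries are honoured; caller holds the adapter lock */
	static int mux_forward_xfer(struct i2c_adapter *parent,
				    struct i2c_msg *msgs, int num)
	{
		return __i2c_transfer(parent, msgs, num);
	}
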
+diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
+index 73e87739d219..bf827d012a71 100644
+--- a/drivers/iio/accel/bmc150-accel.c
++++ b/drivers/iio/accel/bmc150-accel.c
+@@ -1465,7 +1465,7 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
+ {
+ 	int i;
+ 
+-	for (i = from; i >= 0; i++) {
++	for (i = from; i >= 0; i--) {
+ 		if (data->triggers[i].indio_trig) {
+ 			iio_trigger_unregister(data->triggers[i].indio_trig);
+ 			data->triggers[i].indio_trig = NULL;
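
The one-character bmc150 fix above restores the intended downward walk:
unwinding after a partial failure must release triggers from index
"from" back to 0, and the old i++ marched past the end of the array
instead. A standalone sketch of the reverse-unwind pattern
(illustrative names):

	#include <stdio.h>

	/* release resources from..0 in reverse registration order */
	static void unregister_from(int *registered, int from)
	{
		int i;

		for (i = from; i >= 0; i--) {
			if (registered[i]) {
				printf("releasing %d\n", i);
				registered[i] = 0;
			}
		}
	}

	int main(void)
	{
		int regs[4] = { 1, 1, 1, 0 };

		unregister_from(regs, 2);	/* releases 2, 1, 0 */
		return 0;
	}
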
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index e36a73e7c3a8..1bcb65b8d4a1 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -146,8 +146,7 @@ config DA9150_GPADC
+ 
+ config CC10001_ADC
+ 	tristate "Cosmic Circuits 10001 ADC driver"
+-	depends on HAVE_CLK || REGULATOR
+-	depends on HAS_IOMEM
++	depends on HAS_IOMEM && HAVE_CLK && REGULATOR
+ 	select IIO_BUFFER
+ 	select IIO_TRIGGERED_BUFFER
+ 	help
+diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
+index 8a0eb4a04fb5..7b40925dd4ff 100644
+--- a/drivers/iio/adc/at91_adc.c
++++ b/drivers/iio/adc/at91_adc.c
+@@ -182,7 +182,7 @@ struct at91_adc_caps {
+ 	u8	ts_pen_detect_sensitivity;
+ 
+ 	/* startup time calculate function */
+-	u32 (*calc_startup_ticks)(u8 startup_time, u32 adc_clk_khz);
++	u32 (*calc_startup_ticks)(u32 startup_time, u32 adc_clk_khz);
+ 
+ 	u8	num_channels;
+ 	struct at91_adc_reg_desc registers;
+@@ -201,7 +201,7 @@ struct at91_adc_state {
+ 	u8			num_channels;
+ 	void __iomem		*reg_base;
+ 	struct at91_adc_reg_desc *registers;
+-	u8			startup_time;
++	u32			startup_time;
+ 	u8			sample_hold_time;
+ 	bool			sleep_mode;
+ 	struct iio_trigger	**trig;
+@@ -779,7 +779,7 @@ ret:
+ 	return ret;
+ }
+ 
+-static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
++static u32 calc_startup_ticks_9260(u32 startup_time, u32 adc_clk_khz)
+ {
+ 	/*
+ 	 * Number of ticks needed to cover the startup time of the ADC
+@@ -790,7 +790,7 @@ static u32 calc_startup_ticks_9260(u8 startup_time, u32 adc_clk_khz)
+ 	return round_up((startup_time * adc_clk_khz / 1000) - 1, 8) / 8;
+ }
+ 
+-static u32 calc_startup_ticks_9x5(u8 startup_time, u32 adc_clk_khz)
++static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
+ {
+ 	/*
+ 	 * For sama5d3x and at91sam9x5, the formula changes to:
+diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
+index 8d4e019ea4ca..9c311c1e1ac7 100644
+--- a/drivers/iio/adc/rockchip_saradc.c
++++ b/drivers/iio/adc/rockchip_saradc.c
+@@ -349,3 +349,7 @@ static struct platform_driver rockchip_saradc_driver = {
+ };
+ 
+ module_platform_driver(rockchip_saradc_driver);
++
++MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
++MODULE_DESCRIPTION("Rockchip SARADC driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
+index 94c5f05b4bc1..4caecbea4c97 100644
+--- a/drivers/iio/adc/twl4030-madc.c
++++ b/drivers/iio/adc/twl4030-madc.c
+@@ -835,7 +835,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
+ 	irq = platform_get_irq(pdev, 0);
+ 	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ 				   twl4030_madc_threaded_irq_handler,
+-				   IRQF_TRIGGER_RISING, "twl4030_madc", madc);
++				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
++				   "twl4030_madc", madc);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "could not request irq\n");
+ 		goto err_i2c;
+diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+index 610fc98f88ef..595511022795 100644
+--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
++++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+@@ -36,6 +36,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 	s32 poll_value = 0;
+ 
+ 	if (state) {
++		if (!atomic_read(&st->user_requested_state))
++			return 0;
+ 		if (sensor_hub_device_open(st->hsdev))
+ 			return -EIO;
+ 
+@@ -52,8 +54,12 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 
+ 		poll_value = hid_sensor_read_poll_value(st);
+ 	} else {
+-		if (!atomic_dec_and_test(&st->data_ready))
++		int val;
++
++		val = atomic_dec_if_positive(&st->data_ready);
++		if (val < 0)
+ 			return 0;
++
+ 		sensor_hub_device_close(st->hsdev);
+ 		state_val = hid_sensor_get_usage_index(st->hsdev,
+ 			st->power_state.report_id,
+@@ -92,9 +98,11 @@ EXPORT_SYMBOL(hid_sensor_power_state);
+ 
+ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ {
++
+ #ifdef CONFIG_PM
+ 	int ret;
+ 
++	atomic_set(&st->user_requested_state, state);
+ 	if (state)
+ 		ret = pm_runtime_get_sync(&st->pdev->dev);
+ 	else {
+@@ -109,6 +117,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
+ 
+  	return 0;
+ #else
++	atomic_set(&st->user_requested_state, state);
+ 	return _hid_sensor_power_state(st, state);
+ #endif
+ }
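
The hid-sensor-trigger hunk above also swaps atomic_dec_and_test(),
which decrements unconditionally and can drive the counter negative
when power-state calls are unbalanced, for atomic_dec_if_positive(),
which decrements only while the result stays non-negative and returns
the old value minus one either way. A single-threaded model of those
semantics (illustrative only, not the kernel implementation):

	/* decrement *v only if the result would stay >= 0;
	 * a negative return means no decrement happened */
	static int dec_if_positive(int *v)
	{
		int old = *v;

		if (old > 0)
			*v = old - 1;
		return old - 1;
	}
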
+diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
+index 61bb9d4239ea..e98428df0d44 100644
+--- a/drivers/iio/dac/ad5624r_spi.c
++++ b/drivers/iio/dac/ad5624r_spi.c
+@@ -22,7 +22,7 @@
+ #include "ad5624r.h"
+ 
+ static int ad5624r_spi_write(struct spi_device *spi,
+-			     u8 cmd, u8 addr, u16 val, u8 len)
++			     u8 cmd, u8 addr, u16 val, u8 shift)
+ {
+ 	u32 data;
+ 	u8 msg[3];
+@@ -35,7 +35,7 @@ static int ad5624r_spi_write(struct spi_device *spi,
+ 	 * 14-, 12-bit input code followed by 0, 2, or 4 don't care bits,
+ 	 * for the AD5664R, AD5644R, and AD5624R, respectively.
+ 	 */
+-	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << (16 - len));
++	data = (0 << 22) | (cmd << 19) | (addr << 16) | (val << shift);
+ 	msg[0] = data >> 16;
+ 	msg[1] = data >> 8;
+ 	msg[2] = data;
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 17d4bb15be4d..65ce86837177 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -431,6 +431,23 @@ static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
+ 	return -EINVAL;
+ }
+ 
++static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
++				 struct iio_chan_spec const *chan, long mask)
++{
++	switch (mask) {
++	case IIO_CHAN_INFO_SCALE:
++		switch (chan->type) {
++		case IIO_ANGL_VEL:
++			return IIO_VAL_INT_PLUS_NANO;
++		default:
++			return IIO_VAL_INT_PLUS_MICRO;
++		}
++	default:
++		return IIO_VAL_INT_PLUS_MICRO;
++	}
++
++	return -EINVAL;
++}
+ static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
+ {
+ 	int result, i;
+@@ -696,6 +713,7 @@ static const struct iio_info mpu_info = {
+ 	.driver_module = THIS_MODULE,
+ 	.read_raw = &inv_mpu6050_read_raw,
+ 	.write_raw = &inv_mpu6050_write_raw,
++	.write_raw_get_fmt = &inv_write_raw_get_fmt,
+ 	.attrs = &inv_attribute_group,
+ 	.validate_trigger = inv_mpu6050_validate_trigger,
+ };
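The new write_raw_get_fmt callback matters because IIO splits a sysfs
write such as "0.000133090" into an integer part and a fractional part,
and how many fractional digits survive depends on the format the driver
reports. Gyro scales are small enough that micro precision truncates
them. A toy illustration (the parsed values are made up for the
example, not taken from the MPU-6050 datasheet):

#include <stdio.h>

int main(void)
{
	/* Userspace writes "0.000133090" to in_anglvel_scale. */
	int val = 0;
	int val2_as_micro = 133;	/* 6 fractional digits kept */
	int val2_as_nano  = 133090;	/* 9 fractional digits kept */

	printf("INT_PLUS_MICRO parse: %d.%06d (precision lost)\n",
	       val, val2_as_micro);
	printf("INT_PLUS_NANO  parse: %d.%09d (exact)\n",
	       val, val2_as_nano);
	return 0;
}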
+diff --git a/drivers/iio/light/cm3323.c b/drivers/iio/light/cm3323.c
+index 869033e48a1f..a1d4905cc9d2 100644
+--- a/drivers/iio/light/cm3323.c
++++ b/drivers/iio/light/cm3323.c
+@@ -123,7 +123,7 @@ static int cm3323_set_it_bits(struct cm3323_data *data, int val, int val2)
+ 	for (i = 0; i < ARRAY_SIZE(cm3323_int_time); i++) {
+ 		if (val == cm3323_int_time[i].val &&
+ 		    val2 == cm3323_int_time[i].val2) {
+-			reg_conf = data->reg_conf;
++			reg_conf = data->reg_conf & ~CM3323_CONF_IT_MASK;
+ 			reg_conf |= i << CM3323_CONF_IT_SHIFT;
+ 
+ 			ret = i2c_smbus_write_word_data(data->client,
+diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
+index 71c2bde275aa..f8b1df018abe 100644
+--- a/drivers/iio/light/tcs3414.c
++++ b/drivers/iio/light/tcs3414.c
+@@ -185,7 +185,7 @@ static int tcs3414_write_raw(struct iio_dev *indio_dev,
+ 		if (val != 0)
+ 			return -EINVAL;
+ 		for (i = 0; i < ARRAY_SIZE(tcs3414_times); i++) {
+-			if (val == tcs3414_times[i] * 1000) {
++			if (val2 == tcs3414_times[i] * 1000) {
+ 				data->timing &= ~TCS3414_INTEG_MASK;
+ 				data->timing |= i;
+ 				return i2c_smbus_write_byte_data(
+diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
+index fa40f6d0ca39..bd26a484abcc 100644
+--- a/drivers/iio/proximity/sx9500.c
++++ b/drivers/iio/proximity/sx9500.c
+@@ -206,7 +206,7 @@ static int sx9500_read_proximity(struct sx9500_data *data,
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	*val = 32767 - (s16)be16_to_cpu(regval);
++	*val = be16_to_cpu(regval);
+ 
+ 	return IIO_VAL_INT;
+ }
+diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
+index 84a0789c3d96..7a8050996b4e 100644
+--- a/drivers/iio/temperature/tmp006.c
++++ b/drivers/iio/temperature/tmp006.c
+@@ -132,6 +132,9 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
+ 	struct tmp006_data *data = iio_priv(indio_dev);
+ 	int i;
+ 
++	if (mask != IIO_CHAN_INFO_SAMP_FREQ)
++		return -EINVAL;
++
+ 	for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
+ 		if ((val == tmp006_freqs[i][0]) &&
+ 		    (val2 == tmp006_freqs[i][1])) {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index 9dcb66077d6c..219f2122f9b9 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -679,7 +679,6 @@ err:
+ 		ocrdma_release_ucontext_pd(uctx);
+ 	} else {
+ 		status = _ocrdma_dealloc_pd(dev, pd);
+-		kfree(pd);
+ 	}
+ exit:
+ 	return ERR_PTR(status);
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 135a0907e9de..c90118e90708 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
+ 	bitmap_super_t *sb;
+ 	unsigned long chunksize, daemon_sleep, write_behind;
+ 
+-	bitmap->storage.sb_page = alloc_page(GFP_KERNEL);
++	bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ 	if (bitmap->storage.sb_page == NULL)
+ 		return -ENOMEM;
+ 	bitmap->storage.sb_page->index = 0;
+@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
+ 	sb->state = cpu_to_le32(bitmap->flags);
+ 	bitmap->events_cleared = bitmap->mddev->events;
+ 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
++	bitmap->mddev->bitmap_info.nodes = 0;
+ 
+ 	kunmap_atomic(sb);
+ 
+@@ -611,8 +612,16 @@ re_read:
+ 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
+ 	write_behind = le32_to_cpu(sb->write_behind);
+ 	sectors_reserved = le32_to_cpu(sb->sectors_reserved);
+-	nodes = le32_to_cpu(sb->nodes);
+-	strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
++	/* XXX: This is a hack to ensure that we don't use clustering
++	 *  in case:
++	 *	- dm-raid is in use and
++	 *	- the nodes field written in bitmap_sb is erroneous.
++	 */
++	if (!bitmap->mddev->sync_super) {
++		nodes = le32_to_cpu(sb->nodes);
++		strlcpy(bitmap->mddev->bitmap_info.cluster_name,
++				sb->cluster_name, 64);
++	}
+ 
+ 	/* verify that the bitmap-specific fields are valid */
+ 	if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
+diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
+index b04d1f904d07..004e463c9423 100644
+--- a/drivers/md/dm-cache-policy-cleaner.c
++++ b/drivers/md/dm-cache-policy-cleaner.c
+@@ -171,7 +171,8 @@ static void remove_cache_hash_entry(struct wb_cache_entry *e)
+ /* Public interface (see dm-cache-policy.h */
+ static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ 		  bool can_block, bool can_migrate, bool discarded_oblock,
+-		  struct bio *bio, struct policy_result *result)
++		  struct bio *bio, struct policy_locker *locker,
++		  struct policy_result *result)
+ {
+ 	struct policy *p = to_policy(pe);
+ 	struct wb_cache_entry *e;
+diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
+index 2256a1f24f73..c198e6defb9c 100644
+--- a/drivers/md/dm-cache-policy-internal.h
++++ b/drivers/md/dm-cache-policy-internal.h
+@@ -16,9 +16,10 @@
+  */
+ static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ 			     bool can_block, bool can_migrate, bool discarded_oblock,
+-			     struct bio *bio, struct policy_result *result)
++			     struct bio *bio, struct policy_locker *locker,
++			     struct policy_result *result)
+ {
+-	return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
++	return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, locker, result);
+ }
+ 
+ static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
+index 3ddd1162334d..515d44bf24d3 100644
+--- a/drivers/md/dm-cache-policy-mq.c
++++ b/drivers/md/dm-cache-policy-mq.c
+@@ -693,9 +693,10 @@ static void requeue(struct mq_policy *mq, struct entry *e)
+  * - set the hit count to a hard coded value other than 1, eg, is it better
+  *   if it goes in at level 2?
+  */
+-static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
++static int demote_cblock(struct mq_policy *mq,
++			 struct policy_locker *locker, dm_oblock_t *oblock)
+ {
+-	struct entry *demoted = pop(mq, &mq->cache_clean);
++	struct entry *demoted = peek(&mq->cache_clean);
+ 
+ 	if (!demoted)
+ 		/*
+@@ -707,6 +708,13 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+ 		 */
+ 		return -ENOSPC;
+ 
++	if (locker->fn(locker, demoted->oblock))
++		/*
++		 * We couldn't lock the demoted block.
++		 */
++		return -EBUSY;
++
++	del(mq, demoted);
+ 	*oblock = demoted->oblock;
+ 	free_entry(&mq->cache_pool, demoted);
+ 
+@@ -795,6 +803,7 @@ static int cache_entry_found(struct mq_policy *mq,
+  * finding which cache block to use.
+  */
+ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
++			      struct policy_locker *locker,
+ 			      struct policy_result *result)
+ {
+ 	int r;
+@@ -803,11 +812,12 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ 	/* Ensure there's a free cblock in the cache */
+ 	if (epool_empty(&mq->cache_pool)) {
+ 		result->op = POLICY_REPLACE;
+-		r = demote_cblock(mq, &result->old_oblock);
++		r = demote_cblock(mq, locker, &result->old_oblock);
+ 		if (r) {
+ 			result->op = POLICY_MISS;
+ 			return 0;
+ 		}
++
+ 	} else
+ 		result->op = POLICY_NEW;
+ 
+@@ -829,7 +839,8 @@ static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ 
+ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ 				 bool can_migrate, bool discarded_oblock,
+-				 int data_dir, struct policy_result *result)
++				 int data_dir, struct policy_locker *locker,
++				 struct policy_result *result)
+ {
+ 	int r = 0;
+ 
+@@ -842,7 +853,7 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ 
+ 	else {
+ 		requeue(mq, e);
+-		r = pre_cache_to_cache(mq, e, result);
++		r = pre_cache_to_cache(mq, e, locker, result);
+ 	}
+ 
+ 	return r;
+@@ -872,6 +883,7 @@ static void insert_in_pre_cache(struct mq_policy *mq,
+ }
+ 
+ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
++			    struct policy_locker *locker,
+ 			    struct policy_result *result)
+ {
+ 	int r;
+@@ -879,7 +891,7 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ 
+ 	if (epool_empty(&mq->cache_pool)) {
+ 		result->op = POLICY_REPLACE;
+-		r = demote_cblock(mq, &result->old_oblock);
++		r = demote_cblock(mq, locker, &result->old_oblock);
+ 		if (unlikely(r)) {
+ 			result->op = POLICY_MISS;
+ 			insert_in_pre_cache(mq, oblock);
+@@ -907,11 +919,12 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ 
+ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+ 			  bool can_migrate, bool discarded_oblock,
+-			  int data_dir, struct policy_result *result)
++			  int data_dir, struct policy_locker *locker,
++			  struct policy_result *result)
+ {
+ 	if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
+ 		if (can_migrate)
+-			insert_in_cache(mq, oblock, result);
++			insert_in_cache(mq, oblock, locker, result);
+ 		else
+ 			return -EWOULDBLOCK;
+ 	} else {
+@@ -928,7 +941,8 @@ static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+  */
+ static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ 	       bool can_migrate, bool discarded_oblock,
+-	       int data_dir, struct policy_result *result)
++	       int data_dir, struct policy_locker *locker,
++	       struct policy_result *result)
+ {
+ 	int r = 0;
+ 	struct entry *e = hash_lookup(mq, oblock);
+@@ -942,11 +956,11 @@ static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ 
+ 	else if (e)
+ 		r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+-					  data_dir, result);
++					  data_dir, locker, result);
+ 
+ 	else
+ 		r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+-				   data_dir, result);
++				   data_dir, locker, result);
+ 
+ 	if (r == -EWOULDBLOCK)
+ 		result->op = POLICY_MISS;
+@@ -1012,7 +1026,8 @@ static void copy_tick(struct mq_policy *mq)
+ 
+ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ 		  bool can_block, bool can_migrate, bool discarded_oblock,
+-		  struct bio *bio, struct policy_result *result)
++		  struct bio *bio, struct policy_locker *locker,
++		  struct policy_result *result)
+ {
+ 	int r;
+ 	struct mq_policy *mq = to_mq_policy(p);
+@@ -1028,7 +1043,7 @@ static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ 
+ 	iot_examine_bio(&mq->tracker, bio);
+ 	r = map(mq, oblock, can_migrate, discarded_oblock,
+-		bio_data_dir(bio), result);
++		bio_data_dir(bio), locker, result);
+ 
+ 	mutex_unlock(&mq->lock);
+ 
+diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
+index f50fe360c546..5524e21e4836 100644
+--- a/drivers/md/dm-cache-policy.h
++++ b/drivers/md/dm-cache-policy.h
+@@ -70,6 +70,18 @@ enum policy_operation {
+ };
+ 
+ /*
++ * When issuing a POLICY_REPLACE the policy needs to make a callback to
++ * lock the block being demoted.  This doesn't need to occur during a
++ * writeback operation since the block remains in the cache.
++ */
++struct policy_locker;
++typedef int (*policy_lock_fn)(struct policy_locker *l, dm_oblock_t oblock);
++
++struct policy_locker {
++	policy_lock_fn fn;
++};
++
++/*
+  * This is the instruction passed back to the core target.
+  */
+ struct policy_result {
+@@ -122,7 +134,8 @@ struct dm_cache_policy {
+ 	 */
+ 	int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ 		   bool can_block, bool can_migrate, bool discarded_oblock,
+-		   struct bio *bio, struct policy_result *result);
++		   struct bio *bio, struct policy_locker *locker,
++		   struct policy_result *result);
+ 
+ 	/*
+ 	 * Sometimes we want to see if a block is in the cache, without
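The policy_locker plumbing added above is the kernel's usual
embed-and-container_of() callback idiom: the cache core wraps the
generic struct in its own context, the policy only ever sees the
generic half, and the callee recovers the full context from the
embedded member. A compact userspace sketch of the pattern (the
wrapping context struct here is invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct policy_locker;
typedef int (*policy_lock_fn)(struct policy_locker *l, unsigned long oblock);

struct policy_locker {
	policy_lock_fn fn;
};

/* Caller-side context embedding the generic locker. */
struct old_oblock_lock {
	struct policy_locker locker;
	int locked_count;
};

static int cell_locker(struct policy_locker *l, unsigned long oblock)
{
	struct old_oblock_lock *ool =
		container_of(l, struct old_oblock_lock, locker);

	ool->locked_count++;
	printf("locking oblock %lu\n", oblock);
	return 0;	/* 0 = locked; non-zero = block held elsewhere */
}

/* Policy side: only knows about struct policy_locker. */
static int demote(struct policy_locker *l, unsigned long oblock)
{
	return l->fn(l, oblock);
}

int main(void)
{
	struct old_oblock_lock ool = { .locker.fn = cell_locker };
	int r = demote(&ool.locker, 42);

	printf("demote -> %d, locks taken: %d\n", r, ool.locked_count);
	return 0;
}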
+diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
+index 7755af351867..e049becaaf2d 100644
+--- a/drivers/md/dm-cache-target.c
++++ b/drivers/md/dm-cache-target.c
+@@ -1445,16 +1445,43 @@ static void inc_miss_counter(struct cache *cache, struct bio *bio)
+ 		   &cache->stats.read_miss : &cache->stats.write_miss);
+ }
+ 
++/*----------------------------------------------------------------*/
++
++struct old_oblock_lock {
++	struct policy_locker locker;
++	struct cache *cache;
++	struct prealloc *structs;
++	struct dm_bio_prison_cell *cell;
++};
++
++static int null_locker(struct policy_locker *locker, dm_oblock_t b)
++{
++	/* This should never be called */
++	BUG();
++	return 0;
++}
++
++static int cell_locker(struct policy_locker *locker, dm_oblock_t b)
++{
++	struct old_oblock_lock *l = container_of(locker, struct old_oblock_lock, locker);
++	struct dm_bio_prison_cell *cell_prealloc = prealloc_get_cell(l->structs);
++
++	return bio_detain(l->cache, b, NULL, cell_prealloc,
++			  (cell_free_fn) prealloc_put_cell,
++			  l->structs, &l->cell);
++}
++
+ static void process_bio(struct cache *cache, struct prealloc *structs,
+ 			struct bio *bio)
+ {
+ 	int r;
+ 	bool release_cell = true;
+ 	dm_oblock_t block = get_bio_block(cache, bio);
+-	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
++	struct dm_bio_prison_cell *cell_prealloc, *new_ocell;
+ 	struct policy_result lookup_result;
+ 	bool passthrough = passthrough_mode(&cache->features);
+ 	bool discarded_block, can_migrate;
++	struct old_oblock_lock ool;
+ 
+ 	/*
+ 	 * Check to see if that block is currently migrating.
+@@ -1469,8 +1496,12 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
+ 	discarded_block = is_discarded_oblock(cache, block);
+ 	can_migrate = !passthrough && (discarded_block || spare_migration_bandwidth(cache));
+ 
++	ool.locker.fn = cell_locker;
++	ool.cache = cache;
++	ool.structs = structs;
++	ool.cell = NULL;
+ 	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+-		       bio, &lookup_result);
++		       bio, &ool.locker, &lookup_result);
+ 
+ 	if (r == -EWOULDBLOCK)
+ 		/* migration has been denied */
+@@ -1527,27 +1558,11 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
+ 		break;
+ 
+ 	case POLICY_REPLACE:
+-		cell_prealloc = prealloc_get_cell(structs);
+-		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+-			       (cell_free_fn) prealloc_put_cell,
+-			       structs, &old_ocell);
+-		if (r > 0) {
+-			/*
+-			 * We have to be careful to avoid lock inversion of
+-			 * the cells.  So we back off, and wait for the
+-			 * old_ocell to become free.
+-			 */
+-			policy_force_mapping(cache->policy, block,
+-					     lookup_result.old_oblock);
+-			atomic_inc(&cache->stats.cache_cell_clash);
+-			break;
+-		}
+ 		atomic_inc(&cache->stats.demotion);
+ 		atomic_inc(&cache->stats.promotion);
+-
+ 		demote_then_promote(cache, structs, lookup_result.old_oblock,
+ 				    block, lookup_result.cblock,
+-				    old_ocell, new_ocell);
++				    ool.cell, new_ocell);
+ 		release_cell = false;
+ 		break;
+ 
+@@ -2595,6 +2610,9 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
+ 	bool discarded_block;
+ 	struct policy_result lookup_result;
+ 	struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
++	struct old_oblock_lock ool;
++
++	ool.locker.fn = null_locker;
+ 
+ 	if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
+ 		/*
+@@ -2633,7 +2651,7 @@ static int __cache_map(struct cache *cache, struct bio *bio, struct dm_bio_priso
+ 	discarded_block = is_discarded_oblock(cache, block);
+ 
+ 	r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+-		       bio, &lookup_result);
++		       bio, &ool.locker, &lookup_result);
+ 	if (r == -EWOULDBLOCK) {
+ 		cell_defer(cache, *cell, true);
+ 		return DM_MAPIO_SUBMITTED;
+diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
+index f478a4c96d2f..419bdd4fc8b8 100644
+--- a/drivers/md/dm-stats.c
++++ b/drivers/md/dm-stats.c
+@@ -795,6 +795,8 @@ static int message_stats_create(struct mapped_device *md,
+ 		return -EINVAL;
+ 
+ 	if (sscanf(argv[2], "/%u%c", &divisor, &dummy) == 1) {
++		if (!divisor)
++			return -EINVAL;
+ 		step = end - start;
+ 		if (do_div(step, divisor))
+ 			step++;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 921aafd12aee..e22e6c892b8a 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -18,6 +18,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/vmalloc.h>
+ #include <linux/sort.h>
+ #include <linux/rbtree.h>
+ 
+@@ -260,7 +261,7 @@ struct pool {
+ 	process_mapping_fn process_prepared_mapping;
+ 	process_mapping_fn process_prepared_discard;
+ 
+-	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
++	struct dm_bio_prison_cell **cell_sort_array;
+ };
+ 
+ static enum pool_mode get_pool_mode(struct pool *pool);
+@@ -2499,6 +2500,7 @@ static void __pool_destroy(struct pool *pool)
+ {
+ 	__pool_table_remove(pool);
+ 
++	vfree(pool->cell_sort_array);
+ 	if (dm_pool_metadata_close(pool->pmd) < 0)
+ 		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
+ 
+@@ -2611,6 +2613,13 @@ static struct pool *pool_create(struct mapped_device *pool_md,
+ 		goto bad_mapping_pool;
+ 	}
+ 
++	pool->cell_sort_array = vmalloc(sizeof(*pool->cell_sort_array) * CELL_SORT_ARRAY_SIZE);
++	if (!pool->cell_sort_array) {
++		*error = "Error allocating cell sort array";
++		err_p = ERR_PTR(-ENOMEM);
++		goto bad_sort_array;
++	}
++
+ 	pool->ref_count = 1;
+ 	pool->last_commit_jiffies = jiffies;
+ 	pool->pool_md = pool_md;
+@@ -2619,6 +2628,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
+ 
+ 	return pool;
+ 
++bad_sort_array:
++	mempool_destroy(pool->mapping_pool);
+ bad_mapping_pool:
+ 	dm_deferred_set_destroy(pool->all_io_ds);
+ bad_all_io_ds:
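Moving cell_sort_array out of struct pool addresses a real allocation
problem: with the array embedded, every pool had to come from one large
physically contiguous kmalloc(), which is fragile under memory
fragmentation, whereas vmalloc() only needs virtually contiguous pages.
A counts-only sketch of the size difference, assuming the 8192-entry
CELL_SORT_ARRAY_SIZE this kernel's dm-thin uses:

#include <stdio.h>

#define CELL_SORT_ARRAY_SIZE 8192

struct cell;

struct pool_embedded {
	int other_state;
	struct cell *cell_sort_array[CELL_SORT_ARRAY_SIZE]; /* 64 KiB on LP64 */
};

struct pool_indirect {
	int other_state;
	struct cell **cell_sort_array;	/* one pointer; array lives apart */
};

int main(void)
{
	printf("embedded pool: %zu bytes, one big allocation\n",
	       sizeof(struct pool_embedded));
	printf("indirect pool: %zu bytes + %zu-byte array allocated separately\n",
	       sizeof(struct pool_indirect),
	       sizeof(struct cell *) * CELL_SORT_ARRAY_SIZE);
	return 0;
}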
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 4dbed4a67aaf..b9200282fd77 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4005,8 +4005,10 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
+ 	else
+ 		rdev = md_import_device(dev, -1, -1);
+ 
+-	if (IS_ERR(rdev))
++	if (IS_ERR(rdev)) {
++		mddev_unlock(mddev);
+ 		return PTR_ERR(rdev);
++	}
+ 	err = bind_rdev_to_array(rdev, mddev);
+  out:
+ 	if (err)
+@@ -5159,6 +5161,7 @@ int md_run(struct mddev *mddev)
+ 		mddev_detach(mddev);
+ 		if (mddev->private)
+ 			pers->free(mddev, mddev->private);
++		mddev->private = NULL;
+ 		module_put(pers->owner);
+ 		bitmap_destroy(mddev);
+ 		return err;
+@@ -5294,6 +5297,7 @@ static void md_clean(struct mddev *mddev)
+ 	mddev->changed = 0;
+ 	mddev->degraded = 0;
+ 	mddev->safemode = 0;
++	mddev->private = NULL;
+ 	mddev->merge_check_needed = 0;
+ 	mddev->bitmap_info.offset = 0;
+ 	mddev->bitmap_info.default_offset = 0;
+@@ -5366,6 +5370,7 @@ static void __md_stop(struct mddev *mddev)
+ 	mddev->pers = NULL;
+ 	spin_unlock(&mddev->lock);
+ 	pers->free(mddev, mddev->private);
++	mddev->private = NULL;
+ 	if (pers->sync_request && mddev->to_remove == NULL)
+ 		mddev->to_remove = &md_redundancy_group;
+ 	module_put(pers->owner);
+@@ -6375,7 +6380,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
+ 	    mddev->ctime         != info->ctime         ||
+ 	    mddev->level         != info->level         ||
+ /*	    mddev->layout        != info->layout        || */
+-	    !mddev->persistent	 != info->not_persistent||
++	    mddev->persistent	 != !info->not_persistent ||
+ 	    mddev->chunk_sectors != info->chunk_size >> 9 ||
+ 	    /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */
+ 	    ((state^info->state) & 0xfffffe00)
+diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
+index b88757cd0d1d..a03178e91a79 100644
+--- a/drivers/md/persistent-data/dm-btree-remove.c
++++ b/drivers/md/persistent-data/dm-btree-remove.c
+@@ -309,8 +309,8 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 
+ 		if (s < 0 && nr_center < -s) {
+ 			/* not enough in central node */
+-			shift(left, center, nr_center);
+-			s = nr_center - target;
++			shift(left, center, -nr_center);
++			s += nr_center;
+ 			shift(left, right, s);
+ 			nr_right += s;
+ 		} else
+@@ -323,7 +323,7 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ 		if (s > 0 && nr_center < s) {
+ 			/* not enough in central node */
+ 			shift(center, right, nr_center);
+-			s = target - nr_center;
++			s -= nr_center;
+ 			shift(left, right, s);
+ 			nr_left -= s;
+ 		} else
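The redistribute3 arithmetic is easy to get wrong, which is what this
hunk corrects: when an outer node is short by more entries than the
center holds, the center must be drained first and only the remainder
pulled across from the far side. A counts-only check of the fixed
sequence, with the driver's shift() calls modeled as plain arithmetic:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int nr_left = 2, nr_center = 3, nr_right = 16;
	int total = nr_left + nr_center + nr_right;	/* 21 */
	int target = total / 3;				/* 7  */
	int s = nr_left - target;			/* -5: left short by 5 */

	assert(s < 0 && nr_center < -s);

	/* Fixed sequence: drain center into left, top up from right. */
	nr_left += nr_center;	/* shift(left, center, -nr_center) */
	s += nr_center;		/* still short by -s = 2 */
	nr_left -= s;		/* shift(left, right, s), s negative */
	nr_right += s;
	nr_center = 0;

	printf("left=%d center=%d right=%d\n", nr_left, nr_center, nr_right);
	assert(nr_left == target);
	assert(nr_left + nr_center + nr_right == total);
	return 0;
}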
+diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
+index 200ac12a1d40..fdd3793e22f9 100644
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -255,7 +255,7 @@ int dm_btree_del(struct dm_btree_info *info, dm_block_t root)
+ 	int r;
+ 	struct del_stack *s;
+ 
+-	s = kmalloc(sizeof(*s), GFP_KERNEL);
++	s = kmalloc(sizeof(*s), GFP_NOIO);
+ 	if (!s)
+ 		return -ENOMEM;
+ 	s->info = info;
+diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
+index e8a904298887..53091295fce9 100644
+--- a/drivers/md/persistent-data/dm-space-map-metadata.c
++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
+@@ -204,6 +204,27 @@ static void in(struct sm_metadata *smm)
+ 	smm->recursion_count++;
+ }
+ 
++static int apply_bops(struct sm_metadata *smm)
++{
++	int r = 0;
++
++	while (!brb_empty(&smm->uncommitted)) {
++		struct block_op bop;
++
++		r = brb_pop(&smm->uncommitted, &bop);
++		if (r) {
++			DMERR("bug in bop ring buffer");
++			break;
++		}
++
++		r = commit_bop(smm, &bop);
++		if (r)
++			break;
++	}
++
++	return r;
++}
++
+ static int out(struct sm_metadata *smm)
+ {
+ 	int r = 0;
+@@ -216,21 +237,8 @@ static int out(struct sm_metadata *smm)
+ 		return -ENOMEM;
+ 	}
+ 
+-	if (smm->recursion_count == 1) {
+-		while (!brb_empty(&smm->uncommitted)) {
+-			struct block_op bop;
+-
+-			r = brb_pop(&smm->uncommitted, &bop);
+-			if (r) {
+-				DMERR("bug in bop ring buffer");
+-				break;
+-			}
+-
+-			r = commit_bop(smm, &bop);
+-			if (r)
+-				break;
+-		}
+-	}
++	if (smm->recursion_count == 1)
++		apply_bops(smm);
+ 
+ 	smm->recursion_count--;
+ 
+@@ -704,6 +712,12 @@ static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+ 		}
+ 		old_len = smm->begin;
+ 
++		r = apply_bops(smm);
++		if (r) {
++			DMERR("%s: apply_bops failed", __func__);
++			goto out;
++		}
++
+ 		r = sm_ll_commit(&smm->ll);
+ 		if (r)
+ 			goto out;
+@@ -773,6 +787,12 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
+ 	if (r)
+ 		return r;
+ 
++	r = apply_bops(smm);
++	if (r) {
++		DMERR("%s: apply_bops failed", __func__);
++		return r;
++	}
++
+ 	return sm_metadata_commit(sm);
+ }
+ 
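Factoring the drain loop into apply_bops() is more than cleanup: the
extend and create paths now flush the queued block operations before
committing, so the on-disk reference counts can no longer go stale. A
userspace model of the drain-before-commit pattern, with a trivial ring
buffer standing in for smm->uncommitted:

#include <stdio.h>

#define RB_SIZE 8

struct block_op { int type; unsigned long block; };

static struct block_op ring[RB_SIZE];
static unsigned int head, tail;

static int brb_pop(struct block_op *out)
{
	if (head == tail)
		return -1;	/* empty */
	*out = ring[tail++ % RB_SIZE];
	return 0;
}

static int commit_bop(const struct block_op *op)
{
	printf("applying op %d on block %lu\n", op->type, op->block);
	return 0;
}

/* Mirror of apply_bops(): drain everything, stop on first error. */
static int apply_bops(void)
{
	struct block_op bop;

	while (!brb_pop(&bop)) {
		int r = commit_bop(&bop);

		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	ring[head++ % RB_SIZE] = (struct block_op){ 1, 10 };
	ring[head++ % RB_SIZE] = (struct block_op){ 2, 11 };
	if (!apply_bops())
		printf("safe to commit\n");
	return 0;
}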
+diff --git a/drivers/media/dvb-frontends/af9013.c b/drivers/media/dvb-frontends/af9013.c
+index 8001690d7576..ba6c8f6c42a1 100644
+--- a/drivers/media/dvb-frontends/af9013.c
++++ b/drivers/media/dvb-frontends/af9013.c
+@@ -605,6 +605,10 @@ static int af9013_set_frontend(struct dvb_frontend *fe)
+ 			}
+ 		}
+ 
++		/* Return an error if we can't find the bandwidth or the right clock */
++		if (i == ARRAY_SIZE(coeff_lut))
++			return -EINVAL;
++
+ 		ret = af9013_wr_regs(state, 0xae00, coeff_lut[i].val,
+ 			sizeof(coeff_lut[i].val));
+ 	}
+diff --git a/drivers/media/dvb-frontends/cx24116.c b/drivers/media/dvb-frontends/cx24116.c
+index 2916d7c74a1d..7bc68b355c0b 100644
+--- a/drivers/media/dvb-frontends/cx24116.c
++++ b/drivers/media/dvb-frontends/cx24116.c
+@@ -963,6 +963,10 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 	struct cx24116_state *state = fe->demodulator_priv;
+ 	int i, ret;
+ 
++	/* Validate length */
++	if (d->msg_len > sizeof(d->msg))
++                return -EINVAL;
++
+ 	/* Dump DiSEqC message */
+ 	if (debug) {
+ 		printk(KERN_INFO "cx24116: %s(", __func__);
+@@ -974,10 +978,6 @@ static int cx24116_send_diseqc_msg(struct dvb_frontend *fe,
+ 		printk(") toneburst=%d\n", toneburst);
+ 	}
+ 
+-	/* Validate length */
+-	if (d->msg_len > (CX24116_ARGLEN - CX24116_DISEQC_MSGOFS))
+-		return -EINVAL;
+-
+ 	/* DiSEqC message */
+ 	for (i = 0; i < d->msg_len; i++)
+ 		state->dsec_cmd.args[CX24116_DISEQC_MSGOFS + i] = d->msg[i];
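Note the ordering in the cx24116 hunk: the length check moves ahead of
the debug dump, so an oversized caller-supplied msg_len is rejected
before any message byte is read, and the bound is now sizeof(d->msg)
rather than a derived constant. A sketch of that validate-before-use
shape, with the struct layout following the DVB DiSEqC API:

#include <stdio.h>
#include <string.h>

struct dvb_diseqc_master_cmd {	/* shape as in the DVB API */
	unsigned char msg[6];
	unsigned char msg_len;
};

static int send_diseqc_msg(const struct dvb_diseqc_master_cmd *d,
			   unsigned char *out, size_t out_size)
{
	/* Validate the length before touching d->msg at all. */
	if (d->msg_len > sizeof(d->msg) || d->msg_len > out_size)
		return -1;

	memcpy(out, d->msg, d->msg_len);
	return 0;
}

int main(void)
{
	struct dvb_diseqc_master_cmd cmd = { { 0xe0, 0x10, 0x38, 0xf0 }, 4 };
	unsigned char buf[6];

	printf("send: %d\n", send_diseqc_msg(&cmd, buf, sizeof(buf)));
	cmd.msg_len = 200;	/* out-of-range length is refused */
	printf("send: %d\n", send_diseqc_msg(&cmd, buf, sizeof(buf)));
	return 0;
}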
+diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
+index acb965ce0358..af6363573efd 100644
+--- a/drivers/media/dvb-frontends/cx24117.c
++++ b/drivers/media/dvb-frontends/cx24117.c
+@@ -1043,7 +1043,7 @@ static int cx24117_send_diseqc_msg(struct dvb_frontend *fe,
+ 	dev_dbg(&state->priv->i2c->dev, ")\n");
+ 
+ 	/* Validate length */
+-	if (d->msg_len > 15)
++	if (d->msg_len > sizeof(d->msg))
+ 		return -EINVAL;
+ 
+ 	/* DiSEqC message */
+diff --git a/drivers/media/dvb-frontends/s5h1420.c b/drivers/media/dvb-frontends/s5h1420.c
+index 93eeaf7118fd..0b4f8fe6bf99 100644
+--- a/drivers/media/dvb-frontends/s5h1420.c
++++ b/drivers/media/dvb-frontends/s5h1420.c
+@@ -180,7 +180,7 @@ static int s5h1420_send_master_cmd (struct dvb_frontend* fe,
+ 	int result = 0;
+ 
+ 	dprintk("enter %s\n", __func__);
+-	if (cmd->msg_len > 8)
++	if (cmd->msg_len > sizeof(cmd->msg))
+ 		return -EINVAL;
+ 
+ 	/* setup for DISEQC */
+diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
+index c82d25d53341..c9860845264f 100644
+--- a/drivers/media/pci/cx18/cx18-streams.c
++++ b/drivers/media/pci/cx18/cx18-streams.c
+@@ -90,6 +90,7 @@ static struct {
+ 		"encoder PCM audio",
+ 		VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
+ 		PCI_DMA_FROMDEVICE,
++		V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
+ 	},
+ 	{	/* CX18_ENC_STREAM_TYPE_IDX */
+ 		"encoder IDX",
+diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
+index 9266965412c3..7a0a65146723 100644
+--- a/drivers/media/pci/saa7164/saa7164-encoder.c
++++ b/drivers/media/pci/saa7164/saa7164-encoder.c
+@@ -721,13 +721,14 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 		sizeof(cap->card));
+ 	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+ 
+-	cap->capabilities =
++	cap->device_caps =
+ 		V4L2_CAP_VIDEO_CAPTURE |
+-		V4L2_CAP_READWRITE     |
+-		0;
++		V4L2_CAP_READWRITE |
++		V4L2_CAP_TUNER;
+ 
+-	cap->capabilities |= V4L2_CAP_TUNER;
+-	cap->version = 0;
++	cap->capabilities = cap->device_caps |
++		V4L2_CAP_VBI_CAPTURE |
++		V4L2_CAP_DEVICE_CAPS;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
+index 6e025fea2542..06117e6c0596 100644
+--- a/drivers/media/pci/saa7164/saa7164-vbi.c
++++ b/drivers/media/pci/saa7164/saa7164-vbi.c
+@@ -660,13 +660,14 @@ static int vidioc_querycap(struct file *file, void  *priv,
+ 		sizeof(cap->card));
+ 	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
+ 
+-	cap->capabilities =
++	cap->device_caps =
+ 		V4L2_CAP_VBI_CAPTURE |
+-		V4L2_CAP_READWRITE     |
+-		0;
++		V4L2_CAP_READWRITE |
++		V4L2_CAP_TUNER;
+ 
+-	cap->capabilities |= V4L2_CAP_TUNER;
+-	cap->version = 0;
++	cap->capabilities = cap->device_caps |
++		V4L2_CAP_VIDEO_CAPTURE |
++		V4L2_CAP_DEVICE_CAPS;
+ 
+ 	return 0;
+ }
+diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
+index 2b40393836ff..0d248ce02a9b 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_core.c
++++ b/drivers/media/usb/dvb-usb/dib0700_core.c
+@@ -655,10 +655,20 @@ out:
+ struct dib0700_rc_response {
+ 	u8 report_id;
+ 	u8 data_state;
+-	u8 system;
+-	u8 not_system;
+-	u8 data;
+-	u8 not_data;
++	union {
++		struct {
++			u8 system;
++			u8 not_system;
++			u8 data;
++			u8 not_data;
++		} nec;
++		struct {
++			u8 not_used;
++			u8 system;
++			u8 data;
++			u8 not_data;
++		} rc5;
++	};
+ };
+ #define RC_MSG_SIZE_V1_20 6
+ 
+@@ -694,8 +704,8 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ 
+ 	deb_data("IR ID = %02X state = %02X System = %02X %02X Cmd = %02X %02X (len %d)\n",
+ 		 poll_reply->report_id, poll_reply->data_state,
+-		 poll_reply->system, poll_reply->not_system,
+-		 poll_reply->data, poll_reply->not_data,
++		 poll_reply->nec.system, poll_reply->nec.not_system,
++		 poll_reply->nec.data, poll_reply->nec.not_data,
+ 		 purb->actual_length);
+ 
+ 	switch (d->props.rc.core.protocol) {
+@@ -704,30 +714,30 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ 		toggle = 0;
+ 
+ 		/* NEC protocol sends repeat code as 0 0 0 FF */
+-		if (poll_reply->system     == 0x00 &&
+-		    poll_reply->not_system == 0x00 &&
+-		    poll_reply->data       == 0x00 &&
+-		    poll_reply->not_data   == 0xff) {
++		if (poll_reply->nec.system     == 0x00 &&
++		    poll_reply->nec.not_system == 0x00 &&
++		    poll_reply->nec.data       == 0x00 &&
++		    poll_reply->nec.not_data   == 0xff) {
+ 			poll_reply->data_state = 2;
+ 			break;
+ 		}
+ 
+-		if ((poll_reply->data ^ poll_reply->not_data) != 0xff) {
++		if ((poll_reply->nec.data ^ poll_reply->nec.not_data) != 0xff) {
+ 			deb_data("NEC32 protocol\n");
+-			keycode = RC_SCANCODE_NEC32(poll_reply->system     << 24 |
+-						     poll_reply->not_system << 16 |
+-						     poll_reply->data       << 8  |
+-						     poll_reply->not_data);
+-		} else if ((poll_reply->system ^ poll_reply->not_system) != 0xff) {
++			keycode = RC_SCANCODE_NEC32(poll_reply->nec.system     << 24 |
++						     poll_reply->nec.not_system << 16 |
++						     poll_reply->nec.data       << 8  |
++						     poll_reply->nec.not_data);
++		} else if ((poll_reply->nec.system ^ poll_reply->nec.not_system) != 0xff) {
+ 			deb_data("NEC extended protocol\n");
+-			keycode = RC_SCANCODE_NECX(poll_reply->system << 8 |
+-						    poll_reply->not_system,
+-						    poll_reply->data);
++			keycode = RC_SCANCODE_NECX(poll_reply->nec.system << 8 |
++						    poll_reply->nec.not_system,
++						    poll_reply->nec.data);
+ 
+ 		} else {
+ 			deb_data("NEC normal protocol\n");
+-			keycode = RC_SCANCODE_NEC(poll_reply->system,
+-						   poll_reply->data);
++			keycode = RC_SCANCODE_NEC(poll_reply->nec.system,
++						   poll_reply->nec.data);
+ 		}
+ 
+ 		break;
+@@ -735,19 +745,19 @@ static void dib0700_rc_urb_completion(struct urb *purb)
+ 		deb_data("RC5 protocol\n");
+ 		protocol = RC_TYPE_RC5;
+ 		toggle = poll_reply->report_id;
+-		keycode = RC_SCANCODE_RC5(poll_reply->system, poll_reply->data);
++		keycode = RC_SCANCODE_RC5(poll_reply->rc5.system, poll_reply->rc5.data);
++
++		if ((poll_reply->rc5.data ^ poll_reply->rc5.not_data) != 0xff) {
++			/* Key failed integrity check */
++			err("key failed integrity check: %02x %02x %02x %02x",
++			    poll_reply->rc5.not_used, poll_reply->rc5.system,
++			    poll_reply->rc5.data, poll_reply->rc5.not_data);
++			goto resubmit;
++		}
+ 
+ 		break;
+ 	}
+ 
+-	if ((poll_reply->data + poll_reply->not_data) != 0xff) {
+-		/* Key failed integrity check */
+-		err("key failed integrity check: %02x %02x %02x %02x",
+-		    poll_reply->system,  poll_reply->not_system,
+-		    poll_reply->data, poll_reply->not_data);
+-		goto resubmit;
+-	}
+-
+ 	rc_keydown(d->rc_dev, protocol, keycode, toggle);
+ 
+ resubmit:
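The dib0700 fix works by overlaying two protocol-specific views on the
same four payload bytes, so the RC5 integrity check no longer reads the
fields at NEC offsets. A small model of the anonymous-union layout
(anonymous unions need C11 or the GNU extension the kernel relies on):

#include <stdint.h>
#include <stdio.h>

struct rc_response {
	uint8_t report_id;
	uint8_t data_state;
	union {
		struct { uint8_t system, not_system, data, not_data; } nec;
		struct { uint8_t not_used, system, data, not_data; } rc5;
	};
};

int main(void)
{
	struct rc_response r = { .report_id = 1,
				 .nec = { 0x00, 0xff, 0x1e, 0xe1 } };

	/* Same bytes, two protocol views. */
	printf("nec: sys=%02x data=%02x\n", r.nec.system, r.nec.data);
	printf("rc5: sys=%02x data=%02x\n", r.rc5.system, r.rc5.data);

	/* Per-protocol integrity check, as the patch does for RC5. */
	if ((r.rc5.data ^ r.rc5.not_data) != 0xff)
		printf("rc5 integrity check failed\n");
	return 0;
}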
+diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
+index d7d55a20e959..c170523226aa 100644
+--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
++++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
+@@ -3944,6 +3944,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
+ 
+ 				DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ 			}},
++				.size_of_priv = sizeof(struct
++						dib0700_adapter_state),
+ 			}, {
+ 			.num_frontends = 1,
+ 			.fe = {{
+@@ -3956,6 +3958,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
+ 
+ 				DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+ 			}},
++				.size_of_priv = sizeof(struct
++						dib0700_adapter_state),
+ 			}
+ 		},
+ 
+@@ -4009,6 +4013,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
+ 
+ 				DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+ 			}},
++				.size_of_priv = sizeof(struct
++						dib0700_adapter_state),
+ 			},
+ 		},
+ 
+diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
+index 66ada01c796c..cf9d644a8aff 100644
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -1237,6 +1237,23 @@ void vb2_discard_done(struct vb2_queue *q)
+ }
+ EXPORT_SYMBOL_GPL(vb2_discard_done);
+ 
++static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
++{
++	static bool __check_once __read_mostly;
++
++	if (__check_once)
++		return;
++
++	__check_once = true;
++	__WARN();
++
++	pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
++	if (vb->vb2_queue->allow_zero_bytesused)
++		pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
++	else
++		pr_warn_once("use the actual size instead.\n");
++}
++
+ /**
+  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
+  * v4l2_buffer by the userspace. The caller has already verified that struct
+@@ -1247,16 +1264,6 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
+ {
+ 	unsigned int plane;
+ 
+-	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+-		if (WARN_ON_ONCE(b->bytesused == 0)) {
+-			pr_warn_once("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+-			if (vb->vb2_queue->allow_zero_bytesused)
+-				pr_warn_once("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+-			else
+-				pr_warn_once("use the actual size instead.\n");
+-		}
+-	}
+-
+ 	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ 		if (b->memory == V4L2_MEMORY_USERPTR) {
+ 			for (plane = 0; plane < vb->num_planes; ++plane) {
+@@ -1297,6 +1304,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
+ 				struct v4l2_plane *pdst = &v4l2_planes[plane];
+ 				struct v4l2_plane *psrc = &b->m.planes[plane];
+ 
++				if (psrc->bytesused == 0)
++					vb2_warn_zero_bytesused(vb);
++
+ 				if (vb->vb2_queue->allow_zero_bytesused)
+ 					pdst->bytesused = psrc->bytesused;
+ 				else
+@@ -1331,6 +1341,9 @@ static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b
+ 		}
+ 
+ 		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
++			if (b->bytesused == 0)
++				vb2_warn_zero_bytesused(vb);
++
+ 			if (vb->vb2_queue->allow_zero_bytesused)
+ 				v4l2_planes[0].bytesused = b->bytesused;
+ 			else
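The vb2_warn_zero_bytesused() helper centralizes a warn-once pattern: a
static flag gates the expensive one-time backtrace so the log is not
flooded from the per-plane path, while the deprecation text itself uses
print-once primitives. A userspace stand-in for __WARN()/pr_warn_once():

#include <stdbool.h>
#include <stdio.h>

static void warn_zero_bytesused(bool allow_zero)
{
	static bool warned;

	if (warned)
		return;
	warned = true;

	fprintf(stderr, "WARNING: backtrace would be dumped here\n");
	fprintf(stderr, "use of bytesused == 0 is deprecated\n");
	if (allow_zero)
		fprintf(stderr, "use the STOP decoder command instead\n");
	else
		fprintf(stderr, "use the actual size instead\n");
}

int main(void)
{
	warn_zero_bytesused(false);
	warn_zero_bytesused(false);	/* silent: already warned once */
	return 0;
}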
+diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
+index 60f7141a6b02..31d2627d9d4d 100644
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
+ 
+ 	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+ 
++	mmc_blk_put(md);
++
+ 	return ret;
+ }
+ 
+@@ -1910,9 +1912,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+ 			break;
+ 		case MMC_BLK_CMD_ERR:
+ 			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+-			if (!mmc_blk_reset(md, card->host, type))
+-				break;
+-			goto cmd_abort;
++			if (mmc_blk_reset(md, card->host, type))
++				goto cmd_abort;
++			if (!ret)
++				goto start_new_req;
++			break;
+ 		case MMC_BLK_RETRY:
+ 			if (retry++ < 5)
+ 				break;
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 9231cdfe2757..d3dbb28057e9 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3315,13 +3315,14 @@ int sdhci_add_host(struct sdhci_host *host)
+ 				   SDHCI_MAX_CURRENT_MULTIPLIER;
+ 	}
+ 
+-	/* If OCR set by external regulators, use it instead */
++	/* If OCR set by host, use it instead. */
++	if (host->ocr_mask)
++		ocr_avail = host->ocr_mask;
++
++	/* If OCR set by external regulators, give it highest prio. */
+ 	if (mmc->ocr_avail)
+ 		ocr_avail = mmc->ocr_avail;
+ 
+-	if (host->ocr_mask)
+-		ocr_avail &= host->ocr_mask;
+-
+ 	mmc->ocr_avail = ocr_avail;
+ 	mmc->ocr_avail_sdio = ocr_avail;
+ 	if (host->ocr_avail_sdio)
+diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
+index dc79ed85030b..32e77755a9c6 100644
+--- a/drivers/net/ethernet/intel/e1000e/82571.c
++++ b/drivers/net/ethernet/intel/e1000e/82571.c
+@@ -2010,7 +2010,7 @@ const struct e1000_info e1000_82573_info = {
+ 	.flags2			= FLAG2_DISABLE_ASPM_L1
+ 				  | FLAG2_DISABLE_ASPM_L0S,
+ 	.pba			= 20,
+-	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
++	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
+ 	.get_variants		= e1000_get_variants_82571,
+ 	.mac_ops		= &e82571_mac_ops,
+ 	.phy_ops		= &e82_phy_ops_m88,
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 9d81c0317433..e2498dbf3c3b 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1563,7 +1563,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+ 	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
+ 	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
+ 		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+-		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
++		adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ 
+ 		hw->mac.ops.blink_led = NULL;
+ 	}
+@@ -5681,7 +5681,7 @@ const struct e1000_info e1000_ich8_info = {
+ 				  | FLAG_HAS_FLASH
+ 				  | FLAG_APME_IN_WUC,
+ 	.pba			= 8,
+-	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
++	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
+ 	.get_variants		= e1000_get_variants_ich8lan,
+ 	.mac_ops		= &ich8_mac_ops,
+ 	.phy_ops		= &ich8_phy_ops,
+@@ -5754,7 +5754,7 @@ const struct e1000_info e1000_pch2_info = {
+ 	.flags2			= FLAG2_HAS_PHY_STATS
+ 				  | FLAG2_HAS_EEE,
+ 	.pba			= 26,
+-	.max_hw_frame_size	= 9018,
++	.max_hw_frame_size	= 9022,
+ 	.get_variants		= e1000_get_variants_ich8lan,
+ 	.mac_ops		= &ich8_mac_ops,
+ 	.phy_ops		= &ich8_phy_ops,
+@@ -5774,7 +5774,7 @@ const struct e1000_info e1000_pch_lpt_info = {
+ 	.flags2			= FLAG2_HAS_PHY_STATS
+ 				  | FLAG2_HAS_EEE,
+ 	.pba			= 26,
+-	.max_hw_frame_size	= 9018,
++	.max_hw_frame_size	= 9022,
+ 	.get_variants		= e1000_get_variants_ich8lan,
+ 	.mac_ops		= &ich8_mac_ops,
+ 	.phy_ops		= &ich8_phy_ops,
+@@ -5794,7 +5794,7 @@ const struct e1000_info e1000_pch_spt_info = {
+ 	.flags2			= FLAG2_HAS_PHY_STATS
+ 				  | FLAG2_HAS_EEE,
+ 	.pba			= 26,
+-	.max_hw_frame_size	= 9018,
++	.max_hw_frame_size	= 9022,
+ 	.get_variants		= e1000_get_variants_ich8lan,
+ 	.mac_ops		= &ich8_mac_ops,
+ 	.phy_ops		= &ich8_phy_ops,
+diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
+index c509a5c900f5..68913d103542 100644
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -3807,7 +3807,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
+ 	/* reset Packet Buffer Allocation to default */
+ 	ew32(PBA, pba);
+ 
+-	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
++	if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ 		/* To maintain wire speed transmits, the Tx FIFO should be
+ 		 * large enough to accommodate two full transmit packets,
+ 		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+@@ -4196,9 +4196,9 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
+ {
+ 	struct net_device *netdev = adapter->netdev;
+ 
+-	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
++	adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ 	adapter->rx_ps_bsize0 = 128;
+-	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
++	adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+ 	adapter->tx_ring_count = E1000_DEFAULT_TXD;
+ 	adapter->rx_ring_count = E1000_DEFAULT_RXD;
+@@ -5781,17 +5781,17 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ 	struct e1000_adapter *adapter = netdev_priv(netdev);
+-	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
++	int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ 
+ 	/* Jumbo frame support */
+-	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
++	if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) &&
+ 	    !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ 		e_err("Jumbo Frames not supported.\n");
+ 		return -EINVAL;
+ 	}
+ 
+ 	/* Supported frame sizes */
+-	if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
++	if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) ||
+ 	    (max_frame > adapter->max_hw_frame_size)) {
+ 		e_err("Unsupported MTU setting\n");
+ 		return -EINVAL;
+@@ -5831,10 +5831,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+ 		adapter->rx_buffer_len = 4096;
+ 
+ 	/* adjust allocation if LPE protects us, and we aren't using SBP */
+-	if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
+-	    (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
+-		adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
+-		    + ETH_FCS_LEN;
++	if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN))
++		adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
+ 
+ 	if (netif_running(netdev))
+ 		e1000e_up(adapter);
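The frame-size arithmetic behind the e1000e hunks: the driver must
budget for an 802.1Q tag on every frame, so all limits move from
ETH_FRAME_LEN (1514) to VLAN_ETH_FRAME_LEN (1518), plus 4 bytes of FCS
on the wire. The constants below mirror if_ether.h and if_vlan.h:

#include <stdio.h>

#define ETH_HLEN		14
#define ETH_DATA_LEN		1500
#define ETH_FCS_LEN		4
#define VLAN_HLEN		4
#define ETH_FRAME_LEN		(ETH_HLEN + ETH_DATA_LEN)	/* 1514 */
#define VLAN_ETH_FRAME_LEN	(ETH_FRAME_LEN + VLAN_HLEN)	/* 1518 */

int main(void)
{
	int mtu = 1500;
	int max_frame = mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;

	printf("old rx budget: %d\n", ETH_FRAME_LEN + ETH_FCS_LEN);	 /* 1518 */
	printf("new rx budget: %d\n", VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); /* 1522 */
	printf("tagged frame at default MTU: %d\n", max_frame);		 /* 1522 */
	return 0;
}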
+diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
+index e82a0d4ce23f..5dbc617ecf8a 100644
+--- a/drivers/net/wireless/ath/ath9k/htc.h
++++ b/drivers/net/wireless/ath/ath9k/htc.h
+@@ -440,9 +440,9 @@ static inline void ath9k_htc_stop_btcoex(struct ath9k_htc_priv *priv)
+ }
+ #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+ 
+-#define OP_BT_PRIORITY_DETECTED    BIT(3)
+-#define OP_BT_SCAN                 BIT(4)
+-#define OP_TSF_RESET               BIT(6)
++#define OP_BT_PRIORITY_DETECTED    3
++#define OP_BT_SCAN                 4
++#define OP_TSF_RESET               6
+ 
+ enum htc_op_flags {
+ 	HTC_FWFLAG_NO_RMW,
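The htc.h change fixes a subtle misuse: these values are passed to
set_bit()/test_bit(), which take a bit number and shift internally, so
wrapping them in BIT() shifted twice and touched the wrong flags
entirely. A userspace model of the double shift:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

static void set_bit(unsigned int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(BIT(3), &flags);	/* old bug: sets bit 8, not bit 3 */
	assert(!test_bit(3, &flags));
	assert(test_bit(8, &flags));

	flags = 0;
	set_bit(3, &flags);		/* fixed: plain bit number */
	assert(test_bit(3, &flags));
	printf("bit numbers, not masks, for the bitop helpers\n");
	return 0;
}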
+diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
+index b0badef71ce7..d5f2fbf62d72 100644
+--- a/drivers/net/wireless/ath/ath9k/main.c
++++ b/drivers/net/wireless/ath/ath9k/main.c
+@@ -216,11 +216,13 @@ static bool ath_prepare_reset(struct ath_softc *sc)
+ 	ath_stop_ani(sc);
+ 	ath9k_hw_disable_interrupts(ah);
+ 
+-	if (!ath_drain_all_txq(sc))
+-		ret = false;
+-
+-	if (!ath_stoprecv(sc))
+-		ret = false;
++	if (AR_SREV_9300_20_OR_LATER(ah)) {
++		ret &= ath_stoprecv(sc);
++		ret &= ath_drain_all_txq(sc);
++	} else {
++		ret &= ath_drain_all_txq(sc);
++		ret &= ath_stoprecv(sc);
++	}
+ 
+ 	return ret;
+ }
+diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+index 9ac04c1ea706..8c17b943cc6f 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
++++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+@@ -6,7 +6,7 @@
+  * GPL LICENSE SUMMARY
+  *
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -32,7 +32,7 @@
+  * BSD LICENSE
+  *
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -1356,6 +1356,7 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ 	PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ 	PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ 	PRINT_MVM_REF(IWL_MVM_REF_ROC);
++	PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
+ 	PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ 	PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ 	PRINT_MVM_REF(IWL_MVM_REF_USER);
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+index dda9f7b5f342..60c138a9bf4f 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+@@ -1404,7 +1404,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+ 	 * The work item could be running or queued if the
+ 	 * ROC time event stops just as we get here.
+ 	 */
+-	cancel_work_sync(&mvm->roc_done_wk);
++	flush_work(&mvm->roc_done_wk);
+ 
+ 	iwl_trans_stop_device(mvm->trans);
+ 
+diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+index cf70f681d1ac..6af21daaaaef 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
++++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
+@@ -275,6 +275,7 @@ enum iwl_mvm_ref_type {
+ 	IWL_MVM_REF_UCODE_DOWN,
+ 	IWL_MVM_REF_SCAN,
+ 	IWL_MVM_REF_ROC,
++	IWL_MVM_REF_ROC_AUX,
+ 	IWL_MVM_REF_P2P_CLIENT,
+ 	IWL_MVM_REF_AP_IBSS,
+ 	IWL_MVM_REF_USER,
+diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
+index fd7b0d36f9a6..a7448cf01688 100644
+--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
++++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
+@@ -6,7 +6,7 @@
+  * GPL LICENSE SUMMARY
+  *
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  *
+  * This program is free software; you can redistribute it and/or modify
+  * it under the terms of version 2 of the GNU General Public License as
+@@ -32,7 +32,7 @@
+  * BSD LICENSE
+  *
+  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
++ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+  * All rights reserved.
+  *
+  * Redistribution and use in source and binary forms, with or without
+@@ -108,12 +108,14 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
+ 	 * in the case that the time event actually completed in the firmware
+ 	 * (which is handled in iwl_mvm_te_handle_notif).
+ 	 */
+-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
++	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
+ 		queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+-	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
++		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
++	}
++	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
+ 		queues |= BIT(mvm->aux_queue);
+-
+-	iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
++		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
++	}
+ 
+ 	synchronize_net();
+ 
+@@ -393,6 +395,7 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
+ 	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+ 		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ 		te_data->running = true;
++		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
+ 		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+ 	} else {
+ 		IWL_DEBUG_TE(mvm,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+index 86ce5b1930e6..e5d8108f1987 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
+@@ -1354,27 +1354,11 @@ void rtl88ee_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
+-static void rtl88ee_clear_interrupt(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	u32 tmp;
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
+-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+-}
+-
+ void rtl88ee_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 
+-	rtl88ee_clear_interrupt(hw);/*clear it here first*/
+ 	rtl_write_dword(rtlpriv, REG_HIMR,
+ 			rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ 	rtl_write_dword(rtlpriv, REG_HIMRE,
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+index da0a6125f314..cbf2ca7c7c6d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+@@ -1584,28 +1584,11 @@ void rtl92ee_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
+-static void rtl92ee_clear_interrupt(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	u32 tmp;
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
+-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+-}
+-
+ void rtl92ee_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 
+-	rtl92ee_clear_interrupt(hw);/*clear it here first*/
+-
+ 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ 	rtlpci->irq_enabled = true;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+index 67bb47d77b68..a4b7eac6856f 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
+@@ -1258,18 +1258,6 @@ void rtl8723e_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
+-static void rtl8723e_clear_interrupt(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	u32 tmp;
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
+-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+-}
+-
+ void rtl8723e_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+@@ -1284,7 +1272,6 @@ void rtl8723e_disable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+-	rtl8723e_clear_interrupt(hw);/*clear it here first*/
+ 	rtl_write_dword(rtlpriv, 0x3a8, IMR8190_DISABLED);
+ 	rtl_write_dword(rtlpriv, 0x3ac, IMR8190_DISABLED);
+ 	rtlpci->irq_enabled = false;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+index b681af3c7a35..b9417268427e 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8723be/hw.c
+@@ -1634,28 +1634,11 @@ void rtl8723be_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
+-static void rtl8723be_clear_interrupt(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	u32 tmp;
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
+-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+-}
+-
+ void rtl8723be_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 
+-	rtl8723be_clear_interrupt(hw);/*clear it here first*/
+-
+ 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ 	rtlpci->irq_enabled = true;
+diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+index 8704eee9f3a4..57966e3c8e8d 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+@@ -2253,31 +2253,11 @@ void rtl8821ae_set_qos(struct ieee80211_hw *hw, int aci)
+ 	}
+ }
+ 
+-static void rtl8821ae_clear_interrupt(struct ieee80211_hw *hw)
+-{
+-	struct rtl_priv *rtlpriv = rtl_priv(hw);
+-	u32 tmp;
+-	tmp = rtl_read_dword(rtlpriv, REG_HISR);
+-	/*printk("clear interrupt first:\n");
+-	printk("0x%x = 0x%08x\n",REG_HISR, tmp);*/
+-	rtl_write_dword(rtlpriv, REG_HISR, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HISRE);
+-	/*printk("0x%x = 0x%08x\n",REG_HISRE, tmp);*/
+-	rtl_write_dword(rtlpriv, REG_HISRE, tmp);
+-
+-	tmp = rtl_read_dword(rtlpriv, REG_HSISR);
+-	/*printk("0x%x = 0x%08x\n",REG_HSISR, tmp);*/
+-	rtl_write_dword(rtlpriv, REG_HSISR, tmp);
+-}
+-
+ void rtl8821ae_enable_interrupt(struct ieee80211_hw *hw)
+ {
+ 	struct rtl_priv *rtlpriv = rtl_priv(hw);
+ 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ 
+-	rtl8821ae_clear_interrupt(hw);/*clear it here first*/
+-
+ 	rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
+ 	rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
+ 	rtlpci->irq_enabled = true;
+diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
+index 76a4cad41cec..c44f8cf5391a 100644
+--- a/drivers/nfc/st21nfcb/i2c.c
++++ b/drivers/nfc/st21nfcb/i2c.c
+@@ -87,11 +87,6 @@ static void st21nfcb_nci_i2c_disable(void *phy_id)
+ 	gpio_set_value(phy->gpio_reset, 1);
+ }
+ 
+-static void st21nfcb_nci_remove_header(struct sk_buff *skb)
+-{
+-	skb_pull(skb, ST21NFCB_FRAME_HEADROOM);
+-}
+-
+ /*
+  * Writing a frame must not return the number of written bytes.
+  * It must return either zero for success, or <0 for error.
+@@ -121,8 +116,6 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
+ 			r = 0;
+ 	}
+ 
+-	st21nfcb_nci_remove_header(skb);
+-
+ 	return r;
+ }
+ 
+@@ -366,9 +359,6 @@ static int st21nfcb_nci_i2c_remove(struct i2c_client *client)
+ 
+ 	ndlc_remove(phy->ndlc);
+ 
+-	if (phy->powered)
+-		st21nfcb_nci_i2c_disable(phy);
+-
+ 	return 0;
+ }
+ 
+diff --git a/drivers/nfc/st21nfcb/st21nfcb.c b/drivers/nfc/st21nfcb/st21nfcb.c
+index ca9871ab3fb3..c7dc282d5c3b 100644
+--- a/drivers/nfc/st21nfcb/st21nfcb.c
++++ b/drivers/nfc/st21nfcb/st21nfcb.c
+@@ -131,11 +131,8 @@ EXPORT_SYMBOL_GPL(st21nfcb_nci_probe);
+ 
+ void st21nfcb_nci_remove(struct nci_dev *ndev)
+ {
+-	struct st21nfcb_nci_info *info = nci_get_drvdata(ndev);
+-
+ 	nci_unregister_device(ndev);
+ 	nci_free_device(ndev);
+-	kfree(info);
+ }
+ EXPORT_SYMBOL_GPL(st21nfcb_nci_remove);
+ 
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 6906a3f61bd8..8bfda6ade2c0 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -712,7 +712,7 @@ int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size)
+ 	}
+ 
+ 	/* add the range to the list */
+-	range = kzalloc(sizeof(*range), GFP_KERNEL);
++	range = kzalloc(sizeof(*range), GFP_ATOMIC);
+ 	if (!range) {
+ 		err = -ENOMEM;
+ 		goto end_register;
+diff --git a/drivers/of/base.c b/drivers/of/base.c
+index f0650265febf..5ed97246c2e7 100644
+--- a/drivers/of/base.c
++++ b/drivers/of/base.c
+@@ -89,7 +89,7 @@ EXPORT_SYMBOL(of_n_size_cells);
+ #ifdef CONFIG_NUMA
+ int __weak of_node_to_nid(struct device_node *np)
+ {
+-	return numa_node_id();
++	return NUMA_NO_NODE;
+ }
+ #endif
+ 
+diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
+index c6fc95b53083..ab54f2864451 100644
+--- a/drivers/phy/phy-berlin-usb.c
++++ b/drivers/phy/phy-berlin-usb.c
+@@ -106,8 +106,8 @@
+ static const u32 phy_berlin_pll_dividers[] = {
+ 	/* Berlin 2 */
+ 	CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
+-	/* Berlin 2CD */
+-	CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
++	/* Berlin 2CD/Q */
++	CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
+ };
+ 
+ struct phy_berlin_usb_priv {
+diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
+index bc42d6a8939f..8882afbef688 100644
+--- a/drivers/phy/phy-twl4030-usb.c
++++ b/drivers/phy/phy-twl4030-usb.c
+@@ -711,7 +711,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
+ 	pm_runtime_use_autosuspend(&pdev->dev);
+ 	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ 	pm_runtime_enable(&pdev->dev);
+-	pm_runtime_get_sync(&pdev->dev);
+ 
+ 	/* Our job is to use irqs and status from the power module
+ 	 * to keep the transceiver disabled when nothing's connected.
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+index 03aa58c4cb85..1eb084c3b0c9 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+@@ -370,11 +370,11 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
+ 	MPP_MODE(64,
+ 	   MPP_FUNCTION(0x0, "gpio", NULL),
+ 	   MPP_FUNCTION(0x1, "spi0", "miso"),
+-	   MPP_FUNCTION(0x2, "spi0-1", "cs1")),
++	   MPP_FUNCTION(0x2, "spi0", "cs1")),
+ 	MPP_MODE(65,
+ 	   MPP_FUNCTION(0x0, "gpio", NULL),
+ 	   MPP_FUNCTION(0x1, "spi0", "mosi"),
+-	   MPP_FUNCTION(0x2, "spi0-1", "cs2")),
++	   MPP_FUNCTION(0x2, "spi0", "cs2")),
+ };
+ 
+ static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+index ca1e7571fedb..203291bde608 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-375.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
+@@ -92,19 +92,17 @@ static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
+ 		 MPP_FUNCTION(0x5, "nand", "io1")),
+ 	MPP_MODE(8,
+ 		 MPP_FUNCTION(0x0, "gpio", NULL),
+-		 MPP_FUNCTION(0x1, "dev ", "bootcs"),
++		 MPP_FUNCTION(0x1, "dev", "bootcs"),
+ 		 MPP_FUNCTION(0x2, "spi0", "cs0"),
+ 		 MPP_FUNCTION(0x3, "spi1", "cs0"),
+ 		 MPP_FUNCTION(0x5, "nand", "ce")),
+ 	MPP_MODE(9,
+ 		 MPP_FUNCTION(0x0, "gpio", NULL),
+-		 MPP_FUNCTION(0x1, "nf", "wen"),
+ 		 MPP_FUNCTION(0x2, "spi0", "sck"),
+ 		 MPP_FUNCTION(0x3, "spi1", "sck"),
+ 		 MPP_FUNCTION(0x5, "nand", "we")),
+ 	MPP_MODE(10,
+ 		 MPP_FUNCTION(0x0, "gpio", NULL),
+-		 MPP_FUNCTION(0x1, "nf", "ren"),
+ 		 MPP_FUNCTION(0x2, "dram", "vttctrl"),
+ 		 MPP_FUNCTION(0x3, "led", "c1"),
+ 		 MPP_FUNCTION(0x5, "nand", "re"),
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+index 83bbcc72be1f..ff411a53b5a4 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
+@@ -94,37 +94,39 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxd0",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "spi0",  "cs1",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie3", "clkreq",     V_88F6810_PLUS)),
+ 	MPP_MODE(13,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxd1",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "pcie0", "clkreq",     V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "spi0",  "cs2",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie2", "clkreq",     V_88F6810_PLUS)),
+ 	MPP_MODE(14,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxd2",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "spi0",  "cs3",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie3", "clkreq",     V_88F6810_PLUS)),
+ 	MPP_MODE(15,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxd3",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ge",    "mdc slave",  V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
++		 MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS)),
+ 	MPP_MODE(16,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxctl",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ge",    "mdio slave", V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "spi0",  "miso",       V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie1", "clkreq",     V_88F6820_PLUS)),
+ 	MPP_MODE(17,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxclk",      V_88F6810_PLUS),
+@@ -137,13 +139,12 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "rxerr",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ptp",   "trig_gen",   V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
++		 MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS)),
+ 	MPP_MODE(19,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "col",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ptp",   "event_req",  V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie0", "clkreq",     V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(3, "ge0",   "txerr",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "sata1", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "ua0",   "cts",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(6, "ua1",   "rxd",        V_88F6810_PLUS)),
+@@ -151,7 +152,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ge0",   "txclk",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "sata0", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "ua0",   "rts",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(6, "ua1",   "txd",        V_88F6810_PLUS)),
+@@ -277,35 +277,27 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(1, "pcie0", "clkreq",     V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(4, "pcie0", "rstout",     V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "dev",   "clkout",     V_88F6810_PLUS)),
+ 	MPP_MODE(44,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+-		 MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828),
+-		 MPP_VAR_FUNCTION(5, "pcie0", "rstout",     V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828)),
+ 	MPP_MODE(45,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ref",   "clk_out0",   V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+-		 MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS)),
+ 	MPP_MODE(46,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+-		 MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS)),
+ 	MPP_MODE(47,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+-		 MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "sata3", "prsnt",      V_88F6828)),
+ 	MPP_MODE(48,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+@@ -313,18 +305,19 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "tdm2c", "pclk",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "audio", "mclk",       V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie0", "clkreq",     V_88F6810_PLUS)),
+ 	MPP_MODE(49,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "sata2", "prsnt",      V_88F6828),
+ 		 MPP_VAR_FUNCTION(2, "sata3", "prsnt",      V_88F6828),
+ 		 MPP_VAR_FUNCTION(3, "tdm2c", "fsync",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "audio", "lrclk",      V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS)),
++		 MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS),
++		 MPP_VAR_FUNCTION(6, "pcie1", "clkreq",     V_88F6820_PLUS)),
+ 	MPP_MODE(50,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "tdm2c", "drx",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "audio", "extclk",     V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "sd0",   "cmd",        V_88F6810_PLUS)),
+@@ -336,7 +329,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 	MPP_MODE(52,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "tdm2c", "intn",       V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "audio", "sdi",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "sd0",   "d6",         V_88F6810_PLUS)),
+@@ -352,7 +344,7 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(4, "pcie1", "rstout",     V_88F6820_PLUS),
++		 MPP_VAR_FUNCTION(4, "ge0",   "txerr",      V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "sd0",   "d3",         V_88F6810_PLUS)),
+ 	MPP_MODE(55,
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+@@ -382,7 +374,6 @@ static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(2, "i2c1",  "sda",        V_88F6810_PLUS),
+-		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+ 		 MPP_VAR_FUNCTION(4, "spi1",  "cs0",        V_88F6810_PLUS),
+ 		 MPP_VAR_FUNCTION(5, "sd0",   "d2",         V_88F6810_PLUS)),
+ };
+@@ -411,7 +402,7 @@ static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+ 
+ static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+ 	MPP_GPIO_RANGE(0,   0,  0, 32),
+-	MPP_GPIO_RANGE(1,  32, 32, 27),
++	MPP_GPIO_RANGE(1,  32, 32, 28),
+ };
+ 
+ static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+index 42491624d660..2dcf9b41e01e 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-39x.c
+@@ -380,7 +380,7 @@ static struct mvebu_mpp_ctrl armada_39x_mpp_controls[] = {
+ 
+ static struct pinctrl_gpio_range armada_39x_mpp_gpio_ranges[] = {
+ 	MPP_GPIO_RANGE(0,   0,  0, 32),
+-	MPP_GPIO_RANGE(1,  32, 32, 27),
++	MPP_GPIO_RANGE(1,  32, 32, 28),
+ };
+ 
+ static int armada_39x_pinctrl_probe(struct platform_device *pdev)
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+index 578db9f033b2..d7cdb146f44d 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c
+@@ -14,10 +14,7 @@
+  * available: mv78230, mv78260 and mv78460. From a pin muxing
+  * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460
+  * both have 67 MPP pins (more GPIOs and address lines for the memory
+- * bus mainly). The only difference between the mv78260 and the
+- * mv78460 in terms of pin muxing is the addition of two functions on
+- * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two
+- * cores, mv78460 has four cores).
++ * bus mainly).
+  */
+ 
+ #include <linux/err.h>
+@@ -172,20 +169,17 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 	MPP_MODE(24,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sata1", "prsnt",    V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re",   V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "tdm", "rst",        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x4, "lcd", "hsync",      V_MV78230_PLUS)),
+ 	MPP_MODE(25,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sata0", "prsnt",    V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we",   V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "tdm", "pclk",       V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x4, "lcd", "vsync",      V_MV78230_PLUS)),
+ 	MPP_MODE(26,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "tdm", "fsync",      V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
++		 MPP_VAR_FUNCTION(0x4, "lcd", "clk",        V_MV78230_PLUS)),
+ 	MPP_MODE(27,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "ptp", "trig",       V_MV78230_PLUS),
+@@ -200,8 +194,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "ptp", "clk",        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "tdm", "int0",       V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
++		 MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk",    V_MV78230_PLUS)),
+ 	MPP_MODE(30,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sd0", "clk",        V_MV78230_PLUS),
+@@ -209,13 +202,11 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 	MPP_MODE(31,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sd0", "cmd",        V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
++		 MPP_VAR_FUNCTION(0x3, "tdm", "int2",       V_MV78230_PLUS)),
+ 	MPP_MODE(32,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sd0", "d0",         V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd",    V_MV78230_PLUS)),
++		 MPP_VAR_FUNCTION(0x3, "tdm", "int3",       V_MV78230_PLUS)),
+ 	MPP_MODE(33,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "sd0", "d1",         V_MV78230_PLUS),
+@@ -247,7 +238,6 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "spi", "cs1",        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x2, "uart2", "cts",      V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd",    V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync",  V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0",   V_MV78230_PLUS)),
+ 	MPP_MODE(41,
+@@ -262,15 +252,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0x1, "uart2", "rxd",      V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x2, "uart0", "cts",      V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "tdm", "int7",       V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd",    V_MV78230_PLUS)),
++		 MPP_VAR_FUNCTION(0x4, "tdm-1", "timer",    V_MV78230_PLUS)),
+ 	MPP_MODE(43,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "uart2", "txd",      V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x2, "uart0", "rts",      V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x3, "spi", "cs3",        V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd",  V_MV78460)),
++		 MPP_VAR_FUNCTION(0x4, "pcie", "rstout",    V_MV78230_PLUS)),
+ 	MPP_MODE(44,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "uart2", "cts",      V_MV78230_PLUS),
+@@ -299,7 +287,7 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3",   V_MV78230_PLUS)),
+ 	MPP_MODE(48,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78230_PLUS),
+-		 MPP_VAR_FUNCTION(0x1, "tclk", NULL,        V_MV78230_PLUS),
++		 MPP_VAR_FUNCTION(0x1, "dev", "clkout",     V_MV78230_PLUS),
+ 		 MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)),
+ 	MPP_MODE(49,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
+@@ -321,16 +309,13 @@ static struct mvebu_mpp_mode armada_xp_mpp_modes[] = {
+ 		 MPP_VAR_FUNCTION(0x1, "dev", "ad19",       V_MV78260_PLUS)),
+ 	MPP_MODE(55,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd",    V_MV78260_PLUS)),
++		 MPP_VAR_FUNCTION(0x1, "dev", "ad20",       V_MV78260_PLUS)),
+ 	MPP_MODE(56,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd",    V_MV78260_PLUS)),
++		 MPP_VAR_FUNCTION(0x1, "dev", "ad21",       V_MV78260_PLUS)),
+ 	MPP_MODE(57,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS),
+-		 MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd",  V_MV78460)),
++		 MPP_VAR_FUNCTION(0x1, "dev", "ad22",       V_MV78260_PLUS)),
+ 	MPP_MODE(58,
+ 		 MPP_VAR_FUNCTION(0x0, "gpio", NULL,        V_MV78260_PLUS),
+ 		 MPP_VAR_FUNCTION(0x1, "dev", "ad23",       V_MV78260_PLUS)),
+diff --git a/drivers/pinctrl/pinctrl-zynq.c b/drivers/pinctrl/pinctrl-zynq.c
+index 22280bddb9e2..8c51a3c65513 100644
+--- a/drivers/pinctrl/pinctrl-zynq.c
++++ b/drivers/pinctrl/pinctrl-zynq.c
+@@ -714,12 +714,13 @@ static const char * const gpio0_groups[] = {"gpio0_0_grp",
+ 		.mux_val = mval,			\
+ 	}
+ 
+-#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, mux, mask, shift)	\
++#define DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(fname, mval, offset, mask, shift)\
+ 	[ZYNQ_PMUX_##fname] = {				\
+ 		.name = #fname,				\
+ 		.groups = fname##_groups,		\
+ 		.ngroups = ARRAY_SIZE(fname##_groups),	\
+ 		.mux_val = mval,			\
++		.mux = offset,				\
+ 		.mux_mask = mask,			\
+ 		.mux_shift = shift,			\
+ 	}
+@@ -744,15 +745,15 @@ static const struct zynq_pinmux_function zynq_pmux_functions[] = {
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(spi1, 0x50),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0, 0x40),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(sdio0_pc, 0xc),
+-	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 130, ZYNQ_SDIO_WP_MASK,
++	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_wp, 0, 0x130, ZYNQ_SDIO_WP_MASK,
+ 					ZYNQ_SDIO_WP_SHIFT),
+-	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 130, ZYNQ_SDIO_CD_MASK,
++	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio0_cd, 0, 0x130, ZYNQ_SDIO_CD_MASK,
+ 					ZYNQ_SDIO_CD_SHIFT),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1, 0x40),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(sdio1_pc, 0xc),
+-	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 134, ZYNQ_SDIO_WP_MASK,
++	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_wp, 0, 0x134, ZYNQ_SDIO_WP_MASK,
+ 					ZYNQ_SDIO_WP_SHIFT),
+-	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 134, ZYNQ_SDIO_CD_MASK,
++	DEFINE_ZYNQ_PINMUX_FUNCTION_MUX(sdio1_cd, 0, 0x134, ZYNQ_SDIO_CD_MASK,
+ 					ZYNQ_SDIO_CD_SHIFT),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor, 4),
+ 	DEFINE_ZYNQ_PINMUX_FUNCTION(smc0_nor_cs1, 8),
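
Two separate mistakes are fixed in the pinctrl-zynq hunk: the macro accepted a mux register offset but never assigned it to the .mux field, and the SDIO write-protect/card-detect offsets were written as decimal 130/134 where the hardware wants hexadecimal 0x130/0x134. A sketch of the macro half, with illustrative field names and mask values:

struct pinmux_function {
	const char *name;
	unsigned int mux;	/* register offset */
	unsigned int mux_mask;
	unsigned int mux_shift;
};

/* Buggy shape: 'offset' is accepted but never used, so .mux stays 0
 * and every write lands on the wrong register. */
#define DEFINE_FUNCTION_BAD(n, offset, mask, shift) \
	{ .name = #n, .mux_mask = (mask), .mux_shift = (shift) }

/* Fixed shape: the offset actually reaches the structure. */
#define DEFINE_FUNCTION(n, offset, mask, shift) \
	{ .name = #n, .mux = (offset), .mux_mask = (mask), \
	  .mux_shift = (shift) }

static const struct pinmux_function funcs[] = {
	DEFINE_FUNCTION(sdio0_wp, 0x130, 0x3f, 0),	/* 0x130, not 130 */
};
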
+diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
+index d688d806a8a5..2c1d5f5432a9 100644
+--- a/drivers/platform/x86/dell-laptop.c
++++ b/drivers/platform/x86/dell-laptop.c
+@@ -305,7 +305,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
+ };
+ 
+ static struct calling_interface_buffer *buffer;
+-static struct page *bufferpage;
+ static DEFINE_MUTEX(buffer_mutex);
+ 
+ static int hwswitch_state;
+@@ -1896,12 +1895,11 @@ static int __init dell_init(void)
+ 	 * Allocate buffer below 4GB for SMI data--only 32-bit physical addr
+ 	 * is passed to SMI handler.
+ 	 */
+-	bufferpage = alloc_page(GFP_KERNEL | GFP_DMA32);
+-	if (!bufferpage) {
++	buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
++	if (!buffer) {
+ 		ret = -ENOMEM;
+ 		goto fail_buffer;
+ 	}
+-	buffer = page_address(bufferpage);
+ 
+ 	ret = dell_setup_rfkill();
+ 
+@@ -1965,7 +1963,7 @@ fail_backlight:
+ 	cancel_delayed_work_sync(&dell_rfkill_work);
+ 	dell_cleanup_rfkill();
+ fail_rfkill:
+-	free_page((unsigned long)bufferpage);
++	free_page((unsigned long)buffer);
+ fail_buffer:
+ 	platform_device_del(platform_device);
+ fail_platform_device2:
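
The dell-laptop change is more than a cleanup: the old error path handed a struct page pointer to free_page(), which expects the page's kernel virtual address, so the cast compiled but freed the wrong thing. Allocating with __get_free_page() keeps the allocation and the free in the same address domain. A minimal sketch, keeping the driver's below-4GB constraint:

#include <linux/errno.h>
#include <linux/gfp.h>

static void *smi_buffer;

static int smi_buffer_init(void)
{
	/* one page, physically below 4 GB, returned as a virtual address */
	smi_buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32);
	if (!smi_buffer)
		return -ENOMEM;
	return 0;
}

static void smi_buffer_exit(void)
{
	/* free_page() takes the same virtual address back */
	free_page((unsigned long)smi_buffer);
	smi_buffer = NULL;
}
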
+diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
+index b496db87bc05..cb7cd8d79329 100644
+--- a/drivers/platform/x86/ideapad-laptop.c
++++ b/drivers/platform/x86/ideapad-laptop.c
+@@ -464,8 +464,9 @@ static const struct ideapad_rfk_data ideapad_rfk_data[] = {
+ static int ideapad_rfk_set(void *data, bool blocked)
+ {
+ 	struct ideapad_rfk_priv *priv = data;
++	int opcode = ideapad_rfk_data[priv->dev].opcode;
+ 
+-	return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked);
++	return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked);
+ }
+ 
+ static struct rfkill_ops ideapad_rfk_ops = {
+@@ -837,6 +838,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
+ 		},
+ 	},
+ 	{
++		.ident = "Lenovo G50-30",
++		.matches = {
++			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"),
++		},
++	},
++	{
+ 		.ident = "Lenovo Yoga 2 11 / 13 / Pro",
+ 		.matches = {
+ 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
+index 515f33882ab8..49c1720df59a 100644
+--- a/drivers/pnp/system.c
++++ b/drivers/pnp/system.c
+@@ -7,7 +7,6 @@
+  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+  */
+ 
+-#include <linux/acpi.h>
+ #include <linux/pnp.h>
+ #include <linux/device.h>
+ #include <linux/init.h>
+@@ -23,41 +22,25 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ 	{"", 0}
+ };
+ 
+-#ifdef CONFIG_ACPI
+-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+-{
+-	u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
+-	return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
+-}
+-#else
+-static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
+-{
+-	struct resource *res;
+-
+-	res = io ? request_region(start, length, desc) :
+-		request_mem_region(start, length, desc);
+-	if (res) {
+-		res->flags &= ~IORESOURCE_BUSY;
+-		return true;
+-	}
+-	return false;
+-}
+-#endif
+-
+ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
+ {
+ 	char *regionid;
+ 	const char *pnpid = dev_name(&dev->dev);
+ 	resource_size_t start = r->start, end = r->end;
+-	bool reserved;
++	struct resource *res;
+ 
+ 	regionid = kmalloc(16, GFP_KERNEL);
+ 	if (!regionid)
+ 		return;
+ 
+ 	snprintf(regionid, 16, "pnp %s", pnpid);
+-	reserved = __reserve_range(start, end - start + 1, !!port, regionid);
+-	if (!reserved)
++	if (port)
++		res = request_region(start, end - start + 1, regionid);
++	else
++		res = request_mem_region(start, end - start + 1, regionid);
++	if (res)
++		res->flags &= ~IORESOURCE_BUSY;
++	else
+ 		kfree(regionid);
+ 
+ 	/*
+@@ -66,7 +49,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
+ 	 * have double reservations.
+ 	 */
+ 	dev_info(&dev->dev, "%pR %s reserved\n", r,
+-		 reserved ? "has been" : "could not be");
++		 res ? "has been" : "could not be");
+ }
+ 
+ static void reserve_resources_of_dev(struct pnp_dev *dev)
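
The pnp revert goes back to plain resource reservation, and the IORESOURCE_BUSY twiddling is the interesting detail: the range is recorded in /proc/ioports or /proc/iomem so its owner is visible, but left non-busy so the driver that eventually binds to the device can still claim it without a conflict. The pattern, reduced to a sketch:

#include <linux/ioport.h>

static bool reserve_soft(resource_size_t start, resource_size_t n,
			 const char *name, bool is_io)
{
	struct resource *res;

	res = is_io ? request_region(start, n, name)
		    : request_mem_region(start, n, name);
	if (!res)
		return false;
	/* visible in /proc, but not exclusive */
	res->flags &= ~IORESOURCE_BUSY;
	return true;
}
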
+diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
+index 0479e807a776..d87a85cefb66 100644
+--- a/drivers/rtc/rtc-snvs.c
++++ b/drivers/rtc/rtc-snvs.c
+@@ -322,6 +322,13 @@ static int snvs_rtc_suspend(struct device *dev)
+ 	if (device_may_wakeup(dev))
+ 		enable_irq_wake(data->irq);
+ 
++	return 0;
++}
++
++static int snvs_rtc_suspend_noirq(struct device *dev)
++{
++	struct snvs_rtc_data *data = dev_get_drvdata(dev);
++
+ 	if (data->clk)
+ 		clk_disable_unprepare(data->clk);
+ 
+@@ -331,23 +338,28 @@ static int snvs_rtc_suspend(struct device *dev)
+ static int snvs_rtc_resume(struct device *dev)
+ {
+ 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
+-	int ret;
+ 
+ 	if (device_may_wakeup(dev))
+-		disable_irq_wake(data->irq);
++		return disable_irq_wake(data->irq);
+ 
+-	if (data->clk) {
+-		ret = clk_prepare_enable(data->clk);
+-		if (ret)
+-			return ret;
+-	}
++	return 0;
++}
++
++static int snvs_rtc_resume_noirq(struct device *dev)
++{
++	struct snvs_rtc_data *data = dev_get_drvdata(dev);
++
++	if (data->clk)
++		return clk_prepare_enable(data->clk);
+ 
+ 	return 0;
+ }
+ 
+ static const struct dev_pm_ops snvs_rtc_pm_ops = {
+-	.suspend_noirq = snvs_rtc_suspend,
+-	.resume_noirq = snvs_rtc_resume,
++	.suspend = snvs_rtc_suspend,
++	.suspend_noirq = snvs_rtc_suspend_noirq,
++	.resume = snvs_rtc_resume,
++	.resume_noirq = snvs_rtc_resume_noirq,
+ };
+ 
+ #define SNVS_RTC_PM_OPS	(&snvs_rtc_pm_ops)
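
The snvs split is about ordering within the suspend sequence: enable_irq_wake() has to run in the ordinary .suspend phase, before the core disables the device's interrupt, while gating the clock must wait for the _noirq phase so the interrupt handler can still reach the hardware until then. The same shape in a sketch, assuming a driver-private context struct:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

struct mydev {
	struct clk *clk;
	int irq;
};

static int mydev_suspend(struct device *dev)
{
	struct mydev *d = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(d->irq);
	return 0;
}

static int mydev_suspend_noirq(struct device *dev)
{
	struct mydev *d = dev_get_drvdata(dev);

	/* interrupts are off now; safe to gate the clock the ISR uses */
	if (d->clk)
		clk_disable_unprepare(d->clk);
	return 0;
}

static int mydev_resume_noirq(struct device *dev)
{
	struct mydev *d = dev_get_drvdata(dev);

	if (d->clk)
		return clk_prepare_enable(d->clk);
	return 0;
}

static int mydev_resume(struct device *dev)
{
	struct mydev *d = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(d->irq);
	return 0;
}

static const struct dev_pm_ops mydev_pm_ops = {
	.suspend	= mydev_suspend,
	.suspend_noirq	= mydev_suspend_noirq,
	.resume		= mydev_resume,
	.resume_noirq	= mydev_resume_noirq,
};
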
+diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
+index c458e5010a74..4ebf5aae5019 100644
+--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
++++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
+@@ -243,7 +243,7 @@ static int cb_pcimdas_ao_insn_write(struct comedi_device *dev,
+ 	return insn->n;
+ }
+ 
+-static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
++static int cb_pcimdas_di_insn_bits(struct comedi_device *dev,
+ 				   struct comedi_subdevice *s,
+ 				   struct comedi_insn *insn,
+ 				   unsigned int *data)
+@@ -258,7 +258,7 @@ static int cb_pcimdas_di_insn_read(struct comedi_device *dev,
+ 	return insn->n;
+ }
+ 
+-static int cb_pcimdas_do_insn_write(struct comedi_device *dev,
++static int cb_pcimdas_do_insn_bits(struct comedi_device *dev,
+ 				    struct comedi_subdevice *s,
+ 				    struct comedi_insn *insn,
+ 				    unsigned int *data)
+@@ -424,7 +424,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
+ 	s->n_chan	= 4;
+ 	s->maxdata	= 1;
+ 	s->range_table	= &range_digital;
+-	s->insn_read	= cb_pcimdas_di_insn_read;
++	s->insn_bits	= cb_pcimdas_di_insn_bits;
+ 
+ 	/* Digital Output subdevice (main connector) */
+ 	s = &dev->subdevices[4];
+@@ -433,7 +433,7 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
+ 	s->n_chan	= 4;
+ 	s->maxdata	= 1;
+ 	s->range_table	= &range_digital;
+-	s->insn_write	= cb_pcimdas_do_insn_write;
++	s->insn_bits	= cb_pcimdas_do_insn_bits;
+ 
+ 	/* Counter subdevice (8254) */
+ 	s = &dev->subdevices[5];
+diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
+index 50227b598e0c..fcb8c61b2884 100644
+--- a/drivers/staging/rtl8712/rtl8712_recv.c
++++ b/drivers/staging/rtl8712/rtl8712_recv.c
+@@ -1056,7 +1056,8 @@ static int recvbuf2recvframe(struct _adapter *padapter, struct sk_buff *pskb)
+ 		/* for first fragment packet, driver need allocate 1536 +
+ 		 * drvinfo_sz + RXDESC_SIZE to defrag packet. */
+ 		if ((mf == 1) && (frag == 0))
+-			alloc_sz = 1658;/*1658+6=1664, 1664 is 128 alignment.*/
++			/*1658+6=1664, 1664 is 128 alignment.*/
++			alloc_sz = max_t(u16, tmp_len, 1658);
+ 		else
+ 			alloc_sz = tmp_len;
+ 		/* 2 is for IP header 4 bytes alignment in QoS packet case.
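
The rtl8712 hunk closes an rx overflow: the driver always allocated a fixed 1658-byte buffer for a first fragment but then copied tmp_len bytes into it, so a larger frame overran the allocation. max_t() sizes the buffer to whichever is bigger. As a hypothetical helper:

#include <linux/kernel.h>	/* max_t() */
#include <linux/types.h>

static u16 rx_alloc_size(u16 tmp_len, bool first_fragment)
{
	/* never allocate less than the frame itself, even when a fixed
	 * minimum is wanted for defragmentation headroom */
	if (first_fragment)
		return max_t(u16, tmp_len, 1658);
	return tmp_len;
}
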
+diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
+index 0343ae386f03..15baacb126ad 100644
+--- a/drivers/staging/vt6655/device_main.c
++++ b/drivers/staging/vt6655/device_main.c
+@@ -807,6 +807,10 @@ static int device_rx_srv(struct vnt_private *pDevice, unsigned int uIdx)
+ 	     pRD = pRD->next) {
+ 		if (works++ > 15)
+ 			break;
++
++		if (!pRD->pRDInfo->skb)
++			break;
++
+ 		if (vnt_receive_frame(pDevice, pRD)) {
+ 			if (!device_alloc_rx_buf(pDevice, pRD)) {
+ 				dev_err(&pDevice->pcid->dev,
+@@ -1417,7 +1421,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 
+ 	priv->current_aid = conf->aid;
+ 
+-	if (changed & BSS_CHANGED_BSSID) {
++	if (changed & BSS_CHANGED_BSSID && conf->bssid) {
+ 		unsigned long flags;
+ 
+ 		spin_lock_irqsave(&priv->lock, flags);
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index ab3ab84cb0a7..766fdcece074 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -701,7 +701,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
+ 
+ 	priv->current_aid = conf->aid;
+ 
+-	if (changed & BSS_CHANGED_BSSID)
++	if (changed & BSS_CHANGED_BSSID && conf->bssid)
+ 		vnt_mac_set_bssid_addr(priv, (u8 *)conf->bssid);
+ 
+ 
+diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
+index f8120c1bde14..8cd35348fc19 100644
+--- a/drivers/tty/serial/Kconfig
++++ b/drivers/tty/serial/Kconfig
+@@ -241,7 +241,6 @@ config SERIAL_SAMSUNG
+ 	tristate "Samsung SoC serial support"
+ 	depends on PLAT_SAMSUNG || ARCH_EXYNOS
+ 	select SERIAL_CORE
+-	select SERIAL_EARLYCON
+ 	help
+ 	  Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
+ 	  providing /dev/ttySAC0, 1 and 2 (note, some machines may not
+@@ -277,6 +276,7 @@ config SERIAL_SAMSUNG_CONSOLE
+ 	bool "Support for console on Samsung SoC serial port"
+ 	depends on SERIAL_SAMSUNG=y
+ 	select SERIAL_CORE_CONSOLE
++	select SERIAL_EARLYCON
+ 	help
+ 	  Allow selection of the S3C24XX on-board serial ports for use as
+ 	  a virtual console.
+diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
+index 27dade29646b..5ca1dfb0561c 100644
+--- a/drivers/tty/serial/atmel_serial.c
++++ b/drivers/tty/serial/atmel_serial.c
+@@ -315,8 +315,7 @@ static int atmel_config_rs485(struct uart_port *port,
+ 	if (rs485conf->flags & SER_RS485_ENABLED) {
+ 		dev_dbg(port->dev, "Setting UART to RS485\n");
+ 		atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
+-		if ((rs485conf->delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
++		UART_PUT_TTGR(port, rs485conf->delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else {
+ 		dev_dbg(port->dev, "Setting UART to RS232\n");
+@@ -354,8 +353,7 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl)
+ 
+ 	/* override mode to RS485 if needed, otherwise keep the current mode */
+ 	if (port->rs485.flags & SER_RS485_ENABLED) {
+-		if ((port->rs485.delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ 		mode &= ~ATMEL_US_USMODE;
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	}
+@@ -2061,8 +2059,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
+ 
+ 	/* mode */
+ 	if (port->rs485.flags & SER_RS485_ENABLED) {
+-		if ((port->rs485.delay_rts_after_send) > 0)
+-			UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
++		UART_PUT_TTGR(port, port->rs485.delay_rts_after_send);
+ 		mode |= ATMEL_US_USMODE_RS485;
+ 	} else if (termios->c_cflag & CRTSCTS) {
+ 		/* RS232 with hardware handshake (RTS/CTS) */
+diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
+index 843f2cdc280b..9ffdfcf2ec6e 100644
+--- a/drivers/tty/sysrq.c
++++ b/drivers/tty/sysrq.c
+@@ -55,9 +55,6 @@
+ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
+ static bool __read_mostly sysrq_always_enabled;
+ 
+-unsigned short platform_sysrq_reset_seq[] __weak = { KEY_RESERVED };
+-int sysrq_reset_downtime_ms __weak;
+-
+ static bool sysrq_on(void)
+ {
+ 	return sysrq_enabled || sysrq_always_enabled;
+@@ -569,6 +566,7 @@ void handle_sysrq(int key)
+ EXPORT_SYMBOL(handle_sysrq);
+ 
+ #ifdef CONFIG_INPUT
++static int sysrq_reset_downtime_ms;
+ 
+ /* Simple translation table for the SysRq keys */
+ static const unsigned char sysrq_xlate[KEY_CNT] =
+@@ -949,23 +947,8 @@ static bool sysrq_handler_registered;
+ 
+ static inline void sysrq_register_handler(void)
+ {
+-	unsigned short key;
+ 	int error;
+-	int i;
+-
+-	/* First check if a __weak interface was instantiated. */
+-	for (i = 0; i < ARRAY_SIZE(sysrq_reset_seq); i++) {
+-		key = platform_sysrq_reset_seq[i];
+-		if (key == KEY_RESERVED || key > KEY_MAX)
+-			break;
+-
+-		sysrq_reset_seq[sysrq_reset_seq_len++] = key;
+-	}
+ 
+-	/*
+-	 * DT configuration takes precedence over anything that would
+-	 * have been defined via the __weak interface.
+-	 */
+ 	sysrq_of_get_keyreset_config();
+ 
+ 	error = input_register_handler(&sysrq_handler);
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index 4b0448c26810..986abde07683 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -513,7 +513,7 @@ static void async_completed(struct urb *urb)
+ 	snoop(&urb->dev->dev, "urb complete\n");
+ 	snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
+ 			as->status, COMPLETE, NULL, 0);
+-	if ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_IN)
++	if ((urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN)
+ 		snoop_urb_data(urb, urb->actual_length);
+ 
+ 	if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
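
The devio fix swaps a constant drawn from the wrong namespace: USB_DIR_IN (0x80) is a bit in bEndpointAddress/bmRequestType, while urb->transfer_flags uses URB_DIR_IN (0x0200). Masking transfer_flags with URB_DIR_MASK and comparing against USB_DIR_IN therefore never matched, and IN data was never snooped. A standalone demonstration with those values:

#include <stdio.h>

#define USB_DIR_IN	0x80	/* ch9.h: endpoint/request direction bit */
#define URB_DIR_IN	0x0200	/* usb.h: urb->transfer_flags direction bit */
#define URB_DIR_MASK	URB_DIR_IN

int main(void)
{
	unsigned int transfer_flags = URB_DIR_IN;	/* an IN urb */

	/* the old test: always 0, regardless of direction */
	printf("wrong: %d\n", (transfer_flags & URB_DIR_MASK) == USB_DIR_IN);
	/* the fixed test: 1 for IN urbs */
	printf("right: %d\n", (transfer_flags & URB_DIR_MASK) == URB_DIR_IN);
	return 0;
}
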
+diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
+index 45a915ccd71c..1c1385e3a824 100644
+--- a/drivers/usb/core/hcd.c
++++ b/drivers/usb/core/hcd.c
+@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
+ 				dev_name(&usb_dev->dev), retval);
+ 		return (retval < 0) ? retval : -EMSGSIZE;
+ 	}
+-	if (usb_dev->speed == USB_SPEED_SUPER) {
++
++	if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
+ 		retval = usb_get_bos_descriptor(usb_dev);
+-		if (retval < 0) {
++		if (!retval) {
++			usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
++		} else if (usb_dev->speed == USB_SPEED_SUPER) {
+ 			mutex_unlock(&usb_bus_list_lock);
+ 			dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
+ 					dev_name(&usb_dev->dev), retval);
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 3b7151687776..1e9a8c9aa531 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
+ 	return usb_get_intfdata(hdev->actconfig->interface[0]);
+ }
+ 
+-static int usb_device_supports_lpm(struct usb_device *udev)
++int usb_device_supports_lpm(struct usb_device *udev)
+ {
+ 	/* USB 2.1 (and greater) devices indicate LPM support through
+ 	 * their USB 2.0 Extended Capabilities BOS descriptor.
+@@ -2616,9 +2616,6 @@ static bool use_new_scheme(struct usb_device *udev, int retry)
+ 	return USE_NEW_SCHEME(retry);
+ }
+ 
+-static int hub_port_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, unsigned int delay, bool warm);
+-
+ /* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+  * Port warm reset is required to recover
+  */
+@@ -2706,44 +2703,6 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
+ 	return 0;
+ }
+ 
+-static void hub_port_finish_reset(struct usb_hub *hub, int port1,
+-			struct usb_device *udev, int *status)
+-{
+-	switch (*status) {
+-	case 0:
+-		/* TRSTRCY = 10 ms; plus some extra */
+-		msleep(10 + 40);
+-		if (udev) {
+-			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+-
+-			update_devnum(udev, 0);
+-			/* The xHC may think the device is already reset,
+-			 * so ignore the status.
+-			 */
+-			if (hcd->driver->reset_device)
+-				hcd->driver->reset_device(hcd, udev);
+-		}
+-		/* FALL THROUGH */
+-	case -ENOTCONN:
+-	case -ENODEV:
+-		usb_clear_port_feature(hub->hdev,
+-				port1, USB_PORT_FEAT_C_RESET);
+-		if (hub_is_superspeed(hub->hdev)) {
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_BH_PORT_RESET);
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_PORT_LINK_STATE);
+-			usb_clear_port_feature(hub->hdev, port1,
+-					USB_PORT_FEAT_C_CONNECTION);
+-		}
+-		if (udev)
+-			usb_set_device_state(udev, *status
+-					? USB_STATE_NOTATTACHED
+-					: USB_STATE_DEFAULT);
+-		break;
+-	}
+-}
+-
+ /* Handle port reset and port warm(BH) reset (for USB3 protocol ports) */
+ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 			struct usb_device *udev, unsigned int delay, bool warm)
+@@ -2767,13 +2726,10 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 		 * If the caller hasn't explicitly requested a warm reset,
+ 		 * double check and see if one is needed.
+ 		 */
+-		status = hub_port_status(hub, port1,
+-					&portstatus, &portchange);
+-		if (status < 0)
+-			goto done;
+-
+-		if (hub_port_warm_reset_required(hub, port1, portstatus))
+-			warm = true;
++		if (hub_port_status(hub, port1, &portstatus, &portchange) == 0)
++			if (hub_port_warm_reset_required(hub, port1,
++							portstatus))
++				warm = true;
+ 	}
+ 	clear_bit(port1, hub->warm_reset_bits);
+ 
+@@ -2799,11 +2755,19 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 
+ 		/* Check for disconnect or reset */
+ 		if (status == 0 || status == -ENOTCONN || status == -ENODEV) {
+-			hub_port_finish_reset(hub, port1, udev, &status);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_RESET);
+ 
+ 			if (!hub_is_superspeed(hub->hdev))
+ 				goto done;
+ 
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_BH_PORT_RESET);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_PORT_LINK_STATE);
++			usb_clear_port_feature(hub->hdev, port1,
++					USB_PORT_FEAT_C_CONNECTION);
++
+ 			/*
+ 			 * If a USB 3.0 device migrates from reset to an error
+ 			 * state, re-issue the warm reset.
+@@ -2836,6 +2800,26 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
+ 	dev_err(&port_dev->dev, "Cannot enable. Maybe the USB cable is bad?\n");
+ 
+ done:
++	if (status == 0) {
++		/* TRSTRCY = 10 ms; plus some extra */
++		msleep(10 + 40);
++		if (udev) {
++			struct usb_hcd *hcd = bus_to_hcd(udev->bus);
++
++			update_devnum(udev, 0);
++			/* The xHC may think the device is already reset,
++			 * so ignore the status.
++			 */
++			if (hcd->driver->reset_device)
++				hcd->driver->reset_device(hcd, udev);
++
++			usb_set_device_state(udev, USB_STATE_DEFAULT);
++		}
++	} else {
++		if (udev)
++			usb_set_device_state(udev, USB_STATE_NOTATTACHED);
++	}
++
+ 	if (!hub_is_superspeed(hub->hdev))
+ 		up_read(&ehci_cf_port_reset_rwsem);
+ 
+diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
+index 7eb1e26798e5..457255a3306a 100644
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -65,6 +65,7 @@ extern int  usb_hub_init(void);
+ extern void usb_hub_cleanup(void);
+ extern int usb_major_init(void);
+ extern void usb_major_cleanup(void);
++extern int usb_device_supports_lpm(struct usb_device *udev);
+ 
+ #ifdef	CONFIG_PM
+ 
+diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
+index 2ef3c8d6a9db..69e769c35cf5 100644
+--- a/drivers/usb/dwc3/ep0.c
++++ b/drivers/usb/dwc3/ep0.c
+@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+ 		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
+ 		ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
+ 		break;
++	case USB_REQ_SET_INTERFACE:
++		dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
++		dwc->start_config_issued = false;
++		/* Fall through */
+ 	default:
+ 		dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
+ 		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 8946c32047e9..333a7c0078fc 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -291,6 +291,8 @@ int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
+ 			dwc3_trace(trace_dwc3_gadget,
+ 					"Command Complete --> %d",
+ 					DWC3_DGCMD_STATUS(reg));
++			if (DWC3_DGCMD_STATUS(reg))
++				return -EINVAL;
+ 			return 0;
+ 		}
+ 
+@@ -328,6 +330,8 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
+ 			dwc3_trace(trace_dwc3_gadget,
+ 					"Command Complete --> %d",
+ 					DWC3_DEPCMD_STATUS(reg));
++			if (DWC3_DEPCMD_STATUS(reg))
++				return -EINVAL;
+ 			return 0;
+ 		}
+ 
+@@ -1902,12 +1906,16 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
+ {
+ 	unsigned		status = 0;
+ 	int			clean_busy;
++	u32			is_xfer_complete;
++
++	is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
+ 
+ 	if (event->status & DEPEVT_STATUS_BUSERR)
+ 		status = -ECONNRESET;
+ 
+ 	clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
+-	if (clean_busy)
++	if (clean_busy && (is_xfer_complete ||
++				usb_endpoint_xfer_isoc(dep->endpoint.desc)))
+ 		dep->flags &= ~DWC3_EP_BUSY;
+ 
+ 	/*
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 4e3447bbd097..58b4657fc721 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1758,10 +1758,13 @@ unknown:
+ 		 * take such requests too, if that's ever needed:  to work
+ 		 * in config 0, etc.
+ 		 */
+-		list_for_each_entry(f, &cdev->config->functions, list)
+-			if (f->req_match && f->req_match(f, ctrl))
+-				goto try_fun_setup;
+-		f = NULL;
++		if (cdev->config) {
++			list_for_each_entry(f, &cdev->config->functions, list)
++				if (f->req_match && f->req_match(f, ctrl))
++					goto try_fun_setup;
++			f = NULL;
++		}
++
+ 		switch (ctrl->bRequestType & USB_RECIP_MASK) {
+ 		case USB_RECIP_INTERFACE:
+ 			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 45b8c8b338df..6e7be91e6097 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -924,7 +924,8 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
+ 
+ 	kiocb->private = p;
+ 
+-	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
++	if (p->aio)
++		kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ 
+ 	res = ffs_epfile_io(kiocb->ki_filp, p);
+ 	if (res == -EIOCBQUEUED)
+@@ -968,7 +969,8 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
+ 
+ 	kiocb->private = p;
+ 
+-	kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
++	if (p->aio)
++		kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
+ 
+ 	res = ffs_epfile_io(kiocb->ki_filp, p);
+ 	if (res == -EIOCBQUEUED)
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 3cc109f3c9c8..15c307155037 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -2786,7 +2786,7 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
+ 		return -EINVAL;
+ 	}
+ 
+-	curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
++	curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL);
+ 	if (unlikely(!curlun))
+ 		return -ENOMEM;
+ 
+@@ -2796,8 +2796,6 @@ int fsg_common_set_nluns(struct fsg_common *common, int nluns)
+ 	common->luns = curlun;
+ 	common->nluns = nluns;
+ 
+-	pr_info("Number of LUNs=%d\n", common->nluns);
+-
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(fsg_common_set_nluns);
+@@ -3563,14 +3561,26 @@ static struct usb_function *fsg_alloc(struct usb_function_instance *fi)
+ 	struct fsg_opts *opts = fsg_opts_from_func_inst(fi);
+ 	struct fsg_common *common = opts->common;
+ 	struct fsg_dev *fsg;
++	unsigned nluns, i;
+ 
+ 	fsg = kzalloc(sizeof(*fsg), GFP_KERNEL);
+ 	if (unlikely(!fsg))
+ 		return ERR_PTR(-ENOMEM);
+ 
+ 	mutex_lock(&opts->lock);
++	if (!opts->refcnt) {
++		for (nluns = i = 0; i < FSG_MAX_LUNS; ++i)
++			if (common->luns[i])
++				nluns = i + 1;
++		if (!nluns)
++			pr_warn("No LUNS defined, continuing anyway\n");
++		else
++			common->nluns = nluns;
++		pr_info("Number of LUNs=%u\n", common->nluns);
++	}
+ 	opts->refcnt++;
+ 	mutex_unlock(&opts->lock);
++
+ 	fsg->function.name	= FSG_DRIVER_DESC;
+ 	fsg->function.bind	= fsg_bind;
+ 	fsg->function.unbind	= fsg_unbind;
+diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
+index d32160d6463f..5da37c957b53 100644
+--- a/drivers/usb/gadget/udc/mv_udc_core.c
++++ b/drivers/usb/gadget/udc/mv_udc_core.c
+@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
+ 		return -ENODEV;
+ 	}
+ 
+-	udc->phy_regs = ioremap(r->start, resource_size(r));
++	udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ 	if (udc->phy_regs == NULL) {
+ 		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ 		return -EBUSY;
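
The mv_udc conversion to devm_ioremap() plugs a leak as a side effect: the plain ioremap() mapping was not unwound on later probe failures. A managed mapping is released automatically when probe fails or the device is unbound, so the error paths need no iounmap(). A minimal probe sketch:

#include <linux/io.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *regs;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -ENODEV;

	/* lifetime tied to the device: no iounmap() on any exit path */
	regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!regs)
		return -EBUSY;

	/* ... any later 'return err' here leaks nothing ... */
	return 0;
}
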
+diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
+index 1463c398d322..fe1d5fc7da2d 100644
+--- a/drivers/usb/host/ohci-q.c
++++ b/drivers/usb/host/ohci-q.c
+@@ -980,10 +980,6 @@ rescan_all:
+ 		int			completed, modified;
+ 		__hc32			*prev;
+ 
+-		/* Is this ED already invisible to the hardware? */
+-		if (ed->state == ED_IDLE)
+-			goto ed_idle;
+-
+ 		/* only take off EDs that the HC isn't using, accounting for
+ 		 * frame counter wraps and EDs with partially retired TDs
+ 		 */
+@@ -1011,12 +1007,10 @@ skip_ed:
+ 		}
+ 
+ 		/* ED's now officially unlinked, hc doesn't see */
+-		ed->state = ED_IDLE;
+ 		ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
+ 		ed->hwNextED = 0;
+ 		wmb();
+ 		ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
+-ed_idle:
+ 
+ 		/* reentrancy:  if we drop the schedule lock, someone might
+ 		 * have modified this list.  normally it's just prepending
+@@ -1087,6 +1081,7 @@ rescan_this:
+ 		if (list_empty(&ed->td_list)) {
+ 			*last = ed->ed_next;
+ 			ed->ed_next = NULL;
++			ed->state = ED_IDLE;
+ 			list_del(&ed->in_use_list);
+ 		} else if (ohci->rh_state == OHCI_RH_RUNNING) {
+ 			*last = ed->ed_next;
+diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
+index f8336408ef07..3e442f77a2b9 100644
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
+ 		/* Attempt to use the ring cache */
+ 		if (virt_dev->num_rings_cached == 0)
+ 			return -ENOMEM;
++		virt_dev->num_rings_cached--;
+ 		virt_dev->eps[ep_index].new_ring =
+ 			virt_dev->ring_cache[virt_dev->num_rings_cached];
+ 		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+-		virt_dev->num_rings_cached--;
+ 		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+ 					1, type);
+ 	}
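
The xhci reorder fixes an off-by-one in a stack-style cache: with num_rings_cached entries, the top element sits at index num_rings_cached - 1, so the counter must be decremented before it is used as an index. The same pop operation, reduced to a sketch:

#include <stddef.h>

#define CACHE_SIZE 31

struct ring;				/* opaque payload */

static struct ring *ring_cache[CACHE_SIZE];
static unsigned int num_rings_cached;

static struct ring *cache_pop(void)
{
	struct ring *r;

	if (num_rings_cached == 0)
		return NULL;
	num_rings_cached--;			/* decrement first... */
	r = ring_cache[num_rings_cached];	/* ...then index the new top */
	ring_cache[num_rings_cached] = NULL;
	return r;
}
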
+diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
+index 86c4b533e90b..4731baca377f 100644
+--- a/drivers/usb/musb/musb_virthub.c
++++ b/drivers/usb/musb/musb_virthub.c
+@@ -273,9 +273,7 @@ static int musb_has_gadget(struct musb *musb)
+ #ifdef CONFIG_USB_MUSB_HOST
+ 	return 1;
+ #else
+-	if (musb->port_mode == MUSB_PORT_MODE_HOST)
+-		return 1;
+-	return musb->g.dev.driver != NULL;
++	return musb->port_mode == MUSB_PORT_MODE_HOST;
+ #endif
+ }
+ 
+diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
+index 8f7cb068d29b..3fcc0483a081 100644
+--- a/drivers/usb/phy/phy-mxs-usb.c
++++ b/drivers/usb/phy/phy-mxs-usb.c
+@@ -217,6 +217,9 @@ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
+ {
+ 	unsigned int vbus_value;
+ 
++	if (!mxs_phy->regmap_anatop)
++		return false;
++
+ 	if (mxs_phy->port_id == 0)
+ 		regmap_read(mxs_phy->regmap_anatop,
+ 			ANADIG_USB1_VBUS_DET_STAT,
+diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
+index ffd739e31bfc..eac7ccaa3c85 100644
+--- a/drivers/usb/serial/cp210x.c
++++ b/drivers/usb/serial/cp210x.c
+@@ -187,6 +187,7 @@ static const struct usb_device_id id_table[] = {
+ 	{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
+ 	{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
+ 	{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
++	{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
+ 	{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ 	{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ 	{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f0c0c53359ad..19b85ee98a72 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1765,6 +1765,7 @@ static const struct usb_device_id option_ids[] = {
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
++	{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+ 	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
+ 	{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
+ 	{ } /* Terminating entry */
+diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
+index 529066bbc7e8..46f1f13b41f1 100644
+--- a/drivers/usb/serial/usb-serial.c
++++ b/drivers/usb/serial/usb-serial.c
+@@ -1306,6 +1306,7 @@ static void __exit usb_serial_exit(void)
+ 	tty_unregister_driver(usb_serial_tty_driver);
+ 	put_tty_driver(usb_serial_tty_driver);
+ 	bus_unregister(&usb_serial_bus_type);
++	idr_destroy(&serial_minors);
+ }
+ 
+ 
+diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
+index 1f11a20a8ab9..55eb86c9e214 100644
+--- a/drivers/w1/slaves/w1_therm.c
++++ b/drivers/w1/slaves/w1_therm.c
+@@ -59,16 +59,32 @@ MODULE_ALIAS("w1-family-" __stringify(W1_THERM_DS28EA00));
+ static int w1_strong_pullup = 1;
+ module_param_named(strong_pullup, w1_strong_pullup, int, 0);
+ 
++struct w1_therm_family_data {
++	uint8_t rom[9];
++	atomic_t refcnt;
++};
++
++/* return the address of the refcnt in the family data */
++#define THERM_REFCNT(family_data) \
++	(&((struct w1_therm_family_data*)family_data)->refcnt)
++
+ static int w1_therm_add_slave(struct w1_slave *sl)
+ {
+-	sl->family_data = kzalloc(9, GFP_KERNEL);
++	sl->family_data = kzalloc(sizeof(struct w1_therm_family_data),
++		GFP_KERNEL);
+ 	if (!sl->family_data)
+ 		return -ENOMEM;
++	atomic_set(THERM_REFCNT(sl->family_data), 1);
+ 	return 0;
+ }
+ 
+ static void w1_therm_remove_slave(struct w1_slave *sl)
+ {
++	int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
++	while(refcnt) {
++		msleep(1000);
++		refcnt = atomic_read(THERM_REFCNT(sl->family_data));
++	}
+ 	kfree(sl->family_data);
+ 	sl->family_data = NULL;
+ }
+@@ -194,13 +210,22 @@ static ssize_t w1_slave_show(struct device *device,
+ 	struct w1_slave *sl = dev_to_w1_slave(device);
+ 	struct w1_master *dev = sl->master;
+ 	u8 rom[9], crc, verdict, external_power;
+-	int i, max_trying = 10;
++	int i, ret, max_trying = 10;
+ 	ssize_t c = PAGE_SIZE;
++	u8 *family_data = sl->family_data;
++
++	ret = mutex_lock_interruptible(&dev->bus_mutex);
++	if (ret != 0)
++		goto post_unlock;
+ 
+-	i = mutex_lock_interruptible(&dev->bus_mutex);
+-	if (i != 0)
+-		return i;
++	if(!sl->family_data)
++	{
++		ret = -ENODEV;
++		goto pre_unlock;
++	}
+ 
++	/* prevent the slave from going away in sleep */
++	atomic_inc(THERM_REFCNT(family_data));
+ 	memset(rom, 0, sizeof(rom));
+ 
+ 	while (max_trying--) {
+@@ -230,17 +255,19 @@ static ssize_t w1_slave_show(struct device *device,
+ 				mutex_unlock(&dev->bus_mutex);
+ 
+ 				sleep_rem = msleep_interruptible(tm);
+-				if (sleep_rem != 0)
+-					return -EINTR;
++				if (sleep_rem != 0) {
++					ret = -EINTR;
++					goto post_unlock;
++				}
+ 
+-				i = mutex_lock_interruptible(&dev->bus_mutex);
+-				if (i != 0)
+-					return i;
++				ret = mutex_lock_interruptible(&dev->bus_mutex);
++				if (ret != 0)
++					goto post_unlock;
+ 			} else if (!w1_strong_pullup) {
+ 				sleep_rem = msleep_interruptible(tm);
+ 				if (sleep_rem != 0) {
+-					mutex_unlock(&dev->bus_mutex);
+-					return -EINTR;
++					ret = -EINTR;
++					goto pre_unlock;
+ 				}
+ 			}
+ 
+@@ -269,19 +296,24 @@ static ssize_t w1_slave_show(struct device *device,
+ 	c -= snprintf(buf + PAGE_SIZE - c, c, ": crc=%02x %s\n",
+ 			   crc, (verdict) ? "YES" : "NO");
+ 	if (verdict)
+-		memcpy(sl->family_data, rom, sizeof(rom));
++		memcpy(family_data, rom, sizeof(rom));
+ 	else
+ 		dev_warn(device, "Read failed CRC check\n");
+ 
+ 	for (i = 0; i < 9; ++i)
+ 		c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ",
+-			      ((u8 *)sl->family_data)[i]);
++			      ((u8 *)family_data)[i]);
+ 
+ 	c -= snprintf(buf + PAGE_SIZE - c, c, "t=%d\n",
+ 		w1_convert_temp(rom, sl->family->fid));
++	ret = PAGE_SIZE - c;
++
++pre_unlock:
+ 	mutex_unlock(&dev->bus_mutex);
+ 
+-	return PAGE_SIZE - c;
++post_unlock:
++	atomic_dec(THERM_REFCNT(family_data));
++	return ret;
+ }
+ 
+ static int __init w1_therm_init(void)
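
The refcount added to w1_therm exists because w1_slave_show() drops the bus mutex and sleeps mid-conversion; without it, slave removal could free family_data while a reader was still using it. The scheme, reduced to its shape (placement of the kfree() is illustrative):

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/types.h>

struct family_data {
	u8 rom[9];
	atomic_t refcnt;	/* initialized to 1 at add time */
};

static void reader(struct family_data *fd)
{
	atomic_inc(&fd->refcnt);
	/* ... drop the bus mutex, sleep, use fd->rom ... */
	atomic_dec(&fd->refcnt);
}

static void remove_slave(struct family_data *fd)
{
	/* drop the add-time reference, then wait out any readers */
	int refcnt = atomic_sub_return(1, &fd->refcnt);

	while (refcnt) {
		msleep(1000);
		refcnt = atomic_read(&fd->refcnt);
	}
	kfree(fd);
}
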
+diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
+index 1e6be9e40577..c9c97dacf452 100644
+--- a/drivers/watchdog/omap_wdt.c
++++ b/drivers/watchdog/omap_wdt.c
+@@ -132,6 +132,13 @@ static int omap_wdt_start(struct watchdog_device *wdog)
+ 
+ 	pm_runtime_get_sync(wdev->dev);
+ 
++	/*
++	 * Make sure the watchdog is disabled. This is unfortunately required
++	 * because writing to various registers with the watchdog running has no
++	 * effect.
++	 */
++	omap_wdt_disable(wdev);
++
+ 	/* initialize prescaler */
+ 	while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01)
+ 		cpu_relax();
+diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
+index 703342e309f5..53f1e8a21707 100644
+--- a/fs/9p/vfs_inode.c
++++ b/fs/9p/vfs_inode.c
+@@ -540,8 +540,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
+diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
+index 9861c7c951a6..4d3ecfb55fcf 100644
+--- a/fs/9p/vfs_inode_dotl.c
++++ b/fs/9p/vfs_inode_dotl.c
+@@ -149,8 +149,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
+ 	unlock_new_inode(inode);
+ 	return inode;
+ error:
+-	unlock_new_inode(inode);
+-	iput(inode);
++	iget_failed(inode);
+ 	return ERR_PTR(retval);
+ 
+ }
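
Both 9p hunks replace an open-coded unlock_new_inode() + iput() with iget_failed(), which also marks the inode bad before unlocking, so a concurrent lookup waiting on the I_NEW bit sees the failure instead of a half-initialized inode. The canonical pattern, with fill_inode() standing in for the fs-specific initialization:

#include <linux/err.h>
#include <linux/fs.h>

/* hypothetical fs-specific initialization */
static int fill_inode(struct inode *inode)
{
	return 0;
}

static struct inode *my_iget(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;		/* already cached and initialized */

	err = fill_inode(inode);
	if (err) {
		iget_failed(inode);	/* make_bad + unlock + iput */
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
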
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index f6a596d5a637..d4a582ac3f73 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -246,6 +246,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
+ {
+ 	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+ 	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
++	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
+ 	struct btrfs_free_space *info;
+ 	struct rb_node *n;
+ 	u64 count;
+@@ -254,24 +255,30 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
+ 		return;
+ 
+ 	while (1) {
++		bool add_to_ctl = true;
++
++		spin_lock(rbroot_lock);
+ 		n = rb_first(rbroot);
+-		if (!n)
++		if (!n) {
++			spin_unlock(rbroot_lock);
+ 			break;
++		}
+ 
+ 		info = rb_entry(n, struct btrfs_free_space, offset_index);
+ 		BUG_ON(info->bitmap); /* Logic error */
+ 
+ 		if (info->offset > root->ino_cache_progress)
+-			goto free;
++			add_to_ctl = false;
+ 		else if (info->offset + info->bytes > root->ino_cache_progress)
+ 			count = root->ino_cache_progress - info->offset + 1;
+ 		else
+ 			count = info->bytes;
+ 
+-		__btrfs_add_free_space(ctl, info->offset, count);
+-free:
+ 		rb_erase(&info->offset_index, rbroot);
+-		kfree(info);
++		spin_unlock(rbroot_lock);
++		if (add_to_ctl)
++			__btrfs_add_free_space(ctl, info->offset, count);
++		kmem_cache_free(btrfs_free_space_cachep, info);
+ 	}
+ }
+ 
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 1c22c6518504..37d456a9a3b8 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -2413,8 +2413,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
+ 		goto out_unlock_inode;
+ 	}
+ 
+-	d_invalidate(dentry);
+-
+ 	down_write(&root->fs_info->subvol_sem);
+ 
+ 	err = may_destroy_subvol(dest);
+@@ -2508,7 +2506,7 @@ out_up_write:
+ out_unlock_inode:
+ 	mutex_unlock(&inode->i_mutex);
+ 	if (!err) {
+-		shrink_dcache_sb(root->fs_info->sb);
++		d_invalidate(dentry);
+ 		btrfs_invalidate_inodes(dest);
+ 		d_delete(dentry);
+ 		ASSERT(dest->send_in_progress == 0);
+@@ -2940,7 +2938,7 @@ out_unlock:
+ static long btrfs_ioctl_file_extent_same(struct file *file,
+ 			struct btrfs_ioctl_same_args __user *argp)
+ {
+-	struct btrfs_ioctl_same_args *same;
++	struct btrfs_ioctl_same_args *same = NULL;
+ 	struct btrfs_ioctl_same_extent_info *info;
+ 	struct inode *src = file_inode(file);
+ 	u64 off;
+@@ -2970,6 +2968,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
+ 
+ 	if (IS_ERR(same)) {
+ 		ret = PTR_ERR(same);
++		same = NULL;
+ 		goto out;
+ 	}
+ 
+@@ -3040,6 +3039,7 @@ static long btrfs_ioctl_file_extent_same(struct file *file,
+ 
+ out:
+ 	mnt_drop_write_file(file);
++	kfree(same);
+ 	return ret;
+ }
+ 
+@@ -3434,6 +3434,20 @@ process_slot:
+ 				u64 trim = 0;
+ 				u64 aligned_end = 0;
+ 
++				/*
++				 * Don't copy an inline extent into an offset
++				 * greater than zero. Having an inline extent
++				 * at such an offset results in chaos as btrfs
++				 * isn't prepared for such cases. Just skip
++				 * this case for the same reasons as commented
++				 * at btrfs_ioctl_clone().
++				 */
++				if (last_dest_end > 0) {
++					ret = -EOPNOTSUPP;
++					btrfs_end_transaction(trans, root);
++					goto out;
++				}
++
+ 				if (off > key.offset) {
+ 					skip = off - key.offset;
+ 					new_key.offset += skip;
+diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
+index 5628e25250c0..94e909c5a503 100644
+--- a/fs/btrfs/transaction.c
++++ b/fs/btrfs/transaction.c
+@@ -758,7 +758,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
+ 
+ 	if (!list_empty(&trans->ordered)) {
+ 		spin_lock(&info->trans_lock);
+-		list_splice(&trans->ordered, &cur_trans->pending_ordered);
++		list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
+ 		spin_unlock(&info->trans_lock);
+ 	}
+ 
+@@ -1848,7 +1848,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
+ 	}
+ 
+ 	spin_lock(&root->fs_info->trans_lock);
+-	list_splice(&trans->ordered, &cur_trans->pending_ordered);
++	list_splice_init(&trans->ordered, &cur_trans->pending_ordered);
+ 	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
+ 		spin_unlock(&root->fs_info->trans_lock);
+ 		atomic_inc(&cur_trans->use_count);
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index d04968374e9d..4920fceffacb 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -4161,6 +4161,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 	u64 ino = btrfs_ino(inode);
+ 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+ 	u64 logged_isize = 0;
++	bool need_log_inode_item = true;
+ 
+ 	path = btrfs_alloc_path();
+ 	if (!path)
+@@ -4269,11 +4270,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
+ 		} else {
+ 			if (inode_only == LOG_INODE_ALL)
+ 				fast_search = true;
+-			ret = log_inode_item(trans, log, dst_path, inode);
+-			if (ret) {
+-				err = ret;
+-				goto out_unlock;
+-			}
+ 			goto log_extents;
+ 		}
+ 
+@@ -4296,6 +4292,9 @@ again:
+ 		if (min_key.type > max_key.type)
+ 			break;
+ 
++		if (min_key.type == BTRFS_INODE_ITEM_KEY)
++			need_log_inode_item = false;
++
+ 		src = path->nodes[0];
+ 		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
+ 			ins_nr++;
+@@ -4366,6 +4365,11 @@ next_slot:
+ log_extents:
+ 	btrfs_release_path(path);
+ 	btrfs_release_path(dst_path);
++	if (need_log_inode_item) {
++		err = log_inode_item(trans, log, dst_path, inode);
++		if (err)
++			goto out_unlock;
++	}
+ 	if (fast_search) {
+ 		/*
+ 		 * Some ordered extents started by fsync might have completed
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index e003a1e81dc3..87ba10d1d3bc 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -503,7 +503,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
+ 	struct buffer_head		*bh;
+ 	int				err;
+ 
+-	bh = sb_getblk(inode->i_sb, pblk);
++	bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
+ 	if (unlikely(!bh))
+ 		return ERR_PTR(-ENOMEM);
+ 
+@@ -1088,7 +1088,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
+ 		err = -EIO;
+ 		goto cleanup;
+ 	}
+-	bh = sb_getblk(inode->i_sb, newblock);
++	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
+ 	if (unlikely(!bh)) {
+ 		err = -ENOMEM;
+ 		goto cleanup;
+@@ -1282,7 +1282,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
+ 	if (newblock == 0)
+ 		return err;
+ 
+-	bh = sb_getblk(inode->i_sb, newblock);
++	bh = sb_getblk_gfp(inode->i_sb, newblock, __GFP_MOVABLE | GFP_NOFS);
+ 	if (unlikely(!bh))
+ 		return -ENOMEM;
+ 	lock_buffer(bh);
+diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
+index 958824019509..94ae6874c2cb 100644
+--- a/fs/ext4/indirect.c
++++ b/fs/ext4/indirect.c
+@@ -565,7 +565,7 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
+ 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC)) {
+ 		EXT4_ERROR_INODE(inode, "Can't allocate blocks for "
+ 				 "non-extent mapped inodes with bigalloc");
+-		return -ENOSPC;
++		return -EUCLEAN;
+ 	}
+ 
+ 	/* Set up for the direct block allocation */
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 0554b0b5957b..966c614822cc 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1342,7 +1342,7 @@ static void ext4_da_page_release_reservation(struct page *page,
+ 					     unsigned int offset,
+ 					     unsigned int length)
+ {
+-	int to_release = 0;
++	int to_release = 0, contiguous_blks = 0;
+ 	struct buffer_head *head, *bh;
+ 	unsigned int curr_off = 0;
+ 	struct inode *inode = page->mapping->host;
+@@ -1363,14 +1363,23 @@ static void ext4_da_page_release_reservation(struct page *page,
+ 
+ 		if ((offset <= curr_off) && (buffer_delay(bh))) {
+ 			to_release++;
++			contiguous_blks++;
+ 			clear_buffer_delay(bh);
++		} else if (contiguous_blks) {
++			lblk = page->index <<
++			       (PAGE_CACHE_SHIFT - inode->i_blkbits);
++			lblk += (curr_off >> inode->i_blkbits) -
++				contiguous_blks;
++			ext4_es_remove_extent(inode, lblk, contiguous_blks);
++			contiguous_blks = 0;
+ 		}
+ 		curr_off = next_off;
+ 	} while ((bh = bh->b_this_page) != head);
+ 
+-	if (to_release) {
++	if (contiguous_blks) {
+ 		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+-		ext4_es_remove_extent(inode, lblk, to_release);
++		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
++		ext4_es_remove_extent(inode, lblk, contiguous_blks);
+ 	}
+ 
+ 	/* If we have released all the blocks belonging to a cluster, then we
+@@ -1701,19 +1710,32 @@ static int __ext4_journalled_writepage(struct page *page,
+ 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
+ 				       NULL, bget_one);
+ 	}
+-	/* As soon as we unlock the page, it can go away, but we have
+-	 * references to buffers so we are safe */
++	/*
++	 * We need to release the page lock before we start the
++	 * journal, so grab a reference so the page won't disappear
++	 * out from under us.
++	 */
++	get_page(page);
+ 	unlock_page(page);
+ 
+ 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
+ 				    ext4_writepage_trans_blocks(inode));
+ 	if (IS_ERR(handle)) {
+ 		ret = PTR_ERR(handle);
+-		goto out;
++		put_page(page);
++		goto out_no_pagelock;
+ 	}
+-
+ 	BUG_ON(!ext4_handle_valid(handle));
+ 
++	lock_page(page);
++	put_page(page);
++	if (page->mapping != mapping) {
++		/* The page got truncated from under us */
++		ext4_journal_stop(handle);
++		ret = 0;
++		goto out;
++	}
++
+ 	if (inline_data) {
+ 		BUFFER_TRACE(inode_bh, "get write access");
+ 		ret = ext4_journal_get_write_access(handle, inode_bh);
+@@ -1739,6 +1761,8 @@ static int __ext4_journalled_writepage(struct page *page,
+ 				       NULL, bput_one);
+ 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
+ out:
++	unlock_page(page);
++out_no_pagelock:
+ 	brelse(inode_bh);
+ 	return ret;
+ }
+@@ -4345,7 +4369,12 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
+ 	int inode_size = EXT4_INODE_SIZE(sb);
+ 
+ 	oi.orig_ino = orig_ino;
+-	ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
++	/*
++	 * Calculate the first inode in the inode table block.  Inode
++	 * numbers are one-based.  That is, the first inode in a block
++	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
++	 */
++	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
+ 	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
+ 		if (ino == orig_ino)
+ 			continue;
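
To see the off-by-one the ext4_update_other_inodes_time() hunk above
corrects: with 4k blocks and 256-byte inodes, inodes_per_block is 16
and inode numbers start at 1, so a table block holds inodes 1-16,
17-32, and so on. For orig_ino = 16 the old expression yields
(16 & ~15) + 1 = 17, the first inode of the following block, so the
wrong block's inodes had their timestamps updated; the corrected
((16 - 1) & ~15) + 1 = 1 lands back on the right block.
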
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 8d1e60214ef0..41260489d3bc 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4800,18 +4800,12 @@ do_more:
+ 		/*
+ 		 * blocks being freed are metadata. these blocks shouldn't
+ 		 * be used until this transaction is committed
++		 *
++		 * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed
++		 * to fail.
+ 		 */
+-	retry:
+-		new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
+-		if (!new_entry) {
+-			/*
+-			 * We use a retry loop because
+-			 * ext4_free_blocks() is not allowed to fail.
+-			 */
+-			cond_resched();
+-			congestion_wait(BLK_RW_ASYNC, HZ/50);
+-			goto retry;
+-		}
++		new_entry = kmem_cache_alloc(ext4_free_data_cachep,
++				GFP_NOFS|__GFP_NOFAIL);
+ 		new_entry->efd_start_cluster = bit;
+ 		new_entry->efd_group = block_group;
+ 		new_entry->efd_count = count_clusters;
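
Worth noting on the mballoc hunk above: for slab-sized objects,
__GFP_NOFAIL makes the allocator retry internally until it succeeds,
so this kmem_cache_alloc() call can no longer return NULL. Deleting
the hand-rolled cond_resched()/congestion_wait() loop is therefore a
simplification, not a behavior change.
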
+diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
+index b52374e42102..6163ad21cb0e 100644
+--- a/fs/ext4/migrate.c
++++ b/fs/ext4/migrate.c
+@@ -620,6 +620,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 	struct ext4_inode_info		*ei = EXT4_I(inode);
+ 	struct ext4_extent		*ex;
+ 	unsigned int			i, len;
++	ext4_lblk_t			start, end;
+ 	ext4_fsblk_t			blk;
+ 	handle_t			*handle;
+ 	int				ret;
+@@ -633,6 +634,14 @@ int ext4_ind_migrate(struct inode *inode)
+ 				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
+ 		return -EOPNOTSUPP;
+ 
++	/*
++	 * In order to get correct extent info, force all delayed allocation
++	 * blocks to be allocated, otherwise delayed allocation blocks may not
++	 * be reflected and bypass the checks on extent header.
++	 */
++	if (test_opt(inode->i_sb, DELALLOC))
++		ext4_alloc_da_blocks(inode);
++
+ 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
+ 	if (IS_ERR(handle))
+ 		return PTR_ERR(handle);
+@@ -650,11 +659,13 @@ int ext4_ind_migrate(struct inode *inode)
+ 		goto errout;
+ 	}
+ 	if (eh->eh_entries == 0)
+-		blk = len = 0;
++		blk = len = start = end = 0;
+ 	else {
+ 		len = le16_to_cpu(ex->ee_len);
+ 		blk = ext4_ext_pblock(ex);
+-		if (len > EXT4_NDIR_BLOCKS) {
++		start = le32_to_cpu(ex->ee_block);
++		end = start + len - 1;
++		if (end >= EXT4_NDIR_BLOCKS) {
+ 			ret = -EOPNOTSUPP;
+ 			goto errout;
+ 		}
+@@ -662,7 +673,7 @@ int ext4_ind_migrate(struct inode *inode)
+ 
+ 	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
+ 	memset(ei->i_data, 0, sizeof(ei->i_data));
+-	for (i=0; i < len; i++)
++	for (i = start; i <= end; i++)
+ 		ei->i_data[i] = cpu_to_le32(blk++);
+ 	ext4_mark_inode_dirty(handle, inode);
+ errout:
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index ca9d4a2fed41..ca12affdba96 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -807,6 +807,7 @@ static void ext4_put_super(struct super_block *sb)
+ 		dump_orphan_list(sb, sbi);
+ 	J_ASSERT(list_empty(&sbi->s_orphan));
+ 
++	sync_blockdev(sb->s_bdev);
+ 	invalidate_bdev(sb->s_bdev);
+ 	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
+ 		/*
+@@ -4943,6 +4944,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
+ 		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
+ 	}
+ 
++	if (*flags & MS_LAZYTIME)
++		sb->s_flags |= MS_LAZYTIME;
++
+ 	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+ 		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
+ 			err = -EROFS;
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 18dacf9ed8ff..708d697113fc 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1026,6 +1026,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		goto err_fput;
+ 
+ 	fuse_conn_init(fc);
++	fc->release = fuse_free_conn;
+ 
+ 	fc->dev = sb->s_dev;
+ 	fc->sb = sb;
+@@ -1040,7 +1041,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ 		fc->dont_mask = 1;
+ 	sb->s_flags |= MS_POSIXACL;
+ 
+-	fc->release = fuse_free_conn;
+ 	fc->flags = d.flags;
+ 	fc->user_id = d.user_id;
+ 	fc->group_id = d.group_id;
+diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
+index 7cd00d3a7c9b..8685c655737f 100644
+--- a/fs/hpfs/super.c
++++ b/fs/hpfs/super.c
+@@ -52,17 +52,20 @@ static void unmark_dirty(struct super_block *s)
+ }
+ 
+ /* Filesystem error... */
+-static char err_buf[1024];
+-
+ void hpfs_error(struct super_block *s, const char *fmt, ...)
+ {
++	struct va_format vaf;
+ 	va_list args;
+ 
+ 	va_start(args, fmt);
+-	vsnprintf(err_buf, sizeof(err_buf), fmt, args);
++
++	vaf.fmt = fmt;
++	vaf.va = &args;
++
++	pr_err("filesystem error: %pV", &vaf);
++
+ 	va_end(args);
+ 
+-	pr_err("filesystem error: %s", err_buf);
+ 	if (!hpfs_sb(s)->sb_was_error) {
+ 		if (hpfs_sb(s)->sb_err == 2) {
+ 			pr_cont("; crashing the system because you wanted it\n");
+@@ -424,11 +427,14 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
+ 	int o;
+ 	struct hpfs_sb_info *sbi = hpfs_sb(s);
+ 	char *new_opts = kstrdup(data, GFP_KERNEL);
+-	
++
++	if (!new_opts)
++		return -ENOMEM;
++
+ 	sync_filesystem(s);
+ 
+ 	*flags |= MS_NOATIME;
+-	
++
+ 	hpfs_lock(s);
+ 	uid = sbi->sb_uid; gid = sbi->sb_gid;
+ 	umask = 0777 & ~sbi->sb_mode;
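
The %pV conversion adopted above prints through a nested
struct va_format { const char *fmt; va_list *va; }, so hpfs_error()
now formats its arguments inside pr_err() itself. That allows
dropping the file-scope err_buf, which concurrent callers could
previously overwrite under one another since nothing serialized
access to it.
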
+diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
+index 988b32ed4c87..4227dc4f7437 100644
+--- a/fs/jbd2/checkpoint.c
++++ b/fs/jbd2/checkpoint.c
+@@ -390,7 +390,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 	unsigned long	blocknr;
+ 
+ 	if (is_journal_aborted(journal))
+-		return 1;
++		return -EIO;
+ 
+ 	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
+ 		return 1;
+@@ -405,10 +405,9 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
+ 	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
+ 	 */
+ 	if (journal->j_flags & JBD2_BARRIER)
+-		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
++		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
+ 
+-	__jbd2_update_log_tail(journal, first_tid, blocknr);
+-	return 0;
++	return __jbd2_update_log_tail(journal, first_tid, blocknr);
+ }
+ 
+ 
+diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
+index b96bd8076b70..112fad9e1e20 100644
+--- a/fs/jbd2/journal.c
++++ b/fs/jbd2/journal.c
+@@ -885,9 +885,10 @@ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
+  *
+  * Requires j_checkpoint_mutex
+  */
+-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ {
+ 	unsigned long freed;
++	int ret;
+ 
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 
+@@ -897,7 +898,10 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ 	 * space and if we lose sb update during power failure we'd replay
+ 	 * old transaction with possibly newly overwritten data.
+ 	 */
+-	jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
++	ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
++	if (ret)
++		goto out;
++
+ 	write_lock(&journal->j_state_lock);
+ 	freed = block - journal->j_tail;
+ 	if (block < journal->j_tail)
+@@ -913,6 +917,9 @@ void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
+ 	journal->j_tail_sequence = tid;
+ 	journal->j_tail = block;
+ 	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
+ }
+ 
+ /*
+@@ -1331,7 +1338,7 @@ static int journal_reset(journal_t *journal)
+ 	return jbd2_journal_start_thread(journal);
+ }
+ 
+-static void jbd2_write_superblock(journal_t *journal, int write_op)
++static int jbd2_write_superblock(journal_t *journal, int write_op)
+ {
+ 	struct buffer_head *bh = journal->j_sb_buffer;
+ 	journal_superblock_t *sb = journal->j_superblock;
+@@ -1370,7 +1377,10 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
+ 		printk(KERN_ERR "JBD2: Error %d detected when updating "
+ 		       "journal superblock for %s.\n", ret,
+ 		       journal->j_devname);
++		jbd2_journal_abort(journal, ret);
+ 	}
++
++	return ret;
+ }
+ 
+ /**
+@@ -1383,10 +1393,11 @@ static void jbd2_write_superblock(journal_t *journal, int write_op)
+  * Update a journal's superblock information about log tail and write it to
+  * disk, waiting for the IO to complete.
+  */
+-void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
++int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 				     unsigned long tail_block, int write_op)
+ {
+ 	journal_superblock_t *sb = journal->j_superblock;
++	int ret;
+ 
+ 	BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
+ 	jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
+@@ -1395,13 +1406,18 @@ void jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
+ 	sb->s_sequence = cpu_to_be32(tail_tid);
+ 	sb->s_start    = cpu_to_be32(tail_block);
+ 
+-	jbd2_write_superblock(journal, write_op);
++	ret = jbd2_write_superblock(journal, write_op);
++	if (ret)
++		goto out;
+ 
+ 	/* Log is no longer empty */
+ 	write_lock(&journal->j_state_lock);
+ 	WARN_ON(!sb->s_sequence);
+ 	journal->j_flags &= ~JBD2_FLUSHED;
+ 	write_unlock(&journal->j_state_lock);
++
++out:
++	return ret;
+ }
+ 
+ /**
+@@ -1950,7 +1966,14 @@ int jbd2_journal_flush(journal_t *journal)
+ 		return -EIO;
+ 
+ 	mutex_lock(&journal->j_checkpoint_mutex);
+-	jbd2_cleanup_journal_tail(journal);
++	if (!err) {
++		err = jbd2_cleanup_journal_tail(journal);
++		if (err < 0) {
++			mutex_unlock(&journal->j_checkpoint_mutex);
++			goto out;
++		}
++		err = 0;
++	}
+ 
+ 	/* Finally, mark the journal as really needing no recovery.
+ 	 * This sets s_start==0 in the underlying superblock, which is
+@@ -1966,7 +1989,8 @@ int jbd2_journal_flush(journal_t *journal)
+ 	J_ASSERT(journal->j_head == journal->j_tail);
+ 	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
+ 	write_unlock(&journal->j_state_lock);
+-	return 0;
++out:
++	return err;
+ }
+ 
+ /**
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 7d05089e52d6..6f5f0f425e86 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -631,7 +631,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
+ 			nfs_direct_set_resched_writes(hdr->dreq);
+ 			/* fake unstable write to let common nfs resend pages */
+ 			hdr->verf.committed = NFS_UNSTABLE;
+-			hdr->good_bytes = 0;
++			hdr->good_bytes = hdr->args.count;
+ 		}
+ 		return;
+ 	}
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 77a2d026aa12..f13e1969eedd 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -324,7 +324,8 @@ static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
+ 				__func__, PTR_ERR(cred));
+ 			return PTR_ERR(cred);
+ 		} else {
+-			mirror->cred = cred;
++			if (cmpxchg(&mirror->cred, NULL, cred))
++				put_rpccred(cred);
+ 		}
+ 	}
+ 	return 0;
+@@ -386,7 +387,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ 	/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
+ 	smp_rmb();
+ 	if (ds->ds_clp)
+-		goto out;
++		goto out_update_creds;
+ 
+ 	flavor = nfs4_ff_layout_choose_authflavor(mirror);
+ 
+@@ -430,7 +431,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
+ 			}
+ 		}
+ 	}
+-
++out_update_creds:
+ 	if (ff_layout_update_mirror_cred(mirror, ds))
+ 		ds = NULL;
+ out:
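
A minimal sketch of the lock-free publish idiom behind the
mirror->cred change above: cmpxchg() installs the new pointer only if
the slot still holds NULL and returns the previous value, so the loser
of a race just drops its own reference instead of leaking or
double-assigning. The types and put_ref() below are illustrative
stand-ins for struct rpc_cred and put_rpccred().

#include <linux/atomic.h>

struct cred_like {
	int refcount;
};

static void put_ref(struct cred_like *c)
{
	/* drop one reference, freeing on zero (elided) */
}

static void publish_once(struct cred_like **slot, struct cred_like *new)
{
	if (cmpxchg(slot, NULL, new) != NULL)
		put_ref(new);	/* another thread installed one first */
}
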
+diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
+index 53852a4bd88b..9b04c2e6fffc 100644
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -1342,7 +1342,7 @@ static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+ 	if (args->npages != 0)
+ 		xdr_write_pages(xdr, args->pages, 0, args->len);
+ 	else
+-		xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE);
++		xdr_reserve_space(xdr, args->len);
+ 
+ 	error = nfsacl_encode(xdr->buf, base, args->inode,
+ 			    (args->mask & NFS_ACL) ?
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index 2782cfca2265..ddef1dc80cf7 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -1482,6 +1482,8 @@ restart:
+ 					spin_unlock(&state->state_lock);
+ 				}
+ 				nfs4_put_open_state(state);
++				clear_bit(NFS4CLNT_RECLAIM_NOGRACE,
++					&state->flags);
+ 				spin_lock(&sp->so_lock);
+ 				goto restart;
+ 			}
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index 230606243be6..d47c188682b1 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -1821,6 +1821,7 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
+ 	/* Resend all requests through the MDS */
+ 	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
+ 			      hdr->completion_ops);
++	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
+ 	return nfs_pageio_resend(&pgio, hdr);
+ }
+ EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
+@@ -1865,6 +1866,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
+ 		mirror->pg_recoalesce = 1;
+ 	}
+ 	nfs_pgio_data_destroy(hdr);
++	hdr->release(hdr);
+ }
+ 
+ static enum pnfs_try_status
+@@ -1979,6 +1981,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
+ 		mirror->pg_recoalesce = 1;
+ 	}
+ 	nfs_pgio_data_destroy(hdr);
++	hdr->release(hdr);
+ }
+ 
+ /*
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index dfc19f1575a1..daf355642845 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1289,6 +1289,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
+ static void nfs_redirty_request(struct nfs_page *req)
+ {
+ 	nfs_mark_request_dirty(req);
++	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
+ 	nfs_unlock_request(req);
+ 	nfs_end_page_writeback(req);
+ 	nfs_release_request(req);
+diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
+index 907870e81a72..70e9af551600 100644
+--- a/fs/overlayfs/readdir.c
++++ b/fs/overlayfs/readdir.c
+@@ -23,6 +23,7 @@ struct ovl_cache_entry {
+ 	u64 ino;
+ 	struct list_head l_node;
+ 	struct rb_node node;
++	struct ovl_cache_entry *next_maybe_whiteout;
+ 	bool is_whiteout;
+ 	char name[];
+ };
+@@ -39,7 +40,7 @@ struct ovl_readdir_data {
+ 	struct rb_root root;
+ 	struct list_head *list;
+ 	struct list_head middle;
+-	struct dentry *dir;
++	struct ovl_cache_entry *first_maybe_whiteout;
+ 	int count;
+ 	int err;
+ };
+@@ -79,7 +80,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
+ 	return NULL;
+ }
+ 
+-static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
++static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
+ 						   const char *name, int len,
+ 						   u64 ino, unsigned int d_type)
+ {
+@@ -98,29 +99,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+ 	p->is_whiteout = false;
+ 
+ 	if (d_type == DT_CHR) {
+-		struct dentry *dentry;
+-		const struct cred *old_cred;
+-		struct cred *override_cred;
+-
+-		override_cred = prepare_creds();
+-		if (!override_cred) {
+-			kfree(p);
+-			return NULL;
+-		}
+-
+-		/*
+-		 * CAP_DAC_OVERRIDE for lookup
+-		 */
+-		cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+-		old_cred = override_creds(override_cred);
+-
+-		dentry = lookup_one_len(name, dir, len);
+-		if (!IS_ERR(dentry)) {
+-			p->is_whiteout = ovl_is_whiteout(dentry);
+-			dput(dentry);
+-		}
+-		revert_creds(old_cred);
+-		put_cred(override_cred);
++		p->next_maybe_whiteout = rdd->first_maybe_whiteout;
++		rdd->first_maybe_whiteout = p;
+ 	}
+ 	return p;
+ }
+@@ -148,7 +128,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
+ 			return 0;
+ 	}
+ 
+-	p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
++	p = ovl_cache_entry_new(rdd, name, len, ino, d_type);
+ 	if (p == NULL)
+ 		return -ENOMEM;
+ 
+@@ -169,7 +149,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
+ 	if (p) {
+ 		list_move_tail(&p->l_node, &rdd->middle);
+ 	} else {
+-		p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
++		p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type);
+ 		if (p == NULL)
+ 			rdd->err = -ENOMEM;
+ 		else
+@@ -219,6 +199,43 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
+ 		return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
+ }
+ 
++static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
++{
++	int err;
++	struct ovl_cache_entry *p;
++	struct dentry *dentry;
++	const struct cred *old_cred;
++	struct cred *override_cred;
++
++	override_cred = prepare_creds();
++	if (!override_cred)
++		return -ENOMEM;
++
++	/*
++	 * CAP_DAC_OVERRIDE for lookup
++	 */
++	cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
++	old_cred = override_creds(override_cred);
++
++	err = mutex_lock_killable(&dir->d_inode->i_mutex);
++	if (!err) {
++		while (rdd->first_maybe_whiteout) {
++			p = rdd->first_maybe_whiteout;
++			rdd->first_maybe_whiteout = p->next_maybe_whiteout;
++			dentry = lookup_one_len(p->name, dir, p->len);
++			if (!IS_ERR(dentry)) {
++				p->is_whiteout = ovl_is_whiteout(dentry);
++				dput(dentry);
++			}
++		}
++		mutex_unlock(&dir->d_inode->i_mutex);
++	}
++	revert_creds(old_cred);
++	put_cred(override_cred);
++
++	return err;
++}
++
+ static inline int ovl_dir_read(struct path *realpath,
+ 			       struct ovl_readdir_data *rdd)
+ {
+@@ -229,7 +246,7 @@ static inline int ovl_dir_read(struct path *realpath,
+ 	if (IS_ERR(realfile))
+ 		return PTR_ERR(realfile);
+ 
+-	rdd->dir = realpath->dentry;
++	rdd->first_maybe_whiteout = NULL;
+ 	rdd->ctx.pos = 0;
+ 	do {
+ 		rdd->count = 0;
+@@ -238,6 +255,10 @@ static inline int ovl_dir_read(struct path *realpath,
+ 		if (err >= 0)
+ 			err = rdd->err;
+ 	} while (!err && rdd->count);
++
++	if (!err && rdd->first_maybe_whiteout)
++		err = ovl_check_whiteouts(realpath->dentry, rdd);
++
+ 	fput(realfile);
+ 
+ 	return err;
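
A sketch of the deferral pattern this readdir change introduces:
entries that may be whiteouts are chained through an intrusive singly
linked list while iterate_dir() runs, and the lookup_one_len() calls
happen in a second pass that explicitly takes the directory's i_mutex
(mutex_lock_killable() in the new ovl_check_whiteouts()). The field
names below follow the hunk; the surrounding types and helpers are
illustrative.

#include <linux/types.h>

struct cache_entry {
	struct cache_entry *next_maybe_whiteout;
	bool is_whiteout;
	char name[32];		/* a flexible array in the real struct */
};

/* first pass: runs inside the fill callback, so defer the lookup */
static void defer_entry(struct cache_entry **head, struct cache_entry *p)
{
	p->next_maybe_whiteout = *head;
	*head = p;
}

/* second pass: walk the chain once it is safe to lock and look up */
static void resolve_deferred(struct cache_entry *head)
{
	while (head) {
		struct cache_entry *p = head;

		head = p->next_maybe_whiteout;
		/* lookup_one_len(p->name, dir, strlen(p->name)),
		 * then set p->is_whiteout from the result */
	}
}
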
+diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
+index 3fbf167cfb4c..73e75a87af50 100644
+--- a/fs/xfs/xfs_attr_inactive.c
++++ b/fs/xfs/xfs_attr_inactive.c
+@@ -435,8 +435,14 @@ xfs_attr_inactive(
+ 	 */
+ 	xfs_trans_ijoin(trans, dp, 0);
+ 
+-	/* invalidate and truncate the attribute fork extents */
+-	if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
++	/*
++	 * Invalidate and truncate the attribute fork extents. Make sure the
++	 * fork actually has attributes as otherwise the invalidation has no
++	 * blocks to read and returns an error. In this case, just do the fork
++	 * removal below.
++	 */
++	if (xfs_inode_hasattr(dp) &&
++	    dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+ 		error = xfs_attr3_root_inactive(&trans, dp);
+ 		if (error)
+ 			goto out_cancel;
+diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
+index 3df411eadb86..40c076523cfa 100644
+--- a/fs/xfs/xfs_symlink.c
++++ b/fs/xfs/xfs_symlink.c
+@@ -104,7 +104,7 @@ xfs_readlink_bmap(
+ 			cur_chunk += sizeof(struct xfs_dsymlink_hdr);
+ 		}
+ 
+-		memcpy(link + offset, bp->b_addr, byte_cnt);
++		memcpy(link + offset, cur_chunk, byte_cnt);
+ 
+ 		pathlen -= byte_cnt;
+ 		offset += byte_cnt;
+diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
+index 08ef57bc8d63..f5ed1f17f061 100644
+--- a/include/acpi/acpixf.h
++++ b/include/acpi/acpixf.h
+@@ -195,9 +195,18 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
+  * address. Although ACPICA adheres to the ACPI specification which
+  * requires the use of the corresponding 64-bit address if it is non-zero,
+  * some machines have been found to have a corrupted non-zero 64-bit
+- * address. Default is TRUE, favor the 32-bit addresses.
++ * address. Default is FALSE, do not favor the 32-bit addresses.
+  */
+-ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE);
++ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE);
++
++/*
++ * Optionally use 32-bit FACS table addresses.
++ * It is reported that some platforms fail to resume from system suspending
++ * if 64-bit FACS table address is selected:
++ * https://bugzilla.kernel.org/show_bug.cgi?id=74021
++ * Default is TRUE, favor the 32-bit addresses.
++ */
++ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_facs_addresses, TRUE);
+ 
+ /*
+  * Optionally truncate I/O addresses to 16 bits. Provides compatibility
+diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
+index 1c3002e1db20..181427ef3549 100644
+--- a/include/acpi/actypes.h
++++ b/include/acpi/actypes.h
+@@ -572,6 +572,7 @@ typedef u64 acpi_integer;
+ #define ACPI_NO_ACPI_ENABLE             0x10
+ #define ACPI_NO_DEVICE_INIT             0x20
+ #define ACPI_NO_OBJECT_INIT             0x40
++#define ACPI_NO_FACS_INIT               0x80
+ 
+ /*
+  * Initialization state
+diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
+index c157103492b0..3f13b910f8d2 100644
+--- a/include/drm/drm_atomic.h
++++ b/include/drm/drm_atomic.h
+@@ -77,26 +77,26 @@ int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+ 
+ #define for_each_connector_in_state(state, connector, connector_state, __i) \
+ 	for ((__i) = 0;							\
+-	     (connector) = (state)->connectors[__i],			\
+-	     (connector_state) = (state)->connector_states[__i],	\
+-	     (__i) < (state)->num_connector;				\
++	     (__i) < (state)->num_connector &&				\
++	     ((connector) = (state)->connectors[__i],			\
++	     (connector_state) = (state)->connector_states[__i], 1); 	\
+ 	     (__i)++)							\
+ 		if (connector)
+ 
+ #define for_each_crtc_in_state(state, crtc, crtc_state, __i)	\
+ 	for ((__i) = 0;						\
+-	     (crtc) = (state)->crtcs[__i],			\
+-	     (crtc_state) = (state)->crtc_states[__i],		\
+-	     (__i) < (state)->dev->mode_config.num_crtc;	\
++	     (__i) < (state)->dev->mode_config.num_crtc &&	\
++	     ((crtc) = (state)->crtcs[__i],			\
++	     (crtc_state) = (state)->crtc_states[__i], 1);	\
+ 	     (__i)++)						\
+ 		if (crtc_state)
+ 
+-#define for_each_plane_in_state(state, plane, plane_state, __i)	\
+-	for ((__i) = 0;						\
+-	     (plane) = (state)->planes[__i],			\
+-	     (plane_state) = (state)->plane_states[__i],	\
+-	     (__i) < (state)->dev->mode_config.num_total_plane;	\
+-	     (__i)++)						\
++#define for_each_plane_in_state(state, plane, plane_state, __i)		\
++	for ((__i) = 0;							\
++	     (__i) < (state)->dev->mode_config.num_total_plane &&	\
++	     ((plane) = (state)->planes[__i],				\
++	     (plane_state) = (state)->plane_states[__i], 1);		\
++	     (__i)++)							\
+ 		if (plane_state)
+ 
+ #endif /* DRM_ATOMIC_H_ */
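
The macro rewrite above is a bounds fix: in a for-loop condition built
with the comma operator, every operand is evaluated, so the old form
loaded state->connectors[__i] (and friends) once with __i already
equal to the array length before the trailing comparison ended the
loop. Putting the bound test first behind a short-circuiting && gives
the safe shape, sketched generically:

#define for_each_item(arr, n, item, i)			\
	for ((i) = 0;					\
	     (i) < (n) && ((item) = (arr)[(i)], 1);	\
	     (i)++)
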
+diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
+index ca71c03143d1..54233583c6cb 100644
+--- a/include/drm/drm_crtc.h
++++ b/include/drm/drm_crtc.h
+@@ -731,6 +731,8 @@ struct drm_connector {
+ 	uint8_t num_h_tile, num_v_tile;
+ 	uint8_t tile_h_loc, tile_v_loc;
+ 	uint16_t tile_h_size, tile_v_size;
++
++	struct list_head destroy_list;
+ };
+ 
+ /**
+diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
+index a2507817be41..86d0b25ed054 100644
+--- a/include/drm/drm_dp_mst_helper.h
++++ b/include/drm/drm_dp_mst_helper.h
+@@ -463,6 +463,10 @@ struct drm_dp_mst_topology_mgr {
+ 	struct work_struct work;
+ 
+ 	struct work_struct tx_work;
++
++	struct list_head destroy_connector_list;
++	struct mutex destroy_connector_lock;
++	struct work_struct destroy_connector_work;
+ };
+ 
+ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, int conn_base_id);
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 5da2d2e9d38e..4550be3bb63b 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -332,9 +332,6 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
+ 
+ int acpi_resources_are_enforced(void);
+ 
+-int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
+-			unsigned long flags, char *desc);
+-
+ #ifdef CONFIG_HIBERNATION
+ void __init acpi_no_s4_hw_signature(void);
+ #endif
+@@ -530,13 +527,6 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
+ 	return 0;
+ }
+ 
+-static inline int acpi_reserve_region(u64 start, unsigned int length,
+-				      u8 space_id, unsigned long flags,
+-				      char *desc)
+-{
+-	return -ENXIO;
+-}
+-
+ struct acpi_table_header;
+ static inline int acpi_table_parse(char *id,
+ 				int (*handler)(struct acpi_table_header *))
+diff --git a/include/linux/ata.h b/include/linux/ata.h
+index b666b773e111..533dbb6428f5 100644
+--- a/include/linux/ata.h
++++ b/include/linux/ata.h
+@@ -45,6 +45,7 @@ enum {
+ 	ATA_SECT_SIZE		= 512,
+ 	ATA_MAX_SECTORS_128	= 128,
+ 	ATA_MAX_SECTORS		= 256,
++	ATA_MAX_SECTORS_1024    = 1024,
+ 	ATA_MAX_SECTORS_LBA48	= 65535,/* TODO: 65536? */
+ 	ATA_MAX_SECTORS_TAPE	= 65535,
+ 
+diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
+index 73b45225a7ca..e6797ded700e 100644
+--- a/include/linux/buffer_head.h
++++ b/include/linux/buffer_head.h
+@@ -317,6 +317,13 @@ sb_getblk(struct super_block *sb, sector_t block)
+ 	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ }
+ 
++
++static inline struct buffer_head *
++sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
++{
++	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
++}
++
+ static inline struct buffer_head *
+ sb_find_get_block(struct super_block *sb, sector_t block)
+ {
+diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
+index 0c9a2f2c2802..d4c71132d07f 100644
+--- a/include/linux/compiler-intel.h
++++ b/include/linux/compiler-intel.h
+@@ -13,10 +13,12 @@
+ /* Intel ECC compiler doesn't support gcc specific asm stmts.
+  * It uses intrinsics to do the equivalent things.
+  */
++#undef barrier
+ #undef barrier_data
+ #undef RELOC_HIDE
+ #undef OPTIMIZER_HIDE_VAR
+ 
++#define barrier() __memory_barrier()
+ #define barrier_data(ptr) barrier()
+ 
+ #define RELOC_HIDE(ptr, off)					\
+diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
+index 3a7c9ffd5ab9..da042657dc31 100644
+--- a/include/linux/gpio/consumer.h
++++ b/include/linux/gpio/consumer.h
+@@ -406,6 +406,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc)
+ 	return -EINVAL;
+ }
+ 
++/* Child properties interface */
++struct fwnode_handle;
++
++static inline struct gpio_desc *fwnode_get_named_gpiod(
++	struct fwnode_handle *fwnode, const char *propname)
++{
++	return ERR_PTR(-ENOSYS);
++}
++
++static inline struct gpio_desc *devm_get_gpiod_from_child(
++	struct device *dev, const char *con_id, struct fwnode_handle *child)
++{
++	return ERR_PTR(-ENOSYS);
++}
++
+ #endif /* CONFIG_GPIOLIB */
+ 
+ /*
+diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
+index 0042bf330b99..c02b5ce6c5cd 100644
+--- a/include/linux/hid-sensor-hub.h
++++ b/include/linux/hid-sensor-hub.h
+@@ -230,6 +230,7 @@ struct hid_sensor_common {
+ 	struct platform_device *pdev;
+ 	unsigned usage_id;
+ 	atomic_t data_ready;
++	atomic_t user_requested_state;
+ 	struct iio_trigger *trigger;
+ 	struct hid_sensor_hub_attribute_info poll;
+ 	struct hid_sensor_hub_attribute_info report_state;
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 20e7f78041c8..edb640ae9a94 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1035,7 +1035,7 @@ struct buffer_head *jbd2_journal_get_descriptor_buffer(journal_t *journal);
+ int jbd2_journal_next_log_block(journal_t *, unsigned long long *);
+ int jbd2_journal_get_log_tail(journal_t *journal, tid_t *tid,
+ 			      unsigned long *block);
+-void __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
++int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
+ 
+ /* Commit management */
+@@ -1157,7 +1157,7 @@ extern int	   jbd2_journal_recover    (journal_t *journal);
+ extern int	   jbd2_journal_wipe       (journal_t *, int);
+ extern int	   jbd2_journal_skip_recovery	(journal_t *);
+ extern void	   jbd2_journal_update_sb_errno(journal_t *);
+-extern void	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
++extern int	   jbd2_journal_update_sb_log_tail	(journal_t *, tid_t,
+ 				unsigned long, int);
+ extern void	   __jbd2_journal_abort_hard	(journal_t *);
+ extern void	   jbd2_journal_abort      (journal_t *, int);
+diff --git a/include/linux/libata.h b/include/linux/libata.h
+index 28aeae46f355..e0e33787c485 100644
+--- a/include/linux/libata.h
++++ b/include/linux/libata.h
+@@ -431,6 +431,9 @@ enum {
+ 	ATA_HORKAGE_NOLPM	= (1 << 20),	/* don't use LPM */
+ 	ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21),	/* some WDs have broken LPM */
+ 	ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
++	ATA_HORKAGE_NO_NCQ_LOG	= (1 << 23),	/* don't use NCQ for log read */
++	ATA_HORKAGE_NOTRIM	= (1 << 24),	/* don't use TRIM */
++	ATA_HORKAGE_MAX_SEC_1024 = (1 << 25),	/* Limit max sects to 1024 */
+ 
+ 	 /* DMA mask for user DMA control: User visible values; DO NOT
+ 	    renumber */
+diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
+index 93ab6071bbe9..e9e9a8dcfb47 100644
+--- a/include/linux/nfs_xdr.h
++++ b/include/linux/nfs_xdr.h
+@@ -1142,7 +1142,7 @@ struct nfs41_state_protection {
+ 	struct nfs4_op_map allow;
+ };
+ 
+-#define NFS4_EXCHANGE_ID_LEN	(48)
++#define NFS4_EXCHANGE_ID_LEN	(127)
+ struct nfs41_exchange_id_args {
+ 	struct nfs_client		*client;
+ 	nfs4_verifier			*verifier;
+diff --git a/include/linux/of.h b/include/linux/of.h
+index b871ff9d81d7..8135d507d089 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -673,7 +673,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
+ #if defined(CONFIG_OF) && defined(CONFIG_NUMA)
+ extern int of_node_to_nid(struct device_node *np);
+ #else
+-static inline int of_node_to_nid(struct device_node *device) { return 0; }
++static inline int of_node_to_nid(struct device_node *device)
++{
++	return NUMA_NO_NODE;
++}
+ #endif
+ 
+ static inline struct device_node *of_find_matching_node(
+diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
+index 551b6737f5df..a7e41fb6ed54 100644
+--- a/include/uapi/drm/i915_drm.h
++++ b/include/uapi/drm/i915_drm.h
+@@ -1065,6 +1065,14 @@ struct drm_i915_reg_read {
+ 	__u64 offset;
+ 	__u64 val; /* Return value */
+ };
++/* Known registers:
++ *
++ * Render engine timestamp - 0x2358 + 64bit - gen7+
++ * - Note this register returns an invalid value if using the default
++ *   single instruction 8byte read, in order to workaround that use
++ *   offset (0x2538 | 1) instead.
++ *
++ */
+ 
+ struct drm_i915_reset_stats {
+ 	__u32 ctx_id;
+diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
+index 7e01f78f0417..9e302315e33d 100644
+--- a/kernel/power/Kconfig
++++ b/kernel/power/Kconfig
+@@ -187,7 +187,7 @@ config DPM_WATCHDOG
+ config DPM_WATCHDOG_TIMEOUT
+ 	int "Watchdog timeout in seconds"
+ 	range 1 120
+-	default 12
++	default 60
+ 	depends on DPM_WATCHDOG
+ 
+ config PM_TRACE
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index c099b082cd02..bff0169e1ad8 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -484,11 +484,11 @@ int check_syslog_permissions(int type, bool from_file)
+ 	 * already done the capabilities checks at open time.
+ 	 */
+ 	if (from_file && type != SYSLOG_ACTION_OPEN)
+-		return 0;
++		goto ok;
+ 
+ 	if (syslog_action_restricted(type)) {
+ 		if (capable(CAP_SYSLOG))
+-			return 0;
++			goto ok;
+ 		/*
+ 		 * For historical reasons, accept CAP_SYS_ADMIN too, with
+ 		 * a warning.
+@@ -498,10 +498,11 @@ int check_syslog_permissions(int type, bool from_file)
+ 				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
+ 				     "(deprecated).\n",
+ 				 current->comm, task_pid_nr(current));
+-			return 0;
++			goto ok;
+ 		}
+ 		return -EPERM;
+ 	}
++ok:
+ 	return security_syslog(type);
+ }
+ 
+@@ -1263,10 +1264,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
+ 	if (error)
+ 		goto out;
+ 
+-	error = security_syslog(type);
+-	if (error)
+-		return error;
+-
+ 	switch (type) {
+ 	case SYSLOG_ACTION_CLOSE:	/* Close log */
+ 		break;
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index d2612016de94..921691c5cb04 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -444,6 +444,7 @@ enum {
+ 
+ 	TRACE_CONTROL_BIT,
+ 
++	TRACE_BRANCH_BIT,
+ /*
+  * Abuse of the trace_recursion.
+  * As we need a way to maintain state if we are tracing the function
+@@ -1312,7 +1313,7 @@ void trace_event_init(void);
+ void trace_event_enum_update(struct trace_enum_map **map, int len);
+ #else
+ static inline void __init trace_event_init(void) { }
+-static inlin void trace_event_enum_update(struct trace_enum_map **map, int len) { }
++static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
+ #endif
+ 
+ extern struct trace_iterator *tracepoint_print_iter;
+diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
+index 57cbf1efdd44..1879980f06c2 100644
+--- a/kernel/trace/trace_branch.c
++++ b/kernel/trace/trace_branch.c
+@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 	struct trace_branch *entry;
+ 	struct ring_buffer *buffer;
+ 	unsigned long flags;
+-	int cpu, pc;
++	int pc;
+ 	const char *p;
+ 
++	if (current->trace_recursion & TRACE_BRANCH_BIT)
++		return;
++
+ 	/*
+ 	 * I would love to save just the ftrace_likely_data pointer, but
+ 	 * this code can also be used by modules. Ugly things can happen
+@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 	if (unlikely(!tr))
+ 		return;
+ 
+-	local_irq_save(flags);
+-	cpu = raw_smp_processor_id();
+-	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+-	if (atomic_inc_return(&data->disabled) != 1)
++	raw_local_irq_save(flags);
++	current->trace_recursion |= TRACE_BRANCH_BIT;
++	data = this_cpu_ptr(tr->trace_buffer.data);
++	if (atomic_read(&data->disabled))
+ 		goto out;
+ 
+ 	pc = preempt_count();
+@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
+ 		__buffer_unlock_commit(buffer, event);
+ 
+  out:
+-	atomic_dec(&data->disabled);
+-	local_irq_restore(flags);
++	current->trace_recursion &= ~TRACE_BRANCH_BIT;
++	raw_local_irq_restore(flags);
+ }
+ 
+ static inline
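
A sketch of the re-entrancy guard probe_likely_condition() gains
above: a per-task flag is tested and set before doing anything that
could itself contain annotated branches, then cleared on the way out.
raw_local_irq_save() is the untraced variant, which keeps the
irq-tracing hooks from recursing back into the probe. The flag
handling mirrors the hunk; the function body is illustrative.

#include <linux/sched.h>

static void probe_sketch(void)
{
	unsigned long flags;

	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;			/* already inside the probe */

	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;

	/* ... record the branch event ... */

	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}
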
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 7f2e97ce71a7..52adf02d7619 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -1056,6 +1056,9 @@ static void parse_init(struct filter_parse_state *ps,
+ 
+ static char infix_next(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return 0;
++
+ 	ps->infix.cnt--;
+ 
+ 	return ps->infix.string[ps->infix.tail++];
+@@ -1071,6 +1074,9 @@ static char infix_peek(struct filter_parse_state *ps)
+ 
+ static void infix_advance(struct filter_parse_state *ps)
+ {
++	if (!ps->infix.cnt)
++		return;
++
+ 	ps->infix.cnt--;
+ 	ps->infix.tail++;
+ }
+@@ -1385,7 +1391,9 @@ static int check_preds(struct filter_parse_state *ps)
+ 		if (elt->op != OP_NOT)
+ 			cnt--;
+ 		n_normal_preds++;
+-		WARN_ON_ONCE(cnt < 0);
++		/* all ops should have operands */
++		if (cnt < 0)
++			break;
+ 	}
+ 
+ 	if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
+diff --git a/lib/bitmap.c b/lib/bitmap.c
+index 64c0926f5dd8..40162f87ea2d 100644
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -506,12 +506,12 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 	unsigned a, b;
+ 	int c, old_c, totaldigits;
+ 	const char __user __force *ubuf = (const char __user __force *)buf;
+-	int exp_digit, in_range;
++	int at_start, in_range;
+ 
+ 	totaldigits = c = 0;
+ 	bitmap_zero(maskp, nmaskbits);
+ 	do {
+-		exp_digit = 1;
++		at_start = 1;
+ 		in_range = 0;
+ 		a = b = 0;
+ 
+@@ -540,11 +540,10 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 				break;
+ 
+ 			if (c == '-') {
+-				if (exp_digit || in_range)
++				if (at_start || in_range)
+ 					return -EINVAL;
+ 				b = 0;
+ 				in_range = 1;
+-				exp_digit = 1;
+ 				continue;
+ 			}
+ 
+@@ -554,16 +553,18 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
+ 			b = b * 10 + (c - '0');
+ 			if (!in_range)
+ 				a = b;
+-			exp_digit = 0;
++			at_start = 0;
+ 			totaldigits++;
+ 		}
+ 		if (!(a <= b))
+ 			return -EINVAL;
+ 		if (b >= nmaskbits)
+ 			return -ERANGE;
+-		while (a <= b) {
+-			set_bit(a, maskp);
+-			a++;
++		if (!at_start) {
++			while (a <= b) {
++				set_bit(a, maskp);
++				a++;
++			}
+ 		}
+ 	} while (buflen && c == ',');
+ 	return 0;
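
The practical effect of the at_start guard above: an empty region used
to fall out of the digit loop with a == b == 0, pass both the a <= b
and b < nmaskbits checks, and set bit 0, so an empty string or the
hole in a list like "1,,3" silently selected bit 0. With the set_bit()
loop skipped whenever no digit was seen, such regions now contribute
nothing.
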
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 271e4432734c..8c4c1f9f9a9a 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -40,6 +40,11 @@ int hugepages_treat_as_movable;
+ int hugetlb_max_hstate __read_mostly;
+ unsigned int default_hstate_idx;
+ struct hstate hstates[HUGE_MAX_HSTATE];
++/*
++ * Minimum page order among possible hugepage sizes, set to a proper value
++ * at boot time.
++ */
++static unsigned int minimum_order __read_mostly = UINT_MAX;
+ 
+ __initdata LIST_HEAD(huge_boot_pages);
+ 
+@@ -1188,19 +1193,13 @@ static void dissolve_free_huge_page(struct page *page)
+  */
+ void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+-	unsigned int order = 8 * sizeof(void *);
+ 	unsigned long pfn;
+-	struct hstate *h;
+ 
+ 	if (!hugepages_supported())
+ 		return;
+ 
+-	/* Set scan step to minimum hugepage size */
+-	for_each_hstate(h)
+-		if (order > huge_page_order(h))
+-			order = huge_page_order(h);
+-	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
+-	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
++	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
++	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
+ 		dissolve_free_huge_page(pfn_to_page(pfn));
+ }
+ 
+@@ -1627,10 +1626,14 @@ static void __init hugetlb_init_hstates(void)
+ 	struct hstate *h;
+ 
+ 	for_each_hstate(h) {
++		if (minimum_order > huge_page_order(h))
++			minimum_order = huge_page_order(h);
++
+ 		/* oversize hugepages were init'ed in early boot */
+ 		if (!hstate_is_gigantic(h))
+ 			hugetlb_hstate_alloc_pages(h);
+ 	}
++	VM_BUG_ON(minimum_order == UINT_MAX);
+ }
+ 
+ static char * __init memfmt(char *buf, unsigned long n)
+diff --git a/mm/memory.c b/mm/memory.c
+index 22e037e3364e..2a9e09870c20 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2669,6 +2669,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
+ 
+ 	pte_unmap(page_table);
+ 
++	/* File mapping without ->vm_ops ? */
++	if (vma->vm_flags & VM_SHARED)
++		return VM_FAULT_SIGBUS;
++
+ 	/* Check if we need to add a guard page to the stack */
+ 	if (check_stack_guard_page(vma, address) < 0)
+ 		return VM_FAULT_SIGSEGV;
+@@ -3097,6 +3101,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ 
+ 	pte_unmap(page_table);
++	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
++	if (!vma->vm_ops->fault)
++		return VM_FAULT_SIGBUS;
+ 	if (!(flags & FAULT_FLAG_WRITE))
+ 		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+ 				orig_pte);
+@@ -3242,13 +3249,12 @@ static int handle_pte_fault(struct mm_struct *mm,
+ 	barrier();
+ 	if (!pte_present(entry)) {
+ 		if (pte_none(entry)) {
+-			if (vma->vm_ops) {
+-				if (likely(vma->vm_ops->fault))
+-					return do_fault(mm, vma, address, pte,
+-							pmd, flags, entry);
+-			}
+-			return do_anonymous_page(mm, vma, address,
+-						 pte, pmd, flags);
++			if (vma->vm_ops)
++				return do_fault(mm, vma, address, pte, pmd,
++						flags, entry);
++
++			return do_anonymous_page(mm, vma, address, pte, pmd,
++					flags);
+ 		}
+ 		return do_swap_page(mm, vma, address,
+ 					pte, pmd, flags, entry);
+diff --git a/net/9p/client.c b/net/9p/client.c
+index 6f4c4c88db84..81925b923318 100644
+--- a/net/9p/client.c
++++ b/net/9p/client.c
+@@ -843,7 +843,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+ 	if (err < 0) {
+ 		if (err == -EIO)
+ 			c->status = Disconnected;
+-		goto reterr;
++		if (err != -ERESTARTSYS)
++			goto reterr;
+ 	}
+ 	if (req->status == REQ_STATUS_ERROR) {
+ 		p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+@@ -1647,6 +1648,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err)
+ 		if (*err) {
+ 			trace_9p_protocol_dump(clnt, req->rc);
+ 			p9_free_req(clnt, req);
++			break;
+ 		}
+ 
+ 		p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count);
+diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
+index 56f9edbf3d05..e11a5cfda4b1 100644
+--- a/net/bluetooth/hci_sock.c
++++ b/net/bluetooth/hci_sock.c
+@@ -741,10 +741,11 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 			goto done;
+ 		}
+ 
+-		if (test_bit(HCI_UP, &hdev->flags) ||
+-		    test_bit(HCI_INIT, &hdev->flags) ||
++		if (test_bit(HCI_INIT, &hdev->flags) ||
+ 		    hci_dev_test_flag(hdev, HCI_SETUP) ||
+-		    hci_dev_test_flag(hdev, HCI_CONFIG)) {
++		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
++		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
++		     test_bit(HCI_UP, &hdev->flags))) {
+ 			err = -EBUSY;
+ 			hci_dev_put(hdev);
+ 			goto done;
+@@ -760,10 +761,21 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ 
+ 		err = hci_dev_open(hdev->id);
+ 		if (err) {
+-			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
+-			mgmt_index_added(hdev);
+-			hci_dev_put(hdev);
+-			goto done;
++			if (err == -EALREADY) {
++				/* In case the transport is already up and
++				 * running, clear the error here.
++				 *
++				 * This can happen when opening an user
++				 * channel and HCI_AUTO_OFF grace period
++				 * is still active.
++				 */
++				err = 0;
++			} else {
++				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
++				mgmt_index_added(hdev);
++				hci_dev_put(hdev);
++				goto done;
++			}
+ 		}
+ 
+ 		atomic_inc(&hdev->promisc);
+diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
+index 15796696d64e..4a3125836b64 100644
+--- a/net/ceph/osdmap.c
++++ b/net/ceph/osdmap.c
+@@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end,
+ {
+ 	int j;
+ 	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
+-	ceph_decode_32_safe(p, end, b->num_nodes, bad);
++	ceph_decode_8_safe(p, end, b->num_nodes, bad);
+ 	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
+ 	if (b->node_weights == NULL)
+ 		return -ENOMEM;
+diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
+index b60c65f70346..627a2537634e 100644
+--- a/net/ieee802154/socket.c
++++ b/net/ieee802154/socket.c
+@@ -739,6 +739,12 @@ static int dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ 	sock_recv_ts_and_drops(msg, sk, skb);
+ 
+ 	if (saddr) {
++		/* Clear the implicit padding in struct sockaddr_ieee802154
++		 * (16 bits between 'family' and 'addr') and in struct
++		 * ieee802154_addr_sa (16 bits at the end of the structure).
++		 */
++		memset(saddr, 0, sizeof(*saddr));
++
+ 		saddr->family = AF_IEEE802154;
+ 		ieee802154_addr_to_sa(&saddr->addr, &mac_cb(skb)->source);
+ 		*addr_len = sizeof(*saddr);
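
The memset() added above is an information-leak fix: saddr points at
the sockaddr buffer that recvmsg() copies back to userspace, and
without zeroing it first the compiler-inserted padding in
struct sockaddr_ieee802154 and struct ieee802154_addr_sa would carry
whatever kernel stack bytes happened to be there out to the caller.
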
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index ff347a0eebd4..f06d42267306 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -3356,6 +3356,7 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
+ 	/* Update CSA counters */
+ 	if (sdata->vif.csa_active &&
+ 	    (sdata->vif.type == NL80211_IFTYPE_AP ||
++	     sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
+ 	     sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
+ 	    params->n_csa_offsets) {
+ 		int i;
+diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
+index bfef1b215050..a9c9d961f039 100644
+--- a/net/mac80211/ibss.c
++++ b/net/mac80211/ibss.c
+@@ -146,6 +146,7 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
+ 				csa_settings->chandef.chan->center_freq);
+ 		presp->csa_counter_offsets[0] = (pos - presp->head);
+ 		*pos++ = csa_settings->count;
++		presp->csa_current_counter = csa_settings->count;
+ 	}
+ 
+ 	/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index df3051d96aff..e86daed83c6f 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -249,6 +249,7 @@ static void ieee80211_restart_work(struct work_struct *work)
+ {
+ 	struct ieee80211_local *local =
+ 		container_of(work, struct ieee80211_local, restart_work);
++	struct ieee80211_sub_if_data *sdata;
+ 
+ 	/* wait for scan work complete */
+ 	flush_workqueue(local->workqueue);
+@@ -257,6 +258,8 @@ static void ieee80211_restart_work(struct work_struct *work)
+ 	     "%s called with hardware scan in progress\n", __func__);
+ 
+ 	rtnl_lock();
++	list_for_each_entry(sdata, &local->interfaces, list)
++		flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ 	ieee80211_scan_cancel(local);
+ 	ieee80211_reconfig(local);
+ 	rtnl_unlock();
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index d4684242e78b..817098add1d6 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -680,6 +680,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
+ 		*pos++ = 0x0;
+ 		*pos++ = ieee80211_frequency_to_channel(
+ 				csa->settings.chandef.chan->center_freq);
++		bcn->csa_current_counter = csa->settings.count;
+ 		bcn->csa_counter_offsets[0] = hdr_len + 6;
+ 		*pos++ = csa->settings.count;
+ 		*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
+diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
+index 9dd0ea8db463..28504dfd3dad 100644
+--- a/net/sunrpc/backchannel_rqst.c
++++ b/net/sunrpc/backchannel_rqst.c
+@@ -60,7 +60,7 @@ static void xprt_free_allocation(struct rpc_rqst *req)
+ 
+ 	dprintk("RPC:        free allocations for req= %p\n", req);
+ 	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
+-	xbufp = &req->rq_private_buf;
++	xbufp = &req->rq_rcv_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+ 	xbufp = &req->rq_snd_buf;
+ 	free_page((unsigned long)xbufp->head[0].iov_base);
+diff --git a/net/wireless/util.c b/net/wireless/util.c
+index 70051ab52f4f..7e4e3fffe7ce 100644
+--- a/net/wireless/util.c
++++ b/net/wireless/util.c
+@@ -944,7 +944,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ 	     ntype == NL80211_IFTYPE_P2P_CLIENT))
+ 		return -EBUSY;
+ 
+-	if (ntype != otype && netif_running(dev)) {
++	if (ntype != otype) {
+ 		dev->ieee80211_ptr->use_4addr = false;
+ 		dev->ieee80211_ptr->mesh_id_up_len = 0;
+ 		wdev_lock(dev->ieee80211_ptr);
+diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
+index 8965d1bb8811..125d6402f64f 100644
+--- a/samples/trace_events/trace-events-sample.h
++++ b/samples/trace_events/trace-events-sample.h
+@@ -168,7 +168,10 @@
+  *
+  *      For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
+  *            Use __get_dynamic_array_len(foo) to get the length of the array
+- *            saved.
++ *            saved. Note, __get_dynamic_array_len() returns the total allocated
++ *            length of the dynamic array; __print_array() expects the second
++ *            parameter to be the number of elements. To get that, the array length
++ *            needs to be divided by the element size.
+  *
+  *      For __string(foo, bar) use __get_str(foo)
+  *
+@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
+  *    This prints out the array that is defined by __array in a nice format.
+  */
+ 		  __print_array(__get_dynamic_array(list),
+-				__get_dynamic_array_len(list),
++				__get_dynamic_array_len(list) / sizeof(int),
+ 				sizeof(int)),
+ 		  __get_str(str), __get_bitmask(cpus))
+ );
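
To make the corrected comment concrete, here is a hypothetical TP_printk() fragment (field names assumed, mirroring the sample file): __get_dynamic_array_len() returns the allocated size in bytes, so the element count handed to __print_array() is that byte length divided by the element size.

	/* Hypothetical fragment: 'list' is assumed to be a
	 * __dynamic_array(int, list, ...) field.  The second argument to
	 * __print_array() is an element count, so convert the byte length
	 * returned by __get_dynamic_array_len() before passing it. */
	TP_printk("list=%s",
		  __print_array(__get_dynamic_array(list),
				__get_dynamic_array_len(list) / sizeof(int),
				sizeof(int)))
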
+diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
+index 10f994307a04..582091498819 100644
+--- a/security/integrity/evm/evm_main.c
++++ b/security/integrity/evm/evm_main.c
+@@ -296,6 +296,17 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ 		iint = integrity_iint_find(d_backing_inode(dentry));
+ 		if (iint && (iint->flags & IMA_NEW_FILE))
+ 			return 0;
++
++		/* exception for pseudo filesystems */
++		if (dentry->d_inode->i_sb->s_magic == TMPFS_MAGIC
++		    || dentry->d_inode->i_sb->s_magic == SYSFS_MAGIC)
++			return 0;
++
++		integrity_audit_msg(AUDIT_INTEGRITY_METADATA,
++				    dentry->d_inode, dentry->d_name.name,
++				    "update_metadata",
++				    integrity_status_msg[evm_status],
++				    -EPERM, 0);
+ 	}
+ out:
+ 	if (evm_status != INTEGRITY_PASS)
+diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
+index 8ee997dff139..fc56d4dfa954 100644
+--- a/security/integrity/ima/ima.h
++++ b/security/integrity/ima/ima.h
+@@ -106,7 +106,7 @@ void ima_add_violation(struct file *file, const unsigned char *filename,
+ 		       const char *op, const char *cause);
+ int ima_init_crypto(void);
+ void ima_putc(struct seq_file *m, void *data, int datalen);
+-void ima_print_digest(struct seq_file *m, u8 *digest, int size);
++void ima_print_digest(struct seq_file *m, u8 *digest, u32 size);
+ struct ima_template_desc *ima_template_desc_current(void);
+ int ima_init_template(void);
+ 
+diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
+index 461215e5fd31..816d175da79a 100644
+--- a/security/integrity/ima/ima_fs.c
++++ b/security/integrity/ima/ima_fs.c
+@@ -190,9 +190,9 @@ static const struct file_operations ima_measurements_ops = {
+ 	.release = seq_release,
+ };
+ 
+-void ima_print_digest(struct seq_file *m, u8 *digest, int size)
++void ima_print_digest(struct seq_file *m, u8 *digest, u32 size)
+ {
+-	int i;
++	u32 i;
+ 
+ 	for (i = 0; i < size; i++)
+ 		seq_printf(m, "%02x", *(digest + i));
+diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
+index d1eefb9d65fb..3997e206f82d 100644
+--- a/security/integrity/ima/ima_policy.c
++++ b/security/integrity/ima/ima_policy.c
+@@ -27,6 +27,8 @@
+ #define IMA_UID		0x0008
+ #define IMA_FOWNER	0x0010
+ #define IMA_FSUUID	0x0020
++#define IMA_INMASK	0x0040
++#define IMA_EUID	0x0080
+ 
+ #define UNKNOWN		0
+ #define MEASURE		0x0001	/* same as IMA_MEASURE */
+@@ -42,6 +44,8 @@ enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
+ 	LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
+ };
+ 
++enum policy_types { ORIGINAL_TCB = 1, DEFAULT_TCB };
++
+ struct ima_rule_entry {
+ 	struct list_head list;
+ 	int action;
+@@ -70,7 +74,7 @@ struct ima_rule_entry {
+  * normal users can easily run the machine out of memory simply building
+  * and running executables.
+  */
+-static struct ima_rule_entry default_rules[] = {
++static struct ima_rule_entry dont_measure_rules[] = {
+ 	{.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+@@ -79,12 +83,31 @@ static struct ima_rule_entry default_rules[] = {
+ 	{.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
++	{.action = DONT_MEASURE, .fsmagic = CGROUP_SUPER_MAGIC,
++	 .flags = IMA_FSMAGIC},
++	{.action = DONT_MEASURE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC}
++};
++
++static struct ima_rule_entry original_measurement_rules[] = {
+ 	{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
+ 	 .flags = IMA_FUNC | IMA_MASK},
+ 	{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
+ 	 .flags = IMA_FUNC | IMA_MASK},
+-	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID,
+-	 .flags = IMA_FUNC | IMA_MASK | IMA_UID},
++	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
++	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID},
++	{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
++	{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
++};
++
++static struct ima_rule_entry default_measurement_rules[] = {
++	{.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
++	 .flags = IMA_FUNC | IMA_MASK},
++	{.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
++	 .flags = IMA_FUNC | IMA_MASK},
++	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
++	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_EUID},
++	{.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ,
++	 .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_INMASK | IMA_UID},
+ 	{.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
+ 	{.action = MEASURE, .func = FIRMWARE_CHECK, .flags = IMA_FUNC},
+ };
+@@ -99,6 +122,7 @@ static struct ima_rule_entry default_appraise_rules[] = {
+ 	{.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
++	{.action = DONT_APPRAISE, .fsmagic = NSFS_MAGIC, .flags = IMA_FSMAGIC},
+ 	{.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ #ifndef CONFIG_IMA_APPRAISE_SIGNED_INIT
+ 	{.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
+@@ -115,14 +139,29 @@ static struct list_head *ima_rules;
+ 
+ static DEFINE_MUTEX(ima_rules_mutex);
+ 
+-static bool ima_use_tcb __initdata;
++static int ima_policy __initdata;
+ static int __init default_measure_policy_setup(char *str)
+ {
+-	ima_use_tcb = 1;
++	if (ima_policy)
++		return 1;
++
++	ima_policy = ORIGINAL_TCB;
+ 	return 1;
+ }
+ __setup("ima_tcb", default_measure_policy_setup);
+ 
++static int __init policy_setup(char *str)
++{
++	if (ima_policy)
++		return 1;
++
++	if (strcmp(str, "tcb") == 0)
++		ima_policy = DEFAULT_TCB;
++
++	return 1;
++}
++__setup("ima_policy=", policy_setup);
++
+ static bool ima_use_appraise_tcb __initdata;
+ static int __init default_appraise_policy_setup(char *str)
+ {
+@@ -182,6 +221,9 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 	if ((rule->flags & IMA_MASK) &&
+ 	    (rule->mask != mask && func != POST_SETATTR))
+ 		return false;
++	if ((rule->flags & IMA_INMASK) &&
++	    (!(rule->mask & mask) && func != POST_SETATTR))
++		return false;
+ 	if ((rule->flags & IMA_FSMAGIC)
+ 	    && rule->fsmagic != inode->i_sb->s_magic)
+ 		return false;
+@@ -190,6 +232,16 @@ static bool ima_match_rules(struct ima_rule_entry *rule,
+ 		return false;
+ 	if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+ 		return false;
++	if (rule->flags & IMA_EUID) {
++		if (has_capability_noaudit(current, CAP_SETUID)) {
++			if (!uid_eq(rule->uid, cred->euid)
++			    && !uid_eq(rule->uid, cred->suid)
++			    && !uid_eq(rule->uid, cred->uid))
++				return false;
++		} else if (!uid_eq(rule->uid, cred->euid))
++			return false;
++	}
++
+ 	if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
+ 		return false;
+ 	for (i = 0; i < MAX_LSM_RULES; i++) {
+@@ -333,21 +385,31 @@ void __init ima_init_policy(void)
+ {
+ 	int i, measure_entries, appraise_entries;
+ 
+-	/* if !ima_use_tcb set entries = 0 so we load NO default rules */
+-	measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
++	/* if !ima_policy set entries = 0 so we load NO default rules */
++	measure_entries = ima_policy ? ARRAY_SIZE(dont_measure_rules) : 0;
+ 	appraise_entries = ima_use_appraise_tcb ?
+ 			 ARRAY_SIZE(default_appraise_rules) : 0;
+ 
+-	for (i = 0; i < measure_entries + appraise_entries; i++) {
+-		if (i < measure_entries)
+-			list_add_tail(&default_rules[i].list,
+-				      &ima_default_rules);
+-		else {
+-			int j = i - measure_entries;
++	for (i = 0; i < measure_entries; i++)
++		list_add_tail(&dont_measure_rules[i].list, &ima_default_rules);
+ 
+-			list_add_tail(&default_appraise_rules[j].list,
++	switch (ima_policy) {
++	case ORIGINAL_TCB:
++		for (i = 0; i < ARRAY_SIZE(original_measurement_rules); i++)
++			list_add_tail(&original_measurement_rules[i].list,
+ 				      &ima_default_rules);
+-		}
++		break;
++	case DEFAULT_TCB:
++		for (i = 0; i < ARRAY_SIZE(default_measurement_rules); i++)
++			list_add_tail(&default_measurement_rules[i].list,
++				      &ima_default_rules);
++	default:
++		break;
++	}
++
++	for (i = 0; i < appraise_entries; i++) {
++		list_add_tail(&default_appraise_rules[i].list,
++			      &ima_default_rules);
+ 	}
+ 
+ 	ima_rules = &ima_default_rules;
+@@ -373,7 +435,8 @@ enum {
+ 	Opt_audit,
+ 	Opt_obj_user, Opt_obj_role, Opt_obj_type,
+ 	Opt_subj_user, Opt_subj_role, Opt_subj_type,
+-	Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
++	Opt_func, Opt_mask, Opt_fsmagic,
++	Opt_uid, Opt_euid, Opt_fowner,
+ 	Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
+ };
+ 
+@@ -394,6 +457,7 @@ static match_table_t policy_tokens = {
+ 	{Opt_fsmagic, "fsmagic=%s"},
+ 	{Opt_fsuuid, "fsuuid=%s"},
+ 	{Opt_uid, "uid=%s"},
++	{Opt_euid, "euid=%s"},
+ 	{Opt_fowner, "fowner=%s"},
+ 	{Opt_appraise_type, "appraise_type=%s"},
+ 	{Opt_permit_directio, "permit_directio"},
+@@ -435,6 +499,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ {
+ 	struct audit_buffer *ab;
++	char *from;
+ 	char *p;
+ 	int result = 0;
+ 
+@@ -525,18 +590,23 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 			if (entry->mask)
+ 				result = -EINVAL;
+ 
+-			if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
++			from = args[0].from;
++			if (*from == '^')
++				from++;
++
++			if ((strcmp(from, "MAY_EXEC")) == 0)
+ 				entry->mask = MAY_EXEC;
+-			else if (strcmp(args[0].from, "MAY_WRITE") == 0)
++			else if (strcmp(from, "MAY_WRITE") == 0)
+ 				entry->mask = MAY_WRITE;
+-			else if (strcmp(args[0].from, "MAY_READ") == 0)
++			else if (strcmp(from, "MAY_READ") == 0)
+ 				entry->mask = MAY_READ;
+-			else if (strcmp(args[0].from, "MAY_APPEND") == 0)
++			else if (strcmp(from, "MAY_APPEND") == 0)
+ 				entry->mask = MAY_APPEND;
+ 			else
+ 				result = -EINVAL;
+ 			if (!result)
+-				entry->flags |= IMA_MASK;
++				entry->flags |= (*args[0].from == '^')
++				     ? IMA_INMASK : IMA_MASK;
+ 			break;
+ 		case Opt_fsmagic:
+ 			ima_log_string(ab, "fsmagic", args[0].from);
+@@ -566,6 +636,9 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 			break;
+ 		case Opt_uid:
+ 			ima_log_string(ab, "uid", args[0].from);
++		case Opt_euid:
++			if (token == Opt_euid)
++				ima_log_string(ab, "euid", args[0].from);
+ 
+ 			if (uid_valid(entry->uid)) {
+ 				result = -EINVAL;
+@@ -574,11 +647,14 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
+ 
+ 			result = kstrtoul(args[0].from, 10, &lnum);
+ 			if (!result) {
+-				entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
+-				if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
++				entry->uid = make_kuid(current_user_ns(),
++						       (uid_t) lnum);
++				if (!uid_valid(entry->uid) ||
++				    (uid_t)lnum != lnum)
+ 					result = -EINVAL;
+ 				else
+-					entry->flags |= IMA_UID;
++					entry->flags |= (token == Opt_uid)
++					    ? IMA_UID : IMA_EUID;
+ 			}
+ 			break;
+ 		case Opt_fowner:
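
The ima_policy.c hunks above backport two rule extensions: a '^' prefix on mask= (e.g. mask=^MAY_READ) that sets IMA_INMASK and matches whenever the requested access merely includes the bit rather than equals it, and an euid= option compared against the effective (and, for CAP_SETUID tasks, also saved and real) UID. A minimal standalone sketch of the two mask modes, with simplified types and the POST_SETATTR exception omitted:

	#include <stdbool.h>

	/* Simplified illustration, not kernel code: IMA_MASK demands an
	 * exact match of the access mask, while IMA_INMASK (the "^" policy
	 * prefix) only demands that the requested access contain the bit. */
	static bool rule_mask_matches(unsigned int rule_mask,
				      unsigned int requested, bool inmask)
	{
		if (inmask)
			return (rule_mask & requested) != 0;
		return rule_mask == requested;
	}
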
+diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
+index bcfc36cbde6a..61fbd0c0d95c 100644
+--- a/security/integrity/ima/ima_template_lib.c
++++ b/security/integrity/ima/ima_template_lib.c
+@@ -70,7 +70,8 @@ static void ima_show_template_data_ascii(struct seq_file *m,
+ 					 enum data_formats datafmt,
+ 					 struct ima_field_data *field_data)
+ {
+-	u8 *buf_ptr = field_data->data, buflen = field_data->len;
++	u8 *buf_ptr = field_data->data;
++	u32 buflen = field_data->len;
+ 
+ 	switch (datafmt) {
+ 	case DATA_FMT_DIGEST_WITH_ALGO:
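
The ima_template_lib.c change also fixes a subtle declaration bug: in the old "u8 *buf_ptr = ..., buflen = ...;" the '*' binds only to the first declarator, so buflen had type u8 and any length above 255 was silently truncated. A small userspace illustration of the pitfall (assumed values, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t buf[4] = { 0 };
		/* 'len' is uint8_t here, not uint8_t *: 300 wraps to 44. */
		uint8_t *p = buf, len = (uint8_t)300;
		uint32_t fixed_len = 300;	/* the corrected width */

		(void)p;
		printf("truncated=%u fixed=%u\n", (unsigned)len, fixed_len);
		return 0;
	}
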
+diff --git a/security/keys/keyring.c b/security/keys/keyring.c
+index e72548b5897e..d33437007ad2 100644
+--- a/security/keys/keyring.c
++++ b/security/keys/keyring.c
+@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
+ 	if (index_key->type == &key_type_keyring)
+ 		up_write(&keyring_serialise_link_sem);
+ 
+-	if (edit && !edit->dead_leaf) {
+-		key_payload_reserve(keyring,
+-				    keyring->datalen - KEYQUOTA_LINK_BYTES);
++	if (edit) {
++		if (!edit->dead_leaf) {
++			key_payload_reserve(keyring,
++				keyring->datalen - KEYQUOTA_LINK_BYTES);
++		}
+ 		assoc_array_cancel_edit(edit);
+ 	}
+ 	up_write(&keyring->sem);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 212070e1de1a..7f8d7f19e044 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3288,7 +3288,8 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
+ 	int rc = 0;
+ 
+ 	if (default_noexec &&
+-	    (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
++	    (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
++				   (!shared && (prot & PROT_WRITE)))) {
+ 		/*
+ 		 * We are making executable an anonymous mapping or a
+ 		 * private file mapping that will also be writable.
+diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
+index afe6a269ec17..57644b1dc42e 100644
+--- a/security/selinux/ss/ebitmap.c
++++ b/security/selinux/ss/ebitmap.c
+@@ -153,6 +153,12 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap,
+ 		if (offset == (u32)-1)
+ 			return 0;
+ 
++		/* don't waste ebitmap space if the netlabel bitmap is empty */
++		if (bitmap == 0) {
++			offset += EBITMAP_UNIT_SIZE;
++			continue;
++		}
++
+ 		if (e_iter == NULL ||
+ 		    offset >= e_iter->startbit + EBITMAP_SIZE) {
+ 			e_prev = e_iter;
+diff --git a/sound/soc/codecs/max98925.c b/sound/soc/codecs/max98925.c
+index 9b5a17de4690..aad664225dc3 100644
+--- a/sound/soc/codecs/max98925.c
++++ b/sound/soc/codecs/max98925.c
+@@ -346,7 +346,7 @@ static int max98925_dai_set_fmt(struct snd_soc_dai *codec_dai,
+ 	}
+ 
+ 	regmap_update_bits(max98925->regmap, MAX98925_FORMAT,
+-			M98925_DAI_BCI_MASK, invert);
++			M98925_DAI_BCI_MASK | M98925_DAI_WCI_MASK, invert);
+ 	return 0;
+ }
+ 
+diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
+index be4d741c45ba..2ee44abd56a6 100644
+--- a/sound/soc/codecs/rt5645.c
++++ b/sound/soc/codecs/rt5645.c
+@@ -2837,6 +2837,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
+ 		}
+ 	}
+ 
++	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
++
+ 	if (rt5645->i2c->irq) {
+ 		ret = request_threaded_irq(rt5645->i2c->irq, NULL, rt5645_irq,
+ 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+@@ -2855,8 +2857,6 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
+ 			dev_err(&i2c->dev, "Fail gpio_direction hp_det_gpio\n");
+ 	}
+ 
+-	INIT_DELAYED_WORK(&rt5645->jack_detect_work, rt5645_jack_detect_work);
+-
+ 	return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5645,
+ 				      rt5645_dai, ARRAY_SIZE(rt5645_dai));
+ }
+diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
+index dfb4ff5cc9ea..18558595ba72 100644
+--- a/sound/soc/codecs/tas2552.c
++++ b/sound/soc/codecs/tas2552.c
+@@ -120,6 +120,9 @@ static void tas2552_sw_shutdown(struct tas2552_data *tas_data, int sw_shutdown)
+ {
+ 	u8 cfg1_reg;
+ 
++	if (!tas_data->codec)
++		return;
++
+ 	if (sw_shutdown)
+ 		cfg1_reg = 0;
+ 	else
+@@ -335,7 +338,6 @@ static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 24);
+ static const struct snd_kcontrol_new tas2552_snd_controls[] = {
+ 	SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+ 			 TAS2552_PGA_GAIN, 0, 0x1f, 1, dac_tlv),
+-	SOC_DAPM_SINGLE("Playback AMP", SND_SOC_NOPM, 0, 1, 0),
+ };
+ 
+ static const struct reg_default tas2552_init_regs[] = {
+diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
+index 0c6d1bc0526e..d476221dba51 100644
+--- a/sound/soc/codecs/wm5102.c
++++ b/sound/soc/codecs/wm5102.c
+@@ -42,7 +42,7 @@ struct wm5102_priv {
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ static const struct wm_adsp_region wm5102_dsp1_regions[] = {
+diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
+index fbaeddb3e903..3ee6cfd0578b 100644
+--- a/sound/soc/codecs/wm5110.c
++++ b/sound/soc/codecs/wm5110.c
+@@ -167,7 +167,7 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ #define WM5110_NG_SRC(name, base) \
+diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
+index ada9ac1ba2c6..51171e457fa4 100644
+--- a/sound/soc/codecs/wm8737.c
++++ b/sound/soc/codecs/wm8737.c
+@@ -483,7 +483,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 			/* Fast VMID ramp at 2*2.5k */
+ 			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-					    WM8737_VMIDSEL_MASK, 0x4);
++					    WM8737_VMIDSEL_MASK,
++					    2 << WM8737_VMIDSEL_SHIFT);
+ 
+ 			/* Bring VMID up */
+ 			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+@@ -497,7 +498,8 @@ static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+ 
+ 		/* VMID at 2*300k */
+ 		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+-				    WM8737_VMIDSEL_MASK, 2);
++				    WM8737_VMIDSEL_MASK,
++				    1 << WM8737_VMIDSEL_SHIFT);
+ 
+ 		break;
+ 
+diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
+index db949311c0f2..0bb4a647755d 100644
+--- a/sound/soc/codecs/wm8903.h
++++ b/sound/soc/codecs/wm8903.h
+@@ -172,7 +172,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
+ #define WM8903_VMID_BUF_ENA_WIDTH                    1  /* VMID_BUF_ENA */
+ 
+ #define WM8903_VMID_RES_50K                          2
+-#define WM8903_VMID_RES_250K                         3
++#define WM8903_VMID_RES_250K                         4
+ #define WM8903_VMID_RES_5K                           6
+ 
+ /*
+diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
+index 00bec915d652..03e04bf6c5ba 100644
+--- a/sound/soc/codecs/wm8955.c
++++ b/sound/soc/codecs/wm8955.c
+@@ -298,7 +298,7 @@ static int wm8955_configure_clocking(struct snd_soc_codec *codec)
+ 		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+ 				    WM8955_K_17_9_MASK,
+ 				    (pll.k >> 9) & WM8955_K_17_9_MASK);
+-		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
++		snd_soc_update_bits(codec, WM8955_PLL_CONTROL_3,
+ 				    WM8955_K_8_0_MASK,
+ 				    pll.k & WM8955_K_8_0_MASK);
+ 		if (pll.k)
+diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
+index e97a7615df85..8d7f63253440 100644
+--- a/sound/soc/codecs/wm8960.c
++++ b/sound/soc/codecs/wm8960.c
+@@ -245,7 +245,7 @@ SOC_SINGLE("PCM Playback -6dB Switch", WM8960_DACCTL1, 7, 1, 0),
+ SOC_ENUM("ADC Polarity", wm8960_enum[0]),
+ SOC_SINGLE("ADC High Pass Filter Switch", WM8960_DACCTL1, 0, 1, 0),
+ 
+-SOC_ENUM("DAC Polarity", wm8960_enum[2]),
++SOC_ENUM("DAC Polarity", wm8960_enum[1]),
+ SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ 		    wm8960_get_deemph, wm8960_put_deemph),
+ 
+diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
+index a4d11770630c..e7c81baefe66 100644
+--- a/sound/soc/codecs/wm8997.c
++++ b/sound/soc/codecs/wm8997.c
+@@ -40,7 +40,7 @@ struct wm8997_priv {
+ static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+ static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+ static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+-static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
++static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
+ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+ 
+ static const struct reg_default wm8997_sysclk_reva_patch[] = {
+diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
+index cd146d4fa805..b38b98cae855 100644
+--- a/sound/soc/fsl/imx-wm8962.c
++++ b/sound/soc/fsl/imx-wm8962.c
+@@ -190,7 +190,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
+ 		dev_err(&pdev->dev, "audmux internal port setup failed\n");
+ 		return ret;
+ 	}
+-	imx_audmux_v2_configure_port(ext_port,
++	ret = imx_audmux_v2_configure_port(ext_port,
+ 			IMX_AUDMUX_V2_PTCR_SYN,
+ 			IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
+ 	if (ret) {
+diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
+index 6768e4f7d7d0..30d0109703a9 100644
+--- a/sound/soc/omap/Kconfig
++++ b/sound/soc/omap/Kconfig
+@@ -100,12 +100,13 @@ config SND_OMAP_SOC_OMAP_TWL4030
+ 
+ config SND_OMAP_SOC_OMAP_ABE_TWL6040
+ 	tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
+-	depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || SOC_OMAP5 || COMPILE_TEST)
++	depends on TWL6040_CORE && SND_OMAP_SOC
++	depends on ARCH_OMAP4 || (SOC_OMAP5 && MFD_PALMAS) || COMPILE_TEST
+ 	select SND_OMAP_SOC_DMIC
+ 	select SND_OMAP_SOC_MCPDM
+ 	select SND_SOC_TWL6040
+ 	select SND_SOC_DMIC
+-	select COMMON_CLK_PALMAS if MFD_PALMAS
++	select COMMON_CLK_PALMAS if (SOC_OMAP5 && MFD_PALMAS)
+ 	help
+ 	  Say Y if you want to add support for SoC audio on OMAP boards using
+ 	  ABE and twl6040 codec. This driver currently supports:
+diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
+index 5f58e4f1bca9..b07f183fc47f 100644
+--- a/sound/soc/qcom/Kconfig
++++ b/sound/soc/qcom/Kconfig
+@@ -6,12 +6,10 @@ config SND_SOC_QCOM
+ 
+ config SND_SOC_LPASS_CPU
+ 	tristate
+-	depends on SND_SOC_QCOM
+ 	select REGMAP_MMIO
+ 
+ config SND_SOC_LPASS_PLATFORM
+ 	tristate
+-	depends on SND_SOC_QCOM
+ 	select REGMAP_MMIO
+ 
+ config SND_SOC_STORM
+diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
+index 85b523885f9d..2babddaa2481 100644
+--- a/tools/perf/util/cloexec.c
++++ b/tools/perf/util/cloexec.c
+@@ -7,11 +7,15 @@
+ 
+ static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+ 
++#ifdef __GLIBC_PREREQ
++#if !__GLIBC_PREREQ(2, 6)
+ int __weak sched_getcpu(void)
+ {
+ 	errno = ENOSYS;
+ 	return -1;
+ }
++#endif
++#endif
+ 
+ static int perf_flag_probe(void)
+ {
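
The tools/perf cloexec.c hunk that closes this patch wraps perf's weak sched_getcpu() fallback in a glibc version check: glibc has provided sched_getcpu() since 2.6, so the ENOSYS stub is compiled only where the C library genuinely lacks the call and cannot clash with the library's own definition. A standalone sketch of the same guard, with perf's __weak helper spelled out as the GCC attribute it expands to (an assumption for illustration):

	#include <errno.h>
	#include <sched.h>

	/* Compile the ENOSYS fallback only on pre-2.6 glibc, which lacks
	 * sched_getcpu(); newer glibc supplies the real call. */
	#ifdef __GLIBC_PREREQ
	#if !__GLIBC_PREREQ(2, 6)
	int __attribute__((weak)) sched_getcpu(void)
	{
		errno = ENOSYS;
		return -1;
	}
	#endif
	#endif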


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-22 10:31 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-22 10:31 UTC (permalink / raw
  To: gentoo-commits

commit:     03632db5397e289b513257c01a2149c7657a3c3c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 22 10:31:21 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 22 10:31:21 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=03632db5

Update kdbus patch. 7-22-2015.

 0000_README                                        |    2 +-
 ...bus-7-17-15.patch => 5015_kdbus-7-22-2015.patch | 2609 +-------------------
 2 files changed, 2 insertions(+), 2609 deletions(-)

diff --git a/0000_README b/0000_README
index 8e9fdc7..eab69c9 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,6 @@ Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
 
-Patch:  5015_kdbus-7-17-15.patch
+Patch:  5015_kdbus-7-22-15.patch
 From:   https://lkml.org
 Desc:   Kernel-level IPC implementation

diff --git a/5015_kdbus-7-17-15.patch b/5015_kdbus-7-22-2015.patch
similarity index 92%
rename from 5015_kdbus-7-17-15.patch
rename to 5015_kdbus-7-22-2015.patch
index 61102dd..b110b5c 100644
--- a/5015_kdbus-7-17-15.patch
+++ b/5015_kdbus-7-22-2015.patch
@@ -8,19 +8,6 @@ index bc05482..e2127a7 100644
 +	filesystems filesystems ia64 kdbus laptops mic misc-devices \
  	networking pcmcia prctl ptp spi timers vDSO video4linux \
  	watchdog
-diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-index f5a8ca2..750d577 100644
---- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
-@@ -1,7 +1,7 @@
- * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
- 
- Required properties:
--- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
-+- compatible: should be "marvell,armada-370-neta".
- - reg: address and length of the register set for the device.
- - interrupts: interrupt for the device
- - phy: See ethernet.txt file in the same directory.
 diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
 index 51f4221..ec7c81b 100644
 --- a/Documentation/ioctl/ioctl-number.txt
@@ -7470,20 +7457,9 @@ index d8afd29..02f7668 100644
  M:	Vivek Goyal <vgoyal@redhat.com>
  M:	Haren Myneni <hbabu@us.ibm.com>
 diff --git a/Makefile b/Makefile
-index cef84c0..a1c8d57 100644
+index f5c8983..a1c8d57 100644
 --- a/Makefile
 +++ b/Makefile
-@@ -1,8 +1,8 @@
- VERSION = 4
- PATCHLEVEL = 1
--SUBLEVEL = 2
-+SUBLEVEL = 0
- EXTRAVERSION =
--NAME = Series 4800
-+NAME = Hurr durr I'ma sheep
- 
- # *DOCUMENTATION*
- # To see a list of typical targets execute "make help"
 @@ -1343,6 +1343,7 @@ $(help-board-dirs): help-%:
  %docs: scripts_basic FORCE
  	$(Q)$(MAKE) $(build)=scripts build_docproc
@@ -7492,2075 +7468,6 @@ index cef84c0..a1c8d57 100644
  
  else # KBUILD_EXTMOD
  
-diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
-index 06a2f2a..ec96f0b 100644
---- a/arch/arm/boot/dts/armada-370-xp.dtsi
-+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
-@@ -270,6 +270,7 @@
- 			};
- 
- 			eth0: ethernet@70000 {
-+				compatible = "marvell,armada-370-neta";
- 				reg = <0x70000 0x4000>;
- 				interrupts = <8>;
- 				clocks = <&gateclk 4>;
-@@ -285,6 +286,7 @@
- 			};
- 
- 			eth1: ethernet@74000 {
-+				compatible = "marvell,armada-370-neta";
- 				reg = <0x74000 0x4000>;
- 				interrupts = <10>;
- 				clocks = <&gateclk 3>;
-diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
-index ca4257b..00b50db5 100644
---- a/arch/arm/boot/dts/armada-370.dtsi
-+++ b/arch/arm/boot/dts/armada-370.dtsi
-@@ -307,14 +307,6 @@
- 					dmacap,memset;
- 				};
- 			};
--
--			ethernet@70000 {
--				compatible = "marvell,armada-370-neta";
--			};
--
--			ethernet@74000 {
--				compatible = "marvell,armada-370-neta";
--			};
- 		};
- 	};
- };
-diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-index c5fdc99..8479fdc 100644
---- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
-@@ -318,7 +318,7 @@
- 			};
- 
- 			eth3: ethernet@34000 {
--				compatible = "marvell,armada-xp-neta";
-+				compatible = "marvell,armada-370-neta";
- 				reg = <0x34000 0x4000>;
- 				interrupts = <14>;
- 				clocks = <&gateclk 1>;
-diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-index 0e24f1a..661d54c 100644
---- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
-@@ -356,7 +356,7 @@
- 			};
- 
- 			eth3: ethernet@34000 {
--				compatible = "marvell,armada-xp-neta";
-+				compatible = "marvell,armada-370-neta";
- 				reg = <0x34000 0x4000>;
- 				interrupts = <14>;
- 				clocks = <&gateclk 1>;
-diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
-index 8fdd6d7..013d63f 100644
---- a/arch/arm/boot/dts/armada-xp.dtsi
-+++ b/arch/arm/boot/dts/armada-xp.dtsi
-@@ -177,7 +177,7 @@
- 			};
- 
- 			eth2: ethernet@30000 {
--				compatible = "marvell,armada-xp-neta";
-+				compatible = "marvell,armada-370-neta";
- 				reg = <0x30000 0x4000>;
- 				interrupts = <12>;
- 				clocks = <&gateclk 2>;
-@@ -220,14 +220,6 @@
- 				};
- 			};
- 
--			ethernet@70000 {
--				compatible = "marvell,armada-xp-neta";
--			};
--
--			ethernet@74000 {
--				compatible = "marvell,armada-xp-neta";
--			};
--
- 			xor@f0900 {
- 				compatible = "marvell,orion-xor";
- 				reg = <0xF0900 0x100
-diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
-index 3794ca1..2fd8988 100644
---- a/arch/arm/boot/dts/sun5i-a10s.dtsi
-+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
-@@ -573,7 +573,7 @@
- 		};
- 
- 		rtp: rtp@01c25000 {
--			compatible = "allwinner,sun5i-a13-ts";
-+			compatible = "allwinner,sun4i-a10-ts";
- 			reg = <0x01c25000 0x100>;
- 			interrupts = <29>;
- 			#thermal-sensor-cells = <0>;
-diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
-index 5098185..883cb48 100644
---- a/arch/arm/boot/dts/sun5i-a13.dtsi
-+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
-@@ -555,7 +555,7 @@
- 		};
- 
- 		rtp: rtp@01c25000 {
--			compatible = "allwinner,sun5i-a13-ts";
-+			compatible = "allwinner,sun4i-a10-ts";
- 			reg = <0x01c25000 0x100>;
- 			interrupts = <29>;
- 			#thermal-sensor-cells = <0>;
-diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
-index 2b4847c..fdd1817 100644
---- a/arch/arm/boot/dts/sun7i-a20.dtsi
-+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
-@@ -1042,7 +1042,7 @@
- 		};
- 
- 		rtp: rtp@01c25000 {
--			compatible = "allwinner,sun5i-a13-ts";
-+			compatible = "allwinner,sun4i-a10-ts";
- 			reg = <0x01c25000 0x100>;
- 			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
- 			#thermal-sensor-cells = <0>;
-diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
-index f7db3a5..79caf79 100644
---- a/arch/arm/kvm/interrupts.S
-+++ b/arch/arm/kvm/interrupts.S
-@@ -170,9 +170,13 @@ __kvm_vcpu_return:
- 	@ Don't trap coprocessor accesses for host kernel
- 	set_hstr vmexit
- 	set_hdcr vmexit
--	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
-+	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
- 
- #ifdef CONFIG_VFPv3
-+	@ Save floating point registers we if let guest use them.
-+	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
-+	bne	after_vfp_restore
-+
- 	@ Switch VFP/NEON hardware state to the host's
- 	add	r7, vcpu, #VCPU_VFP_GUEST
- 	store_vfp_state r7
-@@ -184,8 +188,6 @@ after_vfp_restore:
- 	@ Restore FPEXC_EN which we clobbered on entry
- 	pop	{r2}
- 	VFPFMXR FPEXC, r2
--#else
--after_vfp_restore:
- #endif
- 
- 	@ Reset Hyp-role
-@@ -481,7 +483,7 @@ switch_to_guest_vfp:
- 	push	{r3-r7}
- 
- 	@ NEON/VFP used.  Turn on VFP access.
--	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
-+	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
- 
- 	@ Switch VFP/NEON hardware state to the guest's
- 	add	r7, r0, #VCPU_VFP_HOST
-diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
-index 48efe2e..35e4a3a 100644
---- a/arch/arm/kvm/interrupts_head.S
-+++ b/arch/arm/kvm/interrupts_head.S
-@@ -591,13 +591,8 @@ ARM_BE8(rev	r6, r6  )
- .endm
- 
- /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
-- * (hardware reset value is 0). Keep previous value in r2.
-- * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
-- * VFP wasn't already enabled (always executed on vmtrap).
-- * If a label is specified with vmexit, it is branched to if VFP wasn't
-- * enabled.
-- */
--.macro set_hcptr operation, mask, label = none
-+ * (hardware reset value is 0). Keep previous value in r2. */
-+.macro set_hcptr operation, mask
- 	mrc	p15, 4, r2, c1, c1, 2
- 	ldr	r3, =\mask
- 	.if \operation == vmentry
-@@ -606,17 +601,6 @@ ARM_BE8(rev	r6, r6  )
- 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
- 	.endif
- 	mcr	p15, 4, r3, c1, c1, 2
--	.if \operation != vmentry
--	.if \operation == vmexit
--	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
--	beq	1f
--	.endif
--	isb
--	.if \label != none
--	b	\label
--	.endif
--1:
--	.endif
- .endm
- 
- /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
-diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
-index 531e922..02fa8ef 100644
---- a/arch/arm/kvm/psci.c
-+++ b/arch/arm/kvm/psci.c
-@@ -230,6 +230,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- 	case PSCI_0_2_FN64_AFFINITY_INFO:
- 		val = kvm_psci_vcpu_affinity_info(vcpu);
- 		break;
-+	case PSCI_0_2_FN_MIGRATE:
-+	case PSCI_0_2_FN64_MIGRATE:
-+		val = PSCI_RET_NOT_SUPPORTED;
-+		break;
- 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
- 		/*
- 		 * Trusted OS is MP hence does not require migration
-@@ -238,6 +242,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- 		 */
- 		val = PSCI_0_2_TOS_MP;
- 		break;
-+	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
-+	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
-+		val = PSCI_RET_NOT_SUPPORTED;
-+		break;
- 	case PSCI_0_2_FN_SYSTEM_OFF:
- 		kvm_psci_system_off(vcpu);
- 		/*
-@@ -263,8 +271,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
- 		ret = 0;
- 		break;
- 	default:
--		val = PSCI_RET_NOT_SUPPORTED;
--		break;
-+		return -EINVAL;
- 	}
- 
- 	*vcpu_reg(vcpu, 0) = val;
-@@ -284,9 +291,12 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
- 	case KVM_PSCI_FN_CPU_ON:
- 		val = kvm_psci_vcpu_on(vcpu);
- 		break;
--	default:
-+	case KVM_PSCI_FN_CPU_SUSPEND:
-+	case KVM_PSCI_FN_MIGRATE:
- 		val = PSCI_RET_NOT_SUPPORTED;
- 		break;
-+	default:
-+		return -EINVAL;
- 	}
- 
- 	*vcpu_reg(vcpu, 0) = val;
-diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
-index a2e8ef3..469a150 100644
---- a/arch/arm/mach-imx/clk-imx6q.c
-+++ b/arch/arm/mach-imx/clk-imx6q.c
-@@ -443,7 +443,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
- 	clk[IMX6QDL_CLK_GPMI_IO]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
- 	clk[IMX6QDL_CLK_GPMI_APB]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
- 	clk[IMX6QDL_CLK_ROM]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
--	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ahb",               base + 0x7c, 4);
-+	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
- 	clk[IMX6QDL_CLK_SDMA]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
- 	clk[IMX6QDL_CLK_SPBA]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
- 	clk[IMX6QDL_CLK_SPDIF]        = imx_clk_gate2("spdif",         "spdif_podf",        base + 0x7c, 14);
-diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
-index 301ab38..6dfd4ab 100644
---- a/arch/arm/mach-mvebu/pm-board.c
-+++ b/arch/arm/mach-mvebu/pm-board.c
-@@ -43,9 +43,6 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
- 	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
- 		ackcmd |= BIT(pic_raw_gpios[i]);
- 
--	srcmd = cpu_to_le32(srcmd);
--	ackcmd = cpu_to_le32(ackcmd);
--
- 	/*
- 	 * Wait a while, the PIC needs quite a bit of time between the
- 	 * two GPIO commands.
-diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
-index 7469347..88de2dc 100644
---- a/arch/arm/mach-tegra/cpuidle-tegra20.c
-+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
-@@ -34,7 +34,6 @@
- #include "iomap.h"
- #include "irq.h"
- #include "pm.h"
--#include "reset.h"
- #include "sleep.h"
- 
- #ifdef CONFIG_PM_SLEEP
-@@ -71,13 +70,15 @@ static struct cpuidle_driver tegra_idle_driver = {
- 
- #ifdef CONFIG_PM_SLEEP
- #ifdef CONFIG_SMP
-+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
-+
- static int tegra20_reset_sleeping_cpu_1(void)
- {
- 	int ret = 0;
- 
- 	tegra_pen_lock();
- 
--	if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
-+	if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
- 		tegra20_cpu_shutdown(1);
- 	else
- 		ret = -EINVAL;
-diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
-index e3070fd..71be4af 100644
---- a/arch/arm/mach-tegra/reset-handler.S
-+++ b/arch/arm/mach-tegra/reset-handler.S
-@@ -169,10 +169,10 @@ after_errata:
- 	cmp	r6, #TEGRA20
- 	bne	1f
- 	/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
--	mov32	r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
--	mov	r0, #CPU_NOT_RESETTABLE
-+	mov32	r5, TEGRA_PMC_BASE
-+	mov	r0, #0
- 	cmp	r10, #0
--	strneb	r0, [r5, #__tegra20_cpu1_resettable_status_offset]
-+	strne	r0, [r5, #PMC_SCRATCH41]
- 1:
- #endif
- 
-@@ -281,10 +281,6 @@ __tegra_cpu_reset_handler_data:
- 	.rept	TEGRA_RESET_DATA_SIZE
- 	.long	0
- 	.endr
--	.globl	__tegra20_cpu1_resettable_status_offset
--	.equ	__tegra20_cpu1_resettable_status_offset, \
--					. - __tegra_cpu_reset_handler_start
--	.byte	0
- 	.align L1_CACHE_SHIFT
- 
- ENTRY(__tegra_cpu_reset_handler_end)
-diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
-index 29c3dec..76a9343 100644
---- a/arch/arm/mach-tegra/reset.h
-+++ b/arch/arm/mach-tegra/reset.h
-@@ -35,7 +35,6 @@ extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
- 
- void __tegra_cpu_reset_handler_start(void);
- void __tegra_cpu_reset_handler(void);
--void __tegra20_cpu1_resettable_status_offset(void);
- void __tegra_cpu_reset_handler_end(void);
- void tegra_secondary_startup(void);
- 
-@@ -48,9 +47,6 @@ void tegra_secondary_startup(void);
- 	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
- 	((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
- 	 (u32)__tegra_cpu_reset_handler_start)))
--#define tegra20_cpu1_resettable_status \
--	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
--	 (u32)__tegra20_cpu1_resettable_status_offset))
- #endif
- 
- #define tegra_cpu_reset_handler_offset \
-diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
-index e6b684e..be4bc5f 100644
---- a/arch/arm/mach-tegra/sleep-tegra20.S
-+++ b/arch/arm/mach-tegra/sleep-tegra20.S
-@@ -97,10 +97,9 @@ ENDPROC(tegra20_hotplug_shutdown)
- ENTRY(tegra20_cpu_shutdown)
- 	cmp	r0, #0
- 	reteq	lr			@ must not be called for CPU 0
--	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r2, =__tegra20_cpu1_resettable_status_offset
-+	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
- 	mov	r12, #CPU_RESETTABLE
--	strb	r12, [r1, r2]
-+	str	r12, [r1]
- 
- 	cpu_to_halt_reg r1, r0
- 	ldr	r3, =TEGRA_FLOW_CTRL_VIRT
-@@ -183,41 +182,38 @@ ENDPROC(tegra_pen_unlock)
- /*
-  * tegra20_cpu_clear_resettable(void)
-  *
-- * Called to clear the "resettable soon" flag in IRAM variable when
-+ * Called to clear the "resettable soon" flag in PMC_SCRATCH41 when
-  * it is expected that the secondary CPU will be idle soon.
-  */
- ENTRY(tegra20_cpu_clear_resettable)
--	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r2, =__tegra20_cpu1_resettable_status_offset
-+	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
- 	mov	r12, #CPU_NOT_RESETTABLE
--	strb	r12, [r1, r2]
-+	str	r12, [r1]
- 	ret	lr
- ENDPROC(tegra20_cpu_clear_resettable)
- 
- /*
-  * tegra20_cpu_set_resettable_soon(void)
-  *
-- * Called to set the "resettable soon" flag in IRAM variable when
-+ * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
-  * it is expected that the secondary CPU will be idle soon.
-  */
- ENTRY(tegra20_cpu_set_resettable_soon)
--	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r2, =__tegra20_cpu1_resettable_status_offset
-+	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
- 	mov	r12, #CPU_RESETTABLE_SOON
--	strb	r12, [r1, r2]
-+	str	r12, [r1]
- 	ret	lr
- ENDPROC(tegra20_cpu_set_resettable_soon)
- 
- /*
-  * tegra20_cpu_is_resettable_soon(void)
-  *
-- * Returns true if the "resettable soon" flag in IRAM variable has been
-+ * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
-  * set because it is expected that the secondary CPU will be idle soon.
-  */
- ENTRY(tegra20_cpu_is_resettable_soon)
--	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r2, =__tegra20_cpu1_resettable_status_offset
--	ldrb	r12, [r1, r2]
-+	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
-+	ldr	r12, [r1]
- 	cmp	r12, #CPU_RESETTABLE_SOON
- 	moveq	r0, #1
- 	movne	r0, #0
-@@ -260,10 +256,9 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
- 	mov	r0, #TEGRA_FLUSH_CACHE_LOUIS
- 	bl	tegra_disable_clean_inv_dcache
- 
--	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r4, =__tegra20_cpu1_resettable_status_offset
-+	mov32	r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
- 	mov	r3, #CPU_RESETTABLE
--	strb	r3, [r0, r4]
-+	str	r3, [r0]
- 
- 	bl	tegra_cpu_do_idle
- 
-@@ -279,10 +274,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
- 
- 	bl	tegra_pen_lock
- 
--	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
--	ldr	r4, =__tegra20_cpu1_resettable_status_offset
-+	mov32	r3, TEGRA_PMC_VIRT
-+	add	r0, r3, #PMC_SCRATCH41
- 	mov	r3, #CPU_NOT_RESETTABLE
--	strb	r3, [r0, r4]
-+	str	r3, [r0]
- 
- 	bl	tegra_pen_unlock
- 
-diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
-index 0d59360..92d46ec 100644
---- a/arch/arm/mach-tegra/sleep.h
-+++ b/arch/arm/mach-tegra/sleep.h
-@@ -18,7 +18,6 @@
- #define __MACH_TEGRA_SLEEP_H
- 
- #include "iomap.h"
--#include "irammap.h"
- 
- #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS \
- 					+ IO_CPU_VIRT)
-@@ -30,9 +29,6 @@
- 					+ IO_APB_VIRT)
- #define TEGRA_PMC_VIRT	(TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
- 
--#define TEGRA_IRAM_RESET_BASE_VIRT (IO_IRAM_VIRT + \
--				TEGRA_IRAM_RESET_HANDLER_OFFSET)
--
- /* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock and idle */
- #define PMC_SCRATCH37	0x130
- #define PMC_SCRATCH38	0x134
-diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
-index afc96ec..9488fa5 100644
---- a/arch/mips/include/asm/mach-generic/spaces.h
-+++ b/arch/mips/include/asm/mach-generic/spaces.h
-@@ -94,11 +94,7 @@
- #endif
- 
- #ifndef FIXADDR_TOP
--#ifdef CONFIG_KVM_GUEST
--#define FIXADDR_TOP		((unsigned long)(long)(int)0x7ffe0000)
--#else
- #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
- #endif
--#endif
- 
- #endif /* __ASM_MACH_GENERIC_SPACES_H */
-diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
-index 52f205a..bb68e8d 100644
---- a/arch/mips/kvm/mips.c
-+++ b/arch/mips/kvm/mips.c
-@@ -982,7 +982,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
- 
- 	/* If nothing is dirty, don't bother messing with page tables. */
- 	if (is_dirty) {
--		memslot = id_to_memslot(kvm->memslots, log->slot);
-+		memslot = &kvm->memslots->memslots[log->slot];
- 
- 		ga = memslot->base_gfn << PAGE_SHIFT;
- 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
-diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
-index d90893b..12b6384 100644
---- a/arch/powerpc/perf/core-book3s.c
-+++ b/arch/powerpc/perf/core-book3s.c
-@@ -131,16 +131,7 @@ static void pmao_restore_workaround(bool ebb) { }
- 
- static bool regs_use_siar(struct pt_regs *regs)
- {
--	/*
--	 * When we take a performance monitor exception the regs are setup
--	 * using perf_read_regs() which overloads some fields, in particular
--	 * regs->result to tell us whether to use SIAR.
--	 *
--	 * However if the regs are from another exception, eg. a syscall, then
--	 * they have not been setup using perf_read_regs() and so regs->result
--	 * is something random.
--	 */
--	return ((TRAP(regs) == 0xf00) && regs->result);
-+	return !!regs->result;
- }
- 
- /*
-diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
-index 49b7445..9f73c80 100644
---- a/arch/s390/kernel/crash_dump.c
-+++ b/arch/s390/kernel/crash_dump.c
-@@ -415,7 +415,7 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
- 	ptr += len;
- 	/* Copy lower halves of SIMD registers 0-15 */
- 	for (i = 0; i < 16; i++) {
--		memcpy(ptr, &vx_regs[i].u[2], 8);
-+		memcpy(ptr, &vx_regs[i], 8);
- 		ptr += 8;
- 	}
- 	return ptr;
-diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
-index b745a10..9de4726 100644
---- a/arch/s390/kvm/interrupt.c
-+++ b/arch/s390/kvm/interrupt.c
-@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
- 	if (sclp_has_sigpif())
- 		return __inject_extcall_sigpif(vcpu, src_id);
- 
--	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
-+	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
- 		return -EBUSY;
- 	*extcall = irq->u.extcall;
- 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-@@ -1606,9 +1606,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
- 	int i;
- 
- 	spin_lock(&fi->lock);
--	fi->pending_irqs = 0;
--	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
--	memset(&fi->mchk, 0, sizeof(fi->mchk));
- 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
- 		clear_irq_list(&fi->lists[i]);
- 	for (i = 0; i < FIRQ_MAX_COUNT; i++)
-diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
-index 9afb9d6..55423d8 100644
---- a/arch/s390/net/bpf_jit_comp.c
-+++ b/arch/s390/net/bpf_jit_comp.c
-@@ -227,7 +227,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
- ({								\
- 	/* Branch instruction needs 6 bytes */			\
- 	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
--	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
-+	_EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask);	\
- 	REG_SET_SEEN(b1);					\
- 	REG_SET_SEEN(b2);					\
- })
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 41b06fc..f4a555b 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -591,7 +591,7 @@ struct kvm_arch {
- 	struct kvm_pic *vpic;
- 	struct kvm_ioapic *vioapic;
- 	struct kvm_pit *vpit;
--	atomic_t vapics_in_nmi_mode;
-+	int vapics_in_nmi_mode;
- 	struct mutex apic_map_lock;
- 	struct kvm_apic_map *apic_map;
- 
-diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
-index aa4e3a7..4f7001f 100644
---- a/arch/x86/kernel/cpu/perf_event.c
-+++ b/arch/x86/kernel/cpu/perf_event.c
-@@ -270,7 +270,11 @@ msr_fail:
- 
- static void hw_perf_event_destroy(struct perf_event *event)
- {
--	x86_release_hardware();
-+	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
-+		release_pmc_hardware();
-+		release_ds_buffers();
-+		mutex_unlock(&pmc_reserve_mutex);
-+	}
- }
- 
- void hw_perf_lbr_event_destroy(struct perf_event *event)
-@@ -320,35 +324,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
- 	return x86_pmu_extra_regs(val, event);
- }
- 
--int x86_reserve_hardware(void)
--{
--	int err = 0;
--
--	if (!atomic_inc_not_zero(&active_events)) {
--		mutex_lock(&pmc_reserve_mutex);
--		if (atomic_read(&active_events) == 0) {
--			if (!reserve_pmc_hardware())
--				err = -EBUSY;
--			else
--				reserve_ds_buffers();
--		}
--		if (!err)
--			atomic_inc(&active_events);
--		mutex_unlock(&pmc_reserve_mutex);
--	}
--
--	return err;
--}
--
--void x86_release_hardware(void)
--{
--	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
--		release_pmc_hardware();
--		release_ds_buffers();
--		mutex_unlock(&pmc_reserve_mutex);
--	}
--}
--
- /*
-  * Check if we can create event of a certain type (that no conflicting events
-  * are present).
-@@ -361,10 +336,9 @@ int x86_add_exclusive(unsigned int what)
- 		return 0;
- 
- 	mutex_lock(&pmc_reserve_mutex);
--	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-+	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
- 		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
- 			goto out;
--	}
- 
- 	atomic_inc(&x86_pmu.lbr_exclusive[what]);
- 	ret = 0;
-@@ -553,7 +527,19 @@ static int __x86_pmu_event_init(struct perf_event *event)
- 	if (!x86_pmu_initialized())
- 		return -ENODEV;
- 
--	err = x86_reserve_hardware();
-+	err = 0;
-+	if (!atomic_inc_not_zero(&active_events)) {
-+		mutex_lock(&pmc_reserve_mutex);
-+		if (atomic_read(&active_events) == 0) {
-+			if (!reserve_pmc_hardware())
-+				err = -EBUSY;
-+			else
-+				reserve_ds_buffers();
-+		}
-+		if (!err)
-+			atomic_inc(&active_events);
-+		mutex_unlock(&pmc_reserve_mutex);
-+	}
- 	if (err)
- 		return err;
- 
-diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
-index f068695..ef78516 100644
---- a/arch/x86/kernel/cpu/perf_event.h
-+++ b/arch/x86/kernel/cpu/perf_event.h
-@@ -703,10 +703,6 @@ int x86_add_exclusive(unsigned int what);
- 
- void x86_del_exclusive(unsigned int what);
- 
--int x86_reserve_hardware(void);
--
--void x86_release_hardware(void);
--
- void hw_perf_lbr_event_destroy(struct perf_event *event);
- 
- int x86_setup_perfctr(struct perf_event *event);
-diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
-index 2813ea0..a1e35c9 100644
---- a/arch/x86/kernel/cpu/perf_event_intel.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel.c
-@@ -3253,8 +3253,6 @@ __init int intel_pmu_init(void)
- 
- 	case 61: /* 14nm Broadwell Core-M */
- 	case 86: /* 14nm Broadwell Xeon D */
--	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
--	case 79: /* 14nm Broadwell Server */
- 		x86_pmu.late_ack = true;
- 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
- 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
-@@ -3324,13 +3322,13 @@ __init int intel_pmu_init(void)
- 		 * counter, so do not extend mask to generic counters
- 		 */
- 		for_each_event_constraint(c, x86_pmu.event_constraints) {
--			if (c->cmask == FIXED_EVENT_FLAGS
--			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
--				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-+			if (c->cmask != FIXED_EVENT_FLAGS
-+			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-+				continue;
- 			}
--			c->idxmsk64 &=
--				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
--			c->weight = hweight64(c->idxmsk64);
-+
-+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-+			c->weight += x86_pmu.num_counters;
- 		}
- 	}
- 
-diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
-index 7795f3f..ac1f0c5 100644
---- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
-+++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
-@@ -483,26 +483,17 @@ static int bts_event_add(struct perf_event *event, int mode)
- 
- static void bts_event_destroy(struct perf_event *event)
- {
--	x86_release_hardware();
- 	x86_del_exclusive(x86_lbr_exclusive_bts);
- }
- 
- static int bts_event_init(struct perf_event *event)
- {
--	int ret;
--
- 	if (event->attr.type != bts_pmu.type)
- 		return -ENOENT;
- 
- 	if (x86_add_exclusive(x86_lbr_exclusive_bts))
- 		return -EBUSY;
- 
--	ret = x86_reserve_hardware();
--	if (ret) {
--		x86_del_exclusive(x86_lbr_exclusive_bts);
--		return ret;
--	}
--
- 	event->destroy = bts_event_destroy;
- 
- 	return 0;
-diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
-index 7e429c9..53eeb22 100644
---- a/arch/x86/kernel/head_32.S
-+++ b/arch/x86/kernel/head_32.S
-@@ -62,16 +62,9 @@
- #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
- #endif
- 
--/*
-- * Number of possible pages in the lowmem region.
-- *
-- * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
-- * gas warning about overflowing shift count when gas has been compiled
-- * with only a host target support using a 32-bit type for internal
-- * representation.
-- */
--LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
--
-+/* Number of possible pages in the lowmem region */
-+LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
-+	
- /* Enough space to fit pagetables for the low memory linear map */
- MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
- 
-diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
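
As an aside on the LOWMEM_PAGES hunk above: the two spellings are numerically identical, since 2 << 31 == 1 << 32 == 0x100000000 when evaluated in a wide enough type; the 2<<31 form exists only to keep every shift count below 32 for gas builds whose internal representation is a 32-bit type. A quick userspace check of the identity (illustrative only):

	#include <assert.h>

	int main(void)
	{
		/* Both forms name the 4 GiB boundary. */
		assert((2ULL << 31) == (1ULL << 32));
		assert((1ULL << 32) == 0x100000000ULL);
		return 0;
	}
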
-index f90952f..4dce6f8 100644
---- a/arch/x86/kvm/i8254.c
-+++ b/arch/x86/kvm/i8254.c
-@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
- 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
- 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
- 		 */
--		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
-+		if (kvm->arch.vapics_in_nmi_mode > 0)
- 			kvm_for_each_vcpu(i, vcpu, kvm)
- 				kvm_apic_nmi_wd_deliver(vcpu);
- 	}
-diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
-index 67d07e0..4c7deb4 100644
---- a/arch/x86/kvm/lapic.c
-+++ b/arch/x86/kvm/lapic.c
-@@ -1250,10 +1250,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
- 		if (!nmi_wd_enabled) {
- 			apic_debug("Receive NMI setting on APIC_LVT0 "
- 				   "for cpu %d\n", apic->vcpu->vcpu_id);
--			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
-+			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
- 		}
- 	} else if (nmi_wd_enabled)
--		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
-+		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
- }
- 
- static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
-@@ -1808,7 +1808,6 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
- 	apic_update_ppr(apic);
- 	hrtimer_cancel(&apic->lapic_timer.timer);
- 	apic_update_lvtt(apic);
--	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
- 	update_divide_count(apic);
- 	start_apic_timer(apic);
- 	apic->irr_pending = true;
-diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
-index 4911bf1..9afa233 100644
---- a/arch/x86/kvm/svm.c
-+++ b/arch/x86/kvm/svm.c
-@@ -511,10 +511,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
- {
- 	struct vcpu_svm *svm = to_svm(vcpu);
- 
--	if (svm->vmcb->control.next_rip != 0) {
--		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
-+	if (svm->vmcb->control.next_rip != 0)
- 		svm->next_rip = svm->vmcb->control.next_rip;
--	}
- 
- 	if (!svm->next_rip) {
- 		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
-@@ -4319,9 +4317,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
- 		break;
- 	}
- 
--	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
--	if (static_cpu_has(X86_FEATURE_NRIPS))
--		vmcb->control.next_rip  = info->next_rip;
-+	vmcb->control.next_rip  = info->next_rip;
- 	vmcb->control.exit_code = icpt_info.exit_code;
- 	vmexit = nested_svm_exit_handled(svm);
- 
-diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
-index ff99117..14a63ed 100644
---- a/arch/x86/pci/acpi.c
-+++ b/arch/x86/pci/acpi.c
-@@ -81,17 +81,6 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
- 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
- 		},
- 	},
--	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
--	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
--	{
--		.callback = set_use_crs,
--		.ident = "Foxconn K8M890-8237A",
--		.matches = {
--			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
--			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
--			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
--		},
--	},
- 
- 	/* Now for the blacklist.. */
- 
-@@ -132,10 +121,8 @@ void __init pci_acpi_crs_quirks(void)
- {
- 	int year;
- 
--	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
--		if (iomem_resource.end <= 0xffffffff)
--			pci_use_crs = false;
--	}
-+	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
-+		pci_use_crs = false;
- 
- 	dmi_check_system(pci_crs_quirks);
- 
-diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
-index e527a3e..8c81af6 100644
---- a/drivers/bluetooth/ath3k.c
-+++ b/drivers/bluetooth/ath3k.c
-@@ -80,7 +80,6 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x0489, 0xe057) },
- 	{ USB_DEVICE(0x0489, 0xe056) },
- 	{ USB_DEVICE(0x0489, 0xe05f) },
--	{ USB_DEVICE(0x0489, 0xe076) },
- 	{ USB_DEVICE(0x0489, 0xe078) },
- 	{ USB_DEVICE(0x04c5, 0x1330) },
- 	{ USB_DEVICE(0x04CA, 0x3004) },
-@@ -89,7 +88,6 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x04CA, 0x3007) },
- 	{ USB_DEVICE(0x04CA, 0x3008) },
- 	{ USB_DEVICE(0x04CA, 0x300b) },
--	{ USB_DEVICE(0x04CA, 0x300d) },
- 	{ USB_DEVICE(0x04CA, 0x300f) },
- 	{ USB_DEVICE(0x04CA, 0x3010) },
- 	{ USB_DEVICE(0x0930, 0x0219) },
-@@ -115,7 +113,6 @@ static const struct usb_device_id ath3k_table[] = {
- 	{ USB_DEVICE(0x13d3, 0x3408) },
- 	{ USB_DEVICE(0x13d3, 0x3423) },
- 	{ USB_DEVICE(0x13d3, 0x3432) },
--	{ USB_DEVICE(0x13d3, 0x3474) },
- 
- 	/* Atheros AR5BBU12 with sflash firmware */
- 	{ USB_DEVICE(0x0489, 0xE02C) },
-@@ -140,7 +137,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
-@@ -149,7 +145,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
-@@ -175,7 +170,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
- 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
- 
- 	/* Atheros AR5BBU22 with sflash firmware */
- 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
-diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
-index 420cc9f..3c10d4d 100644
---- a/drivers/bluetooth/btusb.c
-+++ b/drivers/bluetooth/btusb.c
-@@ -178,7 +178,6 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
-@@ -187,7 +186,6 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
-@@ -213,7 +211,6 @@ static const struct usb_device_id blacklist_table[] = {
- 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
- 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
--	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
- 
- 	/* Atheros AR5BBU12 with sflash firmware */
- 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
-diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
-index c45d274..6414661 100644
---- a/drivers/cpufreq/intel_pstate.c
-+++ b/drivers/cpufreq/intel_pstate.c
-@@ -535,7 +535,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
- 
- 	val |= vid;
- 
--	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
-+	wrmsrl(MSR_IA32_PERF_CTL, val);
- }
- 
- #define BYT_BCLK_FREQS 5
-diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
-index 3442764..5937207 100644
---- a/drivers/cpuidle/cpuidle-powernv.c
-+++ b/drivers/cpuidle/cpuidle-powernv.c
-@@ -60,8 +60,6 @@ static int nap_loop(struct cpuidle_device *dev,
- 	return index;
- }
- 
--/* Register for fastsleep only in oneshot mode of broadcast */
--#ifdef CONFIG_TICK_ONESHOT
- static int fastsleep_loop(struct cpuidle_device *dev,
- 				struct cpuidle_driver *drv,
- 				int index)
-@@ -85,7 +83,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
- 
- 	return index;
- }
--#endif
-+
- /*
-  * States for dedicated partition case.
-  */
-@@ -211,14 +209,7 @@ static int powernv_add_idle_states(void)
- 			powernv_states[nr_idle_states].flags = 0;
- 			powernv_states[nr_idle_states].target_residency = 100;
- 			powernv_states[nr_idle_states].enter = &nap_loop;
--		}
--
--		/*
--		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
--		 * within this config dependency check.
--		 */
--#ifdef CONFIG_TICK_ONESHOT
--		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
-+		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
- 			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
- 			/* Add FASTSLEEP state */
- 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
-@@ -227,7 +218,7 @@ static int powernv_add_idle_states(void)
- 			powernv_states[nr_idle_states].target_residency = 300000;
- 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
- 		}
--#endif
-+
- 		powernv_states[nr_idle_states].exit_latency =
- 				((unsigned int)latency_ns[i]) / 1000;
- 
-diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
-index f062158..857414a 100644
---- a/drivers/crypto/talitos.c
-+++ b/drivers/crypto/talitos.c
-@@ -925,8 +925,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
- 		sg_count--;
- 		link_tbl_ptr--;
- 	}
--	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
--					+ cryptlen);
-+	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
- 
- 	/* tag end of link table */
- 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
-@@ -2562,7 +2561,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
- 		break;
- 	default:
- 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
--		kfree(t_alg);
- 		return ERR_PTR(-EINVAL);
- 	}
- 
-diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
-index ca9f4ed..e1c7e9e 100644
---- a/drivers/iommu/amd_iommu.c
-+++ b/drivers/iommu/amd_iommu.c
-@@ -1869,15 +1869,9 @@ static void free_pt_##LVL (unsigned long __pt)			\
- 	pt = (u64 *)__pt;					\
- 								\
- 	for (i = 0; i < 512; ++i) {				\
--		/* PTE present? */				\
- 		if (!IOMMU_PTE_PRESENT(pt[i]))			\
- 			continue;				\
- 								\
--		/* Large PTE? */				\
--		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
--		    PM_PTE_LEVEL(pt[i]) == 7)			\
--			continue;				\
--								\
- 		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
- 		FN(p);						\
- 	}							\
-diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
-index 65075ef..66a803b 100644
---- a/drivers/iommu/arm-smmu.c
-+++ b/drivers/iommu/arm-smmu.c
-@@ -1567,7 +1567,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
- 		return -ENODEV;
- 	}
- 
--	if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
-+	if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
- 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
- 		dev_notice(smmu->dev, "\taddress translation ops\n");
- 	}
-diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
-index 9231cdf..c80287a 100644
---- a/drivers/mmc/host/sdhci.c
-+++ b/drivers/mmc/host/sdhci.c
-@@ -848,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
- 			int sg_cnt;
- 
- 			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
--			if (sg_cnt <= 0) {
-+			if (sg_cnt == 0) {
- 				/*
- 				 * This only happens when someone fed
- 				 * us an invalid request.
-diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
-index e9b1810..b0f6924 100644
---- a/drivers/net/can/dev.c
-+++ b/drivers/net/can/dev.c
-@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
- 		struct can_frame *cf = (struct can_frame *)skb->data;
- 		u8 dlc = cf->can_dlc;
- 
--		if (!(skb->tstamp.tv64))
--			__net_timestamp(skb);
--
- 		netif_rx(priv->echo_skb[idx]);
- 		priv->echo_skb[idx] = NULL;
- 
-@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
- 	if (unlikely(!skb))
- 		return NULL;
- 
--	__net_timestamp(skb);
- 	skb->protocol = htons(ETH_P_CAN);
- 	skb->pkt_type = PACKET_BROADCAST;
- 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-@@ -607,7 +603,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
- 	if (unlikely(!skb))
- 		return NULL;
- 
--	__net_timestamp(skb);
- 	skb->protocol = htons(ETH_P_CANFD);
- 	skb->pkt_type = PACKET_BROADCAST;
- 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
-index f64f529..c837eb9 100644
---- a/drivers/net/can/slcan.c
-+++ b/drivers/net/can/slcan.c
-@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
- 	if (!skb)
- 		return;
- 
--	__net_timestamp(skb);
- 	skb->dev = sl->dev;
- 	skb->protocol = htons(ETH_P_CAN);
- 	skb->pkt_type = PACKET_BROADCAST;
-diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
-index 0ce868d..674f367 100644
---- a/drivers/net/can/vcan.c
-+++ b/drivers/net/can/vcan.c
-@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
- 	skb->dev       = dev;
- 	skb->ip_summed = CHECKSUM_UNNECESSARY;
- 
--	if (!(skb->tstamp.tv64))
--		__net_timestamp(skb);
--
- 	netif_rx_ni(skb);
- }
- 
-diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-index 5c92fb7..d81fc6b 100644
---- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
-@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- 	int ret;
- 
- 	/* Try to obtain pages, decreasing order if necessary */
--	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
-+	gfp |= __GFP_COLD | __GFP_COMP;
- 	while (order >= 0) {
- 		pages = alloc_pages(gfp, order);
- 		if (pages)
-diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-index 8a97d28..33501bc 100644
---- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
-@@ -9323,8 +9323,7 @@ unload_error:
- 	 * function stop ramrod is sent, since as part of this ramrod FW access
- 	 * PTP registers.
- 	 */
--	if (bp->flags & PTP_SUPPORTED)
--		bnx2x_stop_ptp(bp);
-+	bnx2x_stop_ptp(bp);
- 
- 	/* Disable HW interrupts, NAPI */
- 	bnx2x_netif_stop(bp, 1);
-diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
-index 74d0389..ce5f7f9 100644
---- a/drivers/net/ethernet/marvell/mvneta.c
-+++ b/drivers/net/ethernet/marvell/mvneta.c
-@@ -310,7 +310,6 @@ struct mvneta_port {
- 	unsigned int link;
- 	unsigned int duplex;
- 	unsigned int speed;
--	unsigned int tx_csum_limit;
- 	int use_inband_status:1;
- };
- 
-@@ -1014,12 +1013,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
- 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
- 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
- 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
--	} else {
--		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
--		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
--		       MVNETA_GMAC_AN_SPEED_EN |
--		       MVNETA_GMAC_AN_DUPLEX_EN);
--		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
- 	}
- 
- 	mvneta_set_ucast_table(pp, -1);
-@@ -2509,10 +2502,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
- 
- 	dev->mtu = mtu;
- 
--	if (!netif_running(dev)) {
--		netdev_update_features(dev);
-+	if (!netif_running(dev))
- 		return 0;
--	}
- 
- 	/* The interface is running, so we have to force a
- 	 * reallocation of the queues
-@@ -2541,26 +2532,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
- 	mvneta_start_dev(pp);
- 	mvneta_port_up(pp);
- 
--	netdev_update_features(dev);
--
- 	return 0;
- }
- 
--static netdev_features_t mvneta_fix_features(struct net_device *dev,
--					     netdev_features_t features)
--{
--	struct mvneta_port *pp = netdev_priv(dev);
--
--	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
--		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
--		netdev_info(dev,
--			    "Disable IP checksum for MTU greater than %dB\n",
--			    pp->tx_csum_limit);
--	}
--
--	return features;
--}
--
- /* Get mac address */
- static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
- {
-@@ -2882,7 +2856,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
- 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
- 	.ndo_set_mac_address = mvneta_set_mac_addr,
- 	.ndo_change_mtu      = mvneta_change_mtu,
--	.ndo_fix_features    = mvneta_fix_features,
- 	.ndo_get_stats64     = mvneta_get_stats64,
- 	.ndo_do_ioctl        = mvneta_ioctl,
- };
-@@ -3128,9 +3101,6 @@ static int mvneta_probe(struct platform_device *pdev)
- 		}
- 	}
- 
--	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
--		pp->tx_csum_limit = 1600;
--
- 	pp->tx_ring_size = MVNETA_MAX_TXD;
- 	pp->rx_ring_size = MVNETA_MAX_RXD;
- 
-@@ -3209,7 +3179,6 @@ static int mvneta_remove(struct platform_device *pdev)
- 
- static const struct of_device_id mvneta_match[] = {
- 	{ .compatible = "marvell,armada-370-neta" },
--	{ .compatible = "marvell,armada-xp-neta" },
- 	{ }
- };
- MODULE_DEVICE_TABLE(of, mvneta_match);
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-index a5a0b84..cf467a9 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
-@@ -1973,6 +1973,10 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
- 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
- 	}
- 
-+	if (priv->base_tx_qpn) {
-+		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
-+		priv->base_tx_qpn = 0;
-+	}
- }
- 
- int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-index eab4e08..2a77a6b 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
-@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
- }
- #endif
- static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
--		      netdev_features_t dev_features)
-+		      int hwtstamp_rx_filter)
- {
- 	__wsum hw_checksum = 0;
- 
-@@ -731,8 +731,14 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
- 
- 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
- 
--	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
--	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
-+	if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
-+	    hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
-+		/* next protocol non IPv4 or IPv6 */
-+		if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-+		    != htons(ETH_P_IP) &&
-+		    ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-+		    != htons(ETH_P_IPV6))
-+			return -1;
- 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
- 		hdr += sizeof(struct vlan_hdr);
- 	}
-@@ -895,8 +901,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 
- 			if (ip_summed == CHECKSUM_COMPLETE) {
- 				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
--				if (check_csum(cqe, gro_skb, va,
--					       dev->features)) {
-+				if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
- 					ip_summed = CHECKSUM_NONE;
- 					ring->csum_none++;
- 					ring->csum_complete--;
-@@ -951,7 +956,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
- 		}
- 
- 		if (ip_summed == CHECKSUM_COMPLETE) {
--			if (check_csum(cqe, skb, skb->data, dev->features)) {
-+			if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
- 				ip_summed = CHECKSUM_NONE;
- 				ring->csum_complete--;
- 				ring->csum_none++;
-diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-index c10d98f..7bed3a8 100644
---- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
-@@ -66,7 +66,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
- 	ring->size = size;
- 	ring->size_mask = size - 1;
- 	ring->stride = stride;
--	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
- 
- 	tmp = size * sizeof(struct mlx4_en_tx_info);
- 	ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
-@@ -181,7 +180,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
- 		mlx4_bf_free(mdev->dev, &ring->bf);
- 	mlx4_qp_remove(mdev->dev, &ring->qp);
- 	mlx4_qp_free(mdev->dev, &ring->qp);
--	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- 	mlx4_en_unmap_buffer(&ring->wqres.buf);
- 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
- 	kfree(ring->bounce_buf);
-@@ -233,11 +231,6 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
- 		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
- }
- 
--static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
--{
--	return ring->prod - ring->cons > ring->full_size;
--}
--
- static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
- 			      struct mlx4_en_tx_ring *ring, int index,
- 			      u8 owner)
-@@ -480,10 +473,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
- 
- 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
- 
--	/* Wakeup Tx queue if this stopped, and ring is not full.
-+	/*
-+	 * Wakeup Tx queue if this stopped, and at least 1 packet
-+	 * was completed
- 	 */
--	if (netif_tx_queue_stopped(ring->tx_queue) &&
--	    !mlx4_en_is_tx_ring_full(ring)) {
-+	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
- 		netif_tx_wake_queue(ring->tx_queue);
- 		ring->wake_queue++;
- 	}
-@@ -927,7 +921,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
- 	skb_tx_timestamp(skb);
- 
- 	/* Check available TXBBs And 2K spare for prefetch */
--	stop_queue = mlx4_en_is_tx_ring_full(ring);
-+	stop_queue = (int)(ring->prod - ring_cons) >
-+		      ring->size - HEADROOM - MAX_DESC_TXBBS;
- 	if (unlikely(stop_queue)) {
- 		netif_tx_stop_queue(ring->tx_queue);
- 		ring->queue_stopped++;
-@@ -996,7 +991,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
- 		smp_rmb();
- 
- 		ring_cons = ACCESS_ONCE(ring->cons);
--		if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
-+		if (unlikely(((int)(ring->prod - ring_cons)) <=
-+			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
- 			netif_tx_wake_queue(ring->tx_queue);
- 			ring->wake_queue++;
- 		}
-diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
-index 0d80aed..6fce587 100644
---- a/drivers/net/ethernet/mellanox/mlx4/intf.c
-+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
-@@ -93,14 +93,8 @@ int mlx4_register_interface(struct mlx4_interface *intf)
- 	mutex_lock(&intf_mutex);
- 
- 	list_add_tail(&intf->list, &intf_list);
--	list_for_each_entry(priv, &dev_list, dev_list) {
--		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
--			mlx4_dbg(&priv->dev,
--				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
--			intf->flags &= ~MLX4_INTFF_BONDING;
--		}
-+	list_for_each_entry(priv, &dev_list, dev_list)
- 		mlx4_add_device(intf, priv);
--	}
- 
- 	mutex_unlock(&intf_mutex);
- 
-diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-index 909fcf8..d021f07 100644
---- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
-@@ -279,7 +279,6 @@ struct mlx4_en_tx_ring {
- 	u32			size; /* number of TXBBs */
- 	u32			size_mask;
- 	u16			stride;
--	u32			full_size;
- 	u16			cqn;	/* index of port CQ associated with this ring */
- 	u32			buf_size;
- 	__be32			doorbell_qpn;
-@@ -580,6 +579,7 @@ struct mlx4_en_priv {
- 	int vids[128];
- 	bool wol;
- 	struct device *ddev;
-+	int base_tx_qpn;
- 	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
- 	struct hwtstamp_config hwtstamp_config;
- 
-diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
-index d551df6..bdfe51f 100644
---- a/drivers/net/phy/phy_device.c
-+++ b/drivers/net/phy/phy_device.c
-@@ -796,11 +796,10 @@ static int genphy_config_advert(struct phy_device *phydev)
- 	if (phydev->supported & (SUPPORTED_1000baseT_Half |
- 				 SUPPORTED_1000baseT_Full)) {
- 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-+		if (adv != oldadv)
-+			changed = 1;
- 	}
- 
--	if (adv != oldadv)
--		changed = 1;
--
- 	err = phy_write(phydev, MII_CTRL1000, adv);
- 	if (err < 0)
- 		return err;
-diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
-index 4cdac78..b2f9521 100644
---- a/drivers/net/wireless/b43/main.c
-+++ b/drivers/net/wireless/b43/main.c
-@@ -5365,10 +5365,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
- 		*have_5ghz_phy = true;
- 		return;
- 	case 0x4321: /* BCM4306 */
--		/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
--		if (dev->phy.type != B43_PHYTYPE_G)
--			break;
--		/* fall through */
- 	case 0x4313: /* BCM4311 */
- 	case 0x431a: /* BCM4318 */
- 	case 0x432a: /* BCM4321 */
-diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
-index ec383b0..968787a 100644
---- a/drivers/net/xen-netback/xenbus.c
-+++ b/drivers/net/xen-netback/xenbus.c
-@@ -681,9 +681,6 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
- 	char *node;
- 	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
- 
--	if (vif->credit_watch.node)
--		return -EADDRINUSE;
--
- 	node = kmalloc(maxlen, GFP_KERNEL);
- 	if (!node)
- 		return -ENOMEM;
-@@ -773,7 +770,6 @@ static void connect(struct backend_info *be)
- 	}
- 
- 	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
--	xen_unregister_watchers(be->vif);
- 	xen_register_watchers(dev, be->vif);
- 	read_xenbus_vif_flags(be);
- 
-diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
-index f8d8fdb..6f1fa17 100644
---- a/drivers/s390/kvm/virtio_ccw.c
-+++ b/drivers/s390/kvm/virtio_ccw.c
-@@ -65,7 +65,6 @@ struct virtio_ccw_device {
- 	bool is_thinint;
- 	bool going_away;
- 	bool device_lost;
--	unsigned int config_ready;
- 	void *airq_info;
- };
- 
-@@ -834,11 +833,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
- 	if (ret)
- 		goto out_free;
- 
--	memcpy(vcdev->config, config_area, offset + len);
--	if (buf)
--		memcpy(buf, &vcdev->config[offset], len);
--	if (vcdev->config_ready < offset + len)
--		vcdev->config_ready = offset + len;
-+	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
-+	memcpy(buf, &vcdev->config[offset], len);
- 
- out_free:
- 	kfree(config_area);
-@@ -861,9 +857,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
- 	if (!config_area)
- 		goto out_free;
- 
--	/* Make sure we don't overwrite fields. */
--	if (vcdev->config_ready < offset)
--		virtio_ccw_get_config(vdev, 0, NULL, offset);
- 	memcpy(&vcdev->config[offset], buf, len);
- 	/* Write the config area to the host. */
- 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
-diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
-index a086e1d..5c8f581 100644
---- a/drivers/usb/class/cdc-acm.c
-+++ b/drivers/usb/class/cdc-acm.c
-@@ -1477,11 +1477,6 @@ skip_countries:
- 		goto alloc_fail8;
- 	}
- 
--	if (quirks & CLEAR_HALT_CONDITIONS) {
--		usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
--		usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
--	}
--
- 	return 0;
- alloc_fail8:
- 	if (acm->country_codes) {
-@@ -1761,10 +1756,6 @@ static const struct usb_device_id acm_ids[] = {
- 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
- 	},
- 
--	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
--	.driver_info = CLEAR_HALT_CONDITIONS,
--	},
--
- 	/* Nokia S60 phones expose two ACM channels. The first is
- 	 * a modem and is picked up by the standard AT-command
- 	 * information below. The second is 'vendor-specific' but
-diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
-index b3b6c9d..ffeb3c8 100644
---- a/drivers/usb/class/cdc-acm.h
-+++ b/drivers/usb/class/cdc-acm.h
-@@ -133,4 +133,3 @@ struct acm {
- #define NO_DATA_INTERFACE		BIT(4)
- #define IGNORE_DEVICE			BIT(5)
- #define QUIRK_CONTROL_LINE_STATE	BIT(6)
--#define CLEAR_HALT_CONDITIONS		BIT(7)
-diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
-index 45b8c8b..3507f88 100644
---- a/drivers/usb/gadget/function/f_fs.c
-+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -3435,7 +3435,6 @@ done:
- static void ffs_closed(struct ffs_data *ffs)
- {
- 	struct ffs_dev *ffs_obj;
--	struct f_fs_opts *opts;
- 
- 	ENTER();
- 	ffs_dev_lock();
-@@ -3450,13 +3449,8 @@ static void ffs_closed(struct ffs_data *ffs)
- 	    ffs_obj->ffs_closed_callback)
- 		ffs_obj->ffs_closed_callback(ffs);
- 
--	if (ffs_obj->opts)
--		opts = ffs_obj->opts;
--	else
--		goto done;
--
--	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
--	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
-+	if (!ffs_obj->opts || ffs_obj->opts->no_configfs
-+	    || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
- 		goto done;
- 
- 	unregister_gadget_item(ffs_obj->opts->
-diff --git a/fs/dcache.c b/fs/dcache.c
-index 50bb3c2..37b5afd 100644
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -2927,6 +2927,17 @@ restart:
- 				vfsmnt = &mnt->mnt;
- 				continue;
- 			}
-+			/*
-+			 * Filesystems needing to implement special "root names"
-+			 * should do so with ->d_dname()
-+			 */
-+			if (IS_ROOT(dentry) &&
-+			   (dentry->d_name.len != 1 ||
-+			    dentry->d_name.name[0] != '/')) {
-+				WARN(1, "Root dentry has weird name <%.*s>\n",
-+				     (int) dentry->d_name.len,
-+				     dentry->d_name.name);
-+			}
- 			if (!error)
- 				error = is_mounted(vfsmnt) ? 1 : 2;
- 			break;
-diff --git a/fs/inode.c b/fs/inode.c
-index 6e342ca..ea37cd1 100644
---- a/fs/inode.c
-+++ b/fs/inode.c
-@@ -1693,8 +1693,8 @@ int file_remove_suid(struct file *file)
- 		error = security_inode_killpriv(dentry);
- 	if (!error && killsuid)
- 		error = __remove_suid(dentry, killsuid);
--	if (!error)
--		inode_has_no_xattr(inode);
-+	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
-+		inode->i_flags |= S_NOSEC;
- 
- 	return error;
- }
-diff --git a/fs/namespace.c b/fs/namespace.c
-index 1d4a97c..1b9e111 100644
---- a/fs/namespace.c
-+++ b/fs/namespace.c
-@@ -3185,15 +3185,11 @@ bool fs_fully_visible(struct file_system_type *type)
- 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
- 			continue;
- 
--		/* This mount is not fully visible if there are any
--		 * locked child mounts that cover anything except for
--		 * empty directories.
-+		/* This mount is not fully visible if there are any child mounts
-+		 * that cover anything except for empty directories.
- 		 */
- 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
- 			struct inode *inode = child->mnt_mountpoint->d_inode;
--			/* Only worry about locked mounts */
--			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
--				continue;
- 			if (!S_ISDIR(inode->i_mode))
- 				goto next;
- 			if (inode->i_nlink > 2)
-diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
-index a7106ed..2c10360 100644
---- a/fs/ufs/balloc.c
-+++ b/fs/ufs/balloc.c
-@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
- 	
- 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
- 		ufs_error (sb, "ufs_free_fragments", "internal error");
--
--	mutex_lock(&UFS_SB(sb)->s_lock);
-+	
-+	lock_ufs(sb);
- 	
- 	cgno = ufs_dtog(uspi, fragment);
- 	bit = ufs_dtogd(uspi, fragment);
-@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
- 	if (sb->s_flags & MS_SYNCHRONOUS)
- 		ubh_sync_block(UCPI_UBH(ucpi));
- 	ufs_mark_sb_dirty(sb);
--
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	
-+	unlock_ufs(sb);
- 	UFSD("EXIT\n");
- 	return;
- 
- failed:
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	unlock_ufs(sb);
- 	UFSD("EXIT (FAILED)\n");
- 	return;
- }
-@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
- 		goto failed;
- 	}
- 
--	mutex_lock(&UFS_SB(sb)->s_lock);
-+	lock_ufs(sb);
- 	
- do_more:
- 	overflow = 0;
-@@ -211,12 +211,12 @@ do_more:
- 	}
- 
- 	ufs_mark_sb_dirty(sb);
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	unlock_ufs(sb);
- 	UFSD("EXIT\n");
- 	return;
- 
- failed_unlock:
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	unlock_ufs(sb);
- failed:
- 	UFSD("EXIT (FAILED)\n");
- 	return;
-@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 	usb1 = ubh_get_usb_first(uspi);
- 	*err = -ENOSPC;
- 
--	mutex_lock(&UFS_SB(sb)->s_lock);
-+	lock_ufs(sb);
- 	tmp = ufs_data_ptr_to_cpu(sb, p);
- 
- 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
-@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 				  "fragment %llu, tmp %llu\n",
- 				  (unsigned long long)fragment,
- 				  (unsigned long long)tmp);
--			mutex_unlock(&UFS_SB(sb)->s_lock);
-+			unlock_ufs(sb);
- 			return INVBLOCK;
- 		}
- 		if (fragment < UFS_I(inode)->i_lastfrag) {
- 			UFSD("EXIT (ALREADY ALLOCATED)\n");
--			mutex_unlock(&UFS_SB(sb)->s_lock);
-+			unlock_ufs(sb);
- 			return 0;
- 		}
- 	}
- 	else {
- 		if (tmp) {
- 			UFSD("EXIT (ALREADY ALLOCATED)\n");
--			mutex_unlock(&UFS_SB(sb)->s_lock);
-+			unlock_ufs(sb);
- 			return 0;
- 		}
- 	}
-@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 	 * There is not enough space for user on the device
- 	 */
- 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		UFSD("EXIT (FAILED)\n");
- 		return 0;
- 	}
-@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 			ufs_clear_frags(inode, result + oldcount,
- 					newcount - oldcount, locked_page != NULL);
- 		}
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
- 		return result;
- 	}
-@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 						fragment + count);
- 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
- 				locked_page != NULL);
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
- 		return result;
- 	}
-@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 		*err = 0;
- 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
- 						fragment + count);
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		if (newcount < request)
- 			ufs_free_fragments (inode, result + newcount, request - newcount);
- 		ufs_free_fragments (inode, tmp, oldcount);
-@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
- 		return result;
- 	}
- 
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	unlock_ufs(sb);
- 	UFSD("EXIT (FAILED)\n");
- 	return 0;
- }		
-diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
-index fd0203c..7caa016 100644
---- a/fs/ufs/ialloc.c
-+++ b/fs/ufs/ialloc.c
-@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
- 	
- 	ino = inode->i_ino;
- 
--	mutex_lock(&UFS_SB(sb)->s_lock);
-+	lock_ufs(sb);
- 
- 	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
- 		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		return;
- 	}
- 	
-@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
- 	bit = ufs_inotocgoff (ino);
- 	ucpi = ufs_load_cylinder (sb, cg);
- 	if (!ucpi) {
--		mutex_unlock(&UFS_SB(sb)->s_lock);
-+		unlock_ufs(sb);
- 		return;
- 	}
- 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
-@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
- 		ubh_sync_block(UCPI_UBH(ucpi));
- 	
- 	ufs_mark_sb_dirty(sb);
--	mutex_unlock(&UFS_SB(sb)->s_lock);
-+	unlock_ufs(sb);
- 	UFSD("EXIT\n");
- }
- 
-@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
- 	sbi = UFS_SB(sb);
- 	uspi = sbi->s_uspi;
- 
--	mutex_lock(&sbi->s_lock);
-+	lock_ufs(sb);
- 
- 	/*
- 	 * Try to place the inode in its parent directory
-@@ -331,21 +331,21 @@ cg_found:
- 			sync_dirty_buffer(bh);
- 		brelse(bh);
- 	}
--	mutex_unlock(&sbi->s_lock);
-+	unlock_ufs(sb);
- 
- 	UFSD("allocating inode %lu\n", inode->i_ino);
- 	UFSD("EXIT\n");
- 	return inode;
- 
- fail_remove_inode:
--	mutex_unlock(&sbi->s_lock);
-+	unlock_ufs(sb);
- 	clear_nlink(inode);
- 	unlock_new_inode(inode);
- 	iput(inode);
- 	UFSD("EXIT (FAILED): err %d\n", err);
- 	return ERR_PTR(err);
- failed:
--	mutex_unlock(&sbi->s_lock);
-+	unlock_ufs(sb);
- 	make_bad_inode(inode);
- 	iput (inode);
- 	UFSD("EXIT (FAILED): err %d\n", err);
-diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
-index 2d93ab0..be7d42c 100644
---- a/fs/ufs/inode.c
-+++ b/fs/ufs/inode.c
-@@ -902,9 +902,6 @@ void ufs_evict_inode(struct inode * inode)
- 	invalidate_inode_buffers(inode);
- 	clear_inode(inode);
- 
--	if (want_delete) {
--		lock_ufs(inode->i_sb);
-+	if (want_delete)
- 		ufs_free_inode(inode);
--		unlock_ufs(inode->i_sb);
--	}
- }
-diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
-index 60ee322..e491a93 100644
---- a/fs/ufs/namei.c
-+++ b/fs/ufs/namei.c
-@@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
- 	if (l > sb->s_blocksize)
- 		goto out_notlocked;
- 
--	lock_ufs(dir->i_sb);
- 	inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
- 	err = PTR_ERR(inode);
- 	if (IS_ERR(inode))
--		goto out;
-+		goto out_notlocked;
- 
-+	lock_ufs(dir->i_sb);
- 	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
- 		/* slow symlink */
- 		inode->i_op = &ufs_symlink_inode_operations;
-@@ -174,12 +174,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
- 	inode_inc_link_count(inode);
- 	ihold(inode);
- 
--	error = ufs_add_link(dentry, inode);
--	if (error) {
--		inode_dec_link_count(inode);
--		iput(inode);
--	} else
--		d_instantiate(dentry, inode);
-+	error = ufs_add_nondir(dentry, inode);
- 	unlock_ufs(dir->i_sb);
- 	return error;
- }
-@@ -189,13 +184,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
- 	struct inode * inode;
- 	int err;
- 
--	lock_ufs(dir->i_sb);
--	inode_inc_link_count(dir);
--
- 	inode = ufs_new_inode(dir, S_IFDIR|mode);
--	err = PTR_ERR(inode);
- 	if (IS_ERR(inode))
--		goto out_dir;
-+		return PTR_ERR(inode);
- 
- 	inode->i_op = &ufs_dir_inode_operations;
- 	inode->i_fop = &ufs_dir_operations;
-@@ -203,6 +194,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
- 
- 	inode_inc_link_count(inode);
- 
-+	lock_ufs(dir->i_sb);
-+	inode_inc_link_count(dir);
-+
- 	err = ufs_make_empty(inode, dir);
- 	if (err)
- 		goto out_fail;
-@@ -212,7 +206,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
- 		goto out_fail;
- 	unlock_ufs(dir->i_sb);
- 
--	unlock_new_inode(inode);
- 	d_instantiate(dentry, inode);
- out:
- 	return err;
-@@ -222,7 +215,6 @@ out_fail:
- 	inode_dec_link_count(inode);
- 	unlock_new_inode(inode);
- 	iput (inode);
--out_dir:
- 	inode_dec_link_count(dir);
- 	unlock_ufs(dir->i_sb);
- 	goto out;
-diff --git a/fs/ufs/super.c b/fs/ufs/super.c
-index dc33f94..b3bc3e7 100644
---- a/fs/ufs/super.c
-+++ b/fs/ufs/super.c
-@@ -694,7 +694,6 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
- 	unsigned flags;
- 
- 	lock_ufs(sb);
--	mutex_lock(&UFS_SB(sb)->s_lock);
- 
- 	UFSD("ENTER\n");
- 
-@@ -712,7 +711,6 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
- 	ufs_put_cstotal(sb);
- 
- 	UFSD("EXIT\n");
--	mutex_unlock(&UFS_SB(sb)->s_lock);
- 	unlock_ufs(sb);
- 
- 	return 0;
-@@ -801,7 +799,6 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
- 	UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
- 	
- 	mutex_init(&sbi->mutex);
--	mutex_init(&sbi->s_lock);
- 	spin_lock_init(&sbi->work_lock);
- 	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
- 	/*
-@@ -1280,7 +1277,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- 
- 	sync_filesystem(sb);
- 	lock_ufs(sb);
--	mutex_lock(&UFS_SB(sb)->s_lock);
- 	uspi = UFS_SB(sb)->s_uspi;
- 	flags = UFS_SB(sb)->s_flags;
- 	usb1 = ubh_get_usb_first(uspi);
-@@ -1294,7 +1290,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- 	new_mount_opt = 0;
- 	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
- 	if (!ufs_parse_options (data, &new_mount_opt)) {
--		mutex_unlock(&UFS_SB(sb)->s_lock);
- 		unlock_ufs(sb);
- 		return -EINVAL;
- 	}
-@@ -1302,14 +1297,12 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- 		new_mount_opt |= ufstype;
- 	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
- 		pr_err("ufstype can't be changed during remount\n");
--		mutex_unlock(&UFS_SB(sb)->s_lock);
- 		unlock_ufs(sb);
- 		return -EINVAL;
- 	}
- 
- 	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
- 		UFS_SB(sb)->s_mount_opt = new_mount_opt;
--		mutex_unlock(&UFS_SB(sb)->s_lock);
- 		unlock_ufs(sb);
- 		return 0;
- 	}
-@@ -1333,7 +1326,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- 	 */
- #ifndef CONFIG_UFS_FS_WRITE
- 		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
--		mutex_unlock(&UFS_SB(sb)->s_lock);
- 		unlock_ufs(sb);
- 		return -EINVAL;
- #else
-@@ -1343,13 +1335,11 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- 		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
- 		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
- 			pr_err("this ufstype is read-only supported\n");
--			mutex_unlock(&UFS_SB(sb)->s_lock);
- 			unlock_ufs(sb);
- 			return -EINVAL;
- 		}
- 		if (!ufs_read_cylinder_structures(sb)) {
- 			pr_err("failed during remounting\n");
--			mutex_unlock(&UFS_SB(sb)->s_lock);
- 			unlock_ufs(sb);
- 			return -EPERM;
- 		}
-@@ -1357,7 +1347,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
- #endif
- 	}
- 	UFS_SB(sb)->s_mount_opt = new_mount_opt;
--	mutex_unlock(&UFS_SB(sb)->s_lock);
- 	unlock_ufs(sb);
- 	return 0;
- }
-diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
-index cf6368d..2a07396 100644
---- a/fs/ufs/ufs.h
-+++ b/fs/ufs/ufs.h
-@@ -30,7 +30,6 @@ struct ufs_sb_info {
- 	int work_queued; /* non-zero if the delayed work is queued */
- 	struct delayed_work sync_work; /* FS sync delayed work */
- 	spinlock_t work_lock; /* protects sync_work and work_queued */
--	struct mutex s_lock;
- };
- 
- struct ufs_inode_info {
-diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
-index 8ba379f..3573a81 100644
---- a/include/net/netns/sctp.h
-+++ b/include/net/netns/sctp.h
-@@ -31,7 +31,6 @@ struct netns_sctp {
- 	struct list_head addr_waitq;
- 	struct timer_list addr_wq_timer;
- 	struct list_head auto_asconf_splist;
--	/* Lock that protects both addr_waitq and auto_asconf_splist */
- 	spinlock_t addr_wq_lock;
- 
- 	/* Lock that protects the local_addr_list writers */
-diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
-index 495c87e..2bb2fcf 100644
---- a/include/net/sctp/structs.h
-+++ b/include/net/sctp/structs.h
-@@ -223,10 +223,6 @@ struct sctp_sock {
- 	atomic_t pd_mode;
- 	/* Receive to here while partial delivery is in effect. */
- 	struct sk_buff_head pd_lobby;
--
--	/* These must be the last fields, as they will skipped on copies,
--	 * like on accept and peeloff operations
--	 */
- 	struct list_head auto_asconf_list;
- 	int do_auto_asconf;
- };
 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
 index 1a0006a..4842a98 100644
 --- a/include/uapi/linux/Kbuild
@@ -24084,463 +21991,6 @@ index 0000000..5297166
 +size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len);
 +
 +#endif
-diff --git a/kernel/events/core.c b/kernel/events/core.c
-index 0ceb386..eddf1ed 100644
---- a/kernel/events/core.c
-+++ b/kernel/events/core.c
-@@ -4331,20 +4331,20 @@ static void ring_buffer_attach(struct perf_event *event,
- 		WARN_ON_ONCE(event->rcu_pending);
- 
- 		old_rb = event->rb;
-+		event->rcu_batches = get_state_synchronize_rcu();
-+		event->rcu_pending = 1;
-+
- 		spin_lock_irqsave(&old_rb->event_lock, flags);
- 		list_del_rcu(&event->rb_entry);
- 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
-+	}
- 
--		event->rcu_batches = get_state_synchronize_rcu();
--		event->rcu_pending = 1;
-+	if (event->rcu_pending && rb) {
-+		cond_synchronize_rcu(event->rcu_batches);
-+		event->rcu_pending = 0;
- 	}
- 
- 	if (rb) {
--		if (event->rcu_pending) {
--			cond_synchronize_rcu(event->rcu_batches);
--			event->rcu_pending = 0;
--		}
--
- 		spin_lock_irqsave(&rb->event_lock, flags);
- 		list_add_rcu(&event->rb_entry, &rb->event_list);
- 		spin_unlock_irqrestore(&rb->event_lock, flags);
-diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
-index 8d423bc..a9a4a1b 100644
---- a/net/bridge/br_ioctl.c
-+++ b/net/bridge/br_ioctl.c
-@@ -247,7 +247,9 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
- 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
- 			return -EPERM;
- 
-+		spin_lock_bh(&br->lock);
- 		br_stp_set_bridge_priority(br, args[1]);
-+		spin_unlock_bh(&br->lock);
- 		return 0;
- 
- 	case BRCTL_SET_PORT_PRIORITY:
-diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
-index 7832d07..4114687 100644
---- a/net/bridge/br_stp_if.c
-+++ b/net/bridge/br_stp_if.c
-@@ -243,13 +243,12 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
- 	return true;
- }
- 
--/* Acquires and releases bridge lock */
-+/* called under bridge lock */
- void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
- {
- 	struct net_bridge_port *p;
- 	int wasroot;
- 
--	spin_lock_bh(&br->lock);
- 	wasroot = br_is_root_bridge(br);
- 
- 	list_for_each_entry(p, &br->port_list, list) {
-@@ -267,7 +266,6 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
- 	br_port_state_selection(br);
- 	if (br_is_root_bridge(br) && !wasroot)
- 		br_become_root_bridge(br);
--	spin_unlock_bh(&br->lock);
- }
- 
- /* called under bridge lock */
-diff --git a/net/can/af_can.c b/net/can/af_can.c
-index 689c818..32d710e 100644
---- a/net/can/af_can.c
-+++ b/net/can/af_can.c
-@@ -310,12 +310,8 @@ int can_send(struct sk_buff *skb, int loop)
- 		return err;
- 	}
- 
--	if (newskb) {
--		if (!(newskb->tstamp.tv64))
--			__net_timestamp(newskb);
--
-+	if (newskb)
- 		netif_rx_ni(newskb);
--	}
- 
- 	/* update statistics */
- 	can_stats.tx_frames++;
-diff --git a/net/core/neighbour.c b/net/core/neighbour.c
-index 2237c1b..3de6542 100644
---- a/net/core/neighbour.c
-+++ b/net/core/neighbour.c
-@@ -957,8 +957,6 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
- 	rc = 0;
- 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
- 		goto out_unlock_bh;
--	if (neigh->dead)
--		goto out_dead;
- 
- 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
- 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
-@@ -1015,13 +1013,6 @@ out_unlock_bh:
- 		write_unlock(&neigh->lock);
- 	local_bh_enable();
- 	return rc;
--
--out_dead:
--	if (neigh->nud_state & NUD_STALE)
--		goto out_unlock_bh;
--	write_unlock_bh(&neigh->lock);
--	kfree_skb(skb);
--	return 1;
- }
- EXPORT_SYMBOL(__neigh_event_send);
- 
-@@ -1085,8 +1076,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
- 	    (old & (NUD_NOARP | NUD_PERMANENT)))
- 		goto out;
--	if (neigh->dead)
--		goto out;
- 
- 	if (!(new & NUD_VALID)) {
- 		neigh_del_timer(neigh);
-@@ -1236,8 +1225,6 @@ EXPORT_SYMBOL(neigh_update);
-  */
- void __neigh_set_probe_once(struct neighbour *neigh)
- {
--	if (neigh->dead)
--		return;
- 	neigh->updated = jiffies;
- 	if (!(neigh->nud_state & NUD_FAILED))
- 		return;
-diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
-index a5aa54e..8b47a4d 100644
---- a/net/ipv4/af_inet.c
-+++ b/net/ipv4/af_inet.c
-@@ -228,8 +228,6 @@ int inet_listen(struct socket *sock, int backlog)
- 				err = 0;
- 			if (err)
- 				goto out;
--
--			tcp_fastopen_init_key_once(true);
- 		}
- 		err = inet_csk_listen_start(sk, backlog);
- 		if (err)
-diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
-index 6ddde89..7cfb089 100644
---- a/net/ipv4/ip_sockglue.c
-+++ b/net/ipv4/ip_sockglue.c
-@@ -432,15 +432,6 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
- 		kfree_skb(skb);
- }
- 
--/* For some errors we have valid addr_offset even with zero payload and
-- * zero port. Also, addr_offset should be supported if port is set.
-- */
--static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
--{
--	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
--	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
--}
--
- /* IPv4 supports cmsg on all imcp errors and some timestamps
-  *
-  * Timestamp code paths do not initialize the fields expected by cmsg:
-@@ -507,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
- 
- 	serr = SKB_EXT_ERR(skb);
- 
--	if (sin && ipv4_datagram_support_addr(serr)) {
-+	if (sin && serr->port) {
- 		sin->sin_family = AF_INET;
- 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
- 						   serr->addr_offset);
-diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
-index bb2ce74..f1377f2 100644
---- a/net/ipv4/tcp.c
-+++ b/net/ipv4/tcp.c
-@@ -2545,13 +2545,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
- 
- 	case TCP_FASTOPEN:
- 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
--		    TCPF_LISTEN))) {
--			tcp_fastopen_init_key_once(true);
--
-+		    TCPF_LISTEN)))
- 			err = fastopen_init_queue(sk, val);
--		} else {
-+		else
- 			err = -EINVAL;
--		}
- 		break;
- 	case TCP_TIMESTAMP:
- 		if (!tp->repair)
-diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
-index f9c0fb8..46b087a 100644
---- a/net/ipv4/tcp_fastopen.c
-+++ b/net/ipv4/tcp_fastopen.c
-@@ -78,6 +78,8 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
- 	struct tcp_fastopen_context *ctx;
- 	bool ok = false;
- 
-+	tcp_fastopen_init_key_once(true);
-+
- 	rcu_read_lock();
- 	ctx = rcu_dereference(tcp_fastopen_ctx);
- 	if (ctx) {
-diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
-index 62d908e..762a58c 100644
---- a/net/ipv6/datagram.c
-+++ b/net/ipv6/datagram.c
-@@ -325,16 +325,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
- 	kfree_skb(skb);
- }
- 
--/* For some errors we have valid addr_offset even with zero payload and
-- * zero port. Also, addr_offset should be supported if port is set.
-- */
--static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
--{
--	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
--	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
--	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
--}
--
- /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
-  *
-  * At one point, excluding local errors was a quick test to identify icmp/icmp6
-@@ -399,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
- 
- 	serr = SKB_EXT_ERR(skb);
- 
--	if (sin && ipv6_datagram_support_addr(serr)) {
-+	if (sin && serr->port) {
- 		const unsigned char *nh = skb_network_header(skb);
- 		sin->sin6_family = AF_INET6;
- 		sin->sin6_flowinfo = 0;
-diff --git a/net/mac80211/key.c b/net/mac80211/key.c
-index 81e9785..a907f2d 100644
---- a/net/mac80211/key.c
-+++ b/net/mac80211/key.c
-@@ -66,15 +66,12 @@ update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
- 	if (sdata->vif.type != NL80211_IFTYPE_AP)
- 		return;
- 
--	/* crypto_tx_tailroom_needed_cnt is protected by this */
--	assert_key_lock(sdata->local);
--
--	rcu_read_lock();
-+	mutex_lock(&sdata->local->mtx);
- 
--	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
-+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
- 		vlan->crypto_tx_tailroom_needed_cnt += delta;
- 
--	rcu_read_unlock();
-+	mutex_unlock(&sdata->local->mtx);
- }
- 
- static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
-@@ -98,8 +95,6 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
- 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
- 	 */
- 
--	assert_key_lock(sdata->local);
--
- 	update_vlan_tailroom_need_count(sdata, 1);
- 
- 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
-@@ -114,8 +109,6 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
- static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
- 					 int delta)
- {
--	assert_key_lock(sdata->local);
--
- 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
- 
- 	update_vlan_tailroom_need_count(sdata, -delta);
-diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
-index fe1610d..b5989c6 100644
---- a/net/packet/af_packet.c
-+++ b/net/packet/af_packet.c
-@@ -1272,6 +1272,16 @@ static void packet_sock_destruct(struct sock *sk)
- 	sk_refcnt_debug_dec(sk);
- }
- 
-+static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
-+{
-+	int x = atomic_read(&f->rr_cur) + 1;
-+
-+	if (x >= num)
-+		x = 0;
-+
-+	return x;
-+}
-+
- static unsigned int fanout_demux_hash(struct packet_fanout *f,
- 				      struct sk_buff *skb,
- 				      unsigned int num)
-@@ -1283,9 +1293,13 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
- 				    struct sk_buff *skb,
- 				    unsigned int num)
- {
--	unsigned int val = atomic_inc_return(&f->rr_cur);
-+	int cur, old;
- 
--	return val % num;
-+	cur = atomic_read(&f->rr_cur);
-+	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
-+				     fanout_rr_next(f, num))) != cur)
-+		cur = old;
-+	return cur;
- }
- 
- static unsigned int fanout_demux_cpu(struct packet_fanout *f,
-@@ -1339,7 +1353,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
- 			     struct packet_type *pt, struct net_device *orig_dev)
- {
- 	struct packet_fanout *f = pt->af_packet_priv;
--	unsigned int num = READ_ONCE(f->num_members);
-+	unsigned int num = f->num_members;
- 	struct packet_sock *po;
- 	unsigned int idx;
- 
-diff --git a/net/sctp/output.c b/net/sctp/output.c
-index abe7c2d..fc5e45b 100644
---- a/net/sctp/output.c
-+++ b/net/sctp/output.c
-@@ -599,9 +599,7 @@ out:
- 	return err;
- no_route:
- 	kfree_skb(nskb);
--
--	if (asoc)
--		IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
-+	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
- 
- 	/* FIXME: Returning the 'err' will effect all the associations
- 	 * associated with a socket, although only one of the paths of the
-diff --git a/net/sctp/socket.c b/net/sctp/socket.c
-index 5f6c4e6..f09de7f 100644
---- a/net/sctp/socket.c
-+++ b/net/sctp/socket.c
-@@ -1528,10 +1528,8 @@ static void sctp_close(struct sock *sk, long timeout)
- 
- 	/* Supposedly, no process has access to the socket, but
- 	 * the net layers still may.
--	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
--	 * held and that should be grabbed before socket lock.
- 	 */
--	spin_lock_bh(&net->sctp.addr_wq_lock);
-+	local_bh_disable();
- 	bh_lock_sock(sk);
- 
- 	/* Hold the sock, since sk_common_release() will put sock_put()
-@@ -1541,7 +1539,7 @@ static void sctp_close(struct sock *sk, long timeout)
- 	sk_common_release(sk);
- 
- 	bh_unlock_sock(sk);
--	spin_unlock_bh(&net->sctp.addr_wq_lock);
-+	local_bh_enable();
- 
- 	sock_put(sk);
- 
-@@ -3582,7 +3580,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
- 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
- 		return 0;
- 
--	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
- 	if (val == 0 && sp->do_auto_asconf) {
- 		list_del(&sp->auto_asconf_list);
- 		sp->do_auto_asconf = 0;
-@@ -3591,7 +3588,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
- 		    &sock_net(sk)->sctp.auto_asconf_splist);
- 		sp->do_auto_asconf = 1;
- 	}
--	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
- 	return 0;
- }
- 
-@@ -4125,28 +4121,18 @@ static int sctp_init_sock(struct sock *sk)
- 	local_bh_disable();
- 	percpu_counter_inc(&sctp_sockets_allocated);
- 	sock_prot_inuse_add(net, sk->sk_prot, 1);
--
--	/* Nothing can fail after this block, otherwise
--	 * sctp_destroy_sock() will be called without addr_wq_lock held
--	 */
- 	if (net->sctp.default_auto_asconf) {
--		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
- 		list_add_tail(&sp->auto_asconf_list,
- 		    &net->sctp.auto_asconf_splist);
- 		sp->do_auto_asconf = 1;
--		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
--	} else {
-+	} else
- 		sp->do_auto_asconf = 0;
--	}
--
- 	local_bh_enable();
- 
- 	return 0;
- }
- 
--/* Cleanup any SCTP per socket resources. Must be called with
-- * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
-- */
-+/* Cleanup any SCTP per socket resources.  */
- static void sctp_destroy_sock(struct sock *sk)
- {
- 	struct sctp_sock *sp;
-@@ -7209,19 +7195,6 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
- 	newinet->mc_list = NULL;
- }
- 
--static inline void sctp_copy_descendant(struct sock *sk_to,
--					const struct sock *sk_from)
--{
--	int ancestor_size = sizeof(struct inet_sock) +
--			    sizeof(struct sctp_sock) -
--			    offsetof(struct sctp_sock, auto_asconf_list);
--
--	if (sk_from->sk_family == PF_INET6)
--		ancestor_size += sizeof(struct ipv6_pinfo);
--
--	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
--}
--
- /* Populate the fields of the newsk from the oldsk and migrate the assoc
-  * and its messages to the newsk.
-  */
-@@ -7236,6 +7209,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- 	struct sk_buff *skb, *tmp;
- 	struct sctp_ulpevent *event;
- 	struct sctp_bind_hashbucket *head;
-+	struct list_head tmplist;
- 
- 	/* Migrate socket buffer sizes and all the socket level options to the
- 	 * new socket.
-@@ -7243,7 +7217,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
- 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
- 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
- 	/* Brute force copy old sctp opt. */
--	sctp_copy_descendant(newsk, oldsk);
-+	if (oldsp->do_auto_asconf) {
-+		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
-+		inet_sk_copy_descendant(newsk, oldsk);
-+		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
-+	} else
-+		inet_sk_copy_descendant(newsk, oldsk);
- 
- 	/* Restore the ep value that was overwritten with the above structure
- 	 * copy.
 diff --git a/samples/Kconfig b/samples/Kconfig
 index 224ebb4..a4c6b2f 100644
 --- a/samples/Kconfig
@@ -26063,39 +23513,6 @@ index 0000000..c3ba958
 +}
 +
 +#endif /* libc sanity check */
-diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
-index 212070e..7dade28 100644
---- a/security/selinux/hooks.c
-+++ b/security/selinux/hooks.c
-@@ -403,7 +403,6 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
- 	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
- 		sbsec->behavior == SECURITY_FS_USE_TRANS ||
- 		sbsec->behavior == SECURITY_FS_USE_TASK ||
--		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
- 		/* Special handling. Genfs but also in-core setxattr handler */
- 		!strcmp(sb->s_type->name, "sysfs") ||
- 		!strcmp(sb->s_type->name, "pstore") ||
-diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
-index 98cfc38..10df572 100644
---- a/tools/build/Makefile.build
-+++ b/tools/build/Makefile.build
-@@ -94,12 +94,12 @@ obj-y        := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
- subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
- 
- # '$(OUTPUT)/dir' prefix to all objects
--objprefix    := $(subst ./,,$(OUTPUT)$(dir)/)
--obj-y        := $(addprefix $(objprefix),$(obj-y))
--subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
-+prefix       := $(subst ./,,$(OUTPUT)$(dir)/)
-+obj-y        := $(addprefix $(prefix),$(obj-y))
-+subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
- 
- # Final '$(obj)-in.o' object
--in-target := $(objprefix)$(obj)-in.o
-+in-target := $(prefix)$(obj)-in.o
- 
- PHONY += $(subdir-y)
- 
 diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
 index 95abddc..b57100c 100644
 --- a/tools/testing/selftests/Makefile
@@ -36683,27 +34100,3 @@ index 0000000..cfd1930
 +
 +	return TEST_OK;
 +}
-diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
-index 950064a..78fb820 100644
---- a/virt/kvm/arm/vgic.c
-+++ b/virt/kvm/arm/vgic.c
-@@ -1561,7 +1561,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
- 			goto out;
- 	}
- 
--	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
-+	if (irq_num >= kvm->arch.vgic.nr_irqs)
- 		return -EINVAL;
- 
- 	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
-@@ -2161,7 +2161,10 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id,
- 
- 	BUG_ON(!vgic_initialized(kvm));
- 
-+	if (spi > kvm->arch.vgic.nr_irqs)
-+		return -EINVAL;
- 	return kvm_vgic_inject_irq(kvm, 0, spi, level);
-+
- }
- 
- /* MSI not implemented yet */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-22 10:09 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-22 10:09 UTC (permalink / raw
  To: gentoo-commits

commit:     40044434d47f847553adb9b9cde5c31516fe435c
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul 22 10:09:43 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul 22 10:09:43 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=40044434

Linux patch 4.1.3

 0000_README            |    4 +
 1002_linux-4.1.3.patch | 3990 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 3994 insertions(+)

diff --git a/0000_README b/0000_README
index 3b87439..8e9fdc7 100644
--- a/0000_README
+++ b/0000_README
@@ -51,6 +51,10 @@ Patch:  1001_linux-4.1.2.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.2
 
+Patch:  1002_linux-4.1.3.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.3
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1002_linux-4.1.3.patch b/1002_linux-4.1.3.patch
new file mode 100644
index 0000000..a9dbb97
--- /dev/null
+++ b/1002_linux-4.1.3.patch
@@ -0,0 +1,3990 @@
+diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
+index 0f7afb2bb442..aef8cc5a677b 100644
+--- a/Documentation/DMA-API-HOWTO.txt
++++ b/Documentation/DMA-API-HOWTO.txt
+@@ -25,13 +25,18 @@ physical addresses.  These are the addresses in /proc/iomem.  The physical
+ address is not directly useful to a driver; it must use ioremap() to map
+ the space and produce a virtual address.
+ 
+-I/O devices use a third kind of address: a "bus address" or "DMA address".
+-If a device has registers at an MMIO address, or if it performs DMA to read
+-or write system memory, the addresses used by the device are bus addresses.
+-In some systems, bus addresses are identical to CPU physical addresses, but
+-in general they are not.  IOMMUs and host bridges can produce arbitrary
++I/O devices use a third kind of address: a "bus address".  If a device has
++registers at an MMIO address, or if it performs DMA to read or write system
++memory, the addresses used by the device are bus addresses.  In some
++systems, bus addresses are identical to CPU physical addresses, but in
++general they are not.  IOMMUs and host bridges can produce arbitrary
+ mappings between physical and bus addresses.
+ 
++From a device's point of view, DMA uses the bus address space, but it may
++be restricted to a subset of that space.  For example, even if a system
++supports 64-bit addresses for main memory and PCI BARs, it may use an IOMMU
++so devices only need to use 32-bit DMA addresses.
++
+ Here's a picture and some examples:
+ 
+                CPU                  CPU                  Bus
+@@ -72,11 +77,11 @@ can use virtual address X to access the buffer, but the device itself
+ cannot because DMA doesn't go through the CPU virtual memory system.
+ 
+ In some simple systems, the device can do DMA directly to physical address
+-Y.  But in many others, there is IOMMU hardware that translates bus
++Y.  But in many others, there is IOMMU hardware that translates DMA
+ addresses to physical addresses, e.g., it translates Z to Y.  This is part
+ of the reason for the DMA API: the driver can give a virtual address X to
+ an interface like dma_map_single(), which sets up any required IOMMU
+-mapping and returns the bus address Z.  The driver then tells the device to
++mapping and returns the DMA address Z.  The driver then tells the device to
+ do DMA to Z, and the IOMMU maps it to the buffer at address Y in system
+ RAM.
+ 
+@@ -98,7 +103,7 @@ First of all, you should make sure
+ #include <linux/dma-mapping.h>
+ 
+ is in your driver, which provides the definition of dma_addr_t.  This type
+-can hold any valid DMA or bus address for the platform and should be used
++can hold any valid DMA address for the platform and should be used
+ everywhere you hold a DMA address returned from the DMA mapping functions.
+ 
+ 			 What memory is DMA'able?
+@@ -316,7 +321,7 @@ There are two types of DMA mappings:
+   Think of "consistent" as "synchronous" or "coherent".
+ 
+   The current default is to return consistent memory in the low 32
+-  bits of the bus space.  However, for future compatibility you should
++  bits of the DMA space.  However, for future compatibility you should
+   set the consistent mask even if this default is fine for your
+   driver.
+ 
+@@ -403,7 +408,7 @@ dma_alloc_coherent() returns two values: the virtual address which you
+ can use to access it from the CPU and dma_handle which you pass to the
+ card.
+ 
+-The CPU virtual address and the DMA bus address are both
++The CPU virtual address and the DMA address are both
+ guaranteed to be aligned to the smallest PAGE_SIZE order which
+ is greater than or equal to the requested size.  This invariant
+ exists (for example) to guarantee that if you allocate a chunk
+@@ -645,8 +650,8 @@ PLEASE NOTE:  The 'nents' argument to the dma_unmap_sg call must be
+               dma_map_sg call.
+ 
+ Every dma_map_{single,sg}() call should have its dma_unmap_{single,sg}()
+-counterpart, because the bus address space is a shared resource and
+-you could render the machine unusable by consuming all bus addresses.
++counterpart, because the DMA address space is a shared resource and
++you could render the machine unusable by consuming all DMA addresses.
+ 
+ If you need to use the same streaming DMA region multiple times and touch
+ the data in between the DMA transfers, the buffer needs to be synced
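+
A minimal sketch of the streaming-mapping flow the HOWTO text above
describes, assuming a hypothetical driver with a valid struct device
pointer (the function name and buffer size are illustrative only):

	static int example_tx(struct device *dev)
	{
		void *buf = kmalloc(SZ_4K, GFP_KERNEL); /* virtual address "X" */
		dma_addr_t dma_handle;

		if (!buf)
			return -ENOMEM;

		/* Sets up any required IOMMU mapping and returns the DMA
		 * address "Z" that the device is then programmed with. */
		dma_handle = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_handle)) {
			kfree(buf);
			return -ENOMEM;
		}

		/* ... tell the device to DMA from dma_handle ... */

		/* Every map needs a matching unmap; DMA addresses are a
		 * shared resource. */
		dma_unmap_single(dev, dma_handle, SZ_4K, DMA_TO_DEVICE);
		kfree(buf);
		return 0;
	}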
+diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
+index 52088408668a..7eba542eff7c 100644
+--- a/Documentation/DMA-API.txt
++++ b/Documentation/DMA-API.txt
+@@ -18,10 +18,10 @@ Part I - dma_ API
+ To get the dma_ API, you must #include <linux/dma-mapping.h>.  This
+ provides dma_addr_t and the interfaces described below.
+ 
+-A dma_addr_t can hold any valid DMA or bus address for the platform.  It
+-can be given to a device to use as a DMA source or target.  A CPU cannot
+-reference a dma_addr_t directly because there may be translation between
+-its physical address space and the bus address space.
++A dma_addr_t can hold any valid DMA address for the platform.  It can be
++given to a device to use as a DMA source or target.  A CPU cannot reference
++a dma_addr_t directly because there may be translation between its physical
++address space and the DMA address space.
+ 
+ Part Ia - Using large DMA-coherent buffers
+ ------------------------------------------
+@@ -42,7 +42,7 @@ It returns a pointer to the allocated region (in the processor's virtual
+ address space) or NULL if the allocation failed.
+ 
+ It also returns a <dma_handle> which may be cast to an unsigned integer the
+-same width as the bus and given to the device as the bus address base of
++same width as the bus and given to the device as the DMA address base of
+ the region.
+ 
+ Note: consistent memory can be expensive on some platforms, and the
+@@ -193,7 +193,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ 		      enum dma_data_direction direction)
+ 
+ Maps a piece of processor virtual memory so it can be accessed by the
+-device and returns the bus address of the memory.
++device and returns the DMA address of the memory.
+ 
+ The direction for both APIs may be converted freely by casting.
+ However the dma_ API uses a strongly typed enumerator for its
+@@ -212,20 +212,20 @@ contiguous piece of memory.  For this reason, memory to be mapped by
+ this API should be obtained from sources which guarantee it to be
+ physically contiguous (like kmalloc).
+ 
+-Further, the bus address of the memory must be within the
++Further, the DMA address of the memory must be within the
+ dma_mask of the device (the dma_mask is a bit mask of the
+-addressable region for the device, i.e., if the bus address of
+-the memory ANDed with the dma_mask is still equal to the bus
++addressable region for the device, i.e., if the DMA address of
++the memory ANDed with the dma_mask is still equal to the DMA
+ address, then the device can perform DMA to the memory).  To
+ ensure that the memory allocated by kmalloc is within the dma_mask,
+ the driver may specify various platform-dependent flags to restrict
+-the bus address range of the allocation (e.g., on x86, GFP_DMA
+-guarantees to be within the first 16MB of available bus addresses,
++the DMA address range of the allocation (e.g., on x86, GFP_DMA
++guarantees to be within the first 16MB of available DMA addresses,
+ as required by ISA devices).
+ 
+ Note also that the above constraints on physical contiguity and
+ dma_mask may not apply if the platform has an IOMMU (a device which
+-maps an I/O bus address to a physical memory address).  However, to be
++maps an I/O DMA address to a physical memory address).  However, to be
+ portable, device driver writers may *not* assume that such an IOMMU
+ exists.
+ 
+@@ -296,7 +296,7 @@ reduce current DMA mapping usage or delay and try again later).
+ 	dma_map_sg(struct device *dev, struct scatterlist *sg,
+ 		int nents, enum dma_data_direction direction)
+ 
+-Returns: the number of bus address segments mapped (this may be shorter
++Returns: the number of DMA address segments mapped (this may be shorter
+ than <nents> passed in if some elements of the scatter/gather list are
+ physically or virtually adjacent and an IOMMU maps them with a single
+ entry).
+@@ -340,7 +340,7 @@ must be the same as those and passed in to the scatter/gather mapping
+ API.
+ 
+ Note: <nents> must be the number you passed in, *not* the number of
+-bus address entries returned.
++DMA address entries returned.
+ 
+ void
+ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+@@ -507,7 +507,7 @@ it's asked for coherent memory for this device.
+ phys_addr is the CPU physical address to which the memory is currently
+ assigned (this will be ioremapped so the CPU can access the region).
+ 
+-device_addr is the bus address the device needs to be programmed
++device_addr is the DMA address the device needs to be programmed
+ with to actually address this memory (this will be handed out as the
+ dma_addr_t in dma_alloc_coherent()).
+ 
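Likewise, a hedged sketch of the coherent-allocation interface the text
above documents; dma_alloc_coherent() hands back both the CPU virtual
address and the dma_handle given to the device (names illustrative):

	static int example_coherent(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *cpu_addr;

		cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle,
					      GFP_KERNEL);
		if (!cpu_addr)
			return -ENOMEM;

		/* The CPU accesses the region through cpu_addr while the
		 * device is programmed with dma_handle; both views are
		 * suitably page-aligned, as documented above. */

		dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
		return 0;
	}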
+diff --git a/Documentation/devicetree/bindings/spi/spi_pl022.txt b/Documentation/devicetree/bindings/spi/spi_pl022.txt
+index 22ed6797216d..4d1673ca8cf8 100644
+--- a/Documentation/devicetree/bindings/spi/spi_pl022.txt
++++ b/Documentation/devicetree/bindings/spi/spi_pl022.txt
+@@ -4,9 +4,9 @@ Required properties:
+ - compatible : "arm,pl022", "arm,primecell"
+ - reg : Offset and length of the register set for the device
+ - interrupts : Should contain SPI controller interrupt
++- num-cs : total number of chipselects
+ 
+ Optional properties:
+-- num-cs : total number of chipselects
+ - cs-gpios : should specify GPIOs used for chipselects.
+   The gpios will be referred to as reg = <index> in the SPI child nodes.
+   If unspecified, a single SPI device without a chip select can be used.
+diff --git a/Makefile b/Makefile
+index cef84c061f02..e3cdec4898be 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
+index 9917a45fc430..20b7dc17979e 100644
+--- a/arch/arc/include/asm/atomic.h
++++ b/arch/arc/include/asm/atomic.h
+@@ -43,6 +43,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
+ {									\
+ 	unsigned int temp;						\
+ 									\
++	/*								\
++	 * Explicit full memory barrier needed before/after as		\
++	 * LLOCK/SCOND themselves don't provide any such semantics	\
++	 */								\
++	smp_mb();							\
++									\
+ 	__asm__ __volatile__(						\
+ 	"1:	llock   %0, [%1]	\n"				\
+ 	"	" #asm_op " %0, %0, %2	\n"				\
+@@ -52,6 +58,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
+ 	: "r"(&v->counter), "ir"(i)					\
+ 	: "cc");							\
+ 									\
++	smp_mb();							\
++									\
+ 	return temp;							\
+ }
+ 
+@@ -105,6 +113,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)		\
+ 	unsigned long flags;						\
+ 	unsigned long temp;						\
+ 									\
++	/*								\
++	 * spin lock/unlock provides the needed smp_mb() before/after	\
++	 */								\
+ 	atomic_ops_lock(flags);						\
+ 	temp = v->counter;						\
+ 	temp c_op i;							\
+@@ -142,9 +153,19 @@ ATOMIC_OP(and, &=, and)
+ #define __atomic_add_unless(v, a, u)					\
+ ({									\
+ 	int c, old;							\
++									\
++	/*								\
++	 * Explicit full memory barrier needed before/after as		\
++	 * LLOCK/SCOND themselves don't provide any such semantics	\
++	 */								\
++	smp_mb();							\
++									\
+ 	c = atomic_read(v);						\
+ 	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ 		c = old;						\
++									\
++	smp_mb();							\
++									\
+ 	c;								\
+ })
+ 
+diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
+index 4051e9525939..624a9d048ca9 100644
+--- a/arch/arc/include/asm/bitops.h
++++ b/arch/arc/include/asm/bitops.h
+@@ -117,6 +117,12 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+ 	if (__builtin_constant_p(nr))
+ 		nr &= 0x1f;
+ 
++	/*
++	 * Explicit full memory barrier needed before/after as
++	 * LLOCK/SCOND themselves don't provide any such semantics
++	 */
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	llock   %0, [%2]	\n"
+ 	"	bset    %1, %0, %3	\n"
+@@ -126,6 +132,8 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+ 	: "r"(m), "ir"(nr)
+ 	: "cc");
+ 
++	smp_mb();
++
+ 	return (old & (1 << nr)) != 0;
+ }
+ 
+@@ -139,6 +147,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+ 	if (__builtin_constant_p(nr))
+ 		nr &= 0x1f;
+ 
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	llock   %0, [%2]	\n"
+ 	"	bclr    %1, %0, %3	\n"
+@@ -148,6 +158,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+ 	: "r"(m), "ir"(nr)
+ 	: "cc");
+ 
++	smp_mb();
++
+ 	return (old & (1 << nr)) != 0;
+ }
+ 
+@@ -161,6 +173,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+ 	if (__builtin_constant_p(nr))
+ 		nr &= 0x1f;
+ 
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	llock   %0, [%2]	\n"
+ 	"	bxor    %1, %0, %3	\n"
+@@ -170,6 +184,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+ 	: "r"(m), "ir"(nr)
+ 	: "cc");
+ 
++	smp_mb();
++
+ 	return (old & (1 << nr)) != 0;
+ }
+ 
+@@ -249,6 +265,9 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+ 	if (__builtin_constant_p(nr))
+ 		nr &= 0x1f;
+ 
++	/*
++	 * spin lock/unlock provide the needed smp_mb() before/after
++	 */
+ 	bitops_lock(flags);
+ 
+ 	old = *m;
+diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
+index 03cd6894855d..44fd531f4d7b 100644
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -10,6 +10,8 @@
+ #define __ASM_ARC_CMPXCHG_H
+ 
+ #include <linux/types.h>
++
++#include <asm/barrier.h>
+ #include <asm/smp.h>
+ 
+ #ifdef CONFIG_ARC_HAS_LLSC
+@@ -19,16 +21,25 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+ {
+ 	unsigned long prev;
+ 
++	/*
++	 * Explicit full memory barrier needed before/after as
++	 * LLOCK/SCOND themselves don't provide any such semantics
++	 */
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	llock   %0, [%1]	\n"
+ 	"	brne    %0, %2, 2f	\n"
+ 	"	scond   %3, [%1]	\n"
+ 	"	bnz     1b		\n"
+ 	"2:				\n"
+-	: "=&r"(prev)
+-	: "r"(ptr), "ir"(expected),
+-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
+-	: "cc");
++	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
++	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
++	  "ir"(expected),
++	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
++	: "cc", "memory"); /* so that gcc knows memory is being written here */
++
++	smp_mb();
+ 
+ 	return prev;
+ }
+@@ -42,6 +53,9 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+ 	int prev;
+ 	volatile unsigned long *p = ptr;
+ 
++	/*
++	 * spin lock/unlock provide the needed smp_mb() before/after
++	 */
+ 	atomic_ops_lock(flags);
+ 	prev = *p;
+ 	if (prev == expected)
+@@ -77,12 +91,16 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ 
+ 	switch (size) {
+ 	case 4:
++		smp_mb();
++
+ 		__asm__ __volatile__(
+ 		"	ex  %0, [%1]	\n"
+ 		: "+r"(val)
+ 		: "r"(ptr)
+ 		: "memory");
+ 
++		smp_mb();
++
+ 		return val;
+ 	}
+ 	return __xchg_bad_pointer();
+diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
+index b6a8c2dfbe6e..e1651df6a93d 100644
+--- a/arch/arc/include/asm/spinlock.h
++++ b/arch/arc/include/asm/spinlock.h
+@@ -22,24 +22,46 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ {
+ 	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ 
++	/*
++	 * This smp_mb() is technically superfluous, we only need the one
++	 * after the lock for providing the ACQUIRE semantics.
++	 * However doing the "right" thing was regressing hackbench
++	 * so keeping this, pending further investigation
++	 */
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	ex  %0, [%1]		\n"
+ 	"	breq  %0, %2, 1b	\n"
+ 	: "+&r" (tmp)
+ 	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ 	: "memory");
++
++	/*
++	 * ACQUIRE barrier to ensure load/store after taking the lock
++	 * don't "bleed-up" out of the critical section (leak-in is allowed)
++	 * http://www.spinics.net/lists/kernel/msg2010409.html
++	 *
++	 * ARCv2 only has load-load, store-store and all-all barrier
++	 * thus need the full all-all barrier
++	 */
++	smp_mb();
+ }
+ 
+ static inline int arch_spin_trylock(arch_spinlock_t *lock)
+ {
+ 	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+ 
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"1:	ex  %0, [%1]		\n"
+ 	: "+r" (tmp)
+ 	: "r"(&(lock->slock))
+ 	: "memory");
+ 
++	smp_mb();
++
+ 	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+ }
+ 
+@@ -47,12 +69,22 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
+ {
+ 	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+ 
++	/*
++	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
++	 * is the only option
++	 */
++	smp_mb();
++
+ 	__asm__ __volatile__(
+ 	"	ex  %0, [%1]		\n"
+ 	: "+r" (tmp)
+ 	: "r"(&(lock->slock))
+ 	: "memory");
+ 
++	/*
++	 * superfluous, but keeping for now - see pairing version in
++	 * arch_spin_lock above
++	 */
+ 	smp_mb();
+ }
+ 
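The ordering contract these ARC hunks restore, sketched generically
(example_publish, refs and ready are hypothetical): value-returning
atomics must act as full barriers, which LLOCK/SCOND alone do not give:

	static atomic_t refs;
	static int ready;

	static void example_publish(void)
	{
		/* atomic_add_return() now implies smp_mb() before and
		 * after, so the store to 'ready' cannot be observed
		 * ahead of the counter update. */
		atomic_add_return(1, &refs);
		WRITE_ONCE(ready, 1);
	}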
+diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
+index fd2ec50102f2..57b58f52d825 100644
+--- a/arch/arc/kernel/perf_event.c
++++ b/arch/arc/kernel/perf_event.c
+@@ -266,7 +266,6 @@ static int arc_pmu_add(struct perf_event *event, int flags)
+ 
+ static int arc_pmu_device_probe(struct platform_device *pdev)
+ {
+-	struct arc_pmu *arc_pmu;
+ 	struct arc_reg_pct_build pct_bcr;
+ 	struct arc_reg_cc_build cc_bcr;
+ 	int i, j, ret;
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 959fe8733560..bddd04d031db 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -517,6 +517,7 @@ el0_sp_pc:
+ 	mrs	x26, far_el1
+ 	// enable interrupts before calling the main handler
+ 	enable_dbg_and_irq
++	ct_user_exit
+ 	mov	x0, x26
+ 	mov	x1, x25
+ 	mov	x2, sp
+diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
+index ff3bddea482d..f6fe17d88da5 100644
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-builtin
+ ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+ 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ 
++# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
++# down to collect2, resulting in silent corruption of the vDSO image.
++ccflags-y += -Wl,-shared
++
+ obj-y += vdso.o
+ extra-y += vdso.lds vdso-offsets.h
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
+index baa758d37021..76c1e6cd36fc 100644
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -92,6 +92,14 @@ static void reset_context(void *info)
+ 	unsigned int cpu = smp_processor_id();
+ 	struct mm_struct *mm = current->active_mm;
+ 
++	/*
++	 * current->active_mm could be init_mm for the idle thread immediately
++	 * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
++	 * the reserved value, so no need to reset any context.
++	 */
++	if (mm == &init_mm)
++		return;
++
+ 	smp_rmb();
+ 	asid = cpu_last_asid + cpu;
+ 
+diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
+index 597831bdddf3..ad87ce826cce 100644
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -262,7 +262,7 @@ static void __init free_unused_memmap(void)
+ 		 * memmap entries are valid from the bank end aligned to
+ 		 * MAX_ORDER_NR_PAGES.
+ 		 */
+-		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
++		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
+ 				 MAX_ORDER_NR_PAGES);
+ 	}
+ 
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index d3f896a35b98..2eeb0a0f506d 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -456,8 +456,6 @@ static const struct super_operations hypfs_s_ops = {
+ 	.show_options	= hypfs_show_options,
+ };
+ 
+-static struct kobject *s390_kobj;
+-
+ static int __init hypfs_init(void)
+ {
+ 	int rc;
+@@ -481,18 +479,16 @@ static int __init hypfs_init(void)
+ 		rc = -ENODATA;
+ 		goto fail_hypfs_sprp_exit;
+ 	}
+-	s390_kobj = kobject_create_and_add("s390", hypervisor_kobj);
+-	if (!s390_kobj) {
+-		rc = -ENOMEM;
++	rc = sysfs_create_mount_point(hypervisor_kobj, "s390");
++	if (rc)
+ 		goto fail_hypfs_diag0c_exit;
+-	}
+ 	rc = register_filesystem(&hypfs_type);
+ 	if (rc)
+ 		goto fail_filesystem;
+ 	return 0;
+ 
+ fail_filesystem:
+-	kobject_put(s390_kobj);
++	sysfs_remove_mount_point(hypervisor_kobj, "s390");
+ fail_hypfs_diag0c_exit:
+ 	hypfs_diag0c_exit();
+ fail_hypfs_sprp_exit:
+@@ -510,7 +506,7 @@ fail_dbfs_exit:
+ static void __exit hypfs_exit(void)
+ {
+ 	unregister_filesystem(&hypfs_type);
+-	kobject_put(s390_kobj);
++	sysfs_remove_mount_point(hypervisor_kobj, "s390");
+ 	hypfs_diag0c_exit();
+ 	hypfs_sprp_exit();
+ 	hypfs_vm_exit();
+diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
+index c412fdb28d34..513e7230e3d0 100644
+--- a/drivers/acpi/bus.c
++++ b/drivers/acpi/bus.c
+@@ -470,6 +470,16 @@ static int __init acpi_bus_init_irq(void)
+ 	return 0;
+ }
+ 
++/**
++ * acpi_early_init - Initialize ACPICA and populate the ACPI namespace.
++ *
++ * The ACPI tables are accessible after this, but the handling of events has not
++ * been initialized and the global lock is not available yet, so AML should not
++ * be executed at this point.
++ *
++ * Doing this before switching the EFI runtime services to virtual mode allows
++ * the EfiBootServices memory to be freed slightly earlier on boot.
++ */
+ void __init acpi_early_init(void)
+ {
+ 	acpi_status status;
+@@ -533,26 +543,42 @@ void __init acpi_early_init(void)
+ 		acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
+ 	}
+ #endif
++	return;
++
++ error0:
++	disable_acpi();
++}
++
++/**
++ * acpi_subsystem_init - Finalize the early initialization of ACPI.
++ *
++ * Switch over the platform to the ACPI mode (if possible), initialize the
++ * handling of ACPI events, install the interrupt and global lock handlers.
++ *
++ * Doing this too early is generally unsafe, but at the same time it needs to be
++ * done before all things that really depend on ACPI.  The right spot appears to
++ * be before finalizing the EFI initialization.
++ */
++void __init acpi_subsystem_init(void)
++{
++	acpi_status status;
++
++	if (acpi_disabled)
++		return;
+ 
+ 	status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
+ 	if (ACPI_FAILURE(status)) {
+ 		printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
+-		goto error0;
++		disable_acpi();
++	} else {
++		/*
++		 * If the system is using ACPI then we can be reasonably
++		 * confident that any regulators are managed by the firmware
++		 * so tell the regulator core it has everything it needs to
++		 * know.
++		 */
++		regulator_has_full_constraints();
+ 	}
+-
+-	/*
+-	 * If the system is using ACPI then we can be reasonably
+-	 * confident that any regulators are managed by the firmware
+-	 * so tell the regulator core it has everything it needs to
+-	 * know.
+-	 */
+-	regulator_has_full_constraints();
+-
+-	return;
+-
+-      error0:
+-	disable_acpi();
+-	return;
+ }
+ 
+ static int __init acpi_bus_init(void)
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index 735db11a9b00..8217e0bda60f 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -953,6 +953,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_prepare);
+  */
+ void acpi_subsys_complete(struct device *dev)
+ {
++	pm_generic_complete(dev);
+ 	/*
+ 	 * If the device had been runtime-suspended before the system went into
+ 	 * the sleep state it is going out of and it has never been resumed till
+diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
+index 7ccba395c9dd..5226a8b921ae 100644
+--- a/drivers/acpi/osl.c
++++ b/drivers/acpi/osl.c
+@@ -175,11 +175,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
+ 	if (!addr || !length)
+ 		return;
+ 
+-	/* Resources are never freed */
+-	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+-		request_region(addr, length, desc);
+-	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+-		request_mem_region(addr, length, desc);
++	acpi_reserve_region(addr, length, gas->space_id, 0, desc);
+ }
+ 
+ static void __init acpi_reserve_resources(void)
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 8244f013f210..fcb7807ea8b7 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -26,6 +26,7 @@
+ #include <linux/device.h>
+ #include <linux/export.h>
+ #include <linux/ioport.h>
++#include <linux/list.h>
+ #include <linux/slab.h>
+ 
+ #ifdef CONFIG_X86
+@@ -621,3 +622,162 @@ int acpi_dev_filter_resource_type(struct acpi_resource *ares,
+ 	return (type & types) ? 0 : 1;
+ }
+ EXPORT_SYMBOL_GPL(acpi_dev_filter_resource_type);
++
++struct reserved_region {
++	struct list_head node;
++	u64 start;
++	u64 end;
++};
++
++static LIST_HEAD(reserved_io_regions);
++static LIST_HEAD(reserved_mem_regions);
++
++static int request_range(u64 start, u64 end, u8 space_id, unsigned long flags,
++			 char *desc)
++{
++	unsigned int length = end - start + 1;
++	struct resource *res;
++
++	res = space_id == ACPI_ADR_SPACE_SYSTEM_IO ?
++		request_region(start, length, desc) :
++		request_mem_region(start, length, desc);
++	if (!res)
++		return -EIO;
++
++	res->flags &= ~flags;
++	return 0;
++}
++
++static int add_region_before(u64 start, u64 end, u8 space_id,
++			     unsigned long flags, char *desc,
++			     struct list_head *head)
++{
++	struct reserved_region *reg;
++	int error;
++
++	reg = kmalloc(sizeof(*reg), GFP_KERNEL);
++	if (!reg)
++		return -ENOMEM;
++
++	error = request_range(start, end, space_id, flags, desc);
++	if (error)
++		return error;
++
++	reg->start = start;
++	reg->end = end;
++	list_add_tail(&reg->node, head);
++	return 0;
++}
++
++/**
++ * acpi_reserve_region - Reserve an I/O or memory region as a system resource.
++ * @start: Starting address of the region.
++ * @length: Length of the region.
++ * @space_id: Identifier of address space to reserve the region from.
++ * @flags: Resource flags to clear for the region after requesting it.
++ * @desc: Region description (for messages).
++ *
++ * Reserve an I/O or memory region as a system resource to prevent others from
++ * using it.  If the new region overlaps with one of the regions (in the given
++ * address space) already reserved by this routine, only the non-overlapping
++ * parts of it will be reserved.
++ *
++ * Returns either 0 (success) or a negative error code indicating a resource
++ * reservation problem.  It is the code of the first encountered error, but the
++ * routine doesn't abort until it has attempted to request all of the parts of
++ * the new region that don't overlap with other regions reserved previously.
++ *
++ * The resources requested by this routine are never released.
++ */
++int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
++			unsigned long flags, char *desc)
++{
++	struct list_head *regions;
++	struct reserved_region *reg;
++	u64 end = start + length - 1;
++	int ret = 0, error = 0;
++
++	if (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
++		regions = &reserved_io_regions;
++	else if (space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
++		regions = &reserved_mem_regions;
++	else
++		return -EINVAL;
++
++	if (list_empty(regions))
++		return add_region_before(start, end, space_id, flags, desc, regions);
++
++	list_for_each_entry(reg, regions, node)
++		if (reg->start == end + 1) {
++			/* The new region can be prepended to this one. */
++			ret = request_range(start, end, space_id, flags, desc);
++			if (!ret)
++				reg->start = start;
++
++			return ret;
++		} else if (reg->start > end) {
++			/* No overlap.  Add the new region here and get out. */
++			return add_region_before(start, end, space_id, flags,
++						 desc, &reg->node);
++		} else if (reg->end == start - 1) {
++			goto combine;
++		} else if (reg->end >= start) {
++			goto overlap;
++		}
++
++	/* The new region goes after the last existing one. */
++	return add_region_before(start, end, space_id, flags, desc, regions);
++
++ overlap:
++	/*
++	 * The new region overlaps an existing one.
++	 *
++	 * The head part of the new region immediately preceding the existing
++	 * overlapping one can be combined with it right away.
++	 */
++	if (reg->start > start) {
++		error = request_range(start, reg->start - 1, space_id, flags, desc);
++		if (error)
++			ret = error;
++		else
++			reg->start = start;
++	}
++
++ combine:
++	/*
++	 * The new region is adjacent to an existing one.  If it extends beyond
++	 * that region all the way to the next one, it is possible to combine
++	 * all three of them.
++	 */
++	while (reg->end < end) {
++		struct reserved_region *next = NULL;
++		u64 a = reg->end + 1, b = end;
++
++		if (!list_is_last(&reg->node, regions)) {
++			next = list_next_entry(reg, node);
++			if (next->start <= end)
++				b = next->start - 1;
++		}
++		error = request_range(a, b, space_id, flags, desc);
++		if (!error) {
++			if (next && next->start == b + 1) {
++				reg->end = next->end;
++				list_del(&next->node);
++				kfree(next);
++			} else {
++				reg->end = end;
++				break;
++			}
++		} else if (next) {
++			if (!ret)
++				ret = error;
++
++			reg = next;
++		} else {
++			break;
++		}
++	}
++
++	return ret ? ret : error;
++}
++EXPORT_SYMBOL_GPL(acpi_reserve_region);
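+
A hedged usage sketch of the new helper (addresses hypothetical): per the
kerneldoc above, an overlapping second reservation only requests the
non-overlapping tail, and adjacent regions are merged:

	static void example_reserve(void)
	{
		/* Reserves I/O ports 0x400-0x41f. */
		acpi_reserve_region(0x400, 0x20, ACPI_ADR_SPACE_SYSTEM_IO,
				    0, "devA");

		/* Overlaps 0x410-0x41f, so only 0x420-0x42f is newly
		 * requested; the tracked region grows to 0x400-0x42f. */
		acpi_reserve_region(0x410, 0x20, ACPI_ADR_SPACE_SYSTEM_IO,
				    0, "devB");
	}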
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index 6273ff072f3e..1c76dcb502cf 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -945,11 +945,10 @@ EXPORT_SYMBOL_GPL(devm_regmap_init);
+ static void regmap_field_init(struct regmap_field *rm_field,
+ 	struct regmap *regmap, struct reg_field reg_field)
+ {
+-	int field_bits = reg_field.msb - reg_field.lsb + 1;
+ 	rm_field->regmap = regmap;
+ 	rm_field->reg = reg_field.reg;
+ 	rm_field->shift = reg_field.lsb;
+-	rm_field->mask = ((BIT(field_bits) - 1) << reg_field.lsb);
++	rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
+ 	rm_field->id_size = reg_field.id_size;
+ 	rm_field->id_offset = reg_field.id_offset;
+ }
+@@ -2318,7 +2317,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
+ 					  &ival);
+ 			if (ret != 0)
+ 				return ret;
+-			memcpy(val + (i * val_bytes), &ival, val_bytes);
++			map->format.format_val(val + (i * val_bytes), ival, 0);
+ 		}
+ 	}
+ 
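Why the GENMASK() form above is equivalent but safer, worked through for
a hypothetical register field:

	/* For a reg_field with .lsb = 4 and .msb = 7:
	 *   old: (BIT(7 - 4 + 1) - 1) << 4  ==  0x0f << 4  ==  0xf0
	 *   new: GENMASK(7, 4)              ==  0xf0
	 * For a full-width field (.lsb = 0, .msb = 31) the old form
	 * evaluates BIT(32) - 1, an undefined shift on 32-bit builds,
	 * while GENMASK(31, 0) stays well defined.
	 */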
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 3061bb8629dc..e14363d12690 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -65,7 +65,6 @@ static int __init parse_efi_cmdline(char *str)
+ early_param("efi", parse_efi_cmdline);
+ 
+ static struct kobject *efi_kobj;
+-static struct kobject *efivars_kobj;
+ 
+ /*
+  * Let's not leave out systab information that snuck into
+@@ -212,10 +211,9 @@ static int __init efisubsys_init(void)
+ 		goto err_remove_group;
+ 
+ 	/* and the standard mountpoint for efivarfs */
+-	efivars_kobj = kobject_create_and_add("efivars", efi_kobj);
+-	if (!efivars_kobj) {
++	error = sysfs_create_mount_point(efi_kobj, "efivars");
++	if (error) {
+ 		pr_err("efivars: Subsystem registration failed.\n");
+-		error = -ENOMEM;
+ 		goto err_remove_group;
+ 	}
+ 
+diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
+index 91a7ffe83135..ab457fc00e75 100644
+--- a/drivers/gpio/gpio-crystalcove.c
++++ b/drivers/gpio/gpio-crystalcove.c
+@@ -255,6 +255,7 @@ static struct irq_chip crystalcove_irqchip = {
+ 	.irq_set_type		= crystalcove_irq_type,
+ 	.irq_bus_lock		= crystalcove_bus_lock,
+ 	.irq_bus_sync_unlock	= crystalcove_bus_sync_unlock,
++	.flags			= IRQCHIP_SKIP_SET_WAKE,
+ };
+ 
+ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
+diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
+index fd3977465948..1e14a6c74ed1 100644
+--- a/drivers/gpio/gpio-rcar.c
++++ b/drivers/gpio/gpio-rcar.c
+@@ -177,8 +177,17 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
+ 	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ 	struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ 						gpio_chip);
+-
+-	irq_set_irq_wake(p->irq_parent, on);
++	int error;
++
++	if (p->irq_parent) {
++		error = irq_set_irq_wake(p->irq_parent, on);
++		if (error) {
++			dev_dbg(&p->pdev->dev,
++				"irq %u doesn't support irq_set_wake\n",
++				p->irq_parent);
++			p->irq_parent = 0;
++		}
++	}
+ 
+ 	if (!p->clk)
+ 		return 0;
+diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
+index 51da3692d561..5b7a860df524 100644
+--- a/drivers/iio/accel/kxcjk-1013.c
++++ b/drivers/iio/accel/kxcjk-1013.c
+@@ -1418,6 +1418,7 @@ static const struct dev_pm_ops kxcjk1013_pm_ops = {
+ static const struct acpi_device_id kx_acpi_match[] = {
+ 	{"KXCJ1013", KXCJK1013},
+ 	{"KXCJ1008", KXCJ91008},
++	{"KXCJ9000", KXCJ91008},
+ 	{"KXTJ1009", KXTJ21009},
+ 	{"SMO8500",  KXCJ91008},
+ 	{ },
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
+index 918814cd0f80..75c01b27bd0b 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.c
++++ b/drivers/infiniband/ulp/srp/ib_srp.c
+@@ -465,14 +465,13 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
+  */
+ static void srp_destroy_qp(struct srp_rdma_ch *ch)
+ {
+-	struct srp_target_port *target = ch->target;
+ 	static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ 	static struct ib_recv_wr wr = { .wr_id = SRP_LAST_WR_ID };
+ 	struct ib_recv_wr *bad_wr;
+ 	int ret;
+ 
+ 	/* Destroying a QP and reusing ch->done is only safe if not connected */
+-	WARN_ON_ONCE(target->connected);
++	WARN_ON_ONCE(ch->connected);
+ 
+ 	ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
+ 	WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
+@@ -811,35 +810,19 @@ static bool srp_queue_remove_work(struct srp_target_port *target)
+ 	return changed;
+ }
+ 
+-static bool srp_change_conn_state(struct srp_target_port *target,
+-				  bool connected)
+-{
+-	bool changed = false;
+-
+-	spin_lock_irq(&target->lock);
+-	if (target->connected != connected) {
+-		target->connected = connected;
+-		changed = true;
+-	}
+-	spin_unlock_irq(&target->lock);
+-
+-	return changed;
+-}
+-
+ static void srp_disconnect_target(struct srp_target_port *target)
+ {
+ 	struct srp_rdma_ch *ch;
+ 	int i;
+ 
+-	if (srp_change_conn_state(target, false)) {
+-		/* XXX should send SRP_I_LOGOUT request */
++	/* XXX should send SRP_I_LOGOUT request */
+ 
+-		for (i = 0; i < target->ch_count; i++) {
+-			ch = &target->ch[i];
+-			if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
+-				shost_printk(KERN_DEBUG, target->scsi_host,
+-					     PFX "Sending CM DREQ failed\n");
+-			}
++	for (i = 0; i < target->ch_count; i++) {
++		ch = &target->ch[i];
++		ch->connected = false;
++		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
++			shost_printk(KERN_DEBUG, target->scsi_host,
++				     PFX "Sending CM DREQ failed\n");
+ 		}
+ 	}
+ }
+@@ -986,14 +969,26 @@ static void srp_rport_delete(struct srp_rport *rport)
+ 	srp_queue_remove_work(target);
+ }
+ 
++/**
++ * srp_connected_ch() - number of connected channels
++ * @target: SRP target port.
++ */
++static int srp_connected_ch(struct srp_target_port *target)
++{
++	int i, c = 0;
++
++	for (i = 0; i < target->ch_count; i++)
++		c += target->ch[i].connected;
++
++	return c;
++}
++
+ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+ {
+ 	struct srp_target_port *target = ch->target;
+ 	int ret;
+ 
+-	WARN_ON_ONCE(!multich && target->connected);
+-
+-	target->qp_in_error = false;
++	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
+ 
+ 	ret = srp_lookup_path(ch);
+ 	if (ret)
+@@ -1016,7 +1011,7 @@ static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
+ 		 */
+ 		switch (ch->status) {
+ 		case 0:
+-			srp_change_conn_state(target, true);
++			ch->connected = true;
+ 			return 0;
+ 
+ 		case SRP_PORT_REDIRECT:
+@@ -1243,13 +1238,13 @@ static int srp_rport_reconnect(struct srp_rport *rport)
+ 		for (j = 0; j < target->queue_size; ++j)
+ 			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
+ 	}
++
++	target->qp_in_error = false;
++
+ 	for (i = 0; i < target->ch_count; i++) {
+ 		ch = &target->ch[i];
+-		if (ret || !ch->target) {
+-			if (i > 1)
+-				ret = 0;
++		if (ret || !ch->target)
+ 			break;
+-		}
+ 		ret = srp_connect_ch(ch, multich);
+ 		multich = true;
+ 	}
+@@ -1929,7 +1924,7 @@ static void srp_handle_qp_err(u64 wr_id, enum ib_wc_status wc_status,
+ 		return;
+ 	}
+ 
+-	if (target->connected && !target->qp_in_error) {
++	if (ch->connected && !target->qp_in_error) {
+ 		if (wr_id & LOCAL_INV_WR_ID_MASK) {
+ 			shost_printk(KERN_ERR, target->scsi_host, PFX
+ 				     "LOCAL_INV failed with status %d\n",
+@@ -2367,7 +2362,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+ 	case IB_CM_DREQ_RECEIVED:
+ 		shost_printk(KERN_WARNING, target->scsi_host,
+ 			     PFX "DREQ received - connection closed\n");
+-		srp_change_conn_state(target, false);
++		ch->connected = false;
+ 		if (ib_send_cm_drep(cm_id, NULL, 0))
+ 			shost_printk(KERN_ERR, target->scsi_host,
+ 				     PFX "Sending CM DREP failed\n");
+@@ -2423,7 +2418,7 @@ static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag,
+ 	struct srp_iu *iu;
+ 	struct srp_tsk_mgmt *tsk_mgmt;
+ 
+-	if (!target->connected || target->qp_in_error)
++	if (!ch->connected || target->qp_in_error)
+ 		return -1;
+ 
+ 	init_completion(&ch->tsk_mgmt_done);
+@@ -2797,7 +2792,8 @@ static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
+ 	scsi_scan_target(&target->scsi_host->shost_gendev,
+ 			 0, target->scsi_id, SCAN_WILD_CARD, 0);
+ 
+-	if (!target->connected || target->qp_in_error) {
++	if (srp_connected_ch(target) < target->ch_count ||
++	    target->qp_in_error) {
+ 		shost_printk(KERN_INFO, target->scsi_host,
+ 			     PFX "SCSI scan failed - removing SCSI host\n");
+ 		srp_queue_remove_work(target);
+@@ -3172,11 +3168,11 @@ static ssize_t srp_create_target(struct device *dev,
+ 
+ 	ret = srp_parse_options(buf, target);
+ 	if (ret)
+-		goto err;
++		goto out;
+ 
+ 	ret = scsi_init_shared_tag_map(target_host, target_host->can_queue);
+ 	if (ret)
+-		goto err;
++		goto out;
+ 
+ 	target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
+ 
+@@ -3187,7 +3183,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 			     be64_to_cpu(target->ioc_guid),
+ 			     be64_to_cpu(target->initiator_ext));
+ 		ret = -EEXIST;
+-		goto err;
++		goto out;
+ 	}
+ 
+ 	if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
+@@ -3208,7 +3204,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 	spin_lock_init(&target->lock);
+ 	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid);
+ 	if (ret)
+-		goto err;
++		goto out;
+ 
+ 	ret = -ENOMEM;
+ 	target->ch_count = max_t(unsigned, num_online_nodes(),
+@@ -3219,7 +3215,7 @@ static ssize_t srp_create_target(struct device *dev,
+ 	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
+ 			     GFP_KERNEL);
+ 	if (!target->ch)
+-		goto err;
++		goto out;
+ 
+ 	node_idx = 0;
+ 	for_each_online_node(node) {
+@@ -3315,9 +3311,6 @@ err_disconnect:
+ 	}
+ 
+ 	kfree(target->ch);
+-
+-err:
+-	scsi_host_put(target_host);
+ 	goto out;
+ }
+ 
+diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
+index a611556406ac..e690847a46dd 100644
+--- a/drivers/infiniband/ulp/srp/ib_srp.h
++++ b/drivers/infiniband/ulp/srp/ib_srp.h
+@@ -170,6 +170,7 @@ struct srp_rdma_ch {
+ 
+ 	struct completion	tsk_mgmt_done;
+ 	u8			tsk_mgmt_status;
++	bool			connected;
+ };
+ 
+ /**
+@@ -214,7 +215,6 @@ struct srp_target_port {
+ 	__be16			pkey;
+ 
+ 	u32			rq_tmo_jiffies;
+-	bool			connected;
+ 
+ 	int			zero_req_lim;
+ 
+diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
+index 2c2107147319..8f3e243a62bf 100644
+--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
++++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
+@@ -78,7 +78,7 @@ static void pixcir_ts_parse(struct pixcir_i2c_ts_data *tsdata,
+ 	}
+ 
+ 	ret = i2c_master_recv(tsdata->client, rdbuf, readsize);
+-	if (ret != sizeof(rdbuf)) {
++	if (ret != readsize) {
+ 		dev_err(&tsdata->client->dev,
+ 			"%s: i2c_master_recv failed(), ret=%d\n",
+ 			__func__, ret);
+diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
+index 728681debdbe..7fb2a19ac649 100644
+--- a/drivers/leds/led-class.c
++++ b/drivers/leds/led-class.c
+@@ -187,6 +187,7 @@ void led_classdev_resume(struct led_classdev *led_cdev)
+ }
+ EXPORT_SYMBOL_GPL(led_classdev_resume);
+ 
++#ifdef CONFIG_PM_SLEEP
+ static int led_suspend(struct device *dev)
+ {
+ 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+@@ -206,11 +207,9 @@ static int led_resume(struct device *dev)
+ 
+ 	return 0;
+ }
++#endif
+ 
+-static const struct dev_pm_ops leds_class_dev_pm_ops = {
+-	.suspend        = led_suspend,
+-	.resume         = led_resume,
+-};
++static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
+ 
+ static int match_name(struct device *dev, const void *data)
+ {
+diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
+index 1e99ef6a54a2..b2b9f4382d77 100644
+--- a/drivers/misc/mei/client.c
++++ b/drivers/misc/mei/client.c
+@@ -699,7 +699,7 @@ void mei_host_client_init(struct work_struct *work)
+ bool mei_hbuf_acquire(struct mei_device *dev)
+ {
+ 	if (mei_pg_state(dev) == MEI_PG_ON ||
+-	    dev->pg_event == MEI_PG_EVENT_WAIT) {
++	    mei_pg_in_transition(dev)) {
+ 		dev_dbg(dev->dev, "device is in pg\n");
+ 		return false;
+ 	}
+diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
+index 6fb75e62a764..43d7101ff993 100644
+--- a/drivers/misc/mei/hw-me.c
++++ b/drivers/misc/mei/hw-me.c
+@@ -663,11 +663,27 @@ int mei_me_pg_exit_sync(struct mei_device *dev)
+ 	mutex_lock(&dev->device_lock);
+ 
+ reply:
+-	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
+-		ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
++	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
++		ret = -ETIME;
++		goto out;
++	}
++
++	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
++	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
++	if (ret)
++		return ret;
++
++	mutex_unlock(&dev->device_lock);
++	wait_event_timeout(dev->wait_pg,
++		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
++	mutex_lock(&dev->device_lock);
++
++	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
++		ret = 0;
+ 	else
+ 		ret = -ETIME;
+ 
++out:
+ 	dev->pg_event = MEI_PG_EVENT_IDLE;
+ 	hw->pg_state = MEI_PG_OFF;
+ 
+@@ -675,6 +691,19 @@ reply:
+ }
+ 
+ /**
++ * mei_me_pg_in_transition - is device now in pg transition
++ *
++ * @dev: the device structure
++ *
++ * Return: true if in pg transition, false otherwise
++ */
++static bool mei_me_pg_in_transition(struct mei_device *dev)
++{
++	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
++	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
++}
++
++/**
+  * mei_me_pg_is_enabled - detect if PG is supported by HW
+  *
+  * @dev: the device structure
+@@ -705,6 +734,24 @@ notsupported:
+ }
+ 
+ /**
++ * mei_me_pg_intr - perform pg processing in interrupt thread handler
++ *
++ * @dev: the device structure
++ */
++static void mei_me_pg_intr(struct mei_device *dev)
++{
++	struct mei_me_hw *hw = to_me_hw(dev);
++
++	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
++		return;
++
++	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
++	hw->pg_state = MEI_PG_OFF;
++	if (waitqueue_active(&dev->wait_pg))
++		wake_up(&dev->wait_pg);
++}
++
++/**
+  * mei_me_irq_quick_handler - The ISR of the MEI device
+  *
+  * @irq: The irq number
+@@ -761,6 +808,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ 		goto end;
+ 	}
+ 
++	mei_me_pg_intr(dev);
++
+ 	/*  check if we need to start the dev */
+ 	if (!mei_host_is_ready(dev)) {
+ 		if (mei_hw_is_ready(dev)) {
+@@ -797,9 +846,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+ 	/*
+ 	 * During PG handshake the only allowed write is the reply to the
+ 	 * PG exit message, so block calling write function
+-	 * if the pg state is not idle
++	 * if the pg event is in PG handshake
+ 	 */
+-	if (dev->pg_event == MEI_PG_EVENT_IDLE) {
++	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
++	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
+ 		rets = mei_irq_write_handler(dev, &complete_list);
+ 		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+ 	}
+@@ -824,6 +874,7 @@ static const struct mei_hw_ops mei_me_hw_ops = {
+ 	.hw_config = mei_me_hw_config,
+ 	.hw_start = mei_me_hw_start,
+ 
++	.pg_in_transition = mei_me_pg_in_transition,
+ 	.pg_is_enabled = mei_me_pg_is_enabled,
+ 
+ 	.intr_clear = mei_me_intr_clear,
+diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
+index 7abafe7d120d..bae680c648ff 100644
+--- a/drivers/misc/mei/hw-txe.c
++++ b/drivers/misc/mei/hw-txe.c
+@@ -16,6 +16,7 @@
+ 
+ #include <linux/pci.h>
+ #include <linux/jiffies.h>
++#include <linux/ktime.h>
+ #include <linux/delay.h>
+ #include <linux/kthread.h>
+ #include <linux/irqreturn.h>
+@@ -218,26 +219,25 @@ static u32 mei_txe_aliveness_get(struct mei_device *dev)
+  *
+  * Polls for HICR_HOST_ALIVENESS_RESP.ALIVENESS_RESP to be set
+  *
+- * Return: > 0 if the expected value was received, -ETIME otherwise
++ * Return: 0 if the expected value was received, -ETIME otherwise
+  */
+ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
+ {
+ 	struct mei_txe_hw *hw = to_txe_hw(dev);
+-	int t = 0;
++	ktime_t stop, start;
+ 
++	start = ktime_get();
++	stop = ktime_add(start, ms_to_ktime(SEC_ALIVENESS_WAIT_TIMEOUT));
+ 	do {
+ 		hw->aliveness = mei_txe_aliveness_get(dev);
+ 		if (hw->aliveness == expected) {
+ 			dev->pg_event = MEI_PG_EVENT_IDLE;
+-			dev_dbg(dev->dev,
+-				"aliveness settled after %d msecs\n", t);
+-			return t;
++			dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
++				ktime_to_us(ktime_sub(ktime_get(), start)));
++			return 0;
+ 		}
+-		mutex_unlock(&dev->device_lock);
+-		msleep(MSEC_PER_SEC / 5);
+-		mutex_lock(&dev->device_lock);
+-		t += MSEC_PER_SEC / 5;
+-	} while (t < SEC_ALIVENESS_WAIT_TIMEOUT);
++		usleep_range(20, 50);
++	} while (ktime_before(ktime_get(), stop));
+ 
+ 	dev->pg_event = MEI_PG_EVENT_IDLE;
+ 	dev_err(dev->dev, "aliveness timed out\n");
+@@ -302,6 +302,18 @@ int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req)
+ }
+ 
+ /**
++ * mei_txe_pg_in_transition - is device now in pg transition
++ *
++ * @dev: the device structure
++ *
++ * Return: true if in pg transition, false otherwise
++ */
++static bool mei_txe_pg_in_transition(struct mei_device *dev)
++{
++	return dev->pg_event == MEI_PG_EVENT_WAIT;
++}
++
++/**
+  * mei_txe_pg_is_enabled - detect if PG is supported by HW
+  *
+  * @dev: the device structure
+@@ -1138,6 +1150,7 @@ static const struct mei_hw_ops mei_txe_hw_ops = {
+ 	.hw_config = mei_txe_hw_config,
+ 	.hw_start = mei_txe_hw_start,
+ 
++	.pg_in_transition = mei_txe_pg_in_transition,
+ 	.pg_is_enabled = mei_txe_pg_is_enabled,
+ 
+ 	.intr_clear = mei_txe_intr_clear,
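+
The ktime-based polling pattern the hunk above adopts, sketched
generically (condition_met() and timeout_ms are hypothetical): the loop
is bounded by wall-clock time instead of counting fixed-length sleeps:

	static int example_poll(unsigned long timeout_ms)
	{
		ktime_t stop = ktime_add(ktime_get(),
					 ms_to_ktime(timeout_ms));

		do {
			if (condition_met())
				return 0;
			usleep_range(20, 50); /* short, scheduler-friendly */
		} while (ktime_before(ktime_get(), stop));

		return -ETIME;
	}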
+diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
+index f066ecd71939..f84c39ee28a8 100644
+--- a/drivers/misc/mei/mei_dev.h
++++ b/drivers/misc/mei/mei_dev.h
+@@ -271,6 +271,7 @@ struct mei_cl {
+ 
+  * @fw_status        : get fw status registers
+  * @pg_state         : power gating state of the device
++ * @pg_in_transition : is device now in pg transition
+  * @pg_is_enabled    : is power gating enabled
+ 
+  * @intr_clear       : clear pending interrupts
+@@ -300,6 +301,7 @@ struct mei_hw_ops {
+ 
+ 	int (*fw_status)(struct mei_device *dev, struct mei_fw_status *fw_sts);
+ 	enum mei_pg_state (*pg_state)(struct mei_device *dev);
++	bool (*pg_in_transition)(struct mei_device *dev);
+ 	bool (*pg_is_enabled)(struct mei_device *dev);
+ 
+ 	void (*intr_clear)(struct mei_device *dev);
+@@ -398,11 +400,15 @@ struct mei_cl_device {
+  * @MEI_PG_EVENT_IDLE: the driver is not in power gating transition
+  * @MEI_PG_EVENT_WAIT: the driver is waiting for a pg event to complete
+  * @MEI_PG_EVENT_RECEIVED: the driver received pg event
++ * @MEI_PG_EVENT_INTR_WAIT: the driver is waiting for a pg event interrupt
++ * @MEI_PG_EVENT_INTR_RECEIVED: the driver received pg event interrupt
+  */
+ enum mei_pg_event {
+ 	MEI_PG_EVENT_IDLE,
+ 	MEI_PG_EVENT_WAIT,
+ 	MEI_PG_EVENT_RECEIVED,
++	MEI_PG_EVENT_INTR_WAIT,
++	MEI_PG_EVENT_INTR_RECEIVED,
+ };
+ 
+ /**
+@@ -717,6 +723,11 @@ static inline enum mei_pg_state mei_pg_state(struct mei_device *dev)
+ 	return dev->ops->pg_state(dev);
+ }
+ 
++static inline bool mei_pg_in_transition(struct mei_device *dev)
++{
++	return dev->ops->pg_in_transition(dev);
++}
++
+ static inline bool mei_pg_is_enabled(struct mei_device *dev)
+ {
+ 	return dev->ops->pg_is_enabled(dev);
+diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
+index f8a7dd14cee0..70a3db3ab856 100644
+--- a/drivers/mtd/maps/dc21285.c
++++ b/drivers/mtd/maps/dc21285.c
+@@ -38,9 +38,9 @@ static void nw_en_write(void)
+ 	 * we want to write a bit pattern XXX1 to Xilinx to enable
+ 	 * the write gate, which will be open for about the next 2ms.
+ 	 */
+-	spin_lock_irqsave(&nw_gpio_lock, flags);
++	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
+ 	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
+-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
++	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
+ 
+ 	/*
+ 	 * let the ISA bus to catch on...
+diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
+index 2b0c52870999..df7c6c70757a 100644
+--- a/drivers/mtd/mtd_blkdevs.c
++++ b/drivers/mtd/mtd_blkdevs.c
+@@ -197,6 +197,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (dev->open)
+ 		goto unlock;
+@@ -220,6 +221,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
+ 
+ unlock:
+ 	dev->open++;
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -230,6 +232,7 @@ error_release:
+ error_put:
+ 	module_put(dev->tr->owner);
+ 	kref_put(&dev->ref, blktrans_dev_release);
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ 	return ret;
+@@ -243,6 +246,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		return;
+ 
+ 	mutex_lock(&dev->lock);
++	mutex_lock(&mtd_table_mutex);
+ 
+ 	if (--dev->open)
+ 		goto unlock;
+@@ -256,6 +260,7 @@ static void blktrans_release(struct gendisk *disk, fmode_t mode)
+ 		__put_mtd_device(dev->mtd);
+ 	}
+ unlock:
++	mutex_unlock(&mtd_table_mutex);
+ 	mutex_unlock(&dev->lock);
+ 	blktrans_dev_put(dev);
+ }
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index 78a7dcbec7d8..6906a3f61bd8 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -765,7 +765,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
+ 	spin_lock(&io_range_lock);
+ 	list_for_each_entry(res, &io_range_list, list) {
+ 		if (address >= res->start && address < res->start + res->size) {
+-			addr = res->start - address + offset;
++			addr = address - res->start + offset;
+ 			break;
+ 		}
+ 		offset += res->size;
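+
The arithmetic the of/address.c fix corrects, worked with hypothetical
numbers:

	/* For an I/O range with res->start = 0x1000, res->size = 0x100 and
	 * a running offset of 0, physical address 0x1010 must map to
	 * port-I/O offset 0x10:
	 *   old: res->start - address + offset == 0x1000 - 0x1010  (wraps)
	 *   new: address - res->start + offset == 0x0010           (correct)
	 */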
+diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
+index 7a8f1c5e65af..73de4efcbe6e 100644
+--- a/drivers/pci/Kconfig
++++ b/drivers/pci/Kconfig
+@@ -1,6 +1,10 @@
+ #
+ # PCI configuration
+ #
++config PCI_BUS_ADDR_T_64BIT
++	def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT)
++	depends on PCI
++
+ config PCI_MSI
+ 	bool "Message Signaled Interrupts (MSI and MSI-X)"
+ 	depends on PCI
+diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
+index 90fa3a78fb7c..6fbd3f2b5992 100644
+--- a/drivers/pci/bus.c
++++ b/drivers/pci/bus.c
+@@ -92,11 +92,11 @@ void pci_bus_remove_resources(struct pci_bus *bus)
+ }
+ 
+ static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL};
+-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+ static struct pci_bus_region pci_64_bit = {0,
+-				(dma_addr_t) 0xffffffffffffffffULL};
+-static struct pci_bus_region pci_high = {(dma_addr_t) 0x100000000ULL,
+-				(dma_addr_t) 0xffffffffffffffffULL};
++				(pci_bus_addr_t) 0xffffffffffffffffULL};
++static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL,
++				(pci_bus_addr_t) 0xffffffffffffffffULL};
+ #endif
+ 
+ /*
+@@ -200,7 +200,7 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
+ 					  resource_size_t),
+ 		void *alignf_data)
+ {
+-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
+ 	int rc;
+ 
+ 	if (res->flags & IORESOURCE_MEM_64) {
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 0ebf754fc177..6d6868811e56 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -176,20 +176,17 @@ static void pcie_wait_cmd(struct controller *ctrl)
+ 			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
+ }
+ 
+-/**
+- * pcie_write_cmd - Issue controller command
+- * @ctrl: controller to which the command is issued
+- * @cmd:  command value written to slot control register
+- * @mask: bitmask of slot control register to be modified
+- */
+-static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
++static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
++			      u16 mask, bool wait)
+ {
+ 	struct pci_dev *pdev = ctrl_dev(ctrl);
+ 	u16 slot_ctrl;
+ 
+ 	mutex_lock(&ctrl->ctrl_lock);
+ 
+-	/* Wait for any previous command that might still be in progress */
++	/*
++	 * Always wait for any previous command that might still be in progress
++	 */
+ 	pcie_wait_cmd(ctrl);
+ 
+ 	pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
+@@ -201,9 +198,33 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
+ 	ctrl->cmd_started = jiffies;
+ 	ctrl->slot_ctrl = slot_ctrl;
+ 
++	/*
++	 * Optionally wait for the hardware to be ready for a new command,
++	 * indicating completion of the above issued command.
++	 */
++	if (wait)
++		pcie_wait_cmd(ctrl);
++
+ 	mutex_unlock(&ctrl->ctrl_lock);
+ }
+ 
++/**
++ * pcie_write_cmd - Issue controller command
++ * @ctrl: controller to which the command is issued
++ * @cmd:  command value written to slot control register
++ * @mask: bitmask of slot control register to be modified
++ */
++static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
++{
++	pcie_do_write_cmd(ctrl, cmd, mask, true);
++}
++
++/* Same as above without waiting for the hardware to latch */
++static void pcie_write_cmd_nowait(struct controller *ctrl, u16 cmd, u16 mask)
++{
++	pcie_do_write_cmd(ctrl, cmd, mask, false);
++}
++
+ bool pciehp_check_link_active(struct controller *ctrl)
+ {
+ 	struct pci_dev *pdev = ctrl_dev(ctrl);
+@@ -422,7 +443,7 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
+ 	default:
+ 		return;
+ 	}
+-	pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
++	pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ }
+@@ -434,7 +455,8 @@ void pciehp_green_led_on(struct slot *slot)
+ 	if (!PWR_LED(ctrl))
+ 		return;
+ 
+-	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_PIC);
++	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
++			      PCI_EXP_SLTCTL_PIC);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ 		 PCI_EXP_SLTCTL_PWR_IND_ON);
+@@ -447,7 +469,8 @@ void pciehp_green_led_off(struct slot *slot)
+ 	if (!PWR_LED(ctrl))
+ 		return;
+ 
+-	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_PIC);
++	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
++			      PCI_EXP_SLTCTL_PIC);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ 		 PCI_EXP_SLTCTL_PWR_IND_OFF);
+@@ -460,7 +483,8 @@ void pciehp_green_led_blink(struct slot *slot)
+ 	if (!PWR_LED(ctrl))
+ 		return;
+ 
+-	pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK, PCI_EXP_SLTCTL_PIC);
++	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_BLINK,
++			      PCI_EXP_SLTCTL_PIC);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
+ 		 PCI_EXP_SLTCTL_PWR_IND_BLINK);
+@@ -613,7 +637,7 @@ void pcie_enable_notification(struct controller *ctrl)
+ 		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
+ 		PCI_EXP_SLTCTL_DLLSCE);
+ 
+-	pcie_write_cmd(ctrl, cmd, mask);
++	pcie_write_cmd_nowait(ctrl, cmd, mask);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
+ }
+@@ -664,7 +688,7 @@ int pciehp_reset_slot(struct slot *slot, int probe)
+ 	pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+ 
+ 	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
+-	pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
++	pcie_write_cmd_nowait(ctrl, ctrl_mask, ctrl_mask);
+ 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+ 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
+ 	if (pciehp_poll_mode)
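Why the nowait variants introduced above are safe, as a hedged sketch: every command write begins with pcie_wait_cmd() for the previous command, so indicator-only updates may return immediately and the latency is absorbed by whichever command comes next.

	pcie_write_cmd_nowait(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON,
			      PCI_EXP_SLTCTL_PIC);	/* returns without waiting */
	/* ... a later pcie_do_write_cmd() starts with pcie_wait_cmd(), so
	 * ordering against this command is still guaranteed */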
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index acc4b6ef78c4..c44393f26fd3 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -4324,6 +4324,17 @@ bool pci_device_is_present(struct pci_dev *pdev)
+ }
+ EXPORT_SYMBOL_GPL(pci_device_is_present);
+ 
++void pci_ignore_hotplug(struct pci_dev *dev)
++{
++	struct pci_dev *bridge = dev->bus->self;
++
++	dev->ignore_hotplug = 1;
++	/* Propagate the "ignore hotplug" setting to the parent bridge. */
++	if (bridge)
++		bridge->ignore_hotplug = 1;
++}
++EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
++
+ #define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
+ static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
+ static DEFINE_SPINLOCK(resource_alignment_lock);
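A hedged usage sketch for the new out-of-line helper (hypothetical GPU driver, not part of this patch): runtime-PM paths that power a device into D3cold call pci_ignore_hotplug() first, so port services do not treat the resulting link-down as a real hot-remove; the helper now also flags the parent bridge.

	static int demo_gpu_runtime_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);

		pci_ignore_hotplug(pdev);	/* also marks pdev->bus->self now */
		pci_save_state(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		return 0;
	}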
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 6675a7a1b9fc..c91185721345 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -254,8 +254,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 	}
+ 
+ 	if (res->flags & IORESOURCE_MEM_64) {
+-		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
+-		    sz64 > 0x100000000ULL) {
++		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
++		    && sz64 > 0x100000000ULL) {
+ 			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+ 			res->start = 0;
+ 			res->end = 0;
+@@ -264,7 +264,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ 			goto out;
+ 		}
+ 
+-		if ((sizeof(dma_addr_t) < 8) && l) {
++		if ((sizeof(pci_bus_addr_t) < 8) && l) {
+ 			/* Above 32-bit boundary; try to reallocate */
+ 			res->flags |= IORESOURCE_UNSET;
+ 			res->start = 0;
+@@ -399,7 +399,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+ 	struct pci_dev *dev = child->self;
+ 	u16 mem_base_lo, mem_limit_lo;
+ 	u64 base64, limit64;
+-	dma_addr_t base, limit;
++	pci_bus_addr_t base, limit;
+ 	struct pci_bus_region region;
+ 	struct resource *res;
+ 
+@@ -426,8 +426,8 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
+ 		}
+ 	}
+ 
+-	base = (dma_addr_t) base64;
+-	limit = (dma_addr_t) limit64;
++	base = (pci_bus_addr_t) base64;
++	limit = (pci_bus_addr_t) limit64;
+ 
+ 	if (base != base64) {
+ 		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
+diff --git a/drivers/pcmcia/topic.h b/drivers/pcmcia/topic.h
+index 615a45a8fe86..582688fe7505 100644
+--- a/drivers/pcmcia/topic.h
++++ b/drivers/pcmcia/topic.h
+@@ -104,6 +104,9 @@
+ #define TOPIC_EXCA_IF_CONTROL		0x3e	/* 8 bit */
+ #define TOPIC_EXCA_IFC_33V_ENA		0x01
+ 
++#define TOPIC_PCI_CFG_PPBCN		0x3e	/* 16-bit */
++#define TOPIC_PCI_CFG_PPBCN_WBEN	0x0400
++
+ static void topic97_zoom_video(struct pcmcia_socket *sock, int onoff)
+ {
+ 	struct yenta_socket *socket = container_of(sock, struct yenta_socket, socket);
+@@ -138,6 +141,7 @@ static int topic97_override(struct yenta_socket *socket)
+ static int topic95_override(struct yenta_socket *socket)
+ {
+ 	u8 fctrl;
++	u16 ppbcn;
+ 
+ 	/* enable 3.3V support for 16bit cards */
+ 	fctrl = exca_readb(socket, TOPIC_EXCA_IF_CONTROL);
+@@ -146,6 +150,18 @@ static int topic95_override(struct yenta_socket *socket)
+ 	/* tell yenta to use exca registers to power 16bit cards */
+ 	socket->flags |= YENTA_16BIT_POWER_EXCA | YENTA_16BIT_POWER_DF;
+ 
++	/* Disable write buffers to prevent lockups under load with numerous
++	   Cardbus cards, observed on Tecra 500CDT and reported elsewhere on the
++	   net.  This is not a power-on default according to the datasheet
++	   but some BIOSes seem to set it. */
++	if (pci_read_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, &ppbcn) == 0
++	    && socket->dev->revision <= 7
++	    && (ppbcn & TOPIC_PCI_CFG_PPBCN_WBEN)) {
++		ppbcn &= ~TOPIC_PCI_CFG_PPBCN_WBEN;
++		pci_write_config_word(socket->dev, TOPIC_PCI_CFG_PPBCN, ppbcn);
++		dev_info(&socket->dev->dev, "Disabled ToPIC95 Cardbus write buffers.\n");
++	}
++
+ 	return 0;
+ }
+ 
+diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
+index 49c1720df59a..515f33882ab8 100644
+--- a/drivers/pnp/system.c
++++ b/drivers/pnp/system.c
+@@ -7,6 +7,7 @@
+  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
+  */
+ 
++#include <linux/acpi.h>
+ #include <linux/pnp.h>
+ #include <linux/device.h>
+ #include <linux/init.h>
+@@ -22,25 +23,41 @@ static const struct pnp_device_id pnp_dev_table[] = {
+ 	{"", 0}
+ };
+ 
++#ifdef CONFIG_ACPI
++static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
++{
++	u8 space_id = io ? ACPI_ADR_SPACE_SYSTEM_IO : ACPI_ADR_SPACE_SYSTEM_MEMORY;
++	return !acpi_reserve_region(start, length, space_id, IORESOURCE_BUSY, desc);
++}
++#else
++static bool __reserve_range(u64 start, unsigned int length, bool io, char *desc)
++{
++	struct resource *res;
++
++	res = io ? request_region(start, length, desc) :
++		request_mem_region(start, length, desc);
++	if (res) {
++		res->flags &= ~IORESOURCE_BUSY;
++		return true;
++	}
++	return false;
++}
++#endif
++
+ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
+ {
+ 	char *regionid;
+ 	const char *pnpid = dev_name(&dev->dev);
+ 	resource_size_t start = r->start, end = r->end;
+-	struct resource *res;
++	bool reserved;
+ 
+ 	regionid = kmalloc(16, GFP_KERNEL);
+ 	if (!regionid)
+ 		return;
+ 
+ 	snprintf(regionid, 16, "pnp %s", pnpid);
+-	if (port)
+-		res = request_region(start, end - start + 1, regionid);
+-	else
+-		res = request_mem_region(start, end - start + 1, regionid);
+-	if (res)
+-		res->flags &= ~IORESOURCE_BUSY;
+-	else
++	reserved = __reserve_range(start, end - start + 1, !!port, regionid);
++	if (!reserved)
+ 		kfree(regionid);
+ 
+ 	/*
+@@ -49,7 +66,7 @@ static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
+ 	 * have double reservations.
+ 	 */
+ 	dev_info(&dev->dev, "%pR %s reserved\n", r,
+-		 res ? "has been" : "could not be");
++		 reserved ? "has been" : "could not be");
+ }
+ 
+ static void reserve_resources_of_dev(struct pnp_dev *dev)
+diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
+index 2ed4a4a6b3c5..4bc0c7f459a5 100644
+--- a/drivers/power/power_supply_core.c
++++ b/drivers/power/power_supply_core.c
+@@ -30,6 +30,8 @@ EXPORT_SYMBOL_GPL(power_supply_notifier);
+ 
+ static struct device_type power_supply_dev_type;
+ 
++#define POWER_SUPPLY_DEFERRED_REGISTER_TIME	msecs_to_jiffies(10)
++
+ static bool __power_supply_is_supplied_by(struct power_supply *supplier,
+ 					 struct power_supply *supply)
+ {
+@@ -121,6 +123,30 @@ void power_supply_changed(struct power_supply *psy)
+ }
+ EXPORT_SYMBOL_GPL(power_supply_changed);
+ 
++/*
++ * Notify that a power supply was registered, after its parent finished probing.
++ *
++ * Often a power supply is registered from the driver's probe function.
++ * However, calling power_supply_changed() directly from
++ * power_supply_register() would execute the driver-provided
++ * get_property() too early - before the probe ends.
++ *
++ * Avoid that by waiting on the parent's mutex.
++ */
++static void power_supply_deferred_register_work(struct work_struct *work)
++{
++	struct power_supply *psy = container_of(work, struct power_supply,
++						deferred_register_work.work);
++
++	if (psy->dev.parent)
++		mutex_lock(&psy->dev.parent->mutex);
++
++	power_supply_changed(psy);
++
++	if (psy->dev.parent)
++		mutex_unlock(&psy->dev.parent->mutex);
++}
++
+ #ifdef CONFIG_OF
+ #include <linux/of.h>
+ 
+@@ -645,6 +671,10 @@ __power_supply_register(struct device *parent,
+ 	struct power_supply *psy;
+ 	int rc;
+ 
++	if (!parent)
++		pr_warn("%s: Expected proper parent device for '%s'\n",
++			__func__, desc->name);
++
+ 	psy = kzalloc(sizeof(*psy), GFP_KERNEL);
+ 	if (!psy)
+ 		return ERR_PTR(-ENOMEM);
+@@ -659,7 +689,6 @@ __power_supply_register(struct device *parent,
+ 	dev->release = power_supply_dev_release;
+ 	dev_set_drvdata(dev, psy);
+ 	psy->desc = desc;
+-	atomic_inc(&psy->use_cnt);
+ 	if (cfg) {
+ 		psy->drv_data = cfg->drv_data;
+ 		psy->of_node = cfg->of_node;
+@@ -672,6 +701,8 @@ __power_supply_register(struct device *parent,
+ 		goto dev_set_name_failed;
+ 
+ 	INIT_WORK(&psy->changed_work, power_supply_changed_work);
++	INIT_DELAYED_WORK(&psy->deferred_register_work,
++			  power_supply_deferred_register_work);
+ 
+ 	rc = power_supply_check_supplies(psy);
+ 	if (rc) {
+@@ -700,7 +731,20 @@ __power_supply_register(struct device *parent,
+ 	if (rc)
+ 		goto create_triggers_failed;
+ 
+-	power_supply_changed(psy);
++	/*
++	 * Update use_cnt after any uevents (most notably from device_add()).
++	 * We are still here during the driver's probe, but
++	 * power_supply_uevent() calls back into the driver's get_property()
++	 * method, so at this point:
++	 * 1. The driver has not yet assigned the returned struct power_supply,
++	 * 2. The driver may not have finished initialization (anything in its
++	 *    probe after calling power_supply_register()).
++	 */
++	atomic_inc(&psy->use_cnt);
++
++	queue_delayed_work(system_power_efficient_wq,
++			   &psy->deferred_register_work,
++			   POWER_SUPPLY_DEFERRED_REGISTER_TIME);
+ 
+ 	return psy;
+ 
+@@ -720,7 +764,8 @@ dev_set_name_failed:
+ 
+ /**
+  * power_supply_register() - Register new power supply
+- * @parent:	Device to be a parent of power supply's device
++ * @parent:	Device to be a parent of power supply's device, usually
++ *		the device which probe function calls this
+  * @desc:	Description of power supply, must be valid through whole
+  *		lifetime of this power supply
+  * @cfg:	Run-time specific configuration accessed during registering,
+@@ -741,7 +786,8 @@ EXPORT_SYMBOL_GPL(power_supply_register);
+ 
+ /**
+  * power_supply_register() - Register new non-waking-source power supply
+- * @parent:	Device to be a parent of power supply's device
++ * @parent:	Device to be a parent of power supply's device, usually
++ *		the device which probe function calls this
+  * @desc:	Description of power supply, must be valid through whole
+  *		lifetime of this power supply
+  * @cfg:	Run-time specific configuration accessed during registering,
+@@ -770,7 +816,8 @@ static void devm_power_supply_release(struct device *dev, void *res)
+ 
+ /**
+  * power_supply_register() - Register managed power supply
+- * @parent:	Device to be a parent of power supply's device
++ * @parent:	Device to be a parent of power supply's device, usually
++ *		the device which probe function calls this
+  * @desc:	Description of power supply, must be valid through whole
+  *		lifetime of this power supply
+  * @cfg:	Run-time specific configuration accessed during registering,
+@@ -805,7 +852,8 @@ EXPORT_SYMBOL_GPL(devm_power_supply_register);
+ 
+ /**
+  * power_supply_register() - Register managed non-waking-source power supply
+- * @parent:	Device to be a parent of power supply's device
++ * @parent:	Device to be a parent of power supply's device, usually
++ *		the device which probe function calls this
+  * @desc:	Description of power supply, must be valid through whole
+  *		lifetime of this power supply
+  * @cfg:	Run-time specific configuration accessed during registering,
+@@ -849,6 +897,7 @@ void power_supply_unregister(struct power_supply *psy)
+ {
+ 	WARN_ON(atomic_dec_return(&psy->use_cnt));
+ 	cancel_work_sync(&psy->changed_work);
++	cancel_delayed_work_sync(&psy->deferred_register_work);
+ 	sysfs_remove_link(&psy->dev.kobj, "powers");
+ 	power_supply_remove_triggers(psy);
+ 	psy_unregister_cooler(psy);
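A hedged probe sketch (driver name and desc hypothetical) showing why the deferral matters: the uevent from device_add() can invoke get_property() before probe has stored the returned pointer, so the core now defers the first power_supply_changed() by 10ms and serializes it on the parent's device mutex.

	static int demo_probe(struct platform_device *pdev)
	{
		struct power_supply_config cfg = { .drv_data = pdev, };
		struct power_supply *psy;

		psy = power_supply_register(&pdev->dev, &demo_desc, &cfg);
		if (IS_ERR(psy))
			return PTR_ERR(psy);
		platform_set_drvdata(pdev, psy);
		/* probe continues; the deferred notification fires later */
		return 0;
	}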
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 443eaab933fc..8a28116b5805 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -779,7 +779,7 @@ static int suspend_prepare(struct regulator_dev *rdev, suspend_state_t state)
+ static void print_constraints(struct regulator_dev *rdev)
+ {
+ 	struct regulation_constraints *constraints = rdev->constraints;
+-	char buf[80] = "";
++	char buf[160] = "";
+ 	int count = 0;
+ 	int ret;
+ 
+diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
+index 15fb1416bfbd..c064e32fb3b9 100644
+--- a/drivers/regulator/max77686.c
++++ b/drivers/regulator/max77686.c
+@@ -88,7 +88,7 @@ enum max77686_ramp_rate {
+ };
+ 
+ struct max77686_data {
+-	u64 gpio_enabled:MAX77686_REGULATORS;
++	DECLARE_BITMAP(gpio_enabled, MAX77686_REGULATORS);
+ 
+ 	/* Array indexed by regulator id */
+ 	unsigned int opmode[MAX77686_REGULATORS];
+@@ -121,7 +121,7 @@ static unsigned int max77686_map_normal_mode(struct max77686_data *max77686,
+ 	case MAX77686_BUCK8:
+ 	case MAX77686_BUCK9:
+ 	case MAX77686_LDO20 ... MAX77686_LDO22:
+-		if (max77686->gpio_enabled & (1 << id))
++		if (test_bit(id, max77686->gpio_enabled))
+ 			return MAX77686_GPIO_CONTROL;
+ 	}
+ 
+@@ -277,7 +277,7 @@ static int max77686_of_parse_cb(struct device_node *np,
+ 	}
+ 
+ 	if (gpio_is_valid(config->ena_gpio)) {
+-		max77686->gpio_enabled |= (1 << desc->id);
++		set_bit(desc->id, max77686->gpio_enabled);
+ 
+ 		return regmap_update_bits(config->regmap, desc->enable_reg,
+ 					  desc->enable_mask,
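The hazard the bitmap conversion removes, in a hedged standalone sketch: "1 << id" is computed in plain int, so once a regulator id reaches 32 the shift is undefined behaviour; set_bit()/test_bit() on a DECLARE_BITMAP() are well-defined for any index.

	DECLARE_BITMAP(gpio_enabled, 64);

	set_bit(40, gpio_enabled);		/* fine for any bit index */
	if (test_bit(40, gpio_enabled))
		;				/* handle GPIO-controlled case */
	/* versus the old pattern: (1 << 40) shifts past int's width -- UB */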
+diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
+index 47412cf4eaac..73790a1d0969 100644
+--- a/drivers/scsi/ipr.h
++++ b/drivers/scsi/ipr.h
+@@ -272,7 +272,7 @@
+ #define IPR_RUNTIME_RESET				0x40000000
+ 
+ #define IPR_IPL_INIT_MIN_STAGE_TIME			5
+-#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 15
++#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 30
+ #define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
+ #define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
+ #define IPR_IPL_INIT_STAGE_MASK				0xff000000
+diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
+index ae45bd99baed..f115f67a6ba5 100644
+--- a/drivers/scsi/scsi_transport_srp.c
++++ b/drivers/scsi/scsi_transport_srp.c
+@@ -396,6 +396,36 @@ static void srp_reconnect_work(struct work_struct *work)
+ 	}
+ }
+ 
++/**
++ * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
++ * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
++ *
++ * To do: add support for scsi-mq in this function.
++ */
++static int scsi_request_fn_active(struct Scsi_Host *shost)
++{
++	struct scsi_device *sdev;
++	struct request_queue *q;
++	int request_fn_active = 0;
++
++	shost_for_each_device(sdev, shost) {
++		q = sdev->request_queue;
++
++		spin_lock_irq(q->queue_lock);
++		request_fn_active += q->request_fn_active;
++		spin_unlock_irq(q->queue_lock);
++	}
++
++	return request_fn_active;
++}
++
++/* Wait until ongoing shost->hostt->queuecommand() calls have finished. */
++static void srp_wait_for_queuecommand(struct Scsi_Host *shost)
++{
++	while (scsi_request_fn_active(shost))
++		msleep(20);
++}
++
+ static void __rport_fail_io_fast(struct srp_rport *rport)
+ {
+ 	struct Scsi_Host *shost = rport_to_shost(rport);
+@@ -409,8 +439,10 @@ static void __rport_fail_io_fast(struct srp_rport *rport)
+ 
+ 	/* Involve the LLD if possible to terminate all I/O on the rport. */
+ 	i = to_srp_internal(shost->transportt);
+-	if (i->f->terminate_rport_io)
++	if (i->f->terminate_rport_io) {
++		srp_wait_for_queuecommand(shost);
+ 		i->f->terminate_rport_io(rport);
++	}
+ }
+ 
+ /**
+@@ -504,27 +536,6 @@ void srp_start_tl_fail_timers(struct srp_rport *rport)
+ EXPORT_SYMBOL(srp_start_tl_fail_timers);
+ 
+ /**
+- * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn()
+- * @shost: SCSI host for which to count the number of scsi_request_fn() callers.
+- */
+-static int scsi_request_fn_active(struct Scsi_Host *shost)
+-{
+-	struct scsi_device *sdev;
+-	struct request_queue *q;
+-	int request_fn_active = 0;
+-
+-	shost_for_each_device(sdev, shost) {
+-		q = sdev->request_queue;
+-
+-		spin_lock_irq(q->queue_lock);
+-		request_fn_active += q->request_fn_active;
+-		spin_unlock_irq(q->queue_lock);
+-	}
+-
+-	return request_fn_active;
+-}
+-
+-/**
+  * srp_reconnect_rport() - reconnect to an SRP target port
+  * @rport: SRP target port.
+  *
+@@ -559,8 +570,7 @@ int srp_reconnect_rport(struct srp_rport *rport)
+ 	if (res)
+ 		goto out;
+ 	scsi_target_block(&shost->shost_gendev);
+-	while (scsi_request_fn_active(shost))
+-		msleep(20);
++	srp_wait_for_queuecommand(shost);
+ 	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
+ 	pr_debug("%s (state %d): transport.reconnect() returned %d\n",
+ 		 dev_name(&shost->shost_gendev), rport->state, res);
+diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
+index 861664776672..ff97cabdaa81 100644
+--- a/drivers/spi/spi-orion.c
++++ b/drivers/spi/spi-orion.c
+@@ -61,6 +61,12 @@ enum orion_spi_type {
+ 
+ struct orion_spi_dev {
+ 	enum orion_spi_type	typ;
++	/*
++	 * min_divisor and max_hz should be mutually exclusive; the only
++	 * case where we have both is to manage the armada-370-spi
++	 * compatible with an old device tree
++	 */
++	unsigned long		max_hz;
+ 	unsigned int		min_divisor;
+ 	unsigned int		max_divisor;
+ 	u32			prescale_mask;
+@@ -387,8 +393,9 @@ static const struct orion_spi_dev orion_spi_dev_data = {
+ 
+ static const struct orion_spi_dev armada_spi_dev_data = {
+ 	.typ = ARMADA_SPI,
+-	.min_divisor = 1,
++	.min_divisor = 4,
+ 	.max_divisor = 1920,
++	.max_hz = 50000000,
+ 	.prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+ };
+ 
+@@ -454,7 +461,21 @@ static int orion_spi_probe(struct platform_device *pdev)
+ 		goto out;
+ 
+ 	tclk_hz = clk_get_rate(spi->clk);
+-	master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
++
++	/*
++	 * With an old device tree, the armada-370-spi compatible could
++	 * be used with Armada XP; however, for this SoC the maximum
++	 * frequency is 50MHz instead of tclk/4. On Armada 370, tclk
++	 * cannot be higher than 200MHz. So, to be able to handle both
++	 * SoCs, we take the minimum of 50MHz and tclk/4.
++	 */
++	if (of_device_is_compatible(pdev->dev.of_node,
++					"marvell,armada-370-spi"))
++		master->max_speed_hz = min(devdata->max_hz,
++				DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
++	else
++		master->max_speed_hz =
++			DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+ 	master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
+ 
+ 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
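A worked example of the new clamping arithmetic (tclk values illustrative): on Armada 370 with tclk = 200MHz, tclk/4 = 50MHz and the min() changes nothing; on Armada XP with tclk = 250MHz, tclk/4 = 62.5MHz would exceed the controller's 50MHz limit, so the min() caps it.

	unsigned long tclk_hz = 250000000UL;	/* Armada XP example */
	unsigned long max_hz  = 50000000UL;
	unsigned long rate = min(max_hz, DIV_ROUND_UP(tclk_hz, 4UL));
	/* rate == 50000000: the SoC limit wins over tclk/4 = 62.5MHz */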
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 50910d85df5a..d35c1a13217c 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -988,9 +988,6 @@ void spi_finalize_current_message(struct spi_master *master)
+ 
+ 	spin_lock_irqsave(&master->queue_lock, flags);
+ 	mesg = master->cur_msg;
+-	master->cur_msg = NULL;
+-
+-	queue_kthread_work(&master->kworker, &master->pump_messages);
+ 	spin_unlock_irqrestore(&master->queue_lock, flags);
+ 
+ 	spi_unmap_msg(master, mesg);
+@@ -1003,9 +1000,13 @@ void spi_finalize_current_message(struct spi_master *master)
+ 		}
+ 	}
+ 
+-	trace_spi_message_done(mesg);
+-
++	spin_lock_irqsave(&master->queue_lock, flags);
++	master->cur_msg = NULL;
+ 	master->cur_msg_prepared = false;
++	queue_kthread_work(&master->kworker, &master->pump_messages);
++	spin_unlock_irqrestore(&master->queue_lock, flags);
++
++	trace_spi_message_done(mesg);
+ 
+ 	mesg->state = NULL;
+ 	if (mesg->complete)
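The reordering above closes a race; a hedged restatement of the rule it enforces: keep ownership of cur_msg while the message is unmapped and unprepared, and only clear it and wake the pump afterwards, otherwise the pump can start the next message while this one's DMA mappings are still being torn down.

	/* ordering sketch: teardown first ... */
	spi_unmap_msg(master, mesg);
	/* ... then publish completion and wake the consumer atomically */
	spin_lock_irqsave(&master->queue_lock, flags);
	master->cur_msg = NULL;
	queue_kthread_work(&master->kworker, &master->pump_messages);
	spin_unlock_irqrestore(&master->queue_lock, flags);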
+diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c
+index f8ac4a452f26..0f64165b0147 100644
+--- a/drivers/video/fbdev/mxsfb.c
++++ b/drivers/video/fbdev/mxsfb.c
+@@ -316,6 +316,18 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
+ 	return 0;
+ }
+ 
++static inline void mxsfb_enable_axi_clk(struct mxsfb_info *host)
++{
++	if (host->clk_axi)
++		clk_prepare_enable(host->clk_axi);
++}
++
++static inline void mxsfb_disable_axi_clk(struct mxsfb_info *host)
++{
++	if (host->clk_axi)
++		clk_disable_unprepare(host->clk_axi);
++}
++
+ static void mxsfb_enable_controller(struct fb_info *fb_info)
+ {
+ 	struct mxsfb_info *host = to_imxfb_host(fb_info);
+@@ -333,14 +345,13 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
+ 		}
+ 	}
+ 
+-	if (host->clk_axi)
+-		clk_prepare_enable(host->clk_axi);
+-
+ 	if (host->clk_disp_axi)
+ 		clk_prepare_enable(host->clk_disp_axi);
+ 	clk_prepare_enable(host->clk);
+ 	clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
+ 
++	mxsfb_enable_axi_clk(host);
++
+ 	/* if it was disabled, re-enable the mode again */
+ 	writel(CTRL_DOTCLK_MODE, host->base + LCDC_CTRL + REG_SET);
+ 
+@@ -380,11 +391,11 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
+ 	reg = readl(host->base + LCDC_VDCTRL4);
+ 	writel(reg & ~VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4);
+ 
++	mxsfb_disable_axi_clk(host);
++
+ 	clk_disable_unprepare(host->clk);
+ 	if (host->clk_disp_axi)
+ 		clk_disable_unprepare(host->clk_disp_axi);
+-	if (host->clk_axi)
+-		clk_disable_unprepare(host->clk_axi);
+ 
+ 	host->enabled = 0;
+ 
+@@ -421,6 +432,8 @@ static int mxsfb_set_par(struct fb_info *fb_info)
+ 		mxsfb_disable_controller(fb_info);
+ 	}
+ 
++	mxsfb_enable_axi_clk(host);
++
+ 	/* clear the FIFOs */
+ 	writel(CTRL1_FIFO_CLEAR, host->base + LCDC_CTRL1 + REG_SET);
+ 
+@@ -438,6 +451,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
+ 		ctrl |= CTRL_SET_WORD_LENGTH(3);
+ 		switch (host->ld_intf_width) {
+ 		case STMLCDIF_8BIT:
++			mxsfb_disable_axi_clk(host);
+ 			dev_err(&host->pdev->dev,
+ 					"Unsupported LCD bus width mapping\n");
+ 			return -EINVAL;
+@@ -451,6 +465,7 @@ static int mxsfb_set_par(struct fb_info *fb_info)
+ 		writel(CTRL1_SET_BYTE_PACKAGING(0x7), host->base + LCDC_CTRL1);
+ 		break;
+ 	default:
++		mxsfb_disable_axi_clk(host);
+ 		dev_err(&host->pdev->dev, "Unhandled color depth of %u\n",
+ 				fb_info->var.bits_per_pixel);
+ 		return -EINVAL;
+@@ -504,6 +519,8 @@ static int mxsfb_set_par(struct fb_info *fb_info)
+ 			fb_info->fix.line_length * fb_info->var.yoffset,
+ 			host->base + host->devdata->next_buf);
+ 
++	mxsfb_disable_axi_clk(host);
++
+ 	if (reenable)
+ 		mxsfb_enable_controller(fb_info);
+ 
+@@ -582,10 +599,14 @@ static int mxsfb_pan_display(struct fb_var_screeninfo *var,
+ 
+ 	offset = fb_info->fix.line_length * var->yoffset;
+ 
++	mxsfb_enable_axi_clk(host);
++
+ 	/* update on next VSYNC */
+ 	writel(fb_info->fix.smem_start + offset,
+ 			host->base + host->devdata->next_buf);
+ 
++	mxsfb_disable_axi_clk(host);
++
+ 	return 0;
+ }
+ 
+@@ -608,13 +629,17 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
+ 	unsigned line_count;
+ 	unsigned period;
+ 	unsigned long pa, fbsize;
+-	int bits_per_pixel, ofs;
++	int bits_per_pixel, ofs, ret = 0;
+ 	u32 transfer_count, vdctrl0, vdctrl2, vdctrl3, vdctrl4, ctrl;
+ 
++	mxsfb_enable_axi_clk(host);
++
+ 	/* Only restore the mode when the controller is running */
+ 	ctrl = readl(host->base + LCDC_CTRL);
+-	if (!(ctrl & CTRL_RUN))
+-		return -EINVAL;
++	if (!(ctrl & CTRL_RUN)) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 
+ 	vdctrl0 = readl(host->base + LCDC_VDCTRL0);
+ 	vdctrl2 = readl(host->base + LCDC_VDCTRL2);
+@@ -635,7 +660,8 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
+ 		break;
+ 	case 1:
+ 	default:
+-		return -EINVAL;
++		ret = -EINVAL;
++		goto err;
+ 	}
+ 
+ 	fb_info->var.bits_per_pixel = bits_per_pixel;
+@@ -673,10 +699,14 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
+ 
+ 	pa = readl(host->base + host->devdata->cur_buf);
+ 	fbsize = fb_info->fix.line_length * vmode->yres;
+-	if (pa < fb_info->fix.smem_start)
+-		return -EINVAL;
+-	if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len)
+-		return -EINVAL;
++	if (pa < fb_info->fix.smem_start) {
++		ret = -EINVAL;
++		goto err;
++	}
++	if (pa + fbsize > fb_info->fix.smem_start + fb_info->fix.smem_len) {
++		ret = -EINVAL;
++		goto err;
++	}
+ 	ofs = pa - fb_info->fix.smem_start;
+ 	if (ofs) {
+ 		memmove(fb_info->screen_base, fb_info->screen_base + ofs, fbsize);
+@@ -689,7 +719,11 @@ static int mxsfb_restore_mode(struct mxsfb_info *host,
+ 	clk_prepare_enable(host->clk);
+ 	host->enabled = 1;
+ 
+-	return 0;
++err:
++	if (ret)
++		mxsfb_disable_axi_clk(host);
++
++	return ret;
+ }
+ 
+ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host,
+@@ -915,7 +949,9 @@ static int mxsfb_probe(struct platform_device *pdev)
+ 	}
+ 
+ 	if (!host->enabled) {
++		mxsfb_enable_axi_clk(host);
+ 		writel(0, host->base + LCDC_CTRL);
++		mxsfb_disable_axi_clk(host);
+ 		mxsfb_set_par(fb_info);
+ 		mxsfb_enable_controller(fb_info);
+ 	}
+@@ -954,11 +990,15 @@ static void mxsfb_shutdown(struct platform_device *pdev)
+ 	struct fb_info *fb_info = platform_get_drvdata(pdev);
+ 	struct mxsfb_info *host = to_imxfb_host(fb_info);
+ 
++	mxsfb_enable_axi_clk(host);
++
+ 	/*
+ 	 * Force stop the LCD controller as keeping it running during reboot
+ 	 * might interfere with the BootROM's boot mode pads sampling.
+ 	 */
+ 	writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
++
++	mxsfb_disable_axi_clk(host);
+ }
+ 
+ static struct platform_driver mxsfb_driver = {
+diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
+index 537356742091..a8f3b589a2df 100644
+--- a/fs/configfs/mount.c
++++ b/fs/configfs/mount.c
+@@ -129,8 +129,6 @@ void configfs_release_fs(void)
+ }
+ 
+ 
+-static struct kobject *config_kobj;
+-
+ static int __init configfs_init(void)
+ {
+ 	int err = -ENOMEM;
+@@ -141,8 +139,8 @@ static int __init configfs_init(void)
+ 	if (!configfs_dir_cachep)
+ 		goto out;
+ 
+-	config_kobj = kobject_create_and_add("config", kernel_kobj);
+-	if (!config_kobj)
++	err = sysfs_create_mount_point(kernel_kobj, "config");
++	if (err)
+ 		goto out2;
+ 
+ 	err = register_filesystem(&configfs_fs_type);
+@@ -152,7 +150,7 @@ static int __init configfs_init(void)
+ 	return 0;
+ out3:
+ 	pr_err("Unable to register filesystem!\n");
+-	kobject_put(config_kobj);
++	sysfs_remove_mount_point(kernel_kobj, "config");
+ out2:
+ 	kmem_cache_destroy(configfs_dir_cachep);
+ 	configfs_dir_cachep = NULL;
+@@ -163,7 +161,7 @@ out:
+ static void __exit configfs_exit(void)
+ {
+ 	unregister_filesystem(&configfs_fs_type);
+-	kobject_put(config_kobj);
++	sysfs_remove_mount_point(kernel_kobj, "config");
+ 	kmem_cache_destroy(configfs_dir_cachep);
+ 	configfs_dir_cachep = NULL;
+ }
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index c1e7ffb0dab6..12756040ca20 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -716,20 +716,17 @@ bool debugfs_initialized(void)
+ }
+ EXPORT_SYMBOL_GPL(debugfs_initialized);
+ 
+-
+-static struct kobject *debug_kobj;
+-
+ static int __init debugfs_init(void)
+ {
+ 	int retval;
+ 
+-	debug_kobj = kobject_create_and_add("debug", kernel_kobj);
+-	if (!debug_kobj)
+-		return -EINVAL;
++	retval = sysfs_create_mount_point(kernel_kobj, "debug");
++	if (retval)
++		return retval;
+ 
+ 	retval = register_filesystem(&debug_fs_type);
+ 	if (retval)
+-		kobject_put(debug_kobj);
++		sysfs_remove_mount_point(kernel_kobj, "debug");
+ 	else
+ 		debugfs_registered = true;
+ 
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 082ac1c97f39..18dacf9ed8ff 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1238,7 +1238,6 @@ static void fuse_fs_cleanup(void)
+ }
+ 
+ static struct kobject *fuse_kobj;
+-static struct kobject *connections_kobj;
+ 
+ static int fuse_sysfs_init(void)
+ {
+@@ -1250,11 +1249,9 @@ static int fuse_sysfs_init(void)
+ 		goto out_err;
+ 	}
+ 
+-	connections_kobj = kobject_create_and_add("connections", fuse_kobj);
+-	if (!connections_kobj) {
+-		err = -ENOMEM;
++	err = sysfs_create_mount_point(fuse_kobj, "connections");
++	if (err)
+ 		goto out_fuse_unregister;
+-	}
+ 
+ 	return 0;
+ 
+@@ -1266,7 +1263,7 @@ static int fuse_sysfs_init(void)
+ 
+ static void fuse_sysfs_cleanup(void)
+ {
+-	kobject_put(connections_kobj);
++	sysfs_remove_mount_point(fuse_kobj, "connections");
+ 	kobject_put(fuse_kobj);
+ }
+ 
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index fffca9517321..2d48d28e1640 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -592,6 +592,9 @@ int kernfs_add_one(struct kernfs_node *kn)
+ 		goto out_unlock;
+ 
+ 	ret = -ENOENT;
++	if (parent->flags & KERNFS_EMPTY_DIR)
++		goto out_unlock;
++
+ 	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
+ 		goto out_unlock;
+ 
+@@ -783,6 +786,38 @@ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ 	return ERR_PTR(rc);
+ }
+ 
++/**
++ * kernfs_create_empty_dir - create an always empty directory
++ * @parent: parent in which to create a new directory
++ * @name: name of the new directory
++ *
++ * Returns the created node on success, ERR_PTR() value on failure.
++ */
++struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
++					    const char *name)
++{
++	struct kernfs_node *kn;
++	int rc;
++
++	/* allocate */
++	kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
++	if (!kn)
++		return ERR_PTR(-ENOMEM);
++
++	kn->flags |= KERNFS_EMPTY_DIR;
++	kn->dir.root = parent->dir.root;
++	kn->ns = NULL;
++	kn->priv = NULL;
++
++	/* link in */
++	rc = kernfs_add_one(kn);
++	if (!rc)
++		return kn;
++
++	kernfs_put(kn);
++	return ERR_PTR(rc);
++}
++
+ static struct dentry *kernfs_iop_lookup(struct inode *dir,
+ 					struct dentry *dentry,
+ 					unsigned int flags)
+@@ -1254,7 +1289,8 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
+ 	mutex_lock(&kernfs_mutex);
+ 
+ 	error = -ENOENT;
+-	if (!kernfs_active(kn) || !kernfs_active(new_parent))
++	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
++	    (new_parent->flags & KERNFS_EMPTY_DIR))
+ 		goto out;
+ 
+ 	error = 0;
+diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
+index 2da8493a380b..756dd56aaf60 100644
+--- a/fs/kernfs/inode.c
++++ b/fs/kernfs/inode.c
+@@ -296,6 +296,8 @@ static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
+ 	case KERNFS_DIR:
+ 		inode->i_op = &kernfs_dir_iops;
+ 		inode->i_fop = &kernfs_dir_fops;
++		if (kn->flags & KERNFS_EMPTY_DIR)
++			make_empty_dir_inode(inode);
+ 		break;
+ 	case KERNFS_FILE:
+ 		inode->i_size = kn->attr.size;
+diff --git a/fs/libfs.c b/fs/libfs.c
+index cb1fb4b9b637..02813592e121 100644
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -1093,3 +1093,99 @@ simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
+ 	return -EINVAL;
+ }
+ EXPORT_SYMBOL(simple_nosetlease);
++
++
++/*
++ * Operations for a permanently empty directory.
++ */
++static struct dentry *empty_dir_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
++{
++	return ERR_PTR(-ENOENT);
++}
++
++static int empty_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
++				 struct kstat *stat)
++{
++	struct inode *inode = d_inode(dentry);
++	generic_fillattr(inode, stat);
++	return 0;
++}
++
++static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
++{
++	return -EPERM;
++}
++
++static int empty_dir_setxattr(struct dentry *dentry, const char *name,
++			      const void *value, size_t size, int flags)
++{
++	return -EOPNOTSUPP;
++}
++
++static ssize_t empty_dir_getxattr(struct dentry *dentry, const char *name,
++				  void *value, size_t size)
++{
++	return -EOPNOTSUPP;
++}
++
++static int empty_dir_removexattr(struct dentry *dentry, const char *name)
++{
++	return -EOPNOTSUPP;
++}
++
++static ssize_t empty_dir_listxattr(struct dentry *dentry, char *list, size_t size)
++{
++	return -EOPNOTSUPP;
++}
++
++static const struct inode_operations empty_dir_inode_operations = {
++	.lookup		= empty_dir_lookup,
++	.permission	= generic_permission,
++	.setattr	= empty_dir_setattr,
++	.getattr	= empty_dir_getattr,
++	.setxattr	= empty_dir_setxattr,
++	.getxattr	= empty_dir_getxattr,
++	.removexattr	= empty_dir_removexattr,
++	.listxattr	= empty_dir_listxattr,
++};
++
++static loff_t empty_dir_llseek(struct file *file, loff_t offset, int whence)
++{
++	/* An empty directory has two entries . and .. at offsets 0 and 1 */
++	return generic_file_llseek_size(file, offset, whence, 2, 2);
++}
++
++static int empty_dir_readdir(struct file *file, struct dir_context *ctx)
++{
++	dir_emit_dots(file, ctx);
++	return 0;
++}
++
++static const struct file_operations empty_dir_operations = {
++	.llseek		= empty_dir_llseek,
++	.read		= generic_read_dir,
++	.iterate	= empty_dir_readdir,
++	.fsync		= noop_fsync,
++};
++
++
++void make_empty_dir_inode(struct inode *inode)
++{
++	set_nlink(inode, 2);
++	inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
++	inode->i_uid = GLOBAL_ROOT_UID;
++	inode->i_gid = GLOBAL_ROOT_GID;
++	inode->i_rdev = 0;
++	inode->i_size = 2;
++	inode->i_blkbits = PAGE_SHIFT;
++	inode->i_blocks = 0;
++
++	inode->i_op = &empty_dir_inode_operations;
++	inode->i_fop = &empty_dir_operations;
++}
++
++bool is_empty_dir_inode(struct inode *inode)
++{
++	return (inode->i_fop == &empty_dir_operations) &&
++		(inode->i_op == &empty_dir_inode_operations);
++}
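A hedged usage sketch (superblock and context hypothetical): an inode turned into a permanently empty directory reports only "." and "..", fails lookups with -ENOENT, and rejects attribute changes, which is what lets mount-point directories be trusted to stay contentless.

	struct inode *inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		make_empty_dir_inode(inode);	/* i_size == 2, dots only */
	}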
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 1d4a97c573e0..02c6875dd945 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2332,6 +2332,8 @@ unlock:
+ 	return err;
+ }
+ 
++static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);
++
+ /*
+  * create a new mount for userspace and request it to be added into the
+  * namespace's tree
+@@ -2363,6 +2365,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
+ 			flags |= MS_NODEV;
+ 			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
+ 		}
++		if (type->fs_flags & FS_USERNS_VISIBLE) {
++			if (!fs_fully_visible(type, &mnt_flags))
++				return -EPERM;
++		}
+ 	}
+ 
+ 	mnt = vfs_kern_mount(type, flags, name, data);
+@@ -3164,9 +3170,10 @@ bool current_chrooted(void)
+ 	return chrooted;
+ }
+ 
+-bool fs_fully_visible(struct file_system_type *type)
++static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
+ {
+ 	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
++	int new_flags = *new_mnt_flags;
+ 	struct mount *mnt;
+ 	bool visible = false;
+ 
+@@ -3185,6 +3192,19 @@ bool fs_fully_visible(struct file_system_type *type)
+ 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ 			continue;
+ 
++		/* Verify the mount flags are equal to or more permissive
++		 * than the proposed new mount.
++		 */
++		if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
++		    !(new_flags & MNT_READONLY))
++			continue;
++		if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
++		    !(new_flags & MNT_NODEV))
++			continue;
++		if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
++		    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
++			continue;
++
+ 		/* This mount is not fully visible if there are any
+ 		 * locked child mounts that cover anything except for
+ 		 * empty directories.
+@@ -3194,11 +3214,14 @@ bool fs_fully_visible(struct file_system_type *type)
+ 			/* Only worry about locked mounts */
+ 			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
+ 				continue;
+-			if (!S_ISDIR(inode->i_mode))
+-				goto next;
+-			if (inode->i_nlink > 2)
++			/* Is the directory permanently empty? */
++			if (!is_empty_dir_inode(inode))
+ 				goto next;
+ 		}
++		/* Preserve the locked attributes */
++		*new_mnt_flags |= mnt->mnt.mnt_flags & (MNT_LOCK_READONLY | \
++							MNT_LOCK_NODEV    | \
++							MNT_LOCK_ATIME);
+ 		visible = true;
+ 		goto found;
+ 	next:	;
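A hedged worked scenario for the new flag checks: if the only visible proc instance is mounted read-only with that restriction locked (MNT_LOCK_READONLY), a fresh read-write proc mount in the user namespace must be refused, and a permitted mount inherits the locked bits, mirroring the checks above:

	if ((existing_flags & MNT_LOCK_READONLY) && !(new_flags & MNT_READONLY))
		return false;		/* would weaken a locked restriction */
	new_flags |= existing_flags & MNT_LOCK_READONLY;	/* else inherit */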
+diff --git a/fs/proc/generic.c b/fs/proc/generic.c
+index df6327a2b865..e5dee5c3188e 100644
+--- a/fs/proc/generic.c
++++ b/fs/proc/generic.c
+@@ -373,6 +373,10 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
+ 		WARN(1, "create '/proc/%s' by hand\n", qstr.name);
+ 		return NULL;
+ 	}
++	if (is_empty_pde(*parent)) {
++		WARN(1, "attempt to add to permanently empty directory");
++		return NULL;
++	}
+ 
+ 	ent = kzalloc(sizeof(struct proc_dir_entry) + qstr.len + 1, GFP_KERNEL);
+ 	if (!ent)
+@@ -455,6 +459,25 @@ struct proc_dir_entry *proc_mkdir(const char *name,
+ }
+ EXPORT_SYMBOL(proc_mkdir);
+ 
++struct proc_dir_entry *proc_create_mount_point(const char *name)
++{
++	umode_t mode = S_IFDIR | S_IRUGO | S_IXUGO;
++	struct proc_dir_entry *ent, *parent = NULL;
++
++	ent = __proc_create(&parent, name, mode, 2);
++	if (ent) {
++		ent->data = NULL;
++		ent->proc_fops = NULL;
++		ent->proc_iops = NULL;
++		if (proc_register(parent, ent) < 0) {
++			kfree(ent);
++			parent->nlink--;
++			ent = NULL;
++		}
++	}
++	return ent;
++}
++
+ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
+ 					struct proc_dir_entry *parent,
+ 					const struct file_operations *proc_fops,
+diff --git a/fs/proc/inode.c b/fs/proc/inode.c
+index 8272aaba1bb0..e3eb5524639f 100644
+--- a/fs/proc/inode.c
++++ b/fs/proc/inode.c
+@@ -423,6 +423,10 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
+ 		inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ 		PROC_I(inode)->pde = de;
+ 
++		if (is_empty_pde(de)) {
++			make_empty_dir_inode(inode);
++			return inode;
++		}
+ 		if (de->mode) {
+ 			inode->i_mode = de->mode;
+ 			inode->i_uid = de->uid;
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index c835b94c0cd3..aa2781095bd1 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -191,6 +191,12 @@ static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
+ }
+ extern void pde_put(struct proc_dir_entry *);
+ 
++static inline bool is_empty_pde(const struct proc_dir_entry *pde)
++{
++	return S_ISDIR(pde->mode) && !pde->proc_iops;
++}
++struct proc_dir_entry *proc_create_mount_point(const char *name);
++
+ /*
+  * inode.c
+  */
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index fea2561d773b..fdda62e6115e 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -19,6 +19,28 @@ static const struct inode_operations proc_sys_inode_operations;
+ static const struct file_operations proc_sys_dir_file_operations;
+ static const struct inode_operations proc_sys_dir_operations;
+ 
++/* Support for permanently empty directories */
++
++struct ctl_table sysctl_mount_point[] = {
++	{ }
++};
++
++static bool is_empty_dir(struct ctl_table_header *head)
++{
++	return head->ctl_table[0].child == sysctl_mount_point;
++}
++
++static void set_empty_dir(struct ctl_dir *dir)
++{
++	dir->header.ctl_table[0].child = sysctl_mount_point;
++}
++
++static void clear_empty_dir(struct ctl_dir *dir)
++
++{
++	dir->header.ctl_table[0].child = NULL;
++}
++
+ void proc_sys_poll_notify(struct ctl_table_poll *poll)
+ {
+ 	if (!poll)
+@@ -187,6 +209,17 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
+ 	struct ctl_table *entry;
+ 	int err;
+ 
++	/* Is this a permanently empty directory? */
++	if (is_empty_dir(&dir->header))
++		return -EROFS;
++
++	/* Am I creating a permanently empty directory? */
++	if (header->ctl_table == sysctl_mount_point) {
++		if (!RB_EMPTY_ROOT(&dir->root))
++			return -EINVAL;
++		set_empty_dir(dir);
++	}
++
+ 	dir->header.nreg++;
+ 	header->parent = dir;
+ 	err = insert_links(header);
+@@ -202,6 +235,8 @@ fail:
+ 	erase_header(header);
+ 	put_links(header);
+ fail_links:
++	if (header->ctl_table == sysctl_mount_point)
++		clear_empty_dir(dir);
+ 	header->parent = NULL;
+ 	drop_sysctl_table(&dir->header);
+ 	return err;
+@@ -419,6 +454,8 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ 		inode->i_mode |= S_IFDIR;
+ 		inode->i_op = &proc_sys_dir_operations;
+ 		inode->i_fop = &proc_sys_dir_file_operations;
++		if (is_empty_dir(head))
++			make_empty_dir_inode(inode);
+ 	}
+ out:
+ 	return inode;
+diff --git a/fs/proc/root.c b/fs/proc/root.c
+index b7fa4bfe896a..68feb0f70e63 100644
+--- a/fs/proc/root.c
++++ b/fs/proc/root.c
+@@ -112,9 +112,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
+ 		ns = task_active_pid_ns(current);
+ 		options = data;
+ 
+-		if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
+-			return ERR_PTR(-EPERM);
+-
+ 		/* Does the mounter have privilege over the pid namespace? */
+ 		if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ 			return ERR_PTR(-EPERM);
+@@ -159,7 +156,7 @@ static struct file_system_type proc_fs_type = {
+ 	.name		= "proc",
+ 	.mount		= proc_mount,
+ 	.kill_sb	= proc_kill_sb,
+-	.fs_flags	= FS_USERNS_MOUNT,
++	.fs_flags	= FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
+ };
+ 
+ void __init proc_root_init(void)
+@@ -182,10 +179,10 @@ void __init proc_root_init(void)
+ #endif
+ 	proc_mkdir("fs", NULL);
+ 	proc_mkdir("driver", NULL);
+-	proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
++	proc_create_mount_point("fs/nfsd"); /* somewhere for the nfsd filesystem to be mounted */
+ #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
+ 	/* just give it a mountpoint */
+-	proc_mkdir("openprom", NULL);
++	proc_create_mount_point("openprom");
+ #endif
+ 	proc_tty_init();
+ 	proc_mkdir("bus", NULL);
+diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
+index dc43b5f29305..3adcc4669fac 100644
+--- a/fs/pstore/inode.c
++++ b/fs/pstore/inode.c
+@@ -461,22 +461,18 @@ static struct file_system_type pstore_fs_type = {
+ 	.kill_sb	= pstore_kill_sb,
+ };
+ 
+-static struct kobject *pstore_kobj;
+-
+ static int __init init_pstore_fs(void)
+ {
+-	int err = 0;
++	int err;
+ 
+ 	/* Create a convenient mount point for people to access pstore */
+-	pstore_kobj = kobject_create_and_add("pstore", fs_kobj);
+-	if (!pstore_kobj) {
+-		err = -ENOMEM;
++	err = sysfs_create_mount_point(fs_kobj, "pstore");
++	if (err)
+ 		goto out;
+-	}
+ 
+ 	err = register_filesystem(&pstore_fs_type);
+ 	if (err < 0)
+-		kobject_put(pstore_kobj);
++		sysfs_remove_mount_point(fs_kobj, "pstore");
+ 
+ out:
+ 	return err;
+diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
+index 0b45ff42f374..94374e435025 100644
+--- a/fs/sysfs/dir.c
++++ b/fs/sysfs/dir.c
+@@ -121,3 +121,37 @@ int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
+ 
+ 	return kernfs_rename_ns(kn, new_parent, kn->name, new_ns);
+ }
++
++/**
++ * sysfs_create_mount_point - create an always empty directory
++ * @parent_kobj:  kobject that will contain this always empty directory
++ * @name: The name of the always empty directory to add
++ */
++int sysfs_create_mount_point(struct kobject *parent_kobj, const char *name)
++{
++	struct kernfs_node *kn, *parent = parent_kobj->sd;
++
++	kn = kernfs_create_empty_dir(parent, name);
++	if (IS_ERR(kn)) {
++		if (PTR_ERR(kn) == -EEXIST)
++			sysfs_warn_dup(parent, name);
++		return PTR_ERR(kn);
++	}
++
++	return 0;
++}
++EXPORT_SYMBOL_GPL(sysfs_create_mount_point);
++
++/**
++ *	sysfs_remove_mount_point - remove an always empty directory.
++ *	@parent_kobj: kobject that contains this always empty directory
++ *	@name: The name of the always empty directory to remove
++ *
++ */
++void sysfs_remove_mount_point(struct kobject *parent_kobj, const char *name)
++{
++	struct kernfs_node *parent = parent_kobj->sd;
++
++	kernfs_remove_by_name_ns(parent, name, NULL);
++}
++EXPORT_SYMBOL_GPL(sysfs_remove_mount_point);
+diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
+index 8a49486bf30c..1c6ac6fcee9f 100644
+--- a/fs/sysfs/mount.c
++++ b/fs/sysfs/mount.c
+@@ -31,9 +31,6 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
+ 	bool new_sb;
+ 
+ 	if (!(flags & MS_KERNMOUNT)) {
+-		if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
+-			return ERR_PTR(-EPERM);
+-
+ 		if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
+ 			return ERR_PTR(-EPERM);
+ 	}
+@@ -58,7 +55,7 @@ static struct file_system_type sysfs_fs_type = {
+ 	.name		= "sysfs",
+ 	.mount		= sysfs_mount,
+ 	.kill_sb	= sysfs_kill_sb,
+-	.fs_flags	= FS_USERNS_MOUNT,
++	.fs_flags	= FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
+ };
+ 
+ int __init sysfs_init(void)
+diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
+index d92bdf3b079a..a43df11a163f 100644
+--- a/fs/tracefs/inode.c
++++ b/fs/tracefs/inode.c
+@@ -631,14 +631,12 @@ bool tracefs_initialized(void)
+ 	return tracefs_registered;
+ }
+ 
+-static struct kobject *trace_kobj;
+-
+ static int __init tracefs_init(void)
+ {
+ 	int retval;
+ 
+-	trace_kobj = kobject_create_and_add("tracing", kernel_kobj);
+-	if (!trace_kobj)
++	retval = sysfs_create_mount_point(kernel_kobj, "tracing");
++	if (retval)
+ 		return -EINVAL;
+ 
+ 	retval = register_filesystem(&trace_fs_type);
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index e4da5e35e29c..5da2d2e9d38e 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -332,6 +332,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
+ 
+ int acpi_resources_are_enforced(void);
+ 
++int acpi_reserve_region(u64 start, unsigned int length, u8 space_id,
++			unsigned long flags, char *desc);
++
+ #ifdef CONFIG_HIBERNATION
+ void __init acpi_no_s4_hw_signature(void);
+ #endif
+@@ -440,6 +443,7 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
+ #define ACPI_OST_SC_INSERT_NOT_SUPPORTED	0x82
+ 
+ extern void acpi_early_init(void);
++extern void acpi_subsystem_init(void);
+ 
+ extern int acpi_nvs_register(__u64 start, __u64 size);
+ 
+@@ -494,6 +498,7 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
+ }
+ 
+ static inline void acpi_early_init(void) { }
++static inline void acpi_subsystem_init(void) { }
+ 
+ static inline int early_acpi_boot_init(void)
+ {
+@@ -525,6 +530,13 @@ static inline int acpi_check_region(resource_size_t start, resource_size_t n,
+ 	return 0;
+ }
+ 
++static inline int acpi_reserve_region(u64 start, unsigned int length,
++				      u8 space_id, unsigned long flags,
++				      char *desc)
++{
++	return -ENXIO;
++}
++
+ struct acpi_table_header;
+ static inline int acpi_table_parse(char *id,
+ 				int (*handler)(struct acpi_table_header *))
+diff --git a/include/linux/fs.h b/include/linux/fs.h
+index 35ec87e490b1..571aab91bfc0 100644
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1897,6 +1897,7 @@ struct file_system_type {
+ #define FS_HAS_SUBTYPE		4
+ #define FS_USERNS_MOUNT		8	/* Can be mounted by userns root */
+ #define FS_USERNS_DEV_MOUNT	16 /* A userns mount does not imply MNT_NODEV */
++#define FS_USERNS_VISIBLE	32	/* FS must already be visible */
+ #define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
+ 	struct dentry *(*mount) (struct file_system_type *, int,
+ 		       const char *, void *);
+@@ -1984,7 +1985,6 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
+ extern int freeze_super(struct super_block *super);
+ extern int thaw_super(struct super_block *super);
+ extern bool our_mnt(struct vfsmount *mnt);
+-extern bool fs_fully_visible(struct file_system_type *);
+ 
+ extern int current_umask(void);
+ 
+@@ -2780,6 +2780,8 @@ extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned in
+ extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
+ extern const struct file_operations simple_dir_operations;
+ extern const struct inode_operations simple_dir_inode_operations;
++extern void make_empty_dir_inode(struct inode *inode);
++extern bool is_empty_dir_inode(struct inode *inode);
+ struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+ struct dentry *d_alloc_name(struct dentry *, const char *);
+ extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
+index 71ecdab1671b..29d1896c3ba5 100644
+--- a/include/linux/kernfs.h
++++ b/include/linux/kernfs.h
+@@ -45,6 +45,7 @@ enum kernfs_node_flag {
+ 	KERNFS_LOCKDEP		= 0x0100,
+ 	KERNFS_SUICIDAL		= 0x0400,
+ 	KERNFS_SUICIDED		= 0x0800,
++	KERNFS_EMPTY_DIR	= 0x1000,
+ };
+ 
+ /* @flags for kernfs_create_root() */
+@@ -285,6 +286,8 @@ void kernfs_destroy_root(struct kernfs_root *root);
+ struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
+ 					 const char *name, umode_t mode,
+ 					 void *priv, const void *ns);
++struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
++					    const char *name);
+ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
+ 					 const char *name,
+ 					 umode_t mode, loff_t size,
+diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
+index e705467ddb47..d0a1f99e24e3 100644
+--- a/include/linux/kmemleak.h
++++ b/include/linux/kmemleak.h
+@@ -28,7 +28,8 @@
+ extern void kmemleak_init(void) __ref;
+ extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ 			   gfp_t gfp) __ref;
+-extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
++extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
++				  gfp_t gfp) __ref;
+ extern void kmemleak_free(const void *ptr) __ref;
+ extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
+ extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
+@@ -71,7 +72,8 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ 					    gfp_t gfp)
+ {
+ }
+-static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
++static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
++					 gfp_t gfp)
+ {
+ }
+ static inline void kmemleak_free(const void *ptr)
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 353db8dc4c6e..3ef3a52068df 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -577,9 +577,15 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+ int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+ 		  int reg, int len, u32 val);
+ 
++#ifdef CONFIG_PCI_BUS_ADDR_T_64BIT
++typedef u64 pci_bus_addr_t;
++#else
++typedef u32 pci_bus_addr_t;
++#endif
++
+ struct pci_bus_region {
+-	dma_addr_t start;
+-	dma_addr_t end;
++	pci_bus_addr_t start;
++	pci_bus_addr_t end;
+ };
+ 
+ struct pci_dynids {
+@@ -1006,6 +1012,7 @@ int __must_check pci_assign_resource(struct pci_dev *dev, int i);
+ int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
+ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
+ bool pci_device_is_present(struct pci_dev *pdev);
++void pci_ignore_hotplug(struct pci_dev *dev);
+ 
+ /* ROM control related routines */
+ int pci_enable_rom(struct pci_dev *pdev);
+@@ -1043,11 +1050,6 @@ bool pci_dev_run_wake(struct pci_dev *dev);
+ bool pci_check_pme_status(struct pci_dev *dev);
+ void pci_pme_wakeup_bus(struct pci_bus *bus);
+ 
+-static inline void pci_ignore_hotplug(struct pci_dev *dev)
+-{
+-	dev->ignore_hotplug = 1;
+-}
+-
+ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ 				  bool enable)
+ {
+@@ -1128,7 +1130,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
+ 
+ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+ 
+-static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
++static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
+ {
+ 	struct pci_bus_region region;
+ 
+diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
+index 75a1dd8dc56e..a80f1fd01ddb 100644
+--- a/include/linux/power_supply.h
++++ b/include/linux/power_supply.h
+@@ -237,6 +237,7 @@ struct power_supply {
+ 	/* private */
+ 	struct device dev;
+ 	struct work_struct changed_work;
++	struct delayed_work deferred_register_work;
+ 	spinlock_t changed_lock;
+ 	bool changed;
+ 	atomic_t use_cnt;
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 795d5fea5697..fa7bc29925c9 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -188,6 +188,9 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
+ void unregister_sysctl_table(struct ctl_table_header * table);
+ 
+ extern int sysctl_init(void);
++
++extern struct ctl_table sysctl_mount_point[];
++
+ #else /* CONFIG_SYSCTL */
+ static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
+ {
+diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
+index 99382c0df17e..9f65758311a4 100644
+--- a/include/linux/sysfs.h
++++ b/include/linux/sysfs.h
+@@ -210,6 +210,10 @@ int __must_check sysfs_rename_dir_ns(struct kobject *kobj, const char *new_name,
+ int __must_check sysfs_move_dir_ns(struct kobject *kobj,
+ 				   struct kobject *new_parent_kobj,
+ 				   const void *new_ns);
++int __must_check sysfs_create_mount_point(struct kobject *parent_kobj,
++					  const char *name);
++void sysfs_remove_mount_point(struct kobject *parent_kobj,
++			      const char *name);
+ 
+ int __must_check sysfs_create_file_ns(struct kobject *kobj,
+ 				      const struct attribute *attr,
+@@ -298,6 +302,17 @@ static inline int sysfs_move_dir_ns(struct kobject *kobj,
+ 	return 0;
+ }
+ 
++static inline int sysfs_create_mount_point(struct kobject *parent_kobj,
++					   const char *name)
++{
++	return 0;
++}
++
++static inline void sysfs_remove_mount_point(struct kobject *parent_kobj,
++					    const char *name)
++{
++}
++
+ static inline int sysfs_create_file_ns(struct kobject *kobj,
+ 				       const struct attribute *attr,
+ 				       const void *ns)
+diff --git a/include/linux/types.h b/include/linux/types.h
+index 59698be03490..8715287c3b1f 100644
+--- a/include/linux/types.h
++++ b/include/linux/types.h
+@@ -139,12 +139,20 @@ typedef unsigned long blkcnt_t;
+  */
+ #define pgoff_t unsigned long
+ 
+-/* A dma_addr_t can hold any valid DMA or bus address for the platform */
++/*
++ * A dma_addr_t can hold any valid DMA address, i.e., any address returned
++ * by the DMA API.
++ *
++ * If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
++ * bits wide.  Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
++ * but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
++ * so they don't care about the size of the actual bus addresses.
++ */
+ #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ typedef u64 dma_addr_t;
+ #else
+ typedef u32 dma_addr_t;
+-#endif /* dma_addr_t */
++#endif
+ 
+ typedef unsigned __bitwise__ gfp_t;
+ typedef unsigned __bitwise__ fmode_t;
+diff --git a/init/main.c b/init/main.c
+index 2115055faeac..2a89545e0a5d 100644
+--- a/init/main.c
++++ b/init/main.c
+@@ -664,6 +664,7 @@ asmlinkage __visible void __init start_kernel(void)
+ 
+ 	check_bugs();
+ 
++	acpi_subsystem_init();
+ 	sfi_init_late();
+ 
+ 	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index 469dd547770c..e8a5491be756 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -1924,8 +1924,6 @@ static struct file_system_type cgroup_fs_type = {
+ 	.kill_sb = cgroup_kill_sb,
+ };
+ 
+-static struct kobject *cgroup_kobj;
+-
+ /**
+  * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
+  * @task: target task
+@@ -5044,13 +5042,13 @@ int __init cgroup_init(void)
+ 			ss->bind(init_css_set.subsys[ssid]);
+ 	}
+ 
+-	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
+-	if (!cgroup_kobj)
+-		return -ENOMEM;
++	err = sysfs_create_mount_point(fs_kobj, "cgroup");
++	if (err)
++		return err;
+ 
+ 	err = register_filesystem(&cgroup_fs_type);
+ 	if (err < 0) {
+-		kobject_put(cgroup_kobj);
++		sysfs_remove_mount_point(fs_kobj, "cgroup");
+ 		return err;
+ 	}
+ 
+diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
+index d5d0f7345c54..74d90a754268 100644
+--- a/kernel/irq/devres.c
++++ b/kernel/irq/devres.c
+@@ -104,7 +104,7 @@ int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ 		return -ENOMEM;
+ 
+ 	rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+-	if (rc) {
++	if (rc < 0) {
+ 		devres_free(dr);
+ 		return rc;
+ 	}
+@@ -113,7 +113,7 @@ int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ 	dr->dev_id = dev_id;
+ 	devres_add(dev, dr);
+ 
+-	return 0;
++	return rc;
+ }
+ EXPORT_SYMBOL(devm_request_any_context_irq);
+ 
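+/*
+ * Illustrative sketch, not part of the upstream patch (assumes
+ * <linux/interrupt.h> and <linux/device.h>): with the fix above, a
+ * hypothetical caller can again distinguish the two positive success
+ * values of request_any_context_irq() through the devm wrapper,
+ * instead of having them collapsed to 0 or mistaken for errors.
+ */
+static int example_probe(struct device *dev, unsigned int irq,
+			 irq_handler_t handler, void *priv)
+{
+	int rc;
+
+	rc = devm_request_any_context_irq(dev, irq, handler, 0,
+					  "example", priv);
+	if (rc < 0)			/* only negative values are errors */
+		return rc;
+	if (rc == IRQC_IS_NESTED)	/* otherwise IRQC_IS_HARDIRQ */
+		dev_dbg(dev, "IRQ %u runs as a nested thread\n", irq);
+	return 0;
+}
+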
+diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
+index 284e2691e380..9ec555732f1a 100644
+--- a/kernel/livepatch/core.c
++++ b/kernel/livepatch/core.c
+@@ -179,7 +179,9 @@ static int klp_find_object_symbol(const char *objname, const char *name,
+ 		.count = 0
+ 	};
+ 
++	mutex_lock(&module_mutex);
+ 	kallsyms_on_each_symbol(klp_find_callback, &args);
++	mutex_unlock(&module_mutex);
+ 
+ 	if (args.count == 0)
+ 		pr_err("symbol '%s' not found in symbol table\n", name);
+@@ -219,13 +221,19 @@ static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
+ 		.name = name,
+ 		.addr = addr,
+ 	};
++	int ret;
+ 
+-	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
+-		return 0;
++	mutex_lock(&module_mutex);
++	ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
++	mutex_unlock(&module_mutex);
+ 
+-	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
+-		name, addr);
+-	return -EINVAL;
++	if (!ret) {
++		pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
++			name, addr);
++		return -EINVAL;
++	}
++
++	return 0;
+ }
+ 
+ static int klp_find_verify_func_addr(struct klp_object *obj,
+diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
+index 069742d61c68..ec3086879cb5 100644
+--- a/kernel/rcu/tiny.c
++++ b/kernel/rcu/tiny.c
+@@ -170,6 +170,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+ 
+ 	/* Move the ready-to-invoke callbacks to a local list. */
+ 	local_irq_save(flags);
++	if (rcp->donetail == &rcp->rcucblist) {
++		/* No callbacks ready, so just leave. */
++		local_irq_restore(flags);
++		return;
++	}
+ 	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
+ 	list = rcp->rcucblist;
+ 	rcp->rcucblist = *rcp->donetail;
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 2082b1a88fb9..c3eee4c6d6c1 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1531,12 +1531,6 @@ static struct ctl_table vm_table[] = {
+ 	{ }
+ };
+ 
+-#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
+-static struct ctl_table binfmt_misc_table[] = {
+-	{ }
+-};
+-#endif
+-
+ static struct ctl_table fs_table[] = {
+ 	{
+ 		.procname	= "inode-nr",
+@@ -1690,7 +1684,7 @@ static struct ctl_table fs_table[] = {
+ 	{
+ 		.procname	= "binfmt_misc",
+ 		.mode		= 0555,
+-		.child		= binfmt_misc_table,
++		.child		= sysctl_mount_point,
+ 	},
+ #endif
+ 	{
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index f0fe4f2c1fa7..3716cdb8ba42 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -195,6 +195,8 @@ static struct kmem_cache *scan_area_cache;
+ 
+ /* set if tracing memory operations is enabled */
+ static int kmemleak_enabled;
++/* same as above but only for the kmemleak_free() callback */
++static int kmemleak_free_enabled;
+ /* set in the late_initcall if there were no errors */
+ static int kmemleak_initialized;
+ /* enables or disables early logging of the memory operations */
+@@ -907,12 +909,13 @@ EXPORT_SYMBOL_GPL(kmemleak_alloc);
+  * kmemleak_alloc_percpu - register a newly allocated __percpu object
+  * @ptr:	__percpu pointer to beginning of the object
+  * @size:	size of the object
++ * @gfp:	flags used for kmemleak internal memory allocations
+  *
+  * This function is called from the kernel percpu allocator when a new object
+- * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+- * allocation.
++ * (memory block) is allocated (alloc_percpu).
+  */
+-void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
++void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
++				 gfp_t gfp)
+ {
+ 	unsigned int cpu;
+ 
+@@ -925,7 +928,7 @@ void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+ 	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
+ 		for_each_possible_cpu(cpu)
+ 			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+-				      size, 0, GFP_KERNEL);
++				      size, 0, gfp);
+ 	else if (kmemleak_early_log)
+ 		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+ }
+@@ -942,7 +945,7 @@ void __ref kmemleak_free(const void *ptr)
+ {
+ 	pr_debug("%s(0x%p)\n", __func__, ptr);
+ 
+-	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
++	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
+ 		delete_object_full((unsigned long)ptr);
+ 	else if (kmemleak_early_log)
+ 		log_early(KMEMLEAK_FREE, ptr, 0, 0);
+@@ -982,7 +985,7 @@ void __ref kmemleak_free_percpu(const void __percpu *ptr)
+ 
+ 	pr_debug("%s(0x%p)\n", __func__, ptr);
+ 
+-	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
++	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
+ 		for_each_possible_cpu(cpu)
+ 			delete_object_full((unsigned long)per_cpu_ptr(ptr,
+ 								      cpu));
+@@ -1750,6 +1753,13 @@ static void kmemleak_do_cleanup(struct work_struct *work)
+ 	mutex_lock(&scan_mutex);
+ 	stop_scan_thread();
+ 
++	/*
++	 * Once the scan thread has stopped, it is safe to no longer track
++	 * object freeing. Ordering of the scan thread stopping and the memory
++	 * accesses below is guaranteed by the kthread_stop() function.
++	 */
++	kmemleak_free_enabled = 0;
++
+ 	if (!kmemleak_found_leaks)
+ 		__kmemleak_do_cleanup();
+ 	else
+@@ -1776,6 +1786,8 @@ static void kmemleak_disable(void)
+ 	/* check whether it is too early for a kernel thread */
+ 	if (kmemleak_initialized)
+ 		schedule_work(&cleanup_work);
++	else
++		kmemleak_free_enabled = 0;
+ 
+ 	pr_info("Kernel memory leak detector disabled\n");
+ }
+@@ -1840,8 +1852,10 @@ void __init kmemleak_init(void)
+ 	if (kmemleak_error) {
+ 		local_irq_restore(flags);
+ 		return;
+-	} else
++	} else {
+ 		kmemleak_enabled = 1;
++		kmemleak_free_enabled = 1;
++	}
+ 	local_irq_restore(flags);
+ 
+ 	/*
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index 747743237d9f..99d4c1d0b858 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -1972,35 +1972,41 @@ retry_cpuset:
+ 	pol = get_vma_policy(vma, addr);
+ 	cpuset_mems_cookie = read_mems_allowed_begin();
+ 
+-	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage &&
+-					pol->mode != MPOL_INTERLEAVE)) {
++	if (pol->mode == MPOL_INTERLEAVE) {
++		unsigned nid;
++
++		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
++		mpol_cond_put(pol);
++		page = alloc_page_interleave(gfp, order, nid);
++		goto out;
++	}
++
++	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
++		int hpage_node = node;
++
+ 		/*
+ 		 * For hugepage allocation and non-interleave policy which
+-		 * allows the current node, we only try to allocate from the
+-		 * current node and don't fall back to other nodes, as the
+-		 * cost of remote accesses would likely offset THP benefits.
++		 * allows the current node (or other explicitly preferred
++		 * node) we only try to allocate from the current/preferred
++		 * node and don't fall back to other nodes, as the cost of
++		 * remote accesses would likely offset THP benefits.
+ 		 *
+ 		 * If the policy is interleave, or does not allow the current
+ 		 * node in its nodemask, we allocate the standard way.
+ 		 */
++		if (pol->mode == MPOL_PREFERRED &&
++						!(pol->flags & MPOL_F_LOCAL))
++			hpage_node = pol->v.preferred_node;
++
+ 		nmask = policy_nodemask(gfp, pol);
+-		if (!nmask || node_isset(node, *nmask)) {
++		if (!nmask || node_isset(hpage_node, *nmask)) {
+ 			mpol_cond_put(pol);
+-			page = alloc_pages_exact_node(node,
++			page = alloc_pages_exact_node(hpage_node,
+ 						gfp | __GFP_THISNODE, order);
+ 			goto out;
+ 		}
+ 	}
+ 
+-	if (pol->mode == MPOL_INTERLEAVE) {
+-		unsigned nid;
+-
+-		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
+-		mpol_cond_put(pol);
+-		page = alloc_page_interleave(gfp, order, nid);
+-		goto out;
+-	}
+-
+ 	nmask = policy_nodemask(gfp, pol);
+ 	zl = policy_zonelist(gfp, pol, node);
+ 	mpol_cond_put(pol);
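+/*
+ * Illustrative sketch, not part of the upstream patch: userspace can set
+ * an explicit preferred node with mbind(2) (from <numaif.h>); with the
+ * change above, a transparent-hugepage fault in that range is attempted
+ * on the preferred node rather than only on the faulting CPU's node.
+ */
+static int example_prefer_node1(void *addr, unsigned long len)
+{
+	unsigned long nodemask = 1UL << 1;	/* prefer node 1 */
+
+	return mbind(addr, len, MPOL_PREFERRED, &nodemask,
+		     8 * sizeof(nodemask), 0);
+}
+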
+diff --git a/mm/percpu.c b/mm/percpu.c
+index dfd02484e8de..2dd74487a0af 100644
+--- a/mm/percpu.c
++++ b/mm/percpu.c
+@@ -1030,7 +1030,7 @@ area_found:
+ 		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
+ 
+ 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+-	kmemleak_alloc_percpu(ptr, size);
++	kmemleak_alloc_percpu(ptr, size, gfp);
+ 	return ptr;
+ 
+ fail_unlock:
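+/*
+ * Illustrative sketch, not part of the upstream patch: a caller that
+ * allocates percpu memory in atomic context.  Before this change,
+ * kmemleak_alloc_percpu() always allocated its tracking object with
+ * GFP_KERNEL and could sleep here; now the caller's flags are honoured.
+ */
+static unsigned long __percpu *example_counters_atomic(spinlock_t *lock)
+{
+	unsigned long __percpu *ctrs;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	ctrs = alloc_percpu_gfp(unsigned long, GFP_ATOMIC);
+	spin_unlock_irqrestore(lock, flags);
+	return ctrs;
+}
+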
+diff --git a/security/inode.c b/security/inode.c
+index 91503b79c5f8..0e37e4fba8fa 100644
+--- a/security/inode.c
++++ b/security/inode.c
+@@ -215,19 +215,17 @@ void securityfs_remove(struct dentry *dentry)
+ }
+ EXPORT_SYMBOL_GPL(securityfs_remove);
+ 
+-static struct kobject *security_kobj;
+-
+ static int __init securityfs_init(void)
+ {
+ 	int retval;
+ 
+-	security_kobj = kobject_create_and_add("security", kernel_kobj);
+-	if (!security_kobj)
+-		return -EINVAL;
++	retval = sysfs_create_mount_point(kernel_kobj, "security");
++	if (retval)
++		return retval;
+ 
+ 	retval = register_filesystem(&fs_type);
+ 	if (retval)
+-		kobject_put(security_kobj);
++		sysfs_remove_mount_point(kernel_kobj, "security");
+ 	return retval;
+ }
+ 
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index d2787cca1fcb..3d2201413028 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1853,7 +1853,6 @@ static struct file_system_type sel_fs_type = {
+ };
+ 
+ struct vfsmount *selinuxfs_mount;
+-static struct kobject *selinuxfs_kobj;
+ 
+ static int __init init_sel_fs(void)
+ {
+@@ -1862,13 +1861,13 @@ static int __init init_sel_fs(void)
+ 	if (!selinux_enabled)
+ 		return 0;
+ 
+-	selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj);
+-	if (!selinuxfs_kobj)
+-		return -ENOMEM;
++	err = sysfs_create_mount_point(fs_kobj, "selinux");
++	if (err)
++		return err;
+ 
+ 	err = register_filesystem(&sel_fs_type);
+ 	if (err) {
+-		kobject_put(selinuxfs_kobj);
++		sysfs_remove_mount_point(fs_kobj, "selinux");
+ 		return err;
+ 	}
+ 
+@@ -1887,7 +1886,7 @@ __initcall(init_sel_fs);
+ #ifdef CONFIG_SECURITY_SELINUX_DISABLE
+ void exit_sel_fs(void)
+ {
+-	kobject_put(selinuxfs_kobj);
++	sysfs_remove_mount_point(fs_kobj, "selinux");
+ 	kern_unmount(selinuxfs_mount);
+ 	unregister_filesystem(&sel_fs_type);
+ }
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index d9682985349e..ac4cac7c661a 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -2241,16 +2241,16 @@ static const struct file_operations smk_revoke_subj_ops = {
+ 	.llseek		= generic_file_llseek,
+ };
+ 
+-static struct kset *smackfs_kset;
+ /**
+  * smk_init_sysfs - initialize /sys/fs/smackfs
+  *
+  */
+ static int smk_init_sysfs(void)
+ {
+-	smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
+-	if (!smackfs_kset)
+-		return -ENOMEM;
++	int err;
++	err = sysfs_create_mount_point(fs_kobj, "smackfs");
++	if (err)
++		return err;
+ 	return 0;
+ }
+ 
+diff --git a/sound/core/pcm.c b/sound/core/pcm.c
+index b25bcf5b8644..dfed728d8c87 100644
+--- a/sound/core/pcm.c
++++ b/sound/core/pcm.c
+@@ -1027,7 +1027,8 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
+ static ssize_t show_pcm_class(struct device *dev,
+ 			      struct device_attribute *attr, char *buf)
+ {
+-	struct snd_pcm *pcm;
++	struct snd_pcm_str *pstr = container_of(dev, struct snd_pcm_str, dev);
++	struct snd_pcm *pcm = pstr->pcm;
+ 	const char *str;
+ 	static const char *strs[SNDRV_PCM_CLASS_LAST + 1] = {
+ 		[SNDRV_PCM_CLASS_GENERIC] = "generic",
+@@ -1036,8 +1037,7 @@ static ssize_t show_pcm_class(struct device *dev,
+ 		[SNDRV_PCM_CLASS_DIGITIZER] = "digitizer",
+ 	};
+ 
+-	if (! (pcm = dev_get_drvdata(dev)) ||
+-	    pcm->dev_class > SNDRV_PCM_CLASS_LAST)
++	if (pcm->dev_class > SNDRV_PCM_CLASS_LAST)
+ 		str = "none";
+ 	else
+ 		str = strs[pcm->dev_class];
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b6db25b23dd3..c403dd10d126 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2054,6 +2054,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	{ PCI_DEVICE(0x1022, 0x780d),
+ 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ 	/* ATI HDMI */
++	{ PCI_DEVICE(0x1002, 0x1308),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0x793b),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x7919),
+@@ -2062,6 +2064,8 @@ static const struct pci_device_id azx_ids[] = {
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0x970f),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
++	{ PCI_DEVICE(0x1002, 0x9840),
++	  .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+ 	{ PCI_DEVICE(0x1002, 0xaa00),
+ 	  .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+ 	{ PCI_DEVICE(0x1002, 0xaa08),
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 6d010452c1f5..0e75998db39f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4458,6 +4458,7 @@ enum {
+ 	ALC269_FIXUP_LIFEBOOK,
+ 	ALC269_FIXUP_LIFEBOOK_EXTMIC,
+ 	ALC269_FIXUP_LIFEBOOK_HP_PIN,
++	ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT,
+ 	ALC269_FIXUP_AMIC,
+ 	ALC269_FIXUP_DMIC,
+ 	ALC269VB_FIXUP_AMIC,
+@@ -4478,6 +4479,7 @@ enum {
+ 	ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+ 	ALC269_FIXUP_HEADSET_MODE,
+ 	ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
++	ALC269_FIXUP_ASPIRE_HEADSET_MIC,
+ 	ALC269_FIXUP_ASUS_X101_FUNC,
+ 	ALC269_FIXUP_ASUS_X101_VERB,
+ 	ALC269_FIXUP_ASUS_X101,
+@@ -4505,6 +4507,7 @@ enum {
+ 	ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
+ 	ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC292_FIXUP_TPT440_DOCK,
++	ALC292_FIXUP_TPT440_DOCK2,
+ 	ALC283_FIXUP_BXBT2807_MIC,
+ 	ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
+ 	ALC282_FIXUP_ASPIRE_V5_PINS,
+@@ -4515,6 +4518,8 @@ enum {
+ 	ALC288_FIXUP_DELL_HEADSET_MODE,
+ 	ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
+ 	ALC288_FIXUP_DELL_XPS_13_GPIO6,
++	ALC288_FIXUP_DELL_XPS_13,
++	ALC288_FIXUP_DISABLE_AAMIX,
+ 	ALC292_FIXUP_DELL_E7X,
+ 	ALC292_FIXUP_DISABLE_AAMIX,
+ };
+@@ -4623,6 +4628,10 @@ static const struct hda_fixup alc269_fixups[] = {
+ 			{ }
+ 		},
+ 	},
++	[ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++	},
+ 	[ALC269_FIXUP_AMIC] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4751,6 +4760,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_headset_mode_no_hp_mic,
+ 	},
++	[ALC269_FIXUP_ASPIRE_HEADSET_MIC] = {
++		.type = HDA_FIXUP_PINS,
++		.v.pins = (const struct hda_pintbl[]) {
++			{ 0x19, 0x01a1913c }, /* headset mic w/o jack detect */
++			{ }
++		},
++		.chained = true,
++		.chain_id = ALC269_FIXUP_HEADSET_MODE,
++	},
+ 	[ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+@@ -4953,6 +4971,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chain_id = ALC269_FIXUP_HEADSET_MODE
+ 	},
+ 	[ALC292_FIXUP_TPT440_DOCK] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++		.chained = true,
++		.chain_id = ALC292_FIXUP_TPT440_DOCK2
++	},
++	[ALC292_FIXUP_TPT440_DOCK2] = {
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+ 			{ 0x16, 0x21211010 }, /* dock headphone */
+@@ -5039,9 +5063,23 @@ static const struct hda_fixup alc269_fixups[] = {
+ 		.chained = true,
+ 		.chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
+ 	},
++	[ALC288_FIXUP_DISABLE_AAMIX] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC288_FIXUP_DELL_XPS_13_GPIO6
++	},
++	[ALC288_FIXUP_DELL_XPS_13] = {
++		.type = HDA_FIXUP_FUNC,
++		.v.func = alc_fixup_dell_xps13,
++		.chained = true,
++		.chain_id = ALC288_FIXUP_DISABLE_AAMIX
++	},
+ 	[ALC292_FIXUP_DISABLE_AAMIX] = {
+ 		.type = HDA_FIXUP_FUNC,
+ 		.v.func = alc_fixup_disable_aamix,
++		.chained = true,
++		.chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE
+ 	},
+ 	[ALC292_FIXUP_DELL_E7X] = {
+ 		.type = HDA_FIXUP_FUNC,
+@@ -5056,6 +5094,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
++	SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
++	SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
+ 	SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ 	SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
+@@ -5069,10 +5109,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
++	SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X),
+ 	SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
+ 	SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X),
++	SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+@@ -5156,6 +5197,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+ 	SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
++	SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
+ 	SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+ 	SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index bab6c04932aa..0baeecc2213c 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -238,7 +238,9 @@ static int via_pin_power_ctl_get(struct snd_kcontrol *kcontrol,
+ 				 struct snd_ctl_elem_value *ucontrol)
+ {
+ 	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+-	ucontrol->value.enumerated.item[0] = codec->power_save_node;
++	struct via_spec *spec = codec->spec;
++
++	ucontrol->value.enumerated.item[0] = spec->gen.power_down_unused;
+ 	return 0;
+ }
+ 
+@@ -249,9 +251,9 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
+ 	struct via_spec *spec = codec->spec;
+ 	bool val = !!ucontrol->value.enumerated.item[0];
+ 
+-	if (val == codec->power_save_node)
++	if (val == spec->gen.power_down_unused)
+ 		return 0;
+-	codec->power_save_node = val;
++	/* codec->power_save_node = val; */ /* widget PM still seems broken */
+ 	spec->gen.power_down_unused = val;
+ 	analog_low_current_mode(codec);
+ 	return 1;
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 95abddcd7839..f76830643086 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -27,7 +27,7 @@ TARGETS_HOTPLUG += memory-hotplug
+ # Makefile to avoid test build failures when test
+ # Makefile doesn't have explicit build rules.
+ ifeq (1,$(MAKELEVEL))
+-undefine LDFLAGS
++override LDFLAGS =
+ override MAKEFLAGS =
+ endif
+ 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-19 18:55 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-19 18:55 UTC (permalink / raw
  To: gentoo-commits

commit:     01b9c43422fe77d31b59e745af7521cb1daecd99
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sun Jul 19 18:55:33 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sun Jul 19 18:55:33 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=01b9c434

BFQ patchset for kernel 4.1

 0000_README                                        |   12 +
 ...roups-kconfig-build-bits-for-BFQ-v7r8-4.1.patch |  104 +
 ...introduce-the-BFQ-v7r8-I-O-sched-for-4.1.patch1 | 6952 ++++++++++++++++++++
 ...rly-Queue-Merge-EQM-to-BFQ-v7r8-for-4.1.0.patch | 1220 ++++
 4 files changed, 8288 insertions(+)

diff --git a/0000_README b/0000_README
index 43154ce..3b87439 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,18 @@ Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
 
+Patch:  5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-4.1.patch
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r8 patch 1 for 4.1: Build, cgroups and kconfig bits
+
+Patch:  5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-4.1.patch1
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r8 patch 2 for 4.1: BFQ Scheduler
+
+Patch:  5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-4.1.0.patch
+From:   http://algo.ing.unimo.it/people/paolo/disk_sched/
+Desc:   BFQ v7r8 patch 3 for 4.1: Early Queue Merge (EQM)
+
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-4.1.patch b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-4.1.patch
new file mode 100644
index 0000000..97e1a74
--- /dev/null
+++ b/5001_block-cgroups-kconfig-build-bits-for-BFQ-v7r8-4.1.patch
@@ -0,0 +1,104 @@
+From ba1f3efda7e8670fc74715ba9eed93bc1172672d Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Tue, 7 Apr 2015 13:39:12 +0200
+Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r8-4.1
+
+Update Kconfig.iosched and do the related Makefile changes to include
+kernel configuration options for BFQ. Also add the bfqio controller
+to the cgroups subsystem.
+
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/Kconfig.iosched         | 32 ++++++++++++++++++++++++++++++++
+ block/Makefile                |  1 +
+ include/linux/cgroup_subsys.h |  4 ++++
+ 3 files changed, 37 insertions(+)
+
+diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
+index 421bef9..0ee5f0f 100644
+--- a/block/Kconfig.iosched
++++ b/block/Kconfig.iosched
+@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
+ 	---help---
+ 	  Enable group IO scheduling in CFQ.
+ 
++config IOSCHED_BFQ
++	tristate "BFQ I/O scheduler"
++	default n
++	---help---
++	  The BFQ I/O scheduler tries to distribute bandwidth among
++	  all processes according to their weights.
++	  It aims at distributing the bandwidth as desired, independently of
++	  the disk parameters and with any workload. It also tries to
++	  guarantee low latency to interactive and soft real-time
++	  applications. If compiled built-in (saying Y here), BFQ can
++	  be configured to support hierarchical scheduling.
++
++config CGROUP_BFQIO
++	bool "BFQ hierarchical scheduling support"
++	depends on CGROUPS && IOSCHED_BFQ=y
++	default n
++	---help---
++	  Enable hierarchical scheduling in BFQ, using the cgroups
++	  filesystem interface.  The name of the subsystem will be
++	  bfqio.
++
+ choice
+ 	prompt "Default I/O scheduler"
+ 	default DEFAULT_CFQ
+@@ -52,6 +73,16 @@ choice
+ 	config DEFAULT_CFQ
+ 		bool "CFQ" if IOSCHED_CFQ=y
+ 
++	config DEFAULT_BFQ
++		bool "BFQ" if IOSCHED_BFQ=y
++		help
++		  Selects BFQ as the default I/O scheduler which will be
++		  used by default for all block devices.
++		  The BFQ I/O scheduler aims at distributing the bandwidth
++		  as desired, independently of the disk parameters and with
++		  any workload. It also tries to guarantee low latency to
++		  interactive and soft real-time applications.
++
+ 	config DEFAULT_NOOP
+ 		bool "No-op"
+ 
+@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
+ 	string
+ 	default "deadline" if DEFAULT_DEADLINE
+ 	default "cfq" if DEFAULT_CFQ
++	default "bfq" if DEFAULT_BFQ
+ 	default "noop" if DEFAULT_NOOP
+ 
+ endmenu
+diff --git a/block/Makefile b/block/Makefile
+index 00ecc97..1ed86d5 100644
+--- a/block/Makefile
++++ b/block/Makefile
+@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)	+= blk-throttle.o
+ obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
+ obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
+ obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
++obj-$(CONFIG_IOSCHED_BFQ)	+= bfq-iosched.o
+ 
+ obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
+ obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
+diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
+index e4a96fb..267d681 100644
+--- a/include/linux/cgroup_subsys.h
++++ b/include/linux/cgroup_subsys.h
+@@ -35,6 +35,10 @@ SUBSYS(freezer)
+ SUBSYS(net_cls)
+ #endif
+ 
++#if IS_ENABLED(CONFIG_CGROUP_BFQIO)
++SUBSYS(bfqio)
++#endif
++
+ #if IS_ENABLED(CONFIG_CGROUP_PERF)
+ SUBSYS(perf_event)
+ #endif
+-- 
+2.1.4
+

diff --git a/5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-4.1.patch1 b/5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-4.1.patch1
new file mode 100644
index 0000000..00878b7
--- /dev/null
+++ b/5002_block-introduce-the-BFQ-v7r8-I-O-sched-for-4.1.patch1
@@ -0,0 +1,6952 @@
+From af6f05b4c3bcc92a3c28be57411b9fcc290fa301 Mon Sep 17 00:00:00 2001
+From: Paolo Valente <paolo.valente@unimore.it>
+Date: Thu, 9 May 2013 19:10:02 +0200
+Subject: [PATCH 2/3] block: introduce the BFQ-v7r8 I/O sched for 4.1
+
+Add the BFQ-v7r8 I/O scheduler to 4.1.
+The general structure is borrowed from CFQ, as is much of the code for
+handling I/O contexts. Over time, several useful features have been
+ported from CFQ as well (details in the changelog in README.BFQ). A
+(bfq_)queue is associated to each task doing I/O on a device, and each
+time a scheduling decision has to be made a queue is selected and served
+until it expires.
+
+    - Slices are given in the service domain: tasks are assigned
+      budgets, measured in number of sectors. Once granted the disk, a task
+      must however consume its assigned budget within a configurable
+      maximum time (by default, the maximum possible value of the
+      budgets is automatically computed to comply with this timeout).
+      This allows the desired latency vs "throughput boosting" tradeoff
+      to be set.
+
+    - Budgets are scheduled according to a variant of WF2Q+, implemented
+      using an augmented rb-tree to take eligibility into account while
+      preserving an O(log N) overall complexity.
+
+    - A low-latency tunable is provided; if enabled, both interactive
+      and soft real-time applications are guaranteed a very low latency.
+
+    - Latency guarantees are also preserved in the presence of NCQ.
+
+    - Also with flash-based devices, a high throughput is achieved
+      while still preserving latency guarantees.
+
+    - BFQ features Early Queue Merge (EQM), a sort of fusion of the
+      cooperating-queue-merging and the preemption mechanisms present
+      in CFQ. EQM is in fact a unified mechanism that tries to get a
+      sequential read pattern, and hence a high throughput, with any
+      set of processes performing interleaved I/O over a contiguous
+      sequence of sectors.
+
+    - BFQ supports full hierarchical scheduling, exporting a cgroups
+      interface.  Since each node has a full scheduler, each group can
+      be assigned its own weight.
+
+    - If the cgroups interface is not used, only I/O priorities can be
+      assigned to processes, with ioprio values mapped to weights via
+      the relation weight = IOPRIO_BE_NR - ioprio (a worked example
+      follows after this list).
+
+    - ioprio classes are served in strict priority order, i.e., lower
+      priority queues are not served as long as there are higher
+      priority queues.  Among queues in the same class the bandwidth is
+      distributed in proportion to the weight of each queue. A very
+      thin extra bandwidth is however guaranteed to the Idle class, to
+      prevent it from starving.
+
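+      As a worked example of the ioprio mapping above (an editorial
+      illustration, not part of the original changelog): IOPRIO_BE_NR
+      is 8 in this kernel, so ioprio 0, the highest best-effort
+      priority, maps to weight 8 - 0 = 8, while ioprio 7 maps to
+      weight 8 - 7 = 1; queues in the same class therefore see an
+      8:1 bandwidth ratio between these two extremes.
+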
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+---
+ block/bfq-cgroup.c  |  936 +++++++++++++
+ block/bfq-ioc.c     |   36 +
+ block/bfq-iosched.c | 3898 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-sched.c   | 1208 ++++++++++++++++
+ block/bfq.h         |  771 ++++++++++
+ 5 files changed, 6849 insertions(+)
+ create mode 100644 block/bfq-cgroup.c
+ create mode 100644 block/bfq-ioc.c
+ create mode 100644 block/bfq-iosched.c
+ create mode 100644 block/bfq-sched.c
+ create mode 100644 block/bfq.h
+
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+new file mode 100644
+index 0000000..11e2f1d
+--- /dev/null
++++ b/block/bfq-cgroup.c
+@@ -0,0 +1,936 @@
++/*
++ * BFQ: CGROUPS support.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++
++static DEFINE_MUTEX(bfqio_mutex);
++
++static bool bfqio_is_removed(struct bfqio_cgroup *bgrp)
++{
++	return bgrp ? !bgrp->online : false;
++}
++
++static struct bfqio_cgroup bfqio_root_cgroup = {
++	.weight = BFQ_DEFAULT_GRP_WEIGHT,
++	.ioprio = BFQ_DEFAULT_GRP_IOPRIO,
++	.ioprio_class = BFQ_DEFAULT_GRP_CLASS,
++};
++
++static inline void bfq_init_entity(struct bfq_entity *entity,
++				   struct bfq_group *bfqg)
++{
++	entity->weight = entity->new_weight;
++	entity->orig_weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class;
++	entity->parent = bfqg->my_entity;
++	entity->sched_data = &bfqg->sched_data;
++}
++
++static struct bfqio_cgroup *css_to_bfqio(struct cgroup_subsys_state *css)
++{
++	return css ? container_of(css, struct bfqio_cgroup, css) : NULL;
++}
++
++/*
++ * Search bgrp's hash table (for now only a list) for the bfq_group
++ * associated with bfqd.  Must be called under rcu_read_lock().
++ */
++static struct bfq_group *bfqio_lookup_group(struct bfqio_cgroup *bgrp,
++					    struct bfq_data *bfqd)
++{
++	struct bfq_group *bfqg;
++	void *key;
++
++	hlist_for_each_entry_rcu(bfqg, &bgrp->group_data, group_node) {
++		key = rcu_dereference(bfqg->bfqd);
++		if (key == bfqd)
++			return bfqg;
++	}
++
++	return NULL;
++}
++
++static inline void bfq_group_init_entity(struct bfqio_cgroup *bgrp,
++					 struct bfq_group *bfqg)
++{
++	struct bfq_entity *entity = &bfqg->entity;
++
++	/*
++	 * If the weight of the entity has never been set via the sysfs
++	 * interface, then bgrp->weight == 0. In this case we initialize
++	 * the weight from the current ioprio value. Otherwise, the group
++	 * weight, if set, has priority over the ioprio value.
++	 */
++	if (bgrp->weight == 0) {
++		entity->new_weight = bfq_ioprio_to_weight(bgrp->ioprio);
++		entity->new_ioprio = bgrp->ioprio;
++	} else {
++		if (bgrp->weight < BFQ_MIN_WEIGHT ||
++		    bgrp->weight > BFQ_MAX_WEIGHT) {
++			printk(KERN_CRIT "bfq_group_init_entity: "
++					 "bgrp->weight %d\n", bgrp->weight);
++			BUG();
++		}
++		entity->new_weight = bgrp->weight;
++		entity->new_ioprio = bfq_weight_to_ioprio(bgrp->weight);
++	}
++	entity->orig_weight = entity->weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class = bgrp->ioprio_class;
++	entity->my_sched_data = &bfqg->sched_data;
++	bfqg->active_entities = 0;
++}
++
++static inline void bfq_group_set_parent(struct bfq_group *bfqg,
++					struct bfq_group *parent)
++{
++	struct bfq_entity *entity;
++
++	BUG_ON(parent == NULL);
++	BUG_ON(bfqg == NULL);
++
++	entity = &bfqg->entity;
++	entity->parent = parent->my_entity;
++	entity->sched_data = &parent->sched_data;
++}
++
++/**
++ * bfq_group_chain_alloc - allocate a chain of groups.
++ * @bfqd: queue descriptor.
++ * @css: the leaf cgroup_subsys_state this chain starts from.
++ *
++ * Allocate a chain of groups starting from the one belonging to
++ * @cgroup up to the root cgroup.  Stop if a cgroup on the chain
++ * to the root has already an allocated group on @bfqd.
++ */
++static struct bfq_group *bfq_group_chain_alloc(struct bfq_data *bfqd,
++					       struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp;
++	struct bfq_group *bfqg, *prev = NULL, *leaf = NULL;
++
++	for (; css != NULL; css = css->parent) {
++		bgrp = css_to_bfqio(css);
++
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		if (bfqg != NULL) {
++			/*
++			 * All the cgroups in the path from there to the
++			 * root must have a bfq_group for bfqd, so we don't
++			 * need any more allocations.
++			 */
++			break;
++		}
++
++		bfqg = kzalloc(sizeof(*bfqg), GFP_ATOMIC);
++		if (bfqg == NULL)
++			goto cleanup;
++
++		bfq_group_init_entity(bgrp, bfqg);
++		bfqg->my_entity = &bfqg->entity;
++
++		if (leaf == NULL) {
++			leaf = bfqg;
++			prev = leaf;
++		} else {
++			bfq_group_set_parent(prev, bfqg);
++			/*
++			 * Build a list of allocated nodes using the bfqd
++			 * field, which is still unused and will be
++			 * initialized only after the node has been
++			 * connected.
++			 */
++			prev->bfqd = bfqg;
++			prev = bfqg;
++		}
++	}
++
++	return leaf;
++
++cleanup:
++	while (leaf != NULL) {
++		prev = leaf;
++		leaf = leaf->bfqd;
++		kfree(prev);
++	}
++
++	return NULL;
++}
++
++/**
++ * bfq_group_chain_link - link an allocated group chain to a cgroup
++ *                        hierarchy.
++ * @bfqd: the queue descriptor.
++ * @css: the leaf cgroup_subsys_state to start from.
++ * @leaf: the leaf group (to be associated to @cgroup).
++ *
++ * Try to link a chain of groups to a cgroup hierarchy, connecting the
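+/*
+ * Illustrative sketch, not part of the upstream patch: the conversion
+ * pattern used above, and again in the securityfs, selinuxfs and
+ * smackfs hunks below, replaces a kobject_create_and_add() /
+ * kobject_put() pair with the permanently-empty mount-point helpers.
+ * example_fs_type is a placeholder name.
+ */
+static int __init example_fs_init(void)
+{
+	int err;
+
+	err = sysfs_create_mount_point(fs_kobj, "example");
+	if (err)
+		return err;
+
+	err = register_filesystem(&example_fs_type);
+	if (err)
+		sysfs_remove_mount_point(fs_kobj, "example");
+	return err;
+}
+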
++ * nodes bottom-up, so we can be sure that when we find a cgroup in the
++ * hierarchy that already as a group associated to @bfqd all the nodes
++ * in the path to the root cgroup have one too.
++ *
++ * On locking: the queue lock protects the hierarchy (there is a hierarchy
++ * per device) while the bfqio_cgroup lock protects the list of groups
++ * belonging to the same cgroup.
++ */
++static void bfq_group_chain_link(struct bfq_data *bfqd,
++				 struct cgroup_subsys_state *css,
++				 struct bfq_group *leaf)
++{
++	struct bfqio_cgroup *bgrp;
++	struct bfq_group *bfqg, *next, *prev = NULL;
++	unsigned long flags;
++
++	assert_spin_locked(bfqd->queue->queue_lock);
++
++	for (; css != NULL && leaf != NULL; css = css->parent) {
++		bgrp = css_to_bfqio(css);
++		next = leaf->bfqd;
++
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		BUG_ON(bfqg != NULL);
++
++		spin_lock_irqsave(&bgrp->lock, flags);
++
++		rcu_assign_pointer(leaf->bfqd, bfqd);
++		hlist_add_head_rcu(&leaf->group_node, &bgrp->group_data);
++		hlist_add_head(&leaf->bfqd_node, &bfqd->group_list);
++
++		spin_unlock_irqrestore(&bgrp->lock, flags);
++
++		prev = leaf;
++		leaf = next;
++	}
++
++	BUG_ON(css == NULL && leaf != NULL);
++	if (css != NULL && prev != NULL) {
++		bgrp = css_to_bfqio(css);
++		bfqg = bfqio_lookup_group(bgrp, bfqd);
++		bfq_group_set_parent(prev, bfqg);
++	}
++}
++
++/**
++ * bfq_find_alloc_group - return the group associated to @bfqd in @cgroup.
++ * @bfqd: queue descriptor.
++ * @cgroup: cgroup being searched for.
++ *
++ * Return a group associated to @bfqd in @cgroup, allocating one if
++ * necessary.  When a group is returned all the cgroups in the path
++ * to the root have a group associated to @bfqd.
++ *
++ * If the allocation fails, return the root group: this breaks guarantees
++ * but is a safe fallback.  If this loss becomes a problem it can be
++ * mitigated using the equivalent weight (given by the product of the
++ * weights of the groups in the path from @group to the root) in the
++ * root scheduler.
++ *
++ * We allocate all the missing nodes in the path from the leaf cgroup
++ * to the root and we connect the nodes only after all the allocations
++ * have been successful.
++ */
++static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
++					      struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++	struct bfq_group *bfqg;
++
++	bfqg = bfqio_lookup_group(bgrp, bfqd);
++	if (bfqg != NULL)
++		return bfqg;
++
++	bfqg = bfq_group_chain_alloc(bfqd, css);
++	if (bfqg != NULL)
++		bfq_group_chain_link(bfqd, css, bfqg);
++	else
++		bfqg = bfqd->root_group;
++
++	return bfqg;
++}
++
++/**
++ * bfq_bfqq_move - migrate @bfqq to @bfqg.
++ * @bfqd: queue descriptor.
++ * @bfqq: the queue to move.
++ * @entity: @bfqq's entity.
++ * @bfqg: the group to move to.
++ *
++ * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
++ * it on the new one.  Avoid putting the entity on the old group idle tree.
++ *
++ * Must be called under the queue lock; the cgroup owning @bfqg must
++ * not disappear (for now this just means that we are called under
++ * rcu_read_lock()).
++ */
++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			  struct bfq_entity *entity, struct bfq_group *bfqg)
++{
++	int busy, resume;
++
++	busy = bfq_bfqq_busy(bfqq);
++	resume = !RB_EMPTY_ROOT(&bfqq->sort_list);
++
++	BUG_ON(resume && !entity->on_st);
++	BUG_ON(busy && !resume && entity->on_st &&
++	       bfqq != bfqd->in_service_queue);
++
++	if (busy) {
++		BUG_ON(atomic_read(&bfqq->ref) < 2);
++
++		if (!resume)
++			bfq_del_bfqq_busy(bfqd, bfqq, 0);
++		else
++			bfq_deactivate_bfqq(bfqd, bfqq, 0);
++	} else if (entity->on_st)
++		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
++
++	/*
++	 * Here we use a reference to bfqg.  We don't need a refcounter
++	 * as the cgroup reference will not be dropped, so that its
++	 * destroy() callback will not be invoked.
++	 */
++	entity->parent = bfqg->my_entity;
++	entity->sched_data = &bfqg->sched_data;
++
++	if (busy && resume)
++		bfq_activate_bfqq(bfqd, bfqq);
++
++	if (bfqd->in_service_queue == NULL && !bfqd->rq_in_driver)
++		bfq_schedule_dispatch(bfqd);
++}
++
++/**
++ * __bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bfqd: the queue descriptor.
++ * @bic: the bic to move.
++ * @cgroup: the cgroup to move to.
++ *
++ * Move bic to cgroup, assuming that bfqd->queue is locked; the caller
++ * has to make sure that the reference to cgroup is valid across the call.
++ *
++ * NOTE: an alternative approach might have been to store the current
++ * cgroup in bfqq and get a reference to it, reducing the lookup
++ * time here, at the price of slightly more complex code.
++ */
++static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++						struct bfq_io_cq *bic,
++						struct cgroup_subsys_state *css)
++{
++	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
++	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
++	struct bfq_entity *entity;
++	struct bfq_group *bfqg;
++	struct bfqio_cgroup *bgrp;
++
++	bgrp = css_to_bfqio(css);
++
++	bfqg = bfq_find_alloc_group(bfqd, css);
++	if (async_bfqq != NULL) {
++		entity = &async_bfqq->entity;
++
++		if (entity->sched_data != &bfqg->sched_data) {
++			bic_set_bfqq(bic, NULL, 0);
++			bfq_log_bfqq(bfqd, async_bfqq,
++				     "bic_change_group: %p %d",
++				     async_bfqq, atomic_read(&async_bfqq->ref));
++			bfq_put_queue(async_bfqq);
++		}
++	}
++
++	if (sync_bfqq != NULL) {
++		entity = &sync_bfqq->entity;
++		if (entity->sched_data != &bfqg->sched_data)
++			bfq_bfqq_move(bfqd, sync_bfqq, entity, bfqg);
++	}
++
++	return bfqg;
++}
++
++/**
++ * bfq_bic_change_cgroup - move @bic to @cgroup.
++ * @bic: the bic being migrated.
++ * @cgroup: the destination cgroup.
++ *
++ * When the task owning @bic is moved to @cgroup, @bic is immediately
++ * moved into its new parent group.
++ */
++static void bfq_bic_change_cgroup(struct bfq_io_cq *bic,
++				  struct cgroup_subsys_state *css)
++{
++	struct bfq_data *bfqd;
++	unsigned long uninitialized_var(flags);
++
++	bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++				   &flags);
++	if (bfqd != NULL) {
++		__bfq_bic_change_cgroup(bfqd, bic, css);
++		bfq_put_bfqd_unlock(bfqd, &flags);
++	}
++}
++
++/**
++ * bfq_bic_update_cgroup - update the cgroup of @bic.
++ * @bic: the @bic to update.
++ *
++ * Make sure that @bic is enqueued in the cgroup of the current task.
++ * We need this in addition to moving bics during the cgroup attach
++ * phase because the task owning @bic could be at its first disk
++ * access, or we may end up in the root cgroup as the result of a
++ * memory allocation failure, in which case we try to move to the right
++ * group.
++ *
++ * Must be called under the queue lock.  It is safe to use the returned
++ * value even after the rcu_read_unlock() as the migration/destruction
++ * paths act under the queue lock too.  IOW it is impossible to race with
++ * group migration/destruction and end up with an invalid group as:
++ *   a) here cgroup has not yet been destroyed, nor its destroy callback
++ *      has started execution, as current holds a reference to it,
++ *   b) if it is destroyed after rcu_read_unlock() [after current is
++ *      migrated to a different cgroup] its attach() callback will have
++ *      taken care of removing all the references to the old cgroup data.
++ */
++static struct bfq_group *bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++	struct bfq_group *bfqg;
++	struct cgroup_subsys_state *css;
++
++	BUG_ON(bfqd == NULL);
++
++	rcu_read_lock();
++	css = task_css(current, bfqio_cgrp_id);
++	bfqg = __bfq_bic_change_cgroup(bfqd, bic, css);
++	rcu_read_unlock();
++
++	return bfqg;
++}
++
++/**
++ * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
++ * @st: the service tree being flushed.
++ */
++static inline void bfq_flush_idle_tree(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entity = st->first_idle;
++
++	for (; entity != NULL; entity = st->first_idle)
++		__bfq_deactivate_entity(entity, 0);
++}
++
++/**
++ * bfq_reparent_leaf_entity - move leaf entity to the root_group.
++ * @bfqd: the device data structure with the root group.
++ * @entity: the entity to move.
++ */
++static inline void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
++					    struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	BUG_ON(bfqq == NULL);
++	bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
++	return;
++}
++
++/**
++ * bfq_reparent_active_entities - move to the root group all active
++ *                                entities.
++ * @bfqd: the device data structure with the root group.
++ * @bfqg: the group to move from.
++ * @st: the service tree with the entities.
++ *
++ * Needs queue_lock to be taken and reference to be valid over the call.
++ */
++static inline void bfq_reparent_active_entities(struct bfq_data *bfqd,
++						struct bfq_group *bfqg,
++						struct bfq_service_tree *st)
++{
++	struct rb_root *active = &st->active;
++	struct bfq_entity *entity = NULL;
++
++	if (!RB_EMPTY_ROOT(&st->active))
++		entity = bfq_entity_of(rb_first(active));
++
++	for (; entity != NULL; entity = bfq_entity_of(rb_first(active)))
++		bfq_reparent_leaf_entity(bfqd, entity);
++
++	if (bfqg->sched_data.in_service_entity != NULL)
++		bfq_reparent_leaf_entity(bfqd,
++			bfqg->sched_data.in_service_entity);
++
++	return;
++}
++
++/**
++ * bfq_destroy_group - destroy @bfqg.
++ * @bgrp: the bfqio_cgroup containing @bfqg.
++ * @bfqg: the group being destroyed.
++ *
++ * Destroy @bfqg, making sure that it is not referenced from its parent.
++ */
++static void bfq_destroy_group(struct bfqio_cgroup *bgrp, struct bfq_group *bfqg)
++{
++	struct bfq_data *bfqd;
++	struct bfq_service_tree *st;
++	struct bfq_entity *entity = bfqg->my_entity;
++	unsigned long uninitialized_var(flags);
++	int i;
++
++	hlist_del(&bfqg->group_node);
++
++	/*
++	 * Empty all service_trees belonging to this group before
++	 * deactivating the group itself.
++	 */
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++		st = bfqg->sched_data.service_tree + i;
++
++		/*
++		 * The idle tree may still contain bfq_queues belonging
++		 * to exited tasks because they never migrated to a different
++		 * cgroup from the one being destroyed now.  No one else
++		 * can access them so it's safe to act without any lock.
++		 */
++		bfq_flush_idle_tree(st);
++
++		/*
++		 * It may happen that some queues are still active
++		 * (busy) upon group destruction (if the corresponding
++		 * processes have been forced to terminate). We move
++		 * all the leaf entities corresponding to these queues
++		 * to the root_group.
++		 * Also, it may happen that the group has an entity
++		 * in service, which is disconnected from the active
++		 * tree: it must be moved, too.
++		 * There is no need to put the sync queues, as the
++		 * scheduler has taken no reference.
++		 */
++		bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++		if (bfqd != NULL) {
++			bfq_reparent_active_entities(bfqd, bfqg, st);
++			bfq_put_bfqd_unlock(bfqd, &flags);
++		}
++		BUG_ON(!RB_EMPTY_ROOT(&st->active));
++		BUG_ON(!RB_EMPTY_ROOT(&st->idle));
++	}
++	BUG_ON(bfqg->sched_data.next_in_service != NULL);
++	BUG_ON(bfqg->sched_data.in_service_entity != NULL);
++
++	/*
++	 * We may race with device destruction, take extra care when
++	 * dereferencing bfqg->bfqd.
++	 */
++	bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++	if (bfqd != NULL) {
++		hlist_del(&bfqg->bfqd_node);
++		__bfq_deactivate_entity(entity, 0);
++		bfq_put_async_queues(bfqd, bfqg);
++		bfq_put_bfqd_unlock(bfqd, &flags);
++	}
++	BUG_ON(entity->tree != NULL);
++
++	/*
++	 * No need to defer the kfree() to the end of the RCU grace
++	 * period: we are called from the destroy() callback of our
++	 * cgroup, so we can be sure that no one is a) still using
++	 * this cgroup or b) doing lookups in it.
++	 */
++	kfree(bfqg);
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
++		bfq_end_wr_async_queues(bfqd, bfqg);
++	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++/**
++ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
++ * @bfqd: the device descriptor being exited.
++ *
++ * When the device exits we just make sure that no lookup can return
++ * the now unused group structures.  They will be deallocated on cgroup
++ * destruction.
++ */
++static void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	bfq_log(bfqd, "disconnect_groups beginning");
++	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
++		hlist_del(&bfqg->bfqd_node);
++
++		__bfq_deactivate_entity(bfqg->my_entity, 0);
++
++		/*
++		 * Don't remove from the group hash, just set an
++		 * invalid key.  No lookups can race with the
++		 * assignment as bfqd is being destroyed; this
++		 * implies also that new elements cannot be added
++		 * to the list.
++		 */
++		rcu_assign_pointer(bfqg->bfqd, NULL);
++
++		bfq_log(bfqd, "disconnect_groups: put async for group %p",
++			bfqg);
++		bfq_put_async_queues(bfqd, bfqg);
++	}
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++	struct bfqio_cgroup *bgrp = &bfqio_root_cgroup;
++	struct bfq_group *bfqg = bfqd->root_group;
++
++	bfq_put_async_queues(bfqd, bfqg);
++
++	spin_lock_irq(&bgrp->lock);
++	hlist_del_rcu(&bfqg->group_node);
++	spin_unlock_irq(&bgrp->lock);
++
++	/*
++	 * No need to synchronize_rcu() here: since the device is gone
++	 * there cannot be any read-side access to its root_group.
++	 */
++	kfree(bfqg);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++	struct bfq_group *bfqg;
++	struct bfqio_cgroup *bgrp;
++	int i;
++
++	bfqg = kzalloc_node(sizeof(*bfqg), GFP_KERNEL, node);
++	if (bfqg == NULL)
++		return NULL;
++
++	bfqg->entity.parent = NULL;
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++	bgrp = &bfqio_root_cgroup;
++	spin_lock_irq(&bgrp->lock);
++	rcu_assign_pointer(bfqg->bfqd, bfqd);
++	hlist_add_head_rcu(&bfqg->group_node, &bgrp->group_data);
++	spin_unlock_irq(&bgrp->lock);
++
++	return bfqg;
++}
++
++#define SHOW_FUNCTION(__VAR)						\
++static u64 bfqio_cgroup_##__VAR##_read(struct cgroup_subsys_state *css, \
++				       struct cftype *cftype)		\
++{									\
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);			\
++	u64 ret = -ENODEV;						\
++									\
++	mutex_lock(&bfqio_mutex);					\
++	if (bfqio_is_removed(bgrp))					\
++		goto out_unlock;					\
++									\
++	spin_lock_irq(&bgrp->lock);					\
++	ret = bgrp->__VAR;						\
++	spin_unlock_irq(&bgrp->lock);					\
++									\
++out_unlock:								\
++	mutex_unlock(&bfqio_mutex);					\
++	return ret;							\
++}
++
++SHOW_FUNCTION(weight);
++SHOW_FUNCTION(ioprio);
++SHOW_FUNCTION(ioprio_class);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__VAR, __MIN, __MAX)				\
++static int bfqio_cgroup_##__VAR##_write(struct cgroup_subsys_state *css,\
++					struct cftype *cftype,		\
++					u64 val)			\
++{									\
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);			\
++	struct bfq_group *bfqg;						\
++	int ret = -EINVAL;						\
++									\
++	if (val < (__MIN) || val > (__MAX))				\
++		return ret;						\
++									\
++	ret = -ENODEV;							\
++	mutex_lock(&bfqio_mutex);					\
++	if (bfqio_is_removed(bgrp))					\
++		goto out_unlock;					\
++	ret = 0;							\
++									\
++	spin_lock_irq(&bgrp->lock);					\
++	bgrp->__VAR = (unsigned short)val;				\
++	hlist_for_each_entry(bfqg, &bgrp->group_data, group_node) {	\
++		/*							\
++		 * Setting the ioprio_changed flag of the entity        \
++		 * to 1 with new_##__VAR == ##__VAR would re-set        \
++		 * the value of the weight to its ioprio mapping.       \
++		 * Set the flag only if necessary.			\
++		 */							\
++		if ((unsigned short)val != bfqg->entity.new_##__VAR) {  \
++			bfqg->entity.new_##__VAR = (unsigned short)val; \
++			/*						\
++			 * Make sure that the above new value has been	\
++			 * stored in bfqg->entity.new_##__VAR before	\
++			 * setting the ioprio_changed flag. In fact,	\
++			 * this flag may be read asynchronously (in	\
++			 * critical sections protected by a different	\
++			 * lock than that held here), and finding this	\
++			 * flag set may cause the execution of the code	\
++			 * for updating parameters whose value may	\
++			 * depend also on bfqg->entity.new_##__VAR (in	\
++			 * __bfq_entity_update_weight_prio).		\
++			 * This barrier makes sure that the new value	\
++			 * of bfqg->entity.new_##__VAR is correctly	\
++			 * seen in that code.				\
++			 */						\
++			smp_wmb();                                      \
++			bfqg->entity.ioprio_changed = 1;                \
++		}							\
++	}								\
++	spin_unlock_irq(&bgrp->lock);					\
++									\
++out_unlock:								\
++	mutex_unlock(&bfqio_mutex);					\
++	return ret;							\
++}
++
++STORE_FUNCTION(weight, BFQ_MIN_WEIGHT, BFQ_MAX_WEIGHT);
++STORE_FUNCTION(ioprio, 0, IOPRIO_BE_NR - 1);
++STORE_FUNCTION(ioprio_class, IOPRIO_CLASS_RT, IOPRIO_CLASS_IDLE);
++#undef STORE_FUNCTION
++
++static struct cftype bfqio_files[] = {
++	{
++		.name = "weight",
++		.read_u64 = bfqio_cgroup_weight_read,
++		.write_u64 = bfqio_cgroup_weight_write,
++	},
++	{
++		.name = "ioprio",
++		.read_u64 = bfqio_cgroup_ioprio_read,
++		.write_u64 = bfqio_cgroup_ioprio_write,
++	},
++	{
++		.name = "ioprio_class",
++		.read_u64 = bfqio_cgroup_ioprio_class_read,
++		.write_u64 = bfqio_cgroup_ioprio_class_write,
++	},
++	{ },	/* terminate */
++};
++
++static struct cgroup_subsys_state *bfqio_create(struct cgroup_subsys_state
++						*parent_css)
++{
++	struct bfqio_cgroup *bgrp;
++
++	if (parent_css != NULL) {
++		bgrp = kzalloc(sizeof(*bgrp), GFP_KERNEL);
++		if (bgrp == NULL)
++			return ERR_PTR(-ENOMEM);
++	} else
++		bgrp = &bfqio_root_cgroup;
++
++	spin_lock_init(&bgrp->lock);
++	INIT_HLIST_HEAD(&bgrp->group_data);
++	bgrp->ioprio = BFQ_DEFAULT_GRP_IOPRIO;
++	bgrp->ioprio_class = BFQ_DEFAULT_GRP_CLASS;
++
++	return &bgrp->css;
++}
++
++/*
++ * We cannot support shared io contexts, as we have no means to support
++ * two tasks with the same ioc in two different groups without major rework
++ * of the main bic/bfqq data structures.  For now we allow a task to change
++ * its cgroup only if it's the only owner of its ioc; the drawback of this
++ * behavior is that a group containing a task that forked using CLONE_IO
++ * will not be destroyed until the tasks sharing the ioc die.
++ */
++static int bfqio_can_attach(struct cgroup_subsys_state *css,
++			    struct cgroup_taskset *tset)
++{
++	struct task_struct *task;
++	struct io_context *ioc;
++	int ret = 0;
++
++	cgroup_taskset_for_each(task, tset) {
++		/*
++		 * task_lock() is needed to avoid races with
++		 * exit_io_context()
++		 */
++		task_lock(task);
++		ioc = task->io_context;
++		if (ioc != NULL && atomic_read(&ioc->nr_tasks) > 1)
++			/*
++			 * ioc == NULL means that the task is either too
++			 * young or exiting: if it still has no ioc, the
++			 * ioc can't be shared; if the task is exiting, the
++			 * attach will fail anyway, no matter what we
++			 * return here.
++			 */
++			ret = -EINVAL;
++		task_unlock(task);
++		if (ret)
++			break;
++	}
++
++	return ret;
++}
++
++static void bfqio_attach(struct cgroup_subsys_state *css,
++			 struct cgroup_taskset *tset)
++{
++	struct task_struct *task;
++	struct io_context *ioc;
++	struct io_cq *icq;
++
++	/*
++	 * IMPORTANT NOTE: The move of more than one process at a time to a
++	 * new group has not yet been tested.
++	 */
++	cgroup_taskset_for_each(task, tset) {
++		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
++		if (ioc) {
++			/*
++			 * Handle cgroup change here.
++			 */
++			rcu_read_lock();
++			hlist_for_each_entry_rcu(icq, &ioc->icq_list, ioc_node)
++				if (!strncmp(
++					icq->q->elevator->type->elevator_name,
++					"bfq", ELV_NAME_MAX))
++					bfq_bic_change_cgroup(icq_to_bic(icq),
++							      css);
++			rcu_read_unlock();
++			put_io_context(ioc);
++		}
++	}
++}
++
++static void bfqio_destroy(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++	struct hlist_node *tmp;
++	struct bfq_group *bfqg;
++
++	/*
++	 * Since we are destroying the cgroup, there are no more tasks
++	 * referencing it, and all the RCU grace periods that may have
++	 * referenced it are ended (as the destruction of the parent
++	 * cgroup is RCU-safe); bgrp->group_data will not be accessed by
++	 * anything else and we don't need any synchronization.
++	 */
++	hlist_for_each_entry_safe(bfqg, tmp, &bgrp->group_data, group_node)
++		bfq_destroy_group(bgrp, bfqg);
++
++	BUG_ON(!hlist_empty(&bgrp->group_data));
++
++	kfree(bgrp);
++}
++
++static int bfqio_css_online(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++	mutex_lock(&bfqio_mutex);
++	bgrp->online = true;
++	mutex_unlock(&bfqio_mutex);
++
++	return 0;
++}
++
++static void bfqio_css_offline(struct cgroup_subsys_state *css)
++{
++	struct bfqio_cgroup *bgrp = css_to_bfqio(css);
++
++	mutex_lock(&bfqio_mutex);
++	bgrp->online = false;
++	mutex_unlock(&bfqio_mutex);
++}
++
++struct cgroup_subsys bfqio_cgrp_subsys = {
++	.css_alloc = bfqio_create,
++	.css_online = bfqio_css_online,
++	.css_offline = bfqio_css_offline,
++	.can_attach = bfqio_can_attach,
++	.attach = bfqio_attach,
++	.css_free = bfqio_destroy,
++	.legacy_cftypes = bfqio_files,
++};
++#else
++static inline void bfq_init_entity(struct bfq_entity *entity,
++				   struct bfq_group *bfqg)
++{
++	entity->weight = entity->new_weight;
++	entity->orig_weight = entity->new_weight;
++	entity->ioprio = entity->new_ioprio;
++	entity->ioprio_class = entity->new_ioprio_class;
++	entity->sched_data = &bfqg->sched_data;
++}
++
++static inline struct bfq_group *
++bfq_bic_update_cgroup(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++	return bfqd->root_group;
++}
++
++static inline void bfq_bfqq_move(struct bfq_data *bfqd,
++				 struct bfq_queue *bfqq,
++				 struct bfq_entity *entity,
++				 struct bfq_group *bfqg)
++{
++}
++
++static void bfq_end_wr_async(struct bfq_data *bfqd)
++{
++	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_disconnect_groups(struct bfq_data *bfqd)
++{
++	bfq_put_async_queues(bfqd, bfqd->root_group);
++}
++
++static inline void bfq_free_root_group(struct bfq_data *bfqd)
++{
++	kfree(bfqd->root_group);
++}
++
++static struct bfq_group *bfq_alloc_root_group(struct bfq_data *bfqd, int node)
++{
++	struct bfq_group *bfqg;
++	int i;
++
++	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
++	if (bfqg == NULL)
++		return NULL;
++
++	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
++		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
++
++	return bfqg;
++}
++#endif
+diff --git a/block/bfq-ioc.c b/block/bfq-ioc.c
+new file mode 100644
+index 0000000..7f6b000
+--- /dev/null
++++ b/block/bfq-ioc.c
+@@ -0,0 +1,36 @@
++/*
++ * BFQ: I/O context handling.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++/**
++ * icq_to_bic - convert iocontext queue structure to bfq_io_cq.
++ * @icq: the iocontext queue.
++ */
++static inline struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
++{
++	/* bic->icq is the first member, %NULL will convert to %NULL */
++	return container_of(icq, struct bfq_io_cq, icq);
++}
++
++/**
++ * bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
++ * @bfqd: the lookup key.
++ * @ioc: the io_context of the process doing I/O.
++ *
++ * Queue lock must be held.
++ */
++static inline struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
++					       struct io_context *ioc)
++{
++	if (ioc)
++		return icq_to_bic(ioc_lookup_icq(ioc, bfqd->queue));
++	return NULL;
++}
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+new file mode 100644
+index 0000000..773b2ee
+--- /dev/null
++++ b/block/bfq-iosched.c
+@@ -0,0 +1,3898 @@
++/*
++ * Budget Fair Queueing (BFQ) disk scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ
++ * file.
++ *
++ * BFQ is a proportional-share storage-I/O scheduling algorithm based on
++ * the slice-by-slice service scheme of CFQ. But BFQ assigns budgets,
++ * measured in number of sectors, to processes instead of time slices. The
++ * device is not granted to the in-service process for a given time slice,
++ * but until it has exhausted its assigned budget. This change from the time
++ * to the service domain allows BFQ to distribute the device throughput
++ * among processes as desired, without any distortion due to ZBR, workload
++ * fluctuations or other factors. BFQ uses an ad hoc internal scheduler,
++ * called B-WF2Q+, to schedule processes according to their budgets. More
++ * precisely, BFQ schedules queues associated to processes. Thanks to the
++ * accurate policy of B-WF2Q+, BFQ can afford to assign high budgets to
++ * I/O-bound processes issuing sequential requests (to boost the
++ * throughput), and yet guarantee a low latency to interactive and soft
++ * real-time applications.
++ *
++ * BFQ is described in [1], where a reference to the initial, more
++ * theoretical paper on BFQ can also be found. The interested reader can find
++ * in the latter paper full details on the main algorithm, as well as
++ * formulas of the guarantees and formal proofs of all the properties.
++ * With respect to the version of BFQ presented in these papers, this
++ * implementation adds a few more heuristics, such as the one that
++ * guarantees a low latency to soft real-time applications, and a
++ * hierarchical extension based on H-WF2Q+.
++ *
++ * B-WF2Q+ is based on WF2Q+, that is described in [2], together with
++ * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N)
++ * complexity derives from the one introduced with EEVDF in [3].
++ *
++ * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness
++ *     with the BFQ Disk I/O Scheduler'',
++ *     Proceedings of the 5th Annual International Systems and Storage
++ *     Conference (SYSTOR '12), June 2012.
++ *
++ * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf
++ *
++ * [2] Jon C.R. Bennett and H. Zhang, ``Hierarchical Packet Fair Queueing
++ *     Algorithms,'' IEEE/ACM Transactions on Networking, 5(5):675-689,
++ *     Oct 1997.
++ *
++ * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz
++ *
++ * [3] I. Stoica and H. Abdel-Wahab, ``Earliest Eligible Virtual Deadline
++ *     First: A Flexible and Accurate Mechanism for Proportional Share
++ *     Resource Allocation,'' technical report.
++ *
++ * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf
++ */
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/cgroup.h>
++#include <linux/elevator.h>
++#include <linux/jiffies.h>
++#include <linux/rbtree.h>
++#include <linux/ioprio.h>
++#include "bfq.h"
++#include "blk.h"
++
++/* Expiration time of sync (0) and async (1) requests, in jiffies. */
++static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
++
++/* Maximum backwards seek, in KiB. */
++static const int bfq_back_max = 16 * 1024;
++
++/* Penalty of a backwards seek, in number of sectors. */
++static const int bfq_back_penalty = 2;
++
++/* Idling period duration, in jiffies. */
++static int bfq_slice_idle = HZ / 125;
++
++/* Default maximum budget values, in sectors and number of requests. */
++static const int bfq_default_max_budget = 16 * 1024;
++static const int bfq_max_budget_async_rq = 4;
++
++/*
++ * Async to sync throughput distribution is controlled as follows:
++ * when an async request is served, the entity is charged the number
++ * of sectors of the request, multiplied by the factor below
++ */
++static const int bfq_async_charge_factor = 10;
++
++/* Default timeout values, in jiffies, approximating CFQ defaults. */
++static const int bfq_timeout_sync = HZ / 8;
++static int bfq_timeout_async = HZ / 25;
++
++struct kmem_cache *bfq_pool;
++
++/* Below this threshold (in ms), we consider thinktime immediate. */
++#define BFQ_MIN_TT		2
++
++/* hw_tag detection: parallel requests threshold and min samples needed. */
++#define BFQ_HW_QUEUE_THRESHOLD	4
++#define BFQ_HW_QUEUE_SAMPLES	32
++
++#define BFQQ_SEEK_THR	 (sector_t)(8 * 1024)
++#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR)
++
++/* Min samples used for peak rate estimation (for autotuning). */
++#define BFQ_PEAK_RATE_SAMPLES	32
++
++/* Shift used for peak rate fixed precision calculations. */
++#define BFQ_RATE_SHIFT		16
++
++/*
++ * By default, BFQ computes the duration of the weight raising for
++ * interactive applications automatically, using the following formula:
++ * duration = (R / r) * T, where r is the peak rate of the device, and
++ * R and T are two reference parameters.
++ * In particular, R is the peak rate of the reference device (see below),
++ * and T is a reference time: given the systems that are likely to be
++ * installed on the reference device according to its speed class, T is
++ * about the maximum time needed, under BFQ and while reading two files in
++ * parallel, to load typical large applications on these systems.
++ * In practice, the slower/faster the device at hand is, the more/less it
++ * takes to load applications with respect to the reference device.
++ * Accordingly, the longer/shorter BFQ grants weight raising to interactive
++ * applications.
++ *
++ * BFQ uses four different reference pairs (R, T), depending on:
++ * . whether the device is rotational or non-rotational;
++ * . whether the device is slow, such as old or portable HDDs, as well as
++ *   SD cards, or fast, such as newer HDDs and SSDs.
++ *
++ * The device's speed class is dynamically (re)detected in
++ * bfq_update_peak_rate() every time the estimated peak rate is updated.
++ *
++ * In the following definitions, R_slow[0]/R_fast[0] and T_slow[0]/T_fast[0]
++ * are the reference values for a slow/fast rotational device, whereas
++ * R_slow[1]/R_fast[1] and T_slow[1]/T_fast[1] are the reference values for
++ * a slow/fast non-rotational device. Finally, device_speed_thresh are the
++ * thresholds used to switch between speed classes.
++ * Both the reference peak rates and the thresholds are measured in
++ * sectors/usec, left-shifted by BFQ_RATE_SHIFT.
++ */
++static int R_slow[2] = {1536, 10752};
++static int R_fast[2] = {17415, 34791};
++/*
++ * To improve readability, a conversion function is used to initialize the
++ * following arrays, which entails that they can be initialized only in a
++ * function.
++ */
++static int T_slow[2];
++static int T_fast[2];
++static int device_speed_thresh[2];
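++
++/*
++ * Editor's illustrative sketch (hypothetical values, not the actual
++ * initialization code): the conversion mentioned above would look like
++ *
++ *	T_slow[0] = msecs_to_jiffies(3500);
++ *	T_fast[0] = msecs_to_jiffies(1500);
++ *
++ * and, since msecs_to_jiffies() is not a compile-time constant
++ * expression, these arrays cannot use static initializers and must be
++ * filled in from a function.
++ */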
++
++#define BFQ_SERVICE_TREE_INIT	((struct bfq_service_tree)		\
++				{ RB_ROOT, RB_ROOT, NULL, NULL, 0, 0 })
++
++#define RQ_BIC(rq)		((struct bfq_io_cq *) (rq)->elv.priv[0])
++#define RQ_BFQQ(rq)		((rq)->elv.priv[1])
++
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd);
++
++#include "bfq-ioc.c"
++#include "bfq-sched.c"
++#include "bfq-cgroup.c"
++
++#define bfq_class_idle(bfqq)	((bfqq)->entity.ioprio_class ==\
++				 IOPRIO_CLASS_IDLE)
++#define bfq_class_rt(bfqq)	((bfqq)->entity.ioprio_class ==\
++				 IOPRIO_CLASS_RT)
++
++#define bfq_sample_valid(samples)	((samples) > 80)
++
++/*
++ * The following macro groups conditions that need to be evaluated when
++ * checking if existing queues and groups form a symmetric scenario
++ * and therefore idling can be reduced or disabled for some of the
++ * queues. See the comment to the function bfq_bfqq_must_not_expire()
++ * for further details.
++ */
++#ifdef CONFIG_CGROUP_BFQIO
++#define symmetric_scenario	  (!bfqd->active_numerous_groups && \
++				   !bfq_differentiated_weights(bfqd))
++#else
++#define symmetric_scenario	  (!bfq_differentiated_weights(bfqd))
++#endif
++
++/*
++ * We regard a request as SYNC, if either it's a read or has the SYNC bit
++ * set (in which case it could also be a direct WRITE).
++ */
++static inline int bfq_bio_sync(struct bio *bio)
++{
++	if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC))
++		return 1;
++
++	return 0;
++}
++
++/*
++ * Scheduler run of queue, if there are requests pending and no one in the
++ * driver that will restart queueing.
++ */
++static inline void bfq_schedule_dispatch(struct bfq_data *bfqd)
++{
++	if (bfqd->queued != 0) {
++		bfq_log(bfqd, "schedule dispatch");
++		kblockd_schedule_work(&bfqd->unplug_work);
++	}
++}
++
++/*
++ * Lifted from AS - choose which of rq1 and rq2 is best served now.
++ * We choose the request that is closest to the head right now.  Distance
++ * behind the head is penalized and only allowed to a certain extent.
++ */
++static struct request *bfq_choose_req(struct bfq_data *bfqd,
++				      struct request *rq1,
++				      struct request *rq2,
++				      sector_t last)
++{
++	sector_t s1, s2, d1 = 0, d2 = 0;
++	unsigned long back_max;
++#define BFQ_RQ1_WRAP	0x01 /* request 1 wraps */
++#define BFQ_RQ2_WRAP	0x02 /* request 2 wraps */
++	unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++
++	if (rq1 == NULL || rq1 == rq2)
++		return rq2;
++	if (rq2 == NULL)
++		return rq1;
++
++	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
++		return rq1;
++	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
++		return rq2;
++	if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
++		return rq1;
++	else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META))
++		return rq2;
++
++	s1 = blk_rq_pos(rq1);
++	s2 = blk_rq_pos(rq2);
++
++	/*
++	 * By definition, 1KiB is 2 sectors.
++	 */
++	back_max = bfqd->bfq_back_max * 2;
++
++	/*
++	 * Strict one way elevator _except_ in the case where we allow
++	 * short backward seeks which are biased as twice the cost of a
++	 * similar forward seek.
++	 */
++	if (s1 >= last)
++		d1 = s1 - last;
++	else if (s1 + back_max >= last)
++		d1 = (last - s1) * bfqd->bfq_back_penalty;
++	else
++		wrap |= BFQ_RQ1_WRAP;
++
++	if (s2 >= last)
++		d2 = s2 - last;
++	else if (s2 + back_max >= last)
++		d2 = (last - s2) * bfqd->bfq_back_penalty;
++	else
++		wrap |= BFQ_RQ2_WRAP;
++
++	/* Found required data */
++
++	/*
++	 * By doing switch() on the bit mask "wrap" we avoid having to
++	 * check two variables for all permutations: --> faster!
++	 */
++	switch (wrap) {
++	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
++		if (d1 < d2)
++			return rq1;
++		else if (d2 < d1)
++			return rq2;
++		else {
++			if (s1 >= s2)
++				return rq1;
++			else
++				return rq2;
++		}
++
++	case BFQ_RQ2_WRAP:
++		return rq1;
++	case BFQ_RQ1_WRAP:
++		return rq2;
++	case (BFQ_RQ1_WRAP|BFQ_RQ2_WRAP): /* both rqs wrapped */
++	default:
++		/*
++		 * Since both rqs are wrapped,
++		 * start with the one that's further behind head
++		 * (--> only *one* back seek required),
++		 * since back seek takes more time than forward.
++		 */
++		if (s1 <= s2)
++			return rq1;
++		else
++			return rq2;
++	}
++}
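++
++/*
++ * Worked example (editor's addition with hypothetical numbers, not part
++ * of the original BFQ code): suppose the head is at last = 2048 and
++ * bfq_back_penalty = 2. A request 64 sectors ahead (s1 = 2112) gets
++ * d1 = 64, while a request only 48 sectors behind (s2 = 2000) gets
++ * d2 = 48 * 2 = 96. Neither request wraps, so the switch takes the
++ * d1 < d2 branch and the forward request wins, even though the backward
++ * one is closer in absolute distance.
++ */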
++
++static struct bfq_queue *
++bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root,
++		     sector_t sector, struct rb_node **ret_parent,
++		     struct rb_node ***rb_link)
++{
++	struct rb_node **p, *parent;
++	struct bfq_queue *bfqq = NULL;
++
++	parent = NULL;
++	p = &root->rb_node;
++	while (*p) {
++		struct rb_node **n;
++
++		parent = *p;
++		bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++
++		/*
++		 * Sort strictly based on sector. Smallest to the left,
++		 * largest to the right.
++		 */
++		if (sector > blk_rq_pos(bfqq->next_rq))
++			n = &(*p)->rb_right;
++		else if (sector < blk_rq_pos(bfqq->next_rq))
++			n = &(*p)->rb_left;
++		else
++			break;
++		p = n;
++		bfqq = NULL;
++	}
++
++	*ret_parent = parent;
++	if (rb_link)
++		*rb_link = p;
++
++	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
++		(unsigned long long)sector,
++		bfqq != NULL ? bfqq->pid : 0);
++
++	return bfqq;
++}
++
++static void bfq_rq_pos_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	struct rb_node **p, *parent;
++	struct bfq_queue *__bfqq;
++
++	if (bfqq->pos_root != NULL) {
++		rb_erase(&bfqq->pos_node, bfqq->pos_root);
++		bfqq->pos_root = NULL;
++	}
++
++	if (bfq_class_idle(bfqq))
++		return;
++	if (!bfqq->next_rq)
++		return;
++
++	bfqq->pos_root = &bfqd->rq_pos_tree;
++	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
++			blk_rq_pos(bfqq->next_rq), &parent, &p);
++	if (__bfqq == NULL) {
++		rb_link_node(&bfqq->pos_node, parent, p);
++		rb_insert_color(&bfqq->pos_node, bfqq->pos_root);
++	} else
++		bfqq->pos_root = NULL;
++}
++
++/*
++ * Tell whether there are active queues or groups with differentiated weights.
++ */
++static inline bool bfq_differentiated_weights(struct bfq_data *bfqd)
++{
++	/*
++	 * For weights to differ, at least one of the trees must contain
++	 * at least two nodes.
++	 */
++	return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
++		(bfqd->queue_weights_tree.rb_node->rb_left ||
++		 bfqd->queue_weights_tree.rb_node->rb_right)
++#ifdef CONFIG_CGROUP_BFQIO
++	       ) ||
++	       (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
++		(bfqd->group_weights_tree.rb_node->rb_left ||
++		 bfqd->group_weights_tree.rb_node->rb_right)
++#endif
++	       );
++}
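++
++/*
++ * Example (editor's addition, not part of the original code): if every
++ * backlogged queue has weight 100, queue_weights_tree holds a single
++ * counter node with both children NULL, so the function returns false;
++ * as soon as one queue switches to, say, weight 200, a second node
++ * appears and the scenario is considered differentiated.
++ */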
++
++/*
++ * If the weight-counter tree passed as input contains no counter for
++ * the weight of the input entity, then add that counter; otherwise just
++ * increment the existing counter.
++ *
++ * Note that weight-counter trees contain few nodes in mostly symmetric
++ * scenarios. For example, if all queues have the same weight, then the
++ * weight-counter tree for the queues may contain at most one node.
++ * This holds even if low_latency is on, because weight-raised queues
++ * are not inserted in the tree.
++ * In most scenarios, the rate at which nodes are created/destroyed
++ * should be low too.
++ */
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++				 struct bfq_entity *entity,
++				 struct rb_root *root)
++{
++	struct rb_node **new = &(root->rb_node), *parent = NULL;
++
++	/*
++	 * Do not insert if the entity is already associated with a
++	 * counter, which happens if:
++	 *   1) the entity is associated with a queue,
++	 *   2) a request arrival has caused the queue to become both
++	 *      non-weight-raised, and hence change its weight, and
++	 *      backlogged; in this respect, each of the two events
++	 *      causes an invocation of this function,
++	 *   3) this is the invocation of this function caused by the
++	 *      second event. This second invocation is actually useless,
++	 *      and we handle this fact by exiting immediately. More
++	 *      efficient or clearer solutions might possibly be adopted.
++	 */
++	if (entity->weight_counter)
++		return;
++
++	while (*new) {
++		struct bfq_weight_counter *__counter = container_of(*new,
++						struct bfq_weight_counter,
++						weights_node);
++		parent = *new;
++
++		if (entity->weight == __counter->weight) {
++			entity->weight_counter = __counter;
++			goto inc_counter;
++		}
++		if (entity->weight < __counter->weight)
++			new = &((*new)->rb_left);
++		else
++			new = &((*new)->rb_right);
++	}
++
++	entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
++					 GFP_ATOMIC);
++	/*
++	 * A failed GFP_ATOMIC allocation must not be dereferenced. Leaving
++	 * the entity uncounted is safe here, as bfq_weights_tree_remove()
++	 * tolerates a NULL weight_counter.
++	 */
++	if (!entity->weight_counter)
++		return;
++	entity->weight_counter->weight = entity->weight;
++	rb_link_node(&entity->weight_counter->weights_node, parent, new);
++	rb_insert_color(&entity->weight_counter->weights_node, root);
++
++inc_counter:
++	entity->weight_counter->num_active++;
++}
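++
++/*
++ * Example of the resulting tree shape (editor's sketch, not part of the
++ * original code): after adding three entities with weights {100, 100, 200},
++ * the tree holds just two counter nodes, (weight = 100, num_active = 2)
++ * and (weight = 200, num_active = 1); equal weights share one counter.
++ */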
++
++/*
++ * Decrement the weight counter associated with the entity, and, if the
++ * counter reaches 0, remove the counter from the tree.
++ * See the comments to the function bfq_weights_tree_add() for considerations
++ * about overhead.
++ */
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++				    struct bfq_entity *entity,
++				    struct rb_root *root)
++{
++	if (!entity->weight_counter)
++		return;
++
++	BUG_ON(RB_EMPTY_ROOT(root));
++	BUG_ON(entity->weight_counter->weight != entity->weight);
++
++	BUG_ON(!entity->weight_counter->num_active);
++	entity->weight_counter->num_active--;
++	if (entity->weight_counter->num_active > 0)
++		goto reset_entity_pointer;
++
++	rb_erase(&entity->weight_counter->weights_node, root);
++	kfree(entity->weight_counter);
++
++reset_entity_pointer:
++	entity->weight_counter = NULL;
++}
++
++static struct request *bfq_find_next_rq(struct bfq_data *bfqd,
++					struct bfq_queue *bfqq,
++					struct request *last)
++{
++	struct rb_node *rbnext = rb_next(&last->rb_node);
++	struct rb_node *rbprev = rb_prev(&last->rb_node);
++	struct request *next = NULL, *prev = NULL;
++
++	BUG_ON(RB_EMPTY_NODE(&last->rb_node));
++
++	if (rbprev != NULL)
++		prev = rb_entry_rq(rbprev);
++
++	if (rbnext != NULL)
++		next = rb_entry_rq(rbnext);
++	else {
++		rbnext = rb_first(&bfqq->sort_list);
++		if (rbnext && rbnext != &last->rb_node)
++			next = rb_entry_rq(rbnext);
++	}
++
++	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
++}
++
++/* see the definition of bfq_async_charge_factor for details */
++static inline unsigned long bfq_serv_to_charge(struct request *rq,
++					       struct bfq_queue *bfqq)
++{
++	return blk_rq_sectors(rq) *
++		(1 + ((!bfq_bfqq_sync(bfqq)) * (bfqq->wr_coeff == 1) *
++		bfq_async_charge_factor));
++}
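++
++/*
++ * Worked example (editor's addition, not part of the original code): for
++ * a sync queue, or any weight-raised queue, an 8-sector request is
++ * charged exactly 8 sectors; for an async, non-weight-raised queue the
++ * same request is charged 8 * (1 + bfq_async_charge_factor) =
++ * 8 * 11 = 88 sectors, skewing throughput towards sync I/O.
++ */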
++
++/**
++ * bfq_updated_next_req - update the queue after a new next_rq selection.
++ * @bfqd: the device data the queue belongs to.
++ * @bfqq: the queue to update.
++ *
++ * If the first request of a queue changes we make sure that the queue
++ * has enough budget to serve at least its first request (if the
++ * request has grown).  We do this because if the queue does not have enough
++ * budget for its first request, it has to go through two dispatch
++ * rounds to actually get it dispatched.
++ */
++static void bfq_updated_next_req(struct bfq_data *bfqd,
++				 struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++	struct request *next_rq = bfqq->next_rq;
++	unsigned long new_budget;
++
++	if (next_rq == NULL)
++		return;
++
++	if (bfqq == bfqd->in_service_queue)
++		/*
++		 * In order not to break guarantees, budgets cannot be
++		 * changed after an entity has been selected.
++		 */
++		return;
++
++	BUG_ON(entity->tree != &st->active);
++	BUG_ON(entity == entity->sched_data->in_service_entity);
++
++	new_budget = max_t(unsigned long, bfqq->max_budget,
++			   bfq_serv_to_charge(next_rq, bfqq));
++	if (entity->budget != new_budget) {
++		entity->budget = new_budget;
++		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
++					 new_budget);
++		bfq_activate_bfqq(bfqd, bfqq);
++	}
++}
++
++static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
++{
++	u64 dur;
++
++	if (bfqd->bfq_wr_max_time > 0)
++		return bfqd->bfq_wr_max_time;
++
++	dur = bfqd->RT_prod;
++	do_div(dur, bfqd->peak_rate);
++
++	return dur;
++}
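++
++/*
++ * Editor's illustrative sketch, not part of the original code: the
++ * duration = (R / r) * T formula described earlier, spelled out with
++ * made-up reference values. bfqd->RT_prod caches R * T, so a device
++ * running at exactly the reference rate R is weight-raised for T
++ * jiffies, and a device half as fast for 2 * T.
++ */
++static inline u64 bfq_wr_duration_example(void)
++{
++	u64 RT_prod = (u64)17415 * 500;	/* hypothetical R * T */
++	u32 r = 17415 / 2;		/* device at half the reference rate */
++
++	do_div(RT_prod, r);		/* as in bfq_wr_duration() above */
++	return RT_prod;			/* = 2 * 500 = 1000 jiffies */
++}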
++
++/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
++static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
++					struct bfq_queue *bfqq)
++{
++	struct bfq_queue *item;
++	struct hlist_node *n;
++
++	hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node)
++		hlist_del_init(&item->burst_list_node);
++	hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++	bfqd->burst_size = 1;
++}
++
++/* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */
++static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	/* Increment burst size to take into account also bfqq */
++	bfqd->burst_size++;
++
++	if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) {
++		struct bfq_queue *pos, *bfqq_item;
++		struct hlist_node *n;
++
++		/*
++		 * Enough queues have been activated shortly after each
++		 * other to consider this burst as large.
++		 */
++		bfqd->large_burst = true;
++
++		/*
++		 * We can now mark all queues in the burst list as
++		 * belonging to a large burst.
++		 */
++		hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
++				     burst_list_node)
++			bfq_mark_bfqq_in_large_burst(bfqq_item);
++		bfq_mark_bfqq_in_large_burst(bfqq);
++
++		/*
++		 * From now on, and until the current burst finishes, any
++		 * new queue being activated shortly after the last queue
++		 * was inserted in the burst can be immediately marked as
++		 * belonging to a large burst. So the burst list is not
++		 * needed any more. Remove it.
++		 */
++		hlist_for_each_entry_safe(pos, n, &bfqd->burst_list,
++					  burst_list_node)
++			hlist_del_init(&pos->burst_list_node);
++	} else /* burst not yet large: add bfqq to the burst list */
++		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
++}
++
++/*
++ * If many queues happen to become active shortly after each other, then,
++ * to help the processes associated to these queues get their job done as
++ * soon as possible, it is usually better to not grant either weight-raising
++ * or device idling to these queues. In this comment we describe, firstly,
++ * the reasons why this fact holds, and, secondly, the next function, which
++ * implements the main steps needed to properly mark these queues so that
++ * they can then be treated in a different way.
++ *
++ * As for the terminology, we say that a queue becomes active, i.e.,
++ * switches from idle to backlogged, either when it is created (as a
++ * consequence of the arrival of an I/O request), or, if already existing,
++ * when a new request for the queue arrives while the queue is idle.
++ * Bursts of activations, i.e., activations of different queues occurring
++ * shortly after each other, are typically caused by services or applications
++ * that spawn or reactivate many parallel threads/processes. Examples are
++ * systemd during boot or git grep.
++ *
++ * These services or applications benefit mostly from a high throughput:
++ * the quicker the requests of the activated queues are cumulatively served,
++ * the sooner the target job of these queues gets completed. As a consequence,
++ * weight-raising any of these queues, which also implies idling the device
++ * for it, is almost always counterproductive: in most cases it just lowers
++ * throughput.
++ *
++ * On the other hand, a burst of activations may be also caused by the start
++ * of an application that does not consist in a lot of parallel I/O-bound
++ * threads. In fact, with a complex application, the burst may be just a
++ * consequence of the fact that several processes need to be executed to
++ * start-up the application. To start an application as quickly as possible,
++ * the best thing to do is to privilege the I/O related to the application
++ * with respect to all other I/O. Therefore, the best strategy to start as
++ * quickly as possible an application that causes a burst of activations is
++ * to weight-raise all the queues activated during the burst. This is the
++ * exact opposite of the best strategy for the other type of bursts.
++ *
++ * In the end, to take the best action for each of the two cases, the two
++ * types of bursts need to be distinguished. Fortunately, this seems
++ * relatively easy to do, by looking at the sizes of the bursts. In
++ * particular, we found a threshold such that bursts with a larger size
++ * than that threshold are apparently caused only by services or commands
++ * such as systemd or git grep. For brevity, hereafter we call just 'large'
++ * these bursts. BFQ *does not* weight-raise queues whose activations occur
++ * in a large burst. In addition, for each of these queues BFQ performs or
++ * does not perform idling depending on which choice boosts the throughput
++ * most. The exact choice depends on the device and request pattern at
++ * hand.
++ *
++ * Turning back to the next function, it implements all the steps needed
++ * to detect the occurrence of a large burst and to properly mark all the
++ * queues belonging to it (so that they can then be treated in a different
++ * way). This goal is achieved by maintaining a special "burst list" that
++ * holds, temporarily, the queues that belong to the burst in progress. The
++ * list is then used to mark these queues as belonging to a large burst if
++ * the burst does become large. The main steps are the following.
++ *
++ * . when the very first queue is activated, the queue is inserted into the
++ *   list (as it could be the first queue in a possible burst)
++ *
++ * . if the current burst has not yet become large, and a queue Q that does
++ *   not yet belong to the burst is activated shortly after the last time
++ *   at which a new queue entered the burst list, then the function appends
++ *   Q to the burst list
++ *
++ * . if, as a consequence of the previous step, the burst size reaches
++ *   the large-burst threshold, then
++ *
++ *     . all the queues in the burst list are marked as belonging to a
++ *       large burst
++ *
++ *     . the burst list is deleted; in fact, the burst list already served
++ *       its purpose (keeping temporarily track of the queues in a burst,
++ *       so as to be able to mark them as belonging to a large burst in the
++ *       previous sub-step), and now is not needed any more
++ *
++ *     . the device enters a large-burst mode
++ *
++ * . if a queue Q that does not belong to the burst is activated while
++ *   the device is in large-burst mode and shortly after the last time
++ *   at which a queue either entered the burst list or was marked as
++ *   belonging to the current large burst, then Q is immediately marked
++ *   as belonging to a large burst.
++ *
++ * . if a queue Q that does not belong to the burst is activated a while
++ *   later, i.e., not shortly after, the last time at which a queue
++ *   either entered the burst list or was marked as belonging to the
++ *   current large burst, then the current burst is deemed as finished and:
++ *
++ *        . the large-burst mode is reset if set
++ *
++ *        . the burst list is emptied
++ *
++ *        . Q is inserted in the burst list, as Q may be the first queue
++ *          in a possible new burst (then the burst list contains just Q
++ *          after this step).
++ */
++static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			     bool idle_for_long_time)
++{
++	/*
++	 * If bfqq happened to be activated in a burst, but has been idle
++	 * for at least as long as an interactive queue, then we assume
++	 * that, in the overall I/O initiated in the burst, the I/O
++	 * associated to bfqq is finished. So bfqq does not need to be
++	 * treated as a queue belonging to a burst anymore. Accordingly,
++	 * we reset bfqq's in_large_burst flag if set, and remove bfqq
++	 * from the burst list if it's there. We do not, however, decrement
++	 * burst_size, because the fact that bfqq no longer needs to belong
++	 * to the burst list does not invalidate the fact that
++	 * bfqq may have been activated during the current burst.
++	 */
++	if (idle_for_long_time) {
++		hlist_del_init(&bfqq->burst_list_node);
++		bfq_clear_bfqq_in_large_burst(bfqq);
++	}
++
++	/*
++	 * If bfqq is already in the burst list or is part of a large
++	 * burst, then there is nothing else to do.
++	 */
++	if (!hlist_unhashed(&bfqq->burst_list_node) ||
++	    bfq_bfqq_in_large_burst(bfqq))
++		return;
++
++	/*
++	 * If bfqq's activation happens late enough, then the current
++	 * burst is finished, and related data structures must be reset.
++	 *
++	 * In this respect, consider the special case where bfqq is the very
++	 * first queue being activated. In this case, last_ins_in_burst is
++	 * not yet significant when we get here. But it is easy to verify
++	 * that, whether or not the following condition is true, bfqq will
++	 * end up being inserted into the burst list. In particular the
++	 * list will happen to contain only bfqq. And this is exactly what
++	 * has to happen, as bfqq may be the first queue in a possible
++	 * burst.
++	 */
++	if (time_is_before_jiffies(bfqd->last_ins_in_burst +
++	    bfqd->bfq_burst_interval)) {
++		bfqd->large_burst = false;
++		bfq_reset_burst_list(bfqd, bfqq);
++		return;
++	}
++
++	/*
++	 * If we get here, then bfqq is being activated shortly after the
++	 * last queue. So, if the current burst is also large, we can mark
++	 * bfqq as belonging to this large burst immediately.
++	 */
++	if (bfqd->large_burst) {
++		bfq_mark_bfqq_in_large_burst(bfqq);
++		return;
++	}
++
++	/*
++	 * If we get here, then a large-burst state has not yet been
++	 * reached, but bfqq is being activated shortly after the last
++	 * queue. Then we add bfqq to the burst.
++	 */
++	bfq_add_to_burst(bfqd, bfqq);
++}
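++
++/*
++ * Timeline example (editor's addition with hypothetical tunables, not
++ * part of the original code): assume bfq_burst_interval is 180 ms and
++ * bfq_large_burst_thresh is 11. If 11 queues are activated, each within
++ * 180 ms of the previous one, the 11th activation marks them all as
++ * in_large_burst and empties the burst list; a 12th queue activated
++ * 100 ms later is marked immediately; a queue activated 500 ms after
++ * that instead resets large_burst and seeds a fresh burst list.
++ */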
++
++static void bfq_add_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_data *bfqd = bfqq->bfqd;
++	struct request *next_rq, *prev;
++	unsigned long old_wr_coeff = bfqq->wr_coeff;
++	bool interactive = false;
++
++	bfq_log_bfqq(bfqd, bfqq, "add_request %d", rq_is_sync(rq));
++	bfqq->queued[rq_is_sync(rq)]++;
++	bfqd->queued++;
++
++	elv_rb_add(&bfqq->sort_list, rq);
++
++	/*
++	 * Check if this request is a better next-serve candidate.
++	 */
++	prev = bfqq->next_rq;
++	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
++	BUG_ON(next_rq == NULL);
++	bfqq->next_rq = next_rq;
++
++	/*
++	 * Adjust priority tree position, if next_rq changes.
++	 */
++	if (prev != bfqq->next_rq)
++		bfq_rq_pos_tree_add(bfqd, bfqq);
++
++	if (!bfq_bfqq_busy(bfqq)) {
++		bool soft_rt,
++		     idle_for_long_time = time_is_before_jiffies(
++						bfqq->budget_timeout +
++						bfqd->bfq_wr_min_idle_time);
++
++		if (bfq_bfqq_sync(bfqq)) {
++			bool already_in_burst =
++			   !hlist_unhashed(&bfqq->burst_list_node) ||
++			   bfq_bfqq_in_large_burst(bfqq);
++			bfq_handle_burst(bfqd, bfqq, idle_for_long_time);
++			/*
++			 * If bfqq was not already in the current burst,
++			 * then, at this point, bfqq either has been
++			 * added to the current burst or has caused the
++			 * current burst to terminate. In particular, in
++			 * the second case, bfqq has become the first
++			 * queue in a possible new burst.
++			 * In both cases last_ins_in_burst needs to be
++			 * moved forward.
++			 */
++			if (!already_in_burst)
++				bfqd->last_ins_in_burst = jiffies;
++		}
++
++		soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
++			!bfq_bfqq_in_large_burst(bfqq) &&
++			time_is_before_jiffies(bfqq->soft_rt_next_start);
++		interactive = !bfq_bfqq_in_large_burst(bfqq) &&
++			      idle_for_long_time;
++		entity->budget = max_t(unsigned long, bfqq->max_budget,
++				       bfq_serv_to_charge(next_rq, bfqq));
++
++		if (!bfq_bfqq_IO_bound(bfqq)) {
++			if (time_before(jiffies,
++					RQ_BIC(rq)->ttime.last_end_request +
++					bfqd->bfq_slice_idle)) {
++				bfqq->requests_within_timer++;
++				if (bfqq->requests_within_timer >=
++				    bfqd->bfq_requests_within_timer)
++					bfq_mark_bfqq_IO_bound(bfqq);
++			} else
++				bfqq->requests_within_timer = 0;
++		}
++
++		if (!bfqd->low_latency)
++			goto add_bfqq_busy;
++
++		/*
++		 * If the queue is not being boosted and has been idle
++		 * for enough time, start a weight-raising period
++		 */
++		if (old_wr_coeff == 1 && (interactive || soft_rt)) {
++			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++			if (interactive)
++				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++			else
++				bfqq->wr_cur_max_time =
++					bfqd->bfq_wr_rt_max_time;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "wrais starting at %lu, rais_max_time %u",
++				     jiffies,
++				     jiffies_to_msecs(bfqq->wr_cur_max_time));
++		} else if (old_wr_coeff > 1) {
++			if (interactive)
++				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++			else if (bfq_bfqq_in_large_burst(bfqq) ||
++				 (bfqq->wr_cur_max_time ==
++				  bfqd->bfq_wr_rt_max_time &&
++				  !soft_rt)) {
++				bfqq->wr_coeff = 1;
++				bfq_log_bfqq(bfqd, bfqq,
++					"wrais ending at %lu, rais_max_time %u",
++					jiffies,
++					jiffies_to_msecs(bfqq->
++						wr_cur_max_time));
++			} else if (time_before(
++					bfqq->last_wr_start_finish +
++					bfqq->wr_cur_max_time,
++					jiffies +
++					bfqd->bfq_wr_rt_max_time) &&
++				   soft_rt) {
++				/*
++				 * The remaining weight-raising time is lower
++				 * than bfqd->bfq_wr_rt_max_time, which
++				 * means that the application is enjoying
++				 * weight raising either because deemed soft-
++				 * rt in the near past, or because deemed
++				 * interactive long ago. In both cases,
++				 * resetting now the current remaining weight-
++				 * raising time for the application to the
++				 * weight-raising duration for soft rt
++				 * applications would not cause any latency
++				 * increase for the application (as the new
++				 * duration would be higher than the remaining
++				 * time).
++				 *
++				 * In addition, the application is now meeting
++				 * the requirements for being deemed soft rt.
++				 * In the end we can correctly and safely
++				 * (re)charge the weight-raising duration for
++				 * the application with the weight-raising
++				 * duration for soft rt applications.
++				 *
++				 * In particular, doing this recharge now, i.e.,
++				 * before the weight-raising period for the
++				 * application finishes, reduces the probability
++				 * of the following negative scenario:
++				 * 1) the weight of a soft rt application is
++				 *    raised at startup (as for any newly
++				 *    created application),
++				 * 2) since the application is not interactive,
++				 *    at a certain time weight-raising is
++				 *    stopped for the application,
++				 * 3) at that time the application happens to
++				 *    still have pending requests, and hence
++				 *    is destined to not have a chance to be
++				 *    deemed soft rt before these requests are
++				 *    completed (see the comments to the
++				 *    function bfq_bfqq_softrt_next_start()
++				 *    for details on soft rt detection),
++				 * 4) these pending requests experience a high
++				 *    latency because the application is not
++				 *    weight-raised while they are pending.
++				 */
++				bfqq->last_wr_start_finish = jiffies;
++				bfqq->wr_cur_max_time =
++					bfqd->bfq_wr_rt_max_time;
++			}
++		}
++		if (old_wr_coeff != bfqq->wr_coeff)
++			entity->ioprio_changed = 1;
++add_bfqq_busy:
++		bfqq->last_idle_bklogged = jiffies;
++		bfqq->service_from_backlogged = 0;
++		bfq_clear_bfqq_softrt_update(bfqq);
++		bfq_add_bfqq_busy(bfqd, bfqq);
++	} else {
++		if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) &&
++		    time_is_before_jiffies(
++				bfqq->last_wr_start_finish +
++				bfqd->bfq_wr_min_inter_arr_async)) {
++			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
++			bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
++
++			bfqd->wr_busy_queues++;
++			entity->ioprio_changed = 1;
++			bfq_log_bfqq(bfqd, bfqq,
++			    "non-idle wrais starting at %lu, rais_max_time %u",
++			    jiffies,
++			    jiffies_to_msecs(bfqq->wr_cur_max_time));
++		}
++		if (prev != bfqq->next_rq)
++			bfq_updated_next_req(bfqd, bfqq);
++	}
++
++	if (bfqd->low_latency &&
++		(old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive))
++		bfqq->last_wr_start_finish = jiffies;
++}
++
++static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd,
++					  struct bio *bio)
++{
++	struct task_struct *tsk = current;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	bic = bfq_bic_lookup(bfqd, tsk->io_context);
++	if (bic == NULL)
++		return NULL;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	if (bfqq != NULL)
++		return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio));
++
++	return NULL;
++}
++
++static void bfq_activate_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++
++	bfqd->rq_in_driver++;
++	bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++	bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
++		(unsigned long long)bfqd->last_position);
++}
++
++static inline void bfq_deactivate_request(struct request_queue *q,
++					  struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++
++	BUG_ON(bfqd->rq_in_driver == 0);
++	bfqd->rq_in_driver--;
++}
++
++static void bfq_remove_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_data *bfqd = bfqq->bfqd;
++	const int sync = rq_is_sync(rq);
++
++	if (bfqq->next_rq == rq) {
++		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
++		bfq_updated_next_req(bfqd, bfqq);
++	}
++
++	if (rq->queuelist.prev != &rq->queuelist)
++		list_del_init(&rq->queuelist);
++	BUG_ON(bfqq->queued[sync] == 0);
++	bfqq->queued[sync]--;
++	bfqd->queued--;
++	elv_rb_del(&bfqq->sort_list, rq);
++
++	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue)
++			bfq_del_bfqq_busy(bfqd, bfqq, 1);
++		/*
++		 * Remove queue from request-position tree as it is empty.
++		 */
++		if (bfqq->pos_root != NULL) {
++			rb_erase(&bfqq->pos_node, bfqq->pos_root);
++			bfqq->pos_root = NULL;
++		}
++	}
++
++	if (rq->cmd_flags & REQ_META) {
++		BUG_ON(bfqq->meta_pending == 0);
++		bfqq->meta_pending--;
++	}
++}
++
++static int bfq_merge(struct request_queue *q, struct request **req,
++		     struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct request *__rq;
++
++	__rq = bfq_find_rq_fmerge(bfqd, bio);
++	if (__rq != NULL && elv_rq_merge_ok(__rq, bio)) {
++		*req = __rq;
++		return ELEVATOR_FRONT_MERGE;
++	}
++
++	return ELEVATOR_NO_MERGE;
++}
++
++static void bfq_merged_request(struct request_queue *q, struct request *req,
++			       int type)
++{
++	if (type == ELEVATOR_FRONT_MERGE &&
++	    rb_prev(&req->rb_node) &&
++	    blk_rq_pos(req) <
++	    blk_rq_pos(container_of(rb_prev(&req->rb_node),
++				    struct request, rb_node))) {
++		struct bfq_queue *bfqq = RQ_BFQQ(req);
++		struct bfq_data *bfqd = bfqq->bfqd;
++		struct request *prev, *next_rq;
++
++		/* Reposition request in its sort_list */
++		elv_rb_del(&bfqq->sort_list, req);
++		elv_rb_add(&bfqq->sort_list, req);
++		/* Choose next request to be served for bfqq */
++		prev = bfqq->next_rq;
++		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
++					 bfqd->last_position);
++		BUG_ON(next_rq == NULL);
++		bfqq->next_rq = next_rq;
++		/*
++		 * If next_rq changes, update both the queue's budget to
++		 * fit the new request and the queue's position in its
++		 * rq_pos_tree.
++		 */
++		if (prev != bfqq->next_rq) {
++			bfq_updated_next_req(bfqd, bfqq);
++			bfq_rq_pos_tree_add(bfqd, bfqq);
++		}
++	}
++}
++
++static void bfq_merged_requests(struct request_queue *q, struct request *rq,
++				struct request *next)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq), *next_bfqq = RQ_BFQQ(next);
++
++	/*
++	 * If next and rq belong to the same bfq_queue and next is older
++	 * than rq, then reposition rq in the fifo (by substituting next
++	 * with rq). Otherwise, if next and rq belong to different
++	 * bfq_queues, never reposition rq: in fact, we would have to
++	 * reposition it with respect to next's position in its own fifo,
++	 * which would most certainly be too expensive with respect to
++	 * the benefits.
++	 */
++	if (bfqq == next_bfqq &&
++	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
++	    time_before(next->fifo_time, rq->fifo_time)) {
++		list_del_init(&rq->queuelist);
++		list_replace_init(&next->queuelist, &rq->queuelist);
++		rq->fifo_time = next->fifo_time;
++	}
++
++	if (bfqq->next_rq == next)
++		bfqq->next_rq = rq;
++
++	bfq_remove_request(next);
++}
++
++/* Must be called with bfqq != NULL */
++static inline void bfq_bfqq_end_wr(struct bfq_queue *bfqq)
++{
++	BUG_ON(bfqq == NULL);
++	if (bfq_bfqq_busy(bfqq))
++		bfqq->bfqd->wr_busy_queues--;
++	bfqq->wr_coeff = 1;
++	bfqq->wr_cur_max_time = 0;
++	/* Trigger a weight change on the next activation of the queue */
++	bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++				    struct bfq_group *bfqg)
++{
++	int i, j;
++
++	for (i = 0; i < 2; i++)
++		for (j = 0; j < IOPRIO_BE_NR; j++)
++			if (bfqg->async_bfqq[i][j] != NULL)
++				bfq_bfqq_end_wr(bfqg->async_bfqq[i][j]);
++	if (bfqg->async_idle_bfqq != NULL)
++		bfq_bfqq_end_wr(bfqg->async_idle_bfqq);
++}
++
++static void bfq_end_wr(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq;
++
++	spin_lock_irq(bfqd->queue->queue_lock);
++
++	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
++		bfq_bfqq_end_wr(bfqq);
++	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list)
++		bfq_bfqq_end_wr(bfqq);
++	bfq_end_wr_async(bfqd);
++
++	spin_unlock_irq(bfqd->queue->queue_lock);
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++			   struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	/*
++	 * Disallow merge of a sync bio into an async request.
++	 */
++	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++		return 0;
++
++	/*
++	 * Lookup the bfqq that this bio will be queued with. Allow
++	 * merge only if rq is queued there.
++	 * Queue lock is held here.
++	 */
++	bic = bfq_bic_lookup(bfqd, current->io_context);
++	if (bic == NULL)
++		return 0;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++				       struct bfq_queue *bfqq)
++{
++	if (bfqq != NULL) {
++		bfq_mark_bfqq_must_alloc(bfqq);
++		bfq_mark_bfqq_budget_new(bfqq);
++		bfq_clear_bfqq_fifo_expire(bfqq);
++
++		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			     "set_in_service_queue, cur-budget = %lu",
++			     bfqq->entity.budget);
++	}
++
++	bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
++						  struct bfq_queue *bfqq)
++{
++	if (!bfqq)
++		bfqq = bfq_get_next_queue(bfqd);
++	else
++		bfq_get_next_queue_forced(bfqd, bfqq);
++
++	__bfq_set_in_service_queue(bfqd, bfqq);
++	return bfqq;
++}
++
++static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
++					  struct request *rq)
++{
++	if (blk_rq_pos(rq) >= bfqd->last_position)
++		return blk_rq_pos(rq) - bfqd->last_position;
++	else
++		return bfqd->last_position - blk_rq_pos(rq);
++}
++
++/*
++ * Return true if rq is close enough to bfqd->last_position, i.e., within
++ * BFQQ_SEEK_THR sectors of it.
++ */
++static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++{
++	return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++}
++
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++{
++	struct rb_root *root = &bfqd->rq_pos_tree;
++	struct rb_node *parent, *node;
++	struct bfq_queue *__bfqq;
++	sector_t sector = bfqd->last_position;
++
++	if (RB_EMPTY_ROOT(root))
++		return NULL;
++
++	/*
++	 * First, if we find a request starting at the end of the last
++	 * request, choose it.
++	 */
++	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
++	if (__bfqq != NULL)
++		return __bfqq;
++
++	/*
++	 * If the exact sector wasn't found, the parent of the NULL leaf
++	 * will contain the closest sector (rq_pos_tree sorted by
++	 * next_request position).
++	 */
++	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
++	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++		return __bfqq;
++
++	if (blk_rq_pos(__bfqq->next_rq) < sector)
++		node = rb_next(&__bfqq->pos_node);
++	else
++		node = rb_prev(&__bfqq->pos_node);
++	if (node == NULL)
++		return NULL;
++
++	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
++	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++		return __bfqq;
++
++	return NULL;
++}
++
++/*
++ * bfqd - obvious
++ * cur_bfqq - passed in so that we don't decide that the current queue
++ *            is closely cooperating with itself.
++ *
++ * We are assuming that cur_bfqq has dispatched at least one request,
++ * and that bfqd->last_position reflects a position on the disk associated
++ * with the I/O issued by cur_bfqq.
++ */
++static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
++					      struct bfq_queue *cur_bfqq)
++{
++	struct bfq_queue *bfqq;
++
++	if (bfq_class_idle(cur_bfqq))
++		return NULL;
++	if (!bfq_bfqq_sync(cur_bfqq))
++		return NULL;
++	if (BFQQ_SEEKY(cur_bfqq))
++		return NULL;
++
++	/* If device has only one backlogged bfq_queue, don't search. */
++	if (bfqd->busy_queues == 1)
++		return NULL;
++
++	/*
++	 * We should notice if some of the queues are cooperating, e.g.
++	 * working closely on the same area of the disk. In that case,
++	 * we can group them together and don't waste time idling.
++	 */
++	bfqq = bfqq_close(bfqd);
++	if (bfqq == NULL || bfqq == cur_bfqq)
++		return NULL;
++
++	/*
++	 * Do not merge queues from different bfq_groups.
++	 */
++	if (bfqq->entity.parent != cur_bfqq->entity.parent)
++		return NULL;
++
++	/*
++	 * It only makes sense to merge sync queues.
++	 */
++	if (!bfq_bfqq_sync(bfqq))
++		return NULL;
++	if (BFQQ_SEEKY(bfqq))
++		return NULL;
++
++	/*
++	 * Do not merge queues of different priority classes.
++	 */
++	if (bfq_class_rt(bfqq) != bfq_class_rt(cur_bfqq))
++		return NULL;
++
++	return bfqq;
++}
++
++/*
++ * If enough samples have been computed, return the current max budget
++ * stored in bfqd, which is dynamically updated according to the
++ * estimated disk peak rate; otherwise return the default max budget
++ */
++static inline unsigned long bfq_max_budget(struct bfq_data *bfqd)
++{
++	if (bfqd->budgets_assigned < 194)
++		return bfq_default_max_budget;
++	else
++		return bfqd->bfq_max_budget;
++}
++
++/*
++ * Return min budget, which is a fraction of the current or default
++ * max budget (trying with 1/32)
++ */
++static inline unsigned long bfq_min_budget(struct bfq_data *bfqd)
++{
++	if (bfqd->budgets_assigned < 194)
++		return bfq_default_max_budget / 32;
++	else
++		return bfqd->bfq_max_budget / 32;
++}
++
++static void bfq_arm_slice_timer(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfqd->in_service_queue;
++	struct bfq_io_cq *bic;
++	unsigned long sl;
++
++	BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	/* Processes have exited, don't wait. */
++	bic = bfqd->in_service_bic;
++	if (bic == NULL || atomic_read(&bic->icq.ioc->active_ref) == 0)
++		return;
++
++	bfq_mark_bfqq_wait_request(bfqq);
++
++	/*
++	 * We don't want to idle for seeks, but we do want to allow
++	 * fair distribution of slice time for a process doing back-to-back
++	 * seeks. So allow a little bit of time for it to submit a new rq.
++	 *
++	 * To prevent processes with (partly) seeky workloads from
++	 * being too ill-treated, grant them a small fraction of the
++	 * assigned budget before reducing the waiting time to
++	 * BFQ_MIN_TT. This happened to help reduce latency.
++	 */
++	sl = bfqd->bfq_slice_idle;
++	/*
++	 * Unless the queue is being weight-raised or the scenario is
++	 * asymmetric, grant only minimum idle time if the queue either
++	 * has been seeky for long enough or has already proved to be
++	 * constantly seeky.
++	 */
++	if (bfq_sample_valid(bfqq->seek_samples) &&
++	    ((BFQQ_SEEKY(bfqq) && bfqq->entity.service >
++				  bfq_max_budget(bfqq->bfqd) / 8) ||
++	      bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1 &&
++	    symmetric_scenario)
++		sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT));
++	else if (bfqq->wr_coeff > 1)
++		sl = sl * 3;
++	bfqd->last_idling_start = ktime_get();
++	mod_timer(&bfqd->idle_slice_timer, jiffies + sl);
++	bfq_log(bfqd, "arm idle: %u/%u ms",
++		jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle));
++}
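++
++/*
++ * Worked example (editor's addition, not part of the original code),
++ * assuming HZ = 1000: the default idle window is bfq_slice_idle =
++ * HZ / 125 = 8 ms. A constantly seeky, non-weight-raised queue in a
++ * symmetric scenario gets only min(8, BFQ_MIN_TT) = 2 ms of idling,
++ * while a weight-raised queue gets 3 * 8 = 24 ms.
++ */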
++
++/*
++ * Set the maximum time for the in-service queue to consume its
++ * budget. This prevents seeky processes from lowering the disk
++ * throughput (always guaranteed with a time slice scheme as in CFQ).
++ */
++static void bfq_set_budget_timeout(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfqd->in_service_queue;
++	unsigned int timeout_coeff;
++
++	if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
++		timeout_coeff = 1;
++	else
++		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++
++	bfqd->last_budget_start = ktime_get();
++
++	bfq_clear_bfqq_budget_new(bfqq);
++	bfqq->budget_timeout = jiffies +
++		bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++
++	bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
++		jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
++		timeout_coeff));
++}
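++
++/*
++ * Example (editor's addition, not part of the original code), assuming
++ * HZ = 1000: the sync budget timeout is bfq_timeout_sync = HZ / 8 =
++ * 125 ms. An interactively weight-raised queue whose weight is twice
++ * its original weight gets timeout_coeff = 2, i.e., 250 ms, whereas a
++ * soft real-time queue keeps timeout_coeff = 1 regardless of its weight.
++ */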
++
++/*
++ * Move request from internal lists to the request queue dispatch list.
++ */
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	/*
++	 * For consistency, the next instruction should have been executed
++	 * after removing the request from the queue and dispatching it.
++	 * We execute instead this instruction before bfq_remove_request()
++	 * (and hence introduce a temporary inconsistency), for efficiency.
++	 * In fact, in a forced_dispatch, this prevents two counters related
++	 * to bfqq->dispatched from being uselessly decremented if bfqq
++	 * is not in service, and then incremented again after
++	 * incrementing bfqq->dispatched.
++	 */
++	bfqq->dispatched++;
++	bfq_remove_request(rq);
++	elv_dispatch_sort(q, rq);
++
++	if (bfq_bfqq_sync(bfqq))
++		bfqd->sync_flight++;
++}
++
++/*
++ * Return expired entry, or NULL to just start from scratch in rbtree.
++ */
++static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
++{
++	struct request *rq = NULL;
++
++	if (bfq_bfqq_fifo_expire(bfqq))
++		return NULL;
++
++	bfq_mark_bfqq_fifo_expire(bfqq);
++
++	if (list_empty(&bfqq->fifo))
++		return NULL;
++
++	rq = rq_entry_fifo(bfqq->fifo.next);
++
++	if (time_before(jiffies, rq->fifo_time))
++		return NULL;
++
++	return rq;
++}
++
++/* Must be called with the queue_lock held. */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++	int process_refs, io_refs;
++
++	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++	BUG_ON(process_refs < 0);
++	return process_refs;
++}
++
++static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	int process_refs, new_process_refs;
++	struct bfq_queue *__bfqq;
++
++	/*
++	 * If there are no process references on the new_bfqq, then it is
++	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++	 * may have dropped their last reference (not just their last process
++	 * reference).
++	 */
++	if (!bfqq_process_refs(new_bfqq))
++		return;
++
++	/* Avoid a circular list and skip interim queue merges. */
++	while ((__bfqq = new_bfqq->new_bfqq)) {
++		if (__bfqq == bfqq)
++			return;
++		new_bfqq = __bfqq;
++	}
++
++	process_refs = bfqq_process_refs(bfqq);
++	new_process_refs = bfqq_process_refs(new_bfqq);
++	/*
++	 * If the process for the bfqq has gone away, there is no
++	 * sense in merging the queues.
++	 */
++	if (process_refs == 0 || new_process_refs == 0)
++		return;
++
++	/*
++	 * Merge in the direction of the lesser amount of work.
++	 */
++	if (new_process_refs >= process_refs) {
++		bfqq->new_bfqq = new_bfqq;
++		atomic_add(process_refs, &new_bfqq->ref);
++	} else {
++		new_bfqq->new_bfqq = bfqq;
++		atomic_add(new_process_refs, &bfqq->ref);
++	}
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++		new_bfqq->pid);
++}
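++
++/*
++ * Example of the merge direction (editor's addition, not part of the
++ * original code): if bfqq has 3 process references and new_bfqq has 1,
++ * the else branch sets new_bfqq->new_bfqq = bfqq and adds 1 reference
++ * to bfqq, so the queue with less pending work is drained into the
++ * busier one rather than the other way around.
++ */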
++
++static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	return entity->budget - entity->service;
++}
++
++static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	__bfq_bfqd_reset_in_service(bfqd);
++
++	/*
++	 * If this bfqq is shared between multiple processes, check
++	 * to make sure that those processes are still issuing I/Os
++	 * within the mean seek distance. If not, it may be time to
++	 * break the queues apart again.
++	 */
++	if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq))
++		bfq_mark_bfqq_split_coop(bfqq);
++
++	if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		/*
++		 * Overloading budget_timeout field to store the time
++		 * at which the queue remains with no backlog; used by
++		 * the weight-raising mechanism.
++		 */
++		bfqq->budget_timeout = jiffies;
++		bfq_del_bfqq_busy(bfqd, bfqq, 1);
++	} else {
++		bfq_activate_bfqq(bfqd, bfqq);
++		/*
++		 * Resort priority tree of potential close cooperators.
++		 */
++		bfq_rq_pos_tree_add(bfqd, bfqq);
++	}
++}
++
++/**
++ * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior.
++ * @bfqd: device data.
++ * @bfqq: queue to update.
++ * @reason: reason for expiration.
++ *
++ * Handle the feedback on @bfqq budget.  See the body for detailed
++ * comments.
++ */
++static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd,
++				     struct bfq_queue *bfqq,
++				     enum bfqq_expiration reason)
++{
++	struct request *next_rq;
++	unsigned long budget, min_budget;
++
++	budget = bfqq->max_budget;
++	min_budget = bfq_min_budget(bfqd);
++
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %lu, budg left %lu",
++		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %lu, min budg %lu",
++		budget, bfq_min_budget(bfqd));
++	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d",
++		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue));
++
++	if (bfq_bfqq_sync(bfqq)) {
++		switch (reason) {
++		/*
++		 * Caveat: in all the following cases we trade latency
++		 * for throughput.
++		 */
++		case BFQ_BFQQ_TOO_IDLE:
++			/*
++			 * This is the only case where we may reduce
++			 * the budget: if there is no request of the
++			 * process still waiting for completion, then
++			 * we assume (tentatively) that the timer has
++			 * expired because the batch of requests of
++			 * the process could have been served with a
++			 * smaller budget.  Hence, betting that the
++			 * process will behave in the same way when it
++			 * becomes backlogged again, we reduce its
++			 * next budget.  As long as we guess right,
++			 * this budget cut reduces the latency
++			 * experienced by the process.
++			 *
++			 * However, if there are still outstanding
++			 * requests, then the process may not yet have
++			 * issued its next request just because it is
++			 * still waiting for the completion of some of
++			 * the still outstanding ones.  So in this
++			 * subcase we do not reduce its budget, on the
++			 * contrary we increase it to possibly boost
++			 * the throughput, as discussed in the
++			 * comments to the BUDGET_TIMEOUT case.
++			 */
++			if (bfqq->dispatched > 0) /* still outstanding reqs */
++				budget = min(budget * 2, bfqd->bfq_max_budget);
++			else {
++				if (budget > 5 * min_budget)
++					budget -= 4 * min_budget;
++				else
++					budget = min_budget;
++			}
++			break;
++		case BFQ_BFQQ_BUDGET_TIMEOUT:
++			/*
++			 * We double the budget here because: 1) it
++			 * gives the chance to boost the throughput if
++			 * this is not a seeky process (which may have
++			 * bumped into this timeout because of, e.g.,
++			 * ZBR), 2) together with charge_full_budget
++			 * it helps give seeky processes higher
++			 * timestamps, and hence be served less
++			 * frequently.
++			 */
++			budget = min(budget * 2, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_BUDGET_EXHAUSTED:
++			/*
++			 * The process still has backlog, and did not
++			 * let either the budget timeout or the disk
++			 * idling timeout expire. Hence it is not
++			 * seeky, has a short thinktime and may be
++			 * happy with a higher budget too. So
++			 * definitely increase the budget of this good
++			 * candidate to boost the disk throughput.
++			 */
++			budget = min(budget * 4, bfqd->bfq_max_budget);
++			break;
++		case BFQ_BFQQ_NO_MORE_REQUESTS:
++			/*
++			 * Leave the budget unchanged.
++			 */
++		default:
++			return;
++		}
++	} else /* async queue */
++	    /* Async queues always get the maximum possible budget
++	     * (their ability to dispatch is limited by
++	     * @bfqd->bfq_max_budget_async_rq).
++	     */
++		budget = bfqd->bfq_max_budget;
++
++	bfqq->max_budget = budget;
++
++	if (bfqd->budgets_assigned >= 194 && bfqd->bfq_user_max_budget == 0 &&
++	    bfqq->max_budget > bfqd->bfq_max_budget)
++		bfqq->max_budget = bfqd->bfq_max_budget;
++
++	/*
++	 * Make sure that we have enough budget for the next request.
++	 * Since the finish time of the bfqq must be kept in sync with
++	 * the budget, be sure to call __bfq_bfqq_expire() after the
++	 * update.
++	 */
++	next_rq = bfqq->next_rq;
++	if (next_rq != NULL)
++		bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget,
++					    bfq_serv_to_charge(next_rq, bfqq));
++	else
++		bfqq->entity.budget = bfqq->max_budget;
++
++	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %lu",
++			next_rq != NULL ? blk_rq_sectors(next_rq) : 0,
++			bfqq->entity.budget);
++}
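++
++/*
++ * Feedback at a glance (hypothetical numbers; this sketch assumes
++ * min_budget is a small fraction of bfq_max_budget, e.g. 512 vs
++ * 16384): a sync queue holding max_budget == 8192 gets
++ *  - TOO_IDLE with nothing dispatched: 8192 > 5 * 512, so the budget
++ *    shrinks to 8192 - 4 * 512 == 6144;
++ *  - BUDGET_TIMEOUT: doubled and clamped, min(2 * 8192, 16384) == 16384;
++ *  - BUDGET_EXHAUSTED: quadrupled and clamped, again 16384.
++ */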
++
++static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout)
++{
++	unsigned long max_budget;
++
++	/*
++	 * The max_budget calculated when autotuning is equal to the
++	 * amount of sectors transferred in timeout_sync at the
++	 * estimated peak rate.
++	 */
++	max_budget = (unsigned long)(peak_rate * 1000 *
++				     timeout >> BFQ_RATE_SHIFT);
++
++	return max_budget;
++}
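++
++/*
++ * Worked example (illustrative; assumes BFQ_RATE_SHIFT == 16, with
++ * peak_rate stored in sectors/usec in fixed point): a device doing
++ * ~100 MB/s moves ~0.2 sectors/usec, i.e., peak_rate ~= 0.2 * 65536
++ * ~= 13107. With timeout_sync == 125 ms this gives
++ *
++ *   max_budget = 13107 * 1000 * 125 >> 16 ~= 25000 sectors (~12 MiB),
++ *
++ * the service a queue may consume before its budget timeout fires.
++ */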
++
++/*
++ * In addition to updating the peak rate, checks whether the process
++ * is "slow", and returns 1 if so. This slow flag is used, in addition
++ * to the budget timeout, to reduce the amount of service provided to
++ * seeky processes, and hence reduce their chances to lower the
++ * throughput. See the code for more details.
++ */
++static int bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++				int compensate, enum bfqq_expiration reason)
++{
++	u64 bw, usecs, expected, timeout;
++	ktime_t delta;
++	int update = 0;
++
++	if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq))
++		return 0;
++
++	if (compensate)
++		delta = bfqd->last_idling_start;
++	else
++		delta = ktime_get();
++	delta = ktime_sub(delta, bfqd->last_budget_start);
++	usecs = ktime_to_us(delta);
++
++	/* Don't trust short/unrealistic values. */
++	if (usecs < 100 || usecs >= LONG_MAX)
++		return 0;
++
++	/*
++	 * Calculate the bandwidth for the last slice.  We use a 64 bit
++	 * value to store the peak rate, in sectors per usec in fixed
++	 * point math.  We do so to have enough precision in the estimate
++	 * and to avoid overflows.
++	 */
++	bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT;
++	do_div(bw, (unsigned long)usecs);
++
++	timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++	/*
++	 * Use only long (> 20ms) intervals to filter out spikes for
++	 * the peak rate estimation.
++	 */
++	if (usecs > 20000) {
++		if (bw > bfqd->peak_rate ||
++		   (!BFQQ_SEEKY(bfqq) &&
++		    reason == BFQ_BFQQ_BUDGET_TIMEOUT)) {
++			bfq_log(bfqd, "measured bw =%llu", bw);
++			/*
++			 * To smooth oscillations use a low-pass filter with
++			 * alpha=7/8, i.e.,
++			 * new_rate = (7/8) * old_rate + (1/8) * bw
++			 */
++			do_div(bw, 8);
++			if (bw == 0)
++				return 0;
++			bfqd->peak_rate *= 7;
++			do_div(bfqd->peak_rate, 8);
++			bfqd->peak_rate += bw;
++			update = 1;
++			bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate);
++		}
++
++		update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1;
++
++		if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES)
++			bfqd->peak_rate_samples++;
++
++		if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
++		    update) {
++			int dev_type = blk_queue_nonrot(bfqd->queue);
++			if (bfqd->bfq_user_max_budget == 0) {
++				bfqd->bfq_max_budget =
++					bfq_calc_max_budget(bfqd->peak_rate,
++							    timeout);
++				bfq_log(bfqd, "new max_budget=%lu",
++					bfqd->bfq_max_budget);
++			}
++			if (bfqd->device_speed == BFQ_BFQD_FAST &&
++			    bfqd->peak_rate < device_speed_thresh[dev_type]) {
++				bfqd->device_speed = BFQ_BFQD_SLOW;
++				bfqd->RT_prod = R_slow[dev_type] *
++						T_slow[dev_type];
++			} else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++			    bfqd->peak_rate > device_speed_thresh[dev_type]) {
++				bfqd->device_speed = BFQ_BFQD_FAST;
++				bfqd->RT_prod = R_fast[dev_type] *
++						T_fast[dev_type];
++			}
++		}
++	}
++
++	/*
++	 * If the process has been served for too short a time
++	 * interval to let its possible sequential accesses prevail on
++	 * the initial seek time needed to move the disk head on the
++	 * first sector it requested, then give the process a chance
++	 * and for the moment return false.
++	 */
++	if (bfqq->entity.budget <= bfq_max_budget(bfqd) / 8)
++		return 0;
++
++	/*
++	 * A process is considered ``slow'' (i.e., seeky, so that we
++	 * cannot treat it fairly in the service domain, as it would
++	 * slow down too much the other processes) if, when a slice
++	 * ends for whatever reason, it has received service at a
++	 * rate that would not be high enough to complete the budget
++	 * before the budget timeout expiration.
++	 */
++	expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT;
++
++	/*
++	 * Caveat: processes doing IO in the slower disk zones will
++	 * tend to be slow(er) even if not seeky. And the estimated
++	 * peak rate will actually be an average over the disk
++	 * surface. Hence, to not be too harsh with unlucky processes,
++	 * we keep a budget/3 margin of safety before declaring a
++	 * process slow.
++	 */
++	return expected > (4 * bfqq->entity.budget) / 3;
++}
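++
++/*
++ * Filter walk-through (hypothetical values): with a stored peak_rate
++ * of 13107 and a fresh measurement bw == 16384, the code above
++ * computes 16384 / 8 == 2048 and 13107 * 7 / 8 == 11468, so the new
++ * peak_rate is 11468 + 2048 == 13516 -- one eighth of the way from
++ * the old estimate toward the new sample, as the alpha=7/8 low-pass
++ * filter prescribes.
++ */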
++
++/*
++ * To be deemed as soft real-time, an application must meet two
++ * requirements. First, the application must not require an average
++ * bandwidth higher than the approximate bandwidth required to play back or
++ * record a compressed high-definition video.
++ * The next function is invoked on the completion of the last request of a
++ * batch, to compute the next-start time instant, soft_rt_next_start, such
++ * that, if the next request of the application does not arrive before
++ * soft_rt_next_start, then the above requirement on the bandwidth is met.
++ *
++ * The second requirement is that the request pattern of the application is
++ * isochronous, i.e., that, after issuing a request or a batch of requests,
++ * the application stops issuing new requests until all its pending requests
++ * have been completed. After that, the application may issue a new batch,
++ * and so on.
++ * For this reason the next function is invoked to compute
++ * soft_rt_next_start only for applications that meet this requirement,
++ * whereas soft_rt_next_start is set to infinity for applications that do
++ * not.
++ *
++ * Unfortunately, even a greedy application may happen to behave in an
++ * isochronous way if the CPU load is high. In fact, the application may
++ * stop issuing requests while the CPUs are busy serving other processes,
++ * then restart, then stop again for a while, and so on. In addition, if
++ * the disk achieves a low enough throughput with the request pattern
++ * issued by the application (e.g., because the request pattern is random
++ * and/or the device is slow), then the application may meet the above
++ * bandwidth requirement too. To prevent such a greedy application from
++ * being deemed soft real-time, a further rule is used in the computation of
++ * soft_rt_next_start: soft_rt_next_start must be higher than the current
++ * time plus the maximum time for which the arrival of a request is waited
++ * for when a sync queue becomes idle, namely bfqd->bfq_slice_idle.
++ * This filters out greedy applications, as the latter issue instead their
++ * next request as soon as possible after the last one has been completed
++ * (in contrast, when a batch of requests is completed, a soft real-time
++ * application spends some time processing data).
++ *
++ * Unfortunately, the last filter may easily generate false positives if
++ * only bfqd->bfq_slice_idle is used as a reference time interval and one
++ * or both the following cases occur:
++ * 1) HZ is so low that the duration of a jiffy is comparable to or higher
++ *    than bfqd->bfq_slice_idle. This happens, e.g., on slow devices with
++ *    HZ=100.
++ * 2) jiffies, instead of increasing at a constant rate, may stop increasing
++ *    for a while, then suddenly 'jump' by several units to recover the lost
++ *    increments. This seems to happen, e.g., inside virtual machines.
++ * To address this issue, we do not use as a reference time interval just
++ * bfqd->bfq_slice_idle, but bfqd->bfq_slice_idle plus a few jiffies. In
++ * particular we add the minimum number of jiffies for which the filter
++ * seems to be quite precise also in embedded systems and KVM/QEMU virtual
++ * machines.
++ */
++static inline unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
++						       struct bfq_queue *bfqq)
++{
++	return max(bfqq->last_idle_bklogged +
++		   HZ * bfqq->service_from_backlogged /
++		   bfqd->bfq_wr_max_softrt_rate,
++		   jiffies + bfqq->bfqd->bfq_slice_idle + 4);
++}
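++
++/*
++ * Example (hypothetical numbers, HZ == 1000): if the queue received
++ * service_from_backlogged == 2048 sectors since it last emptied and
++ * bfq_wr_max_softrt_rate == 7000 sectors/s, the bandwidth term is
++ * last_idle_bklogged + 1000 * 2048 / 7000 ~= last_idle_bklogged + 292
++ * jiffies; the max() then picks jiffies + bfq_slice_idle + 4 instead
++ * whenever that is later, which is what filters out greedy issuers.
++ */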
++
++/*
++ * Return the largest-possible time instant such that, for as long as possible,
++ * the current time will be lower than this time instant according to the macro
++ * time_is_before_jiffies().
++ */
++static inline unsigned long bfq_infinity_from_now(unsigned long now)
++{
++	return now + ULONG_MAX / 2;
++}
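++
++/*
++ * For instance, on a 32-bit unsigned long with HZ == 1000, now +
++ * ULONG_MAX / 2 remains "in the future" for time_is_before_jiffies()
++ * for about 24.8 days -- the longest horizon the jiffies wraparound
++ * arithmetic allows.
++ */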
++
++/**
++ * bfq_bfqq_expire - expire a queue.
++ * @bfqd: device owning the queue.
++ * @bfqq: the queue to expire.
++ * @compensate: if true, compensate for the time spent idling.
++ * @reason: the reason causing the expiration.
++ *
++ * If the process associated to the queue is slow (i.e., seeky), or in
++ * case of budget timeout, or, finally, if it is async, we
++ * artificially charge it an entire budget (independently of the
++ * actual service it received). As a consequence, the queue will get
++ * higher timestamps than the correct ones upon reactivation, and
++ * hence it will be rescheduled as if it had received more service
++ * than what it actually received. In the end, this class of processes
++ * will receive less service in proportion to how slowly they consume
++ * their budgets (and hence how seriously they tend to lower the
++ * throughput).
++ *
++ * In contrast, when a queue expires because it has been idling for
++ * too long or because it exhausted its budget, we do not touch the
++ * amount of service it has received. Hence when the queue will be
++ * reactivated and its timestamps updated, the latter will be in sync
++ * with the actual service received by the queue until expiration.
++ *
++ * Charging a full budget to the first type of queues and the exact
++ * service to the others has the effect of using the WF2Q+ policy to
++ * schedule the former on a timeslice basis, without violating the
++ * service domain guarantees of the latter.
++ */
++static void bfq_bfqq_expire(struct bfq_data *bfqd,
++			    struct bfq_queue *bfqq,
++			    int compensate,
++			    enum bfqq_expiration reason)
++{
++	int slow;
++	BUG_ON(bfqq != bfqd->in_service_queue);
++
++	/* Update disk peak rate for autotuning and check whether the
++	 * process is slow (see bfq_update_peak_rate).
++	 */
++	slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason);
++
++	/*
++	 * As explained above, 'punish' slow (i.e., seeky), timed-out
++	 * and async queues, to favor sequential sync workloads.
++	 *
++	 * Processes doing I/O in the slower disk zones will tend to be
++	 * slow(er) even if not seeky. Hence, since the estimated peak
++	 * rate is actually an average over the disk surface, these
++	 * processes may timeout just for bad luck. To avoid punishing
++	 * them we do not charge a full budget to a process that
++	 * succeeded in consuming at least 2/3 of its budget.
++	 */
++	if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++		     bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3))
++		bfq_bfqq_charge_full_budget(bfqq);
++
++	bfqq->service_from_backlogged += bfqq->entity.service;
++
++	if (BFQQ_SEEKY(bfqq) && reason == BFQ_BFQQ_BUDGET_TIMEOUT &&
++	    !bfq_bfqq_constantly_seeky(bfqq)) {
++		bfq_mark_bfqq_constantly_seeky(bfqq);
++		if (!blk_queue_nonrot(bfqd->queue))
++			bfqd->const_seeky_busy_in_flight_queues++;
++	}
++
++	if (reason == BFQ_BFQQ_TOO_IDLE &&
++	    bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
++		bfq_clear_bfqq_IO_bound(bfqq);
++
++	if (bfqd->low_latency && bfqq->wr_coeff == 1)
++		bfqq->last_wr_start_finish = jiffies;
++
++	if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 &&
++	    RB_EMPTY_ROOT(&bfqq->sort_list)) {
++		/*
++		 * If we get here, and there are no outstanding requests,
++		 * then the request pattern is isochronous (see the comments
++		 * to the function bfq_bfqq_softrt_next_start()). Hence we
++		 * can compute soft_rt_next_start. If, instead, the queue
++		 * still has outstanding requests, then we have to wait
++		 * for the completion of all the outstanding requests to
++		 * discover whether the request pattern is actually
++		 * isochronous.
++		 */
++		if (bfqq->dispatched == 0)
++			bfqq->soft_rt_next_start =
++				bfq_bfqq_softrt_next_start(bfqd, bfqq);
++		else {
++			/*
++			 * The application is still waiting for the
++			 * completion of one or more requests:
++			 * prevent it from possibly being incorrectly
++			 * deemed as soft real-time by setting its
++			 * soft_rt_next_start to infinity. In fact,
++			 * without this assignment, the application
++			 * would be incorrectly deemed as soft
++			 * real-time if:
++			 * 1) it issued a new request before the
++			 *    completion of all its in-flight
++			 *    requests, and
++			 * 2) at that time, its soft_rt_next_start
++			 *    happened to be in the past.
++			 */
++			bfqq->soft_rt_next_start =
++				bfq_infinity_from_now(jiffies);
++			/*
++			 * Schedule an update of soft_rt_next_start to when
++			 * the task may be discovered to be isochronous.
++			 */
++			bfq_mark_bfqq_softrt_update(bfqq);
++		}
++	}
++
++	bfq_log_bfqq(bfqd, bfqq,
++		"expire (%d, slow %d, num_disp %d, idle_win %d)", reason,
++		slow, bfqq->dispatched, bfq_bfqq_idle_window(bfqq));
++
++	/*
++	 * Increase, decrease or leave budget unchanged according to
++	 * reason.
++	 */
++	__bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
++	__bfq_bfqq_expire(bfqd, bfqq);
++}
++
++/*
++ * Budget timeout is not implemented through a dedicated timer, but
++ * just checked on request arrivals and completions, as well as on
++ * idle timer expirations.
++ */
++static int bfq_bfqq_budget_timeout(struct bfq_queue *bfqq)
++{
++	if (bfq_bfqq_budget_new(bfqq) ||
++	    time_before(jiffies, bfqq->budget_timeout))
++		return 0;
++	return 1;
++}
++
++/*
++ * If we expire a queue that is waiting for the arrival of a new
++ * request, we may prevent the fictitious timestamp back-shifting that
++ * allows the guarantees of the queue to be preserved (see [1] for
++ * this tricky aspect). Hence we return true only if this condition
++ * does not hold, or if the queue is slow enough to deserve only to be
++ * kicked off for preserving a high throughput.
++ */
++static inline int bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqq->bfqd, bfqq,
++		"may_budget_timeout: wait_request %d left %d timeout %d",
++		bfq_bfqq_wait_request(bfqq),
++			bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3,
++		bfq_bfqq_budget_timeout(bfqq));
++
++	return (!bfq_bfqq_wait_request(bfqq) ||
++		bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)
++		&&
++		bfq_bfqq_budget_timeout(bfqq);
++}
++
++/*
++ * Device idling is allowed only for the queues for which this function
++ * returns true. For this reason, the return value of this function plays a
++ * critical role for both throughput boosting and service guarantees. The
++ * return value is computed through a logical expression. In this rather
++ * long comment, we try to briefly describe all the details and motivations
++ * behind the components of this logical expression.
++ *
++ * First, the expression is false if bfqq is not sync, or if both of the
++ * following hold: bfqq became active during a large burst of queue
++ * activations, and the pattern of the requests in bfqq boosts the
++ * throughput if bfqq is expired. In fact, queues that became active
++ * during a large burst benefit only from throughput, as discussed in
++ * the comments to bfq_handle_burst.
++ * In this respect, expiring bfqq certainly boosts the throughput on NCQ-
++ * capable flash-based devices, whereas, on rotational devices, it boosts
++ * the throughput only if bfqq contains random requests.
++ *
++ * At the opposite end, if (a) bfqq is sync, (b) the above burst-related
++ * condition does not hold, and (c) bfqq is being weight-raised, then the
++ * expression always evaluates to true, as device idling is instrumental
++ * for preserving low-latency guarantees (see [1]). If, instead, conditions
++ * (a) and (b) do hold, but (c) does not, then the expression evaluates to
++ * true only if: (1) bfqq is I/O-bound and has a non-null idle window, and
++ * (2) at least one of the following two conditions holds.
++ * The first condition is that the device is not performing NCQ, because
++ * idling the device most certainly boosts the throughput if this condition
++ * holds and bfqq is I/O-bound and has been granted a non-null idle window.
++ * The second compound condition is made of the logical AND of two components.
++ *
++ * The first component is true only if there is no weight-raised busy
++ * queue. This guarantees that the device is not idled for a sync non-
++ * weight-raised queue when there are busy weight-raised queues. The former
++ * is then expired immediately if empty. Combined with the timestamping
++ * rules of BFQ (see [1] for details), this causes sync non-weight-raised
++ * queues to get a lower number of requests served, and hence to ask for a
++ * lower number of requests from the request pool, before the busy weight-
++ * raised queues get served again.
++ *
++ * This is beneficial for the processes associated with weight-raised
++ * queues, when the request pool is saturated (e.g., in the presence of
++ * write hogs). In fact, if the processes associated with the other queues
++ * ask for requests at a lower rate, then weight-raised processes have a
++ * higher probability to get a request from the pool immediately (or at
++ * least soon) when they need one. Hence they have a higher probability to
++ * actually get a fraction of the disk throughput proportional to their
++ * high weight. This is especially true with NCQ-capable drives, which
++ * enqueue several requests in advance and further reorder internally-
++ * queued requests.
++ *
++ * In the end, mistreating non-weight-raised queues when there are busy
++ * weight-raised queues seems to mitigate starvation problems in the
++ * presence of heavy write workloads and NCQ, and hence to guarantee a
++ * higher application and system responsiveness in these hostile scenarios.
++ *
++ * If the first component of the compound condition is instead true, i.e.,
++ * there is no weight-raised busy queue, then the second component of the
++ * compound condition takes into account service-guarantee and throughput
++ * issues related to NCQ (recall that the compound condition is evaluated
++ * only if the device is detected as supporting NCQ).
++ *
++ * As for service guarantees, allowing the drive to enqueue more than one
++ * request at a time, and hence delegating de facto final scheduling
++ * decisions to the drive's internal scheduler, causes loss of control on
++ * the actual request service order. In this respect, when the drive is
++ * allowed to enqueue more than one request at a time, the service
++ * distribution enforced by the drive's internal scheduler is likely to
++ * coincide with the desired device-throughput distribution only in the
++ * following, perfectly symmetric, scenario:
++ * 1) all active queues have the same weight,
++ * 2) all active groups at the same level in the groups tree have the same
++ *    weight,
++ * 3) all active groups at the same level in the groups tree have the same
++ *    number of children.
++ *
++ * Even in such a scenario, sequential I/O may still receive a preferential
++ * treatment, but this is not likely to be a big issue with flash-based
++ * devices, because of their non-dramatic loss of throughput with random
++ * I/O. Things do differ with HDDs, for which additional care is taken, as
++ * explained after completing the discussion for flash-based devices.
++ *
++ * Unfortunately, keeping the necessary state for evaluating exactly the
++ * above symmetry conditions would be quite complex and time-consuming.
++ * Therefore BFQ evaluates instead the following stronger sub-conditions,
++ * for which it is much easier to maintain the needed state:
++ * 1) all active queues have the same weight,
++ * 2) all active groups have the same weight,
++ * 3) all active groups have at most one active child each.
++ * In particular, the last two conditions are always true if hierarchical
++ * support and the cgroups interface are not enabled, hence no state needs
++ * to be maintained in this case.
++ *
++ * According to the above considerations, the second component of the
++ * compound condition evaluates to true if any of the above symmetry
++ * sub-conditions does not hold, or the device is not flash-based. Therefore,
++ * if also the first component is true, then idling is allowed for a sync
++ * queue. These are the only sub-conditions considered if the device is
++ * flash-based, as, for such a device, it is sensible to force idling only
++ * for service-guarantee issues. In fact, as for throughput, idling
++ * NCQ-capable flash-based devices would not boost the throughput even
++ * with sequential I/O; rather it would lower the throughput in proportion
++ * to how fast the device is. In the end, (only) if all the three
++ * sub-conditions hold and the device is flash-based, the compound
++ * condition evaluates to false and therefore no idling is performed.
++ *
++ * As already said, things change with a rotational device, where idling
++ * boosts the throughput with sequential I/O (even with NCQ). Hence, for
++ * such a device the second component of the compound condition evaluates
++ * to true also if the following additional sub-condition does not hold:
++ * the queue is constantly seeky. Unfortunately, this different behavior
++ * with respect to flash-based devices causes an additional asymmetry: if
++ * some sync queues enjoy idling and some other sync queues do not, then
++ * the latter get a low share of the device throughput, simply because the
++ * former get many requests served after being set as in service, whereas
++ * the latter do not. As a consequence, to guarantee the desired throughput
++ * distribution, on HDDs the compound expression evaluates to true (and
++ * hence device idling is performed) also if the following last symmetry
++ * condition does not hold: no other queue is benefiting from idling. Also
++ * this last condition is actually replaced with a simpler-to-maintain and
++ * stronger condition: there is no busy queue which is not constantly seeky
++ * (and hence may also benefit from idling).
++ *
++ * To sum up, when all the required symmetry and throughput-boosting
++ * sub-conditions hold, the second component of the compound condition
++ * evaluates to false, and hence no idling is performed. This helps to
++ * keep the drives' internal queues full on NCQ-capable devices, and hence
++ * to boost the throughput, without causing 'almost' any loss of service
++ * guarantees. The 'almost' follows from the fact that, if the internal
++ * queue of one such device is filled while all the sub-conditions hold,
++ * but at some point in time some sub-condition ceases to hold, then it may
++ * become impossible to let requests be served in the new desired order
++ * until all the requests already queued in the device have been served.
++ */
++static inline bool bfq_bfqq_must_not_expire(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++#define cond_for_seeky_on_ncq_hdd (bfq_bfqq_constantly_seeky(bfqq) && \
++				   bfqd->busy_in_flight_queues == \
++				   bfqd->const_seeky_busy_in_flight_queues)
++
++#define cond_for_expiring_in_burst	(bfq_bfqq_in_large_burst(bfqq) && \
++					 bfqd->hw_tag && \
++					 (blk_queue_nonrot(bfqd->queue) || \
++					  bfq_bfqq_constantly_seeky(bfqq)))
++
++/*
++ * Condition for expiring a non-weight-raised queue (and hence not idling
++ * the device).
++ */
++#define cond_for_expiring_non_wr  (bfqd->hw_tag && \
++				   (bfqd->wr_busy_queues > 0 || \
++				    (blk_queue_nonrot(bfqd->queue) || \
++				      cond_for_seeky_on_ncq_hdd)))
++
++	return bfq_bfqq_sync(bfqq) &&
++		!cond_for_expiring_in_burst &&
++		(bfqq->wr_coeff > 1 || !symmetric_scenario ||
++		 (bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_idle_window(bfqq) &&
++		  !cond_for_expiring_non_wr)
++	);
++}
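++
++/*
++ * Condensed reading of the return expression above (same logic):
++ *
++ *   idle = sync && !cond_for_expiring_in_burst &&
++ *          (weight_raised || !symmetric_scenario ||
++ *           (IO_bound && idle_window && !cond_for_expiring_non_wr));
++ *
++ * For example, on an NCQ SSD (hw_tag set, nonrot queue) with all
++ * queues at the same weight and none weight-raised,
++ * cond_for_expiring_non_wr is true and symmetric_scenario holds, so a
++ * plain sync queue is not idled and the device queue stays full.
++ */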
++
++/*
++ * If the in-service queue is empty but sync, and the function
++ * bfq_bfqq_must_not_expire returns true, then:
++ * 1) the queue must remain in service and cannot be expired, and
++ * 2) the disk must be idled to wait for the possible arrival of a new
++ *    request for the queue.
++ * See the comments to the function bfq_bfqq_must_not_expire for the reasons
++ * why performing device idling is the best choice to boost the throughput
++ * and preserve service guarantees when bfq_bfqq_must_not_expire itself
++ * returns true.
++ */
++static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	return RB_EMPTY_ROOT(&bfqq->sort_list) && bfqd->bfq_slice_idle != 0 &&
++	       bfq_bfqq_must_not_expire(bfqq);
++}
++
++/*
++ * Select a queue for service.  If we have a current queue in service,
++ * check whether to continue servicing it, or retrieve and set a new one.
++ */
++static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq, *new_bfqq = NULL;
++	struct request *next_rq;
++	enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++
++	bfqq = bfqd->in_service_queue;
++	if (bfqq == NULL)
++		goto new_queue;
++
++	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
++
++	/*
++	 * If another queue has a request waiting within our mean seek
++	 * distance, let it run. The expire code will check for close
++	 * cooperators and put the close queue at the front of the
++	 * service tree. If possible, merge the expiring queue with the
++	 * new bfqq.
++	 */
++	new_bfqq = bfq_close_cooperator(bfqd, bfqq);
++	if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
++		bfq_setup_merge(bfqq, new_bfqq);
++
++	if (bfq_may_expire_for_budg_timeout(bfqq) &&
++	    !timer_pending(&bfqd->idle_slice_timer) &&
++	    !bfq_bfqq_must_idle(bfqq))
++		goto expire;
++
++	next_rq = bfqq->next_rq;
++	/*
++	 * If bfqq has requests queued and it has enough budget left to
++	 * serve them, keep the queue, otherwise expire it.
++	 */
++	if (next_rq != NULL) {
++		if (bfq_serv_to_charge(next_rq, bfqq) >
++			bfq_bfqq_budget_left(bfqq)) {
++			reason = BFQ_BFQQ_BUDGET_EXHAUSTED;
++			goto expire;
++		} else {
++			/*
++			 * The idle timer may be pending because we may
++			 * not disable disk idling even when a new request
++			 * arrives.
++			 */
++			if (timer_pending(&bfqd->idle_slice_timer)) {
++				/*
++				 * If we get here: 1) at least one new request
++				 * has arrived but we have not disabled the
++				 * timer because the request was too small,
++				 * 2) then the block layer has unplugged
++				 * the device, causing the dispatch to be
++				 * invoked.
++				 *
++				 * Since the device is unplugged, now the
++				 * requests are probably large enough to
++				 * provide a reasonable throughput.
++				 * So we disable idling.
++				 */
++				bfq_clear_bfqq_wait_request(bfqq);
++				del_timer(&bfqd->idle_slice_timer);
++			}
++			if (new_bfqq == NULL)
++				goto keep_queue;
++			else
++				goto expire;
++		}
++	}
++
++	/*
++	 * No requests pending. However, if the in-service queue is idling
++	 * for a new request, or has requests waiting for a completion and
++	 * may idle after their completion, then keep it anyway.
++	 */
++	if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
++	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
++		bfqq = NULL;
++		goto keep_queue;
++	} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
++		/*
++		 * Expiring the queue because there is a close cooperator,
++		 * cancel timer.
++		 */
++		bfq_clear_bfqq_wait_request(bfqq);
++		del_timer(&bfqd->idle_slice_timer);
++	}
++
++	reason = BFQ_BFQQ_NO_MORE_REQUESTS;
++expire:
++	bfq_bfqq_expire(bfqd, bfqq, 0, reason);
++new_queue:
++	bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
++	bfq_log(bfqd, "select_queue: new queue %d returned",
++		bfqq != NULL ? bfqq->pid : 0);
++keep_queue:
++	return bfqq;
++}
++
++static void bfq_update_wr_data(struct bfq_data *bfqd,
++			       struct bfq_queue *bfqq)
++{
++	if (bfqq->wr_coeff > 1) { /* queue is being boosted */
++		struct bfq_entity *entity = &bfqq->entity;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
++			jiffies_to_msecs(jiffies -
++				bfqq->last_wr_start_finish),
++			jiffies_to_msecs(bfqq->wr_cur_max_time),
++			bfqq->wr_coeff,
++			bfqq->entity.weight, bfqq->entity.orig_weight);
++
++		BUG_ON(bfqq != bfqd->in_service_queue && entity->weight !=
++		       entity->orig_weight * bfqq->wr_coeff);
++		if (entity->ioprio_changed)
++			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++		/*
++		 * If the queue was activated in a burst, or
++		 * too much time has elapsed since the beginning
++		 * of this weight-raising, then end weight raising.
++		 */
++		if (bfq_bfqq_in_large_burst(bfqq) ||
++		    time_is_before_jiffies(bfqq->last_wr_start_finish +
++					   bfqq->wr_cur_max_time)) {
++			bfqq->last_wr_start_finish = jiffies;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "wrais ending at %lu, rais_max_time %u",
++				     bfqq->last_wr_start_finish,
++				     jiffies_to_msecs(bfqq->wr_cur_max_time));
++			bfq_bfqq_end_wr(bfqq);
++			__bfq_entity_update_weight_prio(
++				bfq_entity_service_tree(entity),
++				entity);
++		}
++	}
++}
++
++/*
++ * Dispatch one request from bfqq, moving it to the request queue
++ * dispatch list.
++ */
++static int bfq_dispatch_request(struct bfq_data *bfqd,
++				struct bfq_queue *bfqq)
++{
++	int dispatched = 0;
++	struct request *rq;
++	unsigned long service_to_charge;
++
++	BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	/* Follow expired path, else get first next available. */
++	rq = bfq_check_fifo(bfqq);
++	if (rq == NULL)
++		rq = bfqq->next_rq;
++	service_to_charge = bfq_serv_to_charge(rq, bfqq);
++
++	if (service_to_charge > bfq_bfqq_budget_left(bfqq)) {
++		/*
++		 * This may happen if the next rq is chosen in fifo order
++		 * instead of sector order. The budget is properly
++		 * dimensioned to be always sufficient to serve the next
++		 * request only if it is chosen in sector order. The reason
++		 * is that it would be quite inefficient and of little use
++		 * to always make sure that the budget is large enough to
++		 * serve even the possible next rq in fifo order.
++		 * In fact, requests are seldom served in fifo order.
++		 *
++		 * Expire the queue for budget exhaustion, and make sure
++		 * that the next act_budget is enough to serve the next
++		 * request, even if it comes from the fifo expired path.
++		 */
++		bfqq->next_rq = rq;
++		/*
++		 * Since this dispatch failed, make sure that
++		 * a new one will be performed.
++		 */
++		if (!bfqd->rq_in_driver)
++			bfq_schedule_dispatch(bfqd);
++		goto expire;
++	}
++
++	/* Finally, insert request into driver dispatch list. */
++	bfq_bfqq_served(bfqq, service_to_charge);
++	bfq_dispatch_insert(bfqd->queue, rq);
++
++	bfq_update_wr_data(bfqd, bfqq);
++
++	bfq_log_bfqq(bfqd, bfqq,
++			"dispatched %u sec req (%llu), budg left %lu",
++			blk_rq_sectors(rq),
++			(unsigned long long)blk_rq_pos(rq),
++			bfq_bfqq_budget_left(bfqq));
++
++	dispatched++;
++
++	if (bfqd->in_service_bic == NULL) {
++		atomic_long_inc(&RQ_BIC(rq)->icq.ioc->refcount);
++		bfqd->in_service_bic = RQ_BIC(rq);
++	}
++
++	if (bfqd->busy_queues > 1 && ((!bfq_bfqq_sync(bfqq) &&
++	    dispatched >= bfqd->bfq_max_budget_async_rq) ||
++	    bfq_class_idle(bfqq)))
++		goto expire;
++
++	return dispatched;
++
++expire:
++	bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_EXHAUSTED);
++	return dispatched;
++}
++
++static int __bfq_forced_dispatch_bfqq(struct bfq_queue *bfqq)
++{
++	int dispatched = 0;
++
++	while (bfqq->next_rq != NULL) {
++		bfq_dispatch_insert(bfqq->bfqd->queue, bfqq->next_rq);
++		dispatched++;
++	}
++
++	BUG_ON(!list_empty(&bfqq->fifo));
++	return dispatched;
++}
++
++/*
++ * Drain our current requests.
++ * Used for barriers and when switching I/O schedulers on the fly.
++ */
++static int bfq_forced_dispatch(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq, *n;
++	struct bfq_service_tree *st;
++	int dispatched = 0;
++
++	bfqq = bfqd->in_service_queue;
++	if (bfqq != NULL)
++		__bfq_bfqq_expire(bfqd, bfqq);
++
++	/*
++	 * Loop through classes, and be careful to leave the scheduler
++	 * in a consistent state, as feedback mechanisms and vtime
++	 * updates cannot be disabled during the process.
++	 */
++	list_for_each_entry_safe(bfqq, n, &bfqd->active_list, bfqq_list) {
++		st = bfq_entity_service_tree(&bfqq->entity);
++
++		dispatched += __bfq_forced_dispatch_bfqq(bfqq);
++		bfqq->max_budget = bfq_max_budget(bfqd);
++
++		bfq_forget_idle(st);
++	}
++
++	BUG_ON(bfqd->busy_queues != 0);
++
++	return dispatched;
++}
++
++static int bfq_dispatch_requests(struct request_queue *q, int force)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq;
++	int max_dispatch;
++
++	bfq_log(bfqd, "dispatch requests: %d busy queues", bfqd->busy_queues);
++	if (bfqd->busy_queues == 0)
++		return 0;
++
++	if (unlikely(force))
++		return bfq_forced_dispatch(bfqd);
++
++	bfqq = bfq_select_queue(bfqd);
++	if (bfqq == NULL)
++		return 0;
++
++	if (bfq_class_idle(bfqq))
++		max_dispatch = 1;
++
++	if (!bfq_bfqq_sync(bfqq))
++		max_dispatch = bfqd->bfq_max_budget_async_rq;
++
++	if (!bfq_bfqq_sync(bfqq) && bfqq->dispatched >= max_dispatch) {
++		if (bfqd->busy_queues > 1)
++			return 0;
++		if (bfqq->dispatched >= 4 * max_dispatch)
++			return 0;
++	}
++
++	if (bfqd->sync_flight != 0 && !bfq_bfqq_sync(bfqq))
++		return 0;
++
++	bfq_clear_bfqq_wait_request(bfqq);
++	BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++	if (!bfq_dispatch_request(bfqd, bfqq))
++		return 0;
++
++	bfq_log_bfqq(bfqd, bfqq, "dispatched %s request",
++			bfq_bfqq_sync(bfqq) ? "sync" : "async");
++
++	return 1;
++}
++
++/*
++ * Task holds one reference to the queue, dropped when task exits.  Each rq
++ * in-flight on this queue also holds a reference, dropped when rq is freed.
++ *
++ * Queue lock must be held here.
++ */
++static void bfq_put_queue(struct bfq_queue *bfqq)
++{
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	BUG_ON(atomic_read(&bfqq->ref) <= 0);
++
++	bfq_log_bfqq(bfqd, bfqq, "put_queue: %p %d", bfqq,
++		     atomic_read(&bfqq->ref));
++	if (!atomic_dec_and_test(&bfqq->ref))
++		return;
++
++	BUG_ON(rb_first(&bfqq->sort_list) != NULL);
++	BUG_ON(bfqq->allocated[READ] + bfqq->allocated[WRITE] != 0);
++	BUG_ON(bfqq->entity.tree != NULL);
++	BUG_ON(bfq_bfqq_busy(bfqq));
++	BUG_ON(bfqd->in_service_queue == bfqq);
++
++	if (bfq_bfqq_sync(bfqq))
++		/*
++		 * The fact that this queue is being destroyed does not
++		 * invalidate the fact that this queue may have been
++		 * activated during the current burst. As a consequence,
++		 * although the queue does not exist anymore, and hence
++		 * needs to be removed from the burst list if present,
++		 * the burst size must not be decremented.
++		 */
++		hlist_del_init(&bfqq->burst_list_node);
++
++	bfq_log_bfqq(bfqd, bfqq, "put_queue: %p freed", bfqq);
++
++	kmem_cache_free(bfq_pool, bfqq);
++}
++
++static void bfq_put_cooperator(struct bfq_queue *bfqq)
++{
++	struct bfq_queue *__bfqq, *next;
++
++	/*
++	 * If this queue was scheduled to merge with another queue, be
++	 * sure to drop the reference taken on that queue (and others in
++	 * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs.
++	 */
++	__bfqq = bfqq->new_bfqq;
++	while (__bfqq) {
++		if (__bfqq == bfqq)
++			break;
++		next = __bfqq->new_bfqq;
++		bfq_put_queue(__bfqq);
++		__bfqq = next;
++	}
++}
++
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	if (bfqq == bfqd->in_service_queue) {
++		__bfq_bfqq_expire(bfqd, bfqq);
++		bfq_schedule_dispatch(bfqd);
++	}
++
++	bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++
++	bfq_put_cooperator(bfqq);
++
++	bfq_put_queue(bfqq);
++}
++
++static inline void bfq_init_icq(struct io_cq *icq)
++{
++	struct bfq_io_cq *bic = icq_to_bic(icq);
++
++	bic->ttime.last_end_request = jiffies;
++}
++
++static void bfq_exit_icq(struct io_cq *icq)
++{
++	struct bfq_io_cq *bic = icq_to_bic(icq);
++	struct bfq_data *bfqd = bic_to_bfqd(bic);
++
++	if (bic->bfqq[BLK_RW_ASYNC]) {
++		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_ASYNC]);
++		bic->bfqq[BLK_RW_ASYNC] = NULL;
++	}
++
++	if (bic->bfqq[BLK_RW_SYNC]) {
++		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
++		bic->bfqq[BLK_RW_SYNC] = NULL;
++	}
++}
++
++/*
++ * Update the entity prio values; note that the new values will not
++ * be used until the next (re)activation.
++ */
++static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++	struct task_struct *tsk = current;
++	int ioprio_class;
++
++	ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++	switch (ioprio_class) {
++	default:
++		dev_err(bfqq->bfqd->queue->backing_dev_info.dev,
++			"bfq: bad prio class %d\n", ioprio_class);
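++		/* fall through */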
++	case IOPRIO_CLASS_NONE:
++		/*
++		 * No prio set, inherit CPU scheduling settings.
++		 */
++		bfqq->entity.new_ioprio = task_nice_ioprio(tsk);
++		bfqq->entity.new_ioprio_class = task_nice_ioclass(tsk);
++		break;
++	case IOPRIO_CLASS_RT:
++		bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_RT;
++		break;
++	case IOPRIO_CLASS_BE:
++		bfqq->entity.new_ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_BE;
++		break;
++	case IOPRIO_CLASS_IDLE:
++		bfqq->entity.new_ioprio_class = IOPRIO_CLASS_IDLE;
++		bfqq->entity.new_ioprio = 7;
++		bfq_clear_bfqq_idle_window(bfqq);
++		break;
++	}
++
++	if (bfqq->entity.new_ioprio < 0 ||
++	    bfqq->entity.new_ioprio >= IOPRIO_BE_NR) {
++		printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n",
++				 bfqq->entity.new_ioprio);
++		BUG();
++	}
++
++	bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->entity.new_ioprio);
++	bfqq->entity.ioprio_changed = 1;
++}
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic)
++{
++	struct bfq_data *bfqd;
++	struct bfq_queue *bfqq, *new_bfqq;
++	struct bfq_group *bfqg;
++	unsigned long uninitialized_var(flags);
++	int ioprio = bic->icq.ioc->ioprio;
++
++	bfqd = bfq_get_bfqd_locked(&(bic->icq.q->elevator->elevator_data),
++				   &flags);
++	/*
++	 * This condition may trigger on a newly created bic, be sure to
++	 * drop the lock before returning.
++	 */
++	if (unlikely(bfqd == NULL) || likely(bic->ioprio == ioprio))
++		goto out;
++
++	bic->ioprio = ioprio;
++
++	bfqq = bic->bfqq[BLK_RW_ASYNC];
++	if (bfqq != NULL) {
++		bfqg = container_of(bfqq->entity.sched_data, struct bfq_group,
++				    sched_data);
++		new_bfqq = bfq_get_queue(bfqd, bfqg, BLK_RW_ASYNC, bic,
++					 GFP_ATOMIC);
++		if (new_bfqq != NULL) {
++			bic->bfqq[BLK_RW_ASYNC] = new_bfqq;
++			bfq_log_bfqq(bfqd, bfqq,
++				     "check_ioprio_change: bfqq %p %d",
++				     bfqq, atomic_read(&bfqq->ref));
++			bfq_put_queue(bfqq);
++		}
++	}
++
++	bfqq = bic->bfqq[BLK_RW_SYNC];
++	if (bfqq != NULL)
++		bfq_set_next_ioprio_data(bfqq, bic);
++
++out:
++	bfq_put_bfqd_unlock(bfqd, &flags);
++}
++
++static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			  struct bfq_io_cq *bic, pid_t pid, int is_sync)
++{
++	RB_CLEAR_NODE(&bfqq->entity.rb_node);
++	INIT_LIST_HEAD(&bfqq->fifo);
++	INIT_HLIST_NODE(&bfqq->burst_list_node);
++
++	atomic_set(&bfqq->ref, 0);
++	bfqq->bfqd = bfqd;
++
++	if (bic)
++		bfq_set_next_ioprio_data(bfqq, bic);
++
++	if (is_sync) {
++		if (!bfq_class_idle(bfqq))
++			bfq_mark_bfqq_idle_window(bfqq);
++		bfq_mark_bfqq_sync(bfqq);
++	}
++	bfq_mark_bfqq_IO_bound(bfqq);
++
++	/* Tentative initial value to trade off throughput and latency */
++	bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3;
++	bfqq->pid = pid;
++
++	bfqq->wr_coeff = 1;
++	bfqq->last_wr_start_finish = 0;
++	/*
++	 * Set to the value for which bfqq will not be deemed as
++	 * soft rt when it becomes backlogged.
++	 */
++	bfqq->soft_rt_next_start = bfq_infinity_from_now(jiffies);
++}
++
++static struct bfq_queue *bfq_find_alloc_queue(struct bfq_data *bfqd,
++					      struct bfq_group *bfqg,
++					      int is_sync,
++					      struct bfq_io_cq *bic,
++					      gfp_t gfp_mask)
++{
++	struct bfq_queue *bfqq, *new_bfqq = NULL;
++
++retry:
++	/* bic always exists here */
++	bfqq = bic_to_bfqq(bic, is_sync);
++
++	/*
++	 * Always try a new alloc if we fall back to the OOM bfqq
++	 * originally, since it should just be a temporary situation.
++	 */
++	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++		bfqq = NULL;
++		if (new_bfqq != NULL) {
++			bfqq = new_bfqq;
++			new_bfqq = NULL;
++		} else if (gfp_mask & __GFP_WAIT) {
++			spin_unlock_irq(bfqd->queue->queue_lock);
++			new_bfqq = kmem_cache_alloc_node(bfq_pool,
++					gfp_mask | __GFP_ZERO,
++					bfqd->queue->node);
++			spin_lock_irq(bfqd->queue->queue_lock);
++			if (new_bfqq != NULL)
++				goto retry;
++		} else {
++			bfqq = kmem_cache_alloc_node(bfq_pool,
++					gfp_mask | __GFP_ZERO,
++					bfqd->queue->node);
++		}
++
++		if (bfqq != NULL) {
++			bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
++				      is_sync);
++			bfq_init_entity(&bfqq->entity, bfqg);
++			bfq_log_bfqq(bfqd, bfqq, "allocated");
++		} else {
++			bfqq = &bfqd->oom_bfqq;
++			bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
++		}
++	}
++
++	if (new_bfqq != NULL)
++		kmem_cache_free(bfq_pool, new_bfqq);
++
++	return bfqq;
++}
++
++static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd,
++					       struct bfq_group *bfqg,
++					       int ioprio_class, int ioprio)
++{
++	switch (ioprio_class) {
++	case IOPRIO_CLASS_RT:
++		return &bfqg->async_bfqq[0][ioprio];
++	case IOPRIO_CLASS_NONE:
++		ioprio = IOPRIO_NORM;
++		/* fall through */
++	case IOPRIO_CLASS_BE:
++		return &bfqg->async_bfqq[1][ioprio];
++	case IOPRIO_CLASS_IDLE:
++		return &bfqg->async_idle_bfqq;
++	default:
++		BUG();
++	}
++}
++
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++				       struct bfq_group *bfqg, int is_sync,
++				       struct bfq_io_cq *bic, gfp_t gfp_mask)
++{
++	const int ioprio = IOPRIO_PRIO_DATA(bic->ioprio);
++	const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio);
++	struct bfq_queue **async_bfqq = NULL;
++	struct bfq_queue *bfqq = NULL;
++
++	if (!is_sync) {
++		async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
++						  ioprio);
++		bfqq = *async_bfqq;
++	}
++
++	if (bfqq == NULL)
++		bfqq = bfq_find_alloc_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++
++	/*
++	 * Pin the queue now that it's allocated, scheduler exit will
++	 * prune it.
++	 */
++	if (!is_sync && *async_bfqq == NULL) {
++		atomic_inc(&bfqq->ref);
++		bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		*async_bfqq = bfqq;
++	}
++
++	atomic_inc(&bfqq->ref);
++	bfq_log_bfqq(bfqd, bfqq, "get_queue, at end: %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++	return bfqq;
++}
++
++static void bfq_update_io_thinktime(struct bfq_data *bfqd,
++				    struct bfq_io_cq *bic)
++{
++	unsigned long elapsed = jiffies - bic->ttime.last_end_request;
++	unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle);
++
++	bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8;
++	bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8;
++	bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) /
++				bic->ttime.ttime_samples;
++}
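++
++/*
++ * Fixed-point walk-through (hypothetical): ttime_samples converges
++ * toward 256, so with ttime_total == 2048 (mean 8 jiffies) and a new
++ * sample ttime == 16, the update yields (7 * 2048 + 256 * 16) / 8 ==
++ * 2304 and a rounded mean of (2304 + 128) / 256 == 9 -- the mean
++ * moves one eighth of the way toward the new think time.
++ */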
++
++static void bfq_update_io_seektime(struct bfq_data *bfqd,
++				   struct bfq_queue *bfqq,
++				   struct request *rq)
++{
++	sector_t sdist;
++	u64 total;
++
++	if (bfqq->last_request_pos < blk_rq_pos(rq))
++		sdist = blk_rq_pos(rq) - bfqq->last_request_pos;
++	else
++		sdist = bfqq->last_request_pos - blk_rq_pos(rq);
++
++	/*
++	 * Don't allow the seek distance to get too large from the
++	 * odd fragment, pagein, etc.
++	 */
++	if (bfqq->seek_samples == 0) /* first request, not really a seek */
++		sdist = 0;
++	else if (bfqq->seek_samples <= 60) /* second & third seek */
++		sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024);
++	else
++		sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64);
++
++	bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8;
++	bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8;
++	total = bfqq->seek_total + (bfqq->seek_samples/2);
++	do_div(total, bfqq->seek_samples);
++	bfqq->seek_mean = (sector_t)total;
++
++	bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist,
++			(u64)bfqq->seek_mean);
++}
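++
++/*
++ * Capping example (hypothetical): with seek_samples > 60 and
++ * seek_mean == 1000 sectors, an incoming jump of a million sectors is
++ * clamped to 4 * 1000 + 2*1024*64 == 135072 sectors before entering
++ * the average, so a single pathological seek cannot swamp the
++ * estimate.
++ */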
++
++/*
++ * Disable idle window if the process thinks too long or seeks so much that
++ * it doesn't matter.
++ */
++static void bfq_update_idle_window(struct bfq_data *bfqd,
++				   struct bfq_queue *bfqq,
++				   struct bfq_io_cq *bic)
++{
++	int enable_idle;
++
++	/* Don't idle for async or idle io prio class. */
++	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
++		return;
++
++	enable_idle = bfq_bfqq_idle_window(bfqq);
++
++	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
++	    bfqd->bfq_slice_idle == 0 ||
++		(bfqd->hw_tag && BFQQ_SEEKY(bfqq) &&
++			bfqq->wr_coeff == 1))
++		enable_idle = 0;
++	else if (bfq_sample_valid(bic->ttime.ttime_samples)) {
++		if (bic->ttime.ttime_mean > bfqd->bfq_slice_idle &&
++			bfqq->wr_coeff == 1)
++			enable_idle = 0;
++		else
++			enable_idle = 1;
++	}
++	bfq_log_bfqq(bfqd, bfqq, "update_idle_window: enable_idle %d",
++		enable_idle);
++
++	if (enable_idle)
++		bfq_mark_bfqq_idle_window(bfqq);
++	else
++		bfq_clear_bfqq_idle_window(bfqq);
++}
++
++/*
++ * Called when a new fs request (rq) is added to bfqq.  Check if there's
++ * something we should do about it.
++ */
++static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			    struct request *rq)
++{
++	struct bfq_io_cq *bic = RQ_BIC(rq);
++
++	if (rq->cmd_flags & REQ_META)
++		bfqq->meta_pending++;
++
++	bfq_update_io_thinktime(bfqd, bic);
++	bfq_update_io_seektime(bfqd, bfqq, rq);
++	if (!BFQQ_SEEKY(bfqq) && bfq_bfqq_constantly_seeky(bfqq)) {
++		bfq_clear_bfqq_constantly_seeky(bfqq);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
++			bfqd->const_seeky_busy_in_flight_queues--;
++		}
++	}
++	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
++	    !BFQQ_SEEKY(bfqq))
++		bfq_update_idle_window(bfqd, bfqq, bic);
++
++	bfq_log_bfqq(bfqd, bfqq,
++		     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
++		     bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
++		     (unsigned long long)bfqq->seek_mean);
++
++	bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
++
++	if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) {
++		int small_req = bfqq->queued[rq_is_sync(rq)] == 1 &&
++				blk_rq_sectors(rq) < 32;
++		int budget_timeout = bfq_bfqq_budget_timeout(bfqq);
++
++		/*
++		 * There is just this request queued: if the request
++		 * is small and the queue is not to be expired, then
++		 * just exit.
++		 *
++		 * In this way, if the disk is being idled to wait for
++		 * a new request from the in-service queue, we avoid
++		 * unplugging the device and committing the disk to serve
++		 * just a small request. Instead, we wait for
++		 * the block layer to decide when to unplug the device:
++		 * hopefully, new requests will be merged to this one
++		 * quickly, then the device will be unplugged and
++		 * larger requests will be dispatched.
++		 */
++		if (small_req && !budget_timeout)
++			return;
++
++		/*
++		 * A large enough request arrived, or the queue is to
++		 * be expired: in both cases disk idling is to be
++		 * stopped, so clear wait_request flag and reset
++		 * timer.
++		 */
++		bfq_clear_bfqq_wait_request(bfqq);
++		del_timer(&bfqd->idle_slice_timer);
++
++		/*
++		 * The queue is not empty, because a new request just
++		 * arrived. Hence we can safely expire the queue, in
++		 * case of budget timeout, without risking that the
++		 * timestamps of the queue are not updated correctly.
++		 * See [1] for more details.
++		 */
++		if (budget_timeout)
++			bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++
++		/*
++		 * Let the request rip immediately, or let a new queue be
++		 * selected if bfqq has just been expired.
++		 */
++		__blk_run_queue(bfqd->queue);
++	}
++}
++
++static void bfq_insert_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	assert_spin_locked(bfqd->queue->queue_lock);
++
++	bfq_add_request(rq);
++
++	rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
++	list_add_tail(&rq->queuelist, &bfqq->fifo);
++
++	bfq_rq_enqueued(bfqd, bfqq, rq);
++}
++
++static void bfq_update_hw_tag(struct bfq_data *bfqd)
++{
++	bfqd->max_rq_in_driver = max(bfqd->max_rq_in_driver,
++				     bfqd->rq_in_driver);
++
++	if (bfqd->hw_tag == 1)
++		return;
++
++	/*
++	 * This sample is valid if the number of outstanding requests
++	 * is large enough to allow a queueing behavior.  Note that the
++	 * sum is not exact, as it's not taking into account deactivated
++	 * requests.
++	 */
++	if (bfqd->rq_in_driver + bfqd->queued < BFQ_HW_QUEUE_THRESHOLD)
++		return;
++
++	if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES)
++		return;
++
++	bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD;
++	bfqd->max_rq_in_driver = 0;
++	bfqd->hw_tag_samples = 0;
++}
++
++static void bfq_completed_request(struct request_queue *q, struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_data *bfqd = bfqq->bfqd;
++	bool sync = bfq_bfqq_sync(bfqq);
++
++	bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)",
++		     blk_rq_sectors(rq), sync);
++
++	bfq_update_hw_tag(bfqd);
++
++	BUG_ON(!bfqd->rq_in_driver);
++	BUG_ON(!bfqq->dispatched);
++	bfqd->rq_in_driver--;
++	bfqq->dispatched--;
++
++	if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) {
++		bfq_weights_tree_remove(bfqd, &bfqq->entity,
++					&bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->busy_in_flight_queues);
++			bfqd->busy_in_flight_queues--;
++			if (bfq_bfqq_constantly_seeky(bfqq)) {
++				BUG_ON(!bfqd->
++					const_seeky_busy_in_flight_queues);
++				bfqd->const_seeky_busy_in_flight_queues--;
++			}
++		}
++	}
++
++	if (sync) {
++		bfqd->sync_flight--;
++		RQ_BIC(rq)->ttime.last_end_request = jiffies;
++	}
++
++	/*
++	 * If we are waiting to discover whether the request pattern of the
++	 * task associated with the queue is actually isochronous, and
++	 * both requisites for this condition to hold are satisfied, then
++	 * compute soft_rt_next_start (see the comments to the function
++	 * bfq_bfqq_softrt_next_start()).
++	 */
++	if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 &&
++	    RB_EMPTY_ROOT(&bfqq->sort_list))
++		bfqq->soft_rt_next_start =
++			bfq_bfqq_softrt_next_start(bfqd, bfqq);
++
++	/*
++	 * If this is the in-service queue, check if it needs to be expired,
++	 * or if we want to idle in case it has no pending requests.
++	 */
++	if (bfqd->in_service_queue == bfqq) {
++		if (bfq_bfqq_budget_new(bfqq))
++			bfq_set_budget_timeout(bfqd);
++
++		if (bfq_bfqq_must_idle(bfqq)) {
++			bfq_arm_slice_timer(bfqd);
++			goto out;
++		} else if (bfq_may_expire_for_budg_timeout(bfqq))
++			bfq_bfqq_expire(bfqd, bfqq, 0, BFQ_BFQQ_BUDGET_TIMEOUT);
++		else if (RB_EMPTY_ROOT(&bfqq->sort_list) &&
++			 (bfqq->dispatched == 0 ||
++			  !bfq_bfqq_must_not_expire(bfqq)))
++			bfq_bfqq_expire(bfqd, bfqq, 0,
++					BFQ_BFQQ_NO_MORE_REQUESTS);
++	}
++
++	if (!bfqd->rq_in_driver)
++		bfq_schedule_dispatch(bfqd);
++
++out:
++	return;
++}
++
++static inline int __bfq_may_queue(struct bfq_queue *bfqq)
++{
++	if (bfq_bfqq_wait_request(bfqq) && bfq_bfqq_must_alloc(bfqq)) {
++		bfq_clear_bfqq_must_alloc(bfqq);
++		return ELV_MQUEUE_MUST;
++	}
++
++	return ELV_MQUEUE_MAY;
++}
++
++static int bfq_may_queue(struct request_queue *q, int rw)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct task_struct *tsk = current;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq;
++
++	/*
++	 * Don't force setup of a queue from here, as a call to may_queue
++	 * does not necessarily imply that a request actually will be
++	 * queued. So just lookup a possibly existing queue, or return
++	 * 'may queue' if that fails.
++	 */
++	bic = bfq_bic_lookup(bfqd, tsk->io_context);
++	if (bic == NULL)
++		return ELV_MQUEUE_MAY;
++
++	bfqq = bic_to_bfqq(bic, rw_is_sync(rw));
++	if (bfqq != NULL)
++		return __bfq_may_queue(bfqq);
++
++	return ELV_MQUEUE_MAY;
++}
++
++/*
++ * Queue lock held here.
++ */
++static void bfq_put_request(struct request *rq)
++{
++	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++
++	if (bfqq != NULL) {
++		const int rw = rq_data_dir(rq);
++
++		BUG_ON(!bfqq->allocated[rw]);
++		bfqq->allocated[rw]--;
++
++		rq->elv.priv[0] = NULL;
++		rq->elv.priv[1] = NULL;
++
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++	}
++}
++
++static struct bfq_queue *
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++		struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++		(long unsigned)bfqq->new_bfqq->pid);
++	bic_set_bfqq(bic, bfqq->new_bfqq, 1);
++	bfq_mark_bfqq_coop(bfqq->new_bfqq);
++	bfq_put_queue(bfqq);
++	return bic_to_bfqq(bic, 1);
++}
++
++/*
++ * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
++ * was the last process referring to said bfqq.
++ */
++static struct bfq_queue *
++bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
++{
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++	if (bfqq_process_refs(bfqq) == 1) {
++		bfqq->pid = current->pid;
++		bfq_clear_bfqq_coop(bfqq);
++		bfq_clear_bfqq_split_coop(bfqq);
++		return bfqq;
++	}
++
++	bic_set_bfqq(bic, NULL, 1);
++
++	bfq_put_cooperator(bfqq);
++
++	bfq_put_queue(bfqq);
++	return NULL;
++}
++
++/*
++ * Allocate bfq data structures associated with this request.
++ */
++static int bfq_set_request(struct request_queue *q, struct request *rq,
++			   struct bio *bio, gfp_t gfp_mask)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic = icq_to_bic(rq->elv.icq);
++	const int rw = rq_data_dir(rq);
++	const int is_sync = rq_is_sync(rq);
++	struct bfq_queue *bfqq;
++	struct bfq_group *bfqg;
++	unsigned long flags;
++
++	might_sleep_if(gfp_mask & __GFP_WAIT);
++
++	bfq_check_ioprio_change(bic);
++
++	spin_lock_irqsave(q->queue_lock, flags);
++
++	if (bic == NULL)
++		goto queue_fail;
++
++	bfqg = bfq_bic_update_cgroup(bic);
++
++new_queue:
++	bfqq = bic_to_bfqq(bic, is_sync);
++	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
++		bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
++		bic_set_bfqq(bic, bfqq, is_sync);
++	} else {
++		/*
++		 * If the queue was seeky for too long, break it apart.
++		 */
++		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
++			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
++			bfqq = bfq_split_bfqq(bic, bfqq);
++			if (!bfqq)
++				goto new_queue;
++		}
++
++		/*
++		 * Check to see if this queue is scheduled to merge with
++		 * another closely cooperating queue. The merging of queues
++		 * happens here as it must be done in process context.
++		 * The reference on new_bfqq was taken in merge_bfqqs.
++		 */
++		if (bfqq->new_bfqq != NULL)
++			bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
++	}
++
++	bfqq->allocated[rw]++;
++	atomic_inc(&bfqq->ref);
++	bfq_log_bfqq(bfqd, bfqq, "set_request: bfqq %p, %d", bfqq,
++		     atomic_read(&bfqq->ref));
++
++	rq->elv.priv[0] = bic;
++	rq->elv.priv[1] = bfqq;
++
++	spin_unlock_irqrestore(q->queue_lock, flags);
++
++	return 0;
++
++queue_fail:
++	bfq_schedule_dispatch(bfqd);
++	spin_unlock_irqrestore(q->queue_lock, flags);
++
++	return 1;
++}
++
++static void bfq_kick_queue(struct work_struct *work)
++{
++	struct bfq_data *bfqd =
++		container_of(work, struct bfq_data, unplug_work);
++	struct request_queue *q = bfqd->queue;
++
++	spin_lock_irq(q->queue_lock);
++	__blk_run_queue(q);
++	spin_unlock_irq(q->queue_lock);
++}
++
++/*
++ * Handler of the expiration of the timer running if the in-service queue
++ * is idling inside its time slice.
++ */
++static void bfq_idle_slice_timer(unsigned long data)
++{
++	struct bfq_data *bfqd = (struct bfq_data *)data;
++	struct bfq_queue *bfqq;
++	unsigned long flags;
++	enum bfqq_expiration reason;
++
++	spin_lock_irqsave(bfqd->queue->queue_lock, flags);
++
++	bfqq = bfqd->in_service_queue;
++	/*
++	 * Theoretical race here: the in-service queue can be NULL or
++	 * different from the queue that was idling if the timer handler
++	 * spins on the queue_lock and a new request arrives for the
++	 * current queue and there is a full dispatch cycle that changes
++	 * the in-service queue.  This can hardly happen, but in the worst
++	 * case we just expire a queue too early.
++	 */
++	if (bfqq != NULL) {
++		bfq_log_bfqq(bfqd, bfqq, "slice_timer expired");
++		if (bfq_bfqq_budget_timeout(bfqq))
++			/*
++			 * Also here the queue can be safely expired
++			 * for budget timeout without wasting
++			 * guarantees
++			 */
++			reason = BFQ_BFQQ_BUDGET_TIMEOUT;
++		else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0)
++			/*
++			 * The queue may not be empty upon timer expiration,
++			 * because we may not disable the timer when the
++			 * first request of the in-service queue arrives
++			 * during disk idling.
++			 */
++			reason = BFQ_BFQQ_TOO_IDLE;
++		else
++			goto schedule_dispatch;
++
++		bfq_bfqq_expire(bfqd, bfqq, 1, reason);
++	}
++
++schedule_dispatch:
++	bfq_schedule_dispatch(bfqd);
++
++	spin_unlock_irqrestore(bfqd->queue->queue_lock, flags);
++}
++
++static void bfq_shutdown_timer_wq(struct bfq_data *bfqd)
++{
++	del_timer_sync(&bfqd->idle_slice_timer);
++	cancel_work_sync(&bfqd->unplug_work);
++}
++
++static inline void __bfq_put_async_bfqq(struct bfq_data *bfqd,
++					struct bfq_queue **bfqq_ptr)
++{
++	struct bfq_group *root_group = bfqd->root_group;
++	struct bfq_queue *bfqq = *bfqq_ptr;
++
++	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
++	if (bfqq != NULL) {
++		bfq_bfqq_move(bfqd, bfqq, &bfqq->entity, root_group);
++		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++		*bfqq_ptr = NULL;
++	}
++}
++
++/*
++ * Release all the bfqg references to its async queues.  If we are
++ * deallocating the group, these queues may still contain requests, so
++ * we reparent them to the root cgroup (i.e., the only one that will
++ * exist for sure until all the requests on a device are gone).
++ */
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
++{
++	int i, j;
++
++	for (i = 0; i < 2; i++)
++		for (j = 0; j < IOPRIO_BE_NR; j++)
++			__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j]);
++
++	__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq);
++}
++
++static void bfq_exit_queue(struct elevator_queue *e)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	struct request_queue *q = bfqd->queue;
++	struct bfq_queue *bfqq, *n;
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	spin_lock_irq(q->queue_lock);
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
++		bfq_deactivate_bfqq(bfqd, bfqq, 0);
++
++	bfq_disconnect_groups(bfqd);
++	spin_unlock_irq(q->queue_lock);
++
++	bfq_shutdown_timer_wq(bfqd);
++
++	synchronize_rcu();
++
++	BUG_ON(timer_pending(&bfqd->idle_slice_timer));
++
++	bfq_free_root_group(bfqd);
++	kfree(bfqd);
++}
++
++static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++{
++	struct bfq_group *bfqg;
++	struct bfq_data *bfqd;
++	struct elevator_queue *eq;
++
++	eq = elevator_alloc(q, e);
++	if (eq == NULL)
++		return -ENOMEM;
++
++	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
++	if (bfqd == NULL) {
++		kobject_put(&eq->kobj);
++		return -ENOMEM;
++	}
++	eq->elevator_data = bfqd;
++
++	/*
++	 * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues.
++	 * Grab a permanent reference to it, so that the normal code flow
++	 * will not attempt to free it.
++	 */
++	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0);
++	atomic_inc(&bfqd->oom_bfqq.ref);
++	bfqd->oom_bfqq.entity.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
++	bfqd->oom_bfqq.entity.new_ioprio_class = IOPRIO_CLASS_BE;
++	bfqd->oom_bfqq.entity.new_weight =
++		bfq_ioprio_to_weight(bfqd->oom_bfqq.entity.new_ioprio);
++	/*
++	 * Trigger weight initialization, according to ioprio, at the
++	 * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio
++	 * class won't be changed any more.
++	 */
++	bfqd->oom_bfqq.entity.ioprio_changed = 1;
++
++	bfqd->queue = q;
++
++	spin_lock_irq(q->queue_lock);
++	q->elevator = eq;
++	spin_unlock_irq(q->queue_lock);
++
++	bfqg = bfq_alloc_root_group(bfqd, q->node);
++	if (bfqg == NULL) {
++		kfree(bfqd);
++		kobject_put(&eq->kobj);
++		return -ENOMEM;
++	}
++
++	bfqd->root_group = bfqg;
++	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);
++#ifdef CONFIG_CGROUP_BFQIO
++	bfqd->active_numerous_groups = 0;
++#endif
++
++	init_timer(&bfqd->idle_slice_timer);
++	bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
++	bfqd->idle_slice_timer.data = (unsigned long)bfqd;
++
++	bfqd->rq_pos_tree = RB_ROOT;
++	bfqd->queue_weights_tree = RB_ROOT;
++	bfqd->group_weights_tree = RB_ROOT;
++
++	INIT_WORK(&bfqd->unplug_work, bfq_kick_queue);
++
++	INIT_LIST_HEAD(&bfqd->active_list);
++	INIT_LIST_HEAD(&bfqd->idle_list);
++	INIT_HLIST_HEAD(&bfqd->burst_list);
++
++	bfqd->hw_tag = -1;
++
++	bfqd->bfq_max_budget = bfq_default_max_budget;
++
++	bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0];
++	bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1];
++	bfqd->bfq_back_max = bfq_back_max;
++	bfqd->bfq_back_penalty = bfq_back_penalty;
++	bfqd->bfq_slice_idle = bfq_slice_idle;
++	bfqd->bfq_class_idle_last_service = 0;
++	bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq;
++	bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
++	bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
++
++	bfqd->bfq_coop_thresh = 2;
++	bfqd->bfq_failed_cooperations = 7000;
++	bfqd->bfq_requests_within_timer = 120;
++
++	bfqd->bfq_large_burst_thresh = 11;
++	bfqd->bfq_burst_interval = msecs_to_jiffies(500);
++
++	bfqd->low_latency = true;
++
++	bfqd->bfq_wr_coeff = 20;
++	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
++	bfqd->bfq_wr_max_time = 0;
++	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
++	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
++	bfqd->bfq_wr_max_softrt_rate = 7000; /*
++					      * Approximate rate required
++					      * to playback or record a
++					      * high-definition compressed
++					      * video.
++					      */
++	bfqd->wr_busy_queues = 0;
++	bfqd->busy_in_flight_queues = 0;
++	bfqd->const_seeky_busy_in_flight_queues = 0;
++
++	/*
++	 * Begin by assuming, optimistically, that the device peak rate is
++	 * equal to the highest reference rate.
++	 */
++	bfqd->RT_prod = R_fast[blk_queue_nonrot(bfqd->queue)] *
++			T_fast[blk_queue_nonrot(bfqd->queue)];
++	bfqd->peak_rate = R_fast[blk_queue_nonrot(bfqd->queue)];
++	bfqd->device_speed = BFQ_BFQD_FAST;
++
++	return 0;
++}
++
++static void bfq_slab_kill(void)
++{
++	if (bfq_pool != NULL)
++		kmem_cache_destroy(bfq_pool);
++}
++
++static int __init bfq_slab_setup(void)
++{
++	bfq_pool = KMEM_CACHE(bfq_queue, 0);
++	if (bfq_pool == NULL)
++		return -ENOMEM;
++	return 0;
++}
++
++static ssize_t bfq_var_show(unsigned int var, char *page)
++{
++	return sprintf(page, "%d\n", var);
++}
++
++static ssize_t bfq_var_store(unsigned long *var, const char *page,
++			     size_t count)
++{
++	unsigned long new_val;
++	int ret = kstrtoul(page, 10, &new_val);
++
++	if (ret == 0)
++		*var = new_val;
++
++	return count;
++}
++
++static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
++		       jiffies_to_msecs(bfqd->bfq_wr_max_time) :
++		       jiffies_to_msecs(bfq_wr_duration(bfqd)));
++}
++
++static ssize_t bfq_weights_show(struct elevator_queue *e, char *page)
++{
++	struct bfq_queue *bfqq;
++	struct bfq_data *bfqd = e->elevator_data;
++	ssize_t num_char = 0;
++
++	num_char += sprintf(page + num_char, "Tot reqs queued %d\n\n",
++			    bfqd->queued);
++
++	spin_lock_irq(bfqd->queue->queue_lock);
++
++	num_char += sprintf(page + num_char, "Active:\n");
++	list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
++		num_char += sprintf(page + num_char,
++				    "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++				    bfqq->pid,
++				    bfqq->entity.weight,
++				    bfqq->queued[0],
++				    bfqq->queued[1],
++				    jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
++				    jiffies_to_msecs(bfqq->wr_cur_max_time));
++	}
++
++	num_char += sprintf(page + num_char, "Idle:\n");
++	list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
++			num_char += sprintf(page + num_char,
++				"pid%d: weight %hu, dur %d/%u\n",
++				bfqq->pid,
++				bfqq->entity.weight,
++				jiffies_to_msecs(jiffies -
++					bfqq->last_wr_start_finish),
++				jiffies_to_msecs(bfqq->wr_cur_max_time));
++	}
++
++	spin_unlock_irq(bfqd->queue->queue_lock);
++
++	return num_char;
++}
++
++#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
++static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
++{									\
++	struct bfq_data *bfqd = e->elevator_data;			\
++	unsigned int __data = __VAR;					\
++	if (__CONV)							\
++		__data = jiffies_to_msecs(__data);			\
++	return bfq_var_show(__data, (page));				\
++}
++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1);
++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1);
++SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0);
++SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0);
++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1);
++SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0);
++SHOW_FUNCTION(bfq_max_budget_async_rq_show,
++	      bfqd->bfq_max_budget_async_rq, 0);
++SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout[BLK_RW_SYNC], 1);
++SHOW_FUNCTION(bfq_timeout_async_show, bfqd->bfq_timeout[BLK_RW_ASYNC], 1);
++SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0);
++SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0);
++SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1);
++SHOW_FUNCTION(bfq_wr_min_idle_time_show, bfqd->bfq_wr_min_idle_time, 1);
++SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async,
++	1);
++SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0);
++#undef SHOW_FUNCTION
++
++#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
++static ssize_t								\
++__FUNC(struct elevator_queue *e, const char *page, size_t count)	\
++{									\
++	struct bfq_data *bfqd = e->elevator_data;			\
++	unsigned long uninitialized_var(__data);			\
++	int ret = bfq_var_store(&__data, (page), count);		\
++	if (__data < (MIN))						\
++		__data = (MIN);						\
++	else if (__data > (MAX))					\
++		__data = (MAX);						\
++	if (__CONV)							\
++		*(__PTR) = msecs_to_jiffies(__data);			\
++	else								\
++		*(__PTR) = __data;					\
++	return ret;							\
++}
++STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0);
++STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1,
++		INT_MAX, 0);
++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq,
++		1, INT_MAX, 0);
++STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0);
++STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX,
++		1);
++STORE_FUNCTION(bfq_wr_min_idle_time_store, &bfqd->bfq_wr_min_idle_time, 0,
++		INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_min_inter_arr_async_store,
++		&bfqd->bfq_wr_min_inter_arr_async, 0, INT_MAX, 1);
++STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0,
++		INT_MAX, 0);
++#undef STORE_FUNCTION
++
++/* do nothing for the moment */
++static ssize_t bfq_weights_store(struct elevator_queue *e,
++				    const char *page, size_t count)
++{
++	return count;
++}
++
++static inline unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd)
++{
++	u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]);
++
++	if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES)
++		return bfq_calc_max_budget(bfqd->peak_rate, timeout);
++	else
++		return bfq_default_max_budget;
++}
++
++static ssize_t bfq_max_budget_store(struct elevator_queue *e,
++				    const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data == 0)
++		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++	else {
++		if (__data > INT_MAX)
++			__data = INT_MAX;
++		bfqd->bfq_max_budget = __data;
++	}
++
++	bfqd->bfq_user_max_budget = __data;
++
++	return ret;
++}
++
++static ssize_t bfq_timeout_sync_store(struct elevator_queue *e,
++				      const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data < 1)
++		__data = 1;
++	else if (__data > INT_MAX)
++		__data = INT_MAX;
++
++	bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data);
++	if (bfqd->bfq_user_max_budget == 0)
++		bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd);
++
++	return ret;
++}
++
++static ssize_t bfq_low_latency_store(struct elevator_queue *e,
++				     const char *page, size_t count)
++{
++	struct bfq_data *bfqd = e->elevator_data;
++	unsigned long uninitialized_var(__data);
++	int ret = bfq_var_store(&__data, (page), count);
++
++	if (__data > 1)
++		__data = 1;
++	if (__data == 0 && bfqd->low_latency != 0)
++		bfq_end_wr(bfqd);
++	bfqd->low_latency = __data;
++
++	return ret;
++}
++
++#define BFQ_ATTR(name) \
++	__ATTR(name, S_IRUGO|S_IWUSR, bfq_##name##_show, bfq_##name##_store)
++
++static struct elv_fs_entry bfq_attrs[] = {
++	BFQ_ATTR(fifo_expire_sync),
++	BFQ_ATTR(fifo_expire_async),
++	BFQ_ATTR(back_seek_max),
++	BFQ_ATTR(back_seek_penalty),
++	BFQ_ATTR(slice_idle),
++	BFQ_ATTR(max_budget),
++	BFQ_ATTR(max_budget_async_rq),
++	BFQ_ATTR(timeout_sync),
++	BFQ_ATTR(timeout_async),
++	BFQ_ATTR(low_latency),
++	BFQ_ATTR(wr_coeff),
++	BFQ_ATTR(wr_max_time),
++	BFQ_ATTR(wr_rt_max_time),
++	BFQ_ATTR(wr_min_idle_time),
++	BFQ_ATTR(wr_min_inter_arr_async),
++	BFQ_ATTR(wr_max_softrt_rate),
++	BFQ_ATTR(weights),
++	__ATTR_NULL
++};
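++
++/*
++ * These attributes are exported by the elevator core under
++ * /sys/block/<device>/queue/iosched/ while bfq is the active scheduler.
++ * For example, assuming a device named sda, slice_idle could be
++ * inspected and tuned with:
++ *
++ *	cat /sys/block/sda/queue/iosched/slice_idle
++ *	echo 8 > /sys/block/sda/queue/iosched/slice_idle
++ */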
++
++static struct elevator_type iosched_bfq = {
++	.ops = {
++		.elevator_merge_fn =		bfq_merge,
++		.elevator_merged_fn =		bfq_merged_request,
++		.elevator_merge_req_fn =	bfq_merged_requests,
++		.elevator_allow_merge_fn =	bfq_allow_merge,
++		.elevator_dispatch_fn =		bfq_dispatch_requests,
++		.elevator_add_req_fn =		bfq_insert_request,
++		.elevator_activate_req_fn =	bfq_activate_request,
++		.elevator_deactivate_req_fn =	bfq_deactivate_request,
++		.elevator_completed_req_fn =	bfq_completed_request,
++		.elevator_former_req_fn =	elv_rb_former_request,
++		.elevator_latter_req_fn =	elv_rb_latter_request,
++		.elevator_init_icq_fn =		bfq_init_icq,
++		.elevator_exit_icq_fn =		bfq_exit_icq,
++		.elevator_set_req_fn =		bfq_set_request,
++		.elevator_put_req_fn =		bfq_put_request,
++		.elevator_may_queue_fn =	bfq_may_queue,
++		.elevator_init_fn =		bfq_init_queue,
++		.elevator_exit_fn =		bfq_exit_queue,
++	},
++	.icq_size =		sizeof(struct bfq_io_cq),
++	.icq_align =		__alignof__(struct bfq_io_cq),
++	.elevator_attrs =	bfq_attrs,
++	.elevator_name =	"bfq",
++	.elevator_owner =	THIS_MODULE,
++};
++
++static int __init bfq_init(void)
++{
++	/*
++	 * Can be 0 on HZ < 1000 setups.
++	 */
++	if (bfq_slice_idle == 0)
++		bfq_slice_idle = 1;
++
++	if (bfq_timeout_async == 0)
++		bfq_timeout_async = 1;
++
++	if (bfq_slab_setup())
++		return -ENOMEM;
++
++	/*
++	 * Times to load large popular applications for the typical systems
++	 * installed on the reference devices (see the comments before the
++	 * definitions of the two arrays).
++	 */
++	T_slow[0] = msecs_to_jiffies(2600);
++	T_slow[1] = msecs_to_jiffies(1000);
++	T_fast[0] = msecs_to_jiffies(5500);
++	T_fast[1] = msecs_to_jiffies(2000);
++
++	/*
++	 * Thresholds that determine the switch between speed classes (see
++	 * the comments before the definition of the array).
++	 */
++	device_speed_thresh[0] = (R_fast[0] + R_slow[0]) / 2;
++	device_speed_thresh[1] = (R_fast[1] + R_slow[1]) / 2;
++
++	elv_register(&iosched_bfq);
++	pr_info("BFQ I/O-scheduler: v7r8");
++
++	return 0;
++}
++
++static void __exit bfq_exit(void)
++{
++	elv_unregister(&iosched_bfq);
++	bfq_slab_kill();
++}
++
++module_init(bfq_init);
++module_exit(bfq_exit);
++
++MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
++MODULE_LICENSE("GPL");
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+new file mode 100644
+index 0000000..c343099
+--- /dev/null
++++ b/block/bfq-sched.c
+@@ -0,0 +1,1208 @@
++/*
++ * BFQ: Hierarchical B-WF2Q+ scheduler.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifdef CONFIG_CGROUP_BFQIO
++#define for_each_entity(entity)	\
++	for (; entity != NULL; entity = entity->parent)
++
++#define for_each_entity_safe(entity, parent) \
++	for (; entity && ({ parent = entity->parent; 1; }); entity = parent)
++
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++						 int extract,
++						 struct bfq_data *bfqd);
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++	struct bfq_entity *bfqg_entity;
++	struct bfq_group *bfqg;
++	struct bfq_sched_data *group_sd;
++
++	BUG_ON(next_in_service == NULL);
++
++	group_sd = next_in_service->sched_data;
++
++	bfqg = container_of(group_sd, struct bfq_group, sched_data);
++	/*
++	 * bfq_group's my_entity field is not NULL only if the group
++	 * is not the root group. We must not touch the root entity
++	 * as it must never become an in-service entity.
++	 */
++	bfqg_entity = bfqg->my_entity;
++	if (bfqg_entity != NULL)
++		bfqg_entity->budget = next_in_service->budget;
++}
++
++static int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++	struct bfq_entity *next_in_service;
++
++	if (sd->in_service_entity != NULL)
++		/* will update/requeue at the end of service */
++		return 0;
++
++	/*
++	 * NOTE: this can be improved in many ways, such as returning
++	 * 1 (and thus propagating upwards the update) only when the
++	 * budget changes, or caching the bfqq that will be scheduled
++	 * next from this subtree.  For now we worry more about
++	 * correctness than about performance...
++	 */
++	next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
++	sd->next_in_service = next_in_service;
++
++	if (next_in_service != NULL)
++		bfq_update_budget(next_in_service);
++
++	return 1;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++					     struct bfq_entity *entity)
++{
++	BUG_ON(sd->next_in_service != entity);
++}
++#else
++#define for_each_entity(entity)	\
++	for (; entity != NULL; entity = NULL)
++
++#define for_each_entity_safe(entity, parent) \
++	for (parent = NULL; entity != NULL; entity = parent)
++
++static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
++{
++	return 0;
++}
++
++static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
++					     struct bfq_entity *entity)
++{
++}
++
++static inline void bfq_update_budget(struct bfq_entity *next_in_service)
++{
++}
++#endif
++
++/*
++ * Shift for timestamp calculations.  This actually limits the maximum
++ * service allowed in one timestamp delta (small shift values increase it),
++ * the maximum total weight that can be used for the queues in the system
++ * (big shift values increase it), and the period of virtual time
++ * wraparounds.
++ */
++#define WFQ_SERVICE_SHIFT	22
++
++/**
++ * bfq_gt - compare two timestamps.
++ * @a: first ts.
++ * @b: second ts.
++ *
++ * Return @a > @b, dealing with wrapping correctly.
++ */
++static inline int bfq_gt(u64 a, u64 b)
++{
++	return (s64)(a - b) > 0;
++}
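++
++/*
++ * For example, if the virtual time has wrapped so that b == ULLONG_MAX
++ * and a == 2, then a - b == 3 as a u64; reinterpreted as an s64 that is
++ * positive, so bfq_gt(a, b) still correctly reports that @a is later
++ * than @b across the wraparound.
++ */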
++
++static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = NULL;
++
++	BUG_ON(entity == NULL);
++
++	if (entity->my_sched_data == NULL)
++		bfqq = container_of(entity, struct bfq_queue, entity);
++
++	return bfqq;
++}
++
++
++/**
++ * bfq_delta - map service into the virtual time domain.
++ * @service: amount of service.
++ * @weight: scale factor (weight of an entity or weight sum).
++ */
++static inline u64 bfq_delta(unsigned long service,
++					unsigned long weight)
++{
++	u64 d = (u64)service << WFQ_SERVICE_SHIFT;
++
++	do_div(d, weight);
++	return d;
++}
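++
++/*
++ * For example, with WFQ_SERVICE_SHIFT == 22, charging 1024 units of
++ * service to an entity of weight 100 yields a virtual-time delta of
++ * (1024 << 22) / 100, roughly 42.9 million units, while the same
++ * service at weight 200 yields half of that: doubling the weight
++ * halves how fast timestamps advance, and thus doubles the share.
++ */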
++
++/**
++ * bfq_calc_finish - assign the finish time to an entity.
++ * @entity: the entity to act upon.
++ * @service: the service to be charged to the entity.
++ */
++static inline void bfq_calc_finish(struct bfq_entity *entity,
++				   unsigned long service)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	BUG_ON(entity->weight == 0);
++
++	entity->finish = entity->start +
++		bfq_delta(service, entity->weight);
++
++	if (bfqq != NULL) {
++		bfq_log_bfqq(bfqq->bfqd, bfqq,
++			"calc_finish: serv %lu, w %d",
++			service, entity->weight);
++		bfq_log_bfqq(bfqq->bfqd, bfqq,
++			"calc_finish: start %llu, finish %llu, delta %llu",
++			entity->start, entity->finish,
++			bfq_delta(service, entity->weight));
++	}
++}
++
++/**
++ * bfq_entity_of - get an entity from a node.
++ * @node: the node field of the entity.
++ *
++ * Convert a node pointer to the corresponding entity.  This is used only
++ * to simplify the logic of some functions and not as the generic
++ * conversion mechanism because, e.g., in the tree walking functions,
++ * the check for a %NULL value would be redundant.
++ */
++static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
++{
++	struct bfq_entity *entity = NULL;
++
++	if (node != NULL)
++		entity = rb_entry(node, struct bfq_entity, rb_node);
++
++	return entity;
++}
++
++/**
++ * bfq_extract - remove an entity from a tree.
++ * @root: the tree root.
++ * @entity: the entity to remove.
++ */
++static inline void bfq_extract(struct rb_root *root,
++			       struct bfq_entity *entity)
++{
++	BUG_ON(entity->tree != root);
++
++	entity->tree = NULL;
++	rb_erase(&entity->rb_node, root);
++}
++
++/**
++ * bfq_idle_extract - extract an entity from the idle tree.
++ * @st: the service tree of the owning @entity.
++ * @entity: the entity being removed.
++ */
++static void bfq_idle_extract(struct bfq_service_tree *st,
++			     struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *next;
++
++	BUG_ON(entity->tree != &st->idle);
++
++	if (entity == st->first_idle) {
++		next = rb_next(&entity->rb_node);
++		st->first_idle = bfq_entity_of(next);
++	}
++
++	if (entity == st->last_idle) {
++		next = rb_prev(&entity->rb_node);
++		st->last_idle = bfq_entity_of(next);
++	}
++
++	bfq_extract(&st->idle, entity);
++
++	if (bfqq != NULL)
++		list_del(&bfqq->bfqq_list);
++}
++
++/**
++ * bfq_insert - generic tree insertion.
++ * @root: tree root.
++ * @entity: entity to insert.
++ *
++ * This is used for the idle and the active tree, since they are both
++ * ordered by finish time.
++ */
++static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
++{
++	struct bfq_entity *entry;
++	struct rb_node **node = &root->rb_node;
++	struct rb_node *parent = NULL;
++
++	BUG_ON(entity->tree != NULL);
++
++	while (*node != NULL) {
++		parent = *node;
++		entry = rb_entry(parent, struct bfq_entity, rb_node);
++
++		if (bfq_gt(entry->finish, entity->finish))
++			node = &parent->rb_left;
++		else
++			node = &parent->rb_right;
++	}
++
++	rb_link_node(&entity->rb_node, parent, node);
++	rb_insert_color(&entity->rb_node, root);
++
++	entity->tree = root;
++}
++
++/**
++ * bfq_update_min - update the min_start field of an entity.
++ * @entity: the entity to update.
++ * @node: one of its children.
++ *
++ * This function is called when @entity may store an invalid value for
++ * min_start due to updates to the active tree.  The function  assumes
++ * that the subtree rooted at @node (which may be its left or its right
++ * child) has a valid min_start value.
++ */
++static inline void bfq_update_min(struct bfq_entity *entity,
++				  struct rb_node *node)
++{
++	struct bfq_entity *child;
++
++	if (node != NULL) {
++		child = rb_entry(node, struct bfq_entity, rb_node);
++		if (bfq_gt(entity->min_start, child->min_start))
++			entity->min_start = child->min_start;
++	}
++}
++
++/**
++ * bfq_update_active_node - recalculate min_start.
++ * @node: the node to update.
++ *
++ * @node may have changed position or one of its children may have moved;
++ * this function updates its min_start value.  The left and right subtrees
++ * are assumed to hold a correct min_start value.
++ */
++static inline void bfq_update_active_node(struct rb_node *node)
++{
++	struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);
++
++	entity->min_start = entity->start;
++	bfq_update_min(entity, node->rb_right);
++	bfq_update_min(entity, node->rb_left);
++}
++
++/**
++ * bfq_update_active_tree - update min_start for the whole active tree.
++ * @node: the starting node.
++ *
++ * @node must be the deepest modified node after an update.  This function
++ * updates its min_start using the values held by its children, assuming
++ * that they did not change, and then updates all the nodes that may have
++ * changed in the path to the root.  The only nodes that may have changed
++ * are the ones in the path or their siblings.
++ */
++static void bfq_update_active_tree(struct rb_node *node)
++{
++	struct rb_node *parent;
++
++up:
++	bfq_update_active_node(node);
++
++	parent = rb_parent(node);
++	if (parent == NULL)
++		return;
++
++	if (node == parent->rb_left && parent->rb_right != NULL)
++		bfq_update_active_node(parent->rb_right);
++	else if (parent->rb_left != NULL)
++		bfq_update_active_node(parent->rb_left);
++
++	node = parent;
++	goto up;
++}
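++
++/*
++ * For example, if a node with start == 5 is inserted below a parent
++ * whose start is 7 and whose other child has min_start == 9, the
++ * parent's min_start is recomputed as min(7, 9, 5) == 5; the same
++ * recomputation then walks up through every ancestor (refreshing the
++ * sibling subtree on the way when needed), so each node always caches
++ * the smallest start time in its subtree.
++ */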
++
++static void bfq_weights_tree_add(struct bfq_data *bfqd,
++				 struct bfq_entity *entity,
++				 struct rb_root *root);
++
++static void bfq_weights_tree_remove(struct bfq_data *bfqd,
++				    struct bfq_entity *entity,
++				    struct rb_root *root);
++
++
++/**
++ * bfq_active_insert - insert an entity in the active tree of its
++ *                     group/device.
++ * @st: the service tree of the entity.
++ * @entity: the entity being inserted.
++ *
++ * The active tree is ordered by finish time, but an extra key is kept
++ * in each node, containing the minimum value for the start times of
++ * its children (and the node itself), so it's possible to search for
++ * the eligible node with the lowest finish time in logarithmic time.
++ */
++static void bfq_active_insert(struct bfq_service_tree *st,
++			      struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *node = &entity->rb_node;
++#ifdef CONFIG_CGROUP_BFQIO
++	struct bfq_sched_data *sd = NULL;
++	struct bfq_group *bfqg = NULL;
++	struct bfq_data *bfqd = NULL;
++#endif
++
++	bfq_insert(&st->active, entity);
++
++	if (node->rb_left != NULL)
++		node = node->rb_left;
++	else if (node->rb_right != NULL)
++		node = node->rb_right;
++
++	bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++	sd = entity->sched_data;
++	bfqg = container_of(sd, struct bfq_group, sched_data);
++	BUG_ON(!bfqg);
++	bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++	if (bfqq != NULL)
++		list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
++#ifdef CONFIG_CGROUP_BFQIO
++	else { /* bfq_group */
++		BUG_ON(!bfqd);
++		bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
++	}
++	if (bfqg != bfqd->root_group) {
++		BUG_ON(!bfqg);
++		BUG_ON(!bfqd);
++		bfqg->active_entities++;
++		if (bfqg->active_entities == 2)
++			bfqd->active_numerous_groups++;
++	}
++#endif
++}
++
++/**
++ * bfq_ioprio_to_weight - calc a weight from an ioprio.
++ * @ioprio: the ioprio value to convert.
++ */
++static inline unsigned short bfq_ioprio_to_weight(int ioprio)
++{
++	BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
++	return IOPRIO_BE_NR - ioprio;
++}
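++
++/*
++ * With IOPRIO_BE_NR == 8, ioprio 0 (the highest best-effort priority)
++ * maps to weight 8 and ioprio 7 (the lowest) to weight 1, so two
++ * otherwise identical queues at ioprios 0 and 7 are served in an
++ * 8:1 ratio.
++ */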
++
++/**
++ * bfq_weight_to_ioprio - calc an ioprio from a weight.
++ * @weight: the weight value to convert.
++ *
++ * To preserve the old ioprio-only user interface as much as possible,
++ * 0 is used as an escape ioprio value for weights (numerically) equal to
++ * or larger than IOPRIO_BE_NR.
++ */
++static inline unsigned short bfq_weight_to_ioprio(int weight)
++{
++	BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
++	return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
++}
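++
++/*
++ * Inverse mapping example: weight 1 maps back to ioprio 7 and weight 8
++ * to ioprio 0, while any larger weight (reachable only through the
++ * weight interface, not through ioprios) collapses to the escape
++ * value 0.
++ */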
++
++static inline void bfq_get_entity(struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
++	if (bfqq != NULL) {
++		atomic_inc(&bfqq->ref);
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
++			     bfqq, atomic_read(&bfqq->ref));
++	}
++}
++
++/**
++ * bfq_find_deepest - find the deepest node that an extraction can modify.
++ * @node: the node being removed.
++ *
++ * Do the first step of an extraction in an rb tree, looking for the
++ * node that will replace @node, and returning the deepest node that
++ * the following modifications to the tree can touch.  If @node is the
++ * last node in the tree, return %NULL.
++ */
++static struct rb_node *bfq_find_deepest(struct rb_node *node)
++{
++	struct rb_node *deepest;
++
++	if (node->rb_right == NULL && node->rb_left == NULL)
++		deepest = rb_parent(node);
++	else if (node->rb_right == NULL)
++		deepest = node->rb_left;
++	else if (node->rb_left == NULL)
++		deepest = node->rb_right;
++	else {
++		deepest = rb_next(node);
++		if (deepest->rb_right != NULL)
++			deepest = deepest->rb_right;
++		else if (rb_parent(deepest) != node)
++			deepest = rb_parent(deepest);
++	}
++
++	return deepest;
++}
++
++/**
++ * bfq_active_extract - remove an entity from the active tree.
++ * @st: the service_tree containing the tree.
++ * @entity: the entity being removed.
++ */
++static void bfq_active_extract(struct bfq_service_tree *st,
++			       struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct rb_node *node;
++#ifdef CONFIG_CGROUP_BFQIO
++	struct bfq_sched_data *sd = NULL;
++	struct bfq_group *bfqg = NULL;
++	struct bfq_data *bfqd = NULL;
++#endif
++
++	node = bfq_find_deepest(&entity->rb_node);
++	bfq_extract(&st->active, entity);
++
++	if (node != NULL)
++		bfq_update_active_tree(node);
++
++#ifdef CONFIG_CGROUP_BFQIO
++	sd = entity->sched_data;
++	bfqg = container_of(sd, struct bfq_group, sched_data);
++	BUG_ON(!bfqg);
++	bfqd = (struct bfq_data *)bfqg->bfqd;
++#endif
++	if (bfqq != NULL)
++		list_del(&bfqq->bfqq_list);
++#ifdef CONFIG_CGROUP_BFQIO
++	else { /* bfq_group */
++		BUG_ON(!bfqd);
++		bfq_weights_tree_remove(bfqd, entity,
++					&bfqd->group_weights_tree);
++	}
++	if (bfqg != bfqd->root_group) {
++		BUG_ON(!bfqg);
++		BUG_ON(!bfqd);
++		BUG_ON(!bfqg->active_entities);
++		bfqg->active_entities--;
++		if (bfqg->active_entities == 1) {
++			BUG_ON(!bfqd->active_numerous_groups);
++			bfqd->active_numerous_groups--;
++		}
++	}
++#endif
++}
++
++/**
++ * bfq_idle_insert - insert an entity into the idle tree.
++ * @st: the service tree containing the tree.
++ * @entity: the entity to insert.
++ */
++static void bfq_idle_insert(struct bfq_service_tree *st,
++			    struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct bfq_entity *first_idle = st->first_idle;
++	struct bfq_entity *last_idle = st->last_idle;
++
++	if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
++		st->first_idle = entity;
++	if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
++		st->last_idle = entity;
++
++	bfq_insert(&st->idle, entity);
++
++	if (bfqq != NULL)
++		list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
++}
++
++/**
++ * bfq_forget_entity - remove an entity from the wfq trees.
++ * @st: the service tree.
++ * @entity: the entity being removed.
++ *
++ * Update the device status and forget everything about @entity, putting
++ * the device reference to it, if it is a queue.  Entities belonging to
++ * groups are not refcounted.
++ */
++static void bfq_forget_entity(struct bfq_service_tree *st,
++			      struct bfq_entity *entity)
++{
++	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++	struct bfq_sched_data *sd;
++
++	BUG_ON(!entity->on_st);
++
++	entity->on_st = 0;
++	st->wsum -= entity->weight;
++	if (bfqq != NULL) {
++		sd = entity->sched_data;
++		bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
++			     bfqq, atomic_read(&bfqq->ref));
++		bfq_put_queue(bfqq);
++	}
++}
++
++/**
++ * bfq_put_idle_entity - release the idle tree ref of an entity.
++ * @st: service tree for the entity.
++ * @entity: the entity being released.
++ */
++static void bfq_put_idle_entity(struct bfq_service_tree *st,
++				struct bfq_entity *entity)
++{
++	bfq_idle_extract(st, entity);
++	bfq_forget_entity(st, entity);
++}
++
++/**
++ * bfq_forget_idle - update the idle tree if necessary.
++ * @st: the service tree to act upon.
++ *
++ * To preserve the global O(log N) complexity we only remove one entry here;
++ * as the idle tree will not grow indefinitely this can be done safely.
++ */
++static void bfq_forget_idle(struct bfq_service_tree *st)
++{
++	struct bfq_entity *first_idle = st->first_idle;
++	struct bfq_entity *last_idle = st->last_idle;
++
++	if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
++	    !bfq_gt(last_idle->finish, st->vtime)) {
++		/*
++		 * Forget the whole idle tree, increasing the vtime past
++		 * the last finish time of idle entities.
++		 */
++		st->vtime = last_idle->finish;
++	}
++
++	if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
++		bfq_put_idle_entity(st, first_idle);
++}
++
++static struct bfq_service_tree *
++__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
++			 struct bfq_entity *entity)
++{
++	struct bfq_service_tree *new_st = old_st;
++
++	if (entity->ioprio_changed) {
++		struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++		unsigned short prev_weight, new_weight;
++		struct bfq_data *bfqd = NULL;
++		struct rb_root *root;
++#ifdef CONFIG_CGROUP_BFQIO
++		struct bfq_sched_data *sd;
++		struct bfq_group *bfqg;
++#endif
++
++		if (bfqq != NULL)
++			bfqd = bfqq->bfqd;
++#ifdef CONFIG_CGROUP_BFQIO
++		else {
++			sd = entity->my_sched_data;
++			bfqg = container_of(sd, struct bfq_group, sched_data);
++			BUG_ON(!bfqg);
++			bfqd = (struct bfq_data *)bfqg->bfqd;
++			BUG_ON(!bfqd);
++		}
++#endif
++
++		BUG_ON(old_st->wsum < entity->weight);
++		old_st->wsum -= entity->weight;
++
++		if (entity->new_weight != entity->orig_weight) {
++			if (entity->new_weight < BFQ_MIN_WEIGHT ||
++			    entity->new_weight > BFQ_MAX_WEIGHT) {
++				printk(KERN_CRIT "update_weight_prio: "
++						 "new_weight %d\n",
++					entity->new_weight);
++				BUG();
++			}
++			entity->orig_weight = entity->new_weight;
++			entity->ioprio =
++				bfq_weight_to_ioprio(entity->orig_weight);
++		}
++
++		entity->ioprio_class = entity->new_ioprio_class;
++		entity->ioprio_changed = 0;
++
++		/*
++		 * NOTE: here we may be changing the weight too early,
++		 * this will cause unfairness.  The correct approach
++		 * would have required additional complexity to defer
++		 * weight changes to the proper time instants (i.e.,
++		 * when entity->finish <= old_st->vtime).
++		 */
++		new_st = bfq_entity_service_tree(entity);
++
++		prev_weight = entity->weight;
++		new_weight = entity->orig_weight *
++			     (bfqq != NULL ? bfqq->wr_coeff : 1);
++		/*
++		 * If the weight of the entity changes, remove the entity
++		 * from its old weight counter (if there is a counter
++		 * associated with the entity), and add it to the counter
++		 * associated with its new weight.
++		 */
++		if (prev_weight != new_weight) {
++			root = bfqq ? &bfqd->queue_weights_tree :
++				      &bfqd->group_weights_tree;
++			bfq_weights_tree_remove(bfqd, entity, root);
++		}
++		entity->weight = new_weight;
++		/*
++		 * Add the entity to its weights tree only if it is
++		 * not associated with a weight-raised queue.
++		 */
++		if (prev_weight != new_weight &&
++		    (bfqq ? bfqq->wr_coeff == 1 : 1))
++			/* If we get here, root has been initialized. */
++			bfq_weights_tree_add(bfqd, entity, root);
++
++		new_st->wsum += entity->weight;
++
++		if (new_st != old_st)
++			entity->start = new_st->vtime;
++	}
++
++	return new_st;
++}
++
++/**
++ * bfq_bfqq_served - update the scheduler status after selection for
++ *                   service.
++ * @bfqq: the queue being served.
++ * @served: bytes to transfer.
++ *
++ * NOTE: this can be optimized, as the timestamps of upper level entities
++ * are synchronized every time a new bfqq is selected for service.  For now,
++ * we keep the full update to better check consistency.
++ */
++static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++	struct bfq_service_tree *st;
++
++	for_each_entity(entity) {
++		st = bfq_entity_service_tree(entity);
++
++		entity->service += served;
++		BUG_ON(entity->service > entity->budget);
++		BUG_ON(st->wsum == 0);
++
++		st->vtime += bfq_delta(served, st->wsum);
++		bfq_forget_idle(st);
++	}
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
++}
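++
++/*
++ * For example, if the leaf service tree has wsum == 10 and the queue
++ * is charged 100 units of service, st->vtime advances by
++ * bfq_delta(100, 10) == (100 << WFQ_SERVICE_SHIFT) / 10: virtual time
++ * moves forward by the amount of service the whole weight sum was
++ * entitled to while those 100 units were being transferred.
++ */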
++
++/**
++ * bfq_bfqq_charge_full_budget - set the service to the entity budget.
++ * @bfqq: the queue that needs a service update.
++ *
++ * When it's not possible to be fair in the service domain, because
++ * a queue is not consuming its budget fast enough (the meaning of
++ * fast depends on the timeout parameter), we charge it a full
++ * budget.  In this way we should obtain a sort of time-domain
++ * fairness among all the seeky/slow queues.
++ */
++static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");
++
++	bfq_bfqq_served(bfqq, entity->budget - entity->service);
++}
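++
++/*
++ * For example, a queue with a budget of 16384 that received only 4096
++ * units of service before being deemed too slow is charged the missing
++ * 12288 as well; its next finish timestamp is computed as if the whole
++ * budget had been consumed, which pushes the queue back in the schedule
++ * and yields time-domain fairness with respect to well-behaved queues.
++ */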
++
++/**
++ * __bfq_activate_entity - activate an entity.
++ * @entity: the entity being activated.
++ *
++ * Called whenever an entity is activated, i.e., it is not active and one
++ * of its children receives a new request, or has to be reactivated due to
++ * budget exhaustion.  It uses the current budget of the entity (and the
++ * service received if @entity is active) of the queue to calculate its
++ * timestamps.
++ */
++static void __bfq_activate_entity(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sd = entity->sched_data;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++
++	if (entity == sd->in_service_entity) {
++		BUG_ON(entity->tree != NULL);
++		/*
++		 * If we are requeueing the current entity we have
++		 * to take care of not charging to it service it has
++		 * not received.
++		 */
++		bfq_calc_finish(entity, entity->service);
++		entity->start = entity->finish;
++		sd->in_service_entity = NULL;
++	} else if (entity->tree == &st->active) {
++		/*
++		 * Requeueing an entity due to a change of some
++		 * next_in_service entity below it.  We reuse the
++		 * old start time.
++		 */
++		bfq_active_extract(st, entity);
++	} else if (entity->tree == &st->idle) {
++		/*
++		 * Must be on the idle tree, bfq_idle_extract() will
++		 * check for that.
++		 */
++		bfq_idle_extract(st, entity);
++		entity->start = bfq_gt(st->vtime, entity->finish) ?
++				       st->vtime : entity->finish;
++	} else {
++		/*
++		 * The finish time of the entity may be invalid, and
++		 * it is in the past for sure, otherwise the queue
++		 * would have been on the idle tree.
++		 */
++		entity->start = st->vtime;
++		st->wsum += entity->weight;
++		bfq_get_entity(entity);
++
++		BUG_ON(entity->on_st);
++		entity->on_st = 1;
++	}
++
++	st = __bfq_entity_update_weight_prio(st, entity);
++	bfq_calc_finish(entity, entity->budget);
++	bfq_active_insert(st, entity);
++}
++
++/**
++ * bfq_activate_entity - activate an entity and its ancestors if necessary.
++ * @entity: the entity to activate.
++ *
++ * Activate @entity and all the entities on the path from it to the root.
++ */
++static void bfq_activate_entity(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sd;
++
++	for_each_entity(entity) {
++		__bfq_activate_entity(entity);
++
++		sd = entity->sched_data;
++		if (!bfq_update_next_in_service(sd))
++			/*
++			 * No need to propagate the activation to the
++			 * upper entities, as they will be updated when
++			 * the in-service entity is rescheduled.
++			 */
++			break;
++	}
++}
++
++/**
++ * __bfq_deactivate_entity - deactivate an entity from its service tree.
++ * @entity: the entity to deactivate.
++ * @requeue: if false, the entity will not be put into the idle tree.
++ *
++ * Deactivate an entity, independently from its previous state.  If the
++ * entity was not on a service tree just return, otherwise if it is on
++ * any scheduler tree, extract it from that tree, and if necessary
++ * and if the caller did not specify @requeue, put it on the idle tree.
++ *
++ * Return %1 if the caller should update the entity hierarchy, i.e.,
++ * if the entity was in service or if it was the next_in_service for
++ * its sched_data; return %0 otherwise.
++ */
++static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++	struct bfq_sched_data *sd = entity->sched_data;
++	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
++	int was_in_service = entity == sd->in_service_entity;
++	int ret = 0;
++
++	if (!entity->on_st)
++		return 0;
++
++	BUG_ON(was_in_service && entity->tree != NULL);
++
++	if (was_in_service) {
++		bfq_calc_finish(entity, entity->service);
++		sd->in_service_entity = NULL;
++	} else if (entity->tree == &st->active)
++		bfq_active_extract(st, entity);
++	else if (entity->tree == &st->idle)
++		bfq_idle_extract(st, entity);
++	else if (entity->tree != NULL)
++		BUG();
++
++	if (was_in_service || sd->next_in_service == entity)
++		ret = bfq_update_next_in_service(sd);
++
++	if (!requeue || !bfq_gt(entity->finish, st->vtime))
++		bfq_forget_entity(st, entity);
++	else
++		bfq_idle_insert(st, entity);
++
++	BUG_ON(sd->in_service_entity == entity);
++	BUG_ON(sd->next_in_service == entity);
++
++	return ret;
++}
++
++/**
++ * bfq_deactivate_entity - deactivate an entity.
++ * @entity: the entity to deactivate.
++ * @requeue: true if the entity can be put on the idle tree
++ */
++static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
++{
++	struct bfq_sched_data *sd;
++	struct bfq_entity *parent;
++
++	for_each_entity_safe(entity, parent) {
++		sd = entity->sched_data;
++
++		if (!__bfq_deactivate_entity(entity, requeue))
++			/*
++			 * The parent entity is still backlogged, and
++			 * we don't need to update it as it is still
++			 * in service.
++			 */
++			break;
++
++		if (sd->next_in_service != NULL)
++			/*
++			 * The parent entity is still backlogged and
++			 * the budgets on the path towards the root
++			 * need to be updated.
++			 */
++			goto update;
++
++		/*
++		 * If we reach this point, the parent is no longer backlogged
++		 * and we want to propagate the dequeue upwards.
++		 */
++		requeue = 1;
++	}
++
++	return;
++
++update:
++	entity = parent;
++	for_each_entity(entity) {
++		__bfq_activate_entity(entity);
++
++		sd = entity->sched_data;
++		if (!bfq_update_next_in_service(sd))
++			break;
++	}
++}
++
++/**
++ * bfq_update_vtime - update vtime if necessary.
++ * @st: the service tree to act upon.
++ *
++ * If necessary update the service tree vtime to have at least one
++ * eligible entity, skipping to its start time.  Assumes that the
++ * active tree of the device is not empty.
++ *
++ * NOTE: this hierarchical implementation updates vtimes quite often;
++ * we may end up with reactivated processes getting timestamps after a
++ * vtime skip done because we needed a ->first_active entity on some
++ * intermediate node.
++ */
++static void bfq_update_vtime(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entry;
++	struct rb_node *node = st->active.rb_node;
++
++	entry = rb_entry(node, struct bfq_entity, rb_node);
++	if (bfq_gt(entry->min_start, st->vtime)) {
++		st->vtime = entry->min_start;
++		bfq_forget_idle(st);
++	}
++}
++
++/**
++ * bfq_first_active_entity - find the eligible entity with
++ *                           the smallest finish time
++ * @st: the service tree to select from.
++ *
++ * This function searches the first schedulable entity, starting from the
++ * root of the tree and going left whenever the left subtree contains at
++ * least one eligible (start <= vtime) entity. The path on
++ * the right is followed only if a) the left subtree contains no eligible
++ * entities and b) no eligible entity has been found yet.
++ */
++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
++{
++	struct bfq_entity *entry, *first = NULL;
++	struct rb_node *node = st->active.rb_node;
++
++	while (node != NULL) {
++		entry = rb_entry(node, struct bfq_entity, rb_node);
++left:
++		if (!bfq_gt(entry->start, st->vtime))
++			first = entry;
++
++		BUG_ON(bfq_gt(entry->min_start, st->vtime));
++
++		if (node->rb_left != NULL) {
++			entry = rb_entry(node->rb_left,
++					 struct bfq_entity, rb_node);
++			if (!bfq_gt(entry->min_start, st->vtime)) {
++				node = node->rb_left;
++				goto left;
++			}
++		}
++		if (first != NULL)
++			break;
++		node = node->rb_right;
++	}
++
++	BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
++	return first;
++}
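++
++/*
++ * Sketch of the traversal above: at each node the entity itself is a
++ * candidate if its start is <= vtime; the search then descends left
++ * whenever the left subtree's min_start is <= vtime, because an
++ * eligible entity with a smaller finish time may live there, and it
++ * moves right only if no candidate has been found yet.
++ */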
++
++/**
++ * __bfq_lookup_next_entity - return the first eligible entity in @st.
++ * @st: the service tree.
++ *
++ * Update the virtual time in @st and return the first eligible entity
++ * it contains.
++ */
++static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
++						   bool force)
++{
++	struct bfq_entity *entity, *new_next_in_service = NULL;
++
++	if (RB_EMPTY_ROOT(&st->active))
++		return NULL;
++
++	bfq_update_vtime(st);
++	entity = bfq_first_active_entity(st);
++	BUG_ON(bfq_gt(entity->start, st->vtime));
++
++	/*
++	 * If the chosen entity does not match with the sched_data's
++	 * next_in_service and we are forcedly serving the IDLE priority
++	 * class tree, bubble up budget update.
++	 */
++	if (unlikely(force && entity != entity->sched_data->next_in_service)) {
++		new_next_in_service = entity;
++		for_each_entity(new_next_in_service)
++			bfq_update_budget(new_next_in_service);
++	}
++
++	return entity;
++}
++
++/**
++ * bfq_lookup_next_entity - return the first eligible entity in @sd.
++ * @sd: the sched_data.
++ * @extract: if true the returned entity will be also extracted from @sd.
++ *
++ * NOTE: since we cache the next_in_service entity at each level of the
++ * hierarchy, the complexity of the lookup can be decreased with
++ * absolutely no effort by just returning the cached next_in_service value;
++ * we prefer to do full lookups to test the consistency of the data
++ * structures.
++ */
++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
++						 int extract,
++						 struct bfq_data *bfqd)
++{
++	struct bfq_service_tree *st = sd->service_tree;
++	struct bfq_entity *entity;
++	int i = 0;
++
++	BUG_ON(sd->in_service_entity != NULL);
++
++	if (bfqd != NULL &&
++	    jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
++		entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
++						  true);
++		if (entity != NULL) {
++			i = BFQ_IOPRIO_CLASSES - 1;
++			bfqd->bfq_class_idle_last_service = jiffies;
++			sd->next_in_service = entity;
++		}
++	}
++	for (; i < BFQ_IOPRIO_CLASSES; i++) {
++		entity = __bfq_lookup_next_entity(st + i, false);
++		if (entity != NULL) {
++			if (extract) {
++				bfq_check_next_in_service(sd, entity);
++				bfq_active_extract(st + i, entity);
++				sd->in_service_entity = entity;
++				sd->next_in_service = NULL;
++			}
++			break;
++		}
++	}
++
++	return entity;
++}
++
++/*
++ * Get next queue for service.
++ */
++static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
++{
++	struct bfq_entity *entity = NULL;
++	struct bfq_sched_data *sd;
++	struct bfq_queue *bfqq;
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++
++	if (bfqd->busy_queues == 0)
++		return NULL;
++
++	sd = &bfqd->root_group->sched_data;
++	for (; sd != NULL; sd = entity->my_sched_data) {
++		entity = bfq_lookup_next_entity(sd, 1, bfqd);
++		BUG_ON(entity == NULL);
++		entity->service = 0;
++	}
++
++	bfqq = bfq_entity_to_bfqq(entity);
++	BUG_ON(bfqq == NULL);
++
++	return bfqq;
++}
++
++/*
++ * Forced extraction of the given queue.
++ */
++static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
++				      struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity;
++	struct bfq_sched_data *sd;
++
++	BUG_ON(bfqd->in_service_queue != NULL);
++
++	entity = &bfqq->entity;
++	/*
++	 * Bubble up extraction/update from the leaf to the root.
++	 */
++	for_each_entity(entity) {
++		sd = entity->sched_data;
++		bfq_update_budget(entity);
++		bfq_update_vtime(bfq_entity_service_tree(entity));
++		bfq_active_extract(bfq_entity_service_tree(entity), entity);
++		sd->in_service_entity = entity;
++		sd->next_in_service = NULL;
++		entity->service = 0;
++	}
++
++	return;
++}
++
++static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
++{
++	if (bfqd->in_service_bic != NULL) {
++		put_io_context(bfqd->in_service_bic->icq.ioc);
++		bfqd->in_service_bic = NULL;
++	}
++
++	bfqd->in_service_queue = NULL;
++	del_timer(&bfqd->idle_slice_timer);
++}
++
++static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++				int requeue)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	if (bfqq == bfqd->in_service_queue)
++		__bfq_bfqd_reset_in_service(bfqd);
++
++	bfq_deactivate_entity(entity, requeue);
++}
++
++static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	struct bfq_entity *entity = &bfqq->entity;
++
++	bfq_activate_entity(entity);
++}
++
++/*
++ * Called when the bfqq no longer has requests pending, remove it from
++ * the service tree.
++ */
++static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++			      int requeue)
++{
++	BUG_ON(!bfq_bfqq_busy(bfqq));
++	BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));
++
++	bfq_log_bfqq(bfqd, bfqq, "del from busy");
++
++	bfq_clear_bfqq_busy(bfqq);
++
++	BUG_ON(bfqd->busy_queues == 0);
++	bfqd->busy_queues--;
++
++	if (!bfqq->dispatched) {
++		bfq_weights_tree_remove(bfqd, &bfqq->entity,
++					&bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			BUG_ON(!bfqd->busy_in_flight_queues);
++			bfqd->busy_in_flight_queues--;
++			if (bfq_bfqq_constantly_seeky(bfqq)) {
++				BUG_ON(!bfqd->
++					const_seeky_busy_in_flight_queues);
++				bfqd->const_seeky_busy_in_flight_queues--;
++			}
++		}
++	}
++	if (bfqq->wr_coeff > 1)
++		bfqd->wr_busy_queues--;
++
++	bfq_deactivate_bfqq(bfqd, bfqq, requeue);
++}
++
++/*
++ * Called when an inactive queue receives a new request.
++ */
++static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
++{
++	BUG_ON(bfq_bfqq_busy(bfqq));
++	BUG_ON(bfqq == bfqd->in_service_queue);
++
++	bfq_log_bfqq(bfqd, bfqq, "add to busy");
++
++	bfq_activate_bfqq(bfqd, bfqq);
++
++	bfq_mark_bfqq_busy(bfqq);
++	bfqd->busy_queues++;
++
++	if (!bfqq->dispatched) {
++		if (bfqq->wr_coeff == 1)
++			bfq_weights_tree_add(bfqd, &bfqq->entity,
++					     &bfqd->queue_weights_tree);
++		if (!blk_queue_nonrot(bfqd->queue)) {
++			bfqd->busy_in_flight_queues++;
++			if (bfq_bfqq_constantly_seeky(bfqq))
++				bfqd->const_seeky_busy_in_flight_queues++;
++		}
++	}
++	if (bfqq->wr_coeff > 1)
++		bfqd->wr_busy_queues++;
++}
+diff --git a/block/bfq.h b/block/bfq.h
+new file mode 100644
+index 0000000..00feff7
+--- /dev/null
++++ b/block/bfq.h
+@@ -0,0 +1,771 @@
++/*
++ * BFQ-v7r8 for 4.1.0: data structures and common function prototypes.
++ *
++ * Based on ideas and code from CFQ:
++ * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
++ *
++ * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
++ *		      Paolo Valente <paolo.valente@unimore.it>
++ *
++ * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
++ */
++
++#ifndef _BFQ_H
++#define _BFQ_H
++
++#include <linux/blktrace_api.h>
++#include <linux/hrtimer.h>
++#include <linux/ioprio.h>
++#include <linux/rbtree.h>
++
++#define BFQ_IOPRIO_CLASSES	3
++#define BFQ_CL_IDLE_TIMEOUT	(HZ/5)
++
++#define BFQ_MIN_WEIGHT	1
++#define BFQ_MAX_WEIGHT	1000
++
++#define BFQ_DEFAULT_QUEUE_IOPRIO	4
++
++#define BFQ_DEFAULT_GRP_WEIGHT	10
++#define BFQ_DEFAULT_GRP_IOPRIO	0
++#define BFQ_DEFAULT_GRP_CLASS	IOPRIO_CLASS_BE
++
++struct bfq_entity;
++
++/**
++ * struct bfq_service_tree - per ioprio_class service tree.
++ * @active: tree for active entities (i.e., those backlogged).
++ * @idle: tree for idle entities (i.e., those not backlogged, with V <= F_i).
++ * @first_idle: idle entity with minimum F_i.
++ * @last_idle: idle entity with maximum F_i.
++ * @vtime: scheduler virtual time.
++ * @wsum: scheduler weight sum; active and idle entities contribute to it.
++ *
++ * Each service tree represents a B-WF2Q+ scheduler on its own.  Each
++ * ioprio_class has its own independent scheduler, and so its own
++ * bfq_service_tree.  All the fields are protected by the queue lock
++ * of the containing bfqd.
++ */
++struct bfq_service_tree {
++	struct rb_root active;
++	struct rb_root idle;
++
++	struct bfq_entity *first_idle;
++	struct bfq_entity *last_idle;
++
++	u64 vtime;
++	unsigned long wsum;
++};
++
++/**
++ * struct bfq_sched_data - multi-class scheduler.
++ * @in_service_entity: entity in service.
++ * @next_in_service: head-of-the-line entity in the scheduler.
++ * @service_tree: array of service trees, one per ioprio_class.
++ *
++ * bfq_sched_data is the basic scheduler queue.  It supports three
++ * ioprio_classes, and can be used either as a toplevel queue or as
++ * an intermediate queue in a hierarchical setup.
++ * @next_in_service points to the active entity of the sched_data
++ * service trees that will be scheduled next.
++ *
++ * The supported ioprio_classes are the same as in CFQ, in descending
++ * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE.
++ * Requests from higher priority queues are served before all the
++ * requests from lower priority queues; among requests of the same
++ * queue requests are served according to B-WF2Q+.
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_sched_data {
++	struct bfq_entity *in_service_entity;
++	struct bfq_entity *next_in_service;
++	struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES];
++};
++
++/**
++ * struct bfq_weight_counter - counter of the number of all active entities
++ *                             with a given weight.
++ * @weight: weight of the entities that this counter refers to.
++ * @num_active: number of active entities with this weight.
++ * @weights_node: weights tree member (see bfq_data's @queue_weights_tree
++ *                and @group_weights_tree).
++ */
++struct bfq_weight_counter {
++	short int weight;
++	unsigned int num_active;
++	struct rb_node weights_node;
++};
++
++/**
++ * struct bfq_entity - schedulable entity.
++ * @rb_node: service_tree member.
++ * @weight_counter: pointer to the weight counter associated with this entity.
++ * @on_st: flag, true if the entity is on a tree (either the active or
++ *         the idle one of its service_tree).
++ * @finish: B-WF2Q+ finish timestamp (aka F_i).
++ * @start: B-WF2Q+ start timestamp (aka S_i).
++ * @tree: tree the entity is enqueued into; %NULL if not on a tree.
++ * @min_start: minimum start time of the (active) subtree rooted at
++ *             this entity; used for O(log N) lookups into active trees.
++ * @service: service received during the last round of service.
++ * @budget: budget used to calculate F_i; F_i = S_i + @budget / @weight.
++ * @weight: weight of the queue
++ * @parent: parent entity, for hierarchical scheduling.
++ * @my_sched_data: for non-leaf nodes in the cgroup hierarchy, the
++ *                 associated scheduler queue, %NULL on leaf nodes.
++ * @sched_data: the scheduler queue this entity belongs to.
++ * @ioprio: the ioprio in use.
++ * @new_weight: when a weight change is requested, the new weight value.
++ * @orig_weight: original weight, used to implement weight boosting
++ * @new_ioprio: when an ioprio change is requested, the new ioprio value.
++ * @ioprio_class: the ioprio_class in use.
++ * @new_ioprio_class: when an ioprio_class change is requested, the new
++ *                    ioprio_class value.
++ * @ioprio_changed: flag, true when the user requested a weight, ioprio or
++ *                  ioprio_class change.
++ *
++ * A bfq_entity is used to represent either a bfq_queue (leaf node in the
++ * cgroup hierarchy) or a bfq_group into the upper level scheduler.  Each
++ * entity belongs to the sched_data of the parent group in the cgroup
++ * hierarchy.  Non-leaf entities have also their own sched_data, stored
++ * in @my_sched_data.
++ *
++ * Each entity stores independently its priority values; this would
++ * allow different weights on different devices, but this
++ * functionality is not yet exported to userspace.  Priorities and
++ * weights are updated lazily, first storing the new values into the
++ * new_* fields, then setting the @ioprio_changed flag.  As soon as
++ * there is a transition in the entity state that allows the priority
++ * update to take place the effective and the requested priority
++ * values are synchronized.
++ *
++ * Unless cgroups are used, the weight value is calculated from the
++ * ioprio to export the same interface as CFQ.  When dealing with
++ * ``well-behaved'' queues (i.e., queues that do not spend too much
++ * time to consume their budget and have true sequential behavior, and
++ * when there are no external factors breaking anticipation) the
++ * relative weights at each level of the cgroups hierarchy should be
++ * guaranteed.  All the fields are protected by the queue lock of the
++ * containing bfqd.
++ */
++struct bfq_entity {
++	struct rb_node rb_node;
++	struct bfq_weight_counter *weight_counter;
++
++	int on_st;
++
++	u64 finish;
++	u64 start;
++
++	struct rb_root *tree;
++
++	u64 min_start;
++
++	unsigned long service, budget;
++	unsigned short weight, new_weight;
++	unsigned short orig_weight;
++
++	struct bfq_entity *parent;
++
++	struct bfq_sched_data *my_sched_data;
++	struct bfq_sched_data *sched_data;
++
++	unsigned short ioprio, new_ioprio;
++	unsigned short ioprio_class, new_ioprio_class;
++
++	int ioprio_changed;
++};
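++
++/*
++ * Illustrative sketch, not part of the original patch: the finish-
++ * timestamp rule documented above, F_i = S_i + @budget / @weight. The
++ * scheduler's own timestamp code additionally scales these quantities
++ * for precision, so this helper only shows the shape of the computation.
++ * Example with invented values: start = 1000, budget = 8192,
++ * weight = 100 -> finish = 1000 + 8192 / 100 = 1081 (virtual time).
++ */
++static inline u64 bfq_sketch_finish_time(u64 start, unsigned long budget,
++					 unsigned short weight)
++{
++	return start + budget / weight;
++}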
++
++struct bfq_group;
++
++/**
++ * struct bfq_queue - leaf schedulable entity.
++ * @ref: reference counter.
++ * @bfqd: parent bfq_data.
++ * @new_bfqq: shared bfq_queue if queue is cooperating with
++ *           one or more other queues.
++ * @pos_node: request-position tree member (see bfq_data's @rq_pos_tree).
++ * @pos_root: request-position tree root (see bfq_data's @rq_pos_tree).
++ * @sort_list: sorted list of pending requests.
++ * @next_rq: if fifo isn't expired, next request to serve.
++ * @queued: nr of requests queued in @sort_list.
++ * @allocated: currently allocated requests.
++ * @meta_pending: pending metadata requests.
++ * @fifo: fifo list of requests in sort_list.
++ * @entity: entity representing this queue in the scheduler.
++ * @max_budget: maximum budget allowed from the feedback mechanism.
++ * @budget_timeout: budget expiration (in jiffies).
++ * @dispatched: number of requests on the dispatch list or inside driver.
++ * @flags: status flags.
++ * @bfqq_list: node for active/idle bfqq list inside our bfqd.
++ * @burst_list_node: node for the device's burst list.
++ * @seek_samples: number of seeks sampled
++ * @seek_total: sum of the distances of the seeks sampled
++ * @seek_mean: mean seek distance
++ * @last_request_pos: position of the last request enqueued
++ * @requests_within_timer: number of consecutive pairs of request completion
++ *                         and arrival, such that the queue becomes idle
++ *                         after the completion, but the next request arrives
++ *                         within an idle time slice; used only if the queue's
++ *                         IO_bound has been cleared.
++ * @pid: pid of the process owning the queue, used for logging purposes.
++ * @last_wr_start_finish: start time of the current weight-raising period if
++ *                        the @bfq_queue is being weight-raised, otherwise
++ *                        finish time of the last weight-raising period
++ * @wr_cur_max_time: current max raising time for this queue
++ * @soft_rt_next_start: minimum time instant such that, only if a new
++ *                      request is enqueued after this time instant in an
++ *                      idle @bfq_queue with no outstanding requests, then
++ *                      the task associated with the queue is deemed
++ *                      soft real-time (see the comments to the function
++ *                      bfq_bfqq_softrt_next_start()).
++ * @last_idle_bklogged: time of the last transition of the @bfq_queue from
++ *                      idle to backlogged
++ * @service_from_backlogged: cumulative service received from the @bfq_queue
++ *                           since the last transition from idle to
++ *                           backlogged
++ *
++ * A bfq_queue is a leaf request queue; it can be associated with one io_context
++ * or more, if it is async or shared between cooperating processes. @cgroup
++ * holds a reference to the cgroup, to be sure that it does not disappear while
++ * a bfqq still references it (mostly to avoid races between request issuing and
++ * task migration followed by cgroup destruction).
++ * All the fields are protected by the queue lock of the containing bfqd.
++ */
++struct bfq_queue {
++	atomic_t ref;
++	struct bfq_data *bfqd;
++
++	/* fields for cooperating queues handling */
++	struct bfq_queue *new_bfqq;
++	struct rb_node pos_node;
++	struct rb_root *pos_root;
++
++	struct rb_root sort_list;
++	struct request *next_rq;
++	int queued[2];
++	int allocated[2];
++	int meta_pending;
++	struct list_head fifo;
++
++	struct bfq_entity entity;
++
++	unsigned long max_budget;
++	unsigned long budget_timeout;
++
++	int dispatched;
++
++	unsigned int flags;
++
++	struct list_head bfqq_list;
++
++	struct hlist_node burst_list_node;
++
++	unsigned int seek_samples;
++	u64 seek_total;
++	sector_t seek_mean;
++	sector_t last_request_pos;
++
++	unsigned int requests_within_timer;
++
++	pid_t pid;
++
++	/* weight-raising fields */
++	unsigned long wr_cur_max_time;
++	unsigned long soft_rt_next_start;
++	unsigned long last_wr_start_finish;
++	unsigned int wr_coeff;
++	unsigned long last_idle_bklogged;
++	unsigned long service_from_backlogged;
++};
++
++/**
++ * struct bfq_ttime - per process thinktime stats.
++ * @ttime_total: total process thinktime
++ * @ttime_samples: number of thinktime samples
++ * @ttime_mean: average process thinktime
++ */
++struct bfq_ttime {
++	unsigned long last_end_request;
++
++	unsigned long ttime_total;
++	unsigned long ttime_samples;
++	unsigned long ttime_mean;
++};
++
++/**
++ * struct bfq_io_cq - per (request_queue, io_context) structure.
++ * @icq: associated io_cq structure
++ * @bfqq: array of two process queues, the sync and the async
++ * @ttime: associated @bfq_ttime struct
++ */
++struct bfq_io_cq {
++	struct io_cq icq; /* must be the first member */
++	struct bfq_queue *bfqq[2];
++	struct bfq_ttime ttime;
++	int ioprio;
++};
++
++enum bfq_device_speed {
++	BFQ_BFQD_FAST,
++	BFQ_BFQD_SLOW,
++};
++
++/**
++ * struct bfq_data - per device data structure.
++ * @queue: request queue for the managed device.
++ * @root_group: root bfq_group for the device.
++ * @rq_pos_tree: rbtree sorted by next_request position, used when
++ *               determining if two or more queues have interleaving
++ *               requests (see bfq_close_cooperator()).
++ * @active_numerous_groups: number of bfq_groups containing more than one
++ *                          active @bfq_entity.
++ * @queue_weights_tree: rbtree of weight counters of @bfq_queues, sorted by
++ *                      weight. Used to keep track of whether all @bfq_queues
++ *                     have the same weight. The tree contains one counter
++ *                     for each distinct weight associated to some active
++ *                     and not weight-raised @bfq_queue (see the comments to
++ *                      the functions bfq_weights_tree_[add|remove] for
++ *                     further details).
++ * @group_weights_tree: rbtree of non-queue @bfq_entity weight counters, sorted
++ *                      by weight. Used to keep track of whether all
++ *                     @bfq_groups have the same weight. The tree contains
++ *                     one counter for each distinct weight associated to
++ *                     some active @bfq_group (see the comments to the
++ *                     functions bfq_weights_tree_[add|remove] for further
++ *                     details).
++ * @busy_queues: number of bfq_queues containing requests (including the
++ *		 queue in service, even if it is idling).
++ * @busy_in_flight_queues: number of @bfq_queues containing pending or
++ *                         in-flight requests, plus the @bfq_queue in
++ *                         service, even if idle but waiting for the
++ *                         possible arrival of its next sync request. This
++ *                         field is updated only if the device is rotational,
++ *                         but used only if the device is also NCQ-capable.
++ *                         The field is updated also for non-NCQ-capable
++ *                         rotational devices because the value of @hw_tag
++ *                         may be set later than the first time(s) at which
++ *                         busy_in_flight_queues may need to be
++ *                         incremented. Taking also
++ *                         this possibility into account, to avoid unbalanced
++ *                         increments/decrements, would imply more overhead
++ *                         than just updating busy_in_flight_queues
++ *                         regardless of the value of @hw_tag.
++ * @const_seeky_busy_in_flight_queues: number of constantly-seeky @bfq_queues
++ *                                     (that is, seeky queues that expired
++ *                                     for budget timeout at least once)
++ *                                     containing pending or in-flight
++ *                                     requests, including the in-service
++ *                                     @bfq_queue if constantly seeky. This
++ *                                     field is updated only if the device
++ *                                     is rotational, but used only if the
++ *                                     device is also NCQ-capable (see the
++ *                                     comments to @busy_in_flight_queues).
++ * @wr_busy_queues: number of weight-raised busy @bfq_queues.
++ * @queued: number of queued requests.
++ * @rq_in_driver: number of requests dispatched and waiting for completion.
++ * @sync_flight: number of sync requests in the driver.
++ * @max_rq_in_driver: max number of reqs in driver in the last
++ *                    @hw_tag_samples completed requests.
++ * @hw_tag_samples: nr of samples used to calculate hw_tag.
++ * @hw_tag: flag set to one if the driver is showing a queueing behavior.
++ * @budgets_assigned: number of budgets assigned.
++ * @idle_slice_timer: timer set when idling for the next sequential request
++ *                    from the queue in service.
++ * @unplug_work: delayed work to restart dispatching on the request queue.
++ * @in_service_queue: bfq_queue in service.
++ * @in_service_bic: bfq_io_cq (bic) associated with the @in_service_queue.
++ * @last_position: on-disk position of the last served request.
++ * @last_budget_start: beginning of the last budget.
++ * @last_idling_start: beginning of the last idle slice.
++ * @peak_rate: peak transfer rate observed for a budget.
++ * @peak_rate_samples: number of samples used to calculate @peak_rate.
++ * @bfq_max_budget: maximum budget allotted to a bfq_queue before
++ *                  rescheduling.
++ * @group_list: list of all the bfq_groups active on the device.
++ * @active_list: list of all the bfq_queues active on the device.
++ * @idle_list: list of all the bfq_queues idle on the device.
++ * @bfq_fifo_expire: timeout for async/sync requests; when it expires
++ *                   requests are served in fifo order.
++ * @bfq_back_penalty: weight of backward seeks wrt forward ones.
++ * @bfq_back_max: maximum allowed backward seek.
++ * @bfq_slice_idle: maximum idling time.
++ * @bfq_user_max_budget: user-configured max budget value
++ *                       (0 for auto-tuning).
++ * @bfq_max_budget_async_rq: maximum budget (in nr of requests) allotted to
++ *                           async queues.
++ * @bfq_timeout: timeout for bfq_queues to consume their budget; used
++ *               to prevent seeky queues from imposing long latencies on
++ *               well-behaved ones (this also implies that seeky queues cannot
++ *               receive guarantees in the service domain; after a timeout
++ *               they are charged for the whole allocated budget, to try
++ *               to preserve a behavior reasonably fair among them, but
++ *               without service-domain guarantees).
++ * @bfq_coop_thresh: number of queue merges after which a @bfq_queue is
++ *                   no longer granted any weight-raising.
++ * @bfq_failed_cooperations: number of consecutive failed cooperation
++ *                           chances after which weight-raising is restored
++ *                           to a queue subject to more than bfq_coop_thresh
++ *                           queue merges.
++ * @bfq_requests_within_timer: number of consecutive requests that must be
++ *                             issued within the idle time slice to set
++ *                             again idling to a queue which was marked as
++ *                             non-I/O-bound (see the definition of the
++ *                             IO_bound flag for further details).
++ * @last_ins_in_burst: last time at which a queue entered the current
++ *                     burst of queues being activated shortly after
++ *                     each other; for more details about this and the
++ *                     following parameters related to a burst of
++ *                     activations, see the comments to the function
++ *                     @bfq_handle_burst.
++ * @bfq_burst_interval: reference time interval used to decide whether a
++ *                      queue has been activated shortly after
++ *                      @last_ins_in_burst.
++ * @burst_size: number of queues in the current burst of queue activations.
++ * @bfq_large_burst_thresh: maximum burst size above which the current
++ * 			    queue-activation burst is deemed as 'large'.
++ * @large_burst: true if a large queue-activation burst is in progress.
++ * @burst_list: head of the burst list (as for the above fields, more details
++ * 		in the comments to the function bfq_handle_burst).
++ * @low_latency: if set to true, low-latency heuristics are enabled.
++ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
++ *                queue is multiplied.
++ * @bfq_wr_max_time: maximum duration of a weight-raising period (jiffies).
++ * @bfq_wr_rt_max_time: maximum duration for soft real-time processes.
++ * @bfq_wr_min_idle_time: minimum idle period after which weight-raising
++ *			  may be reactivated for a queue (in jiffies).
++ * @bfq_wr_min_inter_arr_async: minimum period between request arrivals
++ *				after which weight-raising may be
++ *				reactivated for an already busy queue
++ *				(in jiffies).
++ * @bfq_wr_max_softrt_rate: max service-rate for a soft real-time queue,
++ *			    in sectors per second.
++ * @RT_prod: cached value of the product R*T, used for automatically
++ *	     computing the maximum duration of weight raising.
++ * @device_speed: device-speed class for the low-latency heuristic.
++ * @oom_bfqq: fallback dummy bfqq for extreme OOM conditions.
++ *
++ * All the fields are protected by the @queue lock.
++ */
++struct bfq_data {
++	struct request_queue *queue;
++
++	struct bfq_group *root_group;
++	struct rb_root rq_pos_tree;
++
++#ifdef CONFIG_CGROUP_BFQIO
++	int active_numerous_groups;
++#endif
++
++	struct rb_root queue_weights_tree;
++	struct rb_root group_weights_tree;
++
++	int busy_queues;
++	int busy_in_flight_queues;
++	int const_seeky_busy_in_flight_queues;
++	int wr_busy_queues;
++	int queued;
++	int rq_in_driver;
++	int sync_flight;
++
++	int max_rq_in_driver;
++	int hw_tag_samples;
++	int hw_tag;
++
++	int budgets_assigned;
++
++	struct timer_list idle_slice_timer;
++	struct work_struct unplug_work;
++
++	struct bfq_queue *in_service_queue;
++	struct bfq_io_cq *in_service_bic;
++
++	sector_t last_position;
++
++	ktime_t last_budget_start;
++	ktime_t last_idling_start;
++	int peak_rate_samples;
++	u64 peak_rate;
++	unsigned long bfq_max_budget;
++
++	struct hlist_head group_list;
++	struct list_head active_list;
++	struct list_head idle_list;
++
++	unsigned int bfq_fifo_expire[2];
++	unsigned int bfq_back_penalty;
++	unsigned int bfq_back_max;
++	unsigned int bfq_slice_idle;
++	u64 bfq_class_idle_last_service;
++
++	unsigned int bfq_user_max_budget;
++	unsigned int bfq_max_budget_async_rq;
++	unsigned int bfq_timeout[2];
++
++	unsigned int bfq_coop_thresh;
++	unsigned int bfq_failed_cooperations;
++	unsigned int bfq_requests_within_timer;
++
++	unsigned long last_ins_in_burst;
++	unsigned long bfq_burst_interval;
++	int burst_size;
++	unsigned long bfq_large_burst_thresh;
++	bool large_burst;
++	struct hlist_head burst_list;
++
++	bool low_latency;
++
++	/* parameters of the low_latency heuristics */
++	unsigned int bfq_wr_coeff;
++	unsigned int bfq_wr_max_time;
++	unsigned int bfq_wr_rt_max_time;
++	unsigned int bfq_wr_min_idle_time;
++	unsigned long bfq_wr_min_inter_arr_async;
++	unsigned int bfq_wr_max_softrt_rate;
++	u64 RT_prod;
++	enum bfq_device_speed device_speed;
++
++	struct bfq_queue oom_bfqq;
++};
++
++enum bfqq_state_flags {
++	BFQ_BFQQ_FLAG_busy = 0,		/* has requests or is in service */
++	BFQ_BFQQ_FLAG_wait_request,	/* waiting for a request */
++	BFQ_BFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
++	BFQ_BFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
++	BFQ_BFQQ_FLAG_idle_window,	/* slice idling enabled */
++	BFQ_BFQQ_FLAG_sync,		/* synchronous queue */
++	BFQ_BFQQ_FLAG_budget_new,	/* no completion with this budget */
++	BFQ_BFQQ_FLAG_IO_bound,         /*
++					 * bfqq has timed out at least once
++					 * having consumed at most 2/10 of
++					 * its budget
++					 */
++	BFQ_BFQQ_FLAG_in_large_burst,	/*
++					 * bfqq activated in a large burst,
++					 * see comments to bfq_handle_burst.
++					 */
++	BFQ_BFQQ_FLAG_constantly_seeky,	/*
++					 * bfqq has proved to be slow and
++					 * seeky until budget timeout
++					 */
++	BFQ_BFQQ_FLAG_softrt_update,    /*
++					 * may need softrt-next-start
++					 * update
++					 */
++	BFQ_BFQQ_FLAG_coop,		/* bfqq is shared */
++	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
++};
++
++#define BFQ_BFQQ_FNS(name)						\
++static inline void bfq_mark_bfqq_##name(struct bfq_queue *bfqq)		\
++{									\
++	(bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_##name);			\
++}									\
++static inline void bfq_clear_bfqq_##name(struct bfq_queue *bfqq)	\
++{									\
++	(bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_##name);			\
++}									\
++static inline int bfq_bfqq_##name(const struct bfq_queue *bfqq)		\
++{									\
++	return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0;	\
++}
++
++BFQ_BFQQ_FNS(busy);
++BFQ_BFQQ_FNS(wait_request);
++BFQ_BFQQ_FNS(must_alloc);
++BFQ_BFQQ_FNS(fifo_expire);
++BFQ_BFQQ_FNS(idle_window);
++BFQ_BFQQ_FNS(sync);
++BFQ_BFQQ_FNS(budget_new);
++BFQ_BFQQ_FNS(IO_bound);
++BFQ_BFQQ_FNS(in_large_burst);
++BFQ_BFQQ_FNS(constantly_seeky);
++BFQ_BFQQ_FNS(coop);
++BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(softrt_update);
++#undef BFQ_BFQQ_FNS
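++
++/*
++ * For reference, a hand expansion of BFQ_BFQQ_FNS(busy) above (sketch,
++ * not part of the original patch; kept in a comment so as not to
++ * duplicate the definitions the macro already generates):
++ *
++ *   static inline void bfq_mark_bfqq_busy(struct bfq_queue *bfqq)
++ *   {
++ *           (bfqq)->flags |= (1 << BFQ_BFQQ_FLAG_busy);
++ *   }
++ *   static inline void bfq_clear_bfqq_busy(struct bfq_queue *bfqq)
++ *   {
++ *           (bfqq)->flags &= ~(1 << BFQ_BFQQ_FLAG_busy);
++ *   }
++ *   static inline int bfq_bfqq_busy(const struct bfq_queue *bfqq)
++ *   {
++ *           return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_busy)) != 0;
++ *   }
++ */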
++
++/* Logging facilities. */
++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \
++	blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args)
++
++#define bfq_log(bfqd, fmt, args...) \
++	blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args)
++
++/* Expiration reasons. */
++enum bfqq_expiration {
++	BFQ_BFQQ_TOO_IDLE = 0,		/*
++					 * queue has been idling for
++					 * too long
++					 */
++	BFQ_BFQQ_BUDGET_TIMEOUT,	/* budget took too long to be used */
++	BFQ_BFQQ_BUDGET_EXHAUSTED,	/* budget consumed */
++	BFQ_BFQQ_NO_MORE_REQUESTS,	/* the queue has no more requests */
++};
++
++#ifdef CONFIG_CGROUP_BFQIO
++/**
++ * struct bfq_group - per (device, cgroup) data structure.
++ * @entity: schedulable entity to insert into the parent group sched_data.
++ * @sched_data: own sched_data, to contain child entities (they may be
++ *              both bfq_queues and bfq_groups).
++ * @group_node: node to be inserted into the bfqio_cgroup->group_data
++ *              list of the containing cgroup's bfqio_cgroup.
++ * @bfqd_node: node to be inserted into the @bfqd->group_list list
++ *             of the groups active on the same device; used for cleanup.
++ * @bfqd: the bfq_data for the device this group acts upon.
++ * @async_bfqq: array of async queues for all the tasks belonging to
++ *              the group, one queue per ioprio value per ioprio_class,
++ *              except for the idle class that has only one queue.
++ * @async_idle_bfqq: async queue for the idle class (ioprio is ignored).
++ * @my_entity: pointer to @entity, %NULL for the toplevel group; used
++ *             to avoid too many special cases during group creation/
++ *             migration.
++ * @active_entities: number of active entities belonging to the group;
++ *                   unused for the root group. Used to know whether there
++ *                   are groups with more than one active @bfq_entity
++ *                   (see the comments to the function
++ *                   bfq_bfqq_must_not_expire()).
++ *
++ * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
++ * there is a set of bfq_groups, each one collecting the lower-level
++ * entities belonging to the group that are acting on the same device.
++ *
++ * Locking works as follows:
++ *    o @group_node is protected by the bfqio_cgroup lock, and is accessed
++ *      via RCU from its readers.
++ *    o @bfqd is protected by the queue lock, RCU is used to access it
++ *      from the readers.
++ *    o All the other fields are protected by the @bfqd queue lock.
++ */
++struct bfq_group {
++	struct bfq_entity entity;
++	struct bfq_sched_data sched_data;
++
++	struct hlist_node group_node;
++	struct hlist_node bfqd_node;
++
++	void *bfqd;
++
++	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++	struct bfq_queue *async_idle_bfqq;
++
++	struct bfq_entity *my_entity;
++
++	int active_entities;
++};
++
++/**
++ * struct bfqio_cgroup - bfq cgroup data structure.
++ * @css: subsystem state for bfq in the containing cgroup.
++ * @online: flag marked when the subsystem is inserted.
++ * @weight: cgroup weight.
++ * @ioprio: cgroup ioprio.
++ * @ioprio_class: cgroup ioprio_class.
++ * @lock: spinlock that protects @ioprio, @ioprio_class and @group_data.
++ * @group_data: list containing the bfq_group belonging to this cgroup.
++ *
++ * @group_data is accessed using RCU, with @lock protecting the updates;
++ * @ioprio and @ioprio_class are protected by @lock.
++ */
++struct bfqio_cgroup {
++	struct cgroup_subsys_state css;
++	bool online;
++
++	unsigned short weight, ioprio, ioprio_class;
++
++	spinlock_t lock;
++	struct hlist_head group_data;
++};
++#else
++struct bfq_group {
++	struct bfq_sched_data sched_data;
++
++	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
++	struct bfq_queue *async_idle_bfqq;
++};
++#endif
++
++static inline struct bfq_service_tree *
++bfq_entity_service_tree(struct bfq_entity *entity)
++{
++	struct bfq_sched_data *sched_data = entity->sched_data;
++	unsigned int idx = entity->ioprio_class - 1;
++
++	BUG_ON(idx >= BFQ_IOPRIO_CLASSES);
++	BUG_ON(sched_data == NULL);
++
++	return sched_data->service_tree + idx;
++}
++
++static inline struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic,
++					    bool is_sync)
++{
++	return bic->bfqq[is_sync];
++}
++
++static inline void bic_set_bfqq(struct bfq_io_cq *bic,
++				struct bfq_queue *bfqq, bool is_sync)
++{
++	bic->bfqq[is_sync] = bfqq;
++}
++
++static inline struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic)
++{
++	return bic->icq.q->elevator->elevator_data;
++}
++
++/**
++ * bfq_get_bfqd_locked - lock a bfqd using an RCU-protected pointer.
++ * @ptr: a pointer to a bfqd.
++ * @flags: storage for the flags to be saved.
++ *
++ * This function allows bfqg->bfqd to be protected by the
++ * queue lock of the bfqd it references; the pointer is dereferenced
++ * under RCU, so the storage for bfqd is guaranteed to be safe as long
++ * as the RCU read side critical section does not end.  After the
++ * bfqd->queue->queue_lock is taken the pointer is rechecked, to be
++ * sure that no other writer accessed it.  If we raced with a writer,
++ * the function returns NULL, with the queue unlocked, otherwise it
++ * returns the dereferenced pointer, with the queue locked.
++ */
++static inline struct bfq_data *bfq_get_bfqd_locked(void **ptr,
++						   unsigned long *flags)
++{
++	struct bfq_data *bfqd;
++
++	rcu_read_lock();
++	bfqd = rcu_dereference(*(struct bfq_data **)ptr);
++
++	if (bfqd != NULL) {
++		spin_lock_irqsave(bfqd->queue->queue_lock, *flags);
++		if (*ptr == bfqd)
++			goto out;
++		spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++	}
++
++	bfqd = NULL;
++out:
++	rcu_read_unlock();
++	return bfqd;
++}
++
++static inline void bfq_put_bfqd_unlock(struct bfq_data *bfqd,
++				       unsigned long *flags)
++{
++	spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
++}
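++
++/*
++ * Typical usage of the pair above (hypothetical caller, shown only for
++ * illustration; assumes a bfqg whose bfqd pointer may be cleared
++ * concurrently by a writer, as in the group cleanup path):
++ *
++ *	unsigned long flags;
++ *	struct bfq_data *bfqd = bfq_get_bfqd_locked(&bfqg->bfqd, &flags);
++ *
++ *	if (bfqd != NULL) {
++ *		// queue lock held here, bfqd cannot go away
++ *		bfq_put_bfqd_unlock(bfqd, &flags);
++ *	}
++ *	// on a NULL return we raced with a writer; no lock is held
++ */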
++
++static void bfq_check_ioprio_change(struct bfq_io_cq *bic);
++static void bfq_put_queue(struct bfq_queue *bfqq);
++static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
++static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
++				       struct bfq_group *bfqg, int is_sync,
++				       struct bfq_io_cq *bic, gfp_t gfp_mask);
++static void bfq_end_wr_async_queues(struct bfq_data *bfqd,
++				    struct bfq_group *bfqg);
++static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
++static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++
++#endif /* _BFQ_H */
+-- 
+2.1.4
+

diff --git a/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-4.1.0.patch b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-4.1.0.patch
new file mode 100644
index 0000000..7c977bd
--- /dev/null
+++ b/5003_block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r8-for-4.1.0.patch
@@ -0,0 +1,1220 @@
+From 0a52c636b77988d8aa0d24bf36144e4228d43df9 Mon Sep 17 00:00:00 2001
+From: Mauro Andreolini <mauro.andreolini@unimore.it>
+Date: Fri, 5 Jun 2015 17:45:40 +0200
+Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r8 for
+ 4.1.0
+
+A set of processes may happen to perform interleaved reads, i.e., requests
+whose union would give rise to a sequential read pattern. There are two
+typical cases: in the first case, processes read fixed-size chunks of
+data at a fixed distance from each other, while in the second case processes
+may read variable-size chunks at variable distances. The latter case occurs
+for example with QEMU, which splits the I/O generated by the guest into
+multiple chunks, and lets these chunks be served by a pool of cooperating
+processes, iteratively assigning the next chunk of I/O to the first
+available process. CFQ uses actual queue merging for the first type of
+processes, whereas it uses preemption to get a sequential read pattern out
+of the read requests performed by the second type of processes. In the end
+it uses two different mechanisms to achieve the same goal: boosting the
+throughput with interleaved I/O.
+
+This patch introduces Early Queue Merge (EQM), a unified mechanism to get a
+sequential read pattern with both types of processes. The main idea is
+to check newly arrived requests against the next request of the active queue,
+both in case of actual request insert and in case of request merge. By doing
+so, both types of processes can be handled by just merging their queues.
+EQM is then simpler and more compact than the pair of mechanisms used in
+CFQ.
+
+Finally, EQM also preserves the typical low-latency properties of BFQ, by
+properly restoring the weight-raising state of a queue when it gets back to
+a non-merged state.
+
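+At its core, the merge scheduled by EQM is just a pointer redirection plus
+reference accounting, as bfq_setup_merge() in the patch body implements
+(distilled sketch of its two key statements, for illustration only):
+
+	bfqq->new_bfqq = new_bfqq;
+	atomic_add(process_refs, &new_bfqq->ref);
+
+The requests of the process owning bfqq are then redirected to new_bfqq,
+which becomes a shared queue.
+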
+Signed-off-by: Mauro Andreolini <mauro.andreolini@unimore.it>
+Signed-off-by: Arianna Avanzini <avanzini.arianna@gmail.com>
+Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
+---
+ block/bfq-iosched.c | 750 +++++++++++++++++++++++++++++++++++++---------------
+ block/bfq-sched.c   |  28 --
+ block/bfq.h         |  54 +++-
+ 3 files changed, 580 insertions(+), 252 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index 773b2ee..71b51c1 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -573,6 +573,57 @@ static inline unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+ 	return dur;
+ }
+ 
++static inline unsigned
++bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++{
++	return bfqq->bic ? bfqq->bic->cooperations : 0;
++}
++
++static inline void
++bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++{
++	if (bic->saved_idle_window)
++		bfq_mark_bfqq_idle_window(bfqq);
++	else
++		bfq_clear_bfqq_idle_window(bfqq);
++	if (bic->saved_IO_bound)
++		bfq_mark_bfqq_IO_bound(bfqq);
++	else
++		bfq_clear_bfqq_IO_bound(bfqq);
++	/* Assuming that the flag in_large_burst is already correctly set */
++	if (bic->wr_time_left && bfqq->bfqd->low_latency &&
++	    !bfq_bfqq_in_large_burst(bfqq) &&
++	    bic->cooperations < bfqq->bfqd->bfq_coop_thresh) {
++		/*
++		 * Start a weight raising period with the duration given by
++		 * the raising_time_left snapshot.
++		 */
++		if (bfq_bfqq_busy(bfqq))
++			bfqq->bfqd->wr_busy_queues++;
++		bfqq->wr_coeff = bfqq->bfqd->bfq_wr_coeff;
++		bfqq->wr_cur_max_time = bic->wr_time_left;
++		bfqq->last_wr_start_finish = jiffies;
++		bfqq->entity.ioprio_changed = 1;
++	}
++	/*
++	 * Clear wr_time_left to prevent bfq_bfqq_save_state() from
++	 * getting confused about the queue's need of a weight-raising
++	 * period.
++	 */
++	bic->wr_time_left = 0;
++}
++
++/* Must be called with the queue_lock held. */
++static int bfqq_process_refs(struct bfq_queue *bfqq)
++{
++	int process_refs, io_refs;
++
++	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
++	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
++	BUG_ON(process_refs < 0);
++	return process_refs;
++}
++
+ /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
+ static inline void bfq_reset_burst_list(struct bfq_data *bfqd,
+ 					struct bfq_queue *bfqq)
+@@ -817,7 +868,7 @@ static void bfq_add_request(struct request *rq)
+ 		bfq_rq_pos_tree_add(bfqd, bfqq);
+ 
+ 	if (!bfq_bfqq_busy(bfqq)) {
+-		bool soft_rt,
++		bool soft_rt, coop_or_in_burst,
+ 		     idle_for_long_time = time_is_before_jiffies(
+ 						bfqq->budget_timeout +
+ 						bfqd->bfq_wr_min_idle_time);
+@@ -841,11 +892,12 @@ static void bfq_add_request(struct request *rq)
+ 				bfqd->last_ins_in_burst = jiffies;
+ 		}
+ 
++		coop_or_in_burst = bfq_bfqq_in_large_burst(bfqq) ||
++			bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh;
+ 		soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 &&
+-			!bfq_bfqq_in_large_burst(bfqq) &&
++			!coop_or_in_burst &&
+ 			time_is_before_jiffies(bfqq->soft_rt_next_start);
+-		interactive = !bfq_bfqq_in_large_burst(bfqq) &&
+-			      idle_for_long_time;
++		interactive = !coop_or_in_burst && idle_for_long_time;
+ 		entity->budget = max_t(unsigned long, bfqq->max_budget,
+ 				       bfq_serv_to_charge(next_rq, bfqq));
+ 
+@@ -864,11 +916,20 @@ static void bfq_add_request(struct request *rq)
+ 		if (!bfqd->low_latency)
+ 			goto add_bfqq_busy;
+ 
++		if (bfq_bfqq_just_split(bfqq))
++			goto set_ioprio_changed;
++
+ 		/*
+-		 * If the queue is not being boosted and has been idle
+-		 * for enough time, start a weight-raising period
++		 * If the queue:
++		 * - is not being boosted,
++		 * - has been idle for enough time,
++		 * - is not a sync queue or is linked to a bfq_io_cq (it is
++		 *   shared "by its nature" or it is not shared and its
++		 *   requests have not been redirected to a shared queue)
++		 * start a weight-raising period.
+ 		 */
+-		if (old_wr_coeff == 1 && (interactive || soft_rt)) {
++		if (old_wr_coeff == 1 && (interactive || soft_rt) &&
++		    (!bfq_bfqq_sync(bfqq) || bfqq->bic != NULL)) {
+ 			bfqq->wr_coeff = bfqd->bfq_wr_coeff;
+ 			if (interactive)
+ 				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+@@ -882,7 +943,7 @@ static void bfq_add_request(struct request *rq)
+ 		} else if (old_wr_coeff > 1) {
+ 			if (interactive)
+ 				bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
+-			else if (bfq_bfqq_in_large_burst(bfqq) ||
++			else if (coop_or_in_burst ||
+ 				 (bfqq->wr_cur_max_time ==
+ 				  bfqd->bfq_wr_rt_max_time &&
+ 				  !soft_rt)) {
+@@ -901,18 +962,18 @@ static void bfq_add_request(struct request *rq)
+ 				/*
+ 				 *
+ 				 * The remaining weight-raising time is lower
+-				 * than bfqd->bfq_wr_rt_max_time, which
+-				 * means that the application is enjoying
+-				 * weight raising either because deemed soft-
+-				 * rt in the near past, or because deemed
+-				 * interactive a long ago. In both cases,
+-				 * resetting now the current remaining weight-
+-				 * raising time for the application to the
+-				 * weight-raising duration for soft rt
+-				 * applications would not cause any latency
+-				 * increase for the application (as the new
+-				 * duration would be higher than the remaining
+-				 * time).
++				 * than bfqd->bfq_wr_rt_max_time, which means
++				 * that the application is enjoying weight
++				 * raising either because deemed soft-rt in
++				 * the near past, or because deemed interactive
++				 * long ago.
++				 * In both cases, resetting now the current
++				 * remaining weight-raising time for the
++				 * application to the weight-raising duration
++				 * for soft rt applications would not cause any
++				 * latency increase for the application (as the
++				 * new duration would be higher than the
++				 * remaining time).
+ 				 *
+ 				 * In addition, the application is now meeting
+ 				 * the requirements for being deemed soft rt.
+@@ -947,6 +1008,7 @@ static void bfq_add_request(struct request *rq)
+ 					bfqd->bfq_wr_rt_max_time;
+ 			}
+ 		}
++set_ioprio_changed:
+ 		if (old_wr_coeff != bfqq->wr_coeff)
+ 			entity->ioprio_changed = 1;
+ add_bfqq_busy:
+@@ -1167,90 +1229,35 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+ 	spin_unlock_irq(bfqd->queue->queue_lock);
+ }
+ 
+-static int bfq_allow_merge(struct request_queue *q, struct request *rq,
+-			   struct bio *bio)
++static inline sector_t bfq_io_struct_pos(void *io_struct, bool request)
+ {
+-	struct bfq_data *bfqd = q->elevator->elevator_data;
+-	struct bfq_io_cq *bic;
+-	struct bfq_queue *bfqq;
+-
+-	/*
+-	 * Disallow merge of a sync bio into an async request.
+-	 */
+-	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
+-		return 0;
+-
+-	/*
+-	 * Lookup the bfqq that this bio will be queued with. Allow
+-	 * merge only if rq is queued there.
+-	 * Queue lock is held here.
+-	 */
+-	bic = bfq_bic_lookup(bfqd, current->io_context);
+-	if (bic == NULL)
+-		return 0;
+-
+-	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
+-	return bfqq == RQ_BFQQ(rq);
+-}
+-
+-static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+-				       struct bfq_queue *bfqq)
+-{
+-	if (bfqq != NULL) {
+-		bfq_mark_bfqq_must_alloc(bfqq);
+-		bfq_mark_bfqq_budget_new(bfqq);
+-		bfq_clear_bfqq_fifo_expire(bfqq);
+-
+-		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
+-
+-		bfq_log_bfqq(bfqd, bfqq,
+-			     "set_in_service_queue, cur-budget = %lu",
+-			     bfqq->entity.budget);
+-	}
+-
+-	bfqd->in_service_queue = bfqq;
+-}
+-
+-/*
+- * Get and set a new queue for service.
+- */
+-static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd,
+-						  struct bfq_queue *bfqq)
+-{
+-	if (!bfqq)
+-		bfqq = bfq_get_next_queue(bfqd);
++	if (request)
++		return blk_rq_pos(io_struct);
+ 	else
+-		bfq_get_next_queue_forced(bfqd, bfqq);
+-
+-	__bfq_set_in_service_queue(bfqd, bfqq);
+-	return bfqq;
++		return ((struct bio *)io_struct)->bi_iter.bi_sector;
+ }
+ 
+-static inline sector_t bfq_dist_from_last(struct bfq_data *bfqd,
+-					  struct request *rq)
++static inline sector_t bfq_dist_from(sector_t pos1,
++				     sector_t pos2)
+ {
+-	if (blk_rq_pos(rq) >= bfqd->last_position)
+-		return blk_rq_pos(rq) - bfqd->last_position;
++	if (pos1 >= pos2)
++		return pos1 - pos2;
+ 	else
+-		return bfqd->last_position - blk_rq_pos(rq);
++		return pos2 - pos1;
+ }
+ 
+-/*
+- * Return true if bfqq has no request pending and rq is close enough to
+- * bfqd->last_position, or if rq is closer to bfqd->last_position than
+- * bfqq->next_rq
+- */
+-static inline int bfq_rq_close(struct bfq_data *bfqd, struct request *rq)
++static inline int bfq_rq_close_to_sector(void *io_struct, bool request,
++					 sector_t sector)
+ {
+-	return bfq_dist_from_last(bfqd, rq) <= BFQQ_SEEK_THR;
++	return bfq_dist_from(bfq_io_struct_pos(io_struct, request), sector) <=
++	       BFQQ_SEEK_THR;
+ }
+ 
+-static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
++static struct bfq_queue *bfqq_close(struct bfq_data *bfqd, sector_t sector)
+ {
+ 	struct rb_root *root = &bfqd->rq_pos_tree;
+ 	struct rb_node *parent, *node;
+ 	struct bfq_queue *__bfqq;
+-	sector_t sector = bfqd->last_position;
+ 
+ 	if (RB_EMPTY_ROOT(root))
+ 		return NULL;
+@@ -1269,7 +1276,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ 	 * next_request position).
+ 	 */
+ 	__bfqq = rb_entry(parent, struct bfq_queue, pos_node);
+-	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ 		return __bfqq;
+ 
+ 	if (blk_rq_pos(__bfqq->next_rq) < sector)
+@@ -1280,7 +1287,7 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ 		return NULL;
+ 
+ 	__bfqq = rb_entry(node, struct bfq_queue, pos_node);
+-	if (bfq_rq_close(bfqd, __bfqq->next_rq))
++	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
+ 		return __bfqq;
+ 
+ 	return NULL;
+@@ -1289,14 +1296,12 @@ static struct bfq_queue *bfqq_close(struct bfq_data *bfqd)
+ /*
+  * bfqd - obvious
+  * cur_bfqq - passed in so that we don't decide that the current queue
+- *            is closely cooperating with itself.
+- *
+- * We are assuming that cur_bfqq has dispatched at least one request,
+- * and that bfqd->last_position reflects a position on the disk associated
+- * with the I/O issued by cur_bfqq.
++ *            is closely cooperating with itself
++ * sector - used as a reference point to search for a close queue
+  */
+ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+-					      struct bfq_queue *cur_bfqq)
++					      struct bfq_queue *cur_bfqq,
++					      sector_t sector)
+ {
+ 	struct bfq_queue *bfqq;
+ 
+@@ -1316,7 +1321,7 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ 	 * working closely on the same area of the disk. In that case,
+ 	 * we can group them together and don't waste time idling.
+ 	 */
+-	bfqq = bfqq_close(bfqd);
++	bfqq = bfqq_close(bfqd, sector);
+ 	if (bfqq == NULL || bfqq == cur_bfqq)
+ 		return NULL;
+ 
+@@ -1343,6 +1348,315 @@ static struct bfq_queue *bfq_close_cooperator(struct bfq_data *bfqd,
+ 	return bfqq;
+ }
+ 
++static struct bfq_queue *
++bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	int process_refs, new_process_refs;
++	struct bfq_queue *__bfqq;
++
++	/*
++	 * If there are no process references on the new_bfqq, then it is
++	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
++	 * may have dropped their last reference (not just their last process
++	 * reference).
++	 */
++	if (!bfqq_process_refs(new_bfqq))
++		return NULL;
++
++	/* Avoid a circular list and skip interim queue merges. */
++	while ((__bfqq = new_bfqq->new_bfqq)) {
++		if (__bfqq == bfqq)
++			return NULL;
++		new_bfqq = __bfqq;
++	}
++
++	process_refs = bfqq_process_refs(bfqq);
++	new_process_refs = bfqq_process_refs(new_bfqq);
++	/*
++	 * If the process for the bfqq has gone away, there is no
++	 * sense in merging the queues.
++	 */
++	if (process_refs == 0 || new_process_refs == 0)
++		return NULL;
++
++	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
++		new_bfqq->pid);
++
++	/*
++	 * Merging is just a redirection: the requests of the process
++	 * owning one of the two queues are redirected to the other queue.
++	 * The latter queue, in its turn, is set as shared if this is the
++	 * first time that the requests of some process are redirected to
++	 * it.
++	 *
++	 * We redirect bfqq to new_bfqq and not the opposite, because we
++	 * are in the context of the process owning bfqq, hence we have
++	 * the io_cq of this process. So we can immediately configure this
++	 * io_cq to redirect the requests of the process to new_bfqq.
++	 *
++	 * NOTE, even if new_bfqq coincides with the in-service queue, the
++	 * io_cq of new_bfqq is not available, because, if the in-service
++	 * queue is shared, bfqd->in_service_bic may not point to the
++	 * io_cq of the in-service queue.
++	 * Redirecting the requests of the process owning bfqq to the
++	 * currently in-service queue is in any case the best option, as
++	 * we feed the in-service queue with new requests close to the
++	 * last request served and, by doing so, hopefully increase the
++	 * throughput.
++	 */
++	bfqq->new_bfqq = new_bfqq;
++	atomic_add(process_refs, &new_bfqq->ref);
++	return new_bfqq;
++}
++
++/*
++ * Attempt to schedule a merge of bfqq with the currently in-service queue
++ * or with a close queue among the scheduled queues.
++ * Return NULL if no merge was scheduled, a pointer to the shared bfq_queue
++ * structure otherwise.
++ *
++ * The OOM queue is not allowed to participate in cooperation: in fact, since
++ * the requests temporarily redirected to the OOM queue could be redirected
++ * again to dedicated queues at any time, the state needed to correctly
++ * handle merging with the OOM queue would be quite complex and expensive
++ * to maintain. Besides, in a condition as critical as out of memory,
++ * the benefits of queue merging may be of little relevance, or even negligible.
++ */
++static struct bfq_queue *
++bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
++		     void *io_struct, bool request)
++{
++	struct bfq_queue *in_service_bfqq, *new_bfqq;
++
++	if (bfqq->new_bfqq)
++		return bfqq->new_bfqq;
++
++	if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
++		return NULL;
++
++	in_service_bfqq = bfqd->in_service_queue;
++
++	if (in_service_bfqq == NULL || in_service_bfqq == bfqq ||
++	    !bfqd->in_service_bic ||
++	    unlikely(in_service_bfqq == &bfqd->oom_bfqq))
++		goto check_scheduled;
++
++	if (bfq_class_idle(in_service_bfqq) || bfq_class_idle(bfqq))
++		goto check_scheduled;
++
++	if (bfq_class_rt(in_service_bfqq) != bfq_class_rt(bfqq))
++		goto check_scheduled;
++
++	if (in_service_bfqq->entity.parent != bfqq->entity.parent)
++		goto check_scheduled;
++
++	if (bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
++	    bfq_bfqq_sync(in_service_bfqq) && bfq_bfqq_sync(bfqq)) {
++		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
++		if (new_bfqq != NULL)
++			return new_bfqq; /* Merge with in-service queue */
++	}
++
++	/*
++	 * Check whether there is a cooperator among currently scheduled
++	 * queues. The only thing we need is that the bio/request is not
++	 * NULL, as we need it to establish whether a cooperator exists.
++	 */
++check_scheduled:
++	new_bfqq = bfq_close_cooperator(bfqd, bfqq,
++					bfq_io_struct_pos(io_struct, request));
++	if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq))
++		return bfq_setup_merge(bfqq, new_bfqq);
++
++	return NULL;
++}
++
++static inline void
++bfq_bfqq_save_state(struct bfq_queue *bfqq)
++{
++	/*
++	 * If bfqq->bic == NULL, the queue is already shared or its requests
++	 * have already been redirected to a shared queue; both idle window
++	 * and weight raising state have already been saved. Do nothing.
++	 */
++	if (bfqq->bic == NULL)
++		return;
++	if (bfqq->bic->wr_time_left)
++		/*
++		 * This is the queue of a just-started process, and would
++		 * deserve weight raising: we set wr_time_left to the full
++		 * weight-raising duration to trigger weight-raising when
++		 * and if the queue is split and the first request of the
++		 * queue is enqueued.
++		 */
++		bfqq->bic->wr_time_left = bfq_wr_duration(bfqq->bfqd);
++	else if (bfqq->wr_coeff > 1) {
++		unsigned long wr_duration =
++			jiffies - bfqq->last_wr_start_finish;
++		/*
++		 * It may happen that a queue's weight raising period lasts
++		 * longer than its wr_cur_max_time, as weight raising is
++		 * handled only when a request is enqueued or dispatched (it
++		 * does not use any timer). If the weight raising period is
++		 * about to end, don't save it.
++		 */
++		if (bfqq->wr_cur_max_time <= wr_duration)
++			bfqq->bic->wr_time_left = 0;
++		else
++			bfqq->bic->wr_time_left =
++				bfqq->wr_cur_max_time - wr_duration;
++		/*
++		 * The bfq_queue is becoming shared or the requests of the
++		 * process owning the queue are being redirected to a shared
++		 * queue. Stop the weight raising period of the queue, as in
++		 * both cases it should not be owned by an interactive or
++		 * soft real-time application.
++		 */
++		bfq_bfqq_end_wr(bfqq);
++	} else
++		bfqq->bic->wr_time_left = 0;
++	bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq);
++	bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq);
++	bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq);
++	bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node);
++	bfqq->bic->cooperations++;
++	bfqq->bic->failed_cooperations = 0;
++}
++
++static inline void
++bfq_get_bic_reference(struct bfq_queue *bfqq)
++{
++	/*
++	 * If bfqq->bic has a non-NULL value, the bic to which it belongs
++	 * is about to begin using a shared bfq_queue.
++	 */
++	if (bfqq->bic)
++		atomic_long_inc(&bfqq->bic->icq.ioc->refcount);
++}
++
++static void
++bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
++		struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
++{
++	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
++		(long unsigned)new_bfqq->pid);
++	/* Save weight raising and idle window of the merged queues */
++	bfq_bfqq_save_state(bfqq);
++	bfq_bfqq_save_state(new_bfqq);
++	if (bfq_bfqq_IO_bound(bfqq))
++		bfq_mark_bfqq_IO_bound(new_bfqq);
++	bfq_clear_bfqq_IO_bound(bfqq);
++	/*
++	 * Grab a reference to the bic, to prevent it from being destroyed
++	 * before being possibly touched by a bfq_split_bfqq().
++	 */
++	bfq_get_bic_reference(bfqq);
++	bfq_get_bic_reference(new_bfqq);
++	/*
++	 * Merge queues (that is, let bic redirect its requests to new_bfqq)
++	 */
++	bic_set_bfqq(bic, new_bfqq, 1);
++	bfq_mark_bfqq_coop(new_bfqq);
++	/*
++	 * new_bfqq now belongs to at least two bics (it is a shared queue):
++	 * set new_bfqq->bic to NULL. bfqq either:
++	 * - does not belong to any bic any more, and hence bfqq->bic must
++	 *   be set to NULL, or
++	 * - is a queue whose owning bics have already been redirected to a
++	 *   different queue, hence the queue is destined to not belong to
++	 *   any bic soon and bfqq->bic is already NULL (therefore the next
++	 *   assignment causes no harm).
++	 */
++	new_bfqq->bic = NULL;
++	bfqq->bic = NULL;
++	bfq_put_queue(bfqq);
++}
++
++static inline void bfq_bfqq_increase_failed_cooperations(struct bfq_queue *bfqq)
++{
++	struct bfq_io_cq *bic = bfqq->bic;
++	struct bfq_data *bfqd = bfqq->bfqd;
++
++	if (bic && bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh) {
++		bic->failed_cooperations++;
++		if (bic->failed_cooperations >= bfqd->bfq_failed_cooperations)
++			bic->cooperations = 0;
++	}
++}
++
++static int bfq_allow_merge(struct request_queue *q, struct request *rq,
++			   struct bio *bio)
++{
++	struct bfq_data *bfqd = q->elevator->elevator_data;
++	struct bfq_io_cq *bic;
++	struct bfq_queue *bfqq, *new_bfqq;
++
++	/*
++	 * Disallow merge of a sync bio into an async request.
++	 */
++	if (bfq_bio_sync(bio) && !rq_is_sync(rq))
++		return 0;
++
++	/*
++	 * Lookup the bfqq that this bio will be queued with. Allow
++	 * merge only if rq is queued there.
++	 * Queue lock is held here.
++	 */
++	bic = bfq_bic_lookup(bfqd, current->io_context);
++	if (bic == NULL)
++		return 0;
++
++	bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio));
++	/*
++	 * We take advantage of this function to perform an early merge
++	 * of the queues of possible cooperating processes.
++	 */
++	if (bfqq != NULL) {
++		new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false);
++		if (new_bfqq != NULL) {
++			bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq);
++			/*
++			 * If we get here, the bio will be queued in the
++			 * shared queue, i.e., new_bfqq, so use new_bfqq
++			 * to decide whether bio and rq can be merged.
++			 */
++			bfqq = new_bfqq;
++		} else
++			bfq_bfqq_increase_failed_cooperations(bfqq);
++	}
++
++	return bfqq == RQ_BFQQ(rq);
++}
++
++static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
++				       struct bfq_queue *bfqq)
++{
++	if (bfqq != NULL) {
++		bfq_mark_bfqq_must_alloc(bfqq);
++		bfq_mark_bfqq_budget_new(bfqq);
++		bfq_clear_bfqq_fifo_expire(bfqq);
++
++		bfqd->budgets_assigned = (bfqd->budgets_assigned*7 + 256) / 8;
++
++		bfq_log_bfqq(bfqd, bfqq,
++			     "set_in_service_queue, cur-budget = %lu",
++			     bfqq->entity.budget);
++	}
++
++	bfqd->in_service_queue = bfqq;
++}
++
++/*
++ * Get and set a new queue for service.
++ */
++static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd)
++{
++	struct bfq_queue *bfqq = bfq_get_next_queue(bfqd);
++
++	__bfq_set_in_service_queue(bfqd, bfqq);
++	return bfqq;
++}
++
+ /*
+  * If enough samples have been computed, return the current max budget
+  * stored in bfqd, which is dynamically updated according to the
+@@ -1488,61 +1802,6 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq)
+ 	return rq;
+ }
+ 
+-/* Must be called with the queue_lock held. */
+-static int bfqq_process_refs(struct bfq_queue *bfqq)
+-{
+-	int process_refs, io_refs;
+-
+-	io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE];
+-	process_refs = atomic_read(&bfqq->ref) - io_refs - bfqq->entity.on_st;
+-	BUG_ON(process_refs < 0);
+-	return process_refs;
+-}
+-
+-static void bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+-{
+-	int process_refs, new_process_refs;
+-	struct bfq_queue *__bfqq;
+-
+-	/*
+-	 * If there are no process references on the new_bfqq, then it is
+-	 * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain
+-	 * may have dropped their last reference (not just their last process
+-	 * reference).
+-	 */
+-	if (!bfqq_process_refs(new_bfqq))
+-		return;
+-
+-	/* Avoid a circular list and skip interim queue merges. */
+-	while ((__bfqq = new_bfqq->new_bfqq)) {
+-		if (__bfqq == bfqq)
+-			return;
+-		new_bfqq = __bfqq;
+-	}
+-
+-	process_refs = bfqq_process_refs(bfqq);
+-	new_process_refs = bfqq_process_refs(new_bfqq);
+-	/*
+-	 * If the process for the bfqq has gone away, there is no
+-	 * sense in merging the queues.
+-	 */
+-	if (process_refs == 0 || new_process_refs == 0)
+-		return;
+-
+-	/*
+-	 * Merge in the direction of the lesser amount of work.
+-	 */
+-	if (new_process_refs >= process_refs) {
+-		bfqq->new_bfqq = new_bfqq;
+-		atomic_add(process_refs, &new_bfqq->ref);
+-	} else {
+-		new_bfqq->new_bfqq = bfqq;
+-		atomic_add(new_process_refs, &bfqq->ref);
+-	}
+-	bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
+-		new_bfqq->pid);
+-}
+-
+ static inline unsigned long bfq_bfqq_budget_left(struct bfq_queue *bfqq)
+ {
+ 	struct bfq_entity *entity = &bfqq->entity;
+@@ -2269,7 +2528,7 @@ static inline bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
+  */
+ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ {
+-	struct bfq_queue *bfqq, *new_bfqq = NULL;
++	struct bfq_queue *bfqq;
+ 	struct request *next_rq;
+ 	enum bfqq_expiration reason = BFQ_BFQQ_BUDGET_TIMEOUT;
+ 
+@@ -2279,17 +2538,6 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 
+ 	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue");
+ 
+-	/*
+-         * If another queue has a request waiting within our mean seek
+-         * distance, let it run. The expire code will check for close
+-         * cooperators and put the close queue at the front of the
+-         * service tree. If possible, merge the expiring queue with the
+-         * new bfqq.
+-         */
+-        new_bfqq = bfq_close_cooperator(bfqd, bfqq);
+-        if (new_bfqq != NULL && bfqq->new_bfqq == NULL)
+-                bfq_setup_merge(bfqq, new_bfqq);
+-
+ 	if (bfq_may_expire_for_budg_timeout(bfqq) &&
+ 	    !timer_pending(&bfqd->idle_slice_timer) &&
+ 	    !bfq_bfqq_must_idle(bfqq))
+@@ -2328,10 +2576,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 				bfq_clear_bfqq_wait_request(bfqq);
+ 				del_timer(&bfqd->idle_slice_timer);
+ 			}
+-			if (new_bfqq == NULL)
+-				goto keep_queue;
+-			else
+-				goto expire;
++			goto keep_queue;
+ 		}
+ 	}
+ 
+@@ -2340,40 +2585,30 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd)
+ 	 * for a new request, or has requests waiting for a completion and
+ 	 * may idle after their completion, then keep it anyway.
+ 	 */
+-	if (new_bfqq == NULL && (timer_pending(&bfqd->idle_slice_timer) ||
+-	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq)))) {
++	if (timer_pending(&bfqd->idle_slice_timer) ||
++	    (bfqq->dispatched != 0 && bfq_bfqq_must_not_expire(bfqq))) {
+ 		bfqq = NULL;
+ 		goto keep_queue;
+-	} else if (new_bfqq != NULL && timer_pending(&bfqd->idle_slice_timer)) {
+-		/*
+-		 * Expiring the queue because there is a close cooperator,
+-		 * cancel timer.
+-		 */
+-		bfq_clear_bfqq_wait_request(bfqq);
+-		del_timer(&bfqd->idle_slice_timer);
+ 	}
+ 
+ 	reason = BFQ_BFQQ_NO_MORE_REQUESTS;
+ expire:
+ 	bfq_bfqq_expire(bfqd, bfqq, 0, reason);
+ new_queue:
+-	bfqq = bfq_set_in_service_queue(bfqd, new_bfqq);
++	bfqq = bfq_set_in_service_queue(bfqd);
+ 	bfq_log(bfqd, "select_queue: new queue %d returned",
+ 		bfqq != NULL ? bfqq->pid : 0);
+ keep_queue:
+ 	return bfqq;
+ }
+ 
+-static void bfq_update_wr_data(struct bfq_data *bfqd,
+-			       struct bfq_queue *bfqq)
++static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+ {
+-	if (bfqq->wr_coeff > 1) { /* queue is being boosted */
+-		struct bfq_entity *entity = &bfqq->entity;
+-
++	struct bfq_entity *entity = &bfqq->entity;
++	if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
+ 		bfq_log_bfqq(bfqd, bfqq,
+ 			"raising period dur %u/%u msec, old coeff %u, w %d(%d)",
+-			jiffies_to_msecs(jiffies -
+-				bfqq->last_wr_start_finish),
++			jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
+ 			jiffies_to_msecs(bfqq->wr_cur_max_time),
+ 			bfqq->wr_coeff,
+ 			bfqq->entity.weight, bfqq->entity.orig_weight);
+@@ -2382,12 +2617,16 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+ 		       entity->orig_weight * bfqq->wr_coeff);
+ 		if (entity->ioprio_changed)
+ 			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change");
++
+ 		/*
+ 		 * If the queue was activated in a burst, or
+ 		 * too much time has elapsed from the beginning
+-		 * of this weight-raising, then end weight raising.
++		 * of this weight-raising period, or the queue has
++		 * exceeded the acceptable number of cooperations,
++		 * then end weight raising.
+ 		 */
+ 		if (bfq_bfqq_in_large_burst(bfqq) ||
++		    bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh ||
+ 		    time_is_before_jiffies(bfqq->last_wr_start_finish +
+ 					   bfqq->wr_cur_max_time)) {
+ 			bfqq->last_wr_start_finish = jiffies;
+@@ -2396,11 +2635,13 @@ static void bfq_update_wr_data(struct bfq_data *bfqd,
+ 				     bfqq->last_wr_start_finish,
+ 				     jiffies_to_msecs(bfqq->wr_cur_max_time));
+ 			bfq_bfqq_end_wr(bfqq);
+-			__bfq_entity_update_weight_prio(
+-				bfq_entity_service_tree(entity),
+-				entity);
+ 		}
+ 	}
++	/* Update weight both if it must be raised and if it must be lowered */
++	if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1))
++		__bfq_entity_update_weight_prio(
++			bfq_entity_service_tree(entity),
++			entity);
+ }
+ 
+ /*
+@@ -2647,6 +2888,25 @@ static inline void bfq_init_icq(struct io_cq *icq)
+ 	struct bfq_io_cq *bic = icq_to_bic(icq);
+ 
+ 	bic->ttime.last_end_request = jiffies;
++	/*
++	 * A newly created bic indicates that the process has just
++	 * started doing I/O, and is probably mapping into memory its
++	 * executable and libraries: it definitely needs weight raising.
++	 * There is however the possibility that the process performs,
++	 * for a while, I/O close to some other process. EQM intercepts
++	 * this behavior and may merge the queue corresponding to the
++	 * process  with some other queue, BEFORE the weight of the queue
++	 * is raised. Merged queues are not weight-raised (they are assumed
++	 * to belong to processes that benefit only from high throughput).
++	 * If the merge is basically the consequence of an accident, then
++	 * the queue will be split soon and will get back its old weight.
++	 * It is then important to write down somewhere that this queue
++	 * does need weight raising, even if it did not make it to get its
++	 * weight raised before being merged. To this purpose, we overload
++	 * the field raising_time_left and assign 1 to it, to mark the queue
++	 * as needing weight raising.
++	 */
++	bic->wr_time_left = 1;
+ }
+ 
+ static void bfq_exit_icq(struct io_cq *icq)
+@@ -2660,6 +2920,13 @@ static void bfq_exit_icq(struct io_cq *icq)
+ 	}
+ 
+ 	if (bic->bfqq[BLK_RW_SYNC]) {
++		/*
++		 * If the bic is using a shared queue, put the reference
++		 * taken on the io_context when the bic started using a
++		 * shared bfq_queue.
++		 */
++		if (bfq_bfqq_coop(bic->bfqq[BLK_RW_SYNC]))
++			put_io_context(icq->ioc);
+ 		bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
+ 		bic->bfqq[BLK_RW_SYNC] = NULL;
+ 	}
+@@ -2952,6 +3219,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
+ 	if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
+ 		return;
+ 
++	/* Idle window just restored, statistics are meaningless. */
++	if (bfq_bfqq_just_split(bfqq))
++		return;
++
+ 	enable_idle = bfq_bfqq_idle_window(bfqq);
+ 
+ 	if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
+@@ -2999,6 +3270,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ 	if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
+ 	    !BFQQ_SEEKY(bfqq))
+ 		bfq_update_idle_window(bfqd, bfqq, bic);
++	bfq_clear_bfqq_just_split(bfqq);
+ 
+ 	bfq_log_bfqq(bfqd, bfqq,
+ 		     "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
+@@ -3059,12 +3331,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+ static void bfq_insert_request(struct request_queue *q, struct request *rq)
+ {
+ 	struct bfq_data *bfqd = q->elevator->elevator_data;
+-	struct bfq_queue *bfqq = RQ_BFQQ(rq);
++	struct bfq_queue *bfqq = RQ_BFQQ(rq), *new_bfqq;
+ 
+ 	assert_spin_locked(bfqd->queue->queue_lock);
+ 
++	/*
++	 * An unplug may trigger a requeue of a request from the device
++	 * driver: make sure we are in process context while trying to
++	 * merge two bfq_queues.
++	 */
++	if (!in_interrupt()) {
++		new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true);
++		if (new_bfqq != NULL) {
++			if (bic_to_bfqq(RQ_BIC(rq), 1) != bfqq)
++				new_bfqq = bic_to_bfqq(RQ_BIC(rq), 1);
++			/*
++			 * Release the request's reference to the old bfqq
++			 * and make sure one is taken to the shared queue.
++			 */
++			new_bfqq->allocated[rq_data_dir(rq)]++;
++			bfqq->allocated[rq_data_dir(rq)]--;
++			atomic_inc(&new_bfqq->ref);
++			bfq_put_queue(bfqq);
++			if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq)
++				bfq_merge_bfqqs(bfqd, RQ_BIC(rq),
++						bfqq, new_bfqq);
++			rq->elv.priv[1] = new_bfqq;
++			bfqq = new_bfqq;
++		} else
++			bfq_bfqq_increase_failed_cooperations(bfqq);
++	}
++
+ 	bfq_add_request(rq);
+ 
++	/*
++	 * Here a newly-created bfq_queue has already started a weight-raising
++	 * period: clear raising_time_left to prevent bfq_bfqq_save_state()
++	 * from assigning it a full weight-raising period. See the detailed
++	 * comments about this field in bfq_init_icq().
++	 */
++	if (bfqq->bic != NULL)
++		bfqq->bic->wr_time_left = 0;
+ 	rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+ 	list_add_tail(&rq->queuelist, &bfqq->fifo);
+ 
+@@ -3226,18 +3533,6 @@ static void bfq_put_request(struct request *rq)
+ 	}
+ }
+ 
+-static struct bfq_queue *
+-bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic,
+-		struct bfq_queue *bfqq)
+-{
+-	bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+-		(long unsigned)bfqq->new_bfqq->pid);
+-	bic_set_bfqq(bic, bfqq->new_bfqq, 1);
+-	bfq_mark_bfqq_coop(bfqq->new_bfqq);
+-	bfq_put_queue(bfqq);
+-	return bic_to_bfqq(bic, 1);
+-}
+-
+ /*
+  * Returns NULL if a new bfqq should be allocated, or the old bfqq if this
+  * was the last process referring to said bfqq.
+@@ -3246,6 +3541,9 @@ static struct bfq_queue *
+ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq)
+ {
+ 	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
++
++	put_io_context(bic->icq.ioc);
++
+ 	if (bfqq_process_refs(bfqq) == 1) {
+ 		bfqq->pid = current->pid;
+ 		bfq_clear_bfqq_coop(bfqq);
+@@ -3274,6 +3572,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
+ 	struct bfq_queue *bfqq;
+ 	struct bfq_group *bfqg;
+ 	unsigned long flags;
++	bool split = false;
+ 
+ 	might_sleep_if(gfp_mask & __GFP_WAIT);
+ 
+@@ -3291,25 +3590,26 @@ new_queue:
+ 	if (bfqq == NULL || bfqq == &bfqd->oom_bfqq) {
+ 		bfqq = bfq_get_queue(bfqd, bfqg, is_sync, bic, gfp_mask);
+ 		bic_set_bfqq(bic, bfqq, is_sync);
++		if (split && is_sync) {
++			if ((bic->was_in_burst_list && bfqd->large_burst) ||
++			    bic->saved_in_large_burst)
++				bfq_mark_bfqq_in_large_burst(bfqq);
++			else {
++			    bfq_clear_bfqq_in_large_burst(bfqq);
++			    if (bic->was_in_burst_list)
++			       hlist_add_head(&bfqq->burst_list_node,
++				              &bfqd->burst_list);
++			}
++		}
+ 	} else {
+-		/*
+-		 * If the queue was seeky for too long, break it apart.
+-		 */
++		/* If the queue was seeky for too long, break it apart. */
+ 		if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq)) {
+ 			bfq_log_bfqq(bfqd, bfqq, "breaking apart bfqq");
+ 			bfqq = bfq_split_bfqq(bic, bfqq);
++			split = true;
+ 			if (!bfqq)
+ 				goto new_queue;
+ 		}
+-
+-		/*
+-		 * Check to see if this queue is scheduled to merge with
+-		 * another closely cooperating queue. The merging of queues
+-		 * happens here as it must be done in process context.
+-		 * The reference on new_bfqq was taken in merge_bfqqs.
+-		 */
+-		if (bfqq->new_bfqq != NULL)
+-			bfqq = bfq_merge_bfqqs(bfqd, bic, bfqq);
+ 	}
+ 
+ 	bfqq->allocated[rw]++;
+@@ -3320,6 +3620,26 @@ new_queue:
+ 	rq->elv.priv[0] = bic;
+ 	rq->elv.priv[1] = bfqq;
+ 
++	/*
++	 * If a bfq_queue has only one process reference, it is owned
++	 * by only one bfq_io_cq: we can set the bic field of the
++	 * bfq_queue to the address of that structure. Also, if the
++	 * queue has just been split, mark a flag so that the
++	 * information is available to the other scheduler hooks.
++	 */
++	if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) {
++		bfqq->bic = bic;
++		if (split) {
++			bfq_mark_bfqq_just_split(bfqq);
++			/*
++			 * If the queue has just been split from a shared
++			 * queue, restore the idle window and the possible
++			 * weight raising period.
++			 */
++			bfq_bfqq_resume_state(bfqq, bic);
++		}
++	}
++
+ 	spin_unlock_irqrestore(q->queue_lock, flags);
+ 
+ 	return 0;
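
The bfq-iosched.c hunks above replace the old bfq_setup_merge()/bfq_merge_bfqqs() path with the EQM variant, but both sides rest on the same process-reference rule: a queue may only be merged while some process still holds a reference to it, and the merge points at the queue with more process references so that less state has to move. A minimal, self-contained sketch of that rule follows; the types and names here are illustrative stand-ins, not the kernel structures.

#include <assert.h>
#include <stdio.h>

/* Toy model of a bfq_queue: total refs include one per queued request. */
struct queue {
	int refs;		/* all references held on the queue */
	int io_refs;		/* references taken by in-flight requests */
	int on_service_tree;	/* 1 if the scheduler itself holds a ref */
	struct queue *merge_target;
};

/* References held by processes = everything that is not I/O or scheduler,
 * the same accounting as bfqq_process_refs() in the hunk above. */
static int process_refs(const struct queue *q)
{
	int refs = q->refs - q->io_refs - q->on_service_tree;

	assert(refs >= 0);
	return refs;
}

/* Merge in the direction of the lesser amount of work: the queue losing
 * its identity donates its process refs to the survivor. */
static void setup_merge(struct queue *q, struct queue *new_q)
{
	int p = process_refs(q), np = process_refs(new_q);

	if (p == 0 || np == 0)
		return;	/* a vanished process makes the merge pointless */
	if (np >= p) {
		q->merge_target = new_q;
		new_q->refs += p;
	} else {
		new_q->merge_target = q;
		q->refs += np;
	}
}

int main(void)
{
	struct queue a = { .refs = 3, .io_refs = 1, .on_service_tree = 1 };
	struct queue b = { .refs = 5, .io_refs = 1, .on_service_tree = 1 };

	setup_merge(&a, &b);	/* b has more process refs; a merges into b */
	printf("a -> b: %d, b refs now %d\n", a.merge_target == &b, b.refs);
	return 0;
}
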
+diff --git a/block/bfq-sched.c b/block/bfq-sched.c
+index c343099..d0890c6 100644
+--- a/block/bfq-sched.c
++++ b/block/bfq-sched.c
+@@ -1085,34 +1085,6 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
+ 	return bfqq;
+ }
+ 
+-/*
+- * Forced extraction of the given queue.
+- */
+-static void bfq_get_next_queue_forced(struct bfq_data *bfqd,
+-				      struct bfq_queue *bfqq)
+-{
+-	struct bfq_entity *entity;
+-	struct bfq_sched_data *sd;
+-
+-	BUG_ON(bfqd->in_service_queue != NULL);
+-
+-	entity = &bfqq->entity;
+-	/*
+-	 * Bubble up extraction/update from the leaf to the root.
+-	*/
+-	for_each_entity(entity) {
+-		sd = entity->sched_data;
+-		bfq_update_budget(entity);
+-		bfq_update_vtime(bfq_entity_service_tree(entity));
+-		bfq_active_extract(bfq_entity_service_tree(entity), entity);
+-		sd->in_service_entity = entity;
+-		sd->next_in_service = NULL;
+-		entity->service = 0;
+-	}
+-
+-	return;
+-}
+-
+ static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
+ {
+ 	if (bfqd->in_service_bic != NULL) {
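
The removed bfq_get_next_queue_forced() above shows the hierarchical half of the design: extraction is bubbled up from the leaf entity to the root, touching one scheduling entity per cgroup level. A toy sketch of that leaf-to-root walk, under the assumption of a simple parent pointer chain (the real for_each_entity() macro is not reproduced here):

#include <stdio.h>

/* Toy scheduling entity: one per level of the hierarchy. */
struct entity {
	const char *name;
	int service;
	struct entity *parent;
};

/* Walk leaf -> root like for_each_entity() did in the removed code:
 * every level is updated, then extracted from its service tree. */
static void extract_up_the_chain(struct entity *e)
{
	for (; e != NULL; e = e->parent) {
		/* budget/vtime updates would go here */
		e->service = 0;	/* entity starts a fresh service slot */
		printf("extracted %s\n", e->name);
	}
}

int main(void)
{
	struct entity root  = { "root",  10, NULL };
	struct entity group = { "group",  7, &root };
	struct entity leaf  = { "queue",  3, &group };

	extract_up_the_chain(&leaf);
	return 0;
}
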
+diff --git a/block/bfq.h b/block/bfq.h
+index 00feff7..96ffbf7 100644
+--- a/block/bfq.h
++++ b/block/bfq.h
+@@ -218,18 +218,21 @@ struct bfq_group;
+  *                      idle @bfq_queue with no outstanding requests, then
+  *                      the task associated with the queue it is deemed as
+  *                      soft real-time (see the comments to the function
+- *                      bfq_bfqq_softrt_next_start()).
++ *                      bfq_bfqq_softrt_next_start())
+  * @last_idle_bklogged: time of the last transition of the @bfq_queue from
+  *                      idle to backlogged
+  * @service_from_backlogged: cumulative service received from the @bfq_queue
+  *                           since the last transition from idle to
+  *                           backlogged
++ * @bic: pointer to the bfq_io_cq owning the bfq_queue, set to %NULL if the
++ *	 queue is shared
+  *
+- * A bfq_queue is a leaf request queue; it can be associated with an io_context
+- * or more, if it is async or shared between cooperating processes. @cgroup
+- * holds a reference to the cgroup, to be sure that it does not disappear while
+- * a bfqq still references it (mostly to avoid races between request issuing and
+- * task migration followed by cgroup destruction).
++ * A bfq_queue is a leaf request queue; it can be associated with an
++ * io_context or more, if it  is  async or shared  between  cooperating
++ * processes. @cgroup holds a reference to the cgroup, to be sure that it
++ * does not disappear while a bfqq still references it (mostly to avoid
++ * races between request issuing and task migration followed by cgroup
++ * destruction).
+  * All the fields are protected by the queue lock of the containing bfqd.
+  */
+ struct bfq_queue {
+@@ -269,6 +272,7 @@ struct bfq_queue {
+ 	unsigned int requests_within_timer;
+ 
+ 	pid_t pid;
++	struct bfq_io_cq *bic;
+ 
+ 	/* weight-raising fields */
+ 	unsigned long wr_cur_max_time;
+@@ -298,12 +302,42 @@ struct bfq_ttime {
+  * @icq: associated io_cq structure
+  * @bfqq: array of two process queues, the sync and the async
+  * @ttime: associated @bfq_ttime struct
++ * @wr_time_left: snapshot of the time left before weight raising ends
++ *                for the sync queue associated to this process; this
++ *		  snapshot is taken to remember this value while the weight
++ *		  raising is suspended because the queue is merged with a
++ *		  shared queue, and is used to set @raising_cur_max_time
++ *		  when the queue is split from the shared queue and its
++ *		  weight is raised again
++ * @saved_idle_window: same purpose as the previous field for the idle
++ *                     window
++ * @saved_IO_bound: same purpose as the previous two fields for the I/O
++ *                  bound classification of a queue
++ * @saved_in_large_burst: same purpose as the previous fields for the
++ *                        value of the field keeping the queue's belonging
++ *                        to a large burst
++ * @was_in_burst_list: true if the queue belonged to a burst list
++ *                     before its merge with another cooperating queue
++ * @cooperations: counter of consecutive successful queue merges underwent
++ *                by any of the process' @bfq_queues
++ * @failed_cooperations: counter of consecutive failed queue merges of any
++ *                       of the process' @bfq_queues
+  */
+ struct bfq_io_cq {
+ 	struct io_cq icq; /* must be the first member */
+ 	struct bfq_queue *bfqq[2];
+ 	struct bfq_ttime ttime;
+ 	int ioprio;
++
++	unsigned int wr_time_left;
++	bool saved_idle_window;
++	bool saved_IO_bound;
++
++	bool saved_in_large_burst;
++	bool was_in_burst_list;
++
++	unsigned int cooperations;
++	unsigned int failed_cooperations;
+ };
+ 
+ enum bfq_device_speed {
+@@ -536,7 +570,7 @@ enum bfqq_state_flags {
+ 	BFQ_BFQQ_FLAG_idle_window,	/* slice idling enabled */
+ 	BFQ_BFQQ_FLAG_sync,		/* synchronous queue */
+ 	BFQ_BFQQ_FLAG_budget_new,	/* no completion with this budget */
+-	BFQ_BFQQ_FLAG_IO_bound,         /*
++	BFQ_BFQQ_FLAG_IO_bound,		/*
+ 					 * bfqq has timed-out at least once
+ 					 * having consumed at most 2/10 of
+ 					 * its budget
+@@ -549,12 +583,13 @@ enum bfqq_state_flags {
+ 					 * bfqq has proved to be slow and
+ 					 * seeky until budget timeout
+ 					 */
+-	BFQ_BFQQ_FLAG_softrt_update,    /*
++	BFQ_BFQQ_FLAG_softrt_update,	/*
+ 					 * may need softrt-next-start
+ 					 * update
+ 					 */
+ 	BFQ_BFQQ_FLAG_coop,		/* bfqq is shared */
+-	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be splitted */
++	BFQ_BFQQ_FLAG_split_coop,	/* shared bfqq will be split */
++	BFQ_BFQQ_FLAG_just_split,	/* queue has just been split */
+ };
+ 
+ #define BFQ_BFQQ_FNS(name)						\
+@@ -583,6 +618,7 @@ BFQ_BFQQ_FNS(in_large_burst);
+ BFQ_BFQQ_FNS(constantly_seeky);
+ BFQ_BFQQ_FNS(coop);
+ BFQ_BFQQ_FNS(split_coop);
++BFQ_BFQQ_FNS(just_split);
+ BFQ_BFQQ_FNS(softrt_update);
+ #undef BFQ_BFQQ_FNS
+ 
+-- 
+2.1.4
+

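The bfq.h hunk above extends the BFQ_BFQQ_FNS() macro list with just_split; that macro stamps out a mark/clear/test helper triple per state flag from a single definition. A standalone sketch of the same generator trick, with illustrative names rather than the kernel's:

#include <stdio.h>

enum qflags { FLAG_coop, FLAG_split_coop, FLAG_just_split };

struct queue { unsigned int flags; };

/* One expansion per flag yields mark_/clear_/test helpers, the same
 * pattern as bfq.h's BFQ_BFQQ_FNS() (kernel version omitted here). */
#define QUEUE_FNS(name)							\
static void mark_queue_##name(struct queue *q)				\
{									\
	q->flags |= (1U << FLAG_##name);				\
}									\
static void clear_queue_##name(struct queue *q)				\
{									\
	q->flags &= ~(1U << FLAG_##name);				\
}									\
static int queue_##name(const struct queue *q)				\
{									\
	return (q->flags & (1U << FLAG_##name)) != 0;			\
}

QUEUE_FNS(coop)
QUEUE_FNS(just_split)
#undef QUEUE_FNS

int main(void)
{
	struct queue q = { 0 };

	mark_queue_just_split(&q);
	mark_queue_coop(&q);
	printf("just_split=%d coop=%d\n",
	       queue_just_split(&q), queue_coop(&q));
	clear_queue_just_split(&q);
	clear_queue_coop(&q);
	return 0;
}

Adding a flag then costs exactly two lines: one enum entry and one QUEUE_FNS() invocation, which is why the just_split addition in the hunk above is so small.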

^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-17 15:24 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-17 15:24 UTC (permalink / raw
  To: gentoo-commits

commit:     d5f2323178576f3fb33aa8881f5c6475085fea06
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 17 15:24:39 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 17 15:24:39 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d5f23231

Update for kdbus patch

 0000_README                                        |    2 +-
 ...kdbus-6-27-15.patch => 5015_kdbus-7-17-15.patch | 6808 ++++++++++++++------
 2 files changed, 4790 insertions(+), 2020 deletions(-)

diff --git a/0000_README b/0000_README
index 784c55d..43154ce 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,6 @@ Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
 
-Patch:  5015_kdbus-6-27-15.patch
+Patch:  5015_kdbus-7-17-15.patch
 From:   https://lkml.org
 Desc:   Kernel-level IPC implementation

diff --git a/5015_kdbus-6-27-15.patch b/5015_kdbus-7-17-15.patch
similarity index 86%
rename from 5015_kdbus-6-27-15.patch
rename to 5015_kdbus-7-17-15.patch
index bc17abe..61102dd 100644
--- a/5015_kdbus-6-27-15.patch
+++ b/5015_kdbus-7-17-15.patch
@@ -8,6 +8,19 @@ index bc05482..e2127a7 100644
 +	filesystems filesystems ia64 kdbus laptops mic misc-devices \
  	networking pcmcia prctl ptp spi timers vDSO video4linux \
  	watchdog
+diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+index f5a8ca2..750d577 100644
+--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
++++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+@@ -1,7 +1,7 @@
+ * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+ 
+ Required properties:
+-- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
++- compatible: should be "marvell,armada-370-neta".
+ - reg: address and length of the register set for the device.
+ - interrupts: interrupt for the device
+ - phy: See ethernet.txt file in the same directory.
 diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
 index 51f4221..ec7c81b 100644
 --- a/Documentation/ioctl/ioctl-number.txt
@@ -30,10 +43,10 @@ index 0000000..b4a77cc
 +*.html
 diff --git a/Documentation/kdbus/Makefile b/Documentation/kdbus/Makefile
 new file mode 100644
-index 0000000..af87641
+index 0000000..8caffe5
 --- /dev/null
 +++ b/Documentation/kdbus/Makefile
-@@ -0,0 +1,40 @@
+@@ -0,0 +1,44 @@
 +DOCS :=	\
 +	kdbus.xml		\
 +	kdbus.bus.xml		\
@@ -74,12 +87,16 @@ index 0000000..af87641
 +htmldocs: $(HTMLFILES)
 +
 +clean-files := $(MANFILES) $(HTMLFILES)
++
++# we don't support other %docs targets right now
++%docs:
++	@true
 diff --git a/Documentation/kdbus/kdbus.bus.xml b/Documentation/kdbus/kdbus.bus.xml
 new file mode 100644
-index 0000000..4b9a0ac
+index 0000000..83f1198
 --- /dev/null
 +++ b/Documentation/kdbus/kdbus.bus.xml
-@@ -0,0 +1,359 @@
+@@ -0,0 +1,344 @@
 +<?xml version='1.0'?> <!--*-nxml-*-->
 +<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
 +        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
@@ -280,21 +297,6 @@ index 0000000..4b9a0ac
 +            </varlistentry>
 +
 +            <varlistentry>
-+              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
-+              <listitem>
-+                <para>
-+                  An optional item that contains a set of required attach flags
-+                  that connections must allow. This item is used as a
-+                  negotiation measure during connection creation. If connections
-+                  do not satisfy the bus requirements, they are not allowed on
-+                  the bus. If not set, the bus does not require any metadata to
-+                  be attached; in this case connections are free to set their
-+                  own attach flags.
-+                </para>
-+              </listitem>
-+            </varlistentry>
-+
-+            <varlistentry>
 +              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
 +              <listitem>
 +                <para>
@@ -441,10 +443,10 @@ index 0000000..4b9a0ac
 +</refentry>
 diff --git a/Documentation/kdbus/kdbus.connection.xml b/Documentation/kdbus/kdbus.connection.xml
 new file mode 100644
-index 0000000..cefb419
+index 0000000..4bb5f30
 --- /dev/null
 +++ b/Documentation/kdbus/kdbus.connection.xml
-@@ -0,0 +1,1250 @@
+@@ -0,0 +1,1244 @@
 +<?xml version='1.0'?> <!--*-nxml-*-->
 +<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
 +        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
@@ -802,13 +804,7 @@ index 0000000..cefb419
 +          Set the bits for metadata this connection permits to be sent to the
 +          receiving peer. Only metadata items that are both allowed to be sent
 +          by the sender and that are requested by the receiver will be attached
-+          to the message. Note, however, that the bus may optionally require
-+          some of those bits to be set. If the match fails, the ioctl will fail
-+          with <varname>errno</varname> set to
-+          <constant>ECONNREFUSED</constant>. In either case, when returning the
-+          field will be set to the mask of metadata items that are enforced by
-+          the bus with the <constant>KDBUS_FLAGS_KERNEL</constant> bit set as
-+          well.
++          to the message.
 +        </para></listitem>
 +      </varlistentry>
 +
@@ -7474,9 +7470,20 @@ index d8afd29..02f7668 100644
  M:	Vivek Goyal <vgoyal@redhat.com>
  M:	Haren Myneni <hbabu@us.ibm.com>
 diff --git a/Makefile b/Makefile
-index f5c8983..a1c8d57 100644
+index cef84c0..a1c8d57 100644
 --- a/Makefile
 +++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 2
++SUBLEVEL = 0
+ EXTRAVERSION =
+-NAME = Series 4800
++NAME = Hurr durr I'ma sheep
+ 
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
 @@ -1343,6 +1343,7 @@ $(help-board-dirs): help-%:
  %docs: scripts_basic FORCE
  	$(Q)$(MAKE) $(build)=scripts build_docproc
@@ -7485,6 +7492,2075 @@ index f5c8983..a1c8d57 100644
  
  else # KBUILD_EXTMOD
  
+diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
+index 06a2f2a..ec96f0b 100644
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -270,6 +270,7 @@
+ 			};
+ 
+ 			eth0: ethernet@70000 {
++				compatible = "marvell,armada-370-neta";
+ 				reg = <0x70000 0x4000>;
+ 				interrupts = <8>;
+ 				clocks = <&gateclk 4>;
+@@ -285,6 +286,7 @@
+ 			};
+ 
+ 			eth1: ethernet@74000 {
++				compatible = "marvell,armada-370-neta";
+ 				reg = <0x74000 0x4000>;
+ 				interrupts = <10>;
+ 				clocks = <&gateclk 3>;
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index ca4257b..00b50db5 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -307,14 +307,6 @@
+ 					dmacap,memset;
+ 				};
+ 			};
+-
+-			ethernet@70000 {
+-				compatible = "marvell,armada-370-neta";
+-			};
+-
+-			ethernet@74000 {
+-				compatible = "marvell,armada-370-neta";
+-			};
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index c5fdc99..8479fdc 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -318,7 +318,7 @@
+ 			};
+ 
+ 			eth3: ethernet@34000 {
+-				compatible = "marvell,armada-xp-neta";
++				compatible = "marvell,armada-370-neta";
+ 				reg = <0x34000 0x4000>;
+ 				interrupts = <14>;
+ 				clocks = <&gateclk 1>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+index 0e24f1a..661d54c 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+@@ -356,7 +356,7 @@
+ 			};
+ 
+ 			eth3: ethernet@34000 {
+-				compatible = "marvell,armada-xp-neta";
++				compatible = "marvell,armada-370-neta";
+ 				reg = <0x34000 0x4000>;
+ 				interrupts = <14>;
+ 				clocks = <&gateclk 1>;
+diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
+index 8fdd6d7..013d63f 100644
+--- a/arch/arm/boot/dts/armada-xp.dtsi
++++ b/arch/arm/boot/dts/armada-xp.dtsi
+@@ -177,7 +177,7 @@
+ 			};
+ 
+ 			eth2: ethernet@30000 {
+-				compatible = "marvell,armada-xp-neta";
++				compatible = "marvell,armada-370-neta";
+ 				reg = <0x30000 0x4000>;
+ 				interrupts = <12>;
+ 				clocks = <&gateclk 2>;
+@@ -220,14 +220,6 @@
+ 				};
+ 			};
+ 
+-			ethernet@70000 {
+-				compatible = "marvell,armada-xp-neta";
+-			};
+-
+-			ethernet@74000 {
+-				compatible = "marvell,armada-xp-neta";
+-			};
+-
+ 			xor@f0900 {
+ 				compatible = "marvell,orion-xor";
+ 				reg = <0xF0900 0x100
+diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
+index 3794ca1..2fd8988 100644
+--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
++++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
+@@ -573,7 +573,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun5i-a13-ts";
++			compatible = "allwinner,sun4i-a10-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <29>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
+index 5098185..883cb48 100644
+--- a/arch/arm/boot/dts/sun5i-a13.dtsi
++++ b/arch/arm/boot/dts/sun5i-a13.dtsi
+@@ -555,7 +555,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun5i-a13-ts";
++			compatible = "allwinner,sun4i-a10-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <29>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index 2b4847c..fdd1817 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -1042,7 +1042,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun5i-a13-ts";
++			compatible = "allwinner,sun4i-a10-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index f7db3a5..79caf79 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -170,9 +170,13 @@ __kvm_vcpu_return:
+ 	@ Don't trap coprocessor accesses for host kernel
+ 	set_hstr vmexit
+ 	set_hdcr vmexit
+-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
++	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
+ 
+ #ifdef CONFIG_VFPv3
++	@ Save floating point registers we if let guest use them.
++	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
++	bne	after_vfp_restore
++
+ 	@ Switch VFP/NEON hardware state to the host's
+ 	add	r7, vcpu, #VCPU_VFP_GUEST
+ 	store_vfp_state r7
+@@ -184,8 +188,6 @@ after_vfp_restore:
+ 	@ Restore FPEXC_EN which we clobbered on entry
+ 	pop	{r2}
+ 	VFPFMXR FPEXC, r2
+-#else
+-after_vfp_restore:
+ #endif
+ 
+ 	@ Reset Hyp-role
+@@ -481,7 +483,7 @@ switch_to_guest_vfp:
+ 	push	{r3-r7}
+ 
+ 	@ NEON/VFP used.  Turn on VFP access.
+-	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
++	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
+ 
+ 	@ Switch VFP/NEON hardware state to the guest's
+ 	add	r7, r0, #VCPU_VFP_HOST
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 48efe2e..35e4a3a 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -591,13 +591,8 @@ ARM_BE8(rev	r6, r6  )
+ .endm
+ 
+ /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+- * (hardware reset value is 0). Keep previous value in r2.
+- * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if
+- * VFP wasn't already enabled (always executed on vmtrap).
+- * If a label is specified with vmexit, it is branched to if VFP wasn't
+- * enabled.
+- */
+-.macro set_hcptr operation, mask, label = none
++ * (hardware reset value is 0). Keep previous value in r2. */
++.macro set_hcptr operation, mask
+ 	mrc	p15, 4, r2, c1, c1, 2
+ 	ldr	r3, =\mask
+ 	.if \operation == vmentry
+@@ -606,17 +601,6 @@ ARM_BE8(rev	r6, r6  )
+ 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
+ 	.endif
+ 	mcr	p15, 4, r3, c1, c1, 2
+-	.if \operation != vmentry
+-	.if \operation == vmexit
+-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+-	beq	1f
+-	.endif
+-	isb
+-	.if \label != none
+-	b	\label
+-	.endif
+-1:
+-	.endif
+ .endm
+ 
+ /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index 531e922..02fa8ef 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -230,6 +230,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 	case PSCI_0_2_FN64_AFFINITY_INFO:
+ 		val = kvm_psci_vcpu_affinity_info(vcpu);
+ 		break;
++	case PSCI_0_2_FN_MIGRATE:
++	case PSCI_0_2_FN64_MIGRATE:
++		val = PSCI_RET_NOT_SUPPORTED;
++		break;
+ 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ 		/*
+ 		 * Trusted OS is MP hence does not require migration
+@@ -238,6 +242,10 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		 */
+ 		val = PSCI_0_2_TOS_MP;
+ 		break;
++	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
++	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
++		val = PSCI_RET_NOT_SUPPORTED;
++		break;
+ 	case PSCI_0_2_FN_SYSTEM_OFF:
+ 		kvm_psci_system_off(vcpu);
+ 		/*
+@@ -263,8 +271,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		ret = 0;
+ 		break;
+ 	default:
+-		val = PSCI_RET_NOT_SUPPORTED;
+-		break;
++		return -EINVAL;
+ 	}
+ 
+ 	*vcpu_reg(vcpu, 0) = val;
+@@ -284,9 +291,12 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ 	case KVM_PSCI_FN_CPU_ON:
+ 		val = kvm_psci_vcpu_on(vcpu);
+ 		break;
+-	default:
++	case KVM_PSCI_FN_CPU_SUSPEND:
++	case KVM_PSCI_FN_MIGRATE:
+ 		val = PSCI_RET_NOT_SUPPORTED;
+ 		break;
++	default:
++		return -EINVAL;
+ 	}
+ 
+ 	*vcpu_reg(vcpu, 0) = val;
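
The psci.c hunk above changes how the dispatcher reacts to function IDs: known-but-unimplemented calls report PSCI_RET_NOT_SUPPORTED to the guest in r0, while IDs the dispatcher does not recognize at all fail the call itself with -EINVAL. A minimal sketch of that two-level split, using made-up constants in place of the real uapi/linux/psci.h values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative IDs and return codes, not the real PSCI values. */
#define FN_CPU_ON		1
#define FN_MIGRATE		2
#define FN_UNKNOWN		0x7f
#define RET_SUCCESS		0
#define RET_NOT_SUPPORTED	(-1)

/* Returns 0 and fills *r0 for calls handled here; an -EINVAL-style error
 * for function IDs the dispatcher does not know, mirroring the split in
 * kvm_psci_0_2_call() above. */
static int psci_call(uint32_t fn, long *r0)
{
	switch (fn) {
	case FN_CPU_ON:
		*r0 = RET_SUCCESS;		/* pretend a CPU came up */
		break;
	case FN_MIGRATE:
		*r0 = RET_NOT_SUPPORTED;	/* known, unimplemented */
		break;
	default:
		return -22;			/* unknown function: -EINVAL */
	}
	return 0;
}

int main(void)
{
	long r0;

	printf("cpu_on  -> %d r0=%ld\n", psci_call(FN_CPU_ON, &r0), r0);
	printf("migrate -> %d r0=%ld\n", psci_call(FN_MIGRATE, &r0), r0);
	printf("unknown -> %d\n", psci_call(FN_UNKNOWN, &r0));
	return 0;
}
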
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index a2e8ef3..469a150 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -443,7 +443,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	clk[IMX6QDL_CLK_GPMI_IO]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
+ 	clk[IMX6QDL_CLK_GPMI_APB]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+ 	clk[IMX6QDL_CLK_ROM]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
+-	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ahb",               base + 0x7c, 4);
++	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
+ 	clk[IMX6QDL_CLK_SDMA]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
+ 	clk[IMX6QDL_CLK_SPBA]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
+ 	clk[IMX6QDL_CLK_SPDIF]        = imx_clk_gate2("spdif",         "spdif_podf",        base + 0x7c, 14);
+diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
+index 301ab38..6dfd4ab 100644
+--- a/arch/arm/mach-mvebu/pm-board.c
++++ b/arch/arm/mach-mvebu/pm-board.c
+@@ -43,9 +43,6 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
+ 	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
+ 		ackcmd |= BIT(pic_raw_gpios[i]);
+ 
+-	srcmd = cpu_to_le32(srcmd);
+-	ackcmd = cpu_to_le32(ackcmd);
+-
+ 	/*
+ 	 * Wait a while, the PIC needs quite a bit of time between the
+ 	 * two GPIO commands.
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index 7469347..88de2dc 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -34,7 +34,6 @@
+ #include "iomap.h"
+ #include "irq.h"
+ #include "pm.h"
+-#include "reset.h"
+ #include "sleep.h"
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -71,13 +70,15 @@ static struct cpuidle_driver tegra_idle_driver = {
+ 
+ #ifdef CONFIG_PM_SLEEP
+ #ifdef CONFIG_SMP
++static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
++
+ static int tegra20_reset_sleeping_cpu_1(void)
+ {
+ 	int ret = 0;
+ 
+ 	tegra_pen_lock();
+ 
+-	if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
++	if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
+ 		tegra20_cpu_shutdown(1);
+ 	else
+ 		ret = -EINVAL;
+diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
+index e3070fd..71be4af 100644
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -169,10 +169,10 @@ after_errata:
+ 	cmp	r6, #TEGRA20
+ 	bne	1f
+ 	/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
+-	mov32	r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
+-	mov	r0, #CPU_NOT_RESETTABLE
++	mov32	r5, TEGRA_PMC_BASE
++	mov	r0, #0
+ 	cmp	r10, #0
+-	strneb	r0, [r5, #__tegra20_cpu1_resettable_status_offset]
++	strne	r0, [r5, #PMC_SCRATCH41]
+ 1:
+ #endif
+ 
+@@ -281,10 +281,6 @@ __tegra_cpu_reset_handler_data:
+ 	.rept	TEGRA_RESET_DATA_SIZE
+ 	.long	0
+ 	.endr
+-	.globl	__tegra20_cpu1_resettable_status_offset
+-	.equ	__tegra20_cpu1_resettable_status_offset, \
+-					. - __tegra_cpu_reset_handler_start
+-	.byte	0
+ 	.align L1_CACHE_SHIFT
+ 
+ ENTRY(__tegra_cpu_reset_handler_end)
+diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
+index 29c3dec..76a9343 100644
+--- a/arch/arm/mach-tegra/reset.h
++++ b/arch/arm/mach-tegra/reset.h
+@@ -35,7 +35,6 @@ extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
+ 
+ void __tegra_cpu_reset_handler_start(void);
+ void __tegra_cpu_reset_handler(void);
+-void __tegra20_cpu1_resettable_status_offset(void);
+ void __tegra_cpu_reset_handler_end(void);
+ void tegra_secondary_startup(void);
+ 
+@@ -48,9 +47,6 @@ void tegra_secondary_startup(void);
+ 	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
+ 	((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
+ 	 (u32)__tegra_cpu_reset_handler_start)))
+-#define tegra20_cpu1_resettable_status \
+-	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
+-	 (u32)__tegra20_cpu1_resettable_status_offset))
+ #endif
+ 
+ #define tegra_cpu_reset_handler_offset \
+diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
+index e6b684e..be4bc5f 100644
+--- a/arch/arm/mach-tegra/sleep-tegra20.S
++++ b/arch/arm/mach-tegra/sleep-tegra20.S
+@@ -97,10 +97,9 @@ ENDPROC(tegra20_hotplug_shutdown)
+ ENTRY(tegra20_cpu_shutdown)
+ 	cmp	r0, #0
+ 	reteq	lr			@ must not be called for CPU 0
+-	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r2, =__tegra20_cpu1_resettable_status_offset
++	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ 	mov	r12, #CPU_RESETTABLE
+-	strb	r12, [r1, r2]
++	str	r12, [r1]
+ 
+ 	cpu_to_halt_reg r1, r0
+ 	ldr	r3, =TEGRA_FLOW_CTRL_VIRT
+@@ -183,41 +182,38 @@ ENDPROC(tegra_pen_unlock)
+ /*
+  * tegra20_cpu_clear_resettable(void)
+  *
+- * Called to clear the "resettable soon" flag in IRAM variable when
++ * Called to clear the "resettable soon" flag in PMC_SCRATCH41 when
+  * it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_clear_resettable)
+-	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r2, =__tegra20_cpu1_resettable_status_offset
++	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ 	mov	r12, #CPU_NOT_RESETTABLE
+-	strb	r12, [r1, r2]
++	str	r12, [r1]
+ 	ret	lr
+ ENDPROC(tegra20_cpu_clear_resettable)
+ 
+ /*
+  * tegra20_cpu_set_resettable_soon(void)
+  *
+- * Called to set the "resettable soon" flag in IRAM variable when
++ * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
+  * it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_set_resettable_soon)
+-	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r2, =__tegra20_cpu1_resettable_status_offset
++	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ 	mov	r12, #CPU_RESETTABLE_SOON
+-	strb	r12, [r1, r2]
++	str	r12, [r1]
+ 	ret	lr
+ ENDPROC(tegra20_cpu_set_resettable_soon)
+ 
+ /*
+  * tegra20_cpu_is_resettable_soon(void)
+  *
+- * Returns true if the "resettable soon" flag in IRAM variable has been
++ * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
+  * set because it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_is_resettable_soon)
+-	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r2, =__tegra20_cpu1_resettable_status_offset
+-	ldrb	r12, [r1, r2]
++	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
++	ldr	r12, [r1]
+ 	cmp	r12, #CPU_RESETTABLE_SOON
+ 	moveq	r0, #1
+ 	movne	r0, #0
+@@ -260,10 +256,9 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
+ 	mov	r0, #TEGRA_FLUSH_CACHE_LOUIS
+ 	bl	tegra_disable_clean_inv_dcache
+ 
+-	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r4, =__tegra20_cpu1_resettable_status_offset
++	mov32	r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
+ 	mov	r3, #CPU_RESETTABLE
+-	strb	r3, [r0, r4]
++	str	r3, [r0]
+ 
+ 	bl	tegra_cpu_do_idle
+ 
+@@ -279,10 +274,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
+ 
+ 	bl	tegra_pen_lock
+ 
+-	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
+-	ldr	r4, =__tegra20_cpu1_resettable_status_offset
++	mov32	r3, TEGRA_PMC_VIRT
++	add	r0, r3, #PMC_SCRATCH41
+ 	mov	r3, #CPU_NOT_RESETTABLE
+-	strb	r3, [r0, r4]
++	str	r3, [r0]
+ 
+ 	bl	tegra_pen_unlock
+ 
+diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
+index 0d59360..92d46ec 100644
+--- a/arch/arm/mach-tegra/sleep.h
++++ b/arch/arm/mach-tegra/sleep.h
+@@ -18,7 +18,6 @@
+ #define __MACH_TEGRA_SLEEP_H
+ 
+ #include "iomap.h"
+-#include "irammap.h"
+ 
+ #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS \
+ 					+ IO_CPU_VIRT)
+@@ -30,9 +29,6 @@
+ 					+ IO_APB_VIRT)
+ #define TEGRA_PMC_VIRT	(TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
+ 
+-#define TEGRA_IRAM_RESET_BASE_VIRT (IO_IRAM_VIRT + \
+-				TEGRA_IRAM_RESET_HANDLER_OFFSET)
+-
+ /* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock and idle */
+ #define PMC_SCRATCH37	0x130
+ #define PMC_SCRATCH38	0x134
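
The Tegra hunks above move the CPU1 "resettable" flag between an IRAM byte and the PMC_SCRATCH41 register, but the protocol is the same either way: the secondary CPU publishes its state at a fixed shared location, and CPU0 polls that location before deciding to reset it. A user-space sketch of the handshake with C11 atomics standing in for the fixed hardware cell:

#include <stdatomic.h>
#include <stdio.h>

enum { CPU_NOT_RESETTABLE, CPU_RESETTABLE_SOON, CPU_RESETTABLE };

/* Stand-in for the IRAM byte / PMC scratch register both CPUs agree on. */
static _Atomic int cpu1_status = CPU_NOT_RESETTABLE;

/* Secondary CPU: advertise that it is safe to reset us. */
static void cpu1_mark_resettable(void)
{
	atomic_store(&cpu1_status, CPU_RESETTABLE);
}

/* CPU0: only shut CPU1 down if it volunteered, the check
 * tegra20_reset_sleeping_cpu_1() performs under tegra_pen_lock(). */
static int cpu0_try_reset_cpu1(void)
{
	if (atomic_load(&cpu1_status) != CPU_RESETTABLE)
		return -1;	/* not idle yet, back off */
	/* ...the actual reset would be issued here... */
	atomic_store(&cpu1_status, CPU_NOT_RESETTABLE);
	return 0;
}

int main(void)
{
	printf("before: %d\n", cpu0_try_reset_cpu1());	/* refuses */
	cpu1_mark_resettable();
	printf("after:  %d\n", cpu0_try_reset_cpu1());	/* succeeds */
	return 0;
}
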
+diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
+index afc96ec..9488fa5 100644
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -94,11 +94,7 @@
+ #endif
+ 
+ #ifndef FIXADDR_TOP
+-#ifdef CONFIG_KVM_GUEST
+-#define FIXADDR_TOP		((unsigned long)(long)(int)0x7ffe0000)
+-#else
+ #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
+ #endif
+-#endif
+ 
+ #endif /* __ASM_MACH_GENERIC_SPACES_H */
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index 52f205a..bb68e8d 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -982,7 +982,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+ 
+ 	/* If nothing is dirty, don't bother messing with page tables. */
+ 	if (is_dirty) {
+-		memslot = id_to_memslot(kvm->memslots, log->slot);
++		memslot = &kvm->memslots->memslots[log->slot];
+ 
+ 		ga = memslot->base_gfn << PAGE_SHIFT;
+ 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index d90893b..12b6384 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -131,16 +131,7 @@ static void pmao_restore_workaround(bool ebb) { }
+ 
+ static bool regs_use_siar(struct pt_regs *regs)
+ {
+-	/*
+-	 * When we take a performance monitor exception the regs are setup
+-	 * using perf_read_regs() which overloads some fields, in particular
+-	 * regs->result to tell us whether to use SIAR.
+-	 *
+-	 * However if the regs are from another exception, eg. a syscall, then
+-	 * they have not been setup using perf_read_regs() and so regs->result
+-	 * is something random.
+-	 */
+-	return ((TRAP(regs) == 0xf00) && regs->result);
++	return !!regs->result;
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 49b7445..9f73c80 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -415,7 +415,7 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
+ 	ptr += len;
+ 	/* Copy lower halves of SIMD registers 0-15 */
+ 	for (i = 0; i < 16; i++) {
+-		memcpy(ptr, &vx_regs[i].u[2], 8);
++		memcpy(ptr, &vx_regs[i], 8);
+ 		ptr += 8;
+ 	}
+ 	return ptr;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index b745a10..9de4726 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+ 	if (sclp_has_sigpif())
+ 		return __inject_extcall_sigpif(vcpu, src_id);
+ 
+-	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
++	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+ 		return -EBUSY;
+ 	*extcall = irq->u.extcall;
+ 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+@@ -1606,9 +1606,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ 	int i;
+ 
+ 	spin_lock(&fi->lock);
+-	fi->pending_irqs = 0;
+-	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
+-	memset(&fi->mchk, 0, sizeof(fi->mchk));
+ 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ 		clear_irq_list(&fi->lists[i]);
+ 	for (i = 0; i < FIRQ_MAX_COUNT; i++)
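
The interrupt.c hunk above flips the sense of the test_and_set_bit() check in __inject_extcall(); whichever sense a given tree uses, the underlying primitive is the atomic claim: set a pending bit and let the returned old value tell the caller whether someone beat them to it, so the interrupt payload is stored exactly once. A sketch of that claim pattern with a C11 stand-in for the kernel helper:

#include <stdatomic.h>
#include <stdio.h>

#define IRQ_PEND_EXT_EXTERNAL 5	/* illustrative bit number */

/* Set the bit and report whether it was already set, like the kernel's
 * test_and_set_bit(). */
static int test_and_set_bit_ul(int nr, _Atomic unsigned long *word)
{
	unsigned long mask = 1UL << nr;

	return (atomic_fetch_or(word, mask) & mask) != 0;
}

static _Atomic unsigned long pending_irqs;

/* Only the caller that actually flipped the bit may queue the payload;
 * a second injection while one is pending gets -EBUSY. */
static int inject_extcall(void)
{
	if (test_and_set_bit_ul(IRQ_PEND_EXT_EXTERNAL, &pending_irqs))
		return -16;	/* -EBUSY: already pending */
	/* ...store the extcall parameters here, exactly once... */
	return 0;
}

int main(void)
{
	printf("first:  %d\n", inject_extcall());
	printf("second: %d\n", inject_extcall());
	return 0;
}
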
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 9afb9d6..55423d8 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -227,7 +227,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ ({								\
+ 	/* Branch instruction needs 6 bytes */			\
+ 	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
+-	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
++	_EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask);	\
+ 	REG_SET_SEEN(b1);					\
+ 	REG_SET_SEEN(b2);					\
+ })
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 41b06fc..f4a555b 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -591,7 +591,7 @@ struct kvm_arch {
+ 	struct kvm_pic *vpic;
+ 	struct kvm_ioapic *vioapic;
+ 	struct kvm_pit *vpit;
+-	atomic_t vapics_in_nmi_mode;
++	int vapics_in_nmi_mode;
+ 	struct mutex apic_map_lock;
+ 	struct kvm_apic_map *apic_map;
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index aa4e3a7..4f7001f 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -270,7 +270,11 @@ msr_fail:
+ 
+ static void hw_perf_event_destroy(struct perf_event *event)
+ {
+-	x86_release_hardware();
++	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
++		release_pmc_hardware();
++		release_ds_buffers();
++		mutex_unlock(&pmc_reserve_mutex);
++	}
+ }
+ 
+ void hw_perf_lbr_event_destroy(struct perf_event *event)
+@@ -320,35 +324,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+ 	return x86_pmu_extra_regs(val, event);
+ }
+ 
+-int x86_reserve_hardware(void)
+-{
+-	int err = 0;
+-
+-	if (!atomic_inc_not_zero(&active_events)) {
+-		mutex_lock(&pmc_reserve_mutex);
+-		if (atomic_read(&active_events) == 0) {
+-			if (!reserve_pmc_hardware())
+-				err = -EBUSY;
+-			else
+-				reserve_ds_buffers();
+-		}
+-		if (!err)
+-			atomic_inc(&active_events);
+-		mutex_unlock(&pmc_reserve_mutex);
+-	}
+-
+-	return err;
+-}
+-
+-void x86_release_hardware(void)
+-{
+-	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
+-		release_pmc_hardware();
+-		release_ds_buffers();
+-		mutex_unlock(&pmc_reserve_mutex);
+-	}
+-}
+-
+ /*
+  * Check if we can create event of a certain type (that no conflicting events
+  * are present).
+@@ -361,10 +336,9 @@ int x86_add_exclusive(unsigned int what)
+ 		return 0;
+ 
+ 	mutex_lock(&pmc_reserve_mutex);
+-	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
++	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
+ 		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+ 			goto out;
+-	}
+ 
+ 	atomic_inc(&x86_pmu.lbr_exclusive[what]);
+ 	ret = 0;
+@@ -553,7 +527,19 @@ static int __x86_pmu_event_init(struct perf_event *event)
+ 	if (!x86_pmu_initialized())
+ 		return -ENODEV;
+ 
+-	err = x86_reserve_hardware();
++	err = 0;
++	if (!atomic_inc_not_zero(&active_events)) {
++		mutex_lock(&pmc_reserve_mutex);
++		if (atomic_read(&active_events) == 0) {
++			if (!reserve_pmc_hardware())
++				err = -EBUSY;
++			else
++				reserve_ds_buffers();
++		}
++		if (!err)
++			atomic_inc(&active_events);
++		mutex_unlock(&pmc_reserve_mutex);
++	}
+ 	if (err)
+ 		return err;
+ 
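
The perf_event.c hunk above inlines the reserve/release pattern that x86_reserve_hardware()/x86_release_hardware() had factored out: an atomic_inc_not_zero() fast path for the common case, a mutex-guarded slow path for the one-time hardware reservation, and a release that tears everything down on the last reference. A self-contained sketch of the same pattern, with stubbed hardware hooks and a simplification noted in the comments:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int active_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

static int reserve_pmc_hardware(void) { return 1; }	/* stub: success */
static void release_pmc_hardware(void) { }

/* Fast path: bump the count if it is already non-zero. Slow path: take
 * the mutex, do the one-time reservation, publish the first reference. */
static int reserve_hardware(void)
{
	int cur = atomic_load(&active_events);
	int err = 0;

	/* atomic_inc_not_zero() equivalent */
	while (cur != 0) {
		if (atomic_compare_exchange_weak(&active_events,
						 &cur, cur + 1))
			return 0;
	}

	pthread_mutex_lock(&reserve_mutex);
	if (atomic_load(&active_events) == 0 && !reserve_pmc_hardware())
		err = -16;	/* -EBUSY */
	if (!err)
		atomic_fetch_add(&active_events, 1);
	pthread_mutex_unlock(&reserve_mutex);
	return err;
}

/* Simplified release: the kernel's atomic_dec_and_mutex_lock() only takes
 * the mutex when the count is about to hit zero; we always lock here. */
static void release_hardware(void)
{
	pthread_mutex_lock(&reserve_mutex);
	if (atomic_fetch_sub(&active_events, 1) == 1)
		release_pmc_hardware();
	pthread_mutex_unlock(&reserve_mutex);
}

int main(void)
{
	printf("reserve: %d (count %d)\n", reserve_hardware(),
	       atomic_load(&active_events));
	release_hardware();
	return 0;
}
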
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index f068695..ef78516 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -703,10 +703,6 @@ int x86_add_exclusive(unsigned int what);
+ 
+ void x86_del_exclusive(unsigned int what);
+ 
+-int x86_reserve_hardware(void);
+-
+-void x86_release_hardware(void);
+-
+ void hw_perf_lbr_event_destroy(struct perf_event *event);
+ 
+ int x86_setup_perfctr(struct perf_event *event);
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index 2813ea0..a1e35c9 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -3253,8 +3253,6 @@ __init int intel_pmu_init(void)
+ 
+ 	case 61: /* 14nm Broadwell Core-M */
+ 	case 86: /* 14nm Broadwell Xeon D */
+-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+-	case 79: /* 14nm Broadwell Server */
+ 		x86_pmu.late_ack = true;
+ 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+@@ -3324,13 +3322,13 @@ __init int intel_pmu_init(void)
+ 		 * counter, so do not extend mask to generic counters
+ 		 */
+ 		for_each_event_constraint(c, x86_pmu.event_constraints) {
+-			if (c->cmask == FIXED_EVENT_FLAGS
+-			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+-				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
++			if (c->cmask != FIXED_EVENT_FLAGS
++			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
++				continue;
+ 			}
+-			c->idxmsk64 &=
+-				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+-			c->weight = hweight64(c->idxmsk64);
++
++			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
++			c->weight += x86_pmu.num_counters;
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
+index 7795f3f..ac1f0c5 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
+@@ -483,26 +483,17 @@ static int bts_event_add(struct perf_event *event, int mode)
+ 
+ static void bts_event_destroy(struct perf_event *event)
+ {
+-	x86_release_hardware();
+ 	x86_del_exclusive(x86_lbr_exclusive_bts);
+ }
+ 
+ static int bts_event_init(struct perf_event *event)
+ {
+-	int ret;
+-
+ 	if (event->attr.type != bts_pmu.type)
+ 		return -ENOENT;
+ 
+ 	if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ 		return -EBUSY;
+ 
+-	ret = x86_reserve_hardware();
+-	if (ret) {
+-		x86_del_exclusive(x86_lbr_exclusive_bts);
+-		return ret;
+-	}
+-
+ 	event->destroy = bts_event_destroy;
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 7e429c9..53eeb22 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -62,16 +62,9 @@
+ #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+ #endif
+ 
+-/*
+- * Number of possible pages in the lowmem region.
+- *
+- * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
+- * gas warning about overflowing shift count when gas has been compiled
+- * with only a host target support using a 32-bit type for internal
+- * representation.
+- */
+-LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
+-
++/* Number of possible pages in the lowmem region */
++LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
++	
+ /* Enough space to fit pagetables for the low memory linear map */
+ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
+ 
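The head_32.S hunk above trades (1<<32) for (2<<31) because a shift count equal to the operand width overflows when the assembler's internal arithmetic type is 32 bits wide. The same hazard exists in C, where a quick demonstration shows the two safe spellings of 4 GiB agree once the arithmetic is forced to 64 bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1 << 32 on a 32-bit int is undefined (shift count == width).
	 * Promoting to 64 bits first, or writing 2 << 31 so the largest
	 * shift stays below the width, sidesteps it -- the same dodge
	 * the head_32.S hunk applies to gas arithmetic. */
	uint64_t four_gib_a = (uint64_t)1 << 32;
	uint64_t four_gib_b = (uint64_t)2 << 31;

	printf("%llu %llu\n",
	       (unsigned long long)four_gib_a,
	       (unsigned long long)four_gib_b);	/* both 4294967296 */
	return 0;
}
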
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index f90952f..4dce6f8 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
+ 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
+ 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
+ 		 */
+-		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
++		if (kvm->arch.vapics_in_nmi_mode > 0)
+ 			kvm_for_each_vcpu(i, vcpu, kvm)
+ 				kvm_apic_nmi_wd_deliver(vcpu);
+ 	}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 67d07e0..4c7deb4 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1250,10 +1250,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
+ 		if (!nmi_wd_enabled) {
+ 			apic_debug("Receive NMI setting on APIC_LVT0 "
+ 				   "for cpu %d\n", apic->vcpu->vcpu_id);
+-			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
++			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
+ 		}
+ 	} else if (nmi_wd_enabled)
+-		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
++		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
+ }
+ 
+ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+@@ -1808,7 +1808,6 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
+ 	apic_update_ppr(apic);
+ 	hrtimer_cancel(&apic->lapic_timer.timer);
+ 	apic_update_lvtt(apic);
+-	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
+ 	update_divide_count(apic);
+ 	start_apic_timer(apic);
+ 	apic->irr_pending = true;
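
The i8254/lapic hunks above keep a count of how many local APICs have their LVT0 in NMI delivery mode, so the PIT tick can skip the expensive walk over every vCPU when nobody has opted in. A sketch of that cheap counter gate, using a C11 atomic in place of the kernel type:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int vapics_in_nmi_mode;

/* Called when a vCPU flips its LVT0 into or out of NMI delivery mode,
 * as apic_manage_nmi_watchdog() does above. */
static void set_nmi_mode(int enable)
{
	if (enable)
		atomic_fetch_add(&vapics_in_nmi_mode, 1);
	else
		atomic_fetch_sub(&vapics_in_nmi_mode, 1);
}

/* PIT tick: deliver the watchdog NMI only if at least one vCPU opted in,
 * the gate pit_do_work() checks before walking every vCPU. */
static void pit_tick(void)
{
	if (atomic_load(&vapics_in_nmi_mode) > 0)
		printf("broadcast NMI to vcpus\n");
	else
		printf("no vcpu in NMI mode, skip\n");
}

int main(void)
{
	pit_tick();
	set_nmi_mode(1);
	pit_tick();
	set_nmi_mode(0);
	return 0;
}
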
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 4911bf1..9afa233 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -511,10 +511,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	if (svm->vmcb->control.next_rip != 0) {
+-		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
++	if (svm->vmcb->control.next_rip != 0)
+ 		svm->next_rip = svm->vmcb->control.next_rip;
+-	}
+ 
+ 	if (!svm->next_rip) {
+ 		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+@@ -4319,9 +4317,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
+ 		break;
+ 	}
+ 
+-	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
+-	if (static_cpu_has(X86_FEATURE_NRIPS))
+-		vmcb->control.next_rip  = info->next_rip;
++	vmcb->control.next_rip  = info->next_rip;
+ 	vmcb->control.exit_code = icpt_info.exit_code;
+ 	vmexit = nested_svm_exit_handled(svm);
+ 
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index ff99117..14a63ed 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -81,17 +81,6 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ 		},
+ 	},
+-	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
+-	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
+-	{
+-		.callback = set_use_crs,
+-		.ident = "Foxconn K8M890-8237A",
+-		.matches = {
+-			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
+-			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
+-			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+-		},
+-	},
+ 
+ 	/* Now for the blacklist.. */
+ 
+@@ -132,10 +121,8 @@ void __init pci_acpi_crs_quirks(void)
+ {
+ 	int year;
+ 
+-	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
+-		if (iomem_resource.end <= 0xffffffff)
+-			pci_use_crs = false;
+-	}
++	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
++		pci_use_crs = false;
+ 
+ 	dmi_check_system(pci_crs_quirks);
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index e527a3e..8c81af6 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -80,7 +80,6 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe057) },
+ 	{ USB_DEVICE(0x0489, 0xe056) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
+-	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+@@ -89,7 +88,6 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
+-	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+@@ -115,7 +113,6 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
+-	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -140,7 +137,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -149,7 +145,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+@@ -175,7 +170,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 420cc9f..3c10d4d 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -178,7 +178,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -187,7 +186,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+@@ -213,7 +211,6 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
+-	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index c45d274..6414661 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -535,7 +535,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ 
+ 	val |= vid;
+ 
+-	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
++	wrmsrl(MSR_IA32_PERF_CTL, val);
+ }
+ 
+ #define BYT_BCLK_FREQS 5
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 3442764..5937207 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -60,8 +60,6 @@ static int nap_loop(struct cpuidle_device *dev,
+ 	return index;
+ }
+ 
+-/* Register for fastsleep only in oneshot mode of broadcast */
+-#ifdef CONFIG_TICK_ONESHOT
+ static int fastsleep_loop(struct cpuidle_device *dev,
+ 				struct cpuidle_driver *drv,
+ 				int index)
+@@ -85,7 +83,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
+ 
+ 	return index;
+ }
+-#endif
++
+ /*
+  * States for dedicated partition case.
+  */
+@@ -211,14 +209,7 @@ static int powernv_add_idle_states(void)
+ 			powernv_states[nr_idle_states].flags = 0;
+ 			powernv_states[nr_idle_states].target_residency = 100;
+ 			powernv_states[nr_idle_states].enter = &nap_loop;
+-		}
+-
+-		/*
+-		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
+-		 * within this config dependency check.
+-		 */
+-#ifdef CONFIG_TICK_ONESHOT
+-		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
++		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+ 			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
+ 			/* Add FASTSLEEP state */
+ 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+@@ -227,7 +218,7 @@ static int powernv_add_idle_states(void)
+ 			powernv_states[nr_idle_states].target_residency = 300000;
+ 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ 		}
+-#endif
++
+ 		powernv_states[nr_idle_states].exit_latency =
+ 				((unsigned int)latency_ns[i]) / 1000;
+ 
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index f062158..857414a 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -925,8 +925,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+ 		sg_count--;
+ 		link_tbl_ptr--;
+ 	}
+-	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
+-					+ cryptlen);
++	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
+ 
+ 	/* tag end of link table */
+ 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+@@ -2562,7 +2561,6 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		break;
+ 	default:
+ 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+-		kfree(t_alg);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index ca9f4ed..e1c7e9e 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1869,15 +1869,9 @@ static void free_pt_##LVL (unsigned long __pt)			\
+ 	pt = (u64 *)__pt;					\
+ 								\
+ 	for (i = 0; i < 512; ++i) {				\
+-		/* PTE present? */				\
+ 		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+ 			continue;				\
+ 								\
+-		/* Large PTE? */				\
+-		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
+-		    PM_PTE_LEVEL(pt[i]) == 7)			\
+-			continue;				\
+-								\
+ 		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+ 		FN(p);						\
+ 	}							\
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 65075ef..66a803b 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -1567,7 +1567,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ 		return -ENODEV;
+ 	}
+ 
+-	if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
++	if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
+ 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ 		dev_notice(smmu->dev, "\taddress translation ops\n");
+ 	}
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 9231cdf..c80287a 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -848,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 			int sg_cnt;
+ 
+ 			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+-			if (sg_cnt <= 0) {
++			if (sg_cnt == 0) {
+ 				/*
+ 				 * This only happens when someone fed
+ 				 * us an invalid request.
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index e9b1810..b0f6924 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -440,9 +440,6 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+ 		struct can_frame *cf = (struct can_frame *)skb->data;
+ 		u8 dlc = cf->can_dlc;
+ 
+-		if (!(skb->tstamp.tv64))
+-			__net_timestamp(skb);
+-
+ 		netif_rx(priv->echo_skb[idx]);
+ 		priv->echo_skb[idx] = NULL;
+ 
+@@ -578,7 +575,6 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -607,7 +603,6 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
+-	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CANFD);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index f64f529..c837eb9 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -207,7 +207,6 @@ static void slc_bump(struct slcan *sl)
+ 	if (!skb)
+ 		return;
+ 
+-	__net_timestamp(skb);
+ 	skb->dev = sl->dev;
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 0ce868d..674f367 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -78,9 +78,6 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
+ 	skb->dev       = dev;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
+-	if (!(skb->tstamp.tv64))
+-		__net_timestamp(skb);
+-
+ 	netif_rx_ni(skb);
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index 5c92fb7..d81fc6b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ 	int ret;
+ 
+ 	/* Try to obtain pages, decreasing order if necessary */
+-	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
++	gfp |= __GFP_COLD | __GFP_COMP;
+ 	while (order >= 0) {
+ 		pages = alloc_pages(gfp, order);
+ 		if (pages)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 8a97d28..33501bc 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -9323,8 +9323,7 @@ unload_error:
+ 	 * function stop ramrod is sent, since as part of this ramrod FW access
+ 	 * PTP registers.
+ 	 */
+-	if (bp->flags & PTP_SUPPORTED)
+-		bnx2x_stop_ptp(bp);
++	bnx2x_stop_ptp(bp);
+ 
+ 	/* Disable HW interrupts, NAPI */
+ 	bnx2x_netif_stop(bp, 1);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 74d0389..ce5f7f9 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -310,7 +310,6 @@ struct mvneta_port {
+ 	unsigned int link;
+ 	unsigned int duplex;
+ 	unsigned int speed;
+-	unsigned int tx_csum_limit;
+ 	int use_inband_status:1;
+ };
+ 
+@@ -1014,12 +1013,6 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
+-	} else {
+-		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
+-		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
+-		       MVNETA_GMAC_AN_SPEED_EN |
+-		       MVNETA_GMAC_AN_DUPLEX_EN);
+-		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ 	}
+ 
+ 	mvneta_set_ucast_table(pp, -1);
+@@ -2509,10 +2502,8 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
+ 
+ 	dev->mtu = mtu;
+ 
+-	if (!netif_running(dev)) {
+-		netdev_update_features(dev);
++	if (!netif_running(dev))
+ 		return 0;
+-	}
+ 
+ 	/* The interface is running, so we have to force a
+ 	 * reallocation of the queues
+@@ -2541,26 +2532,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
+ 	mvneta_start_dev(pp);
+ 	mvneta_port_up(pp);
+ 
+-	netdev_update_features(dev);
+-
+ 	return 0;
+ }
+ 
+-static netdev_features_t mvneta_fix_features(struct net_device *dev,
+-					     netdev_features_t features)
+-{
+-	struct mvneta_port *pp = netdev_priv(dev);
+-
+-	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
+-		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+-		netdev_info(dev,
+-			    "Disable IP checksum for MTU greater than %dB\n",
+-			    pp->tx_csum_limit);
+-	}
+-
+-	return features;
+-}
+-
+ /* Get mac address */
+ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+ {
+@@ -2882,7 +2856,6 @@ static const struct net_device_ops mvneta_netdev_ops = {
+ 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
+ 	.ndo_set_mac_address = mvneta_set_mac_addr,
+ 	.ndo_change_mtu      = mvneta_change_mtu,
+-	.ndo_fix_features    = mvneta_fix_features,
+ 	.ndo_get_stats64     = mvneta_get_stats64,
+ 	.ndo_do_ioctl        = mvneta_ioctl,
+ };
+@@ -3128,9 +3101,6 @@ static int mvneta_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
+-	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
+-		pp->tx_csum_limit = 1600;
+-
+ 	pp->tx_ring_size = MVNETA_MAX_TXD;
+ 	pp->rx_ring_size = MVNETA_MAX_RXD;
+ 
+@@ -3209,7 +3179,6 @@ static int mvneta_remove(struct platform_device *pdev)
+ 
+ static const struct of_device_id mvneta_match[] = {
+ 	{ .compatible = "marvell,armada-370-neta" },
+-	{ .compatible = "marvell,armada-xp-neta" },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, mvneta_match);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index a5a0b84..cf467a9 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1973,6 +1973,10 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+ 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ 	}
+ 
++	if (priv->base_tx_qpn) {
++		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
++		priv->base_tx_qpn = 0;
++	}
+ }
+ 
+ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index eab4e08..2a77a6b 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ }
+ #endif
+ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+-		      netdev_features_t dev_features)
++		      int hwtstamp_rx_filter)
+ {
+ 	__wsum hw_checksum = 0;
+ 
+@@ -731,8 +731,14 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ 
+ 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+ 
+-	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+-	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
++	if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
++	    hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
++		/* next protocol non IPv4 or IPv6 */
++		if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
++		    != htons(ETH_P_IP) &&
++		    ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
++		    != htons(ETH_P_IPV6))
++			return -1;
+ 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
+ 		hdr += sizeof(struct vlan_hdr);
+ 	}
+@@ -895,8 +901,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 
+ 			if (ip_summed == CHECKSUM_COMPLETE) {
+ 				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
+-				if (check_csum(cqe, gro_skb, va,
+-					       dev->features)) {
++				if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+ 					ip_summed = CHECKSUM_NONE;
+ 					ring->csum_none++;
+ 					ring->csum_complete--;
+@@ -951,7 +956,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 		}
+ 
+ 		if (ip_summed == CHECKSUM_COMPLETE) {
+-			if (check_csum(cqe, skb, skb->data, dev->features)) {
++			if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+ 				ip_summed = CHECKSUM_NONE;
+ 				ring->csum_complete--;
+ 				ring->csum_none++;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index c10d98f..7bed3a8 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -66,7 +66,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ 	ring->size = size;
+ 	ring->size_mask = size - 1;
+ 	ring->stride = stride;
+-	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+ 
+ 	tmp = size * sizeof(struct mlx4_en_tx_info);
+ 	ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
+@@ -181,7 +180,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ 		mlx4_bf_free(mdev->dev, &ring->bf);
+ 	mlx4_qp_remove(mdev->dev, &ring->qp);
+ 	mlx4_qp_free(mdev->dev, &ring->qp);
+-	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
+ 	mlx4_en_unmap_buffer(&ring->wqres.buf);
+ 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ 	kfree(ring->bounce_buf);
+@@ -233,11 +231,6 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ 		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+ }
+ 
+-static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
+-{
+-	return ring->prod - ring->cons > ring->full_size;
+-}
+-
+ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ 			      struct mlx4_en_tx_ring *ring, int index,
+ 			      u8 owner)
+@@ -480,10 +473,11 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 
+ 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
+ 
+-	/* Wakeup Tx queue if this stopped, and ring is not full.
++	/*
++	 * Wakeup Tx queue if this stopped, and at least 1 packet
++	 * was completed
+ 	 */
+-	if (netif_tx_queue_stopped(ring->tx_queue) &&
+-	    !mlx4_en_is_tx_ring_full(ring)) {
++	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+ 		netif_tx_wake_queue(ring->tx_queue);
+ 		ring->wake_queue++;
+ 	}
+@@ -927,7 +921,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	skb_tx_timestamp(skb);
+ 
+ 	/* Check available TXBBs And 2K spare for prefetch */
+-	stop_queue = mlx4_en_is_tx_ring_full(ring);
++	stop_queue = (int)(ring->prod - ring_cons) >
++		      ring->size - HEADROOM - MAX_DESC_TXBBS;
+ 	if (unlikely(stop_queue)) {
+ 		netif_tx_stop_queue(ring->tx_queue);
+ 		ring->queue_stopped++;
+@@ -996,7 +991,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		smp_rmb();
+ 
+ 		ring_cons = ACCESS_ONCE(ring->cons);
+-		if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
++		if (unlikely(((int)(ring->prod - ring_cons)) <=
++			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ 			netif_tx_wake_queue(ring->tx_queue);
+ 			ring->wake_queue++;
+ 		}
+diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
+index 0d80aed..6fce587 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
+@@ -93,14 +93,8 @@ int mlx4_register_interface(struct mlx4_interface *intf)
+ 	mutex_lock(&intf_mutex);
+ 
+ 	list_add_tail(&intf->list, &intf_list);
+-	list_for_each_entry(priv, &dev_list, dev_list) {
+-		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
+-			mlx4_dbg(&priv->dev,
+-				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
+-			intf->flags &= ~MLX4_INTFF_BONDING;
+-		}
++	list_for_each_entry(priv, &dev_list, dev_list)
+ 		mlx4_add_device(intf, priv);
+-	}
+ 
+ 	mutex_unlock(&intf_mutex);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index 909fcf8..d021f07 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -279,7 +279,6 @@ struct mlx4_en_tx_ring {
+ 	u32			size; /* number of TXBBs */
+ 	u32			size_mask;
+ 	u16			stride;
+-	u32			full_size;
+ 	u16			cqn;	/* index of port CQ associated with this ring */
+ 	u32			buf_size;
+ 	__be32			doorbell_qpn;
+@@ -580,6 +579,7 @@ struct mlx4_en_priv {
+ 	int vids[128];
+ 	bool wol;
+ 	struct device *ddev;
++	int base_tx_qpn;
+ 	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
+ 	struct hwtstamp_config hwtstamp_config;
+ 
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index d551df6..bdfe51f 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -796,11 +796,10 @@ static int genphy_config_advert(struct phy_device *phydev)
+ 	if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ 				 SUPPORTED_1000baseT_Full)) {
+ 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
++		if (adv != oldadv)
++			changed = 1;
+ 	}
+ 
+-	if (adv != oldadv)
+-		changed = 1;
+-
+ 	err = phy_write(phydev, MII_CTRL1000, adv);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 4cdac78..b2f9521 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -5365,10 +5365,6 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+ 		*have_5ghz_phy = true;
+ 		return;
+ 	case 0x4321: /* BCM4306 */
+-		/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
+-		if (dev->phy.type != B43_PHYTYPE_G)
+-			break;
+-		/* fall through */
+ 	case 0x4313: /* BCM4311 */
+ 	case 0x431a: /* BCM4318 */
+ 	case 0x432a: /* BCM4321 */
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index ec383b0..968787a 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -681,9 +681,6 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+ 	char *node;
+ 	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+ 
+-	if (vif->credit_watch.node)
+-		return -EADDRINUSE;
+-
+ 	node = kmalloc(maxlen, GFP_KERNEL);
+ 	if (!node)
+ 		return -ENOMEM;
+@@ -773,7 +770,6 @@ static void connect(struct backend_info *be)
+ 	}
+ 
+ 	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
+-	xen_unregister_watchers(be->vif);
+ 	xen_register_watchers(dev, be->vif);
+ 	read_xenbus_vif_flags(be);
+ 
+diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
+index f8d8fdb..6f1fa17 100644
+--- a/drivers/s390/kvm/virtio_ccw.c
++++ b/drivers/s390/kvm/virtio_ccw.c
+@@ -65,7 +65,6 @@ struct virtio_ccw_device {
+ 	bool is_thinint;
+ 	bool going_away;
+ 	bool device_lost;
+-	unsigned int config_ready;
+ 	void *airq_info;
+ };
+ 
+@@ -834,11 +833,8 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
+ 	if (ret)
+ 		goto out_free;
+ 
+-	memcpy(vcdev->config, config_area, offset + len);
+-	if (buf)
+-		memcpy(buf, &vcdev->config[offset], len);
+-	if (vcdev->config_ready < offset + len)
+-		vcdev->config_ready = offset + len;
++	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
++	memcpy(buf, &vcdev->config[offset], len);
+ 
+ out_free:
+ 	kfree(config_area);
+@@ -861,9 +857,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
+ 	if (!config_area)
+ 		goto out_free;
+ 
+-	/* Make sure we don't overwrite fields. */
+-	if (vcdev->config_ready < offset)
+-		virtio_ccw_get_config(vdev, 0, NULL, offset);
+ 	memcpy(&vcdev->config[offset], buf, len);
+ 	/* Write the config area to the host. */
+ 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index a086e1d..5c8f581 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1477,11 +1477,6 @@ skip_countries:
+ 		goto alloc_fail8;
+ 	}
+ 
+-	if (quirks & CLEAR_HALT_CONDITIONS) {
+-		usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
+-		usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
+-	}
+-
+ 	return 0;
+ alloc_fail8:
+ 	if (acm->country_codes) {
+@@ -1761,10 +1756,6 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
+ 
+-	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
+-	.driver_info = CLEAR_HALT_CONDITIONS,
+-	},
+-
+ 	/* Nokia S60 phones expose two ACM channels. The first is
+ 	 * a modem and is picked up by the standard AT-command
+ 	 * information below. The second is 'vendor-specific' but
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index b3b6c9d..ffeb3c8 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -133,4 +133,3 @@ struct acm {
+ #define NO_DATA_INTERFACE		BIT(4)
+ #define IGNORE_DEVICE			BIT(5)
+ #define QUIRK_CONTROL_LINE_STATE	BIT(6)
+-#define CLEAR_HALT_CONDITIONS		BIT(7)
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 45b8c8b..3507f88 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3435,7 +3435,6 @@ done:
+ static void ffs_closed(struct ffs_data *ffs)
+ {
+ 	struct ffs_dev *ffs_obj;
+-	struct f_fs_opts *opts;
+ 
+ 	ENTER();
+ 	ffs_dev_lock();
+@@ -3450,13 +3449,8 @@ static void ffs_closed(struct ffs_data *ffs)
+ 	    ffs_obj->ffs_closed_callback)
+ 		ffs_obj->ffs_closed_callback(ffs);
+ 
+-	if (ffs_obj->opts)
+-		opts = ffs_obj->opts;
+-	else
+-		goto done;
+-
+-	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
+-	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
++	if (!ffs_obj->opts || ffs_obj->opts->no_configfs
++	    || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
+ 		goto done;
+ 
+ 	unregister_gadget_item(ffs_obj->opts->
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 50bb3c2..37b5afd 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2927,6 +2927,17 @@ restart:
+ 				vfsmnt = &mnt->mnt;
+ 				continue;
+ 			}
++			/*
++			 * Filesystems needing to implement special "root names"
++			 * should do so with ->d_dname()
++			 */
++			if (IS_ROOT(dentry) &&
++			   (dentry->d_name.len != 1 ||
++			    dentry->d_name.name[0] != '/')) {
++				WARN(1, "Root dentry has weird name <%.*s>\n",
++				     (int) dentry->d_name.len,
++				     dentry->d_name.name);
++			}
+ 			if (!error)
+ 				error = is_mounted(vfsmnt) ? 1 : 2;
+ 			break;
+diff --git a/fs/inode.c b/fs/inode.c
+index 6e342ca..ea37cd1 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1693,8 +1693,8 @@ int file_remove_suid(struct file *file)
+ 		error = security_inode_killpriv(dentry);
+ 	if (!error && killsuid)
+ 		error = __remove_suid(dentry, killsuid);
+-	if (!error)
+-		inode_has_no_xattr(inode);
++	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
++		inode->i_flags |= S_NOSEC;
+ 
+ 	return error;
+ }
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 1d4a97c..1b9e111 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3185,15 +3185,11 @@ bool fs_fully_visible(struct file_system_type *type)
+ 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ 			continue;
+ 
+-		/* This mount is not fully visible if there are any
+-		 * locked child mounts that cover anything except for
+-		 * empty directories.
++		/* This mount is not fully visible if there are any child mounts
++		 * that cover anything except for empty directories.
+ 		 */
+ 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ 			struct inode *inode = child->mnt_mountpoint->d_inode;
+-			/* Only worry about locked mounts */
+-			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
+-				continue;
+ 			if (!S_ISDIR(inode->i_mode))
+ 				goto next;
+ 			if (inode->i_nlink > 2)
+diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
+index a7106ed..2c10360 100644
+--- a/fs/ufs/balloc.c
++++ b/fs/ufs/balloc.c
+@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
+ 	
+ 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
+ 		ufs_error (sb, "ufs_free_fragments", "internal error");
+-
+-	mutex_lock(&UFS_SB(sb)->s_lock);
++	
++	lock_ufs(sb);
+ 	
+ 	cgno = ufs_dtog(uspi, fragment);
+ 	bit = ufs_dtogd(uspi, fragment);
+@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
+ 	if (sb->s_flags & MS_SYNCHRONOUS)
+ 		ubh_sync_block(UCPI_UBH(ucpi));
+ 	ufs_mark_sb_dirty(sb);
+-
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	
++	unlock_ufs(sb);
+ 	UFSD("EXIT\n");
+ 	return;
+ 
+ failed:
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	unlock_ufs(sb);
+ 	UFSD("EXIT (FAILED)\n");
+ 	return;
+ }
+@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
+ 		goto failed;
+ 	}
+ 
+-	mutex_lock(&UFS_SB(sb)->s_lock);
++	lock_ufs(sb);
+ 	
+ do_more:
+ 	overflow = 0;
+@@ -211,12 +211,12 @@ do_more:
+ 	}
+ 
+ 	ufs_mark_sb_dirty(sb);
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	unlock_ufs(sb);
+ 	UFSD("EXIT\n");
+ 	return;
+ 
+ failed_unlock:
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	unlock_ufs(sb);
+ failed:
+ 	UFSD("EXIT (FAILED)\n");
+ 	return;
+@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 	usb1 = ubh_get_usb_first(uspi);
+ 	*err = -ENOSPC;
+ 
+-	mutex_lock(&UFS_SB(sb)->s_lock);
++	lock_ufs(sb);
+ 	tmp = ufs_data_ptr_to_cpu(sb, p);
+ 
+ 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
+@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 				  "fragment %llu, tmp %llu\n",
+ 				  (unsigned long long)fragment,
+ 				  (unsigned long long)tmp);
+-			mutex_unlock(&UFS_SB(sb)->s_lock);
++			unlock_ufs(sb);
+ 			return INVBLOCK;
+ 		}
+ 		if (fragment < UFS_I(inode)->i_lastfrag) {
+ 			UFSD("EXIT (ALREADY ALLOCATED)\n");
+-			mutex_unlock(&UFS_SB(sb)->s_lock);
++			unlock_ufs(sb);
+ 			return 0;
+ 		}
+ 	}
+ 	else {
+ 		if (tmp) {
+ 			UFSD("EXIT (ALREADY ALLOCATED)\n");
+-			mutex_unlock(&UFS_SB(sb)->s_lock);
++			unlock_ufs(sb);
+ 			return 0;
+ 		}
+ 	}
+@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 	 * There is not enough space for user on the device
+ 	 */
+ 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		UFSD("EXIT (FAILED)\n");
+ 		return 0;
+ 	}
+@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 			ufs_clear_frags(inode, result + oldcount,
+ 					newcount - oldcount, locked_page != NULL);
+ 		}
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
+ 		return result;
+ 	}
+@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 						fragment + count);
+ 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+ 				locked_page != NULL);
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
+ 		return result;
+ 	}
+@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 		*err = 0;
+ 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
+ 						fragment + count);
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		if (newcount < request)
+ 			ufs_free_fragments (inode, result + newcount, request - newcount);
+ 		ufs_free_fragments (inode, tmp, oldcount);
+@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 		return result;
+ 	}
+ 
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	unlock_ufs(sb);
+ 	UFSD("EXIT (FAILED)\n");
+ 	return 0;
+ }		
+diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
+index fd0203c..7caa016 100644
+--- a/fs/ufs/ialloc.c
++++ b/fs/ufs/ialloc.c
+@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
+ 	
+ 	ino = inode->i_ino;
+ 
+-	mutex_lock(&UFS_SB(sb)->s_lock);
++	lock_ufs(sb);
+ 
+ 	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
+ 		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		return;
+ 	}
+ 	
+@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
+ 	bit = ufs_inotocgoff (ino);
+ 	ucpi = ufs_load_cylinder (sb, cg);
+ 	if (!ucpi) {
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
++		unlock_ufs(sb);
+ 		return;
+ 	}
+ 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
+@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
+ 		ubh_sync_block(UCPI_UBH(ucpi));
+ 	
+ 	ufs_mark_sb_dirty(sb);
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
++	unlock_ufs(sb);
+ 	UFSD("EXIT\n");
+ }
+ 
+@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
+ 	sbi = UFS_SB(sb);
+ 	uspi = sbi->s_uspi;
+ 
+-	mutex_lock(&sbi->s_lock);
++	lock_ufs(sb);
+ 
+ 	/*
+ 	 * Try to place the inode in its parent directory
+@@ -331,21 +331,21 @@ cg_found:
+ 			sync_dirty_buffer(bh);
+ 		brelse(bh);
+ 	}
+-	mutex_unlock(&sbi->s_lock);
++	unlock_ufs(sb);
+ 
+ 	UFSD("allocating inode %lu\n", inode->i_ino);
+ 	UFSD("EXIT\n");
+ 	return inode;
+ 
+ fail_remove_inode:
+-	mutex_unlock(&sbi->s_lock);
++	unlock_ufs(sb);
+ 	clear_nlink(inode);
+ 	unlock_new_inode(inode);
+ 	iput(inode);
+ 	UFSD("EXIT (FAILED): err %d\n", err);
+ 	return ERR_PTR(err);
+ failed:
+-	mutex_unlock(&sbi->s_lock);
++	unlock_ufs(sb);
+ 	make_bad_inode(inode);
+ 	iput (inode);
+ 	UFSD("EXIT (FAILED): err %d\n", err);
+diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
+index 2d93ab0..be7d42c 100644
+--- a/fs/ufs/inode.c
++++ b/fs/ufs/inode.c
+@@ -902,9 +902,6 @@ void ufs_evict_inode(struct inode * inode)
+ 	invalidate_inode_buffers(inode);
+ 	clear_inode(inode);
+ 
+-	if (want_delete) {
+-		lock_ufs(inode->i_sb);
++	if (want_delete)
+ 		ufs_free_inode(inode);
+-		unlock_ufs(inode->i_sb);
+-	}
+ }
+diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
+index 60ee322..e491a93 100644
+--- a/fs/ufs/namei.c
++++ b/fs/ufs/namei.c
+@@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
+ 	if (l > sb->s_blocksize)
+ 		goto out_notlocked;
+ 
+-	lock_ufs(dir->i_sb);
+ 	inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+ 	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+-		goto out;
++		goto out_notlocked;
+ 
++	lock_ufs(dir->i_sb);
+ 	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
+ 		/* slow symlink */
+ 		inode->i_op = &ufs_symlink_inode_operations;
+@@ -174,12 +174,7 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
+ 	inode_inc_link_count(inode);
+ 	ihold(inode);
+ 
+-	error = ufs_add_link(dentry, inode);
+-	if (error) {
+-		inode_dec_link_count(inode);
+-		iput(inode);
+-	} else
+-		d_instantiate(dentry, inode);
++	error = ufs_add_nondir(dentry, inode);
+ 	unlock_ufs(dir->i_sb);
+ 	return error;
+ }
+@@ -189,13 +184,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 	struct inode * inode;
+ 	int err;
+ 
+-	lock_ufs(dir->i_sb);
+-	inode_inc_link_count(dir);
+-
+ 	inode = ufs_new_inode(dir, S_IFDIR|mode);
+-	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+-		goto out_dir;
++		return PTR_ERR(inode);
+ 
+ 	inode->i_op = &ufs_dir_inode_operations;
+ 	inode->i_fop = &ufs_dir_operations;
+@@ -203,6 +194,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 
+ 	inode_inc_link_count(inode);
+ 
++	lock_ufs(dir->i_sb);
++	inode_inc_link_count(dir);
++
+ 	err = ufs_make_empty(inode, dir);
+ 	if (err)
+ 		goto out_fail;
+@@ -212,7 +206,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 		goto out_fail;
+ 	unlock_ufs(dir->i_sb);
+ 
+-	unlock_new_inode(inode);
+ 	d_instantiate(dentry, inode);
+ out:
+ 	return err;
+@@ -222,7 +215,6 @@ out_fail:
+ 	inode_dec_link_count(inode);
+ 	unlock_new_inode(inode);
+ 	iput (inode);
+-out_dir:
+ 	inode_dec_link_count(dir);
+ 	unlock_ufs(dir->i_sb);
+ 	goto out;
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index dc33f94..b3bc3e7 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -694,7 +694,6 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
+ 	unsigned flags;
+ 
+ 	lock_ufs(sb);
+-	mutex_lock(&UFS_SB(sb)->s_lock);
+ 
+ 	UFSD("ENTER\n");
+ 
+@@ -712,7 +711,6 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
+ 	ufs_put_cstotal(sb);
+ 
+ 	UFSD("EXIT\n");
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	unlock_ufs(sb);
+ 
+ 	return 0;
+@@ -801,7 +799,6 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+ 	UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
+ 	
+ 	mutex_init(&sbi->mutex);
+-	mutex_init(&sbi->s_lock);
+ 	spin_lock_init(&sbi->work_lock);
+ 	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
+ 	/*
+@@ -1280,7 +1277,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 
+ 	sync_filesystem(sb);
+ 	lock_ufs(sb);
+-	mutex_lock(&UFS_SB(sb)->s_lock);
+ 	uspi = UFS_SB(sb)->s_uspi;
+ 	flags = UFS_SB(sb)->s_flags;
+ 	usb1 = ubh_get_usb_first(uspi);
+@@ -1294,7 +1290,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 	new_mount_opt = 0;
+ 	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
+ 	if (!ufs_parse_options (data, &new_mount_opt)) {
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ 	}
+@@ -1302,14 +1297,12 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 		new_mount_opt |= ufstype;
+ 	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
+ 		pr_err("ufstype can't be changed during remount\n");
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
+ 		UFS_SB(sb)->s_mount_opt = new_mount_opt;
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return 0;
+ 	}
+@@ -1333,7 +1326,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 	 */
+ #ifndef CONFIG_UFS_FS_WRITE
+ 		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
+-		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ #else
+@@ -1343,13 +1335,11 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
+ 		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
+ 			pr_err("this ufstype is read-only supported\n");
+-			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			unlock_ufs(sb);
+ 			return -EINVAL;
+ 		}
+ 		if (!ufs_read_cylinder_structures(sb)) {
+ 			pr_err("failed during remounting\n");
+-			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			unlock_ufs(sb);
+ 			return -EPERM;
+ 		}
+@@ -1357,7 +1347,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ #endif
+ 	}
+ 	UFS_SB(sb)->s_mount_opt = new_mount_opt;
+-	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	unlock_ufs(sb);
+ 	return 0;
+ }
+diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
+index cf6368d..2a07396 100644
+--- a/fs/ufs/ufs.h
++++ b/fs/ufs/ufs.h
+@@ -30,7 +30,6 @@ struct ufs_sb_info {
+ 	int work_queued; /* non-zero if the delayed work is queued */
+ 	struct delayed_work sync_work; /* FS sync delayed work */
+ 	spinlock_t work_lock; /* protects sync_work and work_queued */
+-	struct mutex s_lock;
+ };
+ 
+ struct ufs_inode_info {
+diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
+index 8ba379f..3573a81 100644
+--- a/include/net/netns/sctp.h
++++ b/include/net/netns/sctp.h
+@@ -31,7 +31,6 @@ struct netns_sctp {
+ 	struct list_head addr_waitq;
+ 	struct timer_list addr_wq_timer;
+ 	struct list_head auto_asconf_splist;
+-	/* Lock that protects both addr_waitq and auto_asconf_splist */
+ 	spinlock_t addr_wq_lock;
+ 
+ 	/* Lock that protects the local_addr_list writers */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 495c87e..2bb2fcf 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -223,10 +223,6 @@ struct sctp_sock {
+ 	atomic_t pd_mode;
+ 	/* Receive to here while partial delivery is in effect. */
+ 	struct sk_buff_head pd_lobby;
+-
+-	/* These must be the last fields, as they will skipped on copies,
+-	 * like on accept and peeloff operations
+-	 */
+ 	struct list_head auto_asconf_list;
+ 	int do_auto_asconf;
+ };
 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
 index 1a0006a..4842a98 100644
 --- a/include/uapi/linux/Kbuild
@@ -7499,10 +9575,10 @@ index 1a0006a..4842a98 100644
  header-y += kernelcapi.h
 diff --git a/include/uapi/linux/kdbus.h b/include/uapi/linux/kdbus.h
 new file mode 100644
-index 0000000..00a6e14
+index 0000000..ecffc6b
 --- /dev/null
 +++ b/include/uapi/linux/kdbus.h
-@@ -0,0 +1,979 @@
+@@ -0,0 +1,980 @@
 +/*
 + * kdbus is free software; you can redistribute it and/or modify it under
 + * the terms of the GNU Lesser General Public License as published by the
@@ -7879,6 +9955,7 @@ index 0000000..00a6e14
 +	KDBUS_ITEM_ATTACH_FLAGS_RECV,
 +	KDBUS_ITEM_ID,
 +	KDBUS_ITEM_NAME,
++	KDBUS_ITEM_DST_ID,
 +
 +	/* keep these item types in sync with KDBUS_ATTACH_* flags */
 +	_KDBUS_ITEM_ATTACH_BASE	= 0x1000,
@@ -8529,11 +10606,22 @@ index 86c7300..68ec416 100644
 +obj-$(CONFIG_KDBUS) += kdbus/
 diff --git a/ipc/kdbus/Makefile b/ipc/kdbus/Makefile
 new file mode 100644
-index 0000000..7ee9271
+index 0000000..66663a1
 --- /dev/null
 +++ b/ipc/kdbus/Makefile
-@@ -0,0 +1,22 @@
-+kdbus-y := \
+@@ -0,0 +1,33 @@
++#
++# By setting KDBUS_EXT=2, the kdbus module will be built as kdbus2.ko, and
++# KBUILD_MODNAME=kdbus2. This has the effect that all exported objects have
++# different names than usually (kdbus2fs, /sys/fs/kdbus2/) and you can run
++# your test-infrastructure against the kdbus2.ko, while running your system
++# on kdbus.ko.
++#
++# To just build the module, use:
++#     make KDBUS_EXT=2 M=ipc/kdbus
++#
++
++kdbus$(KDBUS_EXT)-y := \
 +	bus.o \
 +	connection.o \
 +	endpoint.o \
@@ -8554,13 +10642,13 @@ index 0000000..7ee9271
 +	queue.o \
 +	util.o
 +
-+obj-$(CONFIG_KDBUS) += kdbus.o
++obj-$(CONFIG_KDBUS) += kdbus$(KDBUS_EXT).o
 diff --git a/ipc/kdbus/bus.c b/ipc/kdbus/bus.c
 new file mode 100644
-index 0000000..bbdf0f2
+index 0000000..a67f825
 --- /dev/null
 +++ b/ipc/kdbus/bus.c
-@@ -0,0 +1,542 @@
+@@ -0,0 +1,514 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -8629,23 +10717,16 @@ index 0000000..bbdf0f2
 +				       const char *name,
 +				       struct kdbus_bloom_parameter *bloom,
 +				       const u64 *pattach_owner,
-+				       const u64 *pattach_recv,
 +				       u64 flags, kuid_t uid, kgid_t gid)
 +{
 +	struct kdbus_bus *b;
 +	u64 attach_owner;
-+	u64 attach_recv;
 +	int ret;
 +
 +	if (bloom->size < 8 || bloom->size > KDBUS_BUS_BLOOM_MAX_SIZE ||
 +	    !KDBUS_IS_ALIGNED8(bloom->size) || bloom->n_hash < 1)
 +		return ERR_PTR(-EINVAL);
 +
-+	ret = kdbus_sanitize_attach_flags(pattach_recv ? *pattach_recv : 0,
-+					  &attach_recv);
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
 +	ret = kdbus_sanitize_attach_flags(pattach_owner ? *pattach_owner : 0,
 +					  &attach_owner);
 +	if (ret < 0)
@@ -8674,7 +10755,6 @@ index 0000000..bbdf0f2
 +
 +	b->id = atomic64_inc_return(&domain->last_id);
 +	b->bus_flags = flags;
-+	b->attach_flags_req = attach_recv;
 +	b->attach_flags_owner = attach_owner;
 +	generate_random_uuid(b->id128);
 +	b->bloom = *bloom;
@@ -8803,9 +10883,9 @@ index 0000000..bbdf0f2
 + * kdbus_bus_broadcast() - send a message to all subscribed connections
 + * @bus:	The bus the connections are connected to
 + * @conn_src:	The source connection, may be %NULL for kernel notifications
-+ * @kmsg:	The message to send.
++ * @staging:	Staging object containing the message to send
 + *
-+ * Send @kmsg to all connections that are currently active on the bus.
++ * Send message to all connections that are currently active on the bus.
 + * Connections must still have matches installed in order to let the message
 + * pass.
 + *
@@ -8813,7 +10893,7 @@ index 0000000..bbdf0f2
 + */
 +void kdbus_bus_broadcast(struct kdbus_bus *bus,
 +			 struct kdbus_conn *conn_src,
-+			 struct kdbus_kmsg *kmsg)
++			 struct kdbus_staging *staging)
 +{
 +	struct kdbus_conn *conn_dst;
 +	unsigned int i;
@@ -8830,12 +10910,10 @@ index 0000000..bbdf0f2
 +	 * can re-construct order via sequence numbers), but we should at least
 +	 * try to avoid re-ordering for monitors.
 +	 */
-+	kdbus_bus_eavesdrop(bus, conn_src, kmsg);
++	kdbus_bus_eavesdrop(bus, conn_src, staging);
 +
 +	down_read(&bus->conn_rwlock);
 +	hash_for_each(bus->conn_hash, i, conn_dst, hentry) {
-+		if (conn_dst->id == kmsg->msg.src_id)
-+			continue;
 +		if (!kdbus_conn_is_ordinary(conn_dst))
 +			continue;
 +
@@ -8843,8 +10921,8 @@ index 0000000..bbdf0f2
 +		 * Check if there is a match for the kmsg object in
 +		 * the destination connection match db
 +		 */
-+		if (!kdbus_match_db_match_kmsg(conn_dst->match_db, conn_src,
-+					       kmsg))
++		if (!kdbus_match_db_match_msg(conn_dst->match_db, conn_src,
++					      staging))
 +			continue;
 +
 +		if (conn_src) {
@@ -8855,13 +10933,6 @@ index 0000000..bbdf0f2
 +			 */
 +			if (!kdbus_conn_policy_talk(conn_dst, NULL, conn_src))
 +				continue;
-+
-+			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
-+							  conn_dst);
-+			if (ret < 0) {
-+				kdbus_conn_lost_message(conn_dst);
-+				continue;
-+			}
 +		} else {
 +			/*
 +			 * Check if there is a policy db that prevents the
@@ -8869,11 +10940,12 @@ index 0000000..bbdf0f2
 +			 * notification
 +			 */
 +			if (!kdbus_conn_policy_see_notification(conn_dst, NULL,
-+								kmsg))
++								staging->msg))
 +				continue;
 +		}
 +
-+		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
++		ret = kdbus_conn_entry_insert(conn_src, conn_dst, staging,
++					      NULL, NULL);
 +		if (ret < 0)
 +			kdbus_conn_lost_message(conn_dst);
 +	}
@@ -8884,16 +10956,16 @@ index 0000000..bbdf0f2
 + * kdbus_bus_eavesdrop() - send a message to all subscribed monitors
 + * @bus:	The bus the monitors are connected to
 + * @conn_src:	The source connection, may be %NULL for kernel notifications
-+ * @kmsg:	The message to send.
++ * @staging:	Staging object containing the message to send
 + *
-+ * Send @kmsg to all monitors that are currently active on the bus. Monitors
++ * Send message to all monitors that are currently active on the bus. Monitors
 + * must still have matches installed in order to let the message pass.
 + *
 + * The caller must hold the name-registry lock of @bus.
 + */
 +void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
 +			 struct kdbus_conn *conn_src,
-+			 struct kdbus_kmsg *kmsg)
++			 struct kdbus_staging *staging)
 +{
 +	struct kdbus_conn *conn_dst;
 +	int ret;
@@ -8907,16 +10979,8 @@ index 0000000..bbdf0f2
 +
 +	down_read(&bus->conn_rwlock);
 +	list_for_each_entry(conn_dst, &bus->monitors_list, monitor_entry) {
-+		if (conn_src) {
-+			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
-+							  conn_dst);
-+			if (ret < 0) {
-+				kdbus_conn_lost_message(conn_dst);
-+				continue;
-+			}
-+		}
-+
-+		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
++		ret = kdbus_conn_entry_insert(conn_src, conn_dst, staging,
++					      NULL, NULL);
 +		if (ret < 0)
 +			kdbus_conn_lost_message(conn_dst);
 +	}
@@ -8943,7 +11007,6 @@ index 0000000..bbdf0f2
 +		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
 +		{ .type = KDBUS_ITEM_BLOOM_PARAMETER, .mandatory = true },
 +		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
-+		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
 +	};
 +	struct kdbus_args args = {
 +		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
@@ -8962,7 +11025,6 @@ index 0000000..bbdf0f2
 +	bus = kdbus_bus_new(domain,
 +			    argv[1].item->str, &argv[2].item->bloom_parameter,
 +			    argv[3].item ? argv[3].item->data64 : NULL,
-+			    argv[4].item ? argv[4].item->data64 : NULL,
 +			    cmd->flags, current_euid(), current_egid());
 +	if (IS_ERR(bus)) {
 +		ret = PTR_ERR(bus);
@@ -9029,13 +11091,12 @@ index 0000000..bbdf0f2
 +	struct kdbus_cmd_info *cmd;
 +	struct kdbus_bus *bus = conn->ep->bus;
 +	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_item *meta_items = NULL;
 +	struct kdbus_item_header item_hdr;
 +	struct kdbus_info info = {};
-+	size_t meta_size, name_len;
-+	struct kvec kvec[5];
-+	u64 hdr_size = 0;
-+	u64 attach_flags;
-+	size_t cnt = 0;
++	size_t meta_size, name_len, cnt = 0;
++	struct kvec kvec[6];
++	u64 attach_flags, size = 0;
 +	int ret;
 +
 +	struct kdbus_arg argv[] = {
@@ -9057,8 +11118,8 @@ index 0000000..bbdf0f2
 +
 +	attach_flags &= bus->attach_flags_owner;
 +
-+	ret = kdbus_meta_export_prepare(bus->creator_meta, NULL,
-+					&attach_flags, &meta_size);
++	ret = kdbus_meta_emit(bus->creator_meta, NULL, NULL, conn,
++			      attach_flags, &meta_items, &meta_size);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -9068,30 +11129,29 @@ index 0000000..bbdf0f2
 +	item_hdr.type = KDBUS_ITEM_MAKE_NAME;
 +	item_hdr.size = KDBUS_ITEM_HEADER_SIZE + name_len;
 +
-+	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &hdr_size);
-+	kdbus_kvec_set(&kvec[cnt++], &item_hdr, sizeof(item_hdr), &hdr_size);
-+	kdbus_kvec_set(&kvec[cnt++], bus->node.name, name_len, &hdr_size);
-+	cnt += !!kdbus_kvec_pad(&kvec[cnt], &hdr_size);
++	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &size);
++	kdbus_kvec_set(&kvec[cnt++], &item_hdr, sizeof(item_hdr), &size);
++	kdbus_kvec_set(&kvec[cnt++], bus->node.name, name_len, &size);
++	cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
++	if (meta_size > 0) {
++		kdbus_kvec_set(&kvec[cnt++], meta_items, meta_size, &size);
++		cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
++	}
++
++	info.size = size;
 +
-+	slice = kdbus_pool_slice_alloc(conn->pool, hdr_size + meta_size, false);
++	slice = kdbus_pool_slice_alloc(conn->pool, size, false);
 +	if (IS_ERR(slice)) {
 +		ret = PTR_ERR(slice);
 +		slice = NULL;
 +		goto exit;
 +	}
 +
-+	ret = kdbus_meta_export(bus->creator_meta, NULL, attach_flags,
-+				slice, hdr_size, &meta_size);
++	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, size);
 +	if (ret < 0)
 +		goto exit;
 +
-+	info.size = hdr_size + meta_size;
-+
-+	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, hdr_size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
++	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
 +
 +	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
 +	    kdbus_member_set_user(&cmd->info_size, argp,
@@ -9100,15 +11160,15 @@ index 0000000..bbdf0f2
 +
 +exit:
 +	kdbus_pool_slice_release(slice);
-+
++	kfree(meta_items);
 +	return kdbus_args_clear(&args, ret);
 +}
 diff --git a/ipc/kdbus/bus.h b/ipc/kdbus/bus.h
 new file mode 100644
-index 0000000..5bea5ef
+index 0000000..238986e
 --- /dev/null
 +++ b/ipc/kdbus/bus.h
-@@ -0,0 +1,101 @@
+@@ -0,0 +1,99 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9140,7 +11200,7 @@ index 0000000..5bea5ef
 +
 +struct kdbus_conn;
 +struct kdbus_domain;
-+struct kdbus_kmsg;
++struct kdbus_staging;
 +struct kdbus_user;
 +
 +/**
@@ -9148,7 +11208,6 @@ index 0000000..5bea5ef
 + * @node:		kdbus_node
 + * @id:			ID of this bus in the domain
 + * @bus_flags:		Simple pass-through flags from userspace to userspace
-+ * @attach_flags_req:	KDBUS_ATTACH_* flags required by connecting peers
 + * @attach_flags_owner:	KDBUS_ATTACH_* flags of bus creator that other
 + *			connections can see or query
 + * @id128:		Unique random 128 bit ID of this bus
@@ -9171,7 +11230,6 @@ index 0000000..5bea5ef
 +	/* static */
 +	u64 id;
 +	u64 bus_flags;
-+	u64 attach_flags_req;
 +	u64 attach_flags_owner;
 +	u8 id128[16];
 +	struct kdbus_bloom_parameter bloom;
@@ -9200,10 +11258,10 @@ index 0000000..5bea5ef
 +struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id);
 +void kdbus_bus_broadcast(struct kdbus_bus *bus,
 +			 struct kdbus_conn *conn_src,
-+			 struct kdbus_kmsg *kmsg);
++			 struct kdbus_staging *staging);
 +void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
 +			 struct kdbus_conn *conn_src,
-+			 struct kdbus_kmsg *kmsg);
++			 struct kdbus_staging *staging);
 +
 +struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
 +				     void __user *argp);
@@ -9212,10 +11270,10 @@ index 0000000..5bea5ef
 +#endif
 diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
 new file mode 100644
-index 0000000..9993753
+index 0000000..d94b417e
 --- /dev/null
 +++ b/ipc/kdbus/connection.c
-@@ -0,0 +1,2178 @@
+@@ -0,0 +1,2207 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9330,10 +11388,6 @@ index 0000000..9993753
 +	if (ret < 0)
 +		return ERR_PTR(ret);
 +
-+	/* The attach flags must always satisfy the bus requirements. */
-+	if (bus->attach_flags_req & ~attach_flags_send)
-+		return ERR_PTR(-ECONNREFUSED);
-+
 +	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 +	if (!conn)
 +		return ERR_PTR(-ENOMEM);
@@ -9352,6 +11406,8 @@ index 0000000..9993753
 +	atomic_set(&conn->lost_count, 0);
 +	INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
 +	conn->cred = get_current_cred();
++	conn->pid = get_pid(task_pid(current));
++	get_fs_root(current->fs, &conn->root_path);
 +	init_waitqueue_head(&conn->wait);
 +	kdbus_queue_init(&conn->queue);
 +	conn->privileged = privileged;
@@ -9391,22 +11447,28 @@ index 0000000..9993753
 +	BUILD_BUG_ON(sizeof(bus->id128) != sizeof(hello->id128));
 +	memcpy(hello->id128, bus->id128, sizeof(hello->id128));
 +
-+	conn->meta = kdbus_meta_proc_new();
-+	if (IS_ERR(conn->meta)) {
-+		ret = PTR_ERR(conn->meta);
-+		conn->meta = NULL;
-+		goto exit_unref;
-+	}
-+
 +	/* privileged processes can impersonate somebody else */
 +	if (creds || pids || seclabel) {
-+		ret = kdbus_meta_proc_fake(conn->meta, creds, pids, seclabel);
-+		if (ret < 0)
++		conn->meta_fake = kdbus_meta_fake_new();
++		if (IS_ERR(conn->meta_fake)) {
++			ret = PTR_ERR(conn->meta_fake);
++			conn->meta_fake = NULL;
 +			goto exit_unref;
++		}
 +
-+		conn->faked_meta = true;
++		ret = kdbus_meta_fake_collect(conn->meta_fake,
++					      creds, pids, seclabel);
++		if (ret < 0)
++			goto exit_unref;
 +	} else {
-+		ret = kdbus_meta_proc_collect(conn->meta,
++		conn->meta_proc = kdbus_meta_proc_new();
++		if (IS_ERR(conn->meta_proc)) {
++			ret = PTR_ERR(conn->meta_proc);
++			conn->meta_proc = NULL;
++			goto exit_unref;
++		}
++
++		ret = kdbus_meta_proc_collect(conn->meta_proc,
 +					      KDBUS_ATTACH_CREDS |
 +					      KDBUS_ATTACH_PIDS |
 +					      KDBUS_ATTACH_AUXGROUPS |
@@ -9489,10 +11551,13 @@ index 0000000..9993753
 +		kdbus_user_unref(conn->user);
 +	}
 +
-+	kdbus_meta_proc_unref(conn->meta);
++	kdbus_meta_fake_free(conn->meta_fake);
++	kdbus_meta_proc_unref(conn->meta_proc);
 +	kdbus_match_db_free(conn->match_db);
 +	kdbus_pool_free(conn->pool);
 +	kdbus_ep_unref(conn->ep);
++	path_put(&conn->root_path);
++	put_pid(conn->pid);
 +	put_cred(conn->cred);
 +	kfree(conn->description);
 +	kfree(conn->quota);
@@ -9824,9 +11889,9 @@ index 0000000..9993753
 +}
 +
 +struct kdbus_quota {
-+	uint32_t memory;
-+	uint16_t msgs;
-+	uint8_t fds;
++	u32 memory;
++	u16 msgs;
++	u8 fds;
 +};
 +
 +/**
@@ -9864,7 +11929,7 @@ index 0000000..9993753
 +	 * allocation schemes. Furthermore, resource utilization should be
 +	 * maximized, so only minimal resources stay reserved. However, we need
 +	 * to adapt to a dynamic number of users, as we cannot know how many
-+	 * users will talk to a connection. Therefore, the current allocations
++	 * users will talk to a connection. Therefore, the current allocation
 +	 * works like this:
 +	 * We limit the number of bytes in a destination's pool per sending
 +	 * user. The space available for a user is 33% of the unused pool space
@@ -9906,7 +11971,7 @@ index 0000000..9993753
 +
 +	/*
 +	 * Pool owner slices are un-accounted slices; they can claim more
-+	 * than 50% of the queue. However, the slice we're dealing with here
++	 * than 50% of the queue. However, the slices we're dealing with here
 +	 * belong to the incoming queue, hence they are 'accounted' slices
 +	 * to which the 50%-limit applies.
 +	 */
@@ -9988,9 +12053,9 @@ index 0000000..9993753
 +
 +/* Callers should take the conn_dst lock */
 +static struct kdbus_queue_entry *
-+kdbus_conn_entry_make(struct kdbus_conn *conn_dst,
-+		      const struct kdbus_kmsg *kmsg,
-+		      struct kdbus_user *user)
++kdbus_conn_entry_make(struct kdbus_conn *conn_src,
++		      struct kdbus_conn *conn_dst,
++		      struct kdbus_staging *staging)
 +{
 +	/* The remote connection was disconnected */
 +	if (!kdbus_conn_active(conn_dst))
@@ -10005,10 +12070,10 @@ index 0000000..9993753
 +	 */
 +	if (!kdbus_conn_is_monitor(conn_dst) &&
 +	    !(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
-+	    kmsg->res && kmsg->res->fds_count > 0)
++	    staging->gaps && staging->gaps->n_fds > 0)
 +		return ERR_PTR(-ECOMM);
 +
-+	return kdbus_queue_entry_new(conn_dst, kmsg, user);
++	return kdbus_queue_entry_new(conn_src, conn_dst, staging);
 +}
 +
 +/*
@@ -10017,12 +12082,11 @@ index 0000000..9993753
 + * The connection's queue will never get to see it.
 + */
 +static int kdbus_conn_entry_sync_attach(struct kdbus_conn *conn_dst,
-+					const struct kdbus_kmsg *kmsg,
++					struct kdbus_staging *staging,
 +					struct kdbus_reply *reply_wake)
 +{
 +	struct kdbus_queue_entry *entry;
-+	int remote_ret;
-+	int ret = 0;
++	int remote_ret, ret = 0;
 +
 +	mutex_lock(&reply_wake->reply_dst->lock);
 +
@@ -10031,8 +12095,8 @@ index 0000000..9993753
 +	 * entry and attach it to the reply object
 +	 */
 +	if (reply_wake->waiting) {
-+		entry = kdbus_conn_entry_make(conn_dst, kmsg,
-+					      reply_wake->reply_src->user);
++		entry = kdbus_conn_entry_make(reply_wake->reply_src, conn_dst,
++					      staging);
 +		if (IS_ERR(entry))
 +			ret = PTR_ERR(entry);
 +		else
@@ -10073,23 +12137,24 @@ index 0000000..9993753
 + * kdbus_conn_entry_insert() - enqueue a message into the receiver's pool
 + * @conn_src:		The sending connection
 + * @conn_dst:		The connection to queue into
-+ * @kmsg:		The kmsg to queue
++ * @staging:		Message to send
 + * @reply:		The reply tracker to attach to the queue entry
++ * @name:		Destination name this msg is sent to, or NULL
 + *
 + * Return: 0 on success. negative error otherwise.
 + */
 +int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
 +			    struct kdbus_conn *conn_dst,
-+			    const struct kdbus_kmsg *kmsg,
-+			    struct kdbus_reply *reply)
++			    struct kdbus_staging *staging,
++			    struct kdbus_reply *reply,
++			    const struct kdbus_name_entry *name)
 +{
 +	struct kdbus_queue_entry *entry;
 +	int ret;
 +
 +	kdbus_conn_lock2(conn_src, conn_dst);
 +
-+	entry = kdbus_conn_entry_make(conn_dst, kmsg,
-+				      conn_src ? conn_src->user : NULL);
++	entry = kdbus_conn_entry_make(conn_src, conn_dst, staging);
 +	if (IS_ERR(entry)) {
 +		ret = PTR_ERR(entry);
 +		goto exit_unlock;
@@ -10101,6 +12166,14 @@ index 0000000..9993753
 +			schedule_delayed_work(&conn_src->work, 0);
 +	}
 +
++	/*
++	 * Record the sequence number of the registered name; it will
++	 * be remembered by the queue, in case messages addressed to a
++	 * name need to be moved from or to an activator.
++	 */
++	if (name)
++		entry->dst_name_id = name->name_id;
++
 +	kdbus_queue_entry_enqueue(entry, reply);
 +	wake_up_interruptible(&conn_dst->wait);
 +
@@ -10233,22 +12306,18 @@ index 0000000..9993753
 +}
 +
 +static int kdbus_pin_dst(struct kdbus_bus *bus,
-+			 struct kdbus_kmsg *kmsg,
++			 struct kdbus_staging *staging,
 +			 struct kdbus_name_entry **out_name,
 +			 struct kdbus_conn **out_dst)
 +{
-+	struct kdbus_msg_resources *res = kmsg->res;
++	const struct kdbus_msg *msg = staging->msg;
 +	struct kdbus_name_entry *name = NULL;
 +	struct kdbus_conn *dst = NULL;
-+	struct kdbus_msg *msg = &kmsg->msg;
 +	int ret;
 +
-+	if (WARN_ON(!res))
-+		return -EINVAL;
-+
 +	lockdep_assert_held(&bus->name_registry->rwlock);
 +
-+	if (!res->dst_name) {
++	if (!staging->dst_name) {
 +		dst = kdbus_bus_find_conn_by_id(bus, msg->dst_id);
 +		if (!dst)
 +			return -ENXIO;
@@ -10259,7 +12328,7 @@ index 0000000..9993753
 +		}
 +	} else {
 +		name = kdbus_name_lookup_unlocked(bus->name_registry,
-+						  res->dst_name);
++						  staging->dst_name);
 +		if (!name)
 +			return -ESRCH;
 +
@@ -10285,13 +12354,6 @@ index 0000000..9993753
 +			ret = -EADDRNOTAVAIL;
 +			goto error;
 +		}
-+
-+		/*
-+		 * Record the sequence number of the registered name; it will
-+		 * be passed on to the queue, in case messages addressed to a
-+		 * name need to be moved from or to an activator.
-+		 */
-+		kmsg->dst_name_id = name->name_id;
 +	}
 +
 +	*out_name = name;
@@ -10303,17 +12365,19 @@ index 0000000..9993753
 +	return ret;
 +}
 +
-+static int kdbus_conn_reply(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
++static int kdbus_conn_reply(struct kdbus_conn *src,
++			    struct kdbus_staging *staging)
 +{
++	const struct kdbus_msg *msg = staging->msg;
 +	struct kdbus_name_entry *name = NULL;
 +	struct kdbus_reply *reply, *wake = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
 +	int ret;
 +
-+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
-+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL))
++	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
++	    WARN_ON(msg->flags & KDBUS_MSG_SIGNAL))
 +		return -EINVAL;
 +
 +	/* name-registry must be locked for lookup *and* collecting data */
@@ -10321,12 +12385,12 @@ index 0000000..9993753
 +
 +	/* find and pin destination */
 +
-+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	ret = kdbus_pin_dst(bus, staging, &name, &dst);
 +	if (ret < 0)
 +		goto exit;
 +
 +	mutex_lock(&dst->lock);
-+	reply = kdbus_reply_find(src, dst, kmsg->msg.cookie_reply);
++	reply = kdbus_reply_find(src, dst, msg->cookie_reply);
 +	if (reply) {
 +		if (reply->sync)
 +			wake = kdbus_reply_ref(reply);
@@ -10339,20 +12403,14 @@ index 0000000..9993753
 +		goto exit;
 +	}
 +
-+	/* attach metadata */
-+
-+	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
-+	if (ret < 0)
-+		goto exit;
-+
 +	/* send message */
 +
-+	kdbus_bus_eavesdrop(bus, src, kmsg);
++	kdbus_bus_eavesdrop(bus, src, staging);
 +
 +	if (wake)
-+		ret = kdbus_conn_entry_sync_attach(dst, kmsg, wake);
++		ret = kdbus_conn_entry_sync_attach(dst, staging, wake);
 +	else
-+		ret = kdbus_conn_entry_insert(src, dst, kmsg, NULL);
++		ret = kdbus_conn_entry_insert(src, dst, staging, NULL, name);
 +
 +exit:
 +	up_read(&bus->name_registry->rwlock);
@@ -10362,24 +12420,25 @@ index 0000000..9993753
 +}
 +
 +static struct kdbus_reply *kdbus_conn_call(struct kdbus_conn *src,
-+					   struct kdbus_kmsg *kmsg,
++					   struct kdbus_staging *staging,
 +					   ktime_t exp)
 +{
++	const struct kdbus_msg *msg = staging->msg;
 +	struct kdbus_name_entry *name = NULL;
 +	struct kdbus_reply *wait = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
 +	int ret;
 +
-+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL) ||
-+	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY)))
++	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(msg->flags & KDBUS_MSG_SIGNAL) ||
++	    WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY)))
 +		return ERR_PTR(-EINVAL);
 +
 +	/* resume previous wait-context, if available */
 +
 +	mutex_lock(&src->lock);
-+	wait = kdbus_reply_find(NULL, src, kmsg->msg.cookie);
++	wait = kdbus_reply_find(NULL, src, msg->cookie);
 +	if (wait) {
 +		if (wait->interrupted) {
 +			kdbus_reply_ref(wait);
@@ -10401,7 +12460,7 @@ index 0000000..9993753
 +
 +	/* find and pin destination */
 +
-+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	ret = kdbus_pin_dst(bus, staging, &name, &dst);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -10410,24 +12469,18 @@ index 0000000..9993753
 +		goto exit;
 +	}
 +
-+	wait = kdbus_reply_new(dst, src, &kmsg->msg, name, true);
++	wait = kdbus_reply_new(dst, src, msg, name, true);
 +	if (IS_ERR(wait)) {
 +		ret = PTR_ERR(wait);
 +		wait = NULL;
 +		goto exit;
 +	}
 +
-+	/* attach metadata */
-+
-+	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
-+	if (ret < 0)
-+		goto exit;
-+
 +	/* send message */
 +
-+	kdbus_bus_eavesdrop(bus, src, kmsg);
++	kdbus_bus_eavesdrop(bus, src, staging);
 +
-+	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
++	ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -10443,18 +12496,20 @@ index 0000000..9993753
 +	return wait;
 +}
 +
-+static int kdbus_conn_unicast(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
++static int kdbus_conn_unicast(struct kdbus_conn *src,
++			      struct kdbus_staging *staging)
 +{
++	const struct kdbus_msg *msg = staging->msg;
 +	struct kdbus_name_entry *name = NULL;
 +	struct kdbus_reply *wait = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
-+	bool is_signal = (kmsg->msg.flags & KDBUS_MSG_SIGNAL);
++	bool is_signal = (msg->flags & KDBUS_MSG_SIGNAL);
 +	int ret = 0;
 +
-+	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
-+	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) &&
-+		    kmsg->msg.cookie_reply != 0))
++	if (WARN_ON(msg->dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(!(msg->flags & KDBUS_MSG_EXPECT_REPLY) &&
++		    msg->cookie_reply != 0))
 +		return -EINVAL;
 +
 +	/* name-registry must be locked for lookup *and* collecting data */
@@ -10462,23 +12517,23 @@ index 0000000..9993753
 +
 +	/* find and pin destination */
 +
-+	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	ret = kdbus_pin_dst(bus, staging, &name, &dst);
 +	if (ret < 0)
 +		goto exit;
 +
 +	if (is_signal) {
 +		/* like broadcasts we eavesdrop even if the msg is dropped */
-+		kdbus_bus_eavesdrop(bus, src, kmsg);
++		kdbus_bus_eavesdrop(bus, src, staging);
 +
 +		/* drop silently if peer is not interested or not privileged */
-+		if (!kdbus_match_db_match_kmsg(dst->match_db, src, kmsg) ||
++		if (!kdbus_match_db_match_msg(dst->match_db, src, staging) ||
 +		    !kdbus_conn_policy_talk(dst, NULL, src))
 +			goto exit;
 +	} else if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
 +		ret = -EPERM;
 +		goto exit;
-+	} else if (kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
-+		wait = kdbus_reply_new(dst, src, &kmsg->msg, name, false);
++	} else if (msg->flags & KDBUS_MSG_EXPECT_REPLY) {
++		wait = kdbus_reply_new(dst, src, msg, name, false);
 +		if (IS_ERR(wait)) {
 +			ret = PTR_ERR(wait);
 +			wait = NULL;
@@ -10486,18 +12541,12 @@ index 0000000..9993753
 +		}
 +	}
 +
-+	/* attach metadata */
-+
-+	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
-+	if (ret < 0)
-+		goto exit;
-+
 +	/* send message */
 +
 +	if (!is_signal)
-+		kdbus_bus_eavesdrop(bus, src, kmsg);
++		kdbus_bus_eavesdrop(bus, src, staging);
 +
-+	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
++	ret = kdbus_conn_entry_insert(src, dst, staging, wait, name);
 +	if (ret < 0 && !is_signal)
 +		goto exit;
 +
@@ -10567,7 +12616,7 @@ index 0000000..9993753
 +			continue;
 +
 +		if (!(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
-+		    e->msg_res && e->msg_res->fds_count > 0) {
++		    e->gaps && e->gaps->n_fds > 0) {
 +			kdbus_conn_lost_message(conn_dst);
 +			kdbus_queue_entry_free(e);
 +			continue;
@@ -10751,19 +12800,16 @@ index 0000000..9993753
 + *					  receive a given kernel notification
 + * @conn:		Connection
 + * @conn_creds:		Credentials of @conn to use for policy check
-+ * @kmsg:		The message carrying the notification
++ * @msg:		Notification message
 + *
-+ * This checks whether @conn is allowed to see the kernel notification @kmsg.
++ * This checks whether @conn is allowed to see the kernel notification.
 + *
 + * Return: true if allowed, false if not.
 + */
 +bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
 +					const struct cred *conn_creds,
-+					const struct kdbus_kmsg *kmsg)
++					const struct kdbus_msg *msg)
 +{
-+	if (WARN_ON(kmsg->msg.src_id != KDBUS_SRC_ID_KERNEL))
-+		return false;
-+
 +	/*
 +	 * Depending on the notification type, broadcasted kernel notifications
 +	 * have to be filtered:
@@ -10776,12 +12822,12 @@ index 0000000..9993753
 +	 *     broadcast to everyone, to allow tracking peers.
 +	 */
 +
-+	switch (kmsg->notify_type) {
++	switch (msg->items[0].type) {
 +	case KDBUS_ITEM_NAME_ADD:
 +	case KDBUS_ITEM_NAME_REMOVE:
 +	case KDBUS_ITEM_NAME_CHANGE:
 +		return kdbus_conn_policy_see_name(conn, conn_creds,
-+						  kmsg->notify_name);
++					msg->items[0].name_change.name);
 +
 +	case KDBUS_ITEM_ID_ADD:
 +	case KDBUS_ITEM_ID_REMOVE:
@@ -10789,7 +12835,7 @@ index 0000000..9993753
 +
 +	default:
 +		WARN(1, "Invalid type for notification broadcast: %llu\n",
-+		     (unsigned long long)kmsg->notify_type);
++		     (unsigned long long)msg->items[0].type);
 +		return false;
 +	}
 +}
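The switch above leans on the fact that kernel-generated notifications carry exactly one item, so msg->items[0] is always the notification payload. A hedged sketch of the same discrimination from a receiver's point of view (illustrative helper; field names follow the kdbus UAPI):

/* Illustrative only: classify a kernel notification by its sole item. */
static bool example_is_name_notification(const struct kdbus_msg *msg)
{
	u64 t = msg->items[0].type;

	return t == KDBUS_ITEM_NAME_ADD || t == KDBUS_ITEM_NAME_REMOVE ||
	       t == KDBUS_ITEM_NAME_CHANGE;
}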
@@ -10927,13 +12973,14 @@ index 0000000..9993753
 +	struct kdbus_pool_slice *slice = NULL;
 +	struct kdbus_name_entry *entry = NULL;
 +	struct kdbus_conn *owner_conn = NULL;
++	struct kdbus_item *meta_items = NULL;
 +	struct kdbus_info info = {};
 +	struct kdbus_cmd_info *cmd;
 +	struct kdbus_bus *bus = conn->ep->bus;
-+	struct kvec kvec;
-+	size_t meta_size;
++	struct kvec kvec[3];
++	size_t meta_size, cnt = 0;
 +	const char *name;
-+	u64 attach_flags;
++	u64 attach_flags, size = 0;
 +	int ret;
 +
 +	struct kdbus_arg argv[] = {
@@ -10983,10 +13030,6 @@ index 0000000..9993753
 +		goto exit;
 +	}
 +
-+	info.id = owner_conn->id;
-+	info.flags = owner_conn->flags;
-+	kdbus_kvec_set(&kvec, &info, sizeof(info), &info.size);
-+
 +	attach_flags &= atomic64_read(&owner_conn->attach_flags_send);
 +
 +	conn_meta = kdbus_meta_conn_new();
@@ -10996,32 +13039,35 @@ index 0000000..9993753
 +		goto exit;
 +	}
 +
-+	ret = kdbus_meta_conn_collect(conn_meta, NULL, owner_conn,
-+				      attach_flags);
++	ret = kdbus_meta_conn_collect(conn_meta, owner_conn, 0, attach_flags);
 +	if (ret < 0)
 +		goto exit;
 +
-+	ret = kdbus_meta_export_prepare(owner_conn->meta, conn_meta,
-+					&attach_flags, &meta_size);
++	ret = kdbus_meta_emit(owner_conn->meta_proc, owner_conn->meta_fake,
++			      conn_meta, conn, attach_flags,
++			      &meta_items, &meta_size);
 +	if (ret < 0)
 +		goto exit;
 +
-+	slice = kdbus_pool_slice_alloc(conn->pool,
-+				       info.size + meta_size, false);
++	info.id = owner_conn->id;
++	info.flags = owner_conn->flags;
++
++	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &size);
++	if (meta_size > 0) {
++		kdbus_kvec_set(&kvec[cnt++], meta_items, meta_size, &size);
++		cnt += !!kdbus_kvec_pad(&kvec[cnt], &size);
++	}
++
++	info.size = size;
++
++	slice = kdbus_pool_slice_alloc(conn->pool, size, false);
 +	if (IS_ERR(slice)) {
 +		ret = PTR_ERR(slice);
 +		slice = NULL;
 +		goto exit;
 +	}
 +
-+	ret = kdbus_meta_export(owner_conn->meta, conn_meta, attach_flags,
-+				slice, sizeof(info), &meta_size);
-+	if (ret < 0)
-+		goto exit;
-+
-+	info.size += meta_size;
-+
-+	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, sizeof(info));
++	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, size);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -11039,6 +13085,7 @@ index 0000000..9993753
 +exit:
 +	up_read(&bus->name_registry->rwlock);
 +	kdbus_pool_slice_release(slice);
++	kfree(meta_items);
 +	kdbus_meta_conn_unref(conn_meta);
 +	kdbus_conn_unref(owner_conn);
 +	return kdbus_args_clear(&args, ret);
@@ -11053,7 +13100,6 @@ index 0000000..9993753
 + */
 +int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
 +{
-+	struct kdbus_bus *bus = conn->ep->bus;
 +	struct kdbus_item *item_policy;
 +	u64 *item_attach_send = NULL;
 +	u64 *item_attach_recv = NULL;
@@ -11094,11 +13140,6 @@ index 0000000..9993753
 +						  &attach_send);
 +		if (ret < 0)
 +			goto exit;
-+
-+		if (bus->attach_flags_req & ~attach_send) {
-+			ret = -EINVAL;
-+			goto exit;
-+		}
 +	}
 +
 +	if (item_attach_recv) {
@@ -11151,10 +13192,12 @@ index 0000000..9993753
 +int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
 +{
 +	struct kdbus_cmd_send *cmd;
-+	struct kdbus_kmsg *kmsg = NULL;
++	struct kdbus_staging *staging = NULL;
++	struct kdbus_msg *msg = NULL;
 +	struct file *cancel_fd = NULL;
-+	int ret;
++	int ret, ret2;
 +
++	/* command arguments */
 +	struct kdbus_arg argv[] = {
 +		{ .type = KDBUS_ITEM_NEGOTIATE },
 +		{ .type = KDBUS_ITEM_CANCEL_FD },
@@ -11166,12 +13209,48 @@ index 0000000..9993753
 +		.argc = ARRAY_SIZE(argv),
 +	};
 +
++	/* message arguments */
++	struct kdbus_arg msg_argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_PAYLOAD_VEC, .multiple = true },
++		{ .type = KDBUS_ITEM_PAYLOAD_MEMFD, .multiple = true },
++		{ .type = KDBUS_ITEM_FDS },
++		{ .type = KDBUS_ITEM_BLOOM_FILTER },
++		{ .type = KDBUS_ITEM_DST_NAME },
++	};
++	struct kdbus_args msg_args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_MSG_EXPECT_REPLY |
++				 KDBUS_MSG_NO_AUTO_START |
++				 KDBUS_MSG_SIGNAL,
++		.argv = msg_argv,
++		.argc = ARRAY_SIZE(msg_argv),
++	};
++
 +	if (!kdbus_conn_is_ordinary(conn))
 +		return -EOPNOTSUPP;
 +
++	/* make sure to parse both @cmd and @msg on negotiation */
++
 +	ret = kdbus_args_parse(&args, argp, &cmd);
-+	if (ret != 0)
-+		return ret;
++	if (ret < 0)
++		goto exit;
++	else if (ret > 0 && !cmd->msg_address) /* negotiation without msg */
++		goto exit;
++
++	ret2 = kdbus_args_parse_msg(&msg_args, KDBUS_PTR(cmd->msg_address),
++				    &msg);
++	if (ret2 < 0) { /* cannot parse message */
++		ret = ret2;
++		goto exit;
++	} else if (ret2 > 0 && !ret) { /* msg-negot implies cmd-negot */
++		ret = -EINVAL;
++		goto exit;
++	} else if (ret > 0) { /* negotiation */
++		goto exit;
++	}
++
++	/* here we parsed both @cmd and @msg, and neither wants negotiation */
 +
 +	cmd->reply.return_flags = 0;
 +	kdbus_pool_publish_empty(conn->pool, &cmd->reply.offset,
@@ -11190,23 +13269,30 @@ index 0000000..9993753
 +		}
 +	}
 +
-+	kmsg = kdbus_kmsg_new_from_cmd(conn, cmd);
-+	if (IS_ERR(kmsg)) {
-+		ret = PTR_ERR(kmsg);
-+		kmsg = NULL;
++	/* patch in the source of this message */
++	if (msg->src_id > 0 && msg->src_id != conn->id) {
++		ret = -EINVAL;
++		goto exit;
++	}
++	msg->src_id = conn->id;
++
++	staging = kdbus_staging_new_user(conn->ep->bus, cmd, msg);
++	if (IS_ERR(staging)) {
++		ret = PTR_ERR(staging);
++		staging = NULL;
 +		goto exit;
 +	}
 +
-+	if (kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
++	if (msg->dst_id == KDBUS_DST_ID_BROADCAST) {
 +		down_read(&conn->ep->bus->name_registry->rwlock);
-+		kdbus_bus_broadcast(conn->ep->bus, conn, kmsg);
++		kdbus_bus_broadcast(conn->ep->bus, conn, staging);
 +		up_read(&conn->ep->bus->name_registry->rwlock);
 +	} else if (cmd->flags & KDBUS_SEND_SYNC_REPLY) {
 +		struct kdbus_reply *r;
 +		ktime_t exp;
 +
-+		exp = ns_to_ktime(kmsg->msg.timeout_ns);
-+		r = kdbus_conn_call(conn, kmsg, exp);
++		exp = ns_to_ktime(msg->timeout_ns);
++		r = kdbus_conn_call(conn, staging, exp);
 +		if (IS_ERR(r)) {
 +			ret = PTR_ERR(r);
 +			goto exit;
@@ -11216,13 +13302,13 @@ index 0000000..9993753
 +		kdbus_reply_unref(r);
 +		if (ret < 0)
 +			goto exit;
-+	} else if ((kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
-+		   kmsg->msg.cookie_reply == 0) {
-+		ret = kdbus_conn_unicast(conn, kmsg);
++	} else if ((msg->flags & KDBUS_MSG_EXPECT_REPLY) ||
++		   msg->cookie_reply == 0) {
++		ret = kdbus_conn_unicast(conn, staging);
 +		if (ret < 0)
 +			goto exit;
 +	} else {
-+		ret = kdbus_conn_reply(conn, kmsg);
++		ret = kdbus_conn_reply(conn, staging);
 +		if (ret < 0)
 +			goto exit;
 +	}
@@ -11233,7 +13319,8 @@ index 0000000..9993753
 +exit:
 +	if (cancel_fd)
 +		fput(cancel_fd);
-+	kdbus_kmsg_free(kmsg);
++	kdbus_staging_free(staging);
++	ret = kdbus_args_clear(&msg_args, ret);
 +	return kdbus_args_clear(&args, ret);
 +}
 +
@@ -11396,10 +13483,10 @@ index 0000000..9993753
 +}
 diff --git a/ipc/kdbus/connection.h b/ipc/kdbus/connection.h
 new file mode 100644
-index 0000000..d1ffe90
+index 0000000..5ee864e
 --- /dev/null
 +++ b/ipc/kdbus/connection.h
-@@ -0,0 +1,257 @@
+@@ -0,0 +1,261 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -11433,7 +13520,7 @@ index 0000000..d1ffe90
 +					 KDBUS_HELLO_MONITOR)
 +
 +struct kdbus_quota;
-+struct kdbus_kmsg;
++struct kdbus_staging;
 +
 +/**
 + * struct kdbus_conn - connection to a bus
@@ -11456,11 +13543,13 @@ index 0000000..d1ffe90
 + * @work:		Delayed work to handle timeouts
 + * @match_db:		Subscription filter to broadcast messages
-+ * @meta:		Active connection creator's metadata/credentials,
-+ *			either from the handle or from HELLO
++ * @meta_proc:		Process metadata of connection creator, or NULL
++ * @meta_fake:		Faked metadata, or NULL
 + * @pool:		The user's buffer to receive messages
 + * @user:		Owner of the connection
 + * @cred:		The credentials of the connection at creation time
++ * @pid:		Pid at creation time
++ * @root_path:		Root path at creation time
 + * @name_count:		Number of owned well-known names
 + * @request_count:	Number of pending requests issued by this
 + *			connection that are waiting for replies from
@@ -11474,7 +13563,6 @@ index 0000000..d1ffe90
 + * @names_list:		List of well-known names
 + * @names_queue_list:	Well-known names this connection waits for
 + * @privileged:		Whether this connection is privileged on the bus
-+ * @faked_meta:		Whether the metadata was faked on HELLO
 + */
 +struct kdbus_conn {
 +	struct kref kref;
@@ -11495,10 +13583,13 @@ index 0000000..d1ffe90
 +	struct list_head reply_list;
 +	struct delayed_work work;
 +	struct kdbus_match_db *match_db;
-+	struct kdbus_meta_proc *meta;
++	struct kdbus_meta_proc *meta_proc;
++	struct kdbus_meta_fake *meta_fake;
 +	struct kdbus_pool *pool;
 +	struct kdbus_user *user;
 +	const struct cred *cred;
++	struct pid *pid;
++	struct path root_path;
 +	atomic_t name_count;
 +	atomic_t request_count;
 +	atomic_t lost_count;
@@ -11514,7 +13605,6 @@ index 0000000..d1ffe90
 +	struct list_head names_queue_list;
 +
 +	bool privileged:1;
-+	bool faked_meta:1;
 +};
 +
 +struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn);
@@ -11531,8 +13621,9 @@ index 0000000..d1ffe90
 +void kdbus_conn_lost_message(struct kdbus_conn *c);
 +int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
 +			    struct kdbus_conn *conn_dst,
-+			    const struct kdbus_kmsg *kmsg,
-+			    struct kdbus_reply *reply);
++			    struct kdbus_staging *staging,
++			    struct kdbus_reply *reply,
++			    const struct kdbus_name_entry *name);
 +void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
 +			      struct kdbus_conn *conn_src,
 +			      u64 name_id);
@@ -11549,7 +13640,7 @@ index 0000000..d1ffe90
 +					 const char *name);
 +bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
 +					const struct cred *curr_creds,
-+					const struct kdbus_kmsg *kmsg);
++					const struct kdbus_msg *msg);
 +
 +/* command dispatcher */
 +struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
@@ -12044,7 +14135,7 @@ index 0000000..447a2bd
 +#endif
 diff --git a/ipc/kdbus/endpoint.c b/ipc/kdbus/endpoint.c
 new file mode 100644
-index 0000000..9a95a5e
+index 0000000..977964d
 --- /dev/null
 +++ b/ipc/kdbus/endpoint.c
 @@ -0,0 +1,275 @@
@@ -12128,7 +14219,7 @@ index 0000000..9a95a5e
 + * @gid:		The gid of the node
 + * @is_custom:		Whether this is a custom endpoint
 + *
-+ * This function will create a new enpoint with the given
++ * This function will create a new endpoint with the given
 + * name and properties for a given bus.
 + *
 + * Return: a new kdbus_ep on success, ERR_PTR on failure.
@@ -12325,7 +14416,7 @@ index 0000000..9a95a5e
 +}
 diff --git a/ipc/kdbus/endpoint.h b/ipc/kdbus/endpoint.h
 new file mode 100644
-index 0000000..d31954b
+index 0000000..bc1b94a
 --- /dev/null
 +++ b/ipc/kdbus/endpoint.h
 @@ -0,0 +1,67 @@
@@ -12356,7 +14447,7 @@ index 0000000..d31954b
 +struct kdbus_user;
 +
 +/**
-+ * struct kdbus_ep - enpoint to access a bus
++ * struct kdbus_ep - endpoint to access a bus
 + * @node:		The kdbus node
 + * @lock:		Endpoint data lock
 + * @bus:		Bus behind this endpoint
@@ -12364,7 +14455,7 @@ index 0000000..d31954b
 + * @policy_db:		Uploaded policy
 + * @conn_list:		Connections of this endpoint
 + *
-+ * An enpoint offers access to a bus; the default endpoint node name is "bus".
++ * An endpoint offers access to a bus; the default endpoint node name is "bus".
 + * Additional custom endpoints to the same bus can be created and they can
 + * carry their own policies/filters.
 + */
@@ -12398,10 +14489,10 @@ index 0000000..d31954b
 +#endif
 diff --git a/ipc/kdbus/fs.c b/ipc/kdbus/fs.c
 new file mode 100644
-index 0000000..d01f33b
+index 0000000..09c4809
 --- /dev/null
 +++ b/ipc/kdbus/fs.c
-@@ -0,0 +1,510 @@
+@@ -0,0 +1,508 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -12478,7 +14569,7 @@ index 0000000..d01f33b
 +	 * closest node to that position and cannot use our node pointer. This
 +	 * means iterating the rb-tree to find the closest match and start over
 +	 * from there.
-+	 * Note that hash values are not neccessarily unique. Therefore, llseek
++	 * Note that hash values are not necessarily unique. Therefore, llseek
 +	 * is not guaranteed to seek to the same node that you got when you
 +	 * retrieved the position. Seeking to 0, 1, 2 and >=INT_MAX is safe,
 +	 * though. We could use the inode-number as position, but this would
@@ -12729,9 +14820,7 @@ index 0000000..d01f33b
 +	}
 +
 +	kill_anon_super(sb);
-+
-+	if (domain)
-+		kdbus_domain_unref(domain);
++	kdbus_domain_unref(domain);
 +}
 +
 +static int fs_super_set(struct super_block *sb, void *data)
@@ -12948,10 +15037,10 @@ index 0000000..62f7d6a
 +#endif
 diff --git a/ipc/kdbus/handle.c b/ipc/kdbus/handle.c
 new file mode 100644
-index 0000000..0752799
+index 0000000..e0e06b0
 --- /dev/null
 +++ b/ipc/kdbus/handle.c
-@@ -0,0 +1,702 @@
+@@ -0,0 +1,709 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13080,6 +15169,7 @@ index 0000000..0752799
 +/**
 + * __kdbus_args_parse() - parse payload of kdbus command
 + * @args:		object to parse data into
++ * @is_cmd:		whether this is a command or msg payload
 + * @argp:		user-space location of command payload to parse
 + * @type_size:		overall size of command payload to parse
 + * @items_offset:	offset of items array in command payload
@@ -13094,10 +15184,14 @@ index 0000000..0752799
 + * If this function succeeded, you must call kdbus_args_clear() to release
 + * allocated resources before destroying @args.
 + *
++ * This can also be used to import kdbus_msg objects. In that case, @is_cmd must
++ * be set to 'false' and the 'return_flags' field will not be touched (as it
++ * doesn't exist on kdbus_msg).
++ *
 + * Return: On failure a negative error code is returned. Otherwise, 1 is
 + * returned if negotiation was requested, 0 if not.
 + */
-+int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
++int __kdbus_args_parse(struct kdbus_args *args, bool is_cmd, void __user *argp,
 +		       size_t type_size, size_t items_offset, void **out)
 +{
 +	u64 user_size;
@@ -13127,10 +15221,12 @@ index 0000000..0752799
 +		goto error;
 +	}
 +
-+	args->cmd->return_flags = 0;
++	if (is_cmd)
++		args->cmd->return_flags = 0;
 +	args->user = argp;
 +	args->items = (void *)((u8 *)args->cmd + items_offset);
 +	args->items_size = args->cmd->size - items_offset;
++	args->is_cmd = is_cmd;
 +
 +	if (args->cmd->flags & ~args->allowed_flags) {
 +		ret = -EINVAL;
@@ -13179,8 +15275,8 @@ index 0000000..0752799
 +		return ret;
 +
 +	if (!IS_ERR_OR_NULL(args->cmd)) {
-+		if (put_user(args->cmd->return_flags,
-+			     &args->user->return_flags))
++		if (args->is_cmd && put_user(args->cmd->return_flags,
++					     &args->user->return_flags))
 +			ret = -EFAULT;
 +		if (args->cmd != (void*)args->cmd_buf)
 +			kfree(args->cmd);
@@ -13656,10 +15752,10 @@ index 0000000..0752799
 +};
 diff --git a/ipc/kdbus/handle.h b/ipc/kdbus/handle.h
 new file mode 100644
-index 0000000..13c59d9
+index 0000000..8a36c05
 --- /dev/null
 +++ b/ipc/kdbus/handle.h
-@@ -0,0 +1,90 @@
+@@ -0,0 +1,103 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13710,6 +15806,7 @@ index 0000000..13c59d9
 + * @cmd_buf:		512 bytes inline buf to avoid kmalloc() on small cmds
 + * @items:		points to item array in @cmd
 + * @items_size:		size of @items in bytes
++ * @is_cmd:		whether this is a command-payload or msg-payload
 + *
 + * This structure is used to parse ioctl command payloads on each invocation.
 + * The ioctl handler has to pre-fill the flags and allowed items before passing
@@ -13730,9 +15827,10 @@ index 0000000..13c59d9
 +
 +	struct kdbus_item *items;
 +	size_t items_size;
++	bool is_cmd : 1;
 +};
 +
-+int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
++int __kdbus_args_parse(struct kdbus_args *args, bool is_cmd, void __user *argp,
 +		       size_t type_size, size_t items_offset, void **out);
 +int kdbus_args_clear(struct kdbus_args *args, int ret);
 +
@@ -13744,7 +15842,18 @@ index 0000000..13c59d9
 +			     offsetof(struct kdbus_cmd, flags));        \
 +		BUILD_BUG_ON(offsetof(typeof(**(_v)), return_flags) !=  \
 +			     offsetof(struct kdbus_cmd, return_flags)); \
-+		__kdbus_args_parse((_args), (_argp), sizeof(**(_v)),    \
++		__kdbus_args_parse((_args), 1, (_argp), sizeof(**(_v)), \
++				   offsetof(typeof(**(_v)), items),     \
++				   (void **)(_v));                      \
++	})
++
++#define kdbus_args_parse_msg(_args, _argp, _v)                          \
++	({                                                              \
++		BUILD_BUG_ON(offsetof(typeof(**(_v)), size) !=          \
++			     offsetof(struct kdbus_cmd, size));         \
++		BUILD_BUG_ON(offsetof(typeof(**(_v)), flags) !=         \
++			     offsetof(struct kdbus_cmd, flags));        \
++		__kdbus_args_parse((_args), 0, (_argp), sizeof(**(_v)), \
 +				   offsetof(typeof(**(_v)), items),     \
 +				   (void **)(_v));                      \
 +	})
@@ -13752,10 +15861,10 @@ index 0000000..13c59d9
 +#endif
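Both macros funnel into __kdbus_args_parse(), whose return convention drives negotiation: negative on error, 1 if the caller merely probed for supported flags/items, 0 if the payload was fully parsed. A condensed sketch of the three-way handling, modeled on kdbus_cmd_send() earlier in this patch (illustrative; error unwinding trimmed):

/* Illustrative only: combining the two parsers, as in kdbus_cmd_send(). */
ret = kdbus_args_parse(&args, argp, &cmd);	/* 1 = negotiation probe */
if (ret < 0 || (ret > 0 && !cmd->msg_address))
	goto exit;		/* hard error, or probe without a message */

ret2 = kdbus_args_parse_msg(&msg_args, KDBUS_PTR(cmd->msg_address), &msg);
if (ret2 < 0)
	ret = ret2;		/* message payload unparsable */
else if (ret2 > 0 && !ret)
	ret = -EINVAL;		/* msg probe without cmd probe is malformed */
if (ret != 0)
	goto exit;		/* error, or negotiation fully answered */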
 diff --git a/ipc/kdbus/item.c b/ipc/kdbus/item.c
 new file mode 100644
-index 0000000..1ee72c2
+index 0000000..ce78dba
 --- /dev/null
 +++ b/ipc/kdbus/item.c
-@@ -0,0 +1,333 @@
+@@ -0,0 +1,293 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13905,6 +16014,7 @@ index 0000000..1ee72c2
 +	case KDBUS_ITEM_ATTACH_FLAGS_SEND:
 +	case KDBUS_ITEM_ATTACH_FLAGS_RECV:
 +	case KDBUS_ITEM_ID:
++	case KDBUS_ITEM_DST_ID:
 +		if (payload_size != sizeof(u64))
 +			return -EINVAL;
 +		break;
@@ -14018,47 +16128,6 @@ index 0000000..1ee72c2
 +	return 0;
 +}
 +
-+static struct kdbus_item *kdbus_items_get(const struct kdbus_item *items,
-+					  size_t items_size,
-+					  unsigned int item_type)
-+{
-+	const struct kdbus_item *iter, *found = NULL;
-+
-+	KDBUS_ITEMS_FOREACH(iter, items, items_size) {
-+		if (iter->type == item_type) {
-+			if (found)
-+				return ERR_PTR(-EEXIST);
-+			found = iter;
-+		}
-+	}
-+
-+	return (struct kdbus_item *)found ? : ERR_PTR(-EBADMSG);
-+}
-+
-+/**
-+ * kdbus_items_get_str() - get string from a list of items
-+ * @items:		The items to walk
-+ * @items_size:		The size of all items
-+ * @item_type:		The item type to look for
-+ *
-+ * This function walks a list of items and searches for items of type
-+ * @item_type. If it finds exactly one such item, @str_ret will be set to
-+ * the .str member of the item.
-+ *
-+ * Return: the string, if the item was found exactly once, ERR_PTR(-EEXIST)
-+ * if the item was found more than once, and ERR_PTR(-EBADMSG) if there was
-+ * no item of the given type.
-+ */
-+const char *kdbus_items_get_str(const struct kdbus_item *items,
-+				size_t items_size,
-+				unsigned int item_type)
-+{
-+	const struct kdbus_item *item;
-+
-+	item = kdbus_items_get(items, items_size, item_type);
-+	return IS_ERR(item) ? ERR_CAST(item) : item->str;
-+}
-+
 +/**
 + * kdbus_item_set() - Set item content
 + * @item:	The item to modify
@@ -14091,10 +16160,10 @@ index 0000000..1ee72c2
 +}
 diff --git a/ipc/kdbus/item.h b/ipc/kdbus/item.h
 new file mode 100644
-index 0000000..bca63b4
+index 0000000..3a7e6cc
 --- /dev/null
 +++ b/ipc/kdbus/item.h
-@@ -0,0 +1,64 @@
+@@ -0,0 +1,61 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -14152,19 +16221,16 @@ index 0000000..bca63b4
 +int kdbus_item_validate_name(const struct kdbus_item *item);
 +int kdbus_item_validate(const struct kdbus_item *item);
 +int kdbus_items_validate(const struct kdbus_item *items, size_t items_size);
-+const char *kdbus_items_get_str(const struct kdbus_item *items,
-+				size_t items_size,
-+				unsigned int item_type);
 +struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
 +				  const void *data, size_t len);
 +
 +#endif
 diff --git a/ipc/kdbus/limits.h b/ipc/kdbus/limits.h
 new file mode 100644
-index 0000000..6450f58
+index 0000000..c54925a
 --- /dev/null
 +++ b/ipc/kdbus/limits.h
-@@ -0,0 +1,64 @@
+@@ -0,0 +1,61 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -14186,9 +16252,6 @@ index 0000000..6450f58
 +/* maximum size of message header and items */
 +#define KDBUS_MSG_MAX_SIZE		SZ_8K
 +
-+/* maximum number of message items */
-+#define KDBUS_MSG_MAX_ITEMS		128
-+
 +/* maximum number of memfd items per message */
 +#define KDBUS_MSG_MAX_MEMFD_ITEMS	16
 +
@@ -14351,10 +16414,10 @@ index 0000000..1ad4dc8
 +MODULE_ALIAS_FS(KBUILD_MODNAME "fs");
 diff --git a/ipc/kdbus/match.c b/ipc/kdbus/match.c
 new file mode 100644
-index 0000000..cc083b4
+index 0000000..4ee6a1f
 --- /dev/null
 +++ b/ipc/kdbus/match.c
-@@ -0,0 +1,559 @@
+@@ -0,0 +1,546 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -14423,7 +16486,7 @@ index 0000000..cc083b4
 +
 +/**
 + * struct kdbus_match_rule - a rule appended to a match entry
-+ * @type:		An item type to match agains
++ * @type:		An item type to match against
 + * @bloom_mask:		Bloom mask to match a message's filter against, used
 + *			with KDBUS_ITEM_BLOOM_MASK
 + * @name:		Name to match against, used with KDBUS_ITEM_NAME,
@@ -14435,6 +16498,7 @@ index 0000000..cc083b4
 + *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE},
 + *			KDBUS_ITEM_ID_REMOVE
 + * @src_id:		ID to match against, used with KDBUS_ITEM_ID
++ * @dst_id:		Message destination ID, used with KDBUS_ITEM_DST_ID
 + * @rules_entry:	Entry in the entry's rules list
 + */
 +struct kdbus_match_rule {
@@ -14447,6 +16511,7 @@ index 0000000..cc083b4
 +			u64 new_id;
 +		};
 +		u64 src_id;
++		u64 dst_id;
 +	};
 +	struct list_head rules_entry;
 +};
@@ -14469,6 +16534,7 @@ index 0000000..cc083b4
 +		break;
 +
 +	case KDBUS_ITEM_ID:
++	case KDBUS_ITEM_DST_ID:
 +	case KDBUS_ITEM_ID_ADD:
 +	case KDBUS_ITEM_ID_REMOVE:
 +		break;
@@ -14561,96 +16627,74 @@ index 0000000..cc083b4
 +	return true;
 +}
 +
-+static bool kdbus_match_rules(const struct kdbus_match_entry *entry,
-+			      struct kdbus_conn *conn_src,
-+			      struct kdbus_kmsg *kmsg)
++static bool kdbus_match_rule_conn(const struct kdbus_match_rule *r,
++				  struct kdbus_conn *c,
++				  const struct kdbus_staging *s)
 +{
-+	struct kdbus_match_rule *r;
-+
-+	if (conn_src)
-+		lockdep_assert_held(&conn_src->ep->bus->name_registry->rwlock);
-+
-+	/*
-+	 * Walk all the rules and bail out immediately
-+	 * if any of them is unsatisfied.
-+	 */
-+
-+	list_for_each_entry(r, &entry->rules_list, rules_entry) {
-+		if (conn_src) {
-+			/* messages from userspace */
-+
-+			switch (r->type) {
-+			case KDBUS_ITEM_BLOOM_MASK:
-+				if (!kdbus_match_bloom(kmsg->bloom_filter,
-+						       &r->bloom_mask,
-+						       conn_src))
-+					return false;
-+				break;
-+
-+			case KDBUS_ITEM_ID:
-+				if (r->src_id != conn_src->id &&
-+				    r->src_id != KDBUS_MATCH_ID_ANY)
-+					return false;
-+
-+				break;
-+
-+			case KDBUS_ITEM_NAME:
-+				if (!kdbus_conn_has_name(conn_src, r->name))
-+					return false;
-+
-+				break;
-+
-+			default:
-+				return false;
-+			}
-+		} else {
-+			/* kernel notifications */
-+
-+			if (kmsg->notify_type != r->type)
-+				return false;
-+
-+			switch (r->type) {
-+			case KDBUS_ITEM_ID_ADD:
-+				if (r->new_id != KDBUS_MATCH_ID_ANY &&
-+				    r->new_id != kmsg->notify_new_id)
-+					return false;
++	lockdep_assert_held(&c->ep->bus->name_registry->rwlock);
 +
-+				break;
++	switch (r->type) {
++	case KDBUS_ITEM_BLOOM_MASK:
++		return kdbus_match_bloom(s->bloom_filter, &r->bloom_mask, c);
++	case KDBUS_ITEM_ID:
++		return r->src_id == c->id || r->src_id == KDBUS_MATCH_ID_ANY;
++	case KDBUS_ITEM_DST_ID:
++		return r->dst_id == s->msg->dst_id ||
++		       r->dst_id == KDBUS_MATCH_ID_ANY;
++	case KDBUS_ITEM_NAME:
++		return kdbus_conn_has_name(c, r->name);
++	default:
++		return false;
++	}
++}
 +
-+			case KDBUS_ITEM_ID_REMOVE:
-+				if (r->old_id != KDBUS_MATCH_ID_ANY &&
-+				    r->old_id != kmsg->notify_old_id)
-+					return false;
++static bool kdbus_match_rule_kernel(const struct kdbus_match_rule *r,
++				    const struct kdbus_staging *s)
++{
++	struct kdbus_item *n = s->notify;
 +
-+				break;
++	if (WARN_ON(!n) || n->type != r->type)
++		return false;
 +
-+			case KDBUS_ITEM_NAME_ADD:
-+			case KDBUS_ITEM_NAME_CHANGE:
-+			case KDBUS_ITEM_NAME_REMOVE:
-+				if ((r->old_id != KDBUS_MATCH_ID_ANY &&
-+				     r->old_id != kmsg->notify_old_id) ||
-+				    (r->new_id != KDBUS_MATCH_ID_ANY &&
-+				     r->new_id != kmsg->notify_new_id) ||
-+				    (r->name && kmsg->notify_name &&
-+				     strcmp(r->name, kmsg->notify_name) != 0))
-+					return false;
++	switch (r->type) {
++	case KDBUS_ITEM_ID_ADD:
++		return r->new_id == KDBUS_MATCH_ID_ANY ||
++		       r->new_id == n->id_change.id;
++	case KDBUS_ITEM_ID_REMOVE:
++		return r->old_id == KDBUS_MATCH_ID_ANY ||
++		       r->old_id == n->id_change.id;
++	case KDBUS_ITEM_NAME_ADD:
++	case KDBUS_ITEM_NAME_CHANGE:
++	case KDBUS_ITEM_NAME_REMOVE:
++		return (r->old_id == KDBUS_MATCH_ID_ANY ||
++		        r->old_id == n->name_change.old_id.id) &&
++		       (r->new_id == KDBUS_MATCH_ID_ANY ||
++		        r->new_id == n->name_change.new_id.id) &&
++		       (!r->name || !strcmp(r->name, n->name_change.name));
++	default:
++		return false;
++	}
++}
 +
-+				break;
++static bool kdbus_match_rules(const struct kdbus_match_entry *entry,
++			      struct kdbus_conn *c,
++			      const struct kdbus_staging *s)
++{
++	struct kdbus_match_rule *r;
 +
-+			default:
-+				return false;
-+			}
-+		}
-+	}
++	list_for_each_entry(r, &entry->rules_list, rules_entry)
++		if ((c && !kdbus_match_rule_conn(r, c, s)) ||
++		    (!c && !kdbus_match_rule_kernel(r, s)))
++			return false;
 +
 +	return true;
 +}
 +
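The refactoring above makes the match semantics easy to state: within one entry every rule must hold (rules AND together), and kdbus_match_db_match_msg() below accepts a message as soon as any entry is fully satisfied (entries OR together). For instance, an entry carrying both a KDBUS_ITEM_ID and the new KDBUS_ITEM_DST_ID rule reduces to a conjunction like this (illustrative constants):

/* Illustrative only: an entry {ID == 5, DST_ID == 7} fires iff both hold. */
static bool example_entry_fires(struct kdbus_conn *src,
				const struct kdbus_staging *s)
{
	return src->id == 5 && s->msg->dst_id == 7;
}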
 +/**
-+ * kdbus_match_db_match_kmsg() - match a kmsg object agains the database entries
++ * kdbus_match_db_match_msg() - match a msg object against the database entries
 + * @mdb:		The match database
 + * @conn_src:		The connection object originating the message
-+ * @kmsg:		The kmsg to perform the match on
++ * @staging:		Staging object containing the message to match against
 + *
 + * This function will walk through all the database entries previously uploaded
 + * with kdbus_match_db_add(). As soon as any of them has an all-satisfied rule
@@ -14661,16 +16705,16 @@ index 0000000..cc083b4
 + *
 + * Return: true if there was a matching database entry, false otherwise.
 + */
-+bool kdbus_match_db_match_kmsg(struct kdbus_match_db *mdb,
-+			       struct kdbus_conn *conn_src,
-+			       struct kdbus_kmsg *kmsg)
++bool kdbus_match_db_match_msg(struct kdbus_match_db *mdb,
++			      struct kdbus_conn *conn_src,
++			      const struct kdbus_staging *staging)
 +{
 +	struct kdbus_match_entry *entry;
 +	bool matched = false;
 +
 +	down_read(&mdb->mdb_rwlock);
 +	list_for_each_entry(entry, &mdb->entries_list, list_entry) {
-+		matched = kdbus_match_rules(entry, conn_src, kmsg);
++		matched = kdbus_match_rules(entry, conn_src, staging);
 +		if (matched)
 +			break;
 +	}
@@ -14710,6 +16754,7 @@ index 0000000..cc083b4
 + * KDBUS_ITEM_BLOOM_MASK:	A bloom mask
 + * KDBUS_ITEM_NAME:		A connection's source name
 + * KDBUS_ITEM_ID:		A connection ID
++ * KDBUS_ITEM_DST_ID:		A connection ID
 + * KDBUS_ITEM_NAME_ADD:
 + * KDBUS_ITEM_NAME_REMOVE:
 + * KDBUS_ITEM_NAME_CHANGE:	Well-known name changes, carry
@@ -14721,9 +16766,9 @@ index 0000000..cc083b4
 + * For kdbus_notify_{id,name}_change structs, only the ID and name fields
 + * are looked at when adding an entry. The flags are unused.
 + *
-+ * Also note that KDBUS_ITEM_BLOOM_MASK, KDBUS_ITEM_NAME and KDBUS_ITEM_ID
-+ * are used to match messages from userspace, while the others apply to
-+ * kernel-generated notifications.
++ * Also note that KDBUS_ITEM_BLOOM_MASK, KDBUS_ITEM_NAME, KDBUS_ITEM_ID,
++ * and KDBUS_ITEM_DST_ID are used to match messages from userspace, while the
++ * others apply to kernel-generated notifications.
 + *
 + * Return: >=0 on success, negative error code on failure.
 + */
@@ -14740,6 +16785,7 @@ index 0000000..cc083b4
 +		{ .type = KDBUS_ITEM_BLOOM_MASK, .multiple = true },
 +		{ .type = KDBUS_ITEM_NAME, .multiple = true },
 +		{ .type = KDBUS_ITEM_ID, .multiple = true },
++		{ .type = KDBUS_ITEM_DST_ID, .multiple = true },
 +		{ .type = KDBUS_ITEM_NAME_ADD, .multiple = true },
 +		{ .type = KDBUS_ITEM_NAME_REMOVE, .multiple = true },
 +		{ .type = KDBUS_ITEM_NAME_CHANGE, .multiple = true },
@@ -14822,6 +16868,10 @@ index 0000000..cc083b4
 +			rule->src_id = item->id;
 +			break;
 +
++		case KDBUS_ITEM_DST_ID:
++			rule->dst_id = item->id;
++			break;
++
 +		case KDBUS_ITEM_NAME_ADD:
 +		case KDBUS_ITEM_NAME_REMOVE:
 +		case KDBUS_ITEM_NAME_CHANGE:
@@ -14916,7 +16966,7 @@ index 0000000..cc083b4
 +}
 diff --git a/ipc/kdbus/match.h b/ipc/kdbus/match.h
 new file mode 100644
-index 0000000..ea42929
+index 0000000..ceb492f
 --- /dev/null
 +++ b/ipc/kdbus/match.h
 @@ -0,0 +1,35 @@
@@ -14938,8 +16988,8 @@ index 0000000..ea42929
 +#define __KDBUS_MATCH_H
 +
 +struct kdbus_conn;
-+struct kdbus_kmsg;
 +struct kdbus_match_db;
++struct kdbus_staging;
 +
 +struct kdbus_match_db *kdbus_match_db_new(void);
 +void kdbus_match_db_free(struct kdbus_match_db *db);
@@ -14947,9 +16997,9 @@ index 0000000..ea42929
 +		       struct kdbus_cmd_match *cmd);
 +int kdbus_match_db_remove(struct kdbus_conn *conn,
 +			  struct kdbus_cmd_match *cmd);
-+bool kdbus_match_db_match_kmsg(struct kdbus_match_db *db,
-+			       struct kdbus_conn *conn_src,
-+			       struct kdbus_kmsg *kmsg);
++bool kdbus_match_db_match_msg(struct kdbus_match_db *db,
++			      struct kdbus_conn *conn_src,
++			      const struct kdbus_staging *staging);
 +
 +int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp);
 +int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp);
@@ -14957,10 +17007,10 @@ index 0000000..ea42929
 +#endif
 diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
 new file mode 100644
-index 0000000..066e816
+index 0000000..3520f45
 --- /dev/null
 +++ b/ipc/kdbus/message.c
-@@ -0,0 +1,640 @@
+@@ -0,0 +1,1040 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15000,613 +17050,1013 @@ index 0000000..066e816
 +#include "names.h"
 +#include "policy.h"
 +
-+#define KDBUS_KMSG_HEADER_SIZE offsetof(struct kdbus_kmsg, msg)
++static const char * const zeros = "\0\0\0\0\0\0\0";
 +
-+static struct kdbus_msg_resources *kdbus_msg_resources_new(void)
++static struct kdbus_gaps *kdbus_gaps_new(size_t n_memfds, size_t n_fds)
 +{
-+	struct kdbus_msg_resources *r;
++	size_t size_offsets, size_memfds, size_fds, size;
++	struct kdbus_gaps *gaps;
 +
-+	r = kzalloc(sizeof(*r), GFP_KERNEL);
-+	if (!r)
++	size_offsets = n_memfds * sizeof(*gaps->memfd_offsets);
++	size_memfds = n_memfds * sizeof(*gaps->memfd_files);
++	size_fds = n_fds * sizeof(*gaps->fd_files);
++	size = sizeof(*gaps) + size_offsets + size_memfds + size_fds;
++
++	gaps = kzalloc(size, GFP_KERNEL);
++	if (!gaps)
 +		return ERR_PTR(-ENOMEM);
 +
-+	kref_init(&r->kref);
++	kref_init(&gaps->kref);
++	gaps->n_memfds = 0; /* we reserve n_memfds, but don't enforce them */
++	gaps->memfd_offsets = (void *)(gaps + 1);
++	gaps->memfd_files = (void *)((u8 *)gaps->memfd_offsets + size_offsets);
++	gaps->n_fds = 0; /* we reserve n_fds, but don't enforce them */
++	gaps->fd_files = (void *)((u8 *)gaps->memfd_files + size_memfds);
 +
-+	return r;
++	return gaps;
 +}
 +
-+static void __kdbus_msg_resources_free(struct kref *kref)
++static void kdbus_gaps_free(struct kref *kref)
 +{
-+	struct kdbus_msg_resources *r =
-+		container_of(kref, struct kdbus_msg_resources, kref);
++	struct kdbus_gaps *gaps = container_of(kref, struct kdbus_gaps, kref);
 +	size_t i;
 +
-+	for (i = 0; i < r->data_count; ++i) {
-+		switch (r->data[i].type) {
-+		case KDBUS_MSG_DATA_VEC:
-+			/* nothing to do */
-+			break;
-+		case KDBUS_MSG_DATA_MEMFD:
-+			if (r->data[i].memfd.file)
-+				fput(r->data[i].memfd.file);
-+			break;
-+		}
-+	}
-+
-+	for (i = 0; i < r->fds_count; i++)
-+		if (r->fds[i])
-+			fput(r->fds[i]);
++	for (i = 0; i < gaps->n_fds; ++i)
++		if (gaps->fd_files[i])
++			fput(gaps->fd_files[i]);
++	for (i = 0; i < gaps->n_memfds; ++i)
++		if (gaps->memfd_files[i])
++			fput(gaps->memfd_files[i]);
 +
-+	kfree(r->dst_name);
-+	kfree(r->data);
-+	kfree(r->fds);
-+	kfree(r);
++	kfree(gaps);
 +}
 +
 +/**
-+ * kdbus_msg_resources_ref() - Acquire reference to msg resources
-+ * @r:		resources to acquire ref to
++ * kdbus_gaps_ref() - gain reference
++ * @gaps:	gaps object
 + *
-+ * Return: The acquired resource
++ * Return: @gaps is returned
 + */
-+struct kdbus_msg_resources *
-+kdbus_msg_resources_ref(struct kdbus_msg_resources *r)
++struct kdbus_gaps *kdbus_gaps_ref(struct kdbus_gaps *gaps)
 +{
-+	if (r)
-+		kref_get(&r->kref);
-+	return r;
++	if (gaps)
++		kref_get(&gaps->kref);
++	return gaps;
 +}
 +
 +/**
-+ * kdbus_msg_resources_unref() - Drop reference to msg resources
-+ * @r:		resources to drop reference of
++ * kdbus_gaps_unref() - drop reference
++ * @gaps:	gaps object
 + *
 + * Return: NULL
 + */
-+struct kdbus_msg_resources *
-+kdbus_msg_resources_unref(struct kdbus_msg_resources *r)
++struct kdbus_gaps *kdbus_gaps_unref(struct kdbus_gaps *gaps)
 +{
-+	if (r)
-+		kref_put(&r->kref, __kdbus_msg_resources_free);
++	if (gaps)
++		kref_put(&gaps->kref, kdbus_gaps_free);
 +	return NULL;
 +}
 +
 +/**
-+ * kdbus_kmsg_free() - free allocated message
-+ * @kmsg:		Message
++ * kdbus_gaps_install() - install file-descriptors
++ * @gaps:		gaps object, or NULL
++ * @slice:		pool slice that contains the message
++ * @out_incomplete:	output variable to note incomplete fds
++ *
++ * This function installs all file-descriptors of @gaps into the current
++ * process and copies the file-descriptor numbers into the target pool slice.
++ *
++ * If the file-descriptors were only partially installed, then @out_incomplete
++ * will be set to true. Otherwise, it's set to false.
++ *
++ * Return: 0 on success, negative error code on failure
 + */
-+void kdbus_kmsg_free(struct kdbus_kmsg *kmsg)
++int kdbus_gaps_install(struct kdbus_gaps *gaps, struct kdbus_pool_slice *slice,
++		       bool *out_incomplete)
 +{
-+	if (!kmsg)
-+		return;
++	bool incomplete_fds = false;
++	struct kvec kvec;
++	size_t i, n_fds;
++	int ret, *fds;
 +
-+	kdbus_msg_resources_unref(kmsg->res);
-+	kdbus_meta_conn_unref(kmsg->conn_meta);
-+	kdbus_meta_proc_unref(kmsg->proc_meta);
-+	kfree(kmsg->iov);
-+	kfree(kmsg);
-+}
++	if (!gaps) {
++		/* nothing to do */
++		*out_incomplete = incomplete_fds;
++		return 0;
++	}
 +
-+/**
-+ * kdbus_kmsg_new() - allocate message
-+ * @bus:		Bus this message is allocated on
-+ * @extra_size:		Additional size to reserve for data
-+ *
-+ * Return: new kdbus_kmsg on success, ERR_PTR on failure.
-+ */
-+struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size)
-+{
-+	struct kdbus_kmsg *m;
-+	size_t size;
-+	int ret;
++	n_fds = gaps->n_fds + gaps->n_memfds;
++	if (n_fds < 1) {
++		/* nothing to do */
++		*out_incomplete = incomplete_fds;
++		return 0;
++	}
 +
-+	size = sizeof(struct kdbus_kmsg) + KDBUS_ITEM_SIZE(extra_size);
-+	m = kzalloc(size, GFP_KERNEL);
-+	if (!m)
-+		return ERR_PTR(-ENOMEM);
++	fds = kmalloc_array(n_fds, sizeof(*fds), GFP_TEMPORARY);
++	n_fds = 0;
++	if (!fds)
++		return -ENOMEM;
 +
-+	m->seq = atomic64_inc_return(&bus->domain->last_id);
-+	m->msg.size = size - KDBUS_KMSG_HEADER_SIZE;
-+	m->msg.items[0].size = KDBUS_ITEM_SIZE(extra_size);
++	/* 1) allocate fds and copy them over */
 +
-+	m->proc_meta = kdbus_meta_proc_new();
-+	if (IS_ERR(m->proc_meta)) {
-+		ret = PTR_ERR(m->proc_meta);
-+		m->proc_meta = NULL;
-+		goto exit;
++	if (gaps->n_fds > 0) {
++		for (i = 0; i < gaps->n_fds; ++i) {
++			int fd;
++
++			fd = get_unused_fd_flags(O_CLOEXEC);
++			if (fd < 0)
++				incomplete_fds = true;
++
++			WARN_ON(!gaps->fd_files[i]);
++
++			fds[n_fds++] = fd < 0 ? -1 : fd;
++		}
++
++		/*
++		 * The file-descriptor array can only be present once per
++		 * message. Hence, prepare all fds and then copy them over with
++		 * a single kvec.
++		 */
++
++		WARN_ON(!gaps->fd_offset);
++
++		kvec.iov_base = fds;
++		kvec.iov_len = gaps->n_fds * sizeof(*fds);
++		ret = kdbus_pool_slice_copy_kvec(slice, gaps->fd_offset,
++						 &kvec, 1, kvec.iov_len);
++		if (ret < 0)
++			goto exit;
 +	}
 +
-+	m->conn_meta = kdbus_meta_conn_new();
-+	if (IS_ERR(m->conn_meta)) {
-+		ret = PTR_ERR(m->conn_meta);
-+		m->conn_meta = NULL;
-+		goto exit;
++	for (i = 0; i < gaps->n_memfds; ++i) {
++		int memfd;
++
++		memfd = get_unused_fd_flags(O_CLOEXEC);
++		if (memfd < 0) {
++			incomplete_fds = true;
++			/* memfds are initialized to -1, skip copying it */
++			continue;
++		}
++
++		fds[n_fds++] = memfd;
++
++		/*
++		 * memfds have to be copied individually as they each are put
++		 * into a separate item. This should not be an issue, though,
++		 * as usually there is no need to send more than one memfd per
++		 * message.
++		 */
++
++		WARN_ON(!gaps->memfd_offsets[i]);
++		WARN_ON(!gaps->memfd_files[i]);
++
++		kvec.iov_base = &memfd;
++		kvec.iov_len = sizeof(memfd);
++		ret = kdbus_pool_slice_copy_kvec(slice, gaps->memfd_offsets[i],
++						 &kvec, 1, kvec.iov_len);
++		if (ret < 0)
++			goto exit;
 +	}
 +
-+	return m;
++	/* 2) install fds now that everything was successful */
++
++	for (i = 0; i < gaps->n_fds; ++i)
++		if (fds[i] >= 0)
++			fd_install(fds[i], get_file(gaps->fd_files[i]));
++	for (i = 0; i < gaps->n_memfds; ++i)
++		if (fds[gaps->n_fds + i] >= 0)
++			fd_install(fds[gaps->n_fds + i],
++				   get_file(gaps->memfd_files[i]));
++
++	ret = 0;
 +
 +exit:
-+	kdbus_kmsg_free(m);
-+	return ERR_PTR(ret);
++	if (ret < 0)
++		for (i = 0; i < n_fds; ++i)
++			put_unused_fd(fds[i]);
++	kfree(fds);
++	*out_incomplete = incomplete_fds;
++	return ret;
 +}
 +
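The ordering inside kdbus_gaps_install() is deliberate: descriptor numbers are only reserved (get_unused_fd_flags()) and published to the receiver first; the irreversible fd_install() runs once nothing can fail anymore. The idiom, reduced to a single descriptor (copy_fd_to_receiver() is a hypothetical stand-in for the pool-slice copy):

/* Illustrative only: reserve, publish, then irrevocably install. */
static int example_install_one(struct file *file)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* reserve a number */

	if (fd < 0)
		return fd;
	if (copy_fd_to_receiver(fd) < 0) {	/* hypothetical helper */
		put_unused_fd(fd);		/* reservation still revocable */
		return -EFAULT;
	}
	fd_install(fd, get_file(file));		/* the point of no return */
	return fd;
}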
-+static int kdbus_handle_check_file(struct file *file)
++static struct file *kdbus_get_fd(int fd)
 +{
-+	struct inode *inode = file_inode(file);
++	struct file *f, *ret;
++	struct inode *inode;
 +	struct socket *sock;
 +
-+	/*
-+	 * Don't allow file descriptors in the transport that themselves allow
-+	 * file descriptor queueing. This will eventually be allowed once both
-+	 * unix domain sockets and kdbus share a generic garbage collector.
-+	 */
++	if (fd < 0)
++		return ERR_PTR(-EBADF);
 +
-+	if (file->f_op == &kdbus_handle_ops)
-+		return -EOPNOTSUPP;
++	f = fget_raw(fd);
++	if (!f)
++		return ERR_PTR(-EBADF);
 +
-+	if (!S_ISSOCK(inode->i_mode))
-+		return 0;
++	inode = file_inode(f);
++	sock = S_ISSOCK(inode->i_mode) ? SOCKET_I(inode) : NULL;
 +
-+	if (file->f_mode & FMODE_PATH)
-+		return 0;
++	if (f->f_mode & FMODE_PATH)
++		ret = f; /* O_PATH is always allowed */
++	else if (f->f_op == &kdbus_handle_ops)
++		ret = ERR_PTR(-EOPNOTSUPP); /* disallow kdbus-fd over kdbus */
++	else if (sock && sock->sk && sock->ops && sock->ops->family == PF_UNIX)
++		ret = ERR_PTR(-EOPNOTSUPP); /* disallow UDS over kdbus */
++	else
++		ret = f; /* all other are allowed */
 +
-+	sock = SOCKET_I(inode);
-+	if (sock->sk && sock->ops && sock->ops->family == PF_UNIX)
-+		return -EOPNOTSUPP;
++	if (f != ret)
++		fput(f);
 +
-+	return 0;
++	return ret;
 +}
 +
-+static const char * const zeros = "\0\0\0\0\0\0\0";
++static struct file *kdbus_get_memfd(const struct kdbus_memfd *memfd)
++{
++	const int m = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL;
++	struct file *f, *ret;
++	int s;
 +
-+/*
-+ * kdbus_msg_scan_items() - validate incoming data and prepare parsing
-+ * @kmsg:		Message
-+ * @bus:		Bus the message is sent over
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ *
-+ * Files references in MEMFD or FDS items are pinned.
-+ *
-+ * On errors, the caller should drop any taken reference with
-+ * kdbus_kmsg_free()
-+ */
-+static int kdbus_msg_scan_items(struct kdbus_kmsg *kmsg,
-+				struct kdbus_bus *bus)
++	if (memfd->fd < 0)
++		return ERR_PTR(-EBADF);
++
++	f = fget(memfd->fd);
++	if (!f)
++		return ERR_PTR(-EBADF);
++
++	s = shmem_get_seals(f);
++	if (s < 0)
++		ret = ERR_PTR(-EMEDIUMTYPE);
++	else if ((s & m) != m)
++		ret = ERR_PTR(-ETXTBSY);
++	else if (memfd->start + memfd->size > (u64)i_size_read(file_inode(f)))
++		ret = ERR_PTR(-EFAULT);
++	else
++		ret = f;
++
++	if (f != ret)
++		fput(f);
++
++	return ret;
++}
++
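kdbus_get_memfd() above only accepts memfds carrying the complete seal set, so neither sender nor receiver can mutate a payload while it is in flight. A userspace sketch of producing such a descriptor (glibc exposes memfd_create() only since 2.27; older systems need syscall(__NR_memfd_create, ...); error details omitted):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Illustrative only: build a fully sealed memfd for PAYLOAD_MEMFD use. */
static int make_sealed_memfd(const void *data, size_t size)
{
	int fd = memfd_create("payload", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (write(fd, data, size) != (ssize_t)size ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_SEAL) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}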
++static int kdbus_msg_examine(struct kdbus_msg *msg, struct kdbus_bus *bus,
++			     struct kdbus_cmd_send *cmd, size_t *out_n_memfds,
++			     size_t *out_n_fds, size_t *out_n_parts)
 +{
-+	struct kdbus_msg_resources *res = kmsg->res;
-+	const struct kdbus_msg *msg = &kmsg->msg;
-+	const struct kdbus_item *item;
-+	size_t n, n_vecs, n_memfds;
-+	bool has_bloom = false;
-+	bool has_name = false;
-+	bool has_fds = false;
-+	bool is_broadcast;
-+	bool is_signal;
-+	u64 vec_size;
-+
-+	is_broadcast = (msg->dst_id == KDBUS_DST_ID_BROADCAST);
-+	is_signal = !!(msg->flags & KDBUS_MSG_SIGNAL);
-+
-+	/* count data payloads */
-+	n_vecs = 0;
-+	n_memfds = 0;
-+	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
-+		switch (item->type) {
-+		case KDBUS_ITEM_PAYLOAD_VEC:
-+			++n_vecs;
-+			break;
-+		case KDBUS_ITEM_PAYLOAD_MEMFD:
-+			++n_memfds;
-+			if (item->memfd.size % 8)
-+				++n_vecs;
-+			break;
-+		default:
-+			break;
-+		}
-+	}
++	struct kdbus_item *item, *fds = NULL, *bloom = NULL, *dstname = NULL;
++	u64 n_parts, n_memfds, n_fds, vec_size;
 +
-+	n = n_vecs + n_memfds;
-+	if (n > 0) {
-+		res->data = kcalloc(n, sizeof(*res->data), GFP_KERNEL);
-+		if (!res->data)
-+			return -ENOMEM;
++	/*
++	 * Step 1:
++	 * Validate the message and command parameters.
++	 */
++
++	/* KDBUS_PAYLOAD_KERNEL is reserved to kernel messages */
++	if (msg->payload_type == KDBUS_PAYLOAD_KERNEL)
++		return -EINVAL;
++
++	if (msg->dst_id == KDBUS_DST_ID_BROADCAST) {
++		/* broadcasts must be marked as signals */
++		if (!(msg->flags & KDBUS_MSG_SIGNAL))
++			return -EBADMSG;
++		/* broadcasts cannot have timeouts */
++		if (msg->timeout_ns > 0)
++			return -ENOTUNIQ;
 +	}
 +
-+	if (n_vecs > 0) {
-+		kmsg->iov = kcalloc(n_vecs, sizeof(*kmsg->iov), GFP_KERNEL);
-+		if (!kmsg->iov)
-+			return -ENOMEM;
++	if (msg->flags & KDBUS_MSG_EXPECT_REPLY) {
++		/* if you expect a reply, you must specify a timeout */
++		if (msg->timeout_ns == 0)
++			return -EINVAL;
++		/* signals cannot have replies */
++		if (msg->flags & KDBUS_MSG_SIGNAL)
++			return -ENOTUNIQ;
++	} else {
++		/* must expect reply if sent as synchronous call */
++		if (cmd->flags & KDBUS_SEND_SYNC_REPLY)
++			return -EINVAL;
++		/* cannot mark replies as signal */
++		if (msg->cookie_reply && (msg->flags & KDBUS_MSG_SIGNAL))
++			return -EINVAL;
 +	}
 +
-+	/* import data payloads */
-+	n = 0;
-+	vec_size = 0;
-+	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
-+		size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
-+		struct iovec *iov = kmsg->iov + kmsg->iov_count;
++	/*
++	 * Step 2:
++	 * Validate all passed items. While at it, select some statistics that
++	 * are required to allocate state objects later on.
++	 *
++	 * Generic item validation has already been done via
++	 * kdbus_item_validate(). Furthermore, the number of items is naturally
++	 * limited by the maximum message size. Hence, only non-generic item
++	 * checks are performed here (mainly integer overflow tests).
++	 */
 +
-+		if (++n > KDBUS_MSG_MAX_ITEMS)
-+			return -E2BIG;
++	n_parts = 0;
++	n_memfds = 0;
++	n_fds = 0;
++	vec_size = 0;
 +
++	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
 +		switch (item->type) {
 +		case KDBUS_ITEM_PAYLOAD_VEC: {
-+			struct kdbus_msg_data *d = res->data + res->data_count;
 +			void __force __user *ptr = KDBUS_PTR(item->vec.address);
-+			size_t size = item->vec.size;
++			u64 size = item->vec.size;
 +
 +			if (vec_size + size < vec_size)
 +				return -EMSGSIZE;
 +			if (vec_size + size > KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE)
 +				return -EMSGSIZE;
++			if (ptr && unlikely(!access_ok(VERIFY_READ, ptr, size)))
++				return -EFAULT;
 +
-+			d->type = KDBUS_MSG_DATA_VEC;
-+			d->size = size;
-+
-+			if (ptr) {
-+				if (unlikely(!access_ok(VERIFY_READ, ptr,
-+							size)))
-+					return -EFAULT;
-+
-+				d->vec.off = kmsg->pool_size;
-+				iov->iov_base = ptr;
-+				iov->iov_len = size;
-+			} else {
-+				d->vec.off = ~0ULL;
-+				iov->iov_base = (char __user *)zeros;
-+				iov->iov_len = size % 8;
-+			}
-+
-+			if (kmsg->pool_size + iov->iov_len < kmsg->pool_size)
-+				return -EMSGSIZE;
-+
-+			kmsg->pool_size += iov->iov_len;
-+			++kmsg->iov_count;
-+			++res->vec_count;
-+			++res->data_count;
-+			vec_size += size;
-+
++			if (ptr || size % 8) /* data or padding */
++				++n_parts;
 +			break;
 +		}
-+
 +		case KDBUS_ITEM_PAYLOAD_MEMFD: {
-+			struct kdbus_msg_data *d = res->data + res->data_count;
 +			u64 start = item->memfd.start;
 +			u64 size = item->memfd.size;
-+			size_t pad = size % 8;
-+			int seals, mask;
-+			struct file *f;
 +
-+			if (kmsg->pool_size + size % 8 < kmsg->pool_size)
-+				return -EMSGSIZE;
 +			if (start + size < start)
 +				return -EMSGSIZE;
-+
-+			if (item->memfd.fd < 0)
-+				return -EBADF;
-+
-+			if (res->memfd_count >= KDBUS_MSG_MAX_MEMFD_ITEMS)
++			if (n_memfds >= KDBUS_MSG_MAX_MEMFD_ITEMS)
 +				return -E2BIG;
 +
-+			f = fget(item->memfd.fd);
-+			if (!f)
-+				return -EBADF;
-+
-+			if (pad) {
-+				iov->iov_base = (char __user *)zeros;
-+				iov->iov_len = pad;
++			++n_memfds;
++			if (size % 8) /* vec-padding required */
++				++n_parts;
++			break;
++		}
++		case KDBUS_ITEM_FDS: {
++			if (fds)
++				return -EEXIST;
 +
-+				kmsg->pool_size += pad;
-+				++kmsg->iov_count;
-+			}
++			fds = item;
++			n_fds = KDBUS_ITEM_PAYLOAD_SIZE(item) / sizeof(int);
++			if (n_fds > KDBUS_CONN_MAX_FDS_PER_USER)
++				return -EMFILE;
 +
-+			++res->data_count;
-+			++res->memfd_count;
++			break;
++		}
++		case KDBUS_ITEM_BLOOM_FILTER: {
++			u64 bloom_size;
 +
-+			d->type = KDBUS_MSG_DATA_MEMFD;
-+			d->size = size;
-+			d->memfd.start = start;
-+			d->memfd.file = f;
++			if (bloom)
++				return -EEXIST;
 +
-+			/*
-+			 * We only accept a sealed memfd file whose content
-+			 * cannot be altered by the sender or anybody else
-+			 * while it is shared or in-flight. Other files need
-+			 * to be passed with KDBUS_MSG_FDS.
-+			 */
-+			seals = shmem_get_seals(f);
-+			if (seals < 0)
-+				return -EMEDIUMTYPE;
++			bloom = item;
++			bloom_size = KDBUS_ITEM_PAYLOAD_SIZE(item) -
++				     offsetof(struct kdbus_bloom_filter, data);
++			if (!KDBUS_IS_ALIGNED8(bloom_size))
++				return -EFAULT;
++			if (bloom_size != bus->bloom.size)
++				return -EDOM;
 +
-+			mask = F_SEAL_SHRINK | F_SEAL_GROW |
-+				F_SEAL_WRITE | F_SEAL_SEAL;
-+			if ((seals & mask) != mask)
-+				return -ETXTBSY;
++			break;
++		}
++		case KDBUS_ITEM_DST_NAME: {
++			if (dstname)
++				return -EEXIST;
 +
-+			if (start + size > (u64)i_size_read(file_inode(f)))
-+				return -EBADF;
++			dstname = item;
++			if (!kdbus_name_is_valid(item->str, false))
++				return -EINVAL;
++			if (msg->dst_id == KDBUS_DST_ID_BROADCAST)
++				return -EBADMSG;
 +
 +			break;
 +		}
++		default:
++			return -EINVAL;
++		}
++	}
 +
-+		case KDBUS_ITEM_FDS: {
-+			unsigned int i;
-+			unsigned int fds_count = payload_size / sizeof(int);
++	/*
++	 * Step 3:
++	 * Validate that required items were actually passed, and that no item
++	 * contradicts the message flags.
++	 */
 +
-+			/* do not allow multiple fd arrays */
-+			if (has_fds)
-+				return -EEXIST;
-+			has_fds = true;
++	/* bloom filters must be attached _iff_ it's a signal */
++	if (!(msg->flags & KDBUS_MSG_SIGNAL) != !bloom)
++		return -EBADMSG;
++	/* destination name is required if no ID is given */
++	if (msg->dst_id == KDBUS_DST_ID_NAME && !dstname)
++		return -EDESTADDRREQ;
++	/* cannot send file-descriptors attached to broadcasts */
++	if (msg->dst_id == KDBUS_DST_ID_BROADCAST && fds)
++		return -ENOTUNIQ;
 +
-+			/* Do not allow to broadcast file descriptors */
-+			if (is_broadcast)
-+				return -ENOTUNIQ;
++	*out_n_memfds = n_memfds;
++	*out_n_fds = n_fds;
++	*out_n_parts = n_parts;
 +
-+			if (fds_count > KDBUS_CONN_MAX_FDS_PER_USER)
-+				return -EMFILE;
++	return 0;
++}
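
The examine pass above leans on the unsigned wrap-around idiom: for a u64, "a + b < a" is true exactly when the addition overflowed, which is well-defined for unsigned types in C. A minimal user-space sketch of the same check (the helper name is illustrative, not part of kdbus):

	#include <stdbool.h>
	#include <stdint.h>

	/* Store a + b in *sum and return true iff the addition did not wrap. */
	static bool u64_add_ok(uint64_t a, uint64_t b, uint64_t *sum)
	{
		if (a + b < a)		/* unsigned overflow wrapped around */
			return false;
		*sum = a + b;
		return true;
	}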
 +
-+			res->fds = kcalloc(fds_count, sizeof(struct file *),
-+					   GFP_KERNEL);
-+			if (!res->fds)
-+				return -ENOMEM;
++static bool kdbus_staging_merge_vecs(struct kdbus_staging *staging,
++				     struct kdbus_item **prev_item,
++				     struct iovec **prev_vec,
++				     const struct kdbus_item *merge)
++{
++	void __user *ptr = (void __user *)KDBUS_PTR(merge->vec.address);
++	u64 padding = merge->vec.size % 8;
++	struct kdbus_item *prev = *prev_item;
++	struct iovec *vec = *prev_vec;
 +
-+			for (i = 0; i < fds_count; i++) {
-+				int fd = item->fds[i];
-+				int ret;
++	/* XXX: merging is disabled so far */
++	if (0 && prev && prev->type == KDBUS_ITEM_PAYLOAD_OFF &&
++	    !merge->vec.address == !prev->vec.address) {
++		/*
++		 * If we merge two VECs, we can always drop the second
++		 * PAYLOAD_VEC item. Hence, include its size in the previous
++		 * one.
++		 */
++		prev->vec.size += merge->vec.size;
 +
-+				/*
-+				 * Verify the fd and increment the usage count.
-+				 * Use fget_raw() to allow passing O_PATH fds.
-+				 */
-+				if (fd < 0)
-+					return -EBADF;
++		if (ptr) {
++			/*
++			 * If we merge two data VECs, we need two iovecs to copy
++			 * the data. But the items can be easily merged by
++			 * summing their lengths.
++			 */
++			vec = &staging->parts[staging->n_parts++];
++			vec->iov_len = merge->vec.size;
++			vec->iov_base = ptr;
++			staging->n_payload += vec->iov_len;
++		} else if (padding) {
++			/*
++			 * If we merge two 0-vecs with the second 0-vec
++			 * requiring padding, we need to insert an iovec to copy
++			 * the 0-padding. We try merging it with the previous
++			 * 0-padding iovec. This might end up with an
++			 * iov_len==0, in which case we simply drop the iovec.
++			 */
++			if (vec) {
++				staging->n_payload -= vec->iov_len;
++				vec->iov_len = prev->vec.size % 8;
++				if (!vec->iov_len) {
++					--staging->n_parts;
++					vec = NULL;
++				} else {
++					staging->n_payload += vec->iov_len;
++				}
++			} else {
++				vec = &staging->parts[staging->n_parts++];
++				vec->iov_len = padding;
++				vec->iov_base = (char __user *)zeros;
++				staging->n_payload += vec->iov_len;
++			}
++		} else {
++			/*
++			 * If we merge two 0-vecs with the second 0-vec having
++			 * no padding, we know the padding of the first stays
++			 * the same. Hence, @vec needs no adjustment.
++			 */
++		}
++
++		/* successfully merged with previous item */
++		merge = prev;
++	} else {
++		/*
++		 * If we cannot merge the payload item with the previous one,
++		 * we simply insert a new iovec for the data/padding.
++		 */
++		if (ptr) {
++			vec = &staging->parts[staging->n_parts++];
++			vec->iov_len = merge->vec.size;
++			vec->iov_base = ptr;
++			staging->n_payload += vec->iov_len;
++		} else if (padding) {
++			vec = &staging->parts[staging->n_parts++];
++			vec->iov_len = padding;
++			vec->iov_base = (char __user *)zeros;
++			staging->n_payload += vec->iov_len;
++		} else {
++			vec = NULL;
++		}
++	}
 +
-+				res->fds[i] = fget_raw(fd);
-+				if (!res->fds[i])
-+					return -EBADF;
++	*prev_item = (struct kdbus_item *)merge;
++	*prev_vec = vec;
 +
-+				res->fds_count++;
++	return merge == prev;
++}
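
The merge helper's padding rules all derive from 8-byte alignment arithmetic: a 0-vec of a given size contributes exactly "size % 8" zero bytes (copied from the shared zeros page), so whatever follows keeps the alignment it would have had if the full vec were present. A hedged sketch of that arithmetic, with ALIGN8 mirroring what KDBUS_ALIGN8 is assumed to compute:

	#include <stdint.h>

	/* round up to the next multiple of 8 */
	#define ALIGN8(x)	(((x) + 7) & ~(uint64_t)7)

	/* zero bytes a 0-vec of @size must contribute to keep alignment */
	static inline uint64_t zerovec_bytes(uint64_t size)
	{
		return size % 8;
	}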
 +
-+				ret = kdbus_handle_check_file(res->fds[i]);
-+				if (ret < 0)
-+					return ret;
++static int kdbus_staging_import(struct kdbus_staging *staging)
++{
++	struct kdbus_item *it, *item, *last, *prev_payload;
++	struct kdbus_gaps *gaps = staging->gaps;
++	struct kdbus_msg *msg = staging->msg;
++	struct iovec *part, *prev_part;
++	bool drop_item;
++
++	drop_item = false;
++	last = NULL;
++	prev_payload = NULL;
++	prev_part = NULL;
++
++	/*
++	 * We modify msg->items along the way; make sure to compute the next
++	 * item from @item (rather than from the read iterator @it).
++	 */
++	for (it = item = msg->items;
++	     it >= msg->items &&
++	             (u8 *)it < (u8 *)msg + msg->size &&
++	             (u8 *)it + it->size <= (u8 *)msg + msg->size; ) {
++		/*
++		 * If we dropped items along the way, move current item to
++		 * front. We must not access @it afterwards, but use @item
++		 * instead!
++		 */
++		if (it != item)
++			memmove(item, it, it->size);
++		it = (void *)((u8 *)it + KDBUS_ALIGN8(item->size));
++
++		switch (item->type) {
++		case KDBUS_ITEM_PAYLOAD_VEC: {
++			size_t offset = staging->n_payload;
++
++			if (kdbus_staging_merge_vecs(staging, &prev_payload,
++						     &prev_part, item)) {
++				drop_item = true;
++			} else if (item->vec.address) {
++				/* real offset is patched later on */
++				item->type = KDBUS_ITEM_PAYLOAD_OFF;
++				item->vec.offset = offset;
++			} else {
++				item->type = KDBUS_ITEM_PAYLOAD_OFF;
++				item->vec.offset = ~0ULL;
 +			}
 +
 +			break;
 +		}
++		case KDBUS_ITEM_PAYLOAD_MEMFD: {
++			struct file *f;
 +
-+		case KDBUS_ITEM_BLOOM_FILTER: {
-+			u64 bloom_size;
++			f = kdbus_get_memfd(&item->memfd);
++			if (IS_ERR(f))
++				return PTR_ERR(f);
++
++			gaps->memfd_files[gaps->n_memfds] = f;
++			gaps->memfd_offsets[gaps->n_memfds] =
++					(u8 *)&item->memfd.fd - (u8 *)msg;
++			++gaps->n_memfds;
++
++			/* memfds cannot be merged */
++			prev_payload = item;
++			prev_part = NULL;
++
++			/* insert padding to make following VECs aligned */
++			if (item->memfd.size % 8) {
++				part = &staging->parts[staging->n_parts++];
++				part->iov_len = item->memfd.size % 8;
++				part->iov_base = (char __user *)zeros;
++				staging->n_payload += part->iov_len;
++			}
 +
-+			/* do not allow multiple bloom filters */
-+			if (has_bloom)
-+				return -EEXIST;
-+			has_bloom = true;
++			break;
++		}
++		case KDBUS_ITEM_FDS: {
++			size_t i, n_fds;
 +
-+			bloom_size = payload_size -
-+				     offsetof(struct kdbus_bloom_filter, data);
++			n_fds = KDBUS_ITEM_PAYLOAD_SIZE(item) / sizeof(int);
++			for (i = 0; i < n_fds; ++i) {
++				struct file *f;
 +
-+			/*
-+			* Allow only bloom filter sizes of a multiple of 64bit.
-+			*/
-+			if (!KDBUS_IS_ALIGNED8(bloom_size))
-+				return -EFAULT;
++				f = kdbus_get_fd(item->fds[i]);
++				if (IS_ERR(f))
++					return PTR_ERR(f);
 +
-+			/* do not allow mismatching bloom filter sizes */
-+			if (bloom_size != bus->bloom.size)
-+				return -EDOM;
++				gaps->fd_files[gaps->n_fds++] = f;
++			}
++
++			gaps->fd_offset = (u8 *)item->fds - (u8 *)msg;
 +
-+			kmsg->bloom_filter = &item->bloom_filter;
 +			break;
 +		}
-+
++		case KDBUS_ITEM_BLOOM_FILTER:
++			staging->bloom_filter = &item->bloom_filter;
++			break;
 +		case KDBUS_ITEM_DST_NAME:
-+			/* do not allow multiple names */
-+			if (has_name)
-+				return -EEXIST;
-+			has_name = true;
-+
-+			if (!kdbus_name_is_valid(item->str, false))
-+				return -EINVAL;
-+
-+			res->dst_name = kstrdup(item->str, GFP_KERNEL);
-+			if (!res->dst_name)
-+				return -ENOMEM;
++			staging->dst_name = item->str;
 +			break;
++		}
 +
-+		default:
-+			return -EINVAL;
++		/* drop item if we merged it with a previous one */
++		if (drop_item) {
++			drop_item = false;
++		} else {
++			last = item;
++			item = KDBUS_ITEM_NEXT(item);
 +		}
 +	}
 +
-+	/* name is needed if no ID is given */
-+	if (msg->dst_id == KDBUS_DST_ID_NAME && !has_name)
-+		return -EDESTADDRREQ;
++	/* adjust message size regarding dropped items */
++	msg->size = offsetof(struct kdbus_msg, items);
++	if (last)
++		msg->size += ((u8 *)last - (u8 *)msg->items) + last->size;
 +
-+	if (is_broadcast) {
-+		/* Broadcasts can't take names */
-+		if (has_name)
-+			return -EBADMSG;
++	return 0;
++}
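
The import loop above is a classic in-place compaction: the read cursor @it runs ahead of the write cursor @item, and memmove() closes the gap whenever an item was merged away. The same pattern over a flat buffer of variable-size records, as a self-contained sketch (the record layout is invented for illustration):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	struct rec {
		uint32_t size;		/* total record size, incl. header */
	};

	/* Remove records for which drop() is true; returns the new length. */
	static size_t compact(void *buf, size_t len, bool (*drop)(struct rec *))
	{
		uint8_t *rd = buf, *wr = buf, *end = (uint8_t *)buf + len;

		while (rd + sizeof(struct rec) <= end) {
			struct rec *r = (struct rec *)rd;
			size_t sz = r->size;

			if (sz < sizeof(*r) || rd + sz > end)
				break;			/* malformed tail */
			if (!drop(r)) {
				if (wr != rd)
					memmove(wr, rd, sz);
				wr += sz;
			}
			rd += sz;
		}
		return wr - (uint8_t *)buf;
	}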
 +
-+		/* All broadcasts have to be signals */
-+		if (!is_signal)
-+			return -EBADMSG;
++static void kdbus_staging_reserve(struct kdbus_staging *staging)
++{
++	struct iovec *part;
 +
-+		/* Timeouts are not allowed for broadcasts */
-+		if (msg->timeout_ns > 0)
-+			return -ENOTUNIQ;
++	part = &staging->parts[staging->n_parts++];
++	part->iov_base = (void __user *)zeros;
++	part->iov_len = 0;
++}
++
++static struct kdbus_staging *kdbus_staging_new(struct kdbus_bus *bus,
++					       size_t n_parts,
++					       size_t msg_extra_size)
++{
++	const size_t reserved_parts = 5; /* see below for explanation */
++	struct kdbus_staging *staging;
++	int ret;
++
++	n_parts += reserved_parts;
++
++	staging = kzalloc(sizeof(*staging) + n_parts * sizeof(*staging->parts) +
++			  msg_extra_size, GFP_TEMPORARY);
++	if (!staging)
++		return ERR_PTR(-ENOMEM);
++
++	staging->msg_seqnum = atomic64_inc_return(&bus->domain->last_id);
++	staging->n_parts = 0; /* we reserve n_parts, but don't enforce them */
++	staging->parts = (void *)(staging + 1);
++
++	if (msg_extra_size) /* if requested, allocate message, too */
++		staging->msg = (void *)((u8 *)staging->parts +
++				        n_parts * sizeof(*staging->parts));
++
++	staging->meta_proc = kdbus_meta_proc_new();
++	if (IS_ERR(staging->meta_proc)) {
++		ret = PTR_ERR(staging->meta_proc);
++		staging->meta_proc = NULL;
++		goto error;
++	}
++
++	staging->meta_conn = kdbus_meta_conn_new();
++	if (IS_ERR(staging->meta_conn)) {
++		ret = PTR_ERR(staging->meta_conn);
++		staging->meta_conn = NULL;
++		goto error;
 +	}
 +
 +	/*
-+	 * Signal messages require a bloom filter, and bloom filters are
-+	 * only valid with signals.
++	 * Prepare iovecs to copy the message into the target pool. We use the
++	 * following iovecs:
++	 *   * iovec to copy "kdbus_msg.size"
++	 *   * iovec to copy "struct kdbus_msg" (minus size) plus items
++	 *   * iovec for possible padding after the items
++	 *   * iovec for metadata items
++	 *   * iovec for possible padding after the metadata items
++	 *
++	 * Make sure to update @reserved_parts if you add more parts here.
 +	 */
-+	if (is_signal ^ has_bloom)
-+		return -EBADMSG;
 +
-+	return 0;
++	kdbus_staging_reserve(staging); /* msg.size */
++	kdbus_staging_reserve(staging); /* msg (minus msg.size) plus items */
++	kdbus_staging_reserve(staging); /* msg padding */
++	kdbus_staging_reserve(staging); /* meta */
++	kdbus_staging_reserve(staging); /* meta padding */
++
++	return staging;
++
++error:
++	kdbus_staging_free(staging);
++	return ERR_PTR(ret);
 +}
 +
-+/**
-+ * kdbus_kmsg_new_from_cmd() - create kernel message from send payload
-+ * @conn:		Connection
-+ * @cmd_send:		Payload of KDBUS_CMD_SEND
-+ *
-+ * Return: a new kdbus_kmsg on success, ERR_PTR on failure.
-+ */
-+struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
-+					   struct kdbus_cmd_send *cmd_send)
++struct kdbus_staging *kdbus_staging_new_kernel(struct kdbus_bus *bus,
++					       u64 dst, u64 cookie_timeout,
++					       size_t it_size, size_t it_type)
 +{
-+	struct kdbus_kmsg *m;
-+	u64 size;
++	struct kdbus_staging *staging;
++	size_t size;
++
++	size = offsetof(struct kdbus_msg, items) +
++	       KDBUS_ITEM_HEADER_SIZE + it_size;
++
++	staging = kdbus_staging_new(bus, 0, KDBUS_ALIGN8(size));
++	if (IS_ERR(staging))
++		return ERR_CAST(staging);
++
++	staging->msg->size = size;
++	staging->msg->flags = (dst == KDBUS_DST_ID_BROADCAST) ?
++							KDBUS_MSG_SIGNAL : 0;
++	staging->msg->dst_id = dst;
++	staging->msg->src_id = KDBUS_SRC_ID_KERNEL;
++	staging->msg->payload_type = KDBUS_PAYLOAD_KERNEL;
++	staging->msg->cookie_reply = cookie_timeout;
++	staging->notify = staging->msg->items;
++	staging->notify->size = KDBUS_ITEM_HEADER_SIZE + it_size;
++	staging->notify->type = it_type;
++
++	return staging;
++}
++
++struct kdbus_staging *kdbus_staging_new_user(struct kdbus_bus *bus,
++					     struct kdbus_cmd_send *cmd,
++					     struct kdbus_msg *msg)
++{
++	const size_t reserved_parts = 1; /* see below for explanation */
++	size_t n_memfds, n_fds, n_parts;
++	struct kdbus_staging *staging;
 +	int ret;
 +
-+	ret = kdbus_copy_from_user(&size, KDBUS_PTR(cmd_send->msg_address),
-+				   sizeof(size));
++	/*
++	 * Examine user-supplied message and figure out how many resources we
++	 * need to allocate in our staging area. This requires us to iterate
++	 * the message twice, but saves us from re-allocating our resources
++	 * all the time.
++	 */
++
++	ret = kdbus_msg_examine(msg, bus, cmd, &n_memfds, &n_fds, &n_parts);
 +	if (ret < 0)
 +		return ERR_PTR(ret);
 +
-+	if (size < sizeof(struct kdbus_msg) || size > KDBUS_MSG_MAX_SIZE)
-+		return ERR_PTR(-EINVAL);
++	n_parts += reserved_parts;
 +
-+	m = kmalloc(size + KDBUS_KMSG_HEADER_SIZE, GFP_KERNEL);
-+	if (!m)
-+		return ERR_PTR(-ENOMEM);
++	/*
++	 * Allocate staging area with the number of required resources. Make
++	 * sure that we have enough iovecs for all required parts pre-allocated
++	 * so this will hopefully be the only memory allocation for this
++	 * message transaction.
++	 */
 +
-+	memset(m, 0, KDBUS_KMSG_HEADER_SIZE);
-+	m->seq = atomic64_inc_return(&conn->ep->bus->domain->last_id);
++	staging = kdbus_staging_new(bus, n_parts, 0);
++	if (IS_ERR(staging))
++		return ERR_CAST(staging);
 +
-+	m->proc_meta = kdbus_meta_proc_new();
-+	if (IS_ERR(m->proc_meta)) {
-+		ret = PTR_ERR(m->proc_meta);
-+		m->proc_meta = NULL;
-+		goto exit_free;
-+	}
++	staging->msg = msg;
 +
-+	m->conn_meta = kdbus_meta_conn_new();
-+	if (IS_ERR(m->conn_meta)) {
-+		ret = PTR_ERR(m->conn_meta);
-+		m->conn_meta = NULL;
-+		goto exit_free;
-+	}
++	/*
++	 * If the message contains memfds or fd items, we need to remember some
++	 * state so we can fill in the requested information at RECV time.
++	 * File-descriptors cannot be passed at SEND time. Hence, allocate a
++	 * gaps-object to remember that state. That gaps object is linked to
++	 * from the staging area, but will also be linked to from the message
++	 * queue of each peer. Hence, each receiver owns a reference to it, and
++	 * it will later be used to fill the 'gaps' in the message that couldn't
++	 * filled at SEND time.
++	 * Note that the 'gaps' object is read-only once the staging-allocator
++	 * returns. There might be connections receiving a queued message while
++	 * the sender still broadcasts the message to other receivers.
++	 */
 +
-+	if (copy_from_user(&m->msg, KDBUS_PTR(cmd_send->msg_address), size)) {
-+		ret = -EFAULT;
-+		goto exit_free;
++	if (n_memfds > 0 || n_fds > 0) {
++		staging->gaps = kdbus_gaps_new(n_memfds, n_fds);
++		if (IS_ERR(staging->gaps)) {
++			ret = PTR_ERR(staging->gaps);
++			staging->gaps = NULL;
++			kdbus_staging_free(staging);
++			return ERR_PTR(ret);
++		}
 +	}
 +
-+	if (m->msg.size != size) {
-+		ret = -EINVAL;
-+		goto exit_free;
-+	}
++	/*
++	 * kdbus_staging_new() already reserves parts for message setup. For
++	 * user-supplied messages, we add the following iovecs:
++	 *   ... variable number of iovecs for payload ...
++	 *   * final iovec for possible padding of payload
++	 *
++	 * Make sure to update @reserved_parts if you add more parts here.
++	 */
 +
-+	if (m->msg.flags & ~(KDBUS_MSG_EXPECT_REPLY |
-+			     KDBUS_MSG_NO_AUTO_START |
-+			     KDBUS_MSG_SIGNAL)) {
-+		ret = -EINVAL;
-+		goto exit_free;
++	ret = kdbus_staging_import(staging); /* payload */
++	kdbus_staging_reserve(staging); /* payload padding */
++
++	if (ret < 0)
++		goto error;
++
++	return staging;
++
++error:
++	kdbus_staging_free(staging);
++	return ERR_PTR(ret);
++}
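
The comment above describes the count-then-allocate idiom: iterate the input once to validate and size everything, then allocate exactly once and fill in a second pass. A generic sketch of the same two-pass shape (simplified to an integer filter; not kdbus code):

	#include <errno.h>
	#include <stdlib.h>

	static int import_ints(const int *src, size_t n_src,
			       int **out, size_t *n_out)
	{
		size_t i, n = 0;

		for (i = 0; i < n_src; i++)	/* pass 1: validate + count */
			if (src[i] >= 0)
				n++;

		*out = malloc(n * sizeof(**out));
		if (!*out && n > 0)
			return -ENOMEM;

		*n_out = 0;
		for (i = 0; i < n_src; i++)	/* pass 2: fill, no reallocs */
			if (src[i] >= 0)
				(*out)[(*n_out)++] = src[i];
		return 0;
	}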
++
++struct kdbus_staging *kdbus_staging_free(struct kdbus_staging *staging)
++{
++	if (!staging)
++		return NULL;
++
++	kdbus_meta_conn_unref(staging->meta_conn);
++	kdbus_meta_proc_unref(staging->meta_proc);
++	kdbus_gaps_unref(staging->gaps);
++	kfree(staging);
++
++	return NULL;
++}
++
++static int kdbus_staging_collect_metadata(struct kdbus_staging *staging,
++					  struct kdbus_conn *src,
++					  struct kdbus_conn *dst,
++					  u64 *out_attach)
++{
++	u64 attach;
++	int ret;
++
++	if (src)
++		attach = kdbus_meta_msg_mask(src, dst);
++	else
++		attach = KDBUS_ATTACH_TIMESTAMP; /* metadata for kernel msgs */
++
++	if (src && !src->meta_fake) {
++		ret = kdbus_meta_proc_collect(staging->meta_proc, attach);
++		if (ret < 0)
++			return ret;
 +	}
 +
-+	ret = kdbus_items_validate(m->msg.items,
-+				   KDBUS_ITEMS_SIZE(&m->msg, items));
++	ret = kdbus_meta_conn_collect(staging->meta_conn, src,
++				      staging->msg_seqnum, attach);
 +	if (ret < 0)
-+		goto exit_free;
++		return ret;
 +
-+	m->res = kdbus_msg_resources_new();
-+	if (IS_ERR(m->res)) {
-+		ret = PTR_ERR(m->res);
-+		m->res = NULL;
-+		goto exit_free;
++	*out_attach = attach;
++	return 0;
++}
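
For the attach mask itself, the kdbus model (as described in its documentation) is an intersection: metadata is attached only if the sender is willing to send it and the receiver asked to receive it, while kernel-generated messages carry just a timestamp, as in the branch above. A sketch with illustrative bit values (the real KDBUS_ATTACH_* constants live in the UAPI header):

	#include <stdint.h>

	#define ATTACH_TIMESTAMP	(1ULL << 0)	/* illustrative */
	#define ATTACH_CREDS		(1ULL << 1)
	#define ATTACH_PIDS		(1ULL << 2)

	/* effective metadata = what the sender offers AND the receiver wants */
	static uint64_t effective_attach(uint64_t sender_sends,
					 uint64_t receiver_wants)
	{
		return sender_sends & receiver_wants;
	}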
++
++/**
++ * kdbus_staging_emit() - emit linearized message in target pool
++ * @staging:		staging object to create message from
++ * @src:		sender of the message (or NULL)
++ * @dst:		target connection to allocate message for
++ *
++ * This allocates a pool-slice for @dst and copies the message provided by
++ * @staging into it. The new slice is then returned to the caller for further
++ * processing. It's not linked into any queue, yet.
++ *
++ * Return: Newly allocated slice or ERR_PTR on failure.
++ */
++struct kdbus_pool_slice *kdbus_staging_emit(struct kdbus_staging *staging,
++					    struct kdbus_conn *src,
++					    struct kdbus_conn *dst)
++{
++	struct kdbus_item *item, *meta_items = NULL;
++	struct kdbus_pool_slice *slice = NULL;
++	size_t off, size, msg_size, meta_size;
++	struct iovec *v;
++	u64 attach;
++	int ret;
++
++	/*
++	 * Step 1:
++	 * Collect metadata from @src depending on the attach-flags allowed for
++	 * @dst. Translate it into the namespaces pinned by @dst.
++	 */
++
++	ret = kdbus_staging_collect_metadata(staging, src, dst, &attach);
++	if (ret < 0)
++		goto error;
++
++	ret = kdbus_meta_emit(staging->meta_proc, NULL, staging->meta_conn,
++			      dst, attach, &meta_items, &meta_size);
++	if (ret < 0)
++		goto error;
++
++	/*
++	 * Step 2:
++	 * Setup iovecs for the message. See kdbus_staging_new() for allocation
++	 * of those iovecs. All reserved iovecs have been initialized with
++	 * iov_len=0 + iov_base=zeros. Furthermore, the iovecs to copy the
++	 * actual message payload have already been initialized and need not be
++	 * touched.
++	 */
++
++	v = staging->parts;
++	msg_size = staging->msg->size;
++
++	/* msg.size */
++	v->iov_len = sizeof(msg_size);
++	v->iov_base = &msg_size;
++	++v;
++
++	/* msg (after msg.size) plus items */
++	v->iov_len = staging->msg->size - sizeof(staging->msg->size);
++	v->iov_base = (void __user *)((u8 *)staging->msg +
++				      sizeof(staging->msg->size));
++	++v;
++
++	/* padding after msg */
++	v->iov_len = KDBUS_ALIGN8(staging->msg->size) - staging->msg->size;
++	v->iov_base = (void __user *)zeros;
++	++v;
++
++	if (meta_size > 0) {
++		/* metadata items */
++		v->iov_len = meta_size;
++		v->iov_base = meta_items;
++		++v;
++
++		/* padding after metadata */
++		v->iov_len = KDBUS_ALIGN8(meta_size) - meta_size;
++		v->iov_base = (void __user *)zeros;
++		++v;
++
++		msg_size = KDBUS_ALIGN8(msg_size) + meta_size;
++	} else {
++		/* metadata items */
++		v->iov_len = 0;
++		v->iov_base = (void __user *)zeros;
++		++v;
++
++		/* padding after metadata */
++		v->iov_len = 0;
++		v->iov_base = (void __user *)zeros;
++		++v;
 +	}
 +
-+	/* do not accept kernel-generated messages */
-+	if (m->msg.payload_type == KDBUS_PAYLOAD_KERNEL) {
-+		ret = -EINVAL;
-+		goto exit_free;
++	/* ... payload iovecs are already filled in ... */
++
++	/* compute overall size and fill in padding after payload */
++	size = KDBUS_ALIGN8(msg_size);
++
++	if (staging->n_payload > 0) {
++		size += staging->n_payload;
++
++		v = &staging->parts[staging->n_parts - 1];
++		v->iov_len = KDBUS_ALIGN8(size) - size;
++		v->iov_base = (void __user *)zeros;
++
++		size = KDBUS_ALIGN8(size);
 +	}
 +
-+	if (m->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
-+		/* requests for replies need timeout and cookie */
-+		if (m->msg.timeout_ns == 0 || m->msg.cookie == 0) {
-+			ret = -EINVAL;
-+			goto exit_free;
-+		}
++	/*
++	 * Step 3:
++	 * The PAYLOAD_OFF items in the message contain a relative 'offset'
++	 * field that tells the receiver where to find the actual payload. This
++	 * offset is relative to the start of the message, and as such depends
++	 * on the size of the metadata items we inserted. This size is variable
++	 * and changes for each peer we send the message to. Hence, we remember
++	 * the last relative offset that was used to calculate the 'offset'
++	 * fields. For each message, we re-calculate it and patch all items, in
++	 * case it changed.
++	 */
 +
-+		/* replies may not be expected for broadcasts */
-+		if (m->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
-+			ret = -ENOTUNIQ;
-+			goto exit_free;
-+		}
++	off = KDBUS_ALIGN8(msg_size);
 +
-+		/* replies may not be expected for signals */
-+		if (m->msg.flags & KDBUS_MSG_SIGNAL) {
-+			ret = -EINVAL;
-+			goto exit_free;
-+		}
-+	} else {
-+		/*
-+		 * KDBUS_SEND_SYNC_REPLY is only valid together with
-+		 * KDBUS_MSG_EXPECT_REPLY
-+		 */
-+		if (cmd_send->flags & KDBUS_SEND_SYNC_REPLY) {
-+			ret = -EINVAL;
-+			goto exit_free;
-+		}
++	if (off != staging->i_payload) {
++		KDBUS_ITEMS_FOREACH(item, staging->msg->items,
++				    KDBUS_ITEMS_SIZE(staging->msg, items)) {
++			if (item->type != KDBUS_ITEM_PAYLOAD_OFF)
++				continue;
 +
-+		/* replies cannot be signals */
-+		if (m->msg.cookie_reply && (m->msg.flags & KDBUS_MSG_SIGNAL)) {
-+			ret = -EINVAL;
-+			goto exit_free;
++			item->vec.offset -= staging->i_payload;
++			item->vec.offset += off;
 +		}
++
++		staging->i_payload = off;
 +	}
 +
-+	ret = kdbus_msg_scan_items(m, conn->ep->bus);
++	/*
++	 * Step 4:
++	 * Allocate pool slice and copy over all data. Make sure to properly
++	 * account on user quota.
++	 */
++
++	ret = kdbus_conn_quota_inc(dst, src ? src->user : NULL, size,
++				   staging->gaps ? staging->gaps->n_fds : 0);
 +	if (ret < 0)
-+		goto exit_free;
++		goto error;
 +
-+	/* patch-in the source of this message */
-+	if (m->msg.src_id > 0 && m->msg.src_id != conn->id) {
-+		ret = -EINVAL;
-+		goto exit_free;
++	slice = kdbus_pool_slice_alloc(dst->pool, size, true);
++	if (IS_ERR(slice)) {
++		ret = PTR_ERR(slice);
++		slice = NULL;
++		goto error;
 +	}
-+	m->msg.src_id = conn->id;
-+
-+	return m;
 +
-+exit_free:
-+	kdbus_kmsg_free(m);
-+	return ERR_PTR(ret);
-+}
++	WARN_ON(kdbus_pool_slice_size(slice) != size);
 +
-+/**
-+ * kdbus_kmsg_collect_metadata() - collect metadata
-+ * @kmsg:	message to collect metadata on
-+ * @src:	source connection of message
-+ * @dst:	destination connection of message
-+ *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_kmsg_collect_metadata(struct kdbus_kmsg *kmsg, struct kdbus_conn *src,
-+				struct kdbus_conn *dst)
-+{
-+	u64 attach;
-+	int ret;
++	ret = kdbus_pool_slice_copy_iovec(slice, 0, staging->parts,
++					  staging->n_parts, size);
++	if (ret < 0)
++		goto error;
 +
-+	attach = kdbus_meta_calc_attach_flags(src, dst);
-+	if (!src->faked_meta) {
-+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
-+		if (ret < 0)
-+			return ret;
-+	}
++	/* all done, return slice to caller */
++	goto exit;
 +
-+	return kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++error:
++	if (slice)
++		kdbus_conn_quota_dec(dst, src ? src->user : NULL, size,
++				     staging->gaps ? staging->gaps->n_fds : 0);
++	kdbus_pool_slice_release(slice);
++	slice = ERR_PTR(ret);
++exit:
++	kfree(meta_items);
++	return slice;
 +}
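
Conceptually, the final copy in step 4 is a gather operation: the iovec array assembled above is flattened into one contiguous pool slice. A user-space analogue of that gather step (kdbus_pool_slice_copy_iovec() itself must also read from user-space source pages, which this sketch glosses over):

	#include <string.h>
	#include <sys/uio.h>

	/* Flatten @n_parts iovecs into @dst; returns the bytes written. */
	static size_t gather(void *dst, size_t dst_len,
			     const struct iovec *parts, size_t n_parts)
	{
		size_t i, off = 0;

		for (i = 0; i < n_parts; i++) {
			if (off + parts[i].iov_len > dst_len)
				break;		/* caller sized @dst wrong */
			memcpy((char *)dst + off,
			       parts[i].iov_base, parts[i].iov_len);
			off += parts[i].iov_len;
		}
		return off;
	}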
 diff --git a/ipc/kdbus/message.h b/ipc/kdbus/message.h
 new file mode 100644
-index 0000000..cdaa65c
+index 0000000..298f9c9
 --- /dev/null
 +++ b/ipc/kdbus/message.h
-@@ -0,0 +1,135 @@
+@@ -0,0 +1,120 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15623,131 +18073,116 @@ index 0000000..cdaa65c
 +#ifndef __KDBUS_MESSAGE_H
 +#define __KDBUS_MESSAGE_H
 +
-+#include "util.h"
-+#include "metadata.h"
-+
-+/**
-+ * enum kdbus_msg_data_type - Type of kdbus_msg_data payloads
-+ * @KDBUS_MSG_DATA_VEC:		Data vector provided by user-space
-+ * @KDBUS_MSG_DATA_MEMFD:	Memfd payload
-+ */
-+enum kdbus_msg_data_type {
-+	KDBUS_MSG_DATA_VEC,
-+	KDBUS_MSG_DATA_MEMFD,
-+};
-+
-+/**
-+ * struct kdbus_msg_data - Data payload as stored by messages
-+ * @type:	Type of payload (KDBUS_MSG_DATA_*)
-+ * @size:	Size of the described payload
-+ * @off:	The offset, relative to the vec slice
-+ * @start:	Offset inside the memfd
-+ * @file:	Backing file referenced by the memfd
-+ */
-+struct kdbus_msg_data {
-+	unsigned int type;
-+	u64 size;
++#include <linux/fs.h>
++#include <linux/kref.h>
++#include <uapi/linux/kdbus.h>
 +
-+	union {
-+		struct {
-+			u64 off;
-+		} vec;
-+		struct {
-+			u64 start;
-+			struct file *file;
-+		} memfd;
-+	};
-+};
++struct kdbus_bus;
++struct kdbus_conn;
++struct kdbus_meta_conn;
++struct kdbus_meta_proc;
++struct kdbus_pool_slice;
 +
 +/**
-+ * struct kdbus_kmsg_resources - resources of a message
++ * struct kdbus_gaps - gaps in message to be filled later
 + * @kref:		Reference counter
-+ * @dst_name:		Short-cut to msg for faster lookup
-+ * @fds:		Array of file descriptors to pass
-+ * @fds_count:		Number of file descriptors to pass
-+ * @data:		Array of data payloads
-+ * @vec_count:		Number of VEC entries
-+ * @memfd_count:	Number of MEMFD entries in @data
-+ * @data_count:		Sum of @vec_count + @memfd_count
-+ */
-+struct kdbus_msg_resources {
++ * @n_memfds:		Number of memfds
++ * @memfd_offsets:	Offsets of kdbus_memfd items in target slice
++ * @memfd_files:	Array of pinned memfd files
++ * @n_fds:		Number of fds
++ * @fd_files:		Array of pinned files sent as fds
++ * @fd_offset:		Offset of the fd-array in the target slice
++ *
++ * The 'gaps' object is used to track data that is needed to fill gaps in a
++ * message at RECV time. Usually, we try to compile the whole message at SEND
++ * time. This has the advantage that we don't have to cache any information and
++ * can keep the memory consumption small. Furthermore, all copy operations can
++ * be combined into a single function call, which speeds up transactions
++ * considerably.
++ * However, things like file-descriptors can only be fully installed at RECV
++ * time. The gaps object tracks this data and pins it until a message is
++ * received. The gaps object is shared between all receivers of the same
++ * message.
++ */
++struct kdbus_gaps {
 +	struct kref kref;
-+	const char *dst_name;
 +
-+	struct file **fds;
-+	unsigned int fds_count;
++	/* state tracking for KDBUS_ITEM_PAYLOAD_MEMFD entries */
++	size_t n_memfds;
++	u64 *memfd_offsets;
++	struct file **memfd_files;
 +
-+	struct kdbus_msg_data *data;
-+	size_t vec_count;
-+	size_t memfd_count;
-+	size_t data_count;
++	/* state tracking for KDBUS_ITEM_FDS */
++	size_t n_fds;
++	struct file **fd_files;
++	u64 fd_offset;
 +};
 +
-+struct kdbus_msg_resources *
-+kdbus_msg_resources_ref(struct kdbus_msg_resources *r);
-+struct kdbus_msg_resources *
-+kdbus_msg_resources_unref(struct kdbus_msg_resources *r);
++struct kdbus_gaps *kdbus_gaps_ref(struct kdbus_gaps *gaps);
++struct kdbus_gaps *kdbus_gaps_unref(struct kdbus_gaps *gaps);
++int kdbus_gaps_install(struct kdbus_gaps *gaps, struct kdbus_pool_slice *slice,
++		       bool *out_incomplete);
 +
 +/**
-+ * struct kdbus_kmsg - internal message handling data
-+ * @seq:		Domain-global message sequence number
-+ * @notify_type:	Short-cut for faster lookup
-+ * @notify_old_id:	Short-cut for faster lookup
-+ * @notify_new_id:	Short-cut for faster lookup
-+ * @notify_name:	Short-cut for faster lookup
-+ * @dst_name_id:	Short-cut to msg for faster lookup
-+ * @bloom_filter:	Bloom filter to match message properties
-+ * @bloom_generation:	Generation of bloom element set
-+ * @notify_entry:	List of kernel-generated notifications
-+ * @iov:		Array of iovec, describing the payload to copy
-+ * @iov_count:		Number of array members in @iov
-+ * @pool_size:		Overall size of inlined data referenced by @iov
-+ * @proc_meta:		Appended SCM-like metadata of the sending process
-+ * @conn_meta:		Appended SCM-like metadata of the sending connection
-+ * @res:		Message resources
-+ * @msg:		Message from or to userspace
-+ */
-+struct kdbus_kmsg {
-+	u64 seq;
-+	u64 notify_type;
-+	u64 notify_old_id;
-+	u64 notify_new_id;
-+	const char *notify_name;
-+
-+	u64 dst_name_id;
-+	const struct kdbus_bloom_filter *bloom_filter;
-+	u64 bloom_generation;
++ * struct kdbus_staging - staging area to import messages
++ * @msg:		User-supplied message
++ * @gaps:		Gaps-object created during import (or NULL if empty)
++ * @msg_seqnum:		Message sequence number
++ * @notify_entry:	Entry into list of kernel-generated notifications
++ * @i_payload:		Current relative index of start of payload
++ * @n_payload:		Total number of bytes needed for payload
++ * @n_parts:		Number of parts
++ * @parts:		Array of iovecs that make up the whole message
++ * @meta_proc:		Process metadata of the sender (or NULL if empty)
++ * @meta_conn:		Connection metadata of the sender (or NULL if empty)
++ * @bloom_filter:	Pointer to the bloom-item in @msg, or NULL
++ * @dst_name:		Pointer to the dst-name-item in @msg, or NULL
++ * @notify:		Pointer to the notification item in @msg, or NULL
++ *
++ * The kdbus_staging object is a temporary staging area to import user-supplied
++ * messages into the kernel. It is only used during SEND and dropped once the
++ * message is queued. Any data that cannot be collected during SEND is
++ * collected in a kdbus_gaps object and attached to the message queue.
++ */
++struct kdbus_staging {
++	struct kdbus_msg *msg;
++	struct kdbus_gaps *gaps;
++	u64 msg_seqnum;
 +	struct list_head notify_entry;
 +
-+	struct iovec *iov;
-+	size_t iov_count;
-+	u64 pool_size;
++	/* crafted iovecs to copy the message */
++	size_t i_payload;
++	size_t n_payload;
++	size_t n_parts;
++	struct iovec *parts;
 +
-+	struct kdbus_meta_proc *proc_meta;
-+	struct kdbus_meta_conn *conn_meta;
-+	struct kdbus_msg_resources *res;
++	/* metadata state */
++	struct kdbus_meta_proc *meta_proc;
++	struct kdbus_meta_conn *meta_conn;
 +
-+	/* variable size, must be the last member */
-+	struct kdbus_msg msg;
++	/* cached pointers into @msg */
++	const struct kdbus_bloom_filter *bloom_filter;
++	const char *dst_name;
++	struct kdbus_item *notify;
 +};
 +
-+struct kdbus_bus;
-+struct kdbus_conn;
-+
-+struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size);
-+struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
-+					   struct kdbus_cmd_send *cmd_send);
-+void kdbus_kmsg_free(struct kdbus_kmsg *kmsg);
-+int kdbus_kmsg_collect_metadata(struct kdbus_kmsg *kmsg, struct kdbus_conn *src,
-+				struct kdbus_conn *dst);
++struct kdbus_staging *kdbus_staging_new_kernel(struct kdbus_bus *bus,
++					       u64 dst, u64 cookie_timeout,
++					       size_t it_size, size_t it_type);
++struct kdbus_staging *kdbus_staging_new_user(struct kdbus_bus *bus,
++					     struct kdbus_cmd_send *cmd,
++					     struct kdbus_msg *msg);
++struct kdbus_staging *kdbus_staging_free(struct kdbus_staging *staging);
++struct kdbus_pool_slice *kdbus_staging_emit(struct kdbus_staging *staging,
++					    struct kdbus_conn *src,
++					    struct kdbus_conn *dst);
 +
 +#endif
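
kdbus_gaps_ref()/kdbus_gaps_unref() declared above follow the kernel's kref convention: ref returns the object for chaining, unref returns NULL so callers can drop and clear a pointer in one statement. A user-space analogue using C11 atomics (field names are illustrative; a real kref also handles release callbacks):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct gaps {
		atomic_int refs;	/* starts at 1 when created */
		/* ... pinned files, offsets ... */
	};

	static struct gaps *gaps_ref(struct gaps *g)
	{
		if (g)
			atomic_fetch_add(&g->refs, 1);
		return g;
	}

	static struct gaps *gaps_unref(struct gaps *g)
	{
		if (g && atomic_fetch_sub(&g->refs, 1) == 1)
			free(g);	/* last reference dropped */
		return NULL;
	}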
 diff --git a/ipc/kdbus/metadata.c b/ipc/kdbus/metadata.c
 new file mode 100644
-index 0000000..c36b9cc
+index 0000000..d4973a9
 --- /dev/null
 +++ b/ipc/kdbus/metadata.c
-@@ -0,0 +1,1184 @@
+@@ -0,0 +1,1342 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15794,26 +18229,16 @@ index 0000000..c36b9cc
 + * @lock:		Object lock
 + * @collected:		Bitmask of collected items
 + * @valid:		Bitmask of collected and valid items
-+ * @uid:		UID of process
-+ * @euid:		EUID of process
-+ * @suid:		SUID of process
-+ * @fsuid:		FSUID of process
-+ * @gid:		GID of process
-+ * @egid:		EGID of process
-+ * @sgid:		SGID of process
-+ * @fsgid:		FSGID of process
++ * @cred:		Credentials
 + * @pid:		PID of process
 + * @tgid:		TGID of process
 + * @ppid:		PPID of process
-+ * @auxgrps:		Auxiliary groups
-+ * @n_auxgrps:		Number of items in @auxgrps
 + * @tid_comm:		TID comm line
 + * @pid_comm:		PID comm line
 + * @exe_path:		Executable path
 + * @root_path:		Root-FS path
 + * @cmdline:		Command-line
 + * @cgroup:		Full cgroup path
-+ * @cred:		Credentials
 + * @seclabel:		Seclabel
 + * @audit_loginuid:	Audit login-UID
 + * @audit_sessionid:	Audit session-ID
@@ -15825,18 +18250,15 @@ index 0000000..c36b9cc
 +	u64 valid;
 +
 +	/* KDBUS_ITEM_CREDS */
-+	kuid_t uid, euid, suid, fsuid;
-+	kgid_t gid, egid, sgid, fsgid;
++	/* KDBUS_ITEM_AUXGROUPS */
++	/* KDBUS_ITEM_CAPS */
++	const struct cred *cred;
 +
 +	/* KDBUS_ITEM_PIDS */
 +	struct pid *pid;
 +	struct pid *tgid;
 +	struct pid *ppid;
 +
-+	/* KDBUS_ITEM_AUXGROUPS */
-+	kgid_t *auxgrps;
-+	size_t n_auxgrps;
-+
 +	/* KDBUS_ITEM_TID_COMM */
 +	char tid_comm[TASK_COMM_LEN];
 +	/* KDBUS_ITEM_PID_COMM */
@@ -15852,9 +18274,6 @@ index 0000000..c36b9cc
 +	/* KDBUS_ITEM_CGROUP */
 +	char *cgroup;
 +
-+	/* KDBUS_ITEM_CAPS */
-+	const struct cred *cred;
-+
 +	/* KDBUS_ITEM_SECLABEL */
 +	char *seclabel;
 +
@@ -15932,7 +18351,6 @@ index 0000000..c36b9cc
 +	put_pid(mp->pid);
 +
 +	kfree(mp->seclabel);
-+	kfree(mp->auxgrps);
 +	kfree(mp->cmdline);
 +	kfree(mp->cgroup);
 +	kfree(mp);
@@ -15964,21 +18382,6 @@ index 0000000..c36b9cc
 +	return NULL;
 +}
 +
-+static void kdbus_meta_proc_collect_creds(struct kdbus_meta_proc *mp)
-+{
-+	mp->uid		= current_uid();
-+	mp->euid	= current_euid();
-+	mp->suid	= current_suid();
-+	mp->fsuid	= current_fsuid();
-+
-+	mp->gid		= current_gid();
-+	mp->egid	= current_egid();
-+	mp->sgid	= current_sgid();
-+	mp->fsgid	= current_fsgid();
-+
-+	mp->valid |= KDBUS_ATTACH_CREDS;
-+}
-+
 +static void kdbus_meta_proc_collect_pids(struct kdbus_meta_proc *mp)
 +{
 +	struct task_struct *parent;
@@ -15994,30 +18397,6 @@ index 0000000..c36b9cc
 +	mp->valid |= KDBUS_ATTACH_PIDS;
 +}
 +
-+static int kdbus_meta_proc_collect_auxgroups(struct kdbus_meta_proc *mp)
-+{
-+	const struct group_info *info;
-+	size_t i;
-+
-+	/* no need to lock/ref, current creds cannot change */
-+	info = current_cred()->group_info;
-+
-+	if (info->ngroups > 0) {
-+		mp->auxgrps = kmalloc_array(info->ngroups, sizeof(kgid_t),
-+					    GFP_KERNEL);
-+		if (!mp->auxgrps)
-+			return -ENOMEM;
-+
-+		for (i = 0; i < info->ngroups; i++)
-+			mp->auxgrps[i] = GROUP_AT(info, i);
-+	}
-+
-+	mp->n_auxgrps = info->ngroups;
-+	mp->valid |= KDBUS_ATTACH_AUXGROUPS;
-+
-+	return 0;
-+}
-+
 +static void kdbus_meta_proc_collect_tid_comm(struct kdbus_meta_proc *mp)
 +{
 +	get_task_comm(mp->tid_comm, current);
@@ -16090,12 +18469,6 @@ index 0000000..c36b9cc
 +	return 0;
 +}
 +
-+static void kdbus_meta_proc_collect_caps(struct kdbus_meta_proc *mp)
-+{
-+	mp->cred = get_current_cred();
-+	mp->valid |= KDBUS_ATTACH_CAPS;
-+}
-+
 +static int kdbus_meta_proc_collect_seclabel(struct kdbus_meta_proc *mp)
 +{
 +#ifdef CONFIG_SECURITY
@@ -16162,10 +18535,17 @@ index 0000000..c36b9cc
 +
 +	mutex_lock(&mp->lock);
 +
-+	if ((what & KDBUS_ATTACH_CREDS) &&
-+	    !(mp->collected & KDBUS_ATTACH_CREDS)) {
-+		kdbus_meta_proc_collect_creds(mp);
-+		mp->collected |= KDBUS_ATTACH_CREDS;
++	/* creds, auxgrps and caps share "struct cred" as context */
++	{
++		const u64 m_cred = KDBUS_ATTACH_CREDS |
++				   KDBUS_ATTACH_AUXGROUPS |
++				   KDBUS_ATTACH_CAPS;
++
++		if ((what & m_cred) && !(mp->collected & m_cred)) {
++			mp->cred = get_current_cred();
++			mp->valid |= m_cred;
++			mp->collected |= m_cred;
++		}
 +	}
 +
 +	if ((what & KDBUS_ATTACH_PIDS) &&
@@ -16174,14 +18554,6 @@ index 0000000..c36b9cc
 +		mp->collected |= KDBUS_ATTACH_PIDS;
 +	}
 +
-+	if ((what & KDBUS_ATTACH_AUXGROUPS) &&
-+	    !(mp->collected & KDBUS_ATTACH_AUXGROUPS)) {
-+		ret = kdbus_meta_proc_collect_auxgroups(mp);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mp->collected |= KDBUS_ATTACH_AUXGROUPS;
-+	}
-+
 +	if ((what & KDBUS_ATTACH_TID_COMM) &&
 +	    !(mp->collected & KDBUS_ATTACH_TID_COMM)) {
 +		kdbus_meta_proc_collect_tid_comm(mp);
@@ -16216,12 +18588,6 @@ index 0000000..c36b9cc
 +		mp->collected |= KDBUS_ATTACH_CGROUP;
 +	}
 +
-+	if ((what & KDBUS_ATTACH_CAPS) &&
-+	    !(mp->collected & KDBUS_ATTACH_CAPS)) {
-+		kdbus_meta_proc_collect_caps(mp);
-+		mp->collected |= KDBUS_ATTACH_CAPS;
-+	}
-+
 +	if ((what & KDBUS_ATTACH_SECLABEL) &&
 +	    !(mp->collected & KDBUS_ATTACH_SECLABEL)) {
 +		ret = kdbus_meta_proc_collect_seclabel(mp);
@@ -16244,101 +18610,116 @@ index 0000000..c36b9cc
 +}
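
The collector above is deliberately lazy: each KDBUS_ATTACH_* bit is gathered only on demand, with @collected recording what has been gathered successfully and @valid what can be emitted later. The skeleton of that pattern, as a sketch (collect_foo() stands in for any of the collectors; on failure the bit stays clear, so a later call may retry):

	#include <stdint.h>

	#define WANT_FOO	(1ULL << 0)	/* illustrative flag */

	struct meta {
		uint64_t collected;	/* gathered already */
		uint64_t valid;		/* gathered successfully */
	};

	static int collect_foo(struct meta *m) { (void)m; return 0; /* stub */ }

	static int collect(struct meta *m, uint64_t what)
	{
		if ((what & WANT_FOO) && !(m->collected & WANT_FOO)) {
			int ret = collect_foo(m);

			if (ret < 0)
				return ret;
			m->valid |= WANT_FOO;
			m->collected |= WANT_FOO;	/* don't re-collect */
		}
		return 0;
	}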
 +
 +/**
-+ * kdbus_meta_proc_fake() - Fill process metadata from faked credentials
-+ * @mp:		Metadata
++ * kdbus_meta_fake_new() - Create fake metadata object
++ *
++ * Return: Pointer to new object on success, ERR_PTR on failure.
++ */
++struct kdbus_meta_fake *kdbus_meta_fake_new(void)
++{
++	struct kdbus_meta_fake *mf;
++
++	mf = kzalloc(sizeof(*mf), GFP_KERNEL);
++	if (!mf)
++		return ERR_PTR(-ENOMEM);
++
++	return mf;
++}
++
++/**
++ * kdbus_meta_fake_free() - Free fake metadata object
++ * @mf:		Fake metadata object
++ *
++ * Return: NULL
++ */
++struct kdbus_meta_fake *kdbus_meta_fake_free(struct kdbus_meta_fake *mf)
++{
++	if (mf) {
++		put_pid(mf->ppid);
++		put_pid(mf->tgid);
++		put_pid(mf->pid);
++		kfree(mf->seclabel);
++		kfree(mf);
++	}
++
++	return NULL;
++}
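
Like the other destructors in this patch, kdbus_meta_fake_free() returns NULL so the call can double as an assignment that clears the caller's pointer. A hypothetical caller:

	struct kdbus_meta_fake *mf;

	mf = kdbus_meta_fake_new();
	if (!IS_ERR(mf))
		mf = kdbus_meta_fake_free(mf);	/* mf is NULL afterwards */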
++
++/**
++ * kdbus_meta_fake_collect() - Fill fake metadata from faked credentials
++ * @mf:		Fake metadata object
 + * @creds:	Creds to set, may be %NULL
 + * @pids:	PIDs to set, may be %NULL
 + * @seclabel:	Seclabel to set, may be %NULL
 + *
 + * This function takes information stored in @creds, @pids and @seclabel and
-+ * resolves them to kernel-representations, if possible. A call to this function
-+ * is considered an alternative to calling kdbus_meta_add_current(), which
-+ * derives the same information from the 'current' task.
++ * resolves them to kernel-representations, if possible. This call uses the
++ * current task's namespaces to resolve the given information.
 + *
-+ * This call uses the current task's namespaces to resolve the given
-+ * information.
-+ *
-+ * Return: 0 on success, negative error number otherwise.
++ * Return: 0 on success, negative error code on failure.
 + */
-+int kdbus_meta_proc_fake(struct kdbus_meta_proc *mp,
-+			 const struct kdbus_creds *creds,
-+			 const struct kdbus_pids *pids,
-+			 const char *seclabel)
++int kdbus_meta_fake_collect(struct kdbus_meta_fake *mf,
++			    const struct kdbus_creds *creds,
++			    const struct kdbus_pids *pids,
++			    const char *seclabel)
 +{
-+	int ret;
-+
-+	if (!mp)
-+		return 0;
-+
-+	mutex_lock(&mp->lock);
++	if (mf->valid)
++		return -EALREADY;
 +
-+	if (creds && !(mp->collected & KDBUS_ATTACH_CREDS)) {
++	if (creds) {
 +		struct user_namespace *ns = current_user_ns();
 +
-+		mp->uid		= make_kuid(ns, creds->uid);
-+		mp->euid	= make_kuid(ns, creds->euid);
-+		mp->suid	= make_kuid(ns, creds->suid);
-+		mp->fsuid	= make_kuid(ns, creds->fsuid);
-+
-+		mp->gid		= make_kgid(ns, creds->gid);
-+		mp->egid	= make_kgid(ns, creds->egid);
-+		mp->sgid	= make_kgid(ns, creds->sgid);
-+		mp->fsgid	= make_kgid(ns, creds->fsgid);
-+
-+		if ((creds->uid   != (uid_t)-1 && !uid_valid(mp->uid))   ||
-+		    (creds->euid  != (uid_t)-1 && !uid_valid(mp->euid))  ||
-+		    (creds->suid  != (uid_t)-1 && !uid_valid(mp->suid))  ||
-+		    (creds->fsuid != (uid_t)-1 && !uid_valid(mp->fsuid)) ||
-+		    (creds->gid   != (gid_t)-1 && !gid_valid(mp->gid))   ||
-+		    (creds->egid  != (gid_t)-1 && !gid_valid(mp->egid))  ||
-+		    (creds->sgid  != (gid_t)-1 && !gid_valid(mp->sgid))  ||
-+		    (creds->fsgid != (gid_t)-1 && !gid_valid(mp->fsgid))) {
-+			ret = -EINVAL;
-+			goto exit_unlock;
-+		}
++		mf->uid		= make_kuid(ns, creds->uid);
++		mf->euid	= make_kuid(ns, creds->euid);
++		mf->suid	= make_kuid(ns, creds->suid);
++		mf->fsuid	= make_kuid(ns, creds->fsuid);
++
++		mf->gid		= make_kgid(ns, creds->gid);
++		mf->egid	= make_kgid(ns, creds->egid);
++		mf->sgid	= make_kgid(ns, creds->sgid);
++		mf->fsgid	= make_kgid(ns, creds->fsgid);
++
++		if ((creds->uid   != (uid_t)-1 && !uid_valid(mf->uid))   ||
++		    (creds->euid  != (uid_t)-1 && !uid_valid(mf->euid))  ||
++		    (creds->suid  != (uid_t)-1 && !uid_valid(mf->suid))  ||
++		    (creds->fsuid != (uid_t)-1 && !uid_valid(mf->fsuid)) ||
++		    (creds->gid   != (gid_t)-1 && !gid_valid(mf->gid))   ||
++		    (creds->egid  != (gid_t)-1 && !gid_valid(mf->egid))  ||
++		    (creds->sgid  != (gid_t)-1 && !gid_valid(mf->sgid))  ||
++		    (creds->fsgid != (gid_t)-1 && !gid_valid(mf->fsgid)))
++			return -EINVAL;
 +
-+		mp->valid |= KDBUS_ATTACH_CREDS;
-+		mp->collected |= KDBUS_ATTACH_CREDS;
++		mf->valid |= KDBUS_ATTACH_CREDS;
 +	}
 +
-+	if (pids && !(mp->collected & KDBUS_ATTACH_PIDS)) {
-+		mp->pid = get_pid(find_vpid(pids->tid));
-+		mp->tgid = get_pid(find_vpid(pids->pid));
-+		mp->ppid = get_pid(find_vpid(pids->ppid));
++	if (pids) {
++		mf->pid = get_pid(find_vpid(pids->tid));
++		mf->tgid = get_pid(find_vpid(pids->pid));
++		mf->ppid = get_pid(find_vpid(pids->ppid));
 +
-+		if ((pids->tid != 0 && !mp->pid) ||
-+		    (pids->pid != 0 && !mp->tgid) ||
-+		    (pids->ppid != 0 && !mp->ppid)) {
-+			put_pid(mp->pid);
-+			put_pid(mp->tgid);
-+			put_pid(mp->ppid);
-+			mp->pid = NULL;
-+			mp->tgid = NULL;
-+			mp->ppid = NULL;
-+			ret = -EINVAL;
-+			goto exit_unlock;
++		if ((pids->tid != 0 && !mf->pid) ||
++		    (pids->pid != 0 && !mf->tgid) ||
++		    (pids->ppid != 0 && !mf->ppid)) {
++			put_pid(mf->pid);
++			put_pid(mf->tgid);
++			put_pid(mf->ppid);
++			mf->pid = NULL;
++			mf->tgid = NULL;
++			mf->ppid = NULL;
++			return -EINVAL;
 +		}
 +
-+		mp->valid |= KDBUS_ATTACH_PIDS;
-+		mp->collected |= KDBUS_ATTACH_PIDS;
++		mf->valid |= KDBUS_ATTACH_PIDS;
 +	}
 +
-+	if (seclabel && !(mp->collected & KDBUS_ATTACH_SECLABEL)) {
-+		mp->seclabel = kstrdup(seclabel, GFP_KERNEL);
-+		if (!mp->seclabel) {
-+			ret = -ENOMEM;
-+			goto exit_unlock;
-+		}
++	if (seclabel) {
++		mf->seclabel = kstrdup(seclabel, GFP_KERNEL);
++		if (!mf->seclabel)
++			return -ENOMEM;
 +
-+		mp->valid |= KDBUS_ATTACH_SECLABEL;
-+		mp->collected |= KDBUS_ATTACH_SECLABEL;
++		mf->valid |= KDBUS_ATTACH_SECLABEL;
 +	}
 +
-+	ret = 0;
-+
-+exit_unlock:
-+	mutex_unlock(&mp->lock);
-+	return ret;
++	return 0;
 +}
 +
 +/**
@@ -16393,13 +18774,13 @@ index 0000000..c36b9cc
 +}
 +
 +static void kdbus_meta_conn_collect_timestamp(struct kdbus_meta_conn *mc,
-+					      struct kdbus_kmsg *kmsg)
++					      u64 msg_seqnum)
 +{
 +	mc->ts.monotonic_ns = ktime_get_ns();
 +	mc->ts.realtime_ns = ktime_get_real_ns();
 +
-+	if (kmsg)
-+		mc->ts.seqnum = kmsg->seq;
++	if (msg_seqnum)
++		mc->ts.seqnum = msg_seqnum;
 +
 +	mc->valid |= KDBUS_ATTACH_TIMESTAMP;
 +}
@@ -16414,14 +18795,16 @@ index 0000000..c36b9cc
 +	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
 +
 +	size = 0;
++	/* open-code length calculation to avoid final padding */
 +	list_for_each_entry(e, &conn->names_list, conn_entry)
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_name) +
-+					strlen(e->name) + 1);
++		size = KDBUS_ALIGN8(size) + KDBUS_ITEM_HEADER_SIZE +
++			sizeof(struct kdbus_name) + strlen(e->name) + 1;
 +
 +	if (!size)
 +		return 0;
 +
-+	item = kmalloc(size, GFP_KERNEL);
++	/* make sure we include zeroed padding for convenience helpers */
++	item = kmalloc(KDBUS_ALIGN8(size), GFP_KERNEL);
 +	if (!item)
 +		return -ENOMEM;
 +
@@ -16438,7 +18821,8 @@ index 0000000..c36b9cc
 +	}
 +
 +	/* sanity check: the buffer should be completely written now */
-+	WARN_ON((u8 *)item != (u8 *)mc->owned_names_items + size);
++	WARN_ON((u8 *)item !=
++			(u8 *)mc->owned_names_items + KDBUS_ALIGN8(size));
 +
 +	mc->valid |= KDBUS_ATTACH_NAMES;
 +	return 0;
@@ -16461,184 +18845,63 @@ index 0000000..c36b9cc
 +/**
 + * kdbus_meta_conn_collect() - Collect connection metadata
 + * @mc:		Message metadata object
-+ * @kmsg:	Kmsg to collect data from
 + * @conn:	Connection to collect data from
++ * @msg_seqnum:	Sequence number of the message to send
 + * @what:	Attach flags to collect
 + *
-+ * This collects connection metadata from @kmsg and @conn and saves it in @mc.
++ * This records @msg_seqnum and collects connection metadata from @conn,
++ * saving the result in @mc.
 + *
 + * If KDBUS_ATTACH_NAMES is set in @what and @conn is non-NULL, the caller must
 + * hold the name-registry read-lock of conn->ep->bus->registry.
 + *
-+ * Return: 0 on success, negative error code on failure.
-+ */
-+int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
-+			    struct kdbus_kmsg *kmsg,
-+			    struct kdbus_conn *conn,
-+			    u64 what)
-+{
-+	int ret;
-+
-+	if (!mc || !(what & (KDBUS_ATTACH_TIMESTAMP |
-+			     KDBUS_ATTACH_NAMES |
-+			     KDBUS_ATTACH_CONN_DESCRIPTION)))
-+		return 0;
-+
-+	mutex_lock(&mc->lock);
-+
-+	if (kmsg && (what & KDBUS_ATTACH_TIMESTAMP) &&
-+	    !(mc->collected & KDBUS_ATTACH_TIMESTAMP)) {
-+		kdbus_meta_conn_collect_timestamp(mc, kmsg);
-+		mc->collected |= KDBUS_ATTACH_TIMESTAMP;
-+	}
-+
-+	if (conn && (what & KDBUS_ATTACH_NAMES) &&
-+	    !(mc->collected & KDBUS_ATTACH_NAMES)) {
-+		ret = kdbus_meta_conn_collect_names(mc, conn);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mc->collected |= KDBUS_ATTACH_NAMES;
-+	}
-+
-+	if (conn && (what & KDBUS_ATTACH_CONN_DESCRIPTION) &&
-+	    !(mc->collected & KDBUS_ATTACH_CONN_DESCRIPTION)) {
-+		ret = kdbus_meta_conn_collect_description(mc, conn);
-+		if (ret < 0)
-+			goto exit_unlock;
-+		mc->collected |= KDBUS_ATTACH_CONN_DESCRIPTION;
-+	}
-+
-+	ret = 0;
-+
-+exit_unlock:
-+	mutex_unlock(&mc->lock);
-+	return ret;
-+}
-+
-+/*
-+ * kdbus_meta_export_prepare() - Prepare metadata for export
-+ * @mp:		Process metadata, or NULL
-+ * @mc:		Connection metadata, or NULL
-+ * @mask:	Pointer to mask of KDBUS_ATTACH_* flags to export
-+ * @sz:		Pointer to return the size needed by the metadata
-+ *
-+ * Does a conservative calculation of how much space metadata information
-+ * will take up during export. It is 'conservative' because for string
-+ * translations in namespaces, it will use the kernel namespaces, which is
-+ * the longest possible version.
-+ *
-+ * The actual size consumed by kdbus_meta_export() may hence vary from the
-+ * one reported here, but it is guaranteed never to be greater.
-+ *
-+ * Return: 0 on success, negative error number otherwise.
-+ */
-+int kdbus_meta_export_prepare(struct kdbus_meta_proc *mp,
-+			      struct kdbus_meta_conn *mc,
-+			      u64 *mask, size_t *sz)
-+{
-+	char *exe_pathname = NULL;
-+	void *exe_page = NULL;
-+	size_t size = 0;
-+	u64 valid = 0;
-+	int ret = 0;
-+
-+	if (mp) {
-+		mutex_lock(&mp->lock);
-+		valid |= mp->valid;
-+		mutex_unlock(&mp->lock);
-+	}
-+
-+	if (mc) {
-+		mutex_lock(&mc->lock);
-+		valid |= mc->valid;
-+		mutex_unlock(&mc->lock);
-+	}
-+
-+	*mask &= valid;
-+
-+	if (!*mask)
-+		goto exit;
-+
-+	/* process metadata */
-+
-+	if (mp && (*mask & KDBUS_ATTACH_CREDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
-+
-+	if (mp && (*mask & KDBUS_ATTACH_PIDS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
-+
-+	if (mp && (*mask & KDBUS_ATTACH_AUXGROUPS))
-+		size += KDBUS_ITEM_SIZE(mp->n_auxgrps * sizeof(u64));
-+
-+	if (mp && (*mask & KDBUS_ATTACH_TID_COMM))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->tid_comm) + 1);
-+
-+	if (mp && (*mask & KDBUS_ATTACH_PID_COMM))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->pid_comm) + 1);
-+
-+	if (mp && (*mask & KDBUS_ATTACH_EXE)) {
-+		exe_page = (void *)__get_free_page(GFP_TEMPORARY);
-+		if (!exe_page) {
-+			ret = -ENOMEM;
-+			goto exit;
-+		}
-+
-+		exe_pathname = d_path(&mp->exe_path, exe_page, PAGE_SIZE);
-+		if (IS_ERR(exe_pathname)) {
-+			ret = PTR_ERR(exe_pathname);
-+			goto exit;
-+		}
-+
-+		size += KDBUS_ITEM_SIZE(strlen(exe_pathname) + 1);
-+		free_page((unsigned long)exe_page);
-+	}
-+
-+	if (mp && (*mask & KDBUS_ATTACH_CMDLINE))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->cmdline) + 1);
-+
-+	if (mp && (*mask & KDBUS_ATTACH_CGROUP))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->cgroup) + 1);
-+
-+	if (mp && (*mask & KDBUS_ATTACH_CAPS))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_meta_caps));
-+
-+	if (mp && (*mask & KDBUS_ATTACH_SECLABEL))
-+		size += KDBUS_ITEM_SIZE(strlen(mp->seclabel) + 1);
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
++			    struct kdbus_conn *conn,
++			    u64 msg_seqnum, u64 what)
++{
++	int ret;
 +
-+	if (mp && (*mask & KDBUS_ATTACH_AUDIT))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_audit));
++	if (!mc || !(what & (KDBUS_ATTACH_TIMESTAMP |
++			     KDBUS_ATTACH_NAMES |
++			     KDBUS_ATTACH_CONN_DESCRIPTION)))
++		return 0;
 +
-+	/* connection metadata */
++	mutex_lock(&mc->lock);
 +
-+	if (mc && (*mask & KDBUS_ATTACH_NAMES))
-+		size += mc->owned_names_size;
++	if (msg_seqnum && (what & KDBUS_ATTACH_TIMESTAMP) &&
++	    !(mc->collected & KDBUS_ATTACH_TIMESTAMP)) {
++		kdbus_meta_conn_collect_timestamp(mc, msg_seqnum);
++		mc->collected |= KDBUS_ATTACH_TIMESTAMP;
++	}
 +
-+	if (mc && (*mask & KDBUS_ATTACH_CONN_DESCRIPTION))
-+		size += KDBUS_ITEM_SIZE(strlen(mc->conn_description) + 1);
++	if (conn && (what & KDBUS_ATTACH_NAMES) &&
++	    !(mc->collected & KDBUS_ATTACH_NAMES)) {
++		ret = kdbus_meta_conn_collect_names(mc, conn);
++		if (ret < 0)
++			goto exit_unlock;
++		mc->collected |= KDBUS_ATTACH_NAMES;
++	}
 +
-+	if (mc && (*mask & KDBUS_ATTACH_TIMESTAMP))
-+		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
++	if (conn && (what & KDBUS_ATTACH_CONN_DESCRIPTION) &&
++	    !(mc->collected & KDBUS_ATTACH_CONN_DESCRIPTION)) {
++		ret = kdbus_meta_conn_collect_description(mc, conn);
++		if (ret < 0)
++			goto exit_unlock;
++		mc->collected |= KDBUS_ATTACH_CONN_DESCRIPTION;
++	}
 +
-+exit:
-+	*sz = size;
++	ret = 0;
 +
++exit_unlock:
++	mutex_unlock(&mc->lock);
 +	return ret;
 +}
 +
-+static int kdbus_meta_push_kvec(struct kvec *kvec,
-+				struct kdbus_item_header *hdr,
-+				u64 type, void *payload,
-+				size_t payload_size, u64 *size)
-+{
-+	hdr->type = type;
-+	hdr->size = KDBUS_ITEM_HEADER_SIZE + payload_size;
-+	kdbus_kvec_set(kvec++, hdr, sizeof(*hdr), size);
-+	kdbus_kvec_set(kvec++, payload, payload_size, size);
-+	return 2 + !!kdbus_kvec_pad(kvec++, size);
-+}
-+
 +static void kdbus_meta_export_caps(struct kdbus_meta_caps *out,
-+				   struct kdbus_meta_proc *mp)
++				   const struct kdbus_meta_proc *mp,
++				   struct user_namespace *user_ns)
 +{
 +	struct user_namespace *iter;
 +	const struct cred *cred = mp->cred;
@@ -16646,18 +18909,18 @@ index 0000000..c36b9cc
 +	int i;
 +
 +	/*
-+	 * This translates the effective capabilities of 'cred' into the current
-+	 * user-namespace. If the current user-namespace is a child-namespace of
++	 * This translates the effective capabilities of 'cred' into the given
++	 * user-namespace. If the given user-namespace is a child-namespace of
 +	 * the user-namespace of 'cred', the mask can be copied verbatim. If
 +	 * not, the mask is cleared.
 +	 * There's one exception: If 'cred' is the owner of any user-namespace
-+	 * in the path between the current user-namespace and the user-namespace
++	 * in the path between the given user-namespace and the user-namespace
 +	 * of 'cred', then it has all effective capabilities set. This means
 +	 * the user who created a user-namespace always has all effective
 +	 * capabilities in any child namespaces. Note that this is based on the
 +	 * uid of the namespace creator, not the task hierarchy.
 +	 */
-+	for (iter = current_user_ns(); iter; iter = iter->parent) {
++	for (iter = user_ns; iter; iter = iter->parent) {
 +		if (iter == cred->user_ns) {
 +			parent = true;
 +			break;
@@ -16701,126 +18964,327 @@ index 0000000..c36b9cc
 +}
 +
 +/* This is equivalent to from_kuid_munged(), but maps INVALID_UID to itself */
-+static uid_t kdbus_from_kuid_keep(kuid_t uid)
++static uid_t kdbus_from_kuid_keep(struct user_namespace *ns, kuid_t uid)
 +{
-+	return uid_valid(uid) ?
-+		from_kuid_munged(current_user_ns(), uid) : ((uid_t)-1);
++	return uid_valid(uid) ? from_kuid_munged(ns, uid) : ((uid_t)-1);
 +}
 +
 +/* This is equivalent to from_kgid_munged(), but maps INVALID_GID to itself */
-+static gid_t kdbus_from_kgid_keep(kgid_t gid)
++static gid_t kdbus_from_kgid_keep(struct user_namespace *ns, kgid_t gid)
 +{
-+	return gid_valid(gid) ?
-+		from_kgid_munged(current_user_ns(), gid) : ((gid_t)-1);
++	return gid_valid(gid) ? from_kgid_munged(ns, gid) : ((gid_t)-1);
 +}
 +
-+/**
-+ * kdbus_meta_export() - export information from metadata into a slice
-+ * @mp:		Process metadata, or NULL
-+ * @mc:		Connection metadata, or NULL
-+ * @mask:	Mask of KDBUS_ATTACH_* flags to export
-+ * @slice:	The slice to export to
-+ * @offset:	The offset inside @slice to write to
-+ * @real_size:	The real size the metadata consumed
-+ *
-+ * This function exports information from metadata into @slice at offset
-+ * @offset inside that slice. Only information that is requested in @mask
-+ * and that has been collected before is exported.
-+ *
-+ * In order to make sure not to write out of bounds, @mask must be the same
-+ * value that was previously returned from kdbus_meta_export_prepare(). The
-+ * function will, however, not necessarily write as many bytes as returned by
-+ * kdbus_meta_export_prepare(); depending on the namespaces in question, it
-+ * might use up less than that.
-+ *
-+ * All information will be translated using the current namespaces.
-+ *
-+ * Return: 0 on success, negative error number otherwise.
-+ */
-+int kdbus_meta_export(struct kdbus_meta_proc *mp,
-+		      struct kdbus_meta_conn *mc,
-+		      u64 mask,
-+		      struct kdbus_pool_slice *slice,
-+		      off_t offset,
-+		      size_t *real_size)
-+{
-+	struct user_namespace *user_ns = current_user_ns();
-+	struct kdbus_item_header item_hdr[13], *hdr;
-+	char *exe_pathname = NULL;
-+	struct kdbus_creds creds;
-+	struct kdbus_pids pids;
-+	void *exe_page = NULL;
-+	struct kvec kvec[40];
-+	u64 *auxgrps = NULL;
-+	size_t cnt = 0;
-+	u64 size = 0;
-+	int ret = 0;
++struct kdbus_meta_staging {
++	const struct kdbus_meta_proc *mp;
++	const struct kdbus_meta_fake *mf;
++	const struct kdbus_meta_conn *mc;
++	const struct kdbus_conn *conn;
++	u64 mask;
 +
-+	hdr = &item_hdr[0];
++	void *exe;
++	const char *exe_path;
++};
 +
-+	if (mask == 0) {
-+		*real_size = 0;
-+		return 0;
-+	}
++static size_t kdbus_meta_measure(struct kdbus_meta_staging *staging)
++{
++	const struct kdbus_meta_proc *mp = staging->mp;
++	const struct kdbus_meta_fake *mf = staging->mf;
++	const struct kdbus_meta_conn *mc = staging->mc;
++	const u64 mask = staging->mask;
++	size_t size = 0;
++
++	/* process metadata */
++
++	if (mf && (mask & KDBUS_ATTACH_CREDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
++	else if (mp && (mask & KDBUS_ATTACH_CREDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
++
++	if (mf && (mask & KDBUS_ATTACH_PIDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
++	else if (mp && (mask & KDBUS_ATTACH_PIDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
++
++	if (mp && (mask & KDBUS_ATTACH_AUXGROUPS))
++		size += KDBUS_ITEM_SIZE(mp->cred->group_info->ngroups *
++					sizeof(u64));
++
++	if (mp && (mask & KDBUS_ATTACH_TID_COMM))
++		size += KDBUS_ITEM_SIZE(strlen(mp->tid_comm) + 1);
++
++	if (mp && (mask & KDBUS_ATTACH_PID_COMM))
++		size += KDBUS_ITEM_SIZE(strlen(mp->pid_comm) + 1);
++
++	if (staging->exe_path && (mask & KDBUS_ATTACH_EXE))
++		size += KDBUS_ITEM_SIZE(strlen(staging->exe_path) + 1);
++
++	if (mp && (mask & KDBUS_ATTACH_CMDLINE))
++		size += KDBUS_ITEM_SIZE(strlen(mp->cmdline) + 1);
++
++	if (mp && (mask & KDBUS_ATTACH_CGROUP))
++		size += KDBUS_ITEM_SIZE(strlen(mp->cgroup) + 1);
++
++	if (mp && (mask & KDBUS_ATTACH_CAPS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_meta_caps));
++
++	if (mf && (mask & KDBUS_ATTACH_SECLABEL))
++		size += KDBUS_ITEM_SIZE(strlen(mf->seclabel) + 1);
++	else if (mp && (mask & KDBUS_ATTACH_SECLABEL))
++		size += KDBUS_ITEM_SIZE(strlen(mp->seclabel) + 1);
++
++	if (mp && (mask & KDBUS_ATTACH_AUDIT))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_audit));
++
++	/* connection metadata */
++
++	if (mc && (mask & KDBUS_ATTACH_NAMES))
++		size += KDBUS_ALIGN8(mc->owned_names_size);
++
++	if (mc && (mask & KDBUS_ATTACH_CONN_DESCRIPTION))
++		size += KDBUS_ITEM_SIZE(strlen(mc->conn_description) + 1);
++
++	if (mc && (mask & KDBUS_ATTACH_TIMESTAMP))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
++
++	return size;
++}
++
++static struct kdbus_item *kdbus_write_head(struct kdbus_item **iter,
++					   u64 type, u64 size)
++{
++	struct kdbus_item *item = *iter;
++	size_t padding;
++
++	item->type = type;
++	item->size = KDBUS_ITEM_HEADER_SIZE + size;
++
++	/* clear padding */
++	padding = KDBUS_ALIGN8(item->size) - item->size;
++	if (padding)
++		memset(item->data + size, 0, padding);
++
++	*iter = KDBUS_ITEM_NEXT(item);
++	return item;
++}
++
++static struct kdbus_item *kdbus_write_full(struct kdbus_item **iter,
++					   u64 type, u64 size, const void *data)
++{
++	struct kdbus_item *item;
++
++	item = kdbus_write_head(iter, type, size);
++	memcpy(item->data, data, size);
++	return item;
++}
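
A minimal sketch of how the two writers chain (illustrative only; @buf and
@comm are hypothetical, assumed large enough and NUL-terminated):

static void example_emit_items(void *buf, const char *comm)
{
	struct kdbus_item *it = buf;
	struct kdbus_item *item;

	/* a string item: header plus payload, trailing padding cleared */
	item = kdbus_write_full(&it, KDBUS_ITEM_PID_COMM,
				strlen(comm) + 1, comm);

	/* item->size excludes the padding; @it already points at the
	 * next 8-byte-aligned slot for a follow-up item */
	WARN_ON((void *)it != (void *)item + KDBUS_ALIGN8(item->size));
}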
++
++static size_t kdbus_meta_write(struct kdbus_meta_staging *staging, void *mem,
++			       size_t size)
++{
++	struct user_namespace *user_ns = staging->conn->cred->user_ns;
++	struct pid_namespace *pid_ns = ns_of_pid(staging->conn->pid);
++	struct kdbus_item *item = NULL, *items = mem;
++	u8 *end, *owned_names_end = NULL;
 +
 +	/* process metadata */
 +
-+	if (mp && (mask & KDBUS_ATTACH_CREDS)) {
-+		creds.uid	= kdbus_from_kuid_keep(mp->uid);
-+		creds.euid	= kdbus_from_kuid_keep(mp->euid);
-+		creds.suid	= kdbus_from_kuid_keep(mp->suid);
-+		creds.fsuid	= kdbus_from_kuid_keep(mp->fsuid);
-+		creds.gid	= kdbus_from_kgid_keep(mp->gid);
-+		creds.egid	= kdbus_from_kgid_keep(mp->egid);
-+		creds.sgid	= kdbus_from_kgid_keep(mp->sgid);
-+		creds.fsgid	= kdbus_from_kgid_keep(mp->fsgid);
++	if (staging->mf && (staging->mask & KDBUS_ATTACH_CREDS)) {
++		const struct kdbus_meta_fake *mf = staging->mf;
++
++		item = kdbus_write_head(&items, KDBUS_ITEM_CREDS,
++					sizeof(struct kdbus_creds));
++		item->creds = (struct kdbus_creds){
++			.uid	= kdbus_from_kuid_keep(user_ns, mf->uid),
++			.euid	= kdbus_from_kuid_keep(user_ns, mf->euid),
++			.suid	= kdbus_from_kuid_keep(user_ns, mf->suid),
++			.fsuid	= kdbus_from_kuid_keep(user_ns, mf->fsuid),
++			.gid	= kdbus_from_kgid_keep(user_ns, mf->gid),
++			.egid	= kdbus_from_kgid_keep(user_ns, mf->egid),
++			.sgid	= kdbus_from_kgid_keep(user_ns, mf->sgid),
++			.fsgid	= kdbus_from_kgid_keep(user_ns, mf->fsgid),
++		};
++	} else if (staging->mp && (staging->mask & KDBUS_ATTACH_CREDS)) {
++		const struct cred *c = staging->mp->cred;
++
++		item = kdbus_write_head(&items, KDBUS_ITEM_CREDS,
++					sizeof(struct kdbus_creds));
++		item->creds = (struct kdbus_creds){
++			.uid	= kdbus_from_kuid_keep(user_ns, c->uid),
++			.euid	= kdbus_from_kuid_keep(user_ns, c->euid),
++			.suid	= kdbus_from_kuid_keep(user_ns, c->suid),
++			.fsuid	= kdbus_from_kuid_keep(user_ns, c->fsuid),
++			.gid	= kdbus_from_kgid_keep(user_ns, c->gid),
++			.egid	= kdbus_from_kgid_keep(user_ns, c->egid),
++			.sgid	= kdbus_from_kgid_keep(user_ns, c->sgid),
++			.fsgid	= kdbus_from_kgid_keep(user_ns, c->fsgid),
++		};
++	}
++
++	if (staging->mf && (staging->mask & KDBUS_ATTACH_PIDS)) {
++		item = kdbus_write_head(&items, KDBUS_ITEM_PIDS,
++					sizeof(struct kdbus_pids));
++		item->pids = (struct kdbus_pids){
++			.pid = pid_nr_ns(staging->mf->tgid, pid_ns),
++			.tid = pid_nr_ns(staging->mf->pid, pid_ns),
++			.ppid = pid_nr_ns(staging->mf->ppid, pid_ns),
++		};
++	} else if (staging->mp && (staging->mask & KDBUS_ATTACH_PIDS)) {
++		item = kdbus_write_head(&items, KDBUS_ITEM_PIDS,
++					sizeof(struct kdbus_pids));
++		item->pids = (struct kdbus_pids){
++			.pid = pid_nr_ns(staging->mp->tgid, pid_ns),
++			.tid = pid_nr_ns(staging->mp->pid, pid_ns),
++			.ppid = pid_nr_ns(staging->mp->ppid, pid_ns),
++		};
++	}
 +
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_CREDS,
-+					    &creds, sizeof(creds), &size);
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_AUXGROUPS)) {
++		const struct group_info *info = staging->mp->cred->group_info;
++		size_t i;
++
++		item = kdbus_write_head(&items, KDBUS_ITEM_AUXGROUPS,
++					info->ngroups * sizeof(u64));
++		for (i = 0; i < info->ngroups; ++i)
++			item->data64[i] = from_kgid_munged(user_ns,
++							   GROUP_AT(info, i));
++	}
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_TID_COMM))
++		item = kdbus_write_full(&items, KDBUS_ITEM_TID_COMM,
++					strlen(staging->mp->tid_comm) + 1,
++					staging->mp->tid_comm);
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_PID_COMM))
++		item = kdbus_write_full(&items, KDBUS_ITEM_PID_COMM,
++					strlen(staging->mp->pid_comm) + 1,
++					staging->mp->pid_comm);
++
++	if (staging->exe_path && (staging->mask & KDBUS_ATTACH_EXE))
++		item = kdbus_write_full(&items, KDBUS_ITEM_EXE,
++					strlen(staging->exe_path) + 1,
++					staging->exe_path);
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_CMDLINE))
++		item = kdbus_write_full(&items, KDBUS_ITEM_CMDLINE,
++					strlen(staging->mp->cmdline) + 1,
++					staging->mp->cmdline);
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_CGROUP))
++		item = kdbus_write_full(&items, KDBUS_ITEM_CGROUP,
++					strlen(staging->mp->cgroup) + 1,
++					staging->mp->cgroup);
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_CAPS)) {
++		item = kdbus_write_head(&items, KDBUS_ITEM_CAPS,
++					sizeof(struct kdbus_meta_caps));
++		kdbus_meta_export_caps((void *)&item->caps, staging->mp,
++				       user_ns);
++	}
++
++	if (staging->mf && (staging->mask & KDBUS_ATTACH_SECLABEL))
++		item = kdbus_write_full(&items, KDBUS_ITEM_SECLABEL,
++					strlen(staging->mf->seclabel) + 1,
++					staging->mf->seclabel);
++	else if (staging->mp && (staging->mask & KDBUS_ATTACH_SECLABEL))
++		item = kdbus_write_full(&items, KDBUS_ITEM_SECLABEL,
++					strlen(staging->mp->seclabel) + 1,
++					staging->mp->seclabel);
++
++	if (staging->mp && (staging->mask & KDBUS_ATTACH_AUDIT)) {
++		item = kdbus_write_head(&items, KDBUS_ITEM_AUDIT,
++					sizeof(struct kdbus_audit));
++		item->audit = (struct kdbus_audit){
++			.loginuid = from_kuid(user_ns,
++					      staging->mp->audit_loginuid),
++			.sessionid = staging->mp->audit_sessionid,
++		};
 +	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_PIDS)) {
-+		pids.pid = pid_vnr(mp->tgid);
-+		pids.tid = pid_vnr(mp->pid);
-+		pids.ppid = pid_vnr(mp->ppid);
++	/* connection metadata */
 +
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_PIDS,
-+					    &pids, sizeof(pids), &size);
++	if (staging->mc && (staging->mask & KDBUS_ATTACH_NAMES)) {
++		memcpy(items, staging->mc->owned_names_items,
++		       KDBUS_ALIGN8(staging->mc->owned_names_size));
++		owned_names_end = (u8 *)items + staging->mc->owned_names_size;
++		items = (void *)KDBUS_ALIGN8((unsigned long)owned_names_end);
 +	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_AUXGROUPS)) {
-+		size_t payload_size = mp->n_auxgrps * sizeof(u64);
-+		int i;
++	if (staging->mc && (staging->mask & KDBUS_ATTACH_CONN_DESCRIPTION))
++		item = kdbus_write_full(&items, KDBUS_ITEM_CONN_DESCRIPTION,
++				strlen(staging->mc->conn_description) + 1,
++				staging->mc->conn_description);
 +
-+		auxgrps = kmalloc(payload_size, GFP_KERNEL);
-+		if (!auxgrps) {
-+			ret = -ENOMEM;
-+			goto exit;
-+		}
++	if (staging->mc && (staging->mask & KDBUS_ATTACH_TIMESTAMP))
++		item = kdbus_write_full(&items, KDBUS_ITEM_TIMESTAMP,
++					sizeof(staging->mc->ts),
++					&staging->mc->ts);
++
++	/*
++	 * Return real size (minus trailing padding). In case of 'owned_names'
++	 * we cannot deduce it from item->size, so treat it specially.
++	 */
++
++	if (items == (void *)KDBUS_ALIGN8((unsigned long)owned_names_end))
++		end = owned_names_end;
++	else if (item)
++		end = (u8 *)item + item->size;
++	else
++		end = mem;
++
++	WARN_ON((u8 *)items - (u8 *)mem != size);
++	WARN_ON((void *)KDBUS_ALIGN8((unsigned long)end) != (void *)items);
++
++	return end - (u8 *)mem;
++}
++
++int kdbus_meta_emit(struct kdbus_meta_proc *mp,
++		    struct kdbus_meta_fake *mf,
++		    struct kdbus_meta_conn *mc,
++		    struct kdbus_conn *conn,
++		    u64 mask,
++		    struct kdbus_item **out_items,
++		    size_t *out_size)
++{
++	struct kdbus_meta_staging staging = {};
++	struct kdbus_item *items = NULL;
++	size_t size = 0;
++	int ret;
++
++	if (WARN_ON(mf && mp))
++		mp = NULL;
 +
-+		for (i = 0; i < mp->n_auxgrps; i++)
-+			auxgrps[i] = from_kgid_munged(user_ns, mp->auxgrps[i]);
++	staging.mp = mp;
++	staging.mf = mf;
++	staging.mc = mc;
++	staging.conn = conn;
 +
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_AUXGROUPS,
-+					    auxgrps, payload_size, &size);
++	/* get mask of valid items */
++	if (mf)
++		staging.mask |= mf->valid;
++	if (mp) {
++		mutex_lock(&mp->lock);
++		staging.mask |= mp->valid;
++		mutex_unlock(&mp->lock);
++	}
++	if (mc) {
++		mutex_lock(&mc->lock);
++		staging.mask |= mc->valid;
++		mutex_unlock(&mc->lock);
 +	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_TID_COMM))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_TID_COMM, mp->tid_comm,
-+					    strlen(mp->tid_comm) + 1, &size);
++	staging.mask &= mask;
 +
-+	if (mp && (mask & KDBUS_ATTACH_PID_COMM))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_PID_COMM, mp->pid_comm,
-+					    strlen(mp->pid_comm) + 1, &size);
++	if (!staging.mask) { /* bail out if nothing to do */
++		ret = 0;
++		goto exit;
++	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_EXE)) {
++	/* EXE is special as it needs a temporary page to assemble */
++	if (mp && (staging.mask & KDBUS_ATTACH_EXE)) {
 +		struct path p;
 +
 +		/*
-+		 * TODO: We need access to __d_path() so we can write the path
++		 * XXX: We need access to __d_path() so we can write the path
 +		 * relative to conn->root_path. Once upstream, we need
 +		 * EXPORT_SYMBOL(__d_path) or an equivalent of d_path() that
 +		 * takes the root path directly. Until then, we drop this item
@@ -16828,116 +19292,245 @@ index 0000000..c36b9cc
 +		 */
 +
 +		get_fs_root(current->fs, &p);
-+		if (path_equal(&p, &mp->root_path)) {
-+			exe_page = (void *)__get_free_page(GFP_TEMPORARY);
-+			if (!exe_page) {
++		if (path_equal(&p, &conn->root_path)) {
++			staging.exe = (void *)__get_free_page(GFP_TEMPORARY);
++			if (!staging.exe) {
 +				path_put(&p);
 +				ret = -ENOMEM;
 +				goto exit;
 +			}
 +
-+			exe_pathname = d_path(&mp->exe_path, exe_page,
-+					      PAGE_SIZE);
-+			if (IS_ERR(exe_pathname)) {
++			staging.exe_path = d_path(&mp->exe_path, staging.exe,
++						  PAGE_SIZE);
++			if (IS_ERR(staging.exe_path)) {
 +				path_put(&p);
-+				ret = PTR_ERR(exe_pathname);
++				ret = PTR_ERR(staging.exe_path);
 +				goto exit;
 +			}
-+
-+			cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+						    KDBUS_ITEM_EXE,
-+						    exe_pathname,
-+						    strlen(exe_pathname) + 1,
-+						    &size);
 +		}
 +		path_put(&p);
 +	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_CMDLINE))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_CMDLINE, mp->cmdline,
-+					    strlen(mp->cmdline) + 1, &size);
++	size = kdbus_meta_measure(&staging);
++	if (!size) { /* bail out if nothing to do */
++		ret = 0;
++		goto exit;
++	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_CGROUP))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_CGROUP, mp->cgroup,
-+					    strlen(mp->cgroup) + 1, &size);
++	items = kmalloc(size, GFP_KERNEL);
++	if (!items) {
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	size = kdbus_meta_write(&staging, items, size);
++	if (!size) {
++		kfree(items);
++		items = NULL;
++	}
 +
-+	if (mp && (mask & KDBUS_ATTACH_CAPS)) {
-+		struct kdbus_meta_caps caps = {};
++	ret = 0;
 +
-+		kdbus_meta_export_caps(&caps, mp);
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_CAPS, &caps,
-+					    sizeof(caps), &size);
++exit:
++	if (staging.exe)
++		free_page((unsigned long)staging.exe);
++	if (ret >= 0) {
++		*out_items = items;
++		*out_size = size;
 +	}
++	return ret;
++}
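
A sketch of the calling convention (error handling trimmed; the parameters
are whatever the call-site collected beforehand):

static int example_emit(struct kdbus_meta_proc *mp, struct kdbus_meta_conn *mc,
			struct kdbus_conn *conn, u64 mask)
{
	struct kdbus_item *items = NULL;
	size_t size = 0;
	int ret;

	ret = kdbus_meta_emit(mp, NULL, mc, conn, mask, &items, &size);
	if (ret < 0)
		return ret;

	/* on success, @items is NULL if nothing was granted; otherwise it
	 * holds @size bytes of serialized items owned by the caller */
	kfree(items);
	return 0;
}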
 +
-+	if (mp && (mask & KDBUS_ATTACH_SECLABEL))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_SECLABEL, mp->seclabel,
-+					    strlen(mp->seclabel) + 1, &size);
++enum {
++	KDBUS_META_PROC_NONE,
++	KDBUS_META_PROC_NORMAL,
++};
 +
-+	if (mp && (mask & KDBUS_ATTACH_AUDIT)) {
-+		struct kdbus_audit a = {
-+			.loginuid = from_kuid(user_ns, mp->audit_loginuid),
-+			.sessionid = mp->audit_sessionid,
-+		};
++/**
++ * kdbus_proc_permission() - check /proc permissions on target pid
++ * @pid_ns:		namespace we operate in
++ * @cred:		credentials of requestor
++ * @target:		target process
++ *
++ * This checks whether a process with credentials @cred can access information
++ * of @target in the namespace @pid_ns. This tries to follow /proc permissions,
++ * but is slightly more restrictive.
++ *
++ * Return: The /proc access level (KDBUS_META_PROC_*).
++ */
++static unsigned int kdbus_proc_permission(const struct pid_namespace *pid_ns,
++					  const struct cred *cred,
++					  struct pid *target)
++{
++	if (pid_ns->hide_pid < 1)
++		return KDBUS_META_PROC_NORMAL;
 +
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_AUDIT,
-+					    &a, sizeof(a), &size);
-+	}
++	/* XXX: we need groups_search() exported for aux-groups */
++	if (gid_eq(cred->egid, pid_ns->pid_gid))
++		return KDBUS_META_PROC_NORMAL;
 +
-+	/* connection metadata */
++	/*
++	 * XXX: If ptrace_may_access(PTRACE_MODE_READ) is granted, you can
++	 * overwrite hide_pid. However, ptrace_may_access() only supports
++	 * checking 'current', so we cannot use it here. We simply decide
++	 * not to support this override, so there is no need to worry.
++	 */
 +
-+	if (mc && (mask & KDBUS_ATTACH_NAMES))
-+		kdbus_kvec_set(&kvec[cnt++], mc->owned_names_items,
-+			       mc->owned_names_size, &size);
++	return KDBUS_META_PROC_NONE;
++}
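
For context, a note on the fields tested above (drawn from general procfs
semantics, not from this patch):

/*
 * pid_ns->hide_pid mirrors procfs' "hidepid=" mount option: 0 means no
 * restriction, 1 hides the contents of other users' /proc/<pid>/
 * directories, and 2 hides those processes entirely. pid_ns->pid_gid
 * mirrors "gid=", a group exempted from the restriction.
 */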
 +
-+	if (mc && (mask & KDBUS_ATTACH_CONN_DESCRIPTION))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_CONN_DESCRIPTION,
-+					    mc->conn_description,
-+					    strlen(mc->conn_description) + 1,
-+					    &size);
++/**
++ * kdbus_meta_proc_mask() - calculate which metadata would be visible to
++ *			    a connection via /proc
++ * @prv_pid:		pid of metadata provider
++ * @req_pid:		pid of metadata requestor
++ * @req_cred:		credentials of metadata requestor
++ * @wanted:		metadata that is requested
++ *
++ * This checks which metadata items of @prv_pid can be read via /proc by the
++ * requestor @req_pid.
++ *
++ * Return: Set of metadata flags the requestor can see (limited by @wanted).
++ */
++static u64 kdbus_meta_proc_mask(struct pid *prv_pid,
++				struct pid *req_pid,
++				const struct cred *req_cred,
++				u64 wanted)
++{
++	struct pid_namespace *prv_ns, *req_ns;
++	unsigned int proc;
 +
-+	if (mc && (mask & KDBUS_ATTACH_TIMESTAMP))
-+		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_TIMESTAMP, &mc->ts,
-+					    sizeof(mc->ts), &size);
++	prv_ns = ns_of_pid(prv_pid);
++	req_ns = ns_of_pid(req_pid);
++
++	/*
++	 * If the sender is not visible in the receiver namespace, then the
++	 * receiver cannot access the sender via its own procfs. Hence, we do
++	 * not attach any additional metadata.
++	 */
++	if (!pid_nr_ns(prv_pid, req_ns))
++		return 0;
 +
-+	ret = kdbus_pool_slice_copy_kvec(slice, offset, kvec, cnt, size);
-+	*real_size = size;
++	/*
++	 * If the pid-namespace of the receiver has hide_pid set, it cannot see
++	 * any process but its own. We shortcut this /proc permission check if
++	 * provider and requestor are the same. If not, we perform rather
++	 * expensive /proc permission checks.
++	 */
++	if (prv_pid == req_pid)
++		proc = KDBUS_META_PROC_NORMAL;
++	else
++		proc = kdbus_proc_permission(req_ns, req_cred, prv_pid);
++
++	/* you need /proc access to read standard process attributes */
++	if (proc < KDBUS_META_PROC_NORMAL)
++		wanted &= ~(KDBUS_ATTACH_TID_COMM |
++			    KDBUS_ATTACH_PID_COMM |
++			    KDBUS_ATTACH_SECLABEL |
++			    KDBUS_ATTACH_CMDLINE |
++			    KDBUS_ATTACH_CGROUP |
++			    KDBUS_ATTACH_AUDIT |
++			    KDBUS_ATTACH_CAPS |
++			    KDBUS_ATTACH_EXE);
++
++	/* clear all non-/proc flags */
++	return wanted & (KDBUS_ATTACH_TID_COMM |
++			 KDBUS_ATTACH_PID_COMM |
++			 KDBUS_ATTACH_SECLABEL |
++			 KDBUS_ATTACH_CMDLINE |
++			 KDBUS_ATTACH_CGROUP |
++			 KDBUS_ATTACH_AUDIT |
++			 KDBUS_ATTACH_CAPS |
++			 KDBUS_ATTACH_EXE);
++}
 +
-+exit:
-+	kfree(auxgrps);
++/**
++ * kdbus_meta_get_mask() - calculate attach flags mask for metadata request
++ * @prv_pid:		pid of metadata provider
++ * @prv_mask:		mask of metadata the provider grants unchecked
++ * @req_pid:		pid of metadata requestor
++ * @req_cred:		credentials of metadata requestor
++ * @req_mask:		mask of metadata that is requested
++ *
++ * This calculates the metadata items that the requestor @req_pid can access
++ * from the metadata provider @prv_pid. This permission check consists of
++ * several different parts:
++ *  - Providers can grant metadata items unchecked. Regardless of their type,
++ *    they're always granted to the requestor. This mask is passed as @prv_mask.
++ *  - Basic items (credentials and connection metadata) are granted implicitly
++ *    to everyone. They're publicly available to any bus-user that can see the
++ *    provider.
++ *  - Process credentials that are not granted implicitly follow the same
++ *    permission checks as /proc. This means we always assume a requestor
++ *    process has access to their *own* /proc mount, if they have access to
++ *    kdbusfs.
++ *
++ * Return: Mask of metadata that is granted.
++ */
++static u64 kdbus_meta_get_mask(struct pid *prv_pid, u64 prv_mask,
++			       struct pid *req_pid,
++			       const struct cred *req_cred, u64 req_mask)
++{
++	u64 missing, impl_mask, proc_mask = 0;
 +
-+	if (exe_page)
-+		free_page((unsigned long)exe_page);
++	/*
++	 * Connection metadata and basic unix process credentials are
++	 * transmitted implicitly and cannot be suppressed. Both are required
++	 * to implement user-space policies on the receiver side. Furthermore,
++	 * connection metadata is public state anyway, and unix credentials
++	 * are needed for UDS compatibility. We extend them slightly by
++	 * auxiliary groups and additional uids/gids/pids.
++	 */
++	impl_mask = /* connection metadata */
++		    KDBUS_ATTACH_CONN_DESCRIPTION |
++		    KDBUS_ATTACH_TIMESTAMP |
++		    KDBUS_ATTACH_NAMES |
++		    /* credentials and pids */
++		    KDBUS_ATTACH_AUXGROUPS |
++		    KDBUS_ATTACH_CREDS |
++		    KDBUS_ATTACH_PIDS;
 +
-+	return ret;
++	/*
++	 * Calculate the set of metadata that is granted neither implicitly nor
++	 * by the sender but is still requested by the receiver. If any are left,
++	 * perform rather expensive /proc access checks for them.
++	 */
++	missing = req_mask & ~((prv_mask | impl_mask) & req_mask);
++	if (missing)
++		proc_mask = kdbus_meta_proc_mask(prv_pid, req_pid, req_cred,
++						 missing);
++
++	return (prv_mask | impl_mask | proc_mask) & req_mask;
++}
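
A worked example with hypothetical masks:

/*
 * With prv_mask = KDBUS_ATTACH_EXE and req_mask = KDBUS_ATTACH_EXE |
 * KDBUS_ATTACH_CREDS | KDBUS_ATTACH_CGROUP, CREDS is granted implicitly
 * and EXE by the provider, so only CGROUP remains "missing" and is
 * subjected to the /proc permission check above.
 */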
++
++/**
++ * kdbus_meta_info_mask() - calculate attach flags mask for an info query
++ * @conn:		connection that is queried
++ * @mask:		metadata mask requested by the caller
++ *
++ * Return: Mask of metadata items the calling task is allowed to read
++ * from @conn.
++ */
++u64 kdbus_meta_info_mask(const struct kdbus_conn *conn, u64 mask)
++{
++	return kdbus_meta_get_mask(conn->pid,
++				   atomic64_read(&conn->attach_flags_send),
++				   task_pid(current),
++				   current_cred(),
++				   mask);
 +}
 +
 +/**
-+ * kdbus_meta_calc_attach_flags() - calculate attach flags for a sender
-+ *				    and a receiver
-+ * @sender:		Sending connection
-+ * @receiver:		Receiving connection
-+ *
-+ * Return: the attach flags both the sender and the receiver have opted-in
-+ * for.
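++ * kdbus_meta_msg_mask() - calculate attach flags mask for a message
++ * @snd:		sending connection
++ * @rcv:		receiving connection
++ *
++ * Return: Mask of metadata items to attach to a message from @snd to @rcv,
++ * limited by what @rcv opted in to receive.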
 + */
-+u64 kdbus_meta_calc_attach_flags(const struct kdbus_conn *sender,
-+				 const struct kdbus_conn *receiver)
++u64 kdbus_meta_msg_mask(const struct kdbus_conn *snd,
++			const struct kdbus_conn *rcv)
 +{
-+	return atomic64_read(&sender->attach_flags_send) &
-+	       atomic64_read(&receiver->attach_flags_recv);
++	return kdbus_meta_get_mask(task_pid(current),
++				   atomic64_read(&snd->attach_flags_send),
++				   rcv->pid,
++				   rcv->cred,
++				   atomic64_read(&rcv->attach_flags_recv));
 +}
 diff --git a/ipc/kdbus/metadata.h b/ipc/kdbus/metadata.h
 new file mode 100644
-index 0000000..79b6ac3
+index 0000000..dba7cc7
 --- /dev/null
 +++ b/ipc/kdbus/metadata.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,86 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -16958,44 +19551,75 @@ index 0000000..79b6ac3
 +#include <linux/kernel.h>
 +
 +struct kdbus_conn;
-+struct kdbus_kmsg;
 +struct kdbus_pool_slice;
 +
 +struct kdbus_meta_proc;
 +struct kdbus_meta_conn;
 +
++/**
++ * struct kdbus_meta_fake - Fake metadata
++ * @valid:		Bitmask of collected and valid items
++ * @uid:		UID of process
++ * @euid:		EUID of process
++ * @suid:		SUID of process
++ * @fsuid:		FSUID of process
++ * @gid:		GID of process
++ * @egid:		EGID of process
++ * @sgid:		SGID of process
++ * @fsgid:		FSGID of process
++ * @pid:		PID of process
++ * @tgid:		TGID of process
++ * @ppid:		PPID of process
++ * @seclabel:		Seclabel
++ */
++struct kdbus_meta_fake {
++	u64 valid;
++
++	/* KDBUS_ITEM_CREDS */
++	kuid_t uid, euid, suid, fsuid;
++	kgid_t gid, egid, sgid, fsgid;
++
++	/* KDBUS_ITEM_PIDS */
++	struct pid *pid, *tgid, *ppid;
++
++	/* KDBUS_ITEM_SECLABEL */
++	char *seclabel;
++};
++
 +struct kdbus_meta_proc *kdbus_meta_proc_new(void);
 +struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp);
 +struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp);
 +int kdbus_meta_proc_collect(struct kdbus_meta_proc *mp, u64 what);
-+int kdbus_meta_proc_fake(struct kdbus_meta_proc *mp,
-+			 const struct kdbus_creds *creds,
-+			 const struct kdbus_pids *pids,
-+			 const char *seclabel);
++
++struct kdbus_meta_fake *kdbus_meta_fake_new(void);
++struct kdbus_meta_fake *kdbus_meta_fake_free(struct kdbus_meta_fake *mf);
++int kdbus_meta_fake_collect(struct kdbus_meta_fake *mf,
++			    const struct kdbus_creds *creds,
++			    const struct kdbus_pids *pids,
++			    const char *seclabel);
 +
 +struct kdbus_meta_conn *kdbus_meta_conn_new(void);
 +struct kdbus_meta_conn *kdbus_meta_conn_ref(struct kdbus_meta_conn *mc);
 +struct kdbus_meta_conn *kdbus_meta_conn_unref(struct kdbus_meta_conn *mc);
 +int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
-+			    struct kdbus_kmsg *kmsg,
 +			    struct kdbus_conn *conn,
-+			    u64 what);
-+
-+int kdbus_meta_export_prepare(struct kdbus_meta_proc *mp,
-+			      struct kdbus_meta_conn *mc,
-+			      u64 *mask, size_t *sz);
-+int kdbus_meta_export(struct kdbus_meta_proc *mp,
-+		      struct kdbus_meta_conn *mc,
-+		      u64 mask,
-+		      struct kdbus_pool_slice *slice,
-+		      off_t offset, size_t *real_size);
-+u64 kdbus_meta_calc_attach_flags(const struct kdbus_conn *sender,
-+				 const struct kdbus_conn *receiver);
++			    u64 msg_seqnum, u64 what);
++
++int kdbus_meta_emit(struct kdbus_meta_proc *mp,
++		    struct kdbus_meta_fake *mf,
++		    struct kdbus_meta_conn *mc,
++		    struct kdbus_conn *conn,
++		    u64 mask,
++		    struct kdbus_item **out_items,
++		    size_t *out_size);
++u64 kdbus_meta_info_mask(const struct kdbus_conn *conn, u64 mask);
++u64 kdbus_meta_msg_mask(const struct kdbus_conn *snd,
++			const struct kdbus_conn *rcv);
 +
 +#endif
 diff --git a/ipc/kdbus/names.c b/ipc/kdbus/names.c
 new file mode 100644
-index 0000000..d77ee08
+index 0000000..057f806
 --- /dev/null
 +++ b/ipc/kdbus/names.c
 @@ -0,0 +1,770 @@
@@ -17445,7 +20069,7 @@ index 0000000..d77ee08
 +
 +	down_write(&reg->rwlock);
 +
-+	if (kdbus_conn_is_activator(conn)) {
++	if (conn->activator_of) {
 +		activator = conn->activator_of->activator;
 +		conn->activator_of->activator = NULL;
 +	}
@@ -17851,7 +20475,7 @@ index 0000000..3dd2589
 +#endif
 diff --git a/ipc/kdbus/node.c b/ipc/kdbus/node.c
 new file mode 100644
-index 0000000..0d65c65
+index 0000000..89f58bc
 --- /dev/null
 +++ b/ipc/kdbus/node.c
 @@ -0,0 +1,897 @@
@@ -17977,7 +20601,7 @@ index 0000000..0d65c65
 + * new active references can be acquired.
 + * Once all active references are dropped, the node is considered 'drained'. Now
 + * kdbus_node_deactivate() is called on each child of the node before we
-+ * continue deactvating our node. That is, once all children are entirely
++ * continue deactivating our node. That is, once all children are entirely
 + * deactivated, we call ->release_cb() of our node. ->release_cb() can release
 + * any resources on that node which are bound to the "active" state of a node.
 + * When done, we unlink the node from its parent rb-tree, mark it as
@@ -18494,7 +21118,7 @@ index 0000000..0d65c65
 +			kdbus_fs_flush(pos);
 +
 +			/*
-+			 * If the node was activated and somone subtracted BIAS
++			 * If the node was activated and someone subtracted BIAS
 +			 * from it to deactivate it, we, and only us, are
 +			 * responsible to release the extra ref-count that was
 +			 * taken once in kdbus_node_activate().
@@ -18846,10 +21470,10 @@ index 0000000..970e02b
 +#endif
 diff --git a/ipc/kdbus/notify.c b/ipc/kdbus/notify.c
 new file mode 100644
-index 0000000..e4a4542
+index 0000000..375758c
 --- /dev/null
 +++ b/ipc/kdbus/notify.c
-@@ -0,0 +1,248 @@
+@@ -0,0 +1,204 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -18880,40 +21504,24 @@ index 0000000..e4a4542
 +#include "message.h"
 +#include "notify.h"
 +
-+static inline void kdbus_notify_add_tail(struct kdbus_kmsg *kmsg,
++static inline void kdbus_notify_add_tail(struct kdbus_staging *staging,
 +					 struct kdbus_bus *bus)
 +{
 +	spin_lock(&bus->notify_lock);
-+	list_add_tail(&kmsg->notify_entry, &bus->notify_list);
++	list_add_tail(&staging->notify_entry, &bus->notify_list);
 +	spin_unlock(&bus->notify_lock);
 +}
 +
 +static int kdbus_notify_reply(struct kdbus_bus *bus, u64 id,
 +			      u64 cookie, u64 msg_type)
 +{
-+	struct kdbus_kmsg *kmsg = NULL;
-+
-+	WARN_ON(id == 0);
-+
-+	kmsg = kdbus_kmsg_new(bus, 0);
-+	if (IS_ERR(kmsg))
-+		return PTR_ERR(kmsg);
++	struct kdbus_staging *s;
 +
-+	/*
-+	 * a kernel-generated notification can only contain one
-+	 * struct kdbus_item, so make a shortcut here for
-+	 * faster lookup in the match db.
-+	 */
-+	kmsg->notify_type = msg_type;
-+	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
-+	kmsg->msg.dst_id = id;
-+	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
-+	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
-+	kmsg->msg.cookie_reply = cookie;
-+	kmsg->msg.items[0].type = msg_type;
-+
-+	kdbus_notify_add_tail(kmsg, bus);
++	s = kdbus_staging_new_kernel(bus, id, cookie, 0, msg_type);
++	if (IS_ERR(s))
++		return PTR_ERR(s);
 +
++	kdbus_notify_add_tail(s, bus);
 +	return 0;
 +}
 +
@@ -18967,78 +21575,52 @@ index 0000000..e4a4542
 +			     u64 old_flags, u64 new_flags,
 +			     const char *name)
 +{
-+	struct kdbus_kmsg *kmsg = NULL;
 +	size_t name_len, extra_size;
++	struct kdbus_staging *s;
 +
 +	name_len = strlen(name) + 1;
 +	extra_size = sizeof(struct kdbus_notify_name_change) + name_len;
-+	kmsg = kdbus_kmsg_new(bus, extra_size);
-+	if (IS_ERR(kmsg))
-+		return PTR_ERR(kmsg);
-+
-+	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
-+	kmsg->msg.dst_id = KDBUS_DST_ID_BROADCAST;
-+	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
-+	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
-+	kmsg->notify_type = type;
-+	kmsg->notify_old_id = old_id;
-+	kmsg->notify_new_id = new_id;
-+	kmsg->msg.items[0].type = type;
-+	kmsg->msg.items[0].name_change.old_id.id = old_id;
-+	kmsg->msg.items[0].name_change.old_id.flags = old_flags;
-+	kmsg->msg.items[0].name_change.new_id.id = new_id;
-+	kmsg->msg.items[0].name_change.new_id.flags = new_flags;
-+	memcpy(kmsg->msg.items[0].name_change.name, name, name_len);
-+	kmsg->notify_name = kmsg->msg.items[0].name_change.name;
-+
-+	kdbus_notify_add_tail(kmsg, bus);
 +
++	s = kdbus_staging_new_kernel(bus, KDBUS_DST_ID_BROADCAST, 0,
++				     extra_size, type);
++	if (IS_ERR(s))
++		return PTR_ERR(s);
++
++	s->notify->name_change.old_id.id = old_id;
++	s->notify->name_change.old_id.flags = old_flags;
++	s->notify->name_change.new_id.id = new_id;
++	s->notify->name_change.new_id.flags = new_flags;
++	memcpy(s->notify->name_change.name, name, name_len);
++
++	kdbus_notify_add_tail(s, bus);
 +	return 0;
 +}
 +
 +/**
-+ * kdbus_notify_id_change() - queue a notification about a unique ID change
-+ * @bus:		Bus which queues the messages
-+ * @type:		The type if the notification; KDBUS_ITEM_ID_ADD or
-+ *			KDBUS_ITEM_ID_REMOVE
-+ * @id:			The id of the connection that was added or removed
-+ * @flags:		The flags to pass in the KDBUS_ITEM flags field
-+ *
-+ * Return: 0 on success, negative errno on failure.
-+ */
-+int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags)
-+{
-+	struct kdbus_kmsg *kmsg = NULL;
-+
-+	kmsg = kdbus_kmsg_new(bus, sizeof(struct kdbus_notify_id_change));
-+	if (IS_ERR(kmsg))
-+		return PTR_ERR(kmsg);
-+
-+	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
-+	kmsg->msg.dst_id = KDBUS_DST_ID_BROADCAST;
-+	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
-+	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
-+	kmsg->notify_type = type;
-+
-+	switch (type) {
-+	case KDBUS_ITEM_ID_ADD:
-+		kmsg->notify_new_id = id;
-+		break;
-+
-+	case KDBUS_ITEM_ID_REMOVE:
-+		kmsg->notify_old_id = id;
-+		break;
-+
-+	default:
-+		BUG();
-+	}
++ * kdbus_notify_id_change() - queue a notification about a unique ID change
++ * @bus:		Bus which queues the messages
++ * @type:		The type of the notification; KDBUS_ITEM_ID_ADD or
++ *			KDBUS_ITEM_ID_REMOVE
++ * @id:			The id of the connection that was added or removed
++ * @flags:		The flags to pass in the KDBUS_ITEM flags field
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags)
++{
++	struct kdbus_staging *s;
++	size_t extra_size;
 +
-+	kmsg->msg.items[0].type = type;
-+	kmsg->msg.items[0].id_change.id = id;
-+	kmsg->msg.items[0].id_change.flags = flags;
++	extra_size = sizeof(struct kdbus_notify_id_change);
++	s = kdbus_staging_new_kernel(bus, KDBUS_DST_ID_BROADCAST, 0,
++				     extra_size, type);
++	if (IS_ERR(s))
++		return PTR_ERR(s);
 +
-+	kdbus_notify_add_tail(kmsg, bus);
++	s->notify->id_change.id = id;
++	s->notify->id_change.flags = flags;
 +
++	kdbus_notify_add_tail(s, bus);
 +	return 0;
 +}
 +
@@ -19051,7 +21633,7 @@ index 0000000..e4a4542
 +void kdbus_notify_flush(struct kdbus_bus *bus)
 +{
 +	LIST_HEAD(notify_list);
-+	struct kdbus_kmsg *kmsg, *tmp;
++	struct kdbus_staging *s, *tmp;
 +
 +	mutex_lock(&bus->notify_flush_lock);
 +	down_read(&bus->name_registry->rwlock);
@@ -19060,25 +21642,23 @@ index 0000000..e4a4542
 +	list_splice_init(&bus->notify_list, &notify_list);
 +	spin_unlock(&bus->notify_lock);
 +
-+	list_for_each_entry_safe(kmsg, tmp, &notify_list, notify_entry) {
-+		kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, NULL,
-+					KDBUS_ATTACH_TIMESTAMP);
-+
-+		if (kmsg->msg.dst_id != KDBUS_DST_ID_BROADCAST) {
++	list_for_each_entry_safe(s, tmp, &notify_list, notify_entry) {
++		if (s->msg->dst_id != KDBUS_DST_ID_BROADCAST) {
 +			struct kdbus_conn *conn;
 +
-+			conn = kdbus_bus_find_conn_by_id(bus, kmsg->msg.dst_id);
++			conn = kdbus_bus_find_conn_by_id(bus, s->msg->dst_id);
 +			if (conn) {
-+				kdbus_bus_eavesdrop(bus, NULL, kmsg);
-+				kdbus_conn_entry_insert(NULL, conn, kmsg, NULL);
++				kdbus_bus_eavesdrop(bus, NULL, s);
++				kdbus_conn_entry_insert(NULL, conn, s, NULL,
++							NULL);
 +				kdbus_conn_unref(conn);
 +			}
 +		} else {
-+			kdbus_bus_broadcast(bus, NULL, kmsg);
++			kdbus_bus_broadcast(bus, NULL, s);
 +		}
 +
-+		list_del(&kmsg->notify_entry);
-+		kdbus_kmsg_free(kmsg);
++		list_del(&s->notify_entry);
++		kdbus_staging_free(s);
 +	}
 +
 +	up_read(&bus->name_registry->rwlock);
@@ -19091,11 +21671,11 @@ index 0000000..e4a4542
 + */
 +void kdbus_notify_free(struct kdbus_bus *bus)
 +{
-+	struct kdbus_kmsg *kmsg, *tmp;
++	struct kdbus_staging *s, *tmp;
 +
-+	list_for_each_entry_safe(kmsg, tmp, &bus->notify_list, notify_entry) {
-+		list_del(&kmsg->notify_entry);
-+		kdbus_kmsg_free(kmsg);
++	list_for_each_entry_safe(s, tmp, &bus->notify_list, notify_entry) {
++		list_del(&s->notify_entry);
++		kdbus_staging_free(s);
 +	}
 +}
 diff --git a/ipc/kdbus/notify.h b/ipc/kdbus/notify.h
@@ -19136,7 +21716,7 @@ index 0000000..03df464
 +#endif
 diff --git a/ipc/kdbus/policy.c b/ipc/kdbus/policy.c
 new file mode 100644
-index 0000000..dd7fffa
+index 0000000..f2618e15
 --- /dev/null
 +++ b/ipc/kdbus/policy.c
 @@ -0,0 +1,489 @@
@@ -19486,7 +22066,7 @@ index 0000000..dd7fffa
 + * In order to allow atomic replacement of rules, the function first removes
 + * all entries that have been created for the given owner previously.
 + *
-+ * Callers to this function must make sur that the owner is a custom
++ * Callers to this function must make sure that the owner is a custom
 + * endpoint, or if the endpoint is a default endpoint, then it must be
 + * either a policy holder or an activator.
 + *
@@ -19688,7 +22268,7 @@ index 0000000..15dd7bc
 +#endif
 diff --git a/ipc/kdbus/pool.c b/ipc/kdbus/pool.c
 new file mode 100644
-index 0000000..45dcdea
+index 0000000..63ccd55
 --- /dev/null
 +++ b/ipc/kdbus/pool.c
 @@ -0,0 +1,728 @@
@@ -19738,7 +22318,7 @@ index 0000000..45dcdea
 + * The receiver's buffer, managed as a pool of allocated and free
 + * slices containing the queued messages.
 + *
-+ * Messages sent with KDBUS_CMD_SEND are copied direcly by the
++ * Messages sent with KDBUS_CMD_SEND are copied directly by the
 + * sending process into the receiver's pool.
 + *
 + * Messages received with KDBUS_CMD_RECV just return the offset
@@ -20474,10 +23054,10 @@ index 0000000..a903821
 +#endif
 diff --git a/ipc/kdbus/queue.c b/ipc/kdbus/queue.c
 new file mode 100644
-index 0000000..25bb3ad
+index 0000000..f9c44d7
 --- /dev/null
 +++ b/ipc/kdbus/queue.c
-@@ -0,0 +1,678 @@
+@@ -0,0 +1,363 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -20651,242 +23231,43 @@ index 0000000..25bb3ad
 +
 +/**
 + * kdbus_queue_entry_new() - allocate a queue entry
-+ * @conn_dst:	destination connection
-+ * @kmsg:	kmsg object the queue entry should track
-+ * @user:	user to account message on (or NULL for kernel messages)
++ * @src:	source connection, or NULL
++ * @dst:	destination connection
++ * @s:		staging object carrying the message
 + *
-+ * Allocates a queue entry based on a given kmsg and allocate space for
++ * Allocates a queue entry based on a given msg and allocates space for
 + * the message payload and the requested metadata in the connection's pool.
 + * The entry is not actually added to the queue's lists at this point.
 + *
 + * Return: the allocated entry on success, or an ERR_PTR on failures.
 + */
-+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
-+						const struct kdbus_kmsg *kmsg,
-+						struct kdbus_user *user)
++struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *src,
++						struct kdbus_conn *dst,
++						struct kdbus_staging *s)
 +{
-+	struct kdbus_msg_resources *res = kmsg->res;
-+	const struct kdbus_msg *msg = &kmsg->msg;
 +	struct kdbus_queue_entry *entry;
-+	size_t memfd_cnt = 0;
-+	struct kvec kvec[2];
-+	size_t meta_size;
-+	size_t msg_size;
-+	u64 payload_off;
-+	u64 size = 0;
-+	int ret = 0;
++	int ret;
 +
 +	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 +	if (!entry)
 +		return ERR_PTR(-ENOMEM);
 +
 +	INIT_LIST_HEAD(&entry->entry);
-+	entry->priority = msg->priority;
-+	entry->dst_name_id = kmsg->dst_name_id;
-+	entry->msg_res = kdbus_msg_resources_ref(res);
-+	entry->proc_meta = kdbus_meta_proc_ref(kmsg->proc_meta);
-+	entry->conn_meta = kdbus_meta_conn_ref(kmsg->conn_meta);
-+	entry->conn = kdbus_conn_ref(conn_dst);
-+
-+	if (kmsg->msg.src_id == KDBUS_SRC_ID_KERNEL)
-+		msg_size = msg->size;
-+	else
-+		msg_size = offsetof(struct kdbus_msg, items);
-+
-+	/* sum up the size of the needed slice */
-+	size = msg_size;
-+
-+	if (res) {
-+		size += res->vec_count *
-+			KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+
-+		if (res->memfd_count) {
-+			entry->memfd_offset =
-+				kcalloc(res->memfd_count, sizeof(size_t),
-+					GFP_KERNEL);
-+			if (!entry->memfd_offset) {
-+				ret = -ENOMEM;
-+				goto exit_free_entry;
-+			}
-+
-+			size += res->memfd_count *
-+				KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
-+		}
-+
-+		if (res->fds_count)
-+			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
-+
-+		if (res->dst_name)
-+			size += KDBUS_ITEM_SIZE(strlen(res->dst_name) + 1);
-+	}
-+
-+	/*
-+	 * Remember the offset of the metadata part, so we can override
-+	 * this part later during kdbus_queue_entry_install().
-+	 */
-+	entry->meta_offset = size;
-+
-+	if (entry->proc_meta || entry->conn_meta) {
-+		entry->attach_flags =
-+			atomic64_read(&conn_dst->attach_flags_recv);
-+
-+		ret = kdbus_meta_export_prepare(entry->proc_meta,
-+						entry->conn_meta,
-+						&entry->attach_flags,
-+						&meta_size);
-+		if (ret < 0)
-+			goto exit_free_entry;
-+
-+		size += meta_size;
-+	}
-+
-+	payload_off = size;
-+	size += kmsg->pool_size;
-+	size = KDBUS_ALIGN8(size);
-+
-+	ret = kdbus_conn_quota_inc(conn_dst, user, size,
-+				   res ? res->fds_count : 0);
-+	if (ret < 0)
-+		goto exit_free_entry;
++	entry->priority = s->msg->priority;
++	entry->conn = kdbus_conn_ref(dst);
++	entry->gaps = kdbus_gaps_ref(s->gaps);
 +
-+	entry->slice = kdbus_pool_slice_alloc(conn_dst->pool, size, true);
++	entry->slice = kdbus_staging_emit(s, src, dst);
 +	if (IS_ERR(entry->slice)) {
 +		ret = PTR_ERR(entry->slice);
 +		entry->slice = NULL;
-+		kdbus_conn_quota_dec(conn_dst, user, size,
-+				     res ? res->fds_count : 0);
-+		goto exit_free_entry;
-+	}
-+
-+	/* we accounted for exactly 'size' bytes, make sure it didn't grow */
-+	WARN_ON(kdbus_pool_slice_size(entry->slice) != size);
-+	entry->user = kdbus_user_ref(user);
-+
-+	/* copy message header */
-+	kvec[0].iov_base = (char *)msg;
-+	kvec[0].iov_len = msg_size;
-+
-+	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1, msg_size);
-+	if (ret < 0)
-+		goto exit_free_entry;
-+
-+	/* 'size' will now track the write position */
-+	size = msg_size;
-+
-+	/* create message payload items */
-+	if (res) {
-+		size_t dst_name_len = 0;
-+		unsigned int i;
-+		size_t sz = 0;
-+
-+		if (res->dst_name) {
-+			dst_name_len = strlen(res->dst_name) + 1;
-+			sz += KDBUS_ITEM_SIZE(dst_name_len);
-+		}
-+
-+		for (i = 0; i < res->data_count; ++i) {
-+			struct kdbus_vec v;
-+			struct kdbus_memfd m;
-+
-+			switch (res->data[i].type) {
-+			case KDBUS_MSG_DATA_VEC:
-+				sz += KDBUS_ITEM_SIZE(sizeof(v));
-+				break;
-+
-+			case KDBUS_MSG_DATA_MEMFD:
-+				sz += KDBUS_ITEM_SIZE(sizeof(m));
-+				break;
-+			}
-+		}
-+
-+		if (sz) {
-+			struct kdbus_item *items, *item;
-+
-+			items = kmalloc(sz, GFP_KERNEL);
-+			if (!items) {
-+				ret = -ENOMEM;
-+				goto exit_free_entry;
-+			}
-+
-+			item = items;
-+
-+			if (res->dst_name)
-+				item = kdbus_item_set(item, KDBUS_ITEM_DST_NAME,
-+						      res->dst_name,
-+						      dst_name_len);
-+
-+			for (i = 0; i < res->data_count; ++i) {
-+				struct kdbus_msg_data *d = res->data + i;
-+				struct kdbus_memfd m = {};
-+				struct kdbus_vec v = {};
-+
-+				switch (d->type) {
-+				case KDBUS_MSG_DATA_VEC:
-+					v.size = d->size;
-+					v.offset = d->vec.off;
-+					if (v.offset != ~0ULL)
-+						v.offset += payload_off;
-+
-+					item = kdbus_item_set(item,
-+							KDBUS_ITEM_PAYLOAD_OFF,
-+							&v, sizeof(v));
-+					break;
-+
-+				case KDBUS_MSG_DATA_MEMFD:
-+					/*
-+					 * Remember the location of memfds, so
-+					 * we can override the content from
-+					 * kdbus_queue_entry_install().
-+					 */
-+					entry->memfd_offset[memfd_cnt++] =
-+						msg_size +
-+						(char *)item - (char *)items +
-+						offsetof(struct kdbus_item,
-+							 memfd);
-+
-+					item = kdbus_item_set(item,
-+						       KDBUS_ITEM_PAYLOAD_MEMFD,
-+						       &m, sizeof(m));
-+					break;
-+				}
-+			}
-+
-+			kvec[0].iov_base = items;
-+			kvec[0].iov_len = sz;
-+
-+			ret = kdbus_pool_slice_copy_kvec(entry->slice, size,
-+							 kvec, 1, sz);
-+			kfree(items);
-+
-+			if (ret < 0)
-+				goto exit_free_entry;
-+
-+			size += sz;
-+		}
-+
-+		/*
-+		 * Remember the location of the FD part, so we can override the
-+		 * content in kdbus_queue_entry_install().
-+		 */
-+		if (res->fds_count) {
-+			entry->fds_offset = size;
-+			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
-+		}
-+	}
-+
-+	/* finally, copy over the actual message payload */
-+	if (kmsg->iov_count) {
-+		ret = kdbus_pool_slice_copy_iovec(entry->slice, payload_off,
-+						  kmsg->iov,
-+						  kmsg->iov_count,
-+						  kmsg->pool_size);
-+		if (ret < 0)
-+			goto exit_free_entry;
++		goto error;
 +	}
 +
++	entry->user = src ? kdbus_user_ref(src->user) : NULL;
 +	return entry;
 +
-+exit_free_entry:
++error:
 +	kdbus_queue_entry_free(entry);
 +	return ERR_PTR(ret);
 +}
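
A sketch of the intended call pattern (locking and queue linkage elided;
names are illustrative):

static int example_enqueue(struct kdbus_conn *src, struct kdbus_conn *dst,
			   struct kdbus_staging *s)
{
	struct kdbus_queue_entry *e;

	e = kdbus_queue_entry_new(src, dst, s);
	if (IS_ERR(e))
		return PTR_ERR(e);

	/* ... link @e into dst->queue and wake up waiters ... */
	return 0;
}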
@@ -20911,17 +23292,13 @@ index 0000000..25bb3ad
 +	if (entry->slice) {
 +		kdbus_conn_quota_dec(entry->conn, entry->user,
 +				     kdbus_pool_slice_size(entry->slice),
-+				     entry->msg_res ?
-+						entry->msg_res->fds_count : 0);
++				     entry->gaps ? entry->gaps->n_fds : 0);
 +		kdbus_pool_slice_release(entry->slice);
-+		kdbus_user_unref(entry->user);
 +	}
 +
-+	kdbus_msg_resources_unref(entry->msg_res);
-+	kdbus_meta_conn_unref(entry->conn_meta);
-+	kdbus_meta_proc_unref(entry->proc_meta);
++	kdbus_user_unref(entry->user);
++	kdbus_gaps_unref(entry->gaps);
 +	kdbus_conn_unref(entry->conn);
-+	kfree(entry->memfd_offset);
 +	kfree(entry);
 +}
 +
@@ -20932,134 +23309,22 @@ index 0000000..25bb3ad
 + * @return_flags:	Pointer to store the return flags for userspace
 + * @install_fds:	Whether or not to install associated file descriptors
 + *
-+ * This function will create a slice to transport the message header, the
-+ * metadata items and other items for information stored in @entry, and
-+ * store it as entry->slice.
-+ *
-+ * If @install_fds is %true, file descriptors will as well be installed.
-+ * This function must always be called from the task context of the receiver.
-+ *
 + * Return: 0 on success.
 + */
 +int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
 +			      u64 *return_flags, bool install_fds)
 +{
-+	u64 msg_size = entry->meta_offset;
-+	struct kdbus_conn *conn_dst = entry->conn;
-+	struct kdbus_msg_resources *res;
 +	bool incomplete_fds = false;
-+	struct kvec kvec[2];
-+	size_t memfds = 0;
-+	int i, ret;
-+
-+	lockdep_assert_held(&conn_dst->lock);
-+
-+	if (entry->proc_meta || entry->conn_meta) {
-+		size_t meta_size;
-+
-+		ret = kdbus_meta_export(entry->proc_meta,
-+					entry->conn_meta,
-+					entry->attach_flags,
-+					entry->slice,
-+					entry->meta_offset,
-+					&meta_size);
-+		if (ret < 0)
-+			return ret;
-+
-+		msg_size += meta_size;
-+	}
++	int ret;
 +
-+	/* Update message size at offset 0 */
-+	kvec[0].iov_base = &msg_size;
-+	kvec[0].iov_len = sizeof(msg_size);
++	lockdep_assert_held(&entry->conn->lock);
 +
-+	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1,
-+					 sizeof(msg_size));
++	ret = kdbus_gaps_install(entry->gaps, entry->slice, &incomplete_fds);
 +	if (ret < 0)
 +		return ret;
 +
-+	res = entry->msg_res;
-+
-+	if (!res)
-+		return 0;
-+
-+	if (res->fds_count) {
-+		struct kdbus_item_header hdr;
-+		size_t off;
-+		int *fds;
-+
-+		fds = kmalloc_array(res->fds_count, sizeof(int), GFP_KERNEL);
-+		if (!fds)
-+			return -ENOMEM;
-+
-+		for (i = 0; i < res->fds_count; i++) {
-+			if (install_fds) {
-+				fds[i] = get_unused_fd_flags(O_CLOEXEC);
-+				if (fds[i] >= 0)
-+					fd_install(fds[i],
-+						   get_file(res->fds[i]));
-+				else
-+					incomplete_fds = true;
-+			} else {
-+				fds[i] = -1;
-+			}
-+		}
-+
-+		off = entry->fds_offset;
-+
-+		hdr.type = KDBUS_ITEM_FDS;
-+		hdr.size = KDBUS_ITEM_HEADER_SIZE +
-+			   sizeof(int) * res->fds_count;
-+
-+		kvec[0].iov_base = &hdr;
-+		kvec[0].iov_len = sizeof(hdr);
-+
-+		kvec[1].iov_base = fds;
-+		kvec[1].iov_len = sizeof(int) * res->fds_count;
-+
-+		ret = kdbus_pool_slice_copy_kvec(entry->slice, off,
-+						 kvec, 2, hdr.size);
-+		kfree(fds);
-+
-+		if (ret < 0)
-+			return ret;
-+	}
-+
-+	for (i = 0; i < res->data_count; ++i) {
-+		struct kdbus_msg_data *d = res->data + i;
-+		struct kdbus_memfd m;
-+
-+		if (d->type != KDBUS_MSG_DATA_MEMFD)
-+			continue;
-+
-+		m.start = d->memfd.start;
-+		m.size = d->size;
-+		m.fd = -1;
-+
-+		if (install_fds) {
-+			m.fd = get_unused_fd_flags(O_CLOEXEC);
-+			if (m.fd < 0) {
-+				m.fd = -1;
-+				incomplete_fds = true;
-+			} else {
-+				fd_install(m.fd,
-+					   get_file(d->memfd.file));
-+			}
-+		}
-+
-+		kvec[0].iov_base = &m;
-+		kvec[0].iov_len = sizeof(m);
-+
-+		ret = kdbus_pool_slice_copy_kvec(entry->slice,
-+						 entry->memfd_offset[memfds++],
-+						 kvec, 1, sizeof(m));
-+		if (ret < 0)
-+			return ret;
-+	}
-+
 +	if (incomplete_fds)
 +		*return_flags |= KDBUS_RECV_RETURN_INCOMPLETE_FDS;
-+
 +	return 0;
 +}
 +
@@ -21123,7 +23388,7 @@ index 0000000..25bb3ad
 +		return 0;
 +
 +	size = kdbus_pool_slice_size(e->slice);
-+	fds = e->msg_res ? e->msg_res->fds_count : 0;
++	fds = e->gaps ? e->gaps->n_fds : 0;
 +
 +	ret = kdbus_conn_quota_inc(dst, e->user, size, fds);
 +	if (ret < 0)
@@ -21158,10 +23423,10 @@ index 0000000..25bb3ad
 +}
 diff --git a/ipc/kdbus/queue.h b/ipc/kdbus/queue.h
 new file mode 100644
-index 0000000..7f2db96
+index 0000000..bf686d1
 --- /dev/null
 +++ b/ipc/kdbus/queue.h
-@@ -0,0 +1,92 @@
+@@ -0,0 +1,84 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -21179,6 +23444,13 @@ index 0000000..7f2db96
 +#ifndef __KDBUS_QUEUE_H
 +#define __KDBUS_QUEUE_H
 +
++#include <linux/list.h>
++#include <linux/rbtree.h>
++
++struct kdbus_conn;
++struct kdbus_pool_slice;
++struct kdbus_reply;
++struct kdbus_staging;
 +struct kdbus_user;
 +
 +/**
@@ -21199,52 +23471,37 @@ index 0000000..7f2db96
 + * @entry:		Entry in the connection's list
 + * @prio_node:		Entry in the priority queue tree
 + * @prio_entry:		Queue tree node entry in the list of one priority
-+ * @slice:		Slice in the receiver's pool for the message
-+ * @attach_flags:	Attach flags used during slice allocation
-+ * @meta_offset:	Offset of first metadata item in slice
-+ * @fds_offset:		Offset of FD item in slice
-+ * @memfd_offset:	Array of slice-offsets for all memfd items
 + * @priority:		Message priority
 + * @dst_name_id:	The sequence number of the name this message is
 + *			addressed to, 0 for messages sent to an ID
-+ * @msg_res:		Message resources
-+ * @proc_meta:		Process metadata, captured at message arrival
-+ * @conn_meta:		Connection metadata, captured at message arrival
-+ * @reply:		The reply block if a reply to this message is expected
++ * @conn:		Connection this entry is queued on
++ * @gaps:		Gaps object to fill message gaps at RECV time
 + * @user:		User used for accounting
++ * @slice:		Slice in the receiver's pool for the message
++ * @reply:		The reply block if a reply to this message is expected
 + */
 +struct kdbus_queue_entry {
 +	struct list_head entry;
 +	struct rb_node prio_node;
 +	struct list_head prio_entry;
 +
-+	struct kdbus_pool_slice *slice;
-+
-+	u64 attach_flags;
-+	size_t meta_offset;
-+	size_t fds_offset;
-+	size_t *memfd_offset;
-+
 +	s64 priority;
 +	u64 dst_name_id;
 +
-+	struct kdbus_msg_resources *msg_res;
-+	struct kdbus_meta_proc *proc_meta;
-+	struct kdbus_meta_conn *conn_meta;
-+	struct kdbus_reply *reply;
 +	struct kdbus_conn *conn;
++	struct kdbus_gaps *gaps;
 +	struct kdbus_user *user;
++	struct kdbus_pool_slice *slice;
++	struct kdbus_reply *reply;
 +};
 +
-+struct kdbus_kmsg;
-+
 +void kdbus_queue_init(struct kdbus_queue *queue);
 +struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
 +					   s64 priority, bool use_priority);
 +
-+struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
-+						const struct kdbus_kmsg *kmsg,
-+						struct kdbus_user *user);
++struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *src,
++						struct kdbus_conn *dst,
++						struct kdbus_staging *s);
 +void kdbus_queue_entry_free(struct kdbus_queue_entry *entry);
 +int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
 +			      u64 *return_flags, bool install_fds);
@@ -21827,6 +24084,463 @@ index 0000000..5297166
 +size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len);
 +
 +#endif
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 0ceb386..eddf1ed 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4331,20 +4331,20 @@ static void ring_buffer_attach(struct perf_event *event,
+ 		WARN_ON_ONCE(event->rcu_pending);
+ 
+ 		old_rb = event->rb;
++		event->rcu_batches = get_state_synchronize_rcu();
++		event->rcu_pending = 1;
++
+ 		spin_lock_irqsave(&old_rb->event_lock, flags);
+ 		list_del_rcu(&event->rb_entry);
+ 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
++	}
+ 
+-		event->rcu_batches = get_state_synchronize_rcu();
+-		event->rcu_pending = 1;
++	if (event->rcu_pending && rb) {
++		cond_synchronize_rcu(event->rcu_batches);
++		event->rcu_pending = 0;
+ 	}
+ 
+ 	if (rb) {
+-		if (event->rcu_pending) {
+-			cond_synchronize_rcu(event->rcu_batches);
+-			event->rcu_pending = 0;
+-		}
+-
+ 		spin_lock_irqsave(&rb->event_lock, flags);
+ 		list_add_rcu(&event->rb_entry, &rb->event_list);
+ 		spin_unlock_irqrestore(&rb->event_lock, flags);
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index 8d423bc..a9a4a1b 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -247,7 +247,9 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ 			return -EPERM;
+ 
++		spin_lock_bh(&br->lock);
+ 		br_stp_set_bridge_priority(br, args[1]);
++		spin_unlock_bh(&br->lock);
+ 		return 0;
+ 
+ 	case BRCTL_SET_PORT_PRIORITY:
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 7832d07..4114687 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -243,13 +243,12 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
+ 	return true;
+ }
+ 
+-/* Acquires and releases bridge lock */
++/* called under bridge lock */
+ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ {
+ 	struct net_bridge_port *p;
+ 	int wasroot;
+ 
+-	spin_lock_bh(&br->lock);
+ 	wasroot = br_is_root_bridge(br);
+ 
+ 	list_for_each_entry(p, &br->port_list, list) {
+@@ -267,7 +266,6 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ 	br_port_state_selection(br);
+ 	if (br_is_root_bridge(br) && !wasroot)
+ 		br_become_root_bridge(br);
+-	spin_unlock_bh(&br->lock);
+ }
+ 
+ /* called under bridge lock */
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 689c818..32d710e 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -310,12 +310,8 @@ int can_send(struct sk_buff *skb, int loop)
+ 		return err;
+ 	}
+ 
+-	if (newskb) {
+-		if (!(newskb->tstamp.tv64))
+-			__net_timestamp(newskb);
+-
++	if (newskb)
+ 		netif_rx_ni(newskb);
+-	}
+ 
+ 	/* update statistics */
+ 	can_stats.tx_frames++;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 2237c1b..3de6542 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -957,8 +957,6 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ 	rc = 0;
+ 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ 		goto out_unlock_bh;
+-	if (neigh->dead)
+-		goto out_dead;
+ 
+ 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+@@ -1015,13 +1013,6 @@ out_unlock_bh:
+ 		write_unlock(&neigh->lock);
+ 	local_bh_enable();
+ 	return rc;
+-
+-out_dead:
+-	if (neigh->nud_state & NUD_STALE)
+-		goto out_unlock_bh;
+-	write_unlock_bh(&neigh->lock);
+-	kfree_skb(skb);
+-	return 1;
+ }
+ EXPORT_SYMBOL(__neigh_event_send);
+ 
+@@ -1085,8 +1076,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ 	    (old & (NUD_NOARP | NUD_PERMANENT)))
+ 		goto out;
+-	if (neigh->dead)
+-		goto out;
+ 
+ 	if (!(new & NUD_VALID)) {
+ 		neigh_del_timer(neigh);
+@@ -1236,8 +1225,6 @@ EXPORT_SYMBOL(neigh_update);
+  */
+ void __neigh_set_probe_once(struct neighbour *neigh)
+ {
+-	if (neigh->dead)
+-		return;
+ 	neigh->updated = jiffies;
+ 	if (!(neigh->nud_state & NUD_FAILED))
+ 		return;
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index a5aa54e..8b47a4d 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -228,8 +228,6 @@ int inet_listen(struct socket *sock, int backlog)
+ 				err = 0;
+ 			if (err)
+ 				goto out;
+-
+-			tcp_fastopen_init_key_once(true);
+ 		}
+ 		err = inet_csk_listen_start(sk, backlog);
+ 		if (err)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 6ddde89..7cfb089 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -432,15 +432,6 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
+ 		kfree_skb(skb);
+ }
+ 
+-/* For some errors we have valid addr_offset even with zero payload and
+- * zero port. Also, addr_offset should be supported if port is set.
+- */
+-static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
+-{
+-	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+-	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+-}
+-
+ /* IPv4 supports cmsg on all imcp errors and some timestamps
+  *
+  * Timestamp code paths do not initialize the fields expected by cmsg:
+@@ -507,7 +498,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 
+-	if (sin && ipv4_datagram_support_addr(serr)) {
++	if (sin && serr->port) {
+ 		sin->sin_family = AF_INET;
+ 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
+ 						   serr->addr_offset);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index bb2ce74..f1377f2 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2545,13 +2545,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+ 	case TCP_FASTOPEN:
+ 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+-		    TCPF_LISTEN))) {
+-			tcp_fastopen_init_key_once(true);
+-
++		    TCPF_LISTEN)))
+ 			err = fastopen_init_queue(sk, val);
+-		} else {
++		else
+ 			err = -EINVAL;
+-		}
+ 		break;
+ 	case TCP_TIMESTAMP:
+ 		if (!tp->repair)
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index f9c0fb8..46b087a 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -78,6 +78,8 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
+ 	struct tcp_fastopen_context *ctx;
+ 	bool ok = false;
+ 
++	tcp_fastopen_init_key_once(true);
++
+ 	rcu_read_lock();
+ 	ctx = rcu_dereference(tcp_fastopen_ctx);
+ 	if (ctx) {
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 62d908e..762a58c 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -325,16 +325,6 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
+ 	kfree_skb(skb);
+ }
+ 
+-/* For some errors we have valid addr_offset even with zero payload and
+- * zero port. Also, addr_offset should be supported if port is set.
+- */
+-static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
+-{
+-	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
+-	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
+-	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
+-}
+-
+ /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+  *
+  * At one point, excluding local errors was a quick test to identify icmp/icmp6
+@@ -399,7 +389,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 
+-	if (sin && ipv6_datagram_support_addr(serr)) {
++	if (sin && serr->port) {
+ 		const unsigned char *nh = skb_network_header(skb);
+ 		sin->sin6_family = AF_INET6;
+ 		sin->sin6_flowinfo = 0;
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index 81e9785..a907f2d 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -66,15 +66,12 @@ update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+ 	if (sdata->vif.type != NL80211_IFTYPE_AP)
+ 		return;
+ 
+-	/* crypto_tx_tailroom_needed_cnt is protected by this */
+-	assert_key_lock(sdata->local);
+-
+-	rcu_read_lock();
++	mutex_lock(&sdata->local->mtx);
+ 
+-	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
++	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ 		vlan->crypto_tx_tailroom_needed_cnt += delta;
+ 
+-	rcu_read_unlock();
++	mutex_unlock(&sdata->local->mtx);
+ }
+ 
+ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+@@ -98,8 +95,6 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+ 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
+ 	 */
+ 
+-	assert_key_lock(sdata->local);
+-
+ 	update_vlan_tailroom_need_count(sdata, 1);
+ 
+ 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
+@@ -114,8 +109,6 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+ static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+ 					 int delta)
+ {
+-	assert_key_lock(sdata->local);
+-
+ 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+ 
+ 	update_vlan_tailroom_need_count(sdata, -delta);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index fe1610d..b5989c6 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1272,6 +1272,16 @@ static void packet_sock_destruct(struct sock *sk)
+ 	sk_refcnt_debug_dec(sk);
+ }
+ 
++static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
++{
++	int x = atomic_read(&f->rr_cur) + 1;
++
++	if (x >= num)
++		x = 0;
++
++	return x;
++}
++
+ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+ 				      struct sk_buff *skb,
+ 				      unsigned int num)
+@@ -1283,9 +1293,13 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
+ 				    struct sk_buff *skb,
+ 				    unsigned int num)
+ {
+-	unsigned int val = atomic_inc_return(&f->rr_cur);
++	int cur, old;
+ 
+-	return val % num;
++	cur = atomic_read(&f->rr_cur);
++	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
++				     fanout_rr_next(f, num))) != cur)
++		cur = old;
++	return cur;
+ }
+ 
+ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
+@@ -1339,7 +1353,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+ 			     struct packet_type *pt, struct net_device *orig_dev)
+ {
+ 	struct packet_fanout *f = pt->af_packet_priv;
+-	unsigned int num = READ_ONCE(f->num_members);
++	unsigned int num = f->num_members;
+ 	struct packet_sock *po;
+ 	unsigned int idx;
+ 
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index abe7c2d..fc5e45b 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -599,9 +599,7 @@ out:
+ 	return err;
+ no_route:
+ 	kfree_skb(nskb);
+-
+-	if (asoc)
+-		IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 
+ 	/* FIXME: Returning the 'err' will affect all the associations
+ 	 * associated with a socket, although only one of the paths of the
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 5f6c4e6..f09de7f 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1528,10 +1528,8 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
+-	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
+-	 * held and that should be grabbed before socket lock.
+ 	 */
+-	spin_lock_bh(&net->sctp.addr_wq_lock);
++	local_bh_disable();
+ 	bh_lock_sock(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+@@ -1541,7 +1539,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	bh_unlock_sock(sk);
+-	spin_unlock_bh(&net->sctp.addr_wq_lock);
++	local_bh_enable();
+ 
+ 	sock_put(sk);
+ 
+@@ -3582,7 +3580,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ 		return 0;
+ 
+-	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	if (val == 0 && sp->do_auto_asconf) {
+ 		list_del(&sp->auto_asconf_list);
+ 		sp->do_auto_asconf = 0;
+@@ -3591,7 +3588,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 		    &sock_net(sk)->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+ 	}
+-	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	return 0;
+ }
+ 
+@@ -4125,28 +4121,18 @@ static int sctp_init_sock(struct sock *sk)
+ 	local_bh_disable();
+ 	percpu_counter_inc(&sctp_sockets_allocated);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
+-
+-	/* Nothing can fail after this block, otherwise
+-	 * sctp_destroy_sock() will be called without addr_wq_lock held
+-	 */
+ 	if (net->sctp.default_auto_asconf) {
+-		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_add_tail(&sp->auto_asconf_list,
+ 		    &net->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+-		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
+-	} else {
++	} else
+ 		sp->do_auto_asconf = 0;
+-	}
+-
+ 	local_bh_enable();
+ 
+ 	return 0;
+ }
+ 
+-/* Cleanup any SCTP per socket resources. Must be called with
+- * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
+- */
++/* Cleanup any SCTP per socket resources.  */
+ static void sctp_destroy_sock(struct sock *sk)
+ {
+ 	struct sctp_sock *sp;
+@@ -7209,19 +7195,6 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->mc_list = NULL;
+ }
+ 
+-static inline void sctp_copy_descendant(struct sock *sk_to,
+-					const struct sock *sk_from)
+-{
+-	int ancestor_size = sizeof(struct inet_sock) +
+-			    sizeof(struct sctp_sock) -
+-			    offsetof(struct sctp_sock, auto_asconf_list);
+-
+-	if (sk_from->sk_family == PF_INET6)
+-		ancestor_size += sizeof(struct ipv6_pinfo);
+-
+-	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
+-}
+-
+ /* Populate the fields of the newsk from the oldsk and migrate the assoc
+  * and its messages to the newsk.
+  */
+@@ -7236,6 +7209,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	struct sk_buff *skb, *tmp;
+ 	struct sctp_ulpevent *event;
+ 	struct sctp_bind_hashbucket *head;
++	struct list_head tmplist;
+ 
+ 	/* Migrate socket buffer sizes and all the socket level options to the
+ 	 * new socket.
+@@ -7243,7 +7217,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+ 	/* Brute force copy old sctp opt. */
+-	sctp_copy_descendant(newsk, oldsk);
++	if (oldsp->do_auto_asconf) {
++		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
++		inet_sk_copy_descendant(newsk, oldsk);
++		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
++	} else
++		inet_sk_copy_descendant(newsk, oldsk);
+ 
+ 	/* Restore the ep value that was overwritten with the above structure
+ 	 * copy.
 diff --git a/samples/Kconfig b/samples/Kconfig
 index 224ebb4..a4c6b2f 100644
 --- a/samples/Kconfig
@@ -23349,6 +26063,39 @@ index 0000000..c3ba958
 +}
 +
 +#endif /* libc sanity check */
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 212070e..7dade28 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -403,7 +403,6 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
+ 	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ 		sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ 		sbsec->behavior == SECURITY_FS_USE_TASK ||
+-		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
+ 		/* Special handling. Genfs but also in-core setxattr handler */
+ 		!strcmp(sb->s_type->name, "sysfs") ||
+ 		!strcmp(sb->s_type->name, "pstore") ||
+diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
+index 98cfc38..10df572 100644
+--- a/tools/build/Makefile.build
++++ b/tools/build/Makefile.build
+@@ -94,12 +94,12 @@ obj-y        := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
+ subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
+ 
+ # '$(OUTPUT)/dir' prefix to all objects
+-objprefix    := $(subst ./,,$(OUTPUT)$(dir)/)
+-obj-y        := $(addprefix $(objprefix),$(obj-y))
+-subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
++prefix       := $(subst ./,,$(OUTPUT)$(dir)/)
++obj-y        := $(addprefix $(prefix),$(obj-y))
++subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
+ 
+ # Final '$(obj)-in.o' object
+-in-target := $(objprefix)$(obj)-in.o
++in-target := $(prefix)$(obj)-in.o
+ 
+ PHONY += $(subdir-y)
+ 
 diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
 index 95abddc..b57100c 100644
 --- a/tools/testing/selftests/Makefile
@@ -23546,10 +26293,10 @@ index 0000000..ed28cca
 +const char *enum_PAYLOAD(long long id);
 diff --git a/tools/testing/selftests/kdbus/kdbus-test.c b/tools/testing/selftests/kdbus/kdbus-test.c
 new file mode 100644
-index 0000000..294e82a
+index 0000000..db732e5
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-test.c
-@@ -0,0 +1,900 @@
+@@ -0,0 +1,899 @@
 +#include <errno.h>
 +#include <stdio.h>
 +#include <string.h>
@@ -23851,7 +26598,6 @@ index 0000000..294e82a
 +
 +		ret = kdbus_create_bus(env->control_fd,
 +				       args->busname ?: n,
-+				       _KDBUS_ATTACH_ALL,
 +				       _KDBUS_ATTACH_ALL, &s);
 +		free(n);
 +		ASSERT_RETURN(ret == 0);
@@ -24541,10 +27287,10 @@ index 0000000..a5c6ae8
 +#endif /* _TEST_KDBUS_H_ */
 diff --git a/tools/testing/selftests/kdbus/kdbus-util.c b/tools/testing/selftests/kdbus/kdbus-util.c
 new file mode 100644
-index 0000000..29a0cb1
+index 0000000..a5e54ca
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-util.c
-@@ -0,0 +1,1617 @@
+@@ -0,0 +1,1611 @@
 +/*
 + * Copyright (C) 2013-2015 Daniel Mack
 + * Copyright (C) 2013-2015 Kay Sievers
@@ -24661,8 +27407,7 @@ index 0000000..29a0cb1
 +}
 +
 +int kdbus_create_bus(int control_fd, const char *name,
-+		     uint64_t req_meta, uint64_t owner_meta,
-+		     char **path)
++		     uint64_t owner_meta, char **path)
 +{
 +	struct {
 +		struct kdbus_cmd cmd;
@@ -24674,12 +27419,12 @@ index 0000000..29a0cb1
 +			struct kdbus_bloom_parameter bloom;
 +		} bp;
 +
-+		/* required and owner metadata items */
++		/* owner metadata items */
 +		struct {
 +			uint64_t size;
 +			uint64_t type;
 +			uint64_t flags;
-+		} attach[2];
++		} attach;
 +
 +		/* name item */
 +		struct {
@@ -24699,13 +27444,9 @@ index 0000000..29a0cb1
 +	snprintf(bus_make.name.str, sizeof(bus_make.name.str),
 +		 "%u-%s", getuid(), name);
 +
-+	bus_make.attach[0].type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
-+	bus_make.attach[0].size = sizeof(bus_make.attach[0]);
-+	bus_make.attach[0].flags = req_meta;
-+
-+	bus_make.attach[1].type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
-+	bus_make.attach[1].size = sizeof(bus_make.attach[0]);
-+	bus_make.attach[1].flags = owner_meta;
++	bus_make.attach.type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
++	bus_make.attach.size = sizeof(bus_make.attach);
++	bus_make.attach.flags = owner_meta;
 +
 +	bus_make.name.type = KDBUS_ITEM_MAKE_NAME;
 +	bus_make.name.size = KDBUS_ITEM_HEADER_SIZE +
@@ -24714,8 +27455,7 @@ index 0000000..29a0cb1
 +	bus_make.cmd.flags = KDBUS_MAKE_ACCESS_WORLD;
 +	bus_make.cmd.size = sizeof(bus_make.cmd) +
 +			     bus_make.bp.size +
-+			     bus_make.attach[0].size +
-+			     bus_make.attach[1].size +
++			     bus_make.attach.size +
 +			     bus_make.name.size;
 +
 +	kdbus_printf("Creating bus with name >%s< on control fd %d ...\n",
@@ -26164,10 +28904,10 @@ index 0000000..29a0cb1
 +}
 diff --git a/tools/testing/selftests/kdbus/kdbus-util.h b/tools/testing/selftests/kdbus/kdbus-util.h
 new file mode 100644
-index 0000000..d1a0f1b
+index 0000000..e1e18b9
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-util.h
-@@ -0,0 +1,219 @@
+@@ -0,0 +1,218 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Daniel Mack
@@ -26338,8 +29078,7 @@ index 0000000..d1a0f1b
 +int kdbus_msg_dump(const struct kdbus_conn *conn,
 +		   const struct kdbus_msg *msg);
 +int kdbus_create_bus(int control_fd, const char *name,
-+		     uint64_t req_meta, uint64_t owner_meta,
-+		     char **path);
++		     uint64_t owner_meta, char **path);
 +int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
 +		   uint64_t cookie, uint64_t flags, uint64_t timeout,
 +		   int64_t priority, uint64_t dst_id);
@@ -27479,10 +30218,10 @@ index 0000000..71a92d8
 +}
 diff --git a/tools/testing/selftests/kdbus/test-connection.c b/tools/testing/selftests/kdbus/test-connection.c
 new file mode 100644
-index 0000000..e7c4866
+index 0000000..4688ce8
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-connection.c
-@@ -0,0 +1,606 @@
+@@ -0,0 +1,597 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <fcntl.h>
@@ -27555,15 +30294,6 @@ index 0000000..e7c4866
 +
 +	hello.pool_size = POOL_SIZE;
 +
-+	/*
-+	 * The connection created by the core requires ALL meta flags
-+	 * to be sent. An attempt to send less than that should result in
-+	 * -ECONNREFUSED.
-+	 */
-+	hello.attach_flags_send = _KDBUS_ATTACH_ALL & ~KDBUS_ATTACH_TIMESTAMP;
-+	ret = kdbus_cmd_hello(fd, &hello);
-+	ASSERT_RETURN(ret == -ECONNREFUSED);
-+
 +	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
 +	hello.offset = (__u64)-1;
 +
@@ -29832,10 +32562,10 @@ index 0000000..2360dc1
 +}
 diff --git a/tools/testing/selftests/kdbus/test-message.c b/tools/testing/selftests/kdbus/test-message.c
 new file mode 100644
-index 0000000..f1615da
+index 0000000..ddc1e0a
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-message.c
-@@ -0,0 +1,731 @@
+@@ -0,0 +1,734 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <fcntl.h>
@@ -29892,9 +32622,12 @@ index 0000000..f1615da
 +			     KDBUS_DST_ID_BROADCAST);
 +	ASSERT_RETURN(ret == 0);
 +
-+	/* Make sure that we do not get our own broadcasts */
-+	ret = kdbus_msg_recv(sender, NULL, NULL);
-+	ASSERT_RETURN(ret == -EAGAIN);
++	/* Make sure that we do get our own broadcasts */
++	ret = kdbus_msg_recv(sender, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
 +
 +	/* ... and receive on the 2nd */
 +	ret = kdbus_msg_recv_poll(conn, 100, &msg, &offset);
@@ -30569,10 +33302,10 @@ index 0000000..f1615da
 +}
 diff --git a/tools/testing/selftests/kdbus/test-metadata-ns.c b/tools/testing/selftests/kdbus/test-metadata-ns.c
 new file mode 100644
-index 0000000..ccdfae0
+index 0000000..1f6edc0
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-metadata-ns.c
-@@ -0,0 +1,503 @@
+@@ -0,0 +1,500 @@
 +/*
 + * Test metadata in new namespaces. Even if our tests can run
 + * in a namespaced setup, this test is necessary so we can inspect
@@ -30743,9 +33476,8 @@ index 0000000..ccdfae0
 +		ASSERT_EXIT(ret == 0);
 +		ASSERT_EXIT(msg->dst_id == userns_conn->id);
 +
-+		/* Different namespaces no CAPS */
 +		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+		ASSERT_EXIT(item == NULL);
++		ASSERT_EXIT(item);
 +
 +		/* uid/gid not mapped, so we have unpriv cached creds */
 +		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
@@ -30771,9 +33503,8 @@ index 0000000..ccdfae0
 +		ASSERT_EXIT(ret == 0);
 +		ASSERT_EXIT(msg->dst_id == KDBUS_DST_ID_BROADCAST);
 +
-+		/* Different namespaces no CAPS */
 +		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+		ASSERT_EXIT(item == NULL);
++		ASSERT_EXIT(item);
 +
 +		/* uid/gid not mapped, so we have unpriv cached creds */
 +		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
@@ -30933,9 +33664,8 @@ index 0000000..ccdfae0
 +
 +	userns_conn_id = msg->src_id;
 +
-+	/* We do not share the userns, so no KDBUS_ITEM_CAPS */
 +	item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
-+	ASSERT_RETURN(item == NULL);
++	ASSERT_RETURN(item);
 +
 +	/*
 +	 * Compare received items, creds must be translated into
@@ -32098,10 +34828,10 @@ index 0000000..3437012
 +}
 diff --git a/tools/testing/selftests/kdbus/test-policy-priv.c b/tools/testing/selftests/kdbus/test-policy-priv.c
 new file mode 100644
-index 0000000..a318ccc
+index 0000000..0208638
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-policy-priv.c
-@@ -0,0 +1,1269 @@
+@@ -0,0 +1,1285 @@
 +#include <errno.h>
 +#include <stdio.h>
 +#include <string.h>
@@ -32214,6 +34944,12 @@ index 0000000..a318ccc
 +					     KDBUS_DST_ID_BROADCAST);
 +			ASSERT_RETURN(ret == 0);
 +
++			/* drop own broadcast */
++			ret = kdbus_msg_recv(child_2, &msg, NULL);
++			ASSERT_RETURN(ret == 0);
++			ASSERT_RETURN(msg->src_id == child_2->id);
++			kdbus_msg_free(msg);
++
 +			/* Use a little bit high time */
 +			ret = kdbus_msg_recv_poll(child_2, 1000,
 +						  &msg, NULL);
@@ -32249,6 +34985,12 @@ index 0000000..a318ccc
 +						KDBUS_DST_ID_BROADCAST);
 +				ASSERT_EXIT(ret == 0);
 +
++				/* drop own broadcast */
++				ret = kdbus_msg_recv(child_2, &msg, NULL);
++				ASSERT_RETURN(ret == 0);
++				ASSERT_RETURN(msg->src_id == child_2->id);
++				kdbus_msg_free(msg);
++
 +				/* Use a little bit high time */
 +				ret = kdbus_msg_recv_poll(child_2, 1000,
 +							  &msg, NULL);
@@ -32417,11 +35159,6 @@ index 0000000..a318ccc
 +	 * receiver is not able to TALK to that name.
 +	 */
 +
-+	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
-+					    DO_NOT_DROP,
-+					    -ETIMEDOUT, -ETIMEDOUT);
-+	ASSERT_RETURN(ret == 0);
-+
 +	/* Activate matching for a privileged connection */
 +	ret = kdbus_add_match_empty(owner_a);
 +	ASSERT_RETURN(ret == 0);
@@ -32512,6 +35249,15 @@ index 0000000..a318ccc
 +			     0, 0, KDBUS_DST_ID_BROADCAST);
 +	ASSERT_RETURN(ret == 0);
 +
++	ret = kdbus_msg_recv_poll(owner_a, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == expected_cookie);
++
++	/* Check src ID */
++	ASSERT_RETURN(msg->src_id == owner_a->id);
++
++	kdbus_msg_free(msg);
++
 +	ret = kdbus_msg_recv_poll(owner_b, 100, &msg, NULL);
 +	ASSERT_RETURN(ret == 0);
 +	ASSERT_RETURN(msg->cookie == expected_cookie);
@@ -33937,3 +36683,27 @@ index 0000000..cfd1930
 +
 +	return TEST_OK;
 +}
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 950064a..78fb820 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1561,7 +1561,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ 			goto out;
+ 	}
+ 
+-	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
++	if (irq_num >= kvm->arch.vgic.nr_irqs)
+ 		return -EINVAL;
+ 
+ 	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+@@ -2161,7 +2161,10 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+ 
+ 	BUG_ON(!vgic_initialized(kvm));
+ 
++	if (spi > kvm->arch.vgic.nr_irqs)
++		return -EINVAL;
+ 	return kvm_vgic_inject_irq(kvm, 0, spi, level);
++
+ }
+ 
+ /* MSI not implemented yet */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-10 23:47 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-10 23:47 UTC (permalink / raw
  To: gentoo-commits

commit:     6de9c5bd82aa04106c0053e3e5da83ecea932eed
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jul 10 23:34:57 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jul 10 23:34:57 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=6de9c5bd

Linux patch 4.1.2

 0000_README            |    4 +
 1001_linux-4.1.2.patch | 2235 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2239 insertions(+)

diff --git a/0000_README b/0000_README
index 8bf61f5..784c55d 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch:  1000_linux-4.1.1.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.1.1
 
+Patch:  1001_linux-4.1.2.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.2
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1001_linux-4.1.2.patch b/1001_linux-4.1.2.patch
new file mode 100644
index 0000000..c8b2f30
--- /dev/null
+++ b/1001_linux-4.1.2.patch
@@ -0,0 +1,2235 @@
+diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+index 750d577e8083..f5a8ca29aff0 100644
+--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
++++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+@@ -1,7 +1,7 @@
+ * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+ 
+ Required properties:
+-- compatible: should be "marvell,armada-370-neta".
++- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
+ - reg: address and length of the register set for the device.
+ - interrupts: interrupt for the device
+ - phy: See ethernet.txt file in the same directory.
+diff --git a/Makefile b/Makefile
+index 1caf4ad3eb8a..cef84c061f02 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Series 4800
+ 
+diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
+index ec96f0b36346..06a2f2ae9d1e 100644
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -270,7 +270,6 @@
+ 			};
+ 
+ 			eth0: ethernet@70000 {
+-				compatible = "marvell,armada-370-neta";
+ 				reg = <0x70000 0x4000>;
+ 				interrupts = <8>;
+ 				clocks = <&gateclk 4>;
+@@ -286,7 +285,6 @@
+ 			};
+ 
+ 			eth1: ethernet@74000 {
+-				compatible = "marvell,armada-370-neta";
+ 				reg = <0x74000 0x4000>;
+ 				interrupts = <10>;
+ 				clocks = <&gateclk 3>;
+diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
+index 00b50db57c9c..ca4257b2f77d 100644
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -307,6 +307,14 @@
+ 					dmacap,memset;
+ 				};
+ 			};
++
++			ethernet@70000 {
++				compatible = "marvell,armada-370-neta";
++			};
++
++			ethernet@74000 {
++				compatible = "marvell,armada-370-neta";
++			};
+ 		};
+ 	};
+ };
+diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+index 8479fdc9e9c2..c5fdc99f0dbe 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -318,7 +318,7 @@
+ 			};
+ 
+ 			eth3: ethernet@34000 {
+-				compatible = "marvell,armada-370-neta";
++				compatible = "marvell,armada-xp-neta";
+ 				reg = <0x34000 0x4000>;
+ 				interrupts = <14>;
+ 				clocks = <&gateclk 1>;
+diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+index 661d54c81580..0e24f1a38540 100644
+--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+@@ -356,7 +356,7 @@
+ 			};
+ 
+ 			eth3: ethernet@34000 {
+-				compatible = "marvell,armada-370-neta";
++				compatible = "marvell,armada-xp-neta";
+ 				reg = <0x34000 0x4000>;
+ 				interrupts = <14>;
+ 				clocks = <&gateclk 1>;
+diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
+index 013d63f69e36..8fdd6d7c0ab1 100644
+--- a/arch/arm/boot/dts/armada-xp.dtsi
++++ b/arch/arm/boot/dts/armada-xp.dtsi
+@@ -177,7 +177,7 @@
+ 			};
+ 
+ 			eth2: ethernet@30000 {
+-				compatible = "marvell,armada-370-neta";
++				compatible = "marvell,armada-xp-neta";
+ 				reg = <0x30000 0x4000>;
+ 				interrupts = <12>;
+ 				clocks = <&gateclk 2>;
+@@ -220,6 +220,14 @@
+ 				};
+ 			};
+ 
++			ethernet@70000 {
++				compatible = "marvell,armada-xp-neta";
++			};
++
++			ethernet@74000 {
++				compatible = "marvell,armada-xp-neta";
++			};
++
+ 			xor@f0900 {
+ 				compatible = "marvell,orion-xor";
+ 				reg = <0xF0900 0x100
+diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
+index 2fd8988f310c..3794ca16499d 100644
+--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
++++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
+@@ -573,7 +573,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun4i-a10-ts";
++			compatible = "allwinner,sun5i-a13-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <29>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
+index 883cb4873688..5098185abde6 100644
+--- a/arch/arm/boot/dts/sun5i-a13.dtsi
++++ b/arch/arm/boot/dts/sun5i-a13.dtsi
+@@ -555,7 +555,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun4i-a10-ts";
++			compatible = "allwinner,sun5i-a13-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <29>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
+index fdd181792b4b..2b4847c7cbd4 100644
+--- a/arch/arm/boot/dts/sun7i-a20.dtsi
++++ b/arch/arm/boot/dts/sun7i-a20.dtsi
+@@ -1042,7 +1042,7 @@
+ 		};
+ 
+ 		rtp: rtp@01c25000 {
+-			compatible = "allwinner,sun4i-a10-ts";
++			compatible = "allwinner,sun5i-a13-ts";
+ 			reg = <0x01c25000 0x100>;
+ 			interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
+ 			#thermal-sensor-cells = <0>;
+diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
+index 79caf79b304a..f7db3a5d80e3 100644
+--- a/arch/arm/kvm/interrupts.S
++++ b/arch/arm/kvm/interrupts.S
+@@ -170,13 +170,9 @@ __kvm_vcpu_return:
+ 	@ Don't trap coprocessor accesses for host kernel
+ 	set_hstr vmexit
+ 	set_hdcr vmexit
+-	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
++	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
+ 
+ #ifdef CONFIG_VFPv3
+-	@ Save floating point registers we if let guest use them.
+-	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+-	bne	after_vfp_restore
+-
+ 	@ Switch VFP/NEON hardware state to the host's
+ 	add	r7, vcpu, #VCPU_VFP_GUEST
+ 	store_vfp_state r7
+@@ -188,6 +184,8 @@ after_vfp_restore:
+ 	@ Restore FPEXC_EN which we clobbered on entry
+ 	pop	{r2}
+ 	VFPFMXR FPEXC, r2
++#else
++after_vfp_restore:
+ #endif
+ 
+ 	@ Reset Hyp-role
+@@ -483,7 +481,7 @@ switch_to_guest_vfp:
+ 	push	{r3-r7}
+ 
+ 	@ NEON/VFP used.  Turn on VFP access.
+-	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))
++	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
+ 
+ 	@ Switch VFP/NEON hardware state to the guest's
+ 	add	r7, r0, #VCPU_VFP_HOST
+diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
+index 35e4a3a0c476..48efe2ee452c 100644
+--- a/arch/arm/kvm/interrupts_head.S
++++ b/arch/arm/kvm/interrupts_head.S
+@@ -591,8 +591,13 @@ ARM_BE8(rev	r6, r6  )
+ .endm
+ 
+ /* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
+- * (hardware reset value is 0). Keep previous value in r2. */
+-.macro set_hcptr operation, mask
++ * (hardware reset value is 0). Keep previous value in r2.
++ * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
++ * VFP wasn't already enabled (always executed on vmtrap).
++ * If a label is specified with vmexit, it is branched to if VFP wasn't
++ * enabled.
++ */
++.macro set_hcptr operation, mask, label = none
+ 	mrc	p15, 4, r2, c1, c1, 2
+ 	ldr	r3, =\mask
+ 	.if \operation == vmentry
+@@ -601,6 +606,17 @@ ARM_BE8(rev	r6, r6  )
+ 	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
+ 	.endif
+ 	mcr	p15, 4, r3, c1, c1, 2
++	.if \operation != vmentry
++	.if \operation == vmexit
++	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
++	beq	1f
++	.endif
++	isb
++	.if \label != none
++	b	\label
++	.endif
++1:
++	.endif
+ .endm
+ 
+ /* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
+diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
+index 02fa8eff6ae1..531e922486b2 100644
+--- a/arch/arm/kvm/psci.c
++++ b/arch/arm/kvm/psci.c
+@@ -230,10 +230,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 	case PSCI_0_2_FN64_AFFINITY_INFO:
+ 		val = kvm_psci_vcpu_affinity_info(vcpu);
+ 		break;
+-	case PSCI_0_2_FN_MIGRATE:
+-	case PSCI_0_2_FN64_MIGRATE:
+-		val = PSCI_RET_NOT_SUPPORTED;
+-		break;
+ 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
+ 		/*
+ 		 * Trusted OS is MP hence does not require migration
+@@ -242,10 +238,6 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		 */
+ 		val = PSCI_0_2_TOS_MP;
+ 		break;
+-	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
+-	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
+-		val = PSCI_RET_NOT_SUPPORTED;
+-		break;
+ 	case PSCI_0_2_FN_SYSTEM_OFF:
+ 		kvm_psci_system_off(vcpu);
+ 		/*
+@@ -271,7 +263,8 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ 		ret = 0;
+ 		break;
+ 	default:
+-		return -EINVAL;
++		val = PSCI_RET_NOT_SUPPORTED;
++		break;
+ 	}
+ 
+ 	*vcpu_reg(vcpu, 0) = val;
+@@ -291,12 +284,9 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ 	case KVM_PSCI_FN_CPU_ON:
+ 		val = kvm_psci_vcpu_on(vcpu);
+ 		break;
+-	case KVM_PSCI_FN_CPU_SUSPEND:
+-	case KVM_PSCI_FN_MIGRATE:
++	default:
+ 		val = PSCI_RET_NOT_SUPPORTED;
+ 		break;
+-	default:
+-		return -EINVAL;
+ 	}
+ 
+ 	*vcpu_reg(vcpu, 0) = val;
+diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
+index 469a150bf98f..a2e8ef3c0bd9 100644
+--- a/arch/arm/mach-imx/clk-imx6q.c
++++ b/arch/arm/mach-imx/clk-imx6q.c
+@@ -443,7 +443,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ 	clk[IMX6QDL_CLK_GPMI_IO]      = imx_clk_gate2("gpmi_io",       "enfc",              base + 0x78, 28);
+ 	clk[IMX6QDL_CLK_GPMI_APB]     = imx_clk_gate2("gpmi_apb",      "usdhc3",            base + 0x78, 30);
+ 	clk[IMX6QDL_CLK_ROM]          = imx_clk_gate2("rom",           "ahb",               base + 0x7c, 0);
+-	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ipg",               base + 0x7c, 4);
++	clk[IMX6QDL_CLK_SATA]         = imx_clk_gate2("sata",          "ahb",               base + 0x7c, 4);
+ 	clk[IMX6QDL_CLK_SDMA]         = imx_clk_gate2("sdma",          "ahb",               base + 0x7c, 6);
+ 	clk[IMX6QDL_CLK_SPBA]         = imx_clk_gate2("spba",          "ipg",               base + 0x7c, 12);
+ 	clk[IMX6QDL_CLK_SPDIF]        = imx_clk_gate2("spdif",         "spdif_podf",        base + 0x7c, 14);
+diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
+index 6dfd4ab97b2a..301ab38d38ba 100644
+--- a/arch/arm/mach-mvebu/pm-board.c
++++ b/arch/arm/mach-mvebu/pm-board.c
+@@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
+ 	for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++)
+ 		ackcmd |= BIT(pic_raw_gpios[i]);
+ 
++	srcmd = cpu_to_le32(srcmd);
++	ackcmd = cpu_to_le32(ackcmd);
++
+ 	/*
+ 	 * Wait a while, the PIC needs quite a bit of time between the
+ 	 * two GPIO commands.
+diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
+index 88de2dce2e87..7469347b1749 100644
+--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
++++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
+@@ -34,6 +34,7 @@
+ #include "iomap.h"
+ #include "irq.h"
+ #include "pm.h"
++#include "reset.h"
+ #include "sleep.h"
+ 
+ #ifdef CONFIG_PM_SLEEP
+@@ -70,15 +71,13 @@ static struct cpuidle_driver tegra_idle_driver = {
+ 
+ #ifdef CONFIG_PM_SLEEP
+ #ifdef CONFIG_SMP
+-static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+-
+ static int tegra20_reset_sleeping_cpu_1(void)
+ {
+ 	int ret = 0;
+ 
+ 	tegra_pen_lock();
+ 
+-	if (readl(pmc + PMC_SCRATCH41) == CPU_RESETTABLE)
++	if (readb(tegra20_cpu1_resettable_status) == CPU_RESETTABLE)
+ 		tegra20_cpu_shutdown(1);
+ 	else
+ 		ret = -EINVAL;
+diff --git a/arch/arm/mach-tegra/reset-handler.S b/arch/arm/mach-tegra/reset-handler.S
+index 71be4af5e975..e3070fdab80b 100644
+--- a/arch/arm/mach-tegra/reset-handler.S
++++ b/arch/arm/mach-tegra/reset-handler.S
+@@ -169,10 +169,10 @@ after_errata:
+ 	cmp	r6, #TEGRA20
+ 	bne	1f
+ 	/* If not CPU0, don't let CPU0 reset CPU1 now that CPU1 is coming up. */
+-	mov32	r5, TEGRA_PMC_BASE
+-	mov	r0, #0
++	mov32	r5, TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET
++	mov	r0, #CPU_NOT_RESETTABLE
+ 	cmp	r10, #0
+-	strne	r0, [r5, #PMC_SCRATCH41]
++	strneb	r0, [r5, #__tegra20_cpu1_resettable_status_offset]
+ 1:
+ #endif
+ 
+@@ -281,6 +281,10 @@ __tegra_cpu_reset_handler_data:
+ 	.rept	TEGRA_RESET_DATA_SIZE
+ 	.long	0
+ 	.endr
++	.globl	__tegra20_cpu1_resettable_status_offset
++	.equ	__tegra20_cpu1_resettable_status_offset, \
++					. - __tegra_cpu_reset_handler_start
++	.byte	0
+ 	.align L1_CACHE_SHIFT
+ 
+ ENTRY(__tegra_cpu_reset_handler_end)
+diff --git a/arch/arm/mach-tegra/reset.h b/arch/arm/mach-tegra/reset.h
+index 76a93434c6ee..29c3dec0126a 100644
+--- a/arch/arm/mach-tegra/reset.h
++++ b/arch/arm/mach-tegra/reset.h
+@@ -35,6 +35,7 @@ extern unsigned long __tegra_cpu_reset_handler_data[TEGRA_RESET_DATA_SIZE];
+ 
+ void __tegra_cpu_reset_handler_start(void);
+ void __tegra_cpu_reset_handler(void);
++void __tegra20_cpu1_resettable_status_offset(void);
+ void __tegra_cpu_reset_handler_end(void);
+ void tegra_secondary_startup(void);
+ 
+@@ -47,6 +48,9 @@ void tegra_secondary_startup(void);
+ 	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
+ 	((u32)&__tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_LP2] - \
+ 	 (u32)__tegra_cpu_reset_handler_start)))
++#define tegra20_cpu1_resettable_status \
++	(IO_ADDRESS(TEGRA_IRAM_BASE + TEGRA_IRAM_RESET_HANDLER_OFFSET + \
++	 (u32)__tegra20_cpu1_resettable_status_offset))
+ #endif
+ 
+ #define tegra_cpu_reset_handler_offset \
+diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
+index be4bc5f853f5..e6b684e14322 100644
+--- a/arch/arm/mach-tegra/sleep-tegra20.S
++++ b/arch/arm/mach-tegra/sleep-tegra20.S
+@@ -97,9 +97,10 @@ ENDPROC(tegra20_hotplug_shutdown)
+ ENTRY(tegra20_cpu_shutdown)
+ 	cmp	r0, #0
+ 	reteq	lr			@ must not be called for CPU 0
+-	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
++	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r2, =__tegra20_cpu1_resettable_status_offset
+ 	mov	r12, #CPU_RESETTABLE
+-	str	r12, [r1]
++	strb	r12, [r1, r2]
+ 
+ 	cpu_to_halt_reg r1, r0
+ 	ldr	r3, =TEGRA_FLOW_CTRL_VIRT
+@@ -182,38 +183,41 @@ ENDPROC(tegra_pen_unlock)
+ /*
+  * tegra20_cpu_clear_resettable(void)
+  *
+- * Called to clear the "resettable soon" flag in PMC_SCRATCH41 when
++ * Called to clear the "resettable soon" flag in IRAM variable when
+  * it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_clear_resettable)
+-	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
++	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r2, =__tegra20_cpu1_resettable_status_offset
+ 	mov	r12, #CPU_NOT_RESETTABLE
+-	str	r12, [r1]
++	strb	r12, [r1, r2]
+ 	ret	lr
+ ENDPROC(tegra20_cpu_clear_resettable)
+ 
+ /*
+  * tegra20_cpu_set_resettable_soon(void)
+  *
+- * Called to set the "resettable soon" flag in PMC_SCRATCH41 when
++ * Called to set the "resettable soon" flag in IRAM variable when
+  * it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_set_resettable_soon)
+-	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
++	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r2, =__tegra20_cpu1_resettable_status_offset
+ 	mov	r12, #CPU_RESETTABLE_SOON
+-	str	r12, [r1]
++	strb	r12, [r1, r2]
+ 	ret	lr
+ ENDPROC(tegra20_cpu_set_resettable_soon)
+ 
+ /*
+  * tegra20_cpu_is_resettable_soon(void)
+  *
+- * Returns true if the "resettable soon" flag in PMC_SCRATCH41 has been
++ * Returns true if the "resettable soon" flag in IRAM variable has been
+  * set because it is expected that the secondary CPU will be idle soon.
+  */
+ ENTRY(tegra20_cpu_is_resettable_soon)
+-	mov32	r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
+-	ldr	r12, [r1]
++	mov32	r1, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r2, =__tegra20_cpu1_resettable_status_offset
++	ldrb	r12, [r1, r2]
+ 	cmp	r12, #CPU_RESETTABLE_SOON
+ 	moveq	r0, #1
+ 	movne	r0, #0
+@@ -256,9 +260,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
+ 	mov	r0, #TEGRA_FLUSH_CACHE_LOUIS
+ 	bl	tegra_disable_clean_inv_dcache
+ 
+-	mov32	r0, TEGRA_PMC_VIRT + PMC_SCRATCH41
++	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r4, =__tegra20_cpu1_resettable_status_offset
+ 	mov	r3, #CPU_RESETTABLE
+-	str	r3, [r0]
++	strb	r3, [r0, r4]
+ 
+ 	bl	tegra_cpu_do_idle
+ 
+@@ -274,10 +279,10 @@ ENTRY(tegra20_sleep_cpu_secondary_finish)
+ 
+ 	bl	tegra_pen_lock
+ 
+-	mov32	r3, TEGRA_PMC_VIRT
+-	add	r0, r3, #PMC_SCRATCH41
++	mov32	r0, TEGRA_IRAM_RESET_BASE_VIRT
++	ldr	r4, =__tegra20_cpu1_resettable_status_offset
+ 	mov	r3, #CPU_NOT_RESETTABLE
+-	str	r3, [r0]
++	strb	r3, [r0, r4]
+ 
+ 	bl	tegra_pen_unlock
+ 
+diff --git a/arch/arm/mach-tegra/sleep.h b/arch/arm/mach-tegra/sleep.h
+index 92d46ec1361a..0d59360d891d 100644
+--- a/arch/arm/mach-tegra/sleep.h
++++ b/arch/arm/mach-tegra/sleep.h
+@@ -18,6 +18,7 @@
+ #define __MACH_TEGRA_SLEEP_H
+ 
+ #include "iomap.h"
++#include "irammap.h"
+ 
+ #define TEGRA_ARM_PERIF_VIRT (TEGRA_ARM_PERIF_BASE - IO_CPU_PHYS \
+ 					+ IO_CPU_VIRT)
+@@ -29,6 +30,9 @@
+ 					+ IO_APB_VIRT)
+ #define TEGRA_PMC_VIRT	(TEGRA_PMC_BASE - IO_APB_PHYS + IO_APB_VIRT)
+ 
++#define TEGRA_IRAM_RESET_BASE_VIRT (IO_IRAM_VIRT + \
++				TEGRA_IRAM_RESET_HANDLER_OFFSET)
++
+ /* PMC_SCRATCH37-39 and 41 are used for tegra_pen_lock and idle */
+ #define PMC_SCRATCH37	0x130
+ #define PMC_SCRATCH38	0x134
+diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
+index 9488fa5f8866..afc96ecb9004 100644
+--- a/arch/mips/include/asm/mach-generic/spaces.h
++++ b/arch/mips/include/asm/mach-generic/spaces.h
+@@ -94,7 +94,11 @@
+ #endif
+ 
+ #ifndef FIXADDR_TOP
++#ifdef CONFIG_KVM_GUEST
++#define FIXADDR_TOP		((unsigned long)(long)(int)0x7ffe0000)
++#else
+ #define FIXADDR_TOP		((unsigned long)(long)(int)0xfffe0000)
+ #endif
++#endif
+ 
+ #endif /* __ASM_MACH_GENERIC_SPACES_H */
+diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
+index bb68e8d520e8..52f205ae1281 100644
+--- a/arch/mips/kvm/mips.c
++++ b/arch/mips/kvm/mips.c
+@@ -982,7 +982,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+ 
+ 	/* If nothing is dirty, don't bother messing with page tables. */
+ 	if (is_dirty) {
+-		memslot = &kvm->memslots->memslots[log->slot];
++		memslot = id_to_memslot(kvm->memslots, log->slot);
+ 
+ 		ga = memslot->base_gfn << PAGE_SHIFT;
+ 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 12b638425bb9..d90893b76e7c 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -131,7 +131,16 @@ static void pmao_restore_workaround(bool ebb) { }
+ 
+ static bool regs_use_siar(struct pt_regs *regs)
+ {
+-	return !!regs->result;
++	/*
++	 * When we take a performance monitor exception the regs are setup
++	 * using perf_read_regs() which overloads some fields, in particular
++	 * regs->result to tell us whether to use SIAR.
++	 *
++	 * However if the regs are from another exception, eg. a syscall, then
++	 * they have not been setup using perf_read_regs() and so regs->result
++	 * is something random.
++	 */
++	return ((TRAP(regs) == 0xf00) && regs->result);
+ }
+ 
+ /*
+diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
+index 9f73c8059022..49b74454d7ee 100644
+--- a/arch/s390/kernel/crash_dump.c
++++ b/arch/s390/kernel/crash_dump.c
+@@ -415,7 +415,7 @@ static void *nt_s390_vx_low(void *ptr, __vector128 *vx_regs)
+ 	ptr += len;
+ 	/* Copy lower halves of SIMD registers 0-15 */
+ 	for (i = 0; i < 16; i++) {
+-		memcpy(ptr, &vx_regs[i], 8);
++		memcpy(ptr, &vx_regs[i].u[2], 8);
+ 		ptr += 8;
+ 	}
+ 	return ptr;
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index 9de47265ef73..b745a109bfc1 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
+ 	if (sclp_has_sigpif())
+ 		return __inject_extcall_sigpif(vcpu, src_id);
+ 
+-	if (!test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
++	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
+ 		return -EBUSY;
+ 	*extcall = irq->u.extcall;
+ 	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+@@ -1606,6 +1606,9 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
+ 	int i;
+ 
+ 	spin_lock(&fi->lock);
++	fi->pending_irqs = 0;
++	memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
++	memset(&fi->mchk, 0, sizeof(fi->mchk));
+ 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
+ 		clear_irq_list(&fi->lists[i]);
+ 	for (i = 0; i < FIRQ_MAX_COUNT; i++)
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 55423d8be580..9afb9d602f84 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -227,7 +227,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
+ ({								\
+ 	/* Branch instruction needs 6 bytes */			\
+ 	int rel = (addrs[i + off + 1] - (addrs[i + 1] - 6)) / 2;\
+-	_EMIT6(op1 | reg(b1, b2) << 16 | rel, op2 | mask);	\
++	_EMIT6(op1 | reg(b1, b2) << 16 | (rel & 0xffff), op2 | mask);	\
+ 	REG_SET_SEEN(b1);					\
+ 	REG_SET_SEEN(b2);					\
+ })
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f4a555beef19..41b06fca39f7 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -591,7 +591,7 @@ struct kvm_arch {
+ 	struct kvm_pic *vpic;
+ 	struct kvm_ioapic *vioapic;
+ 	struct kvm_pit *vpit;
+-	int vapics_in_nmi_mode;
++	atomic_t vapics_in_nmi_mode;
+ 	struct mutex apic_map_lock;
+ 	struct kvm_apic_map *apic_map;
+ 
+diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
+index 4dce6f8b6129..f90952f64e79 100644
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -305,7 +305,7 @@ static void pit_do_work(struct kthread_work *work)
+ 		 * LVT0 to NMI delivery. Other PIC interrupts are just sent to
+ 		 * VCPU0, and only if its LVT0 is in EXTINT mode.
+ 		 */
+-		if (kvm->arch.vapics_in_nmi_mode > 0)
++		if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+ 			kvm_for_each_vcpu(i, vcpu, kvm)
+ 				kvm_apic_nmi_wd_deliver(vcpu);
+ 	}
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index 4c7deb4f78a1..67d07e051436 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1250,10 +1250,10 @@ static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
+ 		if (!nmi_wd_enabled) {
+ 			apic_debug("Receive NMI setting on APIC_LVT0 "
+ 				   "for cpu %d\n", apic->vcpu->vcpu_id);
+-			apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
++			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ 		}
+ 	} else if (nmi_wd_enabled)
+-		apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
++		atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
+ }
+ 
+ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
+@@ -1808,6 +1808,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
+ 	apic_update_ppr(apic);
+ 	hrtimer_cancel(&apic->lapic_timer.timer);
+ 	apic_update_lvtt(apic);
++	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
+ 	update_divide_count(apic);
+ 	start_apic_timer(apic);
+ 	apic->irr_pending = true;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 9afa233b5482..4911bf19122b 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -511,8 +511,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
+ {
+ 	struct vcpu_svm *svm = to_svm(vcpu);
+ 
+-	if (svm->vmcb->control.next_rip != 0)
++	if (svm->vmcb->control.next_rip != 0) {
++		WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ 		svm->next_rip = svm->vmcb->control.next_rip;
++	}
+ 
+ 	if (!svm->next_rip) {
+ 		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+@@ -4317,7 +4319,9 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
+ 		break;
+ 	}
+ 
+-	vmcb->control.next_rip  = info->next_rip;
++	/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
++	if (static_cpu_has(X86_FEATURE_NRIPS))
++		vmcb->control.next_rip  = info->next_rip;
+ 	vmcb->control.exit_code = icpt_info.exit_code;
+ 	vmexit = nested_svm_exit_handled(svm);
+ 
+diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
+index 14a63ed6fe09..ff9911707160 100644
+--- a/arch/x86/pci/acpi.c
++++ b/arch/x86/pci/acpi.c
+@@ -81,6 +81,17 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
+ 			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+ 		},
+ 	},
++	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/931368 */
++	/* https://bugs.launchpad.net/ubuntu/+source/alsa-driver/+bug/1033299 */
++	{
++		.callback = set_use_crs,
++		.ident = "Foxconn K8M890-8237A",
++		.matches = {
++			DMI_MATCH(DMI_BOARD_VENDOR, "Foxconn"),
++			DMI_MATCH(DMI_BOARD_NAME, "K8M890-8237A"),
++			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
++		},
++	},
+ 
+ 	/* Now for the blacklist.. */
+ 
+@@ -121,8 +132,10 @@ void __init pci_acpi_crs_quirks(void)
+ {
+ 	int year;
+ 
+-	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
+-		pci_use_crs = false;
++	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) {
++		if (iomem_resource.end <= 0xffffffff)
++			pci_use_crs = false;
++	}
+ 
+ 	dmi_check_system(pci_crs_quirks);
+ 
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 6414661ac1c4..c45d274a75c8 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -535,7 +535,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+ 
+ 	val |= vid;
+ 
+-	wrmsrl(MSR_IA32_PERF_CTL, val);
++	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
+ }
+ 
+ #define BYT_BCLK_FREQS 5
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 59372077ec7c..3442764a5293 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -60,6 +60,8 @@ static int nap_loop(struct cpuidle_device *dev,
+ 	return index;
+ }
+ 
++/* Register for fastsleep only in oneshot mode of broadcast */
++#ifdef CONFIG_TICK_ONESHOT
+ static int fastsleep_loop(struct cpuidle_device *dev,
+ 				struct cpuidle_driver *drv,
+ 				int index)
+@@ -83,7 +85,7 @@ static int fastsleep_loop(struct cpuidle_device *dev,
+ 
+ 	return index;
+ }
+-
++#endif
+ /*
+  * States for dedicated partition case.
+  */
+@@ -209,7 +211,14 @@ static int powernv_add_idle_states(void)
+ 			powernv_states[nr_idle_states].flags = 0;
+ 			powernv_states[nr_idle_states].target_residency = 100;
+ 			powernv_states[nr_idle_states].enter = &nap_loop;
+-		} else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
++		}
++
++		/*
++		 * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come
++		 * within this config dependency check.
++		 */
++#ifdef CONFIG_TICK_ONESHOT
++		if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+ 			flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
+ 			/* Add FASTSLEEP state */
+ 			strcpy(powernv_states[nr_idle_states].name, "FastSleep");
+@@ -218,7 +227,7 @@ static int powernv_add_idle_states(void)
+ 			powernv_states[nr_idle_states].target_residency = 300000;
+ 			powernv_states[nr_idle_states].enter = &fastsleep_loop;
+ 		}
+-
++#endif
+ 		powernv_states[nr_idle_states].exit_latency =
+ 				((unsigned int)latency_ns[i]) / 1000;
+ 
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 857414afa29a..f062158d4dc9 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -925,7 +925,8 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
+ 		sg_count--;
+ 		link_tbl_ptr--;
+ 	}
+-	be16_add_cpu(&link_tbl_ptr->len, cryptlen);
++	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
++					+ cryptlen);
+ 
+ 	/* tag end of link table */
+ 	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
+@@ -2561,6 +2562,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
+ 		break;
+ 	default:
+ 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
++		kfree(t_alg);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index e1c7e9e51045..ca9f4edbb940 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1869,9 +1869,15 @@ static void free_pt_##LVL (unsigned long __pt)			\
+ 	pt = (u64 *)__pt;					\
+ 								\
+ 	for (i = 0; i < 512; ++i) {				\
++		/* PTE present? */				\
+ 		if (!IOMMU_PTE_PRESENT(pt[i]))			\
+ 			continue;				\
+ 								\
++		/* Large PTE? */				\
++		if (PM_PTE_LEVEL(pt[i]) == 0 ||			\
++		    PM_PTE_LEVEL(pt[i]) == 7)			\
++			continue;				\
++								\
+ 		p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);	\
+ 		FN(p);						\
+ 	}							\
+diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
+index 66a803b9dd3a..65075ef75e2a 100644
+--- a/drivers/iommu/arm-smmu.c
++++ b/drivers/iommu/arm-smmu.c
+@@ -1567,7 +1567,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+ 		return -ENODEV;
+ 	}
+ 
+-	if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) {
++	if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
+ 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
+ 		dev_notice(smmu->dev, "\taddress translation ops\n");
+ 	}
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index c80287a02735..9231cdfe2757 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -848,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ 			int sg_cnt;
+ 
+ 			sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
+-			if (sg_cnt == 0) {
++			if (sg_cnt <= 0) {
+ 				/*
+ 				 * This only happens when someone fed
+ 				 * us an invalid request.
+diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
+index b0f69248cb71..e9b1810d319f 100644
+--- a/drivers/net/can/dev.c
++++ b/drivers/net/can/dev.c
+@@ -440,6 +440,9 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
+ 		struct can_frame *cf = (struct can_frame *)skb->data;
+ 		u8 dlc = cf->can_dlc;
+ 
++		if (!(skb->tstamp.tv64))
++			__net_timestamp(skb);
++
+ 		netif_rx(priv->echo_skb[idx]);
+ 		priv->echo_skb[idx] = NULL;
+ 
+@@ -575,6 +578,7 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
++	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+@@ -603,6 +607,7 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ 	if (unlikely(!skb))
+ 		return NULL;
+ 
++	__net_timestamp(skb);
+ 	skb->protocol = htons(ETH_P_CANFD);
+ 	skb->pkt_type = PACKET_BROADCAST;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index c837eb91d43e..f64f5290d6f8 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -207,6 +207,7 @@ static void slc_bump(struct slcan *sl)
+ 	if (!skb)
+ 		return;
+ 
++	__net_timestamp(skb);
+ 	skb->dev = sl->dev;
+ 	skb->protocol = htons(ETH_P_CAN);
+ 	skb->pkt_type = PACKET_BROADCAST;
+diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
+index 674f367087c5..0ce868de855d 100644
+--- a/drivers/net/can/vcan.c
++++ b/drivers/net/can/vcan.c
+@@ -78,6 +78,9 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
+ 	skb->dev       = dev;
+ 	skb->ip_summed = CHECKSUM_UNNECESSARY;
+ 
++	if (!(skb->tstamp.tv64))
++		__net_timestamp(skb);
++
+ 	netif_rx_ni(skb);
+ }
+ 
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index d81fc6bd4759..5c92fb71b37e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ 	int ret;
+ 
+ 	/* Try to obtain pages, decreasing order if necessary */
+-	gfp |= __GFP_COLD | __GFP_COMP;
++	gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ 	while (order >= 0) {
+ 		pages = alloc_pages(gfp, order);
+ 		if (pages)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index 33501bcddc48..8a97d28f3d65 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -9323,7 +9323,8 @@ unload_error:
+ 	 * function stop ramrod is sent, since as part of this ramrod FW access
+ 	 * PTP registers.
+ 	 */
+-	bnx2x_stop_ptp(bp);
++	if (bp->flags & PTP_SUPPORTED)
++		bnx2x_stop_ptp(bp);
+ 
+ 	/* Disable HW interrupts, NAPI */
+ 	bnx2x_netif_stop(bp, 1);
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index ce5f7f9cff06..74d0389bf233 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -310,6 +310,7 @@ struct mvneta_port {
+ 	unsigned int link;
+ 	unsigned int duplex;
+ 	unsigned int speed;
++	unsigned int tx_csum_limit;
+ 	int use_inband_status:1;
+ };
+ 
+@@ -1013,6 +1014,12 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
+ 		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ 		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ 		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
++	} else {
++		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
++		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
++		       MVNETA_GMAC_AN_SPEED_EN |
++		       MVNETA_GMAC_AN_DUPLEX_EN);
++		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ 	}
+ 
+ 	mvneta_set_ucast_table(pp, -1);
+@@ -2502,8 +2509,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
+ 
+ 	dev->mtu = mtu;
+ 
+-	if (!netif_running(dev))
++	if (!netif_running(dev)) {
++		netdev_update_features(dev);
+ 		return 0;
++	}
+ 
+ 	/* The interface is running, so we have to force a
+ 	 * reallocation of the queues
+@@ -2532,9 +2541,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
+ 	mvneta_start_dev(pp);
+ 	mvneta_port_up(pp);
+ 
++	netdev_update_features(dev);
++
+ 	return 0;
+ }
+ 
++static netdev_features_t mvneta_fix_features(struct net_device *dev,
++					     netdev_features_t features)
++{
++	struct mvneta_port *pp = netdev_priv(dev);
++
++	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
++		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
++		netdev_info(dev,
++			    "Disable IP checksum for MTU greater than %dB\n",
++			    pp->tx_csum_limit);
++	}
++
++	return features;
++}
++
+ /* Get mac address */
+ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+ {
+@@ -2856,6 +2882,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
+ 	.ndo_set_rx_mode     = mvneta_set_rx_mode,
+ 	.ndo_set_mac_address = mvneta_set_mac_addr,
+ 	.ndo_change_mtu      = mvneta_change_mtu,
++	.ndo_fix_features    = mvneta_fix_features,
+ 	.ndo_get_stats64     = mvneta_get_stats64,
+ 	.ndo_do_ioctl        = mvneta_ioctl,
+ };
+@@ -3101,6 +3128,9 @@ static int mvneta_probe(struct platform_device *pdev)
+ 		}
+ 	}
+ 
++	if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
++		pp->tx_csum_limit = 1600;
++
+ 	pp->tx_ring_size = MVNETA_MAX_TXD;
+ 	pp->rx_ring_size = MVNETA_MAX_RXD;
+ 
+@@ -3179,6 +3209,7 @@ static int mvneta_remove(struct platform_device *pdev)
+ 
+ static const struct of_device_id mvneta_match[] = {
+ 	{ .compatible = "marvell,armada-370-neta" },
++	{ .compatible = "marvell,armada-xp-neta" },
+ 	{ }
+ };
+ MODULE_DEVICE_TABLE(of, mvneta_match);
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+index cf467a9f6cc7..a5a0b8420d26 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1973,10 +1973,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+ 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ 	}
+ 
+-	if (priv->base_tx_qpn) {
+-		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+-		priv->base_tx_qpn = 0;
+-	}
+ }
+ 
+ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+index 2a77a6b19121..eab4e080ebd2 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
+ }
+ #endif
+ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+-		      int hwtstamp_rx_filter)
++		      netdev_features_t dev_features)
+ {
+ 	__wsum hw_checksum = 0;
+ 
+@@ -731,14 +731,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+ 
+ 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+ 
+-	if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
+-	    hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
+-		/* next protocol non IPv4 or IPv6 */
+-		if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+-		    != htons(ETH_P_IP) &&
+-		    ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+-		    != htons(ETH_P_IPV6))
+-			return -1;
++	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
++	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ 		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
+ 		hdr += sizeof(struct vlan_hdr);
+ 	}
+@@ -901,7 +895,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 
+ 			if (ip_summed == CHECKSUM_COMPLETE) {
+ 				void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
+-				if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
++				if (check_csum(cqe, gro_skb, va,
++					       dev->features)) {
+ 					ip_summed = CHECKSUM_NONE;
+ 					ring->csum_none++;
+ 					ring->csum_complete--;
+@@ -956,7 +951,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
+ 		}
+ 
+ 		if (ip_summed == CHECKSUM_COMPLETE) {
+-			if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
++			if (check_csum(cqe, skb, skb->data, dev->features)) {
+ 				ip_summed = CHECKSUM_NONE;
+ 				ring->csum_complete--;
+ 				ring->csum_none++;
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+index 7bed3a88579f..c10d98f6ad96 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ 	ring->size = size;
+ 	ring->size_mask = size - 1;
+ 	ring->stride = stride;
++	ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+ 
+ 	tmp = size * sizeof(struct mlx4_en_tx_info);
+ 	ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
+@@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ 		mlx4_bf_free(mdev->dev, &ring->bf);
+ 	mlx4_qp_remove(mdev->dev, &ring->qp);
+ 	mlx4_qp_free(mdev->dev, &ring->qp);
++	mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
+ 	mlx4_en_unmap_buffer(&ring->wqres.buf);
+ 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ 	kfree(ring->bounce_buf);
+@@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ 		       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+ }
+ 
++static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
++{
++	return ring->prod - ring->cons > ring->full_size;
++}
++
+ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ 			      struct mlx4_en_tx_ring *ring, int index,
+ 			      u8 owner)
+@@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
+ 
+ 	netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
+ 
+-	/*
+-	 * Wakeup Tx queue if this stopped, and at least 1 packet
+-	 * was completed
++	/* Wakeup Tx queue if this stopped, and ring is not full.
+ 	 */
+-	if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
++	if (netif_tx_queue_stopped(ring->tx_queue) &&
++	    !mlx4_en_is_tx_ring_full(ring)) {
+ 		netif_tx_wake_queue(ring->tx_queue);
+ 		ring->wake_queue++;
+ 	}
+@@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 	skb_tx_timestamp(skb);
+ 
+ 	/* Check available TXBBs And 2K spare for prefetch */
+-	stop_queue = (int)(ring->prod - ring_cons) >
+-		      ring->size - HEADROOM - MAX_DESC_TXBBS;
++	stop_queue = mlx4_en_is_tx_ring_full(ring);
+ 	if (unlikely(stop_queue)) {
+ 		netif_tx_stop_queue(ring->tx_queue);
+ 		ring->queue_stopped++;
+@@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+ 		smp_rmb();
+ 
+ 		ring_cons = ACCESS_ONCE(ring->cons);
+-		if (unlikely(((int)(ring->prod - ring_cons)) <=
+-			     ring->size - HEADROOM - MAX_DESC_TXBBS)) {
++		if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
+ 			netif_tx_wake_queue(ring->tx_queue);
+ 			ring->wake_queue++;
+ 		}
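
The en_tx.c hunks deduplicate the "is the ring full" test into mlx4_en_is_tx_ring_full(), with full_size precomputed at ring creation as size minus headroom and the worst-case descriptor. Because prod and cons are free-running unsigned counters, prod - cons is the number of in-flight slots even after either counter wraps. The idiom in isolation (names hypothetical):

	struct tx_ring {
		u32 prod;	/* free-running producer count */
		u32 cons;	/* free-running consumer count */
		u32 full_size;	/* capacity minus reserved headroom */
	};

	static inline bool tx_ring_full(const struct tx_ring *ring)
	{
		/* Unsigned subtraction is wraparound-safe. */
		return ring->prod - ring->cons > ring->full_size;
	}
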
+diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
+index 6fce58718837..0d80aed59043 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
+@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
+ 	mutex_lock(&intf_mutex);
+ 
+ 	list_add_tail(&intf->list, &intf_list);
+-	list_for_each_entry(priv, &dev_list, dev_list)
++	list_for_each_entry(priv, &dev_list, dev_list) {
++		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
++			mlx4_dbg(&priv->dev,
++				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
++			intf->flags &= ~MLX4_INTFF_BONDING;
++		}
+ 		mlx4_add_device(intf, priv);
++	}
+ 
+ 	mutex_unlock(&intf_mutex);
+ 
+diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+index d021f079f181..909fcf803c54 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
+ 	u32			size; /* number of TXBBs */
+ 	u32			size_mask;
+ 	u16			stride;
++	u32			full_size;
+ 	u16			cqn;	/* index of port CQ associated with this ring */
+ 	u32			buf_size;
+ 	__be32			doorbell_qpn;
+@@ -579,7 +580,6 @@ struct mlx4_en_priv {
+ 	int vids[128];
+ 	bool wol;
+ 	struct device *ddev;
+-	int base_tx_qpn;
+ 	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
+ 	struct hwtstamp_config hwtstamp_config;
+ 
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index bdfe51fc3a65..d551df62e61a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -796,10 +796,11 @@ static int genphy_config_advert(struct phy_device *phydev)
+ 	if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ 				 SUPPORTED_1000baseT_Full)) {
+ 		adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+-		if (adv != oldadv)
+-			changed = 1;
+ 	}
+ 
++	if (adv != oldadv)
++		changed = 1;
++
+ 	err = phy_write(phydev, MII_CTRL1000, adv);
+ 	if (err < 0)
+ 		return err;
+diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
+index 968787abf78d..ec383b0f5443 100644
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -681,6 +681,9 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+ 	char *node;
+ 	unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+ 
++	if (vif->credit_watch.node)
++		return -EADDRINUSE;
++
+ 	node = kmalloc(maxlen, GFP_KERNEL);
+ 	if (!node)
+ 		return -ENOMEM;
+@@ -770,6 +773,7 @@ static void connect(struct backend_info *be)
+ 	}
+ 
+ 	xen_net_read_rate(dev, &credit_bytes, &credit_usec);
++	xen_unregister_watchers(be->vif);
+ 	xen_register_watchers(dev, be->vif);
+ 	read_xenbus_vif_flags(be);
+ 
+diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
+index 6f1fa1773e76..f8d8fdb26b72 100644
+--- a/drivers/s390/kvm/virtio_ccw.c
++++ b/drivers/s390/kvm/virtio_ccw.c
+@@ -65,6 +65,7 @@ struct virtio_ccw_device {
+ 	bool is_thinint;
+ 	bool going_away;
+ 	bool device_lost;
++	unsigned int config_ready;
+ 	void *airq_info;
+ };
+ 
+@@ -833,8 +834,11 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
+ 	if (ret)
+ 		goto out_free;
+ 
+-	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
+-	memcpy(buf, &vcdev->config[offset], len);
++	memcpy(vcdev->config, config_area, offset + len);
++	if (buf)
++		memcpy(buf, &vcdev->config[offset], len);
++	if (vcdev->config_ready < offset + len)
++		vcdev->config_ready = offset + len;
+ 
+ out_free:
+ 	kfree(config_area);
+@@ -857,6 +861,9 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
+ 	if (!config_area)
+ 		goto out_free;
+ 
++	/* Make sure we don't overwrite fields. */
++	if (vcdev->config_ready < offset)
++		virtio_ccw_get_config(vdev, 0, NULL, offset);
+ 	memcpy(&vcdev->config[offset], buf, len);
+ 	/* Write the config area to the host. */
+ 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
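
virtio_ccw caches the device's config space and writes the whole cache back on any set_config(). The new config_ready counter records how much of that cache has actually been fetched from the device, and set_config() now reads any unfetched prefix first, so a partial write can no longer clobber device fields with never-initialised cache bytes. The rule in sketch form — mydev, dev_get_config() and write_config_to_host() stand in for the ccw-specific types and helpers:

	static void dev_set_config(struct mydev *d, unsigned int offset,
				   const void *buf, unsigned int len)
	{
		/* Bytes below 'offset' were never read from the device:
		 * fetch them before the full write-back below.
		 */
		if (d->config_ready < offset)
			dev_get_config(d, 0, NULL, offset);

		memcpy(&d->config[offset], buf, len);
		write_config_to_host(d, d->config, sizeof(d->config));
	}
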
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 3507f880eb74..45b8c8b338df 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -3435,6 +3435,7 @@ done:
+ static void ffs_closed(struct ffs_data *ffs)
+ {
+ 	struct ffs_dev *ffs_obj;
++	struct f_fs_opts *opts;
+ 
+ 	ENTER();
+ 	ffs_dev_lock();
+@@ -3449,8 +3450,13 @@ static void ffs_closed(struct ffs_data *ffs)
+ 	    ffs_obj->ffs_closed_callback)
+ 		ffs_obj->ffs_closed_callback(ffs);
+ 
+-	if (!ffs_obj->opts || ffs_obj->opts->no_configfs
+-	    || !ffs_obj->opts->func_inst.group.cg_item.ci_parent)
++	if (ffs_obj->opts)
++		opts = ffs_obj->opts;
++	else
++		goto done;
++
++	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
++	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+ 		goto done;
+ 
+ 	unregister_gadget_item(ffs_obj->opts->
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 37b5afdaf698..50bb3c207621 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -2927,17 +2927,6 @@ restart:
+ 				vfsmnt = &mnt->mnt;
+ 				continue;
+ 			}
+-			/*
+-			 * Filesystems needing to implement special "root names"
+-			 * should do so with ->d_dname()
+-			 */
+-			if (IS_ROOT(dentry) &&
+-			   (dentry->d_name.len != 1 ||
+-			    dentry->d_name.name[0] != '/')) {
+-				WARN(1, "Root dentry has weird name <%.*s>\n",
+-				     (int) dentry->d_name.len,
+-				     dentry->d_name.name);
+-			}
+ 			if (!error)
+ 				error = is_mounted(vfsmnt) ? 1 : 2;
+ 			break;
+diff --git a/fs/inode.c b/fs/inode.c
+index ea37cd17b53f..6e342cadef81 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -1693,8 +1693,8 @@ int file_remove_suid(struct file *file)
+ 		error = security_inode_killpriv(dentry);
+ 	if (!error && killsuid)
+ 		error = __remove_suid(dentry, killsuid);
+-	if (!error && (inode->i_sb->s_flags & MS_NOSEC))
+-		inode->i_flags |= S_NOSEC;
++	if (!error)
++		inode_has_no_xattr(inode);
+ 
+ 	return error;
+ }
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 1b9e11167bae..1d4a97c573e0 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -3185,11 +3185,15 @@ bool fs_fully_visible(struct file_system_type *type)
+ 		if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ 			continue;
+ 
+-		/* This mount is not fully visible if there are any child mounts
+-		 * that cover anything except for empty directories.
++		/* This mount is not fully visible if there are any
++		 * locked child mounts that cover anything except for
++		 * empty directories.
+ 		 */
+ 		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ 			struct inode *inode = child->mnt_mountpoint->d_inode;
++			/* Only worry about locked mounts */
++			if (!(mnt->mnt.mnt_flags & MNT_LOCKED))
++				continue;
+ 			if (!S_ISDIR(inode->i_mode))
+ 				goto next;
+ 			if (inode->i_nlink > 2)
+diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
+index 2c1036080d52..a7106eda5024 100644
+--- a/fs/ufs/balloc.c
++++ b/fs/ufs/balloc.c
+@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
+ 	
+ 	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
+ 		ufs_error (sb, "ufs_free_fragments", "internal error");
+-	
+-	lock_ufs(sb);
++
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 	
+ 	cgno = ufs_dtog(uspi, fragment);
+ 	bit = ufs_dtogd(uspi, fragment);
+@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
+ 	if (sb->s_flags & MS_SYNCHRONOUS)
+ 		ubh_sync_block(UCPI_UBH(ucpi));
+ 	ufs_mark_sb_dirty(sb);
+-	
+-	unlock_ufs(sb);
++
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	UFSD("EXIT\n");
+ 	return;
+ 
+ failed:
+-	unlock_ufs(sb);
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	UFSD("EXIT (FAILED)\n");
+ 	return;
+ }
+@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
+ 		goto failed;
+ 	}
+ 
+-	lock_ufs(sb);
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 	
+ do_more:
+ 	overflow = 0;
+@@ -211,12 +211,12 @@ do_more:
+ 	}
+ 
+ 	ufs_mark_sb_dirty(sb);
+-	unlock_ufs(sb);
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	UFSD("EXIT\n");
+ 	return;
+ 
+ failed_unlock:
+-	unlock_ufs(sb);
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ failed:
+ 	UFSD("EXIT (FAILED)\n");
+ 	return;
+@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 	usb1 = ubh_get_usb_first(uspi);
+ 	*err = -ENOSPC;
+ 
+-	lock_ufs(sb);
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 	tmp = ufs_data_ptr_to_cpu(sb, p);
+ 
+ 	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
+@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 				  "fragment %llu, tmp %llu\n",
+ 				  (unsigned long long)fragment,
+ 				  (unsigned long long)tmp);
+-			unlock_ufs(sb);
++			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			return INVBLOCK;
+ 		}
+ 		if (fragment < UFS_I(inode)->i_lastfrag) {
+ 			UFSD("EXIT (ALREADY ALLOCATED)\n");
+-			unlock_ufs(sb);
++			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			return 0;
+ 		}
+ 	}
+ 	else {
+ 		if (tmp) {
+ 			UFSD("EXIT (ALREADY ALLOCATED)\n");
+-			unlock_ufs(sb);
++			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			return 0;
+ 		}
+ 	}
+@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 	 * There is not enough space for user on the device
+ 	 */
+ 	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		UFSD("EXIT (FAILED)\n");
+ 		return 0;
+ 	}
+@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 			ufs_clear_frags(inode, result + oldcount,
+ 					newcount - oldcount, locked_page != NULL);
+ 		}
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
+ 		return result;
+ 	}
+@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 						fragment + count);
+ 		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+ 				locked_page != NULL);
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		UFSD("EXIT, result %llu\n", (unsigned long long)result);
+ 		return result;
+ 	}
+@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 		*err = 0;
+ 		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
+ 						fragment + count);
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		if (newcount < request)
+ 			ufs_free_fragments (inode, result + newcount, request - newcount);
+ 		ufs_free_fragments (inode, tmp, oldcount);
+@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
+ 		return result;
+ 	}
+ 
+-	unlock_ufs(sb);
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	UFSD("EXIT (FAILED)\n");
+ 	return 0;
+ }		
+diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
+index 7caa01652888..fd0203ce1f7f 100644
+--- a/fs/ufs/ialloc.c
++++ b/fs/ufs/ialloc.c
+@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode)
+ 	
+ 	ino = inode->i_ino;
+ 
+-	lock_ufs(sb);
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 
+ 	if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) {
+ 		ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino);
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		return;
+ 	}
+ 	
+@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode)
+ 	bit = ufs_inotocgoff (ino);
+ 	ucpi = ufs_load_cylinder (sb, cg);
+ 	if (!ucpi) {
+-		unlock_ufs(sb);
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		return;
+ 	}
+ 	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
+@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode)
+ 		ubh_sync_block(UCPI_UBH(ucpi));
+ 	
+ 	ufs_mark_sb_dirty(sb);
+-	unlock_ufs(sb);
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	UFSD("EXIT\n");
+ }
+ 
+@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode)
+ 	sbi = UFS_SB(sb);
+ 	uspi = sbi->s_uspi;
+ 
+-	lock_ufs(sb);
++	mutex_lock(&sbi->s_lock);
+ 
+ 	/*
+ 	 * Try to place the inode in its parent directory
+@@ -331,21 +331,21 @@ cg_found:
+ 			sync_dirty_buffer(bh);
+ 		brelse(bh);
+ 	}
+-	unlock_ufs(sb);
++	mutex_unlock(&sbi->s_lock);
+ 
+ 	UFSD("allocating inode %lu\n", inode->i_ino);
+ 	UFSD("EXIT\n");
+ 	return inode;
+ 
+ fail_remove_inode:
+-	unlock_ufs(sb);
++	mutex_unlock(&sbi->s_lock);
+ 	clear_nlink(inode);
+ 	unlock_new_inode(inode);
+ 	iput(inode);
+ 	UFSD("EXIT (FAILED): err %d\n", err);
+ 	return ERR_PTR(err);
+ failed:
+-	unlock_ufs(sb);
++	mutex_unlock(&sbi->s_lock);
+ 	make_bad_inode(inode);
+ 	iput (inode);
+ 	UFSD("EXIT (FAILED): err %d\n", err);
+diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
+index be7d42c7d938..2d93ab07da8a 100644
+--- a/fs/ufs/inode.c
++++ b/fs/ufs/inode.c
+@@ -902,6 +902,9 @@ void ufs_evict_inode(struct inode * inode)
+ 	invalidate_inode_buffers(inode);
+ 	clear_inode(inode);
+ 
+-	if (want_delete)
++	if (want_delete) {
++		lock_ufs(inode->i_sb);
+ 		ufs_free_inode(inode);
++		unlock_ufs(inode->i_sb);
++	}
+ }
+diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
+index e491a93a7e9a..60ee32249b72 100644
+--- a/fs/ufs/namei.c
++++ b/fs/ufs/namei.c
+@@ -128,12 +128,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
+ 	if (l > sb->s_blocksize)
+ 		goto out_notlocked;
+ 
++	lock_ufs(dir->i_sb);
+ 	inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO);
+ 	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+-		goto out_notlocked;
++		goto out;
+ 
+-	lock_ufs(dir->i_sb);
+ 	if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) {
+ 		/* slow symlink */
+ 		inode->i_op = &ufs_symlink_inode_operations;
+@@ -174,7 +174,12 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir,
+ 	inode_inc_link_count(inode);
+ 	ihold(inode);
+ 
+-	error = ufs_add_nondir(dentry, inode);
++	error = ufs_add_link(dentry, inode);
++	if (error) {
++		inode_dec_link_count(inode);
++		iput(inode);
++	} else
++		d_instantiate(dentry, inode);
+ 	unlock_ufs(dir->i_sb);
+ 	return error;
+ }
+@@ -184,9 +189,13 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 	struct inode * inode;
+ 	int err;
+ 
++	lock_ufs(dir->i_sb);
++	inode_inc_link_count(dir);
++
+ 	inode = ufs_new_inode(dir, S_IFDIR|mode);
++	err = PTR_ERR(inode);
+ 	if (IS_ERR(inode))
+-		return PTR_ERR(inode);
++		goto out_dir;
+ 
+ 	inode->i_op = &ufs_dir_inode_operations;
+ 	inode->i_fop = &ufs_dir_operations;
+@@ -194,9 +203,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 
+ 	inode_inc_link_count(inode);
+ 
+-	lock_ufs(dir->i_sb);
+-	inode_inc_link_count(dir);
+-
+ 	err = ufs_make_empty(inode, dir);
+ 	if (err)
+ 		goto out_fail;
+@@ -206,6 +212,7 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
+ 		goto out_fail;
+ 	unlock_ufs(dir->i_sb);
+ 
++	unlock_new_inode(inode);
+ 	d_instantiate(dentry, inode);
+ out:
+ 	return err;
+@@ -215,6 +222,7 @@ out_fail:
+ 	inode_dec_link_count(inode);
+ 	unlock_new_inode(inode);
+ 	iput (inode);
++out_dir:
+ 	inode_dec_link_count(dir);
+ 	unlock_ufs(dir->i_sb);
+ 	goto out;
+diff --git a/fs/ufs/super.c b/fs/ufs/super.c
+index b3bc3e7ae79d..dc33f9416340 100644
+--- a/fs/ufs/super.c
++++ b/fs/ufs/super.c
+@@ -694,6 +694,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
+ 	unsigned flags;
+ 
+ 	lock_ufs(sb);
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 
+ 	UFSD("ENTER\n");
+ 
+@@ -711,6 +712,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait)
+ 	ufs_put_cstotal(sb);
+ 
+ 	UFSD("EXIT\n");
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	unlock_ufs(sb);
+ 
+ 	return 0;
+@@ -799,6 +801,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
+ 	UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
+ 	
+ 	mutex_init(&sbi->mutex);
++	mutex_init(&sbi->s_lock);
+ 	spin_lock_init(&sbi->work_lock);
+ 	INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
+ 	/*
+@@ -1277,6 +1280,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 
+ 	sync_filesystem(sb);
+ 	lock_ufs(sb);
++	mutex_lock(&UFS_SB(sb)->s_lock);
+ 	uspi = UFS_SB(sb)->s_uspi;
+ 	flags = UFS_SB(sb)->s_flags;
+ 	usb1 = ubh_get_usb_first(uspi);
+@@ -1290,6 +1294,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 	new_mount_opt = 0;
+ 	ufs_set_opt (new_mount_opt, ONERROR_LOCK);
+ 	if (!ufs_parse_options (data, &new_mount_opt)) {
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ 	}
+@@ -1297,12 +1302,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 		new_mount_opt |= ufstype;
+ 	} else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) {
+ 		pr_err("ufstype can't be changed during remount\n");
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ 	}
+ 
+ 	if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
+ 		UFS_SB(sb)->s_mount_opt = new_mount_opt;
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return 0;
+ 	}
+@@ -1326,6 +1333,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 	 */
+ #ifndef CONFIG_UFS_FS_WRITE
+ 		pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n");
++		mutex_unlock(&UFS_SB(sb)->s_lock);
+ 		unlock_ufs(sb);
+ 		return -EINVAL;
+ #else
+@@ -1335,11 +1343,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ 		    ufstype != UFS_MOUNT_UFSTYPE_SUNx86 &&
+ 		    ufstype != UFS_MOUNT_UFSTYPE_UFS2) {
+ 			pr_err("this ufstype is read-only supported\n");
++			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			unlock_ufs(sb);
+ 			return -EINVAL;
+ 		}
+ 		if (!ufs_read_cylinder_structures(sb)) {
+ 			pr_err("failed during remounting\n");
++			mutex_unlock(&UFS_SB(sb)->s_lock);
+ 			unlock_ufs(sb);
+ 			return -EPERM;
+ 		}
+@@ -1347,6 +1357,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
+ #endif
+ 	}
+ 	UFS_SB(sb)->s_mount_opt = new_mount_opt;
++	mutex_unlock(&UFS_SB(sb)->s_lock);
+ 	unlock_ufs(sb);
+ 	return 0;
+ }
+diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
+index 2a07396d5f9e..cf6368d42d4a 100644
+--- a/fs/ufs/ufs.h
++++ b/fs/ufs/ufs.h
+@@ -30,6 +30,7 @@ struct ufs_sb_info {
+ 	int work_queued; /* non-zero if the delayed work is queued */
+ 	struct delayed_work sync_work; /* FS sync delayed work */
+ 	spinlock_t work_lock; /* protects sync_work and work_queued */
++	struct mutex s_lock;
+ };
+ 
+ struct ufs_inode_info {
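
Taken together, the fs/ufs hunks add a per-superblock mutex, UFS_SB(sb)->s_lock, for the allocator paths: every cylinder-group bitmap update in balloc.c and ialloc.c now runs under it, ufs_fill_super() gains the matching mutex_init(), and the wider operations (sync, remount, evict) take it nested inside the existing lock_ufs(). A compressed sketch of the locking shape — error paths must drop the mutex on every exit, which is most of the churn above:

	struct ufs_sb_info {
		/* ... */
		struct mutex s_lock;	/* serializes bitmap updates */
	};

	/* in ufs_fill_super() */
	mutex_init(&sbi->s_lock);

	/* in any allocator path */
	mutex_lock(&UFS_SB(sb)->s_lock);
	/* ... modify cylinder-group bitmaps ... */
	mutex_unlock(&UFS_SB(sb)->s_lock);
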
+diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
+index 3573a81815ad..8ba379f9e467 100644
+--- a/include/net/netns/sctp.h
++++ b/include/net/netns/sctp.h
+@@ -31,6 +31,7 @@ struct netns_sctp {
+ 	struct list_head addr_waitq;
+ 	struct timer_list addr_wq_timer;
+ 	struct list_head auto_asconf_splist;
++	/* Lock that protects both addr_waitq and auto_asconf_splist */
+ 	spinlock_t addr_wq_lock;
+ 
+ 	/* Lock that protects the local_addr_list writers */
+diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
+index 2bb2fcf5b11f..495c87e367b3 100644
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -223,6 +223,10 @@ struct sctp_sock {
+ 	atomic_t pd_mode;
+ 	/* Receive to here while partial delivery is in effect. */
+ 	struct sk_buff_head pd_lobby;
++
++	/* These must be the last fields, as they will skipped on copies,
++	 * like on accept and peeloff operations
++	 */
+ 	struct list_head auto_asconf_list;
+ 	int do_auto_asconf;
+ };
+diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
+index a9a4a1b7863d..8d423bc649b9 100644
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ 		if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ 			return -EPERM;
+ 
+-		spin_lock_bh(&br->lock);
+ 		br_stp_set_bridge_priority(br, args[1]);
+-		spin_unlock_bh(&br->lock);
+ 		return 0;
+ 
+ 	case BRCTL_SET_PORT_PRIORITY:
+diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
+index 41146872c1b4..7832d07f48f6 100644
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
+ 	return true;
+ }
+ 
+-/* called under bridge lock */
++/* Acquires and releases bridge lock */
+ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ {
+ 	struct net_bridge_port *p;
+ 	int wasroot;
+ 
++	spin_lock_bh(&br->lock);
+ 	wasroot = br_is_root_bridge(br);
+ 
+ 	list_for_each_entry(p, &br->port_list, list) {
+@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ 	br_port_state_selection(br);
+ 	if (br_is_root_bridge(br) && !wasroot)
+ 		br_become_root_bridge(br);
++	spin_unlock_bh(&br->lock);
+ }
+ 
+ /* called under bridge lock */
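
The bridge pair moves locking from the call site into the operation itself: old_dev_ioctl() drops its spin_lock_bh()/spin_unlock_bh() around br_stp_set_bridge_priority(), and the function now takes br->lock internally, so every entry point that can change the priority is serialized without each caller having to remember the lock. Generic shape of the refactor (names illustrative):

	/* Acquires and releases the object lock itself; callers must
	 * NOT hold it. This trades caller control for the guarantee
	 * that no entry point can forget to serialize.
	 */
	void set_priority(struct bridge *br, u16 newprio)
	{
		spin_lock_bh(&br->lock);
		/* ... update designated ports, re-run root selection ... */
		spin_unlock_bh(&br->lock);
	}
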
+diff --git a/net/can/af_can.c b/net/can/af_can.c
+index 32d710eaf1fc..689c818ed007 100644
+--- a/net/can/af_can.c
++++ b/net/can/af_can.c
+@@ -310,8 +310,12 @@ int can_send(struct sk_buff *skb, int loop)
+ 		return err;
+ 	}
+ 
+-	if (newskb)
++	if (newskb) {
++		if (!(newskb->tstamp.tv64))
++			__net_timestamp(newskb);
++
+ 		netif_rx_ni(newskb);
++	}
+ 
+ 	/* update statistics */
+ 	can_stats.tx_frames++;
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 3de654256028..2237c1b3cdd2 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -957,6 +957,8 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+ 	rc = 0;
+ 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ 		goto out_unlock_bh;
++	if (neigh->dead)
++		goto out_dead;
+ 
+ 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ 		if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+@@ -1013,6 +1015,13 @@ out_unlock_bh:
+ 		write_unlock(&neigh->lock);
+ 	local_bh_enable();
+ 	return rc;
++
++out_dead:
++	if (neigh->nud_state & NUD_STALE)
++		goto out_unlock_bh;
++	write_unlock_bh(&neigh->lock);
++	kfree_skb(skb);
++	return 1;
+ }
+ EXPORT_SYMBOL(__neigh_event_send);
+ 
+@@ -1076,6 +1085,8 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ 	if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ 	    (old & (NUD_NOARP | NUD_PERMANENT)))
+ 		goto out;
++	if (neigh->dead)
++		goto out;
+ 
+ 	if (!(new & NUD_VALID)) {
+ 		neigh_del_timer(neigh);
+@@ -1225,6 +1236,8 @@ EXPORT_SYMBOL(neigh_update);
+  */
+ void __neigh_set_probe_once(struct neighbour *neigh)
+ {
++	if (neigh->dead)
++		return;
+ 	neigh->updated = jiffies;
+ 	if (!(neigh->nud_state & NUD_FAILED))
+ 		return;
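
All three neighbour.c hunks add the same guard: once neigh->dead is set, the entry is unlinked and headed for the free path, so no caller may re-arm its timer or push new state into it. __neigh_event_send() additionally frees the queued skb, since nothing will ever transmit it. The pattern, with generic names in place of the neighbour-specific ones:

	spin_lock_bh(&obj->lock);	/* 'dead' is stable only under the lock */
	if (obj->dead) {
		spin_unlock_bh(&obj->lock);
		kfree_skb(skb);		/* no consumer is coming */
		return 1;
	}
	mod_timer(&obj->timer, jiffies + delay);
	spin_unlock_bh(&obj->lock);
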
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 8b47a4d79d04..a5aa54ea6533 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int backlog)
+ 				err = 0;
+ 			if (err)
+ 				goto out;
++
++			tcp_fastopen_init_key_once(true);
+ 		}
+ 		err = inet_csk_listen_start(sk, backlog);
+ 		if (err)
+diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
+index 7cfb0893f263..6ddde89996f4 100644
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -432,6 +432,15 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
+ 		kfree_skb(skb);
+ }
+ 
++/* For some errors we have valid addr_offset even with zero payload and
++ * zero port. Also, addr_offset should be supported if port is set.
++ */
++static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
++{
++	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
++	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
++}
++
+ /* IPv4 supports cmsg on all imcp errors and some timestamps
+  *
+  * Timestamp code paths do not initialize the fields expected by cmsg:
+@@ -498,7 +507,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 
+-	if (sin && serr->port) {
++	if (sin && ipv4_datagram_support_addr(serr)) {
+ 		sin->sin_family = AF_INET;
+ 		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
+ 						   serr->addr_offset);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index f1377f2a0472..bb2ce74f6004 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2545,10 +2545,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ 
+ 	case TCP_FASTOPEN:
+ 		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+-		    TCPF_LISTEN)))
++		    TCPF_LISTEN))) {
++			tcp_fastopen_init_key_once(true);
++
+ 			err = fastopen_init_queue(sk, val);
+-		else
++		} else {
+ 			err = -EINVAL;
++		}
+ 		break;
+ 	case TCP_TIMESTAMP:
+ 		if (!tp->repair)
+diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
+index 46b087a27503..f9c0fb84e435 100644
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -78,8 +78,6 @@ static bool __tcp_fastopen_cookie_gen(const void *path,
+ 	struct tcp_fastopen_context *ctx;
+ 	bool ok = false;
+ 
+-	tcp_fastopen_init_key_once(true);
+-
+ 	rcu_read_lock();
+ 	ctx = rcu_dereference(tcp_fastopen_ctx);
+ 	if (ctx) {
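
These three TCP hunks move tcp_fastopen_init_key_once(true) out of __tcp_fastopen_cookie_gen(), which runs in the packet-receive path, and into inet_listen() and the TCP_FASTOPEN setsockopt — both ordinary process context. Installing the key allocates a crypto transform and must be able to sleep, so the one-time initialisation has to happen on the sleepable setup paths before any cookie is ever generated. Rough shape of such an init-once helper (names and install_key() are hypothetical):

	static bool key_ready;
	static DEFINE_MUTEX(key_mutex);

	void fastopen_init_key_once(void)	/* process context only */
	{
		u8 key[16];

		mutex_lock(&key_mutex);
		if (!key_ready) {
			get_random_bytes(key, sizeof(key));
			install_key(key);	/* may sleep (GFP_KERNEL) */
			key_ready = true;
		}
		mutex_unlock(&key_mutex);
	}
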
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 762a58c772b8..62d908e64eeb 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
+ 	kfree_skb(skb);
+ }
+ 
++/* For some errors we have valid addr_offset even with zero payload and
++ * zero port. Also, addr_offset should be supported if port is set.
++ */
++static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
++{
++	return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
++	       serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
++	       serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
++}
++
+ /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+  *
+  * At one point, excluding local errors was a quick test to identify icmp/icmp6
+@@ -389,7 +399,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
+ 
+ 	serr = SKB_EXT_ERR(skb);
+ 
+-	if (sin && serr->port) {
++	if (sin && ipv6_datagram_support_addr(serr)) {
+ 		const unsigned char *nh = skb_network_header(skb);
+ 		sin->sin6_family = AF_INET6;
+ 		sin->sin6_flowinfo = 0;
+diff --git a/net/mac80211/key.c b/net/mac80211/key.c
+index a907f2d5c12d..81e9785f38bc 100644
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -66,12 +66,15 @@ update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+ 	if (sdata->vif.type != NL80211_IFTYPE_AP)
+ 		return;
+ 
+-	mutex_lock(&sdata->local->mtx);
++	/* crypto_tx_tailroom_needed_cnt is protected by this */
++	assert_key_lock(sdata->local);
++
++	rcu_read_lock();
+ 
+-	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
++	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ 		vlan->crypto_tx_tailroom_needed_cnt += delta;
+ 
+-	mutex_unlock(&sdata->local->mtx);
++	rcu_read_unlock();
+ }
+ 
+ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+@@ -95,6 +98,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+ 	 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
+ 	 */
+ 
++	assert_key_lock(sdata->local);
++
+ 	update_vlan_tailroom_need_count(sdata, 1);
+ 
+ 	if (!sdata->crypto_tx_tailroom_needed_cnt++) {
+@@ -109,6 +114,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+ static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+ 					 int delta)
+ {
++	assert_key_lock(sdata->local);
++
+ 	WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+ 
+ 	update_vlan_tailroom_need_count(sdata, -delta);
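
update_vlan_tailroom_need_count() is always entered with the key mutex held — that is what the new assert_key_lock() calls document — so the tailroom counters themselves are already serialized. The mutex_lock(&sdata->local->mtx) around the VLAN walk is therefore replaced with a plain RCU read section: RCU protects only the list traversal against concurrent add/remove, while the counter writes rely on the outer mutex. The combined shape after the patch:

	assert_key_lock(sdata->local);	/* counters protected by key mutex */

	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
		vlan->crypto_tx_tailroom_needed_cnt += delta;
	rcu_read_unlock();
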
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index b5989c6ee551..fe1610ddeacf 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1272,16 +1272,6 @@ static void packet_sock_destruct(struct sock *sk)
+ 	sk_refcnt_debug_dec(sk);
+ }
+ 
+-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
+-{
+-	int x = atomic_read(&f->rr_cur) + 1;
+-
+-	if (x >= num)
+-		x = 0;
+-
+-	return x;
+-}
+-
+ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+ 				      struct sk_buff *skb,
+ 				      unsigned int num)
+@@ -1293,13 +1283,9 @@ static unsigned int fanout_demux_lb(struct packet_fanout *f,
+ 				    struct sk_buff *skb,
+ 				    unsigned int num)
+ {
+-	int cur, old;
++	unsigned int val = atomic_inc_return(&f->rr_cur);
+ 
+-	cur = atomic_read(&f->rr_cur);
+-	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
+-				     fanout_rr_next(f, num))) != cur)
+-		cur = old;
+-	return cur;
++	return val % num;
+ }
+ 
+ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
+@@ -1353,7 +1339,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
+ 			     struct packet_type *pt, struct net_device *orig_dev)
+ {
+ 	struct packet_fanout *f = pt->af_packet_priv;
+-	unsigned int num = f->num_members;
++	unsigned int num = READ_ONCE(f->num_members);
+ 	struct packet_sock *po;
+ 	unsigned int idx;
+ 
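
The fanout change replaces a cmpxchg loop with a free-running counter: fanout_demux_lb() now just does atomic_inc_return() and reduces modulo num at the point of use, and packet_rcv_fanout() snapshots f->num_members once with READ_ONCE() so the modulus cannot change mid-dispatch. A standalone sketch of the technique:

	#include <linux/atomic.h>

	/* Lock-free round robin: the counter is never reset, so there is
	 * no cmpxchg retry loop and no race against 'num' changing.
	 * Distribution is only approximate across the 2^32 wraparound
	 * when num does not divide the counter range.
	 */
	static unsigned int rr_pick(atomic_t *ctr, unsigned int num)
	{
		return (unsigned int)atomic_inc_return(ctr) % num;
	}
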
+diff --git a/net/sctp/output.c b/net/sctp/output.c
+index fc5e45b8a832..abe7c2db2412 100644
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -599,7 +599,9 @@ out:
+ 	return err;
+ no_route:
+ 	kfree_skb(nskb);
+-	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++
++	if (asoc)
++		IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+ 
+ 	/* FIXME: Returning the 'err' will effect all the associations
+ 	 * associated with a socket, although only one of the paths of the
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index f09de7fac2e6..5f6c4e61325b 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1528,8 +1528,10 @@ static void sctp_close(struct sock *sk, long timeout)
+ 
+ 	/* Supposedly, no process has access to the socket, but
+ 	 * the net layers still may.
++	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++	 * held and that should be grabbed before socket lock.
+ 	 */
+-	local_bh_disable();
++	spin_lock_bh(&net->sctp.addr_wq_lock);
+ 	bh_lock_sock(sk);
+ 
+ 	/* Hold the sock, since sk_common_release() will put sock_put()
+@@ -1539,7 +1541,7 @@ static void sctp_close(struct sock *sk, long timeout)
+ 	sk_common_release(sk);
+ 
+ 	bh_unlock_sock(sk);
+-	local_bh_enable();
++	spin_unlock_bh(&net->sctp.addr_wq_lock);
+ 
+ 	sock_put(sk);
+ 
+@@ -3580,6 +3582,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 	if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ 		return 0;
+ 
++	spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	if (val == 0 && sp->do_auto_asconf) {
+ 		list_del(&sp->auto_asconf_list);
+ 		sp->do_auto_asconf = 0;
+@@ -3588,6 +3591,7 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+ 		    &sock_net(sk)->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+ 	}
++	spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ 	return 0;
+ }
+ 
+@@ -4121,18 +4125,28 @@ static int sctp_init_sock(struct sock *sk)
+ 	local_bh_disable();
+ 	percpu_counter_inc(&sctp_sockets_allocated);
+ 	sock_prot_inuse_add(net, sk->sk_prot, 1);
++
++	/* Nothing can fail after this block, otherwise
++	 * sctp_destroy_sock() will be called without addr_wq_lock held
++	 */
+ 	if (net->sctp.default_auto_asconf) {
++		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ 		list_add_tail(&sp->auto_asconf_list,
+ 		    &net->sctp.auto_asconf_splist);
+ 		sp->do_auto_asconf = 1;
+-	} else
++		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
++	} else {
+ 		sp->do_auto_asconf = 0;
++	}
++
+ 	local_bh_enable();
+ 
+ 	return 0;
+ }
+ 
+-/* Cleanup any SCTP per socket resources.  */
++/* Cleanup any SCTP per socket resources. Must be called with
++ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
++ */
+ static void sctp_destroy_sock(struct sock *sk)
+ {
+ 	struct sctp_sock *sp;
+@@ -7195,6 +7209,19 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
+ 	newinet->mc_list = NULL;
+ }
+ 
++static inline void sctp_copy_descendant(struct sock *sk_to,
++					const struct sock *sk_from)
++{
++	int ancestor_size = sizeof(struct inet_sock) +
++			    sizeof(struct sctp_sock) -
++			    offsetof(struct sctp_sock, auto_asconf_list);
++
++	if (sk_from->sk_family == PF_INET6)
++		ancestor_size += sizeof(struct ipv6_pinfo);
++
++	__inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
++}
++
+ /* Populate the fields of the newsk from the oldsk and migrate the assoc
+  * and its messages to the newsk.
+  */
+@@ -7209,7 +7236,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	struct sk_buff *skb, *tmp;
+ 	struct sctp_ulpevent *event;
+ 	struct sctp_bind_hashbucket *head;
+-	struct list_head tmplist;
+ 
+ 	/* Migrate socket buffer sizes and all the socket level options to the
+ 	 * new socket.
+@@ -7217,12 +7243,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+ 	newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ 	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+ 	/* Brute force copy old sctp opt. */
+-	if (oldsp->do_auto_asconf) {
+-		memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+-		inet_sk_copy_descendant(newsk, oldsk);
+-		memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+-	} else
+-		inet_sk_copy_descendant(newsk, oldsk);
++	sctp_copy_descendant(newsk, oldsk);
+ 
+ 	/* Restore the ep value that was overwritten with the above structure
+ 	 * copy.
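
sctp_copy_descendant() replaces the save/copy/restore juggling in sctp_sock_migrate(): because auto_asconf_list and do_auto_asconf were moved to the tail of struct sctp_sock (the structs.h hunk above), a single __inet_sk_copy_descendant() call bounded by offsetof() copies every option except those fields. The offsetof-bounded copy idiom, reduced to plain C with a hypothetical struct:

	#include <stddef.h>
	#include <string.h>

	struct proto_opts {
		int opt_a;
		long opt_b;
		/* keep last: must never be cloned into a new socket */
		void *private_list;
		int do_private;
	};

	static void copy_opts(struct proto_opts *to, const struct proto_opts *from)
	{
		/* Copies opt_a/opt_b, excludes the trailing fields. */
		memcpy(to, from, offsetof(struct proto_opts, private_list));
	}
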
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 7dade28affba..212070e1de1a 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -403,6 +403,7 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
+ 	return sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ 		sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ 		sbsec->behavior == SECURITY_FS_USE_TASK ||
++		sbsec->behavior == SECURITY_FS_USE_NATIVE ||
+ 		/* Special handling. Genfs but also in-core setxattr handler */
+ 		!strcmp(sb->s_type->name, "sysfs") ||
+ 		!strcmp(sb->s_type->name, "pstore") ||
+diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
+index 78fb8201014f..950064a0942d 100644
+--- a/virt/kvm/arm/vgic.c
++++ b/virt/kvm/arm/vgic.c
+@@ -1561,7 +1561,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
+ 			goto out;
+ 	}
+ 
+-	if (irq_num >= kvm->arch.vgic.nr_irqs)
++	if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+ 		return -EINVAL;
+ 
+ 	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+@@ -2161,10 +2161,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+ 
+ 	BUG_ON(!vgic_initialized(kvm));
+ 
+-	if (spi > kvm->arch.vgic.nr_irqs)
+-		return -EINVAL;
+ 	return kvm_vgic_inject_irq(kvm, 0, spi, level);
+-
+ }
+ 
+ /* MSI not implemented yet */


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-07-01 15:33 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-07-01 15:33 UTC (permalink / raw
  To: gentoo-commits

commit:     b441dda5fa50d4b8c51ccf0641501d3574851296
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jul  1 15:33:29 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jul  1 15:33:29 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=b441dda5

Add the 4.1.1 patchset. For real this time.

 0000_README            |   4 +
 1000_linux-4.1.1.patch | 388 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 392 insertions(+)

diff --git a/0000_README b/0000_README
index dd82b40..8bf61f5 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,10 @@ EXPERIMENTAL
 Individual Patch Descriptions:
 --------------------------------------------------------------------------
 
+Patch:  1000_linux-4.1.1.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.1.1
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.

diff --git a/1000_linux-4.1.1.patch b/1000_linux-4.1.1.patch
new file mode 100644
index 0000000..658aefa
--- /dev/null
+++ b/1000_linux-4.1.1.patch
@@ -0,0 +1,388 @@
+diff --git a/Makefile b/Makefile
+index f5c8983aeeb7..1caf4ad3eb8a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,8 +1,8 @@
+ VERSION = 4
+ PATCHLEVEL = 1
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+-NAME = Hurr durr I'ma sheep
++NAME = Series 4800
+ 
+ # *DOCUMENTATION*
+ # To see a list of typical targets execute "make help"
+diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
+index 4f7001f28936..aa4e3a74e541 100644
+--- a/arch/x86/kernel/cpu/perf_event.c
++++ b/arch/x86/kernel/cpu/perf_event.c
+@@ -270,11 +270,7 @@ msr_fail:
+ 
+ static void hw_perf_event_destroy(struct perf_event *event)
+ {
+-	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
+-		release_pmc_hardware();
+-		release_ds_buffers();
+-		mutex_unlock(&pmc_reserve_mutex);
+-	}
++	x86_release_hardware();
+ }
+ 
+ void hw_perf_lbr_event_destroy(struct perf_event *event)
+@@ -324,6 +320,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
+ 	return x86_pmu_extra_regs(val, event);
+ }
+ 
++int x86_reserve_hardware(void)
++{
++	int err = 0;
++
++	if (!atomic_inc_not_zero(&active_events)) {
++		mutex_lock(&pmc_reserve_mutex);
++		if (atomic_read(&active_events) == 0) {
++			if (!reserve_pmc_hardware())
++				err = -EBUSY;
++			else
++				reserve_ds_buffers();
++		}
++		if (!err)
++			atomic_inc(&active_events);
++		mutex_unlock(&pmc_reserve_mutex);
++	}
++
++	return err;
++}
++
++void x86_release_hardware(void)
++{
++	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
++		release_pmc_hardware();
++		release_ds_buffers();
++		mutex_unlock(&pmc_reserve_mutex);
++	}
++}
++
+ /*
+  * Check if we can create event of a certain type (that no conflicting events
+  * are present).
+@@ -336,9 +361,10 @@ int x86_add_exclusive(unsigned int what)
+ 		return 0;
+ 
+ 	mutex_lock(&pmc_reserve_mutex);
+-	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
++	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+ 		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+ 			goto out;
++	}
+ 
+ 	atomic_inc(&x86_pmu.lbr_exclusive[what]);
+ 	ret = 0;
+@@ -527,19 +553,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
+ 	if (!x86_pmu_initialized())
+ 		return -ENODEV;
+ 
+-	err = 0;
+-	if (!atomic_inc_not_zero(&active_events)) {
+-		mutex_lock(&pmc_reserve_mutex);
+-		if (atomic_read(&active_events) == 0) {
+-			if (!reserve_pmc_hardware())
+-				err = -EBUSY;
+-			else
+-				reserve_ds_buffers();
+-		}
+-		if (!err)
+-			atomic_inc(&active_events);
+-		mutex_unlock(&pmc_reserve_mutex);
+-	}
++	err = x86_reserve_hardware();
+ 	if (err)
+ 		return err;
+ 
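
x86_reserve_hardware()/x86_release_hardware() factor the PMC refcounting out of __x86_pmu_event_init() so the BTS event code in the later hunks can reserve the hardware too. The idiom: atomic_inc_not_zero() is the lock-free fast path when the resource is already live; only the 0 -> 1 transition takes the mutex, rechecks the count, and performs the expensive reservation. A generic sketch — do_reserve()/do_release() are hypothetical stand-ins:

	static atomic_t active;
	static DEFINE_MUTEX(reserve_mutex);

	int resource_get(void)
	{
		int err = 0;

		if (atomic_inc_not_zero(&active))
			return 0;			/* fast path */

		mutex_lock(&reserve_mutex);
		if (atomic_read(&active) == 0)		/* recheck under lock */
			err = do_reserve();
		if (!err)
			atomic_inc(&active);
		mutex_unlock(&reserve_mutex);
		return err;
	}

	void resource_put(void)
	{
		/* Tear down only on the 1 -> 0 transition. */
		if (atomic_dec_and_mutex_lock(&active, &reserve_mutex)) {
			do_release();
			mutex_unlock(&reserve_mutex);
		}
	}
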
+diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
+index ef78516850fb..f068695eaca0 100644
+--- a/arch/x86/kernel/cpu/perf_event.h
++++ b/arch/x86/kernel/cpu/perf_event.h
+@@ -703,6 +703,10 @@ int x86_add_exclusive(unsigned int what);
+ 
+ void x86_del_exclusive(unsigned int what);
+ 
++int x86_reserve_hardware(void);
++
++void x86_release_hardware(void);
++
+ void hw_perf_lbr_event_destroy(struct perf_event *event);
+ 
+ int x86_setup_perfctr(struct perf_event *event);
+diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
+index a1e35c9f06b9..2813ea0f142e 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel.c
++++ b/arch/x86/kernel/cpu/perf_event_intel.c
+@@ -3253,6 +3253,8 @@ __init int intel_pmu_init(void)
+ 
+ 	case 61: /* 14nm Broadwell Core-M */
+ 	case 86: /* 14nm Broadwell Xeon D */
++	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
++	case 79: /* 14nm Broadwell Server */
+ 		x86_pmu.late_ack = true;
+ 		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ 		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+@@ -3322,13 +3324,13 @@ __init int intel_pmu_init(void)
+ 		 * counter, so do not extend mask to generic counters
+ 		 */
+ 		for_each_event_constraint(c, x86_pmu.event_constraints) {
+-			if (c->cmask != FIXED_EVENT_FLAGS
+-			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+-				continue;
++			if (c->cmask == FIXED_EVENT_FLAGS
++			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
++				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ 			}
+-
+-			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+-			c->weight += x86_pmu.num_counters;
++			c->idxmsk64 &=
++				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
++			c->weight = hweight64(c->idxmsk64);
+ 		}
+ 	}
+ 
+diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
+index ac1f0c55f379..7795f3f8b1d5 100644
+--- a/arch/x86/kernel/cpu/perf_event_intel_bts.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c
+@@ -483,17 +483,26 @@ static int bts_event_add(struct perf_event *event, int mode)
+ 
+ static void bts_event_destroy(struct perf_event *event)
+ {
++	x86_release_hardware();
+ 	x86_del_exclusive(x86_lbr_exclusive_bts);
+ }
+ 
+ static int bts_event_init(struct perf_event *event)
+ {
++	int ret;
++
+ 	if (event->attr.type != bts_pmu.type)
+ 		return -ENOENT;
+ 
+ 	if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ 		return -EBUSY;
+ 
++	ret = x86_reserve_hardware();
++	if (ret) {
++		x86_del_exclusive(x86_lbr_exclusive_bts);
++		return ret;
++	}
++
+ 	event->destroy = bts_event_destroy;
+ 
+ 	return 0;
+diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
+index 53eeb226657c..7e429c99c728 100644
+--- a/arch/x86/kernel/head_32.S
++++ b/arch/x86/kernel/head_32.S
+@@ -62,9 +62,16 @@
+ #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
+ #endif
+ 
+-/* Number of possible pages in the lowmem region */
+-LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+-	
++/*
++ * Number of possible pages in the lowmem region.
++ *
++ * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
++ * gas warning about overflowing shift count when gas has been compiled
++ * with only a host target support using a 32-bit type for internal
++ * representation.
++ */
++LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
++
+ /* Enough space to fit pagetables for the low memory linear map */
+ MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
+ 
+diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
+index 8c81af6dbe06..e527a3e13939 100644
+--- a/drivers/bluetooth/ath3k.c
++++ b/drivers/bluetooth/ath3k.c
+@@ -80,6 +80,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe057) },
+ 	{ USB_DEVICE(0x0489, 0xe056) },
+ 	{ USB_DEVICE(0x0489, 0xe05f) },
++	{ USB_DEVICE(0x0489, 0xe076) },
+ 	{ USB_DEVICE(0x0489, 0xe078) },
+ 	{ USB_DEVICE(0x04c5, 0x1330) },
+ 	{ USB_DEVICE(0x04CA, 0x3004) },
+@@ -88,6 +89,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x04CA, 0x3007) },
+ 	{ USB_DEVICE(0x04CA, 0x3008) },
+ 	{ USB_DEVICE(0x04CA, 0x300b) },
++	{ USB_DEVICE(0x04CA, 0x300d) },
+ 	{ USB_DEVICE(0x04CA, 0x300f) },
+ 	{ USB_DEVICE(0x04CA, 0x3010) },
+ 	{ USB_DEVICE(0x0930, 0x0219) },
+@@ -113,6 +115,7 @@ static const struct usb_device_id ath3k_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408) },
+ 	{ USB_DEVICE(0x13d3, 0x3423) },
+ 	{ USB_DEVICE(0x13d3, 0x3432) },
++	{ USB_DEVICE(0x13d3, 0x3474) },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE02C) },
+@@ -137,6 +140,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -145,6 +149,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+@@ -170,6 +175,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU22 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 },
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 3c10d4dfe9a7..420cc9f3eb76 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -178,6 +178,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
+@@ -186,6 +187,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+@@ -211,6 +213,7 @@ static const struct usb_device_id blacklist_table[] = {
+ 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
+ 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },
++	{ USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 },
+ 
+ 	/* Atheros AR5BBU12 with sflash firmware */
+ 	{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index b2f9521fe551..4cdac7801c8b 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -5365,6 +5365,10 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
+ 		*have_5ghz_phy = true;
+ 		return;
+ 	case 0x4321: /* BCM4306 */
++		/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
++		if (dev->phy.type != B43_PHYTYPE_G)
++			break;
++		/* fall through */
+ 	case 0x4313: /* BCM4311 */
+ 	case 0x431a: /* BCM4318 */
+ 	case 0x432a: /* BCM4321 */
+diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
+index 5c8f58114677..a086e1d69bc7 100644
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -1477,6 +1477,11 @@ skip_countries:
+ 		goto alloc_fail8;
+ 	}
+ 
++	if (quirks & CLEAR_HALT_CONDITIONS) {
++		usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, epread->bEndpointAddress));
++		usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress));
++	}
++
+ 	return 0;
+ alloc_fail8:
+ 	if (acm->country_codes) {
+@@ -1756,6 +1761,10 @@ static const struct usb_device_id acm_ids[] = {
+ 	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ 	},
+ 
++	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
++	.driver_info = CLEAR_HALT_CONDITIONS,
++	},
++
+ 	/* Nokia S60 phones expose two ACM channels. The first is
+ 	 * a modem and is picked up by the standard AT-command
+ 	 * information below. The second is 'vendor-specific' but
+diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
+index ffeb3c83941f..b3b6c9db6fe5 100644
+--- a/drivers/usb/class/cdc-acm.h
++++ b/drivers/usb/class/cdc-acm.h
+@@ -133,3 +133,4 @@ struct acm {
+ #define NO_DATA_INTERFACE		BIT(4)
+ #define IGNORE_DEVICE			BIT(5)
+ #define QUIRK_CONTROL_LINE_STATE	BIT(6)
++#define CLEAR_HALT_CONDITIONS		BIT(7)
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index eddf1ed4155e..0ceb386777ae 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -4331,20 +4331,20 @@ static void ring_buffer_attach(struct perf_event *event,
+ 		WARN_ON_ONCE(event->rcu_pending);
+ 
+ 		old_rb = event->rb;
+-		event->rcu_batches = get_state_synchronize_rcu();
+-		event->rcu_pending = 1;
+-
+ 		spin_lock_irqsave(&old_rb->event_lock, flags);
+ 		list_del_rcu(&event->rb_entry);
+ 		spin_unlock_irqrestore(&old_rb->event_lock, flags);
+-	}
+ 
+-	if (event->rcu_pending && rb) {
+-		cond_synchronize_rcu(event->rcu_batches);
+-		event->rcu_pending = 0;
++		event->rcu_batches = get_state_synchronize_rcu();
++		event->rcu_pending = 1;
+ 	}
+ 
+ 	if (rb) {
++		if (event->rcu_pending) {
++			cond_synchronize_rcu(event->rcu_batches);
++			event->rcu_pending = 0;
++		}
++
+ 		spin_lock_irqsave(&rb->event_lock, flags);
+ 		list_add_rcu(&event->rb_entry, &rb->event_list);
+ 		spin_unlock_irqrestore(&rb->event_lock, flags);
+diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
+index 10df57237a66..98cfc388ea33 100644
+--- a/tools/build/Makefile.build
++++ b/tools/build/Makefile.build
+@@ -94,12 +94,12 @@ obj-y        := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
+ subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
+ 
+ # '$(OUTPUT)/dir' prefix to all objects
+-prefix       := $(subst ./,,$(OUTPUT)$(dir)/)
+-obj-y        := $(addprefix $(prefix),$(obj-y))
+-subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
++objprefix    := $(subst ./,,$(OUTPUT)$(dir)/)
++obj-y        := $(addprefix $(objprefix),$(obj-y))
++subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
+ 
+ # Final '$(obj)-in.o' object
+-in-target := $(prefix)$(obj)-in.o
++in-target := $(objprefix)$(obj)-in.o
+ 
+ PHONY += $(subdir-y)
+ 


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-06-27 19:50 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-06-27 19:50 UTC (permalink / raw
  To: gentoo-commits

commit:     45ecdfc0d977755e3f15ba10a6b4fdb93581357b
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 27 19:50:31 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 27 19:50:31 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=45ecdfc0

Update kdbus patch, rename for clarity

 0000_README                                        |    2 +-
 ...kdbus-4.1-rc1.patch => 5015_kdbus-6-27-15.patch | 1765 ++++++--------------
 2 files changed, 504 insertions(+), 1263 deletions(-)

diff --git a/0000_README b/0000_README
index d33ec2f..dd82b40 100644
--- a/0000_README
+++ b/0000_README
@@ -83,6 +83,6 @@ Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
 
-Patch:  5015_kdbus-4.1-rc1.patch
+Patch:  5015_kdbus-6-27-15.patch
 From:   https://lkml.org
 Desc:   Kernel-level IPC implementation

diff --git a/5015_kdbus-4.1-rc1.patch b/5015_kdbus-6-27-15.patch
similarity index 96%
rename from 5015_kdbus-4.1-rc1.patch
rename to 5015_kdbus-6-27-15.patch
index a5169bd..bc17abe 100644
--- a/5015_kdbus-4.1-rc1.patch
+++ b/5015_kdbus-6-27-15.patch
@@ -1,15 +1,15 @@
 diff --git a/Documentation/Makefile b/Documentation/Makefile
-index 6883a1b..5e3fde6 100644
+index bc05482..e2127a7 100644
 --- a/Documentation/Makefile
 +++ b/Documentation/Makefile
 @@ -1,4 +1,4 @@
- subdir-y := accounting arm auxdisplay blackfin connector \
+ subdir-y := accounting auxdisplay blackfin connector \
 -	filesystems filesystems ia64 laptops mic misc-devices \
 +	filesystems filesystems ia64 kdbus laptops mic misc-devices \
  	networking pcmcia prctl ptp spi timers vDSO video4linux \
  	watchdog
 diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
-index 8136e1f..54e091e 100644
+index 51f4221..ec7c81b 100644
 --- a/Documentation/ioctl/ioctl-number.txt
 +++ b/Documentation/ioctl/ioctl-number.txt
 @@ -292,6 +292,7 @@ Code  Seq#(hex)	Include File		Comments
@@ -2262,7 +2262,7 @@ index 0000000..8c2a90e
 +</refentry>
 diff --git a/Documentation/kdbus/kdbus.item.xml b/Documentation/kdbus/kdbus.item.xml
 new file mode 100644
-index 0000000..09f8b90
+index 0000000..ee09dfa
 --- /dev/null
 +++ b/Documentation/kdbus/kdbus.item.xml
 @@ -0,0 +1,839 @@
@@ -2337,13 +2337,13 @@ index 0000000..09f8b90
 +#define KDBUS_ALIGN8(val) (((val) + 7) & ~7)
 +
 +#define KDBUS_ITEM_NEXT(item) \
-+    (typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++    (typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
 +
 +#define KDBUS_ITEM_FOREACH(item, head, first)                      \
-+    for (item = (head)->first;                                     \
++    for ((item) = (head)->first;                                   \
 +         ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) && \
 +          ((uint8_t *)(item) >= (uint8_t *)(head));                \
-+         item = KDBUS_ITEM_NEXT(item))
++         (item) = KDBUS_ITEM_NEXT(item))
 +      ]]></programlisting>
 +    </refsect2>
 +  </refsect1>
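
The parenthesization fixes above (and the matching ones to KDBUS_ITEM_NEXT/KDBUS_ITEMS_FOREACH later in item.h and kdbus-api.h) are classic macro hygiene: without parentheses around the parameter, an argument that is itself an expression can bind to the enclosing cast instead of being evaluated as a unit. A standalone illustration — all names here are invented for the example, nothing is taken from the patch:

#include <stdint.h>
#include <stdio.h>

struct rec {
	uint64_t size;
	uint64_t data;
};

/* unparenthesized parameter, as before the fix */
#define NEXT_BAD(item)  ((uint8_t *)item + 8)
/* parenthesized parameter, as after the fix */
#define NEXT_GOOD(item) ((uint8_t *)(item) + 8)

int main(void)
{
	struct rec v[4];
	int i = 2;

	/* NEXT_BAD(v + i) expands to ((uint8_t *)v + i + 8): the cast
	 * binds to 'v' alone, so this advances i BYTES past v rather
	 * than i records. NEXT_GOOD(v + i) advances i records. */
	printf("bad:  %td\n", NEXT_BAD(v + i) - (uint8_t *)v);   /* 10 */
	printf("good: %td\n", NEXT_GOOD(v + i) - (uint8_t *)v);  /* 40 */
	return 0;
}
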
@@ -7450,10 +7450,10 @@ index 0000000..52565ea
 +	</template>
 +</stylesheet>
 diff --git a/MAINTAINERS b/MAINTAINERS
-index 6239a30..e924246 100644
+index d8afd29..02f7668 100644
 --- a/MAINTAINERS
 +++ b/MAINTAINERS
-@@ -5503,6 +5503,19 @@ S:	Maintained
+@@ -5585,6 +5585,19 @@ S:	Maintained
  F:	Documentation/kbuild/kconfig-language.txt
  F:	scripts/kconfig/
  
@@ -7474,10 +7474,10 @@ index 6239a30..e924246 100644
  M:	Vivek Goyal <vgoyal@redhat.com>
  M:	Haren Myneni <hbabu@us.ibm.com>
 diff --git a/Makefile b/Makefile
-index 1100ff3..08c9818 100644
+index f5c8983..a1c8d57 100644
 --- a/Makefile
 +++ b/Makefile
-@@ -1350,6 +1350,7 @@ $(help-board-dirs): help-%:
+@@ -1343,6 +1343,7 @@ $(help-board-dirs): help-%:
  %docs: scripts_basic FORCE
  	$(Q)$(MAKE) $(build)=scripts build_docproc
  	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
@@ -7486,10 +7486,10 @@ index 1100ff3..08c9818 100644
  else # KBUILD_EXTMOD
  
 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
-index 68ceb97..ddc413e 100644
+index 1a0006a..4842a98 100644
 --- a/include/uapi/linux/Kbuild
 +++ b/include/uapi/linux/Kbuild
-@@ -214,6 +214,7 @@ header-y += ixjuser.h
+@@ -215,6 +215,7 @@ header-y += ixjuser.h
  header-y += jffs2.h
  header-y += joystick.h
  header-y += kcmp.h
@@ -8483,10 +8483,10 @@ index 0000000..00a6e14
 +
 +#endif /* _UAPI_KDBUS_H_ */
 diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
-index 7d664ea..1cf05c0 100644
+index 7b1425a..ce2ac5a 100644
 --- a/include/uapi/linux/magic.h
 +++ b/include/uapi/linux/magic.h
-@@ -74,4 +74,6 @@
+@@ -76,4 +76,6 @@
  #define BTRFS_TEST_MAGIC	0x73727279
  #define NSFS_MAGIC		0x6e736673
  
@@ -8494,7 +8494,7 @@ index 7d664ea..1cf05c0 100644
 +
  #endif /* __LINUX_MAGIC_H__ */
 diff --git a/init/Kconfig b/init/Kconfig
-index f5dbc6d..6bda631 100644
+index dc24dec..9388071 100644
 --- a/init/Kconfig
 +++ b/init/Kconfig
 @@ -261,6 +261,19 @@ config POSIX_MQUEUE_SYSCTL
@@ -8557,10 +8557,10 @@ index 0000000..7ee9271
 +obj-$(CONFIG_KDBUS) += kdbus.o
 diff --git a/ipc/kdbus/bus.c b/ipc/kdbus/bus.c
 new file mode 100644
-index 0000000..9d0679e
+index 0000000..bbdf0f2
 --- /dev/null
 +++ b/ipc/kdbus/bus.c
-@@ -0,0 +1,560 @@
+@@ -0,0 +1,542 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -8848,8 +8848,6 @@ index 0000000..9d0679e
 +			continue;
 +
 +		if (conn_src) {
-+			u64 attach_flags;
-+
 +			/*
 +			 * Anyone can send broadcasts, as they have no
 +			 * destination. But a receiver needs TALK access to
@@ -8858,19 +8856,12 @@ index 0000000..9d0679e
 +			if (!kdbus_conn_policy_talk(conn_dst, NULL, conn_src))
 +				continue;
 +
-+			attach_flags = kdbus_meta_calc_attach_flags(conn_src,
-+								    conn_dst);
-+
-+			/*
-+			 * Keep sending messages even if we cannot acquire the
-+			 * requested metadata. It's up to the receiver to drop
-+			 * messages that lack expected metadata.
-+			 */
-+			if (!conn_src->faked_meta)
-+				kdbus_meta_proc_collect(kmsg->proc_meta,
-+							attach_flags);
-+			kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, conn_src,
-+						attach_flags);
++			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
++							  conn_dst);
++			if (ret < 0) {
++				kdbus_conn_lost_message(conn_dst);
++				continue;
++			}
 +		} else {
 +			/*
 +			 * Check if there is a policy db that prevents the
@@ -8916,22 +8907,13 @@ index 0000000..9d0679e
 +
 +	down_read(&bus->conn_rwlock);
 +	list_for_each_entry(conn_dst, &bus->monitors_list, monitor_entry) {
-+		/*
-+		 * Collect metadata requested by the destination connection.
-+		 * Ignore errors, as receivers need to check metadata
-+		 * availability, anyway. So it's still better to send messages
-+		 * that lack data, than to skip it entirely.
-+		 */
 +		if (conn_src) {
-+			u64 attach_flags;
-+
-+			attach_flags = kdbus_meta_calc_attach_flags(conn_src,
-+								    conn_dst);
-+			if (!conn_src->faked_meta)
-+				kdbus_meta_proc_collect(kmsg->proc_meta,
-+							attach_flags);
-+			kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, conn_src,
-+						attach_flags);
++			ret = kdbus_kmsg_collect_metadata(kmsg, conn_src,
++							  conn_dst);
++			if (ret < 0) {
++				kdbus_conn_lost_message(conn_dst);
++				continue;
++			}
 +		}
 +
 +		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
@@ -8946,7 +8928,7 @@ index 0000000..9d0679e
 + * @domain:		domain to operate on
 + * @argp:		command payload
 + *
-+ * Return: Newly created bus on success, ERR_PTR on failure.
++ * Return: NULL or newly created bus on success, ERR_PTR on failure.
 + */
 +struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
 +				     void __user *argp)
@@ -9040,7 +9022,7 @@ index 0000000..9d0679e
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -9230,10 +9212,10 @@ index 0000000..5bea5ef
 +#endif
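
The bus.c hunks above stop shipping broadcast and monitor messages with silently missing metadata: a failing kdbus_kmsg_collect_metadata() now counts as a lost message for that receiver via kdbus_conn_lost_message(). A toy user-space model of that accounting — no kdbus types are used, every name below is invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct receiver {
	atomic_int lost_count;   /* what the receiver later reads back */
};

static int collect_metadata(int fail)
{
	return fail ? -1 : 0;    /* stand-in for the real collection step */
}

static void broadcast_to(struct receiver *r, int fail)
{
	if (collect_metadata(fail) < 0) {
		/* don't deliver a half-populated message; record the drop */
		atomic_fetch_add(&r->lost_count, 1);
		return;
	}
	/* ... enqueue the fully populated message for r ... */
}

int main(void)
{
	struct receiver r = { 0 };

	broadcast_to(&r, 1);
	printf("lost messages: %d\n", atomic_load(&r.lost_count));
	return 0;
}
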
 diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
 new file mode 100644
-index 0000000..ab476fa
+index 0000000..9993753
 --- /dev/null
 +++ b/ipc/kdbus/connection.c
-@@ -0,0 +1,2214 @@
+@@ -0,0 +1,2178 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -9668,7 +9650,7 @@ index 0000000..ab476fa
 +	 * directly, and won't cause any notifications.
 +	 */
 +	if (!kdbus_conn_is_monitor(conn)) {
-+		ret = kdbus_notify_id_change(conn->ep->bus, KDBUS_ITEM_ID_ADD,
++		ret = kdbus_notify_id_change(bus, KDBUS_ITEM_ID_ADD,
 +					     conn->id, conn->flags);
 +		if (ret < 0)
 +			goto exit_disconnect;
@@ -9795,17 +9777,16 @@ index 0000000..ab476fa
 +	hash_for_each(bus->conn_hash, i, c, hentry) {
 +		mutex_lock(&c->lock);
 +		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
-+			if (r->reply_src == conn) {
-+				if (r->sync) {
-+					kdbus_sync_reply_wakeup(r, -EPIPE);
-+					kdbus_reply_unlink(r);
-+					continue;
-+				}
++			if (r->reply_src != conn)
++				continue;
 +
++			if (r->sync)
++				kdbus_sync_reply_wakeup(r, -EPIPE);
++			else
 +				/* send a 'connection dead' notification */
 +				kdbus_notify_reply_dead(bus, c->id, r->cookie);
-+				kdbus_reply_unlink(r);
-+			}
++
++			kdbus_reply_unlink(r);
 +		}
 +		mutex_unlock(&c->lock);
 +	}
@@ -9989,7 +9970,7 @@ index 0000000..ab476fa
 + *
 + * kdbus is reliable. That means, we try hard to never lose messages. However,
 + * memory is limited, so we cannot rely on transmissions to never fail.
-+ * Therefore, we use quota-limits to let callers know if there unicast message
++ * Therefore, we use quota-limits to let callers know if their unicast message
 + * cannot be transmitted to a peer. This works fine for unicasts, but for
 + * broadcasts we cannot make the caller handle the transmission failure.
 + * Instead, we must let the destination know that it couldn't receive a
@@ -10011,8 +9992,6 @@ index 0000000..ab476fa
 +		      const struct kdbus_kmsg *kmsg,
 +		      struct kdbus_user *user)
 +{
-+	struct kdbus_queue_entry *entry;
-+
 +	/* The remote connection was disconnected */
 +	if (!kdbus_conn_active(conn_dst))
 +		return ERR_PTR(-ECONNRESET);
@@ -10029,11 +10008,7 @@ index 0000000..ab476fa
 +	    kmsg->res && kmsg->res->fds_count > 0)
 +		return ERR_PTR(-ECOMM);
 +
-+	entry = kdbus_queue_entry_new(conn_dst, kmsg, user);
-+	if (IS_ERR(entry))
-+		return entry;
-+
-+	return entry;
++	return kdbus_queue_entry_new(conn_dst, kmsg, user);
 +}
 +
 +/*
@@ -10334,7 +10309,6 @@ index 0000000..ab476fa
 +	struct kdbus_reply *reply, *wake = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
-+	u64 attach;
 +	int ret;
 +
 +	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
@@ -10367,15 +10341,7 @@ index 0000000..ab476fa
 +
 +	/* attach metadata */
 +
-+	attach = kdbus_meta_calc_attach_flags(src, dst);
-+
-+	if (!src->faked_meta) {
-+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -10403,7 +10369,6 @@ index 0000000..ab476fa
 +	struct kdbus_reply *wait = NULL;
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
-+	u64 attach;
 +	int ret;
 +
 +	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
@@ -10454,15 +10419,7 @@ index 0000000..ab476fa
 +
 +	/* attach metadata */
 +
-+	attach = kdbus_meta_calc_attach_flags(src, dst);
-+
-+	if (!src->faked_meta) {
-+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
-+		if (ret < 0)
-+			goto exit;
-+	}
-+
-+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
 +	if (ret < 0)
 +		goto exit;
 +
@@ -10493,7 +10450,6 @@ index 0000000..ab476fa
 +	struct kdbus_conn *dst = NULL;
 +	struct kdbus_bus *bus = src->ep->bus;
 +	bool is_signal = (kmsg->msg.flags & KDBUS_MSG_SIGNAL);
-+	u64 attach;
 +	int ret = 0;
 +
 +	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
@@ -10532,16 +10488,8 @@ index 0000000..ab476fa
 +
 +	/* attach metadata */
 +
-+	attach = kdbus_meta_calc_attach_flags(src, dst);
-+
-+	if (!src->faked_meta) {
-+		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
-+		if (ret < 0 && !is_signal)
-+			goto exit;
-+	}
-+
-+	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
-+	if (ret < 0 && !is_signal)
++	ret = kdbus_kmsg_collect_metadata(kmsg, src, dst);
++	if (ret < 0)
 +		goto exit;
 +
 +	/* send message */
@@ -10824,10 +10772,8 @@ index 0000000..ab476fa
 +	 *     to a peer if, and only if, that peer can see the name this
 +	 *     notification is for.
 +	 *
-+	 * KDBUS_ITEM_ID_{ADD,REMOVE}: As new peers cannot have names, and all
-+	 *     names are dropped before a peer is removed, those notifications
-+	 *     cannot be seen on custom endpoints. Thus, we only pass them
-+	 *     through on default endpoints.
++	 * KDBUS_ITEM_ID_{ADD,REMOVE}: Notifications for ID changes are
++	 *     broadcast to everyone, to allow tracking peers.
 +	 */
 +
 +	switch (kmsg->notify_type) {
@@ -10839,7 +10785,7 @@ index 0000000..ab476fa
 +
 +	case KDBUS_ITEM_ID_ADD:
 +	case KDBUS_ITEM_ID_REMOVE:
-+		return !conn->ep->user;
++		return true;
 +
 +	default:
 +		WARN(1, "Invalid type for notification broadcast: %llu\n",
@@ -10854,7 +10800,7 @@ index 0000000..ab476fa
 + * @privileged:		Whether the caller is privileged
 + * @argp:		Command payload
 + *
-+ * Return: Newly created connection on success, ERR_PTR on failure.
++ * Return: NULL or newly created connection on success, ERR_PTR on failure.
 + */
 +struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
 +				   void __user *argp)
@@ -10941,7 +10887,7 @@ index 0000000..ab476fa
 + *
 + * The caller must not hold any active reference to @conn or this will deadlock.
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -10973,7 +10919,7 @@ index 0000000..ab476fa
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -11103,7 +11049,7 @@ index 0000000..ab476fa
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -11200,7 +11146,7 @@ index 0000000..ab476fa
 + * @f:			file this command was called on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
 +{
@@ -11296,7 +11242,7 @@ index 0000000..ab476fa
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -11419,7 +11365,7 @@ index 0000000..ab476fa
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -12098,7 +12044,7 @@ index 0000000..447a2bd
 +#endif
 diff --git a/ipc/kdbus/endpoint.c b/ipc/kdbus/endpoint.c
 new file mode 100644
-index 0000000..174d274
+index 0000000..9a95a5e
 --- /dev/null
 +++ b/ipc/kdbus/endpoint.c
 @@ -0,0 +1,275 @@
@@ -12292,7 +12238,7 @@ index 0000000..174d274
 + * @bus:		bus to operate on
 + * @argp:		command payload
 + *
-+ * Return: Newly created endpoint on success, ERR_PTR on failure.
++ * Return: NULL or newly created endpoint on success, ERR_PTR on failure.
 + */
 +struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp)
 +{
@@ -12351,7 +12297,7 @@ index 0000000..174d274
 + * @ep:			endpoint to operate on
 + * @argp:		command payload
 + *
-+ * Return: Newly created endpoint on success, ERR_PTR on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp)
 +{
@@ -13002,10 +12948,10 @@ index 0000000..62f7d6a
 +#endif
 diff --git a/ipc/kdbus/handle.c b/ipc/kdbus/handle.c
 new file mode 100644
-index 0000000..f72dbe5
+index 0000000..0752799
 --- /dev/null
 +++ b/ipc/kdbus/handle.c
-@@ -0,0 +1,617 @@
+@@ -0,0 +1,702 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13026,6 +12972,7 @@ index 0000000..f72dbe5
 +#include <linux/init.h>
 +#include <linux/kdev_t.h>
 +#include <linux/module.h>
++#include <linux/mutex.h>
 +#include <linux/poll.h>
 +#include <linux/rwsem.h>
 +#include <linux/sched.h>
@@ -13079,10 +13026,6 @@ index 0000000..f72dbe5
 +	if (!KDBUS_ITEMS_END(item, args->items, args->items_size))
 +		return -EINVAL;
 +
-+	for (i = 0; i < args->argc; ++i)
-+		if (args->argv[i].mandatory && !args->argv[i].item)
-+			return -EINVAL;
-+
 +	return 0;
 +}
 +
@@ -13157,11 +13100,32 @@ index 0000000..f72dbe5
 +int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
 +		       size_t type_size, size_t items_offset, void **out)
 +{
-+	int ret;
++	u64 user_size;
++	int ret, i;
++
++	ret = kdbus_copy_from_user(&user_size, argp, sizeof(user_size));
++	if (ret < 0)
++		return ret;
++
++	if (user_size < type_size)
++		return -EINVAL;
++	if (user_size > KDBUS_CMD_MAX_SIZE)
++		return -EMSGSIZE;
++
++	if (user_size <= sizeof(args->cmd_buf)) {
++		if (copy_from_user(args->cmd_buf, argp, user_size))
++			return -EFAULT;
++		args->cmd = (void*)args->cmd_buf;
++	} else {
++		args->cmd = memdup_user(argp, user_size);
++		if (IS_ERR(args->cmd))
++			return PTR_ERR(args->cmd);
++	}
 +
-+	args->cmd = kdbus_memdup_user(argp, type_size, KDBUS_CMD_MAX_SIZE);
-+	if (IS_ERR(args->cmd))
-+		return PTR_ERR(args->cmd);
++	if (args->cmd->size != user_size) {
++		ret = -EINVAL;
++		goto error;
++	}
 +
 +	args->cmd->return_flags = 0;
 +	args->user = argp;
@@ -13181,6 +13145,15 @@ index 0000000..f72dbe5
 +	if (ret < 0)
 +		goto error;
 +
++	/* mandatory items must be given (but not on negotiation) */
++	if (!(args->cmd->flags & KDBUS_FLAG_NEGOTIATE)) {
++		for (i = 0; i < args->argc; ++i)
++			if (args->argv[i].mandatory && !args->argv[i].item) {
++				ret = -EINVAL;
++				goto error;
++			}
++	}
++
 +	*out = args->cmd;
 +	return !!(args->cmd->flags & KDBUS_FLAG_NEGOTIATE);
 +
@@ -13209,7 +13182,8 @@ index 0000000..f72dbe5
 +		if (put_user(args->cmd->return_flags,
 +			     &args->user->return_flags))
 +			ret = -EFAULT;
-+		kfree(args->cmd);
++		if (args->cmd != (void*)args->cmd_buf)
++			kfree(args->cmd);
 +		args->cmd = NULL;
 +	}
 +
@@ -13232,7 +13206,7 @@ index 0000000..f72dbe5
 +
 +/**
 + * struct kdbus_handle - handle to the kdbus system
-+ * @rwlock:		handle lock
++ * @lock:		handle lock
 + * @type:		type of this handle (KDBUS_HANDLE_*)
 + * @bus_owner:		bus this handle owns
 + * @ep_owner:		endpoint this handle owns
@@ -13240,7 +13214,7 @@ index 0000000..f72dbe5
 + * @privileged:		Flag to mark a handle as privileged
 + */
 +struct kdbus_handle {
-+	struct rw_semaphore rwlock;
++	struct mutex lock;
 +
 +	enum kdbus_handle_type type;
 +	union {
@@ -13268,7 +13242,7 @@ index 0000000..f72dbe5
 +		goto exit;
 +	}
 +
-+	init_rwsem(&handle->rwlock);
++	mutex_init(&handle->lock);
 +	handle->type = KDBUS_HANDLE_NONE;
 +
 +	if (node->type == KDBUS_NODE_ENDPOINT) {
@@ -13358,8 +13332,8 @@ index 0000000..f72dbe5
 +			break;
 +		}
 +
-+		handle->type = KDBUS_HANDLE_BUS_OWNER;
 +		handle->bus_owner = bus;
++		ret = KDBUS_HANDLE_BUS_OWNER;
 +		break;
 +	}
 +
@@ -13399,8 +13373,8 @@ index 0000000..f72dbe5
 +			break;
 +		}
 +
-+		handle->type = KDBUS_HANDLE_EP_OWNER;
 +		handle->ep_owner = ep;
++		ret = KDBUS_HANDLE_EP_OWNER;
 +		break;
 +
 +	case KDBUS_CMD_HELLO:
@@ -13410,8 +13384,8 @@ index 0000000..f72dbe5
 +			break;
 +		}
 +
-+		handle->type = KDBUS_HANDLE_CONNECTED;
 +		handle->conn = conn;
++		ret = KDBUS_HANDLE_CONNECTED;
 +		break;
 +
 +	default:
@@ -13525,19 +13499,41 @@ index 0000000..f72dbe5
 +	case KDBUS_CMD_BUS_MAKE:
 +	case KDBUS_CMD_ENDPOINT_MAKE:
 +	case KDBUS_CMD_HELLO:
-+		/* bail out early if already typed */
-+		if (handle->type != KDBUS_HANDLE_NONE)
-+			break;
-+
-+		down_write(&handle->rwlock);
++		mutex_lock(&handle->lock);
 +		if (handle->type == KDBUS_HANDLE_NONE) {
 +			if (node->type == KDBUS_NODE_CONTROL)
 +				ret = kdbus_handle_ioctl_control(file, cmd,
 +								 argp);
 +			else if (node->type == KDBUS_NODE_ENDPOINT)
 +				ret = kdbus_handle_ioctl_ep(file, cmd, argp);
++
++			if (ret > 0) {
++				/*
++				 * The data given via open() is not sufficient
++				 * to setup a kdbus handle. Hence, we require
++				 * the user to perform a setup ioctl. This setup
++				 * can only be performed once and defines the
++				 * type of the handle. The different setup
++				 * ioctls are locked against each other so they
++				 * cannot race. Once the handle type is set,
++				 * the type-dependent ioctls are enabled. To
++				 * improve performance, we don't lock those via
++				 * handle->lock. Instead, we issue a
++				 * write-barrier before performing the
++				 * type-change, which pairs with smp_rmb() in
++				 * all handlers that access the type field. This
++				 * guarantees the handle is fully setup, if
++				 * handle->type is set. If handle->type is
++				 * unset, you must not make any assumptions
++				 * without taking handle->lock.
++				 * Note that handle->type is only set once. It
++				 * will never change afterwards.
++				 */
++				smp_wmb();
++				handle->type = ret;
++			}
 +		}
-+		up_write(&handle->rwlock);
++		mutex_unlock(&handle->lock);
 +		break;
 +
 +	case KDBUS_CMD_ENDPOINT_UPDATE:
@@ -13552,14 +13548,30 @@ index 0000000..f72dbe5
 +	case KDBUS_CMD_MATCH_REMOVE:
 +	case KDBUS_CMD_SEND:
 +	case KDBUS_CMD_RECV:
-+	case KDBUS_CMD_FREE:
-+		down_read(&handle->rwlock);
-+		if (handle->type == KDBUS_HANDLE_EP_OWNER)
++	case KDBUS_CMD_FREE: {
++		enum kdbus_handle_type type;
++
++		/*
++		 * This read-barrier pairs with smp_wmb() of the handle setup.
++		 * it guarantees the handle is fully written, in case the
++		 * type has been set. It allows us to access the handle without
++		 * taking handle->lock, given the guarantee that the type is
++		 * only ever set once, and stays constant afterwards.
++		 * Furthermore, the handle object itself is not modified in any
++		 * way after the type is set. That is, the type-field is the
++		 * last field that is written on any handle. If it has not been
++		 * set, we must not access the handle here.
++		 */
++		type = handle->type;
++		smp_rmb();
++
++		if (type == KDBUS_HANDLE_EP_OWNER)
 +			ret = kdbus_handle_ioctl_ep_owner(file, cmd, argp);
-+		else if (handle->type == KDBUS_HANDLE_CONNECTED)
++		else if (type == KDBUS_HANDLE_CONNECTED)
 +			ret = kdbus_handle_ioctl_connected(file, cmd, argp);
-+		up_read(&handle->rwlock);
++
 +		break;
++	}
 +	default:
 +		ret = -ENOTTY;
 +		break;
@@ -13572,42 +13584,61 @@ index 0000000..f72dbe5
 +				      struct poll_table_struct *wait)
 +{
 +	struct kdbus_handle *handle = file->private_data;
++	enum kdbus_handle_type type;
 +	unsigned int mask = POLLOUT | POLLWRNORM;
-+	int ret;
 +
-+	/* Only a connected endpoint can read/write data */
-+	down_read(&handle->rwlock);
-+	if (handle->type != KDBUS_HANDLE_CONNECTED) {
-+		up_read(&handle->rwlock);
-+		return POLLERR | POLLHUP;
-+	}
-+	up_read(&handle->rwlock);
++	/*
++	 * This pairs with smp_wmb() during handle setup. It guarantees that
++	 * _iff_ the handle type is set, handle->conn is valid. Furthermore,
++	 * _iff_ the type is set, the handle object is constant and never
++	 * changed again. If it's not set, we must not access the handle but
++	 * bail out. We also must assume no setup has taken place, yet.
++	 */
++	type = handle->type;
++	smp_rmb();
 +
-+	ret = kdbus_conn_acquire(handle->conn);
-+	if (ret < 0)
++	/* Only a connected endpoint can read/write data */
++	if (type != KDBUS_HANDLE_CONNECTED)
 +		return POLLERR | POLLHUP;
 +
 +	poll_wait(file, &handle->conn->wait, wait);
 +
++	/*
++	 * Verify the connection hasn't been deactivated _after_ adding the
++	 * wait-queue. This guarantees, that if the connection is deactivated
++	 * after we checked it, the waitqueue is signaled and we're called
++	 * again.
++	 */
++	if (!kdbus_conn_active(handle->conn))
++		return POLLERR | POLLHUP;
++
 +	if (!list_empty(&handle->conn->queue.msg_list) ||
 +	    atomic_read(&handle->conn->lost_count) > 0)
 +		mask |= POLLIN | POLLRDNORM;
 +
-+	kdbus_conn_release(handle->conn);
-+
 +	return mask;
 +}
 +
 +static int kdbus_handle_mmap(struct file *file, struct vm_area_struct *vma)
 +{
 +	struct kdbus_handle *handle = file->private_data;
++	enum kdbus_handle_type type;
 +	int ret = -EBADFD;
 +
-+	if (down_read_trylock(&handle->rwlock)) {
-+		if (handle->type == KDBUS_HANDLE_CONNECTED)
-+			ret = kdbus_pool_mmap(handle->conn->pool, vma);
-+		up_read(&handle->rwlock);
-+	}
++	/*
++	 * This pairs with smp_wmb() during handle setup. It guarantees that
++	 * _iff_ the handle type is set, handle->conn is valid. Furthermore,
++	 * _iff_ the type is set, the handle object is constant and never
++	 * changed again. If it's not set, we must not access the handle but
++	 * bail out. We also must assume no setup has taken place, yet.
++	 */
++	type = handle->type;
++	smp_rmb();
++
++	/* Only connected handles have a pool we can map */
++	if (type == KDBUS_HANDLE_CONNECTED)
++		ret = kdbus_pool_mmap(handle->conn->pool, vma);
++
 +	return ret;
 +}
 +
@@ -13625,10 +13656,10 @@ index 0000000..f72dbe5
 +};
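
The smp_wmb()/smp_rmb() comments in the handle.c hunks above describe a publish-once pattern: fully initialize the object, barrier, then set handle->type; readers load the type, barrier, then use the object lock-free. A minimal sketch in portable C11 atomics, whose release/acquire ordering expresses the same pairing — the struct and names are invented, not kdbus code:

#include <stdatomic.h>
#include <stddef.h>

struct handle {
	void *conn;        /* payload, written before publication */
	_Atomic int type;  /* 0 means "not set up yet" */
};

static void publish(struct handle *h, void *conn, int type)
{
	h->conn = conn;    /* 1) initialize everything... */
	atomic_store_explicit(&h->type, type,
			      memory_order_release);  /* 2) ...then publish */
}

static void *lookup(struct handle *h)
{
	int type = atomic_load_explicit(&h->type, memory_order_acquire);

	if (type == 0)
		return NULL;   /* unpublished: must not touch h->conn */
	return h->conn;        /* safe: init happened-before the publish */
}

int main(void)
{
	static struct handle h;
	int x;

	publish(&h, &x, 1);
	return lookup(&h) == &x ? 0 : 1;
}
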
 diff --git a/ipc/kdbus/handle.h b/ipc/kdbus/handle.h
 new file mode 100644
-index 0000000..93a372d
+index 0000000..13c59d9
 --- /dev/null
 +++ b/ipc/kdbus/handle.h
-@@ -0,0 +1,85 @@
+@@ -0,0 +1,90 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13676,6 +13707,7 @@ index 0000000..93a372d
 + * @argv:		array of items this command supports
 + * @user:		set by parser to user-space location of current command
 + * @cmd:		set by parser to kernel copy of command payload
++ * @cmd_buf:		512 bytes inline buf to avoid kmalloc() on small cmds
 + * @items:		points to item array in @cmd
 + * @items_size:		size of @items in bytes
 + *
@@ -13683,6 +13715,9 @@ index 0000000..93a372d
 + * The ioctl handler has to pre-fill the flags and allowed items before passing
 + * the object to kdbus_args_parse(). The parser will copy the command payload
 + * into kernel-space and verify the correctness of the data.
++ *
++ * We use a 512 bytes buffer for small command payloads, to be allocated on
++ * stack on syscall entrance.
 + */
 +struct kdbus_args {
 +	u64 allowed_flags;
@@ -13691,6 +13726,7 @@ index 0000000..93a372d
 +
 +	struct kdbus_cmd __user *user;
 +	struct kdbus_cmd *cmd;
++	u8 cmd_buf[512];
 +
 +	struct kdbus_item *items;
 +	size_t items_size;
@@ -13716,10 +13752,10 @@ index 0000000..93a372d
 +#endif
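
The cmd_buf addition above trades a few hundred bytes of stack for skipping kmalloc() on the common small-command case; note the matching guard in handle.c that only kfree()s when the payload did not land in the inline buffer. The same pattern in plain user-space C, all names invented:

#include <stdlib.h>
#include <string.h>

struct args {
	void *cmd;                 /* points into cmd_buf or at a heap copy */
	unsigned char cmd_buf[512];
};

static void *args_copy(struct args *a, const void *src, size_t size)
{
	if (size <= sizeof(a->cmd_buf)) {
		a->cmd = a->cmd_buf;       /* common case: no allocation */
	} else {
		a->cmd = malloc(size);     /* rare case: large command */
		if (!a->cmd)
			return NULL;
	}
	return memcpy(a->cmd, src, size);
}

static void args_release(struct args *a)
{
	/* only free what was actually heap-allocated */
	if (a->cmd && a->cmd != (void *)a->cmd_buf)
		free(a->cmd);
	a->cmd = NULL;
}

int main(void)
{
	struct args a = { 0 };
	char small[16] = "hello";

	args_copy(&a, small, sizeof(small));
	args_release(&a);
	return 0;
}
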
 diff --git a/ipc/kdbus/item.c b/ipc/kdbus/item.c
 new file mode 100644
-index 0000000..745ad54
+index 0000000..1ee72c2
 --- /dev/null
 +++ b/ipc/kdbus/item.c
-@@ -0,0 +1,339 @@
+@@ -0,0 +1,333 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -13818,12 +13854,6 @@ index 0000000..745ad54
 +		break;
 +
 +	case KDBUS_ITEM_PAYLOAD_VEC:
-+		if (payload_size != sizeof(struct kdbus_vec))
-+			return -EINVAL;
-+		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
-+			return -EINVAL;
-+		break;
-+
 +	case KDBUS_ITEM_PAYLOAD_OFF:
 +		if (payload_size != sizeof(struct kdbus_vec))
 +			return -EINVAL;
@@ -14061,7 +14091,7 @@ index 0000000..745ad54
 +}
 diff --git a/ipc/kdbus/item.h b/ipc/kdbus/item.h
 new file mode 100644
-index 0000000..eeefd8b
+index 0000000..bca63b4
 --- /dev/null
 +++ b/ipc/kdbus/item.h
 @@ -0,0 +1,64 @@
@@ -14088,17 +14118,17 @@ index 0000000..eeefd8b
 +#include "util.h"
 +
 +/* generic access and iterators over a stream of items */
-+#define KDBUS_ITEM_NEXT(_i) (typeof(_i))(((u8 *)_i) + KDBUS_ALIGN8((_i)->size))
-+#define KDBUS_ITEMS_SIZE(_h, _is) ((_h)->size - offsetof(typeof(*_h), _is))
++#define KDBUS_ITEM_NEXT(_i) (typeof(_i))((u8 *)(_i) + KDBUS_ALIGN8((_i)->size))
++#define KDBUS_ITEMS_SIZE(_h, _is) ((_h)->size - offsetof(typeof(*(_h)), _is))
 +#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
 +#define KDBUS_ITEM_SIZE(_s) KDBUS_ALIGN8(KDBUS_ITEM_HEADER_SIZE + (_s))
 +#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((_i)->size - KDBUS_ITEM_HEADER_SIZE)
 +
 +#define KDBUS_ITEMS_FOREACH(_i, _is, _s)				\
-+	for (_i = _is;							\
++	for ((_i) = (_is);						\
 +	     ((u8 *)(_i) < (u8 *)(_is) + (_s)) &&			\
 +	       ((u8 *)(_i) >= (u8 *)(_is));				\
-+	     _i = KDBUS_ITEM_NEXT(_i))
++	     (_i) = KDBUS_ITEM_NEXT(_i))
 +
 +#define KDBUS_ITEM_VALID(_i, _is, _s)					\
 +	((_i)->size >= KDBUS_ITEM_HEADER_SIZE &&			\
@@ -14107,7 +14137,7 @@ index 0000000..eeefd8b
 +	 (u8 *)(_i) >= (u8 *)(_is))
 +
 +#define KDBUS_ITEMS_END(_i, _is, _s)					\
-+	((u8 *)_i == ((u8 *)(_is) + KDBUS_ALIGN8(_s)))
++	((u8 *)(_i) == ((u8 *)(_is) + KDBUS_ALIGN8(_s)))
 +
 +/**
 + * struct kdbus_item_header - Describes the fix part of an item
@@ -14201,10 +14231,10 @@ index 0000000..6450f58
 +#endif
 diff --git a/ipc/kdbus/main.c b/ipc/kdbus/main.c
 new file mode 100644
-index 0000000..785f529
+index 0000000..1ad4dc8
 --- /dev/null
 +++ b/ipc/kdbus/main.c
-@@ -0,0 +1,125 @@
+@@ -0,0 +1,114 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -14222,7 +14252,6 @@ index 0000000..785f529
 +#include <linux/fs.h>
 +#include <linux/init.h>
 +#include <linux/module.h>
-+#include <linux/moduleparam.h>
 +
 +#include "util.h"
 +#include "fs.h"
@@ -14286,17 +14315,6 @@ index 0000000..785f529
 +/* kdbus mount-point /sys/fs/kdbus */
 +static struct kobject *kdbus_dir;
 +
-+/* global module option to apply a mask to exported metadata */
-+unsigned long long kdbus_meta_attach_mask = KDBUS_ATTACH_TIMESTAMP |
-+					    KDBUS_ATTACH_CREDS |
-+					    KDBUS_ATTACH_PIDS |
-+					    KDBUS_ATTACH_AUXGROUPS |
-+					    KDBUS_ATTACH_NAMES |
-+					    KDBUS_ATTACH_SECLABEL |
-+					    KDBUS_ATTACH_CONN_DESCRIPTION;
-+MODULE_PARM_DESC(attach_flags_mask, "Attach-flags mask for exported metadata");
-+module_param_named(attach_flags_mask, kdbus_meta_attach_mask, ullong, 0644);
-+
 +static int __init kdbus_init(void)
 +{
 +	int ret;
@@ -14323,6 +14341,7 @@ index 0000000..785f529
 +{
 +	kdbus_fs_exit();
 +	kobject_put(kdbus_dir);
++	ida_destroy(&kdbus_node_ida);
 +}
 +
 +module_init(kdbus_init);
@@ -14332,7 +14351,7 @@ index 0000000..785f529
 +MODULE_ALIAS_FS(KBUILD_MODNAME "fs");
 diff --git a/ipc/kdbus/match.c b/ipc/kdbus/match.c
 new file mode 100644
-index 0000000..30cec1c
+index 0000000..cc083b4
 --- /dev/null
 +++ b/ipc/kdbus/match.c
 @@ -0,0 +1,559 @@
@@ -14706,7 +14725,7 @@ index 0000000..30cec1c
 + * are used to match messages from userspace, while the others apply to
 + * kernel-generated notifications.
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -14866,7 +14885,7 @@ index 0000000..30cec1c
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -14938,10 +14957,10 @@ index 0000000..ea42929
 +#endif
 diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
 new file mode 100644
-index 0000000..8096075
+index 0000000..066e816
 --- /dev/null
 +++ b/ipc/kdbus/message.c
-@@ -0,0 +1,616 @@
+@@ -0,0 +1,640 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15558,12 +15577,36 @@ index 0000000..8096075
 +	kdbus_kmsg_free(m);
 +	return ERR_PTR(ret);
 +}
++
++/**
++ * kdbus_kmsg_collect_metadata() - collect metadata
++ * @kmsg:	message to collect metadata on
++ * @src:	source connection of message
++ * @dst:	destination connection of message
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_kmsg_collect_metadata(struct kdbus_kmsg *kmsg, struct kdbus_conn *src,
++				struct kdbus_conn *dst)
++{
++	u64 attach;
++	int ret;
++
++	attach = kdbus_meta_calc_attach_flags(src, dst);
++	if (!src->faked_meta) {
++		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
++		if (ret < 0)
++			return ret;
++	}
++
++	return kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++}
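
kdbus_kmsg_collect_metadata() above folds four copies of the same collect sequence (unicast, sync, signal, broadcast/monitor paths) into one helper. The attach-flags calculation it wraps is, conceptually, an intersection of what the sender is willing to attach and what the receiver asked for; a schematic reading, not the literal kernel function, with made-up flag values:

#include <stdint.h>
#include <stdio.h>

#define ATTACH_TIMESTAMP (1ULL << 0)
#define ATTACH_CREDS     (1ULL << 1)
#define ATTACH_NAMES     (1ULL << 2)

static uint64_t calc_attach_flags(uint64_t sender_sends,
				  uint64_t receiver_wants)
{
	/* the effective set is the intersection of both masks */
	return sender_sends & receiver_wants;
}

int main(void)
{
	uint64_t eff = calc_attach_flags(ATTACH_TIMESTAMP | ATTACH_CREDS,
					 ATTACH_CREDS | ATTACH_NAMES);

	printf("effective mask: %#llx\n", (unsigned long long)eff); /* CREDS */
	return 0;
}
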
 diff --git a/ipc/kdbus/message.h b/ipc/kdbus/message.h
 new file mode 100644
-index 0000000..af47758
+index 0000000..cdaa65c
 --- /dev/null
 +++ b/ipc/kdbus/message.h
-@@ -0,0 +1,133 @@
+@@ -0,0 +1,135 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15695,14 +15738,16 @@ index 0000000..af47758
 +struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
 +					   struct kdbus_cmd_send *cmd_send);
 +void kdbus_kmsg_free(struct kdbus_kmsg *kmsg);
++int kdbus_kmsg_collect_metadata(struct kdbus_kmsg *kmsg, struct kdbus_conn *src,
++				struct kdbus_conn *dst);
 +
 +#endif
 diff --git a/ipc/kdbus/metadata.c b/ipc/kdbus/metadata.c
 new file mode 100644
-index 0000000..3adc6c2
+index 0000000..c36b9cc
 --- /dev/null
 +++ b/ipc/kdbus/metadata.c
-@@ -0,0 +1,1159 @@
+@@ -0,0 +1,1184 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -15734,7 +15779,6 @@ index 0000000..3adc6c2
 +#include <linux/uidgid.h>
 +#include <linux/uio.h>
 +#include <linux/user_namespace.h>
-+#include <linux/version.h>
 +
 +#include "bus.h"
 +#include "connection.h"
@@ -15769,8 +15813,7 @@ index 0000000..3adc6c2
 + * @root_path:		Root-FS path
 + * @cmdline:		Command-line
 + * @cgroup:		Full cgroup path
-+ * @caps:		Capabilities
-+ * @caps_namespace:	User-namespace of @caps
++ * @cred:		Credentials
 + * @seclabel:		Seclabel
 + * @audit_loginuid:	Audit login-UID
 + * @audit_sessionid:	Audit session-ID
@@ -15810,14 +15853,7 @@ index 0000000..3adc6c2
 +	char *cgroup;
 +
 +	/* KDBUS_ITEM_CAPS */
-+	struct caps {
-+		/* binary compatible to kdbus_caps */
-+		u32 last_cap;
-+		struct {
-+			u32 caps[_KERNEL_CAPABILITY_U32S];
-+		} set[4];
-+	} caps;
-+	struct user_namespace *caps_namespace;
++	const struct cred *cred;
 +
 +	/* KDBUS_ITEM_SECLABEL */
 +	char *seclabel;
@@ -15855,6 +15891,14 @@ index 0000000..3adc6c2
 +	char *conn_description;
 +};
 +
++/* fixed size equivalent of "kdbus_caps" */
++struct kdbus_meta_caps {
++	u32 last_cap;
++	struct {
++		u32 caps[_KERNEL_CAPABILITY_U32S];
++	} set[4];
++};
++
 +/**
 + * kdbus_meta_proc_new() - Create process metadata object
 + *
@@ -15881,7 +15925,8 @@ index 0000000..3adc6c2
 +
 +	path_put(&mp->exe_path);
 +	path_put(&mp->root_path);
-+	put_user_ns(mp->caps_namespace);
++	if (mp->cred)
++		put_cred(mp->cred);
 +	put_pid(mp->ppid);
 +	put_pid(mp->tgid);
 +	put_pid(mp->pid);
@@ -15951,25 +15996,23 @@ index 0000000..3adc6c2
 +
 +static int kdbus_meta_proc_collect_auxgroups(struct kdbus_meta_proc *mp)
 +{
-+	struct group_info *info;
++	const struct group_info *info;
 +	size_t i;
 +
-+	info = get_current_groups();
++	/* no need to lock/ref, current creds cannot change */
++	info = current_cred()->group_info;
 +
 +	if (info->ngroups > 0) {
 +		mp->auxgrps = kmalloc_array(info->ngroups, sizeof(kgid_t),
 +					    GFP_KERNEL);
-+		if (!mp->auxgrps) {
-+			put_group_info(info);
++		if (!mp->auxgrps)
 +			return -ENOMEM;
-+		}
 +
 +		for (i = 0; i < info->ngroups; i++)
 +			mp->auxgrps[i] = GROUP_AT(info, i);
 +	}
 +
 +	mp->n_auxgrps = info->ngroups;
-+	put_group_info(info);
 +	mp->valid |= KDBUS_ATTACH_AUXGROUPS;
 +
 +	return 0;
@@ -15989,42 +16032,29 @@ index 0000000..3adc6c2
 +
 +static void kdbus_meta_proc_collect_exe(struct kdbus_meta_proc *mp)
 +{
-+	struct mm_struct *mm;
++	struct file *exe_file;
 +
-+	mm = get_task_mm(current);
-+	if (!mm)
-+		return;
-+
-+	down_read(&mm->mmap_sem);
-+	if (mm->exe_file) {
-+		mp->exe_path = mm->exe_file->f_path;
++	rcu_read_lock();
++	exe_file = rcu_dereference(current->mm->exe_file);
++	if (exe_file) {
++		mp->exe_path = exe_file->f_path;
 +		path_get(&mp->exe_path);
 +		get_fs_root(current->fs, &mp->root_path);
 +		mp->valid |= KDBUS_ATTACH_EXE;
 +	}
-+	up_read(&mm->mmap_sem);
-+
-+	mmput(mm);
++	rcu_read_unlock();
 +}
 +
 +static int kdbus_meta_proc_collect_cmdline(struct kdbus_meta_proc *mp)
 +{
-+	struct mm_struct *mm;
++	struct mm_struct *mm = current->mm;
 +	char *cmdline;
 +
-+	mm = get_task_mm(current);
-+	if (!mm)
-+		return 0;
-+
-+	if (!mm->arg_end) {
-+		mmput(mm);
++	if (!mm->arg_end)
 +		return 0;
-+	}
 +
 +	cmdline = strndup_user((const char __user *)mm->arg_start,
 +			       mm->arg_end - mm->arg_start);
-+	mmput(mm);
-+
 +	if (IS_ERR(cmdline))
 +		return PTR_ERR(cmdline);
 +
@@ -16062,25 +16092,7 @@ index 0000000..3adc6c2
 +
 +static void kdbus_meta_proc_collect_caps(struct kdbus_meta_proc *mp)
 +{
-+	const struct cred *c = current_cred();
-+	int i;
-+
-+	/* ABI: "last_cap" equals /proc/sys/kernel/cap_last_cap */
-+	mp->caps.last_cap = CAP_LAST_CAP;
-+	mp->caps_namespace = get_user_ns(current_user_ns());
-+
-+	CAP_FOR_EACH_U32(i) {
-+		mp->caps.set[0].caps[i] = c->cap_inheritable.cap[i];
-+		mp->caps.set[1].caps[i] = c->cap_permitted.cap[i];
-+		mp->caps.set[2].caps[i] = c->cap_effective.cap[i];
-+		mp->caps.set[3].caps[i] = c->cap_bset.cap[i];
-+	}
-+
-+	/* clear unused bits */
-+	for (i = 0; i < 4; i++)
-+		mp->caps.set[i].caps[CAP_TO_INDEX(CAP_LAST_CAP)] &=
-+						CAP_LAST_U32_VALID_MASK;
-+
++	mp->cred = get_current_cred();
 +	mp->valid |= KDBUS_ATTACH_CAPS;
 +}
 +
@@ -16543,7 +16555,6 @@ index 0000000..3adc6c2
 +	}
 +
 +	*mask &= valid;
-+	*mask &= kdbus_meta_attach_mask;
 +
 +	if (!*mask)
 +		goto exit;
@@ -16589,7 +16600,7 @@ index 0000000..3adc6c2
 +		size += KDBUS_ITEM_SIZE(strlen(mp->cgroup) + 1);
 +
 +	if (mp && (*mask & KDBUS_ATTACH_CAPS))
-+		size += KDBUS_ITEM_SIZE(sizeof(mp->caps));
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_meta_caps));
 +
 +	if (mp && (*mask & KDBUS_ATTACH_SECLABEL))
 +		size += KDBUS_ITEM_SIZE(strlen(mp->seclabel) + 1);
@@ -16626,6 +16637,69 @@ index 0000000..3adc6c2
 +	return 2 + !!kdbus_kvec_pad(kvec++, size);
 +}
 +
++static void kdbus_meta_export_caps(struct kdbus_meta_caps *out,
++				   struct kdbus_meta_proc *mp)
++{
++	struct user_namespace *iter;
++	const struct cred *cred = mp->cred;
++	bool parent = false, owner = false;
++	int i;
++
++	/*
++	 * This translates the effective capabilities of 'cred' into the current
++	 * user-namespace. If the current user-namespace is a child-namespace of
++	 * the user-namespace of 'cred', the mask can be copied verbatim. If
++	 * not, the mask is cleared.
++	 * There's one exception: If 'cred' is the owner of any user-namespace
++	 * in the path between the current user-namespace and the user-namespace
++	 * of 'cred', then it has all effective capabilities set. This means,
++	 * the user who created a user-namespace always has all effective
++	 * capabilities in any child namespaces. Note that this is based on the
++	 * uid of the namespace creator, not the task hierarchy.
++	 */
++	for (iter = current_user_ns(); iter; iter = iter->parent) {
++		if (iter == cred->user_ns) {
++			parent = true;
++			break;
++		}
++
++		if (iter == &init_user_ns)
++			break;
++
++		if ((iter->parent == cred->user_ns) &&
++		    uid_eq(iter->owner, cred->euid)) {
++			owner = true;
++			break;
++		}
++	}
++
++	out->last_cap = CAP_LAST_CAP;
++
++	CAP_FOR_EACH_U32(i) {
++		if (parent) {
++			out->set[0].caps[i] = cred->cap_inheritable.cap[i];
++			out->set[1].caps[i] = cred->cap_permitted.cap[i];
++			out->set[2].caps[i] = cred->cap_effective.cap[i];
++			out->set[3].caps[i] = cred->cap_bset.cap[i];
++		} else if (owner) {
++			out->set[0].caps[i] = 0U;
++			out->set[1].caps[i] = ~0U;
++			out->set[2].caps[i] = ~0U;
++			out->set[3].caps[i] = ~0U;
++		} else {
++			out->set[0].caps[i] = 0U;
++			out->set[1].caps[i] = 0U;
++			out->set[2].caps[i] = 0U;
++			out->set[3].caps[i] = 0U;
++		}
++	}
++
++	/* clear unused bits */
++	for (i = 0; i < 4; i++)
++		out->set[i].caps[CAP_TO_INDEX(CAP_LAST_CAP)] &=
++					CAP_LAST_U32_VALID_MASK;
++}
++
 +/* This is equivalent to from_kuid_munged(), but maps INVALID_UID to itself */
 +static uid_t kdbus_from_kuid_keep(kuid_t uid)
 +{
@@ -16684,14 +16758,6 @@ index 0000000..3adc6c2
 +
 +	hdr = &item_hdr[0];
 +
-+	/*
-+	 * TODO: We currently have no sane way of translating a set of caps
-+	 * between different user namespaces. Until that changes, we have
-+	 * to drop such items.
-+	 */
-+	if (mp && mp->caps_namespace != user_ns)
-+		mask &= ~KDBUS_ATTACH_CAPS;
-+
 +	if (mask == 0) {
 +		*real_size = 0;
 +		return 0;
@@ -16797,10 +16863,14 @@ index 0000000..3adc6c2
 +					    KDBUS_ITEM_CGROUP, mp->cgroup,
 +					    strlen(mp->cgroup) + 1, &size);
 +
-+	if (mp && (mask & KDBUS_ATTACH_CAPS))
++	if (mp && (mask & KDBUS_ATTACH_CAPS)) {
++		struct kdbus_meta_caps caps = {};
++
++		kdbus_meta_export_caps(&caps, mp);
 +		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
-+					    KDBUS_ITEM_CAPS, &mp->caps,
-+					    sizeof(mp->caps), &size);
++					    KDBUS_ITEM_CAPS, &caps,
++					    sizeof(caps), &size);
++	}
 +
 +	if (mp && (mask & KDBUS_ATTACH_SECLABEL))
 +		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
@@ -16864,10 +16934,10 @@ index 0000000..3adc6c2
 +}
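
kdbus_meta_export_caps() above replaces the old "drop caps across namespaces" TODO with a real translation at export time. Its namespace walk reduces to three outcomes; a compressed, non-kernel restatement of that decision — the types and names are invented, the real code of course walks struct user_namespace:

#include <stddef.h>

struct user_ns {
	struct user_ns *parent;
	unsigned owner_uid;
};

enum caps_view { CAPS_COPY, CAPS_ALL, CAPS_NONE };

static enum caps_view classify_caps(struct user_ns *viewer,
				    struct user_ns *src_ns,
				    unsigned src_euid,
				    struct user_ns *init_ns)
{
	struct user_ns *iter;

	for (iter = viewer; iter; iter = iter->parent) {
		if (iter == src_ns)
			return CAPS_COPY;  /* viewer is inside src's ns */
		if (iter == init_ns)
			break;             /* reached the root */
		if (iter->parent == src_ns && iter->owner_uid == src_euid)
			return CAPS_ALL;   /* src created this child ns */
	}
	return CAPS_NONE;
}

int main(void)
{
	struct user_ns init_ns = { NULL, 0 };
	struct user_ns child   = { &init_ns, 1000 };

	/* a task in 'child' viewing creds from init_ns: copy verbatim */
	return classify_caps(&child, &init_ns, 0, &init_ns) == CAPS_COPY
		? 0 : 1;
}
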
 diff --git a/ipc/kdbus/metadata.h b/ipc/kdbus/metadata.h
 new file mode 100644
-index 0000000..42c942b
+index 0000000..79b6ac3
 --- /dev/null
 +++ b/ipc/kdbus/metadata.h
-@@ -0,0 +1,57 @@
+@@ -0,0 +1,55 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -16894,8 +16964,6 @@ index 0000000..42c942b
 +struct kdbus_meta_proc;
 +struct kdbus_meta_conn;
 +
-+extern unsigned long long kdbus_meta_attach_mask;
-+
 +struct kdbus_meta_proc *kdbus_meta_proc_new(void);
 +struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp);
 +struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp);
@@ -16927,10 +16995,10 @@ index 0000000..42c942b
 +#endif
 diff --git a/ipc/kdbus/names.c b/ipc/kdbus/names.c
 new file mode 100644
-index 0000000..657008e
+index 0000000..d77ee08
 --- /dev/null
 +++ b/ipc/kdbus/names.c
-@@ -0,0 +1,772 @@
+@@ -0,0 +1,770 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -17402,7 +17470,7 @@ index 0000000..657008e
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_name_acquire(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -17447,8 +17515,6 @@ index 0000000..657008e
 +
 +	ret = kdbus_name_acquire(conn->ep->bus->name_registry, conn, item_name,
 +				 cmd->flags, &cmd->return_flags);
-+	if (ret < 0)
-+		goto exit_dec;
 +
 +exit_dec:
 +	atomic_dec(&conn->name_count);
@@ -17461,7 +17527,7 @@ index 0000000..657008e
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -17632,7 +17698,7 @@ index 0000000..657008e
 + * @conn:		connection to operate on
 + * @argp:		command payload
 + *
-+ * Return: 0 on success, negative error code on failure.
++ * Return: >=0 on success, negative error code on failure.
 + */
 +int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp)
 +{
@@ -17785,10 +17851,10 @@ index 0000000..3dd2589
 +#endif
 diff --git a/ipc/kdbus/node.c b/ipc/kdbus/node.c
 new file mode 100644
-index 0000000..520df00
+index 0000000..0d65c65
 --- /dev/null
 +++ b/ipc/kdbus/node.c
-@@ -0,0 +1,910 @@
+@@ -0,0 +1,897 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -17969,7 +18035,7 @@ index 0000000..520df00
 + *                   accessed by other callers to properly initialize
 + *                   filesystem nodes.
 + *
-+ *     * node->id: This is an unsigned 32bit integer allocated by an IDR. It is
++ *     * node->id: This is an unsigned 32bit integer allocated by an IDA. It is
 + *                 always kept as small as possible during allocation and is
 + *                 globally unique across all nodes allocated by this module. 0
 + *                 is reserved as "not assigned" and is the default.
@@ -18024,8 +18090,7 @@ index 0000000..520df00
 +#define KDBUS_NODE_NEW			(KDBUS_NODE_BIAS - 4)
 +
 +/* global unique ID mapping for kdbus nodes */
-+static DEFINE_IDR(kdbus_node_idr);
-+static DECLARE_RWSEM(kdbus_node_idr_lock);
++DEFINE_IDA(kdbus_node_ida);
 +
 +/**
 + * kdbus_node_name_hash() - hash a name
@@ -18128,15 +18193,11 @@ index 0000000..520df00
 +		node->hash = kdbus_node_name_hash(name);
 +	}
 +
-+	down_write(&kdbus_node_idr_lock);
-+	ret = idr_alloc(&kdbus_node_idr, node, 1, 0, GFP_KERNEL);
-+	if (ret >= 0)
-+		node->id = ret;
-+	up_write(&kdbus_node_idr_lock);
-+
++	ret = ida_simple_get(&kdbus_node_ida, 1, 0, GFP_KERNEL);
 +	if (ret < 0)
 +		return ret;
 +
++	node->id = ret;
 +	ret = 0;
 +
 +	if (parent) {
@@ -18231,16 +18292,8 @@ index 0000000..520df00
 +
 +		if (node->free_cb)
 +			node->free_cb(node);
-+
-+		down_write(&kdbus_node_idr_lock);
 +		if (safe.id > 0)
-+			idr_remove(&kdbus_node_idr, safe.id);
-+		/* drop caches after last node to not leak memory on unload */
-+		if (idr_is_empty(&kdbus_node_idr)) {
-+			idr_destroy(&kdbus_node_idr);
-+			idr_init(&kdbus_node_idr);
-+		}
-+		up_write(&kdbus_node_idr_lock);
++			ida_simple_remove(&kdbus_node_ida, safe.id);
 +
 +		kfree(safe.name);
 +
@@ -18701,10 +18754,10 @@ index 0000000..520df00
 +}
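
The node.c hunks above swap a hand-locked IDR for an IDA: an IDA hands out bare integer IDs with internal locking, so the rwsem and the drop-caches dance both go away, leaving only the ida_destroy() at module exit that main.c gains earlier in this patch. A kernel-context sketch of the resulting usage (not standalone user-space code; names are invented):

#include <linux/idr.h>
#include <linux/gfp.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	/* smallest free ID >= 1; 0 stays reserved as "unassigned" */
	return ida_simple_get(&example_ida, 1, 0, GFP_KERNEL);
}

static void example_put_id(int id)
{
	if (id > 0)
		ida_simple_remove(&example_ida, id);
}
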
 diff --git a/ipc/kdbus/node.h b/ipc/kdbus/node.h
 new file mode 100644
-index 0000000..be125ce
+index 0000000..970e02b
 --- /dev/null
 +++ b/ipc/kdbus/node.h
-@@ -0,0 +1,84 @@
+@@ -0,0 +1,86 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -18765,6 +18818,8 @@ index 0000000..be125ce
 +
 +#define kdbus_node_from_rb(_node) rb_entry((_node), struct kdbus_node, rb)
 +
++extern struct ida kdbus_node_ida;
++
 +void kdbus_node_init(struct kdbus_node *node, unsigned int type);
 +
 +int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
@@ -19633,7 +19688,7 @@ index 0000000..15dd7bc
 +#endif
 diff --git a/ipc/kdbus/pool.c b/ipc/kdbus/pool.c
 new file mode 100644
-index 0000000..139bb77
+index 0000000..45dcdea
 --- /dev/null
 +++ b/ipc/kdbus/pool.c
 @@ -0,0 +1,728 @@
@@ -20314,7 +20369,7 @@ index 0000000..139bb77
 +		}
 +
 +		kaddr = (char __force __user *)kmap(page) + page_off;
-+		n_read = f_src->f_op->read(f_src, kaddr, copy_len, &off_src);
++		n_read = __vfs_read(f_src, kaddr, copy_len, &off_src);
 +		kunmap(page);
 +		mark_page_accessed(page);
 +		flush_dcache_page(page);
@@ -20419,7 +20474,7 @@ index 0000000..a903821
 +#endif
 diff --git a/ipc/kdbus/queue.c b/ipc/kdbus/queue.c
 new file mode 100644
-index 0000000..a449464
+index 0000000..25bb3ad
 --- /dev/null
 +++ b/ipc/kdbus/queue.c
 @@ -0,0 +1,678 @@
@@ -21062,7 +21117,7 @@ index 0000000..a449464
 +	lockdep_assert_held(&src->lock);
 +	lockdep_assert_held(&dst->lock);
 +
-+	if (WARN_ON(IS_ERR(e->user)) || WARN_ON(list_empty(&e->entry)))
++	if (WARN_ON(list_empty(&e->entry)))
 +		return -EINVAL;
 +	if (src == dst)
 +		return 0;
@@ -21201,10 +21256,10 @@ index 0000000..7f2db96
 +#endif /* __KDBUS_QUEUE_H */
 diff --git a/ipc/kdbus/reply.c b/ipc/kdbus/reply.c
 new file mode 100644
-index 0000000..008dca8
+index 0000000..e6791d8
 --- /dev/null
 +++ b/ipc/kdbus/reply.c
-@@ -0,0 +1,257 @@
+@@ -0,0 +1,252 @@
 +#include <linux/init.h>
 +#include <linux/mm.h>
 +#include <linux/module.h>
@@ -21244,7 +21299,7 @@ index 0000000..008dca8
 +				    bool sync)
 +{
 +	struct kdbus_reply *r;
-+	int ret = 0;
++	int ret;
 +
 +	if (atomic_inc_return(&reply_dst->request_count) >
 +	    KDBUS_CONN_MAX_REQUESTS_PENDING) {
@@ -21271,13 +21326,11 @@ index 0000000..008dca8
 +		r->waiting = true;
 +	}
 +
-+exit_dec_request_count:
-+	if (ret < 0) {
-+		atomic_dec(&reply_dst->request_count);
-+		return ERR_PTR(ret);
-+	}
-+
 +	return r;
++
++exit_dec_request_count:
++	atomic_dec(&reply_dst->request_count);
++	return ERR_PTR(ret);
 +}
 +
 +static void __kdbus_reply_free(struct kref *kref)
@@ -21347,8 +21400,7 @@ index 0000000..008dca8
 + * @reply:	The reply object
 + * @err:	Error code to set on the remote side
 + *
-+ * Remove the synchronous reply object from its connection reply_list, and
-+ * wake up remote peer (method origin) with the appropriate synchronous reply
++ * Wake up remote peer (method origin) with the appropriate synchronous reply
 + * code.
 + */
 +void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err)
@@ -21379,17 +21431,15 @@ index 0000000..008dca8
 +				     struct kdbus_conn *reply_dst,
 +				     u64 cookie)
 +{
-+	struct kdbus_reply *r, *reply = NULL;
++	struct kdbus_reply *r;
 +
 +	list_for_each_entry(r, &reply_dst->reply_list, entry) {
 +		if (r->cookie == cookie &&
-+		    (!replying || r->reply_src == replying)) {
-+			reply = r;
-+			break;
-+		}
++		    (!replying || r->reply_src == replying))
++			return r;
 +	}
 +
-+	return reply;
++	return NULL;
 +}
 +
 +/**
@@ -21538,10 +21588,10 @@ index 0000000..68d5232
 +#endif /* __KDBUS_REPLY_H */
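
The reply.c changes above (returning r directly on success and moving the error label to the tail) converge on the standard kernel error-path shape: the success path falls straight through, and failures jump to labels that unwind in reverse order of acquisition. A generic sketch of that idiom, nothing kdbus-specific:

#include <stdlib.h>

struct thing { int *a; int *b; };

static int thing_init(struct thing *t)
{
	int ret;

	t->a = malloc(sizeof(*t->a));
	if (!t->a) {
		ret = -1;
		goto err;
	}

	t->b = malloc(sizeof(*t->b));
	if (!t->b) {
		ret = -1;
		goto err_free_a;        /* unwind only what succeeded */
	}

	return 0;                       /* success: no cleanup jumps */

err_free_a:
	free(t->a);
	t->a = NULL;
err:
	return ret;
}

int main(void)
{
	struct thing t = { 0 };

	if (thing_init(&t))
		return 1;
	free(t.a);
	free(t.b);
	return 0;
}
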
 diff --git a/ipc/kdbus/util.c b/ipc/kdbus/util.c
 new file mode 100644
-index 0000000..eaa806a
+index 0000000..72b1883
 --- /dev/null
 +++ b/ipc/kdbus/util.c
-@@ -0,0 +1,201 @@
+@@ -0,0 +1,156 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -21594,51 +21644,6 @@ index 0000000..eaa806a
 +}
 +
 +/**
-+ * kdbus_memdup_user() - copy dynamically sized object from user-space
-+ * @user_ptr:	user-provided source buffer
-+ * @sz_min:	minimum object size
-+ * @sz_max:	maximum object size
-+ *
-+ * This copies a dynamically sized object from user-space into kernel-space. We
-+ * require the object to have a 64bit size field at offset 0. We read it out
-+ * first, allocate a suitably sized buffer and then copy all data.
-+ *
-+ * The @sz_min and @sz_max parameters define possible min and max object sizes
-+ * so user-space cannot trigger un-bound kernel-space allocations.
-+ *
-+ * The same alignment-restrictions as described in kdbus_copy_from_user() apply.
-+ *
-+ * Return: pointer to dynamically allocated copy, or ERR_PTR() on failure.
-+ */
-+void *kdbus_memdup_user(void __user *user_ptr, size_t sz_min, size_t sz_max)
-+{
-+	void *ptr;
-+	u64 size;
-+	int ret;
-+
-+	ret = kdbus_copy_from_user(&size, user_ptr, sizeof(size));
-+	if (ret < 0)
-+		return ERR_PTR(ret);
-+
-+	if (size < sz_min)
-+		return ERR_PTR(-EINVAL);
-+
-+	if (size > sz_max)
-+		return ERR_PTR(-EMSGSIZE);
-+
-+	ptr = memdup_user(user_ptr, size);
-+	if (IS_ERR(ptr))
-+		return ptr;
-+
-+	if (*(u64 *)ptr != size) {
-+		kfree(ptr);
-+		return ERR_PTR(-EINVAL);
-+	}
-+
-+	return ptr;
-+}
-+
-+/**
 + * kdbus_verify_uid_prefix() - verify UID prefix of a user-supplied name
 + * @name:	user-supplied name to verify
 + * @user_ns:	user-namespace to act in
@@ -21745,10 +21750,10 @@ index 0000000..eaa806a
 +}
 diff --git a/ipc/kdbus/util.h b/ipc/kdbus/util.h
 new file mode 100644
-index 0000000..740b198
+index 0000000..5297166
 --- /dev/null
 +++ b/ipc/kdbus/util.h
-@@ -0,0 +1,74 @@
+@@ -0,0 +1,73 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -21791,7 +21796,7 @@ index 0000000..740b198
 +({									\
 +	u64 __user *_sz =						\
 +		(void __user *)((u8 __user *)(_b) + offsetof(_t, _m));	\
-+	copy_to_user(_sz, _s, sizeof(((_t *)0)->_m));			\
++	copy_to_user(_sz, _s, FIELD_SIZEOF(_t, _m));			\
 +})
 +
 +/**
@@ -21815,7 +21820,6 @@ index 0000000..740b198
 +int kdbus_sanitize_attach_flags(u64 flags, u64 *attach_flags);
 +
 +int kdbus_copy_from_user(void *dest, void __user *user_ptr, size_t size);
-+void *kdbus_memdup_user(void __user *user_ptr, size_t sz_min, size_t sz_max);
 +
 +struct kvec;
 +
@@ -21876,7 +21880,7 @@ index 0000000..137f842
 +HOSTLOADLIBES_kdbus-workers := -lrt
 diff --git a/samples/kdbus/kdbus-api.h b/samples/kdbus/kdbus-api.h
 new file mode 100644
-index 0000000..5ed5907
+index 0000000..7f3abae
 --- /dev/null
 +++ b/samples/kdbus/kdbus-api.h
 @@ -0,0 +1,114 @@
@@ -21890,12 +21894,12 @@ index 0000000..5ed5907
 +#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
 +#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
 +#define KDBUS_ITEM_NEXT(item) \
-+	(typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++	(typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
 +#define KDBUS_FOREACH(iter, first, _size)				\
-+	for (iter = (first);						\
++	for ((iter) = (first);						\
 +	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
 +	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
-+	     iter = (void*)(((uint8_t *)iter) + KDBUS_ALIGN8((iter)->size)))
++	     (iter) = (void *)((uint8_t *)(iter) + KDBUS_ALIGN8((iter)->size)))
 +
 +static inline int kdbus_cmd_bus_make(int control_fd, struct kdbus_cmd *cmd)
 +{
@@ -21996,10 +22000,10 @@ index 0000000..5ed5907
 +#endif /* KDBUS_API_H */
 diff --git a/samples/kdbus/kdbus-workers.c b/samples/kdbus/kdbus-workers.c
 new file mode 100644
-index 0000000..d331e01
+index 0000000..c3ba958
 --- /dev/null
 +++ b/samples/kdbus/kdbus-workers.c
-@@ -0,0 +1,1326 @@
+@@ -0,0 +1,1345 @@
 +/*
 + * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
 + *
@@ -22059,6 +22063,12 @@ index 0000000..d331e01
 + * top-down, but requires some forward-declarations. Just ignore those.
 + */
 +
++#include <stdio.h>
++#include <stdlib.h>
++
++/* glibc < 2.7 does not ship sys/signalfd.h */
++#if __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 7
++
 +#include <ctype.h>
 +#include <errno.h>
 +#include <fcntl.h>
@@ -22067,8 +22077,6 @@ index 0000000..d331e01
 +#include <stdbool.h>
 +#include <stddef.h>
 +#include <stdint.h>
-+#include <stdio.h>
-+#include <stdlib.h>
 +#include <string.h>
 +#include <sys/mman.h>
 +#include <sys/poll.h>
@@ -23326,13 +23334,28 @@ index 0000000..d331e01
 +
 +	return fd;
 +}
-diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
-index 4e51122..7b51cce 100644
---- a/tools/testing/selftests/Makefile
-+++ b/tools/testing/selftests/Makefile
-@@ -5,6 +5,7 @@ TARGETS += exec
- TARGETS += firmware
- TARGETS += ftrace
++
++#else
++
++#warning "Skipping compilation due to unsupported libc version"
++
++int main(int argc, char **argv)
++{
++	fprintf(stderr,
++		"Compilation of %s was skipped due to unsupported libc.\n",
++		argv[0]);
++
++	return EXIT_FAILURE;
++}
++
++#endif /* libc sanity check */
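
The guard added above keeps the sample compiling on pre-2.7 glibc, which (per the patch's own comment) does not ship sys/signalfd.h. One nit if you reuse the idiom: "__GLIBC__ >= 2 && __GLIBC_MINOR__ >= 7" would reject a hypothetical glibc 3.0; the usual robust spelling compares the major version first, as in this standalone sketch:

#include <stdio.h>
#include <stdlib.h>

#if defined(__GLIBC__) && \
    (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
#include <sys/signalfd.h>   /* only shipped since glibc 2.7 */
#define HAVE_SIGNALFD 1
#else
#define HAVE_SIGNALFD 0
#endif

int main(void)
{
	printf("signalfd support compiled in: %d\n", HAVE_SIGNALFD);
	return EXIT_SUCCESS;
}
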
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 95abddc..b57100c 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -5,6 +5,7 @@ TARGETS += exec
+ TARGETS += firmware
+ TARGETS += ftrace
  TARGETS += kcmp
 +TARGETS += kdbus
  TARGETS += memfd
@@ -23347,10 +23370,10 @@ index 0000000..d3ef42f
 +kdbus-test
 diff --git a/tools/testing/selftests/kdbus/Makefile b/tools/testing/selftests/kdbus/Makefile
 new file mode 100644
-index 0000000..de8242f
+index 0000000..8f36cb5
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/Makefile
-@@ -0,0 +1,48 @@
+@@ -0,0 +1,49 @@
 +CFLAGS += -I../../../../usr/include/
 +CFLAGS += -I../../../../samples/kdbus/
 +CFLAGS += -I../../../../include/uapi/
@@ -23364,7 +23387,6 @@ index 0000000..de8242f
 +	kdbus-test.o		\
 +	kdbus-test.o		\
 +	test-activator.o	\
-+	test-attach-flags.o	\
 +	test-benchmark.o	\
 +	test-bus.o		\
 +	test-chat.o		\
@@ -23388,12 +23410,14 @@ index 0000000..de8242f
 +
 +include ../lib.mk
 +
-+%.o: %.c
++%.o: %.c kdbus-enum.h kdbus-test.h kdbus-util.h
 +	$(CC) $(CFLAGS) -c $< -o $@
 +
 +kdbus-test: $(OBJS)
 +	$(CC) $(CFLAGS) $^ $(LDLIBS) -o $@
 +
++TEST_PROGS := kdbus-test
++
 +run_tests:
 +	./kdbus-test --tap
 +
@@ -23501,10 +23525,10 @@ index 0000000..4f1e579
 +LOOKUP(PAYLOAD);
 diff --git a/tools/testing/selftests/kdbus/kdbus-enum.h b/tools/testing/selftests/kdbus/kdbus-enum.h
 new file mode 100644
-index 0000000..a67cec3
+index 0000000..ed28cca
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-enum.h
-@@ -0,0 +1,14 @@
+@@ -0,0 +1,15 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + *
@@ -23513,6 +23537,7 @@ index 0000000..a67cec3
 + * Free Software Foundation; either version 2.1 of the License, or (at
 + * your option) any later version.
 + */
++
 +#pragma once
 +
 +const char *enum_CMD(long long id);
@@ -23521,10 +23546,10 @@ index 0000000..a67cec3
 +const char *enum_PAYLOAD(long long id);
 diff --git a/tools/testing/selftests/kdbus/kdbus-test.c b/tools/testing/selftests/kdbus/kdbus-test.c
 new file mode 100644
-index 0000000..a43674c
+index 0000000..294e82a
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-test.c
-@@ -0,0 +1,923 @@
+@@ -0,0 +1,900 @@
 +#include <errno.h>
 +#include <stdio.h>
 +#include <string.h>
@@ -23575,7 +23600,6 @@ index 0000000..a43674c
 +	char *root;
 +	char *test;
 +	char *busname;
-+	char *mask_param_path;
 +};
 +
 +static const struct kdbus_test tests[] = {
@@ -23801,13 +23825,6 @@ index 0000000..a43674c
 +		.func	= kdbus_test_benchmark_uds,
 +		.flags	= TEST_CREATE_BUS,
 +	},
-+	{
-+		/* Last test */
-+		.name	= "attach-flags",
-+		.desc	= "attach flags mask",
-+		.func	= kdbus_test_attach_flags,
-+		.flags	= 0,
-+	},
 +};
 +
 +#define N_TESTS ((int) (sizeof(tests) / sizeof(tests[0])))
@@ -23850,7 +23867,6 @@ index 0000000..a43674c
 +
 +	env->root = args->root;
 +	env->module = args->module;
-+	env->mask_param_path = args->mask_param_path;
 +
 +	return 0;
 +}
@@ -24281,8 +24297,7 @@ index 0000000..a43674c
 +{
 +	int ret;
 +	bool namespaces;
-+	uint64_t kdbus_param_mask;
-+	static char fspath[4096], parampath[4096];
++	static char fspath[4096];
 +
 +	namespaces = (kdbus_args->mntns || kdbus_args->pidns ||
 +		      kdbus_args->userns);
@@ -24318,19 +24333,6 @@ index 0000000..a43674c
 +		kdbus_args->root = fspath;
 +	}
 +
-+	snprintf(parampath, sizeof(parampath),
-+		 "/sys/module/%s/parameters/attach_flags_mask",
-+		 kdbus_args->module);
-+	kdbus_args->mask_param_path = parampath;
-+
-+	ret = kdbus_sysfs_get_parameter_mask(kdbus_args->mask_param_path,
-+					     &kdbus_param_mask);
-+	if (ret < 0)
-+		return TEST_ERR;
-+
-+	printf("# Starting tests with an attach_flags_mask=0x%llx\n",
-+		(unsigned long long)kdbus_param_mask);
-+
 +	/* Start tests */
 +	if (namespaces)
 +		ret = run_tests_in_namespaces(kdbus_args);
@@ -24450,10 +24452,10 @@ index 0000000..a43674c
 +}
 diff --git a/tools/testing/selftests/kdbus/kdbus-test.h b/tools/testing/selftests/kdbus/kdbus-test.h
 new file mode 100644
-index 0000000..6473318
+index 0000000..a5c6ae8
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-test.h
-@@ -0,0 +1,85 @@
+@@ -0,0 +1,83 @@
 +#ifndef _TEST_KDBUS_H_
 +#define _TEST_KDBUS_H_
 +
@@ -24461,7 +24463,6 @@ index 0000000..6473318
 +	char *buspath;
 +	const char *root;
 +	const char *module;
-+	const char *mask_param_path;
 +	int control_fd;
 +	struct kdbus_conn *conn;
 +};
@@ -24500,7 +24501,6 @@ index 0000000..6473318
 +	ASSERT_EXIT_VAL(cond, EXIT_FAILURE)
 +
 +int kdbus_test_activator(struct kdbus_test_env *env);
-+int kdbus_test_attach_flags(struct kdbus_test_env *env);
 +int kdbus_test_benchmark(struct kdbus_test_env *env);
 +int kdbus_test_benchmark_nomemfds(struct kdbus_test_env *env);
 +int kdbus_test_benchmark_uds(struct kdbus_test_env *env);
@@ -24541,10 +24541,10 @@ index 0000000..6473318
 +#endif /* _TEST_KDBUS_H_ */
 diff --git a/tools/testing/selftests/kdbus/kdbus-util.c b/tools/testing/selftests/kdbus/kdbus-util.c
 new file mode 100644
-index 0000000..4b376ec
+index 0000000..29a0cb1
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-util.c
-@@ -0,0 +1,1615 @@
+@@ -0,0 +1,1617 @@
 +/*
 + * Copyright (C) 2013-2015 Daniel Mack
 + * Copyright (C) 2013-2015 Kay Sievers
@@ -24955,11 +24955,9 @@ index 0000000..4b376ec
 +{
 +	int ret, fd;
 +
-+	ret = syscall(__NR_memfd_create, name, MFD_ALLOW_SEALING);
-+	if (ret < 0)
-+		return ret;
-+
-+	fd = ret;
++	fd = syscall(__NR_memfd_create, name, MFD_ALLOW_SEALING);
++	if (fd < 0)
++		return fd;
 +
 +	ret = ftruncate(fd, size);
 +	if (ret < 0) {
@@ -25001,8 +24999,8 @@ index 0000000..4b376ec
 +			    uint64_t cmd_flags,
 +			    int cancel_fd)
 +{
-+	struct kdbus_cmd_send *cmd;
-+	struct kdbus_msg *msg;
++	struct kdbus_cmd_send *cmd = NULL;
++	struct kdbus_msg *msg = NULL;
 +	const char ref1[1024 * 128 + 3] = "0123456789_0";
 +	const char ref2[] = "0123456789_1";
 +	struct kdbus_item *item;
@@ -25011,10 +25009,7 @@ index 0000000..4b376ec
 +	int memfd = -1;
 +	int ret;
 +
-+	size = sizeof(*msg);
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
-+	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++	size = sizeof(*msg) + 3 * KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
 +
 +	if (dst_id == KDBUS_DST_ID_BROADCAST)
 +		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
@@ -25028,14 +25023,14 @@ index 0000000..4b376ec
 +		if (write(memfd, "kdbus memfd 1234567", 19) != 19) {
 +			ret = -errno;
 +			kdbus_printf("writing to memfd failed: %m\n");
-+			return ret;
++			goto out;
 +		}
 +
 +		ret = sys_memfd_seal_set(memfd);
 +		if (ret < 0) {
 +			ret = -errno;
 +			kdbus_printf("memfd sealing failed: %m\n");
-+			return ret;
++			goto out;
 +		}
 +
 +		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
@@ -25048,7 +25043,7 @@ index 0000000..4b376ec
 +	if (!msg) {
 +		ret = -errno;
 +		kdbus_printf("unable to malloc()!?\n");
-+		return ret;
++		goto out;
 +	}
 +
 +	if (dst_id == KDBUS_DST_ID_BROADCAST)
@@ -25066,7 +25061,7 @@ index 0000000..4b376ec
 +	if (timeout) {
 +		ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
 +		if (ret < 0)
-+			return ret;
++			goto out;
 +
 +		msg->timeout_ns = now.tv_sec * 1000000000ULL +
 +				  now.tv_nsec + timeout;
@@ -25117,6 +25112,12 @@ index 0000000..4b376ec
 +		size += KDBUS_ITEM_SIZE(sizeof(cancel_fd));
 +
 +	cmd = malloc(size);
++	if (!cmd) {
++		ret = -errno;
++		kdbus_printf("unable to malloc()!?\n");
++		goto out;
++	}
++
 +	cmd->size = size;
 +	cmd->flags = cmd_flags;
 +	cmd->msg_address = (uintptr_t)msg;
@@ -25131,12 +25132,9 @@ index 0000000..4b376ec
 +	}
 +
 +	ret = kdbus_cmd_send(conn->fd, cmd);
-+	if (memfd >= 0)
-+		close(memfd);
-+
 +	if (ret < 0) {
 +		kdbus_printf("error sending message: %d (%m)\n", ret);
-+		return ret;
++		goto out;
 +	}
 +
 +	if (cmd_flags & KDBUS_SEND_SYNC_REPLY) {
@@ -25150,13 +25148,17 @@ index 0000000..4b376ec
 +
 +		ret = kdbus_free(conn, cmd->reply.offset);
 +		if (ret < 0)
-+			return ret;
++			goto out;
 +	}
 +
++out:
 +	free(msg);
 +	free(cmd);
 +
-+	return 0;
++	if (memfd >= 0)
++		close(memfd);
++
++	return ret < 0 ? ret : 0;
 +}
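
The rewrite above funnels every exit of the function through a single
cleanup path, so the memfd and both heap buffers are released no matter
where a failure happens. The same idiom reduced to a standalone sketch;
the helper and its names are invented for illustration:

	#define _GNU_SOURCE
	#include <errno.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/memfd.h>

	/* On success, returns a memfd holding "text"; on error, returns
	 * -errno and leaks nothing, because all failure paths jump to
	 * the shared "out" label. */
	static int make_blob(const char *text)
	{
		char *copy = NULL;
		int memfd, ret = 0;

		memfd = syscall(__NR_memfd_create, "blob", MFD_ALLOW_SEALING);
		if (memfd < 0) {
			ret = -errno;
			goto out;
		}

		copy = strdup(text);
		if (!copy) {
			ret = -ENOMEM;
			goto out;
		}

		if (write(memfd, copy, strlen(copy)) < 0)
			ret = -errno;
	out:
		free(copy);			/* free(NULL) is a no-op */
		if (ret < 0 && memfd >= 0)
			close(memfd);
		return ret < 0 ? ret : memfd;
	}

	int main(void)
	{
		int fd = make_blob("kdbus memfd 1234567");

		if (fd >= 0)
			close(fd);
		return fd < 0;
	}
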
 +
 +int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
@@ -25900,7 +25902,7 @@ index 0000000..4b376ec
 +	return 0;
 +}
 +
-+int all_uids_gids_are_mapped()
++int all_uids_gids_are_mapped(void)
 +{
 +	int ret;
 +
@@ -26094,7 +26096,7 @@ index 0000000..4b376ec
 +	cap_t caps;
 +
 +	caps = cap_get_proc();
-+	if (!cap) {
++	if (!caps) {
 +		ret = -errno;
 +		kdbus_printf("error cap_get_proc(): %d (%m)\n", ret);
 +		return ret;
@@ -26162,10 +26164,10 @@ index 0000000..4b376ec
 +}
 diff --git a/tools/testing/selftests/kdbus/kdbus-util.h b/tools/testing/selftests/kdbus/kdbus-util.h
 new file mode 100644
-index 0000000..50ff071
+index 0000000..d1a0f1b
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/kdbus-util.h
-@@ -0,0 +1,222 @@
+@@ -0,0 +1,219 @@
 +/*
 + * Copyright (C) 2013-2015 Kay Sievers
 + * Copyright (C) 2013-2015 Daniel Mack
@@ -26175,6 +26177,7 @@ index 0000000..50ff071
 + * Free Software Foundation; either version 2.1 of the License, or (at
 + * your option) any later version.
 + */
++
 +#pragma once
 +
 +#define BIT(X) (1 << (X))
@@ -26194,28 +26197,26 @@ index 0000000..50ff071
 +#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
 +
 +#define KDBUS_ITEM_NEXT(item) \
-+	(typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++	(typeof(item))((uint8_t *)(item) + KDBUS_ALIGN8((item)->size))
 +#define KDBUS_ITEM_FOREACH(item, head, first)				\
-+	for (item = (head)->first;					\
++	for ((item) = (head)->first;					\
 +	     ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) &&	\
-+	       ((uint8_t *)(item) >= (uint8_t *)(head));	\
-+	     item = KDBUS_ITEM_NEXT(item))
++	       ((uint8_t *)(item) >= (uint8_t *)(head));		\
++	     (item) = KDBUS_ITEM_NEXT(item))
 +#define KDBUS_FOREACH(iter, first, _size)				\
-+	for (iter = (first);						\
++	for ((iter) = (first);						\
 +	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
 +	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
-+	     iter = (void*)(((uint8_t *)iter) + KDBUS_ALIGN8((iter)->size)))
++	     (iter) = (void *)((uint8_t *)(iter) + KDBUS_ALIGN8((iter)->size)))
 +
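
The extra parentheses added above are the standard macro-hygiene fix: an
argument that is itself an expression must be wrapped, or C operator
precedence regroups it inside the expansion. A tiny demonstration
(ALIGN8 mirrors KDBUS_ALIGN8; the values are arbitrary):

	#include <stdio.h>

	#define ALIGN8_BAD(s)	((s + 7) & ~7)		/* argument spliced raw */
	#define ALIGN8_GOOD(s)	(((s) + 7) & ~7)	/* argument kept atomic */

	int main(void)
	{
		int x = 9;

		/* ALIGN8_BAD(x | 1) expands to ((x | 1 + 7) & ~7): the "+"
		 * binds tighter than "|", so it yields 8 instead of the
		 * intended 16 that ALIGN8_GOOD(x | 1) produces. */
		printf("bad: %d, good: %d\n",
		       ALIGN8_BAD(x | 1), ALIGN8_GOOD(x | 1));
		return 0;
	}
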
-+
-+#define _KDBUS_ATTACH_BITS_SET_NR  (__builtin_popcountll(_KDBUS_ATTACH_ALL))
++#define _KDBUS_ATTACH_BITS_SET_NR (__builtin_popcountll(_KDBUS_ATTACH_ALL))
 +
 +/* Sum of KDBUS_ITEM_* that reflects _KDBUS_ATTACH_ALL */
-+#define KDBUS_ATTACH_ITEMS_TYPE_SUM \
-+	((((_KDBUS_ATTACH_BITS_SET_NR - 1) * \
-+	((_KDBUS_ATTACH_BITS_SET_NR - 1) + 1)) / 2 ) + \
++#define KDBUS_ATTACH_ITEMS_TYPE_SUM					\
++	((((_KDBUS_ATTACH_BITS_SET_NR - 1) *				\
++	((_KDBUS_ATTACH_BITS_SET_NR - 1) + 1)) / 2) +			\
 +	(_KDBUS_ITEM_ATTACH_BASE * _KDBUS_ATTACH_BITS_SET_NR))
 +
-+
 +#define POOL_SIZE (16 * 1024LU * 1024LU)
 +
 +#define UNPRIV_UID 65534
@@ -26273,7 +26274,7 @@ index 0000000..50ff071
 +	_setup_;							\
 +	efd = eventfd(0, EFD_CLOEXEC);					\
 +	ASSERT_RETURN(efd >= 0);					\
-+	*clone_ret = 0;							\
++	*(clone_ret) = 0;						\
 +	pid = syscall(__NR_clone, flags, NULL);				\
 +	if (pid == 0) {							\
 +		eventfd_t event_status = 0;				\
@@ -26298,7 +26299,7 @@ index 0000000..50ff071
 +		ret = TEST_OK;						\
 +	} else {							\
 +		ret = -errno;						\
-+		*clone_ret = -errno;					\
++		*(clone_ret) = -errno;					\
 +	}								\
 +	close(efd);							\
 +	ret;								\
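
The wrapper being touched here synchronizes parent and child through an
eventfd, so the parent learns whether the child's setup succeeded before
it continues. The handshake in isolation, with fork() standing in for the
raw clone() call the macro issues:

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/eventfd.h>
	#include <sys/wait.h>

	int main(void)
	{
		eventfd_t status;
		pid_t pid;
		int efd;

		efd = eventfd(0, EFD_CLOEXEC);
		if (efd < 0) {
			perror("eventfd");
			return EXIT_FAILURE;
		}

		pid = fork();
		if (pid < 0)
			return EXIT_FAILURE;
		if (pid == 0) {
			/* child: perform setup work, then wake the parent */
			eventfd_write(efd, 1);
			_exit(0);
		}

		eventfd_read(efd, &status);	/* blocks until child writes */
		printf("child signalled, status=%llu\n",
		       (unsigned long long)status);
		waitpid(pid, NULL, 0);
		close(efd);
		return 0;
	}
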
@@ -26375,14 +26376,12 @@ index 0000000..50ff071
 +		       uint64_t type, uint64_t id);
 +int kdbus_add_match_empty(struct kdbus_conn *conn);
 +
-+int all_uids_gids_are_mapped();
++int all_uids_gids_are_mapped(void);
 +int drop_privileges(uid_t uid, gid_t gid);
 +uint64_t now(clockid_t clock);
 +char *unique_name(const char *prefix);
 +
-+int userns_map_uid_gid(pid_t pid,
-+		       const char *map_uid,
-+		       const char *map_gid);
++int userns_map_uid_gid(pid_t pid, const char *map_uid, const char *map_gid);
 +int test_is_capable(int cap, ...);
 +int config_user_ns_is_enabled(void);
 +int config_auditsyscall_is_enabled(void);
@@ -26712,762 +26711,6 @@ index 0000000..3d1b763
 +
 +	return TEST_OK;
 +}
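
All of these test functions lean on the ASSERT_RETURN()/ASSERT_EXIT()
helpers from kdbus-test.h: a failed condition logs the expression and its
location, then bails out of the current test instead of aborting the whole
run. A reduced sketch of the mechanism; the TEST_* values follow the
convention used here, the rest is illustrative:

	#include <stdio.h>

	#define TEST_OK		0
	#define TEST_ERR	2

	#define ASSERT_RETURN(cond)					\
		do {							\
			if (!(cond)) {					\
				fprintf(stderr,				\
					"Assertion '%s' failed in %s:%d\n", \
					#cond, __FILE__, __LINE__);	\
				return TEST_ERR;			\
			}						\
		} while (0)

	static int demo_test(int value)
	{
		ASSERT_RETURN(value > 0);	/* fail -> TEST_ERR, no abort() */
		return TEST_OK;
	}

	int main(void)
	{
		printf("demo_test(1) = %d\n", demo_test(1));
		printf("demo_test(0) = %d\n", demo_test(0));
		return 0;
	}
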
-diff --git a/tools/testing/selftests/kdbus/test-attach-flags.c b/tools/testing/selftests/kdbus/test-attach-flags.c
-new file mode 100644
-index 0000000..deee7c3
---- /dev/null
-+++ b/tools/testing/selftests/kdbus/test-attach-flags.c
-@@ -0,0 +1,750 @@
-+#include <stdio.h>
-+#include <string.h>
-+#include <stdlib.h>
-+#include <stdbool.h>
-+#include <stddef.h>
-+#include <fcntl.h>
-+#include <unistd.h>
-+#include <stdint.h>
-+#include <errno.h>
-+#include <assert.h>
-+#include <sys/capability.h>
-+#include <sys/mman.h>
-+#include <sys/stat.h>
-+#include <sys/types.h>
-+#include <linux/unistd.h>
-+
-+#include "kdbus-api.h"
-+#include "kdbus-test.h"
-+#include "kdbus-util.h"
-+#include "kdbus-enum.h"
-+
-+/*
-+ * Should be the sum of the currently supported and compiled-in
-+ * KDBUS_ITEMS_* that reflect KDBUS_ATTACH_* flags.
-+ */
-+static unsigned int KDBUS_TEST_ITEMS_SUM = KDBUS_ATTACH_ITEMS_TYPE_SUM;
-+
-+static struct kdbus_conn *__kdbus_hello(const char *path, uint64_t flags,
-+					uint64_t attach_flags_send,
-+					uint64_t attach_flags_recv)
-+{
-+	struct kdbus_cmd_free cmd_free = {};
-+	int ret, fd;
-+	struct kdbus_conn *conn;
-+	struct {
-+		struct kdbus_cmd_hello hello;
-+
-+		struct {
-+			uint64_t size;
-+			uint64_t type;
-+			char str[16];
-+		} conn_name;
-+
-+		uint8_t extra_items[0];
-+	} h;
-+
-+	memset(&h, 0, sizeof(h));
-+
-+	kdbus_printf("-- opening bus connection %s\n", path);
-+	fd = open(path, O_RDWR|O_CLOEXEC);
-+	if (fd < 0) {
-+		kdbus_printf("--- error %d (%m)\n", fd);
-+		return NULL;
-+	}
-+
-+	h.hello.flags = flags | KDBUS_HELLO_ACCEPT_FD;
-+	h.hello.attach_flags_send = attach_flags_send;
-+	h.hello.attach_flags_recv = attach_flags_recv;
-+	h.conn_name.type = KDBUS_ITEM_CONN_DESCRIPTION;
-+	strcpy(h.conn_name.str, "this-is-my-name");
-+	h.conn_name.size = KDBUS_ITEM_HEADER_SIZE + strlen(h.conn_name.str) + 1;
-+
-+	h.hello.size = sizeof(h);
-+	h.hello.pool_size = POOL_SIZE;
-+
-+	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) &h.hello);
-+	if (ret < 0) {
-+		kdbus_printf("--- error when saying hello: %d (%m)\n", ret);
-+		return NULL;
-+	}
-+
-+	kdbus_printf("-- New connection ID : %llu\n",
-+		     (unsigned long long)h.hello.id);
-+
-+	cmd_free.size = sizeof(cmd_free);
-+	cmd_free.offset = h.hello.offset;
-+	ret = kdbus_cmd_free(fd, &cmd_free);
-+	if (ret < 0)
-+		return NULL;
-+
-+	conn = malloc(sizeof(*conn));
-+	if (!conn) {
-+		kdbus_printf("unable to malloc()!?\n");
-+		return NULL;
-+	}
-+
-+	conn->buf = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
-+	if (conn->buf == MAP_FAILED) {
-+		ret = -errno;
-+		free(conn);
-+		close(fd);
-+		kdbus_printf("--- error mmap: %d (%m)\n", ret);
-+		return NULL;
-+	}
-+
-+	conn->fd = fd;
-+	conn->id = h.hello.id;
-+	return conn;
-+}
-+
-+static int kdbus_test_peers_creation(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	int control_fd;
-+	char *path;
-+	char *busname;
-+	char buspath[2048];
-+	char control_path[2048];
-+	uint64_t attach_flags_mask;
-+	struct kdbus_conn *conn;
-+
-+	snprintf(control_path, sizeof(control_path),
-+		 "%s/control", env->root);
-+
-+	/*
-+	 * Set the kdbus system-wide mask to 0. This has nothing
-+	 * to do with the following tests (bus and connection
-+	 * creation, connection updates), but we do it to be
-+	 * sure that everything works as expected.
-+	 */
-+
-+	attach_flags_mask = 0;
-+	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+					     attach_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+	/*
-+	 * Create bus with a full set of ATTACH flags
-+	 */
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peers-creation-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, _KDBUS_ATTACH_ALL,
-+			       0, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	/*
-+	 * Create a connection with empty send attach flags, or
-+	 * with just KDBUS_ATTACH_CREDS; both should fail
-+	 */
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn == NULL);
-+	ASSERT_RETURN(errno == ECONNREFUSED);
-+
-+	conn = __kdbus_hello(buspath, 0, KDBUS_ATTACH_CREDS,
-+			     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(conn == NULL);
-+	ASSERT_RETURN(errno == ECONNREFUSED);
-+
-+	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/* Try to cut back some send attach flags */
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     KDBUS_ATTACH_CREDS|
-+					     KDBUS_ATTACH_PIDS,
-+					     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+
-+	/* Test a new bus with KDBUS_ATTACH_PIDS */
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peer-flags-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, KDBUS_ATTACH_PIDS,
-+			       0, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	/*
-+	 * Create a connection with an empty send attach flags, or
-+	 * all flags except KDBUS_ATTACH_PIDS
-+	 */
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn == NULL);
-+	ASSERT_RETURN(errno == ECONNREFUSED);
-+
-+	conn = __kdbus_hello(buspath, 0,
-+			     _KDBUS_ATTACH_ALL & ~KDBUS_ATTACH_PIDS,
-+			     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(conn == NULL);
-+	ASSERT_RETURN(errno == ECONNREFUSED);
-+
-+	/* The following should succeed */
-+	conn = __kdbus_hello(buspath, 0, KDBUS_ATTACH_PIDS, 0);
-+	ASSERT_RETURN(conn);
-+	kdbus_conn_free(conn);
-+
-+	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     _KDBUS_ATTACH_ALL &
-+					     ~KDBUS_ATTACH_PIDS,
-+					     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	ret = kdbus_conn_update_attach_flags(conn, 0,
-+					     _KDBUS_ATTACH_ALL);
-+	ASSERT_RETURN(ret == -EINVAL);
-+
-+	/* Now we want only KDBUS_ATTACH_PIDS */
-+	ret = kdbus_conn_update_attach_flags(conn,
-+					     KDBUS_ATTACH_PIDS, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+
-+	/*
-+	 * Create a bus with 0 as its ATTACH flags; such a bus does
-+	 * not require any attach flags from its peers
-+	 */
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peer-flags-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, 0, 0, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	/* The bus is open; it does not require any send attach flags */
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn);
-+	kdbus_conn_free(conn);
-+
-+	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	ret = kdbus_conn_update_attach_flags(conn, 0, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_update_attach_flags(conn, KDBUS_ATTACH_CREDS, 0);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+	return 0;
-+}
-+
-+static int kdbus_test_peers_info(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	int control_fd;
-+	char *path;
-+	char *busname;
-+	unsigned int i = 0;
-+	uint64_t offset = 0;
-+	char buspath[2048];
-+	char control_path[2048];
-+	uint64_t attach_flags_mask;
-+	struct kdbus_item *item;
-+	struct kdbus_info *info;
-+	struct kdbus_conn *conn;
-+	struct kdbus_conn *reader;
-+	unsigned long long attach_count = 0;
-+
-+	snprintf(control_path, sizeof(control_path),
-+		 "%s/control", env->root);
-+
-+	attach_flags_mask = 0;
-+	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+					     attach_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peers-info-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, _KDBUS_ATTACH_ALL,
-+			       0, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	/* Create connections with the appropriate flags */
-+	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(conn);
-+
-+	reader = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
-+	ASSERT_RETURN(reader);
-+
-+	ret = kdbus_conn_info(reader, conn->id, NULL,
-+			      _KDBUS_ATTACH_ALL, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(reader->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	/* all attach flags are masked, no metadata */
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		i++;
-+
-+	ASSERT_RETURN(i == 0);
-+
-+	kdbus_free(reader, offset);
-+
-+	/* Set the mask to _KDBUS_ATTACH_ANY */
-+	attach_flags_mask = _KDBUS_ATTACH_ANY;
-+	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+					     attach_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_conn_info(reader, conn->id, NULL,
-+			      _KDBUS_ATTACH_ALL, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(reader->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	attach_count = 0;
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		    attach_count += item->type;
-+
-+	/*
-+	 * All flags have been returned except for
-+	 * KDBUS_ITEM_TIMESTAMP and KDBUS_ITEM_OWNED_NAME,
-+	 * since we do not own any name.
-+	 */
-+	ASSERT_RETURN(attach_count == (KDBUS_TEST_ITEMS_SUM -
-+				       KDBUS_ITEM_OWNED_NAME -
-+				       KDBUS_ITEM_TIMESTAMP));
-+
-+	kdbus_free(reader, offset);
-+
-+	/* Request only OWNED names */
-+	ret = kdbus_conn_info(reader, conn->id, NULL,
-+			      KDBUS_ATTACH_NAMES, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(reader->buf + offset);
-+	ASSERT_RETURN(info->id == conn->id);
-+
-+	attach_count = 0;
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		attach_count += item->type;
-+
-+	/* we should not get any metadata since we do not own names */
-+	ASSERT_RETURN(attach_count == 0);
-+
-+	kdbus_free(reader, offset);
-+
-+	kdbus_conn_free(conn);
-+	kdbus_conn_free(reader);
-+
-+	return 0;
-+}
-+
-+/**
-+ * @kdbus_mask_param:	kdbus module mask parameter (system-wide)
-+ * @requested_meta:	The bus owner metadata that we want
-+ * @expected_items:	The returned KDBUS_ITEMS_* sum. Used to
-+ *			validate the returned metadata items
-+ */
-+static int kdbus_cmp_bus_creator_metadata(struct kdbus_test_env *env,
-+					  struct kdbus_conn *conn,
-+					  uint64_t kdbus_mask_param,
-+					  uint64_t requested_meta,
-+					  unsigned long expected_items)
-+{
-+	int ret;
-+	uint64_t offset = 0;
-+	struct kdbus_info *info;
-+	struct kdbus_item *item;
-+	unsigned long attach_count = 0;
-+
-+	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+					     kdbus_mask_param);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_bus_creator_info(conn, requested_meta, &offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	info = (struct kdbus_info *)(conn->buf + offset);
-+
-+	KDBUS_ITEM_FOREACH(item, info, items)
-+		attach_count += item->type;
-+
-+	ASSERT_RETURN(attach_count == expected_items);
-+
-+	ret = kdbus_free(conn, offset);
-+	ASSERT_RETURN(ret == 0);
-+
-+	return 0;
-+}
-+
-+static int kdbus_test_bus_creator_info(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	int control_fd;
-+	char *path;
-+	char *busname;
-+	char buspath[2048];
-+	char control_path[2048];
-+	uint64_t attach_flags_mask;
-+	struct kdbus_conn *conn;
-+	unsigned long expected_items = 0;
-+
-+	snprintf(control_path, sizeof(control_path),
-+		 "%s/control", env->root);
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peers-info-bus");
-+	ASSERT_RETURN(busname);
-+
-+	/*
-+	 * Now the bus allows us to see all its KDBUS_ATTACH_*
-+	 * items
-+	 */
-+	ret = kdbus_create_bus(control_fd, busname, 0,
-+			       _KDBUS_ATTACH_ALL, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/*
-+	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
-+	 */
-+	attach_flags_mask = _KDBUS_ATTACH_ANY;
-+
-+	/*
-+	 * All flags will be returned except for:
-+	 * KDBUS_ITEM_TIMESTAMP
-+	 * KDBUS_ITEM_OWNED_NAME
-+	 * KDBUS_ITEM_CONN_DESCRIPTION
-+	 *
-+	 * An extra item, KDBUS_ITEM_MAKE_NAME, is always
-+	 * returned; it contains the bus name.
-+	 */
-+	expected_items = KDBUS_TEST_ITEMS_SUM + KDBUS_ITEM_MAKE_NAME;
-+	expected_items -= KDBUS_ITEM_TIMESTAMP +
-+			  KDBUS_ITEM_OWNED_NAME +
-+			  KDBUS_ITEM_CONN_DESCRIPTION;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * We should have:
-+	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME
-+	 */
-+	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS +
-+			 KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     KDBUS_ATTACH_PIDS |
-+					     KDBUS_ATTACH_CREDS,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* KDBUS_ITEM_MAKE_NAME is always returned */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     0, expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Restrict kdbus system-wide mask to KDBUS_ATTACH_PIDS
-+	 */
-+
-+	attach_flags_mask = KDBUS_ATTACH_PIDS;
-+
-+	/*
-+	 * We should have:
-+	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME
-+	 */
-+	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+	/* system-wide mask to 0 */
-+	attach_flags_mask = 0;
-+
-+	/* we should only see: KDBUS_ITEM_MAKE_NAME */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+
-+	/*
-+	 * A new bus that hides all its owner metadata
-+	 */
-+
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peers-info-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, 0, 0, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/*
-+	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
-+	 */
-+	attach_flags_mask = _KDBUS_ATTACH_ANY;
-+
-+	/*
-+	 * We only get the KDBUS_ITEM_MAKE_NAME
-+	 */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * We still get only KDBUS_ITEM_MAKE_NAME
-+	 */
-+	attach_flags_mask = 0;
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+
-+	/*
-+	 * A new bus that shows only the PID and CREDS metadata
-+	 * of the bus owner.
-+	 */
-+	control_fd = open(control_path, O_RDWR);
-+	ASSERT_RETURN(control_fd >= 0);
-+
-+	busname = unique_name("test-peers-info-bus");
-+	ASSERT_RETURN(busname);
-+
-+	ret = kdbus_create_bus(control_fd, busname, 0,
-+			       KDBUS_ATTACH_PIDS|
-+			       KDBUS_ATTACH_CREDS, &path);
-+	ASSERT_RETURN(ret == 0);
-+
-+	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
-+
-+	conn = __kdbus_hello(buspath, 0, 0, 0);
-+	ASSERT_RETURN(conn);
-+
-+	/*
-+	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
-+	 */
-+	attach_flags_mask = _KDBUS_ATTACH_ANY;
-+
-+	/*
-+	 * We should have:
-+	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME
-+	 */
-+	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS +
-+			 KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	expected_items = KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     KDBUS_ATTACH_CREDS,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* KDBUS_ITEM_MAKE_NAME is always returned */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     0, expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Restrict kdbus system-wide mask to KDBUS_ATTACH_PIDS
-+	 */
-+
-+	attach_flags_mask = KDBUS_ATTACH_PIDS;
-+	/*
-+	 * We should have:
-+	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME
-+	 */
-+	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* No KDBUS_ATTACH_CREDS */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     KDBUS_ATTACH_CREDS,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* system-wide mask to 0 */
-+	attach_flags_mask = 0;
-+
-+	/* we should only see: KDBUS_ITEM_MAKE_NAME */
-+	expected_items = KDBUS_ITEM_MAKE_NAME;
-+	ret = kdbus_cmp_bus_creator_metadata(env, conn,
-+					     attach_flags_mask,
-+					     _KDBUS_ATTACH_ALL,
-+					     expected_items);
-+	ASSERT_RETURN(ret == 0);
-+
-+
-+	kdbus_conn_free(conn);
-+	free(path);
-+	free(busname);
-+	close(control_fd);
-+
-+	return 0;
-+}
-+
-+int kdbus_test_attach_flags(struct kdbus_test_env *env)
-+{
-+	int ret;
-+	uint64_t flags_mask;
-+	uint64_t old_kdbus_flags_mask;
-+
-+	/* We need CAP_DAC_OVERRIDE to overwrite the kdbus mask */
-+	ret = test_is_capable(CAP_DAC_OVERRIDE, -1);
-+	ASSERT_RETURN(ret >= 0);
-+
-+	/* not enough privileges, SKIP the test */
-+	if (!ret)
-+		return TEST_SKIP;
-+
-+	/*
-+	 * We need to be able to write to
-+	 * "/sys/module/kdbus/parameters/attach_flags_mask"
-+	 * perhaps we are unprvileged/privileged in its userns
-+	 */
-+	ret = access(env->mask_param_path, W_OK);
-+	if (ret < 0) {
-+		kdbus_printf("--- access() '%s' failed: %d (%m)\n",
-+			     env->mask_param_path, -errno);
-+		return TEST_SKIP;
-+	}
-+
-+	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
-+					     &old_kdbus_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/* setup the right KDBUS_TEST_ITEMS_SUM */
-+	if (!config_auditsyscall_is_enabled())
-+		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_AUDIT;
-+
-+	if (!config_cgroups_is_enabled())
-+		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_CGROUP;
-+
-+	if (!config_security_is_enabled())
-+		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_SECLABEL;
-+
-+	/*
-+	 * Test the connection creation attach flags
-+	 */
-+	ret = kdbus_test_peers_creation(env);
-+	/* Restore previous kdbus mask */
-+	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+				       old_kdbus_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test the CONN_INFO attach flags
-+	 */
-+	ret = kdbus_test_peers_info(env);
-+	/* Restore previous kdbus mask */
-+	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+				       old_kdbus_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	/*
-+	 * Test the Bus creator info and its attach flags
-+	 */
-+	ret = kdbus_test_bus_creator_info(env);
-+	/* Restore previous kdbus mask */
-+	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
-+				       old_kdbus_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
-+					     &flags_mask);
-+	ASSERT_RETURN(ret == 0 && old_kdbus_flags_mask == flags_mask);
-+
-+	return TEST_OK;
-+}
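
The deleted test above drove the kdbus attach_flags_mask module parameter
through sysfs. For reference, a minimal reader for such a parameter could
look like the following; both the path and the hex format are assumptions
based on the removed helpers, not a documented interface:

	#include <inttypes.h>
	#include <stdio.h>

	static int sysfs_get_u64(const char *path, uint64_t *out)
	{
		FILE *f = fopen(path, "r");
		int ok;

		if (!f)
			return -1;
		/* the removed helpers parsed the mask as a hex value */
		ok = (fscanf(f, "%" SCNx64, out) == 1);
		fclose(f);
		return ok ? 0 : -1;
	}

	int main(void)
	{
		const char *p = "/sys/module/kdbus/parameters/attach_flags_mask";
		uint64_t mask;

		if (sysfs_get_u64(p, &mask) == 0)
			printf("attach_flags_mask=0x%" PRIx64 "\n", mask);
		return 0;
	}
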
 diff --git a/tools/testing/selftests/kdbus/test-benchmark.c b/tools/testing/selftests/kdbus/test-benchmark.c
 new file mode 100644
 index 0000000..8a9744b
@@ -28236,10 +27479,10 @@ index 0000000..71a92d8
 +}
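
For the benchmark tests whose diff ends here, the usual measurement
approach is to sample CLOCK_MONOTONIC around a tight loop and report
nanoseconds per iteration. A self-contained sketch of that timing
skeleton (the loop body is a stand-in, not a kdbus transaction):

	#include <stdio.h>
	#include <stdint.h>
	#include <time.h>

	static uint64_t now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	}

	int main(void)
	{
		enum { ITERS = 1000000 };
		volatile uint64_t sink = 0;
		uint64_t t0, t1;
		int i;

		t0 = now_ns();
		for (i = 0; i < ITERS; i++)
			sink += i;	/* one message round would go here */
		t1 = now_ns();

		printf("%.1f ns/iter\n", (double)(t1 - t0) / ITERS);
		return 0;
	}
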
 diff --git a/tools/testing/selftests/kdbus/test-connection.c b/tools/testing/selftests/kdbus/test-connection.c
 new file mode 100644
-index 0000000..5c2bf35
+index 0000000..e7c4866
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-connection.c
-@@ -0,0 +1,616 @@
+@@ -0,0 +1,606 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <fcntl.h>
@@ -28427,13 +27670,10 @@ index 0000000..5c2bf35
 +	int ret;
 +	unsigned int cnt = 0;
 +	uint64_t offset = 0;
-+	uint64_t kdbus_flags_mask;
 +	struct kdbus_info *info;
 +	struct kdbus_conn *conn;
 +	struct kdbus_conn *privileged;
 +	const struct kdbus_item *item;
-+	uint64_t valid_flags_set;
-+	uint64_t invalid_flags_set;
 +	uint64_t valid_flags = KDBUS_ATTACH_NAMES |
 +			       KDBUS_ATTACH_CREDS |
 +			       KDBUS_ATTACH_PIDS |
@@ -28469,13 +27709,6 @@ index 0000000..5c2bf35
 +		.ppid	= getppid(),
 +	};
 +
-+	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
-+					     &kdbus_flags_mask);
-+	ASSERT_RETURN(ret == 0);
-+
-+	valid_flags_set = valid_flags & kdbus_flags_mask;
-+	invalid_flags_set = invalid_flags & kdbus_flags_mask;
-+
 +	ret = kdbus_conn_info(env->conn, env->conn->id, NULL,
 +			      valid_flags, &offset);
 +	ASSERT_RETURN(ret == 0);
@@ -28488,7 +27721,7 @@ index 0000000..5c2bf35
 +	ASSERT_RETURN(item == NULL);
 +
 +	item = kdbus_get_item(info, KDBUS_ITEM_CONN_DESCRIPTION);
-+	if (valid_flags_set & KDBUS_ATTACH_CONN_DESCRIPTION) {
++	if (valid_flags & KDBUS_ATTACH_CONN_DESCRIPTION) {
 +		ASSERT_RETURN(item);
 +	} else {
 +		ASSERT_RETURN(item == NULL);
@@ -28513,7 +27746,7 @@ index 0000000..5c2bf35
 +	ASSERT_RETURN(item == NULL);
 +
 +	cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
-+	if (valid_flags_set & KDBUS_ATTACH_CREDS) {
++	if (valid_flags & KDBUS_ATTACH_CREDS) {
 +		ASSERT_RETURN(cnt == 1);
 +
 +		item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
@@ -28527,7 +27760,7 @@ index 0000000..5c2bf35
 +	}
 +
 +	item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
-+	if (valid_flags_set & KDBUS_ATTACH_PIDS) {
++	if (valid_flags & KDBUS_ATTACH_PIDS) {
 +		ASSERT_RETURN(item);
 +
 +		/* Compare item->pids with cached PIDs */
@@ -28554,7 +27787,7 @@ index 0000000..5c2bf35
 +	ASSERT_RETURN(info->id == conn->id);
 +
 +	item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
-+	if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++	if (valid_flags & KDBUS_ATTACH_NAMES) {
 +		ASSERT_RETURN(item && !strcmp(item->name.name, "com.example.a"));
 +	} else {
 +		ASSERT_RETURN(item == NULL);
@@ -28582,14 +27815,14 @@ index 0000000..5c2bf35
 +		info = (struct kdbus_info *)(conn->buf + offset);
 +		ASSERT_EXIT(info->id == conn->id);
 +
-+		if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++		if (valid_flags & KDBUS_ATTACH_NAMES) {
 +			item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
 +			ASSERT_EXIT(item &&
 +				    strcmp(item->name.name,
 +				           "com.example.a") == 0);
 +		}
 +
-+		if (valid_flags_set & KDBUS_ATTACH_CREDS) {
++		if (valid_flags & KDBUS_ATTACH_CREDS) {
 +			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
 +			ASSERT_EXIT(item);
 +
@@ -28598,7 +27831,7 @@ index 0000000..5c2bf35
 +				    sizeof(struct kdbus_creds)) == 0);
 +		}
 +
-+		if (valid_flags_set & KDBUS_ATTACH_PIDS) {
++		if (valid_flags & KDBUS_ATTACH_PIDS) {
 +			item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
 +			ASSERT_EXIT(item);
 +
@@ -28627,7 +27860,7 @@ index 0000000..5c2bf35
 +		 * it points to the cached creds.
 +		 */
 +		cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
-+		if (invalid_flags_set & KDBUS_ATTACH_CREDS) {
++		if (invalid_flags & KDBUS_ATTACH_CREDS) {
 +			ASSERT_EXIT(cnt == 1);
 +
 +			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
@@ -28640,7 +27873,7 @@ index 0000000..5c2bf35
 +			ASSERT_EXIT(cnt == 0);
 +		}
 +
-+		if (invalid_flags_set & KDBUS_ATTACH_PIDS) {
++		if (invalid_flags & KDBUS_ATTACH_PIDS) {
 +			cnt = kdbus_count_item(info, KDBUS_ITEM_PIDS);
 +			ASSERT_EXIT(cnt == 1);
 +
@@ -28653,14 +27886,14 @@ index 0000000..5c2bf35
 +		}
 +
 +		cnt = kdbus_count_item(info, KDBUS_ITEM_CGROUP);
-+		if (invalid_flags_set & KDBUS_ATTACH_CGROUP) {
++		if (invalid_flags & KDBUS_ATTACH_CGROUP) {
 +			ASSERT_EXIT(cnt == 1);
 +		} else {
 +			ASSERT_EXIT(cnt == 0);
 +		}
 +
 +		cnt = kdbus_count_item(info, KDBUS_ITEM_CAPS);
-+		if (invalid_flags_set & KDBUS_ATTACH_CAPS) {
++		if (invalid_flags & KDBUS_ATTACH_CAPS) {
 +			ASSERT_EXIT(cnt == 1);
 +		} else {
 +			ASSERT_EXIT(cnt == 0);
@@ -28684,7 +27917,7 @@ index 0000000..5c2bf35
 +	ASSERT_RETURN(info->id == conn->id);
 +
 +	cnt = kdbus_count_item(info, KDBUS_ITEM_OWNED_NAME);
-+	if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++	if (valid_flags & KDBUS_ATTACH_NAMES) {
 +		ASSERT_RETURN(cnt == 2);
 +	} else {
 +		ASSERT_RETURN(cnt == 0);
@@ -28929,10 +28162,10 @@ index 0000000..8bc2386
 +}
 diff --git a/tools/testing/selftests/kdbus/test-endpoint.c b/tools/testing/selftests/kdbus/test-endpoint.c
 new file mode 100644
-index 0000000..dcc6ab9
+index 0000000..34a7be4
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-endpoint.c
-@@ -0,0 +1,341 @@
+@@ -0,0 +1,352 @@
 +#include <stdio.h>
 +#include <string.h>
 +#include <fcntl.h>
@@ -29190,6 +28423,13 @@ index 0000000..dcc6ab9
 +	ep_conn = kdbus_hello(ep, 0, NULL, 0);
 +	ASSERT_RETURN(ep_conn);
 +
++	/* Check that the reader got the IdAdd notification */
++	ret = kdbus_msg_recv(reader, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_ADD);
++	ASSERT_RETURN(msg->items[0].id_change.id == ep_conn->id);
++	kdbus_msg_free(msg);
++
 +	/*
 +	 * Add a name add match on the endpoint connection, acquire name from
 +	 * the unfiltered connection, and make sure the filtered connection
@@ -29218,7 +28458,7 @@ index 0000000..dcc6ab9
 +	ret = kdbus_conn_info(ep_conn, 0x0fffffffffffffffULL, NULL, 0, NULL);
 +	ASSERT_RETURN(ret == -ENXIO);
 +
-+	/* Check that the reader did not receive anything */
++	/* Check that the reader did not receive the name notification */
 +	ret = kdbus_msg_recv(reader, NULL, NULL);
 +	ASSERT_RETURN(ret == -EAGAIN);
 +
@@ -29230,6 +28470,10 @@ index 0000000..dcc6ab9
 +	ret = kdbus_name_release(env->conn, name);
 +	ASSERT_RETURN(ret == 0);
 +
++	/* Check that the reader did not receive the name notification */
++	ret = kdbus_msg_recv(reader, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
 +	ret = update_endpoint(ep_fd, name);
 +	ASSERT_RETURN(ret == 0);
 +
@@ -31325,10 +30569,10 @@ index 0000000..f1615da
 +}
 diff --git a/tools/testing/selftests/kdbus/test-metadata-ns.c b/tools/testing/selftests/kdbus/test-metadata-ns.c
 new file mode 100644
-index 0000000..2cb1d4d
+index 0000000..ccdfae0
 --- /dev/null
 +++ b/tools/testing/selftests/kdbus/test-metadata-ns.c
-@@ -0,0 +1,506 @@
+@@ -0,0 +1,503 @@
 +/*
 + * Test metadata in new namespaces. Even if our tests can run
 + * in a namespaced setup, this test is necessary so we can inspect
@@ -31612,16 +30856,13 @@ index 0000000..2cb1d4d
 +static int kdbus_clone_userns_test(const char *bus,
 +				   struct kdbus_conn *conn)
 +{
-+	int ret;
-+	int status;
-+	int efd = -1;
++	int ret, status, efd;
 +	pid_t pid, ppid;
-+	uint64_t unpriv_conn_id = 0;
-+	uint64_t userns_conn_id = 0;
++	uint64_t unpriv_conn_id, userns_conn_id;
 +	struct kdbus_msg *msg;
 +	const struct kdbus_item *item;
 +	struct kdbus_pids expected_pids;
-+	struct kdbus_conn *monitor = NULL;
++	struct kdbus_conn *monitor;
 +
 +	kdbus_printf("STARTING TEST 'metadata-ns'.\n");
 +


^ permalink raw reply related	[flat|nested] 71+ messages in thread
* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-06-26 22:36 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-06-26 22:36 UTC (permalink / raw
  To: gentoo-commits

commit:     20ba937d586c2e0e21e4f9bf37a1a2935c996147
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Jun 26 22:36:56 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Jun 26 22:36:56 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=20ba937d

Add the kernel-level IPC implementation kdbus as an optional patch.

 0000_README              |     4 +
 5015_kdbus-4.1-rc1.patch | 34698 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 34702 insertions(+)

diff --git a/0000_README b/0000_README
index 69f60dc..d33ec2f 100644
--- a/0000_README
+++ b/0000_README
@@ -82,3 +82,7 @@ Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
 Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
 From:   https://github.com/graysky2/kernel_gcc_patch/
 Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.
+
+Patch:  5015_kdbus-4.1-rc1.patch
+From:   https://lkml.org
+Desc:   Kernel-level IPC implementation

diff --git a/5015_kdbus-4.1-rc1.patch b/5015_kdbus-4.1-rc1.patch
new file mode 100644
index 0000000..a5169bd
--- /dev/null
+++ b/5015_kdbus-4.1-rc1.patch
@@ -0,0 +1,34698 @@
+diff --git a/Documentation/Makefile b/Documentation/Makefile
+index 6883a1b..5e3fde6 100644
+--- a/Documentation/Makefile
++++ b/Documentation/Makefile
+@@ -1,4 +1,4 @@
+ subdir-y := accounting arm auxdisplay blackfin connector \
+-	filesystems filesystems ia64 laptops mic misc-devices \
++	filesystems filesystems ia64 kdbus laptops mic misc-devices \
+ 	networking pcmcia prctl ptp spi timers vDSO video4linux \
+ 	watchdog
+diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
+index 8136e1f..54e091e 100644
+--- a/Documentation/ioctl/ioctl-number.txt
++++ b/Documentation/ioctl/ioctl-number.txt
+@@ -292,6 +292,7 @@ Code  Seq#(hex)	Include File		Comments
+ 0x92	00-0F	drivers/usb/mon/mon_bin.c
+ 0x93	60-7F	linux/auto_fs.h
+ 0x94	all	fs/btrfs/ioctl.h
++0x95	all	uapi/linux/kdbus.h	kdbus IPC driver
+ 0x97	00-7F	fs/ceph/ioctl.h		Ceph file system
+ 0x99	00-0F				537-Addinboard driver
+ 					<mailto:buk@buks.ipn.de>
+diff --git a/Documentation/kdbus/.gitignore b/Documentation/kdbus/.gitignore
+new file mode 100644
+index 0000000..b4a77cc
+--- /dev/null
++++ b/Documentation/kdbus/.gitignore
+@@ -0,0 +1,2 @@
++*.7
++*.html
+diff --git a/Documentation/kdbus/Makefile b/Documentation/kdbus/Makefile
+new file mode 100644
+index 0000000..af87641
+--- /dev/null
++++ b/Documentation/kdbus/Makefile
+@@ -0,0 +1,40 @@
++DOCS :=	\
++	kdbus.xml		\
++	kdbus.bus.xml		\
++	kdbus.connection.xml	\
++	kdbus.endpoint.xml	\
++	kdbus.fs.xml		\
++	kdbus.item.xml		\
++	kdbus.match.xml		\
++	kdbus.message.xml	\
++	kdbus.name.xml		\
++	kdbus.policy.xml	\
++	kdbus.pool.xml
++
++XMLFILES := $(addprefix $(obj)/,$(DOCS))
++MANFILES := $(patsubst %.xml, %.7, $(XMLFILES))
++HTMLFILES := $(patsubst %.xml, %.html, $(XMLFILES))
++
++XMLTO_ARGS := -m $(srctree)/$(src)/stylesheet.xsl --skip-validation
++
++quiet_cmd_db2man = MAN     $@
++      cmd_db2man = xmlto man $(XMLTO_ARGS) -o $(obj) $<
++%.7: %.xml
++	@(which xmlto > /dev/null 2>&1) || \
++	 (echo "*** You need to install xmlto ***"; \
++	  exit 1)
++	$(call cmd,db2man)
++
++quiet_cmd_db2html = HTML    $@
++      cmd_db2html = xmlto html-nochunks $(XMLTO_ARGS) -o $(obj) $<
++%.html: %.xml
++	@(which xmlto > /dev/null 2>&1) || \
++	 (echo "*** You need to install xmlto ***"; \
++	  exit 1)
++	$(call cmd,db2html)
++
++mandocs: $(MANFILES)
++
++htmldocs: $(HTMLFILES)
++
++clean-files := $(MANFILES) $(HTMLFILES)
+diff --git a/Documentation/kdbus/kdbus.bus.xml b/Documentation/kdbus/kdbus.bus.xml
+new file mode 100644
+index 0000000..4b9a0ac
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.bus.xml
+@@ -0,0 +1,359 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.bus">
++
++  <refentryinfo>
++    <title>kdbus.bus</title>
++    <productname>kdbus.bus</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.bus</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.bus</refname>
++    <refpurpose>kdbus bus</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      A bus is a resource that is shared between connections in order to
++      transmit messages (see
++      <citerefentry>
++        <refentrytitle>kdbus.message</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>).
++      Each bus is independent, and operations on the bus will not have any
++      effect on other buses. A bus is a management entity that controls the
++      addresses of its connections, their policies and message transactions
++      performed via this bus.
++    </para>
++    <para>
++      Each bus is bound to the mount instance it was created on. It has a
++      custom name that is unique across all buses of a domain. In
++      <citerefentry>
++        <refentrytitle>kdbus.fs</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      a bus is presented as a directory. No operations can be performed on
++      the bus itself; instead you need to perform the operations on an endpoint
++      associated with the bus. Endpoints are accessible as files underneath the
++      bus directory. A default endpoint called <constant>bus</constant> is
++      provided on each bus.
++    </para>
++    <para>
++      Bus names may be chosen freely except for one restriction: the name must
++      be prefixed with the numeric effective UID of the creator and a dash. This
++      is required to avoid namespace clashes between different users. When
++      creating a bus, the name that is passed in must be properly formatted, or
++      the kernel will refuse creation of the bus. Example:
++      <literal>1047-foobar</literal> is an acceptable name for a bus
++      registered by a user with UID 1047. However,
++      <literal>1024-foobar</literal> is not, and neither is
++      <literal>foobar</literal>. The UID must be provided in the
++      user-namespace of the bus owner.
++    </para>
++    <para>
++      To create a new bus, you need to open the control file of a domain and
++      employ the <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl. The control
++      file descriptor that was used to issue
++      <constant>KDBUS_CMD_BUS_MAKE</constant> must not previously have been
++      used for any other control-ioctl and must be kept open for the entire
++      life-time of the created bus. Closing it will immediately clean up the
++      entire bus and all its associated resources and endpoints. Every control
++      file descriptor can only be used to create a single new bus; from that
++      point on, it is not used for any further communication until the final
++      <citerefentry>
++        <refentrytitle>close</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>
++      .
++    </para>
++    <para>
++      Each bus will generate a random, 128-bit UUID upon creation. This UUID
++      will be returned to creators of connections through
++      <varname>kdbus_cmd_hello.id128</varname> and can be used to uniquely
++      identify buses, even across different machines or containers. The UUID
++      will have its variant bits set to <literal>DCE</literal>, and denote
++      version 4 (random). For more details on UUIDs, see <ulink
++      url="https://en.wikipedia.org/wiki/Universally_unique_identifier">
++      the Wikipedia article on UUIDs</ulink>.
++    </para>
++
++  </refsect1>
++
++  <refsect1>
++    <title>Creating buses</title>
++    <para>
++      To create a new bus, the <constant>KDBUS_CMD_BUS_MAKE</constant>
++      command is used. It takes a <type>struct kdbus_cmd</type> argument.
++    </para>
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>The flags for creation.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_MAKE_ACCESS_GROUP</constant></term>
++              <listitem>
++                <para>Make the bus file group-accessible.</para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_MAKE_ACCESS_WORLD</constant></term>
++              <listitem>
++                <para>Make the bus file world-accessible.</para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Requests a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following items (see
++            <citerefentry>
++              <refentrytitle>kdbus.item</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>)
++            are expected for <constant>KDBUS_CMD_BUS_MAKE</constant>.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
++              <listitem>
++                <para>
++                  Contains a null-terminated string that identifies the
++                  bus. The name must be unique across the kdbus domain and
++                  must start with the effective UID of the caller, followed by
++                  a '<literal>-</literal>' (dash). This item is mandatory.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
++              <listitem>
++                <para>
++                  Bus-wide bloom parameters passed in a
++                  <type>struct kdbus_bloom_parameter</type>. These settings are
++                  copied back to new connections verbatim. This item is
++                  mandatory. See
++                  <citerefentry>
++                    <refentrytitle>kdbus.item</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>
++                  for a more detailed description of this item.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
++              <listitem>
++                <para>
++                  An optional item that contains a set of required attach flags
++                  that connections must allow. This item is used as a
++                  negotiation measure during connection creation. If connections
++                  do not satisfy the bus requirements, they are not allowed on
++                  the bus. If not set, the bus does not require any metadata to
++                  be attached; in this case connections are free to set their
++                  own attach flags.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
++              <listitem>
++                <para>
++                  An optional item that contains a set of attach flags that are
++                  returned to connections when they query the bus creator
++                  metadata. If not set, no metadata is returned.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Unrecognized items are rejected, and the ioctl will fail with
++      <varname>errno</varname> set to <constant>EINVAL</constant>.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_BUS_MAKE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EBADMSG</constant></term>
++          <listitem><para>
++            A mandatory item is missing.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The flags supplied in the <constant>struct kdbus_cmd</constant>
++            are invalid or the supplied name does not start with the current
++            UID and a '<literal>-</literal>' (dash).
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EEXIST</constant></term>
++          <listitem><para>
++            A bus of that name already exists.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ESHUTDOWN</constant></term>
++          <listitem><para>
++            The kdbus mount instance for the bus was already shut down.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMFILE</constant></term>
++          <listitem><para>
++            The maximum number of buses for the current user is exhausted.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.connection.xml b/Documentation/kdbus/kdbus.connection.xml
+new file mode 100644
+index 0000000..cefb419
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.connection.xml
+@@ -0,0 +1,1250 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.connection">
++
++  <refentryinfo>
++    <title>kdbus.connection</title>
++    <productname>kdbus.connection</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.connection</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.connection</refname>
++    <refpurpose>kdbus connection</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      Connections are identified by their <emphasis>connection ID</emphasis>,
++      internally implemented as a <type>uint64_t</type> counter.
++      On each newly created bus, connection IDs start at
++      <constant>1</constant>, and every new connection increments the
++      counter by <constant>1</constant>. IDs are never reused.
++    </para>
++    <para>
++      In higher level tools, the user visible representation of a connection is
++      defined by the D-Bus protocol specification as
++      <constant>":1.&lt;ID&gt;"</constant>.
++    </para>
++    <para>
++      Messages with a specific <type>uint64_t</type> destination ID are
++      directly delivered to the connection with the corresponding ID. Signal
++      messages (see
++      <citerefentry>
++        <refentrytitle>kdbus.message</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>)
++      may be addressed to the special destination ID
++      <constant>KDBUS_DST_ID_BROADCAST</constant> (~0ULL) and will then
++      potentially be delivered to all currently active connections on the bus.
++      However, in order to receive any signal messages, clients must subscribe
++      to them by installing a match (see
++      <citerefentry>
++        <refentrytitle>kdbus.match</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>).
++    </para>
++    <para>
++      Messages synthesized and sent directly by the kernel will carry the
++      special source ID <constant>KDBUS_SRC_ID_KERNEL</constant> (0).
++    </para>
++    <para>
++      In addition to the unique <type>uint64_t</type> connection ID,
++      established connections can request the ownership of
++      <emphasis>well-known names</emphasis>, under which they can be found and
++      addressed by other bus clients. A well-known name is associated with one
++      and only one connection at a time. See
++      <citerefentry>
++        <refentrytitle>kdbus.name</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      on name acquisition, the name registry, and the validity of names.
++    </para>
++    <para>
++      Messages can specify the special destination ID
++      <constant>KDBUS_DST_ID_NAME</constant> (0) and carry a well-known name
++      in the message data. Such a message is delivered to the destination
++      connection which owns that well-known name.
++    </para>
++
++    <programlisting><![CDATA[
++  +-------------------------------------------------------------------------+
++  | +---------------+     +---------------------------+                     |
++  | | Connection    |     | Message                   | -----------------+  |
++  | | :1.22         | --> | src: 22                   |                  |  |
++  | |               |     | dst: 25                   |                  |  |
++  | |               |     |                           |                  |  |
++  | |               |     |                           |                  |  |
++  | |               |     +---------------------------+                  |  |
++  | |               |                                                    |  |
++  | |               | <--------------------------------------+           |  |
++  | +---------------+                                        |           |  |
++  |                                                          |           |  |
++  | +---------------+     +---------------------------+      |           |  |
++  | | Connection    |     | Message                   | -----+           |  |
++  | | :1.25         | --> | src: 25                   |                  |  |
++  | |               |     | dst: 0xffffffffffffffff   | -------------+   |  |
++  | |               |     |  (KDBUS_DST_ID_BROADCAST) |              |   |  |
++  | |               |     |                           | ---------+   |   |  |
++  | |               |     +---------------------------+          |   |   |  |
++  | |               |                                            |   |   |  |
++  | |               | <--------------------------------------------------+  |
++  | +---------------+                                            |   |      |
++  |                                                              |   |      |
++  | +---------------+     +---------------------------+          |   |      |
++  | | Connection    |     | Message                   | --+      |   |      |
++  | | :1.55         | --> | src: 55                   |   |      |   |      |
++  | |               |     | dst: 0 / org.foo.bar      |   |      |   |      |
++  | |               |     |                           |   |      |   |      |
++  | |               |     |                           |   |      |   |      |
++  | |               |     +---------------------------+   |      |   |      |
++  | |               |                                     |      |   |      |
++  | |               | <------------------------------------------+   |      |
++  | +---------------+                                     |          |      |
++  |                                                       |          |      |
++  | +---------------+                                     |          |      |
++  | | Connection    |                                     |          |      |
++  | | :1.81         |                                     |          |      |
++  | | org.foo.bar   |                                     |          |      |
++  | |               |                                     |          |      |
++  | |               |                                     |          |      |
++  | |               | <-----------------------------------+          |      |
++  | |               |                                                |      |
++  | |               | <----------------------------------------------+      |
++  | +---------------+                                                       |
++  +-------------------------------------------------------------------------+
++    ]]></programlisting>
++  </refsect1>
++
++  <refsect1>
++    <title>Privileged connections</title>
++    <para>
++      A connection is considered <emphasis>privileged</emphasis> if it was
++      created by the same user that created the bus, or if the creating task
++      had <constant>CAP_IPC_OWNER</constant> set when it called
++      <constant>KDBUS_CMD_HELLO</constant> (see below).
++    </para>
++    <para>
++      Privileged connections have permission to employ certain restricted
++      functions and commands, which are explained below and in other kdbus
++      man-pages.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Activator and policy holder connection</title>
++    <para>
++      An <emphasis>activator</emphasis> connection is a placeholder for a
++      <emphasis>well-known name</emphasis>. Messages sent to such a connection
++      can be used to start an implementer connection, which will then get all
++      the messages from the activator copied over. An activator connection
++      cannot be used to send any message.
++    </para>
++    <para>
++      A <emphasis>policy holder</emphasis> connection only installs a policy
++      for one or more names. These policy entries are kept active as long as
++      the connection is alive, and are removed once it terminates. Such a
++      policy connection type can be used to deploy restrictions for names that
++      are not yet active on the bus. A policy holder connection cannot be used
++      to send any message.
++    </para>
++    <para>
++      The creation of activator or policy holder connections is restricted to
++      privileged users on the bus (see above).
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Monitor connections</title>
++    <para>
++      Monitors are eavesdropping connections that receive all the traffic on
++      the bus while remaining invisible to other connections. Such
++      connections have all the properties of any other, regular connection,
++      except for the following details:
++    </para>
++
++    <itemizedlist>
++      <listitem><para>
++        They will get every message sent over the bus, both unicasts and
++        broadcasts.
++      </para></listitem>
++
++      <listitem><para>
++        Installing matches for signal messages is neither necessary
++        nor allowed.
++      </para></listitem>
++
++      <listitem><para>
++        They cannot send messages or be directly addressed as receiver.
++      </para></listitem>
++
++      <listitem><para>
++        They cannot own well-known names. Therefore, they also cannot
++        operate as activators.
++      </para></listitem>
++
++      <listitem><para>
++        Their creation and destruction will not cause
++        <constant>KDBUS_ITEM_ID_{ADD,REMOVE}</constant> notifications to be
++        sent (see
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>).
++      </para></listitem>
++
++      <listitem><para>
++        They are not listed with their unique name in name registry dumps
++        (see <constant>KDBUS_CMD_NAME_LIST</constant> in
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>), so other connections cannot detect the presence of
++        a monitor.
++      </para></listitem>
++    </itemizedlist>
++    <para>
++      The creation of monitor connections is restricted to privileged users on
++      the bus (see above).
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Creating connections</title>
++    <para>
++      A connection to a bus is created by opening an endpoint file (see
++      <citerefentry>
++        <refentrytitle>kdbus.endpoint</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>)
++      of a bus and becoming an active client with the
++      <constant>KDBUS_CMD_HELLO</constant> ioctl. Every connection has a unique
++      identifier on the bus and can address messages to every other connection
++      on the same bus by using the peer's connection ID as the destination.
++    </para>
++    <para>
++      The <constant>KDBUS_CMD_HELLO</constant> ioctl takes a <type>struct
++      kdbus_cmd_hello</type> as argument.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_hello {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __u64 attach_flags_send;
++  __u64 attach_flags_recv;
++  __u64 bus_flags;
++  __u64 id;
++  __u64 pool_size;
++  __u64 offset;
++  __u8 id128[16];
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem>
++          <para>Flags to apply to this connection</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_HELLO_ACCEPT_FD</constant></term>
++              <listitem>
++                <para>
++                  When this flag is set, the connection can be sent file
++                  descriptors as message payload of unicast messages. If it's
++                  not set, an attempt to send file descriptors will result in
++                  <constant>-ECOMM</constant> on the sender's side.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_HELLO_ACTIVATOR</constant></term>
++              <listitem>
++                <para>
++                  Make this connection an activator (see above). With this bit
++                  set, an item of type <constant>KDBUS_ITEM_NAME</constant> has
++                  to be attached. This item describes the well-known name this
++                  connection should be an activator for.
++                  A connection cannot be an activator and a policy holder at
++                  the same time, so this bit is not allowed together with
++                  <constant>KDBUS_HELLO_POLICY_HOLDER</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_HELLO_POLICY_HOLDER</constant></term>
++              <listitem>
++                <para>
++                  Make this connection a policy holder (see above). With this
++                  bit set, an item of type <constant>KDBUS_ITEM_NAME</constant>
++                  has to be attached. This item describes the well-known name
++                  this connection should hold a policy for.
++                  A connection cannot be an activator and a policy holder at
++                  the same time, so this bit is not allowed together with
++                  <constant>KDBUS_HELLO_ACTIVATOR</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_HELLO_MONITOR</constant></term>
++              <listitem>
++                <para>
++                  Make this connection a monitor connection (see above).
++                </para>
++                <para>
++                  This flag can only be set by privileged bus connections. See
++                  below for more information.
++                  A connection cannot be a monitor and an activator or a
++                  policy holder at the same time, so this bit is not allowed
++                  together with <constant>KDBUS_HELLO_ACTIVATOR</constant> or
++                  <constant>KDBUS_HELLO_POLICY_HOLDER</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Requests a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>attach_flags_send</varname></term>
++        <listitem><para>
++          Set the bits for metadata this connection permits to be sent to the
++          receiving peer. Only metadata items that are both allowed to be sent
++          by the sender and that are requested by the receiver will be attached
++          to the message. Note, however, that the bus may optionally require
++          some of those bits to be set. If the match fails, the ioctl will fail
++          with <varname>errno</varname> set to
++          <constant>ECONNREFUSED</constant>. In either case, on return, the
++          field will be set to the mask of metadata items that are enforced
++          by the bus, with the <constant>KDBUS_FLAGS_KERNEL</constant> bit
++          set as well.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>attach_flags_recv</varname></term>
++        <listitem><para>
++          Request the attachment of metadata for each message received by this
++          connection. See
++          <citerefentry>
++            <refentrytitle>kdbus</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for information about metadata, and
++          <citerefentry>
++            <refentrytitle>kdbus.item</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          regarding items in general.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>bus_flags</varname></term>
++        <listitem><para>
++          Upon successful completion of the ioctl, this member will contain the
++          flags of the bus it connected to.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++          Upon successful completion of the command, this member will contain
++          the numerical ID of the new connection.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>pool_size</varname></term>
++        <listitem><para>
++          The size of the communication pool, in bytes. The pool can be
++          accessed by calling
++          <citerefentry>
++            <refentrytitle>mmap</refentrytitle>
++            <manvolnum>2</manvolnum>
++          </citerefentry>
++          on the file descriptor that was used to issue the
++          <constant>KDBUS_CMD_HELLO</constant> ioctl.
++          The pool size of a connection must be greater than
++          <constant>0</constant> and a multiple of
++          <constant>PAGE_SIZE</constant>. See
++          <citerefentry>
++            <refentrytitle>kdbus.pool</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for more information.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>offset</varname></term>
++        <listitem><para>
++          Upon return, this field contains the offset inside the pool where
++          the returned details are stored. See below.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id128</varname></term>
++        <listitem><para>
++          Upon successful completion of the ioctl, this member will contain the
++          <emphasis>128-bit UUID</emphasis> of the connected bus.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Variable list of items containing optional additional information.
++            The following items are currently expected/valid:
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_CONN_DESCRIPTION</constant></term>
++              <listitem>
++                <para>
++                  Contains a string that describes this connection, so it can
++                  be identified later.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME</constant></term>
++              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
++              <listitem>
++                <para>
++                  For activators and policy holders only, combinations of
++                  these two items describe policy access entries. See
++                  <citerefentry>
++                    <refentrytitle>kdbus.policy</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>
++                  for further details.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_CREDS</constant></term>
++              <term><constant>KDBUS_ITEM_PIDS</constant></term>
++              <term><constant>KDBUS_ITEM_SECLABEL</constant></term>
++              <listitem>
++                <para>
++                  Privileged bus users may submit these types in order to
++                  create connections with faked credentials. This information
++                  will be returned when peer information is queried by
++                  <constant>KDBUS_CMD_CONN_INFO</constant>. See below for more
++                  information on retrieving information on connections.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      At the offset returned in the <varname>offset</varname> field of
++      <type>struct kdbus_cmd_hello</type>, the kernel will store items
++      of the following types:
++    </para>
++
++    <variablelist>
++      <varlistentry>
++        <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
++        <listitem>
++          <para>
++            Bloom filter parameter as defined by the bus creator.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      The offset in the pool has to be freed with the
++      <constant>KDBUS_CMD_FREE</constant> ioctl. See
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for further information.
++    </para>
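++
++    <para>
++      The following is a minimal sketch of a client connecting to a bus,
++      assuming the <filename>linux/kdbus.h</filename> uapi header from this
++      patch set is installed; the function name, endpoint path and pool size
++      are arbitrary example values:
++    </para>
++
++    <programlisting><![CDATA[
++/* Illustrative sketch only: connect to a bus via KDBUS_CMD_HELLO.
++ * Error handling is reduced to the bare minimum. */
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <unistd.h>
++#include <linux/kdbus.h>
++
++int connect_to_bus(const char *endpoint_path)
++{
++        struct kdbus_cmd_hello hello;
++        int fd = open(endpoint_path, O_RDWR | O_CLOEXEC);
++
++        if (fd < 0)
++                return -1;
++
++        memset(&hello, 0, sizeof(hello));
++        hello.size = sizeof(hello);               /* no items attached */
++        hello.flags = KDBUS_HELLO_ACCEPT_FD;      /* allow fd passing */
++        hello.pool_size = 16 * 1024 * 1024;       /* multiple of PAGE_SIZE */
++
++        /* attach_flags_send/_recv are left at 0; a bus that enforces
++         * certain metadata will refuse this with ECONNREFUSED. */
++        if (ioctl(fd, KDBUS_CMD_HELLO, &hello) < 0) {
++                close(fd);
++                return -1;
++        }
++
++        printf("connected, unique ID %llu\n",
++               (unsigned long long) hello.id);
++        return fd;
++}
++    ]]></programlisting>
++
++    <para>
++      The items returned at the <varname>offset</varname> (the bloom
++      parameter, see above) live in the connection's pool and should be
++      released with <constant>KDBUS_CMD_FREE</constant> once parsed.
++    </para>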
++  </refsect1>
++
++  <refsect1>
++    <title>Retrieving information on a connection</title>
++    <para>
++      The <constant>KDBUS_CMD_CONN_INFO</constant> ioctl can be used to
++      retrieve credentials and properties of the initial creator of a
++      connection. This ioctl uses the following struct.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_info {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __u64 id;
++  __u64 attach_flags;
++  __u64 offset;
++  __u64 info_size;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Currently, no flags are supported.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
++          and the <varname>flags</varname> field is set to
++          <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++          The numerical ID of the connection for which information is to be
++          retrieved. If set to a non-zero value, the
++          <constant>KDBUS_ITEM_OWNED_NAME</constant> item is ignored.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>attach_flags</varname></term>
++        <listitem><para>
++          Specifies which metadata items should be attached to the answer. See
++          <citerefentry>
++            <refentrytitle>kdbus.message</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>offset</varname></term>
++        <listitem><para>
++          When the ioctl returns, this field will contain the offset of the
++          connection information inside the caller's pool. See
++          <citerefentry>
++            <refentrytitle>kdbus.pool</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for further information.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>info_size</varname></term>
++        <listitem><para>
++          The kernel will return the size of the returned information, so
++          applications can optionally
++          <citerefentry>
++            <refentrytitle>mmap</refentrytitle>
++            <manvolnum>2</manvolnum>
++          </citerefentry>
++          specific parts of the pool. See
++          <citerefentry>
++            <refentrytitle>kdbus.pool</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for further information.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following items are expected for
++            <constant>KDBUS_CMD_CONN_INFO</constant>.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_OWNED_NAME</constant></term>
++              <listitem>
++                <para>
++                  Contains the well-known name to look the connection up by.
++                  This item is mandatory if the <varname>id</varname> field is
++                  set to 0.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      When the ioctl returns, the following struct will be stored in the
++      caller's pool at <varname>offset</varname>. The fields in this struct
++      are described below.
++    </para>
++
++    <programlisting>
++struct kdbus_info {
++  __u64 size;
++  __u64 id;
++  __u64 flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++          The connection's unique ID.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          The connection's flags as specified when it was created.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Depending on the <varname>attach_flags</varname> field in
++            <type>struct kdbus_cmd_info</type>, items of types
++            <constant>KDBUS_ITEM_OWNED_NAME</constant> and
++            <constant>KDBUS_ITEM_CONN_DESCRIPTION</constant> may follow here.
++            <constant>KDBUS_ITEM_NEGOTIATE</constant> is also allowed.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Once the caller is finished with parsing the return buffer, it needs to
++      employ the <constant>KDBUS_CMD_FREE</constant> command for the offset, in
++      order to free the buffer part. See
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for further information.
++    </para>
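++
++    <para>
++      A sketch of such a query, assuming the caller has already connected
++      and mapped its pool at <varname>pool</varname> via
++      <citerefentry>
++        <refentrytitle>mmap</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>;
++      the function name and the choice of
++      <constant>KDBUS_ATTACH_CREDS</constant> are example values:
++    </para>
++
++    <programlisting><![CDATA[
++#include <stdio.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++/* Illustrative sketch: query credentials of the peer with unique
++ * ID 'peer_id' and release the result slice again. */
++int query_peer(int conn_fd, void *pool, __u64 peer_id)
++{
++        struct kdbus_cmd_info cmd;
++        struct kdbus_cmd_free fr;
++        struct kdbus_info *info;
++
++        memset(&cmd, 0, sizeof(cmd));
++        cmd.size = sizeof(cmd);
++        cmd.id = peer_id;                       /* look up by unique ID */
++        cmd.attach_flags = KDBUS_ATTACH_CREDS;  /* request credentials */
++
++        if (ioctl(conn_fd, KDBUS_CMD_CONN_INFO, &cmd) < 0)
++                return -1;
++
++        info = (struct kdbus_info *)((__u8 *)pool + cmd.offset);
++        /* info->items now carries the requested metadata items */
++        printf("peer flags: %llu\n", (unsigned long long) info->flags);
++
++        memset(&fr, 0, sizeof(fr));
++        fr.size = sizeof(fr);
++        fr.offset = cmd.offset;                 /* return the slice */
++        return ioctl(conn_fd, KDBUS_CMD_FREE, &fr);
++}
++    ]]></programlisting>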
++  </refsect1>
++
++  <refsect1>
++    <title>Getting information about a connection's bus creator</title>
++    <para>
++      The <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant> ioctl takes the same
++      struct as <constant>KDBUS_CMD_CONN_INFO</constant>, but is used to
++      retrieve information about the creator of the bus the connection is
++      attached to. The metadata returned by this call is collected during the
++      creation of the bus and is never altered afterwards, so it provides
++      pristine information on the task that created the bus, at the moment when
++      it did so.
++    </para>
++    <para>
++      In response to this call, a slice in the connection's pool is allocated
++      and filled with an object of type <type>struct kdbus_info</type>,
++      pointed to by the ioctl's <varname>offset</varname> field.
++    </para>
++
++    <programlisting>
++struct kdbus_info {
++  __u64 size;
++  __u64 id;
++  __u64 flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++          The bus ID.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          The bus flags as specified when it was created.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Metadata information is stored in items here. The item list
++            contains a <constant>KDBUS_ITEM_MAKE_NAME</constant> item that
++            carries the name of the bus the calling connection is attached
++            to.
++            <constant>KDBUS_ITEM_NEGOTIATE</constant> is allowed to probe
++            for known item types.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Once the caller is finished with parsing the return buffer, it needs to
++      employ the <constant>KDBUS_CMD_FREE</constant> command for the offset, in
++      order to free the buffer part. See
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for further information.
++    </para>
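++
++    <para>
++      Since the command reuses <type>struct kdbus_cmd_info</type>, a query
++      differs from the peer lookup shown above only in the command number
++      and in leaving <varname>id</varname> and the items empty. A brief
++      sketch; the chosen attach flags are arbitrary examples:
++    </para>
++
++    <programlisting><![CDATA[
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++/* Illustrative sketch: fetch metadata about the bus creator. The
++ * result lands in the connection's pool at *offset_out and must be
++ * freed with KDBUS_CMD_FREE later. */
++int query_bus_creator(int conn_fd, __u64 *offset_out)
++{
++        struct kdbus_cmd_info cmd;
++
++        memset(&cmd, 0, sizeof(cmd));
++        cmd.size = sizeof(cmd);
++        cmd.attach_flags = KDBUS_ATTACH_CREDS | KDBUS_ATTACH_PIDS;
++
++        if (ioctl(conn_fd, KDBUS_CMD_BUS_CREATOR_INFO, &cmd) < 0)
++                return -1;
++
++        *offset_out = cmd.offset;
++        return 0;
++}
++    ]]></programlisting>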
++  </refsect1>
++
++  <refsect1>
++    <title>Updating connection details</title>
++    <para>
++      Some of a connection's details can be updated with the
++      <constant>KDBUS_CMD_CONN_UPDATE</constant> ioctl, using the file
++      descriptor that was used to create the connection. The update command
++      uses the following struct.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Currently, no flags are supported.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
++          and the <varname>flags</varname> field is set to
++          <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Items to describe the connection details to be updated. The
++            following item types are supported.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
++              <listitem>
++                <para>
++                  Supply a new set of metadata items that this connection
++                  permits to be sent along with messages.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
++              <listitem>
++                <para>
++                  Supply a new set of metadata items that this connection
++                  requests to be attached to each message.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME</constant></term>
++              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
++              <listitem>
++                <para>
++                  Policy holder connections may supply a new set of policy
++                  information with these items. For other connection types,
++                  <constant>EOPNOTSUPP</constant> is returned in
++                  <varname>errno</varname>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
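++
++    <para>
++      As a sketch, the following requests that timestamps be attached to
++      all messages received from now on. The flat buffer layout (command
++      header directly followed by 8-byte aligned items) is required by all
++      item-carrying commands; the function name is an example only:
++    </para>
++
++    <programlisting><![CDATA[
++#include <stddef.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++/* Illustrative sketch: update attach_flags_recv via
++ * KDBUS_CMD_CONN_UPDATE with a single KDBUS_ITEM_ATTACH_FLAGS_RECV
++ * item: 24 bytes of command header plus a 24-byte item. */
++int request_timestamps(int conn_fd)
++{
++        __u64 buf[6] = { 0 };  /* 48 bytes, 8-byte aligned */
++        struct kdbus_cmd *cmd = (struct kdbus_cmd *)buf;
++        struct kdbus_item *item = cmd->items;
++
++        cmd->size = sizeof(buf);
++        item->size = offsetof(struct kdbus_item, data64) + sizeof(__u64);
++        item->type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
++        item->data64[0] = KDBUS_ATTACH_TIMESTAMP;
++
++        return ioctl(conn_fd, KDBUS_CMD_CONN_UPDATE, cmd);
++}
++    ]]></programlisting>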
++  </refsect1>
++
++  <refsect1>
++    <title>Termination of connections</title>
++    <para>
++      A connection can be terminated by simply calling
++      <citerefentry>
++        <refentrytitle>close</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>
++      on its file descriptor. All pending incoming messages will be discarded,
++      and the memory allocated by the pool will be freed.
++    </para>
++
++    <para>
++      An alternative way of closing down a connection is via the
++      <constant>KDBUS_CMD_BYEBYE</constant> ioctl. This ioctl will succeed only
++      if the message queue of the connection is empty at the time of closing;
++      otherwise, the ioctl will fail with <varname>errno</varname> set to
++      <constant>EBUSY</constant>. When this ioctl returns
++      successfully, the connection has been terminated and won't accept any new
++      messages from remote peers. This way, a connection can be terminated
++      race-free, without losing any messages. The ioctl takes an argument of
++      type <type>struct kdbus_cmd</type>.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Currently, no flags are supported.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will fail with
++          <varname>errno</varname> set to <constant>EPROTO</constant>, and
++          the <varname>flags</varname> field is set to <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following item types are supported.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
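++
++    <para>
++      A sketch of a race-free shutdown; a real client would drain its queue
++      with <constant>KDBUS_CMD_RECV</constant> before retrying:
++    </para>
++
++    <programlisting><![CDATA[
++#include <errno.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++/* Illustrative sketch: terminate a connection without losing
++ * messages. Fails with EBUSY while messages are still queued. */
++int quit_connection(int conn_fd)
++{
++        struct kdbus_cmd cmd;
++
++        memset(&cmd, 0, sizeof(cmd));
++        cmd.size = sizeof(cmd);
++
++        if (ioctl(conn_fd, KDBUS_CMD_BYEBYE, &cmd) < 0)
++                return -errno;  /* -EBUSY: drain queue, then retry */
++
++        /* Terminated; close(conn_fd) releases the pool. */
++        return 0;
++}
++    ]]></programlisting>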
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_HELLO</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EFAULT</constant></term>
++          <listitem><para>
++            The supplied pool size was 0 or not a multiple of the page size.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The flags supplied in <type>struct kdbus_cmd_hello</type>
++            are invalid.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            An illegal combination of
++            <constant>KDBUS_HELLO_MONITOR</constant>,
++            <constant>KDBUS_HELLO_ACTIVATOR</constant> and
++            <constant>KDBUS_HELLO_POLICY_HOLDER</constant> was passed in
++            <varname>flags</varname>.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            An invalid set of items was supplied.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ECONNREFUSED</constant></term>
++          <listitem><para>
++            The attach_flags_send field did not satisfy the requirements of
++            the bus.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EPERM</constant></term>
++          <listitem><para>
++            A <constant>KDBUS_ITEM_CREDS</constant> item was supplied, but the
++            current user is not privileged.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ESHUTDOWN</constant></term>
++          <listitem><para>
++            The bus you were trying to connect to has already been shut down.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMFILE</constant></term>
++          <listitem><para>
++            The maximum number of connections on the bus has been reached.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EOPNOTSUPP</constant></term>
++          <listitem><para>
++            The endpoint does not support the connection flags supplied in
++            <type>struct kdbus_cmd_hello</type>.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_BYEBYE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EALREADY</constant></term>
++          <listitem><para>
++            The connection has already been shut down.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EBUSY</constant></term>
++          <listitem><para>
++            There are still messages queued up in the connection's pool.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_CONN_INFO</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Invalid flags, or neither an ID nor a name was provided, or the
++            name is invalid.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ESRCH</constant></term>
++          <listitem><para>
++            Connection lookup by name failed.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ENXIO</constant></term>
++          <listitem><para>
++            No connection with the provided connection ID found.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_CONN_UPDATE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Illegal flags or items.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Wildcards submitted in policy entries, or illegal sequence
++            of policy items.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EOPNOTSUPP</constant></term>
++          <listitem><para>
++            Operation not supported by connection.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>E2BIG</constant></term>
++          <listitem><para>
++            Too many policy items attached.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.policy</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.endpoint.xml b/Documentation/kdbus/kdbus.endpoint.xml
+new file mode 100644
+index 0000000..6632485
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.endpoint.xml
+@@ -0,0 +1,429 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.endpoint">
++
++  <refentryinfo>
++    <title>kdbus.endpoint</title>
++    <productname>kdbus.endpoint</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.endpoint</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.endpoint</refname>
++    <refpurpose>kdbus endpoint</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      Endpoints are entry points to a bus (see
++      <citerefentry>
++        <refentrytitle>kdbus.bus</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>).
++      Each bus has a default
++      endpoint called <literal>bus</literal>. The bus owner can create
++      custom endpoints with specific names, permissions, and policy
++      databases (see below). An endpoint is presented as a file underneath
++      the directory of the parent bus.
++    </para>
++    <para>
++      To create a custom endpoint, open the default endpoint
++      (<literal>bus</literal>) and use the
++      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> ioctl with
++      <type>struct kdbus_cmd</type>. Custom endpoints always have a policy
++      database that, by default, forbids any operation. You have to explicitly
++      install policy entries to allow any operation on this endpoint.
++    </para>
++    <para>
++      Once <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> has succeeded, the new
++      endpoint will appear in the filesystem
++      (<citerefentry>
++        <refentrytitle>kdbus.bus</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>), and the file descriptor that was used will manage the
++      newly created endpoint resource. It cannot be used to manage further
++      resources and must be kept open as long as the endpoint is needed. The
++      endpoint will be terminated as soon as the file descriptor is closed.
++    </para>
++    <para>
++      Endpoint names may be chosen freely except for one restriction: the name
++      must be prefixed with the numeric effective UID of the creator and a dash.
++      This is required to avoid namespace clashes between different users. When
++      creating an endpoint, the name that is passed in must be properly
++      formatted or the kernel will refuse creation of the endpoint. Example:
++      <literal>1047-my-endpoint</literal> is an acceptable name for an
++      endpoint registered by a user with UID 1047. However,
++      <literal>1024-my-endpoint</literal> is not, and neither is
++      <literal>my-endpoint</literal>. The UID must be provided in the
++      user-namespace of the bus.
++    </para>
++    <para>
++      To create connections to a bus, use <constant>KDBUS_CMD_HELLO</constant>
++      on a file descriptor returned by <function>open()</function> on an
++      endpoint node. See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for further details.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Creating custom endpoints</title>
++    <para>
++      To create a new endpoint, the
++      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> command is used. Along with
++      the endpoint's name, which will be used to expose the endpoint in the
++      <citerefentry>
++        <refentrytitle>kdbus.fs</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>,
++      the command also optionally takes items to set up the endpoint's
++      <citerefentry>
++        <refentrytitle>kdbus.policy</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> takes a
++      <type>struct kdbus_cmd</type> argument.
++    </para>
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>The flags for creation.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_MAKE_ACCESS_GROUP</constant></term>
++              <listitem>
++                <para>Make the endpoint file group-accessible.</para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_MAKE_ACCESS_WORLD</constant></term>
++              <listitem>
++                <para>Make the endpoint file world-accessible.</para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Requests a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following items are expected for
++            <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
++              <listitem>
++                <para>Contains a string to identify the endpoint name.</para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME</constant></term>
++              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
++              <listitem>
++                <para>
++                  These items are used to set the policy attached to the
++                  endpoint. For more details on bus and endpoint policies, see
++                  <citerefentry>
++                    <refentrytitle>kdbus.policy</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
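++
++    <para>
++      A sketch of creating a world-accessible custom endpoint named after
++      the caller's effective UID; the suffix <literal>apps</literal> is an
++      arbitrary example:
++    </para>
++
++    <programlisting><![CDATA[
++#include <stddef.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <unistd.h>
++#include <linux/kdbus.h>
++
++/* Illustrative sketch: create "<euid>-apps" on bus_fd, an open fd
++ * of the default endpoint. Note that without additional policy
++ * items, the new endpoint's policy denies every operation. */
++int make_endpoint(int bus_fd)
++{
++        __u64 buf[64] = { 0 };
++        struct kdbus_cmd *cmd = (struct kdbus_cmd *)buf;
++        struct kdbus_item *item = cmd->items;
++        char name[64];
++        size_t len;
++
++        snprintf(name, sizeof(name), "%u-apps", (unsigned)geteuid());
++        len = strlen(name) + 1;                 /* include the NUL */
++
++        item->size = offsetof(struct kdbus_item, str) + len;
++        item->type = KDBUS_ITEM_MAKE_NAME;
++        memcpy(item->str, name, len);
++
++        /* Items are padded to 8-byte boundaries. */
++        cmd->size = sizeof(*cmd) + ((item->size + 7) & ~7ULL);
++        cmd->flags = KDBUS_MAKE_ACCESS_WORLD;
++
++        return ioctl(bus_fd, KDBUS_CMD_ENDPOINT_MAKE, cmd);
++}
++    ]]></programlisting>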
++  </refsect1>
++
++  <refsect1>
++    <title>Updating endpoints</title>
++    <para>
++      To update an existing endpoint, the
++      <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> command is issued on the
++      file descriptor that the endpoint was created with via
++      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>. The only relevant detail of
++      the endpoint that can be updated is the policy. When the command is
++      employed, the policy of the endpoint is <emphasis>replaced</emphasis>
++      atomically with the new set of rules.
++      The command takes a <type>struct kdbus_cmd</type> argument.
++    </para>
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Unused for this command.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
++          and the <varname>flags</varname> field is set to
++          <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following items are expected for
++            <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant>.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME</constant></term>
++              <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
++              <listitem>
++                <para>
++                  These items are used to set the policy attached to the
++                  endpoint. For more details on bus and endpoint policies, see
++                  <citerefentry>
++                    <refentrytitle>kdbus.policy</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>.
++                  Existing policy is atomically replaced with the new rules
++                  provided.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
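++
++    <para>
++      The following is a minimal sketch of how such an update might be
++      assembled in userspace. It is an illustration only: the header name
++      <filename>kdbus.h</filename>, the endpoint file descriptor
++      <varname>ep_fd</varname> and the name
++      <constant>org.example.Service</constant> are assumptions, and all
++      error handling is omitted.
++    </para>
++    <programlisting><![CDATA[
++#include <string.h>
++#include <sys/ioctl.h>
++#include "kdbus.h"   /* assumed location of the kdbus UAPI header */
++
++int update_endpoint_policy(int ep_fd)
++{
++    struct {
++        struct kdbus_cmd cmd;
++        struct {                    /* KDBUS_ITEM_NAME */
++            __u64 size;
++            __u64 type;
++            char str[24];           /* name, NUL terminator, padding */
++        } name;
++        struct {                    /* KDBUS_ITEM_POLICY_ACCESS */
++            __u64 size;
++            __u64 type;
++            struct kdbus_policy_access access;
++        } policy;
++    } u;
++
++    memset(&u, 0, sizeof(u));
++
++    u.name.type = KDBUS_ITEM_NAME;
++    strcpy(u.name.str, "org.example.Service");
++    u.name.size = 16 + strlen(u.name.str) + 1;
++
++    /* Example rule: allow every user to talk to that name */
++    u.policy.type = KDBUS_ITEM_POLICY_ACCESS;
++    u.policy.access.type = KDBUS_POLICY_ACCESS_WORLD;
++    u.policy.access.access = KDBUS_POLICY_TALK;
++    u.policy.size = 16 + sizeof(u.policy.access);
++
++    u.cmd.size = sizeof(u);
++    return ioctl(ep_fd, KDBUS_CMD_ENDPOINT_UPDATE, &u);
++}
++    ]]></programlisting>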
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> may fail with the
++        following errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The flags supplied in the <type>struct kdbus_cmd</type>
++            are invalid.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            An illegal combination of <constant>KDBUS_ITEM_NAME</constant> and
++            <constant>KDBUS_ITEM_POLICY_ACCESS</constant> was provided.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EEXIST</constant></term>
++          <listitem><para>
++            An endpoint of that name already exists.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EPERM</constant></term>
++          <listitem><para>
++            The calling user is not privileged. See
++            <citerefentry>
++              <refentrytitle>kdbus</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for information about privileged users.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> may fail with the
++        following errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The flags supplied in <type>struct kdbus_cmd</type>
++            are invalid.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            An illegal combination of <constant>KDBUS_ITEM_NAME</constant> and
++            <constant>KDBUS_ITEM_POLICY_ACCESS</constant> was provided.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.fs.xml b/Documentation/kdbus/kdbus.fs.xml
+new file mode 100644
+index 0000000..8c2a90e
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.fs.xml
+@@ -0,0 +1,124 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus_fs">
++
++  <refentryinfo>
++    <title>kdbus.fs</title>
++    <productname>kdbus.fs</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.fs</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.fs</refname>
++    <refpurpose>kdbus file system</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>File-system Layout</title>
++
++    <para>
++      The <emphasis>kdbusfs</emphasis> pseudo filesystem provides access to
++      kdbus entities, such as <emphasis>buses</emphasis> and
++      <emphasis>endpoints</emphasis>. Each time the filesystem is mounted,
++      a new, isolated kdbus instance is created that is independent of all
++      other instances.
++    </para>
++    <para>
++      The system-wide standard mount point for <emphasis>kdbusfs</emphasis> is
++      <constant>/sys/fs/kdbus</constant>.
++    </para>
++
++    <para>
++      Buses are represented as directories in the file system layout, whereas
++      endpoints are exposed as files inside these directories. At the top level,
++      a <emphasis>control</emphasis> node is present, which can be opened to
++      create new buses via the <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl.
++      Each <emphasis>bus</emphasis> shows a default endpoint called
++      <varname>bus</varname>, which can be opened to either create a connection
++      with the <constant>KDBUS_CMD_HELLO</constant> ioctl, or to create new
++      custom endpoints for the bus with
++      <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>. See
++      <citerefentry>
++        <refentrytitle>kdbus.bus</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>,
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry> and
++      <citerefentry>
++        <refentrytitle>kdbus.endpoint</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more details.
++    </para>
++
++    <para>The following shows an example layout of the
++    <emphasis>kdbusfs</emphasis> filesystem:</para>
++
++<programlisting>
++        /sys/fs/kdbus/                          ; mount-point
++        |-- 0-system                            ; bus directory
++        |   |-- bus                             ; default endpoint
++        |   `-- 1017-custom                     ; custom endpoint
++        |-- 1000-user                           ; bus directory
++        |   |-- bus                             ; default endpoint
++        |   |-- 1000-service-A                  ; custom endpoint
++        |   `-- 1000-service-B                  ; custom endpoint
++        `-- control                             ; control file
++</programlisting>
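++
++    <para>
++      As an illustration of the role of the control node, the following
++      minimal sketch creates a new bus on the standard mount point. The
++      items required by <constant>KDBUS_CMD_BUS_MAKE</constant> are
++      described in
++      <citerefentry>
++        <refentrytitle>kdbus.bus</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>;
++      the header name <filename>kdbus.h</filename> and the bloom parameters
++      chosen here are assumptions, and all error handling is omitted.
++    </para>
++<programlisting><![CDATA[
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/ioctl.h>
++#include "kdbus.h"   /* assumed location of the kdbus UAPI header */
++
++int make_bus(void)
++{
++    struct {
++        struct kdbus_cmd cmd;
++        struct {                    /* KDBUS_ITEM_BLOOM_PARAMETER */
++            __u64 size;
++            __u64 type;
++            struct kdbus_bloom_parameter param;
++        } bloom;
++        struct {                    /* KDBUS_ITEM_MAKE_NAME */
++            __u64 size;
++            __u64 type;
++            char str[32];           /* "<uid>-<name>" plus NUL */
++        } name;
++    } m;
++    int fd;
++
++    fd = open("/sys/fs/kdbus/control", O_RDWR | O_CLOEXEC);
++
++    memset(&m, 0, sizeof(m));
++    m.bloom.type = KDBUS_ITEM_BLOOM_PARAMETER;
++    m.bloom.param.size = 64;        /* bytes per bloom filter */
++    m.bloom.param.n_hash = 1;
++    m.bloom.size = 16 + sizeof(m.bloom.param);
++
++    m.name.type = KDBUS_ITEM_MAKE_NAME;
++    snprintf(m.name.str, sizeof(m.name.str), "%u-example",
++             (unsigned int)getuid());
++    m.name.size = 16 + strlen(m.name.str) + 1;
++
++    m.cmd.size = sizeof(m);
++    return ioctl(fd, KDBUS_CMD_BUS_MAKE, &m);
++}
++]]></programlisting>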
++  </refsect1>
++
++  <refsect1>
++    <title>Mounting instances</title>
++    <para>
++      In order to get a new and separate kdbus environment, a new instance
++      of <emphasis>kdbusfs</emphasis> can be mounted like this:
++    </para>
++<programlisting>
++  # mount -t kdbusfs kdbusfs /tmp/new_kdbus/
++</programlisting>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>mount</refentrytitle>
++          <manvolnum>8</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.item.xml b/Documentation/kdbus/kdbus.item.xml
+new file mode 100644
+index 0000000..09f8b90
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.item.xml
+@@ -0,0 +1,839 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus">
++
++  <refentryinfo>
++    <title>kdbus.item</title>
++    <productname>kdbus item</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.item</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.item</refname>
++    <refpurpose>kdbus item structure, layout and usage</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      To flexibly augment transport structures, data blobs of type
++      <type>struct kdbus_item</type> can be attached to the structs passed
++      into the ioctls. Some ioctls require items of certain types, while
++      for others they are optional. Items that are not supported by the
++      ioctl they are attached to will cause it to fail with <varname>errno</varname>
++      set to <constant>EINVAL</constant>.
++      Items are also used for information stored in a connection's
++      <emphasis>pool</emphasis>, such as received messages, name lists or
++      requested connection or bus owner information. Depending on the type of
++      an item, its total size is either fixed or variable.
++    </para>
++
++    <refsect2>
++      <title>Chaining items</title>
++      <para>
++        Whenever items are used as part of the kdbus kernel API, they are
++        embedded in structs that themselves include a size field containing
++        the overall size of the structure.
++        This allows multiple items to be chained up, and an item iterator
++        (see below) is capable of detecting the end of an item chain.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Alignment</title>
++      <para>
++        The kernel expects all items to be aligned to 8-byte boundaries.
++        Unaligned items will cause the ioctl they are used with to fail
++        with <varname>errno</varname> set to <constant>EINVAL</constant>.
++        An item that has an unaligned size itself hence needs to be padded
++        if it is followed by another item.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Iterating items</title>
++      <para>
++        A simple iterator walks the chain of items until their accumulated
++        size reaches the embedding structure's overall size. An example
++        implementation is shown below.
++      </para>
++
++      <programlisting><![CDATA[
++/* Round up to the next multiple of 8 */
++#define KDBUS_ALIGN8(val) (((val) + 7) & ~7)
++
++/* Jump to the next item, honoring the 8-byte alignment */
++#define KDBUS_ITEM_NEXT(item) \
++    (typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++
++/* Iterate from (head)->first until the embedding struct's size is reached */
++#define KDBUS_ITEM_FOREACH(item, head, first)                      \
++    for (item = (head)->first;                                     \
++         ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) && \
++          ((uint8_t *)(item) >= (uint8_t *)(head));                \
++         item = KDBUS_ITEM_NEXT(item))
++      ]]></programlisting>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>Item layout</title>
++    <para>
++      A <type>struct kdbus_item</type> consists of a
++      <varname>size</varname> field, describing its overall size, and a
++      <varname>type</varname> field, both 64 bits wide. They are followed by
++      a union to store information that is specific to the item's type.
++      The struct layout is shown below.
++    </para>
++
++    <programlisting>
++struct kdbus_item {
++  __u64 size;
++  __u64 type;
++  /* item payload - see below */
++  union {
++    __u8 data[0];
++    __u32 data32[0];
++    __u64 data64[0];
++    char str[0];
++
++    __u64 id;
++    struct kdbus_vec vec;
++    struct kdbus_creds creds;
++    struct kdbus_pids pids;
++    struct kdbus_audit audit;
++    struct kdbus_caps caps;
++    struct kdbus_timestamp timestamp;
++    struct kdbus_name name;
++    struct kdbus_bloom_parameter bloom_parameter;
++    struct kdbus_bloom_filter bloom_filter;
++    struct kdbus_memfd memfd;
++    int fds[0];
++    struct kdbus_notify_name_change name_change;
++    struct kdbus_notify_id_change id_change;
++    struct kdbus_policy_access policy_access;
++  };
++};
++    </programlisting>
++
++    <para>
++      <type>struct kdbus_item</type> should never be used to allocate
++      an item instance, as its size may grow in future releases of the API.
++      Instead, it should be manually assembled by storing the
++      <varname>size</varname>, <varname>type</varname> and payload in a
++      struct of its own.
++    </para>
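++
++    <para>
++      A minimal sketch of that advice is shown below. The 16-byte header
++      size follows from the two leading <type>__u64</type> fields; the item
++      type and name used here are arbitrary examples.
++    </para>
++    <programlisting>
++struct {
++  __u64 size;                 /* header: overall item size */
++  __u64 type;                 /* header: item type */
++  char str[16];               /* payload sized for this use case */
++} item = {
++  .size = 16 + sizeof("0-example"),
++  .type = KDBUS_ITEM_MAKE_NAME,
++  .str  = "0-example",
++};
++    </programlisting>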
++  </refsect1>
++
++  <refsect1>
++    <title>Item types</title>
++
++    <refsect2>
++      <title>Negotiation item</title>
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++          <listitem><para>
++            When this item is attached to any ioctl, programs can
++            <emphasis>probe</emphasis> the kernel for known item types.
++            The item carries an array of <type>uint64_t</type> values in
++            <varname>item.data64</varname>, each set to an item type to
++            probe. The kernel will reset each member of this array that is
++            not recognized as a valid item type to <constant>0</constant>.
++            This way, users can negotiate kernel features at start-up to
++            keep newer userspace compatible with older kernels. This item
++            is never attached by the kernel in response to any command.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
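++
++      <para>
++        A minimal sketch of such a probe is shown below. Which command the
++        item is attached to is left open here, and the probed types are
++        arbitrary examples.
++      </para>
++      <programlisting>
++struct {
++  struct kdbus_cmd cmd;
++  struct {
++    __u64 size;
++    __u64 type;
++    __u64 types[2];           /* item.data64: types to probe for */
++  } probe;
++} nego = {
++  .cmd.size    = sizeof(nego),
++  .probe.size  = 16 + 2 * sizeof(__u64),
++  .probe.type  = KDBUS_ITEM_NEGOTIATE,
++  .probe.types = { KDBUS_ITEM_FDS, KDBUS_ITEM_CANCEL_FD },
++};
++
++/* After the ioctl returns, every entry of nego.probe.types that was
++ * reset to 0 denotes an item type unknown to this kernel. */
++      </programlisting>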
++    </refsect2>
++
++    <refsect2>
++      <title>Command specific items</title>
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
++          <term><constant>KDBUS_ITEM_PAYLOAD_OFF</constant></term>
++          <listitem><para>
++            Messages are directly copied by the sending process into the
++            receiver's
++            <citerefentry>
++              <refentrytitle>kdbus.pool</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++            This way, two peers can exchange data by effectively doing a
++            single-copy from one process to another; the kernel will not buffer
++            the data anywhere else. <constant>KDBUS_ITEM_PAYLOAD_VEC</constant>
++            is used when <emphasis>sending</emphasis> messages. The item
++            references a memory address where the payload data can be found.
++            <constant>KDBUS_ITEM_PAYLOAD_OFF</constant> is used when messages
++            are <emphasis>received</emphasis>, and the
++            <constant>offset</constant> value describes the offset inside the
++            receiving connection's
++            <citerefentry>
++              <refentrytitle>kdbus.pool</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            where the message payload can be found. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on passing of payload data along with a
++            message.
++            <programlisting>
++struct kdbus_vec {
++  __u64 size;
++  union {
++    __u64 address;
++    __u64 offset;
++  };
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
++          <listitem><para>
++            Transports a file descriptor of a <emphasis>memfd</emphasis> in
++            <type>struct kdbus_memfd</type> in <varname>item.memfd</varname>.
++            The <varname>size</varname> field has to match the actual size of
++            the memfd that was specified when it was created. The
++            <varname>start</varname> parameter denotes the offset inside the
++            memfd at which the referenced payload starts. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on passing of payload data along with a
++            message.
++            <programlisting>
++struct kdbus_memfd {
++  __u64 start;
++  __u64 size;
++  int fd;
++  __u32 __pad;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_FDS</constant></term>
++          <listitem><para>
++            Contains an array of <emphasis>file descriptors</emphasis>.
++            When used with <constant>KDBUS_CMD_SEND</constant>, the values of
++            this array must be filled with valid file descriptor numbers.
++            When received as item attached to a message, the array will
++            contain the numbers of the installed file descriptors, or
++            <constant>-1</constant> in case an error occurred.
++            In either case, the number of entries in the array is derived from
++            the item's total size. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>Items specific to some commands</title>
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CANCEL_FD</constant></term>
++          <listitem><para>
++            Transports a file descriptor that can be used to cancel a
++            synchronous <constant>KDBUS_CMD_SEND</constant> operation by
++            writing to it. The file descriptor is stored in
++            <varname>item.fds[0]</varname>. The item may only contain one
++            file descriptor. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on this item and how to use it.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_BLOOM_PARAMETER</constant></term>
++          <listitem><para>
++            Contains a set of <emphasis>bloom parameters</emphasis> as
++            <type>struct kdbus_bloom_parameter</type> in
++            <varname>item.bloom_parameter</varname>.
++            The item is passed from userspace to kernel during the
++            <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl, and returned
++            verbatim when <constant>KDBUS_CMD_HELLO</constant> is called.
++            The kernel does not use the bloom parameters, but they need to
++            be known by each connection on the bus in order to define the
++            bloom filter hash details. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on matching and bloom filters.
++            <programlisting>
++struct kdbus_bloom_parameter {
++  __u64 size;
++  __u64 n_hash;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_BLOOM_FILTER</constant></term>
++          <listitem><para>
++            Carries a <emphasis>bloom filter</emphasis> as
++            <type>struct kdbus_bloom_filter</type> in
++            <varname>item.bloom_filter</varname>. It is mandatory to send this
++            item attached to a <type>struct kdbus_msg</type>, in case the
++            message is a signal. This item is never transported from kernel to
++            userspace. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on matching and bloom filters.
++            <programlisting>
++struct kdbus_bloom_filter {
++  __u64 generation;
++  __u64 data[0];
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_BLOOM_MASK</constant></term>
++          <listitem><para>
++            Transports a <emphasis>bloom mask</emphasis> as binary data blob
++            stored in <varname>item.data</varname>. This item is used to
++            describe a match into a connection's match database. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on matching and bloom filters.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_DST_NAME</constant></term>
++          <listitem><para>
++            Contains a <emphasis>well-known name</emphasis> to send a
++            message to, as null-terminated string in
++            <varname>item.str</varname>. This item is used with
++            <constant>KDBUS_CMD_SEND</constant>. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on how to send a message.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_MAKE_NAME</constant></term>
++          <listitem><para>
++            Contains a <emphasis>bus name</emphasis> or
++            <emphasis>endpoint name</emphasis>, stored as null-terminated
++            string in <varname>item.str</varname>. This item is sent from
++            userspace to kernel when buses or endpoints are created, and
++            returned to userspace when the bus creator information is
++            queried. See
++            <citerefentry>
++              <refentrytitle>kdbus.bus</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            and
++            <citerefentry>
++              <refentrytitle>kdbus.endpoint</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant></term>
++          <term><constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant></term>
++          <listitem><para>
++            Contains a set of <emphasis>attach flags</emphasis> at
++            <emphasis>send</emphasis> or <emphasis>receive</emphasis> time. See
++            <citerefentry>
++              <refentrytitle>kdbus</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>,
++            <citerefentry>
++              <refentrytitle>kdbus.bus</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry> and
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on attach flags.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_ID</constant></term>
++          <listitem><para>
++            Transports the <emphasis>numerical ID</emphasis> of a connection
++            as a <type>uint64_t</type> value in
++            <varname>item.id</varname>.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_NAME</constant></term>
++          <listitem><para>
++            Transports a name associated with the
++            <emphasis>name registry</emphasis>, stored as
++            <type>struct kdbus_name</type> in
++            <varname>item.name</varname> with the name itself as a
++            null-terminated string. The <varname>flags</varname> field
++            contains the flags of the name. See
++            <citerefentry>
++              <refentrytitle>kdbus.name</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on how to access the name registry of a bus.
++            <programlisting>
++struct kdbus_name {
++  __u64 flags;
++  char name[0];
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>Items attached by the kernel as metadata</title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_TIMESTAMP</constant></term>
++          <listitem><para>
++            Contains both the <emphasis>monotonic</emphasis> and the
++            <emphasis>realtime</emphasis> timestamp, taken when the message
++            was processed on the kernel side.
++            Stored as <type>struct kdbus_timestamp</type> in
++            <varname>item.timestamp</varname>.
++            <programlisting>
++struct kdbus_timestamp {
++  __u64 seqnum;
++  __u64 monotonic_ns;
++  __u64 realtime_ns;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CREDS</constant></term>
++          <listitem><para>
++            Contains a set of <emphasis>user</emphasis> and
++            <emphasis>group</emphasis> information as 32-bit values, in the
++            usual four flavors: real, effective, saved and filesystem related.
++            Stored as <type>struct kdbus_creds</type> in
++            <varname>item.creds</varname>.
++            <programlisting>
++struct kdbus_creds {
++  __u32 uid;
++  __u32 euid;
++  __u32 suid;
++  __u32 fsuid;
++  __u32 gid;
++  __u32 egid;
++  __u32 sgid;
++  __u32 fsgid;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_PIDS</constant></term>
++          <listitem><para>
++            Contains the <emphasis>PID</emphasis>, <emphasis>TID</emphasis>
++            and <emphasis>parent PID (PPID)</emphasis> of a remote peer.
++            Stored as <type>struct kdbus_pids</type> in
++            <varname>item.pids</varname>.
++            <programlisting>
++struct kdbus_pids {
++  __u64 pid;
++  __u64 tid;
++  __u64 ppid;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_AUXGROUPS</constant></term>
++          <listitem><para>
++            Contains the <emphasis>auxiliary (supplementary) groups</emphasis>
++            a remote peer is a member of, stored as an array of
++            <type>uint32_t</type> values in <varname>item.data32</varname>.
++            The array length can be determined by looking at the item's total
++            size, subtracting the size of the header and dividing the
++            remainder by <constant>sizeof(uint32_t)</constant>.
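++            For illustration, a sketch of that computation, the 16-byte
++            header being the two <type>__u64</type> fields of the item:
++            <programlisting>
++size_t n_groups = (item->size - 16) / sizeof(uint32_t);
++uint32_t *groups = item->data32;
++            </programlisting>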
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_OWNED_NAME</constant></term>
++          <listitem><para>
++            Contains a <emphasis>well-known name</emphasis> currently owned
++            by a connection. The name is stored as null-terminated string in
++            <varname>item.str</varname>. Its length can also be derived from
++            the item's total size.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_TID_COMM</constant> [*]</term>
++          <listitem><para>
++            Contains the <emphasis>comm</emphasis> string of a task's
++            <emphasis>TID</emphasis> (thread ID), stored as null-terminated
++            string in <varname>item.str</varname>. Its length can also be
++            derived from the item's total size. Receivers of this item should
++            not use its contents for any kind of security measures. See below.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_PID_COMM</constant> [*]</term>
++          <listitem><para>
++            Contains the <emphasis>comm</emphasis> string of a task's
++            <emphasis>PID</emphasis> (process ID), stored as null-terminated
++            string in <varname>item.str</varname>. Its length can also be
++            derived from the item's total size. Receivers of this item should
++            not use its contents for any kind of security measures. See below.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_EXE</constant> [*]</term>
++          <listitem><para>
++            Contains the <emphasis>path to the executable</emphasis> of a task,
++            stored as null-terminated string in <varname>item.str</varname>. Its
++            length can also be derived from the item's total size. Receivers of
++            this item should not use its contents for any kind of security
++            measures. See below.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CMDLINE</constant> [*]</term>
++          <listitem><para>
++            Contains the <emphasis>command line arguments</emphasis> of a
++            task, stored as an <emphasis>array</emphasis> of null-terminated
++            strings in <varname>item.str</varname>. The total length of all
++            strings in the array can be derived from the item's total size.
++            Receivers of this item should not use its contents for any kind
++            of security measures. See below.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CGROUP</constant></term>
++          <listitem><para>
++            Contains the <emphasis>cgroup path</emphasis> of a task, stored
++            as null-terminated string in <varname>item.str</varname>. Its
++            length can also be derived from the item's total size.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CAPS</constant></term>
++          <listitem><para>
++            Contains sets of <emphasis>capabilities</emphasis>, stored as
++            <type>struct kdbus_caps</type> in <varname>item.caps</varname>.
++            As the item size may increase in the future, programs should be
++            written in a way that takes
++            <varname>item.caps.last_cap</varname> into account, and derive
++            the number of sets and rows from the item size and the reported
++            number of valid capability bits.
++            <programlisting>
++struct kdbus_caps {
++  __u32 last_cap;
++  __u32 caps[0];
++};
++            </programlisting>
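++            For illustration, one possible sketch of that derivation,
++            assuming the 16-byte item header and 32-bit capability rows:
++            <programlisting>
++__u32 rows_per_set = item->caps.last_cap / 32 + 1;
++__u32 total_rows = (item->size - 16 - sizeof(__u32)) / sizeof(__u32);
++__u32 n_sets = total_rows / rows_per_set;
++            </programlisting>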
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_SECLABEL</constant></term>
++          <listitem><para>
++            Contains the <emphasis>LSM label</emphasis> of a task, stored as
++            null-terminated string in <varname>item.str</varname>. Its length
++            can also be derived from the item's total size.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_AUDIT</constant></term>
++          <listitem><para>
++            Contains the audit <emphasis>sessionid</emphasis> and
++            <emphasis>loginuid</emphasis> of a task, stored as
++            <type>struct kdbus_audit</type> in
++            <varname>item.audit</varname>.
++            <programlisting>
++struct kdbus_audit {
++  __u32 sessionid;
++  __u32 loginuid;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_CONN_DESCRIPTION</constant></term>
++          <listitem><para>
++            Contains the <emphasis>connection description</emphasis>, as set
++            by <constant>KDBUS_CMD_HELLO</constant> or
++            <constant>KDBUS_CMD_CONN_UPDATE</constant>, stored as
++            null-terminated string in <varname>item.str</varname>. Its length
++            can also be derived from the item's total size.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++
++      <para>
++        All metadata is automatically translated into the
++        <emphasis>namespaces</emphasis> of the task that receives them. See
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more information.
++      </para>
++
++      <para>
++        [*] Note that the content stored in metadata items of type
++        <constant>KDBUS_ITEM_TID_COMM</constant>,
++        <constant>KDBUS_ITEM_PID_COMM</constant>,
++        <constant>KDBUS_ITEM_EXE</constant> and
++        <constant>KDBUS_ITEM_CMDLINE</constant>
++        can easily be tampered with by the sending tasks. Therefore, it
++        should <emphasis>not</emphasis> be used for any sort of
++        security-relevant assumptions. The only reason it is transmitted is
++        to let
++        receivers know about details that were set when metadata was
++        collected, even though the task they were collected from is not
++        active any longer when the items are received.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Items used for policy entries, matches and notifications</title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_POLICY_ACCESS</constant></term>
++          <listitem><para>
++            This item describes a <emphasis>policy access</emphasis> entry to
++            access the policy database of a
++            <citerefentry>
++              <refentrytitle>kdbus.bus</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry> or
++            <citerefentry>
++              <refentrytitle>kdbus.endpoint</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++            Please refer to
++            <citerefentry>
++              <refentrytitle>kdbus.policy</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on the policy database and how to access it.
++            <programlisting>
++struct kdbus_policy_access {
++  __u64 type;
++  __u64 access;
++  __u64 id;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_ID_ADD</constant></term>
++          <term><constant>KDBUS_ITEM_ID_REMOVE</constant></term>
++          <listitem><para>
++            This item is sent as an attachment to a
++            <emphasis>kernel notification</emphasis> and indicates that a
++            new connection was created on the bus, or that a connection was
++            disconnected, respectively. It stores a
++            <type>struct kdbus_notify_id_change</type> in
++            <varname>item.id_change</varname>.
++            The <varname>id</varname> field contains the numeric ID of the
++            connection that was added or removed, and <varname>flags</varname>
++            is set to the connection flags, as passed by
++            <constant>KDBUS_CMD_HELLO</constant>. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            and
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on matches and notification messages.
++            <programlisting>
++struct kdbus_notify_id_change {
++  __u64 id;
++  __u64 flags;
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_NAME_ADD</constant></term>
++          <term><constant>KDBUS_ITEM_NAME_REMOVE</constant></term>
++          <term><constant>KDBUS_ITEM_NAME_CHANGE</constant></term>
++          <listitem><para>
++            This item is sent as an attachment to a
++            <emphasis>kernel notification</emphasis> and indicates that a
++            <emphasis>well-known name</emphasis> appeared, disappeared or
++            transferred to another owner on the bus. It stores a
++            <type>struct kdbus_notify_name_change</type> in
++            <varname>item.name_change</varname>.
++            <varname>old_id</varname> describes the former owner of the name
++            and has all fields set to <constant>0</constant> in case of
++            <constant>KDBUS_ITEM_NAME_ADD</constant>.
++            <varname>new_id</varname> describes the new owner of the name and
++            has all fields set to <constant>0</constant> in case of
++            <constant>KDBUS_ITEM_NAME_REMOVE</constant>.
++            The <varname>name</varname> field contains the well-known name the
++            notification is about, as null-terminated string. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            and
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information on matches and notification messages.
++            <programlisting>
++struct kdbus_notify_name_change {
++  struct kdbus_notify_id_change old_id;
++  struct kdbus_notify_id_change new_id;
++  char name[0];
++};
++            </programlisting>
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_REPLY_TIMEOUT</constant></term>
++          <listitem><para>
++            This item is sent as an attachment to a
++            <emphasis>kernel notification</emphasis>. It informs the receiver
++            that an expected reply to a message was not received in time.
++            The remote peer ID and the message cookie are stored in the message
++            header. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information about messages, timeouts and notifications.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ITEM_REPLY_DEAD</constant></term>
++          <listitem><para>
++            This item is sent as an attachment to a
++            <emphasis>kernel notification</emphasis>. It informs the receiver
++            that a remote connection from which a reply was expected was
++            disconnected before sending that reply. The remote peer ID and
++            the message cookie are stored in the message header. See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for more information about messages, timeouts and notifications.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>memfd_create</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.match.xml b/Documentation/kdbus/kdbus.match.xml
+new file mode 100644
+index 0000000..ae38e04
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.match.xml
+@@ -0,0 +1,555 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.match">
++
++  <refentryinfo>
++    <title>kdbus.match</title>
++    <productname>kdbus.match</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.match</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.match</refname>
++    <refpurpose>kdbus match</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      kdbus connections can install matches in order to subscribe to signal
++      messages sent on the bus. Such signal messages can be either directed
++      to a single connection (by setting a specific connection ID in
++      <varname>struct kdbus_msg.dst_id</varname> or by sending it to a
++      well-known name), or to potentially <emphasis>all</emphasis> currently
++      active connections on the bus (by setting
++      <varname>struct kdbus_msg.dst_id</varname> to
++      <constant>KDBUS_DST_ID_BROADCAST</constant>).
++      A signal message always has the <constant>KDBUS_MSG_SIGNAL</constant>
++      bit set in the <varname>flags</varname> bitfield.
++      Also, signal messages can originate from either the kernel (called
++      <emphasis>notifications</emphasis>), or from other bus connections.
++      In either case, a bus connection needs to have a suitable
++      <emphasis>match</emphasis> installed in order to receive any signal
++      message. Without any rules installed in the connection, no signal message
++      will be received.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Matches for signal messages from other connections</title>
++    <para>
++      Matches for messages from other connections (not kernel notifications)
++      are implemented as bloom filters (see below). The sender adds certain
++      properties of the message as elements to a bloom filter bit field, and
++      sends that along with the signal message.
++
++      The receiving connection adds the message properties it is interested in
++      as elements to a bloom mask bit field, and uploads the mask as a match rule,
++      possibly along with some other rules to further limit the match.
++
++      The kernel will match the signal message's bloom filter against the
++      connection's bloom mask (simply by &amp;-ing it), and will decide whether
++      the message should be delivered to a connection.
++    </para>
++    <para>
++      The kernel has no notion of any specific properties of the signal message;
++      all it sees are the bit fields of the bloom filter and the mask to match
++      against. The use of bloom filters allows simple and efficient matching,
++      without exposing any message properties or internals to the kernel side.
++      Clients need to deal with the fact that they might receive signal messages
++      which they did not subscribe to, as the bloom filter might allow
++      false-positives to pass the filter.
++
++      To allow the future extension of the set of elements in the bloom filter,
++      the filter specifies a <emphasis>generation</emphasis> number. A later
++      generation must always contain all elements of the set of the previous
++      generation, but can add new elements to the set. The match rules mask can
++      carry an array with all previous generations of masks individually stored.
++      When the filter and mask are matched by the kernel, the mask with the
++      closest matching generation is selected as the index into the mask array.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Bloom filters</title>
++    <para>
++      Bloom filters allow checking whether a given word is present in a
++      dictionary. This allows a connection to set up a mask for the
++      information it is interested in; it will then be delivered signal
++      messages that have a matching filter.
++
++      For general information, see
++      <ulink url="https://en.wikipedia.org/wiki/Bloom_filter">the Wikipedia
++      article on bloom filters</ulink>.
++    </para>
++    <para>
++      The size of the bloom filter is defined per bus when it is created, in
++      <varname>kdbus_bloom_parameter.size</varname>. All bloom filters attached
++      to signal messages on the bus must match this size, and all bloom filter
++      matches uploaded by connections must also match the size, or a multiple
++      thereof (see below).
++
++      The calculation of the mask has to be done in userspace applications. The
++      kernel just checks the bitmasks to decide whether or not to let the
++      message pass. All bits set in the filter must also be set in the mask
++      (matched with bit-wise <emphasis>AND</emphasis> logic), but the mask
++      may have more bits set than the filter. Consequently, false positive
++      matches are expected to happen,
++      and programs must deal with that fact by checking the contents of the
++      payload again at receive time.
++    </para>
++    <para>
++      Masks are entities that are always passed to the kernel as part of a
++      match (with an item of type <constant>KDBUS_ITEM_BLOOM_MASK</constant>),
++      and filters can be attached to signals, with an item of type
++      <constant>KDBUS_ITEM_BLOOM_FILTER</constant>. For a filter to match, all
++      its bits have to be set in the match mask as well.
++    </para>
++    <para>
++      For example, consider a bus that has a bloom size of 8 bytes, and the
++      following mask/filter combinations:
++    </para>
++    <programlisting><![CDATA[
++          filter  0x0101010101010101
++          mask    0x0101010101010101
++                  -> matches
++
++          filter  0x0303030303030303
++          mask    0x0101010101010101
++                  -> doesn't match
++
++          filter  0x0101010101010101
++          mask    0x0303030303030303
++                  -> matches
++    ]]></programlisting>
++
++    <para>
++      Hence, in order to catch all messages, a mask filled with
++      <constant>0xff</constant> bytes can be installed as a wildcard match rule.
++    </para>
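++
++    <para>
++      A minimal sketch of that decision, for a single mask generation, is
++      shown below; it illustrates the semantics described above and is not
++      the kernel's actual implementation.
++    </para>
++    <programlisting><![CDATA[
++#include <stdbool.h>
++#include <stddef.h>
++#include <stdint.h>
++
++/* A filter matches if every bit set in it is also set in the mask. */
++static bool bloom_match(const uint64_t *filter, const uint64_t *mask,
++                        size_t n_words)
++{
++    size_t i;
++
++    for (i = 0; i < n_words; i++)
++        if ((filter[i] & mask[i]) != filter[i])
++            return false;
++    return true;
++}
++    ]]></programlisting>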
++
++    <refsect2>
++      <title>Generations</title>
++
++      <para>
++        Uploaded matches may contain multiple masks, which have to be as large
++        as the bloom filter size defined by the bus. Each block of a mask is
++        called a <emphasis>generation</emphasis>, starting at index 0.
++
++        At match time, when a signal is about to be delivered, a bloom mask
++        generation is passed, which denotes which of the bloom masks the filter
++        should be matched against. This allows programs to provide backward
++        compatible masks at upload time, while older clients can still match
++        against older versions of filters.
++      </para>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>Matches for kernel notifications</title>
++    <para>
++      To receive kernel generated notifications (see
++      <citerefentry>
++        <refentrytitle>kdbus.message</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>),
++      a connection must install match rules that are different from
++      the bloom filter matches described in the section above. They can be
++      filtered by the connection ID that caused the notification to be sent, by
++      one of the names it currently owns, or by the type of the notification
++      (ID/name add/remove/change).
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Adding a match</title>
++    <para>
++      To add a match, the <constant>KDBUS_CMD_MATCH_ADD</constant> ioctl is
++      used. It takes a <type>struct kdbus_cmd_match</type> argument, which
++      is described below.
++
++      Note that each of the items attached to this command will internally
++      create one match <emphasis>rule</emphasis>, and the collection of them,
++      which is submitted as one block via the ioctl, is called a
++      <emphasis>match</emphasis>. To allow a message to pass, all rules of a
++      match have to be satisfied. Hence, adding more items to the command will
++      only narrow the possibility of a match to effectively let the message
++      pass, and will decrease the chance that the connection's process will be
++      woken up needlessly.
++
++      Multiple matches can be installed per connection. As long as one of
++      them has a set of rules which allows the message to pass, that match
++      will be
++      decisive.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_match {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __u64 cookie;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>Flags to control the behavior of the ioctl.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_MATCH_REPLACE</constant></term>
++              <listitem>
++                <para>
++                  Replace any existing match that was installed with the
++                  same <varname>cookie</varname> value.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Requests a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>cookie</varname></term>
++        <listitem><para>
++          A cookie which identifies the match, so it can be referred to when
++          removing it.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++        <para>
++          Items that define the actual rules of the match. The following item
++          types are expected. Each item will create one new match rule.
++        </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_BLOOM_MASK</constant></term>
++              <listitem>
++                <para>
++                  An item that carries the bloom filter mask to match against
++                  in its data field. The payload size must match the bloom
++                  filter size that was specified when the bus was created.
++                  See the "Bloom filters" section above for more information on
++                  bloom filters.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME</constant></term>
++              <listitem>
++                <para>
++                  When used as part of kernel notifications, this item specifies
++                  a name that is acquired, lost or that changed its owner (see
++                  below). When used as part of a match for user-generated signal
++                  messages, it specifies a name that the sending connection must
++                  own at the time of sending the signal.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ID</constant></term>
++              <listitem>
++                <para>
++                  Specifies the ID of a sender connection that matches this
++                  rule. For kernel notifications, this specifies the ID of a
++                  connection that was added to or removed from the bus.
++                  For user-generated signals, it specifies the ID of the
++                  connection that sent the signal message.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NAME_ADD</constant></term>
++              <term><constant>KDBUS_ITEM_NAME_REMOVE</constant></term>
++              <term><constant>KDBUS_ITEM_NAME_CHANGE</constant></term>
++              <listitem>
++                <para>
++                  These items request delivery of kernel notifications that
++                  describe a name acquisition, loss, or change. The details
++                  are stored in the item's
++                  <varname>kdbus_notify_name_change</varname> member.
++                  All information specified must be matched in order to make
++                  the message pass. Use
++                  <constant>KDBUS_MATCH_ID_ANY</constant> to
++                  match against any unique connection ID.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_ID_ADD</constant></term>
++              <term><constant>KDBUS_ITEM_ID_REMOVE</constant></term>
++              <listitem>
++                <para>
++                  These items request delivery of kernel notifications that are
++                  generated when a connection is created or terminated.
++                  <type>struct kdbus_notify_id_change</type> is used to
++                  store the actual match information. This item can be used to
++                  monitor one particular connection ID, or, when the ID field
++                  is set to <constant>KDBUS_MATCH_ID_ANY</constant>,
++                  all of them.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_NEGOTIATE</constant></term>
++              <listitem><para>
++                With this item, programs can <emphasis>probe</emphasis> the
++                kernel for known item types. See
++                <citerefentry>
++                  <refentrytitle>kdbus.item</refentrytitle>
++                  <manvolnum>7</manvolnum>
++                </citerefentry>
++                for more details.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Refer to
++      <citerefentry>
++        <refentrytitle>kdbus.message</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information on message types.
++    </para>
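++
++    <para>
++      As an example, the following sketch installs a match rule that lets
++      signal messages from one specific sender connection pass. It is a
++      sketch only: <varname>conn_fd</varname> is a hypothetical connected
++      kdbus file descriptor, and the layout of
++      <type>struct kdbus_item</type>, including its <varname>id</varname>
++      member, is assumed from the kdbus UAPI header (see
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>).
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: match signal messages from one sender connection ID. */
++#include <stddef.h>
++#include <stdint.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++int match_sender_id(int conn_fd, uint64_t sender_id, uint64_t cookie)
++{
++  struct {
++    struct kdbus_cmd_match cmd;
++    struct kdbus_item item;  /* relies on items[0] being last in cmd */
++  } m;
++
++  memset(&m, 0, sizeof(m));
++  m.item.size = offsetof(struct kdbus_item, id) + sizeof(m.item.id);
++  m.item.type = KDBUS_ITEM_ID;
++  m.item.id   = sender_id;   /* or KDBUS_MATCH_ID_ANY */
++
++  m.cmd.size   = sizeof(m.cmd) + m.item.size;
++  m.cmd.cookie = cookie;     /* remembered for KDBUS_CMD_MATCH_REMOVE */
++
++  return ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, &m);
++}
++]]></programlisting>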
++  </refsect1>
++
++  <refsect1>
++    <title>Removing a match</title>
++    <para>
++      Matches can be removed with the
++      <constant>KDBUS_CMD_MATCH_REMOVE</constant> ioctl, which takes
++      <type>struct kdbus_cmd_match</type> as argument, but the usage of
++      its fields differs slightly from that of
++      <constant>KDBUS_CMD_MATCH_ADD</constant>.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_match {
++  __u64 size;
++  __u64 cookie;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>cookie</varname></term>
++        <listitem><para>
++          The cookie of the match, as it was passed when the match was added.
++          All matches that have this cookie will be removed.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          No flags are supported for this use case.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will fail with
++          <errorcode>-1</errorcode>, <varname>errno</varname> will be set to
++          <constant>EPROTO</constant>, and the <varname>flags</varname> field
++          will be set to <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            No items are supported for this use case, but
++            <constant>KDBUS_ITEM_NEGOTIATE</constant> is allowed nevertheless.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
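++
++    <para>
++      Continuing the hypothetical example from the previous section, a match
++      installed with a given cookie can be removed as sketched below; error
++      handling is omitted for brevity.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: remove all matches registered with the given cookie. */
++#include <stdint.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++int remove_match(int conn_fd, uint64_t cookie)
++{
++  struct kdbus_cmd_match cmd = {
++    .size   = sizeof(cmd),
++    .cookie = cookie,
++    /* no flags and no items are supported here */
++  };
++
++  return ioctl(conn_fd, KDBUS_CMD_MATCH_REMOVE, &cmd);
++}
++]]></programlisting>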
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_MATCH_ADD</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Illegal flags or items.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EDOM</constant></term>
++          <listitem><para>
++            Illegal bloom filter size.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMFILE</constant></term>
++          <listitem><para>
++            Too many matches for this connection.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_MATCH_REMOVE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Illegal flags.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EBADSLT</constant></term>
++          <listitem><para>
++            A match entry with the given cookie could not be found.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.match</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.message.xml b/Documentation/kdbus/kdbus.message.xml
+new file mode 100644
+index 0000000..0115d9d
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.message.xml
+@@ -0,0 +1,1276 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.message">
++
++  <refentryinfo>
++    <title>kdbus.message</title>
++    <productname>kdbus.message</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.message</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.message</refname>
++    <refpurpose>kdbus message</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      A kdbus message is used to exchange information between two connections
++      on a bus, or to transport notifications from the kernel to one or many
++      connections. This document describes the layout of messages, how payload
++      is added to them and how they are sent and received.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Message layout</title>
++
++    <para>The layout of a message is shown below.</para>
++
++    <programlisting>
++  +-------------------------------------------------------------------------+
++  | Message                                                                 |
++  | +---------------------------------------------------------------------+ |
++  | | Header                                                              | |
++  | | size:          overall message size, including the data records     | |
++  | | destination:   connection ID of the receiver                        | |
++  | | source:        connection ID of the sender (set by kernel)          | |
++  | | payload_type:  "DBusDBus" textual identifier stored as uint64_t     | |
++  | +---------------------------------------------------------------------+ |
++  | +---------------------------------------------------------------------+ |
++  | | Data Record                                                         | |
++  | | size:  overall record size (without padding)                        | |
++  | | type:  type of data                                                 | |
++  | | data:  reference to data (address or file descriptor)               | |
++  | +---------------------------------------------------------------------+ |
++  | +---------------------------------------------------------------------+ |
++  | | padding bytes to the next 8 byte alignment                          | |
++  | +---------------------------------------------------------------------+ |
++  | +---------------------------------------------------------------------+ |
++  | | Data Record                                                         | |
++  | | size:  overall record size (without padding)                        | |
++  | | ...                                                                 | |
++  | +---------------------------------------------------------------------+ |
++  | +---------------------------------------------------------------------+ |
++  | | padding bytes to the next 8 byte alignment                          | |
++  | +---------------------------------------------------------------------+ |
++  | +---------------------------------------------------------------------+ |
++  | | Data Record                                                         | |
++  | | size:  overall record size                                          | |
++  | | ...                                                                 | |
++  | +---------------------------------------------------------------------+ |
++  |   ... further data records ...                                          |
++  +-------------------------------------------------------------------------+
++    </programlisting>
++  </refsect1>
++
++  <refsect1>
++    <title>Message payload</title>
++
++    <para>
++      When connecting to the bus, receivers request a memory pool of a given
++      size, large enough to carry the entire backlog of data enqueued for the
++      connection. The pool is internally backed by a shared memory file which
++      can be <function>mmap()</function>ed by the receiver. See
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information.
++    </para>
++
++    <para>
++      Message payload must be described in items attached to a message when
++      it is sent. A receiver can access the payload by looking at the items
++      that are attached to a message in its pool. The following items are used.
++    </para>
++
++    <variablelist>
++      <varlistentry>
++        <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
++        <listitem>
++          <para>
++            This item references a piece of memory on the sender side which is
++            directly copied into the receiver's pool. This way, two peers can
++            exchange data by effectively doing a single-copy from one process
++            to another; the kernel will not buffer the data anywhere else.
++            This item is never found in a message received by a connection.
++          </para>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><constant>KDBUS_ITEM_PAYLOAD_OFF</constant></term>
++        <listitem>
++          <para>
++            This item is attached to messages on the receiving side and points
++            to a memory area inside the receiver's pool. The
++            <varname>offset</varname> variable in the item denotes the memory
++            location relative to the message itself.
++          </para>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
++        <listitem>
++          <para>
++            Messages can reference <emphasis>memfd</emphasis> files which
++            contain the data. memfd files are tmpfs-backed files that allow
++            sealing of the content of the file, which prevents all writable
++            access to the file content.
++          </para>
++          <para>
++            Only memfds that have
++            <constant>(F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_SEAL)
++            </constant>
++            set are accepted as payload data, which enforces reliable passing of
++            data. The receiver can assume that neither the sender nor anyone
++            else can alter the content after the message is sent. If those
++            seals are not set on the memfd, the ioctl will fail with
++            <errorcode>-1</errorcode>, and <varname>errno</varname> will be
++            set to <constant>ETXTBSY</constant>. A sketch of how to create
++            such a memfd follows this list.
++          </para>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><constant>KDBUS_ITEM_FDS</constant></term>
++        <listitem>
++          <para>
++            Messages can transport regular file descriptors via
++            <constant>KDBUS_ITEM_FDS</constant>. This item carries an array
++            of <type>int</type> values in <varname>item.fd</varname>. The
++            maximum number of file descriptors in the item is
++            <constant>253</constant>, and only one item of this type is
++            accepted per message. All passed values must be valid file
++            descriptors; the open count of each file descriptor is increased
++            by installing it into the receiver's task. This item can only be
++            used for directed messages, not for broadcasts, and only with
++            remote peers that have opted in to receiving file descriptors
++            at connection time (<constant>KDBUS_HELLO_ACCEPT_FD</constant>).
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
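++    <para>
++      The following sketch creates a memfd that satisfies the sealing
++      requirements above. It assumes a libc that exposes
++      <function>memfd_create</function> and the
++      <constant>F_ADD_SEALS</constant> command; older libcs may need to
++      invoke the system call directly via
++      <citerefentry>
++        <refentrytitle>syscall</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: create a fully sealed memfd for KDBUS_ITEM_PAYLOAD_MEMFD. */
++#define _GNU_SOURCE
++#include <fcntl.h>
++#include <sys/mman.h>
++#include <unistd.h>
++
++int make_sealed_memfd(const void *data, size_t len)
++{
++  int fd = memfd_create("payload", MFD_ALLOW_SEALING);
++
++  if (fd < 0)
++    return -1;
++
++  /* All four seals are required before kdbus accepts the memfd. */
++  if (write(fd, data, len) != (ssize_t)len ||
++      fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
++                             F_SEAL_WRITE | F_SEAL_SEAL) < 0) {
++    close(fd);
++    return -1;
++  }
++
++  return fd;
++}
++]]></programlisting>
++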
++    <para>
++      The sender must not make any assumptions about the form in which data
++      is received by the remote peer. The kernel is free to re-pack multiple
++      <constant>KDBUS_ITEM_PAYLOAD_VEC</constant> and
++      <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> payloads. For instance, the
++      kernel may decide to merge multiple <constant>VECs</constant> into a
++      single <constant>VEC</constant>, inline <constant>MEMFD</constant>
++      payloads into memory, or merge all passed <constant>VECs</constant> into a
++      single <constant>MEMFD</constant>. However, the kernel preserves the order
++      of passed data. This means that the order of all <constant>VEC</constant>
++      and <constant>MEMFD</constant> items is not changed with respect to each
++      other. In other words: all passed <constant>VEC</constant> and
++      <constant>MEMFD</constant> data payloads are treated as a single stream
++      of data that may be received by the remote peer in a different set of
++      chunks than it was sent as.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Sending messages</title>
++
++    <para>
++      Messages are passed to the kernel with the
++      <constant>KDBUS_CMD_SEND</constant> ioctl. Depending on the destination
++      address of the message, the kernel delivers the message to the specific
++      destination connection, or to some subset of all connections on the same
++      bus. Sending messages across buses is not possible. Messages are always
++      queued in the memory pool of the destination connection (see above).
++    </para>
++
++    <para>
++      The <constant>KDBUS_CMD_SEND</constant> ioctl uses a
++      <type>struct kdbus_cmd_send</type> to describe the message
++      transfer.
++    </para>
++    <programlisting>
++struct kdbus_cmd_send {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __u64 msg_address;
++  struct kdbus_msg_info reply;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>Flags for message delivery.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_SEND_SYNC_REPLY</constant></term>
++              <listitem>
++                <para>
++                  By default, all calls to kdbus are asynchronous and
++                  non-blocking. However, as many use cases need to wait for a
++                  remote peer to answer a method call, there is a way to send
++                  a message and wait for a reply in a synchronous fashion.
++                  This is what the <constant>KDBUS_SEND_SYNC_REPLY</constant>
++                  flag controls. The <constant>KDBUS_CMD_SEND</constant> ioctl
++                  will block until the reply has arrived, the timeout limit
++                  is reached, the remote connection is shut down, or the call
++                  is interrupted by a signal before any reply arrives; see
++                  <citerefentry>
++                    <refentrytitle>signal</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>.
++
++                  The offset of the reply message in the sender's pool is stored
++                  in <varname>reply</varname> when the ioctl has returned without
++                  error. Hence, there is no need for another
++                  <constant>KDBUS_CMD_RECV</constant> ioctl or anything else to
++                  receive the reply.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Request a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will fail with
++                  <errorcode>-1</errorcode>, and <varname>errno</varname>
++                  will be set to <constant>EPROTO</constant>.
++                  Once the ioctl has returned, the <varname>flags</varname>
++                  field will have all bits set that the kernel recognizes as
++                  valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>msg_address</varname></term>
++        <listitem><para>
++          In this field, users have to provide a pointer to a message
++          (<type>struct kdbus_msg</type>) to send. See below for a
++          detailed description.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>reply</varname></term>
++        <listitem><para>
++          Only used for synchronous replies. See description of
++          <type>struct kdbus_cmd_recv</type> for more details.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            The following items are currently recognized.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_CANCEL_FD</constant></term>
++              <listitem>
++                <para>
++                  When this optional item is passed in, and the call is
++                  executed as a SYNC call, the passed-in file descriptor can
++                  be used as an alternative cancellation point. The kernel
++                  will call
++                  <citerefentry>
++                    <refentrytitle>poll</refentrytitle>
++                    <manvolnum>2</manvolnum>
++                  </citerefentry>
++                  on this file descriptor, and once it reports any incoming
++                  bytes, the blocking send operation will be canceled; the
++                  blocking, synchronous ioctl call will return
++                  <errorcode>-1</errorcode>, and <varname>errno</varname> will
++                  be set to <errorname>ECANCELED</errorname>.
++                  Any type of file descriptor on which
++                  <citerefentry>
++                    <refentrytitle>poll</refentrytitle>
++                    <manvolnum>2</manvolnum>
++                  </citerefentry>
++                  can be called can be used as payload for this item; for
++                  example, an eventfd can be used for this purpose, see
++                  <citerefentry>
++                    <refentrytitle>eventfd</refentrytitle>
++                    <manvolnum>2</manvolnum>
++                  </citerefentry>.
++                  For asynchronous message sending, this item is allowed but
++                  ignored. A usage sketch follows this field list.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
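++
++    <para>
++      As an illustration of the synchronous mode and the cancellation item
++      described above, the following sketch issues a blocking send that can
++      be aborted by writing to an eventfd. It is a sketch only: the message
++      pointed to by <varname>msg</varname> is assumed to be fully prepared
++      (see the layout below), <varname>conn_fd</varname> is a hypothetical
++      connected kdbus file descriptor, and the item's <varname>fds</varname>
++      member name is assumed from the kdbus UAPI header.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: synchronous send with an eventfd as cancellation point. */
++#include <stddef.h>
++#include <stdint.h>
++#include <string.h>
++#include <sys/eventfd.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++#define ALIGN8(v) (((v) + 7) & ~7ULL)
++
++int send_sync_cancelable(int conn_fd, struct kdbus_msg *msg, int *efd_out)
++{
++  struct {
++    struct kdbus_cmd_send cmd;
++    struct kdbus_item item;  /* relies on items[0] being last in cmd */
++  } s;
++  int efd = eventfd(0, EFD_CLOEXEC);
++
++  if (efd < 0)
++    return -1;
++
++  memset(&s, 0, sizeof(s));
++  s.item.size   = offsetof(struct kdbus_item, fds) + sizeof(int);
++  s.item.type   = KDBUS_ITEM_CANCEL_FD;
++  s.item.fds[0] = efd;       /* writing to efd cancels the blocking call */
++
++  /* msg must carry KDBUS_MSG_EXPECT_REPLY, cookie and timeout_ns. */
++  s.cmd.size        = sizeof(s.cmd) + ALIGN8(s.item.size);
++  s.cmd.flags       = KDBUS_SEND_SYNC_REPLY;
++  s.cmd.msg_address = (uint64_t)(uintptr_t)msg;
++
++  *efd_out = efd;
++  return ioctl(conn_fd, KDBUS_CMD_SEND, &s.cmd);
++}
++]]></programlisting>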
++
++    <para>
++      The message referenced by the <varname>msg_address</varname> above has
++      the following layout.
++    </para>
++
++    <programlisting>
++struct kdbus_msg {
++  __u64 size;
++  __u64 flags;
++  __s64 priority;
++  __u64 dst_id;
++  __u64 src_id;
++  __u64 payload_type;
++  __u64 cookie;
++  __u64 timeout_ns;
++  __u64 cookie_reply;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>Flags to describe message details.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_MSG_EXPECT_REPLY</constant></term>
++              <listitem>
++                <para>
++                  Expect a reply to this message from the remote peer. With
++                  this bit set, the timeout_ns field must be set to a non-zero
++                  number of nanoseconds in which the receiving peer is expected
++                  to reply. If such a reply is not received in time, the sender
++                  will be notified with a timeout message (see below). The
++                  value must be an absolute value, in nanoseconds and based on
++                  <constant>CLOCK_MONOTONIC</constant>.
++                </para><para>
++                  For a message to be accepted as reply, it must be a direct
++                  message to the original sender (not a broadcast and not a
++                  signal message), and its
++                  <varname>kdbus_msg.cookie_reply</varname> must match the
++                  previous message's <varname>kdbus_msg.cookie</varname>.
++                </para><para>
++                  Expected replies also temporarily open the policy of the
++                  sending connection, so the other peer is allowed to respond
++                  within the given time window.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_MSG_NO_AUTO_START</constant></term>
++              <listitem>
++                <para>
++                  By default, when a message is sent to an activator
++                  connection, the activator is notified and will start an
++                  implementer. This flag inhibits that behavior. With this bit
++                  set, and the remote being an activator, the ioctl will fail
++                  with <varname>errno</varname> set to
++                  <constant>EADDRNOTAVAIL</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Requests a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>priority</varname></term>
++        <listitem><para>
++          The priority of this message. Receiving messages (see below) may
++          optionally be constrained to messages of a minimum priority. This
++          allows for use cases where timing-critical data is interleaved with
++          control data on the same connection. If unused, the priority field
++          should be set to <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>dst_id</varname></term>
++        <listitem><para>
++          The numeric ID of the destination connection, or
++          <constant>KDBUS_DST_ID_BROADCAST</constant>
++          (~0ULL) to address every peer on the bus, or
++          <constant>KDBUS_DST_ID_NAME</constant> (0) to look
++          it up dynamically from the bus' name registry.
++          In the latter case, an item of type
++          <constant>KDBUS_ITEM_DST_NAME</constant> is mandatory.
++          Also see
++          <citerefentry>
++            <refentrytitle>kdbus.name</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>src_id</varname></term>
++        <listitem><para>
++          Upon return of the ioctl, this member will contain the sending
++          connection's numerical ID. It should be set to 0 at send time.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>payload_type</varname></term>
++        <listitem><para>
++          Type of the payload in the actual data records. Currently, only
++          <constant>KDBUS_PAYLOAD_DBUS</constant> is accepted as input value
++          of this field. When receiving messages that are generated by the
++          kernel (notifications), this field will contain
++          <constant>KDBUS_PAYLOAD_KERNEL</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>cookie</varname></term>
++        <listitem><para>
++          Cookie of this message, for later recognition. Also, when replying
++          to a message (see above), the <varname>cookie_reply</varname>
++          field must match this value.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>timeout_ns</varname></term>
++        <listitem><para>
++          If the message sent requires a reply from the remote peer (see above),
++          this field contains the timeout in absolute nanoseconds based on
++          <constant>CLOCK_MONOTONIC</constant>. Also see
++          <citerefentry>
++            <refentrytitle>clock_gettime</refentrytitle>
++            <manvolnum>2</manvolnum>
++          </citerefentry>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>cookie_reply</varname></term>
++        <listitem><para>
++          If the message sent is a reply to another message, this field must
++          match the cookie of the formerly received message.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            A dynamically sized list of items to contain additional information.
++            The following items are expected/valid:
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_PAYLOAD_VEC</constant></term>
++              <term><constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant></term>
++              <term><constant>KDBUS_ITEM_FDS</constant></term>
++              <listitem>
++                <para>
++                  Actual data records containing the payload. See section
++                  "Message payload".
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_BLOOM_FILTER</constant></term>
++              <listitem>
++                <para>
++                  Bloom filter for matches (see below).
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_ITEM_DST_NAME</constant></term>
++              <listitem>
++                <para>
++                  Well-known name to send this message to. Required if
++                  <varname>dst_id</varname> is set to
++                  <constant>KDBUS_DST_ID_NAME</constant>.
++                  If a connection holding the given name can't be found,
++                  the ioctl will fail with <varname>errno</varname> set to
++                  <constant>ESRCH</constant>.
++                </para>
++                <para>
++                  For messages to a unique name (ID), this item is optional. If
++                  present, the kernel will make sure the name owner matches the
++                  given unique name. This allows programs to tie the message
++                  sending to the condition that a name is currently owned by a
++                  certain unique name.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      The message will be augmented by the requested metadata items when
++      queued into the receiver's pool. See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      and
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information on metadata.
++    </para>
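++
++    <para>
++      Putting the above together, the following sketch sends a single
++      payload vector to a numeric destination connection ID. It is
++      illustrative only: <varname>conn_fd</varname> is a hypothetical
++      connected kdbus file descriptor, and the <varname>vec</varname>
++      member names are assumed from the kdbus UAPI header.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: send one KDBUS_ITEM_PAYLOAD_VEC to a numeric connection ID. */
++#include <stddef.h>
++#include <stdint.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++int send_to_id(int conn_fd, uint64_t dst_id, const void *data,
++               uint64_t len, uint64_t cookie)
++{
++  struct {
++    struct kdbus_msg msg;
++    struct kdbus_item item;  /* relies on items[0] being last in msg */
++  } m;
++  struct kdbus_cmd_send cmd;
++
++  memset(&m, 0, sizeof(m));
++  m.item.size        = offsetof(struct kdbus_item, vec) + sizeof(m.item.vec);
++  m.item.type        = KDBUS_ITEM_PAYLOAD_VEC;
++  m.item.vec.address = (uint64_t)(uintptr_t)data;  /* copied at send time */
++  m.item.vec.size    = len;
++
++  m.msg.size         = sizeof(m.msg) + m.item.size;
++  m.msg.dst_id       = dst_id;
++  m.msg.payload_type = KDBUS_PAYLOAD_DBUS;
++  m.msg.cookie       = cookie;
++
++  memset(&cmd, 0, sizeof(cmd));
++  cmd.size        = sizeof(cmd);
++  cmd.msg_address = (uint64_t)(uintptr_t)&m;
++
++  return ioctl(conn_fd, KDBUS_CMD_SEND, &cmd);
++}
++]]></programlisting>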
++  </refsect1>
++
++  <refsect1>
++    <title>Receiving messages</title>
++
++    <para>
++      Messages are received by the client with the
++      <constant>KDBUS_CMD_RECV</constant> ioctl. The endpoint file of the bus
++      supports <function>poll()/epoll()/select()</function>; when new messages
++      are available on the connection's file descriptor,
++      <constant>POLLIN</constant> is reported. For compatibility reasons,
++      <constant>POLLOUT</constant> is always reported as well. Note, however,
++      that the latter does not guarantee that a message can in fact be sent, as
++      this depends on how many pending messages the receiver has in its pool.
++    </para>
++
++    <para>
++      With the <constant>KDBUS_CMD_RECV</constant> ioctl, a
++      <type>struct kdbus_cmd_recv</type> is used.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_recv {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __s64 priority;
++  __u64 dropped_msgs;
++  struct kdbus_msg_info msg;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>Flags to control the receive command.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_RECV_PEEK</constant></term>
++              <listitem>
++                <para>
++                  Just return the location of the next message. Do not install
++                  file descriptors or anything else. This is usually used to
++                  determine the sender of the next queued message.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_RECV_DROP</constant></term>
++              <listitem>
++                <para>
++                  Drop the next message without doing anything else with it,
++                  and free the pool slice. This is a shortcut for
++                  <constant>KDBUS_RECV_PEEK</constant> and
++                  <constant>KDBUS_CMD_FREE</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_RECV_USE_PRIORITY</constant></term>
++              <listitem>
++                <para>
++                  Dequeue messages ordered by their priority, filtering
++                  them with the priority field (see below).
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Request a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will fail with
++                  <errorcode>-1</errorcode>, and <varname>errno</varname>
++                  will be set to <constant>EPROTO</constant>.
++                  Once the ioctl has returned, the <varname>flags</varname>
++                  field will have all bits set that the kernel recognizes as
++                  valid for this command.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. If the <varname>dropped_msgs</varname>
++          field is non-zero, <constant>KDBUS_RECV_RETURN_DROPPED_MSGS</constant>
++          is set. If a file descriptor could not be installed, the
++          <constant>KDBUS_RECV_RETURN_INCOMPLETE_FDS</constant> flag is set.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>priority</varname></term>
++        <listitem><para>
++          With <constant>KDBUS_RECV_USE_PRIORITY</constant> set in
++          <varname>flags</varname>, messages will be dequeued ordered by their
++          priority, starting with the highest value. Also, messages will be
++          filtered by the value given in this field, so the returned message
++          will at least have the requested priority. If no such message is
++          waiting in the queue, the ioctl will fail, and
++          <varname>errno</varname> will be set to <constant>EAGAIN</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>dropped_msgs</varname></term>
++        <listitem><para>
++          Whenever a message with <constant>KDBUS_MSG_SIGNAL</constant> is sent
++          but cannot be queued on a peer (e.g., because it contains FDs but
++          the peer does not support FDs, or there is no space left in the
++          peer's pool), the <varname>dropped_msgs</varname> counter of the
++          peer is incremented. On the next RECV ioctl, the
++          <varname>dropped_msgs</varname> field is copied into the ioctl
++          struct and cleared on the peer. If it was non-zero, the
++          <constant>KDBUS_RECV_RETURN_DROPPED_MSGS</constant> flag will be set
++          in <varname>return_flags</varname>. Note that this will only happen
++          if the ioctl succeeded or failed with <constant>EAGAIN</constant>.
++          In other error cases, the <varname>dropped_msgs</varname> field of
++          the peer is left
++          untouched.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>msg</varname></term>
++        <listitem><para>
++          Embedded struct containing information on the received message when
++          this command succeeded (see below).
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem><para>
++          Items to specify further details for the receive command.
++          Currently unused, and all items will be rejected with
++          <varname>errno</varname> set to <constant>EINVAL</constant>.
++        </para></listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Both <type>struct kdbus_cmd_recv</type> and
++      <type>struct kdbus_cmd_send</type> embed
++      <type>struct kdbus_msg_info</type>.
++      For the <constant>KDBUS_CMD_SEND</constant> ioctl, it is used to catch
++      synchronous replies, if one was requested, and is unused otherwise.
++    </para>
++
++    <programlisting>
++struct kdbus_msg_info {
++  __u64 offset;
++  __u64 msg_size;
++  __u64 return_flags;
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>offset</varname></term>
++        <listitem><para>
++          Upon return of the ioctl, this field contains the offset in the
++          receiver's memory pool. The memory must be freed with
++          <constant>KDBUS_CMD_FREE</constant>. See
++          <citerefentry>
++            <refentrytitle>kdbus.pool</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for further details.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>msg_size</varname></term>
++        <listitem><para>
++          Upon successful return of the ioctl, this field contains the size of
++          the allocated slice at offset <varname>offset</varname>.
++          It is the size of the stored
++          <type>struct kdbus_msg</type> object plus that of all appended VECs.
++          You can use it in combination with <varname>offset</varname> to map
++          a single message, instead of mapping the entire pool. See
++          <citerefentry>
++            <refentrytitle>kdbus.pool</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for further details.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem>
++          <para>
++            Kernel-provided return flags. Currently, the following flags are
++            defined.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_RECV_RETURN_INCOMPLETE_FDS</constant></term>
++              <listitem>
++                <para>
++                  The message contained memfds or file descriptors, and the
++                  kernel failed to install one or more of them at receive time.
++                  Most probably, this happened because the maximum number of
++                  file descriptors for the receiver's task was exceeded.
++                  In such cases, the message is still delivered, so this is not
++                  a fatal condition. File descriptor numbers inside the
++                  <constant>KDBUS_ITEM_FDS</constant> item or memfd files
++                  referenced by <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant>
++                  items which could not be installed will be set to
++                  <constant>-1</constant>.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      Unless <constant>KDBUS_RECV_DROP</constant> was passed, the
++      <varname>offset</varname> field contains the location of the new message
++      inside the receiver's pool after the <constant>KDBUS_CMD_RECV</constant>
++      ioctl was employed. The message is stored as <type>struct kdbus_msg</type>
++      at this offset, and can be interpreted with the semantics described above.
++    </para>
++    <para>
++      Also, if the connection allowed file descriptors to be passed
++      (<constant>KDBUS_HELLO_ACCEPT_FD</constant>), and if the message contained
++      any, they will be installed into the receiving process when the
++      <constant>KDBUS_CMD_RECV</constant> ioctl is called.
++      <emphasis>memfds</emphasis> may always be part of the message payload.
++      The receiving task is obliged to close all file descriptors appropriately
++      once no longer needed. If <constant>KDBUS_RECV_PEEK</constant> is set, no
++      file descriptors are installed. This allows for peeking at a message,
++      looking at its metadata only and dropping it via
++      <constant>KDBUS_RECV_DROP</constant>, without installing any of the file
++      descriptors into the receiving process.
++    </para>
++    <para>
++      The caller is obliged to call the <constant>KDBUS_CMD_FREE</constant>
++      ioctl with the returned offset when the memory is no longer needed.
++    </para>
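++
++    <para>
++      The following sketch dequeues one message and frees its pool slice
++      afterwards. It assumes <varname>conn_fd</varname> is a hypothetical
++      connected kdbus file descriptor whose pool has been
++      <function>mmap()</function>ed at <varname>pool</varname>, and that
++      <type>struct kdbus_cmd_free</type> takes the slice offset as described
++      in
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: dequeue one message, inspect it, then free its pool slice. */
++#include <stdint.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++int recv_one(int conn_fd, const uint8_t *pool)
++{
++  struct kdbus_cmd_recv recv;
++  struct kdbus_cmd_free cmd_free;
++  const struct kdbus_msg *msg;
++
++  memset(&recv, 0, sizeof(recv));
++  recv.size = sizeof(recv);
++
++  if (ioctl(conn_fd, KDBUS_CMD_RECV, &recv) < 0)
++    return -1;                 /* EAGAIN: the queue is empty */
++
++  msg = (const struct kdbus_msg *)(pool + recv.msg.offset);
++  printf("message %llu from connection %llu\n",
++         (unsigned long long)msg->cookie,
++         (unsigned long long)msg->src_id);
++
++  memset(&cmd_free, 0, sizeof(cmd_free));
++  cmd_free.size   = sizeof(cmd_free);
++  cmd_free.offset = recv.msg.offset;
++
++  return ioctl(conn_fd, KDBUS_CMD_FREE, &cmd_free);
++}
++]]></programlisting>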
++  </refsect1>
++
++  <refsect1>
++    <title>Notifications</title>
++    <para>
++      A kernel notification is a regular kdbus message with the following
++      details.
++    </para>
++
++    <itemizedlist>
++      <listitem><para>
++          kdbus_msg.src_id == <constant>KDBUS_SRC_ID_KERNEL</constant>
++      </para></listitem>
++      <listitem><para>
++        kdbus_msg.dst_id == <constant>KDBUS_DST_ID_BROADCAST</constant>
++      </para></listitem>
++      <listitem><para>
++        kdbus_msg.payload_type == <constant>KDBUS_PAYLOAD_KERNEL</constant>
++      </para></listitem>
++      <listitem><para>
++        Has exactly one of the items described below attached.
++      </para></listitem>
++      <listitem><para>
++        Always has a timestamp item (<constant>KDBUS_ITEM_TIMESTAMP</constant>)
++        attached.
++      </para></listitem>
++    </itemizedlist>
++
++    <para>
++      The kernel will notify its users of the following events.
++    </para>
++
++    <itemizedlist>
++      <listitem><para>
++        When connection <emphasis>A</emphasis> is terminated while connection
++        <emphasis>B</emphasis> is waiting for a reply from it, connection
++        <emphasis>B</emphasis> is notified with a message with an item of
++        type <constant>KDBUS_ITEM_REPLY_DEAD</constant>.
++      </para></listitem>
++
++      <listitem><para>
++        When connection <emphasis>A</emphasis> does not receive a reply from
++        connection <emphasis>B</emphasis> within the specified timeout window,
++        connection <emphasis>A</emphasis> will receive a message with an
++        item of type <constant>KDBUS_ITEM_REPLY_TIMEOUT</constant>.
++      </para></listitem>
++
++      <listitem><para>
++        When an ordinary connection (not a monitor) is created on or removed
++        from a bus, messages with an item of type
++        <constant>KDBUS_ITEM_ID_ADD</constant> or
++        <constant>KDBUS_ITEM_ID_REMOVE</constant>, respectively, are delivered
++        to all bus members that match these messages through their match
++        database. Eavesdroppers (monitor connections) do not cause such
++        notifications to be sent. They are invisible on the bus.
++      </para></listitem>
++
++      <listitem><para>
++        When a connection gains or loses ownership of a name, messages with an
++        item of type <constant>KDBUS_ITEM_NAME_ADD</constant>,
++        <constant>KDBUS_ITEM_NAME_REMOVE</constant> or
++        <constant>KDBUS_ITEM_NAME_CHANGE</constant> are delivered to all bus
++        members that match these messages through their match database.
++      </para></listitem>
++    </itemizedlist>
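++
++    <para>
++      A receiver can tell notifications apart from regular traffic by
++      checking <varname>payload_type</varname> and then walking the attached
++      items. The sketch below assumes the item layout described in
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>:
++      a header of two <type>__u64</type> fields (size and type) followed by
++      the payload, with each item padded to the next 8-byte boundary.
++    </para>
++    <programlisting><![CDATA[
++/* Sketch: walk the items of a kernel notification. */
++#include <stdint.h>
++#include <stdio.h>
++#include <linux/kdbus.h>
++
++#define ALIGN8(v) (((v) + 7) & ~7ULL)
++
++void dump_notification(const struct kdbus_msg *msg)
++{
++  const uint8_t *pos = (const uint8_t *)msg->items;
++  const uint8_t *end = (const uint8_t *)msg + msg->size;
++
++  if (msg->payload_type != KDBUS_PAYLOAD_KERNEL)
++    return;                    /* not a kernel notification */
++
++  while (pos < end) {
++    const struct kdbus_item *item = (const struct kdbus_item *)pos;
++
++    printf("item type %llu, size %llu\n",
++           (unsigned long long)item->type,
++           (unsigned long long)item->size);
++    pos += ALIGN8(item->size);
++  }
++}
++]]></programlisting>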
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_SEND</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EOPNOTSUPP</constant></term>
++          <listitem><para>
++            The connection is not an ordinary connection, or the passed
++            file descriptors in the <constant>KDBUS_ITEM_FDS</constant> item are
++            either kdbus handles or unix domain sockets. Both are currently
++            unsupported.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The submitted payload type is
++            <constant>KDBUS_PAYLOAD_KERNEL</constant>,
++            <constant>KDBUS_MSG_EXPECT_REPLY</constant> was set without timeout
++            or cookie values, <constant>KDBUS_SEND_SYNC_REPLY</constant> was
++            set without <constant>KDBUS_MSG_EXPECT_REPLY</constant>, an invalid
++            item was supplied, <constant>src_id</constant> was non-zero and was
++            different from the current connection's ID, a supplied memfd had a
++            size of 0, or a string was not properly null-terminated.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ENOTUNIQ</constant></term>
++          <listitem><para>
++            The supplied destination is
++            <constant>KDBUS_DST_ID_BROADCAST</constant> and either
++            file descriptors were passed, or
++            <constant>KDBUS_MSG_EXPECT_REPLY</constant> was set,
++            or a timeout was given.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>E2BIG</constant></term>
++          <listitem><para>
++            Too many items.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMSGSIZE</constant></term>
++          <listitem><para>
++            The size of the message header and items or the payload vector
++            is excessive.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EEXIST</constant></term>
++          <listitem><para>
++            Multiple <constant>KDBUS_ITEM_FDS</constant>,
++            <constant>KDBUS_ITEM_BLOOM_FILTER</constant> or
++            <constant>KDBUS_ITEM_DST_NAME</constant> items were supplied.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EBADF</constant></term>
++          <listitem><para>
++            The supplied <constant>KDBUS_ITEM_FDS</constant> or
++            <constant>KDBUS_ITEM_PAYLOAD_MEMFD</constant> items
++            contained an illegal file descriptor.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMEDIUMTYPE</constant></term>
++          <listitem><para>
++            The supplied memfd is not a sealed kdbus memfd.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EMFILE</constant></term>
++          <listitem><para>
++            Too many file descriptors inside a
++            <constant>KDBUS_ITEM_FDS</constant>.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EBADMSG</constant></term>
++          <listitem><para>
++            An item had an illegal size, both a <constant>dst_id</constant> and a
++            <constant>KDBUS_ITEM_DST_NAME</constant> were given, or both a name
++            and a bloom filter were given.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ETXTBSY</constant></term>
++          <listitem><para>
++            The supplied kdbus memfd file cannot be sealed or the seal
++            was removed, because it is shared with other processes or
++            still mapped with
++            <citerefentry>
++              <refentrytitle>mmap</refentrytitle>
++              <manvolnum>2</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ECOMM</constant></term>
++          <listitem><para>
++            A peer does not accept the file descriptors addressed to it.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EFAULT</constant></term>
++          <listitem><para>
++            The supplied bloom filter size was not 64-bit aligned, or supplied
++            memory could not be accessed by the kernel.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EDOM</constant></term>
++          <listitem><para>
++            The supplied bloom filter size did not match the bloom filter
++            size of the bus.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EDESTADDRREQ</constant></term>
++          <listitem><para>
++            <constant>dst_id</constant> was set to
++            <constant>KDBUS_DST_ID_NAME</constant>, but no
++            <constant>KDBUS_ITEM_DST_NAME</constant> was attached.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ESRCH</constant></term>
++          <listitem><para>
++            The name to look up was not found in the name registry.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EADDRNOTAVAIL</constant></term>
++          <listitem><para>
++            <constant>KDBUS_MSG_NO_AUTO_START</constant> was given but the
++            destination connection is an activator.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ENXIO</constant></term>
++          <listitem><para>
++            The passed numeric destination connection ID couldn't be found,
++            or is not connected.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ECONNRESET</constant></term>
++          <listitem><para>
++            The destination connection is no longer active.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ETIMEDOUT</constant></term>
++          <listitem><para>
++            Timeout while synchronously waiting for a reply.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINTR</constant></term>
++          <listitem><para>
++            Interrupted system call while synchronously waiting for a reply.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EPIPE</constant></term>
++          <listitem><para>
++            When sending a message, a synchronous reply from the receiving
++            connection was expected but the connection died before answering.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ENOBUFS</constant></term>
++          <listitem><para>
++            Too many pending messages on the receiver side.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EREMCHG</constant></term>
++          <listitem><para>
++            Both a well-known name and a unique name (ID) were given, but
++            the name is not currently owned by that connection.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EXFULL</constant></term>
++          <listitem><para>
++            The memory pool of the receiver is full.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EREMOTEIO</constant></term>
++          <listitem><para>
++            While synchronously waiting for a reply, the remote peer
++            failed with an I/O error.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_RECV</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EOPNOTSUPP</constant></term>
++          <listitem><para>
++            The connection is not an ordinary connection, or the passed
++            file descriptors are either kdbus handles or unix domain
++            sockets. Both are currently unsupported.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Invalid flags or offset.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EAGAIN</constant></term>
++          <listitem><para>
++            No message found in the queue.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>clock_gettime</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>ioctl</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>poll</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>select</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>epoll</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>eventfd</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>memfd_create</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.name.xml b/Documentation/kdbus/kdbus.name.xml
+new file mode 100644
+index 0000000..3f5f6a6
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.name.xml
+@@ -0,0 +1,711 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.name">
++
++  <refentryinfo>
++    <title>kdbus.name</title>
++    <productname>kdbus.name</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.name</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.name</refname>
++    <refpurpose>kdbus.name</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++    <para>
++      Each
++      <citerefentry>
++        <refentrytitle>kdbus.bus</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      instantiates a name registry to resolve well-known names into unique
++      connection IDs for message delivery. The registry will be queried when a
++      message is sent with <varname>kdbus_msg.dst_id</varname> set to
++      <constant>KDBUS_DST_ID_NAME</constant>, or when a registry dump is
++      requested with <constant>KDBUS_CMD_LIST</constant>.
++    </para>
++
++    <para>
++      All of the below is subject to policy rules for <emphasis>SEE</emphasis>
++      and <emphasis>OWN</emphasis> permissions. See
++      <citerefentry>
++        <refentrytitle>kdbus.policy</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Name validity</title>
++    <para>
++      A name has to comply with the following rules in order to be considered
++      valid.
++    </para>
++
++    <itemizedlist>
++      <listitem>
++        <para>
++          The name has two or more elements separated by a
++          '<literal>.</literal>' (period) character.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          All elements must contain at least one character.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          Each element must only contain the ASCII characters
++          <literal>[A-Z][a-z][0-9]_</literal> and must not begin with a
++          digit.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          The name must contain at least one '<literal>.</literal>' (period)
++          character (and thus at least two elements).
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          The name must not begin with a '<literal>.</literal>' (period)
++          character.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          The name must not exceed <constant>255</constant> characters in
++          length.
++        </para>
++      </listitem>
++    </itemizedlist>
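++
++    <para>
++      For example, <literal>"org.freedesktop.DBus"</literal> and
++      <literal>"com.example._1app"</literal> are valid names, whereas
++      <literal>"org"</literal> (only one element),
++      <literal>".org.example"</literal> (leading period) and
++      <literal>"org.4example"</literal> (element starting with a digit)
++      are not.
++    </para>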
++  </refsect1>
++
++  <refsect1>
++    <title>Acquiring a name</title>
++    <para>
++      To acquire a name, a client uses the
++      <constant>KDBUS_CMD_NAME_ACQUIRE</constant> ioctl with
++      <type>struct kdbus_cmd</type> as argument.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>Flags to control details in the name acquisition.</para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_NAME_REPLACE_EXISTING</constant></term>
++              <listitem>
++                <para>
++                  Acquiring a name that is already present usually fails,
++                  unless this flag is set in the call, and
++                  <constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant> (see below)
++                  was set when the current owner of the name acquired it, or
++                  if the current owner is an activator connection (see
++                  <citerefentry>
++                    <refentrytitle>kdbus.connection</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>).
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant></term>
++              <listitem>
++                <para>
++                  Allow other connections to take over this name. When this
++                  happens, the former owner of the name will be notified
++                  of the loss.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_NAME_QUEUE</constant></term>
++              <listitem>
++                <para>
++                  A name that is already acquired by a connection cannot be
++                  acquired again (unless the
++                  <constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant> flag was
++                  set during acquisition; see above).
++                  However, a connection can put itself in a queue of
++                  connections waiting for the name to be released. Once that
++                  happens, the first connection in that queue becomes the new
++                  owner and is notified accordingly.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Request a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will fail with
++                  <errorcode>-1</errorcode>, and <varname>errno</varname>
++                  is set to <constant>EPROTO</constant>.
++                  Once the ioctl has returned, the <varname>flags</varname>
++                  field will have all bits set that the kernel recognizes as
++                  valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem>
++          <para>
++            Flags returned by the kernel. Currently, the following may be
++            returned by the kernel.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_NAME_IN_QUEUE</constant></term>
++              <listitem>
++                <para>
++                  The name was not acquired yet, but the connection was
++                  placed in the queue of peers waiting for the name.
++                  This can only happen if <constant>KDBUS_NAME_QUEUE</constant>
++                  was set in the <varname>flags</varname> member (see above).
++                  The connection will receive a name owner change notification
++                  once the current owner has given up the name and its
++                  ownership was transferred.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Items describing the name to acquire. Currently, one item of type
++            <constant>KDBUS_ITEM_NAME</constant> is expected and allowed, and
++            the contained string must be a valid bus name.
++            <constant>KDBUS_ITEM_NEGOTIATE</constant> may be used to probe for
++            valid item types. See
++            <citerefentry>
++              <refentrytitle>kdbus.item</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for a detailed description of how this item is used.
++          </para>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
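++
++    <para>
++      Below is a minimal sketch of how such a call could be assembled in C.
++      It is illustrative only: the name, the buffer layout and the
++      <function>KDBUS_ALIGN8()</function> helper are assumptions made for
++      this example, and the exact item layout is described in
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++
++    <programlisting>
++/* Illustrative sketch: acquire "com.example.Service" on a connection
++ * file descriptor 'conn_fd' created via KDBUS_CMD_HELLO. Assumes
++ * linux/kdbus.h, stddef.h, string.h and sys/ioctl.h are included;
++ * error handling is reduced for brevity. */
++#define KDBUS_ALIGN8(l) (((l) + 7) &amp; ~7ULL) /* items are 8-byte aligned */
++
++const char name[] = "com.example.Service";
++
++char buf[sizeof(struct kdbus_cmd) +
++         KDBUS_ALIGN8(offsetof(struct kdbus_item, str) + sizeof(name))];
++struct kdbus_cmd *cmd = (struct kdbus_cmd *) buf;
++struct kdbus_item *item = cmd->items;
++
++memset(buf, 0, sizeof(buf));
++cmd->size = sizeof(buf);
++cmd->flags = KDBUS_NAME_ALLOW_REPLACEMENT | KDBUS_NAME_QUEUE;
++
++item->type = KDBUS_ITEM_NAME;
++item->size = offsetof(struct kdbus_item, str) + sizeof(name);
++memcpy(item->str, name, sizeof(name));
++
++if (ioctl(conn_fd, KDBUS_CMD_NAME_ACQUIRE, cmd) &lt; 0)
++        perror("KDBUS_CMD_NAME_ACQUIRE");
++else if (cmd->return_flags &amp; KDBUS_NAME_IN_QUEUE)
++        ; /* queued; a notification arrives once ownership is transferred */
++    </programlisting>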
++  </refsect1>
++
++  <refsect1>
++    <title>Releasing a name</title>
++    <para>
++      A connection may release a name explicitly with the
++      <constant>KDBUS_CMD_NAME_RELEASE</constant> ioctl. If the connection was
++      an implementer of an activatable name, its pending messages are moved
++      back to the activator. If there are any connections queued up as waiters
++      for the name, the first one in the queue (the oldest entry) will become
++      the new owner. The same happens implicitly for all names once a
++      connection terminates. See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information on connections.
++    </para>
++    <para>
++      The <constant>KDBUS_CMD_NAME_RELEASE</constant> ioctl uses the same data
++      structure as the acquisition call
++      (<constant>KDBUS_CMD_NAME_ACQUIRE</constant>),
++      but with slightly different field usage.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Flags to the command. Currently unused.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
++          and the <varname>flags</varname> field is set to
++          <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Items describing the name to release. Currently, one item of type
++            <constant>KDBUS_ITEM_NAME</constant> is expected and allowed, and
++            the contained string must be a valid bus name.
++            <constant>KDBUS_ITEM_NEGOTIATE</constant> may be used to probe for
++            valid item types. See
++            <citerefentry>
++              <refentrytitle>kdbus.item</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++            for a detailed description of how this item is used.
++          </para>
++          <para>
++            Unrecognized items are rejected, and the ioctl will fail with
++            <varname>errno</varname> set to <constant>EINVAL</constant>.
++          </para>
++        </listitem>
++      </varlistentry>
++    </variablelist>
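++
++    <para>
++      As a purely illustrative continuation of the acquisition sketch above
++      (same assumptions), releasing the name only swaps the ioctl number and
++      clears the flags:
++    </para>
++
++    <programlisting>
++/* 'cmd' is assembled exactly as in the acquisition sketch, but the
++ * command flags are unused here and stay zero. */
++cmd->flags = 0;
++if (ioctl(conn_fd, KDBUS_CMD_NAME_RELEASE, cmd) &lt; 0)
++        perror("KDBUS_CMD_NAME_RELEASE");
++    </programlisting>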
++  </refsect1>
++
++  <refsect1>
++    <title>Dumping the name registry</title>
++    <para>
++      A connection may request a complete or filtered dump of currently active
++      bus names with the <constant>KDBUS_CMD_LIST</constant> ioctl, which
++      takes a <type>struct kdbus_cmd_list</type> as argument.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_list {
++  __u64 flags;
++  __u64 return_flags;
++  __u64 offset;
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem>
++          <para>
++            Any combination of flags to specify which names should be dumped.
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_LIST_UNIQUE</constant></term>
++              <listitem>
++                <para>
++                  List the unique (numeric) IDs of connections, regardless
++                  of whether they own a name or not.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_LIST_NAMES</constant></term>
++              <listitem>
++                <para>
++                  List well-known names stored in the database which are
++                  actively owned by a real connection (not an activator).
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_LIST_ACTIVATORS</constant></term>
++              <listitem>
++                <para>
++                  List names that are owned by an activator.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_LIST_QUEUED</constant></term>
++              <listitem>
++                <para>
++                  List connections that do not yet own a name but are
++                  waiting for it to become available.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Request a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will fail with
++                  <errorcode>-1</errorcode>, and <varname>errno</varname>
++                  is set to <constant>EPROTO</constant>.
++                  Once the ioctl has returned, the <varname>flags</varname>
++                  field will have all bits set that the kernel recognizes as
++                  valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>offset</varname></term>
++        <listitem><para>
++          When the ioctl returns successfully, the offset to the name registry
++          dump inside the connection's pool will be stored in this field.
++        </para></listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      The returned list of names is stored in a <type>struct kdbus_list</type>
++      that in turn contains an array of type <type>struct kdbus_info</type>.
++      The array size in bytes is given as <varname>list_size</varname>.
++      The fields inside <type>struct kdbus_info</type> are described next.
++    </para>
++
++    <programlisting>
++struct kdbus_info {
++  __u64 size;
++  __u64 id;
++  __u64 flags;
++  struct kdbus_item items[0];
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++          The owning connection's unique ID.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          The flags of the owning connection.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem>
++          <para>
++            Items containing the actual name. Currently, one item of type
++            <constant>KDBUS_ITEM_OWNED_NAME</constant> will be attached,
++            including the name's flags. In that item, the flags field of the
++            name may carry the following bits:
++          </para>
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_NAME_ALLOW_REPLACEMENT</constant></term>
++              <listitem>
++                <para>
++                  Other connections are allowed to take over this name from the
++                  connection that owns it.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_NAME_IN_QUEUE</constant></term>
++              <listitem>
++                <para>
++                  When retrieving a list of currently acquired names in the
++                  registry, this flag indicates whether the connection
++                  actually owns the name or is currently waiting for it to
++                  become available.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_NAME_ACTIVATOR</constant></term>
++              <listitem>
++                <para>
++                  An activator connection owns a name as a placeholder for an
++                  implementer, which is started on demand by programs as soon
++                  as the first message arrives. More information on this
++                  topic can be found in
++                  <citerefentry>
++                    <refentrytitle>kdbus.connection</refentrytitle>
++                    <manvolnum>7</manvolnum>
++                  </citerefentry>.
++                </para>
++                <para>
++                  In contrast to
++                  <constant>KDBUS_NAME_REPLACE_EXISTING</constant>,
++                  when a name is taken over from an activator connection, all
++                  the messages that have been queued in the activator
++                  connection will be moved over to the new owner. The activator
++                  connection will still be tracked for the name and will take
++                  control again if the implementer connection terminates.
++                </para>
++                <para>
++                  This flag cannot be used when acquiring a name, but is
++                  implicitly set through <constant>KDBUS_CMD_HELLO</constant>
++                  with <constant>KDBUS_HELLO_ACTIVATOR</constant> set in
++                  <varname>kdbus_cmd_hello.conn_flags</varname>.
++                </para>
++              </listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_FLAG_NEGOTIATE</constant></term>
++              <listitem>
++                <para>
++                  Request a set of valid flags for this ioctl. When this bit is
++                  set, no action is taken; the ioctl will return
++                  <errorcode>0</errorcode>, and the <varname>flags</varname>
++                  field will have all bits set that are valid for this command.
++                  The <constant>KDBUS_FLAG_NEGOTIATE</constant> bit will be
++                  cleared by the operation.
++                </para>
++              </listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      The returned buffer must be freed with the
++      <constant>KDBUS_CMD_FREE</constant> ioctl when the user is finished with
++      it. See
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information.
++    </para>
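++
++    <para>
++      The following sketch shows how a dump could be requested and consumed.
++      It is illustrative only: it assumes <varname>pool</varname> points at
++      the <function>mmap()</function>ed pool buffer of the connection (see
++      <citerefentry>
++        <refentrytitle>kdbus.pool</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>),
++      and error handling is reduced for brevity.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_list cmd_list = {
++        .flags = KDBUS_LIST_UNIQUE | KDBUS_LIST_NAMES,
++};
++
++if (ioctl(conn_fd, KDBUS_CMD_LIST, &amp;cmd_list) == 0) {
++        /* The dump starts at the returned offset inside the pool. Each
++         * kdbus_info record carries its own size, so records are walked
++         * by advancing by the (8-byte aligned) size member. */
++        struct kdbus_info *info;
++
++        info = (struct kdbus_info *) (pool + cmd_list.offset);
++        printf("first entry: connection ID %llu\n",
++               (unsigned long long) info->id);
++
++        /* Hand the slice back with KDBUS_CMD_FREE when done; see
++         * kdbus.pool(7) for details. */
++}
++    </programlisting>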
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_NAME_ACQUIRE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Illegal command flags, illegal name provided, or an activator
++            tried to acquire a second name.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EPERM</constant></term>
++          <listitem><para>
++            Policy prohibited name ownership.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EALREADY</constant></term>
++          <listitem><para>
++            Connection already owns that name.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EEXIST</constant></term>
++          <listitem><para>
++            The name already exists and cannot be taken over.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>E2BIG</constant></term>
++          <listitem><para>
++            The maximum number of well-known names per connection is exhausted.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_NAME_RELEASE</constant>
++        may fail with the following errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Invalid command flags, or invalid name provided.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ESRCH</constant></term>
++          <listitem><para>
++            Name is not found in the registry.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EADDRINUSE</constant></term>
++          <listitem><para>
++            Name is owned by a different connection and cannot be released.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_LIST</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Invalid command flags.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>ENOBUFS</constant></term>
++          <listitem><para>
++            No available memory in the connection's pool.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.policy</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.policy.xml b/Documentation/kdbus/kdbus.policy.xml
+new file mode 100644
+index 0000000..6732416
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.policy.xml
+@@ -0,0 +1,406 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.policy">
++
++  <refentryinfo>
++    <title>kdbus.policy</title>
++    <productname>kdbus.policy</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.policy</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.policy</refname>
++    <refpurpose>kdbus policy</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++
++    <para>
++      A kdbus policy restricts the possibilities of connections to own, see and
++      talk to well-known names. A policy can be associated with a bus (through a
++      policy holder connection) or a custom endpoint. kdbus stores its policy
++      information in a database that can be accessed through the following
++      ioctl commands:
++    </para>
++
++    <variablelist>
++      <varlistentry>
++        <term><constant>KDBUS_CMD_HELLO</constant></term>
++        <listitem><para>
++          When creating, or updating, a policy holder connection. See
++          <citerefentry>
++            <refentrytitle>kdbus.connection</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><constant>KDBUS_CMD_ENDPOINT_MAKE</constant></term>
++        <term><constant>KDBUS_CMD_ENDPOINT_UPDATE</constant></term>
++        <listitem><para>
++          When creating, or updating, a bus custom endpoint. See
++          <citerefentry>
++            <refentrytitle>kdbus.endpoint</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>.
++        </para></listitem>
++      </varlistentry>
++    </variablelist>
++
++    <para>
++      In all cases, the name and policy access information is stored in items
++      of type <constant>KDBUS_ITEM_NAME</constant> and
++      <constant>KDBUS_ITEM_POLICY_ACCESS</constant>. For this transport, the
++      following rules apply.
++    </para>
++
++    <itemizedlist>
++      <listitem>
++        <para>
++          An item of type <constant>KDBUS_ITEM_NAME</constant> must be followed
++          by at least one <constant>KDBUS_ITEM_POLICY_ACCESS</constant> item.
++        </para>
++      </listitem>
++
++      <listitem>
++        <para>
++          An item of type <constant>KDBUS_ITEM_NAME</constant> can be followed
++          by an arbitrary number of
++          <constant>KDBUS_ITEM_POLICY_ACCESS</constant> items.
++        </para>
++      </listitem>
++
++      <listitem>
++        <para>
++          An arbitrary number of groups of names and access levels can be given.
++        </para>
++      </listitem>
++    </itemizedlist>
++
++    <para>
++      Names passed in items of type <constant>KDBUS_ITEM_NAME</constant> must
++      comply with the rules for valid kdbus names. See
++      <citerefentry>
++        <refentrytitle>kdbus.name</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information.
++
++      The payload of an item of type
++      <constant>KDBUS_ITEM_POLICY_ACCESS</constant> is defined by the following
++      struct. For more information on the layout of items, please refer to
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++
++    <programlisting>
++struct kdbus_policy_access {
++  __u64 type;
++  __u64 access;
++  __u64 id;
++};
++    </programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>type</varname></term>
++        <listitem>
++          <para>
++            One of the following.
++          </para>
++
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_ACCESS_USER</constant></term>
++              <listitem><para>
++                Grant access to a user with the UID stored in the
++                <varname>id</varname> field.
++              </para></listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_ACCESS_GROUP</constant></term>
++              <listitem><para>
++                Grant access to members of the group with the GID stored
++                in the <varname>id</varname> field.
++              </para></listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_ACCESS_WORLD</constant></term>
++              <listitem><para>
++                Grant access to everyone. The <varname>id</varname> field
++                is ignored.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>access</varname></term>
++        <listitem>
++          <para>
++            The access to grant. One of the following.
++          </para>
++
++          <variablelist>
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_SEE</constant></term>
++              <listitem><para>
++                Allow the name to be seen.
++              </para></listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_TALK</constant></term>
++              <listitem><para>
++                Allow the name to be talked to.
++              </para></listitem>
++            </varlistentry>
++
++            <varlistentry>
++              <term><constant>KDBUS_POLICY_OWN</constant></term>
++              <listitem><para>
++                Allow the name to be owned.
++              </para></listitem>
++            </varlistentry>
++          </variablelist>
++        </listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>id</varname></term>
++        <listitem><para>
++           For <constant>KDBUS_POLICY_ACCESS_USER</constant>, stores the UID.
++           For <constant>KDBUS_POLICY_ACCESS_GROUP</constant>, stores the GID.
++        </para></listitem>
++      </varlistentry>
++
++    </variablelist>
++
++    <para>
++      All endpoints of buses have an empty policy database by default.
++      Therefore, unless policy rules are added, all operations are denied
++      by default. Also see
++      <citerefentry>
++        <refentrytitle>kdbus.endpoint</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Wildcard names</title>
++    <para>
++      Policy holder connections may upload names that contain the wildcard
++      suffix (<literal>".*"</literal>). Such a policy entry is effective for
++      every well-known name that extends the provided name by exactly one more
++      level.
++
++      For example, the name <literal>"foo.bar.*"</literal> matches both
++      <literal>"foo.bar.baz"</literal> and
++      <literal>"foo.bar.bazbaz"</literal>, but not
++      <literal>"foo.bar.baz.baz"</literal>.
++
++      This allows connections to take control over multiple names that the
++      policy holder doesn't need to know about when uploading the policy.
++
++      Such wildcard entries are not allowed for custom endpoints.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Privileged connections</title>
++    <para>
++      The policy database is overruled when action is taken by a privileged
++      connection. Please refer to
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information on what makes a connection privileged.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Examples</title>
++    <para>
++      For instance, a set of policy rules may look like this:
++    </para>
++
++    <programlisting>
++KDBUS_ITEM_NAME: str='org.foo.bar'
++KDBUS_ITEM_POLICY_ACCESS: type=USER, access=OWN, ID=1000
++KDBUS_ITEM_POLICY_ACCESS: type=USER, access=TALK, ID=1001
++KDBUS_ITEM_POLICY_ACCESS: type=WORLD, access=SEE
++
++KDBUS_ITEM_NAME: str='org.blah.baz'
++KDBUS_ITEM_POLICY_ACCESS: type=USER, access=OWN, ID=0
++KDBUS_ITEM_POLICY_ACCESS: type=WORLD, access=TALK
++    </programlisting>
++
++    <para>
++      That means that 'org.foo.bar' may only be owned by UID 1000, but every
++      user on the bus is allowed to see the name. However, only UID 1001 may
++      actually send a message to the connection and receive a reply from it.
++
++      The second rule allows 'org.blah.baz' to be owned by UID 0 only, but
++      every user may talk to it.
++    </para>
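++
++    <para>
++      Expressed in code, the first rule above could be encoded as an item
++      pair like in the following sketch. This is illustrative only; the
++      <function>KDBUS_ALIGN8()</function> helper is an assumption made for
++      this example, and the general item layout is described in
++      <citerefentry>
++        <refentrytitle>kdbus.item</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++
++    <programlisting>
++/* Illustrative sketch: one KDBUS_ITEM_NAME followed by one
++ * KDBUS_ITEM_POLICY_ACCESS granting OWN to UID 1000. 'item' is assumed
++ * to point into the item area of a suitably sized, zeroed command
++ * buffer, e.g. for KDBUS_CMD_ENDPOINT_UPDATE. */
++#define KDBUS_ALIGN8(l) (((l) + 7) &amp; ~7ULL) /* items are 8-byte aligned */
++
++item->type = KDBUS_ITEM_NAME;
++item->size = offsetof(struct kdbus_item, str) + sizeof("org.foo.bar");
++strcpy(item->str, "org.foo.bar");
++
++/* the next item starts at the aligned end of the previous one */
++item = (struct kdbus_item *) ((char *) item + KDBUS_ALIGN8(item->size));
++
++item->type = KDBUS_ITEM_POLICY_ACCESS;
++item->size = offsetof(struct kdbus_item, policy_access) +
++             sizeof(struct kdbus_policy_access);
++item->policy_access.type   = KDBUS_POLICY_ACCESS_USER;
++item->policy_access.access = KDBUS_POLICY_OWN;
++item->policy_access.id     = 1000;
++    </programlisting>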
++  </refsect1>
++
++  <refsect1>
++    <title>TALK access and multiple well-known names per connection</title>
++    <para>
++      Note that TALK access is checked against all names of a connection. For
++      example, if a connection owns both <constant>'org.foo.bar'</constant> and
++      <constant>'org.blah.baz'</constant>, and the policy database allows
++      <constant>'org.blah.baz'</constant> to be talked to by WORLD, then this
++      permission is also granted to <constant>'org.foo.bar'</constant>. That
++      might sound illogical, but after all, we allow messages to be directed to
++      either the ID or a well-known name, and policy is applied to the
++      connection, not the name. In other words, the effective TALK policy for a
++      connection is the most permissive of all names the connection owns.
++
++      For broadcast messages, the receiver needs TALK permissions to the sender
++      to receive the broadcast.
++    </para>
++    <para>
++      Both the endpoint and the bus policy databases are consulted to allow
++      name registry listing, owning a well-known name and message delivery.
++      If either one fails, the operation is failed with
++      <varname>errno</varname> set to <constant>EPERM</constant>.
++
++      For best practices, connections that own names with a restricted TALK
++      access should not install matches. This avoids cases where the sent
++      message may pass the bloom filter due to false-positives and may also
++      satisfy the policy rules.
++
++      Also see
++      <citerefentry>
++        <refentrytitle>kdbus.match</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Implicit policies</title>
++    <para>
++      Depending on the type of the endpoint, a set of implicit rules that
++      override installed policies might be enforced.
++
++      On default endpoints, the following set is enforced and checked before
++      any user-supplied policy is checked.
++    </para>
++
++    <itemizedlist>
++      <listitem>
++        <para>
++          Privileged connections always override any installed policy. Those
++          connections could easily install their own policies, so there is no
++          reason to enforce installed policies.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          Connections can always talk to connections of the same user. This
++          includes broadcast messages.
++        </para>
++      </listitem>
++    </itemizedlist>
++
++    <para>
++      Custom endpoints have stricter policies. The following rules apply:
++    </para>
++
++    <itemizedlist>
++      <listitem>
++        <para>
++          Policy rules are always enforced, even if the connection is a
++          privileged connection.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          Policy rules are always enforced for <constant>TALK</constant> access,
++          even if both ends are running under the same user. This includes
++          broadcast messages.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          To restrict the set of names that can be seen, endpoint policies can
++          install <constant>SEE</constant> policies.
++        </para>
++      </listitem>
++    </itemizedlist>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.pool.xml b/Documentation/kdbus/kdbus.pool.xml
+new file mode 100644
+index 0000000..a9e16f1
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.pool.xml
+@@ -0,0 +1,326 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus.pool">
++
++  <refentryinfo>
++    <title>kdbus.pool</title>
++    <productname>kdbus.pool</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus.pool</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus.pool</refname>
++    <refpurpose>kdbus pool</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Description</title>
++    <para>
++      A pool for data received from the kernel is installed for every
++      <emphasis>connection</emphasis> of the <emphasis>bus</emphasis>, and
++      is sized according to the information stored in the
++      <varname>pool_size</varname> member of <type>struct kdbus_cmd_hello</type>
++      when <constant>KDBUS_CMD_HELLO</constant> is employed. Internally, the
++      pool is segmented into <emphasis>slices</emphasis>, each referenced by its
++      <emphasis>offset</emphasis> in the pool, expressed in <type>bytes</type>.
++      See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more information about <constant>KDBUS_CMD_HELLO</constant>.
++    </para>
++
++    <para>
++      The pool is written to by the kernel when one of the following
++      <emphasis>ioctls</emphasis> is issued:
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_CMD_HELLO</constant></term>
++          <listitem><para>
++            ... to receive details about the bus the connection was made to
++          </para></listitem>
++        </varlistentry>
++        <varlistentry>
++          <term><constant>KDBUS_CMD_RECV</constant></term>
++          <listitem><para>
++            ... to receive a message
++          </para></listitem>
++        </varlistentry>
++        <varlistentry>
++          <term><constant>KDBUS_CMD_LIST</constant></term>
++          <listitem><para>
++            ... to dump the name registry
++          </para></listitem>
++        </varlistentry>
++        <varlistentry>
++          <term><constant>KDBUS_CMD_CONN_INFO</constant></term>
++          <listitem><para>
++            ... to retrieve information on a connection
++          </para></listitem>
++        </varlistentry>
++        <varlistentry>
++          <term><constant>KDBUS_CMD_BUS_CREATOR_INFO</constant></term>
++          <listitem><para>
++            ... to retrieve information about a connection's bus creator
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++
++    </para>
++    <para>
++      The <varname>offset</varname> fields returned by any of the
++      aforementioned ioctls describe offsets inside the pool. In order to make
++      the slice available for subsequent calls,
++      <constant>KDBUS_CMD_FREE</constant> has to be called on that offset
++      (see below). Otherwise, the pool will fill up, and the connection won't
++      be able to receive any more information through its pool.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Pool slice allocation</title>
++    <para>
++      Pool slices are allocated by the kernel in order to report information
++      back to a task, such as messages, returned name lists, etc.
++      Allocation of pool slices cannot be initiated by userspace. See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      and
++      <citerefentry>
++        <refentrytitle>kdbus.name</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for examples of commands that use the <emphasis>pool</emphasis> to
++      return data.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Accessing the pool memory</title>
++    <para>
++      Memory in the pool is read-only for userspace and may only be written
++      to by the kernel. To read from the pool memory, the caller is expected to
++      <citerefentry>
++        <refentrytitle>mmap</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>
++      the buffer into its task, like this:
++    </para>
++    <programlisting>
++uint8_t *buf = mmap(NULL, size, PROT_READ, MAP_SHARED, conn_fd, 0);
++    </programlisting>
++
++    <para>
++      In order to map the entire pool, the <varname>size</varname> parameter in
++      the example above should be set to the value of the
++      <varname>pool_size</varname> member of
++      <type>struct kdbus_cmd_hello</type> when
++      <constant>KDBUS_CMD_HELLO</constant> was employed to create the
++      connection (see above).
++    </para>
++
++    <para>
++      The <emphasis>file descriptor</emphasis> used to map the memory must be
++      the one that was used to create the <emphasis>connection</emphasis>.
++      In other words, the one that was used to call
++      <constant>KDBUS_CMD_HELLO</constant>. See
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>
++      for more details.
++    </para>
++
++    <para>
++      Alternatively, instead of mapping the entire pool buffer, only parts
++      of it can be mapped. Every kdbus command that returns an
++      <emphasis>offset</emphasis> (see above) also reports a
++      <emphasis>size</emphasis> along with it, so programs can be written
++      to map only portions of the pool to access a specific
++      <emphasis>slice</emphasis>.
++    </para>
++
++    <para>
++      When access to the pool memory is no longer needed, programs should
++      call <function>munmap()</function> on the pointer returned by
++      <function>mmap()</function>.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Freeing pool slices</title>
++    <para>
++      The <constant>KDBUS_CMD_FREE</constant> ioctl is used to free a slice
++      inside the pool, describing an offset that was returned in an
++      <varname>offset</varname> field of another ioctl struct.
++      The <constant>KDBUS_CMD_FREE</constant> command takes a
++      <type>struct kdbus_cmd_free</type> as argument.
++    </para>
++
++<programlisting>
++struct kdbus_cmd_free {
++  __u64 size;
++  __u64 flags;
++  __u64 return_flags;
++  __u64 offset;
++  struct kdbus_item items[0];
++};
++</programlisting>
++
++    <para>The fields in this struct are described below.</para>
++
++    <variablelist>
++      <varlistentry>
++        <term><varname>size</varname></term>
++        <listitem><para>
++          The overall size of the struct, including its items.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>flags</varname></term>
++        <listitem><para>
++          Currently unused.
++          <constant>KDBUS_FLAG_NEGOTIATE</constant> is accepted to probe for
++          valid flags. If set, the ioctl will return <errorcode>0</errorcode>,
++          and the <varname>flags</varname> field is set to
++          <constant>0</constant>.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>return_flags</varname></term>
++        <listitem><para>
++          Flags returned by the kernel. Currently unused and always set to
++          <constant>0</constant> by the kernel.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>offset</varname></term>
++        <listitem><para>
++          The offset to free, as returned by other ioctls that allocated
++          memory for returned information.
++        </para></listitem>
++      </varlistentry>
++
++      <varlistentry>
++        <term><varname>items</varname></term>
++        <listitem><para>
++          Items to specify further details for the free command.
++          Currently unused.
++          Unrecognized items are rejected, and the ioctl will fail with
++          <varname>errno</varname> set to <constant>EINVAL</constant>.
++          All items except for
++          <constant>KDBUS_ITEM_NEGOTIATE</constant> (see
++            <citerefentry>
++              <refentrytitle>kdbus.item</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>
++          ) will be rejected.
++        </para></listitem>
++      </varlistentry>
++    </variablelist>
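++
++    <para>
++      A minimal sketch of such a call is shown below. It is illustrative
++      only; <varname>offset</varname> is assumed to have been returned by an
++      earlier command such as <constant>KDBUS_CMD_RECV</constant>, and error
++      handling is reduced for brevity.
++    </para>
++
++    <programlisting>
++struct kdbus_cmd_free cmd_free = {
++        .size   = sizeof(cmd_free),
++        .offset = offset,   /* as returned by an earlier ioctl */
++};
++
++if (ioctl(conn_fd, KDBUS_CMD_FREE, &amp;cmd_free) &lt; 0)
++        perror("KDBUS_CMD_FREE");
++    </programlisting>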
++  </refsect1>
++
++  <refsect1>
++    <title>Return value</title>
++    <para>
++      On success, all mentioned ioctl commands return <errorcode>0</errorcode>;
++      on error, <errorcode>-1</errorcode> is returned, and
++      <varname>errno</varname> is set to indicate the error.
++      If the issued ioctl is illegal for the file descriptor used,
++      <varname>errno</varname> will be set to <constant>ENOTTY</constant>.
++    </para>
++
++    <refsect2>
++      <title>
++        <constant>KDBUS_CMD_FREE</constant> may fail with the following
++        errors
++      </title>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>ENXIO</constant></term>
++          <listitem><para>
++            No pool slice found at given offset.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            Invalid flags provided.
++          </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>EINVAL</constant></term>
++          <listitem><para>
++            The offset is valid, but the user is not allowed to free the slice.
++            This happens, for example, if the offset was retrieved with
++            <constant>KDBUS_RECV_PEEK</constant>.
++          </para></listitem>
++        </varlistentry>
++      </variablelist>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>mmap</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>munmap</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++    </simplelist>
++  </refsect1>
++</refentry>
+diff --git a/Documentation/kdbus/kdbus.xml b/Documentation/kdbus/kdbus.xml
+new file mode 100644
+index 0000000..d8e7400
+--- /dev/null
++++ b/Documentation/kdbus/kdbus.xml
+@@ -0,0 +1,1012 @@
++<?xml version='1.0'?> <!--*-nxml-*-->
++<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++        "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
++
++<refentry id="kdbus">
++
++  <refentryinfo>
++    <title>kdbus</title>
++    <productname>kdbus</productname>
++  </refentryinfo>
++
++  <refmeta>
++    <refentrytitle>kdbus</refentrytitle>
++    <manvolnum>7</manvolnum>
++  </refmeta>
++
++  <refnamediv>
++    <refname>kdbus</refname>
++    <refpurpose>Kernel Message Bus</refpurpose>
++  </refnamediv>
++
++  <refsect1>
++    <title>Synopsis</title>
++    <para>
++      kdbus is an inter-process communication bus system controlled by the
++      kernel. It provides user-space with an API to create buses and send
++      unicast and multicast messages to one, or many, peers connected to the
++      same bus. It does not enforce any layout on the transmitted data, but
++      only provides the transport layer used for message interchange between
++      peers.
++    </para>
++    <para>
++      This set of man-pages gives a comprehensive overview of the kernel-level
++      API, with all ioctl commands, associated structs and bit masks. However,
++      most people will not use this API level directly, but rather let one of
++      the high-level abstraction libraries help them integrate D-Bus
++      functionality into their applications.
++    </para>
++  </refsect1>
++
++  <refsect1>
++    <title>Description</title>
++    <para>
++      kdbus provides a pseudo filesystem called <emphasis>kdbusfs</emphasis>,
++      which is usually mounted on <filename>/sys/fs/kdbus</filename>. Bus
++      primitives can be accessed as files and sub-directories underneath this
++      mount-point. Any advanced operations are done via
++      <function>ioctl()</function> on files created by
++      <emphasis>kdbusfs</emphasis>. Multiple mount-points of
++      <emphasis>kdbusfs</emphasis> are independent of each other. This allows
++      namespacing of kdbus by mounting a new instance of
++      <emphasis>kdbusfs</emphasis> in a new mount-namespace. kdbus calls these
++      mount instances domains and each bus belongs to exactly one domain.
++    </para>
++
++    <para>
++      kdbus was designed as a transport layer for D-Bus, but is in no way
++      limited, nor controlled by the D-Bus protocol specification. The D-Bus
++      protocol is one possible application layer on top of kdbus.
++    </para>
++
++    <para>
++      For the general D-Bus protocol specification, its payload format, its
++      marshaling, and its communication semantics, please refer to the
++      <ulink url="http://dbus.freedesktop.org/doc/dbus-specification.html">
++      D-Bus specification</ulink>.
++    </para>
++
++  </refsect1>
++
++  <refsect1>
++    <title>Terminology</title>
++
++    <refsect2>
++      <title>Domain</title>
++      <para>
++        A domain is a <emphasis>kdbusfs</emphasis> mount-point containing all
++        the bus primitives. Each domain is independent, and separate domains
++        do not affect each other.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Bus</title>
++      <para>
++        A bus is a named object inside a domain. Clients exchange messages
++        over a bus. Multiple buses themselves have no connection to each other;
++        messages can only be exchanged on the same bus. The default endpoint of
++        a bus, to which clients establish connections, is the "bus" file
++        /sys/fs/kdbus/&lt;bus name&gt;/bus.
++        Common operating system setups create one "system bus" per system,
++        and one "user bus" for every logged-in user. Applications or services
++        may create their own private buses. The kernel driver does not
++        distinguish between different bus types, they are all handled the same
++        way. See
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Endpoint</title>
++      <para>
++        An endpoint provides a file to talk to a bus. Opening an endpoint
++        creates a new connection to the bus to which the endpoint belongs. All
++        endpoints have unique names and are accessible as files underneath the
++        directory of a bus, e.g., /sys/fs/kdbus/&lt;bus&gt;/&lt;endpoint&gt;.
++        Every bus has a default endpoint called "bus".
++        A bus can optionally offer additional endpoints with custom names
++        to provide restricted access to the bus. Custom endpoints carry
++        additional policy which can be used to create sandboxes with
++        locked-down, limited, filtered access to a bus. See
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Connection</title>
++      <para>
++        A connection to a bus is created by opening an endpoint file of a
++        bus. Every ordinary client connection has a unique identifier on the
++        bus and can address messages to every other connection on the same
++        bus by using the peer's connection ID as the destination. See
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Pool</title>
++      <para>
++        Each connection allocates a piece of shmem-backed memory that is
++        used to receive messages and answers to ioctl commands from the kernel.
++        It is never used to send anything to the kernel. In order to access that
++        memory, an application must mmap() it into its address space. See
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
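++      <para>
++        A minimal sketch of mapping the pool, assuming
++        <varname>conn_fd</varname> is a connected endpoint file descriptor
++        and <constant>POOL_SIZE</constant> matches the pool size requested
++        at <constant>KDBUS_CMD_HELLO</constant> time:
++      </para>
++      <programlisting><![CDATA[
++  /* The pool is only ever written by the kernel; map it read-only. */
++  uint8_t *pool = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, conn_fd, 0);
++  if (pool == MAP_FAILED)
++          return -1;  /* handle error */
++
++  /* A received message is later read at pool + offset, where the
++   * offset is returned by KDBUS_CMD_RECV. */
++      ]]></programlisting>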
++    </refsect2>
++
++    <refsect2>
++      <title>Well-known Name</title>
++      <para>
++        A connection can, in addition to its implicit unique connection ID,
++        request the ownership of a textual well-known name. Well-known names are
++        noted in reverse-domain notation, such as com.example.service1. A
++        connection that offers a service on a bus is usually reached by its
++        well-known name. An analogy of connection ID and well-known name is an
++        IP address and a DNS name associated with that address. See
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Message</title>
++      <para>
++        Connections can exchange messages with other connections by addressing
++        the peers with their connection ID or well-known name. A message
++        consists of a message header with information on how to route the
++        message, and the message payload, which is a logical byte stream of
++        arbitrary size. Messages can carry additional file descriptors to be
++        passed from one connection to another, just like passing file
++        descriptors over UNIX domain sockets. Every connection can specify which
++        set of metadata the kernel should attach to the message when it is
++        delivered to the receiving connection. Metadata contains information
++        like: system time stamps, UID, GID, TID, proc-starttime, well-known
++        names, process comm, process exe, process argv, cgroup, capabilities,
++        seclabel, audit session, loginuid and the connection's human-readable
++        name. See
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Item</title>
++      <para>
++        The API of kdbus implements the notion of items, submitted through and
++        returned by most ioctls, and stored inside data structures in the
++        connection's pool. See
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Broadcast, signal, filter, match</title>
++      <para>
++        Signals are messages that a receiver opts in for by installing a blob of
++        bytes, called a 'match'. Signal messages must always carry a
++        counter-part blob, called a 'filter', and signals are only delivered to
++        peers which have a match that white-lists the message's filter. Senders
++        of signal messages can use either a single connection ID as receiver,
++        or the special connection ID
++        <constant>KDBUS_DST_ID_BROADCAST</constant> to potentially send it to
++        all connections of a bus, following the logic described above. See
++        <citerefentry>
++          <refentrytitle>kdbus.match</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        and
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Policy</title>
++      <para>
++        A policy is a set of rules that define which connections can see, talk
++        to, or register a well-known name on the bus. A policy is attached to
++        buses and custom endpoints, and modified by policy holder connections or
++        owners of custom endpoints. See
++        <citerefentry>
++          <refentrytitle>kdbus.policy</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Privileged bus users</title>
++      <para>
++        A user connecting to the bus is considered privileged if it is either
++        the creator of the bus, or if it has the CAP_IPC_OWNER capability flag
++        set. See
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for more details.
++      </para>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>Bus Layout</title>
++
++    <para>
++      A <emphasis>bus</emphasis> provides and defines an environment that peers
++      can connect to for message interchange. A bus is created via the kdbus
++      control interface and can be modified by the bus creator. It applies the
++      policy that controls all bus operations. The bus creator itself does not
++      participate as a peer. To establish a peer
++      <emphasis>connection</emphasis>, you have to open one of the
++      <emphasis>endpoints</emphasis> of a bus. Each bus provides a default
++      endpoint, but further endpoints can be created on-demand. Endpoints are
++      used to apply additional policies for all connections on this endpoint.
++      Thus, they provide additional filters to further restrict access of
++      specific connections to the bus.
++    </para>
++
++    <para>
++      The following shows an example bus layout:
++    </para>
++
++    <programlisting><![CDATA[
++                                  Bus Creator
++                                       |
++                                       |
++                                    +-----+
++                                    | Bus |
++                                    +-----+
++                                       |
++                    __________________/ \__________________
++                   /                                       \
++                   |                                       |
++             +----------+                             +----------+
++             | Endpoint |                             | Endpoint |
++             +----------+                             +----------+
++         _________/|\_________                   _________/|\_________
++        /          |          \                 /          |          \
++        |          |          |                 |          |          |
++        |          |          |                 |          |          |
++   Connection  Connection  Connection      Connection  Connection  Connection
++    ]]></programlisting>
++
++  </refsect1>
++
++  <refsect1>
++    <title>Data structures and interconnections</title>
++    <programlisting><![CDATA[
++  +--------------------------------------------------------------------------+
++  | Domain (Mount Point)                                                     |
++  | /sys/fs/kdbus/control                                                    |
++  | +----------------------------------------------------------------------+ |
++  | | Bus (System Bus)                                                     | |
++  | | /sys/fs/kdbus/0-system/                                              | |
++  | | +-------------------------------+ +--------------------------------+ | |
++  | | | Endpoint                      | | Endpoint                       | | |
++  | | | /sys/fs/kdbus/0-system/bus    | | /sys/fs/kdbus/0-system/ep.app  | | |
++  | | +-------------------------------+ +--------------------------------+ | |
++  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
++  | | | Connection   | | Connection   | | Connection   | | Connection    | | |
++  | | | :1.22        | | :1.25        | | :1.55        | | :1.81         | | |
++  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
++  | +----------------------------------------------------------------------+ |
++  |                                                                          |
++  | +----------------------------------------------------------------------+ |
++  | | Bus (User Bus for UID 2702)                                          | |
++  | | /sys/fs/kdbus/2702-user/                                             | |
++  | | +-------------------------------+ +--------------------------------+ | |
++  | | | Endpoint                      | | Endpoint                       | | |
++  | | | /sys/fs/kdbus/2702-user/bus   | | /sys/fs/kdbus/2702-user/ep.app | | |
++  | | +-------------------------------+ +--------------------------------+ | |
++  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
++  | | | Connection   | | Connection   | | Connection   | | Connection    | | |
++  | | | :1.22        | | :1.25        | | :1.55        | | :1.81         | | |
++  | | +--------------+ +--------------+ +--------------+ +---------------+ | |
++  | +----------------------------------------------------------------------+ |
++  +--------------------------------------------------------------------------+
++    ]]></programlisting>
++  </refsect1>
++
++  <refsect1>
++    <title>Metadata</title>
++
++    <refsect2>
++      <title>When metadata is collected</title>
++      <para>
++        kdbus records data about the system in certain situations. Such metadata
++        can refer to the currently active process (creds, PIDs, current user
++        groups, process names and its executable path, cgroup membership,
++        capabilities, security label and audit information), connection
++        information (description string, currently owned names) and time stamps.
++      </para>
++      <para>
++        Metadata is collected at the following times.
++      </para>
++
++      <itemizedlist>
++        <listitem><para>
++          When a bus is created (<constant>KDBUS_CMD_MAKE</constant>),
++          information about the calling task is collected. This data is returned
++          by the kernel via the <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant>
++          call.
++        </para></listitem>
++
++        <listitem>
++          <para>
++            When a connection is created (<constant>KDBUS_CMD_HELLO</constant>),
++            information about the calling task is collected. Alternatively, a
++            privileged connection may provide 'faked' information about
++            credentials, PIDs and security labels which will be stored instead.
++            This data is returned by the kernel as information on a connection
++            (<constant>KDBUS_CMD_CONN_INFO</constant>). Only metadata that a
++            connection has allowed to be sent (by setting its bit in
++            <varname>attach_flags_send</varname>) will be exported in this way.
++          </para>
++        </listitem>
++
++        <listitem>
++          <para>
++            When a message is sent (<constant>KDBUS_CMD_SEND</constant>),
++            information about the sending task and the sending connection is
++            collected. This metadata will be attached to the message when it
++            arrives in the receiver's pool. If the connection sending the
++            message installed faked credentials (see
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>),
++            the message will not be augmented by any information about the
++            currently sending task. Note that only metadata that was requested
++            by the receiving connection will be collected and attached to
++            messages.
++          </para>
++        </listitem>
++      </itemizedlist>
++
++      <para>
++        Which metadata items are actually delivered depends on the following
++        sets and masks:
++      </para>
++
++      <itemizedlist>
++        <listitem><para>
++          (a) the system-wide kmod creds mask
++          (module parameter <varname>attach_flags_mask</varname>)
++        </para></listitem>
++
++        <listitem><para>
++          (b) the per-connection send creds mask, set by the connecting client
++        </para></listitem>
++
++        <listitem><para>
++          (c) the per-connection receive creds mask, set by the connecting
++          client
++        </para></listitem>
++
++        <listitem><para>
++          (d) the per-bus minimal creds mask, set by the bus creator
++        </para></listitem>
++
++        <listitem><para>
++          (e) the per-bus owner creds mask, set by the bus creator
++        </para></listitem>
++
++        <listitem><para>
++          (f) the mask specified when querying creds of a bus peer
++        </para></listitem>
++
++        <listitem><para>
++          (g) the mask specified when querying creds of a bus owner
++        </para></listitem>
++      </itemizedlist>
++
++      <para>
++        With the following rules:
++      </para>
++
++      <itemizedlist>
++        <listitem>
++          <para>
++            [1] The creds attached to messages are determined as
++            <constant>a &amp; b &amp; c</constant>.
++          </para>
++        </listitem>
++
++        <listitem>
++          <para>
++            [2] When connecting to a bus (<constant>KDBUS_CMD_HELLO</constant>),
++            and <constant>~b &amp; d != 0</constant>, the call will fail with
++            <errorcode>-1</errorcode>, and <varname>errno</varname> is set to
++            <constant>ECONNREFUSED</constant>.
++          </para>
++        </listitem>
++
++        <listitem>
++          <para>
++            [3] When querying creds of a bus peer, the creds returned are
++            <constant>a &amp; b &amp; f</constant>.
++          </para>
++        </listitem>
++
++        <listitem>
++          <para>
++            [4] When querying creds of a bus owner, the creds returned are
++            <constant>a &amp; e &amp; g</constant>.
++          </para>
++        </listitem>
++      </itemizedlist>
++
++      <para>
++        Hence, programs might not always get all the metadata items they
++        requested. Code must be written so that it can cope with this fact.
++      </para>
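++
++      <para>
++        As a worked example of rule [1], with hypothetical masks:
++      </para>
++      <programlisting><![CDATA[
++  __u64 a = _KDBUS_ATTACH_ALL;                          /* kmod mask     */
++  __u64 b = KDBUS_ATTACH_TIMESTAMP | KDBUS_ATTACH_PIDS; /* sender opt-in */
++  __u64 c = KDBUS_ATTACH_PIDS | KDBUS_ATTACH_CREDS;     /* receiver mask */
++
++  __u64 delivered = a & b & c;  /* == KDBUS_ATTACH_PIDS */
++      ]]></programlisting>
++      <para>
++        The receiver asked for credentials as well, but since the sender
++        never opted in to sending them, only the PIDs item is attached.
++      </para>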
++    </refsect2>
++
++    <refsect2>
++      <title>Benefits and heads-up</title>
++      <para>
++        Attaching metadata to messages has two major benefits.
++
++        <itemizedlist>
++          <listitem>
++            <para>
++              Metadata attached to messages is gathered at the moment when the
++              other side calls <constant>KDBUS_CMD_SEND</constant>, or,
++              respectively, then the kernel notification is generated. There is
++              no need for the receiving peer to retrieve information about the
++              task in a second step. This closes a race gap that would otherwise
++              be inherent.
++            </para>
++          </listitem>
++          <listitem>
++            <para>
++              As metadata is delivered along with messages in the same data
++              blob, no extra calls to kernel functions etc. are needed to gather
++              them.
++            </para>
++          </listitem>
++        </itemizedlist>
++
++        Note, however, that collecting metadata does come at a price for
++        performance, so developers should carefully assess which metadata
++        they really need. As a best practice, data that is not needed as part
++        of a message should not be requested by the connection in the first
++        place (see <varname>attach_flags_recv</varname> in
++        <constant>KDBUS_CMD_HELLO</constant>).
++      </para>
++    </refsect2>
++
++    <refsect2>
++      <title>Attach flags for metadata items</title>
++      <para>
++        The kernel uses a bitmask to determine which metadata to attach as
++        items to the aforementioned commands. The following
++        <emphasis>attach flags</emphasis> are currently supported.
++        Both the <varname>attach_flags_recv</varname> and
++        <varname>attach_flags_send</varname> fields of
++        <type>struct kdbus_cmd_hello</type>, as well as the payload of the
++        <constant>KDBUS_ITEM_ATTACH_FLAGS_SEND</constant> and
++        <constant>KDBUS_ITEM_ATTACH_FLAGS_RECV</constant> items follow this
++        scheme.
++      </para>
++
++      <variablelist>
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_TIMESTAMP</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_TIMESTAMP</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_CREDS</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_CREDS</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_PIDS</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_PIDS</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_AUXGROUPS</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_AUXGROUPS</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_NAMES</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_OWNED_NAME</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_TID_COMM</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_TID_COMM</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_PID_COMM</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_PID_COMM</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_EXE</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_EXE</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_CMDLINE</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_CMDLINE</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_CGROUP</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_CGROUP</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_CAPS</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_CAPS</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_SECLABEL</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_SECLABEL</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_AUDIT</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_AUDIT</constant>.
++            </para></listitem>
++        </varlistentry>
++
++        <varlistentry>
++          <term><constant>KDBUS_ATTACH_CONN_DESCRIPTION</constant></term>
++            <listitem><para>
++              Requests the attachment of an item of type
++              <constant>KDBUS_ITEM_CONN_DESCRIPTION</constant>.
++            </para></listitem>
++        </varlistentry>
++      </variablelist>
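++
++      <para>
++        As a sketch, these flags are combined into the two mask fields of
++        <type>struct kdbus_cmd_hello</type>; all other required fields of
++        the command struct are omitted here:
++      </para>
++      <programlisting><![CDATA[
++  struct kdbus_cmd_hello hello = {
++          .size              = sizeof(hello),
++          .attach_flags_send = KDBUS_ATTACH_PIDS,
++          .attach_flags_recv = KDBUS_ATTACH_TIMESTAMP |
++                               KDBUS_ATTACH_PIDS,
++          /* ... pool size, items, etc. ... */
++  };
++      ]]></programlisting>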
++
++      <para>
++        Please refer to
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++        for detailed information about the layout and payload of items and
++        the metadata they transport.
++      </para>
++    </refsect2>
++  </refsect1>
++
++  <refsect1>
++    <title>The ioctl interface</title>
++
++    <para>
++      As stated in the 'synopsis' section above, application developers are
++      strongly encouraged to use kdbus through one of the high-level D-Bus
++      abstraction libraries, rather than using the low-level API directly.
++    </para>
++
++    <para>
++      kdbus on the kernel level exposes its functions exclusively through
++      <citerefentry>
++        <refentrytitle>ioctl</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>,
++      employed on file descriptors returned by
++      <citerefentry>
++        <refentrytitle>open</refentrytitle>
++        <manvolnum>2</manvolnum>
++      </citerefentry>
++      on pseudo files exposed by
++      <citerefentry>
++        <refentrytitle>kdbus.fs</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>.
++    </para>
++    <para>
++      The following is a list of all the ioctls, along with the command
++      structs they must be used with.
++    </para>
++
++    <informaltable frame="none">
++      <tgroup cols="3" colsep="1">
++        <thead>
++          <row>
++            <entry>ioctl signature</entry>
++            <entry>command</entry>
++            <entry>transported struct</entry>
++          </row>
++        </thead>
++        <tbody>
++          <row>
++            <entry><constant>0x40189500</constant></entry>
++            <entry><constant>KDBUS_CMD_BUS_MAKE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x40189510</constant></entry>
++            <entry><constant>KDBUS_CMD_ENDPOINT_MAKE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0xc0609580</constant></entry>
++            <entry><constant>KDBUS_CMD_HELLO</constant></entry>
++            <entry><type>struct kdbus_cmd_hello *</type></entry>
++          </row><row>
++            <entry><constant>0x40189582</constant></entry>
++            <entry><constant>KDBUS_CMD_BYEBYE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x40389590</constant></entry>
++            <entry><constant>KDBUS_CMD_SEND</constant></entry>
++            <entry><type>struct kdbus_cmd_send *</type></entry>
++          </row><row>
++            <entry><constant>0x80409591</constant></entry>
++            <entry><constant>KDBUS_CMD_RECV</constant></entry>
++            <entry><type>struct kdbus_cmd_recv *</type></entry>
++          </row><row>
++            <entry><constant>0x40209583</constant></entry>
++            <entry><constant>KDBUS_CMD_FREE</constant></entry>
++            <entry><type>struct kdbus_cmd_free *</type></entry>
++          </row><row>
++            <entry><constant>0x401895a0</constant></entry>
++            <entry><constant>KDBUS_CMD_NAME_ACQUIRE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x401895a1</constant></entry>
++            <entry><constant>KDBUS_CMD_NAME_RELEASE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x80289586</constant></entry>
++            <entry><constant>KDBUS_CMD_LIST</constant></entry>
++            <entry><type>struct kdbus_cmd_list *</type></entry>
++          </row><row>
++            <entry><constant>0x80309584</constant></entry>
++            <entry><constant>KDBUS_CMD_CONN_INFO</constant></entry>
++            <entry><type>struct kdbus_cmd_info *</type></entry>
++          </row><row>
++            <entry><constant>0x40209551</constant></entry>
++            <entry><constant>KDBUS_CMD_UPDATE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x80309585</constant></entry>
++            <entry><constant>KDBUS_CMD_BUS_CREATOR_INFO</constant></entry>
++            <entry><type>struct kdbus_cmd_info *</type></entry>
++          </row><row>
++            <entry><constant>0x40189511</constant></entry>
++            <entry><constant>KDBUS_CMD_ENDPOINT_UPDATE</constant></entry>
++            <entry><type>struct kdbus_cmd *</type></entry>
++          </row><row>
++            <entry><constant>0x402095b0</constant></entry>
++            <entry><constant>KDBUS_CMD_MATCH_ADD</constant></entry>
++            <entry><type>struct kdbus_cmd_match *</type></entry>
++          </row><row>
++            <entry><constant>0x402095b1</constant></entry>
++            <entry><constant>KDBUS_CMD_MATCH_REMOVE</constant></entry>
++            <entry><type>struct kdbus_cmd_match *</type></entry>
++          </row>
++        </tbody>
++      </tgroup>
++    </informaltable>
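++
++    <para>
++      The constants in the first column follow the regular Linux ioctl
++      encoding, built from the transfer direction, the size of the
++      transported struct, <constant>KDBUS_IOCTL_MAGIC</constant>
++      (<constant>0x95</constant>) and the command number. For example:
++    </para>
++    <programlisting><![CDATA[
++  /* KDBUS_CMD_BUS_MAKE is _IOW(KDBUS_IOCTL_MAGIC, 0x00, struct kdbus_cmd):
++   * 0x40189500 = write direction, 0x18-byte argument, magic 0x95, nr 0x00 */
++  _Static_assert(KDBUS_CMD_BUS_MAKE == 0x40189500, "encoding as tabled");
++    ]]></programlisting>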
++
++    <para>
++      Depending on the type of <emphasis>kdbusfs</emphasis> node that was
++      opened and what ioctls have been executed on a file descriptor before,
++      a different subset of ioctl commands is allowed.
++    </para>
++
++    <itemizedlist>
++      <listitem>
++        <para>
++          On a file descriptor resulting from opening a
++          <emphasis>control node</emphasis>, only the
++          <constant>KDBUS_CMD_BUS_MAKE</constant> ioctl may be executed.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          On a file descriptor resulting from opening a
++          <emphasis>bus endpoint node</emphasis>, only the
++          <constant>KDBUS_CMD_ENDPOINT_MAKE</constant> and
++          <constant>KDBUS_CMD_HELLO</constant> ioctls may be executed.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          A file descriptor that was used to create a bus
++          (via <constant>KDBUS_CMD_BUS_MAKE</constant>) is called a
++          <emphasis>bus owner</emphasis> file descriptor. The bus will be
++          active as long as the file descriptor is kept open.
++          A bus owner file descriptor cannot be used to
++          issue any further ioctls. As soon as
++          <citerefentry>
++            <refentrytitle>close</refentrytitle>
++            <manvolnum>2</manvolnum>
++          </citerefentry>
++          is called on it, the bus will be shut down, along with all associated
++          endpoints and connections. See
++          <citerefentry>
++            <refentrytitle>kdbus.bus</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for more details.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          A file descriptor that was used to create an endpoint
++          (via <constant>KDBUS_CMD_ENDPOINT_MAKE</constant>) is called an
++          <emphasis>endpoint owner</emphasis> file descriptor. The endpoint
++          will be active as long as the file descriptor is kept open.
++          An endpoint owner file descriptor can only be used
++          to update details of an endpoint through the
++          <constant>KDBUS_CMD_ENDPOINT_UPDATE</constant> ioctl. As soon as
++          <citerefentry>
++            <refentrytitle>close</refentrytitle>
++            <manvolnum>2</manvolnum>
++          </citerefentry>
++          is called on it, the endpoint will be removed from the bus, and all
++          connections that are connected to the bus through it are shut down.
++          See
++          <citerefentry>
++            <refentrytitle>kdbus.endpoint</refentrytitle>
++            <manvolnum>7</manvolnum>
++          </citerefentry>
++          for more details.
++        </para>
++      </listitem>
++      <listitem>
++        <para>
++          A file descriptor that was used to create a connection
++          (via <constant>KDBUS_CMD_HELLO</constant>) is called a
++          <emphasis>connection owner</emphasis> file descriptor. The connection
++          will be active as long as the file descriptor is kept open.
++          A connection owner file descriptor may be used to
++          issue any of the following ioctls.
++        </para>
++
++        <itemizedlist>
++          <listitem><para>
++            <constant>KDBUS_CMD_UPDATE</constant> to tweak details of the
++            connection. See
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_BYEBYE</constant> to shut down a connection
++            without losing messages. See
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_FREE</constant> to free a slice of memory in
++            the pool. See
++            <citerefentry>
++              <refentrytitle>kdbus.pool</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_CONN_INFO</constant> to retrieve information
++            on other connections on the bus. See
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_BUS_CREATOR_INFO</constant> to retrieve
++            information on the bus creator. See
++            <citerefentry>
++              <refentrytitle>kdbus.connection</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_LIST</constant> to retrieve a list of
++            currently active well-known names and unique IDs on the bus. See
++            <citerefentry>
++              <refentrytitle>kdbus.name</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_SEND</constant> and
++            <constant>KDBUS_CMD_RECV</constant> to send or receive a message.
++            See
++            <citerefentry>
++              <refentrytitle>kdbus.message</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_NAME_ACQUIRE</constant> and
++            <constant>KDBUS_CMD_NAME_RELEASE</constant> to acquire or release
++            a well-known name on the bus. See
++            <citerefentry>
++              <refentrytitle>kdbus.name</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++
++          <listitem><para>
++            <constant>KDBUS_CMD_MATCH_ADD</constant> and
++            <constant>KDBUS_CMD_MATCH_REMOVE</constant> to add or remove
++            a match for signal messages. See
++            <citerefentry>
++              <refentrytitle>kdbus.match</refentrytitle>
++              <manvolnum>7</manvolnum>
++            </citerefentry>.
++          </para></listitem>
++        </itemizedlist>
++      </listitem>
++    </itemizedlist>
++
++    <para>
++      These ioctls, along with the structs they transport, are explained in
++      detail in the other documents linked to in the "See Also" section below.
++    </para>
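++
++    <para>
++      For illustration, a condensed sketch of the connection life-cycle
++      described above; the full <type>struct kdbus_cmd_hello</type> setup
++      (pool size, items) is omitted, see
++      <citerefentry>
++        <refentrytitle>kdbus.connection</refentrytitle>
++        <manvolnum>7</manvolnum>
++      </citerefentry>:
++    </para>
++    <programlisting><![CDATA[
++  int fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR | O_CLOEXEC);
++
++  struct kdbus_cmd_hello hello = {
++          .size = sizeof(hello),
++          /* ... pool size, attach flags, items ... */
++  };
++
++  if (ioctl(fd, KDBUS_CMD_HELLO, &hello) == 0) {
++          /* fd is now a connection owner file descriptor and may be
++           * used for KDBUS_CMD_SEND, KDBUS_CMD_RECV, and so on. */
++  }
++
++  close(fd);  /* shuts down the connection */
++    ]]></programlisting>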
++  </refsect1>
++
++  <refsect1>
++    <title>See Also</title>
++    <simplelist type="inline">
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.bus</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.connection</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.endpoint</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.fs</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.item</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.message</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.name</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>kdbus.pool</refentrytitle>
++          <manvolnum>7</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>ioctl</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>mmap</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>open</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <citerefentry>
++          <refentrytitle>close</refentrytitle>
++          <manvolnum>2</manvolnum>
++        </citerefentry>
++      </member>
++      <member>
++        <ulink url="http://freedesktop.org/wiki/Software/dbus">D-Bus</ulink>
++      </member>
++    </simplelist>
++  </refsect1>
++
++</refentry>
+diff --git a/Documentation/kdbus/stylesheet.xsl b/Documentation/kdbus/stylesheet.xsl
+new file mode 100644
+index 0000000..52565ea
+--- /dev/null
++++ b/Documentation/kdbus/stylesheet.xsl
+@@ -0,0 +1,16 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<stylesheet xmlns="http://www.w3.org/1999/XSL/Transform" version="1.0">
++	<param name="chunk.quietly">1</param>
++	<param name="funcsynopsis.style">ansi</param>
++	<param name="funcsynopsis.tabular.threshold">80</param>
++	<param name="callout.graphics">0</param>
++	<param name="paper.type">A4</param>
++	<param name="generate.section.toc.level">2</param>
++	<param name="use.id.as.filename">1</param>
++	<param name="citerefentry.link">1</param>
++	<strip-space elements="*"/>
++	<template name="generate.citerefentry.link">
++		<value-of select="refentrytitle"/>
++		<text>.html</text>
++	</template>
++</stylesheet>
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 6239a30..e924246 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -5503,6 +5503,19 @@ S:	Maintained
+ F:	Documentation/kbuild/kconfig-language.txt
+ F:	scripts/kconfig/
+ 
++KDBUS
++M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++M:	Daniel Mack <daniel@zonque.org>
++M:	David Herrmann <dh.herrmann@googlemail.com>
++M:	Djalal Harouni <tixxdz@opendz.org>
++L:	linux-kernel@vger.kernel.org
++S:	Maintained
++F:	ipc/kdbus/*
++F:	samples/kdbus/*
++F:	Documentation/kdbus/*
++F:	include/uapi/linux/kdbus.h
++F:	tools/testing/selftests/kdbus/
++
+ KDUMP
+ M:	Vivek Goyal <vgoyal@redhat.com>
+ M:	Haren Myneni <hbabu@us.ibm.com>
+diff --git a/Makefile b/Makefile
+index 1100ff3..08c9818 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1350,6 +1350,7 @@ $(help-board-dirs): help-%:
+ %docs: scripts_basic FORCE
+ 	$(Q)$(MAKE) $(build)=scripts build_docproc
+ 	$(Q)$(MAKE) $(build)=Documentation/DocBook $@
++	$(Q)$(MAKE) $(build)=Documentation/kdbus $@
+ 
+ else # KBUILD_EXTMOD
+ 
+diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
+index 68ceb97..ddc413e 100644
+--- a/include/uapi/linux/Kbuild
++++ b/include/uapi/linux/Kbuild
+@@ -214,6 +214,7 @@ header-y += ixjuser.h
+ header-y += jffs2.h
+ header-y += joystick.h
+ header-y += kcmp.h
++header-y += kdbus.h
+ header-y += kdev_t.h
+ header-y += kd.h
+ header-y += kernelcapi.h
+diff --git a/include/uapi/linux/kdbus.h b/include/uapi/linux/kdbus.h
+new file mode 100644
+index 0000000..00a6e14
+--- /dev/null
++++ b/include/uapi/linux/kdbus.h
+@@ -0,0 +1,979 @@
++/*
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef _UAPI_KDBUS_H_
++#define _UAPI_KDBUS_H_
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++#define KDBUS_IOCTL_MAGIC		0x95
++#define KDBUS_SRC_ID_KERNEL		(0)
++#define KDBUS_DST_ID_NAME		(0)
++#define KDBUS_MATCH_ID_ANY		(~0ULL)
++#define KDBUS_DST_ID_BROADCAST		(~0ULL)
++#define KDBUS_FLAG_NEGOTIATE		(1ULL << 63)
++
++/**
++ * struct kdbus_notify_id_change - name registry change message
++ * @id:			New or former owner of the name
++ * @flags:		flags field from KDBUS_HELLO_*
++ *
++ * Sent from kernel to userspace when the owner or activator of
++ * a well-known name changes.
++ *
++ * Attached to:
++ *   KDBUS_ITEM_ID_ADD
++ *   KDBUS_ITEM_ID_REMOVE
++ */
++struct kdbus_notify_id_change {
++	__u64 id;
++	__u64 flags;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_notify_name_change - name registry change message
++ * @old_id:		ID and flags of former owner of a name
++ * @new_id:		ID and flags of new owner of a name
++ * @name:		Well-known name
++ *
++ * Sent from kernel to userspace when the owner or activator of
++ * a well-known name changes.
++ *
++ * Attached to:
++ *   KDBUS_ITEM_NAME_ADD
++ *   KDBUS_ITEM_NAME_REMOVE
++ *   KDBUS_ITEM_NAME_CHANGE
++ */
++struct kdbus_notify_name_change {
++	struct kdbus_notify_id_change old_id;
++	struct kdbus_notify_id_change new_id;
++	char name[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_creds - process credentials
++ * @uid:		User ID
++ * @euid:		Effective UID
++ * @suid:		Saved UID
++ * @fsuid:		Filesystem UID
++ * @gid:		Group ID
++ * @egid:		Effective GID
++ * @sgid:		Saved GID
++ * @fsgid:		Filesystem GID
++ *
++ * Attached to:
++ *   KDBUS_ITEM_CREDS
++ */
++struct kdbus_creds {
++	__u64 uid;
++	__u64 euid;
++	__u64 suid;
++	__u64 fsuid;
++	__u64 gid;
++	__u64 egid;
++	__u64 sgid;
++	__u64 fsgid;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_pids - process identifiers
++ * @pid:		Process ID
++ * @tid:		Thread ID
++ * @ppid:		Parent process ID
++ *
++ * The PID and TID of a process.
++ *
++ * Attached to:
++ *   KDBUS_ITEM_PIDS
++ */
++struct kdbus_pids {
++	__u64 pid;
++	__u64 tid;
++	__u64 ppid;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_caps - process capabilities
++ * @last_cap:	Highest currently known capability bit
++ * @caps:	Variable number of 32-bit capabilities flags
++ *
++ * Contains a variable number of 32-bit capabilities flags.
++ *
++ * Attached to:
++ *   KDBUS_ITEM_CAPS
++ */
++struct kdbus_caps {
++	__u32 last_cap;
++	__u32 caps[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_audit - audit information
++ * @sessionid:		The audit session ID
++ * @loginuid:		The audit login uid
++ *
++ * Attached to:
++ *   KDBUS_ITEM_AUDIT
++ */
++struct kdbus_audit {
++	__u32 sessionid;
++	__u32 loginuid;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_timestamp
++ * @seqnum:		Global per-domain message sequence number
++ * @monotonic_ns:	Monotonic timestamp, in nanoseconds
++ * @realtime_ns:	Realtime timestamp, in nanoseconds
++ *
++ * Attached to:
++ *   KDBUS_ITEM_TIMESTAMP
++ */
++struct kdbus_timestamp {
++	__u64 seqnum;
++	__u64 monotonic_ns;
++	__u64 realtime_ns;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_vec - I/O vector for kdbus payload items
++ * @size:		The size of the vector
++ * @address:		Memory address of data buffer
++ * @offset:		Offset in the in-message payload memory,
++ *			relative to the message head
++ *
++ * Attached to:
++ *   KDBUS_ITEM_PAYLOAD_VEC, KDBUS_ITEM_PAYLOAD_OFF
++ */
++struct kdbus_vec {
++	__u64 size;
++	union {
++		__u64 address;
++		__u64 offset;
++	};
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_bloom_parameter - bus-wide bloom parameters
++ * @size:		Size of the bit field in bytes (m / 8)
++ * @n_hash:		Number of hash functions used (k)
++ */
++struct kdbus_bloom_parameter {
++	__u64 size;
++	__u64 n_hash;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_bloom_filter - bloom filter containing n elements
++ * @generation:		Generation of the element set in the filter
++ * @data:		Bit field, multiple of 8 bytes
++ */
++struct kdbus_bloom_filter {
++	__u64 generation;
++	__u64 data[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_memfd - a kdbus memfd
++ * @start:		The offset into the memfd where the segment starts
++ * @size:		The size of the memfd segment
++ * @fd:			The file descriptor number
++ * @__pad:		Padding to ensure proper alignment and size
++ *
++ * Attached to:
++ *   KDBUS_ITEM_PAYLOAD_MEMFD
++ */
++struct kdbus_memfd {
++	__u64 start;
++	__u64 size;
++	int fd;
++	__u32 __pad;
++} __attribute__((__aligned__(8)));
++
++/**
++ * struct kdbus_name - a registered well-known name with its flags
++ * @flags:		Flags from KDBUS_NAME_*
++ * @name:		Well-known name
++ *
++ * Attached to:
++ *   KDBUS_ITEM_OWNED_NAME
++ */
++struct kdbus_name {
++	__u64 flags;
++	char name[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_policy_access_type - permissions of a policy record
++ * @_KDBUS_POLICY_ACCESS_NULL:	Uninitialized/invalid
++ * @KDBUS_POLICY_ACCESS_USER:	Grant access to a uid
++ * @KDBUS_POLICY_ACCESS_GROUP:	Grant access to gid
++ * @KDBUS_POLICY_ACCESS_WORLD:	World-accessible
++ */
++enum kdbus_policy_access_type {
++	_KDBUS_POLICY_ACCESS_NULL,
++	KDBUS_POLICY_ACCESS_USER,
++	KDBUS_POLICY_ACCESS_GROUP,
++	KDBUS_POLICY_ACCESS_WORLD,
++};
++
++/**
++ * enum kdbus_policy_access_flags - mode flags
++ * @KDBUS_POLICY_OWN:		Allow to own a well-known name
++ *				Implies KDBUS_POLICY_TALK and KDBUS_POLICY_SEE
++ * @KDBUS_POLICY_TALK:		Allow communication to a well-known name
++ *				Implies KDBUS_POLICY_SEE
++ * @KDBUS_POLICY_SEE:		Allow to see a well-known name
++ */
++enum kdbus_policy_type {
++	KDBUS_POLICY_SEE	= 0,
++	KDBUS_POLICY_TALK,
++	KDBUS_POLICY_OWN,
++};
++
++/**
++ * struct kdbus_policy_access - policy access item
++ * @type:		One of KDBUS_POLICY_ACCESS_* types
++ * @access:		Access to grant
++ * @id:			For KDBUS_POLICY_ACCESS_USER, the uid
++ *			For KDBUS_POLICY_ACCESS_GROUP, the gid
++ */
++struct kdbus_policy_access {
++	__u64 type;	/* USER, GROUP, WORLD */
++	__u64 access;	/* OWN, TALK, SEE */
++	__u64 id;	/* uid, gid, 0 */
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_attach_flags - flags for metadata attachments
++ * @KDBUS_ATTACH_TIMESTAMP:		Timestamp
++ * @KDBUS_ATTACH_CREDS:			Credentials
++ * @KDBUS_ATTACH_PIDS:			PIDs
++ * @KDBUS_ATTACH_AUXGROUPS:		Auxiliary groups
++ * @KDBUS_ATTACH_NAMES:			Well-known names
++ * @KDBUS_ATTACH_TID_COMM:		The "comm" process identifier of the TID
++ * @KDBUS_ATTACH_PID_COMM:		The "comm" process identifier of the PID
++ * @KDBUS_ATTACH_EXE:			The path of the executable
++ * @KDBUS_ATTACH_CMDLINE:		The process command line
++ * @KDBUS_ATTACH_CGROUP:		The cgroup membership
++ * @KDBUS_ATTACH_CAPS:			The process capabilities
++ * @KDBUS_ATTACH_SECLABEL:		The security label
++ * @KDBUS_ATTACH_AUDIT:			The audit IDs
++ * @KDBUS_ATTACH_CONN_DESCRIPTION:	The human-readable connection name
++ * @_KDBUS_ATTACH_ALL:			All of the above
++ * @_KDBUS_ATTACH_ANY:			Wildcard match to enable any kind of
++ *					metadata.
++ */
++enum kdbus_attach_flags {
++	KDBUS_ATTACH_TIMESTAMP		=  1ULL <<  0,
++	KDBUS_ATTACH_CREDS		=  1ULL <<  1,
++	KDBUS_ATTACH_PIDS		=  1ULL <<  2,
++	KDBUS_ATTACH_AUXGROUPS		=  1ULL <<  3,
++	KDBUS_ATTACH_NAMES		=  1ULL <<  4,
++	KDBUS_ATTACH_TID_COMM		=  1ULL <<  5,
++	KDBUS_ATTACH_PID_COMM		=  1ULL <<  6,
++	KDBUS_ATTACH_EXE		=  1ULL <<  7,
++	KDBUS_ATTACH_CMDLINE		=  1ULL <<  8,
++	KDBUS_ATTACH_CGROUP		=  1ULL <<  9,
++	KDBUS_ATTACH_CAPS		=  1ULL << 10,
++	KDBUS_ATTACH_SECLABEL		=  1ULL << 11,
++	KDBUS_ATTACH_AUDIT		=  1ULL << 12,
++	KDBUS_ATTACH_CONN_DESCRIPTION	=  1ULL << 13,
++	_KDBUS_ATTACH_ALL		=  (1ULL << 14) - 1,
++	_KDBUS_ATTACH_ANY		=  ~0ULL
++};
++
++/**
++ * enum kdbus_item_type - item types to chain data in a list
++ * @_KDBUS_ITEM_NULL:			Uninitialized/invalid
++ * @_KDBUS_ITEM_USER_BASE:		Start of user items
++ * @KDBUS_ITEM_NEGOTIATE:		Negotiate supported items
++ * @KDBUS_ITEM_PAYLOAD_VEC:		Vector to data
++ * @KDBUS_ITEM_PAYLOAD_OFF:		Data at returned offset to message head
++ * @KDBUS_ITEM_PAYLOAD_MEMFD:		Data as sealed memfd
++ * @KDBUS_ITEM_FDS:			Attached file descriptors
++ * @KDBUS_ITEM_CANCEL_FD:		FD used to cancel a synchronous
++ *					operation by writing to it from
++ *					userspace
++ * @KDBUS_ITEM_BLOOM_PARAMETER:		Bus-wide bloom parameters, used with
++ *					KDBUS_CMD_BUS_MAKE, carries a
++ *					struct kdbus_bloom_parameter
++ * @KDBUS_ITEM_BLOOM_FILTER:		Bloom filter carried with a message,
++ *					used to match against a bloom mask of a
++ *					connection, carries a struct
++ *					kdbus_bloom_filter
++ * @KDBUS_ITEM_BLOOM_MASK:		Bloom mask used to match against a
++ *					message's bloom filter
++ * @KDBUS_ITEM_DST_NAME:		Destination's well-known name
++ * @KDBUS_ITEM_MAKE_NAME:		Name of domain, bus, endpoint
++ * @KDBUS_ITEM_ATTACH_FLAGS_SEND:	Attach-flags, used for updating which
++ *					metadata a connection opts in to send
++ * @KDBUS_ITEM_ATTACH_FLAGS_RECV:	Attach-flags, used for updating which
++ *					metadata a connection requests to
++ *					receive for each received message
++ * @KDBUS_ITEM_ID:			Connection ID
++ * @KDBUS_ITEM_NAME:			Well-known name with flags
++ * @_KDBUS_ITEM_ATTACH_BASE:		Start of metadata attach items
++ * @KDBUS_ITEM_TIMESTAMP:		Timestamp
++ * @KDBUS_ITEM_CREDS:			Process credentials
++ * @KDBUS_ITEM_PIDS:			Process identifiers
++ * @KDBUS_ITEM_AUXGROUPS:		Auxiliary process groups
++ * @KDBUS_ITEM_OWNED_NAME:		A name owned by the associated
++ *					connection
++ * @KDBUS_ITEM_TID_COMM:		Thread ID "comm" identifier
++ *					(Don't trust this, see below.)
++ * @KDBUS_ITEM_PID_COMM:		Process ID "comm" identifier
++ *					(Don't trust this, see below.)
++ * @KDBUS_ITEM_EXE:			The path of the executable
++ *					(Don't trust this, see below.)
++ * @KDBUS_ITEM_CMDLINE:			The process command line
++ *					(Don't trust this, see below.)
++ * @KDBUS_ITEM_CGROUP:			The cgroup membership
++ * @KDBUS_ITEM_CAPS:			The process capabilities
++ * @KDBUS_ITEM_SECLABEL:		The security label
++ * @KDBUS_ITEM_AUDIT:			The audit IDs
++ * @KDBUS_ITEM_CONN_DESCRIPTION:	The connection's human-readable name
++ *					(debugging)
++ * @_KDBUS_ITEM_POLICY_BASE:		Start of policy items
++ * @KDBUS_ITEM_POLICY_ACCESS:		Policy access block
++ * @_KDBUS_ITEM_KERNEL_BASE:		Start of kernel-generated message items
++ * @KDBUS_ITEM_NAME_ADD:		Notification in kdbus_notify_name_change
++ * @KDBUS_ITEM_NAME_REMOVE:		Notification in kdbus_notify_name_change
++ * @KDBUS_ITEM_NAME_CHANGE:		Notification in kdbus_notify_name_change
++ * @KDBUS_ITEM_ID_ADD:			Notification in kdbus_notify_id_change
++ * @KDBUS_ITEM_ID_REMOVE:		Notification in kdbus_notify_id_change
++ * @KDBUS_ITEM_REPLY_TIMEOUT:		Timeout has been reached
++ * @KDBUS_ITEM_REPLY_DEAD:		Destination died
++ *
++ * N.B.: The process and thread COMM fields, as well as the CMDLINE and
++ * EXE fields, may be altered by unprivileged processes and should
++ * hence *not* be used for security decisions. Peers should make use of
++ * these items only for informational purposes, such as generating log
++ * records.
++ */
++enum kdbus_item_type {
++	_KDBUS_ITEM_NULL,
++	_KDBUS_ITEM_USER_BASE,
++	KDBUS_ITEM_NEGOTIATE	= _KDBUS_ITEM_USER_BASE,
++	KDBUS_ITEM_PAYLOAD_VEC,
++	KDBUS_ITEM_PAYLOAD_OFF,
++	KDBUS_ITEM_PAYLOAD_MEMFD,
++	KDBUS_ITEM_FDS,
++	KDBUS_ITEM_CANCEL_FD,
++	KDBUS_ITEM_BLOOM_PARAMETER,
++	KDBUS_ITEM_BLOOM_FILTER,
++	KDBUS_ITEM_BLOOM_MASK,
++	KDBUS_ITEM_DST_NAME,
++	KDBUS_ITEM_MAKE_NAME,
++	KDBUS_ITEM_ATTACH_FLAGS_SEND,
++	KDBUS_ITEM_ATTACH_FLAGS_RECV,
++	KDBUS_ITEM_ID,
++	KDBUS_ITEM_NAME,
++
++	/* keep these item types in sync with KDBUS_ATTACH_* flags */
++	_KDBUS_ITEM_ATTACH_BASE	= 0x1000,
++	KDBUS_ITEM_TIMESTAMP	= _KDBUS_ITEM_ATTACH_BASE,
++	KDBUS_ITEM_CREDS,
++	KDBUS_ITEM_PIDS,
++	KDBUS_ITEM_AUXGROUPS,
++	KDBUS_ITEM_OWNED_NAME,
++	KDBUS_ITEM_TID_COMM,
++	KDBUS_ITEM_PID_COMM,
++	KDBUS_ITEM_EXE,
++	KDBUS_ITEM_CMDLINE,
++	KDBUS_ITEM_CGROUP,
++	KDBUS_ITEM_CAPS,
++	KDBUS_ITEM_SECLABEL,
++	KDBUS_ITEM_AUDIT,
++	KDBUS_ITEM_CONN_DESCRIPTION,
++
++	_KDBUS_ITEM_POLICY_BASE	= 0x2000,
++	KDBUS_ITEM_POLICY_ACCESS = _KDBUS_ITEM_POLICY_BASE,
++
++	_KDBUS_ITEM_KERNEL_BASE	= 0x8000,
++	KDBUS_ITEM_NAME_ADD	= _KDBUS_ITEM_KERNEL_BASE,
++	KDBUS_ITEM_NAME_REMOVE,
++	KDBUS_ITEM_NAME_CHANGE,
++	KDBUS_ITEM_ID_ADD,
++	KDBUS_ITEM_ID_REMOVE,
++	KDBUS_ITEM_REPLY_TIMEOUT,
++	KDBUS_ITEM_REPLY_DEAD,
++};
++
++/**
++ * struct kdbus_item - chain of data blocks
++ * @size:		Overall data record size
++ * @type:		Kdbus_item type of data
++ * @data:		Generic bytes
++ * @data32:		Generic 32 bit array
++ * @data64:		Generic 64 bit array
++ * @str:		Generic string
++ * @id:			Connection ID
++ * @vec:		KDBUS_ITEM_PAYLOAD_VEC
++ * @creds:		KDBUS_ITEM_CREDS
++ * @audit:		KDBUS_ITEM_AUDIT
++ * @timestamp:		KDBUS_ITEM_TIMESTAMP
++ * @name:		KDBUS_ITEM_NAME
++ * @bloom_parameter:	KDBUS_ITEM_BLOOM_PARAMETER
++ * @bloom_filter:	KDBUS_ITEM_BLOOM_FILTER
++ * @memfd:		KDBUS_ITEM_PAYLOAD_MEMFD
++ * @name_change:	KDBUS_ITEM_NAME_ADD
++ *			KDBUS_ITEM_NAME_REMOVE
++ *			KDBUS_ITEM_NAME_CHANGE
++ * @id_change:		KDBUS_ITEM_ID_ADD
++ *			KDBUS_ITEM_ID_REMOVE
++ * @policy:		KDBUS_ITEM_POLICY_ACCESS
++ */
++struct kdbus_item {
++	__u64 size;
++	__u64 type;
++	union {
++		__u8 data[0];
++		__u32 data32[0];
++		__u64 data64[0];
++		char str[0];
++
++		__u64 id;
++		struct kdbus_vec vec;
++		struct kdbus_creds creds;
++		struct kdbus_pids pids;
++		struct kdbus_audit audit;
++		struct kdbus_caps caps;
++		struct kdbus_timestamp timestamp;
++		struct kdbus_name name;
++		struct kdbus_bloom_parameter bloom_parameter;
++		struct kdbus_bloom_filter bloom_filter;
++		struct kdbus_memfd memfd;
++		int fds[0];
++		struct kdbus_notify_name_change name_change;
++		struct kdbus_notify_id_change id_change;
++		struct kdbus_policy_access policy_access;
++	};
++} __attribute__((__aligned__(8)));
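++
++/*
++ * Illustrative only, not part of the ABI: items are stored back-to-back,
++ * each padded to an 8-byte boundary. A receiver would typically walk a
++ * chain of items attached to a message like this:
++ *
++ *	struct kdbus_item *item = msg->items;
++ *
++ *	while ((__u8 *)item < (__u8 *)msg + msg->size) {
++ *		switch (item->type) {
++ *		case KDBUS_ITEM_PAYLOAD_OFF:
++ *			...
++ *		}
++ *		item = (struct kdbus_item *)
++ *			((__u8 *)item + ((item->size + 7) & ~7ULL));
++ *	}
++ */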
++
++/**
++ * enum kdbus_msg_flags - type of message
++ * @KDBUS_MSG_EXPECT_REPLY:	Expect a reply message, used for
++ *				method calls. The userspace-supplied
++ *				cookie identifies the message and the
++ *				respective reply carries the cookie
++ *				in cookie_reply
++ * @KDBUS_MSG_NO_AUTO_START:	Do not start a service if the addressed
++ *				name is not currently active. This flag is
++ *				not looked at by the kernel but only
++ *				serves as a hint for userspace implementations.
++ * @KDBUS_MSG_SIGNAL:		Treat this message as a signal
++ */
++enum kdbus_msg_flags {
++	KDBUS_MSG_EXPECT_REPLY	= 1ULL << 0,
++	KDBUS_MSG_NO_AUTO_START	= 1ULL << 1,
++	KDBUS_MSG_SIGNAL	= 1ULL << 2,
++};
++
++/**
++ * enum kdbus_payload_type - type of payload carried by message
++ * @KDBUS_PAYLOAD_KERNEL:	Kernel-generated simple message
++ * @KDBUS_PAYLOAD_DBUS:		D-Bus marshalling "DBusDBus"
++ *
++ * Any payload-type is accepted. Common types will get added here once
++ * established.
++ */
++enum kdbus_payload_type {
++	KDBUS_PAYLOAD_KERNEL,
++	KDBUS_PAYLOAD_DBUS	= 0x4442757344427573ULL,
++};
++
++/**
++ * struct kdbus_msg - the representation of a kdbus message
++ * @size:		Total size of the message
++ * @flags:		Message flags (KDBUS_MSG_*), userspace → kernel
++ * @priority:		Message queue priority value
++ * @dst_id:		64-bit ID of the destination connection
++ * @src_id:		64-bit ID of the source connection
++ * @payload_type:	Payload type (KDBUS_PAYLOAD_*)
++ * @cookie:		Userspace-supplied cookie, for the connection
++ *			to identify its messages
++ * @timeout_ns:		The time to wait for a message reply from the peer.
++ *			If there is no reply, and the send command is
++ *			executed asynchronously, a kernel-generated message
++ *			with an attached KDBUS_ITEM_REPLY_TIMEOUT item
++ *			is sent to @src_id. For synchronously executed send
++ *			command, the value denotes the maximum time the call
++ *			blocks to wait for a reply. The timeout is expected in
++ *			nanoseconds and as absolute CLOCK_MONOTONIC value.
++ * @cookie_reply:	A reply to the requesting message with the same
++ *			cookie. The requesting connection can match its
++ *			request and the reply with this value
++ * @items:		A list of kdbus_items containing the message payload
++ */
++struct kdbus_msg {
++	__u64 size;
++	__u64 flags;
++	__s64 priority;
++	__u64 dst_id;
++	__u64 src_id;
++	__u64 payload_type;
++	__u64 cookie;
++	union {
++		__u64 timeout_ns;
++		__u64 cookie_reply;
++	};
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
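As a hedged sketch of how these fields combine, the snippet below builds a minimal method call carrying one KDBUS_ITEM_PAYLOAD_VEC. The inputs, the 5-second deadline, and the address/size members of struct kdbus_vec are taken as read from earlier in this header; KDBUS_ALIGN8 is the helper from the item-walk sketch above:

	/* sketch: 'dst_id', 'payload' and 'payload_len' are assumed inputs */
	static struct kdbus_msg *build_call(__u64 dst_id,
					    void *payload, __u64 payload_len)
	{
		size_t isz = offsetof(struct kdbus_item, vec) +
			     sizeof(struct kdbus_vec);
		size_t msz = sizeof(struct kdbus_msg) + KDBUS_ALIGN8(isz);
		struct kdbus_msg *msg = calloc(1, msz);
		struct timespec ts;

		if (!msg)
			return NULL;
		clock_gettime(CLOCK_MONOTONIC, &ts);

		msg->size = msz;
		msg->flags = KDBUS_MSG_EXPECT_REPLY;
		msg->dst_id = dst_id;
		msg->payload_type = KDBUS_PAYLOAD_DBUS;
		msg->cookie = 1;	/* echoed back in cookie_reply */
		/* absolute CLOCK_MONOTONIC deadline, here now + 5s */
		msg->timeout_ns = ts.tv_sec * 1000000000ULL + ts.tv_nsec +
				  5ULL * 1000000000ULL;
		msg->items[0].size = isz;
		msg->items[0].type = KDBUS_ITEM_PAYLOAD_VEC;
		msg->items[0].vec.address = (__u64)(uintptr_t)payload;
		msg->items[0].vec.size = payload_len;
		return msg;
	}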
++
++/**
++ * struct kdbus_msg_info - returned message container
++ * @offset:		Offset of kdbus_msg slice in pool
++ * @msg_size:		Copy of the kdbus_msg.size field
++ * @return_flags:	Command return flags, kernel → userspace
++ */
++struct kdbus_msg_info {
++	__u64 offset;
++	__u64 msg_size;
++	__u64 return_flags;
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_send_flags - flags for sending messages
++ * @KDBUS_SEND_SYNC_REPLY:	Wait for destination connection to
++ *				reply to this message. The
++ *				KDBUS_CMD_SEND ioctl() will block
++ *				until the reply is received, and
++ *				@reply in struct kdbus_cmd_send will
++ *				yield the offset in the sender's pool
++ *				where the reply can be found.
++ *				This flag is only valid if
++ *				@KDBUS_MSG_EXPECT_REPLY is set as well.
++ */
++enum kdbus_send_flags {
++	KDBUS_SEND_SYNC_REPLY		= 1ULL << 0,
++};
++
++/**
++ * struct kdbus_cmd_send - send message
++ * @size:		Overall size of this structure
++ * @flags:		Flags to change send behavior (KDBUS_SEND_*)
++ * @return_flags:	Command return flags, kernel → userspace
++ * @msg_address:	Storage address of the kdbus_msg to send
++ * @reply:		Storage for message reply if KDBUS_SEND_SYNC_REPLY
++ *			was given
++ * @items:		Additional items for this command
++ */
++struct kdbus_cmd_send {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 msg_address;
++	struct kdbus_msg_info reply;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_recv_flags - flags for de-queuing messages
++ * @KDBUS_RECV_PEEK:		Return the next queued message without
++ *				actually de-queuing it, and without installing
++ *				any file descriptors or other resources. It is
++ *				usually used to determine the activating
++ *				connection of a bus name.
++ * @KDBUS_RECV_DROP:		Drop and free the next queued message and all
++ *				its resources without actually receiving it.
++ * @KDBUS_RECV_USE_PRIORITY:	Only de-queue messages with the specified or
++ *				higher priority (lowest values); if not set,
++ *				the priority value is ignored.
++ */
++enum kdbus_recv_flags {
++	KDBUS_RECV_PEEK		= 1ULL <<  0,
++	KDBUS_RECV_DROP		= 1ULL <<  1,
++	KDBUS_RECV_USE_PRIORITY	= 1ULL <<  2,
++};
++
++/**
++ * enum kdbus_recv_return_flags - return flags for message receive commands
++ * @KDBUS_RECV_RETURN_INCOMPLETE_FDS:	One or more file descriptors could not
++ *					be installed. These descriptors in
++ *					KDBUS_ITEM_FDS will carry the value -1.
++ * @KDBUS_RECV_RETURN_DROPPED_MSGS:	There have been dropped messages since
++ *					the last time a message was received.
++ *					The 'dropped_msgs' counter contains the
++ *					number of messages dropped due to pool
++ *					overflows or other missed broadcasts.
++ */
++enum kdbus_recv_return_flags {
++	KDBUS_RECV_RETURN_INCOMPLETE_FDS	= 1ULL <<  0,
++	KDBUS_RECV_RETURN_DROPPED_MSGS		= 1ULL <<  1,
++};
++
++/**
++ * struct kdbus_cmd_recv - struct to de-queue a buffered message
++ * @size:		Overall size of this object
++ * @flags:		KDBUS_RECV_* flags, userspace → kernel
++ * @return_flags:	Command return flags, kernel → userspace
++ * @priority:		Minimum priority of the messages to de-queue. Lowest
++ *			values have the highest priority.
++ * @dropped_msgs:	In case there were any dropped messages since the last
++ *			time a message was received, this will be set to the
++ *			number of lost messages and
++ *			KDBUS_RECV_RETURN_DROPPED_MSGS will be set in
++ *			'return_flags'. This can only happen if the ioctl
++ *			returns 0 or EAGAIN.
++ * @msg:		Return storage for received message.
++ * @items:		Additional items for this command.
++ *
++ * This struct is used with the KDBUS_CMD_RECV ioctl.
++ */
++struct kdbus_cmd_recv {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__s64 priority;
++	__u64 dropped_msgs;
++	struct kdbus_msg_info msg;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
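Put together, receiving is a three-step cycle: KDBUS_CMD_RECV yields a pool offset, the message is read from the mmap()ed pool, and KDBUS_CMD_FREE releases the slice. A minimal sketch, assuming 'fd' is a connected bus handle and 'pool' the mmap()ed receive pool:

	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };

	if (ioctl(fd, KDBUS_CMD_RECV, &recv) < 0)
		return -errno;		/* EAGAIN: queue is empty */

	struct kdbus_msg *msg =
		(struct kdbus_msg *)((__u8 *)pool + recv.msg.offset);

	/* ... walk msg->items as sketched above ... */

	struct kdbus_cmd_free fr = {
		.size   = sizeof(fr),
		.offset = recv.msg.offset,	/* hand slice back to pool */
	};
	ioctl(fd, KDBUS_CMD_FREE, &fr);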
++
++/**
++ * struct kdbus_cmd_free - struct to free a slice of memory in the pool
++ * @size:		Overall size of this structure
++ * @flags:		Flags for the free command, userspace → kernel
++ * @return_flags:	Command return flags, kernel → userspace
++ * @offset:		The offset of the memory slice, as returned by other
++ *			ioctls
++ * @items:		Additional items to modify the behavior
++ *
++ * This struct is used with the KDBUS_CMD_FREE ioctl.
++ */
++struct kdbus_cmd_free {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 offset;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_hello_flags - flags for struct kdbus_cmd_hello
++ * @KDBUS_HELLO_ACCEPT_FD:	The connection allows the reception of
++ *				any passed file descriptors
++ * @KDBUS_HELLO_ACTIVATOR:	Special-purpose connection which registers
++ *				a well-known name for a process to be started
++ *				when traffic arrives
++ * @KDBUS_HELLO_POLICY_HOLDER:	Special-purpose connection which registers
++ *				policy entries for a name. The provided name
++ *				is not activated and not registered with the
++ *				name database; it only allows unprivileged
++ *				connections to acquire a name, talk or discover
++ *				a service
++ * @KDBUS_HELLO_MONITOR:	Special-purpose connection to monitor
++ *				bus traffic
++ */
++enum kdbus_hello_flags {
++	KDBUS_HELLO_ACCEPT_FD		=  1ULL <<  0,
++	KDBUS_HELLO_ACTIVATOR		=  1ULL <<  1,
++	KDBUS_HELLO_POLICY_HOLDER	=  1ULL <<  2,
++	KDBUS_HELLO_MONITOR		=  1ULL <<  3,
++};
++
++/**
++ * struct kdbus_cmd_hello - struct to say hello to kdbus
++ * @size:		The total size of the structure
++ * @flags:		Connection flags (KDBUS_HELLO_*), userspace → kernel
++ * @return_flags:	Command return flags, kernel → userspace
++ * @attach_flags_send:	Mask of metadata to attach to each message sent
++ *			off by this connection (KDBUS_ATTACH_*)
++ * @attach_flags_recv:	Mask of metadata to attach to each message received
++ *			by the new connection (KDBUS_ATTACH_*)
++ * @bus_flags:		The flags field copied verbatim from the original
++ *			KDBUS_CMD_BUS_MAKE ioctl. It's intended to be useful
++ *			to do negotiation of features of the payload that is
++ *			transferred (kernel → userspace)
++ * @id:			The ID of this connection (kernel → userspace)
++ * @pool_size:		Size of the connection's buffer where the received
++ *			messages are placed
++ * @offset:		Pool offset where items are returned to report
++ *			additional information about the bus and the newly
++ *			created connection.
++ * @items_size:		Size of buffer returned in the pool slice at @offset.
++ * @id128:		Unique 128-bit ID of the bus (kernel → userspace)
++ * @items:		A list of items
++ *
++ * This struct is used with the KDBUS_CMD_HELLO ioctl.
++ */
++struct kdbus_cmd_hello {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 attach_flags_send;
++	__u64 attach_flags_recv;
++	__u64 bus_flags;
++	__u64 id;
++	__u64 pool_size;
++	__u64 offset;
++	__u64 items_size;
++	__u8 id128[16];
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
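A hedged sketch of the handshake this struct drives; the bus-node path and pool size are assumptions, while the ioctl and the mmap() of the receive pool follow from the fields documented above:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	int fd = open("/sys/fs/kdbus/0-system/bus", O_RDWR | O_CLOEXEC);
	struct kdbus_cmd_hello hello = {
		.size	   = sizeof(hello),
		.flags	   = KDBUS_HELLO_ACCEPT_FD,
		.pool_size = 16UL * 1024 * 1024,  /* must be page-aligned */
	};

	if (fd < 0 || ioctl(fd, KDBUS_CMD_HELLO, &hello) < 0)
		return -errno;

	/* all offsets returned by later commands point into this pool */
	void *pool = mmap(NULL, hello.pool_size, PROT_READ,
			  MAP_SHARED, fd, 0);
	printf("connected, unique id %llu\n", (unsigned long long)hello.id);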
++
++/**
++ * struct kdbus_info - connection information
++ * @size:		total size of the struct
++ * @id:			64bit object ID
++ * @flags:		object creation flags
++ * @items:		list of items
++ *
++ * Note that the user is responsible for freeing the allocated memory with
++ * the KDBUS_CMD_FREE ioctl.
++ */
++struct kdbus_info {
++	__u64 size;
++	__u64 id;
++	__u64 flags;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_list_flags - what to include into the returned list
++ * @KDBUS_LIST_UNIQUE:		active connections
++ * @KDBUS_LIST_NAMES:		known well-known names
++ * @KDBUS_LIST_ACTIVATORS:	activator connections
++ * @KDBUS_LIST_QUEUED:		queued-up names
++ */
++enum kdbus_list_flags {
++	KDBUS_LIST_UNIQUE		= 1ULL <<  0,
++	KDBUS_LIST_NAMES		= 1ULL <<  1,
++	KDBUS_LIST_ACTIVATORS		= 1ULL <<  2,
++	KDBUS_LIST_QUEUED		= 1ULL <<  3,
++};
++
++/**
++ * struct kdbus_cmd_list - list connections
++ * @size:		overall size of this object
++ * @flags:		flags for the query (KDBUS_LIST_*), userspace → kernel
++ * @return_flags:	command return flags, kernel → userspace
++ * @offset:		Offset in the caller's pool buffer where an array of
++ *			kdbus_info objects is stored.
++ *			The user must use KDBUS_CMD_FREE to free the
++ *			allocated memory.
++ * @list_size:		size of returned list in bytes
++ * @items:		Items for the command. Reserved for future use.
++ *
++ * This structure is used with the KDBUS_CMD_LIST ioctl.
++ */
++struct kdbus_cmd_list {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 offset;
++	__u64 list_size;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
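The slice at @offset holds a packed array of variable-size kdbus_info records, so consuming it resembles the item walk above; a minimal sketch under the same fd/pool assumptions:

	struct kdbus_cmd_list cmd = {
		.size  = sizeof(cmd),
		.flags = KDBUS_LIST_UNIQUE | KDBUS_LIST_NAMES,
	};

	if (ioctl(fd, KDBUS_CMD_LIST, &cmd) < 0)
		return -errno;

	struct kdbus_info *info = (void *)((__u8 *)pool + cmd.offset);
	__u8 *end = (__u8 *)info + cmd.list_size;

	while ((__u8 *)info < end) {
		printf("connection id %llu\n", (unsigned long long)info->id);
		info = (void *)((__u8 *)info + KDBUS_ALIGN8(info->size));
	}
	/* then KDBUS_CMD_FREE on cmd.offset, as above */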
++
++/**
++ * struct kdbus_cmd_info - struct used for KDBUS_CMD_CONN_INFO ioctl
++ * @size:		The total size of the struct
++ * @flags:		Flags for this ioctl, userspace → kernel
++ * @return_flags:	Command return flags, kernel → userspace
++ * @id:			The 64-bit ID of the connection. If set to zero, passing
++ *			@name is required. kdbus will look up the name to
++ *			determine the ID in this case.
++ * @attach_flags:	Set of attach flags to specify the set of information
++ *			to receive, userspace → kernel
++ * @offset:		Returned offset in the caller's pool buffer where the
++ *			kdbus_info struct result is stored. The user must
++ *			use KDBUS_CMD_FREE to free the allocated memory.
++ * @info_size:		Output buffer to report size of data at @offset.
++ * @items:		The optional item list, containing the
++ *			well-known name to look up as a KDBUS_ITEM_NAME.
++ *			Only needed in case @id is zero.
++ *
++ * On success, the KDBUS_CMD_CONN_INFO ioctl will return 0 and @offset will
++ * tell the user the offset in the connection pool buffer at which to find the
++ * result in a struct kdbus_info.
++ */
++struct kdbus_cmd_info {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 id;
++	__u64 attach_flags;
++	__u64 offset;
++	__u64 info_size;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
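Looking a peer up by well-known name means leaving @id at zero and appending a single KDBUS_ITEM_NAME; a hedged sketch, with a hypothetical service name and the fd/pool assumptions as before:

	const char *name = "org.example.Service";	/* hypothetical */
	size_t nlen = strlen(name) + 1;
	size_t isz = offsetof(struct kdbus_item, str) + nlen;
	size_t csz = sizeof(struct kdbus_cmd_info) + KDBUS_ALIGN8(isz);
	struct kdbus_cmd_info *cmd = calloc(1, csz);

	cmd->size = csz;
	cmd->attach_flags = KDBUS_ATTACH_CREDS | KDBUS_ATTACH_PIDS;
	cmd->items[0].size = isz;
	cmd->items[0].type = KDBUS_ITEM_NAME;
	memcpy(cmd->items[0].str, name, nlen);

	if (ioctl(fd, KDBUS_CMD_CONN_INFO, cmd) == 0) {
		struct kdbus_info *info =
			(void *)((__u8 *)pool + cmd->offset);
		/* inspect info->items, then KDBUS_CMD_FREE cmd->offset */
	}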
++
++/**
++ * enum kdbus_cmd_match_flags - flags to control the KDBUS_CMD_MATCH_ADD ioctl
++ * @KDBUS_MATCH_REPLACE:	If entries with the supplied cookie already
++ *				exist, remove them before installing the new
++ *				matches.
++ */
++enum kdbus_cmd_match_flags {
++	KDBUS_MATCH_REPLACE	= 1ULL <<  0,
++};
++
++/**
++ * struct kdbus_cmd_match - struct to add or remove matches
++ * @size:		The total size of the struct
++ * @flags:		Flags for match command (KDBUS_MATCH_*),
++ *			userspace → kernel
++ * @return_flags:	Command return flags, kernel → userspace
++ * @cookie:		Userspace supplied cookie. When removing, the cookie
++ *			identifies the match to remove
++ * @items:		A list of items for additional information
++ *
++ * This structure is used with the KDBUS_CMD_MATCH_ADD and
++ * KDBUS_CMD_MATCH_REMOVE ioctl.
++ */
++struct kdbus_cmd_match {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	__u64 cookie;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * enum kdbus_make_flags - Flags for KDBUS_CMD_{BUS,ENDPOINT}_MAKE
++ * @KDBUS_MAKE_ACCESS_GROUP:	Make the bus or endpoint node group-accessible
++ * @KDBUS_MAKE_ACCESS_WORLD:	Make the bus or endpoint node world-accessible
++ */
++enum kdbus_make_flags {
++	KDBUS_MAKE_ACCESS_GROUP		= 1ULL <<  0,
++	KDBUS_MAKE_ACCESS_WORLD		= 1ULL <<  1,
++};
++
++/**
++ * enum kdbus_name_flags - flags for KDBUS_CMD_NAME_ACQUIRE
++ * @KDBUS_NAME_REPLACE_EXISTING:	Try to replace name of other connections
++ * @KDBUS_NAME_ALLOW_REPLACEMENT:	Allow the replacement of the name
++ * @KDBUS_NAME_QUEUE:			Name should be queued if busy
++ * @KDBUS_NAME_IN_QUEUE:		Name is queued
++ * @KDBUS_NAME_ACTIVATOR:		Name is owned by an activator connection
++ */
++enum kdbus_name_flags {
++	KDBUS_NAME_REPLACE_EXISTING	= 1ULL <<  0,
++	KDBUS_NAME_ALLOW_REPLACEMENT	= 1ULL <<  1,
++	KDBUS_NAME_QUEUE		= 1ULL <<  2,
++	KDBUS_NAME_IN_QUEUE		= 1ULL <<  3,
++	KDBUS_NAME_ACTIVATOR		= 1ULL <<  4,
++};
++
++/**
++ * struct kdbus_cmd - generic ioctl payload
++ * @size:		Overall size of this structure
++ * @flags:		Flags for this ioctl, userspace → kernel
++ * @return_flags:	Ioctl return flags, kernel → userspace
++ * @items:		Additional items to modify the behavior
++ *
++ * This is a generic ioctl payload object. It's used by all ioctls that only
++ * take flags and items as input.
++ */
++struct kdbus_cmd {
++	__u64 size;
++	__u64 flags;
++	__u64 return_flags;
++	struct kdbus_item items[0];
++} __attribute__((__aligned__(8)));
++
++/**
++ * Ioctl API
++ *
++ * KDBUS_CMD_BUS_MAKE:		After opening the "control" node, this command
++ *				creates a new bus with the specified
++ *				name. The bus is immediately shut down and
++ *				cleaned up when the opened file descriptor is
++ *				closed.
++ *
++ * KDBUS_CMD_ENDPOINT_MAKE:	Creates a new named special endpoint to talk to
++ *				the bus. Such endpoints usually carry a more
++ *				restrictive policy and grant restricted access
++ *				to specific applications.
++ * KDBUS_CMD_ENDPOINT_UPDATE:	Update the properties of a custom endpoint. Used
++ *				to update the policy.
++ *
++ * KDBUS_CMD_HELLO:		By opening the bus node, a connection is
++ *				created. After a HELLO the opened connection
++ *				becomes an active peer on the bus.
++ * KDBUS_CMD_UPDATE:		Update the properties of a connection. Used to
++ *				update the metadata subscription mask and
++ *				policy.
++ * KDBUS_CMD_BYEBYE:		Disconnect a connection. If there are no
++ *				messages queued up in the connection's pool,
++ *				the call succeeds, and the handle is rendered
++ *				unusable. Otherwise, -EBUSY is returned without
++ *				any further side-effects.
++ * KDBUS_CMD_FREE:		Release the allocated memory in the receiver's
++ *				pool.
++ * KDBUS_CMD_CONN_INFO:		Retrieve credentials and properties of the
++ *				initial creator of the connection. The data was
++ *				stored at registration time and does not
++ *				necessarily represent the connected process or
++ *				the actual state of the process.
++ * KDBUS_CMD_BUS_CREATOR_INFO:	Retrieve information of the creator of the bus
++ *				a connection is attached to.
++ *
++ * KDBUS_CMD_SEND:		Send a message and pass data from userspace to
++ *				the kernel.
++ * KDBUS_CMD_RECV:		Receive a message from the kernel which is
++ *				placed in the receiver's pool.
++ *
++ * KDBUS_CMD_NAME_ACQUIRE:	Request a well-known bus name to associate with
++ *				the connection. Well-known names are used to
++ *				address a peer on the bus.
++ * KDBUS_CMD_NAME_RELEASE:	Release a well-known name the connection
++ *				currently owns.
++ * KDBUS_CMD_LIST:		Retrieve the list of all currently registered
++ *				well-known and unique names.
++ *
++ * KDBUS_CMD_MATCH_ADD:		Install a match to select which broadcast
++ *				messages should be delivered to the connection.
++ * KDBUS_CMD_MATCH_REMOVE:	Remove a current match for broadcast messages.
++ */
++enum kdbus_ioctl_type {
++	/* bus owner (00-0f) */
++	KDBUS_CMD_BUS_MAKE =		_IOW(KDBUS_IOCTL_MAGIC, 0x00,
++					     struct kdbus_cmd),
++
++	/* endpoint owner (10-1f) */
++	KDBUS_CMD_ENDPOINT_MAKE =	_IOW(KDBUS_IOCTL_MAGIC, 0x10,
++					     struct kdbus_cmd),
++	KDBUS_CMD_ENDPOINT_UPDATE =	_IOW(KDBUS_IOCTL_MAGIC, 0x11,
++					     struct kdbus_cmd),
++
++	/* connection owner (80-ff) */
++	KDBUS_CMD_HELLO =		_IOWR(KDBUS_IOCTL_MAGIC, 0x80,
++					      struct kdbus_cmd_hello),
++	KDBUS_CMD_UPDATE =		_IOW(KDBUS_IOCTL_MAGIC, 0x81,
++					     struct kdbus_cmd),
++	KDBUS_CMD_BYEBYE =		_IOW(KDBUS_IOCTL_MAGIC, 0x82,
++					     struct kdbus_cmd),
++	KDBUS_CMD_FREE =		_IOW(KDBUS_IOCTL_MAGIC, 0x83,
++					     struct kdbus_cmd_free),
++	KDBUS_CMD_CONN_INFO =		_IOR(KDBUS_IOCTL_MAGIC, 0x84,
++					     struct kdbus_cmd_info),
++	KDBUS_CMD_BUS_CREATOR_INFO =	_IOR(KDBUS_IOCTL_MAGIC, 0x85,
++					     struct kdbus_cmd_info),
++	KDBUS_CMD_LIST =		_IOR(KDBUS_IOCTL_MAGIC, 0x86,
++					     struct kdbus_cmd_list),
++
++	KDBUS_CMD_SEND =		_IOW(KDBUS_IOCTL_MAGIC, 0x90,
++					     struct kdbus_cmd_send),
++	KDBUS_CMD_RECV =		_IOR(KDBUS_IOCTL_MAGIC, 0x91,
++					     struct kdbus_cmd_recv),
++
++	KDBUS_CMD_NAME_ACQUIRE =	_IOW(KDBUS_IOCTL_MAGIC, 0xa0,
++					     struct kdbus_cmd),
++	KDBUS_CMD_NAME_RELEASE =	_IOW(KDBUS_IOCTL_MAGIC, 0xa1,
++					     struct kdbus_cmd),
++
++	KDBUS_CMD_MATCH_ADD =		_IOW(KDBUS_IOCTL_MAGIC, 0xb0,
++					     struct kdbus_cmd_match),
++	KDBUS_CMD_MATCH_REMOVE =	_IOW(KDBUS_IOCTL_MAGIC, 0xb1,
++					     struct kdbus_cmd_match),
++};
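Several of these commands, KDBUS_CMD_NAME_ACQUIRE and KDBUS_CMD_NAME_RELEASE among them, take only the generic struct kdbus_cmd plus items. A hedged sketch of acquiring a name, again with a hypothetical service name; that the queue state is reported back in return_flags is an assumption here:

	const char *name = "org.example.Service";	/* hypothetical */
	size_t nlen = strlen(name) + 1;
	size_t isz = offsetof(struct kdbus_item, str) + nlen;
	size_t csz = sizeof(struct kdbus_cmd) + KDBUS_ALIGN8(isz);
	struct kdbus_cmd *cmd = calloc(1, csz);

	cmd->size = csz;
	cmd->flags = KDBUS_NAME_QUEUE;		/* wait in line if busy */
	cmd->items[0].size = isz;
	cmd->items[0].type = KDBUS_ITEM_NAME;
	memcpy(cmd->items[0].str, name, nlen);

	if (ioctl(fd, KDBUS_CMD_NAME_ACQUIRE, cmd) < 0)
		return -errno;
	if (cmd->return_flags & KDBUS_NAME_IN_QUEUE)
		printf("queued behind the current owner\n");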
++
++#endif /* _UAPI_KDBUS_H_ */
+diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
+index 7d664ea..1cf05c0 100644
+--- a/include/uapi/linux/magic.h
++++ b/include/uapi/linux/magic.h
+@@ -74,4 +74,6 @@
+ #define BTRFS_TEST_MAGIC	0x73727279
+ #define NSFS_MAGIC		0x6e736673
+ 
++#define KDBUS_SUPER_MAGIC	0x44427573
++
+ #endif /* __LINUX_MAGIC_H__ */
+diff --git a/init/Kconfig b/init/Kconfig
+index f5dbc6d..6bda631 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -261,6 +261,19 @@ config POSIX_MQUEUE_SYSCTL
+ 	depends on SYSCTL
+ 	default y
+ 
++config KDBUS
++	tristate "kdbus interprocess communication"
++	depends on TMPFS
++	help
++	  D-Bus is a system for low-latency, low-overhead, easy-to-use
++	  interprocess communication (IPC).
++
++	  See the man-pages and HTML files in Documentation/kdbus/
++	  that are generated by 'make mandocs' and 'make htmldocs'.
++
++	  If you have an ordinary machine, select M here. The module
++	  will be called kdbus.
++
+ config CROSS_MEMORY_ATTACH
+ 	bool "Enable process_vm_readv/writev syscalls"
+ 	depends on MMU
+diff --git a/ipc/Makefile b/ipc/Makefile
+index 86c7300..68ec416 100644
+--- a/ipc/Makefile
++++ b/ipc/Makefile
+@@ -9,4 +9,4 @@ obj_mq-$(CONFIG_COMPAT) += compat_mq.o
+ obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
+ obj-$(CONFIG_IPC_NS) += namespace.o
+ obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
+-
++obj-$(CONFIG_KDBUS) += kdbus/
+diff --git a/ipc/kdbus/Makefile b/ipc/kdbus/Makefile
+new file mode 100644
+index 0000000..7ee9271
+--- /dev/null
++++ b/ipc/kdbus/Makefile
+@@ -0,0 +1,22 @@
++kdbus-y := \
++	bus.o \
++	connection.o \
++	endpoint.o \
++	fs.o \
++	handle.o \
++	item.o \
++	main.o \
++	match.o \
++	message.o \
++	metadata.o \
++	names.o \
++	node.o \
++	notify.o \
++	domain.o \
++	policy.o \
++	pool.o \
++	reply.o \
++	queue.o \
++	util.o
++
++obj-$(CONFIG_KDBUS) += kdbus.o
+diff --git a/ipc/kdbus/bus.c b/ipc/kdbus/bus.c
+new file mode 100644
+index 0000000..9d0679e
+--- /dev/null
++++ b/ipc/kdbus/bus.c
+@@ -0,0 +1,560 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/fs.h>
++#include <linux/hashtable.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/random.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/uio.h>
++
++#include "bus.h"
++#include "notify.h"
++#include "connection.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "item.h"
++#include "match.h"
++#include "message.h"
++#include "metadata.h"
++#include "names.h"
++#include "policy.h"
++#include "util.h"
++
++static void kdbus_bus_free(struct kdbus_node *node)
++{
++	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);
++
++	WARN_ON(!list_empty(&bus->monitors_list));
++	WARN_ON(!hash_empty(bus->conn_hash));
++
++	kdbus_notify_free(bus);
++
++	kdbus_user_unref(bus->creator);
++	kdbus_name_registry_free(bus->name_registry);
++	kdbus_domain_unref(bus->domain);
++	kdbus_policy_db_clear(&bus->policy_db);
++	kdbus_meta_proc_unref(bus->creator_meta);
++	kfree(bus);
++}
++
++static void kdbus_bus_release(struct kdbus_node *node, bool was_active)
++{
++	struct kdbus_bus *bus = container_of(node, struct kdbus_bus, node);
++
++	if (was_active)
++		atomic_dec(&bus->creator->buses);
++}
++
++static struct kdbus_bus *kdbus_bus_new(struct kdbus_domain *domain,
++				       const char *name,
++				       struct kdbus_bloom_parameter *bloom,
++				       const u64 *pattach_owner,
++				       const u64 *pattach_recv,
++				       u64 flags, kuid_t uid, kgid_t gid)
++{
++	struct kdbus_bus *b;
++	u64 attach_owner;
++	u64 attach_recv;
++	int ret;
++
++	if (bloom->size < 8 || bloom->size > KDBUS_BUS_BLOOM_MAX_SIZE ||
++	    !KDBUS_IS_ALIGNED8(bloom->size) || bloom->n_hash < 1)
++		return ERR_PTR(-EINVAL);
++
++	ret = kdbus_sanitize_attach_flags(pattach_recv ? *pattach_recv : 0,
++					  &attach_recv);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	ret = kdbus_sanitize_attach_flags(pattach_owner ? *pattach_owner : 0,
++					  &attach_owner);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	ret = kdbus_verify_uid_prefix(name, domain->user_namespace, uid);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	b = kzalloc(sizeof(*b), GFP_KERNEL);
++	if (!b)
++		return ERR_PTR(-ENOMEM);
++
++	kdbus_node_init(&b->node, KDBUS_NODE_BUS);
++
++	b->node.free_cb = kdbus_bus_free;
++	b->node.release_cb = kdbus_bus_release;
++	b->node.uid = uid;
++	b->node.gid = gid;
++	b->node.mode = S_IRUSR | S_IXUSR;
++
++	if (flags & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
++		b->node.mode |= S_IRGRP | S_IXGRP;
++	if (flags & KDBUS_MAKE_ACCESS_WORLD)
++		b->node.mode |= S_IROTH | S_IXOTH;
++
++	b->id = atomic64_inc_return(&domain->last_id);
++	b->bus_flags = flags;
++	b->attach_flags_req = attach_recv;
++	b->attach_flags_owner = attach_owner;
++	generate_random_uuid(b->id128);
++	b->bloom = *bloom;
++	b->domain = kdbus_domain_ref(domain);
++
++	kdbus_policy_db_init(&b->policy_db);
++
++	init_rwsem(&b->conn_rwlock);
++	hash_init(b->conn_hash);
++	INIT_LIST_HEAD(&b->monitors_list);
++
++	INIT_LIST_HEAD(&b->notify_list);
++	spin_lock_init(&b->notify_lock);
++	mutex_init(&b->notify_flush_lock);
++
++	ret = kdbus_node_link(&b->node, &domain->node, name);
++	if (ret < 0)
++		goto exit_unref;
++
++	/* cache the metadata/credentials of the creator */
++	b->creator_meta = kdbus_meta_proc_new();
++	if (IS_ERR(b->creator_meta)) {
++		ret = PTR_ERR(b->creator_meta);
++		b->creator_meta = NULL;
++		goto exit_unref;
++	}
++
++	ret = kdbus_meta_proc_collect(b->creator_meta,
++				      KDBUS_ATTACH_CREDS |
++				      KDBUS_ATTACH_PIDS |
++				      KDBUS_ATTACH_AUXGROUPS |
++				      KDBUS_ATTACH_TID_COMM |
++				      KDBUS_ATTACH_PID_COMM |
++				      KDBUS_ATTACH_EXE |
++				      KDBUS_ATTACH_CMDLINE |
++				      KDBUS_ATTACH_CGROUP |
++				      KDBUS_ATTACH_CAPS |
++				      KDBUS_ATTACH_SECLABEL |
++				      KDBUS_ATTACH_AUDIT);
++	if (ret < 0)
++		goto exit_unref;
++
++	b->name_registry = kdbus_name_registry_new();
++	if (IS_ERR(b->name_registry)) {
++		ret = PTR_ERR(b->name_registry);
++		b->name_registry = NULL;
++		goto exit_unref;
++	}
++
++	/*
++	 * Bus-limits of the creator are accounted on its real UID, just like
++	 * all other per-user limits.
++	 */
++	b->creator = kdbus_user_lookup(domain, current_uid());
++	if (IS_ERR(b->creator)) {
++		ret = PTR_ERR(b->creator);
++		b->creator = NULL;
++		goto exit_unref;
++	}
++
++	return b;
++
++exit_unref:
++	kdbus_node_deactivate(&b->node);
++	kdbus_node_unref(&b->node);
++	return ERR_PTR(ret);
++}
++
++/**
++ * kdbus_bus_ref() - increase the reference counter of a kdbus_bus
++ * @bus:		The bus to reference
++ *
++ * Every user of a bus, except for its creator, must add a reference to the
++ * kdbus_bus using this function.
++ *
++ * Return: the bus itself
++ */
++struct kdbus_bus *kdbus_bus_ref(struct kdbus_bus *bus)
++{
++	if (bus)
++		kdbus_node_ref(&bus->node);
++	return bus;
++}
++
++/**
++ * kdbus_bus_unref() - decrease the reference counter of a kdbus_bus
++ * @bus:		The bus to unref
++ *
++ * Release a reference. If the reference count drops to 0, the bus will be
++ * freed.
++ *
++ * Return: NULL
++ */
++struct kdbus_bus *kdbus_bus_unref(struct kdbus_bus *bus)
++{
++	if (bus)
++		kdbus_node_unref(&bus->node);
++	return NULL;
++}
++
++/**
++ * kdbus_bus_find_conn_by_id() - find a connection with a given id
++ * @bus:		The bus to look for the connection
++ * @id:			The 64-bit connection id
++ *
++ * Looks up a connection with a given id. The returned connection
++ * is ref'ed, and needs to be unref'ed by the user. Returns NULL if
++ * the connection can't be found.
++ */
++struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id)
++{
++	struct kdbus_conn *conn, *found = NULL;
++
++	down_read(&bus->conn_rwlock);
++	hash_for_each_possible(bus->conn_hash, conn, hentry, id)
++		if (conn->id == id) {
++			found = kdbus_conn_ref(conn);
++			break;
++		}
++	up_read(&bus->conn_rwlock);
++
++	return found;
++}
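Since the result comes back referenced, every caller pairs the lookup with an unref; a minimal usage sketch of that pattern:

	struct kdbus_conn *c;

	c = kdbus_bus_find_conn_by_id(bus, id);
	if (!c)
		return -ENXIO;		/* no such peer */

	/* ... use 'c' while holding the reference ... */

	kdbus_conn_unref(c);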
++
++/**
++ * kdbus_bus_broadcast() - send a message to all subscribed connections
++ * @bus:	The bus the connections are connected to
++ * @conn_src:	The source connection, may be %NULL for kernel notifications
++ * @kmsg:	The message to send.
++ *
++ * Send @kmsg to all connections that are currently active on the bus.
++ * Connections must still have matches installed in order to let the message
++ * pass.
++ *
++ * The caller must hold the name-registry lock of @bus.
++ */
++void kdbus_bus_broadcast(struct kdbus_bus *bus,
++			 struct kdbus_conn *conn_src,
++			 struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_conn *conn_dst;
++	unsigned int i;
++	int ret;
++
++	lockdep_assert_held(&bus->name_registry->rwlock);
++
++	/*
++	 * Make sure broadcasts are queued on monitors before we send them
++	 * out to anyone else. Otherwise, connections might react to a
++	 * broadcast before the monitor gets it queued. In the worst case, the
++	 * monitor sees a reaction to the broadcast before the broadcast itself.
++	 * We don't give ordering guarantees across connections (and monitors
++	 * can re-construct order via sequence numbers), but we should at least
++	 * try to avoid re-ordering for monitors.
++	 */
++	kdbus_bus_eavesdrop(bus, conn_src, kmsg);
++
++	down_read(&bus->conn_rwlock);
++	hash_for_each(bus->conn_hash, i, conn_dst, hentry) {
++		if (conn_dst->id == kmsg->msg.src_id)
++			continue;
++		if (!kdbus_conn_is_ordinary(conn_dst))
++			continue;
++
++		/*
++		 * Check if there is a match for the kmsg object in
++		 * the destination connection match db
++		 */
++		if (!kdbus_match_db_match_kmsg(conn_dst->match_db, conn_src,
++					       kmsg))
++			continue;
++
++		if (conn_src) {
++			u64 attach_flags;
++
++			/*
++			 * Anyone can send broadcasts, as they have no
++			 * destination. But a receiver needs TALK access to
++			 * the sender in order to receive broadcasts.
++			 */
++			if (!kdbus_conn_policy_talk(conn_dst, NULL, conn_src))
++				continue;
++
++			attach_flags = kdbus_meta_calc_attach_flags(conn_src,
++								    conn_dst);
++
++			/*
++			 * Keep sending messages even if we cannot acquire the
++			 * requested metadata. It's up to the receiver to drop
++			 * messages that lack expected metadata.
++			 */
++			if (!conn_src->faked_meta)
++				kdbus_meta_proc_collect(kmsg->proc_meta,
++							attach_flags);
++			kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, conn_src,
++						attach_flags);
++		} else {
++			/*
++			 * Check if there is a policy db that prevents the
++			 * destination connection from receiving this kernel
++			 * notification
++			 */
++			if (!kdbus_conn_policy_see_notification(conn_dst, NULL,
++								kmsg))
++				continue;
++		}
++
++		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
++		if (ret < 0)
++			kdbus_conn_lost_message(conn_dst);
++	}
++	up_read(&bus->conn_rwlock);
++}
++
++/**
++ * kdbus_bus_eavesdrop() - send a message to all subscribed monitors
++ * @bus:	The bus the monitors are connected to
++ * @conn_src:	The source connection, may be %NULL for kernel notifications
++ * @kmsg:	The message to send.
++ *
++ * Send @kmsg to all monitors that are currently active on the bus. Monitors
++ * must still have matches installed in order to let the message pass.
++ *
++ * The caller must hold the name-registry lock of @bus.
++ */
++void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
++			 struct kdbus_conn *conn_src,
++			 struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_conn *conn_dst;
++	int ret;
++
++	/*
++	 * Monitor connections get all messages; ignore possible errors
++	 * when sending messages to monitor connections.
++	 */
++
++	lockdep_assert_held(&bus->name_registry->rwlock);
++
++	down_read(&bus->conn_rwlock);
++	list_for_each_entry(conn_dst, &bus->monitors_list, monitor_entry) {
++		/*
++		 * Collect metadata requested by the destination connection.
++		 * Ignore errors, as receivers need to check metadata
++		 * availability, anyway. So it's still better to send messages
++		 * that lack data, than to skip it entirely.
++		 */
++		if (conn_src) {
++			u64 attach_flags;
++
++			attach_flags = kdbus_meta_calc_attach_flags(conn_src,
++								    conn_dst);
++			if (!conn_src->faked_meta)
++				kdbus_meta_proc_collect(kmsg->proc_meta,
++							attach_flags);
++			kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, conn_src,
++						attach_flags);
++		}
++
++		ret = kdbus_conn_entry_insert(conn_src, conn_dst, kmsg, NULL);
++		if (ret < 0)
++			kdbus_conn_lost_message(conn_dst);
++	}
++	up_read(&bus->conn_rwlock);
++}
++
++/**
++ * kdbus_cmd_bus_make() - handle KDBUS_CMD_BUS_MAKE
++ * @domain:		domain to operate on
++ * @argp:		command payload
++ *
++ * Return: Newly created bus on success, ERR_PTR on failure.
++ */
++struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
++				     void __user *argp)
++{
++	struct kdbus_bus *bus = NULL;
++	struct kdbus_cmd *cmd;
++	struct kdbus_ep *ep = NULL;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
++		{ .type = KDBUS_ITEM_BLOOM_PARAMETER, .mandatory = true },
++		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
++		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_MAKE_ACCESS_GROUP |
++				 KDBUS_MAKE_ACCESS_WORLD,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret < 0)
++		return ERR_PTR(ret);
++	if (ret > 0)
++		return NULL;
++
++	bus = kdbus_bus_new(domain,
++			    argv[1].item->str, &argv[2].item->bloom_parameter,
++			    argv[3].item ? argv[3].item->data64 : NULL,
++			    argv[4].item ? argv[4].item->data64 : NULL,
++			    cmd->flags, current_euid(), current_egid());
++	if (IS_ERR(bus)) {
++		ret = PTR_ERR(bus);
++		bus = NULL;
++		goto exit;
++	}
++
++	if (atomic_inc_return(&bus->creator->buses) > KDBUS_USER_MAX_BUSES) {
++		atomic_dec(&bus->creator->buses);
++		ret = -EMFILE;
++		goto exit;
++	}
++
++	if (!kdbus_node_activate(&bus->node)) {
++		atomic_dec(&bus->creator->buses);
++		ret = -ESHUTDOWN;
++		goto exit;
++	}
++
++	ep = kdbus_ep_new(bus, "bus", cmd->flags, bus->node.uid, bus->node.gid,
++			  false);
++	if (IS_ERR(ep)) {
++		ret = PTR_ERR(ep);
++		ep = NULL;
++		goto exit;
++	}
++
++	if (!kdbus_node_activate(&ep->node)) {
++		ret = -ESHUTDOWN;
++		goto exit;
++	}
++
++	/*
++	 * Drop our own reference, effectively causing the endpoint to be
++	 * deactivated and released when the parent bus is.
++	 */
++	ep = kdbus_ep_unref(ep);
++
++exit:
++	ret = kdbus_args_clear(&args, ret);
++	if (ret < 0) {
++		if (ep) {
++			kdbus_node_deactivate(&ep->node);
++			kdbus_ep_unref(ep);
++		}
++		if (bus) {
++			kdbus_node_deactivate(&bus->node);
++			kdbus_bus_unref(bus);
++		}
++		return ERR_PTR(ret);
++	}
++	return bus;
++}
++
++/**
++ * kdbus_cmd_bus_creator_info() - handle KDBUS_CMD_BUS_CREATOR_INFO
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_cmd_info *cmd;
++	struct kdbus_bus *bus = conn->ep->bus;
++	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_item_header item_hdr;
++	struct kdbus_info info = {};
++	size_t meta_size, name_len;
++	struct kvec kvec[5];
++	u64 hdr_size = 0;
++	u64 attach_flags;
++	size_t cnt = 0;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
++	if (ret < 0)
++		goto exit;
++
++	attach_flags &= bus->attach_flags_owner;
++
++	ret = kdbus_meta_export_prepare(bus->creator_meta, NULL,
++					&attach_flags, &meta_size);
++	if (ret < 0)
++		goto exit;
++
++	name_len = strlen(bus->node.name) + 1;
++	info.id = bus->id;
++	info.flags = bus->bus_flags;
++	item_hdr.type = KDBUS_ITEM_MAKE_NAME;
++	item_hdr.size = KDBUS_ITEM_HEADER_SIZE + name_len;
++
++	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &hdr_size);
++	kdbus_kvec_set(&kvec[cnt++], &item_hdr, sizeof(item_hdr), &hdr_size);
++	kdbus_kvec_set(&kvec[cnt++], bus->node.name, name_len, &hdr_size);
++	cnt += !!kdbus_kvec_pad(&kvec[cnt], &hdr_size);
++
++	slice = kdbus_pool_slice_alloc(conn->pool, hdr_size + meta_size, false);
++	if (IS_ERR(slice)) {
++		ret = PTR_ERR(slice);
++		slice = NULL;
++		goto exit;
++	}
++
++	ret = kdbus_meta_export(bus->creator_meta, NULL, attach_flags,
++				slice, hdr_size, &meta_size);
++	if (ret < 0)
++		goto exit;
++
++	info.size = hdr_size + meta_size;
++
++	ret = kdbus_pool_slice_copy_kvec(slice, 0, kvec, cnt, hdr_size);
++	if (ret < 0)
++		goto exit;
++
++	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
++
++	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
++	    kdbus_member_set_user(&cmd->info_size, argp,
++				  typeof(*cmd), info_size))
++		ret = -EFAULT;
++
++exit:
++	kdbus_pool_slice_release(slice);
++
++	return kdbus_args_clear(&args, ret);
++}
+diff --git a/ipc/kdbus/bus.h b/ipc/kdbus/bus.h
+new file mode 100644
+index 0000000..5bea5ef
+--- /dev/null
++++ b/ipc/kdbus/bus.h
+@@ -0,0 +1,101 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_BUS_H
++#define __KDBUS_BUS_H
++
++#include <linux/hashtable.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/rwsem.h>
++#include <linux/spinlock.h>
++#include <uapi/linux/kdbus.h>
++
++#include "metadata.h"
++#include "names.h"
++#include "node.h"
++#include "policy.h"
++
++struct kdbus_conn;
++struct kdbus_domain;
++struct kdbus_kmsg;
++struct kdbus_user;
++
++/**
++ * struct kdbus_bus - bus in a domain
++ * @node:		kdbus_node
++ * @id:			ID of this bus in the domain
++ * @bus_flags:		Simple pass-through flags from userspace to userspace
++ * @attach_flags_req:	KDBUS_ATTACH_* flags required by connecting peers
++ * @attach_flags_owner:	KDBUS_ATTACH_* flags of bus creator that other
++ *			connections can see or query
++ * @id128:		Unique random 128 bit ID of this bus
++ * @bloom:		Bloom parameters
++ * @domain:		Domain of this bus
++ * @creator:		Creator of the bus
++ * @creator_meta:	Meta information about the bus creator
++ * @policy_db:		Policy database for this bus
++ * @name_registry:	Name registry of this bus
++ * @conn_rwlock:	Read/Write lock for all lists of child connections
++ * @conn_hash:		Map of connection IDs
++ * @monitors_list:	Connections that monitor this bus
++ * @notify_list:	List of pending kernel-generated messages
++ * @notify_lock:	Notification list lock
++ * @notify_flush_lock:	Notification flushing lock
++ */
++struct kdbus_bus {
++	struct kdbus_node node;
++
++	/* static */
++	u64 id;
++	u64 bus_flags;
++	u64 attach_flags_req;
++	u64 attach_flags_owner;
++	u8 id128[16];
++	struct kdbus_bloom_parameter bloom;
++	struct kdbus_domain *domain;
++	struct kdbus_user *creator;
++	struct kdbus_meta_proc *creator_meta;
++
++	/* protected by own locks */
++	struct kdbus_policy_db policy_db;
++	struct kdbus_name_registry *name_registry;
++
++	/* protected by conn_rwlock */
++	struct rw_semaphore conn_rwlock;
++	DECLARE_HASHTABLE(conn_hash, 8);
++	struct list_head monitors_list;
++
++	/* protected by notify_lock */
++	struct list_head notify_list;
++	spinlock_t notify_lock;
++	struct mutex notify_flush_lock;
++};
++
++struct kdbus_bus *kdbus_bus_ref(struct kdbus_bus *bus);
++struct kdbus_bus *kdbus_bus_unref(struct kdbus_bus *bus);
++
++struct kdbus_conn *kdbus_bus_find_conn_by_id(struct kdbus_bus *bus, u64 id);
++void kdbus_bus_broadcast(struct kdbus_bus *bus,
++			 struct kdbus_conn *conn_src,
++			 struct kdbus_kmsg *kmsg);
++void kdbus_bus_eavesdrop(struct kdbus_bus *bus,
++			 struct kdbus_conn *conn_src,
++			 struct kdbus_kmsg *kmsg);
++
++struct kdbus_bus *kdbus_cmd_bus_make(struct kdbus_domain *domain,
++				     void __user *argp);
++int kdbus_cmd_bus_creator_info(struct kdbus_conn *conn, void __user *argp);
++
++#endif
+diff --git a/ipc/kdbus/connection.c b/ipc/kdbus/connection.c
+new file mode 100644
+index 0000000..ab476fa
+--- /dev/null
++++ b/ipc/kdbus/connection.c
+@@ -0,0 +1,2214 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/audit.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/fs_struct.h>
++#include <linux/hashtable.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/math64.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/path.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/shmem_fs.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/syscalls.h>
++#include <linux/uio.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "match.h"
++#include "message.h"
++#include "metadata.h"
++#include "names.h"
++#include "domain.h"
++#include "item.h"
++#include "notify.h"
++#include "policy.h"
++#include "pool.h"
++#include "reply.h"
++#include "util.h"
++#include "queue.h"
++
++#define KDBUS_CONN_ACTIVE_BIAS	(INT_MIN + 2)
++#define KDBUS_CONN_ACTIVE_NEW	(INT_MIN + 1)
++
++static struct kdbus_conn *kdbus_conn_new(struct kdbus_ep *ep, bool privileged,
++					 struct kdbus_cmd_hello *hello,
++					 const char *name,
++					 const struct kdbus_creds *creds,
++					 const struct kdbus_pids *pids,
++					 const char *seclabel,
++					 const char *conn_description)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	static struct lock_class_key __key;
++#endif
++	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_bus *bus = ep->bus;
++	struct kdbus_conn *conn;
++	u64 attach_flags_send;
++	u64 attach_flags_recv;
++	u64 items_size = 0;
++	bool is_policy_holder;
++	bool is_activator;
++	bool is_monitor;
++	struct kvec kvec;
++	int ret;
++
++	struct {
++		u64 size;
++		u64 type;
++		struct kdbus_bloom_parameter bloom;
++	} bloom_item;
++
++	is_monitor = hello->flags & KDBUS_HELLO_MONITOR;
++	is_activator = hello->flags & KDBUS_HELLO_ACTIVATOR;
++	is_policy_holder = hello->flags & KDBUS_HELLO_POLICY_HOLDER;
++
++	if (!hello->pool_size || !IS_ALIGNED(hello->pool_size, PAGE_SIZE))
++		return ERR_PTR(-EINVAL);
++	if (is_monitor + is_activator + is_policy_holder > 1)
++		return ERR_PTR(-EINVAL);
++	if (name && !is_activator && !is_policy_holder)
++		return ERR_PTR(-EINVAL);
++	if (!name && (is_activator || is_policy_holder))
++		return ERR_PTR(-EINVAL);
++	if (name && !kdbus_name_is_valid(name, true))
++		return ERR_PTR(-EINVAL);
++	if (is_monitor && ep->user)
++		return ERR_PTR(-EOPNOTSUPP);
++	if (!privileged && (is_activator || is_policy_holder || is_monitor))
++		return ERR_PTR(-EPERM);
++	if ((creds || pids || seclabel) && !privileged)
++		return ERR_PTR(-EPERM);
++
++	ret = kdbus_sanitize_attach_flags(hello->attach_flags_send,
++					  &attach_flags_send);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	ret = kdbus_sanitize_attach_flags(hello->attach_flags_recv,
++					  &attach_flags_recv);
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	/* The attach flags must always satisfy the bus requirements. */
++	if (bus->attach_flags_req & ~attach_flags_send)
++		return ERR_PTR(-ECONNREFUSED);
++
++	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
++	if (!conn)
++		return ERR_PTR(-ENOMEM);
++
++	kref_init(&conn->kref);
++	atomic_set(&conn->active, KDBUS_CONN_ACTIVE_NEW);
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	lockdep_init_map(&conn->dep_map, "s_active", &__key, 0);
++#endif
++	mutex_init(&conn->lock);
++	INIT_LIST_HEAD(&conn->names_list);
++	INIT_LIST_HEAD(&conn->names_queue_list);
++	INIT_LIST_HEAD(&conn->reply_list);
++	atomic_set(&conn->name_count, 0);
++	atomic_set(&conn->request_count, 0);
++	atomic_set(&conn->lost_count, 0);
++	INIT_DELAYED_WORK(&conn->work, kdbus_reply_list_scan_work);
++	conn->cred = get_current_cred();
++	init_waitqueue_head(&conn->wait);
++	kdbus_queue_init(&conn->queue);
++	conn->privileged = privileged;
++	conn->ep = kdbus_ep_ref(ep);
++	conn->id = atomic64_inc_return(&bus->domain->last_id);
++	conn->flags = hello->flags;
++	atomic64_set(&conn->attach_flags_send, attach_flags_send);
++	atomic64_set(&conn->attach_flags_recv, attach_flags_recv);
++	INIT_LIST_HEAD(&conn->monitor_entry);
++
++	if (conn_description) {
++		conn->description = kstrdup(conn_description, GFP_KERNEL);
++		if (!conn->description) {
++			ret = -ENOMEM;
++			goto exit_unref;
++		}
++	}
++
++	conn->pool = kdbus_pool_new(conn->description, hello->pool_size);
++	if (IS_ERR(conn->pool)) {
++		ret = PTR_ERR(conn->pool);
++		conn->pool = NULL;
++		goto exit_unref;
++	}
++
++	conn->match_db = kdbus_match_db_new();
++	if (IS_ERR(conn->match_db)) {
++		ret = PTR_ERR(conn->match_db);
++		conn->match_db = NULL;
++		goto exit_unref;
++	}
++
++	/* return properties of this connection to the caller */
++	hello->bus_flags = bus->bus_flags;
++	hello->id = conn->id;
++
++	BUILD_BUG_ON(sizeof(bus->id128) != sizeof(hello->id128));
++	memcpy(hello->id128, bus->id128, sizeof(hello->id128));
++
++	conn->meta = kdbus_meta_proc_new();
++	if (IS_ERR(conn->meta)) {
++		ret = PTR_ERR(conn->meta);
++		conn->meta = NULL;
++		goto exit_unref;
++	}
++
++	/* privileged processes can impersonate somebody else */
++	if (creds || pids || seclabel) {
++		ret = kdbus_meta_proc_fake(conn->meta, creds, pids, seclabel);
++		if (ret < 0)
++			goto exit_unref;
++
++		conn->faked_meta = true;
++	} else {
++		ret = kdbus_meta_proc_collect(conn->meta,
++					      KDBUS_ATTACH_CREDS |
++					      KDBUS_ATTACH_PIDS |
++					      KDBUS_ATTACH_AUXGROUPS |
++					      KDBUS_ATTACH_TID_COMM |
++					      KDBUS_ATTACH_PID_COMM |
++					      KDBUS_ATTACH_EXE |
++					      KDBUS_ATTACH_CMDLINE |
++					      KDBUS_ATTACH_CGROUP |
++					      KDBUS_ATTACH_CAPS |
++					      KDBUS_ATTACH_SECLABEL |
++					      KDBUS_ATTACH_AUDIT);
++		if (ret < 0)
++			goto exit_unref;
++	}
++
++	/*
++	 * Account the connection against the current user (UID), or for
++	 * custom endpoints use the anonymous user assigned to the endpoint.
++	 * Note that limits are always accounted against the real UID, not
++	 * the effective UID (cred->user always points to the accounting of
++	 * cred->uid, not cred->euid).
++	 */
++	if (ep->user) {
++		conn->user = kdbus_user_ref(ep->user);
++	} else {
++		conn->user = kdbus_user_lookup(ep->bus->domain, current_uid());
++		if (IS_ERR(conn->user)) {
++			ret = PTR_ERR(conn->user);
++			conn->user = NULL;
++			goto exit_unref;
++		}
++	}
++
++	if (atomic_inc_return(&conn->user->connections) > KDBUS_USER_MAX_CONN) {
++		/* decremented by destructor as conn->user is valid */
++		ret = -EMFILE;
++		goto exit_unref;
++	}
++
++	bloom_item.size = sizeof(bloom_item);
++	bloom_item.type = KDBUS_ITEM_BLOOM_PARAMETER;
++	bloom_item.bloom = bus->bloom;
++	kdbus_kvec_set(&kvec, &bloom_item, bloom_item.size, &items_size);
++
++	slice = kdbus_pool_slice_alloc(conn->pool, items_size, false);
++	if (IS_ERR(slice)) {
++		ret = PTR_ERR(slice);
++		slice = NULL;
++		goto exit_unref;
++	}
++
++	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, items_size);
++	if (ret < 0)
++		goto exit_unref;
++
++	kdbus_pool_slice_publish(slice, &hello->offset, &hello->items_size);
++	kdbus_pool_slice_release(slice);
++
++	return conn;
++
++exit_unref:
++	kdbus_pool_slice_release(slice);
++	kdbus_conn_unref(conn);
++	return ERR_PTR(ret);
++}
++
++static void __kdbus_conn_free(struct kref *kref)
++{
++	struct kdbus_conn *conn = container_of(kref, struct kdbus_conn, kref);
++
++	WARN_ON(kdbus_conn_active(conn));
++	WARN_ON(delayed_work_pending(&conn->work));
++	WARN_ON(!list_empty(&conn->queue.msg_list));
++	WARN_ON(!list_empty(&conn->names_list));
++	WARN_ON(!list_empty(&conn->names_queue_list));
++	WARN_ON(!list_empty(&conn->reply_list));
++
++	if (conn->user) {
++		atomic_dec(&conn->user->connections);
++		kdbus_user_unref(conn->user);
++	}
++
++	kdbus_meta_proc_unref(conn->meta);
++	kdbus_match_db_free(conn->match_db);
++	kdbus_pool_free(conn->pool);
++	kdbus_ep_unref(conn->ep);
++	put_cred(conn->cred);
++	kfree(conn->description);
++	kfree(conn->quota);
++	kfree(conn);
++}
++
++/**
++ * kdbus_conn_ref() - take a connection reference
++ * @conn:		Connection, may be %NULL
++ *
++ * Return: the connection itself
++ */
++struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn)
++{
++	if (conn)
++		kref_get(&conn->kref);
++	return conn;
++}
++
++/**
++ * kdbus_conn_unref() - drop a connection reference
++ * @conn:		Connection (may be NULL)
++ *
++ * When the last reference is dropped, the connection's internal structure
++ * is freed.
++ *
++ * Return: NULL
++ */
++struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn)
++{
++	if (conn)
++		kref_put(&conn->kref, __kdbus_conn_free);
++	return NULL;
++}
++
++/**
++ * kdbus_conn_active() - connection is not disconnected
++ * @conn:		Connection to check
++ *
++ * Return true if the connection has not been disconnected yet. Note that a
++ * connection might be disconnected asynchronously, unless you hold the
++ * connection lock. If that's not suitable for you, see kdbus_conn_acquire() to
++ * suppress connection shutdown for a short period.
++ *
++ * Return: true if the connection is still active
++ */
++bool kdbus_conn_active(const struct kdbus_conn *conn)
++{
++	return atomic_read(&conn->active) >= 0;
++}
++
++/**
++ * kdbus_conn_acquire() - acquire an active connection reference
++ * @conn:		Connection
++ *
++ * Users can close a connection via KDBUS_BYEBYE (or by destroying the
++ * endpoint/bus/...) at any time. Whenever this happens, we should deny any
++ * user-visible action on this connection and signal ECONNRESET instead.
++ * To avoid testing for connection availability every time you take the
++ * connection-lock, you can acquire a connection for short periods.
++ *
++ * By calling kdbus_conn_acquire(), you gain an "active reference" to the
++ * connection. You must also hold a regular reference at any time! As long as
++ * you hold the active-ref, the connection will not be shut down. However, if
++ * the connection was shut down, you can never acquire an active-ref again.
++ *
++ * kdbus_conn_disconnect() disables the connection and then waits for all active
++ * references to be dropped. It will also wake up any pending operation.
++ * However, you must not sleep for an indefinite period while holding an
++ * active-reference. Otherwise, kdbus_conn_disconnect() might stall. If you need
++ * to sleep for an indefinite period, either release the reference and try to
++ * acquire it again after waking up, or make kdbus_conn_disconnect() wake up
++ * your wait-queue.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_conn_acquire(struct kdbus_conn *conn)
++{
++	if (!atomic_inc_unless_negative(&conn->active))
++		return -ECONNRESET;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
++#endif
++
++	return 0;
++}
++
++/**
++ * kdbus_conn_release() - release an active connection reference
++ * @conn:		Connection
++ *
++ * This releases an active reference that has been acquired via
++ * kdbus_conn_acquire(). If the connection was already disabled and this is the
++ * last active-ref that is dropped, the disconnect-waiter will be woken up and
++ * properly close the connection.
++ */
++void kdbus_conn_release(struct kdbus_conn *conn)
++{
++	int v;
++
++	if (!conn)
++		return;
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	rwsem_release(&conn->dep_map, 1, _RET_IP_);
++#endif
++
++	v = atomic_dec_return(&conn->active);
++	if (v != KDBUS_CONN_ACTIVE_BIAS)
++		return;
++
++	wake_up_all(&conn->wait);
++}
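In practice the pair brackets short, bounded critical sections on the connection, as the comments above require; a sketch of the pattern:

	int ret;

	ret = kdbus_conn_acquire(conn);
	if (ret < 0)
		return ret;	/* -ECONNRESET: already shut down */

	/* ... short, non-blocking work on 'conn' ... */

	kdbus_conn_release(conn);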
++
++static int kdbus_conn_connect(struct kdbus_conn *conn, const char *name)
++{
++	struct kdbus_ep *ep = conn->ep;
++	struct kdbus_bus *bus = ep->bus;
++	int ret;
++
++	if (WARN_ON(atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_NEW))
++		return -EALREADY;
++
++	/* make sure the ep-node is active while we add our connection */
++	if (!kdbus_node_acquire(&ep->node))
++		return -ESHUTDOWN;
++
++	/* lock order: domain -> bus -> ep -> names -> conn */
++	mutex_lock(&ep->lock);
++	down_write(&bus->conn_rwlock);
++
++	/* link into monitor list */
++	if (kdbus_conn_is_monitor(conn))
++		list_add_tail(&conn->monitor_entry, &bus->monitors_list);
++
++	/* link into bus and endpoint */
++	list_add_tail(&conn->ep_entry, &ep->conn_list);
++	hash_add(bus->conn_hash, &conn->hentry, conn->id);
++
++	/* enable lookups and acquire active ref */
++	atomic_set(&conn->active, 1);
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	rwsem_acquire_read(&conn->dep_map, 0, 1, _RET_IP_);
++#endif
++
++	up_write(&bus->conn_rwlock);
++	mutex_unlock(&ep->lock);
++
++	kdbus_node_release(&ep->node);
++
++	/*
++	 * Notify subscribers about the new active connection, unless it is
++	 * a monitor. Monitors are invisible on the bus, can't be addressed
++	 * directly, and won't cause any notifications.
++	 */
++	if (!kdbus_conn_is_monitor(conn)) {
++		ret = kdbus_notify_id_change(conn->ep->bus, KDBUS_ITEM_ID_ADD,
++					     conn->id, conn->flags);
++		if (ret < 0)
++			goto exit_disconnect;
++	}
++
++	if (kdbus_conn_is_activator(conn)) {
++		u64 flags = KDBUS_NAME_ACTIVATOR;
++
++		if (WARN_ON(!name)) {
++			ret = -EINVAL;
++			goto exit_disconnect;
++		}
++
++		ret = kdbus_name_acquire(bus->name_registry, conn, name,
++					 flags, NULL);
++		if (ret < 0)
++			goto exit_disconnect;
++	}
++
++	kdbus_conn_release(conn);
++	kdbus_notify_flush(bus);
++	return 0;
++
++exit_disconnect:
++	kdbus_conn_release(conn);
++	kdbus_conn_disconnect(conn, false);
++	return ret;
++}
++
++/**
++ * kdbus_conn_disconnect() - disconnect a connection
++ * @conn:		The connection to disconnect
++ * @ensure_queue_empty:	Flag to indicate if the call should fail in
++ *			case the connection's message list is not
++ *			empty
++ *
++ * If @ensure_queue_empty is true and the connection has pending messages,
++ * -EBUSY is returned.
++ *
++ * Return: 0 on success, negative errno on failure
++ */
++int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty)
++{
++	struct kdbus_queue_entry *entry, *tmp;
++	struct kdbus_bus *bus = conn->ep->bus;
++	struct kdbus_reply *r, *r_tmp;
++	struct kdbus_conn *c;
++	int i, v;
++
++	mutex_lock(&conn->lock);
++	v = atomic_read(&conn->active);
++	if (v == KDBUS_CONN_ACTIVE_NEW) {
++		/* was never connected */
++		mutex_unlock(&conn->lock);
++		return 0;
++	}
++	if (v < 0) {
++		/* already dead */
++		mutex_unlock(&conn->lock);
++		return -ECONNRESET;
++	}
++	if (ensure_queue_empty && !list_empty(&conn->queue.msg_list)) {
++		/* still busy */
++		mutex_unlock(&conn->lock);
++		return -EBUSY;
++	}
++
++	atomic_add(KDBUS_CONN_ACTIVE_BIAS, &conn->active);
++	mutex_unlock(&conn->lock);
++
++	wake_up_interruptible(&conn->wait);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	rwsem_acquire(&conn->dep_map, 0, 0, _RET_IP_);
++	if (atomic_read(&conn->active) != KDBUS_CONN_ACTIVE_BIAS)
++		lock_contended(&conn->dep_map, _RET_IP_);
++#endif
++
++	wait_event(conn->wait,
++		   atomic_read(&conn->active) == KDBUS_CONN_ACTIVE_BIAS);
++
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	lock_acquired(&conn->dep_map, _RET_IP_);
++	rwsem_release(&conn->dep_map, 1, _RET_IP_);
++#endif
++
++	cancel_delayed_work_sync(&conn->work);
++	kdbus_policy_remove_owner(&conn->ep->bus->policy_db, conn);
++
++	/* lock order: domain -> bus -> ep -> names -> conn */
++	mutex_lock(&conn->ep->lock);
++	down_write(&bus->conn_rwlock);
++
++	/* remove from bus and endpoint */
++	hash_del(&conn->hentry);
++	list_del(&conn->monitor_entry);
++	list_del(&conn->ep_entry);
++
++	up_write(&bus->conn_rwlock);
++	mutex_unlock(&conn->ep->lock);
++
++	/*
++	 * Remove all names associated with this connection; this possibly
++	 * moves queued messages back to the activator connection.
++	 */
++	kdbus_name_release_all(bus->name_registry, conn);
++
++	/* if we die while other connections wait for our reply, notify them */
++	mutex_lock(&conn->lock);
++	list_for_each_entry_safe(entry, tmp, &conn->queue.msg_list, entry) {
++		if (entry->reply)
++			kdbus_notify_reply_dead(bus,
++						entry->reply->reply_dst->id,
++						entry->reply->cookie);
++		kdbus_queue_entry_free(entry);
++	}
++
++	list_for_each_entry_safe(r, r_tmp, &conn->reply_list, entry)
++		kdbus_reply_unlink(r);
++	mutex_unlock(&conn->lock);
++
++	/* lock order: domain -> bus -> ep -> names -> conn */
++	down_read(&bus->conn_rwlock);
++	hash_for_each(bus->conn_hash, i, c, hentry) {
++		mutex_lock(&c->lock);
++		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
++			if (r->reply_src == conn) {
++				if (r->sync) {
++					kdbus_sync_reply_wakeup(r, -EPIPE);
++					kdbus_reply_unlink(r);
++					continue;
++				}
++
++				/* send a 'connection dead' notification */
++				kdbus_notify_reply_dead(bus, c->id, r->cookie);
++				kdbus_reply_unlink(r);
++			}
++		}
++		mutex_unlock(&c->lock);
++	}
++	up_read(&bus->conn_rwlock);
++
++	if (!kdbus_conn_is_monitor(conn))
++		kdbus_notify_id_change(bus, KDBUS_ITEM_ID_REMOVE,
++				       conn->id, conn->flags);
++
++	kdbus_notify_flush(bus);
++
++	return 0;
++}
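++
++/*
++ * A minimal sketch of the acquire/release pair that the teardown above
++ * synchronizes with (the real helpers are implemented elsewhere in this
++ * file); it assumes the same KDBUS_CONN_ACTIVE_BIAS convention used here:
++ *
++ *	if (!atomic_inc_unless_negative(&conn->active))
++ *		return -ECONNRESET;	(disconnect already applied the bias)
++ *	... use the connection ...
++ *	if (atomic_dec_return(&conn->active) == KDBUS_CONN_ACTIVE_BIAS)
++ *		wake_up(&conn->wait);	(last user gone; unblock disconnect)
++ */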
++
++/**
++ * kdbus_conn_has_name() - check if a connection owns a name
++ * @conn:		Connection
++ * @name:		Well-known name to check for
++ *
++ * The caller must hold the registry lock of conn->ep->bus.
++ *
++ * Return: true if the name is currently owned by the connection
++ */
++bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name)
++{
++	struct kdbus_name_entry *e;
++
++	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
++
++	list_for_each_entry(e, &conn->names_list, conn_entry)
++		if (strcmp(e->name, name) == 0)
++			return true;
++
++	return false;
++}
++
++struct kdbus_quota {
++	uint32_t memory;
++	uint16_t msgs;
++	uint8_t fds;
++};
++
++/**
++ * kdbus_conn_quota_inc() - increase quota accounting
++ * @c:		connection owning the quota tracking
++ * @u:		user to account for (or NULL for kernel accounting)
++ * @memory:	size of memory to account for
++ * @fds:	number of FDs to account for
++ *
++ * This call manages the quotas on connection @c. That is, it's used if other
++ * users want to use the resources of connection @c, which so far only concerns
++ * the receive queue of the destination.
++ *
++ * This increases the quota-accounting for user @u by @memory bytes and @fds
++ * file descriptors. If the user has already reached the quota limits, this call
++ * will not do any accounting but return a negative error code indicating the
++ * failure.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
++			 size_t memory, size_t fds)
++{
++	struct kdbus_quota *quota;
++	size_t available, accounted;
++	unsigned int id;
++
++	/*
++	 * Pool Layout:
++	 * 50% of a pool is always owned by the connection. It is reserved for
++	 * kernel queries, handling received messages and other tasks that are
++	 * under control of the pool owner. The other 50% of the pool are used
++	 * as incoming queue.
++	 * As we optionally support user-space based policies, we need fair
++	 * allocation schemes. Furthermore, resource utilization should be
++	 * maximized, so only minimal resources stay reserved. However, we need
++	 * to adapt to a dynamic number of users, as we cannot know how many
++	 * users will talk to a connection. Therefore, the current allocation
++	 * works like this:
++	 * We limit the number of bytes in a destination's pool per sending
++	 * user. The space available for a user is 33% of the unused pool space
++	 * (whereas the space used by the user itself is also treated as
++	 * 'unused'). This way, we favor users coming first, but keep enough
++	 * pool space available for any following users. Given that messages are
++	 * dequeued in FIFO order, this should balance nicely if the number of
++	 * users grows. At the same time, this algorithm guarantees that the
++	 * space available to a connection is reduced dynamically, the more
++	 * concurrent users talk to a connection.
++	 */
++
++	/* per-user accounting is expensive, so we keep state small */
++	BUILD_BUG_ON(sizeof(quota->memory) != 4);
++	BUILD_BUG_ON(sizeof(quota->msgs) != 2);
++	BUILD_BUG_ON(sizeof(quota->fds) != 1);
++	BUILD_BUG_ON(KDBUS_CONN_MAX_MSGS > U16_MAX);
++	BUILD_BUG_ON(KDBUS_CONN_MAX_FDS_PER_USER > U8_MAX);
++
++	id = u ? u->id : KDBUS_USER_KERNEL_ID;
++	if (id >= c->n_quota) {
++		unsigned int users;
++
++		users = max(KDBUS_ALIGN8(id) + 8, id);
++		quota = krealloc(c->quota, users * sizeof(*quota),
++				 GFP_KERNEL | __GFP_ZERO);
++		if (!quota)
++			return -ENOMEM;
++
++		c->n_quota = users;
++		c->quota = quota;
++	}
++
++	quota = &c->quota[id];
++	kdbus_pool_accounted(c->pool, &available, &accounted);
++
++	/* half the pool is _always_ reserved for the pool owner */
++	available /= 2;
++
++	/*
++	 * Pool owner slices are un-accounted slices; they can claim more
++	 * than 50% of the queue. However, the slices we're dealing with here
++	 * belong to the incoming queue, hence they are 'accounted' slices
++	 * to which the 50%-limit applies.
++	 */
++	if (available < accounted)
++		return -ENOBUFS;
++
++	/* 1/3 of the remaining space (including your own memory) */
++	available = (available - accounted + quota->memory) / 3;
++
++	if (available < quota->memory ||
++	    available - quota->memory < memory ||
++	    quota->memory + memory > U32_MAX)
++		return -ENOBUFS;
++	if (quota->msgs >= KDBUS_CONN_MAX_MSGS)
++		return -ENOBUFS;
++	if (quota->fds + fds < quota->fds ||
++	    quota->fds + fds > KDBUS_CONN_MAX_FDS_PER_USER)
++		return -EMFILE;
++
++	quota->memory += memory;
++	quota->fds += fds;
++	++quota->msgs;
++	return 0;
++}
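++
++/*
++ * Worked example of the computation above, with assumed numbers: a 64 KiB
++ * pool yields available = 32 KiB once half is reserved for the owner. With
++ * accounted = 8 KiB queued in total and quota->memory = 2 KiB of that owned
++ * by this user, the per-user cap is (32768 - 8192 + 2048) / 3 = 8874 bytes,
++ * so this user may queue roughly 6826 further bytes before -ENOBUFS.
++ */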
++
++/**
++ * kdbus_conn_quota_dec() - decrease quota accounting
++ * @c:		connection owning the quota tracking
++ * @u:		user which was accounted for (or NULL for kernel accounting)
++ * @memory:	size of memory which was accounted for
++ * @fds:	number of FDs which were accounted for
++ *
++ * This does the reverse of kdbus_conn_quota_inc(). You have to release any
++ * accounted resources that you called kdbus_conn_quota_inc() for. However, you
++ * must not call kdbus_conn_quota_dec() if the accounting failed (that is,
++ * kdbus_conn_quota_inc() failed).
++ */
++void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
++			  size_t memory, size_t fds)
++{
++	struct kdbus_quota *quota;
++	unsigned int id;
++
++	id = u ? u->id : KDBUS_USER_KERNEL_ID;
++	if (WARN_ON(id >= c->n_quota))
++		return;
++
++	quota = &c->quota[id];
++
++	if (!WARN_ON(quota->msgs == 0))
++		--quota->msgs;
++	if (!WARN_ON(quota->memory < memory))
++		quota->memory -= memory;
++	if (!WARN_ON(quota->fds < fds))
++		quota->fds -= fds;
++}
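++
++/*
++ * A typical (hypothetical) caller pairs the two helpers around a queue
++ * entry's lifetime:
++ *
++ *	ret = kdbus_conn_quota_inc(dst, user, size, n_fds);
++ *	if (ret < 0)
++ *		return ret;	(nothing was accounted, so no _dec here)
++ *	... queue the entry; once it is freed: ...
++ *	kdbus_conn_quota_dec(dst, user, size, n_fds);
++ */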
++
++/**
++ * kdbus_conn_lost_message() - handle lost messages
++ * @c:		connection that lost a message
++ *
++ * kdbus is reliable. That means, we try hard to never lose messages. However,
++ * memory is limited, so we cannot rely on transmissions to never fail.
++ * Therefore, we use quota-limits to let callers know if their unicast message
++ * cannot be transmitted to a peer. This works fine for unicasts, but for
++ * broadcasts we cannot make the caller handle the transmission failure.
++ * Instead, we must let the destination know that it couldn't receive a
++ * broadcast.
++ * As this is an unlikely scenario, we keep it simple. A single lost-counter
++ * remembers the number of lost messages since the last call to RECV. The next
++ * message retrieval will notify the connection that it lost messages since the
++ * last message retrieval and thus should resync its state.
++ */
++void kdbus_conn_lost_message(struct kdbus_conn *c)
++{
++	if (atomic_inc_return(&c->lost_count) == 1)
++		wake_up_interruptible(&c->wait);
++}
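++
++/*
++ * The counter is consumed in kdbus_cmd_recv() below, which atomically
++ * resets it and flags the receiver:
++ *
++ *	cmd->dropped_msgs = atomic_xchg(&conn->lost_count, 0);
++ *	if (cmd->dropped_msgs > 0)
++ *		cmd->return_flags |= KDBUS_RECV_RETURN_DROPPED_MSGS;
++ */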
++
++/* Callers should take the conn_dst lock */
++static struct kdbus_queue_entry *
++kdbus_conn_entry_make(struct kdbus_conn *conn_dst,
++		      const struct kdbus_kmsg *kmsg,
++		      struct kdbus_user *user)
++{
++	/* The remote connection was disconnected */
++	if (!kdbus_conn_active(conn_dst))
++		return ERR_PTR(-ECONNRESET);
++
++	/*
++	 * If the connection does not accept file descriptors but the message
++	 * has some attached, refuse it.
++	 *
++	 * If this is a monitor connection, accept the message. In that
++	 * case, all file descriptors will be set to -1 at receive time.
++	 */
++	if (!kdbus_conn_is_monitor(conn_dst) &&
++	    !(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
++	    kmsg->res && kmsg->res->fds_count > 0)
++		return ERR_PTR(-ECOMM);
++
++	return kdbus_queue_entry_new(conn_dst, kmsg, user);
++}
++
++/*
++ * When synchronously responding to a message, allocate a queue entry and
++ * attach it to the reply tracking object. The connection's queue will
++ * never get to see it.
++ */
++static int kdbus_conn_entry_sync_attach(struct kdbus_conn *conn_dst,
++					const struct kdbus_kmsg *kmsg,
++					struct kdbus_reply *reply_wake)
++{
++	struct kdbus_queue_entry *entry;
++	int remote_ret;
++	int ret = 0;
++
++	mutex_lock(&reply_wake->reply_dst->lock);
++
++	/*
++	 * If we are still waiting then proceed, allocate a queue
++	 * entry and attach it to the reply object
++	 */
++	if (reply_wake->waiting) {
++		entry = kdbus_conn_entry_make(conn_dst, kmsg,
++					      reply_wake->reply_src->user);
++		if (IS_ERR(entry))
++			ret = PTR_ERR(entry);
++		else
++			/* Attach the entry to the reply object */
++			reply_wake->queue_entry = entry;
++	} else {
++		ret = -ECONNRESET;
++	}
++
++	/*
++	 * Update the reply object and wake up remote peer only
++	 * on appropriate return codes
++	 *
++	 * * -ECOMM: if the replying connection failed with -ECOMM
++	 *           then wakeup remote peer with -EREMOTEIO
++	 *
++	 *           We do this to differentiate between -ECOMM errors
++	 *           from the original sender's perspective: an -ECOMM
++	 *           error during the sync send versus an -ECOMM error
++	 *           during the sync reply; the latter is rewritten to
++	 *           -EREMOTEIO
++	 *
++	 * * Wake up on all other return codes.
++	 */
++	remote_ret = ret;
++
++	if (ret == -ECOMM)
++		remote_ret = -EREMOTEIO;
++
++	kdbus_sync_reply_wakeup(reply_wake, remote_ret);
++	kdbus_reply_unlink(reply_wake);
++	mutex_unlock(&reply_wake->reply_dst->lock);
++
++	return ret;
++}
++
++/**
++ * kdbus_conn_entry_insert() - enqueue a message into the receiver's pool
++ * @conn_src:		The sending connection
++ * @conn_dst:		The connection to queue into
++ * @kmsg:		The kmsg to queue
++ * @reply:		The reply tracker to attach to the queue entry
++ *
++ * Return: 0 on success, negative error otherwise.
++ */
++int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
++			    struct kdbus_conn *conn_dst,
++			    const struct kdbus_kmsg *kmsg,
++			    struct kdbus_reply *reply)
++{
++	struct kdbus_queue_entry *entry;
++	int ret;
++
++	kdbus_conn_lock2(conn_src, conn_dst);
++
++	entry = kdbus_conn_entry_make(conn_dst, kmsg,
++				      conn_src ? conn_src->user : NULL);
++	if (IS_ERR(entry)) {
++		ret = PTR_ERR(entry);
++		goto exit_unlock;
++	}
++
++	if (reply) {
++		kdbus_reply_link(reply);
++		if (!reply->sync)
++			schedule_delayed_work(&conn_src->work, 0);
++	}
++
++	kdbus_queue_entry_enqueue(entry, reply);
++	wake_up_interruptible(&conn_dst->wait);
++
++	ret = 0;
++
++exit_unlock:
++	kdbus_conn_unlock2(conn_src, conn_dst);
++	return ret;
++}
++
++static int kdbus_conn_wait_reply(struct kdbus_conn *conn_src,
++				 struct kdbus_cmd_send *cmd_send,
++				 struct file *ioctl_file,
++				 struct file *cancel_fd,
++				 struct kdbus_reply *reply_wait,
++				 ktime_t expire)
++{
++	struct kdbus_queue_entry *entry;
++	struct poll_wqueues pwq = {};
++	int ret;
++
++	if (WARN_ON(!reply_wait))
++		return -EIO;
++
++	/*
++	 * Block until the reply arrives. reply_wait is left untouched
++	 * by the timeout scans that might be conducted for other,
++	 * asynchronous replies of conn_src.
++	 */
++
++	poll_initwait(&pwq);
++	poll_wait(ioctl_file, &conn_src->wait, &pwq.pt);
++
++	for (;;) {
++		/*
++		 * Any of the following conditions will stop our synchronously
++		 * blocking SEND command:
++		 *
++		 * a) The origin sender closed its connection
++		 * b) The remote peer answered, setting reply_wait->waiting = 0
++		 * c) The cancel FD was written to
++		 * d) A signal was received
++		 * e) The specified timeout was reached, and none of the above
++		 *    conditions kicked in.
++		 */
++
++		/*
++		 * We have already acquired an active reference when
++		 * entering here, but another thread may call
++		 * KDBUS_CMD_BYEBYE which does not acquire an active
++		 * reference, therefore kdbus_conn_disconnect() will
++		 * not wait for us.
++		 */
++		if (!kdbus_conn_active(conn_src)) {
++			ret = -ECONNRESET;
++			break;
++		}
++
++		/*
++		 * Once the replying peer unsets the waiting variable,
++		 * it will wake us up.
++		 */
++		if (!reply_wait->waiting) {
++			ret = reply_wait->err;
++			break;
++		}
++
++		if (cancel_fd) {
++			unsigned int r;
++
++			r = cancel_fd->f_op->poll(cancel_fd, &pwq.pt);
++			if (r & POLLIN) {
++				ret = -ECANCELED;
++				break;
++			}
++		}
++
++		if (signal_pending(current)) {
++			ret = -EINTR;
++			break;
++		}
++
++		if (!poll_schedule_timeout(&pwq, TASK_INTERRUPTIBLE,
++					   &expire, 0)) {
++			ret = -ETIMEDOUT;
++			break;
++		}
++
++		/*
++		 * Reset the poll worker func, so the waitqueues are not
++		 * added to the poll table again. We just reuse what we've
++		 * collected earlier for further iterations.
++		 */
++		init_poll_funcptr(&pwq.pt, NULL);
++	}
++
++	poll_freewait(&pwq);
++
++	if (ret == -EINTR) {
++		/*
++		 * Interrupted system call. Unref the reply object, and pass
++		 * the return value down the chain. Mark the reply as
++		 * interrupted, so the cleanup work can remove it, but do not
++		 * unlink it from the list. Once the syscall restarts, we'll
++		 * pick it up and wait on it again.
++		 */
++		mutex_lock(&conn_src->lock);
++		reply_wait->interrupted = true;
++		schedule_delayed_work(&conn_src->work, 0);
++		mutex_unlock(&conn_src->lock);
++
++		return -ERESTARTSYS;
++	}
++
++	mutex_lock(&conn_src->lock);
++	reply_wait->waiting = false;
++	entry = reply_wait->queue_entry;
++	if (entry) {
++		ret = kdbus_queue_entry_install(entry,
++						&cmd_send->reply.return_flags,
++						true);
++		kdbus_pool_slice_publish(entry->slice, &cmd_send->reply.offset,
++					 &cmd_send->reply.msg_size);
++		kdbus_queue_entry_free(entry);
++	}
++	kdbus_reply_unlink(reply_wait);
++	mutex_unlock(&conn_src->lock);
++
++	return ret;
++}
++
++static int kdbus_pin_dst(struct kdbus_bus *bus,
++			 struct kdbus_kmsg *kmsg,
++			 struct kdbus_name_entry **out_name,
++			 struct kdbus_conn **out_dst)
++{
++	struct kdbus_msg_resources *res = kmsg->res;
++	struct kdbus_name_entry *name = NULL;
++	struct kdbus_conn *dst = NULL;
++	struct kdbus_msg *msg = &kmsg->msg;
++	int ret;
++
++	if (WARN_ON(!res))
++		return -EINVAL;
++
++	lockdep_assert_held(&bus->name_registry->rwlock);
++
++	if (!res->dst_name) {
++		dst = kdbus_bus_find_conn_by_id(bus, msg->dst_id);
++		if (!dst)
++			return -ENXIO;
++
++		if (!kdbus_conn_is_ordinary(dst)) {
++			ret = -ENXIO;
++			goto error;
++		}
++	} else {
++		name = kdbus_name_lookup_unlocked(bus->name_registry,
++						  res->dst_name);
++		if (!name)
++			return -ESRCH;
++
++		/*
++		 * If both a name and a connection ID are given as destination
++		 * of a message, check that the currently owning connection of
++		 * the name matches the specified ID.
++		 * This way, we allow userspace to send the message to a
++		 * specific connection by ID only if the connection currently
++		 * owns the given name.
++		 */
++		if (msg->dst_id != KDBUS_DST_ID_NAME &&
++		    msg->dst_id != name->conn->id)
++			return -EREMCHG;
++
++		if (!name->conn && name->activator)
++			dst = kdbus_conn_ref(name->activator);
++		else
++			dst = kdbus_conn_ref(name->conn);
++
++		if ((msg->flags & KDBUS_MSG_NO_AUTO_START) &&
++		    kdbus_conn_is_activator(dst)) {
++			ret = -EADDRNOTAVAIL;
++			goto error;
++		}
++
++		/*
++		 * Record the sequence number of the registered name; it will
++		 * be passed on to the queue, in case messages addressed to a
++		 * name need to be moved from or to an activator.
++		 */
++		kmsg->dst_name_id = name->name_id;
++	}
++
++	*out_name = name;
++	*out_dst = dst;
++	return 0;
++
++error:
++	kdbus_conn_unref(dst);
++	return ret;
++}
++
++static int kdbus_conn_reply(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_name_entry *name = NULL;
++	struct kdbus_reply *reply, *wake = NULL;
++	struct kdbus_conn *dst = NULL;
++	struct kdbus_bus *bus = src->ep->bus;
++	u64 attach;
++	int ret;
++
++	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
++	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL))
++		return -EINVAL;
++
++	/* name-registry must be locked for lookup *and* collecting data */
++	down_read(&bus->name_registry->rwlock);
++
++	/* find and pin destination */
++
++	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	if (ret < 0)
++		goto exit;
++
++	mutex_lock(&dst->lock);
++	reply = kdbus_reply_find(src, dst, kmsg->msg.cookie_reply);
++	if (reply) {
++		if (reply->sync)
++			wake = kdbus_reply_ref(reply);
++		kdbus_reply_unlink(reply);
++	}
++	mutex_unlock(&dst->lock);
++
++	if (!reply) {
++		ret = -EPERM;
++		goto exit;
++	}
++
++	/* attach metadata */
++
++	attach = kdbus_meta_calc_attach_flags(src, dst);
++
++	if (!src->faked_meta) {
++		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
++		if (ret < 0)
++			goto exit;
++	}
++
++	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++	if (ret < 0)
++		goto exit;
++
++	/* send message */
++
++	kdbus_bus_eavesdrop(bus, src, kmsg);
++
++	if (wake)
++		ret = kdbus_conn_entry_sync_attach(dst, kmsg, wake);
++	else
++		ret = kdbus_conn_entry_insert(src, dst, kmsg, NULL);
++
++exit:
++	up_read(&bus->name_registry->rwlock);
++	kdbus_reply_unref(wake);
++	kdbus_conn_unref(dst);
++	return ret;
++}
++
++static struct kdbus_reply *kdbus_conn_call(struct kdbus_conn *src,
++					   struct kdbus_kmsg *kmsg,
++					   ktime_t exp)
++{
++	struct kdbus_name_entry *name = NULL;
++	struct kdbus_reply *wait = NULL;
++	struct kdbus_conn *dst = NULL;
++	struct kdbus_bus *bus = src->ep->bus;
++	u64 attach;
++	int ret;
++
++	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(kmsg->msg.flags & KDBUS_MSG_SIGNAL) ||
++	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY)))
++		return ERR_PTR(-EINVAL);
++
++	/* resume previous wait-context, if available */
++
++	mutex_lock(&src->lock);
++	wait = kdbus_reply_find(NULL, src, kmsg->msg.cookie);
++	if (wait) {
++		if (wait->interrupted) {
++			kdbus_reply_ref(wait);
++			wait->interrupted = false;
++		} else {
++			wait = NULL;
++		}
++	}
++	mutex_unlock(&src->lock);
++
++	if (wait)
++		return wait;
++
++	if (ktime_compare(ktime_get(), exp) >= 0)
++		return ERR_PTR(-ETIMEDOUT);
++
++	/* name-registry must be locked for lookup *and* collecting data */
++	down_read(&bus->name_registry->rwlock);
++
++	/* find and pin destination */
++
++	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	if (ret < 0)
++		goto exit;
++
++	if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
++		ret = -EPERM;
++		goto exit;
++	}
++
++	wait = kdbus_reply_new(dst, src, &kmsg->msg, name, true);
++	if (IS_ERR(wait)) {
++		ret = PTR_ERR(wait);
++		wait = NULL;
++		goto exit;
++	}
++
++	/* attach metadata */
++
++	attach = kdbus_meta_calc_attach_flags(src, dst);
++
++	if (!src->faked_meta) {
++		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
++		if (ret < 0)
++			goto exit;
++	}
++
++	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++	if (ret < 0)
++		goto exit;
++
++	/* send message */
++
++	kdbus_bus_eavesdrop(bus, src, kmsg);
++
++	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
++	if (ret < 0)
++		goto exit;
++
++	ret = 0;
++
++exit:
++	up_read(&bus->name_registry->rwlock);
++	if (ret < 0) {
++		kdbus_reply_unref(wait);
++		wait = ERR_PTR(ret);
++	}
++	kdbus_conn_unref(dst);
++	return wait;
++}
++
++static int kdbus_conn_unicast(struct kdbus_conn *src, struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_name_entry *name = NULL;
++	struct kdbus_reply *wait = NULL;
++	struct kdbus_conn *dst = NULL;
++	struct kdbus_bus *bus = src->ep->bus;
++	bool is_signal = (kmsg->msg.flags & KDBUS_MSG_SIGNAL);
++	u64 attach;
++	int ret = 0;
++
++	if (WARN_ON(kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) ||
++	    WARN_ON(!(kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) &&
++		    kmsg->msg.cookie_reply != 0))
++		return -EINVAL;
++
++	/* name-registry must be locked for lookup *and* collecting data */
++	down_read(&bus->name_registry->rwlock);
++
++	/* find and pin destination */
++
++	ret = kdbus_pin_dst(bus, kmsg, &name, &dst);
++	if (ret < 0)
++		goto exit;
++
++	if (is_signal) {
++		/* like broadcasts, we eavesdrop even if the msg is dropped */
++		kdbus_bus_eavesdrop(bus, src, kmsg);
++
++		/* drop silently if peer is not interested or not privileged */
++		if (!kdbus_match_db_match_kmsg(dst->match_db, src, kmsg) ||
++		    !kdbus_conn_policy_talk(dst, NULL, src))
++			goto exit;
++	} else if (!kdbus_conn_policy_talk(src, current_cred(), dst)) {
++		ret = -EPERM;
++		goto exit;
++	} else if (kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
++		wait = kdbus_reply_new(dst, src, &kmsg->msg, name, false);
++		if (IS_ERR(wait)) {
++			ret = PTR_ERR(wait);
++			wait = NULL;
++			goto exit;
++		}
++	}
++
++	/* attach metadata */
++
++	attach = kdbus_meta_calc_attach_flags(src, dst);
++
++	if (!src->faked_meta) {
++		ret = kdbus_meta_proc_collect(kmsg->proc_meta, attach);
++		if (ret < 0 && !is_signal)
++			goto exit;
++	}
++
++	ret = kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, src, attach);
++	if (ret < 0 && !is_signal)
++		goto exit;
++
++	/* send message */
++
++	if (!is_signal)
++		kdbus_bus_eavesdrop(bus, src, kmsg);
++
++	ret = kdbus_conn_entry_insert(src, dst, kmsg, wait);
++	if (ret < 0 && !is_signal)
++		goto exit;
++
++	/* signals are treated like broadcasts, recv-errors are ignored */
++	ret = 0;
++
++exit:
++	up_read(&bus->name_registry->rwlock);
++	kdbus_reply_unref(wait);
++	kdbus_conn_unref(dst);
++	return ret;
++}
++
++/**
++ * kdbus_conn_move_messages() - move messages from one connection to another
++ * @conn_dst:		Connection to copy to
++ * @conn_src:		Connection to copy from
++ * @name_id:		Filter for the sequence number of the registered
++ *			name, 0 means no filtering.
++ *
++ * Move all messages from one connection to another. This is used when
++ * an implementer connection is taking over/giving back a well-known name
++ * from/to an activator connection.
++ */
++void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
++			      struct kdbus_conn *conn_src,
++			      u64 name_id)
++{
++	struct kdbus_queue_entry *e, *e_tmp;
++	struct kdbus_reply *r, *r_tmp;
++	struct kdbus_bus *bus;
++	struct kdbus_conn *c;
++	int i, ret = 0;
++
++	if (WARN_ON(conn_src == conn_dst))
++		return;
++
++	bus = conn_src->ep->bus;
++
++	/* lock order: domain -> bus -> ep -> names -> conn */
++	down_read(&bus->conn_rwlock);
++	hash_for_each(bus->conn_hash, i, c, hentry) {
++		if (c == conn_src || c == conn_dst)
++			continue;
++
++		mutex_lock(&c->lock);
++		list_for_each_entry_safe(r, r_tmp, &c->reply_list, entry) {
++			if (r->reply_src != conn_src)
++				continue;
++
++			/* filter messages for a specific name */
++			if (name_id > 0 && r->name_id != name_id)
++				continue;
++
++			kdbus_conn_unref(r->reply_src);
++			r->reply_src = kdbus_conn_ref(conn_dst);
++		}
++		mutex_unlock(&c->lock);
++	}
++	up_read(&bus->conn_rwlock);
++
++	kdbus_conn_lock2(conn_src, conn_dst);
++	list_for_each_entry_safe(e, e_tmp, &conn_src->queue.msg_list, entry) {
++		/* filter messages for a specific name */
++		if (name_id > 0 && e->dst_name_id != name_id)
++			continue;
++
++		if (!(conn_dst->flags & KDBUS_HELLO_ACCEPT_FD) &&
++		    e->msg_res && e->msg_res->fds_count > 0) {
++			kdbus_conn_lost_message(conn_dst);
++			kdbus_queue_entry_free(e);
++			continue;
++		}
++
++		ret = kdbus_queue_entry_move(e, conn_dst);
++		if (ret < 0) {
++			kdbus_conn_lost_message(conn_dst);
++			kdbus_queue_entry_free(e);
++			continue;
++		}
++	}
++	kdbus_conn_unlock2(conn_src, conn_dst);
++
++	/* wake up poll() */
++	wake_up_interruptible(&conn_dst->wait);
++}
++
++/* query the policy-database for all names of @whom */
++static bool kdbus_conn_policy_query_all(struct kdbus_conn *conn,
++					const struct cred *conn_creds,
++					struct kdbus_policy_db *db,
++					struct kdbus_conn *whom,
++					unsigned int access)
++{
++	struct kdbus_name_entry *ne;
++	bool pass = false;
++	int res;
++
++	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
++
++	down_read(&db->entries_rwlock);
++	mutex_lock(&whom->lock);
++
++	list_for_each_entry(ne, &whom->names_list, conn_entry) {
++		res = kdbus_policy_query_unlocked(db, conn_creds ? : conn->cred,
++						  ne->name,
++						  kdbus_strhash(ne->name));
++		if (res >= (int)access) {
++			pass = true;
++			break;
++		}
++	}
++
++	mutex_unlock(&whom->lock);
++	up_read(&db->entries_rwlock);
++
++	return pass;
++}
++
++/**
++ * kdbus_conn_policy_own_name() - verify a connection can own the given name
++ * @conn:		Connection
++ * @conn_creds:		Credentials of @conn to use for policy check
++ * @name:		Name
++ *
++ * This verifies that @conn is allowed to acquire the well-known name @name.
++ *
++ * Return: true if allowed, false if not.
++ */
++bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
++				const struct cred *conn_creds,
++				const char *name)
++{
++	unsigned int hash = kdbus_strhash(name);
++	int res;
++
++	if (!conn_creds)
++		conn_creds = conn->cred;
++
++	if (conn->ep->user) {
++		res = kdbus_policy_query(&conn->ep->policy_db, conn_creds,
++					 name, hash);
++		if (res < KDBUS_POLICY_OWN)
++			return false;
++	}
++
++	if (conn->privileged)
++		return true;
++
++	res = kdbus_policy_query(&conn->ep->bus->policy_db, conn_creds,
++				 name, hash);
++	return res >= KDBUS_POLICY_OWN;
++}
++
++/**
++ * kdbus_conn_policy_talk() - verify a connection can talk to a given peer
++ * @conn:		Connection that tries to talk
++ * @conn_creds:		Credentials of @conn to use for policy check
++ * @to:			Connection that is talked to
++ *
++ * This verifies that @conn is allowed to talk to @to.
++ *
++ * Return: true if allowed, false if not.
++ */
++bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
++			    const struct cred *conn_creds,
++			    struct kdbus_conn *to)
++{
++	if (!conn_creds)
++		conn_creds = conn->cred;
++
++	if (conn->ep->user &&
++	    !kdbus_conn_policy_query_all(conn, conn_creds, &conn->ep->policy_db,
++					 to, KDBUS_POLICY_TALK))
++		return false;
++
++	if (conn->privileged)
++		return true;
++	if (uid_eq(conn_creds->euid, to->cred->uid))
++		return true;
++
++	return kdbus_conn_policy_query_all(conn, conn_creds,
++					   &conn->ep->bus->policy_db, to,
++					   KDBUS_POLICY_TALK);
++}
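++
++/*
++ * Note the fixed order of the checks above: a custom-endpoint TALK policy
++ * can only deny; past that, a privileged connection or a matching euid
++ * passes unconditionally, and everything else falls through to the
++ * bus-wide policy database.
++ */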
++
++/**
++ * kdbus_conn_policy_see_name_unlocked() - verify a connection can see a given
++ *					   name
++ * @conn:		Connection
++ * @conn_creds:		Credentials of @conn to use for policy check
++ * @name:		Name
++ *
++ * This verifies that @conn is allowed to see the well-known name @name. Caller
++ * must hold policy-lock.
++ *
++ * Return: true if allowed, false if not.
++ */
++bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
++					 const struct cred *conn_creds,
++					 const char *name)
++{
++	int res;
++
++	/*
++	 * By default, all names are visible on a bus. SEE policies can only be
++	 * installed on custom endpoints, where by default no name is visible.
++	 */
++	if (!conn->ep->user)
++		return true;
++
++	res = kdbus_policy_query_unlocked(&conn->ep->policy_db,
++					  conn_creds ? : conn->cred,
++					  name, kdbus_strhash(name));
++	return res >= KDBUS_POLICY_SEE;
++}
++
++static bool kdbus_conn_policy_see_name(struct kdbus_conn *conn,
++				       const struct cred *conn_creds,
++				       const char *name)
++{
++	bool res;
++
++	down_read(&conn->ep->policy_db.entries_rwlock);
++	res = kdbus_conn_policy_see_name_unlocked(conn, conn_creds, name);
++	up_read(&conn->ep->policy_db.entries_rwlock);
++
++	return res;
++}
++
++static bool kdbus_conn_policy_see(struct kdbus_conn *conn,
++				  const struct cred *conn_creds,
++				  struct kdbus_conn *whom)
++{
++	/*
++	 * By default, all names are visible on a bus, so a connection can
++	 * always see other connections. SEE policies can only be installed on
++	 * custom endpoints, where by default no name is visible and we hide
++	 * peers from each other, unless you see at least _one_ name of the
++	 * peer.
++	 */
++	return !conn->ep->user ||
++	       kdbus_conn_policy_query_all(conn, conn_creds,
++					   &conn->ep->policy_db, whom,
++					   KDBUS_POLICY_SEE);
++}
++
++/**
++ * kdbus_conn_policy_see_notification() - verify a connection is allowed to
++ *					  receive a given kernel notification
++ * @conn:		Connection
++ * @conn_creds:		Credentials of @conn to use for policy check
++ * @kmsg:		The message carrying the notification
++ *
++ * This checks whether @conn is allowed to see the kernel notification @kmsg.
++ *
++ * Return: true if allowed, false if not.
++ */
++bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
++					const struct cred *conn_creds,
++					const struct kdbus_kmsg *kmsg)
++{
++	if (WARN_ON(kmsg->msg.src_id != KDBUS_SRC_ID_KERNEL))
++		return false;
++
++	/*
++	 * Depending on the notification type, broadcasted kernel notifications
++	 * have to be filtered:
++	 *
++	 * KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}: This notification is forwarded
++	 *     to a peer if, and only if, that peer can see the name this
++	 *     notification is for.
++	 *
++	 * KDBUS_ITEM_ID_{ADD,REMOVE}: As new peers cannot have names, and all
++	 *     names are dropped before a peer is removed, those notifications
++	 *     cannot be seen on custom endpoints. Thus, we only pass them
++	 *     through on default endpoints.
++	 */
++
++	switch (kmsg->notify_type) {
++	case KDBUS_ITEM_NAME_ADD:
++	case KDBUS_ITEM_NAME_REMOVE:
++	case KDBUS_ITEM_NAME_CHANGE:
++		return kdbus_conn_policy_see_name(conn, conn_creds,
++						  kmsg->notify_name);
++
++	case KDBUS_ITEM_ID_ADD:
++	case KDBUS_ITEM_ID_REMOVE:
++		return !conn->ep->user;
++
++	default:
++		WARN(1, "Invalid type for notification broadcast: %llu\n",
++		     (unsigned long long)kmsg->notify_type);
++		return false;
++	}
++}
++
++/**
++ * kdbus_cmd_hello() - handle KDBUS_CMD_HELLO
++ * @ep:			Endpoint to operate on
++ * @privileged:		Whether the caller is privileged
++ * @argp:		Command payload
++ *
++ * Return: Newly created connection on success, ERR_PTR on failure.
++ */
++struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
++				   void __user *argp)
++{
++	struct kdbus_cmd_hello *cmd;
++	struct kdbus_conn *c = NULL;
++	const char *item_name;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_NAME },
++		{ .type = KDBUS_ITEM_CREDS },
++		{ .type = KDBUS_ITEM_PIDS },
++		{ .type = KDBUS_ITEM_SECLABEL },
++		{ .type = KDBUS_ITEM_CONN_DESCRIPTION },
++		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_HELLO_ACCEPT_FD |
++				 KDBUS_HELLO_ACTIVATOR |
++				 KDBUS_HELLO_POLICY_HOLDER |
++				 KDBUS_HELLO_MONITOR,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret < 0)
++		return ERR_PTR(ret);
++	if (ret > 0)
++		return NULL;
++
++	item_name = argv[1].item ? argv[1].item->str : NULL;
++
++	c = kdbus_conn_new(ep, privileged, cmd, item_name,
++			   argv[2].item ? &argv[2].item->creds : NULL,
++			   argv[3].item ? &argv[3].item->pids : NULL,
++			   argv[4].item ? argv[4].item->str : NULL,
++			   argv[5].item ? argv[5].item->str : NULL);
++	if (IS_ERR(c)) {
++		ret = PTR_ERR(c);
++		c = NULL;
++		goto exit;
++	}
++
++	ret = kdbus_conn_connect(c, item_name);
++	if (ret < 0)
++		goto exit;
++
++	if (kdbus_conn_is_activator(c) || kdbus_conn_is_policy_holder(c)) {
++		ret = kdbus_conn_acquire(c);
++		if (ret < 0)
++			goto exit;
++
++		ret = kdbus_policy_set(&c->ep->bus->policy_db, args.items,
++				       args.items_size, 1,
++				       kdbus_conn_is_policy_holder(c), c);
++		kdbus_conn_release(c);
++		if (ret < 0)
++			goto exit;
++	}
++
++	if (copy_to_user(argp, cmd, sizeof(*cmd)))
++		ret = -EFAULT;
++
++exit:
++	ret = kdbus_args_clear(&args, ret);
++	if (ret < 0) {
++		if (c) {
++			kdbus_conn_disconnect(c, false);
++			kdbus_conn_unref(c);
++		}
++		return ERR_PTR(ret);
++	}
++	return c;
++}
++
++/**
++ * kdbus_cmd_byebye_unlocked() - handle KDBUS_CMD_BYEBYE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * The caller must not hold any active reference to @conn or this will deadlock.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_cmd *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	ret = kdbus_conn_disconnect(conn, true);
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_conn_info() - handle KDBUS_CMD_CONN_INFO
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_meta_conn *conn_meta = NULL;
++	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_name_entry *entry = NULL;
++	struct kdbus_conn *owner_conn = NULL;
++	struct kdbus_info info = {};
++	struct kdbus_cmd_info *cmd;
++	struct kdbus_bus *bus = conn->ep->bus;
++	struct kvec kvec;
++	size_t meta_size;
++	const char *name;
++	u64 attach_flags;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_NAME },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	/* registry must be held throughout lookup *and* collecting data */
++	down_read(&bus->name_registry->rwlock);
++
++	ret = kdbus_sanitize_attach_flags(cmd->attach_flags, &attach_flags);
++	if (ret < 0)
++		goto exit;
++
++	name = argv[1].item ? argv[1].item->str : NULL;
++
++	if (name) {
++		entry = kdbus_name_lookup_unlocked(bus->name_registry, name);
++		if (!entry || !entry->conn ||
++		    !kdbus_conn_policy_see_name(conn, current_cred(), name) ||
++		    (cmd->id != 0 && entry->conn->id != cmd->id)) {
++			/* pretend a name doesn't exist if you cannot see it */
++			ret = -ESRCH;
++			goto exit;
++		}
++
++		owner_conn = kdbus_conn_ref(entry->conn);
++	} else if (cmd->id > 0) {
++		owner_conn = kdbus_bus_find_conn_by_id(bus, cmd->id);
++		if (!owner_conn || !kdbus_conn_policy_see(conn, current_cred(),
++							  owner_conn)) {
++			/* pretend an id doesn't exist if you cannot see it */
++			ret = -ENXIO;
++			goto exit;
++		}
++	} else {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	info.id = owner_conn->id;
++	info.flags = owner_conn->flags;
++	kdbus_kvec_set(&kvec, &info, sizeof(info), &info.size);
++
++	attach_flags &= atomic64_read(&owner_conn->attach_flags_send);
++
++	conn_meta = kdbus_meta_conn_new();
++	if (IS_ERR(conn_meta)) {
++		ret = PTR_ERR(conn_meta);
++		conn_meta = NULL;
++		goto exit;
++	}
++
++	ret = kdbus_meta_conn_collect(conn_meta, NULL, owner_conn,
++				      attach_flags);
++	if (ret < 0)
++		goto exit;
++
++	ret = kdbus_meta_export_prepare(owner_conn->meta, conn_meta,
++					&attach_flags, &meta_size);
++	if (ret < 0)
++		goto exit;
++
++	slice = kdbus_pool_slice_alloc(conn->pool,
++				       info.size + meta_size, false);
++	if (IS_ERR(slice)) {
++		ret = PTR_ERR(slice);
++		slice = NULL;
++		goto exit;
++	}
++
++	ret = kdbus_meta_export(owner_conn->meta, conn_meta, attach_flags,
++				slice, sizeof(info), &meta_size);
++	if (ret < 0)
++		goto exit;
++
++	info.size += meta_size;
++
++	ret = kdbus_pool_slice_copy_kvec(slice, 0, &kvec, 1, sizeof(info));
++	if (ret < 0)
++		goto exit;
++
++	kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->info_size);
++
++	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
++	    kdbus_member_set_user(&cmd->info_size, argp,
++				  typeof(*cmd), info_size)) {
++		ret = -EFAULT;
++		goto exit;
++	}
++
++	ret = 0;
++
++exit:
++	up_read(&bus->name_registry->rwlock);
++	kdbus_pool_slice_release(slice);
++	kdbus_meta_conn_unref(conn_meta);
++	kdbus_conn_unref(owner_conn);
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_update() - handle KDBUS_CMD_UPDATE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_bus *bus = conn->ep->bus;
++	struct kdbus_item *item_policy;
++	u64 *item_attach_send = NULL;
++	u64 *item_attach_recv = NULL;
++	struct kdbus_cmd *cmd;
++	u64 attach_send;
++	u64 attach_recv;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_ATTACH_FLAGS_SEND },
++		{ .type = KDBUS_ITEM_ATTACH_FLAGS_RECV },
++		{ .type = KDBUS_ITEM_NAME, .multiple = true },
++		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	item_attach_send = argv[1].item ? &argv[1].item->data64[0] : NULL;
++	item_attach_recv = argv[2].item ? &argv[2].item->data64[0] : NULL;
++	item_policy = argv[3].item ? : argv[4].item;
++
++	if (item_attach_send) {
++		if (!kdbus_conn_is_ordinary(conn) &&
++		    !kdbus_conn_is_monitor(conn)) {
++			ret = -EOPNOTSUPP;
++			goto exit;
++		}
++
++		ret = kdbus_sanitize_attach_flags(*item_attach_send,
++						  &attach_send);
++		if (ret < 0)
++			goto exit;
++
++		if (bus->attach_flags_req & ~attach_send) {
++			ret = -EINVAL;
++			goto exit;
++		}
++	}
++
++	if (item_attach_recv) {
++		if (!kdbus_conn_is_ordinary(conn) &&
++		    !kdbus_conn_is_monitor(conn) &&
++		    !kdbus_conn_is_activator(conn)) {
++			ret = -EOPNOTSUPP;
++			goto exit;
++		}
++
++		ret = kdbus_sanitize_attach_flags(*item_attach_recv,
++						  &attach_recv);
++		if (ret < 0)
++			goto exit;
++	}
++
++	if (item_policy && !kdbus_conn_is_policy_holder(conn)) {
++		ret = -EOPNOTSUPP;
++		goto exit;
++	}
++
++	/* now that we verified the input, update the connection */
++
++	if (item_policy) {
++		ret = kdbus_policy_set(&conn->ep->bus->policy_db, cmd->items,
++				       KDBUS_ITEMS_SIZE(cmd, items),
++				       1, true, conn);
++		if (ret < 0)
++			goto exit;
++	}
++
++	if (item_attach_send)
++		atomic64_set(&conn->attach_flags_send, attach_send);
++
++	if (item_attach_recv)
++		atomic64_set(&conn->attach_flags_recv, attach_recv);
++
++exit:
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_send() - handle KDBUS_CMD_SEND
++ * @conn:		connection to operate on
++ * @f:			file this command was called on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp)
++{
++	struct kdbus_cmd_send *cmd;
++	struct kdbus_kmsg *kmsg = NULL;
++	struct file *cancel_fd = NULL;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_CANCEL_FD },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_SEND_SYNC_REPLY,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	cmd->reply.return_flags = 0;
++	kdbus_pool_publish_empty(conn->pool, &cmd->reply.offset,
++				 &cmd->reply.msg_size);
++
++	if (argv[1].item) {
++		cancel_fd = fget(argv[1].item->fds[0]);
++		if (!cancel_fd) {
++			ret = -EBADF;
++			goto exit;
++		}
++
++		if (!cancel_fd->f_op->poll) {
++			ret = -EINVAL;
++			goto exit;
++		}
++	}
++
++	kmsg = kdbus_kmsg_new_from_cmd(conn, cmd);
++	if (IS_ERR(kmsg)) {
++		ret = PTR_ERR(kmsg);
++		kmsg = NULL;
++		goto exit;
++	}
++
++	if (kmsg->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
++		down_read(&conn->ep->bus->name_registry->rwlock);
++		kdbus_bus_broadcast(conn->ep->bus, conn, kmsg);
++		up_read(&conn->ep->bus->name_registry->rwlock);
++	} else if (cmd->flags & KDBUS_SEND_SYNC_REPLY) {
++		struct kdbus_reply *r;
++		ktime_t exp;
++
++		exp = ns_to_ktime(kmsg->msg.timeout_ns);
++		r = kdbus_conn_call(conn, kmsg, exp);
++		if (IS_ERR(r)) {
++			ret = PTR_ERR(r);
++			goto exit;
++		}
++
++		ret = kdbus_conn_wait_reply(conn, cmd, f, cancel_fd, r, exp);
++		kdbus_reply_unref(r);
++		if (ret < 0)
++			goto exit;
++	} else if ((kmsg->msg.flags & KDBUS_MSG_EXPECT_REPLY) ||
++		   kmsg->msg.cookie_reply == 0) {
++		ret = kdbus_conn_unicast(conn, kmsg);
++		if (ret < 0)
++			goto exit;
++	} else {
++		ret = kdbus_conn_reply(conn, kmsg);
++		if (ret < 0)
++			goto exit;
++	}
++
++	if (kdbus_member_set_user(&cmd->reply, argp, typeof(*cmd), reply))
++		ret = -EFAULT;
++
++exit:
++	if (cancel_fd)
++		fput(cancel_fd);
++	kdbus_kmsg_free(kmsg);
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_recv() - handle KDBUS_CMD_RECV
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_queue_entry *entry;
++	struct kdbus_cmd_recv *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_RECV_PEEK |
++				 KDBUS_RECV_DROP |
++				 KDBUS_RECV_USE_PRIORITY,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn) &&
++	    !kdbus_conn_is_monitor(conn) &&
++	    !kdbus_conn_is_activator(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	cmd->dropped_msgs = 0;
++	cmd->msg.return_flags = 0;
++	kdbus_pool_publish_empty(conn->pool, &cmd->msg.offset,
++				 &cmd->msg.msg_size);
++
++	/* DROP+priority is not reliable, so prevent it */
++	if ((cmd->flags & KDBUS_RECV_DROP) &&
++	    (cmd->flags & KDBUS_RECV_USE_PRIORITY)) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	mutex_lock(&conn->lock);
++
++	entry = kdbus_queue_peek(&conn->queue, cmd->priority,
++				 cmd->flags & KDBUS_RECV_USE_PRIORITY);
++	if (!entry) {
++		mutex_unlock(&conn->lock);
++		ret = -EAGAIN;
++	} else if (cmd->flags & KDBUS_RECV_DROP) {
++		struct kdbus_reply *reply = kdbus_reply_ref(entry->reply);
++
++		kdbus_queue_entry_free(entry);
++
++		mutex_unlock(&conn->lock);
++
++		if (reply) {
++			mutex_lock(&reply->reply_dst->lock);
++			if (!list_empty(&reply->entry)) {
++				kdbus_reply_unlink(reply);
++				if (reply->sync)
++					kdbus_sync_reply_wakeup(reply, -EPIPE);
++				else
++					kdbus_notify_reply_dead(conn->ep->bus,
++							reply->reply_dst->id,
++							reply->cookie);
++			}
++			mutex_unlock(&reply->reply_dst->lock);
++			kdbus_notify_flush(conn->ep->bus);
++		}
++
++		kdbus_reply_unref(reply);
++	} else {
++		bool install_fds;
++
++		/*
++		 * PEEK just returns the location of the next message. Do not
++		 * install FDs nor memfds nor anything else. The only
++		 * information of interest should be the message header and
++		 * metadata. Any FD numbers in the payload are undefined for
++		 * PEEK'ed messages.
++		 * Also make sure to never install fds into a connection that
++		 * has refused to receive any. Ordinary connections will not get
++		 * messages with FDs queued (the receiver will get -ECOMM), but
++		 * eavesdroppers might.
++		 */
++		install_fds = (conn->flags & KDBUS_HELLO_ACCEPT_FD) &&
++			      !(cmd->flags & KDBUS_RECV_PEEK);
++
++		ret = kdbus_queue_entry_install(entry,
++						&cmd->msg.return_flags,
++						install_fds);
++		if (ret < 0) {
++			mutex_unlock(&conn->lock);
++			goto exit;
++		}
++
++		kdbus_pool_slice_publish(entry->slice, &cmd->msg.offset,
++					 &cmd->msg.msg_size);
++
++		if (!(cmd->flags & KDBUS_RECV_PEEK))
++			kdbus_queue_entry_free(entry);
++
++		mutex_unlock(&conn->lock);
++	}
++
++	cmd->dropped_msgs = atomic_xchg(&conn->lost_count, 0);
++	if (cmd->dropped_msgs > 0)
++		cmd->return_flags |= KDBUS_RECV_RETURN_DROPPED_MSGS;
++
++	if (kdbus_member_set_user(&cmd->msg, argp, typeof(*cmd), msg) ||
++	    kdbus_member_set_user(&cmd->dropped_msgs, argp, typeof(*cmd),
++				  dropped_msgs))
++		ret = -EFAULT;
++
++exit:
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_free() - handle KDBUS_CMD_FREE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_cmd_free *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn) &&
++	    !kdbus_conn_is_monitor(conn) &&
++	    !kdbus_conn_is_activator(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	ret = kdbus_pool_release_offset(conn->pool, cmd->offset);
++
++	return kdbus_args_clear(&args, ret);
++}
+diff --git a/ipc/kdbus/connection.h b/ipc/kdbus/connection.h
+new file mode 100644
+index 0000000..d1ffe90
+--- /dev/null
++++ b/ipc/kdbus/connection.h
+@@ -0,0 +1,257 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_CONNECTION_H
++#define __KDBUS_CONNECTION_H
++
++#include <linux/atomic.h>
++#include <linux/kref.h>
++#include <linux/lockdep.h>
++#include <linux/path.h>
++
++#include "limits.h"
++#include "metadata.h"
++#include "pool.h"
++#include "queue.h"
++#include "util.h"
++
++#define KDBUS_HELLO_SPECIAL_CONN	(KDBUS_HELLO_ACTIVATOR | \
++					 KDBUS_HELLO_POLICY_HOLDER | \
++					 KDBUS_HELLO_MONITOR)
++
++struct kdbus_quota;
++struct kdbus_kmsg;
++
++/**
++ * struct kdbus_conn - connection to a bus
++ * @kref:		Reference count
++ * @active:		Active references to the connection
++ * @id:			Connection ID
++ * @flags:		KDBUS_HELLO_* flags
++ * @attach_flags_send:	KDBUS_ATTACH_* flags for sending
++ * @attach_flags_recv:	KDBUS_ATTACH_* flags for receiving
++ * @description:	Human-readable connection description, used for
++ *			debugging. This field is only set when the
++ *			connection is created.
++ * @ep:			The endpoint this connection belongs to
++ * @lock:		Connection data lock
++ * @hentry:		Entry in ID <-> connection map
++ * @ep_entry:		Entry in endpoint
++ * @monitor_entry:	Entry in monitor, if the connection is a monitor
++ * @reply_list:		List of connections this connection should
++ *			reply to
++ * @work:		Delayed work to handle timeouts
++ * @match_db:		Subscription filter to broadcast messages
++ * @meta:		Active connection creator's metadata/credentials,
++ *			either from the handle or from HELLO
++ * @pool:		The user's buffer to receive messages
++ * @user:		Owner of the connection
++ * @cred:		The credentials of the connection at creation time
++ * @name_count:		Number of owned well-known names
++ * @request_count:	Number of pending requests issued by this
++ *			connection that are waiting for replies from
++ *			other peers
++ * @lost_count:		Number of lost broadcast messages
++ * @wait:		Wake-up queue for this connection
++ * @queue:		The message queue associated with this connection
++ * @quota:		Array of per-user quota indexed by user->id
++ * @n_quota:		Number of elements in quota array
++ * @activator_of:	Well-known name entry this connection acts as an
++ *			activator for
++ * @names_list:		List of well-known names
++ * @names_queue_list:	Well-known names this connection waits for
++ * @privileged:		Whether this connection is privileged on the bus
++ * @faked_meta:		Whether the metadata was faked on HELLO
++ */
++struct kdbus_conn {
++	struct kref kref;
++	atomic_t active;
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++	struct lockdep_map dep_map;
++#endif
++	u64 id;
++	u64 flags;
++	atomic64_t attach_flags_send;
++	atomic64_t attach_flags_recv;
++	const char *description;
++	struct kdbus_ep *ep;
++	struct mutex lock;
++	struct hlist_node hentry;
++	struct list_head ep_entry;
++	struct list_head monitor_entry;
++	struct list_head reply_list;
++	struct delayed_work work;
++	struct kdbus_match_db *match_db;
++	struct kdbus_meta_proc *meta;
++	struct kdbus_pool *pool;
++	struct kdbus_user *user;
++	const struct cred *cred;
++	atomic_t name_count;
++	atomic_t request_count;
++	atomic_t lost_count;
++	wait_queue_head_t wait;
++	struct kdbus_queue queue;
++
++	struct kdbus_quota *quota;
++	unsigned int n_quota;
++
++	/* protected by registry->rwlock */
++	struct kdbus_name_entry *activator_of;
++	struct list_head names_list;
++	struct list_head names_queue_list;
++
++	bool privileged:1;
++	bool faked_meta:1;
++};
++
++struct kdbus_conn *kdbus_conn_ref(struct kdbus_conn *conn);
++struct kdbus_conn *kdbus_conn_unref(struct kdbus_conn *conn);
++bool kdbus_conn_active(const struct kdbus_conn *conn);
++int kdbus_conn_acquire(struct kdbus_conn *conn);
++void kdbus_conn_release(struct kdbus_conn *conn);
++int kdbus_conn_disconnect(struct kdbus_conn *conn, bool ensure_queue_empty);
++bool kdbus_conn_has_name(struct kdbus_conn *conn, const char *name);
++int kdbus_conn_quota_inc(struct kdbus_conn *c, struct kdbus_user *u,
++			 size_t memory, size_t fds);
++void kdbus_conn_quota_dec(struct kdbus_conn *c, struct kdbus_user *u,
++			  size_t memory, size_t fds);
++void kdbus_conn_lost_message(struct kdbus_conn *c);
++int kdbus_conn_entry_insert(struct kdbus_conn *conn_src,
++			    struct kdbus_conn *conn_dst,
++			    const struct kdbus_kmsg *kmsg,
++			    struct kdbus_reply *reply);
++void kdbus_conn_move_messages(struct kdbus_conn *conn_dst,
++			      struct kdbus_conn *conn_src,
++			      u64 name_id);
++
++/* policy */
++bool kdbus_conn_policy_own_name(struct kdbus_conn *conn,
++				const struct cred *conn_creds,
++				const char *name);
++bool kdbus_conn_policy_talk(struct kdbus_conn *conn,
++			    const struct cred *conn_creds,
++			    struct kdbus_conn *to);
++bool kdbus_conn_policy_see_name_unlocked(struct kdbus_conn *conn,
++					 const struct cred *curr_creds,
++					 const char *name);
++bool kdbus_conn_policy_see_notification(struct kdbus_conn *conn,
++					const struct cred *curr_creds,
++					const struct kdbus_kmsg *kmsg);
++
++/* command dispatcher */
++struct kdbus_conn *kdbus_cmd_hello(struct kdbus_ep *ep, bool privileged,
++				   void __user *argp);
++int kdbus_cmd_byebye_unlocked(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_conn_info(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_update(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_send(struct kdbus_conn *conn, struct file *f, void __user *argp);
++int kdbus_cmd_recv(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_free(struct kdbus_conn *conn, void __user *argp);
++
++/**
++ * kdbus_conn_is_ordinary() - Check if connection is ordinary
++ * @conn:		The connection to check
++ *
++ * Return: Non-zero if the connection is an ordinary connection
++ */
++static inline int kdbus_conn_is_ordinary(const struct kdbus_conn *conn)
++{
++	return !(conn->flags & KDBUS_HELLO_SPECIAL_CONN);
++}
++
++/**
++ * kdbus_conn_is_activator() - Check if connection is an activator
++ * @conn:		The connection to check
++ *
++ * Return: Non-zero if the connection is an activator
++ */
++static inline int kdbus_conn_is_activator(const struct kdbus_conn *conn)
++{
++	return conn->flags & KDBUS_HELLO_ACTIVATOR;
++}
++
++/**
++ * kdbus_conn_is_policy_holder() - Check if connection is a policy holder
++ * @conn:		The connection to check
++ *
++ * Return: Non-zero if the connection is a policy holder
++ */
++static inline int kdbus_conn_is_policy_holder(const struct kdbus_conn *conn)
++{
++	return conn->flags & KDBUS_HELLO_POLICY_HOLDER;
++}
++
++/**
++ * kdbus_conn_is_monitor() - Check if connection is a monitor
++ * @conn:		The connection to check
++ *
++ * Return: Non-zero if the connection is a monitor
++ */
++static inline int kdbus_conn_is_monitor(const struct kdbus_conn *conn)
++{
++	return conn->flags & KDBUS_HELLO_MONITOR;
++}
++
++/**
++ * kdbus_conn_lock2() - Lock two connections
++ * @a:		connection A to lock or NULL
++ * @b:		connection B to lock or NULL
++ *
++ * Lock two connections at once. As we need to have a stable locking order, we
++ * always lock the connection with lower memory address first.
++ */
++static inline void kdbus_conn_lock2(struct kdbus_conn *a, struct kdbus_conn *b)
++{
++	if (a < b) {
++		if (a)
++			mutex_lock(&a->lock);
++		if (b && b != a)
++			mutex_lock_nested(&b->lock, !!a);
++	} else {
++		if (b)
++			mutex_lock(&b->lock);
++		if (a && a != b)
++			mutex_lock_nested(&a->lock, !!b);
++	}
++}
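++
++/*
++ * Example of why the address ordering matters: if thread 1 calls
++ * kdbus_conn_lock2(a, b) while thread 2 calls kdbus_conn_lock2(b, a),
++ * both acquire min(a, b) first, so neither can hold one lock while
++ * waiting for the other.
++ */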
++
++/**
++ * kdbus_conn_unlock2() - Unlock two connections
++ * @a:		connection A to unlock or NULL
++ * @b:		connection B to unlock or NULL
++ *
++ * Unlock two connections at once. See kdbus_conn_lock2().
++ */
++static inline void kdbus_conn_unlock2(struct kdbus_conn *a,
++				      struct kdbus_conn *b)
++{
++	if (a)
++		mutex_unlock(&a->lock);
++	if (b && b != a)
++		mutex_unlock(&b->lock);
++}
++
++/**
++ * kdbus_conn_assert_active() - lockdep assert on active lock
++ * @conn:	connection that shall be active
++ *
++ * This verifies via lockdep that the caller holds an active reference to the
++ * given connection.
++ */
++static inline void kdbus_conn_assert_active(struct kdbus_conn *conn)
++{
++	lockdep_assert_held(conn);
++}
++
++#endif
+diff --git a/ipc/kdbus/domain.c b/ipc/kdbus/domain.c
+new file mode 100644
+index 0000000..ac9f760
+--- /dev/null
++++ b/ipc/kdbus/domain.c
+@@ -0,0 +1,296 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include "bus.h"
++#include "domain.h"
++#include "handle.h"
++#include "item.h"
++#include "limits.h"
++#include "util.h"
++
++static void kdbus_domain_control_free(struct kdbus_node *node)
++{
++	kfree(node);
++}
++
++static struct kdbus_node *kdbus_domain_control_new(struct kdbus_domain *domain,
++						   unsigned int access)
++{
++	struct kdbus_node *node;
++	int ret;
++
++	node = kzalloc(sizeof(*node), GFP_KERNEL);
++	if (!node)
++		return ERR_PTR(-ENOMEM);
++
++	kdbus_node_init(node, KDBUS_NODE_CONTROL);
++
++	node->free_cb = kdbus_domain_control_free;
++	node->mode = S_IRUSR | S_IWUSR;
++	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
++		node->mode |= S_IRGRP | S_IWGRP;
++	if (access & KDBUS_MAKE_ACCESS_WORLD)
++		node->mode |= S_IROTH | S_IWOTH;
++
++	ret = kdbus_node_link(node, &domain->node, "control");
++	if (ret < 0)
++		goto exit_free;
++
++	return node;
++
++exit_free:
++	kdbus_node_deactivate(node);
++	kdbus_node_unref(node);
++	return ERR_PTR(ret);
++}
++
++static void kdbus_domain_free(struct kdbus_node *node)
++{
++	struct kdbus_domain *domain =
++		container_of(node, struct kdbus_domain, node);
++
++	put_user_ns(domain->user_namespace);
++	ida_destroy(&domain->user_ida);
++	idr_destroy(&domain->user_idr);
++	kfree(domain);
++}
++
++/**
++ * kdbus_domain_new() - create a new domain
++ * @access:		The access mode for this node (KDBUS_MAKE_ACCESS_*)
++ *
++ * Return: a new kdbus_domain on success, ERR_PTR on failure
++ */
++struct kdbus_domain *kdbus_domain_new(unsigned int access)
++{
++	struct kdbus_domain *d;
++	int ret;
++
++	d = kzalloc(sizeof(*d), GFP_KERNEL);
++	if (!d)
++		return ERR_PTR(-ENOMEM);
++
++	kdbus_node_init(&d->node, KDBUS_NODE_DOMAIN);
++
++	d->node.free_cb = kdbus_domain_free;
++	d->node.mode = S_IRUSR | S_IXUSR;
++	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
++		d->node.mode |= S_IRGRP | S_IXGRP;
++	if (access & KDBUS_MAKE_ACCESS_WORLD)
++		d->node.mode |= S_IROTH | S_IXOTH;
++
++	mutex_init(&d->lock);
++	idr_init(&d->user_idr);
++	ida_init(&d->user_ida);
++
++	/* Pin user namespace so we can guarantee domain-unique bus names. */
++	d->user_namespace = get_user_ns(current_user_ns());
++
++	ret = kdbus_node_link(&d->node, NULL, NULL);
++	if (ret < 0)
++		goto exit_unref;
++
++	return d;
++
++exit_unref:
++	kdbus_node_deactivate(&d->node);
++	kdbus_node_unref(&d->node);
++	return ERR_PTR(ret);
++}
++
++/**
++ * kdbus_domain_ref() - take a domain reference
++ * @domain:		Domain
++ *
++ * Return: the domain itself
++ */
++struct kdbus_domain *kdbus_domain_ref(struct kdbus_domain *domain)
++{
++	if (domain)
++		kdbus_node_ref(&domain->node);
++	return domain;
++}
++
++/**
++ * kdbus_domain_unref() - drop a domain reference
++ * @domain:		Domain
++ *
++ * When the last reference is dropped, the domain internal structure
++ * is freed.
++ *
++ * Return: NULL
++ */
++struct kdbus_domain *kdbus_domain_unref(struct kdbus_domain *domain)
++{
++	if (domain)
++		kdbus_node_unref(&domain->node);
++	return NULL;
++}
++
++/**
++ * kdbus_domain_populate() - populate static domain nodes
++ * @domain:	domain to populate
++ * @access:	KDBUS_MAKE_ACCESS_* access restrictions for new nodes
++ *
++ * Allocate and activate static sub-nodes of the given domain. This will fail if
++ * you call it on a non-active node or if the domain was already populated.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access)
++{
++	struct kdbus_node *control;
++
++	/*
++	 * Create a control-node for this domain. We drop our own reference
++	 * immediately, effectively causing the node to be deactivated and
++	 * released when the parent domain is.
++	 */
++	control = kdbus_domain_control_new(domain, access);
++	if (IS_ERR(control))
++		return PTR_ERR(control);
++
++	kdbus_node_activate(control);
++	kdbus_node_unref(control);
++	return 0;
++}
++
++/**
++ * kdbus_user_lookup() - lookup a kdbus_user object
++ * @domain:		domain of the user
++ * @uid:		uid of the user; INVALID_UID for an anon user
++ *
++ * Look up the kdbus user accounting object for the given domain. If INVALID_UID
++ * is passed, a new anonymous user is created that is private to the caller.
++ *
++ * Return: The user object is returned, ERR_PTR on failure.
++ */
++struct kdbus_user *kdbus_user_lookup(struct kdbus_domain *domain, kuid_t uid)
++{
++	struct kdbus_user *u = NULL, *old = NULL;
++	int ret;
++
++	mutex_lock(&domain->lock);
++
++	if (uid_valid(uid)) {
++		old = idr_find(&domain->user_idr, __kuid_val(uid));
++		/*
++		 * If the object is about to be destroyed, ignore it and
++		 * replace the slot in the IDR later on.
++		 */
++		if (old && kref_get_unless_zero(&old->kref)) {
++			mutex_unlock(&domain->lock);
++			return old;
++		}
++	}
++
++	u = kzalloc(sizeof(*u), GFP_KERNEL);
++	if (!u) {
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	kref_init(&u->kref);
++	u->domain = kdbus_domain_ref(domain);
++	u->uid = uid;
++	atomic_set(&u->buses, 0);
++	atomic_set(&u->connections, 0);
++
++	if (uid_valid(uid)) {
++		if (old) {
++			idr_replace(&domain->user_idr, u, __kuid_val(uid));
++			old->uid = INVALID_UID; /* mark old as removed */
++		} else {
++			ret = idr_alloc(&domain->user_idr, u, __kuid_val(uid),
++					__kuid_val(uid) + 1, GFP_KERNEL);
++			if (ret < 0)
++				goto exit;
++		}
++	}
++
++	/*
++	 * Allocate the smallest possible index for this user; used
++	 * in arrays for accounting user quota in receiver queues.
++	 */
++	ret = ida_simple_get(&domain->user_ida, 1, 0, GFP_KERNEL);
++	if (ret < 0)
++		goto exit;
++
++	u->id = ret;
++	mutex_unlock(&domain->lock);
++	return u;
++
++exit:
++	if (u) {
++		if (uid_valid(u->uid))
++			idr_remove(&domain->user_idr, __kuid_val(u->uid));
++		kdbus_domain_unref(u->domain);
++		kfree(u);
++	}
++	mutex_unlock(&domain->lock);
++	return ERR_PTR(ret);
++}
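++
++/*
++ * An illustrative caller of the lookup above (error handling shortened);
++ * every successful lookup must be balanced by kdbus_user_unref():
++ *
++ *	struct kdbus_user *user;
++ *
++ *	user = kdbus_user_lookup(domain, current_euid());
++ *	if (IS_ERR(user))
++ *		return PTR_ERR(user);
++ *	... account resources against user->buses / user->connections ...
++ *	kdbus_user_unref(user);
++ */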
++
++static void __kdbus_user_free(struct kref *kref)
++{
++	struct kdbus_user *user = container_of(kref, struct kdbus_user, kref);
++
++	WARN_ON(atomic_read(&user->buses) > 0);
++	WARN_ON(atomic_read(&user->connections) > 0);
++
++	mutex_lock(&user->domain->lock);
++	ida_simple_remove(&user->domain->user_ida, user->id);
++	if (uid_valid(user->uid))
++		idr_remove(&user->domain->user_idr, __kuid_val(user->uid));
++	mutex_unlock(&user->domain->lock);
++
++	kdbus_domain_unref(user->domain);
++	kfree(user);
++}
++
++/**
++ * kdbus_user_ref() - take a user reference
++ * @u:		User
++ *
++ * Return: @u is returned
++ */
++struct kdbus_user *kdbus_user_ref(struct kdbus_user *u)
++{
++	if (u)
++		kref_get(&u->kref);
++	return u;
++}
++
++/**
++ * kdbus_user_unref() - drop a user reference
++ * @u:		User
++ *
++ * Return: NULL
++ */
++struct kdbus_user *kdbus_user_unref(struct kdbus_user *u)
++{
++	if (u)
++		kref_put(&u->kref, __kdbus_user_free);
++	return NULL;
++}
+diff --git a/ipc/kdbus/domain.h b/ipc/kdbus/domain.h
+new file mode 100644
+index 0000000..447a2bd
+--- /dev/null
++++ b/ipc/kdbus/domain.h
+@@ -0,0 +1,77 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_DOMAIN_H
++#define __KDBUS_DOMAIN_H
++
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/kref.h>
++#include <linux/user_namespace.h>
++
++#include "node.h"
++
++/**
++ * struct kdbus_domain - domain for buses
++ * @node:		Underlying API node
++ * @lock:		Domain data lock
++ * @last_id:		Last used object id
++ * @user_idr:		Set of all users indexed by UID
++ * @user_ida:		Set of all users to compute small indices
++ * @user_namespace:	User namespace, pinned at creation time
++ * @dentry:		Root dentry of VFS mount (don't use outside of kdbusfs)
++ */
++struct kdbus_domain {
++	struct kdbus_node node;
++	struct mutex lock;
++	atomic64_t last_id;
++	struct idr user_idr;
++	struct ida user_ida;
++	struct user_namespace *user_namespace;
++	struct dentry *dentry;
++};
++
++/**
++ * struct kdbus_user - resource accounting for users
++ * @kref:		Reference counter
++ * @domain:		Domain of the user
++ * @id:			Index of this user
++ * @uid:		UID of the user
++ * @buses:		Number of buses the user has created
++ * @connections:	Number of connections the user has created
++ */
++struct kdbus_user {
++	struct kref kref;
++	struct kdbus_domain *domain;
++	unsigned int id;
++	kuid_t uid;
++	atomic_t buses;
++	atomic_t connections;
++};
++
++#define kdbus_domain_from_node(_node) \
++	container_of((_node), struct kdbus_domain, node)
++
++struct kdbus_domain *kdbus_domain_new(unsigned int access);
++struct kdbus_domain *kdbus_domain_ref(struct kdbus_domain *domain);
++struct kdbus_domain *kdbus_domain_unref(struct kdbus_domain *domain);
++int kdbus_domain_populate(struct kdbus_domain *domain, unsigned int access);
++
++#define KDBUS_USER_KERNEL_ID 0 /* ID 0 is reserved for kernel accounting */
++
++struct kdbus_user *kdbus_user_lookup(struct kdbus_domain *domain, kuid_t uid);
++struct kdbus_user *kdbus_user_ref(struct kdbus_user *u);
++struct kdbus_user *kdbus_user_unref(struct kdbus_user *u);
++
++#endif
+diff --git a/ipc/kdbus/endpoint.c b/ipc/kdbus/endpoint.c
+new file mode 100644
+index 0000000..174d274
+--- /dev/null
++++ b/ipc/kdbus/endpoint.c
+@@ -0,0 +1,275 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/uio.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "item.h"
++#include "message.h"
++#include "policy.h"
++
++static void kdbus_ep_free(struct kdbus_node *node)
++{
++	struct kdbus_ep *ep = container_of(node, struct kdbus_ep, node);
++
++	WARN_ON(!list_empty(&ep->conn_list));
++
++	kdbus_policy_db_clear(&ep->policy_db);
++	kdbus_bus_unref(ep->bus);
++	kdbus_user_unref(ep->user);
++	kfree(ep);
++}
++
++static void kdbus_ep_release(struct kdbus_node *node, bool was_active)
++{
++	struct kdbus_ep *ep = container_of(node, struct kdbus_ep, node);
++
++	/* disconnect all connections to this endpoint */
++	for (;;) {
++		struct kdbus_conn *conn;
++
++		mutex_lock(&ep->lock);
++		conn = list_first_entry_or_null(&ep->conn_list,
++						struct kdbus_conn,
++						ep_entry);
++		if (!conn) {
++			mutex_unlock(&ep->lock);
++			break;
++		}
++
++		/* take reference, release lock, disconnect without lock */
++		kdbus_conn_ref(conn);
++		mutex_unlock(&ep->lock);
++
++		kdbus_conn_disconnect(conn, false);
++		kdbus_conn_unref(conn);
++	}
++}
++
++/**
++ * kdbus_ep_new() - create a new endpoint
++ * @bus:		The bus this endpoint will be created for
++ * @name:		The name of the endpoint
++ * @access:		The access flags for this node (KDBUS_MAKE_ACCESS_*)
++ * @uid:		The uid of the node
++ * @gid:		The gid of the node
++ * @is_custom:		Whether this is a custom endpoint
++ *
++ * This function will create a new endpoint with the given
++ * name and properties for a given bus.
++ *
++ * Return: a new kdbus_ep on success, ERR_PTR on failure.
++ */
++struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
++			      unsigned int access, kuid_t uid, kgid_t gid,
++			      bool is_custom)
++{
++	struct kdbus_ep *e;
++	int ret;
++
++	/*
++	 * Validate only custom endpoints names, default endpoints
++	 * with a "bus" name are created when the bus is created
++	 */
++	if (is_custom) {
++		ret = kdbus_verify_uid_prefix(name, bus->domain->user_namespace,
++					      uid);
++		if (ret < 0)
++			return ERR_PTR(ret);
++	}
++
++	e = kzalloc(sizeof(*e), GFP_KERNEL);
++	if (!e)
++		return ERR_PTR(-ENOMEM);
++
++	kdbus_node_init(&e->node, KDBUS_NODE_ENDPOINT);
++
++	e->node.free_cb = kdbus_ep_free;
++	e->node.release_cb = kdbus_ep_release;
++	e->node.uid = uid;
++	e->node.gid = gid;
++	e->node.mode = S_IRUSR | S_IWUSR;
++	if (access & (KDBUS_MAKE_ACCESS_GROUP | KDBUS_MAKE_ACCESS_WORLD))
++		e->node.mode |= S_IRGRP | S_IWGRP;
++	if (access & KDBUS_MAKE_ACCESS_WORLD)
++		e->node.mode |= S_IROTH | S_IWOTH;
++
++	mutex_init(&e->lock);
++	INIT_LIST_HEAD(&e->conn_list);
++	kdbus_policy_db_init(&e->policy_db);
++	e->bus = kdbus_bus_ref(bus);
++
++	ret = kdbus_node_link(&e->node, &bus->node, name);
++	if (ret < 0)
++		goto exit_unref;
++
++	/*
++	 * Transactions on custom endpoints are never accounted on the global
++	 * user limits. Instead, for each custom endpoint, we create a custom,
++	 * unique user, which all transactions are accounted on. Regardless of
++	 * the user using that endpoint, it is always accounted on the same
++	 * user-object. This budget is not shared with ordinary users on
++	 * non-custom endpoints.
++	 */
++	if (is_custom) {
++		e->user = kdbus_user_lookup(bus->domain, INVALID_UID);
++		if (IS_ERR(e->user)) {
++			ret = PTR_ERR(e->user);
++			e->user = NULL;
++			goto exit_unref;
++		}
++	}
++
++	return e;
++
++exit_unref:
++	kdbus_node_deactivate(&e->node);
++	kdbus_node_unref(&e->node);
++	return ERR_PTR(ret);
++}
++
++/**
++ * kdbus_ep_ref() - increase the reference counter of a kdbus_ep
++ * @ep:			The endpoint to reference
++ *
++ * Every user of an endpoint, except for its creator, must add a reference to
++ * the kdbus_ep instance using this function.
++ *
++ * Return: the ep itself
++ */
++struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep)
++{
++	if (ep)
++		kdbus_node_ref(&ep->node);
++	return ep;
++}
++
++/**
++ * kdbus_ep_unref() - decrease the reference counter of a kdbus_ep
++ * @ep:		The ep to unref
++ *
++ * Release a reference. If the reference count drops to 0, the ep will be
++ * freed.
++ *
++ * Return: NULL
++ */
++struct kdbus_ep *kdbus_ep_unref(struct kdbus_ep *ep)
++{
++	if (ep)
++		kdbus_node_unref(&ep->node);
++	return NULL;
++}
++
++/**
++ * kdbus_cmd_ep_make() - handle KDBUS_CMD_ENDPOINT_MAKE
++ * @bus:		bus to operate on
++ * @argp:		command payload
++ *
++ * Return: Newly created endpoint on success, NULL if only negotiation was
++ * requested, ERR_PTR on failure.
++ */
++struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp)
++{
++	const char *item_make_name;
++	struct kdbus_ep *ep = NULL;
++	struct kdbus_cmd *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_MAKE_ACCESS_GROUP |
++				 KDBUS_MAKE_ACCESS_WORLD,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret < 0)
++		return ERR_PTR(ret);
++	if (ret > 0)
++		return NULL;
++
++	item_make_name = argv[1].item->str;
++
++	ep = kdbus_ep_new(bus, item_make_name, cmd->flags,
++			  current_euid(), current_egid(), true);
++	if (IS_ERR(ep)) {
++		ret = PTR_ERR(ep);
++		ep = NULL;
++		goto exit;
++	}
++
++	if (!kdbus_node_activate(&ep->node)) {
++		ret = -ESHUTDOWN;
++		goto exit;
++	}
++
++exit:
++	ret = kdbus_args_clear(&args, ret);
++	if (ret < 0) {
++		if (ep) {
++			kdbus_node_deactivate(&ep->node);
++			kdbus_ep_unref(ep);
++		}
++		return ERR_PTR(ret);
++	}
++	return ep;
++}
++
++/**
++ * kdbus_cmd_ep_update() - handle KDBUS_CMD_ENDPOINT_UPDATE
++ * @ep:			endpoint to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp)
++{
++	struct kdbus_cmd *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_NAME, .multiple = true },
++		{ .type = KDBUS_ITEM_POLICY_ACCESS, .multiple = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	ret = kdbus_policy_set(&ep->policy_db, args.items, args.items_size,
++			       0, true, ep);
++	return kdbus_args_clear(&args, ret);
++}
+diff --git a/ipc/kdbus/endpoint.h b/ipc/kdbus/endpoint.h
+new file mode 100644
+index 0000000..d31954b
+--- /dev/null
++++ b/ipc/kdbus/endpoint.h
+@@ -0,0 +1,67 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_ENDPOINT_H
++#define __KDBUS_ENDPOINT_H
++
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <linux/uidgid.h>
++#include "node.h"
++#include "policy.h"
++
++struct kdbus_bus;
++struct kdbus_user;
++
++/**
++ * struct kdbus_ep - endpoint to access a bus
++ * @node:		The kdbus node
++ * @lock:		Endpoint data lock
++ * @bus:		Bus behind this endpoint
++ * @user:		Custom endpoints account against an anonymous user
++ * @policy_db:		Uploaded policy
++ * @conn_list:		Connections of this endpoint
++ *
++ * An endpoint offers access to a bus; the default endpoint node name is "bus".
++ * Additional custom endpoints to the same bus can be created and they can
++ * carry their own policies/filters.
++ */
++struct kdbus_ep {
++	struct kdbus_node node;
++	struct mutex lock;
++
++	/* static */
++	struct kdbus_bus *bus;
++	struct kdbus_user *user;
++
++	/* protected by own locks */
++	struct kdbus_policy_db policy_db;
++
++	/* protected by ep->lock */
++	struct list_head conn_list;
++};
++
++#define kdbus_ep_from_node(_node) \
++	container_of((_node), struct kdbus_ep, node)
++
++struct kdbus_ep *kdbus_ep_new(struct kdbus_bus *bus, const char *name,
++			      unsigned int access, kuid_t uid, kgid_t gid,
++			      bool is_custom);
++struct kdbus_ep *kdbus_ep_ref(struct kdbus_ep *ep);
++struct kdbus_ep *kdbus_ep_unref(struct kdbus_ep *ep);
++
++struct kdbus_ep *kdbus_cmd_ep_make(struct kdbus_bus *bus, void __user *argp);
++int kdbus_cmd_ep_update(struct kdbus_ep *ep, void __user *argp);
++
++#endif
+diff --git a/ipc/kdbus/fs.c b/ipc/kdbus/fs.c
+new file mode 100644
+index 0000000..d01f33b
+--- /dev/null
++++ b/ipc/kdbus/fs.c
+@@ -0,0 +1,510 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/dcache.h>
++#include <linux/fs.h>
++#include <linux/fsnotify.h>
++#include <linux/init.h>
++#include <linux/ipc_namespace.h>
++#include <linux/magic.h>
++#include <linux/module.h>
++#include <linux/mount.h>
++#include <linux/mutex.h>
++#include <linux/namei.h>
++#include <linux/pagemap.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++
++#include "bus.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "fs.h"
++#include "handle.h"
++#include "node.h"
++
++#define kdbus_node_from_dentry(_dentry) \
++	((struct kdbus_node *)(_dentry)->d_fsdata)
++
++static struct inode *fs_inode_get(struct super_block *sb,
++				  struct kdbus_node *node);
++
++/*
++ * Directory Management
++ */
++
++static inline unsigned char kdbus_dt_type(struct kdbus_node *node)
++{
++	switch (node->type) {
++	case KDBUS_NODE_DOMAIN:
++	case KDBUS_NODE_BUS:
++		return DT_DIR;
++	case KDBUS_NODE_CONTROL:
++	case KDBUS_NODE_ENDPOINT:
++		return DT_REG;
++	}
++
++	return DT_UNKNOWN;
++}
++
++static int fs_dir_fop_iterate(struct file *file, struct dir_context *ctx)
++{
++	struct dentry *dentry = file->f_path.dentry;
++	struct kdbus_node *parent = kdbus_node_from_dentry(dentry);
++	struct kdbus_node *old, *next = file->private_data;
++
++	/*
++	 * kdbusfs directory iterator (modelled after sysfs/kernfs)
++	 * When iterating kdbusfs directories, we iterate all children of the
++	 * parent kdbus_node object. We use ctx->pos to store the hash of the
++	 * child and file->private_data to store a reference to the next node
++	 * object. If ctx->pos is not modified via llseek while you iterate a
++	 * directory, then we use the file->private_data node pointer to
++	 * directly access the next node in the tree.
++	 * However, if you directly seek on the directory, we have to find the
++	 * closest node to that position and cannot use our node pointer. This
++	 * means iterating the rb-tree to find the closest match and start over
++	 * from there.
++	 * Note that hash values are not necessarily unique. Therefore, llseek
++	 * is not guaranteed to seek to the same node that you got when you
++	 * retrieved the position. Seeking to 0, 1, 2 and >=INT_MAX is safe,
++	 * though. We could use the inode-number as position, but this would
++	 * require another rb-tree for fast access. Kernfs and others already
++	 * ignore those conflicts, so we should be fine, too.
++	 */
++
++	if (!dir_emit_dots(file, ctx))
++		return 0;
++
++	/* acquire @next; if deactivated, or seek detected, find next node */
++	old = next;
++	if (next && ctx->pos == next->hash) {
++		if (kdbus_node_acquire(next))
++			kdbus_node_ref(next);
++		else
++			next = kdbus_node_next_child(parent, next);
++	} else {
++		next = kdbus_node_find_closest(parent, ctx->pos);
++	}
++	kdbus_node_unref(old);
++
++	while (next) {
++		/* emit @next */
++		file->private_data = next;
++		ctx->pos = next->hash;
++
++		kdbus_node_release(next);
++
++		if (!dir_emit(ctx, next->name, strlen(next->name), next->id,
++			      kdbus_dt_type(next)))
++			return 0;
++
++		/* find next node after @next */
++		old = next;
++		next = kdbus_node_next_child(parent, next);
++		kdbus_node_unref(old);
++	}
++
++	file->private_data = NULL;
++	ctx->pos = INT_MAX;
++
++	return 0;
++}
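++
++/*
++ * A consequence of the hash-based positions described above, seen from a
++ * hypothetical user-space caller (dfd is an open directory fd):
++ *
++ *	lseek(dfd, 0, SEEK_SET);	restart iteration at "."
++ *	lseek(dfd, old_pos, SEEK_SET);	may resume at a different node
++ *					that happens to share the hash
++ */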
++
++static loff_t fs_dir_fop_llseek(struct file *file, loff_t offset, int whence)
++{
++	struct inode *inode = file_inode(file);
++	loff_t ret;
++
++	/* protect f_off against fop_iterate */
++	mutex_lock(&inode->i_mutex);
++	ret = generic_file_llseek(file, offset, whence);
++	mutex_unlock(&inode->i_mutex);
++
++	return ret;
++}
++
++static int fs_dir_fop_release(struct inode *inode, struct file *file)
++{
++	kdbus_node_unref(file->private_data);
++	return 0;
++}
++
++static const struct file_operations fs_dir_fops = {
++	.read		= generic_read_dir,
++	.iterate	= fs_dir_fop_iterate,
++	.llseek		= fs_dir_fop_llseek,
++	.release	= fs_dir_fop_release,
++};
++
++static struct dentry *fs_dir_iop_lookup(struct inode *dir,
++					struct dentry *dentry,
++					unsigned int flags)
++{
++	struct dentry *dnew = NULL;
++	struct kdbus_node *parent;
++	struct kdbus_node *node;
++	struct inode *inode;
++
++	parent = kdbus_node_from_dentry(dentry->d_parent);
++	if (!kdbus_node_acquire(parent))
++		return NULL;
++
++	/* returns reference to _acquired_ child node */
++	node = kdbus_node_find_child(parent, dentry->d_name.name);
++	if (node) {
++		dentry->d_fsdata = node;
++		inode = fs_inode_get(dir->i_sb, node);
++		if (IS_ERR(inode))
++			dnew = ERR_CAST(inode);
++		else
++			dnew = d_splice_alias(inode, dentry);
++
++		kdbus_node_release(node);
++	}
++
++	kdbus_node_release(parent);
++	return dnew;
++}
++
++static const struct inode_operations fs_dir_iops = {
++	.permission	= generic_permission,
++	.lookup		= fs_dir_iop_lookup,
++};
++
++/*
++ * Inode Management
++ */
++
++static const struct inode_operations fs_inode_iops = {
++	.permission	= generic_permission,
++};
++
++static struct inode *fs_inode_get(struct super_block *sb,
++				  struct kdbus_node *node)
++{
++	struct inode *inode;
++
++	inode = iget_locked(sb, node->id);
++	if (!inode)
++		return ERR_PTR(-ENOMEM);
++	if (!(inode->i_state & I_NEW))
++		return inode;
++
++	inode->i_private = kdbus_node_ref(node);
++	inode->i_mapping->a_ops = &empty_aops;
++	inode->i_mode = node->mode & S_IALLUGO;
++	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME;
++	inode->i_uid = node->uid;
++	inode->i_gid = node->gid;
++
++	switch (node->type) {
++	case KDBUS_NODE_DOMAIN:
++	case KDBUS_NODE_BUS:
++		inode->i_mode |= S_IFDIR;
++		inode->i_op = &fs_dir_iops;
++		inode->i_fop = &fs_dir_fops;
++		set_nlink(inode, 2);
++		break;
++	case KDBUS_NODE_CONTROL:
++	case KDBUS_NODE_ENDPOINT:
++		inode->i_mode |= S_IFREG;
++		inode->i_op = &fs_inode_iops;
++		inode->i_fop = &kdbus_handle_ops;
++		break;
++	}
++
++	unlock_new_inode(inode);
++
++	return inode;
++}
++
++/*
++ * Superblock Management
++ */
++
++static int fs_super_dop_revalidate(struct dentry *dentry, unsigned int flags)
++{
++	struct kdbus_node *node;
++
++	/* Force lookup on negatives */
++	if (!dentry->d_inode)
++		return 0;
++
++	node = kdbus_node_from_dentry(dentry);
++
++	/* see whether the node has been removed */
++	if (!kdbus_node_is_active(node))
++		return 0;
++
++	return 1;
++}
++
++static void fs_super_dop_release(struct dentry *dentry)
++{
++	kdbus_node_unref(dentry->d_fsdata);
++}
++
++static const struct dentry_operations fs_super_dops = {
++	.d_revalidate	= fs_super_dop_revalidate,
++	.d_release	= fs_super_dop_release,
++};
++
++static void fs_super_sop_evict_inode(struct inode *inode)
++{
++	struct kdbus_node *node = kdbus_node_from_inode(inode);
++
++	truncate_inode_pages_final(&inode->i_data);
++	clear_inode(inode);
++	kdbus_node_unref(node);
++}
++
++static const struct super_operations fs_super_sops = {
++	.statfs		= simple_statfs,
++	.drop_inode	= generic_delete_inode,
++	.evict_inode	= fs_super_sop_evict_inode,
++};
++
++static int fs_super_fill(struct super_block *sb)
++{
++	struct kdbus_domain *domain = sb->s_fs_info;
++	struct inode *inode;
++	int ret;
++
++	sb->s_blocksize = PAGE_CACHE_SIZE;
++	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
++	sb->s_magic = KDBUS_SUPER_MAGIC;
++	sb->s_maxbytes = MAX_LFS_FILESIZE;
++	sb->s_op = &fs_super_sops;
++	sb->s_time_gran = 1;
++
++	inode = fs_inode_get(sb, &domain->node);
++	if (IS_ERR(inode))
++		return PTR_ERR(inode);
++
++	sb->s_root = d_make_root(inode);
++	if (!sb->s_root) {
++		/* d_make_root iput()s the inode on failure */
++		return -ENOMEM;
++	}
++
++	/* sb holds domain reference */
++	sb->s_root->d_fsdata = &domain->node;
++	sb->s_d_op = &fs_super_dops;
++
++	/* sb holds root reference */
++	domain->dentry = sb->s_root;
++
++	if (!kdbus_node_activate(&domain->node))
++		return -ESHUTDOWN;
++
++	ret = kdbus_domain_populate(domain, KDBUS_MAKE_ACCESS_WORLD);
++	if (ret < 0)
++		return ret;
++
++	sb->s_flags |= MS_ACTIVE;
++	return 0;
++}
++
++static void fs_super_kill(struct super_block *sb)
++{
++	struct kdbus_domain *domain = sb->s_fs_info;
++
++	if (domain) {
++		kdbus_node_deactivate(&domain->node);
++		domain->dentry = NULL;
++	}
++
++	kill_anon_super(sb);
++
++	if (domain)
++		kdbus_domain_unref(domain);
++}
++
++static int fs_super_set(struct super_block *sb, void *data)
++{
++	int ret;
++
++	ret = set_anon_super(sb, data);
++	if (!ret)
++		sb->s_fs_info = data;
++
++	return ret;
++}
++
++static struct dentry *fs_super_mount(struct file_system_type *fs_type,
++				     int flags, const char *dev_name,
++				     void *data)
++{
++	struct kdbus_domain *domain;
++	struct super_block *sb;
++	int ret;
++
++	domain = kdbus_domain_new(KDBUS_MAKE_ACCESS_WORLD);
++	if (IS_ERR(domain))
++		return ERR_CAST(domain);
++
++	sb = sget(fs_type, NULL, fs_super_set, flags, domain);
++	if (IS_ERR(sb)) {
++		kdbus_node_deactivate(&domain->node);
++		kdbus_domain_unref(domain);
++		return ERR_CAST(sb);
++	}
++
++	WARN_ON(sb->s_fs_info != domain);
++	WARN_ON(sb->s_root);
++
++	ret = fs_super_fill(sb);
++	if (ret < 0) {
++		/* calls into ->kill_sb() when done */
++		deactivate_locked_super(sb);
++		return ERR_PTR(ret);
++	}
++
++	return dget(sb->s_root);
++}
++
++static struct file_system_type fs_type = {
++	.name		= KBUILD_MODNAME "fs",
++	.owner		= THIS_MODULE,
++	.mount		= fs_super_mount,
++	.kill_sb	= fs_super_kill,
++	.fs_flags	= FS_USERNS_MOUNT,
++};
++
++/**
++ * kdbus_fs_init() - register kdbus filesystem
++ *
++ * This registers a filesystem with the VFS layer. The filesystem is called
++ * `KBUILD_MODNAME "fs"', which usually resolves to `kdbusfs'. The naming
++ * scheme allows KBUILD_MODNAME to be set to "kdbus2", yielding an
++ * independent filesystem for developers.
++ *
++ * Each mount of the kdbusfs filesystem has a kdbus_domain attached.
++ * Operations on this mount will only affect the attached domain. On each mount
++ * a new domain is automatically created and used for this mount exclusively.
++ * If you want to share a domain across multiple mounts, you need to bind-mount
++ * it.
++ *
++ * Mounts of kdbusfs (with a different domain each) are unrelated to each other
++ * and will never have any effect on any domain but their own.
++ *
++ * Return: 0 on success, negative error otherwise.
++ */
++int kdbus_fs_init(void)
++{
++	return register_filesystem(&fs_type);
++}
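++
++/*
++ * With the default KBUILD_MODNAME, this registers "kdbusfs", which can be
++ * mounted like any other filesystem (the mount point below is merely an
++ * example):
++ *
++ *	mount -t kdbusfs kdbusfs /sys/fs/kdbus
++ */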
++
++/**
++ * kdbus_fs_exit() - unregister kdbus filesystem
++ *
++ * This does the reverse to kdbus_fs_init(). It unregisters the kdbusfs
++ * filesystem from VFS and cleans up any allocated resources.
++ */
++void kdbus_fs_exit(void)
++{
++	unregister_filesystem(&fs_type);
++}
++
++/* acquire domain of @node, making sure all ancestors are active */
++static struct kdbus_domain *fs_acquire_domain(struct kdbus_node *node)
++{
++	struct kdbus_domain *domain;
++	struct kdbus_node *iter;
++
++	/* caller must guarantee that @node is linked */
++	for (iter = node; iter->parent; iter = iter->parent)
++		if (!kdbus_node_is_active(iter->parent))
++			return NULL;
++
++	/* root nodes are always domains */
++	if (WARN_ON(iter->type != KDBUS_NODE_DOMAIN))
++		return NULL;
++
++	domain = kdbus_domain_from_node(iter);
++	if (!kdbus_node_acquire(&domain->node))
++		return NULL;
++
++	return domain;
++}
++
++/**
++ * kdbus_fs_flush() - flush dcache entries of a node
++ * @node:		Node to flush entries of
++ *
++ * This flushes all VFS filesystem cache entries for a node and all its
++ * children. This should be called whenever a node is destroyed during
++ * runtime. It will flush the cache entries so the linked objects can be
++ * deallocated.
++ *
++ * This is a no-op if you call it on active nodes (they really should stay in
++ * cache) or on nodes with deactivated parents (flushing the parent is enough).
++ * Furthermore, there is no need to call it on nodes whose lifetime is bound to
++ * their parents'. In those cases, the parent-flush will always also flush the
++ * children.
++ */
++void kdbus_fs_flush(struct kdbus_node *node)
++{
++	struct dentry *dentry, *parent_dentry = NULL;
++	struct kdbus_domain *domain;
++	struct qstr name;
++
++	/* active nodes should remain in cache */
++	if (!kdbus_node_is_deactivated(node))
++		return;
++
++	/* nodes that were never linked were never instantiated */
++	if (!node->parent)
++		return;
++
++	/* acquire domain and verify all ancestors are active */
++	domain = fs_acquire_domain(node);
++	if (!domain)
++		return;
++
++	switch (node->type) {
++	case KDBUS_NODE_ENDPOINT:
++		if (WARN_ON(!node->parent || !node->parent->name))
++			goto exit;
++
++		name.name = node->parent->name;
++		name.len = strlen(node->parent->name);
++		parent_dentry = d_hash_and_lookup(domain->dentry, &name);
++		if (IS_ERR_OR_NULL(parent_dentry))
++			goto exit;
++
++		/* fallthrough */
++	case KDBUS_NODE_BUS:
++		if (WARN_ON(!node->name))
++			goto exit;
++
++		name.name = node->name;
++		name.len = strlen(node->name);
++		dentry = d_hash_and_lookup(parent_dentry ? : domain->dentry,
++					   &name);
++		if (!IS_ERR_OR_NULL(dentry)) {
++			d_invalidate(dentry);
++			dput(dentry);
++		}
++
++		dput(parent_dentry);
++		break;
++
++	default:
++		/* all other types are bound to their parent lifetime */
++		break;
++	}
++
++exit:
++	kdbus_node_release(&domain->node);
++}
+diff --git a/ipc/kdbus/fs.h b/ipc/kdbus/fs.h
+new file mode 100644
+index 0000000..62f7d6a
+--- /dev/null
++++ b/ipc/kdbus/fs.h
+@@ -0,0 +1,28 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUSFS_H
++#define __KDBUSFS_H
++
++#include <linux/kernel.h>
++
++struct kdbus_node;
++
++int kdbus_fs_init(void);
++void kdbus_fs_exit(void);
++void kdbus_fs_flush(struct kdbus_node *node);
++
++#define kdbus_node_from_inode(_inode) \
++	((struct kdbus_node *)(_inode)->i_private)
++
++#endif
+diff --git a/ipc/kdbus/handle.c b/ipc/kdbus/handle.c
+new file mode 100644
+index 0000000..f72dbe5
+--- /dev/null
++++ b/ipc/kdbus/handle.c
+@@ -0,0 +1,617 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/kdev_t.h>
++#include <linux/module.h>
++#include <linux/poll.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/syscalls.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "fs.h"
++#include "handle.h"
++#include "item.h"
++#include "match.h"
++#include "message.h"
++#include "names.h"
++#include "domain.h"
++#include "policy.h"
++
++static int kdbus_args_verify(struct kdbus_args *args)
++{
++	struct kdbus_item *item;
++	size_t i;
++	int ret;
++
++	KDBUS_ITEMS_FOREACH(item, args->items, args->items_size) {
++		struct kdbus_arg *arg = NULL;
++
++		if (!KDBUS_ITEM_VALID(item, args->items, args->items_size))
++			return -EINVAL;
++
++		for (i = 0; i < args->argc; ++i)
++			if (args->argv[i].type == item->type)
++				break;
++		if (i >= args->argc)
++			return -EINVAL;
++
++		arg = &args->argv[i];
++
++		ret = kdbus_item_validate(item);
++		if (ret < 0)
++			return ret;
++
++		if (arg->item && !arg->multiple)
++			return -EINVAL;
++
++		arg->item = item;
++	}
++
++	if (!KDBUS_ITEMS_END(item, args->items, args->items_size))
++		return -EINVAL;
++
++	for (i = 0; i < args->argc; ++i)
++		if (args->argv[i].mandatory && !args->argv[i].item)
++			return -EINVAL;
++
++	return 0;
++}
++
++static int kdbus_args_negotiate(struct kdbus_args *args)
++{
++	struct kdbus_item __user *user;
++	struct kdbus_item *negotiation;
++	size_t i, j, num;
++
++	/*
++	 * If KDBUS_FLAG_NEGOTIATE is set, we overwrite the flags field with
++	 * the set of supported flags. Furthermore, if a KDBUS_ITEM_NEGOTIATE
++	 * item is passed, we iterate its payload (array of u64, each set to an
++	 * item type) and clear all unsupported item-types to 0.
++	 * The caller might do this recursively, if other flags or objects are
++	 * embedded in the payload itself.
++	 */
++
++	if (args->cmd->flags & KDBUS_FLAG_NEGOTIATE) {
++		if (put_user(args->allowed_flags & ~KDBUS_FLAG_NEGOTIATE,
++			     &args->user->flags))
++			return -EFAULT;
++	}
++
++	if (args->argc < 1 || args->argv[0].type != KDBUS_ITEM_NEGOTIATE ||
++	    !args->argv[0].item)
++		return 0;
++
++	negotiation = args->argv[0].item;
++	user = (struct kdbus_item __user *)
++		((u8 __user *)args->user +
++		 ((u8 *)negotiation - (u8 *)args->cmd));
++	num = KDBUS_ITEM_PAYLOAD_SIZE(negotiation) / sizeof(u64);
++
++	for (i = 0; i < num; ++i) {
++		for (j = 0; j < args->argc; ++j)
++			if (negotiation->data64[i] == args->argv[j].type)
++				break;
++
++		if (j < args->argc)
++			continue;
++
++		/* this item is not supported, clear it out */
++		negotiation->data64[i] = 0;
++		if (put_user(negotiation->data64[i], &user->data64[i]))
++			return -EFAULT;
++	}
++
++	return 0;
++}
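++
++/*
++ * Negotiation as seen from user-space (illustrative sketch; the layout of
++ * the surrounding command struct is omitted): the caller embeds an array
++ * of u64 item types in a KDBUS_ITEM_NEGOTIATE item and sets
++ * KDBUS_FLAG_NEGOTIATE in the command flags:
++ *
++ *	u64 probe[] = { KDBUS_ITEM_MAKE_NAME, KDBUS_ITEM_POLICY_ACCESS };
++ *
++ * On return, the flags field holds the full set of supported flags, and
++ * every probe[] slot carrying an unsupported item type reads as 0.
++ */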
++
++/**
++ * __kdbus_args_parse() - parse payload of kdbus command
++ * @args:		object to parse data into
++ * @argp:		user-space location of command payload to parse
++ * @type_size:		overall size of command payload to parse
++ * @items_offset:	offset of items array in command payload
++ * @out:		output variable to store pointer to copied payload
++ *
++ * This parses the ioctl payload at user-space location @argp into @args. @args
++ * must be pre-initialized by the caller to reflect the supported flags and
++ * items of this command. This parser will then copy the command payload into
++ * kernel-space, verify correctness and consistency, and cache pointers to parsed
++ * items and other data in @args.
++ *
++ * If this function succeeded, you must call kdbus_args_clear() to release
++ * allocated resources before destroying @args.
++ *
++ * Return: On failure a negative error code is returned. Otherwise, 1 is
++ * returned if negotiation was requested, 0 if not.
++ */
++int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
++		       size_t type_size, size_t items_offset, void **out)
++{
++	int ret;
++
++	args->cmd = kdbus_memdup_user(argp, type_size, KDBUS_CMD_MAX_SIZE);
++	if (IS_ERR(args->cmd))
++		return PTR_ERR(args->cmd);
++
++	args->cmd->return_flags = 0;
++	args->user = argp;
++	args->items = (void *)((u8 *)args->cmd + items_offset);
++	args->items_size = args->cmd->size - items_offset;
++
++	if (args->cmd->flags & ~args->allowed_flags) {
++		ret = -EINVAL;
++		goto error;
++	}
++
++	ret = kdbus_args_verify(args);
++	if (ret < 0)
++		goto error;
++
++	ret = kdbus_args_negotiate(args);
++	if (ret < 0)
++		goto error;
++
++	*out = args->cmd;
++	return !!(args->cmd->flags & KDBUS_FLAG_NEGOTIATE);
++
++error:
++	return kdbus_args_clear(args, ret);
++}
++
++/**
++ * kdbus_args_clear() - release allocated command resources
++ * @args:	object to release resources of
++ * @ret:	return value of this command
++ *
++ * This frees all allocated resources on @args and copies the command result
++ * flags into user-space. @ret is usually returned unchanged by this function,
++ * so it can be used in the final 'return' statement of the command handler.
++ *
++ * Return: -EFAULT if return values cannot be copied into user-space, otherwise
++ *         @ret is returned unchanged.
++ */
++int kdbus_args_clear(struct kdbus_args *args, int ret)
++{
++	if (!args)
++		return ret;
++
++	if (!IS_ERR_OR_NULL(args->cmd)) {
++		if (put_user(args->cmd->return_flags,
++			     &args->user->return_flags))
++			ret = -EFAULT;
++		kfree(args->cmd);
++		args->cmd = NULL;
++	}
++
++	return ret;
++}
++
++/**
++ * enum kdbus_handle_type - type of a handle
++ * @KDBUS_HANDLE_NONE:		no type set yet
++ * @KDBUS_HANDLE_BUS_OWNER:	bus owner
++ * @KDBUS_HANDLE_EP_OWNER:	endpoint owner
++ * @KDBUS_HANDLE_CONNECTED:	endpoint connection after HELLO
++ */
++enum kdbus_handle_type {
++	KDBUS_HANDLE_NONE,
++	KDBUS_HANDLE_BUS_OWNER,
++	KDBUS_HANDLE_EP_OWNER,
++	KDBUS_HANDLE_CONNECTED,
++};
++
++/**
++ * struct kdbus_handle - handle to the kdbus system
++ * @rwlock:		handle lock
++ * @type:		type of this handle (KDBUS_HANDLE_*)
++ * @bus_owner:		bus this handle owns
++ * @ep_owner:		endpoint this handle owns
++ * @conn:		connection this handle owns
++ * @privileged:		Flag to mark a handle as privileged
++ */
++struct kdbus_handle {
++	struct rw_semaphore rwlock;
++
++	enum kdbus_handle_type type;
++	union {
++		struct kdbus_bus *bus_owner;
++		struct kdbus_ep *ep_owner;
++		struct kdbus_conn *conn;
++	};
++
++	bool privileged:1;
++};
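++
++/*
++ * A handle starts out as KDBUS_HANDLE_NONE and is typed at most once by
++ * its first successful ioctl; a sketch of the transitions implemented in
++ * kdbus_handle_ioctl() below:
++ *
++ *	control node  + KDBUS_CMD_BUS_MAKE       -> KDBUS_HANDLE_BUS_OWNER
++ *	endpoint node + KDBUS_CMD_ENDPOINT_MAKE  -> KDBUS_HANDLE_EP_OWNER
++ *	endpoint node + KDBUS_CMD_HELLO          -> KDBUS_HANDLE_CONNECTED
++ */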
++
++static int kdbus_handle_open(struct inode *inode, struct file *file)
++{
++	struct kdbus_handle *handle;
++	struct kdbus_node *node;
++	int ret;
++
++	node = kdbus_node_from_inode(inode);
++	if (!kdbus_node_acquire(node))
++		return -ESHUTDOWN;
++
++	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
++	if (!handle) {
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	init_rwsem(&handle->rwlock);
++	handle->type = KDBUS_HANDLE_NONE;
++
++	if (node->type == KDBUS_NODE_ENDPOINT) {
++		struct kdbus_ep *ep = kdbus_ep_from_node(node);
++		struct kdbus_bus *bus = ep->bus;
++
++		/*
++		 * A connection is privileged if it is opened on an endpoint
++		 * without custom policy and either:
++		 *   * the user has CAP_IPC_OWNER in the domain user namespace
++		 * or
++		 *   * the caller's euid matches the uid of the bus creator
++		 */
++		if (!ep->user &&
++		    (ns_capable(bus->domain->user_namespace, CAP_IPC_OWNER) ||
++		     uid_eq(file->f_cred->euid, bus->node.uid)))
++			handle->privileged = true;
++	}
++
++	file->private_data = handle;
++	ret = 0;
++
++exit:
++	kdbus_node_release(node);
++	return ret;
++}
++
++static int kdbus_handle_release(struct inode *inode, struct file *file)
++{
++	struct kdbus_handle *handle = file->private_data;
++
++	switch (handle->type) {
++	case KDBUS_HANDLE_BUS_OWNER:
++		if (handle->bus_owner) {
++			kdbus_node_deactivate(&handle->bus_owner->node);
++			kdbus_bus_unref(handle->bus_owner);
++		}
++		break;
++	case KDBUS_HANDLE_EP_OWNER:
++		if (handle->ep_owner) {
++			kdbus_node_deactivate(&handle->ep_owner->node);
++			kdbus_ep_unref(handle->ep_owner);
++		}
++		break;
++	case KDBUS_HANDLE_CONNECTED:
++		kdbus_conn_disconnect(handle->conn, false);
++		kdbus_conn_unref(handle->conn);
++		break;
++	case KDBUS_HANDLE_NONE:
++		/* nothing to clean up */
++		break;
++	}
++
++	kfree(handle);
++
++	return 0;
++}
++
++static long kdbus_handle_ioctl_control(struct file *file, unsigned int cmd,
++				       void __user *argp)
++{
++	struct kdbus_handle *handle = file->private_data;
++	struct kdbus_node *node = file_inode(file)->i_private;
++	struct kdbus_domain *domain;
++	int ret = 0;
++
++	if (!kdbus_node_acquire(node))
++		return -ESHUTDOWN;
++
++	/*
++	 * The parent of control-nodes is always a domain; make sure to pin it
++	 * so the parent is actually valid.
++	 */
++	domain = kdbus_domain_from_node(node->parent);
++	if (!kdbus_node_acquire(&domain->node)) {
++		kdbus_node_release(node);
++		return -ESHUTDOWN;
++	}
++
++	switch (cmd) {
++	case KDBUS_CMD_BUS_MAKE: {
++		struct kdbus_bus *bus;
++
++		bus = kdbus_cmd_bus_make(domain, argp);
++		if (IS_ERR_OR_NULL(bus)) {
++			ret = PTR_ERR_OR_ZERO(bus);
++			break;
++		}
++
++		handle->type = KDBUS_HANDLE_BUS_OWNER;
++		handle->bus_owner = bus;
++		break;
++	}
++
++	default:
++		ret = -EBADFD;
++		break;
++	}
++
++	kdbus_node_release(&domain->node);
++	kdbus_node_release(node);
++	return ret;
++}
++
++static long kdbus_handle_ioctl_ep(struct file *file, unsigned int cmd,
++				  void __user *buf)
++{
++	struct kdbus_handle *handle = file->private_data;
++	struct kdbus_node *node = file_inode(file)->i_private;
++	struct kdbus_ep *ep, *file_ep = kdbus_ep_from_node(node);
++	struct kdbus_conn *conn;
++	int ret = 0;
++
++	if (!kdbus_node_acquire(node))
++		return -ESHUTDOWN;
++
++	switch (cmd) {
++	case KDBUS_CMD_ENDPOINT_MAKE:
++		/* creating custom endpoints is a privileged operation */
++		if (!handle->privileged) {
++			ret = -EPERM;
++			break;
++		}
++
++		ep = kdbus_cmd_ep_make(file_ep->bus, buf);
++		if (IS_ERR_OR_NULL(ep)) {
++			ret = PTR_ERR_OR_ZERO(ep);
++			break;
++		}
++
++		handle->type = KDBUS_HANDLE_EP_OWNER;
++		handle->ep_owner = ep;
++		break;
++
++	case KDBUS_CMD_HELLO:
++		conn = kdbus_cmd_hello(file_ep, handle->privileged, buf);
++		if (IS_ERR_OR_NULL(conn)) {
++			ret = PTR_ERR_OR_ZERO(conn);
++			break;
++		}
++
++		handle->type = KDBUS_HANDLE_CONNECTED;
++		handle->conn = conn;
++		break;
++
++	default:
++		ret = -EBADFD;
++		break;
++	}
++
++	kdbus_node_release(node);
++	return ret;
++}
++
++static long kdbus_handle_ioctl_ep_owner(struct file *file, unsigned int command,
++					void __user *buf)
++{
++	struct kdbus_handle *handle = file->private_data;
++	struct kdbus_ep *ep = handle->ep_owner;
++	int ret;
++
++	if (!kdbus_node_acquire(&ep->node))
++		return -ESHUTDOWN;
++
++	switch (command) {
++	case KDBUS_CMD_ENDPOINT_UPDATE:
++		ret = kdbus_cmd_ep_update(ep, buf);
++		break;
++	default:
++		ret = -EBADFD;
++		break;
++	}
++
++	kdbus_node_release(&ep->node);
++	return ret;
++}
++
++static long kdbus_handle_ioctl_connected(struct file *file,
++					 unsigned int command, void __user *buf)
++{
++	struct kdbus_handle *handle = file->private_data;
++	struct kdbus_conn *conn = handle->conn;
++	struct kdbus_conn *release_conn = NULL;
++	int ret;
++
++	release_conn = conn;
++	ret = kdbus_conn_acquire(release_conn);
++	if (ret < 0)
++		return ret;
++
++	switch (command) {
++	case KDBUS_CMD_BYEBYE:
++		/*
++		 * BYEBYE is special; we must not acquire a connection when
++		 * calling into kdbus_conn_disconnect() or we will deadlock,
++		 * because kdbus_conn_disconnect() will wait for all acquired
++		 * references to be dropped.
++		 */
++		kdbus_conn_release(release_conn);
++		release_conn = NULL;
++		ret = kdbus_cmd_byebye_unlocked(conn, buf);
++		break;
++	case KDBUS_CMD_NAME_ACQUIRE:
++		ret = kdbus_cmd_name_acquire(conn, buf);
++		break;
++	case KDBUS_CMD_NAME_RELEASE:
++		ret = kdbus_cmd_name_release(conn, buf);
++		break;
++	case KDBUS_CMD_LIST:
++		ret = kdbus_cmd_list(conn, buf);
++		break;
++	case KDBUS_CMD_CONN_INFO:
++		ret = kdbus_cmd_conn_info(conn, buf);
++		break;
++	case KDBUS_CMD_BUS_CREATOR_INFO:
++		ret = kdbus_cmd_bus_creator_info(conn, buf);
++		break;
++	case KDBUS_CMD_UPDATE:
++		ret = kdbus_cmd_update(conn, buf);
++		break;
++	case KDBUS_CMD_MATCH_ADD:
++		ret = kdbus_cmd_match_add(conn, buf);
++		break;
++	case KDBUS_CMD_MATCH_REMOVE:
++		ret = kdbus_cmd_match_remove(conn, buf);
++		break;
++	case KDBUS_CMD_SEND:
++		ret = kdbus_cmd_send(conn, file, buf);
++		break;
++	case KDBUS_CMD_RECV:
++		ret = kdbus_cmd_recv(conn, buf);
++		break;
++	case KDBUS_CMD_FREE:
++		ret = kdbus_cmd_free(conn, buf);
++		break;
++	default:
++		ret = -EBADFD;
++		break;
++	}
++
++	kdbus_conn_release(release_conn);
++	return ret;
++}
++
++static long kdbus_handle_ioctl(struct file *file, unsigned int cmd,
++			       unsigned long arg)
++{
++	struct kdbus_handle *handle = file->private_data;
++	struct kdbus_node *node = kdbus_node_from_inode(file_inode(file));
++	void __user *argp = (void __user *)arg;
++	long ret = -EBADFD;
++
++	switch (cmd) {
++	case KDBUS_CMD_BUS_MAKE:
++	case KDBUS_CMD_ENDPOINT_MAKE:
++	case KDBUS_CMD_HELLO:
++		/* bail out early if already typed */
++		if (handle->type != KDBUS_HANDLE_NONE)
++			break;
++
++		down_write(&handle->rwlock);
++		if (handle->type == KDBUS_HANDLE_NONE) {
++			if (node->type == KDBUS_NODE_CONTROL)
++				ret = kdbus_handle_ioctl_control(file, cmd,
++								 argp);
++			else if (node->type == KDBUS_NODE_ENDPOINT)
++				ret = kdbus_handle_ioctl_ep(file, cmd, argp);
++		}
++		up_write(&handle->rwlock);
++		break;
++
++	case KDBUS_CMD_ENDPOINT_UPDATE:
++	case KDBUS_CMD_BYEBYE:
++	case KDBUS_CMD_NAME_ACQUIRE:
++	case KDBUS_CMD_NAME_RELEASE:
++	case KDBUS_CMD_LIST:
++	case KDBUS_CMD_CONN_INFO:
++	case KDBUS_CMD_BUS_CREATOR_INFO:
++	case KDBUS_CMD_UPDATE:
++	case KDBUS_CMD_MATCH_ADD:
++	case KDBUS_CMD_MATCH_REMOVE:
++	case KDBUS_CMD_SEND:
++	case KDBUS_CMD_RECV:
++	case KDBUS_CMD_FREE:
++		down_read(&handle->rwlock);
++		if (handle->type == KDBUS_HANDLE_EP_OWNER)
++			ret = kdbus_handle_ioctl_ep_owner(file, cmd, argp);
++		else if (handle->type == KDBUS_HANDLE_CONNECTED)
++			ret = kdbus_handle_ioctl_connected(file, cmd, argp);
++		up_read(&handle->rwlock);
++		break;
++	default:
++		ret = -ENOTTY;
++		break;
++	}
++
++	return ret < 0 ? ret : 0;
++}
++
++static unsigned int kdbus_handle_poll(struct file *file,
++				      struct poll_table_struct *wait)
++{
++	struct kdbus_handle *handle = file->private_data;
++	unsigned int mask = POLLOUT | POLLWRNORM;
++	int ret;
++
++	/* Only a connected endpoint can read/write data */
++	down_read(&handle->rwlock);
++	if (handle->type != KDBUS_HANDLE_CONNECTED) {
++		up_read(&handle->rwlock);
++		return POLLERR | POLLHUP;
++	}
++	up_read(&handle->rwlock);
++
++	ret = kdbus_conn_acquire(handle->conn);
++	if (ret < 0)
++		return POLLERR | POLLHUP;
++
++	poll_wait(file, &handle->conn->wait, wait);
++
++	if (!list_empty(&handle->conn->queue.msg_list) ||
++	    atomic_read(&handle->conn->lost_count) > 0)
++		mask |= POLLIN | POLLRDNORM;
++
++	kdbus_conn_release(handle->conn);
++
++	return mask;
++}
++
++static int kdbus_handle_mmap(struct file *file, struct vm_area_struct *vma)
++{
++	struct kdbus_handle *handle = file->private_data;
++	int ret = -EBADFD;
++
++	if (down_read_trylock(&handle->rwlock)) {
++		if (handle->type == KDBUS_HANDLE_CONNECTED)
++			ret = kdbus_pool_mmap(handle->conn->pool, vma);
++		up_read(&handle->rwlock);
++	}
++	return ret;
++}
++
++const struct file_operations kdbus_handle_ops = {
++	.owner =		THIS_MODULE,
++	.open =			kdbus_handle_open,
++	.release =		kdbus_handle_release,
++	.poll =			kdbus_handle_poll,
++	.llseek =		noop_llseek,
++	.unlocked_ioctl =	kdbus_handle_ioctl,
++	.mmap =			kdbus_handle_mmap,
++#ifdef CONFIG_COMPAT
++	.compat_ioctl =		kdbus_handle_ioctl,
++#endif
++};
+diff --git a/ipc/kdbus/handle.h b/ipc/kdbus/handle.h
+new file mode 100644
+index 0000000..93a372d
+--- /dev/null
++++ b/ipc/kdbus/handle.h
+@@ -0,0 +1,85 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_HANDLE_H
++#define __KDBUS_HANDLE_H
++
++#include <linux/fs.h>
++#include <uapi/linux/kdbus.h>
++
++extern const struct file_operations kdbus_handle_ops;
++
++/**
++ * kdbus_arg - information and state of a single ioctl command item
++ * @type:		item type
++ * @item:		set by the parser to the first found item of this type
++ * @multiple:		whether multiple items of this type are allowed
++ * @mandatory:		whether at least one item of this type is required
++ *
++ * This structure describes a single item in an ioctl command payload. The
++ * caller has to pre-fill the type and flags; the parser will then use this
++ * information to verify the ioctl payload. @item is set by the parser to point
++ * to the first occurrence of the item.
++ */
++struct kdbus_arg {
++	u64 type;
++	struct kdbus_item *item;
++	bool multiple : 1;
++	bool mandatory : 1;
++};
++
++/**
++ * kdbus_args - information and state of ioctl command parser
++ * @allowed_flags:	set of flags this command supports
++ * @argc:		number of items in @argv
++ * @argv:		array of items this command supports
++ * @user:		set by parser to user-space location of current command
++ * @cmd:		set by parser to kernel copy of command payload
++ * @items:		points to item array in @cmd
++ * @items_size:		size of @items in bytes
++ *
++ * This structure is used to parse ioctl command payloads on each invocation.
++ * The ioctl handler has to pre-fill the flags and allowed items before passing
++ * the object to kdbus_args_parse(). The parser will copy the command payload
++ * into kernel-space and verify the correctness of the data.
++ */
++struct kdbus_args {
++	u64 allowed_flags;
++	size_t argc;
++	struct kdbus_arg *argv;
++
++	struct kdbus_cmd __user *user;
++	struct kdbus_cmd *cmd;
++
++	struct kdbus_item *items;
++	size_t items_size;
++};
++
++int __kdbus_args_parse(struct kdbus_args *args, void __user *argp,
++		       size_t type_size, size_t items_offset, void **out);
++int kdbus_args_clear(struct kdbus_args *args, int ret);
++
++#define kdbus_args_parse(_args, _argp, _v)                              \
++	({                                                              \
++		BUILD_BUG_ON(offsetof(typeof(**(_v)), size) !=          \
++			     offsetof(struct kdbus_cmd, size));         \
++		BUILD_BUG_ON(offsetof(typeof(**(_v)), flags) !=         \
++			     offsetof(struct kdbus_cmd, flags));        \
++		BUILD_BUG_ON(offsetof(typeof(**(_v)), return_flags) !=  \
++			     offsetof(struct kdbus_cmd, return_flags)); \
++		__kdbus_args_parse((_args), (_argp), sizeof(**(_v)),    \
++				   offsetof(typeof(**(_v)), items),     \
++				   (void **)(_v));                      \
++	})
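++
++/*
++ * Typical command-handler usage, mirroring the callers elsewhere in this
++ * patch:
++ *
++ *	struct kdbus_cmd *cmd;
++ *	int ret;
++ *
++ *	struct kdbus_arg argv[] = {
++ *		{ .type = KDBUS_ITEM_NEGOTIATE },
++ *		{ .type = KDBUS_ITEM_MAKE_NAME, .mandatory = true },
++ *	};
++ *	struct kdbus_args args = {
++ *		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++ *		.argv = argv,
++ *		.argc = ARRAY_SIZE(argv),
++ *	};
++ *
++ *	ret = kdbus_args_parse(&args, argp, &cmd);
++ *	if (ret != 0)
++ *		return ret;
++ *	... use args.items and cmd ...
++ *	return kdbus_args_clear(&args, ret);
++ *
++ * A negative parse result is an error; a positive one means only flag/item
++ * negotiation was requested and no further processing is necessary.
++ */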
++
++#endif
+diff --git a/ipc/kdbus/item.c b/ipc/kdbus/item.c
+new file mode 100644
+index 0000000..745ad54
+--- /dev/null
++++ b/ipc/kdbus/item.c
+@@ -0,0 +1,339 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/ctype.h>
++#include <linux/fs.h>
++#include <linux/string.h>
++
++#include "item.h"
++#include "limits.h"
++#include "util.h"
++
++/*
++ * This verifies that the string at position @str with size @size is properly
++ * zero-terminated and contains no 0-byte except the terminating one.
++ */
++static bool kdbus_str_valid(const char *str, size_t size)
++{
++	return size > 0 && memchr(str, '\0', size) == str + size - 1;
++}
++
++/**
++ * kdbus_item_validate_name() - validate an item containing a name
++ * @item:		Item to validate
++ *
++ * Return: zero on success or a negative error code on failure
++ */
++int kdbus_item_validate_name(const struct kdbus_item *item)
++{
++	const char *name = item->str;
++	unsigned int i;
++	size_t len;
++
++	if (item->size < KDBUS_ITEM_HEADER_SIZE + 2)
++		return -EINVAL;
++
++	if (item->size > KDBUS_ITEM_HEADER_SIZE +
++			 KDBUS_SYSNAME_MAX_LEN + 1)
++		return -ENAMETOOLONG;
++
++	if (!kdbus_str_valid(name, KDBUS_ITEM_PAYLOAD_SIZE(item)))
++		return -EINVAL;
++
++	len = strlen(name);
++	if (len == 0)
++		return -EINVAL;
++
++	for (i = 0; i < len; i++) {
++		if (isalpha(name[i]))
++			continue;
++		if (isdigit(name[i]))
++			continue;
++		if (name[i] == '_')
++			continue;
++		if (i > 0 && i + 1 < len && (name[i] == '-' || name[i] == '.'))
++			continue;
++
++		return -EINVAL;
++	}
++
++	return 0;
++}
++
++/**
++ * kdbus_item_validate() - validate a single item
++ * @item:	item to validate
++ *
++ * Return: 0 if item is valid, negative error code if not.
++ */
++int kdbus_item_validate(const struct kdbus_item *item)
++{
++	size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
++	size_t l;
++	int ret;
++
++	BUILD_BUG_ON(KDBUS_ITEM_HEADER_SIZE !=
++		     sizeof(struct kdbus_item_header));
++
++	if (item->size < KDBUS_ITEM_HEADER_SIZE)
++		return -EINVAL;
++
++	switch (item->type) {
++	case KDBUS_ITEM_NEGOTIATE:
++		if (payload_size % sizeof(u64) != 0)
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_PAYLOAD_VEC:
++		if (payload_size != sizeof(struct kdbus_vec))
++			return -EINVAL;
++		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_PAYLOAD_OFF:
++		if (payload_size != sizeof(struct kdbus_vec))
++			return -EINVAL;
++		if (item->vec.size == 0 || item->vec.size > SIZE_MAX)
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_PAYLOAD_MEMFD:
++		if (payload_size != sizeof(struct kdbus_memfd))
++			return -EINVAL;
++		if (item->memfd.size == 0 || item->memfd.size > SIZE_MAX)
++			return -EINVAL;
++		if (item->memfd.fd < 0)
++			return -EBADF;
++		break;
++
++	case KDBUS_ITEM_FDS:
++		if (payload_size % sizeof(int) != 0)
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_CANCEL_FD:
++		if (payload_size != sizeof(int))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_BLOOM_PARAMETER:
++		if (payload_size != sizeof(struct kdbus_bloom_parameter))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_BLOOM_FILTER:
++		/* followed by the bloom-mask, depends on the bloom-size */
++		if (payload_size < sizeof(struct kdbus_bloom_filter))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_BLOOM_MASK:
++		/* size depends on bloom-size of bus */
++		break;
++
++	case KDBUS_ITEM_CONN_DESCRIPTION:
++	case KDBUS_ITEM_MAKE_NAME:
++		ret = kdbus_item_validate_name(item);
++		if (ret < 0)
++			return ret;
++		break;
++
++	case KDBUS_ITEM_ATTACH_FLAGS_SEND:
++	case KDBUS_ITEM_ATTACH_FLAGS_RECV:
++	case KDBUS_ITEM_ID:
++		if (payload_size != sizeof(u64))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_TIMESTAMP:
++		if (payload_size != sizeof(struct kdbus_timestamp))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_CREDS:
++		if (payload_size != sizeof(struct kdbus_creds))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_AUXGROUPS:
++		if (payload_size % sizeof(u32) != 0)
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_NAME:
++	case KDBUS_ITEM_DST_NAME:
++	case KDBUS_ITEM_PID_COMM:
++	case KDBUS_ITEM_TID_COMM:
++	case KDBUS_ITEM_EXE:
++	case KDBUS_ITEM_CMDLINE:
++	case KDBUS_ITEM_CGROUP:
++	case KDBUS_ITEM_SECLABEL:
++		if (!kdbus_str_valid(item->str, payload_size))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_CAPS:
++		if (payload_size < sizeof(u32))
++			return -EINVAL;
++		if (payload_size < sizeof(u32) +
++		    4 * CAP_TO_INDEX(item->caps.last_cap) * sizeof(u32))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_AUDIT:
++		if (payload_size != sizeof(struct kdbus_audit))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_POLICY_ACCESS:
++		if (payload_size != sizeof(struct kdbus_policy_access))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_NAME_ADD:
++	case KDBUS_ITEM_NAME_REMOVE:
++	case KDBUS_ITEM_NAME_CHANGE:
++		if (payload_size < sizeof(struct kdbus_notify_name_change))
++			return -EINVAL;
++		l = payload_size - offsetof(struct kdbus_notify_name_change,
++					    name);
++		if (l > 0 && !kdbus_str_valid(item->name_change.name, l))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_ID_ADD:
++	case KDBUS_ITEM_ID_REMOVE:
++		if (payload_size != sizeof(struct kdbus_notify_id_change))
++			return -EINVAL;
++		break;
++
++	case KDBUS_ITEM_REPLY_TIMEOUT:
++	case KDBUS_ITEM_REPLY_DEAD:
++		if (payload_size != 0)
++			return -EINVAL;
++		break;
++
++	default:
++		break;
++	}
++
++	return 0;
++}
++
++/**
++ * kdbus_items_validate() - validate items passed by user-space
++ * @items:		items to validate
++ * @items_size:		number of items
++ *
++ * This verifies that the passed items pointer is consistent and valid.
++ * Furthermore, each item is checked for:
++ *  - valid "size" value
++ *  - payload is of expected type
++ *  - payload is fully included in the item
++ *  - string payloads are zero-terminated
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_items_validate(const struct kdbus_item *items, size_t items_size)
++{
++	const struct kdbus_item *item;
++	int ret;
++
++	KDBUS_ITEMS_FOREACH(item, items, items_size) {
++		if (!KDBUS_ITEM_VALID(item, items, items_size))
++			return -EINVAL;
++
++		ret = kdbus_item_validate(item);
++		if (ret < 0)
++			return ret;
++	}
++
++	if (!KDBUS_ITEMS_END(item, items, items_size))
++		return -EINVAL;
++
++	return 0;
++}
++
++static struct kdbus_item *kdbus_items_get(const struct kdbus_item *items,
++					  size_t items_size,
++					  unsigned int item_type)
++{
++	const struct kdbus_item *iter, *found = NULL;
++
++	KDBUS_ITEMS_FOREACH(iter, items, items_size) {
++		if (iter->type == item_type) {
++			if (found)
++				return ERR_PTR(-EEXIST);
++			found = iter;
++		}
++	}
++
++	return (struct kdbus_item *)found ? : ERR_PTR(-EBADMSG);
++}
++
++/**
++ * kdbus_items_get_str() - get string from a list of items
++ * @items:		The items to walk
++ * @items_size:		The size of all items
++ * @item_type:		The item type to look for
++ *
++ * This function walks a list of items and searches for items of type
++ * @item_type. If it finds exactly one such item, the .str member of that
++ * item is returned.
++ *
++ * Return: the string, if the item was found exactly once, ERR_PTR(-EEXIST)
++ * if the item was found more than once, and ERR_PTR(-EBADMSG) if there was
++ * no item of the given type.
++ */
++const char *kdbus_items_get_str(const struct kdbus_item *items,
++				size_t items_size,
++				unsigned int item_type)
++{
++	const struct kdbus_item *item;
++
++	item = kdbus_items_get(items, items_size, item_type);
++	return IS_ERR(item) ? ERR_CAST(item) : item->str;
++}
++
++/**
++ * kdbus_item_set() - Set item content
++ * @item:	The item to modify
++ * @type:	The item type to set (KDBUS_ITEM_*)
++ * @data:	Data to copy to item->data, may be %NULL
++ * @len:	Number of bytes in @data
++ *
++ * This sets type, size and data fields of an item. If @data is NULL, the data
++ * memory is cleared.
++ *
++ * Note that you must align your @data memory to 8 bytes. Trailing padding (in
++ * case @len is not 8-byte aligned) is cleared by this call.
++ *
++ * Returns: Pointer to the following item.
++ */
++struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
++				  const void *data, size_t len)
++{
++	item->type = type;
++	item->size = KDBUS_ITEM_HEADER_SIZE + len;
++
++	if (data) {
++		memcpy(item->data, data, len);
++		memset(item->data + len, 0, KDBUS_ALIGN8(len) - len);
++	} else {
++		memset(item->data, 0, KDBUS_ALIGN8(len));
++	}
++
++	return KDBUS_ITEM_NEXT(item);
++}
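
The validators in item.c all rest on one string rule, implemented by
kdbus_str_valid() above: a buffer of @size bytes holds a valid string only if
its single 0-byte is the last byte. A stand-alone sketch of that rule; the
test vectors are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool str_valid(const char *str, size_t size)
{
	/* valid iff the only 0-byte sits exactly at the end */
	return size > 0 && memchr(str, '\0', size) == str + size - 1;
}

int main(void)
{
	printf("%d\n", str_valid("ok\0", 3));	/* 1: terminator at end */
	printf("%d\n", str_valid("no", 2));	/* 0: no terminator     */
	printf("%d\n", str_valid("a\0b\0", 4));	/* 0: embedded 0-byte   */
	return 0;
}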
+diff --git a/ipc/kdbus/item.h b/ipc/kdbus/item.h
+new file mode 100644
+index 0000000..eeefd8b
+--- /dev/null
++++ b/ipc/kdbus/item.h
+@@ -0,0 +1,64 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_ITEM_H
++#define __KDBUS_ITEM_H
++
++#include <linux/kernel.h>
++#include <uapi/linux/kdbus.h>
++
++#include "util.h"
++
++/* generic access and iterators over a stream of items */
++#define KDBUS_ITEM_NEXT(_i) (typeof(_i))(((u8 *)_i) + KDBUS_ALIGN8((_i)->size))
++#define KDBUS_ITEMS_SIZE(_h, _is) ((_h)->size - offsetof(typeof(*_h), _is))
++#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
++#define KDBUS_ITEM_SIZE(_s) KDBUS_ALIGN8(KDBUS_ITEM_HEADER_SIZE + (_s))
++#define KDBUS_ITEM_PAYLOAD_SIZE(_i) ((_i)->size - KDBUS_ITEM_HEADER_SIZE)
++
++#define KDBUS_ITEMS_FOREACH(_i, _is, _s)				\
++	for (_i = _is;							\
++	     ((u8 *)(_i) < (u8 *)(_is) + (_s)) &&			\
++	       ((u8 *)(_i) >= (u8 *)(_is));				\
++	     _i = KDBUS_ITEM_NEXT(_i))
++
++#define KDBUS_ITEM_VALID(_i, _is, _s)					\
++	((_i)->size >= KDBUS_ITEM_HEADER_SIZE &&			\
++	 (u8 *)(_i) + (_i)->size > (u8 *)(_i) &&			\
++	 (u8 *)(_i) + (_i)->size <= (u8 *)(_is) + (_s) &&		\
++	 (u8 *)(_i) >= (u8 *)(_is))
++
++#define KDBUS_ITEMS_END(_i, _is, _s)					\
++	((u8 *)_i == ((u8 *)(_is) + KDBUS_ALIGN8(_s)))
++
++/**
++ * struct kdbus_item_header - Describes the fixed part of an item
++ * @size:	The total size of the item
++ * @type:	The item type, one of KDBUS_ITEM_*
++ */
++struct kdbus_item_header {
++	u64 size;
++	u64 type;
++};
++
++int kdbus_item_validate_name(const struct kdbus_item *item);
++int kdbus_item_validate(const struct kdbus_item *item);
++int kdbus_items_validate(const struct kdbus_item *items, size_t items_size);
++const char *kdbus_items_get_str(const struct kdbus_item *items,
++				size_t items_size,
++				unsigned int item_type);
++struct kdbus_item *kdbus_item_set(struct kdbus_item *item, u64 type,
++				  const void *data, size_t len);
++
++#endif
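
The macros above walk a stream of items whose declared sizes are unaligned but
whose placement is 8-byte aligned, so KDBUS_ITEM_NEXT must round each size up
before stepping. A user-space sketch of that iteration over a simplified
two-field item (not the full uapi struct):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN8(l)	(((l) + 7) & ~7ULL)

struct item {
	uint64_t size;	/* header + payload, unaligned */
	uint64_t type;
	char data[];
};

#define ITEM_HDR	offsetof(struct item, data)
#define ITEM_NEXT(i)	((struct item *)((uint8_t *)(i) + ALIGN8((i)->size)))

int main(void)
{
	uint64_t buf[16] = { 0 };	/* 8-byte aligned backing store */
	struct item *i = (struct item *)buf;
	size_t used;

	/* item 1: 5 payload bytes, stepped over via 8-byte alignment */
	i->size = ITEM_HDR + 5;
	i->type = 1;
	memcpy(i->data, "hello", 5);
	i = ITEM_NEXT(i);

	/* item 2: empty payload */
	i->size = ITEM_HDR;
	i->type = 2;
	used = (uint8_t *)ITEM_NEXT(i) - (uint8_t *)buf;

	/* walk the stream exactly like KDBUS_ITEMS_FOREACH does */
	for (i = (struct item *)buf; (uint8_t *)i < (uint8_t *)buf + used;
	     i = ITEM_NEXT(i))
		printf("item type=%llu size=%llu\n",
		       (unsigned long long)i->type,
		       (unsigned long long)i->size);
	return 0;
}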
+diff --git a/ipc/kdbus/limits.h b/ipc/kdbus/limits.h
+new file mode 100644
+index 0000000..6450f58
+--- /dev/null
++++ b/ipc/kdbus/limits.h
+@@ -0,0 +1,64 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_DEFAULTS_H
++#define __KDBUS_DEFAULTS_H
++
++#include <linux/kernel.h>
++
++/* maximum size of message header and items */
++#define KDBUS_MSG_MAX_SIZE		SZ_8K
++
++/* maximum number of message items */
++#define KDBUS_MSG_MAX_ITEMS		128
++
++/* maximum number of memfd items per message */
++#define KDBUS_MSG_MAX_MEMFD_ITEMS	16
++
++/* max size of ioctl command data */
++#define KDBUS_CMD_MAX_SIZE		SZ_32K
++
++/* maximum number of inflight fds in a target queue per user */
++#define KDBUS_CONN_MAX_FDS_PER_USER	16
++
++/* maximum message payload size */
++#define KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE		SZ_2M
++
++/* maximum size of bloom bit field in bytes */
++#define KDBUS_BUS_BLOOM_MAX_SIZE		SZ_4K
++
++/* maximum length of well-known bus name */
++#define KDBUS_NAME_MAX_LEN			255
++
++/* maximum length of bus, domain, ep name */
++#define KDBUS_SYSNAME_MAX_LEN			63
++
++/* maximum number of matches per connection */
++#define KDBUS_MATCH_MAX				256
++
++/* maximum number of queued messages from the same individual user */
++#define KDBUS_CONN_MAX_MSGS			256
++
++/* maximum number of well-known names per connection */
++#define KDBUS_CONN_MAX_NAMES			256
++
++/* maximum number of queued requests waiting for a reply */
++#define KDBUS_CONN_MAX_REQUESTS_PENDING		128
++
++/* maximum number of connections per user in one domain */
++#define KDBUS_USER_MAX_CONN			1024
++
++/* maximum number of buses per user in one domain */
++#define KDBUS_USER_MAX_BUSES			16
++
++#endif
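
These limits are enforced as plain quota checks at submission time rather than
through any reservation scheme. A sketch of the pattern with
KDBUS_CONN_MAX_MSGS as the example limit; the queue is reduced to a bare
counter and the error handling is illustrative:

#include <stdio.h>

#define KDBUS_CONN_MAX_MSGS	256

static int queue_msg(unsigned int *queued)
{
	if (*queued >= KDBUS_CONN_MAX_MSGS)
		return -1;	/* over quota; the driver rejects with an errno */
	(*queued)++;
	return 0;
}

int main(void)
{
	unsigned int queued = 0, rejected = 0;
	unsigned int i;

	for (i = 0; i < 300; i++)
		if (queue_msg(&queued) < 0)
			rejected++;

	printf("queued=%u rejected=%u\n", queued, rejected);
	return 0;
}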
+diff --git a/ipc/kdbus/main.c b/ipc/kdbus/main.c
+new file mode 100644
+index 0000000..785f529
+--- /dev/null
++++ b/ipc/kdbus/main.c
+@@ -0,0 +1,125 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++
++#include "util.h"
++#include "fs.h"
++#include "handle.h"
++#include "metadata.h"
++#include "node.h"
++
++/*
++ * This is a simplified outline of the internal kdbus object relations, for
++ * those interested in the inner life of the driver implementation.
++ *
++ * From a mount point's (domain's) perspective:
++ *
++ * struct kdbus_domain
++ *   |» struct kdbus_user *user (many, owned)
++ *   '» struct kdbus_node node (embedded)
++ *       |» struct kdbus_node children (many, referenced)
++ *       |» struct kdbus_node *parent (pinned)
++ *       '» struct kdbus_bus (many, pinned)
++ *           |» struct kdbus_node node (embedded)
++ *           '» struct kdbus_ep (many, pinned)
++ *               |» struct kdbus_node node (embedded)
++ *               |» struct kdbus_bus *bus (pinned)
++ *               |» struct kdbus_conn conn_list (many, pinned)
++ *               |   |» struct kdbus_ep *ep (pinned)
++ *               |   |» struct kdbus_name_entry *activator_of (owned)
++ *               |   |» struct kdbus_match_db *match_db (owned)
++ *               |   |» struct kdbus_meta *meta (owned)
++ *               |   |» struct kdbus_match_db *match_db (owned)
++ *               |   |    '» struct kdbus_match_entry (many, owned)
++ *               |   |
++ *               |   |» struct kdbus_pool *pool (owned)
++ *               |   |    '» struct kdbus_pool_slice *slices (many, owned)
++ *               |   |       '» struct kdbus_pool *pool (pinned)
++ *               |   |
++ *               |   |» struct kdbus_user *user (pinned)
++ *               |   `» struct kdbus_queue_entry entries (many, embedded)
++ *               |        |» struct kdbus_pool_slice *slice (pinned)
++ *               |        |» struct kdbus_conn_reply *reply (owned)
++ *               |        '» struct kdbus_user *user (pinned)
++ *               |
++ *               '» struct kdbus_user *user (pinned)
++ *                   '» struct kdbus_policy_db policy_db (embedded)
++ *                        |» struct kdbus_policy_db_entry (many, owned)
++ *                        |   |» struct kdbus_conn (pinned)
++ *                        |   '» struct kdbus_ep (pinned)
++ *                        |
++ *                        '» struct kdbus_policy_db_cache_entry (many, owned)
++ *                            '» struct kdbus_conn (pinned)
++ *
++ * For the life-time of a file descriptor derived from calling open() on a file
++ * inside the mount point:
++ *
++ * struct kdbus_handle
++ *  |» struct kdbus_meta *meta (owned)
++ *  |» struct kdbus_ep *ep (pinned)
++ *  |» struct kdbus_conn *conn (owned)
++ *  '» struct kdbus_ep *ep (owned)
++ */
++
++/* kdbus mount-point /sys/fs/kdbus */
++static struct kobject *kdbus_dir;
++
++/* global module option to apply a mask to exported metadata */
++unsigned long long kdbus_meta_attach_mask = KDBUS_ATTACH_TIMESTAMP |
++					    KDBUS_ATTACH_CREDS |
++					    KDBUS_ATTACH_PIDS |
++					    KDBUS_ATTACH_AUXGROUPS |
++					    KDBUS_ATTACH_NAMES |
++					    KDBUS_ATTACH_SECLABEL |
++					    KDBUS_ATTACH_CONN_DESCRIPTION;
++MODULE_PARM_DESC(attach_flags_mask, "Attach-flags mask for exported metadata");
++module_param_named(attach_flags_mask, kdbus_meta_attach_mask, ullong, 0644);
++
++static int __init kdbus_init(void)
++{
++	int ret;
++
++	kdbus_dir = kobject_create_and_add(KBUILD_MODNAME, fs_kobj);
++	if (!kdbus_dir)
++		return -ENOMEM;
++
++	ret = kdbus_fs_init();
++	if (ret < 0) {
++		pr_err("cannot register filesystem: %d\n", ret);
++		goto exit_dir;
++	}
++
++	pr_info("initialized\n");
++	return 0;
++
++exit_dir:
++	kobject_put(kdbus_dir);
++	return ret;
++}
++
++static void __exit kdbus_exit(void)
++{
++	kdbus_fs_exit();
++	kobject_put(kdbus_dir);
++}
++
++module_init(kdbus_init);
++module_exit(kdbus_exit);
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("D-Bus, powerful, easy to use interprocess communication");
++MODULE_ALIAS_FS(KBUILD_MODNAME "fs");
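
The object diagram in main.c above distinguishes owned references, which an
owner drops when it is destroyed, from pinned references, which merely keep
the target alive without managing it. A toy user-space model of that split;
the plain-int counter is a stand-in for the kernel's kref and is not
thread-safe:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refs;
	const char *name;
};

static struct obj *obj_ref(struct obj *o)
{
	if (o)
		o->refs++;
	return o;
}

static struct obj *obj_unref(struct obj *o)
{
	if (o && --o->refs == 0) {
		printf("freeing %s\n", o->name);
		free(o);
	}
	return NULL;
}

int main(void)
{
	struct obj *bus = calloc(1, sizeof(*bus));
	struct obj *pinned;

	bus->refs = 1;		/* owned by its creator */
	bus->name = "bus";

	pinned = obj_ref(bus);	/* e.g. an endpoint pinning its bus */

	obj_unref(bus);		/* owner goes away: object survives */
	obj_unref(pinned);	/* last pin dropped: object is freed */
	return 0;
}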
+diff --git a/ipc/kdbus/match.c b/ipc/kdbus/match.c
+new file mode 100644
+index 0000000..30cec1c
+--- /dev/null
++++ b/ipc/kdbus/match.c
+@@ -0,0 +1,559 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/fs.h>
++#include <linux/hash.h>
++#include <linux/init.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "item.h"
++#include "match.h"
++#include "message.h"
++#include "names.h"
++
++/**
++ * struct kdbus_match_db - message filters
++ * @entries_list:	List of matches
++ * @mdb_rwlock:		Match data lock
++ * @entries_count:	Number of entries in database
++ */
++struct kdbus_match_db {
++	struct list_head entries_list;
++	struct rw_semaphore mdb_rwlock;
++	unsigned int entries_count;
++};
++
++/**
++ * struct kdbus_match_entry - a match database entry
++ * @cookie:		User-supplied cookie to look up the entry
++ * @list_entry:		The list entry element for the db list
++ * @rules_list:		The list head for tracking rules of this entry
++ */
++struct kdbus_match_entry {
++	u64 cookie;
++	struct list_head list_entry;
++	struct list_head rules_list;
++};
++
++/**
++ * struct kdbus_bloom_mask - mask to match against filter
++ * @generations:	Number of generations carried
++ * @data:		Array of bloom bit fields
++ */
++struct kdbus_bloom_mask {
++	u64 generations;
++	u64 *data;
++};
++
++/**
++ * struct kdbus_match_rule - a rule appended to a match entry
++ * @type:		An item type to match against
++ * @bloom_mask:		Bloom mask to match a message's filter against, used
++ *			with KDBUS_ITEM_BLOOM_MASK
++ * @name:		Name to match against, used with KDBUS_ITEM_NAME,
++ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE}
++ * @old_id:		ID to match against, used with
++ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE},
++ *			KDBUS_ITEM_ID_REMOVE
++ * @new_id:		ID to match against, used with
++ *			KDBUS_ITEM_NAME_{ADD,REMOVE,CHANGE},
++ *			KDBUS_ITEM_ID_REMOVE
++ * @src_id:		ID to match against, used with KDBUS_ITEM_ID
++ * @rules_entry:	Entry in the entry's rules list
++ */
++struct kdbus_match_rule {
++	u64 type;
++	union {
++		struct kdbus_bloom_mask bloom_mask;
++		struct {
++			char *name;
++			u64 old_id;
++			u64 new_id;
++		};
++		u64 src_id;
++	};
++	struct list_head rules_entry;
++};
++
++static void kdbus_match_rule_free(struct kdbus_match_rule *rule)
++{
++	if (!rule)
++		return;
++
++	switch (rule->type) {
++	case KDBUS_ITEM_BLOOM_MASK:
++		kfree(rule->bloom_mask.data);
++		break;
++
++	case KDBUS_ITEM_NAME:
++	case KDBUS_ITEM_NAME_ADD:
++	case KDBUS_ITEM_NAME_REMOVE:
++	case KDBUS_ITEM_NAME_CHANGE:
++		kfree(rule->name);
++		break;
++
++	case KDBUS_ITEM_ID:
++	case KDBUS_ITEM_ID_ADD:
++	case KDBUS_ITEM_ID_REMOVE:
++		break;
++
++	default:
++		BUG();
++	}
++
++	list_del(&rule->rules_entry);
++	kfree(rule);
++}
++
++static void kdbus_match_entry_free(struct kdbus_match_entry *entry)
++{
++	struct kdbus_match_rule *r, *tmp;
++
++	if (!entry)
++		return;
++
++	list_for_each_entry_safe(r, tmp, &entry->rules_list, rules_entry)
++		kdbus_match_rule_free(r);
++
++	list_del(&entry->list_entry);
++	kfree(entry);
++}
++
++/**
++ * kdbus_match_db_free() - free match db resources
++ * @mdb:		The match database
++ */
++void kdbus_match_db_free(struct kdbus_match_db *mdb)
++{
++	struct kdbus_match_entry *entry, *tmp;
++
++	if (!mdb)
++		return;
++
++	list_for_each_entry_safe(entry, tmp, &mdb->entries_list, list_entry)
++		kdbus_match_entry_free(entry);
++
++	kfree(mdb);
++}
++
++/**
++ * kdbus_match_db_new() - create a new match database
++ *
++ * Return: a new kdbus_match_db on success, ERR_PTR on failure.
++ */
++struct kdbus_match_db *kdbus_match_db_new(void)
++{
++	struct kdbus_match_db *d;
++
++	d = kzalloc(sizeof(*d), GFP_KERNEL);
++	if (!d)
++		return ERR_PTR(-ENOMEM);
++
++	init_rwsem(&d->mdb_rwlock);
++	INIT_LIST_HEAD(&d->entries_list);
++
++	return d;
++}
++
++static bool kdbus_match_bloom(const struct kdbus_bloom_filter *filter,
++			      const struct kdbus_bloom_mask *mask,
++			      const struct kdbus_conn *conn)
++{
++	size_t n = conn->ep->bus->bloom.size / sizeof(u64);
++	const u64 *m;
++	size_t i;
++
++	/*
++	 * The message's filter carries a generation identifier, while the
++	 * match's mask may carry an array of multiple generations of the
++	 * mask. Select the mask generation that most closely matches the
++	 * filter's generation.
++	 */
++	m = mask->data + (min(filter->generation, mask->generations - 1) * n);
++
++	/*
++	 * The message's filter contains the message's properties, while the
++	 * match's mask contains the properties to look for in the message.
++	 * Check the mask bit field against the filter bit field to see
++	 * whether the message possibly carries the properties the
++	 * connection has subscribed to.
++	 */
++	for (i = 0; i < n; i++)
++		if ((filter->data[i] & m[i]) != m[i])
++			return false;
++
++	return true;
++}
++
++static bool kdbus_match_rules(const struct kdbus_match_entry *entry,
++			      struct kdbus_conn *conn_src,
++			      struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_match_rule *r;
++
++	if (conn_src)
++		lockdep_assert_held(&conn_src->ep->bus->name_registry->rwlock);
++
++	/*
++	 * Walk all the rules and bail out immediately
++	 * if any of them is unsatisfied.
++	 */
++
++	list_for_each_entry(r, &entry->rules_list, rules_entry) {
++		if (conn_src) {
++			/* messages from userspace */
++
++			switch (r->type) {
++			case KDBUS_ITEM_BLOOM_MASK:
++				if (!kdbus_match_bloom(kmsg->bloom_filter,
++						       &r->bloom_mask,
++						       conn_src))
++					return false;
++				break;
++
++			case KDBUS_ITEM_ID:
++				if (r->src_id != conn_src->id &&
++				    r->src_id != KDBUS_MATCH_ID_ANY)
++					return false;
++
++				break;
++
++			case KDBUS_ITEM_NAME:
++				if (!kdbus_conn_has_name(conn_src, r->name))
++					return false;
++
++				break;
++
++			default:
++				return false;
++			}
++		} else {
++			/* kernel notifications */
++
++			if (kmsg->notify_type != r->type)
++				return false;
++
++			switch (r->type) {
++			case KDBUS_ITEM_ID_ADD:
++				if (r->new_id != KDBUS_MATCH_ID_ANY &&
++				    r->new_id != kmsg->notify_new_id)
++					return false;
++
++				break;
++
++			case KDBUS_ITEM_ID_REMOVE:
++				if (r->old_id != KDBUS_MATCH_ID_ANY &&
++				    r->old_id != kmsg->notify_old_id)
++					return false;
++
++				break;
++
++			case KDBUS_ITEM_NAME_ADD:
++			case KDBUS_ITEM_NAME_CHANGE:
++			case KDBUS_ITEM_NAME_REMOVE:
++				if ((r->old_id != KDBUS_MATCH_ID_ANY &&
++				     r->old_id != kmsg->notify_old_id) ||
++				    (r->new_id != KDBUS_MATCH_ID_ANY &&
++				     r->new_id != kmsg->notify_new_id) ||
++				    (r->name && kmsg->notify_name &&
++				     strcmp(r->name, kmsg->notify_name) != 0))
++					return false;
++
++				break;
++
++			default:
++				return false;
++			}
++		}
++	}
++
++	return true;
++}
++
++/**
++ * kdbus_match_db_match_kmsg() - match a kmsg object against the database entries
++ * @mdb:		The match database
++ * @conn_src:		The connection object originating the message
++ * @kmsg:		The kmsg to perform the match on
++ *
++ * This function will walk through all the database entries previously uploaded
++ * with kdbus_match_db_add(). As soon as any of them has an all-satisfied rule
++ * set, this function will return true.
++ *
++ * The caller must hold the registry lock of conn_src->ep->bus if conn_src
++ * is non-NULL.
++ *
++ * Return: true if there was a matching database entry, false otherwise.
++ */
++bool kdbus_match_db_match_kmsg(struct kdbus_match_db *mdb,
++			       struct kdbus_conn *conn_src,
++			       struct kdbus_kmsg *kmsg)
++{
++	struct kdbus_match_entry *entry;
++	bool matched = false;
++
++	down_read(&mdb->mdb_rwlock);
++	list_for_each_entry(entry, &mdb->entries_list, list_entry) {
++		matched = kdbus_match_rules(entry, conn_src, kmsg);
++		if (matched)
++			break;
++	}
++	up_read(&mdb->mdb_rwlock);
++
++	return matched;
++}
++
++static int kdbus_match_db_remove_unlocked(struct kdbus_match_db *mdb,
++					  u64 cookie)
++{
++	struct kdbus_match_entry *entry, *tmp;
++	bool found = false;
++
++	list_for_each_entry_safe(entry, tmp, &mdb->entries_list, list_entry)
++		if (entry->cookie == cookie) {
++			kdbus_match_entry_free(entry);
++			--mdb->entries_count;
++			found = true;
++		}
++
++	return found ? 0 : -EBADSLT;
++}
++
++/**
++ * kdbus_cmd_match_add() - handle KDBUS_CMD_MATCH_ADD
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * One call to this function (or one ioctl(KDBUS_CMD_MATCH_ADD), respectively)
++ * adds one new database entry with n rules attached to it. Each rule is
++ * described by a kdbus_item, and an entry is considered matching if all
++ * its rules are satisfied.
++ *
++ * The items attached to a kdbus_cmd_match struct have the following mapping:
++ *
++ * KDBUS_ITEM_BLOOM_MASK:	A bloom mask
++ * KDBUS_ITEM_NAME:		A connection's source name
++ * KDBUS_ITEM_ID:		A connection ID
++ * KDBUS_ITEM_NAME_ADD:
++ * KDBUS_ITEM_NAME_REMOVE:
++ * KDBUS_ITEM_NAME_CHANGE:	Well-known name changes, carry
++ *				kdbus_notify_name_change
++ * KDBUS_ITEM_ID_ADD:
++ * KDBUS_ITEM_ID_REMOVE:	Connection ID changes, carry
++ *				kdbus_notify_id_change
++ *
++ * For kdbus_notify_{id,name}_change structs, only the ID and name fields
++ * are looked at when adding an entry. The flags are unused.
++ *
++ * Also note that KDBUS_ITEM_BLOOM_MASK, KDBUS_ITEM_NAME and KDBUS_ITEM_ID
++ * are used to match messages from userspace, while the others apply to
++ * kernel-generated notifications.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_match_db *mdb = conn->match_db;
++	struct kdbus_match_entry *entry = NULL;
++	struct kdbus_cmd_match *cmd;
++	struct kdbus_item *item;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_BLOOM_MASK, .multiple = true },
++		{ .type = KDBUS_ITEM_NAME, .multiple = true },
++		{ .type = KDBUS_ITEM_ID, .multiple = true },
++		{ .type = KDBUS_ITEM_NAME_ADD, .multiple = true },
++		{ .type = KDBUS_ITEM_NAME_REMOVE, .multiple = true },
++		{ .type = KDBUS_ITEM_NAME_CHANGE, .multiple = true },
++		{ .type = KDBUS_ITEM_ID_ADD, .multiple = true },
++		{ .type = KDBUS_ITEM_ID_REMOVE, .multiple = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_MATCH_REPLACE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++	if (!entry) {
++		ret = -ENOMEM;
++		goto exit;
++	}
++
++	entry->cookie = cmd->cookie;
++	INIT_LIST_HEAD(&entry->list_entry);
++	INIT_LIST_HEAD(&entry->rules_list);
++
++	KDBUS_ITEMS_FOREACH(item, cmd->items, KDBUS_ITEMS_SIZE(cmd, items)) {
++		struct kdbus_match_rule *rule;
++		size_t size = item->size - offsetof(struct kdbus_item, data);
++
++		rule = kzalloc(sizeof(*rule), GFP_KERNEL);
++		if (!rule) {
++			ret = -ENOMEM;
++			goto exit;
++		}
++
++		rule->type = item->type;
++		INIT_LIST_HEAD(&rule->rules_entry);
++
++		switch (item->type) {
++		case KDBUS_ITEM_BLOOM_MASK: {
++			u64 bsize = conn->ep->bus->bloom.size;
++			u64 generations;
++			u64 remainder;
++
++			generations = div64_u64_rem(size, bsize, &remainder);
++			if (size < bsize || remainder > 0) {
++				ret = -EDOM;
++				break;
++			}
++
++			rule->bloom_mask.data = kmemdup(item->data,
++							size, GFP_KERNEL);
++			if (!rule->bloom_mask.data) {
++				ret = -ENOMEM;
++				break;
++			}
++
++			rule->bloom_mask.generations = generations;
++			break;
++		}
++
++		case KDBUS_ITEM_NAME:
++			if (!kdbus_name_is_valid(item->str, false)) {
++				ret = -EINVAL;
++				break;
++			}
++
++			rule->name = kstrdup(item->str, GFP_KERNEL);
++			if (!rule->name)
++				ret = -ENOMEM;
++
++			break;
++
++		case KDBUS_ITEM_ID:
++			rule->src_id = item->id;
++			break;
++
++		case KDBUS_ITEM_NAME_ADD:
++		case KDBUS_ITEM_NAME_REMOVE:
++		case KDBUS_ITEM_NAME_CHANGE:
++			rule->old_id = item->name_change.old_id.id;
++			rule->new_id = item->name_change.new_id.id;
++
++			if (size > sizeof(struct kdbus_notify_name_change)) {
++				rule->name = kstrdup(item->name_change.name,
++						     GFP_KERNEL);
++				if (!rule->name)
++					ret = -ENOMEM;
++			}
++
++			break;
++
++		case KDBUS_ITEM_ID_ADD:
++		case KDBUS_ITEM_ID_REMOVE:
++			if (item->type == KDBUS_ITEM_ID_ADD)
++				rule->new_id = item->id_change.id;
++			else
++				rule->old_id = item->id_change.id;
++
++			break;
++		}
++
++		if (ret < 0) {
++			kdbus_match_rule_free(rule);
++			goto exit;
++		}
++
++		list_add_tail(&rule->rules_entry, &entry->rules_list);
++	}
++
++	down_write(&mdb->mdb_rwlock);
++
++	/* Remove any entry that has the same cookie as the current one. */
++	if (cmd->flags & KDBUS_MATCH_REPLACE)
++		kdbus_match_db_remove_unlocked(mdb, entry->cookie);
++
++	/*
++	 * If the above removal caught any entry, there will be room for the
++	 * new one.
++	 */
++	if (++mdb->entries_count > KDBUS_MATCH_MAX) {
++		--mdb->entries_count;
++		ret = -EMFILE;
++	} else {
++		list_add_tail(&entry->list_entry, &mdb->entries_list);
++		entry = NULL;
++	}
++
++	up_write(&mdb->mdb_rwlock);
++
++exit:
++	kdbus_match_entry_free(entry);
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_match_remove() - handle KDBUS_CMD_MATCH_REMOVE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_cmd_match *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	down_write(&conn->match_db->mdb_rwlock);
++	ret = kdbus_match_db_remove_unlocked(conn->match_db, cmd->cookie);
++	up_write(&conn->match_db->mdb_rwlock);
++
++	return kdbus_args_clear(&args, ret);
++}
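
kdbus_match_bloom() above reduces to one rule: a mask matches a filter iff
every bit set in the mask is also set in the filter, tested per 64-bit word.
A stand-alone sketch of that test with an arbitrary two-word bloom size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WORDS 2

static bool bloom_match(const uint64_t *filter, const uint64_t *mask)
{
	size_t i;

	/* every mask bit must also be present in the filter */
	for (i = 0; i < WORDS; i++)
		if ((filter[i] & mask[i]) != mask[i])
			return false;
	return true;
}

int main(void)
{
	uint64_t filter[WORDS] = { 0xff00, 0x0001 };
	uint64_t sub[WORDS]    = { 0x0f00, 0x0001 };	/* subset: match   */
	uint64_t other[WORDS]  = { 0x00f0, 0x0000 };	/* stray bits: no  */

	printf("%d %d\n", bloom_match(filter, sub),
	       bloom_match(filter, other));
	return 0;
}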
+diff --git a/ipc/kdbus/match.h b/ipc/kdbus/match.h
+new file mode 100644
+index 0000000..ea42929
+--- /dev/null
++++ b/ipc/kdbus/match.h
+@@ -0,0 +1,35 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_MATCH_H
++#define __KDBUS_MATCH_H
++
++struct kdbus_conn;
++struct kdbus_kmsg;
++struct kdbus_match_db;
++
++struct kdbus_match_db *kdbus_match_db_new(void);
++void kdbus_match_db_free(struct kdbus_match_db *db);
++int kdbus_match_db_add(struct kdbus_conn *conn,
++		       struct kdbus_cmd_match *cmd);
++int kdbus_match_db_remove(struct kdbus_conn *conn,
++			  struct kdbus_cmd_match *cmd);
++bool kdbus_match_db_match_kmsg(struct kdbus_match_db *db,
++			       struct kdbus_conn *conn_src,
++			       struct kdbus_kmsg *kmsg);
++
++int kdbus_cmd_match_add(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_match_remove(struct kdbus_conn *conn, void __user *argp);
++
++#endif
+diff --git a/ipc/kdbus/message.c b/ipc/kdbus/message.c
+new file mode 100644
+index 0000000..8096075
+--- /dev/null
++++ b/ipc/kdbus/message.c
+@@ -0,0 +1,616 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/capability.h>
++#include <linux/cgroup.h>
++#include <linux/cred.h>
++#include <linux/file.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/shmem_fs.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <net/sock.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "item.h"
++#include "match.h"
++#include "message.h"
++#include "names.h"
++#include "policy.h"
++
++#define KDBUS_KMSG_HEADER_SIZE offsetof(struct kdbus_kmsg, msg)
++
++static struct kdbus_msg_resources *kdbus_msg_resources_new(void)
++{
++	struct kdbus_msg_resources *r;
++
++	r = kzalloc(sizeof(*r), GFP_KERNEL);
++	if (!r)
++		return ERR_PTR(-ENOMEM);
++
++	kref_init(&r->kref);
++
++	return r;
++}
++
++static void __kdbus_msg_resources_free(struct kref *kref)
++{
++	struct kdbus_msg_resources *r =
++		container_of(kref, struct kdbus_msg_resources, kref);
++	size_t i;
++
++	for (i = 0; i < r->data_count; ++i) {
++		switch (r->data[i].type) {
++		case KDBUS_MSG_DATA_VEC:
++			/* nothing to do */
++			break;
++		case KDBUS_MSG_DATA_MEMFD:
++			if (r->data[i].memfd.file)
++				fput(r->data[i].memfd.file);
++			break;
++		}
++	}
++
++	for (i = 0; i < r->fds_count; i++)
++		if (r->fds[i])
++			fput(r->fds[i]);
++
++	kfree(r->dst_name);
++	kfree(r->data);
++	kfree(r->fds);
++	kfree(r);
++}
++
++/**
++ * kdbus_msg_resources_ref() - Acquire reference to msg resources
++ * @r:		resources to acquire ref to
++ *
++ * Return: The acquired resource
++ */
++struct kdbus_msg_resources *
++kdbus_msg_resources_ref(struct kdbus_msg_resources *r)
++{
++	if (r)
++		kref_get(&r->kref);
++	return r;
++}
++
++/**
++ * kdbus_msg_resources_unref() - Drop reference to msg resources
++ * @r:		resources to drop reference of
++ *
++ * Return: NULL
++ */
++struct kdbus_msg_resources *
++kdbus_msg_resources_unref(struct kdbus_msg_resources *r)
++{
++	if (r)
++		kref_put(&r->kref, __kdbus_msg_resources_free);
++	return NULL;
++}
++
++/**
++ * kdbus_kmsg_free() - free allocated message
++ * @kmsg:		Message
++ */
++void kdbus_kmsg_free(struct kdbus_kmsg *kmsg)
++{
++	if (!kmsg)
++		return;
++
++	kdbus_msg_resources_unref(kmsg->res);
++	kdbus_meta_conn_unref(kmsg->conn_meta);
++	kdbus_meta_proc_unref(kmsg->proc_meta);
++	kfree(kmsg->iov);
++	kfree(kmsg);
++}
++
++/**
++ * kdbus_kmsg_new() - allocate message
++ * @bus:		Bus this message is allocated on
++ * @extra_size:		Additional size to reserve for data
++ *
++ * Return: new kdbus_kmsg on success, ERR_PTR on failure.
++ */
++struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size)
++{
++	struct kdbus_kmsg *m;
++	size_t size;
++	int ret;
++
++	size = sizeof(struct kdbus_kmsg) + KDBUS_ITEM_SIZE(extra_size);
++	m = kzalloc(size, GFP_KERNEL);
++	if (!m)
++		return ERR_PTR(-ENOMEM);
++
++	m->seq = atomic64_inc_return(&bus->domain->last_id);
++	m->msg.size = size - KDBUS_KMSG_HEADER_SIZE;
++	m->msg.items[0].size = KDBUS_ITEM_SIZE(extra_size);
++
++	m->proc_meta = kdbus_meta_proc_new();
++	if (IS_ERR(m->proc_meta)) {
++		ret = PTR_ERR(m->proc_meta);
++		m->proc_meta = NULL;
++		goto exit;
++	}
++
++	m->conn_meta = kdbus_meta_conn_new();
++	if (IS_ERR(m->conn_meta)) {
++		ret = PTR_ERR(m->conn_meta);
++		m->conn_meta = NULL;
++		goto exit;
++	}
++
++	return m;
++
++exit:
++	kdbus_kmsg_free(m);
++	return ERR_PTR(ret);
++}
++
++static int kdbus_handle_check_file(struct file *file)
++{
++	struct inode *inode = file_inode(file);
++	struct socket *sock;
++
++	/*
++	 * Don't allow file descriptors in the transport that themselves allow
++	 * file descriptor queueing. This will eventually be allowed once both
++	 * unix domain sockets and kdbus share a generic garbage collector.
++	 */
++
++	if (file->f_op == &kdbus_handle_ops)
++		return -EOPNOTSUPP;
++
++	if (!S_ISSOCK(inode->i_mode))
++		return 0;
++
++	if (file->f_mode & FMODE_PATH)
++		return 0;
++
++	sock = SOCKET_I(inode);
++	if (sock->sk && sock->ops && sock->ops->family == PF_UNIX)
++		return -EOPNOTSUPP;
++
++	return 0;
++}
++
++static const char * const zeros = "\0\0\0\0\0\0\0";
++
++/*
++ * kdbus_msg_scan_items() - validate incoming data and prepare parsing
++ * @kmsg:		Message
++ * @bus:		Bus the message is sent over
++ *
++ * Return: 0 on success, negative errno on failure.
++ *
++ * File references in MEMFD or FDS items are pinned.
++ *
++ * On errors, the caller should drop any taken reference with
++ * kdbus_kmsg_free().
++ */
++static int kdbus_msg_scan_items(struct kdbus_kmsg *kmsg,
++				struct kdbus_bus *bus)
++{
++	struct kdbus_msg_resources *res = kmsg->res;
++	const struct kdbus_msg *msg = &kmsg->msg;
++	const struct kdbus_item *item;
++	size_t n, n_vecs, n_memfds;
++	bool has_bloom = false;
++	bool has_name = false;
++	bool has_fds = false;
++	bool is_broadcast;
++	bool is_signal;
++	u64 vec_size;
++
++	is_broadcast = (msg->dst_id == KDBUS_DST_ID_BROADCAST);
++	is_signal = !!(msg->flags & KDBUS_MSG_SIGNAL);
++
++	/* count data payloads */
++	n_vecs = 0;
++	n_memfds = 0;
++	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
++		switch (item->type) {
++		case KDBUS_ITEM_PAYLOAD_VEC:
++			++n_vecs;
++			break;
++		case KDBUS_ITEM_PAYLOAD_MEMFD:
++			++n_memfds;
++			if (item->memfd.size % 8)
++				++n_vecs;
++			break;
++		default:
++			break;
++		}
++	}
++
++	n = n_vecs + n_memfds;
++	if (n > 0) {
++		res->data = kcalloc(n, sizeof(*res->data), GFP_KERNEL);
++		if (!res->data)
++			return -ENOMEM;
++	}
++
++	if (n_vecs > 0) {
++		kmsg->iov = kcalloc(n_vecs, sizeof(*kmsg->iov), GFP_KERNEL);
++		if (!kmsg->iov)
++			return -ENOMEM;
++	}
++
++	/* import data payloads */
++	n = 0;
++	vec_size = 0;
++	KDBUS_ITEMS_FOREACH(item, msg->items, KDBUS_ITEMS_SIZE(msg, items)) {
++		size_t payload_size = KDBUS_ITEM_PAYLOAD_SIZE(item);
++		struct iovec *iov = kmsg->iov + kmsg->iov_count;
++
++		if (++n > KDBUS_MSG_MAX_ITEMS)
++			return -E2BIG;
++
++		switch (item->type) {
++		case KDBUS_ITEM_PAYLOAD_VEC: {
++			struct kdbus_msg_data *d = res->data + res->data_count;
++			void __force __user *ptr = KDBUS_PTR(item->vec.address);
++			size_t size = item->vec.size;
++
++			if (vec_size + size < vec_size)
++				return -EMSGSIZE;
++			if (vec_size + size > KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE)
++				return -EMSGSIZE;
++
++			d->type = KDBUS_MSG_DATA_VEC;
++			d->size = size;
++
++			if (ptr) {
++				if (unlikely(!access_ok(VERIFY_READ, ptr,
++							size)))
++					return -EFAULT;
++
++				d->vec.off = kmsg->pool_size;
++				iov->iov_base = ptr;
++				iov->iov_len = size;
++			} else {
++				d->vec.off = ~0ULL;
++				iov->iov_base = (char __user *)zeros;
++				iov->iov_len = size % 8;
++			}
++
++			if (kmsg->pool_size + iov->iov_len < kmsg->pool_size)
++				return -EMSGSIZE;
++
++			kmsg->pool_size += iov->iov_len;
++			++kmsg->iov_count;
++			++res->vec_count;
++			++res->data_count;
++			vec_size += size;
++
++			break;
++		}
++
++		case KDBUS_ITEM_PAYLOAD_MEMFD: {
++			struct kdbus_msg_data *d = res->data + res->data_count;
++			u64 start = item->memfd.start;
++			u64 size = item->memfd.size;
++			size_t pad = size % 8;
++			int seals, mask;
++			struct file *f;
++
++			if (kmsg->pool_size + size % 8 < kmsg->pool_size)
++				return -EMSGSIZE;
++			if (start + size < start)
++				return -EMSGSIZE;
++
++			if (item->memfd.fd < 0)
++				return -EBADF;
++
++			if (res->memfd_count >= KDBUS_MSG_MAX_MEMFD_ITEMS)
++				return -E2BIG;
++
++			f = fget(item->memfd.fd);
++			if (!f)
++				return -EBADF;
++
++			if (pad) {
++				iov->iov_base = (char __user *)zeros;
++				iov->iov_len = pad;
++
++				kmsg->pool_size += pad;
++				++kmsg->iov_count;
++			}
++
++			++res->data_count;
++			++res->memfd_count;
++
++			d->type = KDBUS_MSG_DATA_MEMFD;
++			d->size = size;
++			d->memfd.start = start;
++			d->memfd.file = f;
++
++			/*
++			 * We only accept a sealed memfd file whose content
++			 * cannot be altered by the sender or anybody else
++			 * while it is shared or in-flight. Other files need
++			 * to be passed with KDBUS_MSG_FDS.
++			 */
++			seals = shmem_get_seals(f);
++			if (seals < 0)
++				return -EMEDIUMTYPE;
++
++			mask = F_SEAL_SHRINK | F_SEAL_GROW |
++				F_SEAL_WRITE | F_SEAL_SEAL;
++			if ((seals & mask) != mask)
++				return -ETXTBSY;
++
++			if (start + size > (u64)i_size_read(file_inode(f)))
++				return -EBADF;
++
++			break;
++		}
++
++		case KDBUS_ITEM_FDS: {
++			unsigned int i;
++			unsigned int fds_count = payload_size / sizeof(int);
++
++			/* do not allow multiple fd arrays */
++			if (has_fds)
++				return -EEXIST;
++			has_fds = true;
++
++			/* Do not allow to broadcast file descriptors */
++			if (is_broadcast)
++				return -ENOTUNIQ;
++
++			if (fds_count > KDBUS_CONN_MAX_FDS_PER_USER)
++				return -EMFILE;
++
++			res->fds = kcalloc(fds_count, sizeof(struct file *),
++					   GFP_KERNEL);
++			if (!res->fds)
++				return -ENOMEM;
++
++			for (i = 0; i < fds_count; i++) {
++				int fd = item->fds[i];
++				int ret;
++
++				/*
++				 * Verify the fd and increment the usage count.
++				 * Use fget_raw() to allow passing O_PATH fds.
++				 */
++				if (fd < 0)
++					return -EBADF;
++
++				res->fds[i] = fget_raw(fd);
++				if (!res->fds[i])
++					return -EBADF;
++
++				res->fds_count++;
++
++				ret = kdbus_handle_check_file(res->fds[i]);
++				if (ret < 0)
++					return ret;
++			}
++
++			break;
++		}
++
++		case KDBUS_ITEM_BLOOM_FILTER: {
++			u64 bloom_size;
++
++			/* do not allow multiple bloom filters */
++			if (has_bloom)
++				return -EEXIST;
++			has_bloom = true;
++
++			bloom_size = payload_size -
++				     offsetof(struct kdbus_bloom_filter, data);
++
++			/*
++			 * Allow only bloom filter sizes that are a multiple of 64 bits.
++			 */
++			if (!KDBUS_IS_ALIGNED8(bloom_size))
++				return -EFAULT;
++
++			/* do not allow mismatching bloom filter sizes */
++			if (bloom_size != bus->bloom.size)
++				return -EDOM;
++
++			kmsg->bloom_filter = &item->bloom_filter;
++			break;
++		}
++
++		case KDBUS_ITEM_DST_NAME:
++			/* do not allow multiple names */
++			if (has_name)
++				return -EEXIST;
++			has_name = true;
++
++			if (!kdbus_name_is_valid(item->str, false))
++				return -EINVAL;
++
++			res->dst_name = kstrdup(item->str, GFP_KERNEL);
++			if (!res->dst_name)
++				return -ENOMEM;
++			break;
++
++		default:
++			return -EINVAL;
++		}
++	}
++
++	/* name is needed if no ID is given */
++	if (msg->dst_id == KDBUS_DST_ID_NAME && !has_name)
++		return -EDESTADDRREQ;
++
++	if (is_broadcast) {
++		/* Broadcasts can't take names */
++		if (has_name)
++			return -EBADMSG;
++
++		/* All broadcasts have to be signals */
++		if (!is_signal)
++			return -EBADMSG;
++
++		/* Timeouts are not allowed for broadcasts */
++		if (msg->timeout_ns > 0)
++			return -ENOTUNIQ;
++	}
++
++	/*
++	 * Signal messages require a bloom filter, and bloom filters are
++	 * only valid with signals.
++	 */
++	if (is_signal ^ has_bloom)
++		return -EBADMSG;
++
++	return 0;
++}
++
++/**
++ * kdbus_kmsg_new_from_cmd() - create kernel message from send payload
++ * @conn:		Connection
++ * @cmd_send:		Payload of KDBUS_CMD_SEND
++ *
++ * Return: a new kdbus_kmsg on success, ERR_PTR on failure.
++ */
++struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
++					   struct kdbus_cmd_send *cmd_send)
++{
++	struct kdbus_kmsg *m;
++	u64 size;
++	int ret;
++
++	ret = kdbus_copy_from_user(&size, KDBUS_PTR(cmd_send->msg_address),
++				   sizeof(size));
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	if (size < sizeof(struct kdbus_msg) || size > KDBUS_MSG_MAX_SIZE)
++		return ERR_PTR(-EINVAL);
++
++	m = kmalloc(size + KDBUS_KMSG_HEADER_SIZE, GFP_KERNEL);
++	if (!m)
++		return ERR_PTR(-ENOMEM);
++
++	memset(m, 0, KDBUS_KMSG_HEADER_SIZE);
++	m->seq = atomic64_inc_return(&conn->ep->bus->domain->last_id);
++
++	m->proc_meta = kdbus_meta_proc_new();
++	if (IS_ERR(m->proc_meta)) {
++		ret = PTR_ERR(m->proc_meta);
++		m->proc_meta = NULL;
++		goto exit_free;
++	}
++
++	m->conn_meta = kdbus_meta_conn_new();
++	if (IS_ERR(m->conn_meta)) {
++		ret = PTR_ERR(m->conn_meta);
++		m->conn_meta = NULL;
++		goto exit_free;
++	}
++
++	if (copy_from_user(&m->msg, KDBUS_PTR(cmd_send->msg_address), size)) {
++		ret = -EFAULT;
++		goto exit_free;
++	}
++
++	if (m->msg.size != size) {
++		ret = -EINVAL;
++		goto exit_free;
++	}
++
++	if (m->msg.flags & ~(KDBUS_MSG_EXPECT_REPLY |
++			     KDBUS_MSG_NO_AUTO_START |
++			     KDBUS_MSG_SIGNAL)) {
++		ret = -EINVAL;
++		goto exit_free;
++	}
++
++	ret = kdbus_items_validate(m->msg.items,
++				   KDBUS_ITEMS_SIZE(&m->msg, items));
++	if (ret < 0)
++		goto exit_free;
++
++	m->res = kdbus_msg_resources_new();
++	if (IS_ERR(m->res)) {
++		ret = PTR_ERR(m->res);
++		m->res = NULL;
++		goto exit_free;
++	}
++
++	/* do not accept kernel-generated messages */
++	if (m->msg.payload_type == KDBUS_PAYLOAD_KERNEL) {
++		ret = -EINVAL;
++		goto exit_free;
++	}
++
++	if (m->msg.flags & KDBUS_MSG_EXPECT_REPLY) {
++		/* requests for replies need timeout and cookie */
++		if (m->msg.timeout_ns == 0 || m->msg.cookie == 0) {
++			ret = -EINVAL;
++			goto exit_free;
++		}
++
++		/* replies may not be expected for broadcasts */
++		if (m->msg.dst_id == KDBUS_DST_ID_BROADCAST) {
++			ret = -ENOTUNIQ;
++			goto exit_free;
++		}
++
++		/* replies may not be expected for signals */
++		if (m->msg.flags & KDBUS_MSG_SIGNAL) {
++			ret = -EINVAL;
++			goto exit_free;
++		}
++	} else {
++		/*
++		 * KDBUS_SEND_SYNC_REPLY is only valid together with
++		 * KDBUS_MSG_EXPECT_REPLY
++		 */
++		if (cmd_send->flags & KDBUS_SEND_SYNC_REPLY) {
++			ret = -EINVAL;
++			goto exit_free;
++		}
++
++		/* replies cannot be signals */
++		if (m->msg.cookie_reply && (m->msg.flags & KDBUS_MSG_SIGNAL)) {
++			ret = -EINVAL;
++			goto exit_free;
++		}
++	}
++
++	ret = kdbus_msg_scan_items(m, conn->ep->bus);
++	if (ret < 0)
++		goto exit_free;
++
++	/* patch-in the source of this message */
++	if (m->msg.src_id > 0 && m->msg.src_id != conn->id) {
++		ret = -EINVAL;
++		goto exit_free;
++	}
++	m->msg.src_id = conn->id;
++
++	return m;
++
++exit_free:
++	kdbus_kmsg_free(m);
++	return ERR_PTR(ret);
++}
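
kdbus_msg_scan_items() above guards each size accumulation with an
"a + b < a" wrap-around test before applying the payload limit. A minimal
sketch of that overflow-safe pattern; the 2M constant mirrors
KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE, the rest is illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_PAYLOAD	(2ULL * 1024 * 1024)

static int add_vec(uint64_t *total, uint64_t size)
{
	if (*total + size < *total)		/* unsigned wrap-around */
		return -1;
	if (*total + size > MAX_PAYLOAD)	/* policy limit */
		return -1;
	*total += size;
	return 0;
}

int main(void)
{
	uint64_t total = 0;

	printf("%d\n", add_vec(&total, 1024));		/*  0: fits    */
	printf("%d\n", add_vec(&total, MAX_PAYLOAD));	/* -1: too big */
	printf("%d\n", add_vec(&total, UINT64_MAX));	/* -1: wraps   */
	return 0;
}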
+diff --git a/ipc/kdbus/message.h b/ipc/kdbus/message.h
+new file mode 100644
+index 0000000..af47758
+--- /dev/null
++++ b/ipc/kdbus/message.h
+@@ -0,0 +1,133 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_MESSAGE_H
++#define __KDBUS_MESSAGE_H
++
++#include "util.h"
++#include "metadata.h"
++
++/**
++ * enum kdbus_msg_data_type - Type of kdbus_msg_data payloads
++ * @KDBUS_MSG_DATA_VEC:		Data vector provided by user-space
++ * @KDBUS_MSG_DATA_MEMFD:	Memfd payload
++ */
++enum kdbus_msg_data_type {
++	KDBUS_MSG_DATA_VEC,
++	KDBUS_MSG_DATA_MEMFD,
++};
++
++/**
++ * struct kdbus_msg_data - Data payload as stored by messages
++ * @type:	Type of payload (KDBUS_MSG_DATA_*)
++ * @size:	Size of the described payload
++ * @off:	The offset, relative to the vec slice
++ * @start:	Offset inside the memfd
++ * @file:	Backing file referenced by the memfd
++ */
++struct kdbus_msg_data {
++	unsigned int type;
++	u64 size;
++
++	union {
++		struct {
++			u64 off;
++		} vec;
++		struct {
++			u64 start;
++			struct file *file;
++		} memfd;
++	};
++};
++
++/**
++ * struct kdbus_msg_resources - resources of a message
++ * @kref:		Reference counter
++ * @dst_name:		Short-cut to msg for faster lookup
++ * @fds:		Array of file descriptors to pass
++ * @fds_count:		Number of file descriptors to pass
++ * @data:		Array of data payloads
++ * @vec_count:		Number of VEC entries
++ * @memfd_count:	Number of MEMFD entries in @data
++ * @data_count:		Sum of @vec_count + @memfd_count
++ */
++struct kdbus_msg_resources {
++	struct kref kref;
++	const char *dst_name;
++
++	struct file **fds;
++	unsigned int fds_count;
++
++	struct kdbus_msg_data *data;
++	size_t vec_count;
++	size_t memfd_count;
++	size_t data_count;
++};
++
++struct kdbus_msg_resources *
++kdbus_msg_resources_ref(struct kdbus_msg_resources *r);
++struct kdbus_msg_resources *
++kdbus_msg_resources_unref(struct kdbus_msg_resources *r);
++
++/**
++ * struct kdbus_kmsg - internal message handling data
++ * @seq:		Domain-global message sequence number
++ * @notify_type:	Short-cut for faster lookup
++ * @notify_old_id:	Short-cut for faster lookup
++ * @notify_new_id:	Short-cut for faster lookup
++ * @notify_name:	Short-cut for faster lookup
++ * @dst_name_id:	Short-cut to msg for faster lookup
++ * @bloom_filter:	Bloom filter to match message properties
++ * @bloom_generation:	Generation of bloom element set
++ * @notify_entry:	List of kernel-generated notifications
++ * @iov:		Array of iovec, describing the payload to copy
++ * @iov_count:		Number of array members in @iov
++ * @pool_size:		Overall size of inlined data referenced by @iov
++ * @proc_meta:		Appended SCM-like metadata of the sending process
++ * @conn_meta:		Appended SCM-like metadata of the sending connection
++ * @res:		Message resources
++ * @msg:		Message from or to userspace
++ */
++struct kdbus_kmsg {
++	u64 seq;
++	u64 notify_type;
++	u64 notify_old_id;
++	u64 notify_new_id;
++	const char *notify_name;
++
++	u64 dst_name_id;
++	const struct kdbus_bloom_filter *bloom_filter;
++	u64 bloom_generation;
++	struct list_head notify_entry;
++
++	struct iovec *iov;
++	size_t iov_count;
++	u64 pool_size;
++
++	struct kdbus_meta_proc *proc_meta;
++	struct kdbus_meta_conn *conn_meta;
++	struct kdbus_msg_resources *res;
++
++	/* variable size, must be the last member */
++	struct kdbus_msg msg;
++};
++
++struct kdbus_bus;
++struct kdbus_conn;
++
++struct kdbus_kmsg *kdbus_kmsg_new(struct kdbus_bus *bus, size_t extra_size);
++struct kdbus_kmsg *kdbus_kmsg_new_from_cmd(struct kdbus_conn *conn,
++					   struct kdbus_cmd_send *cmd_send);
++void kdbus_kmsg_free(struct kdbus_kmsg *kmsg);
++
++#endif
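
struct kdbus_kmsg above keeps the variable-size message as its mandatory last
member and sizes the fixed part with offsetof(), which is exactly what
KDBUS_KMSG_HEADER_SIZE does in message.c. A user-space sketch of that
single-allocation pattern with hypothetical struct names (msg_hdr, wrapper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct msg_hdr {
	uint64_t size;		/* size of header + data that follows */
};

struct wrapper {
	uint64_t seq;		/* kernel-private bookkeeping */
	struct msg_hdr msg;	/* variable-size part starts here, must be last */
};

#define WRAPPER_HEADER_SIZE offsetof(struct wrapper, msg)

int main(void)
{
	const char text[] = "hello";
	uint64_t msg_size = sizeof(struct msg_hdr) + sizeof(text);
	struct wrapper *w;
	char *data;

	/* one allocation covers the fixed header and the variable tail */
	w = calloc(1, WRAPPER_HEADER_SIZE + msg_size);
	if (!w)
		return 1;

	w->seq = 1;
	w->msg.size = msg_size;
	data = (char *)(&w->msg + 1);
	memcpy(data, text, sizeof(text));

	printf("seq=%llu msg=%s\n", (unsigned long long)w->seq, data);
	free(w);
	return 0;
}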
+diff --git a/ipc/kdbus/metadata.c b/ipc/kdbus/metadata.c
+new file mode 100644
+index 0000000..3adc6c2
+--- /dev/null
++++ b/ipc/kdbus/metadata.c
+@@ -0,0 +1,1159 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/audit.h>
++#include <linux/capability.h>
++#include <linux/cgroup.h>
++#include <linux/cred.h>
++#include <linux/file.h>
++#include <linux/fs_struct.h>
++#include <linux/init.h>
++#include <linux/kref.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/security.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/uidgid.h>
++#include <linux/uio.h>
++#include <linux/user_namespace.h>
++#include <linux/version.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "item.h"
++#include "message.h"
++#include "metadata.h"
++#include "names.h"
++
++/**
++ * struct kdbus_meta_proc - Process metadata
++ * @kref:		Reference counting
++ * @lock:		Object lock
++ * @collected:		Bitmask of collected items
++ * @valid:		Bitmask of collected and valid items
++ * @uid:		UID of process
++ * @euid:		EUID of process
++ * @suid:		SUID of process
++ * @fsuid:		FSUID of process
++ * @gid:		GID of process
++ * @egid:		EGID of process
++ * @sgid:		SGID of process
++ * @fsgid:		FSGID of process
++ * @pid:		PID of process
++ * @tgid:		TGID of process
++ * @ppid:		PPID of process
++ * @auxgrps:		Auxiliary groups
++ * @n_auxgrps:		Number of items in @auxgrps
++ * @tid_comm:		TID comm line
++ * @pid_comm:		PID comm line
++ * @exe_path:		Executable path
++ * @root_path:		Root-FS path
++ * @cmdline:		Command-line
++ * @cgroup:		Full cgroup path
++ * @caps:		Capabilities
++ * @caps_namespace:	User-namespace of @caps
++ * @seclabel:		Seclabel
++ * @audit_loginuid:	Audit login-UID
++ * @audit_sessionid:	Audit session-ID
++ */
++struct kdbus_meta_proc {
++	struct kref kref;
++	struct mutex lock;
++	u64 collected;
++	u64 valid;
++
++	/* KDBUS_ITEM_CREDS */
++	kuid_t uid, euid, suid, fsuid;
++	kgid_t gid, egid, sgid, fsgid;
++
++	/* KDBUS_ITEM_PIDS */
++	struct pid *pid;
++	struct pid *tgid;
++	struct pid *ppid;
++
++	/* KDBUS_ITEM_AUXGROUPS */
++	kgid_t *auxgrps;
++	size_t n_auxgrps;
++
++	/* KDBUS_ITEM_TID_COMM */
++	char tid_comm[TASK_COMM_LEN];
++	/* KDBUS_ITEM_PID_COMM */
++	char pid_comm[TASK_COMM_LEN];
++
++	/* KDBUS_ITEM_EXE */
++	struct path exe_path;
++	struct path root_path;
++
++	/* KDBUS_ITEM_CMDLINE */
++	char *cmdline;
++
++	/* KDBUS_ITEM_CGROUP */
++	char *cgroup;
++
++	/* KDBUS_ITEM_CAPS */
++	struct caps {
++		/* binary compatible to kdbus_caps */
++		u32 last_cap;
++		struct {
++			u32 caps[_KERNEL_CAPABILITY_U32S];
++		} set[4];
++	} caps;
++	struct user_namespace *caps_namespace;
++
++	/* KDBUS_ITEM_SECLABEL */
++	char *seclabel;
++
++	/* KDBUS_ITEM_AUDIT */
++	kuid_t audit_loginuid;
++	unsigned int audit_sessionid;
++};
++
++/**
++ * struct kdbus_meta_conn - Connection metadata
++ * @kref:		Reference counting
++ * @lock:		Object lock
++ * @collected:		Bitmask of collected items
++ * @valid:		Bitmask of collected and valid items
++ * @ts:			Timestamp values
++ * @owned_names_items:	Serialized items for owned names
++ * @owned_names_size:	Size of @owned_names_items
++ * @conn_description:	Connection description
++ */
++struct kdbus_meta_conn {
++	struct kref kref;
++	struct mutex lock;
++	u64 collected;
++	u64 valid;
++
++	/* KDBUS_ITEM_TIMESTAMP */
++	struct kdbus_timestamp ts;
++
++	/* KDBUS_ITEM_OWNED_NAME */
++	struct kdbus_item *owned_names_items;
++	size_t owned_names_size;
++
++	/* KDBUS_ITEM_CONN_DESCRIPTION */
++	char *conn_description;
++};
++
++/**
++ * kdbus_meta_proc_new() - Create process metadata object
++ *
++ * Return: Pointer to new object on success, ERR_PTR on failure.
++ */
++struct kdbus_meta_proc *kdbus_meta_proc_new(void)
++{
++	struct kdbus_meta_proc *mp;
++
++	mp = kzalloc(sizeof(*mp), GFP_KERNEL);
++	if (!mp)
++		return ERR_PTR(-ENOMEM);
++
++	kref_init(&mp->kref);
++	mutex_init(&mp->lock);
++
++	return mp;
++}
++
++static void kdbus_meta_proc_free(struct kref *kref)
++{
++	struct kdbus_meta_proc *mp = container_of(kref, struct kdbus_meta_proc,
++						  kref);
++
++	path_put(&mp->exe_path);
++	path_put(&mp->root_path);
++	put_user_ns(mp->caps_namespace);
++	put_pid(mp->ppid);
++	put_pid(mp->tgid);
++	put_pid(mp->pid);
++
++	kfree(mp->seclabel);
++	kfree(mp->auxgrps);
++	kfree(mp->cmdline);
++	kfree(mp->cgroup);
++	kfree(mp);
++}
++
++/**
++ * kdbus_meta_proc_ref() - Gain reference
++ * @mp:		Process metadata object
++ *
++ * Return: @mp is returned
++ */
++struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp)
++{
++	if (mp)
++		kref_get(&mp->kref);
++	return mp;
++}
++
++/**
++ * kdbus_meta_proc_unref() - Drop reference
++ * @mp:		Process metadata object
++ *
++ * Return: NULL
++ */
++struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp)
++{
++	if (mp)
++		kref_put(&mp->kref, kdbus_meta_proc_free);
++	return NULL;
++}
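The ref/unref pair above follows the usual kernel kref idiom: ref() passes @mp through and unref() always returns NULL, so callers can drop a reference and clear their pointer in one statement. A minimal userspace sketch of the same NULL-safe pattern (names are illustrative, not from the patch):

    #include <stdlib.h>

    struct obj { int refcount; };

    static struct obj *obj_ref(struct obj *o)
    {
            if (o)                  /* NULL-safe, like kdbus_meta_proc_ref() */
                    o->refcount++;
            return o;
    }

    static struct obj *obj_unref(struct obj *o)
    {
            if (o && --o->refcount == 0)
                    free(o);
            return NULL;            /* enables: o = obj_unref(o); */
    }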
++
++static void kdbus_meta_proc_collect_creds(struct kdbus_meta_proc *mp)
++{
++	mp->uid		= current_uid();
++	mp->euid	= current_euid();
++	mp->suid	= current_suid();
++	mp->fsuid	= current_fsuid();
++
++	mp->gid		= current_gid();
++	mp->egid	= current_egid();
++	mp->sgid	= current_sgid();
++	mp->fsgid	= current_fsgid();
++
++	mp->valid |= KDBUS_ATTACH_CREDS;
++}
++
++static void kdbus_meta_proc_collect_pids(struct kdbus_meta_proc *mp)
++{
++	struct task_struct *parent;
++
++	mp->pid = get_pid(task_pid(current));
++	mp->tgid = get_pid(task_tgid(current));
++
++	rcu_read_lock();
++	parent = rcu_dereference(current->real_parent);
++	mp->ppid = get_pid(task_tgid(parent));
++	rcu_read_unlock();
++
++	mp->valid |= KDBUS_ATTACH_PIDS;
++}
++
++static int kdbus_meta_proc_collect_auxgroups(struct kdbus_meta_proc *mp)
++{
++	struct group_info *info;
++	size_t i;
++
++	info = get_current_groups();
++
++	if (info->ngroups > 0) {
++		mp->auxgrps = kmalloc_array(info->ngroups, sizeof(kgid_t),
++					    GFP_KERNEL);
++		if (!mp->auxgrps) {
++			put_group_info(info);
++			return -ENOMEM;
++		}
++
++		for (i = 0; i < info->ngroups; i++)
++			mp->auxgrps[i] = GROUP_AT(info, i);
++	}
++
++	mp->n_auxgrps = info->ngroups;
++	put_group_info(info);
++	mp->valid |= KDBUS_ATTACH_AUXGROUPS;
++
++	return 0;
++}
++
++static void kdbus_meta_proc_collect_tid_comm(struct kdbus_meta_proc *mp)
++{
++	get_task_comm(mp->tid_comm, current);
++	mp->valid |= KDBUS_ATTACH_TID_COMM;
++}
++
++static void kdbus_meta_proc_collect_pid_comm(struct kdbus_meta_proc *mp)
++{
++	get_task_comm(mp->pid_comm, current->group_leader);
++	mp->valid |= KDBUS_ATTACH_PID_COMM;
++}
++
++static void kdbus_meta_proc_collect_exe(struct kdbus_meta_proc *mp)
++{
++	struct mm_struct *mm;
++
++	mm = get_task_mm(current);
++	if (!mm)
++		return;
++
++	down_read(&mm->mmap_sem);
++	if (mm->exe_file) {
++		mp->exe_path = mm->exe_file->f_path;
++		path_get(&mp->exe_path);
++		get_fs_root(current->fs, &mp->root_path);
++		mp->valid |= KDBUS_ATTACH_EXE;
++	}
++	up_read(&mm->mmap_sem);
++
++	mmput(mm);
++}
++
++static int kdbus_meta_proc_collect_cmdline(struct kdbus_meta_proc *mp)
++{
++	struct mm_struct *mm;
++	char *cmdline;
++
++	mm = get_task_mm(current);
++	if (!mm)
++		return 0;
++
++	if (!mm->arg_end) {
++		mmput(mm);
++		return 0;
++	}
++
++	cmdline = strndup_user((const char __user *)mm->arg_start,
++			       mm->arg_end - mm->arg_start);
++	mmput(mm);
++
++	if (IS_ERR(cmdline))
++		return PTR_ERR(cmdline);
++
++	mp->cmdline = cmdline;
++	mp->valid |= KDBUS_ATTACH_CMDLINE;
++
++	return 0;
++}
++
++static int kdbus_meta_proc_collect_cgroup(struct kdbus_meta_proc *mp)
++{
++#ifdef CONFIG_CGROUPS
++	void *page;
++	char *s;
++
++	page = (void *)__get_free_page(GFP_TEMPORARY);
++	if (!page)
++		return -ENOMEM;
++
++	s = task_cgroup_path(current, page, PAGE_SIZE);
++	if (s) {
++		mp->cgroup = kstrdup(s, GFP_KERNEL);
++		if (!mp->cgroup) {
++			free_page((unsigned long)page);
++			return -ENOMEM;
++		}
++	}
++
++	free_page((unsigned long)page);
++	mp->valid |= KDBUS_ATTACH_CGROUP;
++#endif
++
++	return 0;
++}
++
++static void kdbus_meta_proc_collect_caps(struct kdbus_meta_proc *mp)
++{
++	const struct cred *c = current_cred();
++	int i;
++
++	/* ABI: "last_cap" equals /proc/sys/kernel/cap_last_cap */
++	mp->caps.last_cap = CAP_LAST_CAP;
++	mp->caps_namespace = get_user_ns(current_user_ns());
++
++	CAP_FOR_EACH_U32(i) {
++		mp->caps.set[0].caps[i] = c->cap_inheritable.cap[i];
++		mp->caps.set[1].caps[i] = c->cap_permitted.cap[i];
++		mp->caps.set[2].caps[i] = c->cap_effective.cap[i];
++		mp->caps.set[3].caps[i] = c->cap_bset.cap[i];
++	}
++
++	/* clear unused bits */
++	for (i = 0; i < 4; i++)
++		mp->caps.set[i].caps[CAP_TO_INDEX(CAP_LAST_CAP)] &=
++						CAP_LAST_U32_VALID_MASK;
++
++	mp->valid |= KDBUS_ATTACH_CAPS;
++}
++
++static int kdbus_meta_proc_collect_seclabel(struct kdbus_meta_proc *mp)
++{
++#ifdef CONFIG_SECURITY
++	char *ctx = NULL;
++	u32 sid, len;
++	int ret;
++
++	security_task_getsecid(current, &sid);
++	ret = security_secid_to_secctx(sid, &ctx, &len);
++	if (ret < 0) {
++		/*
++		 * EOPNOTSUPP means no security module is active,
++		 * let's skip adding the seclabel then. This effectively
++		 * drops the SECLABEL item.
++		 */
++		return (ret == -EOPNOTSUPP) ? 0 : ret;
++	}
++
++	mp->seclabel = kstrdup(ctx, GFP_KERNEL);
++	security_release_secctx(ctx, len);
++	if (!mp->seclabel)
++		return -ENOMEM;
++
++	mp->valid |= KDBUS_ATTACH_SECLABEL;
++#endif
++
++	return 0;
++}
++
++static void kdbus_meta_proc_collect_audit(struct kdbus_meta_proc *mp)
++{
++#ifdef CONFIG_AUDITSYSCALL
++	mp->audit_loginuid = audit_get_loginuid(current);
++	mp->audit_sessionid = audit_get_sessionid(current);
++	mp->valid |= KDBUS_ATTACH_AUDIT;
++#endif
++}
++
++/**
++ * kdbus_meta_proc_collect() - Collect process metadata
++ * @mp:		Process metadata object
++ * @what:	Attach flags to collect
++ *
++ * This collects process metadata from current and saves it in @mp.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_meta_proc_collect(struct kdbus_meta_proc *mp, u64 what)
++{
++	int ret;
++
++	if (!mp || !(what & (KDBUS_ATTACH_CREDS |
++			     KDBUS_ATTACH_PIDS |
++			     KDBUS_ATTACH_AUXGROUPS |
++			     KDBUS_ATTACH_TID_COMM |
++			     KDBUS_ATTACH_PID_COMM |
++			     KDBUS_ATTACH_EXE |
++			     KDBUS_ATTACH_CMDLINE |
++			     KDBUS_ATTACH_CGROUP |
++			     KDBUS_ATTACH_CAPS |
++			     KDBUS_ATTACH_SECLABEL |
++			     KDBUS_ATTACH_AUDIT)))
++		return 0;
++
++	mutex_lock(&mp->lock);
++
++	if ((what & KDBUS_ATTACH_CREDS) &&
++	    !(mp->collected & KDBUS_ATTACH_CREDS)) {
++		kdbus_meta_proc_collect_creds(mp);
++		mp->collected |= KDBUS_ATTACH_CREDS;
++	}
++
++	if ((what & KDBUS_ATTACH_PIDS) &&
++	    !(mp->collected & KDBUS_ATTACH_PIDS)) {
++		kdbus_meta_proc_collect_pids(mp);
++		mp->collected |= KDBUS_ATTACH_PIDS;
++	}
++
++	if ((what & KDBUS_ATTACH_AUXGROUPS) &&
++	    !(mp->collected & KDBUS_ATTACH_AUXGROUPS)) {
++		ret = kdbus_meta_proc_collect_auxgroups(mp);
++		if (ret < 0)
++			goto exit_unlock;
++		mp->collected |= KDBUS_ATTACH_AUXGROUPS;
++	}
++
++	if ((what & KDBUS_ATTACH_TID_COMM) &&
++	    !(mp->collected & KDBUS_ATTACH_TID_COMM)) {
++		kdbus_meta_proc_collect_tid_comm(mp);
++		mp->collected |= KDBUS_ATTACH_TID_COMM;
++	}
++
++	if ((what & KDBUS_ATTACH_PID_COMM) &&
++	    !(mp->collected & KDBUS_ATTACH_PID_COMM)) {
++		kdbus_meta_proc_collect_pid_comm(mp);
++		mp->collected |= KDBUS_ATTACH_PID_COMM;
++	}
++
++	if ((what & KDBUS_ATTACH_EXE) &&
++	    !(mp->collected & KDBUS_ATTACH_EXE)) {
++		kdbus_meta_proc_collect_exe(mp);
++		mp->collected |= KDBUS_ATTACH_EXE;
++	}
++
++	if ((what & KDBUS_ATTACH_CMDLINE) &&
++	    !(mp->collected & KDBUS_ATTACH_CMDLINE)) {
++		ret = kdbus_meta_proc_collect_cmdline(mp);
++		if (ret < 0)
++			goto exit_unlock;
++		mp->collected |= KDBUS_ATTACH_CMDLINE;
++	}
++
++	if ((what & KDBUS_ATTACH_CGROUP) &&
++	    !(mp->collected & KDBUS_ATTACH_CGROUP)) {
++		ret = kdbus_meta_proc_collect_cgroup(mp);
++		if (ret < 0)
++			goto exit_unlock;
++		mp->collected |= KDBUS_ATTACH_CGROUP;
++	}
++
++	if ((what & KDBUS_ATTACH_CAPS) &&
++	    !(mp->collected & KDBUS_ATTACH_CAPS)) {
++		kdbus_meta_proc_collect_caps(mp);
++		mp->collected |= KDBUS_ATTACH_CAPS;
++	}
++
++	if ((what & KDBUS_ATTACH_SECLABEL) &&
++	    !(mp->collected & KDBUS_ATTACH_SECLABEL)) {
++		ret = kdbus_meta_proc_collect_seclabel(mp);
++		if (ret < 0)
++			goto exit_unlock;
++		mp->collected |= KDBUS_ATTACH_SECLABEL;
++	}
++
++	if ((what & KDBUS_ATTACH_AUDIT) &&
++	    !(mp->collected & KDBUS_ATTACH_AUDIT)) {
++		kdbus_meta_proc_collect_audit(mp);
++		mp->collected |= KDBUS_ATTACH_AUDIT;
++	}
++
++	ret = 0;
++
++exit_unlock:
++	mutex_unlock(&mp->lock);
++	return ret;
++}
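Taken together, a caller allocates the object once and collects lazily; repeated calls are cheap because the 'collected' bitmask makes every item idempotent. A hedged usage fragment built only from the functions above:

    struct kdbus_meta_proc *mp;
    int ret;

    mp = kdbus_meta_proc_new();
    if (IS_ERR(mp))
            return PTR_ERR(mp);

    /* first call gathers creds and pids from 'current' */
    ret = kdbus_meta_proc_collect(mp, KDBUS_ATTACH_CREDS | KDBUS_ATTACH_PIDS);

    /* a second call for the same items is a no-op */
    if (ret == 0)
            ret = kdbus_meta_proc_collect(mp, KDBUS_ATTACH_CREDS);

    mp = kdbus_meta_proc_unref(mp);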
++
++/**
++ * kdbus_meta_proc_fake() - Fill process metadata from faked credentials
++ * @mp:		Metadata
++ * @creds:	Creds to set, may be %NULL
++ * @pids:	PIDs to set, may be %NULL
++ * @seclabel:	Seclabel to set, may be %NULL
++ *
++ * This function takes information stored in @creds, @pids and @seclabel and
++ * resolves them to kernel-representations, if possible. A call to this function
++ * is considered an alternative to calling kdbus_meta_proc_collect(), which
++ * derives the same information from the 'current' task.
++ *
++ * This call uses the current task's namespaces to resolve the given
++ * information.
++ *
++ * Return: 0 on success, negative error number otherwise.
++ */
++int kdbus_meta_proc_fake(struct kdbus_meta_proc *mp,
++			 const struct kdbus_creds *creds,
++			 const struct kdbus_pids *pids,
++			 const char *seclabel)
++{
++	int ret;
++
++	if (!mp)
++		return 0;
++
++	mutex_lock(&mp->lock);
++
++	if (creds && !(mp->collected & KDBUS_ATTACH_CREDS)) {
++		struct user_namespace *ns = current_user_ns();
++
++		mp->uid		= make_kuid(ns, creds->uid);
++		mp->euid	= make_kuid(ns, creds->euid);
++		mp->suid	= make_kuid(ns, creds->suid);
++		mp->fsuid	= make_kuid(ns, creds->fsuid);
++
++		mp->gid		= make_kgid(ns, creds->gid);
++		mp->egid	= make_kgid(ns, creds->egid);
++		mp->sgid	= make_kgid(ns, creds->sgid);
++		mp->fsgid	= make_kgid(ns, creds->fsgid);
++
++		if ((creds->uid   != (uid_t)-1 && !uid_valid(mp->uid))   ||
++		    (creds->euid  != (uid_t)-1 && !uid_valid(mp->euid))  ||
++		    (creds->suid  != (uid_t)-1 && !uid_valid(mp->suid))  ||
++		    (creds->fsuid != (uid_t)-1 && !uid_valid(mp->fsuid)) ||
++		    (creds->gid   != (gid_t)-1 && !gid_valid(mp->gid))   ||
++		    (creds->egid  != (gid_t)-1 && !gid_valid(mp->egid))  ||
++		    (creds->sgid  != (gid_t)-1 && !gid_valid(mp->sgid))  ||
++		    (creds->fsgid != (gid_t)-1 && !gid_valid(mp->fsgid))) {
++			ret = -EINVAL;
++			goto exit_unlock;
++		}
++
++		mp->valid |= KDBUS_ATTACH_CREDS;
++		mp->collected |= KDBUS_ATTACH_CREDS;
++	}
++
++	if (pids && !(mp->collected & KDBUS_ATTACH_PIDS)) {
++		mp->pid = get_pid(find_vpid(pids->tid));
++		mp->tgid = get_pid(find_vpid(pids->pid));
++		mp->ppid = get_pid(find_vpid(pids->ppid));
++
++		if ((pids->tid != 0 && !mp->pid) ||
++		    (pids->pid != 0 && !mp->tgid) ||
++		    (pids->ppid != 0 && !mp->ppid)) {
++			put_pid(mp->pid);
++			put_pid(mp->tgid);
++			put_pid(mp->ppid);
++			mp->pid = NULL;
++			mp->tgid = NULL;
++			mp->ppid = NULL;
++			ret = -EINVAL;
++			goto exit_unlock;
++		}
++
++		mp->valid |= KDBUS_ATTACH_PIDS;
++		mp->collected |= KDBUS_ATTACH_PIDS;
++	}
++
++	if (seclabel && !(mp->collected & KDBUS_ATTACH_SECLABEL)) {
++		mp->seclabel = kstrdup(seclabel, GFP_KERNEL);
++		if (!mp->seclabel) {
++			ret = -ENOMEM;
++			goto exit_unlock;
++		}
++
++		mp->valid |= KDBUS_ATTACH_SECLABEL;
++		mp->collected |= KDBUS_ATTACH_SECLABEL;
++	}
++
++	ret = 0;
++
++exit_unlock:
++	mutex_unlock(&mp->lock);
++	return ret;
++}
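Note how the credential checks treat (uid_t)-1 as an explicit "unset" marker: a field is rejected with -EINVAL only if it was supplied and failed to map in the current user-namespace. A standalone sketch of that per-field rule (hypothetical helper, not in the patch):

    #include <assert.h>
    #include <stdbool.h>
    #include <sys/types.h>

    /* mirrors one clause of the validity check above */
    static bool cred_field_ok(uid_t supplied, bool mapped_ok)
    {
            return supplied == (uid_t)-1 || mapped_ok;
    }

    int main(void)
    {
            assert(cred_field_ok((uid_t)-1, false)); /* unset: accepted */
            assert(cred_field_ok(1000, true));       /* mapped: accepted */
            assert(!cred_field_ok(1000, false));     /* unmappable: -EINVAL */
            return 0;
    }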
++
++/**
++ * kdbus_meta_conn_new() - Create connection metadata object
++ *
++ * Return: Pointer to new object on success, ERR_PTR on failure.
++ */
++struct kdbus_meta_conn *kdbus_meta_conn_new(void)
++{
++	struct kdbus_meta_conn *mc;
++
++	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
++	if (!mc)
++		return ERR_PTR(-ENOMEM);
++
++	kref_init(&mc->kref);
++	mutex_init(&mc->lock);
++
++	return mc;
++}
++
++static void kdbus_meta_conn_free(struct kref *kref)
++{
++	struct kdbus_meta_conn *mc =
++		container_of(kref, struct kdbus_meta_conn, kref);
++
++	kfree(mc->conn_description);
++	kfree(mc->owned_names_items);
++	kfree(mc);
++}
++
++/**
++ * kdbus_meta_conn_ref() - Gain reference
++ * @mc:		Connection metadata object
++ *
++ * Return: @mc is returned
++ */
++struct kdbus_meta_conn *kdbus_meta_conn_ref(struct kdbus_meta_conn *mc)
++{
++	if (mc)
++		kref_get(&mc->kref);
++	return mc;
++}
++
++/**
++ * kdbus_meta_conn_unref() - Drop reference
++ * @mc:		Connection metadata object
++ *
++ * Return: NULL
++ */
++struct kdbus_meta_conn *kdbus_meta_conn_unref(struct kdbus_meta_conn *mc)
++{
++	if (mc)
++		kref_put(&mc->kref, kdbus_meta_conn_free);
++	return NULL;
++}
++
++static void kdbus_meta_conn_collect_timestamp(struct kdbus_meta_conn *mc,
++					      struct kdbus_kmsg *kmsg)
++{
++	mc->ts.monotonic_ns = ktime_get_ns();
++	mc->ts.realtime_ns = ktime_get_real_ns();
++
++	if (kmsg)
++		mc->ts.seqnum = kmsg->seq;
++
++	mc->valid |= KDBUS_ATTACH_TIMESTAMP;
++}
++
++static int kdbus_meta_conn_collect_names(struct kdbus_meta_conn *mc,
++					 struct kdbus_conn *conn)
++{
++	const struct kdbus_name_entry *e;
++	struct kdbus_item *item;
++	size_t slen, size;
++
++	lockdep_assert_held(&conn->ep->bus->name_registry->rwlock);
++
++	size = 0;
++	list_for_each_entry(e, &conn->names_list, conn_entry)
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_name) +
++					strlen(e->name) + 1);
++
++	if (!size)
++		return 0;
++
++	item = kmalloc(size, GFP_KERNEL);
++	if (!item)
++		return -ENOMEM;
++
++	mc->owned_names_items = item;
++	mc->owned_names_size = size;
++
++	list_for_each_entry(e, &conn->names_list, conn_entry) {
++		slen = strlen(e->name) + 1;
++		kdbus_item_set(item, KDBUS_ITEM_OWNED_NAME, NULL,
++			       sizeof(struct kdbus_name) + slen);
++		item->name.flags = e->flags;
++		memcpy(item->name.name, e->name, slen);
++		item = KDBUS_ITEM_NEXT(item);
++	}
++
++	/* sanity check: the buffer should be completely written now */
++	WARN_ON((u8 *)item != (u8 *)mc->owned_names_items + size);
++
++	mc->valid |= KDBUS_ATTACH_NAMES;
++	return 0;
++}
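The function sizes the item buffer in a first pass and fills it in a second; the WARN_ON asserts that both passes agree byte-for-byte. Assuming KDBUS_ITEM_SIZE() rounds the item header plus payload up to the usual 8-byte kdbus item alignment (the 16-byte header below is an assumption, not taken from this patch), the arithmetic per owned name is:

    #include <stdint.h>
    #include <string.h>

    #define ALIGN8(x) (((x) + 7ULL) & ~7ULL)

    /* hypothetical mirror of KDBUS_ITEM_SIZE() for one OWNED_NAME item:
     * 16-byte item header + u64 flags + NUL-terminated name, padded */
    static uint64_t owned_name_item_size(const char *name)
    {
            return ALIGN8(16 + sizeof(uint64_t) + strlen(name) + 1);
    }

    /* owned_name_item_size("org.example.app") == 40: 16 + 8 + 16 is
     * already a multiple of 8, so no padding is added */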
++
++static int kdbus_meta_conn_collect_description(struct kdbus_meta_conn *mc,
++					       struct kdbus_conn *conn)
++{
++	if (!conn->description)
++		return 0;
++
++	mc->conn_description = kstrdup(conn->description, GFP_KERNEL);
++	if (!mc->conn_description)
++		return -ENOMEM;
++
++	mc->valid |= KDBUS_ATTACH_CONN_DESCRIPTION;
++	return 0;
++}
++
++/**
++ * kdbus_meta_conn_collect() - Collect connection metadata
++ * @mc:		Message metadata object
++ * @kmsg:	Kmsg to collect data from
++ * @conn:	Connection to collect data from
++ * @what:	Attach flags to collect
++ *
++ * This collects connection metadata from @kmsg and @conn and saves it in @mc.
++ *
++ * If KDBUS_ATTACH_NAMES is set in @what and @conn is non-NULL, the caller must
++ * hold the name-registry read-lock of conn->ep->bus->name_registry.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
++			    struct kdbus_kmsg *kmsg,
++			    struct kdbus_conn *conn,
++			    u64 what)
++{
++	int ret;
++
++	if (!mc || !(what & (KDBUS_ATTACH_TIMESTAMP |
++			     KDBUS_ATTACH_NAMES |
++			     KDBUS_ATTACH_CONN_DESCRIPTION)))
++		return 0;
++
++	mutex_lock(&mc->lock);
++
++	if (kmsg && (what & KDBUS_ATTACH_TIMESTAMP) &&
++	    !(mc->collected & KDBUS_ATTACH_TIMESTAMP)) {
++		kdbus_meta_conn_collect_timestamp(mc, kmsg);
++		mc->collected |= KDBUS_ATTACH_TIMESTAMP;
++	}
++
++	if (conn && (what & KDBUS_ATTACH_NAMES) &&
++	    !(mc->collected & KDBUS_ATTACH_NAMES)) {
++		ret = kdbus_meta_conn_collect_names(mc, conn);
++		if (ret < 0)
++			goto exit_unlock;
++		mc->collected |= KDBUS_ATTACH_NAMES;
++	}
++
++	if (conn && (what & KDBUS_ATTACH_CONN_DESCRIPTION) &&
++	    !(mc->collected & KDBUS_ATTACH_CONN_DESCRIPTION)) {
++		ret = kdbus_meta_conn_collect_description(mc, conn);
++		if (ret < 0)
++			goto exit_unlock;
++		mc->collected |= KDBUS_ATTACH_CONN_DESCRIPTION;
++	}
++
++	ret = 0;
++
++exit_unlock:
++	mutex_unlock(&mc->lock);
++	return ret;
++}
++
++/**
++ * kdbus_meta_export_prepare() - Prepare metadata for export
++ * @mp:		Process metadata, or NULL
++ * @mc:		Connection metadata, or NULL
++ * @mask:	Pointer to mask of KDBUS_ATTACH_* flags to export
++ * @sz:		Pointer to return the size needed by the metadata
++ *
++ * Does a conservative calculation of how much space metadata information
++ * will take up during export. It is 'conservative' because for string
++ * translations in namespaces, it uses the kernel namespaces, which yield
++ * the longest possible strings.
++ *
++ * The actual size consumed by kdbus_meta_export() may hence vary from the
++ * one reported here, but it is guaranteed never to be greater.
++ *
++ * Return: 0 on success, negative error number otherwise.
++ */
++int kdbus_meta_export_prepare(struct kdbus_meta_proc *mp,
++			      struct kdbus_meta_conn *mc,
++			      u64 *mask, size_t *sz)
++{
++	char *exe_pathname = NULL;
++	void *exe_page = NULL;
++	size_t size = 0;
++	u64 valid = 0;
++	int ret = 0;
++
++	if (mp) {
++		mutex_lock(&mp->lock);
++		valid |= mp->valid;
++		mutex_unlock(&mp->lock);
++	}
++
++	if (mc) {
++		mutex_lock(&mc->lock);
++		valid |= mc->valid;
++		mutex_unlock(&mc->lock);
++	}
++
++	*mask &= valid;
++	*mask &= kdbus_meta_attach_mask;
++
++	if (!*mask)
++		goto exit;
++
++	/* process metadata */
++
++	if (mp && (*mask & KDBUS_ATTACH_CREDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_creds));
++
++	if (mp && (*mask & KDBUS_ATTACH_PIDS))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_pids));
++
++	if (mp && (*mask & KDBUS_ATTACH_AUXGROUPS))
++		size += KDBUS_ITEM_SIZE(mp->n_auxgrps * sizeof(u64));
++
++	if (mp && (*mask & KDBUS_ATTACH_TID_COMM))
++		size += KDBUS_ITEM_SIZE(strlen(mp->tid_comm) + 1);
++
++	if (mp && (*mask & KDBUS_ATTACH_PID_COMM))
++		size += KDBUS_ITEM_SIZE(strlen(mp->pid_comm) + 1);
++
++	if (mp && (*mask & KDBUS_ATTACH_EXE)) {
++		exe_page = (void *)__get_free_page(GFP_TEMPORARY);
++		if (!exe_page) {
++			ret = -ENOMEM;
++			goto exit;
++		}
++
++		exe_pathname = d_path(&mp->exe_path, exe_page, PAGE_SIZE);
++		if (IS_ERR(exe_pathname)) {
++			ret = PTR_ERR(exe_pathname);
++			free_page((unsigned long)exe_page);
++			goto exit;
++		}
++
++		size += KDBUS_ITEM_SIZE(strlen(exe_pathname) + 1);
++		free_page((unsigned long)exe_page);
++	}
++
++	if (mp && (*mask & KDBUS_ATTACH_CMDLINE))
++		size += KDBUS_ITEM_SIZE(strlen(mp->cmdline) + 1);
++
++	if (mp && (*mask & KDBUS_ATTACH_CGROUP))
++		size += KDBUS_ITEM_SIZE(strlen(mp->cgroup) + 1);
++
++	if (mp && (*mask & KDBUS_ATTACH_CAPS))
++		size += KDBUS_ITEM_SIZE(sizeof(mp->caps));
++
++	if (mp && (*mask & KDBUS_ATTACH_SECLABEL))
++		size += KDBUS_ITEM_SIZE(strlen(mp->seclabel) + 1);
++
++	if (mp && (*mask & KDBUS_ATTACH_AUDIT))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_audit));
++
++	/* connection metadata */
++
++	if (mc && (*mask & KDBUS_ATTACH_NAMES))
++		size += mc->owned_names_size;
++
++	if (mc && (*mask & KDBUS_ATTACH_CONN_DESCRIPTION))
++		size += KDBUS_ITEM_SIZE(strlen(mc->conn_description) + 1);
++
++	if (mc && (*mask & KDBUS_ATTACH_TIMESTAMP))
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_timestamp));
++
++exit:
++	*sz = size;
++
++	return ret;
++}
++
++static int kdbus_meta_push_kvec(struct kvec *kvec,
++				struct kdbus_item_header *hdr,
++				u64 type, void *payload,
++				size_t payload_size, u64 *size)
++{
++	hdr->type = type;
++	hdr->size = KDBUS_ITEM_HEADER_SIZE + payload_size;
++	kdbus_kvec_set(kvec++, hdr, sizeof(*hdr), size);
++	kdbus_kvec_set(kvec++, payload, payload_size, size);
++	return 2 + !!kdbus_kvec_pad(kvec++, size);
++}
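Each item thus consumes up to three kvec slots: the header, the payload, and an optional padding vector when the payload leaves the running size off the 8-byte boundary; the return value (2 or 3) tells the caller how many slots were used. A one-function sketch of the presumed padding rule:

    #include <stdint.h>

    /* bytes needed to round 'size' up to the next 8-byte boundary;
     * e.g. pad_to_8(16 + 13) == 3, so a 13-byte payload spans 32 bytes */
    static uint64_t pad_to_8(uint64_t size)
    {
            return (8 - (size & 7)) & 7;
    }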
++
++/* This is equivalent to from_kuid_munged(), but maps INVALID_UID to itself */
++static uid_t kdbus_from_kuid_keep(kuid_t uid)
++{
++	return uid_valid(uid) ?
++		from_kuid_munged(current_user_ns(), uid) : ((uid_t)-1);
++}
++
++/* This is equivalent to from_kgid_munged(), but maps INVALID_GID to itself */
++static gid_t kdbus_from_kgid_keep(kgid_t gid)
++{
++	return gid_valid(gid) ?
++		from_kgid_munged(current_user_ns(), gid) : ((gid_t)-1);
++}
++
++/**
++ * kdbus_meta_export() - export information from metadata into a slice
++ * @mp:		Process metadata, or NULL
++ * @mc:		Connection metadata, or NULL
++ * @mask:	Mask of KDBUS_ATTACH_* flags to export
++ * @slice:	The slice to export to
++ * @offset:	The offset inside @slice to write to
++ * @real_size:	The real size the metadata consumed
++ *
++ * This function exports information from metadata into @slice at offset
++ * @offset inside that slice. Only information that is requested in @mask
++ * and that has been collected before is exported.
++ *
++ * In order to make sure not to write out of bounds, @mask must be the same
++ * value that kdbus_meta_export_prepare() previously stored in *mask. The
++ * function will, however, not necessarily write as many bytes as returned by
++ * kdbus_meta_export_prepare(); depending on the namespaces in question, it
++ * might use up less than that.
++ *
++ * All information will be translated using the current namespaces.
++ *
++ * Return: 0 on success, negative error number otherwise.
++ */
++int kdbus_meta_export(struct kdbus_meta_proc *mp,
++		      struct kdbus_meta_conn *mc,
++		      u64 mask,
++		      struct kdbus_pool_slice *slice,
++		      off_t offset,
++		      size_t *real_size)
++{
++	struct user_namespace *user_ns = current_user_ns();
++	struct kdbus_item_header item_hdr[13], *hdr;
++	char *exe_pathname = NULL;
++	struct kdbus_creds creds;
++	struct kdbus_pids pids;
++	void *exe_page = NULL;
++	struct kvec kvec[40];
++	u64 *auxgrps = NULL;
++	size_t cnt = 0;
++	u64 size = 0;
++	int ret = 0;
++
++	hdr = &item_hdr[0];
++
++	/*
++	 * TODO: We currently have no sane way of translating a set of caps
++	 * between different user namespaces. Until that changes, we have
++	 * to drop such items.
++	 */
++	if (mp && mp->caps_namespace != user_ns)
++		mask &= ~KDBUS_ATTACH_CAPS;
++
++	if (mask == 0) {
++		*real_size = 0;
++		return 0;
++	}
++
++	/* process metadata */
++
++	if (mp && (mask & KDBUS_ATTACH_CREDS)) {
++		creds.uid	= kdbus_from_kuid_keep(mp->uid);
++		creds.euid	= kdbus_from_kuid_keep(mp->euid);
++		creds.suid	= kdbus_from_kuid_keep(mp->suid);
++		creds.fsuid	= kdbus_from_kuid_keep(mp->fsuid);
++		creds.gid	= kdbus_from_kgid_keep(mp->gid);
++		creds.egid	= kdbus_from_kgid_keep(mp->egid);
++		creds.sgid	= kdbus_from_kgid_keep(mp->sgid);
++		creds.fsgid	= kdbus_from_kgid_keep(mp->fsgid);
++
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_CREDS,
++					    &creds, sizeof(creds), &size);
++	}
++
++	if (mp && (mask & KDBUS_ATTACH_PIDS)) {
++		pids.pid = pid_vnr(mp->tgid);
++		pids.tid = pid_vnr(mp->pid);
++		pids.ppid = pid_vnr(mp->ppid);
++
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_PIDS,
++					    &pids, sizeof(pids), &size);
++	}
++
++	if (mp && (mask & KDBUS_ATTACH_AUXGROUPS)) {
++		size_t payload_size = mp->n_auxgrps * sizeof(u64);
++		int i;
++
++		auxgrps = kmalloc(payload_size, GFP_KERNEL);
++		if (!auxgrps) {
++			ret = -ENOMEM;
++			goto exit;
++		}
++
++		for (i = 0; i < mp->n_auxgrps; i++)
++			auxgrps[i] = from_kgid_munged(user_ns, mp->auxgrps[i]);
++
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_AUXGROUPS,
++					    auxgrps, payload_size, &size);
++	}
++
++	if (mp && (mask & KDBUS_ATTACH_TID_COMM))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_TID_COMM, mp->tid_comm,
++					    strlen(mp->tid_comm) + 1, &size);
++
++	if (mp && (mask & KDBUS_ATTACH_PID_COMM))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_PID_COMM, mp->pid_comm,
++					    strlen(mp->pid_comm) + 1, &size);
++
++	if (mp && (mask & KDBUS_ATTACH_EXE)) {
++		struct path p;
++
++		/*
++		 * TODO: We need access to __d_path() so we can write the path
++		 * relative to conn->root_path. Once upstream, we need
++		 * EXPORT_SYMBOL(__d_path) or an equivalent of d_path() that
++		 * takes the root path directly. Until then, we drop this item
++		 * if the root-paths differ.
++		 */
++
++		get_fs_root(current->fs, &p);
++		if (path_equal(&p, &mp->root_path)) {
++			exe_page = (void *)__get_free_page(GFP_TEMPORARY);
++			if (!exe_page) {
++				path_put(&p);
++				ret = -ENOMEM;
++				goto exit;
++			}
++
++			exe_pathname = d_path(&mp->exe_path, exe_page,
++					      PAGE_SIZE);
++			if (IS_ERR(exe_pathname)) {
++				path_put(&p);
++				ret = PTR_ERR(exe_pathname);
++				goto exit;
++			}
++
++			cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++						    KDBUS_ITEM_EXE,
++						    exe_pathname,
++						    strlen(exe_pathname) + 1,
++						    &size);
++		}
++		path_put(&p);
++	}
++
++	if (mp && (mask & KDBUS_ATTACH_CMDLINE))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_CMDLINE, mp->cmdline,
++					    strlen(mp->cmdline) + 1, &size);
++
++	if (mp && (mask & KDBUS_ATTACH_CGROUP))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_CGROUP, mp->cgroup,
++					    strlen(mp->cgroup) + 1, &size);
++
++	if (mp && (mask & KDBUS_ATTACH_CAPS))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_CAPS, &mp->caps,
++					    sizeof(mp->caps), &size);
++
++	if (mp && (mask & KDBUS_ATTACH_SECLABEL))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_SECLABEL, mp->seclabel,
++					    strlen(mp->seclabel) + 1, &size);
++
++	if (mp && (mask & KDBUS_ATTACH_AUDIT)) {
++		struct kdbus_audit a = {
++			.loginuid = from_kuid(user_ns, mp->audit_loginuid),
++			.sessionid = mp->audit_sessionid,
++		};
++
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++, KDBUS_ITEM_AUDIT,
++					    &a, sizeof(a), &size);
++	}
++
++	/* connection metadata */
++
++	if (mc && (mask & KDBUS_ATTACH_NAMES))
++		kdbus_kvec_set(&kvec[cnt++], mc->owned_names_items,
++			       mc->owned_names_size, &size);
++
++	if (mc && (mask & KDBUS_ATTACH_CONN_DESCRIPTION))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_CONN_DESCRIPTION,
++					    mc->conn_description,
++					    strlen(mc->conn_description) + 1,
++					    &size);
++
++	if (mc && (mask & KDBUS_ATTACH_TIMESTAMP))
++		cnt += kdbus_meta_push_kvec(kvec + cnt, hdr++,
++					    KDBUS_ITEM_TIMESTAMP, &mc->ts,
++					    sizeof(mc->ts), &size);
++
++	ret = kdbus_pool_slice_copy_kvec(slice, offset, kvec, cnt, size);
++	*real_size = size;
++
++exit:
++	kfree(auxgrps);
++
++	if (exe_page)
++		free_page((unsigned long)exe_page);
++
++	return ret;
++}
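The intended calling sequence pairs the two functions: prepare filters @mask and computes an upper bound, the caller allocates a slice of that size, and export writes at most that much while reporting the bytes actually used. A hedged fragment (the kdbus_pool_slice_alloc() signature is taken from its use in kdbus_cmd_list() below):

    u64 mask = attach_flags;            /* caller-chosen KDBUS_ATTACH_* set */
    struct kdbus_pool_slice *slice;
    size_t bound, real;
    int ret;

    ret = kdbus_meta_export_prepare(mp, mc, &mask, &bound);
    if (ret < 0)
            return ret;

    slice = kdbus_pool_slice_alloc(pool, bound, false);
    if (IS_ERR(slice))
            return PTR_ERR(slice);

    /* writes <= bound bytes; 'real' reports the size actually consumed */
    ret = kdbus_meta_export(mp, mc, mask, slice, 0, &real);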
++
++/**
++ * kdbus_meta_calc_attach_flags() - calculate attach flags for a sender
++ *				    and a receiver
++ * @sender:		Sending connection
++ * @receiver:		Receiving connection
++ *
++ * Return: the attach flags both the sender and the receiver have opted-in
++ * for.
++ */
++u64 kdbus_meta_calc_attach_flags(const struct kdbus_conn *sender,
++				 const struct kdbus_conn *receiver)
++{
++	return atomic64_read(&sender->attach_flags_send) &
++	       atomic64_read(&receiver->attach_flags_recv);
++}
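The AND of the two opt-in masks means an item is attached only if the sender agreed to send it and the receiver asked for it. A standalone sketch with made-up bit values (the real KDBUS_ATTACH_* constants live in the uapi header, not in this file):

    #include <assert.h>
    #include <stdint.h>

    #define ATTACH_CREDS    (1ULL << 0)     /* hypothetical bit values */
    #define ATTACH_PIDS     (1ULL << 1)
    #define ATTACH_SECLABEL (1ULL << 2)

    int main(void)
    {
            uint64_t send = ATTACH_CREDS | ATTACH_PIDS;
            uint64_t recv = ATTACH_CREDS | ATTACH_SECLABEL;

            /* only items both sides opted in to survive the AND */
            assert((send & recv) == ATTACH_CREDS);
            return 0;
    }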
+diff --git a/ipc/kdbus/metadata.h b/ipc/kdbus/metadata.h
+new file mode 100644
+index 0000000..42c942b
+--- /dev/null
++++ b/ipc/kdbus/metadata.h
+@@ -0,0 +1,57 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_METADATA_H
++#define __KDBUS_METADATA_H
++
++#include <linux/kernel.h>
++
++struct kdbus_conn;
++struct kdbus_kmsg;
++struct kdbus_pool_slice;
++
++struct kdbus_meta_proc;
++struct kdbus_meta_conn;
++
++extern unsigned long long kdbus_meta_attach_mask;
++
++struct kdbus_meta_proc *kdbus_meta_proc_new(void);
++struct kdbus_meta_proc *kdbus_meta_proc_ref(struct kdbus_meta_proc *mp);
++struct kdbus_meta_proc *kdbus_meta_proc_unref(struct kdbus_meta_proc *mp);
++int kdbus_meta_proc_collect(struct kdbus_meta_proc *mp, u64 what);
++int kdbus_meta_proc_fake(struct kdbus_meta_proc *mp,
++			 const struct kdbus_creds *creds,
++			 const struct kdbus_pids *pids,
++			 const char *seclabel);
++
++struct kdbus_meta_conn *kdbus_meta_conn_new(void);
++struct kdbus_meta_conn *kdbus_meta_conn_ref(struct kdbus_meta_conn *mc);
++struct kdbus_meta_conn *kdbus_meta_conn_unref(struct kdbus_meta_conn *mc);
++int kdbus_meta_conn_collect(struct kdbus_meta_conn *mc,
++			    struct kdbus_kmsg *kmsg,
++			    struct kdbus_conn *conn,
++			    u64 what);
++
++int kdbus_meta_export_prepare(struct kdbus_meta_proc *mp,
++			      struct kdbus_meta_conn *mc,
++			      u64 *mask, size_t *sz);
++int kdbus_meta_export(struct kdbus_meta_proc *mp,
++		      struct kdbus_meta_conn *mc,
++		      u64 mask,
++		      struct kdbus_pool_slice *slice,
++		      off_t offset, size_t *real_size);
++u64 kdbus_meta_calc_attach_flags(const struct kdbus_conn *sender,
++				 const struct kdbus_conn *receiver);
++
++#endif
+diff --git a/ipc/kdbus/names.c b/ipc/kdbus/names.c
+new file mode 100644
+index 0000000..657008e
+--- /dev/null
++++ b/ipc/kdbus/names.c
+@@ -0,0 +1,772 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/ctype.h>
++#include <linux/fs.h>
++#include <linux/hash.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/uio.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "handle.h"
++#include "item.h"
++#include "names.h"
++#include "notify.h"
++#include "policy.h"
++
++struct kdbus_name_pending {
++	u64 flags;
++	struct kdbus_conn *conn;
++	struct kdbus_name_entry *name;
++	struct list_head conn_entry;
++	struct list_head name_entry;
++};
++
++static int kdbus_name_pending_new(struct kdbus_name_entry *e,
++				  struct kdbus_conn *conn, u64 flags)
++{
++	struct kdbus_name_pending *p;
++
++	kdbus_conn_assert_active(conn);
++
++	p = kmalloc(sizeof(*p), GFP_KERNEL);
++	if (!p)
++		return -ENOMEM;
++
++	p->flags = flags;
++	p->conn = conn;
++	p->name = e;
++	list_add_tail(&p->conn_entry, &conn->names_queue_list);
++	list_add_tail(&p->name_entry, &e->queue);
++
++	return 0;
++}
++
++static void kdbus_name_pending_free(struct kdbus_name_pending *p)
++{
++	if (!p)
++		return;
++
++	list_del(&p->name_entry);
++	list_del(&p->conn_entry);
++	kfree(p);
++}
++
++static struct kdbus_name_entry *
++kdbus_name_entry_new(struct kdbus_name_registry *r, u32 hash, const char *name)
++{
++	struct kdbus_name_entry *e;
++	size_t namelen;
++
++	namelen = strlen(name);
++
++	e = kmalloc(sizeof(*e) + namelen + 1, GFP_KERNEL);
++	if (!e)
++		return ERR_PTR(-ENOMEM);
++
++	e->name_id = ++r->name_seq_last;
++	e->flags = 0;
++	e->conn = NULL;
++	e->activator = NULL;
++	INIT_LIST_HEAD(&e->queue);
++	INIT_LIST_HEAD(&e->conn_entry);
++	hash_add(r->entries_hash, &e->hentry, hash);
++	memcpy(e->name, name, namelen + 1);
++
++	return e;
++}
++
++static void kdbus_name_entry_free(struct kdbus_name_entry *e)
++{
++	if (!e)
++		return;
++
++	WARN_ON(!list_empty(&e->conn_entry));
++	WARN_ON(!list_empty(&e->queue));
++	WARN_ON(e->activator);
++	WARN_ON(e->conn);
++
++	hash_del(&e->hentry);
++	kfree(e);
++}
++
++static void kdbus_name_entry_set_owner(struct kdbus_name_entry *e,
++				       struct kdbus_conn *conn, u64 flags)
++{
++	WARN_ON(e->conn);
++
++	e->conn = kdbus_conn_ref(conn);
++	e->flags = flags;
++	atomic_inc(&conn->name_count);
++	list_add_tail(&e->conn_entry, &e->conn->names_list);
++}
++
++static void kdbus_name_entry_remove_owner(struct kdbus_name_entry *e)
++{
++	WARN_ON(!e->conn);
++
++	list_del_init(&e->conn_entry);
++	atomic_dec(&e->conn->name_count);
++	e->flags = 0;
++	e->conn = kdbus_conn_unref(e->conn);
++}
++
++static void kdbus_name_entry_replace_owner(struct kdbus_name_entry *e,
++					   struct kdbus_conn *conn, u64 flags)
++{
++	if (WARN_ON(!e->conn) || WARN_ON(conn == e->conn))
++		return;
++
++	kdbus_notify_name_change(conn->ep->bus, KDBUS_ITEM_NAME_CHANGE,
++				 e->conn->id, conn->id,
++				 e->flags, flags, e->name);
++	kdbus_name_entry_remove_owner(e);
++	kdbus_name_entry_set_owner(e, conn, flags);
++}
++
++/**
++ * kdbus_name_is_valid() - check if a name is valid
++ * @p:			The name to check
++ * @allow_wildcard:	Whether or not to allow a wildcard name
++ *
++ * A name is valid if all of the following criteria are met:
++ *
++ *  - The name has two or more elements separated by a period ('.') character.
++ *  - All elements must contain at least one character.
++ *  - Each element must only contain the ASCII characters "[A-Z][a-z][0-9]_-"
++ *    and must not begin with a digit.
++ *  - The name must not exceed KDBUS_NAME_MAX_LEN.
++ *  - If @allow_wildcard is true, the name may end on '.*'
++ */
++bool kdbus_name_is_valid(const char *p, bool allow_wildcard)
++{
++	bool dot, found_dot = false;
++	const char *q;
++
++	for (dot = true, q = p; *q; q++) {
++		if (*q == '.') {
++			if (dot)
++				return false;
++
++			found_dot = true;
++			dot = true;
++		} else {
++			bool good;
++
++			good = isalpha(*q) || (!dot && isdigit(*q)) ||
++				*q == '_' || *q == '-' ||
++				(allow_wildcard && dot &&
++					*q == '*' && *(q + 1) == '\0');
++
++			if (!good)
++				return false;
++
++			dot = false;
++		}
++	}
++
++	if (q - p > KDBUS_NAME_MAX_LEN)
++		return false;
++
++	if (dot)
++		return false;
++
++	if (!found_dot)
++		return false;
++
++	return true;
++}
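A few concrete inputs against these rules (a sketch that would link against the function above; the names are examples only):

    #include <assert.h>
    #include <stdbool.h>

    bool kdbus_name_is_valid(const char *p, bool allow_wildcard);

    int main(void)
    {
            assert(kdbus_name_is_valid("org.example.app", false));
            assert(!kdbus_name_is_valid("org", false));          /* no dot */
            assert(!kdbus_name_is_valid("org..app", false));     /* empty element */
            assert(!kdbus_name_is_valid("org.1app", false));     /* element starts with digit */
            assert(kdbus_name_is_valid("org.example.*", true));  /* wildcard allowed */
            assert(!kdbus_name_is_valid("org.example.*", false));
            return 0;
    }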
++
++/**
++ * kdbus_name_registry_new() - create a new name registry
++ *
++ * Return: a new kdbus_name_registry on success, ERR_PTR on failure.
++ */
++struct kdbus_name_registry *kdbus_name_registry_new(void)
++{
++	struct kdbus_name_registry *r;
++
++	r = kmalloc(sizeof(*r), GFP_KERNEL);
++	if (!r)
++		return ERR_PTR(-ENOMEM);
++
++	hash_init(r->entries_hash);
++	init_rwsem(&r->rwlock);
++	r->name_seq_last = 0;
++
++	return r;
++}
++
++/**
++ * kdbus_name_registry_free() - free a name registry
++ * @reg:		The name registry, may be %NULL
++ *
++ * Cleanup the name registry's internal structures.
++ */
++void kdbus_name_registry_free(struct kdbus_name_registry *reg)
++{
++	if (!reg)
++		return;
++
++	WARN_ON(!hash_empty(reg->entries_hash));
++	kfree(reg);
++}
++
++static struct kdbus_name_entry *
++kdbus_name_find(struct kdbus_name_registry *reg, u32 hash, const char *name)
++{
++	struct kdbus_name_entry *e;
++
++	lockdep_assert_held(&reg->rwlock);
++
++	hash_for_each_possible(reg->entries_hash, e, hentry, hash)
++		if (strcmp(e->name, name) == 0)
++			return e;
++
++	return NULL;
++}
++
++/**
++ * kdbus_name_lookup_unlocked() - lookup name in registry
++ * @reg:		name registry
++ * @name:		name to lookup
++ *
++ * This looks up @name in the given name-registry and returns the
++ * kdbus_name_entry object. The caller must hold the registry-lock and must not
++ * access the returned object after releasing the lock.
++ *
++ * Return: Pointer to name-entry, or NULL if not found.
++ */
++struct kdbus_name_entry *
++kdbus_name_lookup_unlocked(struct kdbus_name_registry *reg, const char *name)
++{
++	return kdbus_name_find(reg, kdbus_strhash(name), name);
++}
++
++/**
++ * kdbus_name_acquire() - acquire a name
++ * @reg:		The name registry
++ * @conn:		The connection to pin this entry to
++ * @name:		The name to acquire
++ * @flags:		Acquisition flags (KDBUS_NAME_*)
++ * @return_flags:	Pointer to return flags for the acquired name
++ *			(KDBUS_NAME_*), may be %NULL
++ *
++ * Callers must ensure that @conn is either a privileged bus user or has
++ * sufficient privileges in the policy-db to own the well-known name @name.
++ *
++ * Return: 0 on success, negative error number on failure.
++ */
++int kdbus_name_acquire(struct kdbus_name_registry *reg,
++		       struct kdbus_conn *conn, const char *name,
++		       u64 flags, u64 *return_flags)
++{
++	struct kdbus_name_entry *e;
++	u64 rflags = 0;
++	int ret = 0;
++	u32 hash;
++
++	kdbus_conn_assert_active(conn);
++
++	down_write(&reg->rwlock);
++
++	if (!kdbus_conn_policy_own_name(conn, current_cred(), name)) {
++		ret = -EPERM;
++		goto exit_unlock;
++	}
++
++	hash = kdbus_strhash(name);
++	e = kdbus_name_find(reg, hash, name);
++	if (!e) {
++		/* claim new name */
++
++		if (conn->activator_of) {
++			ret = -EINVAL;
++			goto exit_unlock;
++		}
++
++		e = kdbus_name_entry_new(reg, hash, name);
++		if (IS_ERR(e)) {
++			ret = PTR_ERR(e);
++			goto exit_unlock;
++		}
++
++		if (kdbus_conn_is_activator(conn)) {
++			e->activator = kdbus_conn_ref(conn);
++			conn->activator_of = e;
++		}
++
++		kdbus_name_entry_set_owner(e, conn, flags);
++		kdbus_notify_name_change(e->conn->ep->bus, KDBUS_ITEM_NAME_ADD,
++					 0, e->conn->id, 0, e->flags, e->name);
++	} else if (e->conn == conn || e == conn->activator_of) {
++		/* connection already owns that name */
++		ret = -EALREADY;
++	} else if (kdbus_conn_is_activator(conn)) {
++		/* activator claims existing name */
++
++		if (conn->activator_of) {
++			ret = -EINVAL; /* multiple names not allowed */
++		} else if (e->activator) {
++			ret = -EEXIST; /* only one activator per name */
++		} else {
++			e->activator = kdbus_conn_ref(conn);
++			conn->activator_of = e;
++		}
++	} else if (e->flags & KDBUS_NAME_ACTIVATOR) {
++		/* claim name of an activator */
++
++		kdbus_conn_move_messages(conn, e->activator, 0);
++		kdbus_name_entry_replace_owner(e, conn, flags);
++	} else if ((flags & KDBUS_NAME_REPLACE_EXISTING) &&
++		   (e->flags & KDBUS_NAME_ALLOW_REPLACEMENT)) {
++		/* claim name of a previous owner */
++
++		if (e->flags & KDBUS_NAME_QUEUE) {
++			/* move owner back to queue if they asked for it */
++			ret = kdbus_name_pending_new(e, e->conn, e->flags);
++			if (ret < 0)
++				goto exit_unlock;
++		}
++
++		kdbus_name_entry_replace_owner(e, conn, flags);
++	} else if (flags & KDBUS_NAME_QUEUE) {
++		/* add to waiting-queue of the name */
++
++		ret = kdbus_name_pending_new(e, conn, flags);
++		if (ret >= 0)
++			/* tell the caller that we queued it */
++			rflags |= KDBUS_NAME_IN_QUEUE;
++	} else {
++		/* the name is busy, return a failure */
++		ret = -EEXIST;
++	}
++
++	if (ret == 0 && return_flags)
++		*return_flags = rflags;
++
++exit_unlock:
++	up_write(&reg->rwlock);
++	kdbus_notify_flush(conn->ep->bus);
++	return ret;
++}
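From a caller's perspective the interesting split is between "acquired now" and "queued": both return 0, and only the KDBUS_NAME_IN_QUEUE bit in @return_flags distinguishes them. A hedged caller fragment:

    u64 rflags = 0;
    int ret;

    ret = kdbus_name_acquire(reg, conn, "org.example.app",
                             KDBUS_NAME_QUEUE, &rflags);
    if (ret == -EALREADY) {
            /* we already own the name */
    } else if (ret == 0 && (rflags & KDBUS_NAME_IN_QUEUE)) {
            /* name is busy; we now wait in its queue */
    } else if (ret == 0) {
            /* name acquired immediately */
    }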
++
++static void kdbus_name_release_unlocked(struct kdbus_name_registry *reg,
++					struct kdbus_name_entry *e)
++{
++	struct kdbus_name_pending *p;
++
++	lockdep_assert_held(&reg->rwlock);
++
++	p = list_first_entry_or_null(&e->queue, struct kdbus_name_pending,
++				     name_entry);
++
++	if (p) {
++		/* give it to first active waiter in the queue */
++		kdbus_name_entry_replace_owner(e, p->conn, p->flags);
++		kdbus_name_pending_free(p);
++	} else if (e->activator && e->activator != e->conn) {
++		/* hand it back to an active activator connection */
++		kdbus_conn_move_messages(e->activator, e->conn, e->name_id);
++		kdbus_name_entry_replace_owner(e, e->activator,
++					       KDBUS_NAME_ACTIVATOR);
++	} else {
++		/* release the name */
++		kdbus_notify_name_change(e->conn->ep->bus,
++					 KDBUS_ITEM_NAME_REMOVE,
++					 e->conn->id, 0, e->flags, 0, e->name);
++		kdbus_name_entry_remove_owner(e);
++		kdbus_name_entry_free(e);
++	}
++}
++
++static int kdbus_name_release(struct kdbus_name_registry *reg,
++			      struct kdbus_conn *conn,
++			      const char *name)
++{
++	struct kdbus_name_pending *p;
++	struct kdbus_name_entry *e;
++	int ret = 0;
++
++	down_write(&reg->rwlock);
++	e = kdbus_name_find(reg, kdbus_strhash(name), name);
++	if (!e) {
++		ret = -ESRCH;
++	} else if (e->conn == conn) {
++		kdbus_name_release_unlocked(reg, e);
++	} else {
++		ret = -EADDRINUSE;
++		list_for_each_entry(p, &e->queue, name_entry) {
++			if (p->conn == conn) {
++				kdbus_name_pending_free(p);
++				ret = 0;
++				break;
++			}
++		}
++	}
++	up_write(&reg->rwlock);
++
++	kdbus_notify_flush(conn->ep->bus);
++	return ret;
++}
++
++/**
++ * kdbus_name_release_all() - remove all name entries of a given connection
++ * @reg:		name registry
++ * @conn:		connection
++ */
++void kdbus_name_release_all(struct kdbus_name_registry *reg,
++			    struct kdbus_conn *conn)
++{
++	struct kdbus_name_pending *p;
++	struct kdbus_conn *activator = NULL;
++	struct kdbus_name_entry *e;
++
++	down_write(&reg->rwlock);
++
++	if (kdbus_conn_is_activator(conn)) {
++		activator = conn->activator_of->activator;
++		conn->activator_of->activator = NULL;
++	}
++
++	while ((p = list_first_entry_or_null(&conn->names_queue_list,
++					     struct kdbus_name_pending,
++					     conn_entry)))
++		kdbus_name_pending_free(p);
++	while ((e = list_first_entry_or_null(&conn->names_list,
++					     struct kdbus_name_entry,
++					     conn_entry)))
++		kdbus_name_release_unlocked(reg, e);
++
++	up_write(&reg->rwlock);
++
++	kdbus_conn_unref(activator);
++	kdbus_notify_flush(conn->ep->bus);
++}
++
++/**
++ * kdbus_cmd_name_acquire() - handle KDBUS_CMD_NAME_ACQUIRE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_name_acquire(struct kdbus_conn *conn, void __user *argp)
++{
++	const char *item_name;
++	struct kdbus_cmd *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_NAME, .mandatory = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_NAME_REPLACE_EXISTING |
++				 KDBUS_NAME_ALLOW_REPLACEMENT |
++				 KDBUS_NAME_QUEUE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	item_name = argv[1].item->str;
++	if (!kdbus_name_is_valid(item_name, false)) {
++		ret = -EINVAL;
++		goto exit;
++	}
++
++	/*
++	 * Do atomic_inc_return here to reserve our slot, then decrement
++	 * it before returning.
++	 */
++	if (atomic_inc_return(&conn->name_count) > KDBUS_CONN_MAX_NAMES) {
++		ret = -E2BIG;
++		goto exit_dec;
++	}
++
++	ret = kdbus_name_acquire(conn->ep->bus->name_registry, conn, item_name,
++				 cmd->flags, &cmd->return_flags);
++
++exit_dec:
++	atomic_dec(&conn->name_count);
++exit:
++	return kdbus_args_clear(&args, ret);
++}
++
++/**
++ * kdbus_cmd_name_release() - handle KDBUS_CMD_NAME_RELEASE
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_cmd *cmd;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++		{ .type = KDBUS_ITEM_NAME, .mandatory = true },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	if (!kdbus_conn_is_ordinary(conn))
++		return -EOPNOTSUPP;
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	ret = kdbus_name_release(conn->ep->bus->name_registry, conn,
++				 argv[1].item->str);
++	return kdbus_args_clear(&args, ret);
++}
++
++static int kdbus_list_write(struct kdbus_conn *conn,
++			    struct kdbus_conn *c,
++			    struct kdbus_pool_slice *slice,
++			    size_t *pos,
++			    struct kdbus_name_entry *e,
++			    bool write)
++{
++	struct kvec kvec[4];
++	size_t cnt = 0;
++	int ret;
++
++	/* info header */
++	struct kdbus_info info = {
++		.size = 0,
++		.id = c->id,
++		.flags = c->flags,
++	};
++
++	/* fake the header of a kdbus_name item */
++	struct {
++		u64 size;
++		u64 type;
++		u64 flags;
++	} h = {};
++
++	if (e && !kdbus_conn_policy_see_name_unlocked(conn, current_cred(),
++						      e->name))
++		return 0;
++
++	kdbus_kvec_set(&kvec[cnt++], &info, sizeof(info), &info.size);
++
++	/* append name */
++	if (e) {
++		size_t slen = strlen(e->name) + 1;
++
++		h.size = offsetof(struct kdbus_item, name.name) + slen;
++		h.type = KDBUS_ITEM_OWNED_NAME;
++		h.flags = e->flags;
++
++		kdbus_kvec_set(&kvec[cnt++], &h, sizeof(h), &info.size);
++		kdbus_kvec_set(&kvec[cnt++], e->name, slen, &info.size);
++		cnt += !!kdbus_kvec_pad(&kvec[cnt], &info.size);
++	}
++
++	if (write) {
++		ret = kdbus_pool_slice_copy_kvec(slice, *pos, kvec,
++						 cnt, info.size);
++		if (ret < 0)
++			return ret;
++	}
++
++	*pos += info.size;
++	return 0;
++}
++
++static int kdbus_list_all(struct kdbus_conn *conn, u64 flags,
++			  struct kdbus_pool_slice *slice,
++			  size_t *pos, bool write)
++{
++	struct kdbus_conn *c;
++	size_t p = *pos;
++	int ret, i;
++
++	hash_for_each(conn->ep->bus->conn_hash, i, c, hentry) {
++		bool added = false;
++
++		/* skip monitors */
++		if (kdbus_conn_is_monitor(c))
++			continue;
++
++		/* skip activators */
++		if (!(flags & KDBUS_LIST_ACTIVATORS) &&
++		    kdbus_conn_is_activator(c))
++			continue;
++
++		/* all names the connection owns */
++		if (flags & (KDBUS_LIST_NAMES | KDBUS_LIST_ACTIVATORS)) {
++			struct kdbus_name_entry *e;
++
++			list_for_each_entry(e, &c->names_list, conn_entry) {
++				struct kdbus_conn *a = e->activator;
++
++				if ((flags & KDBUS_LIST_ACTIVATORS) &&
++				    a && a != c) {
++					ret = kdbus_list_write(conn, a, slice,
++							       &p, e, write);
++					if (ret < 0) {
++						mutex_unlock(&c->lock);
++						return ret;
++					}
++
++					added = true;
++				}
++
++				if (flags & KDBUS_LIST_NAMES ||
++				    kdbus_conn_is_activator(c)) {
++					ret = kdbus_list_write(conn, c, slice,
++							       &p, e, write);
++					if (ret < 0) {
++						mutex_unlock(&c->lock);
++						return ret;
++					}
++
++					added = true;
++				}
++			}
++		}
++
++		/* queue of names the connection is currently waiting for */
++		if (flags & KDBUS_LIST_QUEUED) {
++			struct kdbus_name_pending *q;
++
++			list_for_each_entry(q, &c->names_queue_list,
++					    conn_entry) {
++				ret = kdbus_list_write(conn, c, slice, &p,
++						       q->name, write);
++				if (ret < 0) {
++					mutex_unlock(&c->lock);
++					return ret;
++				}
++
++				added = true;
++			}
++		}
++
++		/* nothing added so far, just add the unique ID */
++		if (!added && flags & KDBUS_LIST_UNIQUE) {
++			ret = kdbus_list_write(conn, c, slice, &p, NULL, write);
++			if (ret < 0)
++				return ret;
++		}
++	}
++
++	*pos = p;
++	return 0;
++}
++
++/**
++ * kdbus_cmd_list() - handle KDBUS_CMD_LIST
++ * @conn:		connection to operate on
++ * @argp:		command payload
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp)
++{
++	struct kdbus_name_registry *reg = conn->ep->bus->name_registry;
++	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_cmd_list *cmd;
++	size_t pos, size;
++	int ret;
++
++	struct kdbus_arg argv[] = {
++		{ .type = KDBUS_ITEM_NEGOTIATE },
++	};
++	struct kdbus_args args = {
++		.allowed_flags = KDBUS_FLAG_NEGOTIATE |
++				 KDBUS_LIST_UNIQUE |
++				 KDBUS_LIST_NAMES |
++				 KDBUS_LIST_ACTIVATORS |
++				 KDBUS_LIST_QUEUED,
++		.argv = argv,
++		.argc = ARRAY_SIZE(argv),
++	};
++
++	ret = kdbus_args_parse(&args, argp, &cmd);
++	if (ret != 0)
++		return ret;
++
++	/* lock order: domain -> bus -> ep -> names -> conn */
++	down_read(&reg->rwlock);
++	down_read(&conn->ep->bus->conn_rwlock);
++	down_read(&conn->ep->policy_db.entries_rwlock);
++
++	/* size of records */
++	size = 0;
++	ret = kdbus_list_all(conn, cmd->flags, NULL, &size, false);
++	if (ret < 0)
++		goto exit_unlock;
++
++	if (size == 0) {
++		kdbus_pool_publish_empty(conn->pool, &cmd->offset,
++					 &cmd->list_size);
++	} else {
++		slice = kdbus_pool_slice_alloc(conn->pool, size, false);
++		if (IS_ERR(slice)) {
++			ret = PTR_ERR(slice);
++			slice = NULL;
++			goto exit_unlock;
++		}
++
++		/* copy the records */
++		pos = 0;
++		ret = kdbus_list_all(conn, cmd->flags, slice, &pos, true);
++		if (ret < 0)
++			goto exit_unlock;
++
++		WARN_ON(pos != size);
++		kdbus_pool_slice_publish(slice, &cmd->offset, &cmd->list_size);
++	}
++
++	if (kdbus_member_set_user(&cmd->offset, argp, typeof(*cmd), offset) ||
++	    kdbus_member_set_user(&cmd->list_size, argp,
++				  typeof(*cmd), list_size))
++		ret = -EFAULT;
++
++exit_unlock:
++	up_read(&conn->ep->policy_db.entries_rwlock);
++	up_read(&conn->ep->bus->conn_rwlock);
++	up_read(&reg->rwlock);
++	kdbus_pool_slice_release(slice);
++	return kdbus_args_clear(&args, ret);
++}
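kdbus_cmd_list() runs kdbus_list_all() twice under the same locks: a dry run (write=false) to size the pool slice, then a write pass that must emit exactly the measured byte count, which the WARN_ON verifies. A standalone sketch of the same measure-then-fill pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void emit(char *buf, size_t *pos, const char *rec, int write)
    {
            size_t len = strlen(rec);

            if (write)
                    memcpy(buf + *pos, rec, len);
            *pos += len;                    /* both passes advance the cursor */
    }

    int main(void)
    {
            const char *recs[] = { "conn-1;", "conn-2;" };
            size_t size = 0, pos = 0, i;
            char *buf;

            for (i = 0; i < 2; i++)         /* pass 1: measure only */
                    emit(NULL, &size, recs[i], 0);

            buf = malloc(size + 1);
            if (!buf)
                    return 1;

            for (i = 0; i < 2; i++)         /* pass 2: fill the buffer */
                    emit(buf, &pos, recs[i], 1);
            buf[pos] = '\0';

            /* pos == size must hold, mirroring the WARN_ON above */
            printf("%zu bytes: %s\n", size, buf);
            free(buf);
            return 0;
    }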
+diff --git a/ipc/kdbus/names.h b/ipc/kdbus/names.h
+new file mode 100644
+index 0000000..3dd2589
+--- /dev/null
++++ b/ipc/kdbus/names.h
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_NAMES_H
++#define __KDBUS_NAMES_H
++
++#include <linux/hashtable.h>
++#include <linux/rwsem.h>
++
++/**
++ * struct kdbus_name_registry - names registered for a bus
++ * @entries_hash:	Map of entries
++ * @rwlock:		Registry data lock
++ * @name_seq_last:	Last used sequence number to assign to a name entry
++ */
++struct kdbus_name_registry {
++	DECLARE_HASHTABLE(entries_hash, 8);
++	struct rw_semaphore rwlock;
++	u64 name_seq_last;
++};
++
++/**
++ * struct kdbus_name_entry - well-known name entry
++ * @name_id:		Sequence number of name entry to be able to uniquely
++ *			identify a name over its registration lifetime
++ * @flags:		KDBUS_NAME_* flags
++ * @conn:		Connection owning the name
++ * @activator:		Connection of the activator queuing incoming messages
++ * @queue:		List of queued connections
++ * @conn_entry:		Entry in connection
++ * @hentry:		Entry in registry map
++ * @name:		The well-known name
++ */
++struct kdbus_name_entry {
++	u64 name_id;
++	u64 flags;
++	struct kdbus_conn *conn;
++	struct kdbus_conn *activator;
++	struct list_head queue;
++	struct list_head conn_entry;
++	struct hlist_node hentry;
++	char name[];
++};
++
++bool kdbus_name_is_valid(const char *p, bool allow_wildcard);
++
++struct kdbus_name_registry *kdbus_name_registry_new(void);
++void kdbus_name_registry_free(struct kdbus_name_registry *reg);
++
++struct kdbus_name_entry *
++kdbus_name_lookup_unlocked(struct kdbus_name_registry *reg, const char *name);
++
++int kdbus_name_acquire(struct kdbus_name_registry *reg,
++		       struct kdbus_conn *conn, const char *name,
++		       u64 flags, u64 *return_flags);
++void kdbus_name_release_all(struct kdbus_name_registry *reg,
++			    struct kdbus_conn *conn);
++
++int kdbus_cmd_name_acquire(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_name_release(struct kdbus_conn *conn, void __user *argp);
++int kdbus_cmd_list(struct kdbus_conn *conn, void __user *argp);
++
++#endif
+diff --git a/ipc/kdbus/node.c b/ipc/kdbus/node.c
+new file mode 100644
+index 0000000..520df00
+--- /dev/null
++++ b/ipc/kdbus/node.c
+@@ -0,0 +1,910 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/atomic.h>
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/kdev_t.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++
++#include "bus.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "fs.h"
++#include "handle.h"
++#include "node.h"
++#include "util.h"
++
++/**
++ * DOC: kdbus nodes
++ *
++ * Nodes unify lifetime management across exposed kdbus objects and provide a
++ * hierarchy. Each kdbus object that might be exposed to user-space has a
++ * kdbus_node object embedded and is linked into the hierarchy. Each node can
++ * have any number (0-n) of child nodes linked. Each child retains a reference
++ * to its parent node. For root-nodes, the parent is NULL.
++ *
++ * Each node object goes through a bunch of states during its lifetime:
++ *     * NEW
++ *       * LINKED    (can be skipped by NEW->FREED transition)
++ *         * ACTIVE  (can be skipped by LINKED->INACTIVE transition)
++ *       * INACTIVE
++ *       * DRAINED
++ *     * FREED
++ *
++ * Each node is allocated by the caller and initialized via kdbus_node_init().
++ * This never fails and sets the object into state NEW. From now on, ref-counts
++ * on the node manage its lifetime. During init, the ref-count is set to 1. Once
++ * it drops to 0, the node goes to state FREED and the node->free_cb() callback
++ * is called to deallocate any memory.
++ *
++ * After initializing a node, you usually link it into the hierarchy. You need
++ * to provide a parent node and a name. The node will be linked as child to the
++ * parent and a globally unique ID is assigned to the child. The name of the
++ * child must be unique for all children of this parent. Otherwise, linking the
++ * child will fail with -EEXIST.
++ * Note that the child is not marked active, yet. Admittedly, it prevents any
++ * other node from being linked with the same name (thus, it reserves that
++ * name), but any child-lookup (via name or unique ID) will never return this
++ * child unless it has been marked active.
++ *
++ * Once successfully linked, you can use kdbus_node_activate() to activate a
++ * child. This will mark the child active. This state can be skipped by directly
++ * deactivating the child via kdbus_node_deactivate() (see below).
++ * By activating a child, you enable any lookups on this child to succeed from
++ * now on. Furthermore, any code that got its hands on a reference to the node,
++ * can from now on "acquire" the node.
++ *
++ *     Active References (or: 'acquiring' and 'releasing' a node)
++ *     Additionally to normal object references, nodes support something we call
++ *     "active references". An active reference can be acquired via
++ *     kdbus_node_acquire() and released via kdbus_node_release(). A caller
++ *     _must_ own a normal object reference whenever calling those functions.
++ *     Unlike object references, acquiring an active reference can fail (by
++ *     returning 'false' from kdbus_node_acquire()). An active reference can
++ *     only be acquired if the node is marked active. If it is not marked
++ *     active, yet, or if it was already deactivated, no more active references
++ *     can be acquired, ever!
++ *     Active references are used to track tasks working on a node. Whenever a
++ *     task enters kernel-space to perform an action on a node, it acquires an
++ *     active reference, performs the action and releases the reference again.
++ *     While holding an active reference, the node is guaranteed to stay active.
++ *     If the node is deactivated in parallel, the node is marked as
++ *     deactivated, then we wait for all active references to be dropped, before
++ *     we finally proceed with any cleanups. That is, if you hold an active
++ *     reference to a node, any resources that are bound to the "active" state
++ *     are guaranteed to stay accessible until you release your reference.
++ *
++ *     Active-references are very similar to rw-locks, where acquiring a node is
++ *     equal to try-read-lock and releasing to read-unlock. Deactivating a node
++ *     means write-lock and never releasing it again.
++ *     Unlike rw-locks, the 'active reference' concept is more versatile and
++ *     avoids unusual rw-lock usage (never releasing a write-lock...).
++ *
++ *     It is safe to acquire multiple active-references recursively. But you
++ *     need to check the return value of kdbus_node_acquire() on _each_ call. It
++ *     may stop granting references at _any_ time.
++ *
++ *     You're free to perform any operations you want while holding an active
++ *     reference, except sleeping for an indefinite period. Sleeping for a fixed
++ *     amount of time is fine, but you usually should not wait on wait-queues
++ *     without a timeout.
++ *     For example, if you wait for I/O to happen, you should gather all data
++ *     and schedule the I/O operation, then release your active reference and
++ *     wait for it to complete. Then try to acquire a new reference. If it
++ *     fails, perform any cleanup (the node is now dead). Otherwise, you can
++ *     finish your operation.
++ *
++ * All nodes can be deactivated via kdbus_node_deactivate() at any time. You can
++ * call this multiple times, even in parallel or on nodes that were never
++ * linked, and it will just work. The only restriction is, you must not hold an
++ * active reference when calling kdbus_node_deactivate().
++ * By deactivating a node, it is immediately marked inactive. Then, we wait for
++ * all active references to be released (called 'draining' the node). This
++ * shouldn't take very long as we don't perform long-lasting operations while
++ * holding an active reference. Note that once the node is marked inactive, no
++ * new active references can be acquired.
++ * Once all active references are dropped, the node is considered 'drained'. Now
++ * kdbus_node_deactivate() is called on each child of the node before we
++ * continue deactivating our node. That is, once all children are entirely
++ * deactivated, we call ->release_cb() of our node. ->release_cb() can release
++ * any resources on that node which are bound to the "active" state of a node.
++ * When done, we unlink the node from its parent rb-tree, mark it as
++ * 'released' and return.
++ * If kdbus_node_deactivate() is called multiple times (even in parallel), all
++ * but one caller will just wait until the node is fully deactivated. That is,
++ * one random caller of kdbus_node_deactivate() is selected to call
++ * ->release_cb() and clean up the node. Only once all this is done, all other
++ * callers will return from kdbus_node_deactivate(). That is, it doesn't matter
++ * whether you're the selected caller or not, it will only return after
++ * everything is fully done.
++ *
++ * When a node is activated, we acquire a normal object reference to the node.
++ * This reference is dropped after deactivation is fully done (and only if the
++ * node really was activated). This allows callers to link+activate a child node
++ * and then drop all refs. The node will be deactivated together with the
++ * parent, and then be freed when this reference is dropped.
++ *
++ * Currently, nodes provide a bunch of resources that external code can use
++ * directly. This includes:
++ *
++ *     * node->waitq: Each node has its own wait-queue that is used to manage
++ *                    the 'active' state. When a node is deactivated, we wait on
++ *                    this queue until all active refs are dropped. Analogously,
++ *                    when you release an active reference on a deactivated
++ *                    node, and the active ref-count drops to 0, we wake up a
++ *                    single thread on this queue. Furthermore, once the
++ *                    ->release_cb() callback finished, we wake up all waiters.
++ *                    The node-owner is free to re-use this wait-queue for other
++ *                    purposes. As node-management uses this queue only during
++ *                    deactivation, it is usually totally fine to re-use the
++ *                    queue for other, preferably low-overhead, use-cases.
++ *
++ *     * node->type: This field defines the type of the owner of this node. It
++ *                   must be set during node initialization and must remain
++ *                   constant. The node management never looks at this value,
++ *                   but external users might use it to gain access to the
++ *                   owner object of a node.
++ *                   It is totally up to the owner of the node to define what
++ *                   their type means. Usually it means you can access the
++ *                   parent structure via container_of(), as long as you hold an
++ *                   active reference to the node.
++ *
++ *     * node->free_cb:    callback after all references are dropped
++ *       node->release_cb: callback during node deactivation
++ *                         These fields must be set by the node owner during
++ *                         node initialization. They must remain constant. If
++ *                         NULL, they're skipped.
++ *
++ *     * node->mode: filesystem access modes
++ *       node->uid:  filesystem owner uid
++ *       node->gid:  filesystem owner gid
++ *                   These fields must be set by the node owner during node
++ *                   initialization. They must remain constant and may be
++ *                   accessed by other callers to properly initialize
++ *                   filesystem nodes.
++ *
++ *     * node->id: This is an unsigned 32bit integer allocated by an IDR. It is
++ *                 always kept as small as possible during allocation and is
++ *                 globally unique across all nodes allocated by this module. 0
++ *                 is reserved as "not assigned" and is the default.
++ *                 The ID is assigned during kdbus_node_link() and is kept until
++ *                 the object is freed. Thus, the ID outlives the active
++ *                 lifetime of a node. As long as you hold an object reference
++ *                 to a node (and the node was linked once), the ID is valid and
++ *                 unique.
++ *
++ *     * node->name: name of this node
++ *       node->hash: 31bit hash-value of @name (range [2..INT_MAX-1])
++ *                   These values follow the same lifetime rules as node->id.
++ *                   They're initialized when the node is linked and then remain
++ *                   constant until the last object reference is dropped.
++ *                   Unlike the id, the name is only unique across all siblings
++ *                   and only until the node is deactivated. Currently, the
++ *                   name is unique even if the node is linked but not yet
++ *                   activated. This might change in the future, though. Code
++ *                   should not rely on this.
++ *
++ *     * node->lock:     lock to protect node->children, node->rb, node->parent
++ *     * node->parent: Reference to parent node. This is set during LINK time
++ *                     and is dropped during destruction. You must not access
++ *                     it unless you hold an active reference to the node or if
++ *                     you know the node is dead.
++ *     * node->children: rb-tree of all linked children of this node. You must
++ *                       not access this directly, but use one of the iterator
++ *                       or lookup helpers.
++ */
++
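++/*
++ * A minimal lifecycle sketch (illustrative only; "struct foo", "foo_free()"
++ * and "parent" are hypothetical and not part of this file):
++ *
++ *	struct foo {
++ *		struct kdbus_node node;
++ *	};
++ *
++ *	static void foo_free(struct kdbus_node *node)
++ *	{
++ *		kfree(container_of(node, struct foo, node));
++ *	}
++ *
++ *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
++ *	if (!f)
++ *		return -ENOMEM;
++ *
++ *	kdbus_node_init(&f->node, KDBUS_NODE_BUS);	(state: NEW)
++ *	f->node.free_cb = foo_free;
++ *	if (kdbus_node_link(&f->node, parent, "foo") < 0) {
++ *		kdbus_node_deactivate(&f->node);	(safe, even unlinked)
++ *		kdbus_node_unref(&f->node);		(state: FREED)
++ *	} else {
++ *		kdbus_node_activate(&f->node);		(state: ACTIVE)
++ *	}
++ *
++ * Deactivation and the final kdbus_node_unref() later move an activated node
++ * through INACTIVE and DRAINED to FREED, as described above.
++ */
++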
++/*
++ * Bias values track states of "active references". They're all negative. If a
++ * node is active, its active-ref-counter is >=0 and tracks all active
++ * references. Once a node is deactivated, we subtract NODE_BIAS. This means the
++ * counter is now negative but still counts the active references. Once it drops
++ * to exactly NODE_BIAS, we know all active references were dropped. Exactly one
++ * thread will change it to NODE_RELEASE now, perform cleanup and then put it
++ * into NODE_DRAINED. Once drained, all other threads that tried deactivating
++ * the node will now be woken up (thus, they wait until the node is fully done).
++ * The initial state during node-setup is NODE_NEW. If a node is directly
++ * deactivated without having ever been active, it is put into
++ * NODE_RELEASE_DIRECT instead of NODE_BIAS. This tracks this one-bit state
++ * across node-deactivation. The task putting it into NODE_RELEASE now knows
++ * whether the node was active before or not.
++ *
++ * Some archs implement atomic_sub(v) with atomic_add(-v), so reserve INT_MIN
++ * to avoid overflows if multiplied by -1.
++ */
++#define KDBUS_NODE_BIAS			(INT_MIN + 5)
++#define KDBUS_NODE_RELEASE_DIRECT	(KDBUS_NODE_BIAS - 1)
++#define KDBUS_NODE_RELEASE		(KDBUS_NODE_BIAS - 2)
++#define KDBUS_NODE_DRAINED		(KDBUS_NODE_BIAS - 3)
++#define KDBUS_NODE_NEW			(KDBUS_NODE_BIAS - 4)
++
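++/*
++ * Worked example of the counter values (illustrative): a node with three
++ * active references has node->active == 3. Deactivation adds KDBUS_NODE_BIAS,
++ * so the counter becomes KDBUS_NODE_BIAS + 3 but still counts the three
++ * references. As they are released, it drops to exactly KDBUS_NODE_BIAS;
++ * exactly one thread then moves it to KDBUS_NODE_RELEASE, performs the
++ * cleanup and finally sets KDBUS_NODE_DRAINED.
++ */
++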
++/* global unique ID mapping for kdbus nodes */
++static DEFINE_IDR(kdbus_node_idr);
++static DECLARE_RWSEM(kdbus_node_idr_lock);
++
++/**
++ * kdbus_node_name_hash() - hash a name
++ * @name:	The string to hash
++ *
++ * This computes the hash of @name. It is guaranteed to be in the range
++ * [2..INT_MAX-1]. The values 0, 1 and INT_MAX are unused as they are reserved
++ * for the filesystem code.
++ *
++ * Return: hash value of the passed string
++ */
++static unsigned int kdbus_node_name_hash(const char *name)
++{
++	unsigned int hash;
++
++	/* reserve hash numbers 0, 1 and >=INT_MAX for magic directories */
++	hash = kdbus_strhash(name) & INT_MAX;
++	if (hash < 2)
++		hash += 2;
++	if (hash >= INT_MAX)
++		hash = INT_MAX - 1;
++
++	return hash;
++}
++
++/**
++ * kdbus_node_name_compare() - compare a name with a node's name
++ * @hash:	hash of the string to compare the node with
++ * @name:	name to compare the node with
++ * @node:	node to compare the name with
++ *
++ * Return: 0 if @name and @hash exactly match the information in @node, or
++ * an integer less than or greater than zero if @name is found to be less
++ * than or greater than the string stored in @node, respectively.
++ */
++static int kdbus_node_name_compare(unsigned int hash, const char *name,
++				   const struct kdbus_node *node)
++{
++	if (hash != node->hash)
++		return hash - node->hash;
++
++	return strcmp(name, node->name);
++}
++
++/**
++ * kdbus_node_init() - initialize a kdbus_node
++ * @node:	Pointer to the node to initialize
++ * @type:	The type the node will have (KDBUS_NODE_*)
++ *
++ * The caller is responsible for allocating @node and initializing it to zero.
++ * Once this call returns, you must use the node_ref() and node_unref()
++ * functions to manage this node.
++ */
++void kdbus_node_init(struct kdbus_node *node, unsigned int type)
++{
++	atomic_set(&node->refcnt, 1);
++	mutex_init(&node->lock);
++	node->id = 0;
++	node->type = type;
++	RB_CLEAR_NODE(&node->rb);
++	node->children = RB_ROOT;
++	init_waitqueue_head(&node->waitq);
++	atomic_set(&node->active, KDBUS_NODE_NEW);
++}
++
++/**
++ * kdbus_node_link() - link a node into the nodes system
++ * @node:	Pointer to the node to initialize
++ * @parent:	Pointer to a parent node, may be %NULL
++ * @name:	The name of the node (or NULL if root node)
++ *
++ * This links a node into the hierarchy. This must not be called multiple times.
++ * If @parent is NULL, the node becomes a new root node.
++ *
++ * This call will fail if @name is not unique across all its siblings or if no
++ * ID could be allocated. You must not activate a node if linking failed! It is
++ * safe to deactivate it, though.
++ *
++ * Once you linked a node, you must call kdbus_node_deactivate() before you drop
++ * the last reference (even if you never activate the node).
++ *
++ * Return: 0 on success, negative error code otherwise.
++ */
++int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
++		    const char *name)
++{
++	int ret;
++
++	if (WARN_ON(node->type != KDBUS_NODE_DOMAIN && !parent))
++		return -EINVAL;
++
++	if (WARN_ON(parent && !name))
++		return -EINVAL;
++
++	if (name) {
++		node->name = kstrdup(name, GFP_KERNEL);
++		if (!node->name)
++			return -ENOMEM;
++
++		node->hash = kdbus_node_name_hash(name);
++	}
++
++	down_write(&kdbus_node_idr_lock);
++	ret = idr_alloc(&kdbus_node_idr, node, 1, 0, GFP_KERNEL);
++	if (ret >= 0)
++		node->id = ret;
++	up_write(&kdbus_node_idr_lock);
++
++	if (ret < 0)
++		return ret;
++
++	ret = 0;
++
++	if (parent) {
++		struct rb_node **n, *prev;
++
++		if (!kdbus_node_acquire(parent))
++			return -ESHUTDOWN;
++
++		mutex_lock(&parent->lock);
++
++		n = &parent->children.rb_node;
++		prev = NULL;
++
++		while (*n) {
++			struct kdbus_node *pos;
++			int result;
++
++			pos = kdbus_node_from_rb(*n);
++			prev = *n;
++			result = kdbus_node_name_compare(node->hash,
++							 node->name,
++							 pos);
++			if (result == 0) {
++				ret = -EEXIST;
++				goto exit_unlock;
++			}
++
++			if (result < 0)
++				n = &pos->rb.rb_left;
++			else
++				n = &pos->rb.rb_right;
++		}
++
++		/* add new node and rebalance the tree */
++		rb_link_node(&node->rb, prev, n);
++		rb_insert_color(&node->rb, &parent->children);
++		node->parent = kdbus_node_ref(parent);
++
++exit_unlock:
++		mutex_unlock(&parent->lock);
++		kdbus_node_release(parent);
++	}
++
++	return ret;
++}
++
++/**
++ * kdbus_node_ref() - Acquire object reference
++ * @node:	node to acquire reference to (or NULL)
++ *
++ * This acquires a new reference to @node. You must already own a reference when
++ * calling this!
++ * If @node is NULL, this is a no-op.
++ *
++ * Return: @node is returned
++ */
++struct kdbus_node *kdbus_node_ref(struct kdbus_node *node)
++{
++	if (node)
++		atomic_inc(&node->refcnt);
++	return node;
++}
++
++/**
++ * kdbus_node_unref() - Drop object reference
++ * @node:	node to drop reference to (or NULL)
++ *
++ * This drops an object reference to @node. You must not access the node if you
++ * no longer own a reference.
++ * If the ref-count drops to 0, the object will be destroyed (->free_cb will be
++ * called).
++ *
++ * If you linked or activated the node, you must deactivate the node before you
++ * drop your last reference! If you didn't link or activate the node, you can
++ * drop any reference you want.
++ *
++ * Note that this calls into ->free_cb() and thus _might_ sleep. The ->free_cb()
++ * callbacks must not acquire any outer locks, though. So you can safely drop
++ * references while holding locks.
++ *
++ * If @node is NULL, this is a no-op.
++ *
++ * Return: This always returns NULL
++ */
++struct kdbus_node *kdbus_node_unref(struct kdbus_node *node)
++{
++	if (node && atomic_dec_and_test(&node->refcnt)) {
++		struct kdbus_node safe = *node;
++
++		WARN_ON(atomic_read(&node->active) != KDBUS_NODE_DRAINED);
++		WARN_ON(!RB_EMPTY_NODE(&node->rb));
++
++		if (node->free_cb)
++			node->free_cb(node);
++
++		down_write(&kdbus_node_idr_lock);
++		if (safe.id > 0)
++			idr_remove(&kdbus_node_idr, safe.id);
++		/* drop caches after last node to not leak memory on unload */
++		if (idr_is_empty(&kdbus_node_idr)) {
++			idr_destroy(&kdbus_node_idr);
++			idr_init(&kdbus_node_idr);
++		}
++		up_write(&kdbus_node_idr_lock);
++
++		kfree(safe.name);
++
++		/*
++		 * kdbusfs relies on the parent to be available even after the
++		 * node was deactivated and unlinked. Therefore, we pin it
++		 * until a node is destroyed.
++		 */
++		kdbus_node_unref(safe.parent);
++	}
++
++	return NULL;
++}
++
++/**
++ * kdbus_node_is_active() - test whether a node is active
++ * @node:	node to test
++ *
++ * This checks whether @node is active. That means @node was linked and
++ * activated by the node owner and hasn't been deactivated yet. If, and only
++ * if, a node is active, kdbus_node_acquire() will be able to acquire active
++ * references.
++ *
++ * Note that this function does not give any lifetime guarantees. After this
++ * call returns, the node might be deactivated immediately. Normally, what you
++ * want is to acquire a real active reference via kdbus_node_acquire().
++ *
++ * Return: true if @node is active, false otherwise
++ */
++bool kdbus_node_is_active(struct kdbus_node *node)
++{
++	return atomic_read(&node->active) >= 0;
++}
++
++/**
++ * kdbus_node_is_deactivated() - test whether a node was already deactivated
++ * @node:	node to test
++ *
++ * This checks whether kdbus_node_deactivate() was called on @node. Note that
++ * this might be true even if you never deactivated the node directly, but only
++ * one of its ancestors.
++ *
++ * Note that even if this returns 'false', the node might get deactivated
++ * immediately after the call returns.
++ *
++ * Return: true if @node was already deactivated, false if not
++ */
++bool kdbus_node_is_deactivated(struct kdbus_node *node)
++{
++	int v;
++
++	v = atomic_read(&node->active);
++	return v != KDBUS_NODE_NEW && v < 0;
++}
++
++/**
++ * kdbus_node_activate() - activate a node
++ * @node:	node to activate
++ *
++ * This marks @node as active if, and only if, the node has neither been
++ * activated nor deactivated yet, and the parent is still active. Any but the
++ * first call to kdbus_node_activate() is a no-op.
++ * If you called kdbus_node_deactivate() before, then even the first call to
++ * kdbus_node_activate() will be a no-op.
++ *
++ * This call doesn't give any lifetime guarantees. The node might get
++ * deactivated immediately after this call returns. Or the parent might already
++ * be deactivated, which will make this call a no-op.
++ *
++ * If this call successfully activated a node, it will take an object reference
++ * to it. This reference is dropped after the node is deactivated. Therefore,
++ * the object owner can safely drop their reference to @node if they know that
++ * its parent node will get deactivated at some point. Once the parent node is
++ * deactivated, it will deactivate all its children and thus drop this
++ * reference again.
++ *
++ * Return: True if this call successfully activated the node, otherwise false.
++ *         Note that this might return false, even if the node is still active
++ *         (eg., if you called this a second time).
++ */
++bool kdbus_node_activate(struct kdbus_node *node)
++{
++	bool res = false;
++
++	mutex_lock(&node->lock);
++	if (atomic_read(&node->active) == KDBUS_NODE_NEW) {
++		atomic_sub(KDBUS_NODE_NEW, &node->active);
++		/* activated nodes have ref +1 */
++		kdbus_node_ref(node);
++		res = true;
++	}
++	mutex_unlock(&node->lock);
++
++	return res;
++}
++
++/**
++ * kdbus_node_deactivate() - deactivate a node
++ * @node:	The node to deactivate.
++ *
++ * This function recursively deactivates this node and all its children. It
++ * returns only once all children and the node itself were recursively disabled
++ * (even if you call this function multiple times in parallel).
++ *
++ * It is safe to call this function on _any_ node that was initialized _any_
++ * number of times.
++ *
++ * This call may sleep, as it waits for all active references to be dropped.
++ */
++void kdbus_node_deactivate(struct kdbus_node *node)
++{
++	struct kdbus_node *pos, *child;
++	struct rb_node *rb;
++	int v_pre, v_post;
++
++	pos = node;
++
++	/*
++	 * To avoid recursion, we perform back-tracking while deactivating
++	 * nodes. For each node we enter, we first mark the active-counter as
++	 * deactivated by adding BIAS. If the node has children, we set the first
++	 * child as current position and start over. If the node has no
++	 * children, we drain the node by waiting for all active refs to be
++	 * dropped and then releasing the node.
++	 *
++	 * After the node is released, we set its parent as current position
++	 * and start over. If the current position was the initial node, we're
++	 * done.
++	 *
++	 * Note that this function can be called in parallel by multiple
++	 * callers. We make sure that each node is only released once, and any
++	 * racing caller will wait until the other thread fully released that
++	 * node.
++	 */
++
++	for (;;) {
++		/*
++		 * Add BIAS to node->active to mark it as inactive. If it was
++		 * never active before, immediately mark it as RELEASE_DIRECT
++		 * so we remember this state.
++		 * We cannot remember v_pre as we might iterate into the
++		 * children, overwriting v_pre, before we can release our node.
++		 */
++		mutex_lock(&pos->lock);
++		v_pre = atomic_read(&pos->active);
++		if (v_pre >= 0)
++			atomic_add(KDBUS_NODE_BIAS, &pos->active);
++		else if (v_pre == KDBUS_NODE_NEW)
++			atomic_set(&pos->active, KDBUS_NODE_RELEASE_DIRECT);
++		mutex_unlock(&pos->lock);
++
++		/* wait until all active references were dropped */
++		wait_event(pos->waitq,
++			   atomic_read(&pos->active) <= KDBUS_NODE_BIAS);
++
++		mutex_lock(&pos->lock);
++		/* recurse into first child if any */
++		rb = rb_first(&pos->children);
++		if (rb) {
++			child = kdbus_node_ref(kdbus_node_from_rb(rb));
++			mutex_unlock(&pos->lock);
++			pos = child;
++			continue;
++		}
++
++		/* mark object as RELEASE */
++		v_post = atomic_read(&pos->active);
++		if (v_post == KDBUS_NODE_BIAS ||
++		    v_post == KDBUS_NODE_RELEASE_DIRECT)
++			atomic_set(&pos->active, KDBUS_NODE_RELEASE);
++		mutex_unlock(&pos->lock);
++
++		/*
++		 * If this is the thread that marked the object as RELEASE, we
++		 * perform the actual release. Otherwise, we wait until the
++		 * release is done and the node is marked as DRAINED.
++		 */
++		if (v_post == KDBUS_NODE_BIAS ||
++		    v_post == KDBUS_NODE_RELEASE_DIRECT) {
++			if (pos->release_cb)
++				pos->release_cb(pos, v_post == KDBUS_NODE_BIAS);
++
++			if (pos->parent) {
++				mutex_lock(&pos->parent->lock);
++				if (!RB_EMPTY_NODE(&pos->rb)) {
++					rb_erase(&pos->rb,
++						 &pos->parent->children);
++					RB_CLEAR_NODE(&pos->rb);
++				}
++				mutex_unlock(&pos->parent->lock);
++			}
++
++			/* mark as DRAINED */
++			atomic_set(&pos->active, KDBUS_NODE_DRAINED);
++			wake_up_all(&pos->waitq);
++
++			/* drop VFS cache */
++			kdbus_fs_flush(pos);
++
++			/*
++			 * If the node was activated and someone subtracted BIAS
++			 * from it to deactivate it, we, and only us, are
++			 * responsible to release the extra ref-count that was
++			 * taken once in kdbus_node_activate().
++			 * If the node was never activated, no-one ever
++			 * subtracted BIAS, but instead skipped that state and
++			 * immediately went to NODE_RELEASE_DIRECT. In that case
++			 * we must not drop the reference.
++			 */
++			if (v_post == KDBUS_NODE_BIAS)
++				kdbus_node_unref(pos);
++		} else {
++			/* wait until object is DRAINED */
++			wait_event(pos->waitq,
++			    atomic_read(&pos->active) == KDBUS_NODE_DRAINED);
++		}
++
++		/*
++		 * We're done with the current node. Continue on its parent
++		 * again, which will try deactivating its next child, or itself
++		 * if no child is left.
++		 * If we've reached our initial node again, we are done and
++		 * can safely return.
++		 */
++		if (pos == node)
++			break;
++
++		child = pos;
++		pos = pos->parent;
++		kdbus_node_unref(child);
++	}
++}
++
++/**
++ * kdbus_node_acquire() - Acquire an active ref on a node
++ * @node:	The node
++ *
++ * This acquires an active-reference to @node. This will only succeed if the
++ * node is active. You must release this active reference via
++ * kdbus_node_release() again.
++ *
++ * See the introduction to "active references" for more details.
++ *
++ * Return: %true if @node was non-NULL and active
++ */
++bool kdbus_node_acquire(struct kdbus_node *node)
++{
++	return node && atomic_inc_unless_negative(&node->active);
++}
++
++/**
++ * kdbus_node_release() - Release an active ref on a node
++ * @node:	The node
++ *
++ * This releases an active reference that was previously acquired via
++ * kdbus_node_acquire(). See kdbus_node_acquire() for details.
++ */
++void kdbus_node_release(struct kdbus_node *node)
++{
++	if (node && atomic_dec_return(&node->active) == KDBUS_NODE_BIAS)
++		wake_up(&node->waitq);
++}
++
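++/*
++ * Illustrative pattern for blocking operations (mirrors the rules from the
++ * DOC comment above; start_io(), finish_io() and io_done are hypothetical):
++ *
++ *	if (!kdbus_node_acquire(node))
++ *		return -ESHUTDOWN;
++ *	start_io(node);			(gather data, schedule the I/O)
++ *	kdbus_node_release(node);	(don't block while holding the ref)
++ *
++ *	wait_for_completion(&io_done);	(may sleep for an indefinite period)
++ *
++ *	if (!kdbus_node_acquire(node))
++ *		return -ESHUTDOWN;	(the node died in the meantime)
++ *	finish_io(node);
++ *	kdbus_node_release(node);
++ */
++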
++/**
++ * kdbus_node_find_child() - Find child by name
++ * @node:	parent node to search through
++ * @name:	name of child node
++ *
++ * This searches through all children of @node for a child-node with name @name.
++ * If not found, or if the child is deactivated, NULL is returned. Otherwise,
++ * the child is acquired and a new reference is returned.
++ *
++ * If you're done with the child, you need to release it and drop your
++ * reference.
++ *
++ * This function does not acquire the parent node. However, if the parent was
++ * already deactivated, then kdbus_node_deactivate() will, at some point, also
++ * deactivate the child. Therefore, we can rely on the explicit ordering during
++ * deactivation.
++ *
++ * Return: Reference to acquired child node, or NULL if not found / not active.
++ */
++struct kdbus_node *kdbus_node_find_child(struct kdbus_node *node,
++					 const char *name)
++{
++	struct kdbus_node *child;
++	struct rb_node *rb;
++	unsigned int hash;
++	int ret;
++
++	hash = kdbus_node_name_hash(name);
++
++	mutex_lock(&node->lock);
++	rb = node->children.rb_node;
++	while (rb) {
++		child = kdbus_node_from_rb(rb);
++		ret = kdbus_node_name_compare(hash, name, child);
++		if (ret < 0)
++			rb = rb->rb_left;
++		else if (ret > 0)
++			rb = rb->rb_right;
++		else
++			break;
++	}
++	if (rb && kdbus_node_acquire(child))
++		kdbus_node_ref(child);
++	else
++		child = NULL;
++	mutex_unlock(&node->lock);
++
++	return child;
++}
++
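++/*
++ * Illustrative lookup sketch (do_something() and the name "bus0" are
++ * hypothetical): a child returned by kdbus_node_find_child() is both
++ * referenced and acquired, so both counters must be dropped when done:
++ *
++ *	struct kdbus_node *child;
++ *
++ *	child = kdbus_node_find_child(node, "bus0");
++ *	if (child) {
++ *		do_something(child);
++ *		kdbus_node_release(child);
++ *		kdbus_node_unref(child);
++ *	}
++ */
++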
++static struct kdbus_node *node_find_closest_unlocked(struct kdbus_node *node,
++						     unsigned int hash,
++						     const char *name)
++{
++	struct kdbus_node *n, *pos = NULL;
++	struct rb_node *rb;
++	int res;
++
++	/*
++	 * Find the closest child with ``node->hash >= hash'', or, if @name is
++	 * valid, ``node->name >= name'' (where '>=' is the lex. order).
++	 */
++
++	rb = node->children.rb_node;
++	while (rb) {
++		n = kdbus_node_from_rb(rb);
++
++		if (name)
++			res = kdbus_node_name_compare(hash, name, n);
++		else
++			res = hash - n->hash;
++
++		if (res <= 0) {
++			rb = rb->rb_left;
++			pos = n;
++		} else { /* ``hash > n->hash'', ``name > n->name'' */
++			rb = rb->rb_right;
++		}
++	}
++
++	return pos;
++}
++
++/**
++ * kdbus_node_find_closest() - Find closest child-match
++ * @node:	parent node to search through
++ * @hash:	hash value to find closest match for
++ *
++ * Find the closest child of @node with a hash greater than or equal to @hash.
++ * The closest match is the left-most child of @node with this property. Which
++ * means, it is the first child with that hash returned by
++ * kdbus_node_next_child(), if you'd iterate the whole parent node.
++ *
++ * Return: Reference to acquired child, or NULL if none found.
++ */
++struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
++					   unsigned int hash)
++{
++	struct kdbus_node *child;
++	struct rb_node *rb;
++
++	mutex_lock(&node->lock);
++
++	child = node_find_closest_unlocked(node, hash, NULL);
++	while (child && !kdbus_node_acquire(child)) {
++		rb = rb_next(&child->rb);
++		if (rb)
++			child = kdbus_node_from_rb(rb);
++		else
++			child = NULL;
++	}
++	kdbus_node_ref(child);
++
++	mutex_unlock(&node->lock);
++
++	return child;
++}
++
++/**
++ * kdbus_node_next_child() - Acquire next child
++ * @node:	parent node
++ * @prev:	previous child-node position or NULL
++ *
++ * This function returns a reference to the next active child of @node, after
++ * the passed position @prev. If @prev is NULL, a reference to the first active
++ * child is returned. If no more active children are found, NULL is returned.
++ *
++ * This function acquires the next child it returns. If you're done with the
++ * returned pointer, you need to release _and_ unref it.
++ *
++ * The passed in pointer @prev is not modified by this function, and it does
++ * *not* have to be active. If @prev was acquired via different means, or if it
++ * was unlinked from its parent before you pass it in, then this iterator will
++ * still return the next active child (it will have to search through the
++ * rb-tree based on the node-name, though).
++ * However, @prev must not be linked to a different parent than @node!
++ *
++ * Return: Reference to next acquired child, or NULL if at the end.
++ */
++struct kdbus_node *kdbus_node_next_child(struct kdbus_node *node,
++					 struct kdbus_node *prev)
++{
++	struct kdbus_node *pos = NULL;
++	struct rb_node *rb;
++
++	mutex_lock(&node->lock);
++
++	if (!prev) {
++		/*
++		 * New iteration; find first node in rb-tree and try to acquire
++		 * it. If we got it, directly return it as first element.
++		 * Otherwise, the loop below will find the next active node.
++		 */
++		rb = rb_first(&node->children);
++		if (!rb)
++			goto exit;
++		pos = kdbus_node_from_rb(rb);
++		if (kdbus_node_acquire(pos))
++			goto exit;
++	} else if (RB_EMPTY_NODE(&prev->rb)) {
++		/*
++		 * The current iterator is no longer linked to the rb-tree. Use
++		 * its hash value and name to find the next _higher_ node and
++		 * acquire it. If we got it, return it as next element.
++		 * Otherwise, the loop below will find the next active node.
++		 */
++		pos = node_find_closest_unlocked(node, prev->hash, prev->name);
++		if (!pos)
++			goto exit;
++		if (kdbus_node_acquire(pos))
++			goto exit;
++	} else {
++		/*
++		 * The current iterator is still linked to the parent. Set it
++		 * as current position and use the loop below to find the next
++		 * active element.
++		 */
++		pos = prev;
++	}
++
++	/* @pos was already returned or is inactive; find next active node */
++	do {
++		rb = rb_next(&pos->rb);
++		if (rb)
++			pos = kdbus_node_from_rb(rb);
++		else
++			pos = NULL;
++	} while (pos && !kdbus_node_acquire(pos));
++
++exit:
++	/* @pos is NULL or acquired. Take ref if non-NULL and return it */
++	kdbus_node_ref(pos);
++	mutex_unlock(&node->lock);
++	return pos;
++}
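++
++/*
++ * Illustrative iteration sketch (do_something() is hypothetical): each child
++ * returned by kdbus_node_next_child() is acquired and referenced. The active
++ * reference can be released right after use; the object reference must be
++ * kept until the child has served as @prev for the following call:
++ *
++ *	struct kdbus_node *pos, *prev = NULL;
++ *
++ *	while ((pos = kdbus_node_next_child(node, prev))) {
++ *		do_something(pos);
++ *		kdbus_node_release(pos);
++ *		kdbus_node_unref(prev);
++ *		prev = pos;
++ *	}
++ *	kdbus_node_unref(prev);
++ */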
+diff --git a/ipc/kdbus/node.h b/ipc/kdbus/node.h
+new file mode 100644
+index 0000000..be125ce
+--- /dev/null
++++ b/ipc/kdbus/node.h
+@@ -0,0 +1,84 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_NODE_H
++#define __KDBUS_NODE_H
++
++#include <linux/atomic.h>
++#include <linux/kernel.h>
++#include <linux/mutex.h>
++#include <linux/wait.h>
++
++struct kdbus_node;
++
++enum kdbus_node_type {
++	KDBUS_NODE_DOMAIN,
++	KDBUS_NODE_CONTROL,
++	KDBUS_NODE_BUS,
++	KDBUS_NODE_ENDPOINT,
++};
++
++typedef void (*kdbus_node_free_t) (struct kdbus_node *node);
++typedef void (*kdbus_node_release_t) (struct kdbus_node *node, bool was_active);
++
++struct kdbus_node {
++	atomic_t refcnt;
++	atomic_t active;
++	wait_queue_head_t waitq;
++
++	/* static members */
++	unsigned int type;
++	kdbus_node_free_t free_cb;
++	kdbus_node_release_t release_cb;
++	umode_t mode;
++	kuid_t uid;
++	kgid_t gid;
++
++	/* valid once linked */
++	char *name;
++	unsigned int hash;
++	unsigned int id;
++	struct kdbus_node *parent; /* may be NULL */
++
++	/* valid iff active */
++	struct mutex lock;
++	struct rb_node rb;
++	struct rb_root children;
++};
++
++#define kdbus_node_from_rb(_node) rb_entry((_node), struct kdbus_node, rb)
++
++void kdbus_node_init(struct kdbus_node *node, unsigned int type);
++
++int kdbus_node_link(struct kdbus_node *node, struct kdbus_node *parent,
++		    const char *name);
++
++struct kdbus_node *kdbus_node_ref(struct kdbus_node *node);
++struct kdbus_node *kdbus_node_unref(struct kdbus_node *node);
++
++bool kdbus_node_is_active(struct kdbus_node *node);
++bool kdbus_node_is_deactivated(struct kdbus_node *node);
++bool kdbus_node_activate(struct kdbus_node *node);
++void kdbus_node_deactivate(struct kdbus_node *node);
++
++bool kdbus_node_acquire(struct kdbus_node *node);
++void kdbus_node_release(struct kdbus_node *node);
++
++struct kdbus_node *kdbus_node_find_child(struct kdbus_node *node,
++					 const char *name);
++struct kdbus_node *kdbus_node_find_closest(struct kdbus_node *node,
++					   unsigned int hash);
++struct kdbus_node *kdbus_node_next_child(struct kdbus_node *node,
++					 struct kdbus_node *prev);
++
++#endif
+diff --git a/ipc/kdbus/notify.c b/ipc/kdbus/notify.c
+new file mode 100644
+index 0000000..e4a4542
+--- /dev/null
++++ b/ipc/kdbus/notify.c
+@@ -0,0 +1,248 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "domain.h"
++#include "endpoint.h"
++#include "item.h"
++#include "message.h"
++#include "notify.h"
++
++static inline void kdbus_notify_add_tail(struct kdbus_kmsg *kmsg,
++					 struct kdbus_bus *bus)
++{
++	spin_lock(&bus->notify_lock);
++	list_add_tail(&kmsg->notify_entry, &bus->notify_list);
++	spin_unlock(&bus->notify_lock);
++}
++
++static int kdbus_notify_reply(struct kdbus_bus *bus, u64 id,
++			      u64 cookie, u64 msg_type)
++{
++	struct kdbus_kmsg *kmsg = NULL;
++
++	WARN_ON(id == 0);
++
++	kmsg = kdbus_kmsg_new(bus, 0);
++	if (IS_ERR(kmsg))
++		return PTR_ERR(kmsg);
++
++	/*
++	 * a kernel-generated notification can only contain one
++	 * struct kdbus_item, so make a shortcut here for
++	 * faster lookup in the match db.
++	 */
++	kmsg->notify_type = msg_type;
++	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
++	kmsg->msg.dst_id = id;
++	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
++	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
++	kmsg->msg.cookie_reply = cookie;
++	kmsg->msg.items[0].type = msg_type;
++
++	kdbus_notify_add_tail(kmsg, bus);
++
++	return 0;
++}
++
++/**
++ * kdbus_notify_reply_timeout() - queue a timeout reply
++ * @bus:		Bus which queues the messages
++ * @id:			The destination's connection ID
++ * @cookie:		The cookie to set in the reply.
++ *
++ * Queues a message that has a KDBUS_ITEM_REPLY_TIMEOUT item attached.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_notify_reply_timeout(struct kdbus_bus *bus, u64 id, u64 cookie)
++{
++	return kdbus_notify_reply(bus, id, cookie, KDBUS_ITEM_REPLY_TIMEOUT);
++}
++
++/**
++ * kdbus_notify_reply_dead() - queue a 'dead' reply
++ * @bus:		Bus which queues the messages
++ * @id:			The destination's connection ID
++ * @cookie:		The cookie to set in the reply.
++ *
++ * Queues a message that has a KDBUS_ITEM_REPLY_DEAD item attached.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_notify_reply_dead(struct kdbus_bus *bus, u64 id, u64 cookie)
++{
++	return kdbus_notify_reply(bus, id, cookie, KDBUS_ITEM_REPLY_DEAD);
++}
++
++/**
++ * kdbus_notify_name_change() - queue a notification about a name owner change
++ * @bus:		Bus which queues the messages
++ * @type:		The type of the notification; KDBUS_ITEM_NAME_ADD,
++ *			KDBUS_ITEM_NAME_CHANGE or KDBUS_ITEM_NAME_REMOVE
++ * @old_id:		The id of the connection that used to own the name
++ * @new_id:		The id of the new owner connection
++ * @old_flags:		The flags to pass in the KDBUS_ITEM flags field for
++ *			the old owner
++ * @new_flags:		The flags to pass in the KDBUS_ITEM flags field for
++ *			the new owner
++ * @name:		The name that was removed or assigned to a new owner
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_notify_name_change(struct kdbus_bus *bus, u64 type,
++			     u64 old_id, u64 new_id,
++			     u64 old_flags, u64 new_flags,
++			     const char *name)
++{
++	struct kdbus_kmsg *kmsg = NULL;
++	size_t name_len, extra_size;
++
++	name_len = strlen(name) + 1;
++	extra_size = sizeof(struct kdbus_notify_name_change) + name_len;
++	kmsg = kdbus_kmsg_new(bus, extra_size);
++	if (IS_ERR(kmsg))
++		return PTR_ERR(kmsg);
++
++	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
++	kmsg->msg.dst_id = KDBUS_DST_ID_BROADCAST;
++	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
++	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
++	kmsg->notify_type = type;
++	kmsg->notify_old_id = old_id;
++	kmsg->notify_new_id = new_id;
++	kmsg->msg.items[0].type = type;
++	kmsg->msg.items[0].name_change.old_id.id = old_id;
++	kmsg->msg.items[0].name_change.old_id.flags = old_flags;
++	kmsg->msg.items[0].name_change.new_id.id = new_id;
++	kmsg->msg.items[0].name_change.new_id.flags = new_flags;
++	memcpy(kmsg->msg.items[0].name_change.name, name, name_len);
++	kmsg->notify_name = kmsg->msg.items[0].name_change.name;
++
++	kdbus_notify_add_tail(kmsg, bus);
++
++	return 0;
++}
++
++/**
++ * kdbus_notify_id_change() - queue a notification about a unique ID change
++ * @bus:		Bus which queues the messages
++ * @type:		The type of the notification; KDBUS_ITEM_ID_ADD or
++ *			KDBUS_ITEM_ID_REMOVE
++ * @id:			The id of the connection that was added or removed
++ * @flags:		The flags to pass in the KDBUS_ITEM flags field
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags)
++{
++	struct kdbus_kmsg *kmsg = NULL;
++
++	kmsg = kdbus_kmsg_new(bus, sizeof(struct kdbus_notify_id_change));
++	if (IS_ERR(kmsg))
++		return PTR_ERR(kmsg);
++
++	kmsg->msg.flags = KDBUS_MSG_SIGNAL;
++	kmsg->msg.dst_id = KDBUS_DST_ID_BROADCAST;
++	kmsg->msg.src_id = KDBUS_SRC_ID_KERNEL;
++	kmsg->msg.payload_type = KDBUS_PAYLOAD_KERNEL;
++	kmsg->notify_type = type;
++
++	switch (type) {
++	case KDBUS_ITEM_ID_ADD:
++		kmsg->notify_new_id = id;
++		break;
++
++	case KDBUS_ITEM_ID_REMOVE:
++		kmsg->notify_old_id = id;
++		break;
++
++	default:
++		BUG();
++	}
++
++	kmsg->msg.items[0].type = type;
++	kmsg->msg.items[0].id_change.id = id;
++	kmsg->msg.items[0].id_change.flags = flags;
++
++	kdbus_notify_add_tail(kmsg, bus);
++
++	return 0;
++}
++
++/**
++ * kdbus_notify_flush() - send a list of collected messages
++ * @bus:		Bus which queues the messages
++ *
++ * The list is empty after sending the messages.
++ */
++void kdbus_notify_flush(struct kdbus_bus *bus)
++{
++	LIST_HEAD(notify_list);
++	struct kdbus_kmsg *kmsg, *tmp;
++
++	mutex_lock(&bus->notify_flush_lock);
++	down_read(&bus->name_registry->rwlock);
++
++	spin_lock(&bus->notify_lock);
++	list_splice_init(&bus->notify_list, &notify_list);
++	spin_unlock(&bus->notify_lock);
++
++	list_for_each_entry_safe(kmsg, tmp, &notify_list, notify_entry) {
++		kdbus_meta_conn_collect(kmsg->conn_meta, kmsg, NULL,
++					KDBUS_ATTACH_TIMESTAMP);
++
++		if (kmsg->msg.dst_id != KDBUS_DST_ID_BROADCAST) {
++			struct kdbus_conn *conn;
++
++			conn = kdbus_bus_find_conn_by_id(bus, kmsg->msg.dst_id);
++			if (conn) {
++				kdbus_bus_eavesdrop(bus, NULL, kmsg);
++				kdbus_conn_entry_insert(NULL, conn, kmsg, NULL);
++				kdbus_conn_unref(conn);
++			}
++		} else {
++			kdbus_bus_broadcast(bus, NULL, kmsg);
++		}
++
++		list_del(&kmsg->notify_entry);
++		kdbus_kmsg_free(kmsg);
++	}
++
++	up_read(&bus->name_registry->rwlock);
++	mutex_unlock(&bus->notify_flush_lock);
++}
++
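++/*
++ * Illustrative usage (sketch): notifications are only queued on the bus;
++ * nothing reaches any connection until the caller flushes the list, which is
++ * typically done after the operation finished and all local locks have been
++ * dropped:
++ *
++ *	ret = kdbus_notify_reply_timeout(bus, conn_id, cookie);
++ *	if (ret < 0)
++ *		return ret;
++ *	(... finish the operation, drop locks ...)
++ *	kdbus_notify_flush(bus);
++ */
++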
++/**
++ * kdbus_notify_free() - free a list of collected messages
++ * @bus:		Bus which queues the messages
++ */
++void kdbus_notify_free(struct kdbus_bus *bus)
++{
++	struct kdbus_kmsg *kmsg, *tmp;
++
++	list_for_each_entry_safe(kmsg, tmp, &bus->notify_list, notify_entry) {
++		list_del(&kmsg->notify_entry);
++		kdbus_kmsg_free(kmsg);
++	}
++}
+diff --git a/ipc/kdbus/notify.h b/ipc/kdbus/notify.h
+new file mode 100644
+index 0000000..03df464
+--- /dev/null
++++ b/ipc/kdbus/notify.h
+@@ -0,0 +1,30 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_NOTIFY_H
++#define __KDBUS_NOTIFY_H
++
++struct kdbus_bus;
++
++int kdbus_notify_id_change(struct kdbus_bus *bus, u64 type, u64 id, u64 flags);
++int kdbus_notify_reply_timeout(struct kdbus_bus *bus, u64 id, u64 cookie);
++int kdbus_notify_reply_dead(struct kdbus_bus *bus, u64 id, u64 cookie);
++int kdbus_notify_name_change(struct kdbus_bus *bus, u64 type,
++			     u64 old_id, u64 new_id,
++			     u64 old_flags, u64 new_flags,
++			     const char *name);
++void kdbus_notify_flush(struct kdbus_bus *bus);
++void kdbus_notify_free(struct kdbus_bus *bus);
++
++#endif
+diff --git a/ipc/kdbus/policy.c b/ipc/kdbus/policy.c
+new file mode 100644
+index 0000000..dd7fffa
+--- /dev/null
++++ b/ipc/kdbus/policy.c
+@@ -0,0 +1,489 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/dcache.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/mutex.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "domain.h"
++#include "item.h"
++#include "names.h"
++#include "policy.h"
++
++#define KDBUS_POLICY_HASH_SIZE	64
++
++/**
++ * struct kdbus_policy_db_entry_access - a database entry access item
++ * @type:		One of KDBUS_POLICY_ACCESS_* types
++ * @access:		Access to grant. One of KDBUS_POLICY_*
++ * @uid:		For KDBUS_POLICY_ACCESS_USER, the global uid
++ * @gid:		For KDBUS_POLICY_ACCESS_GROUP, the global gid
++ * @list:		List entry item for the entry's list
++ *
++ * This is the internal version of struct kdbus_policy_db_access.
++ */
++struct kdbus_policy_db_entry_access {
++	u8 type;		/* USER, GROUP, WORLD */
++	u8 access;		/* OWN, TALK, SEE */
++	union {
++		kuid_t uid;	/* global uid */
++		kgid_t gid;	/* global gid */
++	};
++	struct list_head list;
++};
++
++/**
++ * struct kdbus_policy_db_entry - a policy database entry
++ * @name:		The name to match the policy entry against
++ * @hentry:		The hash entry for the database's entries_hash
++ * @access_list:	List head for keeping tracks of the entry's
++ *			access items.
++ * @owner:		The owner of this entry. Can be a kdbus_conn or
++ *			a kdbus_ep object.
++ * @wildcard:		The name is a wildcard, such as one ending in '.*'
++ */
++struct kdbus_policy_db_entry {
++	char *name;
++	struct hlist_node hentry;
++	struct list_head access_list;
++	const void *owner;
++	bool wildcard:1;
++};
++
++static void kdbus_policy_entry_free(struct kdbus_policy_db_entry *e)
++{
++	struct kdbus_policy_db_entry_access *a, *tmp;
++
++	list_for_each_entry_safe(a, tmp, &e->access_list, list) {
++		list_del(&a->list);
++		kfree(a);
++	}
++
++	kfree(e->name);
++	kfree(e);
++}
++
++static unsigned int kdbus_strnhash(const char *str, size_t len)
++{
++	unsigned long hash = init_name_hash();
++
++	while (len--)
++		hash = partial_name_hash(*str++, hash);
++
++	return end_name_hash(hash);
++}
++
++static const struct kdbus_policy_db_entry *
++kdbus_policy_lookup(struct kdbus_policy_db *db, const char *name, u32 hash)
++{
++	struct kdbus_policy_db_entry *e;
++	const char *dot;
++	size_t len;
++
++	/* find exact match */
++	hash_for_each_possible(db->entries_hash, e, hentry, hash)
++		if (strcmp(e->name, name) == 0 && !e->wildcard)
++			return e;
++
++	/* find wildcard match */
++
++	dot = strrchr(name, '.');
++	if (!dot)
++		return NULL;
++
++	len = dot - name;
++	hash = kdbus_strnhash(name, len);
++
++	hash_for_each_possible(db->entries_hash, e, hentry, hash)
++		if (e->wildcard && !strncmp(e->name, name, len) &&
++		    !e->name[len])
++			return e;
++
++	return NULL;
++}
++
++/**
++ * kdbus_policy_db_clear - release all memory from a policy db
++ * @db:		The policy database
++ */
++void kdbus_policy_db_clear(struct kdbus_policy_db *db)
++{
++	struct kdbus_policy_db_entry *e;
++	struct hlist_node *tmp;
++	unsigned int i;
++
++	/* purge entries */
++	down_write(&db->entries_rwlock);
++	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry) {
++		hash_del(&e->hentry);
++		kdbus_policy_entry_free(e);
++	}
++	up_write(&db->entries_rwlock);
++}
++
++/**
++ * kdbus_policy_db_init() - initialize a new policy database
++ * @db:		The location of the database
++ *
++ * This initializes a new policy-db. The underlying memory must have been
++ * cleared to zero by the caller.
++ */
++void kdbus_policy_db_init(struct kdbus_policy_db *db)
++{
++	hash_init(db->entries_hash);
++	init_rwsem(&db->entries_rwlock);
++}
++
++/**
++ * kdbus_policy_query_unlocked() - Query the policy database
++ * @db:		Policy database
++ * @cred:	Credentials to test against
++ * @name:	Name to query
++ * @hash:	Hash value of @name
++ *
++ * Same as kdbus_policy_query() but requires the caller to lock the policy
++ * database against concurrent writes.
++ *
++ * Return: The highest KDBUS_POLICY_* access type found, or -EPERM if none.
++ */
++int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
++				const struct cred *cred, const char *name,
++				unsigned int hash)
++{
++	struct kdbus_policy_db_entry_access *a;
++	const struct kdbus_policy_db_entry *e;
++	int i, highest = -EPERM;
++
++	e = kdbus_policy_lookup(db, name, hash);
++	if (!e)
++		return -EPERM;
++
++	list_for_each_entry(a, &e->access_list, list) {
++		if ((int)a->access <= highest)
++			continue;
++
++		switch (a->type) {
++		case KDBUS_POLICY_ACCESS_USER:
++			if (uid_eq(cred->euid, a->uid))
++				highest = a->access;
++			break;
++		case KDBUS_POLICY_ACCESS_GROUP:
++			if (gid_eq(cred->egid, a->gid)) {
++				highest = a->access;
++				break;
++			}
++
++			for (i = 0; i < cred->group_info->ngroups; i++) {
++				kgid_t gid = GROUP_AT(cred->group_info, i);
++
++				if (gid_eq(gid, a->gid)) {
++					highest = a->access;
++					break;
++				}
++			}
++
++			break;
++		case KDBUS_POLICY_ACCESS_WORLD:
++			highest = a->access;
++			break;
++		}
++
++		/* OWN is the highest possible policy */
++		if (highest >= KDBUS_POLICY_OWN)
++			break;
++	}
++
++	return highest;
++}
++
++/**
++ * kdbus_policy_query() - Query the policy database
++ * @db:		Policy database
++ * @cred:	Credentials to test against
++ * @name:	Name to query
++ * @hash:	Hash value of @name
++ *
++ * Query the policy database @db for the access rights of @cred to the name
++ * @name. The access rights of @cred are returned, or -EPERM if no access is
++ * granted.
++ *
++ * This call effectively searches for the highest access-right granted to
++ * @cred. The caller should really cache those as policy lookups are rather
++ * expensive.
++ *
++ * Return: The highest KDBUS_POLICY_* access type found, or -EPERM if none.
++ */
++int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
++		       const char *name, unsigned int hash)
++{
++	int ret;
++
++	down_read(&db->entries_rwlock);
++	ret = kdbus_policy_query_unlocked(db, cred, name, hash);
++	up_read(&db->entries_rwlock);
++
++	return ret;
++}
++
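++/*
++ * Illustrative check (sketch): test whether the current task may own @name.
++ * kdbus_strhash() is the same helper kdbus_policy_set() uses to index the
++ * entries:
++ *
++ *	int access = kdbus_policy_query(db, current_cred(), name,
++ *					kdbus_strhash(name));
++ *	if (access < KDBUS_POLICY_OWN)
++ *		return -EPERM;
++ */
++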
++static void __kdbus_policy_remove_owner(struct kdbus_policy_db *db,
++					const void *owner)
++{
++	struct kdbus_policy_db_entry *e;
++	struct hlist_node *tmp;
++	int i;
++
++	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry)
++		if (e->owner == owner) {
++			hash_del(&e->hentry);
++			kdbus_policy_entry_free(e);
++		}
++}
++
++/**
++ * kdbus_policy_remove_owner() - remove all entries related to a connection
++ * @db:		The policy database
++ * @owner:	The connection which items to remove
++ */
++void kdbus_policy_remove_owner(struct kdbus_policy_db *db,
++			       const void *owner)
++{
++	down_write(&db->entries_rwlock);
++	__kdbus_policy_remove_owner(db, owner);
++	up_write(&db->entries_rwlock);
++}
++
++/*
++ * Convert user provided policy access to internal kdbus policy
++ * access
++ */
++static struct kdbus_policy_db_entry_access *
++kdbus_policy_make_access(const struct kdbus_policy_access *uaccess)
++{
++	int ret;
++	struct kdbus_policy_db_entry_access *a;
++
++	a = kzalloc(sizeof(*a), GFP_KERNEL);
++	if (!a)
++		return ERR_PTR(-ENOMEM);
++
++	ret = -EINVAL;
++	switch (uaccess->access) {
++	case KDBUS_POLICY_SEE:
++	case KDBUS_POLICY_TALK:
++	case KDBUS_POLICY_OWN:
++		a->access = uaccess->access;
++		break;
++	default:
++		goto err;
++	}
++
++	switch (uaccess->type) {
++	case KDBUS_POLICY_ACCESS_USER:
++		a->uid = make_kuid(current_user_ns(), uaccess->id);
++		if (!uid_valid(a->uid))
++			goto err;
++
++		break;
++	case KDBUS_POLICY_ACCESS_GROUP:
++		a->gid = make_kgid(current_user_ns(), uaccess->id);
++		if (!gid_valid(a->gid))
++			goto err;
++
++		break;
++	case KDBUS_POLICY_ACCESS_WORLD:
++		break;
++	default:
++		goto err;
++	}
++
++	a->type = uaccess->type;
++
++	return a;
++
++err:
++	kfree(a);
++	return ERR_PTR(ret);
++}
++
++/**
++ * kdbus_policy_set() - set a connection's policy rules
++ * @db:				The policy database
++ * @items:			A list of kdbus_item elements that contain both
++ *				names and access rules to set.
++ * @items_size:			The total size of the items.
++ * @max_policies:		The maximum number of policy entries to allow.
++ *				Pass 0 for no limit.
++ * @allow_wildcards:		Boolean value whether wildcard entries (those
++ *				ending in '.*') should be allowed.
++ * @owner:			The owner of the new policy items.
++ *
++ * This function sets a new set of policies for a given owner. The names and
++ * access rules are gathered by walking the list of items passed in as
++ * argument. An item of type KDBUS_ITEM_NAME is expected before any number of
++ * KDBUS_ITEM_POLICY_ACCESS items. If there are more repetitions of this
++ * pattern than allowed by @max_policies, -E2BIG is returned.
++ *
++ * In order to allow atomic replacement of rules, the function first removes
++ * all entries that have been created for the given owner previously.
++ *
++ * Callers of this function must make sure that the owner is a custom
++ * endpoint, or if the endpoint is a default endpoint, then it must be
++ * either a policy holder or an activator.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++int kdbus_policy_set(struct kdbus_policy_db *db,
++		     const struct kdbus_item *items,
++		     size_t items_size,
++		     size_t max_policies,
++		     bool allow_wildcards,
++		     const void *owner)
++{
++	struct kdbus_policy_db_entry_access *a;
++	struct kdbus_policy_db_entry *e, *p;
++	const struct kdbus_item *item;
++	struct hlist_node *tmp;
++	HLIST_HEAD(entries);
++	HLIST_HEAD(restore);
++	size_t count = 0;
++	int i, ret = 0;
++	u32 hash;
++
++	/* Walk the list of items and look for new policies */
++	e = NULL;
++	KDBUS_ITEMS_FOREACH(item, items, items_size) {
++		switch (item->type) {
++		case KDBUS_ITEM_NAME: {
++			size_t len;
++
++			if (max_policies && ++count > max_policies) {
++				ret = -E2BIG;
++				goto exit;
++			}
++
++			if (!kdbus_name_is_valid(item->str, true)) {
++				ret = -EINVAL;
++				goto exit;
++			}
++
++			e = kzalloc(sizeof(*e), GFP_KERNEL);
++			if (!e) {
++				ret = -ENOMEM;
++				goto exit;
++			}
++
++			INIT_LIST_HEAD(&e->access_list);
++			e->owner = owner;
++			hlist_add_head(&e->hentry, &entries);
++
++			e->name = kstrdup(item->str, GFP_KERNEL);
++			if (!e->name) {
++				ret = -ENOMEM;
++				goto exit;
++			}
++
++			/*
++			 * If a supplied name ends with an '.*', cut off that
++			 * part, only store anything before it, and mark the
++			 * entry as wildcard.
++			 */
++			len = strlen(e->name);
++			if (len > 2 &&
++			    e->name[len - 2] == '.' &&
++			    e->name[len - 1] == '*') {
++				if (!allow_wildcards) {
++					ret = -EINVAL;
++					goto exit;
++				}
++
++				e->name[len - 2] = '\0';
++				e->wildcard = true;
++			}
++
++			break;
++		}
++
++		case KDBUS_ITEM_POLICY_ACCESS:
++			if (!e) {
++				ret = -EINVAL;
++				goto exit;
++			}
++
++			a = kdbus_policy_make_access(&item->policy_access);
++			if (IS_ERR(a)) {
++				ret = PTR_ERR(a);
++				goto exit;
++			}
++
++			list_add_tail(&a->list, &e->access_list);
++			break;
++		}
++	}
++
++	down_write(&db->entries_rwlock);
++
++	/* remember previous entries to restore in case of failure */
++	hash_for_each_safe(db->entries_hash, i, tmp, e, hentry)
++		if (e->owner == owner) {
++			hash_del(&e->hentry);
++			hlist_add_head(&e->hentry, &restore);
++		}
++
++	hlist_for_each_entry_safe(e, tmp, &entries, hentry) {
++		/* prevent duplicates */
++		hash = kdbus_strhash(e->name);
++		hash_for_each_possible(db->entries_hash, p, hentry, hash)
++			if (strcmp(e->name, p->name) == 0 &&
++			    e->wildcard == p->wildcard) {
++				ret = -EEXIST;
++				goto restore;
++			}
++
++		hlist_del(&e->hentry);
++		hash_add(db->entries_hash, &e->hentry, hash);
++	}
++
++restore:
++	/* if we failed, flush all entries we added so far */
++	if (ret < 0)
++		__kdbus_policy_remove_owner(db, owner);
++
++	/* if we failed, restore entries, otherwise release them */
++	hlist_for_each_entry_safe(e, tmp, &restore, hentry) {
++		hlist_del(&e->hentry);
++		if (ret < 0) {
++			hash = kdbus_strhash(e->name);
++			hash_add(db->entries_hash, &e->hentry, hash);
++		} else {
++			kdbus_policy_entry_free(e);
++		}
++	}
++
++	up_write(&db->entries_rwlock);
++
++exit:
++	hlist_for_each_entry_safe(e, tmp, &entries, hentry) {
++		hlist_del(&e->hentry);
++		kdbus_policy_entry_free(e);
++	}
++
++	return ret;
++}
+diff --git a/ipc/kdbus/policy.h b/ipc/kdbus/policy.h
+new file mode 100644
+index 0000000..15dd7bc
+--- /dev/null
++++ b/ipc/kdbus/policy.h
+@@ -0,0 +1,51 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_POLICY_H
++#define __KDBUS_POLICY_H
++
++#include <linux/hashtable.h>
++#include <linux/rwsem.h>
++
++struct kdbus_conn;
++struct kdbus_item;
++
++/**
++ * struct kdbus_policy_db - policy database
++ * @entries_hash:	Hashtable of entries
++ * @entries_rwlock:	Mutex to protect the database's access entries
++ */
++struct kdbus_policy_db {
++	DECLARE_HASHTABLE(entries_hash, 6);
++	struct rw_semaphore entries_rwlock;
++};
++
++void kdbus_policy_db_init(struct kdbus_policy_db *db);
++void kdbus_policy_db_clear(struct kdbus_policy_db *db);
++
++int kdbus_policy_query_unlocked(struct kdbus_policy_db *db,
++				const struct cred *cred, const char *name,
++				unsigned int hash);
++int kdbus_policy_query(struct kdbus_policy_db *db, const struct cred *cred,
++		       const char *name, unsigned int hash);
++
++void kdbus_policy_remove_owner(struct kdbus_policy_db *db,
++			       const void *owner);
++int kdbus_policy_set(struct kdbus_policy_db *db,
++		     const struct kdbus_item *items,
++		     size_t items_size,
++		     size_t max_policies,
++		     bool allow_wildcards,
++		     const void *owner);
++
++#endif
+diff --git a/ipc/kdbus/pool.c b/ipc/kdbus/pool.c
+new file mode 100644
+index 0000000..139bb77
+--- /dev/null
++++ b/ipc/kdbus/pool.c
+@@ -0,0 +1,728 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/aio.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/highmem.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pagemap.h>
++#include <linux/rbtree.h>
++#include <linux/sched.h>
++#include <linux/shmem_fs.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++#include <linux/uio.h>
++
++#include "pool.h"
++#include "util.h"
++
++/**
++ * struct kdbus_pool - the receiver's buffer
++ * @f:			The backing shmem file
++ * @size:		The size of the file
++ * @accounted_size:	Currently accounted memory in bytes
++ * @lock:		Pool data lock
++ * @slices:		All slices sorted by address
++ * @slices_busy:	Tree of allocated slices
++ * @slices_free:	Tree of free slices
++ *
++ * The receiver's buffer, managed as a pool of allocated and free
++ * slices containing the queued messages.
++ *
++ * Messages sent with KDBUS_CMD_SEND are copied directly by the
++ * sending process into the receiver's pool.
++ *
++ * Messages received with KDBUS_CMD_RECV just return the offset
++ * to the data placed in the pool.
++ *
++ * The internally allocated memory needs to be returned by the receiver
++ * with KDBUS_CMD_FREE.
++ */
++struct kdbus_pool {
++	struct file *f;
++	size_t size;
++	size_t accounted_size;
++	struct mutex lock;
++
++	struct list_head slices;
++	struct rb_root slices_busy;
++	struct rb_root slices_free;
++};
++
++/**
++ * struct kdbus_pool_slice - allocated element in kdbus_pool
++ * @pool:		Pool this slice belongs to
++ * @off:		Offset of slice in the shmem file
++ * @size:		Size of slice
++ * @entry:		Entry in "all slices" list
++ * @rb_node:		Entry in free or busy list
++ * @free:		Unused slice
++ * @accounted:		Accounted as queue slice
++ * @ref_kernel:		Kernel holds a reference
++ * @ref_user:		Userspace holds a reference
++ *
++ * The pool has one or more slices, always spanning the entire size of the
++ * pool.
++ *
++ * Every slice is an element in a list sorted by the buffer address, to
++ * provide access to the next neighbor slice.
++ *
++ * Every slice is a member of either the busy or the free tree. The free
++ * tree is organized by slice size, the busy tree organized by buffer
++ * offset.
++ */
++struct kdbus_pool_slice {
++	struct kdbus_pool *pool;
++	size_t off;
++	size_t size;
++
++	struct list_head entry;
++	struct rb_node rb_node;
++
++	bool free:1;
++	bool accounted:1;
++	bool ref_kernel:1;
++	bool ref_user:1;
++};
++
++static struct kdbus_pool_slice *kdbus_pool_slice_new(struct kdbus_pool *pool,
++						     size_t off, size_t size)
++{
++	struct kdbus_pool_slice *slice;
++
++	slice = kzalloc(sizeof(*slice), GFP_KERNEL);
++	if (!slice)
++		return NULL;
++
++	slice->pool = pool;
++	slice->off = off;
++	slice->size = size;
++	slice->free = true;
++	return slice;
++}
++
++/* insert a slice into the free tree */
++static void kdbus_pool_add_free_slice(struct kdbus_pool *pool,
++				      struct kdbus_pool_slice *slice)
++{
++	struct rb_node **n;
++	struct rb_node *pn = NULL;
++
++	n = &pool->slices_free.rb_node;
++	while (*n) {
++		struct kdbus_pool_slice *pslice;
++
++		pn = *n;
++		pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
++		if (slice->size < pslice->size)
++			n = &pn->rb_left;
++		else
++			n = &pn->rb_right;
++	}
++
++	rb_link_node(&slice->rb_node, pn, n);
++	rb_insert_color(&slice->rb_node, &pool->slices_free);
++}
++
++/* insert a slice into the busy tree */
++static void kdbus_pool_add_busy_slice(struct kdbus_pool *pool,
++				      struct kdbus_pool_slice *slice)
++{
++	struct rb_node **n;
++	struct rb_node *pn = NULL;
++
++	n = &pool->slices_busy.rb_node;
++	while (*n) {
++		struct kdbus_pool_slice *pslice;
++
++		pn = *n;
++		pslice = rb_entry(pn, struct kdbus_pool_slice, rb_node);
++		if (slice->off < pslice->off)
++			n = &pn->rb_left;
++		else if (slice->off > pslice->off)
++			n = &pn->rb_right;
++		else
++			BUG();
++	}
++
++	rb_link_node(&slice->rb_node, pn, n);
++	rb_insert_color(&slice->rb_node, &pool->slices_busy);
++}
++
++static struct kdbus_pool_slice *kdbus_pool_find_slice(struct kdbus_pool *pool,
++						      size_t off)
++{
++	struct rb_node *n;
++
++	n = pool->slices_busy.rb_node;
++	while (n) {
++		struct kdbus_pool_slice *s;
++
++		s = rb_entry(n, struct kdbus_pool_slice, rb_node);
++		if (off < s->off)
++			n = n->rb_left;
++		else if (off > s->off)
++			n = n->rb_right;
++		else
++			return s;
++	}
++
++	return NULL;
++}
++
++/**
++ * kdbus_pool_slice_alloc() - allocate memory from a pool
++ * @pool:	The receiver's pool
++ * @size:	The number of bytes to allocate
++ * @accounted:	Whether this slice should be accounted for
++ *
++ * The returned slice is later passed to kdbus_pool_slice_release() to
++ * free the allocated memory. Data is copied into the slice separately,
++ * via kdbus_pool_slice_copy_kvec() or kdbus_pool_slice_copy_iovec().
++ *
++ * Return: the allocated slice on success, ERR_PTR on failure.
++ */
++struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
++						size_t size, bool accounted)
++{
++	size_t slice_size = KDBUS_ALIGN8(size);
++	struct rb_node *n, *found = NULL;
++	struct kdbus_pool_slice *s;
++	int ret = 0;
++
++	if (WARN_ON(!size))
++		return ERR_PTR(-EINVAL);
++
++	/* search a free slice with the closest matching size */
++	mutex_lock(&pool->lock);
++	n = pool->slices_free.rb_node;
++	while (n) {
++		s = rb_entry(n, struct kdbus_pool_slice, rb_node);
++		if (slice_size < s->size) {
++			found = n;
++			n = n->rb_left;
++		} else if (slice_size > s->size) {
++			n = n->rb_right;
++		} else {
++			found = n;
++			break;
++		}
++	}
++
++	/* no slice with the minimum size found in the pool */
++	if (!found) {
++		ret = -EXFULL;
++		goto exit_unlock;
++	}
++
++	/* no exact match, use the closest one */
++	if (!n) {
++		struct kdbus_pool_slice *s_new;
++
++		s = rb_entry(found, struct kdbus_pool_slice, rb_node);
++
++		/* split off the remainder into its own slice */
++		s_new = kdbus_pool_slice_new(pool, s->off + slice_size,
++					     s->size - slice_size);
++		if (!s_new) {
++			ret = -ENOMEM;
++			goto exit_unlock;
++		}
++
++		list_add(&s_new->entry, &s->entry);
++		kdbus_pool_add_free_slice(pool, s_new);
++
++		/* adjust our size now that we split-off another slice */
++		s->size = slice_size;
++	}
++
++	/* move slice from free to the busy tree */
++	rb_erase(found, &pool->slices_free);
++	kdbus_pool_add_busy_slice(pool, s);
++
++	WARN_ON(s->ref_kernel || s->ref_user);
++
++	s->ref_kernel = true;
++	s->free = false;
++	s->accounted = accounted;
++	if (accounted)
++		pool->accounted_size += s->size;
++	mutex_unlock(&pool->lock);
++
++	return s;
++
++exit_unlock:
++	mutex_unlock(&pool->lock);
++	return ERR_PTR(ret);
++}
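++
++/*
++ * A minimal usage sketch (illustrative only, error unwinding elided):
++ * a producer allocates a slice, fills it through the copy helpers
++ * further below and finally drops its kernel reference:
++ *
++ *	struct kdbus_pool_slice *s;
++ *
++ *	s = kdbus_pool_slice_alloc(pool, len, true);
++ *	if (IS_ERR(s))
++ *		return PTR_ERR(s);
++ *	ret = kdbus_pool_slice_copy_kvec(s, 0, kvec, n, len);
++ *	kdbus_pool_slice_release(s);
++ */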
++
++static void __kdbus_pool_slice_release(struct kdbus_pool_slice *slice)
++{
++	struct kdbus_pool *pool = slice->pool;
++
++	/* don't free the slice if either side still holds a reference */
++	if (slice->ref_kernel || slice->ref_user)
++		return;
++
++	if (WARN_ON(slice->free))
++		return;
++
++	rb_erase(&slice->rb_node, &pool->slices_busy);
++
++	/* merge with the next free slice */
++	if (!list_is_last(&slice->entry, &pool->slices)) {
++		struct kdbus_pool_slice *s;
++
++		s = list_entry(slice->entry.next,
++			       struct kdbus_pool_slice, entry);
++		if (s->free) {
++			rb_erase(&s->rb_node, &pool->slices_free);
++			list_del(&s->entry);
++			slice->size += s->size;
++			kfree(s);
++		}
++	}
++
++	/* merge with previous free slice */
++	if (pool->slices.next != &slice->entry) {
++		struct kdbus_pool_slice *s;
++
++		s = list_entry(slice->entry.prev,
++			       struct kdbus_pool_slice, entry);
++		if (s->free) {
++			rb_erase(&s->rb_node, &pool->slices_free);
++			list_del(&slice->entry);
++			s->size += slice->size;
++			kfree(slice);
++			slice = s;
++		}
++	}
++
++	slice->free = true;
++	kdbus_pool_add_free_slice(pool, slice);
++}
++
++/**
++ * kdbus_pool_slice_release() - drop kernel-reference on allocated slice
++ * @slice:		Slice allocated from the pool
++ *
++ * This releases the kernel-reference on the given slice. If the
++ * kernel-reference and the user-reference on a slice are dropped, the slice is
++ * returned to the pool.
++ *
++ * So far, we do not implement full ref-counting on slices. The kernel and
++ * user-space can each hold exactly one reference to a slice. Once both are
++ * dropped, the slice is released.
++ */
++void kdbus_pool_slice_release(struct kdbus_pool_slice *slice)
++{
++	struct kdbus_pool *pool;
++
++	if (!slice)
++		return;
++
++	/* @slice may be freed, so keep local ptr to @pool */
++	pool = slice->pool;
++
++	mutex_lock(&pool->lock);
++	/* kernel must own a ref to @slice to drop it */
++	WARN_ON(!slice->ref_kernel);
++	slice->ref_kernel = false;
++	/* no longer kernel-owned, de-account slice */
++	if (slice->accounted && !WARN_ON(pool->accounted_size < slice->size))
++		pool->accounted_size -= slice->size;
++	__kdbus_pool_slice_release(slice);
++	mutex_unlock(&pool->lock);
++}
++
++/**
++ * kdbus_pool_release_offset() - release a public offset
++ * @pool:		pool to operate on
++ * @off:		offset to release
++ *
++ * This should be called whenever user-space frees a slice handed to it. It
++ * verifies the slice is available and public, and then drops it. It ensures
++ * correct locking and barriers against queues.
++ *
++ * Return: 0 on success, -ENXIO if the offset is invalid or not public.
++ */
++int kdbus_pool_release_offset(struct kdbus_pool *pool, size_t off)
++{
++	struct kdbus_pool_slice *slice;
++	int ret = 0;
++
++	/* 'pool->size' is used as dummy offset for empty slices */
++	if (off == pool->size)
++		return 0;
++
++	mutex_lock(&pool->lock);
++	slice = kdbus_pool_find_slice(pool, off);
++	if (slice && slice->ref_user) {
++		slice->ref_user = false;
++		__kdbus_pool_slice_release(slice);
++	} else {
++		ret = -ENXIO;
++	}
++	mutex_unlock(&pool->lock);
++
++	return ret;
++}
++
++/**
++ * kdbus_pool_publish_empty() - publish empty slice to user-space
++ * @pool:		pool to operate on
++ * @off:		output storage for offset, or NULL
++ * @size:		output storage for size, or NULL
++ *
++ * This is the same as kdbus_pool_slice_publish(), but uses a dummy slice with
++ * size 0. The returned offset points to the end of the pool and is never
++ * returned on real slices.
++ */
++void kdbus_pool_publish_empty(struct kdbus_pool *pool, u64 *off, u64 *size)
++{
++	if (off)
++		*off = pool->size;
++	if (size)
++		*size = 0;
++}
++
++/**
++ * kdbus_pool_slice_publish() - publish slice to user-space
++ * @slice:		The slice
++ * @out_offset:		Output storage for offset, or NULL
++ * @out_size:		Output storage for size, or NULL
++ *
++ * This prepares a slice to be published to user-space.
++ *
++ * This call combines the following operations:
++ *   * the memory region is flushed so the user's memory view is consistent
++ *   * the slice is marked as referenced by user-space, so user-space has to
++ *     call KDBUS_CMD_FREE to release it
++ *   * the offset and size of the slice are written to the given output
++ *     arguments, if non-NULL
++ */
++void kdbus_pool_slice_publish(struct kdbus_pool_slice *slice,
++			      u64 *out_offset, u64 *out_size)
++{
++	mutex_lock(&slice->pool->lock);
++	/* kernel must own a ref to @slice to gain a user-space ref */
++	WARN_ON(!slice->ref_kernel);
++	slice->ref_user = true;
++	mutex_unlock(&slice->pool->lock);
++
++	if (out_offset)
++		*out_offset = slice->off;
++	if (out_size)
++		*out_size = slice->size;
++}
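++
++/*
++ * Sketch of the publish/free hand-off (illustrative): the kernel
++ * publishes the slice and passes the offset to user-space, which later
++ * hands it back via KDBUS_CMD_FREE, ending up in
++ * kdbus_pool_release_offset():
++ *
++ *	u64 off, size;
++ *
++ *	kdbus_pool_slice_publish(slice, &off, &size);
++ *	kdbus_pool_slice_release(slice);         (drop the kernel ref)
++ *	...
++ *	kdbus_pool_release_offset(pool, off);    (KDBUS_CMD_FREE path)
++ */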
++
++/**
++ * kdbus_pool_slice_offset() - Get a slice's offset inside the pool
++ * @slice:	Slice to return the offset of
++ *
++ * Return: The internal offset of @slice inside the pool.
++ */
++off_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice)
++{
++	return slice->off;
++}
++
++/**
++ * kdbus_pool_slice_size() - get size of a pool slice
++ * @slice:	slice to query
++ *
++ * Return: size of the given slice
++ */
++size_t kdbus_pool_slice_size(const struct kdbus_pool_slice *slice)
++{
++	return slice->size;
++}
++
++/**
++ * kdbus_pool_new() - create a new pool
++ * @name:		Name of the (deleted) file which shows up in
++ *			/proc, used for debugging
++ * @size:		Maximum size of the pool
++ *
++ * Return: a new kdbus_pool on success, ERR_PTR on failure.
++ */
++struct kdbus_pool *kdbus_pool_new(const char *name, size_t size)
++{
++	struct kdbus_pool_slice *s;
++	struct kdbus_pool *p;
++	struct file *f;
++	char *n = NULL;
++	int ret;
++
++	p = kzalloc(sizeof(*p), GFP_KERNEL);
++	if (!p)
++		return ERR_PTR(-ENOMEM);
++
++	if (name) {
++		n = kasprintf(GFP_KERNEL, KBUILD_MODNAME "-conn:%s", name);
++		if (!n) {
++			ret = -ENOMEM;
++			goto exit_free;
++		}
++	}
++
++	f = shmem_file_setup(n ?: KBUILD_MODNAME "-conn", size, 0);
++	kfree(n);
++
++	if (IS_ERR(f)) {
++		ret = PTR_ERR(f);
++		goto exit_free;
++	}
++
++	ret = get_write_access(file_inode(f));
++	if (ret < 0)
++		goto exit_put_shmem;
++
++	/* allocate first slice spanning the entire pool */
++	s = kdbus_pool_slice_new(p, 0, size);
++	if (!s) {
++		ret = -ENOMEM;
++		goto exit_put_write;
++	}
++
++	p->f = f;
++	p->size = size;
++	p->slices_free = RB_ROOT;
++	p->slices_busy = RB_ROOT;
++	mutex_init(&p->lock);
++
++	INIT_LIST_HEAD(&p->slices);
++	list_add(&s->entry, &p->slices);
++
++	kdbus_pool_add_free_slice(p, s);
++	return p;
++
++exit_put_write:
++	put_write_access(file_inode(f));
++exit_put_shmem:
++	fput(f);
++exit_free:
++	kfree(p);
++	return ERR_PTR(ret);
++}
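++
++/*
++ * Minimal sketch of pool setup and teardown for a connection (the name
++ * and size are made up for illustration):
++ *
++ *	struct kdbus_pool *pool;
++ *
++ *	pool = kdbus_pool_new("1000-example", SZ_16M);
++ *	if (IS_ERR(pool))
++ *		return PTR_ERR(pool);
++ *	...
++ *	kdbus_pool_free(pool);
++ */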
++
++/**
++ * kdbus_pool_free() - destroy pool
++ * @pool:		The receiver's pool
++ */
++void kdbus_pool_free(struct kdbus_pool *pool)
++{
++	struct kdbus_pool_slice *s, *tmp;
++
++	if (!pool)
++		return;
++
++	list_for_each_entry_safe(s, tmp, &pool->slices, entry) {
++		list_del(&s->entry);
++		kfree(s);
++	}
++
++	put_write_access(file_inode(pool->f));
++	fput(pool->f);
++	kfree(pool);
++}
++
++/**
++ * kdbus_pool_accounted() - retrieve accounting information
++ * @pool:		pool to query
++ * @size:		output for overall pool size
++ * @acc:		output for currently accounted size
++ *
++ * This returns accounting information of the pool. Note that the data might
++ * change after the function returns, as the pool lock is dropped. You need to
++ * protect the data via other means if you need reliable accounting.
++ */
++void kdbus_pool_accounted(struct kdbus_pool *pool, size_t *size, size_t *acc)
++{
++	mutex_lock(&pool->lock);
++	if (size)
++		*size = pool->size;
++	if (acc)
++		*acc = pool->accounted_size;
++	mutex_unlock(&pool->lock);
++}
++
++/**
++ * kdbus_pool_slice_copy_iovec() - copy user memory to a slice
++ * @slice:		The slice to write to
++ * @off:		Offset in the slice to write to
++ * @iov:		iovec array, pointing to data to copy
++ * @iov_len:		Number of elements in @iov
++ * @total_len:		Total number of bytes described in members of @iov
++ *
++ * User memory referenced by @iov will be copied into @slice at offset @off.
++ *
++ * Return: the number of bytes copied, negative errno on failure.
++ */
++ssize_t
++kdbus_pool_slice_copy_iovec(const struct kdbus_pool_slice *slice, loff_t off,
++			    struct iovec *iov, size_t iov_len, size_t total_len)
++{
++	struct iov_iter iter;
++	ssize_t len;
++
++	if (WARN_ON(off + total_len > slice->size))
++		return -EFAULT;
++
++	off += slice->off;
++	iov_iter_init(&iter, WRITE, iov, iov_len, total_len);
++	len = vfs_iter_write(slice->pool->f, &iter, &off);
++
++	return (len >= 0 && len != total_len) ? -EFAULT : len;
++}
++
++/**
++ * kdbus_pool_slice_copy_kvec() - copy kernel memory to a slice
++ * @slice:		The slice to write to
++ * @off:		Offset in the slice to write to
++ * @kvec:		kvec array, pointing to data to copy
++ * @kvec_len:		Number of elements in @kvec
++ * @total_len:		Total number of bytes described in members of @kvec
++ *
++ * Kernel memory referenced by @kvec will be copied into @slice at offset @off.
++ *
++ * Return: the number of bytes copied, negative errno on failure.
++ */
++ssize_t kdbus_pool_slice_copy_kvec(const struct kdbus_pool_slice *slice,
++				   loff_t off, struct kvec *kvec,
++				   size_t kvec_len, size_t total_len)
++{
++	struct iov_iter iter;
++	mm_segment_t old_fs;
++	ssize_t len;
++
++	if (WARN_ON(off + total_len > slice->size))
++		return -EFAULT;
++
++	off += slice->off;
++	iov_iter_kvec(&iter, WRITE | ITER_KVEC, kvec, kvec_len, total_len);
++
++	old_fs = get_fs();
++	set_fs(get_ds());
++	len = vfs_iter_write(slice->pool->f, &iter, &off);
++	set_fs(old_fs);
++
++	return (len >= 0 && len != total_len) ? -EFAULT : len;
++}
++
++/**
++ * kdbus_pool_slice_copy() - copy data from one slice into another
++ * @slice_dst:		destination slice
++ * @slice_src:		source slice
++ *
++ * Return: 0 on success, negative error number on failure.
++ */
++int kdbus_pool_slice_copy(const struct kdbus_pool_slice *slice_dst,
++			  const struct kdbus_pool_slice *slice_src)
++{
++	struct file *f_src = slice_src->pool->f;
++	struct file *f_dst = slice_dst->pool->f;
++	struct inode *i_dst = file_inode(f_dst);
++	struct address_space *mapping_dst = f_dst->f_mapping;
++	const struct address_space_operations *aops = mapping_dst->a_ops;
++	unsigned long len = slice_src->size;
++	loff_t off_src = slice_src->off;
++	loff_t off_dst = slice_dst->off;
++	mm_segment_t old_fs;
++	int ret = 0;
++
++	if (WARN_ON(slice_src->size != slice_dst->size) ||
++	    WARN_ON(slice_src->free || slice_dst->free))
++		return -EINVAL;
++
++	mutex_lock(&i_dst->i_mutex);
++	old_fs = get_fs();
++	set_fs(get_ds());
++	while (len > 0) {
++		unsigned long page_off;
++		unsigned long copy_len;
++		char __user *kaddr;
++		struct page *page;
++		ssize_t n_read;
++		void *fsdata;
++		long status;
++
++		page_off = off_dst & (PAGE_CACHE_SIZE - 1);
++		copy_len = min_t(unsigned long,
++				 PAGE_CACHE_SIZE - page_off, len);
++
++		status = aops->write_begin(f_dst, mapping_dst, off_dst,
++					   copy_len, 0, &page, &fsdata);
++		if (unlikely(status < 0)) {
++			ret = status;
++			break;
++		}
++
++		kaddr = (char __force __user *)kmap(page) + page_off;
++		n_read = f_src->f_op->read(f_src, kaddr, copy_len, &off_src);
++		kunmap(page);
++		mark_page_accessed(page);
++		flush_dcache_page(page);
++
++		if (unlikely(n_read != copy_len)) {
++			ret = -EFAULT;
++			break;
++		}
++
++		status = aops->write_end(f_dst, mapping_dst, off_dst,
++					 copy_len, copy_len, page, fsdata);
++		if (unlikely(status != copy_len)) {
++			ret = -EFAULT;
++			break;
++		}
++
++		off_dst += copy_len;
++		len -= copy_len;
++	}
++	set_fs(old_fs);
++	mutex_unlock(&i_dst->i_mutex);
++
++	return ret;
++}
++
++/**
++ * kdbus_pool_mmap() -  map the pool into the process
++ * @pool:		The receiver's pool
++ * @vma:		passed by mmap() syscall
++ *
++ * Return: the result of the mmap() call, negative errno on failure.
++ */
++int kdbus_pool_mmap(const struct kdbus_pool *pool, struct vm_area_struct *vma)
++{
++	/* deny write access to the pool */
++	if (vma->vm_flags & VM_WRITE)
++		return -EPERM;
++	vma->vm_flags &= ~VM_MAYWRITE;
++
++	/* do not allow to map more than the size of the file */
++	if ((vma->vm_end - vma->vm_start) > pool->size)
++		return -EFAULT;
++
++	/* replace the connection file with our shmem file */
++	if (vma->vm_file)
++		fput(vma->vm_file);
++	vma->vm_file = get_file(pool->f);
++
++	return pool->f->f_op->mmap(pool->f, vma);
++}
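++
++/*
++ * From user-space, the pool is mapped read-only through the connection
++ * file descriptor; a sketch (assumes <sys/mman.h>, error checks elided):
++ *
++ *	uint8_t *buf = mmap(NULL, pool_size, PROT_READ, MAP_SHARED,
++ *			    conn_fd, 0);
++ *	struct kdbus_msg *msg = (struct kdbus_msg *)(buf + offset);
++ */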
+diff --git a/ipc/kdbus/pool.h b/ipc/kdbus/pool.h
+new file mode 100644
+index 0000000..a903821
+--- /dev/null
++++ b/ipc/kdbus/pool.h
+@@ -0,0 +1,46 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_POOL_H
++#define __KDBUS_POOL_H
++
++#include <linux/uio.h>
++
++struct kdbus_pool;
++struct kdbus_pool_slice;
++
++struct kdbus_pool *kdbus_pool_new(const char *name, size_t size);
++void kdbus_pool_free(struct kdbus_pool *pool);
++void kdbus_pool_accounted(struct kdbus_pool *pool, size_t *size, size_t *acc);
++int kdbus_pool_mmap(const struct kdbus_pool *pool, struct vm_area_struct *vma);
++int kdbus_pool_release_offset(struct kdbus_pool *pool, size_t off);
++void kdbus_pool_publish_empty(struct kdbus_pool *pool, u64 *off, u64 *size);
++
++struct kdbus_pool_slice *kdbus_pool_slice_alloc(struct kdbus_pool *pool,
++						size_t size, bool accounted);
++void kdbus_pool_slice_release(struct kdbus_pool_slice *slice);
++void kdbus_pool_slice_publish(struct kdbus_pool_slice *slice,
++			      u64 *out_offset, u64 *out_size);
++off_t kdbus_pool_slice_offset(const struct kdbus_pool_slice *slice);
++size_t kdbus_pool_slice_size(const struct kdbus_pool_slice *slice);
++int kdbus_pool_slice_copy(const struct kdbus_pool_slice *slice_dst,
++			  const struct kdbus_pool_slice *slice_src);
++ssize_t kdbus_pool_slice_copy_kvec(const struct kdbus_pool_slice *slice,
++				   loff_t off, struct kvec *kvec,
++				   size_t kvec_count, size_t total_len);
++ssize_t kdbus_pool_slice_copy_iovec(const struct kdbus_pool_slice *slice,
++				    loff_t off, struct iovec *iov,
++				    size_t iov_count, size_t total_len);
++
++#endif
+diff --git a/ipc/kdbus/queue.c b/ipc/kdbus/queue.c
+new file mode 100644
+index 0000000..a449464
+--- /dev/null
++++ b/ipc/kdbus/queue.c
+@@ -0,0 +1,678 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/audit.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/hashtable.h>
++#include <linux/idr.h>
++#include <linux/init.h>
++#include <linux/math64.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/poll.h>
++#include <linux/sched.h>
++#include <linux/sizes.h>
++#include <linux/slab.h>
++#include <linux/syscalls.h>
++#include <linux/uio.h>
++
++#include "util.h"
++#include "domain.h"
++#include "connection.h"
++#include "item.h"
++#include "message.h"
++#include "metadata.h"
++#include "queue.h"
++#include "reply.h"
++
++/**
++ * kdbus_queue_init() - initialize data structure related to a queue
++ * @queue:	The queue to initialize
++ */
++void kdbus_queue_init(struct kdbus_queue *queue)
++{
++	INIT_LIST_HEAD(&queue->msg_list);
++	queue->msg_prio_queue = RB_ROOT;
++}
++
++/**
++ * kdbus_queue_peek() - Retrieves an entry from a queue
++ * @queue:		The queue
++ * @priority:		The minimum priority of the entry to peek
++ * @use_priority:	Boolean flag whether or not to peek by priority
++ *
++ * Look for an entry in a queue, either by priority or the oldest one (FIFO).
++ * The entry is not freed, taken off the queue's lists, or modified in any
++ * other way.
++ *
++ * Return: the peeked queue entry on success, NULL if no suitable msg is found
++ */
++struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
++					   s64 priority, bool use_priority)
++{
++	struct kdbus_queue_entry *e;
++
++	if (list_empty(&queue->msg_list))
++		return NULL;
++
++	if (use_priority) {
++		/* get next entry with highest priority */
++		e = rb_entry(queue->msg_prio_highest,
++			     struct kdbus_queue_entry, prio_node);
++
++		/* no entry with the requested priority */
++		if (e->priority > priority)
++			return NULL;
++	} else {
++		/* ignore the priority, return the next entry in the list */
++		e = list_first_entry(&queue->msg_list,
++				     struct kdbus_queue_entry, entry);
++	}
++
++	return e;
++}
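++
++/*
++ * Illustrative sketch: a receiver that only accepts messages with a
++ * priority of 0 or below peeks like this (connection lock held):
++ *
++ *	struct kdbus_queue_entry *e;
++ *
++ *	e = kdbus_queue_peek(&conn->queue, 0, true);
++ *	if (!e)
++ *		return -EAGAIN;
++ */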
++
++static void kdbus_queue_entry_link(struct kdbus_queue_entry *entry)
++{
++	struct kdbus_queue *queue = &entry->conn->queue;
++	struct rb_node **n, *pn = NULL;
++	bool highest = true;
++
++	lockdep_assert_held(&entry->conn->lock);
++	if (WARN_ON(!list_empty(&entry->entry)))
++		return;
++
++	/* sort into priority entry tree */
++	n = &queue->msg_prio_queue.rb_node;
++	while (*n) {
++		struct kdbus_queue_entry *e;
++
++		pn = *n;
++		e = rb_entry(pn, struct kdbus_queue_entry, prio_node);
++
++		/* existing node for this priority, add to its list */
++		if (likely(entry->priority == e->priority)) {
++			list_add_tail(&entry->prio_entry, &e->prio_entry);
++			goto prio_done;
++		}
++
++		if (entry->priority < e->priority) {
++			n = &pn->rb_left;
++		} else {
++			n = &pn->rb_right;
++			highest = false;
++		}
++	}
++
++	/* cache highest-priority entry */
++	if (highest)
++		queue->msg_prio_highest = &entry->prio_node;
++
++	/* new node for this priority */
++	rb_link_node(&entry->prio_node, pn, n);
++	rb_insert_color(&entry->prio_node, &queue->msg_prio_queue);
++	INIT_LIST_HEAD(&entry->prio_entry);
++
++prio_done:
++	/* add to unsorted fifo list */
++	list_add_tail(&entry->entry, &queue->msg_list);
++}
++
++static void kdbus_queue_entry_unlink(struct kdbus_queue_entry *entry)
++{
++	struct kdbus_queue *queue = &entry->conn->queue;
++
++	lockdep_assert_held(&entry->conn->lock);
++	if (list_empty(&entry->entry))
++		return;
++
++	list_del_init(&entry->entry);
++
++	if (list_empty(&entry->prio_entry)) {
++		/*
++		 * Single entry for this priority, update cached
++		 * highest-priority entry, remove the tree node.
++		 */
++		if (queue->msg_prio_highest == &entry->prio_node)
++			queue->msg_prio_highest = rb_next(&entry->prio_node);
++
++		rb_erase(&entry->prio_node, &queue->msg_prio_queue);
++	} else {
++		struct kdbus_queue_entry *q;
++
++		/*
++		 * Multiple entries for this priority; get the next one in
++		 * the list. Update cached highest-priority entry, store the
++		 * new one as the tree node.
++		 */
++		q = list_first_entry(&entry->prio_entry,
++				     struct kdbus_queue_entry, prio_entry);
++		list_del(&entry->prio_entry);
++
++		if (queue->msg_prio_highest == &entry->prio_node)
++			queue->msg_prio_highest = &q->prio_node;
++
++		rb_replace_node(&entry->prio_node, &q->prio_node,
++				&queue->msg_prio_queue);
++	}
++}
++
++/**
++ * kdbus_queue_entry_new() - allocate a queue entry
++ * @conn_dst:	destination connection
++ * @kmsg:	kmsg object the queue entry should track
++ * @user:	user to account message on (or NULL for kernel messages)
++ *
++ * Allocates a queue entry based on a given kmsg and allocates space for
++ * the message payload and the requested metadata in the connection's pool.
++ * The entry is not actually added to the queue's lists at this point.
++ *
++ * Return: the allocated entry on success, or an ERR_PTR on failures.
++ */
++struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
++						const struct kdbus_kmsg *kmsg,
++						struct kdbus_user *user)
++{
++	struct kdbus_msg_resources *res = kmsg->res;
++	const struct kdbus_msg *msg = &kmsg->msg;
++	struct kdbus_queue_entry *entry;
++	size_t memfd_cnt = 0;
++	struct kvec kvec[2];
++	size_t meta_size;
++	size_t msg_size;
++	u64 payload_off;
++	u64 size = 0;
++	int ret = 0;
++
++	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
++	if (!entry)
++		return ERR_PTR(-ENOMEM);
++
++	INIT_LIST_HEAD(&entry->entry);
++	entry->priority = msg->priority;
++	entry->dst_name_id = kmsg->dst_name_id;
++	entry->msg_res = kdbus_msg_resources_ref(res);
++	entry->proc_meta = kdbus_meta_proc_ref(kmsg->proc_meta);
++	entry->conn_meta = kdbus_meta_conn_ref(kmsg->conn_meta);
++	entry->conn = kdbus_conn_ref(conn_dst);
++
++	if (kmsg->msg.src_id == KDBUS_SRC_ID_KERNEL)
++		msg_size = msg->size;
++	else
++		msg_size = offsetof(struct kdbus_msg, items);
++
++	/* sum up the size of the needed slice */
++	size = msg_size;
++
++	if (res) {
++		size += res->vec_count *
++			KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++		if (res->memfd_count) {
++			entry->memfd_offset =
++				kcalloc(res->memfd_count, sizeof(size_t),
++					GFP_KERNEL);
++			if (!entry->memfd_offset) {
++				ret = -ENOMEM;
++				goto exit_free_entry;
++			}
++
++			size += res->memfd_count *
++				KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
++		}
++
++		if (res->fds_count)
++			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
++
++		if (res->dst_name)
++			size += KDBUS_ITEM_SIZE(strlen(res->dst_name) + 1);
++	}
++
++	/*
++	 * Remember the offset of the metadata part, so we can override
++	 * this part later during kdbus_queue_entry_install().
++	 */
++	entry->meta_offset = size;
++
++	if (entry->proc_meta || entry->conn_meta) {
++		entry->attach_flags =
++			atomic64_read(&conn_dst->attach_flags_recv);
++
++		ret = kdbus_meta_export_prepare(entry->proc_meta,
++						entry->conn_meta,
++						&entry->attach_flags,
++						&meta_size);
++		if (ret < 0)
++			goto exit_free_entry;
++
++		size += meta_size;
++	}
++
++	payload_off = size;
++	size += kmsg->pool_size;
++	size = KDBUS_ALIGN8(size);
++
++	ret = kdbus_conn_quota_inc(conn_dst, user, size,
++				   res ? res->fds_count : 0);
++	if (ret < 0)
++		goto exit_free_entry;
++
++	entry->slice = kdbus_pool_slice_alloc(conn_dst->pool, size, true);
++	if (IS_ERR(entry->slice)) {
++		ret = PTR_ERR(entry->slice);
++		entry->slice = NULL;
++		kdbus_conn_quota_dec(conn_dst, user, size,
++				     res ? res->fds_count : 0);
++		goto exit_free_entry;
++	}
++
++	/* we accounted for exactly 'size' bytes, make sure it didn't grow */
++	WARN_ON(kdbus_pool_slice_size(entry->slice) != size);
++	entry->user = kdbus_user_ref(user);
++
++	/* copy message header */
++	kvec[0].iov_base = (char *)msg;
++	kvec[0].iov_len = msg_size;
++
++	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1, msg_size);
++	if (ret < 0)
++		goto exit_free_entry;
++
++	/* 'size' will now track the write position */
++	size = msg_size;
++
++	/* create message payload items */
++	if (res) {
++		size_t dst_name_len = 0;
++		unsigned int i;
++		size_t sz = 0;
++
++		if (res->dst_name) {
++			dst_name_len = strlen(res->dst_name) + 1;
++			sz += KDBUS_ITEM_SIZE(dst_name_len);
++		}
++
++		for (i = 0; i < res->data_count; ++i) {
++			struct kdbus_vec v;
++			struct kdbus_memfd m;
++
++			switch (res->data[i].type) {
++			case KDBUS_MSG_DATA_VEC:
++				sz += KDBUS_ITEM_SIZE(sizeof(v));
++				break;
++
++			case KDBUS_MSG_DATA_MEMFD:
++				sz += KDBUS_ITEM_SIZE(sizeof(m));
++				break;
++			}
++		}
++
++		if (sz) {
++			struct kdbus_item *items, *item;
++
++			items = kmalloc(sz, GFP_KERNEL);
++			if (!items) {
++				ret = -ENOMEM;
++				goto exit_free_entry;
++			}
++
++			item = items;
++
++			if (res->dst_name)
++				item = kdbus_item_set(item, KDBUS_ITEM_DST_NAME,
++						      res->dst_name,
++						      dst_name_len);
++
++			for (i = 0; i < res->data_count; ++i) {
++				struct kdbus_msg_data *d = res->data + i;
++				struct kdbus_memfd m = {};
++				struct kdbus_vec v = {};
++
++				switch (d->type) {
++				case KDBUS_MSG_DATA_VEC:
++					v.size = d->size;
++					v.offset = d->vec.off;
++					if (v.offset != ~0ULL)
++						v.offset += payload_off;
++
++					item = kdbus_item_set(item,
++							KDBUS_ITEM_PAYLOAD_OFF,
++							&v, sizeof(v));
++					break;
++
++				case KDBUS_MSG_DATA_MEMFD:
++					/*
++					 * Remember the location of memfds, so
++					 * we can override the content from
++					 * kdbus_queue_entry_install().
++					 */
++					entry->memfd_offset[memfd_cnt++] =
++						msg_size +
++						(char *)item - (char *)items +
++						offsetof(struct kdbus_item,
++							 memfd);
++
++					item = kdbus_item_set(item,
++						       KDBUS_ITEM_PAYLOAD_MEMFD,
++						       &m, sizeof(m));
++					break;
++				}
++			}
++
++			kvec[0].iov_base = items;
++			kvec[0].iov_len = sz;
++
++			ret = kdbus_pool_slice_copy_kvec(entry->slice, size,
++							 kvec, 1, sz);
++			kfree(items);
++
++			if (ret < 0)
++				goto exit_free_entry;
++
++			size += sz;
++		}
++
++		/*
++		 * Remember the location of the FD part, so we can override the
++		 * content in kdbus_queue_entry_install().
++		 */
++		if (res->fds_count) {
++			entry->fds_offset = size;
++			size += KDBUS_ITEM_SIZE(sizeof(int) * res->fds_count);
++		}
++	}
++
++	/* finally, copy over the actual message payload */
++	if (kmsg->iov_count) {
++		ret = kdbus_pool_slice_copy_iovec(entry->slice, payload_off,
++						  kmsg->iov,
++						  kmsg->iov_count,
++						  kmsg->pool_size);
++		if (ret < 0)
++			goto exit_free_entry;
++	}
++
++	return entry;
++
++exit_free_entry:
++	kdbus_queue_entry_free(entry);
++	return ERR_PTR(ret);
++}
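++
++/*
++ * Typical send-path usage, sketched (quota handling and error
++ * unwinding elided):
++ *
++ *	struct kdbus_queue_entry *entry;
++ *
++ *	entry = kdbus_queue_entry_new(conn_dst, kmsg, user);
++ *	if (IS_ERR(entry))
++ *		return PTR_ERR(entry);
++ *
++ *	mutex_lock(&conn_dst->lock);
++ *	kdbus_queue_entry_enqueue(entry, NULL);
++ *	mutex_unlock(&conn_dst->lock);
++ */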
++
++/**
++ * kdbus_queue_entry_free() - free resources of an entry
++ * @entry:	The entry to free
++ *
++ * Removes resources allocated by a queue entry, along with the entry itself.
++ * Note that the entry's slice may outlive this call if user-space still
++ * holds a reference to it.
++ */
++void kdbus_queue_entry_free(struct kdbus_queue_entry *entry)
++{
++	if (!entry)
++		return;
++
++	lockdep_assert_held(&entry->conn->lock);
++
++	kdbus_queue_entry_unlink(entry);
++	kdbus_reply_unref(entry->reply);
++
++	if (entry->slice) {
++		kdbus_conn_quota_dec(entry->conn, entry->user,
++				     kdbus_pool_slice_size(entry->slice),
++				     entry->msg_res ?
++						entry->msg_res->fds_count : 0);
++		kdbus_pool_slice_release(entry->slice);
++		kdbus_user_unref(entry->user);
++	}
++
++	kdbus_msg_resources_unref(entry->msg_res);
++	kdbus_meta_conn_unref(entry->conn_meta);
++	kdbus_meta_proc_unref(entry->proc_meta);
++	kdbus_conn_unref(entry->conn);
++	kfree(entry->memfd_offset);
++	kfree(entry);
++}
++
++/**
++ * kdbus_queue_entry_install() - install message components into the
++ *				 receiver's process
++ * @entry:		The queue entry to install
++ * @return_flags:	Pointer to store the return flags for userspace
++ * @install_fds:	Whether or not to install associated file descriptors
++ *
++ * This function writes the message header, the metadata items and the
++ * other items for information stored in @entry into the entry's pool
++ * slice (entry->slice), allocated earlier by kdbus_queue_entry_new().
++ *
++ * If @install_fds is %true, file descriptors will be installed as well.
++ * This function must always be called from the task context of the receiver.
++ *
++ * Return: 0 on success.
++ */
++int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
++			      u64 *return_flags, bool install_fds)
++{
++	u64 msg_size = entry->meta_offset;
++	struct kdbus_conn *conn_dst = entry->conn;
++	struct kdbus_msg_resources *res;
++	bool incomplete_fds = false;
++	struct kvec kvec[2];
++	size_t memfds = 0;
++	int i, ret;
++
++	lockdep_assert_held(&conn_dst->lock);
++
++	if (entry->proc_meta || entry->conn_meta) {
++		size_t meta_size;
++
++		ret = kdbus_meta_export(entry->proc_meta,
++					entry->conn_meta,
++					entry->attach_flags,
++					entry->slice,
++					entry->meta_offset,
++					&meta_size);
++		if (ret < 0)
++			return ret;
++
++		msg_size += meta_size;
++	}
++
++	/* Update message size at offset 0 */
++	kvec[0].iov_base = &msg_size;
++	kvec[0].iov_len = sizeof(msg_size);
++
++	ret = kdbus_pool_slice_copy_kvec(entry->slice, 0, kvec, 1,
++					 sizeof(msg_size));
++	if (ret < 0)
++		return ret;
++
++	res = entry->msg_res;
++
++	if (!res)
++		return 0;
++
++	if (res->fds_count) {
++		struct kdbus_item_header hdr;
++		size_t off;
++		int *fds;
++
++		fds = kmalloc_array(res->fds_count, sizeof(int), GFP_KERNEL);
++		if (!fds)
++			return -ENOMEM;
++
++		for (i = 0; i < res->fds_count; i++) {
++			if (install_fds) {
++				fds[i] = get_unused_fd_flags(O_CLOEXEC);
++				if (fds[i] >= 0)
++					fd_install(fds[i],
++						   get_file(res->fds[i]));
++				else
++					incomplete_fds = true;
++			} else {
++				fds[i] = -1;
++			}
++		}
++
++		off = entry->fds_offset;
++
++		hdr.type = KDBUS_ITEM_FDS;
++		hdr.size = KDBUS_ITEM_HEADER_SIZE +
++			   sizeof(int) * res->fds_count;
++
++		kvec[0].iov_base = &hdr;
++		kvec[0].iov_len = sizeof(hdr);
++
++		kvec[1].iov_base = fds;
++		kvec[1].iov_len = sizeof(int) * res->fds_count;
++
++		ret = kdbus_pool_slice_copy_kvec(entry->slice, off,
++						 kvec, 2, hdr.size);
++		kfree(fds);
++
++		if (ret < 0)
++			return ret;
++	}
++
++	for (i = 0; i < res->data_count; ++i) {
++		struct kdbus_msg_data *d = res->data + i;
++		struct kdbus_memfd m;
++
++		if (d->type != KDBUS_MSG_DATA_MEMFD)
++			continue;
++
++		m.start = d->memfd.start;
++		m.size = d->size;
++		m.fd = -1;
++
++		if (install_fds) {
++			m.fd = get_unused_fd_flags(O_CLOEXEC);
++			if (m.fd < 0) {
++				m.fd = -1;
++				incomplete_fds = true;
++			} else {
++				fd_install(m.fd,
++					   get_file(d->memfd.file));
++			}
++		}
++
++		kvec[0].iov_base = &m;
++		kvec[0].iov_len = sizeof(m);
++
++		ret = kdbus_pool_slice_copy_kvec(entry->slice,
++						 entry->memfd_offset[memfds++],
++						 kvec, 1, sizeof(m));
++		if (ret < 0)
++			return ret;
++	}
++
++	if (incomplete_fds)
++		*return_flags |= KDBUS_RECV_RETURN_INCOMPLETE_FDS;
++
++	return 0;
++}
++
++/**
++ * kdbus_queue_entry_enqueue() - enqueue an entry
++ * @entry:		entry to enqueue
++ * @reply:		reply to link to this entry (or NULL if none)
++ *
++ * This enqueues an unqueued entry into the message queue of the linked
++ * connection. It also binds a reply object to the entry so we can remember it
++ * when the message is moved.
++ *
++ * Once this call returns (and the connection lock is released), this entry can
++ * be dequeued by the target connection. Note that the entry will not be removed
++ * from the queue until it is destroyed.
++ */
++void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
++			       struct kdbus_reply *reply)
++{
++	lockdep_assert_held(&entry->conn->lock);
++
++	if (WARN_ON(entry->reply) || WARN_ON(!list_empty(&entry->entry)))
++		return;
++
++	entry->reply = kdbus_reply_ref(reply);
++	kdbus_queue_entry_link(entry);
++}
++
++/**
++ * kdbus_queue_entry_move() - move queue entry
++ * @e:		queue entry to move
++ * @dst:	destination connection to queue the entry on
++ *
++ * This moves a queue entry onto a different connection. It allocates a new
++ * slice on the target connection and copies the message over. If the copy
++ * succeeds, the entry is moved from its source connection to @dst.
++ *
++ * On failure, the entry is left untouched.
++ *
++ * The queue entry must be queued right now, and after the call succeeds it will
++ * be queued on the destination, but no longer on the source.
++ *
++ * The caller must hold the connection lock of the source *and* destination.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_queue_entry_move(struct kdbus_queue_entry *e,
++			   struct kdbus_conn *dst)
++{
++	struct kdbus_pool_slice *slice = NULL;
++	struct kdbus_conn *src = e->conn;
++	size_t size, fds;
++	int ret;
++
++	lockdep_assert_held(&src->lock);
++	lockdep_assert_held(&dst->lock);
++
++	if (WARN_ON(IS_ERR(e->user)) || WARN_ON(list_empty(&e->entry)))
++		return -EINVAL;
++	if (src == dst)
++		return 0;
++
++	size = kdbus_pool_slice_size(e->slice);
++	fds = e->msg_res ? e->msg_res->fds_count : 0;
++
++	ret = kdbus_conn_quota_inc(dst, e->user, size, fds);
++	if (ret < 0)
++		return ret;
++
++	slice = kdbus_pool_slice_alloc(dst->pool, size, true);
++	if (IS_ERR(slice)) {
++		ret = PTR_ERR(slice);
++		slice = NULL;
++		goto error;
++	}
++
++	ret = kdbus_pool_slice_copy(slice, e->slice);
++	if (ret < 0)
++		goto error;
++
++	kdbus_queue_entry_unlink(e);
++	kdbus_conn_quota_dec(src, e->user, size, fds);
++	kdbus_pool_slice_release(e->slice);
++	kdbus_conn_unref(e->conn);
++
++	e->slice = slice;
++	e->conn = kdbus_conn_ref(dst);
++	kdbus_queue_entry_link(e);
++
++	return 0;
++
++error:
++	kdbus_pool_slice_release(slice);
++	kdbus_conn_quota_dec(dst, e->user, size, fds);
++	return ret;
++}
+diff --git a/ipc/kdbus/queue.h b/ipc/kdbus/queue.h
+new file mode 100644
+index 0000000..7f2db96
+--- /dev/null
++++ b/ipc/kdbus/queue.h
+@@ -0,0 +1,92 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_QUEUE_H
++#define __KDBUS_QUEUE_H
++
++struct kdbus_user;
++
++/**
++ * struct kdbus_queue - a connection's message queue
++ * @msg_list:		List head for kdbus_queue_entry objects
++ * @msg_prio_queue:	RB tree root for messages, sorted by priority
++ * @msg_prio_highest:	Link to the RB node referencing the message with the
++ *			highest priority in the tree.
++ */
++struct kdbus_queue {
++	struct list_head msg_list;
++	struct rb_root msg_prio_queue;
++	struct rb_node *msg_prio_highest;
++};
++
++/**
++ * struct kdbus_queue_entry - messages waiting to be read
++ * @entry:		Entry in the connection's list
++ * @prio_node:		Entry in the priority queue tree
++ * @prio_entry:		Queue tree node entry in the list of one priority
++ * @slice:		Slice in the receiver's pool for the message
++ * @attach_flags:	Attach flags used during slice allocation
++ * @meta_offset:	Offset of first metadata item in slice
++ * @fds_offset:		Offset of FD item in slice
++ * @memfd_offset:	Array of slice-offsets for all memfd items
++ * @priority:		Message priority
++ * @dst_name_id:	The sequence number of the name this message is
++ *			addressed to, 0 for messages sent to an ID
++ * @msg_res:		Message resources
++ * @proc_meta:		Process metadata, captured at message arrival
++ * @conn_meta:		Connection metadata, captured at message arrival
++ * @reply:		The reply block if a reply to this message is expected
++ * @user:		User used for accounting
++ */
++struct kdbus_queue_entry {
++	struct list_head entry;
++	struct rb_node prio_node;
++	struct list_head prio_entry;
++
++	struct kdbus_pool_slice *slice;
++
++	u64 attach_flags;
++	size_t meta_offset;
++	size_t fds_offset;
++	size_t *memfd_offset;
++
++	s64 priority;
++	u64 dst_name_id;
++
++	struct kdbus_msg_resources *msg_res;
++	struct kdbus_meta_proc *proc_meta;
++	struct kdbus_meta_conn *conn_meta;
++	struct kdbus_reply *reply;
++	struct kdbus_conn *conn;
++	struct kdbus_user *user;
++};
++
++struct kdbus_kmsg;
++
++void kdbus_queue_init(struct kdbus_queue *queue);
++struct kdbus_queue_entry *kdbus_queue_peek(struct kdbus_queue *queue,
++					   s64 priority, bool use_priority);
++
++struct kdbus_queue_entry *kdbus_queue_entry_new(struct kdbus_conn *conn_dst,
++						const struct kdbus_kmsg *kmsg,
++						struct kdbus_user *user);
++void kdbus_queue_entry_free(struct kdbus_queue_entry *entry);
++int kdbus_queue_entry_install(struct kdbus_queue_entry *entry,
++			      u64 *return_flags, bool install_fds);
++void kdbus_queue_entry_enqueue(struct kdbus_queue_entry *entry,
++			       struct kdbus_reply *reply);
++int kdbus_queue_entry_move(struct kdbus_queue_entry *entry,
++			   struct kdbus_conn *dst);
++
++#endif /* __KDBUS_QUEUE_H */
+diff --git a/ipc/kdbus/reply.c b/ipc/kdbus/reply.c
+new file mode 100644
+index 0000000..008dca8
+--- /dev/null
++++ b/ipc/kdbus/reply.c
+@@ -0,0 +1,257 @@
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/slab.h>
++#include <linux/uio.h>
++
++#include "bus.h"
++#include "connection.h"
++#include "endpoint.h"
++#include "message.h"
++#include "metadata.h"
++#include "names.h"
++#include "domain.h"
++#include "item.h"
++#include "notify.h"
++#include "policy.h"
++#include "reply.h"
++#include "util.h"
++
++/**
++ * kdbus_reply_new() - Allocate and set up a new kdbus_reply object
++ * @reply_src:		The connection a reply is expected from
++ * @reply_dst:		The connection this reply object belongs to
++ * @msg:		Message associated with the reply
++ * @name_entry:		Name entry used to send the message
++ * @sync:		Whether or not to make this reply synchronous
++ *
++ * Allocate and fill a new kdbus_reply object.
++ *
++ * Return: New kdbus_reply object on success, ERR_PTR on error.
++ */
++struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
++				    struct kdbus_conn *reply_dst,
++				    const struct kdbus_msg *msg,
++				    struct kdbus_name_entry *name_entry,
++				    bool sync)
++{
++	struct kdbus_reply *r;
++	int ret = 0;
++
++	if (atomic_inc_return(&reply_dst->request_count) >
++	    KDBUS_CONN_MAX_REQUESTS_PENDING) {
++		ret = -EMLINK;
++		goto exit_dec_request_count;
++	}
++
++	r = kzalloc(sizeof(*r), GFP_KERNEL);
++	if (!r) {
++		ret = -ENOMEM;
++		goto exit_dec_request_count;
++	}
++
++	kref_init(&r->kref);
++	INIT_LIST_HEAD(&r->entry);
++	r->reply_src = kdbus_conn_ref(reply_src);
++	r->reply_dst = kdbus_conn_ref(reply_dst);
++	r->cookie = msg->cookie;
++	r->name_id = name_entry ? name_entry->name_id : 0;
++	r->deadline_ns = msg->timeout_ns;
++
++	if (sync) {
++		r->sync = true;
++		r->waiting = true;
++	}
++
++exit_dec_request_count:
++	if (ret < 0) {
++		atomic_dec(&reply_dst->request_count);
++		return ERR_PTR(ret);
++	}
++
++	return r;
++}
++
++static void __kdbus_reply_free(struct kref *kref)
++{
++	struct kdbus_reply *reply =
++		container_of(kref, struct kdbus_reply, kref);
++
++	atomic_dec(&reply->reply_dst->request_count);
++	kdbus_conn_unref(reply->reply_src);
++	kdbus_conn_unref(reply->reply_dst);
++	kfree(reply);
++}
++
++/**
++ * kdbus_reply_ref() - Increase reference on kdbus_reply
++ * @r:		The reply, may be %NULL
++ *
++ * Return: The reply object with an extra reference
++ */
++struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r)
++{
++	if (r)
++		kref_get(&r->kref);
++	return r;
++}
++
++/**
++ * kdbus_reply_unref() - Decrease reference on kdbus_reply
++ * @r:		The reply, may be %NULL
++ *
++ * Return: NULL
++ */
++struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r)
++{
++	if (r)
++		kref_put(&r->kref, __kdbus_reply_free);
++	return NULL;
++}
++
++/**
++ * kdbus_reply_link() - Link reply object into target connection
++ * @r:		Reply to link
++ */
++void kdbus_reply_link(struct kdbus_reply *r)
++{
++	if (WARN_ON(!list_empty(&r->entry)))
++		return;
++
++	list_add(&r->entry, &r->reply_dst->reply_list);
++	kdbus_reply_ref(r);
++}
++
++/**
++ * kdbus_reply_unlink() - Unlink reply object from target connection
++ * @r:		Reply to unlink
++ */
++void kdbus_reply_unlink(struct kdbus_reply *r)
++{
++	if (!list_empty(&r->entry)) {
++		list_del_init(&r->entry);
++		kdbus_reply_unref(r);
++	}
++}
++
++/**
++ * kdbus_sync_reply_wakeup() - Wake a synchronously blocking reply
++ * @reply:	The reply object
++ * @err:	Error code to set on the remote side
++ *
++ * Remove the synchronous reply object from its connection reply_list, and
++ * wake up remote peer (method origin) with the appropriate synchronous reply
++ * code.
++ */
++void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err)
++{
++	if (WARN_ON(!reply->sync))
++		return;
++
++	reply->waiting = false;
++	reply->err = err;
++	wake_up_interruptible(&reply->reply_dst->wait);
++}
++
++/**
++ * kdbus_reply_find() - Find the corresponding reply object
++ * @replying:	The replying connection or NULL
++ * @reply_dst:	The connection the reply will be sent to
++ *		(method origin)
++ * @cookie:	The cookie of the requesting message
++ *
++ * Look up a reply object that should be sent as a reply by
++ * @replying to @reply_dst with the given cookie.
++ *
++ * Callers must take the @reply_dst lock.
++ *
++ * Return: the corresponding reply object or NULL if not found
++ */
++struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
++				     struct kdbus_conn *reply_dst,
++				     u64 cookie)
++{
++	struct kdbus_reply *r, *reply = NULL;
++
++	list_for_each_entry(r, &reply_dst->reply_list, entry) {
++		if (r->cookie == cookie &&
++		    (!replying || r->reply_src == replying)) {
++			reply = r;
++			break;
++		}
++	}
++
++	return reply;
++}
++
++/**
++ * kdbus_reply_list_scan_work() - Worker callback to scan the replies of a
++ *				  connection for exceeded timeouts
++ * @work:		Work struct of the connection to scan
++ *
++ * Walk the list of replies stored with a connection and look for entries
++ * that have exceeded their timeout. If such an entry is found, a timeout
++ * notification is sent to the waiting peer, and the reply is removed from
++ * the list.
++ *
++ * The work is rescheduled to the nearest timeout found during the list
++ * iteration.
++ */
++void kdbus_reply_list_scan_work(struct work_struct *work)
++{
++	struct kdbus_conn *conn =
++		container_of(work, struct kdbus_conn, work.work);
++	struct kdbus_reply *reply, *reply_tmp;
++	u64 deadline = ~0ULL;
++	u64 now;
++
++	now = ktime_get_ns();
++
++	mutex_lock(&conn->lock);
++	if (!kdbus_conn_active(conn)) {
++		mutex_unlock(&conn->lock);
++		return;
++	}
++
++	list_for_each_entry_safe(reply, reply_tmp, &conn->reply_list, entry) {
++		/*
++		 * If the reply block is waiting for synchronous I/O,
++		 * the timeout is handled by wait_event_*_timeout(),
++		 * so we don't have to care for it here.
++		 */
++		if (reply->sync && !reply->interrupted)
++			continue;
++
++		WARN_ON(reply->reply_dst != conn);
++
++		if (reply->deadline_ns > now) {
++			/* remember next timeout */
++			if (deadline > reply->deadline_ns)
++				deadline = reply->deadline_ns;
++
++			continue;
++		}
++
++		/*
++		 * A zero deadline means the connection died, was
++		 * cleaned up already and the notification was sent.
++		 * Don't send notifications for reply trackers that were
++		 * left in an interrupted syscall state.
++		 */
++		if (reply->deadline_ns != 0 && !reply->interrupted)
++			kdbus_notify_reply_timeout(conn->ep->bus, conn->id,
++						   reply->cookie);
++
++		kdbus_reply_unlink(reply);
++	}
++
++	/* rearm delayed work with next timeout */
++	if (deadline != ~0ULL)
++		schedule_delayed_work(&conn->work,
++				      nsecs_to_jiffies(deadline - now));
++
++	mutex_unlock(&conn->lock);
++
++	kdbus_notify_flush(conn->ep->bus);
++}
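++
++/*
++ * Sketch of the reply lifecycle (illustrative; locking elided):
++ *
++ *	struct kdbus_reply *r;
++ *
++ *	r = kdbus_reply_new(src, dst, msg, name_entry, false);
++ *	if (IS_ERR(r))
++ *		return PTR_ERR(r);
++ *	kdbus_reply_link(r);
++ *	...
++ *	kdbus_reply_unlink(r);
++ *	kdbus_reply_unref(r);
++ */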
+diff --git a/ipc/kdbus/reply.h b/ipc/kdbus/reply.h
+new file mode 100644
+index 0000000..68d5232
+--- /dev/null
++++ b/ipc/kdbus/reply.h
+@@ -0,0 +1,68 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_REPLY_H
++#define __KDBUS_REPLY_H
++
++/**
++ * struct kdbus_reply - an entry of kdbus_conn's list of replies
++ * @kref:		Ref-count of this object
++ * @entry:		The entry of the connection's reply_list
++ * @reply_src:		The connection the reply will be sent from
++ * @reply_dst:		The connection the reply will be sent to
++ * @queue_entry:	The queue entry item that is prepared by the replying
++ *			connection
++ * @deadline_ns:	The deadline of the reply, in nanoseconds
++ * @cookie:		The cookie of the requesting message
++ * @name_id:		ID of the well-known name the original msg was sent to
++ * @sync:		The reply block is waiting for synchronous I/O
++ * @waiting:		The condition to synchronously wait for
++ * @interrupted:	The sync reply was left in an interrupted state
++ * @err:		The error code for the synchronous reply
++ */
++struct kdbus_reply {
++	struct kref kref;
++	struct list_head entry;
++	struct kdbus_conn *reply_src;
++	struct kdbus_conn *reply_dst;
++	struct kdbus_queue_entry *queue_entry;
++	u64 deadline_ns;
++	u64 cookie;
++	u64 name_id;
++	bool sync:1;
++	bool waiting:1;
++	bool interrupted:1;
++	int err;
++};
++
++struct kdbus_reply *kdbus_reply_new(struct kdbus_conn *reply_src,
++				    struct kdbus_conn *reply_dst,
++				    const struct kdbus_msg *msg,
++				    struct kdbus_name_entry *name_entry,
++				    bool sync);
++
++struct kdbus_reply *kdbus_reply_ref(struct kdbus_reply *r);
++struct kdbus_reply *kdbus_reply_unref(struct kdbus_reply *r);
++
++void kdbus_reply_link(struct kdbus_reply *r);
++void kdbus_reply_unlink(struct kdbus_reply *r);
++
++struct kdbus_reply *kdbus_reply_find(struct kdbus_conn *replying,
++				     struct kdbus_conn *reply_dst,
++				     u64 cookie);
++
++void kdbus_sync_reply_wakeup(struct kdbus_reply *reply, int err);
++void kdbus_reply_list_scan_work(struct work_struct *work);
++
++#endif /* __KDBUS_REPLY_H */
+diff --git a/ipc/kdbus/util.c b/ipc/kdbus/util.c
+new file mode 100644
+index 0000000..eaa806a
+--- /dev/null
++++ b/ipc/kdbus/util.c
+@@ -0,0 +1,201 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <linux/capability.h>
++#include <linux/cred.h>
++#include <linux/ctype.h>
++#include <linux/err.h>
++#include <linux/file.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++#include <linux/uio.h>
++#include <linux/user_namespace.h>
++
++#include "limits.h"
++#include "util.h"
++
++/**
++ * kdbus_copy_from_user() - copy aligned data from user-space
++ * @dest:	target buffer in kernel memory
++ * @user_ptr:	user-provided source buffer
++ * @size:	memory size to copy from user
++ *
++ * This copies @size bytes from @user_ptr into the kernel, just like
++ * copy_from_user() does. But we enforce an 8-byte alignment and reject any
++ * unaligned user-space pointers.
++ *
++ * Return: 0 on success, negative error code on failure.
++ */
++int kdbus_copy_from_user(void *dest, void __user *user_ptr, size_t size)
++{
++	if (!KDBUS_IS_ALIGNED8((uintptr_t)user_ptr))
++		return -EFAULT;
++
++	if (copy_from_user(dest, user_ptr, size))
++		return -EFAULT;
++
++	return 0;
++}
++
++/**
++ * kdbus_memdup_user() - copy dynamically sized object from user-space
++ * @user_ptr:	user-provided source buffer
++ * @sz_min:	minimum object size
++ * @sz_max:	maximum object size
++ *
++ * This copies a dynamically sized object from user-space into kernel-space. We
++ * require the object to have a 64bit size field at offset 0. We read it out
++ * first, allocate a suitably sized buffer and then copy all data.
++ *
++ * The @sz_min and @sz_max parameters define possible min and max object sizes
++ * so user-space cannot trigger unbounded kernel-space allocations.
++ *
++ * The same alignment-restrictions as described in kdbus_copy_from_user() apply.
++ *
++ * Return: pointer to dynamically allocated copy, or ERR_PTR() on failure.
++ */
++void *kdbus_memdup_user(void __user *user_ptr, size_t sz_min, size_t sz_max)
++{
++	void *ptr;
++	u64 size;
++	int ret;
++
++	ret = kdbus_copy_from_user(&size, user_ptr, sizeof(size));
++	if (ret < 0)
++		return ERR_PTR(ret);
++
++	if (size < sz_min)
++		return ERR_PTR(-EINVAL);
++
++	if (size > sz_max)
++		return ERR_PTR(-EMSGSIZE);
++
++	ptr = memdup_user(user_ptr, size);
++	if (IS_ERR(ptr))
++		return ptr;
++
++	if (*(u64 *)ptr != size) {
++		kfree(ptr);
++		return ERR_PTR(-EINVAL);
++	}
++
++	return ptr;
++}
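
For illustration, a minimal sketch of a call-site for kdbus_memdup_user(); the command struct and the 4 KiB upper bound here are hypothetical, not taken from this patch. The helper relies on the object's first field being its own 64-bit size:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "util.h"

struct my_cmd {			/* hypothetical size-prefixed argument */
	__u64 size;		/* must be first; validated against the copy */
	__u64 flags;
};

static int my_handle_cmd(void __user *argp)
{
	struct my_cmd *cmd;

	/* rejects unaligned pointers and sizes outside [sizeof(*cmd), 4096] */
	cmd = kdbus_memdup_user(argp, sizeof(*cmd), 4096);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... act on cmd->flags ... */

	kfree(cmd);
	return 0;
}
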
++
++/**
++ * kdbus_verify_uid_prefix() - verify UID prefix of a user-supplied name
++ * @name:	user-supplied name to verify
++ * @user_ns:	user-namespace to act in
++ * @kuid:	Kernel internal uid of user
++ *
++ * This verifies that the user-supplied name @name carries the caller's UID as
++ * prefix. This is the default name-spacing policy we enforce on user-supplied
++ * names for public kdbus entities like buses and endpoints.
++ *
++ * The user must supply names prefixed with "<UID>-", where the UID is
++ * interpreted in the user-namespace of the domain. If the user fails to supply
++ * such a prefixed name, we reject it.
++ *
++ * Return: 0 on success, negative error code on failure
++ */
++int kdbus_verify_uid_prefix(const char *name, struct user_namespace *user_ns,
++			    kuid_t kuid)
++{
++	uid_t uid;
++	char prefix[16];
++
++	/*
++	 * The kuid must have a mapping into the userns of the domain
++	 * otherwise do not allow creation of buses nor endpoints.
++	 */
++	uid = from_kuid(user_ns, kuid);
++	if (uid == (uid_t) -1)
++		return -EINVAL;
++
++	snprintf(prefix, sizeof(prefix), "%u-", uid);
++	if (strncmp(name, prefix, strlen(prefix)) != 0)
++		return -EINVAL;
++
++	return 0;
++}
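
As a hedged sketch of how this check plugs into a creation path (the surrounding helper is invented for illustration): for a caller whose kuid maps to 1000 in the domain's user-namespace, "1000-mybus" passes and "mybus" is rejected:

#include <linux/cred.h>
#include "util.h"

/* hypothetical helper on a bus-creation path */
static int my_check_bus_name(const char *name, struct user_namespace *user_ns)
{
	/* -EINVAL if the kuid has no mapping or the "<UID>-" prefix is missing */
	return kdbus_verify_uid_prefix(name, user_ns, current_uid());
}
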
++
++/**
++ * kdbus_sanitize_attach_flags() - Sanitize attach flags from user-space
++ * @flags:		Attach flags provided by userspace
++ * @attach_flags:	A pointer where to store the valid attach flags
++ *
++ * Convert attach-flags provided by user-space into a valid mask. If the mask
++ * is invalid, an error is returned. The sanitized attach flags are stored in
++ * the output parameter.
++ *
++ * Return: 0 on success, negative error on failure.
++ */
++int kdbus_sanitize_attach_flags(u64 flags, u64 *attach_flags)
++{
++	/* 'any' degrades to 'all' for compatibility */
++	if (flags == _KDBUS_ATTACH_ANY)
++		flags = _KDBUS_ATTACH_ALL;
++
++	/* reject unknown attach flags */
++	if (flags & ~_KDBUS_ATTACH_ALL)
++		return -EINVAL;
++
++	*attach_flags = flags;
++	return 0;
++}
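
A short call-site sketch for the sanitizer above; the struct layout follows the kdbus UAPI hello command, while the helper itself is hypothetical:

#include "util.h"

/* hypothetical hello-path helper; 'hello' follows the kdbus UAPI layout */
static int my_apply_recv_flags(const struct kdbus_cmd_hello *hello, u64 *out)
{
	/* _KDBUS_ATTACH_ANY collapses to _KDBUS_ATTACH_ALL; unknown bits fail */
	return kdbus_sanitize_attach_flags(hello->attach_flags_recv, out);
}
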
++
++/**
++ * kdbus_kvec_set - helper utility to assemble kvec arrays
++ * @kvec:	kvec entry to use
++ * @src:	Source address to set in @kvec
++ * @len:	Number of bytes in @src
++ * @total_len:	Pointer to total length variable
++ *
++ * Set @src and @len in @kvec, and increase @total_len by @len.
++ */
++void kdbus_kvec_set(struct kvec *kvec, void *src, size_t len, u64 *total_len)
++{
++	kvec->iov_base = src;
++	kvec->iov_len = len;
++	*total_len += len;
++}
++
++static const char * const zeros = "\0\0\0\0\0\0\0";
++
++/**
++ * kdbus_kvec_pad - conditionally write a padding kvec
++ * @kvec:	kvec entry to use
++ * @len:	Total length used for kvec array
++ *
++ * Check if the current total byte length of the array in @len is aligned to
++ * 8 bytes. If it isn't, fill @kvec with padding information and increase @len
++ * by the number of bytes stored in @kvec.
++ *
++ * Return: the number of added padding bytes.
++ */
++size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len)
++{
++	size_t pad = KDBUS_ALIGN8(*len) - *len;
++
++	if (!pad)
++		return 0;
++
++	kvec->iov_base = (void *)zeros;
++	kvec->iov_len = pad;
++
++	*len += pad;
++
++	return pad;
++}
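
A sketch of the assembly pattern the two kvec helpers are meant for: append a buffer, pad to the next 8-byte boundary, append the next buffer. The buffer names and fixed array size are illustrative:

#include <linux/uio.h>
#include "util.h"

/* assumed: hdr and payload point at valid kernel buffers */
static size_t my_build_kvecs(struct kvec kv[4], void *hdr, size_t hdr_len,
			     void *payload, size_t payload_len, u64 *total)
{
	size_t n = 0;

	kdbus_kvec_set(&kv[n++], hdr, hdr_len, total);
	if (kdbus_kvec_pad(&kv[n], total))	/* emits 1-7 zero bytes if needed */
		n++;
	kdbus_kvec_set(&kv[n++], payload, payload_len, total);

	return n;	/* number of kvec slots consumed */
}
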
+diff --git a/ipc/kdbus/util.h b/ipc/kdbus/util.h
+new file mode 100644
+index 0000000..740b198
+--- /dev/null
++++ b/ipc/kdbus/util.h
+@@ -0,0 +1,74 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
++ * Copyright (C) 2013-2015 Daniel Mack <daniel@zonque.org>
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ * Copyright (C) 2013-2015 Linux Foundation
++ * Copyright (C) 2014-2015 Djalal Harouni <tixxdz@opendz.org>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#ifndef __KDBUS_UTIL_H
++#define __KDBUS_UTIL_H
++
++#include <linux/dcache.h>
++#include <linux/ioctl.h>
++
++#include <uapi/linux/kdbus.h>
++
++/* all exported addresses are 64 bit */
++#define KDBUS_PTR(addr) ((void __user *)(uintptr_t)(addr))
++
++/* all exported sizes are 64 bit and data aligned to 64 bit */
++#define KDBUS_ALIGN8(s) ALIGN((s), 8)
++#define KDBUS_IS_ALIGNED8(s) (IS_ALIGNED(s, 8))
++
++/**
++ * kdbus_member_set_user - write a structure member to user memory
++ * @_s:		Variable to copy from
++ * @_b:		Buffer to write to
++ * @_t:		Structure type
++ * @_m:		Member name in the passed structure
++ *
++ * Return: the result of copy_to_user()
++ */
++#define kdbus_member_set_user(_s, _b, _t, _m)				\
++({									\
++	u64 __user *_sz =						\
++		(void __user *)((u8 __user *)(_b) + offsetof(_t, _m));	\
++	copy_to_user(_sz, _s, sizeof(((_t *)0)->_m));			\
++})
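
A hedged usage sketch for the macro above: writing one computed value back into a single member of the caller's command struct instead of copying the whole struct. The struct and member follow the kdbus UAPI; the function around them is invented:

#include <linux/uaccess.h>
#include "util.h"

/* hypothetical receive-path helper */
static int my_report_offset(struct kdbus_cmd_recv __user *ucmd, u64 offset)
{
	/* copy_to_user() returns the number of bytes left uncopied */
	if (kdbus_member_set_user(&offset, ucmd, struct kdbus_cmd_recv,
				  msg.offset))
		return -EFAULT;

	return 0;
}
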
++
++/**
++ * kdbus_strhash - calculate a hash
++ * @str:	String
++ *
++ * Return: hash value
++ */
++static inline unsigned int kdbus_strhash(const char *str)
++{
++	unsigned long hash = init_name_hash();
++
++	while (*str)
++		hash = partial_name_hash(*str++, hash);
++
++	return end_name_hash(hash);
++}
++
++int kdbus_verify_uid_prefix(const char *name, struct user_namespace *user_ns,
++			    kuid_t kuid);
++int kdbus_sanitize_attach_flags(u64 flags, u64 *attach_flags);
++
++int kdbus_copy_from_user(void *dest, void __user *user_ptr, size_t size);
++void *kdbus_memdup_user(void __user *user_ptr, size_t sz_min, size_t sz_max);
++
++struct kvec;
++
++void kdbus_kvec_set(struct kvec *kvec, void *src, size_t len, u64 *total_len);
++size_t kdbus_kvec_pad(struct kvec *kvec, u64 *len);
++
++#endif
+diff --git a/samples/Kconfig b/samples/Kconfig
+index 224ebb4..a4c6b2f 100644
+--- a/samples/Kconfig
++++ b/samples/Kconfig
+@@ -55,6 +55,13 @@ config SAMPLE_KDB
+ 	  Build an example of how to dynamically add the hello
+ 	  command to the kdb shell.
+ 
++config SAMPLE_KDBUS
++	bool "Build kdbus API example"
++	depends on KDBUS
++	help
++	  Build an example of how the kdbus API can be used from
++	  userspace.
++
+ config SAMPLE_RPMSG_CLIENT
+ 	tristate "Build rpmsg client sample -- loadable modules only"
+ 	depends on RPMSG && m
+diff --git a/samples/Makefile b/samples/Makefile
+index f00257b..f0ad51e 100644
+--- a/samples/Makefile
++++ b/samples/Makefile
+@@ -1,4 +1,5 @@
+ # Makefile for Linux samples code
+ 
+ obj-$(CONFIG_SAMPLES)	+= kobject/ kprobes/ trace_events/ livepatch/ \
+-			   hw_breakpoint/ kfifo/ kdb/ hidraw/ rpmsg/ seccomp/
++			   hw_breakpoint/ kfifo/ kdb/ kdbus/ hidraw/ rpmsg/ \
++			   seccomp/
+diff --git a/samples/kdbus/.gitignore b/samples/kdbus/.gitignore
+new file mode 100644
+index 0000000..ee07d98
+--- /dev/null
++++ b/samples/kdbus/.gitignore
+@@ -0,0 +1 @@
++kdbus-workers
+diff --git a/samples/kdbus/Makefile b/samples/kdbus/Makefile
+new file mode 100644
+index 0000000..137f842
+--- /dev/null
++++ b/samples/kdbus/Makefile
+@@ -0,0 +1,9 @@
++# kbuild trick to avoid linker error. Can be omitted if a module is built.
++obj- := dummy.o
++
++hostprogs-$(CONFIG_SAMPLE_KDBUS) += kdbus-workers
++
++always := $(hostprogs-y)
++
++HOSTCFLAGS_kdbus-workers.o += -I$(objtree)/usr/include
++HOSTLOADLIBES_kdbus-workers := -lrt
+diff --git a/samples/kdbus/kdbus-api.h b/samples/kdbus/kdbus-api.h
+new file mode 100644
+index 0000000..5ed5907
+--- /dev/null
++++ b/samples/kdbus/kdbus-api.h
+@@ -0,0 +1,114 @@
++#ifndef KDBUS_API_H
++#define KDBUS_API_H
++
++#include <sys/ioctl.h>
++#include <linux/kdbus.h>
++
++#define KDBUS_ALIGN8(l) (((l) + 7) & ~7)
++#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
++#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
++#define KDBUS_ITEM_NEXT(item) \
++	(typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++#define KDBUS_FOREACH(iter, first, _size)				\
++	for (iter = (first);						\
++	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
++	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
++	     iter = (void*)(((uint8_t *)iter) + KDBUS_ALIGN8((iter)->size)))
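
To make the iterator concrete, a small sketch of walking the item array of a received message with KDBUS_FOREACH(); it assumes msg already points into the connection's mapped pool, as the worker sample below demonstrates:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include "kdbus-api.h"

/* assumed: msg points at a struct kdbus_msg inside the mapped pool */
static void my_dump_items(const struct kdbus_msg *msg)
{
	const struct kdbus_item *item;

	KDBUS_FOREACH(item, msg->items,
		      msg->size - offsetof(struct kdbus_msg, items))
		printf("item type=%llu size=%llu\n",
		       (unsigned long long)item->type,
		       (unsigned long long)item->size);
}
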
++
++static inline int kdbus_cmd_bus_make(int control_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(control_fd, KDBUS_CMD_BUS_MAKE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_endpoint_make(int bus_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(bus_fd, KDBUS_CMD_ENDPOINT_MAKE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_endpoint_update(int ep_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(ep_fd, KDBUS_CMD_ENDPOINT_UPDATE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_hello(int bus_fd, struct kdbus_cmd_hello *cmd)
++{
++	int ret = ioctl(bus_fd, KDBUS_CMD_HELLO, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_update(int fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(fd, KDBUS_CMD_UPDATE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_byebye(int conn_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_BYEBYE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_free(int conn_fd, struct kdbus_cmd_free *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_FREE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_conn_info(int conn_fd, struct kdbus_cmd_info *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_CONN_INFO, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_bus_creator_info(int conn_fd, struct kdbus_cmd_info *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_BUS_CREATOR_INFO, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_list(int fd, struct kdbus_cmd_list *cmd)
++{
++	int ret = ioctl(fd, KDBUS_CMD_LIST, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_send(int conn_fd, struct kdbus_cmd_send *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_SEND, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_recv(int conn_fd, struct kdbus_cmd_recv *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_RECV, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_name_acquire(int conn_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_NAME_ACQUIRE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_name_release(int conn_fd, struct kdbus_cmd *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_NAME_RELEASE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_match_add(int conn_fd, struct kdbus_cmd_match *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_MATCH_ADD, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++static inline int kdbus_cmd_match_remove(int conn_fd, struct kdbus_cmd_match *cmd)
++{
++	int ret = ioctl(conn_fd, KDBUS_CMD_MATCH_REMOVE, cmd);
++	return (ret < 0) ? (errno > 0 ? -errno : -EINVAL) : 0;
++}
++
++#endif /* KDBUS_API_H */
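
All wrappers above fold the usual ioctl()/errno convention into a single negative-errno return value, so call-sites can compare against error codes directly. A small sketch of the resulting style (the connection fd is assumed to exist):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "kdbus-api.h"

/* assumed: conn_fd is an established kdbus connection */
static int my_try_recv(int conn_fd)
{
	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
	int r = kdbus_cmd_recv(conn_fd, &recv);

	if (r == -EAGAIN)
		return 0;	/* queue empty - not an error */
	if (r < 0)
		fprintf(stderr, "recv: %s\n", strerror(-r));

	return r;
}
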
+diff --git a/samples/kdbus/kdbus-workers.c b/samples/kdbus/kdbus-workers.c
+new file mode 100644
+index 0000000..d331e01
+--- /dev/null
++++ b/samples/kdbus/kdbus-workers.c
+@@ -0,0 +1,1326 @@
++/*
++ * Copyright (C) 2013-2015 David Herrmann <dh.herrmann@gmail.com>
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++/*
++ * Example: Workers
++ * This program computes prime numbers based on the sieve of Eratosthenes. The
++ * master sets up a shared memory region and spawns workers which clear out the
++ * non-primes. The master reacts to keyboard input and to client requests to
++ * control what each worker does. Note that this is in no way meant as an
++ * efficient way to compute primes. It should only serve as an example of how a
++ * master/worker concept can be implemented with kdbus used for control messages.
++ *
++ * The main process is called the 'master'. It creates a new, private bus which
++ * will be used between the master and its workers to communicate. The master
++ * then spawns a fixed number of workers. Whenever a worker dies (detected via
++ * SIGCHLD), the master spawns a new worker. When done, the master waits for all
++ * workers to exit, prints a status report and exits itself.
++ *
++ * The master process does *not* keep track of its workers. Instead, this
++ * example implements a PULL model. That is, the master acquires a well-known
++ * name on the bus which each worker uses to request tasks from the master. If
++ * there are no more tasks, the master will return an empty task-list, which
++ * causes a worker to exit immediately.
++ *
++ * As tasks can be computationally expensive, we support cancellation. Whenever
++ * the master process is interrupted, it will drop its well-known name on the
++ * bus. This causes kdbus to broadcast a name-change notification. The workers
++ * check for broadcast messages regularly and will exit if they receive one.
++ *
++ * This example consists of 4 objects:
++ *  * master: The master object contains the context of the master process. This
++ *            process manages the prime-context, spawns workers and assigns
++ *            prime-ranges to each worker to compute.
++ *            The master does not do any prime computations itself.
++ *  * child:  The child object contains the context of a worker. It inherits the
++ *            prime context from its parent (the master) and then creates a new
++ *            bus context to request prime-ranges to compute.
++ *  * prime:  The "prime" object is used to abstract how we compute primes. When
++ *            allocated, it prepares a memory region to hold 1 bit for each
++ *            natural number up to a fixed maximum ('MAX_PRIMES').
++ *            The memory region is backed by a memfd which we share between
++ *            processes. Each worker is assigned a range of natural
++ *            numbers and marks off their multiples in the memory region. The
++ *            master process is responsible for distributing all natural numbers
++ *            up to the fixed maximum among its workers.
++ *  * bus:    The bus object is an abstraction of the kdbus API. It is pretty
++ *            straightforward and only manages the connection-fd plus the
++ *            memory-mapped pool in a single object.
++ *
++ * This example is laid out in reverse order, which should make it easier to
++ * read top-down, but requires some forward-declarations. Just ignore those.
++ */
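
Stripped of all the bus plumbing, the marking step each worker performs is the classic bit-array sieve. A standalone sketch with an arbitrary small bound, independent of the kdbus code below:

#include <stdio.h>
#include <string.h>

#define MAX 64				/* small stand-in for MAX_PRIMES */

int main(void)
{
	unsigned char area[MAX / 8 + 1];
	size_t n, i;

	memset(area, 0, sizeof(area));
	for (n = 2; n * n < MAX; n++)		/* each worker handles such n's */
		for (i = n * n; i < MAX; i += n)
			area[i / 8] |= 1 << (i % 8);	/* mark composite */

	for (i = 2; i < MAX; i++)
		if (!(area[i / 8] & (1 << (i % 8))))
			printf("%zu\n", i);	/* unmarked numbers are prime */

	return 0;
}
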
++
++#include <ctype.h>
++#include <errno.h>
++#include <fcntl.h>
++#include <linux/memfd.h>
++#include <signal.h>
++#include <stdbool.h>
++#include <stddef.h>
++#include <stdint.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <sys/mman.h>
++#include <sys/poll.h>
++#include <sys/signalfd.h>
++#include <sys/syscall.h>
++#include <sys/time.h>
++#include <sys/wait.h>
++#include <time.h>
++#include <unistd.h>
++#include "kdbus-api.h"
++
++/* FORWARD DECLARATIONS */
++
++#define POOL_SIZE (16 * 1024 * 1024)
++#define MAX_PRIMES (2UL << 24)
++#define WORKER_COUNT (16)
++#define PRIME_STEPS (65536 * 4)
++
++static const char *arg_busname = "example-workers";
++static const char *arg_modname = "kdbus";
++static const char *arg_master = "org.freedesktop.master";
++
++static int err_assert(int r_errno, const char *msg, const char *func, int line,
++		      const char *file)
++{
++	r_errno = (r_errno != 0) ? -abs(r_errno) : -EFAULT;
++	if (r_errno < 0) {
++		errno = -r_errno;
++		fprintf(stderr, "ERR: %s: %m (%s:%d in %s)\n",
++			msg, func, line, file);
++	}
++	return r_errno;
++}
++
++#define err_r(_r, _msg) err_assert((_r), (_msg), __func__, __LINE__, __FILE__)
++#define err(_msg) err_r(errno, (_msg))
++
++struct prime;
++struct bus;
++struct master;
++struct child;
++
++struct prime {
++	int fd;
++	uint8_t *area;
++	size_t max;
++	size_t done;
++	size_t status;
++};
++
++static int prime_new(struct prime **out);
++static void prime_free(struct prime *p);
++static bool prime_done(struct prime *p);
++static void prime_consume(struct prime *p, size_t amount);
++static int prime_run(struct prime *p, struct bus *cancel, size_t number);
++static void prime_print(struct prime *p);
++
++struct bus {
++	int fd;
++	uint8_t *pool;
++};
++
++static int bus_open_connection(struct bus **out, uid_t uid, const char *name,
++			       uint64_t recv_flags);
++static void bus_close_connection(struct bus *b);
++static void bus_pool_free_slice(struct bus *b, uint64_t offset);
++static int bus_acquire_name(struct bus *b, const char *name);
++static int bus_install_name_loss_match(struct bus *b, const char *name);
++static int bus_poll(struct bus *b);
++static int bus_make(uid_t uid, const char *name);
++
++struct master {
++	size_t n_workers;
++	size_t max_workers;
++
++	int signal_fd;
++	int control_fd;
++
++	struct prime *prime;
++	struct bus *bus;
++};
++
++static int master_new(struct master **out);
++static void master_free(struct master *m);
++static int master_run(struct master *m);
++static int master_poll(struct master *m);
++static int master_handle_stdin(struct master *m);
++static int master_handle_signal(struct master *m);
++static int master_handle_bus(struct master *m);
++static int master_reply(struct master *m, const struct kdbus_msg *msg);
++static int master_waitpid(struct master *m);
++static int master_spawn(struct master *m);
++
++struct child {
++	struct bus *bus;
++	struct prime *prime;
++};
++
++static int child_new(struct child **out, struct prime *p);
++static void child_free(struct child *c);
++static int child_run(struct child *c);
++
++/* END OF FORWARD DECLARATIONS */
++
++/*
++ * This is the main entrypoint of this example. It is pretty straightforward. We
++ * create a master object, run the computation, print a status report and then
++ * exit. Nothing particularly interesting here, so let's look into the master
++ * object...
++ */
++int main(int argc, char **argv)
++{
++	struct master *m = NULL;
++	int r;
++
++	r = master_new(&m);
++	if (r < 0)
++		goto out;
++
++	r = master_run(m);
++	if (r < 0)
++		goto out;
++
++	if (0)
++		prime_print(m->prime);
++
++out:
++	master_free(m);
++	if (r < 0 && r != -EINTR)
++		fprintf(stderr, "failed\n");
++	else
++		fprintf(stderr, "done\n");
++	return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
++}
++
++/*
++ * ...this will allocate a new master context. It keeps track of the current
++ * number of children/workers that are running, manages a signalfd to track
++ * SIGCHLD, and creates a private kdbus bus. Afterwards, it opens its connection
++ * to the bus and acquires a well-known name (arg_master).
++ */
++static int master_new(struct master **out)
++{
++	struct master *m;
++	sigset_t smask;
++	int r;
++
++	m = calloc(1, sizeof(*m));
++	if (!m)
++		return err("cannot allocate master");
++
++	m->max_workers = WORKER_COUNT;
++	m->signal_fd = -1;
++	m->control_fd = -1;
++
++	/* Block SIGINT and SIGCHLD signals */
++	sigemptyset(&smask);
++	sigaddset(&smask, SIGINT);
++	sigaddset(&smask, SIGCHLD);
++	sigprocmask(SIG_BLOCK, &smask, NULL);
++
++	m->signal_fd = signalfd(-1, &smask, SFD_CLOEXEC);
++	if (m->signal_fd < 0) {
++		r = err("cannot create signalfd");
++		goto error;
++	}
++
++	r = prime_new(&m->prime);
++	if (r < 0)
++		goto error;
++
++	m->control_fd = bus_make(getuid(), arg_busname);
++	if (m->control_fd < 0) {
++		r = m->control_fd;
++		goto error;
++	}
++
++	/*
++	 * Open a bus connection for the master, and require each received
++	 * message to have a metadata item of type KDBUS_ITEM_PIDS attached.
++	 * The current UID is needed to compute the name of the bus node to
++	 * connect to.
++	 */
++	r = bus_open_connection(&m->bus, getuid(),
++				arg_busname, KDBUS_ATTACH_PIDS);
++	if (r < 0)
++		goto error;
++
++	/*
++	 * Acquire a well-known name on the bus, so children can address
++	 * messages to the master using KDBUS_DST_ID_NAME as destination-ID
++	 * of messages.
++	 */
++	r = bus_acquire_name(m->bus, arg_master);
++	if (r < 0)
++		goto error;
++
++	*out = m;
++	return 0;
++
++error:
++	master_free(m);
++	return r;
++}
++
++/* pretty straightforward destructor of a master object */
++static void master_free(struct master *m)
++{
++	if (!m)
++		return;
++
++	bus_close_connection(m->bus);
++	if (m->control_fd >= 0)
++		close(m->control_fd);
++	prime_free(m->prime);
++	if (m->signal_fd >= 0)
++		close(m->signal_fd);
++	free(m);
++}
++
++static int master_run(struct master *m)
++{
++	int res, r = 0;
++
++	while (!prime_done(m->prime)) {
++		while (m->n_workers < m->max_workers) {
++			r = master_spawn(m);
++			if (r < 0)
++				break;
++		}
++
++		r = master_poll(m);
++		if (r < 0)
++			break;
++	}
++
++	if (r < 0) {
++		bus_close_connection(m->bus);
++		m->bus = NULL;
++	}
++
++	while (m->n_workers > 0) {
++		res = master_poll(m);
++		if (res < 0) {
++			if (m->bus) {
++				bus_close_connection(m->bus);
++				m->bus = NULL;
++			}
++			r = res;
++		}
++	}
++
++	return r == -EINTR ? 0 : r;
++}
++
++static int master_poll(struct master *m)
++{
++	struct pollfd fds[3] = {};
++	int r = 0, n = 0;
++
++	/*
++	 * Add stdin, the signalfd and the connection owner file descriptor to
++	 * the pollfd table, and handle incoming traffic on the latter in
++	 * master_handle_bus().
++	 */
++	fds[n].fd = STDIN_FILENO;
++	fds[n++].events = POLLIN;
++	fds[n].fd = m->signal_fd;
++	fds[n++].events = POLLIN;
++	if (m->bus) {
++		fds[n].fd = m->bus->fd;
++		fds[n++].events = POLLIN;
++	}
++
++	r = poll(fds, n, -1);
++	if (r < 0)
++		return err("poll() failed");
++
++	if (fds[0].revents & POLLIN)
++		r = master_handle_stdin(m);
++	else if (fds[0].revents)
++		r = err("ERR/HUP on stdin");
++	if (r < 0)
++		return r;
++
++	if (fds[1].revents & POLLIN)
++		r = master_handle_signal(m);
++	else if (fds[1].revents)
++		r = err("ERR/HUP on signalfd");
++	if (r < 0)
++		return r;
++
++	if (fds[2].revents & POLLIN)
++		r = master_handle_bus(m);
++	else if (fds[2].revents)
++		r = err("ERR/HUP on bus");
++
++	return r;
++}
++
++static int master_handle_stdin(struct master *m)
++{
++	char buf[128];
++	ssize_t l;
++	int r = 0;
++
++	l = read(STDIN_FILENO, buf, sizeof(buf));
++	if (l < 0)
++		return err("cannot read stdin");
++	if (l == 0)
++		return err_r(-EINVAL, "EOF on stdin");
++
++	while (l-- > 0) {
++		switch (buf[l]) {
++		case 'q':
++			/* quit */
++			r = -EINTR;
++			break;
++		case '\n':
++		case ' ':
++			/* ignore */
++			break;
++		default:
++			if (isgraph(buf[l]))
++				fprintf(stderr, "invalid input '%c'\n", buf[l]);
++			else
++				fprintf(stderr, "invalid input 0x%x\n", buf[l]);
++			break;
++		}
++	}
++
++	return r;
++}
++
++static int master_handle_signal(struct master *m)
++{
++	struct signalfd_siginfo val;
++	ssize_t l;
++
++	l = read(m->signal_fd, &val, sizeof(val));
++	if (l < 0)
++		return err("cannot read signalfd");
++	if (l != sizeof(val))
++		return err_r(-EINVAL, "invalid data from signalfd");
++
++	switch (val.ssi_signo) {
++	case SIGCHLD:
++		return master_waitpid(m);
++	case SIGINT:
++		return err_r(-EINTR, "interrupted");
++	default:
++		return err_r(-EINVAL, "caught invalid signal");
++	}
++}
++
++static int master_handle_bus(struct master *m)
++{
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++	const struct kdbus_msg *msg = NULL;
++	const struct kdbus_item *item;
++	const struct kdbus_vec *vec = NULL;
++	int r = 0;
++
++	/*
++	 * To receive a message, the KDBUS_CMD_RECV ioctl is used.
++	 * It takes an argument of type 'struct kdbus_cmd_recv', which
++	 * will contain information on the received message when the call
++	 * returns. See kdbus.message(7).
++	 */
++	r = kdbus_cmd_recv(m->bus->fd, &recv);
++	/*
++	 * EAGAIN is returned when there is no message waiting on this
++	 * connection. This is not an error - simply bail out.
++	 */
++	if (r == -EAGAIN)
++		return 0;
++	if (r < 0)
++		return err_r(r, "cannot receive message");
++
++	/*
++	 * Messages received by a connection are stored inside the connection's
++	 * pool, at an offset that has been returned in the 'recv' command
++	 * struct above. The value describes the relative offset from the
++	 * start address of the pool. A message is described with
++	 * 'struct kdbus_msg'. See kdbus.message(7).
++	 */
++	msg = (void *)(m->bus->pool + recv.msg.offset);
++
++	/*
++	 * A message describes its actual payload in an array of items.
++	 * KDBUS_FOREACH() is a simple iterator that walks such an array.
++	 * struct kdbus_msg has a field to denote its total size, which is
++	 * needed to determine the number of items in the array.
++	 */
++	KDBUS_FOREACH(item, msg->items,
++		      msg->size - offsetof(struct kdbus_msg, items)) {
++		/*
++		 * An item of type PAYLOAD_OFF describes in-line memory
++		 * stored in the pool at a described offset. That offset is
++		 * relative to the start address of the message header.
++		 * This example program only expects one single item of that
++		 * type, remembers the struct kdbus_vec member of the item
++		 * when it sees it, and bails out if there is more than one
++		 * of them.
++		 */
++		if (item->type == KDBUS_ITEM_PAYLOAD_OFF) {
++			if (vec) {
++				r = err_r(-EEXIST,
++					  "message with multiple vecs");
++				break;
++			}
++			vec = &item->vec;
++			if (vec->size != 1) {
++				r = err_r(-EINVAL, "invalid message size");
++				break;
++			}
++
++		/*
++		 * MEMFDs are transported as items of type PAYLOAD_MEMFD.
++		 * If such an item is attached, a new file descriptor was
++		 * installed into the task when KDBUS_CMD_RECV was called, and
++		 * its number is stored in item->memfd.fd.
++		 * Implementers *must* handle this item type and close the
++		 * file descriptor when no longer needed in order to prevent
++		 * file descriptor exhaustion. This example program just bails
++		 * out with an error in this case, as memfds are not expected
++		 * in this context.
++		 */
++		} else if (item->type == KDBUS_ITEM_PAYLOAD_MEMFD) {
++			r = err_r(-EINVAL, "message with memfd");
++			break;
++		}
++	}
++	if (r < 0)
++		goto exit;
++	if (!vec) {
++		r = err_r(-EINVAL, "empty message");
++		goto exit;
++	}
++
++	switch (*((const uint8_t *)msg + vec->offset)) {
++	case 'r': {
++		r = master_reply(m, msg);
++		break;
++	}
++	default:
++		r = err_r(-EINVAL, "invalid message type");
++		break;
++	}
++
++exit:
++	/*
++	 * We are done with the memory slice that was given to us through
++	 * recv.msg.offset. Tell the kernel it can use it for other content
++	 * in the future. See kdbus.pool(7).
++	 */
++	bus_pool_free_slice(m->bus, recv.msg.offset);
++	return r;
++}
++
++static int master_reply(struct master *m, const struct kdbus_msg *msg)
++{
++	struct kdbus_cmd_send cmd;
++	struct kdbus_item *item;
++	struct kdbus_msg *reply;
++	size_t size, status, p[2];
++	int r;
++
++	/*
++	 * This function sends a message over kdbus. To do this, it uses the
++	 * KDBUS_CMD_SEND ioctl, which takes a command struct argument of type
++	 * 'struct kdbus_cmd_send'. This struct stores a pointer to the actual
++	 * message to send. See kdbus.message(7).
++	 */
++	p[0] = m->prime->done;
++	p[1] = prime_done(m->prime) ? 0 : PRIME_STEPS;
++
++	size = sizeof(*reply);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	/* Prepare the message to send */
++	reply = alloca(size);
++	memset(reply, 0, size);
++	reply->size = size;
++
++	/* Each message has a cookie that can be used to send replies */
++	reply->cookie = 1;
++
++	/* The payload_type is arbitrary, but it must be non-zero */
++	reply->payload_type = 0xdeadbeef;
++
++	/*
++	 * We are sending a reply. Let the kernel know the cookie of the
++	 * message we are replying to.
++	 */
++	reply->cookie_reply = msg->cookie;
++
++	/*
++	 * Messages can either be directed to a well-known name (stored as
++	 * string) or to a unique name (stored as number). This example does
++	 * the latter. If the message would be directed to a well-known name
++	 * instead, the message's dst_id field would be set to
++	 * KDBUS_DST_ID_NAME, and the name would be attached in an item of type
++	 * KDBUS_ITEM_DST_NAME. See below for an example, and also refer to
++	 * kdbus.message(7).
++	 */
++	reply->dst_id = msg->src_id;
++
++	/* Our message has exactly one item to store its payload */
++	item = reply->items;
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)p;
++	item->vec.size = sizeof(p);
++
++	/*
++	 * Now prepare the command struct, and reference the message we want
++	 * to send.
++	 */
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)reply;
++
++	/*
++	 * Finally, employ the command on the connection owner
++	 * file descriptor.
++	 */
++	r = kdbus_cmd_send(m->bus->fd, &cmd);
++	if (r < 0)
++		return err_r(r, "cannot send reply");
++
++	if (p[1]) {
++		prime_consume(m->prime, p[1]);
++		status = m->prime->done * 10000 / m->prime->max;
++		if (status != m->prime->status) {
++			m->prime->status = status;
++			fprintf(stderr, "status: %7.3lf%%\n",
++				(double)status / 100);
++		}
++	}
++
++	return 0;
++}
++
++static int master_waitpid(struct master *m)
++{
++	pid_t pid;
++	int r = 0;
++
++	while ((pid = waitpid(-1, &r, WNOHANG)) > 0) {
++		if (m->n_workers > 0)
++			--m->n_workers;
++		if (!WIFEXITED(r))
++			r = err_r(-EINVAL, "child died unexpectedly");
++		else if (WEXITSTATUS(r) != 0)
++			r = err_r(-WEXITSTATUS(r), "child failed");
++	}
++
++	return r;
++}
++
++static int master_spawn(struct master *m)
++{
++	struct child *c = NULL;
++	struct prime *p = NULL;
++	pid_t pid;
++	int r;
++
++	/* Spawn off one child and call child_run() inside it */
++
++	pid = fork();
++	if (pid < 0)
++		return err("cannot fork");
++	if (pid > 0) {
++		/* parent */
++		++m->n_workers;
++		return 0;
++	}
++
++	/* child */
++
++	p = m->prime;
++	m->prime = NULL;
++	master_free(m);
++
++	r = child_new(&c, p);
++	if (r < 0)
++		goto exit;
++
++	r = child_run(c);
++
++exit:
++	child_free(c);
++	exit(abs(r));
++}
++
++static int child_new(struct child **out, struct prime *p)
++{
++	struct child *c;
++	int r;
++
++	c = calloc(1, sizeof(*c));
++	if (!c)
++		return err("cannot allocate child");
++
++	c->prime = p;
++
++	/*
++	 * Open a connection to the bus and require each received message to
++	 * carry a list of the well-known names the sending connection currently
++	 * owns. The current UID is needed in order to determine the name of the
++	 * bus node to connect to.
++	 */
++	r = bus_open_connection(&c->bus, getuid(),
++				arg_busname, KDBUS_ATTACH_NAMES);
++	if (r < 0)
++		goto error;
++
++	/*
++	 * Install a kdbus match so the child's connection gets notified when
++	 * the master loses its well-known name.
++	 */
++	r = bus_install_name_loss_match(c->bus, arg_master);
++	if (r < 0)
++		goto error;
++
++	*out = c;
++	return 0;
++
++error:
++	child_free(c);
++	return r;
++}
++
++static void child_free(struct child *c)
++{
++	if (!c)
++		return;
++
++	bus_close_connection(c->bus);
++	prime_free(c->prime);
++	free(c);
++}
++
++static int child_run(struct child *c)
++{
++	struct kdbus_cmd_send cmd;
++	struct kdbus_item *item;
++	struct kdbus_vec *vec = NULL;
++	struct kdbus_msg *msg;
++	struct timespec spec;
++	size_t n, steps, size;
++	int r = 0;
++
++	/*
++	 * Let's send a message to the master and ask for work. To do this,
++	 * we use the KDBUS_CMD_SEND ioctl, which takes an argument of type
++	 * 'struct kdbus_cmd_send'. This struct stores a pointer to the actual
++	 * message to send. See kdbus.message(7).
++	 */
++	size = sizeof(*msg);
++	size += KDBUS_ITEM_SIZE(strlen(arg_master) + 1);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	msg = alloca(size);
++	memset(msg, 0, size);
++	msg->size = size;
++
++	/*
++	 * Tell the kernel that we expect a reply to this message. This means
++	 * that
++	 *
++	 * a) The remote peer will gain temporary permission to talk to us
++	 *    even if it would not be allowed to normally.
++	 *
++	 * b) A timeout value is required.
++	 *
++	 *    For asynchronous send commands, if no reply is received, we will
++	 *    get a kernel notification with an item of type
++	 *    KDBUS_ITEM_REPLY_TIMEOUT attached.
++	 *
++	 *    For synchronous send commands (which this example does), the
++	 *    ioctl will block until a reply is received or the timeout is
++	 *    exceeded.
++	 */
++	msg->flags = KDBUS_MSG_EXPECT_REPLY;
++
++	/* Set our cookie. Replies must use this cookie to send their reply. */
++	msg->cookie = 1;
++
++	/* The payload_type is arbitrary, but it must be non-zero */
++	msg->payload_type = 0xdeadbeef;
++
++	/*
++	 * We are sending our message to the current owner of a well-known
++	 * name. This makes an item of type KDBUS_ITEM_DST_NAME mandatory.
++	 */
++	msg->dst_id = KDBUS_DST_ID_NAME;
++
++	/*
++	 * Set the reply timeout to 5 seconds. Timeouts are always set in
++	 * absolute timestamps, based on CLOCK_MONOTONIC. See kdbus.message(7).
++	 */
++	clock_gettime(CLOCK_MONOTONIC_COARSE, &spec);
++	msg->timeout_ns += (5 + spec.tv_sec) * 1000ULL * 1000ULL * 1000ULL;
++	msg->timeout_ns += spec.tv_nsec;
++
++	/*
++	 * Fill the appended items. First, set the well-known name of the
++	 * destination we want to talk to.
++	 */
++	item = msg->items;
++	item->type = KDBUS_ITEM_DST_NAME;
++	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(arg_master) + 1;
++	strcpy(item->str, arg_master);
++
++	/*
++	 * The 2nd item contains a vector to memory we want to send. It
++	 * can carry content of any type. In our case, we're sending a one-byte
++	 * string only. The memory referenced by this item will be copied into
++	 * the pool of the receiver connection, and does not need to be valid
++	 * after the command is employed.
++	 */
++	item = KDBUS_ITEM_NEXT(item);
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)"r";
++	item->vec.size = 1;
++
++	/* Set up the command struct and reference the message we prepared */
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	/*
++	 * The send command supports a mode in which it will block until a
++	 * reply to a message is received. This example uses that mode.
++	 * The pool offset to the received reply will be stored in the command
++	 * struct after the send command returned. See below.
++	 */
++	cmd.flags = KDBUS_SEND_SYNC_REPLY;
++
++	/*
++	 * Finally, employ the command on the connection owner
++	 * file descriptor.
++	 */
++	r = kdbus_cmd_send(c->bus->fd, &cmd);
++	if (r == -ESRCH || r == -EPIPE || r == -ECONNRESET)
++		return 0;
++	if (r < 0)
++		return err_r(r, "cannot send request to master");
++
++	/*
++	 * The command was sent with the KDBUS_SEND_SYNC_REPLY flag set,
++	 * and returned successfully, which means that cmd.reply.offset now
++	 * points to a message inside our connection's pool where the reply
++	 * is found. This is equivalent to receiving the reply with
++	 * KDBUS_CMD_RECV, but it doesn't require waiting for the reply with
++	 * poll() and also saves the ioctl to receive the message.
++	 */
++	msg = (void *)(c->bus->pool + cmd.reply.offset);
++
++	/*
++	 * A message describes its actual payload in an array of items.
++	 * KDBUS_FOREACH() is a simple iterator that walks such an array.
++	 * struct kdbus_msg has a field to denote its total size, which is
++	 * needed to determine the number of items in the array.
++	 */
++	KDBUS_FOREACH(item, msg->items,
++		      msg->size - offsetof(struct kdbus_msg, items)) {
++		/*
++		 * An item of type PAYLOAD_OFF describes in-line memory
++		 * stored in the pool at a described offset. That offset is
++		 * relative to the start address of the message header.
++		 * This example program only expects one single item of that
++		 * type, remembers the struct kdbus_vec member of the item
++		 * when it sees it, and bails out if there is more than one
++		 * of them.
++		 */
++		if (item->type == KDBUS_ITEM_PAYLOAD_OFF) {
++			if (vec) {
++				r = err_r(-EEXIST,
++					  "message with multiple vecs");
++				break;
++			}
++			vec = &item->vec;
++			if (vec->size != 2 * sizeof(size_t)) {
++				r = err_r(-EINVAL, "invalid message size");
++				break;
++			}
++		/*
++		 * MEMFDs are transported as items of type PAYLOAD_MEMFD.
++		 * If such an item is attached, a new file descriptor was
++		 * installed into the task when KDBUS_CMD_RECV was called, and
++		 * its number is stored in item->memfd.fd.
++		 * Implementers *must* handle this item type and close the
++		 * file descriptor when no longer needed in order to prevent
++		 * file descriptor exhaustion. This example program just bails
++		 * out with an error in this case, as memfds are not expected
++		 * in this context.
++		 */
++		} else if (item->type == KDBUS_ITEM_PAYLOAD_MEMFD) {
++			r = err_r(-EINVAL, "message with memfd");
++			break;
++		}
++	}
++	if (r < 0)
++		goto exit;
++	if (!vec) {
++		r = err_r(-EINVAL, "empty message");
++		goto exit;
++	}
++
++	n = ((size_t *)((const uint8_t *)msg + vec->offset))[0];
++	steps = ((size_t *)((const uint8_t *)msg + vec->offset))[1];
++
++	while (steps-- > 0) {
++		++n;
++		r = prime_run(c->prime, c->bus, n);
++		if (r < 0)
++			break;
++		r = bus_poll(c->bus);
++		if (r != 0) {
++			r = r < 0 ? r : -EINTR;
++			break;
++		}
++	}
++
++exit:
++	/*
++	 * We are done with the memory slice that was given to us through
++	 * cmd.reply.offset. Tell the kernel it can use it for other content
++	 * in the future. See kdbus.pool(7).
++	 */
++	bus_pool_free_slice(c->bus, cmd.reply.offset);
++	return r;
++}
++
++/*
++ * Prime Computation
++ *
++ */
++
++static int prime_new(struct prime **out)
++{
++	struct prime *p;
++	int r;
++
++	p = calloc(1, sizeof(*p));
++	if (!p)
++		return err("cannot allocate prime memory");
++
++	p->fd = -1;
++	p->area = MAP_FAILED;
++	p->max = MAX_PRIMES;
++
++	/*
++	 * Prepare and map a memfd to store the bit-fields for the number
++	 * ranges we want to perform the prime detection on.
++	 */
++	p->fd = syscall(__NR_memfd_create, "prime-area", MFD_CLOEXEC);
++	if (p->fd < 0) {
++		r = err("cannot create memfd");
++		goto error;
++	}
++
++	r = ftruncate(p->fd, p->max / 8 + 1);
++	if (r < 0) {
++		r = err("cannot ftruncate area");
++		goto error;
++	}
++
++	p->area = mmap(NULL, p->max / 8 + 1, PROT_READ | PROT_WRITE,
++		       MAP_SHARED, p->fd, 0);
++	if (p->area == MAP_FAILED) {
++		r = err("cannot mmap memfd");
++		goto error;
++	}
++
++	*out = p;
++	return 0;
++
++error:
++	prime_free(p);
++	return r;
++}
++
++static void prime_free(struct prime *p)
++{
++	if (!p)
++		return;
++
++	if (p->area != MAP_FAILED)
++		munmap(p->area, p->max / 8 + 1);
++	if (p->fd >= 0)
++		close(p->fd);
++	free(p);
++}
++
++static bool prime_done(struct prime *p)
++{
++	return p->done >= p->max;
++}
++
++static void prime_consume(struct prime *p, size_t amount)
++{
++	p->done += amount;
++}
++
++static int prime_run(struct prime *p, struct bus *cancel, size_t number)
++{
++	size_t i, n = 0;
++	int r;
++
++	if (number < 2 || number > 65535)
++		return 0;
++
++	for (i = number * number;
++	     i < p->max && i > number;
++	     i += number) {
++		p->area[i / 8] |= 1 << (i % 8);
++
++		if (!(++n % (1 << 20))) {
++			r = bus_poll(cancel);
++			if (r != 0)
++				return r < 0 ? r : -EINTR;
++		}
++	}
++
++	return 0;
++}
++
++static void prime_print(struct prime *p)
++{
++	size_t i, l = 0;
++
++	fprintf(stderr, "PRIMES:");
++	for (i = 0; i < p->max; ++i) {
++		if (!(p->area[i / 8] & (1 << (i % 8))))
++			fprintf(stderr, "%c%7zu", !(l++ % 16) ? '\n' : ' ', i);
++	}
++	fprintf(stderr, "\nEND\n");
++}
++
++static int bus_open_connection(struct bus **out, uid_t uid, const char *name,
++			       uint64_t recv_flags)
++{
++	struct kdbus_cmd_hello hello;
++	char path[128];
++	struct bus *b;
++	int r;
++
++	/*
++	 * The 'bus' object is our representation of a kdbus connection which
++	 * stores two details: the connection owner file descriptor, and the
++	 * mmap()ed memory of its associated pool. See kdbus.connection(7) and
++	 * kdbus.pool(7).
++	 */
++	b = calloc(1, sizeof(*b));
++	if (!b)
++		return err("cannot allocate bus memory");
++
++	b->fd = -1;
++	b->pool = MAP_FAILED;
++
++	/* Compute the name of the bus node to connect to. */
++	snprintf(path, sizeof(path), "/sys/fs/%s/%lu-%s/bus",
++		 arg_modname, (unsigned long)uid, name);
++	b->fd = open(path, O_RDWR | O_CLOEXEC);
++	if (b->fd < 0) {
++		r = err("cannot open bus");
++		goto error;
++	}
++
++	/*
++	 * To make a connection to the bus, the KDBUS_CMD_HELLO ioctl is used.
++	 * It takes an argument of type 'struct kdbus_cmd_hello'.
++	 */
++	memset(&hello, 0, sizeof(hello));
++	hello.size = sizeof(hello);
++
++	/*
++	 * Specify a mask of metadata attach flags, describing metadata items
++	 * that this new connection allows to be sent.
++	 */
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++
++	/*
++	 * Specify a mask of metadata attach flags, describing metadata items
++	 * that this new connection wants to receive along with each message.
++	 */
++	hello.attach_flags_recv = recv_flags;
++
++	/*
++	 * A connection may choose the size of its pool, but the number has to
++	 * comply with two rules: a) it must be greater than 0, and b) it must
++	 * be a multiple of PAGE_SIZE. See kdbus.pool(7).
++	 */
++	hello.pool_size = POOL_SIZE;
++
++	/*
++	 * Now employ the command on the file descriptor opened above.
++	 * This command will turn the file descriptor into a connection-owner
++	 * file descriptor that controls the life-time of the connection; once
++	 * it's closed, the connection is shut down.
++	 */
++	r = kdbus_cmd_hello(b->fd, &hello);
++	if (r < 0) {
++		err_r(r, "HELLO failed");
++		goto error;
++	}
++
++	bus_pool_free_slice(b, hello.offset);
++
++	/*
++	 * Map the pool of the connection. Its size has been set in the
++	 * command struct above. See kdbus.pool(7).
++	 */
++	b->pool = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, b->fd, 0);
++	if (b->pool == MAP_FAILED) {
++		r = err("cannot mmap pool");
++		goto error;
++	}
++
++	*out = b;
++	return 0;
++
++error:
++	bus_close_connection(b);
++	return r;
++}
++
++static void bus_close_connection(struct bus *b)
++{
++	if (!b)
++		return;
++
++	/*
++	 * A bus connection is closed by simply calling close() on the
++	 * connection owner file descriptor. The unique name and all owned
++	 * well-known names of the connection will disappear.
++	 * See kdbus.connection(7).
++	 */
++	if (b->pool != MAP_FAILED)
++		munmap(b->pool, POOL_SIZE);
++	if (b->fd >= 0)
++		close(b->fd);
++	free(b);
++}
++
++static void bus_pool_free_slice(struct bus *b, uint64_t offset)
++{
++	struct kdbus_cmd_free cmd = {
++		.size = sizeof(cmd),
++		.offset = offset,
++	};
++	int r;
++
++	/*
++	 * Once we're done with a piece of pool memory that was returned
++	 * by a command, we have to call the KDBUS_CMD_FREE ioctl on it so it
++	 * can be reused. The command takes an argument of type
++	 * 'struct kdbus_cmd_free', in which the pool offset of the slice to
++	 * free is stored. The ioctl is employed on the connection owner
++	 * file descriptor. See kdbus.pool(7).
++	 */
++	r = kdbus_cmd_free(b->fd, &cmd);
++	if (r < 0)
++		err_r(r, "cannot free pool slice");
++}
++
++static int bus_acquire_name(struct bus *b, const char *name)
++{
++	struct kdbus_item *item;
++	struct kdbus_cmd *cmd;
++	size_t size;
++	int r;
++
++	/*
++	 * This function acquires a well-known name on the bus through the
++	 * KDBUS_CMD_NAME_ACQUIRE ioctl. This ioctl takes an argument of type
++	 * 'struct kdbus_cmd', which is assembled below. See kdbus.name(7).
++	 */
++	size = sizeof(*cmd);
++	size += KDBUS_ITEM_SIZE(strlen(name) + 1);
++
++	cmd = alloca(size);
++	memset(cmd, 0, size);
++	cmd->size = size;
++
++	/*
++	 * The command requires an item of type KDBUS_ITEM_NAME, and its
++	 * content must be a valid bus name.
++	 */
++	item = cmd->items;
++	item->type = KDBUS_ITEM_NAME;
++	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++	strcpy(item->str, name);
++
++	/*
++	 * Employ the command on the connection owner file descriptor.
++	 */
++	r = kdbus_cmd_name_acquire(b->fd, cmd);
++	if (r < 0)
++		return err_r(r, "cannot acquire name");
++
++	return 0;
++}
++
++static int bus_install_name_loss_match(struct bus *b, const char *name)
++{
++	struct kdbus_cmd_match *match;
++	struct kdbus_item *item;
++	size_t size;
++	int r;
++
++	/*
++	 * In order to install a match for signal messages, we have to
++	 * assemble a 'struct kdbus_cmd_match' and use it along with the
++	 * KDBUS_CMD_MATCH_ADD ioctl. See kdbus.match(7).
++	 */
++	size = sizeof(*match);
++	size += KDBUS_ITEM_SIZE(sizeof(item->name_change) + strlen(name) + 1);
++
++	match = alloca(size);
++	memset(match, 0, size);
++	match->size = size;
++
++	/*
++	 * A match is comprised of many 'rules', each of which describes a
++	 * mandatory detail of the message. All rules of a match must be
++	 * satisfied in order to make a message pass.
++	 */
++	item = match->items;
++
++	/*
++	 * In this case, we're interested in notifications that inform us
++	 * about a well-known name being removed from the bus.
++	 */
++	item->type = KDBUS_ITEM_NAME_REMOVE;
++	item->size = KDBUS_ITEM_HEADER_SIZE +
++			sizeof(item->name_change) + strlen(name) + 1;
++
++	/*
++	 * We could limit the match further and require a specific unique-ID
++	 * to be the new or the old owner of the name. In this case, however,
++	 * we don't, and allow 'any' id.
++	 */
++	item->name_change.old_id.id = KDBUS_MATCH_ID_ANY;
++	item->name_change.new_id.id = KDBUS_MATCH_ID_ANY;
++
++	/* Copy in the well-known name we're interested in */
++	strcpy(item->name_change.name, name);
++
++	/*
++	 * Add the match through the KDBUS_CMD_MATCH_ADD ioctl, employed on
++	 * the connection owner fd.
++	 */
++	r = kdbus_cmd_match_add(b->fd, match);
++	if (r < 0)
++		return err_r(r, "cannot add match");
++
++	return 0;
++}
++
++static int bus_poll(struct bus *b)
++{
++	struct pollfd fds[1] = {};
++	int r;
++
++	/*
++	 * A connection endpoint supports poll() and will wake up the
++	 * task with POLLIN set once a message has arrived.
++	 */
++	fds[0].fd = b->fd;
++	fds[0].events = POLLIN;
++	r = poll(fds, sizeof(fds) / sizeof(*fds), 0);
++	if (r < 0)
++		return err("cannot poll bus");
++	return !!(fds[0].revents & POLLIN);
++}
++
++static int bus_make(uid_t uid, const char *name)
++{
++	struct kdbus_item *item;
++	struct kdbus_cmd *make;
++	char path[128], busname[128];
++	size_t size;
++	int r, fd;
++
++	/*
++	 * Compute the full path to the 'control' node. 'arg_modname' may be
++	 * set to a different value than 'kdbus' for development purposes.
++	 * The 'control' node is the primary entry point to kdbus that must be
++	 * used in order to create a bus. See kdbus(7) and kdbus.bus(7).
++	 */
++	snprintf(path, sizeof(path), "/sys/fs/%s/control", arg_modname);
++
++	/*
++	 * Compute the bus name. A valid bus name must always be prefixed with
++	 * the EUID of the currently running process in order to avoid name
++	 * conflicts. See kdbus.bus(7).
++	 */
++	snprintf(busname, sizeof(busname), "%lu-%s", (unsigned long)uid, name);
++
++	fd = open(path, O_RDWR | O_CLOEXEC);
++	if (fd < 0)
++		return err("cannot open control file");
++
++	/*
++	 * The KDBUS_CMD_BUS_MAKE ioctl takes an argument of type
++	 * 'struct kdbus_cmd', and expects at least two items attached to
++	 * it: one to describe the bloom parameters to be propagated to
++	 * connections of the bus, and the name of the bus that was computed
++	 * above. Assemble this struct now, and fill it with values.
++	 */
++	size = sizeof(*make);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_parameter));
++	size += KDBUS_ITEM_SIZE(strlen(busname) + 1);
++
++	make = alloca(size);
++	memset(make, 0, size);
++	make->size = size;
++
++	/*
++	 * Each item has a 'type' and 'size' field, and must be stored at an
++	 * 8-byte aligned address. The KDBUS_ITEM_NEXT macro is used to advance
++	 * the pointer. See kdbus.item(7) for more details.
++	 */
++	item = make->items;
++	item->type = KDBUS_ITEM_BLOOM_PARAMETER;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(item->bloom_parameter);
++	item->bloom_parameter.size = 8;
++	item->bloom_parameter.n_hash = 1;
++
++	/* The name of the new bus is stored in the next item. */
++	item = KDBUS_ITEM_NEXT(item);
++	item->type = KDBUS_ITEM_MAKE_NAME;
++	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(busname) + 1;
++	strcpy(item->str, busname);
++
++	/*
++	 * Now create the bus via the KDBUS_CMD_BUS_MAKE ioctl and return the
++	 * fd that was used back to the caller of this function. This fd is now
++	 * called a 'bus owner file descriptor', and it controls the life-time
++	 * of the newly created bus; once the file descriptor is closed, the
++	 * bus goes away, and all connections are shut down. See kdbus.bus(7).
++	 */
++	r = kdbus_cmd_bus_make(fd, make);
++	if (r < 0) {
++		err_r(r, "cannot make bus");
++		close(fd);
++		return r;
++	}
++
++	return fd;
++}
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 4e51122..7b51cce 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -5,6 +5,7 @@ TARGETS += exec
+ TARGETS += firmware
+ TARGETS += ftrace
+ TARGETS += kcmp
++TARGETS += kdbus
+ TARGETS += memfd
+ TARGETS += memory-hotplug
+ TARGETS += mount
+diff --git a/tools/testing/selftests/kdbus/.gitignore b/tools/testing/selftests/kdbus/.gitignore
+new file mode 100644
+index 0000000..d3ef42f
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/.gitignore
+@@ -0,0 +1 @@
++kdbus-test
+diff --git a/tools/testing/selftests/kdbus/Makefile b/tools/testing/selftests/kdbus/Makefile
+new file mode 100644
+index 0000000..de8242f
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/Makefile
+@@ -0,0 +1,47 @@
++CFLAGS += -I../../../../usr/include/
++CFLAGS += -I../../../../samples/kdbus/
++CFLAGS += -I../../../../include/uapi/
++CFLAGS += -std=gnu99
++CFLAGS += -DKBUILD_MODNAME=\"kdbus\" -D_GNU_SOURCE
++LDLIBS = -pthread -lcap -lm
++
++OBJS= \
++	kdbus-enum.o		\
++	kdbus-util.o		\
++	kdbus-test.o		\
++	test-activator.o	\
++	test-attach-flags.o	\
++	test-benchmark.o	\
++	test-bus.o		\
++	test-chat.o		\
++	test-connection.o	\
++	test-daemon.o		\
++	test-endpoint.o		\
++	test-fd.o		\
++	test-free.o		\
++	test-match.o		\
++	test-message.o		\
++	test-metadata-ns.o	\
++	test-monitor.o		\
++	test-names.o		\
++	test-policy.o		\
++	test-policy-ns.o	\
++	test-policy-priv.o	\
++	test-sync.o		\
++	test-timeout.o
++
++all: kdbus-test
++
++include ../lib.mk
++
++%.o: %.c
++	$(CC) $(CFLAGS) -c $< -o $@
++
++kdbus-test: $(OBJS)
++	$(CC) $(CFLAGS) $^ $(LDLIBS) -o $@
++
++run_tests:
++	./kdbus-test --tap
++
++clean:
++	rm -f *.o kdbus-test
+diff --git a/tools/testing/selftests/kdbus/kdbus-enum.c b/tools/testing/selftests/kdbus/kdbus-enum.c
+new file mode 100644
+index 0000000..4f1e579
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-enum.c
+@@ -0,0 +1,94 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++struct kdbus_enum_table {
++	long long id;
++	const char *name;
++};
++
++#define TABLE(what) static struct kdbus_enum_table kdbus_table_##what[]
++#define ENUM(_id) { .id = _id, .name = STRINGIFY(_id) }
++#define LOOKUP(what)							\
++	const char *enum_##what(long long id)				\
++	{								\
++		for (size_t i = 0; i < ELEMENTSOF(kdbus_table_##what); i++) \
++			if (id == kdbus_table_##what[i].id)		\
++				return kdbus_table_##what[i].name;	\
++		return "UNKNOWN";					\
++	}
++
++TABLE(CMD) = {
++	ENUM(KDBUS_CMD_BUS_MAKE),
++	ENUM(KDBUS_CMD_ENDPOINT_MAKE),
++	ENUM(KDBUS_CMD_HELLO),
++	ENUM(KDBUS_CMD_SEND),
++	ENUM(KDBUS_CMD_RECV),
++	ENUM(KDBUS_CMD_LIST),
++	ENUM(KDBUS_CMD_NAME_RELEASE),
++	ENUM(KDBUS_CMD_CONN_INFO),
++	ENUM(KDBUS_CMD_MATCH_ADD),
++	ENUM(KDBUS_CMD_MATCH_REMOVE),
++};
++LOOKUP(CMD);
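
To make the token pasting concrete: the LOOKUP(CMD) line above expands to roughly the following function, which the tests use to print symbolic command names. This is a hand-expanded sketch purely for illustration; ELEMENTSOF() is assumed to come from kdbus-util.h, where the macro itself relies on it:

/* approximate expansion of LOOKUP(CMD) */
const char *enum_CMD(long long id)
{
	for (size_t i = 0; i < ELEMENTSOF(kdbus_table_CMD); i++)
		if (id == kdbus_table_CMD[i].id)
			return kdbus_table_CMD[i].name;
	return "UNKNOWN";
}
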
++
++TABLE(MSG) = {
++	ENUM(_KDBUS_ITEM_NULL),
++	ENUM(KDBUS_ITEM_PAYLOAD_VEC),
++	ENUM(KDBUS_ITEM_PAYLOAD_OFF),
++	ENUM(KDBUS_ITEM_PAYLOAD_MEMFD),
++	ENUM(KDBUS_ITEM_FDS),
++	ENUM(KDBUS_ITEM_BLOOM_PARAMETER),
++	ENUM(KDBUS_ITEM_BLOOM_FILTER),
++	ENUM(KDBUS_ITEM_DST_NAME),
++	ENUM(KDBUS_ITEM_MAKE_NAME),
++	ENUM(KDBUS_ITEM_ATTACH_FLAGS_SEND),
++	ENUM(KDBUS_ITEM_ATTACH_FLAGS_RECV),
++	ENUM(KDBUS_ITEM_ID),
++	ENUM(KDBUS_ITEM_NAME),
++	ENUM(KDBUS_ITEM_TIMESTAMP),
++	ENUM(KDBUS_ITEM_CREDS),
++	ENUM(KDBUS_ITEM_PIDS),
++	ENUM(KDBUS_ITEM_AUXGROUPS),
++	ENUM(KDBUS_ITEM_OWNED_NAME),
++	ENUM(KDBUS_ITEM_TID_COMM),
++	ENUM(KDBUS_ITEM_PID_COMM),
++	ENUM(KDBUS_ITEM_EXE),
++	ENUM(KDBUS_ITEM_CMDLINE),
++	ENUM(KDBUS_ITEM_CGROUP),
++	ENUM(KDBUS_ITEM_CAPS),
++	ENUM(KDBUS_ITEM_SECLABEL),
++	ENUM(KDBUS_ITEM_AUDIT),
++	ENUM(KDBUS_ITEM_CONN_DESCRIPTION),
++	ENUM(KDBUS_ITEM_NAME_ADD),
++	ENUM(KDBUS_ITEM_NAME_REMOVE),
++	ENUM(KDBUS_ITEM_NAME_CHANGE),
++	ENUM(KDBUS_ITEM_ID_ADD),
++	ENUM(KDBUS_ITEM_ID_REMOVE),
++	ENUM(KDBUS_ITEM_REPLY_TIMEOUT),
++	ENUM(KDBUS_ITEM_REPLY_DEAD),
++};
++LOOKUP(MSG);
++
++TABLE(PAYLOAD) = {
++	ENUM(KDBUS_PAYLOAD_KERNEL),
++	ENUM(KDBUS_PAYLOAD_DBUS),
++};
++LOOKUP(PAYLOAD);
+diff --git a/tools/testing/selftests/kdbus/kdbus-enum.h b/tools/testing/selftests/kdbus/kdbus-enum.h
+new file mode 100644
+index 0000000..a67cec3
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-enum.h
+@@ -0,0 +1,14 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++#pragma once
++
++const char *enum_CMD(long long id);
++const char *enum_MSG(long long id);
++const char *enum_MATCH(long long id);
++const char *enum_PAYLOAD(long long id);
+diff --git a/tools/testing/selftests/kdbus/kdbus-test.c b/tools/testing/selftests/kdbus/kdbus-test.c
+new file mode 100644
+index 0000000..a43674c
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-test.c
+@@ -0,0 +1,923 @@
++#include <errno.h>
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <time.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <assert.h>
++#include <getopt.h>
++#include <stdbool.h>
++#include <signal.h>
++#include <sys/mount.h>
++#include <sys/prctl.h>
++#include <sys/wait.h>
++#include <sys/syscall.h>
++#include <sys/eventfd.h>
++#include <linux/sched.h>
++
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++enum {
++	TEST_CREATE_BUS		= 1 << 0,
++	TEST_CREATE_CONN	= 1 << 1,
++};
++
++struct kdbus_test {
++	const char *name;
++	const char *desc;
++	int (*func)(struct kdbus_test_env *env);
++	unsigned int flags;
++};
++
++struct kdbus_test_args {
++	bool mntns;
++	bool pidns;
++	bool userns;
++	char *uid_map;
++	char *gid_map;
++	int loop;
++	int wait;
++	int fork;
++	int tap_output;
++	char *module;
++	char *root;
++	char *test;
++	char *busname;
++	char *mask_param_path;
++};
++
++static const struct kdbus_test tests[] = {
++	{
++		.name	= "bus-make",
++		.desc	= "bus make functions",
++		.func	= kdbus_test_bus_make,
++		.flags	= 0,
++	},
++	{
++		.name	= "hello",
++		.desc	= "the HELLO command",
++		.func	= kdbus_test_hello,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "byebye",
++		.desc	= "the BYEBYE command",
++		.func	= kdbus_test_byebye,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "chat",
++		.desc	= "a chat pattern",
++		.func	= kdbus_test_chat,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "daemon",
++		.desc	= "a simple daemon",
++		.func	= kdbus_test_daemon,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "fd-passing",
++		.desc	= "file descriptor passing",
++		.func	= kdbus_test_fd_passing,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "endpoint",
++		.desc	= "custom endpoint",
++		.func	= kdbus_test_custom_endpoint,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "monitor",
++		.desc	= "monitor functionality",
++		.func	= kdbus_test_monitor,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "name-basics",
++		.desc	= "basic name registry functions",
++		.func	= kdbus_test_name_basic,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "name-conflict",
++		.desc	= "name registry conflict details",
++		.func	= kdbus_test_name_conflict,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "name-queue",
++		.desc	= "queuing of names",
++		.func	= kdbus_test_name_queue,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "message-basic",
++		.desc	= "basic message handling",
++		.func	= kdbus_test_message_basic,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "message-prio",
++		.desc	= "handling of messages with priority",
++		.func	= kdbus_test_message_prio,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "message-quota",
++		.desc	= "message quotas are enforced",
++		.func	= kdbus_test_message_quota,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "memory-access",
++		.desc	= "memory access",
++		.func	= kdbus_test_memory_access,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "timeout",
++		.desc	= "timeout",
++		.func	= kdbus_test_timeout,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "sync-byebye",
++		.desc	= "synchronous replies vs. BYEBYE",
++		.func	= kdbus_test_sync_byebye,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "sync-reply",
++		.desc	= "synchronous replies",
++		.func	= kdbus_test_sync_reply,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "message-free",
++		.desc	= "freeing of memory",
++		.func	= kdbus_test_free,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "connection-info",
++		.desc	= "retrieving connection information",
++		.func	= kdbus_test_conn_info,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "connection-update",
++		.desc	= "updating connection information",
++		.func	= kdbus_test_conn_update,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "writable-pool",
++		.desc	= "verifying pools are never writable",
++		.func	= kdbus_test_writable_pool,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "policy",
++		.desc	= "policy",
++		.func	= kdbus_test_policy,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "policy-priv",
++		.desc	= "unprivileged bus access",
++		.func	= kdbus_test_policy_priv,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "policy-ns",
++		.desc	= "policy in user namespaces",
++		.func	= kdbus_test_policy_ns,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "metadata-ns",
++		.desc	= "metadata in different namespaces",
++		.func	= kdbus_test_metadata_ns,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-id-add",
++		.desc	= "adding of matches by id",
++		.func	= kdbus_test_match_id_add,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-id-remove",
++		.desc	= "removing of matches by id",
++		.func	= kdbus_test_match_id_remove,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-replace",
++		.desc	= "replace of matches with the same cookie",
++		.func	= kdbus_test_match_replace,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-name-add",
++		.desc	= "adding of matches by name",
++		.func	= kdbus_test_match_name_add,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-name-remove",
++		.desc	= "removing of matches by name",
++		.func	= kdbus_test_match_name_remove,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-name-change",
++		.desc	= "matching for name changes",
++		.func	= kdbus_test_match_name_change,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "match-bloom",
++		.desc	= "matching with bloom filters",
++		.func	= kdbus_test_match_bloom,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "activator",
++		.desc	= "activator connections",
++		.func	= kdbus_test_activator,
++		.flags	= TEST_CREATE_BUS | TEST_CREATE_CONN,
++	},
++	{
++		.name	= "benchmark",
++		.desc	= "benchmark",
++		.func	= kdbus_test_benchmark,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "benchmark-nomemfds",
++		.desc	= "benchmark without using memfds",
++		.func	= kdbus_test_benchmark_nomemfds,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		.name	= "benchmark-uds",
++		.desc	= "benchmark comparison to UDS",
++		.func	= kdbus_test_benchmark_uds,
++		.flags	= TEST_CREATE_BUS,
++	},
++	{
++		/* Last test */
++		.name	= "attach-flags",
++		.desc	= "attach flags mask",
++		.func	= kdbus_test_attach_flags,
++		.flags	= 0,
++	},
++};
++
++#define N_TESTS ((int) (sizeof(tests) / sizeof(tests[0])))
++
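++/*
++ * Set up the per-test environment: create a private test bus and/or an
++ * initial connection, depending on the test's TEST_CREATE_* flags.
++ */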
++static int test_prepare_env(const struct kdbus_test *t,
++			    const struct kdbus_test_args *args,
++			    struct kdbus_test_env *env)
++{
++	if (t->flags & TEST_CREATE_BUS) {
++		char *s;
++		char *n = NULL;
++		int ret;
++
++		asprintf(&s, "%s/control", args->root);
++
++		env->control_fd = open(s, O_RDWR);
++		free(s);
++		ASSERT_RETURN(env->control_fd >= 0);
++
++		if (!args->busname) {
++			n = unique_name("test-bus");
++			ASSERT_RETURN(n);
++		}
++
++		ret = kdbus_create_bus(env->control_fd,
++				       args->busname ?: n,
++				       _KDBUS_ATTACH_ALL,
++				       _KDBUS_ATTACH_ALL, &s);
++		free(n);
++		ASSERT_RETURN(ret == 0);
++
++		asprintf(&env->buspath, "%s/%s/bus", args->root, s);
++		free(s);
++	}
++
++	if (t->flags & TEST_CREATE_CONN) {
++		env->conn = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_RETURN(env->conn);
++	}
++
++	env->root = args->root;
++	env->module = args->module;
++	env->mask_param_path = args->mask_param_path;
++
++	return 0;
++}
++
++void test_unprepare_env(const struct kdbus_test *t, struct kdbus_test_env *env)
++{
++	if (env->conn) {
++		kdbus_conn_free(env->conn);
++		env->conn = NULL;
++	}
++
++	if (env->control_fd >= 0) {
++		close(env->control_fd);
++		env->control_fd = -1;
++	}
++
++	if (env->buspath) {
++		free(env->buspath);
++		env->buspath = NULL;
++	}
++}
++
++static int test_run(const struct kdbus_test *t,
++		    const struct kdbus_test_args *kdbus_args,
++		    int wait)
++{
++	int ret;
++	struct kdbus_test_env env = {};
++
++	ret = test_prepare_env(t, kdbus_args, &env);
++	if (ret != TEST_OK)
++		return ret;
++
++	if (wait > 0) {
++		printf("Sleeping %d seconds before running test ...\n", wait);
++		sleep(wait);
++	}
++
++	ret = t->func(&env);
++	test_unprepare_env(t, &env);
++	return ret;
++}
++
++static int test_run_forked(const struct kdbus_test *t,
++			   const struct kdbus_test_args *kdbus_args,
++			   int wait)
++{
++	int ret;
++	pid_t pid;
++
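++	/*
++	 * Run the test in a child process so a crashing test cannot take
++	 * the runner down; the exit status carries the TEST_* result.
++	 */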
++	pid = fork();
++	if (pid < 0) {
++		return TEST_ERR;
++	} else if (pid == 0) {
++		ret = test_run(t, kdbus_args, wait);
++		_exit(ret);
++	}
++
++	pid = waitpid(pid, &ret, 0);
++	if (pid <= 0)
++		return TEST_ERR;
++	else if (!WIFEXITED(ret))
++		return TEST_ERR;
++	else
++		return WEXITSTATUS(ret);
++}
++
++static void print_test_result(int ret)
++{
++	switch (ret) {
++	case TEST_OK:
++		printf("OK");
++		break;
++	case TEST_SKIP:
++		printf("SKIPPED");
++		break;
++	case TEST_ERR:
++		printf("ERROR");
++		break;
++	}
++}
++
++static int start_all_tests(struct kdbus_test_args *kdbus_args)
++{
++	int ret;
++	unsigned int fail_cnt = 0;
++	unsigned int skip_cnt = 0;
++	unsigned int ok_cnt = 0;
++	unsigned int i;
++
++	if (kdbus_args->tap_output) {
++		printf("1..%d\n", N_TESTS);
++		fflush(stdout);
++	}
++
++	kdbus_util_verbose = false;
++
++	for (i = 0; i < N_TESTS; i++) {
++		const struct kdbus_test *t = tests + i;
++
++		if (!kdbus_args->tap_output) {
++			unsigned int n;
++
++			printf("Testing %s (%s) ", t->desc, t->name);
++			for (n = 0; n < 60 - strlen(t->desc) - strlen(t->name); n++)
++				printf(".");
++			printf(" ");
++		}
++
++		ret = test_run_forked(t, kdbus_args, 0);
++		switch (ret) {
++		case TEST_OK:
++			ok_cnt++;
++			break;
++		case TEST_SKIP:
++			skip_cnt++;
++			break;
++		case TEST_ERR:
++			fail_cnt++;
++			break;
++		}
++
++		if (kdbus_args->tap_output) {
++			printf("%sok %d - %s%s (%s)\n",
++			       (ret == TEST_ERR) ? "not " : "", i + 1,
++			       (ret == TEST_SKIP) ? "# SKIP " : "",
++			       t->desc, t->name);
++			fflush(stdout);
++		} else {
++			print_test_result(ret);
++			printf("\n");
++		}
++	}
++
++	if (kdbus_args->tap_output)
++		printf("Failed %d/%d tests, %.2f%% okay\n", fail_cnt, N_TESTS,
++		       100.0 - (fail_cnt * 100.0) / ((float) N_TESTS));
++	else
++		printf("\nSUMMARY: %u tests passed, %u skipped, %u failed\n",
++		       ok_cnt, skip_cnt, fail_cnt);
++
++	return fail_cnt > 0 ? TEST_ERR : TEST_OK;
++}
++
++static int start_one_test(struct kdbus_test_args *kdbus_args)
++{
++	int i, ret;
++	bool test_found = false;
++
++	for (i = 0; i < N_TESTS; i++) {
++		const struct kdbus_test *t = tests + i;
++
++		if (strcmp(t->name, kdbus_args->test))
++			continue;
++
++		do {
++			test_found = true;
++			if (kdbus_args->fork)
++				ret = test_run_forked(t, kdbus_args,
++						      kdbus_args->wait);
++			else
++				ret = test_run(t, kdbus_args,
++					       kdbus_args->wait);
++
++			printf("Testing %s: ", t->desc);
++			print_test_result(ret);
++			printf("\n");
++
++			if (ret != TEST_OK)
++				break;
++		} while (kdbus_args->loop);
++
++		return ret;
++	}
++
++	if (!test_found) {
++		printf("Unknown test-id '%s'\n", kdbus_args->test);
++		return TEST_ERR;
++	}
++
++	return TEST_OK;
++}
++
++static void usage(const char *argv0)
++{
++	unsigned int i, j;
++
++	printf("Usage: %s [options]\n"
++	       "Options:\n"
++	       "\t-a, --tap		Output test results in TAP format\n"
++	       "\t-m, --module <module>	Kdbus module name\n"
++	       "\t-x, --loop		Run in a loop\n"
++	       "\t-f, --fork		Fork before running a test\n"
++	       "\t-h, --help		Print this help\n"
++	       "\t-r, --root <root>	Toplevel of the kdbus hierarchy\n"
++	       "\t-t, --test <test-id>	Run one specific test only, in verbose mode\n"
++	       "\t-b, --bus <busname>	Instead of generating a random bus name, take <busname>.\n"
++	       "\t-w, --wait <secs>	Wait <secs> before actually starting test\n"
++	       "\t    --mntns		New mount namespace\n"
++	       "\t    --pidns		New PID namespace\n"
++	       "\t    --userns		New user namespace\n"
++	       "\t    --uidmap uid_map	UID map for user namespace\n"
++	       "\t    --gidmap gid_map	GID map for user namespace\n"
++	       "\n", argv0);
++
++	printf("By default, all test are run once, and a summary is printed.\n"
++	       "Available tests for --test:\n\n");
++
++	for (i = 0; i < N_TESTS; i++) {
++		const struct kdbus_test *t = tests + i;
++
++		printf("\t%s", t->name);
++
++		for (j = 0; j < 24 - strlen(t->name); j++)
++			printf(" ");
++
++		printf("Test %s\n", t->desc);
++	}
++
++	printf("\n");
++	printf("Note that some tests may, if run specifically by --test, "
++	       "behave differently, and not terminate by themselves.\n");
++
++	exit(EXIT_FAILURE);
++}
++
++void print_kdbus_test_args(struct kdbus_test_args *args)
++{
++	if (args->userns || args->pidns || args->mntns)
++		printf("# Starting tests in new %s%s%s namespaces%s\n",
++			args->mntns ? "MOUNT " : "",
++			args->pidns ? "PID " : "",
++			args->userns ? "USER " : "",
++			args->mntns ? ", kdbusfs will be remounted" : "");
++	else
++		printf("# Starting tests in the same namespaces\n");
++}
++
++void print_metadata_support(void)
++{
++	bool no_meta_audit, no_meta_cgroups, no_meta_seclabel;
++
++	/*
++	 * KDBUS_ATTACH_CGROUP, KDBUS_ATTACH_AUDIT and
++	 * KDBUS_ATTACH_SECLABEL
++	 */
++	no_meta_audit = !config_auditsyscall_is_enabled();
++	no_meta_cgroups = !config_cgroups_is_enabled();
++	no_meta_seclabel = !config_security_is_enabled();
++
++	if (no_meta_audit || no_meta_cgroups || no_meta_seclabel)
++		printf("# Starting tests without %s%s%s metadata support\n",
++		       no_meta_audit ? "AUDIT " : "",
++		       no_meta_cgroups ? "CGROUP " : "",
++		       no_meta_seclabel ? "SECLABEL " : "");
++	else
++		printf("# Starting tests with full metadata support\n");
++}
++
++int run_tests(struct kdbus_test_args *kdbus_args)
++{
++	int ret;
++	static char control[4096];
++
++	snprintf(control, sizeof(control), "%s/control", kdbus_args->root);
++
++	if (access(control, W_OK) < 0) {
++		printf("Unable to locate control node at '%s'.\n",
++			control);
++		return TEST_ERR;
++	}
++
++	if (kdbus_args->test) {
++		ret = start_one_test(kdbus_args);
++	} else {
++		do {
++			ret = start_all_tests(kdbus_args);
++			if (ret != TEST_OK)
++				break;
++		} while (kdbus_args->loop);
++	}
++
++	return ret;
++}
++
++static void nop_handler(int sig) {}
++
++static int test_prepare_mounts(struct kdbus_test_args *kdbus_args)
++{
++	int ret;
++	char kdbusfs[64] = {'\0'};
++
++	snprintf(kdbusfs, sizeof(kdbusfs), "%sfs", kdbus_args->module);
++
++	/* make current mount slave */
++	ret = mount(NULL, "/", NULL, MS_SLAVE|MS_REC, NULL);
++	if (ret < 0) {
++		ret = -errno;
++		printf("error mount() root: %d (%m)\n", ret);
++		return ret;
++	}
++
++	/* Remount procfs since we need it in our tests */
++	if (kdbus_args->pidns) {
++		ret = mount("proc", "/proc", "proc",
++			    MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL);
++		if (ret < 0) {
++			ret = -errno;
++			printf("error mount() /proc : %d (%m)\n", ret);
++			return ret;
++		}
++	}
++
++	/* Remount kdbusfs */
++	ret = mount(kdbusfs, kdbus_args->root, kdbusfs,
++		    MS_NOSUID|MS_NOEXEC|MS_NODEV, NULL);
++	if (ret < 0) {
++		ret = -errno;
++		printf("error mount() %s :%d (%m)\n", kdbusfs, ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++int run_tests_in_namespaces(struct kdbus_test_args *kdbus_args)
++{
++	int ret;
++	int efd = -1;
++	int status;
++	pid_t pid, rpid;
++	struct sigaction oldsa;
++	struct sigaction sa = {
++		.sa_handler = nop_handler,
++		.sa_flags = SA_NOCLDSTOP,
++	};
++
++	efd = eventfd(0, EFD_CLOEXEC);
++	if (efd < 0) {
++		ret = -errno;
++		printf("eventfd() failed: %d (%m)\n", ret);
++		return TEST_ERR;
++	}
++
++	ret = sigaction(SIGCHLD, &sa, &oldsa);
++	if (ret < 0) {
++		ret = -errno;
++		printf("sigaction() failed: %d (%m)\n", ret);
++		return TEST_ERR;
++	}
++
++	/* setup namespaces */
++	pid = syscall(__NR_clone, SIGCHLD|
++		      (kdbus_args->userns ? CLONE_NEWUSER : 0) |
++		      (kdbus_args->mntns ? CLONE_NEWNS : 0) |
++		      (kdbus_args->pidns ? CLONE_NEWPID : 0), NULL);
++	if (pid < 0) {
++		printf("clone() failed: %d (%m)\n", -errno);
++		return TEST_ERR;
++	}
++
++	if (pid == 0) {
++		eventfd_t event_status = 0;
++
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		if (ret < 0) {
++			ret = -errno;
++			printf("error prctl(): %d (%m)\n", ret);
++			_exit(TEST_ERR);
++		}
++
++		/* reset the child's signal handlers */
++		ret = sigaction(SIGCHLD, &oldsa, NULL);
++		if (ret < 0) {
++			ret = -errno;
++			printf("sigaction() failed: %d (%m)\n", ret);
++			_exit(TEST_ERR);
++		}
++
++		ret = eventfd_read(efd, &event_status);
++		if (ret < 0 || event_status != 1) {
++			printf("error eventfd_read()\n");
++			_exit(TEST_ERR);
++		}
++
++		if (kdbus_args->mntns) {
++			ret = test_prepare_mounts(kdbus_args);
++			if (ret < 0) {
++				printf("error preparing mounts\n");
++				_exit(TEST_ERR);
++			}
++		}
++
++		ret = run_tests(kdbus_args);
++		_exit(ret);
++	}
++
++	/* Setup userns mapping */
++	if (kdbus_args->userns) {
++		ret = userns_map_uid_gid(pid, kdbus_args->uid_map,
++					 kdbus_args->gid_map);
++		if (ret < 0) {
++			printf("error mapping uid and gid in userns\n");
++			eventfd_write(efd, 2);
++			return TEST_ERR;
++		}
++	}
++
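++	/*
++	 * Wake the child; it only proceeds on a value of 1, the error
++	 * path above writes 2 to make it bail out.
++	 */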
++	ret = eventfd_write(efd, 1);
++	if (ret < 0) {
++		ret = -errno;
++		printf("error eventfd_write(): %d (%m)\n", ret);
++		return TEST_ERR;
++	}
++
++	rpid = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(rpid == pid, TEST_ERR);
++
++	close(efd);
++
++	if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
++		return TEST_ERR;
++
++	return TEST_OK;
++}
++
++int start_tests(struct kdbus_test_args *kdbus_args)
++{
++	int ret;
++	bool namespaces;
++	uint64_t kdbus_param_mask;
++	static char fspath[4096], parampath[4096];
++
++	namespaces = (kdbus_args->mntns || kdbus_args->pidns ||
++		      kdbus_args->userns);
++
++	/* for pidns we need mntns set */
++	if (kdbus_args->pidns && !kdbus_args->mntns) {
++		printf("Failed: please set both pid and mnt namesapces\n");
++		return TEST_ERR;
++	}
++
++	if (kdbus_args->userns) {
++		if (!config_user_ns_is_enabled()) {
++			printf("User namespace not supported\n");
++			return TEST_ERR;
++		}
++
++		if (!kdbus_args->uid_map || !kdbus_args->gid_map) {
++			printf("Failed: please specify uid or gid mapping\n");
++			return TEST_ERR;
++		}
++	}
++
++	print_kdbus_test_args(kdbus_args);
++	print_metadata_support();
++
++	/* setup kdbus paths */
++	if (!kdbus_args->module)
++		kdbus_args->module = "kdbus";
++
++	if (!kdbus_args->root) {
++		snprintf(fspath, sizeof(fspath), "/sys/fs/%s",
++			 kdbus_args->module);
++		kdbus_args->root = fspath;
++	}
++
++	snprintf(parampath, sizeof(parampath),
++		 "/sys/module/%s/parameters/attach_flags_mask",
++		 kdbus_args->module);
++	kdbus_args->mask_param_path = parampath;
++
++	ret = kdbus_sysfs_get_parameter_mask(kdbus_args->mask_param_path,
++					     &kdbus_param_mask);
++	if (ret < 0)
++		return TEST_ERR;
++
++	printf("# Starting tests with an attach_flags_mask=0x%llx\n",
++		(unsigned long long)kdbus_param_mask);
++
++	/* Start tests */
++	if (namespaces)
++		ret = run_tests_in_namespaces(kdbus_args);
++	else
++		ret = run_tests(kdbus_args);
++
++	return ret;
++}
++
++int main(int argc, char *argv[])
++{
++	int t, ret = 0;
++	struct kdbus_test_args *kdbus_args;
++	enum {
++		ARG_MNTNS = 0x100,
++		ARG_PIDNS,
++		ARG_USERNS,
++		ARG_UIDMAP,
++		ARG_GIDMAP,
++	};
++
++	kdbus_args = malloc(sizeof(*kdbus_args));
++	if (!kdbus_args) {
++		printf("unable to malloc() kdbus_args\n");
++		return EXIT_FAILURE;
++	}
++
++	memset(kdbus_args, 0, sizeof(*kdbus_args));
++
++	static const struct option options[] = {
++		{ "loop",	no_argument,		NULL, 'x' },
++		{ "help",	no_argument,		NULL, 'h' },
++		{ "root",	required_argument,	NULL, 'r' },
++		{ "test",	required_argument,	NULL, 't' },
++		{ "bus",	required_argument,	NULL, 'b' },
++		{ "wait",	required_argument,	NULL, 'w' },
++		{ "fork",	no_argument,		NULL, 'f' },
++		{ "module",	required_argument,	NULL, 'm' },
++		{ "tap",	no_argument,		NULL, 'a' },
++		{ "mntns",	no_argument,		NULL, ARG_MNTNS },
++		{ "pidns",	no_argument,		NULL, ARG_PIDNS },
++		{ "userns",	no_argument,		NULL, ARG_USERNS },
++		{ "uidmap",	required_argument,	NULL, ARG_UIDMAP },
++		{ "gidmap",	required_argument,	NULL, ARG_GIDMAP },
++		{}
++	};
++
++	srand(time(NULL));
++
++	while ((t = getopt_long(argc, argv, "hxfm:r:t:b:w:a", options, NULL)) >= 0) {
++		switch (t) {
++		case 'x':
++			kdbus_args->loop = 1;
++			break;
++
++		case 'm':
++			kdbus_args->module = optarg;
++			break;
++
++		case 'r':
++			kdbus_args->root = optarg;
++			break;
++
++		case 't':
++			kdbus_args->test = optarg;
++			break;
++
++		case 'b':
++			kdbus_args->busname = optarg;
++			break;
++
++		case 'w':
++			kdbus_args->wait = strtol(optarg, NULL, 10);
++			break;
++
++		case 'f':
++			kdbus_args->fork = 1;
++			break;
++
++		case 'a':
++			kdbus_args->tap_output = 1;
++			break;
++
++		case ARG_MNTNS:
++			kdbus_args->mntns = true;
++			break;
++
++		case ARG_PIDNS:
++			kdbus_args->pidns = true;
++			break;
++
++		case ARG_USERNS:
++			kdbus_args->userns = true;
++			break;
++
++		case ARG_UIDMAP:
++			kdbus_args->uid_map = optarg;
++			break;
++
++		case ARG_GIDMAP:
++			kdbus_args->gid_map = optarg;
++			break;
++
++		default:
++		case 'h':
++			usage(argv[0]);
++		}
++	}
++
++	ret = start_tests(kdbus_args);
++
++	free(kdbus_args);
++
++	if (ret == TEST_ERR)
++		return EXIT_FAILURE;
++
++	return 0;
++}
+diff --git a/tools/testing/selftests/kdbus/kdbus-test.h b/tools/testing/selftests/kdbus/kdbus-test.h
+new file mode 100644
+index 0000000..6473318
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-test.h
+@@ -0,0 +1,85 @@
++#ifndef _TEST_KDBUS_H_
++#define _TEST_KDBUS_H_
++
++struct kdbus_test_env {
++	char *buspath;
++	const char *root;
++	const char *module;
++	const char *mask_param_path;
++	int control_fd;
++	struct kdbus_conn *conn;
++};
++
++enum {
++	TEST_OK,
++	TEST_SKIP,
++	TEST_ERR,
++};
++
++#define ASSERT_RETURN_VAL(cond, val)		\
++	if (!(cond)) {			\
++		fprintf(stderr, "Assertion '%s' failed in %s(), %s:%d\n", \
++			#cond, __func__, __FILE__, __LINE__);	\
++		return val;	\
++	}
++
++#define ASSERT_EXIT_VAL(cond, val)		\
++	if (!(cond)) {			\
++		fprintf(stderr, "Assertion '%s' failed in %s(), %s:%d\n", \
++			#cond, __func__, __FILE__, __LINE__);	\
++		_exit(val);	\
++	}
++
++#define ASSERT_BREAK(cond)		\
++	if (!(cond)) {			\
++		fprintf(stderr, "Assertion '%s' failed in %s(), %s:%d\n", \
++			#cond, __func__, __FILE__, __LINE__);	\
++		break; \
++	}
++
++#define ASSERT_RETURN(cond)		\
++	ASSERT_RETURN_VAL(cond, TEST_ERR)
++
++#define ASSERT_EXIT(cond)		\
++	ASSERT_EXIT_VAL(cond, EXIT_FAILURE)
++
++int kdbus_test_activator(struct kdbus_test_env *env);
++int kdbus_test_attach_flags(struct kdbus_test_env *env);
++int kdbus_test_benchmark(struct kdbus_test_env *env);
++int kdbus_test_benchmark_nomemfds(struct kdbus_test_env *env);
++int kdbus_test_benchmark_uds(struct kdbus_test_env *env);
++int kdbus_test_bus_make(struct kdbus_test_env *env);
++int kdbus_test_byebye(struct kdbus_test_env *env);
++int kdbus_test_chat(struct kdbus_test_env *env);
++int kdbus_test_conn_info(struct kdbus_test_env *env);
++int kdbus_test_conn_update(struct kdbus_test_env *env);
++int kdbus_test_daemon(struct kdbus_test_env *env);
++int kdbus_test_custom_endpoint(struct kdbus_test_env *env);
++int kdbus_test_fd_passing(struct kdbus_test_env *env);
++int kdbus_test_free(struct kdbus_test_env *env);
++int kdbus_test_hello(struct kdbus_test_env *env);
++int kdbus_test_match_bloom(struct kdbus_test_env *env);
++int kdbus_test_match_id_add(struct kdbus_test_env *env);
++int kdbus_test_match_id_remove(struct kdbus_test_env *env);
++int kdbus_test_match_replace(struct kdbus_test_env *env);
++int kdbus_test_match_name_add(struct kdbus_test_env *env);
++int kdbus_test_match_name_change(struct kdbus_test_env *env);
++int kdbus_test_match_name_remove(struct kdbus_test_env *env);
++int kdbus_test_message_basic(struct kdbus_test_env *env);
++int kdbus_test_message_prio(struct kdbus_test_env *env);
++int kdbus_test_message_quota(struct kdbus_test_env *env);
++int kdbus_test_memory_access(struct kdbus_test_env *env);
++int kdbus_test_metadata_ns(struct kdbus_test_env *env);
++int kdbus_test_monitor(struct kdbus_test_env *env);
++int kdbus_test_name_basic(struct kdbus_test_env *env);
++int kdbus_test_name_conflict(struct kdbus_test_env *env);
++int kdbus_test_name_queue(struct kdbus_test_env *env);
++int kdbus_test_policy(struct kdbus_test_env *env);
++int kdbus_test_policy_ns(struct kdbus_test_env *env);
++int kdbus_test_policy_priv(struct kdbus_test_env *env);
++int kdbus_test_sync_byebye(struct kdbus_test_env *env);
++int kdbus_test_sync_reply(struct kdbus_test_env *env);
++int kdbus_test_timeout(struct kdbus_test_env *env);
++int kdbus_test_writable_pool(struct kdbus_test_env *env);
++
++#endif /* _TEST_KDBUS_H_ */
+diff --git a/tools/testing/selftests/kdbus/kdbus-util.c b/tools/testing/selftests/kdbus/kdbus-util.c
+new file mode 100644
+index 0000000..4b376ec
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-util.c
+@@ -0,0 +1,1615 @@
++/*
++ * Copyright (C) 2013-2015 Daniel Mack
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <stdio.h>
++#include <stdarg.h>
++#include <string.h>
++#include <time.h>
++#include <inttypes.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <grp.h>
++#include <sys/capability.h>
++#include <sys/mman.h>
++#include <sys/stat.h>
++#include <sys/time.h>
++#include <linux/unistd.h>
++#include <linux/memfd.h>
++
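++/*
++ * Fallback syscall numbers (x86-64, ARM, and i386 as the default) for
++ * toolchains whose headers predate memfd_create().
++ */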
++#ifndef __NR_memfd_create
++  #ifdef __x86_64__
++    #define __NR_memfd_create 319
++  #elif defined __arm__
++    #define __NR_memfd_create 385
++  #else
++    #define __NR_memfd_create 356
++  #endif
++#endif
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++#ifndef F_ADD_SEALS
++#define F_LINUX_SPECIFIC_BASE	1024
++#define F_ADD_SEALS     (F_LINUX_SPECIFIC_BASE + 9)
++#define F_GET_SEALS     (F_LINUX_SPECIFIC_BASE + 10)
++
++#define F_SEAL_SEAL     0x0001  /* prevent further seals from being set */
++#define F_SEAL_SHRINK   0x0002  /* prevent file from shrinking */
++#define F_SEAL_GROW     0x0004  /* prevent file from growing */
++#define F_SEAL_WRITE    0x0008  /* prevent writes */
++#endif
++
++int kdbus_util_verbose = true;
++
++int kdbus_sysfs_get_parameter_mask(const char *path, uint64_t *mask)
++{
++	int ret;
++	FILE *file;
++	unsigned long long value;
++
++	file = fopen(path, "r");
++	if (!file) {
++		ret = -errno;
++		kdbus_printf("--- error fopen(): %d (%m)\n", ret);
++		return ret;
++	}
++
++	ret = fscanf(file, "%llu", &value);
++	if (ret != 1) {
++		if (ferror(file))
++			ret = -errno;
++		else
++			ret = -EIO;
++
++		kdbus_printf("--- error fscanf(): %d\n", ret);
++		fclose(file);
++		return ret;
++	}
++
++	*mask = (uint64_t)value;
++
++	fclose(file);
++
++	return 0;
++}
++
++int kdbus_sysfs_set_parameter_mask(const char *path, uint64_t mask)
++{
++	int ret;
++	FILE *file;
++
++	file = fopen(path, "w");
++	if (!file) {
++		ret = -errno;
++		kdbus_printf("--- error open(): %d (%m)\n", ret);
++		return ret;
++	}
++
++	ret = fprintf(file, "%llu", (unsigned long long)mask);
++	if (ret <= 0) {
++		ret = -EIO;
++		kdbus_printf("--- error fprintf(): %d\n", ret);
++	}
++
++	fclose(file);
++
++	return ret > 0 ? 0 : ret;
++}
++
++int kdbus_create_bus(int control_fd, const char *name,
++		     uint64_t req_meta, uint64_t owner_meta,
++		     char **path)
++{
++	struct {
++		struct kdbus_cmd cmd;
++
++		/* bloom size item */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_bloom_parameter bloom;
++		} bp;
++
++		/* required and owner metadata items */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			uint64_t flags;
++		} attach[2];
++
++		/* name item */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			char str[64];
++		} name;
++	} bus_make;
++	int ret;
++
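++	/*
++	 * A kdbus command is a fixed header followed by variable-length
++	 * items; the anonymous structs above emulate that wire layout in
++	 * a single allocation.
++	 */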
++	memset(&bus_make, 0, sizeof(bus_make));
++	bus_make.bp.size = sizeof(bus_make.bp);
++	bus_make.bp.type = KDBUS_ITEM_BLOOM_PARAMETER;
++	bus_make.bp.bloom.size = 64;
++	bus_make.bp.bloom.n_hash = 1;
++
++	snprintf(bus_make.name.str, sizeof(bus_make.name.str),
++		 "%u-%s", getuid(), name);
++
++	bus_make.attach[0].type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
++	bus_make.attach[0].size = sizeof(bus_make.attach[0]);
++	bus_make.attach[0].flags = req_meta;
++
++	bus_make.attach[1].type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
++	bus_make.attach[1].size = sizeof(bus_make.attach[0]);
++	bus_make.attach[1].flags = owner_meta;
++
++	bus_make.name.type = KDBUS_ITEM_MAKE_NAME;
++	bus_make.name.size = KDBUS_ITEM_HEADER_SIZE +
++			     strlen(bus_make.name.str) + 1;
++
++	bus_make.cmd.flags = KDBUS_MAKE_ACCESS_WORLD;
++	bus_make.cmd.size = sizeof(bus_make.cmd) +
++			     bus_make.bp.size +
++			     bus_make.attach[0].size +
++			     bus_make.attach[1].size +
++			     bus_make.name.size;
++
++	kdbus_printf("Creating bus with name >%s< on control fd %d ...\n",
++		     name, control_fd);
++
++	ret = kdbus_cmd_bus_make(control_fd, &bus_make.cmd);
++	if (ret < 0) {
++		kdbus_printf("--- error when making bus: %d (%m)\n", ret);
++		return ret;
++	}
++
++	if (ret == 0 && path)
++		*path = strdup(bus_make.name.str);
++
++	return ret;
++}
++
++struct kdbus_conn *
++kdbus_hello(const char *path, uint64_t flags,
++	    const struct kdbus_item *item, size_t item_size)
++{
++	struct kdbus_cmd_free cmd_free = {};
++	int fd, ret;
++	struct {
++		struct kdbus_cmd_hello hello;
++
++		struct {
++			uint64_t size;
++			uint64_t type;
++			char str[16];
++		} conn_name;
++
++		uint8_t extra_items[item_size];
++	} h;
++	struct kdbus_conn *conn;
++
++	memset(&h, 0, sizeof(h));
++
++	if (item_size > 0)
++		memcpy(h.extra_items, item, item_size);
++
++	kdbus_printf("-- opening bus connection %s\n", path);
++	fd = open(path, O_RDWR|O_CLOEXEC);
++	if (fd < 0) {
++		kdbus_printf("--- error %d (%m)\n", fd);
++		return NULL;
++	}
++
++	h.hello.flags = flags | KDBUS_HELLO_ACCEPT_FD;
++	h.hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	h.hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
++	h.conn_name.type = KDBUS_ITEM_CONN_DESCRIPTION;
++	strcpy(h.conn_name.str, "this-is-my-name");
++	h.conn_name.size = KDBUS_ITEM_HEADER_SIZE + strlen(h.conn_name.str) + 1;
++
++	h.hello.size = sizeof(h);
++	h.hello.pool_size = POOL_SIZE;
++
++	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) &h.hello);
++	if (ret < 0) {
++		kdbus_printf("--- error when saying hello: %d (%m)\n", ret);
++		return NULL;
++	}
++	kdbus_printf("-- Our peer ID for %s: %llu -- bus uuid: '%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x'\n",
++		     path, (unsigned long long)h.hello.id,
++		     h.hello.id128[0],  h.hello.id128[1],  h.hello.id128[2],
++		     h.hello.id128[3],  h.hello.id128[4],  h.hello.id128[5],
++		     h.hello.id128[6],  h.hello.id128[7],  h.hello.id128[8],
++		     h.hello.id128[9],  h.hello.id128[10], h.hello.id128[11],
++		     h.hello.id128[12], h.hello.id128[13], h.hello.id128[14],
++		     h.hello.id128[15]);
++
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = h.hello.offset;
++	kdbus_cmd_free(fd, &cmd_free);
++
++	conn = malloc(sizeof(*conn));
++	if (!conn) {
++		kdbus_printf("unable to malloc()!?\n");
++		close(fd);
++		return NULL;
++	}
++
++	conn->buf = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
++	if (conn->buf == MAP_FAILED) {
++		free(conn);
++		close(fd);
++		kdbus_printf("--- error mmap (%m)\n");
++		return NULL;
++	}
++
++	conn->fd = fd;
++	conn->id = h.hello.id;
++	return conn;
++}
++
++struct kdbus_conn *
++kdbus_hello_registrar(const char *path, const char *name,
++		      const struct kdbus_policy_access *access,
++		      size_t num_access, uint64_t flags)
++{
++	struct kdbus_item *item, *items;
++	size_t i, size;
++
++	size = KDBUS_ITEM_SIZE(strlen(name) + 1) +
++		num_access * KDBUS_ITEM_SIZE(sizeof(*access));
++
++	items = alloca(size);
++
++	item = items;
++	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++	item->type = KDBUS_ITEM_NAME;
++	strcpy(item->str, name);
++	item = KDBUS_ITEM_NEXT(item);
++
++	for (i = 0; i < num_access; i++) {
++		item->size = KDBUS_ITEM_HEADER_SIZE +
++			     sizeof(struct kdbus_policy_access);
++		item->type = KDBUS_ITEM_POLICY_ACCESS;
++
++		item->policy_access.type = access[i].type;
++		item->policy_access.access = access[i].access;
++		item->policy_access.id = access[i].id;
++
++		item = KDBUS_ITEM_NEXT(item);
++	}
++
++	return kdbus_hello(path, flags, items, size);
++}
++
++struct kdbus_conn *kdbus_hello_activator(const char *path, const char *name,
++				   const struct kdbus_policy_access *access,
++				   size_t num_access)
++{
++	return kdbus_hello_registrar(path, name, access, num_access,
++				     KDBUS_HELLO_ACTIVATOR);
++}
++
++bool kdbus_item_in_message(struct kdbus_msg *msg, uint64_t type)
++{
++	const struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, msg, items)
++		if (item->type == type)
++			return true;
++
++	return false;
++}
++
++int kdbus_bus_creator_info(struct kdbus_conn *conn,
++			   uint64_t flags,
++			   uint64_t *offset)
++{
++	struct kdbus_cmd_info *cmd;
++	size_t size = sizeof(*cmd);
++	int ret;
++
++	cmd = alloca(size);
++	memset(cmd, 0, size);
++	cmd->size = size;
++	cmd->attach_flags = flags;
++
++	ret = kdbus_cmd_bus_creator_info(conn->fd, cmd);
++	if (ret < 0) {
++		kdbus_printf("--- error when requesting info: %d (%m)\n", ret);
++		return ret;
++	}
++
++	if (offset)
++		*offset = cmd->offset;
++	else
++		kdbus_free(conn, cmd->offset);
++
++	return 0;
++}
++
++int kdbus_conn_info(struct kdbus_conn *conn, uint64_t id,
++		    const char *name, uint64_t flags,
++		    uint64_t *offset)
++{
++	struct kdbus_cmd_info *cmd;
++	size_t size = sizeof(*cmd);
++	struct kdbus_info *info;
++	int ret;
++
++	if (name)
++		size += KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++
++	cmd = alloca(size);
++	memset(cmd, 0, size);
++	cmd->size = size;
++	cmd->attach_flags = flags;
++
++	if (name) {
++		cmd->items[0].size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++		cmd->items[0].type = KDBUS_ITEM_NAME;
++		strcpy(cmd->items[0].str, name);
++	} else {
++		cmd->id = id;
++	}
++
++	ret = kdbus_cmd_conn_info(conn->fd, cmd);
++	if (ret < 0) {
++		kdbus_printf("--- error when requesting info: %d (%m)\n", ret);
++		return ret;
++	}
++
++	info = (struct kdbus_info *) (conn->buf + cmd->offset);
++	if (info->size != cmd->info_size) {
++		kdbus_printf("%s(): size mismatch: %d != %d\n", __func__,
++				(int) info->size, (int) cmd->info_size);
++		return -EIO;
++	}
++
++	if (offset)
++		*offset = cmd->offset;
++	else
++		kdbus_free(conn, cmd->offset);
++
++	return 0;
++}
++
++void kdbus_conn_free(struct kdbus_conn *conn)
++{
++	if (!conn)
++		return;
++
++	if (conn->buf)
++		munmap(conn->buf, POOL_SIZE);
++
++	if (conn->fd >= 0)
++		close(conn->fd);
++
++	free(conn);
++}
++
++int sys_memfd_create(const char *name, __u64 size)
++{
++	int ret, fd;
++
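++	/* the C library may not provide a wrapper, use the raw syscall */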
++	ret = syscall(__NR_memfd_create, name, MFD_ALLOW_SEALING);
++	if (ret < 0)
++		return ret;
++
++	fd = ret;
++
++	ret = ftruncate(fd, size);
++	if (ret < 0) {
++		close(fd);
++		return ret;
++	}
++
++	return fd;
++}
++
++int sys_memfd_seal_set(int fd)
++{
++	return fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK |
++			 F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);
++}
++
++off_t sys_memfd_get_size(int fd, off_t *size)
++{
++	struct stat stat;
++	int ret;
++
++	ret = fstat(fd, &stat);
++	if (ret < 0) {
++		kdbus_printf("stat() failed: %m\n");
++		return ret;
++	}
++
++	*size = stat.st_size;
++	return 0;
++}
++
++static int __kdbus_msg_send(const struct kdbus_conn *conn,
++			    const char *name,
++			    uint64_t cookie,
++			    uint64_t flags,
++			    uint64_t timeout,
++			    int64_t priority,
++			    uint64_t dst_id,
++			    uint64_t cmd_flags,
++			    int cancel_fd)
++{
++	struct kdbus_cmd_send *cmd;
++	struct kdbus_msg *msg;
++	const char ref1[1024 * 128 + 3] = "0123456789_0";
++	const char ref2[] = "0123456789_1";
++	struct kdbus_item *item;
++	struct timespec now;
++	uint64_t size;
++	int memfd = -1;
++	int ret;
++
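++	/*
++	 * The message carries two data vectors plus one padding vector;
++	 * unicast messages additionally get a sealed memfd payload,
++	 * broadcasts carry a bloom filter item instead.
++	 */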
++	size = sizeof(*msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST)
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++	else {
++		memfd = sys_memfd_create("my-name-is-nice", 1024 * 1024);
++		if (memfd < 0) {
++			kdbus_printf("failed to create memfd: %m\n");
++			return memfd;
++		}
++
++		if (write(memfd, "kdbus memfd 1234567", 19) != 19) {
++			ret = -errno;
++			kdbus_printf("writing to memfd failed: %m\n");
++			close(memfd);
++			return ret;
++		}
++
++		ret = sys_memfd_seal_set(memfd);
++		if (ret < 0) {
++			ret = -errno;
++			kdbus_printf("memfd sealing failed: %m\n");
++			close(memfd);
++			return ret;
++		}
++
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
++	}
++
++	if (name)
++		size += KDBUS_ITEM_SIZE(strlen(name) + 1);
++
++	msg = malloc(size);
++	if (!msg) {
++		ret = -errno;
++		kdbus_printf("unable to malloc()!?\n");
++		return ret;
++	}
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST)
++		flags |= KDBUS_MSG_SIGNAL;
++
++	memset(msg, 0, size);
++	msg->flags = flags;
++	msg->priority = priority;
++	msg->size = size;
++	msg->src_id = conn->id;
++	msg->dst_id = name ? 0 : dst_id;
++	msg->cookie = cookie;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	if (timeout) {
++		ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
++		if (ret < 0)
++			return ret;
++
++		msg->timeout_ns = now.tv_sec * 1000000000ULL +
++				  now.tv_nsec + timeout;
++	}
++
++	item = msg->items;
++
++	if (name) {
++		item->type = KDBUS_ITEM_DST_NAME;
++		item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++		strcpy(item->str, name);
++		item = KDBUS_ITEM_NEXT(item);
++	}
++
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)&ref1;
++	item->vec.size = sizeof(ref1);
++	item = KDBUS_ITEM_NEXT(item);
++
++	/* data padding for ref1 */
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)NULL;
++	item->vec.size =  KDBUS_ALIGN8(sizeof(ref1)) - sizeof(ref1);
++	item = KDBUS_ITEM_NEXT(item);
++
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)&ref2;
++	item->vec.size = sizeof(ref2);
++	item = KDBUS_ITEM_NEXT(item);
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST) {
++		item->type = KDBUS_ITEM_BLOOM_FILTER;
++		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++		item->bloom_filter.generation = 0;
++	} else {
++		item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
++		item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_memfd);
++		item->memfd.size = 16;
++		item->memfd.fd = memfd;
++	}
++	item = KDBUS_ITEM_NEXT(item);
++
++	size = sizeof(*cmd);
++	if (cancel_fd != -1)
++		size += KDBUS_ITEM_SIZE(sizeof(cancel_fd));
++
++	cmd = malloc(size);
++	if (!cmd) {
++		kdbus_printf("unable to malloc()!?\n");
++		free(msg);
++		if (memfd >= 0)
++			close(memfd);
++		return -ENOMEM;
++	}
++	cmd->size = size;
++	cmd->flags = cmd_flags;
++	cmd->msg_address = (uintptr_t)msg;
++
++	item = cmd->items;
++
++	if (cancel_fd != -1) {
++		item->type = KDBUS_ITEM_CANCEL_FD;
++		item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(cancel_fd);
++		item->fds[0] = cancel_fd;
++		item = KDBUS_ITEM_NEXT(item);
++	}
++
++	ret = kdbus_cmd_send(conn->fd, cmd);
++	if (memfd >= 0)
++		close(memfd);
++
++	if (ret < 0) {
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	if (cmd_flags & KDBUS_SEND_SYNC_REPLY) {
++		struct kdbus_msg *reply;
++
++		kdbus_printf("SYNC REPLY @offset %llu:\n", cmd->reply.offset);
++		reply = (struct kdbus_msg *)(conn->buf + cmd->reply.offset);
++		kdbus_msg_dump(conn, reply);
++
++		kdbus_msg_free(reply);
++
++		ret = kdbus_free(conn, cmd->reply.offset);
++		if (ret < 0)
++			return ret;
++	}
++
++	free(msg);
++	free(cmd);
++
++	return 0;
++}
++
++int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
++		   uint64_t cookie, uint64_t flags, uint64_t timeout,
++		   int64_t priority, uint64_t dst_id)
++{
++	return __kdbus_msg_send(conn, name, cookie, flags, timeout, priority,
++				dst_id, 0, -1);
++}
++
++int kdbus_msg_send_sync(const struct kdbus_conn *conn, const char *name,
++			uint64_t cookie, uint64_t flags, uint64_t timeout,
++			int64_t priority, uint64_t dst_id, int cancel_fd)
++{
++	return __kdbus_msg_send(conn, name, cookie, flags, timeout, priority,
++				dst_id, KDBUS_SEND_SYNC_REPLY, cancel_fd);
++}
++
++int kdbus_msg_send_reply(const struct kdbus_conn *conn,
++			 uint64_t reply_cookie,
++			 uint64_t dst_id)
++{
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_msg *msg;
++	const char ref1[1024 * 128 + 3] = "0123456789_0";
++	struct kdbus_item *item;
++	uint64_t size;
++	int ret;
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	msg = malloc(size);
++	if (!msg) {
++		kdbus_printf("unable to malloc()!?\n");
++		return -ENOMEM;
++	}
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = conn->id;
++	msg->dst_id = dst_id;
++	msg->cookie_reply = reply_cookie;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	item = msg->items;
++
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)&ref1;
++	item->vec.size = sizeof(ref1);
++	item = KDBUS_ITEM_NEXT(item);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	if (ret < 0)
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++
++	free(msg);
++
++	return ret;
++}
++
++static char *msg_id(uint64_t id, char *buf)
++{
++	if (id == 0)
++		return "KERNEL";
++	if (id == ~0ULL)
++		return "BROADCAST";
++	sprintf(buf, "%llu", (unsigned long long)id);
++	return buf;
++}
++
++int kdbus_msg_dump(const struct kdbus_conn *conn, const struct kdbus_msg *msg)
++{
++	const struct kdbus_item *item = msg->items;
++	char buf_src[32];
++	char buf_dst[32];
++	uint64_t timeout = 0;
++	uint64_t cookie_reply = 0;
++	int ret = 0;
++
++	if (msg->flags & KDBUS_MSG_EXPECT_REPLY)
++		timeout = msg->timeout_ns;
++	else
++		cookie_reply = msg->cookie_reply;
++
++	kdbus_printf("MESSAGE: %s (%llu bytes) flags=0x%08llx, %s → %s, "
++		     "cookie=%llu, timeout=%llu cookie_reply=%llu priority=%lli\n",
++		enum_PAYLOAD(msg->payload_type), (unsigned long long)msg->size,
++		(unsigned long long)msg->flags,
++		msg_id(msg->src_id, buf_src), msg_id(msg->dst_id, buf_dst),
++		(unsigned long long)msg->cookie, (unsigned long long)timeout,
++		(unsigned long long)cookie_reply, (long long)msg->priority);
++
++	KDBUS_ITEM_FOREACH(item, msg, items) {
++		if (item->size < KDBUS_ITEM_HEADER_SIZE) {
++			kdbus_printf("  +%s (%llu bytes) invalid data record\n",
++				     enum_MSG(item->type), item->size);
++			ret = -EINVAL;
++			break;
++		}
++
++		switch (item->type) {
++		case KDBUS_ITEM_PAYLOAD_OFF: {
++			char *s;
++
++			if (item->vec.offset == ~0ULL)
++				s = "[\\0-bytes]";
++			else
++				s = (char *)msg + item->vec.offset;
++
++			kdbus_printf("  +%s (%llu bytes) off=%llu size=%llu '%s'\n",
++			       enum_MSG(item->type), item->size,
++			       (unsigned long long)item->vec.offset,
++			       (unsigned long long)item->vec.size, s);
++			break;
++		}
++
++		case KDBUS_ITEM_FDS: {
++			int i, n = (item->size - KDBUS_ITEM_HEADER_SIZE) /
++					sizeof(int);
++
++			kdbus_printf("  +%s (%llu bytes, %d fds)\n",
++			       enum_MSG(item->type), item->size, n);
++
++			for (i = 0; i < n; i++)
++				kdbus_printf("    fd[%d] = %d\n",
++					     i, item->fds[i]);
++
++			break;
++		}
++
++		case KDBUS_ITEM_PAYLOAD_MEMFD: {
++			char *buf;
++			off_t size;
++
++			buf = mmap(NULL, item->memfd.size, PROT_READ,
++				   MAP_PRIVATE, item->memfd.fd, 0);
++			if (buf == MAP_FAILED) {
++				kdbus_printf("mmap() fd=%i size=%llu failed: %m\n",
++					     item->memfd.fd, item->memfd.size);
++				break;
++			}
++
++			if (sys_memfd_get_size(item->memfd.fd, &size) < 0) {
++				kdbus_printf("KDBUS_CMD_MEMFD_SIZE_GET failed: %m\n");
++				break;
++			}
++
++			kdbus_printf("  +%s (%llu bytes) fd=%i size=%llu filesize=%llu '%s'\n",
++			       enum_MSG(item->type), item->size, item->memfd.fd,
++			       (unsigned long long)item->memfd.size,
++			       (unsigned long long)size, buf);
++			munmap(buf, item->memfd.size);
++			break;
++		}
++
++		case KDBUS_ITEM_CREDS:
++			kdbus_printf("  +%s (%llu bytes) uid=%lld, euid=%lld, suid=%lld, fsuid=%lld, "
++							"gid=%lld, egid=%lld, sgid=%lld, fsgid=%lld\n",
++				enum_MSG(item->type), item->size,
++				item->creds.uid, item->creds.euid,
++				item->creds.suid, item->creds.fsuid,
++				item->creds.gid, item->creds.egid,
++				item->creds.sgid, item->creds.fsgid);
++			break;
++
++		case KDBUS_ITEM_PIDS:
++			kdbus_printf("  +%s (%llu bytes) pid=%lld, tid=%lld, ppid=%lld\n",
++				enum_MSG(item->type), item->size,
++				item->pids.pid, item->pids.tid,
++				item->pids.ppid);
++			break;
++
++		case KDBUS_ITEM_AUXGROUPS: {
++			int i, n;
++
++			kdbus_printf("  +%s (%llu bytes)\n",
++				     enum_MSG(item->type), item->size);
++			n = (item->size - KDBUS_ITEM_HEADER_SIZE) /
++				sizeof(uint64_t);
++
++			for (i = 0; i < n; i++)
++				kdbus_printf("    gid[%d] = %lld\n",
++					     i, item->data64[i]);
++			break;
++		}
++
++		case KDBUS_ITEM_NAME:
++		case KDBUS_ITEM_PID_COMM:
++		case KDBUS_ITEM_TID_COMM:
++		case KDBUS_ITEM_EXE:
++		case KDBUS_ITEM_CGROUP:
++		case KDBUS_ITEM_SECLABEL:
++		case KDBUS_ITEM_DST_NAME:
++		case KDBUS_ITEM_CONN_DESCRIPTION:
++			kdbus_printf("  +%s (%llu bytes) '%s' (%zu)\n",
++				     enum_MSG(item->type), item->size,
++				     item->str, strlen(item->str));
++			break;
++
++		case KDBUS_ITEM_OWNED_NAME: {
++			kdbus_printf("  +%s (%llu bytes) '%s' (%zu) flags=0x%08llx\n",
++				     enum_MSG(item->type), item->size,
++				     item->name.name, strlen(item->name.name),
++				     item->name.flags);
++			break;
++		}
++
++		case KDBUS_ITEM_CMDLINE: {
++			size_t size = item->size - KDBUS_ITEM_HEADER_SIZE;
++			const char *str = item->str;
++			int count = 0;
++
++			kdbus_printf("  +%s (%llu bytes) ",
++				     enum_MSG(item->type), item->size);
++			while (size) {
++				kdbus_printf("'%s' ", str);
++				size -= strlen(str) + 1;
++				str += strlen(str) + 1;
++				count++;
++			}
++
++			kdbus_printf("(%d string%s)\n",
++				     count, (count == 1) ? "" : "s");
++			break;
++		}
++
++		case KDBUS_ITEM_AUDIT:
++			kdbus_printf("  +%s (%llu bytes) loginuid=%u sessionid=%u\n",
++			       enum_MSG(item->type), item->size,
++			       item->audit.loginuid, item->audit.sessionid);
++			break;
++
++		case KDBUS_ITEM_CAPS: {
++			const uint32_t *cap;
++			int n, i;
++
++			kdbus_printf("  +%s (%llu bytes) len=%llu bytes, last_cap %d\n",
++				     enum_MSG(item->type), item->size,
++				     (unsigned long long)item->size -
++					KDBUS_ITEM_HEADER_SIZE,
++				     (int) item->caps.last_cap);
++
++			cap = item->caps.caps;
++			n = (item->size - offsetof(struct kdbus_item, caps.caps))
++				/ 4 / sizeof(uint32_t);
++
++			kdbus_printf("    CapInh=");
++			for (i = 0; i < n; i++)
++				kdbus_printf("%08x", cap[(0 * n) + (n - i - 1)]);
++
++			kdbus_printf(" CapPrm=");
++			for (i = 0; i < n; i++)
++				kdbus_printf("%08x", cap[(1 * n) + (n - i - 1)]);
++
++			kdbus_printf(" CapEff=");
++			for (i = 0; i < n; i++)
++				kdbus_printf("%08x", cap[(2 * n) + (n - i - 1)]);
++
++			kdbus_printf(" CapBnd=");
++			for (i = 0; i < n; i++)
++				kdbus_printf("%08x", cap[(3 * n) + (n - i - 1)]);
++			kdbus_printf("\n");
++			break;
++		}
++
++		case KDBUS_ITEM_TIMESTAMP:
++			kdbus_printf("  +%s (%llu bytes) seq=%llu realtime=%lluns monotonic=%lluns\n",
++			       enum_MSG(item->type), item->size,
++			       (unsigned long long)item->timestamp.seqnum,
++			       (unsigned long long)item->timestamp.realtime_ns,
++			       (unsigned long long)item->timestamp.monotonic_ns);
++			break;
++
++		case KDBUS_ITEM_REPLY_TIMEOUT:
++			kdbus_printf("  +%s (%llu bytes) cookie=%llu\n",
++			       enum_MSG(item->type), item->size,
++			       msg->cookie_reply);
++			break;
++
++		case KDBUS_ITEM_NAME_ADD:
++		case KDBUS_ITEM_NAME_REMOVE:
++		case KDBUS_ITEM_NAME_CHANGE:
++			kdbus_printf("  +%s (%llu bytes) '%s', old id=%lld, now id=%lld, old_flags=0x%llx new_flags=0x%llx\n",
++				enum_MSG(item->type),
++				(unsigned long long) item->size,
++				item->name_change.name,
++				item->name_change.old_id.id,
++				item->name_change.new_id.id,
++				item->name_change.old_id.flags,
++				item->name_change.new_id.flags);
++			break;
++
++		case KDBUS_ITEM_ID_ADD:
++		case KDBUS_ITEM_ID_REMOVE:
++			kdbus_printf("  +%s (%llu bytes) id=%llu flags=%llu\n",
++			       enum_MSG(item->type),
++			       (unsigned long long) item->size,
++			       (unsigned long long) item->id_change.id,
++			       (unsigned long long) item->id_change.flags);
++			break;
++
++		default:
++			kdbus_printf("  +%s (%llu bytes)\n",
++				     enum_MSG(item->type), item->size);
++			break;
++		}
++	}
++
++	if ((char *)item - ((char *)msg + msg->size) >= 8) {
++		kdbus_printf("invalid padding at end of message\n");
++		ret = -EINVAL;
++	}
++
++	kdbus_printf("\n");
++
++	return ret;
++}
++
++void kdbus_msg_free(struct kdbus_msg *msg)
++{
++	const struct kdbus_item *item;
++	int nfds, i;
++
++	if (!msg)
++		return;
++
++	KDBUS_ITEM_FOREACH(item, msg, items) {
++		switch (item->type) {
++		/* close all memfds */
++		case KDBUS_ITEM_PAYLOAD_MEMFD:
++			close(item->memfd.fd);
++			break;
++		case KDBUS_ITEM_FDS:
++			nfds = (item->size - KDBUS_ITEM_HEADER_SIZE) /
++				sizeof(int);
++
++			for (i = 0; i < nfds; i++)
++				close(item->fds[i]);
++
++			break;
++		}
++	}
++}
++
++int kdbus_msg_recv(struct kdbus_conn *conn,
++		   struct kdbus_msg **msg_out,
++		   uint64_t *offset)
++{
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++	struct kdbus_msg *msg;
++	int ret;
++
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	if (ret < 0)
++		return ret;
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++	ret = kdbus_msg_dump(conn, msg);
++	if (ret < 0) {
++		kdbus_msg_free(msg);
++		return ret;
++	}
++
++	if (msg_out) {
++		*msg_out = msg;
++
++		if (offset)
++			*offset = recv.msg.offset;
++	} else {
++		kdbus_msg_free(msg);
++
++		ret = kdbus_free(conn, recv.msg.offset);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++/*
++ * Returns: 0 on success, negative errno on failure.
++ *
++ * Passes -ETIMEDOUT, -ECONNRESET, -EAGAIN and other errors through to
++ * the caller, as well as the result of kdbus_msg_recv().
++ */
++int kdbus_msg_recv_poll(struct kdbus_conn *conn,
++			int timeout_ms,
++			struct kdbus_msg **msg_out,
++			uint64_t *offset)
++{
++	int ret;
++
++	do {
++		struct timeval before, after, diff;
++		struct pollfd fd;
++
++		fd.fd = conn->fd;
++		fd.events = POLLIN | POLLPRI | POLLHUP;
++		fd.revents = 0;
++
++		gettimeofday(&before, NULL);
++		ret = poll(&fd, 1, timeout_ms);
++		gettimeofday(&after, NULL);
++
++		if (ret == 0) {
++			ret = -ETIMEDOUT;
++			break;
++		}
++
++		if (ret > 0) {
++			if (fd.revents & POLLIN)
++				ret = kdbus_msg_recv(conn, msg_out, offset);
++
++			if (fd.revents & (POLLHUP | POLLERR))
++				ret = -ECONNRESET;
++		}
++
++		if (ret == 0 || ret != -EAGAIN)
++			break;
++
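++		/*
++		 * Only -EAGAIN retries; charge the elapsed time against
++		 * the remaining timeout before polling again.
++		 */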
++		timersub(&after, &before, &diff);
++		timeout_ms -= diff.tv_sec * 1000UL +
++			      diff.tv_usec / 1000UL;
++	} while (timeout_ms > 0);
++
++	return ret;
++}
++
++int kdbus_free(const struct kdbus_conn *conn, uint64_t offset)
++{
++	struct kdbus_cmd_free cmd_free = {};
++	int ret;
++
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = offset;
++	cmd_free.flags = 0;
++
++	ret = kdbus_cmd_free(conn->fd, &cmd_free);
++	if (ret < 0) {
++		kdbus_printf("KDBUS_CMD_FREE failed: %d (%m)\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++int kdbus_name_acquire(struct kdbus_conn *conn,
++		       const char *name, uint64_t *flags)
++{
++	struct kdbus_cmd *cmd_name;
++	size_t name_len = strlen(name) + 1;
++	uint64_t size = sizeof(*cmd_name) + KDBUS_ITEM_SIZE(name_len);
++	struct kdbus_item *item;
++	int ret;
++
++	cmd_name = alloca(size);
++
++	memset(cmd_name, 0, size);
++
++	item = cmd_name->items;
++	item->size = KDBUS_ITEM_HEADER_SIZE + name_len;
++	item->type = KDBUS_ITEM_NAME;
++	strcpy(item->str, name);
++
++	cmd_name->size = size;
++	if (flags)
++		cmd_name->flags = *flags;
++
++	ret = kdbus_cmd_name_acquire(conn->fd, cmd_name);
++	if (ret < 0) {
++		kdbus_printf("error aquiring name: %s\n", strerror(-ret));
++		return ret;
++	}
++
++	kdbus_printf("%s(): flags after call: 0x%llx\n", __func__,
++		     cmd_name->return_flags);
++
++	if (flags)
++		*flags = cmd_name->return_flags;
++
++	return 0;
++}
++
++int kdbus_name_release(struct kdbus_conn *conn, const char *name)
++{
++	struct kdbus_cmd *cmd_name;
++	size_t name_len = strlen(name) + 1;
++	uint64_t size = sizeof(*cmd_name) + KDBUS_ITEM_SIZE(name_len);
++	struct kdbus_item *item;
++	int ret;
++
++	cmd_name = alloca(size);
++
++	memset(cmd_name, 0, size);
++
++	item = cmd_name->items;
++	item->size = KDBUS_ITEM_HEADER_SIZE + name_len;
++	item->type = KDBUS_ITEM_NAME;
++	strcpy(item->str, name);
++
++	cmd_name->size = size;
++
++	kdbus_printf("conn %lld giving up name '%s'\n",
++		     (unsigned long long) conn->id, name);
++
++	ret = kdbus_cmd_name_release(conn->fd, cmd_name);
++	if (ret < 0) {
++		kdbus_printf("error releasing name: %s\n", strerror(-ret));
++		return ret;
++	}
++
++	return 0;
++}
++
++int kdbus_list(struct kdbus_conn *conn, uint64_t flags)
++{
++	struct kdbus_cmd_list cmd_list = {};
++	struct kdbus_info *list, *name;
++	int ret;
++
++	cmd_list.size = sizeof(cmd_list);
++	cmd_list.flags = flags;
++
++	ret = kdbus_cmd_list(conn->fd, &cmd_list);
++	if (ret < 0) {
++		kdbus_printf("error listing names: %d (%m)\n", ret);
++		return ret;
++	}
++
++	kdbus_printf("REGISTRY:\n");
++	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
++
++	KDBUS_FOREACH(name, list, cmd_list.list_size) {
++		uint64_t flags = 0;
++		struct kdbus_item *item;
++		const char *n = "MISSING-NAME";
++
++		if (name->size == sizeof(struct kdbus_cmd))
++			continue;
++
++		KDBUS_ITEM_FOREACH(item, name, items)
++			if (item->type == KDBUS_ITEM_OWNED_NAME) {
++				n = item->name.name;
++				flags = item->name.flags;
++			}
++
++		kdbus_printf("%8llu flags=0x%08llx conn=0x%08llx '%s'\n",
++			     name->id, (unsigned long long) flags,
++			     name->flags, n);
++	}
++	kdbus_printf("\n");
++
++	ret = kdbus_free(conn, cmd_list.offset);
++
++	return ret;
++}
++
++int kdbus_conn_update_attach_flags(struct kdbus_conn *conn,
++				   uint64_t attach_flags_send,
++				   uint64_t attach_flags_recv)
++{
++	int ret;
++	size_t size;
++	struct kdbus_cmd *update;
++	struct kdbus_item *item;
++
++	size = sizeof(struct kdbus_cmd);
++	size += KDBUS_ITEM_SIZE(sizeof(uint64_t)) * 2;
++
++	update = malloc(size);
++	if (!update) {
++		kdbus_printf("error malloc: %m\n");
++		return -ENOMEM;
++	}
++
++	memset(update, 0, size);
++	update->size = size;
++
++	item = update->items;
++
++	item->type = KDBUS_ITEM_ATTACH_FLAGS_SEND;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(uint64_t);
++	item->data64[0] = attach_flags_send;
++	item = KDBUS_ITEM_NEXT(item);
++
++	item->type = KDBUS_ITEM_ATTACH_FLAGS_RECV;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(uint64_t);
++	item->data64[0] = attach_flags_recv;
++	item = KDBUS_ITEM_NEXT(item);
++
++	ret = kdbus_cmd_update(conn->fd, update);
++	if (ret < 0)
++		kdbus_printf("error conn update: %d (%m)\n", ret);
++
++	free(update);
++
++	return ret;
++}
++
++int kdbus_conn_update_policy(struct kdbus_conn *conn, const char *name,
++			     const struct kdbus_policy_access *access,
++			     size_t num_access)
++{
++	struct kdbus_cmd *update;
++	struct kdbus_item *item;
++	size_t i, size;
++	int ret;
++
++	size = sizeof(struct kdbus_cmd);
++	size += KDBUS_ITEM_SIZE(strlen(name) + 1);
++	size += num_access * KDBUS_ITEM_SIZE(sizeof(struct kdbus_policy_access));
++
++	update = malloc(size);
++	if (!update) {
++		kdbus_printf("error malloc: %m\n");
++		return -ENOMEM;
++	}
++
++	memset(update, 0, size);
++	update->size = size;
++
++	item = update->items;
++
++	item->type = KDBUS_ITEM_NAME;
++	item->size = KDBUS_ITEM_HEADER_SIZE + strlen(name) + 1;
++	strcpy(item->str, name);
++	item = KDBUS_ITEM_NEXT(item);
++
++	for (i = 0; i < num_access; i++) {
++		item->size = KDBUS_ITEM_HEADER_SIZE +
++			     sizeof(struct kdbus_policy_access);
++		item->type = KDBUS_ITEM_POLICY_ACCESS;
++
++		item->policy_access.type = access[i].type;
++		item->policy_access.access = access[i].access;
++		item->policy_access.id = access[i].id;
++
++		item = KDBUS_ITEM_NEXT(item);
++	}
++
++	ret = kdbus_cmd_update(conn->fd, update);
++	if (ret < 0)
++		kdbus_printf("error conn update: %d (%m)\n", ret);
++
++	free(update);
++
++	return ret;
++}
++
++int kdbus_add_match_id(struct kdbus_conn *conn, uint64_t cookie,
++		       uint64_t type, uint64_t id)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_id_change chg;
++		} item;
++	} buf;
++	int ret;
++
++	memset(&buf, 0, sizeof(buf));
++
++	buf.cmd.size = sizeof(buf);
++	buf.cmd.cookie = cookie;
++	buf.item.size = sizeof(buf.item);
++	buf.item.type = type;
++	buf.item.chg.id = id;
++
++	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
++	if (ret < 0)
++		kdbus_printf("--- error adding conn match: %d (%m)\n", ret);
++
++	return ret;
++}
++
++int kdbus_add_match_empty(struct kdbus_conn *conn)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct kdbus_item item;
++	} buf;
++	int ret;
++
++	memset(&buf, 0, sizeof(buf));
++
++	buf.item.size = sizeof(uint64_t) * 3;
++	buf.item.type = KDBUS_ITEM_ID;
++	buf.item.id = KDBUS_MATCH_ID_ANY;
++
++	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
++
++	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
++	if (ret < 0)
++		kdbus_printf("--- error adding conn match: %d (%m)\n", ret);
++
++	return ret;
++}
++
++static int all_ids_are_mapped(const char *path)
++{
++	int ret;
++	FILE *file;
++	uint32_t inside_id, length;
++
++	file = fopen(path, "r");
++	if (!file) {
++		ret = -errno;
++		kdbus_printf("error fopen() %s: %d (%m)\n",
++			     path, ret);
++		return ret;
++	}
++
++	ret = fscanf(file, "%u\t%*u\t%u", &inside_id, &length);
++	if (ret != 2) {
++		if (ferror(file))
++			ret = -errno;
++		else
++			ret = -EIO;
++
++		kdbus_printf("--- error fscanf(): %d\n", ret);
++		fclose(file);
++		return ret;
++	}
++
++	fclose(file);
++
++	/*
++	 * If the length is 4294967295, i.e. the invalid uid
++	 * (uid_t)-1, then we are able to map all uids/gids.
++	 */
++	if (inside_id == 0 && length == (uid_t) -1)
++		return 1;
++
++	return 0;
++}
++
++int all_uids_gids_are_mapped(void)
++{
++	int ret;
++
++	ret = all_ids_are_mapped("/proc/self/uid_map");
++	if (ret <= 0) {
++		kdbus_printf("--- error not all uids are mapped\n");
++		return 0;
++	}
++
++	ret = all_ids_are_mapped("/proc/self/gid_map");
++	if (ret <= 0) {
++		kdbus_printf("--- error not all gids are mapped\n");
++		return 0;
++	}
++
++	return 1;
++}
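++
++/*
++ * Minimal usage sketch (illustrative): a full mapping shows up in
++ * /proc/self/uid_map as a single "0 0 4294967295" line, and tests
++ * that need it bail out with TEST_SKIP otherwise:
++ *
++ *	if (!all_uids_gids_are_mapped())
++ *		return TEST_SKIP;
++ */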
++
++int drop_privileges(uid_t uid, gid_t gid)
++{
++	int ret;
++
++	ret = setgroups(0, NULL);
++	if (ret < 0) {
++		ret = -errno;
++		kdbus_printf("error setgroups: %d (%m)\n", ret);
++		return ret;
++	}
++
++	ret = setresgid(gid, gid, gid);
++	if (ret < 0) {
++		ret = -errno;
++		kdbus_printf("error setresgid: %d (%m)\n", ret);
++		return ret;
++	}
++
++	ret = setresuid(uid, uid, uid);
++	if (ret < 0) {
++		ret = -errno;
++		kdbus_printf("error setresuid: %d (%m)\n", ret);
++		return ret;
++	}
++
++	return ret;
++}
++
++uint64_t now(clockid_t clock)
++{
++	struct timespec spec;
++
++	clock_gettime(clock, &spec);
++	return spec.tv_sec * 1000ULL * 1000ULL * 1000ULL + spec.tv_nsec;
++}
++
++char *unique_name(const char *prefix)
++{
++	unsigned int i;
++	uint64_t u_now;
++	char n[17];
++	char *str;
++	int r;
++
++	/*
++	 * This returns a random string which is guaranteed to be
++	 * globally unique across all calls to unique_name(). We
++	 * compose the string as:
++	 *   <prefix>-<random>-<time>
++	 * With:
++	 *   <prefix>: string provided by the caller
++	 *   <random>: a random alpha string of 16 characters
++	 *   <time>: the current time in nanoseconds since boot
++	 *
++	 * The <random> part makes the string always look vastly different,
++	 * the <time> part makes sure no two calls return the same string.
++	 */
++
++	u_now = now(CLOCK_MONOTONIC);
++
++	for (i = 0; i < sizeof(n) - 1; ++i)
++		n[i] = 'a' + (rand() % ('z' - 'a'));
++	n[sizeof(n) - 1] = 0;
++
++	r = asprintf(&str, "%s-%s-%" PRIu64, prefix, n, u_now);
++	if (r < 0)
++		return NULL;
++
++	return str;
++}
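++
++/*
++ * Minimal usage sketch; the prefix is an arbitrary caller-chosen
++ * string and the returned string is allocated with asprintf(), so
++ * the caller must free() it:
++ *
++ *	char *busname = unique_name("test-bus");
++ *	if (!busname)
++ *		return -ENOMEM;
++ *	...
++ *	free(busname);
++ */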
++
++static int do_userns_map_id(pid_t pid,
++			    const char *map_file,
++			    const char *map_id)
++{
++	int ret;
++	int fd;
++	char *map;
++	unsigned int i;
++
++	map = strndupa(map_id, strlen(map_id));
++	if (!map) {
++		ret = -errno;
++		kdbus_printf("error strndupa %s: %d (%m)\n",
++			map_file, ret);
++		return ret;
++	}
++
++	for (i = 0; i < strlen(map); i++)
++		if (map[i] == ',')
++			map[i] = '\n';
++
++	fd = open(map_file, O_RDWR);
++	if (fd < 0) {
++		ret = -errno;
++		kdbus_printf("error open %s: %d (%m)\n",
++			map_file, ret);
++		return ret;
++	}
++
++	ret = write(fd, map, strlen(map));
++	if (ret < 0) {
++		ret = -errno;
++		kdbus_printf("error write to %s: %d (%m)\n",
++			     map_file, ret);
++		goto out;
++	}
++
++	ret = 0;
++
++out:
++	close(fd);
++	return ret;
++}
++
++int userns_map_uid_gid(pid_t pid,
++		       const char *map_uid,
++		       const char *map_gid)
++{
++	int fd, ret;
++	char file_id[128] = {'\0'};
++
++	snprintf(file_id, sizeof(file_id), "/proc/%ld/uid_map",
++		 (long) pid);
++
++	ret = do_userns_map_id(pid, file_id, map_uid);
++	if (ret < 0)
++		return ret;
++
++	snprintf(file_id, sizeof(file_id), "/proc/%ld/setgroups",
++		 (long) pid);
++
++	fd = open(file_id, O_WRONLY);
++	if (fd >= 0) {
++		write(fd, "deny\n", 5);
++		close(fd);
++	}
++
++	snprintf(file_id, sizeof(file_id), "/proc/%ld/gid_map",
++		 (long) pid);
++
++	return do_userns_map_id(pid, file_id, map_gid);
++}
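++
++/*
++ * Illustrative sketch of the expected map format: map_uid/map_gid
++ * are "inside-id outside-id length" triples, multiple triples being
++ * separated by ',' (rewritten to '\n' by do_userns_map_id() before
++ * the write):
++ *
++ *	ret = userns_map_uid_gid(pid, "0 65534 1", "0 65534 1");
++ */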
++
++static int do_cap_get_flag(cap_t caps, cap_value_t cap)
++{
++	int ret;
++	cap_flag_value_t flag_set;
++
++	ret = cap_get_flag(caps, cap, CAP_EFFECTIVE, &flag_set);
++	if (ret < 0) {
++		ret = -errno;
++		kdbus_printf("error cap_get_flag(): %d (%m)\n", ret);
++		return ret;
++	}
++
++	return (flag_set == CAP_SET);
++}
++
++/*
++ * Returns:
++ *  1 in case all the requested effective capabilities are set.
++ *  0 in case we do not have the requested capabilities. This value
++ *    will be used to abort tests with TEST_SKIP
++ *  Negative errno on failure.
++ *
++ *  Terminate args with a negative value.
++ */
++int test_is_capable(int cap, ...)
++{
++	int ret;
++	va_list ap;
++	cap_t caps;
++
++	caps = cap_get_proc();
++	if (!caps) {
++		ret = -errno;
++		kdbus_printf("error cap_get_proc(): %d (%m)\n", ret);
++		return ret;
++	}
++
++	ret = do_cap_get_flag(caps, (cap_value_t)cap);
++	if (ret <= 0)
++		goto out;
++
++	va_start(ap, cap);
++	while ((cap = va_arg(ap, int)) > 0) {
++		ret = do_cap_get_flag(caps, (cap_value_t)cap);
++		if (ret <= 0)
++			break;
++	}
++	va_end(ap);
++
++out:
++	cap_free(caps);
++	return ret;
++}
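++
++/*
++ * Minimal usage sketch; the variadic list is a series of CAP_*
++ * values terminated by a negative value, as the comment above
++ * requires:
++ *
++ *	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++ *	if (ret < 0)
++ *		return ret;
++ *	if (!ret)
++ *		return TEST_SKIP;
++ */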
++
++int config_user_ns_is_enabled(void)
++{
++	return (access("/proc/self/uid_map", F_OK) == 0);
++}
++
++int config_auditsyscall_is_enabled(void)
++{
++	return (access("/proc/self/loginuid", F_OK) == 0);
++}
++
++int config_cgroups_is_enabled(void)
++{
++	return (access("/proc/self/cgroup", F_OK) == 0);
++}
++
++int config_security_is_enabled(void)
++{
++	int fd;
++	int ret;
++	char buf[128];
++
++	/* CONFIG_SECURITY is disabled */
++	if (access("/proc/self/attr/current", F_OK) != 0)
++		return 0;
++
++	/*
++	 * Only if read() fails with EINVAL do we assume that
++	 * SECLABEL and LSM are disabled.
++	 */
++	fd = open("/proc/self/attr/current", O_RDONLY|O_CLOEXEC);
++	if (fd < 0)
++		return 1;
++
++	ret = read(fd, buf, sizeof(buf));
++	if (ret == -1 && errno == EINVAL)
++		ret = 0;
++	else
++		ret = 1;
++
++	close(fd);
++
++	return ret;
++}
+diff --git a/tools/testing/selftests/kdbus/kdbus-util.h b/tools/testing/selftests/kdbus/kdbus-util.h
+new file mode 100644
+index 0000000..50ff071
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/kdbus-util.h
+@@ -0,0 +1,222 @@
++/*
++ * Copyright (C) 2013-2015 Kay Sievers
++ * Copyright (C) 2013-2015 Daniel Mack
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++#pragma once
++
++#define BIT(X) (1 << (X))
++
++#include <time.h>
++#include <stdbool.h>
++#include <linux/kdbus.h>
++
++#define _STRINGIFY(x) #x
++#define STRINGIFY(x) _STRINGIFY(x)
++#define ELEMENTSOF(x) (sizeof(x)/sizeof((x)[0]))
++
++#define KDBUS_PTR(addr) ((void *)(uintptr_t)(addr))
++
++#define KDBUS_ALIGN8(l) (((l) + 7) & ~7)
++#define KDBUS_ITEM_HEADER_SIZE offsetof(struct kdbus_item, data)
++#define KDBUS_ITEM_SIZE(s) KDBUS_ALIGN8((s) + KDBUS_ITEM_HEADER_SIZE)
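++
++/*
++ * Example (illustrative): KDBUS_ALIGN8() rounds up to the next
++ * multiple of 8, so with the usual 16-byte item header a 5-byte
++ * payload occupies KDBUS_ITEM_SIZE(5) == KDBUS_ALIGN8(5 + 16) == 24
++ * bytes.
++ */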
++
++#define KDBUS_ITEM_NEXT(item) \
++	(typeof(item))(((uint8_t *)item) + KDBUS_ALIGN8((item)->size))
++#define KDBUS_ITEM_FOREACH(item, head, first)				\
++	for (item = (head)->first;					\
++	     ((uint8_t *)(item) < (uint8_t *)(head) + (head)->size) &&	\
++	       ((uint8_t *)(item) >= (uint8_t *)(head));	\
++	     item = KDBUS_ITEM_NEXT(item))
++#define KDBUS_FOREACH(iter, first, _size)				\
++	for (iter = (first);						\
++	     ((uint8_t *)(iter) < (uint8_t *)(first) + (_size)) &&	\
++	       ((uint8_t *)(iter) >= (uint8_t *)(first));		\
++	     iter = (void*)(((uint8_t *)iter) + KDBUS_ALIGN8((iter)->size)))
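++
++/*
++ * Minimal iteration sketch (illustrative); this is the pattern
++ * kdbus_list() uses to walk a returned kdbus_info object and its
++ * attached items:
++ *
++ *	struct kdbus_item *item;
++ *
++ *	KDBUS_ITEM_FOREACH(item, info, items)
++ *		if (item->type == KDBUS_ITEM_OWNED_NAME)
++ *			kdbus_printf("%s\n", item->name.name);
++ */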
++
++
++#define _KDBUS_ATTACH_BITS_SET_NR  (__builtin_popcountll(_KDBUS_ATTACH_ALL))
++
++/* Sum of KDBUS_ITEM_* that reflects _KDBUS_ATTACH_ALL */
++#define KDBUS_ATTACH_ITEMS_TYPE_SUM \
++	((((_KDBUS_ATTACH_BITS_SET_NR - 1) * \
++	((_KDBUS_ATTACH_BITS_SET_NR - 1) + 1)) / 2 ) + \
++	(_KDBUS_ITEM_ATTACH_BASE * _KDBUS_ATTACH_BITS_SET_NR))
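++
++/*
++ * Derivation (for reference): the attach items are the N consecutive
++ * type values _KDBUS_ITEM_ATTACH_BASE .. _KDBUS_ITEM_ATTACH_BASE+N-1,
++ * with N = _KDBUS_ATTACH_BITS_SET_NR, so their sum is
++ * N * _KDBUS_ITEM_ATTACH_BASE + N * (N - 1) / 2, which is exactly
++ * what the macro above computes.
++ */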
++
++
++#define POOL_SIZE (16 * 1024LU * 1024LU)
++
++#define UNPRIV_UID 65534
++#define UNPRIV_GID 65534
++
++/* Dump core as the user of the process; useful for user-namespace testing */
++#define SUID_DUMP_USER	1
++
++extern int kdbus_util_verbose;
++
++#define kdbus_printf(X...) \
++	if (kdbus_util_verbose) \
++		printf(X)
++
++#define RUN_UNPRIVILEGED(child_uid, child_gid, _child_, _parent_) ({	\
++		pid_t pid, rpid;					\
++		int ret;						\
++									\
++		pid = fork();						\
++		if (pid == 0) {						\
++			ret = drop_privileges(child_uid, child_gid);	\
++			ASSERT_EXIT_VAL(ret == 0, ret);			\
++									\
++			_child_;					\
++			_exit(0);					\
++		} else if (pid > 0) {					\
++			_parent_;					\
++			rpid = waitpid(pid, &ret, 0);			\
++			ASSERT_RETURN(rpid == pid);			\
++			ASSERT_RETURN(WIFEXITED(ret));			\
++			ASSERT_RETURN(WEXITSTATUS(ret) == 0);		\
++			ret = TEST_OK;					\
++		} else {						\
++			ret = pid;					\
++		}							\
++									\
++		ret;							\
++	})
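++
++/*
++ * Minimal usage sketch; the first block runs in a fork()ed child
++ * after dropping to the given uid/gid, the second in the parent
++ * (do_unprivileged_work() is a hypothetical test body):
++ *
++ *	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++ *		ret = do_unprivileged_work();
++ *		ASSERT_EXIT(ret == 0);
++ *	}), ({ 0; }));
++ */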
++
++#define RUN_UNPRIVILEGED_CONN(_var_, _bus_, _code_)			\
++	RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({			\
++		struct kdbus_conn *_var_;				\
++		_var_ = kdbus_hello(_bus_, 0, NULL, 0);			\
++		ASSERT_EXIT(_var_);					\
++		_code_;							\
++		kdbus_conn_free(_var_);					\
++	}), ({ 0; }))
++
++#define RUN_CLONE_CHILD(clone_ret, flags, _setup_, _child_body_,	\
++			_parent_setup_, _parent_body_) ({		\
++	pid_t pid, rpid;						\
++	int ret;							\
++	int efd = -1;							\
++									\
++	_setup_;							\
++	efd = eventfd(0, EFD_CLOEXEC);					\
++	ASSERT_RETURN(efd >= 0);					\
++	*clone_ret = 0;							\
++	pid = syscall(__NR_clone, flags, NULL);				\
++	if (pid == 0) {							\
++		eventfd_t event_status = 0;				\
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);			\
++		ASSERT_EXIT(ret == 0);					\
++		ret = eventfd_read(efd, &event_status);			\
++		if (ret < 0 || event_status != 1) {			\
++			kdbus_printf("error eventfd_read()\n");		\
++			_exit(EXIT_FAILURE);				\
++		}							\
++		_child_body_;						\
++		_exit(0);						\
++	} else if (pid > 0) {						\
++		_parent_setup_;						\
++		ret = eventfd_write(efd, 1);				\
++		ASSERT_RETURN(ret >= 0);				\
++		_parent_body_;						\
++		rpid = waitpid(pid, &ret, 0);				\
++		ASSERT_RETURN(rpid == pid);				\
++		ASSERT_RETURN(WIFEXITED(ret));				\
++		ASSERT_RETURN(WEXITSTATUS(ret) == 0);			\
++		ret = TEST_OK;						\
++	} else {							\
++		ret = -errno;						\
++		*clone_ret = -errno;					\
++	}								\
++	close(efd);							\
++	ret;								\
++})
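++
++/*
++ * Note on the eventfd above: it acts as a start barrier. The
++ * clone()d child blocks in eventfd_read() until the parent has run
++ * _parent_setup_ and posted the event, so _child_body_ never races
++ * against a half-initialized parent.
++ */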
++
++/* Enum that tells the parent whether it should drop privileges or not */
++enum kdbus_drop_parent {
++	DO_NOT_DROP,
++	DROP_SAME_UNPRIV,
++	DROP_OTHER_UNPRIV,
++};
++
++struct kdbus_conn {
++	int fd;
++	uint64_t id;
++	unsigned char *buf;
++};
++
++int kdbus_sysfs_get_parameter_mask(const char *path, uint64_t *mask);
++int kdbus_sysfs_set_parameter_mask(const char *path, uint64_t mask);
++
++int sys_memfd_create(const char *name, __u64 size);
++int sys_memfd_seal_set(int fd);
++off_t sys_memfd_get_size(int fd, off_t *size);
++
++int kdbus_list(struct kdbus_conn *conn, uint64_t flags);
++int kdbus_name_release(struct kdbus_conn *conn, const char *name);
++int kdbus_name_acquire(struct kdbus_conn *conn, const char *name,
++		       uint64_t *flags);
++void kdbus_msg_free(struct kdbus_msg *msg);
++int kdbus_msg_recv(struct kdbus_conn *conn,
++		   struct kdbus_msg **msg, uint64_t *offset);
++int kdbus_msg_recv_poll(struct kdbus_conn *conn, int timeout_ms,
++			struct kdbus_msg **msg_out, uint64_t *offset);
++int kdbus_free(const struct kdbus_conn *conn, uint64_t offset);
++int kdbus_msg_dump(const struct kdbus_conn *conn,
++		   const struct kdbus_msg *msg);
++int kdbus_create_bus(int control_fd, const char *name,
++		     uint64_t req_meta, uint64_t owner_meta,
++		     char **path);
++int kdbus_msg_send(const struct kdbus_conn *conn, const char *name,
++		   uint64_t cookie, uint64_t flags, uint64_t timeout,
++		   int64_t priority, uint64_t dst_id);
++int kdbus_msg_send_sync(const struct kdbus_conn *conn, const char *name,
++			uint64_t cookie, uint64_t flags, uint64_t timeout,
++			int64_t priority, uint64_t dst_id, int cancel_fd);
++int kdbus_msg_send_reply(const struct kdbus_conn *conn,
++			 uint64_t reply_cookie,
++			 uint64_t dst_id);
++struct kdbus_conn *kdbus_hello(const char *path, uint64_t hello_flags,
++			       const struct kdbus_item *item,
++			       size_t item_size);
++struct kdbus_conn *kdbus_hello_registrar(const char *path, const char *name,
++					 const struct kdbus_policy_access *access,
++					 size_t num_access, uint64_t flags);
++struct kdbus_conn *kdbus_hello_activator(const char *path, const char *name,
++					 const struct kdbus_policy_access *access,
++					 size_t num_access);
++bool kdbus_item_in_message(struct kdbus_msg *msg, uint64_t type);
++int kdbus_bus_creator_info(struct kdbus_conn *conn,
++			   uint64_t flags,
++			   uint64_t *offset);
++int kdbus_conn_info(struct kdbus_conn *conn, uint64_t id,
++		    const char *name, uint64_t flags, uint64_t *offset);
++void kdbus_conn_free(struct kdbus_conn *conn);
++int kdbus_conn_update_attach_flags(struct kdbus_conn *conn,
++				   uint64_t attach_flags_send,
++				   uint64_t attach_flags_recv);
++int kdbus_conn_update_policy(struct kdbus_conn *conn, const char *name,
++			     const struct kdbus_policy_access *access,
++			     size_t num_access);
++
++int kdbus_add_match_id(struct kdbus_conn *conn, uint64_t cookie,
++		       uint64_t type, uint64_t id);
++int kdbus_add_match_empty(struct kdbus_conn *conn);
++
++int all_uids_gids_are_mapped(void);
++int drop_privileges(uid_t uid, gid_t gid);
++uint64_t now(clockid_t clock);
++char *unique_name(const char *prefix);
++
++int userns_map_uid_gid(pid_t pid,
++		       const char *map_uid,
++		       const char *map_gid);
++int test_is_capable(int cap, ...);
++int config_user_ns_is_enabled(void);
++int config_auditsyscall_is_enabled(void);
++int config_cgroups_is_enabled(void);
++int config_security_is_enabled(void);
+diff --git a/tools/testing/selftests/kdbus/test-activator.c b/tools/testing/selftests/kdbus/test-activator.c
+new file mode 100644
+index 0000000..3d1b763
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-activator.c
+@@ -0,0 +1,318 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stdbool.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <sys/capability.h>
++#include <sys/types.h>
++#include <sys/wait.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++static int kdbus_starter_poll(struct kdbus_conn *conn)
++{
++	int ret;
++	struct pollfd fd;
++
++	fd.fd = conn->fd;
++	fd.events = POLLIN | POLLPRI | POLLHUP;
++	fd.revents = 0;
++
++	ret = poll(&fd, 1, 100);
++	if (ret == 0)
++		return -ETIMEDOUT;
++	else if (ret > 0) {
++		if (fd.revents & POLLIN)
++			return 0;
++
++		if (fd.revents & (POLLHUP | POLLERR))
++			ret = -ECONNRESET;
++	}
++
++	return ret;
++}
++
++/* Ensure that kdbus activator logic is safe */
++static int kdbus_priv_activator(struct kdbus_test_env *env)
++{
++	int ret;
++	struct kdbus_msg *msg = NULL;
++	uint64_t cookie = 0xdeadbeef;
++	uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
++	struct kdbus_conn *activator;
++	struct kdbus_conn *service;
++	struct kdbus_conn *client;
++	struct kdbus_conn *holder;
++	struct kdbus_policy_access *access;
++
++	access = (struct kdbus_policy_access[]){
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = getuid(),
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = getuid(),
++			.access = KDBUS_POLICY_TALK,
++		},
++	};
++
++	activator = kdbus_hello_activator(env->buspath, "foo.priv.activator",
++					  access, 2);
++	ASSERT_RETURN(activator);
++
++	service = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(service);
++
++	client = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(client);
++
++	/*
++	 * Make sure that other users can't TALK to the activator
++	 */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		/* Try to talk using the ID */
++		ret = kdbus_msg_send(unpriv, NULL, 0xdeadbeef, 0, 0,
++				     0, activator->id);
++		ASSERT_EXIT(ret == -ENXIO);
++
++		/* Try to talk to the name */
++		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
++				     0xdeadbeef, 0, 0, 0,
++				     KDBUS_DST_ID_NAME);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure that we did not receive anything, so the
++	 * service will not be started automatically
++	 */
++
++	ret = kdbus_starter_poll(activator);
++	ASSERT_RETURN(ret == -ETIMEDOUT);
++
++	/*
++	 * Now try to emulate the starter/service logic and
++	 * acquire the name.
++	 */
++
++	cookie++;
++	ret = kdbus_msg_send(service, "foo.priv.activator", cookie,
++			     0, 0, 0, KDBUS_DST_ID_NAME);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_starter_poll(activator);
++	ASSERT_RETURN(ret == 0);
++
++	/* Policies are still checked, access denied */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "foo.priv.activator",
++					 &flags);
++		ASSERT_RETURN(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_name_acquire(service, "foo.priv.activator",
++				 &flags);
++	ASSERT_RETURN(ret == 0);
++
++	/* We read our previous starter message */
++
++	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* Try to talk, we still fail */
++
++	cookie++;
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		/* Try to talk to the name */
++		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
++				     cookie, 0, 0, 0,
++				     KDBUS_DST_ID_NAME);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/* Still nothing to read */
++
++	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
++	ASSERT_RETURN(ret == -ETIMEDOUT);
++
++	/* We receive everything now */
++
++	cookie++;
++	ret = kdbus_msg_send(client, "foo.priv.activator", cookie,
++			     0, 0, 0, KDBUS_DST_ID_NAME);
++	ASSERT_RETURN(ret == 0);
++	ret = kdbus_msg_recv_poll(service, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	/* Policies default to deny TALK now */
++	kdbus_conn_free(activator);
++
++	cookie++;
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		/* Try to talk to the name */
++		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
++				     cookie, 0, 0, 0,
++				     KDBUS_DST_ID_NAME);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
++	ASSERT_RETURN(ret == -ETIMEDOUT);
++
++	/* Same user is able to TALK */
++	cookie++;
++	ret = kdbus_msg_send(client, "foo.priv.activator", cookie,
++			     0, 0, 0, KDBUS_DST_ID_NAME);
++	ASSERT_RETURN(ret == 0);
++	ret = kdbus_msg_recv_poll(service, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	access = (struct kdbus_policy_access []){
++		{
++			.type = KDBUS_POLICY_ACCESS_WORLD,
++			.id = getuid(),
++			.access = KDBUS_POLICY_TALK,
++		},
++	};
++
++	holder = kdbus_hello_registrar(env->buspath, "foo.priv.activator",
++				       access, 1, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(holder);
++
++	/* Now we are able to TALK to the name */
++
++	cookie++;
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		/* Try to talk to the name */
++		ret = kdbus_msg_send(unpriv, "foo.priv.activator",
++				     cookie, 0, 0, 0,
++				     KDBUS_DST_ID_NAME);
++		ASSERT_EXIT(ret == 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(service, 100, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "foo.priv.activator",
++					 &flags);
++		ASSERT_RETURN(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	kdbus_conn_free(service);
++	kdbus_conn_free(client);
++	kdbus_conn_free(holder);
++
++	return 0;
++}
++
++int kdbus_test_activator(struct kdbus_test_env *env)
++{
++	int ret;
++	struct kdbus_conn *activator;
++	struct pollfd fds[2];
++	bool activator_done = false;
++	struct kdbus_policy_access access[2];
++
++	access[0].type = KDBUS_POLICY_ACCESS_USER;
++	access[0].id = getuid();
++	access[0].access = KDBUS_POLICY_OWN;
++
++	access[1].type = KDBUS_POLICY_ACCESS_WORLD;
++	access[1].access = KDBUS_POLICY_TALK;
++
++	activator = kdbus_hello_activator(env->buspath, "foo.test.activator",
++					  access, 2);
++	ASSERT_RETURN(activator);
++
++	ret = kdbus_add_match_empty(env->conn);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_list(env->conn, KDBUS_LIST_NAMES |
++				    KDBUS_LIST_UNIQUE |
++				    KDBUS_LIST_ACTIVATORS |
++				    KDBUS_LIST_QUEUED);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_send(env->conn, "foo.test.activator", 0xdeafbeef,
++			     0, 0, 0, KDBUS_DST_ID_NAME);
++	ASSERT_RETURN(ret == 0);
++
++	fds[0].fd = activator->fd;
++	fds[1].fd = env->conn->fd;
++
++	kdbus_printf("-- entering poll loop ...\n");
++
++	for (;;) {
++		int i, nfds = sizeof(fds) / sizeof(fds[0]);
++
++		for (i = 0; i < nfds; i++) {
++			fds[i].events = POLLIN | POLLPRI;
++			fds[i].revents = 0;
++		}
++
++		ret = poll(fds, nfds, 3000);
++		ASSERT_RETURN(ret >= 0);
++
++		ret = kdbus_list(env->conn, KDBUS_LIST_NAMES);
++		ASSERT_RETURN(ret == 0);
++
++		if ((fds[0].revents & POLLIN) && !activator_done) {
++			uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
++
++			kdbus_printf("Starter was called back!\n");
++
++			ret = kdbus_name_acquire(env->conn,
++						 "foo.test.activator", &flags);
++			ASSERT_RETURN(ret == 0);
++
++			activator_done = true;
++		}
++
++		if (fds[1].revents & POLLIN) {
++			kdbus_msg_recv(env->conn, NULL, NULL);
++			break;
++		}
++	}
++
++	/* Check if all uids/gids are mapped */
++	if (!all_uids_gids_are_mapped())
++		return TEST_SKIP;
++
++	/* Check capabilities only now, so the previous tests still ran */
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	if (!ret)
++		return TEST_SKIP;
++
++	ret = kdbus_priv_activator(env);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(activator);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-attach-flags.c b/tools/testing/selftests/kdbus/test-attach-flags.c
+new file mode 100644
+index 0000000..deee7c3
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-attach-flags.c
+@@ -0,0 +1,750 @@
++#include <stdio.h>
++#include <string.h>
++#include <stdlib.h>
++#include <stdbool.h>
++#include <stddef.h>
++#include <fcntl.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <sys/capability.h>
++#include <sys/mman.h>
++#include <sys/stat.h>
++#include <sys/types.h>
++#include <linux/unistd.h>
++
++#include "kdbus-api.h"
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++/*
++ * Should be the sum of the currently supported and compiled-in
++ * KDBUS_ITEMS_* that reflect KDBUS_ATTACH_* flags.
++ */
++static unsigned int KDBUS_TEST_ITEMS_SUM = KDBUS_ATTACH_ITEMS_TYPE_SUM;
++
++static struct kdbus_conn *__kdbus_hello(const char *path, uint64_t flags,
++					uint64_t attach_flags_send,
++					uint64_t attach_flags_recv)
++{
++	struct kdbus_cmd_free cmd_free = {};
++	int ret, fd;
++	struct kdbus_conn *conn;
++	struct {
++		struct kdbus_cmd_hello hello;
++
++		struct {
++			uint64_t size;
++			uint64_t type;
++			char str[16];
++		} conn_name;
++
++		uint8_t extra_items[0];
++	} h;
++
++	memset(&h, 0, sizeof(h));
++
++	kdbus_printf("-- opening bus connection %s\n", path);
++	fd = open(path, O_RDWR|O_CLOEXEC);
++	if (fd < 0) {
++		kdbus_printf("--- error %d (%m)\n", fd);
++		return NULL;
++	}
++
++	h.hello.flags = flags | KDBUS_HELLO_ACCEPT_FD;
++	h.hello.attach_flags_send = attach_flags_send;
++	h.hello.attach_flags_recv = attach_flags_recv;
++	h.conn_name.type = KDBUS_ITEM_CONN_DESCRIPTION;
++	strcpy(h.conn_name.str, "this-is-my-name");
++	h.conn_name.size = KDBUS_ITEM_HEADER_SIZE + strlen(h.conn_name.str) + 1;
++
++	h.hello.size = sizeof(h);
++	h.hello.pool_size = POOL_SIZE;
++
++	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) &h.hello);
++	if (ret < 0) {
++		kdbus_printf("--- error when saying hello: %d (%m)\n", ret);
++		close(fd);
++		return NULL;
++	}
++
++	kdbus_printf("-- New connection ID : %llu\n",
++		     (unsigned long long)h.hello.id);
++
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = h.hello.offset;
++	ret = kdbus_cmd_free(fd, &cmd_free);
++	if (ret < 0) {
++		close(fd);
++		return NULL;
++	}
++
++	conn = malloc(sizeof(*conn));
++	if (!conn) {
++		kdbus_printf("unable to malloc()!?\n");
++		close(fd);
++		return NULL;
++	}
++
++	conn->buf = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
++	if (conn->buf == MAP_FAILED) {
++		ret = -errno;
++		free(conn);
++		close(fd);
++		kdbus_printf("--- error mmap: %d (%m)\n", ret);
++		return NULL;
++	}
++
++	conn->fd = fd;
++	conn->id = h.hello.id;
++	return conn;
++}
++
++static int kdbus_test_peers_creation(struct kdbus_test_env *env)
++{
++	int ret;
++	int control_fd;
++	char *path;
++	char *busname;
++	char buspath[2048];
++	char control_path[2048];
++	uint64_t attach_flags_mask;
++	struct kdbus_conn *conn;
++
++	snprintf(control_path, sizeof(control_path),
++		 "%s/control", env->root);
++
++	/*
++	 * Set the kdbus system-wide mask to 0. This has nothing to
++	 * do with the following tests (bus and connection creation,
++	 * connection update), but we do it to be sure that
++	 * everything works as expected.
++	 */
++
++	attach_flags_mask = 0;
++	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++					     attach_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++
++	/*
++	 * Create bus with a full set of ATTACH flags
++	 */
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peers-creation-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, _KDBUS_ATTACH_ALL,
++			       0, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	/*
++	 * Create a connection with an empty send attach flags, or
++	 * with just KDBUS_ATTACH_CREDS, this should fail
++	 */
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn == NULL);
++	ASSERT_RETURN(errno == ECONNREFUSED);
++
++	conn = __kdbus_hello(buspath, 0, KDBUS_ATTACH_CREDS,
++			     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(conn == NULL);
++	ASSERT_RETURN(errno == ECONNREFUSED);
++
++	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(conn);
++
++	/* Try to cut back some send attach flags */
++	ret = kdbus_conn_update_attach_flags(conn,
++					     KDBUS_ATTACH_CREDS|
++					     KDBUS_ATTACH_PIDS,
++					     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	ret = kdbus_conn_update_attach_flags(conn,
++					     _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++
++	/* Test a new bus with KDBUS_ATTACH_PIDS */
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peer-flags-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, KDBUS_ATTACH_PIDS,
++			       0, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	/*
++	 * Create a connection with an empty send attach flags, or
++	 * all flags except KDBUS_ATTACH_PIDS
++	 */
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn == NULL);
++	ASSERT_RETURN(errno == ECONNREFUSED);
++
++	conn = __kdbus_hello(buspath, 0,
++			     _KDBUS_ATTACH_ALL & ~KDBUS_ATTACH_PIDS,
++			     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(conn == NULL);
++	ASSERT_RETURN(errno == ECONNREFUSED);
++
++	/* The following should succeed */
++	conn = __kdbus_hello(buspath, 0, KDBUS_ATTACH_PIDS, 0);
++	ASSERT_RETURN(conn);
++	kdbus_conn_free(conn);
++
++	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_conn_update_attach_flags(conn,
++					     _KDBUS_ATTACH_ALL &
++					     ~KDBUS_ATTACH_PIDS,
++					     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	ret = kdbus_conn_update_attach_flags(conn, 0,
++					     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* Now we want only KDBUS_ATTACH_PIDS */
++	ret = kdbus_conn_update_attach_flags(conn,
++					     KDBUS_ATTACH_PIDS, 0);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++
++	/*
++	 * Create bus with 0 as ATTACH flags, the bus does not
++	 * require any attach flags
++	 */
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peer-flags-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, 0, 0, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	/* The bus is open; it does not require any send attach flags */
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn);
++	kdbus_conn_free(conn);
++
++	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_conn_update_attach_flags(conn, 0, 0);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_attach_flags(conn, KDBUS_ATTACH_CREDS, 0);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++	return 0;
++}
++
++static int kdbus_test_peers_info(struct kdbus_test_env *env)
++{
++	int ret;
++	int control_fd;
++	char *path;
++	char *busname;
++	unsigned int i = 0;
++	uint64_t offset = 0;
++	char buspath[2048];
++	char control_path[2048];
++	uint64_t attach_flags_mask;
++	struct kdbus_item *item;
++	struct kdbus_info *info;
++	struct kdbus_conn *conn;
++	struct kdbus_conn *reader;
++	unsigned long long attach_count = 0;
++
++	snprintf(control_path, sizeof(control_path),
++		 "%s/control", env->root);
++
++	attach_flags_mask = 0;
++	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++					     attach_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peers-info-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, _KDBUS_ATTACH_ALL,
++			       0, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	/* Create connections with the appropriate flags */
++	conn = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(conn);
++
++	reader = __kdbus_hello(buspath, 0, _KDBUS_ATTACH_ALL, 0);
++	ASSERT_RETURN(reader);
++
++	ret = kdbus_conn_info(reader, conn->id, NULL,
++			      _KDBUS_ATTACH_ALL, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(reader->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	/* all attach flags are masked, no metadata */
++	KDBUS_ITEM_FOREACH(item, info, items)
++		i++;
++
++	ASSERT_RETURN(i == 0);
++
++	kdbus_free(reader, offset);
++
++	/* Set the mask to _KDBUS_ATTACH_ANY */
++	attach_flags_mask = _KDBUS_ATTACH_ANY;
++	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++					     attach_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_info(reader, conn->id, NULL,
++			      _KDBUS_ATTACH_ALL, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(reader->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	attach_count = 0;
++	KDBUS_ITEM_FOREACH(item, info, items)
++		    attach_count += item->type;
++
++	/*
++	 * All items have been returned except for
++	 * KDBUS_ITEM_TIMESTAMP and KDBUS_ITEM_OWNED_NAME,
++	 * since we do not own any name.
++	 */
++	ASSERT_RETURN(attach_count == (KDBUS_TEST_ITEMS_SUM -
++				       KDBUS_ITEM_OWNED_NAME -
++				       KDBUS_ITEM_TIMESTAMP));
++
++	kdbus_free(reader, offset);
++
++	/* Request only OWNED names */
++	ret = kdbus_conn_info(reader, conn->id, NULL,
++			      KDBUS_ATTACH_NAMES, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(reader->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	attach_count = 0;
++	KDBUS_ITEM_FOREACH(item, info, items)
++		attach_count += item->type;
++
++	/* we should not get any metadata since we do not own names */
++	ASSERT_RETURN(attach_count == 0);
++
++	kdbus_free(reader, offset);
++
++	kdbus_conn_free(conn);
++	kdbus_conn_free(reader);
++
++	return 0;
++}
++
++/**
++ * @kdbus_mask_param:	kdbus module mask parameter (system-wide)
++ * @requested_meta:	The bus owner metadata that we want
++ * @expected_items:	The returned KDBUS_ITEMS_* sum. Used to
++ *			validate the returned metadata items
++ */
++static int kdbus_cmp_bus_creator_metadata(struct kdbus_test_env *env,
++					  struct kdbus_conn *conn,
++					  uint64_t kdbus_mask_param,
++					  uint64_t requested_meta,
++					  unsigned long expected_items)
++{
++	int ret;
++	uint64_t offset = 0;
++	struct kdbus_info *info;
++	struct kdbus_item *item;
++	unsigned long attach_count = 0;
++
++	ret = kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++					     kdbus_mask_param);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_bus_creator_info(conn, requested_meta, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++
++	KDBUS_ITEM_FOREACH(item, info, items)
++		attach_count += item->type;
++
++	ASSERT_RETURN(attach_count == expected_items);
++
++	ret = kdbus_free(conn, offset);
++	ASSERT_RETURN(ret == 0);
++
++	return 0;
++}
++
++static int kdbus_test_bus_creator_info(struct kdbus_test_env *env)
++{
++	int ret;
++	int control_fd;
++	char *path;
++	char *busname;
++	char buspath[2048];
++	char control_path[2048];
++	uint64_t attach_flags_mask;
++	struct kdbus_conn *conn;
++	unsigned long expected_items = 0;
++
++	snprintf(control_path, sizeof(control_path),
++		 "%s/control", env->root);
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peers-info-bus");
++	ASSERT_RETURN(busname);
++
++	/*
++	 * Now the bus allows us to see all its KDBUS_ATTACH_*
++	 * items
++	 */
++	ret = kdbus_create_bus(control_fd, busname, 0,
++			       _KDBUS_ATTACH_ALL, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn);
++
++	/*
++	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
++	 */
++	attach_flags_mask = _KDBUS_ATTACH_ANY;
++
++	/*
++	 * All flags will be returned except for:
++	 * KDBUS_ITEM_TIMESTAMP
++	 * KDBUS_ITEM_OWNED_NAME
++	 * KDBUS_ITEM_CONN_DESCRIPTION
++	 *
++	 * An extra item, KDBUS_ITEM_MAKE_NAME, is always returned;
++	 * it contains the bus name.
++	 */
++	expected_items = KDBUS_TEST_ITEMS_SUM + KDBUS_ITEM_MAKE_NAME;
++	expected_items -= KDBUS_ITEM_TIMESTAMP +
++			  KDBUS_ITEM_OWNED_NAME +
++			  KDBUS_ITEM_CONN_DESCRIPTION;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * We should have:
++	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME
++	 */
++	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS +
++			 KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     KDBUS_ATTACH_PIDS |
++					     KDBUS_ATTACH_CREDS,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/* KDBUS_ITEM_MAKE_NAME is always returned */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     0, expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Restrict kdbus system-wide mask to KDBUS_ATTACH_PIDS
++	 */
++
++	attach_flags_mask = KDBUS_ATTACH_PIDS;
++
++	/*
++	 * We should have:
++	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME
++	 */
++	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++
++	/* system-wide mask to 0 */
++	attach_flags_mask = 0;
++
++	/* we should only see: KDBUS_ITEM_MAKE_NAME */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++
++	/*
++	 * A new bus that hides all its owner metadata
++	 */
++
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peers-info-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, 0, 0, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn);
++
++	/*
++	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
++	 */
++	attach_flags_mask = _KDBUS_ATTACH_ANY;
++
++	/*
++	 * We only get the KDBUS_ITEM_MAKE_NAME
++	 */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * We still get only KDBUS_ITEM_MAKE_NAME
++	 */
++	attach_flags_mask = 0;
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++
++	/*
++	 * A new bus that shows only the PID and CREDS metadata
++	 * of the bus owner.
++	 */
++	control_fd = open(control_path, O_RDWR);
++	ASSERT_RETURN(control_fd >= 0);
++
++	busname = unique_name("test-peers-info-bus");
++	ASSERT_RETURN(busname);
++
++	ret = kdbus_create_bus(control_fd, busname, 0,
++			       KDBUS_ATTACH_PIDS|
++			       KDBUS_ATTACH_CREDS, &path);
++	ASSERT_RETURN(ret == 0);
++
++	snprintf(buspath, sizeof(buspath), "%s/%s/bus", env->root, path);
++
++	conn = __kdbus_hello(buspath, 0, 0, 0);
++	ASSERT_RETURN(conn);
++
++	/*
++	 * Start with a kdbus module mask set to _KDBUS_ATTACH_ANY
++	 */
++	attach_flags_mask = _KDBUS_ATTACH_ANY;
++
++	/*
++	 * We should have:
++	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME
++	 */
++	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_CREDS +
++			 KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	expected_items = KDBUS_ITEM_CREDS + KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     KDBUS_ATTACH_CREDS,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/* KDBUS_ITEM_MAKE_NAME is always returned */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     0, expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Restrict kdbus system-wide mask to KDBUS_ATTACH_PIDS
++	 */
++
++	attach_flags_mask = KDBUS_ATTACH_PIDS;
++	/*
++	 * We should have:
++	 * KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME
++	 */
++	expected_items = KDBUS_ITEM_PIDS + KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/* No KDBUS_ATTACH_CREDS */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     KDBUS_ATTACH_CREDS,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++	/* system-wide mask to 0 */
++	attach_flags_mask = 0;
++
++	/* we should only see: KDBUS_ITEM_MAKE_NAME */
++	expected_items = KDBUS_ITEM_MAKE_NAME;
++	ret = kdbus_cmp_bus_creator_metadata(env, conn,
++					     attach_flags_mask,
++					     _KDBUS_ATTACH_ALL,
++					     expected_items);
++	ASSERT_RETURN(ret == 0);
++
++
++	kdbus_conn_free(conn);
++	free(path);
++	free(busname);
++	close(control_fd);
++
++	return 0;
++}
++
++int kdbus_test_attach_flags(struct kdbus_test_env *env)
++{
++	int ret;
++	uint64_t flags_mask;
++	uint64_t old_kdbus_flags_mask;
++
++	/* We need CAP_DAC_OVERRIDE to overwrite the kdbus mask */
++	ret = test_is_capable(CAP_DAC_OVERRIDE, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	/* not enough privileges, SKIP the test */
++	if (!ret)
++		return TEST_SKIP;
++
++	/*
++	 * We need to be able to write to
++	 * "/sys/module/kdbus/parameters/attach_flags_mask"
++	 * perhaps we are unprivileged, or privileged only in our userns
++	 */
++	ret = access(env->mask_param_path, W_OK);
++	if (ret < 0) {
++		kdbus_printf("--- access() '%s' failed: %d (%m)\n",
++			     env->mask_param_path, -errno);
++		return TEST_SKIP;
++	}
++
++	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
++					     &old_kdbus_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	/* setup the right KDBUS_TEST_ITEMS_SUM */
++	if (!config_auditsyscall_is_enabled())
++		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_AUDIT;
++
++	if (!config_cgroups_is_enabled())
++		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_CGROUP;
++
++	if (!config_security_is_enabled())
++		KDBUS_TEST_ITEMS_SUM -= KDBUS_ITEM_SECLABEL;
++
++	/*
++	 * Test the connection creation attach flags
++	 */
++	ret = kdbus_test_peers_creation(env);
++	/* Restore previous kdbus mask */
++	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++				       old_kdbus_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test the CONN_INFO attach flags
++	 */
++	ret = kdbus_test_peers_info(env);
++	/* Restore previous kdbus mask */
++	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++				       old_kdbus_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test the Bus creator info and its attach flags
++	 */
++	ret = kdbus_test_bus_creator_info(env);
++	/* Restore previous kdbus mask */
++	kdbus_sysfs_set_parameter_mask(env->mask_param_path,
++				       old_kdbus_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
++					     &flags_mask);
++	ASSERT_RETURN(ret == 0 && old_kdbus_flags_mask == flags_mask);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-benchmark.c b/tools/testing/selftests/kdbus/test-benchmark.c
+new file mode 100644
+index 0000000..8a9744b
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-benchmark.c
+@@ -0,0 +1,451 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <locale.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <sys/time.h>
++#include <sys/mman.h>
++#include <sys/socket.h>
++#include <math.h>
++
++#include "kdbus-api.h"
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++#define SERVICE_NAME "foo.bar.echo"
++
++/*
++ * For a benchmark comparison against a unix socket, set:
++ * use_memfd	= false;
++ * compare_uds	= true;
++ * attach_none	= true;		do not attach metadata
++ */
++
++static bool use_memfd = true;		/* transmit memfd? */
++static bool compare_uds = false;		/* unix-socket comparison? */
++static bool attach_none = false;		/* clear attach-flags? */
++static char stress_payload[8192];
++
++struct stats {
++	uint64_t count;
++	uint64_t latency_acc;
++	uint64_t latency_low;
++	uint64_t latency_high;
++	uint64_t latency_avg;
++	uint64_t latency_ssquares;
++};
++
++static struct stats stats;
++
++static void reset_stats(void)
++{
++	stats.count = 0;
++	stats.latency_acc = 0;
++	stats.latency_low = UINT64_MAX;
++	stats.latency_high = 0;
++	stats.latency_avg = 0;
++	stats.latency_ssquares = 0;
++}
++
++static void dump_stats(bool is_uds)
++{
++	if (stats.count > 0) {
++		kdbus_printf("stats %s: %'llu packets processed, latency (nsecs) min/max/avg/dev %'7llu // %'7llu // %'7llu // %'7.f\n",
++			     is_uds ? " (UNIX)" : "(KDBUS)",
++			     (unsigned long long) stats.count,
++			     (unsigned long long) stats.latency_low,
++			     (unsigned long long) stats.latency_high,
++			     (unsigned long long) stats.latency_avg,
++			     sqrt(stats.latency_ssquares / stats.count));
++	} else {
++		kdbus_printf("*** no packets received. bus stuck?\n");
++	}
++}
++
++static void add_stats(uint64_t prev)
++{
++	uint64_t diff, latency_avg_prev;
++
++	diff = now(CLOCK_THREAD_CPUTIME_ID) - prev;
++
++	stats.count++;
++	stats.latency_acc += diff;
++
++	/* see Welford62 */
++	latency_avg_prev = stats.latency_avg;
++	stats.latency_avg = stats.latency_acc / stats.count;
++	stats.latency_ssquares += (diff - latency_avg_prev) * (diff - stats.latency_avg);
++
++	if (stats.latency_low > diff)
++		stats.latency_low = diff;
++
++	if (stats.latency_high < diff)
++		stats.latency_high = diff;
++}
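++
++/*
++ * The "Welford62" reference above is the classic online-variance
++ * recurrence: with m2 += (x - mean_prev) * (x - mean_new) accumulated
++ * per sample, the variance after n samples is m2 / n, and
++ * dump_stats() reports sqrt(latency_ssquares / count) as the
++ * standard deviation accordingly.
++ */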
++
++static int setup_simple_kdbus_msg(struct kdbus_conn *conn,
++				  uint64_t dst_id,
++				  struct kdbus_msg **msg_out)
++{
++	struct kdbus_msg *msg;
++	struct kdbus_item *item;
++	uint64_t size;
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	msg = malloc(size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = conn->id;
++	msg->dst_id = dst_id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	item = msg->items;
++
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t) stress_payload;
++	item->vec.size = sizeof(stress_payload);
++	item = KDBUS_ITEM_NEXT(item);
++
++	*msg_out = msg;
++
++	return 0;
++}
++
++static int setup_memfd_kdbus_msg(struct kdbus_conn *conn,
++				 uint64_t dst_id,
++				 off_t *memfd_item_offset,
++				 struct kdbus_msg **msg_out)
++{
++	struct kdbus_msg *msg;
++	struct kdbus_item *item;
++	uint64_t size;
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
++
++	msg = malloc(size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = conn->id;
++	msg->dst_id = dst_id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	item = msg->items;
++
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t) stress_payload;
++	item->vec.size = sizeof(stress_payload);
++	item = KDBUS_ITEM_NEXT(item);
++
++	item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_memfd);
++	item->memfd.size = sizeof(uint64_t);
++
++	*memfd_item_offset = (unsigned char *)item - (unsigned char *)msg;
++	*msg_out = msg;
++
++	return 0;
++}
++
++static int
++send_echo_request(struct kdbus_conn *conn, uint64_t dst_id,
++		  void *kdbus_msg, off_t memfd_item_offset)
++{
++	struct kdbus_cmd_send cmd = {};
++	int memfd = -1;
++	int ret;
++
++	if (use_memfd) {
++		uint64_t now_ns = now(CLOCK_THREAD_CPUTIME_ID);
++		struct kdbus_item *item =
++			(struct kdbus_item *)((uint8_t *)kdbus_msg +
++					      memfd_item_offset);
++		memfd = sys_memfd_create("memfd-name", 0);
++		ASSERT_RETURN_VAL(memfd >= 0, memfd);
++
++		ret = write(memfd, &now_ns, sizeof(now_ns));
++		ASSERT_RETURN_VAL(ret == sizeof(now_ns), -EAGAIN);
++
++		ret = sys_memfd_seal_set(memfd);
++		ASSERT_RETURN_VAL(ret == 0, -errno);
++
++		item->memfd.fd = memfd;
++	}
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)kdbus_msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	close(memfd);
++
++	return 0;
++}
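++
++/*
++ * Note (as assumed by this benchmark): kdbus only accepts sealed
++ * memfds as KDBUS_ITEM_PAYLOAD_MEMFD payloads, which is why the
++ * timestamp is written before sys_memfd_seal_set() above; once
++ * sealed, the contents cannot change while the message is in flight.
++ */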
++
++static int
++handle_echo_reply(struct kdbus_conn *conn, uint64_t send_ns)
++{
++	int ret;
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++	struct kdbus_msg *msg;
++	const struct kdbus_item *item;
++	bool has_memfd = false;
++
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	if (ret == -EAGAIN)
++		return ret;
++
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	if (!use_memfd)
++		goto out;
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++
++	KDBUS_ITEM_FOREACH(item, msg, items) {
++		switch (item->type) {
++		case KDBUS_ITEM_PAYLOAD_MEMFD: {
++			char *buf;
++
++			buf = mmap(NULL, item->memfd.size, PROT_READ,
++				   MAP_PRIVATE, item->memfd.fd, 0);
++			ASSERT_RETURN_VAL(buf != MAP_FAILED, -EINVAL);
++			ASSERT_RETURN_VAL(item->memfd.size == sizeof(uint64_t),
++					  -EINVAL);
++
++			add_stats(*(uint64_t*)buf);
++			munmap(buf, item->memfd.size);
++			close(item->memfd.fd);
++			has_memfd = true;
++			break;
++		}
++
++		case KDBUS_ITEM_PAYLOAD_OFF:
++			/* ignore */
++			break;
++		}
++	}
++
++out:
++	if (!has_memfd)
++		add_stats(send_ns);
++
++	ret = kdbus_free(conn, recv.msg.offset);
++	ASSERT_RETURN_VAL(ret == 0, -errno);
++
++	return 0;
++}
++
++static int benchmark(struct kdbus_test_env *env)
++{
++	static char buf[sizeof(stress_payload)];
++	struct kdbus_msg *kdbus_msg = NULL;
++	off_t memfd_cached_offset = 0;
++	int ret;
++	struct kdbus_conn *conn_a, *conn_b;
++	struct pollfd fds[2];
++	uint64_t start, send_ns, now_ns, diff;
++	unsigned int i;
++	int uds[2];
++
++	setlocale(LC_ALL, "");
++
++	for (i = 0; i < sizeof(stress_payload); i++)
++		stress_payload[i] = i;
++
++	/* setup kdbus pair */
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	ret = kdbus_add_match_empty(conn_a);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_empty(conn_b);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(conn_a, SERVICE_NAME, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	if (attach_none) {
++		ret = kdbus_conn_update_attach_flags(conn_a,
++						     _KDBUS_ATTACH_ALL,
++						     0);
++		ASSERT_RETURN(ret == 0);
++	}
++
++	/* setup UDS pair */
++
++	ret = socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_NONBLOCK, 0, uds);
++	ASSERT_RETURN(ret == 0);
++
++	/* setup a kdbus msg now */
++	if (use_memfd) {
++		ret = setup_memfd_kdbus_msg(conn_b, conn_a->id,
++					    &memfd_cached_offset,
++					    &kdbus_msg);
++		ASSERT_RETURN(ret == 0);
++	} else {
++		ret = setup_simple_kdbus_msg(conn_b, conn_a->id, &kdbus_msg);
++		ASSERT_RETURN(ret == 0);
++	}
++
++	/* start benchmark */
++
++	kdbus_printf("-- entering poll loop ...\n");
++
++	do {
++		/* run kdbus benchmark */
++		fds[0].fd = conn_a->fd;
++		fds[1].fd = conn_b->fd;
++
++		/* cancel any pending message */
++		handle_echo_reply(conn_a, 0);
++
++		start = now(CLOCK_THREAD_CPUTIME_ID);
++		reset_stats();
++
++		send_ns = now(CLOCK_THREAD_CPUTIME_ID);
++		ret = send_echo_request(conn_b, conn_a->id,
++					kdbus_msg, memfd_cached_offset);
++		ASSERT_RETURN(ret == 0);
++
++		while (1) {
++			unsigned int nfds = sizeof(fds) / sizeof(fds[0]);
++			unsigned int i;
++
++			for (i = 0; i < nfds; i++) {
++				fds[i].events = POLLIN | POLLPRI | POLLHUP;
++				fds[i].revents = 0;
++			}
++
++			ret = poll(fds, nfds, 10);
++			if (ret < 0)
++				break;
++
++			if (fds[0].revents & POLLIN) {
++				ret = handle_echo_reply(conn_a, send_ns);
++				ASSERT_RETURN(ret == 0);
++
++				send_ns = now(CLOCK_THREAD_CPUTIME_ID);
++				ret = send_echo_request(conn_b, conn_a->id,
++							kdbus_msg,
++							memfd_cached_offset);
++				ASSERT_RETURN(ret == 0);
++			}
++
++			now_ns = now(CLOCK_THREAD_CPUTIME_ID);
++			diff = now_ns - start;
++			if (diff > 1000000000ULL) {
++				start = now_ns;
++
++				dump_stats(false);
++				break;
++			}
++		}
++
++		if (!compare_uds)
++			continue;
++
++		/* run unix-socket benchmark as comparison */
++
++		fds[0].fd = uds[0];
++		fds[1].fd = uds[1];
++
++		/* cancel any pending message */
++		read(uds[1], buf, sizeof(buf));
++
++		start = now(CLOCK_THREAD_CPUTIME_ID);
++		reset_stats();
++
++		send_ns = now(CLOCK_THREAD_CPUTIME_ID);
++		ret = write(uds[0], stress_payload, sizeof(stress_payload));
++		ASSERT_RETURN(ret == sizeof(stress_payload));
++
++		while (1) {
++			unsigned int nfds = sizeof(fds) / sizeof(fds[0]);
++			unsigned int i;
++
++			for (i = 0; i < nfds; i++) {
++				fds[i].events = POLLIN | POLLPRI | POLLHUP;
++				fds[i].revents = 0;
++			}
++
++			ret = poll(fds, nfds, 10);
++			if (ret < 0)
++				break;
++
++			if (fds[1].revents & POLLIN) {
++				ret = read(uds[1], buf, sizeof(buf));
++				ASSERT_RETURN(ret == sizeof(buf));
++
++				add_stats(send_ns);
++
++				send_ns = now(CLOCK_THREAD_CPUTIME_ID);
++				ret = write(uds[0], buf, sizeof(buf));
++				ASSERT_RETURN(ret == sizeof(buf));
++			}
++
++			now_ns = now(CLOCK_THREAD_CPUTIME_ID);
++			diff = now_ns - start;
++			if (diff > 1000000000ULL) {
++				start = now_ns;
++
++				dump_stats(true);
++				break;
++			}
++		}
++
++	} while (kdbus_util_verbose);
++
++	kdbus_printf("-- closing bus connections\n");
++
++	free(kdbus_msg);
++
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	return (stats.count > 1) ? TEST_OK : TEST_ERR;
++}
++
++int kdbus_test_benchmark(struct kdbus_test_env *env)
++{
++	use_memfd = true;
++	attach_none = false;
++	compare_uds = false;
++	return benchmark(env);
++}
++
++int kdbus_test_benchmark_nomemfds(struct kdbus_test_env *env)
++{
++	use_memfd = false;
++	attach_none = false;
++	compare_uds = false;
++	return benchmark(env);
++}
++
++int kdbus_test_benchmark_uds(struct kdbus_test_env *env)
++{
++	use_memfd = false;
++	attach_none = true;
++	compare_uds = true;
++	return benchmark(env);
++}
+diff --git a/tools/testing/selftests/kdbus/test-bus.c b/tools/testing/selftests/kdbus/test-bus.c
+new file mode 100644
+index 0000000..762fb30
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-bus.c
+@@ -0,0 +1,175 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <limits.h>
++#include <sys/mman.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++static struct kdbus_item *kdbus_get_item(struct kdbus_info *info,
++					 uint64_t type)
++{
++	struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, info, items)
++		if (item->type == type)
++			return item;
++
++	return NULL;
++}
++
++static int test_bus_creator_info(const char *bus_path)
++{
++	int ret;
++	uint64_t offset;
++	struct kdbus_conn *conn;
++	struct kdbus_info *info;
++	struct kdbus_item *item;
++	char *tmp, *busname;
++
++	/* extract the bus-name from @bus_path */
++	tmp = strdup(bus_path);
++	ASSERT_RETURN(tmp);
++	busname = strrchr(tmp, '/');
++	ASSERT_RETURN(busname);
++	*busname = 0;
++	busname = strrchr(tmp, '/');
++	ASSERT_RETURN(busname);
++	++busname;
++
++	conn = kdbus_hello(bus_path, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_bus_creator_info(conn, _KDBUS_ATTACH_ALL, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++
++	item = kdbus_get_item(info, KDBUS_ITEM_MAKE_NAME);
++	ASSERT_RETURN(item);
++	ASSERT_RETURN(!strcmp(item->str, busname));
++
++	ret = kdbus_free(conn, offset);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	free(tmp);
++	kdbus_conn_free(conn);
++	return 0;
++}
++
++int kdbus_test_bus_make(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd cmd;
++
++		/* bloom size item */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_bloom_parameter bloom;
++		} bs;
++
++		/* name item */
++		uint64_t n_size;
++		uint64_t n_type;
++		char name[64];
++	} bus_make;
++	char s[PATH_MAX], *name;
++	int ret, control_fd2;
++	uid_t uid;
++
++	name = unique_name("");
++	ASSERT_RETURN(name);
++
++	snprintf(s, sizeof(s), "%s/control", env->root);
++	env->control_fd = open(s, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(env->control_fd >= 0);
++
++	control_fd2 = open(s, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(control_fd2 >= 0);
++
++	memset(&bus_make, 0, sizeof(bus_make));
++
++	bus_make.bs.size = sizeof(bus_make.bs);
++	bus_make.bs.type = KDBUS_ITEM_BLOOM_PARAMETER;
++	bus_make.bs.bloom.size = 64;
++	bus_make.bs.bloom.n_hash = 1;
++
++	bus_make.n_type = KDBUS_ITEM_MAKE_NAME;
++
++	uid = getuid();
++
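++	/*
++	 * Bus names must be "<uid>-<name>", contain only alphanumerics
++	 * and '-', and must not end in '-'; the cases below probe each
++	 * rule.  Every name item is sized as KDBUS_ITEM_HEADER_SIZE plus
++	 * the string including its NUL terminator.
++	 */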
++	/* missing uid prefix */
++	snprintf(bus_make.name, sizeof(bus_make.name), "foo");
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* non-alphanumeric character */
++	snprintf(bus_make.name, sizeof(bus_make.name), "%u-blah@123", uid);
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* '-' at the end */
++	snprintf(bus_make.name, sizeof(bus_make.name), "%u-blah-", uid);
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* create a new bus */
++	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-1", uid, name);
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_cmd_bus_make(control_fd2, &bus_make.cmd);
++	ASSERT_RETURN(ret == -EEXIST);
++
++	snprintf(s, sizeof(s), "%s/%u-%s-1/bus", env->root, uid, name);
++	ASSERT_RETURN(access(s, F_OK) == 0);
++
++	ret = test_bus_creator_info(s);
++	ASSERT_RETURN(ret == 0);
++
++	/* can't use the same fd for bus make twice, even though a different
++	 * bus name is used
++	 */
++	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-2", uid, name);
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(env->control_fd, &bus_make.cmd);
++	ASSERT_RETURN(ret == -EBADFD);
++
++	/* create a new bus, with different fd and different bus name */
++	snprintf(bus_make.name, sizeof(bus_make.name), "%u-%s-2", uid, name);
++	bus_make.n_size = KDBUS_ITEM_HEADER_SIZE + strlen(bus_make.name) + 1;
++	bus_make.cmd.size = sizeof(struct kdbus_cmd) +
++			    sizeof(bus_make.bs) + bus_make.n_size;
++	ret = kdbus_cmd_bus_make(control_fd2, &bus_make.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	close(control_fd2);
++	free(name);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-chat.c b/tools/testing/selftests/kdbus/test-chat.c
+new file mode 100644
+index 0000000..71a92d8
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-chat.c
+@@ -0,0 +1,122 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <stdbool.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
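++/*
++ * Ping-pong smoke test: two connections acquire names, one broadcasts a
++ * first message, and both then bounce unicast messages back and forth
++ * from a poll loop, releasing a queued name along the way.
++ */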
++int kdbus_test_chat(struct kdbus_test_env *env)
++{
++	int ret, cookie;
++	struct kdbus_conn *conn_a, *conn_b;
++	struct pollfd fds[2];
++	uint64_t flags;
++	int count;
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
++	ret = kdbus_name_acquire(conn_a, "foo.bar.test", &flags);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(conn_a, "foo.bar.baz", NULL);
++	ASSERT_RETURN(ret == 0);
++
++	flags = KDBUS_NAME_QUEUE;
++	ret = kdbus_name_acquire(conn_b, "foo.bar.baz", &flags);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(conn_a, "foo.bar.double", NULL);
++	ASSERT_RETURN(ret == -EALREADY);
++
++	ret = kdbus_name_release(conn_a, "foo.bar.double");
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_release(conn_a, "foo.bar.double");
++	ASSERT_RETURN(ret == -ESRCH);
++
++	ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
++				 KDBUS_LIST_NAMES  |
++				 KDBUS_LIST_QUEUED |
++				 KDBUS_LIST_ACTIVATORS);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_empty(conn_a);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_empty(conn_b);
++	ASSERT_RETURN(ret == 0);
++
++	cookie = 0;
++	ret = kdbus_msg_send(conn_b, NULL, 0xc0000000 | cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	fds[0].fd = conn_a->fd;
++	fds[1].fd = conn_b->fd;
++
++	kdbus_printf("-- entering poll loop ...\n");
++
++	for (count = 0;; count++) {
++		int i, nfds = sizeof(fds) / sizeof(fds[0]);
++
++		for (i = 0; i < nfds; i++) {
++			fds[i].events = POLLIN | POLLPRI | POLLHUP;
++			fds[i].revents = 0;
++		}
++
++		ret = poll(fds, nfds, 3000);
++		ASSERT_RETURN(ret >= 0);
++
++		if (fds[0].revents & POLLIN) {
++			if (count > 2)
++				kdbus_name_release(conn_a, "foo.bar.baz");
++
++			ret = kdbus_msg_recv(conn_a, NULL, NULL);
++			ASSERT_RETURN(ret == 0);
++			ret = kdbus_msg_send(conn_a, NULL,
++					     0xc0000000 | cookie++,
++					     0, 0, 0, conn_b->id);
++			ASSERT_RETURN(ret == 0);
++		}
++
++		if (fds[1].revents & POLLIN) {
++			ret = kdbus_msg_recv(conn_b, NULL, NULL);
++			ASSERT_RETURN(ret == 0);
++			ret = kdbus_msg_send(conn_b, NULL,
++					     0xc0000000 | cookie++,
++					     0, 0, 0, conn_a->id);
++			ASSERT_RETURN(ret == 0);
++		}
++
++		ret = kdbus_list(conn_b, KDBUS_LIST_UNIQUE |
++					 KDBUS_LIST_NAMES  |
++					 KDBUS_LIST_QUEUED |
++					 KDBUS_LIST_ACTIVATORS);
++		ASSERT_RETURN(ret == 0);
++
++		if (count > 10)
++			break;
++	}
++
++	kdbus_printf("-- closing bus connections\n");
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-connection.c b/tools/testing/selftests/kdbus/test-connection.c
+new file mode 100644
+index 0000000..5c2bf35
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-connection.c
+@@ -0,0 +1,616 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <limits.h>
++#include <sys/types.h>
++#include <sys/capability.h>
++#include <sys/mman.h>
++#include <sys/syscall.h>
++#include <sys/wait.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++int kdbus_test_hello(struct kdbus_test_env *env)
++{
++	struct kdbus_cmd_free cmd_free = {};
++	struct kdbus_cmd_hello hello;
++	int fd, ret;
++
++	memset(&hello, 0, sizeof(hello));
++
++	fd = open(env->buspath, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(fd >= 0);
++
++	hello.flags = KDBUS_HELLO_ACCEPT_FD;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
++	hello.size = sizeof(struct kdbus_cmd_hello);
++	hello.pool_size = POOL_SIZE;
++
++	/* an unaligned hello must result in -EFAULT */
++	ret = kdbus_cmd_hello(fd, (struct kdbus_cmd_hello *) ((char *) &hello + 1));
++	ASSERT_RETURN(ret == -EFAULT);
++
++	/* a too-small size must result in -EINVAL */
++	hello.size = 1;
++	hello.flags = KDBUS_HELLO_ACCEPT_FD;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	hello.size = sizeof(struct kdbus_cmd_hello);
++
++	/* check faulty flags */
++	hello.flags = 1ULL << 32;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* check for faulty pool sizes */
++	hello.pool_size = 0;
++	hello.flags = KDBUS_HELLO_ACCEPT_FD;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	hello.pool_size = 4097;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -EINVAL);
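++	/* 4097 is presumably rejected because pool sizes must be
++	 * page-aligned; POOL_SIZE below satisfies that requirement. */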
++
++	hello.pool_size = POOL_SIZE;
++
++	/*
++	 * The connection created by the core requires ALL meta flags
++	 * to be sent. An attempt to send fewer than that should result in
++	 * -ECONNREFUSED.
++	 */
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL & ~KDBUS_ATTACH_TIMESTAMP;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -ECONNREFUSED);
++
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	hello.offset = (__u64)-1;
++
++	/* success test */
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == 0);
++
++	/* The kernel should have returned some items */
++	ASSERT_RETURN(hello.offset != (__u64)-1);
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = hello.offset;
++	ret = kdbus_cmd_free(fd, &cmd_free);
++	ASSERT_RETURN(ret >= 0);
++
++	close(fd);
++
++	fd = open(env->buspath, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(fd >= 0);
++
++	/* no ACTIVATOR flag without a name */
++	hello.flags = KDBUS_HELLO_ACTIVATOR;
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	close(fd);
++
++	return TEST_OK;
++}
++
++int kdbus_test_byebye(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	struct kdbus_cmd_recv cmd_recv = { .size = sizeof(cmd_recv) };
++	struct kdbus_cmd cmd_byebye = { .size = sizeof(cmd_byebye) };
++	int ret;
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	ret = kdbus_add_match_empty(conn);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_empty(env->conn);
++	ASSERT_RETURN(ret == 0);
++
++	/* send over 1st connection */
++	ret = kdbus_msg_send(env->conn, NULL, 0, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	/* say byebye on the 2nd; with a message still queued, this must fail */
++	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
++	ASSERT_RETURN(ret == -EBUSY);
++
++	/* receive the message */
++	ret = kdbus_cmd_recv(conn->fd, &cmd_recv);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_free(conn, cmd_recv.msg.offset);
++	ASSERT_RETURN(ret == 0);
++
++	/* and try again */
++	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
++	ASSERT_RETURN(ret == 0);
++
++	/* a 2nd try should result in -ECONNRESET */
++	ret = kdbus_cmd_byebye(conn->fd, &cmd_byebye);
++	ASSERT_RETURN(ret == -ECONNRESET);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++/* Get only the first item */
++static struct kdbus_item *kdbus_get_item(struct kdbus_info *info,
++					 uint64_t type)
++{
++	struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, info, items)
++		if (item->type == type)
++			return item;
++
++	return NULL;
++}
++
++static unsigned int kdbus_count_item(struct kdbus_info *info,
++				     uint64_t type)
++{
++	unsigned int i = 0;
++	const struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, info, items)
++		if (item->type == type)
++			i++;
++
++	return i;
++}
++
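++/*
++ * The kdbus module's attach_flags_mask parameter (read via sysfs below)
++ * limits which metadata items a bus may attach; the test therefore masks
++ * its expected valid/invalid flag sets against it before asserting.
++ */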
++static int kdbus_fuzz_conn_info(struct kdbus_test_env *env, int capable)
++{
++	int ret;
++	unsigned int cnt = 0;
++	uint64_t offset = 0;
++	uint64_t kdbus_flags_mask;
++	struct kdbus_info *info;
++	struct kdbus_conn *conn;
++	struct kdbus_conn *privileged;
++	const struct kdbus_item *item;
++	uint64_t valid_flags_set;
++	uint64_t invalid_flags_set;
++	uint64_t valid_flags = KDBUS_ATTACH_NAMES |
++			       KDBUS_ATTACH_CREDS |
++			       KDBUS_ATTACH_PIDS |
++			       KDBUS_ATTACH_CONN_DESCRIPTION;
++
++	uint64_t invalid_flags = KDBUS_ATTACH_NAMES	|
++				 KDBUS_ATTACH_CREDS	|
++				 KDBUS_ATTACH_PIDS	|
++				 KDBUS_ATTACH_CAPS	|
++				 KDBUS_ATTACH_CGROUP	|
++				 KDBUS_ATTACH_CONN_DESCRIPTION;
++
++	struct kdbus_creds cached_creds;
++	uid_t ruid, euid, suid;
++	gid_t rgid, egid, sgid;
++
++	getresuid(&ruid, &euid, &suid);
++	getresgid(&rgid, &egid, &sgid);
++
++	cached_creds.uid = ruid;
++	cached_creds.euid = euid;
++	cached_creds.suid = suid;
++	cached_creds.fsuid = ruid;
++
++	cached_creds.gid = rgid;
++	cached_creds.egid = egid;
++	cached_creds.sgid = sgid;
++	cached_creds.fsgid = rgid;
++
++	struct kdbus_pids cached_pids = {
++		.pid	= getpid(),
++		.tid	= syscall(SYS_gettid),
++		.ppid	= getppid(),
++	};
++
++	ret = kdbus_sysfs_get_parameter_mask(env->mask_param_path,
++					     &kdbus_flags_mask);
++	ASSERT_RETURN(ret == 0);
++
++	valid_flags_set = valid_flags & kdbus_flags_mask;
++	invalid_flags_set = invalid_flags & kdbus_flags_mask;
++
++	ret = kdbus_conn_info(env->conn, env->conn->id, NULL,
++			      valid_flags, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(env->conn->buf + offset);
++	ASSERT_RETURN(info->id == env->conn->id);
++
++	/* We do not have any well-known name */
++	item = kdbus_get_item(info, KDBUS_ITEM_NAME);
++	ASSERT_RETURN(item == NULL);
++
++	item = kdbus_get_item(info, KDBUS_ITEM_CONN_DESCRIPTION);
++	if (valid_flags_set & KDBUS_ATTACH_CONN_DESCRIPTION) {
++		ASSERT_RETURN(item);
++	} else {
++		ASSERT_RETURN(item == NULL);
++	}
++
++	kdbus_free(env->conn, offset);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	privileged = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(privileged);
++
++	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	/* We do not have any well-known name */
++	item = kdbus_get_item(info, KDBUS_ITEM_NAME);
++	ASSERT_RETURN(item == NULL);
++
++	cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
++	if (valid_flags_set & KDBUS_ATTACH_CREDS) {
++		ASSERT_RETURN(cnt == 1);
++
++		item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
++		ASSERT_RETURN(item);
++
++		/* Compare received items with cached creds */
++		ASSERT_RETURN(memcmp(&item->creds, &cached_creds,
++				      sizeof(struct kdbus_creds)) == 0);
++	} else {
++		ASSERT_RETURN(cnt == 0);
++	}
++
++	item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
++	if (valid_flags_set & KDBUS_ATTACH_PIDS) {
++		ASSERT_RETURN(item);
++
++		/* Compare item->pids with cached PIDs */
++		ASSERT_RETURN(item->pids.pid == cached_pids.pid &&
++			      item->pids.tid == cached_pids.tid &&
++			      item->pids.ppid == cached_pids.ppid);
++	} else {
++		ASSERT_RETURN(item == NULL);
++	}
++
++	/* We did not request KDBUS_ITEM_CAPS */
++	item = kdbus_get_item(info, KDBUS_ITEM_CAPS);
++	ASSERT_RETURN(item == NULL);
++
++	kdbus_free(conn, offset);
++
++	ret = kdbus_name_acquire(conn, "com.example.a", NULL);
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
++	if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++		ASSERT_RETURN(item && !strcmp(item->name.name, "com.example.a"));
++	} else {
++		ASSERT_RETURN(item == NULL);
++	}
++
++	kdbus_free(conn, offset);
++
++	ret = kdbus_conn_info(conn, 0, "com.example.a", valid_flags, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	kdbus_free(conn, offset);
++
++	/* without the necessary caps we cannot drop to unprivileged */
++	if (!capable)
++		goto continue_test;
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++		ret = kdbus_conn_info(conn, conn->id, NULL,
++				      valid_flags, &offset);
++		ASSERT_EXIT(ret == 0);
++
++		info = (struct kdbus_info *)(conn->buf + offset);
++		ASSERT_EXIT(info->id == conn->id);
++
++		if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++			item = kdbus_get_item(info, KDBUS_ITEM_OWNED_NAME);
++			ASSERT_EXIT(item &&
++				    strcmp(item->name.name,
++				           "com.example.a") == 0);
++		}
++
++		if (valid_flags_set & KDBUS_ATTACH_CREDS) {
++			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
++			ASSERT_EXIT(item);
++
++			/* Compare received items with cached creds */
++			ASSERT_EXIT(memcmp(&item->creds, &cached_creds,
++				    sizeof(struct kdbus_creds)) == 0);
++		}
++
++		if (valid_flags_set & KDBUS_ATTACH_PIDS) {
++			item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
++			ASSERT_EXIT(item);
++
++			/*
++			 * Compare item->pids with cached pids of
++			 * privileged one.
++			 *
++			 * cmd_info will always return cached pids.
++			 */
++			ASSERT_EXIT(item->pids.pid == cached_pids.pid &&
++				    item->pids.tid == cached_pids.tid);
++		}
++
++		kdbus_free(conn, offset);
++
++		/*
++		 * Use invalid_flags and make sure that userspace
++		 * cannot trick us into attaching disallowed items.
++		 */
++		ret = kdbus_conn_info(conn, conn->id, NULL,
++				      invalid_flags, &offset);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * Make sure that we return only one creds item and
++		 * it points to the cached creds.
++		 */
++		cnt = kdbus_count_item(info, KDBUS_ITEM_CREDS);
++		if (invalid_flags_set & KDBUS_ATTACH_CREDS) {
++			ASSERT_EXIT(cnt == 1);
++
++			item = kdbus_get_item(info, KDBUS_ITEM_CREDS);
++			ASSERT_EXIT(item);
++
++			/* Compare received items with cached creds */
++			ASSERT_EXIT(memcmp(&item->creds, &cached_creds,
++				    sizeof(struct kdbus_creds)) == 0);
++		} else {
++			ASSERT_EXIT(cnt == 0);
++		}
++
++		if (invalid_flags_set & KDBUS_ATTACH_PIDS) {
++			cnt = kdbus_count_item(info, KDBUS_ITEM_PIDS);
++			ASSERT_EXIT(cnt == 1);
++
++			item = kdbus_get_item(info, KDBUS_ITEM_PIDS);
++			ASSERT_EXIT(item);
++
++			/* Compare item->pids with cached pids */
++			ASSERT_EXIT(item->pids.pid == cached_pids.pid &&
++				    item->pids.tid == cached_pids.tid);
++		}
++
++		cnt = kdbus_count_item(info, KDBUS_ITEM_CGROUP);
++		if (invalid_flags_set & KDBUS_ATTACH_CGROUP) {
++			ASSERT_EXIT(cnt == 1);
++		} else {
++			ASSERT_EXIT(cnt == 0);
++		}
++
++		cnt = kdbus_count_item(info, KDBUS_ITEM_CAPS);
++		if (invalid_flags_set & KDBUS_ATTACH_CAPS) {
++			ASSERT_EXIT(cnt == 1);
++		} else {
++			ASSERT_EXIT(cnt == 0);
++		}
++
++		kdbus_free(conn, offset);
++	}),
++	({ 0; }));
++	ASSERT_RETURN(ret == 0);
++
++continue_test:
++
++	/* A second name */
++	ret = kdbus_name_acquire(conn, "com.example.b", NULL);
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_conn_info(conn, conn->id, NULL, valid_flags, &offset);
++	ASSERT_RETURN(ret == 0);
++
++	info = (struct kdbus_info *)(conn->buf + offset);
++	ASSERT_RETURN(info->id == conn->id);
++
++	cnt = kdbus_count_item(info, KDBUS_ITEM_OWNED_NAME);
++	if (valid_flags_set & KDBUS_ATTACH_NAMES) {
++		ASSERT_RETURN(cnt == 2);
++	} else {
++		ASSERT_RETURN(cnt == 0);
++	}
++
++	kdbus_free(conn, offset);
++
++	ASSERT_RETURN(ret == 0);
++
++	return 0;
++}
++
++int kdbus_test_conn_info(struct kdbus_test_env *env)
++{
++	int ret;
++	int have_caps;
++	struct {
++		struct kdbus_cmd_info cmd_info;
++
++		struct {
++			uint64_t size;
++			uint64_t type;
++			char str[64];
++		} name;
++	} buf;
++
++	buf.cmd_info.size = sizeof(struct kdbus_cmd_info);
++	buf.cmd_info.flags = 0;
++	buf.cmd_info.attach_flags = 0;
++	buf.cmd_info.id = env->conn->id;
++
++	ret = kdbus_conn_info(env->conn, env->conn->id, NULL, 0, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* declare an item size that is smaller than the actual name */
++	buf.name.size = KDBUS_ITEM_HEADER_SIZE + 1;
++	buf.name.type = KDBUS_ITEM_NAME;
++	strcpy(buf.name.str, "foo.bar.bla");
++
++	buf.cmd_info.id = 0;
++	buf.cmd_info.size = sizeof(buf.cmd_info) + buf.name.size;
++	ret = kdbus_cmd_conn_info(env->conn->fd, (struct kdbus_cmd_info *) &buf);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* Pass a non-existent name */
++	ret = kdbus_conn_info(env->conn, 0, "non.existent.name", 0, NULL);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	if (!all_uids_gids_are_mapped())
++		return TEST_SKIP;
++
++	/* Test for caps here, so we run the previous test */
++	have_caps = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(have_caps >= 0);
++
++	ret = kdbus_fuzz_conn_info(env, have_caps);
++	ASSERT_RETURN(ret == 0);
++
++	/* Now if we have skipped some tests then let the user know */
++	if (!have_caps)
++		return TEST_SKIP;
++
++	return TEST_OK;
++}
++
++int kdbus_test_conn_update(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	int found = 0;
++	int ret;
++
++	/*
++	 * kdbus_hello() sets all attach flags. Receive a message by this
++	 * connection, and make sure a timestamp item (just to pick one) is
++	 * present.
++	 */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_msg_send(env->conn, NULL, 0x12345678, 0, 0, 0, conn->id);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	found = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
++	ASSERT_RETURN(found == 1);
++
++	kdbus_msg_free(msg);
++
++	/*
++	 * Now, modify the attach flags and repeat the action. The item must
++	 * now be missing.
++	 */
++	found = 0;
++
++	ret = kdbus_conn_update_attach_flags(conn,
++					     _KDBUS_ATTACH_ALL,
++					     _KDBUS_ATTACH_ALL &
++					     ~KDBUS_ATTACH_TIMESTAMP);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_send(env->conn, NULL, 0x12345678, 0, 0, 0, conn->id);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	found = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
++	ASSERT_RETURN(found == 0);
++
++	/* Provide a bogus attach_flags value */
++	ret = kdbus_conn_update_attach_flags(conn,
++					     _KDBUS_ATTACH_ALL + 1,
++					     _KDBUS_ATTACH_ALL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	kdbus_msg_free(msg);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++int kdbus_test_writable_pool(struct kdbus_test_env *env)
++{
++	struct kdbus_cmd_free cmd_free = {};
++	struct kdbus_cmd_hello hello;
++	int fd, ret;
++	void *map;
++
++	fd = open(env->buspath, O_RDWR | O_CLOEXEC);
++	ASSERT_RETURN(fd >= 0);
++
++	memset(&hello, 0, sizeof(hello));
++	hello.flags = KDBUS_HELLO_ACCEPT_FD;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++	hello.attach_flags_recv = _KDBUS_ATTACH_ALL;
++	hello.size = sizeof(struct kdbus_cmd_hello);
++	hello.pool_size = POOL_SIZE;
++	hello.offset = (__u64)-1;
++
++	/* success test */
++	ret = kdbus_cmd_hello(fd, &hello);
++	ASSERT_RETURN(ret == 0);
++
++	/* The kernel should have returned some items */
++	ASSERT_RETURN(hello.offset != (__u64)-1);
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = hello.offset;
++	ret = kdbus_cmd_free(fd, &cmd_free);
++	ASSERT_RETURN(ret >= 0);
++
++	/* pools cannot be mapped writable */
++	map = mmap(NULL, POOL_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++	ASSERT_RETURN(map == MAP_FAILED);
++
++	/* pools can always be mapped readable */
++	map = mmap(NULL, POOL_SIZE, PROT_READ, MAP_SHARED, fd, 0);
++	ASSERT_RETURN(map != MAP_FAILED);
++
++	/* make sure we cannot change protection masks to writable */
++	ret = mprotect(map, POOL_SIZE, PROT_READ | PROT_WRITE);
++	ASSERT_RETURN(ret < 0);
++
++	munmap(map, POOL_SIZE);
++	close(fd);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-daemon.c b/tools/testing/selftests/kdbus/test-daemon.c
+new file mode 100644
+index 0000000..8bc2386
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-daemon.c
+@@ -0,0 +1,65 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <stdbool.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++int kdbus_test_daemon(struct kdbus_test_env *env)
++{
++	struct pollfd fds[2];
++	int count;
++	int ret;
++
++	/* This test doesn't make any sense in non-interactive mode */
++	if (!kdbus_util_verbose)
++		return TEST_OK;
++
++	printf("Created connection %llu on bus '%s'\n",
++		(unsigned long long) env->conn->id, env->buspath);
++
++	ret = kdbus_name_acquire(env->conn, "com.example.kdbus-test", NULL);
++	ASSERT_RETURN(ret == 0);
++	printf("  Acquired name: com.example.kdbus-test\n");
++
++	fds[0].fd = env->conn->fd;
++	fds[1].fd = STDIN_FILENO;
++
++	printf("Monitoring connections:\n");
++
++	for (count = 0;; count++) {
++		int i, nfds = sizeof(fds) / sizeof(fds[0]);
++
++		for (i = 0; i < nfds; i++) {
++			fds[i].events = POLLIN | POLLPRI | POLLHUP;
++			fds[i].revents = 0;
++		}
++
++		ret = poll(fds, nfds, -1);
++		if (ret <= 0)
++			break;
++
++		if (fds[0].revents & POLLIN) {
++			ret = kdbus_msg_recv(env->conn, NULL, NULL);
++			ASSERT_RETURN(ret == 0);
++		}
++
++		/* stdin */
++		if (fds[1].revents & POLLIN)
++			break;
++	}
++
++	printf("Closing bus connection\n");
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-endpoint.c b/tools/testing/selftests/kdbus/test-endpoint.c
+new file mode 100644
+index 0000000..dcc6ab9
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-endpoint.c
+@@ -0,0 +1,341 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <libgen.h>
++#include <sys/capability.h>
++#include <sys/wait.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++#define KDBUS_SYSNAME_MAX_LEN			63
++
++static int install_name_add_match(struct kdbus_conn *conn, const char *name)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_name_change chg;
++		} item;
++		char name[64];
++	} buf;
++	int ret;
++
++	/* install the match rule */
++	memset(&buf, 0, sizeof(buf));
++	buf.item.type = KDBUS_ITEM_NAME_ADD;
++	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
++	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
++	strncpy(buf.name, name, sizeof(buf.name) - 1);
++	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
++	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
++
++	ret = kdbus_cmd_match_add(conn->fd, &buf.cmd);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
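++/*
++ * Custom endpoint names follow the same "<uid>-<name>" convention as bus
++ * names; create_endpoint() builds a KDBUS_ITEM_MAKE_NAME item and issues
++ * KDBUS_CMD_ENDPOINT_MAKE on a freshly opened bus fd.
++ */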
++static int create_endpoint(const char *buspath, uid_t uid, const char *name,
++			   uint64_t flags)
++{
++	struct {
++		struct kdbus_cmd cmd;
++
++		/* name item */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			/* max should be KDBUS_SYSNAME_MAX_LEN */
++			char str[128];
++		} name;
++	} ep_make;
++	int fd, ret;
++
++	fd = open(buspath, O_RDWR);
++	if (fd < 0)
++		return fd;
++
++	memset(&ep_make, 0, sizeof(ep_make));
++
++	snprintf(ep_make.name.str,
++		 /* use KDBUS_SYSNAME_MAX_LEN, or sizeof(str) for over-long names */
++		 KDBUS_SYSNAME_MAX_LEN > strlen(name) ?
++		 KDBUS_SYSNAME_MAX_LEN : sizeof(ep_make.name.str),
++		 "%u-%s", uid, name);
++
++	ep_make.name.type = KDBUS_ITEM_MAKE_NAME;
++	ep_make.name.size = KDBUS_ITEM_HEADER_SIZE +
++			    strlen(ep_make.name.str) + 1;
++
++	ep_make.cmd.flags = flags;
++	ep_make.cmd.size = sizeof(ep_make.cmd) + ep_make.name.size;
++
++	ret = kdbus_cmd_endpoint_make(fd, &ep_make.cmd);
++	if (ret < 0) {
++		kdbus_printf("error creating endpoint: %d (%m)\n", ret);
++		return ret;
++	}
++
++	return fd;
++}
++
++static int unpriv_test_custom_ep(const char *buspath)
++{
++	int ret, ep_fd1, ep_fd2;
++	char *ep1, *ep2, *tmp1, *tmp2;
++
++	tmp1 = strdup(buspath);
++	tmp2 = strdup(buspath);
++	ASSERT_RETURN(tmp1 && tmp2);
++
++	ret = asprintf(&ep1, "%s/%u-%s", dirname(tmp1), getuid(), "apps1");
++	ASSERT_RETURN(ret >= 0);
++
++	ret = asprintf(&ep2, "%s/%u-%s", dirname(tmp2), getuid(), "apps2");
++	ASSERT_RETURN(ret >= 0);
++
++	free(tmp1);
++	free(tmp2);
++
++	/* endpoint only accessible to current uid */
++	ep_fd1 = create_endpoint(buspath, getuid(), "apps1", 0);
++	ASSERT_RETURN(ep_fd1 >= 0);
++
++	/* endpoint world accessible */
++	ep_fd2 = create_endpoint(buspath, getuid(), "apps2",
++				  KDBUS_MAKE_ACCESS_WORLD);
++	ASSERT_RETURN(ep_fd2 >= 0);
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_UID, ({
++		int ep_fd;
++		struct kdbus_conn *ep_conn;
++
++		/*
++		 * Make sure that we are not able to create custom
++		 * endpoints
++		 */
++		ep_fd = create_endpoint(buspath, getuid(),
++					"unpriv_custom_ep", 0);
++		ASSERT_EXIT(ep_fd == -EPERM);
++
++		/*
++		 * Endpoint "apps1" is only accessible to the user
++		 * that owns it; access is denied by the VFS.
++		 */
++		ep_conn = kdbus_hello(ep1, 0, NULL, 0);
++		ASSERT_EXIT(!ep_conn && errno == EACCES);
++
++		/* Endpoint "apps2" world accessible */
++		ep_conn = kdbus_hello(ep2, 0, NULL, 0);
++		ASSERT_EXIT(ep_conn);
++
++		kdbus_conn_free(ep_conn);
++
++		_exit(EXIT_SUCCESS);
++	}),
++	({ 0; }));
++	ASSERT_RETURN(ret == 0);
++
++	close(ep_fd1);
++	close(ep_fd2);
++	free(ep1);
++	free(ep2);
++
++	return 0;
++}
++
++static int update_endpoint(int fd, const char *name)
++{
++	int len = strlen(name) + 1;
++	struct {
++		struct kdbus_cmd cmd;
++
++		/* name item */
++		struct {
++			uint64_t size;
++			uint64_t type;
++			char str[KDBUS_ALIGN8(len)];
++		} name;
++
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_policy_access access;
++		} access;
++	} ep_update;
++	int ret;
++
++	memset(&ep_update, 0, sizeof(ep_update));
++
++	ep_update.name.size = KDBUS_ITEM_HEADER_SIZE + len;
++	ep_update.name.type = KDBUS_ITEM_NAME;
++	strncpy(ep_update.name.str, name, sizeof(ep_update.name.str) - 1);
++
++	ep_update.access.size = sizeof(ep_update.access);
++	ep_update.access.type = KDBUS_ITEM_POLICY_ACCESS;
++	ep_update.access.access.type = KDBUS_POLICY_ACCESS_WORLD;
++	ep_update.access.access.access = KDBUS_POLICY_SEE;
++
++	ep_update.cmd.size = sizeof(ep_update);
++
++	ret = kdbus_cmd_endpoint_update(fd, &ep_update.cmd);
++	if (ret < 0) {
++		kdbus_printf("error updating endpoint: %d (%m)\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
++int kdbus_test_custom_endpoint(struct kdbus_test_env *env)
++{
++	char *ep, *tmp;
++	int ret, ep_fd;
++	struct kdbus_msg *msg;
++	struct kdbus_conn *ep_conn;
++	struct kdbus_conn *reader;
++	const char *name = "foo.bar.baz";
++	const char *epname = "foo";
++	char fake_ep[KDBUS_SYSNAME_MAX_LEN + 1] = {'\0'};
++
++	memset(fake_ep, 'X', sizeof(fake_ep) - 1);
++
++	/* Try to create a custom endpoint with a long name */
++	ret = create_endpoint(env->buspath, getuid(), fake_ep, 0);
++	ASSERT_RETURN(ret == -ENAMETOOLONG);
++
++	/* Try to create a custom endpoint with a different uid */
++	ret = create_endpoint(env->buspath, getuid() + 1, "foobar", 0);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* create a custom endpoint, and open a connection on it */
++	ep_fd = create_endpoint(env->buspath, getuid(), "foo", 0);
++	ASSERT_RETURN(ep_fd >= 0);
++
++	tmp = strdup(env->buspath);
++	ASSERT_RETURN(tmp);
++
++	ret = asprintf(&ep, "%s/%u-%s", dirname(tmp), getuid(), epname);
++	free(tmp);
++	ASSERT_RETURN(ret >= 0);
++
++	/* Register a connection that listens to broadcasts */
++	reader = kdbus_hello(ep, 0, NULL, 0);
++	ASSERT_RETURN(reader);
++
++	/* Register to kernel signals */
++	ret = kdbus_add_match_id(reader, 0x1, KDBUS_ITEM_ID_ADD,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_id(reader, 0x2, KDBUS_ITEM_ID_REMOVE,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	ret = install_name_add_match(reader, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* Monitor connections are not supported on custom endpoints */
++	ep_conn = kdbus_hello(ep, KDBUS_HELLO_MONITOR, NULL, 0);
++	ASSERT_RETURN(!ep_conn && errno == EOPNOTSUPP);
++
++	ep_conn = kdbus_hello(ep, 0, NULL, 0);
++	ASSERT_RETURN(ep_conn);
++
++	/*
++	 * Add a name-add match on the endpoint connection, acquire the
++	 * name from the unfiltered connection, and make sure the filtered
++	 * connection does not get the notification about the name owner
++	 * change. The endpoint connection must also be unable to call
++	 * conn_info, neither by name nor by ID.
++	 */
++	ret = install_name_add_match(ep_conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(ep_conn, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	ret = kdbus_conn_info(ep_conn, 0, name, 0, NULL);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	ret = kdbus_conn_info(ep_conn, 0, "random.crappy.name", 0, NULL);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	ret = kdbus_conn_info(ep_conn, env->conn->id, NULL, 0, NULL);
++	ASSERT_RETURN(ret == -ENXIO);
++
++	ret = kdbus_conn_info(ep_conn, 0x0fffffffffffffffULL, NULL, 0, NULL);
++	ASSERT_RETURN(ret == -ENXIO);
++
++	/* Check that the reader did not receive anything */
++	ret = kdbus_msg_recv(reader, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/*
++	 * Release the name again, update the custom endpoint policy,
++	 * and try again. This time, the connection on the custom endpoint
++	 * should have gotten it.
++	 */
++	ret = kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	ret = update_endpoint(ep_fd, name);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(ep_conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_ADD);
++	ASSERT_RETURN(msg->items[0].name_change.old_id.id == 0);
++	ASSERT_RETURN(msg->items[0].name_change.new_id.id == env->conn->id);
++	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
++	kdbus_msg_free(msg);
++
++	ret = kdbus_msg_recv(reader, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
++
++	kdbus_msg_free(msg);
++
++	ret = kdbus_conn_info(ep_conn, 0, name, 0, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_info(ep_conn, env->conn->id, NULL, 0, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* If we have the required privileges, run the unprivileged endpoint tests */
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * All uids/gids are mapped and we have the necessary caps
++	 */
++	if (ret && all_uids_gids_are_mapped()) {
++		ret = unpriv_test_custom_ep(env->buspath);
++		ASSERT_RETURN(ret == 0);
++	}
++
++	kdbus_conn_free(reader);
++	kdbus_conn_free(ep_conn);
++	close(ep_fd);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-fd.c b/tools/testing/selftests/kdbus/test-fd.c
+new file mode 100644
+index 0000000..2ae0f5a
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-fd.c
+@@ -0,0 +1,789 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stdbool.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <sys/types.h>
++#include <sys/mman.h>
++#include <sys/socket.h>
++#include <sys/wait.h>
++
++#include "kdbus-api.h"
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++#define KDBUS_MSG_MAX_ITEMS     128
++#define KDBUS_USER_MAX_CONN	256
++
++/* maximum number of inflight fds in a target queue per user */
++#define KDBUS_CONN_MAX_FDS_PER_USER	16
++
++/* maximum number of memfd items per message */
++#define KDBUS_MSG_MAX_MEMFD_ITEMS       16
++
++static int make_msg_payload_dbus(uint64_t src_id, uint64_t dst_id,
++				 uint64_t msg_size,
++				 struct kdbus_msg **msg_dbus)
++{
++	struct kdbus_msg *msg;
++
++	msg = malloc(msg_size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, msg_size);
++	msg->size = msg_size;
++	msg->src_id = src_id;
++	msg->dst_id = dst_id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	*msg_dbus = msg;
++
++	return 0;
++}
++
++static void make_item_memfds(struct kdbus_item *item,
++			     int *memfds, size_t memfd_size)
++{
++	size_t i;
++
++	for (i = 0; i < memfd_size; i++) {
++		item->type = KDBUS_ITEM_PAYLOAD_MEMFD;
++		item->size = KDBUS_ITEM_HEADER_SIZE +
++			     sizeof(struct kdbus_memfd);
++		item->memfd.fd = memfds[i];
++		item->memfd.size = sizeof(uint64_t); /* const size */
++		item = KDBUS_ITEM_NEXT(item);
++	}
++}
++
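++/*
++ * Note the asymmetry to make_item_memfds() above: all passed fds share a
++ * single KDBUS_ITEM_FDS item, while each memfd gets an item of its own.
++ */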
++static void make_item_fds(struct kdbus_item *item,
++			  int *fd_array, size_t fd_size)
++{
++	size_t i;
++	item->type = KDBUS_ITEM_FDS;
++	item->size = KDBUS_ITEM_HEADER_SIZE + (sizeof(int) * fd_size);
++
++	for (i = 0; i < fd_size; i++)
++		item->fds[i] = fd_array[i];
++}
++
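++/*
++ * Create a sealed memfd holding @buf.  sys_memfd_create() and
++ * sys_memfd_seal_set() are helpers from kdbus-util.c; the latter
++ * presumably applies the write/grow/shrink seals that kdbus requires
++ * before accepting a memfd as payload.
++ */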
++static int memfd_write(const char *name, void *buf, size_t bufsize)
++{
++	ssize_t ret;
++	int memfd;
++
++	memfd = sys_memfd_create(name, 0);
++	ASSERT_RETURN_VAL(memfd >= 0, memfd);
++
++	ret = write(memfd, buf, bufsize);
++	ASSERT_RETURN_VAL(ret == (ssize_t)bufsize, -EAGAIN);
++
++	ret = sys_memfd_seal_set(memfd);
++	ASSERT_RETURN_VAL(ret == 0, -errno);
++
++	return memfd;
++}
++
++static int send_memfds(struct kdbus_conn *conn, uint64_t dst_id,
++		       int *memfds_array, size_t memfd_count)
++{
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_item *item;
++	struct kdbus_msg *msg;
++	uint64_t size;
++	int ret;
++
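++	/*
++	 * Message size = fixed header plus one 8-byte-aligned item per
++	 * memfd (KDBUS_ITEM_SIZE pads accordingly), plus a bloom filter
++	 * item when broadcasting, since broadcasts are signals.
++	 */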
++	size = sizeof(struct kdbus_msg);
++	size += memfd_count * KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST)
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++
++	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	item = msg->items;
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST) {
++		item->type = KDBUS_ITEM_BLOOM_FILTER;
++		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++		item = KDBUS_ITEM_NEXT(item);
++
++		msg->flags |= KDBUS_MSG_SIGNAL;
++	}
++
++	make_item_memfds(item, memfds_array, memfd_count);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	if (ret < 0) {
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	free(msg);
++	return 0;
++}
++
++static int send_fds(struct kdbus_conn *conn, uint64_t dst_id,
++		    int *fd_array, size_t fd_count)
++{
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_item *item;
++	struct kdbus_msg *msg;
++	uint64_t size;
++	int ret;
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(int) * fd_count);
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST)
++		size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++
++	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	item = msg->items;
++
++	if (dst_id == KDBUS_DST_ID_BROADCAST) {
++		item->type = KDBUS_ITEM_BLOOM_FILTER;
++		item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + 64;
++		item = KDBUS_ITEM_NEXT(item);
++
++		msg->flags |= KDBUS_MSG_SIGNAL;
++	}
++
++	make_item_fds(item, fd_array, fd_count);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	if (ret < 0) {
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	free(msg);
++	return ret;
++}
++
++static int send_fds_memfds(struct kdbus_conn *conn, uint64_t dst_id,
++			   int *fds_array, size_t fd_count,
++			   int *memfds_array, size_t memfd_count)
++{
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_item *item;
++	struct kdbus_msg *msg;
++	uint64_t size;
++	int ret;
++
++	size = sizeof(struct kdbus_msg);
++	size += memfd_count * KDBUS_ITEM_SIZE(sizeof(struct kdbus_memfd));
++	size += KDBUS_ITEM_SIZE(sizeof(int) * fd_count);
++
++	ret = make_msg_payload_dbus(conn->id, dst_id, size, &msg);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	item = msg->items;
++
++	make_item_fds(item, fds_array, fd_count);
++	item = KDBUS_ITEM_NEXT(item);
++	make_item_memfds(item, memfds_array, memfd_count);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	if (ret < 0) {
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	free(msg);
++	return ret;
++}
++
++/* Return the number of received fds */
++static unsigned int kdbus_item_get_nfds(struct kdbus_msg *msg)
++{
++	unsigned int fds = 0;
++	const struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, msg, items) {
++		switch (item->type) {
++		case KDBUS_ITEM_FDS: {
++			fds += (item->size - KDBUS_ITEM_HEADER_SIZE) /
++				sizeof(int);
++			break;
++		}
++
++		case KDBUS_ITEM_PAYLOAD_MEMFD:
++			fds++;
++			break;
++
++		default:
++			break;
++		}
++	}
++
++	return fds;
++}
++
++static struct kdbus_msg *
++get_kdbus_msg_with_fd(struct kdbus_conn *conn_src,
++		      uint64_t dst_id, uint64_t cookie, int fd)
++{
++	int ret;
++	uint64_t size;
++	struct kdbus_item *item;
++	struct kdbus_msg *msg;
++
++	size = sizeof(struct kdbus_msg);
++	if (fd >= 0)
++		size += KDBUS_ITEM_SIZE(sizeof(int));
++
++	ret = make_msg_payload_dbus(conn_src->id, dst_id, size, &msg);
++	ASSERT_RETURN_VAL(ret == 0, NULL);
++
++	msg->cookie = cookie;
++
++	if (fd >= 0) {
++		item = msg->items;
++
++		make_item_fds(item, (int *)&fd, 1);
++	}
++
++	return msg;
++}
++
++static int kdbus_test_no_fds(struct kdbus_test_env *env,
++			     int *fds, int *memfd)
++{
++	pid_t pid;
++	int ret, status;
++	uint64_t cookie;
++	int connfd1, connfd2;
++	struct kdbus_msg *msg, *msg_sync_reply;
++	struct kdbus_cmd_hello hello;
++	struct kdbus_conn *conn_src, *conn_dst, *conn_dummy;
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_cmd_free cmd_free = {};
++
++	conn_src = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_src);
++
++	connfd1 = open(env->buspath, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(connfd1 >= 0);
++
++	connfd2 = open(env->buspath, O_RDWR|O_CLOEXEC);
++	ASSERT_RETURN(connfd2 >= 0);
++
++	/*
++	 * Create connections without KDBUS_HELLO_ACCEPT_FD to test
++	 * whether sending fds to them is blocked. The handshake is
++	 * open-coded here, presumably because the kdbus_hello()
++	 * helper sets that flag.
++	 */
++	conn_dst = malloc(sizeof(*conn_dst));
++	ASSERT_RETURN(conn_dst);
++
++	conn_dummy = malloc(sizeof(*conn_dummy));
++	ASSERT_RETURN(conn_dummy);
++
++	memset(&hello, 0, sizeof(hello));
++	hello.size = sizeof(struct kdbus_cmd_hello);
++	hello.pool_size = POOL_SIZE;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++
++	ret = kdbus_cmd_hello(connfd1, &hello);
++	ASSERT_RETURN(ret == 0);
++
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = hello.offset;
++	ret = kdbus_cmd_free(connfd1, &cmd_free);
++	ASSERT_RETURN(ret >= 0);
++
++	conn_dst->fd = connfd1;
++	conn_dst->id = hello.id;
++
++	memset(&hello, 0, sizeof(hello));
++	hello.size = sizeof(struct kdbus_cmd_hello);
++	hello.pool_size = POOL_SIZE;
++	hello.attach_flags_send = _KDBUS_ATTACH_ALL;
++
++	ret = kdbus_cmd_hello(connfd2, &hello);
++	ASSERT_RETURN(ret == 0);
++
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = hello.offset;
++	ret = kdbus_cmd_free(connfd2, &cmd_free);
++	ASSERT_RETURN(ret >= 0);
++
++	conn_dummy->fd = connfd2;
++	conn_dummy->id = hello.id;
++
++	conn_dst->buf = mmap(NULL, POOL_SIZE, PROT_READ,
++			     MAP_SHARED, connfd1, 0);
++	ASSERT_RETURN(conn_dst->buf != MAP_FAILED);
++
++	conn_dummy->buf = mmap(NULL, POOL_SIZE, PROT_READ,
++			       MAP_SHARED, connfd2, 0);
++	ASSERT_RETURN(conn_dummy->buf != MAP_FAILED);
++
++	/*
++	 * Send fds to a connection that does not accept fd passing
++	 */
++	ret = send_fds(conn_src, conn_dst->id, fds, 1);
++	ASSERT_RETURN(ret == -ECOMM);
++
++	/*
++	 * memfds count as kdbus payload, not as passed fds, so this succeeds
++	 */
++	ret = send_memfds(conn_src, conn_dst->id, memfd, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv_poll(conn_dst, 100, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	cookie = time(NULL);
++
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		struct timespec now;
++
++		/*
++		 * A sync send/reply to a connection that does not
++		 * accept fds should fail if it contains an fd
++		 */
++		msg_sync_reply = get_kdbus_msg_with_fd(conn_dst,
++						       conn_dummy->id,
++						       cookie, fds[0]);
++		ASSERT_EXIT(msg_sync_reply);
++
++		ret = clock_gettime(CLOCK_MONOTONIC_COARSE, &now);
++		ASSERT_EXIT(ret == 0);
++
++		msg_sync_reply->timeout_ns = now.tv_sec * 1000000000ULL +
++					     now.tv_nsec + 100000000ULL;
++		msg_sync_reply->flags = KDBUS_MSG_EXPECT_REPLY;
++
++		memset(&cmd, 0, sizeof(cmd));
++		cmd.size = sizeof(cmd);
++		cmd.msg_address = (uintptr_t)msg_sync_reply;
++		cmd.flags = KDBUS_SEND_SYNC_REPLY;
++
++		ret = kdbus_cmd_send(conn_dst->fd, &cmd);
++		ASSERT_EXIT(ret == -ECOMM);
++
++		/*
++		 * Now send a normal message, but the sync reply
++		 * will fail since it contains an fd that the
++		 * original sender does not want.
++		 *
++		 * The original sender will fail with -ETIMEDOUT
++		 */
++		cookie++;
++		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
++					  KDBUS_MSG_EXPECT_REPLY,
++					  5000000000ULL, 0, conn_src->id, -1);
++		ASSERT_EXIT(ret == -EREMOTEIO);
++
++		cookie++;
++		ret = kdbus_msg_recv_poll(conn_dst, 100, &msg, NULL);
++		ASSERT_EXIT(ret == 0);
++		ASSERT_EXIT(msg->cookie == cookie);
++
++		free(msg_sync_reply);
++		kdbus_msg_free(msg);
++
++		_exit(EXIT_SUCCESS);
++	}
++
++	ret = kdbus_msg_recv_poll(conn_dummy, 100, NULL, NULL);
++	ASSERT_RETURN(ret == -ETIMEDOUT);
++
++	cookie++;
++	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	/*
++	 * Try to reply with a kdbus connection handle; this should
++	 * fail with -EOPNOTSUPP
++	 */
++	msg_sync_reply = get_kdbus_msg_with_fd(conn_src,
++					       conn_dst->id,
++					       cookie, conn_dst->fd);
++	ASSERT_RETURN(msg_sync_reply);
++
++	msg_sync_reply->cookie_reply = cookie;
++
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg_sync_reply;
++
++	ret = kdbus_cmd_send(conn_src->fd, &cmd);
++	ASSERT_RETURN(ret == -EOPNOTSUPP);
++
++	free(msg_sync_reply);
++
++	/*
++	 * Try to reply with a normal fd; this should fail even
++	 * if the response is a sync reply.
++	 *
++	 * From the sender's view this fails with -ECOMM
++	 */
++	msg_sync_reply = get_kdbus_msg_with_fd(conn_src,
++					       conn_dst->id,
++					       cookie, fds[0]);
++	ASSERT_RETURN(msg_sync_reply);
++
++	msg_sync_reply->cookie_reply = cookie;
++
++	memset(&cmd, 0, sizeof(cmd));
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg_sync_reply;
++
++	ret = kdbus_cmd_send(conn_src->fd, &cmd);
++	ASSERT_RETURN(ret == -ECOMM);
++
++	free(msg_sync_reply);
++
++	/*
++	 * Send another normal message and check that the queue
++	 * is clear
++	 */
++	cookie++;
++	ret = kdbus_msg_send(conn_src, NULL, cookie, 0, 0, 0,
++			     conn_dst->id);
++	ASSERT_RETURN(ret == 0);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	kdbus_conn_free(conn_dummy);
++	kdbus_conn_free(conn_dst);
++	kdbus_conn_free(conn_src);
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++static int kdbus_send_multiple_fds(struct kdbus_conn *conn_src,
++				   struct kdbus_conn *conn_dst)
++{
++	int ret, i;
++	unsigned int nfds;
++	int fds[KDBUS_CONN_MAX_FDS_PER_USER + 1];
++	int memfds[KDBUS_MSG_MAX_ITEMS + 1];
++	struct kdbus_msg *msg;
++	uint64_t dummy_value;
++
++	dummy_value = time(NULL);
++
++	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++) {
++		fds[i] = open("/dev/null", O_RDWR|O_CLOEXEC);
++		ASSERT_RETURN_VAL(fds[i] >= 0, -errno);
++	}
++
++	/* Send KDBUS_CONN_MAX_FDS_PER_USER with one more fd */
++	ret = send_fds(conn_src, conn_dst->id, fds,
++		       KDBUS_CONN_MAX_FDS_PER_USER + 1);
++	ASSERT_RETURN(ret == -EMFILE);
++
++	/* Retry with the correct KDBUS_CONN_MAX_FDS_PER_USER */
++	ret = send_fds(conn_src, conn_dst->id, fds,
++		       KDBUS_CONN_MAX_FDS_PER_USER);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* Check we got the right number of fds */
++	nfds = kdbus_item_get_nfds(msg);
++	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER);
++
++	kdbus_msg_free(msg);
++
++	for (i = 0; i < KDBUS_MSG_MAX_ITEMS + 1; i++, dummy_value++) {
++		memfds[i] = memfd_write("memfd-name",
++					&dummy_value,
++					sizeof(dummy_value));
++		ASSERT_RETURN_VAL(memfds[i] >= 0, memfds[i]);
++	}
++
++	/* Send KDBUS_MSG_MAX_ITEMS with one more memfd */
++	ret = send_memfds(conn_src, conn_dst->id,
++			  memfds, KDBUS_MSG_MAX_ITEMS + 1);
++	ASSERT_RETURN(ret == -E2BIG);
++
++	ret = send_memfds(conn_src, conn_dst->id,
++			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS + 1);
++	ASSERT_RETURN(ret == -E2BIG);
++
++	/* Retry with the correct KDBUS_MSG_MAX_ITEMS */
++	ret = send_memfds(conn_src, conn_dst->id,
++			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* Check we got the right number of fds */
++	nfds = kdbus_item_get_nfds(msg);
++	ASSERT_RETURN(nfds == KDBUS_MSG_MAX_MEMFD_ITEMS);
++
++	kdbus_msg_free(msg);
++
++
++	/*
++	 * Combine KDBUS_CONN_MAX_FDS_PER_USER + 1 fds with
++	 * 10 memfds
++	 */
++	ret = send_fds_memfds(conn_src, conn_dst->id,
++			      fds, KDBUS_CONN_MAX_FDS_PER_USER + 1,
++			      memfds, 10);
++	ASSERT_RETURN(ret == -EMFILE);
++
++	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/*
++	 * Combine KDBUS_CONN_MAX_FDS_PER_USER fds with
++	 * (128 - 1) + 1 memfds; all fds share a single item,
++	 * while each memfd takes an item of its own
++	 */
++	ret = send_fds_memfds(conn_src, conn_dst->id,
++			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
++			      memfds, (KDBUS_MSG_MAX_ITEMS - 1) + 1);
++	ASSERT_RETURN(ret == -E2BIG);
++
++	ret = send_fds_memfds(conn_src, conn_dst->id,
++			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
++			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS + 1);
++	ASSERT_RETURN(ret == -E2BIG);
++
++	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/*
++	 * Send KDBUS_CONN_MAX_FDS_PER_USER fds +
++	 * KDBUS_MSG_MAX_MEMFD_ITEMS memfds
++	 */
++	ret = send_fds_memfds(conn_src, conn_dst->id,
++			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
++			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* Check we got the right number of fds */
++	nfds = kdbus_item_get_nfds(msg);
++	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER +
++			      KDBUS_MSG_MAX_MEMFD_ITEMS);
++
++	kdbus_msg_free(msg);
++
++
++	/*
++	 * Re-send fds + memfds, close our copies, leave them queued
++	 * unreceived, and try to queue more
++	 */
++	ret = send_fds_memfds(conn_src, conn_dst->id,
++			      fds, KDBUS_CONN_MAX_FDS_PER_USER,
++			      memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
++	ASSERT_RETURN(ret == 0);
++
++	/* close old references and get new ones */
++	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++) {
++		close(fds[i]);
++		fds[i] = open("/dev/null", O_RDWR|O_CLOEXEC);
++		ASSERT_RETURN_VAL(fds[i] >= 0, -errno);
++	}
++
++	/* should fail since we already have fds in the queue */
++	ret = send_fds(conn_src, conn_dst->id, fds,
++		       KDBUS_CONN_MAX_FDS_PER_USER);
++	ASSERT_RETURN(ret == -EMFILE);
++
++	/* This should succeed */
++	ret = send_memfds(conn_src, conn_dst->id,
++			  memfds, KDBUS_MSG_MAX_MEMFD_ITEMS);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	nfds = kdbus_item_get_nfds(msg);
++	ASSERT_RETURN(nfds == KDBUS_CONN_MAX_FDS_PER_USER +
++			      KDBUS_MSG_MAX_MEMFD_ITEMS);
++
++	kdbus_msg_free(msg);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	nfds = kdbus_item_get_nfds(msg);
++	ASSERT_RETURN(nfds == KDBUS_MSG_MAX_MEMFD_ITEMS);
++
++	kdbus_msg_free(msg);
++
++	ret = kdbus_msg_recv(conn_dst, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	for (i = 0; i < KDBUS_CONN_MAX_FDS_PER_USER + 1; i++)
++		close(fds[i]);
++
++	for (i = 0; i < KDBUS_MSG_MAX_ITEMS + 1; i++)
++		close(memfds[i]);
++
++	return 0;
++}
++
++int kdbus_test_fd_passing(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn_src, *conn_dst;
++	const char *str = "stackenblocken";
++	const struct kdbus_item *item;
++	struct kdbus_msg *msg;
++	unsigned int i;
++	uint64_t now;
++	int fds_conn[2];
++	int sock_pair[2];
++	int fds[2];
++	int memfd;
++	int ret;
++
++	now = (uint64_t) time(NULL);
++
++	/* create two connections */
++	conn_src = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_dst = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_src && conn_dst);
++
++	fds_conn[0] = conn_src->fd;
++	fds_conn[1] = conn_dst->fd;
++
++	ret = socketpair(AF_UNIX, SOCK_STREAM, 0, sock_pair);
++	ASSERT_RETURN(ret == 0);
++
++	/* Setup memfd */
++	memfd = memfd_write("memfd-name", &now, sizeof(now));
++	ASSERT_RETURN(memfd >= 0);
++
++	/* Setup pipes */
++	ret = pipe(fds);
++	ASSERT_RETURN(ret == 0);
++
++	i = write(fds[1], str, strlen(str));
++	ASSERT_RETURN(i == strlen(str));
++
++	/*
++	 * Try to pass the handle of a connection as message payload.
++	 * This must fail.
++	 */
++	ret = send_fds(conn_src, conn_dst->id, fds_conn, 2);
++	ASSERT_RETURN(ret == -ENOTSUP);
++
++	ret = send_fds(conn_dst, conn_src->id, fds_conn, 2);
++	ASSERT_RETURN(ret == -ENOTSUP);
++
++	ret = send_fds(conn_src, conn_dst->id, sock_pair, 2);
++	ASSERT_RETURN(ret == -ENOTSUP);
++
++	/*
++	 * Send fds and memfds to a connection that does not accept fds
++	 */
++	ret = kdbus_test_no_fds(env, fds, (int *)&memfd);
++	ASSERT_RETURN(ret == 0);
++
++	/* Try to broadcast file descriptors. This must fail. */
++	ret = send_fds(conn_src, KDBUS_DST_ID_BROADCAST, fds, 1);
++	ASSERT_RETURN(ret == -ENOTUNIQ);
++
++	/* Try to broadcast memfd. This must succeed. */
++	ret = send_memfds(conn_src, KDBUS_DST_ID_BROADCAST, (int *)&memfd, 1);
++	ASSERT_RETURN(ret == 0);
++
++	/* open-coded loop: resend the pipe's read end until it reports EOF */
++loop_send_fds:
++
++	/*
++	 * Send the read end of the pipe and close it.
++	 */
++	ret = send_fds(conn_src, conn_dst->id, fds, 1);
++	ASSERT_RETURN(ret == 0);
++	close(fds[0]);
++
++	ret = kdbus_msg_recv(conn_dst, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	KDBUS_ITEM_FOREACH(item, msg, items) {
++		if (item->type == KDBUS_ITEM_FDS) {
++			char tmp[14];
++			int nfds = (item->size - KDBUS_ITEM_HEADER_SIZE) /
++					sizeof(int);
++			ASSERT_RETURN(nfds == 1);
++
++			i = read(item->fds[0], tmp, sizeof(tmp));
++			if (i != 0) {
++				ASSERT_RETURN(i == sizeof(tmp));
++				ASSERT_RETURN(memcmp(tmp, str, sizeof(tmp)) == 0);
++
++				/* Write EOF */
++				close(fds[1]);
++
++				/*
++				 * Resend the read end of the pipe,
++				 * the receiver still holds a reference
++				 * to it...
++				 */
++				goto loop_send_fds;
++			}
++
++			/* Got EOF */
++
++			/*
++			 * Close the last reference to the read end
++			 * of the pipe; other references are
++			 * automatically closed just after send.
++			 */
++			close(item->fds[0]);
++		}
++	}
++
++	/*
++	 * Try to resend the read end of the pipe. Must fail with
++	 * -EBADF since both the sender and receiver closed their
++	 * references to it. We can assume this because sender and
++	 * receiver live in the same process.
++	 */
++	ret = send_fds(conn_src, conn_dst->id, fds, 1);
++	ASSERT_RETURN(ret == -EBADF);
++
++	/* Then we clear out any received data... */
++	kdbus_msg_free(msg);
++
++	ret = kdbus_send_multiple_fds(conn_src, conn_dst);
++	ASSERT_RETURN(ret == 0);
++
++	close(sock_pair[0]);
++	close(sock_pair[1]);
++	close(memfd);
++
++	kdbus_conn_free(conn_src);
++	kdbus_conn_free(conn_dst);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-free.c b/tools/testing/selftests/kdbus/test-free.c
+new file mode 100644
+index 0000000..f666da3
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-free.c
+@@ -0,0 +1,64 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++static int sample_ioctl_call(struct kdbus_test_env *env)
++{
++	int ret;
++	struct kdbus_cmd_list cmd_list = {
++		.flags = KDBUS_LIST_QUEUED,
++		.size = sizeof(cmd_list),
++	};
++
++	ret = kdbus_cmd_list(env->conn->fd, &cmd_list);
++	ASSERT_RETURN(ret == 0);
++
++	/* DON'T FREE THIS SLICE OF MEMORY! */
++
++	return TEST_OK;
++}
++
++int kdbus_test_free(struct kdbus_test_env *env)
++{
++	int ret;
++	struct kdbus_cmd_free cmd_free = {};
++
++	/* free an unallocated buffer */
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.flags = 0;
++	cmd_free.offset = 0;
++	ret = kdbus_cmd_free(env->conn->fd, &cmd_free);
++	ASSERT_RETURN(ret == -ENXIO);
++
++	/* free a buffer out of the pool's bounds */
++	cmd_free.size = sizeof(cmd_free);
++	cmd_free.offset = POOL_SIZE + 1;
++	ret = kdbus_cmd_free(env->conn->fd, &cmd_free);
++	ASSERT_RETURN(ret == -ENXIO);
++
++	/*
++	 * The user application is responsible for freeing the allocated
++	 * memory with the KDBUS_CMD_FREE ioctl, so let's test what happens
++	 * if we forget about it.
++	 */
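++
++	/*
++	 * For reference, a minimal sketch of what a well-behaved client
++	 * would do after a call like the one in sample_ioctl_call()
++	 * (using the cmd_list.offset returned by KDBUS_CMD_LIST):
++	 *
++	 *	struct kdbus_cmd_free cmd_free = {
++	 *		.size = sizeof(cmd_free),
++	 *		.offset = cmd_list.offset,
++	 *	};
++	 *	kdbus_cmd_free(env->conn->fd, &cmd_free);
++	 */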
++
++	ret = sample_ioctl_call(env);
++	ASSERT_RETURN(ret == 0);
++
++	ret = sample_ioctl_call(env);
++	ASSERT_RETURN(ret == 0);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-match.c b/tools/testing/selftests/kdbus/test-match.c
+new file mode 100644
+index 0000000..2360dc1
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-match.c
+@@ -0,0 +1,441 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++int kdbus_test_match_id_add(struct kdbus_test_env *env)
++{
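++	/*
++	 * kdbus commands are variable-sized blobs: a fixed command
++	 * header directly followed by a list of items. The anonymous
++	 * struct below builds such a blob on the stack in one
++	 * contiguous allocation; the same pattern recurs throughout
++	 * these tests.
++	 */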
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_id_change chg;
++		} item;
++	} buf;
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	int ret;
++
++	memset(&buf, 0, sizeof(buf));
++
++	buf.cmd.size = sizeof(buf);
++	buf.cmd.cookie = 0xdeafbeefdeaddead;
++	buf.item.size = sizeof(buf.item);
++	buf.item.type = KDBUS_ITEM_ID_ADD;
++	buf.item.chg.id = KDBUS_MATCH_ID_ANY;
++
++	/* match on id add */
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* create 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* 1st connection should have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_ADD);
++	ASSERT_RETURN(msg->items[0].id_change.id == conn->id);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++int kdbus_test_match_id_remove(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_id_change chg;
++		} item;
++	} buf;
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	size_t id;
++	int ret;
++
++	/* create 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++	id = conn->id;
++
++	memset(&buf, 0, sizeof(buf));
++	buf.cmd.size = sizeof(buf);
++	buf.cmd.cookie = 0xdeafbeefdeaddead;
++	buf.item.size = sizeof(buf.item);
++	buf.item.type = KDBUS_ITEM_ID_REMOVE;
++	buf.item.chg.id = id;
++
++	/* register match on 2nd connection */
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* remove 2nd connection again */
++	kdbus_conn_free(conn);
++
++	/* 1st connection should have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_REMOVE);
++	ASSERT_RETURN(msg->items[0].id_change.id == id);
++
++	return TEST_OK;
++}
++
++int kdbus_test_match_replace(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_id_change chg;
++		} item;
++	} buf;
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	size_t id;
++	int ret;
++
++	/* add a match to id_add */
++	ASSERT_RETURN(kdbus_test_match_id_add(env) == TEST_OK);
++
++	/* do a replace of the match from id_add to id_remove */
++	memset(&buf, 0, sizeof(buf));
++
++	buf.cmd.size = sizeof(buf);
++	buf.cmd.cookie = 0xdeafbeefdeaddead;
++	buf.cmd.flags = KDBUS_MATCH_REPLACE;
++	buf.item.size = sizeof(buf.item);
++	buf.item.type = KDBUS_ITEM_ID_REMOVE;
++	buf.item.chg.id = KDBUS_MATCH_ID_ANY;
++
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++
++	/* create 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++	id = conn->id;
++
++	/* 1st connection should _not_ have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret != 0);
++
++	/* remove 2nd connection */
++	kdbus_conn_free(conn);
++
++	/* 1st connection should _now_ have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_ID_REMOVE);
++	ASSERT_RETURN(msg->items[0].id_change.id == id);
++
++	return TEST_OK;
++}
++
++int kdbus_test_match_name_add(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_name_change chg;
++		} item;
++		char name[64];
++	} buf;
++	struct kdbus_msg *msg;
++	char *name;
++	int ret;
++
++	name = "foo.bla.blaz";
++
++	/* install the match rule */
++	memset(&buf, 0, sizeof(buf));
++	buf.item.type = KDBUS_ITEM_NAME_ADD;
++	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
++	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
++	strncpy(buf.name, name, sizeof(buf.name) - 1);
++	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
++	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
++
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* acquire the name */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* we should have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_ADD);
++	ASSERT_RETURN(msg->items[0].name_change.old_id.id == 0);
++	ASSERT_RETURN(msg->items[0].name_change.new_id.id == env->conn->id);
++	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
++
++	return TEST_OK;
++}
++
++int kdbus_test_match_name_remove(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_name_change chg;
++		} item;
++		char name[64];
++	} buf;
++	struct kdbus_msg *msg;
++	char *name;
++	int ret;
++
++	name = "foo.bla.blaz";
++
++	/* acquire the name */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* install the match rule */
++	memset(&buf, 0, sizeof(buf));
++	buf.item.type = KDBUS_ITEM_NAME_REMOVE;
++	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
++	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
++	strncpy(buf.name, name, sizeof(buf.name) - 1);
++	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
++	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
++
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* release the name again */
++	kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* we should have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_REMOVE);
++	ASSERT_RETURN(msg->items[0].name_change.old_id.id == env->conn->id);
++	ASSERT_RETURN(msg->items[0].name_change.new_id.id == 0);
++	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
++
++	return TEST_OK;
++}
++
++int kdbus_test_match_name_change(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			struct kdbus_notify_name_change chg;
++		} item;
++		char name[64];
++	} buf;
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	uint64_t flags;
++	char *name = "foo.bla.baz";
++	int ret;
++
++	/* acquire the name */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* install the match rule */
++	memset(&buf, 0, sizeof(buf));
++	buf.item.type = KDBUS_ITEM_NAME_CHANGE;
++	buf.item.chg.old_id.id = KDBUS_MATCH_ID_ANY;
++	buf.item.chg.new_id.id = KDBUS_MATCH_ID_ANY;
++	strncpy(buf.name, name, sizeof(buf.name) - 1);
++	buf.item.size = sizeof(buf.item) + strlen(buf.name) + 1;
++	buf.cmd.size = sizeof(buf.cmd) + buf.item.size;
++
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* queue the 2nd connection as a waiting owner of the name */
++	flags = KDBUS_NAME_QUEUE;
++	ret = kdbus_name_acquire(conn, name, &flags);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(flags & KDBUS_NAME_IN_QUEUE);
++
++	/* release name from 1st connection */
++	ret = kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* we should have received a notification */
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ASSERT_RETURN(msg->items[0].type == KDBUS_ITEM_NAME_CHANGE);
++	ASSERT_RETURN(msg->items[0].name_change.old_id.id == env->conn->id);
++	ASSERT_RETURN(msg->items[0].name_change.new_id.id == conn->id);
++	ASSERT_RETURN(strcmp(msg->items[0].name_change.name, name) == 0);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++static int send_bloom_filter(const struct kdbus_conn *conn,
++			     uint64_t cookie,
++			     const uint8_t *filter,
++			     size_t filter_size,
++			     uint64_t filter_generation)
++{
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_msg *msg;
++	struct kdbus_item *item;
++	uint64_t size;
++	int ret;
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) + filter_size;
++
++	msg = alloca(size);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = conn->id;
++	msg->dst_id = KDBUS_DST_ID_BROADCAST;
++	msg->flags = KDBUS_MSG_SIGNAL;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++	msg->cookie = cookie;
++
++	item = msg->items;
++	item->type = KDBUS_ITEM_BLOOM_FILTER;
++	item->size = KDBUS_ITEM_SIZE(sizeof(struct kdbus_bloom_filter)) +
++				filter_size;
++
++	item->bloom_filter.generation = filter_generation;
++	memcpy(item->bloom_filter.data, filter, filter_size);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(conn->fd, &cmd);
++	if (ret < 0) {
++		kdbus_printf("error sending message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	return 0;
++}
++
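++/*
++ * A sketch of the matching semantics exercised below (our reading of
++ * the ABI as the assertions rely on it): a broadcast carrying a bloom
++ * filter is delivered to a peer only if, for the generation in
++ * question, every bit set in the peer's KDBUS_ITEM_BLOOM_MASK is also
++ * set in the sender's KDBUS_ITEM_BLOOM_FILTER. A zeroed filter thus
++ * matches nothing, while a filter with extra bits set still matches.
++ */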
++int kdbus_test_match_bloom(struct kdbus_test_env *env)
++{
++	struct {
++		struct kdbus_cmd_match cmd;
++		struct {
++			uint64_t size;
++			uint64_t type;
++			uint8_t data_gen0[64];
++			uint8_t data_gen1[64];
++		} item;
++	} buf;
++	struct kdbus_conn *conn;
++	struct kdbus_msg *msg;
++	uint64_t cookie = 0xf000f00f;
++	uint8_t filter[64];
++	int ret;
++
++	/* install the match rule */
++	memset(&buf, 0, sizeof(buf));
++	buf.cmd.size = sizeof(buf);
++
++	buf.item.size = sizeof(buf.item);
++	buf.item.type = KDBUS_ITEM_BLOOM_MASK;
++	buf.item.data_gen0[0] = 0x55;
++	buf.item.data_gen0[63] = 0x80;
++
++	buf.item.data_gen1[1] = 0xaa;
++	buf.item.data_gen1[9] = 0x02;
++
++	ret = kdbus_cmd_match_add(env->conn->fd, &buf.cmd);
++	ASSERT_RETURN(ret == 0);
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* a message with a zeroed-out filter must not reach the other peer */
++	memset(filter, 0, sizeof(filter));
++	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/* now set the filter to the connection's mask and expect success */
++	filter[0] = 0x55;
++	filter[63] = 0x80;
++	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	/* broaden the filter and try again. this should also succeed. */
++	filter[0] = 0xff;
++	filter[8] = 0xff;
++	filter[63] = 0xff;
++	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 0);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	/* the same filter must not match against bloom generation 1 */
++	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/* set a different filter and try again */
++	filter[1] = 0xaa;
++	filter[9] = 0x02;
++	ret = send_bloom_filter(conn, ++cookie, filter, sizeof(filter), 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(env->conn, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-message.c b/tools/testing/selftests/kdbus/test-message.c
+new file mode 100644
+index 0000000..f1615da
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-message.c
+@@ -0,0 +1,731 @@
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <time.h>
++#include <stdbool.h>
++#include <sys/eventfd.h>
++#include <sys/types.h>
++#include <sys/wait.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
++/* maximum number of queued messages from the same individual user */
++#define KDBUS_CONN_MAX_MSGS			256
++
++/* maximum number of queued requests waiting for a reply */
++#define KDBUS_CONN_MAX_REQUESTS_PENDING		128
++
++/* maximum message payload size */
++#define KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE		(2 * 1024UL * 1024UL)
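++
++/*
++ * Note: these constants mirror the kernel's internal quota limits;
++ * the quota tests below assume they match the running kdbus
++ * implementation.
++ */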
++
++int kdbus_test_message_basic(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	struct kdbus_conn *sender;
++	struct kdbus_msg *msg;
++	uint64_t cookie = 0x1234abcd5678eeff;
++	uint64_t offset;
++	int ret;
++
++	sender = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(sender != NULL);
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	ret = kdbus_add_match_empty(conn);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_empty(sender);
++	ASSERT_RETURN(ret == 0);
++
++	/* send over 1st connection */
++	ret = kdbus_msg_send(sender, NULL, cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	/* Make sure that we do not get our own broadcasts */
++	ret = kdbus_msg_recv(sender, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/* ... and receive on the 2nd */
++	ret = kdbus_msg_recv_poll(conn, 100, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	/* Msgs that expect a reply must have timeout and cookie */
++	ret = kdbus_msg_send(sender, NULL, 0, KDBUS_MSG_EXPECT_REPLY,
++			     0, 0, conn->id);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* Faked replies with a valid reply cookie are rejected */
++	ret = kdbus_msg_send_reply(conn, time(NULL) ^ cookie, sender->id);
++	ASSERT_RETURN(ret == -EPERM);
++
++	ret = kdbus_free(conn, offset);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(sender);
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++static int msg_recv_prio(struct kdbus_conn *conn,
++			 int64_t requested_prio,
++			 int64_t expected_prio)
++{
++	struct kdbus_cmd_recv recv = {
++		.size = sizeof(recv),
++		.flags = KDBUS_RECV_USE_PRIORITY,
++		.priority = requested_prio,
++	};
++	struct kdbus_msg *msg;
++	int ret;
++
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	if (ret < 0) {
++		kdbus_printf("error receiving message: %d (%m)\n", -errno);
++		return ret;
++	}
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++	kdbus_msg_dump(conn, msg);
++
++	if (msg->priority != expected_prio) {
++		kdbus_printf("expected message prio %lld, got %lld\n",
++			     (long long) expected_prio,
++			     (long long) msg->priority);
++		return -EINVAL;
++	}
++
++	kdbus_msg_free(msg);
++	ret = kdbus_free(conn, recv.msg.offset);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
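++/*
++ * A sketch of the priority semantics the assertions below rely on:
++ * with KDBUS_RECV_USE_PRIORITY, only queued messages with a priority
++ * value less than or equal to the requested one are eligible, and
++ * the numerically lowest priority is dequeued first; -EAGAIN means
++ * no eligible message is queued.
++ */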
++int kdbus_test_message_prio(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *a, *b;
++	uint64_t cookie = 0;
++
++	a = kdbus_hello(env->buspath, 0, NULL, 0);
++	b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(a && b);
++
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   25, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -600, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   10, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -35, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -100, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   20, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -15, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -800, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -150, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,   10, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0, -800, a->id) == 0);
++	ASSERT_RETURN(kdbus_msg_send(b, NULL, ++cookie, 0, 0,  -10, a->id) == 0);
++
++	ASSERT_RETURN(msg_recv_prio(a, -200, -800) == 0);
++	ASSERT_RETURN(msg_recv_prio(a, -100, -800) == 0);
++	ASSERT_RETURN(msg_recv_prio(a, -400, -600) == 0);
++	ASSERT_RETURN(msg_recv_prio(a, -400, -600) == -EAGAIN);
++	ASSERT_RETURN(msg_recv_prio(a, 10, -150) == 0);
++	ASSERT_RETURN(msg_recv_prio(a, 10, -100) == 0);
++
++	kdbus_printf("--- get priority (all)\n");
++	ASSERT_RETURN(kdbus_msg_recv(a, NULL, NULL) == 0);
++
++	kdbus_conn_free(a);
++	kdbus_conn_free(b);
++
++	return TEST_OK;
++}
++
++static int kdbus_test_notify_kernel_quota(struct kdbus_test_env *env)
++{
++	int ret;
++	unsigned int i;
++	struct kdbus_conn *conn;
++	struct kdbus_conn *reader;
++	struct kdbus_msg *msg = NULL;
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++
++	reader = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(reader);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	/* Register for ID signals */
++	ret = kdbus_add_match_id(reader, 0x1, KDBUS_ITEM_ID_ADD,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_id(reader, 0x2, KDBUS_ITEM_ID_REMOVE,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	/* Each iteration two notifications: add and remove ID */
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS / 2; i++) {
++		struct kdbus_conn *notifier;
++
++		notifier = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_RETURN(notifier);
++
++		kdbus_conn_free(notifier);
++	}
++
++	/*
++	 * Now the reader queue is full of kernel notifications,
++	 * but as a user we still have room to push our messages.
++	 */
++	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0, 0, reader->id);
++	ASSERT_RETURN(ret == 0);
++
++	/* More ID kernel notifications that will be lost */
++	kdbus_conn_free(conn);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	kdbus_conn_free(conn);
++
++	/*
++	 * We lost only 3 packets, since only signal msgs are
++	 * accounted against the quota: the ID remove notification
++	 * for the first connection and the ID add/remove pair for
++	 * the second one.
++	 */
++	ret = kdbus_cmd_recv(reader->fd, &recv);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv.return_flags & KDBUS_RECV_RETURN_DROPPED_MSGS);
++	ASSERT_RETURN(recv.dropped_msgs == 3);
++
++	msg = (struct kdbus_msg *)(reader->buf + recv.msg.offset);
++	kdbus_msg_free(msg);
++
++	/* Read our queue */
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS - 1; i++) {
++		memset(&recv, 0, sizeof(recv));
++		recv.size = sizeof(recv);
++
++		ret = kdbus_cmd_recv(reader->fd, &recv);
++		ASSERT_RETURN(ret == 0);
++		ASSERT_RETURN(!(recv.return_flags &
++			        KDBUS_RECV_RETURN_DROPPED_MSGS));
++
++		msg = (struct kdbus_msg *)(reader->buf + recv.msg.offset);
++		kdbus_msg_free(msg);
++	}
++
++	ret = kdbus_msg_recv(reader, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(reader, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	kdbus_conn_free(reader);
++
++	return 0;
++}
++
++/* Return the number of messages successfully sent */
++static int kdbus_fill_conn_queue(struct kdbus_conn *conn_src,
++				 uint64_t dst_id,
++				 unsigned int max_msgs)
++{
++	unsigned int i;
++	uint64_t cookie = 0;
++	size_t size;
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_msg *msg;
++	int ret;
++
++	size = sizeof(struct kdbus_msg);
++	msg = malloc(size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = conn_src->id;
++	msg->dst_id = dst_id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	for (i = 0; i < max_msgs; i++) {
++		msg->cookie = cookie++;
++		ret = kdbus_cmd_send(conn_src->fd, &cmd);
++		if (ret < 0)
++			break;
++	}
++
++	free(msg);
++
++	return i;
++}
++
++static int kdbus_test_activator_quota(struct kdbus_test_env *env)
++{
++	int ret;
++	unsigned int i;
++	unsigned int activator_msgs_count = 0;
++	uint64_t cookie = time(NULL);
++	struct kdbus_conn *conn;
++	struct kdbus_conn *sender;
++	struct kdbus_conn *activator;
++	struct kdbus_msg *msg;
++	uint64_t flags = KDBUS_NAME_REPLACE_EXISTING;
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++	struct kdbus_policy_access access = {
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	activator = kdbus_hello_activator(env->buspath, "foo.test.activator",
++					  &access, 1);
++	ASSERT_RETURN(activator);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	sender = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn && sender);
++
++	ret = kdbus_list(sender, KDBUS_LIST_NAMES |
++				 KDBUS_LIST_UNIQUE |
++				 KDBUS_LIST_ACTIVATORS |
++				 KDBUS_LIST_QUEUED);
++	ASSERT_RETURN(ret == 0);
++
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
++		ret = kdbus_msg_send(sender, "foo.test.activator",
++				     cookie++, 0, 0, 0,
++				     KDBUS_DST_ID_NAME);
++		if (ret < 0)
++			break;
++		activator_msgs_count++;
++	}
++
++	/* we must have at least sent one message */
++	ASSERT_RETURN_VAL(i > 0, -errno);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	/* Good, activator queue is full now */
++
++	/* ENXIO on direct send (activators can never be addressed by ID) */
++	ret = kdbus_msg_send(conn, NULL, cookie++, 0, 0, 0, activator->id);
++	ASSERT_RETURN(ret == -ENXIO);
++
++	/* can't queue more */
++	ret = kdbus_msg_send(conn, "foo.test.activator", cookie++,
++			     0, 0, 0, KDBUS_DST_ID_NAME);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	/* no match installed, so the broadcast will not increment dropped_msgs */
++	ret = kdbus_msg_send(sender, NULL, cookie++, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	/* Check activator queue */
++	ret = kdbus_cmd_recv(activator->fd, &recv);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv.dropped_msgs == 0);
++
++	activator_msgs_count--;
++
++	msg = (struct kdbus_msg *)(activator->buf + recv.msg.offset);
++	kdbus_msg_free(msg);
++
++
++	/* Stage 1) of the test: check the pool memory quota */
++
++	/* Consume the connection pool memory */
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
++		ret = kdbus_msg_send(sender, NULL,
++				     cookie++, 0, 0, 0, conn->id);
++		if (ret < 0)
++			break;
++	}
++
++	/* consume one message, so later at least one can be moved */
++	memset(&recv, 0, sizeof(recv));
++	recv.size = sizeof(recv);
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv.dropped_msgs == 0);
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++	kdbus_msg_free(msg);
++
++	/* Try to acquire the name now */
++	ret = kdbus_name_acquire(conn, "foo.test.activator", &flags);
++	ASSERT_RETURN(ret == 0);
++
++	/* try to read messages and see if we have lost some */
++	memset(&recv, 0, sizeof(recv));
++	recv.size = sizeof(recv);
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv.dropped_msgs != 0);
++
++	/* number of dropped msgs < received ones (at least one was moved) */
++	ASSERT_RETURN(recv.dropped_msgs < activator_msgs_count);
++
++	/* Deduct the number of dropped msgs from the activator msgs */
++	activator_msgs_count -= recv.dropped_msgs;
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++	kdbus_msg_free(msg);
++
++	/*
++	 * Release the name and hand it back to activator, now
++	 * we should have 'activator_msgs_count' msgs again in
++	 * the activator queue
++	 */
++	ret = kdbus_name_release(conn, "foo.test.activator");
++	ASSERT_RETURN(ret == 0);
++
++	/* make sure that we got our previous activator msgs */
++	ret = kdbus_msg_recv(activator, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->src_id == sender->id);
++
++	activator_msgs_count--;
++
++	kdbus_msg_free(msg);
++
++
++	/* Stage 2) of the test: check the max message quota */
++
++	/* Empty conn queue */
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS; i++) {
++		ret = kdbus_msg_recv(conn, NULL, NULL);
++		if (ret == -EAGAIN)
++			break;
++	}
++
++	/* fill queue with max msgs quota */
++	ret = kdbus_fill_conn_queue(sender, conn->id, KDBUS_CONN_MAX_MSGS);
++	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
++
++	/* This one is lost but it is not accounted */
++	ret = kdbus_msg_send(sender, NULL,
++			     cookie++, 0, 0, 0, conn->id);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	/* Acquire the name again */
++	ret = kdbus_name_acquire(conn, "foo.test.activator", &flags);
++	ASSERT_RETURN(ret == 0);
++
++	memset(&recv, 0, sizeof(recv));
++	recv.size = sizeof(recv);
++
++	/*
++	 * Try to read messages and make sure that we have lost all
++	 * the activator messages due to quota checks. Our queue is
++	 * already full.
++	 */
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv.dropped_msgs == activator_msgs_count);
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++	kdbus_msg_free(msg);
++
++	kdbus_conn_free(sender);
++	kdbus_conn_free(conn);
++	kdbus_conn_free(activator);
++
++	return 0;
++}
++
++static int kdbus_test_expected_reply_quota(struct kdbus_test_env *env)
++{
++	int ret;
++	unsigned int i, n;
++	unsigned int count;
++	uint64_t cookie = 0x1234abcd5678eeff;
++	struct kdbus_conn *conn;
++	struct kdbus_conn *connections[9];
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	for (i = 0; i < 9; i++) {
++		connections[i] = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_RETURN(connections[i]);
++	}
++
++	count = 0;
++	/* Send 16 messages to 8 different connections */
++	for (i = 0; i < 8; i++) {
++		for (n = 0; n < 16; n++) {
++			ret = kdbus_msg_send(conn, NULL, cookie++,
++					     KDBUS_MSG_EXPECT_REPLY,
++					     100000000ULL, 0,
++					     connections[i]->id);
++			if (ret < 0)
++				break;
++
++			count++;
++		}
++	}
++
++	/*
++	 * We should have queued exactly
++	 * KDBUS_CONN_MAX_REQUESTS_PENDING method calls
++	 * (8 peers * 16 messages = 128)
++	 */
++	ASSERT_RETURN(count == KDBUS_CONN_MAX_REQUESTS_PENDING);
++
++	/*
++	 * Now try to send a message to the last connection;
++	 * since we have reached KDBUS_CONN_MAX_REQUESTS_PENDING,
++	 * no further requests are allowed
++	 */
++	ret = kdbus_msg_send(conn, NULL, cookie++, KDBUS_MSG_EXPECT_REPLY,
++			     1000000000ULL, 0, connections[8]->id);
++	ASSERT_RETURN(ret == -EMLINK);
++
++	for (i = 0; i < 9; i++)
++		kdbus_conn_free(connections[i]);
++
++	kdbus_conn_free(conn);
++
++	return 0;
++}
++
++int kdbus_test_pool_quota(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *a, *b, *c;
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_item *item;
++	struct kdbus_msg *recv_msg;
++	struct kdbus_msg *msg;
++	uint64_t cookie = time(NULL);
++	uint64_t size;
++	unsigned int i;
++	char *payload;
++	int ret;
++
++	/* just a guard */
++	if (POOL_SIZE <= KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE ||
++	    POOL_SIZE % KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE != 0)
++		return 0;
++
++	payload = calloc(KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE, sizeof(char));
++	ASSERT_RETURN_VAL(payload, -ENOMEM);
++
++	a = kdbus_hello(env->buspath, 0, NULL, 0);
++	b = kdbus_hello(env->buspath, 0, NULL, 0);
++	c = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(a && b && c);
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	msg = malloc(size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = a->id;
++	msg->dst_id = c->id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	item = msg->items;
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = (uintptr_t)payload;
++	item->vec.size = KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE;
++	item = KDBUS_ITEM_NEXT(item);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	/*
++	 * Send messages of 2097248 bytes each; a user is only allowed
++	 * to consume 33% of half of the pool's free space, where the
++	 * already used space is accounted as free space
++	 */
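++	/*
++	 * Worked example of the rule above (illustrative only, assuming
++	 * the 16 MiB POOL_SIZE these tests are built with): half the
++	 * pool is 8 MiB and a third of that is ~2.66 MiB, so a single
++	 * ~2 MiB message fits, while queueing a second one exceeds the
++	 * per-user share and fails with -ENOBUFS.
++	 */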
++	size += KDBUS_MSG_MAX_PAYLOAD_VEC_SIZE;
++	for (i = size; i < (POOL_SIZE / 2 / 3); i += size) {
++		msg->cookie = cookie++;
++
++		ret = kdbus_cmd_send(a->fd, &cmd);
++		ASSERT_RETURN_VAL(ret == 0, ret);
++	}
++
++	/* Try to get more than 33% */
++	msg->cookie = cookie++;
++	ret = kdbus_cmd_send(a->fd, &cmd);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	/* We still can pass small messages */
++	ret = kdbus_msg_send(b, NULL, cookie++, 0, 0, 0, c->id);
++	ASSERT_RETURN(ret == 0);
++
++	for (i = size; i < (POOL_SIZE / 2 / 3); i += size) {
++		ret = kdbus_msg_recv(c, &recv_msg, NULL);
++		ASSERT_RETURN(ret == 0);
++		ASSERT_RETURN(recv_msg->src_id == a->id);
++
++		kdbus_msg_free(recv_msg);
++	}
++
++	ret = kdbus_msg_recv(c, &recv_msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(recv_msg->src_id == b->id);
++
++	kdbus_msg_free(recv_msg);
++
++	ret = kdbus_msg_recv(c, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	free(msg);
++	free(payload);
++
++	kdbus_conn_free(c);
++	kdbus_conn_free(b);
++	kdbus_conn_free(a);
++
++	return 0;
++}
++
++int kdbus_test_message_quota(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *a, *b;
++	uint64_t cookie = 0;
++	int ret;
++	int i;
++
++	ret = kdbus_test_activator_quota(env);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_test_notify_kernel_quota(env);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_test_pool_quota(env);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_test_expected_reply_quota(env);
++	ASSERT_RETURN(ret == 0);
++
++	a = kdbus_hello(env->buspath, 0, NULL, 0);
++	b = kdbus_hello(env->buspath, 0, NULL, 0);
++
++	ret = kdbus_fill_conn_queue(b, a->id, KDBUS_CONN_MAX_MSGS);
++	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
++
++	ret = kdbus_msg_send(b, NULL, ++cookie, 0, 0, 0, a->id);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	for (i = 0; i < KDBUS_CONN_MAX_MSGS; ++i) {
++		ret = kdbus_msg_recv(a, NULL, NULL);
++		ASSERT_RETURN(ret == 0);
++	}
++
++	ret = kdbus_msg_recv(a, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	ret = kdbus_fill_conn_queue(b, a->id, KDBUS_CONN_MAX_MSGS + 1);
++	ASSERT_RETURN(ret == KDBUS_CONN_MAX_MSGS);
++
++	ret = kdbus_msg_send(b, NULL, ++cookie, 0, 0, 0, a->id);
++	ASSERT_RETURN(ret == -ENOBUFS);
++
++	kdbus_conn_free(a);
++	kdbus_conn_free(b);
++
++	return TEST_OK;
++}
++
++int kdbus_test_memory_access(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *a, *b;
++	struct kdbus_cmd_send cmd = {};
++	struct kdbus_item *item;
++	struct kdbus_msg *msg;
++	uint64_t test_addr = 0;
++	char line[256];
++	uint64_t size;
++	FILE *f;
++	int ret;
++
++	/*
++	 * Search in /proc/kallsyms for the address of a kernel symbol that
++	 * should always be there, regardless of the config. Use that address
++	 * in a PAYLOAD_VEC item and make sure it's inaccessible.
++	 */
++
++	f = fopen("/proc/kallsyms", "r");
++	if (!f)
++		return TEST_SKIP;
++
++	while (fgets(line, sizeof(line), f)) {
++		char *s = line;
++
++		if (!strsep(&s, " "))
++			continue;
++
++		if (!strsep(&s, " "))
++			continue;
++
++		if (!strncmp(s, "mutex_lock", 10)) {
++			test_addr = strtoull(line, NULL, 16);
++			break;
++		}
++	}
++
++	fclose(f);
++
++	if (!test_addr)
++		return TEST_SKIP;
++
++	a = kdbus_hello(env->buspath, 0, NULL, 0);
++	b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(a && b);
++
++	size = sizeof(struct kdbus_msg);
++	size += KDBUS_ITEM_SIZE(sizeof(struct kdbus_vec));
++
++	msg = alloca(size);
++	ASSERT_RETURN_VAL(msg, -ENOMEM);
++
++	memset(msg, 0, size);
++	msg->size = size;
++	msg->src_id = a->id;
++	msg->dst_id = b->id;
++	msg->payload_type = KDBUS_PAYLOAD_DBUS;
++
++	item = msg->items;
++	item->type = KDBUS_ITEM_PAYLOAD_VEC;
++	item->size = KDBUS_ITEM_HEADER_SIZE + sizeof(struct kdbus_vec);
++	item->vec.address = test_addr;
++	item->vec.size = sizeof(void*);
++	item = KDBUS_ITEM_NEXT(item);
++
++	cmd.size = sizeof(cmd);
++	cmd.msg_address = (uintptr_t)msg;
++
++	ret = kdbus_cmd_send(a->fd, &cmd);
++	ASSERT_RETURN(ret == -EFAULT);
++
++	kdbus_conn_free(b);
++	kdbus_conn_free(a);
++
++	return 0;
++}
+diff --git a/tools/testing/selftests/kdbus/test-metadata-ns.c b/tools/testing/selftests/kdbus/test-metadata-ns.c
+new file mode 100644
+index 0000000..2cb1d4d
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-metadata-ns.c
+@@ -0,0 +1,506 @@
++/*
++ * Test metadata in new namespaces. Even though our tests can run
++ * in a namespaced setup, this test is necessary so we can inspect
++ * metadata on the same kdbusfs but across multiple namespaces.
++ */
++
++#include <stdio.h>
++#include <string.h>
++#include <sched.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <signal.h>
++#include <sys/wait.h>
++#include <sys/prctl.h>
++#include <sys/eventfd.h>
++#include <sys/syscall.h>
++#include <sys/capability.h>
++#include <linux/sched.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++static const struct kdbus_creds privileged_creds = {};
++
++static const struct kdbus_creds unmapped_creds = {
++	.uid	= UNPRIV_UID,
++	.euid	= UNPRIV_UID,
++	.suid	= UNPRIV_UID,
++	.fsuid	= UNPRIV_UID,
++	.gid	= UNPRIV_GID,
++	.egid	= UNPRIV_GID,
++	.sgid	= UNPRIV_GID,
++	.fsgid	= UNPRIV_GID,
++};
++
++static const struct kdbus_pids unmapped_pids = {};
++
++/* Get only the first item */
++static struct kdbus_item *kdbus_get_item(struct kdbus_msg *msg,
++					 uint64_t type)
++{
++	struct kdbus_item *item;
++
++	KDBUS_ITEM_FOREACH(item, msg, items)
++		if (item->type == type)
++			return item;
++
++	return NULL;
++}
++
++static int kdbus_match_kdbus_creds(struct kdbus_msg *msg,
++				   const struct kdbus_creds *expected_creds)
++{
++	struct kdbus_item *item;
++
++	item = kdbus_get_item(msg, KDBUS_ITEM_CREDS);
++	ASSERT_RETURN(item);
++
++	ASSERT_RETURN(memcmp(&item->creds, expected_creds,
++			     sizeof(struct kdbus_creds)) == 0);
++
++	return 0;
++}
++
++static int kdbus_match_kdbus_pids(struct kdbus_msg *msg,
++				  const struct kdbus_pids *expected_pids)
++{
++	struct kdbus_item *item;
++
++	item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
++	ASSERT_RETURN(item);
++
++	ASSERT_RETURN(memcmp(&item->pids, expected_pids,
++			     sizeof(struct kdbus_pids)) == 0);
++
++	return 0;
++}
++
++static int __kdbus_clone_userns_test(const char *bus,
++				     struct kdbus_conn *conn,
++				     uint64_t grandpa_pid,
++				     int signal_fd)
++{
++	int clone_ret;
++	int ret;
++	struct kdbus_msg *msg = NULL;
++	const struct kdbus_item *item;
++	uint64_t cookie = time(NULL) ^ 0xdeadbeef;
++	struct kdbus_conn *unpriv_conn = NULL;
++	struct kdbus_pids parent_pids = {
++		.pid = getppid(),
++		.tid = getppid(),
++		.ppid = grandpa_pid,
++	};
++
++	ret = drop_privileges(UNPRIV_UID, UNPRIV_GID);
++	ASSERT_EXIT(ret == 0);
++
++	unpriv_conn = kdbus_hello(bus, 0, NULL, 0);
++	ASSERT_EXIT(unpriv_conn);
++
++	ret = kdbus_add_match_empty(unpriv_conn);
++	ASSERT_EXIT(ret == 0);
++
++	/*
++	 * ping privileged connection from this new unprivileged
++	 * one
++	 */
++
++	ret = kdbus_msg_send(unpriv_conn, NULL, cookie, 0, 0,
++			     0, conn->id);
++	ASSERT_EXIT(ret == 0);
++
++	/*
++	 * Since we just dropped privileges, the dumpable flag was
++	 * cleared, which makes /proc/$clone_child/uid_map owned by
++	 * root; hence any userns uid mapping will fail with -EPERM,
++	 * since the mapping would be done by uid 65534.
++	 *
++	 * To avoid this, set the dumpable flag again, which makes
++	 * procfs update the owner of the /proc/$clone_child/ inodes
++	 * to 65534.
++	 *
++	 * With that in place we will be able to write to
++	 * /proc/$clone_child/uid_map as uid 65534 and map uid 65534
++	 * to 0 inside the user namespace.
++	 */
++	ret = prctl(PR_SET_DUMPABLE, SUID_DUMP_USER);
++	ASSERT_EXIT(ret == 0);
++
++	/* Make child privileged in its new userns and run tests */
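++
++	/*
++	 * For reference (a sketch, not normative): userns_map_uid_gid()
++	 * in the parent-setup step below is expected to write the
++	 * "0 65534 1" strings to /proc/<pid>/uid_map and
++	 * /proc/<pid>/gid_map, mapping uid/gid 65534 outside the new
++	 * namespace to 0 inside it.
++	 */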
++
++	ret = RUN_CLONE_CHILD(&clone_ret,
++			      SIGCHLD | CLONE_NEWUSER | CLONE_NEWPID,
++	({ 0;  /* Clone setup, nothing */ }),
++	({
++		eventfd_t event_status = 0;
++		struct kdbus_conn *userns_conn;
++
++		/* ping connection from the new user namespace */
++		userns_conn = kdbus_hello(bus, 0, NULL, 0);
++		ASSERT_EXIT(userns_conn);
++
++		ret = kdbus_add_match_empty(userns_conn);
++		ASSERT_EXIT(ret == 0);
++
++		cookie++;
++		ret = kdbus_msg_send(userns_conn, NULL, cookie,
++				     0, 0, 0, conn->id);
++		ASSERT_EXIT(ret == 0);
++
++		/* Parent did send */
++		ret = eventfd_read(signal_fd, &event_status);
++		ASSERT_RETURN(ret >= 0 && event_status == 1);
++
++		/*
++		 * Receive from privileged connection
++		 */
++		kdbus_printf("Privileged → unprivileged/privileged "
++			     "in its userns "
++			     "(different userns and pidns):\n");
++		ret = kdbus_msg_recv_poll(userns_conn, 300, &msg, NULL);
++		ASSERT_EXIT(ret == 0);
++		ASSERT_EXIT(msg->dst_id == userns_conn->id);
++
++		/* Different namespaces, no CAPS */
++		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
++		ASSERT_EXIT(item == NULL);
++
++		/* uid/gid not mapped, so we have unpriv cached creds */
++		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * Different pid namespaces. This is the child pidns,
++		 * so it should not see its parent's kdbus_pids
++		 */
++		ret = kdbus_match_kdbus_pids(msg, &unmapped_pids);
++		ASSERT_EXIT(ret == 0);
++
++		kdbus_msg_free(msg);
++
++
++		/*
++		 * Receive broadcast from privileged connection
++		 */
++		kdbus_printf("Privileged → unprivileged/privileged "
++			     "in its userns "
++			     "(different userns and pidns):\n");
++		ret = kdbus_msg_recv_poll(userns_conn, 300, &msg, NULL);
++		ASSERT_EXIT(ret == 0);
++		ASSERT_EXIT(msg->dst_id == KDBUS_DST_ID_BROADCAST);
++
++		/* Different namespaces, no CAPS */
++		item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
++		ASSERT_EXIT(item == NULL);
++
++		/* uid/gid not mapped, so we have unpriv cached creds */
++		ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * Different pid namespaces. This is the child pidns,
++		 * so it should not see its parent's kdbus_pids
++		 */
++		ret = kdbus_match_kdbus_pids(msg, &unmapped_pids);
++		ASSERT_EXIT(ret == 0);
++
++		kdbus_msg_free(msg);
++
++		kdbus_conn_free(userns_conn);
++	}),
++	({
++		/* Parent setup map child uid/gid */
++		ret = userns_map_uid_gid(pid, "0 65534 1", "0 65534 1");
++		ASSERT_EXIT(ret == 0);
++	}),
++	({ 0; }));
++	/* The unprivileged user was not able to create a user namespace */
++	if (clone_ret == -EPERM) {
++		kdbus_printf("-- CLONE_NEWUSER TEST failed for "
++			     "uid: %u\n -- this is expected if your "
++			     "kernel does not allow CLONE_NEWUSER for "
++			     "unprivileged users\n", UNPRIV_UID);
++		ret = 0;
++		goto out;
++	}
++
++	ASSERT_EXIT(ret == 0);
++
++
++	/*
++	 * Receive from privileged connection
++	 */
++	kdbus_printf("\nPrivileged → unprivileged (same namespaces):\n");
++	ret = kdbus_msg_recv_poll(unpriv_conn, 300, &msg, NULL);
++
++	ASSERT_EXIT(ret == 0);
++	ASSERT_EXIT(msg->dst_id == unpriv_conn->id);
++
++	/* will get the privileged creds */
++	ret = kdbus_match_kdbus_creds(msg, &privileged_creds);
++	ASSERT_EXIT(ret == 0);
++
++	/* Same pidns so will get the kdbus_pids */
++	ret = kdbus_match_kdbus_pids(msg, &parent_pids);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_msg_free(msg);
++
++
++	/*
++	 * Receive broadcast from privileged connection
++	 */
++	kdbus_printf("\nPrivileged → unprivileged (same namespaces):\n");
++	ret = kdbus_msg_recv_poll(unpriv_conn, 300, &msg, NULL);
++
++	ASSERT_EXIT(ret == 0);
++	ASSERT_EXIT(msg->dst_id == KDBUS_DST_ID_BROADCAST);
++
++	/* will get the privileged creds */
++	ret = kdbus_match_kdbus_creds(msg, &privileged_creds);
++	ASSERT_EXIT(ret == 0);
++
++	ret = kdbus_match_kdbus_pids(msg, &parent_pids);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_msg_free(msg);
++
++out:
++	kdbus_conn_free(unpriv_conn);
++
++	return ret;
++}
++
++static int kdbus_clone_userns_test(const char *bus,
++				   struct kdbus_conn *conn)
++{
++	int ret;
++	int status;
++	int efd = -1;
++	pid_t pid, ppid;
++	uint64_t unpriv_conn_id = 0;
++	uint64_t userns_conn_id = 0;
++	struct kdbus_msg *msg;
++	const struct kdbus_item *item;
++	struct kdbus_pids expected_pids;
++	struct kdbus_conn *monitor = NULL;
++
++	kdbus_printf("STARTING TEST 'metadata-ns'.\n");
++
++	monitor = kdbus_hello(bus, KDBUS_HELLO_MONITOR, NULL, 0);
++	ASSERT_EXIT(monitor);
++
++	/*
++	 * the parent will signal the child, which lives in its own
++	 * userns, to read its queue
++	 */
++	efd = eventfd(0, EFD_CLOEXEC);
++	ASSERT_RETURN_VAL(efd >= 0, efd);
++
++	ppid = getppid();
++
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, -errno);
++
++	if (pid == 0) {
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		ASSERT_EXIT_VAL(ret == 0, -errno);
++
++		ret = __kdbus_clone_userns_test(bus, conn, ppid, efd);
++		_exit(ret);
++	}
++
++
++	/* Phase 1) privileged receives from unprivileged */
++
++	/*
++	 * Receive from the unprivileged child
++	 */
++	kdbus_printf("\nUnprivileged → privileged (same namespaces):\n");
++	ret = kdbus_msg_recv_poll(conn, 300, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	unpriv_conn_id = msg->src_id;
++
++	/* Unprivileged user */
++	ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
++	ASSERT_RETURN(ret == 0);
++
++	/* Set the expected creds_pids */
++	expected_pids = (struct kdbus_pids) {
++		.pid = pid,
++		.tid = pid,
++		.ppid = getpid(),
++	};
++	ret = kdbus_match_kdbus_pids(msg, &expected_pids);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_msg_free(msg);
++
++
++	/*
++	 * Receive from the unprivileged child that is in its own
++	 * userns and pidns
++	 */
++
++	kdbus_printf("\nUnprivileged/privileged in its userns → privileged "
++		     "(different userns and pidns)\n");
++	ret = kdbus_msg_recv_poll(conn, 300, &msg, NULL);
++	if (ret == -ETIMEDOUT)
++		/* perhaps unprivileged userns is not allowed */
++		goto wait;
++
++	ASSERT_RETURN(ret == 0);
++
++	userns_conn_id = msg->src_id;
++
++	/* We do not share the userns, so no KDBUS_ITEM_CAPS */
++	item = kdbus_get_item(msg, KDBUS_ITEM_CAPS);
++	ASSERT_RETURN(item == NULL);
++
++	/*
++	 * Compare received items; creds must be translated into
++	 * the receiver's user namespace, so the user shows up as
++	 * unprivileged
++	 */
++	ret = kdbus_match_kdbus_creds(msg, &unmapped_creds);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * We should have the kdbus_pids since we are in the parent
++	 * pidns
++	 */
++	item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
++	ASSERT_RETURN(item);
++
++	ASSERT_RETURN(memcmp(&item->pids, &unmapped_pids,
++			     sizeof(struct kdbus_pids)) != 0);
++
++	/*
++	 * Parent pid of the unprivileged/privileged in its userns
++	 * is the unprivileged child pid that was forked here.
++	 */
++	ASSERT_RETURN((uint64_t)pid == item->pids.ppid);
++
++	kdbus_msg_free(msg);
++
++
++	/* Phase 2) The privileged connection now sends 3 packets */
++
++	/*
++	 * Send a unicast to the unprivileged connection
++	 */
++	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
++			     0, unpriv_conn_id);
++	ASSERT_RETURN(ret == 0);
++
++	/* signal to child that is in its userns */
++	ret = eventfd_write(efd, 1);
++	ASSERT_EXIT(ret == 0);
++
++	/*
++	 * Send a unicast to the connection that is unprivileged
++	 * here but privileged in its own userns
++	 */
++	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
++			     0, userns_conn_id);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Send a broadcast to the unprivileged connections
++	 */
++	ret = kdbus_msg_send(conn, NULL, 0xdeadbeef, 0, 0,
++			     0, KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++
++wait:
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN(ret >= 0);
++
++	ASSERT_RETURN(WIFEXITED(status));
++	ASSERT_RETURN(!WEXITSTATUS(status));
++
++	/* Dump monitor queue */
++	kdbus_printf("\n\nMonitor queue:\n");
++	for (;;) {
++		ret = kdbus_msg_recv_poll(monitor, 100, &msg, NULL);
++		if (ret < 0)
++			break;
++
++		if (msg->payload_type == KDBUS_PAYLOAD_DBUS) {
++			/* the parent pidns should see all the pids */
++			item = kdbus_get_item(msg, KDBUS_ITEM_PIDS);
++			ASSERT_RETURN(item);
++
++			ASSERT_RETURN(item->pids.pid != 0 &&
++				      item->pids.tid != 0 &&
++				      item->pids.ppid != 0);
++		}
++
++		kdbus_msg_free(msg);
++	}
++
++	kdbus_conn_free(monitor);
++	close(efd);
++
++	return 0;
++}
++
++int kdbus_test_metadata_ns(struct kdbus_test_env *env)
++{
++	int ret;
++	struct kdbus_conn *holder, *conn;
++	struct kdbus_policy_access policy_access = {
++		/* Allow world so we can inspect metadata in namespace */
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	/*
++	 * We require user namespaces, and all uids/gids should be
++	 * mapped (requiring just the necessary ones would suffice)
++	 */
++	if (!config_user_ns_is_enabled() ||
++	    !all_uids_gids_are_mapped())
++		return TEST_SKIP;
++
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, CAP_SYS_ADMIN, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	/* not enough privileges, skip test */
++	if (!ret)
++		return TEST_SKIP;
++
++	holder = kdbus_hello_registrar(env->buspath, "com.example.metadata",
++				       &policy_access, 1,
++				       KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(holder);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_add_match_empty(conn);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(conn, "com.example.metadata", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	ret = kdbus_clone_userns_test(env->buspath, conn);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(holder);
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-monitor.c b/tools/testing/selftests/kdbus/test-monitor.c
+new file mode 100644
+index 0000000..e00d738
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-monitor.c
+@@ -0,0 +1,176 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <errno.h>
++#include <assert.h>
++#include <signal.h>
++#include <sys/time.h>
++#include <sys/mman.h>
++#include <sys/capability.h>
++#include <sys/wait.h>
++
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
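++/*
++ * A brief sketch of the monitor semantics exercised below: a monitor
++ * connection eavesdrops on all bus traffic, but it cannot acquire
++ * names or install matches, and its connect/disconnect does not
++ * generate ID notifications for other peers.
++ */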
++int kdbus_test_monitor(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *monitor, *conn;
++	unsigned int cookie = 0xdeadbeef;
++	struct kdbus_msg *msg;
++	uint64_t offset = 0;
++	int ret;
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	/* add matches to make sure the monitor does not trigger an ID add
++	 * or remove notification on connect and disconnect, respectively.
++	 */
++	ret = kdbus_add_match_id(conn, 0x1, KDBUS_ITEM_ID_ADD,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_add_match_id(conn, 0x2, KDBUS_ITEM_ID_REMOVE,
++				 KDBUS_MATCH_ID_ANY);
++	ASSERT_RETURN(ret == 0);
++
++	/* register a monitor */
++	monitor = kdbus_hello(env->buspath, KDBUS_HELLO_MONITOR, NULL, 0);
++	ASSERT_RETURN(monitor);
++
++	/* make sure we did not receive a monitor connect notification */
++	ret = kdbus_msg_recv(conn, &msg, &offset);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/* check that a monitor cannot acquire a name */
++	ret = kdbus_name_acquire(monitor, "foo.bar.baz", NULL);
++	ASSERT_RETURN(ret == -EOPNOTSUPP);
++
++	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0,  0, conn->id);
++	ASSERT_RETURN(ret == 0);
++
++	/* the recipient should have gotten the message */
++	ret = kdbus_msg_recv(conn, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++	kdbus_msg_free(msg);
++	kdbus_free(conn, offset);
++
++	/* and so should the monitor */
++	ret = kdbus_msg_recv(monitor, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++	kdbus_free(monitor, offset);
++
++	/* Installing matches for monitors must fail */
++	ret = kdbus_add_match_empty(monitor);
++	ASSERT_RETURN(ret == -EOPNOTSUPP);
++
++	cookie++;
++	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	/* The monitor should get the message. */
++	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++	kdbus_free(monitor, offset);
++
++	/*
++	 * Since we are the only monitor, update the attach flags
++	 * and declare that we are not interested in any attached
++	 * items on receive
++	 */
++
++	ret = kdbus_conn_update_attach_flags(monitor,
++					     _KDBUS_ATTACH_ALL,
++					     0);
++	ASSERT_RETURN(ret == 0);
++
++	cookie++;
++	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	ret = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_msg_free(msg);
++	kdbus_free(monitor, offset);
++
++	/*
++	 * Now we are interested in KDBUS_ITEM_TIMESTAMP and
++	 * KDBUS_ITEM_CREDS
++	 */
++	ret = kdbus_conn_update_attach_flags(monitor,
++					     _KDBUS_ATTACH_ALL,
++					     KDBUS_ATTACH_TIMESTAMP |
++					     KDBUS_ATTACH_CREDS);
++	ASSERT_RETURN(ret == 0);
++
++	cookie++;
++	ret = kdbus_msg_send(env->conn, NULL, cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv_poll(monitor, 100, &msg, &offset);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == cookie);
++
++	ret = kdbus_item_in_message(msg, KDBUS_ITEM_TIMESTAMP);
++	ASSERT_RETURN(ret == 1);
++
++	ret = kdbus_item_in_message(msg, KDBUS_ITEM_CREDS);
++	ASSERT_RETURN(ret == 1);
++
++	/* the KDBUS_ITEM_PID_COMM was not requested */
++	ret = kdbus_item_in_message(msg, KDBUS_ITEM_PID_COMM);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_msg_free(msg);
++	kdbus_free(monitor, offset);
++
++	kdbus_conn_free(monitor);
++	/* make sure we did not receive a monitor disconnect notification */
++	ret = kdbus_msg_recv(conn, &msg, &offset);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	kdbus_conn_free(conn);
++
++	/* Make sure that becoming a monitor as unprivileged is not allowed */
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	if (ret && all_uids_gids_are_mapped()) {
++		ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_UID, ({
++			monitor = kdbus_hello(env->buspath,
++					      KDBUS_HELLO_MONITOR,
++					      NULL, 0);
++			ASSERT_EXIT(!monitor && errno == EPERM);
++
++			_exit(EXIT_SUCCESS);
++		}),
++		({ 0; }));
++		ASSERT_RETURN(ret == 0);
++	}
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-names.c b/tools/testing/selftests/kdbus/test-names.c
+new file mode 100644
+index 0000000..66ebb47
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-names.c
+@@ -0,0 +1,194 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <limits.h>
++#include <getopt.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++#include "kdbus-test.h"
++
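++/*
++ * Walk the name registry via KDBUS_CMD_LIST and check whether @conn
++ * owns @needle. The returned list lives in the connection's pool, so
++ * the slice is released with kdbus_free() before returning.
++ */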
++static int conn_is_name_owner(const struct kdbus_conn *conn,
++			      const char *needle)
++{
++	struct kdbus_cmd_list cmd_list = { .size = sizeof(cmd_list) };
++	struct kdbus_info *name, *list;
++	bool found = false;
++	int ret;
++
++	cmd_list.flags = KDBUS_LIST_NAMES;
++
++	ret = kdbus_cmd_list(conn->fd, &cmd_list);
++	ASSERT_RETURN(ret == 0);
++
++	list = (struct kdbus_info *)(conn->buf + cmd_list.offset);
++	KDBUS_FOREACH(name, list, cmd_list.list_size) {
++		struct kdbus_item *item;
++		const char *n = NULL;
++
++		KDBUS_ITEM_FOREACH(item, name, items)
++			if (item->type == KDBUS_ITEM_OWNED_NAME)
++				n = item->name.name;
++
++		if (name->id == conn->id &&
++		    n && strcmp(needle, n) == 0) {
++			found = true;
++			break;
++		}
++	}
++
++	ret = kdbus_free(conn, cmd_list.offset);
++	ASSERT_RETURN(ret == 0);
++
++	return found ? 0 : -1;
++}
++
++int kdbus_test_name_basic(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	char *name, *dot_name, *invalid_name, *wildcard_name;
++	int ret;
++
++	name = "foo.bla.blaz";
++	dot_name = ".bla.blaz";
++	invalid_name = "foo";
++	wildcard_name = "foo.bla.bl.*";
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* acquire the name "foo.bar.xxx" */
++	ret = kdbus_name_acquire(conn, "foo.bar.xxx", NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* Name is not valid, must fail */
++	ret = kdbus_name_acquire(env->conn, dot_name, NULL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	ret = kdbus_name_acquire(env->conn, invalid_name, NULL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	ret = kdbus_name_acquire(env->conn, wildcard_name, NULL);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	/* check that we can acquire a name */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = conn_is_name_owner(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* ... and release it again */
++	ret = kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	ret = conn_is_name_owner(env->conn, name);
++	ASSERT_RETURN(ret != 0);
++
++	/* check that we can't release it again */
++	ret = kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	/* check that we can't release a name that we don't own */
++	ret = kdbus_name_release(env->conn, "foo.bar.xxx");
++	ASSERT_RETURN(ret == -EADDRINUSE);
++
++	/* Name is not valid, must fail */
++	ret = kdbus_name_release(env->conn, dot_name);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	ret = kdbus_name_release(env->conn, invalid_name);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	ret = kdbus_name_release(env->conn, wildcard_name);
++	ASSERT_RETURN(ret == -ESRCH);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++int kdbus_test_name_conflict(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	char *name;
++	int ret;
++
++	name = "foo.bla.blaz";
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* acquire the name from the 1st connection */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = conn_is_name_owner(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* check that we can't acquire it again from the 1st connection */
++	ret = kdbus_name_acquire(env->conn, name, NULL);
++	ASSERT_RETURN(ret == -EALREADY);
++
++	/* check that we also can't acquire it again from the 2nd connection */
++	ret = kdbus_name_acquire(conn, name, NULL);
++	ASSERT_RETURN(ret == -EEXIST);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
++
++int kdbus_test_name_queue(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn;
++	const char *name;
++	uint64_t flags;
++	int ret;
++
++	name = "foo.bla.blaz";
++
++	flags = KDBUS_NAME_ALLOW_REPLACEMENT;
++
++	/* create a 2nd connection */
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn != NULL);
++
++	/* acquire the name from the 1st connection, allowing later takeover */
++	ret = kdbus_name_acquire(env->conn, name, &flags);
++	ASSERT_RETURN(ret == 0);
++
++	ret = conn_is_name_owner(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* queue the 2nd connection as waiting owner */
++	flags = KDBUS_NAME_QUEUE;
++	ret = kdbus_name_acquire(conn, name, &flags);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(flags & KDBUS_NAME_IN_QUEUE);
++
++	/* release name from 1st connection */
++	ret = kdbus_name_release(env->conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	/* now the name should be owned by the 2nd connection */
++	ret = conn_is_name_owner(conn, name);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++
++	return TEST_OK;
++}
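++
++/*
++ * Hedged sketch of the replacement counterpart to the queue test
++ * above: when the current owner acquired the name with
++ * KDBUS_NAME_ALLOW_REPLACEMENT, another connection is assumed to be
++ * able to take it over by passing KDBUS_NAME_REPLACE_EXISTING. This
++ * path is not exercised by this suite.
++ */
++static __attribute__((unused))
++int example_name_takeover(struct kdbus_conn *old_owner,
++			  struct kdbus_conn *new_owner, const char *name)
++{
++	uint64_t flags = KDBUS_NAME_ALLOW_REPLACEMENT;
++	int ret;
++
++	ret = kdbus_name_acquire(old_owner, name, &flags);
++	if (ret < 0)
++		return ret;
++
++	flags = KDBUS_NAME_REPLACE_EXISTING;
++	return kdbus_name_acquire(new_owner, name, &flags);
++}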
+diff --git a/tools/testing/selftests/kdbus/test-policy-ns.c b/tools/testing/selftests/kdbus/test-policy-ns.c
+new file mode 100644
+index 0000000..3437012
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-policy-ns.c
+@@ -0,0 +1,632 @@
++/*
++ * Test metadata and policies in new namespaces. Even if our tests
++ * can run in a namespaced setup, this test is necessary so we can
++ * inspect policies on the same kdbusfs but between multiple
++ * namespaces.
++ *
++ * Copyright (C) 2014-2015 Djalal Harouni
++ *
++ * kdbus is free software; you can redistribute it and/or modify it under
++ * the terms of the GNU Lesser General Public License as published by the
++ * Free Software Foundation; either version 2.1 of the License, or (at
++ * your option) any later version.
++ */
++
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <pthread.h>
++#include <sched.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <unistd.h>
++#include <errno.h>
++#include <signal.h>
++#include <sys/wait.h>
++#include <sys/prctl.h>
++#include <sys/eventfd.h>
++#include <sys/syscall.h>
++#include <sys/capability.h>
++#include <linux/sched.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++#define MAX_CONN	64
++#define POLICY_NAME	"foo.test.policy-test"
++
++#define KDBUS_CONN_MAX_MSGS_PER_USER            16
++
++/**
++ * Note: this test can be used to inspect policy_db->talk_access_hash.
++ *
++ * The purpose of these tests:
++ * 1) Check KDBUS_POLICY_TALK.
++ * 2) Check the cache state: kdbus_policy_db->talk_access_hash.
++ * This should be extended over time.
++ */
++
++/**
++ * Check a list of connections against conn_db[0].
++ * conn_db[0] will own the name "foo.test.policy-test", and the
++ * policy holder connection for this name will update the policy
++ * entries so that different use cases can be tested.
++ */
++static struct kdbus_conn **conn_db;
++
++static void *kdbus_recv_echo(void *ptr)
++{
++	int ret;
++	struct kdbus_conn *conn = ptr;
++
++	ret = kdbus_msg_recv_poll(conn, 200, NULL, NULL);
++
++	return (void *)(long)ret;
++}
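++
++/*
++ * Aside: the thread above smuggles an int through the pthread return
++ * value via (void *)(long) casts, and pthread_join() unpacks it the
++ * same way. A minimal sketch of that round-trip:
++ */
++static __attribute__((unused)) int example_thread_ret_roundtrip(void)
++{
++	void *packed = (void *)(long)-ETIMEDOUT;
++
++	/* unpack on the joining side */
++	return (int)(long)packed;
++}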
++
++/* Trigger kdbus_policy_set() */
++static int kdbus_set_policy_talk(struct kdbus_conn *conn,
++				 const char *name,
++				 uid_t id, unsigned int type)
++{
++	int ret;
++	struct kdbus_policy_access access = {
++		.type = type,
++		.id = id,
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(conn, name, &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	return TEST_OK;
++}
++
++/* Return TEST_OK on success, TEST_ERR on failure */
++static int kdbus_register_same_activator(char *bus, const char *name,
++					 struct kdbus_conn **c)
++{
++	int ret;
++	struct kdbus_conn *activator;
++
++	activator = kdbus_hello_activator(bus, name, NULL, 0);
++	if (activator) {
++		*c = activator;
++		fprintf(stderr, "--- error: registered name '%s' twice.\n",
++			name);
++		return TEST_ERR;
++	}
++
++	ret = -errno;
++	/* -EEXIST means test succeeded */
++	if (ret == -EEXIST)
++		return TEST_OK;
++
++	return TEST_ERR;
++}
++
++/* Return TEST_OK on success, TEST_ERR on failure */
++static int kdbus_register_policy_holder(char *bus, const char *name,
++					struct kdbus_conn **conn)
++{
++	struct kdbus_conn *c;
++	struct kdbus_policy_access access[2];
++
++	access[0].type = KDBUS_POLICY_ACCESS_USER;
++	access[0].access = KDBUS_POLICY_OWN;
++	access[0].id = geteuid();
++
++	access[1].type = KDBUS_POLICY_ACCESS_WORLD;
++	access[1].access = KDBUS_POLICY_TALK;
++	access[1].id = geteuid();
++
++	c = kdbus_hello_registrar(bus, name, access, 2,
++				  KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(c);
++
++	*conn = c;
++
++	return TEST_OK;
++}
++
++/**
++ * Create new threads for receiving from multiple senders.
++ * 'conn_db' will be populated with newly created connections.
++ * The caller should free all allocated connections.
++ *
++ * Return: 0 on success, negative errno on failure.
++ */
++static int kdbus_recv_in_threads(const char *bus, const char *name,
++				 struct kdbus_conn **conn_db)
++{
++	int ret;
++	bool pool_full = false;
++	unsigned int sent_packets = 0;
++	unsigned int lost_packets = 0;
++	unsigned int i, tid;
++	unsigned long dst_id;
++	unsigned long cookie = 1;
++	unsigned int thread_nr = MAX_CONN - 1;
++	pthread_t thread_id[MAX_CONN - 1] = { 0 };
++
++	dst_id = name ? KDBUS_DST_ID_NAME : conn_db[0]->id;
++
++	for (tid = 0, i = 1; tid < thread_nr; tid++, i++) {
++		ret = pthread_create(&thread_id[tid], NULL,
++				     kdbus_recv_echo, (void *)conn_db[0]);
++		if (ret != 0) {
++			/* pthread_create() returns the error directly */
++			ret = -ret;
++			kdbus_printf("error pthread_create: %d (%s)\n",
++				     ret, strerror(-ret));
++			break;
++		}
++
++		/* free any previous connection before re-using the slot */
++		kdbus_conn_free(conn_db[i]);
++		conn_db[i] = NULL;
++
++		/* create a fresh sender connection for this slot */
++		conn_db[i] = kdbus_hello(bus, 0, NULL, 0);
++		if (!conn_db[i]) {
++			ret = -errno;
++			break;
++		}
++
++		ret = kdbus_add_match_empty(conn_db[i]);
++		if (ret < 0)
++			break;
++
++		ret = kdbus_msg_send(conn_db[i], name, cookie++,
++				     0, 0, 0, dst_id);
++		if (ret < 0) {
++			/*
++			 * The receivers may not have read their
++			 * messages yet (perhaps not scheduled), so
++			 * the connection pool or queue may be full.
++			 * Flag the pool as full here and recheck
++			 * the receiver errors later.
++			 */
++			if (ret == -ENOBUFS || ret == -EXFULL)
++				pool_full = true;
++			break;
++		}
++
++		sent_packets++;
++	}
++
++	for (tid = 0; tid < thread_nr; tid++) {
++		int thread_ret = 0;
++
++		if (thread_id[tid]) {
++			pthread_join(thread_id[tid], (void *)&thread_ret);
++			if (thread_ret < 0) {
++				/* Update only if send did not fail */
++				if (ret == 0)
++					ret = thread_ret;
++
++				lost_packets++;
++			}
++		}
++	}
++
++	/*
++	 * If sending failed with -ENOBUFS or -EXFULL, then we should
++	 * have lost at least one packet, and sent_packets should be
++	 * at least KDBUS_CONN_MAX_MSGS_PER_USER.
++	 */
++	if (pool_full) {
++		ASSERT_RETURN(lost_packets > 0);
++
++		/*
++		 * We should have sent at least
++		 * KDBUS_CONN_MAX_MSGS_PER_USER messages: for every
++		 * send operation we create a thread to receive the
++		 * packet, which keeps the queue clean.
++		 */
++		ASSERT_RETURN(sent_packets >= KDBUS_CONN_MAX_MSGS_PER_USER);
++
++		/*
++		 * Set ret to zero since we failed only because the
++		 * receiving threads had not been scheduled yet.
++		 */
++		ret = 0;
++	}
++
++	return ret;
++}

++
++/* Return: TEST_OK on success, TEST_ERR on failure */
++static int kdbus_normal_test(const char *bus, const char *name,
++			     struct kdbus_conn **conn_db)
++{
++	int ret;
++
++	ret = kdbus_recv_in_threads(bus, name, conn_db);
++	ASSERT_RETURN(ret >= 0);
++
++	return TEST_OK;
++}
++
++static int kdbus_fork_test_by_id(const char *bus,
++				 struct kdbus_conn **conn_db,
++				 int parent_status, int child_status)
++{
++	int ret;
++	pid_t pid;
++	uint64_t cookie = 0x9876ecba;
++	struct kdbus_msg *msg = NULL;
++	uint64_t offset = 0;
++	int status = 0;
++
++	/*
++	 * If child_status is not EXIT_SUCCESS, we expect sending from
++	 * the child to fail, so receiving in the parent must error
++	 * with -ETIMEDOUT, and vice versa.
++	 */
++	bool parent_timedout = !!child_status;
++	bool child_timedout = !!parent_status;
++
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		struct kdbus_conn *conn_src;
++
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		ASSERT_EXIT(ret == 0);
++
++		ret = drop_privileges(65534, 65534);
++		ASSERT_EXIT(ret == 0);
++
++		conn_src = kdbus_hello(bus, 0, NULL, 0);
++		ASSERT_EXIT(conn_src);
++
++		ret = kdbus_add_match_empty(conn_src);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * child_status is always checked against the send
++		 * operation; if the check fails, always return
++		 * EXIT_FAILURE.
++		 */
++		ret = kdbus_msg_send(conn_src, NULL, cookie,
++				     0, 0, 0, conn_db[0]->id);
++		ASSERT_EXIT(ret == child_status);
++
++		ret = kdbus_msg_recv_poll(conn_src, 100, NULL, NULL);
++
++		kdbus_conn_free(conn_src);
++
++		/*
++		 * The child's kdbus_msg_recv_poll() should time out,
++		 * since parent_status was set to a non-EXIT_SUCCESS
++		 * value.
++		 */
++		if (child_timedout)
++			_exit(ret == -ETIMEDOUT ? EXIT_SUCCESS : EXIT_FAILURE);
++
++		_exit(ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
++	}
++
++	ret = kdbus_msg_recv_poll(conn_db[0], 100, &msg, &offset);
++	/*
++	 * If parent_timedout is set, this should fail with -ETIMEDOUT
++	 * since child_status was set to a non-EXIT_SUCCESS value.
++	 * Otherwise, expect kdbus_msg_recv_poll() to succeed.
++	 */
++	if (parent_timedout) {
++		ASSERT_RETURN_VAL(ret == -ETIMEDOUT, TEST_ERR);
++
++		/* Timed out; we don't have the child connection ID,
++		 * so just terminate. */
++		goto out;
++	} else {
++		ASSERT_RETURN_VAL(ret == 0, ret);
++	}
++
++	ret = kdbus_msg_send(conn_db[0], NULL, ++cookie,
++			     0, 0, 0, msg->src_id);
++	/*
++	 * parent_status is checked against the send operation;
++	 * on failure always return TEST_ERR.
++	 */
++	ASSERT_RETURN_VAL(ret == parent_status, TEST_ERR);
++
++	kdbus_msg_free(msg);
++	kdbus_free(conn_db[0], offset);
++
++out:
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++/*
++ * Return: TEST_OK, TEST_ERR or TEST_SKIP.
++ * We return TEST_OK only if the child exits with the expected
++ * 'expected_status' that is specified as an argument.
++ */
++static int kdbus_fork_test(const char *bus, const char *name,
++			   struct kdbus_conn **conn_db, int expected_status)
++{
++	pid_t pid;
++	int ret = 0;
++	int status = 0;
++
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		ASSERT_EXIT(ret == 0);
++
++		ret = drop_privileges(65534, 65534);
++		ASSERT_EXIT(ret == 0);
++
++		ret = kdbus_recv_in_threads(bus, name, conn_db);
++		_exit(ret == expected_status ? EXIT_SUCCESS : EXIT_FAILURE);
++	}
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN(ret >= 0);
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++/* Return EXIT_SUCCESS, EXIT_FAILURE or negative errno */
++static int __kdbus_clone_userns_test(const char *bus,
++				     const char *name,
++				     struct kdbus_conn **conn_db,
++				     int expected_status)
++{
++	int efd;
++	pid_t pid;
++	int ret = 0;
++	unsigned int uid = 65534;
++	int status;
++
++	ret = drop_privileges(uid, uid);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	/*
++	 * Since we just dropped privileges, the dumpable flag was
++	 * cleared, which causes /proc/$clone_child/uid_map to be
++	 * owned by root; any userns uid mapping would then fail with
++	 * -EPERM since the mapping will be done by uid 65534.
++	 *
++	 * To avoid this, set the dumpable flag again, which makes
++	 * procfs update the owner of the /proc/$clone_child/ inodes
++	 * to 65534.
++	 *
++	 * This lets us write to /proc/$clone_child/uid_map as uid
++	 * 65534 and map uid 65534 to 0 inside the user namespace.
++	 */
++	ret = prctl(PR_SET_DUMPABLE, SUID_DUMP_USER);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	/* sync parent/child */
++	efd = eventfd(0, EFD_CLOEXEC);
++	ASSERT_RETURN_VAL(efd >= 0, efd);
++
++	pid = syscall(__NR_clone, SIGCHLD|CLONE_NEWUSER, NULL);
++	if (pid < 0) {
++		ret = -errno;
++		kdbus_printf("error clone: %d (%m)\n", ret);
++		/*
++		 * A normal user is not allowed to create user
++		 * namespaces, so there is nothing to worry about.
++		 */
++		if (ret == -EPERM) {
++			kdbus_printf("-- CLONE_NEWUSER TEST Failed for uid: %u\n"
++				"-- Make sure that your kernel do not allow "
++				"CLONE_NEWUSER for unprivileged users\n"
++				"-- Upstream Commit: "
++				"https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=5eaf563e\n",
++				uid);
++			ret = 0;
++		}
++
++		return ret;
++	}
++
++	if (pid == 0) {
++		struct kdbus_conn *conn_src;
++		eventfd_t event_status = 0;
++
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		ASSERT_EXIT(ret == 0);
++
++		ret = eventfd_read(efd, &event_status);
++		ASSERT_EXIT(ret >= 0 && event_status == 1);
++
++		/* ping connection from the new user namespace */
++		conn_src = kdbus_hello(bus, 0, NULL, 0);
++		ASSERT_EXIT(conn_src);
++
++		ret = kdbus_add_match_empty(conn_src);
++		ASSERT_EXIT(ret == 0);
++
++		ret = kdbus_msg_send(conn_src, name, 0xabcd1234,
++				     0, 0, 0, KDBUS_DST_ID_NAME);
++		kdbus_conn_free(conn_src);
++
++		_exit(ret == expected_status ? EXIT_SUCCESS : EXIT_FAILURE);
++	}
++
++	ret = userns_map_uid_gid(pid, "0 65534 1", "0 65534 1");
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	/* Tell child we are ready */
++	ret = eventfd_write(efd, 1);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	close(efd);
++
++	return status == EXIT_SUCCESS ? TEST_OK : TEST_ERR;
++}
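++
++/*
++ * Sketch of what userns_map_uid_gid() used above is assumed to do
++ * (the real helper lives elsewhere in the suite): write "inside
++ * outside count" triplets to the child's map files. Shown for
++ * uid_map only; the behavior described here is an assumption.
++ */
++static __attribute__((unused))
++int example_write_uid_map(pid_t pid, const char *map)
++{
++	char path[64];
++	int fd, ret;
++
++	snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)pid);
++	fd = open(path, O_WRONLY);
++	if (fd < 0)
++		return -errno;
++
++	ret = write(fd, map, strlen(map));
++	ret = ret < 0 ? -errno : 0;
++	close(fd);
++
++	return ret;
++}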
++
++static int kdbus_clone_userns_test(const char *bus,
++				   const char *name,
++				   struct kdbus_conn **conn_db,
++				   int expected_status)
++{
++	pid_t pid;
++	int ret = 0;
++	int status;
++
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, -errno);
++
++	if (pid == 0) {
++		ret = prctl(PR_SET_PDEATHSIG, SIGKILL);
++		if (ret < 0)
++			_exit(EXIT_FAILURE);
++
++		ret = __kdbus_clone_userns_test(bus, name, conn_db,
++						expected_status);
++		_exit(ret);
++	}
++
++	/*
++	 * Receive in the original (root privileged) user namespace,
++	 * must fail with -ETIMEDOUT.
++	 */
++	ret = kdbus_msg_recv_poll(conn_db[0], 100, NULL, NULL);
++	ASSERT_RETURN_VAL(ret == -ETIMEDOUT, ret);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++int kdbus_test_policy_ns(struct kdbus_test_env *env)
++{
++	int i;
++	int ret;
++	struct kdbus_conn *activator = NULL;
++	struct kdbus_conn *policy_holder = NULL;
++	char *bus = env->buspath;
++
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	/* not enough privileges, skip the test */
++	if (!ret)
++		return TEST_SKIP;
++
++	/* we require user-namespaces */
++	if (access("/proc/self/uid_map", F_OK) != 0)
++		return TEST_SKIP;
++
++	/* uids/gids must be mapped */
++	if (!all_uids_gids_are_mapped())
++		return TEST_SKIP;
++
++	/* calloc() already zero-initializes the array */
++	conn_db = calloc(MAX_CONN, sizeof(struct kdbus_conn *));
++	ASSERT_RETURN(conn_db);
++
++	conn_db[0] = kdbus_hello(bus, 0, NULL, 0);
++	ASSERT_RETURN(conn_db[0]);
++
++	ret = kdbus_add_match_empty(conn_db[0]);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_fork_test_by_id(bus, conn_db, -EPERM, -EPERM);
++	ASSERT_EXIT(ret == 0);
++
++	ret = kdbus_register_policy_holder(bus, POLICY_NAME,
++					   &policy_holder);
++	ASSERT_RETURN(ret == 0);
++
++	/* Try to register the same name with an activator */
++	ret = kdbus_register_same_activator(bus, POLICY_NAME,
++					    &activator);
++	ASSERT_RETURN(ret == 0);
++
++	/* Acquire POLICY_NAME */
++	ret = kdbus_name_acquire(conn_db[0], POLICY_NAME, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_normal_test(bus, POLICY_NAME, conn_db);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_list(conn_db[0], KDBUS_LIST_NAMES |
++				     KDBUS_LIST_UNIQUE |
++				     KDBUS_LIST_ACTIVATORS |
++				     KDBUS_LIST_QUEUED);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_fork_test(bus, POLICY_NAME, conn_db, EXIT_SUCCESS);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Child connections are able to talk to conn_db[0] since the
++	 * current POLICY_NAME TALK type is KDBUS_POLICY_ACCESS_WORLD,
++	 * so expect EXIT_SUCCESS when sending from the child. The
++	 * child's connection does not own any well-known name, so the
++	 * parent connection conn_db[0] would normally fail with
++	 * -EPERM, but since it is a privileged bus user the TALK is
++	 * allowed.
++	 */
++	ret = kdbus_fork_test_by_id(bus, conn_db,
++				    EXIT_SUCCESS, EXIT_SUCCESS);
++	ASSERT_EXIT(ret == 0);
++
++	/*
++	 * Connections that can talk may be getting destroyed now.
++	 * Restrict the policy and purge cache entries where
++	 * conn_db[0] is the destination.
++	 *
++	 * Now only connections with uid == 0 are allowed to talk.
++	 */
++	ret = kdbus_set_policy_talk(policy_holder, POLICY_NAME,
++				    geteuid(), KDBUS_POLICY_ACCESS_USER);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test connections (FORK+DROP) again: after restricting the
++	 * policy, re-check the connections; we expect the children
++	 * to fail with -EPERM.
++	 */
++	ret = kdbus_fork_test(bus, POLICY_NAME, conn_db, -EPERM);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Now expect both the parent and the child to fail.
++	 *
++	 * The child should fail with -EPERM since we just restricted
++	 * the POLICY_NAME TALK to uid 0 and its uid is 65534.
++	 *
++	 * Since the parent's connection will time out when receiving
++	 * from the child, we never continue; pass -EPERM for it as
++	 * well.
++	 */
++	ret = kdbus_fork_test_by_id(bus, conn_db, -EPERM, -EPERM);
++	ASSERT_EXIT(ret == 0);
++
++	/* Check if the name can be reached in a new userns */
++	ret = kdbus_clone_userns_test(bus, POLICY_NAME, conn_db, -EPERM);
++	ASSERT_RETURN(ret == 0);
++
++	for (i = 0; i < MAX_CONN; i++)
++		kdbus_conn_free(conn_db[i]);
++
++	kdbus_conn_free(activator);
++	kdbus_conn_free(policy_holder);
++
++	free(conn_db);
++
++	return ret;
++}
+diff --git a/tools/testing/selftests/kdbus/test-policy-priv.c b/tools/testing/selftests/kdbus/test-policy-priv.c
+new file mode 100644
+index 0000000..a318ccc
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-policy-priv.c
+@@ -0,0 +1,1269 @@
++#include <errno.h>
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <unistd.h>
++#include <time.h>
++#include <sys/capability.h>
++#include <sys/eventfd.h>
++#include <sys/wait.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++static int test_policy_priv_by_id(const char *bus,
++				  struct kdbus_conn *conn_dst,
++				  bool drop_second_user,
++				  int parent_status,
++				  int child_status)
++{
++	int ret = 0;
++	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
++
++	ASSERT_RETURN(conn_dst);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, bus, ({
++		ret = kdbus_msg_send(unpriv, NULL,
++				     expected_cookie, 0, 0, 0,
++				     conn_dst->id);
++		ASSERT_EXIT(ret == child_status);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_dst, 300, NULL, NULL);
++	ASSERT_RETURN(ret == parent_status);
++
++	return 0;
++}
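++
++/*
++ * Hedged usage sketch for RUN_UNPRIVILEGED_CONN() (from kdbus-util.h),
++ * which is assumed to fork, drop to UNPRIV_UID/UNPRIV_GID, connect to
++ * the given bus as 'unpriv' and run the block in the child, with the
++ * parent's return value reflecting the child's exit status. Not run
++ * by the suite.
++ */
++static __attribute__((unused))
++int example_unpriv_ping(const char *bus, uint64_t dst_id)
++{
++	int ret;
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, bus, ({
++		/* runs in the forked, unprivileged child */
++		ret = kdbus_msg_send(unpriv, NULL, 0xc00c1eULL,
++				     0, 0, 0, dst_id);
++		ASSERT_EXIT(ret == 0 || ret == -EPERM);
++	}));
++
++	return ret;
++}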
++
++static int test_policy_priv_by_broadcast(const char *bus,
++					 struct kdbus_conn *conn_dst,
++					 int drop_second_user,
++					 int parent_status,
++					 int child_status)
++{
++	int efd;
++	int ret = 0;
++	eventfd_t event_status = 0;
++	struct kdbus_msg *msg = NULL;
++	uid_t second_uid = UNPRIV_UID;
++	gid_t second_gid = UNPRIV_GID;
++	struct kdbus_conn *child_2 = conn_dst;
++	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
++
++	/* Drop to an unprivileged user other than UNPRIV_UID */
++	if (drop_second_user == DROP_OTHER_UNPRIV) {
++		second_uid = UNPRIV_UID - 1;
++		second_gid = UNPRIV_GID - 1;
++	}
++
++	/* child will signal parent to send broadcast */
++	efd = eventfd(0, EFD_CLOEXEC);
++	ASSERT_RETURN_VAL(efd >= 0, efd);
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++		struct kdbus_conn *child;
++
++		child = kdbus_hello(bus, 0, NULL, 0);
++		ASSERT_EXIT(child);
++
++		ret = kdbus_add_match_empty(child);
++		ASSERT_EXIT(ret == 0);
++
++		/* signal parent */
++		ret = eventfd_write(efd, 1);
++		ASSERT_EXIT(ret == 0);
++
++		/* Use a slightly longer timeout */
++		ret = kdbus_msg_recv_poll(child, 500, &msg, NULL);
++		ASSERT_EXIT(ret == child_status);
++
++		/*
++		 * If we expect the child to get the broadcast
++		 * message, then check the received cookie.
++		 */
++		if (ret == 0) {
++			ASSERT_EXIT(expected_cookie == msg->cookie);
++		}
++
++		/* Use expected_cookie since 'msg' might be NULL */
++		ret = kdbus_msg_send(child, NULL, expected_cookie + 1,
++				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
++		ASSERT_EXIT(ret == 0);
++
++		kdbus_msg_free(msg);
++		kdbus_conn_free(child);
++	}),
++	({
++		if (drop_second_user == DO_NOT_DROP) {
++			ASSERT_RETURN(child_2);
++
++			ret = eventfd_read(efd, &event_status);
++			ASSERT_RETURN(ret >= 0 && event_status == 1);
++
++			ret = kdbus_msg_send(child_2, NULL,
++					     expected_cookie, 0, 0, 0,
++					     KDBUS_DST_ID_BROADCAST);
++			ASSERT_RETURN(ret == 0);
++
++			/* Use a slightly longer timeout */
++			ret = kdbus_msg_recv_poll(child_2, 1000,
++						  &msg, NULL);
++			ASSERT_RETURN(ret == parent_status);
++
++			/*
++			 * Check returned cookie in case we expect
++			 * success.
++			 */
++			if (ret == 0) {
++				ASSERT_RETURN(msg->cookie ==
++					      expected_cookie + 1);
++			}
++
++			kdbus_msg_free(msg);
++		} else {
++			/*
++			 * Two unprivileged users will try to
++			 * communicate using broadcast.
++			 */
++			ret = RUN_UNPRIVILEGED(second_uid, second_gid, ({
++				child_2 = kdbus_hello(bus, 0, NULL, 0);
++				ASSERT_EXIT(child_2);
++
++				ret = kdbus_add_match_empty(child_2);
++				ASSERT_EXIT(ret == 0);
++
++				ret = eventfd_read(efd, &event_status);
++				ASSERT_EXIT(ret >= 0 && event_status == 1);
++
++				ret = kdbus_msg_send(child_2, NULL,
++						expected_cookie, 0, 0, 0,
++						KDBUS_DST_ID_BROADCAST);
++				ASSERT_EXIT(ret == 0);
++
++				/* Use a slightly longer timeout */
++				ret = kdbus_msg_recv_poll(child_2, 1000,
++							  &msg, NULL);
++				ASSERT_EXIT(ret == parent_status);
++
++				/*
++				 * Check returned cookie in case we expect
++				 * success.
++				 */
++				if (ret == 0) {
++					ASSERT_EXIT(msg->cookie ==
++						    expected_cookie + 1);
++				}
++
++				kdbus_msg_free(msg);
++				kdbus_conn_free(child_2);
++			}),
++			({ 0; }));
++			ASSERT_RETURN(ret == 0);
++		}
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	close(efd);
++
++	return ret;
++}
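++
++/*
++ * The eventfd handshake used above, reduced to its core as a sketch:
++ * the child signals readiness with eventfd_write() and the parent
++ * blocks in eventfd_read() before proceeding. Not run by the suite.
++ */
++static __attribute__((unused)) int example_eventfd_handshake(void)
++{
++	eventfd_t val = 0;
++	int efd = eventfd(0, EFD_CLOEXEC);
++
++	if (efd < 0)
++		return -errno;
++
++	if (fork() == 0) {
++		eventfd_write(efd, 1);	/* child: signal the parent */
++		_exit(EXIT_SUCCESS);
++	}
++
++	eventfd_read(efd, &val);	/* parent: wait for the child */
++	wait(NULL);
++	close(efd);
++
++	return val == 1 ? 0 : -EIO;
++}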
++
++static void nosig(int sig)
++{
++}
++
++static int test_priv_before_policy_upload(struct kdbus_test_env *env)
++{
++	int ret = 0;
++	struct kdbus_conn *conn;
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	/*
++	 * Make sure unprivileged bus users cannot acquire names
++	 * before registering any policy holder.
++	 */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret < 0);
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Make sure unprivileged bus users cannot talk by default
++	 * to privileged ones, unless a policy holder that allows
++	 * this was uploaded.
++	 */
++
++	ret = test_policy_priv_by_id(env->buspath, conn, false,
++				     -ETIMEDOUT, -EPERM);
++	ASSERT_RETURN(ret == 0);
++
++	/* Activate matching for a privileged connection */
++	ret = kdbus_add_match_empty(conn);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * First make sure that BROADCAST with msg flag
++	 * KDBUS_MSG_EXPECT_REPLY will fail with -ENOTUNIQ
++	 */
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, NULL, 0xdeadbeef,
++				     KDBUS_MSG_EXPECT_REPLY,
++				     5000000000ULL, 0,
++				     KDBUS_DST_ID_BROADCAST);
++		ASSERT_EXIT(ret == -ENOTUNIQ);
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test broadcast with a privileged connection.
++	 *
++	 * First case: the unprivileged receiver should not get the
++	 * broadcast message sent by the privileged connection, since
++	 * there is no TALK policy that allows the unprivileged
++	 * connection to TALK to the privileged one. It will fail
++	 * with -ETIMEDOUT.
++	 *
++	 * Second case: the privileged connection should get the
++	 * broadcast message from the unprivileged one. Since the
++	 * receiver is a privileged bus user with default TALK access
++	 * to all connections, it will receive it.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, conn,
++					    DO_NOT_DROP,
++					    0, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test broadcast with two unprivileged connections running
++	 * under the same user.
++	 *
++	 * Both connections should succeed.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
++					    DROP_SAME_UNPRIV, 0, 0);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test broadcast with two unprivileged connections running
++	 * under different users.
++	 *
++	 * Both connections will fail with -ETIMEDOUT.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
++					    DROP_OTHER_UNPRIV,
++					    -ETIMEDOUT, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(conn);
++
++	return ret;
++}
++
++static int test_broadcast_after_policy_upload(struct kdbus_test_env *env)
++{
++	int ret;
++	int efd;
++	eventfd_t event_status = 0;
++	struct kdbus_msg *msg = NULL;
++	struct kdbus_conn *owner_a, *owner_b;
++	struct kdbus_conn *holder_a, *holder_b;
++	struct kdbus_policy_access access = {};
++	uint64_t expected_cookie = time(NULL) ^ 0xdeadbeef;
++
++	owner_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(owner_a);
++
++	ret = kdbus_name_acquire(owner_a, "com.example.broadcastA", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users cannot talk by default
++	 * to privileged ones, unless a policy holder that allows
++	 * this was uploaded.
++	 */
++
++	++expected_cookie;
++	ret = test_policy_priv_by_id(env->buspath, owner_a, false,
++				     -ETIMEDOUT, -EPERM);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Make sure the privileged connection won't receive
++	 * broadcasts unless it installs a match. It will fail with
++	 * -ETIMEDOUT.
++	 *
++	 * At the same time, check that the unprivileged connection
++	 * will not receive the broadcast message from the privileged
++	 * one, since the privileged one owns a name with a restricted
++	 * TALK policy (actually the TALK policy is not registered
++	 * yet, so we fail by default); thus the unprivileged receiver
++	 * is not able to TALK to that name.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
++					    DO_NOT_DROP,
++					    -ETIMEDOUT, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	/* Activate matching for a privileged connection */
++	ret = kdbus_add_match_empty(owner_a);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Redo the previous test. The privileged conn owner_a is
++	 * able to TALK to any connection so it will receive the
++	 * broadcast message now.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
++					    DO_NOT_DROP,
++					    0, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test that broadcasts between two unprivileged connections
++	 * running under the same user still succeed.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
++					    DROP_SAME_UNPRIV, 0, 0);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test broadcast with two unprivileged connections running
++	 * under different users.
++	 *
++	 * Both connections will fail with -ETIMEDOUT.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
++					    DROP_OTHER_UNPRIV,
++					    -ETIMEDOUT, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	holder_a = kdbus_hello_registrar(env->buspath,
++					 "com.example.broadcastA",
++					 &access, 1,
++					 KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(holder_a);
++
++	holder_b = kdbus_hello_registrar(env->buspath,
++					 "com.example.broadcastB",
++					 &access, 1,
++					 KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(holder_b);
++
++	/* Free connections and their received messages and restart */
++	kdbus_conn_free(owner_a);
++
++	owner_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(owner_a);
++
++	/* Activate matching for a privileged connection */
++	ret = kdbus_add_match_empty(owner_a);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_name_acquire(owner_a, "com.example.broadcastA", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	owner_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(owner_b);
++
++	ret = kdbus_name_acquire(owner_b, "com.example.broadcastB", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/* Activate matching for a privileged connection */
++	ret = kdbus_add_match_empty(owner_b);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Test that even though "com.example.broadcastA" and
++	 * "com.example.broadcastB" only have the default TALK access,
++	 * they are able to signal each other using broadcasts: as
++	 * privileged connections they receive all broadcasts their
++	 * matches allow.
++	 */
++
++	++expected_cookie;
++	ret = kdbus_msg_send(owner_a, NULL, expected_cookie, 0,
++			     0, 0, KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv_poll(owner_b, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++	ASSERT_RETURN(msg->cookie == expected_cookie);
++
++	/* Check src ID */
++	ASSERT_RETURN(msg->src_id == owner_a->id);
++
++	kdbus_msg_free(msg);
++
++	/* Release name "com.example.broadcastB" */
++
++	ret = kdbus_name_release(owner_b, "com.example.broadcastB");
++	ASSERT_EXIT(ret >= 0);
++
++	/* KDBUS_POLICY_OWN for unprivileged connections */
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	/* Update the policy so unprivileged will own the name */
++
++	ret = kdbus_conn_update_policy(holder_b,
++				       "com.example.broadcastB",
++				       &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Send broadcasts from an unprivileged connection that owns
++	 * the name "com.example.broadcastB".
++	 *
++	 * We'll have four destinations here:
++	 *
++	 * 1) owner_a: privileged connection that owns
++	 * "com.example.broadcastA". It will receive the broadcast
++	 * since it is privileged, has default TALK access to all
++	 * connections, and is subscribed with a match. Will succeed.
++	 *
++	 * 2) owner_b: privileged connection (running under a
++	 * different uid) that does not own any name, but has an
++	 * empty broadcast match, so it will receive broadcasts since
++	 * it has default TALK access to all connections.
++	 *
++	 * 3) unpriv_a: unprivileged connection that does not own any
++	 * name. It will receive the broadcast since it runs under
++	 * the same user as the broadcaster and did install matches.
++	 *
++	 * 4) unpriv_b: unprivileged connection that is not
++	 * interested in broadcast messages, so it did not install
++	 * broadcast matches. Should fail with -ETIMEDOUT.
++	 */
++
++	++expected_cookie;
++	efd = eventfd(0, EFD_CLOEXEC);
++	ASSERT_RETURN_VAL(efd >= 0, efd);
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++		struct kdbus_conn *unpriv_owner;
++		struct kdbus_conn *unpriv_a, *unpriv_b;
++
++		unpriv_owner = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_EXIT(unpriv_owner);
++
++		unpriv_a = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_EXIT(unpriv_a);
++
++		unpriv_b = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_EXIT(unpriv_b);
++
++		ret = kdbus_name_acquire(unpriv_owner,
++					 "com.example.broadcastB",
++					 NULL);
++		ASSERT_EXIT(ret >= 0);
++
++		ret = kdbus_add_match_empty(unpriv_a);
++		ASSERT_EXIT(ret == 0);
++
++		/* Signal that we are doing broadcasts */
++		ret = eventfd_write(efd, 1);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * Do broadcast from a connection that owns the
++		 * names "com.example.broadcastB".
++		 */
++		ret = kdbus_msg_send(unpriv_owner, NULL,
++				     expected_cookie,
++				     0, 0, 0,
++				     KDBUS_DST_ID_BROADCAST);
++		ASSERT_EXIT(ret == 0);
++
++		/*
++		 * Unprivileged connection running under the same
++		 * user. It should succeed.
++		 */
++		ret = kdbus_msg_recv_poll(unpriv_a, 300, &msg, NULL);
++		ASSERT_EXIT(ret == 0 && msg->cookie == expected_cookie);
++
++		/*
++		 * Did not install matches, not interested in
++		 * broadcasts
++		 */
++		ret = kdbus_msg_recv_poll(unpriv_b, 300, NULL, NULL);
++		ASSERT_EXIT(ret == -ETIMEDOUT);
++	}),
++	({
++		ret = eventfd_read(efd, &event_status);
++		ASSERT_RETURN(ret >= 0 && event_status == 1);
++
++		/*
++		 * owner_a is privileged, so it receives the broadcast
++		 * even though its TALK access on the name
++		 * "com.example.broadcastA" is restricted.
++		 */
++		ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
++		ASSERT_RETURN(ret == 0);
++
++		/* confirm the received cookie */
++		ASSERT_RETURN(msg->cookie == expected_cookie);
++
++		kdbus_msg_free(msg);
++
++		/*
++		 * owner_b got the broadcast from an unprivileged
++		 * connection.
++		 */
++		ret = kdbus_msg_recv_poll(owner_b, 300, &msg, NULL);
++		ASSERT_RETURN(ret == 0);
++
++		/* confirm the received cookie */
++		ASSERT_RETURN(msg->cookie == expected_cookie);
++
++		kdbus_msg_free(msg);
++
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	close(efd);
++
++	/*
++	 * Test broadcast with two unprivileged connections running
++	 * under different users.
++	 *
++	 * Both connections will fail with -ETIMEDOUT.
++	 */
++
++	ret = test_policy_priv_by_broadcast(env->buspath, NULL,
++					    DROP_OTHER_UNPRIV,
++					    -ETIMEDOUT, -ETIMEDOUT);
++	ASSERT_RETURN(ret == 0);
++
++	/* Drain the broadcasts received by the privileged connections */
++	ret = kdbus_msg_recv_poll(owner_a, 100, NULL, NULL);
++	ret = kdbus_msg_recv_poll(owner_a, 100, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(owner_a, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	ret = kdbus_msg_recv_poll(owner_b, 100, NULL, NULL);
++	ret = kdbus_msg_recv_poll(owner_b, 100, NULL, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_msg_recv(owner_b, NULL, NULL);
++	ASSERT_RETURN(ret == -EAGAIN);
++
++	/*
++	 * Perform the last tests: allow others to talk to the name
++	 * "com.example.broadcastA". Receiving broadcasts from it
++	 * should now succeed since the TALK policy allows it.
++	 */
++
++	/* KDBUS_POLICY_TALK for everyone */
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(holder_a,
++				       "com.example.broadcastA",
++				       &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * The unprivileged connection is now able to TALK to
++	 * "com.example.broadcastA", so it will receive its broadcasts.
++	 */
++	ret = test_policy_priv_by_broadcast(env->buspath, owner_a,
++					    DO_NOT_DROP, 0, 0);
++	ASSERT_RETURN(ret == 0);
++
++	++expected_cookie;
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.broadcastB",
++					 NULL);
++		ASSERT_EXIT(ret >= 0);
++		ret = kdbus_msg_send(unpriv, NULL, expected_cookie,
++				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
++		ASSERT_EXIT(ret == 0);
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	/* owner_a is privileged; it will get the broadcast now. */
++	ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* confirm the received cookie */
++	ASSERT_RETURN(msg->cookie == expected_cookie);
++
++	kdbus_msg_free(msg);
++
++	/*
++	 * owner_a releases the name "com.example.broadcastA". It
++	 * should still receive broadcasts since it remains privileged
++	 * and has the right match.
++	 *
++	 * An unprivileged connection will own a name and try to
++	 * signal the privileged connection.
++	 */
++
++	ret = kdbus_name_release(owner_a, "com.example.broadcastA");
++	ASSERT_EXIT(ret >= 0);
++
++	++expected_cookie;
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.broadcastB",
++					 NULL);
++		ASSERT_EXIT(ret >= 0);
++		ret = kdbus_msg_send(unpriv, NULL, expected_cookie,
++				     0, 0, 0, KDBUS_DST_ID_BROADCAST);
++		ASSERT_EXIT(ret == 0);
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	/* owner_a will get the broadcast now. */
++	ret = kdbus_msg_recv_poll(owner_a, 300, &msg, NULL);
++	ASSERT_RETURN(ret == 0);
++
++	/* confirm the received cookie */
++	ASSERT_RETURN(msg->cookie == expected_cookie);
++
++	kdbus_msg_free(msg);
++
++	kdbus_conn_free(owner_a);
++	kdbus_conn_free(owner_b);
++	kdbus_conn_free(holder_a);
++	kdbus_conn_free(holder_b);
++
++	return 0;
++}
++
++static int test_policy_priv(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn_a, *conn_b, *conn, *owner;
++	struct kdbus_policy_access access, *acc;
++	sigset_t sset;
++	size_t num;
++	int ret;
++
++	/*
++	 * Make sure we have CAP_SETUID/SETGID so we can drop privileges
++	 */
++
++	ret = test_is_capable(CAP_SETUID, CAP_SETGID, -1);
++	ASSERT_RETURN(ret >= 0);
++
++	if (!ret)
++		return TEST_SKIP;
++
++	/* make sure that uids and gids are mapped */
++	if (!all_uids_gids_are_mapped())
++		return TEST_SKIP;
++
++	/*
++	 * Setup:
++	 *  conn_a: policy holder for com.example.a
++	 *  conn_b: name holder of com.example.b
++	 */
++
++	signal(SIGUSR1, nosig);
++	sigemptyset(&sset);
++	sigaddset(&sset, SIGUSR1);
++	sigprocmask(SIG_BLOCK, &sset, NULL);
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	/*
++	 * Before registering any policy holder, make sure that the
++	 * bus is secure by default. This test is necessary; it
++	 * catches several cases where old D-Bus was vulnerable.
++	 */
++
++	ret = test_priv_before_policy_upload(env);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Make sure unprivileged users are not able to register
++	 * policy holders.
++	 */
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++		struct kdbus_conn *holder;
++
++		holder = kdbus_hello_registrar(env->buspath,
++					       "com.example.a", NULL, 0,
++					       KDBUS_HELLO_POLICY_HOLDER);
++		ASSERT_EXIT(holder == NULL && errno == EPERM);
++	}),
++	({ 0; }));
++	ASSERT_RETURN(ret == 0);
++
++	/* Register policy holder */
++
++	conn_a = kdbus_hello_registrar(env->buspath, "com.example.a",
++				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn_a);
++
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_b);
++
++	ret = kdbus_name_acquire(conn_b, "com.example.b", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure bus-owners can always acquire names.
++	 */
++	ret = kdbus_name_acquire(conn, "com.example.a", NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	kdbus_conn_free(conn);
++
++	/*
++	 * Make sure unprivileged users cannot acquire names with default
++	 * policy assigned.
++	 */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret < 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged users can acquire names if we make them
++	 * world-accessible.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = 0,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	/*
++	 * Make sure unprivileged/normal connections are not able
++	 * to update policies
++	 */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_conn_update_policy(unpriv, "com.example.a",
++					       &access, 1);
++		ASSERT_EXIT(ret == -EOPNOTSUPP);
++	}));
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged users can acquire names if we make them
++	 * gid-accessible. But only if the gid matches.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_GROUP,
++		.id = UNPRIV_GID,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_GROUP,
++		.id = 1,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret < 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged users can acquire names if we make them
++	 * uid-accessible. But only if the uid matches.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = UNPRIV_UID,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = 1,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret < 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged users cannot acquire names if no owner-policy
++	 * matches, even if SEE/TALK policies match.
++	 */
++
++	num = 4;
++	acc = (struct kdbus_policy_access[]){
++		{
++			.type = KDBUS_POLICY_ACCESS_GROUP,
++			.id = UNPRIV_GID,
++			.access = KDBUS_POLICY_SEE,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = UNPRIV_UID,
++			.access = KDBUS_POLICY_TALK,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_WORLD,
++			.id = 0,
++			.access = KDBUS_POLICY_TALK,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_WORLD,
++			.id = 0,
++			.access = KDBUS_POLICY_SEE,
++		},
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", acc, num);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret < 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged users can acquire names if the only matching
++	 * policy is somewhere in the middle.
++	 */
++
++	num = 5;
++	acc = (struct kdbus_policy_access[]){
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 1,
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 2,
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = UNPRIV_UID,
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 3,
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 4,
++			.access = KDBUS_POLICY_OWN,
++		},
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", acc, num);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_name_acquire(unpriv, "com.example.a", NULL);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Clear policies
++	 */
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", NULL, 0);
++	ASSERT_RETURN(ret == 0);
++
++	/*
++	 * Make sure privileged bus users can _always_ talk to others.
++	 */
++
++	conn = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_msg_send(conn, "com.example.b", 0xdeadbeef, 0, 0, 0, 0);
++	ASSERT_EXIT(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_b, 300, NULL, NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	kdbus_conn_free(conn);
++
++	/*
++	 * Make sure unprivileged bus users cannot talk by default.
++	 */
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users can talk to equals, even without
++	 * policy.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = UNPRIV_UID,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.c", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		struct kdbus_conn *owner;
++
++		owner = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_RETURN(owner);
++
++		ret = kdbus_name_acquire(owner, "com.example.c", NULL);
++		ASSERT_EXIT(ret >= 0);
++
++		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++		ret = kdbus_msg_recv_poll(owner, 100, NULL, NULL);
++		ASSERT_EXIT(ret >= 0);
++
++		kdbus_conn_free(owner);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users can talk to privileged users if a
++	 * suitable UID policy is set.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = UNPRIV_UID,
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users can talk to privileged users if a
++	 * suitable GID policy is set.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_GROUP,
++		.id = UNPRIV_GID,
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users can talk to privileged users if a
++	 * suitable WORLD policy is set.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = 0,
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users cannot talk to privileged users if
++	 * no suitable policy is set.
++	 */
++
++	num = 5;
++	acc = (struct kdbus_policy_access[]){
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 0,
++			.access = KDBUS_POLICY_OWN,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 1,
++			.access = KDBUS_POLICY_TALK,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = UNPRIV_UID,
++			.access = KDBUS_POLICY_SEE,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 3,
++			.access = KDBUS_POLICY_TALK,
++		},
++		{
++			.type = KDBUS_POLICY_ACCESS_USER,
++			.id = 4,
++			.access = KDBUS_POLICY_TALK,
++		},
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", acc, num);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure unprivileged bus users can talk to privileged users if a
++	 * suitable OWN privilege overrides TALK.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = 0,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
++	ASSERT_EXIT(ret >= 0);
++
++	/*
++	 * Make sure the TALK cache is reset correctly when policies are
++	 * updated.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = 0,
++		.access = KDBUS_POLICY_TALK,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.b", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = RUN_UNPRIVILEGED_CONN(unpriv, env->buspath, ({
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++
++		ret = kdbus_msg_recv_poll(conn_b, 100, NULL, NULL);
++		ASSERT_EXIT(ret >= 0);
++
++		ret = kdbus_conn_update_policy(conn_a, "com.example.b",
++					       NULL, 0);
++		ASSERT_RETURN(ret == 0);
++
++		ret = kdbus_msg_send(unpriv, "com.example.b", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret == -EPERM);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Make sure the TALK cache is reset correctly when policy holders
++	 * disconnect.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_WORLD,
++		.id = 0,
++		.access = KDBUS_POLICY_OWN,
++	};
++
++	conn = kdbus_hello_registrar(env->buspath, "com.example.c",
++				     NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn);
++
++	ret = kdbus_conn_update_policy(conn, "com.example.c", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	owner = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(owner);
++
++	ret = kdbus_name_acquire(owner, "com.example.c", NULL);
++	ASSERT_RETURN(ret >= 0);
++
++	ret = RUN_UNPRIVILEGED(UNPRIV_UID, UNPRIV_GID, ({
++		struct kdbus_conn *unpriv;
++
++		/* wait for parent to be finished */
++		sigemptyset(&sset);
++		ret = sigsuspend(&sset);
++		ASSERT_RETURN(ret == -1 && errno == EINTR);
++
++		unpriv = kdbus_hello(env->buspath, 0, NULL, 0);
++		ASSERT_RETURN(unpriv);
++
++		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret >= 0);
++
++		ret = kdbus_msg_recv_poll(owner, 100, NULL, NULL);
++		ASSERT_EXIT(ret >= 0);
++
++		/* free policy holder */
++		kdbus_conn_free(conn);
++
++		ret = kdbus_msg_send(unpriv, "com.example.c", 0xdeadbeef, 0, 0,
++				     0, 0);
++		ASSERT_EXIT(ret == -EPERM);
++
++		kdbus_conn_free(unpriv);
++	}), ({
++		/* make sure the policy holder is only valid in the child */
++		kdbus_conn_free(conn);
++		kill(pid, SIGUSR1);
++	}));
++	ASSERT_RETURN(ret >= 0);
++
++	/*
++	 * Finally, verify broadcast behavior after policies have
++	 * been uploaded.
++	 */
++
++	ret = test_broadcast_after_policy_upload(env);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_conn_free(owner);
++
++	/*
++	 * cleanup resources
++	 */
++
++	kdbus_conn_free(conn_b);
++	kdbus_conn_free(conn_a);
++
++	return TEST_OK;
++}
++
++int kdbus_test_policy_priv(struct kdbus_test_env *env)
++{
++	pid_t pid;
++	int ret;
++
++	/* make sure to exit() if a child returns from fork() */
++	pid = getpid();
++	ret = test_policy_priv(env);
++	if (pid != getpid())
++		exit(1);
++
++	return ret;
++}
+diff --git a/tools/testing/selftests/kdbus/test-policy.c b/tools/testing/selftests/kdbus/test-policy.c
+new file mode 100644
+index 0000000..96d20d5
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-policy.c
+@@ -0,0 +1,80 @@
++#include <errno.h>
++#include <stdio.h>
++#include <string.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stdint.h>
++#include <stdbool.h>
++#include <unistd.h>
++
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++int kdbus_test_policy(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn_a, *conn_b;
++	struct kdbus_policy_access access;
++	int ret;
++
++	/* Invalid name */
++	conn_a = kdbus_hello_registrar(env->buspath, ".example.a",
++				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn_a == NULL);
++
++	conn_a = kdbus_hello_registrar(env->buspath, "example",
++				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn_a == NULL);
++
++	conn_a = kdbus_hello_registrar(env->buspath, "com.example.a",
++				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn_a);
++
++	conn_b = kdbus_hello_registrar(env->buspath, "com.example.b",
++				       NULL, 0, KDBUS_HELLO_POLICY_HOLDER);
++	ASSERT_RETURN(conn_b);
++
++	/*
++	 * Verify there cannot be any duplicate entries, except for specific vs.
++	 * wildcard entries.
++	 */
++
++	access = (struct kdbus_policy_access){
++		.type = KDBUS_POLICY_ACCESS_USER,
++		.id = geteuid(),
++		.access = KDBUS_POLICY_SEE,
++	};
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_policy(conn_b, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == -EEXIST);
++
++	ret = kdbus_conn_update_policy(conn_b, "com.example.a.*", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.a.*", &access, 1);
++	ASSERT_RETURN(ret == -EEXIST);
++
++	ret = kdbus_conn_update_policy(conn_a, "com.example.*", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_policy(conn_b, "com.example.a", &access, 1);
++	ASSERT_RETURN(ret == 0);
++
++	ret = kdbus_conn_update_policy(conn_b, "com.example.*", &access, 1);
++	ASSERT_RETURN(ret == -EEXIST);
++
++	/* Invalid name */
++	ret = kdbus_conn_update_policy(conn_b, ".example.*", &access, 1);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	ret = kdbus_conn_update_policy(conn_b, "example", &access, 1);
++	ASSERT_RETURN(ret == -EINVAL);
++
++	kdbus_conn_free(conn_b);
++	kdbus_conn_free(conn_a);
++
++	return TEST_OK;
++}
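++
++/*
++ * For quick reference, a sketch of the duplicate-entry rules asserted
++ * above: for the same name a second policy holder gets -EEXIST, while
++ * a specific name and its wildcard form are distinct entries. The
++ * expected values mirror the assertions above, not the kdbus docs.
++ */
++static const struct {
++	const char *name;
++	int expected;	/* return of kdbus_conn_update_policy() */
++} example_policy_cases[] __attribute__((unused)) = {
++	{ "com.example.a",	0 },		/* first holder */
++	{ "com.example.a",	-EEXIST },	/* duplicate, other holder */
++	{ "com.example.a.*",	0 },		/* wildcard is distinct */
++	{ ".example.*",		-EINVAL },	/* leading dot */
++	{ "example",		-EINVAL },	/* single element */
++};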
+diff --git a/tools/testing/selftests/kdbus/test-sync.c b/tools/testing/selftests/kdbus/test-sync.c
+new file mode 100644
+index 0000000..e2be910
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-sync.c
+@@ -0,0 +1,369 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <pthread.h>
++#include <stdbool.h>
++#include <signal.h>
++#include <sys/wait.h>
++#include <sys/eventfd.h>
++
++#include "kdbus-api.h"
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++static struct kdbus_conn *conn_a, *conn_b;
++static unsigned int cookie = 0xdeadbeef;
++
++static void nop_handler(int sig) {}
++
++static int interrupt_sync(struct kdbus_conn *conn_src,
++			  struct kdbus_conn *conn_dst)
++{
++	pid_t pid;
++	int ret, status;
++	struct kdbus_msg *msg = NULL;
++	struct sigaction sa = {
++		.sa_handler = nop_handler,
++		.sa_flags = SA_NOCLDSTOP|SA_RESTART,
++	};
++
++	cookie++;
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		ret = sigaction(SIGINT, &sa, NULL);
++		ASSERT_EXIT(ret == 0);
++
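++		/*
++		 * The nop SIGINT handler with SA_RESTART lets the interrupted
++		 * sync send resume and then time out normally.
++		 */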
++		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
++					  KDBUS_MSG_EXPECT_REPLY,
++					  100000000ULL, 0, conn_src->id, -1);
++		ASSERT_EXIT(ret == -ETIMEDOUT);
++
++		_exit(EXIT_SUCCESS);
++	}
++
++	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	ret = kill(pid, SIGINT);
++	ASSERT_RETURN_VAL(ret == 0, ret);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	if (WIFSIGNALED(status))
++		return TEST_ERR;
++
++	ret = kdbus_msg_recv_poll(conn_src, 100, NULL, NULL);
++	ASSERT_RETURN(ret == -ETIMEDOUT);
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++static int close_epipe_sync(const char *bus)
++{
++	pid_t pid;
++	int ret, status;
++	struct kdbus_conn *conn_src;
++	struct kdbus_conn *conn_dst;
++	struct kdbus_msg *msg = NULL;
++
++	conn_src = kdbus_hello(bus, 0, NULL, 0);
++	ASSERT_RETURN(conn_src);
++
++	ret = kdbus_add_match_empty(conn_src);
++	ASSERT_RETURN(ret == 0);
++
++	conn_dst = kdbus_hello(bus, 0, NULL, 0);
++	ASSERT_RETURN(conn_dst);
++
++	cookie++;
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		uint64_t dst_id;
++
++		/* close our reference */
++		dst_id = conn_dst->id;
++		kdbus_conn_free(conn_dst);
++
++		ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
++		ASSERT_EXIT(ret == 0 && msg->cookie == cookie);
++		ASSERT_EXIT(msg->src_id == dst_id);
++
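++		/*
++		 * The parent destroys the target while this sync send is
++		 * waiting for a reply, so the send must fail with -EPIPE.
++		 */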
++		cookie++;
++		ret = kdbus_msg_send_sync(conn_src, NULL, cookie,
++					  KDBUS_MSG_EXPECT_REPLY,
++					  100000000ULL, 0, dst_id, -1);
++		ASSERT_EXIT(ret == -EPIPE);
++
++		_exit(EXIT_SUCCESS);
++	}
++
++	ret = kdbus_msg_send(conn_dst, NULL, cookie, 0, 0, 0,
++			     KDBUS_DST_ID_BROADCAST);
++	ASSERT_RETURN(ret == 0);
++
++	cookie++;
++	ret = kdbus_msg_recv_poll(conn_dst, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
++	/* destroy connection */
++	kdbus_conn_free(conn_dst);
++	kdbus_conn_free(conn_src);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	if (!WIFEXITED(status))
++		return TEST_ERR;
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++static int cancel_fd_sync(struct kdbus_conn *conn_src,
++			  struct kdbus_conn *conn_dst)
++{
++	pid_t pid;
++	int cancel_fd;
++	int ret, status;
++	uint64_t counter = 1;
++	struct kdbus_msg *msg = NULL;
++
++	cancel_fd = eventfd(0, 0);
++	ASSERT_RETURN_VAL(cancel_fd >= 0, cancel_fd);
++
++	cookie++;
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
++					  KDBUS_MSG_EXPECT_REPLY,
++					  100000000ULL, 0, conn_src->id,
++					  cancel_fd);
++		ASSERT_EXIT(ret == -ECANCELED);
++
++		_exit(EXIT_SUCCESS);
++	}
++
++	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
++	ASSERT_RETURN(ret == 0 && msg->cookie == cookie);
++
++	kdbus_msg_free(msg);
++
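++	/* signalling the eventfd wakes and cancels the child's pending sync send */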
++	ret = write(cancel_fd, &counter, sizeof(counter));
++	ASSERT_RETURN(ret == sizeof(counter));
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	if (WIFSIGNALED(status))
++		return TEST_ERR;
++
++	return (status == EXIT_SUCCESS) ? TEST_OK : TEST_ERR;
++}
++
++static int no_cancel_sync(struct kdbus_conn *conn_src,
++			  struct kdbus_conn *conn_dst)
++{
++	pid_t pid;
++	int cancel_fd;
++	int ret, status;
++	struct kdbus_msg *msg = NULL;
++
++	/* pass eventfd, but never signal it so it shouldn't have any effect */
++
++	cancel_fd = eventfd(0, 0);
++	ASSERT_RETURN_VAL(cancel_fd >= 0, cancel_fd);
++
++	cookie++;
++	pid = fork();
++	ASSERT_RETURN_VAL(pid >= 0, pid);
++
++	if (pid == 0) {
++		ret = kdbus_msg_send_sync(conn_dst, NULL, cookie,
++					  KDBUS_MSG_EXPECT_REPLY,
++					  100000000ULL, 0, conn_src->id,
++					  cancel_fd);
++		ASSERT_EXIT(ret == 0);
++
++		_exit(EXIT_SUCCESS);
++	}
++
++	ret = kdbus_msg_recv_poll(conn_src, 100, &msg, NULL);
++	ASSERT_RETURN_VAL(ret == 0 && msg->cookie == cookie, -1);
++
++	kdbus_msg_free(msg);
++
++	ret = kdbus_msg_send_reply(conn_src, cookie, conn_dst->id);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	ret = waitpid(pid, &status, 0);
++	ASSERT_RETURN_VAL(ret >= 0, ret);
++
++	if (WIFSIGNALED(status))
++		return -1;
++
++	return (status == EXIT_SUCCESS) ? 0 : -1;
++}
++
++static void *run_thread_reply(void *data)
++{
++	int ret;
++	unsigned long status = TEST_OK;
++
++	ret = kdbus_msg_recv_poll(conn_a, 3000, NULL, NULL);
++	if (ret < 0)
++		goto exit_thread;
++
++	kdbus_printf("Thread received message, sending reply ...\n");
++
++	/* using an unknown cookie must fail */
++	ret = kdbus_msg_send_reply(conn_a, ~cookie, conn_b->id);
++	if (ret != -EPERM) {
++		status = TEST_ERR;
++		goto exit_thread;
++	}
++
++	ret = kdbus_msg_send_reply(conn_a, cookie, conn_b->id);
++	if (ret != 0) {
++		status = TEST_ERR;
++		goto exit_thread;
++	}
++
++exit_thread:
++	pthread_exit(NULL);
++	return (void *) status;
++}
++
++int kdbus_test_sync_reply(struct kdbus_test_env *env)
++{
++	unsigned long status;
++	pthread_t thread;
++	int ret;
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	pthread_create(&thread, NULL, run_thread_reply, NULL);
++
++	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
++				  KDBUS_MSG_EXPECT_REPLY,
++				  5000000000ULL, 0, conn_a->id, -1);
++
++	pthread_join(thread, (void *) &status);
++	ASSERT_RETURN(status == 0);
++	ASSERT_RETURN(ret == 0);
++
++	ret = interrupt_sync(conn_a, conn_b);
++	ASSERT_RETURN(ret == 0);
++
++	ret = close_epipe_sync(env->buspath);
++	ASSERT_RETURN(ret == 0);
++
++	ret = cancel_fd_sync(conn_a, conn_b);
++	ASSERT_RETURN(ret == 0);
++
++	ret = no_cancel_sync(conn_a, conn_b);
++	ASSERT_RETURN(ret == 0);
++
++	kdbus_printf("-- closing bus connections\n");
++
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	return TEST_OK;
++}
++
++#define BYEBYE_ME ((void*)0L)
++#define BYEBYE_THEM ((void*)1L)
++
++static void *run_thread_byebye(void *data)
++{
++	struct kdbus_cmd cmd_byebye = { .size = sizeof(cmd_byebye) };
++	int ret;
++
++	ret = kdbus_msg_recv_poll(conn_a, 3000, NULL, NULL);
++	if (ret == 0) {
++		kdbus_printf("Thread received message, invoking BYEBYE ...\n");
++		kdbus_msg_recv(conn_a, NULL, NULL);
++		if (data == BYEBYE_ME)
++			kdbus_cmd_byebye(conn_b->fd, &cmd_byebye);
++		else if (data == BYEBYE_THEM)
++			kdbus_cmd_byebye(conn_a->fd, &cmd_byebye);
++	}
++
++	pthread_exit(NULL);
++	return NULL;
++}
++
++int kdbus_test_sync_byebye(struct kdbus_test_env *env)
++{
++	pthread_t thread;
++	int ret;
++
++	/*
++	 * This sends a synchronous message to a thread, which waits until it
++	 * has received the message and then invokes BYEBYE on the *ORIGINAL*
++	 * connection. That is, on the same connection that synchronously waits
++	 * for a reply.
++	 * This should properly wake the connection up and cause ECONNRESET as
++	 * the connection is disconnected now.
++	 *
++	 * The second time, we do the same but invoke BYEBYE on the *TARGET*
++	 * connection. This should also wake up the synchronous sender as the
++	 * reply cannot be sent by a disconnected target.
++	 */
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	pthread_create(&thread, NULL, run_thread_byebye, BYEBYE_ME);
++
++	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
++				  KDBUS_MSG_EXPECT_REPLY,
++				  5000000000ULL, 0, conn_a->id, -1);
++
++	ASSERT_RETURN(ret == -ECONNRESET);
++
++	pthread_join(thread, NULL);
++
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	pthread_create(&thread, NULL, run_thread_byebye, BYEBYE_THEM);
++
++	ret = kdbus_msg_send_sync(conn_b, NULL, cookie,
++				  KDBUS_MSG_EXPECT_REPLY,
++				  5000000000ULL, 0, conn_a->id, -1);
++
++	ASSERT_RETURN(ret == -EPIPE);
++
++	pthread_join(thread, NULL);
++
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	return TEST_OK;
++}
+diff --git a/tools/testing/selftests/kdbus/test-timeout.c b/tools/testing/selftests/kdbus/test-timeout.c
+new file mode 100644
+index 0000000..cfd1930
+--- /dev/null
++++ b/tools/testing/selftests/kdbus/test-timeout.c
+@@ -0,0 +1,99 @@
++#include <stdio.h>
++#include <string.h>
++#include <time.h>
++#include <fcntl.h>
++#include <stdlib.h>
++#include <stddef.h>
++#include <unistd.h>
++#include <stdint.h>
++#include <errno.h>
++#include <assert.h>
++#include <poll.h>
++#include <stdbool.h>
++
++#include "kdbus-api.h"
++#include "kdbus-test.h"
++#include "kdbus-util.h"
++#include "kdbus-enum.h"
++
++int timeout_msg_recv(struct kdbus_conn *conn, uint64_t *expected)
++{
++	struct kdbus_cmd_recv recv = { .size = sizeof(recv) };
++	struct kdbus_msg *msg;
++	int ret;
++
++	ret = kdbus_cmd_recv(conn->fd, &recv);
++	if (ret < 0) {
++		kdbus_printf("error receiving message: %d (%m)\n", ret);
++		return ret;
++	}
++
++	msg = (struct kdbus_msg *)(conn->buf + recv.msg.offset);
++
++	ASSERT_RETURN_VAL(msg->payload_type == KDBUS_PAYLOAD_KERNEL, -EINVAL);
++	ASSERT_RETURN_VAL(msg->src_id == KDBUS_SRC_ID_KERNEL, -EINVAL);
++	ASSERT_RETURN_VAL(msg->dst_id == conn->id, -EINVAL);
++
++	*expected &= ~(1ULL << msg->cookie_reply);
++	kdbus_printf("Got message timeout for cookie %llu\n",
++		     msg->cookie_reply);
++
++	ret = kdbus_free(conn, recv.msg.offset);
++	if (ret < 0)
++		return ret;
++
++	return 0;
++}
++
++int kdbus_test_timeout(struct kdbus_test_env *env)
++{
++	struct kdbus_conn *conn_a, *conn_b;
++	struct pollfd fd;
++	int ret, i, n_msgs = 4;
++	uint64_t expected = 0;
++	uint64_t cookie = 0xdeadbeef;
++
++	conn_a = kdbus_hello(env->buspath, 0, NULL, 0);
++	conn_b = kdbus_hello(env->buspath, 0, NULL, 0);
++	ASSERT_RETURN(conn_a && conn_b);
++
++	fd.fd = conn_b->fd;
++
++	/*
++	 * send messages that expect a reply (with timeouts between
++	 * 100 and 400 msec), but never answer them.
++	 */
++	for (i = 0; i < n_msgs; i++, cookie++) {
++		kdbus_printf("Sending message with cookie %llu ...\n",
++			     (unsigned long long)cookie);
++		ASSERT_RETURN(kdbus_msg_send(conn_b, NULL, cookie,
++			      KDBUS_MSG_EXPECT_REPLY,
++			      (i + 1) * 100ULL * 1000000ULL, 0,
++			      conn_a->id) == 0);
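++		/* remember each pending reply as one bit, keyed by its cookie */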
++		expected |= 1ULL << cookie;
++	}
++
++	for (;;) {
++		fd.events = POLLIN | POLLPRI | POLLHUP;
++		fd.revents = 0;
++
++		ret = poll(&fd, 1, (n_msgs + 1) * 100);
++		if (ret == 0)
++			kdbus_printf("--- timeout\n");
++		if (ret <= 0)
++			break;
++
++		if (fd.revents & POLLIN)
++			ASSERT_RETURN(!timeout_msg_recv(conn_b, &expected));
++
++		if (expected == 0)
++			break;
++	}
++
++	ASSERT_RETURN(expected == 0);
++
++	kdbus_conn_free(conn_a);
++	kdbus_conn_free(conn_b);
++
++	return TEST_OK;
++}


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-06-20 17:37 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-06-20 17:37 UTC (permalink / raw
  To: gentoo-commits

commit:     d02e0e858201a0d8f313e24e2a9e288186007ea4
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Sat Jun 20 17:37:33 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Sat Jun 20 17:37:33 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d02e0e85

Add check to saved_root_name for supported filesystem path naming.

 2900_dev-root-proc-mount-fix.patch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
index 6ea86e2..4cd558e 100644
--- a/2900_dev-root-proc-mount-fix.patch
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -18,7 +18,7 @@
  #ifdef CONFIG_BLOCK
 -	create_dev("/dev/root", ROOT_DEV);
 -	mount_block_root("/dev/root", root_mountflags);
-+	if (saved_root_name[0]) {
++	if (saved_root_name[0] == '/') {
 +		create_dev(saved_root_name, ROOT_DEV);
 +		mount_block_root(saved_root_name, root_mountflags);
 +	} else {


* [gentoo-commits] proj/linux-patches:4.1 commit in: /
@ 2015-06-08 17:59 Mike Pagano
  0 siblings, 0 replies; 71+ messages in thread
From: Mike Pagano @ 2015-06-08 17:59 UTC (permalink / raw
  To: gentoo-commits

commit:     d9e0ab9c31aaa675ae74c89e5264d14f2082e02f
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Jun  8 17:59:22 2015 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Jun  8 17:59:22 2015 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=d9e0ab9c

Patch to enable link security restrictions by default. Patch to disable Windows 8 compatibility for some Lenovo ThinkPads. Patch to ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs. Patch to not lock when UMH is waiting on current thread spawned by linuxrc (bug #481344). fbcondecor bootsplash patch. Kernel patch that enables gcc < v4.9 optimizations for additional CPUs. Fix for lz4 compression. Add patch to support namespace user.pax.* on tmpfs (bug #470644).

 0000_README                                        |   32 +
 ...ble-link-security-restrictions-by-default.patch |   22 +
 2700_ThinkPad-30-brightness-control-fix.patch      |   67 +
 2900_dev-root-proc-mount-fix.patch                 |   30 +
 2905_2disk-resume-image-fix.patch                  |   24 +
 2910_lz4-compression-fix.patch                     |   30 +
 4200_fbcondecor-3.19.patch                         | 2119 ++++++++++++++++++++
 ...able-additional-cpu-optimizations-for-gcc.patch |  327 +++
 ...-additional-cpu-optimizations-for-gcc-4.9.patch |  402 ++++
 9 files changed, 3053 insertions(+)

diff --git a/0000_README b/0000_README
index 36c2b96..69f60dc 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,38 @@ Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
 
+Patch:  1510_fs-enable-link-security-restrictions-by-default.patch
+From:   http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch/
+Desc:   Enable link security restrictions by default.
+
+Patch:  2700_ThinkPad-30-brightness-control-fix.patch
+From:   Seth Forshee <seth.forshee@canonical.com>
+Desc:   ACPI: Disable Windows 8 compatibility for some Lenovo ThinkPads.
+
+Patch:  2900_dev-root-proc-mount-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=438380
+Desc:   Ensure that /dev/root doesn't appear in /proc/mounts when booting without an initramfs.
+
+Patch:  2905_2disk-resume-image-fix.patch
+From:   Al Viro <viro <at> ZenIV.linux.org.uk>
+Desc:   Do not lock when UMH is waiting on current thread spawned by linuxrc. (bug #481344)
+
+Patch:  2910_lz4-compression-fix.patch
+From:   https://bugs.gentoo.org/show_bug.cgi?id=546422
+Desc:   Fix for lz4 compression regression. Thanks to Christian Xia. See bug #546422.
+
+Patch:  4200_fbcondecor-3.19.patch
+From:   http://www.mepiscommunity.org/fbcondecor
+Desc:   Bootsplash ported by Marco. (Bug #539616)
+
 Patch:  4567_distro-Gentoo-Kconfig.patch
 From:   Tom Wijsman <TomWij@gentoo.org>
 Desc:   Add Gentoo Linux support config settings and defaults.
+
+Patch:  5000_enable-additional-cpu-optimizations-for-gcc.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc < v4.9 optimizations for additional CPUs.
+
+Patch:  5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
+From:   https://github.com/graysky2/kernel_gcc_patch/
+Desc:   Kernel patch enables gcc >= v4.9 optimizations for additional CPUs.

diff --git a/1510_fs-enable-link-security-restrictions-by-default.patch b/1510_fs-enable-link-security-restrictions-by-default.patch
new file mode 100644
index 0000000..639fb3c
--- /dev/null
+++ b/1510_fs-enable-link-security-restrictions-by-default.patch
@@ -0,0 +1,22 @@
+From: Ben Hutchings <ben@decadent.org.uk>
+Subject: fs: Enable link security restrictions by default
+Date: Fri, 02 Nov 2012 05:32:06 +0000
+Bug-Debian: https://bugs.debian.org/609455
+Forwarded: not-needed
+
+This reverts commit 561ec64ae67ef25cac8d72bb9c4bfc955edfd415
+('VFS: don't do protected {sym,hard}links by default').
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -651,8 +651,8 @@ static inline void put_link(struct namei
+ 	path_put(link);
+ }
+ 
+-int sysctl_protected_symlinks __read_mostly = 0;
+-int sysctl_protected_hardlinks __read_mostly = 0;
++int sysctl_protected_symlinks __read_mostly = 1;
++int sysctl_protected_hardlinks __read_mostly = 1;
+ 
+ /**
+  * may_follow_link - Check symlink following for unsafe situations

diff --git a/2700_ThinkPad-30-brightness-control-fix.patch b/2700_ThinkPad-30-brightness-control-fix.patch
new file mode 100644
index 0000000..b548c6d
--- /dev/null
+++ b/2700_ThinkPad-30-brightness-control-fix.patch
@@ -0,0 +1,67 @@
+diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
+index cb96296..6c242ed 100644
+--- a/drivers/acpi/blacklist.c
++++ b/drivers/acpi/blacklist.c
+@@ -269,6 +276,61 @@  static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+ 	},
+ 
+ 	/*
++	 * The following Lenovo models have a broken workaround in the
++	 * acpi_video backlight implementation to meet the Windows 8
++	 * requirement of 101 backlight levels. Reverting to pre-Win8
++	 * behavior fixes the problem.
++	 */
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad L430",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L430"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T430s",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad T530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad W530",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W530"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X1 Carbon",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
++		},
++	},
++	{
++	.callback = dmi_disable_osi_win8,
++	.ident = "Lenovo ThinkPad X230",
++	.matches = {
++		     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++		     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
++		},
++	},
++
++	/*
+ 	 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
+ 	 * Linux ignores it, except for the machines enumerated below.
+ 	 */
+

diff --git a/2900_dev-root-proc-mount-fix.patch b/2900_dev-root-proc-mount-fix.patch
new file mode 100644
index 0000000..6ea86e2
--- /dev/null
+++ b/2900_dev-root-proc-mount-fix.patch
@@ -0,0 +1,30 @@
+--- a/init/do_mounts.c	2014-08-26 08:03:30.000013100 -0400
++++ b/init/do_mounts.c	2014-08-26 08:11:19.720014712 -0400
+@@ -484,7 +484,10 @@ void __init change_floppy(char *fmt, ...
+ 	va_start(args, fmt);
+ 	vsprintf(buf, fmt, args);
+ 	va_end(args);
+-	fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
++	if (saved_root_name[0])
++		fd = sys_open(saved_root_name, O_RDWR | O_NDELAY, 0);
++	else
++		fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
+ 	if (fd >= 0) {
+ 		sys_ioctl(fd, FDEJECT, 0);
+ 		sys_close(fd);
+@@ -527,8 +530,13 @@ void __init mount_root(void)
+ 	}
+ #endif
+ #ifdef CONFIG_BLOCK
+-	create_dev("/dev/root", ROOT_DEV);
+-	mount_block_root("/dev/root", root_mountflags);
++	if (saved_root_name[0]) {
++		create_dev(saved_root_name, ROOT_DEV);
++		mount_block_root(saved_root_name, root_mountflags);
++	} else {
++		create_dev("/dev/root", ROOT_DEV);
++		mount_block_root("/dev/root", root_mountflags);
++	}
+ #endif
+ }
+ 

diff --git a/2905_2disk-resume-image-fix.patch b/2905_2disk-resume-image-fix.patch
new file mode 100644
index 0000000..7e95d29
--- /dev/null
+++ b/2905_2disk-resume-image-fix.patch
@@ -0,0 +1,24 @@
+diff --git a/kernel/kmod.c b/kernel/kmod.c
+index fb32636..d968882 100644
+--- a/kernel/kmod.c
++++ b/kernel/kmod.c
+@@ -575,7 +575,8 @@
+ 		call_usermodehelper_freeinfo(sub_info);
+ 		return -EINVAL;
+ 	}
+-	helper_lock();
++	if (!(current->flags & PF_FREEZER_SKIP))
++		helper_lock();
+ 	if (!khelper_wq || usermodehelper_disabled) {
+ 		retval = -EBUSY;
+ 		goto out;
+@@ -611,7 +612,8 @@ wait_done:
+ out:
+ 	call_usermodehelper_freeinfo(sub_info);
+ unlock:
+-	helper_unlock();
++	if (!(current->flags & PF_FREEZER_SKIP))
++		helper_unlock();
+ 	return retval;
+ }
+ EXPORT_SYMBOL(call_usermodehelper_exec);

diff --git a/2910_lz4-compression-fix.patch b/2910_lz4-compression-fix.patch
new file mode 100644
index 0000000..1c55f32
--- /dev/null
+++ b/2910_lz4-compression-fix.patch
@@ -0,0 +1,30 @@
+--- a/lib/lz4/lz4_decompress.c	2015-04-13 16:20:04.896315560 +0800
++++ b/lib/lz4/lz4_decompress.c	2015-04-13 16:27:08.929317053 +0800
+@@ -139,8 +139,12 @@
+ 			/* Error: request to write beyond destination buffer */
+ 			if (cpy > oend)
+ 				goto _output_error;
++#if LZ4_ARCH64
++			if ((ref + COPYLENGTH) > oend)
++#else
+ 			if ((ref + COPYLENGTH) > oend ||
+ 					(op + COPYLENGTH) > oend)
++#endif
+ 				goto _output_error;
+ 			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ 			while (op < cpy)
+@@ -270,7 +274,13 @@
+ 		if (cpy > oend - COPYLENGTH) {
+ 			if (cpy > oend)
+ 				goto _output_error; /* write outside of buf */
+-
++#if LZ4_ARCH64
++			if ((ref + COPYLENGTH) > oend)
++#else
++			if ((ref + COPYLENGTH) > oend ||
++			    (op + COPYLENGTH) > oend)
++#endif
++				goto _output_error;
+ 			LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
+ 			while (op < cpy)
+ 				*op++ = *ref++;

diff --git a/4200_fbcondecor-3.19.patch b/4200_fbcondecor-3.19.patch
new file mode 100644
index 0000000..29c379f
--- /dev/null
+++ b/4200_fbcondecor-3.19.patch
@@ -0,0 +1,2119 @@
+diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
+index fe85e7c..2230930 100644
+--- a/Documentation/fb/00-INDEX
++++ b/Documentation/fb/00-INDEX
+@@ -23,6 +23,8 @@ ep93xx-fb.txt
+ 	- info on the driver for EP93xx LCD controller.
+ fbcon.txt
+ 	- intro to and usage guide for the framebuffer console (fbcon).
++fbcondecor.txt
++	- info on the Framebuffer Console Decoration
+ framebuffer.txt
+ 	- introduction to frame buffer devices.
+ gxfb.txt
+diff --git a/Documentation/fb/fbcondecor.txt b/Documentation/fb/fbcondecor.txt
+new file mode 100644
+index 0000000..3388c61
+--- /dev/null
++++ b/Documentation/fb/fbcondecor.txt
+@@ -0,0 +1,207 @@
++What is it?
++-----------
++
++The framebuffer decorations are a kernel feature which allows displaying a 
++background picture on selected consoles.
++
++What do I need to get it to work?
++---------------------------------
++
++To get fbcondecor up-and-running you will have to:
++ 1) get a copy of splashutils [1] or a similar program
++ 2) get some fbcondecor themes
++ 3) build the kernel helper program
++ 4) build your kernel with the FB_CON_DECOR option enabled.
++
++To get fbcondecor operational right after fbcon initialization is finished, you
++will have to include a theme and the kernel helper into your initramfs image.
++Please refer to splashutils documentation for instructions on how to do that.
++
++[1] The splashutils package can be downloaded from:
++    http://github.com/alanhaggai/fbsplash
++
++The userspace helper
++--------------------
++
++The userspace fbcondecor helper (by default: /sbin/fbcondecor_helper) is called by the
++kernel whenever an important event occurs and the kernel needs some kind of
++job to be carried out. Important events include console switches and video
++mode switches (the kernel requests background images and configuration
++parameters for the current console). The fbcondecor helper must be accessible at
++all times. If it's not, fbcondecor will be switched off automatically.
++
++It's possible to set the path to the fbcondecor helper by writing it to
++/proc/sys/kernel/fbcondecor.
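++
++For example, to point the kernel at the default helper location:
++
++	echo /sbin/fbcondecor_helper > /proc/sys/kernel/fbcondecor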
++
++*****************************************************************************
++
++The information below is mostly technical stuff. There's probably no need to
++read it unless you plan to develop a userspace helper.
++
++The fbcondecor protocol
++-----------------------
++
++The fbcondecor protocol defines a communication interface between the kernel and
++the userspace fbcondecor helper.
++
++The kernel side is responsible for:
++
++ * rendering console text, using an image as a background (instead of a
++   standard solid color fbcon uses),
++ * accepting commands from the user via ioctls on the fbcondecor device,
++ * calling the userspace helper to set things up as soon as the fb subsystem 
++   is initialized.
++
++The userspace helper is responsible for everything else, including parsing
++configuration files, decompressing the image files whenever the kernel needs
++it, and communicating with the kernel if necessary.
++
++The fbcondecor protocol specifies how communication is done in both ways:
++kernel->userspace and userspace->kernel.
++  
++Kernel -> Userspace
++-------------------
++
++The kernel communicates with the userspace helper by calling it and specifying
++the task to be done in a series of arguments.
++
++The arguments follow the pattern:
++<fbcondecor protocol version> <command> <parameters>
++
++All commands defined in fbcondecor protocol v2 have the following parameters:
++ virtual console
++ framebuffer number
++ theme
++
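++As an example (the theme name 'default' is hypothetical), a v2 'init'
++request for virtual console 1 on framebuffer 0 would invoke the helper as:
++
++	/sbin/fbcondecor_helper 2 init 1 0 default
++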
++Fbcondecor protocol v1 specified an additional 'fbcondecor mode' after the
++framebuffer number. Fbcondecor protocol v1 is deprecated and should not be used.
++
++Fbcondecor protocol v2 specifies the following commands:
++
++getpic
++------
++ The kernel issues this command to request image data. It's up to the 
++ userspace  helper to find a background image appropriate for the specified 
++ theme and the current resolution. The userspace helper should respond by 
++ issuing the FBIOCONDECOR_SETPIC ioctl.
++
++init
++----
++ The kernel issues this command after the fbcondecor device is created and
++ the fbcondecor interface is initialized. Upon receiving 'init', the userspace
++ helper should parse the kernel command line (/proc/cmdline) or otherwise
++ decide whether fbcondecor is to be activated.
++
++ To activate fbcondecor on the first console the helper should issue the
++ FBIOCONDECOR_SETCFG, FBIOCONDECOR_SETPIC and FBIOCONDECOR_SETSTATE commands,
++ in the above-mentioned order.
++
++ When the userspace helper is called in an early phase of the boot process
++ (right after the initialization of fbcon), no filesystems will be mounted.
++ The helper program should mount sysfs and then create the appropriate
++ framebuffer, fbcondecor and tty0 devices (if they don't already exist) to get
++ current display settings and to be able to communicate with the kernel side.
++ It should probably also mount the procfs to be able to parse the kernel
++ command line parameters.
++
++ Note that the console sem is not held when the kernel calls fbcondecor_helper
++ with the 'init' command. The fbcondecor helper should perform all ioctls with
++ origin set to FBCON_DECOR_IO_ORIG_USER.
++
++modechange
++----------
++ The kernel issues this command on a mode change. The helper's response should
++ be similar to the response to the 'init' command. Note that this time the
++ console sem is held and all ioctls must be performed with origin set to
++ FBCON_DECOR_IO_ORIG_KERNEL.
++
++
++Userspace -> Kernel
++-------------------
++
++Userspace programs can communicate with fbcondecor via ioctls on the
++fbcondecor device. These ioctls are to be used by both the userspace helper
++(called only by the kernel) and userspace configuration tools (run by the users).
++
++The fbcondecor helper should set the origin field to FBCON_DECOR_IO_ORIG_KERNEL
++when doing the appropriate ioctls. All userspace configuration tools should
++use FBCON_DECOR_IO_ORIG_USER. Failure to set the appropriate value in the origin
++field when performing ioctls from the kernel helper will most likely result
++in a console deadlock.
++
++FBCON_DECOR_IO_ORIG_KERNEL instructs fbcondecor not to try to acquire the console
++semaphore. Not surprisingly, FBCON_DECOR_IO_ORIG_USER instructs it to acquire
++the console sem.
++
++The framebuffer console decoration provides the following ioctls (all defined in 
++linux/fb.h):
++
++FBIOCONDECOR_SETPIC
++description: loads a background picture for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct fb_image*
++notes: 
++If called for consoles other than the current foreground one, the picture data
++will be ignored.
++
++If the current virtual console is running in an 8-bpp mode, the cmap substruct
++of fb_image has to be filled appropriately: start should be set to 16 (first
++16 colors are reserved for fbcon), len to a value <= 240 and red, green and
++blue should point to valid cmap data. The transp field is ignored. The fields
++dx, dy, bg_color, fg_color in fb_image are ignored as well.
++
++FBIOCONDECOR_SETCFG
++description: sets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++notes: The structure has to be filled with valid data.
++
++FBIOCONDECOR_GETCFG
++description: gets the fbcondecor config for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: struct vc_decor*
++
++FBIOCONDECOR_SETSTATE
++description: sets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: 0 = disabled, 1 = enabled.
++
++FBIOCONDECOR_GETSTATE
++description: gets the fbcondecor state for a virtual console
++argument: struct fbcon_decor_iowrapper*; data: unsigned int*
++          values: as in FBIOCONDECOR_SETSTATE
++
++Info on used structures:
++
++Definition of struct vc_decor can be found in linux/console_decor.h. It's
++heavily commented. Note that the 'theme' field should point to a string
++no longer than FBCON_DECOR_THEME_LEN. When a FBIOCONDECOR_GETCFG call is
++performed, the theme field should point to a char buffer of length
++FBCON_DECOR_THEME_LEN.
++
++Definition of struct fbcon_decor_iowrapper can be found in linux/fb.h.
++The fields in this struct have the following meaning:
++
++vc: 
++Virtual console number.
++
++origin: 
++Specifies if the ioctl is performed as a response to a kernel request. The
++fbcondecor helper should set this field to FBCON_DECOR_IO_ORIG_KERNEL, userspace
++programs should set it to FBCON_DECOR_IO_ORIG_USER. This field is necessary to
++avoid console semaphore deadlocks.
++
++data: 
++Pointer to a data structure appropriate for the performed ioctl. Type of
++the data struct is specified in the ioctls description.
++
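++A minimal sketch of a userspace configuration tool built on the interface
++described above (the /dev/fbcondecor device node path is an assumption;
++the ioctl, struct and origin constant names are as documented):
++
++	#include <fcntl.h>
++	#include <stdio.h>
++	#include <unistd.h>
++	#include <sys/ioctl.h>
++	#include <linux/fb.h>
++
++	int main(void)
++	{
++		unsigned int state = 1;	/* 1 = enabled, 0 = disabled */
++		struct fbcon_decor_iowrapper wrapper = {
++			.vc = 0,	/* first virtual console */
++			.origin = FBCON_DECOR_IO_ORIG_USER,	/* not a kernel-requested call */
++			.data = &state,
++		};
++		int fd = open("/dev/fbcondecor", O_RDWR);	/* assumed node path */
++
++		if (fd < 0) {
++			perror("open");
++			return 1;
++		}
++		if (ioctl(fd, FBIOCONDECOR_SETSTATE, &wrapper) < 0)
++			perror("FBIOCONDECOR_SETSTATE");
++		close(fd);
++		return 0;
++	}
++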
++*****************************************************************************
++
++Credit
++------
++
++Original 'bootsplash' project & implementation by:
++  Volker Poplawski <volker@poplawski.de>, Stefan Reinauer <stepan@suse.de>,
++  Steffen Winterfeldt <snwint@suse.de>, Michael Schroeder <mls@suse.de>,
++  Ken Wimer <wimer@suse.de>.
++
++Fbcondecor, fbcondecor protocol design, current implementation & docs by:
++  Michal Januszewski <michalj+fbcondecor@gmail.com>
++
+diff --git a/drivers/Makefile b/drivers/Makefile
+index 7183b6a..d576148 100644
+--- a/drivers/Makefile
++++ b/drivers/Makefile
+@@ -17,6 +17,10 @@ obj-y				+= pwm/
+ obj-$(CONFIG_PCI)		+= pci/
+ obj-$(CONFIG_PARISC)		+= parisc/
+ obj-$(CONFIG_RAPIDIO)		+= rapidio/
++# tty/ comes before char/ so that the VT console is the boot-time
++# default.
++obj-y				+= tty/
++obj-y				+= char/
+ obj-y				+= video/
+ obj-y				+= idle/
+ 
+@@ -42,11 +46,6 @@ obj-$(CONFIG_REGULATOR)		+= regulator/
+ # reset controllers early, since gpu drivers might rely on them to initialize
+ obj-$(CONFIG_RESET_CONTROLLER)	+= reset/
+ 
+-# tty/ comes before char/ so that the VT console is the boot-time
+-# default.
+-obj-y				+= tty/
+-obj-y				+= char/
+-
+ # iommu/ comes before gpu as gpu are using iommu controllers
+ obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index fe1cd01..6d2e87a 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -126,6 +126,19 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+          such that other users of the framebuffer will remain normally
+          oriented.
+ 
++config FB_CON_DECOR
++	bool "Support for the Framebuffer Console Decorations"
++	depends on FRAMEBUFFER_CONSOLE=y && !FB_TILEBLITTING
++	default n
++	---help---
++	  This option enables support for framebuffer console decorations which
++	  makes it possible to display images in the background of the system
++	  consoles.  Note that userspace utilities are necessary in order to take 
++	  advantage of these features. Refer to Documentation/fb/fbcondecor.txt 
++	  for more information.
++
++	  If unsure, say N.
++
+ config STI_CONSOLE
+         bool "STI text console"
+         depends on PARISC
+diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
+index 43bfa48..cc104b6f 100644
+--- a/drivers/video/console/Makefile
++++ b/drivers/video/console/Makefile
+@@ -16,4 +16,5 @@ obj-$(CONFIG_FRAMEBUFFER_CONSOLE)     += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
+                                          fbcon_ccw.o
+ endif
+ 
++obj-$(CONFIG_FB_CON_DECOR)     	  += fbcondecor.o cfbcondecor.o
+ obj-$(CONFIG_FB_STI)              += sticore.o
+diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
+index 61b182b..984384b 100644
+--- a/drivers/video/console/bitblit.c
++++ b/drivers/video/console/bitblit.c
+@@ -18,6 +18,7 @@
+ #include <linux/console.h>
+ #include <asm/types.h>
+ #include "fbcon.h"
++#include "fbcondecor.h"
+ 
+ /*
+  * Accelerated handlers.
+@@ -55,6 +56,13 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ 	area.height = height * vc->vc_font.height;
+ 	area.width = width * vc->vc_font.width;
+ 
++	if (fbcon_decor_active(info, vc)) {
++ 		area.sx += vc->vc_decor.tx;
++ 		area.sy += vc->vc_decor.ty;
++ 		area.dx += vc->vc_decor.tx;
++ 		area.dy += vc->vc_decor.ty;
++ 	}
++
+ 	info->fbops->fb_copyarea(info, &area);
+ }
+ 
+@@ -380,11 +388,15 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+ 	cursor.image.depth = 1;
+ 	cursor.rop = ROP_XOR;
+ 
+-	if (info->fbops->fb_cursor)
+-		err = info->fbops->fb_cursor(info, &cursor);
++	if (fbcon_decor_active(info, vc)) {
++		fbcon_decor_cursor(info, &cursor);
++	} else {
++		if (info->fbops->fb_cursor)
++			err = info->fbops->fb_cursor(info, &cursor);
+ 
+-	if (err)
+-		soft_cursor(info, &cursor);
++		if (err)
++			soft_cursor(info, &cursor);
++	}
+ 
+ 	ops->cursor_reset = 0;
+ }
+diff --git a/drivers/video/console/cfbcondecor.c b/drivers/video/console/cfbcondecor.c
+new file mode 100644
+index 0000000..a2b4497
+--- /dev/null
++++ b/drivers/video/console/cfbcondecor.c
+@@ -0,0 +1,471 @@
++/*
++ *  linux/drivers/video/cfbcon_decor.c -- Framebuffer decor render functions
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootdecor" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ */
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/selection.h>
++#include <linux/slab.h>
++#include <linux/vt_kern.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++#define parse_pixel(shift,bpp,type)						\
++	do {									\
++		if (d & (0x80 >> (shift)))					\
++			dd2[(shift)] = fgx;					\
++		else								\
++			dd2[(shift)] = transparent ? *(type *)decor_src : bgx;	\
++		decor_src += (bpp);						\
++	} while (0)								\
++
++extern int get_color(struct vc_data *vc, struct fb_info *info,
++		     u16 c, int is_fg);
++
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc)
++{
++	int i, j, k;
++	int minlen = min(min(info->var.red.length, info->var.green.length),
++			     info->var.blue.length);
++	u32 col;
++
++	for (j = i = 0; i < 16; i++) {
++		k = color_table[i];
++
++		col = ((vc->vc_palette[j++]  >> (8-minlen))
++			<< info->var.red.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.green.offset);
++		col |= ((vc->vc_palette[j++] >> (8-minlen))
++			<< info->var.blue.offset);
++			((u32 *)info->pseudo_palette)[k] = col;
++	}
++}
++
++void fbcon_decor_renderc(struct fb_info *info, int ypos, int xpos, int height,
++		      int width, u8* src, u32 fgx, u32 bgx, u8 transparent)
++{
++	unsigned int x, y;
++	u32 dd;
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	unsigned int d = ypos * info->fix.line_length + xpos * bytespp;
++	unsigned int ds = (ypos * info->var.xres + xpos) * bytespp;
++	u16 dd2[4];
++
++	u8* decor_src = (u8 *)(info->bgdecor.data + ds);
++	u8* dst = (u8 *)(info->screen_base + d);
++
++	if ((ypos + height) > info->var.yres || (xpos + width) > info->var.xres)
++		return;
++
++	for (y = 0; y < height; y++) {
++		switch (info->var.bits_per_pixel) {
++
++		case 32:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     *(u32 *)decor_src : bgx;
++
++				d <<= 1;
++				decor_src += 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++		case 24:
++			for (x = 0; x < width; x++) {
++
++				if ((x & 7) == 0)
++					d = *src++;
++				if (d & 0x80)
++					dd = fgx;
++				else
++					dd = transparent ?
++					     (*(u32 *)decor_src & 0xffffff) : bgx;
++
++				d <<= 1;
++				decor_src += 3;
++#ifdef __LITTLE_ENDIAN
++				fb_writew(dd & 0xffff, dst);
++				dst += 2;
++				fb_writeb((dd >> 16), dst);
++#else
++				fb_writew(dd >> 8, dst);
++				dst += 2;
++				fb_writeb(dd & 0xff, dst);
++#endif
++				dst++;
++			}
++			break;
++		case 16:
++			for (x = 0; x < width; x += 2) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 2, u16);
++				parse_pixel(1, 2, u16);
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 16);
++#else
++				dd = dd2[1] | (dd2[0] << 16);
++#endif
++				d <<= 2;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++			break;
++
++		case 8:
++			for (x = 0; x < width; x += 4) {
++				if ((x & 7) == 0)
++					d = *src++;
++
++				parse_pixel(0, 1, u8);
++				parse_pixel(1, 1, u8);
++				parse_pixel(2, 1, u8);
++				parse_pixel(3, 1, u8);
++
++#ifdef __LITTLE_ENDIAN
++				dd = dd2[0] | (dd2[1] << 8) | (dd2[2] << 16) | (dd2[3] << 24);
++#else
++				dd = dd2[3] | (dd2[2] << 8) | (dd2[1] << 16) | (dd2[0] << 24);
++#endif
++				d <<= 4;
++				fb_writel(dd, dst);
++				dst += 4;
++			}
++		}
++
++		dst += info->fix.line_length - width * bytespp;
++		decor_src += (info->var.xres - width) * bytespp;
++	}
++}
++
++#define cc2cx(a) 						\
++	((info->fix.visual == FB_VISUAL_TRUECOLOR || 		\
++	  info->fix.visual == FB_VISUAL_DIRECTCOLOR) ? 		\
++	 ((u32*)info->pseudo_palette)[a] : a)
++
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info,
++		   const unsigned short *s, int count, int yy, int xx)
++{
++	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
++	struct fbcon_ops *ops = info->fbcon_par;
++	int fg_color, bg_color, transparent;
++	u8 *src;
++	u32 bgx, fgx;
++	u16 c = scr_readw(s);
++
++	fg_color = get_color(vc, info, c, 1);
++        bg_color = get_color(vc, info, c, 0);
++
++	/* Don't paint the background image if console is blanked */
++	transparent = ops->blank_state ? 0 :
++		(vc->vc_decor.bg_color == bg_color);
++
++	xx = xx * vc->vc_font.width + vc->vc_decor.tx;
++	yy = yy * vc->vc_font.height + vc->vc_decor.ty;
++
++	fgx = cc2cx(fg_color);
++	bgx = cc2cx(bg_color);
++
++	while (count--) {
++		c = scr_readw(s++);
++		src = vc->vc_font.data + (c & charmask) * vc->vc_font.height *
++		      ((vc->vc_font.width + 7) >> 3);
++
++		fbcon_decor_renderc(info, yy, xx, vc->vc_font.height,
++			       vc->vc_font.width, src, fgx, bgx, transparent);
++		xx += vc->vc_font.width;
++	}
++}
++
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor)
++{
++	int i;
++	unsigned int dsize, s_pitch;
++	struct fbcon_ops *ops = info->fbcon_par;
++	struct vc_data* vc;
++	u8 *src;
++
++	/* we really don't need any cursors while the console is blanked */
++	if (info->state != FBINFO_STATE_RUNNING || ops->blank_state)
++		return;
++
++	vc = vc_cons[ops->currcon].d;
++
++	src = kmalloc(64 + sizeof(struct fb_image), GFP_ATOMIC);
++	if (!src)
++		return;
++
++	s_pitch = (cursor->image.width + 7) >> 3;
++	dsize = s_pitch * cursor->image.height;
++	if (cursor->enable) {
++		switch (cursor->rop) {
++		case ROP_XOR:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] ^ cursor->mask[i];
++                        break;
++		case ROP_COPY:
++		default:
++			for (i = 0; i < dsize; i++)
++				src[i] = cursor->image.data[i] & cursor->mask[i];
++			break;
++		}
++	} else
++		memcpy(src, cursor->image.data, dsize);
++
++	fbcon_decor_renderc(info,
++			cursor->image.dy + vc->vc_decor.ty,
++			cursor->image.dx + vc->vc_decor.tx,
++			cursor->image.height,
++			cursor->image.width,
++			(u8*)src,
++			cc2cx(cursor->image.fg_color),
++			cc2cx(cursor->image.bg_color),
++			cursor->image.bg_color == vc->vc_decor.bg_color);
++
++	kfree(src);
++}
++
++static void decorset(u8 *dst, int height, int width, int dstbytes,
++		        u32 bgx, int bpp)
++{
++	int i;
++
++	if (bpp == 8)
++		bgx |= bgx << 8;
++	if (bpp == 16 || bpp == 8)
++		bgx |= bgx << 16;
++
++	while (height-- > 0) {
++		u8 *p = dst;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++) {
++				fb_writel(bgx, p); p += 4;
++			}
++			break;
++		case 24:
++			for (i=0; i < width; i++) {
++#ifdef __LITTLE_ENDIAN
++				fb_writew((bgx & 0xffff),(u16*)p); p += 2;
++				fb_writeb((bgx >> 16),p++);
++#else
++				fb_writew((bgx >> 8),(u16*)p); p += 2;
++				fb_writeb((bgx & 0xff),p++);
++#endif
++			}
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 2) {
++				fb_writel(bgx,p); p += 4;
++			}
++			if (width & 1)
++				fb_writew(bgx,(u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++) {
++				fb_writel(bgx,p); p += 4;
++			}
++
++			if (width & 2) {
++				fb_writew(bgx,p); p += 2;
++			}
++			if (width & 1)
++				fb_writeb(bgx,(u8*)p);
++			break;
++
++		}
++		dst += dstbytes;
++	}
++}
++
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes,
++		   int srclinebytes, int bpp)
++{
++	int i;
++
++	while (height-- > 0) {
++		u32 *p = (u32 *)dst;
++		u32 *q = (u32 *)src;
++
++		switch (bpp) {
++
++		case 32:
++			for (i=0; i < width; i++)
++				fb_writel(*q++, p++);
++			break;
++		case 24:
++			for (i=0; i < (width*3/4); i++)
++				fb_writel(*q++, p++);
++			if ((width*3) % 4) {
++				if (width & 2) {
++					fb_writeb(*(u8*)q, (u8*)p);
++				} else if (width & 1) {
++					fb_writew(*(u16*)q, (u16*)p);
++					fb_writeb(*(u8*)((u16*)q+1),(u8*)((u16*)p+2));
++				}
++			}
++			break;
++		case 16:
++			for (i=0; i < width/4; i++) {
++				fb_writel(*q++, p++);
++				fb_writel(*q++, p++);
++			}
++			if (width & 2)
++				fb_writel(*q++, p++);
++			if (width & 1)
++				fb_writew(*(u16*)q, (u16*)p);
++			break;
++		case 8:
++			for (i=0; i < width/4; i++)
++				fb_writel(*q++, p++);
++
++			if (width & 2) {
++				fb_writew(*(u16*)q, (u16*)p);
++				q = (u32*) ((u16*)q + 1);
++				p = (u32*) ((u16*)p + 1);
++			}
++			if (width & 1)
++				fb_writeb(*(u8*)q, (u8*)p);
++			break;
++		}
++
++		dst += linebytes;
++		src += srclinebytes;
++	}
++}
++
++static void decorfill(struct fb_info *info, int sy, int sx, int height,
++		       int width)
++{
++	int bytespp = ((info->var.bits_per_pixel + 7) >> 3);
++	int d  = sy * info->fix.line_length + sx * bytespp;
++	int ds = (sy * info->var.xres + sx) * bytespp;
++
++	fbcon_decor_copy((u8 *)(info->screen_base + d), (u8 *)(info->bgdecor.data + ds),
++		    height, width, info->fix.line_length, info->var.xres * bytespp,
++		    info->var.bits_per_pixel);
++}
++
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx,
++		    int height, int width)
++{
++	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
++	struct fbcon_ops *ops = info->fbcon_par;
++	u8 *dst;
++	int transparent, bg_color = attr_bgcol_ec(bgshift, vc, info);
++
++	transparent = (vc->vc_decor.bg_color == bg_color);
++	sy = sy * vc->vc_font.height + vc->vc_decor.ty;
++	sx = sx * vc->vc_font.width + vc->vc_decor.tx;
++	height *= vc->vc_font.height;
++	width *= vc->vc_font.width;
++
++	/* Don't paint the background image if console is blanked */
++	if (transparent && !ops->blank_state) {
++		decorfill(info, sy, sx, height, width);
++	} else {
++		dst = (u8 *)(info->screen_base + sy * info->fix.line_length +
++			     sx * ((info->var.bits_per_pixel + 7) >> 3));
++		decorset(dst, height, width, info->fix.line_length, cc2cx(bg_color),
++			  info->var.bits_per_pixel);
++	}
++}
++
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info,
++			    int bottom_only)
++{
++	unsigned int tw = vc->vc_cols*vc->vc_font.width;
++	unsigned int th = vc->vc_rows*vc->vc_font.height;
++
++	if (!bottom_only) {
++		/* top margin */
++		decorfill(info, 0, 0, vc->vc_decor.ty, info->var.xres);
++		/* left margin */
++		decorfill(info, vc->vc_decor.ty, 0, th, vc->vc_decor.tx);
++		/* right margin */
++		decorfill(info, vc->vc_decor.ty, vc->vc_decor.tx + tw, th, 
++			   info->var.xres - vc->vc_decor.tx - tw);
++	}
++	decorfill(info, vc->vc_decor.ty + th, 0, 
++		   info->var.yres - vc->vc_decor.ty - th, info->var.xres);
++}
++
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, 
++			   int sx, int dx, int width)
++{
++	u16 *d = (u16 *) (vc->vc_origin + vc->vc_size_row * y + dx * 2);
++	u16 *s = d + (dx - sx);
++	u16 *start = d;
++	u16 *ls = d;
++	u16 *le = d + width;
++	u16 c;
++	int x = dx;
++	u16 attr = 1;
++
++	do {
++		c = scr_readw(d);
++		if (attr != (c & 0xff00)) {
++			attr = c & 0xff00;
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start;
++				start = d;
++			}
++		}
++		if (s >= ls && s < le && c == scr_readw(s)) {
++			if (d > start) {
++				fbcon_decor_putcs(vc, info, start, d - start, y, x);
++				x += d - start + 1;
++				start = d + 1;
++			} else {
++				x++;
++				start++;
++			}
++		}
++		s++;
++		d++;
++	} while (d < le);
++	if (d > start)
++		fbcon_decor_putcs(vc, info, start, d - start, y, x);
++}
++
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank)
++{
++	if (blank) {
++		decorset((u8 *)info->screen_base, info->var.yres, info->var.xres,
++			  info->fix.line_length, 0, info->var.bits_per_pixel);
++	} else {
++		update_screen(vc);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++}
++
+diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
+index f447734..da50d61 100644
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -79,6 +79,7 @@
+ #include <asm/irq.h>
+ 
+ #include "fbcon.h"
++#include "../console/fbcondecor.h"
+ 
+ #ifdef FBCONDEBUG
+ #  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -94,7 +95,7 @@ enum {
+ 
+ static struct display fb_display[MAX_NR_CONSOLES];
+ 
+-static signed char con2fb_map[MAX_NR_CONSOLES];
++signed char con2fb_map[MAX_NR_CONSOLES];
+ static signed char con2fb_map_boot[MAX_NR_CONSOLES];
+ 
+ static int logo_lines;
+@@ -286,7 +287,7 @@ static inline int fbcon_is_inactive(struct vc_data *vc, struct fb_info *info)
+ 		!vt_force_oops_output(vc);
+ }
+ 
+-static int get_color(struct vc_data *vc, struct fb_info *info,
++int get_color(struct vc_data *vc, struct fb_info *info,
+ 	      u16 c, int is_fg)
+ {
+ 	int depth = fb_get_color_depth(&info->var, &info->fix);
+@@ -551,6 +552,9 @@ static int do_fbcon_takeover(int show_logo)
+ 		info_idx = -1;
+ 	} else {
+ 		fbcon_has_console_bind = 1;
++#ifdef CONFIG_FB_CON_DECOR
++		fbcon_decor_init();
++#endif
+ 	}
+ 
+ 	return err;
+@@ -1007,6 +1011,12 @@ static const char *fbcon_startup(void)
+ 	rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 	cols /= vc->vc_font.width;
+ 	rows /= vc->vc_font.height;
++
++	if (fbcon_decor_active(info, vc)) {
++		cols = vc->vc_decor.twidth / vc->vc_font.width;
++		rows = vc->vc_decor.theight / vc->vc_font.height;
++	}
++
+ 	vc_resize(vc, cols, rows);
+ 
+ 	DPRINTK("mode:   %s\n", info->fix.id);
+@@ -1036,7 +1046,7 @@ static void fbcon_init(struct vc_data *vc, int init)
+ 	cap = info->flags;
+ 
+ 	if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
+-	    (info->fix.type == FB_TYPE_TEXT))
++	    (info->fix.type == FB_TYPE_TEXT) || fbcon_decor_active(info, vc))
+ 		logo = 0;
+ 
+ 	if (var_to_display(p, &info->var, info))
+@@ -1260,6 +1270,11 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
+ 		fbcon_clear_margins(vc, 0);
+ 	}
+ 
++ 	if (fbcon_decor_active(info, vc)) {
++ 		fbcon_decor_clear(vc, info, sy, sx, height, width);
++ 		return;
++ 	}
++
+ 	/* Split blits that cross physical y_wrap boundary */
+ 
+ 	y_break = p->vrows - p->yscroll;
+@@ -1279,10 +1294,15 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
+ 	struct display *p = &fb_display[vc->vc_num];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
+-			   get_color(vc, info, scr_readw(s), 1),
+-			   get_color(vc, info, scr_readw(s), 0));
++	if (!fbcon_is_inactive(vc, info)) {
++
++		if (fbcon_decor_active(info, vc))
++			fbcon_decor_putcs(vc, info, s, count, ypos, xpos);
++		else
++			ops->putcs(vc, info, s, count, real_y(p, ypos), xpos,
++				   get_color(vc, info, scr_readw(s), 1),
++				   get_color(vc, info, scr_readw(s), 0));
++	}
+ }
+ 
+ static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+@@ -1298,8 +1318,13 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
+ 	struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ 	struct fbcon_ops *ops = info->fbcon_par;
+ 
+-	if (!fbcon_is_inactive(vc, info))
+-		ops->clear_margins(vc, info, bottom_only);
++	if (!fbcon_is_inactive(vc, info)) {
++	 	if (fbcon_decor_active(info, vc)) {
++	 		fbcon_decor_clear_margins(vc, info, bottom_only);
++ 		} else {
++			ops->clear_margins(vc, info, bottom_only);
++		}
++	}
+ }
+ 
+ static void fbcon_cursor(struct vc_data *vc, int mode)
+@@ -1819,7 +1844,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (softback_top)
+ 			fbcon_softback_note(vc, t, count);
+-		if (logo_shown >= 0)
++		if (logo_shown >= 0 || fbcon_decor_active(info, vc))
+ 			goto redraw_up;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+@@ -1912,6 +1937,8 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
+ 			count = vc->vc_rows;
+ 		if (logo_shown >= 0)
+ 			goto redraw_down;
++		if (fbcon_decor_active(info, vc))
++			goto redraw_down;
+ 		switch (p->scrollmode) {
+ 		case SCROLL_MOVE:
+ 			fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+@@ -2060,6 +2087,13 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int s
+ 		}
+ 		return;
+ 	}
++
++	if (fbcon_decor_active(info, vc) && sy == dy && height == 1) {
++ 		/* must use slower redraw bmove to keep background pic intact */
++ 		fbcon_decor_bmove_redraw(vc, info, sy, sx, dx, width);
++ 		return;
++ 	}
++
+ 	ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ 		   height, width);
+ }
+@@ -2130,8 +2164,8 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
+ 	var.yres = virt_h * virt_fh;
+ 	x_diff = info->var.xres - var.xres;
+ 	y_diff = info->var.yres - var.yres;
+-	if (x_diff < 0 || x_diff > virt_fw ||
+-	    y_diff < 0 || y_diff > virt_fh) {
++	if ((x_diff < 0 || x_diff > virt_fw ||
++		y_diff < 0 || y_diff > virt_fh) && !vc->vc_decor.state) {
+ 		const struct fb_videomode *mode;
+ 
+ 		DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
+@@ -2167,6 +2201,21 @@ static int fbcon_switch(struct vc_data *vc)
+ 
+ 	info = registered_fb[con2fb_map[vc->vc_num]];
+ 	ops = info->fbcon_par;
++	prev_console = ops->currcon;
++	if (prev_console != -1)
++		old_info = registered_fb[con2fb_map[prev_console]];
++
++#ifdef CONFIG_FB_CON_DECOR
++	if (!fbcon_decor_active_vc(vc) && info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++		if (vc_curr && fbcon_decor_active_vc(vc_curr)) {
++			/* Clear the screen to avoid displaying funky colors during
++			 * palette updates. */
++			memset((u8*)info->screen_base + info->fix.line_length * info->var.yoffset,
++			       0, info->var.yres * info->fix.line_length);
++		}
++	}
++#endif
+ 
+ 	if (softback_top) {
+ 		if (softback_lines)
+@@ -2185,9 +2234,6 @@ static int fbcon_switch(struct vc_data *vc)
+ 		logo_shown = FBCON_LOGO_CANSHOW;
+ 	}
+ 
+-	prev_console = ops->currcon;
+-	if (prev_console != -1)
+-		old_info = registered_fb[con2fb_map[prev_console]];
+ 	/*
+ 	 * FIXME: If we have multiple fbdev's loaded, we need to
+ 	 * update all info->currcon.  Perhaps, we can place this
+@@ -2231,6 +2277,18 @@ static int fbcon_switch(struct vc_data *vc)
+ 			fbcon_del_cursor_timer(old_info);
+ 	}
+ 
++	if (fbcon_decor_active_vc(vc)) {
++		struct vc_data *vc_curr = vc_cons[prev_console].d;
++
++		if (!vc_curr->vc_decor.theme ||
++			strcmp(vc->vc_decor.theme, vc_curr->vc_decor.theme) ||
++			(fbcon_decor_active_nores(info, vc_curr) &&
++			 !fbcon_decor_active(info, vc_curr))) {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++	}
++
+ 	if (fbcon_is_inactive(vc, info) ||
+ 	    ops->blank_state != FB_BLANK_UNBLANK)
+ 		fbcon_del_cursor_timer(info);
+@@ -2339,15 +2397,20 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+ 		}
+ 	}
+ 
+- 	if (!fbcon_is_inactive(vc, info)) {
++	if (!fbcon_is_inactive(vc, info)) {
+ 		if (ops->blank_state != blank) {
+ 			ops->blank_state = blank;
+ 			fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ 			ops->cursor_flash = (!blank);
+ 
+-			if (!(info->flags & FBINFO_MISC_USEREVENT))
+-				if (fb_blank(info, blank))
+-					fbcon_generic_blank(vc, info, blank);
++			if (!(info->flags & FBINFO_MISC_USEREVENT)) {
++				if (fb_blank(info, blank)) {
++					if (fbcon_decor_active(info, vc))
++						fbcon_decor_blank(vc, info, blank);
++					else
++						fbcon_generic_blank(vc, info, blank);
++				}
++			}
+ 		}
+ 
+ 		if (!blank)
+@@ -2522,13 +2585,22 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ 	}
+ 
+ 	if (resize) {
++		/* reset wrap/pan */
+ 		int cols, rows;
+ 
+ 		cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
++
++		if (fbcon_decor_active(info, vc)) {
++			info->var.xoffset = info->var.yoffset = p->yscroll = 0;
++			cols = vc->vc_decor.twidth;
++			rows = vc->vc_decor.theight;
++		}
+ 		cols /= w;
+ 		rows /= h;
++
+ 		vc_resize(vc, cols, rows);
++
+ 		if (CON_IS_VISIBLE(vc) && softback_buf)
+ 			fbcon_update_softback(vc);
+ 	} else if (CON_IS_VISIBLE(vc)
+@@ -2657,7 +2729,11 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	int i, j, k, depth;
+ 	u8 val;
+ 
+-	if (fbcon_is_inactive(vc, info))
++	if (fbcon_is_inactive(vc, info)
++#ifdef CONFIG_FB_CON_DECOR
++			|| vc->vc_num != fg_console
++#endif
++		)
+ 		return -EINVAL;
+ 
+ 	if (!CON_IS_VISIBLE(vc))
+@@ -2683,14 +2759,56 @@ static int fbcon_set_palette(struct vc_data *vc, unsigned char *table)
+ 	} else
+ 		fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
+ 
+-	return fb_set_cmap(&palette_cmap, info);
++	if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
++
++		u16 *red, *green, *blue;
++		int minlen = min(min(info->var.red.length, info->var.green.length),
++				     info->var.blue.length);
++		int h;
++
++		struct fb_cmap cmap = {
++			.start = 0,
++			.len = (1 << minlen),
++			.red = NULL,
++			.green = NULL,
++			.blue = NULL,
++			.transp = NULL
++		};
++
++		red = kmalloc(256 * sizeof(u16) * 3, GFP_KERNEL);
++
++		if (!red)
++			goto out;
++
++		green = red + 256;
++		blue = green + 256;
++		cmap.red = red;
++		cmap.green = green;
++		cmap.blue = blue;
++
++		for (i = 0; i < cmap.len; i++) {
++			red[i] = green[i] = blue[i] = (0xffff * i)/(cmap.len-1);
++		}
++
++		h = fb_set_cmap(&cmap, info);
++		fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++		kfree(red);
++
++		return h;
++
++	} else if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		   info->var.bits_per_pixel == 8 && info->bgdecor.cmap.red != NULL)
++		fb_set_cmap(&info->bgdecor.cmap, info);
++
++out:	return fb_set_cmap(&palette_cmap, info);
+ }
+ 
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
+ 	unsigned long p;
+ 	int line;
+-	
++
+ 	if (vc->vc_num != fg_console || !softback_lines)
+ 		return (u16 *) (vc->vc_origin + offset);
+ 	line = offset / vc->vc_size_row;
+@@ -2909,7 +3027,14 @@ static void fbcon_modechanged(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		} else {
++			fbcon_decor_disable(vc, 0);
++			fbcon_decor_call_helper("modechange", vc->vc_num);
++		}
++
+ 		updatescrollmode(p, info, vc);
+ 		scrollback_max = 0;
+ 		scrollback_current = 0;
+@@ -2954,7 +3079,9 @@ static void fbcon_set_all_vcs(struct fb_info *info)
+ 		rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ 		cols /= vc->vc_font.width;
+ 		rows /= vc->vc_font.height;
+-		vc_resize(vc, cols, rows);
++		if (!fbcon_decor_active_nores(info, vc)) {
++			vc_resize(vc, cols, rows);
++		}
+ 	}
+ 
+ 	if (fg != -1)
+@@ -3596,6 +3723,7 @@ static void fbcon_exit(void)
+ 		}
+ 	}
+ 
++	fbcon_decor_exit();
+ 	fbcon_has_exited = 1;
+ }
+ 
+diff --git a/drivers/video/console/fbcondecor.c b/drivers/video/console/fbcondecor.c
+new file mode 100644
+index 0000000..babc8c5
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.c
+@@ -0,0 +1,555 @@
++/*
++ *  linux/drivers/video/console/fbcondecor.c -- Framebuffer console decorations
++ *
++ *  Copyright (C) 2004-2009 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ *  Code based upon "Bootsplash" (C) 2001-2003
++ *       Volker Poplawski <volker@poplawski.de>,
++ *       Stefan Reinauer <stepan@suse.de>,
++ *       Steffen Winterfeldt <snwint@suse.de>,
++ *       Michael Schroeder <mls@suse.de>,
++ *       Ken Wimer <wimer@suse.de>.
++ *
++ *  Compat ioctl support by Thorsten Klein <TK@Thorsten-Klein.de>.
++ *
++ *  This file is subject to the terms and conditions of the GNU General Public
++ *  License.  See the file COPYING in the main directory of this archive for
++ *  more details.
++ *
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fb.h>
++#include <linux/vt_kern.h>
++#include <linux/vmalloc.h>
++#include <linux/unistd.h>
++#include <linux/syscalls.h>
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/workqueue.h>
++#include <linux/kmod.h>
++#include <linux/miscdevice.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/compat.h>
++#include <linux/console.h>
++
++#include <asm/uaccess.h>
++#include <asm/irq.h>
++
++#include "fbcon.h"
++#include "fbcondecor.h"
++
++extern signed char con2fb_map[];
++static int fbcon_decor_enable(struct vc_data *vc);
++char fbcon_decor_path[KMOD_PATH_LEN] = "/sbin/fbcondecor_helper";
++static int initialized = 0;
++
++int fbcon_decor_call_helper(char* cmd, unsigned short vc)
++{
++	char *envp[] = {
++		"HOME=/",
++		"PATH=/sbin:/bin",
++		NULL
++	};
++
++	char tfb[5];
++	char tcons[5];
++	unsigned char fb = (int) con2fb_map[vc];
++
++	char *argv[] = {
++		fbcon_decor_path,
++		"2",
++		cmd,
++		tcons,
++		tfb,
++		vc_cons[vc].d->vc_decor.theme,
++		NULL
++	};
++
++	snprintf(tfb,5,"%d",fb);
++	snprintf(tcons,5,"%d",vc);
++
++	return call_usermodehelper(fbcon_decor_path, argv, envp, UMH_WAIT_EXEC);
++}
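
The helper invocation above hands userspace a fixed argv layout: argv[1] is
the protocol version string "2", argv[2] the command ("init" or "modechange"
in this patch), argv[3] the console number, argv[4] the framebuffer number,
and argv[5] the theme name. A minimal sketch of the userspace side, assuming
only this calling convention (a real /sbin/fbcondecor_helper would render the
theme instead of just logging it):

/* Hypothetical helper skeleton; the argv layout is taken from
 * fbcon_decor_call_helper() above. */
#include <stdio.h>

int main(int argc, char **argv)
{
	if (argc < 6)	/* path, "2", cmd, console, fb, theme */
		return 1;
	fprintf(stderr, "fbcondecor helper: cmd=%s vc=%s fb=%s theme=%s\n",
		argv[2], argv[3], argv[4], argv[5]);
	return 0;	/* exit status is not checked: UMH_WAIT_EXEC */
}
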
++
++/* Disables fbcondecor on a virtual console; called with console sem held. */
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw)
++{
++	struct fb_info* info;
++
++	if (!vc->vc_decor.state)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	vc->vc_decor.state = 0;
++	vc_resize(vc, info->var.xres / vc->vc_font.width,
++		  info->var.yres / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num && redraw) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'off' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++/* Enables fbcondecor on a virtual console; called with console sem held. */
++static int fbcon_decor_enable(struct vc_data *vc)
++{
++	struct fb_info* info;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (vc->vc_decor.twidth == 0 || vc->vc_decor.theight == 0 ||
++	    info == NULL || vc->vc_decor.state || (!info->bgdecor.data &&
++	    vc->vc_num == fg_console))
++		return -EINVAL;
++
++	vc->vc_decor.state = 1;
++	vc_resize(vc, vc->vc_decor.twidth / vc->vc_font.width,
++		  vc->vc_decor.theight / vc->vc_font.height);
++
++	if (fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++	printk(KERN_INFO "fbcondecor: switched decor state to 'on' on console %d\n",
++			 vc->vc_num);
++
++	return 0;
++}
++
++static inline int fbcon_decor_ioctl_dosetstate(struct vc_data *vc, unsigned int state, unsigned char origin)
++{
++	int ret;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++	if (!state)
++		ret = fbcon_decor_disable(vc, 1);
++	else
++		ret = fbcon_decor_enable(vc);
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return ret;
++}
++
++static inline void fbcon_decor_ioctl_dogetstate(struct vc_data *vc, unsigned int *state)
++{
++	*state = vc->vc_decor.state;
++}
++
++static int fbcon_decor_ioctl_dosetcfg(struct vc_data *vc, struct vc_decor *cfg, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	char *tmp;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL || !cfg->twidth || !cfg->theight ||
++	    cfg->tx + cfg->twidth  > info->var.xres ||
++	    cfg->ty + cfg->theight > info->var.yres)
++		return -EINVAL;
++
++	len = strlen_user(cfg->theme);
++	if (!len || len > FBCON_DECOR_THEME_LEN)
++		return -EINVAL;
++	tmp = kmalloc(len, GFP_KERNEL);
++	if (!tmp)
++		return -ENOMEM;
++	if (copy_from_user(tmp, (void __user *)cfg->theme, len)) {
++		kfree(tmp);
++		return -EFAULT;
++	}
++	cfg->theme = tmp;
++	cfg->state = 0;
++
++	/* If this ioctl is a response to a request from the kernel, the console
++	 * sem is already held; we also don't need to disable the decor here,
++	 * because either the new config and background picture will load
++	 * successfully and the decor will stay on, or, on failure, the decor
++	 * will be turned off in fbcon. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER) {
++		console_lock();
++		if (vc->vc_decor.state)
++			fbcon_decor_disable(vc, 1);
++//	}
++
++	if (vc->vc_decor.theme)
++		kfree(vc->vc_decor.theme);
++
++	vc->vc_decor = *cfg;
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	printk(KERN_INFO "fbcondecor: console %d using theme '%s'\n",
++			 vc->vc_num, vc->vc_decor.theme);
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dogetcfg(struct vc_data *vc, struct vc_decor *decor)
++{
++	char __user *tmp;
++
++	tmp = decor->theme;
++	*decor = vc->vc_decor;
++	decor->theme = tmp;
++
++	if (vc->vc_decor.theme) {
++		if (copy_to_user(tmp, vc->vc_decor.theme, strlen(vc->vc_decor.theme) + 1))
++			return -EFAULT;
++	} else
++		if (put_user(0, tmp))
++			return -EFAULT;
++
++	return 0;
++}
++
++static int fbcon_decor_ioctl_dosetpic(struct vc_data *vc, struct fb_image *img, unsigned char origin)
++{
++	struct fb_info *info;
++	int len;
++	u8 *tmp;
++
++	if (vc->vc_num != fg_console)
++		return -EINVAL;
++
++	info = registered_fb[(int) con2fb_map[vc->vc_num]];
++
++	if (info == NULL)
++		return -EINVAL;
++
++	if (img->width != info->var.xres || img->height != info->var.yres) {
++		printk(KERN_ERR "fbcondecor: picture dimensions mismatch\n");
++		printk(KERN_ERR "%dx%d vs %dx%d\n", img->width, img->height, info->var.xres, info->var.yres);
++		return -EINVAL;
++	}
++
++	if (img->depth != info->var.bits_per_pixel) {
++		printk(KERN_ERR "fbcondecor: picture depth mismatch\n");
++		return -EINVAL;
++	}
++
++	if (img->depth == 8) {
++		if (!img->cmap.len || !img->cmap.red || !img->cmap.green ||
++		    !img->cmap.blue)
++			return -EINVAL;
++
++		tmp = vmalloc(img->cmap.len * 3 * 2);
++		if (!tmp)
++			return -ENOMEM;
++
++		if (copy_from_user(tmp,
++			    	   (void __user*)img->cmap.red, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 1),
++			    	   (void __user*)img->cmap.green, (img->cmap.len << 1)) ||
++		    copy_from_user(tmp + (img->cmap.len << 2),
++			    	   (void __user*)img->cmap.blue, (img->cmap.len << 1))) {
++			vfree(tmp);
++			return -EFAULT;
++		}
++
++		img->cmap.transp = NULL;
++		img->cmap.red = (u16*)tmp;
++		img->cmap.green = img->cmap.red + img->cmap.len;
++		img->cmap.blue = img->cmap.green + img->cmap.len;
++	} else {
++		img->cmap.red = NULL;
++	}
++
++	len = ((img->depth + 7) >> 3) * img->width * img->height;
++
++	/*
++	 * Allocate an additional byte so that we never go outside of the
++	 * buffer boundaries in the rendering functions in a 24 bpp mode.
++	 */
++	tmp = vmalloc(len + 1);
++
++	if (!tmp)
++		goto out;
++
++	if (copy_from_user(tmp, (void __user*)img->data, len))
++		goto out;
++
++	img->data = tmp;
++
++	/* If this ioctl is a response to a request from the kernel, the console
++	 * sem is already held. */
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_lock();
++
++	if (info->bgdecor.data)
++		vfree((u8*)info->bgdecor.data);
++	if (info->bgdecor.cmap.red)
++		vfree(info->bgdecor.cmap.red);
++
++	info->bgdecor = *img;
++
++	if (fbcon_decor_active_vc(vc) && fg_console == vc->vc_num) {
++		redraw_screen(vc, 0);
++		update_region(vc, vc->vc_origin +
++			      vc->vc_size_row * vc->vc_top,
++			      vc->vc_size_row * (vc->vc_bottom - vc->vc_top) / 2);
++		fbcon_decor_clear_margins(vc, info, 0);
++	}
++
++//	if (origin == FBCON_DECOR_IO_ORIG_USER)
++		console_unlock();
++
++	return 0;
++
++out:	if (img->cmap.red)
++		vfree(img->cmap.red);
++
++	if (tmp)
++		vfree(tmp);
++	return -ENOMEM;
++}
++
++static long fbcon_decor_ioctl(struct file *filp, u_int cmd, u_long arg)
++{
++	struct fbcon_decor_iowrapper __user *wrapper = (void __user*) arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data, &wrapper->data);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC:
++	{
++		struct fb_image img;
++		if (copy_from_user(&img, (struct fb_image __user *)data, sizeof(struct fb_image)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++	case FBIOCONDECOR_SETCFG:
++	{
++		struct vc_decor cfg;
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++	case FBIOCONDECOR_GETCFG:
++	{
++		int rval;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg, (struct vc_decor __user *)data, sizeof(struct vc_decor)))
++			return -EFAULT;
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		if (copy_to_user(data, &cfg, sizeof(struct vc_decor)))
++			return -EFAULT;
++		return rval;
++	}
++	case FBIOCONDECOR_SETSTATE:
++	{
++		unsigned int state = 0;
++		if (get_user(state, (unsigned int __user *)data))
++			return -EFAULT;
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++	case FBIOCONDECOR_GETSTATE:
++	{
++		unsigned int state = 0;
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		return put_user(state, (unsigned int __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++
++#ifdef CONFIG_COMPAT
++
++static long fbcon_decor_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++	struct fbcon_decor_iowrapper32 __user *wrapper = (void __user *)arg;
++	struct vc_data *vc = NULL;
++	unsigned short vc_num = 0;
++	unsigned char origin = 0;
++	compat_uptr_t data_compat = 0;
++	void __user *data = NULL;
++
++	if (!access_ok(VERIFY_READ, wrapper,
++			sizeof(struct fbcon_decor_iowrapper32)))
++		return -EFAULT;
++
++	__get_user(vc_num, &wrapper->vc);
++	__get_user(origin, &wrapper->origin);
++	__get_user(data_compat, &wrapper->data);
++	data = compat_ptr(data_compat);
++
++	if (!vc_cons_allocated(vc_num))
++		return -EINVAL;
++
++	vc = vc_cons[vc_num].d;
++
++	switch (cmd) {
++	case FBIOCONDECOR_SETPIC32:
++	{
++		struct fb_image32 img_compat;
++		struct fb_image img;
++
++		if (copy_from_user(&img_compat, (struct fb_image32 __user *)data, sizeof(struct fb_image32)))
++			return -EFAULT;
++
++		fb_image_from_compat(img, img_compat);
++
++		return fbcon_decor_ioctl_dosetpic(vc, &img, origin);
++	}
++
++	case FBIOCONDECOR_SETCFG32:
++	{
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++
++		vc_decor_from_compat(cfg, cfg_compat);
++
++		return fbcon_decor_ioctl_dosetcfg(vc, &cfg, origin);
++	}
++
++	case FBIOCONDECOR_GETCFG32:
++	{
++		int rval;
++		struct vc_decor32 cfg_compat;
++		struct vc_decor cfg;
++
++		if (copy_from_user(&cfg_compat, (struct vc_decor32 __user *)data, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		cfg.theme = compat_ptr(cfg_compat.theme);
++
++		rval = fbcon_decor_ioctl_dogetcfg(vc, &cfg);
++
++		vc_decor_to_compat(cfg_compat, cfg);
++
++		if (copy_to_user((struct vc_decor32 __user *)data, &cfg_compat, sizeof(struct vc_decor32)))
++			return -EFAULT;
++		return rval;
++	}
++
++	case FBIOCONDECOR_SETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		if (get_user(state_compat, (compat_uint_t __user *)data))
++			return -EFAULT;
++
++		state = (unsigned int)state_compat;
++
++		return fbcon_decor_ioctl_dosetstate(vc, state, origin);
++	}
++
++	case FBIOCONDECOR_GETSTATE32:
++	{
++		compat_uint_t state_compat = 0;
++		unsigned int state = 0;
++
++		fbcon_decor_ioctl_dogetstate(vc, &state);
++		state_compat = (compat_uint_t)state;
++
++		return put_user(state_compat, (compat_uint_t __user *)data);
++	}
++
++	default:
++		return -ENOIOCTLCMD;
++	}
++}
++#else
++  #define fbcon_decor_compat_ioctl NULL
++#endif
++
++static const struct file_operations fbcon_decor_ops = {
++	.owner = THIS_MODULE,
++	.unlocked_ioctl = fbcon_decor_ioctl,
++	.compat_ioctl = fbcon_decor_compat_ioctl
++};
++
++static struct miscdevice fbcon_decor_dev = {
++	.minor = MISC_DYNAMIC_MINOR,
++	.name = "fbcondecor",
++	.fops = &fbcon_decor_ops
++};
++
++void fbcon_decor_reset(void)
++{
++	int i;
++
++	for (i = 0; i < num_registered_fb; i++) {
++		registered_fb[i]->bgdecor.data = NULL;
++		registered_fb[i]->bgdecor.cmap.red = NULL;
++	}
++
++	for (i = 0; i < MAX_NR_CONSOLES && vc_cons[i].d; i++) {
++		vc_cons[i].d->vc_decor.state = vc_cons[i].d->vc_decor.twidth =
++						vc_cons[i].d->vc_decor.theight = 0;
++		vc_cons[i].d->vc_decor.theme = NULL;
++	}
++
++	return;
++}
++
++int fbcon_decor_init(void)
++{
++	int i;
++
++	fbcon_decor_reset();
++
++	if (initialized)
++		return 0;
++
++	i = misc_register(&fbcon_decor_dev);
++	if (i) {
++		printk(KERN_ERR "fbcondecor: failed to register device\n");
++		return i;
++	}
++
++	fbcon_decor_call_helper("init", 0);
++	initialized = 1;
++	return 0;
++}
++
++int fbcon_decor_exit(void)
++{
++	fbcon_decor_reset();
++	return 0;
++}
++
++EXPORT_SYMBOL(fbcon_decor_path);
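
Together with the uapi additions further down (struct fbcon_decor_iowrapper
and the FBIOCONDECOR_* ioctl numbers in include/uapi/linux/fb.h), the misc
device registered above can be driven from userspace. A minimal sketch,
assuming udev creates the node for the "fbcondecor" miscdevice as
/dev/fbcondecor:

/* Sketch: enable the decor on console 1 via FBIOCONDECOR_SETSTATE.
 * The device node name is an assumption; the ioctl number and wrapper
 * struct come from the uapi hunk in this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>

int main(void)
{
	unsigned int state = 1;		/* 1 = decor on, 0 = off */
	struct fbcon_decor_iowrapper wrap = {
		.vc = 1,		/* target virtual console */
		.origin = FBCON_DECOR_IO_ORIG_USER,
		.data = &state,
	};
	int fd = open("/dev/fbcondecor", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fbcondecor");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_SETSTATE, &wrap) < 0)
		perror("FBIOCONDECOR_SETSTATE");
	close(fd);
	return 0;
}
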
+diff --git a/drivers/video/console/fbcondecor.h b/drivers/video/console/fbcondecor.h
+new file mode 100644
+index 0000000..3b3724b
+--- /dev/null
++++ b/drivers/video/console/fbcondecor.h
+@@ -0,0 +1,78 @@
++/* 
++ *  linux/drivers/video/console/fbcondecor.h -- Framebuffer Console Decoration headers
++ *
++ *  Copyright (C) 2004 Michal Januszewski <michalj+fbcondecor@gmail.com>
++ *
++ */
++
++#ifndef __FBCON_DECOR_H
++#define __FBCON_DECOR_H
++
++#ifndef _LINUX_FB_H
++#include <linux/fb.h>
++#endif
++
++/* This is needed for vc_cons in fbcmap.c */
++#include <linux/vt_kern.h>
++
++struct fb_cursor;
++struct fb_info;
++struct vc_data;
++
++#ifdef CONFIG_FB_CON_DECOR
++/* fbcondecor.c */
++int fbcon_decor_init(void);
++int fbcon_decor_exit(void);
++int fbcon_decor_call_helper(char* cmd, unsigned short cons);
++int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw);
++
++/* cfbcondecor.c */
++void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx);
++void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor);
++void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width);
++void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only);
++void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank);
++void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width);
++void fbcon_decor_copy(u8 *dst, u8 *src, int height, int width, int linebytes, int srclinesbytes, int bpp);
++void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc);
++
++/* vt.c */
++void acquire_console_sem(void);
++void release_console_sem(void);
++void do_unblank_screen(int entering_gfx);
++
++/* struct vc_data *y */
++#define fbcon_decor_active_vc(y) (y->vc_decor.state && y->vc_decor.theme) 
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active_nores(x,y) (x->bgdecor.data && fbcon_decor_active_vc(y))
++
++/* struct fb_info *x, struct vc_data *y */
++#define fbcon_decor_active(x,y) (fbcon_decor_active_nores(x,y) &&		\
++			      x->bgdecor.width == x->var.xres && 	\
++			      x->bgdecor.height == x->var.yres &&	\
++			      x->bgdecor.depth == x->var.bits_per_pixel)
++
++
++#else /* CONFIG_FB_CON_DECOR */
++
++static inline void fbcon_decor_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx) {}
++static inline void fbcon_decor_putc(struct vc_data *vc, struct fb_info *info, int c, int ypos, int xpos) {}
++static inline void fbcon_decor_cursor(struct fb_info *info, struct fb_cursor *cursor) {}
++static inline void fbcon_decor_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) {}
++static inline void fbcon_decor_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) {}
++static inline void fbcon_decor_blank(struct vc_data *vc, struct fb_info *info, int blank) {}
++static inline void fbcon_decor_bmove_redraw(struct vc_data *vc, struct fb_info *info, int y, int sx, int dx, int width) {}
++static inline void fbcon_decor_fix_pseudo_pal(struct fb_info *info, struct vc_data *vc) {}
++static inline int fbcon_decor_call_helper(char* cmd, unsigned short cons) { return 0; }
++static inline int fbcon_decor_init(void) { return 0; }
++static inline int fbcon_decor_exit(void) { return 0; }
++static inline int fbcon_decor_disable(struct vc_data *vc, unsigned char redraw) { return 0; }
++
++#define fbcon_decor_active_vc(y) (0)
++#define fbcon_decor_active_nores(x,y) (0)
++#define fbcon_decor_active(x,y) (0)
++
++#endif /* CONFIG_FB_CON_DECOR */
++
++#endif /* __FBCON_DECOR_H */
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index e1f4727..2952e33 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1204,7 +1204,6 @@ config FB_MATROX
+ 	select FB_CFB_FILLRECT
+ 	select FB_CFB_COPYAREA
+ 	select FB_CFB_IMAGEBLIT
+-	select FB_TILEBLITTING
+ 	select FB_MACMODES if PPC_PMAC
+ 	---help---
+ 	  Say Y here if you have a Matrox Millennium, Matrox Millennium II,
+diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
+index f89245b..05e036c 100644
+--- a/drivers/video/fbdev/core/fbcmap.c
++++ b/drivers/video/fbdev/core/fbcmap.c
+@@ -17,6 +17,8 @@
+ #include <linux/slab.h>
+ #include <linux/uaccess.h>
+ 
++#include "../../console/fbcondecor.h"
++
+ static u16 red2[] __read_mostly = {
+     0x0000, 0xaaaa
+ };
+@@ -249,14 +251,17 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info)
+ 			if (transp)
+ 				htransp = *transp++;
+ 			if (info->fbops->fb_setcolreg(start++,
+-						      hred, hgreen, hblue,
++						      hred, hgreen, hblue, 
+ 						      htransp, info))
+ 				break;
+ 		}
+ 	}
+-	if (rc == 0)
++	if (rc == 0) {
+ 		fb_copy_cmap(cmap, &info->cmap);
+-
++		if (fbcon_decor_active(info, vc_cons[fg_console].d) &&
++		    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
++			fbcon_decor_fix_pseudo_pal(info, vc_cons[fg_console].d);
++	}
+ 	return rc;
+ }
+ 
+diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
+index b6d5008..d6703f2 100644
+--- a/drivers/video/fbdev/core/fbmem.c
++++ b/drivers/video/fbdev/core/fbmem.c
+@@ -1250,15 +1250,6 @@ struct fb_fix_screeninfo32 {
+ 	u16			reserved[3];
+ };
+ 
+-struct fb_cmap32 {
+-	u32			start;
+-	u32			len;
+-	compat_caddr_t	red;
+-	compat_caddr_t	green;
+-	compat_caddr_t	blue;
+-	compat_caddr_t	transp;
+-};
+-
+ static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
+ 			  unsigned long arg)
+ {
+diff --git a/include/linux/console_decor.h b/include/linux/console_decor.h
+new file mode 100644
+index 0000000..04b8d80
+--- /dev/null
++++ b/include/linux/console_decor.h
+@@ -0,0 +1,46 @@
++#ifndef _LINUX_CONSOLE_DECOR_H_
++#define _LINUX_CONSOLE_DECOR_H_ 1
++
++/* A structure used by the framebuffer console decorations (drivers/video/console/fbcondecor.c) */
++struct vc_decor {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	char* theme;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++
++struct vc_decor32 {
++	__u8 bg_color;				/* The color that is to be treated as transparent */
++	__u8 state;				/* Current decor state: 0 = off, 1 = on */
++	__u16 tx, ty;				/* Top left corner coordinates of the text field */
++	__u16 twidth, theight;			/* Width and height of the text field */
++	compat_uptr_t theme;
++};
++
++#define vc_decor_from_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = compat_ptr((from).theme)
++
++#define vc_decor_to_compat(to, from) \
++	(to).bg_color = (from).bg_color; \
++	(to).state    = (from).state; \
++	(to).tx       = (from).tx; \
++	(to).ty       = (from).ty; \
++	(to).twidth   = (from).twidth; \
++	(to).theight  = (from).theight; \
++	(to).theme    = ptr_to_compat((from).theme)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#endif
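
A corresponding sketch for FBIOCONDECOR_SETCFG, which takes the struct
vc_decor defined above. console_decor.h is not installed as a uapi header,
so the sketch carries a local copy of the layout; the geometry and theme
name are illustrative values only:

/* Sketch: describe a 640x400 decor text area at (80,60) on console 1.
 * The handler copies the theme string in with strlen_user()/
 * copy_from_user() and later resizes the console to twidth/theight. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fb.h>	/* FBIOCONDECOR_SETCFG, fbcon_decor_iowrapper */

struct vc_decor {	/* local copy of include/linux/console_decor.h */
	unsigned char bg_color;
	unsigned char state;
	unsigned short tx, ty;
	unsigned short twidth, theight;
	char *theme;
};

int main(void)
{
	char theme[] = "default";
	struct vc_decor cfg = {
		.tx = 80, .ty = 60,
		.twidth = 640, .theight = 400,
		.theme = theme,
	};
	struct fbcon_decor_iowrapper wrap = {
		.vc = 1,
		.origin = FBCON_DECOR_IO_ORIG_USER,
		.data = &cfg,
	};
	int fd = open("/dev/fbcondecor", O_RDWR);

	if (fd < 0) {
		perror("open /dev/fbcondecor");
		return 1;
	}
	if (ioctl(fd, FBIOCONDECOR_SETCFG, &wrap) < 0)
		perror("FBIOCONDECOR_SETCFG");
	close(fd);
	return 0;
}
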
+diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
+index 7f0c329..98f5d60 100644
+--- a/include/linux/console_struct.h
++++ b/include/linux/console_struct.h
+@@ -19,6 +19,7 @@
+ struct vt_struct;
+ 
+ #define NPAR 16
++#include <linux/console_decor.h>
+ 
+ struct vc_data {
+ 	struct tty_port port;			/* Upper level data */
+@@ -107,6 +108,8 @@ struct vc_data {
+ 	unsigned long	vc_uni_pagedir;
+ 	unsigned long	*vc_uni_pagedir_loc;  /* [!] Location of uni_pagedir variable for this console */
+ 	bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
++
++	struct vc_decor vc_decor;
+ 	/* additional information is in vt_kern.h */
+ };
+ 
+diff --git a/include/linux/fb.h b/include/linux/fb.h
+index fe6ac95..1e36b03 100644
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -219,6 +219,34 @@ struct fb_deferred_io {
+ };
+ #endif
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_image32 {
++	__u32 dx;			/* Where to place image */
++	__u32 dy;
++	__u32 width;			/* Size of image */
++	__u32 height;
++	__u32 fg_color;			/* Only used when a mono bitmap */
++	__u32 bg_color;
++	__u8  depth;			/* Depth of the image */
++	const compat_uptr_t data;	/* Pointer to image data */
++	struct fb_cmap32 cmap;		/* color map info */
++};
++
++#define fb_image_from_compat(to, from) \
++	(to).dx       = (from).dx; \
++	(to).dy       = (from).dy; \
++	(to).width    = (from).width; \
++	(to).height   = (from).height; \
++	(to).fg_color = (from).fg_color; \
++	(to).bg_color = (from).bg_color; \
++	(to).depth    = (from).depth; \
++	(to).data     = compat_ptr((from).data); \
++	fb_cmap_from_compat((to).cmap, (from).cmap)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /*
+  * Frame buffer operations
+  *
+@@ -489,6 +517,9 @@ struct fb_info {
+ #define FBINFO_STATE_SUSPENDED	1
+ 	u32 state;			/* Hardware state i.e suspend */
+ 	void *fbcon_par;                /* fbcon use-only private area */
++
++	struct fb_image bgdecor;
++
+ 	/* From here on everything is device dependent */
+ 	void *par;
+ 	/* we need the PCI or similar aperture base/size not
+diff --git a/include/uapi/linux/fb.h b/include/uapi/linux/fb.h
+index fb795c3..dc77a03 100644
+--- a/include/uapi/linux/fb.h
++++ b/include/uapi/linux/fb.h
+@@ -8,6 +8,25 @@
+ 
+ #define FB_MAX			32	/* sufficient for now */
+ 
++struct fbcon_decor_iowrapper
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	void *data;
++};
++
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#include <linux/compat.h>
++struct fbcon_decor_iowrapper32
++{
++	unsigned short vc;		/* Virtual console */
++	unsigned char origin;		/* Point of origin of the request */
++	compat_uptr_t data;
++};
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
+ /* ioctls
+    0x46 is 'F'								*/
+ #define FBIOGET_VSCREENINFO	0x4600
+@@ -35,6 +54,25 @@
+ #define FBIOGET_DISPINFO        0x4618
+ #define FBIO_WAITFORVSYNC	_IOW('F', 0x20, __u32)
+ 
++#define FBIOCONDECOR_SETCFG	_IOWR('F', 0x19, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETCFG	_IOR('F', 0x1A, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETSTATE	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_GETSTATE	_IOR('F', 0x1C, struct fbcon_decor_iowrapper)
++#define FBIOCONDECOR_SETPIC 	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper)
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++#define FBIOCONDECOR_SETCFG32	_IOWR('F', 0x19, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETCFG32	_IOR('F', 0x1A, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETSTATE32	_IOWR('F', 0x1B, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_GETSTATE32	_IOR('F', 0x1C, struct fbcon_decor_iowrapper32)
++#define FBIOCONDECOR_SETPIC32	_IOWR('F', 0x1D, struct fbcon_decor_iowrapper32)
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++#define FBCON_DECOR_THEME_LEN		128	/* Maximum length of a theme name */
++#define FBCON_DECOR_IO_ORIG_KERNEL	0	/* Kernel ioctl origin */
++#define FBCON_DECOR_IO_ORIG_USER	1	/* User ioctl origin */
++ 
+ #define FB_TYPE_PACKED_PIXELS		0	/* Packed Pixels	*/
+ #define FB_TYPE_PLANES			1	/* Non interleaved planes */
+ #define FB_TYPE_INTERLEAVED_PLANES	2	/* Interleaved planes	*/
+@@ -277,6 +315,29 @@ struct fb_var_screeninfo {
+ 	__u32 reserved[4];		/* Reserved for future compatibility */
+ };
+ 
++#ifdef __KERNEL__
++#ifdef CONFIG_COMPAT
++struct fb_cmap32 {
++	__u32 start;
++	__u32 len;			/* Number of entries */
++	compat_uptr_t red;		/* Red values	*/
++	compat_uptr_t green;
++	compat_uptr_t blue;
++	compat_uptr_t transp;		/* transparency, can be NULL */
++};
++
++#define fb_cmap_from_compat(to, from) \
++	(to).start  = (from).start; \
++	(to).len    = (from).len; \
++	(to).red    = compat_ptr((from).red); \
++	(to).green  = compat_ptr((from).green); \
++	(to).blue   = compat_ptr((from).blue); \
++	(to).transp = compat_ptr((from).transp)
++
++#endif /* CONFIG_COMPAT */
++#endif /* __KERNEL__ */
++
++
+ struct fb_cmap {
+ 	__u32 start;			/* First entry	*/
+ 	__u32 len;			/* Number of entries */
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 74f5b58..6386ab0 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -146,6 +146,10 @@ static const int cap_last_cap = CAP_LAST_CAP;
+ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
+ #endif
+ 
++#ifdef CONFIG_FB_CON_DECOR
++extern char fbcon_decor_path[];
++#endif
++
+ #ifdef CONFIG_INOTIFY_USER
+ #include <linux/inotify.h>
+ #endif
+@@ -255,6 +259,15 @@ static struct ctl_table sysctl_base_table[] = {
+ 		.mode		= 0555,
+ 		.child		= dev_table,
+ 	},
++#ifdef CONFIG_FB_CON_DECOR
++	{
++		.procname	= "fbcondecor",
++		.data		= &fbcon_decor_path,
++		.maxlen		= KMOD_PATH_LEN,
++		.mode		= 0644,
++		.proc_handler	= &proc_dostring,
++	},
++#endif
+ 	{ }
+ };
+ 
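
Because the new entry sits in sysctl_base_table rather than under the
"kernel" table, the helper path surfaces as /proc/sys/fbcondecor. A hedged
sketch of repointing it from userspace (the alternate helper path is a
made-up example):

/* Sketch: redirect fbcon_decor_path through the proc_dostring handler
 * registered above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/usr/local/sbin/fbcondecor_helper\n";
	int fd = open("/proc/sys/fbcondecor", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/sys/fbcondecor");
		return 1;
	}
	if (write(fd, path, strlen(path)) < 0)
		perror("write");
	close(fd);
	return 0;
}
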

diff --git a/5000_enable-additional-cpu-optimizations-for-gcc.patch b/5000_enable-additional-cpu-optimizations-for-gcc.patch
new file mode 100644
index 0000000..f7ab6f0
--- /dev/null
+++ b/5000_enable-additional-cpu-optimizations-for-gcc.patch
@@ -0,0 +1,327 @@
+This patch has been tested on and known to work with kernel versions from 3.2
+up to the latest git version (pulled on 12/14/2013).
+
+This patch will expand the number of microarchitectures to include new
+processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 2nd Gen Core
+i3/i5/i7 (Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), and Intel 4th
+Gen Core i3/i5/i7 (Haswell). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable using a kernel compilation
+('make') benchmark comparing a generic kernel to one built with one of the
+respective microarchitectures.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version <4.9
+
+---
+diff -uprN a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
+--- a/arch/x86/include/asm/module.h	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/include/asm/module.h	2013-12-15 06:21:24.351122516 -0500
+@@ -15,6 +15,16 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MCOREI7
++#define MODULE_PROC_FAMILY "COREI7 "
++#elif defined CONFIG_MCOREI7AVX
++#define MODULE_PROC_FAMILY "COREI7AVX "
++#elif defined CONFIG_MCOREAVXI
++#define MODULE_PROC_FAMILY "COREAVXI "
++#elif defined CONFIG_MCOREAVX2
++#define MODULE_PROC_FAMILY "COREAVX2 "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +43,18 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+diff -uprN a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
+--- a/arch/x86/Kconfig.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Kconfig.cpu	2013-12-15 06:21:24.351122516 -0500
+@@ -139,7 +139,7 @@ config MPENTIUM4
+ 
+ 
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +147,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +155,55 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +294,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +312,40 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MCOREI7
++	bool "Intel Core i7"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for the Intel Nehalem platform. Intel Nehalem processors
++	  include Core i3, i5, i7, Xeon: 34xx, 35xx, 55xx, 56xx, 75xx processors.
++
++	  Enables -march=corei7
++
++config MCOREI7AVX
++	bool "Intel Core 2nd Gen AVX"
++	---help---
++
++	  Select this for 2nd Gen Core processors including Sandy Bridge.
++
++	  Enables -march=corei7-avx
++
++config MCOREAVXI
++	bool "Intel Core 3rd Gen AVX"
++	---help---
++
++	  Select this for 3rd Gen Core processors including Ivy Bridge.
++
++	  Enables -march=core-avx-i
++
++config MCOREAVX2
++	bool "Intel Core AVX2"
++	---help---
++
++	  Select this for AVX2 enabled processors including Haswell.
++
++	  Enables -march=core-avx2
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +354,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native
++   also detects and applies additional settings beyond -march specific
++   to your CPU (e.g. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +391,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MVIAC7 || X86_GENERIC || MNATIVE || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +422,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || MNATIVE || X86_GENERIC || MK8 || MK7 || MK10 || MBARCELONA || MEFFICEON || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -363,17 +454,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MCOREI7 || MCOREI7AVX || MATOM) || X86_64 || MNATIVE
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MCOREI7 || MCOREI7AVX || MCOREAVXI || MCOREAVX2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+diff -uprN a/arch/x86/Makefile b/arch/x86/Makefile
+--- a/arch/x86/Makefile	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile	2013-12-15 06:21:24.354455723 -0500
+@@ -61,11 +61,26 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MCOREI7) += \
++                $(call cc-option,-march=corei7,$(call cc-option,-mtune=corei7))
++        cflags-$(CONFIG_MCOREI7AVX) += \
++                $(call cc-option,-march=corei7-avx,$(call cc-option,-mtune=corei7-avx))
++        cflags-$(CONFIG_MCOREAVXI) += \
++                $(call cc-option,-march=core-avx-i,$(call cc-option,-mtune=core-avx-i))
++        cflags-$(CONFIG_MCOREAVX2) += \
++                $(call cc-option,-march=core-avx2,$(call cc-option,-mtune=core-avx2))
+ 	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+ 		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+diff -uprN a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
+--- a/arch/x86/Makefile_32.cpu	2013-11-03 18:41:51.000000000 -0500
++++ b/arch/x86/Makefile_32.cpu	2013-12-15 06:21:24.354455723 -0500
+@@ -23,7 +23,14 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsosever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,6 +39,10 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
++cflags-$(CONFIG_MCOREI7)	+= -march=i686 $(call tune,corei7)
++cflags-$(CONFIG_MCOREI7AVX)	+= -march=i686 $(call tune,corei7-avx)
++cflags-$(CONFIG_MCOREAVXI)	+= -march=i686 $(call tune,core-avx-i)
++cflags-$(CONFIG_MCOREAVX2)	+= -march=i686 $(call tune,core-avx2)
+ cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+ 	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))

diff --git a/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
new file mode 100644
index 0000000..c4efd06
--- /dev/null
+++ b/5010_enable-additional-cpu-optimizations-for-gcc-4.9.patch
@@ -0,0 +1,402 @@
+WARNING - this version of the patch works with gcc version 4.9+ and with
+kernel version 3.15.x+, and should NOT be applied when compiling with older
+versions, because several -march flags were renamed in the gcc 4.9 release.
+Use the older version of this patch, hosted in the same GitHub repository,
+for older versions of gcc. For example:
+
+corei7 --> nehalem
+corei7-avx --> sandybridge
+core-avx-i --> ivybridge
+core-avx2 --> haswell
+
+For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
+
+It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
+Note that upstream is using the deprecated '-march=atom' flag when I believe it
+should use the newer '-march=bonnell' flag for Atom processors.
+
+I have made that change to this patch set as well.  See the following kernel
+bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
+
+This patch will expand the number of microarchitectures to include newer
+processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
+14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD
+Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core
+i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Sandybridge), Intel 3rd Gen
+Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core i3/i5/i7 (Haswell), Intel 5th
+Gen Core i3/i5/i7 (Broadwell), and the low power Silvermont series of Atom
+processors (Silvermont). It also offers the compiler the 'native' flag.
+
+Small but real speed increases are measurable using a kernel compilation
+('make') benchmark comparing a generic kernel to one built with one of the
+respective microarchitectures.
+
+See the following experimental evidence supporting this statement:
+https://github.com/graysky2/kernel_gcc_patch
+
+REQUIREMENTS
+linux version >=3.15
+gcc version >=4.9
+
+--- a/arch/x86/include/asm/module.h	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/include/asm/module.h	2015-03-07 03:27:32.556672424 -0500
+@@ -15,6 +15,22 @@
+ #define MODULE_PROC_FAMILY "586MMX "
+ #elif defined CONFIG_MCORE2
+ #define MODULE_PROC_FAMILY "CORE2 "
++#elif defined CONFIG_MNATIVE
++#define MODULE_PROC_FAMILY "NATIVE "
++#elif defined CONFIG_MNEHALEM
++#define MODULE_PROC_FAMILY "NEHALEM "
++#elif defined CONFIG_MWESTMERE
++#define MODULE_PROC_FAMILY "WESTMERE "
++#elif defined CONFIG_MSILVERMONT
++#define MODULE_PROC_FAMILY "SILVERMONT "
++#elif defined CONFIG_MSANDYBRIDGE
++#define MODULE_PROC_FAMILY "SANDYBRIDGE "
++#elif defined CONFIG_MIVYBRIDGE
++#define MODULE_PROC_FAMILY "IVYBRIDGE "
++#elif defined CONFIG_MHASWELL
++#define MODULE_PROC_FAMILY "HASWELL "
++#elif defined CONFIG_MBROADWELL
++#define MODULE_PROC_FAMILY "BROADWELL "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -33,6 +49,20 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--- a/arch/x86/Kconfig.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Kconfig.cpu	2015-03-07 03:32:14.337713226 -0500
+@@ -137,9 +137,8 @@ config MPENTIUM4
+ 		-Paxville
+ 		-Dempsey
+ 
+-
+ config MK6
+-	bool "K6/K6-II/K6-III"
++	bool "AMD K6/K6-II/K6-III"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD K6-family processor.  Enables use of
+@@ -147,7 +146,7 @@ config MK6
+ 	  flags to GCC.
+ 
+ config MK7
+-	bool "Athlon/Duron/K7"
++	bool "AMD Athlon/Duron/K7"
+ 	depends on X86_32
+ 	---help---
+ 	  Select this for an AMD Athlon K7-family processor.  Enables use of
+@@ -155,12 +154,62 @@ config MK7
+ 	  flags to GCC.
+ 
+ config MK8
+-	bool "Opteron/Athlon64/Hammer/K8"
++	bool "AMD Opteron/Athlon64/Hammer/K8"
+ 	---help---
+ 	  Select this for an AMD Opteron or Athlon64 Hammer-family processor.
+ 	  Enables use of some extended instructions, and passes appropriate
+ 	  optimization flags to GCC.
+ 
++config MK8SSE3
++	bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
++	---help---
++	  Select this for AMD Opteron or Athlon64 Hammer-family processors with SSE3.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MK10
++	bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
++	---help---
++	  Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
++	  Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
++	  Enables use of some extended instructions, and passes appropriate
++	  optimization flags to GCC.
++
++config MBARCELONA
++	bool "AMD Barcelona"
++	---help---
++	  Select this for AMD Barcelona and newer processors.
++
++	  Enables -march=barcelona
++
++config MBOBCAT
++	bool "AMD Bobcat"
++	---help---
++	  Select this for AMD Bobcat processors.
++
++	  Enables -march=btver1
++
++config MBULLDOZER
++	bool "AMD Bulldozer"
++	---help---
++	  Select this for AMD Bulldozer processors.
++
++	  Enables -march=bdver1
++
++config MPILEDRIVER
++	bool "AMD Piledriver"
++	---help---
++	  Select this for AMD Piledriver processors.
++
++	  Enables -march=bdver2
++
++config MJAGUAR
++	bool "AMD Jaguar"
++	---help---
++	  Select this for AMD Jaguar processors.
++
++	  Enables -march=btver2
++
+ config MCRUSOE
+ 	bool "Crusoe"
+ 	depends on X86_32
+@@ -251,8 +300,17 @@ config MPSC
+ 	  using the cpu family field
+ 	  in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+ 
++config MATOM
++	bool "Intel Atom"
++	---help---
++
++	  Select this for the Intel Atom platform. Intel Atom CPUs have an
++	  in-order pipelining architecture and thus can benefit from
++	  accordingly optimized code. Use a recent GCC with specific Atom
++	  support in order to fully benefit from selecting this option.
++
+ config MCORE2
+-	bool "Core 2/newer Xeon"
++	bool "Intel Core 2"
+ 	---help---
+ 
+ 	  Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
+@@ -260,14 +318,63 @@ config MCORE2
+ 	  family in /proc/cpuinfo. Newer ones have 6 and older ones 15
+ 	  (not a typo)
+ 
+-config MATOM
+-	bool "Intel Atom"
++	  Enables -march=core2
++
++config MNEHALEM
++	bool "Intel Nehalem"
+ 	---help---
+ 
+-	  Select this for the Intel Atom platform. Intel Atom CPUs have an
+-	  in-order pipelining architecture and thus can benefit from
+-	  accordingly optimized code. Use a recent GCC with specific Atom
+-	  support in order to fully benefit from selecting this option.
++	  Select this for 1st Gen Core processors in the Nehalem family.
++
++	  Enables -march=nehalem
++
++config MWESTMERE
++	bool "Intel Westmere"
++	---help---
++
++	  Select this for the Intel Westmere formerly Nehalem-C family.
++
++	  Enables -march=westmere
++
++config MSILVERMONT
++	bool "Intel Silvermont"
++	---help---
++
++	  Select this for the Intel Silvermont platform.
++
++	  Enables -march=silvermont
++
++config MSANDYBRIDGE
++	bool "Intel Sandy Bridge"
++	---help---
++
++	  Select this for 2nd Gen Core processors in the Sandy Bridge family.
++
++	  Enables -march=sandybridge
++
++config MIVYBRIDGE
++	bool "Intel Ivy Bridge"
++	---help---
++
++	  Select this for 3rd Gen Core processors in the Ivy Bridge family.
++
++	  Enables -march=ivybridge
++
++config MHASWELL
++	bool "Intel Haswell"
++	---help---
++
++	  Select this for 4th Gen Core processors in the Haswell family.
++
++	  Enables -march=haswell
++
++config MBROADWELL
++	bool "Intel Broadwell"
++	---help---
++
++	  Select this for 5th Gen Core processors in the Broadwell family.
++
++	  Enables -march=broadwell
+ 
+ config GENERIC_CPU
+ 	bool "Generic-x86-64"
+@@ -276,6 +383,19 @@ config GENERIC_CPU
+ 	  Generic x86-64 CPU.
+ 	  Run equally well on all x86-64 CPUs.
+ 
++config MNATIVE
++ bool "Native optimizations autodetected by GCC"
++ ---help---
++
++   GCC 4.2 and above support -march=native, which automatically detects
++   the optimum settings to use based on your processor. -march=native 
++   also detects and applies additional settings beyond -march specific
++   to your CPU (e.g. -msse4). Unless you have a specific reason not to
++   (e.g. distcc cross-compiling), you should probably be using
++   -march=native rather than anything listed below.
++
++   Enables -march=native
++
+ endchoice
+ 
+ config X86_GENERIC
+@@ -300,7 +420,7 @@ config X86_INTERNODE_CACHE_SHIFT
+ config X86_L1_CACHE_SHIFT
+ 	int
+ 	default "7" if MPENTIUM4 || MPSC
+-	default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
++	default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ 	default "4" if MELAN || M486 || MGEODEGX1
+ 	default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
+ 
+@@ -331,11 +451,11 @@ config X86_ALIGNMENT_16
+ 
+ config X86_INTEL_USERCOPY
+ 	def_bool y
+-	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
++	depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE
+ 
+ config X86_USE_PPRO_CHECKSUM
+ 	def_bool y
+-	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
++	depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MATOM || MNATIVE
+ 
+ config X86_USE_3DNOW
+ 	def_bool y
+@@ -359,17 +479,17 @@ config X86_P6_NOP
+ 
+ config X86_TSC
+ 	def_bool y
+-	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
++	depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MNATIVE || MATOM) || X86_64
+ 
+ config X86_CMPXCHG64
+ 	def_bool y
+-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
++	depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
+ 
+ # this should be set for all -march=.. options where the compiler
+ # generates cmov.
+ config X86_CMOV
+ 	def_bool y
+-	depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
++	depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
+ 
+ config X86_MINIMUM_CPU_FAMILY
+ 	int
+--- a/arch/x86/Makefile	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile	2015-03-07 03:33:27.650843211 -0500
+@@ -92,13 +92,35 @@ else
+ 	KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+ 
+         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
++        cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
++        cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
++        cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
++        cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
++        cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
++        cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
++        cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
++        cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
+ 
+         cflags-$(CONFIG_MCORE2) += \
+-                $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
+-	cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
+-		$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++                $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
++        cflags-$(CONFIG_MNEHALEM) += \
++                $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
++        cflags-$(CONFIG_MWESTMERE) += \
++                $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
++        cflags-$(CONFIG_MSILVERMONT) += \
++                $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
++        cflags-$(CONFIG_MSANDYBRIDGE) += \
++                $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
++        cflags-$(CONFIG_MIVYBRIDGE) += \
++                $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
++        cflags-$(CONFIG_MHASWELL) += \
++                $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
++        cflags-$(CONFIG_MBROADWELL) += \
++                $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
++        cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
++                $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+         cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
+         KBUILD_CFLAGS += $(cflags-y)
+ 
+--- a/arch/x86/Makefile_32.cpu	2014-06-16 16:44:27.000000000 -0400
++++ b/arch/x86/Makefile_32.cpu	2015-03-07 03:34:15.203586024 -0500
+@@ -23,7 +23,15 @@ cflags-$(CONFIG_MK6)		+= -march=k6
+ # Please note, that patches that add -march=athlon-xp and friends are pointless.
+ # They make zero difference whatsoever to performance at this time.
+ cflags-$(CONFIG_MK7)		+= -march=athlon
++cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
+ cflags-$(CONFIG_MK8)		+= $(call cc-option,-march=k8,-march=athlon)
++cflags-$(CONFIG_MK8SSE3)		+= $(call cc-option,-march=k8-sse3,-march=athlon)
++cflags-$(CONFIG_MK10)	+= $(call cc-option,-march=amdfam10,-march=athlon)
++cflags-$(CONFIG_MBARCELONA)	+= $(call cc-option,-march=barcelona,-march=athlon)
++cflags-$(CONFIG_MBOBCAT)	+= $(call cc-option,-march=btver1,-march=athlon)
++cflags-$(CONFIG_MBULLDOZER)	+= $(call cc-option,-march=bdver1,-march=athlon)
++cflags-$(CONFIG_MPILEDRIVER)	+= $(call cc-option,-march=bdver2,-march=athlon)
++cflags-$(CONFIG_MJAGUAR)	+= $(call cc-option,-march=btver2,-march=athlon)
+ cflags-$(CONFIG_MCRUSOE)	+= -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MEFFICEON)	+= -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
+ cflags-$(CONFIG_MWINCHIPC6)	+= $(call cc-option,-march=winchip-c6,-march=i586)
+@@ -32,8 +40,15 @@ cflags-$(CONFIG_MCYRIXIII)	+= $(call cc-
+ cflags-$(CONFIG_MVIAC3_2)	+= $(call cc-option,-march=c3-2,-march=i686)
+ cflags-$(CONFIG_MVIAC7)		+= -march=i686
+ cflags-$(CONFIG_MCORE2)		+= -march=i686 $(call tune,core2)
+-cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
+-	$(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
++cflags-$(CONFIG_MNEHALEM)	+= -march=i686 $(call tune,nehalem)
++cflags-$(CONFIG_MWESTMERE)	+= -march=i686 $(call tune,westmere)
++cflags-$(CONFIG_MSILVERMONT)	+= -march=i686 $(call tune,silvermont)
++cflags-$(CONFIG_MSANDYBRIDGE)	+= -march=i686 $(call tune,sandybridge)
++cflags-$(CONFIG_MIVYBRIDGE)	+= -march=i686 $(call tune,ivybridge)
++cflags-$(CONFIG_MHASWELL)	+= -march=i686 $(call tune,haswell)
++cflags-$(CONFIG_MBROADWELL)	+= -march=i686 $(call tune,broadwell)
++cflags-$(CONFIG_MATOM)		+= $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
++	$(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
+ 
+ # AMD Elan support
+ cflags-$(CONFIG_MELAN)		+= -march=i486
+
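
The pattern used throughout the Makefile hunks above is the kernel's
cc-option macro (defined in scripts/Kbuild.include): it probes whether the
installed compiler accepts a flag and substitutes the second argument, or
nothing, when it does not. Below is a minimal standalone sketch of that
try-flag-else-fallback behavior, assuming only that gcc is on PATH; the
try-flag name and the /dev/null probe are simplifications for illustration,
not the kernel's actual implementation.

CC := gcc

# try-flag: expands to $(1) if $(CC) accepts it, otherwise to $(2).
# A simplified stand-in for the kernel's cc-option macro.
try-flag = $(shell if $(CC) $(1) -E -x c /dev/null >/dev/null 2>&1; \
             then echo "$(1)"; else echo "$(2)"; fi)

# Mirrors the patch line:
#   cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
# A compiler without amdfam10 support simply drops the flag.
MARCH := $(call try-flag,-march=amdfam10,)

all:
	@echo "selected flag: $(MARCH)"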


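The 32-bit entries in the Makefile_32.cpu hunk go through a tune helper
rather than calling cc-option directly; mainline defines it near the top of
that file (when the compiler supports -mtune) as
tune = $(call cc-option,-mtune=$(1),$(2)). The following self-contained
sketch shows how one of the new entries expands, again with a simplified
probe standing in for the real cc-option:

CC := gcc

# Simplified stand-in for cc-option (see scripts/Kbuild.include).
cc-probe = $(shell if $(CC) $(1) -E -x c /dev/null >/dev/null 2>&1; \
             then echo "$(1)"; else echo "$(2)"; fi)

# Mainline Makefile_32.cpu: tune = $(call cc-option,-mtune=$(1),$(2))
tune = $(call cc-probe,-mtune=$(1),$(2))

# Mirrors the patch line:
#   cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
# A nehalem-aware gcc yields "-march=i686 -mtune=nehalem"; an older
# compiler silently drops the -mtune flag.
cflags-demo := -march=i686 $(call tune,nehalem)

all:
	@echo "32-bit Nehalem cflags: $(cflags-demo)"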